summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/stable/vdso2
-rw-r--r--Documentation/ABI/testing/sysfs-block-rssd21
-rw-r--r--Documentation/ABI/testing/sysfs-block-zram2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg2
-rw-r--r--Documentation/ABI/testing/sysfs-class-backlight-driver-adp88702
-rw-r--r--Documentation/ABI/testing/sysfs-class-mtd17
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-xen_cpu20
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-lenovo-tpkbd38
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-savu77
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-iommu_groups14
-rw-r--r--Documentation/ABI/testing/sysfs-power13
-rw-r--r--Documentation/DocBook/80211.tmpl1
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml2
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml7
-rw-r--r--Documentation/ManagementStyle2
-rw-r--r--Documentation/RCU/checklist.txt39
-rw-r--r--Documentation/RCU/rcubarrier.txt15
-rw-r--r--Documentation/RCU/torture.txt9
-rw-r--r--Documentation/RCU/whatisRCU.txt6
-rw-r--r--Documentation/arm/Samsung-S3C24XX/H1940.txt2
-rw-r--r--Documentation/arm/Samsung-S3C24XX/SMDK2440.txt2
-rw-r--r--Documentation/cgroups/cgroups.txt27
-rw-r--r--Documentation/connector/cn_test.c13
-rw-r--r--Documentation/device-mapper/verity.txt131
-rw-r--r--Documentation/devices.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt23
-rw-r--r--Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt11
-rw-r--r--Documentation/devicetree/bindings/arm/armada-370-xp.txt24
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-aic.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/davinci/cp-intc.txt27
-rw-r--r--Documentation/devicetree/bindings/arm/mvebu-system-controller.txt17
-rw-r--r--Documentation/devicetree/bindings/arm/olimex.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/omap/omap.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/primecell.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-emc.txt (renamed from Documentation/devicetree/bindings/arm/tegra/emc.txt)2
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-mc.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/calxeda.txt17
-rw-r--r--Documentation/devicetree/bindings/clock/clock-bindings.txt117
-rw-r--r--Documentation/devicetree/bindings/clock/fixed-clock.txt21
-rw-r--r--Documentation/devicetree/bindings/fb/mxsfb.txt19
-rw-r--r--Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt14
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mxs.txt5
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-nmk.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/led.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/nvidia,tegra20-gpio.txt (renamed from Documentation/devicetree/bindings/gpio/gpio_nvidia.txt)0
-rw-r--r--Documentation/devicetree/bindings/input/fsl-mma8450.txt1
-rw-r--r--Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt (renamed from Documentation/devicetree/bindings/input/tegra-kbc.txt)0
-rw-r--r--Documentation/devicetree/bindings/iommu/nvidia,tegra30-smmu.txt21
-rw-r--r--Documentation/devicetree/bindings/mfd/mc13xxx.txt4
-rw-r--r--Documentation/devicetree/bindings/mfd/tps65910.txt90
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-esdhc.txt25
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt12
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt8
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt10
-rw-r--r--Documentation/devicetree/bindings/mmc/mmci.txt12
-rw-r--r--Documentation/devicetree/bindings/mmc/mxs-mmc.txt8
-rw-r--r--Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt (renamed from Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt)8
-rw-r--r--Documentation/devicetree/bindings/mmc/sdhci-pxa.txt21
-rw-r--r--Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt7
-rw-r--r--Documentation/devicetree/bindings/mtd/partition.txt2
-rw-r--r--Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt29
-rw-r--r--Documentation/devicetree/bindings/net/can/fsl-flexcan.txt3
-rw-r--r--Documentation/devicetree/bindings/net/davinci_emac.txt41
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt8
-rw-r--r--Documentation/devicetree/bindings/net/phy.txt12
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt3
-rw-r--r--Documentation/devicetree/bindings/nvec/nvidia,nvec.txt (renamed from Documentation/devicetree/bindings/nvec/nvec_nvidia.txt)0
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt93
-rw-r--r--Documentation/devicetree/bindings/regulator/fixed-regulator.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.txt5
-rw-r--r--Documentation/devicetree/bindings/regulator/tps65217.txt91
-rw-r--r--Documentation/devicetree/bindings/regulator/tps6586x.txt77
-rw-r--r--Documentation/devicetree/bindings/regulator/twl-regulator.txt1
-rw-r--r--Documentation/devicetree/bindings/rtc/dw-apb.txt25
-rw-r--r--Documentation/devicetree/bindings/rtc/stmp3xxx-rtc.txt16
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt (renamed from Documentation/devicetree/bindings/sound/tegra-audio-alc5632.txt)0
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.txt (renamed from Documentation/devicetree/bindings/sound/tegra-audio-trimslice.txt)0
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt (renamed from Documentation/devicetree/bindings/sound/tegra-audio-wm8753.txt)0
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt (renamed from Documentation/devicetree/bindings/sound/tegra-audio-wm8903.txt)0
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra20-das.txt (renamed from Documentation/devicetree/bindings/sound/tegra20-das.txt)0
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.txt (renamed from Documentation/devicetree/bindings/sound/tegra20-i2s.txt)0
-rw-r--r--Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt4
-rw-r--r--Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt (renamed from Documentation/devicetree/bindings/spi/spi_nvidia.txt)0
-rw-r--r--Documentation/devicetree/bindings/spi/spi-samsung.txt116
-rw-r--r--Documentation/devicetree/bindings/tty/serial/fsl-mxs-auart.txt27
-rw-r--r--Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt (renamed from Documentation/devicetree/bindings/usb/tegra-usb.txt)0
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/devicetree/bindings/watchdog/omap-wdt.txt14
-rw-r--r--Documentation/devicetree/usage-model.txt2
-rw-r--r--Documentation/feature-removal-schedule.txt44
-rw-r--r--Documentation/filesystems/Locking11
-rw-r--r--Documentation/filesystems/porting21
-rw-r--r--Documentation/filesystems/vfs.txt23
-rw-r--r--Documentation/hid/uhid.txt169
-rw-r--r--Documentation/hwmon/da905261
-rw-r--r--Documentation/hwmon/hih613037
-rw-r--r--Documentation/hwmon/submitting-patches3
-rw-r--r--Documentation/i2c/busses/i2c-i80113
-rw-r--r--Documentation/i2c/busses/i2c-piix49
-rw-r--r--Documentation/i2c/writing-clients23
-rw-r--r--Documentation/kdump/kdump.txt2
-rw-r--r--Documentation/kernel-parameters.txt8
-rw-r--r--Documentation/laptops/asus-laptop.txt3
-rw-r--r--Documentation/networking/batman-adv.txt5
-rw-r--r--Documentation/networking/bonding.txt6
-rw-r--r--Documentation/networking/bridge.txt13
-rw-r--r--Documentation/networking/caif/Linux-CAIF.txt91
-rw-r--r--Documentation/networking/can.txt186
-rw-r--r--Documentation/networking/ip-sysctl.txt62
-rw-r--r--Documentation/networking/openvswitch.txt2
-rw-r--r--Documentation/networking/s2io.txt14
-rw-r--r--Documentation/networking/stmmac.txt36
-rw-r--r--Documentation/networking/vxge.txt7
-rw-r--r--Documentation/nfc/nfc-hci.txt33
-rw-r--r--Documentation/power/devices.txt9
-rw-r--r--Documentation/power/swsusp.txt5
-rw-r--r--Documentation/prctl/no_new_privs.txt57
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt3
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt13
-rw-r--r--Documentation/sound/alsa/hdspm.txt2
-rw-r--r--Documentation/stable_kernel_rules.txt6
-rw-r--r--Documentation/video4linux/cpia2_overview.txt2
-rw-r--r--Documentation/video4linux/stv680.txt2
-rw-r--r--Documentation/virtual/kvm/api.txt51
-rw-r--r--Documentation/virtual/kvm/locking.txt130
-rw-r--r--Documentation/virtual/kvm/msr.txt33
-rw-r--r--Documentation/virtual/kvm/ppc-pv.txt2
-rw-r--r--Documentation/vm/frontswap.txt4
-rw-r--r--Documentation/workqueue.txt103
-rw-r--r--MAINTAINERS111
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/kernel/pci.c17
-rw-r--r--arch/arm/Kconfig59
-rw-r--r--arch/arm/Kconfig.debug26
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/dts/aks-cdu.dts113
-rw-r--r--arch/arm/boot/dts/am335x-bone.dts20
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts20
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi158
-rw-r--r--arch/arm/boot/dts/am3517-evm.dts32
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts42
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi68
-rw-r--r--arch/arm/boot/dts/armada-370.dtsi35
-rw-r--r--arch/arm/boot/dts/armada-xp-db.dts50
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi55
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi37
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi31
-rw-r--r--arch/arm/boot/dts/at91sam9g45.dtsi39
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi30
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi39
-rw-r--r--arch/arm/boot/dts/db8500.dtsi107
-rw-r--r--arch/arm/boot/dts/ea3250.dts174
-rw-r--r--arch/arm/boot/dts/evk-pro3.dts41
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts12
-rw-r--r--arch/arm/boot/dts/exynos4210-smdkv310.dts38
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi47
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts38
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi36
-rw-r--r--arch/arm/boot/dts/ge863-pro3.dtsi52
-rw-r--r--arch/arm/boot/dts/highbank.dts91
-rw-r--r--arch/arm/boot/dts/imx23-evk.dts66
-rw-r--r--arch/arm/boot/dts/imx23-olinuxino.dts44
-rw-r--r--arch/arm/boot/dts/imx23-stmp378x_devb.dts78
-rw-r--r--arch/arm/boot/dts/imx23.dtsi169
-rw-r--r--arch/arm/boot/dts/imx27-3ds.dts41
-rw-r--r--arch/arm/boot/dts/imx27.dtsi12
-rw-r--r--arch/arm/boot/dts/imx28-apx4devkit.dts198
-rw-r--r--arch/arm/boot/dts/imx28-cfa10036.dts52
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts164
-rw-r--r--arch/arm/boot/dts/imx28-m28evk.dts210
-rw-r--r--arch/arm/boot/dts/imx28-tx28.dts97
-rw-r--r--arch/arm/boot/dts/imx28.dtsi353
-rw-r--r--arch/arm/boot/dts/imx31-bug.dts31
-rw-r--r--arch/arm/boot/dts/imx31.dtsi88
-rw-r--r--arch/arm/boot/dts/imx51.dtsi8
-rw-r--r--arch/arm/boot/dts/imx53.dtsi14
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts6
-rw-r--r--arch/arm/boot/dts/imx6q-sabrelite.dts33
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi99
-rw-r--r--arch/arm/boot/dts/lpc32xx.dtsi79
-rw-r--r--arch/arm/boot/dts/omap2420-h4.dts20
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts4
-rw-r--r--arch/arm/boot/dts/omap3-evm.dts28
-rw-r--r--arch/arm/boot/dts/omap3.dtsi5
-rw-r--r--arch/arm/boot/dts/omap4-panda.dts43
-rw-r--r--arch/arm/boot/dts/omap4-pandaES.dts24
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts72
-rw-r--r--arch/arm/boot/dts/omap4-var_som.dts96
-rw-r--r--arch/arm/boot/dts/omap4.dtsi23
-rw-r--r--arch/arm/boot/dts/omap5-evm.dts20
-rw-r--r--arch/arm/boot/dts/omap5.dtsi184
-rw-r--r--arch/arm/boot/dts/phy3250.dts61
-rw-r--r--arch/arm/boot/dts/snowball.dts21
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi147
-rw-r--r--arch/arm/boot/dts/socfpga_cyclone5.dts34
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi11
-rw-r--r--arch/arm/boot/dts/spear320-evb.dts6
-rw-r--r--arch/arm/boot/dts/spear600.dtsi1
-rw-r--r--arch/arm/boot/dts/tegra20-harmony.dts (renamed from arch/arm/boot/dts/tegra-harmony.dts)1
-rw-r--r--arch/arm/boot/dts/tegra20-paz00.dts (renamed from arch/arm/boot/dts/tegra-paz00.dts)1
-rw-r--r--arch/arm/boot/dts/tegra20-seaboard.dts (renamed from arch/arm/boot/dts/tegra-seaboard.dts)88
-rw-r--r--arch/arm/boot/dts/tegra20-trimslice.dts (renamed from arch/arm/boot/dts/tegra-trimslice.dts)0
-rw-r--r--arch/arm/boot/dts/tegra20-ventana.dts (renamed from arch/arm/boot/dts/tegra-ventana.dts)1
-rw-r--r--arch/arm/boot/dts/tegra20-whistler.dts301
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi40
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu.dts (renamed from arch/arm/boot/dts/tegra-cardhu.dts)1
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi40
-rw-r--r--arch/arm/boot/dts/vexpress-v2m-rs1.dtsi11
-rw-r--r--arch/arm/boot/dts/vexpress-v2m.dtsi11
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts36
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts188
-rw-r--r--arch/arm/configs/exynos_defconfig92
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig51
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig61
-rw-r--r--arch/arm/configs/lpc32xx_defconfig26
-rw-r--r--arch/arm/configs/mvebu_defconfig46
-rw-r--r--arch/arm/configs/mxs_defconfig7
-rw-r--r--arch/arm/configs/omap2plus_defconfig3
-rw-r--r--arch/arm/configs/socfpga_defconfig83
-rw-r--r--arch/arm/configs/tegra_defconfig13
-rw-r--r--arch/arm/include/asm/atomic.h2
-rw-r--r--arch/arm/include/asm/domain.h18
-rw-r--r--arch/arm/include/asm/mach/irq.h2
-rw-r--r--arch/arm/include/asm/thread_info.h5
-rw-r--r--arch/arm/kernel/bios32.c4
-rw-r--r--arch/arm/kernel/fiq.c9
-rw-r--r--arch/arm/kernel/irq.c10
-rw-r--r--arch/arm/kernel/kprobes-test-arm.c4
-rw-r--r--arch/arm/kernel/perf_event.c2
-rw-r--r--arch/arm/kernel/ptrace.c3
-rw-r--r--arch/arm/kernel/signal.c46
-rw-r--r--arch/arm/kernel/signal.h2
-rw-r--r--arch/arm/kernel/traps.c2
-rw-r--r--arch/arm/kernel/vmlinux.lds.S2
-rw-r--r--arch/arm/mach-at91/Kconfig6
-rw-r--r--arch/arm/mach-at91/Makefile.boot2
-rw-r--r--arch/arm/mach-at91/at91rm9200.c1
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c84
-rw-r--r--arch/arm/mach-at91/at91sam9260.c1
-rw-r--r--arch/arm/mach-at91/at91sam9260_devices.c92
-rw-r--r--arch/arm/mach-at91/at91sam9261.c1
-rw-r--r--arch/arm/mach-at91/at91sam9261_devices.c68
-rw-r--r--arch/arm/mach-at91/at91sam9263.c1
-rw-r--r--arch/arm/mach-at91/at91sam9263_devices.c80
-rw-r--r--arch/arm/mach-at91/at91sam926x_time.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45.c1
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c108
-rw-r--r--arch/arm/mach-at91/at91sam9rl.c1
-rw-r--r--arch/arm/mach-at91/at91sam9rl_devices.c76
-rw-r--r--arch/arm/mach-at91/at91sam9x5.c40
-rw-r--r--arch/arm/mach-at91/at91x40.c2
-rw-r--r--arch/arm/mach-at91/board-1arm.c2
-rw-r--r--arch/arm/mach-at91/board-afeb-9260v1.c2
-rw-r--r--arch/arm/mach-at91/board-cam60.c2
-rw-r--r--arch/arm/mach-at91/board-carmeva.c2
-rw-r--r--arch/arm/mach-at91/board-cpu9krea.c2
-rw-r--r--arch/arm/mach-at91/board-cpuat91.c2
-rw-r--r--arch/arm/mach-at91/board-csb337.c2
-rw-r--r--arch/arm/mach-at91/board-csb637.c2
-rw-r--r--arch/arm/mach-at91/board-dt.c2
-rw-r--r--arch/arm/mach-at91/board-eb01.c2
-rw-r--r--arch/arm/mach-at91/board-eb9200.c2
-rw-r--r--arch/arm/mach-at91/board-ecbat91.c2
-rw-r--r--arch/arm/mach-at91/board-eco920.c2
-rw-r--r--arch/arm/mach-at91/board-flexibity.c2
-rw-r--r--arch/arm/mach-at91/board-foxg20.c2
-rw-r--r--arch/arm/mach-at91/board-gsia18s.c2
-rw-r--r--arch/arm/mach-at91/board-kafa.c2
-rw-r--r--arch/arm/mach-at91/board-kb9202.c2
-rw-r--r--arch/arm/mach-at91/board-neocore926.c2
-rw-r--r--arch/arm/mach-at91/board-pcontrol-g20.c2
-rw-r--r--arch/arm/mach-at91/board-picotux200.c2
-rw-r--r--arch/arm/mach-at91/board-qil-a9260.c2
-rw-r--r--arch/arm/mach-at91/board-rm9200dk.c2
-rw-r--r--arch/arm/mach-at91/board-rm9200ek.c2
-rw-r--r--arch/arm/mach-at91/board-rsi-ews.c2
-rw-r--r--arch/arm/mach-at91/board-sam9-l9260.c2
-rw-r--r--arch/arm/mach-at91/board-sam9260ek.c2
-rw-r--r--arch/arm/mach-at91/board-sam9261ek.c2
-rw-r--r--arch/arm/mach-at91/board-sam9263ek.c2
-rw-r--r--arch/arm/mach-at91/board-sam9g20ek.c3
-rw-r--r--arch/arm/mach-at91/board-sam9m10g45ek.c2
-rw-r--r--arch/arm/mach-at91/board-sam9rlek.c2
-rw-r--r--arch/arm/mach-at91/board-snapper9260.c2
-rw-r--r--arch/arm/mach-at91/board-stamp9g20.c3
-rw-r--r--arch/arm/mach-at91/board-usb-a926x.c4
-rw-r--r--arch/arm/mach-at91/board-yl-9200.c2
-rw-r--r--arch/arm/mach-at91/generic.h2
-rw-r--r--arch/arm/mach-at91/gpio.c9
-rw-r--r--arch/arm/mach-at91/include/mach/at91_aic.h36
-rw-r--r--arch/arm/mach-at91/include/mach/at91_spi.h81
-rw-r--r--arch/arm/mach-at91/include/mach/at91_ssc.h106
-rw-r--r--arch/arm/mach-at91/include/mach/entry-macro.S27
-rw-r--r--arch/arm/mach-at91/irq.c414
-rw-r--r--arch/arm/mach-at91/pm.c1
-rw-r--r--arch/arm/mach-clps711x/common.c6
-rw-r--r--arch/arm/mach-clps711x/include/mach/memory.h37
-rw-r--r--arch/arm/mach-clps711x/p720t.c34
-rw-r--r--arch/arm/mach-davinci/Kconfig7
-rw-r--r--arch/arm/mach-davinci/Makefile1
-rw-r--r--arch/arm/mach-davinci/cp_intc.c75
-rw-r--r--arch/arm/mach-davinci/include/mach/cp_intc.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/dm365.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/dm646x.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/entry-macro.S8
-rw-r--r--arch/arm/mach-davinci/pm_domain.c64
-rw-r--r--arch/arm/mach-dove/include/mach/bridge-regs.h1
-rw-r--r--arch/arm/mach-dove/include/mach/dove.h1
-rw-r--r--arch/arm/mach-ep93xx/core.c96
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c28
-rw-r--r--arch/arm/mach-ep93xx/include/mach/platform.h3
-rw-r--r--arch/arm/mach-ep93xx/soc.h1
-rw-r--r--arch/arm/mach-exynos/Kconfig15
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.c67
-rw-r--r--arch/arm/mach-exynos/clock-exynos5.c95
-rw-r--r--arch/arm/mach-exynos/common.c28
-rw-r--r--arch/arm/mach-exynos/include/mach/irqs.h4
-rw-r--r--arch/arm/mach-exynos/include/mach/map.h3
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-pmu.h5
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-usb-phy.h20
-rw-r--r--arch/arm/mach-exynos/include/mach/spi-clocks.h16
-rw-r--r--arch/arm/mach-exynos/mach-exynos4-dt.c6
-rw-r--r--arch/arm/mach-exynos/mach-exynos5-dt.c6
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c4
-rw-r--r--arch/arm/mach-exynos/mach-origen.c40
-rw-r--r--arch/arm/mach-exynos/mach-smdk4x12.c83
-rw-r--r--arch/arm/mach-exynos/mach-smdkv310.c18
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c4
-rw-r--r--arch/arm/mach-exynos/pm_domains.c13
-rw-r--r--arch/arm/mach-exynos/pmu.c18
-rw-r--r--arch/arm/mach-exynos/setup-spi.c33
-rw-r--r--arch/arm/mach-exynos/setup-usb-phy.c60
-rw-r--r--arch/arm/mach-highbank/Makefile2
-rw-r--r--arch/arm/mach-highbank/clock.c62
-rw-r--r--arch/arm/mach-highbank/highbank.c7
-rw-r--r--arch/arm/mach-imx/Kconfig21
-rw-r--r--arch/arm/mach-imx/Makefile1
-rw-r--r--arch/arm/mach-imx/clk-imx27.c4
-rw-r--r--arch/arm/mach-imx/clk-imx31.c23
-rw-r--r--arch/arm/mach-imx/clk-imx35.c9
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c25
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c29
-rw-r--r--arch/arm/mach-imx/devices-imx21.h4
-rw-r--r--arch/arm/mach-imx/devices-imx25.h4
-rw-r--r--arch/arm/mach-imx/devices-imx27.h6
-rw-r--r--arch/arm/mach-imx/devices-imx31.h10
-rw-r--r--arch/arm/mach-imx/devices-imx35.h12
-rw-r--r--arch/arm/mach-imx/devices-imx51.h2
-rw-r--r--arch/arm/mach-imx/devices-imx53.h2
-rw-r--r--arch/arm/mach-imx/ehci-imx25.c24
-rw-r--r--arch/arm/mach-imx/ehci-imx35.c24
-rw-r--r--arch/arm/mach-imx/ehci-imx5.c31
-rw-r--r--arch/arm/mach-imx/eukrea_mbimx27-baseboard.c3
-rw-r--r--arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c6
-rw-r--r--arch/arm/mach-imx/imx27-dt.c30
-rw-r--r--arch/arm/mach-imx/imx31-dt.c63
-rw-r--r--arch/arm/mach-imx/imx51-dt.c27
-rw-r--r--arch/arm/mach-imx/imx53-dt.c28
-rw-r--r--arch/arm/mach-imx/mach-apf9328.c7
-rw-r--r--arch/arm/mach-imx/mach-armadillo5x0.c18
-rw-r--r--arch/arm/mach-imx/mach-cpuimx27.c24
-rw-r--r--arch/arm/mach-imx/mach-cpuimx35.c13
-rw-r--r--arch/arm/mach-imx/mach-cpuimx51sd.c13
-rw-r--r--arch/arm/mach-imx/mach-eukrea_cpuimx25.c12
-rw-r--r--arch/arm/mach-imx/mach-imx27_visstrim_m10.c11
-rw-r--r--arch/arm/mach-imx/mach-imx27ipcam.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c74
-rw-r--r--arch/arm/mach-imx/mach-kzm_arm11_01.c20
-rw-r--r--arch/arm/mach-imx/mach-mx1ads.c1
-rw-r--r--arch/arm/mach-imx/mach-mx21ads.c16
-rw-r--r--arch/arm/mach-imx/mach-mx25_3ds.c4
-rw-r--r--arch/arm/mach-imx/mach-mx27_3ds.c17
-rw-r--r--arch/arm/mach-imx/mach-mx27ads.c14
-rw-r--r--arch/arm/mach-imx/mach-mx31_3ds.c28
-rw-r--r--arch/arm/mach-imx/mach-mx31ads.c63
-rw-r--r--arch/arm/mach-imx/mach-mx31lilly.c10
-rw-r--r--arch/arm/mach-imx/mach-mx31lite.c11
-rw-r--r--arch/arm/mach-imx/mach-mx31moboard.c12
-rw-r--r--arch/arm/mach-imx/mach-mx35_3ds.c29
-rw-r--r--arch/arm/mach-imx/mach-mx51_3ds.c5
-rw-r--r--arch/arm/mach-imx/mach-mx51_babbage.c10
-rw-r--r--arch/arm/mach-imx/mach-mx53_ard.c8
-rw-r--r--arch/arm/mach-imx/mach-mx53_evk.c3
-rw-r--r--arch/arm/mach-imx/mach-mx53_loco.c3
-rw-r--r--arch/arm/mach-imx/mach-mx53_smd.c3
-rw-r--r--arch/arm/mach-imx/mach-mxt_td60.c6
-rw-r--r--arch/arm/mach-imx/mach-pca100.c17
-rw-r--r--arch/arm/mach-imx/mach-pcm037.c36
-rw-r--r--arch/arm/mach-imx/mach-pcm038.c8
-rw-r--r--arch/arm/mach-imx/mach-pcm043.c16
-rw-r--r--arch/arm/mach-imx/mach-qong.c12
-rw-r--r--arch/arm/mach-imx/mach-scb9328.c7
-rw-r--r--arch/arm/mach-imx/mach-vpr200.c12
-rw-r--r--arch/arm/mach-imx/mm-imx1.c1
-rw-r--r--arch/arm/mach-imx/mm-imx21.c1
-rw-r--r--arch/arm/mach-imx/mm-imx25.c1
-rw-r--r--arch/arm/mach-imx/mm-imx27.c1
-rw-r--r--arch/arm/mach-imx/mm-imx3.c1
-rw-r--r--arch/arm/mach-imx/mm-imx5.c26
-rw-r--r--arch/arm/mach-imx/mx31lilly-db.c11
-rw-r--r--arch/arm/mach-imx/mx31lite-db.c9
-rw-r--r--arch/arm/mach-imx/mx51_efika.c3
-rw-r--r--arch/arm/mach-imx/pcm970-baseboard.c13
-rw-r--r--arch/arm/mach-imx/pm-imx5.c111
-rw-r--r--arch/arm/mach-integrator/core.c55
-rw-r--r--arch/arm/mach-integrator/include/mach/clkdev.h26
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c8
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c69
-rw-r--r--arch/arm/mach-lpc32xx/Kconfig32
-rw-r--r--arch/arm/mach-lpc32xx/Makefile.boot1
-rw-r--r--arch/arm/mach-lpc32xx/clock.c123
-rw-r--r--arch/arm/mach-lpc32xx/common.c10
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/gpio.h2
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/platform.h14
-rw-r--r--arch/arm/mach-lpc32xx/phy3250.c163
-rw-r--r--arch/arm/mach-lpc32xx/serial.c90
-rw-r--r--arch/arm/mach-mmp/include/mach/gpio-pxa.h29
-rw-r--r--arch/arm/mach-mv78xx0/include/mach/bridge-regs.h1
-rw-r--r--arch/arm/mach-mv78xx0/include/mach/mv78xx0.h2
-rw-r--r--arch/arm/mach-mvebu/Kconfig16
-rw-r--r--arch/arm/mach-mvebu/Makefile2
-rw-r--r--arch/arm/mach-mvebu/Makefile.boot3
-rw-r--r--arch/arm/mach-mvebu/armada-370-xp.c63
-rw-r--r--arch/arm/mach-mvebu/common.h23
-rw-r--r--arch/arm/mach-mvebu/include/mach/armada-370-xp.h22
-rw-r--r--arch/arm/mach-mvebu/include/mach/debug-macro.S24
-rw-r--r--arch/arm/mach-mvebu/include/mach/timex.h13
-rw-r--r--arch/arm/mach-mvebu/include/mach/uncompress.h43
-rw-r--r--arch/arm/mach-mvebu/irq-armada-370-xp.c133
-rw-r--r--arch/arm/mach-mvebu/system-controller.c105
-rw-r--r--arch/arm/mach-mxs/Kconfig1
-rw-r--r--arch/arm/mach-mxs/Makefile.boot9
-rw-r--r--arch/arm/mach-mxs/devices-mx23.h2
-rw-r--r--arch/arm/mach-mxs/devices-mx28.h2
-rw-r--r--arch/arm/mach-mxs/devices/platform-mxsfb.c2
-rw-r--r--arch/arm/mach-mxs/mach-apx4devkit.c11
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c215
-rw-r--r--arch/arm/mach-mxs/module-tx28.c2
-rw-r--r--arch/arm/mach-nomadik/Makefile2
-rw-r--r--arch/arm/mach-nomadik/board-nhk8815.c67
-rw-r--r--arch/arm/mach-nomadik/clock.c75
-rw-r--r--arch/arm/mach-nomadik/clock.h15
-rw-r--r--arch/arm/mach-nomadik/cpu-8815.c126
-rw-r--r--arch/arm/mach-nomadik/i2c-8815nhk.c38
-rw-r--r--arch/arm/mach-nomadik/include/mach/irqs.h85
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c2
-rw-r--r--arch/arm/mach-omap1/board-generic.c4
-rw-r--r--arch/arm/mach-omap1/board-h2.c2
-rw-r--r--arch/arm/mach-omap1/board-h3.c2
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c2
-rw-r--r--arch/arm/mach-omap1/board-innovator.c2
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c2
-rw-r--r--arch/arm/mach-omap1/board-osk.c2
-rw-r--r--arch/arm/mach-omap1/board-palmte.c2
-rw-r--r--arch/arm/mach-omap1/board-palmtt.c2
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c2
-rw-r--r--arch/arm/mach-omap1/board-sx1.c2
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c3
-rw-r--r--arch/arm/mach-omap1/clock_data.c3
-rw-r--r--arch/arm/mach-omap1/include/mach/usb.h165
-rw-r--r--arch/arm/mach-omap1/timer.c3
-rw-r--r--arch/arm/mach-omap1/usb.c116
-rw-r--r--arch/arm/mach-omap2/Kconfig25
-rw-r--r--arch/arm/mach-omap2/Makefile59
-rw-r--r--arch/arm/mach-omap2/am35xx-emac.c90
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c14
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c3
-rw-r--r--arch/arm/mach-omap2/board-apollon.c20
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c89
-rw-r--r--arch/arm/mach-omap2/board-flash.c5
-rw-r--r--arch/arm/mach-omap2/board-generic.c58
-rw-r--r--arch/arm/mach-omap2/board-h4.c13
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c78
-rw-r--r--arch/arm/mach-omap2/board-omap3logic.c3
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c5
-rw-r--r--arch/arm/mach-omap2/board-overo.c2
-rw-r--r--arch/arm/mach-omap2/clock.c18
-rw-r--r--arch/arm/mach-omap2/clock.h14
-rw-r--r--arch/arm/mach-omap2/clock2420_data.c43
-rw-r--r--arch/arm/mach-omap2/clock2430_data.c49
-rw-r--r--arch/arm/mach-omap2/clock33xx_data.c1105
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c86
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c133
-rw-r--r--arch/arm/mach-omap2/clock_common_data.c77
-rw-r--r--arch/arm/mach-omap2/clockdomain.h8
-rw-r--r--arch/arm/mach-omap2/clockdomain33xx.c74
-rw-r--r--arch/arm/mach-omap2/clockdomain44xx.c10
-rw-r--r--arch/arm/mach-omap2/clockdomains2420_data.c2
-rw-r--r--arch/arm/mach-omap2/clockdomains2430_data.c2
-rw-r--r--arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c1
-rw-r--r--arch/arm/mach-omap2/clockdomains33xx_data.c196
-rw-r--r--arch/arm/mach-omap2/clockdomains3xxx_data.c159
-rw-r--r--arch/arm/mach-omap2/clockdomains44xx_data.c4
-rw-r--r--arch/arm/mach-omap2/clockdomains_common_data.c24
-rw-r--r--arch/arm/mach-omap2/cm-regbits-33xx.h687
-rw-r--r--arch/arm/mach-omap2/cm-regbits-34xx.h4
-rw-r--r--arch/arm/mach-omap2/cm33xx.c313
-rw-r--r--arch/arm/mach-omap2/cm33xx.h420
-rw-r--r--arch/arm/mach-omap2/cminst44xx.c14
-rw-r--r--arch/arm/mach-omap2/cminst44xx.h25
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c33
-rw-r--r--arch/arm/mach-omap2/common-board-devices.h1
-rw-r--r--arch/arm/mach-omap2/common.c34
-rw-r--r--arch/arm/mach-omap2/common.h27
-rw-r--r--arch/arm/mach-omap2/control.c43
-rw-r--r--arch/arm/mach-omap2/control.h46
-rw-r--r--arch/arm/mach-omap2/cpuidle34xx.c79
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c8
-rw-r--r--arch/arm/mach-omap2/devices.c112
-rw-r--r--arch/arm/mach-omap2/dpll3xxx.c26
-rw-r--r--arch/arm/mach-omap2/drm.c61
-rw-r--r--arch/arm/mach-omap2/dsp.c4
-rw-r--r--arch/arm/mach-omap2/gpmc.c3
-rw-r--r--arch/arm/mach-omap2/hdq1w.c26
-rw-r--r--arch/arm/mach-omap2/id.c46
-rw-r--r--arch/arm/mach-omap2/include/mach/am35xx.h2
-rw-r--r--arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h1
-rw-r--r--arch/arm/mach-omap2/include/mach/debug-macro.S25
-rw-r--r--arch/arm/mach-omap2/include/mach/omap-wakeupgen.h7
-rw-r--r--arch/arm/mach-omap2/io.c59
-rw-r--r--arch/arm/mach-omap2/iomap.h27
-rw-r--r--arch/arm/mach-omap2/irq.c23
-rw-r--r--arch/arm/mach-omap2/mailbox.c2
-rw-r--r--arch/arm/mach-omap2/msdi.c73
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S21
-rw-r--r--arch/arm/mach-omap2/omap-hotplug.c24
-rw-r--r--arch/arm/mach-omap2/omap-iommu.c6
-rw-r--r--arch/arm/mach-omap2/omap-mpuss-lowpower.c6
-rw-r--r--arch/arm/mach-omap2/omap-smp.c52
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c114
-rw-r--r--arch/arm/mach-omap2/omap4-common.c14
-rw-r--r--arch/arm/mach-omap2/omap4-sar-layout.h12
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c573
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2420_data.c10
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2430_data.c16
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c8
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c188
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c41
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_common_data.c10
-rw-r--r--arch/arm/mach-omap2/omap_l3_noc.h22
-rw-r--r--arch/arm/mach-omap2/opp.c3
-rw-r--r--arch/arm/mach-omap2/pm.h19
-rw-r--r--arch/arm/mach-omap2/pm34xx.c78
-rw-r--r--arch/arm/mach-omap2/powerdomain.c22
-rw-r--r--arch/arm/mach-omap2/powerdomain.h27
-rw-r--r--arch/arm/mach-omap2/powerdomain33xx.c229
-rw-r--r--arch/arm/mach-omap2/powerdomains33xx_data.c185
-rw-r--r--arch/arm/mach-omap2/powerdomains3xxx_data.c139
-rw-r--r--arch/arm/mach-omap2/prcm-common.h14
-rw-r--r--arch/arm/mach-omap2/prcm.c25
-rw-r--r--arch/arm/mach-omap2/prm-regbits-33xx.h357
-rw-r--r--arch/arm/mach-omap2/prm2xxx_3xxx.c48
-rw-r--r--arch/arm/mach-omap2/prm2xxx_3xxx.h68
-rw-r--r--arch/arm/mach-omap2/prm33xx.c135
-rw-r--r--arch/arm/mach-omap2/prm33xx.h129
-rw-r--r--arch/arm/mach-omap2/prm44xx.c63
-rw-r--r--arch/arm/mach-omap2/prm44xx.h2
-rw-r--r--arch/arm/mach-omap2/prm_common.c70
-rw-r--r--arch/arm/mach-omap2/smartreflex-class3.c29
-rw-r--r--arch/arm/mach-omap2/sr_device.c41
-rw-r--r--arch/arm/mach-omap2/timer.c93
-rw-r--r--arch/arm/mach-omap2/twl-common.c15
-rw-r--r--arch/arm/mach-omap2/usb-fs.c359
-rw-r--r--arch/arm/mach-omap2/voltage.h22
-rw-r--r--arch/arm/mach-omap2/voltagedomains33xx_data.c43
-rw-r--r--arch/arm/mach-picoxcell/Makefile1
-rw-r--r--arch/arm/mach-picoxcell/common.c3
-rw-r--r--arch/arm/mach-picoxcell/common.h2
-rw-r--r--arch/arm/mach-prima2/include/mach/gpio.h13
-rw-r--r--arch/arm/mach-prima2/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-pxa/hx4700.c15
-rw-r--r--arch/arm/mach-rpc/irq.c2
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2416.c3
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2440.c2
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2443.c2
-rw-r--r--arch/arm/mach-s3c24xx/common-s3c2443.c4
-rw-r--r--arch/arm/mach-s3c24xx/common-smdk.c20
-rw-r--r--arch/arm/mach-s3c24xx/common.c1
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/bast-pmu.h40
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/gpio-nrs.h21
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/gta02.h69
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/regs-gpio.h17
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/regs-gpioj.h70
-rw-r--r--arch/arm/mach-s3c24xx/mach-gta02.c1
-rw-r--r--arch/arm/mach-s3c24xx/mach-mini2440.c4
-rw-r--r--arch/arm/mach-s3c24xx/mach-qt2410.c6
-rw-r--r--arch/arm/mach-s3c24xx/mach-rx1950.c1
-rw-r--r--arch/arm/mach-s3c24xx/pm-s3c2410.c12
-rw-r--r--arch/arm/mach-s3c24xx/pm-s3c2412.c1
-rw-r--r--arch/arm/mach-s3c24xx/s3c2412.c1
-rw-r--r--arch/arm/mach-s3c24xx/s3c244x.c1
-rw-r--r--arch/arm/mach-s3c24xx/setup-spi.c10
-rw-r--r--arch/arm/mach-s3c24xx/setup-ts.c6
-rw-r--r--arch/arm/mach-s3c64xx/clock.c20
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/crag6410.h4
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/dma.h1
-rw-r--r--arch/arm/mach-s3c64xx/include/mach/spi-clocks.h18
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c11
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410.c71
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/setup-spi.c19
-rw-r--r--arch/arm/mach-s5p64x0/clock-s5p6440.c12
-rw-r--r--arch/arm/mach-s5p64x0/clock-s5p6450.c12
-rw-r--r--arch/arm/mach-s5p64x0/dma.c2
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/spi-clocks.h20
-rw-r--r--arch/arm/mach-s5p64x0/setup-spi.c21
-rw-r--r--arch/arm/mach-s5pc100/clock.c30
-rw-r--r--arch/arm/mach-s5pc100/dma.c2
-rw-r--r--arch/arm/mach-s5pc100/include/mach/spi-clocks.h18
-rw-r--r--arch/arm/mach-s5pc100/setup-spi.c30
-rw-r--r--arch/arm/mach-s5pv210/Kconfig2
-rw-r--r--arch/arm/mach-s5pv210/clock.c14
-rw-r--r--arch/arm/mach-s5pv210/include/mach/spi-clocks.h17
-rw-r--r--arch/arm/mach-s5pv210/mach-aquila.c7
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c11
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkv210.c10
-rw-r--r--arch/arm/mach-s5pv210/setup-spi.c21
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c1
-rw-r--r--arch/arm/mach-shmobile/board-kzm9d.c1
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c1
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c3
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c8
-rw-r--r--arch/arm/mach-shmobile/intc-r8a7779.c7
-rw-r--r--arch/arm/mach-shmobile/platsmp.c10
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c2
-rw-r--r--arch/arm/mach-socfpga/Makefile5
-rw-r--r--arch/arm/mach-socfpga/Makefile.boot1
-rw-r--r--arch/arm/mach-socfpga/include/mach/debug-macro.S16
-rw-r--r--arch/arm/mach-socfpga/include/mach/timex.h (renamed from arch/arm/mach-at91/include/mach/irqs.h)33
-rw-r--r--arch/arm/mach-socfpga/include/mach/uncompress.h9
-rw-r--r--arch/arm/mach-socfpga/socfpga.c62
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c2
-rw-r--r--arch/arm/mach-spear6xx/spear6xx.c2
-rw-r--r--arch/arm/mach-tegra/Kconfig39
-rw-r--r--arch/arm/mach-tegra/Makefile18
-rw-r--r--arch/arm/mach-tegra/Makefile.boot13
-rw-r--r--arch/arm/mach-tegra/apbio.c194
-rw-r--r--arch/arm/mach-tegra/apbio.h19
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra20.c74
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra30.c8
-rw-r--r--arch/arm/mach-tegra/board-harmony-pcie.c15
-rw-r--r--arch/arm/mach-tegra/board-harmony-power.c25
-rw-r--r--arch/arm/mach-tegra/board-paz00.c7
-rw-r--r--arch/arm/mach-tegra/board-seaboard-pinmux.c197
-rw-r--r--arch/arm/mach-tegra/board-seaboard.c306
-rw-r--r--arch/arm/mach-tegra/board-seaboard.h47
-rw-r--r--arch/arm/mach-tegra/board.h9
-rw-r--r--arch/arm/mach-tegra/common.c3
-rw-r--r--arch/arm/mach-tegra/cpu-tegra.c6
-rw-r--r--arch/arm/mach-tegra/cpuidle.c6
-rw-r--r--arch/arm/mach-tegra/dma.c4
-rw-r--r--arch/arm/mach-tegra/pcie.c6
-rw-r--r--arch/arm/mach-tegra/powergate.c4
-rw-r--r--arch/arm/mach-tegra/sleep.S29
-rw-r--r--arch/arm/mach-tegra/tegra2_clocks.c58
-rw-r--r--arch/arm/mach-tegra/tegra30_clocks.c28
-rw-r--r--arch/arm/mach-tegra/timer.c4
-rw-r--r--arch/arm/mach-tegra/usb_phy.c16
-rw-r--r--arch/arm/mach-u300/Makefile2
-rw-r--r--arch/arm/mach-u300/clock.c1504
-rw-r--r--arch/arm/mach-u300/clock.h50
-rw-r--r--arch/arm/mach-u300/core.c21
-rw-r--r--arch/arm/mach-u300/timer.c2
-rw-r--r--arch/arm/mach-ux500/Kconfig1
-rw-r--r--arch/arm/mach-ux500/board-mop500-sdi.c4
-rw-r--r--arch/arm/mach-ux500/board-mop500.c89
-rw-r--r--arch/arm/mach-ux500/board-mop500.h3
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c4
-rw-r--r--arch/arm/mach-ux500/timer.c2
-rw-r--r--arch/arm/mach-versatile/pci.c1
-rw-r--r--arch/arm/mach-vexpress/Kconfig5
-rw-r--r--arch/arm/mach-vexpress/Makefile.boot3
-rw-r--r--arch/arm/mach-vexpress/ct-ca9x4.c64
-rw-r--r--arch/arm/mach-vexpress/include/mach/clkdev.h15
-rw-r--r--arch/arm/mach-vexpress/include/mach/debug-macro.S41
-rw-r--r--arch/arm/mach-vexpress/include/mach/motherboard.h28
-rw-r--r--arch/arm/mach-vexpress/include/mach/uncompress.h14
-rw-r--r--arch/arm/mach-vexpress/v2m.c296
-rw-r--r--arch/arm/mach-vt8500/Makefile2
-rw-r--r--arch/arm/mach-vt8500/bv07.c3
-rw-r--r--arch/arm/mach-vt8500/include/mach/restart.h17
-rw-r--r--arch/arm/mach-vt8500/include/mach/system.h13
-rw-r--r--arch/arm/mach-vt8500/restart.c54
-rw-r--r--arch/arm/mach-vt8500/wm8505_7in.c4
-rw-r--r--arch/arm/mm/dma-mapping.c4
-rw-r--r--arch/arm/mm/mm.h2
-rw-r--r--arch/arm/mm/mmu.c74
-rw-r--r--arch/arm/plat-mxc/3ds_debugboard.c50
-rw-r--r--arch/arm/plat-mxc/Makefile1
-rw-r--r--arch/arm/plat-mxc/avic.c35
-rw-r--r--arch/arm/plat-mxc/cpuidle.c80
-rw-r--r--arch/arm/plat-mxc/devices/platform-ipu-core.c5
-rw-r--r--arch/arm/plat-mxc/devices/platform-mxc_rtc.c5
-rw-r--r--arch/arm/plat-mxc/devices/platform-spi_imx.c2
-rw-r--r--arch/arm/plat-mxc/include/mach/3ds_debugboard.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h7
-rw-r--r--arch/arm/plat-mxc/include/mach/cpuidle.h22
-rw-r--r--arch/arm/plat-mxc/include/mach/devices-common.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/hardware.h27
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx3.h3
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx51.h14
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-v1.h7
-rw-r--r--arch/arm/plat-mxc/include/mach/ipu.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/irqs.h44
-rw-r--r--arch/arm/plat-mxc/include/mach/mx1.h111
-rw-r--r--arch/arm/plat-mxc/include/mach/mx21.h107
-rw-r--r--arch/arm/plat-mxc/include/mach/mx25.h72
-rw-r--r--arch/arm/plat-mxc/include/mach/mx27.h127
-rw-r--r--arch/arm/plat-mxc/include/mach/mx2x.h87
-rw-r--r--arch/arm/plat-mxc/include/mach/mx31.h118
-rw-r--r--arch/arm/plat-mxc/include/mach/mx35.h109
-rw-r--r--arch/arm/plat-mxc/include/mach/mx3x.h77
-rw-r--r--arch/arm/plat-mxc/include/mach/mx50.h187
-rw-r--r--arch/arm/plat-mxc/include/mach/mx51.h209
-rw-r--r--arch/arm/plat-mxc/include/mach/mx53.h217
-rw-r--r--arch/arm/plat-mxc/include/mach/mxc_ehci.h16
-rw-r--r--arch/arm/plat-mxc/time.c3
-rw-r--r--arch/arm/plat-mxc/tzic.c34
-rw-r--r--arch/arm/plat-omap/Kconfig35
-rw-r--r--arch/arm/plat-omap/Makefile6
-rw-r--r--arch/arm/plat-omap/common.c9
-rw-r--r--arch/arm/plat-omap/counter_32k.c16
-rw-r--r--arch/arm/plat-omap/dma.c59
-rw-r--r--arch/arm/plat-omap/dmtimer.c164
-rw-r--r--arch/arm/plat-omap/include/plat/board.h38
-rw-r--r--arch/arm/plat-omap/include/plat/clkdev_omap.h1
-rw-r--r--arch/arm/plat-omap/include/plat/clock.h2
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h59
-rw-r--r--arch/arm/plat-omap/include/plat/dmtimer.h22
-rw-r--r--arch/arm/plat-omap/include/plat/dsp.h3
-rw-r--r--arch/arm/plat-omap/include/plat/hardware.h1
-rw-r--r--arch/arm/plat-omap/include/plat/multi.h9
-rw-r--r--arch/arm/plat-omap/include/plat/mux.h2
-rw-r--r--arch/arm/plat-omap/include/plat/omap-secure.h5
-rw-r--r--arch/arm/plat-omap/include/plat/omap54xx.h32
-rw-r--r--arch/arm/plat-omap/include/plat/omap730.h102
-rw-r--r--arch/arm/plat-omap/include/plat/omap850.h102
-rw-r--r--arch/arm/plat-omap/include/plat/omap_hwmod.h21
-rw-r--r--arch/arm/plat-omap/include/plat/sdrc.h2
-rw-r--r--arch/arm/plat-omap/include/plat/serial.h14
-rw-r--r--arch/arm/plat-omap/include/plat/uncompress.h12
-rw-r--r--arch/arm/plat-omap/include/plat/usb.h196
-rw-r--r--arch/arm/plat-omap/include/plat/voltage.h21
-rw-r--r--arch/arm/plat-omap/mailbox.c13
-rw-r--r--arch/arm/plat-omap/sram.c17
-rw-r--r--arch/arm/plat-omap/usb.c145
-rw-r--r--arch/arm/plat-s3c24xx/irq.c2
-rw-r--r--arch/arm/plat-samsung/Kconfig12
-rw-r--r--arch/arm/plat-samsung/Makefile8
-rw-r--r--arch/arm/plat-samsung/adc.c8
-rw-r--r--arch/arm/plat-samsung/devs.c63
-rw-r--r--arch/arm/plat-samsung/dma-ops.c76
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h4
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-ops.h20
-rw-r--r--arch/arm/plat-samsung/include/plat/fb.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/map-s3c.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/pd.h30
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c64xx-spi.h39
-rw-r--r--arch/arm/plat-samsung/include/plat/watchdog-reset.h2
-rw-r--r--arch/arm/plat-samsung/pd.c95
-rw-r--r--arch/arm/plat-samsung/pwm.c4
-rw-r--r--arch/arm/plat-samsung/s3c-dma-ops.c39
-rw-r--r--arch/arm/plat-samsung/s5p-clock.c1
-rw-r--r--arch/arm/plat-versatile/Kconfig3
-rw-r--r--arch/arm/plat-versatile/Makefile2
-rw-r--r--arch/blackfin/Kconfig16
-rw-r--r--arch/blackfin/configs/BF609-EZKIT_defconfig2
-rw-r--r--arch/blackfin/include/asm/bfin-global.h8
-rw-r--r--arch/blackfin/include/asm/bfin_crc.h14
-rw-r--r--arch/blackfin/include/asm/bfin_serial.h2
-rw-r--r--arch/blackfin/include/asm/bfin_simple_timer.h6
-rw-r--r--arch/blackfin/include/asm/bfin_twi.h10
-rw-r--r--arch/blackfin/include/asm/context.S9
-rw-r--r--arch/blackfin/include/asm/dpmc.h2
-rw-r--r--arch/blackfin/include/asm/gpio.h2
-rw-r--r--arch/blackfin/include/asm/irq.h10
-rw-r--r--arch/blackfin/include/asm/mem_init.h212
-rw-r--r--arch/blackfin/include/asm/traps.h2
-rw-r--r--arch/blackfin/kernel/bfin_dma.c4
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbinit.c8
-rw-r--r--arch/blackfin/kernel/dma-mapping.c10
-rw-r--r--arch/blackfin/mach-bf527/boards/ezkit.c4
-rw-r--r--arch/blackfin/mach-bf537/boards/stamp.c2
-rw-r--r--arch/blackfin/mach-bf548/boards/ezkit.c4
-rw-r--r--arch/blackfin/mach-bf548/include/mach/gpio.h2
-rw-r--r--arch/blackfin/mach-bf561/boards/ezkit.c3
-rw-r--r--arch/blackfin/mach-bf609/Kconfig8
-rw-r--r--arch/blackfin/mach-bf609/Makefile4
-rw-r--r--arch/blackfin/mach-bf609/boards/ezkit.c304
-rw-r--r--arch/blackfin/mach-bf609/clock.c3
-rw-r--r--arch/blackfin/mach-bf609/dpm.S157
-rw-r--r--arch/blackfin/mach-bf609/hibernate.S65
-rw-r--r--arch/blackfin/mach-bf609/include/mach/anomaly.h141
-rw-r--r--arch/blackfin/mach-bf609/include/mach/defBF60x_base.h1
-rw-r--r--arch/blackfin/mach-bf609/include/mach/gpio.h2
-rw-r--r--arch/blackfin/mach-bf609/include/mach/irq.h4
-rw-r--r--arch/blackfin/mach-bf609/include/mach/pm.h9
-rw-r--r--arch/blackfin/mach-bf609/ints-priority.c156
-rw-r--r--arch/blackfin/mach-bf609/pm.c130
-rw-r--r--arch/blackfin/mach-common/clocks-init.c139
-rw-r--r--arch/blackfin/mach-common/cpufreq.c5
-rw-r--r--arch/blackfin/mach-common/entry.S7
-rw-r--r--arch/blackfin/mach-common/ints-priority.c331
-rw-r--r--arch/blackfin/mach-common/pm.c8
-rw-r--r--arch/c6x/boot/dts/evmc6678.dts83
-rw-r--r--arch/c6x/boot/dts/tms320c6678.dtsi146
-rw-r--r--arch/c6x/configs/evmc6678_defconfig42
-rw-r--r--arch/c6x/include/asm/irq.h2
-rw-r--r--arch/c6x/kernel/irq.c21
-rw-r--r--arch/c6x/kernel/setup.c4
-rw-r--r--arch/c6x/kernel/signal.c2
-rw-r--r--arch/c6x/kernel/soc.c2
-rw-r--r--arch/c6x/platforms/Kconfig4
-rw-r--r--arch/c6x/platforms/megamod-pic.c28
-rw-r--r--arch/c6x/platforms/plldata.c65
-rw-r--r--arch/cris/arch-v32/drivers/pci/bios.c5
-rw-r--r--arch/frv/mb93090-mb00/pci-vdk.c4
-rw-r--r--arch/h8300/include/asm/pgtable.h3
-rw-r--r--arch/h8300/include/asm/uaccess.h3
-rw-r--r--arch/h8300/kernel/signal.c2
-rw-r--r--arch/h8300/kernel/time.c1
-rw-r--r--arch/hexagon/kernel/smp.c2
-rw-r--r--arch/ia64/include/asm/iommu.h2
-rw-r--r--arch/ia64/include/asm/kvm.h1
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c2
-rw-r--r--arch/ia64/kernel/pci-dma.c1
-rw-r--r--arch/ia64/kernel/smpboot.c2
-rw-r--r--arch/ia64/kvm/Kconfig1
-rw-r--r--arch/ia64/kvm/vmm.c6
-rw-r--r--arch/ia64/mm/fault.c46
-rw-r--r--arch/ia64/pci/pci.c13
-rw-r--r--arch/m32r/boot/compressed/Makefile6
-rw-r--r--arch/m32r/boot/compressed/misc.c12
-rw-r--r--arch/m32r/include/asm/ptrace.h3
-rw-r--r--arch/m32r/include/asm/smp.h5
-rw-r--r--arch/m32r/kernel/ptrace.c7
-rw-r--r--arch/m32r/kernel/signal.c2
-rw-r--r--arch/m68k/Kconfig.bus7
-rw-r--r--arch/m68k/Kconfig.cpu18
-rw-r--r--arch/m68k/Makefile2
-rw-r--r--arch/m68k/include/asm/cacheflush_mm.h41
-rw-r--r--arch/m68k/include/asm/dma.h8
-rw-r--r--arch/m68k/include/asm/gpio.h179
-rw-r--r--arch/m68k/include/asm/io_mm.h50
-rw-r--r--arch/m68k/include/asm/m520xsim.h14
-rw-r--r--arch/m68k/include/asm/m523xsim.h1
-rw-r--r--arch/m68k/include/asm/m525xsim.h194
-rw-r--r--arch/m68k/include/asm/m527xsim.h1
-rw-r--r--arch/m68k/include/asm/m528xsim.h2
-rw-r--r--arch/m68k/include/asm/m532xsim.h17
-rw-r--r--arch/m68k/include/asm/m5441xsim.h276
-rw-r--r--arch/m68k/include/asm/m54xxacr.h4
-rw-r--r--arch/m68k/include/asm/m54xxpci.h138
-rw-r--r--arch/m68k/include/asm/m54xxsim.h3
-rw-r--r--arch/m68k/include/asm/mcf8390.h (renamed from arch/m68k/include/asm/mcfne.h)137
-rw-r--r--arch/m68k/include/asm/mcfclk.h43
-rw-r--r--arch/m68k/include/asm/mcfgpio.h343
-rw-r--r--arch/m68k/include/asm/mcfsim.h5
-rw-r--r--arch/m68k/include/asm/mcftimer.h2
-rw-r--r--arch/m68k/include/asm/mcfuart.h4
-rw-r--r--arch/m68k/include/asm/pci.h6
-rw-r--r--arch/m68k/include/asm/pinmux.h30
-rw-r--r--arch/m68k/kernel/Makefile1
-rw-r--r--arch/m68k/kernel/dma.c5
-rw-r--r--arch/m68k/kernel/entry.S452
-rw-r--r--arch/m68k/kernel/entry_mm.S419
-rw-r--r--arch/m68k/kernel/entry_no.S130
-rw-r--r--arch/m68k/kernel/module.c4
-rw-r--r--arch/m68k/kernel/pcibios.c109
-rw-r--r--arch/m68k/mm/memory.c2
-rw-r--r--arch/m68k/platform/coldfire/Makefile7
-rw-r--r--arch/m68k/platform/coldfire/clk.c108
-rw-r--r--arch/m68k/platform/coldfire/device.c57
-rw-r--r--arch/m68k/platform/coldfire/gpio.c172
-rw-r--r--arch/m68k/platform/coldfire/head.S6
-rw-r--r--arch/m68k/platform/coldfire/intc-525x.c91
-rw-r--r--arch/m68k/platform/coldfire/intc-simr.c26
-rw-r--r--arch/m68k/platform/coldfire/m5206.c9
-rw-r--r--arch/m68k/platform/coldfire/m520x.c103
-rw-r--r--arch/m68k/platform/coldfire/m523x.c22
-rw-r--r--arch/m68k/platform/coldfire/m5249.c10
-rw-r--r--arch/m68k/platform/coldfire/m525x.c66
-rw-r--r--arch/m68k/platform/coldfire/m5272.c11
-rw-r--r--arch/m68k/platform/coldfire/m527x.c43
-rw-r--r--arch/m68k/platform/coldfire/m528x.c33
-rw-r--r--arch/m68k/platform/coldfire/m5307.c9
-rw-r--r--arch/m68k/platform/coldfire/m532x.c154
-rw-r--r--arch/m68k/platform/coldfire/m5407.c9
-rw-r--r--arch/m68k/platform/coldfire/m5441x.c261
-rw-r--r--arch/m68k/platform/coldfire/m54xx.c7
-rw-r--r--arch/m68k/platform/coldfire/mcf8390.c38
-rw-r--r--arch/m68k/platform/coldfire/pci.c327
-rw-r--r--arch/m68k/platform/coldfire/pinmux.c28
-rw-r--r--arch/m68k/platform/coldfire/pit.c4
-rw-r--r--arch/m68k/platform/coldfire/timers.c2
-rw-r--r--arch/microblaze/pci/pci-common.c15
-rw-r--r--arch/mips/Kconfig2
-rw-r--r--arch/mips/ar7/platform.c4
-rw-r--r--arch/mips/bcm47xx/Kconfig1
-rw-r--r--arch/mips/bcm63xx/dev-pcmcia.c4
-rw-r--r--arch/mips/cavium-octeon/Kconfig4
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/include/asm/bitops.h1
-rw-r--r--arch/mips/include/asm/cmpxchg.h1
-rw-r--r--arch/mips/include/asm/cpu.h7
-rw-r--r--arch/mips/include/asm/gic.h15
-rw-r--r--arch/mips/include/asm/inst.h4
-rw-r--r--arch/mips/include/asm/io.h1
-rw-r--r--arch/mips/include/asm/irq.h1
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h2
-rw-r--r--arch/mips/include/asm/mips-boards/maltaint.h10
-rw-r--r--arch/mips/include/asm/mipsmtregs.h2
-rw-r--r--arch/mips/include/asm/switch_to.h6
-rw-r--r--arch/mips/include/asm/thread_info.h4
-rw-r--r--arch/mips/kernel/cpu-probe.c11
-rw-r--r--arch/mips/kernel/mips_ksyms.c8
-rw-r--r--arch/mips/kernel/octeon_switch.S2
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c5
-rw-r--r--arch/mips/kernel/r2300_switch.S15
-rw-r--r--arch/mips/kernel/r4k_switch.S12
-rw-r--r--arch/mips/kernel/smp-bmips.c15
-rw-r--r--arch/mips/kernel/smp.c12
-rw-r--r--arch/mips/kernel/smtc.c13
-rw-r--r--arch/mips/kernel/sync-r4k.c5
-rw-r--r--arch/mips/kernel/traps.c7
-rw-r--r--arch/mips/kernel/vmlinux.lds.S3
-rw-r--r--arch/mips/mm/Makefile4
-rw-r--r--arch/mips/mm/c-r4k.c5
-rw-r--r--arch/mips/mm/page-funcs.S50
-rw-r--r--arch/mips/mm/page.c67
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/mti-malta/malta-pci.c7
-rw-r--r--arch/mips/mti-malta/malta-setup.c2
-rw-r--r--arch/mips/netlogic/xlp/setup.c8
-rw-r--r--arch/mips/oprofile/common.c1
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c4
-rw-r--r--arch/mips/pci/fixup-fuloong2e.c12
-rw-r--r--arch/mips/pci/fixup-lemote2f.c12
-rw-r--r--arch/mips/pci/fixup-malta.c6
-rw-r--r--arch/mips/pci/fixup-mpc30x.c4
-rw-r--r--arch/mips/pci/fixup-sb1250.c6
-rw-r--r--arch/mips/pci/ops-tx4927.c2
-rw-r--r--arch/mips/pci/pci-ip27.c2
-rw-r--r--arch/mips/pci/pci-lantiq.c4
-rw-r--r--arch/mips/pci/pci-xlr.c61
-rw-r--r--arch/mips/pci/pci.c6
-rw-r--r--arch/mips/pmc-sierra/yosemite/ht.c11
-rw-r--r--arch/mips/pmc-sierra/yosemite/smp.c2
-rw-r--r--arch/mips/powertv/asic/asic-calliope.c2
-rw-r--r--arch/mips/powertv/asic/asic-cronus.c2
-rw-r--r--arch/mips/powertv/asic/asic-gaia.c2
-rw-r--r--arch/mips/powertv/asic/asic-zeus.c2
-rw-r--r--arch/mips/powertv/powertv_setup.c6
-rw-r--r--arch/mips/txx9/generic/pci.c8
-rw-r--r--arch/mn10300/include/asm/ptrace.h3
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/include/asm/timex.h11
-rw-r--r--arch/mn10300/kernel/cevt-mn10300.c10
-rw-r--r--arch/mn10300/kernel/internal.h2
-rw-r--r--arch/mn10300/kernel/irq.c4
-rw-r--r--arch/mn10300/kernel/signal.c5
-rw-r--r--arch/mn10300/kernel/smp.c2
-rw-r--r--arch/mn10300/kernel/traps.c1
-rw-r--r--arch/mn10300/mm/dma-alloc.c1
-rw-r--r--arch/mn10300/unit-asb2303/include/unit/timex.h4
-rw-r--r--arch/mn10300/unit-asb2303/smc91111.c1
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/timex.h4
-rw-r--r--arch/mn10300/unit-asb2305/unit-init.c1
-rw-r--r--arch/mn10300/unit-asb2364/include/unit/timex.h4
-rw-r--r--arch/parisc/include/asm/compat_rt_sigframe.h6
-rw-r--r--arch/parisc/kernel/pci.c5
-rw-r--r--arch/parisc/kernel/smp.c2
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/Kconfig.debug9
-rw-r--r--arch/powerpc/boot/Makefile57
-rw-r--r--arch/powerpc/boot/dts/bsc9131rdb.dts34
-rw-r--r--arch/powerpc/boot/dts/bsc9131rdb.dtsi142
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi193
-rw-r--r--arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi (renamed from arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi)84
-rw-r--r--arch/powerpc/boot/dts/fsl/p1021si-post.dtsi16
-rw-r--r--arch/powerpc/boot/dts/fsl/p3060si-post.dtsi302
-rw-r--r--arch/powerpc/boot/dts/mgcoge.dts23
-rw-r--r--arch/powerpc/boot/dts/mpc8536ds.dtsi8
-rw-r--r--arch/powerpc/boot/dts/mpc8544ds.dtsi9
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds.dtsi17
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts11
-rw-r--r--arch/powerpc/boot/dts/p1010rdb.dtsi12
-rw-r--r--arch/powerpc/boot/dts/p1021rdb-pc.dtsi (renamed from arch/powerpc/boot/dts/p1021rdb.dtsi)2
-rw-r--r--arch/powerpc/boot/dts/p1021rdb-pc_32b.dts (renamed from arch/powerpc/boot/dts/p1021rdb.dts)4
-rw-r--r--arch/powerpc/boot/dts/p1021rdb-pc_36b.dts (renamed from arch/powerpc/boot/dts/p1021rdb_36b.dts)4
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dtsi20
-rw-r--r--arch/powerpc/boot/dts/p1024rdb.dtsi228
-rw-r--r--arch/powerpc/boot/dts/p1024rdb_32b.dts87
-rw-r--r--arch/powerpc/boot/dts/p1024rdb_36b.dts87
-rw-r--r--arch/powerpc/boot/dts/p1025rdb.dtsi40
-rw-r--r--arch/powerpc/boot/dts/p2020ds.dtsi10
-rw-r--r--arch/powerpc/boot/dts/p2020rdb.dts2
-rw-r--r--arch/powerpc/boot/dts/p2041rdb.dts41
-rw-r--r--arch/powerpc/boot/dts/p3060qds.dts242
-rw-r--r--arch/powerpc/boot/dts/sbc8560.dts406
-rw-r--r--arch/powerpc/boot/flatdevtree_env.h27
-rw-r--r--arch/powerpc/configs/83xx/kmeter1_defconfig22
-rw-r--r--arch/powerpc/configs/85xx/sbc8560_defconfig65
-rw-r--r--arch/powerpc/configs/corenet32_smp_defconfig10
-rw-r--r--arch/powerpc/configs/corenet64_smp_defconfig66
-rw-r--r--arch/powerpc/configs/mgcoge_defconfig12
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig24
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig25
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/include/asm/asm-compat.h2
-rw-r--r--arch/powerpc/include/asm/code-patching.h4
-rw-r--r--arch/powerpc/include/asm/device.h3
-rw-r--r--arch/powerpc/include/asm/epapr_hcalls.h2
-rw-r--r--arch/powerpc/include/asm/exception-64s.h4
-rw-r--r--arch/powerpc/include/asm/hw_irq.h13
-rw-r--r--arch/powerpc/include/asm/immap_qe.h4
-rw-r--r--arch/powerpc/include/asm/io.h8
-rw-r--r--arch/powerpc/include/asm/iommu.h18
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h7
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_host.h6
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h3
-rw-r--r--arch/powerpc/include/asm/mmu.h7
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h1
-rw-r--r--arch/powerpc/include/asm/perf_event.h5
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h118
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h121
-rw-r--r--arch/powerpc/include/asm/processor.h2
-rw-r--r--arch/powerpc/include/asm/qe.h1
-rw-r--r--arch/powerpc/include/asm/reg.h8
-rw-r--r--arch/powerpc/include/asm/thread_info.h6
-rw-r--r--arch/powerpc/include/asm/trace.h45
-rw-r--r--arch/powerpc/include/asm/vdso.h2
-rw-r--r--arch/powerpc/include/asm/vio.h2
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/cpu_setup_a2.S6
-rw-r--r--arch/powerpc/kernel/dma.c10
-rw-r--r--arch/powerpc/kernel/entry_32.S30
-rw-r--r--arch/powerpc/kernel/entry_64.S134
-rw-r--r--arch/powerpc/kernel/epapr_hcalls.S25
-rw-r--r--arch/powerpc/kernel/epapr_paravirt.c52
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S10
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S3
-rw-r--r--arch/powerpc/kernel/fpu.S16
-rw-r--r--arch/powerpc/kernel/ftrace.c81
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S25
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c2
-rw-r--r--arch/powerpc/kernel/idle_6xx.S4
-rw-r--r--arch/powerpc/kernel/idle_book3e.S2
-rw-r--r--arch/powerpc/kernel/idle_e500.S4
-rw-r--r--arch/powerpc/kernel/idle_power4.S2
-rw-r--r--arch/powerpc/kernel/iommu.c291
-rw-r--r--arch/powerpc/kernel/irq.c50
-rw-r--r--arch/powerpc/kernel/kvm.c30
-rw-r--r--arch/powerpc/kernel/kvm_emul.S12
-rw-r--r--arch/powerpc/kernel/misc_32.S4
-rw-r--r--arch/powerpc/kernel/misc_64.S9
-rw-r--r--arch/powerpc/kernel/pci-common.c28
-rw-r--r--arch/powerpc/kernel/pci_64.c2
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c3
-rw-r--r--arch/powerpc/kernel/prom_init.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c27
-rw-r--r--arch/powerpc/kernel/setup_32.c24
-rw-r--r--arch/powerpc/kernel/smp.c5
-rw-r--r--arch/powerpc/kernel/vdso.c28
-rw-r--r--arch/powerpc/kernel/vdso32/Makefile4
-rw-r--r--arch/powerpc/kernel/vdso32/getcpu.S45
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32.lds.S3
-rw-r--r--arch/powerpc/kernel/vdso64/Makefile2
-rw-r--r--arch/powerpc/kernel/vdso64/getcpu.S45
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64.lds.S1
-rw-r--r--arch/powerpc/kernel/vio.c47
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c123
-rw-r--r--arch/powerpc/kvm/book3s_hv.c40
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c5
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c15
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S229
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S80
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c1
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S1
-rw-r--r--arch/powerpc/kvm/book3s_segment.S2
-rw-r--r--arch/powerpc/kvm/booke.c26
-rw-r--r--arch/powerpc/kvm/booke_emulate.c28
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S328
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S231
-rw-r--r--arch/powerpc/kvm/e500_emulate.c3
-rw-r--r--arch/powerpc/kvm/e500mc.c8
-rw-r--r--arch/powerpc/kvm/emulate.c16
-rw-r--r--arch/powerpc/kvm/powerpc.c18
-rw-r--r--arch/powerpc/lib/Makefile5
-rw-r--r--arch/powerpc/lib/checksum_64.S27
-rw-r--r--arch/powerpc/lib/code-patching.c14
-rw-r--r--arch/powerpc/lib/copypage_64.S4
-rw-r--r--arch/powerpc/lib/copypage_power7.S165
-rw-r--r--arch/powerpc/lib/copyuser_power7.S157
-rw-r--r--arch/powerpc/lib/crtsavres.S5
-rw-r--r--arch/powerpc/lib/hweight_64.S14
-rw-r--r--arch/powerpc/lib/ldstfp.S12
-rw-r--r--arch/powerpc/lib/memcpy_64.S4
-rw-r--r--arch/powerpc/lib/memcpy_power7.S647
-rw-r--r--arch/powerpc/lib/string.S2
-rw-r--r--arch/powerpc/lib/string_64.S202
-rw-r--r--arch/powerpc/lib/vmx-helper.c (renamed from arch/powerpc/lib/copyuser_power7_vmx.c)27
-rw-r--r--arch/powerpc/mm/hash_low_32.S8
-rw-r--r--arch/powerpc/mm/hash_low_64.S156
-rw-r--r--arch/powerpc/mm/numa.c6
-rw-r--r--arch/powerpc/mm/tlb_low_64e.S10
-rw-r--r--arch/powerpc/mm/tlb_nohash_low.S16
-rw-r--r--arch/powerpc/net/bpf_jit.h106
-rw-r--r--arch/powerpc/net/bpf_jit_64.S2
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c4
-rw-r--r--arch/powerpc/perf/callchain.c6
-rw-r--r--arch/powerpc/perf/core-book3s.c99
-rw-r--r--arch/powerpc/platforms/44x/currituck.c2
-rw-r--r--arch/powerpc/platforms/82xx/km82xx.c5
-rw-r--r--arch/powerpc/platforms/83xx/km83xx.c100
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig43
-rw-r--r--arch/powerpc/platforms/85xx/Makefile4
-rw-r--r--arch/powerpc/platforms/85xx/bsc913x_rdb.c67
-rw-r--r--arch/powerpc/platforms/85xx/corenet_ds.c2
-rw-r--r--arch/powerpc/platforms/85xx/ge_imp3a.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc8536_ds.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c97
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c2
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c22
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c116
-rw-r--r--arch/powerpc/platforms/85xx/p3060_qds.c77
-rw-r--r--arch/powerpc/platforms/85xx/qemu_e500.c72
-rw-r--r--arch/powerpc/platforms/85xx/sbc8560.c254
-rw-r--r--arch/powerpc/platforms/85xx/tqm85xx.c2
-rw-r--r--arch/powerpc/platforms/86xx/gef_ppc9a.c2
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc310.c2
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c2
-rw-r--r--arch/powerpc/platforms/86xx/mpc86xx_hpcn.c2
-rw-r--r--arch/powerpc/platforms/Kconfig9
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype4
-rw-r--r--arch/powerpc/platforms/cell/beat_hvCall.S28
-rw-r--r--arch/powerpc/platforms/cell/iommu.c4
-rw-r--r--arch/powerpc/platforms/cell/pervasive.c11
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c48
-rw-r--r--arch/powerpc/platforms/powernv/opal-takeover.S10
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c8
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c6
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c4
-rw-r--r--arch/powerpc/platforms/pseries/hvCall.S78
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c119
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c8
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/processor_idle.c53
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c16
-rw-r--r--arch/powerpc/platforms/pseries/smp.c1
-rw-r--r--arch/powerpc/sysdev/6xx-suspend.S2
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c73
-rw-r--r--arch/powerpc/sysdev/fsl_pci.h8
-rw-r--r--arch/powerpc/sysdev/mpic.c4
-rw-r--r--arch/powerpc/sysdev/mv64x60_pci.c2
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe.c3
-rw-r--r--arch/powerpc/xmon/xmon.c2
-rw-r--r--arch/s390/appldata/appldata.h2
-rw-r--r--arch/s390/appldata/appldata_base.c132
-rw-r--r--arch/s390/appldata/appldata_mem.c4
-rw-r--r--arch/s390/appldata/appldata_net_sum.c4
-rw-r--r--arch/s390/appldata/appldata_os.c4
-rw-r--r--arch/s390/crypto/aes_s390.c2
-rw-r--r--arch/s390/crypto/crypt_s390.h2
-rw-r--r--arch/s390/crypto/des_s390.c2
-rw-r--r--arch/s390/crypto/prng.c2
-rw-r--r--arch/s390/crypto/sha1_s390.c2
-rw-r--r--arch/s390/crypto/sha256_s390.c2
-rw-r--r--arch/s390/hypfs/hypfs.h3
-rw-r--r--arch/s390/hypfs/hypfs_dbfs.c2
-rw-r--r--arch/s390/hypfs/hypfs_diag.c1
-rw-r--r--arch/s390/hypfs/hypfs_vm.c2
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/airq.h4
-rw-r--r--arch/s390/include/asm/appldata.h4
-rw-r--r--arch/s390/include/asm/atomic.h8
-rw-r--r--arch/s390/include/asm/bitops.h10
-rw-r--r--arch/s390/include/asm/bugs.h4
-rw-r--r--arch/s390/include/asm/cache.h4
-rw-r--r--arch/s390/include/asm/ccwdev.h2
-rw-r--r--arch/s390/include/asm/ccwgroup.h2
-rw-r--r--arch/s390/include/asm/checksum.h10
-rw-r--r--arch/s390/include/asm/chpid.h2
-rw-r--r--arch/s390/include/asm/chsc.h2
-rw-r--r--arch/s390/include/asm/cio.h3
-rw-r--r--arch/s390/include/asm/cpcmd.h4
-rw-r--r--arch/s390/include/asm/cpu.h2
-rw-r--r--arch/s390/include/asm/cputime.h12
-rw-r--r--arch/s390/include/asm/crw.h2
-rw-r--r--arch/s390/include/asm/current.h4
-rw-r--r--arch/s390/include/asm/dasd.h3
-rw-r--r--arch/s390/include/asm/debug.h4
-rw-r--r--arch/s390/include/asm/delay.h4
-rw-r--r--arch/s390/include/asm/dma.h2
-rw-r--r--arch/s390/include/asm/ebcdic.h3
-rw-r--r--arch/s390/include/asm/elf.h2
-rw-r--r--arch/s390/include/asm/errno.h2
-rw-r--r--arch/s390/include/asm/etr.h2
-rw-r--r--arch/s390/include/asm/extmem.h4
-rw-r--r--arch/s390/include/asm/hardirq.h4
-rw-r--r--arch/s390/include/asm/idals.h5
-rw-r--r--arch/s390/include/asm/io.h4
-rw-r--r--arch/s390/include/asm/irqflags.h2
-rw-r--r--arch/s390/include/asm/kexec.h4
-rw-r--r--arch/s390/include/asm/kprobes.h2
-rw-r--r--arch/s390/include/asm/kvm.h2
-rw-r--r--arch/s390/include/asm/kvm_host.h4
-rw-r--r--arch/s390/include/asm/kvm_para.h2
-rw-r--r--arch/s390/include/asm/kvm_virtio.h2
-rw-r--r--arch/s390/include/asm/lowcore.h9
-rw-r--r--arch/s390/include/asm/mathemu.h3
-rw-r--r--arch/s390/include/asm/mman.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h2
-rw-r--r--arch/s390/include/asm/monwriter.h4
-rw-r--r--arch/s390/include/asm/nmi.h2
-rw-r--r--arch/s390/include/asm/page.h4
-rw-r--r--arch/s390/include/asm/pgalloc.h4
-rw-r--r--arch/s390/include/asm/pgtable.h4
-rw-r--r--arch/s390/include/asm/posix_types.h2
-rw-r--r--arch/s390/include/asm/processor.h14
-rw-r--r--arch/s390/include/asm/ptrace.h4
-rw-r--r--arch/s390/include/asm/qdio.h4
-rw-r--r--arch/s390/include/asm/qeth.h4
-rw-r--r--arch/s390/include/asm/reset.h2
-rw-r--r--arch/s390/include/asm/resource.h2
-rw-r--r--arch/s390/include/asm/rwsem.h4
-rw-r--r--arch/s390/include/asm/sclp.h4
-rw-r--r--arch/s390/include/asm/scsw.h2
-rw-r--r--arch/s390/include/asm/setup.h4
-rw-r--r--arch/s390/include/asm/shmparam.h2
-rw-r--r--arch/s390/include/asm/sigcontext.h4
-rw-r--r--arch/s390/include/asm/siginfo.h2
-rw-r--r--arch/s390/include/asm/signal.h2
-rw-r--r--arch/s390/include/asm/sigp.h32
-rw-r--r--arch/s390/include/asm/smp.h2
-rw-r--r--arch/s390/include/asm/socket.h2
-rw-r--r--arch/s390/include/asm/spinlock.h4
-rw-r--r--arch/s390/include/asm/stat.h2
-rw-r--r--arch/s390/include/asm/statfs.h2
-rw-r--r--arch/s390/include/asm/string.h4
-rw-r--r--arch/s390/include/asm/swab.h4
-rw-r--r--arch/s390/include/asm/sysinfo.h2
-rw-r--r--arch/s390/include/asm/tape390.h3
-rw-r--r--arch/s390/include/asm/termios.h2
-rw-r--r--arch/s390/include/asm/thread_info.h4
-rw-r--r--arch/s390/include/asm/timer.h51
-rw-r--r--arch/s390/include/asm/timex.h4
-rw-r--r--arch/s390/include/asm/types.h2
-rw-r--r--arch/s390/include/asm/uaccess.h6
-rw-r--r--arch/s390/include/asm/ucontext.h2
-rw-r--r--arch/s390/include/asm/unistd.h2
-rw-r--r--arch/s390/include/asm/user.h2
-rw-r--r--arch/s390/include/asm/vtimer.h33
-rw-r--r--arch/s390/include/asm/vtoc.h4
-rw-r--r--arch/s390/include/asm/zcrypt.h2
-rw-r--r--arch/s390/kernel/asm-offsets.c12
-rw-r--r--arch/s390/kernel/base.S5
-rw-r--r--arch/s390/kernel/bitmap.c2
-rw-r--r--arch/s390/kernel/compat_exec_domain.c2
-rw-r--r--arch/s390/kernel/compat_linux.c4
-rw-r--r--arch/s390/kernel/compat_signal.c4
-rw-r--r--arch/s390/kernel/compat_wrapper.S3
-rw-r--r--arch/s390/kernel/cpcmd.c4
-rw-r--r--arch/s390/kernel/crash.c4
-rw-r--r--arch/s390/kernel/debug.c1
-rw-r--r--arch/s390/kernel/dis.c3
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/ebcdic.c3
-rw-r--r--arch/s390/kernel/entry.S49
-rw-r--r--arch/s390/kernel/entry.h4
-rw-r--r--arch/s390/kernel/entry64.S52
-rw-r--r--arch/s390/kernel/head.S2
-rw-r--r--arch/s390/kernel/head31.S4
-rw-r--r--arch/s390/kernel/head64.S4
-rw-r--r--arch/s390/kernel/head_kdump.S6
-rw-r--r--arch/s390/kernel/ipl.c16
-rw-r--r--arch/s390/kernel/irq.c2
-rw-r--r--arch/s390/kernel/kprobes.c2
-rw-r--r--arch/s390/kernel/lgr.c15
-rw-r--r--arch/s390/kernel/machine_kexec.c4
-rw-r--r--arch/s390/kernel/mcount.S2
-rw-r--r--arch/s390/kernel/mcount64.S2
-rw-r--r--arch/s390/kernel/module.c5
-rw-r--r--arch/s390/kernel/nmi.c2
-rw-r--r--arch/s390/kernel/os_info.c2
-rw-r--r--arch/s390/kernel/process.c4
-rw-r--r--arch/s390/kernel/processor.c6
-rw-r--r--arch/s390/kernel/ptrace.c2
-rw-r--r--arch/s390/kernel/reipl.S7
-rw-r--r--arch/s390/kernel/reipl64.S5
-rw-r--r--arch/s390/kernel/relocate_kernel.S7
-rw-r--r--arch/s390/kernel/relocate_kernel64.S9
-rw-r--r--arch/s390/kernel/sclp.S2
-rw-r--r--arch/s390/kernel/setup.c29
-rw-r--r--arch/s390/kernel/signal.c4
-rw-r--r--arch/s390/kernel/smp.c116
-rw-r--r--arch/s390/kernel/stacktrace.c4
-rw-r--r--arch/s390/kernel/swsusp_asm64.S13
-rw-r--r--arch/s390/kernel/sys_s390.c4
-rw-r--r--arch/s390/kernel/time.c3
-rw-r--r--arch/s390/kernel/topology.c2
-rw-r--r--arch/s390/kernel/traps.c4
-rw-r--r--arch/s390/kernel/vtime.c370
-rw-r--r--arch/s390/kvm/diag.c4
-rw-r--r--arch/s390/kvm/gaccess.h4
-rw-r--r--arch/s390/kvm/intercept.c4
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c5
-rw-r--r--arch/s390/kvm/kvm-s390.h4
-rw-r--r--arch/s390/kvm/priv.c2
-rw-r--r--arch/s390/kvm/sigp.c121
-rw-r--r--arch/s390/lib/delay.c4
-rw-r--r--arch/s390/lib/div64.c4
-rw-r--r--arch/s390/lib/spinlock.c3
-rw-r--r--arch/s390/lib/string.c3
-rw-r--r--arch/s390/lib/uaccess.h2
-rw-r--r--arch/s390/lib/uaccess_mvcos.c4
-rw-r--r--arch/s390/lib/uaccess_pt.c2
-rw-r--r--arch/s390/lib/uaccess_std.c4
-rw-r--r--arch/s390/math-emu/math.c4
-rw-r--r--arch/s390/mm/cmm.c2
-rw-r--r--arch/s390/mm/extmem.c3
-rw-r--r--arch/s390/mm/fault.c4
-rw-r--r--arch/s390/mm/hugetlbpage.c2
-rw-r--r--arch/s390/mm/init.c4
-rw-r--r--arch/s390/mm/mmap.c2
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/mm/vmem.c2
-rw-r--r--arch/s390/oprofile/backtrace.c6
-rw-r--r--arch/s390/oprofile/hwsampler.c4
-rw-r--r--arch/s390/oprofile/init.c6
-rw-r--r--arch/s390/oprofile/op_counter.h6
-rw-r--r--arch/sh/Kconfig1
-rw-r--r--arch/sh/boards/Kconfig5
-rw-r--r--arch/sh/boards/board-polaris.c2
-rw-r--r--arch/sh/boards/mach-dreamcast/irq.c32
-rw-r--r--arch/sh/boards/mach-se/7343/irq.c129
-rw-r--r--arch/sh/boards/mach-se/7343/setup.c10
-rw-r--r--arch/sh/boards/mach-se/7722/irq.c131
-rw-r--r--arch/sh/boards/mach-se/7722/setup.c6
-rw-r--r--arch/sh/boards/mach-se/7724/irq.c36
-rw-r--r--arch/sh/boards/mach-x3proto/gpio.c57
-rw-r--r--arch/sh/cchips/hd6446x/hd64461.c33
-rw-r--r--arch/sh/drivers/pci/fixups-dreamcast.c2
-rw-r--r--arch/sh/drivers/pci/fixups-sdk7786.c4
-rw-r--r--arch/sh/drivers/pci/pci.c7
-rw-r--r--arch/sh/include/asm/bug.h4
-rw-r--r--arch/sh/include/asm/io_noioport.h17
-rw-r--r--arch/sh/include/asm/kdebug.h2
-rw-r--r--arch/sh/include/asm/siu.h1
-rw-r--r--arch/sh/include/mach-se/mach/se7343.h7
-rw-r--r--arch/sh/include/mach-se/mach/se7722.h10
-rw-r--r--arch/sh/kernel/cpu/sh3/serial-sh7720.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7722.c1
-rw-r--r--arch/sh/kernel/cpu/sh5/unwind.c63
-rw-r--r--arch/sh/kernel/dumpstack.c58
-rw-r--r--arch/sh/kernel/irq.c10
-rw-r--r--arch/sh/kernel/traps.c71
-rw-r--r--arch/sh/kernel/traps_32.c121
-rw-r--r--arch/sh/kernel/traps_64.c589
-rw-r--r--arch/sh/lib64/Makefile2
-rw-r--r--arch/sh/lib64/dbg.c248
-rw-r--r--arch/sh/mm/tlb-sh5.c2
-rw-r--r--arch/sparc/kernel/leon_pci.c8
-rw-r--r--arch/sparc/kernel/of_device_64.c2
-rw-r--r--arch/sparc/kernel/pci.c102
-rw-r--r--arch/sparc/kernel/pci_impl.h1
-rw-r--r--arch/sparc/kernel/pcic.c13
-rw-r--r--arch/sparc/kernel/smp_64.c7
-rw-r--r--arch/sparc/net/bpf_jit_comp.c4
-rw-r--r--arch/tile/Kconfig42
-rw-r--r--arch/tile/Makefile2
-rw-r--r--arch/tile/gxio/Kconfig28
-rw-r--r--arch/tile/gxio/Makefile9
-rw-r--r--arch/tile/gxio/dma_queue.c176
-rw-r--r--arch/tile/gxio/iorpc_globals.c89
-rw-r--r--arch/tile/gxio/iorpc_mpipe.c529
-rw-r--r--arch/tile/gxio/iorpc_mpipe_info.c85
-rw-r--r--arch/tile/gxio/iorpc_trio.c327
-rw-r--r--arch/tile/gxio/iorpc_usb_host.c99
-rw-r--r--arch/tile/gxio/kiorpc.c61
-rw-r--r--arch/tile/gxio/mpipe.c545
-rw-r--r--arch/tile/gxio/trio.c49
-rw-r--r--arch/tile/gxio/usb_host.c91
-rw-r--r--arch/tile/include/arch/mpipe.h359
-rw-r--r--arch/tile/include/arch/mpipe_constants.h42
-rw-r--r--arch/tile/include/arch/mpipe_def.h39
-rw-r--r--arch/tile/include/arch/mpipe_shm.h509
-rw-r--r--arch/tile/include/arch/mpipe_shm_def.h23
-rw-r--r--arch/tile/include/arch/trio.h72
-rw-r--r--arch/tile/include/arch/trio_constants.h36
-rw-r--r--arch/tile/include/arch/trio_def.h41
-rw-r--r--arch/tile/include/arch/trio_pcie_intfc.h229
-rw-r--r--arch/tile/include/arch/trio_pcie_intfc_def.h32
-rw-r--r--arch/tile/include/arch/trio_pcie_rc.h156
-rw-r--r--arch/tile/include/arch/trio_pcie_rc_def.h24
-rw-r--r--arch/tile/include/arch/trio_shm.h125
-rw-r--r--arch/tile/include/arch/trio_shm_def.h19
-rw-r--r--arch/tile/include/arch/usb_host.h26
-rw-r--r--arch/tile/include/arch/usb_host_def.h19
-rw-r--r--arch/tile/include/asm/Kbuild1
-rw-r--r--arch/tile/include/asm/cache.h12
-rw-r--r--arch/tile/include/asm/checksum.h18
-rw-r--r--arch/tile/include/asm/device.h33
-rw-r--r--arch/tile/include/asm/dma-mapping.h146
-rw-r--r--arch/tile/include/asm/fixmap.h14
-rw-r--r--arch/tile/include/asm/homecache.h19
-rw-r--r--arch/tile/include/asm/io.h144
-rw-r--r--arch/tile/include/asm/memprof.h33
-rw-r--r--arch/tile/include/asm/page.h7
-rw-r--r--arch/tile/include/asm/pci.h151
-rw-r--r--arch/tile/include/gxio/common.h40
-rw-r--r--arch/tile/include/gxio/dma_queue.h161
-rw-r--r--arch/tile/include/gxio/iorpc_globals.h38
-rw-r--r--arch/tile/include/gxio/iorpc_mpipe.h136
-rw-r--r--arch/tile/include/gxio/iorpc_mpipe_info.h46
-rw-r--r--arch/tile/include/gxio/iorpc_trio.h97
-rw-r--r--arch/tile/include/gxio/iorpc_usb_host.h46
-rw-r--r--arch/tile/include/gxio/kiorpc.h29
-rw-r--r--arch/tile/include/gxio/mpipe.h1736
-rw-r--r--arch/tile/include/gxio/trio.h298
-rw-r--r--arch/tile/include/gxio/usb_host.h87
-rw-r--r--arch/tile/include/hv/drv_mpipe_intf.h602
-rw-r--r--arch/tile/include/hv/drv_trio_intf.h195
-rw-r--r--arch/tile/include/hv/drv_usb_host_intf.h39
-rw-r--r--arch/tile/include/hv/iorpc.h714
-rw-r--r--arch/tile/kernel/Makefile5
-rw-r--r--arch/tile/kernel/backtrace.c9
-rw-r--r--arch/tile/kernel/pci-dma.c536
-rw-r--r--arch/tile/kernel/pci.c19
-rw-r--r--arch/tile/kernel/pci_gx.c1543
-rw-r--r--arch/tile/kernel/setup.c45
-rw-r--r--arch/tile/kernel/smpboot.c10
-rw-r--r--arch/tile/kernel/usb.c69
-rw-r--r--arch/tile/lib/checksum.c15
-rw-r--r--arch/tile/mm/homecache.c156
-rw-r--r--arch/tile/mm/init.c70
-rw-r--r--arch/tile/mm/pgtable.c7
-rw-r--r--arch/um/drivers/mconsole_kern.c1
-rw-r--r--arch/um/drivers/net_kern.c2
-rw-r--r--arch/unicore32/Kconfig1
-rw-r--r--arch/unicore32/kernel/pci.c2
-rw-r--r--arch/x86/Makefile3
-rw-r--r--arch/x86/ia32/ia32_signal.c2
-rw-r--r--arch/x86/include/asm/alternative.h74
-rw-r--r--arch/x86/include/asm/amd_nb.h21
-rw-r--r--arch/x86/include/asm/apic.h64
-rw-r--r--arch/x86/include/asm/bitops.h7
-rw-r--r--arch/x86/include/asm/cpufeature.h2
-rw-r--r--arch/x86/include/asm/emergency-restart.h2
-rw-r--r--arch/x86/include/asm/floppy.h2
-rw-r--r--arch/x86/include/asm/hypervisor.h1
-rw-r--r--arch/x86/include/asm/iommu.h1
-rw-r--r--arch/x86/include/asm/kvm.h1
-rw-r--r--arch/x86/include/asm/kvm_emulate.h6
-rw-r--r--arch/x86/include/asm/kvm_host.h35
-rw-r--r--arch/x86/include/asm/kvm_para.h7
-rw-r--r--arch/x86/include/asm/msr.h46
-rw-r--r--arch/x86/include/asm/nmi.h20
-rw-r--r--arch/x86/include/asm/paravirt.h41
-rw-r--r--arch/x86/include/asm/paravirt_types.h2
-rw-r--r--arch/x86/include/asm/pci_x86.h15
-rw-r--r--arch/x86/include/asm/perf_event.h22
-rw-r--r--arch/x86/include/asm/pgtable-2level.h4
-rw-r--r--arch/x86/include/asm/pgtable-3level.h6
-rw-r--r--arch/x86/include/asm/pgtable_64.h8
-rw-r--r--arch/x86/include/asm/processor-flags.h2
-rw-r--r--arch/x86/include/asm/realmode.h3
-rw-r--r--arch/x86/include/asm/reboot.h4
-rw-r--r--arch/x86/include/asm/smp.h5
-rw-r--r--arch/x86/include/asm/uaccess_64.h11
-rw-r--r--arch/x86/include/asm/uprobes.h2
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h28
-rw-r--r--arch/x86/include/asm/vmx.h6
-rw-r--r--arch/x86/include/asm/x2apic.h18
-rw-r--r--arch/x86/include/asm/x86_init.h4
-rw-r--r--arch/x86/include/asm/xen/hypercall.h8
-rw-r--r--arch/x86/kernel/acpi/boot.c27
-rw-r--r--arch/x86/kernel/alternative.c19
-rw-r--r--arch/x86/kernel/amd_nb.c11
-rw-r--r--arch/x86/kernel/apic/apic.c36
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c76
-rw-r--r--arch/x86/kernel/apic/apic_noop.c9
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c50
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c48
-rw-r--r--arch/x86/kernel/apic/es7000_32.c51
-rw-r--r--arch/x86/kernel/apic/io_apic.c350
-rw-r--r--arch/x86/kernel/apic/numaq_32.c30
-rw-r--r--arch/x86/kernel/apic/probe_32.c23
-rw-r--r--arch/x86/kernel/apic/probe_64.c11
-rw-r--r--arch/x86/kernel/apic/summit_32.c68
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c82
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c39
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c45
-rw-r--r--arch/x86/kernel/apm_32.c29
-rw-r--r--arch/x86/kernel/cpu/Makefile4
-rw-r--r--arch/x86/kernel/cpu/amd.c39
-rw-r--r--arch/x86/kernel/cpu/bugs.c20
-rw-r--r--arch/x86/kernel/cpu/common.c2
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c32
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c286
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.pl25
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c6
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event.c111
-rw-r--r--arch/x86/kernel/cpu/perf_event.h26
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c103
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c134
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c1850
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h424
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c16
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c4
-rw-r--r--arch/x86/kernel/cpu/scattered.c2
-rw-r--r--arch/x86/kernel/dumpstack.c5
-rw-r--r--arch/x86/kernel/dumpstack_32.c25
-rw-r--r--arch/x86/kernel/dumpstack_64.c21
-rw-r--r--arch/x86/kernel/entry_64.S20
-rw-r--r--arch/x86/kernel/irq.c4
-rw-r--r--arch/x86/kernel/kgdb.c8
-rw-r--r--arch/x86/kernel/kvm.c64
-rw-r--r--arch/x86/kernel/microcode_core.c66
-rw-r--r--arch/x86/kernel/module.c34
-rw-r--r--arch/x86/kernel/nmi.c47
-rw-r--r--arch/x86/kernel/nmi_selftest.c7
-rw-r--r--arch/x86/kernel/paravirt.c2
-rw-r--r--arch/x86/kernel/pci-calgary_64.c34
-rw-r--r--arch/x86/kernel/pci-dma.c11
-rw-r--r--arch/x86/kernel/process.c34
-rw-r--r--arch/x86/kernel/process_64.c12
-rw-r--r--arch/x86/kernel/quirks.c2
-rw-r--r--arch/x86/kernel/reboot.c82
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/signal.c5
-rw-r--r--arch/x86/kernel/smpboot.c106
-rw-r--r--arch/x86/kernel/traps.c19
-rw-r--r--arch/x86/kernel/tsc.c50
-rw-r--r--arch/x86/kernel/uprobes.c3
-rw-r--r--arch/x86/kernel/vm86_32.c6
-rw-r--r--arch/x86/kernel/vsmp_64.c44
-rw-r--r--arch/x86/kernel/vsyscall_64.c56
-rw-r--r--arch/x86/kernel/x8664_ksyms_64.c1
-rw-r--r--arch/x86/kernel/x86_init.c2
-rw-r--r--arch/x86/kernel/xsave.c12
-rw-r--r--arch/x86/kvm/cpuid.c46
-rw-r--r--arch/x86/kvm/cpuid.h9
-rw-r--r--arch/x86/kvm/emulate.c273
-rw-r--r--arch/x86/kvm/i8259.c17
-rw-r--r--arch/x86/kvm/lapic.c194
-rw-r--r--arch/x86/kvm/lapic.h11
-rw-r--r--arch/x86/kvm/mmu.c362
-rw-r--r--arch/x86/kvm/mmutrace.h45
-rw-r--r--arch/x86/kvm/paging_tmpl.h3
-rw-r--r--arch/x86/kvm/pmu.c22
-rw-r--r--arch/x86/kvm/svm.c12
-rw-r--r--arch/x86/kvm/trace.h46
-rw-r--r--arch/x86/kvm/vmx.c189
-rw-r--r--arch/x86/kvm/x86.c123
-rw-r--r--arch/x86/lib/csum-wrappers_64.c2
-rw-r--r--arch/x86/lib/msr-reg-export.c4
-rw-r--r--arch/x86/lib/msr-reg.S10
-rw-r--r--arch/x86/mm/init.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c4
-rw-r--r--arch/x86/oprofile/op_model_amd.c4
-rw-r--r--arch/x86/pci/acpi.c109
-rw-r--r--arch/x86/pci/amd_bus.c7
-rw-r--r--arch/x86/pci/bus_numa.c22
-rw-r--r--arch/x86/pci/bus_numa.h3
-rw-r--r--arch/x86/pci/common.c2
-rw-r--r--arch/x86/pci/mmconfig-shared.c372
-rw-r--r--arch/x86/pci/mmconfig_32.c30
-rw-r--r--arch/x86/pci/mmconfig_64.c52
-rw-r--r--arch/x86/pci/mrst.c2
-rw-r--r--arch/x86/platform/olpc/olpc-xo15-sci.c6
-rw-r--r--arch/x86/platform/uv/tlb_uv.c453
-rw-r--r--arch/x86/platform/uv/uv_irq.c9
-rw-r--r--arch/x86/realmode/rm/Makefile2
-rw-r--r--arch/x86/realmode/rm/header.S4
-rw-r--r--arch/x86/realmode/rm/reboot.S (renamed from arch/x86/realmode/rm/reboot_32.S)30
-rw-r--r--arch/x86/vdso/vdso32-setup.c6
-rw-r--r--arch/x86/xen/enlighten.c226
-rw-r--r--arch/x86/xen/mmu.c39
-rw-r--r--arch/x86/xen/setup.c23
-rw-r--r--arch/x86/xen/smp.c2
-rw-r--r--arch/x86/xen/suspend.c2
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--arch/xtensa/kernel/pci.c8
-rw-r--r--arch/xtensa/kernel/process.c2
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-core.c25
-rw-r--r--block/blk-exec.c11
-rw-r--r--block/blk-timeout.c41
-rw-r--r--block/cfq-iosched.c30
-rw-r--r--block/scsi_ioctl.c5
-rw-r--r--crypto/crypto_user.c7
-rw-r--r--drivers/acpi/ac.c17
-rw-r--r--drivers/acpi/acpi_pad.c7
-rw-r--r--drivers/acpi/acpica/hwsleep.c22
-rw-r--r--drivers/acpi/acpica/nspredef.c2
-rw-r--r--drivers/acpi/apei/apei-base.c17
-rw-r--r--drivers/acpi/apei/apei-internal.h9
-rw-r--r--drivers/acpi/apei/ghes.c6
-rw-r--r--drivers/acpi/battery.c15
-rw-r--r--drivers/acpi/button.c9
-rw-r--r--drivers/acpi/fan.c21
-rw-r--r--drivers/acpi/pci_root.c2
-rw-r--r--drivers/acpi/power.c12
-rw-r--r--drivers/acpi/processor_core.c6
-rw-r--r--drivers/acpi/processor_driver.c13
-rw-r--r--drivers/acpi/processor_idle.c21
-rw-r--r--drivers/acpi/sbs.c10
-rw-r--r--drivers/acpi/scan.c22
-rw-r--r--drivers/acpi/sleep.c24
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/acpi/thermal.c17
-rw-r--r--drivers/acpi/video.c2
-rw-r--r--drivers/amba/tegra-ahb.c6
-rw-r--r--drivers/ata/libata-core.c4
-rw-r--r--drivers/ata/libata-eh.c57
-rw-r--r--drivers/base/devtmpfs.c98
-rw-r--r--drivers/base/power/domain.c342
-rw-r--r--drivers/base/power/main.c32
-rw-r--r--drivers/base/power/qos.c2
-rw-r--r--drivers/base/power/sysfs.c4
-rw-r--r--drivers/base/regmap/internal.h17
-rw-r--r--drivers/base/regmap/regmap-irq.c57
-rw-r--r--drivers/base/regmap/regmap-mmio.c30
-rw-r--r--drivers/base/regmap/regmap.c344
-rw-r--r--drivers/bcma/Kconfig19
-rw-r--r--drivers/bcma/Makefile3
-rw-r--r--drivers/bcma/bcma_private.h31
-rw-r--r--drivers/bcma/core.c10
-rw-r--r--drivers/bcma/driver_chipcommon.c5
-rw-r--r--drivers/bcma/driver_chipcommon_nflash.c19
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c369
-rw-r--r--drivers/bcma/driver_chipcommon_sflash.c19
-rw-r--r--drivers/bcma/driver_gmac_cmn.c14
-rw-r--r--drivers/bcma/driver_mips.c33
-rw-r--r--drivers/bcma/driver_pci_host.c18
-rw-r--r--drivers/bcma/host_pci.c5
-rw-r--r--drivers/bcma/main.c44
-rw-r--r--drivers/bcma/scan.c48
-rw-r--r--drivers/bcma/scan.h2
-rw-r--r--drivers/bcma/sprom.c26
-rw-r--r--drivers/block/drbd/drbd_bitmap.c11
-rw-r--r--drivers/block/drbd/drbd_req.c66
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/block/loop.c8
-rw-r--r--drivers/block/mg_disk.c13
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c166
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h5
-rw-r--r--drivers/block/rbd.c4
-rw-r--r--drivers/block/umem.c40
-rw-r--r--drivers/block/xen-blkback/common.h2
-rw-r--r--drivers/block/xen-blkfront.c58
-rw-r--r--drivers/bluetooth/Kconfig12
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/bluecard_cs.c16
-rw-r--r--drivers/bluetooth/bpa10x.c2
-rw-r--r--drivers/bluetooth/bt3c_cs.c6
-rw-r--r--drivers/bluetooth/btmrvl_main.c8
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c6
-rw-r--r--drivers/bluetooth/btuart_cs.c6
-rw-r--r--drivers/bluetooth/btusb.c14
-rw-r--r--drivers/bluetooth/dtl1_cs.c22
-rw-r--r--drivers/bluetooth/hci_bcsp.c2
-rw-r--r--drivers/bluetooth/hci_h4.c2
-rw-r--r--drivers/bluetooth/hci_h5.c747
-rw-r--r--drivers/bluetooth/hci_ldisc.c68
-rw-r--r--drivers/bluetooth/hci_ll.c6
-rw-r--r--drivers/bluetooth/hci_uart.h10
-rw-r--r--drivers/char/hw_random/omap-rng.c13
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c16
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c13
-rw-r--r--drivers/char/mem.c11
-rw-r--r--drivers/char/sonypi.c13
-rw-r--r--drivers/char/tpm/tpm.c29
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm_atmel.c12
-rw-r--r--drivers/char/tpm/tpm_infineon.c6
-rw-r--r--drivers/char/tpm/tpm_nsc.c13
-rw-r--r--drivers/char/tpm/tpm_tis.c18
-rw-r--r--drivers/clk/Kconfig7
-rw-r--r--drivers/clk/Makefile10
-rw-r--r--drivers/clk/clk-divider.c189
-rw-r--r--drivers/clk/clk-fixed-factor.c2
-rw-r--r--drivers/clk/clk-fixed-rate.c25
-rw-r--r--drivers/clk/clk-gate.c2
-rw-r--r--drivers/clk/clk-highbank.c346
-rw-r--r--drivers/clk/clk-mux.c2
-rw-r--r--drivers/clk/clk-nomadik.c47
-rw-r--r--drivers/clk/clk-u300.c746
-rw-r--r--drivers/clk/clk-wm831x.c428
-rw-r--r--drivers/clk/clk.c172
-rw-r--r--drivers/clk/clkdev.c77
-rw-r--r--drivers/clk/mxs/clk-imx23.c3
-rw-r--r--drivers/clk/mxs/clk-imx28.c13
-rw-r--r--drivers/clk/socfpga/Makefile1
-rw-r--r--drivers/clk/socfpga/clk.c51
-rw-r--r--drivers/clk/spear/spear1310_clock.c312
-rw-r--r--drivers/clk/spear/spear1340_clock.c279
-rw-r--r--drivers/clk/spear/spear3xx_clock.c180
-rw-r--r--drivers/clk/spear/spear6xx_clock.c122
-rw-r--r--drivers/clk/versatile/Makefile3
-rw-r--r--drivers/clk/versatile/clk-icst.c100
-rw-r--r--drivers/clk/versatile/clk-icst.h10
-rw-r--r--drivers/clk/versatile/clk-integrator.c111
-rw-r--r--drivers/clocksource/Kconfig6
-rw-r--r--drivers/clocksource/Makefile4
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c (renamed from arch/arm/mach-picoxcell/time.c)52
-rw-r--r--drivers/clocksource/time-armada-370-xp.c226
-rw-r--r--drivers/connector/cn_proc.c36
-rw-r--r--drivers/connector/cn_queue.c12
-rw-r--r--drivers/connector/connector.c30
-rw-r--r--drivers/cpufreq/cpufreq.c35
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c14
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c4
-rw-r--r--drivers/cpufreq/speedstep-ich.c2
-rw-r--r--drivers/cpuidle/cpuidle.c18
-rw-r--r--drivers/cpuidle/driver.c29
-rw-r--r--drivers/cpuidle/governors/menu.c6
-rw-r--r--drivers/cpuidle/sysfs.c21
-rw-r--r--drivers/crypto/tegra-aes.c12
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c39
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c33
-rw-r--r--drivers/dma/Kconfig26
-rw-r--r--drivers/dma/Makefile4
-rw-r--r--drivers/dma/at_hdmac.c11
-rw-r--r--drivers/dma/coh901318.c72
-rw-r--r--drivers/dma/dmaengine.c20
-rw-r--r--drivers/dma/dw_dmac.c182
-rw-r--r--drivers/dma/dw_dmac_regs.h8
-rw-r--r--drivers/dma/ipu/ipu_idmac.c8
-rw-r--r--drivers/dma/ipu/ipu_irq.c14
-rw-r--r--drivers/dma/mmp_tdma.c610
-rw-r--r--drivers/dma/mxs-dma.c3
-rw-r--r--drivers/dma/sh/Makefile2
-rw-r--r--drivers/dma/sh/shdma-base.c934
-rw-r--r--drivers/dma/sh/shdma.c943
-rw-r--r--drivers/dma/sh/shdma.h (renamed from drivers/dma/shdma.h)46
-rw-r--r--drivers/dma/shdma.c1524
-rw-r--r--drivers/dma/tegra20-apb-dma.c1415
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/devres.c1
-rw-r--r--drivers/gpio/gpio-mxc.c66
-rw-r--r--drivers/gpio/gpio-omap.c14
-rw-r--r--drivers/gpio/gpio-sta2x11.c5
-rw-r--r--drivers/gpio/gpio-tps65910.c3
-rw-r--r--drivers/gpio/gpio-wm8994.c5
-rw-r--r--drivers/gpu/drm/drm_edid.c27
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c35
-rw-r--r--drivers/gpu/drm/gma500/opregion.c8
-rw-r--r--drivers/gpu/drm/gma500/opregion.h5
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c12
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c1
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c37
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c10
-rw-r--r--drivers/gpu/drm/radeon/si.c4
-rw-r--r--drivers/hid/Kconfig41
-rw-r--r--drivers/hid/Makefile6
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-chicony.c1
-rw-r--r--drivers/hid/hid-core.c28
-rw-r--r--drivers/hid/hid-cypress.c2
-rw-r--r--drivers/hid/hid-holtek-kbd.c183
-rw-r--r--drivers/hid/hid-ids.h24
-rw-r--r--drivers/hid/hid-input.c12
-rw-r--r--drivers/hid/hid-lenovo-tpkbd.c564
-rw-r--r--drivers/hid/hid-magicmouse.c157
-rw-r--r--drivers/hid/hid-multitouch.c23
-rw-r--r--drivers/hid/hid-picolcd.c6
-rw-r--r--drivers/hid/hid-roccat-arvo.c16
-rw-r--r--drivers/hid/hid-roccat-common.c72
-rw-r--r--drivers/hid/hid-roccat-common.h16
-rw-r--r--drivers/hid/hid-roccat-isku.c52
-rw-r--r--drivers/hid/hid-roccat-isku.h7
-rw-r--r--drivers/hid/hid-roccat-kone.c6
-rw-r--r--drivers/hid/hid-roccat-koneplus.c98
-rw-r--r--drivers/hid/hid-roccat-koneplus.h22
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c71
-rw-r--r--drivers/hid/hid-roccat-kovaplus.h15
-rw-r--r--drivers/hid/hid-roccat-pyra.c59
-rw-r--r--drivers/hid/hid-roccat-pyra.h12
-rw-r--r--drivers/hid/hid-roccat-savu.c316
-rw-r--r--drivers/hid/hid-roccat-savu.h87
-rw-r--r--drivers/hid/hid-wiimote-ext.c2
-rw-r--r--drivers/hid/hidraw.c12
-rw-r--r--drivers/hid/uhid.c572
-rw-r--r--drivers/hid/usbhid/hid-core.c294
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/usbhid.h1
-rw-r--r--drivers/hwmon/Kconfig20
-rw-r--r--drivers/hwmon/Makefile2
-rw-r--r--drivers/hwmon/abituguru.c19
-rw-r--r--drivers/hwmon/abituguru3.c19
-rw-r--r--drivers/hwmon/acpi_power_meter.c21
-rw-r--r--drivers/hwmon/adm1021.c18
-rw-r--r--drivers/hwmon/adm1025.c15
-rw-r--r--drivers/hwmon/adm1026.c15
-rw-r--r--drivers/hwmon/adm1031.c15
-rw-r--r--drivers/hwmon/adm9240.c14
-rw-r--r--drivers/hwmon/adt7475.c7
-rw-r--r--drivers/hwmon/applesmc.c133
-rw-r--r--drivers/hwmon/asc7621.c5
-rw-r--r--drivers/hwmon/atxp1.c16
-rw-r--r--drivers/hwmon/coretemp.c4
-rw-r--r--drivers/hwmon/da9052-hwmon.c344
-rw-r--r--drivers/hwmon/ds1621.c16
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/emc6w201.c15
-rw-r--r--drivers/hwmon/exynos4_tmu.c20
-rw-r--r--drivers/hwmon/f71805f.c26
-rw-r--r--drivers/hwmon/fam15h_power.c3
-rw-r--r--drivers/hwmon/gl518sm.c15
-rw-r--r--drivers/hwmon/gl520sm.c15
-rw-r--r--drivers/hwmon/gpio-fan.c77
-rw-r--r--drivers/hwmon/hih6130.c293
-rw-r--r--drivers/hwmon/it87.c2
-rw-r--r--drivers/hwmon/k10temp.c5
-rw-r--r--drivers/hwmon/k8temp.c25
-rw-r--r--drivers/hwmon/lm63.c14
-rw-r--r--drivers/hwmon/lm75.c9
-rw-r--r--drivers/hwmon/lm77.c73
-rw-r--r--drivers/hwmon/lm78.c36
-rw-r--r--drivers/hwmon/lm80.c14
-rw-r--r--drivers/hwmon/lm83.c15
-rw-r--r--drivers/hwmon/lm85.c7
-rw-r--r--drivers/hwmon/lm87.c15
-rw-r--r--drivers/hwmon/lm90.c12
-rw-r--r--drivers/hwmon/lm92.c15
-rw-r--r--drivers/hwmon/lm93.c14
-rw-r--r--drivers/hwmon/max1111.c9
-rw-r--r--drivers/hwmon/max1619.c15
-rw-r--r--drivers/hwmon/max6639.c17
-rw-r--r--drivers/hwmon/max6642.c15
-rw-r--r--drivers/hwmon/max6650.c10
-rw-r--r--drivers/hwmon/mc13783-adc.c12
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/pc87360.c41
-rw-r--r--drivers/hwmon/pc87427.c51
-rw-r--r--drivers/hwmon/pcf8591.c15
-rw-r--r--drivers/hwmon/s3c-hwmon.c7
-rw-r--r--drivers/hwmon/sis5595.c28
-rw-r--r--drivers/hwmon/smsc47b397.c22
-rw-r--r--drivers/hwmon/smsc47m1.c45
-rw-r--r--drivers/hwmon/smsc47m192.c16
-rw-r--r--drivers/hwmon/thmc50.c17
-rw-r--r--drivers/hwmon/tmp102.c14
-rw-r--r--drivers/hwmon/tmp401.c6
-rw-r--r--drivers/hwmon/tmp421.c13
-rw-r--r--drivers/hwmon/via686a.c23
-rw-r--r--drivers/hwmon/vt1211.c22
-rw-r--r--drivers/hwmon/w83627ehf.c1
-rw-r--r--drivers/hwmon/w83627hf.c46
-rw-r--r--drivers/hwmon/w83781d.c52
-rw-r--r--drivers/hwmon/w83791d.c15
-rw-r--r--drivers/hwmon/w83792d.c18
-rw-r--r--drivers/hwmon/w83795.c11
-rw-r--r--drivers/hwmon/w83l785ts.c34
-rw-r--r--drivers/hwmon/wm831x-hwmon.c9
-rw-r--r--drivers/hwspinlock/hwspinlock_core.c4
-rw-r--r--drivers/i2c/busses/Kconfig6
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c13
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c14
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c13
-rw-r--r--drivers/i2c/busses/i2c-amd756.c13
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c13
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c12
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c2
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c12
-rw-r--r--drivers/i2c/busses/i2c-hydra.c17
-rw-r--r--drivers/i2c/busses/i2c-i801.c343
-rw-r--r--drivers/i2c/busses/i2c-imx.c1
-rw-r--r--drivers/i2c/busses/i2c-intel-mid.c13
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c14
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c3
-rw-r--r--drivers/i2c/busses/i2c-omap.c5
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c13
-rw-r--r--drivers/i2c/busses/i2c-piix4.c209
-rw-r--r--drivers/i2c/busses/i2c-powermac.c157
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c12
-rw-r--r--drivers/i2c/busses/i2c-sis630.c15
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c15
-rw-r--r--drivers/i2c/busses/i2c-tegra.c10
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c1
-rw-r--r--drivers/i2c/busses/i2c-via.c14
-rw-r--r--drivers/i2c/i2c-core.c16
-rw-r--r--drivers/i2c/i2c-smbus.c13
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c2
-rw-r--r--drivers/idle/intel_idle.c41
-rw-r--r--drivers/ieee802154/Kconfig6
-rw-r--r--drivers/ieee802154/Makefile1
-rw-r--r--drivers/ieee802154/at86rf230.c968
-rw-r--r--drivers/infiniband/core/addr.c4
-rw-r--r--drivers/infiniband/core/cm.c16
-rw-r--r--drivers/infiniband/core/cm_msgs.h12
-rw-r--r--drivers/infiniband/core/cma.c77
-rw-r--r--drivers/infiniband/core/netlink.c17
-rw-r--r--drivers/infiniband/core/sa_query.c133
-rw-r--r--drivers/infiniband/core/ucma.c7
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c12
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c141
-rw-r--r--drivers/infiniband/hw/mlx4/main.c98
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h20
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c28
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c7
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib.h45
-rw-r--r--drivers/infiniband/hw/qib/qib_diag.c13
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c15
-rw-r--r--drivers/infiniband/hw/qib/qib_eeprom.c41
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c63
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c21
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c91
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c92
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c160
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c238
-rw-r--r--drivers/infiniband/hw/qib/qib_intr.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_keys.c152
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c327
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h198
-rw-r--r--drivers/infiniband/hw/qib/qib_mr.c247
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c25
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c56
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c24
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c14
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c41
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c11
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c246
-rw-r--r--drivers/infiniband/hw/qib/qib_twsi.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c31
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c66
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h56
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c14
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c12
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c35
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c15
-rw-r--r--drivers/input/joystick/as5011.c5
-rw-r--r--drivers/input/joystick/xpad.c6
-rw-r--r--drivers/input/keyboard/lm8333.c2
-rw-r--r--drivers/input/keyboard/mcs_touchkey.c3
-rw-r--r--drivers/input/keyboard/mpr121_touchkey.c2
-rw-r--r--drivers/input/keyboard/qt1070.c3
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c3
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c4
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c8
-rw-r--r--drivers/input/misc/ad714x.c8
-rw-r--r--drivers/input/misc/cma3000_d0x.c2
-rw-r--r--drivers/input/misc/dm355evm_keys.c3
-rw-r--r--drivers/input/mouse/bcm5974.c20
-rw-r--r--drivers/input/tablet/wacom_sys.c6
-rw-r--r--drivers/input/touchscreen/ad7879.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c3
-rw-r--r--drivers/input/touchscreen/bu21013_ts.c3
-rw-r--r--drivers/input/touchscreen/cy8ctmg110_ts.c3
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c2
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c2
-rw-r--r--drivers/input/touchscreen/tsc2005.c3
-rw-r--r--drivers/iommu/Kconfig6
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c102
-rw-r--r--drivers/iommu/amd_iommu_init.c571
-rw-r--r--drivers/iommu/amd_iommu_types.h15
-rw-r--r--drivers/iommu/amd_iommu_v2.c6
-rw-r--r--drivers/iommu/dmar.c194
-rw-r--r--drivers/iommu/exynos-iommu.c6
-rw-r--r--drivers/iommu/intel-iommu.c95
-rw-r--r--drivers/iommu/intel_irq_remapping.c20
-rw-r--r--drivers/iommu/iommu.c611
-rw-r--r--drivers/iommu/iova.c14
-rw-r--r--drivers/iommu/irq_remapping.c10
-rw-r--r--drivers/iommu/irq_remapping.h2
-rw-r--r--drivers/iommu/msm_iommu.c5
-rw-r--r--drivers/iommu/of_iommu.c90
-rw-r--r--drivers/iommu/omap-iommu.c4
-rw-r--r--drivers/iommu/tegra-gart.c5
-rw-r--r--drivers/iommu/tegra-smmu.c285
-rw-r--r--drivers/isdn/gigaset/capi.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c18
-rw-r--r--drivers/isdn/hisax/hfc_usb.c18
-rw-r--r--drivers/isdn/hisax/isurf.c5
-rw-r--r--drivers/isdn/mISDN/stack.c4
-rw-r--r--drivers/leds/ledtrig-heartbeat.c16
-rw-r--r--drivers/md/dm-raid1.c3
-rw-r--r--drivers/md/dm-region-hash.c5
-rw-r--r--drivers/md/dm-thin.c13
-rw-r--r--drivers/md/md.c45
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/persistent-data/dm-space-map-checker.c54
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c11
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c11
-rw-r--r--drivers/md/raid1.c26
-rw-r--r--drivers/md/raid10.c26
-rw-r--r--drivers/md/raid5.c67
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c1
-rw-r--r--drivers/media/rc/winbond-cir.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-audio.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-vbi.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c89
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c6
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c9
-rw-r--r--drivers/media/video/cx23885/cx23885.h1
-rw-r--r--drivers/media/video/cx25821/cx25821-core.c3
-rw-r--r--drivers/media/video/cx25821/cx25821.h2
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c76
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c2
-rw-r--r--drivers/media/video/gspca/sn9c20x.c13
-rw-r--r--drivers/media/video/mx1_camera.c1
-rw-r--r--drivers/media/video/mx2_camera.c27
-rw-r--r--drivers/media/video/omap3isp/isppreview.c6
-rw-r--r--drivers/media/video/pms.c1
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c69
-rw-r--r--drivers/media/video/s5p-fimc/fimc-core.c19
-rw-r--r--drivers/media/video/s5p-fimc/fimc-lite.c73
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.c48
-rw-r--r--drivers/media/video/s5p-fimc/fimc-mdevice.h2
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_dec.c1
-rw-r--r--drivers/media/video/s5p-mfc/s5p_mfc_enc.c1
-rw-r--r--drivers/media/video/smiapp/smiapp-core.c1
-rw-r--r--drivers/media/video/v4l2-dev.c1
-rw-r--r--drivers/media/video/zoran/zoran.h4
-rw-r--r--drivers/media/video/zoran/zoran_driver.c4
-rw-r--r--drivers/media/video/zoran/zr36016.c4
-rw-r--r--drivers/mfd/Kconfig1
-rw-r--r--drivers/mfd/ab5500-core.h87
-rw-r--r--drivers/mfd/mc13xxx-spi.c67
-rw-r--r--drivers/mfd/omap-usb-host.c48
-rw-r--r--drivers/mfd/palmas.c13
-rw-r--r--drivers/mfd/tps65217.c67
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/cb710/core.c2
-rw-r--r--drivers/misc/iwmc3200top/Kconfig20
-rw-r--r--drivers/misc/iwmc3200top/Makefile29
-rw-r--r--drivers/misc/iwmc3200top/debugfs.c137
-rw-r--r--drivers/misc/iwmc3200top/debugfs.h58
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c358
-rw-r--r--drivers/misc/iwmc3200top/fw-msg.h113
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h205
-rw-r--r--drivers/misc/iwmc3200top/log.c348
-rw-r--r--drivers/misc/iwmc3200top/log.h171
-rw-r--r--drivers/misc/iwmc3200top/main.c662
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c4
-rw-r--r--drivers/mmc/card/block.c36
-rw-r--r--drivers/mmc/core/Makefile2
-rw-r--r--drivers/mmc/core/cd-gpio.c83
-rw-r--r--drivers/mmc/core/core.c90
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/core/mmc.c21
-rw-r--r--drivers/mmc/core/mmc_ops.c1
-rw-r--r--drivers/mmc/core/sd.c171
-rw-r--r--drivers/mmc/core/sdio.c7
-rw-r--r--drivers/mmc/core/sdio_cis.c2
-rw-r--r--drivers/mmc/core/slot-gpio.c188
-rw-r--r--drivers/mmc/host/atmel-mci.c14
-rw-r--r--drivers/mmc/host/dw_mmc.c17
-rw-r--r--drivers/mmc/host/mxs-mmc.c26
-rw-r--r--drivers/mmc/host/omap_hsmmc.c16
-rw-r--r--drivers/mmc/host/s3cmci.c10
-rw-r--r--drivers/mmc/host/sdhci-dove.c51
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c9
-rw-r--r--drivers/mmc/host/sdhci-pci.c1
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c54
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c52
-rw-r--r--drivers/mmc/host/sdhci-tegra.c11
-rw-r--r--drivers/mmc/host/sdhci.c144
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mmc/host/sh_mmcif.c273
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c74
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c132
-rw-r--r--drivers/mtd/mtdsuper.c4
-rw-r--r--drivers/mtd/nand/cafe_nand.c2
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c10
-rw-r--r--drivers/mtd/nand/mxc_nand.c37
-rw-r--r--drivers/mtd/nand/nand_base.c7
-rw-r--r--drivers/mtd/nand/nandsim.c12
-rw-r--r--drivers/mtd/ubi/Kconfig2
-rw-r--r--drivers/mtd/ubi/cdev.c2
-rw-r--r--drivers/mtd/ubi/misc.c25
-rw-r--r--drivers/mtd/ubi/ubi.h1
-rw-r--r--drivers/mtd/ubi/vmt.c20
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c13
-rw-r--r--drivers/net/bonding/bond_3ad.h4
-rw-r--r--drivers/net/bonding/bond_alb.c26
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c93
-rw-r--r--drivers/net/bonding/bond_sysfs.c2
-rw-r--r--drivers/net/bonding/bonding.h4
-rw-r--r--drivers/net/caif/caif_hsi.c548
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/c_can/Kconfig20
-rw-r--r--drivers/net/can/c_can/Makefile1
-rw-r--r--drivers/net/can/c_can/c_can.c122
-rw-r--r--drivers/net/can/c_can/c_can.h163
-rw-r--r--drivers/net/can/c_can/c_can_pci.c221
-rw-r--r--drivers/net/can/c_can/c_can_platform.c76
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/dev.c37
-rw-r--r--drivers/net/can/flexcan.c154
-rw-r--r--drivers/net/can/janz-ican3.c241
-rw-r--r--drivers/net/can/mcp251x.c5
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/mscan/mscan.c2
-rw-r--r--drivers/net/can/pch_can.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c2
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h2
-rw-r--r--drivers/net/can/vcan.c27
-rw-r--r--drivers/net/cris/eth_v10.c2
-rw-r--r--drivers/net/dummy.c19
-rw-r--r--drivers/net/ethernet/3com/3c501.c2
-rw-r--r--drivers/net/ethernet/8390/Kconfig14
-rw-r--r--drivers/net/ethernet/8390/Makefile1
-rw-r--r--drivers/net/ethernet/8390/apne.c2
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c480
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c8
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/lance.c5
-rw-r--r--drivers/net/ethernet/apple/macmace.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c8
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.h5
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c94
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c105
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_param.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c45
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c58
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c10
-rw-r--r--drivers/net/ethernet/broadcom/b44.c100
-rw-r--r--drivers/net/ethernet/broadcom/b44.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c106
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h197
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c252
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h63
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c585
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h184
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1232
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h53
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c310
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h168
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h128
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h42
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c68
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c63
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c288
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h47
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c97
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cs.h34
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h63
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_cna.h15
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h35
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs_status.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c393
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h43
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c48
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_msgq.c4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h81
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_cna.h42
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_enet.h107
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bna.h51
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c15
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_hw_defs.h33
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c17
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h66
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c12
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c13
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h31
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c171
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h57
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c518
-rw-r--r--drivers/net/ethernet/ethoc.c6
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c9
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c11
-rw-r--r--drivers/net/ethernet/freescale/fec.c32
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c29
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c508
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c420
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c6
-rw-r--r--drivers/net/ethernet/i825xx/lp486e.c8
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c4
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c4
-rw-r--r--drivers/net/ethernet/intel/e100.c40
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c17
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c42
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c120
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c43
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h25
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c52
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c164
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c29
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c1
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ids.h5
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h100
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c48
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c78
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c26
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c159
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c395
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c838
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c764
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c187
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c223
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c172
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h114
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1386
-rw-r--r--drivers/net/ethernet/jme.c14
-rw-r--r--drivers/net/ethernet/lantiq_etop.c3
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/sky2.c18
-rw-r--r--drivers/net/ethernet/marvell/sky2.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c382
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c630
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c270
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c223
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c145
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c523
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h96
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c119
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c285
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c35
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c10
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/ethernet/neterion/s2io.c24
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.c8
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-config.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c23
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h3
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-traffic.c5
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c10
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c18
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c12
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c10
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c42
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c21
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c37
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h13
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c315
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c100
-rw-r--r--drivers/net/ethernet/rdc/r6040.c16
-rw-r--r--drivers/net/ethernet/realtek/r8169.c1002
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c371
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h77
-rw-r--r--drivers/net/ethernet/sfc/efx.c10
-rw-r--r--drivers/net/ethernet/sfc/enum.h8
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c2
-rw-r--r--drivers/net/ethernet/sfc/falcon.c35
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c12
-rw-r--r--drivers/net/ethernet/sfc/filter.c2
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c11
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c1
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h3
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h11
-rw-r--r--drivers/net/ethernet/sfc/nic.c11
-rw-r--r--drivers/net/ethernet/sfc/nic.h18
-rw-r--r--drivers/net/ethernet/sfc/rx.c23
-rw-r--r--drivers/net/ethernet/sfc/selftest.c64
-rw-r--r--drivers/net/ethernet/sfc/siena.c37
-rw-r--r--drivers/net/ethernet/sfc/tx.c93
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c6
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c19
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h31
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c57
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c199
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c12
-rw-r--r--drivers/net/ethernet/sun/niu.c6
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c1
-rw-r--r--drivers/net/ethernet/sun/sunhme.c3
-rw-r--r--drivers/net/ethernet/sun/sunqe.c2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c177
-rw-r--r--drivers/net/ethernet/ti/cpsw.c25
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c208
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c25
-rw-r--r--drivers/net/ethernet/tile/tilegx.c61
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c6
-rw-r--r--drivers/net/ethernet/via/via-velocity.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c3
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/fddi/defxx.c4
-rw-r--r--drivers/net/fddi/skfp/pmf.c8
-rw-r--r--drivers/net/hamradio/mkiss.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c30
-rw-r--r--drivers/net/hyperv/rndis_filter.c79
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/irda/au1k_ir.c2
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/macvtap.c8
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/amd.c8
-rw-r--r--drivers/net/phy/bcm63xx.c31
-rw-r--r--drivers/net/phy/bcm87xx.c231
-rw-r--r--drivers/net/phy/broadcom.c119
-rw-r--r--drivers/net/phy/cicada.c35
-rw-r--r--drivers/net/phy/davicom.c41
-rw-r--r--drivers/net/phy/dp83640.c23
-rw-r--r--drivers/net/phy/fixed.c4
-rw-r--r--drivers/net/phy/icplus.c31
-rw-r--r--drivers/net/phy/lxt.c47
-rw-r--r--drivers/net/phy/marvell.c22
-rw-r--r--drivers/net/phy/mdio-mux.c10
-rw-r--r--drivers/net/phy/mdio_bus.c14
-rw-r--r--drivers/net/phy/micrel.c62
-rw-r--r--drivers/net/phy/national.c8
-rw-r--r--drivers/net/phy/phy.c316
-rw-r--r--drivers/net/phy/phy_device.c139
-rw-r--r--drivers/net/phy/realtek.c6
-rw-r--r--drivers/net/phy/smsc.c66
-rw-r--r--drivers/net/phy/spi_ks8995.c4
-rw-r--r--drivers/net/phy/ste10Xp.c21
-rw-r--r--drivers/net/phy/vitesse.c52
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/team/Kconfig13
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c777
-rw-r--r--drivers/net/team/team_mode_activebackup.c17
-rw-r--r--drivers/net/team/team_mode_broadcast.c87
-rw-r--r--drivers/net/team/team_mode_loadbalance.c546
-rw-r--r--drivers/net/team/team_mode_roundrobin.c13
-rw-r--r--drivers/net/tun.c153
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix.h218
-rw-r--r--drivers/net/usb/asix_common.c631
-rw-r--r--drivers/net/usb/asix_devices.c (renamed from drivers/net/usb/asix.c)666
-rw-r--r--drivers/net/usb/ax88172a.c414
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c364
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c36
-rw-r--r--drivers/net/usb/usbnet.c77
-rw-r--r--drivers/net/virtio_net.c10
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/wan/x25_asy.c2
-rw-r--r--drivers/net/wimax/i2400m/Kconfig22
-rw-r--r--drivers/net/wimax/i2400m/Makefile8
-rw-r--r--drivers/net/wimax/i2400m/control.c4
-rw-r--r--drivers/net/wimax/i2400m/driver.c5
-rw-r--r--drivers/net/wimax/i2400m/fw.c5
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h157
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h13
-rw-r--r--drivers/net/wimax/i2400m/sdio-debug-levels.h22
-rw-r--r--drivers/net/wimax/i2400m/sdio-fw.c210
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c301
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c177
-rw-r--r--drivers/net/wimax/i2400m/sdio.c602
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c2
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile2
-rw-r--r--drivers/net/wireless/adm8211.c5
-rw-r--r--drivers/net/wireless/airo.c4
-rw-r--r--drivers/net/wireless/ath/ath.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig8
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c17
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c288
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h8
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h46
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c28
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c48
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c158
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h58
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c489
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c776
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c176
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c164
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c214
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c535
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c734
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h40
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c124
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h53
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h882
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9340_initvals.h755
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h1404
-rw-r--r--drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h1284
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h772
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h93
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c528
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c69
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c122
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c82
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c287
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h107
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c555
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1392
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c246
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c780
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h171
-rw-r--r--drivers/net/wireless/ath/ath9k/wow.c532
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c165
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h11
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/fwdesc.h3
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c6
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c53
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c69
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h8
-rw-r--r--drivers/net/wireless/ath/key.c4
-rw-r--r--drivers/net/wireless/atmel.c4
-rw-r--r--drivers/net/wireless/b43/b43.h7
-rw-r--r--drivers/net/wireless/b43/main.c32
-rw-r--r--drivers/net/wireless/b43/phy_n.c17
-rw-r--r--drivers/net/wireless/b43/xmit.c9
-rw-r--r--drivers/net/wireless/b43legacy/dma.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c29
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h59
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c9
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c669
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c131
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h18
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c1223
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c17
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c21
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c127
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c22
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c142
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c172
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c2
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/soc.h62
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c27
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c2
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c17
-rw-r--r--drivers/net/wireless/iwlegacy/common.c23
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig5
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile32
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/Makefile13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn.h)113
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-calib.c)24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-calib.h)4
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h (renamed from drivers/net/wireless/iwlwifi/iwl-commands.h)48
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c (renamed from drivers/net/wireless/iwlwifi/iwl-debugfs.c)37
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h (renamed from drivers/net/wireless/iwlwifi/iwl-dev.h)192
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/devices.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-devices.c)191
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.c (renamed from drivers/net/wireless/iwlwifi/iwl-led.c)5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/led.h (renamed from drivers/net/wireless/iwlwifi/iwl-led.h)0
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-lib.c)24
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c (renamed from drivers/net/wireless/iwlwifi/iwl-mac80211.c)213
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn.c)504
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.c (renamed from drivers/net/wireless/iwlwifi/iwl-power.c)11
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/power.h (renamed from drivers/net/wireless/iwlwifi/iwl-power.h)2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.c)50
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rs.h)3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rx.c)78
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-rxon.c)54
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c (renamed from drivers/net/wireless/iwlwifi/iwl-scan.c)195
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-sta.c)60
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c471
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tt.c)13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tt.h (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tt.h)2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-tx.c)62
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c (renamed from drivers/net/wireless/iwlwifi/iwl-ucode.c)71
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h28
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c152
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c903
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h138
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c463
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h70
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c1148
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h269
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c55
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c856
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h161
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c1114
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h82
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/1000.c (renamed from drivers/net/wireless/iwlwifi/iwl-1000.c)19
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/2000.c (renamed from drivers/net/wireless/iwlwifi/iwl-2000.c)24
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/5000.c (renamed from drivers/net/wireless/iwlwifi/iwl-5000.c)20
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/6000.c (renamed from drivers/net/wireless/iwlwifi/iwl-6000.c)25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h (renamed from drivers/net/wireless/iwlwifi/iwl-cfg.h)0
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c (renamed from drivers/net/wireless/iwlwifi/iwl-pci.c)5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h)25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c)106
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie.c)385
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c (renamed from drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c)200
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig39
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile10
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h57
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c882
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.h31
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c1002
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h509
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h123
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c488
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.c234
-rw-r--r--drivers/net/wireless/iwmc3200wifi/eeprom.h127
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c416
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.h100
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c470
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h237
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h367
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h484
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c847
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c191
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c1701
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.h60
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c509
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.h64
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h283
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c529
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h789
-rw-r--r--drivers/net/wireless/libertas/cfg.c46
-rw-r--r--drivers/net/wireless/libertas/cmd.c25
-rw-r--r--drivers/net/wireless/libertas/cmd.h4
-rw-r--r--drivers/net/wireless/libertas/debugfs.c4
-rw-r--r--drivers/net/wireless/libertas/dev.h2
-rw-r--r--drivers/net/wireless/libertas/firmware.c2
-rw-r--r--drivers/net/wireless/libertas/host.h1
-rw-r--r--drivers/net/wireless/libertas/if_usb.c3
-rw-r--r--drivers/net/wireless/libertas/main.c6
-rw-r--r--drivers/net/wireless/libertas/mesh.c7
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c29
-rw-r--r--drivers/net/wireless/mwifiex/11n.c14
-rw-r--r--drivers/net/wireless/mwifiex/11n.h3
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c23
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.h7
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c407
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c31
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c7
-rw-r--r--drivers/net/wireless/mwifiex/decl.h9
-rw-r--r--drivers/net/wireless/mwifiex/fw.h54
-rw-r--r--drivers/net/wireless/mwifiex/ie.c191
-rw-r--r--drivers/net/wireless/mwifiex/init.c67
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h21
-rw-r--r--drivers/net/wireless/mwifiex/join.c20
-rw-r--r--drivers/net/wireless/mwifiex/main.c11
-rw-r--r--drivers/net/wireless/mwifiex/main.h41
-rw-r--r--drivers/net/wireless/mwifiex/scan.c108
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c6
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c114
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c151
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c11
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c290
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c303
-rw-r--r--drivers/net/wireless/mwifiex/usb.c28
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/mwl8k.c5
-rw-r--r--drivers/net/wireless/orinoco/cfg.c11
-rw-r--r--drivers/net/wireless/p54/eeprom.c4
-rw-r--r--drivers/net/wireless/p54/fwio.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c6
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c2
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c5
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig8
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h181
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c388
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c83
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c20
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/rtlwifi/base.c4
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c7
-rw-r--r--drivers/net/wireless/rtlwifi/core.c14
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c33
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c10
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c5
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c43
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/fw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c34
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/hw.c46
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c14
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h1
-rw-r--r--drivers/net/wireless/ti/Kconfig1
-rw-r--r--drivers/net/wireless/ti/Makefile1
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c9
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c67
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h1
-rw-r--r--drivers/net/wireless/ti/wl12xx/Makefile2
-rw-r--r--drivers/net/wireless/ti/wl12xx/acx.h237
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c58
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.c243
-rw-r--r--drivers/net/wireless/ti/wl12xx/debugfs.h28
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c621
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h22
-rw-r--r--drivers/net/wireless/ti/wl18xx/Kconfig7
-rw-r--r--drivers/net/wireless/ti/wl18xx/Makefile3
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.c111
-rw-r--r--drivers/net/wireless/ti/wl18xx/acx.h287
-rw-r--r--drivers/net/wireless/ti/wl18xx/conf.h111
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.c403
-rw-r--r--drivers/net/wireless/ti/wl18xx/debugfs.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/io.c75
-rw-r--r--drivers/net/wireless/ti/wl18xx/io.h28
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c1610
-rw-r--r--drivers/net/wireless/ti/wl18xx/reg.h191
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c127
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.h46
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h95
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig1
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h259
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c184
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c173
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h40
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h99
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c643
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h87
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c39
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h90
-rw-r--r--drivers/net/wireless/ti/wlcore/ini.h22
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c62
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c61
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h145
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c921
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c37
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c50
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h15
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c61
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h19
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c91
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c20
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c112
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c282
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h53
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h119
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h (renamed from drivers/net/wireless/ti/wlcore/wl12xx.h)75
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.h2
-rw-r--r--drivers/net/xen-netback/netback.c4
-rw-r--r--drivers/nfc/nfcwilink.c7
-rw-r--r--drivers/nfc/pn533.c846
-rw-r--r--drivers/nfc/pn544_hci.c47
-rw-r--r--drivers/of/base.c25
-rw-r--r--drivers/of/irq.c2
-rw-r--r--drivers/of/of_mdio.c16
-rw-r--r--drivers/of/of_mtd.c2
-rw-r--r--drivers/of/platform.c8
-rw-r--r--drivers/oprofile/oprofile_perf.c23
-rw-r--r--drivers/parisc/dino.c16
-rw-r--r--drivers/parisc/iosapic.c4
-rw-r--r--drivers/parisc/lba_pci.c26
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/access.c6
-rw-r--r--drivers/pci/bus.c2
-rw-r--r--drivers/pci/hotplug-pci.c30
-rw-r--r--drivers/pci/hotplug/acpiphp.h4
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c7
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c67
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c14
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c35
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c14
-rw-r--r--drivers/pci/hotplug/cpqphp_pci.c8
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c10
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c2
-rw-r--r--drivers/pci/hotplug/ibmphp_pci.c2
-rw-r--r--drivers/pci/hotplug/pciehp.h4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c101
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c28
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c14
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c12
-rw-r--r--drivers/pci/hotplug/shpchp_core.c14
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c3
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c45
-rw-r--r--drivers/pci/hotplug/shpchp_sysfs.c6
-rw-r--r--drivers/pci/iov.c4
-rw-r--r--drivers/pci/pci-acpi.c36
-rw-r--r--drivers/pci/pci-driver.c36
-rw-r--r--drivers/pci/pci-sysfs.c31
-rw-r--r--drivers/pci/pci.c427
-rw-r--r--drivers/pci/pci.h11
-rw-r--r--drivers/pci/pcie/aer/aerdrv_acpi.c2
-rw-r--r--drivers/pci/pcie/portdrv_pci.c60
-rw-r--r--drivers/pci/probe.c246
-rw-r--r--drivers/pci/quirks.c264
-rw-r--r--drivers/pci/remove.c1
-rw-r--r--drivers/pci/search.c2
-rw-r--r--drivers/pci/setup-bus.c67
-rw-r--r--drivers/pci/setup-res.c125
-rw-r--r--drivers/pcmcia/cardbus.c2
-rw-r--r--drivers/pcmcia/yenta_socket.c26
-rw-r--r--drivers/pinctrl/Kconfig10
-rw-r--r--drivers/pinctrl/Makefile1
-rw-r--r--drivers/pinctrl/core.c41
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c6
-rw-r--r--drivers/pinctrl/pinctrl-imx.c15
-rw-r--r--drivers/pinctrl/pinctrl-imx6q.c2
-rw-r--r--drivers/pinctrl/pinctrl-nomadik-db8500.c33
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c39
-rw-r--r--drivers/pinctrl/pinctrl-single.c987
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c489
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c5
-rw-r--r--drivers/pinctrl/pinctrl-u300.c6
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c4
-rw-r--r--drivers/platform/x86/acer-wmi.c10
-rw-r--r--drivers/platform/x86/classmate-laptop.c13
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c8
-rw-r--r--drivers/platform/x86/hdaps.c6
-rw-r--r--drivers/platform/x86/hp_accel.c15
-rw-r--r--drivers/platform/x86/ideapad-laptop.c6
-rw-r--r--drivers/platform/x86/intel_ips.c39
-rw-r--r--drivers/platform/x86/intel_mid_thermal.c16
-rw-r--r--drivers/platform/x86/msi-laptop.c7
-rw-r--r--drivers/platform/x86/panasonic-laptop.c16
-rw-r--r--drivers/platform/x86/sony-laptop.c156
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c23
-rw-r--r--drivers/platform/x86/toshiba_acpi.c15
-rw-r--r--drivers/platform/x86/toshiba_bluetooth.c10
-rw-r--r--drivers/platform/x86/xo15-ebook.c8
-rw-r--r--drivers/pnp/pnpacpi/core.c4
-rw-r--r--drivers/power/Kconfig4
-rw-r--r--drivers/power/Makefile1
-rw-r--r--drivers/power/avs/Kconfig12
-rw-r--r--drivers/power/avs/Makefile1
-rw-r--r--drivers/power/avs/smartreflex.c (renamed from arch/arm/mach-omap2/smartreflex.c)161
-rw-r--r--drivers/regulator/Kconfig37
-rw-r--r--drivers/regulator/Makefile6
-rw-r--r--drivers/regulator/aat2870-regulator.c21
-rw-r--r--drivers/regulator/ab3100.c119
-rw-r--r--drivers/regulator/ab8500.c96
-rw-r--r--drivers/regulator/ad5398.c9
-rw-r--r--drivers/regulator/anatop-regulator.c42
-rw-r--r--drivers/regulator/arizona-ldo1.c138
-rw-r--r--drivers/regulator/arizona-micsupp.c188
-rw-r--r--drivers/regulator/core.c414
-rw-r--r--drivers/regulator/da903x.c6
-rw-r--r--drivers/regulator/da9052-regulator.c4
-rw-r--r--drivers/regulator/fixed-helper.c19
-rw-r--r--drivers/regulator/fixed.c163
-rw-r--r--drivers/regulator/gpio-regulator.c115
-rw-r--r--drivers/regulator/isl6271a-regulator.c13
-rw-r--r--drivers/regulator/lp3971.c66
-rw-r--r--drivers/regulator/lp3972.c102
-rw-r--r--drivers/regulator/lp872x.c943
-rw-r--r--drivers/regulator/lp8788-buck.c629
-rw-r--r--drivers/regulator/lp8788-ldo.c842
-rw-r--r--drivers/regulator/max1586.c108
-rw-r--r--drivers/regulator/max77686.c389
-rw-r--r--drivers/regulator/max8952.c60
-rw-r--r--drivers/regulator/max8997.c40
-rw-r--r--drivers/regulator/max8998.c133
-rw-r--r--drivers/regulator/mc13783-regulator.c38
-rw-r--r--drivers/regulator/mc13892-regulator.c43
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c36
-rw-r--r--drivers/regulator/mc13xxx.h11
-rw-r--r--drivers/regulator/of_regulator.c57
-rw-r--r--drivers/regulator/palmas-regulator.c59
-rw-r--r--drivers/regulator/pcap-regulator.c95
-rw-r--r--drivers/regulator/pcf50633-regulator.c20
-rw-r--r--drivers/regulator/rc5t583-regulator.c24
-rw-r--r--drivers/regulator/s2mps11.c363
-rw-r--r--drivers/regulator/s5m8767.c229
-rw-r--r--drivers/regulator/tps6105x-regulator.c14
-rw-r--r--drivers/regulator/tps62360-regulator.c57
-rw-r--r--drivers/regulator/tps65023-regulator.c201
-rw-r--r--drivers/regulator/tps6507x-regulator.c98
-rw-r--r--drivers/regulator/tps65217-regulator.c140
-rw-r--r--drivers/regulator/tps6524x-regulator.c94
-rw-r--r--drivers/regulator/tps6586x-regulator.c106
-rw-r--r--drivers/regulator/tps65910-regulator.c425
-rw-r--r--drivers/regulator/twl-regulator.c92
-rw-r--r--drivers/regulator/wm831x-dcdc.c78
-rw-r--r--drivers/regulator/wm831x-ldo.c131
-rw-r--r--drivers/regulator/wm8350-regulator.c426
-rw-r--r--drivers/regulator/wm8400-regulator.c25
-rw-r--r--drivers/regulator/wm8994-regulator.c93
-rw-r--r--drivers/remoteproc/Kconfig2
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c57
-rw-r--r--drivers/rtc/rtc-ab8500.c10
-rw-r--r--drivers/rtc/rtc-at91rm9200.c1
-rw-r--r--drivers/rtc/rtc-cmos.c1
-rw-r--r--drivers/rtc/rtc-mxc.c5
-rw-r--r--drivers/rtc/rtc-spear.c2
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c8
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/s390/block/dasd.c35
-rw-r--r--drivers/s390/block/dasd_3990_erp.c3
-rw-r--r--drivers/s390/block/dasd_alias.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c3
-rw-r--r--drivers/s390/block/dasd_diag.c3
-rw-r--r--drivers/s390/block/dasd_diag.h3
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/dasd_eckd.h3
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_erp.c3
-rw-r--r--drivers/s390/block/dasd_fba.c1
-rw-r--r--drivers/s390/block/dasd_fba.h3
-rw-r--r--drivers/s390/block/dasd_genhd.c3
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dasd_ioctl.c3
-rw-r--r--drivers/s390/block/dasd_proc.c3
-rw-r--r--drivers/s390/char/ctrlchar.c3
-rw-r--r--drivers/s390/char/ctrlchar.h3
-rw-r--r--drivers/s390/char/keyboard.c3
-rw-r--r--drivers/s390/char/keyboard.h3
-rw-r--r--drivers/s390/char/sclp.c10
-rw-r--r--drivers/s390/char/sclp.h10
-rw-r--r--drivers/s390/char/sclp_cmd.c38
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/char/sclp_cpi.c1
-rw-r--r--drivers/s390/char/sclp_cpi_sys.c1
-rw-r--r--drivers/s390/char/sclp_cpi_sys.h1
-rw-r--r--drivers/s390/char/sclp_ocf.c1
-rw-r--r--drivers/s390/char/sclp_quiesce.c3
-rw-r--r--drivers/s390/char/sclp_sdias.c2
-rw-r--r--drivers/s390/char/sclp_tty.c3
-rw-r--r--drivers/s390/char/sclp_tty.h3
-rw-r--r--drivers/s390/char/tape.h1
-rw-r--r--drivers/s390/char/tape_34xx.c1
-rw-r--r--drivers/s390/char/tape_3590.c1
-rw-r--r--drivers/s390/char/tape_3590.h3
-rw-r--r--drivers/s390/char/tape_char.c3
-rw-r--r--drivers/s390/char/tape_class.c5
-rw-r--r--drivers/s390/char/tape_class.h3
-rw-r--r--drivers/s390/char/tape_core.c1
-rw-r--r--drivers/s390/char/tape_proc.c3
-rw-r--r--drivers/s390/char/tape_std.c3
-rw-r--r--drivers/s390/char/tape_std.h3
-rw-r--r--drivers/s390/char/tty3270.c3
-rw-r--r--drivers/s390/char/tty3270.h2
-rw-r--r--drivers/s390/char/vmcp.c2
-rw-r--r--drivers/s390/char/vmcp.h2
-rw-r--r--drivers/s390/char/vmlogrdr.c45
-rw-r--r--drivers/s390/char/vmwatchdog.c2
-rw-r--r--drivers/s390/char/zcore.c2
-rw-r--r--drivers/s390/cio/airq.c3
-rw-r--r--drivers/s390/cio/blacklist.c4
-rw-r--r--drivers/s390/cio/chp.c16
-rw-r--r--drivers/s390/cio/chp.h4
-rw-r--r--drivers/s390/cio/chsc.c3
-rw-r--r--drivers/s390/cio/cio.c3
-rw-r--r--drivers/s390/cio/cmf.c6
-rw-r--r--drivers/s390/cio/crw.c2
-rw-r--r--drivers/s390/cio/device.c3
-rw-r--r--drivers/s390/cio/device_fsm.c3
-rw-r--r--drivers/s390/cio/device_id.c2
-rw-r--r--drivers/s390/cio/device_pgid.c2
-rw-r--r--drivers/s390/cio/device_status.c5
-rw-r--r--drivers/s390/cio/idset.c2
-rw-r--r--drivers/s390/cio/idset.h2
-rw-r--r--drivers/s390/cio/qdio.h4
-rw-r--r--drivers/s390/cio/qdio_debug.c4
-rw-r--r--drivers/s390/cio/qdio_debug.h2
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/cio/qdio_setup.c4
-rw-r--r--drivers/s390/cio/qdio_thinint.c4
-rw-r--r--drivers/s390/crypto/ap_bus.c12
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/zcrypt_api.c6
-rw-r--r--drivers/s390/crypto/zcrypt_api.h4
-rw-r--r--drivers/s390/crypto/zcrypt_cca_key.h4
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c6
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.h4
-rw-r--r--drivers/s390/crypto/zcrypt_error.h4
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.c6
-rw-r--r--drivers/s390/crypto/zcrypt_pcica.h4
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.c6
-rw-r--r--drivers/s390/crypto/zcrypt_pcicc.h4
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c6
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.h4
-rw-r--r--drivers/s390/kvm/kvm_virtio.c5
-rw-r--r--drivers/s390/net/claw.c3
-rw-r--r--drivers/s390/net/ctcm_dbug.c2
-rw-r--r--drivers/s390/net/ctcm_dbug.h2
-rw-r--r--drivers/s390/net/ctcm_fsms.c2
-rw-r--r--drivers/s390/net/ctcm_fsms.h2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/ctcm_main.h2
-rw-r--r--drivers/s390/net/ctcm_mpc.c2
-rw-r--r--drivers/s390/net/ctcm_mpc.h2
-rw-r--r--drivers/s390/net/ctcm_sysfs.c2
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/net/qeth_core_main.c2
-rw-r--r--drivers/s390/net/qeth_core_mpc.c2
-rw-r--r--drivers/s390/net/qeth_core_mpc.h2
-rw-r--r--drivers/s390/net/qeth_core_sys.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c4
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_sys.c2
-rw-r--r--drivers/s390/net/smsgiucv.h2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c2
-rw-r--r--drivers/s390/scsi/zfcp_ccw.c2
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c2
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c2
-rw-r--r--drivers/s390/scsi/zfcp_def.h2
-rw-r--r--drivers/s390/scsi/zfcp_erp.c2
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c2
-rw-r--r--drivers/s390/scsi/zfcp_fc.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h2
-rw-r--r--drivers/s390/scsi/zfcp_reqlist.h2
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c2
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c2
-rw-r--r--drivers/s390/scsi/zfcp_unit.c2
-rw-r--r--drivers/scsi/Kconfig19
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/aacraid/aachba.c237
-rw-r--r--drivers/scsi/aacraid/aacraid.h79
-rw-r--r--drivers/scsi/aacraid/commctrl.c2
-rw-r--r--drivers/scsi/aacraid/comminit.c54
-rw-r--r--drivers/scsi/aacraid/commsup.c31
-rw-r--r--drivers/scsi/aacraid/dpcsup.c6
-rw-r--r--drivers/scsi/aacraid/linit.c16
-rw-r--r--drivers/scsi/aacraid/nark.c4
-rw-r--r--drivers/scsi/aacraid/rkt.c2
-rw-r--r--drivers/scsi/aacraid/rx.c4
-rw-r--r--drivers/scsi/aacraid/sa.c4
-rw-r--r--drivers/scsi/aacraid/src.c96
-rw-r--r--drivers/scsi/aha152x.c4
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c2
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c4
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c2
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c5
-rw-r--r--drivers/scsi/bfa/bfad_im.c12
-rw-r--r--drivers/scsi/bnx2fc/Makefile3
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h13
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.c70
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_debug.h73
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c100
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c25
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c40
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h16
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h59
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c38
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c40
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c21
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c3
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c9
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c12
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c38
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c71
-rw-r--r--drivers/scsi/fcoe/fcoe.c36
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c13
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c2
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c12
-rw-r--r--drivers/scsi/hosts.c9
-rw-r--r--drivers/scsi/hptiop.c10
-rw-r--r--drivers/scsi/hptiop.h1
-rw-r--r--drivers/scsi/isci/init.c3
-rw-r--r--drivers/scsi/libfc/fc_exch.c130
-rw-r--r--drivers/scsi/libfc/fc_fcp.c22
-rw-r--r--drivers/scsi/libfc/fc_frame.c2
-rw-r--r--drivers/scsi/libfc/fc_lport.c69
-rw-r--r--drivers/scsi/libsas/sas_ata.c53
-rw-r--r--drivers/scsi/libsas/sas_discover.c23
-rw-r--r--drivers/scsi/libsas/sas_event.c12
-rw-r--r--drivers/scsi/libsas/sas_expander.c74
-rw-r--r--drivers/scsi/libsas/sas_init.c39
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c195
-rw-r--r--drivers/scsi/lpfc/Makefile2
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c93
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h45
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c233
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c32
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c131
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/megaraid.c4
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h3
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c4
-rw-r--r--drivers/scsi/mvsas/mv_sas.c21
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c37
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c51
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h3
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c55
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h1
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h3
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c156
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_error.c18
-rw-r--r--drivers/scsi/scsi_lib.c104
-rw-r--r--drivers/scsi/scsi_netlink.c7
-rw-r--r--drivers/scsi/scsi_pm.c23
-rw-r--r--drivers/scsi/scsi_priv.h10
-rw-r--r--drivers/scsi/scsi_scan.c34
-rw-r--r--drivers/scsi/scsi_sysfs.c56
-rw-r--r--drivers/scsi/scsi_transport_fc.c34
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c15
-rw-r--r--drivers/scsi/scsi_wait_scan.c42
-rw-r--r--drivers/scsi/sd.c10
-rw-r--r--drivers/scsi/ufs/ufshcd.c35
-rw-r--r--drivers/scsi/virtio_scsi.c337
-rw-r--r--drivers/sh/Kconfig1
-rw-r--r--drivers/sh/Makefile3
-rw-r--r--drivers/sh/clk/cpg.c333
-rw-r--r--drivers/sh/intc/Makefile2
-rw-r--r--drivers/sh/intc/dynamic.c57
-rw-r--r--drivers/sh/intc/virq.c4
-rw-r--r--drivers/sh/pfc.c739
-rw-r--r--drivers/sh/pfc/Kconfig26
-rw-r--r--drivers/sh/pfc/Makefile3
-rw-r--r--drivers/sh/pfc/core.c572
-rw-r--r--drivers/sh/pfc/gpio.c239
-rw-r--r--drivers/sh/pfc/pinctrl.c530
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-s3c64xx.c542
-rw-r--r--drivers/spi/spi-tegra.c4
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
-rw-r--r--drivers/ssb/scan.c2
-rw-r--r--drivers/staging/comedi/drivers/s626.h2
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c16
-rw-r--r--drivers/staging/media/go7007/wis-i2c.h5
-rw-r--r--drivers/staging/nvec/nvec.c8
-rw-r--r--drivers/staging/omapdrm/omap_drv.h2
-rw-r--r--drivers/target/Makefile3
-rw-r--r--drivers/target/iscsi/iscsi_target.c30
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c22
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c66
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c46
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c27
-rw-r--r--drivers/target/loopback/tcm_loop.c11
-rw-r--r--drivers/target/sbp/sbp_target.c39
-rw-r--r--drivers/target/target_core_device.c170
-rw-r--r--drivers/target/target_core_fabric_configfs.c3
-rw-r--r--drivers/target/target_core_file.c25
-rw-r--r--drivers/target/target_core_iblock.c142
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_internal.h22
-rw-r--r--drivers/target/target_core_pr.c13
-rw-r--r--drivers/target/target_core_pscsi.c84
-rw-r--r--drivers/target/target_core_rd.c17
-rw-r--r--drivers/target/target_core_sbc.c581
-rw-r--r--drivers/target/target_core_spc.c (renamed from drivers/target/target_core_cdb.c)434
-rw-r--r--drivers/target/target_core_tmr.c57
-rw-r--r--drivers/target/target_core_tpg.c14
-rw-r--r--drivers/target/target_core_transport.c2002
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c12
-rw-r--r--drivers/target/tcm_fc/tfc_io.c13
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c3
-rw-r--r--drivers/tty/hvc/hvc_opal.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c15
-rw-r--r--drivers/tty/serial/imx.c6
-rw-r--r--drivers/tty/serial/mxs-auart.c42
-rw-r--r--drivers/tty/serial/sh-sci.c8
-rw-r--r--drivers/usb/atm/xusbatm.c4
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/core/hub.c18
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/omap_udc.c3
-rw-r--r--drivers/usb/gadget/tcm_usb_gadget.c54
-rw-r--r--drivers/usb/gadget/u_ether.c2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/ehci-hcd.c5
-rw-r--r--drivers/usb/host/ehci-omap.c18
-rw-r--r--drivers/usb/host/ehci-tegra.c20
-rw-r--r--drivers/usb/host/ehci-tilegx.c214
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-omap.c7
-rw-r--r--drivers/usb/host/ohci-tilegx.c203
-rw-r--r--drivers/usb/host/xhci-hub.c44
-rw-r--r--drivers/usb/host/xhci-ring.c11
-rw-r--r--drivers/usb/host/xhci.h6
-rw-r--r--drivers/usb/otg/isp1301_omap.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.h4
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c14
-rw-r--r--drivers/usb/renesas_usbhs/mod.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod.h2
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c4
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h4
-rw-r--r--drivers/usb/serial/metro-usb.c8
-rw-r--r--drivers/usb/serial/option.c26
-rw-r--r--drivers/usb/storage/scsiglue.c5
-rw-r--r--drivers/usb/storage/unusual_devs.h12
-rw-r--r--drivers/usb/storage/usb.c5
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/vhost/test.c4
-rw-r--r--drivers/vhost/vhost.c5
-rw-r--r--drivers/vhost/vhost.h6
-rw-r--r--drivers/video/arcfb.c4
-rw-r--r--drivers/video/atmel_lcdfb.c2
-rw-r--r--drivers/video/aty/atyfb_base.c2
-rw-r--r--drivers/video/aty/radeon_base.c2
-rw-r--r--drivers/video/backlight/88pm860x_bl.c1
-rw-r--r--drivers/video/bfin_adv7393fb.c2
-rw-r--r--drivers/video/cirrusfb.c2
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c2
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c2
-rw-r--r--drivers/video/i740fb.c6
-rw-r--r--drivers/video/mxsfb.c62
-rw-r--r--drivers/video/omap2/dss/core.c43
-rw-r--r--drivers/video/omap2/dss/dispc.c2
-rw-r--r--drivers/video/omap2/dss/dsi.c2
-rw-r--r--drivers/video/omap2/dss/dss.c2
-rw-r--r--drivers/video/omap2/dss/hdmi.c2
-rw-r--r--drivers/video/omap2/dss/rfbi.c2
-rw-r--r--drivers/video/omap2/dss/venc.c2
-rw-r--r--drivers/video/s3c-fb.c2
-rw-r--r--drivers/video/savage/savagefb_driver.c6
-rw-r--r--drivers/video/sis/init.c3
-rw-r--r--drivers/video/smscufx.c8
-rw-r--r--drivers/video/sunxvr500.c2
-rw-r--r--drivers/virtio/virtio.c5
-rw-r--r--drivers/virtio/virtio_balloon.c24
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/w1/masters/omap_hdq.c86
-rw-r--r--drivers/watchdog/Kconfig10
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c4
-rw-r--r--drivers/watchdog/booke_wdt.c4
-rw-r--r--drivers/watchdog/coh901327_wdt.c7
-rw-r--r--drivers/watchdog/f71808e_wdt.c4
-rw-r--r--drivers/watchdog/iTCO_wdt.c213
-rw-r--r--drivers/watchdog/ie6xx_wdt.c4
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c2
-rw-r--r--drivers/watchdog/omap_wdt.c24
-rw-r--r--drivers/watchdog/orion_wdt.c203
-rw-r--r--drivers/watchdog/s3c2410_wdt.c16
-rw-r--r--drivers/watchdog/sch311x_wdt.c10
-rw-r--r--drivers/xen/Kconfig8
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/mcelog.c414
-rw-r--r--drivers/xen/pcpu.c371
-rw-r--r--drivers/xen/platform-pci.c18
-rw-r--r--drivers/xen/xen-acpi-processor.c9
-rw-r--r--drivers/xen/xen-pciback/conf_space.c6
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c20
-rw-r--r--fs/9p/v9fs.h2
-rw-r--r--fs/9p/vfs_dentry.c4
-rw-r--r--fs/9p/vfs_inode.c170
-rw-r--r--fs/9p/vfs_inode_dotl.c59
-rw-r--r--fs/9p/vfs_super.c4
-rw-r--r--fs/adfs/dir.c2
-rw-r--r--fs/adfs/super.c1
-rw-r--r--fs/affs/affs.h11
-rw-r--r--fs/affs/amigaffs.c22
-rw-r--r--fs/affs/bitmap.c4
-rw-r--r--fs/affs/namei.c4
-rw-r--r--fs/affs/super.c68
-rw-r--r--fs/afs/dir.c14
-rw-r--r--fs/afs/mntpt.c4
-rw-r--r--fs/afs/super.c3
-rw-r--r--fs/aio.c73
-rw-r--r--fs/attr.c3
-rw-r--r--fs/autofs4/dev-ioctl.c4
-rw-r--r--fs/autofs4/root.c4
-rw-r--r--fs/bad_inode.c4
-rw-r--r--fs/befs/linuxvfs.c4
-rw-r--r--fs/bfs/dir.c4
-rw-r--r--fs/block_dev.c36
-rw-r--r--fs/btrfs/backref.c15
-rw-r--r--fs/btrfs/ctree.c60
-rw-r--r--fs/btrfs/disk-io.c34
-rw-r--r--fs/btrfs/extent-tree.c11
-rw-r--r--fs/btrfs/extent_io.c17
-rw-r--r--fs/btrfs/file.c13
-rw-r--r--fs/btrfs/free-space-cache.c145
-rw-r--r--fs/btrfs/inode.c63
-rw-r--r--fs/btrfs/ioctl.c4
-rw-r--r--fs/btrfs/ioctl.h2
-rw-r--r--fs/btrfs/super.c8
-rw-r--r--fs/btrfs/tree-log.c6
-rw-r--r--fs/btrfs/volumes.c95
-rw-r--r--fs/btrfs/volumes.h3
-rw-r--r--fs/buffer.c22
-rw-r--r--fs/cachefiles/namei.c2
-rw-r--r--fs/cachefiles/rdwr.c8
-rw-r--r--fs/ceph/dir.c77
-rw-r--r--fs/ceph/file.c26
-rw-r--r--fs/ceph/super.c2
-rw-r--r--fs/ceph/super.h6
-rw-r--r--fs/cifs/cifsfs.c11
-rw-r--r--fs/cifs/cifsfs.h7
-rw-r--r--fs/cifs/cifssmb.c30
-rw-r--r--fs/cifs/connect.c59
-rw-r--r--fs/cifs/dir.c448
-rw-r--r--fs/cifs/inode.c5
-rw-r--r--fs/cifs/readdir.c7
-rw-r--r--fs/cifs/transport.c26
-rw-r--r--fs/coda/cache.c10
-rw-r--r--fs/coda/dir.c14
-rw-r--r--fs/configfs/dir.c2
-rw-r--r--fs/cramfs/inode.c2
-rw-r--r--fs/dcache.c44
-rw-r--r--fs/debugfs/inode.c91
-rw-r--r--fs/devpts/inode.c6
-rw-r--r--fs/direct-io.c2
-rw-r--r--fs/ecryptfs/dentry.c20
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h14
-rw-r--r--fs/ecryptfs/inode.c9
-rw-r--r--fs/ecryptfs/kthread.c75
-rw-r--r--fs/ecryptfs/main.c8
-rw-r--r--fs/ecryptfs/miscdev.c48
-rw-r--r--fs/efs/efs.h2
-rw-r--r--fs/efs/namei.c3
-rw-r--r--fs/eventpoll.c2
-rw-r--r--fs/exofs/namei.c4
-rw-r--r--fs/exofs/ore.c8
-rw-r--r--fs/exofs/ore_raid.c91
-rw-r--r--fs/exportfs/expfs.c16
-rw-r--r--fs/ext2/namei.c8
-rw-r--r--fs/ext2/super.c12
-rw-r--r--fs/ext3/dir.c3
-rw-r--r--fs/ext3/fsync.c9
-rw-r--r--fs/ext3/namei.c8
-rw-r--r--fs/ext3/super.c8
-rw-r--r--fs/ext4/dir.c75
-rw-r--r--fs/ext4/file.c9
-rw-r--r--fs/ext4/fsync.c11
-rw-r--r--fs/ext4/ioctl.c5
-rw-r--r--fs/ext4/namei.c8
-rw-r--r--fs/ext4/super.c5
-rw-r--r--fs/fat/inode.c13
-rw-r--r--fs/fat/namei_msdos.c4
-rw-r--r--fs/fat/namei_vfat.c16
-rw-r--r--fs/fifo.c9
-rw-r--r--fs/file_table.c81
-rw-r--r--fs/freevxfs/vxfs_lookup.c4
-rw-r--r--fs/fs-writeback.c5
-rw-r--r--fs/fs_struct.c32
-rw-r--r--fs/fuse/dir.c99
-rw-r--r--fs/gfs2/aops.c18
-rw-r--r--fs/gfs2/bmap.c21
-rw-r--r--fs/gfs2/dentry.c6
-rw-r--r--fs/gfs2/dir.c9
-rw-r--r--fs/gfs2/file.c65
-rw-r--r--fs/gfs2/glock.c39
-rw-r--r--fs/gfs2/incore.h54
-rw-r--r--fs/gfs2/inode.c101
-rw-r--r--fs/gfs2/lops.c9
-rw-r--r--fs/gfs2/main.c1
-rw-r--r--fs/gfs2/meta_io.c5
-rw-r--r--fs/gfs2/ops_fstype.c41
-rw-r--r--fs/gfs2/quota.c76
-rw-r--r--fs/gfs2/quota.h2
-rw-r--r--fs/gfs2/rgrp.c833
-rw-r--r--fs/gfs2/rgrp.h45
-rw-r--r--fs/gfs2/super.c32
-rw-r--r--fs/gfs2/sys.c23
-rw-r--r--fs/gfs2/trace_gfs2.h59
-rw-r--r--fs/gfs2/trans.h2
-rw-r--r--fs/gfs2/util.h18
-rw-r--r--fs/gfs2/xattr.c30
-rw-r--r--fs/hfs/dir.c4
-rw-r--r--fs/hfs/extent.c2
-rw-r--r--fs/hfs/hfs_fs.h15
-rw-r--r--fs/hfs/inode.c16
-rw-r--r--fs/hfs/mdb.c13
-rw-r--r--fs/hfs/super.c73
-rw-r--r--fs/hfs/sysdep.c4
-rw-r--r--fs/hfsplus/bitmap.c4
-rw-r--r--fs/hfsplus/dir.c6
-rw-r--r--fs/hfsplus/hfsplus_fs.h7
-rw-r--r--fs/hfsplus/inode.c8
-rw-r--r--fs/hfsplus/super.c46
-rw-r--r--fs/hostfs/hostfs_kern.c4
-rw-r--r--fs/hpfs/dir.c2
-rw-r--r--fs/hpfs/hpfs_fn.h2
-rw-r--r--fs/hpfs/namei.c2
-rw-r--r--fs/hppfs/hppfs.c22
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/inode.c2
-rw-r--r--fs/internal.h10
-rw-r--r--fs/isofs/export.c1
-rw-r--r--fs/isofs/isofs.h2
-rw-r--r--fs/isofs/namei.c2
-rw-r--r--fs/jbd/recovery.c7
-rw-r--r--fs/jffs2/dir.c16
-rw-r--r--fs/jfs/namei.c18
-rw-r--r--fs/jfs/super.c5
-rw-r--r--fs/libfs.c6
-rw-r--r--fs/locks.c8
-rw-r--r--fs/logfs/dir.c4
-rw-r--r--fs/logfs/super.c3
-rw-r--r--fs/minix/namei.c4
-rw-r--r--fs/mount.h13
-rw-r--r--fs/namei.c808
-rw-r--r--fs/namespace.c195
-rw-r--r--fs/ncpfs/dir.c14
-rw-r--r--fs/nfs/dir.c314
-rw-r--r--fs/nfs/direct.c6
-rw-r--r--fs/nfs/getroot.c2
-rw-r--r--fs/nfs/nfs3proc.c2
-rw-r--r--fs/nfs/nfs4proc.c37
-rw-r--r--fs/nfs/objlayout/objio_osd.c25
-rw-r--r--fs/nfs/proc.c2
-rw-r--r--fs/nfs/super.c4
-rw-r--r--fs/nfsd/vfs.c14
-rw-r--r--fs/nilfs2/namei.c4
-rw-r--r--fs/nilfs2/super.c4
-rw-r--r--fs/notify/fanotify/fanotify_user.c8
-rw-r--r--fs/notify/fsnotify.c3
-rw-r--r--fs/ntfs/namei.c2
-rw-r--r--fs/ocfs2/dcache.c22
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c2
-rw-r--r--fs/ocfs2/dlmglue.c33
-rw-r--r--fs/ocfs2/extent_map.c2
-rw-r--r--fs/ocfs2/file.c6
-rw-r--r--fs/ocfs2/namei.c4
-rw-r--r--fs/ocfs2/quota_global.c2
-rw-r--r--fs/omfs/dir.c4
-rw-r--r--fs/open.c217
-rw-r--r--fs/openpromfs/inode.c4
-rw-r--r--fs/pnode.c5
-rw-r--r--fs/proc/base.c51
-rw-r--r--fs/proc/generic.c2
-rw-r--r--fs/proc/internal.h6
-rw-r--r--fs/proc/namespaces.c4
-rw-r--r--fs/proc/proc_devtree.c5
-rw-r--r--fs/proc/proc_net.c2
-rw-r--r--fs/proc/proc_sysctl.c6
-rw-r--r--fs/proc/root.c10
-rw-r--r--fs/proc_namespace.c7
-rw-r--r--fs/qnx4/namei.c2
-rw-r--r--fs/qnx4/qnx4.h2
-rw-r--r--fs/qnx6/inode.c1
-rw-r--r--fs/qnx6/namei.c2
-rw-r--r--fs/qnx6/qnx6.h2
-rw-r--r--fs/quota/dquot.c26
-rw-r--r--fs/quota/quota.c6
-rw-r--r--fs/ramfs/file-nommu.c1
-rw-r--r--fs/ramfs/inode.c2
-rw-r--r--fs/read_write.c18
-rw-r--r--fs/reiserfs/namei.c12
-rw-r--r--fs/reiserfs/procfs.c2
-rw-r--r--fs/reiserfs/super.c5
-rw-r--r--fs/reiserfs/xattr.c4
-rw-r--r--fs/romfs/super.c2
-rw-r--r--fs/seq_file.c18
-rw-r--r--fs/splice.c35
-rw-r--r--fs/squashfs/namei.c2
-rw-r--r--fs/super.c22
-rw-r--r--fs/sync.c63
-rw-r--r--fs/sysfs/dir.c31
-rw-r--r--fs/sysfs/mount.c4
-rw-r--r--fs/sysfs/sysfs.h1
-rw-r--r--fs/sysv/inode.c18
-rw-r--r--fs/sysv/namei.c4
-rw-r--r--fs/sysv/sysv.h1
-rw-r--r--fs/ubifs/debug.c11
-rw-r--r--fs/ubifs/debug.h5
-rw-r--r--fs/ubifs/dir.c6
-rw-r--r--fs/ubifs/orphan.c4
-rw-r--r--fs/ubifs/replay.c20
-rw-r--r--fs/ubifs/sb.c8
-rw-r--r--fs/ubifs/super.c3
-rw-r--r--fs/udf/inode.c4
-rw-r--r--fs/udf/namei.c5
-rw-r--r--fs/udf/super.c130
-rw-r--r--fs/udf/truncate.c4
-rw-r--r--fs/udf/udfdecl.h1
-rw-r--r--fs/ufs/balloc.c8
-rw-r--r--fs/ufs/ialloc.c4
-rw-r--r--fs/ufs/namei.c4
-rw-r--r--fs/ufs/super.c148
-rw-r--r--fs/ufs/ufs.h5
-rw-r--r--fs/ufs/ufs_fs.h1
-rw-r--r--fs/xfs/xfs_alloc.c19
-rw-r--r--fs/xfs/xfs_buf.c53
-rw-r--r--fs/xfs/xfs_buf.h1
-rw-r--r--fs/xfs/xfs_buf_item.c2
-rw-r--r--fs/xfs/xfs_ioctl.c7
-rw-r--r--fs/xfs/xfs_iops.c6
-rw-r--r--include/acpi/acnames.h1
-rw-r--r--include/acpi/acpi_bus.h12
-rw-r--r--include/acpi/processor.h7
-rw-r--r--include/asm-generic/dma-contiguous.h2
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/ac97_codec.h362
-rw-r--r--include/linux/aio.h1
-rw-r--r--include/linux/async.h36
-rw-r--r--include/linux/bcma/bcma.h39
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h88
-rw-r--r--include/linux/bcma/bcma_driver_gmac_cmn.h100
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/bootmem.h5
-rw-r--r--include/linux/can.h70
-rw-r--r--include/linux/can/core.h4
-rw-r--r--include/linux/can/dev.h35
-rw-r--r--include/linux/can/error.h4
-rw-r--r--include/linux/can/raw.h3
-rw-r--r--include/linux/capability.h6
-rw-r--r--include/linux/ceph/messenger.h12
-rw-r--r--include/linux/cgroup.h17
-rw-r--r--include/linux/clk-private.h22
-rw-r--r--include/linux/clk-provider.h31
-rw-r--r--include/linux/clk.h24
-rw-r--r--include/linux/cpu.h5
-rw-r--r--include/linux/cpu_rmap.h4
-rw-r--r--include/linux/cpuidle.h20
-rw-r--r--include/linux/dcache.h4
-rw-r--r--include/linux/device.h4
-rw-r--r--include/linux/dmaengine.h10
-rw-r--r--include/linux/dw_apb_timer.h1
-rw-r--r--include/linux/etherdevice.h25
-rw-r--r--include/linux/ethtool.h43
-rw-r--r--include/linux/eventpoll.h2
-rw-r--r--include/linux/file.h3
-rw-r--r--include/linux/fs.h33
-rw-r--r--include/linux/ftrace_event.h5
-rw-r--r--include/linux/genetlink.h2
-rw-r--r--include/linux/gfs2_ondisk.h14
-rw-r--r--include/linux/gpio.h4
-rw-r--r--include/linux/hid.h1
-rw-r--r--include/linux/hrtimer.h10
-rw-r--r--include/linux/i2c.h3
-rw-r--r--include/linux/i2c/twl.h1
-rw-r--r--include/linux/ieee80211.h212
-rw-r--r--include/linux/if.h2
-rw-r--r--include/linux/if_ether.h3
-rw-r--r--include/linux/if_link.h2
-rw-r--r--include/linux/if_team.h74
-rw-r--r--include/linux/if_tunnel.h14
-rw-r--r--include/linux/inetdevice.h2
-rw-r--r--include/linux/init_task.h4
-rw-r--r--include/linux/input.h1
-rw-r--r--include/linux/intel-iommu.h2
-rw-r--r--include/linux/io.h9
-rw-r--r--include/linux/iommu.h140
-rw-r--r--include/linux/ipv6.h36
-rw-r--r--include/linux/irq.h4
-rw-r--r--include/linux/jump_label.h17
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/key.h4
-rw-r--r--include/linux/kmsg_dump.h16
-rw-r--r--include/linux/ks8851_mll.h33
-rw-r--r--include/linux/kthread.h8
-rw-r--r--include/linux/kvm.h3
-rw-r--r--include/linux/kvm_host.h31
-rw-r--r--include/linux/libata.h4
-rw-r--r--include/linux/mdio.h28
-rw-r--r--include/linux/memblock.h4
-rw-r--r--include/linux/mfd/abx500/ab8500-codec.h52
-rw-r--r--include/linux/mfd/abx500/ab8500.h2
-rw-r--r--include/linux/mfd/s5m87xx/s5m-core.h5
-rw-r--r--include/linux/mfd/tmio.h4
-rw-r--r--include/linux/mfd/tps65217.h13
-rw-r--r--include/linux/mfd/tps65910.h6
-rw-r--r--include/linux/mii.h9
-rw-r--r--include/linux/miscdevice.h1
-rw-r--r--include/linux/mlx4/cmd.h4
-rw-r--r--include/linux/mlx4/device.h258
-rw-r--r--include/linux/mlx4/driver.h5
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/mmc/card.h1
-rw-r--r--include/linux/mmc/cd-gpio.h18
-rw-r--r--include/linux/mmc/host.h48
-rw-r--r--include/linux/mmc/sdhci.h4
-rw-r--r--include/linux/mmc/sh_mmcif.h10
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h1
-rw-r--r--include/linux/mmc/slot-gpio.h24
-rw-r--r--include/linux/mmzone.h4
-rw-r--r--include/linux/mxsfb.h (renamed from arch/arm/mach-mxs/include/mach/mxsfb.h)6
-rw-r--r--include/linux/namei.h18
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netdevice.h18
-rw-r--r--include/linux/netfilter.h26
-rw-r--r--include/linux/netfilter/Kbuild1
-rw-r--r--include/linux/netfilter/nf_conntrack_sip.h2
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h39
-rw-r--r--include/linux/netfilter/nfnetlink_cthelper.h55
-rw-r--r--include/linux/netfilter/nfnetlink_queue.h9
-rw-r--r--include/linux/netfilter/xt_connlimit.h9
-rw-r--r--include/linux/netfilter/xt_recent.h10
-rw-r--r--include/linux/netfilter_ipv4.h1
-rw-r--r--include/linux/netfilter_ipv4/Kbuild1
-rw-r--r--include/linux/netfilter_ipv4/ipt_addrtype.h27
-rw-r--r--include/linux/netfilter_ipv6.h1
-rw-r--r--include/linux/netlink.h24
-rw-r--r--include/linux/netpoll.h2
-rw-r--r--include/linux/nfc.h26
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/nl80211.h234
-rw-r--r--include/linux/nl802154.h14
-rw-r--r--include/linux/of.h13
-rw-r--r--include/linux/of_iommu.h21
-rw-r--r--include/linux/of_mtd.h2
-rw-r--r--include/linux/pci-acpi.h1
-rw-r--r--include/linux/pci.h84
-rw-r--r--include/linux/pci_ids.h12
-rw-r--r--include/linux/pci_regs.h122
-rw-r--r--include/linux/perf_event.h3
-rw-r--r--include/linux/phy.h31
-rw-r--r--include/linux/pinctrl/pinctrl.h5
-rw-r--r--include/linux/pkt_cls.h6
-rw-r--r--include/linux/platform_data/clk-integrator.h1
-rw-r--r--include/linux/platform_data/clk-nomadik.h2
-rw-r--r--include/linux/platform_data/clk-u300.h1
-rw-r--r--include/linux/platform_data/mmp_audio.h22
-rw-r--r--include/linux/platform_data/omap_drm.h (renamed from drivers/staging/omapdrm/omap_priv.h)19
-rw-r--r--include/linux/pm_domain.h18
-rw-r--r--include/linux/pm_qos.h2
-rw-r--r--include/linux/power/smartreflex.h (renamed from arch/arm/mach-omap2/smartreflex.h)74
-rw-r--r--include/linux/prctl.h2
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/quotaops.h8
-rw-r--r--include/linux/rcupdate.h54
-rw-r--r--include/linux/rcutiny.h6
-rw-r--r--include/linux/regmap.h69
-rw-r--r--include/linux/regulator/consumer.h6
-rw-r--r--include/linux/regulator/driver.h30
-rw-r--r--include/linux/regulator/fixed.h13
-rw-r--r--include/linux/regulator/lp872x.h90
-rw-r--r--include/linux/regulator/machine.h3
-rw-r--r--include/linux/rpmsg.h6
-rw-r--r--include/linux/rtnetlink.h132
-rw-r--r--include/linux/sched.h22
-rw-r--r--include/linux/seq_file.h1
-rw-r--r--include/linux/sfi_acpi.h4
-rw-r--r--include/linux/sh_clk.h21
-rw-r--r--include/linux/sh_dma.h41
-rw-r--r--include/linux/sh_pfc.h65
-rw-r--r--include/linux/shdma-base.h124
-rw-r--r--include/linux/skbuff.h16
-rw-r--r--include/linux/smp.h5
-rw-r--r--include/linux/snmp.h9
-rw-r--r--include/linux/sock_diag.h2
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/spi/at86rf230.h31
-rw-r--r--include/linux/splice.h8
-rw-r--r--include/linux/ssb/ssb.h1
-rw-r--r--include/linux/suspend.h6
-rw-r--r--include/linux/task_work.h18
-rw-r--r--include/linux/tcp.h36
-rw-r--r--include/linux/tick.h8
-rw-r--r--include/linux/time-armada-370-xp.h18
-rw-r--r--include/linux/tipc_config.h4
-rw-r--r--include/linux/tracehook.h2
-rw-r--r--include/linux/tracepoint.h2
-rw-r--r--include/linux/types.h9
-rw-r--r--include/linux/uhid.h104
-rw-r--r--include/linux/usb/tilegx.h34
-rw-r--r--include/linux/usb/usbnet.h5
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/linux/virtio.h1
-rw-r--r--include/linux/virtio_scsi.h9
-rw-r--r--include/net/addrconf.h3
-rw-r--r--include/net/af_unix.h3
-rw-r--r--include/net/arp.h25
-rw-r--r--include/net/bluetooth/a2mp.h126
-rw-r--r--include/net/bluetooth/bluetooth.h39
-rw-r--r--include/net/bluetooth/hci.h110
-rw-r--r--include/net/bluetooth/hci_core.h37
-rw-r--r--include/net/bluetooth/l2cap.h210
-rw-r--r--include/net/bluetooth/mgmt.h2
-rw-r--r--include/net/caif/caif_hsi.h71
-rw-r--r--include/net/cfg80211.h278
-rw-r--r--include/net/dn_route.h2
-rw-r--r--include/net/dst.h92
-rw-r--r--include/net/dst_ops.h10
-rw-r--r--include/net/fib_rules.h1
-rw-r--r--include/net/flow.h3
-rw-r--r--include/net/genetlink.h2
-rw-r--r--include/net/inet6_connection_sock.h3
-rw-r--r--include/net/inet_common.h6
-rw-r--r--include/net/inet_connection_sock.h3
-rw-r--r--include/net/inet_hashtables.h4
-rw-r--r--include/net/inet_sock.h3
-rw-r--r--include/net/inetpeer.h90
-rw-r--r--include/net/ip.h8
-rw-r--r--include/net/ip6_fib.h35
-rw-r--r--include/net/ip6_route.h41
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_fib.h83
-rw-r--r--include/net/ip_vs.h2
-rw-r--r--include/net/ipv6.h39
-rw-r--r--include/net/mac80211.h109
-rw-r--r--include/net/mac802154.h8
-rw-r--r--include/net/ndisc.h50
-rw-r--r--include/net/neighbour.h26
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netevent.h4
-rw-r--r--include/net/netfilter/nf_conntrack.h35
-rw-r--r--include/net/netfilter/nf_conntrack_core.h4
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h2
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h4
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h9
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h29
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h11
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h33
-rw-r--r--include/net/netfilter/nf_nat_helper.h4
-rw-r--r--include/net/netfilter/nfnetlink_queue.h43
-rw-r--r--include/net/netns/conntrack.h55
-rw-r--r--include/net/netns/ipv4.h14
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netprio_cgroup.h4
-rw-r--r--include/net/nfc/hci.h22
-rw-r--r--include/net/nfc/nfc.h16
-rw-r--r--include/net/nfc/shdlc.h3
-rw-r--r--include/net/protocol.h8
-rw-r--r--include/net/regulatory.h5
-rw-r--r--include/net/route.h76
-rw-r--r--include/net/rtnetlink.h10
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/net/scm.h1
-rw-r--r--include/net/sctp/constants.h1
-rw-r--r--include/net/sctp/sctp.h6
-rw-r--r--include/net/sctp/structs.h28
-rw-r--r--include/net/sctp/tsnmap.h3
-rw-r--r--include/net/sctp/user.h11
-rw-r--r--include/net/sock.h8
-rw-r--r--include/net/tcp.h48
-rw-r--r--include/net/timewait_sock.h8
-rw-r--r--include/net/xfrm.h12
-rw-r--r--include/rdma/ib_cm.h12
-rw-r--r--include/rdma/ib_sa.h33
-rw-r--r--include/rdma/rdma_cm.h10
-rw-r--r--include/rdma/rdma_user_cm.h1
-rw-r--r--include/scsi/libfc.h24
-rw-r--r--include/scsi/libsas.h36
-rw-r--r--include/scsi/sas_ata.h5
-rw-r--r--include/scsi/scsi.h2
-rw-r--r--include/scsi/scsi_cmnd.h8
-rw-r--r--include/scsi/scsi_device.h5
-rw-r--r--include/scsi/scsi_dh.h6
-rw-r--r--include/scsi/scsi_scan.h11
-rw-r--r--include/scsi/scsi_transport_fc.h12
-rw-r--r--include/sound/designware_i2s.h69
-rw-r--r--include/sound/dmaengine_pcm.h1
-rw-r--r--include/sound/pcm.h14
-rw-r--r--include/sound/pcm_params.h2
-rw-r--r--include/sound/soc-dapm.h12
-rw-r--r--include/sound/soc.h77
-rw-r--r--include/sound/spear_dma.h35
-rw-r--r--include/sound/spear_spdif.h29
-rw-r--r--include/sound/tlv.h29
-rw-r--r--include/sound/vx_core.h2
-rw-r--r--include/target/target_core_backend.h17
-rw-r--r--include/target/target_core_base.h56
-rw-r--r--include/target/target_core_fabric.h15
-rw-r--r--include/trace/events/kvm.h7
-rw-r--r--include/trace/events/rcu.h45
-rw-r--r--include/trace/events/workqueue.h2
-rw-r--r--include/trace/ftrace.h1
-rw-r--r--include/xen/events.h2
-rw-r--r--include/xen/interface/io/xs_wire.h3
-rw-r--r--include/xen/interface/platform.h8
-rw-r--r--include/xen/interface/xen-mca.h385
-rw-r--r--include/xen/interface/xen.h1
-rw-r--r--init/main.c3
-rw-r--r--ipc/mqueue.c119
-rw-r--r--kernel/async.c76
-rw-r--r--kernel/audit.c30
-rw-r--r--kernel/audit_tree.c10
-rw-r--r--kernel/audit_watch.c25
-rw-r--r--kernel/cgroup.c76
-rw-r--r--kernel/debug/kdb/kdb_main.c91
-rw-r--r--kernel/debug/kdb/kdb_private.h1
-rw-r--r--kernel/events/core.c49
-rw-r--r--kernel/events/uprobes.c461
-rw-r--r--kernel/exit.c6
-rw-r--r--kernel/fork.c13
-rw-r--r--kernel/hrtimer.c53
-rw-r--r--kernel/irq/irqdomain.c8
-rw-r--r--kernel/irq/manage.c6
-rw-r--r--kernel/kthread.c88
-rw-r--r--kernel/power/Kconfig4
-rw-r--r--kernel/power/hibernate.c50
-rw-r--r--kernel/power/main.c45
-rw-r--r--kernel/power/power.h3
-rw-r--r--kernel/power/suspend.c3
-rw-r--r--kernel/power/swap.c82
-rw-r--r--kernel/power/user.c2
-rw-r--r--kernel/power/wakelock.c7
-rw-r--r--kernel/printk.c552
-rw-r--r--kernel/rcupdate.c44
-rw-r--r--kernel/rcutiny.c4
-rw-r--r--kernel/rcutiny_plugin.h56
-rw-r--r--kernel/rcutorture.c72
-rw-r--r--kernel/rcutree.c493
-rw-r--r--kernel/rcutree.h47
-rw-r--r--kernel/rcutree_plugin.h237
-rw-r--r--kernel/rcutree_trace.c148
-rw-r--r--kernel/relay.c5
-rw-r--r--kernel/resource.c13
-rw-r--r--kernel/sched/core.c276
-rw-r--r--kernel/sched/idle_task.c1
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/signal.c15
-rw-r--r--kernel/smp.c20
-rw-r--r--kernel/smpboot.h2
-rw-r--r--kernel/sys.c16
-rw-r--r--kernel/task_work.c94
-rw-r--r--kernel/time/ntp.c8
-rw-r--r--kernel/time/tick-sched.c194
-rw-r--r--kernel/time/timekeeping.c511
-rw-r--r--kernel/time/timer_list.c4
-rw-r--r--kernel/timer.c110
-rw-r--r--kernel/trace/ftrace.c8
-rw-r--r--kernel/trace/ring_buffer.c10
-rw-r--r--kernel/trace/trace.c39
-rw-r--r--kernel/trace/trace.h8
-rw-r--r--kernel/trace/trace_functions_graph.c2
-rw-r--r--kernel/trace/trace_output.c2
-rw-r--r--kernel/workqueue.c1144
-rw-r--r--lib/Kconfig8
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/div64.c2
-rw-r--r--lib/dma-debug.c4
-rw-r--r--lib/kobject_uevent.c5
-rw-r--r--lib/list_debug.c6
-rw-r--r--lib/mpi/Makefile11
-rw-r--r--lib/mpi/generic_mpi-asm-defs.h4
-rw-r--r--lib/mpi/mpi-add.c234
-rw-r--r--lib/mpi/mpi-bit.c162
-rw-r--r--lib/mpi/mpi-cmp.c68
-rw-r--r--lib/mpi/mpi-div.c338
-rw-r--r--lib/mpi/mpi-gcd.c59
-rw-r--r--lib/mpi/mpi-inline.c31
-rw-r--r--lib/mpi/mpi-inv.c187
-rw-r--r--lib/mpi/mpi-mpow.c134
-rw-r--r--lib/mpi/mpi-mul.c194
-rw-r--r--lib/mpi/mpi-scan.c136
-rw-r--r--lib/mpi/mpicoder.c75
-rw-r--r--lib/mpi/mpih-div.c309
-rw-r--r--lib/mpi/mpih-mul.c30
-rw-r--r--lib/mpi/mpiutil.c88
-rw-r--r--mm/bootmem.c6
-rw-r--r--mm/bounce.c8
-rw-r--r--mm/compaction.c5
-rw-r--r--mm/frontswap.c150
-rw-r--r--mm/madvise.c18
-rw-r--r--mm/memblock.c51
-rw-r--r--mm/memory-failure.c14
-rw-r--r--mm/memory_hotplug.c2
-rw-r--r--mm/nobootmem.c40
-rw-r--r--mm/page_alloc.c7
-rw-r--r--mm/shmem.c198
-rw-r--r--mm/sparse.c20
-rw-r--r--mm/vmscan.c17
-rw-r--r--net/8021q/vlan.c3
-rw-r--r--net/8021q/vlan_dev.c5
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_virtio.c2
-rw-r--r--net/appletalk/ddp.c8
-rw-r--r--net/atm/lec.c8
-rw-r--r--net/atm/pppoatm.c2
-rw-r--r--net/ax25/af_ax25.c1
-rw-r--r--net/ax25/ax25_addr.c6
-rw-r--r--net/ax25/ax25_out.c2
-rw-r--r--net/ax25/ax25_route.c2
-rw-r--r--net/batman-adv/Makefile4
-rw-r--r--net/batman-adv/bat_algo.h6
-rw-r--r--net/batman-adv/bat_debugfs.c388
-rw-r--r--net/batman-adv/bat_iv_ogm.c1050
-rw-r--r--net/batman-adv/bat_sysfs.c735
-rw-r--r--net/batman-adv/bitarray.c65
-rw-r--r--net/batman-adv/bitarray.h24
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c810
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h75
-rw-r--r--net/batman-adv/debugfs.c409
-rw-r--r--net/batman-adv/debugfs.h (renamed from net/batman-adv/bat_debugfs.h)15
-rw-r--r--net/batman-adv/gateway_client.c354
-rw-r--r--net/batman-adv/gateway_client.h32
-rw-r--r--net/batman-adv/gateway_common.c61
-rw-r--r--net/batman-adv/gateway_common.h23
-rw-r--r--net/batman-adv/hard-interface.c342
-rw-r--r--net/batman-adv/hard-interface.h51
-rw-r--r--net/batman-adv/hash.c25
-rw-r--r--net/batman-adv/hash.h78
-rw-r--r--net/batman-adv/icmp_socket.c180
-rw-r--r--net/batman-adv/icmp_socket.h14
-rw-r--r--net/batman-adv/main.c276
-rw-r--r--net/batman-adv/main.h257
-rw-r--r--net/batman-adv/originator.c337
-rw-r--r--net/batman-adv/originator.h57
-rw-r--r--net/batman-adv/packet.h181
-rw-r--r--net/batman-adv/ring_buffer.c13
-rw-r--r--net/batman-adv/ring_buffer.h9
-rw-r--r--net/batman-adv/routing.c689
-rw-r--r--net/batman-adv/routing.h64
-rw-r--r--net/batman-adv/send.c237
-rw-r--r--net/batman-adv/send.h23
-rw-r--r--net/batman-adv/soft-interface.c304
-rw-r--r--net/batman-adv/soft-interface.h17
-rw-r--r--net/batman-adv/sysfs.c787
-rw-r--r--net/batman-adv/sysfs.h (renamed from net/batman-adv/bat_sysfs.h)24
-rw-r--r--net/batman-adv/translation-table.c1659
-rw-r--r--net/batman-adv/translation-table.h75
-rw-r--r--net/batman-adv/types.h183
-rw-r--r--net/batman-adv/unicast.c179
-rw-r--r--net/batman-adv/unicast.h34
-rw-r--r--net/batman-adv/vis.c728
-rw-r--r--net/batman-adv/vis.h26
-rw-r--r--net/bluetooth/Makefile3
-rw-r--r--net/bluetooth/a2mp.c568
-rw-r--r--net/bluetooth/af_bluetooth.c14
-rw-r--r--net/bluetooth/bnep/core.c21
-rw-r--r--net/bluetooth/bnep/netdev.c16
-rw-r--r--net/bluetooth/bnep/sock.c18
-rw-r--r--net/bluetooth/hci_conn.c143
-rw-r--r--net/bluetooth/hci_core.c265
-rw-r--r--net/bluetooth/hci_event.c479
-rw-r--r--net/bluetooth/hci_sock.c59
-rw-r--r--net/bluetooth/hci_sysfs.c99
-rw-r--r--net/bluetooth/hidp/core.c26
-rw-r--r--net/bluetooth/hidp/sock.c16
-rw-r--r--net/bluetooth/l2cap_core.c2248
-rw-r--r--net/bluetooth/l2cap_sock.c130
-rw-r--r--net/bluetooth/lib.c7
-rw-r--r--net/bluetooth/mgmt.c131
-rw-r--r--net/bluetooth/rfcomm/core.c32
-rw-r--r--net/bluetooth/rfcomm/sock.c21
-rw-r--r--net/bluetooth/rfcomm/tty.c9
-rw-r--r--net/bluetooth/sco.c43
-rw-r--r--net/bluetooth/smp.c7
-rw-r--r--net/bridge/br_device.c9
-rw-r--r--net/bridge/br_multicast.c11
-rw-r--r--net/bridge/br_netfilter.c77
-rw-r--r--net/bridge/netfilter/ebt_ulog.c29
-rw-r--r--net/caif/caif_dev.c10
-rw-r--r--net/caif/cfctrl.c17
-rw-r--r--net/can/af_can.c126
-rw-r--r--net/can/af_can.h3
-rw-r--r--net/can/gw.c90
-rw-r--r--net/can/proc.c3
-rw-r--r--net/can/raw.c50
-rw-r--r--net/ceph/messenger.c12
-rw-r--r--net/ceph/pagelist.c14
-rw-r--r--net/compat.c4
-rw-r--r--net/core/datagram.c1
-rw-r--r--net/core/dev.c59
-rw-r--r--net/core/dst.c25
-rw-r--r--net/core/ethtool.c45
-rw-r--r--net/core/fib_rules.c4
-rw-r--r--net/core/flow_dissector.c5
-rw-r--r--net/core/neighbour.c31
-rw-r--r--net/core/net-sysfs.c74
-rw-r--r--net/core/net_namespace.c4
-rw-r--r--net/core/netpoll.c10
-rw-r--r--net/core/netprio_cgroup.c133
-rw-r--r--net/core/rtnetlink.c57
-rw-r--r--net/core/scm.c22
-rw-r--r--net/core/skbuff.c74
-rw-r--r--net/core/sock.c15
-rw-r--r--net/core/sock_diag.c42
-rw-r--r--net/dcb/dcbnl.c1168
-rw-r--r--net/dccp/ackvec.h7
-rw-r--r--net/dccp/ccid.c1
-rw-r--r--net/dccp/ccids/ccid3.c8
-rw-r--r--net/dccp/ccids/lib/loss_interval.c1
-rw-r--r--net/dccp/ccids/lib/packet_history.c3
-rw-r--r--net/dccp/ccids/lib/tfrc_equation.c2
-rw-r--r--net/dccp/dccp.h1
-rw-r--r--net/dccp/feat.c10
-rw-r--r--net/dccp/input.c1
-rw-r--r--net/dccp/ipv4.c24
-rw-r--r--net/dccp/ipv6.c61
-rw-r--r--net/dccp/options.c1
-rw-r--r--net/dccp/output.c1
-rw-r--r--net/decnet/dn_fib.c8
-rw-r--r--net/decnet/dn_neigh.c8
-rw-r--r--net/decnet/dn_nsp_out.c2
-rw-r--r--net/decnet/dn_route.c144
-rw-r--r--net/decnet/dn_table.c76
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c30
-rw-r--r--net/ethernet/Makefile2
-rw-r--r--net/ethernet/eth.c5
-rw-r--r--net/ieee802154/6lowpan.c251
-rw-r--r--net/ieee802154/dgram.c12
-rw-r--r--net/ieee802154/netlink.c4
-rw-r--r--net/ieee802154/nl-mac.c2
-rw-r--r--net/ieee802154/nl-phy.c2
-rw-r--r--net/ipv4/Kconfig11
-rw-r--r--net/ipv4/Makefile3
-rw-r--r--net/ipv4/af_inet.c75
-rw-r--r--net/ipv4/ah4.c17
-rw-r--r--net/ipv4/arp.c8
-rw-r--r--net/ipv4/cipso_ipv4.c6
-rw-r--r--net/ipv4/devinet.c5
-rw-r--r--net/ipv4/esp4.c17
-rw-r--r--net/ipv4/fib_frontend.c130
-rw-r--r--net/ipv4/fib_rules.c39
-rw-r--r--net/ipv4/fib_semantics.c46
-rw-r--r--net/ipv4/fib_trie.c13
-rw-r--r--net/ipv4/icmp.c191
-rw-r--r--net/ipv4/inet_connection_sock.c53
-rw-r--r--net/ipv4/inet_diag.c146
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/inetpeer.c99
-rw-r--r--net/ipv4/ip_fragment.c10
-rw-r--r--net/ipv4/ip_gre.c25
-rw-r--r--net/ipv4/ip_input.c32
-rw-r--r--net/ipv4/ip_options.c29
-rw-r--r--net/ipv4/ip_output.c93
-rw-r--r--net/ipv4/ip_sockglue.c12
-rw-r--r--net/ipv4/ip_vti.c956
-rw-r--r--net/ipv4/ipcomp.c17
-rw-r--r--net/ipv4/ipip.c28
-rw-r--r--net/ipv4/ipmr.c41
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c5
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c23
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c172
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c81
-rw-r--r--net/ipv4/netfilter/nf_defrag_ipv4.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_amanda.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c13
-rw-r--r--net/ipv4/netfilter/nf_nat_pptp.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c4
-rw-r--r--net/ipv4/netfilter/nf_nat_tftp.c4
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/proc.c7
-rw-r--r--net/ipv4/protocol.c8
-rw-r--r--net/ipv4/raw.c5
-rw-r--r--net/ipv4/route.c2146
-rw-r--r--net/ipv4/syncookies.c2
-rw-r--r--net/ipv4/sysctl_net_ipv4.c28
-rw-r--r--net/ipv4/tcp.c72
-rw-r--r--net/ipv4/tcp_cong.c5
-rw-r--r--net/ipv4/tcp_fastopen.c11
-rw-r--r--net/ipv4/tcp_input.c375
-rw-r--r--net/ipv4/tcp_ipv4.c186
-rw-r--r--net/ipv4/tcp_metrics.c745
-rw-r--r--net/ipv4/tcp_minisocks.c61
-rw-r--r--net/ipv4/tcp_output.c343
-rw-r--r--net/ipv4/tcp_timer.c70
-rw-r--r--net/ipv4/udp.c9
-rw-r--r--net/ipv4/udp_diag.c10
-rw-r--r--net/ipv4/xfrm4_input.c4
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c68
-rw-r--r--net/ipv4/xfrm4_policy.c33
-rw-r--r--net/ipv6/addrconf.c21
-rw-r--r--net/ipv6/ah6.c11
-rw-r--r--net/ipv6/esp6.c11
-rw-r--r--net/ipv6/exthdrs.c4
-rw-r--r--net/ipv6/icmp.c23
-rw-r--r--net/ipv6/inet6_connection_sock.c103
-rw-r--r--net/ipv6/ip6_fib.c5
-rw-r--r--net/ipv6/ip6_input.c9
-rw-r--r--net/ipv6/ip6_output.c40
-rw-r--r--net/ipv6/ip6_tunnel.c96
-rw-r--r--net/ipv6/ip6mr.c5
-rw-r--r--net/ipv6/ipcomp6.c11
-rw-r--r--net/ipv6/mcast.c3
-rw-r--r--net/ipv6/ndisc.c129
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c131
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c51
-rw-r--r--net/ipv6/protocol.c8
-rw-r--r--net/ipv6/raw.c11
-rw-r--r--net/ipv6/route.c538
-rw-r--r--net/ipv6/sit.c25
-rw-r--r--net/ipv6/syncookies.c5
-rw-r--r--net/ipv6/tcp_ipv6.c204
-rw-r--r--net/ipv6/udp.c13
-rw-r--r--net/ipv6/xfrm6_policy.c26
-rw-r--r--net/ipx/Makefile2
-rw-r--r--net/ipx/pe2.c (renamed from net/ethernet/pe2.c)2
-rw-r--r--net/irda/af_irda.c2
-rw-r--r--net/irda/irlan/irlan_provider.c2
-rw-r--r--net/irda/irqueue.c6
-rw-r--r--net/l2tp/l2tp_core.c11
-rw-r--r--net/l2tp/l2tp_eth.c15
-rw-r--r--net/l2tp/l2tp_netlink.c6
-rw-r--r--net/l2tp/l2tp_ppp.c8
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/llc/llc_station.c16
-rw-r--r--net/mac80211/Kconfig56
-rw-r--r--net/mac80211/Makefile7
-rw-r--r--net/mac80211/agg-rx.c38
-rw-r--r--net/mac80211/agg-tx.c118
-rw-r--r--net/mac80211/cfg.c734
-rw-r--r--net/mac80211/chan.c4
-rw-r--r--net/mac80211/debug.h170
-rw-r--r--net/mac80211/debugfs.c2
-rw-r--r--net/mac80211/debugfs_key.c16
-rw-r--r--net/mac80211/debugfs_netdev.c49
-rw-r--r--net/mac80211/driver-ops.h39
-rw-r--r--net/mac80211/driver-trace.c9
-rw-r--r--net/mac80211/ht.c10
-rw-r--r--net/mac80211/ibss.c127
-rw-r--r--net/mac80211/ieee80211_i.h139
-rw-r--r--net/mac80211/iface.c325
-rw-r--r--net/mac80211/key.c24
-rw-r--r--net/mac80211/main.c48
-rw-r--r--net/mac80211/mesh.c19
-rw-r--r--net/mac80211/mesh.h4
-rw-r--r--net/mac80211/mesh_hwmp.c173
-rw-r--r--net/mac80211/mesh_pathtbl.c34
-rw-r--r--net/mac80211/mesh_plink.c70
-rw-r--r--net/mac80211/mesh_sync.c47
-rw-r--r--net/mac80211/mlme.c399
-rw-r--r--net/mac80211/offchannel.c291
-rw-r--r--net/mac80211/pm.c11
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c10
-rw-r--r--net/mac80211/rx.c135
-rw-r--r--net/mac80211/scan.c123
-rw-r--r--net/mac80211/sta_info.c45
-rw-r--r--net/mac80211/status.c48
-rw-r--r--net/mac80211/tkip.c46
-rw-r--r--net/mac80211/trace.c75
-rw-r--r--net/mac80211/trace.h (renamed from net/mac80211/driver-trace.h)80
-rw-r--r--net/mac80211/tx.c95
-rw-r--r--net/mac80211/util.c178
-rw-r--r--net/mac80211/wme.c11
-rw-r--r--net/mac80211/wme.h2
-rw-r--r--net/mac80211/work.c370
-rw-r--r--net/mac802154/Makefile2
-rw-r--r--net/mac802154/ieee802154_dev.c4
-rw-r--r--net/mac802154/mac802154.h9
-rw-r--r--net/mac802154/mac_cmd.c33
-rw-r--r--net/mac802154/mib.c108
-rw-r--r--net/mac802154/rx.c1
-rw-r--r--net/mac802154/tx.c2
-rw-r--r--net/mac802154/wpan.c559
-rw-r--r--net/netfilter/Kconfig21
-rw-r--r--net/netfilter/Makefile3
-rw-r--r--net/netfilter/core.c7
-rw-r--r--net/netfilter/ipset/ip_set_core.c12
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c32
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c24
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c19
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c8
-rw-r--r--net/netfilter/nf_conntrack_core.c22
-rw-r--r--net/netfilter/nf_conntrack_extend.c16
-rw-r--r--net/netfilter/nf_conntrack_ftp.c11
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c16
-rw-r--r--net/netfilter/nf_conntrack_helper.c38
-rw-r--r--net/netfilter/nf_conntrack_irc.c8
-rw-r--r--net/netfilter/nf_conntrack_netlink.c407
-rw-r--r--net/netfilter/nf_conntrack_pptp.c17
-rw-r--r--net/netfilter/nf_conntrack_proto.c300
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c143
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c81
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c79
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c175
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c163
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c111
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c127
-rw-r--r--net/netfilter/nf_conntrack_sane.c12
-rw-r--r--net/netfilter/nf_conntrack_sip.c32
-rw-r--r--net/netfilter/nf_conntrack_tftp.c8
-rw-r--r--net/netfilter/nfnetlink.c44
-rw-r--r--net/netfilter/nfnetlink_cthelper.c672
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c13
-rw-r--r--net/netfilter/nfnetlink_log.c29
-rw-r--r--net/netfilter/nfnetlink_queue_core.c (renamed from net/netfilter/nfnetlink_queue.c)95
-rw-r--r--net/netfilter/nfnetlink_queue_ct.c98
-rw-r--r--net/netfilter/xt_CT.c44
-rw-r--r--net/netfilter/xt_NFQUEUE.c28
-rw-r--r--net/netfilter/xt_TPROXY.c4
-rw-r--r--net/netfilter/xt_connlimit.c35
-rw-r--r--net/netfilter/xt_recent.c62
-rw-r--r--net/netfilter/xt_set.c4
-rw-r--r--net/netlink/af_netlink.c35
-rw-r--r--net/netlink/genetlink.c14
-rw-r--r--net/nfc/core.c157
-rw-r--r--net/nfc/hci/command.c26
-rw-r--r--net/nfc/hci/core.c137
-rw-r--r--net/nfc/hci/hci.h12
-rw-r--r--net/nfc/hci/hcp.c2
-rw-r--r--net/nfc/hci/shdlc.c44
-rw-r--r--net/nfc/llcp/commands.c54
-rw-r--r--net/nfc/llcp/llcp.c627
-rw-r--r--net/nfc/llcp/llcp.h31
-rw-r--r--net/nfc/llcp/sock.c76
-rw-r--r--net/nfc/nci/core.c23
-rw-r--r--net/nfc/nci/ntf.c15
-rw-r--r--net/nfc/netlink.c104
-rw-r--r--net/nfc/nfc.h12
-rw-r--r--net/nfc/rawsock.c5
-rw-r--r--net/openvswitch/actions.c5
-rw-r--r--net/openvswitch/datapath.c13
-rw-r--r--net/openvswitch/datapath.h2
-rw-r--r--net/openvswitch/dp_notify.c2
-rw-r--r--net/openvswitch/flow.c5
-rw-r--r--net/openvswitch/flow.h2
-rw-r--r--net/openvswitch/vport-internal_dev.c10
-rw-r--r--net/openvswitch/vport-internal_dev.h2
-rw-r--r--net/openvswitch/vport-netdev.c2
-rw-r--r--net/openvswitch/vport-netdev.h2
-rw-r--r--net/openvswitch/vport.c2
-rw-r--r--net/openvswitch/vport.h2
-rw-r--r--net/packet/af_packet.c29
-rw-r--r--net/rds/page.c9
-rw-r--r--net/rds/recv.c3
-rw-r--r--net/rfkill/core.c2
-rw-r--r--net/rxrpc/ar-error.c4
-rw-r--r--net/rxrpc/ar-output.c2
-rw-r--r--net/rxrpc/ar-peer.c2
-rw-r--r--net/sched/Kconfig20
-rw-r--r--net/sched/Makefile2
-rw-r--r--net/sched/act_api.c59
-rw-r--r--net/sched/cls_api.c12
-rw-r--r--net/sched/cls_route.c2
-rw-r--r--net/sched/em_canid.c240
-rw-r--r--net/sched/em_ipset.c135
-rw-r--r--net/sched/em_meta.c2
-rw-r--r--net/sched/sch_api.c24
-rw-r--r--net/sched/sch_netem.c51
-rw-r--r--net/sched/sch_sfb.c2
-rw-r--r--net/sched/sch_teql.c47
-rw-r--r--net/sctp/associola.c42
-rw-r--r--net/sctp/input.c27
-rw-r--r--net/sctp/ipv6.c3
-rw-r--r--net/sctp/output.c86
-rw-r--r--net/sctp/outqueue.c6
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/sm_make_chunk.c18
-rw-r--r--net/sctp/sm_sideeffect.c35
-rw-r--r--net/sctp/socket.c119
-rw-r--r--net/sctp/sysctl.c9
-rw-r--r--net/sctp/transport.c22
-rw-r--r--net/sctp/tsnmap.c6
-rw-r--r--net/sctp/ulpevent.c3
-rw-r--r--net/sctp/ulpqueue.c2
-rw-r--r--net/socket.c8
-rw-r--r--net/sunrpc/backchannel_rqst.c9
-rw-r--r--net/sunrpc/clnt.c2
-rw-r--r--net/sunrpc/svcauth_unix.c22
-rw-r--r--net/sunrpc/svcsock.c12
-rw-r--r--net/sunrpc/xdr.c12
-rw-r--r--net/sunrpc/xprt.c2
-rw-r--r--net/sunrpc/xprtsock.c3
-rw-r--r--net/tipc/Kconfig25
-rw-r--r--net/tipc/bcast.c75
-rw-r--r--net/tipc/bearer.c69
-rw-r--r--net/tipc/bearer.h4
-rw-r--r--net/tipc/config.c41
-rw-r--r--net/tipc/core.c18
-rw-r--r--net/tipc/core.h65
-rw-r--r--net/tipc/discover.c10
-rw-r--r--net/tipc/handler.c4
-rw-r--r--net/tipc/link.c326
-rw-r--r--net/tipc/link.h63
-rw-r--r--net/tipc/log.c302
-rw-r--r--net/tipc/log.h66
-rw-r--r--net/tipc/msg.c242
-rw-r--r--net/tipc/name_distr.c25
-rw-r--r--net/tipc/name_table.c142
-rw-r--r--net/tipc/net.c8
-rw-r--r--net/tipc/netlink.c2
-rw-r--r--net/tipc/node.c22
-rw-r--r--net/tipc/node_subscr.c3
-rw-r--r--net/tipc/port.c77
-rw-r--r--net/tipc/port.h1
-rw-r--r--net/tipc/ref.c10
-rw-r--r--net/tipc/socket.c17
-rw-r--r--net/tipc/subscr.c14
-rw-r--r--net/unix/af_unix.c110
-rw-r--r--net/unix/diag.c115
-rw-r--r--net/wireless/Kconfig35
-rw-r--r--net/wireless/Makefile2
-rw-r--r--net/wireless/ap.c46
-rw-r--r--net/wireless/chan.c107
-rw-r--r--net/wireless/core.c134
-rw-r--r--net/wireless/core.h106
-rw-r--r--net/wireless/ibss.c11
-rw-r--r--net/wireless/mesh.c121
-rw-r--r--net/wireless/mlme.c64
-rw-r--r--net/wireless/nl80211.c1009
-rw-r--r--net/wireless/nl80211.h21
-rw-r--r--net/wireless/reg.c137
-rw-r--r--net/wireless/reg.h8
-rw-r--r--net/wireless/scan.c24
-rw-r--r--net/wireless/sme.c10
-rw-r--r--net/wireless/util.c171
-rw-r--r--net/wireless/wext-compat.c23
-rw-r--r--net/wireless/wext-sme.c10
-rw-r--r--net/x25/x25_route.c2
-rw-r--r--net/xfrm/xfrm_policy.c37
-rw-r--r--net/xfrm/xfrm_user.c401
-rw-r--r--samples/seccomp/.gitignore3
-rw-r--r--samples/uhid/Makefile10
-rw-r--r--samples/uhid/uhid-example.c381
-rw-r--r--scripts/mksysmap2
-rw-r--r--scripts/mod/modpost.c11
-rw-r--r--security/integrity/ima/Kconfig3
-rw-r--r--security/integrity/ima/Makefile3
-rw-r--r--security/integrity/ima/ima.h9
-rw-r--r--security/integrity/ima/ima_api.c4
-rw-r--r--security/integrity/ima/ima_audit.c3
-rw-r--r--security/integrity/ima/ima_fs.c11
-rw-r--r--security/integrity/ima/ima_init.c5
-rw-r--r--security/integrity/ima/ima_main.c50
-rw-r--r--security/integrity/ima/ima_policy.c2
-rw-r--r--security/keys/compat.c4
-rw-r--r--security/keys/internal.h4
-rw-r--r--security/keys/keyctl.c28
-rw-r--r--security/keys/keyring.c2
-rw-r--r--security/keys/process_keys.c5
-rw-r--r--security/security.c1
-rw-r--r--security/selinux/hooks.c15
-rw-r--r--security/selinux/include/classmap.h4
-rw-r--r--security/selinux/include/security.h2
-rw-r--r--security/selinux/netlink.c17
-rw-r--r--security/selinux/selinuxfs.c6
-rw-r--r--security/smack/smack.h14
-rw-r--r--security/smack/smack_access.c9
-rw-r--r--security/smack/smack_lsm.c25
-rw-r--r--security/smack/smackfs.c53
-rw-r--r--sound/aoa/codecs/onyx.c75
-rw-r--r--sound/aoa/codecs/tas.c80
-rw-r--r--sound/arm/pxa2xx-ac97.c9
-rw-r--r--sound/atmel/abdac.c18
-rw-r--r--sound/atmel/ac97c.c18
-rw-r--r--sound/core/pcm_lib.c4
-rw-r--r--sound/core/pcm_misc.c18
-rw-r--r--sound/drivers/aloop.c22
-rw-r--r--sound/drivers/dummy.c21
-rw-r--r--sound/drivers/mpu401/mpu401.c3
-rw-r--r--sound/drivers/mtpav.c3
-rw-r--r--sound/drivers/mts64.c3
-rw-r--r--sound/drivers/pcsp/pcsp.c11
-rw-r--r--sound/drivers/portman2x4.c3
-rw-r--r--sound/drivers/serial-u16550.c3
-rw-r--r--sound/drivers/virmidi.c3
-rw-r--r--sound/drivers/vx/vx_core.c2
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c86
-rw-r--r--sound/isa/wss/wss_lib.c5
-rw-r--r--sound/oss/swarm_cs4297a.c17
-rw-r--r--sound/oss/vwsnd.c2
-rw-r--r--sound/pci/ali5451/ali5451.c24
-rw-r--r--sound/pci/als300.c24
-rw-r--r--sound/pci/als4000.c25
-rw-r--r--sound/pci/atiixp.c24
-rw-r--r--sound/pci/atiixp_modem.c25
-rw-r--r--sound/pci/au88x0/au88x0_mixer.c11
-rw-r--r--sound/pci/azt3328.c25
-rw-r--r--sound/pci/ca0106/ca0106_main.c24
-rw-r--r--sound/pci/cmipci.c24
-rw-r--r--sound/pci/cs4281.c24
-rw-r--r--sound/pci/cs46xx/cs46xx.c7
-rw-r--r--sound/pci/cs46xx/cs46xx.h (renamed from include/sound/cs46xx.h)11
-rw-r--r--sound/pci/cs46xx/cs46xx_dsp_scb_types.h (renamed from include/sound/cs46xx_dsp_scb_types.h)0
-rw-r--r--sound/pci/cs46xx/cs46xx_dsp_spos.h (renamed from include/sound/cs46xx_dsp_spos.h)0
-rw-r--r--sound/pci/cs46xx/cs46xx_dsp_task_types.h (renamed from include/sound/cs46xx_dsp_task_types.h)0
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c16
-rw-r--r--sound/pci/cs46xx/dsp_spos.c2
-rw-r--r--sound/pci/cs46xx/dsp_spos_scb_lib.c2
-rw-r--r--sound/pci/cs5535audio/cs5535audio.c5
-rw-r--r--sound/pci/cs5535audio/cs5535audio.h5
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pm.c13
-rw-r--r--sound/pci/ctxfi/ctatc.c4
-rw-r--r--sound/pci/ctxfi/ctatc.h2
-rw-r--r--sound/pci/ctxfi/cthardware.h2
-rw-r--r--sound/pci/ctxfi/cthw20k1.c4
-rw-r--r--sound/pci/ctxfi/cthw20k2.c4
-rw-r--r--sound/pci/ctxfi/xfi.c22
-rw-r--r--sound/pci/echoaudio/echoaudio.c22
-rw-r--r--sound/pci/emu10k1/emu10k1.c26
-rw-r--r--sound/pci/ens1370.c25
-rw-r--r--sound/pci/es1938.c49
-rw-r--r--sound/pci/es1968.c24
-rw-r--r--sound/pci/fm801.c26
-rw-r--r--sound/pci/hda/Kconfig7
-rw-r--r--sound/pci/hda/hda_auto_parser.c4
-rw-r--r--sound/pci/hda/hda_beep.c82
-rw-r--r--sound/pci/hda/hda_beep.h5
-rw-r--r--sound/pci/hda/hda_codec.c68
-rw-r--r--sound/pci/hda/hda_codec.h5
-rw-r--r--sound/pci/hda/hda_intel.c55
-rw-r--r--sound/pci/hda/hda_jack.c102
-rw-r--r--sound/pci/hda/hda_jack.h1
-rw-r--r--sound/pci/hda/hda_local.h4
-rw-r--r--sound/pci/hda/hda_proc.c17
-rw-r--r--sound/pci/hda/patch_analog.c2
-rw-r--r--sound/pci/hda/patch_cirrus.c2
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c310
-rw-r--r--sound/pci/hda/patch_realtek.c341
-rw-r--r--sound/pci/hda/patch_sigmatel.c2
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/pci/ice1712/ice1724.c26
-rw-r--r--sound/pci/intel8x0.c24
-rw-r--r--sound/pci/intel8x0m.c24
-rw-r--r--sound/pci/maestro3.c92
-rw-r--r--sound/pci/nm256/nm256.c24
-rw-r--r--sound/pci/oxygen/oxygen.c5
-rw-r--r--sound/pci/oxygen/oxygen.h3
-rw-r--r--sound/pci/oxygen/oxygen_lib.c17
-rw-r--r--sound/pci/oxygen/virtuoso.c5
-rw-r--r--sound/pci/pcxhr/pcxhr.c63
-rw-r--r--sound/pci/pcxhr/pcxhr.h1
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c27
-rw-r--r--sound/pci/pcxhr/pcxhr_core.h4
-rw-r--r--sound/pci/pcxhr/pcxhr_mix22.c11
-rw-r--r--sound/pci/pcxhr/pcxhr_mix22.h1
-rw-r--r--sound/pci/riptide/riptide.c26
-rw-r--r--sound/pci/sis7019.c25
-rw-r--r--sound/pci/trident/trident.c7
-rw-r--r--sound/pci/trident/trident.h (renamed from include/sound/trident.h)11
-rw-r--r--sound/pci/trident/trident_main.c16
-rw-r--r--sound/pci/trident/trident_memory.c2
-rw-r--r--sound/pci/via82xx.c24
-rw-r--r--sound/pci/via82xx_modem.c24
-rw-r--r--sound/pci/vx222/vx222.c26
-rw-r--r--sound/pci/ymfpci/ymfpci.c7
-rw-r--r--sound/pci/ymfpci/ymfpci.h (renamed from include/sound/ymfpci.h)11
-rw-r--r--sound/pci/ymfpci/ymfpci_main.c16
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.c2
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf.h2
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_core.c2
-rw-r--r--sound/pcmcia/vx/vxpocket.c2
-rw-r--r--sound/ppc/powermac.c21
-rw-r--r--sound/sh/aica.c4
-rw-r--r--sound/sh/sh_dac_audio.c1
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile1
-rw-r--r--sound/soc/blackfin/Kconfig21
-rw-r--r--sound/soc/blackfin/Makefile4
-rw-r--r--sound/soc/blackfin/bf6xx-i2s.c234
-rw-r--r--sound/soc/blackfin/bf6xx-sport.c422
-rw-r--r--sound/soc/blackfin/bf6xx-sport.h82
-rw-r--r--sound/soc/codecs/Kconfig31
-rw-r--r--sound/soc/codecs/Makefile19
-rw-r--r--sound/soc/codecs/ab8500-codec.c2522
-rw-r--r--sound/soc/codecs/ab8500-codec.h590
-rw-r--r--sound/soc/codecs/ac97.c6
-rw-r--r--sound/soc/codecs/arizona.c937
-rw-r--r--sound/soc/codecs/arizona.h159
-rw-r--r--sound/soc/codecs/cs42l52.c19
-rw-r--r--sound/soc/codecs/cs42l73.c20
-rw-r--r--sound/soc/codecs/da732x.c1627
-rw-r--r--sound/soc/codecs/da732x.h133
-rw-r--r--sound/soc/codecs/da732x_reg.h654
-rw-r--r--sound/soc/codecs/isabelle.c1176
-rw-r--r--sound/soc/codecs/isabelle.h143
-rw-r--r--sound/soc/codecs/lm49453.c3
-rw-r--r--sound/soc/codecs/max98095.c5
-rw-r--r--sound/soc/codecs/ml26124.c5
-rw-r--r--sound/soc/codecs/spdif_receiver.c67
-rw-r--r--sound/soc/codecs/sta529.c442
-rw-r--r--sound/soc/codecs/tlv320aic3x.c44
-rw-r--r--sound/soc/codecs/tlv320aic3x.h28
-rw-r--r--sound/soc/codecs/twl6040.c2
-rw-r--r--sound/soc/codecs/wm1250-ev1.c7
-rw-r--r--sound/soc/codecs/wm2000.c32
-rw-r--r--sound/soc/codecs/wm2200.c1
-rw-r--r--sound/soc/codecs/wm5100-tables.c2
-rw-r--r--sound/soc/codecs/wm5100.c11
-rw-r--r--sound/soc/codecs/wm5102.c903
-rw-r--r--sound/soc/codecs/wm5102.h21
-rw-r--r--sound/soc/codecs/wm5110.c950
-rw-r--r--sound/soc/codecs/wm5110.h21
-rw-r--r--sound/soc/codecs/wm8350.c22
-rw-r--r--sound/soc/codecs/wm8400.c2
-rw-r--r--sound/soc/codecs/wm8580.c2
-rw-r--r--sound/soc/codecs/wm8731.c1
-rw-r--r--sound/soc/codecs/wm8741.c2
-rw-r--r--sound/soc/codecs/wm8753.c2
-rw-r--r--sound/soc/codecs/wm8776.c2
-rw-r--r--sound/soc/codecs/wm8804.c2
-rw-r--r--sound/soc/codecs/wm8903.c316
-rw-r--r--sound/soc/codecs/wm8904.c272
-rw-r--r--sound/soc/codecs/wm8960.c2
-rw-r--r--sound/soc/codecs/wm8961.c2
-rw-r--r--sound/soc/codecs/wm8962.c8
-rw-r--r--sound/soc/codecs/wm8993.c2
-rw-r--r--sound/soc/codecs/wm8994.c46
-rw-r--r--sound/soc/codecs/wm8996.c587
-rw-r--r--sound/soc/codecs/wm9081.c2
-rw-r--r--sound/soc/codecs/wm9090.c2
-rw-r--r--sound/soc/codecs/wm9712.c2
-rw-r--r--sound/soc/codecs/wm9713.c2
-rw-r--r--sound/soc/codecs/wm_hubs.c2
-rw-r--r--sound/soc/dwc/Kconfig9
-rw-r--r--sound/soc/dwc/Makefile3
-rw-r--r--sound/soc/dwc/designware_i2s.c455
-rw-r--r--sound/soc/ep93xx/ep93xx-pcm.c2
-rw-r--r--sound/soc/fsl/imx-audmux.c2
-rw-r--r--sound/soc/fsl/imx-audmux.h1
-rw-r--r--sound/soc/fsl/imx-mc13783.c49
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c2
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c1
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c3
-rw-r--r--sound/soc/mxs/mxs-pcm.c2
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c2
-rw-r--r--sound/soc/omap/omap-mcpdm.c1
-rw-r--r--sound/soc/pxa/Kconfig42
-rw-r--r--sound/soc/pxa/Makefile8
-rw-r--r--sound/soc/pxa/brownstone.c174
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c33
-rw-r--r--sound/soc/pxa/mmp-pcm.c297
-rw-r--r--sound/soc/pxa/mmp-sspa.c480
-rw-r--r--sound/soc/pxa/mmp-sspa.h92
-rw-r--r--sound/soc/pxa/ttc-dkb.c173
-rw-r--r--sound/soc/samsung/dma.c18
-rw-r--r--sound/soc/samsung/littlemill.c7
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c10
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c10
-rw-r--r--sound/soc/samsung/smdk_wm8994.c36
-rw-r--r--sound/soc/sh/fsi.c52
-rw-r--r--sound/soc/sh/siu_pcm.c12
-rw-r--r--sound/soc/soc-core.c328
-rw-r--r--sound/soc/soc-dapm.c194
-rw-r--r--sound/soc/soc-dmaengine-pcm.c33
-rw-r--r--sound/soc/soc-io.c15
-rw-r--r--sound/soc/soc-pcm.c12
-rw-r--r--sound/soc/spear/spdif_in.c297
-rw-r--r--sound/soc/spear/spdif_in_regs.h60
-rw-r--r--sound/soc/spear/spdif_out.c389
-rw-r--r--sound/soc/spear/spdif_out_regs.h79
-rw-r--r--sound/soc/spear/spear_pcm.c214
-rw-r--r--sound/soc/tegra/Kconfig13
-rw-r--r--sound/soc/tegra/tegra20_i2s.c98
-rw-r--r--sound/soc/tegra/tegra20_i2s.h1
-rw-r--r--sound/soc/tegra/tegra20_spdif.c40
-rw-r--r--sound/soc/tegra/tegra20_spdif.h1
-rw-r--r--sound/soc/tegra/tegra30_ahub.c8
-rw-r--r--sound/soc/tegra/tegra30_i2s.c89
-rw-r--r--sound/soc/tegra/tegra30_i2s.h1
-rw-r--r--sound/soc/tegra/tegra_alc5632.c32
-rw-r--r--sound/soc/tegra/tegra_asoc_utils.c12
-rw-r--r--sound/soc/tegra/tegra_pcm.c115
-rw-r--r--sound/soc/tegra/tegra_pcm.h2
-rw-r--r--sound/soc/tegra/tegra_wm8753.c8
-rw-r--r--sound/soc/tegra/tegra_wm8903.c259
-rw-r--r--sound/soc/tegra/trimslice.c30
-rw-r--r--sound/soc/ux500/Kconfig18
-rw-r--r--sound/soc/ux500/Makefile6
-rw-r--r--sound/soc/ux500/mop500.c113
-rw-r--r--sound/soc/ux500/mop500_ab8500.c431
-rw-r--r--sound/soc/ux500/mop500_ab8500.h22
-rw-r--r--sound/soc/ux500/ux500_msp_dai.c2
-rw-r--r--sound/soc/ux500/ux500_msp_i2s.c2
-rw-r--r--sound/soc/ux500/ux500_pcm.c318
-rw-r--r--sound/soc/ux500/ux500_pcm.h35
-rw-r--r--sound/usb/caiaq/device.c2
-rw-r--r--sound/usb/endpoint.c73
-rw-r--r--sound/usb/mixer_quirks.c159
-rw-r--r--sound/usb/pcm.c61
-rw-r--r--tools/lib/traceevent/Makefile14
-rw-r--r--tools/lib/traceevent/event-parse.c399
-rw-r--r--tools/lib/traceevent/event-parse.h7
-rw-r--r--tools/lib/traceevent/parse-filter.c86
-rw-r--r--tools/perf/Documentation/perf-bench.txt78
-rw-r--r--tools/perf/Documentation/perf-report.txt2
-rw-r--r--tools/perf/Documentation/perf-top.txt2
-rw-r--r--tools/perf/Makefile8
-rw-r--r--tools/perf/bench/mem-memcpy.c84
-rw-r--r--tools/perf/bench/mem-memset.c88
-rw-r--r--tools/perf/builtin-bench.c4
-rw-r--r--tools/perf/builtin-evlist.c2
-rw-r--r--tools/perf/builtin-kmem.c37
-rw-r--r--tools/perf/builtin-lock.c4
-rw-r--r--tools/perf/builtin-record.c4
-rw-r--r--tools/perf/builtin-report.c16
-rw-r--r--tools/perf/builtin-sched.c38
-rw-r--r--tools/perf/builtin-script.c106
-rw-r--r--tools/perf/builtin-stat.c12
-rw-r--r--tools/perf/builtin-test.c2
-rw-r--r--tools/perf/builtin-top.c12
-rw-r--r--tools/perf/config/feature-tests.mak13
-rw-r--r--tools/perf/ui/browsers/annotate.c4
-rw-r--r--tools/perf/ui/browsers/hists.c210
-rw-r--r--tools/perf/ui/gtk/browser.c69
-rw-r--r--tools/perf/ui/gtk/gtk.h31
-rw-r--r--tools/perf/ui/gtk/setup.c5
-rw-r--r--tools/perf/ui/gtk/util.c129
-rw-r--r--tools/perf/ui/tui/setup.c6
-rw-r--r--tools/perf/ui/tui/util.c243
-rw-r--r--tools/perf/ui/util.c277
-rw-r--r--tools/perf/ui/util.h9
-rw-r--r--tools/perf/util/debug.c2
-rw-r--r--tools/perf/util/debug.h23
-rw-r--r--tools/perf/util/evlist.c4
-rw-r--r--tools/perf/util/evlist.h3
-rw-r--r--tools/perf/util/evsel.c227
-rw-r--r--tools/perf/util/evsel.h15
-rw-r--r--tools/perf/util/header.c33
-rw-r--r--tools/perf/util/hist.h1
-rw-r--r--tools/perf/util/include/linux/kernel.h10
-rw-r--r--tools/perf/util/map.c29
-rw-r--r--tools/perf/util/map.h2
-rw-r--r--tools/perf/util/parse-events-test.c179
-rw-r--r--tools/perf/util/parse-events.c470
-rw-r--r--tools/perf/util/parse-events.h17
-rw-r--r--tools/perf/util/parse-events.l134
-rw-r--r--tools/perf/util/parse-events.y86
-rw-r--r--tools/perf/util/pmu.c169
-rw-r--r--tools/perf/util/pmu.h11
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c32
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c25
-rw-r--r--tools/perf/util/session.c67
-rw-r--r--tools/perf/util/session.h14
-rw-r--r--tools/perf/util/sort.c49
-rw-r--r--tools/perf/util/sort.h2
-rw-r--r--tools/perf/util/string.c22
-rw-r--r--tools/perf/util/symbol.c94
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/top.c2
-rw-r--r--tools/perf/util/trace-event-parse.c61
-rw-r--r--tools/perf/util/trace-event-read.c97
-rw-r--r--tools/perf/util/trace-event-scripting.c7
-rw-r--r--tools/perf/util/trace-event.h38
-rw-r--r--tools/perf/util/util.h2
-rw-r--r--virt/kvm/assigned-dev.c15
-rw-r--r--virt/kvm/eventfd.c23
-rw-r--r--virt/kvm/ioapic.c19
-rw-r--r--virt/kvm/ioapic.h4
-rw-r--r--virt/kvm/irq_comm.c31
-rw-r--r--virt/kvm/kvm_main.c39
4696 files changed, 183520 insertions, 105976 deletions
diff --git a/Documentation/ABI/stable/vdso b/Documentation/ABI/stable/vdso
index 8a1cbb594497..7cdfc28cc2c6 100644
--- a/Documentation/ABI/stable/vdso
+++ b/Documentation/ABI/stable/vdso
@@ -24,4 +24,4 @@ though.
(As of this writing, this ABI documentation as been confirmed for x86_64.
The maintainers of the other vDSO-using architectures should confirm
- that it is correct for their architecture.) \ No newline at end of file
+ that it is correct for their architecture.)
diff --git a/Documentation/ABI/testing/sysfs-block-rssd b/Documentation/ABI/testing/sysfs-block-rssd
index 679ce3543122..beef30c046b0 100644
--- a/Documentation/ABI/testing/sysfs-block-rssd
+++ b/Documentation/ABI/testing/sysfs-block-rssd
@@ -1,26 +1,5 @@
-What: /sys/block/rssd*/registers
-Date: March 2012
-KernelVersion: 3.3
-Contact: Asai Thambi S P <asamymuthupa@micron.com>
-Description: This is a read-only file. Dumps below driver information and
- hardware registers.
- - S ACTive
- - Command Issue
- - Completed
- - PORT IRQ STAT
- - HOST IRQ STAT
- - Allocated
- - Commands in Q
-
What: /sys/block/rssd*/status
Date: April 2012
KernelVersion: 3.4
Contact: Asai Thambi S P <asamymuthupa@micron.com>
Description: This is a read-only file. Indicates the status of the device.
-
-What: /sys/block/rssd*/flags
-Date: May 2012
-KernelVersion: 3.5
-Contact: Asai Thambi S P <asamymuthupa@micron.com>
-Description: This is a read-only file. Dumps the flags in port and driver
- data structure
diff --git a/Documentation/ABI/testing/sysfs-block-zram b/Documentation/ABI/testing/sysfs-block-zram
index c8b3b48ec62c..ec93fe33baa6 100644
--- a/Documentation/ABI/testing/sysfs-block-zram
+++ b/Documentation/ABI/testing/sysfs-block-zram
@@ -96,4 +96,4 @@ Description:
overhead, allocated for this disk. So, allocator space
efficiency can be calculated using compr_data_size and this
statistic.
- Unit: bytes \ No newline at end of file
+ Unit: bytes
diff --git a/Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg b/Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg
index cb830df8777c..70d00dfa443d 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg
+++ b/Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg
@@ -40,4 +40,4 @@ Description: Controls the decimal places on the device.
the value of 10 ** n. Assume this field has
the value k and has 1 or more decimal places set,
to set the mth place (where m is not already set),
- change this fields value to k + 10 ** m. \ No newline at end of file
+ change this fields value to k + 10 ** m.
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870 b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
index 4a9c545bda4b..33e648808117 100644
--- a/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
+++ b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
@@ -53,4 +53,4 @@ Description:
Documentation/ABI/stable/sysfs-class-backlight.
It can be enabled by writing the value stored in
/sys/class/backlight/<backlight>/max_brightness to
- /sys/class/backlight/<backlight>/brightness. \ No newline at end of file
+ /sys/class/backlight/<backlight>/brightness.
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
index db1ad7e34fc3..938ef71e2035 100644
--- a/Documentation/ABI/testing/sysfs-class-mtd
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -142,13 +142,14 @@ KernelVersion: 3.4
Contact: linux-mtd@lists.infradead.org
Description:
This allows the user to examine and adjust the criteria by which
- mtd returns -EUCLEAN from mtd_read(). If the maximum number of
- bit errors that were corrected on any single region comprising
- an ecc step (as reported by the driver) equals or exceeds this
- value, -EUCLEAN is returned. Otherwise, absent an error, 0 is
- returned. Higher layers (e.g., UBI) use this return code as an
- indication that an erase block may be degrading and should be
- scrutinized as a candidate for being marked as bad.
+ mtd returns -EUCLEAN from mtd_read() and mtd_read_oob(). If the
+ maximum number of bit errors that were corrected on any single
+ region comprising an ecc step (as reported by the driver) equals
+ or exceeds this value, -EUCLEAN is returned. Otherwise, absent
+ an error, 0 is returned. Higher layers (e.g., UBI) use this
+ return code as an indication that an erase block may be
+ degrading and should be scrutinized as a candidate for being
+ marked as bad.
The initial value may be specified by the flash device driver.
If not, then the default value is ecc_strength.
@@ -167,7 +168,7 @@ Description:
block degradation, but high enough to avoid the consequences of
a persistent return value of -EUCLEAN on devices where sticky
bitflips occur. Note that if bitflip_threshold exceeds
- ecc_strength, -EUCLEAN is never returned by mtd_read().
+ ecc_strength, -EUCLEAN is never returned by the read operations.
Conversely, if bitflip_threshold is zero, -EUCLEAN is always
returned, absent a hard error.
diff --git a/Documentation/ABI/testing/sysfs-devices-system-xen_cpu b/Documentation/ABI/testing/sysfs-devices-system-xen_cpu
new file mode 100644
index 000000000000..9ca02fb2d498
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-system-xen_cpu
@@ -0,0 +1,20 @@
+What: /sys/devices/system/xen_cpu/
+Date: May 2012
+Contact: Liu, Jinsong <jinsong.liu@intel.com>
+Description:
+ A collection of global/individual Xen physical cpu attributes
+
+ Individual physical cpu attributes are contained in
+ subdirectories named by the Xen's logical cpu number, e.g.:
+ /sys/devices/system/xen_cpu/xen_cpu#/
+
+
+What: /sys/devices/system/xen_cpu/xen_cpu#/online
+Date: May 2012
+Contact: Liu, Jinsong <jinsong.liu@intel.com>
+Description:
+ Interface to online/offline Xen physical cpus
+
+ When running under Xen platform, it provide user interface
+ to online/offline physical cpus, except cpu0 due to several
+ logic restrictions and assumptions.
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-lenovo-tpkbd b/Documentation/ABI/testing/sysfs-driver-hid-lenovo-tpkbd
new file mode 100644
index 000000000000..57b92cbdceae
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-lenovo-tpkbd
@@ -0,0 +1,38 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/press_to_select
+Date: July 2011
+Contact: linux-input@vger.kernel.org
+Description: This controls if mouse clicks should be generated if the trackpoint is quickly pressed. How fast this press has to be
+ is being controlled by press_speed.
+ Values are 0 or 1.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/dragging
+Date: July 2011
+Contact: linux-input@vger.kernel.org
+Description: If this setting is enabled, it is possible to do dragging by pressing the trackpoint. This requires press_to_select to be enabled.
+ Values are 0 or 1.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/release_to_select
+Date: July 2011
+Contact: linux-input@vger.kernel.org
+Description: For details regarding this setting please refer to http://www.pc.ibm.com/ww/healthycomputing/trkpntb.html
+ Values are 0 or 1.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/select_right
+Date: July 2011
+Contact: linux-input@vger.kernel.org
+Description: This setting controls if the mouse click events generated by pressing the trackpoint (if press_to_select is enabled) generate
+ a left or right mouse button click.
+ Values are 0 or 1.
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/sensitivity
+Date: July 2011
+Contact: linux-input@vger.kernel.org
+Description: This file contains the trackpoint sensitivity.
+ Values are decimal integers from 1 (lowest sensitivity) to 255 (highest sensitivity).
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/press_speed
+Date: July 2011
+Contact: linux-input@vger.kernel.org
+Description: This setting controls how fast the trackpoint needs to be pressed to generate a mouse click if press_to_select is enabled.
+ Values are decimal integers from 1 (slowest) to 255 (fastest).
+
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-savu b/Documentation/ABI/testing/sysfs-driver-hid-roccat-savu
new file mode 100644
index 000000000000..b42922cf6b1f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-savu
@@ -0,0 +1,77 @@
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/savu/roccatsavu<minor>/buttons
+Date: Mai 2012
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split into general settings and
+ button settings. buttons holds informations about button layout.
+ When written, this file lets one write the respective profile
+ buttons to the mouse. The data has to be 47 bytes long.
+ The mouse will reject invalid data.
+ Which profile to write is determined by the profile number
+ contained in the data.
+ Before reading this file, control has to be written to select
+ which profile to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/savu/roccatsavu<minor>/control
+Date: Mai 2012
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one select which data from which
+ profile will be read next. The data has to be 3 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/savu/roccatsavu<minor>/general
+Date: Mai 2012
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. A profile is split into general settings and
+ button settings. profile holds informations like resolution, sensitivity
+ and light effects.
+ When written, this file lets one write the respective profile
+ settings back to the mouse. The data has to be 43 bytes long.
+ The mouse will reject invalid data.
+ Which profile to write is determined by the profile number
+ contained in the data.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/savu/roccatsavu<minor>/info
+Date: Mai 2012
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When read, this file returns general data like firmware version.
+ The data is 8 bytes long.
+ This file is readonly.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/savu/roccatsavu<minor>/macro
+Date: Mai 2012
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: When written, this file lets one store macros with max 500
+ keystrokes for a specific button for a specific profile.
+ Button and profile numbers are included in written data.
+ The data has to be 2083 bytes long.
+ Before reading this file, control has to be written to select
+ which profile and key to read.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/savu/roccatsavu<minor>/profile
+Date: Mai 2012
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse can store 5 profiles which can be switched by the
+ press of a button. profile holds number of actual profile.
+ This value is persistent, so its value determines the profile
+ that's active when the mouse is powered on next time.
+ When written, the mouse activates the set profile immediately.
+ The data has to be 3 bytes long.
+ The mouse will reject invalid data.
+Users: http://roccat.sourceforge.net
+
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/savu/roccatsavu<minor>/sensor
+Date: July 2012
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: The mouse has a Avago ADNS-3090 sensor.
+ This file allows reading and writing of the mouse sensors registers.
+ The data has to be 4 bytes long.
+Users: http://roccat.sourceforge.net
+
diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
new file mode 100644
index 000000000000..9b31556cfdda
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups
@@ -0,0 +1,14 @@
+What: /sys/kernel/iommu_groups/
+Date: May 2012
+KernelVersion: v3.5
+Contact: Alex Williamson <alex.williamson@redhat.com>
+Description: /sys/kernel/iommu_groups/ contains a number of sub-
+ directories, each representing an IOMMU group. The
+ name of the sub-directory matches the iommu_group_id()
+ for the group, which is an integer value. Within each
+ subdirectory is another directory named "devices" with
+ links to the sysfs devices contained in this group.
+ The group directory also optionally contains a "name"
+ file if the IOMMU driver has chosen to register a more
+ common name for the group.
+Users:
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index 31725ffeeb3a..217772615d02 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -231,3 +231,16 @@ Description:
Reads from this file return a string consisting of the names of
wakeup sources created with the help of /sys/power/wake_lock
that are inactive at the moment, separated with spaces.
+
+What: /sys/power/pm_print_times
+Date: May 2012
+Contact: Sameer Nanda <snanda@chromium.org>
+Description:
+ The /sys/power/pm_print_times file allows user space to
+ control whether the time taken by devices to suspend and
+ resume is printed. These prints are useful for hunting down
+ devices that take too long to suspend or resume.
+
+ Writing a "1" enables this printing while writing a "0"
+ disables it. The default value is "0". Reading from this file
+ will display the current value.
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index f3e214f9e256..42e7f030cb16 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -404,7 +404,6 @@
!Finclude/net/mac80211.h ieee80211_get_tkip_p1k
!Finclude/net/mac80211.h ieee80211_get_tkip_p1k_iv
!Finclude/net/mac80211.h ieee80211_get_tkip_p2k
-!Finclude/net/mac80211.h ieee80211_key_removed
</chapter>
<chapter id="powersave">
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index 676bc46f9c52..cda0dfb6769a 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -3988,7 +3988,7 @@ interface and may change in the future.</para>
from RGB to Y'CbCr color space.
</entry>
</row>
- <row id = "v4l2-jpeg-chroma-subsampling">
+ <row>
<entrytbl spanname="descr" cols="2">
<tbody valign="top">
<row>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
index e3d5afcdafbb..0a4b90fcf2da 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
@@ -284,13 +284,6 @@ These controls are described in <xref
processing controls. These controls are described in <xref
linkend="image-process-controls" />.</entry>
</row>
- <row>
- <entry><constant>V4L2_CTRL_CLASS_JPEG</constant></entry>
- <entry>0x9d0000</entry>
- <entry>The class containing JPEG compression controls.
-These controls are described in <xref
- linkend="jpeg-controls" />.</entry>
- </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/ManagementStyle b/Documentation/ManagementStyle
index a5f0ea58c788..a211ee8d8b44 100644
--- a/Documentation/ManagementStyle
+++ b/Documentation/ManagementStyle
@@ -178,7 +178,7 @@ sadly that you are one too, and that while we can all bask in the secure
knowledge that we're better than the average person (let's face it,
nobody ever believes that they're average or below-average), we should
also admit that we're not the sharpest knife around, and there will be
-other people that are less of an idiot that you are.
+other people that are less of an idiot than you are.
Some people react badly to smart people. Others take advantage of them.
diff --git a/Documentation/RCU/checklist.txt b/Documentation/RCU/checklist.txt
index 5c8d74968090..fc103d7a0474 100644
--- a/Documentation/RCU/checklist.txt
+++ b/Documentation/RCU/checklist.txt
@@ -162,9 +162,9 @@ over a rather long period of time, but improvements are always welcome!
when publicizing a pointer to a structure that can
be traversed by an RCU read-side critical section.
-5. If call_rcu(), or a related primitive such as call_rcu_bh() or
- call_rcu_sched(), is used, the callback function must be
- written to be called from softirq context. In particular,
+5. If call_rcu(), or a related primitive such as call_rcu_bh(),
+ call_rcu_sched(), or call_srcu() is used, the callback function
+ must be written to be called from softirq context. In particular,
it cannot block.
6. Since synchronize_rcu() can block, it cannot be called from
@@ -202,11 +202,12 @@ over a rather long period of time, but improvements are always welcome!
updater uses call_rcu_sched() or synchronize_sched(), then
the corresponding readers must disable preemption, possibly
by calling rcu_read_lock_sched() and rcu_read_unlock_sched().
- If the updater uses synchronize_srcu(), the the corresponding
- readers must use srcu_read_lock() and srcu_read_unlock(),
- and with the same srcu_struct. The rules for the expedited
- primitives are the same as for their non-expedited counterparts.
- Mixing things up will result in confusion and broken kernels.
+ If the updater uses synchronize_srcu() or call_srcu(),
+ the the corresponding readers must use srcu_read_lock() and
+ srcu_read_unlock(), and with the same srcu_struct. The rules for
+ the expedited primitives are the same as for their non-expedited
+ counterparts. Mixing things up will result in confusion and
+ broken kernels.
One exception to this rule: rcu_read_lock() and rcu_read_unlock()
may be substituted for rcu_read_lock_bh() and rcu_read_unlock_bh()
@@ -333,14 +334,14 @@ over a rather long period of time, but improvements are always welcome!
victim CPU from ever going offline.)
14. SRCU (srcu_read_lock(), srcu_read_unlock(), srcu_dereference(),
- synchronize_srcu(), and synchronize_srcu_expedited()) may only
- be invoked from process context. Unlike other forms of RCU, it
- -is- permissible to block in an SRCU read-side critical section
- (demarked by srcu_read_lock() and srcu_read_unlock()), hence the
- "SRCU": "sleepable RCU". Please note that if you don't need
- to sleep in read-side critical sections, you should be using
- RCU rather than SRCU, because RCU is almost always faster and
- easier to use than is SRCU.
+ synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu())
+ may only be invoked from process context. Unlike other forms of
+ RCU, it -is- permissible to block in an SRCU read-side critical
+ section (demarked by srcu_read_lock() and srcu_read_unlock()),
+ hence the "SRCU": "sleepable RCU". Please note that if you
+ don't need to sleep in read-side critical sections, you should be
+ using RCU rather than SRCU, because RCU is almost always faster
+ and easier to use than is SRCU.
If you need to enter your read-side critical section in a
hardirq or exception handler, and then exit that same read-side
@@ -353,8 +354,8 @@ over a rather long period of time, but improvements are always welcome!
cleanup_srcu_struct(). These are passed a "struct srcu_struct"
that defines the scope of a given SRCU domain. Once initialized,
the srcu_struct is passed to srcu_read_lock(), srcu_read_unlock()
- synchronize_srcu(), and synchronize_srcu_expedited(). A given
- synchronize_srcu() waits only for SRCU read-side critical
+ synchronize_srcu(), synchronize_srcu_expedited(), and call_srcu().
+ A given synchronize_srcu() waits only for SRCU read-side critical
sections governed by srcu_read_lock() and srcu_read_unlock()
calls that have been passed the same srcu_struct. This property
is what makes sleeping read-side critical sections tolerable --
@@ -374,7 +375,7 @@ over a rather long period of time, but improvements are always welcome!
requiring SRCU's read-side deadlock immunity or low read-side
realtime latency.
- Note that, rcu_assign_pointer() relates to SRCU just as they do
+ Note that, rcu_assign_pointer() relates to SRCU just as it does
to other forms of RCU.
15. The whole point of call_rcu(), synchronize_rcu(), and friends
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt
index e439a0edee22..38428c125135 100644
--- a/Documentation/RCU/rcubarrier.txt
+++ b/Documentation/RCU/rcubarrier.txt
@@ -79,8 +79,6 @@ complete. Pseudo-code using rcu_barrier() is as follows:
2. Execute rcu_barrier().
3. Allow the module to be unloaded.
-Quick Quiz #1: Why is there no srcu_barrier()?
-
The rcutorture module makes use of rcu_barrier in its exit function
as follows:
@@ -162,7 +160,7 @@ for any pre-existing callbacks to complete.
Then lines 55-62 print status and do operation-specific cleanup, and
then return, permitting the module-unload operation to be completed.
-Quick Quiz #2: Is there any other situation where rcu_barrier() might
+Quick Quiz #1: Is there any other situation where rcu_barrier() might
be required?
Your module might have additional complications. For example, if your
@@ -242,7 +240,7 @@ reaches zero, as follows:
4 complete(&rcu_barrier_completion);
5 }
-Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
+Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
immediately (thus incrementing rcu_barrier_cpu_count to the
value one), but the other CPU's rcu_barrier_func() invocations
are delayed for a full grace period? Couldn't this result in
@@ -259,12 +257,7 @@ so that your module may be safely unloaded.
Answers to Quick Quizzes
-Quick Quiz #1: Why is there no srcu_barrier()?
-
-Answer: Since there is no call_srcu(), there can be no outstanding SRCU
- callbacks. Therefore, there is no need to wait for them.
-
-Quick Quiz #2: Is there any other situation where rcu_barrier() might
+Quick Quiz #1: Is there any other situation where rcu_barrier() might
be required?
Answer: Interestingly enough, rcu_barrier() was not originally
@@ -278,7 +271,7 @@ Answer: Interestingly enough, rcu_barrier() was not originally
implementing rcutorture, and found that rcu_barrier() solves
this problem as well.
-Quick Quiz #3: What happens if CPU 0's rcu_barrier_func() executes
+Quick Quiz #2: What happens if CPU 0's rcu_barrier_func() executes
immediately (thus incrementing rcu_barrier_cpu_count to the
value one), but the other CPU's rcu_barrier_func() invocations
are delayed for a full grace period? Couldn't this result in
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index 4ddf3913fd8c..7dce8a17eac2 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -174,11 +174,20 @@ torture_type The type of RCU to test, with string values as follows:
and synchronize_rcu_bh_expedited().
"srcu": srcu_read_lock(), srcu_read_unlock() and
+ call_srcu().
+
+ "srcu_sync": srcu_read_lock(), srcu_read_unlock() and
synchronize_srcu().
"srcu_expedited": srcu_read_lock(), srcu_read_unlock() and
synchronize_srcu_expedited().
+ "srcu_raw": srcu_read_lock_raw(), srcu_read_unlock_raw(),
+ and call_srcu().
+
+ "srcu_raw_sync": srcu_read_lock_raw(), srcu_read_unlock_raw(),
+ and synchronize_srcu().
+
"sched": preempt_disable(), preempt_enable(), and
call_rcu_sched().
diff --git a/Documentation/RCU/whatisRCU.txt b/Documentation/RCU/whatisRCU.txt
index 6bbe8dcdc3da..69ee188515e7 100644
--- a/Documentation/RCU/whatisRCU.txt
+++ b/Documentation/RCU/whatisRCU.txt
@@ -833,9 +833,9 @@ sched: Critical sections Grace period Barrier
SRCU: Critical sections Grace period Barrier
- srcu_read_lock synchronize_srcu N/A
- srcu_read_unlock synchronize_srcu_expedited
- srcu_read_lock_raw
+ srcu_read_lock synchronize_srcu srcu_barrier
+ srcu_read_unlock call_srcu
+ srcu_read_lock_raw synchronize_srcu_expedited
srcu_read_unlock_raw
srcu_dereference
diff --git a/Documentation/arm/Samsung-S3C24XX/H1940.txt b/Documentation/arm/Samsung-S3C24XX/H1940.txt
index f4a7b22c8664..b738859b1fc0 100644
--- a/Documentation/arm/Samsung-S3C24XX/H1940.txt
+++ b/Documentation/arm/Samsung-S3C24XX/H1940.txt
@@ -37,4 +37,4 @@ Maintainers
Thanks to the many others who have also provided support.
-(c) 2005 Ben Dooks \ No newline at end of file
+(c) 2005 Ben Dooks
diff --git a/Documentation/arm/Samsung-S3C24XX/SMDK2440.txt b/Documentation/arm/Samsung-S3C24XX/SMDK2440.txt
index 32e1eae6a25f..429390bd4684 100644
--- a/Documentation/arm/Samsung-S3C24XX/SMDK2440.txt
+++ b/Documentation/arm/Samsung-S3C24XX/SMDK2440.txt
@@ -53,4 +53,4 @@ Maintainers
and to Simtec Electronics for allowing me time to work on this.
-(c) 2004 Ben Dooks \ No newline at end of file
+(c) 2004 Ben Dooks
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 8e74980ab385..4a0b64c605fc 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -370,15 +370,12 @@ To mount a cgroup hierarchy with just the cpuset and memory
subsystems, type:
# mount -t cgroup -o cpuset,memory hier1 /sys/fs/cgroup/rg1
-To change the set of subsystems bound to a mounted hierarchy, just
-remount with different options:
-# mount -o remount,cpuset,blkio hier1 /sys/fs/cgroup/rg1
-
-Now memory is removed from the hierarchy and blkio is added.
-
-Note this will add blkio to the hierarchy but won't remove memory or
-cpuset, because the new options are appended to the old ones:
-# mount -o remount,blkio /sys/fs/cgroup/rg1
+While remounting cgroups is currently supported, it is not recommend
+to use it. Remounting allows changing bound subsystems and
+release_agent. Rebinding is hardly useful as it only works when the
+hierarchy is empty and release_agent itself should be replaced with
+conventional fsnotify. The support for remounting will be removed in
+the future.
To Specify a hierarchy's release_agent:
# mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
@@ -637,16 +634,6 @@ void exit(struct task_struct *task)
Called during task exit.
-int populate(struct cgroup *cgrp)
-(cgroup_mutex held by caller)
-
-Called after creation of a cgroup to allow a subsystem to populate
-the cgroup directory with file entries. The subsystem should make
-calls to cgroup_add_file() with objects of type cftype (see
-include/linux/cgroup.h for details). Note that although this
-method can return an error code, the error code is currently not
-always handled well.
-
void post_clone(struct cgroup *cgrp)
(cgroup_mutex held by caller)
@@ -656,7 +643,7 @@ example in cpusets, no task may attach before 'cpus' and 'mems' are set
up.
void bind(struct cgroup *root)
-(cgroup_mutex and ss->hierarchy_mutex held by caller)
+(cgroup_mutex held by caller)
Called when a cgroup subsystem is rebound to a different hierarchy
and root cgroup. Currently this will only involve movement between
diff --git a/Documentation/connector/cn_test.c b/Documentation/connector/cn_test.c
index 7764594778d4..adcca0368d60 100644
--- a/Documentation/connector/cn_test.c
+++ b/Documentation/connector/cn_test.c
@@ -69,9 +69,13 @@ static int cn_test_want_notify(void)
return -ENOMEM;
}
- nlh = NLMSG_PUT(skb, 0, 0x123, NLMSG_DONE, size - sizeof(*nlh));
+ nlh = nlmsg_put(skb, 0, 0x123, NLMSG_DONE, size - sizeof(*nlh), 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
- msg = (struct cn_msg *)NLMSG_DATA(nlh);
+ msg = nlmsg_data(nlh);
memset(msg, 0, size0);
@@ -117,11 +121,6 @@ static int cn_test_want_notify(void)
pr_info("request was sent: group=0x%x\n", ctl->group);
return 0;
-
-nlmsg_failure:
- pr_err("failed to send %u.%u\n", msg->seq, msg->ack);
- kfree_skb(skb);
- return -EINVAL;
}
#endif
diff --git a/Documentation/device-mapper/verity.txt b/Documentation/device-mapper/verity.txt
index 32e48797a14f..9884681535ee 100644
--- a/Documentation/device-mapper/verity.txt
+++ b/Documentation/device-mapper/verity.txt
@@ -7,39 +7,39 @@ This target is read-only.
Construction Parameters
=======================
- <version> <dev> <hash_dev> <hash_start>
+ <version> <dev> <hash_dev>
<data_block_size> <hash_block_size>
<num_data_blocks> <hash_start_block>
<algorithm> <digest> <salt>
<version>
- This is the version number of the on-disk format.
+ This is the type of the on-disk hash format.
0 is the original format used in the Chromium OS.
- The salt is appended when hashing, digests are stored continuously and
- the rest of the block is padded with zeros.
+ The salt is appended when hashing, digests are stored continuously and
+ the rest of the block is padded with zeros.
1 is the current format that should be used for new devices.
- The salt is prepended when hashing and each digest is
- padded with zeros to the power of two.
+ The salt is prepended when hashing and each digest is
+ padded with zeros to the power of two.
<dev>
- This is the device containing the data the integrity of which needs to be
+ This is the device containing data, the integrity of which needs to be
checked. It may be specified as a path, like /dev/sdaX, or a device number,
<major>:<minor>.
<hash_dev>
- This is the device that that supplies the hash tree data. It may be
+ This is the device that supplies the hash tree data. It may be
specified similarly to the device path and may be the same device. If the
- same device is used, the hash_start should be outside of the dm-verity
- configured device size.
+ same device is used, the hash_start should be outside the configured
+ dm-verity device.
<data_block_size>
- The block size on a data device. Each block corresponds to one digest on
- the hash device.
+ The block size on a data device in bytes.
+ Each block corresponds to one digest on the hash device.
<hash_block_size>
- The size of a hash block.
+ The size of a hash block in bytes.
<num_data_blocks>
The number of data blocks on the data device. Additional blocks are
@@ -65,7 +65,7 @@ Construction Parameters
Theory of operation
===================
-dm-verity is meant to be setup as part of a verified boot path. This
+dm-verity is meant to be set up as part of a verified boot path. This
may be anything ranging from a boot using tboot or trustedgrub to just
booting from a known-good device (like a USB drive or CD).
@@ -73,20 +73,20 @@ When a dm-verity device is configured, it is expected that the caller
has been authenticated in some way (cryptographic signatures, etc).
After instantiation, all hashes will be verified on-demand during
disk access. If they cannot be verified up to the root node of the
-tree, the root hash, then the I/O will fail. This should identify
+tree, the root hash, then the I/O will fail. This should detect
tampering with any data on the device and the hash data.
Cryptographic hashes are used to assert the integrity of the device on a
-per-block basis. This allows for a lightweight hash computation on first read
-into the page cache. Block hashes are stored linearly-aligned to the nearest
-block the size of a page.
+per-block basis. This allows for a lightweight hash computation on first read
+into the page cache. Block hashes are stored linearly, aligned to the nearest
+block size.
Hash Tree
---------
Each node in the tree is a cryptographic hash. If it is a leaf node, the hash
-is of some block data on disk. If it is an intermediary node, then the hash is
-of a number of child nodes.
+of some data block on disk is calculated. If it is an intermediary node,
+the hash of a number of child nodes is calculated.
Each entry in the tree is a collection of neighboring nodes that fit in one
block. The number is determined based on block_size and the size of the
@@ -110,63 +110,23 @@ alg = sha256, num_blocks = 32768, block_size = 4096
On-disk format
==============
-Below is the recommended on-disk format. The verity kernel code does not
-read the on-disk header. It only reads the hash blocks which directly
-follow the header. It is expected that a user-space tool will verify the
-integrity of the verity_header and then call dmsetup with the correct
-parameters. Alternatively, the header can be omitted and the dmsetup
-parameters can be passed via the kernel command-line in a rooted chain
-of trust where the command-line is verified.
+The verity kernel code does not read the verity metadata on-disk header.
+It only reads the hash blocks which directly follow the header.
+It is expected that a user-space tool will verify the integrity of the
+verity header.
-The on-disk format is especially useful in cases where the hash blocks
-are on a separate partition. The magic number allows easy identification
-of the partition contents. Alternatively, the hash blocks can be stored
-in the same partition as the data to be verified. In such a configuration
-the filesystem on the partition would be sized a little smaller than
-the full-partition, leaving room for the hash blocks.
-
-struct superblock {
- uint8_t signature[8]
- "verity\0\0";
-
- uint8_t version;
- 1 - current format
-
- uint8_t data_block_bits;
- log2(data block size)
-
- uint8_t hash_block_bits;
- log2(hash block size)
-
- uint8_t pad1[1];
- zero padding
-
- uint16_t salt_size;
- big-endian salt size
-
- uint8_t pad2[2];
- zero padding
-
- uint32_t data_blocks_hi;
- big-endian high 32 bits of the 64-bit number of data blocks
-
- uint32_t data_blocks_lo;
- big-endian low 32 bits of the 64-bit number of data blocks
-
- uint8_t algorithm[16];
- cryptographic algorithm
-
- uint8_t salt[384];
- salt (the salt size is specified above)
-
- uint8_t pad3[88];
- zero padding to 512-byte boundary
-}
+Alternatively, the header can be omitted and the dmsetup parameters can
+be passed via the kernel command-line in a rooted chain of trust where
+the command-line is verified.
Directly following the header (and with sector number padded to the next hash
block boundary) are the hash blocks which are stored a depth at a time
(starting from the root), sorted in order of increasing index.
+The full specification of kernel parameters and on-disk metadata format
+is available at the cryptsetup project's wiki page
+ http://code.google.com/p/cryptsetup/wiki/DMVerity
+
Status
======
V (for Valid) is returned if every check performed so far was valid.
@@ -174,21 +134,22 @@ If any check failed, C (for Corruption) is returned.
Example
=======
-
-Setup a device:
- dmsetup create vroot --table \
- "0 2097152 "\
- "verity 1 /dev/sda1 /dev/sda2 4096 4096 2097152 1 "\
+Set up a device:
+ # dmsetup create vroot --readonly --table \
+ "0 2097152 verity 1 /dev/sda1 /dev/sda2 4096 4096 262144 1 sha256 "\
"4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076 "\
"1234000000000000000000000000000000000000000000000000000000000000"
A command line tool veritysetup is available to compute or verify
-the hash tree or activate the kernel driver. This is available from
-the LVM2 upstream repository and may be supplied as a package called
-device-mapper-verity-tools:
- git://sources.redhat.com/git/lvm2
- http://sourceware.org/git/?p=lvm2.git
- http://sourceware.org/cgi-bin/cvsweb.cgi/LVM2/verity?cvsroot=lvm2
-
-veritysetup -a vroot /dev/sda1 /dev/sda2 \
- 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+the hash tree or activate the kernel device. This is available from
+the cryptsetup upstream repository http://code.google.com/p/cryptsetup/
+(as a libcryptsetup extension).
+
+Create hash on the device:
+ # veritysetup format /dev/sda1 /dev/sda2
+ ...
+ Root hash: 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
+
+Activate the device:
+ # veritysetup create vroot /dev/sda1 /dev/sda2 \
+ 4392712ba01368efdf14b05c76f9e4df0d53664630b5d48632ed17a137f39076
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 47a154f30290..b6251cca9263 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -2416,6 +2416,8 @@ Your cooperation is appreciated.
1 = /dev/raw/raw1 First raw I/O device
2 = /dev/raw/raw2 Second raw I/O device
...
+ max minor number of raw device is set by kernel config
+ MAX_RAW_DEVS or raw module parameter 'max_raw_devs'
163 char
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
new file mode 100644
index 000000000000..70c0dc5f00ed
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-mpic.txt
@@ -0,0 +1,23 @@
+Marvell Armada 370 and Armada XP Interrupt Controller
+-----------------------------------------------------
+
+Required properties:
+- compatible: Should be "marvell,mpic"
+- interrupt-controller: Identifies the node as an interrupt controller.
+- #interrupt-cells: The number of cells to define the interrupts. Should be 1.
+ The cell is the IRQ number
+- reg: Should contain PMIC registers location and length. First pair
+ for the main interrupt registers, second pair for the per-CPU
+ interrupt registers
+
+Example:
+
+ mpic: interrupt-controller@d0020000 {
+ compatible = "marvell,mpic";
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-controller;
+ reg = <0xd0020000 0x1000>,
+ <0xd0021000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt b/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt
new file mode 100644
index 000000000000..8b6ea2267c94
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp-timer.txt
@@ -0,0 +1,11 @@
+Marvell Armada 370 and Armada XP Global Timers
+----------------------------------------------
+
+Required properties:
+- compatible: Should be "marvell,armada-370-xp-timer"
+- interrupts: Should contain the list of Global Timer interrupts
+- reg: Should contain the base address of the Global Timer registers
+
+Optional properties:
+- marvell,timer-25Mhz: Tells whether the Global timer supports the 25
+ Mhz fixed mode (available on Armada XP and not on Armada 370)
diff --git a/Documentation/devicetree/bindings/arm/armada-370-xp.txt b/Documentation/devicetree/bindings/arm/armada-370-xp.txt
new file mode 100644
index 000000000000..c6ed90ea6e17
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/armada-370-xp.txt
@@ -0,0 +1,24 @@
+Marvell Armada 370 and Armada XP Platforms Device Tree Bindings
+---------------------------------------------------------------
+
+Boards with a SoC of the Marvell Armada 370 and Armada XP families
+shall have the following property:
+
+Required root node property:
+
+compatible: must contain "marvell,armada-370-xp"
+
+In addition, boards using the Marvell Armada 370 SoC shall have the
+following property:
+
+Required root node property:
+
+compatible: must contain "marvell,armada370"
+
+In addition, boards using the Marvell Armada XP SoC shall have the
+following property:
+
+Required root node property:
+
+compatible: must contain "marvell,armadaxp"
+
diff --git a/Documentation/devicetree/bindings/arm/atmel-aic.txt b/Documentation/devicetree/bindings/arm/atmel-aic.txt
index aabca4f83402..19078bf5cca8 100644
--- a/Documentation/devicetree/bindings/arm/atmel-aic.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-aic.txt
@@ -4,7 +4,7 @@ Required properties:
- compatible: Should be "atmel,<chip>-aic"
- interrupt-controller: Identifies the node as an interrupt controller.
- interrupt-parent: For single AIC system, it is an empty property.
-- #interrupt-cells: The number of cells to define the interrupts. It sould be 2.
+- #interrupt-cells: The number of cells to define the interrupts. It sould be 3.
The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).
The second cell is used to specify flags:
bits[3:0] trigger type and level flags:
@@ -14,7 +14,10 @@ Required properties:
8 = active low level-sensitive.
Valid combinations are 1, 2, 3, 4, 8.
Default flag for internal sources should be set to 4 (active high).
+ The third cell is used to specify the irq priority from 0 (lowest) to 7
+ (highest).
- reg: Should contain AIC registers location and length
+- atmel,external-irqs: u32 array of external irqs.
Examples:
/*
@@ -24,7 +27,7 @@ Examples:
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
interrupt-parent;
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
reg = <0xfffff000 0x200>;
};
@@ -34,5 +37,5 @@ Examples:
dma: dma-controller@ffffec00 {
compatible = "atmel,at91sam9g45-dma";
reg = <0xffffec00 0x200>;
- interrupts = <21 4>;
+ interrupts = <21 4 5>;
};
diff --git a/Documentation/devicetree/bindings/arm/davinci/cp-intc.txt b/Documentation/devicetree/bindings/arm/davinci/cp-intc.txt
new file mode 100644
index 000000000000..597e8a089fe4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/davinci/cp-intc.txt
@@ -0,0 +1,27 @@
+* TI Common Platform Interrupt Controller
+
+Common Platform Interrupt Controller (cp_intc) is used on
+OMAP-L1x SoCs and can support several configurable number
+of interrupts.
+
+Main node required properties:
+
+- compatible : should be:
+ "ti,cp-intc"
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+ interrupt source. The type shall be a <u32> and the value shall be 1.
+
+ The cell contains the interrupt number in the range [0-128].
+- ti,intc-size: Number of interrupts handled by the interrupt controller.
+- reg: physical base address and size of the intc registers map.
+
+Example:
+
+ intc: interrupt-controller@1 {
+ compatible = "ti,cp-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ ti,intc-size = <101>;
+ reg = <0xfffee000 0x2000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/mvebu-system-controller.txt b/Documentation/devicetree/bindings/arm/mvebu-system-controller.txt
new file mode 100644
index 000000000000..081c6a786c8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mvebu-system-controller.txt
@@ -0,0 +1,17 @@
+MVEBU System Controller
+-----------------------
+MVEBU (Marvell SOCs: Armada 370/XP, Dove, mv78xx0, Kirkwood, Orion5x)
+
+Required properties:
+
+- compatible: one of:
+ - "marvell,orion-system-controller"
+ - "marvell,armada-370-xp-system-controller"
+- reg: Should contain system controller registers location and length.
+
+Example:
+
+ system-controller@d0018200 {
+ compatible = "marvell,armada-370-xp-system-controller";
+ reg = <0xd0018200 0x500>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/olimex.txt b/Documentation/devicetree/bindings/arm/olimex.txt
new file mode 100644
index 000000000000..007fb5c685a1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/olimex.txt
@@ -0,0 +1,6 @@
+Olimex i.MX Platforms Device Tree Bindings
+------------------------------------------
+
+i.MX23 Olinuxino Low Cost Board
+Required root node properties:
+ - compatible = "olimex,imx23-olinuxino", "fsl,imx23";
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index e78e8bccac30..ccdd0e53451f 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -47,3 +47,9 @@ Boards:
- AM335X EVM : Software Developement Board for AM335x
compatible = "ti,am335x-evm", "ti,am33xx", "ti,omap3"
+
+- AM335X Bone : Low cost community board
+ compatible = "ti,am335x-bone", "ti,am33xx", "ti,omap3"
+
+- OMAP5 EVM : Evaluation Module
+ compatible = "ti,omap5-evm", "ti,omap5"
diff --git a/Documentation/devicetree/bindings/arm/primecell.txt b/Documentation/devicetree/bindings/arm/primecell.txt
index 951ca46789d4..64fc82bc8928 100644
--- a/Documentation/devicetree/bindings/arm/primecell.txt
+++ b/Documentation/devicetree/bindings/arm/primecell.txt
@@ -13,11 +13,17 @@ Required properties:
Optional properties:
- arm,primecell-periphid : Value to override the h/w value with
+- clocks : From common clock binding. First clock is phandle to clock for apb
+ pclk. Additional clocks are optional and specific to those peripherals.
+- clock-names : From common clock binding. Shall be "apb_pclk" for first clock.
Example:
serial@fff36000 {
compatible = "arm,pl011", "arm,primecell";
arm,primecell-periphid = <0x00341011>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
+
};
diff --git a/Documentation/devicetree/bindings/arm/tegra/emc.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-emc.txt
index 09335f8eee00..4c33b29dc660 100644
--- a/Documentation/devicetree/bindings/arm/tegra/emc.txt
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-emc.txt
@@ -15,7 +15,7 @@ Child device nodes describe the memory settings for different configurations and
Example:
- emc@7000f400 {
+ memory-controller@7000f400 {
#address-cells = < 1 >;
#size-cells = < 0 >;
compatible = "nvidia,tegra20-emc";
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt
index c25a0a55151d..866d93421eba 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-mc.txt
@@ -8,7 +8,7 @@ Required properties:
- interrupts : Should contain MC General interrupt.
Example:
- mc {
+ memory-controller@0x7000f000 {
compatible = "nvidia,tegra20-mc";
reg = <0x7000f000 0x024
0x7000f03c 0x3c4>;
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-mc.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-mc.txt
index e47e73f612f4..bdf1a612422b 100644
--- a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-mc.txt
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra30-mc.txt
@@ -8,7 +8,7 @@ Required properties:
- interrupts : Should contain MC General interrupt.
Example:
- mc {
+ memory-controller {
compatible = "nvidia,tegra30-mc";
reg = <0x7000f000 0x010
0x7000f03c 0x1b4
diff --git a/Documentation/devicetree/bindings/clock/calxeda.txt b/Documentation/devicetree/bindings/clock/calxeda.txt
new file mode 100644
index 000000000000..0a6ac1bdcda1
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/calxeda.txt
@@ -0,0 +1,17 @@
+Device Tree Clock bindings for Calxeda highbank platform
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be one of the following:
+ "calxeda,hb-pll-clock" - for a PLL clock
+ "calxeda,hb-a9periph-clock" - The A9 peripheral clock divided from the
+ A9 clock.
+ "calxeda,hb-a9bus-clock" - The A9 bus clock divided from the A9 clock.
+ "calxeda,hb-emmc-clock" - Divided clock for MMC/SD controller.
+- reg : shall be the control register offset from SYSREGs base for the clock.
+- clocks : shall be the input parent clock phandle for the clock. This is
+ either an oscillator or a pll output.
+- #clock-cells : from common clock binding; shall be set to 0.
diff --git a/Documentation/devicetree/bindings/clock/clock-bindings.txt b/Documentation/devicetree/bindings/clock/clock-bindings.txt
new file mode 100644
index 000000000000..eb65d417f8c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -0,0 +1,117 @@
+This binding is a work-in-progress, and are based on some experimental
+work by benh[1].
+
+Sources of clock signal can be represented by any node in the device
+tree. Those nodes are designated as clock providers. Clock consumer
+nodes use a phandle and clock specifier pair to connect clock provider
+outputs to clock inputs. Similar to the gpio specifiers, a clock
+specifier is an array of one more more cells identifying the clock
+output on a device. The length of a clock specifier is defined by the
+value of a #clock-cells property in the clock provider node.
+
+[1] http://patchwork.ozlabs.org/patch/31551/
+
+==Clock providers==
+
+Required properties:
+#clock-cells: Number of cells in a clock specifier; Typically 0 for nodes
+ with a single clock output and 1 for nodes with multiple
+ clock outputs.
+
+Optional properties:
+clock-output-names: Recommended to be a list of strings of clock output signal
+ names indexed by the first cell in the clock specifier.
+ However, the meaning of clock-output-names is domain
+ specific to the clock provider, and is only provided to
+ encourage using the same meaning for the majority of clock
+ providers. This format may not work for clock providers
+ using a complex clock specifier format. In those cases it
+ is recommended to omit this property and create a binding
+ specific names property.
+
+ Clock consumer nodes must never directly reference
+ the provider's clock-output-names property.
+
+For example:
+
+ oscillator {
+ #clock-cells = <1>;
+ clock-output-names = "ckil", "ckih";
+ };
+
+- this node defines a device with two clock outputs, the first named
+ "ckil" and the second named "ckih". Consumer nodes always reference
+ clocks by index. The names should reflect the clock output signal
+ names for the device.
+
+==Clock consumers==
+
+Required properties:
+clocks: List of phandle and clock specifier pairs, one pair
+ for each clock input to the device. Note: if the
+ clock provider specifies '0' for #clock-cells, then
+ only the phandle portion of the pair will appear.
+
+Optional properties:
+clock-names: List of clock input name strings sorted in the same
+ order as the clocks property. Consumers drivers
+ will use clock-names to match clock input names
+ with clocks specifiers.
+clock-ranges: Empty property indicating that child nodes can inherit named
+ clocks from this node. Useful for bus nodes to provide a
+ clock to their children.
+
+For example:
+
+ device {
+ clocks = <&osc 1>, <&ref 0>;
+ clock-names = "baud", "register";
+ };
+
+
+This represents a device with two clock inputs, named "baud" and "register".
+The baud clock is connected to output 1 of the &osc device, and the register
+clock is connected to output 0 of the &ref.
+
+==Example==
+
+ /* external oscillator */
+ osc: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <1>;
+ clock-frequency = <32678>;
+ clock-output-names = "osc";
+ };
+
+ /* phase-locked-loop device, generates a higher frequency clock
+ * from the external oscillator reference */
+ pll: pll@4c000 {
+ compatible = "vendor,some-pll-interface"
+ #clock-cells = <1>;
+ clocks = <&osc 0>;
+ clock-names = "ref";
+ reg = <0x4c000 0x1000>;
+ clock-output-names = "pll", "pll-switched";
+ };
+
+ /* UART, using the low frequency oscillator for the baud clock,
+ * and the high frequency switched PLL output for register
+ * clocking */
+ uart@a000 {
+ compatible = "fsl,imx-uart";
+ reg = <0xa000 0x1000>;
+ interrupts = <33>;
+ clocks = <&osc 0>, <&pll 1>;
+ clock-names = "baud", "register";
+ };
+
+This DT fragment defines three devices: an external oscillator to provide a
+low-frequency reference clock, a PLL device to generate a higher frequency
+clock signal, and a UART.
+
+* The oscillator is fixed-frequency, and provides one clock output, named "osc".
+* The PLL is both a clock provider and a clock consumer. It uses the clock
+ signal generated by the external oscillator, and provides two output signals
+ ("pll" and "pll-switched").
+* The UART has its baud clock connected the external oscillator and its
+ register clock connected to the PLL clock (the "pll-switched" signal)
diff --git a/Documentation/devicetree/bindings/clock/fixed-clock.txt b/Documentation/devicetree/bindings/clock/fixed-clock.txt
new file mode 100644
index 000000000000..0b1fe7824093
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/fixed-clock.txt
@@ -0,0 +1,21 @@
+Binding for simple fixed-rate clock sources.
+
+This binding uses the common clock binding[1].
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "fixed-clock".
+- #clock-cells : from common clock binding; shall be set to 0.
+- clock-frequency : frequency of clock in Hz. Should be a single cell.
+
+Optional properties:
+- gpios : From common gpio binding; gpio connection to clock enable pin.
+- clock-output-names : From common clock binding.
+
+Example:
+ clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000000>;
+ };
diff --git a/Documentation/devicetree/bindings/fb/mxsfb.txt b/Documentation/devicetree/bindings/fb/mxsfb.txt
new file mode 100644
index 000000000000..b41e5e52a676
--- /dev/null
+++ b/Documentation/devicetree/bindings/fb/mxsfb.txt
@@ -0,0 +1,19 @@
+* Freescale MXS LCD Interface (LCDIF)
+
+Required properties:
+- compatible: Should be "fsl,<chip>-lcdif". Supported chips include
+ imx23 and imx28.
+- reg: Address and length of the register set for lcdif
+- interrupts: Should contain lcdif interrupts
+
+Optional properties:
+- panel-enable-gpios : Should specify the gpio for panel enable
+
+Examples:
+
+lcdif@80030000 {
+ compatible = "fsl,imx28-lcdif";
+ reg = <0x80030000 2000>;
+ interrupts = <38 86>;
+ panel-enable-gpios = <&gpio3 30 0>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
index 4363ae4b3c14..4f3929713ae4 100644
--- a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
@@ -8,8 +8,16 @@ Required properties:
by low 16 pins and the second one is for high 16 pins.
- gpio-controller : Marks the device node as a gpio controller.
- #gpio-cells : Should be two. The first cell is the pin number and
- the second cell is used to specify optional parameters (currently
- unused).
+ the second cell is used to specify the gpio polarity:
+ 0 = active high
+ 1 = active low
+- interrupt-controller: Marks the device node as an interrupt controller.
+- #interrupt-cells : Should be 2. The first cell is the GPIO number.
+ The second cell bits[3:0] is used to specify trigger type and level flags:
+ 1 = low-to-high edge triggered.
+ 2 = high-to-low edge triggered.
+ 4 = active high level-sensitive.
+ 8 = active low level-sensitive.
Example:
@@ -19,4 +27,6 @@ gpio0: gpio@73f84000 {
interrupts = <50 51>;
gpio-controller;
#gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mxs.txt b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
index 0c35673f7a3e..1e677a47b836 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
@@ -13,8 +13,9 @@ Required properties for GPIO node:
- interrupts : Should be the port interrupt shared by all 32 pins.
- gpio-controller : Marks the device node as a gpio controller.
- #gpio-cells : Should be two. The first cell is the pin number and
- the second cell is used to specify optional parameters (currently
- unused).
+ the second cell is used to specify the gpio polarity:
+ 0 = active high
+ 1 = active low
- interrupt-controller: Marks the device node as an interrupt controller.
- #interrupt-cells : Should be 2. The first cell is the GPIO number.
The second cell bits[3:0] is used to specify trigger type and level flags:
diff --git a/Documentation/devicetree/bindings/gpio/gpio-nmk.txt b/Documentation/devicetree/bindings/gpio/gpio-nmk.txt
index ee87467ad8d6..8315ac7780ef 100644
--- a/Documentation/devicetree/bindings/gpio/gpio-nmk.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio-nmk.txt
@@ -26,6 +26,6 @@ Example:
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
- supports-sleepmode;
+ st,supports-sleepmode;
gpio-bank = <1>;
};
diff --git a/Documentation/devicetree/bindings/gpio/led.txt b/Documentation/devicetree/bindings/gpio/led.txt
index fd2bd56e7195..9bb308abd221 100644
--- a/Documentation/devicetree/bindings/gpio/led.txt
+++ b/Documentation/devicetree/bindings/gpio/led.txt
@@ -55,4 +55,4 @@ run-control {
gpios = <&mpc8572 7 0>;
default-state = "on";
};
-}
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio_nvidia.txt b/Documentation/devicetree/bindings/gpio/nvidia,tegra20-gpio.txt
index 023c9526e5f8..023c9526e5f8 100644
--- a/Documentation/devicetree/bindings/gpio/gpio_nvidia.txt
+++ b/Documentation/devicetree/bindings/gpio/nvidia,tegra20-gpio.txt
diff --git a/Documentation/devicetree/bindings/input/fsl-mma8450.txt b/Documentation/devicetree/bindings/input/fsl-mma8450.txt
index a00c94ccbdee..0b96e5737d3a 100644
--- a/Documentation/devicetree/bindings/input/fsl-mma8450.txt
+++ b/Documentation/devicetree/bindings/input/fsl-mma8450.txt
@@ -2,6 +2,7 @@
Required properties:
- compatible : "fsl,mma8450".
+- reg: the I2C address of MMA8450
Example:
diff --git a/Documentation/devicetree/bindings/input/tegra-kbc.txt b/Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt
index 72683be6de35..72683be6de35 100644
--- a/Documentation/devicetree/bindings/input/tegra-kbc.txt
+++ b/Documentation/devicetree/bindings/input/nvidia,tegra20-kbc.txt
diff --git a/Documentation/devicetree/bindings/iommu/nvidia,tegra30-smmu.txt b/Documentation/devicetree/bindings/iommu/nvidia,tegra30-smmu.txt
new file mode 100644
index 000000000000..89fb5434b730
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/nvidia,tegra30-smmu.txt
@@ -0,0 +1,21 @@
+NVIDIA Tegra 30 IOMMU H/W, SMMU (System Memory Management Unit)
+
+Required properties:
+- compatible : "nvidia,tegra30-smmu"
+- reg : Should contain 3 register banks(address and length) for each
+ of the SMMU register blocks.
+- interrupts : Should contain MC General interrupt.
+- nvidia,#asids : # of ASIDs
+- dma-window : IOVA start address and length.
+- nvidia,ahb : phandle to the ahb bus connected to SMMU.
+
+Example:
+ smmu {
+ compatible = "nvidia,tegra30-smmu";
+ reg = <0x7000f010 0x02c
+ 0x7000f1f0 0x010
+ 0x7000f228 0x05c>;
+ nvidia,#asids = <4>; /* # of ASIDs */
+ dma-window = <0 0x40000000>; /* IOVA start & length */
+ nvidia,ahb = <&ahb>;
+ };
diff --git a/Documentation/devicetree/bindings/mfd/mc13xxx.txt b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
index 19f6af47a792..baf07987ae68 100644
--- a/Documentation/devicetree/bindings/mfd/mc13xxx.txt
+++ b/Documentation/devicetree/bindings/mfd/mc13xxx.txt
@@ -46,8 +46,8 @@ Examples:
ecspi@70010000 { /* ECSPI1 */
fsl,spi-num-chipselects = <2>;
- cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
- <&gpio3 25 0>; /* GPIO4_25 */
+ cs-gpios = <&gpio4 24 0>, /* GPIO4_24 */
+ <&gpio4 25 0>; /* GPIO4_25 */
status = "okay";
pmic: mc13892@0 {
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
index 645f5eaadb3f..d2802d4717bc 100644
--- a/Documentation/devicetree/bindings/mfd/tps65910.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65910.txt
@@ -17,18 +17,46 @@ Required properties:
device need to be present. The definition for each of these nodes is defined
using the standard binding for regulators found at
Documentation/devicetree/bindings/regulator/regulator.txt.
+ The regulator is matched with the regulator-compatible.
- The valid names for regulators are:
+ The valid regulator-compatible values are:
tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1,
vaux2, vaux33, vmmc
tps65911: vrtc, vio, vdd1, vdd3, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5,
ldo6, ldo7, ldo8
+- xxx-supply: Input voltage supply regulator.
+ These entries are require if regulators are enabled for a device. Missing of these
+ properties can cause the regulator registration fails.
+ If some of input supply is powered through battery or always-on supply then
+ also it is require to have these parameters with proper node handle of always
+ on power supply.
+ tps65910:
+ vcc1-supply: VDD1 input.
+ vcc2-supply: VDD2 input.
+ vcc3-supply: VAUX33 and VMMC input.
+ vcc4-supply: VAUX1 and VAUX2 input.
+ vcc5-supply: VPLL and VDAC input.
+ vcc6-supply: VDIG1 and VDIG2 input.
+ vcc7-supply: VRTC input.
+ vccio-supply: VIO input.
+ tps65911:
+ vcc1-supply: VDD1 input.
+ vcc2-supply: VDD2 input.
+ vcc3-supply: LDO6, LDO7 and LDO8 input.
+ vcc4-supply: LDO5 input.
+ vcc5-supply: LDO3 and LDO4 input.
+ vcc6-supply: LDO1 and LDO2 input.
+ vcc7-supply: VRTC input.
+ vccio-supply: VIO input.
+
Optional properties:
- ti,vmbch-threshold: (tps65911) main battery charged threshold
comparator. (see VMBCH_VSEL in TPS65910 datasheet)
- ti,vmbch2-threshold: (tps65911) main battery discharged threshold
comparator. (see VMBCH_VSEL in TPS65910 datasheet)
+- ti,en-ck32k-xtal: enable external 32-kHz crystal oscillator (see CK32K_CTRL
+ in TPS6591X datasheet)
- ti,en-gpio-sleep: enable sleep control for gpios
There should be 9 entries here, one for each gpio.
@@ -56,74 +84,110 @@ Example:
ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>;
+ vcc1-supply = <&reg_parent>;
+ vcc2-supply = <&some_reg>;
+ vcc3-supply = <...>;
+ vcc4-supply = <...>;
+ vcc5-supply = <...>;
+ vcc6-supply = <...>;
+ vcc7-supply = <...>;
+ vccio-supply = <...>;
+
regulators {
- vdd1_reg: vdd1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vdd1_reg: regulator@0 {
+ regulator-compatible = "vdd1";
+ reg = <0>;
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
regulator-boot-on;
ti,regulator-ext-sleep-control = <0>;
};
- vdd2_reg: vdd2 {
+ vdd2_reg: regulator@1 {
+ regulator-compatible = "vdd2";
+ reg = <1>;
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1500000>;
regulator-always-on;
regulator-boot-on;
ti,regulator-ext-sleep-control = <4>;
};
- vddctrl_reg: vddctrl {
+ vddctrl_reg: regulator@2 {
+ regulator-compatible = "vddctrl";
+ reg = <2>;
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1400000>;
regulator-always-on;
regulator-boot-on;
ti,regulator-ext-sleep-control = <0>;
};
- vio_reg: vio {
+ vio_reg: regulator@3 {
+ regulator-compatible = "vio";
+ reg = <3>;
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1800000>;
regulator-always-on;
regulator-boot-on;
ti,regulator-ext-sleep-control = <1>;
};
- ldo1_reg: ldo1 {
+ ldo1_reg: regulator@4 {
+ regulator-compatible = "ldo1";
+ reg = <4>;
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <3300000>;
ti,regulator-ext-sleep-control = <0>;
};
- ldo2_reg: ldo2 {
+ ldo2_reg: regulator@5 {
+ regulator-compatible = "ldo2";
+ reg = <5>;
regulator-min-microvolt = <1050000>;
regulator-max-microvolt = <1050000>;
ti,regulator-ext-sleep-control = <0>;
};
- ldo3_reg: ldo3 {
+ ldo3_reg: regulator@6 {
+ regulator-compatible = "ldo3";
+ reg = <6>;
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <3300000>;
ti,regulator-ext-sleep-control = <0>;
};
- ldo4_reg: ldo4 {
+ ldo4_reg: regulator@7 {
+ regulator-compatible = "ldo4";
+ reg = <7>;
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
ti,regulator-ext-sleep-control = <0>;
};
- ldo5_reg: ldo5 {
+ ldo5_reg: regulator@8 {
+ regulator-compatible = "ldo5";
+ reg = <8>;
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <3300000>;
ti,regulator-ext-sleep-control = <0>;
};
- ldo6_reg: ldo6 {
+ ldo6_reg: regulator@9 {
+ regulator-compatible = "ldo6";
+ reg = <9>;
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
ti,regulator-ext-sleep-control = <0>;
};
- ldo7_reg: ldo7 {
+ ldo7_reg: regulator@10 {
+ regulator-compatible = "ldo7";
+ reg = <10>;
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
regulator-always-on;
regulator-boot-on;
ti,regulator-ext-sleep-control = <1>;
};
- ldo8_reg: ldo8 {
+ ldo8_reg: regulator@11 {
+ regulator-compatible = "ldo8";
+ reg = <11>;
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <3300000>;
regulator-always-on;
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index 0d93b4b0e0e3..bd9be0b5bc20 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -3,21 +3,22 @@
The Enhanced Secure Digital Host Controller provides an interface
for MMC, SD, and SDIO types of memory cards.
+This file documents differences between the core properties described
+by mmc.txt and the properties used by the sdhci-esdhc driver.
+
Required properties:
- - compatible : should be
- "fsl,<chip>-esdhc", "fsl,esdhc"
- - reg : should contain eSDHC registers location and length.
- - interrupts : should contain eSDHC interrupt.
- interrupt-parent : interrupt source phandle.
- clock-frequency : specifies eSDHC base clock frequency.
- - sdhci,wp-inverted : (optional) specifies that eSDHC controller
- reports inverted write-protect state; New devices should use
- the generic "wp-inverted" property.
- - sdhci,1-bit-only : (optional) specifies that a controller can
- only handle 1-bit data transfers. New devices should use the
- generic "bus-width = <1>" property.
- - sdhci,auto-cmd12: (optional) specifies that a controller can
- only handle auto CMD12.
+
+Optional properties:
+ - sdhci,wp-inverted : specifies that eSDHC controller reports
+ inverted write-protect state; New devices should use the generic
+ "wp-inverted" property.
+ - sdhci,1-bit-only : specifies that a controller can only handle
+ 1-bit data transfers. New devices should use the generic
+ "bus-width = <1>" property.
+ - sdhci,auto-cmd12: specifies that a controller can only handle auto
+ CMD12.
Example:
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index c7e404b3ef05..70cd49b1caa8 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -3,17 +3,15 @@
The Enhanced Secure Digital Host Controller on Freescale i.MX family
provides an interface for MMC, SD, and SDIO types of memory cards.
+This file documents differences between the core properties described
+by mmc.txt and the properties used by the sdhci-esdhc-imx driver.
+
Required properties:
- compatible : Should be "fsl,<chip>-esdhc"
-- reg : Should contain eSDHC registers location and length
-- interrupts : Should contain eSDHC interrupt
Optional properties:
-- non-removable : Indicate the card is wired to host permanently
- fsl,cd-internal : Indicate to use controller internal card detection
- fsl,wp-internal : Indicate to use controller internal write protection
-- cd-gpios : Specify GPIOs for card detection
-- wp-gpios : Specify GPIOs for write protection
Examples:
@@ -29,6 +27,6 @@ esdhc@70008000 {
compatible = "fsl,imx51-esdhc";
reg = <0x70008000 0x4000>;
interrupts = <2>;
- cd-gpios = <&gpio0 6 0>; /* GPIO1_6 */
- wp-gpios = <&gpio0 5 0>; /* GPIO1_5 */
+ cd-gpios = <&gpio1 6 0>; /* GPIO1_6 */
+ wp-gpios = <&gpio1 5 0>; /* GPIO1_5 */
};
diff --git a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
index d64aea5a4203..0e5e2ec4001d 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
@@ -1,8 +1,9 @@
MMC/SD/SDIO slot directly connected to a SPI bus
+This file documents differences between the core properties described
+by mmc.txt and the properties used by the mmc_spi driver.
+
Required properties:
-- compatible : should be "mmc-spi-slot".
-- reg : should specify SPI address (chip-select number).
- spi-max-frequency : maximum frequency for this device (Hz).
- voltage-ranges : two cells are required, first cell specifies minimum
slot voltage (mV), second cell specifies maximum slot voltage (mV).
@@ -11,8 +12,7 @@ Required properties:
Optional properties:
- gpios : may specify GPIOs in this order: Card-Detect GPIO,
Write-Protect GPIO. Note that this does not follow the
- binding from mmc.txt, for historic reasons.
-- interrupts : the interrupt of a card detect interrupt.
+ binding from mmc.txt, for historical reasons.
- interrupt-parent : the phandle for the interrupt controller that
services interrupts for this device.
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
index 6e70dcde0a71..8a6811f4a02f 100644
--- a/Documentation/devicetree/bindings/mmc/mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -2,13 +2,17 @@ These properties are common to multiple MMC host controllers. Any host
that requires the respective functionality should implement them using
these definitions.
+Interpreted by the OF core:
+- reg: Registers location and length.
+- interrupts: Interrupts used by the MMC controller.
+
Required properties:
- bus-width: Number of data lines, can be <1>, <4>, or <8>
Optional properties:
-- cd-gpios : Specify GPIOs for card detection, see gpio binding
-- wp-gpios : Specify GPIOs for write protection, see gpio binding
-- cd-inverted: when present, polarity on the wp gpio line is inverted
+- cd-gpios: Specify GPIOs for card detection, see gpio binding
+- wp-gpios: Specify GPIOs for write protection, see gpio binding
+- cd-inverted: when present, polarity on the cd gpio line is inverted
- wp-inverted: when present, polarity on the wp gpio line is inverted
- non-removable: non-removable slot (like eMMC)
- max-frequency: maximum operating clock frequency
diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
index 14a81d526118..2b584cae352a 100644
--- a/Documentation/devicetree/bindings/mmc/mmci.txt
+++ b/Documentation/devicetree/bindings/mmc/mmci.txt
@@ -1,19 +1,15 @@
* ARM PrimeCell MultiMedia Card Interface (MMCI) PL180/1
-The ARM PrimeCell MMCI PL180 and PL181 provides and interface for
+The ARM PrimeCell MMCI PL180 and PL181 provides an interface for
reading and writing to MultiMedia and SD cards alike.
+This file documents differences between the core properties described
+by mmc.txt and the properties used by the mmci driver.
+
Required properties:
- compatible : contains "arm,pl18x", "arm,primecell".
-- reg : contains pl18x registers and length.
-- interrupts : contains the device IRQ(s).
- arm,primecell-periphid : contains the PrimeCell Peripheral ID.
Optional properties:
-- wp-gpios : contains any write protect (ro) gpios
-- cd-gpios : contains any card detection gpios
-- cd-inverted : indicates whether the cd gpio is inverted
-- max-frequency : contains the maximum operating frequency
-- bus-width : number of data lines, can be <1>, <4>, or <8>
- mmc-cap-mmc-highspeed : indicates whether MMC is high speed capable
- mmc-cap-sd-highspeed : indicates whether SD is high speed capable
diff --git a/Documentation/devicetree/bindings/mmc/mxs-mmc.txt b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
index 14d870a9e3db..54949f6faede 100644
--- a/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
+++ b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
@@ -3,16 +3,14 @@
The Freescale MXS Synchronous Serial Ports (SSP) can act as a MMC controller
to support MMC, SD, and SDIO types of memory cards.
+This file documents differences between the core properties in mmc.txt
+and the properties used by the mxsmmc driver.
+
Required properties:
- compatible: Should be "fsl,<chip>-mmc". The supported chips include
imx23 and imx28.
-- reg: Should contain registers location and length
- interrupts: Should contain ERROR and DMA interrupts
- fsl,ssp-dma-channel: APBH DMA channel for the SSP
-- bus-width: Number of data lines, can be <1>, <4>, or <8>
-
-Optional properties:
-- wp-gpios: Specify GPIOs for write protection
Examples:
diff --git a/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
index f77c3031607f..c6d7b11db9eb 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
@@ -3,15 +3,13 @@
This controller on Tegra family SoCs provides an interface for MMC, SD,
and SDIO types of memory cards.
+This file documents differences between the core properties described
+by mmc.txt and the properties used by the sdhci-tegra driver.
+
Required properties:
- compatible : Should be "nvidia,<chip>-sdhci"
-- reg : Should contain SD/MMC registers location and length
-- interrupts : Should contain SD/MMC interrupt
-- bus-width : Number of data lines, can be <1>, <4>, or <8>
Optional properties:
-- cd-gpios : Specify GPIOs for card detection
-- wp-gpios : Specify GPIOs for write protection
- power-gpios : Specify GPIOs for power control
Example:
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt b/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt
new file mode 100644
index 000000000000..dbe98a3c183a
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/sdhci-pxa.txt
@@ -0,0 +1,21 @@
+* Marvell sdhci-pxa v2/v3 controller
+
+This file documents differences between the core properties in mmc.txt
+and the properties used by the sdhci-pxav2 and sdhci-pxav3 drivers.
+
+Required properties:
+- compatible: Should be "mrvl,pxav2-mmc" or "mrvl,pxav3-mmc".
+
+Optional properties:
+- mrvl,clk-delay-cycles: Specify a number of cycles to delay for tuning.
+
+Example:
+
+sdhci@d4280800 {
+ compatible = "mrvl,pxav3-mmc";
+ reg = <0xd4280800 0x800>;
+ bus-width = <8>;
+ interrupts = <27>;
+ non-removable;
+ mrvl,clk-delay-cycles = <31>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index 8a53958c9a9f..be76a23b34c4 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -3,21 +3,20 @@
The Highspeed MMC Host Controller on TI OMAP family
provides an interface for MMC, SD, and SDIO types of memory cards.
+This file documents differences between the core properties described
+by mmc.txt and the properties used by the omap_hsmmc driver.
+
Required properties:
- compatible:
Should be "ti,omap2-hsmmc", for OMAP2 controllers
Should be "ti,omap3-hsmmc", for OMAP3 controllers
Should be "ti,omap4-hsmmc", for OMAP4 controllers
- ti,hwmods: Must be "mmc<n>", n is controller instance starting 1
-- reg : should contain hsmmc registers location and length
Optional properties:
ti,dual-volt: boolean, supports dual voltage cards
<supply-name>-supply: phandle to the regulator device tree node
"supply-name" examples are "vmmc", "vmmc_aux" etc
-bus-width: Number of data lines, default assumed is 1 if the property is missing.
-cd-gpios: GPIOs for card detection
-wp-gpios: GPIOs for write protection
ti,non-removable: non-removable slot (like eMMC)
ti,needs-special-reset: Requires a special softreset sequence
diff --git a/Documentation/devicetree/bindings/mtd/partition.txt b/Documentation/devicetree/bindings/mtd/partition.txt
index f114ce1657c2..6e1f61f1e789 100644
--- a/Documentation/devicetree/bindings/mtd/partition.txt
+++ b/Documentation/devicetree/bindings/mtd/partition.txt
@@ -35,4 +35,4 @@ flash@0 {
uimage@100000 {
reg = <0x0100000 0x200000>;
};
-];
+};
diff --git a/Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt b/Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
new file mode 100644
index 000000000000..7c86d5e28a0e
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/broadcom-bcm87xx.txt
@@ -0,0 +1,29 @@
+The Broadcom BCM87XX devices are a family of 10G Ethernet PHYs. They
+have these bindings in addition to the standard PHY bindings.
+
+Compatible: Should contain "broadcom,bcm8706" or "broadcom,bcm8727" and
+ "ethernet-phy-ieee802.3-c45"
+
+Optional Properties:
+
+- broadcom,c45-reg-init : one of more sets of 4 cells. The first cell
+ is the MDIO Manageable Device (MMD) address, the second a register
+ address within the MMD, the third cell contains a mask to be ANDed
+ with the existing register value, and the fourth cell is ORed with
+ he result to yield the new register value. If the third cell has a
+ value of zero, no read of the existing value is performed.
+
+Example:
+
+ ethernet-phy@5 {
+ reg = <5>;
+ compatible = "broadcom,bcm8706", "ethernet-phy-ieee802.3-c45";
+ interrupt-parent = <&gpio>;
+ interrupts = <12 8>; /* Pin 12, active low */
+ /*
+ * Set PMD Digital Control Register for
+ * GPIO[1] Tx/Rx
+ * GPIO[0] R64 Sync Acquired
+ */
+ broadcom,c45-reg-init = <1 0xc808 0xff8f 0x70>;
+ };
diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
index f31b686d4556..8ff324eaa889 100644
--- a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
+++ b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
@@ -11,6 +11,9 @@ Required properties:
- reg : Offset and length of the register set for this device
- interrupts : Interrupt tuple for this device
+
+Optional properties:
+
- clock-frequency : The oscillator frequency driving the flexcan device
Example:
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
new file mode 100644
index 000000000000..48b259e29e87
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -0,0 +1,41 @@
+* Texas Instruments Davinci EMAC
+
+This file provides information, what the device node
+for the davinci_emac interface contains.
+
+Required properties:
+- compatible: "ti,davinci-dm6467-emac";
+- reg: Offset and length of the register set for the device
+- ti,davinci-ctrl-reg-offset: offset to control register
+- ti,davinci-ctrl-mod-reg-offset: offset to control module register
+- ti,davinci-ctrl-ram-offset: offset to control module ram
+- ti,davinci-ctrl-ram-size: size of control module ram
+- ti,davinci-rmii-en: use RMII
+- ti,davinci-no-bd-ram: has the emac controller BD RAM
+- phy-handle: Contains a phandle to an Ethernet PHY.
+ if not, davinci_emac driver defaults to 100/FULL
+- interrupts: interrupt mapping for the davinci emac interrupts sources:
+ 4 sources: <Receive Threshold Interrupt
+ Receive Interrupt
+ Transmit Interrupt
+ Miscellaneous Interrupt>
+
+Optional properties:
+- local-mac-address : 6 bytes, mac address
+
+Example (enbw_cmc board):
+ eth0: emac@1e20000 {
+ compatible = "ti,davinci-dm6467-emac";
+ reg = <0x220000 0x4000>;
+ ti,davinci-ctrl-reg-offset = <0x3000>;
+ ti,davinci-ctrl-mod-reg-offset = <0x2000>;
+ ti,davinci-ctrl-ram-offset = <0>;
+ ti,davinci-ctrl-ram-size = <0x2000>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ interrupts = <33
+ 34
+ 35
+ 36
+ >;
+ interrupt-parent = <&intc>;
+ };
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 7ab9e1a2d8be..d53639221403 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -7,10 +7,14 @@ Required properties:
- phy-mode : String, operation mode of the PHY interface.
Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii",
"rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii".
-- phy-reset-gpios : Should specify the gpio for phy reset
Optional properties:
- local-mac-address : 6 bytes, mac address
+- phy-reset-gpios : Should specify the gpio for phy reset
+- phy-reset-duration : Reset duration in milliseconds. Should present
+ only if property "phy-reset-gpios" is available. Missing the property
+ will have the duration be 1 millisecond. Numbers greater than 1000 are
+ invalid and 1 millisecond will be used instead.
Example:
@@ -19,6 +23,6 @@ ethernet@83fec000 {
reg = <0x83fec000 0x4000>;
interrupts = <87>;
phy-mode = "mii";
- phy-reset-gpios = <&gpio1 14 0>; /* GPIO2_14 */
+ phy-reset-gpios = <&gpio2 14 0>; /* GPIO2_14 */
local-mac-address = [00 04 9F 01 1B B9];
};
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index bb8c742eb8c5..7cd18fbfcf71 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -14,10 +14,20 @@ Required properties:
- linux,phandle : phandle for this node; likely referenced by an
ethernet controller node.
+Optional Properties:
+
+- compatible: Compatible list, may contain
+ "ethernet-phy-ieee802.3-c22" or "ethernet-phy-ieee802.3-c45" for
+ PHYs that implement IEEE802.3 clause 22 or IEEE802.3 clause 45
+ specifications. If neither of these are specified, the default is to
+ assume clause 22. The compatible list may also contain other
+ elements.
+
Example:
ethernet-phy@0 {
- linux,phandle = <2452000>
+ compatible = "ethernet-phy-ieee802.3-c22";
+ linux,phandle = <2452000>;
interrupt-parent = <40000>;
interrupts = <35 1>;
reg = <0>;
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 1f62623f8c3f..060bbf098ef3 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -1,7 +1,8 @@
* STMicroelectronics 10/100/1000 Ethernet driver (GMAC)
Required properties:
-- compatible: Should be "st,spear600-gmac"
+- compatible: Should be "snps,dwmac-<ip_version>" "snps,dwmac"
+ For backwards compatibility: "st,spear600-gmac" is also supported.
- reg: Address and length of the register set for the device
- interrupt-parent: Should be the phandle for the interrupt controller
that services interrupts for this device
diff --git a/Documentation/devicetree/bindings/nvec/nvec_nvidia.txt b/Documentation/devicetree/bindings/nvec/nvidia,nvec.txt
index 5aeee53ff9f4..5aeee53ff9f4 100644
--- a/Documentation/devicetree/bindings/nvec/nvec_nvidia.txt
+++ b/Documentation/devicetree/bindings/nvec/nvidia,nvec.txt
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt
index 82b43f915857..a4119f6422d9 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx6q-pinctrl.txt
@@ -1626,3 +1626,5 @@ MX6Q_PAD_SD2_DAT3__PCIE_CTRL_MUX_11 1587
MX6Q_PAD_SD2_DAT3__GPIO_1_12 1588
MX6Q_PAD_SD2_DAT3__SJC_DONE 1589
MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 1590
+MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID 1591
+MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID 1592
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
new file mode 100644
index 000000000000..5187f0dd8b28
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-single.txt
@@ -0,0 +1,93 @@
+One-register-per-pin type device tree based pinctrl driver
+
+Required properties:
+- compatible : "pinctrl-single"
+
+- reg : offset and length of the register set for the mux registers
+
+- pinctrl-single,register-width : pinmux register access width in bits
+
+- pinctrl-single,function-mask : mask of allowed pinmux function bits
+ in the pinmux register
+
+Optional properties:
+- pinctrl-single,function-off : function off mode for disabled state if
+ available and same for all registers; if not specified, disabling of
+ pin functions is ignored
+
+This driver assumes that there is only one register for each pin,
+and uses the common pinctrl bindings as specified in the pinctrl-bindings.txt
+document in this directory.
+
+The pin configuration nodes for pinctrl-single are specified as pinctrl
+register offset and value pairs using pinctrl-single,pins. Only the bits
+specified in pinctrl-single,function-mask are updated. For example, setting
+a pin for a device could be done with:
+
+ pinctrl-single,pins = <0xdc 0x118>;
+
+Where 0xdc is the offset from the pinctrl register base address for the
+device pinctrl register, and 0x118 contains the desired value of the
+pinctrl register. See the device example and static board pins example
+below for more information.
+
+Example:
+
+/* SoC common file */
+
+/* first controller instance for pins in core domain */
+pmx_core: pinmux@4a100040 {
+ compatible = "pinctrl-single";
+ reg = <0x4a100040 0x0196>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-single,register-width = <16>;
+ pinctrl-single,function-mask = <0xffff>;
+};
+
+/* second controller instance for pins in wkup domain */
+pmx_wkup: pinmux@4a31e040 {
+ compatible = "pinctrl-single;
+ reg = <0x4a31e040 0x0038>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-single,register-width = <16>;
+ pinctrl-single,function-mask = <0xffff>;
+};
+
+
+/* board specific .dts file */
+
+&pmx_core {
+
+ /*
+ * map all board specific static pins enabled by the pinctrl driver
+ * itself during the boot (or just set them up in the bootloader)
+ */
+ pinctrl-names = "default";
+ pinctrl-0 = <&board_pins>;
+
+ board_pins: pinmux_board_pins {
+ pinctrl-single,pins = <
+ 0x6c 0xf
+ 0x6e 0xf
+ 0x70 0xf
+ 0x72 0xf
+ >;
+ };
+
+ /* map uart2 pins */
+ uart2_pins: pinmux_uart2_pins {
+ pinctrl-single,pins = <
+ 0xd8 0x118
+ 0xda 0
+ 0xdc 0x118
+ 0xde 0
+ >;
+ };
+};
+
+&uart2 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_pins>;
+};
diff --git a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt b/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
index 2f5b6b1ba15f..4fae41d54798 100644
--- a/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/fixed-regulator.txt
@@ -10,6 +10,7 @@ Optional properties:
If this property is missing, the default assumed is Active low.
- gpio-open-drain: GPIO is open drain type.
If this property is missing then default assumption is false.
+-vin-supply: Input supply name.
Any property defined as part of the core regulator
binding, defined in regulator.txt, can also be used.
@@ -29,4 +30,5 @@ Example:
enable-active-high;
regulator-boot-on;
gpio-open-drain;
+ vin-supply = <&parent_reg>;
};
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index 5b7a408acdaa..66ece3f87bbc 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -10,6 +10,11 @@ Optional properties:
- regulator-always-on: boolean, regulator should never be disabled
- regulator-boot-on: bootloader/firmware enabled regulator
- <name>-supply: phandle to the parent supply/regulator node
+- regulator-ramp-delay: ramp delay for regulator(in uV/uS)
+- regulator-compatible: If a regulator chip contains multiple
+ regulators, and if the chip's binding contains a child node that
+ describes each regulator, then this property indicates which regulator
+ this child node is intended to configure.
Example:
diff --git a/Documentation/devicetree/bindings/regulator/tps65217.txt b/Documentation/devicetree/bindings/regulator/tps65217.txt
new file mode 100644
index 000000000000..0487e9675ba0
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/tps65217.txt
@@ -0,0 +1,91 @@
+TPS65217 family of regulators
+
+Required properties:
+- compatible: "ti,tps65217"
+- reg: I2C slave address
+- regulators: list of regulators provided by this controller, must be named
+ after their hardware counterparts: dcdc[1-3] and ldo[1-4]
+- regulators: This is the list of child nodes that specify the regulator
+ initialization data for defined regulators. Not all regulators for the given
+ device need to be present. The definition for each of these nodes is defined
+ using the standard binding for regulators found at
+ Documentation/devicetree/bindings/regulator/regulator.txt.
+
+ The valid names for regulators are:
+ tps65217: dcdc1, dcdc2, dcdc3, ldo1, ldo2, ldo3 and ldo4
+
+Each regulator is defined using the standard binding for regulators.
+
+Example:
+
+ tps: tps@24 {
+ compatible = "ti,tps65217";
+
+ regulators {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dcdc1_reg: regulator@0 {
+ reg = <0>;
+ regulator-compatible = "dcdc1";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc2_reg: regulator@1 {
+ reg = <1>;
+ regulator-compatible = "dcdc2";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ dcdc3_reg: regulator@2 {
+ reg = <2>;
+ regulator-compatible = "dcdc3";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <1500000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo1_reg: regulator@3 {
+ reg = <3>;
+ regulator-compatible = "ldo1";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo2_reg: regulator@4 {
+ reg = <4>;
+ regulator-compatible = "ldo2";
+ regulator-min-microvolt = <900000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo3_reg: regulator@5 {
+ reg = <5>;
+ regulator-compatible = "ldo3";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ ldo4_reg: regulator@6 {
+ reg = <6>;
+ regulator-compatible = "ldo4";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/tps6586x.txt b/Documentation/devicetree/bindings/regulator/tps6586x.txt
index 0fcabaa3baa3..d156e1b5db12 100644
--- a/Documentation/devicetree/bindings/regulator/tps6586x.txt
+++ b/Documentation/devicetree/bindings/regulator/tps6586x.txt
@@ -6,8 +6,17 @@ Required properties:
- interrupts: the interrupt outputs of the controller
- #gpio-cells: number of cells to describe a GPIO
- gpio-controller: mark the device as a GPIO controller
-- regulators: list of regulators provided by this controller, must be named
- after their hardware counterparts: sm[0-2], ldo[0-9] and ldo_rtc
+- regulators: list of regulators provided by this controller, must have
+ property "regulator-compatible" to match their hardware counterparts:
+ sm[0-2], ldo[0-9] and ldo_rtc
+- sm0-supply: The input supply for the SM0.
+- sm1-supply: The input supply for the SM1.
+- sm2-supply: The input supply for the SM2.
+- vinldo01-supply: The input supply for the LDO1 and LDO2
+- vinldo23-supply: The input supply for the LDO2 and LDO3
+- vinldo4-supply: The input supply for the LDO4
+- vinldo678-supply: The input supply for the LDO6, LDO7 and LDO8
+- vinldo9-supply: The input supply for the LDO9
Each regulator is defined using the standard binding for regulators.
@@ -21,75 +30,113 @@ Example:
#gpio-cells = <2>;
gpio-controller;
+ sm0-supply = <&some_reg>;
+ sm1-supply = <&some_reg>;
+ sm2-supply = <&some_reg>;
+ vinldo01-supply = <...>;
+ vinldo23-supply = <...>;
+ vinldo4-supply = <...>;
+ vinldo678-supply = <...>;
+ vinldo9-supply = <...>;
+
regulators {
- sm0_reg: sm0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sm0_reg: regulator@0 {
+ reg = <0>;
+ regulator-compatible = "sm0";
regulator-min-microvolt = < 725000>;
regulator-max-microvolt = <1500000>;
regulator-boot-on;
regulator-always-on;
};
- sm1_reg: sm1 {
+ sm1_reg: regulator@1 {
+ reg = <1>;
+ regulator-compatible = "sm1";
regulator-min-microvolt = < 725000>;
regulator-max-microvolt = <1500000>;
regulator-boot-on;
regulator-always-on;
};
- sm2_reg: sm2 {
+ sm2_reg: regulator@2 {
+ reg = <2>;
+ regulator-compatible = "sm2";
regulator-min-microvolt = <3000000>;
regulator-max-microvolt = <4550000>;
regulator-boot-on;
regulator-always-on;
};
- ldo0_reg: ldo0 {
+ ldo0_reg: regulator@3 {
+ reg = <3>;
+ regulator-compatible = "ldo0";
regulator-name = "PCIE CLK";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
};
- ldo1_reg: ldo1 {
+ ldo1_reg: regulator@4 {
+ reg = <4>;
+ regulator-compatible = "ldo1";
regulator-min-microvolt = < 725000>;
regulator-max-microvolt = <1500000>;
};
- ldo2_reg: ldo2 {
+ ldo2_reg: regulator@5 {
+ reg = <5>;
+ regulator-compatible = "ldo2";
regulator-min-microvolt = < 725000>;
regulator-max-microvolt = <1500000>;
};
- ldo3_reg: ldo3 {
+ ldo3_reg: regulator@6 {
+ reg = <6>;
+ regulator-compatible = "ldo3";
regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <3300000>;
};
- ldo4_reg: ldo4 {
+ ldo4_reg: regulator@7 {
+ reg = <7>;
+ regulator-compatible = "ldo4";
regulator-min-microvolt = <1700000>;
regulator-max-microvolt = <2475000>;
};
- ldo5_reg: ldo5 {
+ ldo5_reg: regulator@8 {
+ reg = <8>;
+ regulator-compatible = "ldo5";
regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <3300000>;
};
- ldo6_reg: ldo6 {
+ ldo6_reg: regulator@9 {
+ reg = <9>;
+ regulator-compatible = "ldo6";
regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <3300000>;
};
- ldo7_reg: ldo7 {
+ ldo7_reg: regulator@10 {
+ reg = <10>;
+ regulator-compatible = "ldo7";
regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <3300000>;
};
- ldo8_reg: ldo8 {
+ ldo8_reg: regulator@11 {
+ reg = <11>;
+ regulator-compatible = "ldo8";
regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <3300000>;
};
- ldo9_reg: ldo9 {
+ ldo9_reg: regulator@12 {
+ reg = <12>;
+ regulator-compatible = "ldo9";
regulator-min-microvolt = <1250000>;
regulator-max-microvolt = <3300000>;
};
diff --git a/Documentation/devicetree/bindings/regulator/twl-regulator.txt b/Documentation/devicetree/bindings/regulator/twl-regulator.txt
index 0c3395d55ac1..658749b90b97 100644
--- a/Documentation/devicetree/bindings/regulator/twl-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/twl-regulator.txt
@@ -15,7 +15,6 @@ For twl6030 regulators/LDOs
- "ti,twl6030-vusb" for VUSB LDO
- "ti,twl6030-v1v8" for V1V8 LDO
- "ti,twl6030-v2v1" for V2V1 LDO
- - "ti,twl6030-clk32kg" for CLK32KG RESOURCE
- "ti,twl6030-vdd1" for VDD1 SMPS
- "ti,twl6030-vdd2" for VDD2 SMPS
- "ti,twl6030-vdd3" for VDD3 SMPS
diff --git a/Documentation/devicetree/bindings/rtc/dw-apb.txt b/Documentation/devicetree/bindings/rtc/dw-apb.txt
new file mode 100644
index 000000000000..93e2b0f048e6
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/dw-apb.txt
@@ -0,0 +1,25 @@
+* Designware APB timer
+
+Required properties:
+- compatible: "snps,dw-apb-timer-sp" or "snps,dw-apb-timer-osc"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: IRQ line for the timer.
+- clock-frequency: The frequency in HZ of the timer.
+- clock-freq: For backwards compatibility with picoxcell
+
+Example:
+
+ timer1: timer@ffc09000 {
+ compatible = "snps,dw-apb-timer-sp";
+ interrupts = <0 168 4>;
+ clock-frequency = <200000000>;
+ reg = <0xffc09000 0x1000>;
+ };
+
+ timer2: timer@ffd00000 {
+ compatible = "snps,dw-apb-timer-osc";
+ interrupts = <0 169 4>;
+ clock-frequency = <200000000>;
+ reg = <0xffd00000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/stmp3xxx-rtc.txt b/Documentation/devicetree/bindings/rtc/stmp3xxx-rtc.txt
new file mode 100644
index 000000000000..b800070fe6e9
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/stmp3xxx-rtc.txt
@@ -0,0 +1,16 @@
+* STMP3xxx/i.MX28 Time Clock controller
+
+Required properties:
+- compatible: should be one of the following.
+ * "fsl,stmp3xxx-rtc"
+- reg: physical base address of the controller and length of memory mapped
+ region.
+- interrupts: rtc alarm interrupt
+
+Example:
+
+rtc@80056000 {
+ compatible = "fsl,imx28-rtc", "fsl,stmp3xxx-rtc";
+ reg = <0x80056000 2000>;
+ interrupts = <29>;
+};
diff --git a/Documentation/devicetree/bindings/sound/tegra-audio-alc5632.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt
index b77a97c9101e..b77a97c9101e 100644
--- a/Documentation/devicetree/bindings/sound/tegra-audio-alc5632.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt
diff --git a/Documentation/devicetree/bindings/sound/tegra-audio-trimslice.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.txt
index 04b14cfb1f16..04b14cfb1f16 100644
--- a/Documentation/devicetree/bindings/sound/tegra-audio-trimslice.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.txt
diff --git a/Documentation/devicetree/bindings/sound/tegra-audio-wm8753.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt
index c4dd39ce6165..c4dd39ce6165 100644
--- a/Documentation/devicetree/bindings/sound/tegra-audio-wm8753.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt
diff --git a/Documentation/devicetree/bindings/sound/tegra-audio-wm8903.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
index d5b0da8bf1d8..d5b0da8bf1d8 100644
--- a/Documentation/devicetree/bindings/sound/tegra-audio-wm8903.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
diff --git a/Documentation/devicetree/bindings/sound/tegra20-das.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra20-das.txt
index 6de3a7ee4efb..6de3a7ee4efb 100644
--- a/Documentation/devicetree/bindings/sound/tegra20-das.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra20-das.txt
diff --git a/Documentation/devicetree/bindings/sound/tegra20-i2s.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.txt
index 0df2b5c816e3..0df2b5c816e3 100644
--- a/Documentation/devicetree/bindings/sound/tegra20-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.txt
diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
index 9841057d112b..4256a6df9b79 100644
--- a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
+++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
@@ -17,6 +17,6 @@ ecspi@70010000 {
reg = <0x70010000 0x4000>;
interrupts = <36>;
fsl,spi-num-chipselects = <2>;
- cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
- <&gpio3 25 0>; /* GPIO4_25 */
+ cs-gpios = <&gpio3 24 0>, /* GPIO3_24 */
+ <&gpio3 25 0>; /* GPIO3_25 */
};
diff --git a/Documentation/devicetree/bindings/spi/spi_nvidia.txt b/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt
index 6b9e51896693..6b9e51896693 100644
--- a/Documentation/devicetree/bindings/spi/spi_nvidia.txt
+++ b/Documentation/devicetree/bindings/spi/nvidia,tegra20-spi.txt
diff --git a/Documentation/devicetree/bindings/spi/spi-samsung.txt b/Documentation/devicetree/bindings/spi/spi-samsung.txt
new file mode 100644
index 000000000000..a15ffeddfba4
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-samsung.txt
@@ -0,0 +1,116 @@
+* Samsung SPI Controller
+
+The Samsung SPI controller is used to interface with various devices such as flash
+and display controllers using the SPI communication interface.
+
+Required SoC Specific Properties:
+
+- compatible: should be one of the following.
+ - samsung,s3c2443-spi: for s3c2443, s3c2416 and s3c2450 platforms
+ - samsung,s3c6410-spi: for s3c6410 platforms
+ - samsung,s5p6440-spi: for s5p6440 and s5p6450 platforms
+ - samsung,s5pv210-spi: for s5pv210 and s5pc110 platforms
+ - samsung,exynos4210-spi: for exynos4 and exynos5 platforms
+
+- reg: physical base address of the controller and length of memory mapped
+ region.
+
+- interrupts: The interrupt number to the cpu. The interrupt specifier format
+ depends on the interrupt controller.
+
+[PRELIMINARY: the dma channel allocation will change once there are
+official DMA bindings]
+
+- tx-dma-channel: The dma channel specifier for tx operations. The format of
+ the dma specifier depends on the dma controller.
+
+- rx-dma-channel: The dma channel specifier for rx operations. The format of
+ the dma specifier depends on the dma controller.
+
+Required Board Specific Properties:
+
+- #address-cells: should be 1.
+- #size-cells: should be 0.
+- gpios: The gpio specifier for clock, mosi and miso interface lines (in the
+ order specified). The format of the gpio specifier depends on the gpio
+ controller.
+
+Optional Board Specific Properties:
+
+- samsung,spi-src-clk: If the spi controller includes a internal clock mux to
+ select the clock source for the spi bus clock, this property can be used to
+ indicate the clock to be used for driving the spi bus clock. If not specified,
+ the clock number 0 is used as default.
+
+- num-cs: Specifies the number of chip select lines supported. If
+ not specified, the default number of chip select lines is set to 1.
+
+SPI Controller specific data in SPI slave nodes:
+
+- The spi slave nodes should provide the following information which is required
+ by the spi controller.
+
+ - cs-gpio: A gpio specifier that specifies the gpio line used as
+ the slave select line by the spi controller. The format of the gpio
+ specifier depends on the gpio controller.
+
+ - samsung,spi-feedback-delay: The sampling phase shift to be applied on the
+ miso line (to account for any lag in the miso line). The following are the
+ valid values.
+
+ - 0: No phase shift.
+ - 1: 90 degree phase shift sampling.
+ - 2: 180 degree phase shift sampling.
+ - 3: 270 degree phase shift sampling.
+
+Aliases:
+
+- All the SPI controller nodes should be represented in the aliases node using
+ the following format 'spi{n}' where n is a unique number for the alias.
+
+
+Example:
+
+- SoC Specific Portion:
+
+ spi_0: spi@12d20000 {
+ compatible = "samsung,exynos4210-spi";
+ reg = <0x12d20000 0x100>;
+ interrupts = <0 66 0>;
+ tx-dma-channel = <&pdma0 5>;
+ rx-dma-channel = <&pdma0 4>;
+ };
+
+- Board Specific Portion:
+
+ spi_0: spi@12d20000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ gpios = <&gpa2 4 2 3 0>,
+ <&gpa2 6 2 3 0>,
+ <&gpa2 7 2 3 0>;
+
+ w25q80bw@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "w25x80";
+ reg = <0>;
+ spi-max-frequency = <10000>;
+
+ controller-data {
+ cs-gpio = <&gpa2 5 1 0 3>;
+ samsung,spi-feedback-delay = <0>;
+ };
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0x0 0x40000>;
+ read-only;
+ };
+
+ partition@40000 {
+ label = "Kernel";
+ reg = <0x40000 0xc0000>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/tty/serial/fsl-mxs-auart.txt b/Documentation/devicetree/bindings/tty/serial/fsl-mxs-auart.txt
new file mode 100644
index 000000000000..2ee903fad25c
--- /dev/null
+++ b/Documentation/devicetree/bindings/tty/serial/fsl-mxs-auart.txt
@@ -0,0 +1,27 @@
+* Freescale MXS Application UART (AUART)
+
+Required properties:
+- compatible : Should be "fsl,<soc>-auart". The supported SoCs include
+ imx23 and imx28.
+- reg : Address and length of the register set for the device
+- interrupts : Should contain the auart interrupt numbers
+
+Example:
+auart0: serial@8006a000 {
+ compatible = "fsl,imx28-auart", "fsl,imx23-auart";
+ reg = <0x8006a000 0x2000>;
+ interrupts = <112 70 71>;
+};
+
+Note: Each auart port should have an alias correctly numbered in "aliases"
+node.
+
+Example:
+
+aliases {
+ serial0 = &auart0;
+ serial1 = &auart1;
+ serial2 = &auart2;
+ serial3 = &auart3;
+ serial4 = &auart4;
+};
diff --git a/Documentation/devicetree/bindings/usb/tegra-usb.txt b/Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt
index e9b005dc7625..e9b005dc7625 100644
--- a/Documentation/devicetree/bindings/usb/tegra-usb.txt
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra20-ehci.txt
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 6eab91747a86..db4d3af3643c 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -3,6 +3,7 @@ Device tree binding vendor prefix registry. Keep list in alphabetical order.
This isn't an exhaustive list, but you should add new prefixes to it before
using them to avoid name-space collisions.
+ad Avionic Design GmbH
adi Analog Devices, Inc.
amcc Applied Micro Circuits Corporation (APM, formally AMCC)
apm Applied Micro Circuits Corporation (APM)
diff --git a/Documentation/devicetree/bindings/watchdog/omap-wdt.txt b/Documentation/devicetree/bindings/watchdog/omap-wdt.txt
new file mode 100644
index 000000000000..c227970671ea
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/omap-wdt.txt
@@ -0,0 +1,14 @@
+TI Watchdog Timer (WDT) Controller for OMAP
+
+Required properties:
+compatible:
+- "ti,omap3-wdt" for OMAP3
+- "ti,omap4-wdt" for OMAP4
+- ti,hwmods: Name of the hwmod associated to the WDT
+
+Examples:
+
+wdt2: wdt@4a314000 {
+ compatible = "ti,omap4-wdt", "ti,omap3-wdt";
+ ti,hwmods = "wd_timer2";
+};
diff --git a/Documentation/devicetree/usage-model.txt b/Documentation/devicetree/usage-model.txt
index c5a80099b71c..dca90fe22a90 100644
--- a/Documentation/devicetree/usage-model.txt
+++ b/Documentation/devicetree/usage-model.txt
@@ -312,7 +312,7 @@ device tree for the NVIDIA Tegra board.
};
};
-At .machine_init() time, Tegra board support code will need to look at
+At .init_machine() time, Tegra board support code will need to look at
this DT and decide which nodes to create platform_devices for.
However, looking at the tree, it is not immediately obvious what kind
of device each node represents, or even if a node represents a device
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 56000b33340b..61d1a89baeaf 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -249,15 +249,6 @@ Who: Ravikiran Thirumalai <kiran@scalex86.org>
---------------------------
-What: Code that is now under CONFIG_WIRELESS_EXT_SYSFS
- (in net/core/net-sysfs.c)
-When: 3.5
-Why: Over 1K .text/.data size reduction, data is available in other
- ways (ioctls)
-Who: Johannes Berg <johannes@sipsolutions.net>
-
----------------------------
-
What: sysfs ui for changing p4-clockmod parameters
When: September 2009
Why: See commits 129f8ae9b1b5be94517da76009ea956e89104ce8 and
@@ -414,21 +405,6 @@ Who: Jean Delvare <khali@linux-fr.org>
----------------------------
-What: xt_connlimit rev 0
-When: 2012
-Who: Jan Engelhardt <jengelh@medozas.de>
-Files: net/netfilter/xt_connlimit.c
-
-----------------------------
-
-What: ipt_addrtype match include file
-When: 2012
-Why: superseded by xt_addrtype
-Who: Florian Westphal <fw@strlen.de>
-Files: include/linux/netfilter_ipv4/ipt_addrtype.h
-
-----------------------------
-
What: i2c_driver.attach_adapter
i2c_driver.detach_adapter
When: September 2011
@@ -449,6 +425,19 @@ Who: Hans Verkuil <hans.verkuil@cisco.com>
----------------------------
+What: CONFIG_CFG80211_WEXT
+When: as soon as distributions ship new wireless tools, ie. wpa_supplicant 1.0
+ and NetworkManager/connman/etc. that are able to use nl80211
+Why: Wireless extensions are deprecated, and userland tools are moving to
+ using nl80211. New drivers are no longer using wireless extensions,
+ and while there might still be old drivers, both new drivers and new
+ userland no longer needs them and they can't be used for an feature
+ developed in the past couple of years. As such, compatibility with
+ wireless extensions in new drivers will be removed.
+Who: Johannes Berg <johannes@sipsolutions.net>
+
+----------------------------
+
What: g_file_storage driver
When: 3.8
Why: This driver has been superseded by g_mass_storage.
@@ -589,6 +578,13 @@ Why: Remount currently allows changing bound subsystems and
----------------------------
+What: xt_recent rev 0
+When: 2013
+Who: Pablo Neira Ayuso <pablo@netfilter.org>
+Files: net/netfilter/xt_recent.c
+
+----------------------------
+
What: KVM debugfs statistics
When: 2013
Why: KVM tracepoints provide mostly equivalent information in a much more
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 8e2da1e06e3b..e0cce2a5f820 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -9,7 +9,7 @@ be able to use diff(1).
--------------------------- dentry_operations --------------------------
prototypes:
- int (*d_revalidate)(struct dentry *, struct nameidata *);
+ int (*d_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, const struct inode *,
struct qstr *);
int (*d_compare)(const struct dentry *, const struct inode *,
@@ -37,9 +37,8 @@ d_manage: no no yes (ref-walk) maybe
--------------------------- inode_operations ---------------------------
prototypes:
- int (*create) (struct inode *,struct dentry *,umode_t, struct nameidata *);
- struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameid
-ata *);
+ int (*create) (struct inode *,struct dentry *,umode_t, bool);
+ struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct inode *,struct dentry *,const char *);
@@ -62,6 +61,9 @@ ata *);
int (*removexattr) (struct dentry *, const char *);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
void (*update_time)(struct inode *, struct timespec *, int);
+ int (*atomic_open)(struct inode *, struct dentry *,
+ struct file *, unsigned open_flag,
+ umode_t create_mode, int *opened);
locking rules:
all may block
@@ -89,6 +91,7 @@ listxattr: no
removexattr: yes
fiemap: no
update_time: no
+atomic_open: yes
Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
victim.
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 8c91d1057d9a..2bef2b3843d1 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -355,12 +355,10 @@ protects *all* the dcache state of a given dentry.
via rcu-walk path walk (basically, if the file can have had a path name in the
vfs namespace).
- i_dentry and i_rcu share storage in a union, and the vfs expects
-i_dentry to be reinitialized before it is freed, so an:
-
- INIT_LIST_HEAD(&inode->i_dentry);
-
-must be done in the RCU callback.
+ Even though i_dentry and i_rcu share storage in a union, we will
+initialize the former in inode_init_always(), so just leave it alone in
+the callback. It used to be necessary to clean it there, but not anymore
+(starting at 3.2).
--
[recommended]
@@ -433,3 +431,14 @@ release it yourself.
d_alloc_root() is gone, along with a lot of bugs caused by code
misusing it. Replacement: d_make_root(inode). The difference is,
d_make_root() drops the reference to inode if dentry allocation fails.
+
+--
+[mandatory]
+ The witch is dead! Well, 2/3 of it, anyway. ->d_revalidate() and
+->lookup() do *not* take struct nameidata anymore; just the flags.
+--
+[mandatory]
+ ->create() doesn't take struct nameidata *; unlike the previous
+two, it gets "is it an O_EXCL or equivalent?" boolean argument. Note that
+local filesystems can ignore tha argument - they are guaranteed that the
+object doesn't exist. It's remote/distributed ones that might care...
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index efd23f481704..aa754e01464e 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -341,8 +341,8 @@ This describes how the VFS can manipulate an inode in your
filesystem. As of kernel 2.6.22, the following members are defined:
struct inode_operations {
- int (*create) (struct inode *,struct dentry *, umode_t, struct nameidata *);
- struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
+ int (*create) (struct inode *,struct dentry *, umode_t, bool);
+ struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct inode *,struct dentry *,const char *);
@@ -364,6 +364,9 @@ struct inode_operations {
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
void (*update_time)(struct inode *, struct timespec *, int);
+ int (*atomic_open)(struct inode *, struct dentry *,
+ struct file *, unsigned open_flag,
+ umode_t create_mode, int *opened);
};
Again, all methods are called without any locks being held, unless
@@ -476,6 +479,14 @@ otherwise noted.
an inode. If this is not defined the VFS will update the inode itself
and call mark_inode_dirty_sync.
+ atomic_open: called on the last component of an open. Using this optional
+ method the filesystem can look up, possibly create and open the file in
+ one atomic operation. If it cannot perform this (e.g. the file type
+ turned out to be wrong) it may signal this by returning 1 instead of
+ usual 0 or -ve . This method is only called if the last
+ component is negative or needs lookup. Cached positive dentries are
+ still handled by f_op->open().
+
The Address Space Object
========================
@@ -891,7 +902,7 @@ the VFS uses a default. As of kernel 2.6.22, the following members are
defined:
struct dentry_operations {
- int (*d_revalidate)(struct dentry *, struct nameidata *);
+ int (*d_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, const struct inode *,
struct qstr *);
int (*d_compare)(const struct dentry *, const struct inode *,
@@ -910,11 +921,11 @@ struct dentry_operations {
dcache. Most filesystems leave this as NULL, because all their
dentries in the dcache are valid
- d_revalidate may be called in rcu-walk mode (nd->flags & LOOKUP_RCU).
+ d_revalidate may be called in rcu-walk mode (flags & LOOKUP_RCU).
If in rcu-walk mode, the filesystem must revalidate the dentry without
blocking or storing to the dentry, d_parent and d_inode should not be
- used without care (because they can go NULL), instead nd->inode should
- be used.
+ used without care (because they can change and, in d_inode case, even
+ become NULL under us).
If a situation is encountered that rcu-walk cannot handle, return
-ECHILD and it will be called again in ref-walk mode.
diff --git a/Documentation/hid/uhid.txt b/Documentation/hid/uhid.txt
new file mode 100644
index 000000000000..4627c4241ece
--- /dev/null
+++ b/Documentation/hid/uhid.txt
@@ -0,0 +1,169 @@
+ UHID - User-space I/O driver support for HID subsystem
+ ========================================================
+
+The HID subsystem needs two kinds of drivers. In this document we call them:
+
+ 1. The "HID I/O Driver" is the driver that performs raw data I/O to the
+ low-level device. Internally, they register an hid_ll_driver structure with
+ the HID core. They perform device setup, read raw data from the device and
+ push it into the HID subsystem and they provide a callback so the HID
+ subsystem can send data to the device.
+
+ 2. The "HID Device Driver" is the driver that parses HID reports and reacts on
+ them. There are generic drivers like "generic-usb" and "generic-bluetooth"
+ which adhere to the HID specification and provide the standardizes features.
+ But there may be special drivers and quirks for each non-standard device out
+ there. Internally, they use the hid_driver structure.
+
+Historically, the USB stack was the first subsystem to provide an HID I/O
+Driver. However, other standards like Bluetooth have adopted the HID specs and
+may provide HID I/O Drivers, too. The UHID driver allows to implement HID I/O
+Drivers in user-space and feed the data into the kernel HID-subsystem.
+
+This allows user-space to operate on the same level as USB-HID, Bluetooth-HID
+and similar. It does not provide a way to write HID Device Drivers, though. Use
+hidraw for this purpose.
+
+There is an example user-space application in ./samples/uhid/uhid-example.c
+
+The UHID API
+------------
+
+UHID is accessed through a character misc-device. The minor-number is allocated
+dynamically so you need to rely on udev (or similar) to create the device node.
+This is /dev/uhid by default.
+
+If a new device is detected by your HID I/O Driver and you want to register this
+device with the HID subsystem, then you need to open /dev/uhid once for each
+device you want to register. All further communication is done by read()'ing or
+write()'ing "struct uhid_event" objects. Non-blocking operations are supported
+by setting O_NONBLOCK.
+
+struct uhid_event {
+ __u32 type;
+ union {
+ struct uhid_create_req create;
+ struct uhid_data_req data;
+ ...
+ } u;
+};
+
+The "type" field contains the ID of the event. Depending on the ID different
+payloads are sent. You must not split a single event across multiple read()'s or
+multiple write()'s. A single event must always be sent as a whole. Furthermore,
+only a single event can be sent per read() or write(). Pending data is ignored.
+If you want to handle multiple events in a single syscall, then use vectored
+I/O with readv()/writev().
+
+The first thing you should do is sending an UHID_CREATE event. This will
+register the device. UHID will respond with an UHID_START event. You can now
+start sending data to and reading data from UHID. However, unless UHID sends the
+UHID_OPEN event, the internally attached HID Device Driver has no user attached.
+That is, you might put your device asleep unless you receive the UHID_OPEN
+event. If you receive the UHID_OPEN event, you should start I/O. If the last
+user closes the HID device, you will receive an UHID_CLOSE event. This may be
+followed by an UHID_OPEN event again and so on. There is no need to perform
+reference-counting in user-space. That is, you will never receive multiple
+UHID_OPEN events without an UHID_CLOSE event. The HID subsystem performs
+ref-counting for you.
+You may decide to ignore UHID_OPEN/UHID_CLOSE, though. I/O is allowed even
+though the device may have no users.
+
+If you want to send data to the HID subsystem, you send an HID_INPUT event with
+your raw data payload. If the kernel wants to send data to the device, you will
+read an UHID_OUTPUT or UHID_OUTPUT_EV event.
+
+If your device disconnects, you should send an UHID_DESTROY event. This will
+unregister the device. You can now send UHID_CREATE again to register a new
+device.
+If you close() the fd, the device is automatically unregistered and destroyed
+internally.
+
+write()
+-------
+write() allows you to modify the state of the device and feed input data into
+the kernel. The following types are supported: UHID_CREATE, UHID_DESTROY and
+UHID_INPUT. The kernel will parse the event immediately and if the event ID is
+not supported, it will return -EOPNOTSUPP. If the payload is invalid, then
+-EINVAL is returned, otherwise, the amount of data that was read is returned and
+the request was handled successfully.
+
+ UHID_CREATE:
+ This creates the internal HID device. No I/O is possible until you send this
+ event to the kernel. The payload is of type struct uhid_create_req and
+ contains information about your device. You can start I/O now.
+
+ UHID_DESTROY:
+ This destroys the internal HID device. No further I/O will be accepted. There
+ may still be pending messages that you can receive with read() but no further
+ UHID_INPUT events can be sent to the kernel.
+ You can create a new device by sending UHID_CREATE again. There is no need to
+ reopen the character device.
+
+ UHID_INPUT:
+ You must send UHID_CREATE before sending input to the kernel! This event
+ contains a data-payload. This is the raw data that you read from your device.
+ The kernel will parse the HID reports and react on it.
+
+ UHID_FEATURE_ANSWER:
+ If you receive a UHID_FEATURE request you must answer with this request. You
+ must copy the "id" field from the request into the answer. Set the "err" field
+ to 0 if no error occured or to EIO if an I/O error occurred.
+ If "err" is 0 then you should fill the buffer of the answer with the results
+ of the feature request and set "size" correspondingly.
+
+read()
+------
+read() will return a queued ouput report. These output reports can be of type
+UHID_START, UHID_STOP, UHID_OPEN, UHID_CLOSE, UHID_OUTPUT or UHID_OUTPUT_EV. No
+reaction is required to any of them but you should handle them according to your
+needs. Only UHID_OUTPUT and UHID_OUTPUT_EV have payloads.
+
+ UHID_START:
+ This is sent when the HID device is started. Consider this as an answer to
+ UHID_CREATE. This is always the first event that is sent.
+
+ UHID_STOP:
+ This is sent when the HID device is stopped. Consider this as an answer to
+ UHID_DESTROY.
+ If the kernel HID device driver closes the device manually (that is, you
+ didn't send UHID_DESTROY) then you should consider this device closed and send
+ an UHID_DESTROY event. You may want to reregister your device, though. This is
+ always the last message that is sent to you unless you reopen the device with
+ UHID_CREATE.
+
+ UHID_OPEN:
+ This is sent when the HID device is opened. That is, the data that the HID
+ device provides is read by some other process. You may ignore this event but
+ it is useful for power-management. As long as you haven't received this event
+ there is actually no other process that reads your data so there is no need to
+ send UHID_INPUT events to the kernel.
+
+ UHID_CLOSE:
+ This is sent when there are no more processes which read the HID data. It is
+ the counterpart of UHID_OPEN and you may as well ignore this event.
+
+ UHID_OUTPUT:
+ This is sent if the HID device driver wants to send raw data to the I/O
+ device. You should read the payload and forward it to the device. The payload
+ is of type "struct uhid_data_req".
+ This may be received even though you haven't received UHID_OPEN, yet.
+
+ UHID_OUTPUT_EV:
+ Same as UHID_OUTPUT but this contains a "struct input_event" as payload. This
+ is called for force-feedback, LED or similar events which are received through
+ an input device by the HID subsystem. You should convert this into raw reports
+ and send them to your device similar to events of type UHID_OUTPUT.
+
+ UHID_FEATURE:
+ This event is sent if the kernel driver wants to perform a feature request as
+ described in the HID specs. The report-type and report-number are available in
+ the payload.
+ The kernel serializes feature requests so there will never be two in parallel.
+ However, if you fail to respond with a UHID_FEATURE_ANSWER in a time-span of 5
+ seconds, then the requests will be dropped and a new one might be sent.
+ Therefore, the payload also contains an "id" field that identifies every
+ request.
+
+Document by:
+ David Herrmann <dh.herrmann@googlemail.com>
diff --git a/Documentation/hwmon/da9052 b/Documentation/hwmon/da9052
new file mode 100644
index 000000000000..ef898553638e
--- /dev/null
+++ b/Documentation/hwmon/da9052
@@ -0,0 +1,61 @@
+Supported chips:
+ * Dialog Semiconductors DA9052-BC and DA9053-AA/Bx PMICs
+ Prefix: 'da9052'
+ Datasheet: Datasheet is not publicly available.
+
+Authors: David Dajun Chen <dchen@diasemi.com>
+
+Description
+-----------
+
+The DA9052/53 provides an Analogue to Digital Converter (ADC) with 10 bits
+resolution and track and hold circuitry combined with an analogue input
+multiplexer. The analogue input multiplexer will allow conversion of up to 10
+different inputs. The track and hold circuit ensures stable input voltages at
+the input of the ADC during the conversion.
+
+The ADC is used to measure the following inputs:
+Channel 0: VDDOUT - measurement of the system voltage
+Channel 1: ICH - internal battery charger current measurement
+Channel 2: TBAT - output from the battery NTC
+Channel 3: VBAT - measurement of the battery voltage
+Channel 4: ADC_IN4 - high impedance input (0 - 2.5V)
+Channel 5: ADC_IN5 - high impedance input (0 - 2.5V)
+Channel 6: ADC_IN6 - high impedance input (0 - 2.5V)
+Channel 7: XY - TSI interface to measure the X and Y voltage of the touch
+ screen resistive potentiometers
+Channel 8: Internal Tjunc. - sense (internal temp. sensor)
+Channel 9: VBBAT - measurement of the backup battery voltage
+
+By using sysfs attributes we can measure the system voltage VDDOUT, the battery
+charging current ICH, battery temperature TBAT, battery junction temperature
+TJUNC, battery voltage VBAT and the back up battery voltage VBBAT.
+
+Voltage Monitoring
+------------------
+
+Voltages are sampled by a 10 bit ADC.
+
+The battery voltage is calculated as:
+ Milli volt = ((ADC value * 1000) / 512) + 2500
+
+The backup battery voltage is calculated as:
+ Milli volt = (ADC value * 2500) / 512;
+
+The voltages on ADC channels 4, 5 and 6 are calculated as:
+ Milli volt = (ADC value * 2500) / 1023
+
+Temperature Monitoring
+----------------------
+
+Temperatures are sampled by a 10 bit ADC. Junction and battery temperatures
+are monitored by the ADC channels.
+
+The junction temperature is calculated:
+ Degrees celsius = 1.708 * (TJUNC_RES - T_OFFSET) - 108.8
+The junction temperature attribute is supported by the driver.
+
+The battery temperature is calculated:
+ Degree Celcius = 1 / (t1 + 1/298)- 273
+where t1 = (1/B)* ln(( ADCval * 2.5)/(R25*ITBAT*255))
+Default values of R25, B, ITBAT are 10e3, 3380 and 50e-6 respectively.
diff --git a/Documentation/hwmon/hih6130 b/Documentation/hwmon/hih6130
new file mode 100644
index 000000000000..73dae918ea7b
--- /dev/null
+++ b/Documentation/hwmon/hih6130
@@ -0,0 +1,37 @@
+Kernel driver hih6130
+=====================
+
+Supported chips:
+ * Honeywell HIH-6130 / HIH-6131
+ Prefix: 'hih6130'
+ Addresses scanned: none
+ Datasheet: Publicly available at the Honeywell website
+ http://sensing.honeywell.com/index.php?ci_id=3106&la_id=1&defId=44872
+
+Author:
+ Iain Paton <ipaton0@gmail.com>
+
+Description
+-----------
+
+The HIH-6130 & HIH-6131 are humidity and temperature sensors in a SO8 package.
+The difference between the two devices is that the HIH-6131 has a condensation
+filter.
+
+The devices communicate with the I2C protocol. All sensors are set to the same
+I2C address 0x27 by default, so an entry with I2C_BOARD_INFO("hih6130", 0x27)
+can be used in the board setup code.
+
+Please see Documentation/i2c/instantiating-devices for details on how to
+instantiate I2C devices.
+
+sysfs-Interface
+---------------
+
+temp1_input - temperature input
+humidity1_input - humidity input
+
+Notes
+-----
+
+Command mode and alarms are not currently supported.
diff --git a/Documentation/hwmon/submitting-patches b/Documentation/hwmon/submitting-patches
index 86f42e8e9e49..790f774a3032 100644
--- a/Documentation/hwmon/submitting-patches
+++ b/Documentation/hwmon/submitting-patches
@@ -70,6 +70,9 @@ increase the chances of your change being accepted.
review more difficult. It may also result in code which is more complicated
than necessary. Use inline functions or just regular functions instead.
+* Use devres functions whenever possible to allocate resources. For rationale
+ and supported functions, please see Documentation/driver-model/devres.txt.
+
* If the driver has a detect function, make sure it is silent. Debug messages
and messages printed after a successful detection are acceptable, but it
must not print messages such as "Chip XXX not found/supported".
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 71f55bbcefc8..615142da4ef6 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -38,9 +38,10 @@ Module Parameters
Disable selected features normally supported by the device. This makes it
possible to work around possible driver or hardware bugs if the feature in
question doesn't work as intended for whatever reason. Bit values:
- 1 disable SMBus PEC
- 2 disable the block buffer
- 8 disable the I2C block read functionality
+ 0x01 disable SMBus PEC
+ 0x02 disable the block buffer
+ 0x08 disable the I2C block read functionality
+ 0x10 don't use interrupts
Description
@@ -86,6 +87,12 @@ SMBus 2.0 Support
The 82801DB (ICH4) and later chips support several SMBus 2.0 features.
+Interrupt Support
+-----------------
+
+PCI interrupt support is supported on the 82801EB (ICH5) and later chips.
+
+
Hidden ICH SMBus
----------------
diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
index 475bb4ae0720..1e6634f54c50 100644
--- a/Documentation/i2c/busses/i2c-piix4
+++ b/Documentation/i2c/busses/i2c-piix4
@@ -8,6 +8,11 @@ Supported adapters:
Datasheet: Only available via NDA from ServerWorks
* ATI IXP200, IXP300, IXP400, SB600, SB700 and SB800 southbridges
Datasheet: Not publicly available
+ SB700 register reference available at:
+ http://support.amd.com/us/Embedded_TechDocs/43009_sb7xx_rrg_pub_1.00.pdf
+ * AMD SP5100 (SB700 derivative found on some server mainboards)
+ Datasheet: Publicly available at the AMD website
+ http://support.amd.com/us/Embedded_TechDocs/44413.pdf
* AMD Hudson-2
Datasheet: Not publicly available
* Standard Microsystems (SMSC) SLC90E66 (Victory66) southbridge
@@ -68,6 +73,10 @@ this driver on those mainboards.
The ServerWorks Southbridges, the Intel 440MX, and the Victory66 are
identical to the PIIX4 in I2C/SMBus support.
+The AMD SB700 and SP5100 chipsets implement two PIIX4-compatible SMBus
+controllers. If your BIOS initializes the secondary controller, it will
+be detected by this driver as an "Auxiliary SMBus Host Controller".
+
If you own Force CPCI735 motherboard or other OSB4 based systems you may need
to change the SMBus Interrupt Select register so the SMBus controller uses
the SMI mode.
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index 5aa53374ea2a..3a94b0e6f601 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -245,21 +245,17 @@ static int __init foo_init(void)
{
return i2c_add_driver(&foo_driver);
}
+module_init(foo_init);
static void __exit foo_cleanup(void)
{
i2c_del_driver(&foo_driver);
}
+module_exit(foo_cleanup);
-/* Substitute your own name and email address */
-MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"
-MODULE_DESCRIPTION("Driver for Barf Inc. Foo I2C devices");
-
-/* a few non-GPL license types are also allowed */
-MODULE_LICENSE("GPL");
+The module_i2c_driver() macro can be used to reduce above code.
-module_init(foo_init);
-module_exit(foo_cleanup);
+module_i2c_driver(foo_driver);
Note that some functions are marked by `__init'. These functions can
be removed after kernel booting (or module loading) is completed.
@@ -267,6 +263,17 @@ Likewise, functions marked by `__exit' are dropped by the compiler when
the code is built into the kernel, as they would never be called.
+Driver Information
+==================
+
+/* Substitute your own name and email address */
+MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"
+MODULE_DESCRIPTION("Driver for Barf Inc. Foo I2C devices");
+
+/* a few non-GPL license types are also allowed */
+MODULE_LICENSE("GPL");
+
+
Power Management
================
diff --git a/Documentation/kdump/kdump.txt b/Documentation/kdump/kdump.txt
index 506c7390c2b9..13f1aa09b938 100644
--- a/Documentation/kdump/kdump.txt
+++ b/Documentation/kdump/kdump.txt
@@ -86,7 +86,7 @@ There is also a gitweb interface available at
http://www.kernel.org/git/?p=utils/kernel/kexec/kexec-tools.git
More information about kexec-tools can be found at
-http://www.kernel.org/pub/linux/utils/kernel/kexec/README.html
+http://horms.net/projects/kexec/
3) Unpack the tarball with the tar command, as follows:
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a92c5ebf373e..c2619ef44a72 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1134,7 +1134,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
forcesac
soft
pt [x86, IA-64]
- group_mf [x86, IA-64]
io7= [HW] IO7 for Marvel based alpha systems
@@ -2367,6 +2366,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Set maximum number of finished RCU callbacks to process
in one batch.
+ rcutree.fanout_leaf= [KNL,BOOT]
+ Increase the number of CPUs assigned to each
+ leaf rcu_node structure. Useful for very large
+ systems.
+
rcutree.qhimark= [KNL,BOOT]
Set threshold of queued
RCU callbacks over which batch limiting is disabled.
@@ -2932,6 +2936,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
initial READ(10) command);
o = CAPACITY_OK (accept the capacity
reported by the device);
+ p = WRITE_CACHE (the device cache is ON
+ by default);
r = IGNORE_RESIDUE (the device reports
bogus residue values);
s = SINGLE_LUN (the device has only one
diff --git a/Documentation/laptops/asus-laptop.txt b/Documentation/laptops/asus-laptop.txt
index a1e04d679289..69f9fb3701e0 100644
--- a/Documentation/laptops/asus-laptop.txt
+++ b/Documentation/laptops/asus-laptop.txt
@@ -151,8 +151,7 @@ Display switching
Debugging:
1) Check whether the Fn+F8 key:
- a) does not lock the laptop (try disabling CONFIG_X86_UP_APIC or boot with
- noapic / nolapic if it does)
+ a) does not lock the laptop (try a boot with noapic / nolapic if it does)
b) generates events (0x6n, where n is the value corresponding to the
configuration above)
c) actually works
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 75a592365af9..8f3ae4a6147e 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -211,6 +211,11 @@ The debug output can be changed at runtime using the file
will enable debug messages for when routes change.
+Counters for different types of packets entering and leaving the
+batman-adv module are available through ethtool:
+
+# ethtool --statistics bat0
+
BATCTL
------
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index bfea8a338901..6b1c7110534e 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -1210,7 +1210,7 @@ options, you may wish to use the "max_bonds" module parameter,
documented above.
To create multiple bonding devices with differing options, it is
-preferrable to use bonding parameters exported by sysfs, documented in the
+preferable to use bonding parameters exported by sysfs, documented in the
section below.
For versions of bonding without sysfs support, the only means to
@@ -1950,7 +1950,7 @@ access to fail over to. Additionally, the bonding load balance modes
support link monitoring of their members, so if individual links fail,
the load will be rebalanced across the remaining devices.
- See Section 13, "Configuring Bonding for Maximum Throughput"
+ See Section 12, "Configuring Bonding for Maximum Throughput"
for information on configuring bonding with one peer device.
11.2 High Availability in a Multiple Switch Topology
@@ -2620,7 +2620,7 @@ be found at:
https://lists.sourceforge.net/lists/listinfo/bonding-devel
- Discussions regarding the developpement of the bonding driver take place
+ Discussions regarding the development of the bonding driver take place
on the main Linux network mailing list, hosted at vger.kernel.org. The list
address is:
diff --git a/Documentation/networking/bridge.txt b/Documentation/networking/bridge.txt
index a7ba5e4e2c91..a27cb6214ed7 100644
--- a/Documentation/networking/bridge.txt
+++ b/Documentation/networking/bridge.txt
@@ -1,7 +1,14 @@
In order to use the Ethernet bridging functionality, you'll need the
-userspace tools. These programs and documentation are available
-at http://www.linuxfoundation.org/en/Net:Bridge. The download page is
-http://prdownloads.sourceforge.net/bridge.
+userspace tools.
+
+Documentation for Linux bridging is on:
+ http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge
+
+The bridge-utilities are maintained at:
+ git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/bridge-utils.git
+
+Additionally, the iproute2 utilities can be used to configure
+bridge devices.
If you still have questions, don't hesitate to post to the mailing list
(more info https://lists.linux-foundation.org/mailman/listinfo/bridge).
diff --git a/Documentation/networking/caif/Linux-CAIF.txt b/Documentation/networking/caif/Linux-CAIF.txt
index e52fd62bef3a..0aa4bd381bec 100644
--- a/Documentation/networking/caif/Linux-CAIF.txt
+++ b/Documentation/networking/caif/Linux-CAIF.txt
@@ -19,60 +19,36 @@ and host. Currently, UART and Loopback are available for Linux.
Architecture:
------------
The implementation of CAIF is divided into:
-* CAIF Socket Layer, Kernel API, and Net Device.
+* CAIF Socket Layer and GPRS IP Interface.
* CAIF Core Protocol Implementation
* CAIF Link Layer, implemented as NET devices.
RTNL
!
- ! +------+ +------+ +------+
- ! +------+! +------+! +------+!
- ! ! Sock !! !Kernel!! ! Net !!
- ! ! API !+ ! API !+ ! Dev !+ <- CAIF Client APIs
- ! +------+ +------! +------+
- ! ! ! !
- ! +----------!----------+
- ! +------+ <- CAIF Protocol Implementation
- +-------> ! CAIF !
- ! Core !
- +------+
- +--------!--------+
- ! !
- +------+ +-----+
- ! ! ! TTY ! <- Link Layer (Net Devices)
- +------+ +-----+
-
-
-Using the Kernel API
-----------------------
-The Kernel API is used for accessing CAIF channels from the
-kernel.
-The user of the API has to implement two callbacks for receive
-and control.
-The receive callback gives a CAIF packet as a SKB. The control
-callback will
-notify of channel initialization complete, and flow-on/flow-
-off.
-
-
- struct caif_device caif_dev = {
- .caif_config = {
- .name = "MYDEV"
- .type = CAIF_CHTY_AT
- }
- .receive_cb = my_receive,
- .control_cb = my_control,
- };
- caif_add_device(&caif_dev);
- caif_transmit(&caif_dev, skb);
-
-See the caif_kernel.h for details about the CAIF kernel API.
+ ! +------+ +------+
+ ! +------+! +------+!
+ ! ! IP !! !Socket!!
+ +-------> !interf!+ ! API !+ <- CAIF Client APIs
+ ! +------+ +------!
+ ! ! !
+ ! +-----------+
+ ! !
+ ! +------+ <- CAIF Core Protocol
+ ! ! CAIF !
+ ! ! Core !
+ ! +------+
+ ! +----------!---------+
+ ! ! ! !
+ ! +------+ +-----+ +------+
+ +--> ! HSI ! ! TTY ! ! USB ! <- Link Layer (Net Devices)
+ +------+ +-----+ +------+
+
I M P L E M E N T A T I O N
===========================
-===========================
+
CAIF Core Protocol Layer
=========================================
@@ -88,17 +64,13 @@ The Core CAIF implementation contains:
- Simple implementation of CAIF.
- Layered architecture (a la Streams), each layer in the CAIF
specification is implemented in a separate c-file.
- - Clients must implement PHY layer to access physical HW
- with receive and transmit functions.
- Clients must call configuration function to add PHY layer.
- Clients must implement CAIF layer to consume/produce
CAIF payload with receive and transmit functions.
- Clients must call configuration function to add and connect the
Client layer.
- When receiving / transmitting CAIF Packets (cfpkt), ownership is passed
- to the called function (except for framing layers' receive functions
- or if a transmit function returns an error, in which case the caller
- must free the packet).
+ to the called function (except for framing layers' receive function)
Layered Architecture
--------------------
@@ -109,11 +81,6 @@ Implementation. The support functions include:
CAIF Packet has functions for creating, destroying and adding content
and for adding/extracting header and trailers to protocol packets.
- - CFLST CAIF list implementation.
-
- - CFGLUE CAIF Glue. Contains OS Specifics, such as memory
- allocation, endianness, etc.
-
The CAIF Protocol implementation contains:
- CFCNFG CAIF Configuration layer. Configures the CAIF Protocol
@@ -128,7 +95,7 @@ The CAIF Protocol implementation contains:
control and remote shutdown requests.
- CFVEI CAIF VEI layer. Handles CAIF AT Channels on VEI (Virtual
- External Interface). This layer encodes/decodes VEI frames.
+ External Interface). This layer encodes/decodes VEI frames.
- CFDGML CAIF Datagram layer. Handles CAIF Datagram layer (IP
traffic), encodes/decodes Datagram frames.
@@ -170,7 +137,7 @@ The CAIF Protocol implementation contains:
+---------+ +---------+
! !
+---------+ +---------+
- | | | Serial |
+ | | | Serial |
| | | CFSERL |
+---------+ +---------+
@@ -186,24 +153,20 @@ In this layered approach the following "rules" apply.
layer->dn->transmit(layer->dn, packet);
-Linux Driver Implementation
+CAIF Socket and IP interface
===========================
-Linux GPRS Net Device and CAIF socket are implemented on top of the
-CAIF Core protocol. The Net device and CAIF socket have an instance of
+The IP interface and CAIF socket API are implemented on top of the
+CAIF Core protocol. The IP Interface and CAIF socket have an instance of
'struct cflayer', just like the CAIF Core protocol stack.
Net device and Socket implement the 'receive()' function defined by
'struct cflayer', just like the rest of the CAIF stack. In this way, transmit and
receive of packets is handled as by the rest of the layers: the 'dn->transmit()'
function is called in order to transmit data.
-The layer on top of the CAIF Core implementation is
-sometimes referred to as the "Client layer".
-
-
Configuration of Link Layer
---------------------------
-The Link Layer is implemented as Linux net devices (struct net_device).
+The Link Layer is implemented as Linux network devices (struct net_device).
Payload handling and registration is done using standard Linux mechanisms.
The CAIF Protocol relies on a loss-less link layer without implementing
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index ac295399f0d4..820f55344edc 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -22,7 +22,8 @@ This file contains
4.1.2 RAW socket option CAN_RAW_ERR_FILTER
4.1.3 RAW socket option CAN_RAW_LOOPBACK
4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS
- 4.1.5 RAW socket returned message flags
+ 4.1.5 RAW socket option CAN_RAW_FD_FRAMES
+ 4.1.6 RAW socket returned message flags
4.2 Broadcast Manager protocol sockets (SOCK_DGRAM)
4.3 connected transport protocols (SOCK_SEQPACKET)
4.4 unconnected transport protocols (SOCK_DGRAM)
@@ -41,7 +42,8 @@ This file contains
6.5.1 Netlink interface to set/get devices properties
6.5.2 Setting the CAN bit-timing
6.5.3 Starting and stopping the CAN network device
- 6.6 supported CAN hardware
+ 6.6 CAN FD (flexible data rate) driver support
+ 6.7 supported CAN hardware
7 Socket CAN resources
@@ -232,16 +234,16 @@ solution for a couple of reasons:
arbitration problems and error frames caused by the different
ECUs. The occurrence of detected errors are important for diagnosis
and have to be logged together with the exact timestamp. For this
- reason the CAN interface driver can generate so called Error Frames
- that can optionally be passed to the user application in the same
- way as other CAN frames. Whenever an error on the physical layer
+ reason the CAN interface driver can generate so called Error Message
+ Frames that can optionally be passed to the user application in the
+ same way as other CAN frames. Whenever an error on the physical layer
or the MAC layer is detected (e.g. by the CAN controller) the driver
- creates an appropriate error frame. Error frames can be requested by
- the user application using the common CAN filter mechanisms. Inside
- this filter definition the (interested) type of errors may be
- selected. The reception of error frames is disabled by default.
- The format of the CAN error frame is briefly described in the Linux
- header file "include/linux/can/error.h".
+ creates an appropriate error message frame. Error messages frames can
+ be requested by the user application using the common CAN filter
+ mechanisms. Inside this filter definition the (interested) type of
+ errors may be selected. The reception of error messages is disabled
+ by default. The format of the CAN error message frame is briefly
+ described in the Linux header file "include/linux/can/error.h".
4. How to use Socket CAN
------------------------
@@ -273,7 +275,7 @@ solution for a couple of reasons:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
- __u8 can_dlc; /* data length code: 0 .. 8 */
+ __u8 can_dlc; /* frame payload length in byte (0 .. 8) */
__u8 data[8] __attribute__((aligned(8)));
};
@@ -375,6 +377,51 @@ solution for a couple of reasons:
nbytes = sendto(s, &frame, sizeof(struct can_frame),
0, (struct sockaddr*)&addr, sizeof(addr));
+ Remark about CAN FD (flexible data rate) support:
+
+ Generally the handling of CAN FD is very similar to the formerly described
+ examples. The new CAN FD capable CAN controllers support two different
+ bitrates for the arbitration phase and the payload phase of the CAN FD frame
+ and up to 64 bytes of payload. This extended payload length breaks all the
+ kernel interfaces (ABI) which heavily rely on the CAN frame with fixed eight
+ bytes of payload (struct can_frame) like the CAN_RAW socket. Therefore e.g.
+ the CAN_RAW socket supports a new socket option CAN_RAW_FD_FRAMES that
+ switches the socket into a mode that allows the handling of CAN FD frames
+ and (legacy) CAN frames simultaneously (see section 4.1.5).
+
+ The struct canfd_frame is defined in include/linux/can.h:
+
+ struct canfd_frame {
+ canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
+ __u8 len; /* frame payload length in byte (0 .. 64) */
+ __u8 flags; /* additional flags for CAN FD */
+ __u8 __res0; /* reserved / padding */
+ __u8 __res1; /* reserved / padding */
+ __u8 data[64] __attribute__((aligned(8)));
+ };
+
+ The struct canfd_frame and the existing struct can_frame have the can_id,
+ the payload length and the payload data at the same offset inside their
+ structures. This allows to handle the different structures very similar.
+ When the content of a struct can_frame is copied into a struct canfd_frame
+ all structure elements can be used as-is - only the data[] becomes extended.
+
+ When introducing the struct canfd_frame it turned out that the data length
+ code (DLC) of the struct can_frame was used as a length information as the
+ length and the DLC has a 1:1 mapping in the range of 0 .. 8. To preserve
+ the easy handling of the length information the canfd_frame.len element
+ contains a plain length value from 0 .. 64. So both canfd_frame.len and
+ can_frame.can_dlc are equal and contain a length information and no DLC.
+ For details about the distinction of CAN and CAN FD capable devices and
+ the mapping to the bus-relevant data length code (DLC), see chapter 6.6.
+
+ The length of the two CAN(FD) frame structures define the maximum transfer
+ unit (MTU) of the CAN(FD) network interface and skbuff data length. Two
+ definitions are specified for CAN specific MTUs in include/linux/can.h :
+
+ #define CAN_MTU (sizeof(struct can_frame)) == 16 => 'legacy' CAN frame
+ #define CANFD_MTU (sizeof(struct canfd_frame)) == 72 => CAN FD frame
+
4.1 RAW protocol sockets with can_filters (SOCK_RAW)
Using CAN_RAW sockets is extensively comparable to the commonly
@@ -383,7 +430,7 @@ solution for a couple of reasons:
defaults are set at RAW socket binding time:
- The filters are set to exactly one filter receiving everything
- - The socket only receives valid data frames (=> no error frames)
+ - The socket only receives valid data frames (=> no error message frames)
- The loopback of sent CAN frames is enabled (see chapter 3.2)
- The socket does not receive its own sent frames (in loopback mode)
@@ -434,7 +481,7 @@ solution for a couple of reasons:
4.1.2 RAW socket option CAN_RAW_ERR_FILTER
As described in chapter 3.4 the CAN interface driver can generate so
- called Error Frames that can optionally be passed to the user
+ called Error Message Frames that can optionally be passed to the user
application in the same way as other CAN frames. The possible
errors are divided into different error classes that may be filtered
using the appropriate error mask. To register for every possible
@@ -472,7 +519,69 @@ solution for a couple of reasons:
setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS,
&recv_own_msgs, sizeof(recv_own_msgs));
- 4.1.5 RAW socket returned message flags
+ 4.1.5 RAW socket option CAN_RAW_FD_FRAMES
+
+ CAN FD support in CAN_RAW sockets can be enabled with a new socket option
+ CAN_RAW_FD_FRAMES which is off by default. When the new socket option is
+ not supported by the CAN_RAW socket (e.g. on older kernels), switching the
+ CAN_RAW_FD_FRAMES option returns the error -ENOPROTOOPT.
+
+ Once CAN_RAW_FD_FRAMES is enabled the application can send both CAN frames
+ and CAN FD frames. OTOH the application has to handle CAN and CAN FD frames
+ when reading from the socket.
+
+ CAN_RAW_FD_FRAMES enabled: CAN_MTU and CANFD_MTU are allowed
+ CAN_RAW_FD_FRAMES disabled: only CAN_MTU is allowed (default)
+
+ Example:
+ [ remember: CANFD_MTU == sizeof(struct canfd_frame) ]
+
+ struct canfd_frame cfd;
+
+ nbytes = read(s, &cfd, CANFD_MTU);
+
+ if (nbytes == CANFD_MTU) {
+ printf("got CAN FD frame with length %d\n", cfd.len);
+ /* cfd.flags contains valid data */
+ } else if (nbytes == CAN_MTU) {
+ printf("got legacy CAN frame with length %d\n", cfd.len);
+ /* cfd.flags is undefined */
+ } else {
+ fprintf(stderr, "read: invalid CAN(FD) frame\n");
+ return 1;
+ }
+
+ /* the content can be handled independently from the received MTU size */
+
+ printf("can_id: %X data length: %d data: ", cfd.can_id, cfd.len);
+ for (i = 0; i < cfd.len; i++)
+ printf("%02X ", cfd.data[i]);
+
+ When reading with size CANFD_MTU only returns CAN_MTU bytes that have
+ been received from the socket a legacy CAN frame has been read into the
+ provided CAN FD structure. Note that the canfd_frame.flags data field is
+ not specified in the struct can_frame and therefore it is only valid in
+ CANFD_MTU sized CAN FD frames.
+
+ As long as the payload length is <=8 the received CAN frames from CAN FD
+ capable CAN devices can be received and read by legacy sockets too. When
+ user-generated CAN FD frames have a payload length <=8 these can be send
+ by legacy CAN network interfaces too. Sending CAN FD frames with payload
+ length > 8 to a legacy CAN network interface returns an -EMSGSIZE error.
+
+ Implementation hint for new CAN applications:
+
+ To build a CAN FD aware application use struct canfd_frame as basic CAN
+ data structure for CAN_RAW based applications. When the application is
+ executed on an older Linux kernel and switching the CAN_RAW_FD_FRAMES
+ socket option returns an error: No problem. You'll get legacy CAN frames
+ or CAN FD frames and can process them the same way.
+
+ When sending to CAN devices make sure that the device is capable to handle
+ CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU.
+ The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
+
+ 4.1.6 RAW socket returned message flags
When using recvmsg() call, the msg->msg_flags may contain following flags:
@@ -527,7 +636,7 @@ solution for a couple of reasons:
rcvlist_all - list for unfiltered entries (no filter operations)
rcvlist_eff - list for single extended frame (EFF) entries
- rcvlist_err - list for error frames masks
+ rcvlist_err - list for error message frames masks
rcvlist_fil - list for mask/value filters
rcvlist_inv - list for mask/value filters (inverse semantic)
rcvlist_sff - list for single standard frame (SFF) entries
@@ -573,10 +682,13 @@ solution for a couple of reasons:
dev->type = ARPHRD_CAN; /* the netdevice hardware type */
dev->flags = IFF_NOARP; /* CAN has no arp */
- dev->mtu = sizeof(struct can_frame);
+ dev->mtu = CAN_MTU; /* sizeof(struct can_frame) -> legacy CAN interface */
- The struct can_frame is the payload of each socket buffer in the
- protocol family PF_CAN.
+ or alternative, when the controller supports CAN with flexible data rate:
+ dev->mtu = CANFD_MTU; /* sizeof(struct canfd_frame) -> CAN FD interface */
+
+ The struct can_frame or struct canfd_frame is the payload of each socket
+ buffer (skbuff) in the protocol family PF_CAN.
6.2 local loopback of sent frames
@@ -784,15 +896,41 @@ solution for a couple of reasons:
$ ip link set canX type can restart-ms 100
Alternatively, the application may realize the "bus-off" condition
- by monitoring CAN error frames and do a restart when appropriate with
- the command:
+ by monitoring CAN error message frames and do a restart when
+ appropriate with the command:
$ ip link set canX type can restart
- Note that a restart will also create a CAN error frame (see also
- chapter 3.4).
+ Note that a restart will also create a CAN error message frame (see
+ also chapter 3.4).
+
+ 6.6 CAN FD (flexible data rate) driver support
+
+ CAN FD capable CAN controllers support two different bitrates for the
+ arbitration phase and the payload phase of the CAN FD frame. Therefore a
+ second bittiming has to be specified in order to enable the CAN FD bitrate.
+
+ Additionally CAN FD capable CAN controllers support up to 64 bytes of
+ payload. The representation of this length in can_frame.can_dlc and
+ canfd_frame.len for userspace applications and inside the Linux network
+ layer is a plain value from 0 .. 64 instead of the CAN 'data length code'.
+ The data length code was a 1:1 mapping to the payload length in the legacy
+ CAN frames anyway. The payload length to the bus-relevant DLC mapping is
+ only performed inside the CAN drivers, preferably with the helper
+ functions can_dlc2len() and can_len2dlc().
+
+ The CAN netdevice driver capabilities can be distinguished by the network
+ devices maximum transfer unit (MTU):
+
+ MTU = 16 (CAN_MTU) => sizeof(struct can_frame) => 'legacy' CAN device
+ MTU = 72 (CANFD_MTU) => sizeof(struct canfd_frame) => CAN FD capable device
+
+ The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall.
+ N.B. CAN FD capable devices can also handle and send legacy CAN frames.
+
+ FIXME: Add details about the CAN FD controller configuration when available.
- 6.6 Supported CAN hardware
+ 6.7 Supported CAN hardware
Please check the "Kconfig" file in "drivers/net/can" to get an actual
list of the support CAN hardware. On the Socket CAN project website
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 6f896b94abdc..406a5226220d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -468,6 +468,19 @@ tcp_syncookies - BOOLEAN
SYN flood warnings in logs not being really flooded, your server
is seriously misconfigured.
+tcp_fastopen - INTEGER
+ Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
+ in the opening SYN packet. To use this feature, the client application
+ must not use connect(). Instead, it should use sendmsg() or sendto()
+ with MSG_FASTOPEN flag which performs a TCP handshake automatically.
+
+ The values (bitmap) are:
+ 1: Enables sending data in the opening SYN on the client
+ 5: Enables sending data in the opening SYN on the client regardless
+ of cookie availability.
+
+ Default: 0
+
tcp_syn_retries - INTEGER
Number of times initial SYNs for an active TCP connection attempt
will be retransmitted. Should not be higher than 255. Default value
@@ -551,6 +564,25 @@ tcp_thin_dupack - BOOLEAN
Documentation/networking/tcp-thin.txt
Default: 0
+tcp_limit_output_bytes - INTEGER
+ Controls TCP Small Queue limit per tcp socket.
+ TCP bulk sender tends to increase packets in flight until it
+ gets losses notifications. With SNDBUF autotuning, this can
+ result in a large amount of packets queued in qdisc/device
+ on the local machine, hurting latency of other flows, for
+ typical pfifo_fast qdiscs.
+ tcp_limit_output_bytes limits the number of bytes on qdisc
+ or device to reduce artificial RTT/cwnd and reduce bufferbloat.
+ Note: For GSO/TSO enabled flows, we try to have at least two
+ packets in flight. Reducing tcp_limit_output_bytes might also
+ reduce the size of individual GSO packet (64KB being the max)
+ Default: 131072
+
+tcp_challenge_ack_limit - INTEGER
+ Limits number of Challenge ACK sent per second, as recommended
+ in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
+ Default: 100
+
UDP variables:
udp_mem - vector of 3 INTEGERs: min, pressure, max
@@ -857,9 +889,19 @@ accept_source_route - BOOLEAN
FALSE (host)
accept_local - BOOLEAN
- Accept packets with local source addresses. In combination with
- suitable routing, this can be used to direct packets between two
- local interfaces over the wire and have them accepted properly.
+ Accept packets with local source addresses. In combination
+ with suitable routing, this can be used to direct packets
+ between two local interfaces over the wire and have them
+ accepted properly.
+
+ rp_filter must be set to a non-zero value in order for
+ accept_local to have an effect.
+
+ default FALSE
+
+route_localnet - BOOLEAN
+ Do not consider loopback addresses as martian source or destination
+ while routing. This enables the use of 127/8 for local routing purposes.
default FALSE
rp_filter - INTEGER
@@ -1398,6 +1440,20 @@ path_max_retrans - INTEGER
Default: 5
+pf_retrans - INTEGER
+ The number of retransmissions that will be attempted on a given path
+ before traffic is redirected to an alternate transport (should one
+ exist). Note this is distinct from path_max_retrans, as a path that
+ passes the pf_retrans threshold can still be used. Its only
+ deprioritized when a transmission path is selected by the stack. This
+ setting is primarily used to enable fast failover mechanisms without
+ having to reduce path_max_retrans to a very low value. See:
+ http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
+ for details. Note also that a value of pf_retrans > path_max_retrans
+ disables this feature
+
+ Default: 0
+
rto_initial - INTEGER
The initial round trip timeout value in milliseconds that will be used
in calculating round trip times. This is the initial time interval
diff --git a/Documentation/networking/openvswitch.txt b/Documentation/networking/openvswitch.txt
index b8a048b8df3a..8fa2dd1e792e 100644
--- a/Documentation/networking/openvswitch.txt
+++ b/Documentation/networking/openvswitch.txt
@@ -118,7 +118,7 @@ essentially like this, ignoring metadata:
Naively, to add VLAN support, it makes sense to add a new "vlan" flow
key attribute to contain the VLAN tag, then continue to decode the
encapsulated headers beyond the VLAN tag using the existing field
-definitions. With this change, an TCP packet in VLAN 10 would have a
+definitions. With this change, a TCP packet in VLAN 10 would have a
flow key much like this:
eth(...), vlan(vid=10, pcp=0), eth_type(0x0800), ip(proto=6, ...), tcp(...)
diff --git a/Documentation/networking/s2io.txt b/Documentation/networking/s2io.txt
index 4be0c039edbc..d2a9f43b5546 100644
--- a/Documentation/networking/s2io.txt
+++ b/Documentation/networking/s2io.txt
@@ -136,16 +136,6 @@ For more information, please review the AMD8131 errata at
http://vip.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/
26310_AMD-8131_HyperTransport_PCI-X_Tunnel_Revision_Guide_rev_3_18.pdf
-6. Available Downloads
-Neterion "s2io" driver in Red Hat and Suse 2.6-based distributions is kept up
-to date, also the latest "s2io" code (including support for 2.4 kernels) is
-available via "Support" link on the Neterion site: http://www.neterion.com.
-
-For Xframe User Guide (Programming manual), visit ftp site ns1.s2io.com,
-user: linuxdocs password: HALdocs
-
-7. Support
+6. Support
For further support please contact either your 10GbE Xframe NIC vendor (IBM,
-HP, SGI etc.) or click on the "Support" link on the Neterion site:
-http://www.neterion.com.
-
+HP, SGI etc.)
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 5cb9a1972460..c676b9cedbd0 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -257,9 +257,11 @@ reset procedure etc).
o Makefile
o stmmac_main.c: main network device driver;
o stmmac_mdio.c: mdio functions;
+ o stmmac_pci: PCI driver;
+ o stmmac_platform.c: platform driver
o stmmac_ethtool.c: ethtool support;
o stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts
- Only tested on ST40 platforms based.
+ (only tested on ST40 platforms based);
o stmmac.h: private driver structure;
o common.h: common definitions and VFTs;
o descs.h: descriptor structure definitions;
@@ -269,9 +271,11 @@ reset procedure etc).
o dwmac100_core: MAC 100 core and dma code;
o dwmac100_dma.c: dma funtions for the MAC chip;
o dwmac1000.h: specific header file for the MAC;
- o dwmac_lib.c: generic DMA functions shared among chips
- o enh_desc.c: functions for handling enhanced descriptors
- o norm_desc.c: functions for handling normal descriptors
+ o dwmac_lib.c: generic DMA functions shared among chips;
+ o enh_desc.c: functions for handling enhanced descriptors;
+ o norm_desc.c: functions for handling normal descriptors;
+ o chain_mode.c/ring_mode.c:: functions to manage RING/CHAINED modes;
+ o mmc_core.c/mmc.h: Management MAC Counters;
5) Debug Information
@@ -304,7 +308,27 @@ All these are only useful during the developing stage
and should never enabled inside the code for general usage.
In fact, these can generate an huge amount of debug messages.
-6) TODO:
+6) Energy Efficient Ethernet
+
+Energy Efficient Ethernet(EEE) enables IEEE 802.3 MAC sublayer along
+with a family of Physical layer to operate in the Low power Idle(LPI)
+mode. The EEE mode supports the IEEE 802.3 MAC operation at 100Mbps,
+1000Mbps & 10Gbps.
+
+The LPI mode allows power saving by switching off parts of the
+communication device functionality when there is no data to be
+transmitted & received. The system on both the side of the link can
+disable some functionalities & save power during the period of low-link
+utilization. The MAC controls whether the system should enter or exit
+the LPI mode & communicate this to PHY.
+
+As soon as the interface is opened, the driver verifies if the EEE can
+be supported. This is done by looking at both the DMA HW capability
+register and the PHY devices MCD registers.
+To enter in Tx LPI mode the driver needs to have a software timer
+that enable and disable the LPI mode when there is nothing to be
+transmitted.
+
+7) TODO:
o XGMAC is not supported.
- o Add the EEE - Energy Efficient Ethernet
o Add the PTP - precision time protocol
diff --git a/Documentation/networking/vxge.txt b/Documentation/networking/vxge.txt
index d2e2997e6fa0..bb76c667a476 100644
--- a/Documentation/networking/vxge.txt
+++ b/Documentation/networking/vxge.txt
@@ -91,10 +91,3 @@ v) addr_learn_en
virtualization environment.
Valid range: 0,1 (disabled, enabled respectively)
Default: 0
-
-4) Troubleshooting:
--------------------
-
-To resolve an issue with the source code or X3100 series adapter, please collect
-the statistics, register dumps using ethool, relevant logs and email them to
-support@neterion.com.
diff --git a/Documentation/nfc/nfc-hci.txt b/Documentation/nfc/nfc-hci.txt
index 320f9336c781..89a339c9b079 100644
--- a/Documentation/nfc/nfc-hci.txt
+++ b/Documentation/nfc/nfc-hci.txt
@@ -178,3 +178,36 @@ ANY_GET_PARAMETER to the reader A gate to get information on the target
that was discovered).
Typically, such an event will be propagated to NFC Core from MSGRXWQ context.
+
+Error management
+----------------
+
+Errors that occur synchronously with the execution of an NFC Core request are
+simply returned as the execution result of the request. These are easy.
+
+Errors that occur asynchronously (e.g. in a background protocol handling thread)
+must be reported such that upper layers don't stay ignorant that something
+went wrong below and know that expected events will probably never happen.
+Handling of these errors is done as follows:
+
+- driver (pn544) fails to deliver an incoming frame: it stores the error such
+that any subsequent call to the driver will result in this error. Then it calls
+the standard nfc_shdlc_recv_frame() with a NULL argument to report the problem
+above. shdlc stores a EREMOTEIO sticky status, which will trigger SMW to
+report above in turn.
+
+- SMW is basically a background thread to handle incoming and outgoing shdlc
+frames. This thread will also check the shdlc sticky status and report to HCI
+when it discovers it is not able to run anymore because of an unrecoverable
+error that happened within shdlc or below. If the problem occurs during shdlc
+connection, the error is reported through the connect completion.
+
+- HCI: if an internal HCI error happens (frame is lost), or HCI is reported an
+error from a lower layer, HCI will either complete the currently executing
+command with that error, or notify NFC Core directly if no command is executing.
+
+- NFC Core: when NFC Core is notified of an error from below and polling is
+active, it will send a tag discovered event with an empty tag list to the user
+space to let it know that the poll operation will never be able to detect a tag.
+If polling is not active and the error was sticky, lower levels will return it
+at next invocation.
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 872815cd41d3..504dfe4d52eb 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -583,9 +583,10 @@ for the given device during all power transitions, instead of the respective
subsystem-level callbacks. Specifically, if a device's pm_domain pointer is
not NULL, the ->suspend() callback from the object pointed to by it will be
executed instead of its subsystem's (e.g. bus type's) ->suspend() callback and
-anlogously for all of the remaining callbacks. In other words, power management
-domain callbacks, if defined for the given device, always take precedence over
-the callbacks provided by the device's subsystem (e.g. bus type).
+analogously for all of the remaining callbacks. In other words, power
+management domain callbacks, if defined for the given device, always take
+precedence over the callbacks provided by the device's subsystem (e.g. bus
+type).
The support for device power management domains is only relevant to platforms
needing to use the same device driver power management callbacks in many
@@ -598,7 +599,7 @@ it into account in any way.
Device Low Power (suspend) States
---------------------------------
Device low-power states aren't standard. One device might only handle
-"on" and "off, while another might support a dozen different versions of
+"on" and "off", while another might support a dozen different versions of
"on" (how many engines are active?), plus a state that gets back to "on"
faster than from a full "off".
diff --git a/Documentation/power/swsusp.txt b/Documentation/power/swsusp.txt
index ac190cf1963e..92341b84250d 100644
--- a/Documentation/power/swsusp.txt
+++ b/Documentation/power/swsusp.txt
@@ -33,6 +33,11 @@ echo shutdown > /sys/power/disk; echo disk > /sys/power/state
echo platform > /sys/power/disk; echo disk > /sys/power/state
+. If you would like to write hibernation image to swap and then suspend
+to RAM (provided your platform supports it), you can try
+
+echo suspend > /sys/power/disk; echo disk > /sys/power/state
+
. If you have SATA disks, you'll need recent kernels with SATA suspend
support. For suspend and resume to work, make sure your disk drivers
are built into kernel -- not modules. [There's way to make
diff --git a/Documentation/prctl/no_new_privs.txt b/Documentation/prctl/no_new_privs.txt
new file mode 100644
index 000000000000..f7be84fba910
--- /dev/null
+++ b/Documentation/prctl/no_new_privs.txt
@@ -0,0 +1,57 @@
+The execve system call can grant a newly-started program privileges that
+its parent did not have. The most obvious examples are setuid/setgid
+programs and file capabilities. To prevent the parent program from
+gaining these privileges as well, the kernel and user code must be
+careful to prevent the parent from doing anything that could subvert the
+child. For example:
+
+ - The dynamic loader handles LD_* environment variables differently if
+ a program is setuid.
+
+ - chroot is disallowed to unprivileged processes, since it would allow
+ /etc/passwd to be replaced from the point of view of a process that
+ inherited chroot.
+
+ - The exec code has special handling for ptrace.
+
+These are all ad-hoc fixes. The no_new_privs bit (since Linux 3.5) is a
+new, generic mechanism to make it safe for a process to modify its
+execution environment in a manner that persists across execve. Any task
+can set no_new_privs. Once the bit is set, it is inherited across fork,
+clone, and execve and cannot be unset. With no_new_privs set, execve
+promises not to grant the privilege to do anything that could not have
+been done without the execve call. For example, the setuid and setgid
+bits will no longer change the uid or gid; file capabilities will not
+add to the permitted set, and LSMs will not relax constraints after
+execve.
+
+To set no_new_privs, use prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0).
+
+Be careful, though: LSMs might also not tighten constraints on exec
+in no_new_privs mode. (This means that setting up a general-purpose
+service launcher to set no_new_privs before execing daemons may
+interfere with LSM-based sandboxing.)
+
+Note that no_new_privs does not prevent privilege changes that do not
+involve execve. An appropriately privileged task can still call
+setuid(2) and receive SCM_RIGHTS datagrams.
+
+There are two main use cases for no_new_privs so far:
+
+ - Filters installed for the seccomp mode 2 sandbox persist across
+ execve and can change the behavior of newly-executed programs.
+ Unprivileged users are therefore only allowed to install such filters
+ if no_new_privs is set.
+
+ - By itself, no_new_privs can be used to reduce the attack surface
+ available to an unprivileged user. If everything running with a
+ given uid has no_new_privs set, then that uid will be unable to
+ escalate its privileges by directly attacking setuid, setgid, and
+ fcap-using binaries; it will need to compromise something without the
+ no_new_privs bit set first.
+
+In the future, other potentially dangerous kernel features could become
+available to unprivileged tasks if no_new_privs is set. In principle,
+several options to unshare(2) and clone(2) would be safe when
+no_new_privs is set, and no_new_privs + chroot is considerable less
+dangerous than chroot by itself.
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 221b81016dba..4e4d0bc9816f 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -875,8 +875,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
setup before initializing the codecs. This option is
available only when CONFIG_SND_HDA_PATCH_LOADER=y is set.
See HD-Audio.txt for details.
- beep_mode - Selects the beep registration mode (0=off, 1=on, 2=
- dynamic registration via mute switch on/off); the default
+ beep_mode - Selects the beep registration mode (0=off, 1=on); default
value is set via CONFIG_SND_HDA_INPUT_BEEP_MODE kconfig.
[Single (global) options]
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 03f7897c6414..7456360e161c 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -15,19 +15,24 @@ ALC260
ALC262
======
- N/A
+ inv-dmic Inverted internal mic workaround
ALC267/268
==========
- N/A
+ inv-dmic Inverted internal mic workaround
-ALC269
+ALC269/270/275/276/280/282
======
laptop-amic Laptops with analog-mic input
laptop-dmic Laptops with digital-mic input
+ alc269-dmic Enable ALC269(VA) digital mic workaround
+ alc271-dmic Enable ALC271X digital mic workaround
+ inv-dmic Inverted internal mic workaround
+ lenovo-dock Enables docking station I/O for some Lenovos
ALC662/663/272
==============
+ mario Chromebook mario model fixup
asus-mode1 ASUS
asus-mode2 ASUS
asus-mode3 ASUS
@@ -36,6 +41,7 @@ ALC662/663/272
asus-mode6 ASUS
asus-mode7 ASUS
asus-mode8 ASUS
+ inv-dmic Inverted internal mic workaround
ALC680
======
@@ -46,6 +52,7 @@ ALC882/883/885/888/889
acer-aspire-4930g Acer Aspire 4930G/5930G/6530G/6930G/7730G
acer-aspire-8930g Acer Aspire 8330G/6935G
acer-aspire Acer Aspire others
+ inv-dmic Inverted internal mic workaround
ALC861/660
==========
diff --git a/Documentation/sound/alsa/hdspm.txt b/Documentation/sound/alsa/hdspm.txt
index 7a67ff71a9f8..7ba31948dea7 100644
--- a/Documentation/sound/alsa/hdspm.txt
+++ b/Documentation/sound/alsa/hdspm.txt
@@ -359,4 +359,4 @@ Calling Parameter:
enable_monitor int array (min = 1, max = 8),
"Enable Analog Out on Channel 63/64 by default."
- note: here the analog output is enabled (but not routed). \ No newline at end of file
+ note: here the analog output is enabled (but not routed).
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index f0ab5cf28fca..4a7b54bd37e8 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -12,6 +12,12 @@ Rules on what kind of patches are accepted, and which ones are not, into the
marked CONFIG_BROKEN), an oops, a hang, data corruption, a real
security issue, or some "oh, that's not good" issue. In short, something
critical.
+ - Serious issues as reported by a user of a distribution kernel may also
+ be considered if they fix a notable performance or interactivity issue.
+ As these fixes are not as obvious and have a higher risk of a subtle
+ regression they should only be submitted by a distribution kernel
+ maintainer and include an addendum linking to a bugzilla entry if it
+ exists and additional information on the user-visible impact.
- New device IDs and quirks are also accepted.
- No "theoretical race condition" issues, unless an explanation of how the
race can be exploited is also provided.
diff --git a/Documentation/video4linux/cpia2_overview.txt b/Documentation/video4linux/cpia2_overview.txt
index a6e53665216b..ad6adbedfe50 100644
--- a/Documentation/video4linux/cpia2_overview.txt
+++ b/Documentation/video4linux/cpia2_overview.txt
@@ -35,4 +35,4 @@ the camera. There are three modes for this. Block mode requests a number
of contiguous registers. Random mode reads or writes random registers with
a tuple structure containing address/value pairs. The repeat mode is only
used by VP4 to load a firmware patch. It contains a starting address and
-a sequence of bytes to be written into a gpio port. \ No newline at end of file
+a sequence of bytes to be written into a gpio port.
diff --git a/Documentation/video4linux/stv680.txt b/Documentation/video4linux/stv680.txt
index 4f8946f32f51..e3de33645308 100644
--- a/Documentation/video4linux/stv680.txt
+++ b/Documentation/video4linux/stv680.txt
@@ -50,4 +50,4 @@ The latest info on this driver can be found at:
http://personal.clt.bellsouth.net/~kjsisson or at
http://stv0680-usb.sourceforge.net
-Any questions to me can be send to: kjsisson@bellsouth.net \ No newline at end of file
+Any questions to me can be send to: kjsisson@bellsouth.net
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 930126698a0f..bf33aaa4c59f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -1930,6 +1930,57 @@ The "pte_enc" field provides a value that can OR'ed into the hash
PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
into the hash PTE second double word).
+4.75 KVM_IRQFD
+
+Capability: KVM_CAP_IRQFD
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_irqfd (in)
+Returns: 0 on success, -1 on error
+
+Allows setting an eventfd to directly trigger a guest interrupt.
+kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
+kvm_irqfd.gsi specifies the irqchip pin toggled by this event. When
+an event is tiggered on the eventfd, an interrupt is injected into
+the guest using the specified gsi pin. The irqfd is removed using
+the KVM_IRQFD_FLAG_DEASSIGN flag, specifying both kvm_irqfd.fd
+and kvm_irqfd.gsi.
+
+4.76 KVM_PPC_ALLOCATE_HTAB
+
+Capability: KVM_CAP_PPC_ALLOC_HTAB
+Architectures: powerpc
+Type: vm ioctl
+Parameters: Pointer to u32 containing hash table order (in/out)
+Returns: 0 on success, -1 on error
+
+This requests the host kernel to allocate an MMU hash table for a
+guest using the PAPR paravirtualization interface. This only does
+anything if the kernel is configured to use the Book 3S HV style of
+virtualization. Otherwise the capability doesn't exist and the ioctl
+returns an ENOTTY error. The rest of this description assumes Book 3S
+HV.
+
+There must be no vcpus running when this ioctl is called; if there
+are, it will do nothing and return an EBUSY error.
+
+The parameter is a pointer to a 32-bit unsigned integer variable
+containing the order (log base 2) of the desired size of the hash
+table, which must be between 18 and 46. On successful return from the
+ioctl, it will have been updated with the order of the hash table that
+was allocated.
+
+If no hash table has been allocated when any vcpu is asked to run
+(with the KVM_RUN ioctl), the host kernel will allocate a
+default-sized hash table (16 MB).
+
+If this ioctl is called when a hash table has already been allocated,
+the kernel will clear out the existing hash table (zero all HPTEs) and
+return the hash table order in the parameter. (If the guest is using
+the virtualized real-mode area (VRMA) facility, the kernel will
+re-create the VMRA HPTEs on the next KVM_RUN of any vcpu.)
+
+
5. The kvm_run structure
------------------------
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index 3b4cd3bf5631..41b7ac9884b5 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -6,7 +6,129 @@ KVM Lock Overview
(to be written)
-2. Reference
+2: Exception
+------------
+
+Fast page fault:
+
+Fast page fault is the fast path which fixes the guest page fault out of
+the mmu-lock on x86. Currently, the page fault can be fast only if the
+shadow page table is present and it is caused by write-protect, that means
+we just need change the W bit of the spte.
+
+What we use to avoid all the race is the SPTE_HOST_WRITEABLE bit and
+SPTE_MMU_WRITEABLE bit on the spte:
+- SPTE_HOST_WRITEABLE means the gfn is writable on host.
+- SPTE_MMU_WRITEABLE means the gfn is writable on mmu. The bit is set when
+ the gfn is writable on guest mmu and it is not write-protected by shadow
+ page write-protection.
+
+On fast page fault path, we will use cmpxchg to atomically set the spte W
+bit if spte.SPTE_HOST_WRITEABLE = 1 and spte.SPTE_WRITE_PROTECT = 1, this
+is safe because whenever changing these bits can be detected by cmpxchg.
+
+But we need carefully check these cases:
+1): The mapping from gfn to pfn
+The mapping from gfn to pfn may be changed since we can only ensure the pfn
+is not changed during cmpxchg. This is a ABA problem, for example, below case
+will happen:
+
+At the beginning:
+gpte = gfn1
+gfn1 is mapped to pfn1 on host
+spte is the shadow page table entry corresponding with gpte and
+spte = pfn1
+
+ VCPU 0 VCPU0
+on fast page fault path:
+
+ old_spte = *spte;
+ pfn1 is swapped out:
+ spte = 0;
+
+ pfn1 is re-alloced for gfn2.
+
+ gpte is changed to point to
+ gfn2 by the guest:
+ spte = pfn1;
+
+ if (cmpxchg(spte, old_spte, old_spte+W)
+ mark_page_dirty(vcpu->kvm, gfn1)
+ OOPS!!!
+
+We dirty-log for gfn1, that means gfn2 is lost in dirty-bitmap.
+
+For direct sp, we can easily avoid it since the spte of direct sp is fixed
+to gfn. For indirect sp, before we do cmpxchg, we call gfn_to_pfn_atomic()
+to pin gfn to pfn, because after gfn_to_pfn_atomic():
+- We have held the refcount of pfn that means the pfn can not be freed and
+ be reused for another gfn.
+- The pfn is writable that means it can not be shared between different gfns
+ by KSM.
+
+Then, we can ensure the dirty bitmaps is correctly set for a gfn.
+
+Currently, to simplify the whole things, we disable fast page fault for
+indirect shadow page.
+
+2): Dirty bit tracking
+In the origin code, the spte can be fast updated (non-atomically) if the
+spte is read-only and the Accessed bit has already been set since the
+Accessed bit and Dirty bit can not be lost.
+
+But it is not true after fast page fault since the spte can be marked
+writable between reading spte and updating spte. Like below case:
+
+At the beginning:
+spte.W = 0
+spte.Accessed = 1
+
+ VCPU 0 VCPU0
+In mmu_spte_clear_track_bits():
+
+ old_spte = *spte;
+
+ /* 'if' condition is satisfied. */
+ if (old_spte.Accssed == 1 &&
+ old_spte.W == 0)
+ spte = 0ull;
+ on fast page fault path:
+ spte.W = 1
+ memory write on the spte:
+ spte.Dirty = 1
+
+
+ else
+ old_spte = xchg(spte, 0ull)
+
+
+ if (old_spte.Accssed == 1)
+ kvm_set_pfn_accessed(spte.pfn);
+ if (old_spte.Dirty == 1)
+ kvm_set_pfn_dirty(spte.pfn);
+ OOPS!!!
+
+The Dirty bit is lost in this case.
+
+In order to avoid this kind of issue, we always treat the spte as "volatile"
+if it can be updated out of mmu-lock, see spte_has_volatile_bits(), it means,
+the spte is always atomicly updated in this case.
+
+3): flush tlbs due to spte updated
+If the spte is updated from writable to readonly, we should flush all TLBs,
+otherwise rmap_write_protect will find a read-only spte, even though the
+writable spte might be cached on a CPU's TLB.
+
+As mentioned before, the spte can be updated to writable out of mmu-lock on
+fast page fault path, in order to easily audit the path, we see if TLBs need
+be flushed caused by this reason in mmu_spte_update() since this is a common
+function to update spte (present -> present).
+
+Since the spte is "volatile" if it can be updated out of mmu-lock, we always
+atomicly update the spte, the race caused by fast page fault can be avoided,
+See the comments in spte_has_volatile_bits() and mmu_spte_update().
+
+3. Reference
------------
Name: kvm_lock
@@ -23,3 +145,9 @@ Arch: x86
Protects: - kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset}
- tsc offset in vmcb
Comment: 'raw' because updating the tsc offsets must not be preempted.
+
+Name: kvm->mmu_lock
+Type: spinlock_t
+Arch: any
+Protects: -shadow page/shadow tlb entry
+Comment: it is a spinlock since it is used in mmu notifier.
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 96b41bd97523..730471048583 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -223,3 +223,36 @@ MSR_KVM_STEAL_TIME: 0x4b564d03
steal: the amount of time in which this vCPU did not run, in
nanoseconds. Time during which the vcpu is idle, will not be
reported as steal time.
+
+MSR_KVM_EOI_EN: 0x4b564d04
+ data: Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0
+ when disabled. Bit 1 is reserved and must be zero. When PV end of
+ interrupt is enabled (bit 0 set), bits 63-2 hold a 4-byte aligned
+ physical address of a 4 byte memory area which must be in guest RAM and
+ must be zeroed.
+
+ The first, least significant bit of 4 byte memory location will be
+ written to by the hypervisor, typically at the time of interrupt
+ injection. Value of 1 means that guest can skip writing EOI to the apic
+ (using MSR or MMIO write); instead, it is sufficient to signal
+ EOI by clearing the bit in guest memory - this location will
+ later be polled by the hypervisor.
+ Value of 0 means that the EOI write is required.
+
+ It is always safe for the guest to ignore the optimization and perform
+ the APIC EOI write anyway.
+
+ Hypervisor is guaranteed to only modify this least
+ significant bit while in the current VCPU context, this means that
+ guest does not need to use either lock prefix or memory ordering
+ primitives to synchronise with the hypervisor.
+
+ However, hypervisor can set and clear this memory bit at any time:
+ therefore to make sure hypervisor does not interrupt the
+ guest and clear the least significant bit in the memory area
+ in the window between guest testing it to detect
+ whether it can skip EOI apic write and between guest
+ clearing it to signal EOI to the hypervisor,
+ guest must both read the least significant bit in the memory area and
+ clear it using a single CPU instruction, such as test and clear, or
+ compare and exchange.
diff --git a/Documentation/virtual/kvm/ppc-pv.txt b/Documentation/virtual/kvm/ppc-pv.txt
index 6e7c37050930..4911cf95c67e 100644
--- a/Documentation/virtual/kvm/ppc-pv.txt
+++ b/Documentation/virtual/kvm/ppc-pv.txt
@@ -109,8 +109,6 @@ The following bits are safe to be set inside the guest:
MSR_EE
MSR_RI
- MSR_CR
- MSR_ME
If any other bit changes in the MSR, please still use mtmsr(d).
diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt
index 37067cf455f4..5ef2d1366425 100644
--- a/Documentation/vm/frontswap.txt
+++ b/Documentation/vm/frontswap.txt
@@ -25,7 +25,7 @@ with the specified swap device number (aka "type"). A "store" will
copy the page to transcendent memory and associate it with the type and
offset associated with the page. A "load" will copy the page, if found,
from transcendent memory into kernel memory, but will NOT remove the page
-from from transcendent memory. An "invalidate_page" will remove the page
+from transcendent memory. An "invalidate_page" will remove the page
from transcendent memory and an "invalidate_area" will remove ALL pages
associated with the swap type (e.g., like swapoff) and notify the "device"
to refuse further stores with that swap type.
@@ -99,7 +99,7 @@ server configured with a large amount of RAM... without pre-configuring
how much of the RAM is available for each of the clients!
In the virtual case, the whole point of virtualization is to statistically
-multiplex physical resources acrosst the varying demands of multiple
+multiplex physical resources across the varying demands of multiple
virtual machines. This is really hard to do with RAM and efforts to do
it well with no kernel changes have essentially failed (except in some
well-publicized special-case workloads).
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
index a0b577de918f..a6ab4b62d926 100644
--- a/Documentation/workqueue.txt
+++ b/Documentation/workqueue.txt
@@ -89,25 +89,28 @@ called thread-pools.
The cmwq design differentiates between the user-facing workqueues that
subsystems and drivers queue work items on and the backend mechanism
-which manages thread-pool and processes the queued work items.
+which manages thread-pools and processes the queued work items.
The backend is called gcwq. There is one gcwq for each possible CPU
-and one gcwq to serve work items queued on unbound workqueues.
+and one gcwq to serve work items queued on unbound workqueues. Each
+gcwq has two thread-pools - one for normal work items and the other
+for high priority ones.
Subsystems and drivers can create and queue work items through special
workqueue API functions as they see fit. They can influence some
aspects of the way the work items are executed by setting flags on the
workqueue they are putting the work item on. These flags include
-things like CPU locality, reentrancy, concurrency limits and more. To
-get a detailed overview refer to the API description of
+things like CPU locality, reentrancy, concurrency limits, priority and
+more. To get a detailed overview refer to the API description of
alloc_workqueue() below.
-When a work item is queued to a workqueue, the target gcwq is
-determined according to the queue parameters and workqueue attributes
-and appended on the shared worklist of the gcwq. For example, unless
-specifically overridden, a work item of a bound workqueue will be
-queued on the worklist of exactly that gcwq that is associated to the
-CPU the issuer is running on.
+When a work item is queued to a workqueue, the target gcwq and
+thread-pool is determined according to the queue parameters and
+workqueue attributes and appended on the shared worklist of the
+thread-pool. For example, unless specifically overridden, a work item
+of a bound workqueue will be queued on the worklist of either normal
+or highpri thread-pool of the gcwq that is associated to the CPU the
+issuer is running on.
For any worker pool implementation, managing the concurrency level
(how many execution contexts are active) is an important issue. cmwq
@@ -115,26 +118,26 @@ tries to keep the concurrency at a minimal but sufficient level.
Minimal to save resources and sufficient in that the system is used at
its full capacity.
-Each gcwq bound to an actual CPU implements concurrency management by
-hooking into the scheduler. The gcwq is notified whenever an active
-worker wakes up or sleeps and keeps track of the number of the
-currently runnable workers. Generally, work items are not expected to
-hog a CPU and consume many cycles. That means maintaining just enough
-concurrency to prevent work processing from stalling should be
-optimal. As long as there are one or more runnable workers on the
-CPU, the gcwq doesn't start execution of a new work, but, when the
-last running worker goes to sleep, it immediately schedules a new
-worker so that the CPU doesn't sit idle while there are pending work
-items. This allows using a minimal number of workers without losing
-execution bandwidth.
+Each thread-pool bound to an actual CPU implements concurrency
+management by hooking into the scheduler. The thread-pool is notified
+whenever an active worker wakes up or sleeps and keeps track of the
+number of the currently runnable workers. Generally, work items are
+not expected to hog a CPU and consume many cycles. That means
+maintaining just enough concurrency to prevent work processing from
+stalling should be optimal. As long as there are one or more runnable
+workers on the CPU, the thread-pool doesn't start execution of a new
+work, but, when the last running worker goes to sleep, it immediately
+schedules a new worker so that the CPU doesn't sit idle while there
+are pending work items. This allows using a minimal number of workers
+without losing execution bandwidth.
Keeping idle workers around doesn't cost other than the memory space
for kthreads, so cmwq holds onto idle ones for a while before killing
them.
For an unbound wq, the above concurrency management doesn't apply and
-the gcwq for the pseudo unbound CPU tries to start executing all work
-items as soon as possible. The responsibility of regulating
+the thread-pools for the pseudo unbound CPU try to start executing all
+work items as soon as possible. The responsibility of regulating
concurrency level is on the users. There is also a flag to mark a
bound wq to ignore the concurrency management. Please refer to the
API section for details.
@@ -205,31 +208,22 @@ resources, scheduled and executed.
WQ_HIGHPRI
- Work items of a highpri wq are queued at the head of the
- worklist of the target gcwq and start execution regardless of
- the current concurrency level. In other words, highpri work
- items will always start execution as soon as execution
- resource is available.
+ Work items of a highpri wq are queued to the highpri
+ thread-pool of the target gcwq. Highpri thread-pools are
+ served by worker threads with elevated nice level.
- Ordering among highpri work items is preserved - a highpri
- work item queued after another highpri work item will start
- execution after the earlier highpri work item starts.
-
- Although highpri work items are not held back by other
- runnable work items, they still contribute to the concurrency
- level. Highpri work items in runnable state will prevent
- non-highpri work items from starting execution.
-
- This flag is meaningless for unbound wq.
+ Note that normal and highpri thread-pools don't interact with
+ each other. Each maintain its separate pool of workers and
+ implements concurrency management among its workers.
WQ_CPU_INTENSIVE
Work items of a CPU intensive wq do not contribute to the
concurrency level. In other words, runnable CPU intensive
- work items will not prevent other work items from starting
- execution. This is useful for bound work items which are
- expected to hog CPU cycles so that their execution is
- regulated by the system scheduler.
+ work items will not prevent other work items in the same
+ thread-pool from starting execution. This is useful for bound
+ work items which are expected to hog CPU cycles so that their
+ execution is regulated by the system scheduler.
Although CPU intensive work items don't contribute to the
concurrency level, start of their executions is still
@@ -239,14 +233,6 @@ resources, scheduled and executed.
This flag is meaningless for unbound wq.
- WQ_HIGHPRI | WQ_CPU_INTENSIVE
-
- This combination makes the wq avoid interaction with
- concurrency management completely and behave as a simple
- per-CPU execution context provider. Work items queued on a
- highpri CPU-intensive wq start execution as soon as resources
- are available and don't affect execution of other work items.
-
@max_active:
@max_active determines the maximum number of execution contexts per
@@ -328,20 +314,7 @@ If @max_active == 2,
35 w2 wakes up and finishes
Now, let's assume w1 and w2 are queued to a different wq q1 which has
-WQ_HIGHPRI set,
-
- TIME IN MSECS EVENT
- 0 w1 and w2 start and burn CPU
- 5 w1 sleeps
- 10 w2 sleeps
- 10 w0 starts and burns CPU
- 15 w0 sleeps
- 15 w1 wakes up and finishes
- 20 w2 wakes up and finishes
- 25 w0 wakes up and burns CPU
- 30 w0 finishes
-
-If q1 has WQ_CPU_INTENSIVE set,
+WQ_CPU_INTENSIVE set,
TIME IN MSECS EVENT
0 w0 starts and burns CPU
diff --git a/MAINTAINERS b/MAINTAINERS
index eb22272b2116..9b8a5daaf403 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -329,7 +329,7 @@ F: drivers/hwmon/adm1029.c
ADM8211 WIRELESS DRIVER
L: linux-wireless@vger.kernel.org
-W: http://linuxwireless.org/
+W: http://wireless.kernel.org/
S: Orphan
F: drivers/net/wireless/adm8211.*
@@ -894,6 +894,14 @@ ARM/MAGICIAN MACHINE SUPPORT
M: Philipp Zabel <philipp.zabel@gmail.com>
S: Maintained
+ARM/Marvell Armada 370 and Armada XP SOC support
+M: Jason Cooper <jason@lakedaemon.net>
+M: Andrew Lunn <andrew@lunn.ch>
+M: Gregory Clement <gregory.clement@free-electrons.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-mvebu/
+
ARM/Marvell Dove/Kirkwood/MV78xx0/Orion SOC support
M: Jason Cooper <jason@lakedaemon.net>
M: Andrew Lunn <andrew@lunn.ch>
@@ -1103,6 +1111,16 @@ S: Supported
F: arch/arm/mach-shmobile/
F: drivers/sh/
+ARM/SOCFPGA ARCHITECTURE
+M: Dinh Nguyen <dinguyen@altera.com>
+S: Maintained
+F: arch/arm/mach-socfpga/
+
+ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
+M: Dinh Nguyen <dinguyen@altera.com>
+S: Maintained
+F: drivers/clk/socfpga/
+
ARM/TECHNOLOGIC SYSTEMS TS7250 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1423,7 +1441,7 @@ B43 WIRELESS DRIVER
M: Stefano Brivio <stefano.brivio@polimi.it>
L: linux-wireless@vger.kernel.org
L: b43-dev@lists.infradead.org
-W: http://linuxwireless.org/en/users/Drivers/b43
+W: http://wireless.kernel.org/en/users/Drivers/b43
S: Maintained
F: drivers/net/wireless/b43/
@@ -1432,7 +1450,7 @@ M: Larry Finger <Larry.Finger@lwfinger.net>
M: Stefano Brivio <stefano.brivio@polimi.it>
L: linux-wireless@vger.kernel.org
L: b43-dev@lists.infradead.org
-W: http://linuxwireless.org/en/users/Drivers/b43
+W: http://wireless.kernel.org/en/users/Drivers/b43
S: Maintained
F: drivers/net/wireless/b43legacy/
@@ -1595,6 +1613,7 @@ M: Arend van Spriel <arend@broadcom.com>
M: Franky (Zhenhui) Lin <frankyl@broadcom.com>
M: Kan Yan <kanyan@broadcom.com>
L: linux-wireless@vger.kernel.org
+L: brcm80211-dev-list@broadcom.com
S: Supported
F: drivers/net/wireless/brcm80211/
@@ -3433,13 +3452,14 @@ S: Supported
F: drivers/idle/i7300_idle.c
IEEE 802.15.4 SUBSYSTEM
+M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
-M: Sergey Lapin <slapin@ossfans.org>
L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://apps.sourceforge.net/trac/linux-zigbee
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
S: Maintained
F: net/ieee802154/
+F: net/mac802154/
F: drivers/ieee802154/
IIO SUBSYSTEM AND DRIVERS
@@ -3660,14 +3680,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
S: Supported
F: drivers/net/wireless/iwlwifi/
-INTEL WIRELESS MULTICOMM 3200 WIFI (iwmc3200wifi)
-M: Samuel Ortiz <samuel.ortiz@intel.com>
-M: Intel Linux Wireless <ilw@linux.intel.com>
-L: linux-wireless@vger.kernel.org
-S: Supported
-W: http://wireless.kernel.org/en/users/Drivers/iwmc3200wifi
-F: drivers/net/wireless/iwmc3200wifi/
-
INTEL MANAGEMENT ENGINE (mei)
M: Tomas Winkler <tomas.winkler@intel.com>
L: linux-kernel@vger.kernel.org
@@ -3990,8 +4002,8 @@ F: arch/ia64/include/asm/kvm*
F: arch/ia64/kvm/
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
-M: Carsten Otte <cotte@de.ibm.com>
M: Christian Borntraeger <borntraeger@de.ibm.com>
+M: Cornelia Huck <cornelia.huck@de.ibm.com>
M: linux390@de.ibm.com
L: linux-s390@vger.kernel.org
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -4351,7 +4363,7 @@ F: arch/m68k/hp300/
MAC80211
M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
-W: http://linuxwireless.org/
+W: http://wireless.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
S: Maintained
@@ -4363,7 +4375,7 @@ MAC80211 PID RATE CONTROL
M: Stefano Brivio <stefano.brivio@polimi.it>
M: Mattias Nissler <mattias.nissler@gmx.de>
L: linux-wireless@vger.kernel.org
-W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID
+W: http://wireless.kernel.org/en/developers/Documentation/mac80211/RateControl/PID
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
S: Maintained
@@ -4592,7 +4604,6 @@ S: Maintained
F: drivers/usb/musb/
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Jon Mason <mason@myri.com>
M: Andrew Gallatin <gallatin@myri.com>
L: netdev@vger.kernel.org
W: http://www.myri.com/scs/download-Myri10GE.html
@@ -4637,8 +4648,6 @@ F: net/sched/sch_netem.c
NETERION 10GbE DRIVERS (s2io/vxge)
M: Jon Mason <jdmason@kudzu.us>
L: netdev@vger.kernel.org
-W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
-W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
S: Supported
F: Documentation/networking/s2io.txt
F: Documentation/networking/vxge.txt
@@ -4654,8 +4663,8 @@ L: netfilter@vger.kernel.org
L: coreteam@netfilter.org
W: http://www.netfilter.org/
W: http://www.iptables.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-2.6.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next-2.6.git
+T: git git://1984.lsi.us.es/nf
+T: git git://1984.lsi.us.es/nf-next
S: Supported
F: include/linux/netfilter*
F: include/linux/netfilter/
@@ -4857,6 +4866,7 @@ M: Kevin Hilman <khilman@ti.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/*omap*/*pm*
+F: drivers/cpufreq/omap-cpufreq.c
OMAP POWERDOMAIN/CLOCKDOMAIN SOC ADAPTATION LAYER SUPPORT
M: Rajendra Nayak <rnayak@ti.com>
@@ -5048,7 +5058,7 @@ F: fs/ocfs2/
ORINOCO DRIVER
L: linux-wireless@vger.kernel.org
-W: http://linuxwireless.org/en/users/Drivers/orinoco
+W: http://wireless.kernel.org/en/users/Drivers/orinoco
W: http://www.nongnu.org/orinoco/
S: Orphan
F: drivers/net/wireless/orinoco/
@@ -5200,7 +5210,7 @@ PCI SUBSYSTEM
M: Bjorn Helgaas <bhelgaas@google.com>
L: linux-pci@vger.kernel.org
Q: http://patchwork.ozlabs.org/project/linux-pci/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
S: Supported
F: Documentation/PCI/
F: drivers/pci/
@@ -5563,7 +5573,7 @@ F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
+M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
M: linux-driver@qlogic.com
L: netdev@vger.kernel.org
@@ -5571,7 +5581,6 @@ S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
QLOGIC QLGE 10Gb ETHERNET DRIVER
-M: Anirban Chakraborty <anirban.chakraborty@qlogic.com>
M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
M: Ron Mercer <ron.mercer@qlogic.com>
M: linux-driver@qlogic.com
@@ -5753,7 +5762,7 @@ F: net/rose/
RTL8180 WIRELESS DRIVER
M: "John W. Linville" <linville@tuxdriver.com>
L: linux-wireless@vger.kernel.org
-W: http://linuxwireless.org/
+W: http://wireless.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/rtl818x/rtl8180/
@@ -5763,7 +5772,7 @@ M: Herton Ronaldo Krzesinski <herton@canonical.com>
M: Hin-Tak Leung <htl10@users.sourceforge.net>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
-W: http://linuxwireless.org/
+W: http://wireless.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/rtl818x/rtl8187/
@@ -5772,7 +5781,7 @@ RTL8192CE WIRELESS DRIVER
M: Larry Finger <Larry.Finger@lwfinger.net>
M: Chaoming Li <chaoming_li@realsil.com.cn>
L: linux-wireless@vger.kernel.org
-W: http://linuxwireless.org/
+W: http://wireless.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/rtlwifi/
@@ -5909,7 +5918,7 @@ M: Ingo Molnar <mingo@redhat.com>
M: Peter Zijlstra <peterz@infradead.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
S: Maintained
-F: kernel/sched*
+F: kernel/sched/
F: include/linux/sched.h
SCORE ARCHITECTURE
@@ -6211,6 +6220,15 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
F: include/linux/srcu*
F: kernel/srcu*
+SMACK SECURITY MODULE
+M: Casey Schaufler <casey@schaufler-ca.com>
+L: linux-security-module@vger.kernel.org
+W: http://schaufler-ca.com
+T: git git://git.gitorious.org/smack-next/kernel.git
+S: Maintained
+F: Documentation/security/Smack.txt
+F: security/smack/
+
SMC91x ETHERNET DRIVER
M: Nicolas Pitre <nico@fluxnic.net>
S: Odd Fixes
@@ -6224,9 +6242,9 @@ F: Documentation/hwmon/smm665
F: drivers/hwmon/smm665.c
SMSC EMC2103 HARDWARE MONITOR DRIVER
-M: Steve Glendinning <steve.glendinning@smsc.com>
+M: Steve Glendinning <steve.glendinning@shawell.net>
L: lm-sensors@lm-sensors.org
-S: Supported
+S: Maintained
F: Documentation/hwmon/emc2103
F: drivers/hwmon/emc2103.c
@@ -6245,22 +6263,22 @@ F: Documentation/hwmon/smsc47b397
F: drivers/hwmon/smsc47b397.c
SMSC911x ETHERNET DRIVER
-M: Steve Glendinning <steve.glendinning@smsc.com>
+M: Steve Glendinning <steve.glendinning@shawell.net>
L: netdev@vger.kernel.org
-S: Supported
+S: Maintained
F: include/linux/smsc911x.h
F: drivers/net/ethernet/smsc/smsc911x.*
SMSC9420 PCI ETHERNET DRIVER
-M: Steve Glendinning <steve.glendinning@smsc.com>
+M: Steve Glendinning <steve.glendinning@shawell.net>
L: netdev@vger.kernel.org
-S: Supported
+S: Maintained
F: drivers/net/ethernet/smsc/smsc9420.*
SMSC UFX6000 and UFX7000 USB to VGA DRIVER
-M: Steve Glendinning <steve.glendinning@smsc.com>
+M: Steve Glendinning <steve.glendinning@shawell.net>
L: linux-fbdev@vger.kernel.org
-S: Supported
+S: Maintained
F: drivers/video/smscufx.c
SN-IA64 (Itanium) SUB-PLATFORM
@@ -6747,9 +6765,11 @@ F: include/linux/tifm.h
TI LM49xxx FAMILY ASoC CODEC DRIVERS
M: M R Swami Reddy <mr.swami.reddy@ti.com>
+M: Vishwas A Deshpande <vishwas.a.deshpande@ti.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
F: sound/soc/codecs/lm49453*
+F: sound/soc/codecs/isabelle*
TI TWL4030 SERIES SOC CODEC DRIVER
M: Peter Ujfalusi <peter.ujfalusi@ti.com>
@@ -6843,10 +6863,11 @@ F: include/linux/shmem_fs.h
F: mm/shmem.c
TPM DEVICE DRIVER
-M: Debora Velarde <debora@linux.vnet.ibm.com>
-M: Rajiv Andrade <srajiv@linux.vnet.ibm.com>
+M: Kent Yoder <key@linux.vnet.ibm.com>
+M: Rajiv Andrade <mail@srajiv.net>
W: http://tpmdd.sourceforge.net
-M: Marcel Selhorst <m.selhorst@sirrix.com>
+M: Marcel Selhorst <tpmdd@selhorst.net>
+M: Sirrix AG <tpmdd@sirrix.com>
W: http://www.sirrix.com
L: tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
S: Maintained
@@ -6946,6 +6967,13 @@ S: Maintained
F: Documentation/filesystems/ufs.txt
F: fs/ufs/
+UHID USERSPACE HID IO DRIVER:
+M: David Herrmann <dh.herrmann@googlemail.com>
+L: linux-input@vger.kernel.org
+S: Maintained
+F: drivers/hid/uhid.c
+F: include/linux/uhid.h
+
ULTRA-WIDEBAND (UWB) SUBSYSTEM:
L: linux-usb@vger.kernel.org
S: Orphan
@@ -7206,9 +7234,9 @@ S: Supported
F: drivers/usb/serial/whiteheat*
USB SMSC95XX ETHERNET DRIVER
-M: Steve Glendinning <steve.glendinning@smsc.com>
+M: Steve Glendinning <steve.glendinning@shawell.net>
L: netdev@vger.kernel.org
-S: Supported
+S: Maintained
F: drivers/net/usb/smsc95xx.*
USB SN9C1xx DRIVER
@@ -7577,6 +7605,7 @@ W: http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
S: Supported
F: Documentation/hwmon/wm83??
F: arch/arm/mach-s3c64xx/mach-crag6410*
+F: drivers/clk/clk-wm83*.c
F: drivers/leds/leds-wm83*.c
F: drivers/hwmon/wm83??-hwmon.c
F: drivers/input/misc/wm831x-on.c
diff --git a/Makefile b/Makefile
index 3fdfde2c1b7d..4bb09e1b1230 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION =
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 1a629636cc16..9816d5a4d176 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -59,15 +59,13 @@ struct pci_controller *pci_isa_hose;
* Quirks.
*/
-static void __init
-quirk_isa_bridge(struct pci_dev *dev)
+static void __devinit quirk_isa_bridge(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_ISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge);
-static void __init
-quirk_cypress(struct pci_dev *dev)
+static void __devinit quirk_cypress(struct pci_dev *dev)
{
/* The Notorious Cy82C693 chip. */
@@ -106,8 +104,7 @@ quirk_cypress(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress);
/* Called for each device after PCI setup is done. */
-static void __init
-pcibios_fixup_final(struct pci_dev *dev)
+static void __devinit pcibios_fixup_final(struct pci_dev *dev)
{
unsigned int class = dev->class >> 8;
@@ -198,12 +195,6 @@ pcibios_init(void)
subsys_initcall(pcibios_init);
-char * __devinit
-pcibios_setup(char *str)
-{
- return str;
-}
-
#ifdef ALPHA_RESTORE_SRM_SETUP
static struct pdev_srm_saved_conf *srm_saved_configs;
@@ -359,7 +350,7 @@ common_init_pci(void)
hose, &resources);
hose->bus = bus;
hose->need_domain_info = need_domain_info;
- next_busno = bus->subordinate + 1;
+ next_busno = bus->busn_res.end + 1;
/* Don't allow 8-bit bus number overflow inside the hose -
reserve some space for bridges. */
if (next_busno > 224) {
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a91009c61870..b25c9d3c379a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -250,16 +250,36 @@ choice
prompt "ARM system type"
default ARCH_VERSATILE
+config ARCH_SOCFPGA
+ bool "Altera SOCFPGA family"
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARM_AMBA
+ select ARM_GIC
+ select CACHE_L2X0
+ select CLKDEV_LOOKUP
+ select COMMON_CLK
+ select CPU_V7
+ select DW_APB_TIMER
+ select DW_APB_TIMER_OF
+ select GENERIC_CLOCKEVENTS
+ select GPIO_PL061 if GPIOLIB
+ select HAVE_ARM_SCU
+ select SPARSE_IRQ
+ select USE_OF
+ help
+ This enables support for Altera SOCFPGA Cyclone V platform
+
config ARCH_INTEGRATOR
bool "ARM Ltd. Integrator family"
select ARM_AMBA
select ARCH_HAS_CPUFREQ
- select CLKDEV_LOOKUP
- select HAVE_MACH_CLKDEV
+ select COMMON_CLK
+ select CLK_VERSATILE
select HAVE_TCM
select ICST
select GENERIC_CLOCKEVENTS
select PLAT_VERSATILE
+ select PLAT_VERSATILE_CLOCK
select PLAT_VERSATILE_FPGA_IRQ
select NEED_MACH_IO_H
select NEED_MACH_MEMORY_H
@@ -277,6 +297,7 @@ config ARCH_REALVIEW
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
select PLAT_VERSATILE
+ select PLAT_VERSATILE_CLOCK
select PLAT_VERSATILE_CLCD
select ARM_TIMER_SP804
select GPIO_PL061 if GPIOLIB
@@ -295,6 +316,7 @@ config ARCH_VERSATILE
select ARCH_WANT_OPTIONAL_GPIOLIB
select NEED_MACH_IO_H if PCI
select PLAT_VERSATILE
+ select PLAT_VERSATILE_CLOCK
select PLAT_VERSATILE_CLCD
select PLAT_VERSATILE_FPGA_IRQ
select ARM_TIMER_SP804
@@ -307,14 +329,16 @@ config ARCH_VEXPRESS
select ARM_AMBA
select ARM_TIMER_SP804
select CLKDEV_LOOKUP
- select HAVE_MACH_CLKDEV
+ select COMMON_CLK
select GENERIC_CLOCKEVENTS
select HAVE_CLK
select HAVE_PATA_PLATFORM
select ICST
select NO_IOPORT
select PLAT_VERSATILE
+ select PLAT_VERSATILE_CLOCK
select PLAT_VERSATILE_CLCD
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
help
This enables support for the ARM Ltd Versatile Express boards.
@@ -349,6 +373,7 @@ config ARCH_HIGHBANK
select ARM_TIMER_SP804
select CACHE_L2X0
select CLKDEV_LOOKUP
+ select COMMON_CLK
select CPU_V7
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU
@@ -389,6 +414,7 @@ config ARCH_PRIMA2
bool "CSR SiRFSoC PRIMA2 ARM Cortex A9 Platform"
select CPU_V7
select NO_IOPORT
+ select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
select CLKDEV_LOOKUP
select GENERIC_IRQ_CHIP
@@ -447,6 +473,8 @@ config ARCH_MXC
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
+ select USE_OF
help
Support for Freescale MXC/iMX-based family of processors
@@ -533,6 +561,18 @@ config ARCH_IXP4XX
help
Support for Intel's IXP4XX (XScale) family of processors.
+config ARCH_MVEBU
+ bool "Marvell SOCs with Device Tree support"
+ select GENERIC_CLOCKEVENTS
+ select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
+ select CLKSRC_MMIO
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
+ select COMMON_CLK
+ help
+ Support for the Marvell SoC Family with device tree support
+
config ARCH_DOVE
bool "Marvell Dove"
select CPU_V7
@@ -567,6 +607,7 @@ config ARCH_LPC32XX
select CLKDEV_LOOKUP
select GENERIC_CLOCKEVENTS
select USE_OF
+ select HAVE_PWM
help
Support for the NXP LPC32XX family of processors
@@ -647,6 +688,7 @@ config ARCH_TEGRA
select MIGHT_HAVE_CACHE_L2X0
select NEED_MACH_IO_H if PCI
select ARCH_HAS_CPUFREQ
+ select USE_OF
help
This enables support for NVIDIA Tegra based systems (Tegra APX,
Tegra 6xx and Tegra 2 series).
@@ -658,6 +700,7 @@ config ARCH_PICOXCELL
select ARM_VIC
select CPU_V6K
select DW_APB_TIMER
+ select DW_APB_TIMER_OF
select GENERIC_CLOCKEVENTS
select GENERIC_GPIO
select HAVE_TCM
@@ -888,7 +931,7 @@ config ARCH_U300
select ARM_VIC
select GENERIC_CLOCKEVENTS
select CLKDEV_LOOKUP
- select HAVE_MACH_CLKDEV
+ select COMMON_CLK
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
help
@@ -913,7 +956,7 @@ config ARCH_NOMADIK
select ARM_AMBA
select ARM_VIC
select CPU_ARM926T
- select CLKDEV_LOOKUP
+ select COMMON_CLK
select GENERIC_CLOCKEVENTS
select PINCTRL
select MIGHT_HAVE_CACHE_L2X0
@@ -936,6 +979,7 @@ config ARCH_DAVINCI
config ARCH_OMAP
bool "TI OMAP"
+ depends on MMU
select HAVE_CLK
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_CPUFREQ
@@ -987,6 +1031,8 @@ endchoice
# Kconfigs may be included either alphabetically (according to the
# plat- suffix) or along side the corresponding mach-* source.
#
+source "arch/arm/mach-mvebu/Kconfig"
+
source "arch/arm/mach-at91/Kconfig"
source "arch/arm/mach-bcmring/Kconfig"
@@ -1021,8 +1067,6 @@ source "arch/arm/mach-kirkwood/Kconfig"
source "arch/arm/mach-ks8695/Kconfig"
-source "arch/arm/mach-lpc32xx/Kconfig"
-
source "arch/arm/mach-msm/Kconfig"
source "arch/arm/mach-mv78xx0/Kconfig"
@@ -1581,6 +1625,7 @@ config ARCH_NR_GPIO
default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
default 355 if ARCH_U8500
default 264 if MACH_H4700
+ default 512 if SOC_OMAP5
default 0
help
Maximum number of GPIOs in the system.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 01a134141216..a03b5a7059e2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -310,6 +310,32 @@ choice
The uncompressor code port configuration is now handled
by CONFIG_S3C_LOWLEVEL_UART_PORT.
+ config DEBUG_VEXPRESS_UART0_DETECT
+ bool "Autodetect UART0 on Versatile Express Cortex-A core tiles"
+ depends on ARCH_VEXPRESS && CPU_CP15_MMU
+ help
+ This option enables a simple heuristic which tries to determine
+ the motherboard's memory map variant (original or RS1) and then
+ choose the relevant UART0 base address.
+
+ Note that this will only work with standard A-class core tiles,
+ and may fail with non-standard SMM or custom software models.
+
+ config DEBUG_VEXPRESS_UART0_CA9
+ bool "Use PL011 UART0 at 0x10009000 (V2P-CA9 core tile)"
+ depends on ARCH_VEXPRESS
+ help
+ This option selects UART0 at 0x10009000. Except for custom models,
+ this applies only to the V2P-CA9 tile.
+
+ config DEBUG_VEXPRESS_UART0_RS1
+ bool "Use PL011 UART0 at 0x1c090000 (RS1 complaint tiles)"
+ depends on ARCH_VEXPRESS
+ help
+ This option selects UART0 at 0x1c090000. This applies to most
+ of the tiles using the RS1 memory map, including all new A-class
+ core tiles, FPGA-based SMMs and software models.
+
config DEBUG_LL_UART_NONE
bool "No low-level debugging UART"
help
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 0298b00fe241..4d6d31115cf2 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -157,6 +157,7 @@ machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0
machine-$(CONFIG_ARCH_IMX_V4_V5) := imx
machine-$(CONFIG_ARCH_IMX_V6_V7) := imx
machine-$(CONFIG_ARCH_MXS) := mxs
+machine-$(CONFIG_ARCH_MVEBU) := mvebu
machine-$(CONFIG_ARCH_NETX) := netx
machine-$(CONFIG_ARCH_NOMADIK) := nomadik
machine-$(CONFIG_ARCH_OMAP1) := omap1
@@ -186,6 +187,7 @@ machine-$(CONFIG_ARCH_VEXPRESS) := vexpress
machine-$(CONFIG_ARCH_VT8500) := vt8500
machine-$(CONFIG_ARCH_W90X900) := w90x900
machine-$(CONFIG_FOOTBRIDGE) := footbridge
+machine-$(CONFIG_ARCH_SOCFPGA) := socfpga
machine-$(CONFIG_MACH_SPEAR1310) := spear13xx
machine-$(CONFIG_MACH_SPEAR1340) := spear13xx
machine-$(CONFIG_MACH_SPEAR300) := spear3xx
diff --git a/arch/arm/boot/dts/aks-cdu.dts b/arch/arm/boot/dts/aks-cdu.dts
new file mode 100644
index 000000000000..29b9f15e7599
--- /dev/null
+++ b/arch/arm/boot/dts/aks-cdu.dts
@@ -0,0 +1,113 @@
+/*
+ * aks-cdu.dts - Device Tree file for AK signal CDU
+ *
+ * Copyright (C) 2012 AK signal Brno a.s.
+ * 2012 Jiri Prchal <jiri.prchal@aksignal.cz>
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+/dts-v1/;
+
+/include/ "ge863-pro3.dtsi"
+
+/ {
+ chosen {
+ bootargs = "console=ttyS0,115200 ubi.mtd=4 root=ubi0:rootfs rootfstype=ubifs";
+ };
+
+ ahb {
+ apb {
+ usart0: serial@fffb0000 {
+ status = "okay";
+ };
+
+ usart1: serial@fffb4000 {
+ status = "okay";
+ linux,rs485-enabled-at-boot-time;
+ rs485-rts-delay = <0 0>;
+ };
+
+ usart2: serial@fffb8000 {
+ status = "okay";
+ linux,rs485-enabled-at-boot-time;
+ rs485-rts-delay = <0 0>;
+ };
+
+ usart3: serial@fffd0000 {
+ status = "okay";
+ linux,rs485-enabled-at-boot-time;
+ rs485-rts-delay = <0 0>;
+ };
+
+ macb0: ethernet@fffc4000 {
+ phy-mode = "rmii";
+ status = "okay";
+ };
+
+ usb1: gadget@fffa4000 {
+ atmel,vbus-gpio = <&pioC 15 0>;
+ status = "okay";
+ };
+ };
+
+ usb0: ohci@00500000 {
+ num-ports = <2>;
+ status = "okay";
+ };
+
+ nand0: nand@40000000 {
+ nand-bus-width = <8>;
+ nand-ecc-mode = "soft";
+ nand-on-flash-bbt;
+ status = "okay";
+
+ bootstrap@0 {
+ label = "bootstrap";
+ reg = <0x0 0x40000>;
+ };
+
+ uboot@40000 {
+ label = "uboot";
+ reg = <0x40000 0x80000>;
+ };
+ ubootenv@c0000 {
+ label = "ubootenv";
+ reg = <0xc0000 0x40000>;
+ };
+ kernel@100000 {
+ label = "kernel";
+ reg = <0x100000 0x400000>;
+ };
+ rootfs@500000 {
+ label = "rootfs";
+ reg = <0x500000 0x7b00000>;
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ red {
+ gpios = <&pioC 10 0>;
+ linux,default-trigger = "none";
+ };
+
+ green {
+ gpios = <&pioA 5 1>;
+ linux,default-trigger = "none";
+ default-state = "on";
+ };
+
+ yellow {
+ gpios = <&pioB 20 1>;
+ linux,default-trigger = "none";
+ };
+
+ blue {
+ gpios = <&pioB 21 1>;
+ linux,default-trigger = "none";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
new file mode 100644
index 000000000000..a9af4db7234c
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "am33xx.dtsi"
+
+/ {
+ model = "TI AM335x BeagleBone";
+ compatible = "ti,am335x-bone", "ti,am33xx";
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256 MB */
+ };
+};
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
new file mode 100644
index 000000000000..d6a97d9eff72
--- /dev/null
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "am33xx.dtsi"
+
+/ {
+ model = "TI AM335x EVM";
+ compatible = "ti,am335x-evm", "ti,am33xx";
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256 MB */
+ };
+};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
new file mode 100644
index 000000000000..59509c48d7e5
--- /dev/null
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -0,0 +1,158 @@
+/*
+ * Device Tree Source for AM33XX SoC
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "ti,am33xx";
+
+ aliases {
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ serial5 = &uart6;
+ };
+
+ cpus {
+ cpu@0 {
+ compatible = "arm,cortex-a8";
+ };
+ };
+
+ /*
+ * The soc node represents the soc top level view. It is uses for IPs
+ * that are not memory mapped in the MPU view or for the MPU itself.
+ */
+ soc {
+ compatible = "ti,omap-infra";
+ mpu {
+ compatible = "ti,omap3-mpu";
+ ti,hwmods = "mpu";
+ };
+ };
+
+ /*
+ * XXX: Use a flat representation of the AM33XX interconnect.
+ * The real AM33XX interconnect network is quite complex.Since
+ * that will not bring real advantage to represent that in DT
+ * for the moment, just use a fake OCP bus entry to represent
+ * the whole bus hierarchy.
+ */
+ ocp {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,hwmods = "l3_main";
+
+ intc: interrupt-controller@48200000 {
+ compatible = "ti,omap2-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ ti,intc-size = <128>;
+ reg = <0x48200000 0x1000>;
+ };
+
+ gpio1: gpio@44e07000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio1";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio2: gpio@4804C000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio2";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio3: gpio@481AC000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio3";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio4: gpio@481AE000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio4";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ uart1: serial@44E09000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart1";
+ clock-frequency = <48000000>;
+ };
+
+ uart2: serial@48022000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart2";
+ clock-frequency = <48000000>;
+ };
+
+ uart3: serial@48024000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart3";
+ clock-frequency = <48000000>;
+ };
+
+ uart4: serial@481A6000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart4";
+ clock-frequency = <48000000>;
+ };
+
+ uart5: serial@481A8000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart5";
+ clock-frequency = <48000000>;
+ };
+
+ uart6: serial@481AA000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart6";
+ clock-frequency = <48000000>;
+ };
+
+ i2c1: i2c@44E0B000 {
+ compatible = "ti,omap4-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "i2c1";
+ };
+
+ i2c2: i2c@4802A000 {
+ compatible = "ti,omap4-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "i2c2";
+ };
+
+ i2c3: i2c@4819C000 {
+ compatible = "ti,omap4-i2c";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ti,hwmods = "i2c3";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
new file mode 100644
index 000000000000..474f760ecadf
--- /dev/null
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "omap3.dtsi"
+
+/ {
+ model = "TI AM3517 EVM (AM3517/05)";
+ compatible = "ti,am3517-evm", "ti,omap3";
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x10000000>; /* 256 MB */
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+};
+
+&i2c2 {
+ clock-frequency = <400000>;
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+};
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
new file mode 100644
index 000000000000..fffd5c2a3041
--- /dev/null
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -0,0 +1,42 @@
+/*
+ * Device Tree file for Marvell Armada 370 evaluation board
+ * (DB-88F6710-BP-DDR3)
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "armada-370.dtsi"
+
+/ {
+ model = "Marvell Armada 370 Evaluation Board";
+ compatible = "marvell,a370-db", "marvell,armada370", "marvell,armada-370-xp";
+
+ chosen {
+ bootargs = "console=ttyS0,115200 earlyprintk";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x20000000>; /* 512 MB */
+ };
+
+ soc {
+ serial@d0012000 {
+ clock-frequency = <200000000>;
+ status = "okay";
+ };
+ timer@d0020300 {
+ clock-frequency = <600000000>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
new file mode 100644
index 000000000000..6b6b932a5a7d
--- /dev/null
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -0,0 +1,68 @@
+/*
+ * Device Tree Include file for Marvell Armada 370 and Armada XP SoC
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Ben Dooks <ben.dooks@codethink.co.uk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * This file contains the definitions that are common to the Armada
+ * 370 and Armada XP SoC.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ model = "Marvell Armada 370 and XP SoC";
+ compatible = "marvell,armada_370_xp";
+
+ cpus {
+ cpu@0 {
+ compatible = "marvell,sheeva-v7";
+ };
+ };
+
+ mpic: interrupt-controller@d0020000 {
+ compatible = "marvell,mpic";
+ #interrupt-cells = <1>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-controller;
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ interrupt-parent = <&mpic>;
+ ranges;
+
+ serial@d0012000 {
+ compatible = "ns16550";
+ reg = <0xd0012000 0x100>;
+ reg-shift = <2>;
+ interrupts = <41>;
+ status = "disabled";
+ };
+ serial@d0012100 {
+ compatible = "ns16550";
+ reg = <0xd0012100 0x100>;
+ reg-shift = <2>;
+ interrupts = <42>;
+ status = "disabled";
+ };
+
+ timer@d0020300 {
+ compatible = "marvell,armada-370-xp-timer";
+ reg = <0xd0020300 0x30>;
+ interrupts = <37>, <38>, <39>, <40>;
+ };
+ };
+};
+
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
new file mode 100644
index 000000000000..3228ccc83332
--- /dev/null
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -0,0 +1,35 @@
+/*
+ * Device Tree Include file for Marvell Armada 370 family SoC
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Contains definitions specific to the Armada 370 SoC that are not
+ * common to all Armada SoCs.
+ */
+
+/include/ "armada-370-xp.dtsi"
+
+/ {
+ model = "Marvell Armada 370 family SoC";
+ compatible = "marvell,armada370", "marvell,armada-370-xp";
+
+ mpic: interrupt-controller@d0020000 {
+ reg = <0xd0020a00 0x1d0>,
+ <0xd0021870 0x58>;
+ };
+
+ soc {
+ system-controller@d0018200 {
+ compatible = "marvell,armada-370-xp-system-controller";
+ reg = <0xd0018200 0x100>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts
new file mode 100644
index 000000000000..f97040d4258d
--- /dev/null
+++ b/arch/arm/boot/dts/armada-xp-db.dts
@@ -0,0 +1,50 @@
+/*
+ * Device Tree file for Marvell Armada XP evaluation board
+ * (DB-78460-BP)
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+/dts-v1/;
+/include/ "armada-xp.dtsi"
+
+/ {
+ model = "Marvell Armada XP Evaluation Board";
+ compatible = "marvell,axp-db", "marvell,armadaxp", "marvell,armada-370-xp";
+
+ chosen {
+ bootargs = "console=ttyS0,115200 earlyprintk";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x80000000>; /* 2 GB */
+ };
+
+ soc {
+ serial@d0012000 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ serial@d0012100 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ serial@d0012200 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ serial@d0012300 {
+ clock-frequency = <250000000>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
new file mode 100644
index 000000000000..e1fa7e6edfe8
--- /dev/null
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -0,0 +1,55 @@
+/*
+ * Device Tree Include file for Marvell Armada XP family SoC
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Ben Dooks <ben.dooks@codethink.co.uk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Contains definitions specific to the Armada 370 SoC that are not
+ * common to all Armada SoCs.
+ */
+
+/include/ "armada-370-xp.dtsi"
+
+/ {
+ model = "Marvell Armada XP family SoC";
+ compatible = "marvell,armadaxp", "marvell,armada-370-xp";
+
+ mpic: interrupt-controller@d0020000 {
+ reg = <0xd0020a00 0x1d0>,
+ <0xd0021870 0x58>;
+ };
+
+ soc {
+ serial@d0012200 {
+ compatible = "ns16550";
+ reg = <0xd0012200 0x100>;
+ reg-shift = <2>;
+ interrupts = <43>;
+ status = "disabled";
+ };
+ serial@d0012300 {
+ compatible = "ns16550";
+ reg = <0xd0012300 0x100>;
+ reg-shift = <2>;
+ interrupts = <44>;
+ status = "disabled";
+ };
+
+ timer@d0020300 {
+ marvell,timer-25Mhz;
+ };
+
+ system-controller@d0018200 {
+ compatible = "marvell,armada-370-xp-system-controller";
+ reg = <0xd0018200 0x500>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index f449efc9825f..66389c1c6f62 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -52,10 +52,11 @@
ranges;
aic: interrupt-controller@fffff000 {
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
reg = <0xfffff000 0x200>;
+ atmel,external-irqs = <29 30 31>;
};
ramc0: ramc@ffffea00 {
@@ -81,25 +82,25 @@
pit: timer@fffffd30 {
compatible = "atmel,at91sam9260-pit";
reg = <0xfffffd30 0xf>;
- interrupts = <1 4>;
+ interrupts = <1 4 7>;
};
tcb0: timer@fffa0000 {
compatible = "atmel,at91rm9200-tcb";
reg = <0xfffa0000 0x100>;
- interrupts = <17 4 18 4 19 4>;
+ interrupts = <17 4 0 18 4 0 19 4 0>;
};
tcb1: timer@fffdc000 {
compatible = "atmel,at91rm9200-tcb";
reg = <0xfffdc000 0x100>;
- interrupts = <26 4 27 4 28 4>;
+ interrupts = <26 4 0 27 4 0 28 4 0>;
};
pioA: gpio@fffff400 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x100>;
- interrupts = <2 4>;
+ interrupts = <2 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -108,7 +109,7 @@
pioB: gpio@fffff600 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff600 0x100>;
- interrupts = <3 4>;
+ interrupts = <3 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -117,7 +118,7 @@
pioC: gpio@fffff800 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff800 0x100>;
- interrupts = <4 4>;
+ interrupts = <4 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -126,14 +127,14 @@
dbgu: serial@fffff200 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfffff200 0x200>;
- interrupts = <1 4>;
+ interrupts = <1 4 7>;
status = "disabled";
};
usart0: serial@fffb0000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfffb0000 0x200>;
- interrupts = <6 4>;
+ interrupts = <6 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -142,7 +143,7 @@
usart1: serial@fffb4000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfffb4000 0x200>;
- interrupts = <7 4>;
+ interrupts = <7 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -151,7 +152,7 @@
usart2: serial@fffb8000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfffb8000 0x200>;
- interrupts = <8 4>;
+ interrupts = <8 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -160,7 +161,7 @@
usart3: serial@fffd0000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfffd0000 0x200>;
- interrupts = <23 4>;
+ interrupts = <23 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -169,7 +170,7 @@
usart4: serial@fffd4000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfffd4000 0x200>;
- interrupts = <24 4>;
+ interrupts = <24 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -178,7 +179,7 @@
usart5: serial@fffd8000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfffd8000 0x200>;
- interrupts = <25 4>;
+ interrupts = <25 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -187,21 +188,21 @@
macb0: ethernet@fffc4000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
reg = <0xfffc4000 0x100>;
- interrupts = <21 4>;
+ interrupts = <21 4 3>;
status = "disabled";
};
usb1: gadget@fffa4000 {
compatible = "atmel,at91rm9200-udc";
reg = <0xfffa4000 0x4000>;
- interrupts = <10 4>;
+ interrupts = <10 4 2>;
status = "disabled";
};
adc0: adc@fffe0000 {
compatible = "atmel,at91sam9260-adc";
reg = <0xfffe0000 0x100>;
- interrupts = <5 4>;
+ interrupts = <5 4 0>;
atmel,adc-use-external-triggers;
atmel,adc-channels-used = <0xf>;
atmel,adc-vref = <3300>;
@@ -253,7 +254,7 @@
usb0: ohci@00500000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x100000>;
- interrupts = <20 4>;
+ interrupts = <20 4 2>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 0209913a65a2..b460d6ce9eb5 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -48,10 +48,11 @@
ranges;
aic: interrupt-controller@fffff000 {
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
reg = <0xfffff000 0x200>;
+ atmel,external-irqs = <30 31>;
};
pmc: pmc@fffffc00 {
@@ -68,13 +69,13 @@
pit: timer@fffffd30 {
compatible = "atmel,at91sam9260-pit";
reg = <0xfffffd30 0xf>;
- interrupts = <1 4>;
+ interrupts = <1 4 7>;
};
tcb0: timer@fff7c000 {
compatible = "atmel,at91rm9200-tcb";
reg = <0xfff7c000 0x100>;
- interrupts = <19 4>;
+ interrupts = <19 4 0>;
};
rstc@fffffd00 {
@@ -90,7 +91,7 @@
pioA: gpio@fffff200 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff200 0x100>;
- interrupts = <2 4>;
+ interrupts = <2 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -99,7 +100,7 @@
pioB: gpio@fffff400 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x100>;
- interrupts = <3 4>;
+ interrupts = <3 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -108,7 +109,7 @@
pioC: gpio@fffff600 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff600 0x100>;
- interrupts = <4 4>;
+ interrupts = <4 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -117,7 +118,7 @@
pioD: gpio@fffff800 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff800 0x100>;
- interrupts = <4 4>;
+ interrupts = <4 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -126,7 +127,7 @@
pioE: gpio@fffffa00 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffffa00 0x100>;
- interrupts = <4 4>;
+ interrupts = <4 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -135,14 +136,14 @@
dbgu: serial@ffffee00 {
compatible = "atmel,at91sam9260-usart";
reg = <0xffffee00 0x200>;
- interrupts = <1 4>;
+ interrupts = <1 4 7>;
status = "disabled";
};
usart0: serial@fff8c000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfff8c000 0x200>;
- interrupts = <7 4>;
+ interrupts = <7 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -151,7 +152,7 @@
usart1: serial@fff90000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfff90000 0x200>;
- interrupts = <8 4>;
+ interrupts = <8 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -160,7 +161,7 @@
usart2: serial@fff94000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfff94000 0x200>;
- interrupts = <9 4>;
+ interrupts = <9 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -169,14 +170,14 @@
macb0: ethernet@fffbc000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
reg = <0xfffbc000 0x100>;
- interrupts = <21 4>;
+ interrupts = <21 4 3>;
status = "disabled";
};
usb1: gadget@fff78000 {
compatible = "atmel,at91rm9200-udc";
reg = <0xfff78000 0x4000>;
- interrupts = <24 4>;
+ interrupts = <24 4 2>;
status = "disabled";
};
};
@@ -200,7 +201,7 @@
usb0: ohci@00a00000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00a00000 0x100000>;
- interrupts = <29 4>;
+ interrupts = <29 4 2>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 7dbccaf199f7..bafa8806fc17 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -53,10 +53,11 @@
ranges;
aic: interrupt-controller@fffff000 {
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
reg = <0xfffff000 0x200>;
+ atmel,external-irqs = <31>;
};
ramc0: ramc@ffffe400 {
@@ -78,7 +79,7 @@
pit: timer@fffffd30 {
compatible = "atmel,at91sam9260-pit";
reg = <0xfffffd30 0xf>;
- interrupts = <1 4>;
+ interrupts = <1 4 7>;
};
@@ -90,25 +91,25 @@
tcb0: timer@fff7c000 {
compatible = "atmel,at91rm9200-tcb";
reg = <0xfff7c000 0x100>;
- interrupts = <18 4>;
+ interrupts = <18 4 0>;
};
tcb1: timer@fffd4000 {
compatible = "atmel,at91rm9200-tcb";
reg = <0xfffd4000 0x100>;
- interrupts = <18 4>;
+ interrupts = <18 4 0>;
};
dma: dma-controller@ffffec00 {
compatible = "atmel,at91sam9g45-dma";
reg = <0xffffec00 0x200>;
- interrupts = <21 4>;
+ interrupts = <21 4 0>;
};
pioA: gpio@fffff200 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff200 0x100>;
- interrupts = <2 4>;
+ interrupts = <2 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -117,7 +118,7 @@
pioB: gpio@fffff400 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x100>;
- interrupts = <3 4>;
+ interrupts = <3 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -126,7 +127,7 @@
pioC: gpio@fffff600 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff600 0x100>;
- interrupts = <4 4>;
+ interrupts = <4 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -135,7 +136,7 @@
pioD: gpio@fffff800 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffff800 0x100>;
- interrupts = <5 4>;
+ interrupts = <5 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -144,7 +145,7 @@
pioE: gpio@fffffa00 {
compatible = "atmel,at91rm9200-gpio";
reg = <0xfffffa00 0x100>;
- interrupts = <5 4>;
+ interrupts = <5 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -153,14 +154,14 @@
dbgu: serial@ffffee00 {
compatible = "atmel,at91sam9260-usart";
reg = <0xffffee00 0x200>;
- interrupts = <1 4>;
+ interrupts = <1 4 7>;
status = "disabled";
};
usart0: serial@fff8c000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfff8c000 0x200>;
- interrupts = <7 4>;
+ interrupts = <7 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -169,7 +170,7 @@
usart1: serial@fff90000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfff90000 0x200>;
- interrupts = <8 4>;
+ interrupts = <8 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -178,7 +179,7 @@
usart2: serial@fff94000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfff94000 0x200>;
- interrupts = <9 4>;
+ interrupts = <9 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -187,7 +188,7 @@
usart3: serial@fff98000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfff98000 0x200>;
- interrupts = <10 4>;
+ interrupts = <10 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -196,14 +197,14 @@
macb0: ethernet@fffbc000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
reg = <0xfffbc000 0x100>;
- interrupts = <25 4>;
+ interrupts = <25 4 3>;
status = "disabled";
};
adc0: adc@fffb0000 {
compatible = "atmel,at91sam9260-adc";
reg = <0xfffb0000 0x100>;
- interrupts = <20 4>;
+ interrupts = <20 4 0>;
atmel,adc-use-external-triggers;
atmel,adc-channels-used = <0xff>;
atmel,adc-vref = <3300>;
@@ -257,14 +258,14 @@
usb0: ohci@00700000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00700000 0x100000>;
- interrupts = <22 4>;
+ interrupts = <22 4 2>;
status = "disabled";
};
usb1: ehci@00800000 {
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00800000 0x100000>;
- interrupts = <22 4>;
+ interrupts = <22 4 2>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index cb84de791b5a..bfac0dfc332c 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -50,7 +50,7 @@
ranges;
aic: interrupt-controller@fffff000 {
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
reg = <0xfffff000 0x200>;
@@ -74,7 +74,7 @@
pit: timer@fffffe30 {
compatible = "atmel,at91sam9260-pit";
reg = <0xfffffe30 0xf>;
- interrupts = <1 4>;
+ interrupts = <1 4 7>;
};
shdwc@fffffe10 {
@@ -85,25 +85,25 @@
tcb0: timer@f8008000 {
compatible = "atmel,at91sam9x5-tcb";
reg = <0xf8008000 0x100>;
- interrupts = <17 4>;
+ interrupts = <17 4 0>;
};
tcb1: timer@f800c000 {
compatible = "atmel,at91sam9x5-tcb";
reg = <0xf800c000 0x100>;
- interrupts = <17 4>;
+ interrupts = <17 4 0>;
};
dma: dma-controller@ffffec00 {
compatible = "atmel,at91sam9g45-dma";
reg = <0xffffec00 0x200>;
- interrupts = <20 4>;
+ interrupts = <20 4 0>;
};
pioA: gpio@fffff400 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x100>;
- interrupts = <2 4>;
+ interrupts = <2 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -112,7 +112,7 @@
pioB: gpio@fffff600 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff600 0x100>;
- interrupts = <2 4>;
+ interrupts = <2 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -121,7 +121,7 @@
pioC: gpio@fffff800 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff800 0x100>;
- interrupts = <3 4>;
+ interrupts = <3 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -130,7 +130,7 @@
pioD: gpio@fffffa00 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffffa00 0x100>;
- interrupts = <3 4>;
+ interrupts = <3 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -139,14 +139,14 @@
dbgu: serial@fffff200 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfffff200 0x200>;
- interrupts = <1 4>;
+ interrupts = <1 4 7>;
status = "disabled";
};
usart0: serial@f801c000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xf801c000 0x4000>;
- interrupts = <5 4>;
+ interrupts = <5 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -155,7 +155,7 @@
usart1: serial@f8020000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xf8020000 0x4000>;
- interrupts = <6 4>;
+ interrupts = <6 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -164,7 +164,7 @@
usart2: serial@f8024000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xf8024000 0x4000>;
- interrupts = <7 4>;
+ interrupts = <7 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -173,7 +173,7 @@
usart3: serial@f8028000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xf8028000 0x4000>;
- interrupts = <8 4>;
+ interrupts = <8 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -201,7 +201,7 @@
usb0: ohci@00500000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00500000 0x00100000>;
- interrupts = <22 4>;
+ interrupts = <22 4 2>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 6b3ef4339ae7..4a18c393b136 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -51,10 +51,11 @@
ranges;
aic: interrupt-controller@fffff000 {
- #interrupt-cells = <2>;
+ #interrupt-cells = <3>;
compatible = "atmel,at91rm9200-aic";
interrupt-controller;
reg = <0xfffff000 0x200>;
+ atmel,external-irqs = <31>;
};
ramc0: ramc@ffffe800 {
@@ -80,37 +81,37 @@
pit: timer@fffffe30 {
compatible = "atmel,at91sam9260-pit";
reg = <0xfffffe30 0xf>;
- interrupts = <1 4>;
+ interrupts = <1 4 7>;
};
tcb0: timer@f8008000 {
compatible = "atmel,at91sam9x5-tcb";
reg = <0xf8008000 0x100>;
- interrupts = <17 4>;
+ interrupts = <17 4 0>;
};
tcb1: timer@f800c000 {
compatible = "atmel,at91sam9x5-tcb";
reg = <0xf800c000 0x100>;
- interrupts = <17 4>;
+ interrupts = <17 4 0>;
};
dma0: dma-controller@ffffec00 {
compatible = "atmel,at91sam9g45-dma";
reg = <0xffffec00 0x200>;
- interrupts = <20 4>;
+ interrupts = <20 4 0>;
};
dma1: dma-controller@ffffee00 {
compatible = "atmel,at91sam9g45-dma";
reg = <0xffffee00 0x200>;
- interrupts = <21 4>;
+ interrupts = <21 4 0>;
};
pioA: gpio@fffff400 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff400 0x100>;
- interrupts = <2 4>;
+ interrupts = <2 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -119,7 +120,7 @@
pioB: gpio@fffff600 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff600 0x100>;
- interrupts = <2 4>;
+ interrupts = <2 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -128,7 +129,7 @@
pioC: gpio@fffff800 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffff800 0x100>;
- interrupts = <3 4>;
+ interrupts = <3 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -137,7 +138,7 @@
pioD: gpio@fffffa00 {
compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
reg = <0xfffffa00 0x100>;
- interrupts = <3 4>;
+ interrupts = <3 4 1>;
#gpio-cells = <2>;
gpio-controller;
interrupt-controller;
@@ -146,14 +147,14 @@
dbgu: serial@fffff200 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfffff200 0x200>;
- interrupts = <1 4>;
+ interrupts = <1 4 7>;
status = "disabled";
};
usart0: serial@f801c000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xf801c000 0x200>;
- interrupts = <5 4>;
+ interrupts = <5 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -162,7 +163,7 @@
usart1: serial@f8020000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xf8020000 0x200>;
- interrupts = <6 4>;
+ interrupts = <6 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -171,7 +172,7 @@
usart2: serial@f8024000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xf8024000 0x200>;
- interrupts = <7 4>;
+ interrupts = <7 4 5>;
atmel,use-dma-rx;
atmel,use-dma-tx;
status = "disabled";
@@ -180,21 +181,21 @@
macb0: ethernet@f802c000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
- interrupts = <24 4>;
+ interrupts = <24 4 3>;
status = "disabled";
};
macb1: ethernet@f8030000 {
compatible = "cdns,at32ap7000-macb", "cdns,macb";
reg = <0xf8030000 0x100>;
- interrupts = <27 4>;
+ interrupts = <27 4 3>;
status = "disabled";
};
adc0: adc@f804c000 {
compatible = "atmel,at91sam9260-adc";
reg = <0xf804c000 0x100>;
- interrupts = <19 4>;
+ interrupts = <19 4 0>;
atmel,adc-use-external;
atmel,adc-channels-used = <0xffff>;
atmel,adc-vref = <3300>;
@@ -248,14 +249,14 @@
usb0: ohci@00600000 {
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00600000 0x100000>;
- interrupts = <22 4>;
+ interrupts = <22 4 2>;
status = "disabled";
};
usb1: ehci@00700000 {
compatible = "atmel,at91sam9g45-ehci", "usb-ehci";
reg = <0x00700000 0x100000>;
- interrupts = <22 4>;
+ interrupts = <22 4 2>;
status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/db8500.dtsi b/arch/arm/boot/dts/db8500.dtsi
index 4ad5160018cb..3180a9c588b9 100644
--- a/arch/arm/boot/dts/db8500.dtsi
+++ b/arch/arm/boot/dts/db8500.dtsi
@@ -48,7 +48,7 @@
};
rtc@80154000 {
- compatible = "stericsson,db8500-rtc";
+ compatible = "arm,rtc-pl031", "arm,primecell";
reg = <0x80154000 0x1000>;
interrupts = <0 18 0x4>;
};
@@ -60,7 +60,7 @@
interrupts = <0 119 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- supports-sleepmode;
+ st,supports-sleepmode;
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <0>;
@@ -73,7 +73,7 @@
interrupts = <0 120 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- supports-sleepmode;
+ st,supports-sleepmode;
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <1>;
@@ -86,7 +86,7 @@
interrupts = <0 121 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- supports-sleepmode;
+ st,supports-sleepmode;
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <2>;
@@ -99,7 +99,7 @@
interrupts = <0 122 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- supports-sleepmode;
+ st,supports-sleepmode;
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <3>;
@@ -112,7 +112,7 @@
interrupts = <0 123 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- supports-sleepmode;
+ st,supports-sleepmode;
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <4>;
@@ -125,7 +125,7 @@
interrupts = <0 124 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- supports-sleepmode;
+ st,supports-sleepmode;
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <5>;
@@ -138,7 +138,7 @@
interrupts = <0 125 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- supports-sleepmode;
+ st,supports-sleepmode;
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <6>;
@@ -151,7 +151,7 @@
interrupts = <0 126 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- supports-sleepmode;
+ st,supports-sleepmode;
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <7>;
@@ -164,7 +164,7 @@
interrupts = <0 127 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- supports-sleepmode;
+ st,supports-sleepmode;
gpio-controller;
#gpio-cells = <2>;
gpio-bank = <8>;
@@ -206,62 +206,74 @@
// DB8500_REGULATOR_VAPE
db8500_vape_reg: db8500_vape {
+ regulator-compatible = "db8500_vape";
regulator-name = "db8500-vape";
regulator-always-on;
};
// DB8500_REGULATOR_VARM
db8500_varm_reg: db8500_varm {
+ regulator-compatible = "db8500_varm";
regulator-name = "db8500-varm";
};
// DB8500_REGULATOR_VMODEM
db8500_vmodem_reg: db8500_vmodem {
+ regulator-compatible = "db8500_vmodem";
regulator-name = "db8500-vmodem";
};
// DB8500_REGULATOR_VPLL
db8500_vpll_reg: db8500_vpll {
+ regulator-compatible = "db8500_vpll";
regulator-name = "db8500-vpll";
};
// DB8500_REGULATOR_VSMPS1
db8500_vsmps1_reg: db8500_vsmps1 {
+ regulator-compatible = "db8500_vsmps1";
regulator-name = "db8500-vsmps1";
};
// DB8500_REGULATOR_VSMPS2
db8500_vsmps2_reg: db8500_vsmps2 {
+ regulator-compatible = "db8500_vsmps2";
regulator-name = "db8500-vsmps2";
};
// DB8500_REGULATOR_VSMPS3
db8500_vsmps3_reg: db8500_vsmps3 {
+ regulator-compatible = "db8500_vsmps3";
regulator-name = "db8500-vsmps3";
};
// DB8500_REGULATOR_VRF1
db8500_vrf1_reg: db8500_vrf1 {
+ regulator-compatible = "db8500_vrf1";
regulator-name = "db8500-vrf1";
};
// DB8500_REGULATOR_SWITCH_SVAMMDSP
db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
+ regulator-compatible = "db8500_sva_mmdsp";
regulator-name = "db8500-sva-mmdsp";
};
// DB8500_REGULATOR_SWITCH_SVAMMDSPRET
db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
+ regulator-compatible = "db8500_sva_mmdsp_ret";
regulator-name = "db8500-sva-mmdsp-ret";
};
// DB8500_REGULATOR_SWITCH_SVAPIPE
db8500_sva_pipe_reg: db8500_sva_pipe {
+ regulator-compatible = "db8500_sva_pipe";
regulator-name = "db8500_sva_pipe";
};
// DB8500_REGULATOR_SWITCH_SIAMMDSP
db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
+ regulator-compatible = "db8500_sia_mmdsp";
regulator-name = "db8500_sia_mmdsp";
};
@@ -272,38 +284,45 @@
// DB8500_REGULATOR_SWITCH_SIAPIPE
db8500_sia_pipe_reg: db8500_sia_pipe {
+ regulator-compatible = "db8500_sia_pipe";
regulator-name = "db8500-sia-pipe";
};
// DB8500_REGULATOR_SWITCH_SGA
db8500_sga_reg: db8500_sga {
+ regulator-compatible = "db8500_sga";
regulator-name = "db8500-sga";
vin-supply = <&db8500_vape_reg>;
};
// DB8500_REGULATOR_SWITCH_B2R2_MCDE
db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
+ regulator-compatible = "db8500_b2r2_mcde";
regulator-name = "db8500-b2r2-mcde";
vin-supply = <&db8500_vape_reg>;
};
// DB8500_REGULATOR_SWITCH_ESRAM12
db8500_esram12_reg: db8500_esram12 {
+ regulator-compatible = "db8500_esram12";
regulator-name = "db8500-esram12";
};
// DB8500_REGULATOR_SWITCH_ESRAM12RET
db8500_esram12_ret_reg: db8500_esram12_ret {
+ regulator-compatible = "db8500_esram12_ret";
regulator-name = "db8500-esram12-ret";
};
// DB8500_REGULATOR_SWITCH_ESRAM34
db8500_esram34_reg: db8500_esram34 {
+ regulator-compatible = "db8500_esram34";
regulator-name = "db8500-esram34";
};
// DB8500_REGULATOR_SWITCH_ESRAM34RET
db8500_esram34_ret_reg: db8500_esram34_ret {
+ regulator-compatible = "db8500_esram34_ret";
regulator-name = "db8500-esram34-ret";
};
};
@@ -312,12 +331,70 @@
compatible = "stericsson,ab8500";
reg = <5>; /* mailbox 5 is i2c */
interrupts = <0 40 0x4>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ ab8500-rtc {
+ compatible = "stericsson,ab8500-rtc";
+ interrupts = <17 0x4
+ 18 0x4>;
+ interrupt-names = "60S", "ALARM";
+ };
+
+ ab8500-gpadc {
+ compatible = "stericsson,ab8500-gpadc";
+ interrupts = <32 0x4
+ 39 0x4>;
+ interrupt-names = "HW_CONV_END", "SW_CONV_END";
+ vddadc-supply = <&ab8500_ldo_tvout_reg>;
+ };
+
+ ab8500-usb {
+ compatible = "stericsson,ab8500-usb";
+ interrupts = < 90 0x4
+ 96 0x4
+ 14 0x4
+ 15 0x4
+ 79 0x4
+ 74 0x4
+ 75 0x4>;
+ interrupt-names = "ID_WAKEUP_R",
+ "ID_WAKEUP_F",
+ "VBUS_DET_F",
+ "VBUS_DET_R",
+ "USB_LINK_STATUS",
+ "USB_ADP_PROBE_PLUG",
+ "USB_ADP_PROBE_UNPLUG";
+ vddulpivio18-supply = <&ab8500_ldo_initcore_reg>;
+ v-ape-supply = <&db8500_vape_reg>;
+ musb_1v8-supply = <&db8500_vsmps2_reg>;
+ };
+
+ ab8500-ponkey {
+ compatible = "stericsson,ab8500-ponkey";
+ interrupts = <6 0x4
+ 7 0x4>;
+ interrupt-names = "ONKEY_DBF", "ONKEY_DBR";
+ };
+
+ ab8500-sysctrl {
+ compatible = "stericsson,ab8500-sysctrl";
+ };
+
+ ab8500-pwm {
+ compatible = "stericsson,ab8500-pwm";
+ };
+
+ ab8500-debugfs {
+ compatible = "stericsson,ab8500-debug";
+ };
ab8500-regulators {
compatible = "stericsson,ab8500-regulator";
// supplies to the display/camera
ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
+ regulator-compatible = "ab8500_ldo_aux1";
regulator-name = "V-DISPLAY";
regulator-min-microvolt = <2500000>;
regulator-max-microvolt = <2900000>;
@@ -328,6 +405,7 @@
// supplies to the on-board eMMC
ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
+ regulator-compatible = "ab8500_ldo_aux2";
regulator-name = "V-eMMC1";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <3300000>;
@@ -335,6 +413,7 @@
// supply for VAUX3; SDcard slots
ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
+ regulator-compatible = "ab8500_ldo_aux3";
regulator-name = "V-MMC-SD";
regulator-min-microvolt = <1100000>;
regulator-max-microvolt = <3300000>;
@@ -342,41 +421,49 @@
// supply for v-intcore12; VINTCORE12 LDO
ab8500_ldo_initcore_reg: ab8500_ldo_initcore {
+ regulator-compatible = "ab8500_ldo_initcore";
regulator-name = "V-INTCORE";
};
// supply for tvout; gpadc; TVOUT LDO
ab8500_ldo_tvout_reg: ab8500_ldo_tvout {
+ regulator-compatible = "ab8500_ldo_tvout";
regulator-name = "V-TVOUT";
};
// supply for ab8500-usb; USB LDO
ab8500_ldo_usb_reg: ab8500_ldo_usb {
+ regulator-compatible = "ab8500_ldo_usb";
regulator-name = "dummy";
};
// supply for ab8500-vaudio; VAUDIO LDO
ab8500_ldo_audio_reg: ab8500_ldo_audio {
+ regulator-compatible = "ab8500_ldo_audio";
regulator-name = "V-AUD";
};
// supply for v-anamic1 VAMic1-LDO
ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
+ regulator-compatible = "ab8500_ldo_anamic1";
regulator-name = "V-AMIC1";
};
// supply for v-amic2; VAMIC2 LDO; reuse constants for AMIC1
ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 {
+ regulator-compatible = "ab8500_ldo_amamic2";
regulator-name = "V-AMIC2";
};
// supply for v-dmic; VDMIC LDO
ab8500_ldo_dmic_reg: ab8500_ldo_dmic {
+ regulator-compatible = "ab8500_ldo_dmic";
regulator-name = "V-DMIC";
};
// supply for U8500 CSI/DSI; VANA LDO
ab8500_ldo_ana_reg: ab8500_ldo_ana {
+ regulator-compatible = "ab8500_ldo_ana";
regulator-name = "V-CSI/DSI";
};
};
diff --git a/arch/arm/boot/dts/ea3250.dts b/arch/arm/boot/dts/ea3250.dts
new file mode 100644
index 000000000000..d79b28d9c963
--- /dev/null
+++ b/arch/arm/boot/dts/ea3250.dts
@@ -0,0 +1,174 @@
+/*
+ * Embedded Artists LPC3250 board
+ *
+ * Copyright 2012 Roland Stigge <stigge@antcom.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "lpc32xx.dtsi"
+
+/ {
+ model = "Embedded Artists LPC3250 board based on NXP LPC3250";
+ compatible = "ea,ea3250", "nxp,lpc3250";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ memory {
+ device_type = "memory";
+ reg = <0 0x4000000>;
+ };
+
+ ahb {
+ mac: ethernet@31060000 {
+ phy-mode = "rmii";
+ use-iram;
+ };
+
+ /* Here, choose exactly one from: ohci, usbd */
+ ohci@31020000 {
+ transceiver = <&isp1301>;
+ status = "okay";
+ };
+
+/*
+ usbd@31020000 {
+ transceiver = <&isp1301>;
+ status = "okay";
+ };
+*/
+
+ /* 128MB Flash via SLC NAND controller */
+ slc: flash@20020000 {
+ status = "okay";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ nxp,wdr-clks = <14>;
+ nxp,wwidth = <260000000>;
+ nxp,whold = <104000000>;
+ nxp,wsetup = <200000000>;
+ nxp,rdr-clks = <14>;
+ nxp,rwidth = <34666666>;
+ nxp,rhold = <104000000>;
+ nxp,rsetup = <200000000>;
+ nand-on-flash-bbt;
+ gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */
+
+ mtd0@00000000 {
+ label = "ea3250-boot";
+ reg = <0x00000000 0x00080000>;
+ read-only;
+ };
+
+ mtd1@00080000 {
+ label = "ea3250-uboot";
+ reg = <0x00080000 0x000c0000>;
+ read-only;
+ };
+
+ mtd2@00140000 {
+ label = "ea3250-kernel";
+ reg = <0x00140000 0x00400000>;
+ };
+
+ mtd3@00540000 {
+ label = "ea3250-rootfs";
+ reg = <0x00540000 0x07ac0000>;
+ };
+ };
+
+ apb {
+ uart5: serial@40090000 {
+ status = "okay";
+ };
+
+ uart3: serial@40080000 {
+ status = "okay";
+ };
+
+ uart6: serial@40098000 {
+ status = "okay";
+ };
+
+ i2c1: i2c@400A0000 {
+ clock-frequency = <100000>;
+
+ eeprom@50 {
+ compatible = "at,24c256";
+ reg = <0x50>;
+ };
+
+ eeprom@57 {
+ compatible = "at,24c64";
+ reg = <0x57>;
+ };
+
+ uda1380: uda1380@18 {
+ compatible = "nxp,uda1380";
+ reg = <0x18>;
+ power-gpio = <&gpio 0x59 0>;
+ reset-gpio = <&gpio 0x51 0>;
+ dac-clk = "wspll";
+ };
+
+ pca9532: pca9532@60 {
+ compatible = "nxp,pca9532";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x60>;
+ };
+ };
+
+ i2c2: i2c@400A8000 {
+ clock-frequency = <100000>;
+ };
+
+ i2cusb: i2c@31020300 {
+ clock-frequency = <100000>;
+
+ isp1301: usb-transceiver@2d {
+ compatible = "nxp,isp1301";
+ reg = <0x2d>;
+ };
+ };
+
+ sd@20098000 {
+ wp-gpios = <&pca9532 5 0>;
+ cd-gpios = <&pca9532 4 0>;
+ cd-inverted;
+ bus-width = <4>;
+ status = "okay";
+ };
+ };
+
+ fab {
+ uart1: serial@40014000 {
+ status = "okay";
+ };
+
+ /* 3-axis accelerometer X,Y,Z (or AD-IN instead of Z) */
+ adc@40048000 {
+ status = "okay";
+ };
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+ button@21 {
+ label = "GPIO Key UP";
+ linux,code = <103>;
+ gpios = <&gpio 4 1 0>; /* GPI_P3 1 */
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/evk-pro3.dts b/arch/arm/boot/dts/evk-pro3.dts
new file mode 100644
index 000000000000..b7354e6506de
--- /dev/null
+++ b/arch/arm/boot/dts/evk-pro3.dts
@@ -0,0 +1,41 @@
+/*
+ * evk-pro3.dts - Device Tree file for Telit EVK-PRO3 with Telit GE863-PRO3
+ *
+ * Copyright (C) 2012 Telit,
+ * 2012 Fabio Porcedda <fabio.porcedda@gmail.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+/dts-v1/;
+
+/include/ "ge863-pro3.dtsi"
+
+/ {
+ model = "Telit EVK-PRO3 for Telit GE863-PRO3";
+ compatible = "telit,evk-pro3", "atmel,at91sam9260", "atmel,at91sam9";
+
+ ahb {
+ apb {
+ macb0: ethernet@fffc4000 {
+ phy-mode = "rmii";
+ status = "okay";
+ };
+
+ usb1: gadget@fffa4000 {
+ atmel,vbus-gpio = <&pioC 5 0>;
+ status = "okay";
+ };
+ };
+
+ usb0: ohci@00500000 {
+ num-ports = <2>;
+ status = "okay";
+ };
+ };
+
+ i2c@0 {
+ status = "okay";
+ };
+
+}; \ No newline at end of file
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index b8c476384eef..0c49caa09978 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -134,4 +134,16 @@
i2c@138D0000 {
status = "disabled";
};
+
+ spi_0: spi@13920000 {
+ status = "disabled";
+ };
+
+ spi_1: spi@13930000 {
+ status = "disabled";
+ };
+
+ spi_2: spi@13940000 {
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/exynos4210-smdkv310.dts b/arch/arm/boot/dts/exynos4210-smdkv310.dts
index 27afc8e535ca..1beccc8f14ff 100644
--- a/arch/arm/boot/dts/exynos4210-smdkv310.dts
+++ b/arch/arm/boot/dts/exynos4210-smdkv310.dts
@@ -179,4 +179,42 @@
i2c@138D0000 {
status = "disabled";
};
+
+ spi_0: spi@13920000 {
+ status = "disabled";
+ };
+
+ spi_1: spi@13930000 {
+ status = "disabled";
+ };
+
+ spi_2: spi@13940000 {
+ gpios = <&gpc1 1 5 3 0>,
+ <&gpc1 3 5 3 0>,
+ <&gpc1 4 5 3 0>;
+
+ w25x80@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "w25x80";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+
+ controller-data {
+ cs-gpio = <&gpc1 2 1 0 3>;
+ samsung,spi-feedback-delay = <0>;
+ };
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0x0 0x40000>;
+ read-only;
+ };
+
+ partition@40000 {
+ label = "Kernel";
+ reg = <0x40000 0xc0000>;
+ };
+ };
+ };
};
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index a1dd2ee83753..02891fe876e4 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -25,6 +25,12 @@
compatible = "samsung,exynos4210";
interrupt-parent = <&gic>;
+ aliases {
+ spi0 = &spi_0;
+ spi1 = &spi_1;
+ spi2 = &spi_2;
+ };
+
gic:interrupt-controller@10490000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
@@ -33,6 +39,17 @@
reg = <0x10490000 0x1000>, <0x10480000 0x100>;
};
+ combiner:interrupt-controller@10440000 {
+ compatible = "samsung,exynos4210-combiner";
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ reg = <0x10440000 0x1000>;
+ interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
+ <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
+ <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
+ <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>;
+ };
+
watchdog@10060000 {
compatible = "samsung,s3c2410-wdt";
reg = <0x10060000 0x100>;
@@ -147,6 +164,36 @@
interrupts = <0 65 0>;
};
+ spi_0: spi@13920000 {
+ compatible = "samsung,exynos4210-spi";
+ reg = <0x13920000 0x100>;
+ interrupts = <0 66 0>;
+ tx-dma-channel = <&pdma0 7>; /* preliminary */
+ rx-dma-channel = <&pdma0 6>; /* preliminary */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi_1: spi@13930000 {
+ compatible = "samsung,exynos4210-spi";
+ reg = <0x13930000 0x100>;
+ interrupts = <0 67 0>;
+ tx-dma-channel = <&pdma1 7>; /* preliminary */
+ rx-dma-channel = <&pdma1 6>; /* preliminary */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi_2: spi@13940000 {
+ compatible = "samsung,exynos4210-spi";
+ reg = <0x13940000 0x100>;
+ interrupts = <0 68 0>;
+ tx-dma-channel = <&pdma0 9>; /* preliminary */
+ rx-dma-channel = <&pdma0 8>; /* preliminary */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
amba {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 49945cc1bc7d..8a5e348793c7 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -71,4 +71,42 @@
i2c@12CD0000 {
status = "disabled";
};
+
+ spi_0: spi@12d20000 {
+ status = "disabled";
+ };
+
+ spi_1: spi@12d30000 {
+ gpios = <&gpa2 4 2 3 0>,
+ <&gpa2 6 2 3 0>,
+ <&gpa2 7 2 3 0>;
+
+ w25q80bw@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "w25x80";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+
+ controller-data {
+ cs-gpio = <&gpa2 5 1 0 3>;
+ samsung,spi-feedback-delay = <0>;
+ };
+
+ partition@0 {
+ label = "U-Boot";
+ reg = <0x0 0x40000>;
+ read-only;
+ };
+
+ partition@40000 {
+ label = "Kernel";
+ reg = <0x40000 0xc0000>;
+ };
+ };
+ };
+
+ spi_2: spi@12d40000 {
+ status = "disabled";
+ };
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 4272b2949228..004aaa8d123c 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -23,6 +23,12 @@
compatible = "samsung,exynos5250";
interrupt-parent = <&gic>;
+ aliases {
+ spi0 = &spi_0;
+ spi1 = &spi_1;
+ spi2 = &spi_2;
+ };
+
gic:interrupt-controller@10481000 {
compatible = "arm,cortex-a9-gic";
#interrupt-cells = <3>;
@@ -146,6 +152,36 @@
#size-cells = <0>;
};
+ spi_0: spi@12d20000 {
+ compatible = "samsung,exynos4210-spi";
+ reg = <0x12d20000 0x100>;
+ interrupts = <0 66 0>;
+ tx-dma-channel = <&pdma0 5>; /* preliminary */
+ rx-dma-channel = <&pdma0 4>; /* preliminary */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi_1: spi@12d30000 {
+ compatible = "samsung,exynos4210-spi";
+ reg = <0x12d30000 0x100>;
+ interrupts = <0 67 0>;
+ tx-dma-channel = <&pdma1 5>; /* preliminary */
+ rx-dma-channel = <&pdma1 4>; /* preliminary */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi_2: spi@12d40000 {
+ compatible = "samsung,exynos4210-spi";
+ reg = <0x12d40000 0x100>;
+ interrupts = <0 68 0>;
+ tx-dma-channel = <&pdma0 7>; /* preliminary */
+ rx-dma-channel = <&pdma0 6>; /* preliminary */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
amba {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/ge863-pro3.dtsi b/arch/arm/boot/dts/ge863-pro3.dtsi
new file mode 100644
index 000000000000..17136fc7a516
--- /dev/null
+++ b/arch/arm/boot/dts/ge863-pro3.dtsi
@@ -0,0 +1,52 @@
+/*
+ * ge863_pro3.dtsi - Device Tree file for Telit GE863-PRO3
+ *
+ * Copyright (C) 2012 Telit,
+ * 2012 Fabio Porcedda <fabio.porcedda@gmail.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+/include/ "at91sam9260.dtsi"
+
+/ {
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ main_clock: clock@0 {
+ compatible = "atmel,osc", "fixed-clock";
+ clock-frequency = <6000000>;
+ };
+ };
+
+ ahb {
+ apb {
+ dbgu: serial@fffff200 {
+ status = "okay";
+ };
+ };
+
+ nand0: nand@40000000 {
+ nand-bus-width = <8>;
+ nand-ecc-mode = "soft";
+ nand-on-flash-bbt;
+ status = "okay";
+
+ boot@0 {
+ label = "boot";
+ reg = <0x0 0x7c0000>;
+ };
+
+ root@07c0000 {
+ label = "root";
+ reg = <0x7c0000 0x7840000>;
+ };
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,115200 root=ubi0:rootfs ubi.mtd=1 rootfstype=ubifs";
+ };
+};
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 83e72294aefb..2e1cfa00c25b 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -1,5 +1,5 @@
/*
- * Copyright 2011 Calxeda, Inc.
+ * Copyright 2011-2012 Calxeda, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -24,6 +24,7 @@
compatible = "calxeda,highbank";
#address-cells = <1>;
#size-cells = <1>;
+ clock-ranges;
cpus {
#address-cells = <1>;
@@ -33,24 +34,32 @@
compatible = "arm,cortex-a9";
reg = <0>;
next-level-cache = <&L2>;
+ clocks = <&a9pll>;
+ clock-names = "cpu";
};
cpu@1 {
compatible = "arm,cortex-a9";
reg = <1>;
next-level-cache = <&L2>;
+ clocks = <&a9pll>;
+ clock-names = "cpu";
};
cpu@2 {
compatible = "arm,cortex-a9";
reg = <2>;
next-level-cache = <&L2>;
+ clocks = <&a9pll>;
+ clock-names = "cpu";
};
cpu@3 {
compatible = "arm,cortex-a9";
reg = <3>;
next-level-cache = <&L2>;
+ clocks = <&a9pll>;
+ clock-names = "cpu";
};
};
@@ -75,12 +84,14 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0xfff10600 0x20>;
interrupts = <1 13 0xf01>;
+ clocks = <&a9periphclk>;
};
watchdog@fff10620 {
compatible = "arm,cortex-a9-twd-wdt";
reg = <0xfff10620 0x20>;
interrupts = <1 14 0xf01>;
+ clocks = <&a9periphclk>;
};
intc: interrupt-controller@fff11000 {
@@ -116,12 +127,15 @@
compatible = "calxeda,hb-sdhci";
reg = <0xffe0e000 0x1000>;
interrupts = <0 90 4>;
+ clocks = <&eclk>;
};
ipc@fff20000 {
compatible = "arm,pl320", "arm,primecell";
reg = <0xfff20000 0x1000>;
interrupts = <0 7 4>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
};
gpioe: gpio@fff30000 {
@@ -130,6 +144,8 @@
gpio-controller;
reg = <0xfff30000 0x1000>;
interrupts = <0 14 4>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
};
gpiof: gpio@fff31000 {
@@ -138,6 +154,8 @@
gpio-controller;
reg = <0xfff31000 0x1000>;
interrupts = <0 15 4>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
};
gpiog: gpio@fff32000 {
@@ -146,6 +164,8 @@
gpio-controller;
reg = <0xfff32000 0x1000>;
interrupts = <0 16 4>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
};
gpioh: gpio@fff33000 {
@@ -154,24 +174,32 @@
gpio-controller;
reg = <0xfff33000 0x1000>;
interrupts = <0 17 4>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
};
timer {
compatible = "arm,sp804", "arm,primecell";
reg = <0xfff34000 0x1000>;
interrupts = <0 18 4>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
};
rtc@fff35000 {
compatible = "arm,pl031", "arm,primecell";
reg = <0xfff35000 0x1000>;
interrupts = <0 19 4>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
};
serial@fff36000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xfff36000 0x1000>;
interrupts = <0 20 4>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
};
smic@fff3a000 {
@@ -186,12 +214,73 @@
sregs@fff3c000 {
compatible = "calxeda,hb-sregs";
reg = <0xfff3c000 0x1000>;
+
+ clocks {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ osc: oscillator {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <33333000>;
+ };
+
+ ddrpll: ddrpll {
+ #clock-cells = <0>;
+ compatible = "calxeda,hb-pll-clock";
+ clocks = <&osc>;
+ reg = <0x108>;
+ };
+
+ a9pll: a9pll {
+ #clock-cells = <0>;
+ compatible = "calxeda,hb-pll-clock";
+ clocks = <&osc>;
+ reg = <0x100>;
+ };
+
+ a9periphclk: a9periphclk {
+ #clock-cells = <0>;
+ compatible = "calxeda,hb-a9periph-clock";
+ clocks = <&a9pll>;
+ reg = <0x104>;
+ };
+
+ a9bclk: a9bclk {
+ #clock-cells = <0>;
+ compatible = "calxeda,hb-a9bus-clock";
+ clocks = <&a9pll>;
+ reg = <0x104>;
+ };
+
+ emmcpll: emmcpll {
+ #clock-cells = <0>;
+ compatible = "calxeda,hb-pll-clock";
+ clocks = <&osc>;
+ reg = <0x10C>;
+ };
+
+ eclk: eclk {
+ #clock-cells = <0>;
+ compatible = "calxeda,hb-emmc-clock";
+ clocks = <&emmcpll>;
+ reg = <0x114>;
+ };
+
+ pclk: pclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <150000000>;
+ };
+ };
};
dma@fff3d000 {
compatible = "arm,pl330", "arm,primecell";
reg = <0xfff3d000 0x1000>;
interrupts = <0 92 4>;
+ clocks = <&pclk>;
+ clock-names = "apb_pclk";
};
ethernet@fff50000 {
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
index 70bffa929b65..e3486f486b40 100644
--- a/arch/arm/boot/dts/imx23-evk.dts
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -22,17 +22,60 @@
apb@80000000 {
apbh@80000000 {
+ gpmi-nand@8000c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpmi_pins_a &gpmi_pins_fixup>;
+ status = "okay";
+ };
+
ssp0: ssp@80010000 {
compatible = "fsl,imx23-mmc";
pinctrl-names = "default";
- pinctrl-0 = <&mmc0_8bit_pins_a &mmc0_pins_fixup>;
- bus-width = <8>;
+ pinctrl-0 = <&mmc0_4bit_pins_a &mmc0_pins_fixup>;
+ bus-width = <4>;
wp-gpios = <&gpio1 30 0>;
+ vmmc-supply = <&reg_vddio_sd0>;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog-gpios@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x1123 /* MX23_PAD_LCD_RESET__GPIO_1_18 */
+ 0x11d3 /* MX23_PAD_PWM3__GPIO_1_29 */
+ 0x11e3 /* MX23_PAD_PWM4__GPIO_1_30 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+ };
+
+ lcdif@80030000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdif_24bit_pins_a>;
+ panel-enable-gpios = <&gpio1 18 0>;
status = "okay";
};
};
apbx@80040000 {
+ pwm: pwm@80064000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm2_pins_a>;
+ status = "okay";
+ };
+
+ auart0: serial@8006c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_pins_a>;
+ status = "okay";
+ };
+
duart: serial@80070000 {
pinctrl-names = "default";
pinctrl-0 = <&duart_pins_a>;
@@ -40,4 +83,23 @@
};
};
};
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_vddio_sd0: vddio-sd0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 29 0>;
+ };
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 2 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
};
diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
new file mode 100644
index 000000000000..20912b1d8893
--- /dev/null
+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * Author: Fabio Estevam <fabio.estevam@freescale.com>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx23.dtsi"
+
+/ {
+ model = "i.MX23 Olinuxino Low Cost Board";
+ compatible = "olimex,imx23-olinuxino", "fsl,imx23";
+
+ memory {
+ reg = <0x40000000 0x04000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx23-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_4bit_pins_a &mmc0_pins_fixup>;
+ bus-width = <4>;
+ status = "okay";
+ };
+ };
+
+ apbx@80040000 {
+ duart: serial@80070000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx23-stmp378x_devb.dts b/arch/arm/boot/dts/imx23-stmp378x_devb.dts
new file mode 100644
index 000000000000..757a327ff3e8
--- /dev/null
+++ b/arch/arm/boot/dts/imx23-stmp378x_devb.dts
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx23.dtsi"
+
+/ {
+ model = "Freescale STMP378x Development Board";
+ compatible = "fsl,stmp378x-devb", "fsl,imx23";
+
+ memory {
+ reg = <0x40000000 0x04000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx23-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_4bit_pins_a &mmc0_pins_fixup>;
+ bus-width = <4>;
+ wp-gpios = <&gpio1 30 0>;
+ vmmc-supply = <&reg_vddio_sd0>;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog-gpios@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x11d3 /* MX23_PAD_PWM3__GPIO_1_29 */
+ 0x11e3 /* MX23_PAD_PWM4__GPIO_1_30 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+ };
+ };
+
+ apbx@80040000 {
+ auart0: serial@8006c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_pins_a>;
+ status = "okay";
+ };
+
+ duart: serial@80070000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_vddio_sd0: vddio-sd0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio1 29 0>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
index 8c5f9994f3fc..a874dbfb5ae6 100644
--- a/arch/arm/boot/dts/imx23.dtsi
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -18,6 +18,8 @@
gpio0 = &gpio0;
gpio1 = &gpio1;
gpio2 = &gpio2;
+ serial0 = &auart0;
+ serial1 = &auart1;
};
cpus {
@@ -57,13 +59,15 @@
status = "disabled";
};
- bch@8000a000 {
- reg = <0x8000a000 2000>;
- status = "disabled";
- };
-
- gpmi@8000c000 {
- reg = <0x8000c000 2000>;
+ gpmi-nand@8000c000 {
+ compatible = "fsl,imx23-gpmi-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x8000c000 2000>, <0x8000a000 2000>;
+ reg-names = "gpmi-nand", "bch";
+ interrupts = <13>, <56>;
+ interrupt-names = "gpmi-dma", "bch";
+ fsl,gpmi-dma-channel = <4>;
status = "disabled";
};
@@ -114,24 +118,151 @@
duart_pins_a: duart@0 {
reg = <0>;
- fsl,pinmux-ids = <0x11a2 0x11b2>;
+ fsl,pinmux-ids = <
+ 0x11a2 /* MX23_PAD_PWM0__DUART_RX */
+ 0x11b2 /* MX23_PAD_PWM1__DUART_TX */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ auart0_pins_a: auart0@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x01c0 /* MX23_PAD_AUART1_RX__AUART1_RX */
+ 0x01d0 /* MX23_PAD_AUART1_TX__AUART1_TX */
+ 0x01a0 /* MX23_PAD_AUART1_CTS__AUART1_CTS */
+ 0x01b0 /* MX23_PAD_AUART1_RTS__AUART1_RTS */
+ >;
fsl,drive-strength = <0>;
fsl,voltage = <1>;
fsl,pull-up = <0>;
};
+ gpmi_pins_a: gpmi-nand@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x0000 /* MX23_PAD_GPMI_D00__GPMI_D00 */
+ 0x0010 /* MX23_PAD_GPMI_D01__GPMI_D01 */
+ 0x0020 /* MX23_PAD_GPMI_D02__GPMI_D02 */
+ 0x0030 /* MX23_PAD_GPMI_D03__GPMI_D03 */
+ 0x0040 /* MX23_PAD_GPMI_D04__GPMI_D04 */
+ 0x0050 /* MX23_PAD_GPMI_D05__GPMI_D05 */
+ 0x0060 /* MX23_PAD_GPMI_D06__GPMI_D06 */
+ 0x0070 /* MX23_PAD_GPMI_D07__GPMI_D07 */
+ 0x0100 /* MX23_PAD_GPMI_CLE__GPMI_CLE */
+ 0x0110 /* MX23_PAD_GPMI_ALE__GPMI_ALE */
+ 0x0130 /* MX23_PAD_GPMI_RDY0__GPMI_RDY0 */
+ 0x0140 /* MX23_PAD_GPMI_RDY1__GPMI_RDY1 */
+ 0x0170 /* MX23_PAD_GPMI_WPN__GPMI_WPN */
+ 0x0180 /* MX23_PAD_GPMI_WRN__GPMI_WRN */
+ 0x0190 /* MX23_PAD_GPMI_RDN__GPMI_RDN */
+ 0x21b0 /* MX23_PAD_GPMI_CE1N__GPMI_CE1N */
+ 0x21c0 /* MX23_PAD_GPMI_CE0N__GPMI_CE0N */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ gpmi_pins_fixup: gpmi-pins-fixup {
+ fsl,pinmux-ids = <
+ 0x0170 /* MX23_PAD_GPMI_WPN__GPMI_WPN */
+ 0x0180 /* MX23_PAD_GPMI_WRN__GPMI_WRN */
+ 0x0190 /* MX23_PAD_GPMI_RDN__GPMI_RDN */
+ >;
+ fsl,drive-strength = <2>;
+ };
+
+ mmc0_4bit_pins_a: mmc0-4bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x2020 /* MX23_PAD_SSP1_DATA0__SSP1_DATA0 */
+ 0x2030 /* MX23_PAD_SSP1_DATA1__SSP1_DATA1 */
+ 0x2040 /* MX23_PAD_SSP1_DATA2__SSP1_DATA2 */
+ 0x2050 /* MX23_PAD_SSP1_DATA3__SSP1_DATA3 */
+ 0x2000 /* MX23_PAD_SSP1_CMD__SSP1_CMD */
+ 0x2010 /* MX23_PAD_SSP1_DETECT__SSP1_DETECT */
+ 0x2060 /* MX23_PAD_SSP1_SCK__SSP1_SCK */
+ >;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
mmc0_8bit_pins_a: mmc0-8bit@0 {
reg = <0>;
- fsl,pinmux-ids = <0x2020 0x2030 0x2040
- 0x2050 0x0082 0x0092 0x00a2
- 0x00b2 0x2000 0x2010 0x2060>;
+ fsl,pinmux-ids = <
+ 0x2020 /* MX23_PAD_SSP1_DATA0__SSP1_DATA0 */
+ 0x2030 /* MX23_PAD_SSP1_DATA1__SSP1_DATA1 */
+ 0x2040 /* MX23_PAD_SSP1_DATA2__SSP1_DATA2 */
+ 0x2050 /* MX23_PAD_SSP1_DATA3__SSP1_DATA3 */
+ 0x0082 /* MX23_PAD_GPMI_D08__SSP1_DATA4 */
+ 0x0092 /* MX23_PAD_GPMI_D09__SSP1_DATA5 */
+ 0x00a2 /* MX23_PAD_GPMI_D10__SSP1_DATA6 */
+ 0x00b2 /* MX23_PAD_GPMI_D11__SSP1_DATA7 */
+ 0x2000 /* MX23_PAD_SSP1_CMD__SSP1_CMD */
+ 0x2010 /* MX23_PAD_SSP1_DETECT__SSP1_DETECT */
+ 0x2060 /* MX23_PAD_SSP1_SCK__SSP1_SCK */
+ >;
fsl,drive-strength = <1>;
fsl,voltage = <1>;
fsl,pull-up = <1>;
};
mmc0_pins_fixup: mmc0-pins-fixup {
- fsl,pinmux-ids = <0x2010 0x2060>;
+ fsl,pinmux-ids = <
+ 0x2010 /* MX23_PAD_SSP1_DETECT__SSP1_DETECT */
+ 0x2060 /* MX23_PAD_SSP1_SCK__SSP1_SCK */
+ >;
+ fsl,pull-up = <0>;
+ };
+
+ pwm2_pins_a: pwm2@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x11c0 /* MX23_PAD_PWM2__PWM2 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ lcdif_24bit_pins_a: lcdif-24bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x1000 /* MX23_PAD_LCD_D00__LCD_D0 */
+ 0x1010 /* MX23_PAD_LCD_D01__LCD_D1 */
+ 0x1020 /* MX23_PAD_LCD_D02__LCD_D2 */
+ 0x1030 /* MX23_PAD_LCD_D03__LCD_D3 */
+ 0x1040 /* MX23_PAD_LCD_D04__LCD_D4 */
+ 0x1050 /* MX23_PAD_LCD_D05__LCD_D5 */
+ 0x1060 /* MX23_PAD_LCD_D06__LCD_D6 */
+ 0x1070 /* MX23_PAD_LCD_D07__LCD_D7 */
+ 0x1080 /* MX23_PAD_LCD_D08__LCD_D8 */
+ 0x1090 /* MX23_PAD_LCD_D09__LCD_D9 */
+ 0x10a0 /* MX23_PAD_LCD_D10__LCD_D10 */
+ 0x10b0 /* MX23_PAD_LCD_D11__LCD_D11 */
+ 0x10c0 /* MX23_PAD_LCD_D12__LCD_D12 */
+ 0x10d0 /* MX23_PAD_LCD_D13__LCD_D13 */
+ 0x10e0 /* MX23_PAD_LCD_D14__LCD_D14 */
+ 0x10f0 /* MX23_PAD_LCD_D15__LCD_D15 */
+ 0x1100 /* MX23_PAD_LCD_D16__LCD_D16 */
+ 0x1110 /* MX23_PAD_LCD_D17__LCD_D17 */
+ 0x0081 /* MX23_PAD_GPMI_D08__LCD_D18 */
+ 0x0091 /* MX23_PAD_GPMI_D09__LCD_D19 */
+ 0x00a1 /* MX23_PAD_GPMI_D10__LCD_D20 */
+ 0x00b1 /* MX23_PAD_GPMI_D11__LCD_D21 */
+ 0x00c1 /* MX23_PAD_GPMI_D12__LCD_D22 */
+ 0x00d1 /* MX23_PAD_GPMI_D13__LCD_D23 */
+ 0x1160 /* MX23_PAD_LCD_DOTCK__LCD_DOTCK */
+ 0x1170 /* MX23_PAD_LCD_ENABLE__LCD_ENABLE */
+ 0x1180 /* MX23_PAD_LCD_HSYNC__LCD_HSYNC */
+ 0x1190 /* MX23_PAD_LCD_VSYNC__LCD_VSYNC */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
fsl,pull-up = <0>;
};
};
@@ -172,7 +303,9 @@
};
lcdif@80030000 {
+ compatible = "fsl,imx23-lcdif";
reg = <0x80030000 2000>;
+ interrupts = <46 45>;
status = "disabled";
};
@@ -242,12 +375,16 @@
};
rtc@8005c000 {
+ compatible = "fsl,imx23-rtc", "fsl,stmp3xxx-rtc";
reg = <0x8005c000 2000>;
- status = "disabled";
+ interrupts = <22>;
};
- pwm@80064000 {
+ pwm: pwm@80064000 {
+ compatible = "fsl,imx23-pwm";
reg = <0x80064000 2000>;
+ #pwm-cells = <2>;
+ fsl,pwm-number = <5>;
status = "disabled";
};
@@ -257,12 +394,16 @@
};
auart0: serial@8006c000 {
+ compatible = "fsl,imx23-auart";
reg = <0x8006c000 0x2000>;
+ interrupts = <24 25 23>;
status = "disabled";
};
auart1: serial@8006e000 {
+ compatible = "fsl,imx23-auart";
reg = <0x8006e000 0x2000>;
+ interrupts = <59 60 58>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx27-3ds.dts b/arch/arm/boot/dts/imx27-3ds.dts
new file mode 100644
index 000000000000..d3f8296e19e0
--- /dev/null
+++ b/arch/arm/boot/dts/imx27-3ds.dts
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2012 Sascha Hauer, Pengutronix
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx27.dtsi"
+
+/ {
+ model = "mx27_3ds";
+ compatible = "freescale,imx27-3ds", "fsl,imx27";
+
+ memory {
+ reg = <0x0 0x0>;
+ };
+
+ soc {
+ aipi@10000000 { /* aipi */
+
+ wdog@10002000 {
+ status = "okay";
+ };
+
+ uart@1000a000 {
+ fsl,uart-has-rtscts;
+ status = "okay";
+ };
+
+ fec@1002b000 {
+ status = "okay";
+ };
+ };
+ };
+
+};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 386c769c38d1..00bae3aad5ab 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -121,7 +121,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio2: gpio@10015100 {
@@ -131,7 +131,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio3: gpio@10015200 {
@@ -141,7 +141,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio4: gpio@10015300 {
@@ -151,7 +151,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio5: gpio@10015400 {
@@ -161,7 +161,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio6: gpio@10015500 {
@@ -171,7 +171,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
cspi3: cspi@10017000 {
diff --git a/arch/arm/boot/dts/imx28-apx4devkit.dts b/arch/arm/boot/dts/imx28-apx4devkit.dts
new file mode 100644
index 000000000000..b383417a558f
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-apx4devkit.dts
@@ -0,0 +1,198 @@
+/dts-v1/;
+/include/ "imx28.dtsi"
+
+/ {
+ model = "Bluegiga APX4 Development Kit";
+ compatible = "bluegiga,apx4devkit", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x04000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ gpmi-nand@8000c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpmi_pins_a &gpmi_status_cfg>;
+ status = "okay";
+ };
+
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_4bit_pins_a &mmc0_sck_cfg>;
+ bus-width = <4>;
+ status = "okay";
+ };
+
+ ssp2: ssp@80014000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_4bit_pins_apx4 &mmc2_sck_cfg_apx4>;
+ bus-width = <4>;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog-gpios@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x0113 /* MX28_PAD_GPMI_CE1N__GPIO_0_17 */
+ 0x0153 /* MX28_PAD_GPMI_RDY1__GPIO_0_21 */
+ 0x2123 /* MX28_PAD_SSP2_MISO__GPIO_2_18 */
+ 0x2131 /* MX28_PAD_SSP2_SS0__GPIO_2_19 */
+ 0x31c3 /* MX28_PAD_PWM3__GPIO_3_28 */
+ 0x31e3 /* MX28_PAD_LCD_RESET__GPIO_3_30 */
+ 0x4143 /* MX28_PAD_JTAG_RTCK__GPIO_4_20 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ lcdif_pins_apx4: lcdif-apx4@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
+ 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
+ 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
+ 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ mmc2_4bit_pins_apx4: mmc2-4bit-apx4@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x2041 /* MX28_PAD_SSP0_DATA4__SSP2_D0 */
+ 0x2051 /* MX28_PAD_SSP0_DATA5__SSP2_D3 */
+ 0x2061 /* MX28_PAD_SSP0_DATA6__SSP2_CMD */
+ 0x2071 /* MX28_PAD_SSP0_DATA7__SSP2_SCK */
+ 0x2141 /* MX28_PAD_SSP2_SS1__SSP2_D1 */
+ 0x2151 /* MX28_PAD_SSP2_SS2__SSP2_D2 */
+ >;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ mmc2_sck_cfg_apx4: mmc2-sck-cfg-apx4 {
+ fsl,pinmux-ids = <
+ 0x2071 /* MX28_PAD_SSP0_DATA7__SSP2_SCK */
+ >;
+ fsl,drive-strength = <2>;
+ fsl,pull-up = <0>;
+ };
+ };
+
+ lcdif@80030000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdif_24bit_pins_a
+ &lcdif_pins_apx4>;
+ status = "okay";
+ };
+ };
+
+ apbx@80040000 {
+ saif0: saif@80042000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif0_pins_a>;
+ status = "okay";
+ };
+
+ saif1: saif@80046000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif1_pins_a>;
+ fsl,saif-master = <&saif0>;
+ status = "okay";
+ };
+
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+
+ sgtl5000: codec@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ VDDA-supply = <&reg_3p3v>;
+ VDDIO-supply = <&reg_3p3v>;
+
+ };
+
+ pcf8563: rtc@51 {
+ compatible = "phg,pcf8563";
+ reg = <0x51>;
+ };
+ };
+
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+
+ auart0: serial@8006a000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_pins_a>;
+ status = "okay";
+ };
+
+ auart1: serial@8006c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart1_2pins_a>;
+ status = "okay";
+ };
+
+ auart2: serial@8006e000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart2_2pins_a>;
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>;
+ status = "okay";
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+
+ sound {
+ compatible = "bluegiga,apx4devkit-sgtl5000",
+ "fsl,mxs-audio-sgtl5000";
+ model = "apx4devkit-sgtl5000";
+ saif-controllers = <&saif0 &saif1>;
+ audio-codec = <&sgtl5000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ user {
+ label = "Heartbeat";
+ gpios = <&gpio3 28 0>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx28-cfa10036.dts b/arch/arm/boot/dts/imx28-cfa10036.dts
new file mode 100644
index 000000000000..c03a577beca3
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-cfa10036.dts
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2012 Free Electrons
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx28.dtsi"
+
+/ {
+ model = "Crystalfontz CFA-10036 Board";
+ compatible = "crystalfontz,cfa10036", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_4bit_pins_a
+ &mmc0_cd_cfg &mmc0_sck_cfg>;
+ bus-width = <4>;
+ status = "okay";
+ };
+ };
+
+ apbx@80040000 {
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_b>;
+ status = "okay";
+ };
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ power {
+ gpios = <&gpio3 4 1>;
+ default-state = "on";
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index ee520a529cb4..773c0e84d1fb 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -22,6 +22,13 @@
apb@80000000 {
apbh@80000000 {
+ gpmi-nand@8000c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpmi_pins_a &gpmi_status_cfg
+ &gpmi_pins_evk>;
+ status = "okay";
+ };
+
ssp0: ssp@80010000 {
compatible = "fsl,imx28-mmc";
pinctrl-names = "default";
@@ -29,6 +36,7 @@
&mmc0_cd_cfg &mmc0_sck_cfg>;
bus-width = <8>;
wp-gpios = <&gpio2 12 0>;
+ vmmc-supply = <&reg_vddio_sd0>;
status = "okay";
};
@@ -36,6 +44,72 @@
compatible = "fsl,imx28-mmc";
bus-width = <8>;
wp-gpios = <&gpio0 28 0>;
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog-gpios@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x20d3 /* MX28_PAD_SSP1_CMD__GPIO_2_13 */
+ 0x20f3 /* MX28_PAD_SSP1_DATA3__GPIO_2_15 */
+ 0x40d3 /* MX28_PAD_ENET0_RX_CLK__GPIO_4_13 */
+ 0x20c3 /* MX28_PAD_SSP1_SCK__GPIO_2_12 */
+ 0x31c3 /* MX28_PAD_PWM3__GPIO_3_28 */
+ 0x31e3 /* MX28_PAD_LCD_RESET__GPIO_3_30 */
+ 0x3053 /* MX28_PAD_AUART1_TX__GPIO_3_5 */
+ 0x3083 /* MX28_PAD_AUART2_RX__GPIO_3_8 */
+ 0x3093 /* MX28_PAD_AUART2_TX__GPIO_3_9 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ gpmi_pins_evk: gpmi-nand-evk@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x0110 /* MX28_PAD_GPMI_CE1N__GPMI_CE1N */
+ 0x0150 /* MX28_PAD_GPMI_RDY1__GPMI_READY1 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ lcdif_pins_evk: lcdif-evk@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x1181 /* MX28_PAD_LCD_RD_E__LCD_VSYNC */
+ 0x1191 /* MX28_PAD_LCD_WR_RWN__LCD_HSYNC */
+ 0x11a1 /* MX28_PAD_LCD_RS__LCD_DOTCLK */
+ 0x11b1 /* MX28_PAD_LCD_CS__LCD_ENABLE */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+ };
+
+ lcdif@80030000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdif_24bit_pins_a
+ &lcdif_pins_evk>;
+ panel-enable-gpios = <&gpio3 30 0>;
+ status = "okay";
+ };
+
+ can0: can@80032000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0_pins_a>;
+ status = "okay";
+ };
+
+ can1: can@80034000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can1_pins_a>;
status = "okay";
};
};
@@ -68,19 +142,58 @@
};
};
+ pwm: pwm@80064000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm2_pins_a>;
+ status = "okay";
+ };
+
duart: serial@80074000 {
pinctrl-names = "default";
pinctrl-0 = <&duart_pins_a>;
status = "okay";
};
+
+ auart0: serial@8006a000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_pins_a>;
+ status = "okay";
+ };
+
+ auart3: serial@80070000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart3_pins_a>;
+ status = "okay";
+ };
+
+ usbphy0: usbphy@8007c000 {
+ status = "okay";
+ };
+
+ usbphy1: usbphy@8007e000 {
+ status = "okay";
+ };
};
};
ahb@80080000 {
+ usb0: usb@80080000 {
+ vbus-supply = <&reg_usb0_vbus>;
+ status = "okay";
+ };
+
+ usb1: usb@80090000 {
+ vbus-supply = <&reg_usb1_vbus>;
+ status = "okay";
+ };
+
mac0: ethernet@800f0000 {
phy-mode = "rmii";
pinctrl-names = "default";
pinctrl-0 = <&mac0_pins_a>;
+ phy-supply = <&reg_fec_3v3>;
+ phy-reset-gpios = <&gpio4 13 0>;
+ phy-reset-duration = <100>;
status = "okay";
};
@@ -102,6 +215,40 @@
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
+
+ reg_vddio_sd0: vddio-sd0 {
+ compatible = "regulator-fixed";
+ regulator-name = "vddio-sd0";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio3 28 0>;
+ };
+
+ reg_fec_3v3: fec-3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "fec-3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio2 15 0>;
+ };
+
+ reg_usb0_vbus: usb0_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb0_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 9 0>;
+ enable-active-high;
+ };
+
+ reg_usb1_vbus: usb1_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb1_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 8 0>;
+ enable-active-high;
+ };
};
sound {
@@ -111,4 +258,21 @@
saif-controllers = <&saif0 &saif1>;
audio-codec = <&sgtl5000>;
};
+
+ leds {
+ compatible = "gpio-leds";
+
+ user {
+ label = "Heartbeat";
+ gpios = <&gpio3 5 0>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 2 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
};
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
new file mode 100644
index 000000000000..183a3fd2d859
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2012 Marek Vasut <marex@denx.de>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx28.dtsi"
+
+/ {
+ model = "DENX M28EVK";
+ compatible = "denx,m28evk", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ gpmi-nand@8000c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&gpmi_pins_a &gpmi_status_cfg>;
+ status = "okay";
+
+ partition@0 {
+ label = "bootloader";
+ reg = <0x00000000 0x00300000>;
+ read-only;
+ };
+
+ partition@1 {
+ label = "environment";
+ reg = <0x00300000 0x00080000>;
+ };
+
+ partition@2 {
+ label = "redundant-environment";
+ reg = <0x00380000 0x00080000>;
+ };
+
+ partition@3 {
+ label = "kernel";
+ reg = <0x00400000 0x00400000>;
+ };
+
+ partition@4 {
+ label = "filesystem";
+ reg = <0x00800000 0x0f800000>;
+ };
+ };
+
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_8bit_pins_a
+ &mmc0_cd_cfg
+ &mmc0_sck_cfg>;
+ bus-width = <8>;
+ wp-gpios = <&gpio3 10 1>;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog-gpios@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x30a3 /* MX28_PAD_AUART2_CTS__GPIO_3_10 */
+ 0x30b3 /* MX28_PAD_AUART2_RTS__GPIO_3_11 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ lcdif_pins_m28: lcdif-m28@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x11e0 /* MX28_PAD_LCD_DOTCLK__LCD_DOTCLK */
+ 0x11f0 /* MX28_PAD_LCD_ENABLE__LCD_ENABLE */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+ };
+
+ lcdif@80030000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&lcdif_24bit_pins_a
+ &lcdif_pins_m28>;
+ status = "okay";
+ };
+
+ can0: can@80032000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0_pins_a>;
+ status = "okay";
+ };
+
+ can1: can@80034000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&can1_pins_a>;
+ status = "okay";
+ };
+ };
+
+ apbx@80040000 {
+ saif0: saif@80042000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif0_pins_a>;
+ status = "okay";
+ };
+
+ saif1: saif@80046000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&saif1_pins_a>;
+ fsl,saif-master = <&saif0>;
+ status = "okay";
+ };
+
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+
+ sgtl5000: codec@0a {
+ compatible = "fsl,sgtl5000";
+ reg = <0x0a>;
+ VDDA-supply = <&reg_3p3v>;
+ VDDIO-supply = <&reg_3p3v>;
+
+ };
+
+ eeprom: eeprom@51 {
+ compatible = "atmel,24c128";
+ reg = <0x51>;
+ pagesize = <32>;
+ };
+
+ rtc: rtc@68 {
+ compatible = "stm,mt41t62";
+ reg = <0x68>;
+ };
+ };
+
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_pins_a>;
+ status = "okay";
+ };
+
+ auart0: serial@8006a000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart0_2pins_a>;
+ status = "okay";
+ };
+
+ auart3: serial@80070000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart3_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>;
+ phy-reset-gpios = <&gpio3 11 0>;
+ status = "okay";
+ };
+
+ mac1: ethernet@800f4000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac1_pins_a>;
+ status = "okay";
+ };
+ };
+
+ regulators {
+ compatible = "simple-bus";
+
+ reg_3p3v: 3p3v {
+ compatible = "regulator-fixed";
+ regulator-name = "3P3V";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+
+ sound {
+ compatible = "denx,m28evk-sgtl5000",
+ "fsl,mxs-audio-sgtl5000";
+ model = "m28evk-sgtl5000";
+ saif-controllers = <&saif0 &saif1>;
+ audio-codec = <&sgtl5000>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx28-tx28.dts b/arch/arm/boot/dts/imx28-tx28.dts
new file mode 100644
index 000000000000..62bf767409a6
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-tx28.dts
@@ -0,0 +1,97 @@
+/dts-v1/;
+/include/ "imx28.dtsi"
+
+/ {
+ model = "Ka-Ro electronics TX28 module";
+ compatible = "karo,tx28", "fsl,imx28";
+
+ memory {
+ reg = <0x40000000 0x08000000>;
+ };
+
+ apb@80000000 {
+ apbh@80000000 {
+ ssp0: ssp@80010000 {
+ compatible = "fsl,imx28-mmc";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_4bit_pins_a
+ &mmc0_cd_cfg
+ &mmc0_sck_cfg>;
+ bus-width = <4>;
+ status = "okay";
+ };
+
+ pinctrl@80018000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&hog_pins_a>;
+
+ hog_pins_a: hog-gpios@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x40a3 /* MX28_PAD_ENET0_RXD3__GPIO_4_10 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+ };
+ };
+
+ apbx@80040000 {
+ i2c0: i2c@80058000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ status = "okay";
+
+ ds1339: rtc@68 {
+ compatible = "mxim,ds1339";
+ reg = <0x68>;
+ };
+ };
+
+ pwm: pwm@80064000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwm0_pins_a>;
+ status = "okay";
+ };
+
+ duart: serial@80074000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&duart_4pins_a>;
+ status = "okay";
+ };
+
+ auart1: serial@8006c000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&auart1_pins_a>;
+ status = "okay";
+ };
+ };
+ };
+
+ ahb@80080000 {
+ mac0: ethernet@800f0000 {
+ phy-mode = "rmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&mac0_pins_a>;
+ status = "okay";
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ user {
+ label = "Heartbeat";
+ gpios = <&gpio4 10 0>;
+ linux,default-trigger = "heartbeat";
+ };
+ };
+
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0 5000000>;
+ brightness-levels = <0 4 8 16 32 64 128 255>;
+ default-brightness-level = <6>;
+ };
+};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 4634cb861a59..915db89e3644 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -22,6 +22,11 @@
gpio4 = &gpio4;
saif0 = &saif0;
saif1 = &saif1;
+ serial0 = &auart0;
+ serial1 = &auart1;
+ serial2 = &auart2;
+ serial3 = &auart3;
+ serial4 = &auart4;
};
cpus {
@@ -68,15 +73,15 @@
status = "disabled";
};
- bch@8000a000 {
- reg = <0x8000a000 2000>;
- interrupts = <41>;
- status = "disabled";
- };
-
- gpmi@8000c000 {
- reg = <0x8000c000 2000>;
- interrupts = <42 88>;
+ gpmi-nand@8000c000 {
+ compatible = "fsl,imx28-gpmi-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x8000c000 2000>, <0x8000a000 2000>;
+ reg-names = "gpmi-nand", "bch";
+ interrupts = <88>, <41>;
+ interrupt-names = "gpmi-dma", "bch";
+ fsl,gpmi-dma-channel = <4>;
status = "disabled";
};
@@ -161,7 +166,150 @@
duart_pins_a: duart@0 {
reg = <0>;
- fsl,pinmux-ids = <0x3102 0x3112>;
+ fsl,pinmux-ids = <
+ 0x3102 /* MX28_PAD_PWM0__DUART_RX */
+ 0x3112 /* MX28_PAD_PWM1__DUART_TX */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ duart_pins_b: duart@1 {
+ reg = <1>;
+ fsl,pinmux-ids = <
+ 0x3022 /* MX28_PAD_AUART0_CTS__DUART_RX */
+ 0x3032 /* MX28_PAD_AUART0_RTS__DUART_TX */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ duart_4pins_a: duart-4pins@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x3022 /* MX28_PAD_AUART0_CTS__DUART_RX */
+ 0x3032 /* MX28_PAD_AUART0_RTS__DUART_TX */
+ 0x3002 /* MX28_PAD_AUART0_RX__DUART_CTS */
+ 0x3012 /* MX28_PAD_AUART0_TX__DUART_RTS */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ gpmi_pins_a: gpmi-nand@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x0000 /* MX28_PAD_GPMI_D00__GPMI_D0 */
+ 0x0010 /* MX28_PAD_GPMI_D01__GPMI_D1 */
+ 0x0020 /* MX28_PAD_GPMI_D02__GPMI_D2 */
+ 0x0030 /* MX28_PAD_GPMI_D03__GPMI_D3 */
+ 0x0040 /* MX28_PAD_GPMI_D04__GPMI_D4 */
+ 0x0050 /* MX28_PAD_GPMI_D05__GPMI_D5 */
+ 0x0060 /* MX28_PAD_GPMI_D06__GPMI_D6 */
+ 0x0070 /* MX28_PAD_GPMI_D07__GPMI_D7 */
+ 0x0100 /* MX28_PAD_GPMI_CE0N__GPMI_CE0N */
+ 0x0140 /* MX28_PAD_GPMI_RDY0__GPMI_READY0 */
+ 0x0180 /* MX28_PAD_GPMI_RDN__GPMI_RDN */
+ 0x0190 /* MX28_PAD_GPMI_WRN__GPMI_WRN */
+ 0x01a0 /* MX28_PAD_GPMI_ALE__GPMI_ALE */
+ 0x01b0 /* MX28_PAD_GPMI_CLE__GPMI_CLE */
+ 0x01c0 /* MX28_PAD_GPMI_RESETN__GPMI_RESETN */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ gpmi_status_cfg: gpmi-status-cfg {
+ fsl,pinmux-ids = <
+ 0x0180 /* MX28_PAD_GPMI_RDN__GPMI_RDN */
+ 0x0190 /* MX28_PAD_GPMI_WRN__GPMI_WRN */
+ 0x01c0 /* MX28_PAD_GPMI_RESETN__GPMI_RESETN */
+ >;
+ fsl,drive-strength = <2>;
+ };
+
+ auart0_pins_a: auart0@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x3000 /* MX28_PAD_AUART0_RX__AUART0_RX */
+ 0x3010 /* MX28_PAD_AUART0_TX__AUART0_TX */
+ 0x3020 /* MX28_PAD_AUART0_CTS__AUART0_CTS */
+ 0x3030 /* MX28_PAD_AUART0_RTS__AUART0_RTS */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ auart0_2pins_a: auart0-2pins@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x3000 /* MX28_PAD_AUART0_RX__AUART0_RX */
+ 0x3010 /* MX28_PAD_AUART0_TX__AUART0_TX */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ auart1_pins_a: auart1@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x3040 /* MX28_PAD_AUART1_RX__AUART1_RX */
+ 0x3050 /* MX28_PAD_AUART1_TX__AUART1_TX */
+ 0x3060 /* MX28_PAD_AUART1_CTS__AUART1_CTS */
+ 0x3070 /* MX28_PAD_AUART1_RTS__AUART1_RTS */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ auart1_2pins_a: auart1-2pins@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x3040 /* MX28_PAD_AUART1_RX__AUART1_RX */
+ 0x3050 /* MX28_PAD_AUART1_TX__AUART1_TX */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ auart2_2pins_a: auart2-2pins@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x2101 /* MX28_PAD_SSP2_SCK__AUART2_RX */
+ 0x2111 /* MX28_PAD_SSP2_MOSI__AUART2_TX */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ auart3_pins_a: auart3@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x30c0 /* MX28_PAD_AUART3_RX__AUART3_RX */
+ 0x30d0 /* MX28_PAD_AUART3_TX__AUART3_TX */
+ 0x30e0 /* MX28_PAD_AUART3_CTS__AUART3_CTS */
+ 0x30f0 /* MX28_PAD_AUART3_RTS__AUART3_RTS */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ auart3_2pins_a: auart3-2pins@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x2121 /* MX28_PAD_SSP2_MISO__AUART3_RX */
+ 0x2131 /* MX28_PAD_SSP2_SS0__AUART3_TX */
+ >;
fsl,drive-strength = <0>;
fsl,voltage = <1>;
fsl,pull-up = <0>;
@@ -169,9 +317,17 @@
mac0_pins_a: mac0@0 {
reg = <0>;
- fsl,pinmux-ids = <0x4000 0x4010 0x4020
- 0x4030 0x4040 0x4060 0x4070
- 0x4080 0x4100>;
+ fsl,pinmux-ids = <
+ 0x4000 /* MX28_PAD_ENET0_MDC__ENET0_MDC */
+ 0x4010 /* MX28_PAD_ENET0_MDIO__ENET0_MDIO */
+ 0x4020 /* MX28_PAD_ENET0_RX_EN__ENET0_RX_EN */
+ 0x4030 /* MX28_PAD_ENET0_RXD0__ENET0_RXD0 */
+ 0x4040 /* MX28_PAD_ENET0_RXD1__ENET0_RXD1 */
+ 0x4060 /* MX28_PAD_ENET0_TX_EN__ENET0_TX_EN */
+ 0x4070 /* MX28_PAD_ENET0_TXD0__ENET0_TXD0 */
+ 0x4080 /* MX28_PAD_ENET0_TXD1__ENET0_TXD1 */
+ 0x4100 /* MX28_PAD_ENET_CLK__CLKCTRL_ENET */
+ >;
fsl,drive-strength = <1>;
fsl,voltage = <1>;
fsl,pull-up = <1>;
@@ -179,8 +335,14 @@
mac1_pins_a: mac1@0 {
reg = <0>;
- fsl,pinmux-ids = <0x40f1 0x4091 0x40a1
- 0x40e1 0x40b1 0x40c1>;
+ fsl,pinmux-ids = <
+ 0x40f1 /* MX28_PAD_ENET0_CRS__ENET1_RX_EN */
+ 0x4091 /* MX28_PAD_ENET0_RXD2__ENET1_RXD0 */
+ 0x40a1 /* MX28_PAD_ENET0_RXD3__ENET1_RXD1 */
+ 0x40e1 /* MX28_PAD_ENET0_COL__ENET1_TX_EN */
+ 0x40b1 /* MX28_PAD_ENET0_TXD2__ENET1_TXD0 */
+ 0x40c1 /* MX28_PAD_ENET0_TXD3__ENET1_TXD1 */
+ >;
fsl,drive-strength = <1>;
fsl,voltage = <1>;
fsl,pull-up = <1>;
@@ -188,28 +350,61 @@
mmc0_8bit_pins_a: mmc0-8bit@0 {
reg = <0>;
- fsl,pinmux-ids = <0x2000 0x2010 0x2020
- 0x2030 0x2040 0x2050 0x2060
- 0x2070 0x2080 0x2090 0x20a0>;
+ fsl,pinmux-ids = <
+ 0x2000 /* MX28_PAD_SSP0_DATA0__SSP0_D0 */
+ 0x2010 /* MX28_PAD_SSP0_DATA1__SSP0_D1 */
+ 0x2020 /* MX28_PAD_SSP0_DATA2__SSP0_D2 */
+ 0x2030 /* MX28_PAD_SSP0_DATA3__SSP0_D3 */
+ 0x2040 /* MX28_PAD_SSP0_DATA4__SSP0_D4 */
+ 0x2050 /* MX28_PAD_SSP0_DATA5__SSP0_D5 */
+ 0x2060 /* MX28_PAD_SSP0_DATA6__SSP0_D6 */
+ 0x2070 /* MX28_PAD_SSP0_DATA7__SSP0_D7 */
+ 0x2080 /* MX28_PAD_SSP0_CMD__SSP0_CMD */
+ 0x2090 /* MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT */
+ 0x20a0 /* MX28_PAD_SSP0_SCK__SSP0_SCK */
+ >;
+ fsl,drive-strength = <1>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <1>;
+ };
+
+ mmc0_4bit_pins_a: mmc0-4bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x2000 /* MX28_PAD_SSP0_DATA0__SSP0_D0 */
+ 0x2010 /* MX28_PAD_SSP0_DATA1__SSP0_D1 */
+ 0x2020 /* MX28_PAD_SSP0_DATA2__SSP0_D2 */
+ 0x2030 /* MX28_PAD_SSP0_DATA3__SSP0_D3 */
+ 0x2080 /* MX28_PAD_SSP0_CMD__SSP0_CMD */
+ 0x2090 /* MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT */
+ 0x20a0 /* MX28_PAD_SSP0_SCK__SSP0_SCK */
+ >;
fsl,drive-strength = <1>;
fsl,voltage = <1>;
fsl,pull-up = <1>;
};
mmc0_cd_cfg: mmc0-cd-cfg {
- fsl,pinmux-ids = <0x2090>;
+ fsl,pinmux-ids = <
+ 0x2090 /* MX28_PAD_SSP0_DETECT__SSP0_CARD_DETECT */
+ >;
fsl,pull-up = <0>;
};
mmc0_sck_cfg: mmc0-sck-cfg {
- fsl,pinmux-ids = <0x20a0>;
+ fsl,pinmux-ids = <
+ 0x20a0 /* MX28_PAD_SSP0_SCK__SSP0_SCK */
+ >;
fsl,drive-strength = <2>;
fsl,pull-up = <0>;
};
i2c0_pins_a: i2c0@0 {
reg = <0>;
- fsl,pinmux-ids = <0x3180 0x3190>;
+ fsl,pinmux-ids = <
+ 0x3180 /* MX28_PAD_I2C0_SCL__I2C0_SCL */
+ 0x3190 /* MX28_PAD_I2C0_SDA__I2C0_SDA */
+ >;
fsl,drive-strength = <1>;
fsl,voltage = <1>;
fsl,pull-up = <1>;
@@ -217,8 +412,12 @@
saif0_pins_a: saif0@0 {
reg = <0>;
- fsl,pinmux-ids =
- <0x3140 0x3150 0x3160 0x3170>;
+ fsl,pinmux-ids = <
+ 0x3140 /* MX28_PAD_SAIF0_MCLK__SAIF0_MCLK */
+ 0x3150 /* MX28_PAD_SAIF0_LRCLK__SAIF0_LRCLK */
+ 0x3160 /* MX28_PAD_SAIF0_BITCLK__SAIF0_BITCLK */
+ 0x3170 /* MX28_PAD_SAIF0_SDATA0__SAIF0_SDATA0 */
+ >;
fsl,drive-strength = <2>;
fsl,voltage = <1>;
fsl,pull-up = <1>;
@@ -226,11 +425,88 @@
saif1_pins_a: saif1@0 {
reg = <0>;
- fsl,pinmux-ids = <0x31a0>;
+ fsl,pinmux-ids = <
+ 0x31a0 /* MX28_PAD_SAIF1_SDATA0__SAIF1_SDATA0 */
+ >;
fsl,drive-strength = <2>;
fsl,voltage = <1>;
fsl,pull-up = <1>;
};
+
+ pwm0_pins_a: pwm0@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x3100 /* MX28_PAD_PWM0__PWM_0 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ pwm2_pins_a: pwm2@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x3120 /* MX28_PAD_PWM2__PWM_2 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ lcdif_24bit_pins_a: lcdif-24bit@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x1000 /* MX28_PAD_LCD_D00__LCD_D0 */
+ 0x1010 /* MX28_PAD_LCD_D01__LCD_D1 */
+ 0x1020 /* MX28_PAD_LCD_D02__LCD_D2 */
+ 0x1030 /* MX28_PAD_LCD_D03__LCD_D3 */
+ 0x1040 /* MX28_PAD_LCD_D04__LCD_D4 */
+ 0x1050 /* MX28_PAD_LCD_D05__LCD_D5 */
+ 0x1060 /* MX28_PAD_LCD_D06__LCD_D6 */
+ 0x1070 /* MX28_PAD_LCD_D07__LCD_D7 */
+ 0x1080 /* MX28_PAD_LCD_D08__LCD_D8 */
+ 0x1090 /* MX28_PAD_LCD_D09__LCD_D9 */
+ 0x10a0 /* MX28_PAD_LCD_D10__LCD_D10 */
+ 0x10b0 /* MX28_PAD_LCD_D11__LCD_D11 */
+ 0x10c0 /* MX28_PAD_LCD_D12__LCD_D12 */
+ 0x10d0 /* MX28_PAD_LCD_D13__LCD_D13 */
+ 0x10e0 /* MX28_PAD_LCD_D14__LCD_D14 */
+ 0x10f0 /* MX28_PAD_LCD_D15__LCD_D15 */
+ 0x1100 /* MX28_PAD_LCD_D16__LCD_D16 */
+ 0x1110 /* MX28_PAD_LCD_D17__LCD_D17 */
+ 0x1120 /* MX28_PAD_LCD_D18__LCD_D18 */
+ 0x1130 /* MX28_PAD_LCD_D19__LCD_D19 */
+ 0x1140 /* MX28_PAD_LCD_D20__LCD_D20 */
+ 0x1150 /* MX28_PAD_LCD_D21__LCD_D21 */
+ 0x1160 /* MX28_PAD_LCD_D22__LCD_D22 */
+ 0x1170 /* MX28_PAD_LCD_D23__LCD_D23 */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ can0_pins_a: can0@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x0161 /* MX28_PAD_GPMI_RDY2__CAN0_TX */
+ 0x0171 /* MX28_PAD_GPMI_RDY3__CAN0_RX */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
+
+ can1_pins_a: can1@0 {
+ reg = <0>;
+ fsl,pinmux-ids = <
+ 0x0121 /* MX28_PAD_GPMI_CE2N__CAN1_TX */
+ 0x0131 /* MX28_PAD_GPMI_CE3N__CAN1_RX */
+ >;
+ fsl,drive-strength = <0>;
+ fsl,voltage = <1>;
+ fsl,pull-up = <0>;
+ };
};
digctl@8001c000 {
@@ -272,18 +548,21 @@
};
lcdif@80030000 {
+ compatible = "fsl,imx28-lcdif";
reg = <0x80030000 2000>;
interrupts = <38 86>;
status = "disabled";
};
can0: can@80032000 {
+ compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
reg = <0x80032000 2000>;
interrupts = <8>;
status = "disabled";
};
can1: can@80034000 {
+ compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan";
reg = <0x80034000 2000>;
interrupts = <9>;
status = "disabled";
@@ -370,9 +649,9 @@
};
rtc@80056000 {
+ compatible = "fsl,imx28-rtc", "fsl,stmp3xxx-rtc";
reg = <0x80056000 2000>;
- interrupts = <28 29>;
- status = "disabled";
+ interrupts = <29>;
};
i2c0: i2c@80058000 {
@@ -393,8 +672,11 @@
status = "disabled";
};
- pwm@80064000 {
+ pwm: pwm@80064000 {
+ compatible = "fsl,imx28-pwm", "fsl,imx23-pwm";
reg = <0x80064000 2000>;
+ #pwm-cells = <2>;
+ fsl,pwm-number = <8>;
status = "disabled";
};
@@ -404,30 +686,35 @@
};
auart0: serial@8006a000 {
+ compatible = "fsl,imx28-auart", "fsl,imx23-auart";
reg = <0x8006a000 0x2000>;
interrupts = <112 70 71>;
status = "disabled";
};
auart1: serial@8006c000 {
+ compatible = "fsl,imx28-auart", "fsl,imx23-auart";
reg = <0x8006c000 0x2000>;
interrupts = <113 72 73>;
status = "disabled";
};
auart2: serial@8006e000 {
+ compatible = "fsl,imx28-auart", "fsl,imx23-auart";
reg = <0x8006e000 0x2000>;
interrupts = <114 74 75>;
status = "disabled";
};
auart3: serial@80070000 {
+ compatible = "fsl,imx28-auart", "fsl,imx23-auart";
reg = <0x80070000 0x2000>;
interrupts = <115 76 77>;
status = "disabled";
};
auart4: serial@80072000 {
+ compatible = "fsl,imx28-auart", "fsl,imx23-auart";
reg = <0x80072000 0x2000>;
interrupts = <116 78 79>;
status = "disabled";
@@ -441,11 +728,13 @@
};
usbphy0: usbphy@8007c000 {
+ compatible = "fsl,imx28-usbphy", "fsl,imx23-usbphy";
reg = <0x8007c000 0x2000>;
status = "disabled";
};
usbphy1: usbphy@8007e000 {
+ compatible = "fsl,imx28-usbphy", "fsl,imx23-usbphy";
reg = <0x8007e000 0x2000>;
status = "disabled";
};
@@ -459,13 +748,19 @@
reg = <0x80080000 0x80000>;
ranges;
- usbctrl0: usbctrl@80080000 {
+ usb0: usb@80080000 {
+ compatible = "fsl,imx28-usb", "fsl,imx27-usb";
reg = <0x80080000 0x10000>;
+ interrupts = <93>;
+ fsl,usbphy = <&usbphy0>;
status = "disabled";
};
- usbctrl1: usbctrl@80090000 {
+ usb1: usb@80090000 {
+ compatible = "fsl,imx28-usb", "fsl,imx27-usb";
reg = <0x80090000 0x10000>;
+ interrupts = <92>;
+ fsl,usbphy = <&usbphy1>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx31-bug.dts b/arch/arm/boot/dts/imx31-bug.dts
new file mode 100644
index 000000000000..24731cb78e8e
--- /dev/null
+++ b/arch/arm/boot/dts/imx31-bug.dts
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2012 Denis 'GNUtoo' Carikli <GNUtoo@no-log.org>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/dts-v1/;
+/include/ "imx31.dtsi"
+
+/ {
+ model = "Buglabs i.MX31 Bug 1.x";
+ compatible = "fsl,imx31-bug", "fsl,imx31";
+
+ memory {
+ reg = <0x80000000 0x8000000>; /* 128M */
+ };
+
+ soc {
+ aips@43f00000 { /* AIPS1 */
+ uart5: serial@43fb4000 {
+ fsl,uart-has-rtscts;
+ status = "okay";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi
new file mode 100644
index 000000000000..eef7099f3e3c
--- /dev/null
+++ b/arch/arm/boot/dts/imx31.dtsi
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2012 Denis 'GNUtoo' Carikli <GNUtoo@no-log.org>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ aliases {
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ };
+
+ avic: avic-interrupt-controller@60000000 {
+ compatible = "fsl,imx31-avic", "fsl,avic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x60000000 0x100000>;
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ interrupt-parent = <&avic>;
+ ranges;
+
+ aips@43f00000 { /* AIPS1 */
+ compatible = "fsl,aips-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x43f00000 0x100000>;
+ ranges;
+
+ uart1: serial@43f90000 {
+ compatible = "fsl,imx31-uart", "fsl,imx21-uart";
+ reg = <0x43f90000 0x4000>;
+ interrupts = <45>;
+ status = "disabled";
+ };
+
+ uart2: serial@43f94000 {
+ compatible = "fsl,imx31-uart", "fsl,imx21-uart";
+ reg = <0x43f94000 0x4000>;
+ interrupts = <32>;
+ status = "disabled";
+ };
+
+ uart4: serial@43fb0000 {
+ compatible = "fsl,imx31-uart", "fsl,imx21-uart";
+ reg = <0x43fb0000 0x4000>;
+ interrupts = <46>;
+ status = "disabled";
+ };
+
+ uart5: serial@43fb4000 {
+ compatible = "fsl,imx31-uart", "fsl,imx21-uart";
+ reg = <0x43fb4000 0x4000>;
+ interrupts = <47>;
+ status = "disabled";
+ };
+ };
+
+ spba@50000000 {
+ compatible = "fsl,spba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x50000000 0x100000>;
+ ranges;
+
+ uart3: serial@5000c000 {
+ compatible = "fsl,imx31-uart", "fsl,imx21-uart";
+ reg = <0x5000c000 0x4000>;
+ interrupts = <18>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index bfa65abe8ef2..922adefdd291 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -133,7 +133,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio2: gpio@73f88000 {
@@ -143,7 +143,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio3: gpio@73f8c000 {
@@ -153,7 +153,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio4: gpio@73f90000 {
@@ -163,7 +163,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
wdog@73f98000 { /* WDOG1 */
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index e3e869470cd3..4e735edc78ed 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -135,7 +135,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio2: gpio@53f88000 {
@@ -145,7 +145,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio3: gpio@53f8c000 {
@@ -155,7 +155,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio4: gpio@53f90000 {
@@ -165,7 +165,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
wdog@53f98000 { /* WDOG1 */
@@ -203,7 +203,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio6: gpio@53fe0000 {
@@ -213,7 +213,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio7: gpio@53fe4000 {
@@ -223,7 +223,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
i2c@53fec000 { /* I2C3 */
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index db4c6096c562..d792581672cc 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -22,6 +22,12 @@
};
soc {
+ gpmi-nand@00112000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpmi_nand_1>;
+ status = "disabled"; /* gpmi nand conflicts with SD */
+ };
+
aips-bus@02100000 { /* AIPS2 */
ethernet@02188000 {
phy-mode = "rgmii";
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index e0ec92973e7e..d42e851ceb97 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -27,6 +27,8 @@
ecspi@02008000 { /* eCSPI1 */
fsl,spi-num-chipselects = <1>;
cs-gpios = <&gpio3 19 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ecspi1_1>;
status = "okay";
flash: m25p80@0 {
@@ -42,9 +44,31 @@
};
};
+ iomuxc@020e0000 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_hog>;
+
+ gpios {
+ pinctrl_gpio_hog: gpiohog {
+ fsl,pins = <
+ 144 0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */
+ 121 0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */
+ >;
+ };
+ };
+ };
};
aips-bus@02100000 { /* AIPS2 */
+ usb@02184000 { /* USB OTG */
+ vbus-supply = <&reg_usb_otg_vbus>;
+ status = "okay";
+ };
+
+ usb@02184200 { /* USB1 */
+ status = "okay";
+ };
+
ethernet@02188000 {
phy-mode = "rgmii";
phy-reset-gpios = <&gpio3 23 0>;
@@ -111,6 +135,15 @@
regulator-max-microvolt = <3300000>;
regulator-always-on;
};
+
+ reg_usb_otg_vbus: usb_otg_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_otg_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio3 22 0>;
+ enable-active-high;
+ };
};
sound {
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 8c90cbac945f..c25d49584814 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -87,6 +87,23 @@
interrupt-parent = <&intc>;
ranges;
+ dma-apbh@00110000 {
+ compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x00110000 0x2000>;
+ };
+
+ gpmi-nand@00112000 {
+ compatible = "fsl,imx6q-gpmi-nand";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0x00112000 0x2000>, <0x00114000 0x2000>;
+ reg-names = "gpmi-nand", "bch";
+ interrupts = <0 13 0x04>, <0 15 0x04>;
+ interrupt-names = "gpmi-dma", "bch";
+ fsl,gpmi-dma-channel = <0>;
+ status = "disabled";
+ };
+
timer@00a00600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x00a00600 0x20>;
@@ -266,7 +283,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio2: gpio@020a0000 {
@@ -276,7 +293,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio3: gpio@020a4000 {
@@ -286,7 +303,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio4: gpio@020a8000 {
@@ -296,7 +313,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio5: gpio@020ac000 {
@@ -306,7 +323,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio6: gpio@020b0000 {
@@ -316,7 +333,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
gpio7: gpio@020b4000 {
@@ -326,7 +343,7 @@
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
- #interrupt-cells = <1>;
+ #interrupt-cells = <2>;
};
kpp@020b8000 {
@@ -444,12 +461,14 @@
};
};
- usbphy@020c9000 { /* USBPHY1 */
+ usbphy1: usbphy@020c9000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
reg = <0x020c9000 0x1000>;
interrupts = <0 44 0x04>;
};
- usbphy@020ca000 { /* USBPHY2 */
+ usbphy2: usbphy@020ca000 {
+ compatible = "fsl,imx6q-usbphy", "fsl,imx23-usbphy";
reg = <0x020ca000 0x1000>;
interrupts = <0 45 0x04>;
};
@@ -495,6 +514,30 @@
};
};
+ gpmi-nand {
+ pinctrl_gpmi_nand_1: gpmi-nand-1 {
+ fsl,pins = <1328 0xb0b1 /* MX6Q_PAD_NANDF_CLE__RAWNAND_CLE */
+ 1336 0xb0b1 /* MX6Q_PAD_NANDF_ALE__RAWNAND_ALE */
+ 1344 0xb0b1 /* MX6Q_PAD_NANDF_WP_B__RAWNAND_RESETN */
+ 1352 0xb000 /* MX6Q_PAD_NANDF_RB0__RAWNAND_READY0 */
+ 1360 0xb0b1 /* MX6Q_PAD_NANDF_CS0__RAWNAND_CE0N */
+ 1365 0xb0b1 /* MX6Q_PAD_NANDF_CS1__RAWNAND_CE1N */
+ 1371 0xb0b1 /* MX6Q_PAD_NANDF_CS2__RAWNAND_CE2N */
+ 1378 0xb0b1 /* MX6Q_PAD_NANDF_CS3__RAWNAND_CE3N */
+ 1387 0xb0b1 /* MX6Q_PAD_SD4_CMD__RAWNAND_RDN */
+ 1393 0xb0b1 /* MX6Q_PAD_SD4_CLK__RAWNAND_WRN */
+ 1397 0xb0b1 /* MX6Q_PAD_NANDF_D0__RAWNAND_D0 */
+ 1405 0xb0b1 /* MX6Q_PAD_NANDF_D1__RAWNAND_D1 */
+ 1413 0xb0b1 /* MX6Q_PAD_NANDF_D2__RAWNAND_D2 */
+ 1421 0xb0b1 /* MX6Q_PAD_NANDF_D3__RAWNAND_D3 */
+ 1429 0xb0b1 /* MX6Q_PAD_NANDF_D4__RAWNAND_D4 */
+ 1437 0xb0b1 /* MX6Q_PAD_NANDF_D5__RAWNAND_D5 */
+ 1445 0xb0b1 /* MX6Q_PAD_NANDF_D6__RAWNAND_D6 */
+ 1453 0xb0b1 /* MX6Q_PAD_NANDF_D7__RAWNAND_D7 */
+ 1463 0x00b1>; /* MX6Q_PAD_SD4_DAT0__RAWNAND_DQS */
+ };
+ };
+
i2c1 {
pinctrl_i2c1_1: i2c1grp-1 {
fsl,pins = <137 0x4001b8b1 /* MX6Q_PAD_EIM_D21__I2C1_SCL */
@@ -538,6 +581,14 @@
1517 0x17059>; /* MX6Q_PAD_SD4_DAT7__USDHC4_DAT7 */
};
};
+
+ ecspi1 {
+ pinctrl_ecspi1_1: ecspi1grp-1 {
+ fsl,pins = <101 0x100b1 /* MX6Q_PAD_EIM_D17__ECSPI1_MISO */
+ 109 0x100b1 /* MX6Q_PAD_EIM_D18__ECSPI1_MOSI */
+ 94 0x100b1>; /* MX6Q_PAD_EIM_D16__ECSPI1_SCLK */
+ };
+ };
};
dcic@020e4000 { /* DCIC1 */
@@ -573,6 +624,36 @@
reg = <0x0217c000 0x4000>;
};
+ usb@02184000 { /* USB OTG */
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184000 0x200>;
+ interrupts = <0 43 0x04>;
+ fsl,usbphy = <&usbphy1>;
+ status = "disabled";
+ };
+
+ usb@02184200 { /* USB1 */
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184200 0x200>;
+ interrupts = <0 40 0x04>;
+ fsl,usbphy = <&usbphy2>;
+ status = "disabled";
+ };
+
+ usb@02184400 { /* USB2 */
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184400 0x200>;
+ interrupts = <0 41 0x04>;
+ status = "disabled";
+ };
+
+ usb@02184600 { /* USB3 */
+ compatible = "fsl,imx6q-usb", "fsl,imx27-usb";
+ reg = <0x02184600 0x200>;
+ interrupts = <0 42 0x04>;
+ status = "disabled";
+ };
+
ethernet@02188000 {
compatible = "fsl,imx6q-fec";
reg = <0x02188000 0x4000>;
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index 3f5dad801a98..e5ffe960dbf3 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -35,13 +35,14 @@
slc: flash@20020000 {
compatible = "nxp,lpc3220-slc";
reg = <0x20020000 0x1000>;
- status = "disable";
+ status = "disabled";
};
- mlc: flash@200B0000 {
+ mlc: flash@200a8000 {
compatible = "nxp,lpc3220-mlc";
- reg = <0x200B0000 0x1000>;
- status = "disable";
+ reg = <0x200a8000 0x11000>;
+ interrupts = <11 0>;
+ status = "disabled";
};
dma@31000000 {
@@ -57,21 +58,21 @@
compatible = "nxp,ohci-nxp", "usb-ohci";
reg = <0x31020000 0x300>;
interrupts = <0x3b 0>;
- status = "disable";
+ status = "disabled";
};
usbd@31020000 {
compatible = "nxp,lpc3220-udc";
reg = <0x31020000 0x300>;
interrupts = <0x3d 0>, <0x3e 0>, <0x3c 0>, <0x3a 0>;
- status = "disable";
+ status = "disabled";
};
clcd@31040000 {
compatible = "arm,pl110", "arm,primecell";
reg = <0x31040000 0x1000>;
interrupts = <0x0e 0>;
- status = "disable";
+ status = "disabled";
};
mac: ethernet@31060000 {
@@ -114,9 +115,10 @@
};
sd@20098000 {
- compatible = "arm,pl180", "arm,primecell";
+ compatible = "arm,pl18x", "arm,primecell";
reg = <0x20098000 0x1000>;
interrupts = <0x0f 0>, <0x0d 0>;
+ status = "disabled";
};
i2s1: i2s@2009C000 {
@@ -124,24 +126,42 @@
reg = <0x2009C000 0x1000>;
};
+ /* UART5 first since it is the default console, ttyS0 */
+ uart5: serial@40090000 {
+ /* actually, ns16550a w/ 64 byte fifos! */
+ compatible = "nxp,lpc3220-uart";
+ reg = <0x40090000 0x1000>;
+ interrupts = <9 0>;
+ clock-frequency = <13000000>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+
uart3: serial@40080000 {
- compatible = "nxp,serial";
+ compatible = "nxp,lpc3220-uart";
reg = <0x40080000 0x1000>;
+ interrupts = <7 0>;
+ clock-frequency = <13000000>;
+ reg-shift = <2>;
+ status = "disabled";
};
uart4: serial@40088000 {
- compatible = "nxp,serial";
+ compatible = "nxp,lpc3220-uart";
reg = <0x40088000 0x1000>;
- };
-
- uart5: serial@40090000 {
- compatible = "nxp,serial";
- reg = <0x40090000 0x1000>;
+ interrupts = <8 0>;
+ clock-frequency = <13000000>;
+ reg-shift = <2>;
+ status = "disabled";
};
uart6: serial@40098000 {
- compatible = "nxp,serial";
+ compatible = "nxp,lpc3220-uart";
reg = <0x40098000 0x1000>;
+ interrupts = <10 0>;
+ clock-frequency = <13000000>;
+ reg-shift = <2>;
+ status = "disabled";
};
i2c1: i2c@400A0000 {
@@ -192,18 +212,24 @@
};
uart1: serial@40014000 {
- compatible = "nxp,serial";
+ compatible = "nxp,lpc3220-hsuart";
reg = <0x40014000 0x1000>;
+ interrupts = <26 0>;
+ status = "disabled";
};
uart2: serial@40018000 {
- compatible = "nxp,serial";
+ compatible = "nxp,lpc3220-hsuart";
reg = <0x40018000 0x1000>;
+ interrupts = <25 0>;
+ status = "disabled";
};
- uart7: serial@4001C000 {
- compatible = "nxp,serial";
- reg = <0x4001C000 0x1000>;
+ uart7: serial@4001c000 {
+ compatible = "nxp,lpc3220-hsuart";
+ reg = <0x4001c000 0x1000>;
+ interrupts = <24 0>;
+ status = "disabled";
};
rtc@40024000 {
@@ -235,21 +261,28 @@
compatible = "nxp,lpc3220-adc";
reg = <0x40048000 0x1000>;
interrupts = <0x27 0>;
- status = "disable";
+ status = "disabled";
};
tsc@40048000 {
compatible = "nxp,lpc3220-tsc";
reg = <0x40048000 0x1000>;
interrupts = <0x27 0>;
- status = "disable";
+ status = "disabled";
};
key@40050000 {
compatible = "nxp,lpc3220-key";
reg = <0x40050000 0x1000>;
+ interrupts = <54 0>;
+ status = "disabled";
};
+ pwm: pwm@4005C000 {
+ compatible = "nxp,lpc3220-pwm";
+ reg = <0x4005C000 0x8>;
+ status = "disabled";
+ };
};
};
};
diff --git a/arch/arm/boot/dts/omap2420-h4.dts b/arch/arm/boot/dts/omap2420-h4.dts
new file mode 100644
index 000000000000..25b50b759dec
--- /dev/null
+++ b/arch/arm/boot/dts/omap2420-h4.dts
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "omap2.dtsi"
+
+/ {
+ model = "TI OMAP2420 H4 board";
+ compatible = "ti,omap2420-h4", "ti,omap2420", "ti,omap2";
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x84000000>; /* 64 MB */
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index 5b4506c0a8c4..cdcb98c7e075 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -61,9 +61,9 @@
};
&mmc2 {
- status = "disable";
+ status = "disabled";
};
&mmc3 {
- status = "disable";
+ status = "disabled";
};
diff --git a/arch/arm/boot/dts/omap3-evm.dts b/arch/arm/boot/dts/omap3-evm.dts
index 2eee16ec59b4..f349ee9182ce 100644
--- a/arch/arm/boot/dts/omap3-evm.dts
+++ b/arch/arm/boot/dts/omap3-evm.dts
@@ -18,3 +18,31 @@
reg = <0x80000000 0x10000000>; /* 256 MB */
};
};
+
+&i2c1 {
+ clock-frequency = <2600000>;
+
+ twl: twl@48 {
+ reg = <0x48>;
+ interrupts = <7>; /* SYS_NIRQ cascaded to intc */
+ interrupt-parent = <&intc>;
+ };
+};
+
+/include/ "twl4030.dtsi"
+
+&i2c2 {
+ clock-frequency = <400000>;
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+
+ /*
+ * TVP5146 Video decoder-in for analog input support.
+ */
+ tvp5146@5c {
+ compatible = "ti,tvp5146m2";
+ reg = <0x5c>;
+ };
+};
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 99474fa5fac4..810947198208 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -215,5 +215,10 @@
compatible = "ti,omap3-hsmmc";
ti,hwmods = "mmc3";
};
+
+ wdt2: wdt@48314000 {
+ compatible = "ti,omap3-wdt";
+ ti,hwmods = "wd_timer2";
+ };
};
};
diff --git a/arch/arm/boot/dts/omap4-panda.dts b/arch/arm/boot/dts/omap4-panda.dts
index 1efe0c587985..9880c12877b3 100644
--- a/arch/arm/boot/dts/omap4-panda.dts
+++ b/arch/arm/boot/dts/omap4-panda.dts
@@ -32,6 +32,30 @@
linux,default-trigger = "mmc0";
};
};
+
+ sound: sound {
+ compatible = "ti,abe-twl6040";
+ ti,model = "PandaBoard";
+
+ ti,mclk-freq = <38400000>;
+
+ ti,mcpdm = <&mcpdm>;
+
+ ti,twl6040 = <&twl6040>;
+
+ /* Audio routing */
+ ti,audio-routing =
+ "Headset Stereophone", "HSOL",
+ "Headset Stereophone", "HSOR",
+ "Ext Spk", "HFL",
+ "Ext Spk", "HFR",
+ "Line Out", "AUXL",
+ "Line Out", "AUXR",
+ "HSMIC", "Headset Mic",
+ "Headset Mic", "Headset Mic Bias",
+ "AFML", "Line In",
+ "AFMR", "Line In";
+ };
};
&i2c1 {
@@ -43,6 +67,19 @@
interrupts = <0 7 4>; /* IRQ_SYS_1N cascaded to gic */
interrupt-parent = <&gic>;
};
+
+ twl6040: twl@4b {
+ compatible = "ti,twl6040";
+ reg = <0x4b>;
+ /* SPI = 0, IRQ# = 119, 4 = active high level-sensitive */
+ interrupts = <0 119 4>; /* IRQ_SYS_2N cascaded to gic */
+ interrupt-parent = <&gic>;
+ ti,audpwron-gpio = <&gpio4 31 0>; /* gpio line 127 */
+
+ vio-supply = <&v1v8>;
+ v2v1-supply = <&v2v1>;
+ enable-active-high;
+ };
};
/include/ "twl6030.dtsi"
@@ -74,15 +111,15 @@
};
&mmc2 {
- status = "disable";
+ status = "disabled";
};
&mmc3 {
- status = "disable";
+ status = "disabled";
};
&mmc4 {
- status = "disable";
+ status = "disabled";
};
&mmc5 {
diff --git a/arch/arm/boot/dts/omap4-pandaES.dts b/arch/arm/boot/dts/omap4-pandaES.dts
new file mode 100644
index 000000000000..d4ba43a48d9b
--- /dev/null
+++ b/arch/arm/boot/dts/omap4-pandaES.dts
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/include/ "omap4-panda.dts"
+
+/* Audio routing is differnet between PandaBoard4430 and PandaBoardES */
+&sound {
+ ti,model = "PandaBoardES";
+
+ /* Audio routing */
+ ti,audio-routing =
+ "Headset Stereophone", "HSOL",
+ "Headset Stereophone", "HSOR",
+ "Ext Spk", "HFL",
+ "Ext Spk", "HFR",
+ "Line Out", "AUXL",
+ "Line Out", "AUXR",
+ "AFML", "Line In",
+ "AFMR", "Line In";
+};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index d08c4d137280..72216e932fc0 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -28,6 +28,14 @@
regulator-boot-on;
};
+ vbat: fixedregulator@2 {
+ compatible = "regulator-fixed";
+ regulator-name = "VBAT";
+ regulator-min-microvolt = <3750000>;
+ regulator-max-microvolt = <3750000>;
+ regulator-boot-on;
+ };
+
leds {
compatible = "gpio-leds";
debug0 {
@@ -70,6 +78,41 @@
gpios = <&gpio5 11 0>; /* 139 */
};
};
+
+ sound {
+ compatible = "ti,abe-twl6040";
+ ti,model = "SDP4430";
+
+ ti,jack-detection = <1>;
+ ti,mclk-freq = <38400000>;
+
+ ti,mcpdm = <&mcpdm>;
+ ti,dmic = <&dmic>;
+
+ ti,twl6040 = <&twl6040>;
+
+ /* Audio routing */
+ ti,audio-routing =
+ "Headset Stereophone", "HSOL",
+ "Headset Stereophone", "HSOR",
+ "Earphone Spk", "EP",
+ "Ext Spk", "HFL",
+ "Ext Spk", "HFR",
+ "Line Out", "AUXL",
+ "Line Out", "AUXR",
+ "Vibrator", "VIBRAL",
+ "Vibrator", "VIBRAR",
+ "HSMIC", "Headset Mic",
+ "Headset Mic", "Headset Mic Bias",
+ "MAINMIC", "Main Handset Mic",
+ "Main Handset Mic", "Main Mic Bias",
+ "SUBMIC", "Sub Handset Mic",
+ "Sub Handset Mic", "Main Mic Bias",
+ "AFML", "Line In",
+ "AFMR", "Line In",
+ "DMic", "Digital Mic",
+ "Digital Mic", "Digital Mic1 Bias";
+ };
};
&i2c1 {
@@ -81,6 +124,31 @@
interrupts = <0 7 4>; /* IRQ_SYS_1N cascaded to gic */
interrupt-parent = <&gic>;
};
+
+ twl6040: twl@4b {
+ compatible = "ti,twl6040";
+ reg = <0x4b>;
+ /* SPI = 0, IRQ# = 119, 4 = active high level-sensitive */
+ interrupts = <0 119 4>; /* IRQ_SYS_2N cascaded to gic */
+ interrupt-parent = <&gic>;
+ ti,audpwron-gpio = <&gpio4 31 0>; /* gpio line 127 */
+
+ vio-supply = <&v1v8>;
+ v2v1-supply = <&v2v1>;
+ enable-active-high;
+
+ /* regulators for vibra motor */
+ vddvibl-supply = <&vbat>;
+ vddvibr-supply = <&vbat>;
+
+ vibra {
+ /* Vibra driver, motor resistance parameters */
+ ti,vibldrv-res = <8>;
+ ti,vibrdrv-res = <3>;
+ ti,viblmotor-res = <10>;
+ ti,vibrmotor-res = <10>;
+ };
+ };
};
/include/ "twl6030.dtsi"
@@ -147,11 +215,11 @@
};
&mmc3 {
- status = "disable";
+ status = "disabled";
};
&mmc4 {
- status = "disable";
+ status = "disabled";
};
&mmc5 {
diff --git a/arch/arm/boot/dts/omap4-var_som.dts b/arch/arm/boot/dts/omap4-var_som.dts
new file mode 100644
index 000000000000..6601e6af6092
--- /dev/null
+++ b/arch/arm/boot/dts/omap4-var_som.dts
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2012 Variscite Ltd. - http://www.variscite.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "omap4.dtsi"
+
+/ {
+ model = "Variscite OMAP4 SOM";
+ compatible = "var,omap4-var_som", "ti,omap4430", "ti,omap4";
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>; /* 1 GB */
+ };
+
+ vdd_eth: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "VDD_ETH";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+};
+
+&i2c1 {
+ clock-frequency = <400000>;
+
+ twl: twl@48 {
+ reg = <0x48>;
+ /* SPI = 0, IRQ# = 7, 4 = active high level-sensitive */
+ interrupts = <0 7 4>; /* IRQ_SYS_1N cascaded to gic */
+ interrupt-parent = <&gic>;
+ };
+};
+
+/include/ "twl6030.dtsi"
+
+&i2c2 {
+ clock-frequency = <400000>;
+};
+
+&i2c3 {
+ clock-frequency = <400000>;
+
+ /*
+ * Temperature Sensor
+ * http://www.ti.com/lit/ds/symlink/tmp105.pdf
+ */
+ tmp105@49 {
+ compatible = "ti,tmp105";
+ reg = <0x49>;
+ };
+};
+
+&i2c4 {
+ clock-frequency = <400000>;
+};
+
+&mcspi1 {
+ eth@0 {
+ compatible = "ks8851";
+ spi-max-frequency = <24000000>;
+ reg = <0>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <11>; /* gpio line 171 */
+ vdd-supply = <&vdd_eth>;
+ };
+};
+
+&mmc1 {
+ vmmc-supply = <&vmmc>;
+ ti,bus-width = <8>;
+ ti,non-removable;
+};
+
+&mmc2 {
+ status = "disabled";
+};
+
+&mmc3 {
+ status = "disabled";
+};
+
+&mmc4 {
+ status = "disabled";
+};
+
+&mmc5 {
+ ti,bus-width = <4>;
+};
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 359c4979c8aa..04cbbcb6ff91 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -272,5 +272,28 @@
ti,hwmods = "mmc5";
ti,needs-special-reset;
};
+
+ wdt2: wdt@4a314000 {
+ compatible = "ti,omap4-wdt", "ti,omap3-wdt";
+ ti,hwmods = "wd_timer2";
+ };
+
+ mcpdm: mcpdm@40132000 {
+ compatible = "ti,omap4-mcpdm";
+ reg = <0x40132000 0x7f>, /* MPU private access */
+ <0x49032000 0x7f>; /* L3 Interconnect */
+ interrupts = <0 112 0x4>;
+ interrupt-parent = <&gic>;
+ ti,hwmods = "mcpdm";
+ };
+
+ dmic: dmic@4012e000 {
+ compatible = "ti,omap4-dmic";
+ reg = <0x4012e000 0x7f>, /* MPU private access */
+ <0x4902e000 0x7f>; /* L3 Interconnect */
+ interrupts = <0 114 0x4>;
+ interrupt-parent = <&gic>;
+ ti,hwmods = "dmic";
+ };
};
};
diff --git a/arch/arm/boot/dts/omap5-evm.dts b/arch/arm/boot/dts/omap5-evm.dts
new file mode 100644
index 000000000000..200c39ad1c82
--- /dev/null
+++ b/arch/arm/boot/dts/omap5-evm.dts
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "omap5.dtsi"
+
+/ {
+ model = "TI OMAP5 EVM board";
+ compatible = "ti,omap5-evm", "ti,omap5";
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x40000000>; /* 1 GB */
+ };
+};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
new file mode 100644
index 000000000000..57e527083746
--- /dev/null
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ * Based on "omap4.dtsi"
+ */
+
+/*
+ * Carveout for multimedia usecases
+ * It should be the last 48MB of the first 512MB memory part
+ * In theory, it should not even exist. That zone should be reserved
+ * dynamically during the .reserve callback.
+ */
+/memreserve/ 0x9d000000 0x03000000;
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "ti,omap5";
+ interrupt-parent = <&gic>;
+
+ aliases {
+ serial0 = &uart1;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ serial5 = &uart6;
+ };
+
+ cpus {
+ cpu@0 {
+ compatible = "arm,cortex-a15";
+ };
+ cpu@1 {
+ compatible = "arm,cortex-a15";
+ };
+ };
+
+ /*
+ * The soc node represents the soc top level view. It is uses for IPs
+ * that are not memory mapped in the MPU view or for the MPU itself.
+ */
+ soc {
+ compatible = "ti,omap-infra";
+ mpu {
+ compatible = "ti,omap5-mpu";
+ ti,hwmods = "mpu";
+ };
+ };
+
+ /*
+ * XXX: Use a flat representation of the OMAP3 interconnect.
+ * The real OMAP interconnect network is quite complex.
+ * Since that will not bring real advantage to represent that in DT for
+ * the moment, just use a fake OCP bus entry to represent the whole bus
+ * hierarchy.
+ */
+ ocp {
+ compatible = "ti,omap4-l3-noc", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3";
+
+ gic: interrupt-controller@48211000 {
+ compatible = "arm,cortex-a15-gic";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ reg = <0x48211000 0x1000>,
+ <0x48212000 0x1000>;
+ };
+
+ gpio1: gpio@4ae10000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio1";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio2: gpio@48055000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio2";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio3: gpio@48057000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio3";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio4: gpio@48059000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio4";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio5: gpio@4805b000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio5";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio6: gpio@4805d000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio6";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio7: gpio@48051000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio7";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ gpio8: gpio@48053000 {
+ compatible = "ti,omap4-gpio";
+ ti,hwmods = "gpio8";
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ uart1: serial@4806a000 {
+ compatible = "ti,omap4-uart";
+ ti,hwmods = "uart1";
+ clock-frequency = <48000000>;
+ };
+
+ uart2: serial@4806c000 {
+ compatible = "ti,omap4-uart";
+ ti,hwmods = "uart2";
+ clock-frequency = <48000000>;
+ };
+
+ uart3: serial@48020000 {
+ compatible = "ti,omap4-uart";
+ ti,hwmods = "uart3";
+ clock-frequency = <48000000>;
+ };
+
+ uart4: serial@4806e000 {
+ compatible = "ti,omap4-uart";
+ ti,hwmods = "uart4";
+ clock-frequency = <48000000>;
+ };
+
+ uart5: serial@48066000 {
+ compatible = "ti,omap5-uart";
+ ti,hwmods = "uart5";
+ clock-frequency = <48000000>;
+ };
+
+ uart6: serial@48068000 {
+ compatible = "ti,omap6-uart";
+ ti,hwmods = "uart6";
+ clock-frequency = <48000000>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/phy3250.dts b/arch/arm/boot/dts/phy3250.dts
index c4ff6d1a018b..802ec5b2fd00 100644
--- a/arch/arm/boot/dts/phy3250.dts
+++ b/arch/arm/boot/dts/phy3250.dts
@@ -54,6 +54,17 @@
#address-cells = <1>;
#size-cells = <1>;
+ nxp,wdr-clks = <14>;
+ nxp,wwidth = <40000000>;
+ nxp,whold = <100000000>;
+ nxp,wsetup = <100000000>;
+ nxp,rdr-clks = <14>;
+ nxp,rwidth = <40000000>;
+ nxp,rhold = <66666666>;
+ nxp,rsetup = <100000000>;
+ nand-on-flash-bbt;
+ gpios = <&gpio 5 19 1>; /* GPO_P3 19, active low */
+
mtd0@00000000 {
label = "phy3250-boot";
reg = <0x00000000 0x00064000>;
@@ -83,6 +94,14 @@
};
apb {
+ uart5: serial@40090000 {
+ status = "okay";
+ };
+
+ uart3: serial@40080000 {
+ status = "okay";
+ };
+
i2c1: i2c@400A0000 {
clock-frequency = <100000>;
@@ -114,16 +133,58 @@
};
ssp0: ssp@20084000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pl022,num-chipselects = <1>;
+ cs-gpios = <&gpio 3 5 0>;
+
eeprom: at25@0 {
+ pl022,hierarchy = <0>;
+ pl022,interface = <0>;
+ pl022,slave-tx-disable = <0>;
+ pl022,com-mode = <0>;
+ pl022,rx-level-trig = <1>;
+ pl022,tx-level-trig = <1>;
+ pl022,ctrl-len = <11>;
+ pl022,wait-state = <0>;
+ pl022,duplex = <0>;
+
+ at25,byte-len = <0x8000>;
+ at25,addr-mode = <2>;
+ at25,page-size = <64>;
+
compatible = "atmel,at25";
+ reg = <0>;
+ spi-max-frequency = <5000000>;
};
};
+
+ sd@20098000 {
+ wp-gpios = <&gpio 3 0 0>;
+ cd-gpios = <&gpio 3 1 0>;
+ cd-inverted;
+ bus-width = <4>;
+ status = "okay";
+ };
};
fab {
+ uart2: serial@40018000 {
+ status = "okay";
+ };
+
tsc@40048000 {
status = "okay";
};
+
+ key@40050000 {
+ status = "okay";
+ keypad,num-rows = <1>;
+ keypad,num-columns = <1>;
+ nxp,debounce-delay-ms = <3>;
+ nxp,scan-delay-ms = <34>;
+ linux,keymap = <0x00000002>;
+ };
};
};
diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts
index ec3c33975110..7e334d4cae21 100644
--- a/arch/arm/boot/dts/snowball.dts
+++ b/arch/arm/boot/dts/snowball.dts
@@ -77,6 +77,8 @@
used-led {
label = "user_led";
gpios = <&gpio4 14 0x4>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
};
};
@@ -101,15 +103,30 @@
};
};
+ // External Micro SD slot
sdi@80126000 {
- status = "enabled";
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <8>;
+ mmc-cap-mmc-highspeed;
vmmc-supply = <&ab8500_ldo_aux3_reg>;
+
+ #gpio-cells = <1>;
cd-gpios = <&gpio6 26 0x4>; // 218
+ cd-inverted;
+
+ status = "okay";
};
+ // On-board eMMC
sdi@80114000 {
- status = "enabled";
+ arm,primecell-periphid = <0x10480180>;
+ max-frequency = <50000000>;
+ bus-width = <8>;
+ mmc-cap-mmc-highspeed;
vmmc-supply = <&ab8500_ldo_aux2_reg>;
+
+ status = "okay";
};
uart@80120000 {
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
new file mode 100644
index 000000000000..0772f5739f59
--- /dev/null
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2012 Altera <www.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/include/ "skeleton.dtsi"
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ aliases {
+ ethernet0 = &gmac0;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ compatible = "arm,cortex-a9";
+ device_type = "cpu";
+ reg = <0>;
+ next-level-cache = <&L2>;
+ };
+ cpu@1 {
+ compatible = "arm,cortex-a9";
+ device_type = "cpu";
+ reg = <1>;
+ next-level-cache = <&L2>;
+ };
+ };
+
+ intc: intc@fffed000 {
+ compatible = "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0xfffed000 0x1000>,
+ <0xfffec100 0x100>;
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+ device_type = "soc";
+ interrupt-parent = <&intc>;
+ ranges;
+
+ amba {
+ compatible = "arm,amba-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ pdma: pdma@ffe01000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0xffe01000 0x1000>;
+ interrupts = <0 180 4>;
+ };
+ };
+
+ gmac0: stmmac@ff700000 {
+ compatible = "altr,socfpga-stmmac", "snps,dwmac-3.70a", "snps,dwmac";
+ reg = <0xff700000 0x2000>;
+ interrupts = <0 115 4>;
+ interrupt-names = "macirq";
+ mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
+ phy-mode = "gmii";
+ };
+
+ L2: l2-cache@fffef000 {
+ compatible = "arm,pl310-cache";
+ reg = <0xfffef000 0x1000>;
+ interrupts = <0 38 0x04>;
+ cache-unified;
+ cache-level = <2>;
+ };
+
+ /* Local timer */
+ timer@fffec600 {
+ compatible = "arm,cortex-a9-twd-timer";
+ reg = <0xfffec600 0x100>;
+ interrupts = <1 13 0xf04>;
+ };
+
+ timer0: timer@ffc08000 {
+ compatible = "snps,dw-apb-timer-sp";
+ interrupts = <0 167 4>;
+ clock-frequency = <200000000>;
+ reg = <0xffc08000 0x1000>;
+ };
+
+ timer1: timer@ffc09000 {
+ compatible = "snps,dw-apb-timer-sp";
+ interrupts = <0 168 4>;
+ clock-frequency = <200000000>;
+ reg = <0xffc09000 0x1000>;
+ };
+
+ timer2: timer@ffd00000 {
+ compatible = "snps,dw-apb-timer-osc";
+ interrupts = <0 169 4>;
+ clock-frequency = <200000000>;
+ reg = <0xffd00000 0x1000>;
+ };
+
+ timer3: timer@ffd01000 {
+ compatible = "snps,dw-apb-timer-osc";
+ interrupts = <0 170 4>;
+ clock-frequency = <200000000>;
+ reg = <0xffd01000 0x1000>;
+ };
+
+ uart0: uart@ffc02000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0xffc02000 0x1000>;
+ clock-frequency = <7372800>;
+ interrupts = <0 162 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+
+ uart1: uart@ffc03000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0xffc03000 0x1000>;
+ clock-frequency = <7372800>;
+ interrupts = <0 163 4>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5.dts b/arch/arm/boot/dts/socfpga_cyclone5.dts
new file mode 100644
index 000000000000..ab7e4a94299f
--- /dev/null
+++ b/arch/arm/boot/dts/socfpga_cyclone5.dts
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2012 Altera Corporation <www.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+/include/ "socfpga.dtsi"
+
+/ {
+ model = "Altera SOCFPGA Cyclone V";
+ compatible = "altr,socfpga-cyclone5";
+
+ chosen {
+ bootargs = "console=ttyS0,57600";
+ };
+
+ memory {
+ name = "memory";
+ device_type = "memory";
+ reg = <0x0 0x10000000>; /* 256MB */
+ };
+};
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 10dcec7e7321..f7b84aced654 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -43,8 +43,8 @@
pmu {
compatible = "arm,cortex-a9-pmu";
- interrupts = <0 8 0x04
- 0 9 0x04>;
+ interrupts = <0 6 0x04
+ 0 7 0x04>;
};
L2: l2-cache {
@@ -119,8 +119,8 @@
gmac0: eth@e2000000 {
compatible = "st,spear600-gmac";
reg = <0xe2000000 0x8000>;
- interrupts = <0 23 0x4
- 0 24 0x4>;
+ interrupts = <0 33 0x4
+ 0 34 0x4>;
interrupt-names = "macirq", "eth_wake_irq";
status = "disabled";
};
@@ -202,6 +202,7 @@
kbd@e0300000 {
compatible = "st,spear300-kbd";
reg = <0xe0300000 0x1000>;
+ interrupts = <0 52 0x4>;
status = "disabled";
};
@@ -224,7 +225,7 @@
serial@e0000000 {
compatible = "arm,pl011", "arm,primecell";
reg = <0xe0000000 0x1000>;
- interrupts = <0 36 0x4>;
+ interrupts = <0 35 0x4>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index c13fd1f3b09f..e4e912f95024 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -15,8 +15,8 @@
/include/ "spear320.dtsi"
/ {
- model = "ST SPEAr300 Evaluation Board";
- compatible = "st,spear300-evb", "st,spear300";
+ model = "ST SPEAr320 Evaluation Board";
+ compatible = "st,spear320-evb", "st,spear320";
#address-cells = <1>;
#size-cells = <1>;
@@ -26,7 +26,7 @@
ahb {
pinmux@b3000000 {
- st,pinmux-mode = <3>;
+ st,pinmux-mode = <4>;
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index 089f0a42c50e..a3c36e47d7ef 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -181,6 +181,7 @@
timer@f0000000 {
compatible = "st,spear-timer";
reg = <0xf0000000 0x400>;
+ interrupt-parent = <&vic0>;
interrupts = <16>;
};
};
diff --git a/arch/arm/boot/dts/tegra-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts
index 7de701365fce..f146dbf6f7f8 100644
--- a/arch/arm/boot/dts/tegra-harmony.dts
+++ b/arch/arm/boot/dts/tegra20-harmony.dts
@@ -307,7 +307,6 @@
cd-gpios = <&gpio 58 0>; /* gpio PH2 */
wp-gpios = <&gpio 59 0>; /* gpio PH3 */
power-gpios = <&gpio 70 0>; /* gpio PI6 */
- support-8bit;
bus-width = <8>;
};
diff --git a/arch/arm/boot/dts/tegra-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index bfeb117d5aea..684a9e1ff7e9 100644
--- a/arch/arm/boot/dts/tegra-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -301,7 +301,6 @@
sdhci@c8000600 {
status = "okay";
- support-8bit;
bus-width = <8>;
};
diff --git a/arch/arm/boot/dts/tegra-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 89cb7f2acd92..85e621ab2968 100644
--- a/arch/arm/boot/dts/tegra-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -64,11 +64,6 @@
nvidia,pins = "dap4";
nvidia,function = "dap4";
};
- ddc {
- nvidia,pins = "ddc", "owc", "spdi", "spdo",
- "uac";
- nvidia,function = "rsvd2";
- };
dta {
nvidia,pins = "dta", "dtb", "dtc", "dtd", "dte";
nvidia,function = "vi";
@@ -129,14 +124,14 @@
"lspi", "lvp1", "lvs";
nvidia,function = "displaya";
};
+ owc {
+ nvidia,pins = "owc", "spdi", "spdo", "uac";
+ nvidia,function = "rsvd2";
+ };
pmc {
nvidia,pins = "pmc";
nvidia,function = "pwr_on";
};
- pta {
- nvidia,pins = "pta";
- nvidia,function = "i2c2";
- };
rm {
nvidia,pins = "rm";
nvidia,function = "i2c1";
@@ -176,7 +171,7 @@
conf_ata {
nvidia,pins = "ata", "atb", "atc", "atd",
"cdev1", "cdev2", "dap1", "dap2",
- "dap4", "dtf", "gma", "gmc", "gmd",
+ "dap4", "ddc", "dtf", "gma", "gmc", "gmd",
"gme", "gpu", "gpu7", "i2cp", "irrx",
"irtx", "pta", "rm", "sdc", "sdd",
"slxd", "slxk", "spdi", "spdo", "uac",
@@ -185,7 +180,7 @@
nvidia,tristate = <0>;
};
conf_ate {
- nvidia,pins = "ate", "csus", "dap3", "ddc",
+ nvidia,pins = "ate", "csus", "dap3",
"gpv", "owc", "slxc", "spib", "spid",
"spie";
nvidia,pull = <0>;
@@ -255,6 +250,39 @@
nvidia,slew-rate-falling = <3>;
};
};
+
+ state_i2cmux_ddc: pinmux_i2cmux_ddc {
+ ddc {
+ nvidia,pins = "ddc";
+ nvidia,function = "i2c2";
+ };
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "rsvd4";
+ };
+ };
+
+ state_i2cmux_pta: pinmux_i2cmux_pta {
+ ddc {
+ nvidia,pins = "ddc";
+ nvidia,function = "rsvd4";
+ };
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "i2c2";
+ };
+ };
+
+ state_i2cmux_idle: pinmux_i2cmux_idle {
+ ddc {
+ nvidia,pins = "ddc";
+ nvidia,function = "rsvd4";
+ };
+ pta {
+ nvidia,pins = "pta";
+ nvidia,function = "rsvd4";
+ };
+ };
};
i2s@70002800 {
@@ -303,12 +331,37 @@
i2c@7000c400 {
status = "okay";
clock-frequency = <100000>;
+ };
+
+ i2cmux {
+ compatible = "i2c-mux-pinctrl";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c-parent = <&{/i2c@7000c400}>;
- smart-battery@b {
- compatible = "ti,bq20z75", "smart-battery-1.1";
- reg = <0xb>;
- ti,i2c-retry-count = <2>;
- ti,poll-retry-count = <10>;
+ pinctrl-names = "ddc", "pta", "idle";
+ pinctrl-0 = <&state_i2cmux_ddc>;
+ pinctrl-1 = <&state_i2cmux_pta>;
+ pinctrl-2 = <&state_i2cmux_idle>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ smart-battery@b {
+ compatible = "ti,bq20z75", "smart-battery-1.1";
+ reg = <0xb>;
+ ti,i2c-retry-count = <2>;
+ ti,poll-retry-count = <10>;
+ };
};
};
@@ -334,7 +387,7 @@
};
};
- emc {
+ memory-controller@0x7000f400 {
emc-table@190000 {
reg = <190000>;
compatible = "nvidia,tegra20-emc-table";
@@ -397,7 +450,6 @@
sdhci@c8000600 {
status = "okay";
- support-8bit;
bus-width = <8>;
};
diff --git a/arch/arm/boot/dts/tegra-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index 9de5636023f6..9de5636023f6 100644
--- a/arch/arm/boot/dts/tegra-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
diff --git a/arch/arm/boot/dts/tegra-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts
index 445343b0fbdd..be90544e6b59 100644
--- a/arch/arm/boot/dts/tegra-ventana.dts
+++ b/arch/arm/boot/dts/tegra20-ventana.dts
@@ -314,7 +314,6 @@
sdhci@c8000600 {
status = "okay";
- support-8bit;
bus-width = <8>;
};
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
new file mode 100644
index 000000000000..6916310bf58f
--- /dev/null
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -0,0 +1,301 @@
+/dts-v1/;
+
+/include/ "tegra20.dtsi"
+
+/ {
+ model = "NVIDIA Tegra2 Whistler evaluation board";
+ compatible = "nvidia,whistler", "nvidia,tegra20";
+
+ memory {
+ reg = <0x00000000 0x20000000>;
+ };
+
+ pinmux {
+ pinctrl-names = "default";
+ pinctrl-0 = <&state_default>;
+
+ state_default: pinmux {
+ ata {
+ nvidia,pins = "ata", "atb", "ate", "gma", "gmb",
+ "gmc", "gmd", "gpu";
+ nvidia,function = "gmi";
+ };
+ atc {
+ nvidia,pins = "atc", "atd";
+ nvidia,function = "sdio4";
+ };
+ cdev1 {
+ nvidia,pins = "cdev1";
+ nvidia,function = "plla_out";
+ };
+ cdev2 {
+ nvidia,pins = "cdev2";
+ nvidia,function = "osc";
+ };
+ crtp {
+ nvidia,pins = "crtp";
+ nvidia,function = "crt";
+ };
+ csus {
+ nvidia,pins = "csus";
+ nvidia,function = "vi_sensor_clk";
+ };
+ dap1 {
+ nvidia,pins = "dap1";
+ nvidia,function = "dap1";
+ };
+ dap2 {
+ nvidia,pins = "dap2";
+ nvidia,function = "dap2";
+ };
+ dap3 {
+ nvidia,pins = "dap3";
+ nvidia,function = "dap3";
+ };
+ dap4 {
+ nvidia,pins = "dap4";
+ nvidia,function = "dap4";
+ };
+ ddc {
+ nvidia,pins = "ddc";
+ nvidia,function = "i2c2";
+ };
+ dta {
+ nvidia,pins = "dta", "dtb", "dtc", "dtd";
+ nvidia,function = "vi";
+ };
+ dte {
+ nvidia,pins = "dte";
+ nvidia,function = "rsvd1";
+ };
+ dtf {
+ nvidia,pins = "dtf";
+ nvidia,function = "i2c3";
+ };
+ gme {
+ nvidia,pins = "gme";
+ nvidia,function = "dap5";
+ };
+ gpu7 {
+ nvidia,pins = "gpu7";
+ nvidia,function = "rtck";
+ };
+ gpv {
+ nvidia,pins = "gpv";
+ nvidia,function = "pcie";
+ };
+ hdint {
+ nvidia,pins = "hdint", "pta";
+ nvidia,function = "hdmi";
+ };
+ i2cp {
+ nvidia,pins = "i2cp";
+ nvidia,function = "i2cp";
+ };
+ irrx {
+ nvidia,pins = "irrx", "irtx";
+ nvidia,function = "uartb";
+ };
+ kbca {
+ nvidia,pins = "kbca", "kbcc", "kbce", "kbcf";
+ nvidia,function = "kbc";
+ };
+ kbcb {
+ nvidia,pins = "kbcb", "kbcd";
+ nvidia,function = "sdio2";
+ };
+ lcsn {
+ nvidia,pins = "lcsn", "lsck", "lsda", "lsdi",
+ "spia", "spib", "spic";
+ nvidia,function = "spi3";
+ };
+ ld0 {
+ nvidia,pins = "ld0", "ld1", "ld2", "ld3", "ld4",
+ "ld5", "ld6", "ld7", "ld8", "ld9",
+ "ld10", "ld11", "ld12", "ld13", "ld14",
+ "ld15", "ld16", "ld17", "ldc", "ldi",
+ "lhp0", "lhp1", "lhp2", "lhs", "lm0",
+ "lm1", "lpp", "lpw0", "lpw1", "lpw2",
+ "lsc0", "lsc1", "lspi", "lvp0", "lvp1",
+ "lvs";
+ nvidia,function = "displaya";
+ };
+ owc {
+ nvidia,pins = "owc", "uac";
+ nvidia,function = "owr";
+ };
+ pmc {
+ nvidia,pins = "pmc";
+ nvidia,function = "pwr_on";
+ };
+ rm {
+ nvidia,pins = "rm";
+ nvidia,function = "i2c1";
+ };
+ sdb {
+ nvidia,pins = "sdb", "sdc", "sdd", "slxa",
+ "slxc", "slxd", "slxk";
+ nvidia,function = "sdio3";
+ };
+ sdio1 {
+ nvidia,pins = "sdio1";
+ nvidia,function = "sdio1";
+ };
+ spdi {
+ nvidia,pins = "spdi", "spdo";
+ nvidia,function = "rsvd2";
+ };
+ spid {
+ nvidia,pins = "spid", "spie", "spig", "spih";
+ nvidia,function = "spi2_alt";
+ };
+ spif {
+ nvidia,pins = "spif";
+ nvidia,function = "spi2";
+ };
+ uaa {
+ nvidia,pins = "uaa", "uab";
+ nvidia,function = "uarta";
+ };
+ uad {
+ nvidia,pins = "uad";
+ nvidia,function = "irda";
+ };
+ uca {
+ nvidia,pins = "uca", "ucb";
+ nvidia,function = "uartc";
+ };
+ uda {
+ nvidia,pins = "uda";
+ nvidia,function = "spi1";
+ };
+ conf_ata {
+ nvidia,pins = "ata", "atb", "atc", "ddc", "gma",
+ "gmb", "gmc", "gmd", "irrx", "irtx",
+ "kbca", "kbcb", "kbcc", "kbcd", "kbce",
+ "kbcf", "sdc", "sdd", "spie", "spig",
+ "spih", "uaa", "uab", "uad", "uca",
+ "ucb";
+ nvidia,pull = <2>;
+ nvidia,tristate = <0>;
+ };
+ conf_atd {
+ nvidia,pins = "atd", "ate", "cdev1", "csus",
+ "dap1", "dap2", "dap3", "dap4", "dte",
+ "dtf", "gpu", "gpu7", "gpv", "i2cp",
+ "rm", "sdio1", "slxa", "slxc", "slxd",
+ "slxk", "spdi", "spdo", "uac", "uda";
+ nvidia,pull = <0>;
+ nvidia,tristate = <0>;
+ };
+ conf_cdev2 {
+ nvidia,pins = "cdev2", "spia", "spib";
+ nvidia,pull = <1>;
+ nvidia,tristate = <1>;
+ };
+ conf_ck32 {
+ nvidia,pins = "ck32", "ddrc", "lc", "pmca",
+ "pmcb", "pmcc", "pmcd", "xm2c",
+ "xm2d";
+ nvidia,pull = <0>;
+ };
+ conf_crtp {
+ nvidia,pins = "crtp";
+ nvidia,pull = <0>;
+ nvidia,tristate = <1>;
+ };
+ conf_dta {
+ nvidia,pins = "dta", "dtb", "dtc", "dtd",
+ "spid", "spif";
+ nvidia,pull = <1>;
+ nvidia,tristate = <0>;
+ };
+ conf_gme {
+ nvidia,pins = "gme", "owc", "pta", "spic";
+ nvidia,pull = <2>;
+ nvidia,tristate = <1>;
+ };
+ conf_ld17_0 {
+ nvidia,pins = "ld17_0", "ld19_18", "ld21_20",
+ "ld23_22";
+ nvidia,pull = <1>;
+ };
+ conf_ls {
+ nvidia,pins = "ls", "pmce";
+ nvidia,pull = <2>;
+ };
+ drive_dap1 {
+ nvidia,pins = "drive_dap1";
+ nvidia,high-speed-mode = <0>;
+ nvidia,schmitt = <1>;
+ nvidia,low-power-mode = <0>;
+ nvidia,pull-down-strength = <0>;
+ nvidia,pull-up-strength = <0>;
+ nvidia,slew-rate-rising = <0>;
+ nvidia,slew-rate-falling = <0>;
+ };
+ };
+ };
+
+ i2s@70002800 {
+ status = "okay";
+ };
+
+ serial@70006000 {
+ status = "okay";
+ clock-frequency = <216000000>;
+ };
+
+ i2c@7000d000 {
+ status = "okay";
+ clock-frequency = <100000>;
+
+ codec: codec@1a {
+ compatible = "wlf,wm8753";
+ reg = <0x1a>;
+ };
+
+ tca6416: gpio@20 {
+ compatible = "ti,tca6416";
+ reg = <0x20>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ };
+
+ usb@c5000000 {
+ status = "okay";
+ nvidia,vbus-gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
+ };
+
+ usb@c5008000 {
+ status = "okay";
+ nvidia,vbus-gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
+ };
+
+ sdhci@c8000400 {
+ status = "okay";
+ wp-gpios = <&gpio 173 0>; /* gpio PV5 */
+ bus-width = <8>;
+ };
+
+ sdhci@c8000600 {
+ status = "okay";
+ bus-width = <8>;
+ };
+
+ sound {
+ compatible = "nvidia,tegra-audio-wm8753-whistler",
+ "nvidia,tegra-audio-wm8753";
+ nvidia,model = "NVIDIA Tegra Whistler";
+
+ nvidia,audio-routing =
+ "Headphone Jack", "LOUT1",
+ "Headphone Jack", "ROUT1",
+ "MIC2", "Mic Jack",
+ "MIC2N", "Mic Jack";
+
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&codec>;
+ };
+};
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index c417d67e9027..9f1921634eb7 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -72,7 +72,7 @@
reg = <0x70002800 0x200>;
interrupts = <0 13 0x04>;
nvidia,dma-request-selector = <&apbdma 2>;
- status = "disable";
+ status = "disabled";
};
tegra_i2s2: i2s@70002a00 {
@@ -80,7 +80,7 @@
reg = <0x70002a00 0x200>;
interrupts = <0 3 0x04>;
nvidia,dma-request-selector = <&apbdma 1>;
- status = "disable";
+ status = "disabled";
};
serial@70006000 {
@@ -88,7 +88,7 @@
reg = <0x70006000 0x40>;
reg-shift = <2>;
interrupts = <0 36 0x04>;
- status = "disable";
+ status = "disabled";
};
serial@70006040 {
@@ -96,7 +96,7 @@
reg = <0x70006040 0x40>;
reg-shift = <2>;
interrupts = <0 37 0x04>;
- status = "disable";
+ status = "disabled";
};
serial@70006200 {
@@ -104,7 +104,7 @@
reg = <0x70006200 0x100>;
reg-shift = <2>;
interrupts = <0 46 0x04>;
- status = "disable";
+ status = "disabled";
};
serial@70006300 {
@@ -112,7 +112,7 @@
reg = <0x70006300 0x100>;
reg-shift = <2>;
interrupts = <0 90 0x04>;
- status = "disable";
+ status = "disabled";
};
serial@70006400 {
@@ -120,7 +120,7 @@
reg = <0x70006400 0x100>;
reg-shift = <2>;
interrupts = <0 91 0x04>;
- status = "disable";
+ status = "disabled";
};
i2c@7000c000 {
@@ -129,7 +129,7 @@
interrupts = <0 38 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- status = "disable";
+ status = "disabled";
};
i2c@7000c400 {
@@ -138,7 +138,7 @@
interrupts = <0 84 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- status = "disable";
+ status = "disabled";
};
i2c@7000c500 {
@@ -147,7 +147,7 @@
interrupts = <0 92 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- status = "disable";
+ status = "disabled";
};
i2c@7000d000 {
@@ -156,7 +156,7 @@
interrupts = <0 53 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- status = "disable";
+ status = "disabled";
};
pmc {
@@ -164,7 +164,7 @@
reg = <0x7000e400 0x400>;
};
- mc {
+ memory-controller@0x7000f000 {
compatible = "nvidia,tegra20-mc";
reg = <0x7000f000 0x024
0x7000f03c 0x3c4>;
@@ -177,7 +177,7 @@
0x58000000 0x02000000>; /* GART aperture */
};
- emc {
+ memory-controller@0x7000f400 {
compatible = "nvidia,tegra20-emc";
reg = <0x7000f400 0x200>;
#address-cells = <1>;
@@ -190,7 +190,7 @@
interrupts = <0 20 0x04>;
phy_type = "utmi";
nvidia,has-legacy-mode;
- status = "disable";
+ status = "disabled";
};
usb@c5004000 {
@@ -198,7 +198,7 @@
reg = <0xc5004000 0x4000>;
interrupts = <0 21 0x04>;
phy_type = "ulpi";
- status = "disable";
+ status = "disabled";
};
usb@c5008000 {
@@ -206,35 +206,35 @@
reg = <0xc5008000 0x4000>;
interrupts = <0 97 0x04>;
phy_type = "utmi";
- status = "disable";
+ status = "disabled";
};
sdhci@c8000000 {
compatible = "nvidia,tegra20-sdhci";
reg = <0xc8000000 0x200>;
interrupts = <0 14 0x04>;
- status = "disable";
+ status = "disabled";
};
sdhci@c8000200 {
compatible = "nvidia,tegra20-sdhci";
reg = <0xc8000200 0x200>;
interrupts = <0 15 0x04>;
- status = "disable";
+ status = "disabled";
};
sdhci@c8000400 {
compatible = "nvidia,tegra20-sdhci";
reg = <0xc8000400 0x200>;
interrupts = <0 19 0x04>;
- status = "disable";
+ status = "disabled";
};
sdhci@c8000600 {
compatible = "nvidia,tegra20-sdhci";
reg = <0xc8000600 0x200>;
interrupts = <0 31 0x04>;
- status = "disable";
+ status = "disabled";
};
pmu {
diff --git a/arch/arm/boot/dts/tegra-cardhu.dts b/arch/arm/boot/dts/tegra30-cardhu.dts
index 36321bceec46..c169bced131e 100644
--- a/arch/arm/boot/dts/tegra-cardhu.dts
+++ b/arch/arm/boot/dts/tegra30-cardhu.dts
@@ -144,7 +144,6 @@
sdhci@78000600 {
status = "okay";
- support-8bit;
bus-width = <8>;
};
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 2dcc09e784b5..da740191771f 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -82,7 +82,7 @@
reg = <0x70006000 0x40>;
reg-shift = <2>;
interrupts = <0 36 0x04>;
- status = "disable";
+ status = "disabled";
};
serial@70006040 {
@@ -90,7 +90,7 @@
reg = <0x70006040 0x40>;
reg-shift = <2>;
interrupts = <0 37 0x04>;
- status = "disable";
+ status = "disabled";
};
serial@70006200 {
@@ -98,7 +98,7 @@
reg = <0x70006200 0x100>;
reg-shift = <2>;
interrupts = <0 46 0x04>;
- status = "disable";
+ status = "disabled";
};
serial@70006300 {
@@ -106,7 +106,7 @@
reg = <0x70006300 0x100>;
reg-shift = <2>;
interrupts = <0 90 0x04>;
- status = "disable";
+ status = "disabled";
};
serial@70006400 {
@@ -114,7 +114,7 @@
reg = <0x70006400 0x100>;
reg-shift = <2>;
interrupts = <0 91 0x04>;
- status = "disable";
+ status = "disabled";
};
i2c@7000c000 {
@@ -123,7 +123,7 @@
interrupts = <0 38 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- status = "disable";
+ status = "disabled";
};
i2c@7000c400 {
@@ -132,7 +132,7 @@
interrupts = <0 84 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- status = "disable";
+ status = "disabled";
};
i2c@7000c500 {
@@ -141,7 +141,7 @@
interrupts = <0 92 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- status = "disable";
+ status = "disabled";
};
i2c@7000c700 {
@@ -150,7 +150,7 @@
interrupts = <0 120 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- status = "disable";
+ status = "disabled";
};
i2c@7000d000 {
@@ -159,7 +159,7 @@
interrupts = <0 53 0x04>;
#address-cells = <1>;
#size-cells = <0>;
- status = "disable";
+ status = "disabled";
};
pmc {
@@ -167,7 +167,7 @@
reg = <0x7000e400 0x400>;
};
- mc {
+ memory-controller {
compatible = "nvidia,tegra30-mc";
reg = <0x7000f000 0x010
0x7000f03c 0x1b4
@@ -201,35 +201,35 @@
compatible = "nvidia,tegra30-i2s";
reg = <0x70080300 0x100>;
nvidia,ahub-cif-ids = <4 4>;
- status = "disable";
+ status = "disabled";
};
tegra_i2s1: i2s@70080400 {
compatible = "nvidia,tegra30-i2s";
reg = <0x70080400 0x100>;
nvidia,ahub-cif-ids = <5 5>;
- status = "disable";
+ status = "disabled";
};
tegra_i2s2: i2s@70080500 {
compatible = "nvidia,tegra30-i2s";
reg = <0x70080500 0x100>;
nvidia,ahub-cif-ids = <6 6>;
- status = "disable";
+ status = "disabled";
};
tegra_i2s3: i2s@70080600 {
compatible = "nvidia,tegra30-i2s";
reg = <0x70080600 0x100>;
nvidia,ahub-cif-ids = <7 7>;
- status = "disable";
+ status = "disabled";
};
tegra_i2s4: i2s@70080700 {
compatible = "nvidia,tegra30-i2s";
reg = <0x70080700 0x100>;
nvidia,ahub-cif-ids = <8 8>;
- status = "disable";
+ status = "disabled";
};
};
@@ -237,28 +237,28 @@
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000000 0x200>;
interrupts = <0 14 0x04>;
- status = "disable";
+ status = "disabled";
};
sdhci@78000200 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000200 0x200>;
interrupts = <0 15 0x04>;
- status = "disable";
+ status = "disabled";
};
sdhci@78000400 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000400 0x200>;
interrupts = <0 19 0x04>;
- status = "disable";
+ status = "disabled";
};
sdhci@78000600 {
compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
reg = <0x78000600 0x200>;
interrupts = <0 31 0x04>;
- status = "disable";
+ status = "disabled";
};
pmu {
diff --git a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
index 16076e2d0934..d8a827bd2bf3 100644
--- a/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m-rs1.dtsi
@@ -55,6 +55,8 @@
reg-io-width = <4>;
smsc,irq-active-high;
smsc,irq-push-pull;
+ vdd33a-supply = <&v2m_fixed_3v3>;
+ vddvario-supply = <&v2m_fixed_3v3>;
};
usb@2,03000000 {
@@ -157,6 +159,7 @@
v2m_timer23: timer@120000 {
compatible = "arm,sp804", "arm,primecell";
reg = <0x120000 0x1000>;
+ interrupts = <3>;
};
/* DVI I2C bus */
@@ -197,5 +200,13 @@
interrupts = <14>;
};
};
+
+ v2m_fixed_3v3: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
};
};
diff --git a/arch/arm/boot/dts/vexpress-v2m.dtsi b/arch/arm/boot/dts/vexpress-v2m.dtsi
index a6c9c7c82d53..dba53fd026bb 100644
--- a/arch/arm/boot/dts/vexpress-v2m.dtsi
+++ b/arch/arm/boot/dts/vexpress-v2m.dtsi
@@ -54,6 +54,8 @@
reg-io-width = <4>;
smsc,irq-active-high;
smsc,irq-push-pull;
+ vdd33a-supply = <&v2m_fixed_3v3>;
+ vddvario-supply = <&v2m_fixed_3v3>;
};
usb@3,03000000 {
@@ -156,6 +158,7 @@
v2m_timer23: timer@12000 {
compatible = "arm,sp804", "arm,primecell";
reg = <0x12000 0x1000>;
+ interrupts = <3>;
};
/* DVI I2C bus */
@@ -196,5 +199,13 @@
interrupts = <14>;
};
};
+
+ v2m_fixed_3v3: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
};
};
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 7e1091d91af8..d12b34ca0568 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -14,8 +14,8 @@
arm,hbi = <0x237>;
compatible = "arm,vexpress,v2p-ca15,tc1", "arm,vexpress,v2p-ca15", "arm,vexpress";
interrupt-parent = <&gic>;
- #address-cells = <1>;
- #size-cells = <1>;
+ #address-cells = <2>;
+ #size-cells = <2>;
chosen { };
@@ -47,23 +47,23 @@
memory@80000000 {
device_type = "memory";
- reg = <0x80000000 0x40000000>;
+ reg = <0 0x80000000 0 0x40000000>;
};
hdlcd@2b000000 {
compatible = "arm,hdlcd";
- reg = <0x2b000000 0x1000>;
+ reg = <0 0x2b000000 0 0x1000>;
interrupts = <0 85 4>;
};
memory-controller@2b0a0000 {
compatible = "arm,pl341", "arm,primecell";
- reg = <0x2b0a0000 0x1000>;
+ reg = <0 0x2b0a0000 0 0x1000>;
};
wdt@2b060000 {
compatible = "arm,sp805", "arm,primecell";
- reg = <0x2b060000 0x1000>;
+ reg = <0 0x2b060000 0 0x1000>;
interrupts = <98>;
};
@@ -72,23 +72,23 @@
#interrupt-cells = <3>;
#address-cells = <0>;
interrupt-controller;
- reg = <0x2c001000 0x1000>,
- <0x2c002000 0x1000>,
- <0x2c004000 0x2000>,
- <0x2c006000 0x2000>;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
interrupts = <1 9 0xf04>;
};
memory-controller@7ffd0000 {
compatible = "arm,pl354", "arm,primecell";
- reg = <0x7ffd0000 0x1000>;
+ reg = <0 0x7ffd0000 0 0x1000>;
interrupts = <0 86 4>,
<0 87 4>;
};
dma@7ffb0000 {
compatible = "arm,pl330", "arm,primecell";
- reg = <0x7ffb0000 0x1000>;
+ reg = <0 0x7ffb0000 0 0x1000>;
interrupts = <0 92 4>,
<0 88 4>,
<0 89 4>,
@@ -111,12 +111,12 @@
};
motherboard {
- ranges = <0 0 0x08000000 0x04000000>,
- <1 0 0x14000000 0x04000000>,
- <2 0 0x18000000 0x04000000>,
- <3 0 0x1c000000 0x04000000>,
- <4 0 0x0c000000 0x04000000>,
- <5 0 0x10000000 0x04000000>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
interrupt-map-mask = <0 0 63>;
interrupt-map = <0 0 0 &gic 0 0 4>,
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
new file mode 100644
index 000000000000..4890a81c5467
--- /dev/null
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -0,0 +1,188 @@
+/*
+ * ARM Ltd. Versatile Express
+ *
+ * CoreTile Express A15x2 A7x3
+ * Cortex-A15_A7 MPCore (V2P-CA15_A7)
+ *
+ * HBI-0249A
+ */
+
+/dts-v1/;
+
+/ {
+ model = "V2P-CA15_CA7";
+ arm,hbi = <0x249>;
+ compatible = "arm,vexpress,v2p-ca15_a7", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ i2c0 = &v2m_i2c_dvi;
+ i2c1 = &v2m_i2c_pcie;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ };
+
+/* A7s disabled till big.LITTLE patches are available...
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x100>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x101>;
+ };
+
+ cpu4: cpu@4 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a7";
+ reg = <0x102>;
+ };
+*/
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x40000000>;
+ };
+
+ wdt@2a490000 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0 0x2a490000 0 0x1000>;
+ interrupts = <98>;
+ };
+
+ hdlcd@2b000000 {
+ compatible = "arm,hdlcd";
+ reg = <0 0x2b000000 0 0x1000>;
+ interrupts = <0 85 4>;
+ };
+
+ memory-controller@2b0a0000 {
+ compatible = "arm,pl341", "arm,primecell";
+ reg = <0 0x2b0a0000 0 0x1000>;
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ memory-controller@7ffd0000 {
+ compatible = "arm,pl354", "arm,primecell";
+ reg = <0 0x7ffd0000 0 0x1000>;
+ interrupts = <0 86 4>,
+ <0 87 4>;
+ };
+
+ dma@7ff00000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0 0x7ff00000 0 0x1000>;
+ interrupts = <0 92 4>,
+ <0 88 4>,
+ <0 89 4>,
+ <0 90 4>,
+ <0 91 4>;
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
+ interrupts = <0 68 4>,
+ <0 69 4>;
+ };
+
+ motherboard {
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+ };
+};
+
+/include/ "vexpress-v2m-rs1.dtsi"
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
new file mode 100644
index 000000000000..e40b435d204e
--- /dev/null
+++ b/arch/arm/configs/exynos_defconfig
@@ -0,0 +1,92 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_EFI_PARTITION=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_S3C_LOWLEVEL_UART_PORT=1
+CONFIG_S3C24XX_PWM=y
+CONFIG_ARCH_EXYNOS5=y
+CONFIG_MACH_EXYNOS4_DT=y
+CONFIG_MACH_EXYNOS5_DT=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_RFKILL_REGULATOR=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_NETDEVICES=y
+CONFIG_SMSC911X=y
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_SMSC75XX=y
+CONFIG_USB_NET_SMSC95XX=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+# CONFIG_HWMON is not set
+CONFIG_MFD_TPS65090=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_TPS65090=y
+CONFIG_FB=y
+CONFIG_EXYNOS_VIDEO=y
+CONFIG_EXYNOS_MIPI_DSI=y
+CONFIG_EXYNOS_DP=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_7x14=y
+CONFIG_LOGO=y
+CONFIG_USB=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_CRAMFS=y
+CONFIG_ROMFS_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index e05a2f1665a7..78ed575feb1a 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -2,7 +2,10 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
CONFIG_EXPERT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
@@ -36,8 +39,6 @@ CONFIG_MACH_IMX27IPCAM=y
CONFIG_MACH_IMX27_DT=y
CONFIG_MXC_IRQ_PRIOR=y
CONFIG_MXC_PWM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_ZBOOT_ROM_TEXT=0x0
@@ -46,7 +47,6 @@ CONFIG_FPE_NWFPE=y
CONFIG_FPE_NWFPE_XP=y
CONFIG_PM_DEBUG=y
CONFIG_NET=y
-CONFIG_SMSC911X=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
@@ -70,31 +70,31 @@ CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_ADV_OPTIONS=y
CONFIG_MTD_CFI_GEOMETRY=y
# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
-CONFIG_MTD_MAP_BANK_WIDTH_4=y
# CONFIG_MTD_CFI_I2 is not set
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_PHYSMAP=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_MXC=y
CONFIG_MTD_UBI=y
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
+CONFIG_ATA=y
+CONFIG_PATA_IMX=y
CONFIG_NETDEVICES=y
CONFIG_CS89x0=y
CONFIG_CS89x0_PLATFORM=y
CONFIG_DM9000=y
CONFIG_SMC91X=y
CONFIG_SMC911X=y
+CONFIG_SMSC911X=y
CONFIG_SMSC_PHY=y
# CONFIG_INPUT_MOUSEDEV is not set
CONFIG_INPUT_EVDEV=y
-# CONFIG_INPUT_KEYBOARD is not set
+CONFIG_KEYBOARD_IMX=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ADS7846=m
-CONFIG_TOUCHSCREEN_MC13783=m
-# CONFIG_SERIO is not set
+CONFIG_TOUCHSCREEN_MC13783=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=m
CONFIG_SERIAL_IMX=y
@@ -113,31 +113,23 @@ CONFIG_HWMON=m
CONFIG_SENSORS_MC13783_ADC=m
CONFIG_WATCHDOG=y
CONFIG_IMX2_WDT=y
-CONFIG_MFD_MC13XXX=y
+CONFIG_MFD_MC13XXX_SPI=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_MC13783=y
CONFIG_REGULATOR_MC13892=y
-CONFIG_FB=y
-CONFIG_FB_IMX=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_LCD_L4F00242T03=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_VIDEO_DEV=y
-CONFIG_VIDEO_V4L2_COMMON=y
-CONFIG_VIDEO_MEDIA=y
-CONFIG_VIDEO_V4L2=y
-CONFIG_VIDEOBUF_GEN=y
-CONFIG_VIDEOBUF_DMA_CONTIG=y
-CONFIG_VIDEOBUF2_CORE=y
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_SOC_CAMERA=y
CONFIG_SOC_CAMERA_OV2640=y
-CONFIG_VIDEO_MX2_HOSTSUPPORT=y
CONFIG_VIDEO_MX2=y
+CONFIG_FB=y
+CONFIG_FB_IMX=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_L4F00242T03=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_PWM=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
@@ -152,13 +144,17 @@ CONFIG_SND_IMX_SOC=y
CONFIG_SND_SOC_MX27VIS_AIC32X4=y
CONFIG_SND_SOC_PHYCORE_AC97=y
CONFIG_SND_SOC_EUKREA_TLV320=y
+CONFIG_SND_SOC_IMX_SGTL5000=y
+CONFIG_SND_SOC_IMX_MC13783=y
CONFIG_USB_HID=m
CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MXC=y
CONFIG_USB_ULPI=y
CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_ESDHC_IMX=y
CONFIG_MMC_MXC=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
@@ -173,22 +169,25 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PCF8563=y
CONFIG_RTC_DRV_IMXDI=y
+CONFIG_RTC_DRV_MC13XXX=y
CONFIG_RTC_DRV_MXC=y
CONFIG_DMADEVICES=y
CONFIG_IMX_SDMA=y
CONFIG_IMX_DMA=y
+CONFIG_COMMON_CLK_DEBUG=y
# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
# CONFIG_DNOTIFY is not set
# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_CODEPAGE_850=m
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=m
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index b1d3675df72c..f725b9637b33 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -2,6 +2,8 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZO=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_LOG_BUF_SHIFT=18
CONFIG_CGROUPS=y
CONFIG_RELAY=y
@@ -29,15 +31,12 @@ CONFIG_MACH_MX35_3DS=y
CONFIG_MACH_VPR200=y
CONFIG_MACH_IMX51_DT=y
CONFIG_MACH_MX51_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX51=y
CONFIG_MACH_EUKREA_CPUIMX51SD=y
CONFIG_MACH_MX51_EFIKAMX=y
CONFIG_MACH_MX51_EFIKASB=y
CONFIG_MACH_IMX53_DT=y
CONFIG_SOC_IMX6Q=y
CONFIG_MXC_PWM=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_VMSPLIT_2G=y
CONFIG_PREEMPT_VOLUNTARY=y
@@ -64,17 +63,29 @@ CONFIG_IPV6=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+CONFIG_CONNECTOR=y
CONFIG_MTD=y
-CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
CONFIG_MTD_DATAFLASH=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_SST25L=y
-# CONFIG_STANDALONE is not set
-CONFIG_CONNECTOR=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_GPMI_NAND=y
+CONFIG_MTD_NAND_MXC=y
+CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
+CONFIG_EEPROM_AT24=y
+CONFIG_EEPROM_AT25=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_MULTI_LUN=y
@@ -105,8 +116,11 @@ CONFIG_SMSC911X=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_EVBUG=m
CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_IMX=y
CONFIG_MOUSE_PS2=m
CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_MC13783=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_MMA8450=y
CONFIG_SERIO_SERPORT=m
@@ -116,6 +130,7 @@ CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_SERIAL_IMX=y
CONFIG_SERIAL_IMX_CONSOLE=y
CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MXC_RNGA=y
CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
CONFIG_I2C_CHARDEV=y
@@ -130,42 +145,37 @@ CONFIG_GPIO_SYSFS=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_IMX2_WDT=y
-CONFIG_MFD_MC13XXX=y
+CONFIG_MFD_MC13XXX_SPI=y
+CONFIG_MFD_MC13XXX_I2C=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_MC13783=y
CONFIG_REGULATOR_MC13892=y
CONFIG_MEDIA_SUPPORT=y
-CONFIG_VIDEO_V4L2=y
CONFIG_VIDEO_DEV=y
-CONFIG_VIDEO_V4L2_COMMON=y
-CONFIG_VIDEOBUF_GEN=y
-CONFIG_VIDEOBUF2_CORE=y
-CONFIG_VIDEOBUF2_MEMOPS=y
-CONFIG_VIDEOBUF2_DMA_CONTIG=y
-CONFIG_VIDEO_CAPTURE_DRIVERS=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_SOC_CAMERA=y
CONFIG_SOC_CAMERA_OV2640=y
-CONFIG_MX3_VIDEO=y
CONFIG_VIDEO_MX3=y
CONFIG_FB=y
-CONFIG_FB_MX3=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_L4F00242T03=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_GENERIC=y
-CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_MONO=y
-CONFIG_LOGO_LINUX_VGA16=y
-CONFIG_LOGO_LINUX_CLUT224=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_IMX_SOC=y
+CONFIG_SND_SOC_PHYCORE_AC97=y
+CONFIG_SND_SOC_EUKREA_TLV320=y
+CONFIG_SND_SOC_IMX_SGTL5000=y
+CONFIG_SND_SOC_IMX_MC13783=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MXC=y
@@ -178,9 +188,12 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
+CONFIG_RTC_DRV_MC13XXX=y
CONFIG_RTC_DRV_MXC=y
CONFIG_DMADEVICES=y
CONFIG_IMX_SDMA=y
+CONFIG_COMMON_CLK_DEBUG=y
+# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
@@ -204,8 +217,9 @@ CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_CONFIGFS_FS=m
+CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
@@ -216,14 +230,11 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
# CONFIG_FTRACE is not set
# CONFIG_ARM_UNWIND is not set
CONFIG_SECURITYFS=y
-CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_LZO=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
CONFIG_CRC_CCITT=m
diff --git a/arch/arm/configs/lpc32xx_defconfig b/arch/arm/configs/lpc32xx_defconfig
index 4fa60547494a..e42a0e3d4c3a 100644
--- a/arch/arm/configs/lpc32xx_defconfig
+++ b/arch/arm/configs/lpc32xx_defconfig
@@ -1,5 +1,7 @@
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
@@ -16,8 +18,7 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_LPC32XX=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_ZBOOT_ROM_TEXT=0x0
@@ -52,13 +53,17 @@ CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
+CONFIG_MTD_M25P80=y
CONFIG_MTD_NAND=y
CONFIG_MTD_NAND_MUSEUM_IDS=y
+CONFIG_MTD_NAND_SLC_LPC32XX=y
+CONFIG_MTD_NAND_MLC_LPC32XX=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=1
CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_EEPROM_AT24=y
CONFIG_EEPROM_AT25=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -79,16 +84,23 @@ CONFIG_LPC_ENET=y
# CONFIG_NET_VENDOR_STMICRO is not set
CONFIG_SMSC_PHY=y
# CONFIG_WLAN is not set
+CONFIG_INPUT_MATRIXKMAP=y
# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=240
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=320
CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_LPC32XX=y
# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_LPC32XX=y
+CONFIG_SERIO_LIBPS2=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_HS_LPC32XX=y
+CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
@@ -96,7 +108,8 @@ CONFIG_I2C_PNX=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_SYSFS=y
-# CONFIG_HWMON is not set
+CONFIG_SENSORS_DS620=y
+CONFIG_SENSORS_MAX6639=y
CONFIG_WATCHDOG=y
CONFIG_PNX4008_WATCHDOG=y
CONFIG_FB=y
@@ -133,6 +146,8 @@ CONFIG_MMC=y
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_PCA9532=y
+CONFIG_LEDS_PCA9532_GPIO=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
@@ -146,10 +161,10 @@ CONFIG_RTC_DRV_DS1374=y
CONFIG_RTC_DRV_PCF8563=y
CONFIG_RTC_DRV_LPC32XX=y
CONFIG_DMADEVICES=y
-CONFIG_AMBA_PL08X=y
CONFIG_STAGING=y
-CONFIG_IIO=y
CONFIG_LPC32XX_ADC=y
+CONFIG_MAX517=y
+CONFIG_IIO=y
CONFIG_EXT2_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
@@ -159,7 +174,6 @@ CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_WBUF_VERIFY=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
diff --git a/arch/arm/configs/mvebu_defconfig b/arch/arm/configs/mvebu_defconfig
new file mode 100644
index 000000000000..2e86b31c33cf
--- /dev/null
+++ b/arch/arm/configs/mvebu_defconfig
@@ -0,0 +1,46 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_MACH_ARMADA_370_XP=y
+CONFIG_AEABI=y
+CONFIG_HIGHMEM=y
+CONFIG_USE_OF=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_ARM_APPENDED_DTB=y
+CONFIG_VFP=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_UTF8=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_LL=y
+CONFIG_EARLY_PRINTK=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 5406c23a02e3..ccdb6357fb74 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -28,6 +28,7 @@ CONFIG_MACH_MX28EVK=y
CONFIG_MACH_STMP378X_DEVB=y
CONFIG_MACH_TX28=y
CONFIG_MACH_M28EVK=y
+CONFIG_MACH_APX4DEVKIT=y
# CONFIG_ARM_THUMB is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -58,6 +59,9 @@ CONFIG_CAN_FLEXCAN=m
CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
+CONFIG_MTD=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_GPMI_NAND=y
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_ENC28J60=y
@@ -77,6 +81,7 @@ CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MXS_AUART=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
@@ -109,8 +114,10 @@ CONFIG_MMC=y
CONFIG_MMC_MXS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=m
+CONFIG_RTC_DRV_STMP=y
CONFIG_DMADEVICES=y
CONFIG_MXS_DMA=y
+CONFIG_COMMON_CLK_DEBUG=y
CONFIG_EXT3_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_FSCACHE=m
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 9854ff4279e0..b152de79fd95 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -176,7 +176,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_DEVICEFS=y
CONFIG_USB_SUSPEND=y
CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=y
CONFIG_USB_WDM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_LIBUSUAL=y
@@ -197,6 +196,7 @@ CONFIG_RTC_DRV_TWL4030=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_MSDOS_FS=y
@@ -236,3 +236,4 @@ CONFIG_CRC_T10DIF=y
CONFIG_CRC_ITU_T=y
CONFIG_CRC7=y
CONFIG_LIBCRC32C=y
+CONFIG_SOC_OMAP5=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
new file mode 100644
index 000000000000..0ac1293dba10
--- /dev/null
+++ b/arch/arm/configs/socfpga_defconfig
@@ -0,0 +1,83 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CPUSETS=y
+CONFIG_NAMESPACES=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_SOCFPGA=y
+CONFIG_MACH_SOCFPGA_CYCLONE5=y
+CONFIG_ARM_THUMBEE=y
+# CONFIG_CACHE_L2X0 is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_VMSPLIT_2G=y
+CONFIG_NR_CPUS=2
+CONFIG_AEABI=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE=""
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_STMMAC_ETH=y
+# CONFIG_STMMAC_PHY_ID_ZERO_WORKAROUND is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
+CONFIG_NTFS_RW=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_ENABLE_DEFAULT_TRACERS=y
+CONFIG_DEBUG_USER=y
+CONFIG_XZ_DEC=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 1198dd61c7c4..4be9c1e80ee6 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -1,4 +1,6 @@
CONFIG_EXPERIMENTAL=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -25,14 +27,9 @@ CONFIG_ARCH_TEGRA=y
CONFIG_ARCH_TEGRA_2x_SOC=y
CONFIG_ARCH_TEGRA_3x_SOC=y
CONFIG_MACH_HARMONY=y
-CONFIG_MACH_KAEN=y
CONFIG_MACH_PAZ00=y
CONFIG_MACH_TRIMSLICE=y
-CONFIG_MACH_WARIO=y
-CONFIG_MACH_VENTANA=y
CONFIG_TEGRA_EMC_SCALING_ENABLE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
@@ -103,19 +100,24 @@ CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PINCTRL=y
CONFIG_I2C_TEGRA=y
CONFIG_SPI=y
CONFIG_SPI_TEGRA=y
+CONFIG_GPIO_TPS65910=y
CONFIG_POWER_SUPPLY=y
CONFIG_BATTERY_SBS=y
CONFIG_SENSORS_LM90=y
CONFIG_MFD_TPS6586X=y
+CONFIG_MFD_TPS65910=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_TPS62360=y
CONFIG_REGULATOR_TPS6586X=y
+CONFIG_REGULATOR_TPS65910=y
CONFIG_SOUND=y
CONFIG_SND=y
# CONFIG_SND_SUPPORT_OLD_API is not set
@@ -126,6 +128,7 @@ CONFIG_SND=y
# CONFIG_SND_USB is not set
CONFIG_SND_SOC=y
CONFIG_SND_SOC_TEGRA=y
+CONFIG_SND_SOC_TEGRA_WM8753=y
CONFIG_SND_SOC_TEGRA_WM8903=y
CONFIG_SND_SOC_TEGRA_TRIMSLICE=y
CONFIG_SND_SOC_TEGRA_ALC5632=y
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 68374ba6a943..c79f61faa3a5 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -243,7 +243,7 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) }
-static inline u64 atomic64_read(atomic64_t *v)
+static inline u64 atomic64_read(const atomic64_t *v)
{
u64 result;
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 3d2220498abc..6ddbe446425e 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -60,13 +60,13 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_USE_DOMAINS
-#define set_domain(x) \
- do { \
- __asm__ __volatile__( \
- "mcr p15, 0, %0, c3, c0 @ set domain" \
- : : "r" (x)); \
- isb(); \
- } while (0)
+static inline void set_domain(unsigned val)
+{
+ asm volatile(
+ "mcr p15, 0, %0, c3, c0 @ set domain"
+ : : "r" (val));
+ isb();
+}
#define modify_domain(dom,type) \
do { \
@@ -78,8 +78,8 @@
} while (0)
#else
-#define set_domain(x) do { } while (0)
-#define modify_domain(dom,type) do { } while (0)
+static inline void set_domain(unsigned val) { }
+static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
/*
diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h
index febe495d0c6e..15cb035309f7 100644
--- a/arch/arm/include/asm/mach/irq.h
+++ b/arch/arm/include/asm/mach/irq.h
@@ -17,7 +17,7 @@ struct seq_file;
/*
* This is internal. Do not use it.
*/
-extern void init_FIQ(void);
+extern void init_FIQ(int);
extern int show_fiq_list(struct seq_file *, int);
#ifdef CONFIG_MULTI_IRQ_HANDLER
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index b79f8e97f775..af7b0bda3355 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,7 +148,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
-#define TIF_SYSCALL_RESTARTSYS 10
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
@@ -164,11 +163,9 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
-#define _TIF_SYSCALL_RESTARTSYS (1 << TIF_SYSCALL_RESTARTSYS)
/* Checks for any syscall work in entry-common.S */
-#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
- _TIF_SYSCALL_RESTARTSYS)
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
/*
* Change these and you break ASM code in entry-common.S
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 25552508c3fd..2b2f25e7fef5 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -253,7 +253,7 @@ static void __devinit pci_fixup_cy82c693(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);
-static void __init pci_fixup_it8152(struct pci_dev *dev)
+static void __devinit pci_fixup_it8152(struct pci_dev *dev)
{
int i;
/* fixup for ITE 8152 devices */
@@ -461,7 +461,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
if (!sys->bus)
panic("PCI: unable to scan bus!");
- busnr = sys->bus->subordinate + 1;
+ busnr = sys->bus->busn_res.end + 1;
list_add(&sys->node, head);
} else {
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index c32f8456aa09..2adda11f712f 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -122,14 +122,16 @@ void release_fiq(struct fiq_handler *f)
while (current_fiq->fiq_op(current_fiq->dev_id, 0));
}
+static int fiq_start;
+
void enable_fiq(int fiq)
{
- enable_irq(fiq + FIQ_START);
+ enable_irq(fiq + fiq_start);
}
void disable_fiq(int fiq)
{
- disable_irq(fiq + FIQ_START);
+ disable_irq(fiq + fiq_start);
}
EXPORT_SYMBOL(set_fiq_handler);
@@ -140,7 +142,8 @@ EXPORT_SYMBOL(release_fiq);
EXPORT_SYMBOL(enable_fiq);
EXPORT_SYMBOL(disable_fiq);
-void __init init_FIQ(void)
+void __init init_FIQ(int start)
{
no_fiq_insn = *(unsigned long *)0xffff001c;
+ fiq_start = start;
}
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 8349d4e97e2b..16cedb42c0c3 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -40,13 +40,6 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
-/*
- * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
- */
-#ifndef irq_finish
-#define irq_finish(irq) do { } while (0)
-#endif
-
unsigned long irq_err_count;
int arch_show_interrupts(struct seq_file *p, int prec)
@@ -85,9 +78,6 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs)
generic_handle_irq(irq);
}
- /* AT91 specific workaround */
- irq_finish(irq);
-
irq_exit();
set_irq_regs(old_regs);
}
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index ba32b393b3f0..38c1a3b103a0 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -187,8 +187,8 @@ void kprobe_arm_test_cases(void)
TEST_BF_R ("mov pc, r",0,2f,"")
TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
TEST_BB( "sub pc, pc, #1b-2b+8")
-#if __LINUX_ARM_ARCH__ >= 6
- TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
+#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
+ TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
#endif
TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 186c8cb982c5..a02eada3aa5d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -503,7 +503,7 @@ __hw_perf_event_init(struct perf_event *event)
event_requires_mode_exclusion(&event->attr)) {
pr_debug("ARM performance counters do not support "
"mode exclusion\n");
- return -EPERM;
+ return -EOPNOTSUPP;
}
/*
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 5700a7ae7f0b..14e38261cd31 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,7 +25,6 @@
#include <linux/regset.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
-#include <linux/unistd.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
@@ -918,8 +917,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
- if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
- scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index fd2392a17ac1..536c5d6b340b 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -27,6 +27,7 @@
*/
#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
+#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
/*
* With EABI, the syscall number has to be loaded into r7.
@@ -47,6 +48,18 @@ const unsigned long sigreturn_codes[7] = {
};
/*
+ * Either we support OABI only, or we have EABI with the OABI
+ * compat layer enabled. In the later case we don't know if
+ * user space is EABI or not, and if not we must not clobber r7.
+ * Always using the OABI syscall solves that issue and works for
+ * all those cases.
+ */
+const unsigned long syscall_restart_code[2] = {
+ SWI_SYS_RESTART, /* swi __NR_restart_syscall */
+ 0xe49df004, /* ldr pc, [sp], #4 */
+};
+
+/*
* atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
@@ -592,10 +605,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
- case -ERESTART_RESTARTBLOCK:
regs->ARM_r0 = regs->ARM_ORIG_r0;
regs->ARM_pc = restart_addr;
break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->ARM_r0 = -EINTR;
+ break;
}
}
@@ -611,14 +626,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
* debugger has chosen to restart at a different PC.
*/
if (regs->ARM_pc == restart_addr) {
- if (retval == -ERESTARTNOHAND ||
- retval == -ERESTART_RESTARTBLOCK
+ if (retval == -ERESTARTNOHAND
|| (retval == -ERESTARTSYS
&& !(ka.sa.sa_flags & SA_RESTART))) {
regs->ARM_r0 = -EINTR;
regs->ARM_pc = continue_addr;
}
- clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
}
handle_signal(signr, &ka, &info, regs);
@@ -632,8 +645,29 @@ static void do_signal(struct pt_regs *regs, int syscall)
* ignore the restart.
*/
if (retval == -ERESTART_RESTARTBLOCK
- && regs->ARM_pc == restart_addr)
- set_thread_flag(TIF_SYSCALL_RESTARTSYS);
+ && regs->ARM_pc == continue_addr) {
+ if (thumb_mode(regs)) {
+ regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
+ regs->ARM_pc -= 2;
+ } else {
+#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
+ regs->ARM_r7 = __NR_restart_syscall;
+ regs->ARM_pc -= 4;
+#else
+ u32 __user *usp;
+
+ regs->ARM_sp -= 4;
+ usp = (u32 __user *)regs->ARM_sp;
+
+ if (put_user(regs->ARM_pc, usp) == 0) {
+ regs->ARM_pc = KERN_RESTART_CODE;
+ } else {
+ regs->ARM_sp += 4;
+ force_sigsegv(0, current);
+ }
+#endif
+ }
+ }
}
restore_saved_sigmask();
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 5ff067b7c752..6fcfe8398aa4 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -8,5 +8,7 @@
* published by the Free Software Foundation.
*/
#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
+#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
extern const unsigned long sigreturn_codes[7];
+extern const unsigned long syscall_restart_code[2];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 4928d89758f4..3647170e9a16 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -820,6 +820,8 @@ void __init early_trap_init(void *vectors_base)
*/
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
sigreturn_codes, sizeof(sigreturn_codes));
+ memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+ syscall_restart_code, sizeof(syscall_restart_code));
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 43a31fb06318..36ff15bbfdd4 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -183,7 +183,9 @@ SECTIONS
}
#endif
+#ifdef CONFIG_SMP
PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
#ifdef CONFIG_XIP_KERNEL
__data_loc = ALIGN(4); /* location in binary */
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 19505c0a3f01..c8050b14e615 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -29,12 +29,16 @@ comment "Atmel AT91 Processor"
config SOC_AT91SAM9
bool
select CPU_ARM926T
+ select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
select AT91_SAM9_TIME
select AT91_SAM9_SMC
config SOC_AT91RM9200
bool "AT91RM9200"
select CPU_ARM920T
+ select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
select GENERIC_CLOCKEVENTS
select HAVE_AT91_DBGU0
@@ -140,6 +144,8 @@ config ARCH_AT91SAM9G45
config ARCH_AT91X40
bool "AT91x40"
depends on !MMU
+ select MULTI_IRQ_HANDLER
+ select SPARSE_IRQ
select ARCH_USES_GETTIMEOFFSET
endchoice
diff --git a/arch/arm/mach-at91/Makefile.boot b/arch/arm/mach-at91/Makefile.boot
index 9e84fe4f2aaa..30bb7332e30b 100644
--- a/arch/arm/mach-at91/Makefile.boot
+++ b/arch/arm/mach-at91/Makefile.boot
@@ -15,7 +15,9 @@ endif
# Keep dtb files sorted alphabetically for each SoC
# sam9260
+dtb-$(CONFIG_MACH_AT91SAM_DT) += aks-cdu.dtb
dtb-$(CONFIG_MACH_AT91SAM_DT) += ethernut5.dtb
+dtb-$(CONFIG_MACH_AT91SAM_DT) += evk-pro3.dtb
dtb-$(CONFIG_MACH_AT91SAM_DT) += tny_a9260.dtb
dtb-$(CONFIG_MACH_AT91SAM_DT) += usb_a9260.dtb
# sam9263
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 26917687fc30..6f50c6722276 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -17,6 +17,7 @@
#include <asm/mach/map.h>
#include <asm/system_misc.h>
#include <mach/at91rm9200.h>
+#include <mach/at91_aic.h>
#include <mach/at91_pmc.h>
#include <mach/at91_st.h>
#include <mach/cpu.h>
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index e6b7d0533dd7..01fb7325fecc 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -41,8 +41,8 @@ static struct resource usbh_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_UHP,
- .end = AT91RM9200_ID_UHP,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_UHP,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_UHP,
.flags = IORESOURCE_IRQ,
},
};
@@ -94,8 +94,8 @@ static struct resource udc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_UDP,
- .end = AT91RM9200_ID_UDP,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_UDP,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_UDP,
.flags = IORESOURCE_IRQ,
},
};
@@ -145,8 +145,8 @@ static struct resource eth_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_EMAC,
- .end = AT91RM9200_ID_EMAC,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_EMAC,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_EMAC,
.flags = IORESOURCE_IRQ,
},
};
@@ -305,8 +305,8 @@ static struct resource mmc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_MCI,
- .end = AT91RM9200_ID_MCI,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_MCI,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_MCI,
.flags = IORESOURCE_IRQ,
},
};
@@ -488,8 +488,8 @@ static struct resource twi_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_TWI,
- .end = AT91RM9200_ID_TWI,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_TWI,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_TWI,
.flags = IORESOURCE_IRQ,
},
};
@@ -532,8 +532,8 @@ static struct resource spi_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_SPI,
- .end = AT91RM9200_ID_SPI,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_SPI,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_SPI,
.flags = IORESOURCE_IRQ,
},
};
@@ -598,18 +598,18 @@ static struct resource tcb0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_TC0,
- .end = AT91RM9200_ID_TC0,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_TC0,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_TC0,
.flags = IORESOURCE_IRQ,
},
[2] = {
- .start = AT91RM9200_ID_TC1,
- .end = AT91RM9200_ID_TC1,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_TC1,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_TC1,
.flags = IORESOURCE_IRQ,
},
[3] = {
- .start = AT91RM9200_ID_TC2,
- .end = AT91RM9200_ID_TC2,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_TC2,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_TC2,
.flags = IORESOURCE_IRQ,
},
};
@@ -628,18 +628,18 @@ static struct resource tcb1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_TC3,
- .end = AT91RM9200_ID_TC3,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_TC3,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_TC3,
.flags = IORESOURCE_IRQ,
},
[2] = {
- .start = AT91RM9200_ID_TC4,
- .end = AT91RM9200_ID_TC4,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_TC4,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_TC4,
.flags = IORESOURCE_IRQ,
},
[3] = {
- .start = AT91RM9200_ID_TC5,
- .end = AT91RM9200_ID_TC5,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_TC5,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_TC5,
.flags = IORESOURCE_IRQ,
},
};
@@ -673,8 +673,8 @@ static struct resource rtc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_ID_SYS,
- .end = AT91_ID_SYS,
+ .start = NR_IRQS_LEGACY + AT91_ID_SYS,
+ .end = NR_IRQS_LEGACY + AT91_ID_SYS,
.flags = IORESOURCE_IRQ,
},
};
@@ -729,8 +729,8 @@ static struct resource ssc0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_SSC0,
- .end = AT91RM9200_ID_SSC0,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_SSC0,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_SSC0,
.flags = IORESOURCE_IRQ,
},
};
@@ -771,8 +771,8 @@ static struct resource ssc1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_SSC1,
- .end = AT91RM9200_ID_SSC1,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_SSC1,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_SSC1,
.flags = IORESOURCE_IRQ,
},
};
@@ -813,8 +813,8 @@ static struct resource ssc2_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_SSC2,
- .end = AT91RM9200_ID_SSC2,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_SSC2,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_SSC2,
.flags = IORESOURCE_IRQ,
},
};
@@ -897,8 +897,8 @@ static struct resource dbgu_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_ID_SYS,
- .end = AT91_ID_SYS,
+ .start = NR_IRQS_LEGACY + AT91_ID_SYS,
+ .end = NR_IRQS_LEGACY + AT91_ID_SYS,
.flags = IORESOURCE_IRQ,
},
};
@@ -935,8 +935,8 @@ static struct resource uart0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_US0,
- .end = AT91RM9200_ID_US0,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_US0,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_US0,
.flags = IORESOURCE_IRQ,
},
};
@@ -984,8 +984,8 @@ static struct resource uart1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_US1,
- .end = AT91RM9200_ID_US1,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_US1,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_US1,
.flags = IORESOURCE_IRQ,
},
};
@@ -1035,8 +1035,8 @@ static struct resource uart2_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_US2,
- .end = AT91RM9200_ID_US2,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_US2,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_US2,
.flags = IORESOURCE_IRQ,
},
};
@@ -1078,8 +1078,8 @@ static struct resource uart3_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91RM9200_ID_US3,
- .end = AT91RM9200_ID_US3,
+ .start = NR_IRQS_LEGACY + AT91RM9200_ID_US3,
+ .end = NR_IRQS_LEGACY + AT91RM9200_ID_US3,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-at91/at91sam9260.c b/arch/arm/mach-at91/at91sam9260.c
index 2b1e438ed878..30c7f26a4668 100644
--- a/arch/arm/mach-at91/at91sam9260.c
+++ b/arch/arm/mach-at91/at91sam9260.c
@@ -20,6 +20,7 @@
#include <mach/cpu.h>
#include <mach/at91_dbgu.h>
#include <mach/at91sam9260.h>
+#include <mach/at91_aic.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 0ded951f785a..7b9c2ba396ed 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -45,8 +45,8 @@ static struct resource usbh_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_UHP,
- .end = AT91SAM9260_ID_UHP,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_UHP,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_UHP,
.flags = IORESOURCE_IRQ,
},
};
@@ -98,8 +98,8 @@ static struct resource udc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_UDP,
- .end = AT91SAM9260_ID_UDP,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_UDP,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_UDP,
.flags = IORESOURCE_IRQ,
},
};
@@ -149,8 +149,8 @@ static struct resource eth_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_EMAC,
- .end = AT91SAM9260_ID_EMAC,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_EMAC,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_EMAC,
.flags = IORESOURCE_IRQ,
},
};
@@ -223,8 +223,8 @@ static struct resource mmc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_MCI,
- .end = AT91SAM9260_ID_MCI,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_MCI,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_MCI,
.flags = IORESOURCE_IRQ,
},
};
@@ -305,8 +305,8 @@ static struct resource mmc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_MCI,
- .end = AT91SAM9260_ID_MCI,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_MCI,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_MCI,
.flags = IORESOURCE_IRQ,
},
};
@@ -496,8 +496,8 @@ static struct resource twi_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_TWI,
- .end = AT91SAM9260_ID_TWI,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_TWI,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_TWI,
.flags = IORESOURCE_IRQ,
},
};
@@ -540,8 +540,8 @@ static struct resource spi0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_SPI0,
- .end = AT91SAM9260_ID_SPI0,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_SPI0,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_SPI0,
.flags = IORESOURCE_IRQ,
},
};
@@ -566,8 +566,8 @@ static struct resource spi1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_SPI1,
- .end = AT91SAM9260_ID_SPI1,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_SPI1,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_SPI1,
.flags = IORESOURCE_IRQ,
},
};
@@ -652,18 +652,18 @@ static struct resource tcb0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_TC0,
- .end = AT91SAM9260_ID_TC0,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_TC0,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_TC0,
.flags = IORESOURCE_IRQ,
},
[2] = {
- .start = AT91SAM9260_ID_TC1,
- .end = AT91SAM9260_ID_TC1,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_TC1,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_TC1,
.flags = IORESOURCE_IRQ,
},
[3] = {
- .start = AT91SAM9260_ID_TC2,
- .end = AT91SAM9260_ID_TC2,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_TC2,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_TC2,
.flags = IORESOURCE_IRQ,
},
};
@@ -682,18 +682,18 @@ static struct resource tcb1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_TC3,
- .end = AT91SAM9260_ID_TC3,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_TC3,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_TC3,
.flags = IORESOURCE_IRQ,
},
[2] = {
- .start = AT91SAM9260_ID_TC4,
- .end = AT91SAM9260_ID_TC4,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_TC4,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_TC4,
.flags = IORESOURCE_IRQ,
},
[3] = {
- .start = AT91SAM9260_ID_TC5,
- .end = AT91SAM9260_ID_TC5,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_TC5,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_TC5,
.flags = IORESOURCE_IRQ,
},
};
@@ -807,8 +807,8 @@ static struct resource ssc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_SSC,
- .end = AT91SAM9260_ID_SSC,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_SSC,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_SSC,
.flags = IORESOURCE_IRQ,
},
};
@@ -882,8 +882,8 @@ static struct resource dbgu_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_ID_SYS,
- .end = AT91_ID_SYS,
+ .start = NR_IRQS_LEGACY + AT91_ID_SYS,
+ .end = NR_IRQS_LEGACY + AT91_ID_SYS,
.flags = IORESOURCE_IRQ,
},
};
@@ -920,8 +920,8 @@ static struct resource uart0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_US0,
- .end = AT91SAM9260_ID_US0,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_US0,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_US0,
.flags = IORESOURCE_IRQ,
},
};
@@ -971,8 +971,8 @@ static struct resource uart1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_US1,
- .end = AT91SAM9260_ID_US1,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_US1,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_US1,
.flags = IORESOURCE_IRQ,
},
};
@@ -1014,8 +1014,8 @@ static struct resource uart2_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_US2,
- .end = AT91SAM9260_ID_US2,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_US2,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_US2,
.flags = IORESOURCE_IRQ,
},
};
@@ -1057,8 +1057,8 @@ static struct resource uart3_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_US3,
- .end = AT91SAM9260_ID_US3,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_US3,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_US3,
.flags = IORESOURCE_IRQ,
},
};
@@ -1100,8 +1100,8 @@ static struct resource uart4_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_US4,
- .end = AT91SAM9260_ID_US4,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_US4,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_US4,
.flags = IORESOURCE_IRQ,
},
};
@@ -1138,8 +1138,8 @@ static struct resource uart5_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_US5,
- .end = AT91SAM9260_ID_US5,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_US5,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_US5,
.flags = IORESOURCE_IRQ,
},
};
@@ -1357,8 +1357,8 @@ static struct resource adc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9260_ID_ADC,
- .end = AT91SAM9260_ID_ADC,
+ .start = NR_IRQS_LEGACY + AT91SAM9260_ID_ADC,
+ .end = NR_IRQS_LEGACY + AT91SAM9260_ID_ADC,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-at91/at91sam9261.c b/arch/arm/mach-at91/at91sam9261.c
index c77d503d09d1..f40762c5fede 100644
--- a/arch/arm/mach-at91/at91sam9261.c
+++ b/arch/arm/mach-at91/at91sam9261.c
@@ -19,6 +19,7 @@
#include <asm/system_misc.h>
#include <mach/cpu.h>
#include <mach/at91sam9261.h>
+#include <mach/at91_aic.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 9295e90b08ff..8df5c1bdff92 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -45,8 +45,8 @@ static struct resource usbh_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_UHP,
- .end = AT91SAM9261_ID_UHP,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_UHP,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_UHP,
.flags = IORESOURCE_IRQ,
},
};
@@ -98,8 +98,8 @@ static struct resource udc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_UDP,
- .end = AT91SAM9261_ID_UDP,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_UDP,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_UDP,
.flags = IORESOURCE_IRQ,
},
};
@@ -148,8 +148,8 @@ static struct resource mmc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_MCI,
- .end = AT91SAM9261_ID_MCI,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_MCI,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_MCI,
.flags = IORESOURCE_IRQ,
},
};
@@ -310,8 +310,8 @@ static struct resource twi_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_TWI,
- .end = AT91SAM9261_ID_TWI,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_TWI,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_TWI,
.flags = IORESOURCE_IRQ,
},
};
@@ -354,8 +354,8 @@ static struct resource spi0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_SPI0,
- .end = AT91SAM9261_ID_SPI0,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_SPI0,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_SPI0,
.flags = IORESOURCE_IRQ,
},
};
@@ -380,8 +380,8 @@ static struct resource spi1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_SPI1,
- .end = AT91SAM9261_ID_SPI1,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_SPI1,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_SPI1,
.flags = IORESOURCE_IRQ,
},
};
@@ -468,8 +468,8 @@ static struct resource lcdc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_LCDC,
- .end = AT91SAM9261_ID_LCDC,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_LCDC,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_LCDC,
.flags = IORESOURCE_IRQ,
},
#if defined(CONFIG_FB_INTSRAM)
@@ -566,18 +566,18 @@ static struct resource tcb_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_TC0,
- .end = AT91SAM9261_ID_TC0,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_TC0,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_TC0,
.flags = IORESOURCE_IRQ,
},
[2] = {
- .start = AT91SAM9261_ID_TC1,
- .end = AT91SAM9261_ID_TC1,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_TC1,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_TC1,
.flags = IORESOURCE_IRQ,
},
[3] = {
- .start = AT91SAM9261_ID_TC2,
- .end = AT91SAM9261_ID_TC2,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_TC2,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_TC2,
.flags = IORESOURCE_IRQ,
},
};
@@ -689,8 +689,8 @@ static struct resource ssc0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_SSC0,
- .end = AT91SAM9261_ID_SSC0,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_SSC0,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_SSC0,
.flags = IORESOURCE_IRQ,
},
};
@@ -731,8 +731,8 @@ static struct resource ssc1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_SSC1,
- .end = AT91SAM9261_ID_SSC1,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_SSC1,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_SSC1,
.flags = IORESOURCE_IRQ,
},
};
@@ -773,8 +773,8 @@ static struct resource ssc2_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_SSC2,
- .end = AT91SAM9261_ID_SSC2,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_SSC2,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_SSC2,
.flags = IORESOURCE_IRQ,
},
};
@@ -857,8 +857,8 @@ static struct resource dbgu_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_ID_SYS,
- .end = AT91_ID_SYS,
+ .start = NR_IRQS_LEGACY + AT91_ID_SYS,
+ .end = NR_IRQS_LEGACY + AT91_ID_SYS,
.flags = IORESOURCE_IRQ,
},
};
@@ -895,8 +895,8 @@ static struct resource uart0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_US0,
- .end = AT91SAM9261_ID_US0,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_US0,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_US0,
.flags = IORESOURCE_IRQ,
},
};
@@ -938,8 +938,8 @@ static struct resource uart1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_US1,
- .end = AT91SAM9261_ID_US1,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_US1,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_US1,
.flags = IORESOURCE_IRQ,
},
};
@@ -981,8 +981,8 @@ static struct resource uart2_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9261_ID_US2,
- .end = AT91SAM9261_ID_US2,
+ .start = NR_IRQS_LEGACY + AT91SAM9261_ID_US2,
+ .end = NR_IRQS_LEGACY + AT91SAM9261_ID_US2,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-at91/at91sam9263.c b/arch/arm/mach-at91/at91sam9263.c
index ed91c7e9f7c2..84b38105231e 100644
--- a/arch/arm/mach-at91/at91sam9263.c
+++ b/arch/arm/mach-at91/at91sam9263.c
@@ -18,6 +18,7 @@
#include <asm/mach/map.h>
#include <asm/system_misc.h>
#include <mach/at91sam9263.h>
+#include <mach/at91_aic.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index 175e0009eaa9..eb6bbf86fb9f 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -44,8 +44,8 @@ static struct resource usbh_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_UHP,
- .end = AT91SAM9263_ID_UHP,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_UHP,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_UHP,
.flags = IORESOURCE_IRQ,
},
};
@@ -104,8 +104,8 @@ static struct resource udc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_UDP,
- .end = AT91SAM9263_ID_UDP,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_UDP,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_UDP,
.flags = IORESOURCE_IRQ,
},
};
@@ -155,8 +155,8 @@ static struct resource eth_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_EMAC,
- .end = AT91SAM9263_ID_EMAC,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_EMAC,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_EMAC,
.flags = IORESOURCE_IRQ,
},
};
@@ -229,8 +229,8 @@ static struct resource mmc0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_MCI0,
- .end = AT91SAM9263_ID_MCI0,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_MCI0,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_MCI0,
.flags = IORESOURCE_IRQ,
},
};
@@ -254,8 +254,8 @@ static struct resource mmc1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_MCI1,
- .end = AT91SAM9263_ID_MCI1,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_MCI1,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_MCI1,
.flags = IORESOURCE_IRQ,
},
};
@@ -567,8 +567,8 @@ static struct resource twi_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_TWI,
- .end = AT91SAM9263_ID_TWI,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_TWI,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_TWI,
.flags = IORESOURCE_IRQ,
},
};
@@ -611,8 +611,8 @@ static struct resource spi0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_SPI0,
- .end = AT91SAM9263_ID_SPI0,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_SPI0,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_SPI0,
.flags = IORESOURCE_IRQ,
},
};
@@ -637,8 +637,8 @@ static struct resource spi1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_SPI1,
- .end = AT91SAM9263_ID_SPI1,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_SPI1,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_SPI1,
.flags = IORESOURCE_IRQ,
},
};
@@ -725,8 +725,8 @@ static struct resource ac97_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_AC97C,
- .end = AT91SAM9263_ID_AC97C,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_AC97C,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_AC97C,
.flags = IORESOURCE_IRQ,
},
};
@@ -776,8 +776,8 @@ static struct resource can_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_CAN,
- .end = AT91SAM9263_ID_CAN,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_CAN,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_CAN,
.flags = IORESOURCE_IRQ,
},
};
@@ -816,8 +816,8 @@ static struct resource lcdc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_LCDC,
- .end = AT91SAM9263_ID_LCDC,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_LCDC,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_LCDC,
.flags = IORESOURCE_IRQ,
},
};
@@ -883,8 +883,8 @@ struct resource isi_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_ISI,
- .end = AT91SAM9263_ID_ISI,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_ISI,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_ISI,
.flags = IORESOURCE_IRQ,
},
};
@@ -940,8 +940,8 @@ static struct resource tcb_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_TCB,
- .end = AT91SAM9263_ID_TCB,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_TCB,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_TCB,
.flags = IORESOURCE_IRQ,
},
};
@@ -1108,8 +1108,8 @@ static struct resource pwm_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_PWMC,
- .end = AT91SAM9263_ID_PWMC,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_PWMC,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_PWMC,
.flags = IORESOURCE_IRQ,
},
};
@@ -1161,8 +1161,8 @@ static struct resource ssc0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_SSC0,
- .end = AT91SAM9263_ID_SSC0,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_SSC0,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_SSC0,
.flags = IORESOURCE_IRQ,
},
};
@@ -1203,8 +1203,8 @@ static struct resource ssc1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_SSC1,
- .end = AT91SAM9263_ID_SSC1,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_SSC1,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_SSC1,
.flags = IORESOURCE_IRQ,
},
};
@@ -1284,8 +1284,8 @@ static struct resource dbgu_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_ID_SYS,
- .end = AT91_ID_SYS,
+ .start = NR_IRQS_LEGACY + AT91_ID_SYS,
+ .end = NR_IRQS_LEGACY + AT91_ID_SYS,
.flags = IORESOURCE_IRQ,
},
};
@@ -1322,8 +1322,8 @@ static struct resource uart0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_US0,
- .end = AT91SAM9263_ID_US0,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_US0,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_US0,
.flags = IORESOURCE_IRQ,
},
};
@@ -1365,8 +1365,8 @@ static struct resource uart1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_US1,
- .end = AT91SAM9263_ID_US1,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_US1,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_US1,
.flags = IORESOURCE_IRQ,
},
};
@@ -1408,8 +1408,8 @@ static struct resource uart2_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9263_ID_US2,
- .end = AT91SAM9263_ID_US2,
+ .start = NR_IRQS_LEGACY + AT91SAM9263_ID_US2,
+ .end = NR_IRQS_LEGACY + AT91SAM9263_ID_US2,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index a94758b42737..ffc0957d7623 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -137,7 +137,7 @@ static struct irqaction at91sam926x_pit_irq = {
.name = "at91_tick",
.flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.handler = at91sam926x_pit_interrupt,
- .irq = AT91_ID_SYS,
+ .irq = NR_IRQS_LEGACY + AT91_ID_SYS,
};
static void at91sam926x_pit_reset(void)
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c
index 4792682d52b9..977127368a7d 100644
--- a/arch/arm/mach-at91/at91sam9g45.c
+++ b/arch/arm/mach-at91/at91sam9g45.c
@@ -18,6 +18,7 @@
#include <asm/mach/map.h>
#include <asm/system_misc.h>
#include <mach/at91sam9g45.h>
+#include <mach/at91_aic.h>
#include <mach/at91_pmc.h>
#include <mach/cpu.h>
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 933fc9afe7d0..40fb79df2de0 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -53,8 +53,8 @@ static struct resource hdmac_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_DMA,
- .end = AT91SAM9G45_ID_DMA,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_DMA,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_DMA,
.flags = IORESOURCE_IRQ,
},
};
@@ -94,8 +94,8 @@ static struct resource usbh_ohci_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_UHPHS,
- .end = AT91SAM9G45_ID_UHPHS,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_UHPHS,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_UHPHS,
.flags = IORESOURCE_IRQ,
},
};
@@ -156,8 +156,8 @@ static struct resource usbh_ehci_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_UHPHS,
- .end = AT91SAM9G45_ID_UHPHS,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_UHPHS,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_UHPHS,
.flags = IORESOURCE_IRQ,
},
};
@@ -213,8 +213,8 @@ static struct resource usba_udc_resources[] = {
.flags = IORESOURCE_MEM,
},
[2] = {
- .start = AT91SAM9G45_ID_UDPHS,
- .end = AT91SAM9G45_ID_UDPHS,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_UDPHS,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_UDPHS,
.flags = IORESOURCE_IRQ,
},
};
@@ -296,8 +296,8 @@ static struct resource eth_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_EMAC,
- .end = AT91SAM9G45_ID_EMAC,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_EMAC,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_EMAC,
.flags = IORESOURCE_IRQ,
},
};
@@ -370,8 +370,8 @@ static struct resource mmc0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_MCI0,
- .end = AT91SAM9G45_ID_MCI0,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_MCI0,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_MCI0,
.flags = IORESOURCE_IRQ,
},
};
@@ -395,8 +395,8 @@ static struct resource mmc1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_MCI1,
- .end = AT91SAM9G45_ID_MCI1,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_MCI1,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_MCI1,
.flags = IORESOURCE_IRQ,
},
};
@@ -645,8 +645,8 @@ static struct resource twi0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_TWI0,
- .end = AT91SAM9G45_ID_TWI0,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TWI0,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TWI0,
.flags = IORESOURCE_IRQ,
},
};
@@ -665,8 +665,8 @@ static struct resource twi1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_TWI1,
- .end = AT91SAM9G45_ID_TWI1,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TWI1,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TWI1,
.flags = IORESOURCE_IRQ,
},
};
@@ -720,8 +720,8 @@ static struct resource spi0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_SPI0,
- .end = AT91SAM9G45_ID_SPI0,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_SPI0,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_SPI0,
.flags = IORESOURCE_IRQ,
},
};
@@ -746,8 +746,8 @@ static struct resource spi1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_SPI1,
- .end = AT91SAM9G45_ID_SPI1,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_SPI1,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_SPI1,
.flags = IORESOURCE_IRQ,
},
};
@@ -834,8 +834,8 @@ static struct resource ac97_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_AC97C,
- .end = AT91SAM9G45_ID_AC97C,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_AC97C,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_AC97C,
.flags = IORESOURCE_IRQ,
},
};
@@ -887,8 +887,8 @@ struct resource isi_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_ISI,
- .end = AT91SAM9G45_ID_ISI,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_ISI,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_ISI,
.flags = IORESOURCE_IRQ,
},
};
@@ -979,8 +979,8 @@ static struct resource lcdc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_LCDC,
- .end = AT91SAM9G45_ID_LCDC,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_LCDC,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_LCDC,
.flags = IORESOURCE_IRQ,
},
};
@@ -1054,8 +1054,8 @@ static struct resource tcb0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_TCB,
- .end = AT91SAM9G45_ID_TCB,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TCB,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TCB,
.flags = IORESOURCE_IRQ,
},
};
@@ -1075,8 +1075,8 @@ static struct resource tcb1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_TCB,
- .end = AT91SAM9G45_ID_TCB,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TCB,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TCB,
.flags = IORESOURCE_IRQ,
},
};
@@ -1110,8 +1110,8 @@ static struct resource rtc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_ID_SYS,
- .end = AT91_ID_SYS,
+ .start = NR_IRQS_LEGACY + AT91_ID_SYS,
+ .end = NR_IRQS_LEGACY + AT91_ID_SYS,
.flags = IORESOURCE_IRQ,
},
};
@@ -1147,8 +1147,8 @@ static struct resource tsadcc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_TSC,
- .end = AT91SAM9G45_ID_TSC,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TSC,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TSC,
.flags = IORESOURCE_IRQ,
}
};
@@ -1197,8 +1197,8 @@ static struct resource adc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_TSC,
- .end = AT91SAM9G45_ID_TSC,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_TSC,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_TSC,
.flags = IORESOURCE_IRQ,
}
};
@@ -1400,8 +1400,8 @@ static struct resource pwm_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_PWMC,
- .end = AT91SAM9G45_ID_PWMC,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_PWMC,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_PWMC,
.flags = IORESOURCE_IRQ,
},
};
@@ -1453,8 +1453,8 @@ static struct resource ssc0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_SSC0,
- .end = AT91SAM9G45_ID_SSC0,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_SSC0,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_SSC0,
.flags = IORESOURCE_IRQ,
},
};
@@ -1495,8 +1495,8 @@ static struct resource ssc1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_SSC1,
- .end = AT91SAM9G45_ID_SSC1,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_SSC1,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_SSC1,
.flags = IORESOURCE_IRQ,
},
};
@@ -1575,8 +1575,8 @@ static struct resource dbgu_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_ID_SYS,
- .end = AT91_ID_SYS,
+ .start = NR_IRQS_LEGACY + AT91_ID_SYS,
+ .end = NR_IRQS_LEGACY + AT91_ID_SYS,
.flags = IORESOURCE_IRQ,
},
};
@@ -1613,8 +1613,8 @@ static struct resource uart0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_US0,
- .end = AT91SAM9G45_ID_US0,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_US0,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_US0,
.flags = IORESOURCE_IRQ,
},
};
@@ -1656,8 +1656,8 @@ static struct resource uart1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_US1,
- .end = AT91SAM9G45_ID_US1,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_US1,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_US1,
.flags = IORESOURCE_IRQ,
},
};
@@ -1699,8 +1699,8 @@ static struct resource uart2_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_US2,
- .end = AT91SAM9G45_ID_US2,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_US2,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_US2,
.flags = IORESOURCE_IRQ,
},
};
@@ -1742,8 +1742,8 @@ static struct resource uart3_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9G45_ID_US3,
- .end = AT91SAM9G45_ID_US3,
+ .start = NR_IRQS_LEGACY + AT91SAM9G45_ID_US3,
+ .end = NR_IRQS_LEGACY + AT91SAM9G45_ID_US3,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c
index e420085a57ef..72ce50a50de5 100644
--- a/arch/arm/mach-at91/at91sam9rl.c
+++ b/arch/arm/mach-at91/at91sam9rl.c
@@ -19,6 +19,7 @@
#include <mach/cpu.h>
#include <mach/at91_dbgu.h>
#include <mach/at91sam9rl.h>
+#include <mach/at91_aic.h>
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index 9c0b1481a9a7..f09fff932172 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -41,8 +41,8 @@ static struct resource hdmac_resources[] = {
.flags = IORESOURCE_MEM,
},
[2] = {
- .start = AT91SAM9RL_ID_DMA,
- .end = AT91SAM9RL_ID_DMA,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_DMA,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_DMA,
.flags = IORESOURCE_IRQ,
},
};
@@ -84,8 +84,8 @@ static struct resource usba_udc_resources[] = {
.flags = IORESOURCE_MEM,
},
[2] = {
- .start = AT91SAM9RL_ID_UDPHS,
- .end = AT91SAM9RL_ID_UDPHS,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_UDPHS,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_UDPHS,
.flags = IORESOURCE_IRQ,
},
};
@@ -172,8 +172,8 @@ static struct resource mmc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_MCI,
- .end = AT91SAM9RL_ID_MCI,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_MCI,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_MCI,
.flags = IORESOURCE_IRQ,
},
};
@@ -339,8 +339,8 @@ static struct resource twi_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_TWI0,
- .end = AT91SAM9RL_ID_TWI0,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_TWI0,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_TWI0,
.flags = IORESOURCE_IRQ,
},
};
@@ -383,8 +383,8 @@ static struct resource spi_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_SPI,
- .end = AT91SAM9RL_ID_SPI,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_SPI,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_SPI,
.flags = IORESOURCE_IRQ,
},
};
@@ -452,8 +452,8 @@ static struct resource ac97_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_AC97C,
- .end = AT91SAM9RL_ID_AC97C,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_AC97C,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_AC97C,
.flags = IORESOURCE_IRQ,
},
};
@@ -507,8 +507,8 @@ static struct resource lcdc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_LCDC,
- .end = AT91SAM9RL_ID_LCDC,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_LCDC,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_LCDC,
.flags = IORESOURCE_IRQ,
},
};
@@ -574,18 +574,18 @@ static struct resource tcb_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_TC0,
- .end = AT91SAM9RL_ID_TC0,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_TC0,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_TC0,
.flags = IORESOURCE_IRQ,
},
[2] = {
- .start = AT91SAM9RL_ID_TC1,
- .end = AT91SAM9RL_ID_TC1,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_TC1,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_TC1,
.flags = IORESOURCE_IRQ,
},
[3] = {
- .start = AT91SAM9RL_ID_TC2,
- .end = AT91SAM9RL_ID_TC2,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_TC2,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_TC2,
.flags = IORESOURCE_IRQ,
},
};
@@ -621,8 +621,8 @@ static struct resource tsadcc_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_TSC,
- .end = AT91SAM9RL_ID_TSC,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_TSC,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_TSC,
.flags = IORESOURCE_IRQ,
}
};
@@ -768,8 +768,8 @@ static struct resource pwm_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_PWMC,
- .end = AT91SAM9RL_ID_PWMC,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_PWMC,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_PWMC,
.flags = IORESOURCE_IRQ,
},
};
@@ -821,8 +821,8 @@ static struct resource ssc0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_SSC0,
- .end = AT91SAM9RL_ID_SSC0,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_SSC0,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_SSC0,
.flags = IORESOURCE_IRQ,
},
};
@@ -863,8 +863,8 @@ static struct resource ssc1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_SSC1,
- .end = AT91SAM9RL_ID_SSC1,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_SSC1,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_SSC1,
.flags = IORESOURCE_IRQ,
},
};
@@ -943,8 +943,8 @@ static struct resource dbgu_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91_ID_SYS,
- .end = AT91_ID_SYS,
+ .start = NR_IRQS_LEGACY + AT91_ID_SYS,
+ .end = NR_IRQS_LEGACY + AT91_ID_SYS,
.flags = IORESOURCE_IRQ,
},
};
@@ -981,8 +981,8 @@ static struct resource uart0_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_US0,
- .end = AT91SAM9RL_ID_US0,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_US0,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_US0,
.flags = IORESOURCE_IRQ,
},
};
@@ -1032,8 +1032,8 @@ static struct resource uart1_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_US1,
- .end = AT91SAM9RL_ID_US1,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_US1,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_US1,
.flags = IORESOURCE_IRQ,
},
};
@@ -1075,8 +1075,8 @@ static struct resource uart2_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_US2,
- .end = AT91SAM9RL_ID_US2,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_US2,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_US2,
.flags = IORESOURCE_IRQ,
},
};
@@ -1118,8 +1118,8 @@ static struct resource uart3_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AT91SAM9RL_ID_US3,
- .end = AT91SAM9RL_ID_US3,
+ .start = NR_IRQS_LEGACY + AT91SAM9RL_ID_US3,
+ .end = NR_IRQS_LEGACY + AT91SAM9RL_ID_US3,
.flags = IORESOURCE_IRQ,
},
};
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 1b144b4d3ce1..477cf9d06672 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -312,8 +312,6 @@ static void __init at91sam9x5_map_io(void)
void __init at91sam9x5_initialize(void)
{
- at91_extern_irq = (1 << AT91SAM9X5_ID_IRQ0);
-
/* Register GPIO subsystem (using DT) */
at91_gpio_init(NULL, 0);
}
@@ -321,47 +319,9 @@ void __init at91sam9x5_initialize(void)
/* --------------------------------------------------------------------
* Interrupt initialization
* -------------------------------------------------------------------- */
-/*
- * The default interrupt priority levels (0 = lowest, 7 = highest).
- */
-static unsigned int at91sam9x5_default_irq_priority[NR_AIC_IRQS] __initdata = {
- 7, /* Advanced Interrupt Controller (FIQ) */
- 7, /* System Peripherals */
- 1, /* Parallel IO Controller A and B */
- 1, /* Parallel IO Controller C and D */
- 4, /* Soft Modem */
- 5, /* USART 0 */
- 5, /* USART 1 */
- 5, /* USART 2 */
- 5, /* USART 3 */
- 6, /* Two-Wire Interface 0 */
- 6, /* Two-Wire Interface 1 */
- 6, /* Two-Wire Interface 2 */
- 0, /* Multimedia Card Interface 0 */
- 5, /* Serial Peripheral Interface 0 */
- 5, /* Serial Peripheral Interface 1 */
- 5, /* UART 0 */
- 5, /* UART 1 */
- 0, /* Timer Counter 0, 1, 2, 3, 4 and 5 */
- 0, /* Pulse Width Modulation Controller */
- 0, /* ADC Controller */
- 0, /* DMA Controller 0 */
- 0, /* DMA Controller 1 */
- 2, /* USB Host High Speed port */
- 2, /* USB Device High speed port */
- 3, /* Ethernet MAC 0 */
- 3, /* LDC Controller or Image Sensor Interface */
- 0, /* Multimedia Card Interface 1 */
- 3, /* Ethernet MAC 1 */
- 4, /* Synchronous Serial Interface */
- 4, /* CAN Controller 0 */
- 4, /* CAN Controller 1 */
- 0, /* Advanced Interrupt Controller (IRQ0) */
-};
struct at91_init_soc __initdata at91sam9x5_soc = {
.map_io = at91sam9x5_map_io,
- .default_irq_priority = at91sam9x5_default_irq_priority,
.register_clocks = at91sam9x5_register_clocks,
.init = at91sam9x5_initialize,
};
diff --git a/arch/arm/mach-at91/at91x40.c b/arch/arm/mach-at91/at91x40.c
index d62fe090d814..46090e642d8e 100644
--- a/arch/arm/mach-at91/at91x40.c
+++ b/arch/arm/mach-at91/at91x40.c
@@ -13,10 +13,12 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/io.h>
#include <asm/proc-fns.h>
#include <asm/system_misc.h>
#include <asm/mach/arch.h>
#include <mach/at91x40.h>
+#include <mach/at91_aic.h>
#include <mach/at91_st.h>
#include <mach/timex.h>
#include "generic.h"
diff --git a/arch/arm/mach-at91/board-1arm.c b/arch/arm/mach-at91/board-1arm.c
index 271f994314a4..22d8856094f1 100644
--- a/arch/arm/mach-at91/board-1arm.c
+++ b/arch/arm/mach-at91/board-1arm.c
@@ -36,6 +36,7 @@
#include <mach/board.h>
#include <mach/cpu.h>
+#include <mach/at91_aic.h>
#include "generic.h"
@@ -91,6 +92,7 @@ MACHINE_START(ONEARM, "Ajeco 1ARM single board computer")
/* Maintainer: Lennert Buytenhek <buytenh@wantstofly.org> */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = onearm_init_early,
.init_irq = at91_init_irq_default,
.init_machine = onearm_board_init,
diff --git a/arch/arm/mach-at91/board-afeb-9260v1.c b/arch/arm/mach-at91/board-afeb-9260v1.c
index b7d8aa7b81e6..de7be1931817 100644
--- a/arch/arm/mach-at91/board-afeb-9260v1.c
+++ b/arch/arm/mach-at91/board-afeb-9260v1.c
@@ -44,6 +44,7 @@
#include <asm/mach/irq.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include "generic.h"
@@ -212,6 +213,7 @@ MACHINE_START(AFEB9260, "Custom afeb9260 board")
/* Maintainer: Sergey Lapin <slapin@ossfans.org> */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = afeb9260_init_early,
.init_irq = at91_init_irq_default,
.init_machine = afeb9260_board_init,
diff --git a/arch/arm/mach-at91/board-cam60.c b/arch/arm/mach-at91/board-cam60.c
index 29d3ef0a50fb..477e708497bc 100644
--- a/arch/arm/mach-at91/board-cam60.c
+++ b/arch/arm/mach-at91/board-cam60.c
@@ -39,6 +39,7 @@
#include <asm/mach/irq.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include "sam9_smc.h"
@@ -188,6 +189,7 @@ MACHINE_START(CAM60, "KwikByte CAM60")
/* Maintainer: KwikByte */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = cam60_init_early,
.init_irq = at91_init_irq_default,
.init_machine = cam60_board_init,
diff --git a/arch/arm/mach-at91/board-carmeva.c b/arch/arm/mach-at91/board-carmeva.c
index 44328a6d4609..a5b002f32a61 100644
--- a/arch/arm/mach-at91/board-carmeva.c
+++ b/arch/arm/mach-at91/board-carmeva.c
@@ -36,6 +36,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include "generic.h"
@@ -158,6 +159,7 @@ MACHINE_START(CARMEVA, "Carmeva")
/* Maintainer: Conitec Datasystems */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = carmeva_init_early,
.init_irq = at91_init_irq_default,
.init_machine = carmeva_board_init,
diff --git a/arch/arm/mach-at91/board-cpu9krea.c b/arch/arm/mach-at91/board-cpu9krea.c
index 69951ec7dbf3..ecbc13b594de 100644
--- a/arch/arm/mach-at91/board-cpu9krea.c
+++ b/arch/arm/mach-at91/board-cpu9krea.c
@@ -41,6 +41,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91sam9260_matrix.h>
#include <mach/at91_matrix.h>
@@ -376,6 +377,7 @@ MACHINE_START(CPUAT9G20, "Eukrea CPU9G20")
/* Maintainer: Eric Benard - EUKREA Electromatique */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = cpu9krea_init_early,
.init_irq = at91_init_irq_default,
.init_machine = cpu9krea_board_init,
diff --git a/arch/arm/mach-at91/board-cpuat91.c b/arch/arm/mach-at91/board-cpuat91.c
index 895cf2dba612..2e6d043c82f2 100644
--- a/arch/arm/mach-at91/board-cpuat91.c
+++ b/arch/arm/mach-at91/board-cpuat91.c
@@ -37,6 +37,7 @@
#include <asm/mach/irq.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
#include <mach/cpu.h>
@@ -178,6 +179,7 @@ MACHINE_START(CPUAT91, "Eukrea")
/* Maintainer: Eric Benard - EUKREA Electromatique */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = cpuat91_init_early,
.init_irq = at91_init_irq_default,
.init_machine = cpuat91_board_init,
diff --git a/arch/arm/mach-at91/board-csb337.c b/arch/arm/mach-at91/board-csb337.c
index cd813361cd26..462bc319cbc5 100644
--- a/arch/arm/mach-at91/board-csb337.c
+++ b/arch/arm/mach-at91/board-csb337.c
@@ -39,6 +39,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include "generic.h"
@@ -252,6 +253,7 @@ MACHINE_START(CSB337, "Cogent CSB337")
/* Maintainer: Bill Gatliff */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = csb337_init_early,
.init_irq = at91_init_irq_default,
.init_machine = csb337_board_init,
diff --git a/arch/arm/mach-at91/board-csb637.c b/arch/arm/mach-at91/board-csb637.c
index 7c8b05a57d7f..872871ab1160 100644
--- a/arch/arm/mach-at91/board-csb637.c
+++ b/arch/arm/mach-at91/board-csb637.c
@@ -36,6 +36,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include "generic.h"
@@ -133,6 +134,7 @@ MACHINE_START(CSB637, "Cogent CSB637")
/* Maintainer: Bill Gatliff */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = csb637_init_early,
.init_irq = at91_init_irq_default,
.init_machine = csb637_board_init,
diff --git a/arch/arm/mach-at91/board-dt.c b/arch/arm/mach-at91/board-dt.c
index a1fce05aa7a5..e8f45c4e0ea8 100644
--- a/arch/arm/mach-at91/board-dt.c
+++ b/arch/arm/mach-at91/board-dt.c
@@ -16,6 +16,7 @@
#include <linux/of_platform.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <asm/setup.h>
#include <asm/irq.h>
@@ -53,6 +54,7 @@ DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM (Device Tree)")
/* Maintainer: Atmel */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = at91_dt_initialize,
.init_irq = at91_dt_init_irq,
.init_machine = at91_dt_device_init,
diff --git a/arch/arm/mach-at91/board-eb01.c b/arch/arm/mach-at91/board-eb01.c
index d2023f27c652..01f66e99ece7 100644
--- a/arch/arm/mach-at91/board-eb01.c
+++ b/arch/arm/mach-at91/board-eb01.c
@@ -28,6 +28,7 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include "generic.h"
static void __init at91eb01_init_irq(void)
@@ -43,6 +44,7 @@ static void __init at91eb01_init_early(void)
MACHINE_START(AT91EB01, "Atmel AT91 EB01")
/* Maintainer: Greg Ungerer <gerg@snapgear.com> */
.timer = &at91x40_timer,
+ .handle_irq = at91_aic_handle_irq,
.init_early = at91eb01_init_early,
.init_irq = at91eb01_init_irq,
MACHINE_END
diff --git a/arch/arm/mach-at91/board-eb9200.c b/arch/arm/mach-at91/board-eb9200.c
index bd1017297989..d1e1f3fc0a47 100644
--- a/arch/arm/mach-at91/board-eb9200.c
+++ b/arch/arm/mach-at91/board-eb9200.c
@@ -36,6 +36,7 @@
#include <asm/mach/irq.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include "generic.h"
@@ -118,6 +119,7 @@ static void __init eb9200_board_init(void)
MACHINE_START(ATEB9200, "Embest ATEB9200")
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = eb9200_init_early,
.init_irq = at91_init_irq_default,
.init_machine = eb9200_board_init,
diff --git a/arch/arm/mach-at91/board-ecbat91.c b/arch/arm/mach-at91/board-ecbat91.c
index 89cc3726a9ce..9c24cb25707c 100644
--- a/arch/arm/mach-at91/board-ecbat91.c
+++ b/arch/arm/mach-at91/board-ecbat91.c
@@ -39,6 +39,7 @@
#include <mach/board.h>
#include <mach/cpu.h>
+#include <mach/at91_aic.h>
#include "generic.h"
@@ -170,6 +171,7 @@ MACHINE_START(ECBAT91, "emQbit's ECB_AT91")
/* Maintainer: emQbit.com */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ecb_at91init_early,
.init_irq = at91_init_irq_default,
.init_machine = ecb_at91board_init,
diff --git a/arch/arm/mach-at91/board-eco920.c b/arch/arm/mach-at91/board-eco920.c
index 558546cf63f4..82bdfde3405f 100644
--- a/arch/arm/mach-at91/board-eco920.c
+++ b/arch/arm/mach-at91/board-eco920.c
@@ -25,6 +25,7 @@
#include <asm/mach/map.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
#include <mach/cpu.h>
@@ -132,6 +133,7 @@ MACHINE_START(ECO920, "eco920")
/* Maintainer: Sascha Hauer */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = eco920_init_early,
.init_irq = at91_init_irq_default,
.init_machine = eco920_board_init,
diff --git a/arch/arm/mach-at91/board-flexibity.c b/arch/arm/mach-at91/board-flexibity.c
index 47658f78105d..6cc83a87d77c 100644
--- a/arch/arm/mach-at91/board-flexibity.c
+++ b/arch/arm/mach-at91/board-flexibity.c
@@ -34,6 +34,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include "generic.h"
@@ -160,6 +161,7 @@ MACHINE_START(FLEXIBITY, "Flexibity Connect")
/* Maintainer: Maxim Osipov */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = flexibity_init_early,
.init_irq = at91_init_irq_default,
.init_machine = flexibity_board_init,
diff --git a/arch/arm/mach-at91/board-foxg20.c b/arch/arm/mach-at91/board-foxg20.c
index 33411e6ecb1f..69ab1247ef81 100644
--- a/arch/arm/mach-at91/board-foxg20.c
+++ b/arch/arm/mach-at91/board-foxg20.c
@@ -42,6 +42,7 @@
#include <asm/mach/irq.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include "sam9_smc.h"
@@ -262,6 +263,7 @@ MACHINE_START(ACMENETUSFOXG20, "Acme Systems srl FOX Board G20")
/* Maintainer: Sergio Tanzilli */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = foxg20_init_early,
.init_irq = at91_init_irq_default,
.init_machine = foxg20_board_init,
diff --git a/arch/arm/mach-at91/board-gsia18s.c b/arch/arm/mach-at91/board-gsia18s.c
index 3e0dfa643a86..a9d5e78118c5 100644
--- a/arch/arm/mach-at91/board-gsia18s.c
+++ b/arch/arm/mach-at91/board-gsia18s.c
@@ -31,6 +31,7 @@
#include <asm/mach/arch.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/gsia18s.h>
#include <mach/stamp9g20.h>
@@ -575,6 +576,7 @@ static void __init gsia18s_board_init(void)
MACHINE_START(GSIA18S, "GS_IA18_S")
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = gsia18s_init_early,
.init_irq = at91_init_irq_default,
.init_machine = gsia18s_board_init,
diff --git a/arch/arm/mach-at91/board-kafa.c b/arch/arm/mach-at91/board-kafa.c
index f260657f32bc..64c1dbf88a07 100644
--- a/arch/arm/mach-at91/board-kafa.c
+++ b/arch/arm/mach-at91/board-kafa.c
@@ -35,6 +35,7 @@
#include <asm/mach/irq.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/cpu.h>
#include "generic.h"
@@ -93,6 +94,7 @@ MACHINE_START(KAFA, "Sperry-Sun KAFA")
/* Maintainer: Sergei Sharonov */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = kafa_init_early,
.init_irq = at91_init_irq_default,
.init_machine = kafa_board_init,
diff --git a/arch/arm/mach-at91/board-kb9202.c b/arch/arm/mach-at91/board-kb9202.c
index ba39db5482b9..5d96cb85175f 100644
--- a/arch/arm/mach-at91/board-kb9202.c
+++ b/arch/arm/mach-at91/board-kb9202.c
@@ -37,6 +37,7 @@
#include <mach/board.h>
#include <mach/cpu.h>
+#include <mach/at91_aic.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
@@ -133,6 +134,7 @@ MACHINE_START(KB9200, "KB920x")
/* Maintainer: KwikByte, Inc. */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = kb9202_init_early,
.init_irq = at91_init_irq_default,
.init_machine = kb9202_board_init,
diff --git a/arch/arm/mach-at91/board-neocore926.c b/arch/arm/mach-at91/board-neocore926.c
index d2f4cc161766..18103c5d993c 100644
--- a/arch/arm/mach-at91/board-neocore926.c
+++ b/arch/arm/mach-at91/board-neocore926.c
@@ -45,6 +45,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include "sam9_smc.h"
@@ -378,6 +379,7 @@ MACHINE_START(NEOCORE926, "ADENEO NEOCORE 926")
/* Maintainer: ADENEO */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = neocore926_init_early,
.init_irq = at91_init_irq_default,
.init_machine = neocore926_board_init,
diff --git a/arch/arm/mach-at91/board-pcontrol-g20.c b/arch/arm/mach-at91/board-pcontrol-g20.c
index 7fe638342421..9ca3e32c54cb 100644
--- a/arch/arm/mach-at91/board-pcontrol-g20.c
+++ b/arch/arm/mach-at91/board-pcontrol-g20.c
@@ -30,6 +30,7 @@
#include <asm/mach/arch.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/stamp9g20.h>
@@ -218,6 +219,7 @@ MACHINE_START(PCONTROL_G20, "PControl G20")
/* Maintainer: pgsellmann@portner-elektronik.at */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = pcontrol_g20_init_early,
.init_irq = at91_init_irq_default,
.init_machine = pcontrol_g20_board_init,
diff --git a/arch/arm/mach-at91/board-picotux200.c b/arch/arm/mach-at91/board-picotux200.c
index b45c0a5d5ca7..127065504508 100644
--- a/arch/arm/mach-at91/board-picotux200.c
+++ b/arch/arm/mach-at91/board-picotux200.c
@@ -38,6 +38,7 @@
#include <asm/mach/irq.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
@@ -120,6 +121,7 @@ MACHINE_START(PICOTUX2XX, "picotux 200")
/* Maintainer: Kleinhenz Elektronik GmbH */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = picotux200_init_early,
.init_irq = at91_init_irq_default,
.init_machine = picotux200_board_init,
diff --git a/arch/arm/mach-at91/board-qil-a9260.c b/arch/arm/mach-at91/board-qil-a9260.c
index 0c61bf0d272c..bf351e285422 100644
--- a/arch/arm/mach-at91/board-qil-a9260.c
+++ b/arch/arm/mach-at91/board-qil-a9260.c
@@ -41,6 +41,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
@@ -258,6 +259,7 @@ MACHINE_START(QIL_A9260, "CALAO QIL_A9260")
/* Maintainer: calao-systems */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
diff --git a/arch/arm/mach-at91/board-rm9200dk.c b/arch/arm/mach-at91/board-rm9200dk.c
index afd7a4713766..cc2bf9796073 100644
--- a/arch/arm/mach-at91/board-rm9200dk.c
+++ b/arch/arm/mach-at91/board-rm9200dk.c
@@ -40,6 +40,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
@@ -223,6 +224,7 @@ MACHINE_START(AT91RM9200DK, "Atmel AT91RM9200-DK")
/* Maintainer: SAN People/Atmel */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = dk_init_early,
.init_irq = at91_init_irq_default,
.init_machine = dk_board_init,
diff --git a/arch/arm/mach-at91/board-rm9200ek.c b/arch/arm/mach-at91/board-rm9200ek.c
index 2b15b8adec4c..62e19e64c9d3 100644
--- a/arch/arm/mach-at91/board-rm9200ek.c
+++ b/arch/arm/mach-at91/board-rm9200ek.c
@@ -40,6 +40,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
@@ -190,6 +191,7 @@ MACHINE_START(AT91RM9200EK, "Atmel AT91RM9200-EK")
/* Maintainer: SAN People/Atmel */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
diff --git a/arch/arm/mach-at91/board-rsi-ews.c b/arch/arm/mach-at91/board-rsi-ews.c
index 24ab9be7510f..c3b43aefdb75 100644
--- a/arch/arm/mach-at91/board-rsi-ews.c
+++ b/arch/arm/mach-at91/board-rsi-ews.c
@@ -26,6 +26,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <linux/gpio.h>
@@ -225,6 +226,7 @@ MACHINE_START(RSI_EWS, "RSI EWS")
/* Maintainer: Josef Holzmayr <holzmayr@rsi-elektrotechnik.de> */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = rsi_ews_init_early,
.init_irq = at91_init_irq_default,
.init_machine = rsi_ews_board_init,
diff --git a/arch/arm/mach-at91/board-sam9-l9260.c b/arch/arm/mach-at91/board-sam9-l9260.c
index cdd21f2595d2..7bf6da70d7d5 100644
--- a/arch/arm/mach-at91/board-sam9-l9260.c
+++ b/arch/arm/mach-at91/board-sam9-l9260.c
@@ -38,6 +38,7 @@
#include <asm/mach/irq.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include "sam9_smc.h"
@@ -202,6 +203,7 @@ MACHINE_START(SAM9_L9260, "Olimex SAM9-L9260")
/* Maintainer: Olimex */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c
index 7b3c3913551a..889c1bf71eb5 100644
--- a/arch/arm/mach-at91/board-sam9260ek.c
+++ b/arch/arm/mach-at91/board-sam9260ek.c
@@ -42,6 +42,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
#include <mach/system_rev.h>
@@ -344,6 +345,7 @@ MACHINE_START(AT91SAM9260EK, "Atmel AT91SAM9260-EK")
/* Maintainer: Atmel */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c
index 2736453821b0..2269be5fa384 100644
--- a/arch/arm/mach-at91/board-sam9261ek.c
+++ b/arch/arm/mach-at91/board-sam9261ek.c
@@ -46,6 +46,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
#include <mach/system_rev.h>
@@ -615,6 +616,7 @@ MACHINE_START(AT91SAM9G10EK, "Atmel AT91SAM9G10-EK")
/* Maintainer: Atmel */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c
index 983cb98d2465..82adf581afc2 100644
--- a/arch/arm/mach-at91/board-sam9263ek.c
+++ b/arch/arm/mach-at91/board-sam9263ek.c
@@ -45,6 +45,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
#include <mach/system_rev.h>
@@ -443,6 +444,7 @@ MACHINE_START(AT91SAM9263EK, "Atmel AT91SAM9263-EK")
/* Maintainer: Atmel */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c
index 6860d3451100..4ea4ee00364b 100644
--- a/arch/arm/mach-at91/board-sam9g20ek.c
+++ b/arch/arm/mach-at91/board-sam9g20ek.c
@@ -44,6 +44,7 @@
#include <asm/mach/irq.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/system_rev.h>
@@ -413,6 +414,7 @@ MACHINE_START(AT91SAM9G20EK, "Atmel AT91SAM9G20-EK")
/* Maintainer: Atmel */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
@@ -422,6 +424,7 @@ MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod")
/* Maintainer: Atmel */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c
index 63163dc7df46..3d48ec154685 100644
--- a/arch/arm/mach-at91/board-sam9m10g45ek.c
+++ b/arch/arm/mach-at91/board-sam9m10g45ek.c
@@ -43,6 +43,7 @@
#include <asm/mach/irq.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
#include <mach/system_rev.h>
@@ -503,6 +504,7 @@ MACHINE_START(AT91SAM9M10G45EK, "Atmel AT91SAM9M10G45-EK")
/* Maintainer: Atmel */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
diff --git a/arch/arm/mach-at91/board-sam9rlek.c b/arch/arm/mach-at91/board-sam9rlek.c
index be3239f13daa..e7dc3ead7045 100644
--- a/arch/arm/mach-at91/board-sam9rlek.c
+++ b/arch/arm/mach-at91/board-sam9rlek.c
@@ -31,6 +31,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
@@ -319,6 +320,7 @@ MACHINE_START(AT91SAM9RLEK, "Atmel AT91SAM9RL-EK")
/* Maintainer: Atmel */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
diff --git a/arch/arm/mach-at91/board-snapper9260.c b/arch/arm/mach-at91/board-snapper9260.c
index 9d446f1bb45f..a4e031a039fd 100644
--- a/arch/arm/mach-at91/board-snapper9260.c
+++ b/arch/arm/mach-at91/board-snapper9260.c
@@ -33,6 +33,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include "sam9_smc.h"
@@ -178,6 +179,7 @@ static void __init snapper9260_board_init(void)
MACHINE_START(SNAPPER_9260, "Bluewater Systems Snapper 9260/9G20 module")
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = snapper9260_init_early,
.init_irq = at91_init_irq_default,
.init_machine = snapper9260_board_init,
diff --git a/arch/arm/mach-at91/board-stamp9g20.c b/arch/arm/mach-at91/board-stamp9g20.c
index ee86f9d7ee72..29eae1626bf7 100644
--- a/arch/arm/mach-at91/board-stamp9g20.c
+++ b/arch/arm/mach-at91/board-stamp9g20.c
@@ -26,6 +26,7 @@
#include <asm/mach/arch.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include "sam9_smc.h"
@@ -287,6 +288,7 @@ MACHINE_START(PORTUXG20, "taskit PortuxG20")
/* Maintainer: taskit GmbH */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = stamp9g20_init_early,
.init_irq = at91_init_irq_default,
.init_machine = portuxg20_board_init,
@@ -296,6 +298,7 @@ MACHINE_START(STAMP9G20, "taskit Stamp9G20")
/* Maintainer: taskit GmbH */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = stamp9g20_init_early,
.init_irq = at91_init_irq_default,
.init_machine = stamp9g20evb_board_init,
diff --git a/arch/arm/mach-at91/board-usb-a926x.c b/arch/arm/mach-at91/board-usb-a926x.c
index 95393fcaf199..c1476b9fe7b9 100644
--- a/arch/arm/mach-at91/board-usb-a926x.c
+++ b/arch/arm/mach-at91/board-usb-a926x.c
@@ -42,6 +42,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91sam9_smc.h>
#include <mach/at91_shdwc.h>
@@ -358,6 +359,7 @@ MACHINE_START(USB_A9263, "CALAO USB_A9263")
/* Maintainer: calao-systems */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
@@ -367,6 +369,7 @@ MACHINE_START(USB_A9260, "CALAO USB_A9260")
/* Maintainer: calao-systems */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
@@ -376,6 +379,7 @@ MACHINE_START(USB_A9G20, "CALAO USB_A92G0")
/* Maintainer: Jean-Christophe PLAGNIOL-VILLARD */
.timer = &at91sam926x_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = ek_init_early,
.init_irq = at91_init_irq_default,
.init_machine = ek_board_init,
diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
index d56665ea4b55..516d340549d8 100644
--- a/arch/arm/mach-at91/board-yl-9200.c
+++ b/arch/arm/mach-at91/board-yl-9200.c
@@ -44,6 +44,7 @@
#include <mach/hardware.h>
#include <mach/board.h>
+#include <mach/at91_aic.h>
#include <mach/at91rm9200_mc.h>
#include <mach/at91_ramc.h>
#include <mach/cpu.h>
@@ -590,6 +591,7 @@ MACHINE_START(YL9200, "uCdragon YL-9200")
/* Maintainer: S.Birtles */
.timer = &at91rm9200_timer,
.map_io = at91_map_io,
+ .handle_irq = at91_aic_handle_irq,
.init_early = yl9200_init_early,
.init_irq = at91_init_irq_default,
.init_machine = yl9200_board_init,
diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h
index 0a60bf837037..f49650677653 100644
--- a/arch/arm/mach-at91/generic.h
+++ b/arch/arm/mach-at91/generic.h
@@ -29,6 +29,8 @@ extern void __init at91x40_init_interrupts(unsigned int priority[]);
extern void __init at91_aic_init(unsigned int priority[]);
extern int __init at91_aic_of_init(struct device_node *node,
struct device_node *parent);
+extern int __init at91_aic5_of_init(struct device_node *node,
+ struct device_node *parent);
/* Timer */
diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c
index 325837a264c9..be42cf0e74bd 100644
--- a/arch/arm/mach-at91/gpio.c
+++ b/arch/arm/mach-at91/gpio.c
@@ -26,6 +26,8 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <asm/mach/irq.h>
+
#include <mach/hardware.h>
#include <mach/at91_pio.h>
@@ -585,15 +587,14 @@ static struct irq_chip gpio_irqchip = {
static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_data *idata = irq_desc_get_irq_data(desc);
- struct irq_chip *chip = irq_data_get_irq_chip(idata);
struct at91_gpio_chip *at91_gpio = irq_data_get_irq_chip_data(idata);
void __iomem *pio = at91_gpio->regbase;
unsigned long isr;
int n;
- /* temporarily mask (level sensitive) parent IRQ */
- chip->irq_ack(idata);
+ chained_irq_enter(chip, desc);
for (;;) {
/* Reading ISR acks pending (edge triggered) GPIO interrupts.
* When there none are pending, we're finished unless we need
@@ -614,7 +615,7 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
n = find_next_bit(&isr, BITS_PER_LONG, n + 1);
}
}
- chip->irq_unmask(idata);
+ chained_irq_exit(chip, desc);
/* now it may re-trigger */
}
diff --git a/arch/arm/mach-at91/include/mach/at91_aic.h b/arch/arm/mach-at91/include/mach/at91_aic.h
index 3045781c473f..eaea66197fa1 100644
--- a/arch/arm/mach-at91/include/mach/at91_aic.h
+++ b/arch/arm/mach-at91/include/mach/at91_aic.h
@@ -23,12 +23,23 @@ extern void __iomem *at91_aic_base;
__raw_readl(at91_aic_base + field)
#define at91_aic_write(field, value) \
- __raw_writel(value, at91_aic_base + field);
+ __raw_writel(value, at91_aic_base + field)
#else
.extern at91_aic_base
#endif
+/* Number of irq lines managed by AIC */
+#define NR_AIC_IRQS 32
+#define NR_AIC5_IRQS 128
+
+#define AT91_AIC5_SSR 0x0 /* Source Select Register [AIC5] */
+#define AT91_AIC5_INTSEL_MSK (0x7f << 0) /* Interrupt Line Selection Mask */
+
+#define AT91_AIC_IRQ_MIN_PRIORITY 0
+#define AT91_AIC_IRQ_MAX_PRIORITY 7
+
#define AT91_AIC_SMR(n) ((n) * 4) /* Source Mode Registers 0-31 */
+#define AT91_AIC5_SMR 0x4 /* Source Mode Register [AIC5] */
#define AT91_AIC_PRIOR (7 << 0) /* Priority Level */
#define AT91_AIC_SRCTYPE (3 << 5) /* Interrupt Source Type */
#define AT91_AIC_SRCTYPE_LOW (0 << 5)
@@ -37,29 +48,52 @@ extern void __iomem *at91_aic_base;
#define AT91_AIC_SRCTYPE_RISING (3 << 5)
#define AT91_AIC_SVR(n) (0x80 + ((n) * 4)) /* Source Vector Registers 0-31 */
+#define AT91_AIC5_SVR 0x8 /* Source Vector Register [AIC5] */
#define AT91_AIC_IVR 0x100 /* Interrupt Vector Register */
+#define AT91_AIC5_IVR 0x10 /* Interrupt Vector Register [AIC5] */
#define AT91_AIC_FVR 0x104 /* Fast Interrupt Vector Register */
+#define AT91_AIC5_FVR 0x14 /* Fast Interrupt Vector Register [AIC5] */
#define AT91_AIC_ISR 0x108 /* Interrupt Status Register */
+#define AT91_AIC5_ISR 0x18 /* Interrupt Status Register [AIC5] */
#define AT91_AIC_IRQID (0x1f << 0) /* Current Interrupt Identifier */
#define AT91_AIC_IPR 0x10c /* Interrupt Pending Register */
+#define AT91_AIC5_IPR0 0x20 /* Interrupt Pending Register 0 [AIC5] */
+#define AT91_AIC5_IPR1 0x24 /* Interrupt Pending Register 1 [AIC5] */
+#define AT91_AIC5_IPR2 0x28 /* Interrupt Pending Register 2 [AIC5] */
+#define AT91_AIC5_IPR3 0x2c /* Interrupt Pending Register 3 [AIC5] */
#define AT91_AIC_IMR 0x110 /* Interrupt Mask Register */
+#define AT91_AIC5_IMR 0x30 /* Interrupt Mask Register [AIC5] */
#define AT91_AIC_CISR 0x114 /* Core Interrupt Status Register */
+#define AT91_AIC5_CISR 0x34 /* Core Interrupt Status Register [AIC5] */
#define AT91_AIC_NFIQ (1 << 0) /* nFIQ Status */
#define AT91_AIC_NIRQ (1 << 1) /* nIRQ Status */
#define AT91_AIC_IECR 0x120 /* Interrupt Enable Command Register */
+#define AT91_AIC5_IECR 0x40 /* Interrupt Enable Command Register [AIC5] */
#define AT91_AIC_IDCR 0x124 /* Interrupt Disable Command Register */
+#define AT91_AIC5_IDCR 0x44 /* Interrupt Disable Command Register [AIC5] */
#define AT91_AIC_ICCR 0x128 /* Interrupt Clear Command Register */
+#define AT91_AIC5_ICCR 0x48 /* Interrupt Clear Command Register [AIC5] */
#define AT91_AIC_ISCR 0x12c /* Interrupt Set Command Register */
+#define AT91_AIC5_ISCR 0x4c /* Interrupt Set Command Register [AIC5] */
#define AT91_AIC_EOICR 0x130 /* End of Interrupt Command Register */
+#define AT91_AIC5_EOICR 0x38 /* End of Interrupt Command Register [AIC5] */
#define AT91_AIC_SPU 0x134 /* Spurious Interrupt Vector Register */
+#define AT91_AIC5_SPU 0x3c /* Spurious Interrupt Vector Register [AIC5] */
#define AT91_AIC_DCR 0x138 /* Debug Control Register */
+#define AT91_AIC5_DCR 0x6c /* Debug Control Register [AIC5] */
#define AT91_AIC_DCR_PROT (1 << 0) /* Protection Mode */
#define AT91_AIC_DCR_GMSK (1 << 1) /* General Mask */
#define AT91_AIC_FFER 0x140 /* Fast Forcing Enable Register [SAM9 only] */
+#define AT91_AIC5_FFER 0x50 /* Fast Forcing Enable Register [AIC5] */
#define AT91_AIC_FFDR 0x144 /* Fast Forcing Disable Register [SAM9 only] */
+#define AT91_AIC5_FFDR 0x54 /* Fast Forcing Disable Register [AIC5] */
#define AT91_AIC_FFSR 0x148 /* Fast Forcing Status Register [SAM9 only] */
+#define AT91_AIC5_FFSR 0x58 /* Fast Forcing Status Register [AIC5] */
+
+void at91_aic_handle_irq(struct pt_regs *regs);
+void at91_aic5_handle_irq(struct pt_regs *regs);
#endif
diff --git a/arch/arm/mach-at91/include/mach/at91_spi.h b/arch/arm/mach-at91/include/mach/at91_spi.h
deleted file mode 100644
index 2f6ba0c5636e..000000000000
--- a/arch/arm/mach-at91/include/mach/at91_spi.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91_spi.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * Serial Peripheral Interface (SPI) registers.
- * Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91_SPI_H
-#define AT91_SPI_H
-
-#define AT91_SPI_CR 0x00 /* Control Register */
-#define AT91_SPI_SPIEN (1 << 0) /* SPI Enable */
-#define AT91_SPI_SPIDIS (1 << 1) /* SPI Disable */
-#define AT91_SPI_SWRST (1 << 7) /* SPI Software Reset */
-#define AT91_SPI_LASTXFER (1 << 24) /* Last Transfer [SAM9261 only] */
-
-#define AT91_SPI_MR 0x04 /* Mode Register */
-#define AT91_SPI_MSTR (1 << 0) /* Master/Slave Mode */
-#define AT91_SPI_PS (1 << 1) /* Peripheral Select */
-#define AT91_SPI_PS_FIXED (0 << 1)
-#define AT91_SPI_PS_VARIABLE (1 << 1)
-#define AT91_SPI_PCSDEC (1 << 2) /* Chip Select Decode */
-#define AT91_SPI_DIV32 (1 << 3) /* Clock Selection [AT91RM9200 only] */
-#define AT91_SPI_MODFDIS (1 << 4) /* Mode Fault Detection */
-#define AT91_SPI_LLB (1 << 7) /* Local Loopback Enable */
-#define AT91_SPI_PCS (0xf << 16) /* Peripheral Chip Select */
-#define AT91_SPI_DLYBCS (0xff << 24) /* Delay Between Chip Selects */
-
-#define AT91_SPI_RDR 0x08 /* Receive Data Register */
-#define AT91_SPI_RD (0xffff << 0) /* Receive Data */
-#define AT91_SPI_PCS (0xf << 16) /* Peripheral Chip Select */
-
-#define AT91_SPI_TDR 0x0c /* Transmit Data Register */
-#define AT91_SPI_TD (0xffff << 0) /* Transmit Data */
-#define AT91_SPI_PCS (0xf << 16) /* Peripheral Chip Select */
-#define AT91_SPI_LASTXFER (1 << 24) /* Last Transfer [SAM9261 only] */
-
-#define AT91_SPI_SR 0x10 /* Status Register */
-#define AT91_SPI_RDRF (1 << 0) /* Receive Data Register Full */
-#define AT91_SPI_TDRE (1 << 1) /* Transmit Data Register Full */
-#define AT91_SPI_MODF (1 << 2) /* Mode Fault Error */
-#define AT91_SPI_OVRES (1 << 3) /* Overrun Error Status */
-#define AT91_SPI_ENDRX (1 << 4) /* End of RX buffer */
-#define AT91_SPI_ENDTX (1 << 5) /* End of TX buffer */
-#define AT91_SPI_RXBUFF (1 << 6) /* RX Buffer Full */
-#define AT91_SPI_TXBUFE (1 << 7) /* TX Buffer Empty */
-#define AT91_SPI_NSSR (1 << 8) /* NSS Rising [SAM9261 only] */
-#define AT91_SPI_TXEMPTY (1 << 9) /* Transmission Register Empty [SAM9261 only] */
-#define AT91_SPI_SPIENS (1 << 16) /* SPI Enable Status */
-
-#define AT91_SPI_IER 0x14 /* Interrupt Enable Register */
-#define AT91_SPI_IDR 0x18 /* Interrupt Disable Register */
-#define AT91_SPI_IMR 0x1c /* Interrupt Mask Register */
-
-#define AT91_SPI_CSR(n) (0x30 + ((n) * 4)) /* Chip Select Registers 0-3 */
-#define AT91_SPI_CPOL (1 << 0) /* Clock Polarity */
-#define AT91_SPI_NCPHA (1 << 1) /* Clock Phase */
-#define AT91_SPI_CSAAT (1 << 3) /* Chip Select Active After Transfer [SAM9261 only] */
-#define AT91_SPI_BITS (0xf << 4) /* Bits Per Transfer */
-#define AT91_SPI_BITS_8 (0 << 4)
-#define AT91_SPI_BITS_9 (1 << 4)
-#define AT91_SPI_BITS_10 (2 << 4)
-#define AT91_SPI_BITS_11 (3 << 4)
-#define AT91_SPI_BITS_12 (4 << 4)
-#define AT91_SPI_BITS_13 (5 << 4)
-#define AT91_SPI_BITS_14 (6 << 4)
-#define AT91_SPI_BITS_15 (7 << 4)
-#define AT91_SPI_BITS_16 (8 << 4)
-#define AT91_SPI_SCBR (0xff << 8) /* Serial Clock Baud Rate */
-#define AT91_SPI_DLYBS (0xff << 16) /* Delay before SPCK */
-#define AT91_SPI_DLYBCT (0xff << 24) /* Delay between Consecutive Transfers */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/at91_ssc.h b/arch/arm/mach-at91/include/mach/at91_ssc.h
deleted file mode 100644
index a81114c11c74..000000000000
--- a/arch/arm/mach-at91/include/mach/at91_ssc.h
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91_ssc.h
- *
- * Copyright (C) SAN People
- *
- * Serial Synchronous Controller (SSC) registers.
- * Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91_SSC_H
-#define AT91_SSC_H
-
-#define AT91_SSC_CR 0x00 /* Control Register */
-#define AT91_SSC_RXEN (1 << 0) /* Receive Enable */
-#define AT91_SSC_RXDIS (1 << 1) /* Receive Disable */
-#define AT91_SSC_TXEN (1 << 8) /* Transmit Enable */
-#define AT91_SSC_TXDIS (1 << 9) /* Transmit Disable */
-#define AT91_SSC_SWRST (1 << 15) /* Software Reset */
-
-#define AT91_SSC_CMR 0x04 /* Clock Mode Register */
-#define AT91_SSC_CMR_DIV (0xfff << 0) /* Clock Divider */
-
-#define AT91_SSC_RCMR 0x10 /* Receive Clock Mode Register */
-#define AT91_SSC_CKS (3 << 0) /* Clock Selection */
-#define AT91_SSC_CKS_DIV (0 << 0)
-#define AT91_SSC_CKS_CLOCK (1 << 0)
-#define AT91_SSC_CKS_PIN (2 << 0)
-#define AT91_SSC_CKO (7 << 2) /* Clock Output Mode Selection */
-#define AT91_SSC_CKO_NONE (0 << 2)
-#define AT91_SSC_CKO_CONTINUOUS (1 << 2)
-#define AT91_SSC_CKI (1 << 5) /* Clock Inversion */
-#define AT91_SSC_CKI_FALLING (0 << 5)
-#define AT91_SSC_CK_RISING (1 << 5)
-#define AT91_SSC_CKG (1 << 6) /* Receive Clock Gating Selection [AT91SAM9261 only] */
-#define AT91_SSC_CKG_NONE (0 << 6)
-#define AT91_SSC_CKG_RFLOW (1 << 6)
-#define AT91_SSC_CKG_RFHIGH (2 << 6)
-#define AT91_SSC_START (0xf << 8) /* Start Selection */
-#define AT91_SSC_START_CONTINUOUS (0 << 8)
-#define AT91_SSC_START_TX_RX (1 << 8)
-#define AT91_SSC_START_LOW_RF (2 << 8)
-#define AT91_SSC_START_HIGH_RF (3 << 8)
-#define AT91_SSC_START_FALLING_RF (4 << 8)
-#define AT91_SSC_START_RISING_RF (5 << 8)
-#define AT91_SSC_START_LEVEL_RF (6 << 8)
-#define AT91_SSC_START_EDGE_RF (7 << 8)
-#define AT91_SSC_STOP (1 << 12) /* Receive Stop Selection [AT91SAM9261 only] */
-#define AT91_SSC_STTDLY (0xff << 16) /* Start Delay */
-#define AT91_SSC_PERIOD (0xff << 24) /* Period Divider Selection */
-
-#define AT91_SSC_RFMR 0x14 /* Receive Frame Mode Register */
-#define AT91_SSC_DATALEN (0x1f << 0) /* Data Length */
-#define AT91_SSC_LOOP (1 << 5) /* Loop Mode */
-#define AT91_SSC_MSBF (1 << 7) /* Most Significant Bit First */
-#define AT91_SSC_DATNB (0xf << 8) /* Data Number per Frame */
-#define AT91_SSC_FSLEN (0xf << 16) /* Frame Sync Length */
-#define AT91_SSC_FSOS (7 << 20) /* Frame Sync Output Selection */
-#define AT91_SSC_FSOS_NONE (0 << 20)
-#define AT91_SSC_FSOS_NEGATIVE (1 << 20)
-#define AT91_SSC_FSOS_POSITIVE (2 << 20)
-#define AT91_SSC_FSOS_LOW (3 << 20)
-#define AT91_SSC_FSOS_HIGH (4 << 20)
-#define AT91_SSC_FSOS_TOGGLE (5 << 20)
-#define AT91_SSC_FSEDGE (1 << 24) /* Frame Sync Edge Detection */
-#define AT91_SSC_FSEDGE_POSITIVE (0 << 24)
-#define AT91_SSC_FSEDGE_NEGATIVE (1 << 24)
-
-#define AT91_SSC_TCMR 0x18 /* Transmit Clock Mode Register */
-#define AT91_SSC_TFMR 0x1c /* Transmit Fram Mode Register */
-#define AT91_SSC_DATDEF (1 << 5) /* Data Default Value */
-#define AT91_SSC_FSDEN (1 << 23) /* Frame Sync Data Enable */
-
-#define AT91_SSC_RHR 0x20 /* Receive Holding Register */
-#define AT91_SSC_THR 0x24 /* Transmit Holding Register */
-#define AT91_SSC_RSHR 0x30 /* Receive Sync Holding Register */
-#define AT91_SSC_TSHR 0x34 /* Transmit Sync Holding Register */
-
-#define AT91_SSC_RC0R 0x38 /* Receive Compare 0 Register [AT91SAM9261 only] */
-#define AT91_SSC_RC1R 0x3c /* Receive Compare 1 Register [AT91SAM9261 only] */
-
-#define AT91_SSC_SR 0x40 /* Status Register */
-#define AT91_SSC_TXRDY (1 << 0) /* Transmit Ready */
-#define AT91_SSC_TXEMPTY (1 << 1) /* Transmit Empty */
-#define AT91_SSC_ENDTX (1 << 2) /* End of Transmission */
-#define AT91_SSC_TXBUFE (1 << 3) /* Transmit Buffer Empty */
-#define AT91_SSC_RXRDY (1 << 4) /* Receive Ready */
-#define AT91_SSC_OVRUN (1 << 5) /* Receive Overrun */
-#define AT91_SSC_ENDRX (1 << 6) /* End of Reception */
-#define AT91_SSC_RXBUFF (1 << 7) /* Receive Buffer Full */
-#define AT91_SSC_CP0 (1 << 8) /* Compare 0 [AT91SAM9261 only] */
-#define AT91_SSC_CP1 (1 << 9) /* Compare 1 [AT91SAM9261 only] */
-#define AT91_SSC_TXSYN (1 << 10) /* Transmit Sync */
-#define AT91_SSC_RXSYN (1 << 11) /* Receive Sync */
-#define AT91_SSC_TXENA (1 << 16) /* Transmit Enable */
-#define AT91_SSC_RXENA (1 << 17) /* Receive Enable */
-
-#define AT91_SSC_IER 0x44 /* Interrupt Enable Register */
-#define AT91_SSC_IDR 0x48 /* Interrupt Disable Register */
-#define AT91_SSC_IMR 0x4c /* Interrupt Mask Register */
-
-#endif
diff --git a/arch/arm/mach-at91/include/mach/entry-macro.S b/arch/arm/mach-at91/include/mach/entry-macro.S
deleted file mode 100644
index 903bf205a333..000000000000
--- a/arch/arm/mach-at91/include/mach/entry-macro.S
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/entry-macro.S
- *
- * Copyright (C) 2003-2005 SAN People
- *
- * Low-level IRQ helper macros for AT91RM9200 platforms
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <mach/hardware.h>
-#include <mach/at91_aic.h>
-
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =at91_aic_base @ base virtual address of AIC peripheral
- ldr \base, [\base]
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- ldr \irqnr, [\base, #AT91_AIC_IVR] @ read IRQ vector register: de-asserts nIRQ to processor (and clears interrupt)
- ldr \irqstat, [\base, #AT91_AIC_ISR] @ read interrupt source number
- teq \irqstat, #0 @ ISR is 0 when no current interrupt, or spurious interrupt
- streq \tmp, [\base, #AT91_AIC_EOICR] @ not going to be handled further, then ACK it now.
- .endm
-
diff --git a/arch/arm/mach-at91/irq.c b/arch/arm/mach-at91/irq.c
index cfcfcbe36269..1e02c0e49dcc 100644
--- a/arch/arm/mach-at91/irq.c
+++ b/arch/arm/mach-at91/irq.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
+#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <linux/of.h>
@@ -30,38 +31,218 @@
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/setup.h>
+#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/map.h>
+#include <mach/at91_aic.h>
+
void __iomem *at91_aic_base;
static struct irq_domain *at91_aic_domain;
static struct device_node *at91_aic_np;
+static unsigned int n_irqs = NR_AIC_IRQS;
+static unsigned long at91_aic_caps = 0;
+
+/* AIC5 introduces a Source Select Register */
+#define AT91_AIC_CAP_AIC5 (1 << 0)
+#define has_aic5() (at91_aic_caps & AT91_AIC_CAP_AIC5)
+
+#ifdef CONFIG_PM
+
+static unsigned long *wakeups;
+static unsigned long *backups;
+
+#define set_backup(bit) set_bit(bit, backups)
+#define clear_backup(bit) clear_bit(bit, backups)
+
+static int at91_aic_pm_init(void)
+{
+ backups = kzalloc(BITS_TO_LONGS(n_irqs) * sizeof(*backups), GFP_KERNEL);
+ if (!backups)
+ return -ENOMEM;
+
+ wakeups = kzalloc(BITS_TO_LONGS(n_irqs) * sizeof(*backups), GFP_KERNEL);
+ if (!wakeups) {
+ kfree(backups);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int at91_aic_set_wake(struct irq_data *d, unsigned value)
+{
+ if (unlikely(d->hwirq >= n_irqs))
+ return -EINVAL;
+
+ if (value)
+ set_bit(d->hwirq, wakeups);
+ else
+ clear_bit(d->hwirq, wakeups);
+
+ return 0;
+}
+
+void at91_irq_suspend(void)
+{
+ int i = 0, bit;
+
+ if (has_aic5()) {
+ /* disable enabled irqs */
+ while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+ at91_aic_write(AT91_AIC5_SSR,
+ bit & AT91_AIC5_INTSEL_MSK);
+ at91_aic_write(AT91_AIC5_IDCR, 1);
+ i = bit;
+ }
+ /* enable wakeup irqs */
+ i = 0;
+ while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+ at91_aic_write(AT91_AIC5_SSR,
+ bit & AT91_AIC5_INTSEL_MSK);
+ at91_aic_write(AT91_AIC5_IECR, 1);
+ i = bit;
+ }
+ } else {
+ at91_aic_write(AT91_AIC_IDCR, *backups);
+ at91_aic_write(AT91_AIC_IECR, *wakeups);
+ }
+}
+
+void at91_irq_resume(void)
+{
+ int i = 0, bit;
+
+ if (has_aic5()) {
+ /* disable wakeup irqs */
+ while ((bit = find_next_bit(wakeups, n_irqs, i)) < n_irqs) {
+ at91_aic_write(AT91_AIC5_SSR,
+ bit & AT91_AIC5_INTSEL_MSK);
+ at91_aic_write(AT91_AIC5_IDCR, 1);
+ i = bit;
+ }
+ /* enable irqs disabled for suspend */
+ i = 0;
+ while ((bit = find_next_bit(backups, n_irqs, i)) < n_irqs) {
+ at91_aic_write(AT91_AIC5_SSR,
+ bit & AT91_AIC5_INTSEL_MSK);
+ at91_aic_write(AT91_AIC5_IECR, 1);
+ i = bit;
+ }
+ } else {
+ at91_aic_write(AT91_AIC_IDCR, *wakeups);
+ at91_aic_write(AT91_AIC_IECR, *backups);
+ }
+}
+
+#else
+static inline int at91_aic_pm_init(void)
+{
+ return 0;
+}
+
+#define set_backup(bit)
+#define clear_backup(bit)
+#define at91_aic_set_wake NULL
+
+#endif /* CONFIG_PM */
+
+asmlinkage void __exception_irq_entry
+at91_aic_handle_irq(struct pt_regs *regs)
+{
+ u32 irqnr;
+ u32 irqstat;
+
+ irqnr = at91_aic_read(AT91_AIC_IVR);
+ irqstat = at91_aic_read(AT91_AIC_ISR);
+
+ /*
+ * ISR value is 0 when there is no current interrupt or when there is
+ * a spurious interrupt
+ */
+ if (!irqstat)
+ at91_aic_write(AT91_AIC_EOICR, 0);
+ else
+ handle_IRQ(irqnr, regs);
+}
+
+asmlinkage void __exception_irq_entry
+at91_aic5_handle_irq(struct pt_regs *regs)
+{
+ u32 irqnr;
+ u32 irqstat;
+
+ irqnr = at91_aic_read(AT91_AIC5_IVR);
+ irqstat = at91_aic_read(AT91_AIC5_ISR);
+
+ if (!irqstat)
+ at91_aic_write(AT91_AIC5_EOICR, 0);
+ else
+ handle_IRQ(irqnr, regs);
+}
static void at91_aic_mask_irq(struct irq_data *d)
{
/* Disable interrupt on AIC */
at91_aic_write(AT91_AIC_IDCR, 1 << d->hwirq);
+ /* Update ISR cache */
+ clear_backup(d->hwirq);
+}
+
+static void __maybe_unused at91_aic5_mask_irq(struct irq_data *d)
+{
+ /* Disable interrupt on AIC5 */
+ at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
+ at91_aic_write(AT91_AIC5_IDCR, 1);
+ /* Update ISR cache */
+ clear_backup(d->hwirq);
}
static void at91_aic_unmask_irq(struct irq_data *d)
{
/* Enable interrupt on AIC */
at91_aic_write(AT91_AIC_IECR, 1 << d->hwirq);
+ /* Update ISR cache */
+ set_backup(d->hwirq);
+}
+
+static void __maybe_unused at91_aic5_unmask_irq(struct irq_data *d)
+{
+ /* Enable interrupt on AIC5 */
+ at91_aic_write(AT91_AIC5_SSR, d->hwirq & AT91_AIC5_INTSEL_MSK);
+ at91_aic_write(AT91_AIC5_IECR, 1);
+ /* Update ISR cache */
+ set_backup(d->hwirq);
}
-unsigned int at91_extern_irq;
+static void at91_aic_eoi(struct irq_data *d)
+{
+ /*
+ * Mark end-of-interrupt on AIC, the controller doesn't care about
+ * the value written. Moreover it's a write-only register.
+ */
+ at91_aic_write(AT91_AIC_EOICR, 0);
+}
+
+static void __maybe_unused at91_aic5_eoi(struct irq_data *d)
+{
+ at91_aic_write(AT91_AIC5_EOICR, 0);
+}
-#define is_extern_irq(hwirq) ((1 << (hwirq)) & at91_extern_irq)
+unsigned long *at91_extern_irq;
-static int at91_aic_set_type(struct irq_data *d, unsigned type)
+#define is_extern_irq(hwirq) test_bit(hwirq, at91_extern_irq)
+
+static int at91_aic_compute_srctype(struct irq_data *d, unsigned type)
{
- unsigned int smr, srctype;
+ int srctype;
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
@@ -74,65 +255,51 @@ static int at91_aic_set_type(struct irq_data *d, unsigned type)
if ((d->hwirq == AT91_ID_FIQ) || is_extern_irq(d->hwirq)) /* only supported on external interrupts */
srctype = AT91_AIC_SRCTYPE_LOW;
else
- return -EINVAL;
+ srctype = -EINVAL;
break;
case IRQ_TYPE_EDGE_FALLING:
if ((d->hwirq == AT91_ID_FIQ) || is_extern_irq(d->hwirq)) /* only supported on external interrupts */
srctype = AT91_AIC_SRCTYPE_FALLING;
else
- return -EINVAL;
+ srctype = -EINVAL;
break;
default:
- return -EINVAL;
+ srctype = -EINVAL;
}
- smr = at91_aic_read(AT91_AIC_SMR(d->hwirq)) & ~AT91_AIC_SRCTYPE;
- at91_aic_write(AT91_AIC_SMR(d->hwirq), smr | srctype);
- return 0;
+ return srctype;
}
-#ifdef CONFIG_PM
-
-static u32 wakeups;
-static u32 backups;
-
-static int at91_aic_set_wake(struct irq_data *d, unsigned value)
+static int at91_aic_set_type(struct irq_data *d, unsigned type)
{
- if (unlikely(d->hwirq >= NR_AIC_IRQS))
- return -EINVAL;
-
- if (value)
- wakeups |= (1 << d->hwirq);
- else
- wakeups &= ~(1 << d->hwirq);
+ unsigned int smr;
+ int srctype;
+
+ srctype = at91_aic_compute_srctype(d, type);
+ if (srctype < 0)
+ return srctype;
+
+ if (has_aic5()) {
+ at91_aic_write(AT91_AIC5_SSR,
+ d->hwirq & AT91_AIC5_INTSEL_MSK);
+ smr = at91_aic_read(AT91_AIC5_SMR) & ~AT91_AIC_SRCTYPE;
+ at91_aic_write(AT91_AIC5_SMR, smr | srctype);
+ } else {
+ smr = at91_aic_read(AT91_AIC_SMR(d->hwirq))
+ & ~AT91_AIC_SRCTYPE;
+ at91_aic_write(AT91_AIC_SMR(d->hwirq), smr | srctype);
+ }
return 0;
}
-void at91_irq_suspend(void)
-{
- backups = at91_aic_read(AT91_AIC_IMR);
- at91_aic_write(AT91_AIC_IDCR, backups);
- at91_aic_write(AT91_AIC_IECR, wakeups);
-}
-
-void at91_irq_resume(void)
-{
- at91_aic_write(AT91_AIC_IDCR, wakeups);
- at91_aic_write(AT91_AIC_IECR, backups);
-}
-
-#else
-#define at91_aic_set_wake NULL
-#endif
-
static struct irq_chip at91_aic_chip = {
.name = "AIC",
- .irq_ack = at91_aic_mask_irq,
.irq_mask = at91_aic_mask_irq,
.irq_unmask = at91_aic_unmask_irq,
.irq_set_type = at91_aic_set_type,
.irq_set_wake = at91_aic_set_wake,
+ .irq_eoi = at91_aic_eoi,
};
static void __init at91_aic_hw_init(unsigned int spu_vector)
@@ -161,41 +328,172 @@ static void __init at91_aic_hw_init(unsigned int spu_vector)
at91_aic_write(AT91_AIC_ICCR, 0xFFFFFFFF);
}
+static void __init __maybe_unused at91_aic5_hw_init(unsigned int spu_vector)
+{
+ int i;
+
+ /*
+ * Perform 8 End Of Interrupt Command to make sure AIC
+ * will not Lock out nIRQ
+ */
+ for (i = 0; i < 8; i++)
+ at91_aic_write(AT91_AIC5_EOICR, 0);
+
+ /*
+ * Spurious Interrupt ID in Spurious Vector Register.
+ * When there is no current interrupt, the IRQ Vector Register
+ * reads the value stored in AIC_SPU
+ */
+ at91_aic_write(AT91_AIC5_SPU, spu_vector);
+
+ /* No debugging in AIC: Debug (Protect) Control Register */
+ at91_aic_write(AT91_AIC5_DCR, 0);
+
+ /* Disable and clear all interrupts initially */
+ for (i = 0; i < n_irqs; i++) {
+ at91_aic_write(AT91_AIC5_SSR, i & AT91_AIC5_INTSEL_MSK);
+ at91_aic_write(AT91_AIC5_IDCR, 1);
+ at91_aic_write(AT91_AIC5_ICCR, 1);
+ }
+}
+
#if defined(CONFIG_OF)
+static unsigned int *at91_aic_irq_priorities;
+
static int at91_aic_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
/* Put virq number in Source Vector Register */
at91_aic_write(AT91_AIC_SVR(hw), virq);
- /* Active Low interrupt, without priority */
- at91_aic_write(AT91_AIC_SMR(hw), AT91_AIC_SRCTYPE_LOW);
+ /* Active Low interrupt, with priority */
+ at91_aic_write(AT91_AIC_SMR(hw),
+ AT91_AIC_SRCTYPE_LOW | at91_aic_irq_priorities[hw]);
- irq_set_chip_and_handler(virq, &at91_aic_chip, handle_level_irq);
+ irq_set_chip_and_handler(virq, &at91_aic_chip, handle_fasteoi_irq);
set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
return 0;
}
+static int at91_aic5_irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
+{
+ at91_aic_write(AT91_AIC5_SSR, hw & AT91_AIC5_INTSEL_MSK);
+
+ /* Put virq number in Source Vector Register */
+ at91_aic_write(AT91_AIC5_SVR, virq);
+
+ /* Active Low interrupt, with priority */
+ at91_aic_write(AT91_AIC5_SMR,
+ AT91_AIC_SRCTYPE_LOW | at91_aic_irq_priorities[hw]);
+
+ irq_set_chip_and_handler(virq, &at91_aic_chip, handle_fasteoi_irq);
+ set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static int at91_aic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+ if (WARN_ON(intsize < 3))
+ return -EINVAL;
+ if (WARN_ON(intspec[0] >= n_irqs))
+ return -EINVAL;
+ if (WARN_ON((intspec[2] < AT91_AIC_IRQ_MIN_PRIORITY)
+ || (intspec[2] > AT91_AIC_IRQ_MAX_PRIORITY)))
+ return -EINVAL;
+
+ *out_hwirq = intspec[0];
+ *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+ at91_aic_irq_priorities[*out_hwirq] = intspec[2];
+
+ return 0;
+}
+
static struct irq_domain_ops at91_aic_irq_ops = {
.map = at91_aic_irq_map,
- .xlate = irq_domain_xlate_twocell,
+ .xlate = at91_aic_irq_domain_xlate,
};
-int __init at91_aic_of_init(struct device_node *node,
- struct device_node *parent)
+int __init at91_aic_of_common_init(struct device_node *node,
+ struct device_node *parent)
{
+ struct property *prop;
+ const __be32 *p;
+ u32 val;
+
+ at91_extern_irq = kzalloc(BITS_TO_LONGS(n_irqs)
+ * sizeof(*at91_extern_irq), GFP_KERNEL);
+ if (!at91_extern_irq)
+ return -ENOMEM;
+
+ if (at91_aic_pm_init()) {
+ kfree(at91_extern_irq);
+ return -ENOMEM;
+ }
+
+ at91_aic_irq_priorities = kzalloc(n_irqs
+ * sizeof(*at91_aic_irq_priorities),
+ GFP_KERNEL);
+ if (!at91_aic_irq_priorities)
+ return -ENOMEM;
+
at91_aic_base = of_iomap(node, 0);
at91_aic_np = node;
- at91_aic_domain = irq_domain_add_linear(at91_aic_np, NR_AIC_IRQS,
+ at91_aic_domain = irq_domain_add_linear(at91_aic_np, n_irqs,
&at91_aic_irq_ops, NULL);
if (!at91_aic_domain)
panic("Unable to add AIC irq domain (DT)\n");
+ of_property_for_each_u32(node, "atmel,external-irqs", prop, p, val) {
+ if (val >= n_irqs)
+ pr_warn("AIC: external irq %d >= %d skip it\n",
+ val, n_irqs);
+ else
+ set_bit(val, at91_extern_irq);
+ }
+
irq_set_default_host(at91_aic_domain);
- at91_aic_hw_init(NR_AIC_IRQS);
+ return 0;
+}
+
+int __init at91_aic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int err;
+
+ err = at91_aic_of_common_init(node, parent);
+ if (err)
+ return err;
+
+ at91_aic_hw_init(n_irqs);
+
+ return 0;
+}
+
+int __init at91_aic5_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ int err;
+
+ at91_aic_caps |= AT91_AIC_CAP_AIC5;
+ n_irqs = NR_AIC5_IRQS;
+ at91_aic_chip.irq_ack = at91_aic5_mask_irq;
+ at91_aic_chip.irq_mask = at91_aic5_mask_irq;
+ at91_aic_chip.irq_unmask = at91_aic5_unmask_irq;
+ at91_aic_chip.irq_eoi = at91_aic5_eoi;
+ at91_aic_irq_ops.map = at91_aic5_irq_map;
+
+ err = at91_aic_of_common_init(node, parent);
+ if (err)
+ return err;
+
+ at91_aic5_hw_init(n_irqs);
return 0;
}
@@ -204,22 +502,25 @@ int __init at91_aic_of_init(struct device_node *node,
/*
* Initialize the AIC interrupt controller.
*/
-void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS])
+void __init at91_aic_init(unsigned int *priority)
{
unsigned int i;
int irq_base;
+ if (at91_aic_pm_init())
+ panic("Unable to allocate bit maps\n");
+
at91_aic_base = ioremap(AT91_AIC, 512);
if (!at91_aic_base)
panic("Unable to ioremap AIC registers\n");
/* Add irq domain for AIC */
- irq_base = irq_alloc_descs(-1, 0, NR_AIC_IRQS, 0);
+ irq_base = irq_alloc_descs(-1, 0, n_irqs, 0);
if (irq_base < 0) {
WARN(1, "Cannot allocate irq_descs, assuming pre-allocated\n");
irq_base = 0;
}
- at91_aic_domain = irq_domain_add_legacy(at91_aic_np, NR_AIC_IRQS,
+ at91_aic_domain = irq_domain_add_legacy(at91_aic_np, n_irqs,
irq_base, 0,
&irq_domain_simple_ops, NULL);
@@ -232,15 +533,14 @@ void __init at91_aic_init(unsigned int priority[NR_AIC_IRQS])
* The IVR is used by macro get_irqnr_and_base to read and verify.
* The irq number is NR_AIC_IRQS when a spurious interrupt has occurred.
*/
- for (i = 0; i < NR_AIC_IRQS; i++) {
+ for (i = 0; i < n_irqs; i++) {
/* Put hardware irq number in Source Vector Register: */
- at91_aic_write(AT91_AIC_SVR(i), i);
+ at91_aic_write(AT91_AIC_SVR(i), NR_IRQS_LEGACY + i);
/* Active Low interrupt, with the specified priority */
at91_aic_write(AT91_AIC_SMR(i), AT91_AIC_SRCTYPE_LOW | priority[i]);
-
- irq_set_chip_and_handler(i, &at91_aic_chip, handle_level_irq);
+ irq_set_chip_and_handler(NR_IRQS_LEGACY + i, &at91_aic_chip, handle_fasteoi_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
- at91_aic_hw_init(NR_AIC_IRQS);
+ at91_aic_hw_init(n_irqs);
}
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 1bfaad628731..2c2d86505a54 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -25,6 +25,7 @@
#include <asm/mach/time.h>
#include <asm/mach/irq.h>
+#include <mach/at91_aic.h>
#include <mach/at91_pmc.h>
#include <mach/cpu.h>
diff --git a/arch/arm/mach-clps711x/common.c b/arch/arm/mach-clps711x/common.c
index c965fd8eb31a..f15293bd7974 100644
--- a/arch/arm/mach-clps711x/common.c
+++ b/arch/arm/mach-clps711x/common.c
@@ -26,7 +26,6 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/sched.h>
-#include <linux/timex.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
@@ -188,7 +187,6 @@ static struct irqaction clps711x_timer_irq = {
static void __init clps711x_timer_init(void)
{
- struct timespec tv;
unsigned int syscon;
syscon = clps_readl(SYSCON1);
@@ -198,10 +196,6 @@ static void __init clps711x_timer_init(void)
clps_writel(LATCH-1, TC2D); /* 512kHz / 100Hz - 1 */
setup_irq(IRQ_TC2OI, &clps711x_timer_irq);
-
- tv.tv_nsec = 0;
- tv.tv_sec = clps_readl(RTCDR);
- do_settimeofday(&tv);
}
struct sys_timer clps711x_timer = {
diff --git a/arch/arm/mach-clps711x/include/mach/memory.h b/arch/arm/mach-clps711x/include/mach/memory.h
index 3a032a67725c..fc0e028d9405 100644
--- a/arch/arm/mach-clps711x/include/mach/memory.h
+++ b/arch/arm/mach-clps711x/include/mach/memory.h
@@ -25,26 +25,6 @@
*/
#define PLAT_PHYS_OFFSET UL(0xc0000000)
-#if !defined(CONFIG_ARCH_CDB89712) && !defined (CONFIG_ARCH_AUTCPU12)
-
-#define __virt_to_bus(x) ((x) - PAGE_OFFSET)
-#define __bus_to_virt(x) ((x) + PAGE_OFFSET)
-#define __pfn_to_bus(x) (__pfn_to_phys(x) - PHYS_OFFSET)
-#define __bus_to_pfn(x) __phys_to_pfn((x) + PHYS_OFFSET)
-
-#endif
-
-
-/*
- * Like the SA1100, the EDB7211 has a large gap between physical RAM
- * banks. In 2.2, the Psion (CL-PS7110) port added custom support for
- * discontiguous physical memory. In 2.4, we can use the standard
- * Linux NUMA support.
- *
- * This is not necessary for EP7211 implementations with only one used
- * memory bank. For those systems, simply undefine CONFIG_DISCONTIGMEM.
- */
-
/*
* The PS7211 allows up to 256MB max per DRAM bank, but the EDB7211
* uses only one of the two banks (bank #1). However, even within
@@ -54,23 +34,6 @@
* them, so we use 24 for the node max shift to get 16MB node sizes.
*/
-/*
- * Because of the wide memory address space between physical RAM banks on the
- * SA1100, it's much more convenient to use Linux's NUMA support to implement
- * our memory map representation. Assuming all memory nodes have equal access
- * characteristics, we then have generic discontiguous memory support.
- *
- * Of course, all this isn't mandatory for SA1100 implementations with only
- * one used memory bank. For those, simply undefine CONFIG_DISCONTIGMEM.
- *
- * The nodes are matched with the physical memory bank addresses which are
- * incidentally the same as virtual addresses.
- *
- * node 0: 0xc0000000 - 0xc7ffffff
- * node 1: 0xc8000000 - 0xcfffffff
- * node 2: 0xd0000000 - 0xd7ffffff
- * node 3: 0xd8000000 - 0xdfffffff
- */
#define SECTION_SIZE_BITS 24
#define MAX_PHYSMEM_BITS 32
diff --git a/arch/arm/mach-clps711x/p720t.c b/arch/arm/mach-clps711x/p720t.c
index 42ee8f33eafb..f266d90b9efc 100644
--- a/arch/arm/mach-clps711x/p720t.c
+++ b/arch/arm/mach-clps711x/p720t.c
@@ -86,17 +86,7 @@ static void __init p720t_map_io(void)
iotable_init(p720t_io_desc, ARRAY_SIZE(p720t_io_desc));
}
-MACHINE_START(P720T, "ARM-Prospector720T")
- /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
- .atag_offset = 0x100,
- .fixup = fixup_p720t,
- .map_io = p720t_map_io,
- .init_irq = clps711x_init_irq,
- .timer = &clps711x_timer,
- .restart = clps711x_restart,
-MACHINE_END
-
-static int p720t_hw_init(void)
+static void __init p720t_init_early(void)
{
/*
* Power down as much as possible in case we don't
@@ -111,13 +101,19 @@ static int p720t_hw_init(void)
PLD_CODEC = 0;
PLD_TCH = 0;
PLD_SPI = 0;
-#ifndef CONFIG_DEBUG_LL
- PLD_COM2 = 0;
- PLD_COM1 = 0;
-#endif
-
- return 0;
+ if (!IS_ENABLED(CONFIG_DEBUG_LL)) {
+ PLD_COM2 = 0;
+ PLD_COM1 = 0;
+ }
}
-__initcall(p720t_hw_init);
-
+MACHINE_START(P720T, "ARM-Prospector720T")
+ /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */
+ .atag_offset = 0x100,
+ .fixup = fixup_p720t,
+ .init_early = p720t_init_early,
+ .map_io = p720t_map_io,
+ .init_irq = clps711x_init_irq,
+ .timer = &clps711x_timer,
+ .restart = clps711x_restart,
+MACHINE_END
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 32d837d8eab9..ab99c3c3b752 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -4,6 +4,7 @@ config AINTC
bool
config CP_INTC
+ select IRQ_DOMAIN
bool
config ARCH_DAVINCI_DMx
@@ -61,7 +62,6 @@ config MACH_DAVINCI_EVM
bool "TI DM644x EVM"
default ARCH_DAVINCI_DM644x
depends on ARCH_DAVINCI_DM644x
- select MISC_DEVICES
select EEPROM_AT24
select I2C
help
@@ -71,7 +71,6 @@ config MACH_DAVINCI_EVM
config MACH_SFFSDR
bool "Lyrtech SFFSDR"
depends on ARCH_DAVINCI_DM644x
- select MISC_DEVICES
select EEPROM_AT24
select I2C
help
@@ -105,7 +104,6 @@ config MACH_DAVINCI_DM6467_EVM
default ARCH_DAVINCI_DM646x
depends on ARCH_DAVINCI_DM646x
select MACH_DAVINCI_DM6467TEVM
- select MISC_DEVICES
select EEPROM_AT24
select I2C
help
@@ -119,7 +117,6 @@ config MACH_DAVINCI_DM365_EVM
bool "TI DM365 EVM"
default ARCH_DAVINCI_DM365
depends on ARCH_DAVINCI_DM365
- select MISC_DEVICES
select EEPROM_AT24
select I2C
help
@@ -131,7 +128,6 @@ config MACH_DAVINCI_DA830_EVM
default ARCH_DAVINCI_DA830
depends on ARCH_DAVINCI_DA830
select GPIO_PCF857X
- select MISC_DEVICES
select EEPROM_AT24
select I2C
help
@@ -218,7 +214,6 @@ config MACH_TNETV107X
config MACH_MITYOMAPL138
bool "Critical Link MityDSP-L138/MityARM-1808 SoM"
depends on ARCH_DAVINCI_DA850
- select MISC_DEVICES
select EEPROM_AT24
select I2C
help
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index 2db78bd5c835..2227effcb0e9 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MACH_OMAPL138_HAWKBOARD) += board-omapl138-hawk.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_SUSPEND) += pm.o sleep.o
+obj-$(CONFIG_HAVE_CLK) += pm_domain.o
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
index f83152d643c5..006dae8dfe44 100644
--- a/arch/arm/mach-davinci/cp_intc.c
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -9,9 +9,14 @@
* kind, whether express or implied.
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <mach/common.h>
#include <mach/cp_intc.h>
@@ -28,7 +33,7 @@ static inline void cp_intc_write(unsigned long value, unsigned offset)
static void cp_intc_ack_irq(struct irq_data *d)
{
- cp_intc_write(d->irq, CP_INTC_SYS_STAT_IDX_CLR);
+ cp_intc_write(d->hwirq, CP_INTC_SYS_STAT_IDX_CLR);
}
/* Disable interrupt */
@@ -36,20 +41,20 @@ static void cp_intc_mask_irq(struct irq_data *d)
{
/* XXX don't know why we need to disable nIRQ here... */
cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_CLR);
- cp_intc_write(d->irq, CP_INTC_SYS_ENABLE_IDX_CLR);
+ cp_intc_write(d->hwirq, CP_INTC_SYS_ENABLE_IDX_CLR);
cp_intc_write(1, CP_INTC_HOST_ENABLE_IDX_SET);
}
/* Enable interrupt */
static void cp_intc_unmask_irq(struct irq_data *d)
{
- cp_intc_write(d->irq, CP_INTC_SYS_ENABLE_IDX_SET);
+ cp_intc_write(d->hwirq, CP_INTC_SYS_ENABLE_IDX_SET);
}
static int cp_intc_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- unsigned reg = BIT_WORD(d->irq);
- unsigned mask = BIT_MASK(d->irq);
+ unsigned reg = BIT_WORD(d->hwirq);
+ unsigned mask = BIT_MASK(d->hwirq);
unsigned polarity = cp_intc_read(CP_INTC_SYS_POLARITY(reg));
unsigned type = cp_intc_read(CP_INTC_SYS_TYPE(reg));
@@ -99,18 +104,43 @@ static struct irq_chip cp_intc_irq_chip = {
.irq_set_wake = cp_intc_set_wake,
};
-void __init cp_intc_init(void)
+static struct irq_domain *cp_intc_domain;
+
+static int cp_intc_host_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw)
{
- unsigned long num_irq = davinci_soc_info.intc_irq_num;
+ pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw);
+
+ irq_set_chip(virq, &cp_intc_irq_chip);
+ set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+ irq_set_handler(virq, handle_edge_irq);
+ return 0;
+}
+
+static const struct irq_domain_ops cp_intc_host_ops = {
+ .map = cp_intc_host_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+int __init cp_intc_of_init(struct device_node *node, struct device_node *parent)
+{
+ u32 num_irq = davinci_soc_info.intc_irq_num;
u8 *irq_prio = davinci_soc_info.intc_irq_prios;
u32 *host_map = davinci_soc_info.intc_host_map;
unsigned num_reg = BITS_TO_LONGS(num_irq);
- int i;
+ int i, irq_base;
davinci_intc_type = DAVINCI_INTC_TYPE_CP_INTC;
- davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_8K);
+ if (node) {
+ davinci_intc_base = of_iomap(node, 0);
+ if (of_property_read_u32(node, "ti,intc-size", &num_irq))
+ pr_warn("unable to get intc-size, default to %d\n",
+ num_irq);
+ } else {
+ davinci_intc_base = ioremap(davinci_soc_info.intc_base, SZ_8K);
+ }
if (WARN_ON(!davinci_intc_base))
- return;
+ return -EINVAL;
cp_intc_write(0, CP_INTC_GLOBAL_ENABLE);
@@ -165,13 +195,28 @@ void __init cp_intc_init(void)
for (i = 0; host_map[i] != -1; i++)
cp_intc_write(host_map[i], CP_INTC_HOST_MAP(i));
- /* Set up genirq dispatching for cp_intc */
- for (i = 0; i < num_irq; i++) {
- irq_set_chip(i, &cp_intc_irq_chip);
- set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
- irq_set_handler(i, handle_edge_irq);
+ irq_base = irq_alloc_descs(-1, 0, num_irq, 0);
+ if (irq_base < 0) {
+ pr_warn("Couldn't allocate IRQ numbers\n");
+ irq_base = 0;
+ }
+
+ /* create a legacy host */
+ cp_intc_domain = irq_domain_add_legacy(node, num_irq,
+ irq_base, 0, &cp_intc_host_ops, NULL);
+
+ if (!cp_intc_domain) {
+ pr_err("cp_intc: failed to allocate irq host!\n");
+ return -EINVAL;
}
/* Enable global interrupt */
cp_intc_write(1, CP_INTC_GLOBAL_ENABLE);
+
+ return 0;
+}
+
+void __init cp_intc_init(void)
+{
+ cp_intc_of_init(NULL, NULL);
}
diff --git a/arch/arm/mach-davinci/include/mach/cp_intc.h b/arch/arm/mach-davinci/include/mach/cp_intc.h
index 4e8190eed673..d13d8dfa2b0d 100644
--- a/arch/arm/mach-davinci/include/mach/cp_intc.h
+++ b/arch/arm/mach-davinci/include/mach/cp_intc.h
@@ -52,5 +52,6 @@
#define CP_INTC_VECTOR_ADDR(n) (0x2000 + (n << 2))
void __init cp_intc_init(void);
+int __init cp_intc_of_init(struct device_node *, struct device_node *);
#endif /* __ASM_HARDWARE_CP_INTC_H */
diff --git a/arch/arm/mach-davinci/include/mach/dm365.h b/arch/arm/mach-davinci/include/mach/dm365.h
deleted file mode 100644
index b9bf3d6a4423..000000000000
--- a/arch/arm/mach-davinci/include/mach/dm365.h
+++ /dev/null
@@ -1 +0,0 @@
-/* empty, remove once unused */
diff --git a/arch/arm/mach-davinci/include/mach/dm646x.h b/arch/arm/mach-davinci/include/mach/dm646x.h
deleted file mode 100644
index b9bf3d6a4423..000000000000
--- a/arch/arm/mach-davinci/include/mach/dm646x.h
+++ /dev/null
@@ -1 +0,0 @@
-/* empty, remove once unused */
diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S
index 768b3c060214..cf5f573eb5fd 100644
--- a/arch/arm/mach-davinci/include/mach/entry-macro.S
+++ b/arch/arm/mach-davinci/include/mach/entry-macro.S
@@ -30,12 +30,10 @@
#endif
#if defined(CONFIG_CP_INTC)
1001: ldr \irqnr, [\base, #0x80] /* get irq number */
+ mov \tmp, \irqnr, lsr #31
and \irqnr, \irqnr, #0xff /* irq is in bits 0-9 */
- mov \tmp, \irqnr, lsr #3
- and \tmp, \tmp, #0xfc
- add \tmp, \tmp, #0x280 /* get the register offset */
- ldr \irqstat, [\base, \tmp] /* get the intc status */
- cmp \irqstat, #0x0
+ and \tmp, \tmp, #0x1
+ cmp \tmp, #0x1
#endif
1002:
.endm
diff --git a/arch/arm/mach-davinci/pm_domain.c b/arch/arm/mach-davinci/pm_domain.c
new file mode 100644
index 000000000000..00946e23c1ee
--- /dev/null
+++ b/arch/arm/mach-davinci/pm_domain.c
@@ -0,0 +1,64 @@
+/*
+ * Runtime PM support code for DaVinci
+ *
+ * Author: Kevin Hilman
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+#include <linux/init.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
+#include <linux/platform_device.h>
+
+#ifdef CONFIG_PM_RUNTIME
+static int davinci_pm_runtime_suspend(struct device *dev)
+{
+ int ret;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ ret = pm_generic_runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_clk_suspend(dev);
+ if (ret) {
+ pm_generic_runtime_resume(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int davinci_pm_runtime_resume(struct device *dev)
+{
+ dev_dbg(dev, "%s\n", __func__);
+
+ pm_clk_resume(dev);
+ return pm_generic_runtime_resume(dev);
+}
+#endif
+
+static struct dev_pm_domain davinci_pm_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(davinci_pm_runtime_suspend,
+ davinci_pm_runtime_resume, NULL)
+ USE_PLATFORM_PM_SLEEP_OPS
+ },
+};
+
+static struct pm_clk_notifier_block platform_bus_notifier = {
+ .pm_domain = &davinci_pm_domain,
+};
+
+static int __init davinci_pm_runtime_init(void)
+{
+ pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
+
+ return 0;
+}
+core_initcall(davinci_pm_runtime_init);
diff --git a/arch/arm/mach-dove/include/mach/bridge-regs.h b/arch/arm/mach-dove/include/mach/bridge-regs.h
index 226949dc4ac0..f953bb54aa9d 100644
--- a/arch/arm/mach-dove/include/mach/bridge-regs.h
+++ b/arch/arm/mach-dove/include/mach/bridge-regs.h
@@ -50,5 +50,6 @@
#define POWER_MANAGEMENT (BRIDGE_VIRT_BASE | 0x011c)
#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300)
#endif
diff --git a/arch/arm/mach-dove/include/mach/dove.h b/arch/arm/mach-dove/include/mach/dove.h
index ad1165d488c1..d52b0ef313b7 100644
--- a/arch/arm/mach-dove/include/mach/dove.h
+++ b/arch/arm/mach-dove/include/mach/dove.h
@@ -78,6 +78,7 @@
/* North-South Bridge */
#define BRIDGE_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0x20000)
+#define BRIDGE_PHYS_BASE (DOVE_SB_REGS_PHYS_BASE | 0x20000)
/* Cryptographic Engine */
#define DOVE_CRYPT_PHYS_BASE (DOVE_SB_REGS_PHYS_BASE | 0x30000)
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 4dd07a0e3604..4afe52aaaff3 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -797,6 +797,102 @@ static struct platform_device ep93xx_wdt_device = {
.resource = ep93xx_wdt_resources,
};
+/*************************************************************************
+ * EP93xx IDE
+ *************************************************************************/
+static struct resource ep93xx_ide_resources[] = {
+ DEFINE_RES_MEM(EP93XX_IDE_PHYS_BASE, 0x38),
+ DEFINE_RES_IRQ(IRQ_EP93XX_EXT3),
+};
+
+static struct platform_device ep93xx_ide_device = {
+ .name = "ep93xx-ide",
+ .id = -1,
+ .dev = {
+ .dma_mask = &ep93xx_ide_device.dev.coherent_dma_mask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .num_resources = ARRAY_SIZE(ep93xx_ide_resources),
+ .resource = ep93xx_ide_resources,
+};
+
+void __init ep93xx_register_ide(void)
+{
+ platform_device_register(&ep93xx_ide_device);
+}
+
+int ep93xx_ide_acquire_gpio(struct platform_device *pdev)
+{
+ int err;
+ int i;
+
+ err = gpio_request(EP93XX_GPIO_LINE_EGPIO2, dev_name(&pdev->dev));
+ if (err)
+ return err;
+ err = gpio_request(EP93XX_GPIO_LINE_EGPIO15, dev_name(&pdev->dev));
+ if (err)
+ goto fail_egpio15;
+ for (i = 2; i < 8; i++) {
+ err = gpio_request(EP93XX_GPIO_LINE_E(i), dev_name(&pdev->dev));
+ if (err)
+ goto fail_gpio_e;
+ }
+ for (i = 4; i < 8; i++) {
+ err = gpio_request(EP93XX_GPIO_LINE_G(i), dev_name(&pdev->dev));
+ if (err)
+ goto fail_gpio_g;
+ }
+ for (i = 0; i < 8; i++) {
+ err = gpio_request(EP93XX_GPIO_LINE_H(i), dev_name(&pdev->dev));
+ if (err)
+ goto fail_gpio_h;
+ }
+
+ /* GPIO ports E[7:2], G[7:4] and H used by IDE */
+ ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_EONIDE |
+ EP93XX_SYSCON_DEVCFG_GONIDE |
+ EP93XX_SYSCON_DEVCFG_HONIDE);
+ return 0;
+
+fail_gpio_h:
+ for (--i; i >= 0; --i)
+ gpio_free(EP93XX_GPIO_LINE_H(i));
+ i = 8;
+fail_gpio_g:
+ for (--i; i >= 4; --i)
+ gpio_free(EP93XX_GPIO_LINE_G(i));
+ i = 8;
+fail_gpio_e:
+ for (--i; i >= 2; --i)
+ gpio_free(EP93XX_GPIO_LINE_E(i));
+ gpio_free(EP93XX_GPIO_LINE_EGPIO15);
+fail_egpio15:
+ gpio_free(EP93XX_GPIO_LINE_EGPIO2);
+ return err;
+}
+EXPORT_SYMBOL(ep93xx_ide_acquire_gpio);
+
+void ep93xx_ide_release_gpio(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 2; i < 8; i++)
+ gpio_free(EP93XX_GPIO_LINE_E(i));
+ for (i = 4; i < 8; i++)
+ gpio_free(EP93XX_GPIO_LINE_G(i));
+ for (i = 0; i < 8; i++)
+ gpio_free(EP93XX_GPIO_LINE_H(i));
+ gpio_free(EP93XX_GPIO_LINE_EGPIO15);
+ gpio_free(EP93XX_GPIO_LINE_EGPIO2);
+
+
+ /* GPIO ports E[7:2], G[7:4] and H used by GPIO */
+ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_EONIDE |
+ EP93XX_SYSCON_DEVCFG_GONIDE |
+ EP93XX_SYSCON_DEVCFG_HONIDE);
+}
+EXPORT_SYMBOL(ep93xx_ide_release_gpio);
+
void __init ep93xx_init_devices(void)
{
/* Disallow access to MaverickCrunch initially */
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index d74c5cddb98b..337ab7cf4c16 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -91,8 +91,8 @@ static void __init edb93xx_register_i2c(void)
ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
edb93xxa_i2c_board_info,
ARRAY_SIZE(edb93xxa_i2c_board_info));
- } else if (machine_is_edb9307() || machine_is_edb9312() ||
- machine_is_edb9315()) {
+ } else if (machine_is_edb9302() || machine_is_edb9307()
+ || machine_is_edb9312() || machine_is_edb9315()) {
ep93xx_register_i2c(&edb93xx_i2c_gpio_data,
edb93xx_i2c_board_info,
ARRAY_SIZE(edb93xx_i2c_board_info));
@@ -233,6 +233,29 @@ static void __init edb93xx_register_fb(void)
}
+/*************************************************************************
+ * EDB93xx IDE
+ *************************************************************************/
+static int __init edb93xx_has_ide(void)
+{
+ /*
+ * Although EDB9312 and EDB9315 do have IDE capability, they have
+ * INTRQ line wired as pull-up, which makes using IDE interface
+ * problematic.
+ */
+ return machine_is_edb9312() || machine_is_edb9315() ||
+ machine_is_edb9315a();
+}
+
+static void __init edb93xx_register_ide(void)
+{
+ if (!edb93xx_has_ide())
+ return;
+
+ ep93xx_register_ide();
+}
+
+
static void __init edb93xx_init_machine(void)
{
ep93xx_init_devices();
@@ -243,6 +266,7 @@ static void __init edb93xx_init_machine(void)
edb93xx_register_i2s();
edb93xx_register_pwm();
edb93xx_register_fb();
+ edb93xx_register_ide();
}
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index 1ecb040d98bf..33a5122c6dc8 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -48,6 +48,9 @@ void ep93xx_register_i2s(void);
int ep93xx_i2s_acquire(void);
void ep93xx_i2s_release(void);
void ep93xx_register_ac97(void);
+void ep93xx_register_ide(void);
+int ep93xx_ide_acquire_gpio(struct platform_device *pdev);
+void ep93xx_ide_release_gpio(struct platform_device *pdev);
void ep93xx_init_devices(void);
extern struct sys_timer ep93xx_timer;
diff --git a/arch/arm/mach-ep93xx/soc.h b/arch/arm/mach-ep93xx/soc.h
index 979fba722926..7bf7ff8beae7 100644
--- a/arch/arm/mach-ep93xx/soc.h
+++ b/arch/arm/mach-ep93xx/soc.h
@@ -69,6 +69,7 @@
#define EP93XX_BOOT_ROM_BASE EP93XX_AHB_IOMEM(0x00090000)
+#define EP93XX_IDE_PHYS_BASE EP93XX_AHB_PHYS(0x000a0000)
#define EP93XX_IDE_BASE EP93XX_AHB_IOMEM(0x000a0000)
#define EP93XX_VIC1_BASE EP93XX_AHB_IOMEM(0x000b0000)
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 573be57d3d28..b5b4c8c9db11 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -207,12 +207,13 @@ config MACH_SMDKV310
select S3C_DEV_HSMMC1
select S3C_DEV_HSMMC2
select S3C_DEV_HSMMC3
+ select S3C_DEV_USB_HSOTG
select SAMSUNG_DEV_BACKLIGHT
select EXYNOS_DEV_DRM
select EXYNOS_DEV_SYSMMU
select EXYNOS4_DEV_AHCI
select SAMSUNG_DEV_KEYPAD
- select EXYNOS4_DEV_DMA
+ select EXYNOS_DEV_DMA
select SAMSUNG_DEV_PWM
select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_SETUP_FIMD0
@@ -264,7 +265,7 @@ config MACH_UNIVERSAL_C210
select S5P_DEV_ONENAND
select S5P_DEV_TV
select EXYNOS_DEV_SYSMMU
- select EXYNOS4_DEV_DMA
+ select EXYNOS_DEV_DMA
select EXYNOS_DEV_DRM
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
@@ -303,7 +304,7 @@ config MACH_NURI
select S5P_DEV_MFC
select S5P_DEV_USB_EHCI
select S5P_SETUP_MIPIPHY
- select EXYNOS4_DEV_DMA
+ select EXYNOS_DEV_DMA
select EXYNOS_DEV_DRM
select EXYNOS4_SETUP_FIMC
select EXYNOS4_SETUP_FIMD0
@@ -326,6 +327,7 @@ config MACH_ORIGEN
select S3C_DEV_WDT
select S3C_DEV_HSMMC
select S3C_DEV_HSMMC2
+ select S3C_DEV_USB_HSOTG
select S5P_DEV_FIMC0
select S5P_DEV_FIMC1
select S5P_DEV_FIMC2
@@ -341,7 +343,7 @@ config MACH_ORIGEN
select SAMSUNG_DEV_PWM
select EXYNOS_DEV_DRM
select EXYNOS_DEV_SYSMMU
- select EXYNOS4_DEV_DMA
+ select EXYNOS_DEV_DMA
select EXYNOS4_DEV_USB_OHCI
select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_SDHCI
@@ -360,22 +362,27 @@ config MACH_SMDK4212
select S3C_DEV_I2C3
select S3C_DEV_I2C7
select S3C_DEV_RTC
+ select S3C_DEV_USB_HSOTG
select S3C_DEV_WDT
select S5P_DEV_FIMC0
select S5P_DEV_FIMC1
select S5P_DEV_FIMC2
select S5P_DEV_FIMC3
+ select S5P_DEV_FIMD0
select S5P_DEV_MFC
select SAMSUNG_DEV_BACKLIGHT
select SAMSUNG_DEV_KEYPAD
select SAMSUNG_DEV_PWM
select EXYNOS_DEV_SYSMMU
select EXYNOS_DEV_DMA
+ select EXYNOS_DEV_DRM
+ select EXYNOS4_SETUP_FIMD0
select EXYNOS4_SETUP_I2C1
select EXYNOS4_SETUP_I2C3
select EXYNOS4_SETUP_I2C7
select EXYNOS4_SETUP_KEYPAD
select EXYNOS4_SETUP_SDHCI
+ select EXYNOS4_SETUP_USB_PHY
help
Machine support for Samsung SMDK4212
diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c
index bcb7db453145..26fe9de35ecb 100644
--- a/arch/arm/mach-exynos/clock-exynos4.c
+++ b/arch/arm/mach-exynos/clock-exynos4.c
@@ -586,17 +586,17 @@ static struct clk exynos4_init_clocks_off[] = {
.ctrlbit = (1 << 13),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "exynos4210-spi.0",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 16),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "exynos4210-spi.1",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 17),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.2",
+ .devname = "exynos4210-spi.2",
.enable = exynos4_clk_ip_peril_ctrl,
.ctrlbit = (1 << 18),
}, {
@@ -1242,40 +1242,67 @@ static struct clksrc_clk exynos4_clk_sclk_mmc3 = {
.reg_div = { .reg = EXYNOS4_CLKDIV_FSYS2, .shift = 24, .size = 8 },
};
+static struct clksrc_clk exynos4_clk_mdout_spi0 = {
+ .clk = {
+ .name = "mdout_spi",
+ .devname = "exynos4210-spi.0",
+ },
+ .sources = &exynos4_clkset_group,
+ .reg_src = { .reg = EXYNOS4_CLKSRC_PERIL1, .shift = 16, .size = 4 },
+ .reg_div = { .reg = EXYNOS4_CLKDIV_PERIL1, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk exynos4_clk_mdout_spi1 = {
+ .clk = {
+ .name = "mdout_spi",
+ .devname = "exynos4210-spi.1",
+ },
+ .sources = &exynos4_clkset_group,
+ .reg_src = { .reg = EXYNOS4_CLKSRC_PERIL1, .shift = 20, .size = 4 },
+ .reg_div = { .reg = EXYNOS4_CLKDIV_PERIL1, .shift = 16, .size = 4 },
+};
+
+static struct clksrc_clk exynos4_clk_mdout_spi2 = {
+ .clk = {
+ .name = "mdout_spi",
+ .devname = "exynos4210-spi.2",
+ },
+ .sources = &exynos4_clkset_group,
+ .reg_src = { .reg = EXYNOS4_CLKSRC_PERIL1, .shift = 24, .size = 4 },
+ .reg_div = { .reg = EXYNOS4_CLKDIV_PERIL2, .shift = 0, .size = 4 },
+};
+
static struct clksrc_clk exynos4_clk_sclk_spi0 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "exynos4210-spi.0",
+ .parent = &exynos4_clk_mdout_spi0.clk,
.enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 16),
},
- .sources = &exynos4_clkset_group,
- .reg_src = { .reg = EXYNOS4_CLKSRC_PERIL1, .shift = 16, .size = 4 },
- .reg_div = { .reg = EXYNOS4_CLKDIV_PERIL1, .shift = 0, .size = 4 },
+ .reg_div = { .reg = EXYNOS4_CLKDIV_PERIL1, .shift = 8, .size = 8 },
};
static struct clksrc_clk exynos4_clk_sclk_spi1 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "exynos4210-spi.1",
+ .parent = &exynos4_clk_mdout_spi1.clk,
.enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 20),
},
- .sources = &exynos4_clkset_group,
- .reg_src = { .reg = EXYNOS4_CLKSRC_PERIL1, .shift = 20, .size = 4 },
- .reg_div = { .reg = EXYNOS4_CLKDIV_PERIL1, .shift = 16, .size = 4 },
+ .reg_div = { .reg = EXYNOS4_CLKDIV_PERIL1, .shift = 24, .size = 8 },
};
static struct clksrc_clk exynos4_clk_sclk_spi2 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.2",
+ .devname = "exynos4210-spi.2",
+ .parent = &exynos4_clk_mdout_spi2.clk,
.enable = exynos4_clksrc_mask_peril1_ctrl,
.ctrlbit = (1 << 24),
},
- .sources = &exynos4_clkset_group,
- .reg_src = { .reg = EXYNOS4_CLKSRC_PERIL1, .shift = 24, .size = 4 },
- .reg_div = { .reg = EXYNOS4_CLKDIV_PERIL2, .shift = 0, .size = 4 },
+ .reg_div = { .reg = EXYNOS4_CLKDIV_PERIL2, .shift = 8, .size = 8 },
};
/* Clock initialization code */
@@ -1331,7 +1358,9 @@ static struct clksrc_clk *exynos4_clksrc_cdev[] = {
&exynos4_clk_sclk_spi0,
&exynos4_clk_sclk_spi1,
&exynos4_clk_sclk_spi2,
-
+ &exynos4_clk_mdout_spi0,
+ &exynos4_clk_mdout_spi1,
+ &exynos4_clk_mdout_spi2,
};
static struct clk_lookup exynos4_clk_lookup[] = {
@@ -1347,9 +1376,9 @@ static struct clk_lookup exynos4_clk_lookup[] = {
CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos4_clk_pdma0),
CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos4_clk_pdma1),
CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos4_clk_mdma1),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk0", &exynos4_clk_sclk_spi0.clk),
- CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk0", &exynos4_clk_sclk_spi1.clk),
- CLKDEV_INIT("s3c64xx-spi.2", "spi_busclk0", &exynos4_clk_sclk_spi2.clk),
+ CLKDEV_INIT("exynos4210-spi.0", "spi_busclk0", &exynos4_clk_sclk_spi0.clk),
+ CLKDEV_INIT("exynos4210-spi.1", "spi_busclk0", &exynos4_clk_sclk_spi1.clk),
+ CLKDEV_INIT("exynos4210-spi.2", "spi_busclk0", &exynos4_clk_sclk_spi2.clk),
};
static int xtal_rate;
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index fefa336be2b4..774533c67066 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -131,6 +131,11 @@ static int exynos5_clksrc_mask_peric0_ctrl(struct clk *clk, int enable)
return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_PERIC0, clk, enable);
}
+static int exynos5_clksrc_mask_peric1_ctrl(struct clk *clk, int enable)
+{
+ return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_PERIC1, clk, enable);
+}
+
static int exynos5_clk_ip_acp_ctrl(struct clk *clk, int enable)
{
return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ACP, clk, enable);
@@ -741,6 +746,24 @@ static struct clk exynos5_init_clocks_off[] = {
.enable = exynos5_clk_ip_peric_ctrl,
.ctrlbit = (1 << 14),
}, {
+ .name = "spi",
+ .devname = "exynos4210-spi.0",
+ .parent = &exynos5_clk_aclk_66.clk,
+ .enable = exynos5_clk_ip_peric_ctrl,
+ .ctrlbit = (1 << 16),
+ }, {
+ .name = "spi",
+ .devname = "exynos4210-spi.1",
+ .parent = &exynos5_clk_aclk_66.clk,
+ .enable = exynos5_clk_ip_peric_ctrl,
+ .ctrlbit = (1 << 17),
+ }, {
+ .name = "spi",
+ .devname = "exynos4210-spi.2",
+ .parent = &exynos5_clk_aclk_66.clk,
+ .enable = exynos5_clk_ip_peric_ctrl,
+ .ctrlbit = (1 << 18),
+ }, {
.name = SYSMMU_CLOCK_NAME,
.devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
.enable = &exynos5_clk_ip_mfc_ctrl,
@@ -1034,6 +1057,69 @@ static struct clksrc_clk exynos5_clk_sclk_mmc3 = {
.reg_div = { .reg = EXYNOS5_CLKDIV_FSYS2, .shift = 24, .size = 8 },
};
+static struct clksrc_clk exynos5_clk_mdout_spi0 = {
+ .clk = {
+ .name = "mdout_spi",
+ .devname = "exynos4210-spi.0",
+ },
+ .sources = &exynos5_clkset_group,
+ .reg_src = { .reg = EXYNOS5_CLKSRC_PERIC1, .shift = 16, .size = 4 },
+ .reg_div = { .reg = EXYNOS5_CLKDIV_PERIC1, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk exynos5_clk_mdout_spi1 = {
+ .clk = {
+ .name = "mdout_spi",
+ .devname = "exynos4210-spi.1",
+ },
+ .sources = &exynos5_clkset_group,
+ .reg_src = { .reg = EXYNOS5_CLKSRC_PERIC1, .shift = 20, .size = 4 },
+ .reg_div = { .reg = EXYNOS5_CLKDIV_PERIC1, .shift = 16, .size = 4 },
+};
+
+static struct clksrc_clk exynos5_clk_mdout_spi2 = {
+ .clk = {
+ .name = "mdout_spi",
+ .devname = "exynos4210-spi.2",
+ },
+ .sources = &exynos5_clkset_group,
+ .reg_src = { .reg = EXYNOS5_CLKSRC_PERIC1, .shift = 24, .size = 4 },
+ .reg_div = { .reg = EXYNOS5_CLKDIV_PERIC2, .shift = 0, .size = 4 },
+};
+
+static struct clksrc_clk exynos5_clk_sclk_spi0 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "exynos4210-spi.0",
+ .parent = &exynos5_clk_mdout_spi0.clk,
+ .enable = exynos5_clksrc_mask_peric1_ctrl,
+ .ctrlbit = (1 << 16),
+ },
+ .reg_div = { .reg = EXYNOS5_CLKDIV_PERIC1, .shift = 8, .size = 8 },
+};
+
+static struct clksrc_clk exynos5_clk_sclk_spi1 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "exynos4210-spi.1",
+ .parent = &exynos5_clk_mdout_spi1.clk,
+ .enable = exynos5_clksrc_mask_peric1_ctrl,
+ .ctrlbit = (1 << 20),
+ },
+ .reg_div = { .reg = EXYNOS5_CLKDIV_PERIC1, .shift = 24, .size = 8 },
+};
+
+static struct clksrc_clk exynos5_clk_sclk_spi2 = {
+ .clk = {
+ .name = "sclk_spi",
+ .devname = "exynos4210-spi.2",
+ .parent = &exynos5_clk_mdout_spi2.clk,
+ .enable = exynos5_clksrc_mask_peric1_ctrl,
+ .ctrlbit = (1 << 24),
+ },
+ .reg_div = { .reg = EXYNOS5_CLKDIV_PERIC2, .shift = 8, .size = 8 },
+};
+
static struct clksrc_clk exynos5_clksrcs[] = {
{
.clk = {
@@ -1148,6 +1234,12 @@ static struct clksrc_clk *exynos5_sysclks[] = {
&exynos5_clk_dout_mmc4,
&exynos5_clk_aclk_acp,
&exynos5_clk_pclk_acp,
+ &exynos5_clk_sclk_spi0,
+ &exynos5_clk_sclk_spi1,
+ &exynos5_clk_sclk_spi2,
+ &exynos5_clk_mdout_spi0,
+ &exynos5_clk_mdout_spi1,
+ &exynos5_clk_mdout_spi2,
};
static struct clk *exynos5_clk_cdev[] = {
@@ -1176,6 +1268,9 @@ static struct clk_lookup exynos5_clk_lookup[] = {
CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk),
CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk),
CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk),
+ CLKDEV_INIT("exynos4210-spi.0", "spi_busclk0", &exynos5_clk_sclk_spi0.clk),
+ CLKDEV_INIT("exynos4210-spi.1", "spi_busclk0", &exynos5_clk_sclk_spi1.clk),
+ CLKDEV_INIT("exynos4210-spi.2", "spi_busclk0", &exynos5_clk_sclk_spi2.clk),
CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos5_clk_pdma0),
CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos5_clk_pdma1),
CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos5_clk_mdma1),
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 742edd3bbec3..4eb39cdf75ea 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -540,7 +540,8 @@ static struct irq_domain_ops combiner_irq_domain_ops = {
.map = combiner_irq_domain_map,
};
-void __init combiner_init(void __iomem *combiner_base, struct device_node *np)
+static void __init combiner_init(void __iomem *combiner_base,
+ struct device_node *np)
{
int i, irq, irq_base;
unsigned int max_nr, nr_irq;
@@ -712,31 +713,6 @@ static int __init exynos4_l2x0_cache_init(void)
early_initcall(exynos4_l2x0_cache_init);
#endif
-static int __init exynos5_l2_cache_init(void)
-{
- unsigned int val;
-
- if (!soc_is_exynos5250())
- return 0;
-
- asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
- "bic %0, %0, #(1 << 2)\n" /* cache disable */
- "mcr p15, 0, %0, c1, c0, 0\n"
- "mrc p15, 1, %0, c9, c0, 2\n"
- : "=r"(val));
-
- val |= (1 << 9) | (1 << 5) | (2 << 6) | (2 << 0);
-
- asm volatile("mcr p15, 1, %0, c9, c0, 2\n" : : "r"(val));
- asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
- "orr %0, %0, #(1 << 2)\n" /* cache enable */
- "mcr p15, 0, %0, c1, c0, 0\n"
- : : "r"(val));
-
- return 0;
-}
-early_initcall(exynos5_l2_cache_init);
-
static int __init exynos_init(void)
{
printk(KERN_INFO "EXYNOS: Initializing architecture\n");
diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h
index 7a4b4789eb72..35bced6f9092 100644
--- a/arch/arm/mach-exynos/include/mach/irqs.h
+++ b/arch/arm/mach-exynos/include/mach/irqs.h
@@ -195,6 +195,10 @@
#define IRQ_IIC6 EXYNOS4_IRQ_IIC6
#define IRQ_IIC7 EXYNOS4_IRQ_IIC7
+#define IRQ_SPI0 EXYNOS4_IRQ_SPI0
+#define IRQ_SPI1 EXYNOS4_IRQ_SPI1
+#define IRQ_SPI2 EXYNOS4_IRQ_SPI2
+
#define IRQ_USB_HOST EXYNOS4_IRQ_USB_HOST
#define IRQ_OTG EXYNOS4_IRQ_USB_HSOTG
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index ca4aa89aa46b..c72b675b3e4b 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -154,6 +154,9 @@
#define EXYNOS4_PA_SPI0 0x13920000
#define EXYNOS4_PA_SPI1 0x13930000
#define EXYNOS4_PA_SPI2 0x13940000
+#define EXYNOS5_PA_SPI0 0x12D20000
+#define EXYNOS5_PA_SPI1 0x12D30000
+#define EXYNOS5_PA_SPI2 0x12D40000
#define EXYNOS4_PA_GPIO1 0x11400000
#define EXYNOS4_PA_GPIO2 0x11000000
diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
index 43a99e6f56ab..d4e392b811a3 100644
--- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
@@ -232,6 +232,11 @@
#define EXYNOS5_USB_CFG S5P_PMUREG(0x0230)
+#define EXYNOS5_AUTO_WDTRESET_DISABLE S5P_PMUREG(0x0408)
+#define EXYNOS5_MASK_WDTRESET_REQUEST S5P_PMUREG(0x040C)
+
+#define EXYNOS5_SYS_WDTRESET (1 << 20)
+
#define EXYNOS5_ARM_CORE0_SYS_PWR_REG S5P_PMUREG(0x1000)
#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG S5P_PMUREG(0x1004)
#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1008)
diff --git a/arch/arm/mach-exynos/include/mach/regs-usb-phy.h b/arch/arm/mach-exynos/include/mach/regs-usb-phy.h
index c337cf3a71bf..07277735252e 100644
--- a/arch/arm/mach-exynos/include/mach/regs-usb-phy.h
+++ b/arch/arm/mach-exynos/include/mach/regs-usb-phy.h
@@ -35,11 +35,21 @@
#define PHY1_COMMON_ON_N (1 << 7)
#define PHY0_COMMON_ON_N (1 << 4)
#define PHY0_ID_PULLUP (1 << 2)
-#define CLKSEL_MASK (0x3 << 0)
-#define CLKSEL_SHIFT (0)
-#define CLKSEL_48M (0x0 << 0)
-#define CLKSEL_12M (0x2 << 0)
-#define CLKSEL_24M (0x3 << 0)
+
+#define EXYNOS4_CLKSEL_SHIFT (0)
+
+#define EXYNOS4210_CLKSEL_MASK (0x3 << 0)
+#define EXYNOS4210_CLKSEL_48M (0x0 << 0)
+#define EXYNOS4210_CLKSEL_12M (0x2 << 0)
+#define EXYNOS4210_CLKSEL_24M (0x3 << 0)
+
+#define EXYNOS4X12_CLKSEL_MASK (0x7 << 0)
+#define EXYNOS4X12_CLKSEL_9600K (0x0 << 0)
+#define EXYNOS4X12_CLKSEL_10M (0x1 << 0)
+#define EXYNOS4X12_CLKSEL_12M (0x2 << 0)
+#define EXYNOS4X12_CLKSEL_19200K (0x3 << 0)
+#define EXYNOS4X12_CLKSEL_20M (0x4 << 0)
+#define EXYNOS4X12_CLKSEL_24M (0x5 << 0)
#define EXYNOS4_RSTCON EXYNOS4_HSOTG_PHYREG(0x08)
#define HOST_LINK_PORT_SWRST_MASK (0xf << 6)
diff --git a/arch/arm/mach-exynos/include/mach/spi-clocks.h b/arch/arm/mach-exynos/include/mach/spi-clocks.h
deleted file mode 100644
index c71a5fba6a84..000000000000
--- a/arch/arm/mach-exynos/include/mach/spi-clocks.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* linux/arch/arm/mach-exynos4/include/mach/spi-clocks.h
- *
- * Copyright (C) 2011 Samsung Electronics Co. Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_ARCH_SPI_CLKS_H
-#define __ASM_ARCH_SPI_CLKS_H __FILE__
-
-/* Must source from SCLK_SPI */
-#define EXYNOS_SPI_SRCCLK_SCLK 0
-
-#endif /* __ASM_ARCH_SPI_CLKS_H */
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index e7e9743543ac..b2b5d5faa748 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -55,6 +55,12 @@ static const struct of_dev_auxdata exynos4210_auxdata_lookup[] __initconst = {
"exynos4-sdhci.3", NULL),
OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS4_PA_IIC(0),
"s3c2440-i2c.0", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-spi", EXYNOS4_PA_SPI0,
+ "exynos4210-spi.0", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-spi", EXYNOS4_PA_SPI1,
+ "exynos4210-spi.1", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-spi", EXYNOS4_PA_SPI2,
+ "exynos4210-spi.2", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS4_PA_PDMA0, "dma-pl330.0", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS4_PA_PDMA1, "dma-pl330.1", NULL),
{},
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 7b1e11a228cc..ef770bc2318f 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -47,6 +47,12 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
"s3c2440-i2c.0", NULL),
OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(1),
"s3c2440-i2c.1", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-spi", EXYNOS5_PA_SPI0,
+ "exynos4210-spi.0", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-spi", EXYNOS5_PA_SPI1,
+ "exynos4210-spi.1", NULL),
+ OF_DEV_AUXDATA("samsung,exynos4210-spi", EXYNOS5_PA_SPI2,
+ "exynos4210-spi.2", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index 656f8fc9addd..f98a83a81ce7 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -50,7 +50,6 @@
#include <plat/gpio-cfg.h>
#include <plat/iic.h>
#include <plat/mfc.h>
-#include <plat/pd.h>
#include <plat/fimc-core.h>
#include <plat/camport.h>
#include <plat/mipi_csis.h>
@@ -1342,9 +1341,8 @@ static struct platform_device *nuri_devices[] __initdata = {
static void __init nuri_map_io(void)
{
- clk_xusbxti.rate = 24000000;
exynos_init_io(NULL, 0);
- s3c24xx_init_clocks(24000000);
+ s3c24xx_init_clocks(clk_xusbxti.rate);
s3c24xx_init_uarts(nuri_uartcfgs, ARRAY_SIZE(nuri_uartcfgs));
}
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index f5572be9d7bf..5a12dc26f496 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -9,6 +9,7 @@
*/
#include <linux/serial_core.h>
+#include <linux/leds.h>
#include <linux/gpio.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
@@ -21,6 +22,7 @@
#include <linux/mfd/max8997.h>
#include <linux/lcd.h>
#include <linux/rfkill-gpio.h>
+#include <linux/platform_data/s3c-hsotg.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
@@ -38,7 +40,6 @@
#include <plat/clock.h>
#include <plat/gpio-cfg.h>
#include <plat/backlight.h>
-#include <plat/pd.h>
#include <plat/fb.h>
#include <plat/mfc.h>
@@ -499,6 +500,37 @@ static void __init origen_ohci_init(void)
exynos4_ohci_set_platdata(pdata);
}
+/* USB OTG */
+static struct s3c_hsotg_plat origen_hsotg_pdata;
+
+static struct gpio_led origen_gpio_leds[] = {
+ {
+ .name = "origen::status1",
+ .default_trigger = "heartbeat",
+ .gpio = EXYNOS4_GPX1(3),
+ .active_low = 1,
+ },
+ {
+ .name = "origen::status2",
+ .default_trigger = "mmc0",
+ .gpio = EXYNOS4_GPX1(4),
+ .active_low = 1,
+ },
+};
+
+static struct gpio_led_platform_data origen_gpio_led_info = {
+ .leds = origen_gpio_leds,
+ .num_leds = ARRAY_SIZE(origen_gpio_leds),
+};
+
+static struct platform_device origen_leds_gpio = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &origen_gpio_led_info,
+ },
+};
+
static struct gpio_keys_button origen_gpio_keys_table[] = {
{
.code = KEY_MENU,
@@ -655,6 +687,7 @@ static struct platform_device *origen_devices[] __initdata = {
&s3c_device_hsmmc0,
&s3c_device_i2c0,
&s3c_device_rtc,
+ &s3c_device_usb_hsotg,
&s3c_device_wdt,
&s5p_device_ehci,
&s5p_device_fimc0,
@@ -677,6 +710,7 @@ static struct platform_device *origen_devices[] __initdata = {
&exynos4_device_ohci,
&origen_device_gpiokeys,
&origen_lcd_hv070wsa,
+ &origen_leds_gpio,
&origen_device_bluetooth,
};
@@ -712,7 +746,7 @@ static void s5p_tv_setup(void)
static void __init origen_map_io(void)
{
exynos_init_io(NULL, 0);
- s3c24xx_init_clocks(24000000);
+ s3c24xx_init_clocks(clk_xusbxti.rate);
s3c24xx_init_uarts(origen_uartcfgs, ARRAY_SIZE(origen_uartcfgs));
}
@@ -744,7 +778,7 @@ static void __init origen_machine_init(void)
origen_ehci_init();
origen_ohci_init();
- clk_xusbxti.rate = 24000000;
+ s3c_hsotg_set_platdata(&origen_hsotg_pdata);
s5p_tv_setup();
s5p_i2c_hdmiphy_set_platdata(NULL);
diff --git a/arch/arm/mach-exynos/mach-smdk4x12.c b/arch/arm/mach-exynos/mach-smdk4x12.c
index fb09c70e195a..b26beb13ebef 100644
--- a/arch/arm/mach-exynos/mach-smdk4x12.c
+++ b/arch/arm/mach-exynos/mach-smdk4x12.c
@@ -13,12 +13,14 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/io.h>
+#include <linux/lcd.h>
#include <linux/mfd/max8997.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/pwm_backlight.h>
#include <linux/regulator/machine.h>
#include <linux/serial_core.h>
+#include <linux/platform_data/s3c-hsotg.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
@@ -28,15 +30,18 @@
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/devs.h>
+#include <plat/fb.h>
#include <plat/gpio-cfg.h>
#include <plat/iic.h>
#include <plat/keypad.h>
#include <plat/mfc.h>
+#include <plat/regs-fb.h>
#include <plat/regs-serial.h>
#include <plat/sdhci.h>
#include <mach/map.h>
+#include <drm/exynos_drm.h>
#include "common.h"
/* Following are default values for UCON, ULCON and UFCON UART registers */
@@ -219,8 +224,10 @@ static struct platform_pwm_backlight_data smdk4x12_bl_data = {
static uint32_t smdk4x12_keymap[] __initdata = {
/* KEY(row, col, keycode) */
- KEY(1, 0, KEY_D), KEY(1, 1, KEY_A), KEY(1, 2, KEY_B),
- KEY(1, 3, KEY_E), KEY(1, 4, KEY_C)
+ KEY(1, 3, KEY_1), KEY(1, 4, KEY_2), KEY(1, 5, KEY_3),
+ KEY(1, 6, KEY_4), KEY(1, 7, KEY_5),
+ KEY(2, 5, KEY_D), KEY(2, 6, KEY_A), KEY(2, 7, KEY_B),
+ KEY(0, 7, KEY_E), KEY(0, 5, KEY_C)
};
static struct matrix_keymap_data smdk4x12_keymap_data __initdata = {
@@ -230,10 +237,62 @@ static struct matrix_keymap_data smdk4x12_keymap_data __initdata = {
static struct samsung_keypad_platdata smdk4x12_keypad_data __initdata = {
.keymap_data = &smdk4x12_keymap_data,
- .rows = 2,
- .cols = 5,
+ .rows = 3,
+ .cols = 8,
};
+#ifdef CONFIG_DRM_EXYNOS
+static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
+ .panel = {
+ .timing = {
+ .left_margin = 8,
+ .right_margin = 8,
+ .upper_margin = 6,
+ .lower_margin = 6,
+ .hsync_len = 6,
+ .vsync_len = 4,
+ .xres = 480,
+ .yres = 800,
+ },
+ },
+ .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+ .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+ .default_win = 0,
+ .bpp = 32,
+};
+#else
+static struct s3c_fb_pd_win smdk4x12_fb_win0 = {
+ .xres = 480,
+ .yres = 800,
+ .virtual_x = 480,
+ .virtual_y = 800 * 2,
+ .max_bpp = 32,
+ .default_bpp = 24,
+};
+
+static struct fb_videomode smdk4x12_lcd_timing = {
+ .left_margin = 8,
+ .right_margin = 8,
+ .upper_margin = 6,
+ .lower_margin = 6,
+ .hsync_len = 6,
+ .vsync_len = 4,
+ .xres = 480,
+ .yres = 800,
+};
+
+static struct s3c_fb_platdata smdk4x12_lcd_pdata __initdata = {
+ .win[0] = &smdk4x12_fb_win0,
+ .vtiming = &smdk4x12_lcd_timing,
+ .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
+ .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+ .setup_gpio = exynos4_fimd0_gpio_setup_24bpp,
+};
+#endif
+
+/* USB OTG */
+static struct s3c_hsotg_plat smdk4x12_hsotg_pdata;
+
static struct platform_device *smdk4x12_devices[] __initdata = {
&s3c_device_hsmmc2,
&s3c_device_hsmmc3,
@@ -242,22 +301,25 @@ static struct platform_device *smdk4x12_devices[] __initdata = {
&s3c_device_i2c3,
&s3c_device_i2c7,
&s3c_device_rtc,
+ &s3c_device_usb_hsotg,
&s3c_device_wdt,
&s5p_device_fimc0,
&s5p_device_fimc1,
&s5p_device_fimc2,
&s5p_device_fimc3,
&s5p_device_fimc_md,
+ &s5p_device_fimd0,
&s5p_device_mfc,
&s5p_device_mfc_l,
&s5p_device_mfc_r,
+#ifdef CONFIG_DRM_EXYNOS
+ &exynos_device_drm,
+#endif
&samsung_device_keypad,
};
static void __init smdk4x12_map_io(void)
{
- clk_xusbxti.rate = 24000000;
-
exynos_init_io(NULL, 0);
s3c24xx_init_clocks(clk_xusbxti.rate);
s3c24xx_init_uarts(smdk4x12_uartcfgs, ARRAY_SIZE(smdk4x12_uartcfgs));
@@ -293,6 +355,15 @@ static void __init smdk4x12_machine_init(void)
s3c_sdhci2_set_platdata(&smdk4x12_hsmmc2_pdata);
s3c_sdhci3_set_platdata(&smdk4x12_hsmmc3_pdata);
+ s3c_hsotg_set_platdata(&smdk4x12_hsotg_pdata);
+
+#ifdef CONFIG_DRM_EXYNOS
+ s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
+ exynos4_fimd0_gpio_setup_24bpp();
+#else
+ s5p_fimd0_set_platdata(&smdk4x12_lcd_pdata);
+#endif
+
platform_add_devices(smdk4x12_devices, ARRAY_SIZE(smdk4x12_devices));
}
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 262e9e446a96..3cfa688d274a 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/pwm_backlight.h>
+#include <linux/platform_data/s3c-hsotg.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
@@ -34,7 +35,6 @@
#include <plat/keypad.h>
#include <plat/sdhci.h>
#include <plat/iic.h>
-#include <plat/pd.h>
#include <plat/gpio-cfg.h>
#include <plat/backlight.h>
#include <plat/mfc.h>
@@ -271,6 +271,15 @@ static void __init smdkv310_ohci_init(void)
exynos4_ohci_set_platdata(pdata);
}
+/* USB OTG */
+static struct s3c_hsotg_plat smdkv310_hsotg_pdata;
+
+/* Audio device */
+static struct platform_device smdkv310_device_audio = {
+ .name = "smdk-audio",
+ .id = -1,
+};
+
static struct platform_device *smdkv310_devices[] __initdata = {
&s3c_device_hsmmc0,
&s3c_device_hsmmc1,
@@ -279,6 +288,7 @@ static struct platform_device *smdkv310_devices[] __initdata = {
&s3c_device_i2c1,
&s5p_device_i2c_hdmiphy,
&s3c_device_rtc,
+ &s3c_device_usb_hsotg,
&s3c_device_wdt,
&s5p_device_ehci,
&s5p_device_fimc0,
@@ -302,6 +312,7 @@ static struct platform_device *smdkv310_devices[] __initdata = {
&samsung_asoc_dma,
&samsung_asoc_idma,
&s5p_device_fimd0,
+ &smdkv310_device_audio,
&smdkv310_lcd_lte480wv,
&smdkv310_smsc911x,
&exynos4_device_ahci,
@@ -354,7 +365,7 @@ static void s5p_tv_setup(void)
static void __init smdkv310_map_io(void)
{
exynos_init_io(NULL, 0);
- s3c24xx_init_clocks(24000000);
+ s3c24xx_init_clocks(clk_xusbxti.rate);
s3c24xx_init_uarts(smdkv310_uartcfgs, ARRAY_SIZE(smdkv310_uartcfgs));
}
@@ -390,7 +401,7 @@ static void __init smdkv310_machine_init(void)
smdkv310_ehci_init();
smdkv310_ohci_init();
- clk_xusbxti.rate = 24000000;
+ s3c_hsotg_set_platdata(&smdkv310_hsotg_pdata);
platform_add_devices(smdkv310_devices, ARRAY_SIZE(smdkv310_devices));
}
@@ -417,5 +428,6 @@ MACHINE_START(SMDKC210, "SMDKC210")
.init_machine = smdkv310_machine_init,
.init_late = exynos_init_late,
.timer = &exynos4_timer,
+ .reserve = &smdkv310_reserve,
.restart = exynos4_restart,
MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index cd92fa86ba41..4d1f40d44ed1 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -39,7 +39,6 @@
#include <plat/fb.h>
#include <plat/mfc.h>
#include <plat/sdhci.h>
-#include <plat/pd.h>
#include <plat/regs-fb-v4.h>
#include <plat/fimc-core.h>
#include <plat/s5p-time.h>
@@ -1100,9 +1099,8 @@ static struct platform_device *universal_devices[] __initdata = {
static void __init universal_map_io(void)
{
- clk_xusbxti.rate = 24000000;
exynos_init_io(NULL, 0);
- s3c24xx_init_clocks(24000000);
+ s3c24xx_init_clocks(clk_xusbxti.rate);
s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
s5p_set_timer_source(S5P_PWM2, S5P_PWM4);
}
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index e9fafcf163de..373c3c00d24c 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -119,7 +119,9 @@ static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev,
struct exynos_pm_domain *pd)
{
if (pdev->dev.bus) {
- if (pm_genpd_add_device(&pd->pd, &pdev->dev))
+ if (!pm_genpd_add_device(&pd->pd, &pdev->dev))
+ pm_genpd_dev_need_restore(&pdev->dev, true);
+ else
pr_info("%s: error in adding %s device to %s power"
"domain\n", __func__, dev_name(&pdev->dev),
pd->name);
@@ -151,9 +153,12 @@ static __init int exynos4_pm_init_power_domain(void)
if (of_have_populated_dt())
return exynos_pm_dt_parse_domains();
- for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++)
- pm_genpd_init(&exynos4_pm_domains[idx]->pd, NULL,
- exynos4_pm_domains[idx]->is_off);
+ for (idx = 0; idx < ARRAY_SIZE(exynos4_pm_domains); idx++) {
+ struct exynos_pm_domain *pd = exynos4_pm_domains[idx];
+ int on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
+
+ pm_genpd_init(&pd->pd, NULL, !on);
+ }
#ifdef CONFIG_S5P_DEV_FIMD0
exynos_pm_add_dev_to_genpd(&s5p_device_fimd0, &exynos4_pd_lcd0);
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index 4aacb66f7161..3a48c852be6c 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -315,7 +315,7 @@ static struct exynos_pmu_conf exynos5250_pmu_config[] = {
{ PMU_TABLE_END,},
};
-void __iomem *exynos5_list_both_cnt_feed[] = {
+static void __iomem *exynos5_list_both_cnt_feed[] = {
EXYNOS5_ARM_CORE0_OPTION,
EXYNOS5_ARM_CORE1_OPTION,
EXYNOS5_ARM_COMMON_OPTION,
@@ -329,7 +329,7 @@ void __iomem *exynos5_list_both_cnt_feed[] = {
EXYNOS5_TOP_PWR_SYSMEM_OPTION,
};
-void __iomem *exynos5_list_diable_wfi_wfe[] = {
+static void __iomem *exynos5_list_diable_wfi_wfe[] = {
EXYNOS5_ARM_CORE1_OPTION,
EXYNOS5_FSYS_ARM_OPTION,
EXYNOS5_ISP_ARM_OPTION,
@@ -390,6 +390,8 @@ void exynos_sys_powerdown_conf(enum sys_powerdown mode)
static int __init exynos_pmu_init(void)
{
+ unsigned int value;
+
exynos_pmu_config = exynos4210_pmu_config;
if (soc_is_exynos4210()) {
@@ -399,6 +401,18 @@ static int __init exynos_pmu_init(void)
exynos_pmu_config = exynos4x12_pmu_config;
pr_info("EXYNOS4x12 PMU Initialize\n");
} else if (soc_is_exynos5250()) {
+ /*
+ * When SYS_WDTRESET is set, watchdog timer reset request
+ * is ignored by power management unit.
+ */
+ value = __raw_readl(EXYNOS5_AUTO_WDTRESET_DISABLE);
+ value &= ~EXYNOS5_SYS_WDTRESET;
+ __raw_writel(value, EXYNOS5_AUTO_WDTRESET_DISABLE);
+
+ value = __raw_readl(EXYNOS5_MASK_WDTRESET_REQUEST);
+ value &= ~EXYNOS5_SYS_WDTRESET;
+ __raw_writel(value, EXYNOS5_MASK_WDTRESET_REQUEST);
+
exynos_pmu_config = exynos5250_pmu_config;
pr_info("EXYNOS5250 PMU Initialize\n");
} else {
diff --git a/arch/arm/mach-exynos/setup-spi.c b/arch/arm/mach-exynos/setup-spi.c
index 833ff40ee0e8..4999829d1c6e 100644
--- a/arch/arm/mach-exynos/setup-spi.c
+++ b/arch/arm/mach-exynos/setup-spi.c
@@ -9,21 +9,10 @@
*/
#include <linux/gpio.h>
-#include <linux/platform_device.h>
-
#include <plat/gpio-cfg.h>
-#include <plat/s3c64xx-spi.h>
#ifdef CONFIG_S3C64XX_DEV_SPI0
-struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
- .fifo_lvl_mask = 0x1ff,
- .rx_lvl_offset = 15,
- .high_speed = 1,
- .clk_from_cmu = true,
- .tx_st_done = 25,
-};
-
-int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi0_cfg_gpio(void)
{
s3c_gpio_cfgpin(EXYNOS4_GPB(0), S3C_GPIO_SFN(2));
s3c_gpio_setpull(EXYNOS4_GPB(0), S3C_GPIO_PULL_UP);
@@ -34,15 +23,7 @@ int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
#endif
#ifdef CONFIG_S3C64XX_DEV_SPI1
-struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = {
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 15,
- .high_speed = 1,
- .clk_from_cmu = true,
- .tx_st_done = 25,
-};
-
-int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi1_cfg_gpio(void)
{
s3c_gpio_cfgpin(EXYNOS4_GPB(4), S3C_GPIO_SFN(2));
s3c_gpio_setpull(EXYNOS4_GPB(4), S3C_GPIO_PULL_UP);
@@ -53,15 +34,7 @@ int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
#endif
#ifdef CONFIG_S3C64XX_DEV_SPI2
-struct s3c64xx_spi_info s3c64xx_spi2_pdata __initdata = {
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 15,
- .high_speed = 1,
- .clk_from_cmu = true,
- .tx_st_done = 25,
-};
-
-int s3c64xx_spi2_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi2_cfg_gpio(void)
{
s3c_gpio_cfgpin(EXYNOS4_GPC1(1), S3C_GPIO_SFN(5));
s3c_gpio_setpull(EXYNOS4_GPC1(1), S3C_GPIO_PULL_UP);
diff --git a/arch/arm/mach-exynos/setup-usb-phy.c b/arch/arm/mach-exynos/setup-usb-phy.c
index 1af0a7f44e00..b81cc569a8dd 100644
--- a/arch/arm/mach-exynos/setup-usb-phy.c
+++ b/arch/arm/mach-exynos/setup-usb-phy.c
@@ -31,27 +31,55 @@ static void exynos4210_usb_phy_clkset(struct platform_device *pdev)
struct clk *xusbxti_clk;
u32 phyclk;
- /* set clock frequency for PLL */
- phyclk = readl(EXYNOS4_PHYCLK) & ~CLKSEL_MASK;
-
xusbxti_clk = clk_get(&pdev->dev, "xusbxti");
if (xusbxti_clk && !IS_ERR(xusbxti_clk)) {
- switch (clk_get_rate(xusbxti_clk)) {
- case 12 * MHZ:
- phyclk |= CLKSEL_12M;
- break;
- case 24 * MHZ:
- phyclk |= CLKSEL_24M;
- break;
- default:
- case 48 * MHZ:
- /* default reference clock */
- break;
+ if (soc_is_exynos4210()) {
+ /* set clock frequency for PLL */
+ phyclk = readl(EXYNOS4_PHYCLK) & ~EXYNOS4210_CLKSEL_MASK;
+
+ switch (clk_get_rate(xusbxti_clk)) {
+ case 12 * MHZ:
+ phyclk |= EXYNOS4210_CLKSEL_12M;
+ break;
+ case 48 * MHZ:
+ phyclk |= EXYNOS4210_CLKSEL_48M;
+ break;
+ default:
+ case 24 * MHZ:
+ phyclk |= EXYNOS4210_CLKSEL_24M;
+ break;
+ }
+ writel(phyclk, EXYNOS4_PHYCLK);
+ } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
+ /* set clock frequency for PLL */
+ phyclk = readl(EXYNOS4_PHYCLK) & ~EXYNOS4X12_CLKSEL_MASK;
+
+ switch (clk_get_rate(xusbxti_clk)) {
+ case 9600 * KHZ:
+ phyclk |= EXYNOS4X12_CLKSEL_9600K;
+ break;
+ case 10 * MHZ:
+ phyclk |= EXYNOS4X12_CLKSEL_10M;
+ break;
+ case 12 * MHZ:
+ phyclk |= EXYNOS4X12_CLKSEL_12M;
+ break;
+ case 19200 * KHZ:
+ phyclk |= EXYNOS4X12_CLKSEL_19200K;
+ break;
+ case 20 * MHZ:
+ phyclk |= EXYNOS4X12_CLKSEL_20M;
+ break;
+ default:
+ case 24 * MHZ:
+ /* default reference clock */
+ phyclk |= EXYNOS4X12_CLKSEL_24M;
+ break;
+ }
+ writel(phyclk, EXYNOS4_PHYCLK);
}
clk_put(xusbxti_clk);
}
-
- writel(phyclk, EXYNOS4_PHYCLK);
}
static int exynos4210_usb_phy0_init(struct platform_device *pdev)
diff --git a/arch/arm/mach-highbank/Makefile b/arch/arm/mach-highbank/Makefile
index ded4652ada80..3ec8bdd25d09 100644
--- a/arch/arm/mach-highbank/Makefile
+++ b/arch/arm/mach-highbank/Makefile
@@ -1,4 +1,4 @@
-obj-y := clock.o highbank.o system.o smc.o
+obj-y := highbank.o system.o smc.o
plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_smc.o :=-Wa,-march=armv7-a$(plus_sec)
diff --git a/arch/arm/mach-highbank/clock.c b/arch/arm/mach-highbank/clock.c
deleted file mode 100644
index c25a2ae4fde1..000000000000
--- a/arch/arm/mach-highbank/clock.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2011 Calxeda, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-
-struct clk {
- unsigned long rate;
-};
-
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-
-void clk_disable(struct clk *clk)
-{}
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- return clk->rate;
-}
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- return 0;
-}
-
-static struct clk eclk = { .rate = 200000000 };
-static struct clk pclk = { .rate = 150000000 };
-
-static struct clk_lookup lookups[] = {
- { .clk = &pclk, .con_id = "apb_pclk", },
- { .clk = &pclk, .dev_id = "sp804", },
- { .clk = &eclk, .dev_id = "ffe0e000.sdhci", },
- { .clk = &pclk, .dev_id = "fff36000.serial", },
-};
-
-void __init highbank_clocks_init(void)
-{
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-}
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 8777612b1a42..d75b0a78d88a 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -105,6 +105,11 @@ static void __init highbank_init_irq(void)
#endif
}
+static struct clk_lookup lookup = {
+ .dev_id = "sp804",
+ .con_id = NULL,
+};
+
static void __init highbank_timer_init(void)
{
int irq;
@@ -122,6 +127,8 @@ static void __init highbank_timer_init(void)
irq = irq_of_parse_and_map(np, 0);
highbank_clocks_init();
+ lookup.clk = of_clk_get(np, 0);
+ clkdev_add(&lookup);
sp804_clocksource_and_sched_clock_init(timer_base + 0x20, "timer1");
sp804_clockevents_init(timer_base, irq, "timer0");
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index eff4db5de0dd..afd542ad6f97 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -52,6 +52,7 @@ config SOC_IMX25
select ARCH_MX25
select COMMON_CLK
select CPU_ARM926T
+ select HAVE_CAN_FLEXCAN if CAN
select ARCH_MXC_IOMUX_V3
select MXC_AVIC
@@ -73,12 +74,13 @@ config SOC_IMX31
config SOC_IMX35
bool
- select CPU_V6
+ select CPU_V6K
select ARCH_MXC_IOMUX_V3
select COMMON_CLK
select HAVE_EPIT
select MXC_AVIC
select SMP_ON_UP if SMP
+ select HAVE_CAN_FLEXCAN if CAN
config SOC_IMX5
select CPU_V7
@@ -105,6 +107,7 @@ config SOC_IMX53
select SOC_IMX5
select ARCH_MX5
select ARCH_MX53
+ select HAVE_CAN_FLEXCAN if CAN
if ARCH_IMX_V4_V5
@@ -158,7 +161,6 @@ config MACH_MX25_3DS
select IMX_HAVE_PLATFORM_IMX2_WDT
select IMX_HAVE_PLATFORM_IMXDI_RTC
select IMX_HAVE_PLATFORM_IMX_I2C
- select IMX_HAVE_PLATFORM_IMX_SSI
select IMX_HAVE_PLATFORM_IMX_FB
select IMX_HAVE_PLATFORM_IMX_KEYPAD
select IMX_HAVE_PLATFORM_IMX_UART
@@ -380,7 +382,6 @@ config MACH_IMX27IPCAM
config MACH_IMX27_DT
bool "Support i.MX27 platforms from device tree"
select SOC_IMX27
- select USE_OF
help
Include support for Freescale i.MX27 based platforms
using the device tree for discovery
@@ -557,6 +558,14 @@ config MACH_BUG
Include support for BUGBase 1.3 platform. This includes specific
configurations for the board and its peripherals.
+config MACH_IMX31_DT
+ bool "Support i.MX31 platforms from device tree"
+ select SOC_IMX31
+ select USE_OF
+ help
+ Include support for Freescale i.MX31 based platforms
+ using the device tree for discovery.
+
comment "MX35 platforms:"
config MACH_PCM043
@@ -589,6 +598,7 @@ config MACH_MX35_3DS
select IMX_HAVE_PLATFORM_IPU_CORE
select IMX_HAVE_PLATFORM_MXC_EHCI
select IMX_HAVE_PLATFORM_MXC_NAND
+ select IMX_HAVE_PLATFORM_MXC_RTC
select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
help
Include support for MX35PDK platform. This includes specific
@@ -663,7 +673,6 @@ comment "i.MX51 machines:"
config MACH_IMX51_DT
bool "Support i.MX51 platforms from device tree"
select SOC_IMX51
- select USE_OF
select MACH_MX51_BABBAGE
help
Include support for Freescale i.MX51 based platforms
@@ -759,7 +768,6 @@ comment "i.MX53 machines:"
config MACH_IMX53_DT
bool "Support i.MX53 platforms from device tree"
select SOC_IMX53
- select USE_OF
select MACH_MX53_ARD
select MACH_MX53_EVK
select MACH_MX53_LOCO
@@ -826,13 +834,14 @@ config SOC_IMX6Q
select COMMON_CLK
select CPU_V7
select HAVE_ARM_SCU
+ select HAVE_CAN_FLEXCAN if CAN
select HAVE_IMX_GPC
select HAVE_IMX_MMDC
select HAVE_IMX_SRC
select HAVE_SMP
+ select MFD_ANATOP
select PINCTRL
select PINCTRL_IMX6Q
- select USE_OF
help
This enables support for Freescale i.MX6 Quad processor.
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index ff29421414f2..07f7c226e4cf 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_MACH_QONG) += mach-qong.o
obj-$(CONFIG_MACH_ARMADILLO5X0) += mach-armadillo5x0.o
obj-$(CONFIG_MACH_KZM_ARM11_01) += mach-kzm_arm11_01.o
obj-$(CONFIG_MACH_BUG) += mach-bug.o
+obj-$(CONFIG_MACH_IMX31_DT) += imx31-dt.o
# i.MX35 based machines
obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
index 295cbd7c08dc..7aa6313fb167 100644
--- a/arch/arm/mach-imx/clk-imx27.c
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -256,7 +256,7 @@ int __init mx27_clocks_init(unsigned long fref)
clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL);
clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL);
clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
- clk_register_clkdev(clk[rtc_ipg_gate], "rtc", NULL);
+ clk_register_clkdev(clk[rtc_ipg_gate], NULL, "mxc_rtc");
clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
clk_register_clkdev(clk[cpu_div], "cpu", NULL);
clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL);
@@ -267,6 +267,8 @@ int __init mx27_clocks_init(unsigned long fref)
clk_prepare_enable(clk[emi_ahb_gate]);
+ imx_print_silicon_rev("i.MX27", mx27_revision());
+
return 0;
}
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
index c9a06d800f8e..8e19e70f90f9 100644
--- a/arch/arm/mach-imx/clk-imx31.c
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -20,6 +20,7 @@
#include <linux/clkdev.h>
#include <linux/io.h>
#include <linux/err.h>
+#include <linux/of.h>
#include <mach/hardware.h>
#include <mach/mx31.h>
@@ -123,7 +124,7 @@ int __init mx31_clocks_init(unsigned long fref)
clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2");
clk_register_clkdev(clk[pwm_gate], "pwm", NULL);
clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
- clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
+ clk_register_clkdev(clk[rtc_gate], NULL, "mxc_rtc");
clk_register_clkdev(clk[epit1_gate], "epit", NULL);
clk_register_clkdev(clk[epit2_gate], "epit", NULL);
clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0");
@@ -165,7 +166,7 @@ int __init mx31_clocks_init(unsigned long fref)
clk_register_clkdev(clk[firi_gate], "firi", NULL);
clk_register_clkdev(clk[ata_gate], NULL, "pata_imx");
clk_register_clkdev(clk[rtic_gate], "rtic", NULL);
- clk_register_clkdev(clk[rng_gate], "rng", NULL);
+ clk_register_clkdev(clk[rng_gate], NULL, "mxc_rnga");
clk_register_clkdev(clk[sdma_gate], NULL, "imx31-sdma");
clk_register_clkdev(clk[iim_gate], "iim", NULL);
@@ -179,3 +180,21 @@ int __init mx31_clocks_init(unsigned long fref)
return 0;
}
+
+#ifdef CONFIG_OF
+int __init mx31_clocks_init_dt(void)
+{
+ struct device_node *np;
+ u32 fref = 26000000; /* default */
+
+ for_each_compatible_node(np, NULL, "fixed-clock") {
+ if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
+ continue;
+
+ if (!of_property_read_u32(np, "clock-frequency", &fref))
+ break;
+ }
+
+ return mx31_clocks_init(fref);
+}
+#endif
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index 920a8cc42726..c6422fb10bae 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -201,7 +201,6 @@ int __init mx35_clocks_init()
pr_err("i.MX35 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
-
clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
@@ -264,6 +263,14 @@ int __init mx35_clocks_init()
clk_prepare_enable(clk[iim_gate]);
clk_prepare_enable(clk[emi_gate]);
+ /*
+ * SCC is needed to boot via mmc after a watchdog reset. The clock code
+ * before conversion to common clk also enabled UART1 (which isn't
+ * handled here and not needed for mmc) and IIM (which is enabled
+ * unconditionally above).
+ */
+ clk_prepare_enable(clk[scc_gate]);
+
imx_print_silicon_rev("i.MX35", mx35_revision());
#ifdef CONFIG_MXC_USE_EPIT
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index a2200c77bf70..f6086693ebd2 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -58,7 +58,7 @@ enum imx5_clks {
tve_s, uart1_ipg_gate, uart1_per_gate, uart2_ipg_gate,
uart2_per_gate, uart3_ipg_gate, uart3_per_gate, i2c1_gate, i2c2_gate,
gpt_ipg_gate, pwm1_ipg_gate, pwm1_hf_gate, pwm2_ipg_gate, pwm2_hf_gate,
- gpt_gate, fec_gate, usboh3_per_gate, esdhc1_ipg_gate, esdhc2_ipg_gate,
+ gpt_hf_gate, fec_gate, usboh3_per_gate, esdhc1_ipg_gate, esdhc2_ipg_gate,
esdhc3_ipg_gate, esdhc4_ipg_gate, ssi1_ipg_gate, ssi2_ipg_gate,
ssi3_ipg_gate, ecspi1_ipg_gate, ecspi1_per_gate, ecspi2_ipg_gate,
ecspi2_per_gate, cspi_ipg_gate, sdma_gate, emi_slow_gate, ipu_s,
@@ -81,6 +81,7 @@ enum imx5_clks {
ssi1_root_podf, ssi2_root_pred, ssi2_root_podf, ssi_ext1_pred,
ssi_ext1_podf, ssi_ext2_pred, ssi_ext2_podf, ssi1_root_gate,
ssi2_root_gate, ssi3_root_gate, ssi_ext1_gate, ssi_ext2_gate,
+ epit1_ipg_gate, epit1_hf_gate, epit2_ipg_gate, epit2_hf_gate,
clk_max
};
@@ -167,12 +168,12 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk[uart3_per_gate] = imx_clk_gate2("uart3_per_gate", "uart_root", MXC_CCM_CCGR1, 16);
clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per_root", MXC_CCM_CCGR1, 18);
clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per_root", MXC_CCM_CCGR1, 20);
- clk[gpt_ipg_gate] = imx_clk_gate2("gpt_ipg_gate", "ipg", MXC_CCM_CCGR2, 20);
clk[pwm1_ipg_gate] = imx_clk_gate2("pwm1_ipg_gate", "ipg", MXC_CCM_CCGR2, 10);
- clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
+ clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "per_root", MXC_CCM_CCGR2, 12);
clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
- clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
- clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per_root", MXC_CCM_CCGR2, 18);
+ clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "per_root", MXC_CCM_CCGR2, 16);
+ clk[gpt_ipg_gate] = imx_clk_gate2("gpt_ipg_gate", "ipg", MXC_CCM_CCGR2, 18);
+ clk[gpt_hf_gate] = imx_clk_gate2("gpt_hf_gate", "per_root", MXC_CCM_CCGR2, 20);
clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
@@ -226,13 +227,17 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk[ssi3_root_gate] = imx_clk_gate2("ssi3_root_gate", "ssi3_root_sel", MXC_CCM_CCGR3, 26);
clk[ssi_ext1_gate] = imx_clk_gate2("ssi_ext1_gate", "ssi_ext1_com_sel", MXC_CCM_CCGR3, 28);
clk[ssi_ext2_gate] = imx_clk_gate2("ssi_ext2_gate", "ssi_ext2_com_sel", MXC_CCM_CCGR3, 30);
+ clk[epit1_ipg_gate] = imx_clk_gate2("epit1_ipg_gate", "ipg", MXC_CCM_CCGR2, 2);
+ clk[epit1_hf_gate] = imx_clk_gate2("epit1_hf_gate", "per_root", MXC_CCM_CCGR2, 4);
+ clk[epit2_ipg_gate] = imx_clk_gate2("epit2_ipg_gate", "ipg", MXC_CCM_CCGR2, 6);
+ clk[epit2_hf_gate] = imx_clk_gate2("epit2_hf_gate", "per_root", MXC_CCM_CCGR2, 8);
for (i = 0; i < ARRAY_SIZE(clk); i++)
if (IS_ERR(clk[i]))
pr_err("i.MX5 clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
- clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[gpt_hf_gate], "per", "imx-gpt.0");
clk_register_clkdev(clk[gpt_ipg_gate], "ipg", "imx-gpt.0");
clk_register_clkdev(clk[uart1_per_gate], "per", "imx21-uart.0");
clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
@@ -248,7 +253,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk_register_clkdev(clk[ecspi1_ipg_gate], "ipg", "imx51-ecspi.0");
clk_register_clkdev(clk[ecspi2_per_gate], "per", "imx51-ecspi.1");
clk_register_clkdev(clk[ecspi2_ipg_gate], "ipg", "imx51-ecspi.1");
- clk_register_clkdev(clk[cspi_ipg_gate], NULL, "imx51-cspi.0");
+ clk_register_clkdev(clk[cspi_ipg_gate], NULL, "imx35-cspi.2");
clk_register_clkdev(clk[pwm1_ipg_gate], "pwm", "mxc_pwm.0");
clk_register_clkdev(clk[pwm2_ipg_gate], "pwm", "mxc_pwm.1");
clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
@@ -279,6 +284,11 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
clk_register_clkdev(clk[dummy], NULL, "imx-keypad");
clk_register_clkdev(clk[tve_gate], NULL, "imx-tve.0");
clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx-tve.0");
+ clk_register_clkdev(clk[gpc_dvfs], "gpc_dvfs", NULL);
+ clk_register_clkdev(clk[epit1_ipg_gate], "ipg", "imx-epit.0");
+ clk_register_clkdev(clk[epit1_hf_gate], "per", "imx-epit.0");
+ clk_register_clkdev(clk[epit2_ipg_gate], "ipg", "imx-epit.1");
+ clk_register_clkdev(clk[epit2_hf_gate], "per", "imx-epit.1");
/* Set SDHC parents to be PLL2 */
clk_set_parent(clk[esdhc_a_sel], clk[pll2_sw]);
@@ -336,7 +346,6 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
clk_register_clkdev(clk[mx51_mipi], "mipi_hsp", NULL);
clk_register_clkdev(clk[vpu_gate], NULL, "imx51-vpu.0");
clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
- clk_register_clkdev(clk[gpc_dvfs], "gpc_dvfs", NULL);
clk_register_clkdev(clk[ipu_gate], "bus", "imx51-ipu");
clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx51-ipu");
clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx51-ipu");
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 17dc66a085a5..ea89520b6e22 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -147,18 +147,19 @@ enum mx6q_clks {
esai, gpt_ipg, gpt_ipg_per, gpu2d_core, gpu3d_core, hdmi_iahb,
hdmi_isfr, i2c1, i2c2, i2c3, iim, enfc, ipu1, ipu1_di0, ipu1_di1, ipu2,
ipu2_di0, ldb_di0, ldb_di1, ipu2_di1, hsi_tx, mlb, mmdc_ch0_axi,
- mmdc_ch1_axi, ocram, openvg_axi, pcie_axi, pwm1, pwm2, pwm3, pwm4,
+ mmdc_ch1_axi, ocram, openvg_axi, pcie_axi, pwm1, pwm2, pwm3, pwm4, per1_bch,
gpmi_bch_apb, gpmi_bch, gpmi_io, gpmi_apb, sata, sdma, spba, ssi1,
ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
- ssi2_ipg, ssi3_ipg, clk_max
+ ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2,
+ clk_max
};
static struct clk *clk[clk_max];
static enum mx6q_clks const clks_init_on[] __initconst = {
- mmdc_ch0_axi, mmdc_ch1_axi,
+ mmdc_ch0_axi, rom,
};
int __init mx6q_clocks_init(void)
@@ -197,6 +198,9 @@ int __init mx6q_clocks_init(void)
clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host","osc", base + 0x20, 0x2000, 0x3);
clk[pll8_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll8_enet", "osc", base + 0xe0, 0x182000, 0x3);
+ clk[usbphy1] = imx_clk_gate("usbphy1", "pll3_usb_otg", base + 0x10, 6);
+ clk[usbphy2] = imx_clk_gate("usbphy2", "pll7_usb_host", base + 0x20, 6);
+
/* name parent_name reg idx */
clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
@@ -317,7 +321,7 @@ int __init mx6q_clocks_init(void)
clk[ahb] = imx_clk_busy_divider("ahb", "periph", base + 0x14, 10, 3, base + 0x48, 1);
/* name parent_name reg shift */
- clk[apbh_dma] = imx_clk_gate2("apbh_dma", "ahb", base + 0x68, 4);
+ clk[apbh_dma] = imx_clk_gate2("apbh_dma", "usdhc3", base + 0x68, 4);
clk[asrc] = imx_clk_gate2("asrc", "asrc_podf", base + 0x68, 6);
clk[can1_ipg] = imx_clk_gate2("can1_ipg", "ipg", base + 0x68, 14);
clk[can1_serial] = imx_clk_gate2("can1_serial", "can_root", base + 0x68, 16);
@@ -356,6 +360,7 @@ int __init mx6q_clocks_init(void)
clk[ocram] = imx_clk_gate2("ocram", "ahb", base + 0x74, 28);
clk[openvg_axi] = imx_clk_gate2("openvg_axi", "axi", base + 0x74, 30);
clk[pcie_axi] = imx_clk_gate2("pcie_axi", "pcie_axi_sel", base + 0x78, 0);
+ clk[per1_bch] = imx_clk_gate2("per1_bch", "usdhc3", base + 0x78, 12);
clk[pwm1] = imx_clk_gate2("pwm1", "ipg_per", base + 0x78, 16);
clk[pwm2] = imx_clk_gate2("pwm2", "ipg_per", base + 0x78, 18);
clk[pwm3] = imx_clk_gate2("pwm3", "ipg_per", base + 0x78, 20);
@@ -364,6 +369,7 @@ int __init mx6q_clocks_init(void)
clk[gpmi_bch] = imx_clk_gate2("gpmi_bch", "usdhc4", base + 0x78, 26);
clk[gpmi_io] = imx_clk_gate2("gpmi_io", "enfc", base + 0x78, 28);
clk[gpmi_apb] = imx_clk_gate2("gpmi_apb", "usdhc3", base + 0x78, 30);
+ clk[rom] = imx_clk_gate2("rom", "ahb", base + 0x7c, 0);
clk[sata] = imx_clk_gate2("sata", "ipg", base + 0x7c, 4);
clk[sdma] = imx_clk_gate2("sdma", "ahb", base + 0x7c, 6);
clk[spba] = imx_clk_gate2("spba", "ipg", base + 0x7c, 12);
@@ -386,12 +392,21 @@ int __init mx6q_clocks_init(void)
pr_err("i.MX6q clk %d: register failed with %ld\n",
i, PTR_ERR(clk[i]));
- clk_register_clkdev(clk[mmdc_ch0_axi], NULL, "mmdc_ch0_axi");
- clk_register_clkdev(clk[mmdc_ch1_axi], NULL, "mmdc_ch1_axi");
clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
clk_register_clkdev(clk[twd], NULL, "smp_twd");
- clk_register_clkdev(clk[usboh3], NULL, "usboh3");
+ clk_register_clkdev(clk[apbh_dma], NULL, "110000.dma-apbh");
+ clk_register_clkdev(clk[per1_bch], "per1_bch", "112000.gpmi-nand");
+ clk_register_clkdev(clk[gpmi_bch_apb], "gpmi_bch_apb", "112000.gpmi-nand");
+ clk_register_clkdev(clk[gpmi_bch], "gpmi_bch", "112000.gpmi-nand");
+ clk_register_clkdev(clk[gpmi_apb], "gpmi_apb", "112000.gpmi-nand");
+ clk_register_clkdev(clk[gpmi_io], "gpmi_io", "112000.gpmi-nand");
+ clk_register_clkdev(clk[usboh3], NULL, "2184000.usb");
+ clk_register_clkdev(clk[usboh3], NULL, "2184200.usb");
+ clk_register_clkdev(clk[usboh3], NULL, "2184400.usb");
+ clk_register_clkdev(clk[usboh3], NULL, "2184600.usb");
+ clk_register_clkdev(clk[usbphy1], NULL, "20c9000.usbphy");
+ clk_register_clkdev(clk[usbphy2], NULL, "20ca000.usbphy");
clk_register_clkdev(clk[uart_serial], "per", "2020000.serial");
clk_register_clkdev(clk[uart_ipg], "ipg", "2020000.serial");
clk_register_clkdev(clk[uart_serial], "per", "21e8000.serial");
diff --git a/arch/arm/mach-imx/devices-imx21.h b/arch/arm/mach-imx/devices-imx21.h
index 2628e0c474dc..93ece55f75df 100644
--- a/arch/arm/mach-imx/devices-imx21.h
+++ b/arch/arm/mach-imx/devices-imx21.h
@@ -14,7 +14,7 @@ extern const struct imx_imx21_hcd_data imx21_imx21_hcd_data;
imx_add_imx21_hcd(&imx21_imx21_hcd_data, pdata)
extern const struct imx_imx2_wdt_data imx21_imx2_wdt_data;
-#define imx21_add_imx2_wdt(pdata) \
+#define imx21_add_imx2_wdt() \
imx_add_imx2_wdt(&imx21_imx2_wdt_data)
extern const struct imx_imx_fb_data imx21_imx_fb_data;
@@ -50,7 +50,7 @@ extern const struct imx_mxc_nand_data imx21_mxc_nand_data;
imx_add_mxc_nand(&imx21_mxc_nand_data, pdata)
extern const struct imx_mxc_w1_data imx21_mxc_w1_data;
-#define imx21_add_mxc_w1(pdata) \
+#define imx21_add_mxc_w1() \
imx_add_mxc_w1(&imx21_mxc_w1_data)
extern const struct imx_spi_imx_data imx21_cspi_data[];
diff --git a/arch/arm/mach-imx/devices-imx25.h b/arch/arm/mach-imx/devices-imx25.h
index efa0761c508d..f8e03dd1f116 100644
--- a/arch/arm/mach-imx/devices-imx25.h
+++ b/arch/arm/mach-imx/devices-imx25.h
@@ -24,11 +24,11 @@ extern const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data;
imx_add_fsl_usb2_udc(&imx25_fsl_usb2_udc_data, pdata)
extern struct imx_imxdi_rtc_data imx25_imxdi_rtc_data;
-#define imx25_add_imxdi_rtc(pdata) \
+#define imx25_add_imxdi_rtc() \
imx_add_imxdi_rtc(&imx25_imxdi_rtc_data)
extern const struct imx_imx2_wdt_data imx25_imx2_wdt_data;
-#define imx25_add_imx2_wdt(pdata) \
+#define imx25_add_imx2_wdt() \
imx_add_imx2_wdt(&imx25_imx2_wdt_data)
extern const struct imx_imx_fb_data imx25_imx_fb_data;
diff --git a/arch/arm/mach-imx/devices-imx27.h b/arch/arm/mach-imx/devices-imx27.h
index 28537a5d9048..436c5720fe6a 100644
--- a/arch/arm/mach-imx/devices-imx27.h
+++ b/arch/arm/mach-imx/devices-imx27.h
@@ -18,7 +18,7 @@ extern const struct imx_fsl_usb2_udc_data imx27_fsl_usb2_udc_data;
imx_add_fsl_usb2_udc(&imx27_fsl_usb2_udc_data, pdata)
extern const struct imx_imx2_wdt_data imx27_imx2_wdt_data;
-#define imx27_add_imx2_wdt(pdata) \
+#define imx27_add_imx2_wdt() \
imx_add_imx2_wdt(&imx27_imx2_wdt_data)
extern const struct imx_imx_fb_data imx27_imx_fb_data;
@@ -50,7 +50,7 @@ extern const struct imx_imx_uart_1irq_data imx27_imx_uart_data[];
extern const struct imx_mx2_camera_data imx27_mx2_camera_data;
#define imx27_add_mx2_camera(pdata) \
imx_add_mx2_camera(&imx27_mx2_camera_data, pdata)
-#define imx27_add_mx2_emmaprp(pdata) \
+#define imx27_add_mx2_emmaprp() \
imx_add_mx2_emmaprp(&imx27_mx2_camera_data)
extern const struct imx_mxc_ehci_data imx27_mxc_ehci_otg_data;
@@ -69,7 +69,7 @@ extern const struct imx_mxc_nand_data imx27_mxc_nand_data;
imx_add_mxc_nand(&imx27_mxc_nand_data, pdata)
extern const struct imx_mxc_w1_data imx27_mxc_w1_data;
-#define imx27_add_mxc_w1(pdata) \
+#define imx27_add_mxc_w1() \
imx_add_mxc_w1(&imx27_mxc_w1_data)
extern const struct imx_spi_imx_data imx27_cspi_data[];
diff --git a/arch/arm/mach-imx/devices-imx31.h b/arch/arm/mach-imx/devices-imx31.h
index 488e241a6db6..8b2ceb45bb83 100644
--- a/arch/arm/mach-imx/devices-imx31.h
+++ b/arch/arm/mach-imx/devices-imx31.h
@@ -14,7 +14,7 @@ extern const struct imx_fsl_usb2_udc_data imx31_fsl_usb2_udc_data;
imx_add_fsl_usb2_udc(&imx31_fsl_usb2_udc_data, pdata)
extern const struct imx_imx2_wdt_data imx31_imx2_wdt_data;
-#define imx31_add_imx2_wdt(pdata) \
+#define imx31_add_imx2_wdt() \
imx_add_imx2_wdt(&imx31_imx2_wdt_data)
extern const struct imx_imx_i2c_data imx31_imx_i2c_data[];
@@ -42,8 +42,8 @@ extern const struct imx_imx_uart_1irq_data imx31_imx_uart_data[];
#define imx31_add_imx_uart4(pdata) imx31_add_imx_uart(4, pdata)
extern const struct imx_ipu_core_data imx31_ipu_core_data;
-#define imx31_add_ipu_core(pdata) \
- imx_add_ipu_core(&imx31_ipu_core_data, pdata)
+#define imx31_add_ipu_core() \
+ imx_add_ipu_core(&imx31_ipu_core_data)
#define imx31_alloc_mx3_camera(pdata) \
imx_alloc_mx3_camera(&imx31_ipu_core_data, pdata)
#define imx31_add_mx3_sdc_fb(pdata) \
@@ -65,11 +65,11 @@ extern const struct imx_mxc_nand_data imx31_mxc_nand_data;
imx_add_mxc_nand(&imx31_mxc_nand_data, pdata)
extern const struct imx_mxc_rtc_data imx31_mxc_rtc_data;
-#define imx31_add_mxc_rtc(pdata) \
+#define imx31_add_mxc_rtc() \
imx_add_mxc_rtc(&imx31_mxc_rtc_data)
extern const struct imx_mxc_w1_data imx31_mxc_w1_data;
-#define imx31_add_mxc_w1(pdata) \
+#define imx31_add_mxc_w1() \
imx_add_mxc_w1(&imx31_mxc_w1_data)
extern const struct imx_spi_imx_data imx31_cspi_data[];
diff --git a/arch/arm/mach-imx/devices-imx35.h b/arch/arm/mach-imx/devices-imx35.h
index 7b99ef0bb501..c3e9f206ac2b 100644
--- a/arch/arm/mach-imx/devices-imx35.h
+++ b/arch/arm/mach-imx/devices-imx35.h
@@ -24,7 +24,7 @@ extern const struct imx_flexcan_data imx35_flexcan_data[];
#define imx35_add_flexcan1(pdata) imx35_add_flexcan(1, pdata)
extern const struct imx_imx2_wdt_data imx35_imx2_wdt_data;
-#define imx35_add_imx2_wdt(pdata) \
+#define imx35_add_imx2_wdt() \
imx_add_imx2_wdt(&imx35_imx2_wdt_data)
extern const struct imx_imx_i2c_data imx35_imx_i2c_data[];
@@ -50,8 +50,8 @@ extern const struct imx_imx_uart_1irq_data imx35_imx_uart_data[];
#define imx35_add_imx_uart2(pdata) imx35_add_imx_uart(2, pdata)
extern const struct imx_ipu_core_data imx35_ipu_core_data;
-#define imx35_add_ipu_core(pdata) \
- imx_add_ipu_core(&imx35_ipu_core_data, pdata)
+#define imx35_add_ipu_core() \
+ imx_add_ipu_core(&imx35_ipu_core_data)
#define imx35_alloc_mx3_camera(pdata) \
imx_alloc_mx3_camera(&imx35_ipu_core_data, pdata)
#define imx35_add_mx3_sdc_fb(pdata) \
@@ -68,8 +68,12 @@ extern const struct imx_mxc_nand_data imx35_mxc_nand_data;
#define imx35_add_mxc_nand(pdata) \
imx_add_mxc_nand(&imx35_mxc_nand_data, pdata)
+extern const struct imx_mxc_rtc_data imx35_mxc_rtc_data;
+#define imx35_add_mxc_rtc() \
+ imx_add_mxc_rtc(&imx35_mxc_rtc_data)
+
extern const struct imx_mxc_w1_data imx35_mxc_w1_data;
-#define imx35_add_mxc_w1(pdata) \
+#define imx35_add_mxc_w1() \
imx_add_mxc_w1(&imx35_mxc_w1_data)
extern const struct imx_sdhci_esdhc_imx_data imx35_sdhci_esdhc_imx_data[];
diff --git a/arch/arm/mach-imx/devices-imx51.h b/arch/arm/mach-imx/devices-imx51.h
index af488bc0e225..9f1718725195 100644
--- a/arch/arm/mach-imx/devices-imx51.h
+++ b/arch/arm/mach-imx/devices-imx51.h
@@ -55,7 +55,7 @@ extern const struct imx_spi_imx_data imx51_ecspi_data[];
imx_add_spi_imx(&imx51_ecspi_data[id], pdata)
extern const struct imx_imx2_wdt_data imx51_imx2_wdt_data[];
-#define imx51_add_imx2_wdt(id, pdata) \
+#define imx51_add_imx2_wdt(id) \
imx_add_imx2_wdt(&imx51_imx2_wdt_data[id])
extern const struct imx_mxc_pwm_data imx51_mxc_pwm_data[];
diff --git a/arch/arm/mach-imx/devices-imx53.h b/arch/arm/mach-imx/devices-imx53.h
index 6e1e5d1f8c3a..77e0db96c448 100644
--- a/arch/arm/mach-imx/devices-imx53.h
+++ b/arch/arm/mach-imx/devices-imx53.h
@@ -30,7 +30,7 @@ extern const struct imx_spi_imx_data imx53_ecspi_data[];
imx_add_spi_imx(&imx53_ecspi_data[id], pdata)
extern const struct imx_imx2_wdt_data imx53_imx2_wdt_data[];
-#define imx53_add_imx2_wdt(id, pdata) \
+#define imx53_add_imx2_wdt(id) \
imx_add_imx2_wdt(&imx53_imx2_wdt_data[id])
extern const struct imx_imx_ssi_data imx53_imx_ssi_data[];
diff --git a/arch/arm/mach-imx/ehci-imx25.c b/arch/arm/mach-imx/ehci-imx25.c
index 865daf0b09e9..05bb41d99728 100644
--- a/arch/arm/mach-imx/ehci-imx25.c
+++ b/arch/arm/mach-imx/ehci-imx25.c
@@ -24,14 +24,18 @@
#define MX25_OTG_SIC_SHIFT 29
#define MX25_OTG_SIC_MASK (0x3 << MX25_OTG_SIC_SHIFT)
#define MX25_OTG_PM_BIT (1 << 24)
+#define MX25_OTG_PP_BIT (1 << 11)
+#define MX25_OTG_OCPOL_BIT (1 << 3)
#define MX25_H1_SIC_SHIFT 21
#define MX25_H1_SIC_MASK (0x3 << MX25_H1_SIC_SHIFT)
+#define MX25_H1_PP_BIT (1 << 18)
#define MX25_H1_PM_BIT (1 << 8)
#define MX25_H1_IPPUE_UP_BIT (1 << 7)
#define MX25_H1_IPPUE_DOWN_BIT (1 << 6)
#define MX25_H1_TLL_BIT (1 << 5)
#define MX25_H1_USBTE_BIT (1 << 4)
+#define MX25_H1_OCPOL_BIT (1 << 2)
int mx25_initialize_usb_hw(int port, unsigned int flags)
{
@@ -41,21 +45,35 @@ int mx25_initialize_usb_hw(int port, unsigned int flags)
switch (port) {
case 0: /* OTG port */
- v &= ~(MX25_OTG_SIC_MASK | MX25_OTG_PM_BIT);
+ v &= ~(MX25_OTG_SIC_MASK | MX25_OTG_PM_BIT | MX25_OTG_PP_BIT |
+ MX25_OTG_OCPOL_BIT);
v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX25_OTG_SIC_SHIFT;
if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
v |= MX25_OTG_PM_BIT;
+ if (flags & MXC_EHCI_PWR_PIN_ACTIVE_HIGH)
+ v |= MX25_OTG_PP_BIT;
+
+ if (!(flags & MXC_EHCI_OC_PIN_ACTIVE_LOW))
+ v |= MX25_OTG_OCPOL_BIT;
+
break;
case 1: /* H1 port */
- v &= ~(MX25_H1_SIC_MASK | MX25_H1_PM_BIT | MX25_H1_TLL_BIT |
- MX25_H1_USBTE_BIT | MX25_H1_IPPUE_DOWN_BIT | MX25_H1_IPPUE_UP_BIT);
+ v &= ~(MX25_H1_SIC_MASK | MX25_H1_PM_BIT | MX25_H1_PP_BIT |
+ MX25_H1_OCPOL_BIT | MX25_H1_TLL_BIT | MX25_H1_USBTE_BIT |
+ MX25_H1_IPPUE_DOWN_BIT | MX25_H1_IPPUE_UP_BIT);
v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX25_H1_SIC_SHIFT;
if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
v |= MX25_H1_PM_BIT;
+ if (flags & MXC_EHCI_PWR_PIN_ACTIVE_HIGH)
+ v |= MX25_H1_PP_BIT;
+
+ if (!(flags & MXC_EHCI_OC_PIN_ACTIVE_LOW))
+ v |= MX25_H1_OCPOL_BIT;
+
if (!(flags & MXC_EHCI_TTL_ENABLED))
v |= MX25_H1_TLL_BIT;
diff --git a/arch/arm/mach-imx/ehci-imx35.c b/arch/arm/mach-imx/ehci-imx35.c
index 001ec3971f5d..73574c30cf50 100644
--- a/arch/arm/mach-imx/ehci-imx35.c
+++ b/arch/arm/mach-imx/ehci-imx35.c
@@ -24,14 +24,18 @@
#define MX35_OTG_SIC_SHIFT 29
#define MX35_OTG_SIC_MASK (0x3 << MX35_OTG_SIC_SHIFT)
#define MX35_OTG_PM_BIT (1 << 24)
+#define MX35_OTG_PP_BIT (1 << 11)
+#define MX35_OTG_OCPOL_BIT (1 << 3)
#define MX35_H1_SIC_SHIFT 21
#define MX35_H1_SIC_MASK (0x3 << MX35_H1_SIC_SHIFT)
+#define MX35_H1_PP_BIT (1 << 18)
#define MX35_H1_PM_BIT (1 << 8)
#define MX35_H1_IPPUE_UP_BIT (1 << 7)
#define MX35_H1_IPPUE_DOWN_BIT (1 << 6)
#define MX35_H1_TLL_BIT (1 << 5)
#define MX35_H1_USBTE_BIT (1 << 4)
+#define MX35_H1_OCPOL_BIT (1 << 2)
int mx35_initialize_usb_hw(int port, unsigned int flags)
{
@@ -41,21 +45,35 @@ int mx35_initialize_usb_hw(int port, unsigned int flags)
switch (port) {
case 0: /* OTG port */
- v &= ~(MX35_OTG_SIC_MASK | MX35_OTG_PM_BIT);
+ v &= ~(MX35_OTG_SIC_MASK | MX35_OTG_PM_BIT | MX35_OTG_PP_BIT |
+ MX35_OTG_OCPOL_BIT);
v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX35_OTG_SIC_SHIFT;
if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
v |= MX35_OTG_PM_BIT;
+ if (flags & MXC_EHCI_PWR_PIN_ACTIVE_HIGH)
+ v |= MX35_OTG_PP_BIT;
+
+ if (!(flags & MXC_EHCI_OC_PIN_ACTIVE_LOW))
+ v |= MX35_OTG_OCPOL_BIT;
+
break;
case 1: /* H1 port */
- v &= ~(MX35_H1_SIC_MASK | MX35_H1_PM_BIT | MX35_H1_TLL_BIT |
- MX35_H1_USBTE_BIT | MX35_H1_IPPUE_DOWN_BIT | MX35_H1_IPPUE_UP_BIT);
+ v &= ~(MX35_H1_SIC_MASK | MX35_H1_PM_BIT | MX35_H1_PP_BIT |
+ MX35_H1_OCPOL_BIT | MX35_H1_TLL_BIT | MX35_H1_USBTE_BIT |
+ MX35_H1_IPPUE_DOWN_BIT | MX35_H1_IPPUE_UP_BIT);
v |= (flags & MXC_EHCI_INTERFACE_MASK) << MX35_H1_SIC_SHIFT;
if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
v |= MX35_H1_PM_BIT;
+ if (flags & MXC_EHCI_PWR_PIN_ACTIVE_HIGH)
+ v |= MX35_H1_PP_BIT;
+
+ if (!(flags & MXC_EHCI_OC_PIN_ACTIVE_LOW))
+ v |= MX35_H1_OCPOL_BIT;
+
if (!(flags & MXC_EHCI_TTL_ENABLED))
v |= MX35_H1_TLL_BIT;
diff --git a/arch/arm/mach-imx/ehci-imx5.c b/arch/arm/mach-imx/ehci-imx5.c
index c17fa131728b..a6a4afb0ad62 100644
--- a/arch/arm/mach-imx/ehci-imx5.c
+++ b/arch/arm/mach-imx/ehci-imx5.c
@@ -28,11 +28,14 @@
#define MXC_OTG_UCTRL_OPM_BIT (1 << 24) /* OTG power mask */
#define MXC_H1_UCTRL_H1UIE_BIT (1 << 12) /* Host1 ULPI interrupt enable */
#define MXC_H1_UCTRL_H1WIE_BIT (1 << 11) /* HOST1 wakeup intr enable */
-#define MXC_H1_UCTRL_H1PM_BIT (1 << 8) /* HOST1 power mask */
+#define MXC_H1_UCTRL_H1PM_BIT (1 << 8) /* HOST1 power mask */
/* USB_PHY_CTRL_FUNC */
+#define MXC_OTG_PHYCTRL_OC_POL_BIT (1 << 9) /* OTG Polarity of Overcurrent */
#define MXC_OTG_PHYCTRL_OC_DIS_BIT (1 << 8) /* OTG Disable Overcurrent Event */
+#define MXC_H1_OC_POL_BIT (1 << 6) /* UH1 Polarity of Overcurrent */
#define MXC_H1_OC_DIS_BIT (1 << 5) /* UH1 Disable Overcurrent Event */
+#define MXC_OTG_PHYCTRL_PWR_POL_BIT (1 << 3) /* OTG Power Pin Polarity */
/* USBH2CTRL */
#define MXC_H2_UCTRL_H2UIE_BIT (1 << 8)
@@ -80,13 +83,21 @@ int mx51_initialize_usb_hw(int port, unsigned int flags)
if (flags & MXC_EHCI_INTERNAL_PHY) {
v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+ if (flags & MXC_EHCI_OC_PIN_ACTIVE_LOW)
+ v |= MXC_OTG_PHYCTRL_OC_POL_BIT;
+ else
+ v &= ~MXC_OTG_PHYCTRL_OC_POL_BIT;
if (flags & MXC_EHCI_POWER_PINS_ENABLED) {
- /* OC/USBPWR is not used */
- v |= MXC_OTG_PHYCTRL_OC_DIS_BIT;
- } else {
/* OC/USBPWR is used */
v &= ~MXC_OTG_PHYCTRL_OC_DIS_BIT;
+ } else {
+ /* OC/USBPWR is not used */
+ v |= MXC_OTG_PHYCTRL_OC_DIS_BIT;
}
+ if (flags & MXC_EHCI_PWR_PIN_ACTIVE_HIGH)
+ v |= MXC_OTG_PHYCTRL_PWR_POL_BIT;
+ else
+ v &= ~MXC_OTG_PHYCTRL_PWR_POL_BIT;
__raw_writel(v, usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
v = __raw_readl(usbother_base + MXC_USBCTRL_OFFSET);
@@ -95,9 +106,9 @@ int mx51_initialize_usb_hw(int port, unsigned int flags)
else
v &= ~MXC_OTG_UCTRL_OWIE_BIT;/* OTG wakeup disable */
if (flags & MXC_EHCI_POWER_PINS_ENABLED)
- v |= MXC_OTG_UCTRL_OPM_BIT;
- else
v &= ~MXC_OTG_UCTRL_OPM_BIT;
+ else
+ v |= MXC_OTG_UCTRL_OPM_BIT;
__raw_writel(v, usbother_base + MXC_USBCTRL_OFFSET);
}
break;
@@ -113,12 +124,16 @@ int mx51_initialize_usb_hw(int port, unsigned int flags)
}
if (flags & MXC_EHCI_POWER_PINS_ENABLED)
- v &= ~MXC_H1_UCTRL_H1PM_BIT; /* HOST1 power mask used*/
+ v &= ~MXC_H1_UCTRL_H1PM_BIT; /* HOST1 power mask unused*/
else
v |= MXC_H1_UCTRL_H1PM_BIT; /* HOST1 power mask used*/
__raw_writel(v, usbother_base + MXC_USBCTRL_OFFSET);
v = __raw_readl(usbother_base + MXC_USB_PHY_CTR_FUNC_OFFSET);
+ if (flags & MXC_EHCI_OC_PIN_ACTIVE_LOW)
+ v |= MXC_H1_OC_POL_BIT;
+ else
+ v &= ~MXC_H1_OC_POL_BIT;
if (flags & MXC_EHCI_POWER_PINS_ENABLED)
v &= ~MXC_H1_OC_DIS_BIT; /* OC is used */
else
@@ -142,7 +157,7 @@ int mx51_initialize_usb_hw(int port, unsigned int flags)
}
if (flags & MXC_EHCI_POWER_PINS_ENABLED)
- v &= ~MXC_H2_UCTRL_H2PM_BIT; /* HOST2 power mask used*/
+ v &= ~MXC_H2_UCTRL_H2PM_BIT; /* HOST2 power mask unused*/
else
v |= MXC_H2_UCTRL_H2PM_BIT; /* HOST2 power mask used*/
__raw_writel(v, usbother_base + MXC_USBH2CTRL_OFFSET);
diff --git a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
index b46cab0ced53..fd3177f9e79a 100644
--- a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
@@ -266,7 +266,7 @@ static struct spi_board_info __maybe_unused
.bus_num = 0,
.chip_select = 0,
.max_speed_hz = 1500000,
- .irq = IRQ_GPIOD(25),
+ /* irq number is run-time assigned */
.platform_data = &ads7846_config,
.mode = SPI_MODE_2,
},
@@ -329,6 +329,7 @@ void __init eukrea_mbimx27_baseboard_init(void)
/* SPI_CS0 init */
mxc_gpio_mode(GPIO_PORTD | 28 | GPIO_GPIO | GPIO_OUT);
imx27_add_spi_imx0(&eukrea_mbimx27_spi0_data);
+ eukrea_mbimx27_spi_board_info[0].irq = gpio_to_irq(IMX_GPIO_NR(4, 25));
spi_register_board_info(eukrea_mbimx27_spi_board_info,
ARRAY_SIZE(eukrea_mbimx27_spi_board_info));
diff --git a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
index 557f6c486053..6e9dd12a6961 100644
--- a/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimxsd35-baseboard.c
@@ -95,10 +95,6 @@ static const struct fb_videomode fb_modedb[] = {
},
};
-static const struct ipu_platform_data mx3_ipu_data __initconst = {
- .irq_base = MXC_IPU_IRQ_START,
-};
-
static struct mx3fb_platform_data mx3fb_pdata __initdata = {
.name = "CMO-QVGA",
.mode = fb_modedb,
@@ -287,7 +283,7 @@ void __init eukrea_mbimxsd35_baseboard_init(void)
printk(KERN_ERR "error setting mbimxsd pads !\n");
imx35_add_imx_uart1(&uart_pdata);
- imx35_add_ipu_core(&mx3_ipu_data);
+ imx35_add_ipu_core();
imx35_add_mx3_sdc_fb(&mx3fb_pdata);
imx35_add_imx_ssi(0, &eukrea_mbimxsd_ssi_pdata);
diff --git a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c
index eee0cc8d92a4..e80d5235dac0 100644
--- a/arch/arm/mach-imx/imx27-dt.c
+++ b/arch/arm/mach-imx/imx27-dt.c
@@ -10,7 +10,6 @@
*/
#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
@@ -33,35 +32,8 @@ static const struct of_dev_auxdata imx27_auxdata_lookup[] __initconst = {
{ /* sentinel */ }
};
-static int __init imx27_avic_add_irq_domain(struct device_node *np,
- struct device_node *interrupt_parent)
-{
- irq_domain_add_legacy(np, 64, 0, 0, &irq_domain_simple_ops, NULL);
- return 0;
-}
-
-static int __init imx27_gpio_add_irq_domain(struct device_node *np,
- struct device_node *interrupt_parent)
-{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
-
- gpio_irq_base -= 32;
- irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops,
- NULL);
-
- return 0;
-}
-
-static const struct of_device_id imx27_irq_match[] __initconst = {
- { .compatible = "fsl,imx27-avic", .data = imx27_avic_add_irq_domain, },
- { .compatible = "fsl,imx27-gpio", .data = imx27_gpio_add_irq_domain, },
- { /* sentinel */ }
-};
-
static void __init imx27_dt_init(void)
{
- of_irq_init(imx27_irq_match);
-
of_platform_populate(NULL, of_default_bus_match_table,
imx27_auxdata_lookup, NULL);
}
@@ -75,7 +47,7 @@ static struct sys_timer imx27_timer = {
.init = imx27_timer_init,
};
-static const char *imx27_dt_board_compat[] __initdata = {
+static const char * const imx27_dt_board_compat[] __initconst = {
"fsl,imx27",
NULL
};
diff --git a/arch/arm/mach-imx/imx31-dt.c b/arch/arm/mach-imx/imx31-dt.c
new file mode 100644
index 000000000000..a68ba207b2b7
--- /dev/null
+++ b/arch/arm/mach-imx/imx31-dt.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2012 Sascha Hauer, Pengutronix
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/time.h>
+#include <mach/common.h>
+#include <mach/mx31.h>
+
+static const struct of_dev_auxdata imx31_auxdata_lookup[] __initconst = {
+ OF_DEV_AUXDATA("fsl,imx31-uart", MX31_UART1_BASE_ADDR,
+ "imx21-uart.0", NULL),
+ OF_DEV_AUXDATA("fsl,imx31-uart", MX31_UART2_BASE_ADDR,
+ "imx21-uart.1", NULL),
+ OF_DEV_AUXDATA("fsl,imx31-uart", MX31_UART3_BASE_ADDR,
+ "imx21-uart.2", NULL),
+ OF_DEV_AUXDATA("fsl,imx31-uart", MX31_UART4_BASE_ADDR,
+ "imx21-uart.3", NULL),
+ OF_DEV_AUXDATA("fsl,imx31-uart", MX31_UART5_BASE_ADDR,
+ "imx21-uart.4", NULL),
+ { /* sentinel */ }
+};
+
+static void __init imx31_dt_init(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table,
+ imx31_auxdata_lookup, NULL);
+}
+
+static void __init imx31_timer_init(void)
+{
+ mx31_clocks_init_dt();
+}
+
+static struct sys_timer imx31_timer = {
+ .init = imx31_timer_init,
+};
+
+static const char *imx31_dt_board_compat[] __initdata = {
+ "fsl,imx31",
+ NULL
+};
+
+DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)")
+ .map_io = mx31_map_io,
+ .init_early = imx31_init_early,
+ .init_irq = mx31_init_irq,
+ .handle_irq = imx31_handle_irq,
+ .timer = &imx31_timer,
+ .init_machine = imx31_dt_init,
+ .dt_compat = imx31_dt_board_compat,
+ .restart = mxc_restart,
+MACHINE_END
diff --git a/arch/arm/mach-imx/imx51-dt.c b/arch/arm/mach-imx/imx51-dt.c
index 18e78dba4298..d4067fe36357 100644
--- a/arch/arm/mach-imx/imx51-dt.c
+++ b/arch/arm/mach-imx/imx51-dt.c
@@ -11,7 +11,6 @@
*/
#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/machine.h>
@@ -45,30 +44,6 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
{ /* sentinel */ }
};
-static int __init imx51_tzic_add_irq_domain(struct device_node *np,
- struct device_node *interrupt_parent)
-{
- irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
- return 0;
-}
-
-static int __init imx51_gpio_add_irq_domain(struct device_node *np,
- struct device_node *interrupt_parent)
-{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
-
- gpio_irq_base -= 32;
- irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
-
- return 0;
-}
-
-static const struct of_device_id imx51_irq_match[] __initconst = {
- { .compatible = "fsl,imx51-tzic", .data = imx51_tzic_add_irq_domain, },
- { .compatible = "fsl,imx51-gpio", .data = imx51_gpio_add_irq_domain, },
- { /* sentinel */ }
-};
-
static const struct of_device_id imx51_iomuxc_of_match[] __initconst = {
{ .compatible = "fsl,imx51-iomuxc-babbage", .data = imx51_babbage_common_init, },
{ /* sentinel */ }
@@ -80,8 +55,6 @@ static void __init imx51_dt_init(void)
const struct of_device_id *of_id;
void (*func)(void);
- of_irq_init(imx51_irq_match);
-
pinctrl_provide_dummies();
node = of_find_matching_node(NULL, imx51_iomuxc_of_match);
diff --git a/arch/arm/mach-imx/imx53-dt.c b/arch/arm/mach-imx/imx53-dt.c
index eb04b6248e48..1b7a2fc36591 100644
--- a/arch/arm/mach-imx/imx53-dt.c
+++ b/arch/arm/mach-imx/imx53-dt.c
@@ -15,7 +15,6 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/machine.h>
@@ -52,30 +51,6 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
{ /* sentinel */ }
};
-static int __init imx53_tzic_add_irq_domain(struct device_node *np,
- struct device_node *interrupt_parent)
-{
- irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
- return 0;
-}
-
-static int __init imx53_gpio_add_irq_domain(struct device_node *np,
- struct device_node *interrupt_parent)
-{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
-
- gpio_irq_base -= 32;
- irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
-
- return 0;
-}
-
-static const struct of_device_id imx53_irq_match[] __initconst = {
- { .compatible = "fsl,imx53-tzic", .data = imx53_tzic_add_irq_domain, },
- { .compatible = "fsl,imx53-gpio", .data = imx53_gpio_add_irq_domain, },
- { /* sentinel */ }
-};
-
static const struct of_device_id imx53_iomuxc_of_match[] __initconst = {
{ .compatible = "fsl,imx53-iomuxc-ard", .data = imx53_ard_common_init, },
{ .compatible = "fsl,imx53-iomuxc-evk", .data = imx53_evk_common_init, },
@@ -103,8 +78,6 @@ static void __init imx53_dt_init(void)
const struct of_device_id *of_id;
void (*func)(void);
- of_irq_init(imx53_irq_match);
-
pinctrl_provide_dummies();
node = of_find_matching_node(NULL, imx53_iomuxc_of_match);
@@ -147,6 +120,7 @@ DT_MACHINE_START(IMX53_DT, "Freescale i.MX53 (Device Tree Support)")
.handle_irq = imx53_handle_irq,
.timer = &imx53_timer,
.init_machine = imx53_dt_init,
+ .init_late = imx53_init_late,
.dt_compat = imx53_dt_board_compat,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-apf9328.c b/arch/arm/mach-imx/mach-apf9328.c
index f4a63ee9e217..7b99a79722b6 100644
--- a/arch/arm/mach-imx/mach-apf9328.c
+++ b/arch/arm/mach-imx/mach-apf9328.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>
#include <linux/dm9000.h>
+#include <linux/gpio.h>
#include <linux/i2c.h>
#include <asm/mach-types.h>
@@ -26,7 +27,6 @@
#include <mach/common.h>
#include <mach/hardware.h>
-#include <mach/irqs.h>
#include <mach/iomux-mx1.h>
#include "devices-imx1.h"
@@ -87,8 +87,7 @@ static struct resource dm9000_resources[] = {
.end = MX1_CS4_PHYS + 0x00C00003,
.flags = IORESOURCE_MEM,
}, {
- .start = IRQ_GPIOB(14),
- .end = IRQ_GPIOB(14),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
},
};
@@ -129,6 +128,8 @@ static void __init apf9328_init(void)
imx1_add_imx_i2c(&apf9328_i2c_data);
+ dm9000_resources[2].start = gpio_to_irq(IMX_GPIO_NR(2, 14));
+ dm9000_resources[2].end = gpio_to_irq(IMX_GPIO_NR(2, 14));
platform_add_devices(devices, ARRAY_SIZE(devices));
}
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index c650145d1646..2c6ab3273f9e 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -367,10 +367,6 @@ static const struct fb_videomode fb_modedb[] = {
},
};
-static const struct ipu_platform_data mx3_ipu_data __initconst = {
- .irq_base = MXC_IPU_IRQ_START,
-};
-
static struct mx3fb_platform_data mx3fb_pdata __initdata = {
.name = "CRT-VGA",
.mode = fb_modedb,
@@ -408,7 +404,8 @@ static int armadillo5x0_sdhc1_init(struct device *dev,
gpio_direction_input(gpio_wp);
/* When supported the trigger type have to be BOTH */
- ret = request_irq(IOMUX_TO_IRQ(MX31_PIN_ATA_DMACK), detect_irq,
+ ret = request_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_ATA_DMACK)),
+ detect_irq,
IRQF_DISABLED | IRQF_TRIGGER_FALLING,
"sdhc-detect", data);
@@ -429,7 +426,7 @@ err_gpio_free:
static void armadillo5x0_sdhc1_exit(struct device *dev, void *data)
{
- free_irq(IOMUX_TO_IRQ(MX31_PIN_ATA_DMACK), data);
+ free_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_ATA_DMACK)), data);
gpio_free(IOMUX_TO_GPIO(MX31_PIN_ATA_DMACK));
gpio_free(IOMUX_TO_GPIO(MX31_PIN_ATA_RESET_B));
}
@@ -450,8 +447,7 @@ static struct resource armadillo5x0_smc911x_resources[] = {
.end = MX31_CS3_BASE_ADDR + SZ_32M - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = IOMUX_TO_IRQ(MX31_PIN_GPIO1_0),
- .end = IOMUX_TO_IRQ(MX31_PIN_GPIO1_0),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
},
};
@@ -498,6 +494,10 @@ static void __init armadillo5x0_init(void)
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+ armadillo5x0_smc911x_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
+ armadillo5x0_smc911x_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
platform_add_devices(devices, ARRAY_SIZE(devices));
imx_add_gpio_keys(&armadillo5x0_button_data);
imx31_add_imx_i2c1(NULL);
@@ -513,7 +513,7 @@ static void __init armadillo5x0_init(void)
imx31_add_mxc_mmc(0, &sdhc_pdata);
/* Register FB */
- imx31_add_ipu_core(&mx3_ipu_data);
+ imx31_add_ipu_core();
imx31_add_mx3_sdc_fb(&mx3fb_pdata);
/* Register NOR Flash */
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index d085aea08709..2bb9e18d9ee1 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -169,28 +169,28 @@ static struct i2c_board_info eukrea_cpuimx27_i2c_devices[] = {
static struct plat_serial8250_port serial_platform_data[] = {
{
.mapbase = (unsigned long)(MX27_CS3_BASE_ADDR + 0x200000),
- .irq = IRQ_GPIOB(23),
+ /* irq number is run-time assigned */
.uartclk = 14745600,
.regshift = 1,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX27_CS3_BASE_ADDR + 0x400000),
- .irq = IRQ_GPIOB(22),
+ /* irq number is run-time assigned */
.uartclk = 14745600,
.regshift = 1,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX27_CS3_BASE_ADDR + 0x800000),
- .irq = IRQ_GPIOB(27),
+ /* irq number is run-time assigned */
.uartclk = 14745600,
.regshift = 1,
.iotype = UPIO_MEM,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP,
}, {
.mapbase = (unsigned long)(MX27_CS3_BASE_ADDR + 0x1000000),
- .irq = IRQ_GPIOB(30),
+ /* irq number is run-time assigned */
.uartclk = 14745600,
.regshift = 1,
.iotype = UPIO_MEM,
@@ -233,18 +233,18 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
.phy_mode = FSL_USB2_PHY_ULPI,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init eukrea_cpuimx27_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", eukrea_cpuimx27_otg_mode);
@@ -266,8 +266,8 @@ static void __init eukrea_cpuimx27_init(void)
imx27_add_fec(NULL);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
- imx27_add_imx2_wdt(NULL);
- imx27_add_mxc_w1(NULL);
+ imx27_add_imx2_wdt();
+ imx27_add_mxc_w1();
#if defined(CONFIG_MACH_EUKREA_CPUIMX27_USESDHC2)
/* SDHC2 can be used for Wifi */
@@ -279,6 +279,10 @@ static void __init eukrea_cpuimx27_init(void)
#endif
#if defined(CONFIG_SERIAL_8250) || defined(CONFIG_SERIAL_8250_MODULE)
+ serial_platform_data[0].irq = IMX_GPIO_NR(2, 23);
+ serial_platform_data[1].irq = IMX_GPIO_NR(2, 22);
+ serial_platform_data[2].irq = IMX_GPIO_NR(2, 27);
+ serial_platform_data[3].irq = IMX_GPIO_NR(2, 30);
platform_device_register(&serial_device);
#endif
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 6450303f1a7a..d49b0ec6bdec 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -71,7 +71,7 @@ static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
}, {
I2C_BOARD_INFO("tsc2007", 0x48),
.platform_data = &tsc2007_info,
- .irq = IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO),
+ /* irq number is run-time assigned */
},
};
@@ -141,18 +141,18 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
.workaround = FLS_USB2_WORKAROUND_ENGCM09152,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init eukrea_cpuimx35_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", eukrea_cpuimx35_otg_mode);
@@ -167,11 +167,12 @@ static void __init eukrea_cpuimx35_init(void)
ARRAY_SIZE(eukrea_cpuimx35_pads));
imx35_add_fec(NULL);
- imx35_add_imx2_wdt(NULL);
+ imx35_add_imx2_wdt();
imx35_add_imx_uart0(&uart_pdata);
imx35_add_mxc_nand(&eukrea_cpuimx35_nand_board_info);
+ eukrea_cpuimx35_i2c_devices[1].irq = gpio_to_irq(TSC2007_IRQGPIO);
i2c_register_board_info(0, eukrea_cpuimx35_i2c_devices,
ARRAY_SIZE(eukrea_cpuimx35_i2c_devices));
imx35_add_imx_i2c0(&eukrea_cpuimx35_i2c0_data);
diff --git a/arch/arm/mach-imx/mach-cpuimx51sd.c b/arch/arm/mach-imx/mach-cpuimx51sd.c
index 1e09de50cbcd..b87cc49ab1e8 100644
--- a/arch/arm/mach-imx/mach-cpuimx51sd.c
+++ b/arch/arm/mach-imx/mach-cpuimx51sd.c
@@ -217,18 +217,18 @@ static const struct mxc_usbh_platform_data usbh1_config __initconst = {
.portsc = MXC_EHCI_MODE_ULPI,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init eukrea_cpuimx51sd_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", eukrea_cpuimx51sd_otg_mode);
@@ -258,7 +258,7 @@ static struct spi_board_info cpuimx51sd_spi_device[] = {
.mode = SPI_MODE_0,
.chip_select = 0,
.platform_data = &mcp251x_info,
- .irq = IMX_GPIO_TO_IRQ(CAN_IRQGPIO)
+ /* irq number is run-time assigned */
},
};
@@ -292,7 +292,7 @@ static void __init eukrea_cpuimx51sd_init(void)
imx51_add_imx_uart(0, &uart_pdata);
imx51_add_mxc_nand(&eukrea_cpuimx51sd_nand_board_info);
- imx51_add_imx2_wdt(0, NULL);
+ imx51_add_imx2_wdt(0);
gpio_request(ETH_RST, "eth_rst");
gpio_set_value(ETH_RST, 1);
@@ -309,6 +309,7 @@ static void __init eukrea_cpuimx51sd_init(void)
msleep(20);
gpio_set_value(CAN_RST, 1);
imx51_add_ecspi(0, &cpuimx51sd_ecspi1_pdata);
+ cpuimx51sd_spi_device[0].irq = gpio_to_irq(CAN_IRQGPIO);
spi_register_board_info(cpuimx51sd_spi_device,
ARRAY_SIZE(cpuimx51sd_spi_device));
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index d1e04e676e33..017bbb70ea41 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -109,18 +109,18 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
.workaround = FLS_USB2_WORKAROUND_ENGCM09152,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init eukrea_cpuimx25_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", eukrea_cpuimx25_otg_mode);
@@ -134,9 +134,9 @@ static void __init eukrea_cpuimx25_init(void)
imx25_add_imx_uart0(&uart_pdata);
imx25_add_mxc_nand(&eukrea_cpuimx25_nand_board_info);
- imx25_add_imxdi_rtc(NULL);
+ imx25_add_imxdi_rtc();
imx25_add_fec(&mx25_fec_pdata);
- imx25_add_imx2_wdt(NULL);
+ imx25_add_imx2_wdt();
i2c_register_board_info(0, eukrea_cpuimx25_i2c_devices,
ARRAY_SIZE(eukrea_cpuimx25_i2c_devices));
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index f76edb96a48a..f264ddddd47c 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -38,8 +38,9 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
-#include <asm/system.h>
+#include <asm/system_info.h>
#include <mach/common.h>
+#include <mach/hardware.h>
#include <mach/iomux-mx27.h>
#include "devices-imx27.h"
@@ -47,7 +48,7 @@
#define TVP5150_RSTN (GPIO_PORTC + 18)
#define TVP5150_PWDN (GPIO_PORTC + 19)
#define OTG_PHY_CS_GPIO (GPIO_PORTF + 17)
-#define SDHC1_IRQ IRQ_GPIOB(25)
+#define SDHC1_IRQ_GPIO IMX_GPIO_NR(2, 25)
#define MOTHERBOARD_BIT2 (GPIO_PORTD + 31)
#define MOTHERBOARD_BIT1 (GPIO_PORTD + 30)
@@ -307,14 +308,14 @@ static int visstrim_m10_sdhc1_init(struct device *dev,
{
int ret;
- ret = request_irq(SDHC1_IRQ, detect_irq, IRQF_TRIGGER_FALLING,
- "mmc-detect", data);
+ ret = request_irq(gpio_to_irq(SDHC1_IRQ_GPIO), detect_irq,
+ IRQF_TRIGGER_FALLING, "mmc-detect", data);
return ret;
}
static void visstrim_m10_sdhc1_exit(struct device *dev, void *data)
{
- free_irq(SDHC1_IRQ, data);
+ free_irq(gpio_to_irq(SDHC1_IRQ_GPIO), data);
}
static const struct imxmmc_platform_data visstrim_m10_sdhc_pdata __initconst = {
diff --git a/arch/arm/mach-imx/mach-imx27ipcam.c b/arch/arm/mach-imx/mach-imx27ipcam.c
index c9d350c5dcc8..7381387a8905 100644
--- a/arch/arm/mach-imx/mach-imx27ipcam.c
+++ b/arch/arm/mach-imx/mach-imx27ipcam.c
@@ -57,7 +57,7 @@ static void __init mx27ipcam_init(void)
imx27_add_imx_uart0(NULL);
imx27_add_fec(NULL);
- imx27_add_imx2_wdt(NULL);
+ imx27_add_imx2_wdt();
}
static void __init mx27ipcam_timer_init(void)
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index b47e98b7d539..5ec0608f2a76 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -12,11 +12,12 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/cpuidle.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -24,6 +25,8 @@
#include <linux/pinctrl/machine.h>
#include <linux/phy.h>
#include <linux/micrel_phy.h>
+#include <linux/mfd/anatop.h>
+#include <asm/cpuidle.h>
#include <asm/smp_twd.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
@@ -31,8 +34,10 @@
#include <asm/mach/time.h>
#include <asm/system_misc.h>
#include <mach/common.h>
+#include <mach/cpuidle.h>
#include <mach/hardware.h>
+
void imx6q_restart(char mode, const char *cmd)
{
struct device_node *np;
@@ -113,6 +118,45 @@ static void __init imx6q_sabrelite_init(void)
imx6q_sabrelite_cko1_setup();
}
+static void __init imx6q_usb_init(void)
+{
+ struct device_node *np;
+ struct platform_device *pdev = NULL;
+ struct anatop *adata = NULL;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
+ if (np)
+ pdev = of_find_device_by_node(np);
+ if (pdev)
+ adata = platform_get_drvdata(pdev);
+ if (!adata) {
+ if (np)
+ of_node_put(np);
+ return;
+ }
+
+#define HW_ANADIG_USB1_CHRG_DETECT 0x000001b0
+#define HW_ANADIG_USB2_CHRG_DETECT 0x00000210
+
+#define BM_ANADIG_USB_CHRG_DETECT_EN_B 0x00100000
+#define BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B 0x00080000
+
+ /*
+ * The external charger detector needs to be disabled,
+ * or the signal at DP will be poor
+ */
+ anatop_write_reg(adata, HW_ANADIG_USB1_CHRG_DETECT,
+ BM_ANADIG_USB_CHRG_DETECT_EN_B
+ | BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B,
+ ~0);
+ anatop_write_reg(adata, HW_ANADIG_USB2_CHRG_DETECT,
+ BM_ANADIG_USB_CHRG_DETECT_EN_B |
+ BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B,
+ ~0);
+
+ of_node_put(np);
+}
+
static void __init imx6q_init_machine(void)
{
/*
@@ -127,6 +171,20 @@ static void __init imx6q_init_machine(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
imx6q_pm_init();
+ imx6q_usb_init();
+}
+
+static struct cpuidle_driver imx6q_cpuidle_driver = {
+ .name = "imx6q_cpuidle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states[0] = ARM_CPUIDLE_WFI_STATE,
+ .state_count = 1,
+};
+
+static void __init imx6q_init_late(void)
+{
+ imx_cpuidle_init(&imx6q_cpuidle_driver);
}
static void __init imx6q_map_io(void)
@@ -136,21 +194,8 @@ static void __init imx6q_map_io(void)
imx6q_clock_map_io();
}
-static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
- struct device_node *interrupt_parent)
-{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
-
- gpio_irq_base -= 32;
- irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops,
- NULL);
-
- return 0;
-}
-
static const struct of_device_id imx6q_irq_match[] __initconst = {
{ .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
- { .compatible = "fsl,imx6q-gpio", .data = imx6q_gpio_add_irq_domain, },
{ /* sentinel */ }
};
@@ -186,6 +231,7 @@ DT_MACHINE_START(IMX6Q, "Freescale i.MX6 Quad (Device Tree)")
.handle_irq = imx6q_handle_irq,
.timer = &imx6q_timer,
.init_machine = imx6q_init_machine,
+ .init_late = imx6q_init_late,
.dt_compat = imx6q_dt_compat,
.restart = imx6q_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c
index 15a26e908260..5d08533ab2c7 100644
--- a/arch/arm/mach-imx/mach-kzm_arm11_01.c
+++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c
@@ -73,7 +73,7 @@ static struct plat_serial8250_port serial_platform_data[] = {
{
.membase = KZM_ARM11_IO_ADDRESS(KZM_ARM11_16550),
.mapbase = KZM_ARM11_16550,
- .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_1),
+ /* irq number is run-time assigned */
.irqflags = IRQ_TYPE_EDGE_RISING,
.uartclk = 14745600,
.regshift = 0,
@@ -91,8 +91,7 @@ static struct resource serial8250_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IOMUX_TO_IRQ(MX31_PIN_GPIO1_1),
- .end = IOMUX_TO_IRQ(MX31_PIN_GPIO1_1),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ,
},
};
@@ -125,6 +124,13 @@ static int __init kzm_init_ext_uart(void)
tmp |= 0x2;
__raw_writeb(tmp, KZM_ARM11_IO_ADDRESS(KZM_ARM11_CTL1));
+ serial_platform_data[0].irq =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1));
+ serial8250_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1));
+ serial8250_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1));
+
return platform_device_register(&serial_device);
}
#else
@@ -152,8 +158,7 @@ static struct resource kzm_smsc9118_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IOMUX_TO_IRQ(MX31_PIN_GPIO1_2),
- .end = IOMUX_TO_IRQ(MX31_PIN_GPIO1_2),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
},
};
@@ -184,6 +189,11 @@ static int __init kzm_init_smsc9118(void)
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+ kzm_smsc9118_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_2));
+ kzm_smsc9118_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_2));
+
return platform_device_register(&kzm_smsc9118_device);
}
#else
diff --git a/arch/arm/mach-imx/mach-mx1ads.c b/arch/arm/mach-imx/mach-mx1ads.c
index 7274e7928136..667f359a2e8b 100644
--- a/arch/arm/mach-imx/mach-mx1ads.c
+++ b/arch/arm/mach-imx/mach-mx1ads.c
@@ -26,7 +26,6 @@
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/iomux-mx1.h>
-#include <mach/irqs.h>
#include "devices-imx1.h"
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 3e7401fca76c..ed22e3fe6ec8 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -38,7 +38,7 @@
(MX21ADS_MMIO_BASE_ADDR + (offset))
#define MX21ADS_CS8900A_MMIO_SIZE 0x200000
-#define MX21ADS_CS8900A_IRQ IRQ_GPIOE(11)
+#define MX21ADS_CS8900A_IRQ_GPIO IMX_GPIO_NR(5, 11)
#define MX21ADS_ST16C255_IOBASE_REG MX21ADS_REG_ADDR(0x200000)
#define MX21ADS_VERSION_REG MX21ADS_REG_ADDR(0x400000)
#define MX21ADS_IO_REG MX21ADS_REG_ADDR(0x800000)
@@ -159,9 +159,10 @@ static struct platform_device mx21ads_nor_mtd_device = {
.resource = &mx21ads_flash_resource,
};
-static const struct resource mx21ads_cs8900_resources[] __initconst = {
+static struct resource mx21ads_cs8900_resources[] __initdata = {
DEFINE_RES_MEM(MX21_CS1_BASE_ADDR, MX21ADS_CS8900A_MMIO_SIZE),
- DEFINE_RES_IRQ(MX21ADS_CS8900A_IRQ),
+ /* irq number is run-time assigned */
+ DEFINE_RES_IRQ(-1),
};
static const struct platform_device_info mx21ads_cs8900_devinfo __initconst = {
@@ -241,13 +242,13 @@ static int mx21ads_sdhc_get_ro(struct device *dev)
static int mx21ads_sdhc_init(struct device *dev, irq_handler_t detect_irq,
void *data)
{
- return request_irq(IRQ_GPIOD(25), detect_irq,
+ return request_irq(gpio_to_irq(IMX_GPIO_NR(4, 25)), detect_irq,
IRQF_TRIGGER_FALLING, "mmc-detect", data);
}
static void mx21ads_sdhc_exit(struct device *dev, void *data)
{
- free_irq(IRQ_GPIOD(25), data);
+ free_irq(gpio_to_irq(IMX_GPIO_NR(4, 25)), data);
}
static const struct imxmmc_platform_data mx21ads_sdhc_pdata __initconst = {
@@ -304,6 +305,11 @@ static void __init mx21ads_board_init(void)
imx21_add_mxc_nand(&mx21ads_nand_board_info);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
+
+ mx21ads_cs8900_resources[1].start =
+ gpio_to_irq(MX21ADS_CS8900A_IRQ_GPIO);
+ mx21ads_cs8900_resources[1].end =
+ gpio_to_irq(MX21ADS_CS8900A_IRQ_GPIO);
platform_device_register_full(&mx21ads_cs8900_devinfo);
}
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c
index f26734298aa6..ce247fd1269a 100644
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ b/arch/arm/mach-imx/mach-mx25_3ds.c
@@ -237,9 +237,9 @@ static void __init mx25pdk_init(void)
imx25_add_fsl_usb2_udc(&otg_device_pdata);
imx25_add_mxc_ehci_hs(&usbh2_pdata);
imx25_add_mxc_nand(&mx25pdk_nand_board_info);
- imx25_add_imxdi_rtc(NULL);
+ imx25_add_imxdi_rtc();
imx25_add_imx_fb(&mx25pdk_fb_pdata);
- imx25_add_imx2_wdt(NULL);
+ imx25_add_imx2_wdt();
mx25pdk_fec_reset();
imx25_add_fec(&mx25_fec_pdata);
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index c6d385c52257..58c24c1a7ab7 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -40,7 +40,6 @@
#include <mach/common.h>
#include <mach/iomux-mx27.h>
#include <mach/ulpi.h>
-#include <mach/irqs.h>
#include <mach/3ds_debugboard.h>
#include "devices-imx27.h"
@@ -48,7 +47,6 @@
#define SD1_EN_GPIO IMX_GPIO_NR(2, 25)
#define OTG_PHY_RESET_GPIO IMX_GPIO_NR(2, 23)
#define SPI2_SS0 IMX_GPIO_NR(4, 21)
-#define EXPIO_PARENT_INT gpio_to_irq(IMX_GPIO_NR(3, 28))
#define PMIC_INT IMX_GPIO_NR(3, 14)
#define SPI1_SS0 IMX_GPIO_NR(4, 28)
#define SD1_CD IMX_GPIO_NR(2, 26)
@@ -241,18 +239,18 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
.phy_mode = FSL_USB2_PHY_ULPI,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init mx27_3ds_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", mx27_3ds_otg_mode);
@@ -445,7 +443,7 @@ static struct spi_board_info mx27_3ds_spi_devs[] __initdata = {
.bus_num = 1,
.chip_select = 0, /* SS0 */
.platform_data = &mc13783_pdata,
- .irq = IMX_GPIO_TO_IRQ(PMIC_INT),
+ /* irq number is run-time assigned */
.mode = SPI_CS_HIGH,
}, {
.modalias = "l4f00242t03",
@@ -480,7 +478,7 @@ static void __init mx27pdk_init(void)
imx27_add_fec(NULL);
imx27_add_imx_keypad(&mx27_3ds_keymap_data);
imx27_add_mxc_mmc(0, &sdhc1_pdata);
- imx27_add_imx2_wdt(NULL);
+ imx27_add_imx2_wdt();
otg_phy_init();
if (otg_mode_host) {
@@ -496,10 +494,11 @@ static void __init mx27pdk_init(void)
imx27_add_spi_imx1(&spi2_pdata);
imx27_add_spi_imx0(&spi1_pdata);
+ mx27_3ds_spi_devs[0].irq = gpio_to_irq(PMIC_INT);
spi_register_board_info(mx27_3ds_spi_devs,
ARRAY_SIZE(mx27_3ds_spi_devs));
- if (mxc_expio_init(MX27_CS5_BASE_ADDR, EXPIO_PARENT_INT))
+ if (mxc_expio_init(MX27_CS5_BASE_ADDR, IMX_GPIO_NR(3, 28)))
pr_warn("Init of the debugboard failed, all devices on the debugboard are unusable.\n");
imx27_add_imx_i2c(0, &mx27_3ds_i2c0_data);
platform_add_devices(devices, ARRAY_SIZE(devices));
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index 0228d2e07fe0..7dc59bac0e55 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -246,25 +246,25 @@ static const struct imx_fb_platform_data mx27ads_fb_data __initconst = {
static int mx27ads_sdhc1_init(struct device *dev, irq_handler_t detect_irq,
void *data)
{
- return request_irq(IRQ_GPIOE(21), detect_irq, IRQF_TRIGGER_RISING,
- "sdhc1-card-detect", data);
+ return request_irq(gpio_to_irq(IMX_GPIO_NR(5, 21)), detect_irq,
+ IRQF_TRIGGER_RISING, "sdhc1-card-detect", data);
}
static int mx27ads_sdhc2_init(struct device *dev, irq_handler_t detect_irq,
void *data)
{
- return request_irq(IRQ_GPIOB(7), detect_irq, IRQF_TRIGGER_RISING,
- "sdhc2-card-detect", data);
+ return request_irq(gpio_to_irq(IMX_GPIO_NR(2, 7)), detect_irq,
+ IRQF_TRIGGER_RISING, "sdhc2-card-detect", data);
}
static void mx27ads_sdhc1_exit(struct device *dev, void *data)
{
- free_irq(IRQ_GPIOE(21), data);
+ free_irq(gpio_to_irq(IMX_GPIO_NR(5, 21)), data);
}
static void mx27ads_sdhc2_exit(struct device *dev, void *data)
{
- free_irq(IRQ_GPIOB(7), data);
+ free_irq(gpio_to_irq(IMX_GPIO_NR(2, 7)), data);
}
static const struct imxmmc_platform_data sdhc1_pdata __initconst = {
@@ -310,7 +310,7 @@ static void __init mx27ads_board_init(void)
imx27_add_fec(NULL);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
- imx27_add_mxc_w1(NULL);
+ imx27_add_mxc_w1();
}
static void __init mx27ads_timer_init(void)
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index 4eafdf275ea2..8915f937b7d5 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -44,9 +44,6 @@
#include "devices-imx31.h"
-/* CPLD IRQ line for external uart, external ethernet etc */
-#define EXPIO_PARENT_INT IOMUX_TO_IRQ(MX31_PIN_GPIO1_1)
-
static int mx31_3ds_pins[] = {
/* UART1 */
MX31_PIN_CTS1__CTS1,
@@ -277,10 +274,6 @@ static const struct fb_videomode fb_modedb[] = {
},
};
-static struct ipu_platform_data mx3_ipu_data = {
- .irq_base = MXC_IPU_IRQ_START,
-};
-
static struct mx3fb_platform_data mx3fb_pdata __initdata = {
.name = "Epson-VGA",
.mode = fb_modedb,
@@ -317,7 +310,7 @@ static int mx31_3ds_sdhc1_init(struct device *dev,
return ret;
}
- ret = request_irq(IOMUX_TO_IRQ(MX31_PIN_GPIO3_1),
+ ret = request_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1)),
detect_irq, IRQF_DISABLED |
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"sdhc1-detect", data);
@@ -336,7 +329,7 @@ gpio_free:
static void mx31_3ds_sdhc1_exit(struct device *dev, void *data)
{
- free_irq(IOMUX_TO_IRQ(MX31_PIN_GPIO3_1), data);
+ free_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1)), data);
gpio_free_array(mx31_3ds_sdhc1_gpios,
ARRAY_SIZE(mx31_3ds_sdhc1_gpios));
}
@@ -539,7 +532,7 @@ static struct spi_board_info mx31_3ds_spi_devs[] __initdata = {
.bus_num = 1,
.chip_select = 1, /* SS2 */
.platform_data = &mc13783_pdata,
- .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3),
+ /* irq number is run-time assigned */
.mode = SPI_CS_HIGH,
}, {
.modalias = "l4f00242t03",
@@ -671,18 +664,18 @@ static const struct fsl_usb2_platform_data usbotg_pdata __initconst = {
.phy_mode = FSL_USB2_PHY_ULPI,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init mx31_3ds_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", mx31_3ds_otg_mode);
@@ -714,6 +707,7 @@ static void __init mx31_3ds_init(void)
imx31_add_mxc_nand(&mx31_3ds_nand_board_info);
imx31_add_spi_imx1(&spi1_pdata);
+ mx31_3ds_spi_devs[0].irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
spi_register_board_info(mx31_3ds_spi_devs,
ARRAY_SIZE(mx31_3ds_spi_devs));
@@ -736,15 +730,15 @@ static void __init mx31_3ds_init(void)
if (!otg_mode_host)
imx31_add_fsl_usb2_udc(&usbotg_pdata);
- if (mxc_expio_init(MX31_CS5_BASE_ADDR, EXPIO_PARENT_INT))
+ if (mxc_expio_init(MX31_CS5_BASE_ADDR, IOMUX_TO_GPIO(MX31_PIN_GPIO1_1)))
printk(KERN_WARNING "Init of the debug board failed, all "
"devices on the debug board are unusable.\n");
- imx31_add_imx2_wdt(NULL);
+ imx31_add_imx2_wdt();
imx31_add_imx_i2c0(&mx31_3ds_i2c0_data);
imx31_add_mxc_mmc(0, &sdhc1_pdata);
imx31_add_spi_imx0(&spi0_pdata);
- imx31_add_ipu_core(&mx3_ipu_data);
+ imx31_add_ipu_core();
imx31_add_mx3_sdc_fb(&mx3fb_pdata);
/* CSI */
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c
index 4518e5448227..d37f4809c556 100644
--- a/arch/arm/mach-imx/mach-mx31ads.c
+++ b/arch/arm/mach-imx/mach-mx31ads.c
@@ -21,6 +21,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -62,20 +63,18 @@
#define PBC_INTSTATUS_REG (PBC_INTSTATUS + PBC_BASE_ADDRESS)
#define PBC_INTMASK_SET_REG (PBC_INTMASK_SET + PBC_BASE_ADDRESS)
#define PBC_INTMASK_CLEAR_REG (PBC_INTMASK_CLEAR + PBC_BASE_ADDRESS)
-#define EXPIO_PARENT_INT IOMUX_TO_IRQ(MX31_PIN_GPIO1_4)
-#define MXC_EXP_IO_BASE MXC_BOARD_IRQ_START
-#define MXC_IRQ_TO_EXPIO(irq) ((irq) - MXC_EXP_IO_BASE)
-
-#define EXPIO_INT_XUART_INTA (MXC_EXP_IO_BASE + 10)
-#define EXPIO_INT_XUART_INTB (MXC_EXP_IO_BASE + 11)
+#define EXPIO_INT_XUART_INTA 10
+#define EXPIO_INT_XUART_INTB 11
#define MXC_MAX_EXP_IO_LINES 16
/* CS8900 */
-#define EXPIO_INT_ENET_INT (MXC_EXP_IO_BASE + 8)
+#define EXPIO_INT_ENET_INT 8
#define CS4_CS8900_MMIO_START 0x20000
+static struct irq_domain *domain;
+
/*
* The serial port definition structure.
*/
@@ -83,7 +82,6 @@ static struct plat_serial8250_port serial_platform_data[] = {
{
.membase = (void *)(PBC_BASE_ADDRESS + PBC_SC16C652_UARTA),
.mapbase = (unsigned long)(MX31_CS4_BASE_ADDR + PBC_SC16C652_UARTA),
- .irq = EXPIO_INT_XUART_INTA,
.uartclk = 14745600,
.regshift = 0,
.iotype = UPIO_MEM,
@@ -91,7 +89,6 @@ static struct plat_serial8250_port serial_platform_data[] = {
}, {
.membase = (void *)(PBC_BASE_ADDRESS + PBC_SC16C652_UARTB),
.mapbase = (unsigned long)(MX31_CS4_BASE_ADDR + PBC_SC16C652_UARTB),
- .irq = EXPIO_INT_XUART_INTB,
.uartclk = 14745600,
.regshift = 0,
.iotype = UPIO_MEM,
@@ -108,9 +105,9 @@ static struct platform_device serial_device = {
},
};
-static const struct resource mx31ads_cs8900_resources[] __initconst = {
+static struct resource mx31ads_cs8900_resources[] __initdata = {
DEFINE_RES_MEM(MX31_CS4_BASE_ADDR + CS4_CS8900_MMIO_START, SZ_64K),
- DEFINE_RES_IRQ(EXPIO_INT_ENET_INT),
+ DEFINE_RES_IRQ(-1),
};
static const struct platform_device_info mx31ads_cs8900_devinfo __initconst = {
@@ -122,11 +119,19 @@ static const struct platform_device_info mx31ads_cs8900_devinfo __initconst = {
static int __init mxc_init_extuart(void)
{
+ serial_platform_data[0].irq = irq_find_mapping(domain,
+ EXPIO_INT_XUART_INTA);
+ serial_platform_data[1].irq = irq_find_mapping(domain,
+ EXPIO_INT_XUART_INTB);
return platform_device_register(&serial_device);
}
static void __init mxc_init_ext_ethernet(void)
{
+ mx31ads_cs8900_resources[1].start =
+ irq_find_mapping(domain, EXPIO_INT_ENET_INT);
+ mx31ads_cs8900_resources[1].end =
+ irq_find_mapping(domain, EXPIO_INT_ENET_INT);
platform_device_register_full(
(struct platform_device_info *)&mx31ads_cs8900_devinfo);
}
@@ -157,12 +162,12 @@ static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc)
imr_val = __raw_readw(PBC_INTMASK_SET_REG);
int_valid = __raw_readw(PBC_INTSTATUS_REG) & imr_val;
- expio_irq = MXC_EXP_IO_BASE;
+ expio_irq = 0;
for (; int_valid != 0; int_valid >>= 1, expio_irq++) {
if ((int_valid & 1) == 0)
continue;
- generic_handle_irq(expio_irq);
+ generic_handle_irq(irq_find_mapping(domain, expio_irq));
}
}
@@ -172,7 +177,7 @@ static void mx31ads_expio_irq_handler(u32 irq, struct irq_desc *desc)
*/
static void expio_mask_irq(struct irq_data *d)
{
- u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
+ u32 expio = d->hwirq;
/* mask the interrupt */
__raw_writew(1 << expio, PBC_INTMASK_CLEAR_REG);
__raw_readw(PBC_INTMASK_CLEAR_REG);
@@ -184,7 +189,7 @@ static void expio_mask_irq(struct irq_data *d)
*/
static void expio_ack_irq(struct irq_data *d)
{
- u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
+ u32 expio = d->hwirq;
/* clear the interrupt status */
__raw_writew(1 << expio, PBC_INTSTATUS_REG);
}
@@ -195,7 +200,7 @@ static void expio_ack_irq(struct irq_data *d)
*/
static void expio_unmask_irq(struct irq_data *d)
{
- u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
+ u32 expio = d->hwirq;
/* unmask the interrupt */
__raw_writew(1 << expio, PBC_INTMASK_SET_REG);
}
@@ -209,7 +214,8 @@ static struct irq_chip expio_irq_chip = {
static void __init mx31ads_init_expio(void)
{
- int i;
+ int irq_base;
+ int i, irq;
printk(KERN_INFO "MX31ADS EXPIO(CPLD) hardware\n");
@@ -221,13 +227,21 @@ static void __init mx31ads_init_expio(void)
/* disable the interrupt and clear the status */
__raw_writew(0xFFFF, PBC_INTMASK_CLEAR_REG);
__raw_writew(0xFFFF, PBC_INTSTATUS_REG);
- for (i = MXC_EXP_IO_BASE; i < (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES);
- i++) {
+
+ irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
+ WARN_ON(irq_base < 0);
+
+ domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ WARN_ON(!domain);
+
+ for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) {
irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
- irq_set_irq_type(EXPIO_PARENT_INT, IRQ_TYPE_LEVEL_HIGH);
- irq_set_chained_handler(EXPIO_PARENT_INT, mx31ads_expio_irq_handler);
+ irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_4));
+ irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
+ irq_set_chained_handler(irq, mx31ads_expio_irq_handler);
}
#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
@@ -479,7 +493,6 @@ static int mx31_wm8350_init(struct wm8350 *wm8350)
static struct wm8350_platform_data __initdata mx31_wm8350_pdata = {
.init = mx31_wm8350_init,
- .irq_base = MXC_BOARD_IRQ_START + MXC_MAX_EXP_IO_LINES,
};
#endif
@@ -488,13 +501,17 @@ static struct i2c_board_info __initdata mx31ads_i2c1_devices[] = {
{
I2C_BOARD_INFO("wm8350", 0x1a),
.platform_data = &mx31_wm8350_pdata,
- .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3),
+ /* irq number is run-time assigned */
},
#endif
};
static void __init mxc_init_i2c(void)
{
+#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
+ mx31ads_i2c1_devices[0].irq =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
+#endif
i2c_register_board_info(1, mx31ads_i2c1_devices,
ARRAY_SIZE(mx31ads_i2c1_devices));
diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c
index 83714b0cc290..34b9bf075daf 100644
--- a/arch/arm/mach-imx/mach-mx31lilly.c
+++ b/arch/arm/mach-imx/mach-mx31lilly.c
@@ -65,8 +65,7 @@ static struct resource smsc91x_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IOMUX_TO_IRQ(MX31_PIN_GPIO1_0),
- .end = IOMUX_TO_IRQ(MX31_PIN_GPIO1_0),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
}
};
@@ -233,7 +232,7 @@ static struct spi_board_info mc13783_dev __initdata = {
.bus_num = 1,
.chip_select = 0,
.platform_data = &mc13783_pdata,
- .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3),
+ /* irq number is run-time assigned */
};
static struct platform_device *devices[] __initdata = {
@@ -285,10 +284,15 @@ static void __init mx31lilly_board_init(void)
imx31_add_spi_imx0(&spi0_pdata);
imx31_add_spi_imx1(&spi1_pdata);
+ mc13783_dev.irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
spi_register_board_info(&mc13783_dev, 1);
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+ smsc91x_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
+ smsc91x_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_0));
platform_add_devices(devices, ARRAY_SIZE(devices));
/* USB */
diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c
index 686c60587980..c8785b39eaed 100644
--- a/arch/arm/mach-imx/mach-mx31lite.c
+++ b/arch/arm/mach-imx/mach-mx31lite.c
@@ -43,7 +43,6 @@
#include <mach/common.h>
#include <mach/board-mx31lite.h>
#include <mach/iomux-mx3.h>
-#include <mach/irqs.h>
#include <mach/ulpi.h>
#include "devices-imx31.h"
@@ -83,8 +82,7 @@ static struct resource smsc911x_resources[] = {
.end = MX31_CS4_BASE_ADDR + 0x100,
.flags = IORESOURCE_MEM,
}, {
- .start = IOMUX_TO_IRQ(MX31_PIN_SFS6),
- .end = IOMUX_TO_IRQ(MX31_PIN_SFS6),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ,
},
};
@@ -124,7 +122,7 @@ static struct spi_board_info mc13783_spi_dev __initdata = {
.bus_num = 1,
.chip_select = 0,
.platform_data = &mc13783_pdata,
- .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3),
+ /* irq number is run-time assigned */
};
/*
@@ -258,6 +256,7 @@ static void __init mx31lite_init(void)
imx31_add_mxc_nand(&mx31lite_nand_board_info);
imx31_add_spi_imx1(&spi1_pdata);
+ mc13783_spi_dev.irq = gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
spi_register_board_info(&mc13783_spi_dev, 1);
/* USB */
@@ -274,6 +273,10 @@ static void __init mx31lite_init(void)
pr_warning("could not get LAN irq gpio\n");
else {
gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_SFS6));
+ smsc911x_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_SFS6));
+ smsc911x_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_SFS6));
platform_device_register(&smsc911x_device);
}
}
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c
index 016791f038b0..d46290b288ed 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -303,7 +303,7 @@ static struct imx_ssi_platform_data moboard_ssi_pdata = {
static struct spi_board_info moboard_spi_board_info[] __initdata = {
{
.modalias = "mc13783",
- .irq = IOMUX_TO_IRQ(MX31_PIN_GPIO1_3),
+ /* irq number is run-time assigned */
.max_speed_hz = 300000,
.bus_num = 1,
.chip_select = 0,
@@ -473,10 +473,6 @@ static const struct gpio_led_platform_data mx31moboard_led_pdata __initconst = {
.leds = mx31moboard_leds,
};
-static const struct ipu_platform_data mx3_ipu_data __initconst = {
- .irq_base = MXC_IPU_IRQ_START,
-};
-
static struct platform_device *devices[] __initdata = {
&mx31moboard_flash,
};
@@ -494,7 +490,7 @@ static int __init mx31moboard_init_cam(void)
int dma, ret = -ENOMEM;
struct platform_device *pdev;
- imx31_add_ipu_core(&mx3_ipu_data);
+ imx31_add_ipu_core();
pdev = imx31_alloc_mx3_camera(&camera_pdata);
if (IS_ERR(pdev))
@@ -544,7 +540,7 @@ static void __init mx31moboard_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
gpio_led_register_device(-1, &mx31moboard_led_pdata);
- imx31_add_imx2_wdt(NULL);
+ imx31_add_imx2_wdt();
imx31_add_imx_uart0(&uart0_pdata);
imx31_add_imx_uart4(&uart4_pdata);
@@ -557,6 +553,8 @@ static void __init mx31moboard_init(void)
gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3), "pmic-irq");
gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
+ moboard_spi_board_info[0].irq =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_3));
spi_register_board_info(moboard_spi_board_info,
ARRAY_SIZE(moboard_spi_board_info));
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index 28aa19476de7..504983c68aa8 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -46,7 +46,6 @@
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-mx35.h>
-#include <mach/irqs.h>
#include <mach/3ds_debugboard.h>
#include <video/platform_lcd.h>
@@ -80,10 +79,6 @@ static const struct fb_videomode fb_modedb[] = {
},
};
-static const struct ipu_platform_data mx3_ipu_data __initconst = {
- .irq_base = MXC_IPU_IRQ_START,
-};
-
static struct mx3fb_platform_data mx3fb_pdata __initdata = {
.name = "Ceramate-CLAA070VC01",
.mode = fb_modedb,
@@ -136,8 +131,6 @@ static struct platform_device mx35_3ds_lcd = {
.dev.platform_data = &mx35_3ds_lcd_data,
};
-#define EXPIO_PARENT_INT gpio_to_irq(IMX_GPIO_NR(1, 1))
-
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
@@ -297,10 +290,6 @@ err:
return ret;
}
-static const struct ipu_platform_data mx35_3ds_ipu_data __initconst = {
- .irq_base = MXC_IPU_IRQ_START,
-};
-
static struct i2c_board_info mx35_3ds_i2c_camera = {
I2C_BOARD_INFO("ov2640", 0x30),
};
@@ -492,7 +481,7 @@ static struct i2c_board_info mx35_3ds_i2c_mc13892 = {
I2C_BOARD_INFO("mc13892", 0x08),
.platform_data = &mx35_3ds_mc13892_data,
- .irq = IMX_GPIO_TO_IRQ(GPIO_PMIC_INT),
+ /* irq number is run-time assigned */
};
static void __init imx35_3ds_init_mc13892(void)
@@ -504,6 +493,7 @@ static void __init imx35_3ds_init_mc13892(void)
return;
}
+ mx35_3ds_i2c_mc13892.irq = gpio_to_irq(GPIO_PMIC_INT);
i2c_register_board_info(0, &mx35_3ds_i2c_mc13892, 1);
}
@@ -540,18 +530,18 @@ static const struct mxc_usbh_platform_data usb_host_pdata __initconst = {
.portsc = MXC_EHCI_MODE_SERIAL,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init mx35_3ds_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", mx35_3ds_otg_mode);
@@ -571,7 +561,8 @@ static void __init mx35_3ds_init(void)
mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, ARRAY_SIZE(mx35pdk_pads));
imx35_add_fec(NULL);
- imx35_add_imx2_wdt(NULL);
+ imx35_add_imx2_wdt();
+ imx35_add_mxc_rtc();
platform_add_devices(devices, ARRAY_SIZE(devices));
imx35_add_imx_uart0(&uart_pdata);
@@ -587,7 +578,7 @@ static void __init mx35_3ds_init(void)
imx35_add_mxc_nand(&mx35pdk_nand_board_info);
imx35_add_sdhci_esdhc_imx(0, NULL);
- if (mxc_expio_init(MX35_CS5_BASE_ADDR, EXPIO_PARENT_INT))
+ if (mxc_expio_init(MX35_CS5_BASE_ADDR, IMX_GPIO_NR(1, 1)))
pr_warn("Init of the debugboard failed, all "
"devices on the debugboard are unusable.\n");
imx35_add_imx_i2c0(&mx35_3ds_i2c0_data);
@@ -595,7 +586,7 @@ static void __init mx35_3ds_init(void)
i2c_register_board_info(
0, i2c_devices_3ds, ARRAY_SIZE(i2c_devices_3ds));
- imx35_add_ipu_core(&mx35_3ds_ipu_data);
+ imx35_add_ipu_core();
platform_device_register(&mx35_3ds_ov2640);
imx35_3ds_init_camera();
diff --git a/arch/arm/mach-imx/mach-mx51_3ds.c b/arch/arm/mach-imx/mach-mx51_3ds.c
index 3c5b163923f6..9ee84a4af639 100644
--- a/arch/arm/mach-imx/mach-mx51_3ds.c
+++ b/arch/arm/mach-imx/mach-mx51_3ds.c
@@ -26,7 +26,6 @@
#include "devices-imx51.h"
-#define EXPIO_PARENT_INT gpio_to_irq(IMX_GPIO_NR(1, 6))
#define MX51_3DS_ECSPI2_CS (GPIO_PORTC + 28)
static iomux_v3_cfg_t mx51_3ds_pads[] = {
@@ -148,13 +147,13 @@ static void __init mx51_3ds_init(void)
spi_register_board_info(mx51_3ds_spi_nor_device,
ARRAY_SIZE(mx51_3ds_spi_nor_device));
- if (mxc_expio_init(MX51_CS5_BASE_ADDR, EXPIO_PARENT_INT))
+ if (mxc_expio_init(MX51_CS5_BASE_ADDR, IMX_GPIO_NR(1, 6)))
printk(KERN_WARNING "Init of the debugboard failed, all "
"devices on the board are unusable.\n");
imx51_add_sdhci_esdhc_imx(0, NULL);
imx51_add_imx_keypad(&mx51_3ds_map_data);
- imx51_add_imx2_wdt(0, NULL);
+ imx51_add_imx2_wdt(0);
}
static void __init mx51_3ds_timer_init(void)
diff --git a/arch/arm/mach-imx/mach-mx51_babbage.c b/arch/arm/mach-imx/mach-mx51_babbage.c
index dde397014d4b..7b31cbde8775 100644
--- a/arch/arm/mach-imx/mach-mx51_babbage.c
+++ b/arch/arm/mach-imx/mach-mx51_babbage.c
@@ -307,18 +307,18 @@ static const struct mxc_usbh_platform_data usbh1_config __initconst = {
.portsc = MXC_EHCI_MODE_ULPI,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init babbage_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", babbage_otg_mode);
@@ -411,7 +411,7 @@ static void __init mx51_babbage_init(void)
spi_register_board_info(mx51_babbage_spi_board_info,
ARRAY_SIZE(mx51_babbage_spi_board_info));
imx51_add_ecspi(0, &mx51_babbage_spi_pdata);
- imx51_add_imx2_wdt(0, NULL);
+ imx51_add_imx2_wdt(0);
}
static void __init mx51_babbage_timer_init(void)
diff --git a/arch/arm/mach-imx/mach-mx53_ard.c b/arch/arm/mach-imx/mach-mx53_ard.c
index 05641980dc5e..6c28e65f424d 100644
--- a/arch/arm/mach-imx/mach-mx53_ard.c
+++ b/arch/arm/mach-imx/mach-mx53_ard.c
@@ -135,8 +135,7 @@ static struct resource ard_smsc911x_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = IMX_GPIO_TO_IRQ(ARD_ETHERNET_INT_B),
- .end = IMX_GPIO_TO_IRQ(ARD_ETHERNET_INT_B),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ,
},
};
@@ -240,10 +239,12 @@ static void __init mx53_ard_board_init(void)
imx53_ard_common_init();
mx53_ard_io_init();
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
+ ard_smsc911x_resources[1].start = gpio_to_irq(ARD_ETHERNET_INT_B);
+ ard_smsc911x_resources[1].end = gpio_to_irq(ARD_ETHERNET_INT_B);
platform_add_devices(devices, ARRAY_SIZE(devices));
imx53_add_sdhci_esdhc_imx(0, &mx53_ard_sd1_data);
- imx53_add_imx2_wdt(0, NULL);
+ imx53_add_imx2_wdt(0);
imx53_add_imx_i2c(1, &mx53_ard_i2c2_data);
imx53_add_imx_i2c(2, &mx53_ard_i2c3_data);
imx_add_gpio_keys(&ard_button_data);
@@ -266,5 +267,6 @@ MACHINE_START(MX53_ARD, "Freescale MX53 ARD Board")
.handle_irq = imx53_handle_irq,
.timer = &mx53_ard_timer,
.init_machine = mx53_ard_board_init,
+ .init_late = imx53_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx53_evk.c b/arch/arm/mach-imx/mach-mx53_evk.c
index 5a72188b9cdb..09fe2197b491 100644
--- a/arch/arm/mach-imx/mach-mx53_evk.c
+++ b/arch/arm/mach-imx/mach-mx53_evk.c
@@ -154,7 +154,7 @@ static void __init mx53_evk_board_init(void)
spi_register_board_info(mx53_evk_spi_board_info,
ARRAY_SIZE(mx53_evk_spi_board_info));
imx53_add_ecspi(0, &mx53_evk_spi_data);
- imx53_add_imx2_wdt(0, NULL);
+ imx53_add_imx2_wdt(0);
gpio_led_register_device(-1, &mx53evk_leds_data);
}
@@ -174,5 +174,6 @@ MACHINE_START(MX53_EVK, "Freescale MX53 EVK Board")
.handle_irq = imx53_handle_irq,
.timer = &mx53_evk_timer,
.init_machine = mx53_evk_board_init,
+ .init_late = imx53_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx53_loco.c b/arch/arm/mach-imx/mach-mx53_loco.c
index 37f67cac15a4..8abe23c1d3c8 100644
--- a/arch/arm/mach-imx/mach-mx53_loco.c
+++ b/arch/arm/mach-imx/mach-mx53_loco.c
@@ -283,7 +283,7 @@ static void __init mx53_loco_board_init(void)
imx53_add_imx_uart(0, NULL);
mx53_loco_fec_reset();
imx53_add_fec(&mx53_loco_fec_data);
- imx53_add_imx2_wdt(0, NULL);
+ imx53_add_imx2_wdt(0);
ret = gpio_request_one(LOCO_ACCEL_EN, GPIOF_OUT_INIT_HIGH, "accel_en");
if (ret)
@@ -316,5 +316,6 @@ MACHINE_START(MX53_LOCO, "Freescale MX53 LOCO Board")
.handle_irq = imx53_handle_irq,
.timer = &mx53_loco_timer,
.init_machine = mx53_loco_board_init,
+ .init_late = imx53_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx53_smd.c b/arch/arm/mach-imx/mach-mx53_smd.c
index 8e972c5c3e13..b15d6a6d3b68 100644
--- a/arch/arm/mach-imx/mach-mx53_smd.c
+++ b/arch/arm/mach-imx/mach-mx53_smd.c
@@ -138,7 +138,7 @@ static void __init mx53_smd_board_init(void)
mx53_smd_init_uart();
mx53_smd_fec_reset();
imx53_add_fec(&mx53_smd_fec_data);
- imx53_add_imx2_wdt(0, NULL);
+ imx53_add_imx2_wdt(0);
imx53_add_imx_i2c(0, &mx53_smd_i2c_data);
imx53_add_sdhci_esdhc_imx(0, NULL);
imx53_add_sdhci_esdhc_imx(1, NULL);
@@ -163,5 +163,6 @@ MACHINE_START(MX53_SMD, "Freescale MX53 SMD Board")
.handle_irq = imx53_handle_irq,
.timer = &mx53_smd_timer,
.init_machine = mx53_smd_board_init,
+ .init_late = imx53_init_late,
.restart = mxc_restart,
MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mxt_td60.c b/arch/arm/mach-imx/mach-mxt_td60.c
index 8b3d3f07d894..0bf6d30aa32d 100644
--- a/arch/arm/mach-imx/mach-mxt_td60.c
+++ b/arch/arm/mach-imx/mach-mxt_td60.c
@@ -213,13 +213,13 @@ static const struct imx_fb_platform_data mxt_td60_fb_data __initconst = {
static int mxt_td60_sdhc1_init(struct device *dev, irq_handler_t detect_irq,
void *data)
{
- return request_irq(IRQ_GPIOF(8), detect_irq, IRQF_TRIGGER_FALLING,
- "sdhc1-card-detect", data);
+ return request_irq(gpio_to_irq(IMX_GPIO_NR(6, 8)), detect_irq,
+ IRQF_TRIGGER_FALLING, "sdhc1-card-detect", data);
}
static void mxt_td60_sdhc1_exit(struct device *dev, void *data)
{
- free_irq(IRQ_GPIOF(8), data);
+ free_irq(gpio_to_irq(IMX_GPIO_NR(6, 8)), data);
}
static const struct imxmmc_platform_data sdhc1_pdata __initconst = {
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index 541152e450c4..de8516b7d69f 100644
--- a/arch/arm/mach-imx/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -36,7 +36,6 @@
#include <mach/hardware.h>
#include <mach/iomux-mx27.h>
#include <asm/mach/time.h>
-#include <mach/irqs.h>
#include <mach/ulpi.h>
#include "devices-imx27.h"
@@ -245,7 +244,7 @@ static int pca100_sdhc2_init(struct device *dev, irq_handler_t detect_irq,
{
int ret;
- ret = request_irq(IRQ_GPIOC(29), detect_irq,
+ ret = request_irq(gpio_to_irq(IMX_GPIO_NR(3, 29)), detect_irq,
IRQF_DISABLED | IRQF_TRIGGER_FALLING,
"imx-mmc-detect", data);
if (ret)
@@ -257,7 +256,7 @@ static int pca100_sdhc2_init(struct device *dev, irq_handler_t detect_irq,
static void pca100_sdhc2_exit(struct device *dev, void *data)
{
- free_irq(IRQ_GPIOC(29), data);
+ free_irq(gpio_to_irq(IMX_GPIO_NR(3, 29)), data);
}
static const struct imxmmc_platform_data sdhc_pdata __initconst = {
@@ -298,18 +297,18 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
.phy_mode = FSL_USB2_PHY_ULPI,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init pca100_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", pca100_otg_mode);
@@ -408,8 +407,8 @@ static void __init pca100_init(void)
imx27_add_imx_fb(&pca100_fb_data);
imx27_add_fec(NULL);
- imx27_add_imx2_wdt(NULL);
- imx27_add_mxc_w1(NULL);
+ imx27_add_imx2_wdt();
+ imx27_add_mxc_w1();
}
static void __init pca100_timer_init(void)
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index 0a40004154f2..e3c45130fb3c 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -225,8 +225,7 @@ static struct resource smsc911x_resources[] = {
.end = MX31_CS1_BASE_ADDR + 0x300 + SZ_64K - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = IOMUX_TO_IRQ(MX31_PIN_GPIO3_1),
- .end = IOMUX_TO_IRQ(MX31_PIN_GPIO3_1),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
},
};
@@ -371,7 +370,7 @@ static int pcm970_sdhc1_init(struct device *dev, irq_handler_t detect_irq,
gpio_direction_input(SDHC1_GPIO_WP);
#endif
- ret = request_irq(IOMUX_TO_IRQ(MX31_PIN_SCK6), detect_irq,
+ ret = request_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_SCK6)), detect_irq,
IRQF_DISABLED | IRQF_TRIGGER_FALLING,
"sdhc-detect", data);
if (ret)
@@ -391,7 +390,7 @@ err_gpio_free:
static void pcm970_sdhc1_exit(struct device *dev, void *data)
{
- free_irq(IOMUX_TO_IRQ(MX31_PIN_SCK6), data);
+ free_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_SCK6)), data);
gpio_free(SDHC1_GPIO_DET);
gpio_free(SDHC1_GPIO_WP);
}
@@ -442,10 +441,6 @@ static struct platform_device *devices[] __initdata = {
&pcm037_mt9v022,
};
-static const struct ipu_platform_data mx3_ipu_data __initconst = {
- .irq_base = MXC_IPU_IRQ_START,
-};
-
static const struct fb_videomode fb_modedb[] = {
{
/* 240x320 @ 60 Hz Sharp */
@@ -511,8 +506,7 @@ static struct resource pcm970_sja1000_resources[] = {
.end = MX31_CS5_BASE_ADDR + 0x100 - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = IOMUX_TO_IRQ(IOMUX_PIN(48, 105)),
- .end = IOMUX_TO_IRQ(IOMUX_PIN(48, 105)),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
@@ -557,18 +551,18 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
.phy_mode = FSL_USB2_PHY_ULPI,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init pcm037_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", pcm037_otg_mode);
@@ -619,13 +613,13 @@ static void __init pcm037_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices));
- imx31_add_imx2_wdt(NULL);
+ imx31_add_imx2_wdt();
imx31_add_imx_uart0(&uart_pdata);
/* XXX: should't this have .flags = 0 (i.e. no RTSCTS) on PCM037_EET? */
imx31_add_imx_uart1(&uart_pdata);
imx31_add_imx_uart2(&uart_pdata);
- imx31_add_mxc_w1(NULL);
+ imx31_add_mxc_w1();
/* LAN9217 IRQ pin */
ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1), "lan9217-irq");
@@ -633,6 +627,10 @@ static void __init pcm037_init(void)
pr_warning("could not get LAN irq gpio\n");
else {
gpio_direction_input(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
+ smsc911x_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
+ smsc911x_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO3_1));
platform_device_register(&pcm037_eth);
}
@@ -646,7 +644,7 @@ static void __init pcm037_init(void)
imx31_add_mxc_nand(&pcm037_nand_board_info);
imx31_add_mxc_mmc(0, &sdhc_pdata);
- imx31_add_ipu_core(&mx3_ipu_data);
+ imx31_add_ipu_core();
imx31_add_mx3_sdc_fb(&mx3fb_pdata);
/* CSI */
@@ -659,6 +657,10 @@ static void __init pcm037_init(void)
pcm037_init_camera();
+ pcm970_sja1000_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(IOMUX_PIN(48, 105)));
+ pcm970_sja1000_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(IOMUX_PIN(48, 105)));
platform_device_register(&pcm970_sja1000);
if (otg_mode_host) {
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index 2f3debe2a113..95f49d936fd3 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -27,6 +27,7 @@
#include <linux/mfd/mc13783.h>
#include <linux/spi/spi.h>
#include <linux/irq.h>
+#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -274,7 +275,7 @@ static struct mc13xxx_platform_data pcm038_pmic = {
static struct spi_board_info pcm038_spi_board_info[] __initdata = {
{
.modalias = "mc13783",
- .irq = IRQ_GPIOB(23),
+ /* irq number is run-time assigned */
.max_speed_hz = 300000,
.bus_num = 0,
.chip_select = 0,
@@ -325,6 +326,7 @@ static void __init pcm038_init(void)
mxc_gpio_mode(GPIO_PORTB | 23 | GPIO_GPIO | GPIO_IN);
imx27_add_spi_imx0(&pcm038_spi0_data);
+ pcm038_spi_board_info[0].irq = gpio_to_irq(IMX_GPIO_NR(2, 23));
spi_register_board_info(pcm038_spi_board_info,
ARRAY_SIZE(pcm038_spi_board_info));
@@ -332,8 +334,8 @@ static void __init pcm038_init(void)
imx27_add_fec(NULL);
platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));
- imx27_add_imx2_wdt(NULL);
- imx27_add_mxc_w1(NULL);
+ imx27_add_imx2_wdt();
+ imx27_add_mxc_w1();
#ifdef CONFIG_MACH_PCM970_BASEBOARD
pcm970_baseboard_init();
diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c
index 73585f55cca0..e4bd4387e344 100644
--- a/arch/arm/mach-imx/mach-pcm043.c
+++ b/arch/arm/mach-imx/mach-pcm043.c
@@ -76,10 +76,6 @@ static const struct fb_videomode fb_modedb[] = {
},
};
-static const struct ipu_platform_data mx3_ipu_data __initconst = {
- .irq_base = MXC_IPU_IRQ_START,
-};
-
static struct mx3fb_platform_data mx3fb_pdata __initdata = {
.name = "Sharp-LQ035Q7",
.mode = fb_modedb,
@@ -330,18 +326,18 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
.phy_mode = FSL_USB2_PHY_UTMI,
};
-static int otg_mode_host;
+static bool otg_mode_host __initdata;
static int __init pcm043_otg_mode(char *options)
{
if (!strcmp(options, "host"))
- otg_mode_host = 1;
+ otg_mode_host = true;
else if (!strcmp(options, "device"))
- otg_mode_host = 0;
+ otg_mode_host = false;
else
pr_info("otg_mode neither \"host\" nor \"device\". "
"Defaulting to device\n");
- return 0;
+ return 1;
}
__setup("otg_mode=", pcm043_otg_mode);
@@ -363,7 +359,7 @@ static void __init pcm043_init(void)
imx35_add_fec(NULL);
platform_add_devices(devices, ARRAY_SIZE(devices));
- imx35_add_imx2_wdt(NULL);
+ imx35_add_imx2_wdt();
imx35_add_imx_uart0(&uart_pdata);
imx35_add_mxc_nand(&pcm037_nand_board_info);
@@ -376,7 +372,7 @@ static void __init pcm043_init(void)
imx35_add_imx_i2c0(&pcm043_i2c0_data);
- imx35_add_ipu_core(&mx3_ipu_data);
+ imx35_add_ipu_core();
imx35_add_mx3_sdc_fb(&mx3fb_pdata);
if (otg_mode_host) {
diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c
index 260621055b6b..fb25fbd31226 100644
--- a/arch/arm/mach-imx/mach-qong.c
+++ b/arch/arm/mach-imx/mach-qong.c
@@ -22,7 +22,6 @@
#include <linux/gpio.h>
#include <mach/hardware.h>
-#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
@@ -51,8 +50,6 @@
(QONG_FPGA_BASEADDR + QONG_DNET_ID * QONG_FPGA_PERIPH_SIZE)
#define QONG_DNET_SIZE 0x00001000
-#define QONG_FPGA_IRQ IOMUX_TO_IRQ(MX31_PIN_DTR_DCE1)
-
static const struct imxuart_platform_data uart_pdata __initconst = {
.flags = IMXUART_HAVE_RTSCTS,
};
@@ -78,8 +75,7 @@ static struct resource dnet_resources[] = {
.end = QONG_DNET_BASEADDR + QONG_DNET_SIZE - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = QONG_FPGA_IRQ,
- .end = QONG_FPGA_IRQ,
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ,
},
};
@@ -95,6 +91,10 @@ static int __init qong_init_dnet(void)
{
int ret;
+ dnet_resources[1].start =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1));
+ dnet_resources[1].end =
+ gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_DTR_DCE1));
ret = platform_device_register(&dnet_device);
return ret;
}
@@ -252,7 +252,7 @@ static void __init qong_init(void)
mxc_init_imx_uart();
qong_init_nor_mtd();
qong_init_fpga();
- imx31_add_imx2_wdt(NULL);
+ imx31_add_imx2_wdt();
}
static void __init qong_timer_init(void)
diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c
index cb9ceae2f648..67ff38e9a3ca 100644
--- a/arch/arm/mach-imx/mach-scb9328.c
+++ b/arch/arm/mach-imx/mach-scb9328.c
@@ -14,6 +14,7 @@
#include <linux/mtd/physmap.h>
#include <linux/interrupt.h>
#include <linux/dm9000.h>
+#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -21,7 +22,6 @@
#include <mach/common.h>
#include <mach/hardware.h>
-#include <mach/irqs.h>
#include <mach/iomux-mx1.h>
#include "devices-imx1.h"
@@ -78,8 +78,7 @@ static struct resource dm9000x_resources[] = {
.end = MX1_CS5_PHYS + 5,
.flags = IORESOURCE_MEM, /* data access */
}, {
- .start = IRQ_GPIOC(3),
- .end = IRQ_GPIOC(3),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
},
};
@@ -123,6 +122,8 @@ static void __init scb9328_init(void)
imx1_add_imx_uart0(&uart_pdata);
printk(KERN_INFO"Scb9328: Adding devices\n");
+ dm9000x_resources[2].start = gpio_to_irq(IMX_GPIO_NR(3, 3));
+ dm9000x_resources[2].end = gpio_to_irq(IMX_GPIO_NR(3, 3));
platform_add_devices(devices, ARRAY_SIZE(devices));
}
diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c
index add8c69c6c1a..39eb7960e2a4 100644
--- a/arch/arm/mach-imx/mach-vpr200.c
+++ b/arch/arm/mach-imx/mach-vpr200.c
@@ -31,7 +31,6 @@
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-mx35.h>
-#include <mach/irqs.h>
#include <linux/i2c.h>
#include <linux/i2c/at24.h>
@@ -87,10 +86,6 @@ static const struct fb_videomode fb_modedb[] = {
}
};
-static const struct ipu_platform_data mx3_ipu_data __initconst = {
- .irq_base = MXC_IPU_IRQ_START,
-};
-
static struct mx3fb_platform_data mx3fb_pdata __initdata = {
.name = "PT0708048",
.mode = fb_modedb,
@@ -162,7 +157,7 @@ static struct i2c_board_info vpr200_i2c_devices[] = {
}, {
I2C_BOARD_INFO("mc13892", 0x08),
.platform_data = &vpr200_pmic,
- .irq = IMX_GPIO_TO_IRQ(GPIO_PMIC_INT),
+ /* irq number is run-time assigned */
}
};
@@ -272,7 +267,7 @@ static void __init vpr200_board_init(void)
mxc_iomux_v3_setup_multiple_pads(vpr200_pads, ARRAY_SIZE(vpr200_pads));
imx35_add_fec(NULL);
- imx35_add_imx2_wdt(NULL);
+ imx35_add_imx2_wdt();
imx_add_gpio_keys(&vpr200_gpio_keys_data);
platform_add_devices(devices, ARRAY_SIZE(devices));
@@ -290,7 +285,7 @@ static void __init vpr200_board_init(void)
imx35_add_imx_uart0(NULL);
imx35_add_imx_uart2(NULL);
- imx35_add_ipu_core(&mx3_ipu_data);
+ imx35_add_ipu_core();
imx35_add_mx3_sdc_fb(&mx3fb_pdata);
imx35_add_fsl_usb2_udc(&otg_device_pdata);
@@ -299,6 +294,7 @@ static void __init vpr200_board_init(void)
imx35_add_mxc_nand(&vpr200_nand_board_info);
imx35_add_sdhci_esdhc_imx(0, NULL);
+ vpr200_i2c_devices[1].irq = gpio_to_irq(GPIO_PMIC_INT);
i2c_register_board_info(0, vpr200_i2c_devices,
ARRAY_SIZE(vpr200_i2c_devices));
diff --git a/arch/arm/mach-imx/mm-imx1.c b/arch/arm/mach-imx/mm-imx1.c
index fcafd3dafb8c..6d60d51868bc 100644
--- a/arch/arm/mach-imx/mm-imx1.c
+++ b/arch/arm/mach-imx/mm-imx1.c
@@ -24,7 +24,6 @@
#include <mach/common.h>
#include <mach/hardware.h>
-#include <mach/irqs.h>
#include <mach/iomux-v1.h>
static struct map_desc imx_io_desc[] __initdata = {
diff --git a/arch/arm/mach-imx/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c
index 5f43905e5290..d056dad0940d 100644
--- a/arch/arm/mach-imx/mm-imx21.c
+++ b/arch/arm/mach-imx/mm-imx21.c
@@ -26,7 +26,6 @@
#include <mach/devices-common.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
-#include <mach/irqs.h>
#include <mach/iomux-v1.h>
/* MX21 memory map definition */
diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c
index 6ff37140a4f8..388928fdb11a 100644
--- a/arch/arm/mach-imx/mm-imx25.c
+++ b/arch/arm/mach-imx/mm-imx25.c
@@ -29,7 +29,6 @@
#include <mach/hardware.h>
#include <mach/mx25.h>
#include <mach/iomux-v3.h>
-#include <mach/irqs.h>
/*
* This table defines static virtual address mappings for I/O regions.
diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c
index 25662558e018..e7e24afc45ed 100644
--- a/arch/arm/mach-imx/mm-imx27.c
+++ b/arch/arm/mach-imx/mm-imx27.c
@@ -26,7 +26,6 @@
#include <mach/devices-common.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
-#include <mach/irqs.h>
#include <mach/iomux-v1.h>
/* MX27 memory map definition */
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index a8983b9778d1..fe96105109b3 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -30,7 +30,6 @@
#include <mach/devices-common.h>
#include <mach/hardware.h>
#include <mach/iomux-v3.h>
-#include <mach/irqs.h>
#include "crmregs-imx3.h"
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index 1d003053d562..f19d604e1b2a 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -16,7 +16,6 @@
#include <linux/clk.h>
#include <linux/pinctrl/machine.h>
-#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <mach/hardware.h>
@@ -24,24 +23,6 @@
#include <mach/devices-common.h>
#include <mach/iomux-v3.h>
-static struct clk *gpc_dvfs_clk;
-
-static void imx5_idle(void)
-{
- /* gpc clock is needed for SRPG */
- if (gpc_dvfs_clk == NULL) {
- gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
- if (IS_ERR(gpc_dvfs_clk))
- return;
- clk_prepare(gpc_dvfs_clk);
- }
- clk_enable(gpc_dvfs_clk);
- mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
- if (!tzic_enable_wake())
- cpu_do_idle();
- clk_disable(gpc_dvfs_clk);
-}
-
/*
* Define the MX50 memory map.
*/
@@ -105,7 +86,6 @@ void __init imx51_init_early(void)
mxc_set_cpu_type(MXC_CPU_MX51);
mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR));
- arm_pm_idle = imx5_idle;
}
void __init imx53_init_early(void)
@@ -243,4 +223,10 @@ void __init imx53_soc_init(void)
void __init imx51_init_late(void)
{
mx51_neon_fixup();
+ imx51_pm_init();
+}
+
+void __init imx53_init_late(void)
+{
+ imx53_pm_init();
}
diff --git a/arch/arm/mach-imx/mx31lilly-db.c b/arch/arm/mach-imx/mx31lilly-db.c
index 7d26f766a4ee..29e890f92055 100644
--- a/arch/arm/mach-imx/mx31lilly-db.c
+++ b/arch/arm/mach-imx/mx31lilly-db.c
@@ -130,7 +130,8 @@ static int mxc_mmc1_init(struct device *dev,
gpio_direction_input(gpio_det);
gpio_direction_input(gpio_wp);
- ret = request_irq(IOMUX_TO_IRQ(MX31_PIN_GPIO1_1), detect_irq,
+ ret = request_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1)),
+ detect_irq,
IRQF_DISABLED | IRQF_TRIGGER_FALLING,
"MMC detect", data);
if (ret)
@@ -151,7 +152,7 @@ static void mxc_mmc1_exit(struct device *dev, void *data)
{
gpio_free(gpio_det);
gpio_free(gpio_wp);
- free_irq(IOMUX_TO_IRQ(MX31_PIN_GPIO1_1), data);
+ free_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_GPIO1_1)), data);
}
static const struct imxmmc_platform_data mmc_pdata __initconst = {
@@ -161,10 +162,6 @@ static const struct imxmmc_platform_data mmc_pdata __initconst = {
};
/* Framebuffer support */
-static const struct ipu_platform_data ipu_data __initconst = {
- .irq_base = MXC_IPU_IRQ_START,
-};
-
static const struct fb_videomode fb_modedb = {
/* 640x480 TFT panel (IPS-056T) */
.name = "CRT-VGA",
@@ -198,7 +195,7 @@ static void __init mx31lilly_init_fb(void)
return;
}
- imx31_add_ipu_core(&ipu_data);
+ imx31_add_ipu_core();
imx31_add_mx3_sdc_fb(&fb_pdata);
gpio_direction_output(LCD_VCC_EN_GPIO, 1);
}
diff --git a/arch/arm/mach-imx/mx31lite-db.c b/arch/arm/mach-imx/mx31lite-db.c
index bf0fb87946ba..83d17d9e0bc8 100644
--- a/arch/arm/mach-imx/mx31lite-db.c
+++ b/arch/arm/mach-imx/mx31lite-db.c
@@ -116,7 +116,8 @@ static int mxc_mmc1_init(struct device *dev,
gpio_direction_input(gpio_det);
gpio_direction_input(gpio_wp);
- ret = request_irq(IOMUX_TO_IRQ(MX31_PIN_DCD_DCE1), detect_irq,
+ ret = request_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1)),
+ detect_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"MMC detect", data);
if (ret)
@@ -137,7 +138,7 @@ static void mxc_mmc1_exit(struct device *dev, void *data)
{
gpio_free(gpio_det);
gpio_free(gpio_wp);
- free_irq(IOMUX_TO_IRQ(MX31_PIN_DCD_DCE1), data);
+ free_irq(gpio_to_irq(IOMUX_TO_GPIO(MX31_PIN_DCD_DCE1)), data);
}
static const struct imxmmc_platform_data mmc_pdata __initconst = {
@@ -191,6 +192,6 @@ void __init mx31lite_db_init(void)
imx31_add_mxc_mmc(0, &mmc_pdata);
imx31_add_spi_imx0(&spi0_pdata);
gpio_led_register_device(-1, &litekit_led_platform_data);
- imx31_add_imx2_wdt(NULL);
- imx31_add_mxc_rtc(NULL);
+ imx31_add_imx2_wdt();
+ imx31_add_mxc_rtc();
}
diff --git a/arch/arm/mach-imx/mx51_efika.c b/arch/arm/mach-imx/mx51_efika.c
index ec6ca91b299b..ee870c49bc63 100644
--- a/arch/arm/mach-imx/mx51_efika.c
+++ b/arch/arm/mach-imx/mx51_efika.c
@@ -587,7 +587,7 @@ static struct spi_board_info mx51_efika_spi_board_info[] __initdata = {
.bus_num = 0,
.chip_select = 0,
.platform_data = &mx51_efika_mc13892_data,
- .irq = IMX_GPIO_TO_IRQ(EFIKAMX_PMIC),
+ /* irq number is run-time assigned */
},
};
@@ -620,6 +620,7 @@ void __init efika_board_common_init(void)
gpio_request(EFIKAMX_PMIC, "pmic irq");
gpio_direction_input(EFIKAMX_PMIC);
+ mx51_efika_spi_board_info[1].irq = gpio_to_irq(EFIKAMX_PMIC);
spi_register_board_info(mx51_efika_spi_board_info,
ARRAY_SIZE(mx51_efika_spi_board_info));
imx51_add_ecspi(0, &mx51_efika_spi_pdata);
diff --git a/arch/arm/mach-imx/pcm970-baseboard.c b/arch/arm/mach-imx/pcm970-baseboard.c
index 99afbc3f43a3..9917e2ff51da 100644
--- a/arch/arm/mach-imx/pcm970-baseboard.c
+++ b/arch/arm/mach-imx/pcm970-baseboard.c
@@ -95,14 +95,14 @@ static int pcm970_sdhc2_init(struct device *dev, irq_handler_t detect_irq, void
{
int ret;
- ret = request_irq(IRQ_GPIOC(29), detect_irq, IRQF_TRIGGER_FALLING,
- "imx-mmc-detect", data);
+ ret = request_irq(gpio_to_irq(IMX_GPIO_NR(3, 29)), detect_irq,
+ IRQF_TRIGGER_FALLING, "imx-mmc-detect", data);
if (ret)
return ret;
ret = gpio_request(GPIO_PORTC + 28, "imx-mmc-ro");
if (ret) {
- free_irq(IRQ_GPIOC(29), data);
+ free_irq(gpio_to_irq(IMX_GPIO_NR(3, 29)), data);
return ret;
}
@@ -113,7 +113,7 @@ static int pcm970_sdhc2_init(struct device *dev, irq_handler_t detect_irq, void
static void pcm970_sdhc2_exit(struct device *dev, void *data)
{
- free_irq(IRQ_GPIOC(29), data);
+ free_irq(gpio_to_irq(IMX_GPIO_NR(3, 29)), data);
gpio_free(GPIO_PORTC + 28);
}
@@ -192,8 +192,7 @@ static struct resource pcm970_sja1000_resources[] = {
.end = MX27_CS4_BASE_ADDR + 0x100 - 1,
.flags = IORESOURCE_MEM,
}, {
- .start = IRQ_GPIOE(19),
- .end = IRQ_GPIOE(19),
+ /* irq number is run-time assigned */
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
},
};
@@ -227,5 +226,7 @@ void __init pcm970_baseboard_init(void)
imx27_add_imx_fb(&pcm038_fb_data);
mxc_gpio_mode(GPIO_PORTC | 28 | GPIO_GPIO | GPIO_IN);
imx27_add_mxc_mmc(1, &sdhc_pdata);
+ pcm970_sja1000_resources[1].start = gpio_to_irq(IMX_GPIO_NR(5, 19));
+ pcm970_sja1000_resources[1].end = gpio_to_irq(IMX_GPIO_NR(5, 19));
platform_device_register(&pcm970_sja1000);
}
diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c
index e26a9cb05ed8..19621ed1ffa5 100644
--- a/arch/arm/mach-imx/pm-imx5.c
+++ b/arch/arm/mach-imx/pm-imx5.c
@@ -12,19 +12,30 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <asm/cacheflush.h>
+#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <mach/common.h>
+#include <mach/cpuidle.h>
#include <mach/hardware.h>
#include "crm-regs-imx5.h"
-static struct clk *gpc_dvfs_clk;
+/*
+ * The WAIT_UNCLOCKED_POWER_OFF state only requires <= 500ns to exit.
+ * This is also the lowest power state possible without affecting
+ * non-cpu parts of the system. For these reasons, imx5 should default
+ * to always using this state for cpu idling. The PM_SUSPEND_STANDBY also
+ * uses this state and needs to take no action when registers remain confgiured
+ * for this state.
+ */
+#define IMX5_DEFAULT_CPU_IDLE_STATE WAIT_UNCLOCKED_POWER_OFF
/*
* set cpu low power mode before WFI instruction. This function is called
* mx5 because it can be used for mx50, mx51, and mx53.
*/
-void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
+static void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
{
u32 plat_lpc, arm_srpgcr, ccm_clpcr;
u32 empgc0, empgc1;
@@ -87,11 +98,6 @@ void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
}
}
-static int mx5_suspend_prepare(void)
-{
- return clk_prepare_enable(gpc_dvfs_clk);
-}
-
static int mx5_suspend_enter(suspend_state_t state)
{
switch (state) {
@@ -99,7 +105,7 @@ static int mx5_suspend_enter(suspend_state_t state)
mx5_cpu_lp_set(STOP_POWER_OFF);
break;
case PM_SUSPEND_STANDBY:
- mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+ /* DEFAULT_IDLE_STATE already configured */
break;
default:
return -EINVAL;
@@ -114,12 +120,10 @@ static int mx5_suspend_enter(suspend_state_t state)
__raw_writel(0, MXC_SRPG_EMPGC1_SRPGCR);
}
cpu_do_idle();
- return 0;
-}
-static void mx5_suspend_finish(void)
-{
- clk_disable_unprepare(gpc_dvfs_clk);
+ /* return registers to default idle state */
+ mx5_cpu_lp_set(IMX5_DEFAULT_CPU_IDLE_STATE);
+ return 0;
}
static int mx5_pm_valid(suspend_state_t state)
@@ -129,25 +133,80 @@ static int mx5_pm_valid(suspend_state_t state)
static const struct platform_suspend_ops mx5_suspend_ops = {
.valid = mx5_pm_valid,
- .prepare = mx5_suspend_prepare,
.enter = mx5_suspend_enter,
- .finish = mx5_suspend_finish,
};
-static int __init mx5_pm_init(void)
+static inline int imx5_cpu_do_idle(void)
+{
+ int ret = tzic_enable_wake();
+
+ if (likely(!ret))
+ cpu_do_idle();
+
+ return ret;
+}
+
+static void imx5_pm_idle(void)
+{
+ imx5_cpu_do_idle();
+}
+
+static int imx5_cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ int ret;
+
+ ret = imx5_cpu_do_idle();
+ if (ret < 0)
+ return ret;
+
+ return idx;
+}
+
+static struct cpuidle_driver imx5_cpuidle_driver = {
+ .name = "imx5_cpuidle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .states[0] = {
+ .enter = imx5_cpuidle_enter,
+ .exit_latency = 2,
+ .target_residency = 1,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "IMX5 SRPG",
+ .desc = "CPU state retained,powered off",
+ },
+ .state_count = 1,
+};
+
+static int __init imx5_pm_common_init(void)
{
- if (!cpu_is_mx51() && !cpu_is_mx53())
- return 0;
+ int ret;
+ struct clk *gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
- if (gpc_dvfs_clk == NULL)
- gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
+ if (IS_ERR(gpc_dvfs_clk))
+ return PTR_ERR(gpc_dvfs_clk);
- if (!IS_ERR(gpc_dvfs_clk)) {
- if (cpu_is_mx51())
- suspend_set_ops(&mx5_suspend_ops);
- } else
- return -EPERM;
+ ret = clk_prepare_enable(gpc_dvfs_clk);
+ if (ret)
+ return ret;
+ arm_pm_idle = imx5_pm_idle;
+
+ /* Set the registers to the default cpu idle state. */
+ mx5_cpu_lp_set(IMX5_DEFAULT_CPU_IDLE_STATE);
+
+ imx_cpuidle_init(&imx5_cpuidle_driver);
return 0;
}
-device_initcall(mx5_pm_init);
+
+void __init imx51_pm_init(void)
+{
+ int ret = imx5_pm_common_init();
+ if (!ret)
+ suspend_set_ops(&mx5_suspend_ops);
+}
+
+void __init imx53_pm_init(void)
+{
+ imx5_pm_common_init();
+}
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index eaf6c6366ffa..ebf680bebdf2 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -21,7 +21,6 @@
#include <linux/amba/bus.h>
#include <linux/amba/serial.h>
#include <linux/io.h>
-#include <linux/clkdev.h>
#include <mach/hardware.h>
#include <mach/platform.h>
@@ -41,17 +40,17 @@ static struct amba_pl010_data integrator_uart_data;
#define KMI0_IRQ { IRQ_KMIINT0 }
#define KMI1_IRQ { IRQ_KMIINT1 }
-static AMBA_APB_DEVICE(rtc, "mb:15", 0,
+static AMBA_APB_DEVICE(rtc, "rtc", 0,
INTEGRATOR_RTC_BASE, INTEGRATOR_RTC_IRQ, NULL);
-static AMBA_APB_DEVICE(uart0, "mb:16", 0,
+static AMBA_APB_DEVICE(uart0, "uart0", 0,
INTEGRATOR_UART0_BASE, INTEGRATOR_UART0_IRQ, &integrator_uart_data);
-static AMBA_APB_DEVICE(uart1, "mb:17", 0,
+static AMBA_APB_DEVICE(uart1, "uart1", 0,
INTEGRATOR_UART1_BASE, INTEGRATOR_UART1_IRQ, &integrator_uart_data);
-static AMBA_APB_DEVICE(kmi0, "mb:18", 0, KMI0_BASE, KMI0_IRQ, NULL);
-static AMBA_APB_DEVICE(kmi1, "mb:19", 0, KMI1_BASE, KMI1_IRQ, NULL);
+static AMBA_APB_DEVICE(kmi0, "kmi0", 0, KMI0_BASE, KMI0_IRQ, NULL);
+static AMBA_APB_DEVICE(kmi1, "kmi1", 0, KMI1_BASE, KMI1_IRQ, NULL);
static struct amba_device *amba_devs[] __initdata = {
&rtc_device,
@@ -61,50 +60,6 @@ static struct amba_device *amba_devs[] __initdata = {
&kmi1_device,
};
-/*
- * These are fixed clocks.
- */
-static struct clk clk24mhz = {
- .rate = 24000000,
-};
-
-static struct clk uartclk = {
- .rate = 14745600,
-};
-
-static struct clk dummy_apb_pclk;
-
-static struct clk_lookup lookups[] = {
- { /* Bus clock */
- .con_id = "apb_pclk",
- .clk = &dummy_apb_pclk,
- }, {
- /* Integrator/AP timer frequency */
- .dev_id = "ap_timer",
- .clk = &clk24mhz,
- }, { /* UART0 */
- .dev_id = "mb:16",
- .clk = &uartclk,
- }, { /* UART1 */
- .dev_id = "mb:17",
- .clk = &uartclk,
- }, { /* KMI0 */
- .dev_id = "mb:18",
- .clk = &clk24mhz,
- }, { /* KMI1 */
- .dev_id = "mb:19",
- .clk = &clk24mhz,
- }, { /* MMCI - IntegratorCP */
- .dev_id = "mb:1c",
- .clk = &uartclk,
- }
-};
-
-void __init integrator_init_early(void)
-{
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-}
-
static int __init integrator_init(void)
{
int i;
diff --git a/arch/arm/mach-integrator/include/mach/clkdev.h b/arch/arm/mach-integrator/include/mach/clkdev.h
deleted file mode 100644
index bfe07679faec..000000000000
--- a/arch/arm/mach-integrator/include/mach/clkdev.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ASM_MACH_CLKDEV_H
-#define __ASM_MACH_CLKDEV_H
-
-#include <linux/module.h>
-#include <plat/clock.h>
-
-struct clk {
- unsigned long rate;
- const struct clk_ops *ops;
- struct module *owner;
- const struct icst_params *params;
- void __iomem *vcoreg;
- void *data;
-};
-
-static inline int __clk_get(struct clk *clk)
-{
- return try_module_get(clk->owner);
-}
-
-static inline void __clk_put(struct clk *clk)
-{
- module_put(clk->owner);
-}
-
-#endif
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index c857501c5783..7b1055c8e0b9 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -33,6 +33,7 @@
#include <linux/io.h>
#include <linux/mtd/physmap.h>
#include <linux/clk.h>
+#include <linux/platform_data/clk-integrator.h>
#include <video/vga.h>
#include <mach/hardware.h>
@@ -174,6 +175,7 @@ static void __init ap_init_irq(void)
fpga_irq_init(VA_IC_BASE, "SC", IRQ_PIC_START,
-1, INTEGRATOR_SC_VALID_INT, NULL);
+ integrator_clk_init(false);
}
#ifdef CONFIG_PM
@@ -440,6 +442,10 @@ static void integrator_clockevent_init(unsigned long inrate)
0xffffU);
}
+void __init ap_init_early(void)
+{
+}
+
/*
* Set up timer(s).
*/
@@ -471,7 +477,7 @@ MACHINE_START(INTEGRATOR, "ARM-Integrator")
.reserve = integrator_reserve,
.map_io = ap_map_io,
.nr_irqs = NR_IRQS_INTEGRATOR_AP,
- .init_early = integrator_init_early,
+ .init_early = ap_init_early,
.init_irq = ap_init_irq,
.handle_irq = fpga_handle_irq,
.timer = &ap_timer,
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index a56c53608939..82d5c837cc74 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -21,8 +21,8 @@
#include <linux/amba/mmci.h>
#include <linux/io.h>
#include <linux/gfp.h>
-#include <linux/clkdev.h>
#include <linux/mtd/physmap.h>
+#include <linux/platform_data/clk-integrator.h>
#include <mach/hardware.h>
#include <mach/platform.h>
@@ -171,65 +171,10 @@ static void __init intcp_init_irq(void)
fpga_irq_init(INTCP_VA_SIC_BASE, "SIC", IRQ_SIC_START,
IRQ_CP_CPPLDINT, sic_mask, NULL);
+ integrator_clk_init(true);
}
/*
- * Clock handling
- */
-#define CM_LOCK (__io_address(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_LOCK_OFFSET)
-#define CM_AUXOSC (__io_address(INTEGRATOR_HDR_BASE)+0x1c)
-
-static const struct icst_params cp_auxvco_params = {
- .ref = 24000000,
- .vco_max = ICST525_VCO_MAX_5V,
- .vco_min = ICST525_VCO_MIN,
- .vd_min = 8,
- .vd_max = 263,
- .rd_min = 3,
- .rd_max = 65,
- .s2div = icst525_s2div,
- .idx2s = icst525_idx2s,
-};
-
-static void cp_auxvco_set(struct clk *clk, struct icst_vco vco)
-{
- u32 val;
-
- val = readl(clk->vcoreg) & ~0x7ffff;
- val |= vco.v | (vco.r << 9) | (vco.s << 16);
-
- writel(0xa05f, CM_LOCK);
- writel(val, clk->vcoreg);
- writel(0, CM_LOCK);
-}
-
-static const struct clk_ops cp_auxclk_ops = {
- .round = icst_clk_round,
- .set = icst_clk_set,
- .setvco = cp_auxvco_set,
-};
-
-static struct clk cp_auxclk = {
- .ops = &cp_auxclk_ops,
- .params = &cp_auxvco_params,
- .vcoreg = CM_AUXOSC,
-};
-
-static struct clk sp804_clk = {
- .rate = 1000000,
-};
-
-static struct clk_lookup cp_lookups[] = {
- { /* CLCD */
- .dev_id = "mb:c0",
- .clk = &cp_auxclk,
- }, { /* SP804 timers */
- .dev_id = "sp804",
- .clk = &sp804_clk,
- },
-};
-
-/*
* Flash handling.
*/
static int intcp_flash_init(struct platform_device *dev)
@@ -336,10 +281,10 @@ static struct mmci_platform_data mmc_data = {
#define INTEGRATOR_CP_MMC_IRQS { IRQ_CP_MMCIINT0, IRQ_CP_MMCIINT1 }
#define INTEGRATOR_CP_AACI_IRQS { IRQ_CP_AACIINT }
-static AMBA_APB_DEVICE(mmc, "mb:1c", 0, INTEGRATOR_CP_MMC_BASE,
+static AMBA_APB_DEVICE(mmc, "mmci", 0, INTEGRATOR_CP_MMC_BASE,
INTEGRATOR_CP_MMC_IRQS, &mmc_data);
-static AMBA_APB_DEVICE(aaci, "mb:1d", 0, INTEGRATOR_CP_AACI_BASE,
+static AMBA_APB_DEVICE(aaci, "aaci", 0, INTEGRATOR_CP_AACI_BASE,
INTEGRATOR_CP_AACI_IRQS, NULL);
@@ -393,7 +338,7 @@ static struct clcd_board clcd_data = {
.remove = versatile_clcd_remove_dma,
};
-static AMBA_AHB_DEVICE(clcd, "mb:c0", 0, INTCP_PA_CLCD_BASE,
+static AMBA_AHB_DEVICE(clcd, "clcd", 0, INTCP_PA_CLCD_BASE,
{ IRQ_CP_CLCDCINT }, &clcd_data);
static struct amba_device *amba_devs[] __initdata = {
@@ -406,10 +351,6 @@ static struct amba_device *amba_devs[] __initdata = {
static void __init intcp_init_early(void)
{
- clkdev_add_table(cp_lookups, ARRAY_SIZE(cp_lookups));
-
- integrator_init_early();
-
#ifdef CONFIG_PLAT_VERSATILE_SCHED_CLOCK
versatile_sched_clock_init(REFCOUNTER, 24000000);
#endif
diff --git a/arch/arm/mach-lpc32xx/Kconfig b/arch/arm/mach-lpc32xx/Kconfig
deleted file mode 100644
index e0b3eee83834..000000000000
--- a/arch/arm/mach-lpc32xx/Kconfig
+++ /dev/null
@@ -1,32 +0,0 @@
-if ARCH_LPC32XX
-
-menu "Individual UART enable selections"
-
-config ARCH_LPC32XX_UART3_SELECT
- bool "Add support for standard UART3"
- help
- Adds support for standard UART 3 when the 8250 serial support
- is enabled.
-
-config ARCH_LPC32XX_UART4_SELECT
- bool "Add support for standard UART4"
- help
- Adds support for standard UART 4 when the 8250 serial support
- is enabled.
-
-config ARCH_LPC32XX_UART5_SELECT
- bool "Add support for standard UART5"
- default y
- help
- Adds support for standard UART 5 when the 8250 serial support
- is enabled.
-
-config ARCH_LPC32XX_UART6_SELECT
- bool "Add support for standard UART6"
- help
- Adds support for standard UART 6 when the 8250 serial support
- is enabled.
-
-endmenu
-
-endif
diff --git a/arch/arm/mach-lpc32xx/Makefile.boot b/arch/arm/mach-lpc32xx/Makefile.boot
index 2cfe0ee635c5..697323b5f92d 100644
--- a/arch/arm/mach-lpc32xx/Makefile.boot
+++ b/arch/arm/mach-lpc32xx/Makefile.boot
@@ -2,3 +2,4 @@
params_phys-y := 0x80000100
initrd_phys-y := 0x82000000
+dtb-$(CONFIG_ARCH_LPC32XX) += ea3250.dtb phy3250.dtb
diff --git a/arch/arm/mach-lpc32xx/clock.c b/arch/arm/mach-lpc32xx/clock.c
index f6a3ffec1f4b..f48c2e961b84 100644
--- a/arch/arm/mach-lpc32xx/clock.c
+++ b/arch/arm/mach-lpc32xx/clock.c
@@ -607,6 +607,19 @@ static struct clk clk_dma = {
.get_rate = local_return_parent_rate,
};
+static struct clk clk_pwm = {
+ .parent = &clk_pclk,
+ .enable = local_onoff_enable,
+ .enable_reg = LPC32XX_CLKPWR_PWM_CLK_CTRL,
+ .enable_mask = LPC32XX_CLKPWR_PWMCLK_PWM1CLK_EN |
+ LPC32XX_CLKPWR_PWMCLK_PWM1SEL_PCLK |
+ LPC32XX_CLKPWR_PWMCLK_PWM1_DIV(1) |
+ LPC32XX_CLKPWR_PWMCLK_PWM2CLK_EN |
+ LPC32XX_CLKPWR_PWMCLK_PWM2SEL_PCLK |
+ LPC32XX_CLKPWR_PWMCLK_PWM2_DIV(1),
+ .get_rate = local_return_parent_rate,
+};
+
static struct clk clk_uart3 = {
.parent = &clk_pclk,
.enable = local_onoff_enable,
@@ -691,10 +704,21 @@ static struct clk clk_nand = {
.parent = &clk_hclk,
.enable = local_onoff_enable,
.enable_reg = LPC32XX_CLKPWR_NAND_CLK_CTRL,
- .enable_mask = LPC32XX_CLKPWR_NANDCLK_SLCCLK_EN,
+ .enable_mask = LPC32XX_CLKPWR_NANDCLK_SLCCLK_EN |
+ LPC32XX_CLKPWR_NANDCLK_SEL_SLC,
.get_rate = local_return_parent_rate,
};
+static struct clk clk_nand_mlc = {
+ .parent = &clk_hclk,
+ .enable = local_onoff_enable,
+ .enable_reg = LPC32XX_CLKPWR_NAND_CLK_CTRL,
+ .enable_mask = LPC32XX_CLKPWR_NANDCLK_MLCCLK_EN |
+ LPC32XX_CLKPWR_NANDCLK_DMA_INT |
+ LPC32XX_CLKPWR_NANDCLK_INTSEL_MLC,
+ .get_rate = local_return_parent_rate,
+};
+
static struct clk clk_i2s0 = {
.parent = &clk_hclk,
.enable = local_onoff_enable,
@@ -707,7 +731,8 @@ static struct clk clk_i2s1 = {
.parent = &clk_hclk,
.enable = local_onoff_enable,
.enable_reg = LPC32XX_CLKPWR_I2S_CLK_CTRL,
- .enable_mask = LPC32XX_CLKPWR_I2SCTRL_I2SCLK1_EN,
+ .enable_mask = LPC32XX_CLKPWR_I2SCTRL_I2SCLK1_EN |
+ LPC32XX_CLKPWR_I2SCTRL_I2S1_USE_DMA,
.get_rate = local_return_parent_rate,
};
@@ -727,14 +752,77 @@ static struct clk clk_rtc = {
.get_rate = local_return_parent_rate,
};
+static int local_usb_enable(struct clk *clk, int enable)
+{
+ u32 tmp;
+
+ if (enable) {
+ /* Set up I2C pull levels */
+ tmp = __raw_readl(LPC32XX_CLKPWR_I2C_CLK_CTRL);
+ tmp |= LPC32XX_CLKPWR_I2CCLK_USBI2CHI_DRIVE;
+ __raw_writel(tmp, LPC32XX_CLKPWR_I2C_CLK_CTRL);
+ }
+
+ return local_onoff_enable(clk, enable);
+}
+
static struct clk clk_usbd = {
.parent = &clk_usbpll,
- .enable = local_onoff_enable,
+ .enable = local_usb_enable,
.enable_reg = LPC32XX_CLKPWR_USB_CTRL,
.enable_mask = LPC32XX_CLKPWR_USBCTRL_HCLK_EN,
.get_rate = local_return_parent_rate,
};
+#define OTG_ALWAYS_MASK (LPC32XX_USB_OTG_OTG_CLOCK_ON | \
+ LPC32XX_USB_OTG_I2C_CLOCK_ON)
+
+static int local_usb_otg_enable(struct clk *clk, int enable)
+{
+ int to = 1000;
+
+ if (enable) {
+ __raw_writel(clk->enable_mask, clk->enable_reg);
+
+ while (((__raw_readl(LPC32XX_USB_OTG_CLK_STAT) &
+ clk->enable_mask) != clk->enable_mask) && (to > 0))
+ to--;
+ } else {
+ __raw_writel(OTG_ALWAYS_MASK, clk->enable_reg);
+
+ while (((__raw_readl(LPC32XX_USB_OTG_CLK_STAT) &
+ OTG_ALWAYS_MASK) != OTG_ALWAYS_MASK) && (to > 0))
+ to--;
+ }
+
+ if (to)
+ return 0;
+ else
+ return -1;
+}
+
+static struct clk clk_usb_otg_dev = {
+ .parent = &clk_usbpll,
+ .enable = local_usb_otg_enable,
+ .enable_reg = LPC32XX_USB_OTG_CLK_CTRL,
+ .enable_mask = LPC32XX_USB_OTG_AHB_M_CLOCK_ON |
+ LPC32XX_USB_OTG_OTG_CLOCK_ON |
+ LPC32XX_USB_OTG_DEV_CLOCK_ON |
+ LPC32XX_USB_OTG_I2C_CLOCK_ON,
+ .get_rate = local_return_parent_rate,
+};
+
+static struct clk clk_usb_otg_host = {
+ .parent = &clk_usbpll,
+ .enable = local_usb_otg_enable,
+ .enable_reg = LPC32XX_USB_OTG_CLK_CTRL,
+ .enable_mask = LPC32XX_USB_OTG_AHB_M_CLOCK_ON |
+ LPC32XX_USB_OTG_OTG_CLOCK_ON |
+ LPC32XX_USB_OTG_HOST_CLOCK_ON |
+ LPC32XX_USB_OTG_I2C_CLOCK_ON,
+ .get_rate = local_return_parent_rate,
+};
+
static int tsc_onoff_enable(struct clk *clk, int enable)
{
u32 tmp;
@@ -800,11 +888,17 @@ static int mmc_onoff_enable(struct clk *clk, int enable)
u32 tmp;
tmp = __raw_readl(LPC32XX_CLKPWR_MS_CTRL) &
- ~LPC32XX_CLKPWR_MSCARD_SDCARD_EN;
+ ~(LPC32XX_CLKPWR_MSCARD_SDCARD_EN |
+ LPC32XX_CLKPWR_MSCARD_MSDIO_PU_EN |
+ LPC32XX_CLKPWR_MSCARD_MSDIO_PIN_DIS |
+ LPC32XX_CLKPWR_MSCARD_MSDIO0_DIS |
+ LPC32XX_CLKPWR_MSCARD_MSDIO1_DIS |
+ LPC32XX_CLKPWR_MSCARD_MSDIO23_DIS);
/* If rate is 0, disable clock */
if (enable != 0)
- tmp |= LPC32XX_CLKPWR_MSCARD_SDCARD_EN;
+ tmp |= LPC32XX_CLKPWR_MSCARD_SDCARD_EN |
+ LPC32XX_CLKPWR_MSCARD_MSDIO_PU_EN;
__raw_writel(tmp, LPC32XX_CLKPWR_MS_CTRL);
@@ -853,7 +947,7 @@ static unsigned long mmc_round_rate(struct clk *clk, unsigned long rate)
static int mmc_set_rate(struct clk *clk, unsigned long rate)
{
- u32 oldclk, tmp;
+ u32 tmp;
unsigned long prate, div, crate = mmc_round_rate(clk, rate);
prate = clk->parent->get_rate(clk->parent);
@@ -861,16 +955,12 @@ static int mmc_set_rate(struct clk *clk, unsigned long rate)
div = prate / crate;
/* The MMC clock must be on when accessing an MMC register */
- oldclk = __raw_readl(LPC32XX_CLKPWR_MS_CTRL);
- __raw_writel(oldclk | LPC32XX_CLKPWR_MSCARD_SDCARD_EN,
- LPC32XX_CLKPWR_MS_CTRL);
tmp = __raw_readl(LPC32XX_CLKPWR_MS_CTRL) &
~LPC32XX_CLKPWR_MSCARD_SDCARD_DIV(0xf);
- tmp |= LPC32XX_CLKPWR_MSCARD_SDCARD_DIV(div);
+ tmp |= LPC32XX_CLKPWR_MSCARD_SDCARD_DIV(div) |
+ LPC32XX_CLKPWR_MSCARD_SDCARD_EN;
__raw_writel(tmp, LPC32XX_CLKPWR_MS_CTRL);
- __raw_writel(oldclk, LPC32XX_CLKPWR_MS_CTRL);
-
return 0;
}
@@ -1111,6 +1201,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_INIT(NULL, "vfp9_ck", &clk_vfp9),
CLKDEV_INIT("pl08xdmac", NULL, &clk_dma),
CLKDEV_INIT("4003c000.watchdog", NULL, &clk_wdt),
+ CLKDEV_INIT("4005c000.pwm", NULL, &clk_pwm),
CLKDEV_INIT(NULL, "uart3_ck", &clk_uart3),
CLKDEV_INIT(NULL, "uart4_ck", &clk_uart4),
CLKDEV_INIT(NULL, "uart5_ck", &clk_uart5),
@@ -1120,8 +1211,9 @@ static struct clk_lookup lookups[] = {
CLKDEV_INIT("31020300.i2c", NULL, &clk_i2c2),
CLKDEV_INIT("dev:ssp0", NULL, &clk_ssp0),
CLKDEV_INIT("dev:ssp1", NULL, &clk_ssp1),
- CLKDEV_INIT("lpc32xx_keys.0", NULL, &clk_kscan),
- CLKDEV_INIT("lpc32xx-nand.0", "nand_ck", &clk_nand),
+ CLKDEV_INIT("40050000.key", NULL, &clk_kscan),
+ CLKDEV_INIT("20020000.flash", NULL, &clk_nand),
+ CLKDEV_INIT("200a8000.flash", NULL, &clk_nand_mlc),
CLKDEV_INIT("40048000.adc", NULL, &clk_adc),
CLKDEV_INIT(NULL, "i2s0_ck", &clk_i2s0),
CLKDEV_INIT(NULL, "i2s1_ck", &clk_i2s1),
@@ -1130,6 +1222,9 @@ static struct clk_lookup lookups[] = {
CLKDEV_INIT("31060000.ethernet", NULL, &clk_net),
CLKDEV_INIT("dev:clcd", NULL, &clk_lcd),
CLKDEV_INIT("31020000.usbd", "ck_usbd", &clk_usbd),
+ CLKDEV_INIT("31020000.ohci", "ck_usbd", &clk_usbd),
+ CLKDEV_INIT("31020000.usbd", "ck_usb_otg", &clk_usb_otg_dev),
+ CLKDEV_INIT("31020000.ohci", "ck_usb_otg", &clk_usb_otg_host),
CLKDEV_INIT("lpc32xx_rtc", NULL, &clk_rtc),
};
diff --git a/arch/arm/mach-lpc32xx/common.c b/arch/arm/mach-lpc32xx/common.c
index 5c96057b6d78..a48dc2dec485 100644
--- a/arch/arm/mach-lpc32xx/common.c
+++ b/arch/arm/mach-lpc32xx/common.c
@@ -26,6 +26,7 @@
#include <linux/io.h>
#include <asm/mach/map.h>
+#include <asm/system_info.h>
#include <mach/hardware.h>
#include <mach/platform.h>
@@ -224,7 +225,7 @@ void lpc23xx_restart(char mode, const char *cmd)
;
}
-static int __init lpc32xx_display_uid(void)
+static int __init lpc32xx_check_uid(void)
{
u32 uid[4];
@@ -233,6 +234,11 @@ static int __init lpc32xx_display_uid(void)
printk(KERN_INFO "LPC32XX unique ID: %08x%08x%08x%08x\n",
uid[3], uid[2], uid[1], uid[0]);
+ if (!system_serial_low && !system_serial_high) {
+ system_serial_low = uid[0];
+ system_serial_high = uid[1];
+ }
+
return 1;
}
-arch_initcall(lpc32xx_display_uid);
+arch_initcall(lpc32xx_check_uid);
diff --git a/arch/arm/mach-lpc32xx/include/mach/gpio.h b/arch/arm/mach-lpc32xx/include/mach/gpio.h
index 2ba6ca412bef..0052e7a76179 100644
--- a/arch/arm/mach-lpc32xx/include/mach/gpio.h
+++ b/arch/arm/mach-lpc32xx/include/mach/gpio.h
@@ -3,6 +3,4 @@
#include "gpio-lpc32xx.h"
-#define ARCH_NR_GPIOS (LPC32XX_GPO_P3_GRP + LPC32XX_GPO_P3_MAX)
-
#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-lpc32xx/include/mach/platform.h b/arch/arm/mach-lpc32xx/include/mach/platform.h
index c584f5bb164f..acc4aabf1c7b 100644
--- a/arch/arm/mach-lpc32xx/include/mach/platform.h
+++ b/arch/arm/mach-lpc32xx/include/mach/platform.h
@@ -694,4 +694,18 @@
#define LPC32XX_GPIO_P2_MUX_CLR _GPREG(0x02C)
#define LPC32XX_GPIO_P2_MUX_STATE _GPREG(0x030)
+/*
+ * USB Otg Registers
+ */
+#define _OTGREG(x) io_p2v(LPC32XX_USB_OTG_BASE + (x))
+#define LPC32XX_USB_OTG_CLK_CTRL _OTGREG(0xFF4)
+#define LPC32XX_USB_OTG_CLK_STAT _OTGREG(0xFF8)
+
+/* USB OTG CLK CTRL bit defines */
+#define LPC32XX_USB_OTG_AHB_M_CLOCK_ON _BIT(4)
+#define LPC32XX_USB_OTG_OTG_CLOCK_ON _BIT(3)
+#define LPC32XX_USB_OTG_I2C_CLOCK_ON _BIT(2)
+#define LPC32XX_USB_OTG_DEV_CLOCK_ON _BIT(1)
+#define LPC32XX_USB_OTG_HOST_CLOCK_ON _BIT(0)
+
#endif
diff --git a/arch/arm/mach-lpc32xx/phy3250.c b/arch/arm/mach-lpc32xx/phy3250.c
index 540106cdb9ec..b07dcc90829d 100644
--- a/arch/arm/mach-lpc32xx/phy3250.c
+++ b/arch/arm/mach-lpc32xx/phy3250.c
@@ -30,12 +30,13 @@
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <linux/amba/pl022.h>
+#include <linux/amba/pl08x.h>
+#include <linux/amba/mmci.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
-#include <linux/amba/pl08x.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
@@ -50,9 +51,9 @@
/*
* Mapped GPIOLIB GPIOs
*/
-#define SPI0_CS_GPIO LPC32XX_GPIO(LPC32XX_GPIO_P3_GRP, 5)
-#define LCD_POWER_GPIO LPC32XX_GPIO(LPC32XX_GPO_P3_GRP, 0)
-#define BKL_POWER_GPIO LPC32XX_GPIO(LPC32XX_GPO_P3_GRP, 4)
+#define LCD_POWER_GPIO LPC32XX_GPIO(LPC32XX_GPO_P3_GRP, 0)
+#define BKL_POWER_GPIO LPC32XX_GPIO(LPC32XX_GPO_P3_GRP, 4)
+#define MMC_PWR_ENABLE_GPIO LPC32XX_GPIO(LPC32XX_GPO_P3_GRP, 5)
/*
* AMBA LCD controller
@@ -158,24 +159,6 @@ static struct clcd_board lpc32xx_clcd_data = {
/*
* AMBA SSP (SPI)
*/
-static void phy3250_spi_cs_set(u32 control)
-{
- gpio_set_value(SPI0_CS_GPIO, (int) control);
-}
-
-static struct pl022_config_chip spi0_chip_info = {
- .com_mode = INTERRUPT_TRANSFER,
- .iface = SSP_INTERFACE_MOTOROLA_SPI,
- .hierarchy = SSP_MASTER,
- .slave_tx_disable = 0,
- .rx_lev_trig = SSP_RX_4_OR_MORE_ELEM,
- .tx_lev_trig = SSP_TX_4_OR_MORE_EMPTY_LOC,
- .ctrl_len = SSP_BITS_8,
- .wait_state = SSP_MWIRE_WAIT_ZERO,
- .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
- .cs_control = phy3250_spi_cs_set,
-};
-
static struct pl022_ssp_controller lpc32xx_ssp0_data = {
.bus_id = 0,
.num_chipselect = 1,
@@ -188,45 +171,56 @@ static struct pl022_ssp_controller lpc32xx_ssp1_data = {
.enable_dma = 0,
};
-/* AT25 driver registration */
-static int __init phy3250_spi_board_register(void)
+static struct pl08x_channel_data pl08x_slave_channels[] = {
+ {
+ .bus_id = "nand-slc",
+ .min_signal = 1, /* SLC NAND Flash */
+ .max_signal = 1,
+ .periph_buses = PL08X_AHB1,
+ },
+ {
+ .bus_id = "nand-mlc",
+ .min_signal = 12, /* MLC NAND Flash */
+ .max_signal = 12,
+ .periph_buses = PL08X_AHB1,
+ },
+};
+
+static int pl08x_get_signal(const struct pl08x_channel_data *cd)
+{
+ return cd->min_signal;
+}
+
+static void pl08x_put_signal(const struct pl08x_channel_data *cd, int ch)
{
-#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
- static struct spi_board_info info[] = {
- {
- .modalias = "spidev",
- .max_speed_hz = 5000000,
- .bus_num = 0,
- .chip_select = 0,
- .controller_data = &spi0_chip_info,
- },
- };
-
-#else
- static struct spi_eeprom eeprom = {
- .name = "at25256a",
- .byte_len = 0x8000,
- .page_size = 64,
- .flags = EE_ADDR2,
- };
-
- static struct spi_board_info info[] = {
- {
- .modalias = "at25",
- .max_speed_hz = 5000000,
- .bus_num = 0,
- .chip_select = 0,
- .mode = SPI_MODE_0,
- .platform_data = &eeprom,
- .controller_data = &spi0_chip_info,
- },
- };
-#endif
- return spi_register_board_info(info, ARRAY_SIZE(info));
}
-arch_initcall(phy3250_spi_board_register);
static struct pl08x_platform_data pl08x_pd = {
+ .slave_channels = &pl08x_slave_channels[0],
+ .num_slave_channels = ARRAY_SIZE(pl08x_slave_channels),
+ .get_signal = pl08x_get_signal,
+ .put_signal = pl08x_put_signal,
+ .lli_buses = PL08X_AHB1,
+ .mem_buses = PL08X_AHB1,
+};
+
+static int mmc_handle_ios(struct device *dev, struct mmc_ios *ios)
+{
+ /* Only on and off are supported */
+ if (ios->power_mode == MMC_POWER_OFF)
+ gpio_set_value(MMC_PWR_ENABLE_GPIO, 0);
+ else
+ gpio_set_value(MMC_PWR_ENABLE_GPIO, 1);
+ return 0;
+}
+
+static struct mmci_platform_data lpc32xx_mmci_data = {
+ .ocr_mask = MMC_VDD_30_31 | MMC_VDD_31_32 |
+ MMC_VDD_32_33 | MMC_VDD_33_34,
+ .ios_handler = mmc_handle_ios,
+ .dma_filter = NULL,
+ /* No DMA for now since AMBA PL080 dmaengine driver only does scatter
+ * gather, and the MMCI driver doesn't do it this way */
};
static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = {
@@ -234,6 +228,8 @@ static const struct of_dev_auxdata lpc32xx_auxdata_lookup[] __initconst = {
OF_DEV_AUXDATA("arm,pl022", 0x2008C000, "dev:ssp1", &lpc32xx_ssp1_data),
OF_DEV_AUXDATA("arm,pl110", 0x31040000, "dev:clcd", &lpc32xx_clcd_data),
OF_DEV_AUXDATA("arm,pl080", 0x31000000, "pl08xdmac", &pl08x_pd),
+ OF_DEV_AUXDATA("arm,pl18x", 0x20098000, "20098000.sd",
+ &lpc32xx_mmci_data),
{ }
};
@@ -241,10 +237,6 @@ static void __init lpc3250_machine_init(void)
{
u32 tmp;
- /* Setup SLC NAND controller muxing */
- __raw_writel(LPC32XX_CLKPWR_NANDCLK_SEL_SLC,
- LPC32XX_CLKPWR_NAND_CLK_CTRL);
-
/* Setup LCD muxing to RGB565 */
tmp = __raw_readl(LPC32XX_CLKPWR_LCDCLK_CTRL) &
~(LPC32XX_CLKPWR_LCDCTRL_LCDTYPE_MSK |
@@ -252,47 +244,8 @@ static void __init lpc3250_machine_init(void)
tmp |= LPC32XX_CLKPWR_LCDCTRL_LCDTYPE_TFT16;
__raw_writel(tmp, LPC32XX_CLKPWR_LCDCLK_CTRL);
- /* Set up USB power */
- tmp = __raw_readl(LPC32XX_CLKPWR_USB_CTRL);
- tmp |= LPC32XX_CLKPWR_USBCTRL_HCLK_EN |
- LPC32XX_CLKPWR_USBCTRL_USBI2C_EN;
- __raw_writel(tmp, LPC32XX_CLKPWR_USB_CTRL);
-
- /* Set up I2C pull levels */
- tmp = __raw_readl(LPC32XX_CLKPWR_I2C_CLK_CTRL);
- tmp |= LPC32XX_CLKPWR_I2CCLK_USBI2CHI_DRIVE |
- LPC32XX_CLKPWR_I2CCLK_I2C2HI_DRIVE;
- __raw_writel(tmp, LPC32XX_CLKPWR_I2C_CLK_CTRL);
-
- /* Disable IrDA pulsing support on UART6 */
- tmp = __raw_readl(LPC32XX_UARTCTL_CTRL);
- tmp |= LPC32XX_UART_UART6_IRDAMOD_BYPASS;
- __raw_writel(tmp, LPC32XX_UARTCTL_CTRL);
-
- /* Enable DMA for I2S1 channel */
- tmp = __raw_readl(LPC32XX_CLKPWR_I2S_CLK_CTRL);
- tmp = LPC32XX_CLKPWR_I2SCTRL_I2S1_USE_DMA;
- __raw_writel(tmp, LPC32XX_CLKPWR_I2S_CLK_CTRL);
-
lpc32xx_serial_init();
- /*
- * AMBA peripheral clocks need to be enabled prior to AMBA device
- * detection or a data fault will occur, so enable the clocks
- * here.
- */
- tmp = __raw_readl(LPC32XX_CLKPWR_LCDCLK_CTRL);
- __raw_writel((tmp | LPC32XX_CLKPWR_LCDCTRL_CLK_EN),
- LPC32XX_CLKPWR_LCDCLK_CTRL);
-
- tmp = __raw_readl(LPC32XX_CLKPWR_SSP_CLK_CTRL);
- __raw_writel((tmp | LPC32XX_CLKPWR_SSPCTRL_SSPCLK0_EN),
- LPC32XX_CLKPWR_SSP_CLK_CTRL);
-
- tmp = __raw_readl(LPC32XX_CLKPWR_DMA_CLK_CTRL);
- __raw_writel((tmp | LPC32XX_CLKPWR_DMACLKCTRL_CLK_EN),
- LPC32XX_CLKPWR_DMA_CLK_CTRL);
-
/* Test clock needed for UDA1380 initial init */
__raw_writel(LPC32XX_CLKPWR_TESTCLK2_SEL_MOSC |
LPC32XX_CLKPWR_TESTCLK_TESTCLK2_EN,
@@ -302,12 +255,10 @@ static void __init lpc3250_machine_init(void)
lpc32xx_auxdata_lookup, NULL);
/* Register GPIOs used on this board */
- if (gpio_request(SPI0_CS_GPIO, "spi0 cs"))
- printk(KERN_ERR "Error requesting gpio %u",
- SPI0_CS_GPIO);
- else if (gpio_direction_output(SPI0_CS_GPIO, 1))
- printk(KERN_ERR "Error setting gpio %u to output",
- SPI0_CS_GPIO);
+ if (gpio_request(MMC_PWR_ENABLE_GPIO, "mmc_power_en"))
+ pr_err("Error requesting gpio %u", MMC_PWR_ENABLE_GPIO);
+ else if (gpio_direction_output(MMC_PWR_ENABLE_GPIO, 1))
+ pr_err("Error setting gpio %u to output", MMC_PWR_ENABLE_GPIO);
}
static char const *lpc32xx_dt_compat[] __initdata = {
diff --git a/arch/arm/mach-lpc32xx/serial.c b/arch/arm/mach-lpc32xx/serial.c
index f2735281616a..05621a29fba2 100644
--- a/arch/arm/mach-lpc32xx/serial.c
+++ b/arch/arm/mach-lpc32xx/serial.c
@@ -31,59 +31,6 @@
#define LPC32XX_SUART_FIFO_SIZE 64
-/* Standard 8250/16550 compatible serial ports */
-static struct plat_serial8250_port serial_std_platform_data[] = {
-#ifdef CONFIG_ARCH_LPC32XX_UART5_SELECT
- {
- .membase = io_p2v(LPC32XX_UART5_BASE),
- .mapbase = LPC32XX_UART5_BASE,
- .irq = IRQ_LPC32XX_UART_IIR5,
- .uartclk = LPC32XX_MAIN_OSC_FREQ,
- .regshift = 2,
- .iotype = UPIO_MEM32,
- .flags = UPF_BOOT_AUTOCONF | UPF_BUGGY_UART |
- UPF_SKIP_TEST,
- },
-#endif
-#ifdef CONFIG_ARCH_LPC32XX_UART3_SELECT
- {
- .membase = io_p2v(LPC32XX_UART3_BASE),
- .mapbase = LPC32XX_UART3_BASE,
- .irq = IRQ_LPC32XX_UART_IIR3,
- .uartclk = LPC32XX_MAIN_OSC_FREQ,
- .regshift = 2,
- .iotype = UPIO_MEM32,
- .flags = UPF_BOOT_AUTOCONF | UPF_BUGGY_UART |
- UPF_SKIP_TEST,
- },
-#endif
-#ifdef CONFIG_ARCH_LPC32XX_UART4_SELECT
- {
- .membase = io_p2v(LPC32XX_UART4_BASE),
- .mapbase = LPC32XX_UART4_BASE,
- .irq = IRQ_LPC32XX_UART_IIR4,
- .uartclk = LPC32XX_MAIN_OSC_FREQ,
- .regshift = 2,
- .iotype = UPIO_MEM32,
- .flags = UPF_BOOT_AUTOCONF | UPF_BUGGY_UART |
- UPF_SKIP_TEST,
- },
-#endif
-#ifdef CONFIG_ARCH_LPC32XX_UART6_SELECT
- {
- .membase = io_p2v(LPC32XX_UART6_BASE),
- .mapbase = LPC32XX_UART6_BASE,
- .irq = IRQ_LPC32XX_UART_IIR6,
- .uartclk = LPC32XX_MAIN_OSC_FREQ,
- .regshift = 2,
- .iotype = UPIO_MEM32,
- .flags = UPF_BOOT_AUTOCONF | UPF_BUGGY_UART |
- UPF_SKIP_TEST,
- },
-#endif
- { },
-};
-
struct uartinit {
char *uart_ck_name;
u32 ck_mode_mask;
@@ -92,7 +39,6 @@ struct uartinit {
};
static struct uartinit uartinit_data[] __initdata = {
-#ifdef CONFIG_ARCH_LPC32XX_UART5_SELECT
{
.uart_ck_name = "uart5_ck",
.ck_mode_mask =
@@ -100,8 +46,6 @@ static struct uartinit uartinit_data[] __initdata = {
.pdiv_clk_reg = LPC32XX_CLKPWR_UART5_CLK_CTRL,
.mapbase = LPC32XX_UART5_BASE,
},
-#endif
-#ifdef CONFIG_ARCH_LPC32XX_UART3_SELECT
{
.uart_ck_name = "uart3_ck",
.ck_mode_mask =
@@ -109,8 +53,6 @@ static struct uartinit uartinit_data[] __initdata = {
.pdiv_clk_reg = LPC32XX_CLKPWR_UART3_CLK_CTRL,
.mapbase = LPC32XX_UART3_BASE,
},
-#endif
-#ifdef CONFIG_ARCH_LPC32XX_UART4_SELECT
{
.uart_ck_name = "uart4_ck",
.ck_mode_mask =
@@ -118,8 +60,6 @@ static struct uartinit uartinit_data[] __initdata = {
.pdiv_clk_reg = LPC32XX_CLKPWR_UART4_CLK_CTRL,
.mapbase = LPC32XX_UART4_BASE,
},
-#endif
-#ifdef CONFIG_ARCH_LPC32XX_UART6_SELECT
{
.uart_ck_name = "uart6_ck",
.ck_mode_mask =
@@ -127,19 +67,6 @@ static struct uartinit uartinit_data[] __initdata = {
.pdiv_clk_reg = LPC32XX_CLKPWR_UART6_CLK_CTRL,
.mapbase = LPC32XX_UART6_BASE,
},
-#endif
-};
-
-static struct platform_device serial_std_platform_device = {
- .name = "serial8250",
- .id = 0,
- .dev = {
- .platform_data = serial_std_platform_data,
- },
-};
-
-static struct platform_device *lpc32xx_serial_devs[] __initdata = {
- &serial_std_platform_device,
};
void __init lpc32xx_serial_init(void)
@@ -156,15 +83,8 @@ void __init lpc32xx_serial_init(void)
clk = clk_get(NULL, uartinit_data[i].uart_ck_name);
if (!IS_ERR(clk)) {
clk_enable(clk);
- serial_std_platform_data[i].uartclk =
- clk_get_rate(clk);
}
- /* Fall back on main osc rate if clock rate return fails */
- if (serial_std_platform_data[i].uartclk == 0)
- serial_std_platform_data[i].uartclk =
- LPC32XX_MAIN_OSC_FREQ;
-
/* Setup UART clock modes for all UARTs, disable autoclock */
clkmodes |= uartinit_data[i].ck_mode_mask;
@@ -189,7 +109,7 @@ void __init lpc32xx_serial_init(void)
__raw_writel(clkmodes, LPC32XX_UARTCTL_CLKMODE);
for (i = 0; i < ARRAY_SIZE(uartinit_data); i++) {
/* Force a flush of the RX FIFOs to work around a HW bug */
- puart = serial_std_platform_data[i].mapbase;
+ puart = uartinit_data[i].mapbase;
__raw_writel(0xC1, LPC32XX_UART_IIR_FCR(puart));
__raw_writel(0x00, LPC32XX_UART_DLL_FIFO(puart));
j = LPC32XX_SUART_FIFO_SIZE;
@@ -198,11 +118,13 @@ void __init lpc32xx_serial_init(void)
__raw_writel(0, LPC32XX_UART_IIR_FCR(puart));
}
+ /* Disable IrDA pulsing support on UART6 */
+ tmp = __raw_readl(LPC32XX_UARTCTL_CTRL);
+ tmp |= LPC32XX_UART_UART6_IRDAMOD_BYPASS;
+ __raw_writel(tmp, LPC32XX_UARTCTL_CTRL);
+
/* Disable UART5->USB transparent mode or USB won't work */
tmp = __raw_readl(LPC32XX_UARTCTL_CTRL);
tmp &= ~LPC32XX_UART_U5_ROUTE_TO_USB;
__raw_writel(tmp, LPC32XX_UARTCTL_CTRL);
-
- platform_add_devices(lpc32xx_serial_devs,
- ARRAY_SIZE(lpc32xx_serial_devs));
}
diff --git a/arch/arm/mach-mmp/include/mach/gpio-pxa.h b/arch/arm/mach-mmp/include/mach/gpio-pxa.h
deleted file mode 100644
index 0e135a599f3e..000000000000
--- a/arch/arm/mach-mmp/include/mach/gpio-pxa.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_MACH_GPIO_PXA_H
-#define __ASM_MACH_GPIO_PXA_H
-
-#include <mach/addr-map.h>
-#include <mach/cputype.h>
-#include <mach/irqs.h>
-
-#define GPIO_REGS_VIRT (APB_VIRT_BASE + 0x19000)
-
-#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
-#define GPIO_REG(x) (*(volatile u32 *)(GPIO_REGS_VIRT + (x)))
-
-#define gpio_to_bank(gpio) ((gpio) >> 5)
-
-/* NOTE: these macros are defined here to make optimization of
- * gpio_{get,set}_value() to work when 'gpio' is a constant.
- * Usage of these macros otherwise is no longer recommended,
- * use generic GPIO API whenever possible.
- */
-#define GPIO_bit(gpio) (1 << ((gpio) & 0x1f))
-
-#define GPLR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x00)
-#define GPDR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x0c)
-#define GPSR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x18)
-#define GPCR(x) GPIO_REG(BANK_OFF(gpio_to_bank(x)) + 0x24)
-
-#include <plat/gpio-pxa.h>
-
-#endif /* __ASM_MACH_GPIO_PXA_H */
diff --git a/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h b/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
index c64dbb96dbad..eb187e0e059b 100644
--- a/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
+++ b/arch/arm/mach-mv78xx0/include/mach/bridge-regs.h
@@ -31,5 +31,6 @@
#define IRQ_MASK_HIGH_OFF 0x0014
#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
+#define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300)
#endif
diff --git a/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h b/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
index 3674497162e3..e807c4c52a0b 100644
--- a/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
+++ b/arch/arm/mach-mv78xx0/include/mach/mv78xx0.h
@@ -42,6 +42,7 @@
#define MV78XX0_CORE0_REGS_PHYS_BASE 0xf1020000
#define MV78XX0_CORE1_REGS_PHYS_BASE 0xf1024000
#define MV78XX0_CORE_REGS_VIRT_BASE 0xfe400000
+#define MV78XX0_CORE_REGS_PHYS_BASE 0xfe400000
#define MV78XX0_CORE_REGS_SIZE SZ_16K
#define MV78XX0_PCIE_IO_PHYS_BASE(i) (0xf0800000 + ((i) << 20))
@@ -59,6 +60,7 @@
* Core-specific peripheral registers.
*/
#define BRIDGE_VIRT_BASE (MV78XX0_CORE_REGS_VIRT_BASE)
+#define BRIDGE_PHYS_BASE (MV78XX0_CORE_REGS_PHYS_BASE)
/*
* Register Map
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
new file mode 100644
index 000000000000..caa2c5e734fe
--- /dev/null
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -0,0 +1,16 @@
+if ARCH_MVEBU
+
+menu "Marvell SOC with device tree"
+
+config MACH_ARMADA_370_XP
+ bool "Marvell Armada 370 and Aramada XP boards"
+ select ARMADA_370_XP_TIMER
+ select CPU_V7
+ help
+
+ Say 'Y' here if you want your kernel to support boards based on
+ Marvell Armada 370 or Armada XP with device tree.
+
+endmenu
+
+endif
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
new file mode 100644
index 000000000000..e61d2b8fdf50
--- /dev/null
+++ b/arch/arm/mach-mvebu/Makefile
@@ -0,0 +1,2 @@
+obj-y += system-controller.o
+obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o
diff --git a/arch/arm/mach-mvebu/Makefile.boot b/arch/arm/mach-mvebu/Makefile.boot
new file mode 100644
index 000000000000..2579a2fc2334
--- /dev/null
+++ b/arch/arm/mach-mvebu/Makefile.boot
@@ -0,0 +1,3 @@
+zreladdr-y := 0x00008000
+dtb-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-db.dtb
+dtb-$(CONFIG_MACH_ARMADA_370_XP) += armada-xp-db.dtb
diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
new file mode 100644
index 000000000000..4ef923b032ec
--- /dev/null
+++ b/arch/arm/mach-mvebu/armada-370-xp.c
@@ -0,0 +1,63 @@
+/*
+ * Device Tree support for Armada 370 and XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/time-armada-370-xp.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <mach/armada-370-xp.h>
+#include "common.h"
+
+static struct map_desc armada_370_xp_io_desc[] __initdata = {
+ {
+ .virtual = ARMADA_370_XP_REGS_VIRT_BASE,
+ .pfn = __phys_to_pfn(ARMADA_370_XP_REGS_PHYS_BASE),
+ .length = ARMADA_370_XP_REGS_SIZE,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init armada_370_xp_map_io(void)
+{
+ iotable_init(armada_370_xp_io_desc, ARRAY_SIZE(armada_370_xp_io_desc));
+}
+
+struct sys_timer armada_370_xp_timer = {
+ .init = armada_370_xp_timer_init,
+};
+
+static void __init armada_370_xp_dt_init(void)
+{
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
+
+static const char * const armada_370_xp_dt_board_dt_compat[] = {
+ "marvell,a370-db",
+ "marvell,axp-db",
+ NULL,
+};
+
+DT_MACHINE_START(ARMADA_XP_DT, "Marvell Aramada 370/XP (Device Tree)")
+ .init_machine = armada_370_xp_dt_init,
+ .map_io = armada_370_xp_map_io,
+ .init_irq = armada_370_xp_init_irq,
+ .handle_irq = armada_370_xp_handle_irq,
+ .timer = &armada_370_xp_timer,
+ .restart = mvebu_restart,
+ .dt_compat = armada_370_xp_dt_board_dt_compat,
+MACHINE_END
diff --git a/arch/arm/mach-mvebu/common.h b/arch/arm/mach-mvebu/common.h
new file mode 100644
index 000000000000..02f89eaa25fe
--- /dev/null
+++ b/arch/arm/mach-mvebu/common.h
@@ -0,0 +1,23 @@
+/*
+ * Core functions for Marvell System On Chip
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ARCH_MVEBU_COMMON_H
+#define __ARCH_MVEBU_COMMON_H
+
+void mvebu_restart(char mode, const char *cmd);
+
+void armada_370_xp_init_irq(void);
+void armada_370_xp_handle_irq(struct pt_regs *regs);
+
+#endif
diff --git a/arch/arm/mach-mvebu/include/mach/armada-370-xp.h b/arch/arm/mach-mvebu/include/mach/armada-370-xp.h
new file mode 100644
index 000000000000..25f0ca8d7820
--- /dev/null
+++ b/arch/arm/mach-mvebu/include/mach/armada-370-xp.h
@@ -0,0 +1,22 @@
+/*
+ * Generic definitions for Marvell Armada_370_XP SoCs
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __MACH_ARMADA_370_XP_H
+#define __MACH_ARMADA_370_XP_H
+
+#define ARMADA_370_XP_REGS_PHYS_BASE 0xd0000000
+#define ARMADA_370_XP_REGS_VIRT_BASE 0xfeb00000
+#define ARMADA_370_XP_REGS_SIZE SZ_1M
+
+#endif /* __MACH_ARMADA_370_XP_H */
diff --git a/arch/arm/mach-mvebu/include/mach/debug-macro.S b/arch/arm/mach-mvebu/include/mach/debug-macro.S
new file mode 100644
index 000000000000..22825760c7e1
--- /dev/null
+++ b/arch/arm/mach-mvebu/include/mach/debug-macro.S
@@ -0,0 +1,24 @@
+/*
+ * Early serial output macro for Marvell SoC
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <mach/armada-370-xp.h>
+
+ .macro addruart, rp, rv, tmp
+ ldr \rp, =ARMADA_370_XP_REGS_PHYS_BASE
+ ldr \rv, =ARMADA_370_XP_REGS_VIRT_BASE
+ orr \rp, \rp, #0x00012000
+ orr \rv, \rv, #0x00012000
+ .endm
+
+#define UART_SHIFT 2
+#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-mvebu/include/mach/timex.h b/arch/arm/mach-mvebu/include/mach/timex.h
new file mode 100644
index 000000000000..ab324a3748f2
--- /dev/null
+++ b/arch/arm/mach-mvebu/include/mach/timex.h
@@ -0,0 +1,13 @@
+/*
+ * Marvell Armada SoC time definitions
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#define CLOCK_TICK_RATE (100 * HZ)
diff --git a/arch/arm/mach-mvebu/include/mach/uncompress.h b/arch/arm/mach-mvebu/include/mach/uncompress.h
new file mode 100644
index 000000000000..d6a100ccf302
--- /dev/null
+++ b/arch/arm/mach-mvebu/include/mach/uncompress.h
@@ -0,0 +1,43 @@
+/*
+ * Marvell Armada SoC kernel uncompression UART routines
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <mach/armada-370-xp.h>
+
+#define UART_THR ((volatile unsigned char *)(ARMADA_370_XP_REGS_PHYS_BASE\
+ + 0x12000))
+#define UART_LSR ((volatile unsigned char *)(ARMADA_370_XP_REGS_PHYS_BASE\
+ + 0x12014))
+
+#define LSR_THRE 0x20
+
+static void putc(const char c)
+{
+ int i;
+
+ for (i = 0; i < 0x1000; i++) {
+ /* Transmit fifo not full? */
+ if (*UART_LSR & LSR_THRE)
+ break;
+ }
+
+ *UART_THR = c;
+}
+
+static void flush(void)
+{
+}
+
+/*
+ * nothing to do
+ */
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
diff --git a/arch/arm/mach-mvebu/irq-armada-370-xp.c b/arch/arm/mach-mvebu/irq-armada-370-xp.c
new file mode 100644
index 000000000000..5f5f9394b6b2
--- /dev/null
+++ b/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -0,0 +1,133 @@
+/*
+ * Marvell Armada 370 and Armada XP SoC IRQ handling
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Ben Dooks <ben.dooks@codethink.co.uk>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <asm/mach/arch.h>
+#include <asm/exception.h>
+
+/* Interrupt Controller Registers Map */
+#define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48)
+#define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C)
+
+#define ARMADA_370_XP_INT_CONTROL (0x00)
+#define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30)
+#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
+
+#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
+
+static void __iomem *per_cpu_int_base;
+static void __iomem *main_int_base;
+static struct irq_domain *armada_370_xp_mpic_domain;
+
+static void armada_370_xp_irq_mask(struct irq_data *d)
+{
+ writel(irqd_to_hwirq(d),
+ per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+}
+
+static void armada_370_xp_irq_unmask(struct irq_data *d)
+{
+ writel(irqd_to_hwirq(d),
+ per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+}
+
+static struct irq_chip armada_370_xp_irq_chip = {
+ .name = "armada_370_xp_irq",
+ .irq_mask = armada_370_xp_irq_mask,
+ .irq_mask_ack = armada_370_xp_irq_mask,
+ .irq_unmask = armada_370_xp_irq_unmask,
+};
+
+static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ armada_370_xp_irq_mask(irq_get_irq_data(virq));
+ writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+
+ irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
+ handle_level_irq);
+ irq_set_status_flags(virq, IRQ_LEVEL);
+ set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+
+ return 0;
+}
+
+static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
+ .map = armada_370_xp_mpic_irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int __init armada_370_xp_mpic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ u32 control;
+
+ main_int_base = of_iomap(node, 0);
+ per_cpu_int_base = of_iomap(node, 1);
+
+ BUG_ON(!main_int_base);
+ BUG_ON(!per_cpu_int_base);
+
+ control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+
+ armada_370_xp_mpic_domain =
+ irq_domain_add_linear(node, (control >> 2) & 0x3ff,
+ &armada_370_xp_mpic_irq_ops, NULL);
+
+ if (!armada_370_xp_mpic_domain)
+ panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n");
+
+ irq_set_default_host(armada_370_xp_mpic_domain);
+ return 0;
+}
+
+asmlinkage void __exception_irq_entry armada_370_xp_handle_irq(struct pt_regs
+ *regs)
+{
+ u32 irqstat, irqnr;
+
+ do {
+ irqstat = readl_relaxed(per_cpu_int_base +
+ ARMADA_370_XP_CPU_INTACK_OFFS);
+ irqnr = irqstat & 0x3FF;
+
+ if (irqnr < 1023) {
+ irqnr =
+ irq_find_mapping(armada_370_xp_mpic_domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ continue;
+ }
+
+ break;
+ } while (1);
+}
+
+static const struct of_device_id mpic_of_match[] __initconst = {
+ {.compatible = "marvell,mpic", .data = armada_370_xp_mpic_of_init},
+ {},
+};
+
+void __init armada_370_xp_init_irq(void)
+{
+ of_irq_init(mpic_of_match);
+}
diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c
new file mode 100644
index 000000000000..b8079df8c986
--- /dev/null
+++ b/arch/arm/mach-mvebu/system-controller.c
@@ -0,0 +1,105 @@
+/*
+ * System controller support for Armada 370 and XP platforms.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * The Armada 370 and Armada XP SoCs both have a range of
+ * miscellaneous registers, that do not belong to a particular device,
+ * but rather provide system-level features. This basic
+ * system-controller driver provides a device tree binding for those
+ * registers, and implements utility functions offering various
+ * features related to those registers.
+ *
+ * For now, the feature set is limited to restarting the platform by a
+ * soft-reset, but it might be extended in the future.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+
+static void __iomem *system_controller_base;
+
+struct mvebu_system_controller {
+ u32 rstoutn_mask_offset;
+ u32 system_soft_reset_offset;
+
+ u32 rstoutn_mask_reset_out_en;
+ u32 system_soft_reset;
+};
+static struct mvebu_system_controller *mvebu_sc;
+
+const struct mvebu_system_controller armada_370_xp_system_controller = {
+ .rstoutn_mask_offset = 0x60,
+ .system_soft_reset_offset = 0x64,
+ .rstoutn_mask_reset_out_en = 0x1,
+ .system_soft_reset = 0x1,
+};
+
+const struct mvebu_system_controller orion_system_controller = {
+ .rstoutn_mask_offset = 0x108,
+ .system_soft_reset_offset = 0x10c,
+ .rstoutn_mask_reset_out_en = 0x4,
+ .system_soft_reset = 0x1,
+};
+
+static struct of_device_id of_system_controller_table[] = {
+ {
+ .compatible = "marvell,orion-system-controller",
+ .data = (void *) &orion_system_controller,
+ }, {
+ .compatible = "marvell,armada-370-xp-system-controller",
+ .data = (void *) &armada_370_xp_system_controller,
+ },
+ { /* end of list */ },
+};
+
+void mvebu_restart(char mode, const char *cmd)
+{
+ if (!system_controller_base) {
+ pr_err("Cannot restart, system-controller not available: check the device tree\n");
+ } else {
+ /*
+ * Enable soft reset to assert RSTOUTn.
+ */
+ writel(mvebu_sc->rstoutn_mask_reset_out_en,
+ system_controller_base +
+ mvebu_sc->rstoutn_mask_offset);
+ /*
+ * Assert soft reset.
+ */
+ writel(mvebu_sc->system_soft_reset,
+ system_controller_base +
+ mvebu_sc->system_soft_reset_offset);
+ }
+
+ while (1)
+ ;
+}
+
+static int __init mvebu_system_controller_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, of_system_controller_table);
+ if (np) {
+ const struct of_device_id *match =
+ of_match_node(of_system_controller_table, np);
+ BUG_ON(!match);
+ system_controller_base = of_iomap(np, 0);
+ mvebu_sc = (struct mvebu_system_controller *)match->data;
+ }
+
+ return 0;
+}
+
+arch_initcall(mvebu_system_controller_init);
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index 91cf0625819c..ccdf83b17cf1 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -16,6 +16,7 @@ config SOC_IMX28
bool
select ARM_AMBA
select CPU_ARM926T
+ select HAVE_CAN_FLEXCAN if CAN
select HAVE_PWM
select PINCTRL_IMX28
diff --git a/arch/arm/mach-mxs/Makefile.boot b/arch/arm/mach-mxs/Makefile.boot
index 07b11fe6453f..4582999cf080 100644
--- a/arch/arm/mach-mxs/Makefile.boot
+++ b/arch/arm/mach-mxs/Makefile.boot
@@ -1 +1,10 @@
zreladdr-y += 0x40008000
+
+dtb-y += imx23-evk.dtb \
+ imx23-olinuxino.dtb \
+ imx23-stmp378x_devb.dtb \
+ imx28-apx4devkit.dtb \
+ imx28-cfa10036.dtb \
+ imx28-evk.dtb \
+ imx28-m28evk.dtb \
+ imx28-tx28.dtb \
diff --git a/arch/arm/mach-mxs/devices-mx23.h b/arch/arm/mach-mxs/devices-mx23.h
index 9acdd6387047..9ee5cede3d42 100644
--- a/arch/arm/mach-mxs/devices-mx23.h
+++ b/arch/arm/mach-mxs/devices-mx23.h
@@ -10,7 +10,7 @@
*/
#include <mach/mx23.h>
#include <mach/devices-common.h>
-#include <mach/mxsfb.h>
+#include <linux/mxsfb.h>
#include <linux/amba/bus.h>
static inline int mx23_add_duart(void)
diff --git a/arch/arm/mach-mxs/devices-mx28.h b/arch/arm/mach-mxs/devices-mx28.h
index 84b2960df117..fcab431060f4 100644
--- a/arch/arm/mach-mxs/devices-mx28.h
+++ b/arch/arm/mach-mxs/devices-mx28.h
@@ -10,7 +10,7 @@
*/
#include <mach/mx28.h>
#include <mach/devices-common.h>
-#include <mach/mxsfb.h>
+#include <linux/mxsfb.h>
#include <linux/amba/bus.h>
static inline int mx28_add_duart(void)
diff --git a/arch/arm/mach-mxs/devices/platform-mxsfb.c b/arch/arm/mach-mxs/devices/platform-mxsfb.c
index 5a75b7180f74..76b53f73418e 100644
--- a/arch/arm/mach-mxs/devices/platform-mxsfb.c
+++ b/arch/arm/mach-mxs/devices/platform-mxsfb.c
@@ -10,7 +10,7 @@
#include <mach/mx23.h>
#include <mach/mx28.h>
#include <mach/devices-common.h>
-#include <mach/mxsfb.h>
+#include <linux/mxsfb.h>
#ifdef CONFIG_SOC_IMX23
struct platform_device *__init mx23_add_mxsfb(
diff --git a/arch/arm/mach-mxs/mach-apx4devkit.c b/arch/arm/mach-mxs/mach-apx4devkit.c
index 5e90b9dcdef8..f5f061757deb 100644
--- a/arch/arm/mach-mxs/mach-apx4devkit.c
+++ b/arch/arm/mach-mxs/mach-apx4devkit.c
@@ -205,6 +205,16 @@ static int apx4devkit_phy_fixup(struct phy_device *phy)
return 0;
}
+static void __init apx4devkit_fec_phy_clk_enable(void)
+{
+ struct clk *clk;
+
+ /* Enable fec phy clock */
+ clk = clk_get_sys("enet_out", NULL);
+ if (!IS_ERR(clk))
+ clk_prepare_enable(clk);
+}
+
static void __init apx4devkit_init(void)
{
mx28_soc_init();
@@ -225,6 +235,7 @@ static void __init apx4devkit_init(void)
phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
apx4devkit_phy_fixup);
+ apx4devkit_fec_phy_clk_enable();
mx28_add_fec(0, &mx28_fec_pdata);
mx28_add_mxs_mmc(0, &apx4devkit_mmc_pdata);
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 8cac94b33020..648bdd05d38b 100644
--- a/arch/arm/mach-mxs/mach-mxs.c
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -16,12 +16,95 @@
#include <linux/init.h>
#include <linux/init.h>
#include <linux/irqdomain.h>
+#include <linux/micrel_phy.h>
+#include <linux/mxsfb.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/phy.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <mach/common.h>
+static struct fb_videomode mx23evk_video_modes[] = {
+ {
+ .name = "Samsung-LMS430HF02",
+ .refresh = 60,
+ .xres = 480,
+ .yres = 272,
+ .pixclock = 108096, /* picosecond (9.2 MHz) */
+ .left_margin = 15,
+ .right_margin = 8,
+ .upper_margin = 12,
+ .lower_margin = 4,
+ .hsync_len = 1,
+ .vsync_len = 1,
+ .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
+ FB_SYNC_DOTCLK_FAILING_ACT,
+ },
+};
+
+static struct fb_videomode mx28evk_video_modes[] = {
+ {
+ .name = "Seiko-43WVF1G",
+ .refresh = 60,
+ .xres = 800,
+ .yres = 480,
+ .pixclock = 29851, /* picosecond (33.5 MHz) */
+ .left_margin = 89,
+ .right_margin = 164,
+ .upper_margin = 23,
+ .lower_margin = 10,
+ .hsync_len = 10,
+ .vsync_len = 10,
+ .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT |
+ FB_SYNC_DOTCLK_FAILING_ACT,
+ },
+};
+
+static struct fb_videomode m28evk_video_modes[] = {
+ {
+ .name = "Ampire AM-800480R2TMQW-T01H",
+ .refresh = 60,
+ .xres = 800,
+ .yres = 480,
+ .pixclock = 30066, /* picosecond (33.26 MHz) */
+ .left_margin = 0,
+ .right_margin = 256,
+ .upper_margin = 0,
+ .lower_margin = 45,
+ .hsync_len = 1,
+ .vsync_len = 1,
+ .sync = FB_SYNC_DATA_ENABLE_HIGH_ACT,
+ },
+};
+
+static struct fb_videomode apx4devkit_video_modes[] = {
+ {
+ .name = "HannStar PJ70112A",
+ .refresh = 60,
+ .xres = 800,
+ .yres = 480,
+ .pixclock = 33333, /* picosecond (30.00 MHz) */
+ .left_margin = 88,
+ .right_margin = 40,
+ .upper_margin = 32,
+ .lower_margin = 13,
+ .hsync_len = 48,
+ .vsync_len = 3,
+ .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT |
+ FB_SYNC_DATA_ENABLE_HIGH_ACT |
+ FB_SYNC_DOTCLK_FAILING_ACT,
+ },
+};
+
+static struct mxsfb_platform_data mxsfb_pdata __initdata;
+
+static struct of_dev_auxdata mxs_auxdata_lookup[] __initdata = {
+ OF_DEV_AUXDATA("fsl,imx23-lcdif", 0x80030000, NULL, &mxsfb_pdata),
+ OF_DEV_AUXDATA("fsl,imx28-lcdif", 0x80030000, NULL, &mxsfb_pdata),
+ { /* sentinel */ }
+};
+
static int __init mxs_icoll_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
@@ -71,33 +154,155 @@ static struct sys_timer imx28_timer = {
.init = imx28_timer_init,
};
-static void __init imx28_evk_init(void)
+enum mac_oui {
+ OUI_FSL,
+ OUI_DENX,
+};
+
+static void __init update_fec_mac_prop(enum mac_oui oui)
+{
+ struct device_node *np, *from = NULL;
+ struct property *oldmac, *newmac;
+ const u32 *ocotp = mxs_get_ocotp();
+ u8 *macaddr;
+ u32 val;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ np = of_find_compatible_node(from, NULL, "fsl,imx28-fec");
+ if (!np)
+ return;
+ from = np;
+
+ newmac = kzalloc(sizeof(*newmac) + 6, GFP_KERNEL);
+ if (!newmac)
+ return;
+ newmac->value = newmac + 1;
+ newmac->length = 6;
+
+ newmac->name = kstrdup("local-mac-address", GFP_KERNEL);
+ if (!newmac->name) {
+ kfree(newmac);
+ return;
+ }
+
+ /*
+ * OCOTP only stores the last 4 octets for each mac address,
+ * so hard-code OUI here.
+ */
+ macaddr = newmac->value;
+ switch (oui) {
+ case OUI_FSL:
+ macaddr[0] = 0x00;
+ macaddr[1] = 0x04;
+ macaddr[2] = 0x9f;
+ break;
+ case OUI_DENX:
+ macaddr[0] = 0xc0;
+ macaddr[1] = 0xe5;
+ macaddr[2] = 0x4e;
+ break;
+ }
+ val = ocotp[i];
+ macaddr[3] = (val >> 16) & 0xff;
+ macaddr[4] = (val >> 8) & 0xff;
+ macaddr[5] = (val >> 0) & 0xff;
+
+ oldmac = of_find_property(np, newmac->name, NULL);
+ if (oldmac)
+ prom_update_property(np, newmac, oldmac);
+ else
+ prom_add_property(np, newmac);
+ }
+}
+
+static void __init imx23_evk_init(void)
+{
+ mxsfb_pdata.mode_list = mx23evk_video_modes;
+ mxsfb_pdata.mode_count = ARRAY_SIZE(mx23evk_video_modes);
+ mxsfb_pdata.default_bpp = 32;
+ mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+}
+
+static inline void enable_clk_enet_out(void)
{
- struct clk *clk;
+ struct clk *clk = clk_get_sys("enet_out", NULL);
- /* Enable fec phy clock */
- clk = clk_get_sys("enet_out", NULL);
if (!IS_ERR(clk))
clk_prepare_enable(clk);
}
+static void __init imx28_evk_init(void)
+{
+ enable_clk_enet_out();
+ update_fec_mac_prop(OUI_FSL);
+
+ mxsfb_pdata.mode_list = mx28evk_video_modes;
+ mxsfb_pdata.mode_count = ARRAY_SIZE(mx28evk_video_modes);
+ mxsfb_pdata.default_bpp = 32;
+ mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+}
+
+static void __init m28evk_init(void)
+{
+ enable_clk_enet_out();
+ update_fec_mac_prop(OUI_DENX);
+
+ mxsfb_pdata.mode_list = m28evk_video_modes;
+ mxsfb_pdata.mode_count = ARRAY_SIZE(m28evk_video_modes);
+ mxsfb_pdata.default_bpp = 16;
+ mxsfb_pdata.ld_intf_width = STMLCDIF_18BIT;
+}
+
+static int apx4devkit_phy_fixup(struct phy_device *phy)
+{
+ phy->dev_flags |= MICREL_PHY_50MHZ_CLK;
+ return 0;
+}
+
+static void __init apx4devkit_init(void)
+{
+ enable_clk_enet_out();
+
+ if (IS_BUILTIN(CONFIG_PHYLIB))
+ phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
+ apx4devkit_phy_fixup);
+
+ mxsfb_pdata.mode_list = apx4devkit_video_modes;
+ mxsfb_pdata.mode_count = ARRAY_SIZE(apx4devkit_video_modes);
+ mxsfb_pdata.default_bpp = 32;
+ mxsfb_pdata.ld_intf_width = STMLCDIF_24BIT;
+}
+
static void __init mxs_machine_init(void)
{
if (of_machine_is_compatible("fsl,imx28-evk"))
imx28_evk_init();
+ else if (of_machine_is_compatible("fsl,imx23-evk"))
+ imx23_evk_init();
+ else if (of_machine_is_compatible("denx,m28evk"))
+ m28evk_init();
+ else if (of_machine_is_compatible("bluegiga,apx4devkit"))
+ apx4devkit_init();
of_platform_populate(NULL, of_default_bus_match_table,
- NULL, NULL);
+ mxs_auxdata_lookup, NULL);
}
static const char *imx23_dt_compat[] __initdata = {
"fsl,imx23-evk",
+ "fsl,stmp378x_devb"
+ "olimex,imx23-olinuxino",
"fsl,imx23",
NULL,
};
static const char *imx28_dt_compat[] __initdata = {
+ "bluegiga,apx4devkit",
+ "crystalfontz,cfa10036",
+ "denx,m28evk",
"fsl,imx28-evk",
+ "karo,tx28",
"fsl,imx28",
NULL,
};
diff --git a/arch/arm/mach-mxs/module-tx28.c b/arch/arm/mach-mxs/module-tx28.c
index 9a7b08b2a925..0f71f82101cc 100644
--- a/arch/arm/mach-mxs/module-tx28.c
+++ b/arch/arm/mach-mxs/module-tx28.c
@@ -11,7 +11,7 @@
#include <linux/gpio.h>
#include <mach/iomux-mx28.h>
-#include "../devices-mx28.h"
+#include "devices-mx28.h"
#include "module-tx28.h"
diff --git a/arch/arm/mach-nomadik/Makefile b/arch/arm/mach-nomadik/Makefile
index a6bbd1a7b4e7..a42c9a33d3bf 100644
--- a/arch/arm/mach-nomadik/Makefile
+++ b/arch/arm/mach-nomadik/Makefile
@@ -7,8 +7,6 @@
# Object file lists.
-obj-y += clock.o
-
# Cpu revision
obj-$(CONFIG_NOMADIK_8815) += cpu-8815.o
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 2e8d3e176bc7..f4535a7dadf5 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -14,12 +14,14 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
+#include <linux/amba/mmci.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/onenand.h>
#include <linux/mtd/partitions.h>
+#include <linux/i2c.h>
#include <linux/io.h>
#include <asm/hardware/vic.h>
#include <asm/sizes.h>
@@ -185,16 +187,28 @@ static void __init nhk8815_onenand_init(void)
#endif
}
-static AMBA_APB_DEVICE(uart0, "uart0", 0, NOMADIK_UART0_BASE,
- { IRQ_UART0 }, NULL);
+static struct mmci_platform_data mmcsd_plat_data = {
+ .ocr_mask = MMC_VDD_29_30,
+ .f_max = 48000000,
+ .gpio_wp = -1,
+ .gpio_cd = 111,
+ .cd_invert = true,
+ .capabilities = MMC_CAP_MMC_HIGHSPEED |
+ MMC_CAP_SD_HIGHSPEED | MMC_CAP_4_BIT_DATA,
+};
-static AMBA_APB_DEVICE(uart1, "uart1", 0, NOMADIK_UART1_BASE,
- { IRQ_UART1 }, NULL);
+static int __init nhk8815_mmcsd_init(void)
+{
+ int ret;
-static struct amba_device *amba_devs[] __initdata = {
- &uart0_device,
- &uart1_device,
-};
+ ret = gpio_request(112, "card detect bias");
+ if (ret)
+ return ret;
+ gpio_direction_output(112, 0);
+ amba_apb_device_add(NULL, "mmci", NOMADIK_SDI_BASE, SZ_4K, IRQ_SDMMC, 0, &mmcsd_plat_data, 0x10180180);
+ return 0;
+}
+module_init(nhk8815_mmcsd_init);
static struct resource nhk8815_eth_resources[] = {
{
@@ -253,17 +267,46 @@ static struct sys_timer nomadik_timer = {
.init = nomadik_timer_init,
};
+static struct i2c_board_info __initdata nhk8815_i2c0_devices[] = {
+ {
+ I2C_BOARD_INFO("stw4811", 0x2d),
+ },
+};
+
+static struct i2c_board_info __initdata nhk8815_i2c1_devices[] = {
+ {
+ I2C_BOARD_INFO("camera", 0x10),
+ },
+ {
+ I2C_BOARD_INFO("stw5095", 0x1a),
+ },
+ {
+ I2C_BOARD_INFO("lis3lv02dl", 0x1d),
+ },
+};
+
+static struct i2c_board_info __initdata nhk8815_i2c2_devices[] = {
+ {
+ I2C_BOARD_INFO("stw4811-usb", 0x2d),
+ },
+};
+
static void __init nhk8815_platform_init(void)
{
- int i;
-
cpu8815_platform_init();
nhk8815_onenand_init();
platform_add_devices(nhk8815_platform_devices,
ARRAY_SIZE(nhk8815_platform_devices));
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
- amba_device_register(amba_devs[i], &iomem_resource);
+ amba_apb_device_add(NULL, "uart0", NOMADIK_UART0_BASE, SZ_4K, IRQ_UART0, 0, NULL, 0);
+ amba_apb_device_add(NULL, "uart1", NOMADIK_UART1_BASE, SZ_4K, IRQ_UART1, 0, NULL, 0);
+
+ i2c_register_board_info(0, nhk8815_i2c0_devices,
+ ARRAY_SIZE(nhk8815_i2c0_devices));
+ i2c_register_board_info(1, nhk8815_i2c1_devices,
+ ARRAY_SIZE(nhk8815_i2c1_devices));
+ i2c_register_board_info(2, nhk8815_i2c2_devices,
+ ARRAY_SIZE(nhk8815_i2c2_devices));
}
MACHINE_START(NOMADIK, "NHK8815")
diff --git a/arch/arm/mach-nomadik/clock.c b/arch/arm/mach-nomadik/clock.c
deleted file mode 100644
index 48a59f24e10c..000000000000
--- a/arch/arm/mach-nomadik/clock.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * linux/arch/arm/mach-nomadik/clock.c
- *
- * Copyright (C) 2009 Alessandro Rubini
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include "clock.h"
-
-/*
- * The nomadik board uses generic clocks, but the serial pl011 file
- * calls clk_enable(), clk_disable(), clk_get_rate(), so we provide them
- */
-unsigned long clk_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-/* enable and disable do nothing */
-int clk_enable(struct clk *clk)
-{
- return 0;
-}
-EXPORT_SYMBOL(clk_enable);
-
-void clk_disable(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(clk_disable);
-
-static struct clk clk_24 = {
- .rate = 2400000,
-};
-
-static struct clk clk_48 = {
- .rate = 48 * 1000 * 1000,
-};
-
-/*
- * Catch-all default clock to satisfy drivers using the clk API. We don't
- * model the actual hardware clocks yet.
- */
-static struct clk clk_default;
-
-#define CLK(_clk, dev) \
- { \
- .clk = _clk, \
- .dev_id = dev, \
- }
-
-static struct clk_lookup lookups[] = {
- {
- .con_id = "apb_pclk",
- .clk = &clk_default,
- },
- CLK(&clk_24, "mtu0"),
- CLK(&clk_24, "mtu1"),
- CLK(&clk_48, "uart0"),
- CLK(&clk_48, "uart1"),
- CLK(&clk_default, "gpio.0"),
- CLK(&clk_default, "gpio.1"),
- CLK(&clk_default, "gpio.2"),
- CLK(&clk_default, "gpio.3"),
- CLK(&clk_default, "rng"),
-};
-
-int __init clk_init(void)
-{
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
- return 0;
-}
diff --git a/arch/arm/mach-nomadik/clock.h b/arch/arm/mach-nomadik/clock.h
deleted file mode 100644
index 78da2e7c3985..000000000000
--- a/arch/arm/mach-nomadik/clock.h
+++ /dev/null
@@ -1,15 +0,0 @@
-
-/*
- * linux/arch/arm/mach-nomadik/clock.h
- *
- * Copyright (C) 2009 Alessandro Rubini
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-struct clk {
- unsigned long rate;
-};
-
-int __init clk_init(void);
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
index 27f43a46985e..6fd8e46567a4 100644
--- a/arch/arm/mach-nomadik/cpu-8815.c
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -22,6 +22,10 @@
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_data/clk-nomadik.h>
#include <plat/gpio-nomadik.h>
#include <mach/hardware.h>
@@ -32,91 +36,63 @@
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
-#include "clock.h"
#include "cpu-8815.h"
-#define __MEM_4K_RESOURCE(x) \
- .res = {.start = (x), .end = (x) + SZ_4K - 1, .flags = IORESOURCE_MEM}
-
/* The 8815 has 4 GPIO blocks, let's register them immediately */
-
-#define GPIO_RESOURCE(block) \
- { \
- .start = NOMADIK_GPIO##block##_BASE, \
- .end = NOMADIK_GPIO##block##_BASE + SZ_4K - 1, \
- .flags = IORESOURCE_MEM, \
- }, \
- { \
- .start = IRQ_GPIO##block, \
- .end = IRQ_GPIO##block, \
- .flags = IORESOURCE_IRQ, \
- }
-
-#define GPIO_DEVICE(block) \
- { \
- .name = "gpio", \
- .id = block, \
- .num_resources = 2, \
- .resource = &cpu8815_gpio_resources[block * 2], \
- .dev = { \
- .platform_data = &cpu8815_gpio[block], \
- }, \
- }
-
-static struct nmk_gpio_platform_data cpu8815_gpio[] = {
- {
- .name = "GPIO-0-31",
- .first_gpio = 0,
- .first_irq = NOMADIK_GPIO_TO_IRQ(0),
- }, {
- .name = "GPIO-32-63",
- .first_gpio = 32,
- .first_irq = NOMADIK_GPIO_TO_IRQ(32),
- }, {
- .name = "GPIO-64-95",
- .first_gpio = 64,
- .first_irq = NOMADIK_GPIO_TO_IRQ(64),
- }, {
- .name = "GPIO-96-127", /* 124..127 not routed to pin */
- .first_gpio = 96,
- .first_irq = NOMADIK_GPIO_TO_IRQ(96),
- }
+static resource_size_t __initdata cpu8815_gpio_base[] = {
+ NOMADIK_GPIO0_BASE,
+ NOMADIK_GPIO1_BASE,
+ NOMADIK_GPIO2_BASE,
+ NOMADIK_GPIO3_BASE,
};
-static struct resource cpu8815_gpio_resources[] = {
- GPIO_RESOURCE(0),
- GPIO_RESOURCE(1),
- GPIO_RESOURCE(2),
- GPIO_RESOURCE(3),
-};
-
-static struct platform_device cpu8815_platform_gpio[] = {
- GPIO_DEVICE(0),
- GPIO_DEVICE(1),
- GPIO_DEVICE(2),
- GPIO_DEVICE(3),
-};
+static struct platform_device *
+cpu8815_add_gpio(int id, resource_size_t addr, int irq,
+ struct nmk_gpio_platform_data *pdata)
+{
+ struct resource resources[] = {
+ {
+ .start = addr,
+ .end = addr + 127,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = irq,
+ .end = irq,
+ .flags = IORESOURCE_IRQ,
+ }
+ };
+
+ return platform_device_register_resndata(NULL, "gpio", id,
+ resources, ARRAY_SIZE(resources),
+ pdata, sizeof(*pdata));
+}
-static AMBA_APB_DEVICE(cpu8815_amba_rng, "rng", 0, NOMADIK_RNG_BASE, { }, NULL);
+void cpu8815_add_gpios(resource_size_t *base, int num, int irq,
+ struct nmk_gpio_platform_data *pdata)
+{
+ int first = 0;
+ int i;
-static struct platform_device *platform_devs[] __initdata = {
- cpu8815_platform_gpio + 0,
- cpu8815_platform_gpio + 1,
- cpu8815_platform_gpio + 2,
- cpu8815_platform_gpio + 3,
-};
+ for (i = 0; i < num; i++, first += 32, irq++) {
+ pdata->first_gpio = first;
+ pdata->first_irq = NOMADIK_GPIO_TO_IRQ(first);
+ pdata->num_gpio = 32;
-static struct amba_device *amba_devs[] __initdata = {
- &cpu8815_amba_rng_device
-};
+ cpu8815_add_gpio(i, base[i], irq, pdata);
+ }
+}
static int __init cpu8815_init(void)
{
- int i;
-
- platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
- for (i = 0; i < ARRAY_SIZE(amba_devs); i++)
- amba_device_register(amba_devs[i], &iomem_resource);
+ struct nmk_gpio_platform_data pdata = {
+ /* No custom data yet */
+ };
+
+ cpu8815_add_gpios(cpu8815_gpio_base, ARRAY_SIZE(cpu8815_gpio_base),
+ IRQ_GPIO0, &pdata);
+ amba_apb_device_add(NULL, "rng", NOMADIK_RNG_BASE, SZ_4K, 0, 0, NULL, 0);
+ amba_apb_device_add(NULL, "rtc-pl031", NOMADIK_RTC_BASE, SZ_4K, IRQ_RTC_RTT, 0, NULL, 0);
return 0;
}
arch_initcall(cpu8815_init);
@@ -147,7 +123,7 @@ void __init cpu8815_init_irq(void)
* Init clocks here so that they are available for system timer
* initialization.
*/
- clk_init();
+ nomadik_clk_init();
}
/*
diff --git a/arch/arm/mach-nomadik/i2c-8815nhk.c b/arch/arm/mach-nomadik/i2c-8815nhk.c
index 0fc2f6f1cc97..6d14454d4609 100644
--- a/arch/arm/mach-nomadik/i2c-8815nhk.c
+++ b/arch/arm/mach-nomadik/i2c-8815nhk.c
@@ -5,6 +5,7 @@
#include <linux/i2c-gpio.h>
#include <linux/platform_device.h>
#include <plat/gpio-nomadik.h>
+#include <plat/pincfg.h>
/*
* There are two busses in the 8815NHK.
@@ -12,19 +13,27 @@
* use bit-bang through GPIO by now, to keep things simple
*/
+/* I2C0 connected to the STw4811 power management chip */
static struct i2c_gpio_platform_data nhk8815_i2c_data0 = {
/* keep defaults for timeouts; pins are push-pull bidirectional */
.scl_pin = 62,
.sda_pin = 63,
};
+/* I2C1 connected to various sensors */
static struct i2c_gpio_platform_data nhk8815_i2c_data1 = {
/* keep defaults for timeouts; pins are push-pull bidirectional */
.scl_pin = 53,
.sda_pin = 54,
};
-/* first bus: GPIO XX and YY */
+/* I2C2 connected to the USB portions of the STw4811 only */
+static struct i2c_gpio_platform_data nhk8815_i2c_data2 = {
+ /* keep defaults for timeouts; pins are push-pull bidirectional */
+ .scl_pin = 73,
+ .sda_pin = 74,
+};
+
static struct platform_device nhk8815_i2c_dev0 = {
.name = "i2c-gpio",
.id = 0,
@@ -32,7 +41,7 @@ static struct platform_device nhk8815_i2c_dev0 = {
.platform_data = &nhk8815_i2c_data0,
},
};
-/* second bus: GPIO XX and YY */
+
static struct platform_device nhk8815_i2c_dev1 = {
.name = "i2c-gpio",
.id = 1,
@@ -41,15 +50,29 @@ static struct platform_device nhk8815_i2c_dev1 = {
},
};
+static struct platform_device nhk8815_i2c_dev2 = {
+ .name = "i2c-gpio",
+ .id = 2,
+ .dev = {
+ .platform_data = &nhk8815_i2c_data2,
+ },
+};
+
+static pin_cfg_t cpu8815_pins_i2c[] = {
+ PIN_CFG_INPUT(62, GPIO, PULLUP),
+ PIN_CFG_INPUT(63, GPIO, PULLUP),
+ PIN_CFG_INPUT(53, GPIO, PULLUP),
+ PIN_CFG_INPUT(54, GPIO, PULLUP),
+ PIN_CFG_INPUT(73, GPIO, PULLUP),
+ PIN_CFG_INPUT(74, GPIO, PULLUP),
+};
+
static int __init nhk8815_i2c_init(void)
{
- nmk_gpio_set_mode(nhk8815_i2c_data0.scl_pin, NMK_GPIO_ALT_GPIO);
- nmk_gpio_set_mode(nhk8815_i2c_data0.sda_pin, NMK_GPIO_ALT_GPIO);
+ nmk_config_pins(cpu8815_pins_i2c, ARRAY_SIZE(cpu8815_pins_i2c));
platform_device_register(&nhk8815_i2c_dev0);
-
- nmk_gpio_set_mode(nhk8815_i2c_data1.scl_pin, NMK_GPIO_ALT_GPIO);
- nmk_gpio_set_mode(nhk8815_i2c_data1.sda_pin, NMK_GPIO_ALT_GPIO);
platform_device_register(&nhk8815_i2c_dev1);
+ platform_device_register(&nhk8815_i2c_dev2);
return 0;
}
@@ -58,6 +81,7 @@ static void __exit nhk8815_i2c_exit(void)
{
platform_device_unregister(&nhk8815_i2c_dev0);
platform_device_unregister(&nhk8815_i2c_dev1);
+ platform_device_unregister(&nhk8815_i2c_dev2);
return;
}
diff --git a/arch/arm/mach-nomadik/include/mach/irqs.h b/arch/arm/mach-nomadik/include/mach/irqs.h
index 8faabc560398..a118e615f865 100644
--- a/arch/arm/mach-nomadik/include/mach/irqs.h
+++ b/arch/arm/mach-nomadik/include/mach/irqs.h
@@ -22,56 +22,56 @@
#include <mach/hardware.h>
-#define IRQ_VIC_START 0 /* first VIC interrupt is 0 */
+#define IRQ_VIC_START 1 /* first VIC interrupt is 1 */
/*
* Interrupt numbers generic for all Nomadik Chip cuts
*/
-#define IRQ_WATCHDOG 0
-#define IRQ_SOFTINT 1
-#define IRQ_CRYPTO 2
-#define IRQ_OWM 3
-#define IRQ_MTU0 4
-#define IRQ_MTU1 5
-#define IRQ_GPIO0 6
-#define IRQ_GPIO1 7
-#define IRQ_GPIO2 8
-#define IRQ_GPIO3 9
-#define IRQ_RTC_RTT 10
-#define IRQ_SSP 11
-#define IRQ_UART0 12
-#define IRQ_DMA1 13
-#define IRQ_CLCD_MDIF 14
-#define IRQ_DMA0 15
-#define IRQ_PWRFAIL 16
-#define IRQ_UART1 17
-#define IRQ_FIRDA 18
-#define IRQ_MSP0 19
-#define IRQ_I2C0 20
-#define IRQ_I2C1 21
-#define IRQ_SDMMC 22
-#define IRQ_USBOTG 23
-#define IRQ_SVA_IT0 24
-#define IRQ_SVA_IT1 25
-#define IRQ_SAA_IT0 26
-#define IRQ_SAA_IT1 27
-#define IRQ_UART2 28
-#define IRQ_MSP2 31
-#define IRQ_L2CC 48
-#define IRQ_HPI 49
-#define IRQ_SKE 50
-#define IRQ_KP 51
-#define IRQ_MEMST 54
-#define IRQ_SGA_IT 58
-#define IRQ_USBM 60
-#define IRQ_MSP1 62
+#define IRQ_WATCHDOG 1
+#define IRQ_SOFTINT 2
+#define IRQ_CRYPTO 3
+#define IRQ_OWM 4
+#define IRQ_MTU0 5
+#define IRQ_MTU1 6
+#define IRQ_GPIO0 7
+#define IRQ_GPIO1 8
+#define IRQ_GPIO2 9
+#define IRQ_GPIO3 10
+#define IRQ_RTC_RTT 11
+#define IRQ_SSP 12
+#define IRQ_UART0 13
+#define IRQ_DMA1 14
+#define IRQ_CLCD_MDIF 15
+#define IRQ_DMA0 16
+#define IRQ_PWRFAIL 17
+#define IRQ_UART1 18
+#define IRQ_FIRDA 19
+#define IRQ_MSP0 20
+#define IRQ_I2C0 21
+#define IRQ_I2C1 22
+#define IRQ_SDMMC 23
+#define IRQ_USBOTG 24
+#define IRQ_SVA_IT0 25
+#define IRQ_SVA_IT1 26
+#define IRQ_SAA_IT0 27
+#define IRQ_SAA_IT1 28
+#define IRQ_UART2 29
+#define IRQ_MSP2 30
+#define IRQ_L2CC 49
+#define IRQ_HPI 50
+#define IRQ_SKE 51
+#define IRQ_KP 52
+#define IRQ_MEMST 55
+#define IRQ_SGA_IT 59
+#define IRQ_USBM 61
+#define IRQ_MSP1 63
-#define NOMADIK_SOC_NR_IRQS 64
+#define NOMADIK_GPIO_OFFSET (IRQ_VIC_START+64)
/* After chip-specific IRQ numbers we have the GPIO ones */
#define NOMADIK_NR_GPIO 128 /* last 4 not wired to pins */
-#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + NOMADIK_SOC_NR_IRQS)
-#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - NOMADIK_SOC_NR_IRQS)
+#define NOMADIK_GPIO_TO_IRQ(gpio) ((gpio) + NOMADIK_GPIO_OFFSET)
+#define NOMADIK_IRQ_TO_GPIO(irq) ((irq) - NOMADIK_GPIO_OFFSET)
#define NR_IRQS NOMADIK_GPIO_TO_IRQ(NOMADIK_NR_GPIO)
/* Following two are used by entry_macro.S, to access our dual-vic */
@@ -79,4 +79,3 @@
#define VIC_REG_IRQSR1 0x20
#endif /* __ASM_ARCH_IRQS_H */
-
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index f2f8a5847018..c53469802c03 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -37,12 +37,12 @@
#include <plat/board-ams-delta.h>
#include <plat/keypad.h>
#include <plat/mux.h>
-#include <plat/usb.h>
#include <plat/board.h>
#include <mach/hardware.h>
#include <mach/ams-delta-fiq.h>
#include <mach/camera.h>
+#include <mach/usb.h>
#include "iomap.h"
#include "common.h"
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index e75e2d55a2d7..6ec385e2b98e 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -23,8 +23,10 @@
#include <asm/mach/map.h>
#include <plat/mux.h>
-#include <plat/usb.h>
#include <plat/board.h>
+
+#include <mach/usb.h>
+
#include "common.h"
/* assume no Mini-AB port */
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index a28e989a63f4..44a4ab195fbc 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -40,11 +40,11 @@
#include <plat/dma.h>
#include <plat/tc.h>
#include <plat/irda.h>
-#include <plat/usb.h>
#include <plat/keypad.h>
#include <plat/flash.h>
#include <mach/hardware.h>
+#include <mach/usb.h>
#include "common.h"
#include "board-h2.h"
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 108a8640fc6f..86cb5a04a404 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -40,13 +40,13 @@
#include <plat/mux.h>
#include <plat/tc.h>
-#include <plat/usb.h>
#include <plat/keypad.h>
#include <plat/dma.h>
#include <plat/flash.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
+#include <mach/usb.h>
#include "common.h"
#include "board-h3.h"
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 118a9d4a4c54..b3f6e943e661 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -44,10 +44,10 @@
#include <plat/omap7xx.h>
#include <plat/board.h>
#include <plat/keypad.h>
-#include <plat/usb.h>
#include <plat/mmc.h>
#include <mach/irqs.h>
+#include <mach/usb.h>
#include "common.h"
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 7970223a559d..f21c2966daad 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -35,11 +35,11 @@
#include <plat/flash.h>
#include <plat/fpga.h>
#include <plat/tc.h>
-#include <plat/usb.h>
#include <plat/keypad.h>
#include <plat/mmc.h>
#include <mach/hardware.h>
+#include <mach/usb.h>
#include "iomap.h"
#include "common.h"
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 7212ae97f44a..4007a372481b 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -26,7 +26,6 @@
#include <asm/mach/map.h>
#include <plat/mux.h>
-#include <plat/usb.h>
#include <plat/board.h>
#include <plat/keypad.h>
#include <plat/lcd_mipid.h>
@@ -34,6 +33,7 @@
#include <plat/clock.h>
#include <mach/hardware.h>
+#include <mach/usb.h>
#include "common.h"
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index da8d872d3d1c..8784705edb60 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -45,11 +45,11 @@
#include <asm/mach/map.h>
#include <plat/flash.h>
-#include <plat/usb.h>
#include <plat/mux.h>
#include <plat/tc.h>
#include <mach/hardware.h>
+#include <mach/usb.h>
#include "common.h"
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index 949b62a73693..26bcb9defcdc 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -35,7 +35,6 @@
#include <plat/flash.h>
#include <plat/mux.h>
-#include <plat/usb.h>
#include <plat/tc.h>
#include <plat/dma.h>
#include <plat/board.h>
@@ -43,6 +42,7 @@
#include <plat/keypad.h>
#include <mach/hardware.h>
+#include <mach/usb.h>
#include "common.h"
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index 7f1e1cf2bf46..4d099446dfa8 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -35,7 +35,6 @@
#include <plat/led.h>
#include <plat/flash.h>
#include <plat/mux.h>
-#include <plat/usb.h>
#include <plat/dma.h>
#include <plat/tc.h>
#include <plat/board.h>
@@ -43,6 +42,7 @@
#include <plat/keypad.h>
#include <mach/hardware.h>
+#include <mach/usb.h>
#include "common.h"
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index 3c71c6bace2c..cc71a26723ef 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -37,7 +37,6 @@
#include <plat/flash.h>
#include <plat/mux.h>
-#include <plat/usb.h>
#include <plat/dma.h>
#include <plat/tc.h>
#include <plat/board.h>
@@ -45,6 +44,7 @@
#include <plat/keypad.h>
#include <mach/hardware.h>
+#include <mach/usb.h>
#include "common.h"
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index 3b7b82b13684..8c665bd16ac2 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -37,13 +37,13 @@
#include <plat/mux.h>
#include <plat/dma.h>
#include <plat/irda.h>
-#include <plat/usb.h>
#include <plat/tc.h>
#include <plat/board.h>
#include <plat/keypad.h>
#include <plat/board-sx1.h>
#include <mach/hardware.h>
+#include <mach/usb.h>
#include "common.h"
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index afd67f0ec495..3497769eb353 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -35,9 +35,10 @@
#include <plat/flash.h>
#include <plat/mux.h>
#include <plat/tc.h>
-#include <plat/usb.h>
+#include <plat/board.h>
#include <mach/hardware.h>
+#include <mach/usb.h>
#include "common.h"
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index c6ce93f71d08..c007d80dfb62 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -25,10 +25,11 @@
#include <plat/clock.h>
#include <plat/cpu.h>
#include <plat/clkdev_omap.h>
+#include <plat/board.h>
#include <plat/sram.h> /* for omap_sram_reprogram_clock() */
-#include <plat/usb.h> /* for OTG_BASE */
#include <mach/hardware.h>
+#include <mach/usb.h> /* for OTG_BASE */
#include "iomap.h"
#include "clock.h"
diff --git a/arch/arm/mach-omap1/include/mach/usb.h b/arch/arm/mach-omap1/include/mach/usb.h
new file mode 100644
index 000000000000..753cd5ce6949
--- /dev/null
+++ b/arch/arm/mach-omap1/include/mach/usb.h
@@ -0,0 +1,165 @@
+/*
+ * FIXME correct answer depends on hmc_mode,
+ * as does (on omap1) any nonzero value for config->otg port number
+ */
+#ifdef CONFIG_USB_GADGET_OMAP
+#define is_usb0_device(config) 1
+#else
+#define is_usb0_device(config) 0
+#endif
+
+struct omap_usb_config {
+ /* Configure drivers according to the connectors on your board:
+ * - "A" connector (rectagular)
+ * ... for host/OHCI use, set "register_host".
+ * - "B" connector (squarish) or "Mini-B"
+ * ... for device/gadget use, set "register_dev".
+ * - "Mini-AB" connector (very similar to Mini-B)
+ * ... for OTG use as device OR host, initialize "otg"
+ */
+ unsigned register_host:1;
+ unsigned register_dev:1;
+ u8 otg; /* port number, 1-based: usb1 == 2 */
+
+ u8 hmc_mode;
+
+ /* implicitly true if otg: host supports remote wakeup? */
+ u8 rwc;
+
+ /* signaling pins used to talk to transceiver on usbN:
+ * 0 == usbN unused
+ * 2 == usb0-only, using internal transceiver
+ * 3 == 3 wire bidirectional
+ * 4 == 4 wire bidirectional
+ * 6 == 6 wire unidirectional (or TLL)
+ */
+ u8 pins[3];
+
+ struct platform_device *udc_device;
+ struct platform_device *ohci_device;
+ struct platform_device *otg_device;
+
+ u32 (*usb0_init)(unsigned nwires, unsigned is_device);
+ u32 (*usb1_init)(unsigned nwires);
+ u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup);
+
+ int (*ocpi_enable)(void);
+};
+
+void omap_otg_init(struct omap_usb_config *config);
+
+#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE)
+void omap1_usb_init(struct omap_usb_config *pdata);
+#else
+static inline void omap1_usb_init(struct omap_usb_config *pdata)
+{
+}
+#endif
+
+#define OMAP1_OTG_BASE 0xfffb0400
+#define OMAP1_UDC_BASE 0xfffb4000
+#define OMAP1_OHCI_BASE 0xfffba000
+
+#define OMAP2_OHCI_BASE 0x4805e000
+#define OMAP2_UDC_BASE 0x4805e200
+#define OMAP2_OTG_BASE 0x4805e300
+#define OTG_BASE OMAP1_OTG_BASE
+#define UDC_BASE OMAP1_UDC_BASE
+#define OMAP_OHCI_BASE OMAP1_OHCI_BASE
+
+/*
+ * OTG and transceiver registers, for OMAPs starting with ARM926
+ */
+#define OTG_REV (OTG_BASE + 0x00)
+#define OTG_SYSCON_1 (OTG_BASE + 0x04)
+# define USB2_TRX_MODE(w) (((w)>>24)&0x07)
+# define USB1_TRX_MODE(w) (((w)>>20)&0x07)
+# define USB0_TRX_MODE(w) (((w)>>16)&0x07)
+# define OTG_IDLE_EN (1 << 15)
+# define HST_IDLE_EN (1 << 14)
+# define DEV_IDLE_EN (1 << 13)
+# define OTG_RESET_DONE (1 << 2)
+# define OTG_SOFT_RESET (1 << 1)
+#define OTG_SYSCON_2 (OTG_BASE + 0x08)
+# define OTG_EN (1 << 31)
+# define USBX_SYNCHRO (1 << 30)
+# define OTG_MST16 (1 << 29)
+# define SRP_GPDATA (1 << 28)
+# define SRP_GPDVBUS (1 << 27)
+# define SRP_GPUVBUS(w) (((w)>>24)&0x07)
+# define A_WAIT_VRISE(w) (((w)>>20)&0x07)
+# define B_ASE_BRST(w) (((w)>>16)&0x07)
+# define SRP_DPW (1 << 14)
+# define SRP_DATA (1 << 13)
+# define SRP_VBUS (1 << 12)
+# define OTG_PADEN (1 << 10)
+# define HMC_PADEN (1 << 9)
+# define UHOST_EN (1 << 8)
+# define HMC_TLLSPEED (1 << 7)
+# define HMC_TLLATTACH (1 << 6)
+# define OTG_HMC(w) (((w)>>0)&0x3f)
+#define OTG_CTRL (OTG_BASE + 0x0c)
+# define OTG_USB2_EN (1 << 29)
+# define OTG_USB2_DP (1 << 28)
+# define OTG_USB2_DM (1 << 27)
+# define OTG_USB1_EN (1 << 26)
+# define OTG_USB1_DP (1 << 25)
+# define OTG_USB1_DM (1 << 24)
+# define OTG_USB0_EN (1 << 23)
+# define OTG_USB0_DP (1 << 22)
+# define OTG_USB0_DM (1 << 21)
+# define OTG_ASESSVLD (1 << 20)
+# define OTG_BSESSEND (1 << 19)
+# define OTG_BSESSVLD (1 << 18)
+# define OTG_VBUSVLD (1 << 17)
+# define OTG_ID (1 << 16)
+# define OTG_DRIVER_SEL (1 << 15)
+# define OTG_A_SETB_HNPEN (1 << 12)
+# define OTG_A_BUSREQ (1 << 11)
+# define OTG_B_HNPEN (1 << 9)
+# define OTG_B_BUSREQ (1 << 8)
+# define OTG_BUSDROP (1 << 7)
+# define OTG_PULLDOWN (1 << 5)
+# define OTG_PULLUP (1 << 4)
+# define OTG_DRV_VBUS (1 << 3)
+# define OTG_PD_VBUS (1 << 2)
+# define OTG_PU_VBUS (1 << 1)
+# define OTG_PU_ID (1 << 0)
+#define OTG_IRQ_EN (OTG_BASE + 0x10) /* 16-bit */
+# define DRIVER_SWITCH (1 << 15)
+# define A_VBUS_ERR (1 << 13)
+# define A_REQ_TMROUT (1 << 12)
+# define A_SRP_DETECT (1 << 11)
+# define B_HNP_FAIL (1 << 10)
+# define B_SRP_TMROUT (1 << 9)
+# define B_SRP_DONE (1 << 8)
+# define B_SRP_STARTED (1 << 7)
+# define OPRT_CHG (1 << 0)
+#define OTG_IRQ_SRC (OTG_BASE + 0x14) /* 16-bit */
+ // same bits as in IRQ_EN
+#define OTG_OUTCTRL (OTG_BASE + 0x18) /* 16-bit */
+# define OTGVPD (1 << 14)
+# define OTGVPU (1 << 13)
+# define OTGPUID (1 << 12)
+# define USB2VDR (1 << 10)
+# define USB2PDEN (1 << 9)
+# define USB2PUEN (1 << 8)
+# define USB1VDR (1 << 6)
+# define USB1PDEN (1 << 5)
+# define USB1PUEN (1 << 4)
+# define USB0VDR (1 << 2)
+# define USB0PDEN (1 << 1)
+# define USB0PUEN (1 << 0)
+#define OTG_TEST (OTG_BASE + 0x20) /* 16-bit */
+#define OTG_VENDOR_CODE (OTG_BASE + 0xfc) /* 16-bit */
+
+/*-------------------------------------------------------------------------*/
+
+/* OMAP1 */
+#define USB_TRANSCEIVER_CTRL (0xfffe1000 + 0x0064)
+# define CONF_USB2_UNI_R (1 << 8)
+# define CONF_USB1_UNI_R (1 << 7)
+# define CONF_USB_PORT0_R(x) (((x)>>4)&0x7)
+# define CONF_USB0_ISOLATE_R (1 << 3)
+# define CONF_USB_PWRDN_DM_R (1 << 2)
+# define CONF_USB_PWRDN_DP_R (1 << 1)
diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c
index 64c65bcb2d67..aa81593db1af 100644
--- a/arch/arm/mach-omap1/timer.c
+++ b/arch/arm/mach-omap1/timer.c
@@ -140,7 +140,8 @@ static int __init omap1_dm_timer_init(void)
}
pdata->set_timer_src = omap1_dm_timer_set_src;
- pdata->needs_manual_reset = 1;
+ pdata->timer_capability = OMAP_TIMER_ALWON |
+ OMAP_TIMER_NEEDS_RESET;
ret = platform_device_add_data(pdev, pdata, sizeof(*pdata));
if (ret) {
diff --git a/arch/arm/mach-omap1/usb.c b/arch/arm/mach-omap1/usb.c
index e61afd922766..65f88176fba8 100644
--- a/arch/arm/mach-omap1/usb.c
+++ b/arch/arm/mach-omap1/usb.c
@@ -27,7 +27,8 @@
#include <asm/irq.h>
#include <plat/mux.h>
-#include <plat/usb.h>
+
+#include <mach/usb.h>
#include "common.h"
@@ -55,6 +56,119 @@
#define INT_USB_IRQ_HGEN INT_USB_HHC_1
#define INT_USB_IRQ_OTG IH2_BASE + 8
+#ifdef CONFIG_ARCH_OMAP_OTG
+
+void __init
+omap_otg_init(struct omap_usb_config *config)
+{
+ u32 syscon;
+ int alt_pingroup = 0;
+
+ /* NOTE: no bus or clock setup (yet?) */
+
+ syscon = omap_readl(OTG_SYSCON_1) & 0xffff;
+ if (!(syscon & OTG_RESET_DONE))
+ pr_debug("USB resets not complete?\n");
+
+ //omap_writew(0, OTG_IRQ_EN);
+
+ /* pin muxing and transceiver pinouts */
+ if (config->pins[0] > 2) /* alt pingroup 2 */
+ alt_pingroup = 1;
+ syscon |= config->usb0_init(config->pins[0], is_usb0_device(config));
+ syscon |= config->usb1_init(config->pins[1]);
+ syscon |= config->usb2_init(config->pins[2], alt_pingroup);
+ pr_debug("OTG_SYSCON_1 = %08x\n", omap_readl(OTG_SYSCON_1));
+ omap_writel(syscon, OTG_SYSCON_1);
+
+ syscon = config->hmc_mode;
+ syscon |= USBX_SYNCHRO | (4 << 16) /* B_ASE0_BRST */;
+#ifdef CONFIG_USB_OTG
+ if (config->otg)
+ syscon |= OTG_EN;
+#endif
+ if (cpu_class_is_omap1())
+ pr_debug("USB_TRANSCEIVER_CTRL = %03x\n",
+ omap_readl(USB_TRANSCEIVER_CTRL));
+ pr_debug("OTG_SYSCON_2 = %08x\n", omap_readl(OTG_SYSCON_2));
+ omap_writel(syscon, OTG_SYSCON_2);
+
+ printk("USB: hmc %d", config->hmc_mode);
+ if (!alt_pingroup)
+ printk(", usb2 alt %d wires", config->pins[2]);
+ else if (config->pins[0])
+ printk(", usb0 %d wires%s", config->pins[0],
+ is_usb0_device(config) ? " (dev)" : "");
+ if (config->pins[1])
+ printk(", usb1 %d wires", config->pins[1]);
+ if (!alt_pingroup && config->pins[2])
+ printk(", usb2 %d wires", config->pins[2]);
+ if (config->otg)
+ printk(", Mini-AB on usb%d", config->otg - 1);
+ printk("\n");
+
+ if (cpu_class_is_omap1()) {
+ u16 w;
+
+ /* leave USB clocks/controllers off until needed */
+ w = omap_readw(ULPD_SOFT_REQ);
+ w &= ~SOFT_USB_CLK_REQ;
+ omap_writew(w, ULPD_SOFT_REQ);
+
+ w = omap_readw(ULPD_CLOCK_CTRL);
+ w &= ~USB_MCLK_EN;
+ w |= DIS_USB_PVCI_CLK;
+ omap_writew(w, ULPD_CLOCK_CTRL);
+ }
+ syscon = omap_readl(OTG_SYSCON_1);
+ syscon |= HST_IDLE_EN|DEV_IDLE_EN|OTG_IDLE_EN;
+
+#ifdef CONFIG_USB_GADGET_OMAP
+ if (config->otg || config->register_dev) {
+ struct platform_device *udc_device = config->udc_device;
+ int status;
+
+ syscon &= ~DEV_IDLE_EN;
+ udc_device->dev.platform_data = config;
+ status = platform_device_register(udc_device);
+ if (status)
+ pr_debug("can't register UDC device, %d\n", status);
+ }
+#endif
+
+#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
+ if (config->otg || config->register_host) {
+ struct platform_device *ohci_device = config->ohci_device;
+ int status;
+
+ syscon &= ~HST_IDLE_EN;
+ ohci_device->dev.platform_data = config;
+ status = platform_device_register(ohci_device);
+ if (status)
+ pr_debug("can't register OHCI device, %d\n", status);
+ }
+#endif
+
+#ifdef CONFIG_USB_OTG
+ if (config->otg) {
+ struct platform_device *otg_device = config->otg_device;
+ int status;
+
+ syscon &= ~OTG_IDLE_EN;
+ otg_device->dev.platform_data = config;
+ status = platform_device_register(otg_device);
+ if (status)
+ pr_debug("can't register OTG device, %d\n", status);
+ }
+#endif
+ pr_debug("OTG_SYSCON_1 = %08x\n", omap_readl(OTG_SYSCON_1));
+ omap_writel(syscon, OTG_SYSCON_1);
+}
+
+#else
+void omap_otg_init(struct omap_usb_config *config) {}
+#endif
+
#ifdef CONFIG_USB_GADGET_OMAP
static struct resource udc_resources[] = {
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 4cf5142f22cc..dd0fbf76ac79 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -9,7 +9,7 @@ config ARCH_OMAP2PLUS_TYPICAL
select REGULATOR
select PM_RUNTIME
select VFP
- select NEON if ARCH_OMAP3 || ARCH_OMAP4
+ select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5
select SERIAL_OMAP
select SERIAL_OMAP_CONSOLE
select I2C
@@ -21,12 +21,16 @@ config ARCH_OMAP2PLUS_TYPICAL
help
Compile a kernel suitable for booting most boards
+config SOC_HAS_OMAP2_SDRC
+ bool "OMAP2 SDRAM Controller support"
+
config ARCH_OMAP2
bool "TI OMAP2"
depends on ARCH_OMAP2PLUS
default y
select CPU_V6
select MULTI_IRQ_HANDLER
+ select SOC_HAS_OMAP2_SDRC
config ARCH_OMAP3
bool "TI OMAP3"
@@ -35,9 +39,11 @@ config ARCH_OMAP3
select CPU_V7
select USB_ARCH_HAS_EHCI if USB_SUPPORT
select ARCH_HAS_OPP
+ select PM_RUNTIME if CPU_IDLE
select PM_OPP if PM
select ARM_CPU_SUSPEND if PM
select MULTI_IRQ_HANDLER
+ select SOC_HAS_OMAP2_SDRC
config ARCH_OMAP4
bool "TI OMAP4"
@@ -52,10 +58,17 @@ config ARCH_OMAP4
select PL310_ERRATA_727915
select ARM_ERRATA_720789
select ARCH_HAS_OPP
+ select PM_RUNTIME if CPU_IDLE
select PM_OPP if PM
select USB_ARCH_HAS_EHCI if USB_SUPPORT
select ARM_CPU_SUSPEND if PM
+config SOC_OMAP5
+ bool "TI OMAP5"
+ select CPU_V7
+ select ARM_GIC
+ select HAVE_SMP
+
comment "OMAP Core Type"
depends on ARCH_OMAP2
@@ -64,19 +77,19 @@ config SOC_OMAP2420
depends on ARCH_OMAP2
default y
select OMAP_DM_TIMER
- select ARCH_OMAP_OTG
+ select SOC_HAS_OMAP2_SDRC
config SOC_OMAP2430
bool "OMAP2430 support"
depends on ARCH_OMAP2
default y
- select ARCH_OMAP_OTG
+ select SOC_HAS_OMAP2_SDRC
config SOC_OMAP3430
bool "OMAP3430 support"
depends on ARCH_OMAP3
default y
- select ARCH_OMAP_OTG
+ select SOC_HAS_OMAP2_SDRC
config SOC_TI81XX
bool "TI81XX support"
@@ -85,8 +98,10 @@ config SOC_TI81XX
config SOC_AM33XX
bool "AM33XX support"
- depends on ARCH_OMAP3
default y
+ select CPU_V7
+ select ARM_CPU_SUSPEND if PM
+ select MULTI_IRQ_HANDLER
config OMAP_PACKAGE_ZAF
bool
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index fa742f3c2629..f6a24b3f9c4f 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -6,7 +6,7 @@
obj-y := id.o io.o control.o mux.o devices.o serial.o gpmc.o timer.o pm.o \
common.o gpio.o dma.o wd_timer.o display.o i2c.o hdq1w.o
-omap-2-3-common = irq.o sdrc.o
+omap-2-3-common = irq.o
hwmod-common = omap_hwmod.o \
omap_hwmod_common_data.o
clock-common = clock.o clock_common_data.o \
@@ -16,19 +16,24 @@ secure-common = omap-smc.o omap-secure.o
obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common)
+obj-$(CONFIG_SOC_AM33XX) += irq.o $(hwmod-common)
+obj-$(CONFIG_SOC_OMAP5) += prm44xx.o $(hwmod-common) $(secure-common)
ifneq ($(CONFIG_SND_OMAP_SOC_MCBSP),)
obj-y += mcbsp.o
endif
obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
+obj-$(CONFIG_SOC_HAS_OMAP2_SDRC) += sdrc.o
# SMP support ONLY available for OMAP4
obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
-obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o
-obj-$(CONFIG_ARCH_OMAP4) += sleep44xx.o
+omap-4-5-common = omap4-common.o omap-wakeupgen.o \
+ sleep44xx.o
+obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-common)
+obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-common)
plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec)
@@ -66,12 +71,12 @@ ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o
-obj-$(CONFIG_ARCH_OMAP3) += cpuidle34xx.o
obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o
-obj-$(CONFIG_ARCH_OMAP4) += cpuidle44xx.o
+obj-$(CONFIG_SOC_OMAP5) += omap-mpuss-lowpower.o
obj-$(CONFIG_PM_DEBUG) += pm-debug.o
-obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o
-obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o
+
+obj-$(CONFIG_POWER_AVS_OMAP) += sr_device.o
+obj-$(CONFIG_POWER_AVS_OMAP_CLASS3) += smartreflex-class3.o
AFLAGS_sleep24xx.o :=-Wa,-march=armv6
AFLAGS_sleep34xx.o :=-Wa,-march=armv7-a$(plus_sec)
@@ -82,14 +87,22 @@ endif
endif
+ifeq ($(CONFIG_CPU_IDLE),y)
+obj-$(CONFIG_ARCH_OMAP3) += cpuidle34xx.o
+obj-$(CONFIG_ARCH_OMAP4) += cpuidle44xx.o
+endif
+
# PRCM
+omap-prcm-4-5-common = prcm.o cminst44xx.o cm44xx.o \
+ prcm_mpu44xx.o prminst44xx.o \
+ vc44xx_data.o vp44xx_data.o
obj-y += prm_common.o
obj-$(CONFIG_ARCH_OMAP2) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
obj-$(CONFIG_ARCH_OMAP3) += vc3xxx_data.o vp3xxx_data.o
-obj-$(CONFIG_ARCH_OMAP4) += prcm.o cminst44xx.o cm44xx.o
-obj-$(CONFIG_ARCH_OMAP4) += prcm_mpu44xx.o prminst44xx.o
-obj-$(CONFIG_ARCH_OMAP4) += vc44xx_data.o vp44xx_data.o prm44xx.o
+obj-$(CONFIG_SOC_AM33XX) += prcm.o prm33xx.o cm33xx.o
+obj-$(CONFIG_ARCH_OMAP4) += $(omap-prcm-4-5-common) prm44xx.o
+obj-$(CONFIG_SOC_OMAP5) += $(omap-prcm-4-5-common)
# OMAP voltage domains
voltagedomain-common := voltage.o vc.o vp.o
@@ -99,6 +112,9 @@ obj-$(CONFIG_ARCH_OMAP3) += $(voltagedomain-common)
obj-$(CONFIG_ARCH_OMAP3) += voltagedomains3xxx_data.o
obj-$(CONFIG_ARCH_OMAP4) += $(voltagedomain-common)
obj-$(CONFIG_ARCH_OMAP4) += voltagedomains44xx_data.o
+obj-$(CONFIG_SOC_AM33XX) += $(voltagedomain-common)
+obj-$(CONFIG_SOC_AM33XX) += voltagedomains33xx_data.o
+obj-$(CONFIG_SOC_OMAP5) += $(voltagedomain-common)
# OMAP powerdomain framework
powerdomain-common += powerdomain.o powerdomain-common.o
@@ -113,10 +129,14 @@ obj-$(CONFIG_ARCH_OMAP3) += powerdomains2xxx_3xxx_data.o
obj-$(CONFIG_ARCH_OMAP4) += $(powerdomain-common)
obj-$(CONFIG_ARCH_OMAP4) += powerdomain44xx.o
obj-$(CONFIG_ARCH_OMAP4) += powerdomains44xx_data.o
+obj-$(CONFIG_SOC_AM33XX) += $(powerdomain-common)
+obj-$(CONFIG_SOC_AM33XX) += powerdomain33xx.o
+obj-$(CONFIG_SOC_AM33XX) += powerdomains33xx_data.o
+obj-$(CONFIG_SOC_OMAP5) += $(powerdomain-common)
+obj-$(CONFIG_SOC_OMAP5) += powerdomain44xx.o
# PRCM clockdomain control
clockdomain-common += clockdomain.o
-clockdomain-common += clockdomains_common_data.o
obj-$(CONFIG_ARCH_OMAP2) += $(clockdomain-common)
obj-$(CONFIG_ARCH_OMAP2) += clockdomain2xxx_3xxx.o
obj-$(CONFIG_ARCH_OMAP2) += clockdomains2xxx_3xxx_data.o
@@ -129,6 +149,11 @@ obj-$(CONFIG_ARCH_OMAP3) += clockdomains3xxx_data.o
obj-$(CONFIG_ARCH_OMAP4) += $(clockdomain-common)
obj-$(CONFIG_ARCH_OMAP4) += clockdomain44xx.o
obj-$(CONFIG_ARCH_OMAP4) += clockdomains44xx_data.o
+obj-$(CONFIG_SOC_AM33XX) += $(clockdomain-common)
+obj-$(CONFIG_SOC_AM33XX) += clockdomain33xx.o
+obj-$(CONFIG_SOC_AM33XX) += clockdomains33xx_data.o
+obj-$(CONFIG_SOC_OMAP5) += $(clockdomain-common)
+obj-$(CONFIG_SOC_OMAP5) += clockdomain44xx.o
# Clock framework
obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o
@@ -146,6 +171,10 @@ obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o clock3xxx_data.o
obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o
obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) clock44xx_data.o
obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o
+obj-$(CONFIG_SOC_AM33XX) += $(clock-common) dpll3xxx.o
+obj-$(CONFIG_SOC_AM33XX) += clock33xx_data.o
+obj-$(CONFIG_SOC_OMAP5) += $(clock-common)
+obj-$(CONFIG_SOC_OMAP5) += dpll3xxx.o dpll44xx.o
# OMAP2 clock rate set data (old "OPP" data)
obj-$(CONFIG_SOC_OMAP2420) += opp2420_data.o
@@ -173,6 +202,7 @@ obj-$(CONFIG_OMAP3_EMU) += emu.o
# L3 interconnect
obj-$(CONFIG_ARCH_OMAP3) += omap_l3_smx.o
obj-$(CONFIG_ARCH_OMAP4) += omap_l3_noc.o
+obj-$(CONFIG_SOC_OMAP5) += omap_l3_noc.o
obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o
mailbox_mach-objs := mailbox.o
@@ -189,6 +219,10 @@ endif
# OMAP2420 MSDI controller integration support ("MMC")
obj-$(CONFIG_SOC_OMAP2420) += msdi.o
+ifneq ($(CONFIG_DRM_OMAP),)
+obj-y += drm.o
+endif
+
# Specific board support
obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o
obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o
@@ -244,9 +278,6 @@ obj-y += $(omap-flash-y) $(omap-flash-m)
omap-hsmmc-$(CONFIG_MMC_OMAP_HS) := hsmmc.o
obj-y += $(omap-hsmmc-m) $(omap-hsmmc-y)
-
-usbfs-$(CONFIG_ARCH_OMAP_OTG) := usb-fs.o
-obj-y += $(usbfs-m) $(usbfs-y)
obj-y += usb-musb.o
obj-y += omap_phy_internal.o
diff --git a/arch/arm/mach-omap2/am35xx-emac.c b/arch/arm/mach-omap2/am35xx-emac.c
index 447682c4e11c..2c90ac686686 100644
--- a/arch/arm/mach-omap2/am35xx-emac.c
+++ b/arch/arm/mach-omap2/am35xx-emac.c
@@ -15,27 +15,13 @@
* General Public License for more details.
*/
-#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/davinci_emac.h>
-#include <linux/platform_device.h>
-#include <plat/irqs.h>
+#include <asm/system.h>
+#include <plat/omap_device.h>
#include <mach/am35xx.h>
-
#include "control.h"
-
-static struct mdio_platform_data am35xx_emac_mdio_pdata;
-
-static struct resource am35xx_emac_mdio_resources[] = {
- DEFINE_RES_MEM(AM35XX_IPSS_EMAC_BASE + AM35XX_EMAC_MDIO_OFFSET, SZ_4K),
-};
-
-static struct platform_device am35xx_emac_mdio_device = {
- .name = "davinci_mdio",
- .id = 0,
- .num_resources = ARRAY_SIZE(am35xx_emac_mdio_resources),
- .resource = am35xx_emac_mdio_resources,
- .dev.platform_data = &am35xx_emac_mdio_pdata,
-};
+#include "am35xx-emac.h"
static void am35xx_enable_emac_int(void)
{
@@ -69,41 +55,57 @@ static struct emac_platform_data am35xx_emac_pdata = {
.interrupt_disable = am35xx_disable_emac_int,
};
-static struct resource am35xx_emac_resources[] = {
- DEFINE_RES_MEM(AM35XX_IPSS_EMAC_BASE, 0x30000),
- DEFINE_RES_IRQ(INT_35XX_EMAC_C0_RXTHRESH_IRQ),
- DEFINE_RES_IRQ(INT_35XX_EMAC_C0_RX_PULSE_IRQ),
- DEFINE_RES_IRQ(INT_35XX_EMAC_C0_TX_PULSE_IRQ),
- DEFINE_RES_IRQ(INT_35XX_EMAC_C0_MISC_PULSE_IRQ),
-};
+static struct mdio_platform_data am35xx_mdio_pdata;
-static struct platform_device am35xx_emac_device = {
- .name = "davinci_emac",
- .id = -1,
- .num_resources = ARRAY_SIZE(am35xx_emac_resources),
- .resource = am35xx_emac_resources,
- .dev = {
- .platform_data = &am35xx_emac_pdata,
- },
-};
+static int __init omap_davinci_emac_dev_init(struct omap_hwmod *oh,
+ void *pdata, int pdata_len)
+{
+ struct platform_device *pdev;
+
+ pdev = omap_device_build(oh->class->name, 0, oh, pdata, pdata_len,
+ NULL, 0, false);
+ if (IS_ERR(pdev)) {
+ WARN(1, "Can't build omap_device for %s:%s.\n",
+ oh->class->name, oh->name);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
void __init am35xx_emac_init(unsigned long mdio_bus_freq, u8 rmii_en)
{
+ struct omap_hwmod *oh;
u32 v;
- int err;
+ int ret;
- am35xx_emac_pdata.rmii_en = rmii_en;
- am35xx_emac_mdio_pdata.bus_freq = mdio_bus_freq;
- err = platform_device_register(&am35xx_emac_device);
- if (err) {
- pr_err("AM35x: failed registering EMAC device: %d\n", err);
+ oh = omap_hwmod_lookup("davinci_mdio");
+ if (!oh) {
+ pr_err("Could not find davinci_mdio hwmod\n");
+ return;
+ }
+
+ am35xx_mdio_pdata.bus_freq = mdio_bus_freq;
+
+ ret = omap_davinci_emac_dev_init(oh, &am35xx_mdio_pdata,
+ sizeof(am35xx_mdio_pdata));
+ if (ret) {
+ pr_err("Could not build davinci_mdio hwmod device\n");
return;
}
- err = platform_device_register(&am35xx_emac_mdio_device);
- if (err) {
- pr_err("AM35x: failed registering EMAC MDIO device: %d\n", err);
- platform_device_unregister(&am35xx_emac_device);
+ oh = omap_hwmod_lookup("davinci_emac");
+ if (!oh) {
+ pr_err("Could not find davinci_emac hwmod\n");
+ return;
+ }
+
+ am35xx_emac_pdata.rmii_en = rmii_en;
+
+ ret = omap_davinci_emac_dev_init(oh, &am35xx_emac_pdata,
+ sizeof(am35xx_emac_pdata));
+ if (ret) {
+ pr_err("Could not build davinci_emac hwmod device\n");
return;
}
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 99ca6bad5c30..9511584fdc4f 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -218,9 +218,6 @@ static struct twl4030_gpio_platform_data sdp2430_gpio_data = {
};
static struct twl4030_platform_data sdp2430_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
.gpio = &sdp2430_gpio_data,
.vmmc1 = &sdp2430_vmmc1,
@@ -254,16 +251,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
{} /* Terminator */
};
-static struct omap_usb_config sdp2430_usb_config __initdata = {
- .otg = 1,
-#ifdef CONFIG_USB_GADGET_OMAP
- .hmc_mode = 0x0,
-#elif defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
- .hmc_mode = 0x1,
-#endif
- .pins[0] = 3,
-};
-
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
{ .reg_offset = OMAP_MUX_TERMINATOR },
@@ -280,7 +267,6 @@ static void __init omap_2430sdp_init(void)
omap_serial_init();
omap_sdrc_init(NULL, NULL);
omap_hsmmc_init(mmc);
- omap2_usbfs_init(&sdp2430_usb_config);
omap_mux_init_signal("usb0hs_stp", OMAP_PULL_ENA | OMAP_PULL_UP);
usb_musb_init(NULL);
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 8e17284a803f..ad8a7d94afcd 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -821,6 +821,9 @@ static void __init omap_4430sdp_display_init(void)
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
OMAP4_MUX(USBB2_ULPITLL_CLK, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
+ /* NIRQ2 for twl6040 */
+ OMAP4_MUX(SYS_NIRQ2, OMAP_MUX_MODE0 |
+ OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index 502c31e123be..e5fa46bfde2f 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -35,7 +35,6 @@
#include <asm/mach/flash.h>
#include <plat/led.h>
-#include <plat/usb.h>
#include <plat/board.h>
#include "common.h"
#include <plat/gpmc.h>
@@ -253,13 +252,6 @@ out:
clk_put(gpmc_fck);
}
-static struct omap_usb_config apollon_usb_config __initdata = {
- .register_dev = 1,
- .hmc_mode = 0x14, /* 0:dev 1:host1 2:disable */
-
- .pins[0] = 6,
-};
-
static struct panel_generic_dpi_data apollon_panel_data = {
.name = "apollon",
};
@@ -297,15 +289,6 @@ static void __init apollon_led_init(void)
gpio_request_array(apollon_gpio_leds, ARRAY_SIZE(apollon_gpio_leds));
}
-static void __init apollon_usb_init(void)
-{
- /* USB device */
- /* DEVICE_SUSPEND */
- omap_mux_init_signal("mcbsp2_clkx.gpio_12", 0);
- gpio_request_one(12, GPIOF_OUT_INIT_LOW, "USB suspend");
- omap2_usbfs_init(&apollon_usb_config);
-}
-
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux board_mux[] __initdata = {
{ .reg_offset = OMAP_MUX_TERMINATOR },
@@ -321,7 +304,6 @@ static void __init omap_apollon_init(void)
apollon_init_smc91x();
apollon_led_init();
apollon_flash_init();
- apollon_usb_init();
/* REVISIT: where's the correct place */
omap_mux_init_signal("sys_nirq", OMAP_PULL_ENA | OMAP_PULL_UP);
@@ -329,7 +311,7 @@ static void __init omap_apollon_init(void)
/* LCD PWR_EN */
omap_mux_init_signal("mcbsp2_dr.gpio_11", OMAP_PULL_ENA | OMAP_PULL_UP);
- /* Use Interal loop-back in MMC/SDIO Module Input Clock selection */
+ /* Use Internal loop-back in MMC/SDIO Module Input Clock selection */
v = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
v |= (1 << 24);
omap_ctrl_writel(v, OMAP2_CONTROL_DEVCONF0);
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index ded100c80a91..97d719047af3 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -490,6 +490,71 @@ static struct twl4030_platform_data cm_t35_twldata = {
.power = &cm_t35_power_data,
};
+#if defined(CONFIG_VIDEO_OMAP3) || defined(CONFIG_VIDEO_OMAP3_MODULE)
+#include <media/omap3isp.h>
+#include "devices.h"
+
+static struct i2c_board_info cm_t35_isp_i2c_boardinfo[] = {
+ {
+ I2C_BOARD_INFO("mt9t001", 0x5d),
+ },
+ {
+ I2C_BOARD_INFO("tvp5150", 0x5c),
+ },
+};
+
+static struct isp_subdev_i2c_board_info cm_t35_isp_primary_subdevs[] = {
+ {
+ .board_info = &cm_t35_isp_i2c_boardinfo[0],
+ .i2c_adapter_id = 3,
+ },
+ { NULL, 0, },
+};
+
+static struct isp_subdev_i2c_board_info cm_t35_isp_secondary_subdevs[] = {
+ {
+ .board_info = &cm_t35_isp_i2c_boardinfo[1],
+ .i2c_adapter_id = 3,
+ },
+ { NULL, 0, },
+};
+
+static struct isp_v4l2_subdevs_group cm_t35_isp_subdevs[] = {
+ {
+ .subdevs = cm_t35_isp_primary_subdevs,
+ .interface = ISP_INTERFACE_PARALLEL,
+ .bus = {
+ .parallel = {
+ .clk_pol = 1,
+ },
+ },
+ },
+ {
+ .subdevs = cm_t35_isp_secondary_subdevs,
+ .interface = ISP_INTERFACE_PARALLEL,
+ .bus = {
+ .parallel = {
+ .clk_pol = 0,
+ },
+ },
+ },
+ { NULL, 0, },
+};
+
+static struct isp_platform_data cm_t35_isp_pdata = {
+ .subdevs = cm_t35_isp_subdevs,
+};
+
+static void __init cm_t35_init_camera(void)
+{
+ if (omap3_init_camera(&cm_t35_isp_pdata) < 0)
+ pr_warn("CM-T3x: Failed registering camera device!\n");
+}
+
+#else
+static inline void cm_t35_init_camera(void) {}
+#endif /* CONFIG_VIDEO_OMAP3 */
+
static void __init cm_t35_init_i2c(void)
{
omap3_pmic_get_config(&cm_t35_twldata, TWL_COMMON_PDATA_USB,
@@ -497,6 +562,8 @@ static void __init cm_t35_init_i2c(void)
TWL_COMMON_PDATA_AUDIO);
omap3_pmic_init("tps65930", &cm_t35_twldata);
+
+ omap_register_i2c_bus(3, 400, NULL, 0);
}
#ifdef CONFIG_OMAP_MUX
@@ -574,6 +641,27 @@ static struct omap_board_mux board_mux[] __initdata = {
OMAP3_MUX(DSS_DATA16, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
OMAP3_MUX(DSS_DATA17, OMAP_MUX_MODE0 | OMAP_PIN_OUTPUT),
+ /* Camera */
+ OMAP3_MUX(CAM_HS, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_VS, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_XCLKA, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_PCLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_FLD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_D0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_D1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_D2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_D3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_D4, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_D5, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_D6, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_D7, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+ OMAP3_MUX(CAM_D8, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
+ OMAP3_MUX(CAM_D9, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLDOWN),
+ OMAP3_MUX(CAM_STROBE, OMAP_MUX_MODE0 | OMAP_PIN_INPUT),
+
+ OMAP3_MUX(CAM_D10, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLDOWN),
+ OMAP3_MUX(CAM_D11, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLDOWN),
+
/* display controls */
OMAP3_MUX(MCBSP1_FSR, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
OMAP3_MUX(GPMC_NCS7, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
@@ -646,6 +734,7 @@ static void __init cm_t3x_common_init(void)
usb_musb_init(NULL);
cm_t35_init_usbh();
+ cm_t35_init_camera();
}
static void __init cm_t35_init(void)
diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c
index 70a81f900bb5..53c39d239d6e 100644
--- a/arch/arm/mach-omap2/board-flash.c
+++ b/arch/arm/mach-omap2/board-flash.c
@@ -97,11 +97,6 @@ __init board_onenand_init(struct mtd_partition *onenand_parts,
gpmc_onenand_init(&board_onenand_data);
}
-#else
-void
-__init board_onenand_init(struct mtd_partition *nor_parts, u8 nr_parts, u8 cs)
-{
-}
#endif /* CONFIG_MTD_ONENAND_OMAP2 || CONFIG_MTD_ONENAND_OMAP2_MODULE */
#if defined(CONFIG_MTD_NAND_OMAP2) || \
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 202934657867..6f93a20536ea 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -25,23 +25,12 @@
#include "common-board-devices.h"
#if !(defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3))
-#define omap_intc_of_init NULL
+#define intc_of_init NULL
#endif
#ifndef CONFIG_ARCH_OMAP4
#define gic_of_init NULL
#endif
-static struct of_device_id irq_match[] __initdata = {
- { .compatible = "ti,omap2-intc", .data = omap_intc_of_init, },
- { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
- { }
-};
-
-static void __init omap_init_irq(void)
-{
- of_irq_init(irq_match);
-}
-
static struct of_device_id omap_dt_match_table[] __initdata = {
{ .compatible = "simple-bus", },
{ .compatible = "ti,omap-infra", },
@@ -65,7 +54,7 @@ DT_MACHINE_START(OMAP242X_DT, "Generic OMAP2420 (Flattened Device Tree)")
.reserve = omap_reserve,
.map_io = omap242x_map_io,
.init_early = omap2420_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap_intc_of_init,
.handle_irq = omap2_intc_handle_irq,
.init_machine = omap_generic_init,
.timer = &omap2_timer,
@@ -84,7 +73,7 @@ DT_MACHINE_START(OMAP243X_DT, "Generic OMAP2430 (Flattened Device Tree)")
.reserve = omap_reserve,
.map_io = omap243x_map_io,
.init_early = omap2430_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap_intc_of_init,
.handle_irq = omap2_intc_handle_irq,
.init_machine = omap_generic_init,
.timer = &omap2_timer,
@@ -103,7 +92,7 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap3430_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap_intc_of_init,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap_generic_init,
.timer = &omap3_timer,
@@ -112,6 +101,24 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
MACHINE_END
#endif
+#ifdef CONFIG_SOC_AM33XX
+static const char *am33xx_boards_compat[] __initdata = {
+ "ti,am33xx",
+ NULL,
+};
+
+DT_MACHINE_START(AM33XX_DT, "Generic AM33XX (Flattened Device Tree)")
+ .reserve = omap_reserve,
+ .map_io = am33xx_map_io,
+ .init_early = am33xx_init_early,
+ .init_irq = omap_intc_of_init,
+ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap_generic_init,
+ .timer = &omap3_am33xx_timer,
+ .dt_compat = am33xx_boards_compat,
+MACHINE_END
+#endif
+
#ifdef CONFIG_ARCH_OMAP4
static const char *omap4_boards_compat[] __initdata = {
"ti,omap4",
@@ -122,7 +129,7 @@ DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
.reserve = omap_reserve,
.map_io = omap4_map_io,
.init_early = omap4430_init_early,
- .init_irq = omap_init_irq,
+ .init_irq = omap_gic_of_init,
.handle_irq = gic_handle_irq,
.init_machine = omap_generic_init,
.init_late = omap4430_init_late,
@@ -131,3 +138,22 @@ DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
.restart = omap_prcm_restart,
MACHINE_END
#endif
+
+#ifdef CONFIG_SOC_OMAP5
+static const char *omap5_boards_compat[] __initdata = {
+ "ti,omap5",
+ NULL,
+};
+
+DT_MACHINE_START(OMAP5_DT, "Generic OMAP5 (Flattened Device Tree)")
+ .reserve = omap_reserve,
+ .map_io = omap5_map_io,
+ .init_early = omap5_init_early,
+ .init_irq = omap_gic_of_init,
+ .handle_irq = gic_handle_irq,
+ .init_machine = omap_generic_init,
+ .timer = &omap5_timer,
+ .dt_compat = omap5_boards_compat,
+ .restart = omap_prcm_restart,
+MACHINE_END
+#endif
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 876becf8205a..ace20482e3e1 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -32,7 +32,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <plat/usb.h>
#include <plat/board.h>
#include "common.h"
#include <plat/menelaus.h>
@@ -329,17 +328,6 @@ static void __init h4_init_flash(void)
h4_flash_resource.end = base + SZ_64M - 1;
}
-static struct omap_usb_config h4_usb_config __initdata = {
- /* S1.10 OFF -- usb "download port"
- * usb0 switched to Mini-B port and isp1105 transceiver;
- * S2.POS3 = ON, S2.POS4 = OFF ... to enable battery charging
- */
- .register_dev = 1,
- .pins[0] = 3,
-/* .hmc_mode = 0x14,*/ /* 0:dev 1:host 2:disable */
- .hmc_mode = 0x00, /* 0:dev|otg 1:disable 2:disable */
-};
-
static struct at24_platform_data m24c01 = {
.byte_len = SZ_1K / 8,
.page_size = 16,
@@ -381,7 +369,6 @@ static void __init omap_h4_init(void)
ARRAY_SIZE(h4_i2c_board_info));
platform_add_devices(h4_devices, ARRAY_SIZE(h4_devices));
- omap2_usbfs_init(&h4_usb_config);
omap_serial_init();
omap_sdrc_init(NULL, NULL);
h4_init_flash();
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 580fd17208da..6202fc76e490 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -433,7 +433,7 @@ static struct platform_device *omap3_beagle_devices[] __initdata = {
static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
- .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
+ .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
.port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 639bd07ea38a..ef230a0eb5eb 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -24,6 +24,10 @@
#include <linux/leds.h>
#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand.h>
+
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/i2c/twl.h>
@@ -43,6 +47,7 @@
#include <plat/board.h>
#include <plat/usb.h>
+#include <plat/nand.h>
#include "common.h"
#include <plat/mcspi.h>
#include <video/omapdss.h>
@@ -53,7 +58,6 @@
#include "hsmmc.h"
#include "common-board-devices.h"
-#define OMAP3_EVM_TS_GPIO 175
#define OMAP3_EVM_EHCI_VBUS 22
#define OMAP3_EVM_EHCI_SELECT 61
@@ -355,6 +359,19 @@ static int omap3evm_twl_gpio_setup(struct device *dev,
platform_device_register(&leds_gpio);
+ /* Enable VBUS switch by setting TWL4030.GPIO2DIR as output
+ * for starting USB tranceiver
+ */
+#ifdef CONFIG_TWL4030_CORE
+ if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) {
+ u8 val;
+
+ twl_i2c_read_u8(TWL4030_MODULE_GPIO, &val, REG_GPIODATADIR1);
+ val |= 0x04; /* TWL4030.GPIO2DIR BIT at GPIODATADIR1(0x9B) */
+ twl_i2c_write_u8(TWL4030_MODULE_GPIO, val, REG_GPIODATADIR1);
+ }
+#endif
+
return 0;
}
@@ -461,6 +478,28 @@ struct wl12xx_platform_data omap3evm_wlan_data __initdata = {
};
#endif
+/* VAUX2 for USB */
+static struct regulator_consumer_supply omap3evm_vaux2_supplies[] = {
+ REGULATOR_SUPPLY("VDD_CSIPHY1", "omap3isp"), /* OMAP ISP */
+ REGULATOR_SUPPLY("VDD_CSIPHY2", "omap3isp"), /* OMAP ISP */
+ REGULATOR_SUPPLY("hsusb1", "ehci-omap.0"),
+ REGULATOR_SUPPLY("vaux2", NULL),
+};
+
+static struct regulator_init_data omap3evm_vaux2 = {
+ .constraints = {
+ .min_uV = 2800000,
+ .max_uV = 2800000,
+ .apply_uV = true,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL
+ | REGULATOR_MODE_STANDBY,
+ .valid_ops_mask = REGULATOR_CHANGE_MODE
+ | REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = ARRAY_SIZE(omap3evm_vaux2_supplies),
+ .consumer_supplies = omap3evm_vaux2_supplies,
+};
+
static struct twl4030_platform_data omap3evm_twldata = {
/* platform_data for children goes here */
.keypad = &omap3evm_kp_data,
@@ -607,6 +646,37 @@ static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
};
+static struct mtd_partition omap3evm_nand_partitions[] = {
+ /* All the partition sizes are listed in terms of NAND block size */
+ {
+ .name = "X-Loader",
+ .offset = 0,
+ .size = 4*(SZ_128K),
+ .mask_flags = MTD_WRITEABLE
+ },
+ {
+ .name = "U-Boot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 14*(SZ_128K),
+ .mask_flags = MTD_WRITEABLE
+ },
+ {
+ .name = "U-Boot Env",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 2*(SZ_128K)
+ },
+ {
+ .name = "Kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 40*(SZ_128K)
+ },
+ {
+ .name = "File system",
+ .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ },
+};
+
static void __init omap3_evm_init(void)
{
struct omap_board_mux *obm;
@@ -623,6 +693,9 @@ static void __init omap3_evm_init(void)
omap_mux_init_gpio(63, OMAP_PIN_INPUT);
omap_hsmmc_init(mmc);
+ if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2)
+ omap3evm_twldata.vaux2 = &omap3evm_vaux2;
+
omap3_evm_i2c_init();
omap_display_init(&omap3_evm_dss_data);
@@ -656,6 +729,9 @@ static void __init omap3_evm_init(void)
}
usb_musb_init(&musb_board_data);
usbhs_init(&usbhs_bdata);
+ omap_nand_flash_init(NAND_BUSWIDTH_16, omap3evm_nand_partitions,
+ ARRAY_SIZE(omap3evm_nand_partitions));
+
omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL);
omap3evm_init_smsc911x();
omap3_evm_display_init();
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index 932e1778aff9..fca93d1afd43 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -93,9 +93,6 @@ static struct twl4030_usb_data omap3logic_usb_data = {
static struct twl4030_platform_data omap3logic_twldata = {
- .irq_base = TWL4030_IRQ_BASE,
- .irq_end = TWL4030_IRQ_END,
-
/* platform_data for children goes here */
.gpio = &omap3logic_gpio_data,
.vmmc1 = &omap3logic_vmmc1,
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 982fb2622ab8..70f6d1d25463 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -106,7 +106,7 @@ static struct platform_device leds_gpio = {
static struct omap_abe_twl6040_data panda_abe_audio_data = {
/* Audio out */
.has_hs = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT,
- /* HandsFree through expasion connector */
+ /* HandsFree through expansion connector */
.has_hf = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT,
/* PandaBoard: FM TX, PandaBoardES: can be connected to audio out */
.has_aux = ABE_TWL6040_LEFT | ABE_TWL6040_RIGHT,
@@ -379,6 +379,9 @@ static struct omap_board_mux board_mux[] __initdata = {
OMAP4_MUX(DPM_EMU18, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
/* dispc2_data0 */
OMAP4_MUX(DPM_EMU19, OMAP_PIN_OUTPUT | OMAP_MUX_MODE5),
+ /* NIRQ2 for twl6040 */
+ OMAP4_MUX(SYS_NIRQ2, OMAP_MUX_MODE0 |
+ OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE),
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 8fa2fc3a4c3c..779734d8ba37 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -494,8 +494,8 @@ static void __init overo_init(void)
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
- omap_hsmmc_init(mmc);
overo_i2c_init();
+ omap_hsmmc_init(mmc);
omap_display_init(&overo_dss_data);
omap_serial_init();
omap_sdrc_init(mt46h32m32lf6_sdrc_params,
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index 5c4e66542169..ea3f565ba1a4 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -398,24 +398,6 @@ int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
return omap2_clksel_set_parent(clk, new_parent);
}
-/* OMAP3/4 non-CORE DPLL clkops */
-
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
-
-const struct clkops clkops_omap3_noncore_dpll_ops = {
- .enable = omap3_noncore_dpll_enable,
- .disable = omap3_noncore_dpll_disable,
- .allow_idle = omap3_dpll_allow_idle,
- .deny_idle = omap3_dpll_deny_idle,
-};
-
-const struct clkops clkops_omap3_core_dpll_ops = {
- .allow_idle = omap3_dpll_allow_idle,
- .deny_idle = omap3_dpll_deny_idle,
-};
-
-#endif
-
/*
* OMAP2+ clock reset and init functions
*/
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index a1bb23a23351..35ec5f3d9a73 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -155,4 +155,18 @@ extern const struct clkops clkops_omap3_noncore_dpll_ops;
extern const struct clkops clkops_omap3_core_dpll_ops;
extern const struct clkops clkops_omap4_dpllmx_ops;
+/* clksel_rate blocks shared between OMAP44xx and AM33xx */
+extern const struct clksel_rate div_1_0_rates[];
+extern const struct clksel_rate div_1_1_rates[];
+extern const struct clksel_rate div_1_2_rates[];
+extern const struct clksel_rate div_1_3_rates[];
+extern const struct clksel_rate div_1_4_rates[];
+extern const struct clksel_rate div31_1to31_rates[];
+
+/* clocks shared between various OMAP SoCs */
+extern struct clk virt_19200000_ck;
+extern struct clk virt_26000000_ck;
+
+extern int am33xx_clk_init(void);
+
#endif
diff --git a/arch/arm/mach-omap2/clock2420_data.c b/arch/arm/mach-omap2/clock2420_data.c
index bace9308a4db..002745181ad6 100644
--- a/arch/arm/mach-omap2/clock2420_data.c
+++ b/arch/arm/mach-omap2/clock2420_data.c
@@ -1774,8 +1774,6 @@ static struct omap_clk omap2420_clks[] = {
CLK(NULL, "osc_ck", &osc_ck, CK_242X),
CLK(NULL, "sys_ck", &sys_ck, CK_242X),
CLK(NULL, "alt_ck", &alt_ck, CK_242X),
- CLK("omap-mcbsp.1", "pad_fck", &mcbsp_clks, CK_242X),
- CLK("omap-mcbsp.2", "pad_fck", &mcbsp_clks, CK_242X),
CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_242X),
/* internal analog sources */
CLK(NULL, "dpll_ck", &dpll_ck, CK_242X),
@@ -1784,8 +1782,6 @@ static struct omap_clk omap2420_clks[] = {
/* internal prcm root sources */
CLK(NULL, "func_54m_ck", &func_54m_ck, CK_242X),
CLK(NULL, "core_ck", &core_ck, CK_242X),
- CLK("omap-mcbsp.1", "prcm_fck", &func_96m_ck, CK_242X),
- CLK("omap-mcbsp.2", "prcm_fck", &func_96m_ck, CK_242X),
CLK(NULL, "func_96m_ck", &func_96m_ck, CK_242X),
CLK(NULL, "func_48m_ck", &func_48m_ck, CK_242X),
CLK(NULL, "func_12m_ck", &func_12m_ck, CK_242X),
@@ -1901,42 +1897,9 @@ static struct omap_clk omap2420_clks[] = {
CLK(NULL, "pka_ick", &pka_ick, CK_242X),
CLK(NULL, "usb_fck", &usb_fck, CK_242X),
CLK("musb-hdrc", "fck", &osc_ck, CK_242X),
- CLK("omap_timer.1", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.2", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.3", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.4", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.5", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.6", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.7", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.8", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.9", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.10", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.11", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.12", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.1", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.2", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.3", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.4", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.5", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.6", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.7", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.8", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.9", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.10", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.11", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.12", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.1", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.2", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.3", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.4", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.5", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.6", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.7", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.8", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.9", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.10", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.11", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.12", "alt_ck", &alt_ck, CK_243X),
+ CLK(NULL, "timer_32k_ck", &func_32k_ck, CK_243X),
+ CLK(NULL, "timer_sys_ck", &sys_ck, CK_243X),
+ CLK(NULL, "timer_ext_ck", &alt_ck, CK_243X),
};
/*
diff --git a/arch/arm/mach-omap2/clock2430_data.c b/arch/arm/mach-omap2/clock2430_data.c
index 3b4d09a50399..cacabb070e22 100644
--- a/arch/arm/mach-omap2/clock2430_data.c
+++ b/arch/arm/mach-omap2/clock2430_data.c
@@ -1858,11 +1858,6 @@ static struct omap_clk omap2430_clks[] = {
CLK(NULL, "osc_ck", &osc_ck, CK_243X),
CLK(NULL, "sys_ck", &sys_ck, CK_243X),
CLK(NULL, "alt_ck", &alt_ck, CK_243X),
- CLK("omap-mcbsp.1", "pad_fck", &mcbsp_clks, CK_243X),
- CLK("omap-mcbsp.2", "pad_fck", &mcbsp_clks, CK_243X),
- CLK("omap-mcbsp.3", "pad_fck", &mcbsp_clks, CK_243X),
- CLK("omap-mcbsp.4", "pad_fck", &mcbsp_clks, CK_243X),
- CLK("omap-mcbsp.5", "pad_fck", &mcbsp_clks, CK_243X),
CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_243X),
/* internal analog sources */
CLK(NULL, "dpll_ck", &dpll_ck, CK_243X),
@@ -1871,11 +1866,6 @@ static struct omap_clk omap2430_clks[] = {
/* internal prcm root sources */
CLK(NULL, "func_54m_ck", &func_54m_ck, CK_243X),
CLK(NULL, "core_ck", &core_ck, CK_243X),
- CLK("omap-mcbsp.1", "prcm_fck", &func_96m_ck, CK_243X),
- CLK("omap-mcbsp.2", "prcm_fck", &func_96m_ck, CK_243X),
- CLK("omap-mcbsp.3", "prcm_fck", &func_96m_ck, CK_243X),
- CLK("omap-mcbsp.4", "prcm_fck", &func_96m_ck, CK_243X),
- CLK("omap-mcbsp.5", "prcm_fck", &func_96m_ck, CK_243X),
CLK(NULL, "func_96m_ck", &func_96m_ck, CK_243X),
CLK(NULL, "func_48m_ck", &func_48m_ck, CK_243X),
CLK(NULL, "func_12m_ck", &func_12m_ck, CK_243X),
@@ -2000,42 +1990,9 @@ static struct omap_clk omap2430_clks[] = {
CLK(NULL, "mdm_intc_ick", &mdm_intc_ick, CK_243X),
CLK("omap_hsmmc.0", "mmchsdb_fck", &mmchsdb1_fck, CK_243X),
CLK("omap_hsmmc.1", "mmchsdb_fck", &mmchsdb2_fck, CK_243X),
- CLK("omap_timer.1", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.2", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.3", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.4", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.5", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.6", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.7", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.8", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.9", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.10", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.11", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.12", "32k_ck", &func_32k_ck, CK_243X),
- CLK("omap_timer.1", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.2", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.3", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.4", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.5", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.6", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.7", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.8", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.9", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.10", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.11", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.12", "sys_ck", &sys_ck, CK_243X),
- CLK("omap_timer.1", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.2", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.3", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.4", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.5", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.6", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.7", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.8", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.9", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.10", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.11", "alt_ck", &alt_ck, CK_243X),
- CLK("omap_timer.12", "alt_ck", &alt_ck, CK_243X),
+ CLK(NULL, "timer_32k_ck", &func_32k_ck, CK_243X),
+ CLK(NULL, "timer_sys_ck", &sys_ck, CK_243X),
+ CLK(NULL, "timer_ext_ck", &alt_ck, CK_243X),
};
/*
diff --git a/arch/arm/mach-omap2/clock33xx_data.c b/arch/arm/mach-omap2/clock33xx_data.c
new file mode 100644
index 000000000000..25bbcc7ca4dc
--- /dev/null
+++ b/arch/arm/mach-omap2/clock33xx_data.c
@@ -0,0 +1,1105 @@
+/*
+ * AM33XX Clock data
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk.h>
+#include <plat/clkdev_omap.h>
+#include <plat/am33xx.h>
+
+#include "iomap.h"
+#include "control.h"
+#include "clock.h"
+#include "cm.h"
+#include "cm33xx.h"
+#include "cm-regbits-33xx.h"
+#include "prm.h"
+
+/* Maximum DPLL multiplier, divider values for AM33XX */
+#define AM33XX_MAX_DPLL_MULT 2047
+#define AM33XX_MAX_DPLL_DIV 128
+
+/* Modulemode control */
+#define AM33XX_MODULEMODE_HWCTRL 0
+#define AM33XX_MODULEMODE_SWCTRL 1
+
+/* TRM ERRATA: Timer 3 & 6 default parent (TCLKIN) may not be always
+ * physically present, in such a case HWMOD enabling of
+ * clock would be failure with default parent. And timer
+ * probe thinks clock is already enabled, this leads to
+ * crash upon accessing timer 3 & 6 registers in probe.
+ * Fix by setting parent of both these timers to master
+ * oscillator clock.
+ */
+static inline void am33xx_init_timer_parent(struct clk *clk)
+{
+ omap2_clksel_set_parent(clk, clk->parent);
+}
+
+/* Root clocks */
+
+/* RTC 32k */
+static struct clk clk_32768_ck = {
+ .name = "clk_32768_ck",
+ .clkdm_name = "l4_rtc_clkdm",
+ .rate = 32768,
+ .ops = &clkops_null,
+};
+
+/* On-Chip 32KHz RC OSC */
+static struct clk clk_rc32k_ck = {
+ .name = "clk_rc32k_ck",
+ .rate = 32000,
+ .ops = &clkops_null,
+};
+
+/* Crystal input clks */
+static struct clk virt_24000000_ck = {
+ .name = "virt_24000000_ck",
+ .rate = 24000000,
+ .ops = &clkops_null,
+};
+
+static struct clk virt_25000000_ck = {
+ .name = "virt_25000000_ck",
+ .rate = 25000000,
+ .ops = &clkops_null,
+};
+
+/* Oscillator clock */
+/* 19.2, 24, 25 or 26 MHz */
+static const struct clksel sys_clkin_sel[] = {
+ { .parent = &virt_19200000_ck, .rates = div_1_0_rates },
+ { .parent = &virt_24000000_ck, .rates = div_1_1_rates },
+ { .parent = &virt_25000000_ck, .rates = div_1_2_rates },
+ { .parent = &virt_26000000_ck, .rates = div_1_3_rates },
+ { .parent = NULL },
+};
+
+/* External clock - 12 MHz */
+static struct clk tclkin_ck = {
+ .name = "tclkin_ck",
+ .rate = 12000000,
+ .ops = &clkops_null,
+};
+
+/*
+ * sys_clk in: input to the dpll and also used as funtional clock for,
+ * adc_tsc, smartreflex0-1, timer1-7, mcasp0-1, dcan0-1, cefuse
+ *
+ */
+static struct clk sys_clkin_ck = {
+ .name = "sys_clkin_ck",
+ .parent = &virt_24000000_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel_reg = AM33XX_CTRL_REGADDR(AM33XX_CONTROL_STATUS),
+ .clksel_mask = AM33XX_CONTROL_STATUS_SYSBOOT1_MASK,
+ .clksel = sys_clkin_sel,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* DPLL_CORE */
+static struct dpll_data dpll_core_dd = {
+ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_CORE,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = AM33XX_CM_CLKMODE_DPLL_CORE,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .idlest_reg = AM33XX_CM_IDLEST_DPLL_CORE,
+ .mult_mask = AM33XX_DPLL_MULT_MASK,
+ .div1_mask = AM33XX_DPLL_DIV_MASK,
+ .enable_mask = AM33XX_DPLL_EN_MASK,
+ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
+ .max_multiplier = AM33XX_MAX_DPLL_MULT,
+ .max_divider = AM33XX_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+/* CLKDCOLDO output */
+static struct clk dpll_core_ck = {
+ .name = "dpll_core_ck",
+ .parent = &sys_clkin_ck,
+ .dpll_data = &dpll_core_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_omap3_core_dpll_ops,
+ .recalc = &omap3_dpll_recalc,
+};
+
+static struct clk dpll_core_x2_ck = {
+ .name = "dpll_core_x2_ck",
+ .parent = &dpll_core_ck,
+ .flags = CLOCK_CLKOUTX2,
+ .ops = &clkops_null,
+ .recalc = &omap3_clkoutx2_recalc,
+};
+
+
+static const struct clksel dpll_core_m4_div[] = {
+ { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_core_m4_ck = {
+ .name = "dpll_core_m4_ck",
+ .parent = &dpll_core_x2_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = dpll_core_m4_div,
+ .clksel_reg = AM33XX_CM_DIV_M4_DPLL_CORE,
+ .clksel_mask = AM33XX_HSDIVIDER_CLKOUT1_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+static const struct clksel dpll_core_m5_div[] = {
+ { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_core_m5_ck = {
+ .name = "dpll_core_m5_ck",
+ .parent = &dpll_core_x2_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = dpll_core_m5_div,
+ .clksel_reg = AM33XX_CM_DIV_M5_DPLL_CORE,
+ .clksel_mask = AM33XX_HSDIVIDER_CLKOUT2_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+static const struct clksel dpll_core_m6_div[] = {
+ { .parent = &dpll_core_x2_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_core_m6_ck = {
+ .name = "dpll_core_m6_ck",
+ .parent = &dpll_core_x2_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = dpll_core_m6_div,
+ .clksel_reg = AM33XX_CM_DIV_M6_DPLL_CORE,
+ .clksel_mask = AM33XX_HSDIVIDER_CLKOUT3_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+/* DPLL_MPU */
+static struct dpll_data dpll_mpu_dd = {
+ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_MPU,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = AM33XX_CM_CLKMODE_DPLL_MPU,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .idlest_reg = AM33XX_CM_IDLEST_DPLL_MPU,
+ .mult_mask = AM33XX_DPLL_MULT_MASK,
+ .div1_mask = AM33XX_DPLL_DIV_MASK,
+ .enable_mask = AM33XX_DPLL_EN_MASK,
+ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
+ .max_multiplier = AM33XX_MAX_DPLL_MULT,
+ .max_divider = AM33XX_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+/* CLKOUT: fdpll/M2 */
+static struct clk dpll_mpu_ck = {
+ .name = "dpll_mpu_ck",
+ .parent = &sys_clkin_ck,
+ .dpll_data = &dpll_mpu_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_omap3_noncore_dpll_ops,
+ .recalc = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+};
+
+/*
+ * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
+ * and ALT_CLK1/2)
+ */
+static const struct clksel dpll_mpu_m2_div[] = {
+ { .parent = &dpll_mpu_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_mpu_m2_ck = {
+ .name = "dpll_mpu_m2_ck",
+ .clkdm_name = "mpu_clkdm",
+ .parent = &dpll_mpu_ck,
+ .clksel = dpll_mpu_m2_div,
+ .clksel_reg = AM33XX_CM_DIV_M2_DPLL_MPU,
+ .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+/* DPLL_DDR */
+static struct dpll_data dpll_ddr_dd = {
+ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_DDR,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = AM33XX_CM_CLKMODE_DPLL_DDR,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .idlest_reg = AM33XX_CM_IDLEST_DPLL_DDR,
+ .mult_mask = AM33XX_DPLL_MULT_MASK,
+ .div1_mask = AM33XX_DPLL_DIV_MASK,
+ .enable_mask = AM33XX_DPLL_EN_MASK,
+ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
+ .max_multiplier = AM33XX_MAX_DPLL_MULT,
+ .max_divider = AM33XX_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+/* CLKOUT: fdpll/M2 */
+static struct clk dpll_ddr_ck = {
+ .name = "dpll_ddr_ck",
+ .parent = &sys_clkin_ck,
+ .dpll_data = &dpll_ddr_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_null,
+ .recalc = &omap3_dpll_recalc,
+};
+
+/*
+ * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
+ * and ALT_CLK1/2)
+ */
+static const struct clksel dpll_ddr_m2_div[] = {
+ { .parent = &dpll_ddr_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_ddr_m2_ck = {
+ .name = "dpll_ddr_m2_ck",
+ .parent = &dpll_ddr_ck,
+ .clksel = dpll_ddr_m2_div,
+ .clksel_reg = AM33XX_CM_DIV_M2_DPLL_DDR,
+ .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+/* emif_fck functional clock */
+static struct clk dpll_ddr_m2_div2_ck = {
+ .name = "dpll_ddr_m2_div2_ck",
+ .clkdm_name = "l3_clkdm",
+ .parent = &dpll_ddr_m2_ck,
+ .ops = &clkops_null,
+ .fixed_div = 2,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
+/* DPLL_DISP */
+static struct dpll_data dpll_disp_dd = {
+ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_DISP,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = AM33XX_CM_CLKMODE_DPLL_DISP,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .idlest_reg = AM33XX_CM_IDLEST_DPLL_DISP,
+ .mult_mask = AM33XX_DPLL_MULT_MASK,
+ .div1_mask = AM33XX_DPLL_DIV_MASK,
+ .enable_mask = AM33XX_DPLL_EN_MASK,
+ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
+ .max_multiplier = AM33XX_MAX_DPLL_MULT,
+ .max_divider = AM33XX_MAX_DPLL_DIV,
+ .min_divider = 1,
+};
+
+/* CLKOUT: fdpll/M2 */
+static struct clk dpll_disp_ck = {
+ .name = "dpll_disp_ck",
+ .parent = &sys_clkin_ck,
+ .dpll_data = &dpll_disp_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_null,
+ .recalc = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+};
+
+/*
+ * TODO: Add clksel here (sys_clkin, CORE_CLKOUTM6, PER_CLKOUTM2
+ * and ALT_CLK1/2)
+ */
+static const struct clksel dpll_disp_m2_div[] = {
+ { .parent = &dpll_disp_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_disp_m2_ck = {
+ .name = "dpll_disp_m2_ck",
+ .parent = &dpll_disp_ck,
+ .clksel = dpll_disp_m2_div,
+ .clksel_reg = AM33XX_CM_DIV_M2_DPLL_DISP,
+ .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+/* DPLL_PER */
+static struct dpll_data dpll_per_dd = {
+ .mult_div1_reg = AM33XX_CM_CLKSEL_DPLL_PERIPH,
+ .clk_bypass = &sys_clkin_ck,
+ .clk_ref = &sys_clkin_ck,
+ .control_reg = AM33XX_CM_CLKMODE_DPLL_PER,
+ .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
+ .idlest_reg = AM33XX_CM_IDLEST_DPLL_PER,
+ .mult_mask = AM33XX_DPLL_MULT_PERIPH_MASK,
+ .div1_mask = AM33XX_DPLL_PER_DIV_MASK,
+ .enable_mask = AM33XX_DPLL_EN_MASK,
+ .idlest_mask = AM33XX_ST_DPLL_CLK_MASK,
+ .max_multiplier = AM33XX_MAX_DPLL_MULT,
+ .max_divider = AM33XX_MAX_DPLL_DIV,
+ .min_divider = 1,
+ .flags = DPLL_J_TYPE,
+};
+
+/* CLKDCOLDO */
+static struct clk dpll_per_ck = {
+ .name = "dpll_per_ck",
+ .parent = &sys_clkin_ck,
+ .dpll_data = &dpll_per_dd,
+ .init = &omap2_init_dpll_parent,
+ .ops = &clkops_null,
+ .recalc = &omap3_dpll_recalc,
+ .round_rate = &omap2_dpll_round_rate,
+ .set_rate = &omap3_noncore_dpll_set_rate,
+};
+
+/* CLKOUT: fdpll/M2 */
+static const struct clksel dpll_per_m2_div[] = {
+ { .parent = &dpll_per_ck, .rates = div31_1to31_rates },
+ { .parent = NULL },
+};
+
+static struct clk dpll_per_m2_ck = {
+ .name = "dpll_per_m2_ck",
+ .parent = &dpll_per_ck,
+ .clksel = dpll_per_m2_div,
+ .clksel_reg = AM33XX_CM_DIV_M2_DPLL_PER,
+ .clksel_mask = AM33XX_DPLL_CLKOUT_DIV_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+static struct clk dpll_per_m2_div4_wkupdm_ck = {
+ .name = "dpll_per_m2_div4_wkupdm_ck",
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &dpll_per_m2_ck,
+ .fixed_div = 4,
+ .ops = &clkops_null,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
+static struct clk dpll_per_m2_div4_ck = {
+ .name = "dpll_per_m2_div4_ck",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &dpll_per_m2_ck,
+ .fixed_div = 4,
+ .ops = &clkops_null,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
+static struct clk l3_gclk = {
+ .name = "l3_gclk",
+ .clkdm_name = "l3_clkdm",
+ .parent = &dpll_core_m4_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dpll_core_m4_div2_ck = {
+ .name = "dpll_core_m4_div2_ck",
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &dpll_core_m4_ck,
+ .ops = &clkops_null,
+ .fixed_div = 2,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
+static struct clk l4_rtc_gclk = {
+ .name = "l4_rtc_gclk",
+ .parent = &dpll_core_m4_ck,
+ .ops = &clkops_null,
+ .fixed_div = 2,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
+static struct clk clk_24mhz = {
+ .name = "clk_24mhz",
+ .parent = &dpll_per_m2_ck,
+ .fixed_div = 8,
+ .ops = &clkops_null,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
+/*
+ * Below clock nodes describes clockdomains derived out
+ * of core clock.
+ */
+static struct clk l4hs_gclk = {
+ .name = "l4hs_gclk",
+ .clkdm_name = "l4hs_clkdm",
+ .parent = &dpll_core_m4_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk l3s_gclk = {
+ .name = "l3s_gclk",
+ .clkdm_name = "l3s_clkdm",
+ .parent = &dpll_core_m4_div2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk l4fw_gclk = {
+ .name = "l4fw_gclk",
+ .clkdm_name = "l4fw_clkdm",
+ .parent = &dpll_core_m4_div2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk l4ls_gclk = {
+ .name = "l4ls_gclk",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &dpll_core_m4_div2_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk sysclk_div_ck = {
+ .name = "sysclk_div_ck",
+ .parent = &dpll_core_m4_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+/*
+ * In order to match the clock domain with hwmod clockdomain entry,
+ * separate clock nodes is required for the modules which are
+ * directly getting their funtioncal clock from sys_clkin.
+ */
+static struct clk adc_tsc_fck = {
+ .name = "adc_tsc_fck",
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &sys_clkin_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dcan0_fck = {
+ .name = "dcan0_fck",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &sys_clkin_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk dcan1_fck = {
+ .name = "dcan1_fck",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &sys_clkin_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcasp0_fck = {
+ .name = "mcasp0_fck",
+ .clkdm_name = "l3s_clkdm",
+ .parent = &sys_clkin_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mcasp1_fck = {
+ .name = "mcasp1_fck",
+ .clkdm_name = "l3s_clkdm",
+ .parent = &sys_clkin_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk smartreflex0_fck = {
+ .name = "smartreflex0_fck",
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &sys_clkin_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk smartreflex1_fck = {
+ .name = "smartreflex1_fck",
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &sys_clkin_ck,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+/*
+ * Modules clock nodes
+ *
+ * The following clock leaf nodes are added for the moment because:
+ *
+ * - hwmod data is not present for these modules, either hwmod
+ * control is not required or its not populated.
+ * - Driver code is not yet migrated to use hwmod/runtime pm
+ * - Modules outside kernel access (to disable them by default)
+ *
+ * - debugss
+ * - mmu (gfx domain)
+ * - cefuse
+ * - usbotg_fck (its additional clock and not really a modulemode)
+ * - ieee5000
+ */
+static struct clk debugss_ick = {
+ .name = "debugss_ick",
+ .clkdm_name = "l3_aon_clkdm",
+ .parent = &dpll_core_m4_ck,
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = AM33XX_CM_WKUP_DEBUGSS_CLKCTRL,
+ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mmu_fck = {
+ .name = "mmu_fck",
+ .clkdm_name = "gfx_l3_clkdm",
+ .parent = &dpll_core_m4_ck,
+ .ops = &clkops_omap2_dflt,
+ .enable_reg = AM33XX_CM_GFX_MMUDATA_CLKCTRL,
+ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk cefuse_fck = {
+ .name = "cefuse_fck",
+ .clkdm_name = "l4_cefuse_clkdm",
+ .parent = &sys_clkin_ck,
+ .enable_reg = AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL,
+ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &followparent_recalc,
+};
+
+/*
+ * clkdiv32 is generated from fixed division of 732.4219
+ */
+static struct clk clkdiv32k_ick = {
+ .name = "clkdiv32k_ick",
+ .clkdm_name = "clk_24mhz_clkdm",
+ .rate = 32768,
+ .parent = &clk_24mhz,
+ .enable_reg = AM33XX_CM_PER_CLKDIV32K_CLKCTRL,
+ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
+ .ops = &clkops_omap2_dflt,
+};
+
+static struct clk usbotg_fck = {
+ .name = "usbotg_fck",
+ .clkdm_name = "l3s_clkdm",
+ .parent = &dpll_per_ck,
+ .enable_reg = AM33XX_CM_CLKDCOLDO_DPLL_PER,
+ .enable_bit = AM33XX_ST_DPLL_CLKDCOLDO_SHIFT,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk ieee5000_fck = {
+ .name = "ieee5000_fck",
+ .clkdm_name = "l3s_clkdm",
+ .parent = &dpll_core_m4_div2_ck,
+ .enable_reg = AM33XX_CM_PER_IEEE5000_CLKCTRL,
+ .enable_bit = AM33XX_MODULEMODE_SWCTRL,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &followparent_recalc,
+};
+
+/* Timers */
+static const struct clksel timer1_clkmux_sel[] = {
+ { .parent = &sys_clkin_ck, .rates = div_1_0_rates },
+ { .parent = &clkdiv32k_ick, .rates = div_1_1_rates },
+ { .parent = &tclkin_ck, .rates = div_1_2_rates },
+ { .parent = &clk_rc32k_ck, .rates = div_1_3_rates },
+ { .parent = &clk_32768_ck, .rates = div_1_4_rates },
+ { .parent = NULL },
+};
+
+static struct clk timer1_fck = {
+ .name = "timer1_fck",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &sys_clkin_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = timer1_clkmux_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER1MS_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_2_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel timer2_to_7_clk_sel[] = {
+ { .parent = &tclkin_ck, .rates = div_1_0_rates },
+ { .parent = &sys_clkin_ck, .rates = div_1_1_rates },
+ { .parent = &clkdiv32k_ick, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static struct clk timer2_fck = {
+ .name = "timer2_fck",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &sys_clkin_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER2_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk timer3_fck = {
+ .name = "timer3_fck",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &sys_clkin_ck,
+ .init = &am33xx_init_timer_parent,
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER3_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk timer4_fck = {
+ .name = "timer4_fck",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &sys_clkin_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER4_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk timer5_fck = {
+ .name = "timer5_fck",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &sys_clkin_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER5_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk timer6_fck = {
+ .name = "timer6_fck",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &sys_clkin_ck,
+ .init = &am33xx_init_timer_parent,
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER6_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk timer7_fck = {
+ .name = "timer7_fck",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &sys_clkin_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = timer2_to_7_clk_sel,
+ .clksel_reg = AM33XX_CLKSEL_TIMER7_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk cpsw_125mhz_gclk = {
+ .name = "cpsw_125mhz_gclk",
+ .clkdm_name = "cpsw_125mhz_clkdm",
+ .parent = &dpll_core_m5_ck,
+ .ops = &clkops_null,
+ .fixed_div = 2,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
+static const struct clksel cpsw_cpts_rft_clkmux_sel[] = {
+ { .parent = &dpll_core_m5_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_core_m4_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk cpsw_cpts_rft_clk = {
+ .name = "cpsw_cpts_rft_clk",
+ .clkdm_name = "cpsw_125mhz_clkdm",
+ .parent = &dpll_core_m5_ck,
+ .clksel = cpsw_cpts_rft_clkmux_sel,
+ .clksel_reg = AM33XX_CM_CPTS_RFT_CLKSEL,
+ .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+/* gpio */
+static const struct clksel gpio0_dbclk_mux_sel[] = {
+ { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
+ { .parent = &clk_32768_ck, .rates = div_1_1_rates },
+ { .parent = &clkdiv32k_ick, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static struct clk gpio0_dbclk_mux_ck = {
+ .name = "gpio0_dbclk_mux_ck",
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &clk_rc32k_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = gpio0_dbclk_mux_sel,
+ .clksel_reg = AM33XX_CLKSEL_GPIO0_DBCLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gpio0_dbclk = {
+ .name = "gpio0_dbclk",
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &gpio0_dbclk_mux_ck,
+ .enable_reg = AM33XX_CM_WKUP_GPIO0_CLKCTRL,
+ .enable_bit = AM33XX_OPTFCLKEN_GPIO0_GDBCLK_SHIFT,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio1_dbclk = {
+ .name = "gpio1_dbclk",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &clkdiv32k_ick,
+ .enable_reg = AM33XX_CM_PER_GPIO1_CLKCTRL,
+ .enable_bit = AM33XX_OPTFCLKEN_GPIO_1_GDBCLK_SHIFT,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio2_dbclk = {
+ .name = "gpio2_dbclk",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &clkdiv32k_ick,
+ .enable_reg = AM33XX_CM_PER_GPIO2_CLKCTRL,
+ .enable_bit = AM33XX_OPTFCLKEN_GPIO_2_GDBCLK_SHIFT,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk gpio3_dbclk = {
+ .name = "gpio3_dbclk",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &clkdiv32k_ick,
+ .enable_reg = AM33XX_CM_PER_GPIO3_CLKCTRL,
+ .enable_bit = AM33XX_OPTFCLKEN_GPIO_3_GDBCLK_SHIFT,
+ .ops = &clkops_omap2_dflt,
+ .recalc = &followparent_recalc,
+};
+
+static const struct clksel pruss_ocp_clk_mux_sel[] = {
+ { .parent = &l3_gclk, .rates = div_1_0_rates },
+ { .parent = &dpll_disp_m2_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk pruss_ocp_gclk = {
+ .name = "pruss_ocp_gclk",
+ .clkdm_name = "pruss_ocp_clkdm",
+ .parent = &l3_gclk,
+ .init = &omap2_init_clksel_parent,
+ .clksel = pruss_ocp_clk_mux_sel,
+ .clksel_reg = AM33XX_CLKSEL_PRUSS_OCP_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static const struct clksel lcd_clk_mux_sel[] = {
+ { .parent = &dpll_disp_m2_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_core_m5_ck, .rates = div_1_1_rates },
+ { .parent = &dpll_per_m2_ck, .rates = div_1_2_rates },
+ { .parent = NULL },
+};
+
+static struct clk lcd_gclk = {
+ .name = "lcd_gclk",
+ .clkdm_name = "lcdc_clkdm",
+ .parent = &dpll_disp_m2_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = lcd_clk_mux_sel,
+ .clksel_reg = AM33XX_CLKSEL_LCDC_PIXEL_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static struct clk mmc_clk = {
+ .name = "mmc_clk",
+ .clkdm_name = "l4ls_clkdm",
+ .parent = &dpll_per_m2_ck,
+ .ops = &clkops_null,
+ .fixed_div = 2,
+ .recalc = &omap_fixed_divisor_recalc,
+};
+
+static struct clk mmc2_fck = {
+ .name = "mmc2_fck",
+ .clkdm_name = "l3s_clkdm",
+ .parent = &mmc_clk,
+ .ops = &clkops_null,
+ .recalc = &followparent_recalc,
+};
+
+static const struct clksel gfx_clksel_sel[] = {
+ { .parent = &dpll_core_m4_ck, .rates = div_1_0_rates },
+ { .parent = &dpll_per_m2_ck, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk gfx_fclk_clksel_ck = {
+ .name = "gfx_fclk_clksel_ck",
+ .parent = &dpll_core_m4_ck,
+ .clksel = gfx_clksel_sel,
+ .ops = &clkops_null,
+ .clksel_reg = AM33XX_CLKSEL_GFX_FCLK,
+ .clksel_mask = AM33XX_CLKSEL_GFX_FCLK_MASK,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static const struct clksel_rate div_1_0_2_1_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_AM33XX },
+ { .div = 2, .val = 1, .flags = RATE_IN_AM33XX },
+ { .div = 0 },
+};
+
+static const struct clksel gfx_div_sel[] = {
+ { .parent = &gfx_fclk_clksel_ck, .rates = div_1_0_2_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk gfx_fck_div_ck = {
+ .name = "gfx_fck_div_ck",
+ .clkdm_name = "gfx_l3_clkdm",
+ .parent = &gfx_fclk_clksel_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = gfx_div_sel,
+ .clksel_reg = AM33XX_CLKSEL_GFX_FCLK,
+ .clksel_mask = AM33XX_CLKSEL_0_0_MASK,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+ .ops = &clkops_null,
+};
+
+static const struct clksel sysclkout_pre_sel[] = {
+ { .parent = &clk_32768_ck, .rates = div_1_0_rates },
+ { .parent = &l3_gclk, .rates = div_1_1_rates },
+ { .parent = &dpll_ddr_m2_ck, .rates = div_1_2_rates },
+ { .parent = &dpll_per_m2_ck, .rates = div_1_3_rates },
+ { .parent = &lcd_gclk, .rates = div_1_4_rates },
+ { .parent = NULL },
+};
+
+static struct clk sysclkout_pre_ck = {
+ .name = "sysclkout_pre_ck",
+ .parent = &clk_32768_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = sysclkout_pre_sel,
+ .clksel_reg = AM33XX_CM_CLKOUT_CTRL,
+ .clksel_mask = AM33XX_CLKOUT2SOURCE_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* Divide by 8 clock rates with default clock is 1/1*/
+static const struct clksel_rate div8_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_AM33XX },
+ { .div = 2, .val = 1, .flags = RATE_IN_AM33XX },
+ { .div = 3, .val = 2, .flags = RATE_IN_AM33XX },
+ { .div = 4, .val = 3, .flags = RATE_IN_AM33XX },
+ { .div = 5, .val = 4, .flags = RATE_IN_AM33XX },
+ { .div = 6, .val = 5, .flags = RATE_IN_AM33XX },
+ { .div = 7, .val = 6, .flags = RATE_IN_AM33XX },
+ { .div = 8, .val = 7, .flags = RATE_IN_AM33XX },
+ { .div = 0 },
+};
+
+static const struct clksel clkout2_div[] = {
+ { .parent = &sysclkout_pre_ck, .rates = div8_rates },
+ { .parent = NULL },
+};
+
+static struct clk clkout2_ck = {
+ .name = "clkout2_ck",
+ .parent = &sysclkout_pre_ck,
+ .ops = &clkops_omap2_dflt,
+ .clksel = clkout2_div,
+ .clksel_reg = AM33XX_CM_CLKOUT_CTRL,
+ .clksel_mask = AM33XX_CLKOUT2DIV_MASK,
+ .enable_reg = AM33XX_CM_CLKOUT_CTRL,
+ .enable_bit = AM33XX_CLKOUT2EN_SHIFT,
+ .recalc = &omap2_clksel_recalc,
+ .round_rate = &omap2_clksel_round_rate,
+ .set_rate = &omap2_clksel_set_rate,
+};
+
+static const struct clksel wdt_clkmux_sel[] = {
+ { .parent = &clk_rc32k_ck, .rates = div_1_0_rates },
+ { .parent = &clkdiv32k_ick, .rates = div_1_1_rates },
+ { .parent = NULL },
+};
+
+static struct clk wdt1_fck = {
+ .name = "wdt1_fck",
+ .clkdm_name = "l4_wkup_clkdm",
+ .parent = &clk_rc32k_ck,
+ .init = &omap2_init_clksel_parent,
+ .clksel = wdt_clkmux_sel,
+ .clksel_reg = AM33XX_CLKSEL_WDT1_CLK,
+ .clksel_mask = AM33XX_CLKSEL_0_1_MASK,
+ .ops = &clkops_null,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/*
+ * clkdev
+ */
+static struct omap_clk am33xx_clks[] = {
+ CLK(NULL, "clk_32768_ck", &clk_32768_ck, CK_AM33XX),
+ CLK(NULL, "clk_rc32k_ck", &clk_rc32k_ck, CK_AM33XX),
+ CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_AM33XX),
+ CLK(NULL, "virt_24000000_ck", &virt_24000000_ck, CK_AM33XX),
+ CLK(NULL, "virt_25000000_ck", &virt_25000000_ck, CK_AM33XX),
+ CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_AM33XX),
+ CLK(NULL, "sys_clkin_ck", &sys_clkin_ck, CK_AM33XX),
+ CLK(NULL, "tclkin_ck", &tclkin_ck, CK_AM33XX),
+ CLK(NULL, "dpll_core_ck", &dpll_core_ck, CK_AM33XX),
+ CLK(NULL, "dpll_core_x2_ck", &dpll_core_x2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_core_m4_ck", &dpll_core_m4_ck, CK_AM33XX),
+ CLK(NULL, "dpll_core_m5_ck", &dpll_core_m5_ck, CK_AM33XX),
+ CLK(NULL, "dpll_core_m6_ck", &dpll_core_m6_ck, CK_AM33XX),
+ CLK(NULL, "dpll_mpu_ck", &dpll_mpu_ck, CK_AM33XX),
+ CLK(NULL, "dpll_mpu_m2_ck", &dpll_mpu_m2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_ddr_ck", &dpll_ddr_ck, CK_AM33XX),
+ CLK(NULL, "dpll_ddr_m2_ck", &dpll_ddr_m2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_ddr_m2_div2_ck", &dpll_ddr_m2_div2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_disp_ck", &dpll_disp_ck, CK_AM33XX),
+ CLK(NULL, "dpll_disp_m2_ck", &dpll_disp_m2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_per_ck", &dpll_per_ck, CK_AM33XX),
+ CLK(NULL, "dpll_per_m2_ck", &dpll_per_m2_ck, CK_AM33XX),
+ CLK(NULL, "dpll_per_m2_div4_wkupdm_ck", &dpll_per_m2_div4_wkupdm_ck, CK_AM33XX),
+ CLK(NULL, "dpll_per_m2_div4_ck", &dpll_per_m2_div4_ck, CK_AM33XX),
+ CLK(NULL, "adc_tsc_fck", &adc_tsc_fck, CK_AM33XX),
+ CLK(NULL, "cefuse_fck", &cefuse_fck, CK_AM33XX),
+ CLK(NULL, "clkdiv32k_ick", &clkdiv32k_ick, CK_AM33XX),
+ CLK(NULL, "dcan0_fck", &dcan0_fck, CK_AM33XX),
+ CLK(NULL, "dcan1_fck", &dcan1_fck, CK_AM33XX),
+ CLK(NULL, "debugss_ick", &debugss_ick, CK_AM33XX),
+ CLK(NULL, "pruss_ocp_gclk", &pruss_ocp_gclk, CK_AM33XX),
+ CLK("davinci-mcasp.0", NULL, &mcasp0_fck, CK_AM33XX),
+ CLK("davinci-mcasp.1", NULL, &mcasp1_fck, CK_AM33XX),
+ CLK("NULL", "mmc2_fck", &mmc2_fck, CK_AM33XX),
+ CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX),
+ CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX),
+ CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX),
+ CLK(NULL, "gpt1_fck", &timer1_fck, CK_AM33XX),
+ CLK(NULL, "gpt2_fck", &timer2_fck, CK_AM33XX),
+ CLK(NULL, "gpt3_fck", &timer3_fck, CK_AM33XX),
+ CLK(NULL, "gpt4_fck", &timer4_fck, CK_AM33XX),
+ CLK(NULL, "gpt5_fck", &timer5_fck, CK_AM33XX),
+ CLK(NULL, "gpt6_fck", &timer6_fck, CK_AM33XX),
+ CLK(NULL, "gpt7_fck", &timer7_fck, CK_AM33XX),
+ CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX),
+ CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX),
+ CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX),
+ CLK(NULL, "l4_rtc_gclk", &l4_rtc_gclk, CK_AM33XX),
+ CLK(NULL, "l3_gclk", &l3_gclk, CK_AM33XX),
+ CLK(NULL, "dpll_core_m4_div2_ck", &dpll_core_m4_div2_ck, CK_AM33XX),
+ CLK(NULL, "l4hs_gclk", &l4hs_gclk, CK_AM33XX),
+ CLK(NULL, "l3s_gclk", &l3s_gclk, CK_AM33XX),
+ CLK(NULL, "l4fw_gclk", &l4fw_gclk, CK_AM33XX),
+ CLK(NULL, "l4ls_gclk", &l4ls_gclk, CK_AM33XX),
+ CLK(NULL, "clk_24mhz", &clk_24mhz, CK_AM33XX),
+ CLK(NULL, "sysclk_div_ck", &sysclk_div_ck, CK_AM33XX),
+ CLK(NULL, "cpsw_125mhz_gclk", &cpsw_125mhz_gclk, CK_AM33XX),
+ CLK(NULL, "cpsw_cpts_rft_clk", &cpsw_cpts_rft_clk, CK_AM33XX),
+ CLK(NULL, "gpio0_dbclk_mux_ck", &gpio0_dbclk_mux_ck, CK_AM33XX),
+ CLK(NULL, "gpio0_dbclk", &gpio0_dbclk, CK_AM33XX),
+ CLK(NULL, "gpio1_dbclk", &gpio1_dbclk, CK_AM33XX),
+ CLK(NULL, "gpio2_dbclk", &gpio2_dbclk, CK_AM33XX),
+ CLK(NULL, "gpio3_dbclk", &gpio3_dbclk, CK_AM33XX),
+ CLK(NULL, "lcd_gclk", &lcd_gclk, CK_AM33XX),
+ CLK(NULL, "mmc_clk", &mmc_clk, CK_AM33XX),
+ CLK(NULL, "gfx_fclk_clksel_ck", &gfx_fclk_clksel_ck, CK_AM33XX),
+ CLK(NULL, "gfx_fck_div_ck", &gfx_fck_div_ck, CK_AM33XX),
+ CLK(NULL, "sysclkout_pre_ck", &sysclkout_pre_ck, CK_AM33XX),
+ CLK(NULL, "clkout2_ck", &clkout2_ck, CK_AM33XX),
+};
+
+int __init am33xx_clk_init(void)
+{
+ struct omap_clk *c;
+ u32 cpu_clkflg;
+
+ if (soc_is_am33xx()) {
+ cpu_mask = RATE_IN_AM33XX;
+ cpu_clkflg = CK_AM33XX;
+ }
+
+ clk_init(&omap2_clk_functions);
+
+ for (c = am33xx_clks; c < am33xx_clks + ARRAY_SIZE(am33xx_clks); c++)
+ clk_preinit(c->lk.clk);
+
+ for (c = am33xx_clks; c < am33xx_clks + ARRAY_SIZE(am33xx_clks); c++) {
+ if (c->cpu & cpu_clkflg) {
+ clkdev_add(&c->lk);
+ clk_register(c->lk.clk);
+ omap2_init_clk_clkdm(c->lk.clk);
+ }
+ }
+
+ recalculate_root_clocks();
+
+ /*
+ * Only enable those clocks we will need, let the drivers
+ * enable other clocks as necessary
+ */
+ clk_enable_init_clocks();
+
+ return 0;
+}
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 1efdec236ae8..91b3d5c60bfe 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -93,18 +93,6 @@ static struct clk virt_16_8m_ck = {
.rate = 16800000,
};
-static struct clk virt_19_2m_ck = {
- .name = "virt_19_2m_ck",
- .ops = &clkops_null,
- .rate = 19200000,
-};
-
-static struct clk virt_26m_ck = {
- .name = "virt_26m_ck",
- .ops = &clkops_null,
- .rate = 26000000,
-};
-
static struct clk virt_38_4m_ck = {
.name = "virt_38_4m_ck",
.ops = &clkops_null,
@@ -145,8 +133,8 @@ static const struct clksel osc_sys_clksel[] = {
{ .parent = &virt_12m_ck, .rates = osc_sys_12m_rates },
{ .parent = &virt_13m_ck, .rates = osc_sys_13m_rates },
{ .parent = &virt_16_8m_ck, .rates = osc_sys_16_8m_rates },
- { .parent = &virt_19_2m_ck, .rates = osc_sys_19_2m_rates },
- { .parent = &virt_26m_ck, .rates = osc_sys_26m_rates },
+ { .parent = &virt_19200000_ck, .rates = osc_sys_19_2m_rates },
+ { .parent = &virt_26000000_ck, .rates = osc_sys_26m_rates },
{ .parent = &virt_38_4m_ck, .rates = osc_sys_38_4m_rates },
{ .parent = NULL },
};
@@ -2490,13 +2478,13 @@ static struct clk uart4_fck = {
};
static struct clk uart4_fck_am35xx = {
- .name = "uart4_fck",
- .ops = &clkops_omap2_dflt_wait,
- .parent = &per_48m_fck,
- .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
- .enable_bit = OMAP3430_EN_UART4_SHIFT,
- .clkdm_name = "core_l4_clkdm",
- .recalc = &followparent_recalc,
+ .name = "uart4_fck",
+ .ops = &clkops_omap2_dflt_wait,
+ .parent = &core_48m_fck,
+ .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
+ .enable_bit = AM35XX_EN_UART4_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
+ .recalc = &followparent_recalc,
};
static struct clk gpt2_fck = {
@@ -3201,8 +3189,12 @@ static struct clk vpfe_fck = {
};
/*
- * The UART1/2 functional clock acts as the functional
- * clock for UART4. No separate fclk control available.
+ * The UART1/2 functional clock acts as the functional clock for
+ * UART4. No separate fclk control available. XXX Well now we have a
+ * uart4_fck that is apparently used as the UART4 functional clock,
+ * but it also seems that uart1_fck or uart2_fck are still needed, at
+ * least for UART4 softresets to complete. This really needs
+ * clarification.
*/
static struct clk uart4_ick_am35xx = {
.name = "uart4_ick",
@@ -3230,17 +3222,12 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "virt_12m_ck", &virt_12m_ck, CK_3XXX),
CLK(NULL, "virt_13m_ck", &virt_13m_ck, CK_3XXX),
CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK(NULL, "virt_19_2m_ck", &virt_19_2m_ck, CK_3XXX),
- CLK(NULL, "virt_26m_ck", &virt_26m_ck, CK_3XXX),
+ CLK(NULL, "virt_19200000_ck", &virt_19200000_ck, CK_3XXX),
+ CLK(NULL, "virt_26000000_ck", &virt_26000000_ck, CK_3XXX),
CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck, CK_3XXX),
CLK(NULL, "osc_sys_ck", &osc_sys_ck, CK_3XXX),
CLK(NULL, "sys_ck", &sys_ck, CK_3XXX),
CLK(NULL, "sys_altclk", &sys_altclk, CK_3XXX),
- CLK("omap-mcbsp.1", "pad_fck", &mcbsp_clks, CK_3XXX),
- CLK("omap-mcbsp.2", "pad_fck", &mcbsp_clks, CK_3XXX),
- CLK("omap-mcbsp.3", "pad_fck", &mcbsp_clks, CK_3XXX),
- CLK("omap-mcbsp.4", "pad_fck", &mcbsp_clks, CK_3XXX),
- CLK("omap-mcbsp.5", "pad_fck", &mcbsp_clks, CK_3XXX),
CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_3XXX),
CLK(NULL, "sys_clkout1", &sys_clkout1, CK_3XXX),
CLK(NULL, "dpll1_ck", &dpll1_ck, CK_3XXX),
@@ -3307,8 +3294,6 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "ts_fck", &ts_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK("usbhs_omap", "usbtll_fck", &usbtll_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
- CLK("omap-mcbsp.1", "prcm_fck", &core_96m_fck, CK_3XXX),
- CLK("omap-mcbsp.5", "prcm_fck", &core_96m_fck, CK_3XXX),
CLK(NULL, "core_96m_fck", &core_96m_fck, CK_3XXX),
CLK(NULL, "mmchs3_fck", &mmchs3_fck, CK_3430ES2PLUS | CK_AM35XX | CK_36XX),
CLK(NULL, "mmchs2_fck", &mmchs2_fck, CK_3XXX),
@@ -3413,9 +3398,6 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick, CK_3XXX),
CLK(NULL, "gpt12_ick", &gpt12_ick, CK_3XXX),
CLK(NULL, "gpt1_ick", &gpt1_ick, CK_3XXX),
- CLK("omap-mcbsp.2", "prcm_fck", &per_96m_fck, CK_3XXX),
- CLK("omap-mcbsp.3", "prcm_fck", &per_96m_fck, CK_3XXX),
- CLK("omap-mcbsp.4", "prcm_fck", &per_96m_fck, CK_3XXX),
CLK(NULL, "per_96m_fck", &per_96m_fck, CK_3XXX),
CLK(NULL, "per_48m_fck", &per_48m_fck, CK_3XXX),
CLK(NULL, "uart3_fck", &uart3_fck, CK_3XXX),
@@ -3474,38 +3456,16 @@ static struct omap_clk omap3xxx_clks[] = {
CLK(NULL, "ipss_ick", &ipss_ick, CK_AM35XX),
CLK(NULL, "rmii_ck", &rmii_ck, CK_AM35XX),
CLK(NULL, "pclk_ck", &pclk_ck, CK_AM35XX),
- CLK("davinci_emac", NULL, &emac_ick, CK_AM35XX),
+ CLK("davinci_emac.0", NULL, &emac_ick, CK_AM35XX),
CLK("davinci_mdio.0", NULL, &emac_fck, CK_AM35XX),
CLK("vpfe-capture", "master", &vpfe_ick, CK_AM35XX),
CLK("vpfe-capture", "slave", &vpfe_fck, CK_AM35XX),
- CLK("musb-am35x", "ick", &hsotgusb_ick_am35xx, CK_AM35XX),
- CLK("musb-am35x", "fck", &hsotgusb_fck_am35xx, CK_AM35XX),
+ CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx, CK_AM35XX),
+ CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx, CK_AM35XX),
CLK(NULL, "hecc_ck", &hecc_ck, CK_AM35XX),
CLK(NULL, "uart4_ick", &uart4_ick_am35xx, CK_AM35XX),
- CLK("omap_timer.1", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.2", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.3", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.4", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.5", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.6", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.7", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.8", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.9", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.10", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.11", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.12", "32k_ck", &omap_32k_fck, CK_3XXX),
- CLK("omap_timer.1", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.2", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.3", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.4", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.5", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.6", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.7", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.8", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.9", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.10", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.11", "sys_ck", &sys_ck, CK_3XXX),
- CLK("omap_timer.12", "sys_ck", &sys_ck, CK_3XXX),
+ CLK(NULL, "timer_32k_ck", &omap_32k_fck, CK_3XXX),
+ CLK(NULL, "timer_sys_ck", &sys_ck, CK_3XXX),
};
@@ -3523,7 +3483,7 @@ int __init omap3xxx_clk_init(void)
} else if (cpu_is_ti816x()) {
cpu_mask = RATE_IN_TI816X;
cpu_clkflg = CK_TI816X;
- } else if (cpu_is_am33xx()) {
+ } else if (soc_is_am33xx()) {
cpu_mask = RATE_IN_AM33XX;
} else if (cpu_is_ti814x()) {
cpu_mask = RATE_IN_TI814X;
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index e2b701e164f6..d7f55e43b761 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -107,18 +107,6 @@ static struct clk virt_16800000_ck = {
.rate = 16800000,
};
-static struct clk virt_19200000_ck = {
- .name = "virt_19200000_ck",
- .ops = &clkops_null,
- .rate = 19200000,
-};
-
-static struct clk virt_26000000_ck = {
- .name = "virt_26000000_ck",
- .ops = &clkops_null,
- .rate = 26000000,
-};
-
static struct clk virt_27000000_ck = {
.name = "virt_27000000_ck",
.ops = &clkops_null,
@@ -131,31 +119,6 @@ static struct clk virt_38400000_ck = {
.rate = 38400000,
};
-static const struct clksel_rate div_1_0_rates[] = {
- { .div = 1, .val = 0, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel_rate div_1_1_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel_rate div_1_2_rates[] = {
- { .div = 1, .val = 2, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel_rate div_1_3_rates[] = {
- { .div = 1, .val = 3, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
-static const struct clksel_rate div_1_4_rates[] = {
- { .div = 1, .val = 4, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
static const struct clksel_rate div_1_5_rates[] = {
{ .div = 1, .val = 5, .flags = RATE_IN_4430 },
{ .div = 0 },
@@ -289,41 +252,6 @@ static struct clk dpll_abe_x2_ck = {
.recalc = &omap3_clkoutx2_recalc,
};
-static const struct clksel_rate div31_1to31_rates[] = {
- { .div = 1, .val = 1, .flags = RATE_IN_4430 },
- { .div = 2, .val = 2, .flags = RATE_IN_4430 },
- { .div = 3, .val = 3, .flags = RATE_IN_4430 },
- { .div = 4, .val = 4, .flags = RATE_IN_4430 },
- { .div = 5, .val = 5, .flags = RATE_IN_4430 },
- { .div = 6, .val = 6, .flags = RATE_IN_4430 },
- { .div = 7, .val = 7, .flags = RATE_IN_4430 },
- { .div = 8, .val = 8, .flags = RATE_IN_4430 },
- { .div = 9, .val = 9, .flags = RATE_IN_4430 },
- { .div = 10, .val = 10, .flags = RATE_IN_4430 },
- { .div = 11, .val = 11, .flags = RATE_IN_4430 },
- { .div = 12, .val = 12, .flags = RATE_IN_4430 },
- { .div = 13, .val = 13, .flags = RATE_IN_4430 },
- { .div = 14, .val = 14, .flags = RATE_IN_4430 },
- { .div = 15, .val = 15, .flags = RATE_IN_4430 },
- { .div = 16, .val = 16, .flags = RATE_IN_4430 },
- { .div = 17, .val = 17, .flags = RATE_IN_4430 },
- { .div = 18, .val = 18, .flags = RATE_IN_4430 },
- { .div = 19, .val = 19, .flags = RATE_IN_4430 },
- { .div = 20, .val = 20, .flags = RATE_IN_4430 },
- { .div = 21, .val = 21, .flags = RATE_IN_4430 },
- { .div = 22, .val = 22, .flags = RATE_IN_4430 },
- { .div = 23, .val = 23, .flags = RATE_IN_4430 },
- { .div = 24, .val = 24, .flags = RATE_IN_4430 },
- { .div = 25, .val = 25, .flags = RATE_IN_4430 },
- { .div = 26, .val = 26, .flags = RATE_IN_4430 },
- { .div = 27, .val = 27, .flags = RATE_IN_4430 },
- { .div = 28, .val = 28, .flags = RATE_IN_4430 },
- { .div = 29, .val = 29, .flags = RATE_IN_4430 },
- { .div = 30, .val = 30, .flags = RATE_IN_4430 },
- { .div = 31, .val = 31, .flags = RATE_IN_4430 },
- { .div = 0 },
-};
-
static const struct clksel dpll_abe_m2x2_div[] = {
{ .parent = &dpll_abe_x2_ck, .rates = div31_1to31_rates },
{ .parent = NULL },
@@ -3299,17 +3227,17 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "smartreflex_core_fck", &smartreflex_core_fck, CK_443X),
CLK(NULL, "smartreflex_iva_fck", &smartreflex_iva_fck, CK_443X),
CLK(NULL, "smartreflex_mpu_fck", &smartreflex_mpu_fck, CK_443X),
- CLK(NULL, "gpt1_fck", &timer1_fck, CK_443X),
- CLK(NULL, "gpt10_fck", &timer10_fck, CK_443X),
- CLK(NULL, "gpt11_fck", &timer11_fck, CK_443X),
- CLK(NULL, "gpt2_fck", &timer2_fck, CK_443X),
- CLK(NULL, "gpt3_fck", &timer3_fck, CK_443X),
- CLK(NULL, "gpt4_fck", &timer4_fck, CK_443X),
- CLK(NULL, "gpt5_fck", &timer5_fck, CK_443X),
- CLK(NULL, "gpt6_fck", &timer6_fck, CK_443X),
- CLK(NULL, "gpt7_fck", &timer7_fck, CK_443X),
- CLK(NULL, "gpt8_fck", &timer8_fck, CK_443X),
- CLK(NULL, "gpt9_fck", &timer9_fck, CK_443X),
+ CLK(NULL, "timer1_fck", &timer1_fck, CK_443X),
+ CLK(NULL, "timer10_fck", &timer10_fck, CK_443X),
+ CLK(NULL, "timer11_fck", &timer11_fck, CK_443X),
+ CLK(NULL, "timer2_fck", &timer2_fck, CK_443X),
+ CLK(NULL, "timer3_fck", &timer3_fck, CK_443X),
+ CLK(NULL, "timer4_fck", &timer4_fck, CK_443X),
+ CLK(NULL, "timer5_fck", &timer5_fck, CK_443X),
+ CLK(NULL, "timer6_fck", &timer6_fck, CK_443X),
+ CLK(NULL, "timer7_fck", &timer7_fck, CK_443X),
+ CLK(NULL, "timer8_fck", &timer8_fck, CK_443X),
+ CLK(NULL, "timer9_fck", &timer9_fck, CK_443X),
CLK(NULL, "uart1_fck", &uart1_fck, CK_443X),
CLK(NULL, "uart2_fck", &uart2_fck, CK_443X),
CLK(NULL, "uart3_fck", &uart3_fck, CK_443X),
@@ -3385,28 +3313,18 @@ static struct omap_clk omap44xx_clks[] = {
CLK("usbhs_omap", "usbhost_ick", &dummy_ck, CK_443X),
CLK("usbhs_omap", "usbtll_fck", &dummy_ck, CK_443X),
CLK("omap_wdt", "ick", &dummy_ck, CK_443X),
- CLK("omap_timer.1", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.2", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.3", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.4", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.5", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.6", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.7", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.8", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.9", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.10", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.11", "32k_ck", &sys_32k_ck, CK_443X),
- CLK("omap_timer.1", "sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.2", "sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.3", "sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.4", "sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.9", "sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.10", "sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.11", "sys_ck", &sys_clkin_ck, CK_443X),
- CLK("omap_timer.5", "sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("omap_timer.6", "sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("omap_timer.7", "sys_ck", &syc_clk_div_ck, CK_443X),
- CLK("omap_timer.8", "sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK(NULL, "timer_32k_ck", &sys_32k_ck, CK_443X),
+ CLK("omap_timer.1", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.2", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.3", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.4", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.9", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.10", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.11", "timer_sys_ck", &sys_clkin_ck, CK_443X),
+ CLK("omap_timer.5", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("omap_timer.6", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("omap_timer.7", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
+ CLK("omap_timer.8", "timer_sys_ck", &syc_clk_div_ck, CK_443X),
};
int __init omap4xxx_clk_init(void)
@@ -3417,9 +3335,12 @@ int __init omap4xxx_clk_init(void)
if (cpu_is_omap443x()) {
cpu_mask = RATE_IN_4430;
cpu_clkflg = CK_443X;
- } else if (cpu_is_omap446x()) {
+ } else if (cpu_is_omap446x() || cpu_is_omap447x()) {
cpu_mask = RATE_IN_4460 | RATE_IN_4430;
cpu_clkflg = CK_446X | CK_443X;
+
+ if (cpu_is_omap447x())
+ pr_warn("WARNING: OMAP4470 clock data incomplete!\n");
} else {
return 0;
}
diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c
index 6424d46be14a..b9f3ba68148c 100644
--- a/arch/arm/mach-omap2/clock_common_data.c
+++ b/arch/arm/mach-omap2/clock_common_data.c
@@ -43,3 +43,80 @@ const struct clksel_rate dsp_ick_rates[] = {
{ .div = 3, .val = 3, .flags = RATE_IN_243X },
{ .div = 0 },
};
+
+
+/* clksel_rate blocks shared between OMAP44xx and AM33xx */
+
+const struct clksel_rate div_1_0_rates[] = {
+ { .div = 1, .val = 0, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 0 },
+};
+
+const struct clksel_rate div_1_1_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 0 },
+};
+
+const struct clksel_rate div_1_2_rates[] = {
+ { .div = 1, .val = 2, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 0 },
+};
+
+const struct clksel_rate div_1_3_rates[] = {
+ { .div = 1, .val = 3, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 0 },
+};
+
+const struct clksel_rate div_1_4_rates[] = {
+ { .div = 1, .val = 4, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 0 },
+};
+
+const struct clksel_rate div31_1to31_rates[] = {
+ { .div = 1, .val = 1, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 2, .val = 2, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 3, .val = 3, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 4, .val = 4, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 5, .val = 5, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 6, .val = 6, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 7, .val = 7, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 8, .val = 8, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 9, .val = 9, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 10, .val = 10, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 11, .val = 11, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 12, .val = 12, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 13, .val = 13, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 14, .val = 14, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 15, .val = 15, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 16, .val = 16, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 17, .val = 17, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 18, .val = 18, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 19, .val = 19, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 20, .val = 20, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 21, .val = 21, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 22, .val = 22, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 23, .val = 23, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 24, .val = 24, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 25, .val = 25, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 26, .val = 26, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 27, .val = 27, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 28, .val = 28, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 29, .val = 29, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 30, .val = 30, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
+ { .div = 0 },
+};
+
+/* Clocks shared between various OMAP SoCs */
+
+struct clk virt_19200000_ck = {
+ .name = "virt_19200000_ck",
+ .ops = &clkops_null,
+ .rate = 19200000,
+};
+
+struct clk virt_26000000_ck = {
+ .name = "virt_26000000_ck",
+ .ops = &clkops_null,
+ .rate = 26000000,
+};
diff --git a/arch/arm/mach-omap2/clockdomain.h b/arch/arm/mach-omap2/clockdomain.h
index f7b58609bad8..5601dc13785e 100644
--- a/arch/arm/mach-omap2/clockdomain.h
+++ b/arch/arm/mach-omap2/clockdomain.h
@@ -31,12 +31,16 @@
*
* CLKDM_NO_AUTODEPS: Prevent "autodeps" from being added/removed from this
* clockdomain. (Currently, this applies to OMAP3 clockdomains only.)
+ * CLKDM_ACTIVE_WITH_MPU: The PRCM guarantees that this clockdomain is
+ * active whenever the MPU is active. True for interconnects and
+ * the WKUP clockdomains.
*/
#define CLKDM_CAN_FORCE_SLEEP (1 << 0)
#define CLKDM_CAN_FORCE_WAKEUP (1 << 1)
#define CLKDM_CAN_ENABLE_AUTO (1 << 2)
#define CLKDM_CAN_DISABLE_AUTO (1 << 3)
#define CLKDM_NO_AUTODEPS (1 << 4)
+#define CLKDM_ACTIVE_WITH_MPU (1 << 5)
#define CLKDM_CAN_HWSUP (CLKDM_CAN_ENABLE_AUTO | CLKDM_CAN_DISABLE_AUTO)
#define CLKDM_CAN_SWSUP (CLKDM_CAN_FORCE_SLEEP | CLKDM_CAN_FORCE_WAKEUP)
@@ -195,6 +199,7 @@ int clkdm_hwmod_disable(struct clockdomain *clkdm, struct omap_hwmod *oh);
extern void __init omap242x_clockdomains_init(void);
extern void __init omap243x_clockdomains_init(void);
extern void __init omap3xxx_clockdomains_init(void);
+extern void __init am33xx_clockdomains_init(void);
extern void __init omap44xx_clockdomains_init(void);
extern void _clkdm_add_autodeps(struct clockdomain *clkdm);
extern void _clkdm_del_autodeps(struct clockdomain *clkdm);
@@ -202,11 +207,10 @@ extern void _clkdm_del_autodeps(struct clockdomain *clkdm);
extern struct clkdm_ops omap2_clkdm_operations;
extern struct clkdm_ops omap3_clkdm_operations;
extern struct clkdm_ops omap4_clkdm_operations;
+extern struct clkdm_ops am33xx_clkdm_operations;
extern struct clkdm_dep gfx_24xx_wkdeps[];
extern struct clkdm_dep dsp_24xx_wkdeps[];
extern struct clockdomain wkup_common_clkdm;
-extern struct clockdomain prm_common_clkdm;
-extern struct clockdomain cm_common_clkdm;
#endif
diff --git a/arch/arm/mach-omap2/clockdomain33xx.c b/arch/arm/mach-omap2/clockdomain33xx.c
new file mode 100644
index 000000000000..aca6388fad76
--- /dev/null
+++ b/arch/arm/mach-omap2/clockdomain33xx.c
@@ -0,0 +1,74 @@
+/*
+ * AM33XX clockdomain control
+ *
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * Derived from mach-omap2/clockdomain44xx.c written by Rajendra Nayak
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+
+#include "clockdomain.h"
+#include "cm33xx.h"
+
+
+static int am33xx_clkdm_sleep(struct clockdomain *clkdm)
+{
+ am33xx_cm_clkdm_force_sleep(clkdm->cm_inst, clkdm->clkdm_offs);
+ return 0;
+}
+
+static int am33xx_clkdm_wakeup(struct clockdomain *clkdm)
+{
+ am33xx_cm_clkdm_force_wakeup(clkdm->cm_inst, clkdm->clkdm_offs);
+ return 0;
+}
+
+static void am33xx_clkdm_allow_idle(struct clockdomain *clkdm)
+{
+ am33xx_cm_clkdm_enable_hwsup(clkdm->cm_inst, clkdm->clkdm_offs);
+}
+
+static void am33xx_clkdm_deny_idle(struct clockdomain *clkdm)
+{
+ am33xx_cm_clkdm_disable_hwsup(clkdm->cm_inst, clkdm->clkdm_offs);
+}
+
+static int am33xx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+ if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+ return am33xx_clkdm_wakeup(clkdm);
+
+ return 0;
+}
+
+static int am33xx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+ bool hwsup = false;
+
+ hwsup = am33xx_cm_is_clkdm_in_hwsup(clkdm->cm_inst, clkdm->clkdm_offs);
+
+ if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP))
+ am33xx_clkdm_sleep(clkdm);
+
+ return 0;
+}
+
+struct clkdm_ops am33xx_clkdm_operations = {
+ .clkdm_sleep = am33xx_clkdm_sleep,
+ .clkdm_wakeup = am33xx_clkdm_wakeup,
+ .clkdm_allow_idle = am33xx_clkdm_allow_idle,
+ .clkdm_deny_idle = am33xx_clkdm_deny_idle,
+ .clkdm_clk_enable = am33xx_clkdm_clk_enable,
+ .clkdm_clk_disable = am33xx_clkdm_clk_disable,
+};
diff --git a/arch/arm/mach-omap2/clockdomain44xx.c b/arch/arm/mach-omap2/clockdomain44xx.c
index 4f04dd11d655..762f2cc542ce 100644
--- a/arch/arm/mach-omap2/clockdomain44xx.c
+++ b/arch/arm/mach-omap2/clockdomain44xx.c
@@ -70,7 +70,7 @@ static int omap4_clkdm_clear_all_wkup_sleep_deps(struct clockdomain *clkdm)
static int omap4_clkdm_sleep(struct clockdomain *clkdm)
{
- omap4_cminst_clkdm_force_sleep(clkdm->prcm_partition,
+ omap4_cminst_clkdm_enable_hwsup(clkdm->prcm_partition,
clkdm->cm_inst, clkdm->clkdm_offs);
return 0;
}
@@ -90,8 +90,12 @@ static void omap4_clkdm_allow_idle(struct clockdomain *clkdm)
static void omap4_clkdm_deny_idle(struct clockdomain *clkdm)
{
- omap4_cminst_clkdm_disable_hwsup(clkdm->prcm_partition,
- clkdm->cm_inst, clkdm->clkdm_offs);
+ if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+ omap4_clkdm_wakeup(clkdm);
+ else
+ omap4_cminst_clkdm_disable_hwsup(clkdm->prcm_partition,
+ clkdm->cm_inst,
+ clkdm->clkdm_offs);
}
static int omap4_clkdm_clk_enable(struct clockdomain *clkdm)
diff --git a/arch/arm/mach-omap2/clockdomains2420_data.c b/arch/arm/mach-omap2/clockdomains2420_data.c
index 0ab8e46d5b2b..5c741852fac0 100644
--- a/arch/arm/mach-omap2/clockdomains2420_data.c
+++ b/arch/arm/mach-omap2/clockdomains2420_data.c
@@ -131,8 +131,6 @@ static struct clockdomain dss_2420_clkdm = {
static struct clockdomain *clockdomains_omap242x[] __initdata = {
&wkup_common_clkdm,
- &cm_common_clkdm,
- &prm_common_clkdm,
&mpu_2420_clkdm,
&iva1_2420_clkdm,
&dsp_2420_clkdm,
diff --git a/arch/arm/mach-omap2/clockdomains2430_data.c b/arch/arm/mach-omap2/clockdomains2430_data.c
index 3645ed044890..f09617555e15 100644
--- a/arch/arm/mach-omap2/clockdomains2430_data.c
+++ b/arch/arm/mach-omap2/clockdomains2430_data.c
@@ -157,8 +157,6 @@ static struct clockdomain dss_2430_clkdm = {
static struct clockdomain *clockdomains_omap243x[] __initdata = {
&wkup_common_clkdm,
- &cm_common_clkdm,
- &prm_common_clkdm,
&mpu_2430_clkdm,
&mdm_clkdm,
&dsp_2430_clkdm,
diff --git a/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c b/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
index 839145e1cfbe..4972219653ce 100644
--- a/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
+++ b/arch/arm/mach-omap2/clockdomains2xxx_3xxx_data.c
@@ -88,4 +88,5 @@ struct clockdomain wkup_common_clkdm = {
.name = "wkup_clkdm",
.pwrdm = { .name = "wkup_pwrdm" },
.dep_bit = OMAP_EN_WKUP_SHIFT,
+ .flags = CLKDM_ACTIVE_WITH_MPU,
};
diff --git a/arch/arm/mach-omap2/clockdomains33xx_data.c b/arch/arm/mach-omap2/clockdomains33xx_data.c
new file mode 100644
index 000000000000..32c90fd9eba2
--- /dev/null
+++ b/arch/arm/mach-omap2/clockdomains33xx_data.c
@@ -0,0 +1,196 @@
+/*
+ * AM33XX Clock Domain data.
+ *
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include "clockdomain.h"
+#include "cm.h"
+#include "cm33xx.h"
+#include "cm-regbits-33xx.h"
+
+static struct clockdomain l4ls_am33xx_clkdm = {
+ .name = "l4ls_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .cm_inst = AM33XX_CM_PER_MOD,
+ .clkdm_offs = AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l3s_am33xx_clkdm = {
+ .name = "l3s_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .cm_inst = AM33XX_CM_PER_MOD,
+ .clkdm_offs = AM33XX_CM_PER_L3S_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l4fw_am33xx_clkdm = {
+ .name = "l4fw_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .cm_inst = AM33XX_CM_PER_MOD,
+ .clkdm_offs = AM33XX_CM_PER_L4FW_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l3_am33xx_clkdm = {
+ .name = "l3_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .cm_inst = AM33XX_CM_PER_MOD,
+ .clkdm_offs = AM33XX_CM_PER_L3_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l4hs_am33xx_clkdm = {
+ .name = "l4hs_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .cm_inst = AM33XX_CM_PER_MOD,
+ .clkdm_offs = AM33XX_CM_PER_L4HS_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain ocpwp_l3_am33xx_clkdm = {
+ .name = "ocpwp_l3_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .cm_inst = AM33XX_CM_PER_MOD,
+ .clkdm_offs = AM33XX_CM_PER_OCPWP_L3_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain pruss_ocp_am33xx_clkdm = {
+ .name = "pruss_ocp_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .cm_inst = AM33XX_CM_PER_MOD,
+ .clkdm_offs = AM33XX_CM_PER_PRUSS_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain cpsw_125mhz_am33xx_clkdm = {
+ .name = "cpsw_125mhz_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .cm_inst = AM33XX_CM_PER_MOD,
+ .clkdm_offs = AM33XX_CM_PER_CPSW_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain lcdc_am33xx_clkdm = {
+ .name = "lcdc_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .cm_inst = AM33XX_CM_PER_MOD,
+ .clkdm_offs = AM33XX_CM_PER_LCDC_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain clk_24mhz_am33xx_clkdm = {
+ .name = "clk_24mhz_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .cm_inst = AM33XX_CM_PER_MOD,
+ .clkdm_offs = AM33XX_CM_PER_CLK_24MHZ_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l4_wkup_am33xx_clkdm = {
+ .name = "l4_wkup_clkdm",
+ .pwrdm = { .name = "wkup_pwrdm" },
+ .cm_inst = AM33XX_CM_WKUP_MOD,
+ .clkdm_offs = AM33XX_CM_WKUP_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l3_aon_am33xx_clkdm = {
+ .name = "l3_aon_clkdm",
+ .pwrdm = { .name = "wkup_pwrdm" },
+ .cm_inst = AM33XX_CM_WKUP_MOD,
+ .clkdm_offs = AM33XX_CM_L3_AON_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l4_wkup_aon_am33xx_clkdm = {
+ .name = "l4_wkup_aon_clkdm",
+ .pwrdm = { .name = "wkup_pwrdm" },
+ .cm_inst = AM33XX_CM_WKUP_MOD,
+ .clkdm_offs = AM33XX_CM_L4_WKUP_AON_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain mpu_am33xx_clkdm = {
+ .name = "mpu_clkdm",
+ .pwrdm = { .name = "mpu_pwrdm" },
+ .cm_inst = AM33XX_CM_MPU_MOD,
+ .clkdm_offs = AM33XX_CM_MPU_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l4_rtc_am33xx_clkdm = {
+ .name = "l4_rtc_clkdm",
+ .pwrdm = { .name = "rtc_pwrdm" },
+ .cm_inst = AM33XX_CM_RTC_MOD,
+ .clkdm_offs = AM33XX_CM_RTC_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain gfx_l3_am33xx_clkdm = {
+ .name = "gfx_l3_clkdm",
+ .pwrdm = { .name = "gfx_pwrdm" },
+ .cm_inst = AM33XX_CM_GFX_MOD,
+ .clkdm_offs = AM33XX_CM_GFX_L3_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain gfx_l4ls_gfx_am33xx_clkdm = {
+ .name = "gfx_l4ls_gfx_clkdm",
+ .pwrdm = { .name = "gfx_pwrdm" },
+ .cm_inst = AM33XX_CM_GFX_MOD,
+ .clkdm_offs = AM33XX_CM_GFX_L4LS_GFX_CLKSTCTRL__1_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain l4_cefuse_am33xx_clkdm = {
+ .name = "l4_cefuse_clkdm",
+ .pwrdm = { .name = "cefuse_pwrdm" },
+ .cm_inst = AM33XX_CM_CEFUSE_MOD,
+ .clkdm_offs = AM33XX_CM_CEFUSE_CLKSTCTRL_OFFSET,
+ .flags = CLKDM_CAN_SWSUP,
+};
+
+static struct clockdomain *clockdomains_am33xx[] __initdata = {
+ &l4ls_am33xx_clkdm,
+ &l3s_am33xx_clkdm,
+ &l4fw_am33xx_clkdm,
+ &l3_am33xx_clkdm,
+ &l4hs_am33xx_clkdm,
+ &ocpwp_l3_am33xx_clkdm,
+ &pruss_ocp_am33xx_clkdm,
+ &cpsw_125mhz_am33xx_clkdm,
+ &lcdc_am33xx_clkdm,
+ &clk_24mhz_am33xx_clkdm,
+ &l4_wkup_am33xx_clkdm,
+ &l3_aon_am33xx_clkdm,
+ &l4_wkup_aon_am33xx_clkdm,
+ &mpu_am33xx_clkdm,
+ &l4_rtc_am33xx_clkdm,
+ &gfx_l3_am33xx_clkdm,
+ &gfx_l4ls_gfx_am33xx_clkdm,
+ &l4_cefuse_am33xx_clkdm,
+ NULL,
+};
+
+void __init am33xx_clockdomains_init(void)
+{
+ clkdm_register_platform_funcs(&am33xx_clkdm_operations);
+ clkdm_register_clkdms(clockdomains_am33xx);
+ clkdm_complete_init();
+}
diff --git a/arch/arm/mach-omap2/clockdomains3xxx_data.c b/arch/arm/mach-omap2/clockdomains3xxx_data.c
index 6038adb97710..56089c49142a 100644
--- a/arch/arm/mach-omap2/clockdomains3xxx_data.c
+++ b/arch/arm/mach-omap2/clockdomains3xxx_data.c
@@ -59,6 +59,12 @@ static struct clkdm_dep gfx_sgx_3xxx_wkdeps[] = {
{ NULL },
};
+static struct clkdm_dep gfx_sgx_am35x_wkdeps[] = {
+ { .clkdm_name = "mpu_clkdm" },
+ { .clkdm_name = "wkup_clkdm" },
+ { NULL },
+};
+
/* 3430: PM_WKDEP_PER: CORE, IVA2, MPU, WKUP */
static struct clkdm_dep per_wkdeps[] = {
{ .clkdm_name = "core_l3_clkdm" },
@@ -69,6 +75,14 @@ static struct clkdm_dep per_wkdeps[] = {
{ NULL },
};
+static struct clkdm_dep per_am35x_wkdeps[] = {
+ { .clkdm_name = "core_l3_clkdm" },
+ { .clkdm_name = "core_l4_clkdm" },
+ { .clkdm_name = "mpu_clkdm" },
+ { .clkdm_name = "wkup_clkdm" },
+ { NULL },
+};
+
/* 3430ES2: PM_WKDEP_USBHOST: CORE, IVA2, MPU, WKUP */
static struct clkdm_dep usbhost_wkdeps[] = {
{ .clkdm_name = "core_l3_clkdm" },
@@ -79,6 +93,14 @@ static struct clkdm_dep usbhost_wkdeps[] = {
{ NULL },
};
+static struct clkdm_dep usbhost_am35x_wkdeps[] = {
+ { .clkdm_name = "core_l3_clkdm" },
+ { .clkdm_name = "core_l4_clkdm" },
+ { .clkdm_name = "mpu_clkdm" },
+ { .clkdm_name = "wkup_clkdm" },
+ { NULL },
+};
+
/* 3430 PM_WKDEP_MPU: CORE, IVA2, DSS, PER */
static struct clkdm_dep mpu_3xxx_wkdeps[] = {
{ .clkdm_name = "core_l3_clkdm" },
@@ -89,6 +111,14 @@ static struct clkdm_dep mpu_3xxx_wkdeps[] = {
{ NULL },
};
+static struct clkdm_dep mpu_am35x_wkdeps[] = {
+ { .clkdm_name = "core_l3_clkdm" },
+ { .clkdm_name = "core_l4_clkdm" },
+ { .clkdm_name = "dss_clkdm" },
+ { .clkdm_name = "per_clkdm" },
+ { NULL },
+};
+
/* 3430 PM_WKDEP_IVA2: CORE, MPU, WKUP, DSS, PER */
static struct clkdm_dep iva2_wkdeps[] = {
{ .clkdm_name = "core_l3_clkdm" },
@@ -116,6 +146,12 @@ static struct clkdm_dep dss_wkdeps[] = {
{ NULL },
};
+static struct clkdm_dep dss_am35x_wkdeps[] = {
+ { .clkdm_name = "mpu_clkdm" },
+ { .clkdm_name = "wkup_clkdm" },
+ { NULL },
+};
+
/* 3430: PM_WKDEP_NEON: MPU */
static struct clkdm_dep neon_wkdeps[] = {
{ .clkdm_name = "mpu_clkdm" },
@@ -131,6 +167,11 @@ static struct clkdm_dep dss_sleepdeps[] = {
{ NULL },
};
+static struct clkdm_dep dss_am35x_sleepdeps[] = {
+ { .clkdm_name = "mpu_clkdm" },
+ { NULL },
+};
+
/* 3430: CM_SLEEPDEP_PER: MPU, IVA */
static struct clkdm_dep per_sleepdeps[] = {
{ .clkdm_name = "mpu_clkdm" },
@@ -138,6 +179,11 @@ static struct clkdm_dep per_sleepdeps[] = {
{ NULL },
};
+static struct clkdm_dep per_am35x_sleepdeps[] = {
+ { .clkdm_name = "mpu_clkdm" },
+ { NULL },
+};
+
/* 3430ES2: CM_SLEEPDEP_USBHOST: MPU, IVA */
static struct clkdm_dep usbhost_sleepdeps[] = {
{ .clkdm_name = "mpu_clkdm" },
@@ -145,6 +191,11 @@ static struct clkdm_dep usbhost_sleepdeps[] = {
{ NULL },
};
+static struct clkdm_dep usbhost_am35x_sleepdeps[] = {
+ { .clkdm_name = "mpu_clkdm" },
+ { NULL },
+};
+
/* 3430: CM_SLEEPDEP_CAM: MPU */
static struct clkdm_dep cam_sleepdeps[] = {
{ .clkdm_name = "mpu_clkdm" },
@@ -175,6 +226,15 @@ static struct clockdomain mpu_3xxx_clkdm = {
.clktrctrl_mask = OMAP3430_CLKTRCTRL_MPU_MASK,
};
+static struct clockdomain mpu_am35x_clkdm = {
+ .name = "mpu_clkdm",
+ .pwrdm = { .name = "mpu_pwrdm" },
+ .flags = CLKDM_CAN_HWSUP | CLKDM_CAN_FORCE_WAKEUP,
+ .dep_bit = OMAP3430_EN_MPU_SHIFT,
+ .wkdep_srcs = mpu_am35x_wkdeps,
+ .clktrctrl_mask = OMAP3430_CLKTRCTRL_MPU_MASK,
+};
+
static struct clockdomain neon_clkdm = {
.name = "neon_clkdm",
.pwrdm = { .name = "neon_pwrdm" },
@@ -210,6 +270,15 @@ static struct clockdomain sgx_clkdm = {
.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_SGX_MASK,
};
+static struct clockdomain sgx_am35x_clkdm = {
+ .name = "sgx_clkdm",
+ .pwrdm = { .name = "sgx_pwrdm" },
+ .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .wkdep_srcs = gfx_sgx_am35x_wkdeps,
+ .sleepdep_srcs = gfx_sgx_sleepdeps,
+ .clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_SGX_MASK,
+};
+
/*
* The die-to-die clockdomain was documented in the 34xx ES1 TRM, but
* then that information was removed from the 34xx ES2+ TRM. It is
@@ -261,6 +330,16 @@ static struct clockdomain dss_3xxx_clkdm = {
.clktrctrl_mask = OMAP3430_CLKTRCTRL_DSS_MASK,
};
+static struct clockdomain dss_am35x_clkdm = {
+ .name = "dss_clkdm",
+ .pwrdm = { .name = "dss_pwrdm" },
+ .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .dep_bit = OMAP3430_PM_WKDEP_MPU_EN_DSS_SHIFT,
+ .wkdep_srcs = dss_am35x_wkdeps,
+ .sleepdep_srcs = dss_am35x_sleepdeps,
+ .clktrctrl_mask = OMAP3430_CLKTRCTRL_DSS_MASK,
+};
+
static struct clockdomain cam_clkdm = {
.name = "cam_clkdm",
.pwrdm = { .name = "cam_pwrdm" },
@@ -279,6 +358,15 @@ static struct clockdomain usbhost_clkdm = {
.clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_USBHOST_MASK,
};
+static struct clockdomain usbhost_am35x_clkdm = {
+ .name = "usbhost_clkdm",
+ .pwrdm = { .name = "core_pwrdm" },
+ .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .wkdep_srcs = usbhost_am35x_wkdeps,
+ .sleepdep_srcs = usbhost_am35x_sleepdeps,
+ .clktrctrl_mask = OMAP3430ES2_CLKTRCTRL_USBHOST_MASK,
+};
+
static struct clockdomain per_clkdm = {
.name = "per_clkdm",
.pwrdm = { .name = "per_pwrdm" },
@@ -289,6 +377,16 @@ static struct clockdomain per_clkdm = {
.clktrctrl_mask = OMAP3430_CLKTRCTRL_PER_MASK,
};
+static struct clockdomain per_am35x_clkdm = {
+ .name = "per_clkdm",
+ .pwrdm = { .name = "per_pwrdm" },
+ .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .dep_bit = OMAP3430_EN_PER_SHIFT,
+ .wkdep_srcs = per_am35x_wkdeps,
+ .sleepdep_srcs = per_am35x_sleepdeps,
+ .clktrctrl_mask = OMAP3430_CLKTRCTRL_PER_MASK,
+};
+
/*
* Disable hw supervised mode for emu_clkdm, because emu_pwrdm is
* switched of even if sdti is in use
@@ -341,31 +439,42 @@ static struct clkdm_autodep clkdm_autodeps[] = {
}
};
+static struct clkdm_autodep clkdm_am35x_autodeps[] = {
+ {
+ .clkdm = { .name = "mpu_clkdm" },
+ },
+ {
+ .clkdm = { .name = NULL },
+ }
+};
+
/*
*
*/
-static struct clockdomain *clockdomains_omap3430_common[] __initdata = {
+static struct clockdomain *clockdomains_common[] __initdata = {
&wkup_common_clkdm,
- &cm_common_clkdm,
- &prm_common_clkdm,
- &mpu_3xxx_clkdm,
&neon_clkdm,
- &iva2_clkdm,
- &d2d_clkdm,
&core_l3_3xxx_clkdm,
&core_l4_3xxx_clkdm,
- &dss_3xxx_clkdm,
- &cam_clkdm,
- &per_clkdm,
&emu_clkdm,
&dpll1_clkdm,
- &dpll2_clkdm,
&dpll3_clkdm,
&dpll4_clkdm,
NULL
};
+static struct clockdomain *clockdomains_omap3430[] __initdata = {
+ &mpu_3xxx_clkdm,
+ &iva2_clkdm,
+ &d2d_clkdm,
+ &dss_3xxx_clkdm,
+ &cam_clkdm,
+ &per_clkdm,
+ &dpll2_clkdm,
+ NULL
+};
+
static struct clockdomain *clockdomains_omap3430es1[] __initdata = {
&gfx_3430es1_clkdm,
NULL,
@@ -378,21 +487,41 @@ static struct clockdomain *clockdomains_omap3430es2plus[] __initdata = {
NULL,
};
+static struct clockdomain *clockdomains_am35x[] __initdata = {
+ &mpu_am35x_clkdm,
+ &sgx_am35x_clkdm,
+ &dss_am35x_clkdm,
+ &per_am35x_clkdm,
+ &usbhost_am35x_clkdm,
+ &dpll5_clkdm,
+ NULL
+};
+
void __init omap3xxx_clockdomains_init(void)
{
struct clockdomain **sc;
+ unsigned int rev;
if (!cpu_is_omap34xx())
return;
clkdm_register_platform_funcs(&omap3_clkdm_operations);
- clkdm_register_clkdms(clockdomains_omap3430_common);
+ clkdm_register_clkdms(clockdomains_common);
- sc = (omap_rev() == OMAP3430_REV_ES1_0) ? clockdomains_omap3430es1 :
- clockdomains_omap3430es2plus;
+ rev = omap_rev();
- clkdm_register_clkdms(sc);
+ if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
+ clkdm_register_clkdms(clockdomains_am35x);
+ clkdm_register_autodeps(clkdm_am35x_autodeps);
+ } else {
+ clkdm_register_clkdms(clockdomains_omap3430);
+
+ sc = (rev == OMAP3430_REV_ES1_0) ?
+ clockdomains_omap3430es1 : clockdomains_omap3430es2plus;
+
+ clkdm_register_clkdms(sc);
+ clkdm_register_autodeps(clkdm_autodeps);
+ }
- clkdm_register_autodeps(clkdm_autodeps);
clkdm_complete_init();
}
diff --git a/arch/arm/mach-omap2/clockdomains44xx_data.c b/arch/arm/mach-omap2/clockdomains44xx_data.c
index c53425847493..63d60a773d3b 100644
--- a/arch/arm/mach-omap2/clockdomains44xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains44xx_data.c
@@ -381,7 +381,7 @@ static struct clockdomain l4_wkup_44xx_clkdm = {
.cm_inst = OMAP4430_PRM_WKUP_CM_INST,
.clkdm_offs = OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS,
.dep_bit = OMAP4430_L4WKUP_STATDEP_SHIFT,
- .flags = CLKDM_CAN_HWSUP,
+ .flags = CLKDM_CAN_HWSUP | CLKDM_ACTIVE_WITH_MPU,
};
static struct clockdomain emu_sys_44xx_clkdm = {
@@ -430,8 +430,6 @@ static struct clockdomain *clockdomains_omap44xx[] __initdata = {
&l4_wkup_44xx_clkdm,
&emu_sys_44xx_clkdm,
&l3_dma_44xx_clkdm,
- &prm_common_clkdm,
- &cm_common_clkdm,
NULL
};
diff --git a/arch/arm/mach-omap2/clockdomains_common_data.c b/arch/arm/mach-omap2/clockdomains_common_data.c
deleted file mode 100644
index 615b1f04967d..000000000000
--- a/arch/arm/mach-omap2/clockdomains_common_data.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * OMAP2+-common clockdomain data
- *
- * Copyright (C) 2008-2012 Texas Instruments, Inc.
- * Copyright (C) 2008-2010 Nokia Corporation
- *
- * Paul Walmsley, Jouni Högander
- */
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-
-#include "clockdomain.h"
-
-/* These are implicit clockdomains - they are never defined as such in TRM */
-struct clockdomain prm_common_clkdm = {
- .name = "prm_clkdm",
- .pwrdm = { .name = "wkup_pwrdm" },
-};
-
-struct clockdomain cm_common_clkdm = {
- .name = "cm_clkdm",
- .pwrdm = { .name = "core_pwrdm" },
-};
diff --git a/arch/arm/mach-omap2/cm-regbits-33xx.h b/arch/arm/mach-omap2/cm-regbits-33xx.h
new file mode 100644
index 000000000000..532027ee3d8d
--- /dev/null
+++ b/arch/arm/mach-omap2/cm-regbits-33xx.h
@@ -0,0 +1,687 @@
+/*
+ * AM33XX Power Management register bits
+ *
+ * This file is automatically generated from the AM33XX hardware databases.
+ * Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CM_REGBITS_33XX_H
+#define __ARCH_ARM_MACH_OMAP2_CM_REGBITS_33XX_H
+
+/*
+ * Used by CM_AUTOIDLE_DPLL_CORE, CM_AUTOIDLE_DPLL_DDR, CM_AUTOIDLE_DPLL_DISP,
+ * CM_AUTOIDLE_DPLL_MPU, CM_AUTOIDLE_DPLL_PER
+ */
+#define AM33XX_AUTO_DPLL_MODE_SHIFT 0
+#define AM33XX_AUTO_DPLL_MODE_MASK (0x7 << 0)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_ADC_FCLK_SHIFT 14
+#define AM33XX_CLKACTIVITY_ADC_FCLK_MASK (1 << 16)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_CAN_CLK_SHIFT 11
+#define AM33XX_CLKACTIVITY_CAN_CLK_MASK (1 << 11)
+
+/* Used by CM_PER_CLK_24MHZ_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_CLK_24MHZ_GCLK_SHIFT 4
+#define AM33XX_CLKACTIVITY_CLK_24MHZ_GCLK_MASK (1 << 4)
+
+/* Used by CM_PER_CPSW_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_CPSW_125MHZ_GCLK_SHIFT 4
+#define AM33XX_CLKACTIVITY_CPSW_125MHZ_GCLK_MASK (1 << 4)
+
+/* Used by CM_PER_L4HS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_CPSW_250MHZ_GCLK_SHIFT 4
+#define AM33XX_CLKACTIVITY_CPSW_250MHZ_GCLK_MASK (1 << 4)
+
+/* Used by CM_PER_L4HS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_CPSW_50MHZ_GCLK_SHIFT 5
+#define AM33XX_CLKACTIVITY_CPSW_50MHZ_GCLK_MASK (1 << 5)
+
+/* Used by CM_PER_L4HS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_CPSW_5MHZ_GCLK_SHIFT 6
+#define AM33XX_CLKACTIVITY_CPSW_5MHZ_GCLK_MASK (1 << 6)
+
+/* Used by CM_PER_L3_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_CPTS_RFT_GCLK_SHIFT 6
+#define AM33XX_CLKACTIVITY_CPTS_RFT_GCLK_MASK (1 << 6)
+
+/* Used by CM_CEFUSE_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_CUST_EFUSE_SYS_CLK_SHIFT 9
+#define AM33XX_CLKACTIVITY_CUST_EFUSE_SYS_CLK_MASK (1 << 9)
+
+/* Used by CM_L3_AON_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_DBGSYSCLK_SHIFT 2
+#define AM33XX_CLKACTIVITY_DBGSYSCLK_MASK (1 << 2)
+
+/* Used by CM_L3_AON_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_DEBUG_CLKA_SHIFT 4
+#define AM33XX_CLKACTIVITY_DEBUG_CLKA_MASK (1 << 4)
+
+/* Used by CM_PER_L3_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_EMIF_GCLK_SHIFT 2
+#define AM33XX_CLKACTIVITY_EMIF_GCLK_MASK (1 << 2)
+
+/* Used by CM_GFX_L3_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_GFX_FCLK_SHIFT 9
+#define AM33XX_CLKACTIVITY_GFX_FCLK_MASK (1 << 9)
+
+/* Used by CM_GFX_L3_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_GFX_L3_GCLK_SHIFT 8
+#define AM33XX_CLKACTIVITY_GFX_L3_GCLK_MASK (1 << 8)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_GPIO0_GDBCLK_SHIFT 8
+#define AM33XX_CLKACTIVITY_GPIO0_GDBCLK_MASK (1 << 8)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_GPIO_1_GDBCLK_SHIFT 19
+#define AM33XX_CLKACTIVITY_GPIO_1_GDBCLK_MASK (1 << 19)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_GPIO_2_GDBCLK_SHIFT 20
+#define AM33XX_CLKACTIVITY_GPIO_2_GDBCLK_MASK (1 << 20)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_GPIO_3_GDBCLK_SHIFT 21
+#define AM33XX_CLKACTIVITY_GPIO_3_GDBCLK_MASK (1 << 21)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_GPIO_4_GDBCLK_SHIFT 22
+#define AM33XX_CLKACTIVITY_GPIO_4_GDBCLK_MASK (1 << 22)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_GPIO_5_GDBCLK_SHIFT 26
+#define AM33XX_CLKACTIVITY_GPIO_5_GDBCLK_MASK (1 << 26)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_GPIO_6_GDBCLK_SHIFT 18
+#define AM33XX_CLKACTIVITY_GPIO_6_GDBCLK_MASK (1 << 18)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_I2C0_GFCLK_SHIFT 11
+#define AM33XX_CLKACTIVITY_I2C0_GFCLK_MASK (1 << 11)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_I2C_FCLK_SHIFT 24
+#define AM33XX_CLKACTIVITY_I2C_FCLK_MASK (1 << 24)
+
+/* Used by CM_PER_PRUSS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_PRUSS_IEP_GCLK_SHIFT 5
+#define AM33XX_CLKACTIVITY_PRUSS_IEP_GCLK_MASK (1 << 5)
+
+/* Used by CM_PER_PRUSS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_PRUSS_OCP_GCLK_SHIFT 4
+#define AM33XX_CLKACTIVITY_PRUSS_OCP_GCLK_MASK (1 << 4)
+
+/* Used by CM_PER_PRUSS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_PRUSS_UART_GCLK_SHIFT 6
+#define AM33XX_CLKACTIVITY_PRUSS_UART_GCLK_MASK (1 << 6)
+
+/* Used by CM_PER_L3S_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_L3S_GCLK_SHIFT 3
+#define AM33XX_CLKACTIVITY_L3S_GCLK_MASK (1 << 3)
+
+/* Used by CM_L3_AON_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_L3_AON_GCLK_SHIFT 3
+#define AM33XX_CLKACTIVITY_L3_AON_GCLK_MASK (1 << 3)
+
+/* Used by CM_PER_L3_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_L3_GCLK_SHIFT 4
+#define AM33XX_CLKACTIVITY_L3_GCLK_MASK (1 << 4)
+
+/* Used by CM_PER_L4FW_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_L4FW_GCLK_SHIFT 8
+#define AM33XX_CLKACTIVITY_L4FW_GCLK_MASK (1 << 8)
+
+/* Used by CM_PER_L4HS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_L4HS_GCLK_SHIFT 3
+#define AM33XX_CLKACTIVITY_L4HS_GCLK_MASK (1 << 3)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_L4LS_GCLK_SHIFT 8
+#define AM33XX_CLKACTIVITY_L4LS_GCLK_MASK (1 << 8)
+
+/* Used by CM_GFX_L4LS_GFX_CLKSTCTRL__1 */
+#define AM33XX_CLKACTIVITY_L4LS_GFX_GCLK_SHIFT 8
+#define AM33XX_CLKACTIVITY_L4LS_GFX_GCLK_MASK (1 << 8)
+
+/* Used by CM_CEFUSE_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_L4_CEFUSE_GICLK_SHIFT 8
+#define AM33XX_CLKACTIVITY_L4_CEFUSE_GICLK_MASK (1 << 8)
+
+/* Used by CM_RTC_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_L4_RTC_GCLK_SHIFT 8
+#define AM33XX_CLKACTIVITY_L4_RTC_GCLK_MASK (1 << 8)
+
+/* Used by CM_L4_WKUP_AON_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_L4_WKUP_AON_GCLK_SHIFT 2
+#define AM33XX_CLKACTIVITY_L4_WKUP_AON_GCLK_MASK (1 << 2)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_L4_WKUP_GCLK_SHIFT 2
+#define AM33XX_CLKACTIVITY_L4_WKUP_GCLK_MASK (1 << 2)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_LCDC_GCLK_SHIFT 17
+#define AM33XX_CLKACTIVITY_LCDC_GCLK_MASK (1 << 17)
+
+/* Used by CM_PER_LCDC_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_LCDC_L3_OCP_GCLK_SHIFT 4
+#define AM33XX_CLKACTIVITY_LCDC_L3_OCP_GCLK_MASK (1 << 4)
+
+/* Used by CM_PER_LCDC_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_LCDC_L4_OCP_GCLK_SHIFT 5
+#define AM33XX_CLKACTIVITY_LCDC_L4_OCP_GCLK_MASK (1 << 5)
+
+/* Used by CM_PER_L3_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_MCASP_GCLK_SHIFT 7
+#define AM33XX_CLKACTIVITY_MCASP_GCLK_MASK (1 << 7)
+
+/* Used by CM_PER_L3_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_MMC_FCLK_SHIFT 3
+#define AM33XX_CLKACTIVITY_MMC_FCLK_MASK (1 << 3)
+
+/* Used by CM_MPU_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_MPU_CLK_SHIFT 2
+#define AM33XX_CLKACTIVITY_MPU_CLK_MASK (1 << 2)
+
+/* Used by CM_PER_OCPWP_L3_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_OCPWP_L3_GCLK_SHIFT 4
+#define AM33XX_CLKACTIVITY_OCPWP_L3_GCLK_MASK (1 << 4)
+
+/* Used by CM_PER_OCPWP_L3_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_OCPWP_L4_GCLK_SHIFT 5
+#define AM33XX_CLKACTIVITY_OCPWP_L4_GCLK_MASK (1 << 5)
+
+/* Used by CM_RTC_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_RTC_32KCLK_SHIFT 9
+#define AM33XX_CLKACTIVITY_RTC_32KCLK_MASK (1 << 9)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_SPI_GCLK_SHIFT 25
+#define AM33XX_CLKACTIVITY_SPI_GCLK_MASK (1 << 25)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_SR_SYSCLK_SHIFT 3
+#define AM33XX_CLKACTIVITY_SR_SYSCLK_MASK (1 << 3)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_TIMER0_GCLK_SHIFT 10
+#define AM33XX_CLKACTIVITY_TIMER0_GCLK_MASK (1 << 10)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_TIMER1_GCLK_SHIFT 13
+#define AM33XX_CLKACTIVITY_TIMER1_GCLK_MASK (1 << 13)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_TIMER2_GCLK_SHIFT 14
+#define AM33XX_CLKACTIVITY_TIMER2_GCLK_MASK (1 << 14)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_TIMER3_GCLK_SHIFT 15
+#define AM33XX_CLKACTIVITY_TIMER3_GCLK_MASK (1 << 15)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_TIMER4_GCLK_SHIFT 16
+#define AM33XX_CLKACTIVITY_TIMER4_GCLK_MASK (1 << 16)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_TIMER5_GCLK_SHIFT 27
+#define AM33XX_CLKACTIVITY_TIMER5_GCLK_MASK (1 << 27)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_TIMER6_GCLK_SHIFT 28
+#define AM33XX_CLKACTIVITY_TIMER6_GCLK_MASK (1 << 28)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_TIMER7_GCLK_SHIFT 13
+#define AM33XX_CLKACTIVITY_TIMER7_GCLK_MASK (1 << 13)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_UART0_GFCLK_SHIFT 12
+#define AM33XX_CLKACTIVITY_UART0_GFCLK_MASK (1 << 12)
+
+/* Used by CM_PER_L4LS_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_UART_GFCLK_SHIFT 10
+#define AM33XX_CLKACTIVITY_UART_GFCLK_MASK (1 << 10)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_WDT0_GCLK_SHIFT 9
+#define AM33XX_CLKACTIVITY_WDT0_GCLK_MASK (1 << 9)
+
+/* Used by CM_WKUP_CLKSTCTRL */
+#define AM33XX_CLKACTIVITY_WDT1_GCLK_SHIFT 4
+#define AM33XX_CLKACTIVITY_WDT1_GCLK_MASK (1 << 4)
+
+/* Used by CLKSEL_GFX_FCLK */
+#define AM33XX_CLKDIV_SEL_GFX_FCLK_SHIFT 0
+#define AM33XX_CLKDIV_SEL_GFX_FCLK_MASK (1 << 0)
+
+/* Used by CM_CLKOUT_CTRL */
+#define AM33XX_CLKOUT2DIV_SHIFT 3
+#define AM33XX_CLKOUT2DIV_MASK (0x05 << 3)
+
+/* Used by CM_CLKOUT_CTRL */
+#define AM33XX_CLKOUT2EN_SHIFT 7
+#define AM33XX_CLKOUT2EN_MASK (1 << 7)
+
+/* Used by CM_CLKOUT_CTRL */
+#define AM33XX_CLKOUT2SOURCE_SHIFT 0
+#define AM33XX_CLKOUT2SOURCE_MASK (0x02 << 0)
+
+/*
+ * Used by CLKSEL_GPIO0_DBCLK, CLKSEL_LCDC_PIXEL_CLK, CLKSEL_TIMER2_CLK,
+ * CLKSEL_TIMER3_CLK, CLKSEL_TIMER4_CLK, CLKSEL_TIMER5_CLK, CLKSEL_TIMER6_CLK,
+ * CLKSEL_TIMER7_CLK
+ */
+#define AM33XX_CLKSEL_SHIFT 0
+#define AM33XX_CLKSEL_MASK (0x01 << 0)
+
+/*
+ * Renamed from CLKSEL Used by CLKSEL_PRUSS_OCP_CLK, CLKSEL_WDT1_CLK,
+ * CM_CPTS_RFT_CLKSEL
+ */
+#define AM33XX_CLKSEL_0_0_SHIFT 0
+#define AM33XX_CLKSEL_0_0_MASK (1 << 0)
+
+#define AM33XX_CLKSEL_0_1_SHIFT 0
+#define AM33XX_CLKSEL_0_1_MASK (3 << 0)
+
+/* Renamed from CLKSEL Used by CLKSEL_TIMER1MS_CLK */
+#define AM33XX_CLKSEL_0_2_SHIFT 0
+#define AM33XX_CLKSEL_0_2_MASK (7 << 0)
+
+/* Used by CLKSEL_GFX_FCLK */
+#define AM33XX_CLKSEL_GFX_FCLK_SHIFT 1
+#define AM33XX_CLKSEL_GFX_FCLK_MASK (1 << 1)
+
+/*
+ * Used by CM_MPU_CLKSTCTRL, CM_RTC_CLKSTCTRL, CM_PER_CLK_24MHZ_CLKSTCTRL,
+ * CM_PER_CPSW_CLKSTCTRL, CM_PER_PRUSS_CLKSTCTRL, CM_PER_L3S_CLKSTCTRL,
+ * CM_PER_L3_CLKSTCTRL, CM_PER_L4FW_CLKSTCTRL, CM_PER_L4HS_CLKSTCTRL,
+ * CM_PER_L4LS_CLKSTCTRL, CM_PER_LCDC_CLKSTCTRL, CM_PER_OCPWP_L3_CLKSTCTRL,
+ * CM_L3_AON_CLKSTCTRL, CM_L4_WKUP_AON_CLKSTCTRL, CM_WKUP_CLKSTCTRL,
+ * CM_GFX_L3_CLKSTCTRL, CM_GFX_L4LS_GFX_CLKSTCTRL__1, CM_CEFUSE_CLKSTCTRL
+ */
+#define AM33XX_CLKTRCTRL_SHIFT 0
+#define AM33XX_CLKTRCTRL_MASK (0x3 << 0)
+
+/*
+ * Used by CM_SSC_DELTAMSTEP_DPLL_CORE, CM_SSC_DELTAMSTEP_DPLL_DDR,
+ * CM_SSC_DELTAMSTEP_DPLL_DISP, CM_SSC_DELTAMSTEP_DPLL_MPU,
+ * CM_SSC_DELTAMSTEP_DPLL_PER
+ */
+#define AM33XX_DELTAMSTEP_SHIFT 0
+#define AM33XX_DELTAMSTEP_MASK (0x19 << 0)
+
+/* Used by CM_CLKSEL_DPLL_DDR, CM_CLKSEL_DPLL_DISP, CM_CLKSEL_DPLL_MPU */
+#define AM33XX_DPLL_BYP_CLKSEL_SHIFT 23
+#define AM33XX_DPLL_BYP_CLKSEL_MASK (1 << 23)
+
+/* Used by CM_CLKDCOLDO_DPLL_PER */
+#define AM33XX_DPLL_CLKDCOLDO_GATE_CTRL_SHIFT 8
+#define AM33XX_DPLL_CLKDCOLDO_GATE_CTRL_MASK (1 << 8)
+
+/* Used by CM_CLKDCOLDO_DPLL_PER */
+#define AM33XX_DPLL_CLKDCOLDO_PWDN_SHIFT 12
+#define AM33XX_DPLL_CLKDCOLDO_PWDN_MASK (1 << 12)
+
+/* Used by CM_DIV_M2_DPLL_DDR, CM_DIV_M2_DPLL_DISP, CM_DIV_M2_DPLL_MPU */
+#define AM33XX_DPLL_CLKOUT_DIV_SHIFT 0
+#define AM33XX_DPLL_CLKOUT_DIV_MASK (0x1f << 0)
+
+/* Renamed from DPLL_CLKOUT_DIV Used by CM_DIV_M2_DPLL_PER */
+#define AM33XX_DPLL_CLKOUT_DIV_0_6_SHIFT 0
+#define AM33XX_DPLL_CLKOUT_DIV_0_6_MASK (0x06 << 0)
+
+/* Used by CM_DIV_M2_DPLL_DDR, CM_DIV_M2_DPLL_DISP, CM_DIV_M2_DPLL_MPU */
+#define AM33XX_DPLL_CLKOUT_DIVCHACK_SHIFT 5
+#define AM33XX_DPLL_CLKOUT_DIVCHACK_MASK (1 << 5)
+
+/* Renamed from DPLL_CLKOUT_DIVCHACK Used by CM_DIV_M2_DPLL_PER */
+#define AM33XX_DPLL_CLKOUT_DIVCHACK_M2_PER_SHIFT 7
+#define AM33XX_DPLL_CLKOUT_DIVCHACK_M2_PER_MASK (1 << 7)
+
+/*
+ * Used by CM_DIV_M2_DPLL_DDR, CM_DIV_M2_DPLL_DISP, CM_DIV_M2_DPLL_MPU,
+ * CM_DIV_M2_DPLL_PER
+ */
+#define AM33XX_DPLL_CLKOUT_GATE_CTRL_SHIFT 8
+#define AM33XX_DPLL_CLKOUT_GATE_CTRL_MASK (1 << 8)
+
+/*
+ * Used by CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_DDR, CM_CLKSEL_DPLL_DISP,
+ * CM_CLKSEL_DPLL_MPU
+ */
+#define AM33XX_DPLL_DIV_SHIFT 0
+#define AM33XX_DPLL_DIV_MASK (0x7f << 0)
+
+#define AM33XX_DPLL_PER_DIV_MASK (0xff << 0)
+
+/* Renamed from DPLL_DIV Used by CM_CLKSEL_DPLL_PERIPH */
+#define AM33XX_DPLL_DIV_0_7_SHIFT 0
+#define AM33XX_DPLL_DIV_0_7_MASK (0x07 << 0)
+
+/*
+ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
+ * CM_CLKMODE_DPLL_MPU
+ */
+#define AM33XX_DPLL_DRIFTGUARD_EN_SHIFT 8
+#define AM33XX_DPLL_DRIFTGUARD_EN_MASK (1 << 8)
+
+/*
+ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
+ */
+#define AM33XX_DPLL_EN_SHIFT 0
+#define AM33XX_DPLL_EN_MASK (0x7 << 0)
+
+/*
+ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
+ * CM_CLKMODE_DPLL_MPU
+ */
+#define AM33XX_DPLL_LPMODE_EN_SHIFT 10
+#define AM33XX_DPLL_LPMODE_EN_MASK (1 << 10)
+
+/*
+ * Used by CM_CLKSEL_DPLL_CORE, CM_CLKSEL_DPLL_DDR, CM_CLKSEL_DPLL_DISP,
+ * CM_CLKSEL_DPLL_MPU
+ */
+#define AM33XX_DPLL_MULT_SHIFT 8
+#define AM33XX_DPLL_MULT_MASK (0x7ff << 8)
+
+/* Renamed from DPLL_MULT Used by CM_CLKSEL_DPLL_PERIPH */
+#define AM33XX_DPLL_MULT_PERIPH_SHIFT 8
+#define AM33XX_DPLL_MULT_PERIPH_MASK (0xfff << 8)
+
+/*
+ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
+ * CM_CLKMODE_DPLL_MPU
+ */
+#define AM33XX_DPLL_REGM4XEN_SHIFT 11
+#define AM33XX_DPLL_REGM4XEN_MASK (1 << 11)
+
+/* Used by CM_CLKSEL_DPLL_PERIPH */
+#define AM33XX_DPLL_SD_DIV_SHIFT 24
+#define AM33XX_DPLL_SD_DIV_MASK (24, 31)
+
+/*
+ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
+ */
+#define AM33XX_DPLL_SSC_ACK_SHIFT 13
+#define AM33XX_DPLL_SSC_ACK_MASK (1 << 13)
+
+/*
+ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
+ */
+#define AM33XX_DPLL_SSC_DOWNSPREAD_SHIFT 14
+#define AM33XX_DPLL_SSC_DOWNSPREAD_MASK (1 << 14)
+
+/*
+ * Used by CM_CLKMODE_DPLL_CORE, CM_CLKMODE_DPLL_DDR, CM_CLKMODE_DPLL_DISP,
+ * CM_CLKMODE_DPLL_MPU, CM_CLKMODE_DPLL_PER
+ */
+#define AM33XX_DPLL_SSC_EN_SHIFT 12
+#define AM33XX_DPLL_SSC_EN_MASK (1 << 12)
+
+/* Used by CM_DIV_M4_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT1_DIV_SHIFT 0
+#define AM33XX_HSDIVIDER_CLKOUT1_DIV_MASK (0x1f << 0)
+
+/* Used by CM_DIV_M4_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT1_DIVCHACK_SHIFT 5
+#define AM33XX_HSDIVIDER_CLKOUT1_DIVCHACK_MASK (1 << 5)
+
+/* Used by CM_DIV_M4_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT1_GATE_CTRL_SHIFT 8
+#define AM33XX_HSDIVIDER_CLKOUT1_GATE_CTRL_MASK (1 << 8)
+
+/* Used by CM_DIV_M4_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT1_PWDN_SHIFT 12
+#define AM33XX_HSDIVIDER_CLKOUT1_PWDN_MASK (1 << 12)
+
+/* Used by CM_DIV_M5_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT2_DIV_SHIFT 0
+#define AM33XX_HSDIVIDER_CLKOUT2_DIV_MASK (0x1f << 0)
+
+/* Used by CM_DIV_M5_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT2_DIVCHACK_SHIFT 5
+#define AM33XX_HSDIVIDER_CLKOUT2_DIVCHACK_MASK (1 << 5)
+
+/* Used by CM_DIV_M5_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT2_GATE_CTRL_SHIFT 8
+#define AM33XX_HSDIVIDER_CLKOUT2_GATE_CTRL_MASK (1 << 8)
+
+/* Used by CM_DIV_M5_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT2_PWDN_SHIFT 12
+#define AM33XX_HSDIVIDER_CLKOUT2_PWDN_MASK (1 << 12)
+
+/* Used by CM_DIV_M6_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT3_DIV_SHIFT 0
+#define AM33XX_HSDIVIDER_CLKOUT3_DIV_MASK (0x04 << 0)
+
+/* Used by CM_DIV_M6_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT3_DIVCHACK_SHIFT 5
+#define AM33XX_HSDIVIDER_CLKOUT3_DIVCHACK_MASK (1 << 5)
+
+/* Used by CM_DIV_M6_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT3_GATE_CTRL_SHIFT 8
+#define AM33XX_HSDIVIDER_CLKOUT3_GATE_CTRL_MASK (1 << 8)
+
+/* Used by CM_DIV_M6_DPLL_CORE */
+#define AM33XX_HSDIVIDER_CLKOUT3_PWDN_SHIFT 12
+#define AM33XX_HSDIVIDER_CLKOUT3_PWDN_MASK (1 << 12)
+
+/*
+ * Used by CM_MPU_MPU_CLKCTRL, CM_RTC_RTC_CLKCTRL, CM_PER_AES0_CLKCTRL,
+ * CM_PER_AES1_CLKCTRL, CM_PER_CLKDIV32K_CLKCTRL, CM_PER_CPGMAC0_CLKCTRL,
+ * CM_PER_DCAN0_CLKCTRL, CM_PER_DCAN1_CLKCTRL, CM_PER_DES_CLKCTRL,
+ * CM_PER_ELM_CLKCTRL, CM_PER_EMIF_CLKCTRL, CM_PER_EMIF_FW_CLKCTRL,
+ * CM_PER_EPWMSS0_CLKCTRL, CM_PER_EPWMSS1_CLKCTRL, CM_PER_EPWMSS2_CLKCTRL,
+ * CM_PER_GPIO1_CLKCTRL, CM_PER_GPIO2_CLKCTRL, CM_PER_GPIO3_CLKCTRL,
+ * CM_PER_GPIO4_CLKCTRL, CM_PER_GPIO5_CLKCTRL, CM_PER_GPIO6_CLKCTRL,
+ * CM_PER_GPMC_CLKCTRL, CM_PER_I2C1_CLKCTRL, CM_PER_I2C2_CLKCTRL,
+ * CM_PER_PRUSS_CLKCTRL, CM_PER_IEEE5000_CLKCTRL, CM_PER_L3_CLKCTRL,
+ * CM_PER_L3_INSTR_CLKCTRL, CM_PER_L4FW_CLKCTRL, CM_PER_L4HS_CLKCTRL,
+ * CM_PER_L4LS_CLKCTRL, CM_PER_LCDC_CLKCTRL, CM_PER_MAILBOX0_CLKCTRL,
+ * CM_PER_MAILBOX1_CLKCTRL, CM_PER_MCASP0_CLKCTRL, CM_PER_MCASP1_CLKCTRL,
+ * CM_PER_MCASP2_CLKCTRL, CM_PER_MLB_CLKCTRL, CM_PER_MMC0_CLKCTRL,
+ * CM_PER_MMC1_CLKCTRL, CM_PER_MMC2_CLKCTRL, CM_PER_MSTR_EXPS_CLKCTRL,
+ * CM_PER_OCMCRAM_CLKCTRL, CM_PER_OCPWP_CLKCTRL, CM_PER_PCIE_CLKCTRL,
+ * CM_PER_PKA_CLKCTRL, CM_PER_RNG_CLKCTRL, CM_PER_SHA0_CLKCTRL,
+ * CM_PER_SLV_EXPS_CLKCTRL, CM_PER_SPARE0_CLKCTRL, CM_PER_SPARE1_CLKCTRL,
+ * CM_PER_SPARE_CLKCTRL, CM_PER_SPI0_CLKCTRL, CM_PER_SPI1_CLKCTRL,
+ * CM_PER_SPI2_CLKCTRL, CM_PER_SPI3_CLKCTRL, CM_PER_SPINLOCK_CLKCTRL,
+ * CM_PER_TIMER2_CLKCTRL, CM_PER_TIMER3_CLKCTRL, CM_PER_TIMER4_CLKCTRL,
+ * CM_PER_TIMER5_CLKCTRL, CM_PER_TIMER6_CLKCTRL, CM_PER_TIMER7_CLKCTRL,
+ * CM_PER_TPCC_CLKCTRL, CM_PER_TPTC0_CLKCTRL, CM_PER_TPTC1_CLKCTRL,
+ * CM_PER_TPTC2_CLKCTRL, CM_PER_UART1_CLKCTRL, CM_PER_UART2_CLKCTRL,
+ * CM_PER_UART3_CLKCTRL, CM_PER_UART4_CLKCTRL, CM_PER_UART5_CLKCTRL,
+ * CM_PER_USB0_CLKCTRL, CM_WKUP_ADC_TSC_CLKCTRL, CM_WKUP_CONTROL_CLKCTRL,
+ * CM_WKUP_DEBUGSS_CLKCTRL, CM_WKUP_GPIO0_CLKCTRL, CM_WKUP_I2C0_CLKCTRL,
+ * CM_WKUP_L4WKUP_CLKCTRL, CM_WKUP_SMARTREFLEX0_CLKCTRL,
+ * CM_WKUP_SMARTREFLEX1_CLKCTRL, CM_WKUP_TIMER0_CLKCTRL,
+ * CM_WKUP_TIMER1_CLKCTRL, CM_WKUP_UART0_CLKCTRL, CM_WKUP_WDT0_CLKCTRL,
+ * CM_WKUP_WDT1_CLKCTRL, CM_GFX_BITBLT_CLKCTRL, CM_GFX_GFX_CLKCTRL,
+ * CM_GFX_MMUCFG_CLKCTRL, CM_GFX_MMUDATA_CLKCTRL, CM_CEFUSE_CEFUSE_CLKCTRL
+ */
+#define AM33XX_IDLEST_SHIFT 16
+#define AM33XX_IDLEST_MASK (0x3 << 16)
+#define AM33XX_IDLEST_VAL 0x3
+
+/* Used by CM_MAC_CLKSEL */
+#define AM33XX_MII_CLK_SEL_SHIFT 2
+#define AM33XX_MII_CLK_SEL_MASK (1 << 2)
+
+/*
+ * Used by CM_SSC_MODFREQDIV_DPLL_CORE, CM_SSC_MODFREQDIV_DPLL_DDR,
+ * CM_SSC_MODFREQDIV_DPLL_DISP, CM_SSC_MODFREQDIV_DPLL_MPU,
+ * CM_SSC_MODFREQDIV_DPLL_PER
+ */
+#define AM33XX_MODFREQDIV_EXPONENT_SHIFT 8
+#define AM33XX_MODFREQDIV_EXPONENT_MASK (0x10 << 8)
+
+/*
+ * Used by CM_SSC_MODFREQDIV_DPLL_CORE, CM_SSC_MODFREQDIV_DPLL_DDR,
+ * CM_SSC_MODFREQDIV_DPLL_DISP, CM_SSC_MODFREQDIV_DPLL_MPU,
+ * CM_SSC_MODFREQDIV_DPLL_PER
+ */
+#define AM33XX_MODFREQDIV_MANTISSA_SHIFT 0
+#define AM33XX_MODFREQDIV_MANTISSA_MASK (0x06 << 0)
+
+/*
+ * Used by CM_MPU_MPU_CLKCTRL, CM_RTC_RTC_CLKCTRL, CM_PER_AES0_CLKCTRL,
+ * CM_PER_AES1_CLKCTRL, CM_PER_CLKDIV32K_CLKCTRL, CM_PER_CPGMAC0_CLKCTRL,
+ * CM_PER_DCAN0_CLKCTRL, CM_PER_DCAN1_CLKCTRL, CM_PER_DES_CLKCTRL,
+ * CM_PER_ELM_CLKCTRL, CM_PER_EMIF_CLKCTRL, CM_PER_EMIF_FW_CLKCTRL,
+ * CM_PER_EPWMSS0_CLKCTRL, CM_PER_EPWMSS1_CLKCTRL, CM_PER_EPWMSS2_CLKCTRL,
+ * CM_PER_GPIO1_CLKCTRL, CM_PER_GPIO2_CLKCTRL, CM_PER_GPIO3_CLKCTRL,
+ * CM_PER_GPIO4_CLKCTRL, CM_PER_GPIO5_CLKCTRL, CM_PER_GPIO6_CLKCTRL,
+ * CM_PER_GPMC_CLKCTRL, CM_PER_I2C1_CLKCTRL, CM_PER_I2C2_CLKCTRL,
+ * CM_PER_PRUSS_CLKCTRL, CM_PER_IEEE5000_CLKCTRL, CM_PER_L3_CLKCTRL,
+ * CM_PER_L3_INSTR_CLKCTRL, CM_PER_L4FW_CLKCTRL, CM_PER_L4HS_CLKCTRL,
+ * CM_PER_L4LS_CLKCTRL, CM_PER_LCDC_CLKCTRL, CM_PER_MAILBOX0_CLKCTRL,
+ * CM_PER_MAILBOX1_CLKCTRL, CM_PER_MCASP0_CLKCTRL, CM_PER_MCASP1_CLKCTRL,
+ * CM_PER_MCASP2_CLKCTRL, CM_PER_MLB_CLKCTRL, CM_PER_MMC0_CLKCTRL,
+ * CM_PER_MMC1_CLKCTRL, CM_PER_MMC2_CLKCTRL, CM_PER_MSTR_EXPS_CLKCTRL,
+ * CM_PER_OCMCRAM_CLKCTRL, CM_PER_OCPWP_CLKCTRL, CM_PER_PCIE_CLKCTRL,
+ * CM_PER_PKA_CLKCTRL, CM_PER_RNG_CLKCTRL, CM_PER_SHA0_CLKCTRL,
+ * CM_PER_SLV_EXPS_CLKCTRL, CM_PER_SPARE0_CLKCTRL, CM_PER_SPARE1_CLKCTRL,
+ * CM_PER_SPARE_CLKCTRL, CM_PER_SPI0_CLKCTRL, CM_PER_SPI1_CLKCTRL,
+ * CM_PER_SPI2_CLKCTRL, CM_PER_SPI3_CLKCTRL, CM_PER_SPINLOCK_CLKCTRL,
+ * CM_PER_TIMER2_CLKCTRL, CM_PER_TIMER3_CLKCTRL, CM_PER_TIMER4_CLKCTRL,
+ * CM_PER_TIMER5_CLKCTRL, CM_PER_TIMER6_CLKCTRL, CM_PER_TIMER7_CLKCTRL,
+ * CM_PER_TPCC_CLKCTRL, CM_PER_TPTC0_CLKCTRL, CM_PER_TPTC1_CLKCTRL,
+ * CM_PER_TPTC2_CLKCTRL, CM_PER_UART1_CLKCTRL, CM_PER_UART2_CLKCTRL,
+ * CM_PER_UART3_CLKCTRL, CM_PER_UART4_CLKCTRL, CM_PER_UART5_CLKCTRL,
+ * CM_PER_USB0_CLKCTRL, CM_WKUP_ADC_TSC_CLKCTRL, CM_WKUP_CONTROL_CLKCTRL,
+ * CM_WKUP_DEBUGSS_CLKCTRL, CM_WKUP_GPIO0_CLKCTRL, CM_WKUP_I2C0_CLKCTRL,
+ * CM_WKUP_L4WKUP_CLKCTRL, CM_WKUP_SMARTREFLEX0_CLKCTRL,
+ * CM_WKUP_SMARTREFLEX1_CLKCTRL, CM_WKUP_TIMER0_CLKCTRL,
+ * CM_WKUP_TIMER1_CLKCTRL, CM_WKUP_UART0_CLKCTRL, CM_WKUP_WDT0_CLKCTRL,
+ * CM_WKUP_WDT1_CLKCTRL, CM_WKUP_WKUP_M3_CLKCTRL, CM_GFX_BITBLT_CLKCTRL,
+ * CM_GFX_GFX_CLKCTRL, CM_GFX_MMUCFG_CLKCTRL, CM_GFX_MMUDATA_CLKCTRL,
+ * CM_CEFUSE_CEFUSE_CLKCTRL
+ */
+#define AM33XX_MODULEMODE_SHIFT 0
+#define AM33XX_MODULEMODE_MASK (0x3 << 0)
+
+/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
+#define AM33XX_OPTCLK_DEBUG_CLKA_SHIFT 30
+#define AM33XX_OPTCLK_DEBUG_CLKA_MASK (1 << 30)
+
+/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
+#define AM33XX_OPTFCLKEN_DBGSYSCLK_SHIFT 19
+#define AM33XX_OPTFCLKEN_DBGSYSCLK_MASK (1 << 19)
+
+/* Used by CM_WKUP_GPIO0_CLKCTRL */
+#define AM33XX_OPTFCLKEN_GPIO0_GDBCLK_SHIFT 18
+#define AM33XX_OPTFCLKEN_GPIO0_GDBCLK_MASK (1 << 18)
+
+/* Used by CM_PER_GPIO1_CLKCTRL */
+#define AM33XX_OPTFCLKEN_GPIO_1_GDBCLK_SHIFT 18
+#define AM33XX_OPTFCLKEN_GPIO_1_GDBCLK_MASK (1 << 18)
+
+/* Used by CM_PER_GPIO2_CLKCTRL */
+#define AM33XX_OPTFCLKEN_GPIO_2_GDBCLK_SHIFT 18
+#define AM33XX_OPTFCLKEN_GPIO_2_GDBCLK_MASK (1 << 18)
+
+/* Used by CM_PER_GPIO3_CLKCTRL */
+#define AM33XX_OPTFCLKEN_GPIO_3_GDBCLK_SHIFT 18
+#define AM33XX_OPTFCLKEN_GPIO_3_GDBCLK_MASK (1 << 18)
+
+/* Used by CM_PER_GPIO4_CLKCTRL */
+#define AM33XX_OPTFCLKEN_GPIO_4_GDBCLK_SHIFT 18
+#define AM33XX_OPTFCLKEN_GPIO_4_GDBCLK_MASK (1 << 18)
+
+/* Used by CM_PER_GPIO5_CLKCTRL */
+#define AM33XX_OPTFCLKEN_GPIO_5_GDBCLK_SHIFT 18
+#define AM33XX_OPTFCLKEN_GPIO_5_GDBCLK_MASK (1 << 18)
+
+/* Used by CM_PER_GPIO6_CLKCTRL */
+#define AM33XX_OPTFCLKEN_GPIO_6_GDBCLK_SHIFT 18
+#define AM33XX_OPTFCLKEN_GPIO_6_GDBCLK_MASK (1 << 18)
+
+/*
+ * Used by CM_MPU_MPU_CLKCTRL, CM_PER_CPGMAC0_CLKCTRL, CM_PER_PRUSS_CLKCTRL,
+ * CM_PER_IEEE5000_CLKCTRL, CM_PER_LCDC_CLKCTRL, CM_PER_MLB_CLKCTRL,
+ * CM_PER_MSTR_EXPS_CLKCTRL, CM_PER_OCPWP_CLKCTRL, CM_PER_PCIE_CLKCTRL,
+ * CM_PER_SPARE_CLKCTRL, CM_PER_TPTC0_CLKCTRL, CM_PER_TPTC1_CLKCTRL,
+ * CM_PER_TPTC2_CLKCTRL, CM_PER_USB0_CLKCTRL, CM_WKUP_DEBUGSS_CLKCTRL,
+ * CM_WKUP_WKUP_M3_CLKCTRL, CM_GFX_BITBLT_CLKCTRL, CM_GFX_GFX_CLKCTRL
+ */
+#define AM33XX_STBYST_SHIFT 18
+#define AM33XX_STBYST_MASK (1 << 18)
+
+/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
+#define AM33XX_STM_PMD_CLKDIVSEL_SHIFT 27
+#define AM33XX_STM_PMD_CLKDIVSEL_MASK (0x29 << 27)
+
+/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
+#define AM33XX_STM_PMD_CLKSEL_SHIFT 22
+#define AM33XX_STM_PMD_CLKSEL_MASK (0x23 << 22)
+
+/*
+ * Used by CM_IDLEST_DPLL_CORE, CM_IDLEST_DPLL_DDR, CM_IDLEST_DPLL_DISP,
+ * CM_IDLEST_DPLL_MPU, CM_IDLEST_DPLL_PER
+ */
+#define AM33XX_ST_DPLL_CLK_SHIFT 0
+#define AM33XX_ST_DPLL_CLK_MASK (1 << 0)
+
+/* Used by CM_CLKDCOLDO_DPLL_PER */
+#define AM33XX_ST_DPLL_CLKDCOLDO_SHIFT 8
+#define AM33XX_ST_DPLL_CLKDCOLDO_MASK (1 << 8)
+
+/*
+ * Used by CM_DIV_M2_DPLL_DDR, CM_DIV_M2_DPLL_DISP, CM_DIV_M2_DPLL_MPU,
+ * CM_DIV_M2_DPLL_PER
+ */
+#define AM33XX_ST_DPLL_CLKOUT_SHIFT 9
+#define AM33XX_ST_DPLL_CLKOUT_MASK (1 << 9)
+
+/* Used by CM_DIV_M4_DPLL_CORE */
+#define AM33XX_ST_HSDIVIDER_CLKOUT1_SHIFT 9
+#define AM33XX_ST_HSDIVIDER_CLKOUT1_MASK (1 << 9)
+
+/* Used by CM_DIV_M5_DPLL_CORE */
+#define AM33XX_ST_HSDIVIDER_CLKOUT2_SHIFT 9
+#define AM33XX_ST_HSDIVIDER_CLKOUT2_MASK (1 << 9)
+
+/* Used by CM_DIV_M6_DPLL_CORE */
+#define AM33XX_ST_HSDIVIDER_CLKOUT3_SHIFT 9
+#define AM33XX_ST_HSDIVIDER_CLKOUT3_MASK (1 << 9)
+
+/*
+ * Used by CM_IDLEST_DPLL_CORE, CM_IDLEST_DPLL_DDR, CM_IDLEST_DPLL_DISP,
+ * CM_IDLEST_DPLL_MPU, CM_IDLEST_DPLL_PER
+ */
+#define AM33XX_ST_MN_BYPASS_SHIFT 8
+#define AM33XX_ST_MN_BYPASS_MASK (1 << 8)
+
+/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
+#define AM33XX_TRC_PMD_CLKDIVSEL_SHIFT 24
+#define AM33XX_TRC_PMD_CLKDIVSEL_MASK (0x26 << 24)
+
+/* Used by CM_WKUP_DEBUGSS_CLKCTRL */
+#define AM33XX_TRC_PMD_CLKSEL_SHIFT 20
+#define AM33XX_TRC_PMD_CLKSEL_MASK (0x21 << 20)
+
+/* Used by CONTROL_SEC_CLK_CTRL */
+#define AM33XX_TIMER0_CLKSEL_MASK (0x3 << 4)
+#endif
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 8083a8cdc55f..766338fe4d34 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -169,8 +169,6 @@
/* AM35XX specific CM_ICLKEN1_CORE bits */
#define AM35XX_EN_IPSS_MASK (1 << 4)
#define AM35XX_EN_IPSS_SHIFT 4
-#define AM35XX_EN_UART4_MASK (1 << 23)
-#define AM35XX_EN_UART4_SHIFT 23
/* CM_ICLKEN2_CORE */
#define OMAP3430_EN_PKA_MASK (1 << 4)
@@ -207,6 +205,8 @@
#define OMAP3430_ST_DES2_MASK (1 << 26)
#define OMAP3430_ST_MSPRO_SHIFT 23
#define OMAP3430_ST_MSPRO_MASK (1 << 23)
+#define AM35XX_ST_UART4_SHIFT 23
+#define AM35XX_ST_UART4_MASK (1 << 23)
#define OMAP3430_ST_HDQ_SHIFT 22
#define OMAP3430_ST_HDQ_MASK (1 << 22)
#define OMAP3430ES1_ST_FAC_SHIFT 8
diff --git a/arch/arm/mach-omap2/cm33xx.c b/arch/arm/mach-omap2/cm33xx.c
new file mode 100644
index 000000000000..13f56eafef03
--- /dev/null
+++ b/arch/arm/mach-omap2/cm33xx.c
@@ -0,0 +1,313 @@
+/*
+ * AM33XX CM functions
+ *
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * Reference taken from from OMAP4 cminst44xx.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <plat/common.h>
+
+#include "cm.h"
+#include "cm33xx.h"
+#include "cm-regbits-34xx.h"
+#include "cm-regbits-33xx.h"
+#include "prm33xx.h"
+
+/*
+ * CLKCTRL_IDLEST_*: possible values for the CM_*_CLKCTRL.IDLEST bitfield:
+ *
+ * 0x0 func: Module is fully functional, including OCP
+ * 0x1 trans: Module is performing transition: wakeup, or sleep, or sleep
+ * abortion
+ * 0x2 idle: Module is in Idle mode (only OCP part). It is functional if
+ * using separate functional clock
+ * 0x3 disabled: Module is disabled and cannot be accessed
+ *
+ */
+#define CLKCTRL_IDLEST_FUNCTIONAL 0x0
+#define CLKCTRL_IDLEST_INTRANSITION 0x1
+#define CLKCTRL_IDLEST_INTERFACE_IDLE 0x2
+#define CLKCTRL_IDLEST_DISABLED 0x3
+
+/* Private functions */
+
+/* Read a register in a CM instance */
+static inline u32 am33xx_cm_read_reg(s16 inst, u16 idx)
+{
+ return __raw_readl(cm_base + inst + idx);
+}
+
+/* Write into a register in a CM */
+static inline void am33xx_cm_write_reg(u32 val, s16 inst, u16 idx)
+{
+ __raw_writel(val, cm_base + inst + idx);
+}
+
+/* Read-modify-write a register in CM */
+static inline u32 am33xx_cm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx)
+{
+ u32 v;
+
+ v = am33xx_cm_read_reg(inst, idx);
+ v &= ~mask;
+ v |= bits;
+ am33xx_cm_write_reg(v, inst, idx);
+
+ return v;
+}
+
+static inline u32 am33xx_cm_set_reg_bits(u32 bits, s16 inst, s16 idx)
+{
+ return am33xx_cm_rmw_reg_bits(bits, bits, inst, idx);
+}
+
+static inline u32 am33xx_cm_clear_reg_bits(u32 bits, s16 inst, s16 idx)
+{
+ return am33xx_cm_rmw_reg_bits(bits, 0x0, inst, idx);
+}
+
+static inline u32 am33xx_cm_read_reg_bits(u16 inst, s16 idx, u32 mask)
+{
+ u32 v;
+
+ v = am33xx_cm_read_reg(inst, idx);
+ v &= mask;
+ v >>= __ffs(mask);
+
+ return v;
+}
+
+/**
+ * _clkctrl_idlest - read a CM_*_CLKCTRL register; mask & shift IDLEST bitfield
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
+ *
+ * Return the IDLEST bitfield of a CM_*_CLKCTRL register, shifted down to
+ * bit 0.
+ */
+static u32 _clkctrl_idlest(u16 inst, s16 cdoffs, u16 clkctrl_offs)
+{
+ u32 v = am33xx_cm_read_reg(inst, clkctrl_offs);
+ v &= AM33XX_IDLEST_MASK;
+ v >>= AM33XX_IDLEST_SHIFT;
+ return v;
+}
+
+/**
+ * _is_module_ready - can module registers be accessed without causing an abort?
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
+ *
+ * Returns true if the module's CM_*_CLKCTRL.IDLEST bitfield is either
+ * *FUNCTIONAL or *INTERFACE_IDLE; false otherwise.
+ */
+static bool _is_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs)
+{
+ u32 v;
+
+ v = _clkctrl_idlest(inst, cdoffs, clkctrl_offs);
+
+ return (v == CLKCTRL_IDLEST_FUNCTIONAL ||
+ v == CLKCTRL_IDLEST_INTERFACE_IDLE) ? true : false;
+}
+
+/**
+ * _clktrctrl_write - write @c to a CM_CLKSTCTRL.CLKTRCTRL register bitfield
+ * @c: CLKTRCTRL register bitfield (LSB = bit 0, i.e., unshifted)
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * @c must be the unshifted value for CLKTRCTRL - i.e., this function
+ * will handle the shift itself.
+ */
+static void _clktrctrl_write(u8 c, s16 inst, u16 cdoffs)
+{
+ u32 v;
+
+ v = am33xx_cm_read_reg(inst, cdoffs);
+ v &= ~AM33XX_CLKTRCTRL_MASK;
+ v |= c << AM33XX_CLKTRCTRL_SHIFT;
+ am33xx_cm_write_reg(v, inst, cdoffs);
+}
+
+/* Public functions */
+
+/**
+ * am33xx_cm_is_clkdm_in_hwsup - is a clockdomain in hwsup idle mode?
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * Returns true if the clockdomain referred to by (@inst, @cdoffs)
+ * is in hardware-supervised idle mode, or 0 otherwise.
+ */
+bool am33xx_cm_is_clkdm_in_hwsup(s16 inst, u16 cdoffs)
+{
+ u32 v;
+
+ v = am33xx_cm_read_reg(inst, cdoffs);
+ v &= AM33XX_CLKTRCTRL_MASK;
+ v >>= AM33XX_CLKTRCTRL_SHIFT;
+
+ return (v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ? true : false;
+}
+
+/**
+ * am33xx_cm_clkdm_enable_hwsup - put a clockdomain in hwsup-idle mode
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * Put a clockdomain referred to by (@inst, @cdoffs) into
+ * hardware-supervised idle mode. No return value.
+ */
+void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 cdoffs)
+{
+ _clktrctrl_write(OMAP34XX_CLKSTCTRL_ENABLE_AUTO, inst, cdoffs);
+}
+
+/**
+ * am33xx_cm_clkdm_disable_hwsup - put a clockdomain in swsup-idle mode
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * Put a clockdomain referred to by (@inst, @cdoffs) into
+ * software-supervised idle mode, i.e., controlled manually by the
+ * Linux OMAP clockdomain code. No return value.
+ */
+void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 cdoffs)
+{
+ _clktrctrl_write(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, inst, cdoffs);
+}
+
+/**
+ * am33xx_cm_clkdm_force_sleep - try to put a clockdomain into idle
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * Put a clockdomain referred to by (@inst, @cdoffs) into idle
+ * No return value.
+ */
+void am33xx_cm_clkdm_force_sleep(s16 inst, u16 cdoffs)
+{
+ _clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, inst, cdoffs);
+}
+
+/**
+ * am33xx_cm_clkdm_force_wakeup - try to take a clockdomain out of idle
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ *
+ * Take a clockdomain referred to by (@inst, @cdoffs) out of idle,
+ * waking it up. No return value.
+ */
+void am33xx_cm_clkdm_force_wakeup(s16 inst, u16 cdoffs)
+{
+ _clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_WAKEUP, inst, cdoffs);
+}
+
+/*
+ *
+ */
+
+/**
+ * am33xx_cm_wait_module_ready - wait for a module to be in 'func' state
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
+ *
+ * Wait for the module IDLEST to be functional. If the idle state is in any
+ * the non functional state (trans, idle or disabled), module and thus the
+ * sysconfig cannot be accessed and will probably lead to an "imprecise
+ * external abort"
+ */
+int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs, u16 clkctrl_offs)
+{
+ int i = 0;
+
+ if (!clkctrl_offs)
+ return 0;
+
+ omap_test_timeout(_is_module_ready(inst, cdoffs, clkctrl_offs),
+ MAX_MODULE_READY_TIME, i);
+
+ return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
+}
+
+/**
+ * am33xx_cm_wait_module_idle - wait for a module to be in 'disabled'
+ * state
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
+ *
+ * Wait for the module IDLEST to be disabled. Some PRCM transition,
+ * like reset assertion or parent clock de-activation must wait the
+ * module to be fully disabled.
+ */
+int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs, u16 clkctrl_offs)
+{
+ int i = 0;
+
+ if (!clkctrl_offs)
+ return 0;
+
+ omap_test_timeout((_clkctrl_idlest(inst, cdoffs, clkctrl_offs) ==
+ CLKCTRL_IDLEST_DISABLED),
+ MAX_MODULE_READY_TIME, i);
+
+ return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
+}
+
+/**
+ * am33xx_cm_module_enable - Enable the modulemode inside CLKCTRL
+ * @mode: Module mode (SW or HW)
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
+ *
+ * No return value.
+ */
+void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs, u16 clkctrl_offs)
+{
+ u32 v;
+
+ v = am33xx_cm_read_reg(inst, clkctrl_offs);
+ v &= ~AM33XX_MODULEMODE_MASK;
+ v |= mode << AM33XX_MODULEMODE_SHIFT;
+ am33xx_cm_write_reg(v, inst, clkctrl_offs);
+}
+
+/**
+ * am33xx_cm_module_disable - Disable the module inside CLKCTRL
+ * @inst: CM instance register offset (*_INST macro)
+ * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
+ * @clkctrl_offs: Module clock control register offset (*_CLKCTRL macro)
+ *
+ * No return value.
+ */
+void am33xx_cm_module_disable(u16 inst, s16 cdoffs, u16 clkctrl_offs)
+{
+ u32 v;
+
+ v = am33xx_cm_read_reg(inst, clkctrl_offs);
+ v &= ~AM33XX_MODULEMODE_MASK;
+ am33xx_cm_write_reg(v, inst, clkctrl_offs);
+}
diff --git a/arch/arm/mach-omap2/cm33xx.h b/arch/arm/mach-omap2/cm33xx.h
new file mode 100644
index 000000000000..5fa0b62e1a79
--- /dev/null
+++ b/arch/arm/mach-omap2/cm33xx.h
@@ -0,0 +1,420 @@
+/*
+ * AM33XX CM offset macros
+ *
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
+ * Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CM_33XX_H
+#define __ARCH_ARM_MACH_OMAP2_CM_33XX_H
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include "common.h"
+
+#include "cm.h"
+#include "cm-regbits-33xx.h"
+#include "cm33xx.h"
+
+/* CM base address */
+#define AM33XX_CM_BASE 0x44e00000
+
+#define AM33XX_CM_REGADDR(inst, reg) \
+ AM33XX_L4_WK_IO_ADDRESS(AM33XX_CM_BASE + (inst) + (reg))
+
+/* CM instances */
+#define AM33XX_CM_PER_MOD 0x0000
+#define AM33XX_CM_WKUP_MOD 0x0400
+#define AM33XX_CM_DPLL_MOD 0x0500
+#define AM33XX_CM_MPU_MOD 0x0600
+#define AM33XX_CM_DEVICE_MOD 0x0700
+#define AM33XX_CM_RTC_MOD 0x0800
+#define AM33XX_CM_GFX_MOD 0x0900
+#define AM33XX_CM_CEFUSE_MOD 0x0A00
+
+/* CM */
+
+/* CM.PER_CM register offsets */
+#define AM33XX_CM_PER_L4LS_CLKSTCTRL_OFFSET 0x0000
+#define AM33XX_CM_PER_L4LS_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0000)
+#define AM33XX_CM_PER_L3S_CLKSTCTRL_OFFSET 0x0004
+#define AM33XX_CM_PER_L3S_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0004)
+#define AM33XX_CM_PER_L4FW_CLKSTCTRL_OFFSET 0x0008
+#define AM33XX_CM_PER_L4FW_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0008)
+#define AM33XX_CM_PER_L3_CLKSTCTRL_OFFSET 0x000c
+#define AM33XX_CM_PER_L3_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x000c)
+#define AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET 0x0014
+#define AM33XX_CM_PER_CPGMAC0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0014)
+#define AM33XX_CM_PER_LCDC_CLKCTRL_OFFSET 0x0018
+#define AM33XX_CM_PER_LCDC_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0018)
+#define AM33XX_CM_PER_USB0_CLKCTRL_OFFSET 0x001c
+#define AM33XX_CM_PER_USB0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x001c)
+#define AM33XX_CM_PER_MLB_CLKCTRL_OFFSET 0x0020
+#define AM33XX_CM_PER_MLB_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0020)
+#define AM33XX_CM_PER_TPTC0_CLKCTRL_OFFSET 0x0024
+#define AM33XX_CM_PER_TPTC0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0024)
+#define AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET 0x0028
+#define AM33XX_CM_PER_EMIF_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0028)
+#define AM33XX_CM_PER_OCMCRAM_CLKCTRL_OFFSET 0x002c
+#define AM33XX_CM_PER_OCMCRAM_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x002c)
+#define AM33XX_CM_PER_GPMC_CLKCTRL_OFFSET 0x0030
+#define AM33XX_CM_PER_GPMC_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0030)
+#define AM33XX_CM_PER_MCASP0_CLKCTRL_OFFSET 0x0034
+#define AM33XX_CM_PER_MCASP0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0034)
+#define AM33XX_CM_PER_UART5_CLKCTRL_OFFSET 0x0038
+#define AM33XX_CM_PER_UART5_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0038)
+#define AM33XX_CM_PER_MMC0_CLKCTRL_OFFSET 0x003c
+#define AM33XX_CM_PER_MMC0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x003c)
+#define AM33XX_CM_PER_ELM_CLKCTRL_OFFSET 0x0040
+#define AM33XX_CM_PER_ELM_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0040)
+#define AM33XX_CM_PER_I2C2_CLKCTRL_OFFSET 0x0044
+#define AM33XX_CM_PER_I2C2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0044)
+#define AM33XX_CM_PER_I2C1_CLKCTRL_OFFSET 0x0048
+#define AM33XX_CM_PER_I2C1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0048)
+#define AM33XX_CM_PER_SPI0_CLKCTRL_OFFSET 0x004c
+#define AM33XX_CM_PER_SPI0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x004c)
+#define AM33XX_CM_PER_SPI1_CLKCTRL_OFFSET 0x0050
+#define AM33XX_CM_PER_SPI1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0050)
+#define AM33XX_CM_PER_SPI2_CLKCTRL_OFFSET 0x0054
+#define AM33XX_CM_PER_SPI2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0054)
+#define AM33XX_CM_PER_SPI3_CLKCTRL_OFFSET 0x0058
+#define AM33XX_CM_PER_SPI3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0058)
+#define AM33XX_CM_PER_L4LS_CLKCTRL_OFFSET 0x0060
+#define AM33XX_CM_PER_L4LS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0060)
+#define AM33XX_CM_PER_L4FW_CLKCTRL_OFFSET 0x0064
+#define AM33XX_CM_PER_L4FW_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0064)
+#define AM33XX_CM_PER_MCASP1_CLKCTRL_OFFSET 0x0068
+#define AM33XX_CM_PER_MCASP1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0068)
+#define AM33XX_CM_PER_UART1_CLKCTRL_OFFSET 0x006c
+#define AM33XX_CM_PER_UART1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x006c)
+#define AM33XX_CM_PER_UART2_CLKCTRL_OFFSET 0x0070
+#define AM33XX_CM_PER_UART2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0070)
+#define AM33XX_CM_PER_UART3_CLKCTRL_OFFSET 0x0074
+#define AM33XX_CM_PER_UART3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0074)
+#define AM33XX_CM_PER_UART4_CLKCTRL_OFFSET 0x0078
+#define AM33XX_CM_PER_UART4_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0078)
+#define AM33XX_CM_PER_TIMER7_CLKCTRL_OFFSET 0x007c
+#define AM33XX_CM_PER_TIMER7_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x007c)
+#define AM33XX_CM_PER_TIMER2_CLKCTRL_OFFSET 0x0080
+#define AM33XX_CM_PER_TIMER2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0080)
+#define AM33XX_CM_PER_TIMER3_CLKCTRL_OFFSET 0x0084
+#define AM33XX_CM_PER_TIMER3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0084)
+#define AM33XX_CM_PER_TIMER4_CLKCTRL_OFFSET 0x0088
+#define AM33XX_CM_PER_TIMER4_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0088)
+#define AM33XX_CM_PER_MCASP2_CLKCTRL_OFFSET 0x008c
+#define AM33XX_CM_PER_MCASP2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x008c)
+#define AM33XX_CM_PER_RNG_CLKCTRL_OFFSET 0x0090
+#define AM33XX_CM_PER_RNG_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0090)
+#define AM33XX_CM_PER_AES0_CLKCTRL_OFFSET 0x0094
+#define AM33XX_CM_PER_AES0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0094)
+#define AM33XX_CM_PER_AES1_CLKCTRL_OFFSET 0x0098
+#define AM33XX_CM_PER_AES1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0098)
+#define AM33XX_CM_PER_DES_CLKCTRL_OFFSET 0x009c
+#define AM33XX_CM_PER_DES_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x009c)
+#define AM33XX_CM_PER_SHA0_CLKCTRL_OFFSET 0x00a0
+#define AM33XX_CM_PER_SHA0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00a0)
+#define AM33XX_CM_PER_PKA_CLKCTRL_OFFSET 0x00a4
+#define AM33XX_CM_PER_PKA_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00a4)
+#define AM33XX_CM_PER_GPIO6_CLKCTRL_OFFSET 0x00a8
+#define AM33XX_CM_PER_GPIO6_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00a8)
+#define AM33XX_CM_PER_GPIO1_CLKCTRL_OFFSET 0x00ac
+#define AM33XX_CM_PER_GPIO1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00ac)
+#define AM33XX_CM_PER_GPIO2_CLKCTRL_OFFSET 0x00b0
+#define AM33XX_CM_PER_GPIO2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00b0)
+#define AM33XX_CM_PER_GPIO3_CLKCTRL_OFFSET 0x00b4
+#define AM33XX_CM_PER_GPIO3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00b4)
+#define AM33XX_CM_PER_GPIO4_CLKCTRL_OFFSET 0x00b8
+#define AM33XX_CM_PER_GPIO4_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00b8)
+#define AM33XX_CM_PER_TPCC_CLKCTRL_OFFSET 0x00bc
+#define AM33XX_CM_PER_TPCC_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00bc)
+#define AM33XX_CM_PER_DCAN0_CLKCTRL_OFFSET 0x00c0
+#define AM33XX_CM_PER_DCAN0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00c0)
+#define AM33XX_CM_PER_DCAN1_CLKCTRL_OFFSET 0x00c4
+#define AM33XX_CM_PER_DCAN1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00c4)
+#define AM33XX_CM_PER_EPWMSS1_CLKCTRL_OFFSET 0x00cc
+#define AM33XX_CM_PER_EPWMSS1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00cc)
+#define AM33XX_CM_PER_EMIF_FW_CLKCTRL_OFFSET 0x00d0
+#define AM33XX_CM_PER_EMIF_FW_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00d0)
+#define AM33XX_CM_PER_EPWMSS0_CLKCTRL_OFFSET 0x00d4
+#define AM33XX_CM_PER_EPWMSS0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00d4)
+#define AM33XX_CM_PER_EPWMSS2_CLKCTRL_OFFSET 0x00d8
+#define AM33XX_CM_PER_EPWMSS2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00d8)
+#define AM33XX_CM_PER_L3_INSTR_CLKCTRL_OFFSET 0x00dc
+#define AM33XX_CM_PER_L3_INSTR_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00dc)
+#define AM33XX_CM_PER_L3_CLKCTRL_OFFSET 0x00e0
+#define AM33XX_CM_PER_L3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00e0)
+#define AM33XX_CM_PER_IEEE5000_CLKCTRL_OFFSET 0x00e4
+#define AM33XX_CM_PER_IEEE5000_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00e4)
+#define AM33XX_CM_PER_PRUSS_CLKCTRL_OFFSET 0x00e8
+#define AM33XX_CM_PER_PRUSS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00e8)
+#define AM33XX_CM_PER_TIMER5_CLKCTRL_OFFSET 0x00ec
+#define AM33XX_CM_PER_TIMER5_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00ec)
+#define AM33XX_CM_PER_TIMER6_CLKCTRL_OFFSET 0x00f0
+#define AM33XX_CM_PER_TIMER6_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00f0)
+#define AM33XX_CM_PER_MMC1_CLKCTRL_OFFSET 0x00f4
+#define AM33XX_CM_PER_MMC1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00f4)
+#define AM33XX_CM_PER_MMC2_CLKCTRL_OFFSET 0x00f8
+#define AM33XX_CM_PER_MMC2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00f8)
+#define AM33XX_CM_PER_TPTC1_CLKCTRL_OFFSET 0x00fc
+#define AM33XX_CM_PER_TPTC1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x00fc)
+#define AM33XX_CM_PER_TPTC2_CLKCTRL_OFFSET 0x0100
+#define AM33XX_CM_PER_TPTC2_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0100)
+#define AM33XX_CM_PER_GPIO5_CLKCTRL_OFFSET 0x0104
+#define AM33XX_CM_PER_GPIO5_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0104)
+#define AM33XX_CM_PER_SPINLOCK_CLKCTRL_OFFSET 0x010c
+#define AM33XX_CM_PER_SPINLOCK_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x010c)
+#define AM33XX_CM_PER_MAILBOX0_CLKCTRL_OFFSET 0x0110
+#define AM33XX_CM_PER_MAILBOX0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0110)
+#define AM33XX_CM_PER_L4HS_CLKSTCTRL_OFFSET 0x011c
+#define AM33XX_CM_PER_L4HS_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x011c)
+#define AM33XX_CM_PER_L4HS_CLKCTRL_OFFSET 0x0120
+#define AM33XX_CM_PER_L4HS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0120)
+#define AM33XX_CM_PER_MSTR_EXPS_CLKCTRL_OFFSET 0x0124
+#define AM33XX_CM_PER_MSTR_EXPS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0124)
+#define AM33XX_CM_PER_SLV_EXPS_CLKCTRL_OFFSET 0x0128
+#define AM33XX_CM_PER_SLV_EXPS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0128)
+#define AM33XX_CM_PER_OCPWP_L3_CLKSTCTRL_OFFSET 0x012c
+#define AM33XX_CM_PER_OCPWP_L3_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x012c)
+#define AM33XX_CM_PER_OCPWP_CLKCTRL_OFFSET 0x0130
+#define AM33XX_CM_PER_OCPWP_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0130)
+#define AM33XX_CM_PER_MAILBOX1_CLKCTRL_OFFSET 0x0134
+#define AM33XX_CM_PER_MAILBOX1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0134)
+#define AM33XX_CM_PER_PRUSS_CLKSTCTRL_OFFSET 0x0140
+#define AM33XX_CM_PER_PRUSS_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0140)
+#define AM33XX_CM_PER_CPSW_CLKSTCTRL_OFFSET 0x0144
+#define AM33XX_CM_PER_CPSW_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0144)
+#define AM33XX_CM_PER_LCDC_CLKSTCTRL_OFFSET 0x0148
+#define AM33XX_CM_PER_LCDC_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0148)
+#define AM33XX_CM_PER_CLKDIV32K_CLKCTRL_OFFSET 0x014c
+#define AM33XX_CM_PER_CLKDIV32K_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x014c)
+#define AM33XX_CM_PER_CLK_24MHZ_CLKSTCTRL_OFFSET 0x0150
+#define AM33XX_CM_PER_CLK_24MHZ_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_PER_MOD, 0x0150)
+
+/* CM.WKUP_CM register offsets */
+#define AM33XX_CM_WKUP_CLKSTCTRL_OFFSET 0x0000
+#define AM33XX_CM_WKUP_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0000)
+#define AM33XX_CM_WKUP_CONTROL_CLKCTRL_OFFSET 0x0004
+#define AM33XX_CM_WKUP_CONTROL_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0004)
+#define AM33XX_CM_WKUP_GPIO0_CLKCTRL_OFFSET 0x0008
+#define AM33XX_CM_WKUP_GPIO0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0008)
+#define AM33XX_CM_WKUP_L4WKUP_CLKCTRL_OFFSET 0x000c
+#define AM33XX_CM_WKUP_L4WKUP_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x000c)
+#define AM33XX_CM_WKUP_TIMER0_CLKCTRL_OFFSET 0x0010
+#define AM33XX_CM_WKUP_TIMER0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0010)
+#define AM33XX_CM_WKUP_DEBUGSS_CLKCTRL_OFFSET 0x0014
+#define AM33XX_CM_WKUP_DEBUGSS_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0014)
+#define AM33XX_CM_L3_AON_CLKSTCTRL_OFFSET 0x0018
+#define AM33XX_CM_L3_AON_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0018)
+#define AM33XX_CM_AUTOIDLE_DPLL_MPU_OFFSET 0x001c
+#define AM33XX_CM_AUTOIDLE_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x001c)
+#define AM33XX_CM_IDLEST_DPLL_MPU_OFFSET 0x0020
+#define AM33XX_CM_IDLEST_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0020)
+#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_MPU_OFFSET 0x0024
+#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0024)
+#define AM33XX_CM_SSC_MODFREQDIV_DPLL_MPU_OFFSET 0x0028
+#define AM33XX_CM_SSC_MODFREQDIV_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0028)
+#define AM33XX_CM_CLKSEL_DPLL_MPU_OFFSET 0x002c
+#define AM33XX_CM_CLKSEL_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x002c)
+#define AM33XX_CM_AUTOIDLE_DPLL_DDR_OFFSET 0x0030
+#define AM33XX_CM_AUTOIDLE_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0030)
+#define AM33XX_CM_IDLEST_DPLL_DDR_OFFSET 0x0034
+#define AM33XX_CM_IDLEST_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0034)
+#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_DDR_OFFSET 0x0038
+#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0038)
+#define AM33XX_CM_SSC_MODFREQDIV_DPLL_DDR_OFFSET 0x003c
+#define AM33XX_CM_SSC_MODFREQDIV_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x003c)
+#define AM33XX_CM_CLKSEL_DPLL_DDR_OFFSET 0x0040
+#define AM33XX_CM_CLKSEL_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0040)
+#define AM33XX_CM_AUTOIDLE_DPLL_DISP_OFFSET 0x0044
+#define AM33XX_CM_AUTOIDLE_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0044)
+#define AM33XX_CM_IDLEST_DPLL_DISP_OFFSET 0x0048
+#define AM33XX_CM_IDLEST_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0048)
+#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_DISP_OFFSET 0x004c
+#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x004c)
+#define AM33XX_CM_SSC_MODFREQDIV_DPLL_DISP_OFFSET 0x0050
+#define AM33XX_CM_SSC_MODFREQDIV_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0050)
+#define AM33XX_CM_CLKSEL_DPLL_DISP_OFFSET 0x0054
+#define AM33XX_CM_CLKSEL_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0054)
+#define AM33XX_CM_AUTOIDLE_DPLL_CORE_OFFSET 0x0058
+#define AM33XX_CM_AUTOIDLE_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0058)
+#define AM33XX_CM_IDLEST_DPLL_CORE_OFFSET 0x005c
+#define AM33XX_CM_IDLEST_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x005c)
+#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_CORE_OFFSET 0x0060
+#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0060)
+#define AM33XX_CM_SSC_MODFREQDIV_DPLL_CORE_OFFSET 0x0064
+#define AM33XX_CM_SSC_MODFREQDIV_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0064)
+#define AM33XX_CM_CLKSEL_DPLL_CORE_OFFSET 0x0068
+#define AM33XX_CM_CLKSEL_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0068)
+#define AM33XX_CM_AUTOIDLE_DPLL_PER_OFFSET 0x006c
+#define AM33XX_CM_AUTOIDLE_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x006c)
+#define AM33XX_CM_IDLEST_DPLL_PER_OFFSET 0x0070
+#define AM33XX_CM_IDLEST_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0070)
+#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_PER_OFFSET 0x0074
+#define AM33XX_CM_SSC_DELTAMSTEP_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0074)
+#define AM33XX_CM_SSC_MODFREQDIV_DPLL_PER_OFFSET 0x0078
+#define AM33XX_CM_SSC_MODFREQDIV_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0078)
+#define AM33XX_CM_CLKDCOLDO_DPLL_PER_OFFSET 0x007c
+#define AM33XX_CM_CLKDCOLDO_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x007c)
+#define AM33XX_CM_DIV_M4_DPLL_CORE_OFFSET 0x0080
+#define AM33XX_CM_DIV_M4_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0080)
+#define AM33XX_CM_DIV_M5_DPLL_CORE_OFFSET 0x0084
+#define AM33XX_CM_DIV_M5_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0084)
+#define AM33XX_CM_CLKMODE_DPLL_MPU_OFFSET 0x0088
+#define AM33XX_CM_CLKMODE_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0088)
+#define AM33XX_CM_CLKMODE_DPLL_PER_OFFSET 0x008c
+#define AM33XX_CM_CLKMODE_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x008c)
+#define AM33XX_CM_CLKMODE_DPLL_CORE_OFFSET 0x0090
+#define AM33XX_CM_CLKMODE_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0090)
+#define AM33XX_CM_CLKMODE_DPLL_DDR_OFFSET 0x0094
+#define AM33XX_CM_CLKMODE_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0094)
+#define AM33XX_CM_CLKMODE_DPLL_DISP_OFFSET 0x0098
+#define AM33XX_CM_CLKMODE_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x0098)
+#define AM33XX_CM_CLKSEL_DPLL_PERIPH_OFFSET 0x009c
+#define AM33XX_CM_CLKSEL_DPLL_PERIPH AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x009c)
+#define AM33XX_CM_DIV_M2_DPLL_DDR_OFFSET 0x00a0
+#define AM33XX_CM_DIV_M2_DPLL_DDR AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00a0)
+#define AM33XX_CM_DIV_M2_DPLL_DISP_OFFSET 0x00a4
+#define AM33XX_CM_DIV_M2_DPLL_DISP AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00a4)
+#define AM33XX_CM_DIV_M2_DPLL_MPU_OFFSET 0x00a8
+#define AM33XX_CM_DIV_M2_DPLL_MPU AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00a8)
+#define AM33XX_CM_DIV_M2_DPLL_PER_OFFSET 0x00ac
+#define AM33XX_CM_DIV_M2_DPLL_PER AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00ac)
+#define AM33XX_CM_WKUP_WKUP_M3_CLKCTRL_OFFSET 0x00b0
+#define AM33XX_CM_WKUP_WKUP_M3_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00b0)
+#define AM33XX_CM_WKUP_UART0_CLKCTRL_OFFSET 0x00b4
+#define AM33XX_CM_WKUP_UART0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00b4)
+#define AM33XX_CM_WKUP_I2C0_CLKCTRL_OFFSET 0x00b8
+#define AM33XX_CM_WKUP_I2C0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00b8)
+#define AM33XX_CM_WKUP_ADC_TSC_CLKCTRL_OFFSET 0x00bc
+#define AM33XX_CM_WKUP_ADC_TSC_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00bc)
+#define AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL_OFFSET 0x00c0
+#define AM33XX_CM_WKUP_SMARTREFLEX0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00c0)
+#define AM33XX_CM_WKUP_TIMER1_CLKCTRL_OFFSET 0x00c4
+#define AM33XX_CM_WKUP_TIMER1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00c4)
+#define AM33XX_CM_WKUP_SMARTREFLEX1_CLKCTRL_OFFSET 0x00c8
+#define AM33XX_CM_WKUP_SMARTREFLEX1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00c8)
+#define AM33XX_CM_L4_WKUP_AON_CLKSTCTRL_OFFSET 0x00cc
+#define AM33XX_CM_L4_WKUP_AON_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00cc)
+#define AM33XX_CM_WKUP_WDT0_CLKCTRL_OFFSET 0x00d0
+#define AM33XX_CM_WKUP_WDT0_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00d0)
+#define AM33XX_CM_WKUP_WDT1_CLKCTRL_OFFSET 0x00d4
+#define AM33XX_CM_WKUP_WDT1_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00d4)
+#define AM33XX_CM_DIV_M6_DPLL_CORE_OFFSET 0x00d8
+#define AM33XX_CM_DIV_M6_DPLL_CORE AM33XX_CM_REGADDR(AM33XX_CM_WKUP_MOD, 0x00d8)
+
+/* CM.DPLL_CM register offsets */
+#define AM33XX_CLKSEL_TIMER7_CLK_OFFSET 0x0004
+#define AM33XX_CLKSEL_TIMER7_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0004)
+#define AM33XX_CLKSEL_TIMER2_CLK_OFFSET 0x0008
+#define AM33XX_CLKSEL_TIMER2_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0008)
+#define AM33XX_CLKSEL_TIMER3_CLK_OFFSET 0x000c
+#define AM33XX_CLKSEL_TIMER3_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x000c)
+#define AM33XX_CLKSEL_TIMER4_CLK_OFFSET 0x0010
+#define AM33XX_CLKSEL_TIMER4_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0010)
+#define AM33XX_CM_MAC_CLKSEL_OFFSET 0x0014
+#define AM33XX_CM_MAC_CLKSEL AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0014)
+#define AM33XX_CLKSEL_TIMER5_CLK_OFFSET 0x0018
+#define AM33XX_CLKSEL_TIMER5_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0018)
+#define AM33XX_CLKSEL_TIMER6_CLK_OFFSET 0x001c
+#define AM33XX_CLKSEL_TIMER6_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x001c)
+#define AM33XX_CM_CPTS_RFT_CLKSEL_OFFSET 0x0020
+#define AM33XX_CM_CPTS_RFT_CLKSEL AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0020)
+#define AM33XX_CLKSEL_TIMER1MS_CLK_OFFSET 0x0028
+#define AM33XX_CLKSEL_TIMER1MS_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0028)
+#define AM33XX_CLKSEL_GFX_FCLK_OFFSET 0x002c
+#define AM33XX_CLKSEL_GFX_FCLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x002c)
+#define AM33XX_CLKSEL_PRUSS_OCP_CLK_OFFSET 0x0030
+#define AM33XX_CLKSEL_PRUSS_OCP_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0030)
+#define AM33XX_CLKSEL_LCDC_PIXEL_CLK_OFFSET 0x0034
+#define AM33XX_CLKSEL_LCDC_PIXEL_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0034)
+#define AM33XX_CLKSEL_WDT1_CLK_OFFSET 0x0038
+#define AM33XX_CLKSEL_WDT1_CLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x0038)
+#define AM33XX_CLKSEL_GPIO0_DBCLK_OFFSET 0x003c
+#define AM33XX_CLKSEL_GPIO0_DBCLK AM33XX_CM_REGADDR(AM33XX_CM_DPLL_MOD, 0x003c)
+
+/* CM.MPU_CM register offsets */
+#define AM33XX_CM_MPU_CLKSTCTRL_OFFSET 0x0000
+#define AM33XX_CM_MPU_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_MPU_MOD, 0x0000)
+#define AM33XX_CM_MPU_MPU_CLKCTRL_OFFSET 0x0004
+#define AM33XX_CM_MPU_MPU_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_MPU_MOD, 0x0004)
+
+/* CM.DEVICE_CM register offsets */
+#define AM33XX_CM_CLKOUT_CTRL_OFFSET 0x0000
+#define AM33XX_CM_CLKOUT_CTRL AM33XX_CM_REGADDR(AM33XX_CM_DEVICE_MOD, 0x0000)
+
+/* CM.RTC_CM register offsets */
+#define AM33XX_CM_RTC_RTC_CLKCTRL_OFFSET 0x0000
+#define AM33XX_CM_RTC_RTC_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_RTC_MOD, 0x0000)
+#define AM33XX_CM_RTC_CLKSTCTRL_OFFSET 0x0004
+#define AM33XX_CM_RTC_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_RTC_MOD, 0x0004)
+
+/* CM.GFX_CM register offsets */
+#define AM33XX_CM_GFX_L3_CLKSTCTRL_OFFSET 0x0000
+#define AM33XX_CM_GFX_L3_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x0000)
+#define AM33XX_CM_GFX_GFX_CLKCTRL_OFFSET 0x0004
+#define AM33XX_CM_GFX_GFX_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x0004)
+#define AM33XX_CM_GFX_BITBLT_CLKCTRL_OFFSET 0x0008
+#define AM33XX_CM_GFX_BITBLT_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x0008)
+#define AM33XX_CM_GFX_L4LS_GFX_CLKSTCTRL__1_OFFSET 0x000c
+#define AM33XX_CM_GFX_L4LS_GFX_CLKSTCTRL__1 AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x000c)
+#define AM33XX_CM_GFX_MMUCFG_CLKCTRL_OFFSET 0x0010
+#define AM33XX_CM_GFX_MMUCFG_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x0010)
+#define AM33XX_CM_GFX_MMUDATA_CLKCTRL_OFFSET 0x0014
+#define AM33XX_CM_GFX_MMUDATA_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_GFX_MOD, 0x0014)
+
+/* CM.CEFUSE_CM register offsets */
+#define AM33XX_CM_CEFUSE_CLKSTCTRL_OFFSET 0x0000
+#define AM33XX_CM_CEFUSE_CLKSTCTRL AM33XX_CM_REGADDR(AM33XX_CM_CEFUSE_MOD, 0x0000)
+#define AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL_OFFSET 0x0020
+#define AM33XX_CM_CEFUSE_CEFUSE_CLKCTRL AM33XX_CM_REGADDR(AM33XX_CM_CEFUSE_MOD, 0x0020)
+
+
+extern bool am33xx_cm_is_clkdm_in_hwsup(s16 inst, u16 cdoffs);
+extern void am33xx_cm_clkdm_enable_hwsup(s16 inst, u16 cdoffs);
+extern void am33xx_cm_clkdm_disable_hwsup(s16 inst, u16 cdoffs);
+extern void am33xx_cm_clkdm_force_sleep(s16 inst, u16 cdoffs);
+extern void am33xx_cm_clkdm_force_wakeup(s16 inst, u16 cdoffs);
+
+#ifdef CONFIG_SOC_AM33XX
+extern int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs,
+ u16 clkctrl_offs);
+extern void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs,
+ u16 clkctrl_offs);
+extern void am33xx_cm_module_disable(u16 inst, s16 cdoffs,
+ u16 clkctrl_offs);
+extern int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs,
+ u16 clkctrl_offs);
+#else
+static inline int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs,
+ u16 clkctrl_offs)
+{
+ return 0;
+}
+static inline void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs,
+ u16 clkctrl_offs)
+{
+}
+static inline void am33xx_cm_module_disable(u16 inst, s16 cdoffs,
+ u16 clkctrl_offs)
+{
+}
+static inline int am33xx_cm_wait_module_ready(u16 inst, s16 cdoffs,
+ u16 clkctrl_offs)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index 1a39945d9ff8..1894015ff04b 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -235,20 +235,6 @@ void omap4_cminst_clkdm_disable_hwsup(u8 part, s16 inst, u16 cdoffs)
}
/**
- * omap4_cminst_clkdm_force_sleep - try to put a clockdomain into idle
- * @part: PRCM partition ID that the clockdomain registers exist in
- * @inst: CM instance register offset (*_INST macro)
- * @cdoffs: Clockdomain register offset (*_CDOFFS macro)
- *
- * Put a clockdomain referred to by (@part, @inst, @cdoffs) into idle
- * No return value.
- */
-void omap4_cminst_clkdm_force_sleep(u8 part, s16 inst, u16 cdoffs)
-{
- _clktrctrl_write(OMAP34XX_CLKSTCTRL_FORCE_SLEEP, part, inst, cdoffs);
-}
-
-/**
* omap4_cminst_clkdm_force_sleep - try to take a clockdomain out of idle
* @part: PRCM partition ID that the clockdomain registers exist in
* @inst: CM instance register offset (*_INST macro)
diff --git a/arch/arm/mach-omap2/cminst44xx.h b/arch/arm/mach-omap2/cminst44xx.h
index a018a7327879..d69fdefef985 100644
--- a/arch/arm/mach-omap2/cminst44xx.h
+++ b/arch/arm/mach-omap2/cminst44xx.h
@@ -16,38 +16,13 @@ extern void omap4_cminst_clkdm_enable_hwsup(u8 part, s16 inst, u16 cdoffs);
extern void omap4_cminst_clkdm_disable_hwsup(u8 part, s16 inst, u16 cdoffs);
extern void omap4_cminst_clkdm_force_sleep(u8 part, s16 inst, u16 cdoffs);
extern void omap4_cminst_clkdm_force_wakeup(u8 part, s16 inst, u16 cdoffs);
-
extern int omap4_cminst_wait_module_ready(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_offs);
-
-# ifdef CONFIG_ARCH_OMAP4
extern int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs);
-
extern void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs);
extern void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs,
u16 clkctrl_offs);
-
-# else
-
-static inline int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs,
- u16 clkctrl_offs)
-{
- return 0;
-}
-
-static inline void omap4_cminst_module_enable(u8 mode, u8 part, u16 inst,
- s16 cdoffs, u16 clkctrl_offs)
-{
-}
-
-static inline void omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs,
- u16 clkctrl_offs)
-{
-}
-
-# endif
-
/*
* In an ideal world, we would not export these low-level functions,
* but this will probably take some time to fix properly
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index 1706ebcec08d..14734746457c 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -35,6 +35,16 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
.turbo_mode = 0,
};
+/*
+ * ADS7846 driver maybe request a gpio according to the value
+ * of pdata->get_pendown_state, but we have done this. So set
+ * get_pendown_state to avoid twice gpio requesting.
+ */
+static int omap3_get_pendown_state(void)
+{
+ return !gpio_get_value(OMAP3_EVM_TS_GPIO);
+}
+
static struct ads7846_platform_data ads7846_config = {
.x_max = 0x0fff,
.y_max = 0x0fff,
@@ -45,6 +55,7 @@ static struct ads7846_platform_data ads7846_config = {
.debounce_rep = 1,
.gpio_pendown = -EINVAL,
.keep_vref_on = 1,
+ .get_pendown_state = &omap3_get_pendown_state,
};
static struct spi_board_info ads7846_spi_board_info __initdata = {
@@ -63,28 +74,30 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
struct spi_board_info *spi_bi = &ads7846_spi_board_info;
int err;
- if (board_pdata && board_pdata->get_pendown_state) {
- err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
- if (err) {
- pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
- return;
- }
- gpio_export(gpio_pendown, 0);
-
- if (gpio_debounce)
- gpio_set_debounce(gpio_pendown, gpio_debounce);
+ err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
+ if (err) {
+ pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
+ return;
}
+ if (gpio_debounce)
+ gpio_set_debounce(gpio_pendown, gpio_debounce);
+
spi_bi->bus_num = bus_num;
spi_bi->irq = gpio_to_irq(gpio_pendown);
if (board_pdata) {
board_pdata->gpio_pendown = gpio_pendown;
spi_bi->platform_data = board_pdata;
+ if (board_pdata->get_pendown_state)
+ gpio_export(gpio_pendown, 0);
} else {
ads7846_config.gpio_pendown = gpio_pendown;
}
+ if (!board_pdata || (board_pdata && !board_pdata->get_pendown_state))
+ gpio_free(gpio_pendown);
+
spi_register_board_info(&ads7846_spi_board_info, 1);
}
#else
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index a0b4a42836ab..4c4ef6a6166b 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -4,6 +4,7 @@
#include "twl-common.h"
#define NAND_BLOCK_SIZE SZ_128K
+#define OMAP3_EVM_TS_GPIO 175
struct mtd_partition;
struct ads7846_platform_data;
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index 8a6953a34fe2..069f9725b1c3 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -29,8 +29,6 @@
/* Global address base setup code */
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
-
static void __init __omap2_set_globals(struct omap_globals *omap2_globals)
{
omap2_set_globals_tap(omap2_globals);
@@ -39,8 +37,6 @@ static void __init __omap2_set_globals(struct omap_globals *omap2_globals)
omap2_set_globals_prcm(omap2_globals);
}
-#endif
-
#if defined(CONFIG_SOC_OMAP2420)
static struct omap_globals omap242x_globals = {
@@ -134,7 +130,9 @@ void __init ti81xx_map_io(void)
{
omapti81xx_map_common_io();
}
+#endif
+#if defined(CONFIG_SOC_AM33XX)
#define AM33XX_TAP_BASE (AM33XX_CTRL_BASE + \
TI81XX_CONTROL_DEVICE_ID - 0x204)
@@ -171,9 +169,7 @@ static struct omap_globals omap4_globals = {
void __init omap2_set_globals_443x(void)
{
- omap2_set_globals_tap(&omap4_globals);
- omap2_set_globals_control(&omap4_globals);
- omap2_set_globals_prcm(&omap4_globals);
+ __omap2_set_globals(&omap4_globals);
}
void __init omap4_map_io(void)
@@ -182,3 +178,27 @@ void __init omap4_map_io(void)
}
#endif
+#if defined(CONFIG_SOC_OMAP5)
+static struct omap_globals omap5_globals = {
+ .class = OMAP54XX_CLASS,
+ .tap = OMAP2_L4_IO_ADDRESS(OMAP54XX_SCM_BASE),
+ .ctrl = OMAP2_L4_IO_ADDRESS(OMAP54XX_SCM_BASE),
+ .ctrl_pad = OMAP2_L4_IO_ADDRESS(OMAP54XX_CTRL_BASE),
+ .prm = OMAP2_L4_IO_ADDRESS(OMAP54XX_PRM_BASE),
+ .cm = OMAP2_L4_IO_ADDRESS(OMAP54XX_CM_CORE_AON_BASE),
+ .cm2 = OMAP2_L4_IO_ADDRESS(OMAP54XX_CM_CORE_BASE),
+ .prcm_mpu = OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE),
+};
+
+void __init omap2_set_globals_5xxx(void)
+{
+ omap2_set_globals_tap(&omap5_globals);
+ omap2_set_globals_control(&omap5_globals);
+ omap2_set_globals_prcm(&omap5_globals);
+}
+
+void __init omap5_map_io(void)
+{
+ omap5_map_common_io();
+}
+#endif
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index be9dfd1abe60..1f65b1871c23 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -115,12 +115,22 @@ static inline int omap_mux_late_init(void)
}
#endif
+#ifdef CONFIG_SOC_OMAP5
+extern void omap5_map_common_io(void);
+#else
+static inline void omap5_map_common_io(void)
+{
+}
+#endif
+
extern void omap2_init_common_infrastructure(void);
extern struct sys_timer omap2_timer;
extern struct sys_timer omap3_timer;
extern struct sys_timer omap3_secure_timer;
+extern struct sys_timer omap3_am33xx_timer;
extern struct sys_timer omap4_timer;
+extern struct sys_timer omap5_timer;
void omap2420_init_early(void);
void omap2430_init_early(void);
@@ -128,9 +138,12 @@ void omap3430_init_early(void);
void omap35xx_init_early(void);
void omap3630_init_early(void);
void omap3_init_early(void); /* Do not use this one */
+void am33xx_init_early(void);
void am35xx_init_early(void);
void ti81xx_init_early(void);
+void am33xx_init_early(void);
void omap4430_init_early(void);
+void omap5_init_early(void);
void omap3_init_late(void); /* Do not use this one */
void omap4430_init_late(void);
void omap2420_init_late(void);
@@ -166,12 +179,18 @@ void omap2_set_globals_242x(void);
void omap2_set_globals_243x(void);
void omap2_set_globals_3xxx(void);
void omap2_set_globals_443x(void);
+void omap2_set_globals_5xxx(void);
void omap2_set_globals_ti81xx(void);
void omap2_set_globals_am33xx(void);
/* These get called from omap2_set_globals_xxxx(), do not call these */
void omap2_set_globals_tap(struct omap_globals *);
+#if defined(CONFIG_SOC_HAS_OMAP2_SDRC)
void omap2_set_globals_sdrc(struct omap_globals *);
+#else
+static inline void omap2_set_globals_sdrc(struct omap_globals *omap2_globals)
+{ }
+#endif
void omap2_set_globals_control(struct omap_globals *);
void omap2_set_globals_prcm(struct omap_globals *);
@@ -180,6 +199,7 @@ void omap243x_map_io(void);
void omap3_map_io(void);
void am33xx_map_io(void);
void omap4_map_io(void);
+void omap5_map_io(void);
void ti81xx_map_io(void);
void omap_barriers_init(void);
@@ -219,6 +239,8 @@ void omap3_intc_prepare_idle(void);
void omap3_intc_resume_idle(void);
void omap2_intc_handle_irq(struct pt_regs *regs);
void omap3_intc_handle_irq(struct pt_regs *regs);
+void omap_intc_of_init(void);
+void omap_gic_of_init(void);
#ifdef CONFIG_CACHE_L2X0
extern void __iomem *omap4_get_l2cache_base(void);
@@ -226,10 +248,10 @@ extern void __iomem *omap4_get_l2cache_base(void);
struct device_node;
#ifdef CONFIG_OF
-int __init omap_intc_of_init(struct device_node *node,
+int __init intc_of_init(struct device_node *node,
struct device_node *parent);
#else
-int __init omap_intc_of_init(struct device_node *node,
+int __init intc_of_init(struct device_node *node,
struct device_node *parent)
{
return 0;
@@ -256,6 +278,7 @@ extern void omap_secondary_startup(void);
extern u32 omap_modify_auxcoreboot0(u32 set_mask, u32 clear_mask);
extern void omap_auxcoreboot_addr(u32 cpu_addr);
extern u32 omap_read_auxcoreboot0(void);
+extern void omap5_secondary_startup(void);
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index 08e674bb0417..3223b81e7532 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -241,6 +241,49 @@ void omap3_ctrl_write_boot_mode(u8 bootmode)
#endif
+/**
+ * omap_ctrl_write_dsp_boot_addr - set boot address for a remote processor
+ * @bootaddr: physical address of the boot loader
+ *
+ * Set boot address for the boot loader of a supported processor
+ * when a power ON sequence occurs.
+ */
+void omap_ctrl_write_dsp_boot_addr(u32 bootaddr)
+{
+ u32 offset = cpu_is_omap243x() ? OMAP243X_CONTROL_IVA2_BOOTADDR :
+ cpu_is_omap34xx() ? OMAP343X_CONTROL_IVA2_BOOTADDR :
+ cpu_is_omap44xx() ? OMAP4_CTRL_MODULE_CORE_DSP_BOOTADDR :
+ 0;
+
+ if (!offset) {
+ pr_err("%s: unsupported omap type\n", __func__);
+ return;
+ }
+
+ omap_ctrl_writel(bootaddr, offset);
+}
+
+/**
+ * omap_ctrl_write_dsp_boot_mode - set boot mode for a remote processor
+ * @bootmode: 8-bit value to pass to some boot code
+ *
+ * Sets boot mode for the boot loader of a supported processor
+ * when a power ON sequence occurs.
+ */
+void omap_ctrl_write_dsp_boot_mode(u8 bootmode)
+{
+ u32 offset = cpu_is_omap243x() ? OMAP243X_CONTROL_IVA2_BOOTMOD :
+ cpu_is_omap34xx() ? OMAP343X_CONTROL_IVA2_BOOTMOD :
+ 0;
+
+ if (!offset) {
+ pr_err("%s: unsupported omap type\n", __func__);
+ return;
+ }
+
+ omap_ctrl_writel(bootmode, offset);
+}
+
#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
/*
* Clears the scratchpad contents in case of cold boot-
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index a406fd045ce1..b8cdc8531b60 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -21,6 +21,8 @@
#include <mach/ctrl_module_pad_core_44xx.h>
#include <mach/ctrl_module_pad_wkup_44xx.h>
+#include <plat/am33xx.h>
+
#ifndef __ASSEMBLY__
#define OMAP242X_CTRL_REGADDR(reg) \
OMAP2_L4_IO_ADDRESS(OMAP242X_CTRL_BASE + (reg))
@@ -28,6 +30,8 @@
OMAP2_L4_IO_ADDRESS(OMAP243X_CTRL_BASE + (reg))
#define OMAP343X_CTRL_REGADDR(reg) \
OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
+#define AM33XX_CTRL_REGADDR(reg) \
+ AM33XX_L4_WK_IO_ADDRESS(AM33XX_SCM_BASE + (reg))
#else
#define OMAP242X_CTRL_REGADDR(reg) \
OMAP2_L4_IO_ADDRESS(OMAP242X_CTRL_BASE + (reg))
@@ -35,6 +39,8 @@
OMAP2_L4_IO_ADDRESS(OMAP243X_CTRL_BASE + (reg))
#define OMAP343X_CTRL_REGADDR(reg) \
OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
+#define AM33XX_CTRL_REGADDR(reg) \
+ AM33XX_L4_WK_IO_ADDRESS(AM33XX_SCM_BASE + (reg))
#endif /* __ASSEMBLY__ */
/*
@@ -182,6 +188,7 @@
#define OMAP3630_CONTROL_FUSE_OPP120_VDD1 (OMAP2_CONTROL_GENERAL + 0x0120)
#define OMAP3630_CONTROL_FUSE_OPP50_VDD2 (OMAP2_CONTROL_GENERAL + 0x0128)
#define OMAP3630_CONTROL_FUSE_OPP100_VDD2 (OMAP2_CONTROL_GENERAL + 0x012C)
+#define OMAP3630_CONTROL_CAMERA_PHY_CTRL (OMAP2_CONTROL_GENERAL + 0x02f0)
/* OMAP44xx control efuse offsets */
#define OMAP44XX_CONTROL_FUSE_IVA_OPP50 0x22C
@@ -246,6 +253,10 @@
/* TI81XX CONTROL_DEVCONF register offsets */
#define TI81XX_CONTROL_DEVICE_ID (TI81XX_CONTROL_DEVCONF + 0x000)
+/* OMAP54XX CONTROL STATUS register */
+#define OMAP5XXX_CONTROL_STATUS 0x134
+#define OMAP5_DEVICETYPE_MASK (0x7 << 6)
+
/*
* REVISIT: This list of registers is not comprehensive - there are more
* that should be added.
@@ -312,15 +323,15 @@
OMAP343X_SCRATCHPAD + reg)
/* AM35XX_CONTROL_IPSS_CLK_CTRL bits */
-#define AM35XX_USBOTG_VBUSP_CLK_SHIFT 0
-#define AM35XX_CPGMAC_VBUSP_CLK_SHIFT 1
-#define AM35XX_VPFE_VBUSP_CLK_SHIFT 2
-#define AM35XX_HECC_VBUSP_CLK_SHIFT 3
-#define AM35XX_USBOTG_FCLK_SHIFT 8
-#define AM35XX_CPGMAC_FCLK_SHIFT 9
-#define AM35XX_VPFE_FCLK_SHIFT 10
-
-/*AM35XX CONTROL_LVL_INTR_CLEAR bits*/
+#define AM35XX_USBOTG_VBUSP_CLK_SHIFT 0
+#define AM35XX_CPGMAC_VBUSP_CLK_SHIFT 1
+#define AM35XX_VPFE_VBUSP_CLK_SHIFT 2
+#define AM35XX_HECC_VBUSP_CLK_SHIFT 3
+#define AM35XX_USBOTG_FCLK_SHIFT 8
+#define AM35XX_CPGMAC_FCLK_SHIFT 9
+#define AM35XX_VPFE_FCLK_SHIFT 10
+
+/* AM35XX CONTROL_LVL_INTR_CLEAR bits */
#define AM35XX_CPGMAC_C0_MISC_PULSE_CLR BIT(0)
#define AM35XX_CPGMAC_C0_RX_PULSE_CLR BIT(1)
#define AM35XX_CPGMAC_C0_RX_THRESH_CLR BIT(2)
@@ -330,21 +341,22 @@
#define AM35XX_VPFE_CCDC_VD1_INT_CLR BIT(6)
#define AM35XX_VPFE_CCDC_VD2_INT_CLR BIT(7)
-/*AM35XX CONTROL_IP_SW_RESET bits*/
+/* AM35XX CONTROL_IP_SW_RESET bits */
#define AM35XX_USBOTGSS_SW_RST BIT(0)
#define AM35XX_CPGMACSS_SW_RST BIT(1)
#define AM35XX_VPFE_VBUSP_SW_RST BIT(2)
#define AM35XX_HECC_SW_RST BIT(3)
#define AM35XX_VPFE_PCLK_SW_RST BIT(4)
-/*
- * CONTROL AM33XX STATUS register
- */
+/* AM33XX CONTROL_STATUS register */
#define AM33XX_CONTROL_STATUS 0x040
+#define AM33XX_CONTROL_SEC_CLK_CTRL 0x1bc
-/*
- * CONTROL OMAP STATUS register to identify OMAP3 features
- */
+/* AM33XX CONTROL_STATUS bitfields (partial) */
+#define AM33XX_CONTROL_STATUS_SYSBOOT1_SHIFT 22
+#define AM33XX_CONTROL_STATUS_SYSBOOT1_MASK (0x3 << 22)
+
+/* CONTROL OMAP STATUS register to identify OMAP3 features */
#define OMAP3_CONTROL_OMAP_STATUS 0x044c
#define OMAP3_SGX_SHIFT 13
@@ -397,6 +409,8 @@ extern u32 omap3_arm_context[128];
extern void omap3_control_save_context(void);
extern void omap3_control_restore_context(void);
extern void omap3_ctrl_write_boot_mode(u8 bootmode);
+extern void omap_ctrl_write_dsp_boot_addr(u32 bootaddr);
+extern void omap_ctrl_write_dsp_boot_mode(u8 bootmode);
extern void omap3630_ctrl_disable_rta(void);
extern int omap3_ctrl_save_padconf(void);
#else
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index 207bc1c7759f..f2a49a48ef59 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -36,8 +36,6 @@
#include "control.h"
#include "common.h"
-#ifdef CONFIG_CPU_IDLE
-
/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
u32 mpu_state;
@@ -77,20 +75,6 @@ static struct omap3_idle_statedata omap3_idle_data[] = {
static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
-static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
- struct clockdomain *clkdm)
-{
- clkdm_allow_idle(clkdm);
- return 0;
-}
-
-static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
- struct clockdomain *clkdm)
-{
- clkdm_deny_idle(clkdm);
- return 0;
-}
-
static int __omap3_enter_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
@@ -108,8 +92,8 @@ static int __omap3_enter_idle(struct cpuidle_device *dev,
/* Deny idle for C1 */
if (index == 0) {
- pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
- pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
+ clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]);
+ clkdm_deny_idle(core_pd->pwrdm_clkdms[0]);
}
/*
@@ -131,8 +115,8 @@ static int __omap3_enter_idle(struct cpuidle_device *dev,
/* Re-allow idle for C1 */
if (index == 0) {
- pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
- pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
+ clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
+ clkdm_allow_idle(core_pd->pwrdm_clkdms[0]);
}
return_sleep_time:
@@ -178,7 +162,7 @@ static int next_valid_state(struct cpuidle_device *dev,
u32 mpu_deepest_state = PWRDM_POWER_RET;
u32 core_deepest_state = PWRDM_POWER_RET;
int idx;
- int next_index = -1;
+ int next_index = 0; /* C1 is the default value */
if (enable_off_mode) {
mpu_deepest_state = PWRDM_POWER_OFF;
@@ -209,12 +193,6 @@ static int next_valid_state(struct cpuidle_device *dev,
}
}
- /*
- * C1 is always valid.
- * So, no need to check for 'next_index == -1' outside
- * this loop.
- */
-
return next_index;
}
@@ -228,23 +206,22 @@ static int next_valid_state(struct cpuidle_device *dev,
* the device to the specified or a safer state.
*/
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
+ struct cpuidle_driver *drv,
int index)
{
int new_state_idx;
- u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
+ u32 core_next_state, per_next_state = 0, per_saved_state = 0;
struct omap3_idle_statedata *cx;
int ret;
/*
- * Prevent idle completely if CAM is active.
+ * Use only C1 if CAM is active.
* CAM does not have wakeup capability in OMAP3.
*/
- cam_state = pwrdm_read_pwrst(cam_pd);
- if (cam_state == PWRDM_POWER_ON) {
+ if (pwrdm_read_pwrst(cam_pd) == PWRDM_POWER_ON)
new_state_idx = drv->safe_state_index;
- goto select_state;
- }
+ else
+ new_state_idx = next_valid_state(dev, drv, index);
/*
* FIXME: we currently manage device-specific idle states
@@ -254,24 +231,28 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
* its own code.
*/
- /*
- * Prevent PER off if CORE is not in retention or off as this
- * would disable PER wakeups completely.
- */
- cx = &omap3_idle_data[index];
+ /* Program PER state */
+ cx = &omap3_idle_data[new_state_idx];
core_next_state = cx->core_state;
per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
- if ((per_next_state == PWRDM_POWER_OFF) &&
- (core_next_state > PWRDM_POWER_RET))
- per_next_state = PWRDM_POWER_RET;
+ if (new_state_idx == 0) {
+ /* In C1 do not allow PER state lower than CORE state */
+ if (per_next_state < core_next_state)
+ per_next_state = core_next_state;
+ } else {
+ /*
+ * Prevent PER OFF if CORE is not in RETention or OFF as this
+ * would disable PER wakeups completely.
+ */
+ if ((per_next_state == PWRDM_POWER_OFF) &&
+ (core_next_state > PWRDM_POWER_RET))
+ per_next_state = PWRDM_POWER_RET;
+ }
/* Are we changing PER target state? */
if (per_next_state != per_saved_state)
pwrdm_set_next_pwrst(per_pd, per_next_state);
- new_state_idx = next_valid_state(dev, drv, index);
-
-select_state:
ret = omap3_enter_idle(dev, drv, new_state_idx);
/* Restore original PER state if it was modified */
@@ -288,7 +269,7 @@ struct cpuidle_driver omap3_idle_driver = {
.owner = THIS_MODULE,
.states = {
{
- .enter = omap3_enter_idle,
+ .enter = omap3_enter_idle_bm,
.exit_latency = 2 + 2,
.target_residency = 5,
.flags = CPUIDLE_FLAG_TIME_VALID,
@@ -379,9 +360,3 @@ int __init omap3_idle_init(void)
return 0;
}
-#else
-int __init omap3_idle_init(void)
-{
- return 0;
-}
-#endif /* CONFIG_CPU_IDLE */
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index be1617ca84bd..02d15bbd4e35 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -22,8 +22,6 @@
#include "pm.h"
#include "prm.h"
-#ifdef CONFIG_CPU_IDLE
-
/* Machine specific information */
struct omap4_idle_statedata {
u32 cpu_state;
@@ -199,9 +197,3 @@ int __init omap4_idle_init(void)
return 0;
}
-#else
-int __init omap4_idle_init(void)
-{
- return 0;
-}
-#endif /* CONFIG_CPU_IDLE */
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 7b4b9327e543..c00c68961bb8 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -27,7 +27,6 @@
#include "iomap.h"
#include <plat/board.h>
-#include <plat/mmc.h>
#include <plat/dma.h>
#include <plat/omap_hwmod.h>
#include <plat/omap_device.h>
@@ -84,7 +83,7 @@ static int __init omap4_l3_init(void)
* To avoid code running on other OMAPs in
* multi-omap builds
*/
- if (!(cpu_is_omap44xx()))
+ if (!cpu_is_omap44xx() && !soc_is_omap54xx())
return -ENODEV;
for (i = 0; i < L3_MODULES; i++) {
@@ -603,112 +602,6 @@ static inline void omap_init_aes(void) { }
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
-
-static inline void omap242x_mmc_mux(struct omap_mmc_platform_data
- *mmc_controller)
-{
- if ((mmc_controller->slots[0].switch_pin > 0) && \
- (mmc_controller->slots[0].switch_pin < OMAP_MAX_GPIO_LINES))
- omap_mux_init_gpio(mmc_controller->slots[0].switch_pin,
- OMAP_PIN_INPUT_PULLUP);
- if ((mmc_controller->slots[0].gpio_wp > 0) && \
- (mmc_controller->slots[0].gpio_wp < OMAP_MAX_GPIO_LINES))
- omap_mux_init_gpio(mmc_controller->slots[0].gpio_wp,
- OMAP_PIN_INPUT_PULLUP);
-
- omap_mux_init_signal("sdmmc_cmd", 0);
- omap_mux_init_signal("sdmmc_clki", 0);
- omap_mux_init_signal("sdmmc_clko", 0);
- omap_mux_init_signal("sdmmc_dat0", 0);
- omap_mux_init_signal("sdmmc_dat_dir0", 0);
- omap_mux_init_signal("sdmmc_cmd_dir", 0);
- if (mmc_controller->slots[0].caps & MMC_CAP_4_BIT_DATA) {
- omap_mux_init_signal("sdmmc_dat1", 0);
- omap_mux_init_signal("sdmmc_dat2", 0);
- omap_mux_init_signal("sdmmc_dat3", 0);
- omap_mux_init_signal("sdmmc_dat_dir1", 0);
- omap_mux_init_signal("sdmmc_dat_dir2", 0);
- omap_mux_init_signal("sdmmc_dat_dir3", 0);
- }
-
- /*
- * Use internal loop-back in MMC/SDIO Module Input Clock
- * selection
- */
- if (mmc_controller->slots[0].internal_clock) {
- u32 v = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
- v |= (1 << 24);
- omap_ctrl_writel(v, OMAP2_CONTROL_DEVCONF0);
- }
-}
-
-void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
-{
- struct platform_device *pdev;
- struct omap_hwmod *oh;
- int id = 0;
- char *oh_name = "msdi1";
- char *dev_name = "mmci-omap";
-
- if (!mmc_data[0]) {
- pr_err("%s fails: Incomplete platform data\n", __func__);
- return;
- }
-
- omap242x_mmc_mux(mmc_data[0]);
-
- oh = omap_hwmod_lookup(oh_name);
- if (!oh) {
- pr_err("Could not look up %s\n", oh_name);
- return;
- }
- pdev = omap_device_build(dev_name, id, oh, mmc_data[0],
- sizeof(struct omap_mmc_platform_data), NULL, 0, 0);
- if (IS_ERR(pdev))
- WARN(1, "Can'd build omap_device for %s:%s.\n",
- dev_name, oh->name);
-}
-
-#endif
-
-/*-------------------------------------------------------------------------*/
-
-#if defined(CONFIG_HDQ_MASTER_OMAP) || defined(CONFIG_HDQ_MASTER_OMAP_MODULE)
-#define OMAP_HDQ_BASE 0x480B2000
-static struct resource omap_hdq_resources[] = {
- {
- .start = OMAP_HDQ_BASE,
- .end = OMAP_HDQ_BASE + 0x1C,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_24XX_HDQ_IRQ,
- .flags = IORESOURCE_IRQ,
- },
-};
-static struct platform_device omap_hdq_dev = {
- .name = "omap_hdq",
- .id = 0,
- .dev = {
- .platform_data = NULL,
- },
- .num_resources = ARRAY_SIZE(omap_hdq_resources),
- .resource = omap_hdq_resources,
-};
-static inline void omap_hdq_init(void)
-{
- if (cpu_is_omap2420())
- return;
-
- platform_device_register(&omap_hdq_dev);
-}
-#else
-static inline void omap_hdq_init(void) {}
-#endif
-
-/*---------------------------------------------------------------------------*/
-
#if defined(CONFIG_VIDEO_OMAP2_VOUT) || \
defined(CONFIG_VIDEO_OMAP2_VOUT_MODULE)
#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
@@ -753,7 +646,6 @@ static int __init omap2_init_devices(void)
omap_init_mcspi();
}
omap_init_pmu();
- omap_hdq_init();
omap_init_sti();
omap_init_sham();
omap_init_aes();
@@ -772,7 +664,7 @@ static int __init omap_init_wdt(void)
char *oh_name = "wd_timer2";
char *dev_name = "omap_wdt";
- if (!cpu_class_is_omap2())
+ if (!cpu_class_is_omap2() || of_have_populated_dt())
return 0;
oh = omap_hwmod_lookup(oh_name);
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index f0f10beeffe8..b9c8d2f6a81f 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -135,11 +135,20 @@ static u16 _omap3_dpll_compute_freqsel(struct clk *clk, u8 n)
*/
static int _omap3_noncore_dpll_lock(struct clk *clk)
{
+ const struct dpll_data *dd;
u8 ai;
- int r;
+ u8 state = 1;
+ int r = 0;
pr_debug("clock: locking DPLL %s\n", clk->name);
+ dd = clk->dpll_data;
+ state <<= __ffs(dd->idlest_mask);
+
+ /* Check if already locked */
+ if ((__raw_readl(dd->idlest_reg) & dd->idlest_mask) == state)
+ goto done;
+
ai = omap3_dpll_autoidle_read(clk);
if (ai)
@@ -152,6 +161,7 @@ static int _omap3_noncore_dpll_lock(struct clk *clk)
if (ai)
omap3_dpll_allow_idle(clk);
+done:
return r;
}
@@ -628,3 +638,17 @@ unsigned long omap3_clkoutx2_recalc(struct clk *clk)
rate = clk->parent->rate * 2;
return rate;
}
+
+/* OMAP3/4 non-CORE DPLL clkops */
+
+const struct clkops clkops_omap3_noncore_dpll_ops = {
+ .enable = omap3_noncore_dpll_enable,
+ .disable = omap3_noncore_dpll_disable,
+ .allow_idle = omap3_dpll_allow_idle,
+ .deny_idle = omap3_dpll_deny_idle,
+};
+
+const struct clkops clkops_omap3_core_dpll_ops = {
+ .allow_idle = omap3_dpll_allow_idle,
+ .deny_idle = omap3_dpll_deny_idle,
+};
diff --git a/arch/arm/mach-omap2/drm.c b/arch/arm/mach-omap2/drm.c
new file mode 100644
index 000000000000..72e0f01b715c
--- /dev/null
+++ b/arch/arm/mach-omap2/drm.c
@@ -0,0 +1,61 @@
+/*
+ * DRM/KMS device registration for TI OMAP platforms
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <plat/omap_device.h>
+#include <plat/omap_hwmod.h>
+
+#if defined(CONFIG_DRM_OMAP) || (CONFIG_DRM_OMAP_MODULE)
+
+static struct platform_device omap_drm_device = {
+ .dev = {
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ .name = "omapdrm",
+ .id = 0,
+};
+
+static int __init omap_init_drm(void)
+{
+ struct omap_hwmod *oh = NULL;
+ struct platform_device *pdev;
+
+ /* lookup and populate the DMM information, if present - OMAP4+ */
+ oh = omap_hwmod_lookup("dmm");
+
+ if (oh) {
+ pdev = omap_device_build(oh->name, -1, oh, NULL, 0, NULL, 0,
+ false);
+ WARN(IS_ERR(pdev), "Could not build omap_device for %s\n",
+ oh->name);
+ }
+
+ return platform_device_register(&omap_drm_device);
+
+}
+
+arch_initcall(omap_init_drm);
+
+#endif
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index 88ffa1e645cd..a636ebc16b39 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -23,6 +23,7 @@
#include <asm/memblock.h>
+#include "control.h"
#include "cm2xxx_3xxx.h"
#include "prm2xxx_3xxx.h"
#ifdef CONFIG_BRIDGE_DVFS
@@ -46,6 +47,9 @@ static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
.dsp_cm_read = omap2_cm_read_mod_reg,
.dsp_cm_write = omap2_cm_write_mod_reg,
.dsp_cm_rmw_bits = omap2_cm_rmw_mod_reg_bits,
+
+ .set_bootaddr = omap_ctrl_write_dsp_boot_addr,
+ .set_bootmode = omap_ctrl_write_dsp_boot_mode,
};
static phys_addr_t omap_dsp_phys_mempool_base;
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 2286410671e7..b2b5759ab0fe 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -727,7 +727,8 @@ static int __init gpmc_init(void)
ck = "gpmc_fck";
l = OMAP34XX_GPMC_BASE;
gpmc_irq = INT_34XX_GPMC_IRQ;
- } else if (cpu_is_omap44xx()) {
+ } else if (cpu_is_omap44xx() || soc_is_omap54xx()) {
+ /* Base address and irq number are same for OMAP4/5 */
ck = "gpmc_ck";
l = OMAP44XX_GPMC_BASE;
gpmc_irq = OMAP44XX_IRQ_GPMC;
diff --git a/arch/arm/mach-omap2/hdq1w.c b/arch/arm/mach-omap2/hdq1w.c
index 297ebe03f09c..cdd6dda03828 100644
--- a/arch/arm/mach-omap2/hdq1w.c
+++ b/arch/arm/mach-omap2/hdq1w.c
@@ -22,7 +22,13 @@
* 02110-1301 USA
*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+
#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
#include <plat/hdq1w.h>
#include "common.h"
@@ -70,3 +76,23 @@ int omap_hdq1w_reset(struct omap_hwmod *oh)
return 0;
}
+
+static int __init omap_init_hdq(void)
+{
+ int id = -1;
+ struct platform_device *pdev;
+ struct omap_hwmod *oh;
+ char *oh_name = "hdq1w";
+ char *devname = "omap_hdq";
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh)
+ return 0;
+
+ pdev = omap_device_build(devname, id, oh, NULL, 0, NULL, 0, 0);
+ WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
+ devname, oh->name);
+
+ return 0;
+}
+arch_initcall(omap_init_hdq);
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 00486a8564fd..40373db649aa 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -44,12 +44,17 @@ int omap_type(void)
if (cpu_is_omap24xx()) {
val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
- } else if (cpu_is_am33xx()) {
+ } else if (soc_is_am33xx()) {
val = omap_ctrl_readl(AM33XX_CONTROL_STATUS);
} else if (cpu_is_omap34xx()) {
val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
} else if (cpu_is_omap44xx()) {
val = omap_ctrl_readl(OMAP4_CTRL_MODULE_CORE_STATUS);
+ } else if (soc_is_omap54xx()) {
+ val = omap_ctrl_readl(OMAP5XXX_CONTROL_STATUS);
+ val &= OMAP5_DEVICETYPE_MASK;
+ val >>= 6;
+ goto out;
} else {
pr_err("Cannot detect omap type!\n");
goto out;
@@ -100,7 +105,7 @@ static u16 tap_prod_id;
void omap_get_die_id(struct omap_die_id *odi)
{
- if (cpu_is_omap44xx()) {
+ if (cpu_is_omap44xx() || soc_is_omap54xx()) {
odi->id_0 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_0);
odi->id_1 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_1);
odi->id_2 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_2);
@@ -189,7 +194,7 @@ static void __init omap3_cpuinfo(void)
cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
} else if (cpu_is_ti816x()) {
cpu_name = "TI816X";
- } else if (cpu_is_am335x()) {
+ } else if (soc_is_am335x()) {
cpu_name = "AM335X";
} else if (cpu_is_ti814x()) {
cpu_name = "TI814X";
@@ -513,6 +518,41 @@ void __init omap4xxx_check_revision(void)
((omap_rev() >> 12) & 0xf), ((omap_rev() >> 8) & 0xf));
}
+void __init omap5xxx_check_revision(void)
+{
+ u32 idcode;
+ u16 hawkeye;
+ u8 rev;
+
+ idcode = read_tap_reg(OMAP_TAP_IDCODE);
+ hawkeye = (idcode >> 12) & 0xffff;
+ rev = (idcode >> 28) & 0xff;
+ switch (hawkeye) {
+ case 0xb942:
+ switch (rev) {
+ case 0:
+ default:
+ omap_revision = OMAP5430_REV_ES1_0;
+ }
+ break;
+
+ case 0xb998:
+ switch (rev) {
+ case 0:
+ default:
+ omap_revision = OMAP5432_REV_ES1_0;
+ }
+ break;
+
+ default:
+ /* Unknown default to latest silicon rev as default*/
+ omap_revision = OMAP5430_REV_ES1_0;
+ }
+
+ pr_info("OMAP%04x ES%d.0\n",
+ omap_rev() >> 16, ((omap_rev() >> 12) & 0xf));
+}
+
/*
* Set up things for map_io and processor detection later on. Gets called
* pretty much first thing from board init. For multi-omap, this gets
diff --git a/arch/arm/mach-omap2/include/mach/am35xx.h b/arch/arm/mach-omap2/include/mach/am35xx.h
index f1e13d1ca5e7..95594495fcf6 100644
--- a/arch/arm/mach-omap2/include/mach/am35xx.h
+++ b/arch/arm/mach-omap2/include/mach/am35xx.h
@@ -36,6 +36,8 @@
#define AM35XX_EMAC_CNTRL_MOD_OFFSET (0x0)
#define AM35XX_EMAC_CNTRL_RAM_OFFSET (0x20000)
#define AM35XX_EMAC_MDIO_OFFSET (0x30000)
+#define AM35XX_IPSS_MDIO_BASE (AM35XX_IPSS_EMAC_BASE + \
+ AM35XX_EMAC_MDIO_OFFSET)
#define AM35XX_EMAC_CNTRL_RAM_SIZE (0x2000)
#define AM35XX_EMAC_RAM_ADDR (AM3517_EMAC_BASE + \
AM3517_EMAC_CNTRL_RAM_OFFSET)
diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h
index 2f7ac70a20d8..01970824e0e5 100644
--- a/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h
+++ b/arch/arm/mach-omap2/include/mach/ctrl_module_core_44xx.h
@@ -42,6 +42,7 @@
#define OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_DPLL_1 0x0268
#define OMAP4_CTRL_MODULE_CORE_STATUS 0x02c4
#define OMAP4_CTRL_MODULE_CORE_DEV_CONF 0x0300
+#define OMAP4_CTRL_MODULE_CORE_DSP_BOOTADDR 0x0304
#define OMAP4_CTRL_MODULE_CORE_LDOVBB_IVA_VOLTAGE_CTRL 0x0314
#define OMAP4_CTRL_MODULE_CORE_LDOVBB_MPU_VOLTAGE_CTRL 0x0318
#define OMAP4_CTRL_MODULE_CORE_LDOSRAM_IVA_VOLTAGE_CTRL 0x0320
diff --git a/arch/arm/mach-omap2/include/mach/debug-macro.S b/arch/arm/mach-omap2/include/mach/debug-macro.S
index cdfc2a1f0e75..93d10de7129f 100644
--- a/arch/arm/mach-omap2/include/mach/debug-macro.S
+++ b/arch/arm/mach-omap2/include/mach/debug-macro.S
@@ -60,18 +60,20 @@ omap_uart_lsr: .word 0
beq 23f @ configure OMAP2UART3
cmp \rp, #OMAP3UART3 @ only on 34xx
beq 33f @ configure OMAP3UART3
- cmp \rp, #OMAP4UART3 @ only on 44xx
- beq 43f @ configure OMAP4UART3
+ cmp \rp, #OMAP4UART3 @ only on 44xx/54xx
+ beq 43f @ configure OMAP4/5UART3
cmp \rp, #OMAP3UART4 @ only on 36xx
beq 34f @ configure OMAP3UART4
- cmp \rp, #OMAP4UART4 @ only on 44xx
- beq 44f @ configure OMAP4UART4
+ cmp \rp, #OMAP4UART4 @ only on 44xx/54xx
+ beq 44f @ configure OMAP4/5UART4
cmp \rp, #TI81XXUART1 @ ti81Xx UART offsets different
beq 81f @ configure UART1
cmp \rp, #TI81XXUART2 @ ti81Xx UART offsets different
beq 82f @ configure UART2
cmp \rp, #TI81XXUART3 @ ti81Xx UART offsets different
beq 83f @ configure UART3
+ cmp \rp, #AM33XXUART1 @ AM33XX UART offsets different
+ beq 84f @ configure UART1
cmp \rp, #ZOOM_UART @ only on zoom2/3
beq 95f @ configure ZOOM_UART
@@ -100,7 +102,9 @@ omap_uart_lsr: .word 0
b 98f
83: mov \rp, #UART_OFFSET(TI81XX_UART3_BASE)
b 98f
-
+84: ldr \rp, =AM33XX_UART1_BASE
+ and \rp, \rp, #0x00ffffff
+ b 97f
95: ldr \rp, =ZOOM_UART_BASE
str \rp, [\tmp, #0] @ omap_uart_phys
ldr \rp, =ZOOM_UART_VIRT
@@ -109,6 +113,17 @@ omap_uart_lsr: .word 0
str \rp, [\tmp, #8] @ omap_uart_lsr
b 10b
+ /* AM33XX: Store both phys and virt address for the uart */
+97: add \rp, \rp, #0x44000000 @ phys base
+ str \rp, [\tmp, #0] @ omap_uart_phys
+ sub \rp, \rp, #0x44000000 @ phys base
+ add \rp, \rp, #0xf9000000 @ virt base
+ str \rp, [\tmp, #4] @ omap_uart_virt
+ mov \rp, #(UART_LSR << OMAP_PORT_SHIFT)
+ str \rp, [\tmp, #8] @ omap_uart_lsr
+
+ b 10b
+
/* Store both phys and virt address for the uart */
98: add \rp, \rp, #0x48000000 @ phys base
str \rp, [\tmp, #0] @ omap_uart_phys
diff --git a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
index 548de90b58c2..b0fd16f5c391 100644
--- a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
+++ b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
@@ -11,15 +11,20 @@
#ifndef OMAP_ARCH_WAKEUPGEN_H
#define OMAP_ARCH_WAKEUPGEN_H
+/* OMAP4 and OMAP5 has same base address */
+#define OMAP_WKUPGEN_BASE 0x48281000
+
#define OMAP_WKG_CONTROL_0 0x00
#define OMAP_WKG_ENB_A_0 0x10
#define OMAP_WKG_ENB_B_0 0x14
#define OMAP_WKG_ENB_C_0 0x18
#define OMAP_WKG_ENB_D_0 0x1c
+#define OMAP_WKG_ENB_E_0 0x20
#define OMAP_WKG_ENB_A_1 0x410
#define OMAP_WKG_ENB_B_1 0x414
#define OMAP_WKG_ENB_C_1 0x418
#define OMAP_WKG_ENB_D_1 0x41c
+#define OMAP_WKG_ENB_E_1 0x420
#define OMAP_AUX_CORE_BOOT_0 0x800
#define OMAP_AUX_CORE_BOOT_1 0x804
#define OMAP_PTMSYNCREQ_MASK 0xc00
@@ -28,4 +33,6 @@
#define OMAP_TIMESTAMPCYCLEHI 0xc0c
extern int __init omap_wakeupgen_init(void);
+extern void __iomem *omap_get_wakeupgen_base(void);
+extern int omap_secure_apis_support(void);
#endif
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 8d014ba04abc..4d2d981ff5c5 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -38,6 +38,7 @@
#include "powerdomain.h"
#include "clockdomain.h"
#include "common.h"
+#include "clock.h"
#include "clock2xxx.h"
#include "clock3xxx.h"
#include "clock44xx.h"
@@ -233,6 +234,35 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
};
#endif
+#ifdef CONFIG_SOC_OMAP5
+static struct map_desc omap54xx_io_desc[] __initdata = {
+ {
+ .virtual = L3_54XX_VIRT,
+ .pfn = __phys_to_pfn(L3_54XX_PHYS),
+ .length = L3_54XX_SIZE,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = L4_54XX_VIRT,
+ .pfn = __phys_to_pfn(L4_54XX_PHYS),
+ .length = L4_54XX_SIZE,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = L4_WK_54XX_VIRT,
+ .pfn = __phys_to_pfn(L4_WK_54XX_PHYS),
+ .length = L4_WK_54XX_SIZE,
+ .type = MT_DEVICE,
+ },
+ {
+ .virtual = L4_PER_54XX_VIRT,
+ .pfn = __phys_to_pfn(L4_PER_54XX_PHYS),
+ .length = L4_PER_54XX_SIZE,
+ .type = MT_DEVICE,
+ },
+};
+#endif
+
#ifdef CONFIG_SOC_OMAP2420
void __init omap242x_map_common_io(void)
{
@@ -278,6 +308,12 @@ void __init omap44xx_map_common_io(void)
}
#endif
+#ifdef CONFIG_SOC_OMAP5
+void __init omap5_map_common_io(void)
+{
+ iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
+}
+#endif
/*
* omap2_init_reprogram_sdrc - reprogram SDRC timing parameters
*
@@ -477,6 +513,20 @@ void __init ti81xx_init_late(void)
}
#endif
+#ifdef CONFIG_SOC_AM33XX
+void __init am33xx_init_early(void)
+{
+ omap2_set_globals_am33xx();
+ omap3xxx_check_revision();
+ ti81xx_check_features();
+ omap_common_init_early();
+ am33xx_voltagedomains_init();
+ am33xx_powerdomains_init();
+ am33xx_clockdomains_init();
+ am33xx_clk_init();
+}
+#endif
+
#ifdef CONFIG_ARCH_OMAP4
void __init omap4430_init_early(void)
{
@@ -500,6 +550,15 @@ void __init omap4430_init_late(void)
}
#endif
+#ifdef CONFIG_SOC_OMAP5
+void __init omap5_init_early(void)
+{
+ omap2_set_globals_5xxx();
+ omap5xxx_check_revision();
+ omap_common_init_early();
+}
+#endif
+
void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1)
{
diff --git a/arch/arm/mach-omap2/iomap.h b/arch/arm/mach-omap2/iomap.h
index 80b88921faba..cce2b65039f1 100644
--- a/arch/arm/mach-omap2/iomap.h
+++ b/arch/arm/mach-omap2/iomap.h
@@ -1,6 +1,14 @@
/*
* IO mappings for OMAP2+
*
+ * IO definitions for TI OMAP processors and boards
+ *
+ * Copied from arch/arm/mach-sa1100/include/mach/io.h
+ * Copyright (C) 1997-1999 Russell King
+ *
+ * Copyright (C) 2009-2012 Texas Instruments
+ * Added OMAP4/5 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
@@ -166,4 +174,23 @@
/* 0x49000000 --> 0xfb000000 */
#define L4_ABE_44XX_VIRT (L4_ABE_44XX_PHYS + OMAP2_L4_IO_OFFSET)
#define L4_ABE_44XX_SIZE SZ_1M
+/*
+ * ----------------------------------------------------------------------------
+ * Omap5 specific IO mapping
+ * ----------------------------------------------------------------------------
+ */
+#define L3_54XX_PHYS L3_54XX_BASE /* 0x44000000 --> 0xf8000000 */
+#define L3_54XX_VIRT (L3_54XX_PHYS + OMAP4_L3_IO_OFFSET)
+#define L3_54XX_SIZE SZ_1M
+
+#define L4_54XX_PHYS L4_54XX_BASE /* 0x4a000000 --> 0xfc000000 */
+#define L4_54XX_VIRT (L4_54XX_PHYS + OMAP2_L4_IO_OFFSET)
+#define L4_54XX_SIZE SZ_4M
+
+#define L4_WK_54XX_PHYS L4_WK_54XX_BASE /* 0x4ae00000 --> 0xfce00000 */
+#define L4_WK_54XX_VIRT (L4_WK_54XX_PHYS + OMAP2_L4_IO_OFFSET)
+#define L4_WK_54XX_SIZE SZ_2M
+#define L4_PER_54XX_PHYS L4_PER_54XX_BASE /* 0x48000000 --> 0xfa000000 */
+#define L4_PER_54XX_VIRT (L4_PER_54XX_PHYS + OMAP2_L4_IO_OFFSET)
+#define L4_PER_54XX_SIZE SZ_4M
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 6038a8c84b74..bcd83db41bbc 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -21,6 +21,7 @@
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <mach/hardware.h>
@@ -258,11 +259,11 @@ asmlinkage void __exception_irq_entry omap2_intc_handle_irq(struct pt_regs *regs
omap_intc_handle_irq(base_addr, regs);
}
-int __init omap_intc_of_init(struct device_node *node,
+int __init intc_of_init(struct device_node *node,
struct device_node *parent)
{
struct resource res;
- u32 nr_irqs = 96;
+ u32 nr_irq = 96;
if (WARN_ON(!node))
return -ENODEV;
@@ -272,15 +273,25 @@ int __init omap_intc_of_init(struct device_node *node,
return -EINVAL;
}
- if (of_property_read_u32(node, "ti,intc-size", &nr_irqs))
- pr_warn("unable to get intc-size, default to %d\n", nr_irqs);
+ if (of_property_read_u32(node, "ti,intc-size", &nr_irq))
+ pr_warn("unable to get intc-size, default to %d\n", nr_irq);
- omap_init_irq(res.start, nr_irqs, of_node_get(node));
+ omap_init_irq(res.start, nr_irq, of_node_get(node));
return 0;
}
-#ifdef CONFIG_ARCH_OMAP3
+static struct of_device_id irq_match[] __initdata = {
+ { .compatible = "ti,omap2-intc", .data = intc_of_init, },
+ { }
+};
+
+void __init omap_intc_of_init(void)
+{
+ of_irq_init(irq_match);
+}
+
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM33XX)
static struct omap3_intc_regs intc_context[ARRAY_SIZE(irq_banks)];
void omap_intc_save_context(void)
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 19b8b6774862..6875be837d9f 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -83,8 +83,6 @@ static int omap2_mbox_startup(struct omap_mbox *mbox)
l = mbox_read_reg(MAILBOX_REVISION);
pr_debug("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f));
- omap2_mbox_enable_irq(mbox, IRQ_RX);
-
return 0;
}
diff --git a/arch/arm/mach-omap2/msdi.c b/arch/arm/mach-omap2/msdi.c
index ef2a6924731a..fb5bc6cf3773 100644
--- a/arch/arm/mach-omap2/msdi.c
+++ b/arch/arm/mach-omap2/msdi.c
@@ -22,11 +22,15 @@
*/
#include <linux/kernel.h>
+#include <linux/err.h>
#include <plat/omap_hwmod.h>
+#include <plat/omap_device.h>
#include <plat/mmc.h>
#include "common.h"
+#include "control.h"
+#include "mux.h"
/*
* MSDI_CON_OFFSET: offset in bytes of the MSDI IP block's CON register
@@ -86,3 +90,72 @@ int omap_msdi_reset(struct omap_hwmod *oh)
return 0;
}
+
+#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+
+static inline void omap242x_mmc_mux(struct omap_mmc_platform_data
+ *mmc_controller)
+{
+ if ((mmc_controller->slots[0].switch_pin > 0) && \
+ (mmc_controller->slots[0].switch_pin < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->slots[0].switch_pin,
+ OMAP_PIN_INPUT_PULLUP);
+ if ((mmc_controller->slots[0].gpio_wp > 0) && \
+ (mmc_controller->slots[0].gpio_wp < OMAP_MAX_GPIO_LINES))
+ omap_mux_init_gpio(mmc_controller->slots[0].gpio_wp,
+ OMAP_PIN_INPUT_PULLUP);
+
+ omap_mux_init_signal("sdmmc_cmd", 0);
+ omap_mux_init_signal("sdmmc_clki", 0);
+ omap_mux_init_signal("sdmmc_clko", 0);
+ omap_mux_init_signal("sdmmc_dat0", 0);
+ omap_mux_init_signal("sdmmc_dat_dir0", 0);
+ omap_mux_init_signal("sdmmc_cmd_dir", 0);
+ if (mmc_controller->slots[0].caps & MMC_CAP_4_BIT_DATA) {
+ omap_mux_init_signal("sdmmc_dat1", 0);
+ omap_mux_init_signal("sdmmc_dat2", 0);
+ omap_mux_init_signal("sdmmc_dat3", 0);
+ omap_mux_init_signal("sdmmc_dat_dir1", 0);
+ omap_mux_init_signal("sdmmc_dat_dir2", 0);
+ omap_mux_init_signal("sdmmc_dat_dir3", 0);
+ }
+
+ /*
+ * Use internal loop-back in MMC/SDIO Module Input Clock
+ * selection
+ */
+ if (mmc_controller->slots[0].internal_clock) {
+ u32 v = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
+ v |= (1 << 24);
+ omap_ctrl_writel(v, OMAP2_CONTROL_DEVCONF0);
+ }
+}
+
+void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
+{
+ struct platform_device *pdev;
+ struct omap_hwmod *oh;
+ int id = 0;
+ char *oh_name = "msdi1";
+ char *dev_name = "mmci-omap";
+
+ if (!mmc_data[0]) {
+ pr_err("%s fails: Incomplete platform data\n", __func__);
+ return;
+ }
+
+ omap242x_mmc_mux(mmc_data[0]);
+
+ oh = omap_hwmod_lookup(oh_name);
+ if (!oh) {
+ pr_err("Could not look up %s\n", oh_name);
+ return;
+ }
+ pdev = omap_device_build(dev_name, id, oh, mmc_data[0],
+ sizeof(struct omap_mmc_platform_data), NULL, 0, 0);
+ if (IS_ERR(pdev))
+ WARN(1, "Can'd build omap_device for %s:%s.\n",
+ dev_name, oh->name);
+}
+
+#endif
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 503ac777a2ba..502e3135aad3 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -19,6 +19,27 @@
#include <linux/init.h>
__CPUINIT
+
+/* Physical address needed since MMU not enabled yet on secondary core */
+#define AUX_CORE_BOOT0_PA 0x48281800
+
+/*
+ * OMAP5 specific entry point for secondary CPU to jump from ROM
+ * code. This routine also provides a holding flag into which
+ * secondary core is held until we're ready for it to initialise.
+ * The primary core will update this flag using a hardware
++ * register AuxCoreBoot0.
+ */
+ENTRY(omap5_secondary_startup)
+wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
+ ldr r0, [r2]
+ mov r0, r0, lsr #5
+ mrc p15, 0, r4, c0, c0, 5
+ and r4, r4, #0x0f
+ cmp r0, r4
+ bne wait
+ b secondary_startup
+END(omap5_secondary_startup)
/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index 56c345b8b931..414083b427df 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -17,8 +17,10 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
+#include <linux/io.h>
#include <asm/cacheflush.h>
+#include <mach/omap-wakeupgen.h>
#include "common.h"
@@ -35,7 +37,8 @@ int platform_cpu_kill(unsigned int cpu)
*/
void __ref platform_cpu_die(unsigned int cpu)
{
- unsigned int this_cpu;
+ unsigned int boot_cpu = 0;
+ void __iomem *base = omap_get_wakeupgen_base();
flush_cache_all();
dsb();
@@ -43,16 +46,27 @@ void __ref platform_cpu_die(unsigned int cpu)
/*
* we're ready for shutdown now, so do it
*/
- if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0)
- pr_err("Secure clear status failed\n");
+ if (omap_secure_apis_support()) {
+ if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0)
+ pr_err("Secure clear status failed\n");
+ } else {
+ __raw_writel(0, base + OMAP_AUX_CORE_BOOT_0);
+ }
+
for (;;) {
/*
* Enter into low power state
*/
omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
- this_cpu = smp_processor_id();
- if (omap_read_auxcoreboot0() == this_cpu) {
+
+ if (omap_secure_apis_support())
+ boot_cpu = omap_read_auxcoreboot0();
+ else
+ boot_cpu =
+ __raw_readl(base + OMAP_AUX_CORE_BOOT_0) >> 5;
+
+ if (boot_cpu == smp_processor_id()) {
/*
* OK, proper wakeup, we're done
*/
diff --git a/arch/arm/mach-omap2/omap-iommu.c b/arch/arm/mach-omap2/omap-iommu.c
index ac49384d0285..1be8bcb52e93 100644
--- a/arch/arm/mach-omap2/omap-iommu.c
+++ b/arch/arm/mach-omap2/omap-iommu.c
@@ -73,19 +73,17 @@ static struct iommu_device omap4_devices[] = {
.da_end = 0xFFFFF000,
},
},
-#if defined(CONFIG_MPU_TESLA_IOMMU)
{
.base = OMAP4_MMU2_BASE,
- .irq = INT_44XX_DSP_MMU,
+ .irq = OMAP44XX_IRQ_TESLA_MMU,
.pdata = {
.name = "tesla",
.nr_tlb_entries = 32,
- .clk_name = "tesla_ick",
+ .clk_name = "dsp_fck",
.da_start = 0x0,
.da_end = 0xFFFFF000,
},
},
-#endif
};
#define NR_OMAP4_IOMMU_DEVICES ARRAY_SIZE(omap4_devices)
static struct platform_device *omap4_iommu_pdev[NR_OMAP4_IOMMU_DEVICES];
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 13670aa84e58..637a1bdf2ac4 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -255,7 +255,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
return -ENXIO;
}
- pwrdm_pre_transition();
+ pwrdm_pre_transition(NULL);
/*
* Check MPUSS next state and save interrupt controller if needed.
@@ -287,7 +287,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
wakeup_cpu = smp_processor_id();
set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON);
- pwrdm_post_transition();
+ pwrdm_post_transition(NULL);
return 0;
}
@@ -313,7 +313,7 @@ int __cpuinit omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
scu_pwrst_prepare(cpu, power_state);
/*
- * CPU never retuns back if targetted power state is OFF mode.
+ * CPU never retuns back if targeted power state is OFF mode.
* CPU ONLINE follows normal CPU ONLINE ptah via
* omap_secondary_startup().
*/
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index deffbf1c9627..7d118b9bdd5f 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -26,11 +26,19 @@
#include <mach/hardware.h>
#include <mach/omap-secure.h>
+#include <mach/omap-wakeupgen.h>
+#include <asm/cputype.h>
#include "iomap.h"
#include "common.h"
#include "clockdomain.h"
+#define CPU_MASK 0xff0ffff0
+#define CPU_CORTEX_A9 0x410FC090
+#define CPU_CORTEX_A15 0x410FC0F0
+
+#define OMAP5_CORE_COUNT 0x2
+
/* SCU base address */
static void __iomem *scu_base;
@@ -73,6 +81,8 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
static struct clockdomain *cpu1_clkdm;
static bool booted;
+ void __iomem *base = omap_get_wakeupgen_base();
+
/*
* Set synchronisation state between this boot processor
* and the secondary one
@@ -85,7 +95,11 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
* the AuxCoreBoot1 register is updated with cpu state
* A barrier is added to ensure that write buffer is drained
*/
- omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+ if (omap_secure_apis_support())
+ omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+ else
+ __raw_writel(0x20, base + OMAP_AUX_CORE_BOOT_0);
+
flush_cache_all();
smp_wmb();
@@ -124,13 +138,19 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
static void __init wakeup_secondary(void)
{
+ void __iomem *base = omap_get_wakeupgen_base();
/*
* Write the address of secondary startup routine into the
* AuxCoreBoot1 where ROM code will jump and start executing
* on secondary core once out of WFE
* A barrier is added to ensure that write buffer is drained
*/
- omap_auxcoreboot_addr(virt_to_phys(omap_secondary_startup));
+ if (omap_secure_apis_support())
+ omap_auxcoreboot_addr(virt_to_phys(omap_secondary_startup));
+ else
+ __raw_writel(virt_to_phys(omap5_secondary_startup),
+ base + OMAP_AUX_CORE_BOOT_1);
+
smp_wmb();
/*
@@ -147,16 +167,21 @@ static void __init wakeup_secondary(void)
*/
void __init smp_init_cpus(void)
{
- unsigned int i, ncores;
-
- /*
- * Currently we can't call ioremap here because
- * SoC detection won't work until after init_early.
- */
- scu_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_SCU_BASE);
- BUG_ON(!scu_base);
-
- ncores = scu_get_core_count(scu_base);
+ unsigned int i = 0, ncores = 1, cpu_id;
+
+ /* Use ARM cpuid check here, as SoC detection will not work so early */
+ cpu_id = read_cpuid(CPUID_ID) & CPU_MASK;
+ if (cpu_id == CPU_CORTEX_A9) {
+ /*
+ * Currently we can't call ioremap here because
+ * SoC detection won't work until after init_early.
+ */
+ scu_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_SCU_BASE);
+ BUG_ON(!scu_base);
+ ncores = scu_get_core_count(scu_base);
+ } else if (cpu_id == CPU_CORTEX_A15) {
+ ncores = OMAP5_CORE_COUNT;
+ }
/* sanity check */
if (ncores > nr_cpu_ids) {
@@ -178,6 +203,7 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
* Initialise the SCU and wake up the secondary core using
* wakeup_secondary().
*/
- scu_enable(scu_base);
+ if (scu_base)
+ scu_enable(scu_base);
wakeup_secondary();
}
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index d811c7790350..05fdebfaa195 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -33,18 +33,23 @@
#include "omap4-sar-layout.h"
#include "common.h"
-#define NR_REG_BANKS 4
-#define MAX_IRQS 128
+#define MAX_NR_REG_BANKS 5
+#define MAX_IRQS 160
#define WKG_MASK_ALL 0x00000000
#define WKG_UNMASK_ALL 0xffffffff
#define CPU_ENA_OFFSET 0x400
#define CPU0_ID 0x0
#define CPU1_ID 0x1
+#define OMAP4_NR_BANKS 4
+#define OMAP4_NR_IRQS 128
static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_SPINLOCK(wakeupgen_lock);
static unsigned int irq_target_cpu[NR_IRQS];
+static unsigned int irq_banks = MAX_NR_REG_BANKS;
+static unsigned int max_irqs = MAX_IRQS;
+static unsigned int omap_secure_apis;
/*
* Static helper functions.
@@ -146,13 +151,13 @@ static void wakeupgen_unmask(struct irq_data *d)
}
#ifdef CONFIG_HOTPLUG_CPU
-static DEFINE_PER_CPU(u32 [NR_REG_BANKS], irqmasks);
+static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
static void _wakeupgen_save_masks(unsigned int cpu)
{
u8 i;
- for (i = 0; i < NR_REG_BANKS; i++)
+ for (i = 0; i < irq_banks; i++)
per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
}
@@ -160,7 +165,7 @@ static void _wakeupgen_restore_masks(unsigned int cpu)
{
u8 i;
- for (i = 0; i < NR_REG_BANKS; i++)
+ for (i = 0; i < irq_banks; i++)
wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
}
@@ -168,7 +173,7 @@ static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
{
u8 i;
- for (i = 0; i < NR_REG_BANKS; i++)
+ for (i = 0; i < irq_banks; i++)
wakeupgen_writel(reg, i, cpu);
}
@@ -196,25 +201,14 @@ static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
#endif
#ifdef CONFIG_CPU_PM
-/*
- * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
- * ROM code. WakeupGen IP is integrated along with GIC to manage the
- * interrupt wakeups from CPU low power states. It manages
- * masking/unmasking of Shared peripheral interrupts(SPI). So the
- * interrupt enable/disable control should be in sync and consistent
- * at WakeupGen and GIC so that interrupts are not lost.
- */
-static void irq_save_context(void)
+static inline void omap4_irq_save_context(void)
{
u32 i, val;
if (omap_rev() == OMAP4430_REV_ES1_0)
return;
- if (!sar_base)
- sar_base = omap4_get_sar_ram_base();
-
- for (i = 0; i < NR_REG_BANKS; i++) {
+ for (i = 0; i < irq_banks; i++) {
/* Save the CPUx interrupt mask for IRQ 0 to 127 */
val = wakeupgen_readl(i, 0);
sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
@@ -254,6 +248,53 @@ static void irq_save_context(void)
val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
val |= SAR_BACKUP_STATUS_WAKEUPGEN;
__raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
+
+}
+
+static inline void omap5_irq_save_context(void)
+{
+ u32 i, val;
+
+ for (i = 0; i < irq_banks; i++) {
+ /* Save the CPUx interrupt mask for IRQ 0 to 159 */
+ val = wakeupgen_readl(i, 0);
+ sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU0, i);
+ val = wakeupgen_readl(i, 1);
+ sar_writel(val, OMAP5_WAKEUPGENENB_OFFSET_CPU1, i);
+ sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
+ sar_writel(0x0, OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
+ }
+
+ /* Save AuxBoot* registers */
+ val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
+ __raw_writel(val, sar_base + OMAP5_AUXCOREBOOT0_OFFSET);
+ val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
+ __raw_writel(val, sar_base + OMAP5_AUXCOREBOOT1_OFFSET);
+
+ /* Set the Backup Bit Mask status */
+ val = __raw_readl(sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
+ val |= SAR_BACKUP_STATUS_WAKEUPGEN;
+ __raw_writel(val, sar_base + OMAP5_SAR_BACKUP_STATUS_OFFSET);
+
+}
+
+/*
+ * Save WakeupGen interrupt context in SAR BANK3. Restore is done by
+ * ROM code. WakeupGen IP is integrated along with GIC to manage the
+ * interrupt wakeups from CPU low power states. It manages
+ * masking/unmasking of Shared peripheral interrupts(SPI). So the
+ * interrupt enable/disable control should be in sync and consistent
+ * at WakeupGen and GIC so that interrupts are not lost.
+ */
+static void irq_save_context(void)
+{
+ if (!sar_base)
+ sar_base = omap4_get_sar_ram_base();
+
+ if (soc_is_omap54xx())
+ omap5_irq_save_context();
+ else
+ omap4_irq_save_context();
}
/*
@@ -262,9 +303,14 @@ static void irq_save_context(void)
static void irq_sar_clear(void)
{
u32 val;
- val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
+ u32 offset = SAR_BACKUP_STATUS_OFFSET;
+
+ if (soc_is_omap54xx())
+ offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
+
+ val = __raw_readl(sar_base + offset);
val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
- __raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
+ __raw_writel(val, sar_base + offset);
}
/*
@@ -336,13 +382,25 @@ static struct notifier_block irq_notifier_block = {
static void __init irq_pm_init(void)
{
- cpu_pm_register_notifier(&irq_notifier_block);
+ /* FIXME: Remove this when MPU OSWR support is added */
+ if (!soc_is_omap54xx())
+ cpu_pm_register_notifier(&irq_notifier_block);
}
#else
static void __init irq_pm_init(void)
{}
#endif
+void __iomem *omap_get_wakeupgen_base(void)
+{
+ return wakeupgen_base;
+}
+
+int omap_secure_apis_support(void)
+{
+ return omap_secure_apis;
+}
+
/*
* Initialise the wakeupgen module.
*/
@@ -358,12 +416,18 @@ int __init omap_wakeupgen_init(void)
}
/* Static mapping, never released */
- wakeupgen_base = ioremap(OMAP44XX_WKUPGEN_BASE, SZ_4K);
+ wakeupgen_base = ioremap(OMAP_WKUPGEN_BASE, SZ_4K);
if (WARN_ON(!wakeupgen_base))
return -ENOMEM;
+ if (cpu_is_omap44xx()) {
+ irq_banks = OMAP4_NR_BANKS;
+ max_irqs = OMAP4_NR_IRQS;
+ omap_secure_apis = 1;
+ }
+
/* Clear all IRQ bitmasks at wakeupGen level */
- for (i = 0; i < NR_REG_BANKS; i++) {
+ for (i = 0; i < irq_banks; i++) {
wakeupgen_writel(0, i, CPU0_ID);
wakeupgen_writel(0, i, CPU1_ID);
}
@@ -382,7 +446,7 @@ int __init omap_wakeupgen_init(void)
*/
/* Associate all the IRQs to boot CPU like GIC init does. */
- for (i = 0; i < NR_IRQS; i++)
+ for (i = 0; i < max_irqs; i++)
irq_target_cpu[i] = boot_cpu;
irq_hotplug_init();
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index a8161e5f3204..c29dee998a79 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -21,6 +21,8 @@
#include <asm/hardware/cache-l2x0.h>
#include <asm/mach/map.h>
#include <asm/memblock.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
#include <plat/irqs.h>
#include <plat/sram.h>
@@ -210,6 +212,18 @@ static int __init omap4_sar_ram_init(void)
}
early_initcall(omap4_sar_ram_init);
+static struct of_device_id irq_match[] __initdata = {
+ { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
+ { .compatible = "arm,cortex-a15-gic", .data = gic_of_init, },
+ { }
+};
+
+void __init omap_gic_of_init(void)
+{
+ omap_wakeupgen_init();
+ of_irq_init(irq_match);
+}
+
#if defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
static int omap4_twl6030_hsmmc_late_init(struct device *dev)
{
diff --git a/arch/arm/mach-omap2/omap4-sar-layout.h b/arch/arm/mach-omap2/omap4-sar-layout.h
index fe5b545ad443..e170fe803b04 100644
--- a/arch/arm/mach-omap2/omap4-sar-layout.h
+++ b/arch/arm/mach-omap2/omap4-sar-layout.h
@@ -12,7 +12,7 @@
#define OMAP_ARCH_OMAP4_SAR_LAYOUT_H
/*
- * SAR BANK offsets from base address OMAP44XX_SAR_RAM_BASE
+ * SAR BANK offsets from base address OMAP44XX/54XX_SAR_RAM_BASE
*/
#define SAR_BANK1_OFFSET 0x0000
#define SAR_BANK2_OFFSET 0x1000
@@ -47,4 +47,14 @@
#define PTMSYNCREQ_EN_OFFSET (SAR_BANK3_OFFSET + 0x6d0)
#define SAR_BACKUP_STATUS_WAKEUPGEN 0x10
+/* WakeUpGen save restore offset from OMAP54XX_SAR_RAM_BASE */
+#define OMAP5_WAKEUPGENENB_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x8d4)
+#define OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x8e8)
+#define OMAP5_WAKEUPGENENB_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x8fc)
+#define OMAP5_WAKEUPGENENB_SECURE_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x910)
+#define OMAP5_AUXCOREBOOT0_OFFSET (SAR_BANK3_OFFSET + 0x924)
+#define OMAP5_AUXCOREBOOT1_OFFSET (SAR_BANK3_OFFSET + 0x928)
+#define OMAP5_AMBA_IF_MODE_OFFSET (SAR_BANK3_OFFSET + 0x92c)
+#define OMAP5_SAR_BACKUP_STATUS_OFFSET (SAR_BANK3_OFFSET + 0x800)
+
#endif
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 773193670ea2..6ca8e519968d 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -153,6 +153,7 @@
#include "prm44xx.h"
#include "prminst44xx.h"
#include "mux.h"
+#include "pm.h"
/* Maximum microseconds to wait for OMAP module to softreset */
#define MAX_MODULE_SOFTRESET_WAIT 10000
@@ -166,12 +167,40 @@
*/
#define LINKS_PER_OCP_IF 2
+/**
+ * struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations
+ * @enable_module: function to enable a module (via MODULEMODE)
+ * @disable_module: function to disable a module (via MODULEMODE)
+ *
+ * XXX Eventually this functionality will be hidden inside the PRM/CM
+ * device drivers. Until then, this should avoid huge blocks of cpu_is_*()
+ * conditionals in this code.
+ */
+struct omap_hwmod_soc_ops {
+ void (*enable_module)(struct omap_hwmod *oh);
+ int (*disable_module)(struct omap_hwmod *oh);
+ int (*wait_target_ready)(struct omap_hwmod *oh);
+ int (*assert_hardreset)(struct omap_hwmod *oh,
+ struct omap_hwmod_rst_info *ohri);
+ int (*deassert_hardreset)(struct omap_hwmod *oh,
+ struct omap_hwmod_rst_info *ohri);
+ int (*is_hardreset_asserted)(struct omap_hwmod *oh,
+ struct omap_hwmod_rst_info *ohri);
+ int (*init_clkdm)(struct omap_hwmod *oh);
+};
+
+/* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
+static struct omap_hwmod_soc_ops soc_ops;
+
/* omap_hwmod_list contains all registered struct omap_hwmods */
static LIST_HEAD(omap_hwmod_list);
/* mpu_oh: used to add/remove MPU initiator from sleepdep list */
static struct omap_hwmod *mpu_oh;
+/* io_chain_lock: used to serialize reconfigurations of the I/O chain */
+static DEFINE_SPINLOCK(io_chain_lock);
+
/*
* linkspace: ptr to a buffer that struct omap_hwmod_link records are
* allocated from - used to reduce the number of small memory
@@ -186,6 +215,9 @@ static struct omap_hwmod_link *linkspace;
*/
static unsigned short free_ls, max_ls, ls_supp;
+/* inited: set to true once the hwmod code is initialized */
+static bool inited;
+
/* Private functions */
/**
@@ -388,6 +420,49 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
}
/**
+ * _set_dmadisable: set OCP_SYSCONFIG.DMADISABLE bit in @v
+ * @oh: struct omap_hwmod *
+ *
+ * The DMADISABLE bit is a semi-automatic bit present in sysconfig register
+ * of some modules. When the DMA must perform read/write accesses, the
+ * DMADISABLE bit is cleared by the hardware. But when the DMA must stop
+ * for power management, software must set the DMADISABLE bit back to 1.
+ *
+ * Set the DMADISABLE bit in @v for hwmod @oh. Returns -EINVAL upon
+ * error or 0 upon success.
+ */
+static int _set_dmadisable(struct omap_hwmod *oh)
+{
+ u32 v;
+ u32 dmadisable_mask;
+
+ if (!oh->class->sysc ||
+ !(oh->class->sysc->sysc_flags & SYSC_HAS_DMADISABLE))
+ return -EINVAL;
+
+ if (!oh->class->sysc->sysc_fields) {
+ WARN(1, "omap_hwmod: %s: offset struct for sysconfig not provided in class\n", oh->name);
+ return -EINVAL;
+ }
+
+ /* clocks must be on for this operation */
+ if (oh->_state != _HWMOD_STATE_ENABLED) {
+ pr_warn("omap_hwmod: %s: dma can be disabled only from enabled state\n", oh->name);
+ return -EINVAL;
+ }
+
+ pr_debug("omap_hwmod: %s: setting DMADISABLE\n", oh->name);
+
+ v = oh->_sysc_cache;
+ dmadisable_mask =
+ (0x1 << oh->class->sysc->sysc_fields->dmadisable_shift);
+ v |= dmadisable_mask;
+ _write_sysconfig(v, oh);
+
+ return 0;
+}
+
+/**
* _set_module_autoidle: set the OCP_SYSCONFIG AUTOIDLE field in @v
* @oh: struct omap_hwmod *
* @autoidle: desired AUTOIDLE bitfield value (0 or 1)
@@ -771,23 +846,19 @@ static void _disable_optional_clocks(struct omap_hwmod *oh)
}
/**
- * _enable_module - enable CLKCTRL modulemode on OMAP4
+ * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4
* @oh: struct omap_hwmod *
*
* Enables the PRCM module mode related to the hwmod @oh.
* No return value.
*/
-static void _enable_module(struct omap_hwmod *oh)
+static void _omap4_enable_module(struct omap_hwmod *oh)
{
- /* The module mode does not exist prior OMAP4 */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- return;
-
if (!oh->clkdm || !oh->prcm.omap4.modulemode)
return;
- pr_debug("omap_hwmod: %s: _enable_module: %d\n",
- oh->name, oh->prcm.omap4.modulemode);
+ pr_debug("omap_hwmod: %s: %s: %d\n",
+ oh->name, __func__, oh->prcm.omap4.modulemode);
omap4_cminst_module_enable(oh->prcm.omap4.modulemode,
oh->clkdm->prcm_partition,
@@ -807,10 +878,7 @@ static void _enable_module(struct omap_hwmod *oh)
*/
static int _omap4_wait_target_disable(struct omap_hwmod *oh)
{
- if (!cpu_is_omap44xx())
- return 0;
-
- if (!oh)
+ if (!oh || !oh->clkdm)
return -EINVAL;
if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
@@ -1124,15 +1192,18 @@ static struct omap_hwmod_addr_space * __init _find_mpu_rt_addr_space(struct omap
* _enable_sysc - try to bring a module out of idle via OCP_SYSCONFIG
* @oh: struct omap_hwmod *
*
- * If module is marked as SWSUP_SIDLE, force the module out of slave
- * idle; otherwise, configure it for smart-idle. If module is marked
- * as SWSUP_MSUSPEND, force the module out of master standby;
- * otherwise, configure it for smart-standby. No return value.
+ * Ensure that the OCP_SYSCONFIG register for the IP block represented
+ * by @oh is set to indicate to the PRCM that the IP block is active.
+ * Usually this means placing the module into smart-idle mode and
+ * smart-standby, but if there is a bug in the automatic idle handling
+ * for the IP block, it may need to be placed into the force-idle or
+ * no-idle variants of these modes. No return value.
*/
static void _enable_sysc(struct omap_hwmod *oh)
{
u8 idlemode, sf;
u32 v;
+ bool clkdm_act;
if (!oh->class->sysc)
return;
@@ -1141,8 +1212,16 @@ static void _enable_sysc(struct omap_hwmod *oh)
sf = oh->class->sysc->sysc_flags;
if (sf & SYSC_HAS_SIDLEMODE) {
- idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
- HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+ clkdm_act = ((oh->clkdm &&
+ oh->clkdm->flags & CLKDM_ACTIVE_WITH_MPU) ||
+ (oh->_clk && oh->_clk->clkdm &&
+ oh->_clk->clkdm->flags & CLKDM_ACTIVE_WITH_MPU));
+ if (clkdm_act && !(oh->class->sysc->idlemodes &
+ (SIDLE_SMART | SIDLE_SMART_WKUP)))
+ idlemode = HWMOD_IDLEMODE_FORCE;
+ else
+ idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
+ HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
_set_slave_idlemode(oh, idlemode, &v);
}
@@ -1208,8 +1287,13 @@ static void _idle_sysc(struct omap_hwmod *oh)
sf = oh->class->sysc->sysc_flags;
if (sf & SYSC_HAS_SIDLEMODE) {
- idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
- HWMOD_IDLEMODE_FORCE : HWMOD_IDLEMODE_SMART;
+ /* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
+ if (oh->flags & HWMOD_SWSUP_SIDLE ||
+ !(oh->class->sysc->idlemodes &
+ (SIDLE_SMART | SIDLE_SMART_WKUP)))
+ idlemode = HWMOD_IDLEMODE_FORCE;
+ else
+ idlemode = HWMOD_IDLEMODE_SMART;
_set_slave_idlemode(oh, idlemode, &v);
}
@@ -1285,24 +1369,20 @@ static struct omap_hwmod *_lookup(const char *name)
return oh;
}
+
/**
* _init_clkdm - look up a clockdomain name, store pointer in omap_hwmod
* @oh: struct omap_hwmod *
*
* Convert a clockdomain name stored in a struct omap_hwmod into a
* clockdomain pointer, and save it into the struct omap_hwmod.
- * return -EINVAL if clkdm_name does not exist or if the lookup failed.
+ * Return -EINVAL if the clkdm_name lookup failed.
*/
static int _init_clkdm(struct omap_hwmod *oh)
{
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
+ if (!oh->clkdm_name)
return 0;
- if (!oh->clkdm_name) {
- pr_warning("omap_hwmod: %s: no clkdm_name\n", oh->name);
- return -EINVAL;
- }
-
oh->clkdm = clkdm_lookup(oh->clkdm_name);
if (!oh->clkdm) {
pr_warning("omap_hwmod: %s: could not associate to clkdm %s\n",
@@ -1338,7 +1418,8 @@ static int _init_clocks(struct omap_hwmod *oh, void *data)
ret |= _init_main_clk(oh);
ret |= _init_interface_clks(oh);
ret |= _init_opt_clks(oh);
- ret |= _init_clkdm(oh);
+ if (soc_ops.init_clkdm)
+ ret |= soc_ops.init_clkdm(oh);
if (!ret)
oh->_state = _HWMOD_STATE_CLKS_INITED;
@@ -1349,53 +1430,6 @@ static int _init_clocks(struct omap_hwmod *oh, void *data)
}
/**
- * _wait_target_ready - wait for a module to leave slave idle
- * @oh: struct omap_hwmod *
- *
- * Wait for a module @oh to leave slave idle. Returns 0 if the module
- * does not have an IDLEST bit or if the module successfully leaves
- * slave idle; otherwise, pass along the return value of the
- * appropriate *_cm*_wait_module_ready() function.
- */
-static int _wait_target_ready(struct omap_hwmod *oh)
-{
- struct omap_hwmod_ocp_if *os;
- int ret;
-
- if (!oh)
- return -EINVAL;
-
- if (oh->flags & HWMOD_NO_IDLEST)
- return 0;
-
- os = _find_mpu_rt_port(oh);
- if (!os)
- return 0;
-
- /* XXX check module SIDLEMODE */
-
- /* XXX check clock enable states */
-
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- ret = omap2_cm_wait_module_ready(oh->prcm.omap2.module_offs,
- oh->prcm.omap2.idlest_reg_id,
- oh->prcm.omap2.idlest_idle_bit);
- } else if (cpu_is_omap44xx()) {
- if (!oh->clkdm)
- return -EINVAL;
-
- ret = omap4_cminst_wait_module_ready(oh->clkdm->prcm_partition,
- oh->clkdm->cm_inst,
- oh->clkdm->clkdm_offs,
- oh->prcm.omap4.clkctrl_offs);
- } else {
- BUG();
- };
-
- return ret;
-}
-
-/**
* _lookup_hardreset - fill register bit info for this hwmod/reset line
* @oh: struct omap_hwmod *
* @name: name of the reset line in the context of this hwmod
@@ -1431,32 +1465,31 @@ static u8 _lookup_hardreset(struct omap_hwmod *oh, const char *name,
* @oh: struct omap_hwmod *
* @name: name of the reset line to lookup and assert
*
- * Some IP like dsp, ipu or iva contain processor that require
- * an HW reset line to be assert / deassert in order to enable fully
- * the IP.
+ * Some IP like dsp, ipu or iva contain processor that require an HW
+ * reset line to be assert / deassert in order to enable fully the IP.
+ * Returns -EINVAL if @oh is null, -ENOSYS if we have no way of
+ * asserting the hardreset line on the currently-booted SoC, or passes
+ * along the return value from _lookup_hardreset() or the SoC's
+ * assert_hardreset code.
*/
static int _assert_hardreset(struct omap_hwmod *oh, const char *name)
{
struct omap_hwmod_rst_info ohri;
- u8 ret;
+ u8 ret = -EINVAL;
if (!oh)
return -EINVAL;
+ if (!soc_ops.assert_hardreset)
+ return -ENOSYS;
+
ret = _lookup_hardreset(oh, name, &ohri);
if (IS_ERR_VALUE(ret))
return ret;
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- return omap2_prm_assert_hardreset(oh->prcm.omap2.module_offs,
- ohri.rst_shift);
- else if (cpu_is_omap44xx())
- return omap4_prminst_assert_hardreset(ohri.rst_shift,
- oh->clkdm->pwrdm.ptr->prcm_partition,
- oh->clkdm->pwrdm.ptr->prcm_offs,
- oh->prcm.omap4.rstctrl_offs);
- else
- return -EINVAL;
+ ret = soc_ops.assert_hardreset(oh, &ohri);
+
+ return ret;
}
/**
@@ -1465,38 +1498,29 @@ static int _assert_hardreset(struct omap_hwmod *oh, const char *name)
* @oh: struct omap_hwmod *
* @name: name of the reset line to look up and deassert
*
- * Some IP like dsp, ipu or iva contain processor that require
- * an HW reset line to be assert / deassert in order to enable fully
- * the IP.
+ * Some IP like dsp, ipu or iva contain processor that require an HW
+ * reset line to be assert / deassert in order to enable fully the IP.
+ * Returns -EINVAL if @oh is null, -ENOSYS if we have no way of
+ * deasserting the hardreset line on the currently-booted SoC, or passes
+ * along the return value from _lookup_hardreset() or the SoC's
+ * deassert_hardreset code.
*/
static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
{
struct omap_hwmod_rst_info ohri;
- int ret;
+ int ret = -EINVAL;
if (!oh)
return -EINVAL;
+ if (!soc_ops.deassert_hardreset)
+ return -ENOSYS;
+
ret = _lookup_hardreset(oh, name, &ohri);
if (IS_ERR_VALUE(ret))
return ret;
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- ret = omap2_prm_deassert_hardreset(oh->prcm.omap2.module_offs,
- ohri.rst_shift,
- ohri.st_shift);
- } else if (cpu_is_omap44xx()) {
- if (ohri.st_shift)
- pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n",
- oh->name, name);
- ret = omap4_prminst_deassert_hardreset(ohri.rst_shift,
- oh->clkdm->pwrdm.ptr->prcm_partition,
- oh->clkdm->pwrdm.ptr->prcm_offs,
- oh->prcm.omap4.rstctrl_offs);
- } else {
- return -EINVAL;
- }
-
+ ret = soc_ops.deassert_hardreset(oh, &ohri);
if (ret == -EBUSY)
pr_warning("omap_hwmod: %s: failed to hardreset\n", oh->name);
@@ -1509,31 +1533,28 @@ static int _deassert_hardreset(struct omap_hwmod *oh, const char *name)
* @oh: struct omap_hwmod *
* @name: name of the reset line to look up and read
*
- * Return the state of the reset line.
+ * Return the state of the reset line. Returns -EINVAL if @oh is
+ * null, -ENOSYS if we have no way of reading the hardreset line
+ * status on the currently-booted SoC, or passes along the return
+ * value from _lookup_hardreset() or the SoC's is_hardreset_asserted
+ * code.
*/
static int _read_hardreset(struct omap_hwmod *oh, const char *name)
{
struct omap_hwmod_rst_info ohri;
- u8 ret;
+ u8 ret = -EINVAL;
if (!oh)
return -EINVAL;
+ if (!soc_ops.is_hardreset_asserted)
+ return -ENOSYS;
+
ret = _lookup_hardreset(oh, name, &ohri);
if (IS_ERR_VALUE(ret))
return ret;
- if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
- return omap2_prm_is_hardreset_asserted(oh->prcm.omap2.module_offs,
- ohri.st_shift);
- } else if (cpu_is_omap44xx()) {
- return omap4_prminst_is_hardreset_asserted(ohri.rst_shift,
- oh->clkdm->pwrdm.ptr->prcm_partition,
- oh->clkdm->pwrdm.ptr->prcm_offs,
- oh->prcm.omap4.rstctrl_offs);
- } else {
- return -EINVAL;
- }
+ return soc_ops.is_hardreset_asserted(oh, &ohri);
}
/**
@@ -1571,10 +1592,6 @@ static int _omap4_disable_module(struct omap_hwmod *oh)
{
int v;
- /* The module mode does not exist prior OMAP4 */
- if (!cpu_is_omap44xx())
- return -EINVAL;
-
if (!oh->clkdm || !oh->prcm.omap4.modulemode)
return -EINVAL;
@@ -1698,11 +1715,17 @@ dis_opt_clks:
* therefore have no OCP header registers to access. Others (like the
* IVA) have idiosyncratic reset sequences. So for these relatively
* rare cases, custom reset code can be supplied in the struct
- * omap_hwmod_class .reset function pointer. Passes along the return
- * value from either _ocp_softreset() or the custom reset function -
- * these must return -EINVAL if the hwmod cannot be reset this way or
- * if the hwmod is in the wrong state, -ETIMEDOUT if the module did
- * not reset in time, or 0 upon success.
+ * omap_hwmod_class .reset function pointer.
+ *
+ * _set_dmadisable() is called to set the DMADISABLE bit so that it
+ * does not prevent idling of the system. This is necessary for cases
+ * where ROMCODE/BOOTLOADER uses dma and transfers control to the
+ * kernel without disabling dma.
+ *
+ * Passes along the return value from either _ocp_softreset() or the
+ * custom reset function - these must return -EINVAL if the hwmod
+ * cannot be reset this way or if the hwmod is in the wrong state,
+ * -ETIMEDOUT if the module did not reset in time, or 0 upon success.
*/
static int _reset(struct omap_hwmod *oh)
{
@@ -1724,6 +1747,8 @@ static int _reset(struct omap_hwmod *oh)
}
}
+ _set_dmadisable(oh);
+
/*
* OCP_SYSCONFIG bits need to be reprogrammed after a
* softreset. The _enable() function should be split to avoid
@@ -1738,6 +1763,32 @@ static int _reset(struct omap_hwmod *oh)
}
/**
+ * _reconfigure_io_chain - clear any I/O chain wakeups and reconfigure chain
+ *
+ * Call the appropriate PRM function to clear any logged I/O chain
+ * wakeups and to reconfigure the chain. This apparently needs to be
+ * done upon every mux change. Since hwmods can be concurrently
+ * enabled and idled, hold a spinlock around the I/O chain
+ * reconfiguration sequence. No return value.
+ *
+ * XXX When the PRM code is moved to drivers, this function can be removed,
+ * as the PRM infrastructure should abstract this.
+ */
+static void _reconfigure_io_chain(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&io_chain_lock, flags);
+
+ if (cpu_is_omap34xx() && omap3_has_io_chain_ctrl())
+ omap3xxx_prm_reconfigure_io_chain();
+ else if (cpu_is_omap44xx())
+ omap44xx_prm_reconfigure_io_chain();
+
+ spin_unlock_irqrestore(&io_chain_lock, flags);
+}
+
+/**
* _enable - enable an omap_hwmod
* @oh: struct omap_hwmod *
*
@@ -1793,8 +1844,10 @@ static int _enable(struct omap_hwmod *oh)
/* Mux pins for device runtime if populated */
if (oh->mux && (!oh->mux->enabled ||
((oh->_state == _HWMOD_STATE_IDLE) &&
- oh->mux->pads_dynamic)))
+ oh->mux->pads_dynamic))) {
omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
+ _reconfigure_io_chain();
+ }
_add_initiator_dep(oh, mpu_oh);
@@ -1814,9 +1867,11 @@ static int _enable(struct omap_hwmod *oh)
}
_enable_clocks(oh);
- _enable_module(oh);
+ if (soc_ops.enable_module)
+ soc_ops.enable_module(oh);
- r = _wait_target_ready(oh);
+ r = (soc_ops.wait_target_ready) ? soc_ops.wait_target_ready(oh) :
+ -EINVAL;
if (!r) {
/*
* Set the clockdomain to HW_AUTO only if the target is ready,
@@ -1870,7 +1925,8 @@ static int _idle(struct omap_hwmod *oh)
_idle_sysc(oh);
_del_initiator_dep(oh, mpu_oh);
- _omap4_disable_module(oh);
+ if (soc_ops.disable_module)
+ soc_ops.disable_module(oh);
/*
* The module must be in idle mode before disabling any parents
@@ -1883,8 +1939,10 @@ static int _idle(struct omap_hwmod *oh)
clkdm_hwmod_disable(oh->clkdm, oh);
/* Mux pins for device idle if populated */
- if (oh->mux && oh->mux->pads_dynamic)
+ if (oh->mux && oh->mux->pads_dynamic) {
omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
+ _reconfigure_io_chain();
+ }
oh->_state = _HWMOD_STATE_IDLE;
@@ -1975,7 +2033,8 @@ static int _shutdown(struct omap_hwmod *oh)
if (oh->_state == _HWMOD_STATE_ENABLED) {
_del_initiator_dep(oh, mpu_oh);
/* XXX what about the other system initiators here? dma, dsp */
- _omap4_disable_module(oh);
+ if (soc_ops.disable_module)
+ soc_ops.disable_module(oh);
_disable_clocks(oh);
if (oh->clkdm)
clkdm_hwmod_disable(oh->clkdm, oh);
@@ -2431,6 +2490,194 @@ static int __init _alloc_linkspace(struct omap_hwmod_ocp_if **ois)
return 0;
}
+/* Static functions intended only for use in soc_ops field function pointers */
+
+/**
+ * _omap2_wait_target_ready - wait for a module to leave slave idle
+ * @oh: struct omap_hwmod *
+ *
+ * Wait for a module @oh to leave slave idle. Returns 0 if the module
+ * does not have an IDLEST bit or if the module successfully leaves
+ * slave idle; otherwise, pass along the return value of the
+ * appropriate *_cm*_wait_module_ready() function.
+ */
+static int _omap2_wait_target_ready(struct omap_hwmod *oh)
+{
+ if (!oh)
+ return -EINVAL;
+
+ if (oh->flags & HWMOD_NO_IDLEST)
+ return 0;
+
+ if (!_find_mpu_rt_port(oh))
+ return 0;
+
+ /* XXX check module SIDLEMODE, hardreset status, enabled clocks */
+
+ return omap2_cm_wait_module_ready(oh->prcm.omap2.module_offs,
+ oh->prcm.omap2.idlest_reg_id,
+ oh->prcm.omap2.idlest_idle_bit);
+}
+
+/**
+ * _omap4_wait_target_ready - wait for a module to leave slave idle
+ * @oh: struct omap_hwmod *
+ *
+ * Wait for a module @oh to leave slave idle. Returns 0 if the module
+ * does not have an IDLEST bit or if the module successfully leaves
+ * slave idle; otherwise, pass along the return value of the
+ * appropriate *_cm*_wait_module_ready() function.
+ */
+static int _omap4_wait_target_ready(struct omap_hwmod *oh)
+{
+ if (!oh || !oh->clkdm)
+ return -EINVAL;
+
+ if (oh->flags & HWMOD_NO_IDLEST)
+ return 0;
+
+ if (!_find_mpu_rt_port(oh))
+ return 0;
+
+ /* XXX check module SIDLEMODE, hardreset status */
+
+ return omap4_cminst_wait_module_ready(oh->clkdm->prcm_partition,
+ oh->clkdm->cm_inst,
+ oh->clkdm->clkdm_offs,
+ oh->prcm.omap4.clkctrl_offs);
+}
+
+/**
+ * _omap2_assert_hardreset - call OMAP2 PRM hardreset fn with hwmod args
+ * @oh: struct omap_hwmod * to assert hardreset
+ * @ohri: hardreset line data
+ *
+ * Call omap2_prm_assert_hardreset() with parameters extracted from
+ * the hwmod @oh and the hardreset line data @ohri. Only intended for
+ * use as an soc_ops function pointer. Passes along the return value
+ * from omap2_prm_assert_hardreset(). XXX This function is scheduled
+ * for removal when the PRM code is moved into drivers/.
+ */
+static int _omap2_assert_hardreset(struct omap_hwmod *oh,
+ struct omap_hwmod_rst_info *ohri)
+{
+ return omap2_prm_assert_hardreset(oh->prcm.omap2.module_offs,
+ ohri->rst_shift);
+}
+
+/**
+ * _omap2_deassert_hardreset - call OMAP2 PRM hardreset fn with hwmod args
+ * @oh: struct omap_hwmod * to deassert hardreset
+ * @ohri: hardreset line data
+ *
+ * Call omap2_prm_deassert_hardreset() with parameters extracted from
+ * the hwmod @oh and the hardreset line data @ohri. Only intended for
+ * use as an soc_ops function pointer. Passes along the return value
+ * from omap2_prm_deassert_hardreset(). XXX This function is
+ * scheduled for removal when the PRM code is moved into drivers/.
+ */
+static int _omap2_deassert_hardreset(struct omap_hwmod *oh,
+ struct omap_hwmod_rst_info *ohri)
+{
+ return omap2_prm_deassert_hardreset(oh->prcm.omap2.module_offs,
+ ohri->rst_shift,
+ ohri->st_shift);
+}
+
+/**
+ * _omap2_is_hardreset_asserted - call OMAP2 PRM hardreset fn with hwmod args
+ * @oh: struct omap_hwmod * to test hardreset
+ * @ohri: hardreset line data
+ *
+ * Call omap2_prm_is_hardreset_asserted() with parameters extracted
+ * from the hwmod @oh and the hardreset line data @ohri. Only
+ * intended for use as an soc_ops function pointer. Passes along the
+ * return value from omap2_prm_is_hardreset_asserted(). XXX This
+ * function is scheduled for removal when the PRM code is moved into
+ * drivers/.
+ */
+static int _omap2_is_hardreset_asserted(struct omap_hwmod *oh,
+ struct omap_hwmod_rst_info *ohri)
+{
+ return omap2_prm_is_hardreset_asserted(oh->prcm.omap2.module_offs,
+ ohri->st_shift);
+}
+
+/**
+ * _omap4_assert_hardreset - call OMAP4 PRM hardreset fn with hwmod args
+ * @oh: struct omap_hwmod * to assert hardreset
+ * @ohri: hardreset line data
+ *
+ * Call omap4_prminst_assert_hardreset() with parameters extracted
+ * from the hwmod @oh and the hardreset line data @ohri. Only
+ * intended for use as an soc_ops function pointer. Passes along the
+ * return value from omap4_prminst_assert_hardreset(). XXX This
+ * function is scheduled for removal when the PRM code is moved into
+ * drivers/.
+ */
+static int _omap4_assert_hardreset(struct omap_hwmod *oh,
+ struct omap_hwmod_rst_info *ohri)
+{
+ if (!oh->clkdm)
+ return -EINVAL;
+
+ return omap4_prminst_assert_hardreset(ohri->rst_shift,
+ oh->clkdm->pwrdm.ptr->prcm_partition,
+ oh->clkdm->pwrdm.ptr->prcm_offs,
+ oh->prcm.omap4.rstctrl_offs);
+}
+
+/**
+ * _omap4_deassert_hardreset - call OMAP4 PRM hardreset fn with hwmod args
+ * @oh: struct omap_hwmod * to deassert hardreset
+ * @ohri: hardreset line data
+ *
+ * Call omap4_prminst_deassert_hardreset() with parameters extracted
+ * from the hwmod @oh and the hardreset line data @ohri. Only
+ * intended for use as an soc_ops function pointer. Passes along the
+ * return value from omap4_prminst_deassert_hardreset(). XXX This
+ * function is scheduled for removal when the PRM code is moved into
+ * drivers/.
+ */
+static int _omap4_deassert_hardreset(struct omap_hwmod *oh,
+ struct omap_hwmod_rst_info *ohri)
+{
+ if (!oh->clkdm)
+ return -EINVAL;
+
+ if (ohri->st_shift)
+ pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n",
+ oh->name, ohri->name);
+ return omap4_prminst_deassert_hardreset(ohri->rst_shift,
+ oh->clkdm->pwrdm.ptr->prcm_partition,
+ oh->clkdm->pwrdm.ptr->prcm_offs,
+ oh->prcm.omap4.rstctrl_offs);
+}
+
+/**
+ * _omap4_is_hardreset_asserted - call OMAP4 PRM hardreset fn with hwmod args
+ * @oh: struct omap_hwmod * to test hardreset
+ * @ohri: hardreset line data
+ *
+ * Call omap4_prminst_is_hardreset_asserted() with parameters
+ * extracted from the hwmod @oh and the hardreset line data @ohri.
+ * Only intended for use as an soc_ops function pointer. Passes along
+ * the return value from omap4_prminst_is_hardreset_asserted(). XXX
+ * This function is scheduled for removal when the PRM code is moved
+ * into drivers/.
+ */
+static int _omap4_is_hardreset_asserted(struct omap_hwmod *oh,
+ struct omap_hwmod_rst_info *ohri)
+{
+ if (!oh->clkdm)
+ return -EINVAL;
+
+ return omap4_prminst_is_hardreset_asserted(ohri->rst_shift,
+ oh->clkdm->pwrdm.ptr->prcm_partition,
+ oh->clkdm->pwrdm.ptr->prcm_offs,
+ oh->prcm.omap4.rstctrl_offs);
+}
+
/* Public functions */
u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs)
@@ -2563,12 +2810,18 @@ int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
*
* Intended to be called early in boot before the clock framework is
* initialized. If @ois is not null, will register all omap_hwmods
- * listed in @ois that are valid for this chip. Returns 0.
+ * listed in @ois that are valid for this chip. Returns -EINVAL if
+ * omap_hwmod_init() hasn't been called before calling this function,
+ * -ENOMEM if the link memory area can't be allocated, or 0 upon
+ * success.
*/
int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois)
{
int r, i;
+ if (!inited)
+ return -EINVAL;
+
if (!ois)
return 0;
@@ -3401,3 +3654,47 @@ int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx)
return 0;
}
+
+/**
+ * omap_hwmod_init - initialize the hwmod code
+ *
+ * Sets up some function pointers needed by the hwmod code to operate on the
+ * currently-booted SoC. Intended to be called once during kernel init
+ * before any hwmods are registered. No return value.
+ */
+void __init omap_hwmod_init(void)
+{
+ if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
+ soc_ops.wait_target_ready = _omap2_wait_target_ready;
+ soc_ops.assert_hardreset = _omap2_assert_hardreset;
+ soc_ops.deassert_hardreset = _omap2_deassert_hardreset;
+ soc_ops.is_hardreset_asserted = _omap2_is_hardreset_asserted;
+ } else if (cpu_is_omap44xx() || soc_is_omap54xx()) {
+ soc_ops.enable_module = _omap4_enable_module;
+ soc_ops.disable_module = _omap4_disable_module;
+ soc_ops.wait_target_ready = _omap4_wait_target_ready;
+ soc_ops.assert_hardreset = _omap4_assert_hardreset;
+ soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
+ soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
+ soc_ops.init_clkdm = _init_clkdm;
+ } else {
+ WARN(1, "omap_hwmod: unknown SoC type\n");
+ }
+
+ inited = true;
+}
+
+/**
+ * omap_hwmod_get_main_clk - get pointer to main clock name
+ * @oh: struct omap_hwmod *
+ *
+ * Returns the main clock name assocated with @oh upon success,
+ * or NULL if @oh is NULL.
+ */
+const char *omap_hwmod_get_main_clk(struct omap_hwmod *oh)
+{
+ if (!oh)
+ return NULL;
+
+ return oh->main_clk;
+}
diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
index a7640d1b215e..50cfab61b0e2 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c
@@ -192,6 +192,11 @@ static struct omap_hwmod_class omap2420_mcbsp_hwmod_class = {
.name = "mcbsp",
};
+static struct omap_hwmod_opt_clk mcbsp_opt_clks[] = {
+ { .role = "pad_fck", .clk = "mcbsp_clks" },
+ { .role = "prcm_fck", .clk = "func_96m_ck" },
+};
+
/* mcbsp1 */
static struct omap_hwmod_irq_info omap2420_mcbsp1_irqs[] = {
{ .name = "tx", .irq = 59 },
@@ -214,6 +219,8 @@ static struct omap_hwmod omap2420_mcbsp1_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_MCBSP1_SHIFT,
},
},
+ .opt_clks = mcbsp_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp_opt_clks),
};
/* mcbsp2 */
@@ -238,6 +245,8 @@ static struct omap_hwmod omap2420_mcbsp2_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_MCBSP2_SHIFT,
},
},
+ .opt_clks = mcbsp_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp_opt_clks),
};
static struct omap_hwmod_class_sysconfig omap2420_msdi_sysc = {
@@ -585,5 +594,6 @@ static struct omap_hwmod_ocp_if *omap2420_hwmod_ocp_ifs[] __initdata = {
int __init omap2420_hwmod_init(void)
{
+ omap_hwmod_init();
return omap_hwmod_register_links(omap2420_hwmod_ocp_ifs);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
index 4d7264981230..58b5bc196d32 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c
@@ -296,6 +296,11 @@ static struct omap_hwmod_class omap2430_mcbsp_hwmod_class = {
.rev = MCBSP_CONFIG_TYPE2,
};
+static struct omap_hwmod_opt_clk mcbsp_opt_clks[] = {
+ { .role = "pad_fck", .clk = "mcbsp_clks" },
+ { .role = "prcm_fck", .clk = "func_96m_ck" },
+};
+
/* mcbsp1 */
static struct omap_hwmod_irq_info omap2430_mcbsp1_irqs[] = {
{ .name = "tx", .irq = 59 },
@@ -320,6 +325,8 @@ static struct omap_hwmod omap2430_mcbsp1_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_MCBSP1_SHIFT,
},
},
+ .opt_clks = mcbsp_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp_opt_clks),
};
/* mcbsp2 */
@@ -345,6 +352,8 @@ static struct omap_hwmod omap2430_mcbsp2_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_MCBSP2_SHIFT,
},
},
+ .opt_clks = mcbsp_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp_opt_clks),
};
/* mcbsp3 */
@@ -370,6 +379,8 @@ static struct omap_hwmod omap2430_mcbsp3_hwmod = {
.idlest_idle_bit = OMAP2430_ST_MCBSP3_SHIFT,
},
},
+ .opt_clks = mcbsp_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp_opt_clks),
};
/* mcbsp4 */
@@ -401,6 +412,8 @@ static struct omap_hwmod omap2430_mcbsp4_hwmod = {
.idlest_idle_bit = OMAP2430_ST_MCBSP4_SHIFT,
},
},
+ .opt_clks = mcbsp_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp_opt_clks),
};
/* mcbsp5 */
@@ -432,6 +445,8 @@ static struct omap_hwmod omap2430_mcbsp5_hwmod = {
.idlest_idle_bit = OMAP2430_ST_MCBSP5_SHIFT,
},
},
+ .opt_clks = mcbsp_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp_opt_clks),
};
/* MMC/SD/SDIO common */
@@ -938,5 +953,6 @@ static struct omap_hwmod_ocp_if *omap2430_hwmod_ocp_ifs[] __initdata = {
int __init omap2430_hwmod_init(void)
{
+ omap_hwmod_init();
return omap_hwmod_register_links(omap2430_hwmod_ocp_ifs);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 83eafd96ecaa..afad69c6ba6e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -68,7 +68,6 @@ static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
struct omap_hwmod_class omap2xxx_timer_hwmod_class = {
.name = "timer",
.sysc = &omap2xxx_timer_sysc,
- .rev = OMAP_TIMER_IP_VERSION_1,
};
/*
@@ -257,7 +256,6 @@ struct omap_hwmod omap2xxx_timer2_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_GPT2_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
};
@@ -276,7 +274,6 @@ struct omap_hwmod omap2xxx_timer3_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_GPT3_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
};
@@ -295,7 +292,6 @@ struct omap_hwmod omap2xxx_timer4_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_GPT4_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
};
@@ -314,7 +310,6 @@ struct omap_hwmod omap2xxx_timer5_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_GPT5_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
};
@@ -333,7 +328,6 @@ struct omap_hwmod omap2xxx_timer6_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_GPT6_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
};
@@ -352,7 +346,6 @@ struct omap_hwmod omap2xxx_timer7_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_GPT7_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
};
@@ -371,7 +364,6 @@ struct omap_hwmod omap2xxx_timer8_hwmod = {
.idlest_idle_bit = OMAP24XX_ST_GPT8_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap2xxx_timer_hwmod_class,
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index b26d3c9bca16..c9e38200216b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -14,6 +14,8 @@
*
* XXX these should be marked initdata for multi-OMAP kernels
*/
+#include <linux/power/smartreflex.h>
+
#include <plat/omap_hwmod.h>
#include <mach/irqs.h>
#include <plat/cpu.h>
@@ -29,8 +31,6 @@
#include <plat/dmtimer.h>
#include "omap_hwmod_common_data.h"
-
-#include "smartreflex.h"
#include "prm-regbits-34xx.h"
#include "cm-regbits-34xx.h"
#include "wd_timer.h"
@@ -129,7 +129,6 @@ static struct omap_hwmod_class_sysconfig omap3xxx_timer_1ms_sysc = {
static struct omap_hwmod_class omap3xxx_timer_1ms_hwmod_class = {
.name = "timer",
.sysc = &omap3xxx_timer_1ms_sysc,
- .rev = OMAP_TIMER_IP_VERSION_1,
};
static struct omap_hwmod_class_sysconfig omap3xxx_timer_sysc = {
@@ -145,12 +144,11 @@ static struct omap_hwmod_class_sysconfig omap3xxx_timer_sysc = {
static struct omap_hwmod_class omap3xxx_timer_hwmod_class = {
.name = "timer",
.sysc = &omap3xxx_timer_sysc,
- .rev = OMAP_TIMER_IP_VERSION_1,
};
/* secure timers dev attribute */
static struct omap_timer_capability_dev_attr capability_secure_dev_attr = {
- .timer_capability = OMAP_TIMER_SECURE,
+ .timer_capability = OMAP_TIMER_ALWON | OMAP_TIMER_SECURE,
};
/* always-on timers dev attribute */
@@ -195,7 +193,6 @@ static struct omap_hwmod omap3xxx_timer2_hwmod = {
.idlest_idle_bit = OMAP3430_ST_GPT2_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap3xxx_timer_1ms_hwmod_class,
};
@@ -213,7 +210,6 @@ static struct omap_hwmod omap3xxx_timer3_hwmod = {
.idlest_idle_bit = OMAP3430_ST_GPT3_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap3xxx_timer_hwmod_class,
};
@@ -231,7 +227,6 @@ static struct omap_hwmod omap3xxx_timer4_hwmod = {
.idlest_idle_bit = OMAP3430_ST_GPT4_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap3xxx_timer_hwmod_class,
};
@@ -249,7 +244,6 @@ static struct omap_hwmod omap3xxx_timer5_hwmod = {
.idlest_idle_bit = OMAP3430_ST_GPT5_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap3xxx_timer_hwmod_class,
};
@@ -267,7 +261,6 @@ static struct omap_hwmod omap3xxx_timer6_hwmod = {
.idlest_idle_bit = OMAP3430_ST_GPT6_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap3xxx_timer_hwmod_class,
};
@@ -285,7 +278,6 @@ static struct omap_hwmod omap3xxx_timer7_hwmod = {
.idlest_idle_bit = OMAP3430_ST_GPT7_SHIFT,
},
},
- .dev_attr = &capability_alwon_dev_attr,
.class = &omap3xxx_timer_hwmod_class,
};
@@ -527,11 +519,27 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
static struct omap_hwmod_irq_info am35xx_uart4_mpu_irqs[] = {
{ .irq = INT_35XX_UART4_IRQ, },
+ { .irq = -1 }
};
static struct omap_hwmod_dma_info am35xx_uart4_sdma_reqs[] = {
{ .name = "rx", .dma_req = AM35XX_DMA_UART4_RX, },
{ .name = "tx", .dma_req = AM35XX_DMA_UART4_TX, },
+ { .dma_req = -1 }
+};
+
+/*
+ * XXX AM35xx UART4 cannot complete its softreset without uart1_fck or
+ * uart2_fck being enabled. So we add uart1_fck as an optional clock,
+ * below, and set the HWMOD_CONTROL_OPT_CLKS_IN_RESET. This really
+ * should not be needed. The functional clock structure of the AM35xx
+ * UART4 is extremely unclear and opaque; it is unclear what the role
+ * of uart1/2_fck is for the UART4. Any clarification from either
+ * empirical testing or the AM3505/3517 hardware designers would be
+ * most welcome.
+ */
+static struct omap_hwmod_opt_clk am35xx_uart4_opt_clks[] = {
+ { .role = "softreset_uart1_fck", .clk = "uart1_fck" },
};
static struct omap_hwmod am35xx_uart4_hwmod = {
@@ -543,11 +551,14 @@ static struct omap_hwmod am35xx_uart4_hwmod = {
.omap2 = {
.module_offs = CORE_MOD,
.prcm_reg_id = 1,
- .module_bit = OMAP3430_EN_UART4_SHIFT,
+ .module_bit = AM35XX_EN_UART4_SHIFT,
.idlest_reg_id = 1,
- .idlest_idle_bit = OMAP3430_EN_UART4_SHIFT,
+ .idlest_idle_bit = AM35XX_ST_UART4_SHIFT,
},
},
+ .opt_clks = am35xx_uart4_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(am35xx_uart4_opt_clks),
+ .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET,
.class = &omap2_uart_class,
};
@@ -1074,6 +1085,17 @@ static struct omap_hwmod_class omap3xxx_mcbsp_hwmod_class = {
.rev = MCBSP_CONFIG_TYPE3,
};
+/* McBSP functional clock mapping */
+static struct omap_hwmod_opt_clk mcbsp15_opt_clks[] = {
+ { .role = "pad_fck", .clk = "mcbsp_clks" },
+ { .role = "prcm_fck", .clk = "core_96m_fck" },
+};
+
+static struct omap_hwmod_opt_clk mcbsp234_opt_clks[] = {
+ { .role = "pad_fck", .clk = "mcbsp_clks" },
+ { .role = "prcm_fck", .clk = "per_96m_fck" },
+};
+
/* mcbsp1 */
static struct omap_hwmod_irq_info omap3xxx_mcbsp1_irqs[] = {
{ .name = "common", .irq = 16 },
@@ -1097,6 +1119,8 @@ static struct omap_hwmod omap3xxx_mcbsp1_hwmod = {
.idlest_idle_bit = OMAP3430_ST_MCBSP1_SHIFT,
},
},
+ .opt_clks = mcbsp15_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp15_opt_clks),
};
/* mcbsp2 */
@@ -1126,6 +1150,8 @@ static struct omap_hwmod omap3xxx_mcbsp2_hwmod = {
.idlest_idle_bit = OMAP3430_ST_MCBSP2_SHIFT,
},
},
+ .opt_clks = mcbsp234_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp234_opt_clks),
.dev_attr = &omap34xx_mcbsp2_dev_attr,
};
@@ -1156,6 +1182,8 @@ static struct omap_hwmod omap3xxx_mcbsp3_hwmod = {
.idlest_idle_bit = OMAP3430_ST_MCBSP3_SHIFT,
},
},
+ .opt_clks = mcbsp234_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp234_opt_clks),
.dev_attr = &omap34xx_mcbsp3_dev_attr,
};
@@ -1188,6 +1216,8 @@ static struct omap_hwmod omap3xxx_mcbsp4_hwmod = {
.idlest_idle_bit = OMAP3430_ST_MCBSP4_SHIFT,
},
},
+ .opt_clks = mcbsp234_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp234_opt_clks),
};
/* mcbsp5 */
@@ -1219,6 +1249,8 @@ static struct omap_hwmod omap3xxx_mcbsp5_hwmod = {
.idlest_idle_bit = OMAP3430_ST_MCBSP5_SHIFT,
},
},
+ .opt_clks = mcbsp15_opt_clks,
+ .opt_clks_cnt = ARRAY_SIZE(mcbsp15_opt_clks),
};
/* 'mcbsp sidetone' class */
@@ -1325,7 +1357,7 @@ static struct omap_hwmod_irq_info omap3_smartreflex_mpu_irqs[] = {
};
static struct omap_hwmod omap34xx_sr1_hwmod = {
- .name = "sr1",
+ .name = "smartreflex_mpu_iva",
.class = &omap34xx_smartreflex_hwmod_class,
.main_clk = "sr1_fck",
.prcm = {
@@ -1343,7 +1375,7 @@ static struct omap_hwmod omap34xx_sr1_hwmod = {
};
static struct omap_hwmod omap36xx_sr1_hwmod = {
- .name = "sr1",
+ .name = "smartreflex_mpu_iva",
.class = &omap36xx_smartreflex_hwmod_class,
.main_clk = "sr1_fck",
.prcm = {
@@ -1370,7 +1402,7 @@ static struct omap_hwmod_irq_info omap3_smartreflex_core_irqs[] = {
};
static struct omap_hwmod omap34xx_sr2_hwmod = {
- .name = "sr2",
+ .name = "smartreflex_core",
.class = &omap34xx_smartreflex_hwmod_class,
.main_clk = "sr2_fck",
.prcm = {
@@ -1388,7 +1420,7 @@ static struct omap_hwmod omap34xx_sr2_hwmod = {
};
static struct omap_hwmod omap36xx_sr2_hwmod = {
- .name = "sr2",
+ .name = "smartreflex_core",
.class = &omap36xx_smartreflex_hwmod_class,
.main_clk = "sr2_fck",
.prcm = {
@@ -1638,25 +1670,20 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
/* usb_otg_hs */
static struct omap_hwmod_irq_info am35xx_usbhsotg_mpu_irqs[] = {
-
{ .name = "mc", .irq = 71 },
{ .irq = -1 }
};
static struct omap_hwmod_class am35xx_usbotg_class = {
.name = "am35xx_usbotg",
- .sysc = NULL,
};
static struct omap_hwmod am35xx_usbhsotg_hwmod = {
.name = "am35x_otg_hs",
.mpu_irqs = am35xx_usbhsotg_mpu_irqs,
- .main_clk = NULL,
- .prcm = {
- .omap2 = {
- },
- },
+ .main_clk = "hsotgusb_fck",
.class = &am35xx_usbotg_class,
+ .flags = HWMOD_NO_IDLEST,
};
/* MMC/SD/SDIO common */
@@ -2097,9 +2124,10 @@ static struct omap_hwmod_ocp_if omap3xxx_usbhsotg__l3 = {
static struct omap_hwmod_ocp_if am35xx_usbhsotg__l3 = {
.master = &am35xx_usbhsotg_hwmod,
.slave = &omap3xxx_l3_main_hwmod,
- .clk = "core_l3_ick",
+ .clk = "hsotgusb_ick",
.user = OCP_USER_MPU,
};
+
/* L4_CORE -> L4_WKUP interface */
static struct omap_hwmod_ocp_if omap3xxx_l4_core__l4_wkup = {
.master = &omap3xxx_l4_core_hwmod,
@@ -2243,6 +2271,7 @@ static struct omap_hwmod_addr_space am35xx_uart4_addr_space[] = {
.pa_end = OMAP3_UART4_AM35XX_BASE + SZ_1K - 1,
.flags = ADDR_MAP_ON_INIT | ADDR_TYPE_RT,
},
+ { }
};
static struct omap_hwmod_ocp_if am35xx_l4_core__uart4 = {
@@ -2393,7 +2422,7 @@ static struct omap_hwmod_addr_space am35xx_usbhsotg_addrs[] = {
static struct omap_hwmod_ocp_if am35xx_l4_core__usbhsotg = {
.master = &omap3xxx_l4_core_hwmod,
.slave = &am35xx_usbhsotg_hwmod,
- .clk = "l4_ick",
+ .clk = "hsotgusb_ick",
.addr = am35xx_usbhsotg_addrs,
.user = OCP_USER_MPU,
};
@@ -3138,6 +3167,107 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_wkup__counter_32k = {
.user = OCP_USER_MPU | OCP_USER_SDMA,
};
+/* am35xx has Davinci MDIO & EMAC */
+static struct omap_hwmod_class am35xx_mdio_class = {
+ .name = "davinci_mdio",
+};
+
+static struct omap_hwmod am35xx_mdio_hwmod = {
+ .name = "davinci_mdio",
+ .class = &am35xx_mdio_class,
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/*
+ * XXX Should be connected to an IPSS hwmod, not the L3 directly;
+ * but this will probably require some additional hwmod core support,
+ * so is left as a future to-do item.
+ */
+static struct omap_hwmod_ocp_if am35xx_mdio__l3 = {
+ .master = &am35xx_mdio_hwmod,
+ .slave = &omap3xxx_l3_main_hwmod,
+ .clk = "emac_fck",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am35xx_mdio_addrs[] = {
+ {
+ .pa_start = AM35XX_IPSS_MDIO_BASE,
+ .pa_end = AM35XX_IPSS_MDIO_BASE + SZ_4K - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+/* l4_core -> davinci mdio */
+/*
+ * XXX Should be connected to an IPSS hwmod, not the L4_CORE directly;
+ * but this will probably require some additional hwmod core support,
+ * so is left as a future to-do item.
+ */
+static struct omap_hwmod_ocp_if am35xx_l4_core__mdio = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &am35xx_mdio_hwmod,
+ .clk = "emac_fck",
+ .addr = am35xx_mdio_addrs,
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_irq_info am35xx_emac_mpu_irqs[] = {
+ { .name = "rxthresh", .irq = INT_35XX_EMAC_C0_RXTHRESH_IRQ },
+ { .name = "rx_pulse", .irq = INT_35XX_EMAC_C0_RX_PULSE_IRQ },
+ { .name = "tx_pulse", .irq = INT_35XX_EMAC_C0_TX_PULSE_IRQ },
+ { .name = "misc_pulse", .irq = INT_35XX_EMAC_C0_MISC_PULSE_IRQ },
+ { .irq = -1 }
+};
+
+static struct omap_hwmod_class am35xx_emac_class = {
+ .name = "davinci_emac",
+};
+
+static struct omap_hwmod am35xx_emac_hwmod = {
+ .name = "davinci_emac",
+ .mpu_irqs = am35xx_emac_mpu_irqs,
+ .class = &am35xx_emac_class,
+ .flags = HWMOD_NO_IDLEST,
+};
+
+/* l3_core -> davinci emac interface */
+/*
+ * XXX Should be connected to an IPSS hwmod, not the L3 directly;
+ * but this will probably require some additional hwmod core support,
+ * so is left as a future to-do item.
+ */
+static struct omap_hwmod_ocp_if am35xx_emac__l3 = {
+ .master = &am35xx_emac_hwmod,
+ .slave = &omap3xxx_l3_main_hwmod,
+ .clk = "emac_ick",
+ .user = OCP_USER_MPU,
+};
+
+static struct omap_hwmod_addr_space am35xx_emac_addrs[] = {
+ {
+ .pa_start = AM35XX_IPSS_EMAC_BASE,
+ .pa_end = AM35XX_IPSS_EMAC_BASE + 0x30000 - 1,
+ .flags = ADDR_TYPE_RT,
+ },
+ { }
+};
+
+/* l4_core -> davinci emac */
+/*
+ * XXX Should be connected to an IPSS hwmod, not the L4_CORE directly;
+ * but this will probably require some additional hwmod core support,
+ * so is left as a future to-do item.
+ */
+static struct omap_hwmod_ocp_if am35xx_l4_core__emac = {
+ .master = &omap3xxx_l4_core_hwmod,
+ .slave = &am35xx_emac_hwmod,
+ .clk = "emac_ick",
+ .addr = am35xx_emac_addrs,
+ .user = OCP_USER_MPU,
+};
+
static struct omap_hwmod_ocp_if *omap3xxx_hwmod_ocp_ifs[] __initdata = {
&omap3xxx_l3_main__l4_core,
&omap3xxx_l3_main__l4_per,
@@ -3266,6 +3396,10 @@ static struct omap_hwmod_ocp_if *am35xx_hwmod_ocp_ifs[] __initdata = {
&omap3xxx_l4_core__usb_tll_hs,
&omap3xxx_l4_core__es3plus_mmc1,
&omap3xxx_l4_core__es3plus_mmc2,
+ &am35xx_mdio__l3,
+ &am35xx_l4_core__mdio,
+ &am35xx_emac__l3,
+ &am35xx_l4_core__emac,
NULL
};
@@ -3283,6 +3417,8 @@ int __init omap3xxx_hwmod_init(void)
struct omap_hwmod_ocp_if **h = NULL;
unsigned int rev;
+ omap_hwmod_init();
+
/* Register hwmod links common to all OMAP3 */
r = omap_hwmod_register_links(omap3xxx_hwmod_ocp_ifs);
if (r < 0)
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index f30e861ce6d9..242aee498ceb 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -19,6 +19,7 @@
*/
#include <linux/io.h>
+#include <linux/power/smartreflex.h>
#include <plat/omap_hwmod.h>
#include <plat/cpu.h>
@@ -32,8 +33,6 @@
#include <plat/common.h>
#include "omap_hwmod_common_data.h"
-
-#include "smartreflex.h"
#include "cm1_44xx.h"
#include "cm2_44xx.h"
#include "prm44xx.h"
@@ -1928,7 +1927,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp1_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp1_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp1_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp1_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp1_hwmod = {
@@ -1963,7 +1962,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp2_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp2_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp2_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp2_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp2_hwmod = {
@@ -1998,7 +1997,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp3_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp3_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp3_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp3_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp3_hwmod = {
@@ -2033,7 +2032,7 @@ static struct omap_hwmod_dma_info omap44xx_mcbsp4_sdma_reqs[] = {
static struct omap_hwmod_opt_clk mcbsp4_opt_clks[] = {
{ .role = "pad_fck", .clk = "pad_clks_ck" },
- { .role = "prcm_clk", .clk = "mcbsp4_sync_mux_ck" },
+ { .role = "prcm_fck", .clk = "mcbsp4_sync_mux_ck" },
};
static struct omap_hwmod omap44xx_mcbsp4_hwmod = {
@@ -2544,14 +2543,12 @@ static struct omap_hwmod omap44xx_prcm_mpu_hwmod = {
static struct omap_hwmod omap44xx_cm_core_aon_hwmod = {
.name = "cm_core_aon",
.class = &omap44xx_prcm_hwmod_class,
- .clkdm_name = "cm_clkdm",
};
/* cm_core */
static struct omap_hwmod omap44xx_cm_core_hwmod = {
.name = "cm_core",
.class = &omap44xx_prcm_hwmod_class,
- .clkdm_name = "cm_clkdm",
};
/* prm */
@@ -2568,7 +2565,6 @@ static struct omap_hwmod_rst_info omap44xx_prm_resets[] = {
static struct omap_hwmod omap44xx_prm_hwmod = {
.name = "prm",
.class = &omap44xx_prcm_hwmod_class,
- .clkdm_name = "prm_clkdm",
.mpu_irqs = omap44xx_prm_irqs,
.rst_lines = omap44xx_prm_resets,
.rst_lines_cnt = ARRAY_SIZE(omap44xx_prm_resets),
@@ -2947,7 +2943,6 @@ static struct omap_hwmod omap44xx_timer2_hwmod = {
.modulemode = MODULEMODE_SWCTRL,
},
},
- .dev_attr = &capability_alwon_dev_attr,
};
/* timer3 */
@@ -2969,7 +2964,6 @@ static struct omap_hwmod omap44xx_timer3_hwmod = {
.modulemode = MODULEMODE_SWCTRL,
},
},
- .dev_attr = &capability_alwon_dev_attr,
};
/* timer4 */
@@ -2991,7 +2985,6 @@ static struct omap_hwmod omap44xx_timer4_hwmod = {
.modulemode = MODULEMODE_SWCTRL,
},
},
- .dev_attr = &capability_alwon_dev_attr,
};
/* timer5 */
@@ -3013,7 +3006,6 @@ static struct omap_hwmod omap44xx_timer5_hwmod = {
.modulemode = MODULEMODE_SWCTRL,
},
},
- .dev_attr = &capability_alwon_dev_attr,
};
/* timer6 */
@@ -3036,7 +3028,6 @@ static struct omap_hwmod omap44xx_timer6_hwmod = {
.modulemode = MODULEMODE_SWCTRL,
},
},
- .dev_attr = &capability_alwon_dev_attr,
};
/* timer7 */
@@ -3058,7 +3049,6 @@ static struct omap_hwmod omap44xx_timer7_hwmod = {
.modulemode = MODULEMODE_SWCTRL,
},
},
- .dev_attr = &capability_alwon_dev_attr,
};
/* timer8 */
@@ -3864,7 +3854,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_2 = {
};
/* usb_host_fs -> l3_main_2 */
-static struct omap_hwmod_ocp_if omap44xx_usb_host_fs__l3_main_2 = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_usb_host_fs__l3_main_2 = {
.master = &omap44xx_usb_host_fs_hwmod,
.slave = &omap44xx_l3_main_2_hwmod,
.clk = "l3_div_ck",
@@ -3922,7 +3912,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_cfg__l3_main_3 = {
};
/* aess -> l4_abe */
-static struct omap_hwmod_ocp_if omap44xx_aess__l4_abe = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_aess__l4_abe = {
.master = &omap44xx_aess_hwmod,
.slave = &omap44xx_l4_abe_hwmod,
.clk = "ocp_abe_iclk",
@@ -4013,7 +4003,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_addrs[] = {
};
/* l4_abe -> aess */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess = {
.master = &omap44xx_l4_abe_hwmod,
.slave = &omap44xx_aess_hwmod,
.clk = "ocp_abe_iclk",
@@ -4031,7 +4021,7 @@ static struct omap_hwmod_addr_space omap44xx_aess_dma_addrs[] = {
};
/* l4_abe -> aess (dma) */
-static struct omap_hwmod_ocp_if omap44xx_l4_abe__aess_dma = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_abe__aess_dma = {
.master = &omap44xx_l4_abe_hwmod,
.slave = &omap44xx_aess_hwmod,
.clk = "ocp_abe_iclk",
@@ -5857,7 +5847,7 @@ static struct omap_hwmod_addr_space omap44xx_usb_host_fs_addrs[] = {
};
/* l4_cfg -> usb_host_fs */
-static struct omap_hwmod_ocp_if omap44xx_l4_cfg__usb_host_fs = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l4_cfg__usb_host_fs = {
.master = &omap44xx_l4_cfg_hwmod,
.slave = &omap44xx_usb_host_fs_hwmod,
.clk = "l4_div_ck",
@@ -6014,13 +6004,13 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_iva__l3_main_2,
&omap44xx_l3_main_1__l3_main_2,
&omap44xx_l4_cfg__l3_main_2,
- &omap44xx_usb_host_fs__l3_main_2,
+ /* &omap44xx_usb_host_fs__l3_main_2, */
&omap44xx_usb_host_hs__l3_main_2,
&omap44xx_usb_otg_hs__l3_main_2,
&omap44xx_l3_main_1__l3_main_3,
&omap44xx_l3_main_2__l3_main_3,
&omap44xx_l4_cfg__l3_main_3,
- &omap44xx_aess__l4_abe,
+ /* &omap44xx_aess__l4_abe, */
&omap44xx_dsp__l4_abe,
&omap44xx_l3_main_1__l4_abe,
&omap44xx_mpu__l4_abe,
@@ -6029,8 +6019,8 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_cfg__l4_wkup,
&omap44xx_mpu__mpu_private,
&omap44xx_l4_cfg__ocp_wp_noc,
- &omap44xx_l4_abe__aess,
- &omap44xx_l4_abe__aess_dma,
+ /* &omap44xx_l4_abe__aess, */
+ /* &omap44xx_l4_abe__aess_dma, */
&omap44xx_l3_main_2__c2c,
&omap44xx_l4_wkup__counter_32k,
&omap44xx_l4_cfg__ctrl_module_core,
@@ -6136,7 +6126,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
&omap44xx_l4_per__uart2,
&omap44xx_l4_per__uart3,
&omap44xx_l4_per__uart4,
- &omap44xx_l4_cfg__usb_host_fs,
+ /* &omap44xx_l4_cfg__usb_host_fs, */
&omap44xx_l4_cfg__usb_host_hs,
&omap44xx_l4_cfg__usb_otg_hs,
&omap44xx_l4_cfg__usb_tll_hs,
@@ -6148,6 +6138,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
int __init omap44xx_hwmod_init(void)
{
+ omap_hwmod_init();
return omap_hwmod_register_links(omap44xx_hwmod_ocp_ifs);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod_common_data.c b/arch/arm/mach-omap2/omap_hwmod_common_data.c
index 51e5418899fb..9f1ccdc8cc8c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_common_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_common_data.c
@@ -47,6 +47,16 @@ struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2 = {
.midle_shift = SYSC_TYPE2_MIDLEMODE_SHIFT,
.sidle_shift = SYSC_TYPE2_SIDLEMODE_SHIFT,
.srst_shift = SYSC_TYPE2_SOFTRESET_SHIFT,
+ .dmadisable_shift = SYSC_TYPE2_DMADISABLE_SHIFT,
+};
+
+/**
+ * struct omap_hwmod_sysc_type3 - TYPE3 sysconfig scheme.
+ * Used by some IPs on AM33xx
+ */
+struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3 = {
+ .midle_shift = SYSC_TYPE3_MIDLEMODE_SHIFT,
+ .sidle_shift = SYSC_TYPE3_SIDLEMODE_SHIFT,
};
struct omap_dss_dispc_dev_attr omap2_3_dss_dispc_dev_attr = {
diff --git a/arch/arm/mach-omap2/omap_l3_noc.h b/arch/arm/mach-omap2/omap_l3_noc.h
index 90b50984cd2e..a6ce34dc4814 100644
--- a/arch/arm/mach-omap2/omap_l3_noc.h
+++ b/arch/arm/mach-omap2/omap_l3_noc.h
@@ -51,7 +51,9 @@ static u32 l3_targ_inst_clk1[] = {
0x200, /* DMM2 */
0x300, /* ABE */
0x400, /* L4CFG */
- 0x600 /* CLK2 PWR DISC */
+ 0x600, /* CLK2 PWR DISC */
+ 0x0, /* Host CLK1 */
+ 0x900 /* L4 Wakeup */
};
static u32 l3_targ_inst_clk2[] = {
@@ -72,11 +74,16 @@ static u32 l3_targ_inst_clk2[] = {
0xE00, /* missing in TRM corresponds to AES2*/
0xC00, /* L4 PER3 */
0xA00, /* L4 PER1*/
- 0xB00 /* L4 PER2*/
+ 0xB00, /* L4 PER2*/
+ 0x0, /* HOST CLK2 */
+ 0x1800, /* CAL */
+ 0x1700 /* LLI */
};
static u32 l3_targ_inst_clk3[] = {
- 0x0100 /* EMUSS */
+ 0x0100 /* EMUSS */,
+ 0x0300, /* DEBUGSS_CT_TBR */
+ 0x0 /* HOST CLK3 */
};
static struct l3_masters_data {
@@ -110,13 +117,15 @@ static struct l3_masters_data {
{ 0xC8, "USBHOSTFS"}
};
-static char *l3_targ_inst_name[L3_MODULES][18] = {
+static char *l3_targ_inst_name[L3_MODULES][21] = {
{
"DMM1",
"DMM2",
"ABE",
"L4CFG",
"CLK2 PWR DISC",
+ "HOST CLK1",
+ "L4 WAKEUP"
},
{
"CORTEX M3" ,
@@ -137,9 +146,14 @@ static char *l3_targ_inst_name[L3_MODULES][18] = {
"L4 PER3",
"L4 PER1",
"L4 PER2",
+ "HOST CLK2",
+ "CAL",
+ "LLI"
},
{
"EMUSS",
+ "DEBUG SOURCE",
+ "HOST CLK3"
},
};
diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
index de6d46451746..d8f6dbf45d16 100644
--- a/arch/arm/mach-omap2/opp.c
+++ b/arch/arm/mach-omap2/opp.c
@@ -53,7 +53,7 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
omap_table_init = 1;
/* Lets now register with OPP library */
- for (i = 0; i < opp_def_size; i++) {
+ for (i = 0; i < opp_def_size; i++, opp_def++) {
struct omap_hwmod *oh;
struct device *dev;
@@ -86,7 +86,6 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
__func__, opp_def->freq,
opp_def->hwmod_name, i, r);
}
- opp_def++;
}
return 0;
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 78564895e914..686137d164da 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -15,12 +15,25 @@
#include "powerdomain.h"
+#ifdef CONFIG_CPU_IDLE
+extern int __init omap3_idle_init(void);
+extern int __init omap4_idle_init(void);
+#else
+static inline int omap3_idle_init(void)
+{
+ return 0;
+}
+
+static inline int omap4_idle_init(void)
+{
+ return 0;
+}
+#endif
+
extern void *omap3_secure_ram_storage;
extern void omap3_pm_off_mode_enable(int);
extern void omap_sram_idle(void);
extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state);
-extern int omap3_idle_init(void);
-extern int omap4_idle_init(void);
extern int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused);
extern int (*omap_pm_suspend)(void);
@@ -88,7 +101,7 @@ extern void enable_omap3630_toggle_l2_on_restore(void);
static inline void enable_omap3630_toggle_l2_on_restore(void) { }
#endif /* defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3) */
-#ifdef CONFIG_OMAP_SMARTREFLEX
+#ifdef CONFIG_POWER_AVS_OMAP
extern int omap_devinit_smartreflex(void);
extern void omap_enable_smartreflex_on_init(void);
#else
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 3a595e899724..e4fc88c65dbd 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -70,34 +70,6 @@ void (*omap3_do_wfi_sram)(void);
static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
-static struct powerdomain *cam_pwrdm;
-
-static void omap3_enable_io_chain(void)
-{
- int timeout = 0;
-
- omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
- PM_WKEN);
- /* Do a readback to assure write has been done */
- omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN);
-
- while (!(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
- OMAP3430_ST_IO_CHAIN_MASK)) {
- timeout++;
- if (timeout > 1000) {
- pr_err("Wake up daisy chain activation failed.\n");
- return;
- }
- omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
- WKUP_MOD, PM_WKEN);
- }
-}
-
-static void omap3_disable_io_chain(void)
-{
- omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
- PM_WKEN);
-}
static void omap3_core_save_context(void)
{
@@ -299,24 +271,22 @@ void omap_sram_idle(void)
/* Enable IO-PAD and IO-CHAIN wakeups */
per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
- if (omap3_has_io_wakeup() &&
- (per_next_state < PWRDM_POWER_ON ||
- core_next_state < PWRDM_POWER_ON)) {
- omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
- if (omap3_has_io_chain_ctrl())
- omap3_enable_io_chain();
- }
- pwrdm_pre_transition();
+ if (mpu_next_state < PWRDM_POWER_ON) {
+ pwrdm_pre_transition(mpu_pwrdm);
+ pwrdm_pre_transition(neon_pwrdm);
+ }
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
+ pwrdm_pre_transition(per_pwrdm);
per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
omap2_gpio_prepare_for_idle(per_going_off);
}
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
+ pwrdm_pre_transition(core_pwrdm);
if (core_next_state == PWRDM_POWER_OFF) {
omap3_core_save_context();
omap3_cm_save_context();
@@ -369,26 +339,20 @@ void omap_sram_idle(void)
omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
OMAP3430_GR_MOD,
OMAP3_PRM_VOLTCTRL_OFFSET);
+ pwrdm_post_transition(core_pwrdm);
}
omap3_intc_resume_idle();
- pwrdm_post_transition();
-
/* PER */
- if (per_next_state < PWRDM_POWER_ON)
+ if (per_next_state < PWRDM_POWER_ON) {
omap2_gpio_resume_after_idle();
-
- /* Disable IO-PAD and IO-CHAIN wakeup */
- if (omap3_has_io_wakeup() &&
- (per_next_state < PWRDM_POWER_ON ||
- core_next_state < PWRDM_POWER_ON)) {
- omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
- PM_WKEN);
- if (omap3_has_io_chain_ctrl())
- omap3_disable_io_chain();
+ pwrdm_post_transition(per_pwrdm);
}
- clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
+ if (mpu_next_state < PWRDM_POWER_ON) {
+ pwrdm_post_transition(mpu_pwrdm);
+ pwrdm_post_transition(neon_pwrdm);
+ }
}
static void omap3_pm_idle(void)
@@ -581,10 +545,13 @@ static void __init prcm_setup_regs(void)
OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
/* Don't attach IVA interrupts */
- omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
- omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
- omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
- omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
+ if (omap3_has_iva()) {
+ omap2_prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
+ omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
+ omap2_prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
+ omap2_prm_write_mod_reg(0, OMAP3430_PER_MOD,
+ OMAP3430_PM_IVAGRPSEL);
+ }
/* Clear any pending 'reset' flags */
omap2_prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
@@ -598,7 +565,9 @@ static void __init prcm_setup_regs(void)
/* Clear any pending PRCM interrupts */
omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
- omap3_iva_idle();
+ if (omap3_has_iva())
+ omap3_iva_idle();
+
omap3_d2d_idle();
}
@@ -749,7 +718,6 @@ int __init omap3_pm_init(void)
neon_pwrdm = pwrdm_lookup("neon_pwrdm");
per_pwrdm = pwrdm_lookup("per_pwrdm");
core_pwrdm = pwrdm_lookup("core_pwrdm");
- cam_pwrdm = pwrdm_lookup("cam_pwrdm");
neon_clkdm = clkdm_lookup("neon_clkdm");
mpu_clkdm = clkdm_lookup("mpu_clkdm");
diff --git a/arch/arm/mach-omap2/powerdomain.c b/arch/arm/mach-omap2/powerdomain.c
index 96114901b932..69b36e185e9b 100644
--- a/arch/arm/mach-omap2/powerdomain.c
+++ b/arch/arm/mach-omap2/powerdomain.c
@@ -526,7 +526,8 @@ int pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
*
* Return the powerdomain @pwrdm's current power state. Returns -EINVAL
* if the powerdomain pointer is null or returns the current power state
- * upon success.
+ * upon success. Note that if the power domain only supports the ON state
+ * then just return ON as the current state.
*/
int pwrdm_read_pwrst(struct powerdomain *pwrdm)
{
@@ -535,6 +536,9 @@ int pwrdm_read_pwrst(struct powerdomain *pwrdm)
if (!pwrdm)
return -EINVAL;
+ if (pwrdm->pwrsts == PWRSTS_ON)
+ return PWRDM_POWER_ON;
+
if (arch_pwrdm && arch_pwrdm->pwrdm_read_pwrst)
ret = arch_pwrdm->pwrdm_read_pwrst(pwrdm);
@@ -981,15 +985,23 @@ int pwrdm_state_switch(struct powerdomain *pwrdm)
return ret;
}
-int pwrdm_pre_transition(void)
+int pwrdm_pre_transition(struct powerdomain *pwrdm)
{
- pwrdm_for_each(_pwrdm_pre_transition_cb, NULL);
+ if (pwrdm)
+ _pwrdm_pre_transition_cb(pwrdm, NULL);
+ else
+ pwrdm_for_each(_pwrdm_pre_transition_cb, NULL);
+
return 0;
}
-int pwrdm_post_transition(void)
+int pwrdm_post_transition(struct powerdomain *pwrdm)
{
- pwrdm_for_each(_pwrdm_post_transition_cb, NULL);
+ if (pwrdm)
+ _pwrdm_post_transition_cb(pwrdm, NULL);
+ else
+ pwrdm_for_each(_pwrdm_post_transition_cb, NULL);
+
return 0;
}
diff --git a/arch/arm/mach-omap2/powerdomain.h b/arch/arm/mach-omap2/powerdomain.h
index 8f88d65c46ea..baee90608d11 100644
--- a/arch/arm/mach-omap2/powerdomain.h
+++ b/arch/arm/mach-omap2/powerdomain.h
@@ -67,9 +67,9 @@
/*
* Maximum number of clockdomains that can be associated with a powerdomain.
- * CORE powerdomain on OMAP4 is the worst case
+ * PER powerdomain on AM33XX is the worst case
*/
-#define PWRDM_MAX_CLKDMS 9
+#define PWRDM_MAX_CLKDMS 11
/* XXX A completely arbitrary number. What is reasonable here? */
#define PWRDM_TRANSITION_BAILOUT 100000
@@ -92,6 +92,15 @@ struct powerdomain;
* @pwrdm_clkdms: Clockdomains in this powerdomain
* @node: list_head linking all powerdomains
* @voltdm_node: list_head linking all powerdomains in a voltagedomain
+ * @pwrstctrl_offs: (AM33XX only) XXX_PWRSTCTRL reg offset from prcm_offs
+ * @pwrstst_offs: (AM33XX only) XXX_PWRSTST reg offset from prcm_offs
+ * @logicretstate_mask: (AM33XX only) mask for logic retention bitfield
+ * in @pwrstctrl_offs
+ * @mem_on_mask: (AM33XX only) mask for mem on bitfield in @pwrstctrl_offs
+ * @mem_ret_mask: (AM33XX only) mask for mem ret bitfield in @pwrstctrl_offs
+ * @mem_pwrst_mask: (AM33XX only) mask for mem state bitfield in @pwrstst_offs
+ * @mem_retst_mask: (AM33XX only) mask for mem retention state bitfield
+ * in @pwrstctrl_offs
* @state:
* @state_counter:
* @timer:
@@ -121,6 +130,14 @@ struct powerdomain {
unsigned ret_logic_off_counter;
unsigned ret_mem_off_counter[PWRDM_MAX_MEM_BANKS];
+ const u8 pwrstctrl_offs;
+ const u8 pwrstst_offs;
+ const u32 logicretstate_mask;
+ const u32 mem_on_mask[PWRDM_MAX_MEM_BANKS];
+ const u32 mem_ret_mask[PWRDM_MAX_MEM_BANKS];
+ const u32 mem_pwrst_mask[PWRDM_MAX_MEM_BANKS];
+ const u32 mem_retst_mask[PWRDM_MAX_MEM_BANKS];
+
#ifdef CONFIG_PM_DEBUG
s64 timer;
s64 state_timer[PWRDM_MAX_PWRSTS];
@@ -213,8 +230,8 @@ bool pwrdm_has_hdwr_sar(struct powerdomain *pwrdm);
int pwrdm_wait_transition(struct powerdomain *pwrdm);
int pwrdm_state_switch(struct powerdomain *pwrdm);
-int pwrdm_pre_transition(void);
-int pwrdm_post_transition(void);
+int pwrdm_pre_transition(struct powerdomain *pwrdm);
+int pwrdm_post_transition(struct powerdomain *pwrdm);
int pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm);
int pwrdm_get_context_loss_count(struct powerdomain *pwrdm);
bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm);
@@ -222,10 +239,12 @@ bool pwrdm_can_ever_lose_context(struct powerdomain *pwrdm);
extern void omap242x_powerdomains_init(void);
extern void omap243x_powerdomains_init(void);
extern void omap3xxx_powerdomains_init(void);
+extern void am33xx_powerdomains_init(void);
extern void omap44xx_powerdomains_init(void);
extern struct pwrdm_ops omap2_pwrdm_operations;
extern struct pwrdm_ops omap3_pwrdm_operations;
+extern struct pwrdm_ops am33xx_pwrdm_operations;
extern struct pwrdm_ops omap4_pwrdm_operations;
/* Common Internal functions used across OMAP rev's */
diff --git a/arch/arm/mach-omap2/powerdomain33xx.c b/arch/arm/mach-omap2/powerdomain33xx.c
new file mode 100644
index 000000000000..67c5663899b6
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomain33xx.c
@@ -0,0 +1,229 @@
+/*
+ * AM33XX Powerdomain control
+ *
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * Derived from mach-omap2/powerdomain44xx.c written by Rajendra Nayak
+ * <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+
+#include <plat/prcm.h>
+
+#include "powerdomain.h"
+#include "prm33xx.h"
+#include "prm-regbits-33xx.h"
+
+
+static int am33xx_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
+{
+ am33xx_prm_rmw_reg_bits(OMAP_POWERSTATE_MASK,
+ (pwrst << OMAP_POWERSTATE_SHIFT),
+ pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
+ return 0;
+}
+
+static int am33xx_pwrdm_read_next_pwrst(struct powerdomain *pwrdm)
+{
+ u32 v;
+
+ v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
+ v &= OMAP_POWERSTATE_MASK;
+ v >>= OMAP_POWERSTATE_SHIFT;
+
+ return v;
+}
+
+static int am33xx_pwrdm_read_pwrst(struct powerdomain *pwrdm)
+{
+ u32 v;
+
+ v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
+ v &= OMAP_POWERSTATEST_MASK;
+ v >>= OMAP_POWERSTATEST_SHIFT;
+
+ return v;
+}
+
+static int am33xx_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm)
+{
+ u32 v;
+
+ v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
+ v &= AM33XX_LASTPOWERSTATEENTERED_MASK;
+ v >>= AM33XX_LASTPOWERSTATEENTERED_SHIFT;
+
+ return v;
+}
+
+static int am33xx_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm)
+{
+ am33xx_prm_rmw_reg_bits(AM33XX_LOWPOWERSTATECHANGE_MASK,
+ (1 << AM33XX_LOWPOWERSTATECHANGE_SHIFT),
+ pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
+ return 0;
+}
+
+static int am33xx_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm)
+{
+ am33xx_prm_rmw_reg_bits(AM33XX_LASTPOWERSTATEENTERED_MASK,
+ AM33XX_LASTPOWERSTATEENTERED_MASK,
+ pwrdm->prcm_offs, pwrdm->pwrstst_offs);
+ return 0;
+}
+
+static int am33xx_pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst)
+{
+ u32 m;
+
+ m = pwrdm->logicretstate_mask;
+ if (!m)
+ return -EINVAL;
+
+ am33xx_prm_rmw_reg_bits(m, (pwrst << __ffs(m)),
+ pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
+
+ return 0;
+}
+
+static int am33xx_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm)
+{
+ u32 v;
+
+ v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
+ v &= AM33XX_LOGICSTATEST_MASK;
+ v >>= AM33XX_LOGICSTATEST_SHIFT;
+
+ return v;
+}
+
+static int am33xx_pwrdm_read_logic_retst(struct powerdomain *pwrdm)
+{
+ u32 v, m;
+
+ m = pwrdm->logicretstate_mask;
+ if (!m)
+ return -EINVAL;
+
+ v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
+ v &= m;
+ v >>= __ffs(m);
+
+ return v;
+}
+
+static int am33xx_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 bank,
+ u8 pwrst)
+{
+ u32 m;
+
+ m = pwrdm->mem_on_mask[bank];
+ if (!m)
+ return -EINVAL;
+
+ am33xx_prm_rmw_reg_bits(m, (pwrst << __ffs(m)),
+ pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
+
+ return 0;
+}
+
+static int am33xx_pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank,
+ u8 pwrst)
+{
+ u32 m;
+
+ m = pwrdm->mem_ret_mask[bank];
+ if (!m)
+ return -EINVAL;
+
+ am33xx_prm_rmw_reg_bits(m, (pwrst << __ffs(m)),
+ pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
+
+ return 0;
+}
+
+static int am33xx_pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank)
+{
+ u32 m, v;
+
+ m = pwrdm->mem_pwrst_mask[bank];
+ if (!m)
+ return -EINVAL;
+
+ v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs);
+ v &= m;
+ v >>= __ffs(m);
+
+ return v;
+}
+
+static int am33xx_pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank)
+{
+ u32 m, v;
+
+ m = pwrdm->mem_retst_mask[bank];
+ if (!m)
+ return -EINVAL;
+
+ v = am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstctrl_offs);
+ v &= m;
+ v >>= __ffs(m);
+
+ return v;
+}
+
+static int am33xx_pwrdm_wait_transition(struct powerdomain *pwrdm)
+{
+ u32 c = 0;
+
+ /*
+ * REVISIT: pwrdm_wait_transition() may be better implemented
+ * via a callback and a periodic timer check -- how long do we expect
+ * powerdomain transitions to take?
+ */
+
+ /* XXX Is this udelay() value meaningful? */
+ while ((am33xx_prm_read_reg(pwrdm->prcm_offs, pwrdm->pwrstst_offs)
+ & OMAP_INTRANSITION_MASK) &&
+ (c++ < PWRDM_TRANSITION_BAILOUT))
+ udelay(1);
+
+ if (c > PWRDM_TRANSITION_BAILOUT) {
+ pr_err("powerdomain: %s: waited too long to complete transition\n",
+ pwrdm->name);
+ return -EAGAIN;
+ }
+
+ pr_debug("powerdomain: completed transition in %d loops\n", c);
+
+ return 0;
+}
+
+struct pwrdm_ops am33xx_pwrdm_operations = {
+ .pwrdm_set_next_pwrst = am33xx_pwrdm_set_next_pwrst,
+ .pwrdm_read_next_pwrst = am33xx_pwrdm_read_next_pwrst,
+ .pwrdm_read_pwrst = am33xx_pwrdm_read_pwrst,
+ .pwrdm_read_prev_pwrst = am33xx_pwrdm_read_prev_pwrst,
+ .pwrdm_set_logic_retst = am33xx_pwrdm_set_logic_retst,
+ .pwrdm_read_logic_pwrst = am33xx_pwrdm_read_logic_pwrst,
+ .pwrdm_read_logic_retst = am33xx_pwrdm_read_logic_retst,
+ .pwrdm_clear_all_prev_pwrst = am33xx_pwrdm_clear_all_prev_pwrst,
+ .pwrdm_set_lowpwrstchange = am33xx_pwrdm_set_lowpwrstchange,
+ .pwrdm_read_mem_pwrst = am33xx_pwrdm_read_mem_pwrst,
+ .pwrdm_read_mem_retst = am33xx_pwrdm_read_mem_retst,
+ .pwrdm_set_mem_onst = am33xx_pwrdm_set_mem_onst,
+ .pwrdm_set_mem_retst = am33xx_pwrdm_set_mem_retst,
+ .pwrdm_wait_transition = am33xx_pwrdm_wait_transition,
+};
diff --git a/arch/arm/mach-omap2/powerdomains33xx_data.c b/arch/arm/mach-omap2/powerdomains33xx_data.c
new file mode 100644
index 000000000000..869adb82569e
--- /dev/null
+++ b/arch/arm/mach-omap2/powerdomains33xx_data.c
@@ -0,0 +1,185 @@
+/*
+ * AM33XX Power domain data
+ *
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "powerdomain.h"
+#include "prcm-common.h"
+#include "prm-regbits-33xx.h"
+#include "prm33xx.h"
+
+static struct powerdomain gfx_33xx_pwrdm = {
+ .name = "gfx_pwrdm",
+ .voltdm = { .name = "core" },
+ .prcm_offs = AM33XX_PRM_GFX_MOD,
+ .pwrstctrl_offs = AM33XX_PM_GFX_PWRSTCTRL_OFFSET,
+ .pwrstst_offs = AM33XX_PM_GFX_PWRSTST_OFFSET,
+ .pwrsts = PWRSTS_OFF_RET_ON,
+ .pwrsts_logic_ret = PWRSTS_OFF_RET,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .banks = 1,
+ .logicretstate_mask = AM33XX_LOGICRETSTATE_MASK,
+ .mem_on_mask = {
+ [0] = AM33XX_GFX_MEM_ONSTATE_MASK, /* gfx_mem */
+ },
+ .mem_ret_mask = {
+ [0] = AM33XX_GFX_MEM_RETSTATE_MASK, /* gfx_mem */
+ },
+ .mem_pwrst_mask = {
+ [0] = AM33XX_GFX_MEM_STATEST_MASK, /* gfx_mem */
+ },
+ .mem_retst_mask = {
+ [0] = AM33XX_GFX_MEM_RETSTATE_MASK, /* gfx_mem */
+ },
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_OFF_RET, /* gfx_mem */
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* gfx_mem */
+ },
+};
+
+static struct powerdomain rtc_33xx_pwrdm = {
+ .name = "rtc_pwrdm",
+ .voltdm = { .name = "rtc" },
+ .prcm_offs = AM33XX_PRM_RTC_MOD,
+ .pwrstctrl_offs = AM33XX_PM_RTC_PWRSTCTRL_OFFSET,
+ .pwrstst_offs = AM33XX_PM_RTC_PWRSTST_OFFSET,
+ .pwrsts = PWRSTS_ON,
+ .logicretstate_mask = AM33XX_LOGICRETSTATE_MASK,
+};
+
+static struct powerdomain wkup_33xx_pwrdm = {
+ .name = "wkup_pwrdm",
+ .voltdm = { .name = "core" },
+ .prcm_offs = AM33XX_PRM_WKUP_MOD,
+ .pwrstctrl_offs = AM33XX_PM_WKUP_PWRSTCTRL_OFFSET,
+ .pwrstst_offs = AM33XX_PM_WKUP_PWRSTST_OFFSET,
+ .pwrsts = PWRSTS_ON,
+ .logicretstate_mask = AM33XX_LOGICRETSTATE_3_3_MASK,
+};
+
+static struct powerdomain per_33xx_pwrdm = {
+ .name = "per_pwrdm",
+ .voltdm = { .name = "core" },
+ .prcm_offs = AM33XX_PRM_PER_MOD,
+ .pwrstctrl_offs = AM33XX_PM_PER_PWRSTCTRL_OFFSET,
+ .pwrstst_offs = AM33XX_PM_PER_PWRSTST_OFFSET,
+ .pwrsts = PWRSTS_OFF_RET_ON,
+ .pwrsts_logic_ret = PWRSTS_OFF_RET,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .banks = 3,
+ .logicretstate_mask = AM33XX_LOGICRETSTATE_3_3_MASK,
+ .mem_on_mask = {
+ [0] = AM33XX_PRUSS_MEM_ONSTATE_MASK, /* pruss_mem */
+ [1] = AM33XX_PER_MEM_ONSTATE_MASK, /* per_mem */
+ [2] = AM33XX_RAM_MEM_ONSTATE_MASK, /* ram_mem */
+ },
+ .mem_ret_mask = {
+ [0] = AM33XX_PRUSS_MEM_RETSTATE_MASK, /* pruss_mem */
+ [1] = AM33XX_PER_MEM_RETSTATE_MASK, /* per_mem */
+ [2] = AM33XX_RAM_MEM_RETSTATE_MASK, /* ram_mem */
+ },
+ .mem_pwrst_mask = {
+ [0] = AM33XX_PRUSS_MEM_STATEST_MASK, /* pruss_mem */
+ [1] = AM33XX_PER_MEM_STATEST_MASK, /* per_mem */
+ [2] = AM33XX_RAM_MEM_STATEST_MASK, /* ram_mem */
+ },
+ .mem_retst_mask = {
+ [0] = AM33XX_PRUSS_MEM_RETSTATE_MASK, /* pruss_mem */
+ [1] = AM33XX_PER_MEM_RETSTATE_MASK, /* per_mem */
+ [2] = AM33XX_RAM_MEM_RETSTATE_MASK, /* ram_mem */
+ },
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_OFF_RET, /* pruss_mem */
+ [1] = PWRSTS_OFF_RET, /* per_mem */
+ [2] = PWRSTS_OFF_RET, /* ram_mem */
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* pruss_mem */
+ [1] = PWRSTS_ON, /* per_mem */
+ [2] = PWRSTS_ON, /* ram_mem */
+ },
+};
+
+static struct powerdomain mpu_33xx_pwrdm = {
+ .name = "mpu_pwrdm",
+ .voltdm = { .name = "mpu" },
+ .prcm_offs = AM33XX_PRM_MPU_MOD,
+ .pwrstctrl_offs = AM33XX_PM_MPU_PWRSTCTRL_OFFSET,
+ .pwrstst_offs = AM33XX_PM_MPU_PWRSTST_OFFSET,
+ .pwrsts = PWRSTS_OFF_RET_ON,
+ .pwrsts_logic_ret = PWRSTS_OFF_RET,
+ .flags = PWRDM_HAS_LOWPOWERSTATECHANGE,
+ .banks = 3,
+ .logicretstate_mask = AM33XX_LOGICRETSTATE_MASK,
+ .mem_on_mask = {
+ [0] = AM33XX_MPU_L1_ONSTATE_MASK, /* mpu_l1 */
+ [1] = AM33XX_MPU_L2_ONSTATE_MASK, /* mpu_l2 */
+ [2] = AM33XX_MPU_RAM_ONSTATE_MASK, /* mpu_ram */
+ },
+ .mem_ret_mask = {
+ [0] = AM33XX_MPU_L1_RETSTATE_MASK, /* mpu_l1 */
+ [1] = AM33XX_MPU_L2_RETSTATE_MASK, /* mpu_l2 */
+ [2] = AM33XX_MPU_RAM_RETSTATE_MASK, /* mpu_ram */
+ },
+ .mem_pwrst_mask = {
+ [0] = AM33XX_MPU_L1_STATEST_MASK, /* mpu_l1 */
+ [1] = AM33XX_MPU_L2_STATEST_MASK, /* mpu_l2 */
+ [2] = AM33XX_MPU_RAM_STATEST_MASK, /* mpu_ram */
+ },
+ .mem_retst_mask = {
+ [0] = AM33XX_MPU_L1_RETSTATE_MASK, /* mpu_l1 */
+ [1] = AM33XX_MPU_L2_RETSTATE_MASK, /* mpu_l2 */
+ [2] = AM33XX_MPU_RAM_RETSTATE_MASK, /* mpu_ram */
+ },
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_OFF_RET, /* mpu_l1 */
+ [1] = PWRSTS_OFF_RET, /* mpu_l2 */
+ [2] = PWRSTS_OFF_RET, /* mpu_ram */
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* mpu_l1 */
+ [1] = PWRSTS_ON, /* mpu_l2 */
+ [2] = PWRSTS_ON, /* mpu_ram */
+ },
+};
+
+static struct powerdomain cefuse_33xx_pwrdm = {
+ .name = "cefuse_pwrdm",
+ .voltdm = { .name = "core" },
+ .prcm_offs = AM33XX_PRM_CEFUSE_MOD,
+ .pwrstctrl_offs = AM33XX_PM_CEFUSE_PWRSTCTRL_OFFSET,
+ .pwrstst_offs = AM33XX_PM_CEFUSE_PWRSTST_OFFSET,
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct powerdomain *powerdomains_am33xx[] __initdata = {
+ &gfx_33xx_pwrdm,
+ &rtc_33xx_pwrdm,
+ &wkup_33xx_pwrdm,
+ &per_33xx_pwrdm,
+ &mpu_33xx_pwrdm,
+ &cefuse_33xx_pwrdm,
+ NULL,
+};
+
+void __init am33xx_powerdomains_init(void)
+{
+ pwrdm_register_platform_funcs(&am33xx_pwrdm_operations);
+ pwrdm_register_pwrdms(powerdomains_am33xx);
+ pwrdm_complete_init();
+}
diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c
index fb0a0a6869d1..bb883e463078 100644
--- a/arch/arm/mach-omap2/powerdomains3xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c
@@ -71,6 +71,22 @@ static struct powerdomain mpu_3xxx_pwrdm = {
.voltdm = { .name = "mpu_iva" },
};
+static struct powerdomain mpu_am35x_pwrdm = {
+ .name = "mpu_pwrdm",
+ .prcm_offs = MPU_MOD,
+ .pwrsts = PWRSTS_ON,
+ .pwrsts_logic_ret = PWRSTS_ON,
+ .flags = PWRDM_HAS_MPU_QUIRK,
+ .banks = 1,
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_ON,
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON,
+ },
+ .voltdm = { .name = "mpu_iva" },
+};
+
/*
* The USBTLL Save-and-Restore mechanism is broken on
* 3430s up to ES3.0 and 3630ES1.0. Hence this feature
@@ -120,6 +136,23 @@ static struct powerdomain core_3xxx_es3_1_pwrdm = {
.voltdm = { .name = "core" },
};
+static struct powerdomain core_am35x_pwrdm = {
+ .name = "core_pwrdm",
+ .prcm_offs = CORE_MOD,
+ .pwrsts = PWRSTS_ON,
+ .pwrsts_logic_ret = PWRSTS_ON,
+ .banks = 2,
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_ON, /* MEM1RETSTATE */
+ [1] = PWRSTS_ON, /* MEM2RETSTATE */
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* MEM1ONSTATE */
+ [1] = PWRSTS_ON, /* MEM2ONSTATE */
+ },
+ .voltdm = { .name = "core" },
+};
+
static struct powerdomain dss_pwrdm = {
.name = "dss_pwrdm",
.prcm_offs = OMAP3430_DSS_MOD,
@@ -135,6 +168,21 @@ static struct powerdomain dss_pwrdm = {
.voltdm = { .name = "core" },
};
+static struct powerdomain dss_am35x_pwrdm = {
+ .name = "dss_pwrdm",
+ .prcm_offs = OMAP3430_DSS_MOD,
+ .pwrsts = PWRSTS_ON,
+ .pwrsts_logic_ret = PWRSTS_ON,
+ .banks = 1,
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_ON, /* MEMRETSTATE */
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* MEMONSTATE */
+ },
+ .voltdm = { .name = "core" },
+};
+
/*
* Although the 34XX TRM Rev K Table 4-371 notes that retention is a
* possible SGX powerstate, the SGX device itself does not support
@@ -156,6 +204,21 @@ static struct powerdomain sgx_pwrdm = {
.voltdm = { .name = "core" },
};
+static struct powerdomain sgx_am35x_pwrdm = {
+ .name = "sgx_pwrdm",
+ .prcm_offs = OMAP3430ES2_SGX_MOD,
+ .pwrsts = PWRSTS_ON,
+ .pwrsts_logic_ret = PWRSTS_ON,
+ .banks = 1,
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_ON, /* MEMRETSTATE */
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* MEMONSTATE */
+ },
+ .voltdm = { .name = "core" },
+};
+
static struct powerdomain cam_pwrdm = {
.name = "cam_pwrdm",
.prcm_offs = OMAP3430_CAM_MOD,
@@ -186,6 +249,21 @@ static struct powerdomain per_pwrdm = {
.voltdm = { .name = "core" },
};
+static struct powerdomain per_am35x_pwrdm = {
+ .name = "per_pwrdm",
+ .prcm_offs = OMAP3430_PER_MOD,
+ .pwrsts = PWRSTS_ON,
+ .pwrsts_logic_ret = PWRSTS_ON,
+ .banks = 1,
+ .pwrsts_mem_ret = {
+ [0] = PWRSTS_ON, /* MEMRETSTATE */
+ },
+ .pwrsts_mem_on = {
+ [0] = PWRSTS_ON, /* MEMONSTATE */
+ },
+ .voltdm = { .name = "core" },
+};
+
static struct powerdomain emu_pwrdm = {
.name = "emu_pwrdm",
.prcm_offs = OMAP3430_EMU_MOD,
@@ -200,6 +278,14 @@ static struct powerdomain neon_pwrdm = {
.voltdm = { .name = "mpu_iva" },
};
+static struct powerdomain neon_am35x_pwrdm = {
+ .name = "neon_pwrdm",
+ .prcm_offs = OMAP3430_NEON_MOD,
+ .pwrsts = PWRSTS_ON,
+ .pwrsts_logic_ret = PWRSTS_ON,
+ .voltdm = { .name = "mpu_iva" },
+};
+
static struct powerdomain usbhost_pwrdm = {
.name = "usbhost_pwrdm",
.prcm_offs = OMAP3430ES2_USBHOST_MOD,
@@ -293,6 +379,22 @@ static struct powerdomain *powerdomains_omap3430es3_1plus[] __initdata = {
NULL
};
+static struct powerdomain *powerdomains_am35x[] __initdata = {
+ &wkup_omap2_pwrdm,
+ &mpu_am35x_pwrdm,
+ &neon_am35x_pwrdm,
+ &core_am35x_pwrdm,
+ &sgx_am35x_pwrdm,
+ &dss_am35x_pwrdm,
+ &per_am35x_pwrdm,
+ &emu_pwrdm,
+ &dpll1_pwrdm,
+ &dpll3_pwrdm,
+ &dpll4_pwrdm,
+ &dpll5_pwrdm,
+ NULL
+};
+
void __init omap3xxx_powerdomains_init(void)
{
unsigned int rev;
@@ -301,21 +403,34 @@ void __init omap3xxx_powerdomains_init(void)
return;
pwrdm_register_platform_funcs(&omap3_pwrdm_operations);
- pwrdm_register_pwrdms(powerdomains_omap3430_common);
rev = omap_rev();
- if (rev == OMAP3430_REV_ES1_0)
- pwrdm_register_pwrdms(powerdomains_omap3430es1);
- else if (rev == OMAP3430_REV_ES2_0 || rev == OMAP3430_REV_ES2_1 ||
- rev == OMAP3430_REV_ES3_0 || rev == OMAP3630_REV_ES1_0)
- pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0);
- else if (rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2 ||
- rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1 ||
- rev == OMAP3630_REV_ES1_1 || rev == OMAP3630_REV_ES1_2)
- pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus);
- else
- WARN(1, "OMAP3 powerdomain init: unknown chip type\n");
+ if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
+ pwrdm_register_pwrdms(powerdomains_am35x);
+ } else {
+ pwrdm_register_pwrdms(powerdomains_omap3430_common);
+
+ switch (rev) {
+ case OMAP3430_REV_ES1_0:
+ pwrdm_register_pwrdms(powerdomains_omap3430es1);
+ break;
+ case OMAP3430_REV_ES2_0:
+ case OMAP3430_REV_ES2_1:
+ case OMAP3430_REV_ES3_0:
+ case OMAP3630_REV_ES1_0:
+ pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0);
+ break;
+ case OMAP3430_REV_ES3_1:
+ case OMAP3430_REV_ES3_1_2:
+ case OMAP3630_REV_ES1_1:
+ case OMAP3630_REV_ES1_2:
+ pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus);
+ break;
+ default:
+ WARN(1, "OMAP3 powerdomain init: unknown chip type\n");
+ }
+ }
pwrdm_complete_init();
}
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index 6da3ba483ad1..e5f0503a68b0 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -203,8 +203,8 @@
#define OMAP3430_EN_MMC2_SHIFT 25
#define OMAP3430_EN_MMC1_MASK (1 << 24)
#define OMAP3430_EN_MMC1_SHIFT 24
-#define OMAP3430_EN_UART4_MASK (1 << 23)
-#define OMAP3430_EN_UART4_SHIFT 23
+#define AM35XX_EN_UART4_MASK (1 << 23)
+#define AM35XX_EN_UART4_SHIFT 23
#define OMAP3430_EN_MCSPI4_MASK (1 << 21)
#define OMAP3430_EN_MCSPI4_SHIFT 21
#define OMAP3430_EN_MCSPI3_MASK (1 << 20)
@@ -410,13 +410,21 @@
*/
#define MAX_MODULE_HARDRESET_WAIT 10000
+/*
+ * Maximum time(us) it takes to output the signal WUCLKOUT of the last
+ * pad of the I/O ring after asserting WUCLKIN high. Tero measured
+ * the actual time at 7 to 8 microseconds on OMAP3 and 2 to 4
+ * microseconds on OMAP4, so this timeout may be too high.
+ */
+#define MAX_IOPAD_LATCH_TIME 100
+
# ifndef __ASSEMBLER__
extern void __iomem *prm_base;
extern void __iomem *cm_base;
extern void __iomem *cm2_base;
extern void __iomem *prcm_mpu_base;
-#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_OMAP5)
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
extern void omap_prm_base_init(void);
extern void omap_cm_base_init(void);
#else
diff --git a/arch/arm/mach-omap2/prcm.c b/arch/arm/mach-omap2/prcm.c
index 480f40a5ee42..053e24ed3c48 100644
--- a/arch/arm/mach-omap2/prcm.c
+++ b/arch/arm/mach-omap2/prcm.c
@@ -35,6 +35,7 @@
#include "prm2xxx_3xxx.h"
#include "prm44xx.h"
#include "prminst44xx.h"
+#include "cminst44xx.h"
#include "prm-regbits-24xx.h"
#include "prm-regbits-44xx.h"
#include "control.h"
@@ -159,8 +160,30 @@ void __init omap2_set_globals_prcm(struct omap_globals *omap2_globals)
if (omap2_globals->prcm_mpu)
prcm_mpu_base = omap2_globals->prcm_mpu;
- if (cpu_is_omap44xx()) {
+ if (cpu_is_omap44xx() || soc_is_omap54xx()) {
omap_prm_base_init();
omap_cm_base_init();
}
}
+
+/*
+ * Stubbed functions so that common files continue to build when
+ * custom builds are used
+ * XXX These are temporary and should be removed at the earliest possible
+ * opportunity
+ */
+int __weak omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs,
+ u16 clkctrl_offs)
+{
+ return 0;
+}
+
+void __weak omap4_cminst_module_enable(u8 mode, u8 part, u16 inst,
+ s16 cdoffs, u16 clkctrl_offs)
+{
+}
+
+void __weak omap4_cminst_module_disable(u8 part, u16 inst, s16 cdoffs,
+ u16 clkctrl_offs)
+{
+}
diff --git a/arch/arm/mach-omap2/prm-regbits-33xx.h b/arch/arm/mach-omap2/prm-regbits-33xx.h
new file mode 100644
index 000000000000..0221b5c20e87
--- /dev/null
+++ b/arch/arm/mach-omap2/prm-regbits-33xx.h
@@ -0,0 +1,357 @@
+/*
+ * AM33XX PRM_XXX register bits
+ *
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_33XX_H
+#define __ARCH_ARM_MACH_OMAP2_PRM_REGBITS_33XX_H
+
+#include "prm.h"
+
+/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
+#define AM33XX_ABBOFF_ACT_EXPORT_SHIFT 1
+#define AM33XX_ABBOFF_ACT_EXPORT_MASK (1 << 1)
+
+/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
+#define AM33XX_ABBOFF_SLEEP_EXPORT_SHIFT 2
+#define AM33XX_ABBOFF_SLEEP_EXPORT_MASK (1 << 2)
+
+/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
+#define AM33XX_AIPOFF_SHIFT 8
+#define AM33XX_AIPOFF_MASK (1 << 8)
+
+/* Used by PM_WKUP_PWRSTST */
+#define AM33XX_DEBUGSS_MEM_STATEST_SHIFT 17
+#define AM33XX_DEBUGSS_MEM_STATEST_MASK (0x3 << 17)
+
+/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
+#define AM33XX_DISABLE_RTA_EXPORT_SHIFT 0
+#define AM33XX_DISABLE_RTA_EXPORT_MASK (1 << 0)
+
+/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
+#define AM33XX_DPLL_CORE_RECAL_EN_SHIFT 12
+#define AM33XX_DPLL_CORE_RECAL_EN_MASK (1 << 12)
+
+/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
+#define AM33XX_DPLL_CORE_RECAL_ST_SHIFT 12
+#define AM33XX_DPLL_CORE_RECAL_ST_MASK (1 << 12)
+
+/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
+#define AM33XX_DPLL_DDR_RECAL_EN_SHIFT 14
+#define AM33XX_DPLL_DDR_RECAL_EN_MASK (1 << 14)
+
+/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
+#define AM33XX_DPLL_DDR_RECAL_ST_SHIFT 14
+#define AM33XX_DPLL_DDR_RECAL_ST_MASK (1 << 14)
+
+/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
+#define AM33XX_DPLL_DISP_RECAL_EN_SHIFT 15
+#define AM33XX_DPLL_DISP_RECAL_EN_MASK (1 << 15)
+
+/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
+#define AM33XX_DPLL_DISP_RECAL_ST_SHIFT 13
+#define AM33XX_DPLL_DISP_RECAL_ST_MASK (1 << 13)
+
+/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
+#define AM33XX_DPLL_MPU_RECAL_EN_SHIFT 11
+#define AM33XX_DPLL_MPU_RECAL_EN_MASK (1 << 11)
+
+/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
+#define AM33XX_DPLL_MPU_RECAL_ST_SHIFT 11
+#define AM33XX_DPLL_MPU_RECAL_ST_MASK (1 << 11)
+
+/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
+#define AM33XX_DPLL_PER_RECAL_EN_SHIFT 13
+#define AM33XX_DPLL_PER_RECAL_EN_MASK (1 << 13)
+
+/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
+#define AM33XX_DPLL_PER_RECAL_ST_SHIFT 15
+#define AM33XX_DPLL_PER_RECAL_ST_MASK (1 << 15)
+
+/* Used by RM_WKUP_RSTST */
+#define AM33XX_EMULATION_M3_RST_SHIFT 6
+#define AM33XX_EMULATION_M3_RST_MASK (1 << 6)
+
+/* Used by RM_MPU_RSTST */
+#define AM33XX_EMULATION_MPU_RST_SHIFT 5
+#define AM33XX_EMULATION_MPU_RST_MASK (1 << 5)
+
+/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
+#define AM33XX_ENFUNC1_EXPORT_SHIFT 3
+#define AM33XX_ENFUNC1_EXPORT_MASK (1 << 3)
+
+/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
+#define AM33XX_ENFUNC3_EXPORT_SHIFT 5
+#define AM33XX_ENFUNC3_EXPORT_MASK (1 << 5)
+
+/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
+#define AM33XX_ENFUNC4_SHIFT 6
+#define AM33XX_ENFUNC4_MASK (1 << 6)
+
+/* Used by PRM_LDO_SRAM_CORE_SETUP, PRM_LDO_SRAM_MPU_SETUP */
+#define AM33XX_ENFUNC5_SHIFT 7
+#define AM33XX_ENFUNC5_MASK (1 << 7)
+
+/* Used by PRM_RSTST */
+#define AM33XX_EXTERNAL_WARM_RST_SHIFT 5
+#define AM33XX_EXTERNAL_WARM_RST_MASK (1 << 5)
+
+/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
+#define AM33XX_FORCEWKUP_EN_SHIFT 10
+#define AM33XX_FORCEWKUP_EN_MASK (1 << 10)
+
+/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
+#define AM33XX_FORCEWKUP_ST_SHIFT 10
+#define AM33XX_FORCEWKUP_ST_MASK (1 << 10)
+
+/* Used by PM_GFX_PWRSTCTRL */
+#define AM33XX_GFX_MEM_ONSTATE_SHIFT 17
+#define AM33XX_GFX_MEM_ONSTATE_MASK (0x3 << 17)
+
+/* Used by PM_GFX_PWRSTCTRL */
+#define AM33XX_GFX_MEM_RETSTATE_SHIFT 6
+#define AM33XX_GFX_MEM_RETSTATE_MASK (1 << 6)
+
+/* Used by PM_GFX_PWRSTST */
+#define AM33XX_GFX_MEM_STATEST_SHIFT 4
+#define AM33XX_GFX_MEM_STATEST_MASK (0x3 << 4)
+
+/* Used by RM_GFX_RSTCTRL, RM_GFX_RSTST */
+#define AM33XX_GFX_RST_SHIFT 0
+#define AM33XX_GFX_RST_MASK (1 << 0)
+
+/* Used by PRM_RSTST */
+#define AM33XX_GLOBAL_COLD_RST_SHIFT 0
+#define AM33XX_GLOBAL_COLD_RST_MASK (1 << 0)
+
+/* Used by PRM_RSTST */
+#define AM33XX_GLOBAL_WARM_SW_RST_SHIFT 1
+#define AM33XX_GLOBAL_WARM_SW_RST_MASK (1 << 1)
+
+/* Used by RM_WKUP_RSTST */
+#define AM33XX_ICECRUSHER_M3_RST_SHIFT 7
+#define AM33XX_ICECRUSHER_M3_RST_MASK (1 << 7)
+
+/* Used by RM_MPU_RSTST */
+#define AM33XX_ICECRUSHER_MPU_RST_SHIFT 6
+#define AM33XX_ICECRUSHER_MPU_RST_MASK (1 << 6)
+
+/* Used by PRM_RSTST */
+#define AM33XX_ICEPICK_RST_SHIFT 9
+#define AM33XX_ICEPICK_RST_MASK (1 << 9)
+
+/* Used by RM_PER_RSTCTRL */
+#define AM33XX_PRUSS_LRST_SHIFT 1
+#define AM33XX_PRUSS_LRST_MASK (1 << 1)
+
+/* Used by PM_PER_PWRSTCTRL */
+#define AM33XX_PRUSS_MEM_ONSTATE_SHIFT 5
+#define AM33XX_PRUSS_MEM_ONSTATE_MASK (0x3 << 5)
+
+/* Used by PM_PER_PWRSTCTRL */
+#define AM33XX_PRUSS_MEM_RETSTATE_SHIFT 7
+#define AM33XX_PRUSS_MEM_RETSTATE_MASK (1 << 7)
+
+/* Used by PM_PER_PWRSTST */
+#define AM33XX_PRUSS_MEM_STATEST_SHIFT 23
+#define AM33XX_PRUSS_MEM_STATEST_MASK (0x3 << 23)
+
+/*
+ * Used by PM_GFX_PWRSTST, PM_CEFUSE_PWRSTST, PM_PER_PWRSTST, PM_MPU_PWRSTST,
+ * PM_WKUP_PWRSTST, PM_RTC_PWRSTST
+ */
+#define AM33XX_INTRANSITION_SHIFT 20
+#define AM33XX_INTRANSITION_MASK (1 << 20)
+
+/* Used by PM_CEFUSE_PWRSTST */
+#define AM33XX_LASTPOWERSTATEENTERED_SHIFT 24
+#define AM33XX_LASTPOWERSTATEENTERED_MASK (0x3 << 24)
+
+/* Used by PM_GFX_PWRSTCTRL, PM_MPU_PWRSTCTRL, PM_RTC_PWRSTCTRL */
+#define AM33XX_LOGICRETSTATE_SHIFT 2
+#define AM33XX_LOGICRETSTATE_MASK (1 << 2)
+
+/* Renamed from LOGICRETSTATE Used by PM_PER_PWRSTCTRL, PM_WKUP_PWRSTCTRL */
+#define AM33XX_LOGICRETSTATE_3_3_SHIFT 3
+#define AM33XX_LOGICRETSTATE_3_3_MASK (1 << 3)
+
+/*
+ * Used by PM_GFX_PWRSTST, PM_CEFUSE_PWRSTST, PM_PER_PWRSTST, PM_MPU_PWRSTST,
+ * PM_WKUP_PWRSTST, PM_RTC_PWRSTST
+ */
+#define AM33XX_LOGICSTATEST_SHIFT 2
+#define AM33XX_LOGICSTATEST_MASK (1 << 2)
+
+/*
+ * Used by PM_GFX_PWRSTCTRL, PM_CEFUSE_PWRSTCTRL, PM_PER_PWRSTCTRL,
+ * PM_MPU_PWRSTCTRL, PM_WKUP_PWRSTCTRL, PM_RTC_PWRSTCTRL
+ */
+#define AM33XX_LOWPOWERSTATECHANGE_SHIFT 4
+#define AM33XX_LOWPOWERSTATECHANGE_MASK (1 << 4)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define AM33XX_MPU_L1_ONSTATE_SHIFT 18
+#define AM33XX_MPU_L1_ONSTATE_MASK (0x3 << 18)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define AM33XX_MPU_L1_RETSTATE_SHIFT 22
+#define AM33XX_MPU_L1_RETSTATE_MASK (1 << 22)
+
+/* Used by PM_MPU_PWRSTST */
+#define AM33XX_MPU_L1_STATEST_SHIFT 6
+#define AM33XX_MPU_L1_STATEST_MASK (0x3 << 6)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define AM33XX_MPU_L2_ONSTATE_SHIFT 20
+#define AM33XX_MPU_L2_ONSTATE_MASK (0x3 << 20)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define AM33XX_MPU_L2_RETSTATE_SHIFT 23
+#define AM33XX_MPU_L2_RETSTATE_MASK (1 << 23)
+
+/* Used by PM_MPU_PWRSTST */
+#define AM33XX_MPU_L2_STATEST_SHIFT 8
+#define AM33XX_MPU_L2_STATEST_MASK (0x3 << 8)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define AM33XX_MPU_RAM_ONSTATE_SHIFT 16
+#define AM33XX_MPU_RAM_ONSTATE_MASK (0x3 << 16)
+
+/* Used by PM_MPU_PWRSTCTRL */
+#define AM33XX_MPU_RAM_RETSTATE_SHIFT 24
+#define AM33XX_MPU_RAM_RETSTATE_MASK (1 << 24)
+
+/* Used by PM_MPU_PWRSTST */
+#define AM33XX_MPU_RAM_STATEST_SHIFT 4
+#define AM33XX_MPU_RAM_STATEST_MASK (0x3 << 4)
+
+/* Used by PRM_RSTST */
+#define AM33XX_MPU_SECURITY_VIOL_RST_SHIFT 2
+#define AM33XX_MPU_SECURITY_VIOL_RST_MASK (1 << 2)
+
+/* Used by PRM_SRAM_COUNT */
+#define AM33XX_PCHARGECNT_VALUE_SHIFT 0
+#define AM33XX_PCHARGECNT_VALUE_MASK (0x3f << 0)
+
+/* Used by RM_PER_RSTCTRL */
+#define AM33XX_PCI_LRST_SHIFT 0
+#define AM33XX_PCI_LRST_MASK (1 << 0)
+
+/* Renamed from PCI_LRST Used by RM_PER_RSTST */
+#define AM33XX_PCI_LRST_5_5_SHIFT 5
+#define AM33XX_PCI_LRST_5_5_MASK (1 << 5)
+
+/* Used by PM_PER_PWRSTCTRL */
+#define AM33XX_PER_MEM_ONSTATE_SHIFT 25
+#define AM33XX_PER_MEM_ONSTATE_MASK (0x3 << 25)
+
+/* Used by PM_PER_PWRSTCTRL */
+#define AM33XX_PER_MEM_RETSTATE_SHIFT 29
+#define AM33XX_PER_MEM_RETSTATE_MASK (1 << 29)
+
+/* Used by PM_PER_PWRSTST */
+#define AM33XX_PER_MEM_STATEST_SHIFT 17
+#define AM33XX_PER_MEM_STATEST_MASK (0x3 << 17)
+
+/*
+ * Used by PM_GFX_PWRSTCTRL, PM_CEFUSE_PWRSTCTRL, PM_PER_PWRSTCTRL,
+ * PM_MPU_PWRSTCTRL
+ */
+#define AM33XX_POWERSTATE_SHIFT 0
+#define AM33XX_POWERSTATE_MASK (0x3 << 0)
+
+/* Used by PM_GFX_PWRSTST, PM_CEFUSE_PWRSTST, PM_PER_PWRSTST, PM_MPU_PWRSTST */
+#define AM33XX_POWERSTATEST_SHIFT 0
+#define AM33XX_POWERSTATEST_MASK (0x3 << 0)
+
+/* Used by PM_PER_PWRSTCTRL */
+#define AM33XX_RAM_MEM_ONSTATE_SHIFT 30
+#define AM33XX_RAM_MEM_ONSTATE_MASK (0x3 << 30)
+
+/* Used by PM_PER_PWRSTCTRL */
+#define AM33XX_RAM_MEM_RETSTATE_SHIFT 27
+#define AM33XX_RAM_MEM_RETSTATE_MASK (1 << 27)
+
+/* Used by PM_PER_PWRSTST */
+#define AM33XX_RAM_MEM_STATEST_SHIFT 21
+#define AM33XX_RAM_MEM_STATEST_MASK (0x3 << 21)
+
+/* Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_MPU_CTRL */
+#define AM33XX_RETMODE_ENABLE_SHIFT 0
+#define AM33XX_RETMODE_ENABLE_MASK (1 << 0)
+
+/* Used by REVISION_PRM */
+#define AM33XX_REV_SHIFT 0
+#define AM33XX_REV_MASK (0xff << 0)
+
+/* Used by PRM_RSTTIME */
+#define AM33XX_RSTTIME1_SHIFT 0
+#define AM33XX_RSTTIME1_MASK (0xff << 0)
+
+/* Used by PRM_RSTTIME */
+#define AM33XX_RSTTIME2_SHIFT 8
+#define AM33XX_RSTTIME2_MASK (0x1f << 8)
+
+/* Used by PRM_RSTCTRL */
+#define AM33XX_RST_GLOBAL_COLD_SW_SHIFT 1
+#define AM33XX_RST_GLOBAL_COLD_SW_MASK (1 << 1)
+
+/* Used by PRM_RSTCTRL */
+#define AM33XX_RST_GLOBAL_WARM_SW_SHIFT 0
+#define AM33XX_RST_GLOBAL_WARM_SW_MASK (1 << 0)
+
+/* Used by PRM_SRAM_COUNT */
+#define AM33XX_SLPCNT_VALUE_SHIFT 16
+#define AM33XX_SLPCNT_VALUE_MASK (0xff << 16)
+
+/* Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_MPU_CTRL */
+#define AM33XX_SRAMLDO_STATUS_SHIFT 8
+#define AM33XX_SRAMLDO_STATUS_MASK (1 << 8)
+
+/* Used by PRM_LDO_SRAM_CORE_CTRL, PRM_LDO_SRAM_MPU_CTRL */
+#define AM33XX_SRAM_IN_TRANSITION_SHIFT 9
+#define AM33XX_SRAM_IN_TRANSITION_MASK (1 << 9)
+
+/* Used by PRM_SRAM_COUNT */
+#define AM33XX_STARTUP_COUNT_SHIFT 24
+#define AM33XX_STARTUP_COUNT_MASK (0xff << 24)
+
+/* Used by PRM_IRQENABLE_M3, PRM_IRQENABLE_MPU */
+#define AM33XX_TRANSITION_EN_SHIFT 8
+#define AM33XX_TRANSITION_EN_MASK (1 << 8)
+
+/* Used by PRM_IRQSTATUS_M3, PRM_IRQSTATUS_MPU */
+#define AM33XX_TRANSITION_ST_SHIFT 8
+#define AM33XX_TRANSITION_ST_MASK (1 << 8)
+
+/* Used by PRM_SRAM_COUNT */
+#define AM33XX_VSETUPCNT_VALUE_SHIFT 8
+#define AM33XX_VSETUPCNT_VALUE_MASK (0xff << 8)
+
+/* Used by PRM_RSTST */
+#define AM33XX_WDT0_RST_SHIFT 3
+#define AM33XX_WDT0_RST_MASK (1 << 3)
+
+/* Used by PRM_RSTST */
+#define AM33XX_WDT1_RST_SHIFT 4
+#define AM33XX_WDT1_RST_MASK (1 << 4)
+
+/* Used by RM_WKUP_RSTCTRL */
+#define AM33XX_WKUP_M3_LRST_SHIFT 3
+#define AM33XX_WKUP_M3_LRST_MASK (1 << 3)
+
+/* Renamed from WKUP_M3_LRST Used by RM_WKUP_RSTST */
+#define AM33XX_WKUP_M3_LRST_5_5_SHIFT 5
+#define AM33XX_WKUP_M3_LRST_5_5_MASK (1 << 5)
+
+#endif
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
index 21cb74003a56..a0309dea6794 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -302,11 +302,59 @@ void omap3xxx_prm_restore_irqen(u32 *saved_mask)
OMAP3_PRM_IRQENABLE_MPU_OFFSET);
}
+/**
+ * omap3xxx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
+ *
+ * Clear any previously-latched I/O wakeup events and ensure that the
+ * I/O wakeup gates are aligned with the current mux settings. Works
+ * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then
+ * deasserting WUCLKIN and clearing the ST_IO_CHAIN WKST bit. No
+ * return value.
+ */
+void omap3xxx_prm_reconfigure_io_chain(void)
+{
+ int i = 0;
+
+ omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
+ PM_WKEN);
+
+ omap_test_timeout(omap2_prm_read_mod_reg(WKUP_MOD, PM_WKST) &
+ OMAP3430_ST_IO_CHAIN_MASK,
+ MAX_IOPAD_LATCH_TIME, i);
+ if (i == MAX_IOPAD_LATCH_TIME)
+ pr_warn("PRM: I/O chain clock line assertion timed out\n");
+
+ omap2_prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
+ PM_WKEN);
+
+ omap2_prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK, WKUP_MOD,
+ PM_WKST);
+
+ omap2_prm_read_mod_reg(WKUP_MOD, PM_WKST);
+}
+
+/**
+ * omap3xxx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches
+ *
+ * Activates the I/O wakeup event latches and allows events logged by
+ * those latches to signal a wakeup event to the PRCM. For I/O
+ * wakeups to occur, WAKEUPENABLE bits must be set in the pad mux
+ * registers, and omap3xxx_prm_reconfigure_io_chain() must be called.
+ * No return value.
+ */
+static void __init omap3xxx_prm_enable_io_wakeup(void)
+{
+ if (omap3_has_io_wakeup())
+ omap2_prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD,
+ PM_WKEN);
+}
+
static int __init omap3xxx_prcm_init(void)
{
int ret = 0;
if (cpu_is_omap34xx()) {
+ omap3xxx_prm_enable_io_wakeup();
ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
if (!ret)
irq_set_status_flags(omap_prcm_event_to_irq("io"),
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.h b/arch/arm/mach-omap2/prm2xxx_3xxx.h
index 70ac2a19dc5f..c19d249b4816 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.h
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.h
@@ -228,68 +228,6 @@
#ifndef __ASSEMBLER__
-/*
- * Stub omap2xxx/omap3xxx functions so that common files
- * continue to build when custom builds are used
- */
-#if defined(CONFIG_ARCH_OMAP4) && !(defined(CONFIG_ARCH_OMAP2) || \
- defined(CONFIG_ARCH_OMAP3))
-static inline u32 omap2_prm_read_mod_reg(s16 module, u16 idx)
-{
- WARN(1, "prm: omap2xxx/omap3xxx specific function and "
- "not suppose to be used on omap4\n");
- return 0;
-}
-static inline void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx)
-{
- WARN(1, "prm: omap2xxx/omap3xxx specific function and "
- "not suppose to be used on omap4\n");
-}
-static inline u32 omap2_prm_rmw_mod_reg_bits(u32 mask, u32 bits,
- s16 module, s16 idx)
-{
- WARN(1, "prm: omap2xxx/omap3xxx specific function and "
- "not suppose to be used on omap4\n");
- return 0;
-}
-static inline u32 omap2_prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
-{
- WARN(1, "prm: omap2xxx/omap3xxx specific function and "
- "not suppose to be used on omap4\n");
- return 0;
-}
-static inline u32 omap2_prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
-{
- WARN(1, "prm: omap2xxx/omap3xxx specific function and "
- "not suppose to be used on omap4\n");
- return 0;
-}
-static inline u32 omap2_prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask)
-{
- WARN(1, "prm: omap2xxx/omap3xxx specific function and "
- "not suppose to be used on omap4\n");
- return 0;
-}
-static inline int omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift)
-{
- WARN(1, "prm: omap2xxx/omap3xxx specific function and "
- "not suppose to be used on omap4\n");
- return 0;
-}
-static inline int omap2_prm_assert_hardreset(s16 prm_mod, u8 shift)
-{
- WARN(1, "prm: omap2xxx/omap3xxx specific function and "
- "not suppose to be used on omap4\n");
- return 0;
-}
-static inline int omap2_prm_deassert_hardreset(s16 prm_mod, u8 rst_shift,
- u8 st_shift)
-{
- WARN(1, "prm: omap2xxx/omap3xxx specific function and "
- "not suppose to be used on omap4\n");
- return 0;
-}
-#else
/* Power/reset management domain register get/set */
extern u32 omap2_prm_read_mod_reg(s16 module, u16 idx);
extern void omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx);
@@ -315,15 +253,15 @@ extern u32 omap3_prm_vcvp_read(u8 offset);
extern void omap3_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
+extern void omap3xxx_prm_reconfigure_io_chain(void);
+
/* PRM interrupt-related functions */
extern void omap3xxx_prm_read_pending_irqs(unsigned long *events);
extern void omap3xxx_prm_ocp_barrier(void);
extern void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask);
extern void omap3xxx_prm_restore_irqen(u32 *saved_mask);
-#endif /* CONFIG_ARCH_OMAP4 */
-
-#endif
+#endif /* __ASSEMBLER */
/*
* Bits common to specific registers
diff --git a/arch/arm/mach-omap2/prm33xx.c b/arch/arm/mach-omap2/prm33xx.c
new file mode 100644
index 000000000000..e7dbb6cf1255
--- /dev/null
+++ b/arch/arm/mach-omap2/prm33xx.c
@@ -0,0 +1,135 @@
+/*
+ * AM33XX PRM functions
+ *
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/io.h>
+
+#include <plat/common.h>
+
+#include "common.h"
+#include "prm33xx.h"
+#include "prm-regbits-33xx.h"
+
+/* Read a register in a PRM instance */
+u32 am33xx_prm_read_reg(s16 inst, u16 idx)
+{
+ return __raw_readl(prm_base + inst + idx);
+}
+
+/* Write into a register in a PRM instance */
+void am33xx_prm_write_reg(u32 val, s16 inst, u16 idx)
+{
+ __raw_writel(val, prm_base + inst + idx);
+}
+
+/* Read-modify-write a register in PRM. Caller must lock */
+u32 am33xx_prm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx)
+{
+ u32 v;
+
+ v = am33xx_prm_read_reg(inst, idx);
+ v &= ~mask;
+ v |= bits;
+ am33xx_prm_write_reg(v, inst, idx);
+
+ return v;
+}
+
+/**
+ * am33xx_prm_is_hardreset_asserted - read the HW reset line state of
+ * submodules contained in the hwmod module
+ * @shift: register bit shift corresponding to the reset line to check
+ * @inst: CM instance register offset (*_INST macro)
+ * @rstctrl_offs: RM_RSTCTRL register address offset for this module
+ *
+ * Returns 1 if the (sub)module hardreset line is currently asserted,
+ * 0 if the (sub)module hardreset line is not currently asserted, or
+ * -EINVAL upon parameter error.
+ */
+int am33xx_prm_is_hardreset_asserted(u8 shift, s16 inst, u16 rstctrl_offs)
+{
+ u32 v;
+
+ v = am33xx_prm_read_reg(inst, rstctrl_offs);
+ v &= 1 << shift;
+ v >>= shift;
+
+ return v;
+}
+
+/**
+ * am33xx_prm_assert_hardreset - assert the HW reset line of a submodule
+ * @shift: register bit shift corresponding to the reset line to assert
+ * @inst: CM instance register offset (*_INST macro)
+ * @rstctrl_reg: RM_RSTCTRL register address for this module
+ *
+ * Some IPs like dsp, ipu or iva contain processors that require an HW
+ * reset line to be asserted / deasserted in order to fully enable the
+ * IP. These modules may have multiple hard-reset lines that reset
+ * different 'submodules' inside the IP block. This function will
+ * place the submodule into reset. Returns 0 upon success or -EINVAL
+ * upon an argument error.
+ */
+int am33xx_prm_assert_hardreset(u8 shift, s16 inst, u16 rstctrl_offs)
+{
+ u32 mask = 1 << shift;
+
+ am33xx_prm_rmw_reg_bits(mask, mask, inst, rstctrl_offs);
+
+ return 0;
+}
+
+/**
+ * am33xx_prm_deassert_hardreset - deassert a submodule hardreset line and
+ * wait
+ * @shift: register bit shift corresponding to the reset line to deassert
+ * @inst: CM instance register offset (*_INST macro)
+ * @rstctrl_reg: RM_RSTCTRL register address for this module
+ * @rstst_reg: RM_RSTST register address for this module
+ *
+ * Some IPs like dsp, ipu or iva contain processors that require an HW
+ * reset line to be asserted / deasserted in order to fully enable the
+ * IP. These modules may have multiple hard-reset lines that reset
+ * different 'submodules' inside the IP block. This function will
+ * take the submodule out of reset and wait until the PRCM indicates
+ * that the reset has completed before returning. Returns 0 upon success or
+ * -EINVAL upon an argument error, -EEXIST if the submodule was already out
+ * of reset, or -EBUSY if the submodule did not exit reset promptly.
+ */
+int am33xx_prm_deassert_hardreset(u8 shift, s16 inst,
+ u16 rstctrl_offs, u16 rstst_offs)
+{
+ int c;
+ u32 mask = 1 << shift;
+
+ /* Check the current status to avoid de-asserting the line twice */
+ if (am33xx_prm_is_hardreset_asserted(shift, inst, rstctrl_offs) == 0)
+ return -EEXIST;
+
+ /* Clear the reset status by writing 1 to the status bit */
+ am33xx_prm_rmw_reg_bits(0xffffffff, mask, inst, rstst_offs);
+ /* de-assert the reset control line */
+ am33xx_prm_rmw_reg_bits(mask, 0, inst, rstctrl_offs);
+ /* wait the status to be set */
+
+ omap_test_timeout(am33xx_prm_is_hardreset_asserted(shift, inst,
+ rstst_offs),
+ MAX_MODULE_HARDRESET_WAIT, c);
+
+ return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
+}
diff --git a/arch/arm/mach-omap2/prm33xx.h b/arch/arm/mach-omap2/prm33xx.h
new file mode 100644
index 000000000000..3f25c563a821
--- /dev/null
+++ b/arch/arm/mach-omap2/prm33xx.h
@@ -0,0 +1,129 @@
+/*
+ * AM33XX PRM instance offset macros
+ *
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_PRM33XX_H
+#define __ARCH_ARM_MACH_OMAP2_PRM33XX_H
+
+#include "prcm-common.h"
+#include "prm.h"
+
+#define AM33XX_PRM_BASE 0x44E00000
+
+#define AM33XX_PRM_REGADDR(inst, reg) \
+ AM33XX_L4_WK_IO_ADDRESS(AM33XX_PRM_BASE + (inst) + (reg))
+
+
+/* PRM instances */
+#define AM33XX_PRM_OCP_SOCKET_MOD 0x0B00
+#define AM33XX_PRM_PER_MOD 0x0C00
+#define AM33XX_PRM_WKUP_MOD 0x0D00
+#define AM33XX_PRM_MPU_MOD 0x0E00
+#define AM33XX_PRM_DEVICE_MOD 0x0F00
+#define AM33XX_PRM_RTC_MOD 0x1000
+#define AM33XX_PRM_GFX_MOD 0x1100
+#define AM33XX_PRM_CEFUSE_MOD 0x1200
+
+/* PRM */
+
+/* PRM.OCP_SOCKET_PRM register offsets */
+#define AM33XX_REVISION_PRM_OFFSET 0x0000
+#define AM33XX_REVISION_PRM AM33XX_PRM_REGADDR(AM33XX_PRM_OCP_SOCKET_MOD, 0x0000)
+#define AM33XX_PRM_IRQSTATUS_MPU_OFFSET 0x0004
+#define AM33XX_PRM_IRQSTATUS_MPU AM33XX_PRM_REGADDR(AM33XX_PRM_OCP_SOCKET_MOD, 0x0004)
+#define AM33XX_PRM_IRQENABLE_MPU_OFFSET 0x0008
+#define AM33XX_PRM_IRQENABLE_MPU AM33XX_PRM_REGADDR(AM33XX_PRM_OCP_SOCKET_MOD, 0x0008)
+#define AM33XX_PRM_IRQSTATUS_M3_OFFSET 0x000c
+#define AM33XX_PRM_IRQSTATUS_M3 AM33XX_PRM_REGADDR(AM33XX_PRM_OCP_SOCKET_MOD, 0x000c)
+#define AM33XX_PRM_IRQENABLE_M3_OFFSET 0x0010
+#define AM33XX_PRM_IRQENABLE_M3 AM33XX_PRM_REGADDR(AM33XX_PRM_OCP_SOCKET_MOD, 0x0010)
+
+/* PRM.PER_PRM register offsets */
+#define AM33XX_RM_PER_RSTCTRL_OFFSET 0x0000
+#define AM33XX_RM_PER_RSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x0000)
+#define AM33XX_RM_PER_RSTST_OFFSET 0x0004
+#define AM33XX_RM_PER_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x0004)
+#define AM33XX_PM_PER_PWRSTST_OFFSET 0x0008
+#define AM33XX_PM_PER_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x0008)
+#define AM33XX_PM_PER_PWRSTCTRL_OFFSET 0x000c
+#define AM33XX_PM_PER_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_PER_MOD, 0x000c)
+
+/* PRM.WKUP_PRM register offsets */
+#define AM33XX_RM_WKUP_RSTCTRL_OFFSET 0x0000
+#define AM33XX_RM_WKUP_RSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_WKUP_MOD, 0x0000)
+#define AM33XX_PM_WKUP_PWRSTCTRL_OFFSET 0x0004
+#define AM33XX_PM_WKUP_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_WKUP_MOD, 0x0004)
+#define AM33XX_PM_WKUP_PWRSTST_OFFSET 0x0008
+#define AM33XX_PM_WKUP_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_WKUP_MOD, 0x0008)
+#define AM33XX_RM_WKUP_RSTST_OFFSET 0x000c
+#define AM33XX_RM_WKUP_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_WKUP_MOD, 0x000c)
+
+/* PRM.MPU_PRM register offsets */
+#define AM33XX_PM_MPU_PWRSTCTRL_OFFSET 0x0000
+#define AM33XX_PM_MPU_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_MPU_MOD, 0x0000)
+#define AM33XX_PM_MPU_PWRSTST_OFFSET 0x0004
+#define AM33XX_PM_MPU_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_MPU_MOD, 0x0004)
+#define AM33XX_RM_MPU_RSTST_OFFSET 0x0008
+#define AM33XX_RM_MPU_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_MPU_MOD, 0x0008)
+
+/* PRM.DEVICE_PRM register offsets */
+#define AM33XX_PRM_RSTCTRL_OFFSET 0x0000
+#define AM33XX_PRM_RSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0000)
+#define AM33XX_PRM_RSTTIME_OFFSET 0x0004
+#define AM33XX_PRM_RSTTIME AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0004)
+#define AM33XX_PRM_RSTST_OFFSET 0x0008
+#define AM33XX_PRM_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0008)
+#define AM33XX_PRM_SRAM_COUNT_OFFSET 0x000c
+#define AM33XX_PRM_SRAM_COUNT AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x000c)
+#define AM33XX_PRM_LDO_SRAM_CORE_SETUP_OFFSET 0x0010
+#define AM33XX_PRM_LDO_SRAM_CORE_SETUP AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0010)
+#define AM33XX_PRM_LDO_SRAM_CORE_CTRL_OFFSET 0x0014
+#define AM33XX_PRM_LDO_SRAM_CORE_CTRL AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0014)
+#define AM33XX_PRM_LDO_SRAM_MPU_SETUP_OFFSET 0x0018
+#define AM33XX_PRM_LDO_SRAM_MPU_SETUP AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x0018)
+#define AM33XX_PRM_LDO_SRAM_MPU_CTRL_OFFSET 0x001c
+#define AM33XX_PRM_LDO_SRAM_MPU_CTRL AM33XX_PRM_REGADDR(AM33XX_PRM_DEVICE_MOD, 0x001c)
+
+/* PRM.RTC_PRM register offsets */
+#define AM33XX_PM_RTC_PWRSTCTRL_OFFSET 0x0000
+#define AM33XX_PM_RTC_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_RTC_MOD, 0x0000)
+#define AM33XX_PM_RTC_PWRSTST_OFFSET 0x0004
+#define AM33XX_PM_RTC_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_RTC_MOD, 0x0004)
+
+/* PRM.GFX_PRM register offsets */
+#define AM33XX_PM_GFX_PWRSTCTRL_OFFSET 0x0000
+#define AM33XX_PM_GFX_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_GFX_MOD, 0x0000)
+#define AM33XX_RM_GFX_RSTCTRL_OFFSET 0x0004
+#define AM33XX_RM_GFX_RSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_GFX_MOD, 0x0004)
+#define AM33XX_PM_GFX_PWRSTST_OFFSET 0x0010
+#define AM33XX_PM_GFX_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_GFX_MOD, 0x0010)
+#define AM33XX_RM_GFX_RSTST_OFFSET 0x0014
+#define AM33XX_RM_GFX_RSTST AM33XX_PRM_REGADDR(AM33XX_PRM_GFX_MOD, 0x0014)
+
+/* PRM.CEFUSE_PRM register offsets */
+#define AM33XX_PM_CEFUSE_PWRSTCTRL_OFFSET 0x0000
+#define AM33XX_PM_CEFUSE_PWRSTCTRL AM33XX_PRM_REGADDR(AM33XX_PRM_CEFUSE_MOD, 0x0000)
+#define AM33XX_PM_CEFUSE_PWRSTST_OFFSET 0x0004
+#define AM33XX_PM_CEFUSE_PWRSTST AM33XX_PRM_REGADDR(AM33XX_PRM_CEFUSE_MOD, 0x0004)
+
+extern u32 am33xx_prm_read_reg(s16 inst, u16 idx);
+extern void am33xx_prm_write_reg(u32 val, s16 inst, u16 idx);
+extern u32 am33xx_prm_rmw_reg_bits(u32 mask, u32 bits, s16 inst, s16 idx);
+extern void am33xx_prm_global_warm_sw_reset(void);
+extern int am33xx_prm_is_hardreset_asserted(u8 shift, s16 inst,
+ u16 rstctrl_offs);
+extern int am33xx_prm_assert_hardreset(u8 shift, s16 inst, u16 rstctrl_offs);
+extern int am33xx_prm_deassert_hardreset(u8 shift, s16 inst,
+ u16 rstctrl_offs, u16 rstst_offs);
+#endif
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index f106d21ff581..bb727c2d9337 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -233,10 +233,71 @@ void omap44xx_prm_restore_irqen(u32 *saved_mask)
OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
}
+/**
+ * omap44xx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain
+ *
+ * Clear any previously-latched I/O wakeup events and ensure that the
+ * I/O wakeup gates are aligned with the current mux settings. Works
+ * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then
+ * deasserting WUCLKIN and waiting for WUCLKOUT to be deasserted.
+ * No return value. XXX Are the final two steps necessary?
+ */
+void omap44xx_prm_reconfigure_io_chain(void)
+{
+ int i = 0;
+
+ /* Trigger WUCLKIN enable */
+ omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK,
+ OMAP4430_WUCLK_CTRL_MASK,
+ OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IO_PMCTRL_OFFSET);
+ omap_test_timeout(
+ (((omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IO_PMCTRL_OFFSET) &
+ OMAP4430_WUCLK_STATUS_MASK) >>
+ OMAP4430_WUCLK_STATUS_SHIFT) == 1),
+ MAX_IOPAD_LATCH_TIME, i);
+ if (i == MAX_IOPAD_LATCH_TIME)
+ pr_warn("PRM: I/O chain clock line assertion timed out\n");
+
+ /* Trigger WUCLKIN disable */
+ omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, 0x0,
+ OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IO_PMCTRL_OFFSET);
+ omap_test_timeout(
+ (((omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IO_PMCTRL_OFFSET) &
+ OMAP4430_WUCLK_STATUS_MASK) >>
+ OMAP4430_WUCLK_STATUS_SHIFT) == 0),
+ MAX_IOPAD_LATCH_TIME, i);
+ if (i == MAX_IOPAD_LATCH_TIME)
+ pr_warn("PRM: I/O chain clock line deassertion timed out\n");
+
+ return;
+}
+
+/**
+ * omap44xx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches
+ *
+ * Activates the I/O wakeup event latches and allows events logged by
+ * those latches to signal a wakeup event to the PRCM. For I/O wakeups
+ * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
+ * omap44xx_prm_reconfigure_io_chain() must be called. No return value.
+ */
+static void __init omap44xx_prm_enable_io_wakeup(void)
+{
+ omap4_prm_rmw_inst_reg_bits(OMAP4430_GLOBAL_WUEN_MASK,
+ OMAP4430_GLOBAL_WUEN_MASK,
+ OMAP4430_PRM_DEVICE_INST,
+ OMAP4_PRM_IO_PMCTRL_OFFSET);
+}
+
static int __init omap4xxx_prcm_init(void)
{
- if (cpu_is_omap44xx())
+ if (cpu_is_omap44xx()) {
+ omap44xx_prm_enable_io_wakeup();
return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup);
+ }
return 0;
}
subsys_initcall(omap4xxx_prcm_init);
diff --git a/arch/arm/mach-omap2/prm44xx.h b/arch/arm/mach-omap2/prm44xx.h
index 7978092946db..ee72ae6bd8c9 100644
--- a/arch/arm/mach-omap2/prm44xx.h
+++ b/arch/arm/mach-omap2/prm44xx.h
@@ -763,6 +763,8 @@ extern u32 omap4_prm_vcvp_read(u8 offset);
extern void omap4_prm_vcvp_write(u32 val, u8 offset);
extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
+extern void omap44xx_prm_reconfigure_io_chain(void);
+
/* PRM interrupt-related functions */
extern void omap44xx_prm_read_pending_irqs(unsigned long *events);
extern void omap44xx_prm_ocp_barrier(void);
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index dfe00ddb5c60..03b126d9ad94 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -85,7 +85,7 @@ static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int virtirq;
- int nr_irqs = prcm_irq_setup->nr_regs * 32;
+ int nr_irq = prcm_irq_setup->nr_regs * 32;
/*
* If we are suspended, mask all interrupts from PRCM level,
@@ -110,7 +110,7 @@ static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
prcm_irq_setup->read_pending_irqs(pending);
/* No bit set, then all IRQs are handled */
- if (find_first_bit(pending, nr_irqs) >= nr_irqs)
+ if (find_first_bit(pending, nr_irq) >= nr_irq)
break;
omap_prcm_events_filter_priority(pending, priority_pending);
@@ -121,11 +121,11 @@ static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
*/
/* Serve priority events first */
- for_each_set_bit(virtirq, priority_pending, nr_irqs)
+ for_each_set_bit(virtirq, priority_pending, nr_irq)
generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
/* Serve normal events next */
- for_each_set_bit(virtirq, pending, nr_irqs)
+ for_each_set_bit(virtirq, pending, nr_irq)
generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
}
if (chip->irq_ack)
@@ -319,3 +319,65 @@ err:
omap_prcm_irq_cleanup();
return -ENOMEM;
}
+
+/*
+ * Stubbed functions so that common files continue to build when
+ * custom builds are used
+ * XXX These are temporary and should be removed at the earliest possible
+ * opportunity
+ */
+u32 __weak omap2_prm_read_mod_reg(s16 module, u16 idx)
+{
+ WARN(1, "prm: omap2xxx/omap3xxx specific function called on non-omap2xxx/3xxx\n");
+ return 0;
+}
+
+void __weak omap2_prm_write_mod_reg(u32 val, s16 module, u16 idx)
+{
+ WARN(1, "prm: omap2xxx/omap3xxx specific function called on non-omap2xxx/3xxx\n");
+}
+
+u32 __weak omap2_prm_rmw_mod_reg_bits(u32 mask, u32 bits,
+ s16 module, s16 idx)
+{
+ WARN(1, "prm: omap2xxx/omap3xxx specific function called on non-omap2xxx/3xxx\n");
+ return 0;
+}
+
+u32 __weak omap2_prm_set_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+ WARN(1, "prm: omap2xxx/omap3xxx specific function called on non-omap2xxx/3xxx\n");
+ return 0;
+}
+
+u32 __weak omap2_prm_clear_mod_reg_bits(u32 bits, s16 module, s16 idx)
+{
+ WARN(1, "prm: omap2xxx/omap3xxx specific function called on non-omap2xxx/3xxx\n");
+ return 0;
+}
+
+u32 __weak omap2_prm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask)
+{
+ WARN(1, "prm: omap2xxx/omap3xxx specific function called on non-omap2xxx/3xxx\n");
+ return 0;
+}
+
+int __weak omap2_prm_is_hardreset_asserted(s16 prm_mod, u8 shift)
+{
+ WARN(1, "prm: omap2xxx/omap3xxx specific function called on non-omap2xxx/3xxx\n");
+ return 0;
+}
+
+int __weak omap2_prm_assert_hardreset(s16 prm_mod, u8 shift)
+{
+ WARN(1, "prm: omap2xxx/omap3xxx specific function called on non-omap2xxx/3xxx\n");
+ return 0;
+}
+
+int __weak omap2_prm_deassert_hardreset(s16 prm_mod, u8 rst_shift,
+ u8 st_shift)
+{
+ WARN(1, "prm: omap2xxx/omap3xxx specific function called on non-omap2xxx/3xxx\n");
+ return 0;
+}
+
diff --git a/arch/arm/mach-omap2/smartreflex-class3.c b/arch/arm/mach-omap2/smartreflex-class3.c
index 955566eefac4..1da8f03c479e 100644
--- a/arch/arm/mach-omap2/smartreflex-class3.c
+++ b/arch/arm/mach-omap2/smartreflex-class3.c
@@ -11,36 +11,37 @@
* published by the Free Software Foundation.
*/
-#include "smartreflex.h"
+#include <linux/power/smartreflex.h>
+#include "voltage.h"
-static int sr_class3_enable(struct voltagedomain *voltdm)
+static int sr_class3_enable(struct omap_sr *sr)
{
- unsigned long volt = voltdm_get_voltage(voltdm);
+ unsigned long volt = voltdm_get_voltage(sr->voltdm);
if (!volt) {
- pr_warning("%s: Curr voltage unknown. Cannot enable sr_%s\n",
- __func__, voltdm->name);
+ pr_warning("%s: Curr voltage unknown. Cannot enable %s\n",
+ __func__, sr->name);
return -ENODATA;
}
- omap_vp_enable(voltdm);
- return sr_enable(voltdm, volt);
+ omap_vp_enable(sr->voltdm);
+ return sr_enable(sr->voltdm, volt);
}
-static int sr_class3_disable(struct voltagedomain *voltdm, int is_volt_reset)
+static int sr_class3_disable(struct omap_sr *sr, int is_volt_reset)
{
- sr_disable_errgen(voltdm);
- omap_vp_disable(voltdm);
- sr_disable(voltdm);
+ sr_disable_errgen(sr->voltdm);
+ omap_vp_disable(sr->voltdm);
+ sr_disable(sr->voltdm);
if (is_volt_reset)
- voltdm_reset(voltdm);
+ voltdm_reset(sr->voltdm);
return 0;
}
-static int sr_class3_configure(struct voltagedomain *voltdm)
+static int sr_class3_configure(struct omap_sr *sr)
{
- return sr_configure_errgen(voltdm);
+ return sr_configure_errgen(sr->voltdm);
}
/* SR class3 structure */
diff --git a/arch/arm/mach-omap2/sr_device.c b/arch/arm/mach-omap2/sr_device.c
index a503e1e8358c..d033a65f4e4e 100644
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -17,6 +17,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/power/smartreflex.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -24,7 +25,6 @@
#include <plat/omap_device.h>
-#include "smartreflex.h"
#include "voltage.h"
#include "control.h"
#include "pm.h"
@@ -36,7 +36,10 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
struct omap_sr_data *sr_data)
{
struct omap_sr_nvalue_table *nvalue_table;
- int i, count = 0;
+ int i, j, count = 0;
+
+ sr_data->nvalue_count = 0;
+ sr_data->nvalue_table = NULL;
while (volt_data[count].volt_nominal)
count++;
@@ -44,8 +47,14 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
nvalue_table = kzalloc(sizeof(struct omap_sr_nvalue_table)*count,
GFP_KERNEL);
- for (i = 0; i < count; i++) {
+ if (!nvalue_table) {
+ pr_err("OMAP: SmartReflex: cannot allocate memory for n-value table\n");
+ return;
+ }
+
+ for (i = 0, j = 0; i < count; i++) {
u32 v;
+
/*
* In OMAP4 the efuse registers are 24 bit aligned.
* A __raw_readl will fail for non-32 bit aligned address
@@ -58,15 +67,30 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
omap_ctrl_readb(offset + 1) << 8 |
omap_ctrl_readb(offset + 2) << 16;
} else {
- v = omap_ctrl_readl(volt_data[i].sr_efuse_offs);
+ v = omap_ctrl_readl(volt_data[i].sr_efuse_offs);
}
- nvalue_table[i].efuse_offs = volt_data[i].sr_efuse_offs;
- nvalue_table[i].nvalue = v;
+ /*
+ * Many OMAP SoCs don't have the eFuse values set.
+ * For example, pretty much all OMAP3xxx before
+ * ES3.something.
+ *
+ * XXX There needs to be some way for board files or
+ * userspace to add these in.
+ */
+ if (v == 0)
+ continue;
+
+ nvalue_table[j].nvalue = v;
+ nvalue_table[j].efuse_offs = volt_data[i].sr_efuse_offs;
+ nvalue_table[j].errminlimit = volt_data[i].sr_errminlimit;
+ nvalue_table[j].volt_nominal = volt_data[i].volt_nominal;
+
+ j++;
}
sr_data->nvalue_table = nvalue_table;
- sr_data->nvalue_count = count;
+ sr_data->nvalue_count = j;
}
static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
@@ -93,6 +117,7 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
goto exit;
}
+ sr_data->name = oh->name;
sr_data->ip_type = oh->class->rev;
sr_data->senn_mod = 0x1;
sr_data->senp_mod = 0x1;
@@ -106,7 +131,7 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
omap_voltage_get_volttable(sr_data->voltdm, &volt_data);
if (!volt_data) {
- pr_warning("%s: No Voltage table registerd fo VDD%d."
+ pr_warning("%s: No Voltage table registered fo VDD%d."
"Something really wrong\n\n", __func__, i + 1);
goto exit;
}
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 840929bd9dae..13d20c8a283d 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -69,11 +69,6 @@
#define OMAP3_SECURE_TIMER 1
#endif
-/* MAX_GPTIMER_ID: number of GPTIMERs on the chip */
-#define MAX_GPTIMER_ID 12
-
-static u32 sys_timer_reserved;
-
/* Clockevent code */
static struct omap_dm_timer clkev;
@@ -173,14 +168,14 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
return -ENXIO;
/* After the dmtimer is using hwmod these clocks won't be needed */
- sprintf(name, "gpt%d_fck", gptimer_id);
- timer->fclk = clk_get(NULL, name);
+ timer->fclk = clk_get(NULL, omap_hwmod_get_main_clk(oh));
if (IS_ERR(timer->fclk))
return -ENODEV;
omap_hwmod_enable(oh);
- sys_timer_reserved |= (1 << (gptimer_id - 1));
+ if (omap_dm_timer_reserve_systimer(gptimer_id))
+ return -ENODEV;
if (gptimer_id != 12) {
struct clk *src;
@@ -368,6 +363,11 @@ OMAP_SYS_TIMER_INIT(3_secure, OMAP3_SECURE_TIMER, OMAP3_CLKEV_SOURCE,
OMAP_SYS_TIMER(3_secure)
#endif
+#ifdef CONFIG_SOC_AM33XX
+OMAP_SYS_TIMER_INIT(3_am33xx, 1, OMAP4_MPU_SOURCE, 2, OMAP4_MPU_SOURCE)
+OMAP_SYS_TIMER(3_am33xx)
+#endif
+
#ifdef CONFIG_ARCH_OMAP4
#ifdef CONFIG_LOCAL_TIMERS
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer,
@@ -393,65 +393,10 @@ static void __init omap4_timer_init(void)
OMAP_SYS_TIMER(4)
#endif
-/**
- * omap2_dm_timer_set_src - change the timer input clock source
- * @pdev: timer platform device pointer
- * @source: array index of parent clock source
- */
-static int omap2_dm_timer_set_src(struct platform_device *pdev, int source)
-{
- int ret;
- struct dmtimer_platform_data *pdata = pdev->dev.platform_data;
- struct clk *fclk, *parent;
- char *parent_name = NULL;
-
- fclk = clk_get(&pdev->dev, "fck");
- if (IS_ERR_OR_NULL(fclk)) {
- dev_err(&pdev->dev, "%s: %d: clk_get() FAILED\n",
- __func__, __LINE__);
- return -EINVAL;
- }
-
- switch (source) {
- case OMAP_TIMER_SRC_SYS_CLK:
- parent_name = "sys_ck";
- break;
-
- case OMAP_TIMER_SRC_32_KHZ:
- parent_name = "32k_ck";
- break;
-
- case OMAP_TIMER_SRC_EXT_CLK:
- if (pdata->timer_ip_version == OMAP_TIMER_IP_VERSION_1) {
- parent_name = "alt_ck";
- break;
- }
- dev_err(&pdev->dev, "%s: %d: invalid clk src.\n",
- __func__, __LINE__);
- clk_put(fclk);
- return -EINVAL;
- }
-
- parent = clk_get(&pdev->dev, parent_name);
- if (IS_ERR_OR_NULL(parent)) {
- dev_err(&pdev->dev, "%s: %d: clk_get() %s FAILED\n",
- __func__, __LINE__, parent_name);
- clk_put(fclk);
- return -EINVAL;
- }
-
- ret = clk_set_parent(fclk, parent);
- if (IS_ERR_VALUE(ret)) {
- dev_err(&pdev->dev, "%s: clk_set_parent() to %s FAILED\n",
- __func__, parent_name);
- ret = -EINVAL;
- }
-
- clk_put(parent);
- clk_put(fclk);
-
- return ret;
-}
+#ifdef CONFIG_SOC_OMAP5
+OMAP_SYS_TIMER_INIT(5, 1, OMAP4_CLKEV_SOURCE, 2, OMAP4_MPU_SOURCE)
+OMAP_SYS_TIMER(5)
+#endif
/**
* omap_timer_init - build and register timer device with an
@@ -473,7 +418,6 @@ static int __init omap_timer_init(struct omap_hwmod *oh, void *unused)
struct dmtimer_platform_data *pdata;
struct platform_device *pdev;
struct omap_timer_capability_dev_attr *timer_dev_attr;
- struct powerdomain *pwrdm;
pr_debug("%s: %s\n", __func__, oh->name);
@@ -501,18 +445,9 @@ static int __init omap_timer_init(struct omap_hwmod *oh, void *unused)
*/
sscanf(oh->name, "timer%2d", &id);
- pdata->set_timer_src = omap2_dm_timer_set_src;
- pdata->timer_ip_version = oh->class->rev;
+ if (timer_dev_attr)
+ pdata->timer_capability = timer_dev_attr->timer_capability;
- /* Mark clocksource and clockevent timers as reserved */
- if ((sys_timer_reserved >> (id - 1)) & 0x1)
- pdata->reserved = 1;
-
- pwrdm = omap_hwmod_get_pwrdm(oh);
- pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm);
-#ifdef CONFIG_PM
- pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count;
-#endif
pdev = omap_device_build(name, id, oh, pdata, sizeof(*pdata),
NULL, 0, 0);
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 119d5a910f3a..de47f170ba50 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -32,6 +32,7 @@
#include "twl-common.h"
#include "pm.h"
#include "voltage.h"
+#include "mux.h"
static struct i2c_board_info __initdata pmic_i2c_board_info = {
.addr = 0x48,
@@ -48,6 +49,7 @@ static struct i2c_board_info __initdata omap4_i2c1_board_info[] = {
},
};
+#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
static int twl_set_voltage(void *data, int target_uV)
{
struct voltagedomain *voltdm = (struct voltagedomain *)data;
@@ -59,6 +61,7 @@ static int twl_get_voltage(void *data)
struct voltagedomain *voltdm = (struct voltagedomain *)data;
return voltdm_get_voltage(voltdm);
}
+#endif
void __init omap_pmic_init(int bus, u32 clkrate,
const char *pmic_type, int pmic_irq,
@@ -77,6 +80,7 @@ void __init omap4_pmic_init(const char *pmic_type,
struct twl6040_platform_data *twl6040_data, int twl6040_irq)
{
/* PMIC part*/
+ omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
strncpy(omap4_i2c1_board_info[0].type, pmic_type,
sizeof(omap4_i2c1_board_info[0].type));
omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N;
@@ -92,7 +96,7 @@ void __init omap4_pmic_init(const char *pmic_type,
void __init omap_pmic_late_init(void)
{
- /* Init the OMAP TWL parameters (if PMIC has been registerd) */
+ /* Init the OMAP TWL parameters (if PMIC has been registered) */
if (pmic_i2c_board_info.irq)
omap3_twl_init();
if (omap4_i2c1_board_info[0].irq)
@@ -211,10 +215,6 @@ static struct twl_regulator_driver_data omap3_vdd2_drvdata = {
void __init omap3_pmic_get_config(struct twl4030_platform_data *pmic_data,
u32 pdata_flags, u32 regulators_flags)
{
- if (!pmic_data->irq_base)
- pmic_data->irq_base = TWL4030_IRQ_BASE;
- if (!pmic_data->irq_end)
- pmic_data->irq_end = TWL4030_IRQ_END;
if (!pmic_data->vdd1) {
omap3_vdd1.driver_data = &omap3_vdd1_drvdata;
omap3_vdd1_drvdata.data = voltdm_lookup("mpu_iva");
@@ -479,11 +479,6 @@ static struct regulator_init_data omap4_v2v1_idata = {
void __init omap4_pmic_get_config(struct twl4030_platform_data *pmic_data,
u32 pdata_flags, u32 regulators_flags)
{
- if (!pmic_data->irq_base)
- pmic_data->irq_base = TWL6030_IRQ_BASE;
- if (!pmic_data->irq_end)
- pmic_data->irq_end = TWL6030_IRQ_END;
-
if (!pmic_data->vdd1) {
omap4_vdd1.driver_data = &omap4_vdd1_drvdata;
omap4_vdd1_drvdata.data = voltdm_lookup("mpu");
diff --git a/arch/arm/mach-omap2/usb-fs.c b/arch/arm/mach-omap2/usb-fs.c
deleted file mode 100644
index 1481078763b8..000000000000
--- a/arch/arm/mach-omap2/usb-fs.c
+++ /dev/null
@@ -1,359 +0,0 @@
-/*
- * Platform level USB initialization for FS USB OTG controller on omap1 and 24xx
- *
- * Copyright (C) 2004 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-
-#include <asm/irq.h>
-
-#include <plat/usb.h>
-#include <plat/board.h>
-
-#include "control.h"
-#include "mux.h"
-
-#define INT_USB_IRQ_GEN INT_24XX_USB_IRQ_GEN
-#define INT_USB_IRQ_NISO INT_24XX_USB_IRQ_NISO
-#define INT_USB_IRQ_ISO INT_24XX_USB_IRQ_ISO
-#define INT_USB_IRQ_HGEN INT_24XX_USB_IRQ_HGEN
-#define INT_USB_IRQ_OTG INT_24XX_USB_IRQ_OTG
-
-#if defined(CONFIG_ARCH_OMAP2)
-
-#ifdef CONFIG_USB_GADGET_OMAP
-
-static struct resource udc_resources[] = {
- /* order is significant! */
- { /* registers */
- .start = UDC_BASE,
- .end = UDC_BASE + 0xff,
- .flags = IORESOURCE_MEM,
- }, { /* general IRQ */
- .start = INT_USB_IRQ_GEN,
- .flags = IORESOURCE_IRQ,
- }, { /* PIO IRQ */
- .start = INT_USB_IRQ_NISO,
- .flags = IORESOURCE_IRQ,
- }, { /* SOF IRQ */
- .start = INT_USB_IRQ_ISO,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static u64 udc_dmamask = ~(u32)0;
-
-static struct platform_device udc_device = {
- .name = "omap_udc",
- .id = -1,
- .dev = {
- .dma_mask = &udc_dmamask,
- .coherent_dma_mask = 0xffffffff,
- },
- .num_resources = ARRAY_SIZE(udc_resources),
- .resource = udc_resources,
-};
-
-static inline void udc_device_init(struct omap_usb_config *pdata)
-{
- pdata->udc_device = &udc_device;
-}
-
-#else
-
-static inline void udc_device_init(struct omap_usb_config *pdata)
-{
-}
-
-#endif
-
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
-
-/* The dmamask must be set for OHCI to work */
-static u64 ohci_dmamask = ~(u32)0;
-
-static struct resource ohci_resources[] = {
- {
- .start = OMAP_OHCI_BASE,
- .end = OMAP_OHCI_BASE + 0xff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_USB_IRQ_HGEN,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device ohci_device = {
- .name = "ohci",
- .id = -1,
- .dev = {
- .dma_mask = &ohci_dmamask,
- .coherent_dma_mask = 0xffffffff,
- },
- .num_resources = ARRAY_SIZE(ohci_resources),
- .resource = ohci_resources,
-};
-
-static inline void ohci_device_init(struct omap_usb_config *pdata)
-{
- pdata->ohci_device = &ohci_device;
-}
-
-#else
-
-static inline void ohci_device_init(struct omap_usb_config *pdata)
-{
-}
-
-#endif
-
-#if defined(CONFIG_USB_OTG) && defined(CONFIG_ARCH_OMAP_OTG)
-
-static struct resource otg_resources[] = {
- /* order is significant! */
- {
- .start = OTG_BASE,
- .end = OTG_BASE + 0xff,
- .flags = IORESOURCE_MEM,
- }, {
- .start = INT_USB_IRQ_OTG,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device otg_device = {
- .name = "omap_otg",
- .id = -1,
- .num_resources = ARRAY_SIZE(otg_resources),
- .resource = otg_resources,
-};
-
-static inline void otg_device_init(struct omap_usb_config *pdata)
-{
- pdata->otg_device = &otg_device;
-}
-
-#else
-
-static inline void otg_device_init(struct omap_usb_config *pdata)
-{
-}
-
-#endif
-
-static void omap2_usb_devconf_clear(u8 port, u32 mask)
-{
- u32 r;
-
- r = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
- r &= ~USBTXWRMODEI(port, mask);
- omap_ctrl_writel(r, OMAP2_CONTROL_DEVCONF0);
-}
-
-static void omap2_usb_devconf_set(u8 port, u32 mask)
-{
- u32 r;
-
- r = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
- r |= USBTXWRMODEI(port, mask);
- omap_ctrl_writel(r, OMAP2_CONTROL_DEVCONF0);
-}
-
-static void omap2_usb2_disable_5pinbitll(void)
-{
- u32 r;
-
- r = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
- r &= ~(USBTXWRMODEI(2, USB_BIDIR_TLL) | USBT2TLL5PI);
- omap_ctrl_writel(r, OMAP2_CONTROL_DEVCONF0);
-}
-
-static void omap2_usb2_enable_5pinunitll(void)
-{
- u32 r;
-
- r = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
- r |= USBTXWRMODEI(2, USB_UNIDIR_TLL) | USBT2TLL5PI;
- omap_ctrl_writel(r, OMAP2_CONTROL_DEVCONF0);
-}
-
-static u32 __init omap2_usb0_init(unsigned nwires, unsigned is_device)
-{
- u32 syscon1 = 0;
-
- omap2_usb_devconf_clear(0, USB_BIDIR_TLL);
-
- if (nwires == 0)
- return 0;
-
- if (is_device)
- omap_mux_init_signal("usb0_puen", 0);
-
- omap_mux_init_signal("usb0_dat", 0);
- omap_mux_init_signal("usb0_txen", 0);
- omap_mux_init_signal("usb0_se0", 0);
- if (nwires != 3)
- omap_mux_init_signal("usb0_rcv", 0);
-
- switch (nwires) {
- case 3:
- syscon1 = 2;
- omap2_usb_devconf_set(0, USB_BIDIR);
- break;
- case 4:
- syscon1 = 1;
- omap2_usb_devconf_set(0, USB_BIDIR);
- break;
- case 6:
- syscon1 = 3;
- omap_mux_init_signal("usb0_vp", 0);
- omap_mux_init_signal("usb0_vm", 0);
- omap2_usb_devconf_set(0, USB_UNIDIR);
- break;
- default:
- printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
- 0, nwires);
- }
-
- return syscon1 << 16;
-}
-
-static u32 __init omap2_usb1_init(unsigned nwires)
-{
- u32 syscon1 = 0;
-
- omap2_usb_devconf_clear(1, USB_BIDIR_TLL);
-
- if (nwires == 0)
- return 0;
-
- /* NOTE: board-specific code must set up pin muxing for usb1,
- * since each signal could come out on either of two balls.
- */
-
- switch (nwires) {
- case 2:
- /* NOTE: board-specific code must override this setting if
- * this TLL link is not using DP/DM
- */
- syscon1 = 1;
- omap2_usb_devconf_set(1, USB_BIDIR_TLL);
- break;
- case 3:
- syscon1 = 2;
- omap2_usb_devconf_set(1, USB_BIDIR);
- break;
- case 4:
- syscon1 = 1;
- omap2_usb_devconf_set(1, USB_BIDIR);
- break;
- case 6:
- default:
- printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
- 1, nwires);
- }
-
- return syscon1 << 20;
-}
-
-static u32 __init omap2_usb2_init(unsigned nwires, unsigned alt_pingroup)
-{
- u32 syscon1 = 0;
-
- omap2_usb2_disable_5pinbitll();
- alt_pingroup = 0;
-
- /* NOTE omap1 erratum: must leave USB2_UNI_R set if usb0 in use */
- if (alt_pingroup || nwires == 0)
- return 0;
-
- omap_mux_init_signal("usb2_dat", 0);
- omap_mux_init_signal("usb2_se0", 0);
- if (nwires > 2)
- omap_mux_init_signal("usb2_txen", 0);
- if (nwires > 3)
- omap_mux_init_signal("usb2_rcv", 0);
-
- switch (nwires) {
- case 2:
- /* NOTE: board-specific code must override this setting if
- * this TLL link is not using DP/DM
- */
- syscon1 = 1;
- omap2_usb_devconf_set(2, USB_BIDIR_TLL);
- break;
- case 3:
- syscon1 = 2;
- omap2_usb_devconf_set(2, USB_BIDIR);
- break;
- case 4:
- syscon1 = 1;
- omap2_usb_devconf_set(2, USB_BIDIR);
- break;
- case 5:
- /* NOTE: board-specific code must mux this setting depending
- * on TLL link using DP/DM. Something must also
- * set up OTG_SYSCON2.HMC_TLL{ATTACH,SPEED}
- * 2420: hdq_sio.usb2_tllse0 or vlynq_rx0.usb2_tllse0
- * 2430: hdq_sio.usb2_tllse0 or sdmmc2_dat0.usb2_tllse0
- */
-
- syscon1 = 3;
- omap2_usb2_enable_5pinunitll();
- break;
- case 6:
- default:
- printk(KERN_ERR "illegal usb%d %d-wire transceiver\n",
- 2, nwires);
- }
-
- return syscon1 << 24;
-}
-
-void __init omap2_usbfs_init(struct omap_usb_config *pdata)
-{
- struct clk *ick;
-
- if (!cpu_is_omap24xx())
- return;
-
- ick = clk_get(NULL, "usb_l4_ick");
- if (IS_ERR(ick))
- return;
-
- clk_enable(ick);
- pdata->usb0_init = omap2_usb0_init;
- pdata->usb1_init = omap2_usb1_init;
- pdata->usb2_init = omap2_usb2_init;
- udc_device_init(pdata);
- ohci_device_init(pdata);
- otg_device_init(pdata);
- omap_otg_init(pdata);
- clk_disable(ick);
- clk_put(ick);
-}
-
-#endif
diff --git a/arch/arm/mach-omap2/voltage.h b/arch/arm/mach-omap2/voltage.h
index 16a1b092cf36..0ac2caf15941 100644
--- a/arch/arm/mach-omap2/voltage.h
+++ b/arch/arm/mach-omap2/voltage.h
@@ -16,6 +16,8 @@
#include <linux/err.h>
+#include <plat/voltage.h>
+
#include "vc.h"
#include "vp.h"
@@ -91,25 +93,6 @@ struct voltagedomain {
};
/**
- * struct omap_volt_data - Omap voltage specific data.
- * @voltage_nominal: The possible voltage value in uV
- * @sr_efuse_offs: The offset of the efuse register(from system
- * control module base address) from where to read
- * the n-target value for the smartreflex module.
- * @sr_errminlimit: Error min limit value for smartreflex. This value
- * differs at differnet opp and thus is linked
- * with voltage.
- * @vp_errorgain: Error gain value for the voltage processor. This
- * field also differs according to the voltage/opp.
- */
-struct omap_volt_data {
- u32 volt_nominal;
- u32 sr_efuse_offs;
- u8 sr_errminlimit;
- u8 vp_errgain;
-};
-
-/**
* struct omap_voltdm_pmic - PMIC specific data required by voltage driver.
* @slew_rate: PMIC slew rate (in uv/us)
* @step_size: PMIC voltage step size (in uv)
@@ -156,6 +139,7 @@ int omap_voltage_late_init(void);
extern void omap2xxx_voltagedomains_init(void);
extern void omap3xxx_voltagedomains_init(void);
+extern void am33xx_voltagedomains_init(void);
extern void omap44xx_voltagedomains_init(void);
struct voltagedomain *voltdm_lookup(const char *name);
diff --git a/arch/arm/mach-omap2/voltagedomains33xx_data.c b/arch/arm/mach-omap2/voltagedomains33xx_data.c
new file mode 100644
index 000000000000..965458dc0cb9
--- /dev/null
+++ b/arch/arm/mach-omap2/voltagedomains33xx_data.c
@@ -0,0 +1,43 @@
+/*
+ * AM33XX voltage domain data
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include "voltage.h"
+
+static struct voltagedomain am33xx_voltdm_mpu = {
+ .name = "mpu",
+};
+
+static struct voltagedomain am33xx_voltdm_core = {
+ .name = "core",
+};
+
+static struct voltagedomain am33xx_voltdm_rtc = {
+ .name = "rtc",
+};
+
+static struct voltagedomain *voltagedomains_am33xx[] __initdata = {
+ &am33xx_voltdm_mpu,
+ &am33xx_voltdm_core,
+ &am33xx_voltdm_rtc,
+ NULL,
+};
+
+void __init am33xx_voltagedomains_init(void)
+{
+ voltdm_init(voltagedomains_am33xx);
+}
diff --git a/arch/arm/mach-picoxcell/Makefile b/arch/arm/mach-picoxcell/Makefile
index e5ec4a8d9bcb..8e39f80fce19 100644
--- a/arch/arm/mach-picoxcell/Makefile
+++ b/arch/arm/mach-picoxcell/Makefile
@@ -1,2 +1 @@
obj-y := common.o
-obj-y += time.o
diff --git a/arch/arm/mach-picoxcell/common.c b/arch/arm/mach-picoxcell/common.c
index a2e8ae8b5821..8f9a0b47a7fa 100644
--- a/arch/arm/mach-picoxcell/common.c
+++ b/arch/arm/mach-picoxcell/common.c
@@ -14,6 +14,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include <linux/dw_apb_timer.h>
#include <asm/mach/arch.h>
#include <asm/hardware/vic.h>
@@ -97,7 +98,7 @@ DT_MACHINE_START(PICOXCELL, "Picochip picoXcell")
.nr_irqs = NR_IRQS_LEGACY,
.init_irq = picoxcell_init_irq,
.handle_irq = vic_handle_irq,
- .timer = &picoxcell_timer,
+ .timer = &dw_apb_timer,
.init_machine = picoxcell_init_machine,
.dt_compat = picoxcell_dt_match,
.restart = picoxcell_wdt_restart,
diff --git a/arch/arm/mach-picoxcell/common.h b/arch/arm/mach-picoxcell/common.h
index 83d55ab956a4..a65cb02f84c8 100644
--- a/arch/arm/mach-picoxcell/common.h
+++ b/arch/arm/mach-picoxcell/common.h
@@ -12,6 +12,6 @@
#include <asm/mach/time.h>
-extern struct sys_timer picoxcell_timer;
+extern struct sys_timer dw_apb_timer;
#endif /* __PICOXCELL_COMMON_H__ */
diff --git a/arch/arm/mach-prima2/include/mach/gpio.h b/arch/arm/mach-prima2/include/mach/gpio.h
new file mode 100644
index 000000000000..1904bb03876e
--- /dev/null
+++ b/arch/arm/mach-prima2/include/mach/gpio.h
@@ -0,0 +1,13 @@
+#ifndef __MACH_GPIO_H
+#define __MACH_GPIO_H
+
+/* Pull up/down values */
+enum sirfsoc_gpio_pull {
+ SIRFSOC_GPIO_PULL_NONE,
+ SIRFSOC_GPIO_PULL_UP,
+ SIRFSOC_GPIO_PULL_DOWN,
+};
+
+void sirfsoc_gpio_set_pull(unsigned gpio, unsigned mode);
+
+#endif
diff --git a/arch/arm/mach-prima2/include/mach/irqs.h b/arch/arm/mach-prima2/include/mach/irqs.h
index bb354f952fd6..f6014a07541f 100644
--- a/arch/arm/mach-prima2/include/mach/irqs.h
+++ b/arch/arm/mach-prima2/include/mach/irqs.h
@@ -11,7 +11,7 @@
#define SIRFSOC_INTENAL_IRQ_START 0
#define SIRFSOC_INTENAL_IRQ_END 59
-
+#define SIRFSOC_GPIO_IRQ_START (SIRFSOC_INTENAL_IRQ_END + 1)
#define NR_IRQS 220
#endif
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index d09da6a746b8..d3de84b0dcbe 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -127,7 +127,11 @@ static unsigned long hx4700_pin_config[] __initdata = {
GPIO19_SSP2_SCLK,
GPIO86_SSP2_RXD,
GPIO87_SSP2_TXD,
- GPIO88_GPIO,
+ GPIO88_GPIO | MFP_LPM_DRIVE_HIGH, /* TSC2046_CS */
+
+ /* BQ24022 Regulator */
+ GPIO72_GPIO | MFP_LPM_KEEP_OUTPUT, /* BQ24022_nCHARGE_EN */
+ GPIO96_GPIO | MFP_LPM_KEEP_OUTPUT, /* BQ24022_ISET2 */
/* HX4700 specific input GPIOs */
GPIO12_GPIO | WAKEUP_ON_EDGE_RISE, /* ASIC3_IRQ */
@@ -135,6 +139,10 @@ static unsigned long hx4700_pin_config[] __initdata = {
GPIO14_GPIO, /* nWLAN_IRQ */
/* HX4700 specific output GPIOs */
+ GPIO61_GPIO | MFP_LPM_DRIVE_HIGH, /* W3220_nRESET */
+ GPIO71_GPIO | MFP_LPM_DRIVE_HIGH, /* ASIC3_nRESET */
+ GPIO81_GPIO | MFP_LPM_DRIVE_HIGH, /* CPU_GP_nRESET */
+ GPIO116_GPIO | MFP_LPM_DRIVE_HIGH, /* CPU_HW_nRESET */
GPIO102_GPIO | MFP_LPM_DRIVE_LOW, /* SYNAPTICS_POWER_ON */
GPIO10_GPIO, /* GSM_IRQ */
@@ -872,14 +880,19 @@ static struct gpio global_gpios[] = {
{ GPIO110_HX4700_LCD_LVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_LVDD" },
{ GPIO111_HX4700_LCD_AVDD_3V3_ON, GPIOF_OUT_INIT_HIGH, "LCD_AVDD" },
{ GPIO32_HX4700_RS232_ON, GPIOF_OUT_INIT_HIGH, "RS232_ON" },
+ { GPIO61_HX4700_W3220_nRESET, GPIOF_OUT_INIT_HIGH, "W3220_nRESET" },
{ GPIO71_HX4700_ASIC3_nRESET, GPIOF_OUT_INIT_HIGH, "ASIC3_nRESET" },
+ { GPIO81_HX4700_CPU_GP_nRESET, GPIOF_OUT_INIT_HIGH, "CPU_GP_nRESET" },
{ GPIO82_HX4700_EUART_RESET, GPIOF_OUT_INIT_HIGH, "EUART_RESET" },
+ { GPIO116_HX4700_CPU_HW_nRESET, GPIOF_OUT_INIT_HIGH, "CPU_HW_nRESET" },
};
static void __init hx4700_init(void)
{
int ret;
+ PCFR = PCFR_GPR_EN | PCFR_OPDE;
+
pxa2xx_mfp_config(ARRAY_AND_SIZE(hx4700_pin_config));
gpio_set_wake(GPIO12_HX4700_ASIC3_IRQ, 1);
ret = gpio_request_array(ARRAY_AND_SIZE(global_gpios));
diff --git a/arch/arm/mach-rpc/irq.c b/arch/arm/mach-rpc/irq.c
index cf0e669eaf1a..3e4fa849c64d 100644
--- a/arch/arm/mach-rpc/irq.c
+++ b/arch/arm/mach-rpc/irq.c
@@ -163,6 +163,6 @@ void __init rpc_init_irq(void)
}
}
- init_FIQ();
+ init_FIQ(FIQ_START);
}
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2416.c b/arch/arm/mach-s3c24xx/clock-s3c2416.c
index 8702ecfaab30..14a81c2317a4 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2416.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2416.c
@@ -144,7 +144,8 @@ static struct clk_lookup s3c2416_clk_lookup[] = {
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &hsmmc0_clk),
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &hsmmc_mux0.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &hsmmc_mux1.clk),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &hsspi_mux.clk),
+ /* s3c2443-spi.0 is used on s3c2416 and s3c2450 as well */
+ CLKDEV_INIT("s3c2443-spi.0", "spi_busclk2", &hsspi_mux.clk),
};
void __init s3c2416_init_clocks(int xtal)
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 414364eb426c..cb2883d553b5 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -106,7 +106,7 @@ static struct clk s3c2440_clk_cam_upll = {
static struct clk s3c2440_clk_ac97 = {
.name = "ac97",
.enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2440_CLKCON_CAMERA,
+ .ctrlbit = S3C2440_CLKCON_AC97,
};
static unsigned long s3c2440_fclk_n_getrate(struct clk *clk)
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2443.c b/arch/arm/mach-s3c24xx/clock-s3c2443.c
index a4c5a520d994..7f689ce1be61 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2443.c
@@ -181,7 +181,7 @@ static struct clk *clks[] __initdata = {
static struct clk_lookup s3c2443_clk_lookup[] = {
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_hsmmc),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_hsspi.clk),
+ CLKDEV_INIT("s3c2443-spi.0", "spi_busclk2", &clk_hsspi.clk),
};
void __init s3c2443_init_clocks(int xtal)
diff --git a/arch/arm/mach-s3c24xx/common-s3c2443.c b/arch/arm/mach-s3c24xx/common-s3c2443.c
index aeeb2be283fa..aeb4a24ff3ed 100644
--- a/arch/arm/mach-s3c24xx/common-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/common-s3c2443.c
@@ -559,7 +559,7 @@ static struct clk hsmmc1_clk = {
static struct clk hsspi_clk = {
.name = "spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "s3c2443-spi.0",
.parent = &clk_p,
.enable = s3c2443_clkcon_enable_p,
.ctrlbit = S3C2443_PCLKCON_HSSPI,
@@ -633,7 +633,7 @@ static struct clk_lookup s3c2443_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_esys_uart.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &hsmmc1_clk),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk0", &hsspi_clk),
+ CLKDEV_INIT("s3c2443-spi.0", "spi_busclk0", &hsspi_clk),
};
void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
diff --git a/arch/arm/mach-s3c24xx/common-smdk.c b/arch/arm/mach-s3c24xx/common-smdk.c
index 084604be6ad1..87e75a250d5e 100644
--- a/arch/arm/mach-s3c24xx/common-smdk.c
+++ b/arch/arm/mach-s3c24xx/common-smdk.c
@@ -182,19 +182,21 @@ static struct platform_device __initdata *smdk_devs[] = {
&smdk_led7,
};
+static const struct gpio smdk_led_gpios[] = {
+ { S3C2410_GPF(4), GPIOF_OUT_INIT_HIGH, NULL },
+ { S3C2410_GPF(5), GPIOF_OUT_INIT_HIGH, NULL },
+ { S3C2410_GPF(6), GPIOF_OUT_INIT_HIGH, NULL },
+ { S3C2410_GPF(7), GPIOF_OUT_INIT_HIGH, NULL },
+};
+
void __init smdk_machine_init(void)
{
/* Configure the LEDs (even if we have no LED support)*/
- s3c_gpio_cfgpin(S3C2410_GPF(4), S3C2410_GPIO_OUTPUT);
- s3c_gpio_cfgpin(S3C2410_GPF(5), S3C2410_GPIO_OUTPUT);
- s3c_gpio_cfgpin(S3C2410_GPF(6), S3C2410_GPIO_OUTPUT);
- s3c_gpio_cfgpin(S3C2410_GPF(7), S3C2410_GPIO_OUTPUT);
-
- s3c2410_gpio_setpin(S3C2410_GPF(4), 1);
- s3c2410_gpio_setpin(S3C2410_GPF(5), 1);
- s3c2410_gpio_setpin(S3C2410_GPF(6), 1);
- s3c2410_gpio_setpin(S3C2410_GPF(7), 1);
+ int ret = gpio_request_array(smdk_led_gpios,
+ ARRAY_SIZE(smdk_led_gpios));
+ if (!WARN_ON(ret < 0))
+ gpio_free_array(smdk_led_gpios, ARRAY_SIZE(smdk_led_gpios));
if (machine_is_smdk2443())
smdk_nand_info.twrph0 = 50;
diff --git a/arch/arm/mach-s3c24xx/common.c b/arch/arm/mach-s3c24xx/common.c
index 56cdd34cce41..0c9e9a785ef6 100644
--- a/arch/arm/mach-s3c24xx/common.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -41,7 +41,6 @@
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <mach/regs-clock.h>
#include <mach/regs-gpio.h>
#include <plat/regs-serial.h>
diff --git a/arch/arm/mach-s3c24xx/include/mach/bast-pmu.h b/arch/arm/mach-s3c24xx/include/mach/bast-pmu.h
deleted file mode 100644
index 4c38b39b741d..000000000000
--- a/arch/arm/mach-s3c24xx/include/mach/bast-pmu.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/bast-pmu.h
- *
- * Copyright (c) 2003-2004 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * Vincent Sanders <vince@simtec.co.uk>
- *
- * Machine BAST - Power Management chip
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_BASTPMU_H
-#define __ASM_ARCH_BASTPMU_H "08_OCT_2004"
-
-#define BASTPMU_REG_IDENT (0x00)
-#define BASTPMU_REG_VERSION (0x01)
-#define BASTPMU_REG_DDCCTRL (0x02)
-#define BASTPMU_REG_POWER (0x03)
-#define BASTPMU_REG_RESET (0x04)
-#define BASTPMU_REG_GWO (0x05)
-#define BASTPMU_REG_WOL (0x06)
-#define BASTPMU_REG_WOR (0x07)
-#define BASTPMU_REG_UID (0x09)
-
-#define BASTPMU_EEPROM (0xC0)
-
-#define BASTPMU_EEP_UID (BASTPMU_EEPROM + 0)
-#define BASTPMU_EEP_WOL (BASTPMU_EEPROM + 8)
-#define BASTPMU_EEP_WOR (BASTPMU_EEPROM + 9)
-
-#define BASTPMU_IDENT_0 0x53
-#define BASTPMU_IDENT_1 0x42
-#define BASTPMU_IDENT_2 0x50
-#define BASTPMU_IDENT_3 0x4d
-
-#define BASTPMU_RESET_GUARD (0x55)
-
-#endif /* __ASM_ARCH_BASTPMU_H */
diff --git a/arch/arm/mach-s3c24xx/include/mach/gpio-nrs.h b/arch/arm/mach-s3c24xx/include/mach/gpio-nrs.h
index 019ea86057f6..3890a05948fb 100644
--- a/arch/arm/mach-s3c24xx/include/mach/gpio-nrs.h
+++ b/arch/arm/mach-s3c24xx/include/mach/gpio-nrs.h
@@ -93,26 +93,5 @@ enum s3c_gpio_number {
#define S3C2410_GPL(_nr) (S3C2410_GPIO_L_START + (_nr))
#define S3C2410_GPM(_nr) (S3C2410_GPIO_M_START + (_nr))
-/* compatibility until drivers can be modified */
-
-#define S3C2410_GPA0 S3C2410_GPA(0)
-#define S3C2410_GPA1 S3C2410_GPA(1)
-#define S3C2410_GPA3 S3C2410_GPA(3)
-#define S3C2410_GPA7 S3C2410_GPA(7)
-
-#define S3C2410_GPE0 S3C2410_GPE(0)
-#define S3C2410_GPE1 S3C2410_GPE(1)
-#define S3C2410_GPE2 S3C2410_GPE(2)
-#define S3C2410_GPE3 S3C2410_GPE(3)
-#define S3C2410_GPE4 S3C2410_GPE(4)
-#define S3C2410_GPE5 S3C2410_GPE(5)
-#define S3C2410_GPE6 S3C2410_GPE(6)
-#define S3C2410_GPE7 S3C2410_GPE(7)
-#define S3C2410_GPE8 S3C2410_GPE(8)
-#define S3C2410_GPE9 S3C2410_GPE(9)
-#define S3C2410_GPE10 S3C2410_GPE(10)
-
-#define S3C2410_GPH10 S3C2410_GPH(10)
-
#endif /* __MACH_GPIONRS_H */
diff --git a/arch/arm/mach-s3c24xx/include/mach/gta02.h b/arch/arm/mach-s3c24xx/include/mach/gta02.h
index 3a56a229cac6..217393482153 100644
--- a/arch/arm/mach-s3c24xx/include/mach/gta02.h
+++ b/arch/arm/mach-s3c24xx/include/mach/gta02.h
@@ -3,82 +3,13 @@
#include <mach/regs-gpio.h>
-/* Different hardware revisions, passed in ATAG_REVISION by u-boot */
-#define GTA02v1_SYSTEM_REV 0x00000310
-#define GTA02v2_SYSTEM_REV 0x00000320
-#define GTA02v3_SYSTEM_REV 0x00000330
-#define GTA02v4_SYSTEM_REV 0x00000340
-#define GTA02v5_SYSTEM_REV 0x00000350
-/* since A7 is basically same as A6, we use A6 PCB ID */
-#define GTA02v6_SYSTEM_REV 0x00000360
-
-#define GTA02_GPIO_n3DL_GSM S3C2410_GPA(13) /* v1 + v2 + v3 only */
-
-#define GTA02_GPIO_PWR_LED1 S3C2410_GPB(0)
-#define GTA02_GPIO_PWR_LED2 S3C2410_GPB(1)
#define GTA02_GPIO_AUX_LED S3C2410_GPB(2)
-#define GTA02_GPIO_VIBRATOR_ON S3C2410_GPB(3)
-#define GTA02_GPIO_MODEM_RST S3C2410_GPB(5)
-#define GTA02_GPIO_BT_EN S3C2410_GPB(6)
-#define GTA02_GPIO_MODEM_ON S3C2410_GPB(7)
-#define GTA02_GPIO_EXTINT8 S3C2410_GPB(8)
#define GTA02_GPIO_USB_PULLUP S3C2410_GPB(9)
-
-#define GTA02_GPIO_PIO5 S3C2410_GPC(5) /* v3 + v4 only */
-
-#define GTA02v3_GPIO_nG1_CS S3C2410_GPD(12) /* v3 + v4 only */
-#define GTA02v3_GPIO_nG2_CS S3C2410_GPD(13) /* v3 + v4 only */
-#define GTA02v5_GPIO_HDQ S3C2410_GPD(14) /* v5 + */
-
-#define GTA02_GPIO_nG1_INT S3C2410_GPF(0)
-#define GTA02_GPIO_IO1 S3C2410_GPF(1)
-#define GTA02_GPIO_PIO_2 S3C2410_GPF(2) /* v2 + v3 + v4 only */
-#define GTA02_GPIO_JACK_INSERT S3C2410_GPF(4)
-#define GTA02_GPIO_WLAN_GPIO1 S3C2410_GPF(5) /* v2 + v3 + v4 only */
#define GTA02_GPIO_AUX_KEY S3C2410_GPF(6)
#define GTA02_GPIO_HOLD_KEY S3C2410_GPF(7)
-
-#define GTA02_GPIO_3D_IRQ S3C2410_GPG(4)
-#define GTA02v2_GPIO_nG2_INT S3C2410_GPG(8) /* v2 + v3 + v4 only */
-#define GTA02v3_GPIO_nUSB_OC S3C2410_GPG(9) /* v3 + v4 only */
-#define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */
-#define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */
-
#define GTA02_GPIO_AMP_SHUT S3C2410_GPJ(1) /* v2 + v3 + v4 only */
-#define GTA02v1_GPIO_WLAN_GPIO10 S3C2410_GPJ(2)
#define GTA02_GPIO_HP_IN S3C2410_GPJ(2) /* v2 + v3 + v4 only */
-#define GTA02_GPIO_INT0 S3C2410_GPJ(3) /* v2 + v3 + v4 only */
-#define GTA02_GPIO_nGSM_EN S3C2410_GPJ(4)
-#define GTA02_GPIO_3D_RESET S3C2410_GPJ(5)
-#define GTA02_GPIO_nDL_GSM S3C2410_GPJ(6) /* v4 + v5 only */
-#define GTA02_GPIO_WLAN_GPIO0 S3C2410_GPJ(7)
-#define GTA02v1_GPIO_BAT_ID S3C2410_GPJ(8)
-#define GTA02_GPIO_KEEPACT S3C2410_GPJ(8)
-#define GTA02v1_GPIO_HP_IN S3C2410_GPJ(10)
-#define GTA02_CHIP_PWD S3C2410_GPJ(11) /* v2 + v3 + v4 only */
-#define GTA02_GPIO_nWLAN_RESET S3C2410_GPJ(12) /* v2 + v3 + v4 only */
-#define GTA02_IRQ_GSENSOR_1 IRQ_EINT0
-#define GTA02_IRQ_MODEM IRQ_EINT1
-#define GTA02_IRQ_PIO_2 IRQ_EINT2 /* v2 + v3 + v4 only */
-#define GTA02_IRQ_nJACK_INSERT IRQ_EINT4
-#define GTA02_IRQ_WLAN_GPIO1 IRQ_EINT5
-#define GTA02_IRQ_AUX IRQ_EINT6
-#define GTA02_IRQ_nHOLD IRQ_EINT7
#define GTA02_IRQ_PCF50633 IRQ_EINT9
-#define GTA02_IRQ_3D IRQ_EINT12
-#define GTA02_IRQ_GSENSOR_2 IRQ_EINT16 /* v2 + v3 + v4 only */
-#define GTA02v3_IRQ_nUSB_OC IRQ_EINT17 /* v3 + v4 only */
-#define GTA02v3_IRQ_nUSB_FLT IRQ_EINT18 /* v3 + v4 only */
-#define GTA02v3_IRQ_nGSM_OC IRQ_EINT19 /* v3 + v4 only */
-
-/* returns 00 000 on GTA02 A5 and earlier, A6 returns 01 001 */
-#define GTA02_PCB_ID1_0 S3C2410_GPC(13)
-#define GTA02_PCB_ID1_1 S3C2410_GPC(15)
-#define GTA02_PCB_ID1_2 S3C2410_GPD(0)
-#define GTA02_PCB_ID2_0 S3C2410_GPD(3)
-#define GTA02_PCB_ID2_1 S3C2410_GPD(4)
-
-int gta02_get_pcb_revision(void);
#endif /* _GTA02_H */
diff --git a/arch/arm/mach-s3c24xx/include/mach/regs-gpio.h b/arch/arm/mach-s3c24xx/include/mach/regs-gpio.h
index cac1ad6b582c..a11a638bd599 100644
--- a/arch/arm/mach-s3c24xx/include/mach/regs-gpio.h
+++ b/arch/arm/mach-s3c24xx/include/mach/regs-gpio.h
@@ -302,7 +302,7 @@
/* S3C2410:
* Port G consists of 8 GPIO/IRQ/Special function
*
- * GPGCON has 2 bits for each of the input pins on port F
+ * GPGCON has 2 bits for each of the input pins on port G
* 00 = 0 input, 1 output, 2 interrupt (EINT0..7), 3 special func
*
* pull up works like all other ports.
@@ -366,7 +366,7 @@
/* Port H consists of11 GPIO/serial/Misc pins
*
- * GPGCON has 2 bits for each of the input pins on port F
+ * GPHCON has 2 bits for each of the input pins on port H
* 00 = 0 input, 1 output, 2 interrupt (EINT0..7), 3 special func
*
* pull up works like all other ports.
@@ -427,6 +427,19 @@
* for the 2412/2413 from the 2410/2440/2442
*/
+/*
+ * Port J consists of 13 GPIO/Camera pins. GPJCON has 2 bits
+ * for each of the pins on port J.
+ * 00 - input, 01 output, 10 - camera
+ *
+ * Pull up works like all other ports.
+ */
+
+#define S3C2413_GPJCON S3C2410_GPIOREG(0x80)
+#define S3C2413_GPJDAT S3C2410_GPIOREG(0x84)
+#define S3C2413_GPJUP S3C2410_GPIOREG(0x88)
+#define S3C2413_GPJSLPCON S3C2410_GPIOREG(0x8C)
+
/* S3C2443 and above */
#define S3C2440_GPJCON S3C2410_GPIOREG(0xD0)
#define S3C2440_GPJDAT S3C2410_GPIOREG(0xD4)
diff --git a/arch/arm/mach-s3c24xx/include/mach/regs-gpioj.h b/arch/arm/mach-s3c24xx/include/mach/regs-gpioj.h
deleted file mode 100644
index 19575e061114..000000000000
--- a/arch/arm/mach-s3c24xx/include/mach/regs-gpioj.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/regs-gpioj.h
- *
- * Copyright (c) 2004 Simtec Electronics <linux@simtec.co.uk>
- * http://www.simtec.co.uk/products/SWLINUX/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * S3C2440 GPIO J register definitions
-*/
-
-
-#ifndef __ASM_ARCH_REGS_GPIOJ_H
-#define __ASM_ARCH_REGS_GPIOJ_H "gpioj"
-
-/* Port J consists of 13 GPIO/Camera pins
- *
- * GPJCON has 2 bits for each of the input pins on port F
- * 00 = 0 input, 1 output, 2 Camera
- *
- * pull up works like all other ports.
-*/
-
-#define S3C2413_GPJCON S3C2410_GPIOREG(0x80)
-#define S3C2413_GPJDAT S3C2410_GPIOREG(0x84)
-#define S3C2413_GPJUP S3C2410_GPIOREG(0x88)
-#define S3C2413_GPJSLPCON S3C2410_GPIOREG(0x8C)
-
-#define S3C2440_GPJ0_OUTP (0x01 << 0)
-#define S3C2440_GPJ0_CAMDATA0 (0x02 << 0)
-
-#define S3C2440_GPJ1_OUTP (0x01 << 2)
-#define S3C2440_GPJ1_CAMDATA1 (0x02 << 2)
-
-#define S3C2440_GPJ2_OUTP (0x01 << 4)
-#define S3C2440_GPJ2_CAMDATA2 (0x02 << 4)
-
-#define S3C2440_GPJ3_OUTP (0x01 << 6)
-#define S3C2440_GPJ3_CAMDATA3 (0x02 << 6)
-
-#define S3C2440_GPJ4_OUTP (0x01 << 8)
-#define S3C2440_GPJ4_CAMDATA4 (0x02 << 8)
-
-#define S3C2440_GPJ5_OUTP (0x01 << 10)
-#define S3C2440_GPJ5_CAMDATA5 (0x02 << 10)
-
-#define S3C2440_GPJ6_OUTP (0x01 << 12)
-#define S3C2440_GPJ6_CAMDATA6 (0x02 << 12)
-
-#define S3C2440_GPJ7_OUTP (0x01 << 14)
-#define S3C2440_GPJ7_CAMDATA7 (0x02 << 14)
-
-#define S3C2440_GPJ8_OUTP (0x01 << 16)
-#define S3C2440_GPJ8_CAMPCLK (0x02 << 16)
-
-#define S3C2440_GPJ9_OUTP (0x01 << 18)
-#define S3C2440_GPJ9_CAMVSYNC (0x02 << 18)
-
-#define S3C2440_GPJ10_OUTP (0x01 << 20)
-#define S3C2440_GPJ10_CAMHREF (0x02 << 20)
-
-#define S3C2440_GPJ11_OUTP (0x01 << 22)
-#define S3C2440_GPJ11_CAMCLKOUT (0x02 << 22)
-
-#define S3C2440_GPJ12_OUTP (0x01 << 24)
-#define S3C2440_GPJ12_CAMRESET (0x02 << 24)
-
-#endif /* __ASM_ARCH_REGS_GPIOJ_H */
-
diff --git a/arch/arm/mach-s3c24xx/mach-gta02.c b/arch/arm/mach-s3c24xx/mach-gta02.c
index 0f29f64a3eeb..92e1f93a6bca 100644
--- a/arch/arm/mach-s3c24xx/mach-gta02.c
+++ b/arch/arm/mach-s3c24xx/mach-gta02.c
@@ -71,7 +71,6 @@
#include <mach/regs-irq.h>
#include <mach/regs-gpio.h>
-#include <mach/regs-gpioj.h>
#include <mach/fb.h>
#include <plat/usb-control.h>
diff --git a/arch/arm/mach-s3c24xx/mach-mini2440.c b/arch/arm/mach-s3c24xx/mach-mini2440.c
index f092b188ab70..bd6d2525debe 100644
--- a/arch/arm/mach-s3c24xx/mach-mini2440.c
+++ b/arch/arm/mach-s3c24xx/mach-mini2440.c
@@ -634,8 +634,8 @@ static void __init mini2440_init(void)
s3c_gpio_cfgpin(S3C2410_GPC(0), S3C2410_GPC0_LEND);
/* Turn the backlight early on */
- WARN_ON(gpio_request(S3C2410_GPG(4), "backlight"));
- gpio_direction_output(S3C2410_GPG(4), 1);
+ WARN_ON(gpio_request_one(S3C2410_GPG(4), GPIOF_OUT_INIT_HIGH, NULL));
+ gpio_free(S3C2410_GPG(4));
/* remove pullup on optional PWM backlight -- unused on 3.5 and 7"s */
s3c_gpio_setpull(S3C2410_GPB(1), S3C_GPIO_PULL_UP);
diff --git a/arch/arm/mach-s3c24xx/mach-qt2410.c b/arch/arm/mach-s3c24xx/mach-qt2410.c
index b868dddcb836..678bbca2b5e5 100644
--- a/arch/arm/mach-s3c24xx/mach-qt2410.c
+++ b/arch/arm/mach-s3c24xx/mach-qt2410.c
@@ -47,7 +47,6 @@
#include <asm/irq.h>
#include <asm/mach-types.h>
-#include <mach/regs-gpio.h>
#include <mach/leds-gpio.h>
#include <mach/regs-lcd.h>
#include <plat/regs-serial.h>
@@ -325,8 +324,9 @@ static void __init qt2410_machine_init(void)
}
s3c24xx_fb_set_platdata(&qt2410_fb_info);
- s3c_gpio_cfgpin(S3C2410_GPB(0), S3C2410_GPIO_OUTPUT);
- s3c2410_gpio_setpin(S3C2410_GPB(0), 1);
+ /* set initial state of the LED GPIO */
+ WARN_ON(gpio_request_one(S3C2410_GPB(0), GPIOF_OUT_INIT_HIGH, NULL));
+ gpio_free(S3C2410_GPB(0));
s3c24xx_udc_set_platdata(&qt2410_udc_cfg);
s3c_i2c0_set_platdata(NULL);
diff --git a/arch/arm/mach-s3c24xx/mach-rx1950.c b/arch/arm/mach-s3c24xx/mach-rx1950.c
index a6762aae4727..7ee73f27f207 100644
--- a/arch/arm/mach-s3c24xx/mach-rx1950.c
+++ b/arch/arm/mach-s3c24xx/mach-rx1950.c
@@ -42,7 +42,6 @@
#include <asm/mach-types.h>
#include <mach/regs-gpio.h>
-#include <mach/regs-gpioj.h>
#include <mach/regs-lcd.h>
#include <mach/h1940.h>
#include <mach/fb.h>
diff --git a/arch/arm/mach-s3c24xx/pm-s3c2410.c b/arch/arm/mach-s3c24xx/pm-s3c2410.c
index 03f706dd6009..949ae05e07c5 100644
--- a/arch/arm/mach-s3c24xx/pm-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/pm-s3c2410.c
@@ -77,8 +77,10 @@ static void s3c2410_pm_prepare(void)
__raw_writel(calc, phys_to_virt(H1940_SUSPEND_CHECKSUM));
}
- if ( machine_is_aml_m5900() )
- s3c2410_gpio_setpin(S3C2410_GPF(2), 1);
+ if (machine_is_aml_m5900()) {
+ gpio_request_one(S3C2410_GPF(2), GPIOF_OUT_INIT_HIGH, NULL);
+ gpio_free(S3C2410_GPF(2));
+ }
if (machine_is_rx1950()) {
/* According to S3C2442 user's manual, page 7-17,
@@ -103,8 +105,10 @@ static void s3c2410_pm_resume(void)
tmp &= S3C2410_GSTATUS2_OFFRESET;
__raw_writel(tmp, S3C2410_GSTATUS2);
- if ( machine_is_aml_m5900() )
- s3c2410_gpio_setpin(S3C2410_GPF(2), 0);
+ if (machine_is_aml_m5900()) {
+ gpio_request_one(S3C2410_GPF(2), GPIOF_OUT_INIT_LOW, NULL);
+ gpio_free(S3C2410_GPF(2));
+ }
}
struct syscore_ops s3c2410_pm_syscore_ops = {
diff --git a/arch/arm/mach-s3c24xx/pm-s3c2412.c b/arch/arm/mach-s3c24xx/pm-s3c2412.c
index d04588506ec4..c60f67a75aff 100644
--- a/arch/arm/mach-s3c24xx/pm-s3c2412.c
+++ b/arch/arm/mach-s3c24xx/pm-s3c2412.c
@@ -26,7 +26,6 @@
#include <asm/irq.h>
#include <mach/regs-power.h>
-#include <mach/regs-gpioj.h>
#include <mach/regs-gpio.h>
#include <mach/regs-dsc.h>
diff --git a/arch/arm/mach-s3c24xx/s3c2412.c b/arch/arm/mach-s3c24xx/s3c2412.c
index d4bc7f960bbb..6c5f4031ff0c 100644
--- a/arch/arm/mach-s3c24xx/s3c2412.c
+++ b/arch/arm/mach-s3c24xx/s3c2412.c
@@ -39,7 +39,6 @@
#include <plat/regs-serial.h>
#include <mach/regs-power.h>
#include <mach/regs-gpio.h>
-#include <mach/regs-gpioj.h>
#include <mach/regs-dsc.h>
#include <plat/regs-spi.h>
#include <mach/regs-s3c2412.h>
diff --git a/arch/arm/mach-s3c24xx/s3c244x.c b/arch/arm/mach-s3c24xx/s3c244x.c
index 6f74118f60c6..b0b60a1154d6 100644
--- a/arch/arm/mach-s3c24xx/s3c244x.c
+++ b/arch/arm/mach-s3c24xx/s3c244x.c
@@ -36,7 +36,6 @@
#include <mach/regs-clock.h>
#include <plat/regs-serial.h>
#include <mach/regs-gpio.h>
-#include <mach/regs-gpioj.h>
#include <mach/regs-dsc.h>
#include <plat/s3c2410.h>
diff --git a/arch/arm/mach-s3c24xx/setup-spi.c b/arch/arm/mach-s3c24xx/setup-spi.c
index 5712c85f39b1..3d47e023ce94 100644
--- a/arch/arm/mach-s3c24xx/setup-spi.c
+++ b/arch/arm/mach-s3c24xx/setup-spi.c
@@ -13,20 +13,12 @@
#include <linux/platform_device.h>
#include <plat/gpio-cfg.h>
-#include <plat/s3c64xx-spi.h>
#include <mach/hardware.h>
#include <mach/regs-gpio.h>
#ifdef CONFIG_S3C64XX_DEV_SPI0
-struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .tx_st_done = 21,
- .high_speed = 1,
-};
-
-int s3c64xx_spi0_cfg_gpio(struct platform_device *pdev)
+int s3c64xx_spi0_cfg_gpio(void)
{
/* enable hsspi bit in misccr */
s3c2410_modify_misccr(S3C2416_MISCCR_HSSPI_EN2, 1);
diff --git a/arch/arm/mach-s3c24xx/setup-ts.c b/arch/arm/mach-s3c24xx/setup-ts.c
index ed2638663675..4e11affce3a8 100644
--- a/arch/arm/mach-s3c24xx/setup-ts.c
+++ b/arch/arm/mach-s3c24xx/setup-ts.c
@@ -16,7 +16,6 @@
struct platform_device; /* don't need the contents */
#include <mach/hardware.h>
-#include <mach/regs-gpio.h>
/**
* s3c24xx_ts_cfg_gpio - configure gpio for s3c2410 systems
@@ -27,8 +26,5 @@ struct platform_device; /* don't need the contents */
*/
void s3c24xx_ts_cfg_gpio(struct platform_device *dev)
{
- s3c2410_gpio_cfgpin(S3C2410_GPG(12), S3C2410_GPG12_XMON);
- s3c2410_gpio_cfgpin(S3C2410_GPG(13), S3C2410_GPG13_nXPON);
- s3c2410_gpio_cfgpin(S3C2410_GPG(14), S3C2410_GPG14_YMON);
- s3c2410_gpio_cfgpin(S3C2410_GPG(15), S3C2410_GPG15_nYPON);
+ s3c_gpio_cfgpin_range(S3C2410_GPG(12), 4, S3C_GPIO_SFN(3));
}
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index 52f079a691cb..28041e83dc82 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -178,13 +178,13 @@ static struct clk init_clocks_off[] = {
.ctrlbit = S3C_CLKCON_PCLK_KEYPAD,
}, {
.name = "spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "s3c6410-spi.0",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_SPI0,
}, {
.name = "spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "s3c6410-spi.1",
.parent = &clk_p,
.enable = s3c64xx_pclk_ctrl,
.ctrlbit = S3C_CLKCON_PCLK_SPI1,
@@ -331,7 +331,7 @@ static struct clk init_clocks_off[] = {
static struct clk clk_48m_spi0 = {
.name = "spi_48m",
- .devname = "s3c64xx-spi.0",
+ .devname = "s3c6410-spi.0",
.parent = &clk_48m,
.enable = s3c64xx_sclk_ctrl,
.ctrlbit = S3C_CLKCON_SCLK_SPI0_48,
@@ -339,7 +339,7 @@ static struct clk clk_48m_spi0 = {
static struct clk clk_48m_spi1 = {
.name = "spi_48m",
- .devname = "s3c64xx-spi.1",
+ .devname = "s3c6410-spi.1",
.parent = &clk_48m,
.enable = s3c64xx_sclk_ctrl,
.ctrlbit = S3C_CLKCON_SCLK_SPI1_48,
@@ -802,7 +802,7 @@ static struct clksrc_clk clk_sclk_mmc2 = {
static struct clksrc_clk clk_sclk_spi0 = {
.clk = {
.name = "spi-bus",
- .devname = "s3c64xx-spi.0",
+ .devname = "s3c6410-spi.0",
.ctrlbit = S3C_CLKCON_SCLK_SPI0,
.enable = s3c64xx_sclk_ctrl,
},
@@ -814,7 +814,7 @@ static struct clksrc_clk clk_sclk_spi0 = {
static struct clksrc_clk clk_sclk_spi1 = {
.clk = {
.name = "spi-bus",
- .devname = "s3c64xx-spi.1",
+ .devname = "s3c6410-spi.1",
.ctrlbit = S3C_CLKCON_SCLK_SPI1,
.enable = s3c64xx_sclk_ctrl,
},
@@ -858,10 +858,10 @@ static struct clk_lookup s3c64xx_clk_lookup[] = {
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_48m_spi0),
- CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
- CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk2", &clk_48m_spi1),
+ CLKDEV_INIT("s3c6410-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s3c6410-spi.0", "spi_busclk2", &clk_48m_spi0),
+ CLKDEV_INIT("s3c6410-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("s3c6410-spi.1", "spi_busclk2", &clk_48m_spi1),
};
#define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1)
diff --git a/arch/arm/mach-s3c64xx/include/mach/crag6410.h b/arch/arm/mach-s3c64xx/include/mach/crag6410.h
index 4cb2f951f1e9..4c3c9994fc2c 100644
--- a/arch/arm/mach-s3c64xx/include/mach/crag6410.h
+++ b/arch/arm/mach-s3c64xx/include/mach/crag6410.h
@@ -13,9 +13,7 @@
#include <linux/gpio.h>
-#define BANFF_PMIC_IRQ_BASE IRQ_BOARD_START
-#define GLENFARCLAS_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
-#define CODEC_IRQ_BASE (IRQ_BOARD_START + 128)
+#define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
#define PCA935X_GPIO_BASE GPIO_BOARD_START
#define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
diff --git a/arch/arm/mach-s3c64xx/include/mach/dma.h b/arch/arm/mach-s3c64xx/include/mach/dma.h
index fe1a98cf0e4c..57b1ff4b2d7c 100644
--- a/arch/arm/mach-s3c64xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c64xx/include/mach/dma.h
@@ -21,6 +21,7 @@
*/
enum dma_ch {
/* DMA0/SDMA0 */
+ DMACH_DT_PROP = -1, /* not yet supported, do not use */
DMACH_UART0 = 0,
DMACH_UART0_SRC2,
DMACH_UART1,
diff --git a/arch/arm/mach-s3c64xx/include/mach/spi-clocks.h b/arch/arm/mach-s3c64xx/include/mach/spi-clocks.h
deleted file mode 100644
index 9d0c43b4b687..000000000000
--- a/arch/arm/mach-s3c64xx/include/mach/spi-clocks.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* linux/arch/arm/mach-s3c64xx/include/mach/spi-clocks.h
- *
- * Copyright (C) 2009 Samsung Electronics Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __S3C64XX_PLAT_SPI_CLKS_H
-#define __S3C64XX_PLAT_SPI_CLKS_H __FILE__
-
-#define S3C64XX_SPI_SRCCLK_PCLK 0
-#define S3C64XX_SPI_SRCCLK_SPIBUS 1
-#define S3C64XX_SPI_SRCCLK_48M 2
-
-#endif /* __S3C64XX_PLAT_SPI_CLKS_H */
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 7a27f5603c74..9e382e7c77cb 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -29,7 +29,6 @@
#include <mach/crag6410.h>
static struct s3c64xx_spi_csinfo wm0010_spi_csinfo = {
- .set_level = gpio_set_value,
.line = S3C64XX_GPC(3),
};
@@ -39,6 +38,7 @@ static struct spi_board_info wm1253_devs[] = {
.bus_num = 0,
.chip_select = 0,
.mode = SPI_MODE_0,
+ .irq = S3C_EINT(5),
.controller_data = &wm0010_spi_csinfo,
},
};
@@ -168,7 +168,6 @@ static struct wm8994_pdata wm8994_pdata = {
.gpio_defaults = {
0x3, /* IRQ out, active high, CMOS */
},
- .irq_base = CODEC_IRQ_BASE,
.ldo = {
{ .init_data = &wm8994_ldo1, },
{ .init_data = &wm8994_ldo2, },
@@ -182,6 +181,11 @@ static const struct i2c_board_info wm1277_devs[] = {
},
};
+static const struct i2c_board_info wm5102_devs[] = {
+ { I2C_BOARD_INFO("wm5102", 0x1a),
+ .irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2, },
+};
+
static const struct i2c_board_info wm6230_i2c_devs[] = {
{ I2C_BOARD_INFO("wm9081", 0x6c),
.platform_data = &wm9081_pdata, },
@@ -209,6 +213,7 @@ static __devinitdata const struct {
.spi_devs = wm1253_devs, .num_spi_devs = ARRAY_SIZE(wm1253_devs) },
{ .id = 0x32, .name = "XXXX-EV1 Caol Illa" },
{ .id = 0x33, .name = "XXXX-EV1 Oban" },
+ { .id = 0x34, .name = "WM0010-6320-CS42 Balblair" },
{ .id = 0x39, .name = "1254-EV1 Dallas Dhu",
.i2c_devs = wm1254_devs, .num_i2c_devs = ARRAY_SIZE(wm1254_devs) },
{ .id = 0x3a, .name = "1259-EV1 Tobermory",
@@ -218,6 +223,8 @@ static __devinitdata const struct {
{ .id = 0x3c, .name = "1273-EV1 Longmorn" },
{ .id = 0x3d, .name = "1277-EV1 Littlemill",
.i2c_devs = wm1277_devs, .num_i2c_devs = ARRAY_SIZE(wm1277_devs) },
+ { .id = 0x3e, .name = "WM5102-6271-EV1-CS127",
+ .i2c_devs = wm5102_devs, .num_i2c_devs = ARRAY_SIZE(wm5102_devs) },
};
static __devinit int wlf_gf_module_probe(struct i2c_client *i2c,
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index d0c352d861f8..09cd81207a3f 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -171,7 +171,7 @@ static struct fb_videomode crag6410_lcd_timing = {
};
/* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
-static struct s3c_fb_platdata crag6410_lcd_pdata __initdata = {
+static struct s3c_fb_platdata crag6410_lcd_pdata __devinitdata = {
.setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
.vtiming = &crag6410_lcd_timing,
.win[0] = &crag6410_fb_win0,
@@ -181,7 +181,7 @@ static struct s3c_fb_platdata crag6410_lcd_pdata __initdata = {
/* 2x6 keypad */
-static uint32_t crag6410_keymap[] __initdata = {
+static uint32_t crag6410_keymap[] __devinitdata = {
/* KEY(row, col, keycode) */
KEY(0, 0, KEY_VOLUMEUP),
KEY(0, 1, KEY_HOME),
@@ -197,12 +197,12 @@ static uint32_t crag6410_keymap[] __initdata = {
KEY(1, 5, KEY_CAMERA),
};
-static struct matrix_keymap_data crag6410_keymap_data __initdata = {
+static struct matrix_keymap_data crag6410_keymap_data __devinitdata = {
.keymap = crag6410_keymap,
.keymap_size = ARRAY_SIZE(crag6410_keymap),
};
-static struct samsung_keypad_platdata crag6410_keypad_data __initdata = {
+static struct samsung_keypad_platdata crag6410_keypad_data __devinitdata = {
.keymap_data = &crag6410_keymap_data,
.rows = 2,
.cols = 6,
@@ -373,11 +373,11 @@ static struct wm831x_buckv_pdata vddarm_pdata = {
.dvs_gpio = S3C64XX_GPK(0),
};
-static struct regulator_consumer_supply vddarm_consumers[] __initdata = {
+static struct regulator_consumer_supply vddarm_consumers[] __devinitdata = {
REGULATOR_SUPPLY("vddarm", NULL),
};
-static struct regulator_init_data vddarm __initdata = {
+static struct regulator_init_data vddarm __devinitdata = {
.constraints = {
.name = "VDDARM",
.min_uV = 1000000,
@@ -391,11 +391,11 @@ static struct regulator_init_data vddarm __initdata = {
.driver_data = &vddarm_pdata,
};
-static struct regulator_consumer_supply vddint_consumers[] __initdata = {
+static struct regulator_consumer_supply vddint_consumers[] __devinitdata = {
REGULATOR_SUPPLY("vddint", NULL),
};
-static struct regulator_init_data vddint __initdata = {
+static struct regulator_init_data vddint __devinitdata = {
.constraints = {
.name = "VDDINT",
.min_uV = 1000000,
@@ -408,27 +408,27 @@ static struct regulator_init_data vddint __initdata = {
.supply_regulator = "WALLVDD",
};
-static struct regulator_init_data vddmem __initdata = {
+static struct regulator_init_data vddmem __devinitdata = {
.constraints = {
.name = "VDDMEM",
.always_on = 1,
},
};
-static struct regulator_init_data vddsys __initdata = {
+static struct regulator_init_data vddsys __devinitdata = {
.constraints = {
.name = "VDDSYS,VDDEXT,VDDPCM,VDDSS",
.always_on = 1,
},
};
-static struct regulator_consumer_supply vddmmc_consumers[] __initdata = {
+static struct regulator_consumer_supply vddmmc_consumers[] __devinitdata = {
REGULATOR_SUPPLY("vmmc", "s3c-sdhci.0"),
REGULATOR_SUPPLY("vmmc", "s3c-sdhci.1"),
REGULATOR_SUPPLY("vmmc", "s3c-sdhci.2"),
};
-static struct regulator_init_data vddmmc __initdata = {
+static struct regulator_init_data vddmmc __devinitdata = {
.constraints = {
.name = "VDDMMC,UH",
.always_on = 1,
@@ -438,7 +438,7 @@ static struct regulator_init_data vddmmc __initdata = {
.supply_regulator = "WALLVDD",
};
-static struct regulator_init_data vddotgi __initdata = {
+static struct regulator_init_data vddotgi __devinitdata = {
.constraints = {
.name = "VDDOTGi",
.always_on = 1,
@@ -446,7 +446,7 @@ static struct regulator_init_data vddotgi __initdata = {
.supply_regulator = "WALLVDD",
};
-static struct regulator_init_data vddotg __initdata = {
+static struct regulator_init_data vddotg __devinitdata = {
.constraints = {
.name = "VDDOTG",
.always_on = 1,
@@ -454,7 +454,7 @@ static struct regulator_init_data vddotg __initdata = {
.supply_regulator = "WALLVDD",
};
-static struct regulator_init_data vddhi __initdata = {
+static struct regulator_init_data vddhi __devinitdata = {
.constraints = {
.name = "VDDHI",
.always_on = 1,
@@ -462,7 +462,7 @@ static struct regulator_init_data vddhi __initdata = {
.supply_regulator = "WALLVDD",
};
-static struct regulator_init_data vddadc __initdata = {
+static struct regulator_init_data vddadc __devinitdata = {
.constraints = {
.name = "VDDADC,VDDDAC",
.always_on = 1,
@@ -470,7 +470,7 @@ static struct regulator_init_data vddadc __initdata = {
.supply_regulator = "WALLVDD",
};
-static struct regulator_init_data vddmem0 __initdata = {
+static struct regulator_init_data vddmem0 __devinitdata = {
.constraints = {
.name = "VDDMEM0",
.always_on = 1,
@@ -478,7 +478,7 @@ static struct regulator_init_data vddmem0 __initdata = {
.supply_regulator = "WALLVDD",
};
-static struct regulator_init_data vddpll __initdata = {
+static struct regulator_init_data vddpll __devinitdata = {
.constraints = {
.name = "VDDPLL",
.always_on = 1,
@@ -486,7 +486,7 @@ static struct regulator_init_data vddpll __initdata = {
.supply_regulator = "WALLVDD",
};
-static struct regulator_init_data vddlcd __initdata = {
+static struct regulator_init_data vddlcd __devinitdata = {
.constraints = {
.name = "VDDLCD",
.always_on = 1,
@@ -494,7 +494,7 @@ static struct regulator_init_data vddlcd __initdata = {
.supply_regulator = "WALLVDD",
};
-static struct regulator_init_data vddalive __initdata = {
+static struct regulator_init_data vddalive __devinitdata = {
.constraints = {
.name = "VDDALIVE",
.always_on = 1,
@@ -502,30 +502,29 @@ static struct regulator_init_data vddalive __initdata = {
.supply_regulator = "WALLVDD",
};
-static struct wm831x_backup_pdata banff_backup_pdata __initdata = {
+static struct wm831x_backup_pdata banff_backup_pdata __devinitdata = {
.charger_enable = 1,
.vlim = 2500, /* mV */
.ilim = 200, /* uA */
};
-static struct wm831x_status_pdata banff_red_led __initdata = {
+static struct wm831x_status_pdata banff_red_led __devinitdata = {
.name = "banff:red:",
.default_src = WM831X_STATUS_MANUAL,
};
-static struct wm831x_status_pdata banff_green_led __initdata = {
+static struct wm831x_status_pdata banff_green_led __devinitdata = {
.name = "banff:green:",
.default_src = WM831X_STATUS_MANUAL,
};
-static struct wm831x_touch_pdata touch_pdata __initdata = {
+static struct wm831x_touch_pdata touch_pdata __devinitdata = {
.data_irq = S3C_EINT(26),
.pd_irq = S3C_EINT(27),
};
-static struct wm831x_pdata crag_pmic_pdata __initdata = {
+static struct wm831x_pdata crag_pmic_pdata __devinitdata = {
.wm831x_num = 1,
- .irq_base = BANFF_PMIC_IRQ_BASE,
.gpio_base = BANFF_PMIC_GPIO_BASE,
.soft_shutdown = true,
@@ -568,7 +567,7 @@ static struct wm831x_pdata crag_pmic_pdata __initdata = {
.touch = &touch_pdata,
};
-static struct i2c_board_info i2c_devs0[] __initdata = {
+static struct i2c_board_info i2c_devs0[] __devinitdata = {
{ I2C_BOARD_INFO("24c08", 0x50), },
{ I2C_BOARD_INFO("tca6408", 0x20),
.platform_data = &crag6410_pca_data,
@@ -583,12 +582,12 @@ static struct s3c2410_platform_i2c i2c0_pdata = {
.frequency = 400000,
};
-static struct regulator_consumer_supply pvdd_1v2_consumers[] __initdata = {
+static struct regulator_consumer_supply pvdd_1v2_consumers[] __devinitdata = {
REGULATOR_SUPPLY("DCVDD", "spi0.0"),
REGULATOR_SUPPLY("AVDD", "spi0.0"),
};
-static struct regulator_init_data pvdd_1v2 __initdata = {
+static struct regulator_init_data pvdd_1v2 __devinitdata = {
.constraints = {
.name = "PVDD_1V2",
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
@@ -598,7 +597,7 @@ static struct regulator_init_data pvdd_1v2 __initdata = {
.num_consumer_supplies = ARRAY_SIZE(pvdd_1v2_consumers),
};
-static struct regulator_consumer_supply pvdd_1v8_consumers[] __initdata = {
+static struct regulator_consumer_supply pvdd_1v8_consumers[] __devinitdata = {
REGULATOR_SUPPLY("LDOVDD", "1-001a"),
REGULATOR_SUPPLY("PLLVDD", "1-001a"),
REGULATOR_SUPPLY("DBVDD", "1-001a"),
@@ -612,7 +611,7 @@ static struct regulator_consumer_supply pvdd_1v8_consumers[] __initdata = {
REGULATOR_SUPPLY("DBVDD", "spi0.0"),
};
-static struct regulator_init_data pvdd_1v8 __initdata = {
+static struct regulator_init_data pvdd_1v8 __devinitdata = {
.constraints = {
.name = "PVDD_1V8",
.always_on = 1,
@@ -622,12 +621,12 @@ static struct regulator_init_data pvdd_1v8 __initdata = {
.num_consumer_supplies = ARRAY_SIZE(pvdd_1v8_consumers),
};
-static struct regulator_consumer_supply pvdd_3v3_consumers[] __initdata = {
+static struct regulator_consumer_supply pvdd_3v3_consumers[] __devinitdata = {
REGULATOR_SUPPLY("MICVDD", "1-001a"),
REGULATOR_SUPPLY("AVDD1", "1-001a"),
};
-static struct regulator_init_data pvdd_3v3 __initdata = {
+static struct regulator_init_data pvdd_3v3 __devinitdata = {
.constraints = {
.name = "PVDD_3V3",
.always_on = 1,
@@ -637,7 +636,7 @@ static struct regulator_init_data pvdd_3v3 __initdata = {
.num_consumer_supplies = ARRAY_SIZE(pvdd_3v3_consumers),
};
-static struct wm831x_pdata glenfarclas_pmic_pdata __initdata = {
+static struct wm831x_pdata glenfarclas_pmic_pdata __devinitdata = {
.wm831x_num = 2,
.irq_base = GLENFARCLAS_PMIC_IRQ_BASE,
.gpio_base = GLENFARCLAS_PMIC_GPIO_BASE,
@@ -669,7 +668,7 @@ static struct wm1250_ev1_pdata wm1250_ev1_pdata = {
},
};
-static struct i2c_board_info i2c_devs1[] __initdata = {
+static struct i2c_board_info i2c_devs1[] __devinitdata = {
{ I2C_BOARD_INFO("wm8311", 0x34),
.irq = S3C_EINT(0),
.platform_data = &glenfarclas_pmic_pdata },
@@ -799,7 +798,7 @@ static void __init crag6410_machine_init(void)
i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));
samsung_keypad_set_platdata(&crag6410_keypad_data);
- s3c64xx_spi0_set_platdata(&s3c64xx_spi0_pdata, 0, 1);
+ s3c64xx_spi0_set_platdata(NULL, 0, 1);
platform_add_devices(crag6410_devices, ARRAY_SIZE(crag6410_devices));
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index df3103d450e2..0fe4f1503f4f 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -566,7 +566,6 @@ static struct wm831x_status_pdata wm1192_led8_pdata = {
static struct wm831x_pdata smdk6410_wm1192_pdata = {
.pre_init = wm1192_pre_init,
- .irq_base = IRQ_BOARD_START,
.backlight = &wm1192_backlight_pdata,
.dcdc = {
diff --git a/arch/arm/mach-s3c64xx/setup-spi.c b/arch/arm/mach-s3c64xx/setup-spi.c
index d9592ad7a825..4dc53450d715 100644
--- a/arch/arm/mach-s3c64xx/setup-spi.c
+++ b/arch/arm/mach-s3c64xx/setup-spi.c
@@ -9,19 +9,10 @@
*/
#include <linux/gpio.h>
-#include <linux/platform_device.h>
-
#include <plat/gpio-cfg.h>
-#include <plat/s3c64xx-spi.h>
#ifdef CONFIG_S3C64XX_DEV_SPI0
-struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .tx_st_done = 21,
-};
-
-int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi0_cfg_gpio(void)
{
s3c_gpio_cfgall_range(S3C64XX_GPC(0), 3,
S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
@@ -30,13 +21,7 @@ int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
#endif
#ifdef CONFIG_S3C64XX_DEV_SPI1
-struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = {
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .tx_st_done = 21,
-};
-
-int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi1_cfg_gpio(void)
{
s3c_gpio_cfgall_range(S3C64XX_GPC(4), 3,
S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6440.c b/arch/arm/mach-s5p64x0/clock-s5p6440.c
index ee1e8e7f5631..000445596ec4 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6440.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6440.c
@@ -227,13 +227,13 @@ static struct clk init_clocks_off[] = {
.ctrlbit = (1 << 17),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "s5p64x0-spi.0",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 21),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "s5p64x0-spi.1",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 22),
@@ -467,7 +467,7 @@ static struct clksrc_clk clk_sclk_uclk = {
static struct clksrc_clk clk_sclk_spi0 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "s5p64x0-spi.0",
.ctrlbit = (1 << 20),
.enable = s5p64x0_sclk_ctrl,
},
@@ -479,7 +479,7 @@ static struct clksrc_clk clk_sclk_spi0 = {
static struct clksrc_clk clk_sclk_spi1 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "s5p64x0-spi.1",
.ctrlbit = (1 << 21),
.enable = s5p64x0_sclk_ctrl,
},
@@ -519,8 +519,8 @@ static struct clk_lookup s5p6440_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_pclk_low.clk),
CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_sclk_uclk.clk),
CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
- CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("s5p64x0-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s5p64x0-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
diff --git a/arch/arm/mach-s5p64x0/clock-s5p6450.c b/arch/arm/mach-s5p64x0/clock-s5p6450.c
index dae6a13f43bb..f3e0ef3d27c9 100644
--- a/arch/arm/mach-s5p64x0/clock-s5p6450.c
+++ b/arch/arm/mach-s5p64x0/clock-s5p6450.c
@@ -236,13 +236,13 @@ static struct clk init_clocks_off[] = {
.ctrlbit = (1 << 17),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "s5p64x0-spi.0",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 21),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "s5p64x0-spi.1",
.parent = &clk_pclk_low.clk,
.enable = s5p64x0_pclk_ctrl,
.ctrlbit = (1 << 22),
@@ -528,7 +528,7 @@ static struct clksrc_clk clk_sclk_uclk = {
static struct clksrc_clk clk_sclk_spi0 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "s5p64x0-spi.0",
.ctrlbit = (1 << 20),
.enable = s5p64x0_sclk_ctrl,
},
@@ -540,7 +540,7 @@ static struct clksrc_clk clk_sclk_spi0 = {
static struct clksrc_clk clk_sclk_spi1 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "s5p64x0-spi.1",
.ctrlbit = (1 << 21),
.enable = s5p64x0_sclk_ctrl,
},
@@ -562,8 +562,8 @@ static struct clk_lookup s5p6450_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_pclk_low.clk),
CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_sclk_uclk.clk),
CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
- CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("s5p64x0-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s5p64x0-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &clk_sclk_mmc0.clk),
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
diff --git a/arch/arm/mach-s5p64x0/dma.c b/arch/arm/mach-s5p64x0/dma.c
index 2ee5dc069b37..9c4ce085f585 100644
--- a/arch/arm/mach-s5p64x0/dma.c
+++ b/arch/arm/mach-s5p64x0/dma.c
@@ -36,8 +36,6 @@
#include <plat/devs.h>
#include <plat/irqs.h>
-static u64 dma_dmamask = DMA_BIT_MASK(32);
-
static u8 s5p6440_pdma_peri[] = {
DMACH_UART0_RX,
DMACH_UART0_TX,
diff --git a/arch/arm/mach-s5p64x0/include/mach/spi-clocks.h b/arch/arm/mach-s5p64x0/include/mach/spi-clocks.h
deleted file mode 100644
index 170a20a9643a..000000000000
--- a/arch/arm/mach-s5p64x0/include/mach/spi-clocks.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* linux/arch/arm/mach-s5p64x0/include/mach/spi-clocks.h
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_SPI_CLKS_H
-#define __ASM_ARCH_SPI_CLKS_H __FILE__
-
-#define S5P64X0_SPI_SRCCLK_PCLK 0
-#define S5P64X0_SPI_SRCCLK_SCLK 1
-
-#endif /* __ASM_ARCH_SPI_CLKS_H */
diff --git a/arch/arm/mach-s5p64x0/setup-spi.c b/arch/arm/mach-s5p64x0/setup-spi.c
index e9b841240352..7664356720ca 100644
--- a/arch/arm/mach-s5p64x0/setup-spi.c
+++ b/arch/arm/mach-s5p64x0/setup-spi.c
@@ -9,21 +9,10 @@
*/
#include <linux/gpio.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
#include <plat/gpio-cfg.h>
-#include <plat/cpu.h>
-#include <plat/s3c64xx-spi.h>
#ifdef CONFIG_S3C64XX_DEV_SPI0
-struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
- .fifo_lvl_mask = 0x1ff,
- .rx_lvl_offset = 15,
- .tx_st_done = 25,
-};
-
-int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi0_cfg_gpio(void)
{
if (soc_is_s5p6450())
s3c_gpio_cfgall_range(S5P6450_GPC(0), 3,
@@ -36,13 +25,7 @@ int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
#endif
#ifdef CONFIG_S3C64XX_DEV_SPI1
-struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = {
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 15,
- .tx_st_done = 25,
-};
-
-int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi1_cfg_gpio(void)
{
if (soc_is_s5p6450())
s3c_gpio_cfgall_range(S5P6450_GPC(4), 3,
diff --git a/arch/arm/mach-s5pc100/clock.c b/arch/arm/mach-s5pc100/clock.c
index 16eca4ea2010..926219791f0d 100644
--- a/arch/arm/mach-s5pc100/clock.c
+++ b/arch/arm/mach-s5pc100/clock.c
@@ -564,19 +564,19 @@ static struct clk init_clocks_off[] = {
.ctrlbit = (1 << 5),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "s5pc100-spi.0",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 6),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "s5pc100-spi.1",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 7),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.2",
+ .devname = "s5pc100-spi.2",
.parent = &clk_div_d1_bus.clk,
.enable = s5pc100_d1_4_ctrl,
.ctrlbit = (1 << 8),
@@ -702,7 +702,7 @@ static struct clk clk_hsmmc0 = {
static struct clk clk_48m_spi0 = {
.name = "spi_48m",
- .devname = "s3c64xx-spi.0",
+ .devname = "s5pc100-spi.0",
.parent = &clk_mout_48m.clk,
.enable = s5pc100_sclk0_ctrl,
.ctrlbit = (1 << 7),
@@ -710,7 +710,7 @@ static struct clk clk_48m_spi0 = {
static struct clk clk_48m_spi1 = {
.name = "spi_48m",
- .devname = "s3c64xx-spi.1",
+ .devname = "s5pc100-spi.1",
.parent = &clk_mout_48m.clk,
.enable = s5pc100_sclk0_ctrl,
.ctrlbit = (1 << 8),
@@ -718,7 +718,7 @@ static struct clk clk_48m_spi1 = {
static struct clk clk_48m_spi2 = {
.name = "spi_48m",
- .devname = "s3c64xx-spi.2",
+ .devname = "s5pc100-spi.2",
.parent = &clk_mout_48m.clk,
.enable = s5pc100_sclk0_ctrl,
.ctrlbit = (1 << 9),
@@ -1085,7 +1085,7 @@ static struct clksrc_clk clk_sclk_mmc2 = {
static struct clksrc_clk clk_sclk_spi0 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "s5pc100-spi.0",
.ctrlbit = (1 << 4),
.enable = s5pc100_sclk0_ctrl,
},
@@ -1097,7 +1097,7 @@ static struct clksrc_clk clk_sclk_spi0 = {
static struct clksrc_clk clk_sclk_spi1 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "s5pc100-spi.1",
.ctrlbit = (1 << 5),
.enable = s5pc100_sclk0_ctrl,
},
@@ -1109,7 +1109,7 @@ static struct clksrc_clk clk_sclk_spi1 = {
static struct clksrc_clk clk_sclk_spi2 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.2",
+ .devname = "s5pc100-spi.2",
.ctrlbit = (1 << 6),
.enable = s5pc100_sclk0_ctrl,
},
@@ -1315,12 +1315,12 @@ static struct clk_lookup s5pc100_clk_lookup[] = {
CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_sclk_mmc1.clk),
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_48m_spi0),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_sclk_spi0.clk),
- CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_48m_spi1),
- CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk2", &clk_sclk_spi1.clk),
- CLKDEV_INIT("s3c64xx-spi.2", "spi_busclk1", &clk_48m_spi2),
- CLKDEV_INIT("s3c64xx-spi.2", "spi_busclk2", &clk_sclk_spi2.clk),
+ CLKDEV_INIT("s5pc100-spi.0", "spi_busclk1", &clk_48m_spi0),
+ CLKDEV_INIT("s5pc100-spi.0", "spi_busclk2", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s5pc100-spi.1", "spi_busclk1", &clk_48m_spi1),
+ CLKDEV_INIT("s5pc100-spi.1", "spi_busclk2", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("s5pc100-spi.2", "spi_busclk1", &clk_48m_spi2),
+ CLKDEV_INIT("s5pc100-spi.2", "spi_busclk2", &clk_sclk_spi2.clk),
};
void __init s5pc100_register_clocks(void)
diff --git a/arch/arm/mach-s5pc100/dma.c b/arch/arm/mach-s5pc100/dma.c
index afd8db2d5991..b1418409709e 100644
--- a/arch/arm/mach-s5pc100/dma.c
+++ b/arch/arm/mach-s5pc100/dma.c
@@ -33,8 +33,6 @@
#include <mach/irqs.h>
#include <mach/dma.h>
-static u64 dma_dmamask = DMA_BIT_MASK(32);
-
static u8 pdma0_peri[] = {
DMACH_UART0_RX,
DMACH_UART0_TX,
diff --git a/arch/arm/mach-s5pc100/include/mach/spi-clocks.h b/arch/arm/mach-s5pc100/include/mach/spi-clocks.h
deleted file mode 100644
index 65e426370bb2..000000000000
--- a/arch/arm/mach-s5pc100/include/mach/spi-clocks.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* linux/arch/arm/mach-s5pc100/include/mach/spi-clocks.h
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __S5PC100_PLAT_SPI_CLKS_H
-#define __S5PC100_PLAT_SPI_CLKS_H __FILE__
-
-#define S5PC100_SPI_SRCCLK_PCLK 0
-#define S5PC100_SPI_SRCCLK_48M 1
-#define S5PC100_SPI_SRCCLK_SPIBUS 2
-
-#endif /* __S5PC100_PLAT_SPI_CLKS_H */
diff --git a/arch/arm/mach-s5pc100/setup-spi.c b/arch/arm/mach-s5pc100/setup-spi.c
index 431a6f747caa..183567961de1 100644
--- a/arch/arm/mach-s5pc100/setup-spi.c
+++ b/arch/arm/mach-s5pc100/setup-spi.c
@@ -9,20 +9,10 @@
*/
#include <linux/gpio.h>
-#include <linux/platform_device.h>
-
#include <plat/gpio-cfg.h>
-#include <plat/s3c64xx-spi.h>
#ifdef CONFIG_S3C64XX_DEV_SPI0
-struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .high_speed = 1,
- .tx_st_done = 21,
-};
-
-int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi0_cfg_gpio(void)
{
s3c_gpio_cfgall_range(S5PC100_GPB(0), 3,
S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
@@ -31,14 +21,7 @@ int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
#endif
#ifdef CONFIG_S3C64XX_DEV_SPI1
-struct s3c64xx_spi_info s3c64xx_spi1_pdata __initdata = {
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .high_speed = 1,
- .tx_st_done = 21,
-};
-
-int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi1_cfg_gpio(void)
{
s3c_gpio_cfgall_range(S5PC100_GPB(4), 3,
S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
@@ -47,14 +30,7 @@ int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
#endif
#ifdef CONFIG_S3C64XX_DEV_SPI2
-struct s3c64xx_spi_info s3c64xx_spi2_pdata __initdata = {
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 13,
- .high_speed = 1,
- .tx_st_done = 21,
-};
-
-int s3c64xx_spi2_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi2_cfg_gpio(void)
{
s3c_gpio_cfgpin(S5PC100_GPG3(0), S3C_GPIO_SFN(3));
s3c_gpio_setpull(S5PC100_GPG3(0), S3C_GPIO_PULL_UP);
diff --git a/arch/arm/mach-s5pv210/Kconfig b/arch/arm/mach-s5pv210/Kconfig
index 88e983b0c82e..77185c38188b 100644
--- a/arch/arm/mach-s5pv210/Kconfig
+++ b/arch/arm/mach-s5pv210/Kconfig
@@ -152,6 +152,7 @@ config MACH_SMDKV210
select S3C_DEV_I2C1
select S3C_DEV_I2C2
select S3C_DEV_RTC
+ select S3C_DEV_USB_HSOTG
select S3C_DEV_WDT
select S5P_DEV_FIMC0
select S5P_DEV_FIMC1
@@ -170,6 +171,7 @@ config MACH_SMDKV210
select S5PV210_SETUP_IDE
select S5PV210_SETUP_KEYPAD
select S5PV210_SETUP_SDHCI
+ select S5PV210_SETUP_USB_PHY
help
Machine support for Samsung SMDKV210
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c
index 09609d50961d..fcdf52dbcc49 100644
--- a/arch/arm/mach-s5pv210/clock.c
+++ b/arch/arm/mach-s5pv210/clock.c
@@ -445,19 +445,19 @@ static struct clk init_clocks_off[] = {
.ctrlbit = (1 << 11),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "s5pv210-spi.0",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<12),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "s5pv210-spi.1",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<13),
}, {
.name = "spi",
- .devname = "s3c64xx-spi.2",
+ .devname = "s5pv210-spi.2",
.parent = &clk_pclk_psys.clk,
.enable = s5pv210_clk_ip3_ctrl,
.ctrlbit = (1<<14),
@@ -1035,7 +1035,7 @@ static struct clksrc_clk clk_sclk_mmc3 = {
static struct clksrc_clk clk_sclk_spi0 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.0",
+ .devname = "s5pv210-spi.0",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 16),
},
@@ -1047,7 +1047,7 @@ static struct clksrc_clk clk_sclk_spi0 = {
static struct clksrc_clk clk_sclk_spi1 = {
.clk = {
.name = "sclk_spi",
- .devname = "s3c64xx-spi.1",
+ .devname = "s5pv210-spi.1",
.enable = s5pv210_clk_mask0_ctrl,
.ctrlbit = (1 << 17),
},
@@ -1331,8 +1331,8 @@ static struct clk_lookup s5pv210_clk_lookup[] = {
CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &clk_sclk_mmc2.clk),
CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &clk_sclk_mmc3.clk),
CLKDEV_INIT(NULL, "spi_busclk0", &clk_p),
- CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
- CLKDEV_INIT("s3c64xx-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
+ CLKDEV_INIT("s5pv210-spi.0", "spi_busclk1", &clk_sclk_spi0.clk),
+ CLKDEV_INIT("s5pv210-spi.1", "spi_busclk1", &clk_sclk_spi1.clk),
};
void __init s5pv210_register_clocks(void)
diff --git a/arch/arm/mach-s5pv210/include/mach/spi-clocks.h b/arch/arm/mach-s5pv210/include/mach/spi-clocks.h
deleted file mode 100644
index 02acded5f73d..000000000000
--- a/arch/arm/mach-s5pv210/include/mach/spi-clocks.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* linux/arch/arm/mach-s5pv210/include/mach/spi-clocks.h
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __S5PV210_PLAT_SPI_CLKS_H
-#define __S5PV210_PLAT_SPI_CLKS_H __FILE__
-
-#define S5PV210_SPI_SRCCLK_PCLK 0
-#define S5PV210_SPI_SRCCLK_SCLK 1
-
-#endif /* __S5PV210_PLAT_SPI_CLKS_H */
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index af528f9e97f9..78028df86c5d 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -600,10 +600,17 @@ static void aquila_setup_sdhci(void)
s3c_sdhci2_set_platdata(&aquila_hsmmc2_data);
};
+/* Audio device */
+static struct platform_device aquila_device_audio = {
+ .name = "smdk-audio",
+ .id = -1,
+};
+
static struct platform_device *aquila_devices[] __initdata = {
&aquila_i2c_gpio_pmic,
&aquila_i2c_gpio5,
&aquila_device_gpiokeys,
+ &aquila_device_audio,
&s3c_device_fb,
&s5p_device_onenand,
&s3c_device_hsmmc0,
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index bf5087c2b7fe..822a55950685 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -859,12 +859,19 @@ static struct s5p_platform_fimc goni_fimc_md_platdata __initdata = {
.num_clients = ARRAY_SIZE(goni_camera_sensors),
};
+/* Audio device */
+static struct platform_device goni_device_audio = {
+ .name = "smdk-audio",
+ .id = -1,
+};
+
static struct platform_device *goni_devices[] __initdata = {
&s3c_device_fb,
&s5p_device_onenand,
&goni_spi_gpio,
&goni_i2c_gpio_pmic,
&goni_i2c_gpio5,
+ &goni_device_audio,
&mmc2_fixed_voltage,
&goni_device_gpiokeys,
&s5p_device_mfc,
@@ -901,7 +908,7 @@ static void __init goni_sound_init(void)
static void __init goni_map_io(void)
{
s5pv210_init_io(NULL, 0);
- s3c24xx_init_clocks(24000000);
+ s3c24xx_init_clocks(clk_xusbxti.rate);
s3c24xx_init_uarts(goni_uartcfgs, ARRAY_SIZE(goni_uartcfgs));
s5p_set_timer_source(S5P_PWM3, S5P_PWM4);
}
@@ -959,8 +966,6 @@ static void __init goni_machine_init(void)
/* KEYPAD */
samsung_keypad_set_platdata(&keypad_data);
- clk_xusbxti.rate = 24000000;
-
platform_add_devices(goni_devices, ARRAY_SIZE(goni_devices));
}
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index 0d7ddec88eb7..918b23d71fdf 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -19,6 +19,7 @@
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/pwm_backlight.h>
+#include <linux/platform_data/s3c-hsotg.h>
#include <asm/hardware/vic.h>
#include <asm/mach/arch.h>
@@ -47,6 +48,7 @@
#include <plat/backlight.h>
#include <plat/regs-fb-v4.h>
#include <plat/mfc.h>
+#include <plat/clock.h>
#include "common.h"
@@ -203,6 +205,9 @@ static struct s3c_fb_platdata smdkv210_lcd0_pdata __initdata = {
.setup_gpio = s5pv210_fb_gpio_setup_24bpp,
};
+/* USB OTG */
+static struct s3c_hsotg_plat smdkv210_hsotg_pdata;
+
static struct platform_device *smdkv210_devices[] __initdata = {
&s3c_device_adc,
&s3c_device_cfcon,
@@ -216,6 +221,7 @@ static struct platform_device *smdkv210_devices[] __initdata = {
&s3c_device_i2c2,
&s3c_device_rtc,
&s3c_device_ts,
+ &s3c_device_usb_hsotg,
&s3c_device_wdt,
&s5p_device_fimc0,
&s5p_device_fimc1,
@@ -279,7 +285,7 @@ static struct platform_pwm_backlight_data smdkv210_bl_data = {
static void __init smdkv210_map_io(void)
{
s5pv210_init_io(NULL, 0);
- s3c24xx_init_clocks(24000000);
+ s3c24xx_init_clocks(clk_xusbxti.rate);
s3c24xx_init_uarts(smdkv210_uartcfgs, ARRAY_SIZE(smdkv210_uartcfgs));
s5p_set_timer_source(S5P_PWM2, S5P_PWM4);
}
@@ -314,6 +320,8 @@ static void __init smdkv210_machine_init(void)
samsung_bl_set(&smdkv210_bl_gpio_info, &smdkv210_bl_data);
+ s3c_hsotg_set_platdata(&smdkv210_hsotg_pdata);
+
platform_add_devices(smdkv210_devices, ARRAY_SIZE(smdkv210_devices));
}
diff --git a/arch/arm/mach-s5pv210/setup-spi.c b/arch/arm/mach-s5pv210/setup-spi.c
index f43c5048a37d..81aecc162f82 100644
--- a/arch/arm/mach-s5pv210/setup-spi.c
+++ b/arch/arm/mach-s5pv210/setup-spi.c
@@ -9,20 +9,10 @@
*/
#include <linux/gpio.h>
-#include <linux/platform_device.h>
-
#include <plat/gpio-cfg.h>
-#include <plat/s3c64xx-spi.h>
#ifdef CONFIG_S3C64XX_DEV_SPI0
-struct s3c64xx_spi_info s3c64xx_spi0_pdata = {
- .fifo_lvl_mask = 0x1ff,
- .rx_lvl_offset = 15,
- .high_speed = 1,
- .tx_st_done = 25,
-};
-
-int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi0_cfg_gpio(void)
{
s3c_gpio_cfgpin(S5PV210_GPB(0), S3C_GPIO_SFN(2));
s3c_gpio_setpull(S5PV210_GPB(0), S3C_GPIO_PULL_UP);
@@ -33,14 +23,7 @@ int s3c64xx_spi0_cfg_gpio(struct platform_device *dev)
#endif
#ifdef CONFIG_S3C64XX_DEV_SPI1
-struct s3c64xx_spi_info s3c64xx_spi1_pdata = {
- .fifo_lvl_mask = 0x7f,
- .rx_lvl_offset = 15,
- .high_speed = 1,
- .tx_st_done = 25,
-};
-
-int s3c64xx_spi1_cfg_gpio(struct platform_device *dev)
+int s3c64xx_spi1_cfg_gpio(void)
{
s3c_gpio_cfgpin(S5PV210_GPB(4), S3C_GPIO_SFN(2));
s3c_gpio_setpull(S5PV210_GPB(4), S3C_GPIO_PULL_UP);
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 9e37026ef9dd..9bd135531d76 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -779,6 +779,7 @@ DT_MACHINE_START(ARMADILLO800EVA_DT, "armadillo800eva")
.init_irq = r8a7740_init_irq,
.handle_irq = shmobile_handle_irq_intc,
.init_machine = eva_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
.dt_compat = eva_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kzm9d.c b/arch/arm/mach-shmobile/board-kzm9d.c
index 7bc5e7d39f9b..6a33cf393428 100644
--- a/arch/arm/mach-shmobile/board-kzm9d.c
+++ b/arch/arm/mach-shmobile/board-kzm9d.c
@@ -80,6 +80,7 @@ DT_MACHINE_START(KZM9D_DT, "kzm9d")
.init_irq = emev2_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = kzm9d_add_standard_devices,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
.dt_compat = kzm9d_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index d8e33b682832..c0ae815e7beb 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -455,6 +455,7 @@ DT_MACHINE_START(KZM9G_DT, "kzm9g")
.init_irq = sh73a0_init_irq,
.handle_irq = gic_handle_irq,
.init_machine = kzm_init,
+ .init_late = shmobile_init_late,
.timer = &shmobile_timer,
.dt_compat = kzm9g_boards_compat_dt,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index b577f7c44678..150122a44630 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1512,6 +1512,9 @@ static void __init mackerel_init(void)
gpio_request(GPIO_FN_SDHID0_1, NULL);
gpio_request(GPIO_FN_SDHID0_0, NULL);
+ /* SDHI0 PORT172 card-detect IRQ26 */
+ gpio_request(GPIO_FN_IRQ26_172, NULL);
+
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* enable SDHI1 */
gpio_request(GPIO_FN_SDHICMD1, NULL);
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index 472d1f5361e5..3946c4ba2aa8 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -475,9 +475,9 @@ static struct clk *late_main_clks[] = {
enum { MSTP001,
MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
- MSTP219,
+ MSTP219, MSTP218,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
- MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
+ MSTP331, MSTP329, MSTP325, MSTP323,
MSTP314, MSTP313, MSTP312, MSTP311,
MSTP303, MSTP302, MSTP301, MSTP300,
MSTP411, MSTP410, MSTP403,
@@ -497,6 +497,7 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
[MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
[MSTP219] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 19, 0), /* SCIFA7 */
+ [MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* SY-DMAC */
[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -508,7 +509,6 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP329] = MSTP(&r_clk, SMSTPCR3, 29, 0), /* CMT10 */
[MSTP325] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR3, 25, 0), /* IrDA */
[MSTP323] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 23, 0), /* IIC1 */
- [MSTP318] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 18, 0), /* SY-DMAC */
[MSTP314] = MSTP(&div6_clks[DIV6_SDHI0], SMSTPCR3, 14, 0), /* SDHI0 */
[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
@@ -552,6 +552,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
+ CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* SY-DMAC */
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */
@@ -563,7 +564,6 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
CLKDEV_DEV_ID("sh_irda.0", &mstp_clks[MSTP325]), /* IrDA */
CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* I2C1 */
- CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP318]), /* SY-DMAC */
CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
diff --git a/arch/arm/mach-shmobile/intc-r8a7779.c b/arch/arm/mach-shmobile/intc-r8a7779.c
index 550b23df4fd4..f04fad4ec4fb 100644
--- a/arch/arm/mach-shmobile/intc-r8a7779.c
+++ b/arch/arm/mach-shmobile/intc-r8a7779.c
@@ -35,6 +35,9 @@
#define INT2SMSKCR3 0xfe7822ac
#define INT2SMSKCR4 0xfe7822b0
+#define INT2NTSR0 0xfe700060
+#define INT2NTSR1 0xfe700064
+
static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
{
return 0; /* always allow wakeup */
@@ -49,6 +52,10 @@ void __init r8a7779_init_irq(void)
gic_init(0, 29, gic_dist_base, gic_cpu_base);
gic_arch_extn.irq_set_wake = r8a7779_set_wake;
+ /* route all interrupts to ARM */
+ __raw_writel(0xffffffff, INT2NTSR0);
+ __raw_writel(0x3fffffff, INT2NTSR1);
+
/* unmask all known interrupts in INTCS2 */
__raw_writel(0xfffffff0, INT2SMSKCR0);
__raw_writel(0xfff7ffff, INT2SMSKCR1);
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index bacdd667e3b1..fde0d23121dc 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -22,10 +22,20 @@
#include <mach/common.h>
#include <mach/emev2.h>
+#ifdef CONFIG_ARCH_SH73A0
#define is_sh73a0() (machine_is_ag5evm() || machine_is_kota2() || \
of_machine_is_compatible("renesas,sh73a0"))
+#else
+#define is_sh73a0() (0)
+#endif
+
#define is_r8a7779() machine_is_marzen()
+
+#ifdef CONFIG_ARCH_EMEV2
#define is_emev2() of_machine_is_compatible("renesas,emev2")
+#else
+#define is_emev2() (0)
+#endif
static unsigned int __init shmobile_smp_get_core_count(void)
{
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index 6a4bd582c028..fafce9ce8218 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -484,7 +484,7 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
},
};
-#define SH7372_CHCLR 0x220
+#define SH7372_CHCLR (0x220 - 0x20)
static const struct sh_dmae_channel sh7372_dmae_channels[] = {
{
diff --git a/arch/arm/mach-socfpga/Makefile b/arch/arm/mach-socfpga/Makefile
new file mode 100644
index 000000000000..4fb93240971d
--- /dev/null
+++ b/arch/arm/mach-socfpga/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y := socfpga.o
diff --git a/arch/arm/mach-socfpga/Makefile.boot b/arch/arm/mach-socfpga/Makefile.boot
new file mode 100644
index 000000000000..dae9661a7689
--- /dev/null
+++ b/arch/arm/mach-socfpga/Makefile.boot
@@ -0,0 +1 @@
+zreladdr-y := 0x00008000
diff --git a/arch/arm/mach-socfpga/include/mach/debug-macro.S b/arch/arm/mach-socfpga/include/mach/debug-macro.S
new file mode 100644
index 000000000000..d6f26d23374f
--- /dev/null
+++ b/arch/arm/mach-socfpga/include/mach/debug-macro.S
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 1994-1999 Russell King
+ * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ .macro addruart, rp, rv, tmp
+ mov \rp, #DEBUG_LL_UART_OFFSET
+ orr \rp, \rp, #0x00c00000
+ orr \rv, \rp, #0xfe000000 @ virtual base
+ orr \rp, \rp, #0xff000000 @ physical base
+ .endm
+
diff --git a/arch/arm/mach-at91/include/mach/irqs.h b/arch/arm/mach-socfpga/include/mach/timex.h
index ac8b7dfc85ef..43df4354e461 100644
--- a/arch/arm/mach-at91/include/mach/irqs.h
+++ b/arch/arm/mach-socfpga/include/mach/timex.h
@@ -1,7 +1,5 @@
/*
- * arch/arm/mach-at91/include/mach/irqs.h
- *
- * Copyright (C) 2004 SAN People
+ * Copyright (C) 2003 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -18,31 +16,4 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef __ASM_ARCH_IRQS_H
-#define __ASM_ARCH_IRQS_H
-
-#include <linux/io.h>
-#include <mach/at91_aic.h>
-
-#define NR_AIC_IRQS 32
-
-
-/*
- * Acknowledge interrupt with AIC after interrupt has been handled.
- * (by kernel/irq.c)
- */
-#define irq_finish(irq) do { at91_aic_write(AT91_AIC_EOICR, 0); } while (0)
-
-
-/*
- * IRQ interrupt symbols are the AT91xxx_ID_* symbols
- * for IRQs handled directly through the AIC, or else the AT91_PIN_*
- * symbols in gpio.h for ones handled indirectly as GPIOs.
- * We make provision for 5 banks of GPIO.
- */
-#define NR_IRQS (NR_AIC_IRQS + (5 * 32))
-
-/* FIQ is AIC source 0. */
-#define FIQ_START AT91_ID_FIQ
-
-#endif
+#define CLOCK_TICK_RATE (50000000 / 16)
diff --git a/arch/arm/mach-socfpga/include/mach/uncompress.h b/arch/arm/mach-socfpga/include/mach/uncompress.h
new file mode 100644
index 000000000000..bbe20e696325
--- /dev/null
+++ b/arch/arm/mach-socfpga/include/mach/uncompress.h
@@ -0,0 +1,9 @@
+#ifndef __MACH_UNCOMPRESS_H
+#define __MACH_UNCOMPRESS_H
+
+#define putc(c)
+#define flush()
+#define arch_decomp_setup()
+#define arch_decomp_wdog()
+
+#endif
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
new file mode 100644
index 000000000000..f01e1ebf5396
--- /dev/null
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2012 Altera Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/dw_apb_timer.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/gic.h>
+#include <asm/mach/arch.h>
+
+extern void socfpga_init_clocks(void);
+
+const static struct of_device_id irq_match[] = {
+ { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
+ {}
+};
+
+static void __init gic_init_irq(void)
+{
+ of_irq_init(irq_match);
+}
+
+static void socfpga_cyclone5_restart(char mode, const char *cmd)
+{
+ /* TODO: */
+}
+
+static void __init socfpga_cyclone5_init(void)
+{
+ l2x0_of_init(0, ~0UL);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ socfpga_init_clocks();
+}
+
+static const char *altera_dt_match[] = {
+ "altr,socfpga",
+ "altr,socfpga-cyclone5",
+ NULL
+};
+
+DT_MACHINE_START(SOCFPGA, "Altera SOCFPGA")
+ .init_irq = gic_init_irq,
+ .handle_irq = gic_handle_irq,
+ .timer = &dw_apb_timer,
+ .init_machine = socfpga_cyclone5_init,
+ .restart = socfpga_cyclone5_restart,
+ .dt_compat = altera_dt_match,
+MACHINE_END
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index 0f41bd1c47c3..66db5f13af84 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -87,7 +87,7 @@ void __init spear3xx_map_io(void)
static void __init spear3xx_timer_init(void)
{
- char pclk_name[] = "pll3_48m_clk";
+ char pclk_name[] = "pll3_clk";
struct clk *gpt_clk, *pclk;
spear3xx_clk_init();
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index 2e2e3596583e..9af67d003c62 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -423,7 +423,7 @@ void __init spear6xx_map_io(void)
static void __init spear6xx_timer_init(void)
{
- char pclk_name[] = "pll3_48m_clk";
+ char pclk_name[] = "pll3_clk";
struct clk *gpt_clk, *pclk;
spear6xx_clk_init();
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index 6a113a9bb87a..9077aaa398d9 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -63,40 +63,15 @@ comment "Tegra board type"
config MACH_HARMONY
bool "Harmony board"
depends on ARCH_TEGRA_2x_SOC
- select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
help
Support for nVidia Harmony development platform
-config MACH_KAEN
- bool "Kaen board"
- depends on ARCH_TEGRA_2x_SOC
- select MACH_SEABOARD
- select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
- help
- Support for the Kaen version of Seaboard
-
config MACH_PAZ00
bool "Paz00 board"
depends on ARCH_TEGRA_2x_SOC
help
Support for the Toshiba AC100/Dynabook AZ netbook
-config MACH_SEABOARD
- bool "Seaboard board"
- depends on ARCH_TEGRA_2x_SOC
- select MACH_HAS_SND_SOC_TEGRA_WM8903 if SND_SOC
- help
- Support for nVidia Seaboard development platform. It will
- also be included for some of the derivative boards that
- have large similarities with the seaboard design.
-
-config MACH_TEGRA_DT
- bool "Generic Tegra20 board (FDT support)"
- depends on ARCH_TEGRA_2x_SOC
- select USE_OF
- help
- Support for generic NVIDIA Tegra20 boards using Flattened Device Tree
-
config MACH_TRIMSLICE
bool "TrimSlice board"
depends on ARCH_TEGRA_2x_SOC
@@ -104,20 +79,6 @@ config MACH_TRIMSLICE
help
Support for CompuLab TrimSlice platform
-config MACH_WARIO
- bool "Wario board"
- depends on ARCH_TEGRA_2x_SOC
- select MACH_SEABOARD
- help
- Support for the Wario version of Seaboard
-
-config MACH_VENTANA
- bool "Ventana board"
- depends on ARCH_TEGRA_2x_SOC
- select MACH_TEGRA_DT
- help
- Support for the nVidia Ventana development platform
-
choice
prompt "Default low-level debug console UART"
default TEGRA_DEBUG_UART_NONE
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 2eb4445ddb14..c3d7303b9ac8 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -8,21 +8,24 @@ obj-y += timer.o
obj-y += fuse.o
obj-y += pmc.o
obj-y += flowctrl.o
+obj-y += powergate.o
+obj-y += apbio.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_CPU_IDLE) += sleep.o
-obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += powergate.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_clocks.o
obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra2_emc.o
-obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += board-dt-tegra30.o
obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30_clocks.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o
obj-$(CONFIG_SMP) += reset.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
-obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o apbio.o
+obj-$(CONFIG_TEGRA_SYSTEM_DMA) += dma.o
obj-$(CONFIG_CPU_FREQ) += cpu-tegra.o
obj-$(CONFIG_TEGRA_PCI) += pcie.o
obj-$(CONFIG_USB_SUPPORT) += usb_phy.o
+obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += board-dt-tegra20.o
+obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += board-dt-tegra30.o
+
obj-$(CONFIG_MACH_HARMONY) += board-harmony.o
obj-$(CONFIG_MACH_HARMONY) += board-harmony-pinmux.o
obj-$(CONFIG_MACH_HARMONY) += board-harmony-pcie.o
@@ -31,14 +34,5 @@ obj-$(CONFIG_MACH_HARMONY) += board-harmony-power.o
obj-$(CONFIG_MACH_PAZ00) += board-paz00.o
obj-$(CONFIG_MACH_PAZ00) += board-paz00-pinmux.o
-obj-$(CONFIG_MACH_SEABOARD) += board-seaboard.o
-obj-$(CONFIG_MACH_SEABOARD) += board-seaboard-pinmux.o
-
-obj-$(CONFIG_MACH_TEGRA_DT) += board-dt-tegra20.o
-obj-$(CONFIG_MACH_TEGRA_DT) += board-harmony-pinmux.o
-obj-$(CONFIG_MACH_TEGRA_DT) += board-seaboard-pinmux.o
-obj-$(CONFIG_MACH_TEGRA_DT) += board-paz00-pinmux.o
-obj-$(CONFIG_MACH_TEGRA_DT) += board-trimslice-pinmux.o
-
obj-$(CONFIG_MACH_TRIMSLICE) += board-trimslice.o
obj-$(CONFIG_MACH_TRIMSLICE) += board-trimslice-pinmux.o
diff --git a/arch/arm/mach-tegra/Makefile.boot b/arch/arm/mach-tegra/Makefile.boot
index 9a82094092d7..7a1bb62ddcf0 100644
--- a/arch/arm/mach-tegra/Makefile.boot
+++ b/arch/arm/mach-tegra/Makefile.boot
@@ -2,9 +2,10 @@ zreladdr-$(CONFIG_ARCH_TEGRA_2x_SOC) += 0x00008000
params_phys-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00000100
initrd_phys-$(CONFIG_ARCH_TEGRA_2x_SOC) := 0x00800000
-dtb-$(CONFIG_MACH_HARMONY) += tegra-harmony.dtb
-dtb-$(CONFIG_MACH_PAZ00) += tegra-paz00.dtb
-dtb-$(CONFIG_MACH_SEABOARD) += tegra-seaboard.dtb
-dtb-$(CONFIG_MACH_TRIMSLICE) += tegra-trimslice.dtb
-dtb-$(CONFIG_MACH_VENTANA) += tegra-ventana.dtb
-dtb-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra-cardhu.dtb
+dtb-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20-harmony.dtb
+dtb-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20-paz00.dtb
+dtb-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20-seaboard.dtb
+dtb-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20-trimslice.dtb
+dtb-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20-ventana.dtb
+dtb-$(CONFIG_ARCH_TEGRA_2x_SOC) += tegra20-whistler.dtb
+dtb-$(CONFIG_ARCH_TEGRA_3x_SOC) += tegra30-cardhu.dtb
diff --git a/arch/arm/mach-tegra/apbio.c b/arch/arm/mach-tegra/apbio.c
index e75451e517bd..dc0fe389be56 100644
--- a/arch/arm/mach-tegra/apbio.c
+++ b/arch/arm/mach-tegra/apbio.c
@@ -15,6 +15,9 @@
#include <linux/kernel.h>
#include <linux/io.h>
+#include <mach/iomap.h>
+#include <linux/of.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
@@ -22,17 +25,21 @@
#include <linux/mutex.h>
#include <mach/dma.h>
-#include <mach/iomap.h>
#include "apbio.h"
+#if defined(CONFIG_TEGRA_SYSTEM_DMA) || defined(CONFIG_TEGRA20_APB_DMA)
static DEFINE_MUTEX(tegra_apb_dma_lock);
-
-static struct tegra_dma_channel *tegra_apb_dma;
static u32 *tegra_apb_bb;
static dma_addr_t tegra_apb_bb_phys;
static DECLARE_COMPLETION(tegra_apb_wait);
+static u32 tegra_apb_readl_direct(unsigned long offset);
+static void tegra_apb_writel_direct(u32 value, unsigned long offset);
+
+#if defined(CONFIG_TEGRA_SYSTEM_DMA)
+static struct tegra_dma_channel *tegra_apb_dma;
+
bool tegra_apb_init(void)
{
struct tegra_dma_channel *ch;
@@ -72,13 +79,13 @@ static void apb_dma_complete(struct tegra_dma_req *req)
complete(&tegra_apb_wait);
}
-u32 tegra_apb_readl(unsigned long offset)
+static u32 tegra_apb_readl_using_dma(unsigned long offset)
{
struct tegra_dma_req req;
int ret;
if (!tegra_apb_dma && !tegra_apb_init())
- return readl(IO_TO_VIRT(offset));
+ return tegra_apb_readl_direct(offset);
mutex_lock(&tegra_apb_dma_lock);
req.complete = apb_dma_complete;
@@ -108,13 +115,13 @@ u32 tegra_apb_readl(unsigned long offset)
return *((u32 *)tegra_apb_bb);
}
-void tegra_apb_writel(u32 value, unsigned long offset)
+static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
{
struct tegra_dma_req req;
int ret;
if (!tegra_apb_dma && !tegra_apb_init()) {
- writel(value, IO_TO_VIRT(offset));
+ tegra_apb_writel_direct(value, offset);
return;
}
@@ -143,3 +150,176 @@ void tegra_apb_writel(u32 value, unsigned long offset)
mutex_unlock(&tegra_apb_dma_lock);
}
+
+#else
+static struct dma_chan *tegra_apb_dma_chan;
+static struct dma_slave_config dma_sconfig;
+
+bool tegra_apb_dma_init(void)
+{
+ dma_cap_mask_t mask;
+
+ mutex_lock(&tegra_apb_dma_lock);
+
+ /* Check to see if we raced to setup */
+ if (tegra_apb_dma_chan)
+ goto skip_init;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ tegra_apb_dma_chan = dma_request_channel(mask, NULL, NULL);
+ if (!tegra_apb_dma_chan) {
+ /*
+ * This is common until the device is probed, so don't
+ * shout about it.
+ */
+ pr_debug("%s: can not allocate dma channel\n", __func__);
+ goto err_dma_alloc;
+ }
+
+ tegra_apb_bb = dma_alloc_coherent(NULL, sizeof(u32),
+ &tegra_apb_bb_phys, GFP_KERNEL);
+ if (!tegra_apb_bb) {
+ pr_err("%s: can not allocate bounce buffer\n", __func__);
+ goto err_buff_alloc;
+ }
+
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.slave_id = TEGRA_DMA_REQ_SEL_CNTR;
+ dma_sconfig.src_maxburst = 1;
+ dma_sconfig.dst_maxburst = 1;
+
+skip_init:
+ mutex_unlock(&tegra_apb_dma_lock);
+ return true;
+
+err_buff_alloc:
+ dma_release_channel(tegra_apb_dma_chan);
+ tegra_apb_dma_chan = NULL;
+
+err_dma_alloc:
+ mutex_unlock(&tegra_apb_dma_lock);
+ return false;
+}
+
+static void apb_dma_complete(void *args)
+{
+ complete(&tegra_apb_wait);
+}
+
+static int do_dma_transfer(unsigned long apb_add,
+ enum dma_transfer_direction dir)
+{
+ struct dma_async_tx_descriptor *dma_desc;
+ int ret;
+
+ if (dir == DMA_DEV_TO_MEM)
+ dma_sconfig.src_addr = apb_add;
+ else
+ dma_sconfig.dst_addr = apb_add;
+
+ ret = dmaengine_slave_config(tegra_apb_dma_chan, &dma_sconfig);
+ if (ret)
+ return ret;
+
+ dma_desc = dmaengine_prep_slave_single(tegra_apb_dma_chan,
+ tegra_apb_bb_phys, sizeof(u32), dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma_desc)
+ return -EINVAL;
+
+ dma_desc->callback = apb_dma_complete;
+ dma_desc->callback_param = NULL;
+
+ INIT_COMPLETION(tegra_apb_wait);
+
+ dmaengine_submit(dma_desc);
+ dma_async_issue_pending(tegra_apb_dma_chan);
+ ret = wait_for_completion_timeout(&tegra_apb_wait,
+ msecs_to_jiffies(50));
+
+ if (WARN(ret == 0, "apb read dma timed out")) {
+ dmaengine_terminate_all(tegra_apb_dma_chan);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static u32 tegra_apb_readl_using_dma(unsigned long offset)
+{
+ int ret;
+
+ if (!tegra_apb_dma_chan && !tegra_apb_dma_init())
+ return tegra_apb_readl_direct(offset);
+
+ mutex_lock(&tegra_apb_dma_lock);
+ ret = do_dma_transfer(offset, DMA_DEV_TO_MEM);
+ if (ret < 0) {
+ pr_err("error in reading offset 0x%08lx using dma\n", offset);
+ *(u32 *)tegra_apb_bb = 0;
+ }
+ mutex_unlock(&tegra_apb_dma_lock);
+ return *((u32 *)tegra_apb_bb);
+}
+
+static void tegra_apb_writel_using_dma(u32 value, unsigned long offset)
+{
+ int ret;
+
+ if (!tegra_apb_dma_chan && !tegra_apb_dma_init()) {
+ tegra_apb_writel_direct(value, offset);
+ return;
+ }
+
+ mutex_lock(&tegra_apb_dma_lock);
+ *((u32 *)tegra_apb_bb) = value;
+ ret = do_dma_transfer(offset, DMA_MEM_TO_DEV);
+ if (ret < 0)
+ pr_err("error in writing offset 0x%08lx using dma\n", offset);
+ mutex_unlock(&tegra_apb_dma_lock);
+}
+#endif
+#else
+#define tegra_apb_readl_using_dma tegra_apb_readl_direct
+#define tegra_apb_writel_using_dma tegra_apb_writel_direct
+#endif
+
+typedef u32 (*apbio_read_fptr)(unsigned long offset);
+typedef void (*apbio_write_fptr)(u32 value, unsigned long offset);
+
+static apbio_read_fptr apbio_read;
+static apbio_write_fptr apbio_write;
+
+static u32 tegra_apb_readl_direct(unsigned long offset)
+{
+ return readl(IO_TO_VIRT(offset));
+}
+
+static void tegra_apb_writel_direct(u32 value, unsigned long offset)
+{
+ writel(value, IO_TO_VIRT(offset));
+}
+
+void tegra_apb_io_init(void)
+{
+ /* Need to use dma only when it is Tegra20 based platform */
+ if (of_machine_is_compatible("nvidia,tegra20") ||
+ !of_have_populated_dt()) {
+ apbio_read = tegra_apb_readl_using_dma;
+ apbio_write = tegra_apb_writel_using_dma;
+ } else {
+ apbio_read = tegra_apb_readl_direct;
+ apbio_write = tegra_apb_writel_direct;
+ }
+}
+
+u32 tegra_apb_readl(unsigned long offset)
+{
+ return apbio_read(offset);
+}
+
+void tegra_apb_writel(u32 value, unsigned long offset)
+{
+ apbio_write(value, offset);
+}
diff --git a/arch/arm/mach-tegra/apbio.h b/arch/arm/mach-tegra/apbio.h
index 8b49e8c89a64..f05d71c303c7 100644
--- a/arch/arm/mach-tegra/apbio.h
+++ b/arch/arm/mach-tegra/apbio.h
@@ -16,24 +16,7 @@
#ifndef __MACH_TEGRA_APBIO_H
#define __MACH_TEGRA_APBIO_H
-#ifdef CONFIG_TEGRA_SYSTEM_DMA
-
+void tegra_apb_io_init(void);
u32 tegra_apb_readl(unsigned long offset);
void tegra_apb_writel(u32 value, unsigned long offset);
-
-#else
-#include <asm/io.h>
-#include <mach/io.h>
-
-static inline u32 tegra_apb_readl(unsigned long offset)
-{
- return readl(IO_TO_VIRT(offset));
-}
-
-static inline void tegra_apb_writel(u32 value, unsigned long offset)
-{
- writel(value, IO_TO_VIRT(offset));
-}
-#endif
-
#endif
diff --git a/arch/arm/mach-tegra/board-dt-tegra20.c b/arch/arm/mach-tegra/board-dt-tegra20.c
index eb7249db50a5..d0de9c1192f7 100644
--- a/arch/arm/mach-tegra/board-dt-tegra20.c
+++ b/arch/arm/mach-tegra/board-dt-tegra20.c
@@ -64,6 +64,7 @@ struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = {
&tegra_ehci2_pdata),
OF_DEV_AUXDATA("nvidia,tegra20-ehci", TEGRA_USB3_BASE, "tegra-ehci.2",
&tegra_ehci3_pdata),
+ OF_DEV_AUXDATA("nvidia,tegra20-apbdma", 0x6000a000, "tegra-apbdma", NULL),
{}
};
@@ -81,11 +82,6 @@ static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = {
{ NULL, NULL, 0, 0},
};
-static struct of_device_id tegra_dt_match_table[] __initdata = {
- { .compatible = "simple-bus", },
- {}
-};
-
static void __init tegra_dt_init(void)
{
tegra_clk_init_from_table(tegra_dt_clk_init_table);
@@ -94,10 +90,74 @@ static void __init tegra_dt_init(void)
* Finished with the static registrations now; fill in the missing
* devices
*/
- of_platform_populate(NULL, tegra_dt_match_table,
+ of_platform_populate(NULL, of_default_bus_match_table,
tegra20_auxdata_lookup, NULL);
}
+#ifdef CONFIG_MACH_TRIMSLICE
+static void __init trimslice_init(void)
+{
+ int ret;
+
+ ret = tegra_pcie_init(true, true);
+ if (ret)
+ pr_err("tegra_pci_init() failed: %d\n", ret);
+}
+#endif
+
+#ifdef CONFIG_MACH_HARMONY
+static void __init harmony_init(void)
+{
+ int ret;
+
+ ret = harmony_regulator_init();
+ if (ret) {
+ pr_err("harmony_regulator_init() failed: %d\n", ret);
+ return;
+ }
+
+ ret = harmony_pcie_init();
+ if (ret)
+ pr_err("harmony_pcie_init() failed: %d\n", ret);
+}
+#endif
+
+#ifdef CONFIG_MACH_PAZ00
+static void __init paz00_init(void)
+{
+ tegra_paz00_wifikill_init();
+}
+#endif
+
+static struct {
+ char *machine;
+ void (*init)(void);
+} board_init_funcs[] = {
+#ifdef CONFIG_MACH_TRIMSLICE
+ { "compulab,trimslice", trimslice_init },
+#endif
+#ifdef CONFIG_MACH_HARMONY
+ { "nvidia,harmony", harmony_init },
+#endif
+#ifdef CONFIG_MACH_PAZ00
+ { "compal,paz00", paz00_init },
+#endif
+};
+
+static void __init tegra_dt_init_late(void)
+{
+ int i;
+
+ tegra_init_late();
+
+ for (i = 0; i < ARRAY_SIZE(board_init_funcs); i++) {
+ if (of_machine_is_compatible(board_init_funcs[i].machine)) {
+ board_init_funcs[i].init();
+ break;
+ }
+ }
+}
+
static const char *tegra20_dt_board_compat[] = {
"nvidia,tegra20",
NULL
@@ -110,7 +170,7 @@ DT_MACHINE_START(TEGRA_DT, "nVidia Tegra20 (Flattened Device Tree)")
.handle_irq = gic_handle_irq,
.timer = &tegra_timer,
.init_machine = tegra_dt_init,
- .init_late = tegra_init_late,
+ .init_late = tegra_dt_init_late,
.restart = tegra_assert_system_reset,
.dt_compat = tegra20_dt_board_compat,
MACHINE_END
diff --git a/arch/arm/mach-tegra/board-dt-tegra30.c b/arch/arm/mach-tegra/board-dt-tegra30.c
index 4f76fa7a5da3..ee48214bfd89 100644
--- a/arch/arm/mach-tegra/board-dt-tegra30.c
+++ b/arch/arm/mach-tegra/board-dt-tegra30.c
@@ -36,11 +36,6 @@
#include "board.h"
#include "clock.h"
-static struct of_device_id tegra_dt_match_table[] __initdata = {
- { .compatible = "simple-bus", },
- {}
-};
-
struct of_dev_auxdata tegra30_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("nvidia,tegra20-sdhci", 0x78000000, "sdhci-tegra.0", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-sdhci", 0x78000200, "sdhci-tegra.1", NULL),
@@ -52,6 +47,7 @@ struct of_dev_auxdata tegra30_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C700, "tegra-i2c.3", NULL),
OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000D000, "tegra-i2c.4", NULL),
OF_DEV_AUXDATA("nvidia,tegra30-ahub", 0x70080000, "tegra30-ahub", NULL),
+ OF_DEV_AUXDATA("nvidia,tegra30-apbdma", 0x6000a000, "tegra-apbdma", NULL),
{}
};
@@ -74,7 +70,7 @@ static void __init tegra30_dt_init(void)
{
tegra_clk_init_from_table(tegra_dt_clk_init_table);
- of_platform_populate(NULL, tegra_dt_match_table,
+ of_platform_populate(NULL, of_default_bus_match_table,
tegra30_auxdata_lookup, NULL);
}
diff --git a/arch/arm/mach-tegra/board-harmony-pcie.c b/arch/arm/mach-tegra/board-harmony-pcie.c
index 33c4fedab840..e8c3fda9bec2 100644
--- a/arch/arm/mach-tegra/board-harmony-pcie.c
+++ b/arch/arm/mach-tegra/board-harmony-pcie.c
@@ -27,14 +27,11 @@
#ifdef CONFIG_TEGRA_PCI
-static int __init harmony_pcie_init(void)
+int __init harmony_pcie_init(void)
{
struct regulator *regulator = NULL;
int err;
- if (!machine_is_harmony())
- return 0;
-
err = gpio_request(TEGRA_GPIO_EN_VDD_1V05_GPIO, "EN_VDD_1V05");
if (err)
return err;
@@ -62,7 +59,15 @@ err_reg:
return err;
}
+static int __init harmony_pcie_initcall(void)
+{
+ if (!machine_is_harmony())
+ return 0;
+
+ return harmony_pcie_init();
+}
+
/* PCI should be initialized after I2C, mfd and regulators */
-subsys_initcall_sync(harmony_pcie_init);
+subsys_initcall_sync(harmony_pcie_initcall);
#endif
diff --git a/arch/arm/mach-tegra/board-harmony-power.c b/arch/arm/mach-tegra/board-harmony-power.c
index 82f32300796c..44dcb2e869b5 100644
--- a/arch/arm/mach-tegra/board-harmony-power.c
+++ b/arch/arm/mach-tegra/board-harmony-power.c
@@ -20,6 +20,10 @@
#include <linux/gpio.h>
#include <linux/regulator/machine.h>
#include <linux/mfd/tps6586x.h>
+#include <linux/of.h>
+#include <linux/of_i2c.h>
+
+#include <asm/mach-types.h>
#include <mach/irqs.h>
@@ -110,7 +114,26 @@ static struct i2c_board_info __initdata harmony_regulators[] = {
int __init harmony_regulator_init(void)
{
- i2c_register_board_info(3, harmony_regulators, 1);
+ if (machine_is_harmony()) {
+ i2c_register_board_info(3, harmony_regulators, 1);
+ } else { /* Harmony, booted using device tree */
+ struct device_node *np;
+ struct i2c_adapter *adapter;
+
+ np = of_find_node_by_path("/i2c@7000d000");
+ if (np == NULL) {
+ pr_err("Could not find device_node for DVC I2C\n");
+ return -ENODEV;
+ }
+
+ adapter = of_find_i2c_adapter_by_node(np);
+ if (!adapter) {
+ pr_err("Could not find i2c_adapter for DVC I2C\n");
+ return -ENODEV;
+ }
+
+ i2c_new_device(adapter, harmony_regulators);
+ }
return 0;
}
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index bbc1907e98a6..4b64af5cab27 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -148,7 +148,6 @@ static struct platform_device *paz00_devices[] __initdata = {
&debug_uart,
&tegra_sdhci_device4,
&tegra_sdhci_device1,
- &wifi_rfkill_device,
&leds_gpio,
&gpio_keys_device,
};
@@ -201,6 +200,11 @@ static struct tegra_sdhci_platform_data sdhci_pdata4 = {
.is_8bit = 1,
};
+void __init tegra_paz00_wifikill_init(void)
+{
+ platform_device_register(&wifi_rfkill_device);
+}
+
static void __init tegra_paz00_init(void)
{
tegra_clk_init_from_table(paz00_clk_init_table);
@@ -211,6 +215,7 @@ static void __init tegra_paz00_init(void)
tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4;
platform_add_devices(paz00_devices, ARRAY_SIZE(paz00_devices));
+ tegra_paz00_wifikill_init();
paz00_i2c_init();
paz00_usb_init();
diff --git a/arch/arm/mach-tegra/board-seaboard-pinmux.c b/arch/arm/mach-tegra/board-seaboard-pinmux.c
deleted file mode 100644
index 11fc8a568c64..000000000000
--- a/arch/arm/mach-tegra/board-seaboard-pinmux.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Copyright (C) 2010-2012 NVIDIA Corporation
- * Copyright (C) 2011 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-
-#include "board-seaboard.h"
-#include "board-pinmux.h"
-
-static unsigned long seaboard_pincfg_drive_sdio1[] = {
- TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_HIGH_SPEED_MODE, 0),
- TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_SCHMITT, 0),
- TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_LOW_POWER_MODE, 3),
- TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_DRIVE_DOWN_STRENGTH, 31),
- TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_DRIVE_UP_STRENGTH, 31),
- TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING, 3),
- TEGRA_PINCONF_PACK(TEGRA_PINCONF_PARAM_SLEW_RATE_RISING, 3),
-};
-
-static struct pinctrl_map common_map[] = {
- TEGRA_MAP_MUXCONF("ata", "ide", none, driven),
- TEGRA_MAP_MUXCONF("atb", "sdio4", none, driven),
- TEGRA_MAP_MUXCONF("atc", "nand", none, driven),
- TEGRA_MAP_MUXCONF("atd", "gmi", none, driven),
- TEGRA_MAP_MUXCONF("ate", "gmi", none, tristate),
- TEGRA_MAP_MUXCONF("cdev1", "plla_out", none, driven),
- TEGRA_MAP_MUXCONF("cdev2", "pllp_out4", none, driven),
- TEGRA_MAP_MUXCONF("crtp", "crt", up, tristate),
- TEGRA_MAP_MUXCONF("csus", "vi_sensor_clk", none, tristate),
- TEGRA_MAP_MUXCONF("dap1", "dap1", none, driven),
- TEGRA_MAP_MUXCONF("dap2", "dap2", none, driven),
- TEGRA_MAP_MUXCONF("dap3", "dap3", none, tristate),
- TEGRA_MAP_MUXCONF("dap4", "dap4", none, driven),
- TEGRA_MAP_MUXCONF("dta", "vi", down, driven),
- TEGRA_MAP_MUXCONF("dtb", "vi", down, driven),
- TEGRA_MAP_MUXCONF("dtc", "vi", down, driven),
- TEGRA_MAP_MUXCONF("dtd", "vi", down, driven),
- TEGRA_MAP_MUXCONF("dte", "vi", down, tristate),
- TEGRA_MAP_MUXCONF("dtf", "i2c3", none, driven),
- TEGRA_MAP_MUXCONF("gma", "sdio4", none, driven),
- TEGRA_MAP_MUXCONF("gmb", "gmi", up, tristate),
- TEGRA_MAP_MUXCONF("gmc", "uartd", none, driven),
- TEGRA_MAP_MUXCONF("gme", "sdio4", none, driven),
- TEGRA_MAP_MUXCONF("gpu", "pwm", none, driven),
- TEGRA_MAP_MUXCONF("gpu7", "rtck", none, driven),
- TEGRA_MAP_MUXCONF("gpv", "pcie", none, tristate),
- TEGRA_MAP_MUXCONF("hdint", "hdmi", na, tristate),
- TEGRA_MAP_MUXCONF("i2cp", "i2cp", none, driven),
- TEGRA_MAP_MUXCONF("irrx", "uartb", none, driven),
- TEGRA_MAP_MUXCONF("irtx", "uartb", none, driven),
- TEGRA_MAP_MUXCONF("kbca", "kbc", up, driven),
- TEGRA_MAP_MUXCONF("kbcb", "kbc", up, driven),
- TEGRA_MAP_MUXCONF("kbcc", "kbc", up, driven),
- TEGRA_MAP_MUXCONF("kbcd", "kbc", up, driven),
- TEGRA_MAP_MUXCONF("kbce", "kbc", up, driven),
- TEGRA_MAP_MUXCONF("kbcf", "kbc", up, driven),
- TEGRA_MAP_MUXCONF("lcsn", "rsvd4", na, tristate),
- TEGRA_MAP_MUXCONF("ld0", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld1", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld10", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld11", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld12", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld13", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld14", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld15", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld16", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld17", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld2", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld3", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld4", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld5", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld6", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld7", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld8", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ld9", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("ldc", "rsvd4", na, tristate),
- TEGRA_MAP_MUXCONF("ldi", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lhp0", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lhp1", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lhp2", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lhs", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lm0", "rsvd4", na, driven),
- TEGRA_MAP_MUXCONF("lm1", "crt", na, tristate),
- TEGRA_MAP_MUXCONF("lpp", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lpw1", "rsvd4", na, tristate),
- TEGRA_MAP_MUXCONF("lsc0", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lsdi", "rsvd4", na, tristate),
- TEGRA_MAP_MUXCONF("lspi", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lvp0", "rsvd4", na, tristate),
- TEGRA_MAP_MUXCONF("lvp1", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lvs", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("owc", "rsvd2", none, tristate),
- TEGRA_MAP_MUXCONF("pmc", "pwr_on", na, driven),
- TEGRA_MAP_MUXCONF("pta", "hdmi", none, driven),
- TEGRA_MAP_MUXCONF("rm", "i2c1", none, driven),
- TEGRA_MAP_MUXCONF("sdb", "sdio3", na, driven),
- TEGRA_MAP_MUXCONF("sdc", "sdio3", none, driven),
- TEGRA_MAP_MUXCONF("sdd", "sdio3", none, driven),
- TEGRA_MAP_MUXCONF("sdio1", "sdio1", up, driven),
- TEGRA_MAP_MUXCONF("slxa", "pcie", up, tristate),
- TEGRA_MAP_MUXCONF("slxd", "spdif", none, driven),
- TEGRA_MAP_MUXCONF("slxk", "pcie", none, driven),
- TEGRA_MAP_MUXCONF("spdi", "rsvd2", none, driven),
- TEGRA_MAP_MUXCONF("spdo", "rsvd2", none, driven),
- TEGRA_MAP_MUXCONF("spib", "gmi", none, tristate),
- TEGRA_MAP_MUXCONF("spid", "spi1", none, tristate),
- TEGRA_MAP_MUXCONF("spie", "spi1", none, tristate),
- TEGRA_MAP_MUXCONF("spif", "spi1", down, tristate),
- TEGRA_MAP_MUXCONF("spih", "spi2_alt", up, tristate),
- TEGRA_MAP_MUXCONF("uaa", "ulpi", up, driven),
- TEGRA_MAP_MUXCONF("uab", "ulpi", up, driven),
- TEGRA_MAP_MUXCONF("uac", "rsvd2", none, driven),
- TEGRA_MAP_MUXCONF("uad", "irda", none, driven),
- TEGRA_MAP_MUXCONF("uca", "uartc", none, driven),
- TEGRA_MAP_MUXCONF("ucb", "uartc", none, driven),
- TEGRA_MAP_MUXCONF("uda", "ulpi", none, driven),
- TEGRA_MAP_CONF("ck32", none, na),
- TEGRA_MAP_CONF("ddrc", none, na),
- TEGRA_MAP_CONF("pmca", none, na),
- TEGRA_MAP_CONF("pmcb", none, na),
- TEGRA_MAP_CONF("pmcc", none, na),
- TEGRA_MAP_CONF("pmcd", none, na),
- TEGRA_MAP_CONF("pmce", none, na),
- TEGRA_MAP_CONF("xm2c", none, na),
- TEGRA_MAP_CONF("xm2d", none, na),
- TEGRA_MAP_CONF("ls", up, na),
- TEGRA_MAP_CONF("lc", up, na),
- TEGRA_MAP_CONF("ld17_0", down, na),
- TEGRA_MAP_CONF("ld19_18", down, na),
- TEGRA_MAP_CONF("ld21_20", down, na),
- TEGRA_MAP_CONF("ld23_22", down, na),
-};
-
-static struct pinctrl_map seaboard_map[] = {
- TEGRA_MAP_MUXCONF("ddc", "rsvd2", none, tristate),
- TEGRA_MAP_MUXCONF("gmd", "sflash", none, driven),
- TEGRA_MAP_MUXCONF("lpw0", "hdmi", na, driven),
- TEGRA_MAP_MUXCONF("lpw2", "hdmi", na, driven),
- TEGRA_MAP_MUXCONF("lsc1", "hdmi", na, tristate),
- TEGRA_MAP_MUXCONF("lsck", "hdmi", na, tristate),
- TEGRA_MAP_MUXCONF("lsda", "hdmi", na, tristate),
- TEGRA_MAP_MUXCONF("slxc", "spdif", none, tristate),
- TEGRA_MAP_MUXCONF("spia", "gmi", up, tristate),
- TEGRA_MAP_MUXCONF("spic", "gmi", up, driven),
- TEGRA_MAP_MUXCONF("spig", "spi2_alt", up, tristate),
- PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(PINMUX_DEV, "drive_sdio1", seaboard_pincfg_drive_sdio1),
-};
-
-static struct pinctrl_map ventana_map[] = {
- TEGRA_MAP_MUXCONF("ddc", "rsvd2", none, driven),
- TEGRA_MAP_MUXCONF("gmd", "sflash", none, tristate),
- TEGRA_MAP_MUXCONF("lpw0", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lpw2", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lsc1", "displaya", na, driven),
- TEGRA_MAP_MUXCONF("lsck", "displaya", na, tristate),
- TEGRA_MAP_MUXCONF("lsda", "displaya", na, tristate),
- TEGRA_MAP_MUXCONF("slxc", "sdio3", none, driven),
- TEGRA_MAP_MUXCONF("spia", "gmi", none, tristate),
- TEGRA_MAP_MUXCONF("spic", "gmi", none, tristate),
- TEGRA_MAP_MUXCONF("spig", "spi2_alt", none, tristate),
-};
-
-static struct tegra_board_pinmux_conf common_conf = {
- .maps = common_map,
- .map_count = ARRAY_SIZE(common_map),
-};
-
-static struct tegra_board_pinmux_conf seaboard_conf = {
- .maps = seaboard_map,
- .map_count = ARRAY_SIZE(seaboard_map),
-};
-
-static struct tegra_board_pinmux_conf ventana_conf = {
- .maps = ventana_map,
- .map_count = ARRAY_SIZE(ventana_map),
-};
-
-void seaboard_pinmux_init(void)
-{
- tegra_board_pinmux_init(&common_conf, &seaboard_conf);
-}
-
-void ventana_pinmux_init(void)
-{
- tegra_board_pinmux_init(&common_conf, &ventana_conf);
-}
diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c
deleted file mode 100644
index 71e9f3fc7fba..000000000000
--- a/arch/arm/mach-tegra/board-seaboard.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright (c) 2010, 2011 NVIDIA Corporation.
- * Copyright (C) 2010, 2011 Google, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/serial_8250.h>
-#include <linux/of_serial.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <linux/input.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/gpio_keys.h>
-#include <linux/platform_data/tegra_usb.h>
-
-#include <sound/wm8903.h>
-
-#include <mach/iomap.h>
-#include <mach/irqs.h>
-#include <mach/sdhci.h>
-#include <mach/tegra_wm8903_pdata.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <asm/hardware/gic.h>
-
-#include "board.h"
-#include "board-seaboard.h"
-#include "clock.h"
-#include "devices.h"
-#include "gpio-names.h"
-
-static struct plat_serial8250_port debug_uart_platform_data[] = {
- {
- /* Memory and IRQ filled in before registration */
- .flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE,
- .type = PORT_TEGRA,
- .handle_break = tegra_serial_handle_break,
- .iotype = UPIO_MEM,
- .regshift = 2,
- .uartclk = 216000000,
- }, {
- .flags = 0,
- }
-};
-
-static struct platform_device debug_uart = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
- .dev = {
- .platform_data = debug_uart_platform_data,
- },
-};
-
-static __initdata struct tegra_clk_init_table seaboard_clk_init_table[] = {
- /* name parent rate enabled */
- { "uartb", "pll_p", 216000000, true},
- { "uartd", "pll_p", 216000000, true},
- { "pll_a", "pll_p_out1", 56448000, true },
- { "pll_a_out0", "pll_a", 11289600, true },
- { "cdev1", NULL, 0, true },
- { "i2s1", "pll_a_out0", 11289600, false},
- { "usbd", "clk_m", 12000000, true},
- { "usb3", "clk_m", 12000000, true},
- { NULL, NULL, 0, 0},
-};
-
-static struct gpio_keys_button seaboard_gpio_keys_buttons[] = {
- {
- .code = SW_LID,
- .gpio = TEGRA_GPIO_LIDSWITCH,
- .active_low = 0,
- .desc = "Lid",
- .type = EV_SW,
- .wakeup = 1,
- .debounce_interval = 1,
- },
- {
- .code = KEY_POWER,
- .gpio = TEGRA_GPIO_POWERKEY,
- .active_low = 1,
- .desc = "Power",
- .type = EV_KEY,
- .wakeup = 1,
- },
-};
-
-static struct gpio_keys_platform_data seaboard_gpio_keys = {
- .buttons = seaboard_gpio_keys_buttons,
- .nbuttons = ARRAY_SIZE(seaboard_gpio_keys_buttons),
-};
-
-static struct platform_device seaboard_gpio_keys_device = {
- .name = "gpio-keys",
- .id = -1,
- .dev = {
- .platform_data = &seaboard_gpio_keys,
- }
-};
-
-static struct tegra_sdhci_platform_data sdhci_pdata1 = {
- .cd_gpio = -1,
- .wp_gpio = -1,
- .power_gpio = -1,
-};
-
-static struct tegra_sdhci_platform_data sdhci_pdata3 = {
- .cd_gpio = TEGRA_GPIO_SD2_CD,
- .wp_gpio = TEGRA_GPIO_SD2_WP,
- .power_gpio = TEGRA_GPIO_SD2_POWER,
-};
-
-static struct tegra_sdhci_platform_data sdhci_pdata4 = {
- .cd_gpio = -1,
- .wp_gpio = -1,
- .power_gpio = -1,
- .is_8bit = 1,
-};
-
-static struct tegra_wm8903_platform_data seaboard_audio_pdata = {
- .gpio_spkr_en = TEGRA_GPIO_SPKR_EN,
- .gpio_hp_det = TEGRA_GPIO_HP_DET,
- .gpio_hp_mute = -1,
- .gpio_int_mic_en = -1,
- .gpio_ext_mic_en = -1,
-};
-
-static struct platform_device seaboard_audio_device = {
- .name = "tegra-snd-wm8903",
- .id = 0,
- .dev = {
- .platform_data = &seaboard_audio_pdata,
- },
-};
-
-static struct platform_device *seaboard_devices[] __initdata = {
- &debug_uart,
- &tegra_pmu_device,
- &tegra_sdhci_device4,
- &tegra_sdhci_device3,
- &tegra_sdhci_device1,
- &seaboard_gpio_keys_device,
- &tegra_i2s_device1,
- &tegra_das_device,
- &seaboard_audio_device,
-};
-
-static struct i2c_board_info __initdata isl29018_device = {
- I2C_BOARD_INFO("isl29018", 0x44),
-};
-
-static struct i2c_board_info __initdata adt7461_device = {
- I2C_BOARD_INFO("adt7461", 0x4c),
-};
-
-static struct wm8903_platform_data wm8903_pdata = {
- .irq_active_low = 0,
- .micdet_cfg = 0,
- .micdet_delay = 100,
- .gpio_base = SEABOARD_GPIO_WM8903(0),
- .gpio_cfg = {
- 0,
- 0,
- WM8903_GPIO_CONFIG_ZERO,
- 0,
- 0,
- },
-};
-
-static struct i2c_board_info __initdata wm8903_device = {
- I2C_BOARD_INFO("wm8903", 0x1a),
- .platform_data = &wm8903_pdata,
-};
-
-static int seaboard_ehci_init(void)
-{
- struct tegra_ehci_platform_data *pdata;
-
- pdata = tegra_ehci1_device.dev.platform_data;
- pdata->vbus_gpio = TEGRA_GPIO_USB1;
-
- platform_device_register(&tegra_ehci1_device);
- platform_device_register(&tegra_ehci3_device);
-
- return 0;
-}
-
-static void __init seaboard_i2c_init(void)
-{
- isl29018_device.irq = gpio_to_irq(TEGRA_GPIO_ISL29018_IRQ);
- i2c_register_board_info(0, &isl29018_device, 1);
-
- wm8903_device.irq = gpio_to_irq(TEGRA_GPIO_CDC_IRQ);
- i2c_register_board_info(0, &wm8903_device, 1);
-
- i2c_register_board_info(3, &adt7461_device, 1);
-
- platform_device_register(&tegra_i2c_device1);
- platform_device_register(&tegra_i2c_device2);
- platform_device_register(&tegra_i2c_device3);
- platform_device_register(&tegra_i2c_device4);
-}
-
-static void __init seaboard_common_init(void)
-{
- seaboard_pinmux_init();
-
- tegra_clk_init_from_table(seaboard_clk_init_table);
-
- tegra_sdhci_device1.dev.platform_data = &sdhci_pdata1;
- tegra_sdhci_device3.dev.platform_data = &sdhci_pdata3;
- tegra_sdhci_device4.dev.platform_data = &sdhci_pdata4;
-
- platform_add_devices(seaboard_devices, ARRAY_SIZE(seaboard_devices));
-
- seaboard_ehci_init();
-}
-
-static void __init tegra_seaboard_init(void)
-{
- /* Seaboard uses UARTD for the debug port. */
- debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTD_BASE);
- debug_uart_platform_data[0].mapbase = TEGRA_UARTD_BASE;
- debug_uart_platform_data[0].irq = INT_UARTD;
-
- seaboard_common_init();
-
- seaboard_i2c_init();
-}
-
-static void __init tegra_kaen_init(void)
-{
- /* Kaen uses UARTB for the debug port. */
- debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTB_BASE);
- debug_uart_platform_data[0].mapbase = TEGRA_UARTB_BASE;
- debug_uart_platform_data[0].irq = INT_UARTB;
-
- seaboard_audio_pdata.gpio_hp_mute = TEGRA_GPIO_KAEN_HP_MUTE;
-
- seaboard_common_init();
-
- seaboard_i2c_init();
-}
-
-static void __init tegra_wario_init(void)
-{
- /* Wario uses UARTB for the debug port. */
- debug_uart_platform_data[0].membase = IO_ADDRESS(TEGRA_UARTB_BASE);
- debug_uart_platform_data[0].mapbase = TEGRA_UARTB_BASE;
- debug_uart_platform_data[0].irq = INT_UARTB;
-
- seaboard_common_init();
-
- seaboard_i2c_init();
-}
-
-
-MACHINE_START(SEABOARD, "seaboard")
- .atag_offset = 0x100,
- .map_io = tegra_map_common_io,
- .init_early = tegra20_init_early,
- .init_irq = tegra_init_irq,
- .handle_irq = gic_handle_irq,
- .timer = &tegra_timer,
- .init_machine = tegra_seaboard_init,
- .init_late = tegra_init_late,
- .restart = tegra_assert_system_reset,
-MACHINE_END
-
-MACHINE_START(KAEN, "kaen")
- .atag_offset = 0x100,
- .map_io = tegra_map_common_io,
- .init_early = tegra20_init_early,
- .init_irq = tegra_init_irq,
- .handle_irq = gic_handle_irq,
- .timer = &tegra_timer,
- .init_machine = tegra_kaen_init,
- .init_late = tegra_init_late,
- .restart = tegra_assert_system_reset,
-MACHINE_END
-
-MACHINE_START(WARIO, "wario")
- .atag_offset = 0x100,
- .map_io = tegra_map_common_io,
- .init_early = tegra20_init_early,
- .init_irq = tegra_init_irq,
- .handle_irq = gic_handle_irq,
- .timer = &tegra_timer,
- .init_machine = tegra_wario_init,
- .init_late = tegra_init_late,
- .restart = tegra_assert_system_reset,
-MACHINE_END
diff --git a/arch/arm/mach-tegra/board-seaboard.h b/arch/arm/mach-tegra/board-seaboard.h
deleted file mode 100644
index 4c45d4ca3c49..000000000000
--- a/arch/arm/mach-tegra/board-seaboard.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * arch/arm/mach-tegra/board-seaboard.h
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _MACH_TEGRA_BOARD_SEABOARD_H
-#define _MACH_TEGRA_BOARD_SEABOARD_H
-
-#include <mach/gpio-tegra.h>
-
-#define SEABOARD_GPIO_TPS6586X(_x_) (TEGRA_NR_GPIOS + (_x_))
-#define SEABOARD_GPIO_WM8903(_x_) (SEABOARD_GPIO_TPS6586X(4) + (_x_))
-
-#define TEGRA_GPIO_SD2_CD TEGRA_GPIO_PI5
-#define TEGRA_GPIO_SD2_WP TEGRA_GPIO_PH1
-#define TEGRA_GPIO_SD2_POWER TEGRA_GPIO_PI6
-#define TEGRA_GPIO_LIDSWITCH TEGRA_GPIO_PC7
-#define TEGRA_GPIO_USB1 TEGRA_GPIO_PD0
-#define TEGRA_GPIO_POWERKEY TEGRA_GPIO_PV2
-#define TEGRA_GPIO_BACKLIGHT TEGRA_GPIO_PD4
-#define TEGRA_GPIO_LVDS_SHUTDOWN TEGRA_GPIO_PB2
-#define TEGRA_GPIO_BACKLIGHT_PWM TEGRA_GPIO_PU5
-#define TEGRA_GPIO_BACKLIGHT_VDD TEGRA_GPIO_PW0
-#define TEGRA_GPIO_EN_VDD_PNL TEGRA_GPIO_PC6
-#define TEGRA_GPIO_MAGNETOMETER TEGRA_GPIO_PN5
-#define TEGRA_GPIO_ISL29018_IRQ TEGRA_GPIO_PZ2
-#define TEGRA_GPIO_AC_ONLINE TEGRA_GPIO_PV3
-#define TEGRA_GPIO_WWAN_PWR SEABOARD_GPIO_TPS6586X(2)
-#define TEGRA_GPIO_CDC_IRQ TEGRA_GPIO_PX3
-#define TEGRA_GPIO_SPKR_EN SEABOARD_GPIO_WM8903(2)
-#define TEGRA_GPIO_HP_DET TEGRA_GPIO_PX1
-#define TEGRA_GPIO_KAEN_HP_MUTE TEGRA_GPIO_PA5
-
-void seaboard_pinmux_init(void);
-
-#endif
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h
index 65014968fc6c..f88e5143c767 100644
--- a/arch/arm/mach-tegra/board.h
+++ b/arch/arm/mach-tegra/board.h
@@ -46,5 +46,14 @@ int __init tegra_powergate_debugfs_init(void);
static inline int tegra_powergate_debugfs_init(void) { return 0; }
#endif
+int __init harmony_regulator_init(void);
+#ifdef CONFIG_TEGRA_PCI
+int __init harmony_pcie_init(void);
+#else
+static inline int harmony_pcie_init(void) { return 0; }
+#endif
+
+void __init tegra_paz00_wifikill_init(void);
+
extern struct sys_timer tegra_timer;
#endif
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 204a5c8b0b57..96fef6bcc651 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -33,6 +33,7 @@
#include "clock.h"
#include "fuse.h"
#include "pmc.h"
+#include "apbio.h"
/*
* Storage for debug-macro.S's state.
@@ -127,6 +128,7 @@ static void __init tegra_init_cache(u32 tag_latency, u32 data_latency)
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
void __init tegra20_init_early(void)
{
+ tegra_apb_io_init();
tegra_init_fuse();
tegra2_init_clocks();
tegra_clk_init_from_table(tegra20_clk_init_table);
@@ -138,6 +140,7 @@ void __init tegra20_init_early(void)
#ifdef CONFIG_ARCH_TEGRA_3x_SOC
void __init tegra30_init_early(void)
{
+ tegra_apb_io_init();
tegra_init_fuse();
tegra30_init_clocks();
tegra_clk_init_from_table(tegra30_clk_init_table);
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index 7a065f0cf633..ceb52db1e2f1 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -189,8 +189,8 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
return PTR_ERR(emc_clk);
}
- clk_enable(emc_clk);
- clk_enable(cpu_clk);
+ clk_prepare_enable(emc_clk);
+ clk_prepare_enable(cpu_clk);
cpufreq_frequency_table_cpuinfo(policy, freq_table);
cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
@@ -212,7 +212,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)
static int tegra_cpu_exit(struct cpufreq_policy *policy)
{
cpufreq_frequency_table_cpuinfo(policy, freq_table);
- clk_disable(emc_clk);
+ clk_disable_unprepare(emc_clk);
clk_put(emc_clk);
clk_put(cpu_clk);
return 0;
diff --git a/arch/arm/mach-tegra/cpuidle.c b/arch/arm/mach-tegra/cpuidle.c
index d83a8c0296f5..566e2f88899b 100644
--- a/arch/arm/mach-tegra/cpuidle.c
+++ b/arch/arm/mach-tegra/cpuidle.c
@@ -27,9 +27,9 @@
#include <linux/cpuidle.h>
#include <linux/hrtimer.h>
-#include <mach/iomap.h>
+#include <asm/proc-fns.h>
-extern void tegra_cpu_wfi(void);
+#include <mach/iomap.h>
static int tegra_idle_enter_lp3(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
@@ -64,7 +64,7 @@ static int tegra_idle_enter_lp3(struct cpuidle_device *dev,
enter = ktime_get();
- tegra_cpu_wfi();
+ cpu_do_idle();
exit = ktime_sub(ktime_get(), enter);
us = ktime_to_us(exit);
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index abea4f6e2dd5..29c5114d607c 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -714,13 +714,13 @@ int __init tegra_dma_init(void)
bitmap_fill(channel_usage, NV_DMA_MAX_CHANNELS);
- c = clk_get_sys("tegra-dma", NULL);
+ c = clk_get_sys("tegra-apbdma", NULL);
if (IS_ERR(c)) {
pr_err("Unable to get clock for APB DMA\n");
ret = PTR_ERR(c);
goto fail;
}
- ret = clk_enable(c);
+ ret = clk_prepare_enable(c);
if (ret != 0) {
pr_err("Unable to enable clock for APB DMA\n");
goto fail;
diff --git a/arch/arm/mach-tegra/pcie.c b/arch/arm/mach-tegra/pcie.c
index 0e09137506ec..d3ad5150d660 100644
--- a/arch/arm/mach-tegra/pcie.c
+++ b/arch/arm/mach-tegra/pcie.c
@@ -723,9 +723,9 @@ static int tegra_pcie_power_regate(void)
tegra_pcie_xclk_clamp(false);
- clk_enable(tegra_pcie.afi_clk);
- clk_enable(tegra_pcie.pex_clk);
- return clk_enable(tegra_pcie.pll_e);
+ clk_prepare_enable(tegra_pcie.afi_clk);
+ clk_prepare_enable(tegra_pcie.pex_clk);
+ return clk_prepare_enable(tegra_pcie.pll_e);
}
static int tegra_pcie_clocks_get(void)
diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c
index f5b12fb4ff12..15d506501ccc 100644
--- a/arch/arm/mach-tegra/powergate.c
+++ b/arch/arm/mach-tegra/powergate.c
@@ -146,7 +146,7 @@ int tegra_powergate_sequence_power_up(int id, struct clk *clk)
if (ret)
goto err_power;
- ret = clk_enable(clk);
+ ret = clk_prepare_enable(clk);
if (ret)
goto err_clk;
@@ -162,7 +162,7 @@ int tegra_powergate_sequence_power_up(int id, struct clk *clk)
return 0;
err_clamp:
- clk_disable(clk);
+ clk_disable_unprepare(clk);
err_clk:
tegra_powergate_power_off(id);
err_power:
diff --git a/arch/arm/mach-tegra/sleep.S b/arch/arm/mach-tegra/sleep.S
index 5b20197bae7f..d29b156a8011 100644
--- a/arch/arm/mach-tegra/sleep.S
+++ b/arch/arm/mach-tegra/sleep.S
@@ -62,32 +62,3 @@
movw \reg, #:lower16:\val
movt \reg, #:upper16:\val
.endm
-
-/*
- * tegra_cpu_wfi
- *
- * puts current CPU in clock-gated wfi using the flow controller
- *
- * corrupts r0-r3
- * must be called with MMU on
- */
-
-ENTRY(tegra_cpu_wfi)
- cpu_id r0
- cpu_to_halt_reg r1, r0
- cpu_to_csr_reg r2, r0
- mov32 r0, TEGRA_FLOW_CTRL_VIRT
- mov r3, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
- str r3, [r0, r2] @ clear event & interrupt status
- mov r3, #FLOW_CTRL_WAIT_FOR_INTERRUPT | FLOW_CTRL_JTAG_RESUME
- str r3, [r0, r1] @ put flow controller in wait irq mode
- dsb
- wfi
- mov r3, #0
- str r3, [r0, r1] @ clear flow controller halt status
- mov r3, #FLOW_CTRL_CSR_INTR_FLAG | FLOW_CTRL_CSR_EVENT_FLAG
- str r3, [r0, r2] @ clear event & interrupt status
- dsb
- mov pc, lr
-ENDPROC(tegra_cpu_wfi)
-
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index b59315ce3691..a703844b2061 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -69,6 +69,8 @@
#define PERIPH_CLK_SOURCE_MASK (3<<30)
#define PERIPH_CLK_SOURCE_SHIFT 30
+#define PERIPH_CLK_SOURCE_PWM_MASK (7<<28)
+#define PERIPH_CLK_SOURCE_PWM_SHIFT 28
#define PERIPH_CLK_SOURCE_ENABLE (1<<28)
#define PERIPH_CLK_SOURCE_DIVU71_MASK 0xFF
#define PERIPH_CLK_SOURCE_DIVU16_MASK 0xFFFF
@@ -908,9 +910,20 @@ static void tegra2_periph_clk_init(struct clk *c)
u32 val = clk_readl(c->reg);
const struct clk_mux_sel *mux = NULL;
const struct clk_mux_sel *sel;
+ u32 shift;
+ u32 mask;
+
+ if (c->flags & MUX_PWM) {
+ shift = PERIPH_CLK_SOURCE_PWM_SHIFT;
+ mask = PERIPH_CLK_SOURCE_PWM_MASK;
+ } else {
+ shift = PERIPH_CLK_SOURCE_SHIFT;
+ mask = PERIPH_CLK_SOURCE_MASK;
+ }
+
if (c->flags & MUX) {
for (sel = c->inputs; sel->input != NULL; sel++) {
- if (val >> PERIPH_CLK_SOURCE_SHIFT == sel->value)
+ if ((val & mask) >> shift == sel->value)
mux = sel;
}
BUG_ON(!mux);
@@ -1023,12 +1036,23 @@ static int tegra2_periph_clk_set_parent(struct clk *c, struct clk *p)
{
u32 val;
const struct clk_mux_sel *sel;
+ u32 mask, shift;
+
pr_debug("%s: %s %s\n", __func__, c->name, p->name);
+
+ if (c->flags & MUX_PWM) {
+ shift = PERIPH_CLK_SOURCE_PWM_SHIFT;
+ mask = PERIPH_CLK_SOURCE_PWM_MASK;
+ } else {
+ shift = PERIPH_CLK_SOURCE_SHIFT;
+ mask = PERIPH_CLK_SOURCE_MASK;
+ }
+
for (sel = c->inputs; sel->input != NULL; sel++) {
if (sel->input == p) {
val = clk_readl(c->reg);
- val &= ~PERIPH_CLK_SOURCE_MASK;
- val |= (sel->value) << PERIPH_CLK_SOURCE_SHIFT;
+ val &= ~mask;
+ val |= (sel->value) << shift;
if (c->refcnt)
clk_enable(p);
@@ -2149,14 +2173,14 @@ static struct clk tegra_clk_emc = {
}
static struct clk tegra_list_clks[] = {
- PERIPH_CLK("apbdma", "tegra-dma", NULL, 34, 0, 108000000, mux_pclk, 0),
+ PERIPH_CLK("apbdma", "tegra-apbdma", NULL, 34, 0, 108000000, mux_pclk, 0),
PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET),
PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0),
PERIPH_CLK("i2s1", "tegra20-i2s.0", NULL, 11, 0x100, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
PERIPH_CLK("i2s2", "tegra20-i2s.1", NULL, 18, 0x104, 26000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
PERIPH_CLK("spdif_out", "spdif_out", NULL, 10, 0x108, 100000000, mux_pllaout0_audio2x_pllp_clkm, MUX | DIV_U71),
PERIPH_CLK("spdif_in", "spdif_in", NULL, 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71),
- PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71),
+ PERIPH_CLK("pwm", "tegra-pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_audio_clkm_clk32, MUX | DIV_U71 | MUX_PWM),
PERIPH_CLK("spi", "spi", NULL, 43, 0x114, 40000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
PERIPH_CLK("xio", "xio", NULL, 45, 0x120, 150000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
PERIPH_CLK("twc", "twc", NULL, 16, 0x12c, 150000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71),
@@ -2189,11 +2213,11 @@ static struct clk tegra_list_clks[] = {
PERIPH_CLK("i2c2_i2c", "tegra-i2c.1", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
PERIPH_CLK("i2c3_i2c", "tegra-i2c.2", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
PERIPH_CLK("dvc_i2c", "tegra-i2c.3", "i2c", 0, 0, 72000000, mux_pllp_out3, 0),
- PERIPH_CLK("uarta", "uart.0", NULL, 6, 0x178, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uartb", "uart.1", NULL, 7, 0x17c, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uartc", "uart.2", NULL, 55, 0x1a0, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uartd", "uart.3", NULL, 65, 0x1c0, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
- PERIPH_CLK("uarte", "uart.4", NULL, 66, 0x1c4, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uarta", "tegra-uart.0", NULL, 6, 0x178, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartb", "tegra-uart.1", NULL, 7, 0x17c, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartc", "tegra-uart.2", NULL, 55, 0x1a0, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uartd", "tegra-uart.3", NULL, 65, 0x1c0, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
+ PERIPH_CLK("uarte", "tegra-uart.4", NULL, 66, 0x1c4, 600000000, mux_pllp_pllc_pllm_clkm, MUX),
PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | PERIPH_MANUAL_RESET), /* scales with voltage and process_id */
PERIPH_CLK("2d", "2d", NULL, 21, 0x15c, 300000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
PERIPH_CLK("vi", "tegra_camera", "vi", 20, 0x148, 150000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71), /* scales with voltage and process_id */
@@ -2245,20 +2269,16 @@ static struct clk tegra_list_clks[] = {
* table under two names.
*/
static struct clk_duplicate tegra_clk_duplicates[] = {
- CLK_DUPLICATE("uarta", "tegra_uart.0", NULL),
- CLK_DUPLICATE("uartb", "tegra_uart.1", NULL),
- CLK_DUPLICATE("uartc", "tegra_uart.2", NULL),
- CLK_DUPLICATE("uartd", "tegra_uart.3", NULL),
- CLK_DUPLICATE("uarte", "tegra_uart.4", NULL),
+ CLK_DUPLICATE("uarta", "serial8250.0", NULL),
+ CLK_DUPLICATE("uartb", "serial8250.1", NULL),
+ CLK_DUPLICATE("uartc", "serial8250.2", NULL),
+ CLK_DUPLICATE("uartd", "serial8250.3", NULL),
+ CLK_DUPLICATE("uarte", "serial8250.4", NULL),
CLK_DUPLICATE("usbd", "utmip-pad", NULL),
CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL),
CLK_DUPLICATE("usbd", "tegra-otg", NULL),
CLK_DUPLICATE("hdmi", "tegradc.0", "hdmi"),
CLK_DUPLICATE("hdmi", "tegradc.1", "hdmi"),
- CLK_DUPLICATE("pwm", "tegra_pwm.0", NULL),
- CLK_DUPLICATE("pwm", "tegra_pwm.1", NULL),
- CLK_DUPLICATE("pwm", "tegra_pwm.2", NULL),
- CLK_DUPLICATE("pwm", "tegra_pwm.3", NULL),
CLK_DUPLICATE("host1x", "tegra_grhost", "host1x"),
CLK_DUPLICATE("2d", "tegra_grhost", "gr2d"),
CLK_DUPLICATE("3d", "tegra_grhost", "gr3d"),
diff --git a/arch/arm/mach-tegra/tegra30_clocks.c b/arch/arm/mach-tegra/tegra30_clocks.c
index e33fe4b14a2a..6674f100e16f 100644
--- a/arch/arm/mach-tegra/tegra30_clocks.c
+++ b/arch/arm/mach-tegra/tegra30_clocks.c
@@ -2871,7 +2871,7 @@ static struct clk tegra30_clk_twd = {
}, \
}
struct clk tegra_list_clks[] = {
- PERIPH_CLK("apbdma", "tegra-dma", NULL, 34, 0, 26000000, mux_clk_m, 0),
+ PERIPH_CLK("apbdma", "tegra-apbdma", NULL, 34, 0, 26000000, mux_clk_m, 0),
PERIPH_CLK("rtc", "rtc-tegra", NULL, 4, 0, 32768, mux_clk_32k, PERIPH_NO_RESET | PERIPH_ON_APB),
PERIPH_CLK("kbc", "tegra-kbc", NULL, 36, 0, 32768, mux_clk_32k, PERIPH_NO_RESET | PERIPH_ON_APB),
PERIPH_CLK("timer", "timer", NULL, 5, 0, 26000000, mux_clk_m, 0),
@@ -2886,7 +2886,7 @@ struct clk tegra_list_clks[] = {
PERIPH_CLK("i2s4", "tegra30-i2s.4", NULL, 102, 0x3c0, 26000000, mux_pllaout0_audio4_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
PERIPH_CLK("spdif_out", "tegra30-spdif", "spdif_out", 10, 0x108, 100000000, mux_pllaout0_audio_2x_pllp_clkm, MUX | DIV_U71 | PERIPH_ON_APB),
PERIPH_CLK("spdif_in", "tegra30-spdif", "spdif_in", 10, 0x10c, 100000000, mux_pllp_pllc_pllm, MUX | DIV_U71 | PERIPH_ON_APB),
- PERIPH_CLK("pwm", "pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_clk32_clkm, MUX | MUX_PWM | DIV_U71 | PERIPH_ON_APB),
+ PERIPH_CLK("pwm", "tegra-pwm", NULL, 17, 0x110, 432000000, mux_pllp_pllc_clk32_clkm, MUX | MUX_PWM | DIV_U71 | PERIPH_ON_APB),
PERIPH_CLK("d_audio", "tegra30-ahub", "d_audio", 106, 0x3d0, 48000000, mux_plla_pllc_pllp_clkm, MUX | DIV_U71),
PERIPH_CLK("dam0", "tegra30-dam.0", NULL, 108, 0x3d8, 48000000, mux_plla_pllc_pllp_clkm, MUX | DIV_U71),
PERIPH_CLK("dam1", "tegra30-dam.1", NULL, 109, 0x3dc, 48000000, mux_plla_pllc_pllp_clkm, MUX | DIV_U71),
@@ -2924,16 +2924,11 @@ struct clk tegra_list_clks[] = {
PERIPH_CLK("i2c3", "tegra-i2c.2", NULL, 67, 0x1b8, 26000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
PERIPH_CLK("i2c4", "tegra-i2c.3", NULL, 103, 0x3c4, 26000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
PERIPH_CLK("i2c5", "tegra-i2c.4", NULL, 47, 0x128, 26000000, mux_pllp_clkm, MUX | DIV_U16 | PERIPH_ON_APB),
- PERIPH_CLK("uarta", "tegra_uart.0", NULL, 6, 0x178, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
- PERIPH_CLK("uartb", "tegra_uart.1", NULL, 7, 0x17c, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
- PERIPH_CLK("uartc", "tegra_uart.2", NULL, 55, 0x1a0, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
- PERIPH_CLK("uartd", "tegra_uart.3", NULL, 65, 0x1c0, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
- PERIPH_CLK("uarte", "tegra_uart.4", NULL, 66, 0x1c4, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
- PERIPH_CLK("uarta_dbg", "serial8250.0", "uarta", 6, 0x178, 800000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
- PERIPH_CLK("uartb_dbg", "serial8250.0", "uartb", 7, 0x17c, 800000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
- PERIPH_CLK("uartc_dbg", "serial8250.0", "uartc", 55, 0x1a0, 800000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
- PERIPH_CLK("uartd_dbg", "serial8250.0", "uartd", 65, 0x1c0, 800000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
- PERIPH_CLK("uarte_dbg", "serial8250.0", "uarte", 66, 0x1c4, 800000000, mux_pllp_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uarta", "tegra-uart.0", NULL, 6, 0x178, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uartb", "tegra-uart.1", NULL, 7, 0x17c, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uartc", "tegra-uart.2", NULL, 55, 0x1a0, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uartd", "tegra-uart.3", NULL, 65, 0x1c0, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
+ PERIPH_CLK("uarte", "tegra-uart.4", NULL, 66, 0x1c4, 800000000, mux_pllp_pllc_pllm_clkm, MUX | DIV_U71 | DIV_U71_UART | PERIPH_ON_APB),
PERIPH_CLK_EX("vi", "tegra_camera", "vi", 20, 0x148, 425000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT, &tegra_vi_clk_ops),
PERIPH_CLK("3d", "3d", NULL, 24, 0x158, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT | DIV_U71_IDLE | PERIPH_MANUAL_RESET),
PERIPH_CLK("3d2", "3d2", NULL, 98, 0x3b0, 520000000, mux_pllm_pllc_pllp_plla, MUX | DIV_U71 | DIV_U71_INT | DIV_U71_IDLE | PERIPH_MANUAL_RESET),
@@ -2983,6 +2978,11 @@ struct clk tegra_list_clks[] = {
* table under two names.
*/
struct clk_duplicate tegra_clk_duplicates[] = {
+ CLK_DUPLICATE("uarta", "serial8250.0", NULL),
+ CLK_DUPLICATE("uartb", "serial8250.1", NULL),
+ CLK_DUPLICATE("uartc", "serial8250.2", NULL),
+ CLK_DUPLICATE("uartd", "serial8250.3", NULL),
+ CLK_DUPLICATE("uarte", "serial8250.4", NULL),
CLK_DUPLICATE("usbd", "utmip-pad", NULL),
CLK_DUPLICATE("usbd", "tegra-ehci.0", NULL),
CLK_DUPLICATE("usbd", "tegra-otg", NULL),
@@ -2990,10 +2990,6 @@ struct clk_duplicate tegra_clk_duplicates[] = {
CLK_DUPLICATE("hdmi", "tegradc.1", "hdmi"),
CLK_DUPLICATE("dsib", "tegradc.0", "dsib"),
CLK_DUPLICATE("dsia", "tegradc.1", "dsia"),
- CLK_DUPLICATE("pwm", "tegra_pwm.0", NULL),
- CLK_DUPLICATE("pwm", "tegra_pwm.1", NULL),
- CLK_DUPLICATE("pwm", "tegra_pwm.2", NULL),
- CLK_DUPLICATE("pwm", "tegra_pwm.3", NULL),
CLK_DUPLICATE("bsev", "tegra-avp", "bsev"),
CLK_DUPLICATE("bsev", "nvavp", "bsev"),
CLK_DUPLICATE("vde", "tegra-aes", "vde"),
diff --git a/arch/arm/mach-tegra/timer.c b/arch/arm/mach-tegra/timer.c
index 315672c7bd48..57b5bdc13b9b 100644
--- a/arch/arm/mach-tegra/timer.c
+++ b/arch/arm/mach-tegra/timer.c
@@ -189,7 +189,7 @@ static void __init tegra_init_timer(void)
" Assuming 12Mhz input clock.\n");
rate = 12000000;
} else {
- clk_enable(clk);
+ clk_prepare_enable(clk);
rate = clk_get_rate(clk);
}
@@ -201,7 +201,7 @@ static void __init tegra_init_timer(void)
if (IS_ERR(clk))
pr_warn("Unable to get rtc-tegra clock\n");
else
- clk_enable(clk);
+ clk_prepare_enable(clk);
switch (rate) {
case 12000000:
diff --git a/arch/arm/mach-tegra/usb_phy.c b/arch/arm/mach-tegra/usb_phy.c
index 54e353c8e304..022b33a05c3a 100644
--- a/arch/arm/mach-tegra/usb_phy.c
+++ b/arch/arm/mach-tegra/usb_phy.c
@@ -247,7 +247,7 @@ static void utmip_pad_power_on(struct tegra_usb_phy *phy)
unsigned long val, flags;
void __iomem *base = phy->pad_regs;
- clk_enable(phy->pad_clk);
+ clk_prepare_enable(phy->pad_clk);
spin_lock_irqsave(&utmip_pad_lock, flags);
@@ -259,7 +259,7 @@ static void utmip_pad_power_on(struct tegra_usb_phy *phy)
spin_unlock_irqrestore(&utmip_pad_lock, flags);
- clk_disable(phy->pad_clk);
+ clk_disable_unprepare(phy->pad_clk);
}
static int utmip_pad_power_off(struct tegra_usb_phy *phy)
@@ -272,7 +272,7 @@ static int utmip_pad_power_off(struct tegra_usb_phy *phy)
return -EINVAL;
}
- clk_enable(phy->pad_clk);
+ clk_prepare_enable(phy->pad_clk);
spin_lock_irqsave(&utmip_pad_lock, flags);
@@ -284,7 +284,7 @@ static int utmip_pad_power_off(struct tegra_usb_phy *phy)
spin_unlock_irqrestore(&utmip_pad_lock, flags);
- clk_disable(phy->pad_clk);
+ clk_disable_unprepare(phy->pad_clk);
return 0;
}
@@ -580,7 +580,7 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
msleep(5);
gpio_direction_output(config->reset_gpio, 1);
- clk_enable(phy->clk);
+ clk_prepare_enable(phy->clk);
msleep(1);
val = readl(base + USB_SUSP_CTRL);
@@ -689,7 +689,7 @@ struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
err = PTR_ERR(phy->pll_u);
goto err0;
}
- clk_enable(phy->pll_u);
+ clk_prepare_enable(phy->pll_u);
parent_rate = clk_get_rate(clk_get_parent(phy->pll_u));
for (i = 0; i < ARRAY_SIZE(tegra_freq_table); i++) {
@@ -735,7 +735,7 @@ struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
return phy;
err1:
- clk_disable(phy->pll_u);
+ clk_disable_unprepare(phy->pll_u);
clk_put(phy->pll_u);
err0:
kfree(phy);
@@ -810,7 +810,7 @@ void tegra_usb_phy_close(struct tegra_usb_phy *phy)
clk_put(phy->clk);
else
utmip_pad_close(phy);
- clk_disable(phy->pll_u);
+ clk_disable_unprepare(phy->pll_u);
clk_put(phy->pll_u);
kfree(phy);
}
diff --git a/arch/arm/mach-u300/Makefile b/arch/arm/mach-u300/Makefile
index fd3a5c382f47..7e47d37aeb0e 100644
--- a/arch/arm/mach-u300/Makefile
+++ b/arch/arm/mach-u300/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel, U300 machine.
#
-obj-y := core.o clock.o timer.o
+obj-y := core.o timer.o
obj-m :=
obj-n :=
obj- :=
diff --git a/arch/arm/mach-u300/clock.c b/arch/arm/mach-u300/clock.c
deleted file mode 100644
index 5535dd0a78c9..000000000000
--- a/arch/arm/mach-u300/clock.c
+++ /dev/null
@@ -1,1504 +0,0 @@
-/*
- *
- * arch/arm/mach-u300/clock.c
- *
- *
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * Define clocks in the app platform.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
- *
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/clk.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/io.h>
-#include <linux/seq_file.h>
-#include <linux/clkdev.h>
-
-#include <mach/hardware.h>
-#include <mach/syscon.h>
-
-#include "clock.h"
-
-/*
- * TODO:
- * - move all handling of the CCR register into this file and create
- * a spinlock for the CCR register
- * - switch to the clkdevice lookup mechanism that maps clocks to
- * device ID:s instead when it becomes available in kernel 2.6.29.
- * - implement rate get/set for all clocks that need it.
- */
-
-/*
- * Syscon clock I/O registers lock so clock requests don't collide
- * NOTE: this is a local lock only used to lock access to clock and
- * reset registers in syscon.
- */
-static DEFINE_SPINLOCK(syscon_clkreg_lock);
-static DEFINE_SPINLOCK(syscon_resetreg_lock);
-
-/*
- * The clocking hierarchy currently looks like this.
- * NOTE: the idea is NOT to show how the clocks are routed on the chip!
- * The ideas is to show dependencies, so a clock higher up in the
- * hierarchy has to be on in order for another clock to be on. Now,
- * both CPU and DMA can actually be on top of the hierarchy, and that
- * is not modeled currently. Instead we have the backbone AMBA bus on
- * top. This bus cannot be programmed in any way but conceptually it
- * needs to be active for the bridges and devices to transport data.
- *
- * Please be aware that a few clocks are hw controlled, which mean that
- * the hw itself can turn on/off or change the rate of the clock when
- * needed!
- *
- * AMBA bus
- * |
- * +- CPU
- * +- FSMC NANDIF NAND Flash interface
- * +- SEMI Shared Memory interface
- * +- ISP Image Signal Processor (U335 only)
- * +- CDS (U335 only)
- * +- DMA Direct Memory Access Controller
- * +- AAIF APP/ACC Inteface (Mobile Scalable Link, MSL)
- * +- APEX
- * +- VIDEO_ENC AVE2/3 Video Encoder
- * +- XGAM Graphics Accelerator Controller
- * +- AHB
- * |
- * +- ahb:0 AHB Bridge
- * | |
- * | +- ahb:1 INTCON Interrupt controller
- * | +- ahb:3 MSPRO Memory Stick Pro controller
- * | +- ahb:4 EMIF External Memory interface
- * |
- * +- fast:0 FAST bridge
- * | |
- * | +- fast:1 MMCSD MMC/SD card reader controller
- * | +- fast:2 I2S0 PCM I2S channel 0 controller
- * | +- fast:3 I2S1 PCM I2S channel 1 controller
- * | +- fast:4 I2C0 I2C channel 0 controller
- * | +- fast:5 I2C1 I2C channel 1 controller
- * | +- fast:6 SPI SPI controller
- * | +- fast:7 UART1 Secondary UART (U335 only)
- * |
- * +- slow:0 SLOW bridge
- * |
- * +- slow:1 SYSCON (not possible to control)
- * +- slow:2 WDOG Watchdog
- * +- slow:3 UART0 primary UART
- * +- slow:4 TIMER_APP Application timer - used in Linux
- * +- slow:5 KEYPAD controller
- * +- slow:6 GPIO controller
- * +- slow:7 RTC controller
- * +- slow:8 BT Bus Tracer (not used currently)
- * +- slow:9 EH Event Handler (not used currently)
- * +- slow:a TIMER_ACC Access style timer (not used currently)
- * +- slow:b PPM (U335 only, what is that?)
- */
-
-/*
- * Reset control functions. We remember if a block has been
- * taken out of reset and don't remove the reset assertion again
- * and vice versa. Currently we only remove resets so the
- * enablement function is defined out.
- */
-static void syscon_block_reset_enable(struct clk *clk)
-{
- u16 val;
- unsigned long iflags;
-
- /* Not all blocks support resetting */
- if (!clk->res_reg || !clk->res_mask)
- return;
- spin_lock_irqsave(&syscon_resetreg_lock, iflags);
- val = readw(clk->res_reg);
- val |= clk->res_mask;
- writew(val, clk->res_reg);
- spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
- clk->reset = true;
-}
-
-static void syscon_block_reset_disable(struct clk *clk)
-{
- u16 val;
- unsigned long iflags;
-
- /* Not all blocks support resetting */
- if (!clk->res_reg || !clk->res_mask)
- return;
- spin_lock_irqsave(&syscon_resetreg_lock, iflags);
- val = readw(clk->res_reg);
- val &= ~clk->res_mask;
- writew(val, clk->res_reg);
- spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
- clk->reset = false;
-}
-
-int __clk_get(struct clk *clk)
-{
- u16 val;
-
- /* The MMC and MSPRO clocks need some special set-up */
- if (!strcmp(clk->name, "MCLK")) {
- /* Set default MMC clock divisor to 18.9 MHz */
- writew(0x0054U, U300_SYSCON_VBASE + U300_SYSCON_MMF0R);
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_MMCR);
- /* Disable the MMC feedback clock */
- val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
- /* Disable MSPRO frequency */
- val &= ~U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_MMCR);
- }
- if (!strcmp(clk->name, "MSPRO")) {
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_MMCR);
- /* Disable the MMC feedback clock */
- val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
- /* Enable MSPRO frequency */
- val |= U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_MMCR);
- }
- return 1;
-}
-EXPORT_SYMBOL(__clk_get);
-
-void __clk_put(struct clk *clk)
-{
-}
-EXPORT_SYMBOL(__clk_put);
-
-static void syscon_clk_disable(struct clk *clk)
-{
- unsigned long iflags;
-
- /* Don't touch the hardware controlled clocks */
- if (clk->hw_ctrld)
- return;
-
- spin_lock_irqsave(&syscon_clkreg_lock, iflags);
- writew(clk->clk_val, U300_SYSCON_VBASE + U300_SYSCON_SBCDR);
- spin_unlock_irqrestore(&syscon_clkreg_lock, iflags);
-}
-
-static void syscon_clk_enable(struct clk *clk)
-{
- unsigned long iflags;
-
- /* Don't touch the hardware controlled clocks */
- if (clk->hw_ctrld)
- return;
-
- spin_lock_irqsave(&syscon_clkreg_lock, iflags);
- writew(clk->clk_val, U300_SYSCON_VBASE + U300_SYSCON_SBCER);
- spin_unlock_irqrestore(&syscon_clkreg_lock, iflags);
-}
-
-static u16 syscon_clk_get_rate(void)
-{
- u16 val;
- unsigned long iflags;
-
- spin_lock_irqsave(&syscon_clkreg_lock, iflags);
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val &= U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
- spin_unlock_irqrestore(&syscon_clkreg_lock, iflags);
- return val;
-}
-
-#ifdef CONFIG_MACH_U300_USE_I2S_AS_MASTER
-static void enable_i2s0_vcxo(void)
-{
- u16 val;
- unsigned long iflags;
-
- spin_lock_irqsave(&syscon_clkreg_lock, iflags);
- /* Set I2S0 to use the VCXO 26 MHz clock */
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val |= U300_SYSCON_CCR_TURN_VCXO_ON;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val |= U300_SYSCON_CCR_I2S0_USE_VCXO;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CEFR);
- val |= U300_SYSCON_CEFR_I2S0_CLK_EN;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CEFR);
- spin_unlock_irqrestore(&syscon_clkreg_lock, iflags);
-}
-
-static void enable_i2s1_vcxo(void)
-{
- u16 val;
- unsigned long iflags;
-
- spin_lock_irqsave(&syscon_clkreg_lock, iflags);
- /* Set I2S1 to use the VCXO 26 MHz clock */
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val |= U300_SYSCON_CCR_TURN_VCXO_ON;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val |= U300_SYSCON_CCR_I2S1_USE_VCXO;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CEFR);
- val |= U300_SYSCON_CEFR_I2S1_CLK_EN;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CEFR);
- spin_unlock_irqrestore(&syscon_clkreg_lock, iflags);
-}
-
-static void disable_i2s0_vcxo(void)
-{
- u16 val;
- unsigned long iflags;
-
- spin_lock_irqsave(&syscon_clkreg_lock, iflags);
- /* Disable I2S0 use of the VCXO 26 MHz clock */
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val &= ~U300_SYSCON_CCR_I2S0_USE_VCXO;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- /* Deactivate VCXO if no one else is using VCXO */
- if (!(val & U300_SYSCON_CCR_I2S1_USE_VCXO))
- val &= ~U300_SYSCON_CCR_TURN_VCXO_ON;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CEFR);
- val &= ~U300_SYSCON_CEFR_I2S0_CLK_EN;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CEFR);
- spin_unlock_irqrestore(&syscon_clkreg_lock, iflags);
-}
-
-static void disable_i2s1_vcxo(void)
-{
- u16 val;
- unsigned long iflags;
-
- spin_lock_irqsave(&syscon_clkreg_lock, iflags);
- /* Disable I2S1 use of the VCXO 26 MHz clock */
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val &= ~U300_SYSCON_CCR_I2S1_USE_VCXO;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- /* Deactivate VCXO if no one else is using VCXO */
- if (!(val & U300_SYSCON_CCR_I2S0_USE_VCXO))
- val &= ~U300_SYSCON_CCR_TURN_VCXO_ON;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CEFR);
- val &= ~U300_SYSCON_CEFR_I2S0_CLK_EN;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CEFR);
- spin_unlock_irqrestore(&syscon_clkreg_lock, iflags);
-}
-#endif /* CONFIG_MACH_U300_USE_I2S_AS_MASTER */
-
-
-static void syscon_clk_rate_set_mclk(unsigned long rate)
-{
- u16 val;
- u32 reg;
- unsigned long iflags;
-
- switch (rate) {
- case 18900000:
- val = 0x0054;
- break;
- case 20800000:
- val = 0x0044;
- break;
- case 23100000:
- val = 0x0043;
- break;
- case 26000000:
- val = 0x0033;
- break;
- case 29700000:
- val = 0x0032;
- break;
- case 34700000:
- val = 0x0022;
- break;
- case 41600000:
- val = 0x0021;
- break;
- case 52000000:
- val = 0x0011;
- break;
- case 104000000:
- val = 0x0000;
- break;
- default:
- printk(KERN_ERR "Trying to set MCLK to unknown speed! %ld\n",
- rate);
- return;
- }
-
- spin_lock_irqsave(&syscon_clkreg_lock, iflags);
- reg = readw(U300_SYSCON_VBASE + U300_SYSCON_MMF0R) &
- ~U300_SYSCON_MMF0R_MASK;
- writew(reg | val, U300_SYSCON_VBASE + U300_SYSCON_MMF0R);
- spin_unlock_irqrestore(&syscon_clkreg_lock, iflags);
-}
-
-void syscon_clk_rate_set_cpuclk(unsigned long rate)
-{
- u16 val;
- unsigned long iflags;
-
- switch (rate) {
- case 13000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER;
- break;
- case 52000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE;
- break;
- case 104000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH;
- break;
- case 208000000:
- val = U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST;
- break;
- default:
- return;
- }
- spin_lock_irqsave(&syscon_clkreg_lock, iflags);
- val |= readw(U300_SYSCON_VBASE + U300_SYSCON_CCR) &
- ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK ;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- spin_unlock_irqrestore(&syscon_clkreg_lock, iflags);
-}
-EXPORT_SYMBOL(syscon_clk_rate_set_cpuclk);
-
-void clk_disable(struct clk *clk)
-{
- unsigned long iflags;
-
- spin_lock_irqsave(&clk->lock, iflags);
- if (clk->usecount > 0 && !(--clk->usecount)) {
- /* some blocks lack clocking registers and cannot be disabled */
- if (clk->disable)
- clk->disable(clk);
- if (likely((u32)clk->parent))
- clk_disable(clk->parent);
- }
-#ifdef CONFIG_MACH_U300_USE_I2S_AS_MASTER
- if (unlikely(!strcmp(clk->name, "I2S0")))
- disable_i2s0_vcxo();
- if (unlikely(!strcmp(clk->name, "I2S1")))
- disable_i2s1_vcxo();
-#endif
- spin_unlock_irqrestore(&clk->lock, iflags);
-}
-EXPORT_SYMBOL(clk_disable);
-
-int clk_enable(struct clk *clk)
-{
- int ret = 0;
- unsigned long iflags;
-
- spin_lock_irqsave(&clk->lock, iflags);
- if (clk->usecount++ == 0) {
- if (likely((u32)clk->parent))
- ret = clk_enable(clk->parent);
-
- if (unlikely(ret != 0))
- clk->usecount--;
- else {
- /* remove reset line (we never enable reset again) */
- syscon_block_reset_disable(clk);
- /* clocks without enable function are always on */
- if (clk->enable)
- clk->enable(clk);
-#ifdef CONFIG_MACH_U300_USE_I2S_AS_MASTER
- if (unlikely(!strcmp(clk->name, "I2S0")))
- enable_i2s0_vcxo();
- if (unlikely(!strcmp(clk->name, "I2S1")))
- enable_i2s1_vcxo();
-#endif
- }
- }
- spin_unlock_irqrestore(&clk->lock, iflags);
- return ret;
-
-}
-EXPORT_SYMBOL(clk_enable);
-
-/* Returns the clock rate in Hz */
-static unsigned long clk_get_rate_cpuclk(struct clk *clk)
-{
- u16 val;
-
- val = syscon_clk_get_rate();
-
- switch (val) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 13000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- return 52000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
- return 104000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
- return 208000000;
- default:
- break;
- }
- return clk->rate;
-}
-
-static unsigned long clk_get_rate_ahb_clk(struct clk *clk)
-{
- u16 val;
-
- val = syscon_clk_get_rate();
-
- switch (val) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 6500000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- return 26000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
- return 52000000;
- default:
- break;
- }
- return clk->rate;
-
-}
-
-static unsigned long clk_get_rate_emif_clk(struct clk *clk)
-{
- u16 val;
-
- val = syscon_clk_get_rate();
-
- switch (val) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 13000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- return 52000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
- return 104000000;
- default:
- break;
- }
- return clk->rate;
-
-}
-
-static unsigned long clk_get_rate_xgamclk(struct clk *clk)
-{
- u16 val;
-
- val = syscon_clk_get_rate();
-
- switch (val) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 6500000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- return 26000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
- return 52000000;
- default:
- break;
- }
-
- return clk->rate;
-}
-
-static unsigned long clk_get_rate_mclk(struct clk *clk)
-{
- u16 val;
-
- val = syscon_clk_get_rate();
-
- switch (val) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- /*
- * Here, the 208 MHz PLL gets shut down and the always
- * on 13 MHz PLL used for RTC etc kicks into use
- * instead.
- */
- return 13000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
- {
- /*
- * This clock is under program control. The register is
- * divided in two nybbles, bit 7-4 gives cycles-1 to count
- * high, bit 3-0 gives cycles-1 to count low. Distribute
- * these with no more than 1 cycle difference between
- * low and high and add low and high to get the actual
- * divisor. The base PLL is 208 MHz. Writing 0x00 will
- * divide by 1 and 1 so the highest frequency possible
- * is 104 MHz.
- *
- * e.g. 0x54 =>
- * f = 208 / ((5+1) + (4+1)) = 208 / 11 = 18.9 MHz
- */
- u16 val = readw(U300_SYSCON_VBASE + U300_SYSCON_MMF0R) &
- U300_SYSCON_MMF0R_MASK;
- switch (val) {
- case 0x0054:
- return 18900000;
- case 0x0044:
- return 20800000;
- case 0x0043:
- return 23100000;
- case 0x0033:
- return 26000000;
- case 0x0032:
- return 29700000;
- case 0x0022:
- return 34700000;
- case 0x0021:
- return 41600000;
- case 0x0011:
- return 52000000;
- case 0x0000:
- return 104000000;
- default:
- break;
- }
- }
- default:
- break;
- }
-
- return clk->rate;
-}
-
-static unsigned long clk_get_rate_i2s_i2c_spi(struct clk *clk)
-{
- u16 val;
-
- val = syscon_clk_get_rate();
-
- switch (val) {
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
- return 13000000;
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
- case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
- return 26000000;
- default:
- break;
- }
-
- return clk->rate;
-}
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- if (clk->get_rate)
- return clk->get_rate(clk);
- else
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-static unsigned long clk_round_rate_mclk(struct clk *clk, unsigned long rate)
-{
- if (rate <= 18900000)
- return 18900000;
- if (rate <= 20800000)
- return 20800000;
- if (rate <= 23100000)
- return 23100000;
- if (rate <= 26000000)
- return 26000000;
- if (rate <= 29700000)
- return 29700000;
- if (rate <= 34700000)
- return 34700000;
- if (rate <= 41600000)
- return 41600000;
- if (rate <= 52000000)
- return 52000000;
- return -EINVAL;
-}
-
-static unsigned long clk_round_rate_cpuclk(struct clk *clk, unsigned long rate)
-{
- if (rate <= 13000000)
- return 13000000;
- if (rate <= 52000000)
- return 52000000;
- if (rate <= 104000000)
- return 104000000;
- if (rate <= 208000000)
- return 208000000;
- return -EINVAL;
-}
-
-/*
- * This adjusts a requested rate to the closest exact rate
- * a certain clock can provide. For a fixed clock it's
- * mostly clk->rate.
- */
-long clk_round_rate(struct clk *clk, unsigned long rate)
-{
- /* TODO: get appropriate switches for EMIFCLK, AHBCLK and MCLK */
- /* Else default to fixed value */
-
- if (clk->round_rate) {
- return (long) clk->round_rate(clk, rate);
- } else {
- printk(KERN_ERR "clock: Failed to round rate of %s\n",
- clk->name);
- }
- return (long) clk->rate;
-}
-EXPORT_SYMBOL(clk_round_rate);
-
-static int clk_set_rate_mclk(struct clk *clk, unsigned long rate)
-{
- syscon_clk_rate_set_mclk(clk_round_rate(clk, rate));
- return 0;
-}
-
-static int clk_set_rate_cpuclk(struct clk *clk, unsigned long rate)
-{
- syscon_clk_rate_set_cpuclk(clk_round_rate(clk, rate));
- return 0;
-}
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
-{
- /* TODO: set for EMIFCLK and AHBCLK */
- /* Else assume the clock is fixed and fail */
- if (clk->set_rate) {
- return clk->set_rate(clk, rate);
- } else {
- printk(KERN_ERR "clock: Failed to set %s to %ld hz\n",
- clk->name, rate);
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(clk_set_rate);
-
-/*
- * Clock definitions. The clock parents are set to respective
- * bridge and the clock framework makes sure that the clocks have
- * parents activated and are brought out of reset when in use.
- *
- * Clocks that have hw_ctrld = true are hw controlled, and the hw
- * can by itself turn these clocks on and off.
- * So in other words, we don't really have to care about them.
- */
-
-static struct clk amba_clk = {
- .name = "AMBA",
- .rate = 52000000, /* this varies! */
- .hw_ctrld = true,
- .reset = false,
- .lock = __SPIN_LOCK_UNLOCKED(amba_clk.lock),
-};
-
-/*
- * These blocks are connected directly to the AMBA bus
- * with no bridge.
- */
-
-static struct clk cpu_clk = {
- .name = "CPU",
- .parent = &amba_clk,
- .rate = 208000000, /* this varies! */
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_CPU_RESET_EN,
- .set_rate = clk_set_rate_cpuclk,
- .get_rate = clk_get_rate_cpuclk,
- .round_rate = clk_round_rate_cpuclk,
- .lock = __SPIN_LOCK_UNLOCKED(cpu_clk.lock),
-};
-
-static struct clk nandif_clk = {
- .name = "FSMC",
- .parent = &amba_clk,
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_NANDIF_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_NANDIF_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(nandif_clk.lock),
-};
-
-static struct clk semi_clk = {
- .name = "SEMI",
- .parent = &amba_clk,
- .rate = 0, /* FIXME */
- /* It is not possible to reset SEMI */
- .hw_ctrld = false,
- .reset = false,
- .clk_val = U300_SYSCON_SBCER_SEMI_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(semi_clk.lock),
-};
-
-#ifdef CONFIG_MACH_U300_BS335
-static struct clk isp_clk = {
- .name = "ISP",
- .parent = &amba_clk,
- .rate = 0, /* FIXME */
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_ISP_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_ISP_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(isp_clk.lock),
-};
-
-static struct clk cds_clk = {
- .name = "CDS",
- .parent = &amba_clk,
- .rate = 0, /* FIXME */
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_CDS_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_CDS_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(cds_clk.lock),
-};
-#endif
-
-static struct clk dma_clk = {
- .name = "DMA",
- .parent = &amba_clk,
- .rate = 52000000, /* this varies! */
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_DMAC_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_DMAC_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(dma_clk.lock),
-};
-
-static struct clk aaif_clk = {
- .name = "AAIF",
- .parent = &amba_clk,
- .rate = 52000000, /* this varies! */
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_AAIF_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_AAIF_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(aaif_clk.lock),
-};
-
-static struct clk apex_clk = {
- .name = "APEX",
- .parent = &amba_clk,
- .rate = 0, /* FIXME */
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_APEX_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_APEX_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(apex_clk.lock),
-};
-
-static struct clk video_enc_clk = {
- .name = "VIDEO_ENC",
- .parent = &amba_clk,
- .rate = 208000000, /* this varies! */
- .hw_ctrld = false,
- .reset = false,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- /* This has XGAM in the name but refers to the video encoder */
- .res_mask = U300_SYSCON_RRR_XGAM_VC_SYNC_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_VIDEO_ENC_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(video_enc_clk.lock),
-};
-
-static struct clk xgam_clk = {
- .name = "XGAMCLK",
- .parent = &amba_clk,
- .rate = 52000000, /* this varies! */
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_XGAM_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_XGAM_CLK_EN,
- .get_rate = clk_get_rate_xgamclk,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(xgam_clk.lock),
-};
-
-/* This clock is used to activate the video encoder */
-static struct clk ahb_clk = {
- .name = "AHB",
- .parent = &amba_clk,
- .rate = 52000000, /* this varies! */
- .hw_ctrld = false, /* This one is set to false due to HW bug */
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_AHB_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_AHB_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .get_rate = clk_get_rate_ahb_clk,
- .lock = __SPIN_LOCK_UNLOCKED(ahb_clk.lock),
-};
-
-
-/*
- * Clocks on the AHB bridge
- */
-
-static struct clk ahb_subsys_clk = {
- .name = "AHB_SUBSYS",
- .parent = &amba_clk,
- .rate = 52000000, /* this varies! */
- .hw_ctrld = true,
- .reset = false,
- .clk_val = U300_SYSCON_SBCER_AHB_SUBSYS_BRIDGE_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .get_rate = clk_get_rate_ahb_clk,
- .lock = __SPIN_LOCK_UNLOCKED(ahb_subsys_clk.lock),
-};
-
-static struct clk intcon_clk = {
- .name = "INTCON",
- .parent = &ahb_subsys_clk,
- .rate = 52000000, /* this varies! */
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_INTCON_RESET_EN,
- /* INTCON can be reset but not clock-gated */
- .lock = __SPIN_LOCK_UNLOCKED(intcon_clk.lock),
-
-};
-
-static struct clk mspro_clk = {
- .name = "MSPRO",
- .parent = &ahb_subsys_clk,
- .rate = 0, /* FIXME */
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_MSPRO_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_MSPRO_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(mspro_clk.lock),
-};
-
-static struct clk emif_clk = {
- .name = "EMIF",
- .parent = &ahb_subsys_clk,
- .rate = 104000000, /* this varies! */
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RRR,
- .res_mask = U300_SYSCON_RRR_EMIF_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_EMIF_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .get_rate = clk_get_rate_emif_clk,
- .lock = __SPIN_LOCK_UNLOCKED(emif_clk.lock),
-};
-
-
-/*
- * Clocks on the FAST bridge
- */
-static struct clk fast_clk = {
- .name = "FAST_BRIDGE",
- .parent = &amba_clk,
- .rate = 13000000, /* this varies! */
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RFR,
- .res_mask = U300_SYSCON_RFR_FAST_BRIDGE_RESET_ENABLE,
- .clk_val = U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(fast_clk.lock),
-};
-
-/*
- * The MMCI apb_pclk is hardwired to the same terminal as the
- * external MCI clock. Thus this will be referenced twice.
- */
-static struct clk mmcsd_clk = {
- .name = "MCLK",
- .parent = &fast_clk,
- .rate = 18900000, /* this varies! */
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RFR,
- .res_mask = U300_SYSCON_RFR_MMC_RESET_ENABLE,
- .clk_val = U300_SYSCON_SBCER_MMC_CLK_EN,
- .get_rate = clk_get_rate_mclk,
- .set_rate = clk_set_rate_mclk,
- .round_rate = clk_round_rate_mclk,
- .disable = syscon_clk_disable,
- .enable = syscon_clk_enable,
- .lock = __SPIN_LOCK_UNLOCKED(mmcsd_clk.lock),
-};
-
-static struct clk i2s0_clk = {
- .name = "i2s0",
- .parent = &fast_clk,
- .rate = 26000000, /* this varies! */
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RFR,
- .res_mask = U300_SYSCON_RFR_PCM_I2S0_RESET_ENABLE,
- .clk_val = U300_SYSCON_SBCER_I2S0_CORE_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .get_rate = clk_get_rate_i2s_i2c_spi,
- .lock = __SPIN_LOCK_UNLOCKED(i2s0_clk.lock),
-};
-
-static struct clk i2s1_clk = {
- .name = "i2s1",
- .parent = &fast_clk,
- .rate = 26000000, /* this varies! */
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RFR,
- .res_mask = U300_SYSCON_RFR_PCM_I2S1_RESET_ENABLE,
- .clk_val = U300_SYSCON_SBCER_I2S1_CORE_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .get_rate = clk_get_rate_i2s_i2c_spi,
- .lock = __SPIN_LOCK_UNLOCKED(i2s1_clk.lock),
-};
-
-static struct clk i2c0_clk = {
- .name = "I2C0",
- .parent = &fast_clk,
- .rate = 26000000, /* this varies! */
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RFR,
- .res_mask = U300_SYSCON_RFR_I2C0_RESET_ENABLE,
- .clk_val = U300_SYSCON_SBCER_I2C0_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .get_rate = clk_get_rate_i2s_i2c_spi,
- .lock = __SPIN_LOCK_UNLOCKED(i2c0_clk.lock),
-};
-
-static struct clk i2c1_clk = {
- .name = "I2C1",
- .parent = &fast_clk,
- .rate = 26000000, /* this varies! */
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RFR,
- .res_mask = U300_SYSCON_RFR_I2C1_RESET_ENABLE,
- .clk_val = U300_SYSCON_SBCER_I2C1_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .get_rate = clk_get_rate_i2s_i2c_spi,
- .lock = __SPIN_LOCK_UNLOCKED(i2c1_clk.lock),
-};
-
-/*
- * The SPI apb_pclk is hardwired to the same terminal as the
- * external SPI clock. Thus this will be referenced twice.
- */
-static struct clk spi_clk = {
- .name = "SPI",
- .parent = &fast_clk,
- .rate = 26000000, /* this varies! */
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RFR,
- .res_mask = U300_SYSCON_RFR_SPI_RESET_ENABLE,
- .clk_val = U300_SYSCON_SBCER_SPI_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .get_rate = clk_get_rate_i2s_i2c_spi,
- .lock = __SPIN_LOCK_UNLOCKED(spi_clk.lock),
-};
-
-#ifdef CONFIG_MACH_U300_BS335
-static struct clk uart1_pclk = {
- .name = "UART1_PCLK",
- .parent = &fast_clk,
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RFR,
- .res_mask = U300_SYSCON_RFR_UART1_RESET_ENABLE,
- .clk_val = U300_SYSCON_SBCER_UART1_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(uart1_pclk.lock),
-};
-
-/* This one is hardwired to PLL13 */
-static struct clk uart1_clk = {
- .name = "UART1_CLK",
- .rate = 13000000,
- .hw_ctrld = true,
- .lock = __SPIN_LOCK_UNLOCKED(uart1_clk.lock),
-};
-#endif
-
-
-/*
- * Clocks on the SLOW bridge
- */
-static struct clk slow_clk = {
- .name = "SLOW_BRIDGE",
- .parent = &amba_clk,
- .rate = 13000000,
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
- .res_mask = U300_SYSCON_RSR_SLOW_BRIDGE_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_SLOW_BRIDGE_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(slow_clk.lock),
-};
-
-/* TODO: implement SYSCON clock? */
-
-static struct clk wdog_clk = {
- .name = "WDOG",
- .parent = &slow_clk,
- .hw_ctrld = false,
- .rate = 32768,
- .reset = false,
- /* This is always on, cannot be enabled/disabled or reset */
- .lock = __SPIN_LOCK_UNLOCKED(wdog_clk.lock),
-};
-
-static struct clk uart0_pclk = {
- .name = "UART0_PCLK",
- .parent = &slow_clk,
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
- .res_mask = U300_SYSCON_RSR_UART_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_UART_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(uart0_pclk.lock),
-};
-
-/* This one is hardwired to PLL13 */
-static struct clk uart0_clk = {
- .name = "UART0_CLK",
- .parent = &slow_clk,
- .rate = 13000000,
- .hw_ctrld = true,
- .lock = __SPIN_LOCK_UNLOCKED(uart0_clk.lock),
-};
-
-static struct clk keypad_clk = {
- .name = "KEYPAD",
- .parent = &slow_clk,
- .rate = 32768,
- .hw_ctrld = false,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
- .res_mask = U300_SYSCON_RSR_KEYPAD_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_KEYPAD_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(keypad_clk.lock),
-};
-
-static struct clk gpio_clk = {
- .name = "GPIO",
- .parent = &slow_clk,
- .rate = 13000000,
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
- .res_mask = U300_SYSCON_RSR_GPIO_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_GPIO_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(gpio_clk.lock),
-};
-
-static struct clk rtc_clk = {
- .name = "RTC",
- .parent = &slow_clk,
- .rate = 32768,
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
- .res_mask = U300_SYSCON_RSR_RTC_RESET_EN,
- /* This clock is always on, cannot be enabled/disabled */
- .lock = __SPIN_LOCK_UNLOCKED(rtc_clk.lock),
-};
-
-static struct clk bustr_clk = {
- .name = "BUSTR",
- .parent = &slow_clk,
- .rate = 13000000,
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
- .res_mask = U300_SYSCON_RSR_BTR_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_BTR_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(bustr_clk.lock),
-};
-
-static struct clk evhist_clk = {
- .name = "EVHIST",
- .parent = &slow_clk,
- .rate = 13000000,
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
- .res_mask = U300_SYSCON_RSR_EH_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_EH_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(evhist_clk.lock),
-};
-
-static struct clk timer_clk = {
- .name = "TIMER",
- .parent = &slow_clk,
- .rate = 13000000,
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
- .res_mask = U300_SYSCON_RSR_ACC_TMR_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_ACC_TMR_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(timer_clk.lock),
-};
-
-/*
- * There is a binary divider in the hardware that divides
- * the 13MHz PLL by 13 down to 1 MHz.
- */
-static struct clk app_timer_clk = {
- .name = "TIMER_APP",
- .parent = &slow_clk,
- .rate = 1000000,
- .hw_ctrld = true,
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
- .res_mask = U300_SYSCON_RSR_APP_TMR_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_APP_TMR_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(app_timer_clk.lock),
-};
-
-#ifdef CONFIG_MACH_U300_BS335
-static struct clk ppm_clk = {
- .name = "PPM",
- .parent = &slow_clk,
- .rate = 0, /* FIXME */
- .hw_ctrld = true, /* TODO: Look up if it is hw ctrld or not */
- .reset = true,
- .res_reg = U300_SYSCON_VBASE + U300_SYSCON_RSR,
- .res_mask = U300_SYSCON_RSR_PPM_RESET_EN,
- .clk_val = U300_SYSCON_SBCER_PPM_CLK_EN,
- .enable = syscon_clk_enable,
- .disable = syscon_clk_disable,
- .lock = __SPIN_LOCK_UNLOCKED(ppm_clk.lock),
-};
-#endif
-
-#define DEF_LOOKUP(devid, clkref) \
- { \
- .dev_id = devid, \
- .clk = clkref, \
- }
-
-#define DEF_LOOKUP_CON(devid, conid, clkref) \
- { \
- .dev_id = devid, \
- .con_id = conid, \
- .clk = clkref, \
- }
-
-/*
- * Here we only define clocks that are meaningful to
- * look up through clockdevice.
- */
-static struct clk_lookup lookups[] = {
- /* Connected directly to the AMBA bus */
- DEF_LOOKUP("amba", &amba_clk),
- DEF_LOOKUP("cpu", &cpu_clk),
- DEF_LOOKUP("fsmc-nand", &nandif_clk),
- DEF_LOOKUP("semi", &semi_clk),
-#ifdef CONFIG_MACH_U300_BS335
- DEF_LOOKUP("isp", &isp_clk),
- DEF_LOOKUP("cds", &cds_clk),
-#endif
- DEF_LOOKUP("dma", &dma_clk),
- DEF_LOOKUP("msl", &aaif_clk),
- DEF_LOOKUP("apex", &apex_clk),
- DEF_LOOKUP("video_enc", &video_enc_clk),
- DEF_LOOKUP("xgam", &xgam_clk),
- DEF_LOOKUP("ahb", &ahb_clk),
- /* AHB bridge clocks */
- DEF_LOOKUP("ahb_subsys", &ahb_subsys_clk),
- DEF_LOOKUP("intcon", &intcon_clk),
- DEF_LOOKUP_CON("intcon", "apb_pclk", &intcon_clk),
- DEF_LOOKUP("mspro", &mspro_clk),
- DEF_LOOKUP("pl172", &emif_clk),
- DEF_LOOKUP_CON("pl172", "apb_pclk", &emif_clk),
- /* FAST bridge clocks */
- DEF_LOOKUP("fast", &fast_clk),
- DEF_LOOKUP("mmci", &mmcsd_clk),
- DEF_LOOKUP_CON("mmci", "apb_pclk", &mmcsd_clk),
- /*
- * The .0 and .1 identifiers on these comes from the platform device
- * .id field and are assigned when the platform devices are registered.
- */
- DEF_LOOKUP("i2s.0", &i2s0_clk),
- DEF_LOOKUP("i2s.1", &i2s1_clk),
- DEF_LOOKUP("stu300.0", &i2c0_clk),
- DEF_LOOKUP("stu300.1", &i2c1_clk),
- DEF_LOOKUP("pl022", &spi_clk),
- DEF_LOOKUP_CON("pl022", "apb_pclk", &spi_clk),
-#ifdef CONFIG_MACH_U300_BS335
- DEF_LOOKUP("uart1", &uart1_clk),
- DEF_LOOKUP_CON("uart1", "apb_pclk", &uart1_pclk),
-#endif
- /* SLOW bridge clocks */
- DEF_LOOKUP("slow", &slow_clk),
- DEF_LOOKUP("coh901327_wdog", &wdog_clk),
- DEF_LOOKUP("uart0", &uart0_clk),
- DEF_LOOKUP_CON("uart0", "apb_pclk", &uart0_pclk),
- DEF_LOOKUP("apptimer", &app_timer_clk),
- DEF_LOOKUP("coh901461-keypad", &keypad_clk),
- DEF_LOOKUP("u300-gpio", &gpio_clk),
- DEF_LOOKUP("rtc-coh901331", &rtc_clk),
- DEF_LOOKUP("bustr", &bustr_clk),
- DEF_LOOKUP("evhist", &evhist_clk),
- DEF_LOOKUP("timer", &timer_clk),
-#ifdef CONFIG_MACH_U300_BS335
- DEF_LOOKUP("ppm", &ppm_clk),
-#endif
-};
-
-static void __init clk_register(void)
-{
- /* Register the lookups */
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-}
-
-#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
-/*
- * The following makes it possible to view the status (especially
- * reference count and reset status) for the clocks in the platform
- * by looking into the special file <debugfs>/u300_clocks
- */
-
-/* A list of all clocks in the platform */
-static struct clk *clks[] = {
- /* Top node clock for the AMBA bus */
- &amba_clk,
- /* Connected directly to the AMBA bus */
- &cpu_clk,
- &nandif_clk,
- &semi_clk,
-#ifdef CONFIG_MACH_U300_BS335
- &isp_clk,
- &cds_clk,
-#endif
- &dma_clk,
- &aaif_clk,
- &apex_clk,
- &video_enc_clk,
- &xgam_clk,
- &ahb_clk,
-
- /* AHB bridge clocks */
- &ahb_subsys_clk,
- &intcon_clk,
- &mspro_clk,
- &emif_clk,
- /* FAST bridge clocks */
- &fast_clk,
- &mmcsd_clk,
- &i2s0_clk,
- &i2s1_clk,
- &i2c0_clk,
- &i2c1_clk,
- &spi_clk,
-#ifdef CONFIG_MACH_U300_BS335
- &uart1_clk,
- &uart1_pclk,
-#endif
- /* SLOW bridge clocks */
- &slow_clk,
- &wdog_clk,
- &uart0_clk,
- &uart0_pclk,
- &app_timer_clk,
- &keypad_clk,
- &gpio_clk,
- &rtc_clk,
- &bustr_clk,
- &evhist_clk,
- &timer_clk,
-#ifdef CONFIG_MACH_U300_BS335
- &ppm_clk,
-#endif
-};
-
-static int u300_clocks_show(struct seq_file *s, void *data)
-{
- struct clk *clk;
- int i;
-
- seq_printf(s, "CLOCK DEVICE RESET STATE\t" \
- "ACTIVE\tUSERS\tHW CTRL FREQ\n");
- seq_printf(s, "---------------------------------------------" \
- "-----------------------------------------\n");
- for (i = 0; i < ARRAY_SIZE(clks); i++) {
- clk = clks[i];
- if (clk != ERR_PTR(-ENOENT)) {
- /* Format clock and device name nicely */
- char cdp[33];
- int chars;
-
- chars = snprintf(&cdp[0], 17, "%s", clk->name);
- while (chars < 16) {
- cdp[chars] = ' ';
- chars++;
- }
- chars = snprintf(&cdp[16], 17, "%s", clk->dev ?
- dev_name(clk->dev) : "N/A");
- while (chars < 16) {
- cdp[chars+16] = ' ';
- chars++;
- }
- cdp[32] = '\0';
- if (clk->get_rate || clk->rate != 0)
- seq_printf(s,
- "%s%s\t%s\t%d\t%s\t%lu Hz\n",
- &cdp[0],
- clk->reset ?
- "ASSERTED" : "RELEASED",
- clk->usecount ? "ON" : "OFF",
- clk->usecount,
- clk->hw_ctrld ? "YES" : "NO ",
- clk_get_rate(clk));
- else
- seq_printf(s,
- "%s%s\t%s\t%d\t%s\t" \
- "(unknown rate)\n",
- &cdp[0],
- clk->reset ?
- "ASSERTED" : "RELEASED",
- clk->usecount ? "ON" : "OFF",
- clk->usecount,
- clk->hw_ctrld ? "YES" : "NO ");
- }
- }
- return 0;
-}
-
-static int u300_clocks_open(struct inode *inode, struct file *file)
-{
- return single_open(file, u300_clocks_show, NULL);
-}
-
-static const struct file_operations u300_clocks_operations = {
- .open = u300_clocks_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int __init init_clk_read_debugfs(void)
-{
- /* Expose a simple debugfs interface to view all clocks */
- (void) debugfs_create_file("u300_clocks", S_IFREG | S_IRUGO,
- NULL, NULL,
- &u300_clocks_operations);
- return 0;
-}
-/*
- * This needs to come in after the core_initcall() for the
- * overall clocks, because debugfs is not available until
- * the subsystems come up.
- */
-module_init(init_clk_read_debugfs);
-#endif
-
-int __init u300_clock_init(void)
-{
- u16 val;
-
- /*
- * FIXME: shall all this powermanagement stuff really live here???
- */
-
- /* Set system to run at PLL208, max performance, a known state. */
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val &= ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- /* Wait for the PLL208 to lock if not locked in yet */
- while (!(readw(U300_SYSCON_VBASE + U300_SYSCON_CSR) &
- U300_SYSCON_CSR_PLL208_LOCK_IND));
-
- /* Power management enable */
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_PMCR);
- val |= U300_SYSCON_PMCR_PWR_MGNT_ENABLE;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_PMCR);
-
- clk_register();
-
- /*
- * Some of these may be on when we boot the system so make sure they
- * are turned OFF.
- */
- syscon_block_reset_enable(&timer_clk);
- timer_clk.disable(&timer_clk);
-
- /*
- * These shall be turned on by default when we boot the system
- * so make sure they are ON. (Adding CPU here is a bit too much.)
- * These clocks will be claimed by drivers later.
- */
- syscon_block_reset_disable(&semi_clk);
- syscon_block_reset_disable(&emif_clk);
- clk_enable(&semi_clk);
- clk_enable(&emif_clk);
-
- return 0;
-}
diff --git a/arch/arm/mach-u300/clock.h b/arch/arm/mach-u300/clock.h
deleted file mode 100644
index 4f50ca8f901e..000000000000
--- a/arch/arm/mach-u300/clock.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * arch/arm/mach-u300/include/mach/clock.h
- *
- * Copyright (C) 2004 - 2005 Nokia corporation
- * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
- * Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
- * Copyright (C) 2007-2009 ST-Ericsson AB
- * Adopted to ST-Ericsson U300 platforms by
- * Jonas Aaberg <jonas.aberg@stericsson.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifndef __MACH_CLOCK_H
-#define __MACH_CLOCK_H
-
-#include <linux/clk.h>
-
-struct clk {
- struct list_head node;
- struct module *owner;
- struct device *dev;
- const char *name;
- struct clk *parent;
-
- spinlock_t lock;
- unsigned long rate;
- bool reset;
- __u16 clk_val;
- __s8 usecount;
- void __iomem * res_reg;
- __u16 res_mask;
-
- bool hw_ctrld;
-
- void (*recalc) (struct clk *);
- int (*set_rate) (struct clk *, unsigned long);
- unsigned long (*get_rate) (struct clk *);
- unsigned long (*round_rate) (struct clk *, unsigned long);
- void (*init) (struct clk *);
- void (*enable) (struct clk *);
- void (*disable) (struct clk *);
-};
-
-int u300_clock_init(void);
-
-#endif
diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c
index 33339745d432..03acf1883ec7 100644
--- a/arch/arm/mach-u300/core.c
+++ b/arch/arm/mach-u300/core.c
@@ -30,6 +30,7 @@
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/clk-u300.h>
#include <asm/types.h>
#include <asm/setup.h>
@@ -44,7 +45,6 @@
#include <mach/dma_channels.h>
#include <mach/gpio-u300.h>
-#include "clock.h"
#include "spi.h"
#include "i2c.h"
#include "u300-gpio.h"
@@ -1658,12 +1658,20 @@ void __init u300_init_irq(void)
int i;
/* initialize clocking early, we want to clock the INTCON */
- u300_clock_init();
+ u300_clk_init(U300_SYSCON_VBASE);
+
+ /* Bootstrap EMIF and SEMI clocks */
+ clk = clk_get_sys("pl172", NULL);
+ BUG_ON(IS_ERR(clk));
+ clk_prepare_enable(clk);
+ clk = clk_get_sys("semi", NULL);
+ BUG_ON(IS_ERR(clk));
+ clk_prepare_enable(clk);
/* Clock the interrupt controller */
clk = clk_get_sys("intcon", NULL);
BUG_ON(IS_ERR(clk));
- clk_enable(clk);
+ clk_prepare_enable(clk);
for (i = 0; i < U300_VIC_IRQS_END; i++)
set_bit(i, (unsigned long *) &mask[0]);
@@ -1811,13 +1819,6 @@ void __init u300_init_devices(void)
/* Check what platform we run and print some status information */
u300_init_check_chip();
- /* Set system to run at PLL208, max performance, a known state. */
- val = readw(U300_SYSCON_VBASE + U300_SYSCON_CCR);
- val &= ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
- writew(val, U300_SYSCON_VBASE + U300_SYSCON_CCR);
- /* Wait for the PLL208 to lock if not locked in yet */
- while (!(readw(U300_SYSCON_VBASE + U300_SYSCON_CSR) &
- U300_SYSCON_CSR_PLL208_LOCK_IND));
/* Initialize SPI device with some board specifics */
u300_spi_init(&pl022_device);
diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c
index bc1c7897e82d..56ac06d38ec1 100644
--- a/arch/arm/mach-u300/timer.c
+++ b/arch/arm/mach-u300/timer.c
@@ -354,7 +354,7 @@ static void __init u300_timer_init(void)
/* Clock the interrupt controller */
clk = clk_get_sys("apptimer", NULL);
BUG_ON(IS_ERR(clk));
- clk_enable(clk);
+ clk_prepare_enable(clk);
rate = clk_get_rate(clk);
setup_sched_clock(u300_read_sched_clock, 32, rate);
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 53d3d46dec12..c013bbf79cac 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -41,6 +41,7 @@ config MACH_HREFV60
config MACH_SNOWBALL
bool "U8500 Snowball platform"
select MACH_MOP500
+ select LEDS_GPIO
help
Include support for the snowball development platform.
diff --git a/arch/arm/mach-ux500/board-mop500-sdi.c b/arch/arm/mach-ux500/board-mop500-sdi.c
index 920251cf834c..18ff781cfbe4 100644
--- a/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -80,7 +80,7 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
};
#endif
-static struct mmci_platform_data mop500_sdi0_data = {
+struct mmci_platform_data mop500_sdi0_data = {
.ios_handler = mop500_sdi0_ios_handler,
.ocr_mask = MMC_VDD_29_30,
.f_max = 50000000,
@@ -227,7 +227,7 @@ static struct stedma40_chan_cfg mop500_sdi4_dma_cfg_tx = {
};
#endif
-static struct mmci_platform_data mop500_sdi4_data = {
+struct mmci_platform_data mop500_sdi4_data = {
.ocr_mask = MMC_VDD_29_30,
.f_max = 50000000,
.capabilities = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA |
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 1509a3cb5833..a310222951da 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -25,6 +25,7 @@
#include <linux/mfd/tc3589x.h>
#include <linux/mfd/tps6105x.h>
#include <linux/mfd/abx500/ab8500-gpio.h>
+#include <linux/mfd/abx500/ab8500-codec.h>
#include <linux/leds-lp5521.h>
#include <linux/input.h>
#include <linux/smsc911x.h>
@@ -58,7 +59,7 @@
static struct gpio_led snowball_led_array[] = {
{
.name = "user_led",
- .default_trigger = "none",
+ .default_trigger = "heartbeat",
.gpio = 142,
},
};
@@ -97,6 +98,18 @@ static struct ab8500_gpio_platform_data ab8500_gpio_pdata = {
0x7A, 0x00, 0x00},
};
+/* ab8500-codec */
+static struct ab8500_codec_platform_data ab8500_codec_pdata = {
+ .amics = {
+ .mic1_type = AMIC_TYPE_DIFFERENTIAL,
+ .mic2_type = AMIC_TYPE_DIFFERENTIAL,
+ .mic1a_micbias = AMIC_MICBIAS_VAMIC1,
+ .mic1b_micbias = AMIC_MICBIAS_VAMIC1,
+ .mic2_micbias = AMIC_MICBIAS_VAMIC2
+ },
+ .ear_cmv = EAR_CMV_0_95V
+};
+
static struct gpio_keys_button snowball_key_array[] = {
{
.gpio = 32,
@@ -195,6 +208,7 @@ static struct ab8500_platform_data ab8500_platdata = {
.regulator = ab8500_regulators,
.num_regulator = ARRAY_SIZE(ab8500_regulators),
.gpio = &ab8500_gpio_pdata,
+ .codec = &ab8500_codec_pdata,
};
static struct resource ab8500_resources[] = {
@@ -331,43 +345,12 @@ static struct i2c_board_info __initdata mop500_i2c2_devices[] = {
},
};
-#define U8500_I2C_CONTROLLER(id, _slsu, _tft, _rft, clk, t_out, _sm) \
-static struct nmk_i2c_controller u8500_i2c##id##_data = { \
- /* \
- * slave data setup time, which is \
- * 250 ns,100ns,10ns which is 14,6,2 \
- * respectively for a 48 Mhz \
- * i2c clock \
- */ \
- .slsu = _slsu, \
- /* Tx FIFO threshold */ \
- .tft = _tft, \
- /* Rx FIFO threshold */ \
- .rft = _rft, \
- /* std. mode operation */ \
- .clk_freq = clk, \
- /* Slave response timeout(ms) */\
- .timeout = t_out, \
- .sm = _sm, \
-}
-
-/*
- * The board uses 4 i2c controllers, initialize all of
- * them with slave data setup time of 250 ns,
- * Tx & Rx FIFO threshold values as 8 and standard
- * mode of operation
- */
-U8500_I2C_CONTROLLER(0, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
-U8500_I2C_CONTROLLER(1, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
-U8500_I2C_CONTROLLER(2, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
-U8500_I2C_CONTROLLER(3, 0xe, 1, 8, 100000, 200, I2C_FREQ_MODE_FAST);
-
static void __init mop500_i2c_init(struct device *parent)
{
- db8500_add_i2c0(parent, &u8500_i2c0_data);
- db8500_add_i2c1(parent, &u8500_i2c1_data);
- db8500_add_i2c2(parent, &u8500_i2c2_data);
- db8500_add_i2c3(parent, &u8500_i2c3_data);
+ db8500_add_i2c0(parent, NULL);
+ db8500_add_i2c1(parent, NULL);
+ db8500_add_i2c2(parent, NULL);
+ db8500_add_i2c3(parent, NULL);
}
static struct gpio_keys_button mop500_gpio_keys[] = {
@@ -625,11 +608,6 @@ static struct platform_device *snowball_platform_devs[] __initdata = {
&ab8500_device,
};
-static struct platform_device *snowball_of_platform_devs[] __initdata = {
- &snowball_led_dev,
- &snowball_key_dev,
-};
-
static void __init mop500_init_machine(void)
{
struct device *parent = NULL;
@@ -769,6 +747,11 @@ MACHINE_END
#ifdef CONFIG_MACH_UX500_DT
+static struct platform_device *snowball_of_platform_devs[] __initdata = {
+ &snowball_led_dev,
+ &snowball_key_dev,
+};
+
struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
/* Requires DMA and call-back bindings. */
OF_DEV_AUXDATA("arm,pl011", 0x80120000, "uart0", &uart0_plat),
@@ -776,6 +759,8 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("arm,pl011", 0x80007000, "uart2", &uart2_plat),
/* Requires DMA bindings. */
OF_DEV_AUXDATA("arm,pl022", 0x80002000, "ssp0", &ssp0_plat),
+ OF_DEV_AUXDATA("arm,pl18x", 0x80126000, "sdi0", &mop500_sdi0_data),
+ OF_DEV_AUXDATA("arm,pl18x", 0x80114000, "sdi4", &mop500_sdi4_data),
/* Requires clock name bindings. */
OF_DEV_AUXDATA("st,nomadik-gpio", 0x8012e000, "gpio.0", NULL),
OF_DEV_AUXDATA("st,nomadik-gpio", 0x8012e080, "gpio.1", NULL),
@@ -786,6 +771,13 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e000, "gpio.6", NULL),
OF_DEV_AUXDATA("st,nomadik-gpio", 0x8011e080, "gpio.7", NULL),
OF_DEV_AUXDATA("st,nomadik-gpio", 0xa03fe000, "gpio.8", NULL),
+ OF_DEV_AUXDATA("st,nomadik-i2c", 0x80004000, "nmk-i2c.0", NULL),
+ OF_DEV_AUXDATA("st,nomadik-i2c", 0x80122000, "nmk-i2c.1", NULL),
+ OF_DEV_AUXDATA("st,nomadik-i2c", 0x80128000, "nmk-i2c.2", NULL),
+ OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL),
+ OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
+ /* Requires device name bindings. */
+ OF_DEV_AUXDATA("stericsson,nmk_pinctrl", 0, "pinctrl-db8500", NULL),
{},
};
@@ -818,8 +810,6 @@ static void __init u8500_init_machine(void)
for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
mop500_platform_devs[i]->dev.parent = parent;
- for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++)
- snowball_platform_devs[i]->dev.parent = parent;
/* automatically probe child nodes of db8500 device */
of_platform_populate(NULL, u8500_local_bus_nodes, u8500_auxdata_lookup, parent);
@@ -838,18 +828,6 @@ static void __init u8500_init_machine(void)
mop500_uib_init();
- } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
- /*
- * Devices to be DT:ed:
- * snowball_led_dev = todo
- * snowball_key_dev = todo
- * snowball_sbnet_dev = done
- * ab8500_device = done
- */
- platform_add_devices(snowball_of_platform_devs,
- ARRAY_SIZE(snowball_of_platform_devs));
-
- snowball_sdi_init(parent);
} else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
/*
* The HREFv60 board removed a GPIO expander and routed
@@ -871,7 +849,6 @@ static void __init u8500_init_machine(void)
mop500_uib_init();
}
- mop500_i2c_init(parent);
/* This board has full regulator constraints */
regulator_has_full_constraints();
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index 2f87b25a908a..b5bfc1a78b1a 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -9,6 +9,7 @@
/* For NOMADIK_NR_GPIO */
#include <mach/irqs.h>
+#include <linux/amba/mmci.h>
/* Snowball specific GPIO assignments, this board has no GPIO expander */
#define SNOWBALL_ACCEL_INT1_GPIO 163
@@ -78,6 +79,8 @@
struct device;
struct i2c_board_info;
+extern struct mmci_platform_data mop500_sdi0_data;
+extern struct mmci_platform_data mop500_sdi4_data;
extern void mop500_sdi_init(struct device *parent);
extern void snowball_sdi_init(struct device *parent);
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 33275eb4c689..c8dd94f606dc 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -139,7 +139,6 @@ static struct platform_device *platform_devs[] __initdata = {
static struct platform_device *of_platform_devs[] __initdata = {
&u8500_dma40_device,
- &db8500_pmu_device,
};
static resource_size_t __initdata db8500_gpio_base[] = {
@@ -237,7 +236,6 @@ struct device * __init u8500_of_init_devices(void)
parent = db8500_soc_device_init();
- db8500_add_rtc(parent);
db8500_add_usb(parent, usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
platform_device_register_data(parent,
@@ -249,7 +247,7 @@ struct device * __init u8500_of_init_devices(void)
/*
* Devices to be DT:ed:
* u8500_dma40_device = todo
- * db8500_pmu_device = todo
+ * db8500_pmu_device = done
* db8500_prcmu_device = done
*/
platform_add_devices(of_platform_devs, ARRAY_SIZE(of_platform_devs));
diff --git a/arch/arm/mach-ux500/timer.c b/arch/arm/mach-ux500/timer.c
index 741e71feca78..66e7f00884ab 100644
--- a/arch/arm/mach-ux500/timer.c
+++ b/arch/arm/mach-ux500/timer.c
@@ -63,8 +63,10 @@ static void __init ux500_timer_init(void)
/* TODO: Once MTU has been DT:ed place code above into else. */
if (of_have_populated_dt()) {
+#ifdef CONFIG_OF
np = of_find_matching_node(NULL, prcmu_timer_of_match);
if (!np)
+#endif
goto dt_fail;
tmp_base = of_iomap(np, 0);
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index bec933b04ef0..e95bf84cc837 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -339,7 +339,6 @@ void __init pci_versatile_preinit(void)
static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
- int devslot = PCI_SLOT(dev->devfn);
/* slot, pin, irq
* 24 1 27
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index cf8730d35e70..fc3730f01650 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -2,7 +2,8 @@ menu "Versatile Express platform type"
depends on ARCH_VEXPRESS
config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
- bool
+ bool "Enable A5 and A9 only errata work-arounds"
+ default y
select ARM_ERRATA_720789
select ARM_ERRATA_751472
select PL310_ERRATA_753970 if CACHE_PL310
@@ -14,7 +15,6 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
config ARCH_VEXPRESS_CA9X4
bool "Versatile Express Cortex-A9x4 tile"
- select ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
select ARM_GIC
select CPU_V7
select HAVE_SMP
@@ -22,7 +22,6 @@ config ARCH_VEXPRESS_CA9X4
config ARCH_VEXPRESS_DT
bool "Device Tree support for Versatile Express platforms"
- select ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
select ARM_GIC
select ARM_PATCH_PHYS_VIRT
select AUTO_ZRELADDR
diff --git a/arch/arm/mach-vexpress/Makefile.boot b/arch/arm/mach-vexpress/Makefile.boot
index 909f85ebf5f4..318d308dfb93 100644
--- a/arch/arm/mach-vexpress/Makefile.boot
+++ b/arch/arm/mach-vexpress/Makefile.boot
@@ -6,4 +6,5 @@ initrd_phys-y := 0x60800000
dtb-$(CONFIG_ARCH_VEXPRESS_DT) += vexpress-v2p-ca5s.dtb \
vexpress-v2p-ca9.dtb \
- vexpress-v2p-ca15-tc1.dtb
+ vexpress-v2p-ca15-tc1.dtb \
+ vexpress-v2p-ca15_a7.dtb
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index c65cc3b462a5..61c492403b05 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -66,8 +66,15 @@ static void __init ct_ca9x4_init_irq(void)
static void ct_ca9x4_clcd_enable(struct clcd_fb *fb)
{
- v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE_DB1, 0);
- v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE_DB1, 2);
+ u32 site = v2m_get_master_site();
+
+ /*
+ * Old firmware was using the "site" component of the command
+ * to control the DVI muxer (while it should be always 0 ie. MB).
+ * Newer firmware uses the data register. Keep both for compatibility.
+ */
+ v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE(site), site);
+ v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE(SYS_CFG_SITE_MB), 2);
}
static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
@@ -105,43 +112,11 @@ static struct amba_device *ct_ca9x4_amba_devs[] __initdata = {
};
-static long ct_round(struct clk *clk, unsigned long rate)
-{
- return rate;
-}
-
-static int ct_set(struct clk *clk, unsigned long rate)
-{
- return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_DB1 | 1, rate);
-}
-
-static const struct clk_ops osc1_clk_ops = {
- .round = ct_round,
- .set = ct_set,
-};
-
-static struct clk osc1_clk = {
- .ops = &osc1_clk_ops,
- .rate = 24000000,
-};
-
-static struct clk ct_sp804_clk = {
- .rate = 1000000,
-};
-
-static struct clk_lookup lookups[] = {
- { /* CLCD */
- .dev_id = "ct:clcd",
- .clk = &osc1_clk,
- }, { /* SP804 timers */
- .dev_id = "sp804",
- .con_id = "ct-timer0",
- .clk = &ct_sp804_clk,
- }, { /* SP804 timers */
- .dev_id = "sp804",
- .con_id = "ct-timer1",
- .clk = &ct_sp804_clk,
- },
+static struct v2m_osc ct_osc1 = {
+ .osc = 1,
+ .rate_min = 10000000,
+ .rate_max = 80000000,
+ .rate_default = 23750000,
};
static struct resource pmu_resources[] = {
@@ -174,14 +149,10 @@ static struct platform_device pmu_device = {
.resource = pmu_resources,
};
-static void __init ct_ca9x4_init_early(void)
-{
- clkdev_add_table(lookups, ARRAY_SIZE(lookups));
-}
-
static void __init ct_ca9x4_init(void)
{
int i;
+ struct clk *clk;
#ifdef CONFIG_CACHE_L2X0
void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
@@ -193,6 +164,10 @@ static void __init ct_ca9x4_init(void)
l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
#endif
+ ct_osc1.site = v2m_get_master_site();
+ clk = v2m_osc_register("ct:osc1", &ct_osc1);
+ clk_register_clkdev(clk, NULL, "ct:clcd");
+
for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
@@ -234,7 +209,6 @@ struct ct_desc ct_ca9x4_desc __initdata = {
.id = V2M_CT_ID_CA9,
.name = "CA9x4",
.map_io = ct_ca9x4_map_io,
- .init_early = ct_ca9x4_init_early,
.init_irq = ct_ca9x4_init_irq,
.init_tile = ct_ca9x4_init,
#ifdef CONFIG_SMP
diff --git a/arch/arm/mach-vexpress/include/mach/clkdev.h b/arch/arm/mach-vexpress/include/mach/clkdev.h
deleted file mode 100644
index 3f8307d73cad..000000000000
--- a/arch/arm/mach-vexpress/include/mach/clkdev.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef __ASM_MACH_CLKDEV_H
-#define __ASM_MACH_CLKDEV_H
-
-#include <plat/clock.h>
-
-struct clk {
- const struct clk_ops *ops;
- unsigned long rate;
- const struct icst_params *params;
-};
-
-#define __clk_get(clk) ({ 1; })
-#define __clk_put(clk) do { } while (0)
-
-#endif
diff --git a/arch/arm/mach-vexpress/include/mach/debug-macro.S b/arch/arm/mach-vexpress/include/mach/debug-macro.S
index fa8224794e0b..9f509f55d078 100644
--- a/arch/arm/mach-vexpress/include/mach/debug-macro.S
+++ b/arch/arm/mach-vexpress/include/mach/debug-macro.S
@@ -18,6 +18,8 @@
#define DEBUG_LL_VIRT_BASE 0xf8000000
+#if defined(CONFIG_DEBUG_VEXPRESS_UART0_DETECT)
+
.macro addruart,rp,rv,tmp
@ Make an educated guess regarding the memory map:
@@ -41,3 +43,42 @@
.endm
#include <asm/hardware/debug-pl01x.S>
+
+#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CA9)
+
+ .macro addruart,rp,rv,tmp
+ mov \rp, #DEBUG_LL_UART_OFFSET
+ orr \rv, \rp, #DEBUG_LL_VIRT_BASE
+ orr \rp, \rp, #DEBUG_LL_PHYS_BASE
+ .endm
+
+#include <asm/hardware/debug-pl01x.S>
+
+#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_RS1)
+
+ .macro addruart,rp,rv,tmp
+ mov \rp, #DEBUG_LL_UART_OFFSET_RS1
+ orr \rv, \rp, #DEBUG_LL_VIRT_BASE
+ orr \rp, \rp, #DEBUG_LL_PHYS_BASE_RS1
+ .endm
+
+#include <asm/hardware/debug-pl01x.S>
+
+#else /* CONFIG_DEBUG_LL_UART_NONE */
+
+ .macro addruart, rp, rv, tmp
+ /* Safe dummy values */
+ mov \rp, #0
+ mov \rv, #DEBUG_LL_VIRT_BASE
+ .endm
+
+ .macro senduart,rd,rx
+ .endm
+
+ .macro waituart,rd,rx
+ .endm
+
+ .macro busyuart,rd,rx
+ .endm
+
+#endif
diff --git a/arch/arm/mach-vexpress/include/mach/motherboard.h b/arch/arm/mach-vexpress/include/mach/motherboard.h
index 31a92890893d..1e388c7bf4d7 100644
--- a/arch/arm/mach-vexpress/include/mach/motherboard.h
+++ b/arch/arm/mach-vexpress/include/mach/motherboard.h
@@ -1,6 +1,8 @@
#ifndef __MACH_MOTHERBOARD_H
#define __MACH_MOTHERBOARD_H
+#include <linux/clk-provider.h>
+
/*
* Physical addresses, offset from V2M_PA_CS0-3
*/
@@ -104,9 +106,10 @@
#define SYS_CFG_REBOOT (9 << 20)
#define SYS_CFG_DVIMODE (11 << 20)
#define SYS_CFG_POWER (12 << 20)
-#define SYS_CFG_SITE_MB (0 << 16)
-#define SYS_CFG_SITE_DB1 (1 << 16)
-#define SYS_CFG_SITE_DB2 (2 << 16)
+#define SYS_CFG_SITE(n) ((n) << 16)
+#define SYS_CFG_SITE_MB 0
+#define SYS_CFG_SITE_DB1 1
+#define SYS_CFG_SITE_DB2 2
#define SYS_CFG_STACK(n) ((n) << 12)
#define SYS_CFG_ERR (1 << 1)
@@ -122,6 +125,8 @@ void v2m_flags_set(u32 data);
#define SYS_MISC_MASTERSITE (1 << 14)
#define SYS_PROCIDx_HBI_MASK 0xfff
+int v2m_get_master_site(void);
+
/*
* Core tile IDs
*/
@@ -144,4 +149,21 @@ struct ct_desc {
extern struct ct_desc *ct_desc;
+/*
+ * OSC clock provider
+ */
+struct v2m_osc {
+ struct clk_hw hw;
+ u8 site; /* 0 = motherboard, 1 = site 1, 2 = site 2 */
+ u8 stack; /* board stack position */
+ u16 osc;
+ unsigned long rate_min;
+ unsigned long rate_max;
+ unsigned long rate_default;
+};
+
+#define to_v2m_osc(osc) container_of(osc, struct v2m_osc, hw)
+
+struct clk *v2m_osc_register(const char *name, struct v2m_osc *osc);
+
#endif
diff --git a/arch/arm/mach-vexpress/include/mach/uncompress.h b/arch/arm/mach-vexpress/include/mach/uncompress.h
index 7dab5596b868..1e472eb0bbdc 100644
--- a/arch/arm/mach-vexpress/include/mach/uncompress.h
+++ b/arch/arm/mach-vexpress/include/mach/uncompress.h
@@ -27,6 +27,7 @@
static unsigned long get_uart_base(void)
{
+#if defined(CONFIG_DEBUG_VEXPRESS_UART0_DETECT)
unsigned long mpcore_periph;
/*
@@ -42,6 +43,13 @@ static unsigned long get_uart_base(void)
return UART_BASE;
else
return UART_BASE_RS1;
+#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CA9)
+ return UART_BASE;
+#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_RS1)
+ return UART_BASE_RS1;
+#else
+ return 0;
+#endif
}
/*
@@ -51,6 +59,9 @@ static inline void putc(int c)
{
unsigned long base = get_uart_base();
+ if (!base)
+ return;
+
while (AMBA_UART_FR(base) & (1 << 5))
barrier();
@@ -61,6 +72,9 @@ static inline void flush(void)
{
unsigned long base = get_uart_base();
+ if (!base)
+ return;
+
while (AMBA_UART_FR(base) & (1 << 3))
barrier();
}
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index fde26adaef32..37608f22ee31 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -16,7 +16,10 @@
#include <linux/spinlock.h>
#include <linux/usb/isp1760.h>
#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
#include <linux/mtd/physmap.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <asm/arch_timer.h>
#include <asm/mach-types.h>
@@ -81,16 +84,6 @@ static void __init v2m_sp804_init(void __iomem *base, unsigned int irq)
sp804_clockevents_init(base + TIMER_1_BASE, irq, "v2m-timer0");
}
-static void __init v2m_timer_init(void)
-{
- v2m_sysctl_init(ioremap(V2M_SYSCTL, SZ_4K));
- v2m_sp804_init(ioremap(V2M_TIMER01, SZ_4K), IRQ_V2M_TIMER0);
-}
-
-static struct sys_timer v2m_timer = {
- .init = v2m_timer_init,
-};
-
static DEFINE_SPINLOCK(v2m_cfg_lock);
@@ -147,6 +140,13 @@ void __init v2m_flags_set(u32 data)
writel(data, v2m_sysreg_base + V2M_SYS_FLAGSSET);
}
+int v2m_get_master_site(void)
+{
+ u32 misc = readl(v2m_sysreg_base + V2M_SYS_MISC);
+
+ return misc & SYS_MISC_MASTERSITE ? SYS_CFG_SITE_DB2 : SYS_CFG_SITE_DB1;
+}
+
static struct resource v2m_pcie_i2c_resource = {
.start = V2M_SERIAL_BUS_PCI,
@@ -201,6 +201,11 @@ static struct platform_device v2m_eth_device = {
.dev.platform_data = &v2m_eth_config,
};
+static struct regulator_consumer_supply v2m_eth_supplies[] = {
+ REGULATOR_SUPPLY("vddvario", "smsc911x"),
+ REGULATOR_SUPPLY("vdd33a", "smsc911x"),
+};
+
static struct resource v2m_usb_resources[] = {
{
.start = V2M_ISP1761,
@@ -319,98 +324,145 @@ static struct amba_device *v2m_amba_devs[] __initdata = {
};
-static long v2m_osc_round(struct clk *clk, unsigned long rate)
+static unsigned long v2m_osc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct v2m_osc *osc = to_v2m_osc(hw);
+
+ return !parent_rate ? osc->rate_default : parent_rate;
+}
+
+static long v2m_osc_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
{
+ struct v2m_osc *osc = to_v2m_osc(hw);
+
+ if (WARN_ON(rate < osc->rate_min))
+ rate = osc->rate_min;
+
+ if (WARN_ON(rate > osc->rate_max))
+ rate = osc->rate_max;
+
return rate;
}
-static int v2m_osc1_set(struct clk *clk, unsigned long rate)
+static int v2m_osc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
{
- return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE_MB | 1, rate);
+ struct v2m_osc *osc = to_v2m_osc(hw);
+
+ v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE(osc->site) |
+ SYS_CFG_STACK(osc->stack) | osc->osc, rate);
+
+ return 0;
}
-static const struct clk_ops osc1_clk_ops = {
- .round = v2m_osc_round,
- .set = v2m_osc1_set,
-};
-
-static struct clk osc1_clk = {
- .ops = &osc1_clk_ops,
- .rate = 24000000,
-};
-
-static struct clk osc2_clk = {
- .rate = 24000000,
-};
-
-static struct clk v2m_sp804_clk = {
- .rate = 1000000,
-};
-
-static struct clk v2m_ref_clk = {
- .rate = 32768,
-};
-
-static struct clk dummy_apb_pclk;
-
-static struct clk_lookup v2m_lookups[] = {
- { /* AMBA bus clock */
- .con_id = "apb_pclk",
- .clk = &dummy_apb_pclk,
- }, { /* UART0 */
- .dev_id = "mb:uart0",
- .clk = &osc2_clk,
- }, { /* UART1 */
- .dev_id = "mb:uart1",
- .clk = &osc2_clk,
- }, { /* UART2 */
- .dev_id = "mb:uart2",
- .clk = &osc2_clk,
- }, { /* UART3 */
- .dev_id = "mb:uart3",
- .clk = &osc2_clk,
- }, { /* KMI0 */
- .dev_id = "mb:kmi0",
- .clk = &osc2_clk,
- }, { /* KMI1 */
- .dev_id = "mb:kmi1",
- .clk = &osc2_clk,
- }, { /* MMC0 */
- .dev_id = "mb:mmci",
- .clk = &osc2_clk,
- }, { /* CLCD */
- .dev_id = "mb:clcd",
- .clk = &osc1_clk,
- }, { /* SP805 WDT */
- .dev_id = "mb:wdt",
- .clk = &v2m_ref_clk,
- }, { /* SP804 timers */
- .dev_id = "sp804",
- .con_id = "v2m-timer0",
- .clk = &v2m_sp804_clk,
- }, { /* SP804 timers */
- .dev_id = "sp804",
- .con_id = "v2m-timer1",
- .clk = &v2m_sp804_clk,
- },
+static struct clk_ops v2m_osc_ops = {
+ .recalc_rate = v2m_osc_recalc_rate,
+ .round_rate = v2m_osc_round_rate,
+ .set_rate = v2m_osc_set_rate,
+};
+
+struct clk * __init v2m_osc_register(const char *name, struct v2m_osc *osc)
+{
+ struct clk_init_data init;
+
+ WARN_ON(osc->site > 2);
+ WARN_ON(osc->stack > 15);
+ WARN_ON(osc->osc > 4095);
+
+ init.name = name;
+ init.ops = &v2m_osc_ops;
+ init.flags = CLK_IS_ROOT;
+ init.num_parents = 0;
+
+ osc->hw.init = &init;
+
+ return clk_register(NULL, &osc->hw);
+}
+
+static struct v2m_osc v2m_mb_osc1 = {
+ .site = SYS_CFG_SITE_MB,
+ .osc = 1,
+ .rate_min = 23750000,
+ .rate_max = 63500000,
+ .rate_default = 23750000,
+};
+
+static const char *v2m_ref_clk_periphs[] __initconst = {
+ "mb:wdt", "1000f000.wdt", "1c0f0000.wdt", /* SP805 WDT */
+};
+
+static const char *v2m_osc1_periphs[] __initconst = {
+ "mb:clcd", "1001f000.clcd", "1c1f0000.clcd", /* PL111 CLCD */
+};
+
+static const char *v2m_osc2_periphs[] __initconst = {
+ "mb:mmci", "10005000.mmci", "1c050000.mmci", /* PL180 MMCI */
+ "mb:kmi0", "10006000.kmi", "1c060000.kmi", /* PL050 KMI0 */
+ "mb:kmi1", "10007000.kmi", "1c070000.kmi", /* PL050 KMI1 */
+ "mb:uart0", "10009000.uart", "1c090000.uart", /* PL011 UART0 */
+ "mb:uart1", "1000a000.uart", "1c0a0000.uart", /* PL011 UART1 */
+ "mb:uart2", "1000b000.uart", "1c0b0000.uart", /* PL011 UART2 */
+ "mb:uart3", "1000c000.uart", "1c0c0000.uart", /* PL011 UART3 */
+};
+
+static void __init v2m_clk_init(void)
+{
+ struct clk *clk;
+ int i;
+
+ clk = clk_register_fixed_rate(NULL, "dummy_apb_pclk", NULL,
+ CLK_IS_ROOT, 0);
+ WARN_ON(clk_register_clkdev(clk, "apb_pclk", NULL));
+
+ clk = clk_register_fixed_rate(NULL, "mb:ref_clk", NULL,
+ CLK_IS_ROOT, 32768);
+ for (i = 0; i < ARRAY_SIZE(v2m_ref_clk_periphs); i++)
+ WARN_ON(clk_register_clkdev(clk, NULL, v2m_ref_clk_periphs[i]));
+
+ clk = clk_register_fixed_rate(NULL, "mb:sp804_clk", NULL,
+ CLK_IS_ROOT, 1000000);
+ WARN_ON(clk_register_clkdev(clk, "v2m-timer0", "sp804"));
+ WARN_ON(clk_register_clkdev(clk, "v2m-timer1", "sp804"));
+
+ clk = v2m_osc_register("mb:osc1", &v2m_mb_osc1);
+ for (i = 0; i < ARRAY_SIZE(v2m_osc1_periphs); i++)
+ WARN_ON(clk_register_clkdev(clk, NULL, v2m_osc1_periphs[i]));
+
+ clk = clk_register_fixed_rate(NULL, "mb:osc2", NULL,
+ CLK_IS_ROOT, 24000000);
+ for (i = 0; i < ARRAY_SIZE(v2m_osc2_periphs); i++)
+ WARN_ON(clk_register_clkdev(clk, NULL, v2m_osc2_periphs[i]));
+}
+
+static void __init v2m_timer_init(void)
+{
+ v2m_sysctl_init(ioremap(V2M_SYSCTL, SZ_4K));
+ v2m_clk_init();
+ v2m_sp804_init(ioremap(V2M_TIMER01, SZ_4K), IRQ_V2M_TIMER0);
+}
+
+static struct sys_timer v2m_timer = {
+ .init = v2m_timer_init,
};
static void __init v2m_init_early(void)
{
- ct_desc->init_early();
- clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
+ if (ct_desc->init_early)
+ ct_desc->init_early();
versatile_sched_clock_init(v2m_sysreg_base + V2M_SYS_24MHZ, 24000000);
}
static void v2m_power_off(void)
{
- if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0))
+ if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE(SYS_CFG_SITE_MB), 0))
printk(KERN_EMERG "Unable to shutdown\n");
}
static void v2m_restart(char str, const char *cmd)
{
- if (v2m_cfg_write(SYS_CFG_REBOOT | SYS_CFG_SITE_MB, 0))
+ if (v2m_cfg_write(SYS_CFG_REBOOT | SYS_CFG_SITE(SYS_CFG_SITE_MB), 0))
printk(KERN_EMERG "Unable to reboot\n");
}
@@ -458,6 +510,9 @@ static void __init v2m_init(void)
{
int i;
+ regulator_register_fixed(0, v2m_eth_supplies,
+ ARRAY_SIZE(v2m_eth_supplies));
+
platform_device_register(&v2m_pcie_i2c_device);
platform_device_register(&v2m_ddc_i2c_device);
platform_device_register(&v2m_flash_device);
@@ -522,77 +577,6 @@ void __init v2m_dt_map_io(void)
#endif
}
-static struct clk_lookup v2m_dt_lookups[] = {
- { /* AMBA bus clock */
- .con_id = "apb_pclk",
- .clk = &dummy_apb_pclk,
- }, { /* SP804 timers */
- .dev_id = "sp804",
- .con_id = "v2m-timer0",
- .clk = &v2m_sp804_clk,
- }, { /* SP804 timers */
- .dev_id = "sp804",
- .con_id = "v2m-timer1",
- .clk = &v2m_sp804_clk,
- }, { /* PL180 MMCI */
- .dev_id = "mb:mmci", /* 10005000.mmci */
- .clk = &osc2_clk,
- }, { /* PL050 KMI0 */
- .dev_id = "10006000.kmi",
- .clk = &osc2_clk,
- }, { /* PL050 KMI1 */
- .dev_id = "10007000.kmi",
- .clk = &osc2_clk,
- }, { /* PL011 UART0 */
- .dev_id = "10009000.uart",
- .clk = &osc2_clk,
- }, { /* PL011 UART1 */
- .dev_id = "1000a000.uart",
- .clk = &osc2_clk,
- }, { /* PL011 UART2 */
- .dev_id = "1000b000.uart",
- .clk = &osc2_clk,
- }, { /* PL011 UART3 */
- .dev_id = "1000c000.uart",
- .clk = &osc2_clk,
- }, { /* SP805 WDT */
- .dev_id = "1000f000.wdt",
- .clk = &v2m_ref_clk,
- }, { /* PL111 CLCD */
- .dev_id = "1001f000.clcd",
- .clk = &osc1_clk,
- },
- /* RS1 memory map */
- { /* PL180 MMCI */
- .dev_id = "mb:mmci", /* 1c050000.mmci */
- .clk = &osc2_clk,
- }, { /* PL050 KMI0 */
- .dev_id = "1c060000.kmi",
- .clk = &osc2_clk,
- }, { /* PL050 KMI1 */
- .dev_id = "1c070000.kmi",
- .clk = &osc2_clk,
- }, { /* PL011 UART0 */
- .dev_id = "1c090000.uart",
- .clk = &osc2_clk,
- }, { /* PL011 UART1 */
- .dev_id = "1c0a0000.uart",
- .clk = &osc2_clk,
- }, { /* PL011 UART2 */
- .dev_id = "1c0b0000.uart",
- .clk = &osc2_clk,
- }, { /* PL011 UART3 */
- .dev_id = "1c0c0000.uart",
- .clk = &osc2_clk,
- }, { /* SP805 WDT */
- .dev_id = "1c0f0000.wdt",
- .clk = &v2m_ref_clk,
- }, { /* PL111 CLCD */
- .dev_id = "1c1f0000.clcd",
- .clk = &osc1_clk,
- },
-};
-
void __init v2m_dt_init_early(void)
{
struct device_node *node;
@@ -605,8 +589,8 @@ void __init v2m_dt_init_early(void)
/* Confirm board type against DT property, if available */
if (of_property_read_u32(allnodes, "arm,hbi", &dt_hbi) == 0) {
- u32 misc = readl(v2m_sysreg_base + V2M_SYS_MISC);
- u32 id = readl(v2m_sysreg_base + (misc & SYS_MISC_MASTERSITE ?
+ int site = v2m_get_master_site();
+ u32 id = readl(v2m_sysreg_base + (site == SYS_CFG_SITE_DB2 ?
V2M_SYS_PROCID1 : V2M_SYS_PROCID0));
u32 hbi = id & SYS_PROCIDx_HBI_MASK;
@@ -614,8 +598,6 @@ void __init v2m_dt_init_early(void)
pr_warning("vexpress: DT HBI (%x) is not matching "
"hardware (%x)!\n", dt_hbi, hbi);
}
-
- clkdev_add_table(v2m_dt_lookups, ARRAY_SIZE(v2m_dt_lookups));
}
static struct of_device_id vexpress_irq_match[] __initdata = {
@@ -637,6 +619,8 @@ static void __init v2m_dt_timer_init(void)
node = of_find_compatible_node(NULL, NULL, "arm,sp810");
v2m_sysctl_init(of_iomap(node, 0));
+ v2m_clk_init();
+
err = of_property_read_string(of_aliases, "arm,v2m_timer", &path);
if (WARN_ON(err))
return;
diff --git a/arch/arm/mach-vt8500/Makefile b/arch/arm/mach-vt8500/Makefile
index 81aedb7c893c..54e69973f39b 100644
--- a/arch/arm/mach-vt8500/Makefile
+++ b/arch/arm/mach-vt8500/Makefile
@@ -1,4 +1,4 @@
-obj-y += devices.o gpio.o irq.o timer.o
+obj-y += devices.o gpio.o irq.o timer.o restart.o
obj-$(CONFIG_VTWM_VERSION_VT8500) += devices-vt8500.o
obj-$(CONFIG_VTWM_VERSION_WM8505) += devices-wm8505.o
diff --git a/arch/arm/mach-vt8500/bv07.c b/arch/arm/mach-vt8500/bv07.c
index a464c7584411..f9fbeb2d10e9 100644
--- a/arch/arm/mach-vt8500/bv07.c
+++ b/arch/arm/mach-vt8500/bv07.c
@@ -23,6 +23,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <mach/restart.h>
#include "devices.h"
@@ -62,6 +63,7 @@ void __init bv07_init(void)
else
printk(KERN_ERR "PMC Hibernation register could not be remapped, not enabling power off!\n");
+ wmt_setup_restart();
vt8500_set_resources();
platform_add_devices(devices, ARRAY_SIZE(devices));
vt8500_gpio_init();
@@ -69,6 +71,7 @@ void __init bv07_init(void)
MACHINE_START(BV07, "Benign BV07 Mini Netbook")
.atag_offset = 0x100,
+ .restart = wmt_restart,
.reserve = vt8500_reserve_mem,
.map_io = vt8500_map_io,
.init_irq = vt8500_init_irq,
diff --git a/arch/arm/mach-vt8500/include/mach/restart.h b/arch/arm/mach-vt8500/include/mach/restart.h
new file mode 100644
index 000000000000..89f9b787d2a0
--- /dev/null
+++ b/arch/arm/mach-vt8500/include/mach/restart.h
@@ -0,0 +1,17 @@
+/* linux/arch/arm/mach-vt8500/restart.h
+ *
+ * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+void wmt_setup_restart(void);
+void wmt_restart(char mode, const char *cmd);
diff --git a/arch/arm/mach-vt8500/include/mach/system.h b/arch/arm/mach-vt8500/include/mach/system.h
deleted file mode 100644
index 58fa8010ee61..000000000000
--- a/arch/arm/mach-vt8500/include/mach/system.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/*
- * arch/arm/mach-vt8500/include/mach/system.h
- *
- */
-#include <asm/io.h>
-
-/* PM Software Reset request register */
-#define VT8500_PMSR_VIRT 0xf8130060
-
-static inline void arch_reset(char mode, const char *cmd)
-{
- writel(1, VT8500_PMSR_VIRT);
-}
diff --git a/arch/arm/mach-vt8500/restart.c b/arch/arm/mach-vt8500/restart.c
new file mode 100644
index 000000000000..497e89a5e130
--- /dev/null
+++ b/arch/arm/mach-vt8500/restart.c
@@ -0,0 +1,54 @@
+/* linux/arch/arm/mach-vt8500/restart.c
+ *
+ * Copyright (C) 2012 Tony Prisk <linux@prisktech.co.nz>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <asm/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define LEGACY_PMC_BASE 0xD8130000
+#define WMT_PRIZM_PMSR_REG 0x60
+
+static void __iomem *pmc_base;
+
+void wmt_setup_restart(void)
+{
+ struct device_node *np;
+
+ /*
+ * Check if Power Mgmt Controller node is present in device tree. If no
+ * device tree node, use the legacy PMSR value (valid for all current
+ * SoCs).
+ */
+ np = of_find_compatible_node(NULL, NULL, "wmt,prizm-pmc");
+ if (np) {
+ pmc_base = of_iomap(np, 0);
+
+ if (!pmc_base)
+ pr_err("%s:of_iomap(pmc) failed\n", __func__);
+
+ of_node_put(np);
+ } else {
+ pmc_base = ioremap(LEGACY_PMC_BASE, 0x1000);
+ if (!pmc_base) {
+ pr_err("%s:ioremap(rstc) failed\n", __func__);
+ return;
+ }
+ }
+}
+
+void wmt_restart(char mode, const char *cmd)
+{
+ if (pmc_base)
+ writel(1, pmc_base + WMT_PRIZM_PMSR_REG);
+}
diff --git a/arch/arm/mach-vt8500/wm8505_7in.c b/arch/arm/mach-vt8500/wm8505_7in.c
index cf910a956080..db19886caf7c 100644
--- a/arch/arm/mach-vt8500/wm8505_7in.c
+++ b/arch/arm/mach-vt8500/wm8505_7in.c
@@ -23,6 +23,7 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <mach/restart.h>
#include "devices.h"
@@ -61,7 +62,7 @@ void __init wm8505_7in_init(void)
pm_power_off = &vt8500_power_off;
else
printk(KERN_ERR "PMC Hibernation register could not be remapped, not enabling power off!\n");
-
+ wmt_setup_restart();
wm8505_set_resources();
platform_add_devices(devices, ARRAY_SIZE(devices));
vt8500_gpio_init();
@@ -69,6 +70,7 @@ void __init wm8505_7in_init(void)
MACHINE_START(WM8505_7IN_NETBOOK, "WM8505 7-inch generic netbook")
.atag_offset = 0x100,
+ .restart = wmt_restart,
.reserve = wm8505_reserve_mem,
.map_io = wm8505_map_io,
.init_irq = wm8505_init_irq,
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4044abcf6f9d..655878bcc96d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1091,7 +1091,7 @@ error:
while (--i)
if (pages[i])
__free_pages(pages[i], 0);
- if (array_size < PAGE_SIZE)
+ if (array_size <= PAGE_SIZE)
kfree(pages);
else
vfree(pages);
@@ -1106,7 +1106,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
for (i = 0; i < count; i++)
if (pages[i])
__free_pages(pages[i], 0);
- if (array_size < PAGE_SIZE)
+ if (array_size <= PAGE_SIZE)
kfree(pages);
else
vfree(pages);
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index c471436c7952..2e8a1efdf7b8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -64,7 +64,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
#ifdef CONFIG_ZONE_DMA
extern phys_addr_t arm_dma_limit;
#else
-#define arm_dma_limit ((u32)~0)
+#define arm_dma_limit ((phys_addr_t)~0)
#endif
extern phys_addr_t arm_lowmem_limit;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e5dad60b558b..cf4528d51774 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -791,6 +791,79 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
}
}
+#ifndef CONFIG_ARM_LPAE
+
+/*
+ * The Linux PMD is made of two consecutive section entries covering 2MB
+ * (see definition in include/asm/pgtable-2level.h). However a call to
+ * create_mapping() may optimize static mappings by using individual
+ * 1MB section mappings. This leaves the actual PMD potentially half
+ * initialized if the top or bottom section entry isn't used, leaving it
+ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ * the virtual space left free by that unused section entry.
+ *
+ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ * PMD halves once the static mappings are in place.
+ */
+
+static void __init pmd_empty_section_gap(unsigned long addr)
+{
+ struct vm_struct *vm;
+
+ vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+ vm->addr = (void *)addr;
+ vm->size = SECTION_SIZE;
+ vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+ vm->caller = pmd_empty_section_gap;
+ vm_area_add_early(vm);
+}
+
+static void __init fill_pmd_gaps(void)
+{
+ struct vm_struct *vm;
+ unsigned long addr, next = 0;
+ pmd_t *pmd;
+
+ /* we're still single threaded hence no lock needed here */
+ for (vm = vmlist; vm; vm = vm->next) {
+ if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ continue;
+ addr = (unsigned long)vm->addr;
+ if (addr < next)
+ continue;
+
+ /*
+ * Check if this vm starts on an odd section boundary.
+ * If so and the first section entry for this PMD is free
+ * then we block the corresponding virtual address.
+ */
+ if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+ pmd = pmd_off_k(addr);
+ if (pmd_none(*pmd))
+ pmd_empty_section_gap(addr & PMD_MASK);
+ }
+
+ /*
+ * Then check if this vm ends on an odd section boundary.
+ * If so and the second section entry for this PMD is empty
+ * then we block the corresponding virtual address.
+ */
+ addr += vm->size;
+ if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+ pmd = pmd_off_k(addr) + 1;
+ if (pmd_none(*pmd))
+ pmd_empty_section_gap(addr);
+ }
+
+ /* no need to look at any vm entry until we hit the next PMD */
+ next = (addr + PMD_SIZE - 1) & PMD_MASK;
+ }
+}
+
+#else
+#define fill_pmd_gaps() do { } while (0)
+#endif
+
static void * __initdata vmalloc_min =
(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
@@ -1072,6 +1145,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
if (mdesc->map_io)
mdesc->map_io();
+ fill_pmd_gaps();
/*
* Finally flush the caches and tlb to ensure that we're in a
diff --git a/arch/arm/plat-mxc/3ds_debugboard.c b/arch/arm/plat-mxc/3ds_debugboard.c
index 5cac2c540f4f..5c10ad05df74 100644
--- a/arch/arm/plat-mxc/3ds_debugboard.c
+++ b/arch/arm/plat-mxc/3ds_debugboard.c
@@ -12,9 +12,11 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
+#include <linux/module.h>
#include <linux/smsc911x.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
@@ -48,27 +50,22 @@
/* CPU ID and Personality ID */
#define MCU_BOARD_ID_REG 0x68
-#define MXC_IRQ_TO_EXPIO(irq) ((irq) - MXC_BOARD_IRQ_START)
-#define MXC_IRQ_TO_GPIO(irq) ((irq) - MXC_INTERNAL_IRQS)
-
-#define MXC_EXP_IO_BASE (MXC_BOARD_IRQ_START)
#define MXC_MAX_EXP_IO_LINES 16
/* interrupts like external uart , external ethernet etc*/
-#define EXPIO_INT_ENET (MXC_BOARD_IRQ_START + 0)
-#define EXPIO_INT_XUART_A (MXC_BOARD_IRQ_START + 1)
-#define EXPIO_INT_XUART_B (MXC_BOARD_IRQ_START + 2)
-#define EXPIO_INT_BUTTON_A (MXC_BOARD_IRQ_START + 3)
-#define EXPIO_INT_BUTTON_B (MXC_BOARD_IRQ_START + 4)
+#define EXPIO_INT_ENET 0
+#define EXPIO_INT_XUART_A 1
+#define EXPIO_INT_XUART_B 2
+#define EXPIO_INT_BUTTON_A 3
+#define EXPIO_INT_BUTTON_B 4
static void __iomem *brd_io;
+static struct irq_domain *domain;
static struct resource smsc911x_resources[] = {
{
.flags = IORESOURCE_MEM,
} , {
- .start = EXPIO_INT_ENET,
- .end = EXPIO_INT_ENET,
.flags = IORESOURCE_IRQ,
},
};
@@ -100,11 +97,11 @@ static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc)
imr_val = __raw_readw(brd_io + INTR_MASK_REG);
int_valid = __raw_readw(brd_io + INTR_STATUS_REG) & ~imr_val;
- expio_irq = MXC_BOARD_IRQ_START;
+ expio_irq = 0;
for (; int_valid != 0; int_valid >>= 1, expio_irq++) {
if ((int_valid & 1) == 0)
continue;
- generic_handle_irq(expio_irq);
+ generic_handle_irq(irq_find_mapping(domain, expio_irq));
}
desc->irq_data.chip->irq_ack(&desc->irq_data);
@@ -118,7 +115,7 @@ static void mxc_expio_irq_handler(u32 irq, struct irq_desc *desc)
static void expio_mask_irq(struct irq_data *d)
{
u16 reg;
- u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
+ u32 expio = d->hwirq;
reg = __raw_readw(brd_io + INTR_MASK_REG);
reg |= (1 << expio);
@@ -127,7 +124,7 @@ static void expio_mask_irq(struct irq_data *d)
static void expio_ack_irq(struct irq_data *d)
{
- u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
+ u32 expio = d->hwirq;
__raw_writew(1 << expio, brd_io + INTR_RESET_REG);
__raw_writew(0, brd_io + INTR_RESET_REG);
@@ -137,7 +134,7 @@ static void expio_ack_irq(struct irq_data *d)
static void expio_unmask_irq(struct irq_data *d)
{
u16 reg;
- u32 expio = MXC_IRQ_TO_EXPIO(d->irq);
+ u32 expio = d->hwirq;
reg = __raw_readw(brd_io + INTR_MASK_REG);
reg &= ~(1 << expio);
@@ -155,8 +152,10 @@ static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x"),
};
-int __init mxc_expio_init(u32 base, u32 p_irq)
+int __init mxc_expio_init(u32 base, u32 intr_gpio)
{
+ u32 p_irq = gpio_to_irq(intr_gpio);
+ int irq_base;
int i;
brd_io = ioremap(BOARD_IO_ADDR(base), SZ_4K);
@@ -178,16 +177,23 @@ int __init mxc_expio_init(u32 base, u32 p_irq)
/*
* Configure INT line as GPIO input
*/
- gpio_request(MXC_IRQ_TO_GPIO(p_irq), "expio_pirq");
- gpio_direction_input(MXC_IRQ_TO_GPIO(p_irq));
+ gpio_request(intr_gpio, "expio_pirq");
+ gpio_direction_input(intr_gpio);
/* disable the interrupt and clear the status */
__raw_writew(0, brd_io + INTR_MASK_REG);
__raw_writew(0xFFFF, brd_io + INTR_RESET_REG);
__raw_writew(0, brd_io + INTR_RESET_REG);
__raw_writew(0x1F, brd_io + INTR_MASK_REG);
- for (i = MXC_EXP_IO_BASE;
- i < (MXC_EXP_IO_BASE + MXC_MAX_EXP_IO_LINES); i++) {
+
+ irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
+ WARN_ON(irq_base < 0);
+
+ domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ WARN_ON(!domain);
+
+ for (i = irq_base; i < irq_base + MXC_MAX_EXP_IO_LINES; i++) {
irq_set_chip_and_handler(i, &expio_irq_chip, handle_level_irq);
set_irq_flags(i, IRQF_VALID);
}
@@ -199,6 +205,8 @@ int __init mxc_expio_init(u32 base, u32 p_irq)
smsc911x_resources[0].start = LAN9217_BASE_ADDR(base);
smsc911x_resources[0].end = LAN9217_BASE_ADDR(base) + 0x100 - 1;
+ smsc911x_resources[1].start = irq_find_mapping(domain, EXPIO_INT_ENET);
+ smsc911x_resources[1].end = irq_find_mapping(domain, EXPIO_INT_ENET);
platform_device_register(&smsc_lan9217_device);
return 0;
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index e81290c27c65..63b064b5c1d5 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MXC_ULPI) += ulpi.o
obj-$(CONFIG_MXC_USE_EPIT) += epit.o
obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o
obj-$(CONFIG_CPU_FREQ_IMX) += cpufreq.o
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
ifdef CONFIG_SND_IMX_SOC
obj-y += ssi-fiq.o
obj-y += ssi-fiq-ksym.o
diff --git a/arch/arm/plat-mxc/avic.c b/arch/arm/plat-mxc/avic.c
index 689f81f9593b..cbd55c36def3 100644
--- a/arch/arm/plat-mxc/avic.c
+++ b/arch/arm/plat-mxc/avic.c
@@ -19,11 +19,14 @@
#include <linux/module.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <mach/common.h>
#include <asm/mach/irq.h>
#include <asm/exception.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include "irq-common.h"
@@ -50,15 +53,19 @@
#define AVIC_NUM_IRQS 64
void __iomem *avic_base;
+static struct irq_domain *domain;
static u32 avic_saved_mask_reg[2];
#ifdef CONFIG_MXC_IRQ_PRIOR
static int avic_irq_set_priority(unsigned char irq, unsigned char prio)
{
+ struct irq_data *d = irq_get_irq_data(irq);
unsigned int temp;
unsigned int mask = 0x0F << irq % 8 * 4;
+ irq = d->hwirq;
+
if (irq >= AVIC_NUM_IRQS)
return -EINVAL;
@@ -75,8 +82,11 @@ static int avic_irq_set_priority(unsigned char irq, unsigned char prio)
#ifdef CONFIG_FIQ
static int avic_set_irq_fiq(unsigned int irq, unsigned int type)
{
+ struct irq_data *d = irq_get_irq_data(irq);
unsigned int irqt;
+ irq = d->hwirq;
+
if (irq >= AVIC_NUM_IRQS)
return -EINVAL;
@@ -108,7 +118,7 @@ static void avic_irq_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = gc->chip_types;
- int idx = gc->irq_base >> 5;
+ int idx = d->hwirq >> 5;
avic_saved_mask_reg[idx] = __raw_readl(avic_base + ct->regs.mask);
__raw_writel(gc->wake_active, avic_base + ct->regs.mask);
@@ -118,7 +128,7 @@ static void avic_irq_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = gc->chip_types;
- int idx = gc->irq_base >> 5;
+ int idx = d->hwirq >> 5;
__raw_writel(avic_saved_mask_reg[idx], avic_base + ct->regs.mask);
}
@@ -128,11 +138,10 @@ static void avic_irq_resume(struct irq_data *d)
#define avic_irq_resume NULL
#endif
-static __init void avic_init_gc(unsigned int irq_start)
+static __init void avic_init_gc(int idx, unsigned int irq_start)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
- int idx = irq_start >> 5;
gc = irq_alloc_generic_chip("mxc-avic", 1, irq_start, avic_base,
handle_level_irq);
@@ -161,7 +170,7 @@ asmlinkage void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
if (nivector == 0xffff)
break;
- handle_IRQ(nivector, regs);
+ handle_IRQ(irq_find_mapping(domain, nivector), regs);
} while (1);
}
@@ -172,6 +181,8 @@ asmlinkage void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
*/
void __init mxc_init_irq(void __iomem *irqbase)
{
+ struct device_node *np;
+ int irq_base;
int i;
avic_base = irqbase;
@@ -190,8 +201,16 @@ void __init mxc_init_irq(void __iomem *irqbase)
__raw_writel(0, avic_base + AVIC_INTTYPEH);
__raw_writel(0, avic_base + AVIC_INTTYPEL);
- for (i = 0; i < AVIC_NUM_IRQS; i += 32)
- avic_init_gc(i);
+ irq_base = irq_alloc_descs(-1, 0, AVIC_NUM_IRQS, numa_node_id());
+ WARN_ON(irq_base < 0);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,avic");
+ domain = irq_domain_add_legacy(np, AVIC_NUM_IRQS, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ WARN_ON(!domain);
+
+ for (i = 0; i < AVIC_NUM_IRQS / 32; i++, irq_base += 32)
+ avic_init_gc(i, irq_base);
/* Set default priority value (0) for all IRQ's */
for (i = 0; i < 8; i++)
@@ -199,7 +218,7 @@ void __init mxc_init_irq(void __iomem *irqbase)
#ifdef CONFIG_FIQ
/* Initialize FIQ */
- init_FIQ();
+ init_FIQ(FIQ_START);
#endif
printk(KERN_INFO "MXC IRQ initialized\n");
diff --git a/arch/arm/plat-mxc/cpuidle.c b/arch/arm/plat-mxc/cpuidle.c
new file mode 100644
index 000000000000..d4cb511a44a8
--- /dev/null
+++ b/arch/arm/plat-mxc/cpuidle.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/cpuidle.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+static struct cpuidle_device __percpu * imx_cpuidle_devices;
+
+static void __init imx_cpuidle_devices_uninit(void)
+{
+ int cpu_id;
+ struct cpuidle_device *dev;
+
+ for_each_possible_cpu(cpu_id) {
+ dev = per_cpu_ptr(imx_cpuidle_devices, cpu_id);
+ cpuidle_unregister_device(dev);
+ }
+
+ free_percpu(imx_cpuidle_devices);
+}
+
+int __init imx_cpuidle_init(struct cpuidle_driver *drv)
+{
+ struct cpuidle_device *dev;
+ int cpu_id, ret;
+
+ if (drv->state_count > CPUIDLE_STATE_MAX) {
+ pr_err("%s: state_count exceeds maximum\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = cpuidle_register_driver(drv);
+ if (ret) {
+ pr_err("%s: Failed to register cpuidle driver with error: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ imx_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (imx_cpuidle_devices == NULL) {
+ ret = -ENOMEM;
+ goto unregister_drv;
+ }
+
+ /* initialize state data for each cpuidle_device */
+ for_each_possible_cpu(cpu_id) {
+ dev = per_cpu_ptr(imx_cpuidle_devices, cpu_id);
+ dev->cpu = cpu_id;
+ dev->state_count = drv->state_count;
+
+ ret = cpuidle_register_device(dev);
+ if (ret) {
+ pr_err("%s: Failed to register cpu %u, error: %d\n",
+ __func__, cpu_id, ret);
+ goto uninit;
+ }
+ }
+
+ return 0;
+
+uninit:
+ imx_cpuidle_devices_uninit();
+
+unregister_drv:
+ cpuidle_unregister_driver(drv);
+ return ret;
+}
diff --git a/arch/arm/plat-mxc/devices/platform-ipu-core.c b/arch/arm/plat-mxc/devices/platform-ipu-core.c
index 79d340ae0af1..d1e33cc6f12e 100644
--- a/arch/arm/plat-mxc/devices/platform-ipu-core.c
+++ b/arch/arm/plat-mxc/devices/platform-ipu-core.c
@@ -30,8 +30,7 @@ const struct imx_ipu_core_data imx35_ipu_core_data __initconst =
static struct platform_device *imx_ipu_coredev __initdata;
struct platform_device *__init imx_add_ipu_core(
- const struct imx_ipu_core_data *data,
- const struct ipu_platform_data *pdata)
+ const struct imx_ipu_core_data *data)
{
/* The resource order is important! */
struct resource res[] = {
@@ -55,7 +54,7 @@ struct platform_device *__init imx_add_ipu_core(
};
return imx_ipu_coredev = imx_add_platform_device("ipu-core", -1,
- res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
+ res, ARRAY_SIZE(res), NULL, 0);
}
struct platform_device *__init imx_alloc_mx3_camera(
diff --git a/arch/arm/plat-mxc/devices/platform-mxc_rtc.c b/arch/arm/plat-mxc/devices/platform-mxc_rtc.c
index 16d0ec4df5f6..a5c9ad5721c2 100644
--- a/arch/arm/plat-mxc/devices/platform-mxc_rtc.c
+++ b/arch/arm/plat-mxc/devices/platform-mxc_rtc.c
@@ -20,6 +20,11 @@ const struct imx_mxc_rtc_data imx31_mxc_rtc_data __initconst =
imx_mxc_rtc_data_entry_single(MX31);
#endif /* ifdef CONFIG_SOC_IMX31 */
+#ifdef CONFIG_SOC_IMX35
+const struct imx_mxc_rtc_data imx35_mxc_rtc_data __initconst =
+ imx_mxc_rtc_data_entry_single(MX35);
+#endif /* ifdef CONFIG_SOC_IMX35 */
+
struct platform_device *__init imx_add_mxc_rtc(
const struct imx_mxc_rtc_data *data)
{
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index 9bfae8bd5b8d..9c50c14c8f92 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -95,7 +95,7 @@ const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = {
#ifdef CONFIG_SOC_IMX53
/* i.mx53 has the i.mx35 type cspi */
const struct imx_spi_imx_data imx53_cspi_data __initconst =
- imx_spi_imx_data_entry_single(MX53, CSPI, "imx35-cspi", 0, , SZ_4K);
+ imx_spi_imx_data_entry_single(MX53, CSPI, "imx35-cspi", 2, , SZ_4K);
/* i.mx53 has the i.mx51 type ecspi */
const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = {
diff --git a/arch/arm/plat-mxc/include/mach/3ds_debugboard.h b/arch/arm/plat-mxc/include/mach/3ds_debugboard.h
index a384fdd49c62..9fd6cb3f8fad 100644
--- a/arch/arm/plat-mxc/include/mach/3ds_debugboard.h
+++ b/arch/arm/plat-mxc/include/mach/3ds_debugboard.h
@@ -13,6 +13,6 @@
#ifndef __ASM_ARCH_MXC_3DS_DB_H__
#define __ASM_ARCH_MXC_3DS_DB_H__
-extern int __init mxc_expio_init(u32 base, u32 p_irq);
+extern int __init mxc_expio_init(u32 base, u32 intr_gpio);
#endif /* __ASM_ARCH_MXC_3DS_DB_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index e429ca1b814a..7128e9710417 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -54,6 +54,7 @@ extern void imx50_soc_init(void);
extern void imx51_soc_init(void);
extern void imx53_soc_init(void);
extern void imx51_init_late(void);
+extern void imx53_init_late(void);
extern void epit_timer_init(void __iomem *base, int irq);
extern void mxc_timer_init(void __iomem *, int);
extern int mx1_clocks_init(unsigned long fref);
@@ -67,6 +68,7 @@ extern int mx51_clocks_init(unsigned long ckil, unsigned long osc,
extern int mx53_clocks_init(unsigned long ckil, unsigned long osc,
unsigned long ckih1, unsigned long ckih2);
extern int mx27_clocks_init_dt(void);
+extern int mx31_clocks_init_dt(void);
extern int mx51_clocks_init_dt(void);
extern int mx53_clocks_init_dt(void);
extern int mx6q_clocks_init(void);
@@ -95,7 +97,6 @@ enum mx3_cpu_pwr_mode {
};
extern void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode);
-extern void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode);
extern void imx_print_silicon_rev(const char *cpu, int srev);
void avic_handle_irq(struct pt_regs *);
@@ -146,8 +147,12 @@ extern void imx6q_clock_map_io(void);
#ifdef CONFIG_PM
extern void imx6q_pm_init(void);
+extern void imx51_pm_init(void);
+extern void imx53_pm_init(void);
#else
static inline void imx6q_pm_init(void) {}
+static inline void imx51_pm_init(void) {}
+static inline void imx53_pm_init(void) {}
#endif
#ifdef CONFIG_NEON
diff --git a/arch/arm/plat-mxc/include/mach/cpuidle.h b/arch/arm/plat-mxc/include/mach/cpuidle.h
new file mode 100644
index 000000000000..bc932d1af372
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/cpuidle.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/cpuidle.h>
+
+#ifdef CONFIG_CPU_IDLE
+extern int imx_cpuidle_init(struct cpuidle_driver *drv);
+#else
+static inline int imx_cpuidle_init(struct cpuidle_driver *drv)
+{
+ return -ENODEV;
+}
+#endif
diff --git a/arch/arm/plat-mxc/include/mach/devices-common.h b/arch/arm/plat-mxc/include/mach/devices-common.h
index 1b2258daa05b..a7f5bb1084d7 100644
--- a/arch/arm/plat-mxc/include/mach/devices-common.h
+++ b/arch/arm/plat-mxc/include/mach/devices-common.h
@@ -183,7 +183,6 @@ struct platform_device *__init imx_add_imx_udc(
const struct imx_imx_udc_data *data,
const struct imxusb_platform_data *pdata);
-#include <mach/ipu.h>
#include <mach/mx3fb.h>
#include <mach/mx3_camera.h>
struct imx_ipu_core_data {
@@ -192,8 +191,7 @@ struct imx_ipu_core_data {
resource_size_t errirq;
};
struct platform_device *__init imx_add_ipu_core(
- const struct imx_ipu_core_data *data,
- const struct ipu_platform_data *pdata);
+ const struct imx_ipu_core_data *data);
struct platform_device *__init imx_alloc_mx3_camera(
const struct imx_ipu_core_data *data,
const struct mx3_camera_pdata *pdata);
diff --git a/arch/arm/plat-mxc/include/mach/hardware.h b/arch/arm/plat-mxc/include/mach/hardware.h
index 0630513554de..ebf10654bb42 100644
--- a/arch/arm/plat-mxc/include/mach/hardware.h
+++ b/arch/arm/plat-mxc/include/mach/hardware.h
@@ -50,7 +50,7 @@
* IO 0x00200000+0x100000 -> 0xf4000000+0x100000
* mx21:
* AIPI 0x10000000+0x100000 -> 0xf4400000+0x100000
- * SAHB1 0x80000000+0x100000 -> 0xf4000000+0x100000
+ * SAHB1 0x80000000+0x100000 -> 0xf5000000+0x100000
* X_MEMC 0xdf000000+0x004000 -> 0xf5f00000+0x004000
* mx25:
* AIPS1 0x43f00000+0x100000 -> 0xf5300000+0x100000
@@ -58,47 +58,50 @@
* AVIC 0x68000000+0x100000 -> 0xf5800000+0x100000
* mx27:
* AIPI 0x10000000+0x100000 -> 0xf4400000+0x100000
- * SAHB1 0x80000000+0x100000 -> 0xf4000000+0x100000
+ * SAHB1 0x80000000+0x100000 -> 0xf5000000+0x100000
* X_MEMC 0xd8000000+0x100000 -> 0xf5c00000+0x100000
* mx31:
* AIPS1 0x43f00000+0x100000 -> 0xf5300000+0x100000
* AIPS2 0x53f00000+0x100000 -> 0xf5700000+0x100000
* AVIC 0x68000000+0x100000 -> 0xf5800000+0x100000
- * X_MEMC 0xb8000000+0x010000 -> 0xf4c00000+0x010000
+ * X_MEMC 0xb8000000+0x010000 -> 0xf5c00000+0x010000
* SPBA0 0x50000000+0x100000 -> 0xf5400000+0x100000
* mx35:
* AIPS1 0x43f00000+0x100000 -> 0xf5300000+0x100000
* AIPS2 0x53f00000+0x100000 -> 0xf5700000+0x100000
* AVIC 0x68000000+0x100000 -> 0xf5800000+0x100000
- * X_MEMC 0xb8000000+0x010000 -> 0xf4c00000+0x010000
+ * X_MEMC 0xb8000000+0x010000 -> 0xf5c00000+0x010000
* SPBA0 0x50000000+0x100000 -> 0xf5400000+0x100000
* mx50:
* TZIC 0x0fffc000+0x004000 -> 0xf4bfc000+0x004000
- * SPBA0 0x50000000+0x100000 -> 0xf5400000+0x100000
* AIPS1 0x53f00000+0x100000 -> 0xf5700000+0x100000
+ * SPBA0 0x50000000+0x100000 -> 0xf5400000+0x100000
* AIPS2 0x63f00000+0x100000 -> 0xf5300000+0x100000
* mx51:
- * TZIC 0xe0000000+0x004000 -> 0xf5000000+0x004000
+ * TZIC 0x0fffc000+0x004000 -> 0xf4bfc000+0x004000
* IRAM 0x1ffe0000+0x020000 -> 0xf4fe0000+0x020000
+ * DEBUG 0x60000000+0x100000 -> 0xf5000000+0x100000
* SPBA0 0x70000000+0x100000 -> 0xf5400000+0x100000
* AIPS1 0x73f00000+0x100000 -> 0xf5700000+0x100000
- * AIPS2 0x83f00000+0x100000 -> 0xf4300000+0x100000
+ * AIPS2 0x83f00000+0x100000 -> 0xf5300000+0x100000
* mx53:
* TZIC 0x0fffc000+0x004000 -> 0xf4bfc000+0x004000
+ * DEBUG 0x40000000+0x100000 -> 0xf5000000+0x100000
* SPBA0 0x50000000+0x100000 -> 0xf5400000+0x100000
* AIPS1 0x53f00000+0x100000 -> 0xf5700000+0x100000
* AIPS2 0x63f00000+0x100000 -> 0xf5300000+0x100000
* mx6q:
- * SCU 0x00a00000+0x001000 -> 0xf4000000+0x001000
+ * SCU 0x00a00000+0x004000 -> 0xf4000000+0x004000
* CCM 0x020c4000+0x004000 -> 0xf42c4000+0x004000
- * ANATOP 0x020c8000+0x001000 -> 0xf42c8000+0x001000
+ * ANATOP 0x020c8000+0x004000 -> 0xf42c8000+0x004000
* UART4 0x021f0000+0x004000 -> 0xf42f0000+0x004000
*/
#define IMX_IO_P2V(x) ( \
- 0xf4000000 + \
+ (((x) & 0x80000000) >> 7) | \
+ (0xf4000000 + \
(((x) & 0x50000000) >> 6) + \
(((x) & 0x0b000000) >> 4) + \
- (((x) & 0x000fffff)))
+ (((x) & 0x000fffff))))
#define IMX_IO_ADDRESS(x) IOMEM(IMX_IO_P2V(x))
@@ -128,6 +131,4 @@
/* range e.g. GPIO_1_5 is gpio 5 under linux */
#define IMX_GPIO_NR(bank, nr) (((bank) - 1) * 32 + (nr))
-#define IMX_GPIO_TO_IRQ(gpio) (MXC_GPIO_IRQ_START + (gpio))
-
#endif /* __ASM_ARCH_MXC_HARDWARE_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx3.h b/arch/arm/plat-mxc/include/mach/iomux-mx3.h
index 63f22a009a65..d8b65b51f2a9 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx3.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx3.h
@@ -160,9 +160,6 @@ int mxc_iomux_mode(unsigned int pin_mode);
#define IOMUX_TO_GPIO(iomux_pin) \
((iomux_pin & IOMUX_GPIONUM_MASK) >> IOMUX_GPIONUM_SHIFT)
-#define IOMUX_TO_IRQ(iomux_pin) \
- (((iomux_pin & IOMUX_GPIONUM_MASK) >> IOMUX_GPIONUM_SHIFT) + \
- MXC_GPIO_IRQ_START)
/*
* This enumeration is constructed based on the Section
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx51.h b/arch/arm/plat-mxc/include/mach/iomux-mx51.h
index 36c8989d9de6..2623e7a2e190 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx51.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx51.h
@@ -107,11 +107,13 @@
#define MX51_PAD_EIM_D25__UART2_CTS IOMUX_PAD(0x414, 0x080, 4, __NA_, 0, MX51_UART_PAD_CTRL)
#define MX51_PAD_EIM_D25__UART3_RXD IOMUX_PAD(0x414, 0x080, 3, 0x9f4, 0, MX51_UART_PAD_CTRL)
#define MX51_PAD_EIM_D25__USBOTG_DATA1 IOMUX_PAD(0x414, 0x080, 2, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_EIM_D25__GPT_CMPOUT1 IOMUX_PAD(0x414, 0x080, 5, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_EIM_D26__EIM_D26 IOMUX_PAD(0x418, 0x084, 0, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_EIM_D26__KEY_COL7 IOMUX_PAD(0x418, 0x084, 1, 0x9cc, 0, NO_PAD_CTRL)
#define MX51_PAD_EIM_D26__UART2_RTS IOMUX_PAD(0x418, 0x084, 4, 0x9e8, 3, MX51_UART_PAD_CTRL)
#define MX51_PAD_EIM_D26__UART3_TXD IOMUX_PAD(0x418, 0x084, 3, __NA_, 0, MX51_UART_PAD_CTRL)
#define MX51_PAD_EIM_D26__USBOTG_DATA2 IOMUX_PAD(0x418, 0x084, 2, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_EIM_D26__GPT_CMPOUT2 IOMUX_PAD(0x418, 0x084, 5, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_EIM_D27__AUD6_RXC IOMUX_PAD(0x41c, 0x088, 5, 0x8f4, 0, NO_PAD_CTRL)
#define MX51_PAD_EIM_D27__EIM_D27 IOMUX_PAD(0x41c, 0x088, 0, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_EIM_D27__GPIO2_9 IOMUX_PAD(0x41c, 0x088, 1, __NA_, 0, MX51_GPIO_PAD_CTRL)
@@ -228,6 +230,7 @@
#define MX51_PAD_EIM_CRE__EIM_CRE IOMUX_PAD(0x4a0, 0x100, 0, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_EIM_CRE__GPIO3_2 IOMUX_PAD(0x4a0, 0x100, 1, 0x97c, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_DRAM_CS1__DRAM_CS1 IOMUX_PAD(0x4d0, 0x104, 0, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_DRAM_CS1__CCM_CLKO IOMUX_PAD(0x4d0, 0x104, 1, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_NANDF_WE_B__GPIO3_3 IOMUX_PAD(0x4e4, 0x108, 3, 0x980, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_NANDF_WE_B__NANDF_WE_B IOMUX_PAD(0x4e4, 0x108, 0, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_NANDF_WE_B__PATA_DIOW IOMUX_PAD(0x4e4, 0x108, 1, __NA_, 0, NO_PAD_CTRL)
@@ -256,12 +259,14 @@
#define MX51_PAD_NANDF_RB1__GPIO3_9 IOMUX_PAD(0x4fc, 0x120, 3, __NA_, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_NANDF_RB1__NANDF_RB1 IOMUX_PAD(0x4fc, 0x120, 0, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_NANDF_RB1__PATA_IORDY IOMUX_PAD(0x4fc, 0x120, 1, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_NANDF_RB1__GPT_CMPOUT2 IOMUX_PAD(0x4fc, 0x120, 4, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_NANDF_RB1__SD4_CMD IOMUX_PAD(0x4fc, 0x120, 0x15, __NA_, 0, MX51_SDHCI_PAD_CTRL)
#define MX51_PAD_NANDF_RB2__DISP2_WAIT IOMUX_PAD(0x500, 0x124, 5, 0x9a8, 0, NO_PAD_CTRL)
#define MX51_PAD_NANDF_RB2__ECSPI2_SCLK IOMUX_PAD(0x500, 0x124, 2, __NA_, 0, MX51_ECSPI_PAD_CTRL)
#define MX51_PAD_NANDF_RB2__FEC_COL IOMUX_PAD(0x500, 0x124, 1, 0x94c, 0, MX51_PAD_CTRL_2)
#define MX51_PAD_NANDF_RB2__GPIO3_10 IOMUX_PAD(0x500, 0x124, 3, __NA_, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_NANDF_RB2__NANDF_RB2 IOMUX_PAD(0x500, 0x124, 0, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_NANDF_RB2__GPT_CMPOUT3 IOMUX_PAD(0x500, 0x124, 4, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_NANDF_RB2__USBH3_H3_DP IOMUX_PAD(0x500, 0x124, 0x17, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_NANDF_RB2__USBH3_NXT IOMUX_PAD(0x500, 0x124, 6, 0xa20, 0, NO_PAD_CTRL)
#define MX51_PAD_NANDF_RB3__DISP1_WAIT IOMUX_PAD(0x504, 0x128, 5, __NA_, 0, NO_PAD_CTRL)
@@ -637,7 +642,9 @@
#define MX51_PAD_DISP1_DAT23__DISP2_DAT17 IOMUX_PAD(0x728, 0x328, 5, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_DISP1_DAT23__DISP2_SER_CS IOMUX_PAD(0x728, 0x328, 4, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_DI1_PIN3__DI1_PIN3 IOMUX_PAD(0x72c, 0x32c, 0, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_DI1_DISP_CLK__DI1_DISP_CLK IOMUX_PAD(0x730, __NA_, 0, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_DI1_PIN2__DI1_PIN2 IOMUX_PAD(0x734, 0x330, 0, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_DI1_PIN15__DI1_PIN15 IOMUX_PAD(0x738, __NA_, 0, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_DI_GP2__DISP1_SER_CLK IOMUX_PAD(0x740, 0x338, 0, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_DI_GP2__DISP2_WAIT IOMUX_PAD(0x740, 0x338, 2, 0x9a8, 1, NO_PAD_CTRL)
#define MX51_PAD_DI_GP3__CSI1_DATA_EN IOMUX_PAD(0x744, 0x33c, 3, 0x9a0, 1, NO_PAD_CTRL)
@@ -780,6 +787,8 @@
#define MX51_PAD_GPIO1_2__PWM1_PWMO IOMUX_PAD(0x7d4, 0x3cc, 1, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_3__GPIO1_3 IOMUX_PAD(0x7d8, 0x3d0, 0, __NA_, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_GPIO1_3__I2C2_SDA IOMUX_PAD(0x7d8, 0x3d0, 0x12, 0x9bc, 3, MX51_I2C_PAD_CTRL)
+#define MX51_PAD_GPIO1_3__CCM_CLKO2 IOMUX_PAD(0x7d8, 0x3d0, 5, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_GPIO1_3__GPT_CLKIN IOMUX_PAD(0x7d8, 0x3d0, 6, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_3__PLL2_BYP IOMUX_PAD(0x7d8, 0x3d0, 7, 0x910, 1, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_3__PWM2_PWMO IOMUX_PAD(0x7d8, 0x3d0, 1, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_PMIC_INT_REQ__PMIC_INT_REQ IOMUX_PAD(0x7fc, 0x3d4, 0, __NA_, 0, NO_PAD_CTRL)
@@ -788,13 +797,16 @@
#define MX51_PAD_GPIO1_4__EIM_RDY IOMUX_PAD(0x804, 0x3d8, 3, 0x938, 1, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_4__GPIO1_4 IOMUX_PAD(0x804, 0x3d8, 0, __NA_, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_GPIO1_4__WDOG1_WDOG_B IOMUX_PAD(0x804, 0x3d8, 2, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_GPIO1_4__GPT_CAPIN1 IOMUX_PAD(0x804, 0x3d8, 6, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_5__CSI2_MCLK IOMUX_PAD(0x808, 0x3dc, 6, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_5__DISP2_PIN16 IOMUX_PAD(0x808, 0x3dc, 3, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_5__GPIO1_5 IOMUX_PAD(0x808, 0x3dc, 0, __NA_, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_GPIO1_5__WDOG2_WDOG_B IOMUX_PAD(0x808, 0x3dc, 2, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_GPIO1_5__CCM_CLKO IOMUX_PAD(0x808, 0x3dc, 5, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_6__DISP2_PIN17 IOMUX_PAD(0x80c, 0x3e0, 4, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_6__GPIO1_6 IOMUX_PAD(0x80c, 0x3e0, 0, __NA_, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_GPIO1_6__REF_EN_B IOMUX_PAD(0x80c, 0x3e0, 3, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_GPIO1_6__GPT_CAPIN2 IOMUX_PAD(0x80c, 0x3e0, 6, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_7__CCM_OUT_0 IOMUX_PAD(0x810, 0x3e4, 3, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_7__GPIO1_7 IOMUX_PAD(0x810, 0x3e4, 0, __NA_, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_GPIO1_7__SD2_WP IOMUX_PAD(0x810, 0x3e4, 6, __NA_, 0, MX51_ESDHC_PAD_CTRL)
@@ -803,11 +815,13 @@
#define MX51_PAD_GPIO1_8__GPIO1_8 IOMUX_PAD(0x814, 0x3e8, 0, __NA_, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_GPIO1_8__SD2_CD IOMUX_PAD(0x814, 0x3e8, 6, __NA_, 0, MX51_ESDHC_PAD_CTRL)
#define MX51_PAD_GPIO1_8__USBH3_PWR IOMUX_PAD(0x814, 0x3e8, 1, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_GPIO1_8__CCM_CLKO2 IOMUX_PAD(0x814, 0x3e8, 4, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_9__CCM_OUT_1 IOMUX_PAD(0x818, 0x3ec, 3, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_9__DISP2_D1_CS IOMUX_PAD(0x818, 0x3ec, 2, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_9__DISP2_SER_CS IOMUX_PAD(0x818, 0x3ec, 7, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_9__GPIO1_9 IOMUX_PAD(0x818, 0x3ec, 0, __NA_, 0, MX51_GPIO_PAD_CTRL)
#define MX51_PAD_GPIO1_9__SD2_LCTL IOMUX_PAD(0x818, 0x3ec, 6, __NA_, 0, NO_PAD_CTRL)
#define MX51_PAD_GPIO1_9__USBH3_OC IOMUX_PAD(0x818, 0x3ec, 1, __NA_, 0, NO_PAD_CTRL)
+#define MX51_PAD_GPIO1_9__CCM_CLKO IOMUX_PAD(0x818, 0x3ec, 4, __NA_, 0, NO_PAD_CTRL)
#endif /* __MACH_IOMUX_MX51_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/iomux-v1.h b/arch/arm/plat-mxc/include/mach/iomux-v1.h
index f7d18046c04f..02651a40fe23 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-v1.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-v1.h
@@ -85,13 +85,6 @@
#define GPIO_BOUT_0 (2 << GPIO_BOUT_SHIFT)
#define GPIO_BOUT_1 (3 << GPIO_BOUT_SHIFT)
-#define IRQ_GPIOA(x) (MXC_GPIO_IRQ_START + x)
-#define IRQ_GPIOB(x) (IRQ_GPIOA(32) + x)
-#define IRQ_GPIOC(x) (IRQ_GPIOB(32) + x)
-#define IRQ_GPIOD(x) (IRQ_GPIOC(32) + x)
-#define IRQ_GPIOE(x) (IRQ_GPIOD(32) + x)
-#define IRQ_GPIOF(x) (IRQ_GPIOE(32) + x)
-
extern int mxc_gpio_mode(int gpio_mode);
extern int mxc_gpio_setup_multiple_pins(const int *pin_list, unsigned count,
const char *label);
diff --git a/arch/arm/plat-mxc/include/mach/ipu.h b/arch/arm/plat-mxc/include/mach/ipu.h
index a9221f1cc1a0..539e559d18b2 100644
--- a/arch/arm/plat-mxc/include/mach/ipu.h
+++ b/arch/arm/plat-mxc/include/mach/ipu.h
@@ -110,10 +110,6 @@ enum ipu_rotate_mode {
IPU_ROTATE_90_LEFT = 7,
};
-struct ipu_platform_data {
- unsigned int irq_base;
-};
-
/*
* Enumeration of DI ports for ADC.
*/
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index fd9efb044656..d73f5e8ea9cb 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -11,50 +11,6 @@
#ifndef __ASM_ARCH_MXC_IRQS_H__
#define __ASM_ARCH_MXC_IRQS_H__
-#include <asm-generic/gpio.h>
-
-/*
- * SoCs with GIC interrupt controller have 160 IRQs, those with TZIC
- * have 128 IRQs, and those with AVIC have 64.
- *
- * To support single image, the biggest number should be defined on
- * top of the list.
- */
-#if defined CONFIG_ARM_GIC
-#define MXC_INTERNAL_IRQS 160
-#elif defined CONFIG_MXC_TZIC
-#define MXC_INTERNAL_IRQS 128
-#else
-#define MXC_INTERNAL_IRQS 64
-#endif
-
-#define MXC_GPIO_IRQ_START MXC_INTERNAL_IRQS
-
-/*
- * The next 16 interrupts are for board specific purposes. Since
- * the kernel can only run on one machine at a time, we can re-use
- * these. If you need more, increase MXC_BOARD_IRQS, but keep it
- * within sensible limits.
- */
-#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + ARCH_NR_GPIOS)
-
-#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
-#define MXC_BOARD_IRQS 80
-#else
-#define MXC_BOARD_IRQS 16
-#endif
-
-#define MXC_IPU_IRQ_START (MXC_BOARD_IRQ_START + MXC_BOARD_IRQS)
-
-#ifdef CONFIG_MX3_IPU_IRQS
-#define MX3_IPU_IRQS CONFIG_MX3_IPU_IRQS
-#else
-#define MX3_IPU_IRQS 0
-#endif
-/* REVISIT: Add IPU irqs on IMX51 */
-
-#define NR_IRQS (MXC_IPU_IRQ_START + MX3_IPU_IRQS)
-
extern int imx_irq_set_priority(unsigned char irq, unsigned char prio);
/* all normal IRQs can be FIQs */
diff --git a/arch/arm/plat-mxc/include/mach/mx1.h b/arch/arm/plat-mxc/include/mach/mx1.h
index 2b7c08d13e89..45bd31cc34d6 100644
--- a/arch/arm/plat-mxc/include/mach/mx1.h
+++ b/arch/arm/plat-mxc/include/mach/mx1.h
@@ -78,61 +78,62 @@
#define MX1_IO_ADDRESS(x) IOMEM(MX1_IO_P2V(x))
/* fixed interrput numbers */
-#define MX1_INT_SOFTINT 0
-#define MX1_INT_CSI 6
-#define MX1_DSPA_MAC_INT 7
-#define MX1_DSPA_INT 8
-#define MX1_COMP_INT 9
-#define MX1_MSHC_XINT 10
-#define MX1_GPIO_INT_PORTA 11
-#define MX1_GPIO_INT_PORTB 12
-#define MX1_GPIO_INT_PORTC 13
-#define MX1_INT_LCDC 14
-#define MX1_SIM_INT 15
-#define MX1_SIM_DATA_INT 16
-#define MX1_RTC_INT 17
-#define MX1_RTC_SAMINT 18
-#define MX1_INT_UART2PFERR 19
-#define MX1_INT_UART2RTS 20
-#define MX1_INT_UART2DTR 21
-#define MX1_INT_UART2UARTC 22
-#define MX1_INT_UART2TX 23
-#define MX1_INT_UART2RX 24
-#define MX1_INT_UART1PFERR 25
-#define MX1_INT_UART1RTS 26
-#define MX1_INT_UART1DTR 27
-#define MX1_INT_UART1UARTC 28
-#define MX1_INT_UART1TX 29
-#define MX1_INT_UART1RX 30
-#define MX1_VOICE_DAC_INT 31
-#define MX1_VOICE_ADC_INT 32
-#define MX1_PEN_DATA_INT 33
-#define MX1_PWM_INT 34
-#define MX1_SDHC_INT 35
-#define MX1_INT_I2C 39
-#define MX1_INT_CSPI2 40
-#define MX1_INT_CSPI1 41
-#define MX1_SSI_TX_INT 42
-#define MX1_SSI_TX_ERR_INT 43
-#define MX1_SSI_RX_INT 44
-#define MX1_SSI_RX_ERR_INT 45
-#define MX1_TOUCH_INT 46
-#define MX1_INT_USBD0 47
-#define MX1_INT_USBD1 48
-#define MX1_INT_USBD2 49
-#define MX1_INT_USBD3 50
-#define MX1_INT_USBD4 51
-#define MX1_INT_USBD5 52
-#define MX1_INT_USBD6 53
-#define MX1_BTSYS_INT 55
-#define MX1_BTTIM_INT 56
-#define MX1_BTWUI_INT 57
-#define MX1_TIM2_INT 58
-#define MX1_TIM1_INT 59
-#define MX1_DMA_ERR 60
-#define MX1_DMA_INT 61
-#define MX1_GPIO_INT_PORTD 62
-#define MX1_WDT_INT 63
+#include <asm/irq.h>
+#define MX1_INT_SOFTINT (NR_IRQS_LEGACY + 0)
+#define MX1_INT_CSI (NR_IRQS_LEGACY + 6)
+#define MX1_DSPA_MAC_INT (NR_IRQS_LEGACY + 7)
+#define MX1_DSPA_INT (NR_IRQS_LEGACY + 8)
+#define MX1_COMP_INT (NR_IRQS_LEGACY + 9)
+#define MX1_MSHC_XINT (NR_IRQS_LEGACY + 10)
+#define MX1_GPIO_INT_PORTA (NR_IRQS_LEGACY + 11)
+#define MX1_GPIO_INT_PORTB (NR_IRQS_LEGACY + 12)
+#define MX1_GPIO_INT_PORTC (NR_IRQS_LEGACY + 13)
+#define MX1_INT_LCDC (NR_IRQS_LEGACY + 14)
+#define MX1_SIM_INT (NR_IRQS_LEGACY + 15)
+#define MX1_SIM_DATA_INT (NR_IRQS_LEGACY + 16)
+#define MX1_RTC_INT (NR_IRQS_LEGACY + 17)
+#define MX1_RTC_SAMINT (NR_IRQS_LEGACY + 18)
+#define MX1_INT_UART2PFERR (NR_IRQS_LEGACY + 19)
+#define MX1_INT_UART2RTS (NR_IRQS_LEGACY + 20)
+#define MX1_INT_UART2DTR (NR_IRQS_LEGACY + 21)
+#define MX1_INT_UART2UARTC (NR_IRQS_LEGACY + 22)
+#define MX1_INT_UART2TX (NR_IRQS_LEGACY + 23)
+#define MX1_INT_UART2RX (NR_IRQS_LEGACY + 24)
+#define MX1_INT_UART1PFERR (NR_IRQS_LEGACY + 25)
+#define MX1_INT_UART1RTS (NR_IRQS_LEGACY + 26)
+#define MX1_INT_UART1DTR (NR_IRQS_LEGACY + 27)
+#define MX1_INT_UART1UARTC (NR_IRQS_LEGACY + 28)
+#define MX1_INT_UART1TX (NR_IRQS_LEGACY + 29)
+#define MX1_INT_UART1RX (NR_IRQS_LEGACY + 30)
+#define MX1_VOICE_DAC_INT (NR_IRQS_LEGACY + 31)
+#define MX1_VOICE_ADC_INT (NR_IRQS_LEGACY + 32)
+#define MX1_PEN_DATA_INT (NR_IRQS_LEGACY + 33)
+#define MX1_PWM_INT (NR_IRQS_LEGACY + 34)
+#define MX1_SDHC_INT (NR_IRQS_LEGACY + 35)
+#define MX1_INT_I2C (NR_IRQS_LEGACY + 39)
+#define MX1_INT_CSPI2 (NR_IRQS_LEGACY + 40)
+#define MX1_INT_CSPI1 (NR_IRQS_LEGACY + 41)
+#define MX1_SSI_TX_INT (NR_IRQS_LEGACY + 42)
+#define MX1_SSI_TX_ERR_INT (NR_IRQS_LEGACY + 43)
+#define MX1_SSI_RX_INT (NR_IRQS_LEGACY + 44)
+#define MX1_SSI_RX_ERR_INT (NR_IRQS_LEGACY + 45)
+#define MX1_TOUCH_INT (NR_IRQS_LEGACY + 46)
+#define MX1_INT_USBD0 (NR_IRQS_LEGACY + 47)
+#define MX1_INT_USBD1 (NR_IRQS_LEGACY + 48)
+#define MX1_INT_USBD2 (NR_IRQS_LEGACY + 49)
+#define MX1_INT_USBD3 (NR_IRQS_LEGACY + 50)
+#define MX1_INT_USBD4 (NR_IRQS_LEGACY + 51)
+#define MX1_INT_USBD5 (NR_IRQS_LEGACY + 52)
+#define MX1_INT_USBD6 (NR_IRQS_LEGACY + 53)
+#define MX1_BTSYS_INT (NR_IRQS_LEGACY + 55)
+#define MX1_BTTIM_INT (NR_IRQS_LEGACY + 56)
+#define MX1_BTWUI_INT (NR_IRQS_LEGACY + 57)
+#define MX1_TIM2_INT (NR_IRQS_LEGACY + 58)
+#define MX1_TIM1_INT (NR_IRQS_LEGACY + 59)
+#define MX1_DMA_ERR (NR_IRQS_LEGACY + 60)
+#define MX1_DMA_INT (NR_IRQS_LEGACY + 61)
+#define MX1_GPIO_INT_PORTD (NR_IRQS_LEGACY + 62)
+#define MX1_WDT_INT (NR_IRQS_LEGACY + 63)
/* DMA */
#define MX1_DMA_REQ_UART3_T 2
diff --git a/arch/arm/plat-mxc/include/mach/mx21.h b/arch/arm/plat-mxc/include/mach/mx21.h
index 6cd049ebbd8d..468738aa997f 100644
--- a/arch/arm/plat-mxc/include/mach/mx21.h
+++ b/arch/arm/plat-mxc/include/mach/mx21.h
@@ -99,59 +99,60 @@
#define MX21_IO_ADDRESS(x) IOMEM(MX21_IO_P2V(x))
/* fixed interrupt numbers */
-#define MX21_INT_CSPI3 6
-#define MX21_INT_GPIO 8
-#define MX21_INT_FIRI 9
-#define MX21_INT_SDHC2 10
-#define MX21_INT_SDHC1 11
-#define MX21_INT_I2C 12
-#define MX21_INT_SSI2 13
-#define MX21_INT_SSI1 14
-#define MX21_INT_CSPI2 15
-#define MX21_INT_CSPI1 16
-#define MX21_INT_UART4 17
-#define MX21_INT_UART3 18
-#define MX21_INT_UART2 19
-#define MX21_INT_UART1 20
-#define MX21_INT_KPP 21
-#define MX21_INT_RTC 22
-#define MX21_INT_PWM 23
-#define MX21_INT_GPT3 24
-#define MX21_INT_GPT2 25
-#define MX21_INT_GPT1 26
-#define MX21_INT_WDOG 27
-#define MX21_INT_PCMCIA 28
-#define MX21_INT_NFC 29
-#define MX21_INT_BMI 30
-#define MX21_INT_CSI 31
-#define MX21_INT_DMACH0 32
-#define MX21_INT_DMACH1 33
-#define MX21_INT_DMACH2 34
-#define MX21_INT_DMACH3 35
-#define MX21_INT_DMACH4 36
-#define MX21_INT_DMACH5 37
-#define MX21_INT_DMACH6 38
-#define MX21_INT_DMACH7 39
-#define MX21_INT_DMACH8 40
-#define MX21_INT_DMACH9 41
-#define MX21_INT_DMACH10 42
-#define MX21_INT_DMACH11 43
-#define MX21_INT_DMACH12 44
-#define MX21_INT_DMACH13 45
-#define MX21_INT_DMACH14 46
-#define MX21_INT_DMACH15 47
-#define MX21_INT_EMMAENC 49
-#define MX21_INT_EMMADEC 50
-#define MX21_INT_EMMAPRP 51
-#define MX21_INT_EMMAPP 52
-#define MX21_INT_USBWKUP 53
-#define MX21_INT_USBDMA 54
-#define MX21_INT_USBHOST 55
-#define MX21_INT_USBFUNC 56
-#define MX21_INT_USBMNP 57
-#define MX21_INT_USBCTRL 58
-#define MX21_INT_SLCDC 60
-#define MX21_INT_LCDC 61
+#include <asm/irq.h>
+#define MX21_INT_CSPI3 (NR_IRQS_LEGACY + 6)
+#define MX21_INT_GPIO (NR_IRQS_LEGACY + 8)
+#define MX21_INT_FIRI (NR_IRQS_LEGACY + 9)
+#define MX21_INT_SDHC2 (NR_IRQS_LEGACY + 10)
+#define MX21_INT_SDHC1 (NR_IRQS_LEGACY + 11)
+#define MX21_INT_I2C (NR_IRQS_LEGACY + 12)
+#define MX21_INT_SSI2 (NR_IRQS_LEGACY + 13)
+#define MX21_INT_SSI1 (NR_IRQS_LEGACY + 14)
+#define MX21_INT_CSPI2 (NR_IRQS_LEGACY + 15)
+#define MX21_INT_CSPI1 (NR_IRQS_LEGACY + 16)
+#define MX21_INT_UART4 (NR_IRQS_LEGACY + 17)
+#define MX21_INT_UART3 (NR_IRQS_LEGACY + 18)
+#define MX21_INT_UART2 (NR_IRQS_LEGACY + 19)
+#define MX21_INT_UART1 (NR_IRQS_LEGACY + 20)
+#define MX21_INT_KPP (NR_IRQS_LEGACY + 21)
+#define MX21_INT_RTC (NR_IRQS_LEGACY + 22)
+#define MX21_INT_PWM (NR_IRQS_LEGACY + 23)
+#define MX21_INT_GPT3 (NR_IRQS_LEGACY + 24)
+#define MX21_INT_GPT2 (NR_IRQS_LEGACY + 25)
+#define MX21_INT_GPT1 (NR_IRQS_LEGACY + 26)
+#define MX21_INT_WDOG (NR_IRQS_LEGACY + 27)
+#define MX21_INT_PCMCIA (NR_IRQS_LEGACY + 28)
+#define MX21_INT_NFC (NR_IRQS_LEGACY + 29)
+#define MX21_INT_BMI (NR_IRQS_LEGACY + 30)
+#define MX21_INT_CSI (NR_IRQS_LEGACY + 31)
+#define MX21_INT_DMACH0 (NR_IRQS_LEGACY + 32)
+#define MX21_INT_DMACH1 (NR_IRQS_LEGACY + 33)
+#define MX21_INT_DMACH2 (NR_IRQS_LEGACY + 34)
+#define MX21_INT_DMACH3 (NR_IRQS_LEGACY + 35)
+#define MX21_INT_DMACH4 (NR_IRQS_LEGACY + 36)
+#define MX21_INT_DMACH5 (NR_IRQS_LEGACY + 37)
+#define MX21_INT_DMACH6 (NR_IRQS_LEGACY + 38)
+#define MX21_INT_DMACH7 (NR_IRQS_LEGACY + 39)
+#define MX21_INT_DMACH8 (NR_IRQS_LEGACY + 40)
+#define MX21_INT_DMACH9 (NR_IRQS_LEGACY + 41)
+#define MX21_INT_DMACH10 (NR_IRQS_LEGACY + 42)
+#define MX21_INT_DMACH11 (NR_IRQS_LEGACY + 43)
+#define MX21_INT_DMACH12 (NR_IRQS_LEGACY + 44)
+#define MX21_INT_DMACH13 (NR_IRQS_LEGACY + 45)
+#define MX21_INT_DMACH14 (NR_IRQS_LEGACY + 46)
+#define MX21_INT_DMACH15 (NR_IRQS_LEGACY + 47)
+#define MX21_INT_EMMAENC (NR_IRQS_LEGACY + 49)
+#define MX21_INT_EMMADEC (NR_IRQS_LEGACY + 50)
+#define MX21_INT_EMMAPRP (NR_IRQS_LEGACY + 51)
+#define MX21_INT_EMMAPP (NR_IRQS_LEGACY + 52)
+#define MX21_INT_USBWKUP (NR_IRQS_LEGACY + 53)
+#define MX21_INT_USBDMA (NR_IRQS_LEGACY + 54)
+#define MX21_INT_USBHOST (NR_IRQS_LEGACY + 55)
+#define MX21_INT_USBFUNC (NR_IRQS_LEGACY + 56)
+#define MX21_INT_USBMNP (NR_IRQS_LEGACY + 57)
+#define MX21_INT_USBCTRL (NR_IRQS_LEGACY + 58)
+#define MX21_INT_SLCDC (NR_IRQS_LEGACY + 60)
+#define MX21_INT_LCDC (NR_IRQS_LEGACY + 61)
/* fixed DMA request numbers */
#define MX21_DMA_REQ_CSPI3_RX 1
diff --git a/arch/arm/plat-mxc/include/mach/mx25.h b/arch/arm/plat-mxc/include/mach/mx25.h
index ccebf5ba12f0..627d94f1b010 100644
--- a/arch/arm/plat-mxc/include/mach/mx25.h
+++ b/arch/arm/plat-mxc/include/mach/mx25.h
@@ -61,40 +61,44 @@
#define MX25_IO_P2V(x) IMX_IO_P2V(x)
#define MX25_IO_ADDRESS(x) IOMEM(MX25_IO_P2V(x))
-#define MX25_INT_CSPI3 0
-#define MX25_INT_I2C1 3
-#define MX25_INT_I2C2 4
-#define MX25_INT_UART4 5
-#define MX25_INT_ESDHC2 8
-#define MX25_INT_ESDHC1 9
-#define MX25_INT_I2C3 10
-#define MX25_INT_SSI2 11
-#define MX25_INT_SSI1 12
-#define MX25_INT_CSPI2 13
-#define MX25_INT_CSPI1 14
-#define MX25_INT_GPIO3 16
-#define MX25_INT_CSI 17
-#define MX25_INT_UART3 18
-#define MX25_INT_GPIO4 23
-#define MX25_INT_KPP 24
-#define MX25_INT_DRYICE 25
-#define MX25_INT_PWM1 26
-#define MX25_INT_UART2 32
-#define MX25_INT_NFC 33
-#define MX25_INT_SDMA 34
-#define MX25_INT_USB_HS 35
-#define MX25_INT_PWM2 36
-#define MX25_INT_USB_OTG 37
-#define MX25_INT_LCDC 39
-#define MX25_INT_UART5 40
-#define MX25_INT_PWM3 41
-#define MX25_INT_PWM4 42
-#define MX25_INT_CAN1 43
-#define MX25_INT_CAN2 44
-#define MX25_INT_UART1 45
-#define MX25_INT_GPIO2 51
-#define MX25_INT_GPIO1 52
-#define MX25_INT_FEC 57
+/*
+ * Interrupt numbers
+ */
+#include <asm/irq.h>
+#define MX25_INT_CSPI3 (NR_IRQS_LEGACY + 0)
+#define MX25_INT_I2C1 (NR_IRQS_LEGACY + 3)
+#define MX25_INT_I2C2 (NR_IRQS_LEGACY + 4)
+#define MX25_INT_UART4 (NR_IRQS_LEGACY + 5)
+#define MX25_INT_ESDHC2 (NR_IRQS_LEGACY + 8)
+#define MX25_INT_ESDHC1 (NR_IRQS_LEGACY + 9)
+#define MX25_INT_I2C3 (NR_IRQS_LEGACY + 10)
+#define MX25_INT_SSI2 (NR_IRQS_LEGACY + 11)
+#define MX25_INT_SSI1 (NR_IRQS_LEGACY + 12)
+#define MX25_INT_CSPI2 (NR_IRQS_LEGACY + 13)
+#define MX25_INT_CSPI1 (NR_IRQS_LEGACY + 14)
+#define MX25_INT_GPIO3 (NR_IRQS_LEGACY + 16)
+#define MX25_INT_CSI (NR_IRQS_LEGACY + 17)
+#define MX25_INT_UART3 (NR_IRQS_LEGACY + 18)
+#define MX25_INT_GPIO4 (NR_IRQS_LEGACY + 23)
+#define MX25_INT_KPP (NR_IRQS_LEGACY + 24)
+#define MX25_INT_DRYICE (NR_IRQS_LEGACY + 25)
+#define MX25_INT_PWM1 (NR_IRQS_LEGACY + 26)
+#define MX25_INT_UART2 (NR_IRQS_LEGACY + 32)
+#define MX25_INT_NFC (NR_IRQS_LEGACY + 33)
+#define MX25_INT_SDMA (NR_IRQS_LEGACY + 34)
+#define MX25_INT_USB_HS (NR_IRQS_LEGACY + 35)
+#define MX25_INT_PWM2 (NR_IRQS_LEGACY + 36)
+#define MX25_INT_USB_OTG (NR_IRQS_LEGACY + 37)
+#define MX25_INT_LCDC (NR_IRQS_LEGACY + 39)
+#define MX25_INT_UART5 (NR_IRQS_LEGACY + 40)
+#define MX25_INT_PWM3 (NR_IRQS_LEGACY + 41)
+#define MX25_INT_PWM4 (NR_IRQS_LEGACY + 42)
+#define MX25_INT_CAN1 (NR_IRQS_LEGACY + 43)
+#define MX25_INT_CAN2 (NR_IRQS_LEGACY + 44)
+#define MX25_INT_UART1 (NR_IRQS_LEGACY + 45)
+#define MX25_INT_GPIO2 (NR_IRQS_LEGACY + 51)
+#define MX25_INT_GPIO1 (NR_IRQS_LEGACY + 52)
+#define MX25_INT_FEC (NR_IRQS_LEGACY + 57)
#define MX25_DMA_REQ_SSI2_RX1 22
#define MX25_DMA_REQ_SSI2_TX1 23
diff --git a/arch/arm/plat-mxc/include/mach/mx27.h b/arch/arm/plat-mxc/include/mach/mx27.h
index 6265357284d7..e074616d54ca 100644
--- a/arch/arm/plat-mxc/include/mach/mx27.h
+++ b/arch/arm/plat-mxc/include/mach/mx27.h
@@ -128,69 +128,70 @@
#define MX27_IO_ADDRESS(x) IOMEM(MX27_IO_P2V(x))
/* fixed interrupt numbers */
-#define MX27_INT_I2C2 1
-#define MX27_INT_GPT6 2
-#define MX27_INT_GPT5 3
-#define MX27_INT_GPT4 4
-#define MX27_INT_RTIC 5
-#define MX27_INT_CSPI3 6
-#define MX27_INT_SDHC 7
-#define MX27_INT_GPIO 8
-#define MX27_INT_SDHC3 9
-#define MX27_INT_SDHC2 10
-#define MX27_INT_SDHC1 11
-#define MX27_INT_I2C1 12
-#define MX27_INT_SSI2 13
-#define MX27_INT_SSI1 14
-#define MX27_INT_CSPI2 15
-#define MX27_INT_CSPI1 16
-#define MX27_INT_UART4 17
-#define MX27_INT_UART3 18
-#define MX27_INT_UART2 19
-#define MX27_INT_UART1 20
-#define MX27_INT_KPP 21
-#define MX27_INT_RTC 22
-#define MX27_INT_PWM 23
-#define MX27_INT_GPT3 24
-#define MX27_INT_GPT2 25
-#define MX27_INT_GPT1 26
-#define MX27_INT_WDOG 27
-#define MX27_INT_PCMCIA 28
-#define MX27_INT_NFC 29
-#define MX27_INT_ATA 30
-#define MX27_INT_CSI 31
-#define MX27_INT_DMACH0 32
-#define MX27_INT_DMACH1 33
-#define MX27_INT_DMACH2 34
-#define MX27_INT_DMACH3 35
-#define MX27_INT_DMACH4 36
-#define MX27_INT_DMACH5 37
-#define MX27_INT_DMACH6 38
-#define MX27_INT_DMACH7 39
-#define MX27_INT_DMACH8 40
-#define MX27_INT_DMACH9 41
-#define MX27_INT_DMACH10 42
-#define MX27_INT_DMACH11 43
-#define MX27_INT_DMACH12 44
-#define MX27_INT_DMACH13 45
-#define MX27_INT_DMACH14 46
-#define MX27_INT_DMACH15 47
-#define MX27_INT_UART6 48
-#define MX27_INT_UART5 49
-#define MX27_INT_FEC 50
-#define MX27_INT_EMMAPRP 51
-#define MX27_INT_EMMAPP 52
-#define MX27_INT_VPU 53
-#define MX27_INT_USB_HS1 54
-#define MX27_INT_USB_HS2 55
-#define MX27_INT_USB_OTG 56
-#define MX27_INT_SCC_SMN 57
-#define MX27_INT_SCC_SCM 58
-#define MX27_INT_SAHARA 59
-#define MX27_INT_SLCDC 60
-#define MX27_INT_LCDC 61
-#define MX27_INT_IIM 62
-#define MX27_INT_CCM 63
+#include <asm/irq.h>
+#define MX27_INT_I2C2 (NR_IRQS_LEGACY + 1)
+#define MX27_INT_GPT6 (NR_IRQS_LEGACY + 2)
+#define MX27_INT_GPT5 (NR_IRQS_LEGACY + 3)
+#define MX27_INT_GPT4 (NR_IRQS_LEGACY + 4)
+#define MX27_INT_RTIC (NR_IRQS_LEGACY + 5)
+#define MX27_INT_CSPI3 (NR_IRQS_LEGACY + 6)
+#define MX27_INT_SDHC (NR_IRQS_LEGACY + 7)
+#define MX27_INT_GPIO (NR_IRQS_LEGACY + 8)
+#define MX27_INT_SDHC3 (NR_IRQS_LEGACY + 9)
+#define MX27_INT_SDHC2 (NR_IRQS_LEGACY + 10)
+#define MX27_INT_SDHC1 (NR_IRQS_LEGACY + 11)
+#define MX27_INT_I2C1 (NR_IRQS_LEGACY + 12)
+#define MX27_INT_SSI2 (NR_IRQS_LEGACY + 13)
+#define MX27_INT_SSI1 (NR_IRQS_LEGACY + 14)
+#define MX27_INT_CSPI2 (NR_IRQS_LEGACY + 15)
+#define MX27_INT_CSPI1 (NR_IRQS_LEGACY + 16)
+#define MX27_INT_UART4 (NR_IRQS_LEGACY + 17)
+#define MX27_INT_UART3 (NR_IRQS_LEGACY + 18)
+#define MX27_INT_UART2 (NR_IRQS_LEGACY + 19)
+#define MX27_INT_UART1 (NR_IRQS_LEGACY + 20)
+#define MX27_INT_KPP (NR_IRQS_LEGACY + 21)
+#define MX27_INT_RTC (NR_IRQS_LEGACY + 22)
+#define MX27_INT_PWM (NR_IRQS_LEGACY + 23)
+#define MX27_INT_GPT3 (NR_IRQS_LEGACY + 24)
+#define MX27_INT_GPT2 (NR_IRQS_LEGACY + 25)
+#define MX27_INT_GPT1 (NR_IRQS_LEGACY + 26)
+#define MX27_INT_WDOG (NR_IRQS_LEGACY + 27)
+#define MX27_INT_PCMCIA (NR_IRQS_LEGACY + 28)
+#define MX27_INT_NFC (NR_IRQS_LEGACY + 29)
+#define MX27_INT_ATA (NR_IRQS_LEGACY + 30)
+#define MX27_INT_CSI (NR_IRQS_LEGACY + 31)
+#define MX27_INT_DMACH0 (NR_IRQS_LEGACY + 32)
+#define MX27_INT_DMACH1 (NR_IRQS_LEGACY + 33)
+#define MX27_INT_DMACH2 (NR_IRQS_LEGACY + 34)
+#define MX27_INT_DMACH3 (NR_IRQS_LEGACY + 35)
+#define MX27_INT_DMACH4 (NR_IRQS_LEGACY + 36)
+#define MX27_INT_DMACH5 (NR_IRQS_LEGACY + 37)
+#define MX27_INT_DMACH6 (NR_IRQS_LEGACY + 38)
+#define MX27_INT_DMACH7 (NR_IRQS_LEGACY + 39)
+#define MX27_INT_DMACH8 (NR_IRQS_LEGACY + 40)
+#define MX27_INT_DMACH9 (NR_IRQS_LEGACY + 41)
+#define MX27_INT_DMACH10 (NR_IRQS_LEGACY + 42)
+#define MX27_INT_DMACH11 (NR_IRQS_LEGACY + 43)
+#define MX27_INT_DMACH12 (NR_IRQS_LEGACY + 44)
+#define MX27_INT_DMACH13 (NR_IRQS_LEGACY + 45)
+#define MX27_INT_DMACH14 (NR_IRQS_LEGACY + 46)
+#define MX27_INT_DMACH15 (NR_IRQS_LEGACY + 47)
+#define MX27_INT_UART6 (NR_IRQS_LEGACY + 48)
+#define MX27_INT_UART5 (NR_IRQS_LEGACY + 49)
+#define MX27_INT_FEC (NR_IRQS_LEGACY + 50)
+#define MX27_INT_EMMAPRP (NR_IRQS_LEGACY + 51)
+#define MX27_INT_EMMAPP (NR_IRQS_LEGACY + 52)
+#define MX27_INT_VPU (NR_IRQS_LEGACY + 53)
+#define MX27_INT_USB_HS1 (NR_IRQS_LEGACY + 54)
+#define MX27_INT_USB_HS2 (NR_IRQS_LEGACY + 55)
+#define MX27_INT_USB_OTG (NR_IRQS_LEGACY + 56)
+#define MX27_INT_SCC_SMN (NR_IRQS_LEGACY + 57)
+#define MX27_INT_SCC_SCM (NR_IRQS_LEGACY + 58)
+#define MX27_INT_SAHARA (NR_IRQS_LEGACY + 59)
+#define MX27_INT_SLCDC (NR_IRQS_LEGACY + 60)
+#define MX27_INT_LCDC (NR_IRQS_LEGACY + 61)
+#define MX27_INT_IIM (NR_IRQS_LEGACY + 62)
+#define MX27_INT_CCM (NR_IRQS_LEGACY + 63)
/* fixed DMA request numbers */
#define MX27_DMA_REQ_CSPI3_RX 1
diff --git a/arch/arm/plat-mxc/include/mach/mx2x.h b/arch/arm/plat-mxc/include/mach/mx2x.h
index 6d07839fdec2..11642f5b224c 100644
--- a/arch/arm/plat-mxc/include/mach/mx2x.h
+++ b/arch/arm/plat-mxc/include/mach/mx2x.h
@@ -68,49 +68,50 @@
#define MX2x_CSI_BASE_ADDR (MX2x_SAHB1_BASE_ADDR + 0x0000)
/* fixed interrupt numbers */
-#define MX2x_INT_CSPI3 6
-#define MX2x_INT_GPIO 8
-#define MX2x_INT_SDHC2 10
-#define MX2x_INT_SDHC1 11
-#define MX2x_INT_I2C 12
-#define MX2x_INT_SSI2 13
-#define MX2x_INT_SSI1 14
-#define MX2x_INT_CSPI2 15
-#define MX2x_INT_CSPI1 16
-#define MX2x_INT_UART4 17
-#define MX2x_INT_UART3 18
-#define MX2x_INT_UART2 19
-#define MX2x_INT_UART1 20
-#define MX2x_INT_KPP 21
-#define MX2x_INT_RTC 22
-#define MX2x_INT_PWM 23
-#define MX2x_INT_GPT3 24
-#define MX2x_INT_GPT2 25
-#define MX2x_INT_GPT1 26
-#define MX2x_INT_WDOG 27
-#define MX2x_INT_PCMCIA 28
-#define MX2x_INT_NANDFC 29
-#define MX2x_INT_CSI 31
-#define MX2x_INT_DMACH0 32
-#define MX2x_INT_DMACH1 33
-#define MX2x_INT_DMACH2 34
-#define MX2x_INT_DMACH3 35
-#define MX2x_INT_DMACH4 36
-#define MX2x_INT_DMACH5 37
-#define MX2x_INT_DMACH6 38
-#define MX2x_INT_DMACH7 39
-#define MX2x_INT_DMACH8 40
-#define MX2x_INT_DMACH9 41
-#define MX2x_INT_DMACH10 42
-#define MX2x_INT_DMACH11 43
-#define MX2x_INT_DMACH12 44
-#define MX2x_INT_DMACH13 45
-#define MX2x_INT_DMACH14 46
-#define MX2x_INT_DMACH15 47
-#define MX2x_INT_EMMAPRP 51
-#define MX2x_INT_EMMAPP 52
-#define MX2x_INT_SLCDC 60
-#define MX2x_INT_LCDC 61
+#include <asm/irq.h>
+#define MX2x_INT_CSPI3 (NR_IRQS_LEGACY + 6)
+#define MX2x_INT_GPIO (NR_IRQS_LEGACY + 8)
+#define MX2x_INT_SDHC2 (NR_IRQS_LEGACY + 10)
+#define MX2x_INT_SDHC1 (NR_IRQS_LEGACY + 11)
+#define MX2x_INT_I2C (NR_IRQS_LEGACY + 12)
+#define MX2x_INT_SSI2 (NR_IRQS_LEGACY + 13)
+#define MX2x_INT_SSI1 (NR_IRQS_LEGACY + 14)
+#define MX2x_INT_CSPI2 (NR_IRQS_LEGACY + 15)
+#define MX2x_INT_CSPI1 (NR_IRQS_LEGACY + 16)
+#define MX2x_INT_UART4 (NR_IRQS_LEGACY + 17)
+#define MX2x_INT_UART3 (NR_IRQS_LEGACY + 18)
+#define MX2x_INT_UART2 (NR_IRQS_LEGACY + 19)
+#define MX2x_INT_UART1 (NR_IRQS_LEGACY + 20)
+#define MX2x_INT_KPP (NR_IRQS_LEGACY + 21)
+#define MX2x_INT_RTC (NR_IRQS_LEGACY + 22)
+#define MX2x_INT_PWM (NR_IRQS_LEGACY + 23)
+#define MX2x_INT_GPT3 (NR_IRQS_LEGACY + 24)
+#define MX2x_INT_GPT2 (NR_IRQS_LEGACY + 25)
+#define MX2x_INT_GPT1 (NR_IRQS_LEGACY + 26)
+#define MX2x_INT_WDOG (NR_IRQS_LEGACY + 27)
+#define MX2x_INT_PCMCIA (NR_IRQS_LEGACY + 28)
+#define MX2x_INT_NANDFC (NR_IRQS_LEGACY + 29)
+#define MX2x_INT_CSI (NR_IRQS_LEGACY + 31)
+#define MX2x_INT_DMACH0 (NR_IRQS_LEGACY + 32)
+#define MX2x_INT_DMACH1 (NR_IRQS_LEGACY + 33)
+#define MX2x_INT_DMACH2 (NR_IRQS_LEGACY + 34)
+#define MX2x_INT_DMACH3 (NR_IRQS_LEGACY + 35)
+#define MX2x_INT_DMACH4 (NR_IRQS_LEGACY + 36)
+#define MX2x_INT_DMACH5 (NR_IRQS_LEGACY + 37)
+#define MX2x_INT_DMACH6 (NR_IRQS_LEGACY + 38)
+#define MX2x_INT_DMACH7 (NR_IRQS_LEGACY + 39)
+#define MX2x_INT_DMACH8 (NR_IRQS_LEGACY + 40)
+#define MX2x_INT_DMACH9 (NR_IRQS_LEGACY + 41)
+#define MX2x_INT_DMACH10 (NR_IRQS_LEGACY + 42)
+#define MX2x_INT_DMACH11 (NR_IRQS_LEGACY + 43)
+#define MX2x_INT_DMACH12 (NR_IRQS_LEGACY + 44)
+#define MX2x_INT_DMACH13 (NR_IRQS_LEGACY + 45)
+#define MX2x_INT_DMACH14 (NR_IRQS_LEGACY + 46)
+#define MX2x_INT_DMACH15 (NR_IRQS_LEGACY + 47)
+#define MX2x_INT_EMMAPRP (NR_IRQS_LEGACY + 51)
+#define MX2x_INT_EMMAPP (NR_IRQS_LEGACY + 52)
+#define MX2x_INT_SLCDC (NR_IRQS_LEGACY + 60)
+#define MX2x_INT_LCDC (NR_IRQS_LEGACY + 61)
/* fixed DMA request numbers */
#define MX2x_DMA_REQ_CSPI3_RX 1
diff --git a/arch/arm/plat-mxc/include/mach/mx31.h b/arch/arm/plat-mxc/include/mach/mx31.h
index e27619e442c0..dbced61d9fda 100644
--- a/arch/arm/plat-mxc/include/mach/mx31.h
+++ b/arch/arm/plat-mxc/include/mach/mx31.h
@@ -118,63 +118,67 @@
#define MX31_IO_P2V(x) IMX_IO_P2V(x)
#define MX31_IO_ADDRESS(x) IOMEM(MX31_IO_P2V(x))
-#define MX31_INT_I2C3 3
-#define MX31_INT_I2C2 4
-#define MX31_INT_MPEG4_ENCODER 5
-#define MX31_INT_RTIC 6
-#define MX31_INT_FIRI 7
-#define MX31_INT_SDHC2 8
-#define MX31_INT_SDHC1 9
-#define MX31_INT_I2C1 10
-#define MX31_INT_SSI2 11
-#define MX31_INT_SSI1 12
-#define MX31_INT_CSPI2 13
-#define MX31_INT_CSPI1 14
-#define MX31_INT_ATA 15
-#define MX31_INT_MBX 16
-#define MX31_INT_CSPI3 17
-#define MX31_INT_UART3 18
-#define MX31_INT_IIM 19
-#define MX31_INT_SIM2 20
-#define MX31_INT_SIM1 21
-#define MX31_INT_RNGA 22
-#define MX31_INT_EVTMON 23
-#define MX31_INT_KPP 24
-#define MX31_INT_RTC 25
-#define MX31_INT_PWM 26
-#define MX31_INT_EPIT2 27
-#define MX31_INT_EPIT1 28
-#define MX31_INT_GPT 29
-#define MX31_INT_POWER_FAIL 30
-#define MX31_INT_CCM_DVFS 31
-#define MX31_INT_UART2 32
-#define MX31_INT_NFC 33
-#define MX31_INT_SDMA 34
-#define MX31_INT_USB_HS1 35
-#define MX31_INT_USB_HS2 36
-#define MX31_INT_USB_OTG 37
-#define MX31_INT_MSHC1 39
-#define MX31_INT_MSHC2 40
-#define MX31_INT_IPU_ERR 41
-#define MX31_INT_IPU_SYN 42
-#define MX31_INT_UART1 45
-#define MX31_INT_UART4 46
-#define MX31_INT_UART5 47
-#define MX31_INT_ECT 48
-#define MX31_INT_SCC_SCM 49
-#define MX31_INT_SCC_SMN 50
-#define MX31_INT_GPIO2 51
-#define MX31_INT_GPIO1 52
-#define MX31_INT_CCM 53
-#define MX31_INT_PCMCIA 54
-#define MX31_INT_WDOG 55
-#define MX31_INT_GPIO3 56
-#define MX31_INT_EXT_POWER 58
-#define MX31_INT_EXT_TEMPER 59
-#define MX31_INT_EXT_SENSOR60 60
-#define MX31_INT_EXT_SENSOR61 61
-#define MX31_INT_EXT_WDOG 62
-#define MX31_INT_EXT_TV 63
+/*
+ * Interrupt numbers
+ */
+#include <asm/irq.h>
+#define MX31_INT_I2C3 (NR_IRQS_LEGACY + 3)
+#define MX31_INT_I2C2 (NR_IRQS_LEGACY + 4)
+#define MX31_INT_MPEG4_ENCODER (NR_IRQS_LEGACY + 5)
+#define MX31_INT_RTIC (NR_IRQS_LEGACY + 6)
+#define MX31_INT_FIRI (NR_IRQS_LEGACY + 7)
+#define MX31_INT_SDHC2 (NR_IRQS_LEGACY + 8)
+#define MX31_INT_SDHC1 (NR_IRQS_LEGACY + 9)
+#define MX31_INT_I2C1 (NR_IRQS_LEGACY + 10)
+#define MX31_INT_SSI2 (NR_IRQS_LEGACY + 11)
+#define MX31_INT_SSI1 (NR_IRQS_LEGACY + 12)
+#define MX31_INT_CSPI2 (NR_IRQS_LEGACY + 13)
+#define MX31_INT_CSPI1 (NR_IRQS_LEGACY + 14)
+#define MX31_INT_ATA (NR_IRQS_LEGACY + 15)
+#define MX31_INT_MBX (NR_IRQS_LEGACY + 16)
+#define MX31_INT_CSPI3 (NR_IRQS_LEGACY + 17)
+#define MX31_INT_UART3 (NR_IRQS_LEGACY + 18)
+#define MX31_INT_IIM (NR_IRQS_LEGACY + 19)
+#define MX31_INT_SIM2 (NR_IRQS_LEGACY + 20)
+#define MX31_INT_SIM1 (NR_IRQS_LEGACY + 21)
+#define MX31_INT_RNGA (NR_IRQS_LEGACY + 22)
+#define MX31_INT_EVTMON (NR_IRQS_LEGACY + 23)
+#define MX31_INT_KPP (NR_IRQS_LEGACY + 24)
+#define MX31_INT_RTC (NR_IRQS_LEGACY + 25)
+#define MX31_INT_PWM (NR_IRQS_LEGACY + 26)
+#define MX31_INT_EPIT2 (NR_IRQS_LEGACY + 27)
+#define MX31_INT_EPIT1 (NR_IRQS_LEGACY + 28)
+#define MX31_INT_GPT (NR_IRQS_LEGACY + 29)
+#define MX31_INT_POWER_FAIL (NR_IRQS_LEGACY + 30)
+#define MX31_INT_CCM_DVFS (NR_IRQS_LEGACY + 31)
+#define MX31_INT_UART2 (NR_IRQS_LEGACY + 32)
+#define MX31_INT_NFC (NR_IRQS_LEGACY + 33)
+#define MX31_INT_SDMA (NR_IRQS_LEGACY + 34)
+#define MX31_INT_USB_HS1 (NR_IRQS_LEGACY + 35)
+#define MX31_INT_USB_HS2 (NR_IRQS_LEGACY + 36)
+#define MX31_INT_USB_OTG (NR_IRQS_LEGACY + 37)
+#define MX31_INT_MSHC1 (NR_IRQS_LEGACY + 39)
+#define MX31_INT_MSHC2 (NR_IRQS_LEGACY + 40)
+#define MX31_INT_IPU_ERR (NR_IRQS_LEGACY + 41)
+#define MX31_INT_IPU_SYN (NR_IRQS_LEGACY + 42)
+#define MX31_INT_UART1 (NR_IRQS_LEGACY + 45)
+#define MX31_INT_UART4 (NR_IRQS_LEGACY + 46)
+#define MX31_INT_UART5 (NR_IRQS_LEGACY + 47)
+#define MX31_INT_ECT (NR_IRQS_LEGACY + 48)
+#define MX31_INT_SCC_SCM (NR_IRQS_LEGACY + 49)
+#define MX31_INT_SCC_SMN (NR_IRQS_LEGACY + 50)
+#define MX31_INT_GPIO2 (NR_IRQS_LEGACY + 51)
+#define MX31_INT_GPIO1 (NR_IRQS_LEGACY + 52)
+#define MX31_INT_CCM (NR_IRQS_LEGACY + 53)
+#define MX31_INT_PCMCIA (NR_IRQS_LEGACY + 54)
+#define MX31_INT_WDOG (NR_IRQS_LEGACY + 55)
+#define MX31_INT_GPIO3 (NR_IRQS_LEGACY + 56)
+#define MX31_INT_EXT_POWER (NR_IRQS_LEGACY + 58)
+#define MX31_INT_EXT_TEMPER (NR_IRQS_LEGACY + 59)
+#define MX31_INT_EXT_SENSOR60 (NR_IRQS_LEGACY + 60)
+#define MX31_INT_EXT_SENSOR61 (NR_IRQS_LEGACY + 61)
+#define MX31_INT_EXT_WDOG (NR_IRQS_LEGACY + 62)
+#define MX31_INT_EXT_TV (NR_IRQS_LEGACY + 63)
#define MX31_DMA_REQ_SDHC1 20
#define MX31_DMA_REQ_SDHC2 21
diff --git a/arch/arm/plat-mxc/include/mach/mx35.h b/arch/arm/plat-mxc/include/mach/mx35.h
index 80965a99aa55..2af5d3a699c7 100644
--- a/arch/arm/plat-mxc/include/mach/mx35.h
+++ b/arch/arm/plat-mxc/include/mach/mx35.h
@@ -120,60 +120,61 @@
/*
* Interrupt numbers
*/
-#define MX35_INT_OWIRE 2
-#define MX35_INT_I2C3 3
-#define MX35_INT_I2C2 4
-#define MX35_INT_RTIC 6
-#define MX35_INT_ESDHC1 7
-#define MX35_INT_ESDHC2 8
-#define MX35_INT_ESDHC3 9
-#define MX35_INT_I2C1 10
-#define MX35_INT_SSI1 11
-#define MX35_INT_SSI2 12
-#define MX35_INT_CSPI2 13
-#define MX35_INT_CSPI1 14
-#define MX35_INT_ATA 15
-#define MX35_INT_GPU2D 16
-#define MX35_INT_ASRC 17
-#define MX35_INT_UART3 18
-#define MX35_INT_IIM 19
-#define MX35_INT_RNGA 22
-#define MX35_INT_EVTMON 23
-#define MX35_INT_KPP 24
-#define MX35_INT_RTC 25
-#define MX35_INT_PWM 26
-#define MX35_INT_EPIT2 27
-#define MX35_INT_EPIT1 28
-#define MX35_INT_GPT 29
-#define MX35_INT_POWER_FAIL 30
-#define MX35_INT_UART2 32
-#define MX35_INT_NFC 33
-#define MX35_INT_SDMA 34
-#define MX35_INT_USB_HS 35
-#define MX35_INT_USB_OTG 37
-#define MX35_INT_MSHC1 39
-#define MX35_INT_ESAI 40
-#define MX35_INT_IPU_ERR 41
-#define MX35_INT_IPU_SYN 42
-#define MX35_INT_CAN1 43
-#define MX35_INT_CAN2 44
-#define MX35_INT_UART1 45
-#define MX35_INT_MLB 46
-#define MX35_INT_SPDIF 47
-#define MX35_INT_ECT 48
-#define MX35_INT_SCC_SCM 49
-#define MX35_INT_SCC_SMN 50
-#define MX35_INT_GPIO2 51
-#define MX35_INT_GPIO1 52
-#define MX35_INT_WDOG 55
-#define MX35_INT_GPIO3 56
-#define MX35_INT_FEC 57
-#define MX35_INT_EXT_POWER 58
-#define MX35_INT_EXT_TEMPER 59
-#define MX35_INT_EXT_SENSOR60 60
-#define MX35_INT_EXT_SENSOR61 61
-#define MX35_INT_EXT_WDOG 62
-#define MX35_INT_EXT_TV 63
+#include <asm/irq.h>
+#define MX35_INT_OWIRE (NR_IRQS_LEGACY + 2)
+#define MX35_INT_I2C3 (NR_IRQS_LEGACY + 3)
+#define MX35_INT_I2C2 (NR_IRQS_LEGACY + 4)
+#define MX35_INT_RTIC (NR_IRQS_LEGACY + 6)
+#define MX35_INT_ESDHC1 (NR_IRQS_LEGACY + 7)
+#define MX35_INT_ESDHC2 (NR_IRQS_LEGACY + 8)
+#define MX35_INT_ESDHC3 (NR_IRQS_LEGACY + 9)
+#define MX35_INT_I2C1 (NR_IRQS_LEGACY + 10)
+#define MX35_INT_SSI1 (NR_IRQS_LEGACY + 11)
+#define MX35_INT_SSI2 (NR_IRQS_LEGACY + 12)
+#define MX35_INT_CSPI2 (NR_IRQS_LEGACY + 13)
+#define MX35_INT_CSPI1 (NR_IRQS_LEGACY + 14)
+#define MX35_INT_ATA (NR_IRQS_LEGACY + 15)
+#define MX35_INT_GPU2D (NR_IRQS_LEGACY + 16)
+#define MX35_INT_ASRC (NR_IRQS_LEGACY + 17)
+#define MX35_INT_UART3 (NR_IRQS_LEGACY + 18)
+#define MX35_INT_IIM (NR_IRQS_LEGACY + 19)
+#define MX35_INT_RNGA (NR_IRQS_LEGACY + 22)
+#define MX35_INT_EVTMON (NR_IRQS_LEGACY + 23)
+#define MX35_INT_KPP (NR_IRQS_LEGACY + 24)
+#define MX35_INT_RTC (NR_IRQS_LEGACY + 25)
+#define MX35_INT_PWM (NR_IRQS_LEGACY + 26)
+#define MX35_INT_EPIT2 (NR_IRQS_LEGACY + 27)
+#define MX35_INT_EPIT1 (NR_IRQS_LEGACY + 28)
+#define MX35_INT_GPT (NR_IRQS_LEGACY + 29)
+#define MX35_INT_POWER_FAIL (NR_IRQS_LEGACY + 30)
+#define MX35_INT_UART2 (NR_IRQS_LEGACY + 32)
+#define MX35_INT_NFC (NR_IRQS_LEGACY + 33)
+#define MX35_INT_SDMA (NR_IRQS_LEGACY + 34)
+#define MX35_INT_USB_HS (NR_IRQS_LEGACY + 35)
+#define MX35_INT_USB_OTG (NR_IRQS_LEGACY + 37)
+#define MX35_INT_MSHC1 (NR_IRQS_LEGACY + 39)
+#define MX35_INT_ESAI (NR_IRQS_LEGACY + 40)
+#define MX35_INT_IPU_ERR (NR_IRQS_LEGACY + 41)
+#define MX35_INT_IPU_SYN (NR_IRQS_LEGACY + 42)
+#define MX35_INT_CAN1 (NR_IRQS_LEGACY + 43)
+#define MX35_INT_CAN2 (NR_IRQS_LEGACY + 44)
+#define MX35_INT_UART1 (NR_IRQS_LEGACY + 45)
+#define MX35_INT_MLB (NR_IRQS_LEGACY + 46)
+#define MX35_INT_SPDIF (NR_IRQS_LEGACY + 47)
+#define MX35_INT_ECT (NR_IRQS_LEGACY + 48)
+#define MX35_INT_SCC_SCM (NR_IRQS_LEGACY + 49)
+#define MX35_INT_SCC_SMN (NR_IRQS_LEGACY + 50)
+#define MX35_INT_GPIO2 (NR_IRQS_LEGACY + 51)
+#define MX35_INT_GPIO1 (NR_IRQS_LEGACY + 52)
+#define MX35_INT_WDOG (NR_IRQS_LEGACY + 55)
+#define MX35_INT_GPIO3 (NR_IRQS_LEGACY + 56)
+#define MX35_INT_FEC (NR_IRQS_LEGACY + 57)
+#define MX35_INT_EXT_POWER (NR_IRQS_LEGACY + 58)
+#define MX35_INT_EXT_TEMPER (NR_IRQS_LEGACY + 59)
+#define MX35_INT_EXT_SENSOR60 (NR_IRQS_LEGACY + 60)
+#define MX35_INT_EXT_SENSOR61 (NR_IRQS_LEGACY + 61)
+#define MX35_INT_EXT_WDOG (NR_IRQS_LEGACY + 62)
+#define MX35_INT_EXT_TV (NR_IRQS_LEGACY + 63)
#define MX35_DMA_REQ_SSI2_RX1 22
#define MX35_DMA_REQ_SSI2_TX1 23
diff --git a/arch/arm/plat-mxc/include/mach/mx3x.h b/arch/arm/plat-mxc/include/mach/mx3x.h
index 30dbf424583e..96fb4fbc8ad7 100644
--- a/arch/arm/plat-mxc/include/mach/mx3x.h
+++ b/arch/arm/plat-mxc/include/mach/mx3x.h
@@ -143,44 +143,45 @@
/*
* Interrupt numbers
*/
-#define MX3x_INT_I2C3 3
-#define MX3x_INT_I2C2 4
-#define MX3x_INT_RTIC 6
-#define MX3x_INT_I2C 10
-#define MX3x_INT_CSPI2 13
-#define MX3x_INT_CSPI1 14
-#define MX3x_INT_ATA 15
-#define MX3x_INT_UART3 18
-#define MX3x_INT_IIM 19
-#define MX3x_INT_RNGA 22
-#define MX3x_INT_EVTMON 23
-#define MX3x_INT_KPP 24
-#define MX3x_INT_RTC 25
-#define MX3x_INT_PWM 26
-#define MX3x_INT_EPIT2 27
-#define MX3x_INT_EPIT1 28
-#define MX3x_INT_GPT 29
-#define MX3x_INT_POWER_FAIL 30
-#define MX3x_INT_UART2 32
-#define MX3x_INT_NANDFC 33
-#define MX3x_INT_SDMA 34
-#define MX3x_INT_MSHC1 39
-#define MX3x_INT_IPU_ERR 41
-#define MX3x_INT_IPU_SYN 42
-#define MX3x_INT_UART1 45
-#define MX3x_INT_ECT 48
-#define MX3x_INT_SCC_SCM 49
-#define MX3x_INT_SCC_SMN 50
-#define MX3x_INT_GPIO2 51
-#define MX3x_INT_GPIO1 52
-#define MX3x_INT_WDOG 55
-#define MX3x_INT_GPIO3 56
-#define MX3x_INT_EXT_POWER 58
-#define MX3x_INT_EXT_TEMPER 59
-#define MX3x_INT_EXT_SENSOR60 60
-#define MX3x_INT_EXT_SENSOR61 61
-#define MX3x_INT_EXT_WDOG 62
-#define MX3x_INT_EXT_TV 63
+#include <asm/irq.h>
+#define MX3x_INT_I2C3 (NR_IRQS_LEGACY + 3)
+#define MX3x_INT_I2C2 (NR_IRQS_LEGACY + 4)
+#define MX3x_INT_RTIC (NR_IRQS_LEGACY + 6)
+#define MX3x_INT_I2C (NR_IRQS_LEGACY + 10)
+#define MX3x_INT_CSPI2 (NR_IRQS_LEGACY + 13)
+#define MX3x_INT_CSPI1 (NR_IRQS_LEGACY + 14)
+#define MX3x_INT_ATA (NR_IRQS_LEGACY + 15)
+#define MX3x_INT_UART3 (NR_IRQS_LEGACY + 18)
+#define MX3x_INT_IIM (NR_IRQS_LEGACY + 19)
+#define MX3x_INT_RNGA (NR_IRQS_LEGACY + 22)
+#define MX3x_INT_EVTMON (NR_IRQS_LEGACY + 23)
+#define MX3x_INT_KPP (NR_IRQS_LEGACY + 24)
+#define MX3x_INT_RTC (NR_IRQS_LEGACY + 25)
+#define MX3x_INT_PWM (NR_IRQS_LEGACY + 26)
+#define MX3x_INT_EPIT2 (NR_IRQS_LEGACY + 27)
+#define MX3x_INT_EPIT1 (NR_IRQS_LEGACY + 28)
+#define MX3x_INT_GPT (NR_IRQS_LEGACY + 29)
+#define MX3x_INT_POWER_FAIL (NR_IRQS_LEGACY + 30)
+#define MX3x_INT_UART2 (NR_IRQS_LEGACY + 32)
+#define MX3x_INT_NANDFC (NR_IRQS_LEGACY + 33)
+#define MX3x_INT_SDMA (NR_IRQS_LEGACY + 34)
+#define MX3x_INT_MSHC1 (NR_IRQS_LEGACY + 39)
+#define MX3x_INT_IPU_ERR (NR_IRQS_LEGACY + 41)
+#define MX3x_INT_IPU_SYN (NR_IRQS_LEGACY + 42)
+#define MX3x_INT_UART1 (NR_IRQS_LEGACY + 45)
+#define MX3x_INT_ECT (NR_IRQS_LEGACY + 48)
+#define MX3x_INT_SCC_SCM (NR_IRQS_LEGACY + 49)
+#define MX3x_INT_SCC_SMN (NR_IRQS_LEGACY + 50)
+#define MX3x_INT_GPIO2 (NR_IRQS_LEGACY + 51)
+#define MX3x_INT_GPIO1 (NR_IRQS_LEGACY + 52)
+#define MX3x_INT_WDOG (NR_IRQS_LEGACY + 55)
+#define MX3x_INT_GPIO3 (NR_IRQS_LEGACY + 56)
+#define MX3x_INT_EXT_POWER (NR_IRQS_LEGACY + 58)
+#define MX3x_INT_EXT_TEMPER (NR_IRQS_LEGACY + 59)
+#define MX3x_INT_EXT_SENSOR60 (NR_IRQS_LEGACY + 60)
+#define MX3x_INT_EXT_SENSOR61 (NR_IRQS_LEGACY + 61)
+#define MX3x_INT_EXT_WDOG (NR_IRQS_LEGACY + 62)
+#define MX3x_INT_EXT_TV (NR_IRQS_LEGACY + 63)
#define MX3x_PROD_SIGNATURE 0x1 /* For MX31 */
diff --git a/arch/arm/plat-mxc/include/mach/mx50.h b/arch/arm/plat-mxc/include/mach/mx50.h
index 5f2da75a47f4..09ac19c1570c 100644
--- a/arch/arm/plat-mxc/include/mach/mx50.h
+++ b/arch/arm/plat-mxc/include/mach/mx50.h
@@ -188,99 +188,100 @@
/*
* Interrupt numbers
*/
-#define MX50_INT_MMC_SDHC1 1
-#define MX50_INT_MMC_SDHC2 2
-#define MX50_INT_MMC_SDHC3 3
-#define MX50_INT_MMC_SDHC4 4
-#define MX50_INT_DAP 5
-#define MX50_INT_SDMA 6
-#define MX50_INT_IOMUX 7
-#define MX50_INT_UART4 13
-#define MX50_INT_USB_H1 14
-#define MX50_INT_USB_OTG 18
-#define MX50_INT_DATABAHN 19
-#define MX50_INT_ELCDIF 20
-#define MX50_INT_EPXP 21
-#define MX50_INT_SRTC_NTZ 24
-#define MX50_INT_SRTC_TZ 25
-#define MX50_INT_EPDC 27
-#define MX50_INT_NIC 28
-#define MX50_INT_SSI1 29
-#define MX50_INT_SSI2 30
-#define MX50_INT_UART1 31
-#define MX50_INT_UART2 32
-#define MX50_INT_UART3 33
-#define MX50_INT_RESV34 34
-#define MX50_INT_RESV35 35
-#define MX50_INT_CSPI1 36
-#define MX50_INT_CSPI2 37
-#define MX50_INT_CSPI 38
-#define MX50_INT_GPT 39
-#define MX50_INT_EPIT1 40
-#define MX50_INT_GPIO1_INT7 42
-#define MX50_INT_GPIO1_INT6 43
-#define MX50_INT_GPIO1_INT5 44
-#define MX50_INT_GPIO1_INT4 45
-#define MX50_INT_GPIO1_INT3 46
-#define MX50_INT_GPIO1_INT2 47
-#define MX50_INT_GPIO1_INT1 48
-#define MX50_INT_GPIO1_INT0 49
-#define MX50_INT_GPIO1_LOW 50
-#define MX50_INT_GPIO1_HIGH 51
-#define MX50_INT_GPIO2_LOW 52
-#define MX50_INT_GPIO2_HIGH 53
-#define MX50_INT_GPIO3_LOW 54
-#define MX50_INT_GPIO3_HIGH 55
-#define MX50_INT_GPIO4_LOW 56
-#define MX50_INT_GPIO4_HIGH 57
-#define MX50_INT_WDOG1 58
-#define MX50_INT_KPP 60
-#define MX50_INT_PWM1 61
-#define MX50_INT_I2C1 62
-#define MX50_INT_I2C2 63
-#define MX50_INT_I2C3 64
-#define MX50_INT_RESV65 65
-#define MX50_INT_DCDC 66
-#define MX50_INT_THERMAL_ALARM 67
-#define MX50_INT_ANA3 68
-#define MX50_INT_ANA4 69
-#define MX50_INT_CCM1 71
-#define MX50_INT_CCM2 72
-#define MX50_INT_GPC1 73
-#define MX50_INT_GPC2 74
-#define MX50_INT_SRC 75
-#define MX50_INT_NM 76
-#define MX50_INT_PMU 77
-#define MX50_INT_CTI_IRQ 78
-#define MX50_INT_CTI1_TG0 79
-#define MX50_INT_CTI1_TG1 80
-#define MX50_INT_GPU2_IRQ 84
-#define MX50_INT_GPU2_BUSY 85
-#define MX50_INT_UART5 86
-#define MX50_INT_FEC 87
-#define MX50_INT_OWIRE 88
-#define MX50_INT_CTI1_TG2 89
-#define MX50_INT_SJC 90
-#define MX50_INT_DCP_CHAN1_3 91
-#define MX50_INT_DCP_CHAN0 92
-#define MX50_INT_PWM2 94
-#define MX50_INT_RNGB 97
-#define MX50_INT_CTI1_TG3 98
-#define MX50_INT_RAWNAND_BCH 100
-#define MX50_INT_RAWNAND_GPMI 102
-#define MX50_INT_GPIO5_LOW 103
-#define MX50_INT_GPIO5_HIGH 104
-#define MX50_INT_GPIO6_LOW 105
-#define MX50_INT_GPIO6_HIGH 106
-#define MX50_INT_MSHC 109
-#define MX50_INT_APBHDMA_CHAN0 110
-#define MX50_INT_APBHDMA_CHAN1 111
-#define MX50_INT_APBHDMA_CHAN2 112
-#define MX50_INT_APBHDMA_CHAN3 113
-#define MX50_INT_APBHDMA_CHAN4 114
-#define MX50_INT_APBHDMA_CHAN5 115
-#define MX50_INT_APBHDMA_CHAN6 116
-#define MX50_INT_APBHDMA_CHAN7 117
+#include <asm/irq.h>
+#define MX50_INT_MMC_SDHC1 (NR_IRQS_LEGACY + 1)
+#define MX50_INT_MMC_SDHC2 (NR_IRQS_LEGACY + 2)
+#define MX50_INT_MMC_SDHC3 (NR_IRQS_LEGACY + 3)
+#define MX50_INT_MMC_SDHC4 (NR_IRQS_LEGACY + 4)
+#define MX50_INT_DAP (NR_IRQS_LEGACY + 5)
+#define MX50_INT_SDMA (NR_IRQS_LEGACY + 6)
+#define MX50_INT_IOMUX (NR_IRQS_LEGACY + 7)
+#define MX50_INT_UART4 (NR_IRQS_LEGACY + 13)
+#define MX50_INT_USB_H1 (NR_IRQS_LEGACY + 14)
+#define MX50_INT_USB_OTG (NR_IRQS_LEGACY + 18)
+#define MX50_INT_DATABAHN (NR_IRQS_LEGACY + 19)
+#define MX50_INT_ELCDIF (NR_IRQS_LEGACY + 20)
+#define MX50_INT_EPXP (NR_IRQS_LEGACY + 21)
+#define MX50_INT_SRTC_NTZ (NR_IRQS_LEGACY + 24)
+#define MX50_INT_SRTC_TZ (NR_IRQS_LEGACY + 25)
+#define MX50_INT_EPDC (NR_IRQS_LEGACY + 27)
+#define MX50_INT_NIC (NR_IRQS_LEGACY + 28)
+#define MX50_INT_SSI1 (NR_IRQS_LEGACY + 29)
+#define MX50_INT_SSI2 (NR_IRQS_LEGACY + 30)
+#define MX50_INT_UART1 (NR_IRQS_LEGACY + 31)
+#define MX50_INT_UART2 (NR_IRQS_LEGACY + 32)
+#define MX50_INT_UART3 (NR_IRQS_LEGACY + 33)
+#define MX50_INT_RESV34 (NR_IRQS_LEGACY + 34)
+#define MX50_INT_RESV35 (NR_IRQS_LEGACY + 35)
+#define MX50_INT_CSPI1 (NR_IRQS_LEGACY + 36)
+#define MX50_INT_CSPI2 (NR_IRQS_LEGACY + 37)
+#define MX50_INT_CSPI (NR_IRQS_LEGACY + 38)
+#define MX50_INT_GPT (NR_IRQS_LEGACY + 39)
+#define MX50_INT_EPIT1 (NR_IRQS_LEGACY + 40)
+#define MX50_INT_GPIO1_INT7 (NR_IRQS_LEGACY + 42)
+#define MX50_INT_GPIO1_INT6 (NR_IRQS_LEGACY + 43)
+#define MX50_INT_GPIO1_INT5 (NR_IRQS_LEGACY + 44)
+#define MX50_INT_GPIO1_INT4 (NR_IRQS_LEGACY + 45)
+#define MX50_INT_GPIO1_INT3 (NR_IRQS_LEGACY + 46)
+#define MX50_INT_GPIO1_INT2 (NR_IRQS_LEGACY + 47)
+#define MX50_INT_GPIO1_INT1 (NR_IRQS_LEGACY + 48)
+#define MX50_INT_GPIO1_INT0 (NR_IRQS_LEGACY + 49)
+#define MX50_INT_GPIO1_LOW (NR_IRQS_LEGACY + 50)
+#define MX50_INT_GPIO1_HIGH (NR_IRQS_LEGACY + 51)
+#define MX50_INT_GPIO2_LOW (NR_IRQS_LEGACY + 52)
+#define MX50_INT_GPIO2_HIGH (NR_IRQS_LEGACY + 53)
+#define MX50_INT_GPIO3_LOW (NR_IRQS_LEGACY + 54)
+#define MX50_INT_GPIO3_HIGH (NR_IRQS_LEGACY + 55)
+#define MX50_INT_GPIO4_LOW (NR_IRQS_LEGACY + 56)
+#define MX50_INT_GPIO4_HIGH (NR_IRQS_LEGACY + 57)
+#define MX50_INT_WDOG1 (NR_IRQS_LEGACY + 58)
+#define MX50_INT_KPP (NR_IRQS_LEGACY + 60)
+#define MX50_INT_PWM1 (NR_IRQS_LEGACY + 61)
+#define MX50_INT_I2C1 (NR_IRQS_LEGACY + 62)
+#define MX50_INT_I2C2 (NR_IRQS_LEGACY + 63)
+#define MX50_INT_I2C3 (NR_IRQS_LEGACY + 64)
+#define MX50_INT_RESV65 (NR_IRQS_LEGACY + 65)
+#define MX50_INT_DCDC (NR_IRQS_LEGACY + 66)
+#define MX50_INT_THERMAL_ALARM (NR_IRQS_LEGACY + 67)
+#define MX50_INT_ANA3 (NR_IRQS_LEGACY + 68)
+#define MX50_INT_ANA4 (NR_IRQS_LEGACY + 69)
+#define MX50_INT_CCM1 (NR_IRQS_LEGACY + 71)
+#define MX50_INT_CCM2 (NR_IRQS_LEGACY + 72)
+#define MX50_INT_GPC1 (NR_IRQS_LEGACY + 73)
+#define MX50_INT_GPC2 (NR_IRQS_LEGACY + 74)
+#define MX50_INT_SRC (NR_IRQS_LEGACY + 75)
+#define MX50_INT_NM (NR_IRQS_LEGACY + 76)
+#define MX50_INT_PMU (NR_IRQS_LEGACY + 77)
+#define MX50_INT_CTI_IRQ (NR_IRQS_LEGACY + 78)
+#define MX50_INT_CTI1_TG0 (NR_IRQS_LEGACY + 79)
+#define MX50_INT_CTI1_TG1 (NR_IRQS_LEGACY + 80)
+#define MX50_INT_GPU2_IRQ (NR_IRQS_LEGACY + 84)
+#define MX50_INT_GPU2_BUSY (NR_IRQS_LEGACY + 85)
+#define MX50_INT_UART5 (NR_IRQS_LEGACY + 86)
+#define MX50_INT_FEC (NR_IRQS_LEGACY + 87)
+#define MX50_INT_OWIRE (NR_IRQS_LEGACY + 88)
+#define MX50_INT_CTI1_TG2 (NR_IRQS_LEGACY + 89)
+#define MX50_INT_SJC (NR_IRQS_LEGACY + 90)
+#define MX50_INT_DCP_CHAN1_3 (NR_IRQS_LEGACY + 91)
+#define MX50_INT_DCP_CHAN0 (NR_IRQS_LEGACY + 92)
+#define MX50_INT_PWM2 (NR_IRQS_LEGACY + 94)
+#define MX50_INT_RNGB (NR_IRQS_LEGACY + 97)
+#define MX50_INT_CTI1_TG3 (NR_IRQS_LEGACY + 98)
+#define MX50_INT_RAWNAND_BCH (NR_IRQS_LEGACY + 100)
+#define MX50_INT_RAWNAND_GPMI (NR_IRQS_LEGACY + 102)
+#define MX50_INT_GPIO5_LOW (NR_IRQS_LEGACY + 103)
+#define MX50_INT_GPIO5_HIGH (NR_IRQS_LEGACY + 104)
+#define MX50_INT_GPIO6_LOW (NR_IRQS_LEGACY + 105)
+#define MX50_INT_GPIO6_HIGH (NR_IRQS_LEGACY + 106)
+#define MX50_INT_MSHC (NR_IRQS_LEGACY + 109)
+#define MX50_INT_APBHDMA_CHAN0 (NR_IRQS_LEGACY + 110)
+#define MX50_INT_APBHDMA_CHAN1 (NR_IRQS_LEGACY + 111)
+#define MX50_INT_APBHDMA_CHAN2 (NR_IRQS_LEGACY + 112)
+#define MX50_INT_APBHDMA_CHAN3 (NR_IRQS_LEGACY + 113)
+#define MX50_INT_APBHDMA_CHAN4 (NR_IRQS_LEGACY + 114)
+#define MX50_INT_APBHDMA_CHAN5 (NR_IRQS_LEGACY + 115)
+#define MX50_INT_APBHDMA_CHAN6 (NR_IRQS_LEGACY + 116)
+#define MX50_INT_APBHDMA_CHAN7 (NR_IRQS_LEGACY + 117)
#if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS)
extern int mx50_revision(void);
diff --git a/arch/arm/plat-mxc/include/mach/mx51.h b/arch/arm/plat-mxc/include/mach/mx51.h
index cdf07c65ec1e..af844f76261a 100644
--- a/arch/arm/plat-mxc/include/mach/mx51.h
+++ b/arch/arm/plat-mxc/include/mach/mx51.h
@@ -232,110 +232,111 @@
/*
* Interrupt numbers
*/
-#define MX51_INT_BASE 0
-#define MX51_INT_RESV0 0
-#define MX51_INT_ESDHC1 1
-#define MX51_INT_ESDHC2 2
-#define MX51_INT_ESDHC3 3
-#define MX51_INT_ESDHC4 4
-#define MX51_INT_RESV5 5
-#define MX51_INT_SDMA 6
-#define MX51_INT_IOMUX 7
-#define MX51_INT_NFC 8
-#define MX51_INT_VPU 9
-#define MX51_INT_IPU_ERR 10
-#define MX51_INT_IPU_SYN 11
-#define MX51_INT_GPU 12
-#define MX51_INT_RESV13 13
-#define MX51_INT_USB_HS1 14
-#define MX51_INT_EMI 15
-#define MX51_INT_USB_HS2 16
-#define MX51_INT_USB_HS3 17
-#define MX51_INT_USB_OTG 18
-#define MX51_INT_SAHARA_H0 19
-#define MX51_INT_SAHARA_H1 20
-#define MX51_INT_SCC_SMN 21
-#define MX51_INT_SCC_STZ 22
-#define MX51_INT_SCC_SCM 23
-#define MX51_INT_SRTC_NTZ 24
-#define MX51_INT_SRTC_TZ 25
-#define MX51_INT_RTIC 26
-#define MX51_INT_CSU 27
-#define MX51_INT_SLIM_B 28
-#define MX51_INT_SSI1 29
-#define MX51_INT_SSI2 30
-#define MX51_INT_UART1 31
-#define MX51_INT_UART2 32
-#define MX51_INT_UART3 33
-#define MX51_INT_RESV34 34
-#define MX51_INT_RESV35 35
-#define MX51_INT_ECSPI1 36
-#define MX51_INT_ECSPI2 37
-#define MX51_INT_CSPI 38
-#define MX51_INT_GPT 39
-#define MX51_INT_EPIT1 40
-#define MX51_INT_EPIT2 41
-#define MX51_INT_GPIO1_INT7 42
-#define MX51_INT_GPIO1_INT6 43
-#define MX51_INT_GPIO1_INT5 44
-#define MX51_INT_GPIO1_INT4 45
-#define MX51_INT_GPIO1_INT3 46
-#define MX51_INT_GPIO1_INT2 47
-#define MX51_INT_GPIO1_INT1 48
-#define MX51_INT_GPIO1_INT0 49
-#define MX51_INT_GPIO1_LOW 50
-#define MX51_INT_GPIO1_HIGH 51
-#define MX51_INT_GPIO2_LOW 52
-#define MX51_INT_GPIO2_HIGH 53
-#define MX51_INT_GPIO3_LOW 54
-#define MX51_INT_GPIO3_HIGH 55
-#define MX51_INT_GPIO4_LOW 56
-#define MX51_INT_GPIO4_HIGH 57
-#define MX51_INT_WDOG1 58
-#define MX51_INT_WDOG2 59
-#define MX51_INT_KPP 60
-#define MX51_INT_PWM1 61
-#define MX51_INT_I2C1 62
-#define MX51_INT_I2C2 63
-#define MX51_INT_HS_I2C 64
-#define MX51_INT_RESV65 65
-#define MX51_INT_RESV66 66
-#define MX51_INT_SIM_IPB 67
-#define MX51_INT_SIM_DAT 68
-#define MX51_INT_IIM 69
-#define MX51_INT_ATA 70
-#define MX51_INT_CCM1 71
-#define MX51_INT_CCM2 72
-#define MX51_INT_GPC1 73
-#define MX51_INT_GPC2 74
-#define MX51_INT_SRC 75
-#define MX51_INT_NM 76
-#define MX51_INT_PMU 77
-#define MX51_INT_CTI_IRQ 78
-#define MX51_INT_CTI1_TG0 79
-#define MX51_INT_CTI1_TG1 80
-#define MX51_INT_MCG_ERR 81
-#define MX51_INT_MCG_TMR 82
-#define MX51_INT_MCG_FUNC 83
-#define MX51_INT_GPU2_IRQ 84
-#define MX51_INT_GPU2_BUSY 85
-#define MX51_INT_RESV86 86
-#define MX51_INT_FEC 87
-#define MX51_INT_OWIRE 88
-#define MX51_INT_CTI1_TG2 89
-#define MX51_INT_SJC 90
-#define MX51_INT_SPDIF 91
-#define MX51_INT_TVE 92
-#define MX51_INT_FIRI 93
-#define MX51_INT_PWM2 94
-#define MX51_INT_SLIM_EXP 95
-#define MX51_INT_SSI3 96
-#define MX51_INT_EMI_BOOT 97
-#define MX51_INT_CTI1_TG3 98
-#define MX51_INT_SMC_RX 99
-#define MX51_INT_VPU_IDLE 100
-#define MX51_INT_EMI_NFC 101
-#define MX51_INT_GPU_IDLE 102
+#include <asm/irq.h>
+#define MX51_INT_BASE (NR_IRQS_LEGACY + 0)
+#define MX51_INT_RESV0 (NR_IRQS_LEGACY + 0)
+#define MX51_INT_ESDHC1 (NR_IRQS_LEGACY + 1)
+#define MX51_INT_ESDHC2 (NR_IRQS_LEGACY + 2)
+#define MX51_INT_ESDHC3 (NR_IRQS_LEGACY + 3)
+#define MX51_INT_ESDHC4 (NR_IRQS_LEGACY + 4)
+#define MX51_INT_RESV5 (NR_IRQS_LEGACY + 5)
+#define MX51_INT_SDMA (NR_IRQS_LEGACY + 6)
+#define MX51_INT_IOMUX (NR_IRQS_LEGACY + 7)
+#define MX51_INT_NFC (NR_IRQS_LEGACY + 8)
+#define MX51_INT_VPU (NR_IRQS_LEGACY + 9)
+#define MX51_INT_IPU_ERR (NR_IRQS_LEGACY + 10)
+#define MX51_INT_IPU_SYN (NR_IRQS_LEGACY + 11)
+#define MX51_INT_GPU (NR_IRQS_LEGACY + 12)
+#define MX51_INT_RESV13 (NR_IRQS_LEGACY + 13)
+#define MX51_INT_USB_HS1 (NR_IRQS_LEGACY + 14)
+#define MX51_INT_EMI (NR_IRQS_LEGACY + 15)
+#define MX51_INT_USB_HS2 (NR_IRQS_LEGACY + 16)
+#define MX51_INT_USB_HS3 (NR_IRQS_LEGACY + 17)
+#define MX51_INT_USB_OTG (NR_IRQS_LEGACY + 18)
+#define MX51_INT_SAHARA_H0 (NR_IRQS_LEGACY + 19)
+#define MX51_INT_SAHARA_H1 (NR_IRQS_LEGACY + 20)
+#define MX51_INT_SCC_SMN (NR_IRQS_LEGACY + 21)
+#define MX51_INT_SCC_STZ (NR_IRQS_LEGACY + 22)
+#define MX51_INT_SCC_SCM (NR_IRQS_LEGACY + 23)
+#define MX51_INT_SRTC_NTZ (NR_IRQS_LEGACY + 24)
+#define MX51_INT_SRTC_TZ (NR_IRQS_LEGACY + 25)
+#define MX51_INT_RTIC (NR_IRQS_LEGACY + 26)
+#define MX51_INT_CSU (NR_IRQS_LEGACY + 27)
+#define MX51_INT_SLIM_B (NR_IRQS_LEGACY + 28)
+#define MX51_INT_SSI1 (NR_IRQS_LEGACY + 29)
+#define MX51_INT_SSI2 (NR_IRQS_LEGACY + 30)
+#define MX51_INT_UART1 (NR_IRQS_LEGACY + 31)
+#define MX51_INT_UART2 (NR_IRQS_LEGACY + 32)
+#define MX51_INT_UART3 (NR_IRQS_LEGACY + 33)
+#define MX51_INT_RESV34 (NR_IRQS_LEGACY + 34)
+#define MX51_INT_RESV35 (NR_IRQS_LEGACY + 35)
+#define MX51_INT_ECSPI1 (NR_IRQS_LEGACY + 36)
+#define MX51_INT_ECSPI2 (NR_IRQS_LEGACY + 37)
+#define MX51_INT_CSPI (NR_IRQS_LEGACY + 38)
+#define MX51_INT_GPT (NR_IRQS_LEGACY + 39)
+#define MX51_INT_EPIT1 (NR_IRQS_LEGACY + 40)
+#define MX51_INT_EPIT2 (NR_IRQS_LEGACY + 41)
+#define MX51_INT_GPIO1_INT7 (NR_IRQS_LEGACY + 42)
+#define MX51_INT_GPIO1_INT6 (NR_IRQS_LEGACY + 43)
+#define MX51_INT_GPIO1_INT5 (NR_IRQS_LEGACY + 44)
+#define MX51_INT_GPIO1_INT4 (NR_IRQS_LEGACY + 45)
+#define MX51_INT_GPIO1_INT3 (NR_IRQS_LEGACY + 46)
+#define MX51_INT_GPIO1_INT2 (NR_IRQS_LEGACY + 47)
+#define MX51_INT_GPIO1_INT1 (NR_IRQS_LEGACY + 48)
+#define MX51_INT_GPIO1_INT0 (NR_IRQS_LEGACY + 49)
+#define MX51_INT_GPIO1_LOW (NR_IRQS_LEGACY + 50)
+#define MX51_INT_GPIO1_HIGH (NR_IRQS_LEGACY + 51)
+#define MX51_INT_GPIO2_LOW (NR_IRQS_LEGACY + 52)
+#define MX51_INT_GPIO2_HIGH (NR_IRQS_LEGACY + 53)
+#define MX51_INT_GPIO3_LOW (NR_IRQS_LEGACY + 54)
+#define MX51_INT_GPIO3_HIGH (NR_IRQS_LEGACY + 55)
+#define MX51_INT_GPIO4_LOW (NR_IRQS_LEGACY + 56)
+#define MX51_INT_GPIO4_HIGH (NR_IRQS_LEGACY + 57)
+#define MX51_INT_WDOG1 (NR_IRQS_LEGACY + 58)
+#define MX51_INT_WDOG2 (NR_IRQS_LEGACY + 59)
+#define MX51_INT_KPP (NR_IRQS_LEGACY + 60)
+#define MX51_INT_PWM1 (NR_IRQS_LEGACY + 61)
+#define MX51_INT_I2C1 (NR_IRQS_LEGACY + 62)
+#define MX51_INT_I2C2 (NR_IRQS_LEGACY + 63)
+#define MX51_INT_HS_I2C (NR_IRQS_LEGACY + 64)
+#define MX51_INT_RESV65 (NR_IRQS_LEGACY + 65)
+#define MX51_INT_RESV66 (NR_IRQS_LEGACY + 66)
+#define MX51_INT_SIM_IPB (NR_IRQS_LEGACY + 67)
+#define MX51_INT_SIM_DAT (NR_IRQS_LEGACY + 68)
+#define MX51_INT_IIM (NR_IRQS_LEGACY + 69)
+#define MX51_INT_ATA (NR_IRQS_LEGACY + 70)
+#define MX51_INT_CCM1 (NR_IRQS_LEGACY + 71)
+#define MX51_INT_CCM2 (NR_IRQS_LEGACY + 72)
+#define MX51_INT_GPC1 (NR_IRQS_LEGACY + 73)
+#define MX51_INT_GPC2 (NR_IRQS_LEGACY + 74)
+#define MX51_INT_SRC (NR_IRQS_LEGACY + 75)
+#define MX51_INT_NM (NR_IRQS_LEGACY + 76)
+#define MX51_INT_PMU (NR_IRQS_LEGACY + 77)
+#define MX51_INT_CTI_IRQ (NR_IRQS_LEGACY + 78)
+#define MX51_INT_CTI1_TG0 (NR_IRQS_LEGACY + 79)
+#define MX51_INT_CTI1_TG1 (NR_IRQS_LEGACY + 80)
+#define MX51_INT_MCG_ERR (NR_IRQS_LEGACY + 81)
+#define MX51_INT_MCG_TMR (NR_IRQS_LEGACY + 82)
+#define MX51_INT_MCG_FUNC (NR_IRQS_LEGACY + 83)
+#define MX51_INT_GPU2_IRQ (NR_IRQS_LEGACY + 84)
+#define MX51_INT_GPU2_BUSY (NR_IRQS_LEGACY + 85)
+#define MX51_INT_RESV86 (NR_IRQS_LEGACY + 86)
+#define MX51_INT_FEC (NR_IRQS_LEGACY + 87)
+#define MX51_INT_OWIRE (NR_IRQS_LEGACY + 88)
+#define MX51_INT_CTI1_TG2 (NR_IRQS_LEGACY + 89)
+#define MX51_INT_SJC (NR_IRQS_LEGACY + 90)
+#define MX51_INT_SPDIF (NR_IRQS_LEGACY + 91)
+#define MX51_INT_TVE (NR_IRQS_LEGACY + 92)
+#define MX51_INT_FIRI (NR_IRQS_LEGACY + 93)
+#define MX51_INT_PWM2 (NR_IRQS_LEGACY + 94)
+#define MX51_INT_SLIM_EXP (NR_IRQS_LEGACY + 95)
+#define MX51_INT_SSI3 (NR_IRQS_LEGACY + 96)
+#define MX51_INT_EMI_BOOT (NR_IRQS_LEGACY + 97)
+#define MX51_INT_CTI1_TG3 (NR_IRQS_LEGACY + 98)
+#define MX51_INT_SMC_RX (NR_IRQS_LEGACY + 99)
+#define MX51_INT_VPU_IDLE (NR_IRQS_LEGACY + 100)
+#define MX51_INT_EMI_NFC (NR_IRQS_LEGACY + 101)
+#define MX51_INT_GPU_IDLE (NR_IRQS_LEGACY + 102)
#if !defined(__ASSEMBLY__) && !defined(__MXC_BOOT_UNCOMPRESS)
extern int mx51_revision(void);
diff --git a/arch/arm/plat-mxc/include/mach/mx53.h b/arch/arm/plat-mxc/include/mach/mx53.h
index a37e8c353994..f829d1c22501 100644
--- a/arch/arm/plat-mxc/include/mach/mx53.h
+++ b/arch/arm/plat-mxc/include/mach/mx53.h
@@ -229,113 +229,114 @@
/*
* Interrupt numbers
*/
-#define MX53_INT_RESV0 0
-#define MX53_INT_ESDHC1 1
-#define MX53_INT_ESDHC2 2
-#define MX53_INT_ESDHC3 3
-#define MX53_INT_ESDHC4 4
-#define MX53_INT_DAP 5
-#define MX53_INT_SDMA 6
-#define MX53_INT_IOMUX 7
-#define MX53_INT_NFC 8
-#define MX53_INT_VPU 9
-#define MX53_INT_IPU_ERR 10
-#define MX53_INT_IPU_SYN 11
-#define MX53_INT_GPU 12
-#define MX53_INT_UART4 13
-#define MX53_INT_USB_H1 14
-#define MX53_INT_EMI 15
-#define MX53_INT_USB_H2 16
-#define MX53_INT_USB_H3 17
-#define MX53_INT_USB_OTG 18
-#define MX53_INT_SAHARA_H0 19
-#define MX53_INT_SAHARA_H1 20
-#define MX53_INT_SCC_SMN 21
-#define MX53_INT_SCC_STZ 22
-#define MX53_INT_SCC_SCM 23
-#define MX53_INT_SRTC_NTZ 24
-#define MX53_INT_SRTC_TZ 25
-#define MX53_INT_RTIC 26
-#define MX53_INT_CSU 27
-#define MX53_INT_SATA 28
-#define MX53_INT_SSI1 29
-#define MX53_INT_SSI2 30
-#define MX53_INT_UART1 31
-#define MX53_INT_UART2 32
-#define MX53_INT_UART3 33
-#define MX53_INT_RTC 34
-#define MX53_INT_PTP 35
-#define MX53_INT_ECSPI1 36
-#define MX53_INT_ECSPI2 37
-#define MX53_INT_CSPI 38
-#define MX53_INT_GPT 39
-#define MX53_INT_EPIT1 40
-#define MX53_INT_EPIT2 41
-#define MX53_INT_GPIO1_INT7 42
-#define MX53_INT_GPIO1_INT6 43
-#define MX53_INT_GPIO1_INT5 44
-#define MX53_INT_GPIO1_INT4 45
-#define MX53_INT_GPIO1_INT3 46
-#define MX53_INT_GPIO1_INT2 47
-#define MX53_INT_GPIO1_INT1 48
-#define MX53_INT_GPIO1_INT0 49
-#define MX53_INT_GPIO1_LOW 50
-#define MX53_INT_GPIO1_HIGH 51
-#define MX53_INT_GPIO2_LOW 52
-#define MX53_INT_GPIO2_HIGH 53
-#define MX53_INT_GPIO3_LOW 54
-#define MX53_INT_GPIO3_HIGH 55
-#define MX53_INT_GPIO4_LOW 56
-#define MX53_INT_GPIO4_HIGH 57
-#define MX53_INT_WDOG1 58
-#define MX53_INT_WDOG2 59
-#define MX53_INT_KPP 60
-#define MX53_INT_PWM1 61
-#define MX53_INT_I2C1 62
-#define MX53_INT_I2C2 63
-#define MX53_INT_I2C3 64
-#define MX53_INT_MLB 65
-#define MX53_INT_ASRC 66
-#define MX53_INT_SPDIF 67
-#define MX53_INT_SIM_DAT 68
-#define MX53_INT_IIM 69
-#define MX53_INT_ATA 70
-#define MX53_INT_CCM1 71
-#define MX53_INT_CCM2 72
-#define MX53_INT_GPC1 73
-#define MX53_INT_GPC2 74
-#define MX53_INT_SRC 75
-#define MX53_INT_NM 76
-#define MX53_INT_PMU 77
-#define MX53_INT_CTI_IRQ 78
-#define MX53_INT_CTI1_TG0 79
-#define MX53_INT_CTI1_TG1 80
-#define MX53_INT_ESAI 81
-#define MX53_INT_CAN1 82
-#define MX53_INT_CAN2 83
-#define MX53_INT_GPU2_IRQ 84
-#define MX53_INT_GPU2_BUSY 85
-#define MX53_INT_UART5 86
-#define MX53_INT_FEC 87
-#define MX53_INT_OWIRE 88
-#define MX53_INT_CTI1_TG2 89
-#define MX53_INT_SJC 90
-#define MX53_INT_TVE 92
-#define MX53_INT_FIRI 93
-#define MX53_INT_PWM2 94
-#define MX53_INT_SLIM_EXP 95
-#define MX53_INT_SSI3 96
-#define MX53_INT_EMI_BOOT 97
-#define MX53_INT_CTI1_TG3 98
-#define MX53_INT_SMC_RX 99
-#define MX53_INT_VPU_IDLE 100
-#define MX53_INT_EMI_NFC 101
-#define MX53_INT_GPU_IDLE 102
-#define MX53_INT_GPIO5_LOW 103
-#define MX53_INT_GPIO5_HIGH 104
-#define MX53_INT_GPIO6_LOW 105
-#define MX53_INT_GPIO6_HIGH 106
-#define MX53_INT_GPIO7_LOW 107
-#define MX53_INT_GPIO7_HIGH 108
+#include <asm/irq.h>
+#define MX53_INT_RESV0 (NR_IRQS_LEGACY + 0)
+#define MX53_INT_ESDHC1 (NR_IRQS_LEGACY + 1)
+#define MX53_INT_ESDHC2 (NR_IRQS_LEGACY + 2)
+#define MX53_INT_ESDHC3 (NR_IRQS_LEGACY + 3)
+#define MX53_INT_ESDHC4 (NR_IRQS_LEGACY + 4)
+#define MX53_INT_DAP (NR_IRQS_LEGACY + 5)
+#define MX53_INT_SDMA (NR_IRQS_LEGACY + 6)
+#define MX53_INT_IOMUX (NR_IRQS_LEGACY + 7)
+#define MX53_INT_NFC (NR_IRQS_LEGACY + 8)
+#define MX53_INT_VPU (NR_IRQS_LEGACY + 9)
+#define MX53_INT_IPU_ERR (NR_IRQS_LEGACY + 10)
+#define MX53_INT_IPU_SYN (NR_IRQS_LEGACY + 11)
+#define MX53_INT_GPU (NR_IRQS_LEGACY + 12)
+#define MX53_INT_UART4 (NR_IRQS_LEGACY + 13)
+#define MX53_INT_USB_H1 (NR_IRQS_LEGACY + 14)
+#define MX53_INT_EMI (NR_IRQS_LEGACY + 15)
+#define MX53_INT_USB_H2 (NR_IRQS_LEGACY + 16)
+#define MX53_INT_USB_H3 (NR_IRQS_LEGACY + 17)
+#define MX53_INT_USB_OTG (NR_IRQS_LEGACY + 18)
+#define MX53_INT_SAHARA_H0 (NR_IRQS_LEGACY + 19)
+#define MX53_INT_SAHARA_H1 (NR_IRQS_LEGACY + 20)
+#define MX53_INT_SCC_SMN (NR_IRQS_LEGACY + 21)
+#define MX53_INT_SCC_STZ (NR_IRQS_LEGACY + 22)
+#define MX53_INT_SCC_SCM (NR_IRQS_LEGACY + 23)
+#define MX53_INT_SRTC_NTZ (NR_IRQS_LEGACY + 24)
+#define MX53_INT_SRTC_TZ (NR_IRQS_LEGACY + 25)
+#define MX53_INT_RTIC (NR_IRQS_LEGACY + 26)
+#define MX53_INT_CSU (NR_IRQS_LEGACY + 27)
+#define MX53_INT_SATA (NR_IRQS_LEGACY + 28)
+#define MX53_INT_SSI1 (NR_IRQS_LEGACY + 29)
+#define MX53_INT_SSI2 (NR_IRQS_LEGACY + 30)
+#define MX53_INT_UART1 (NR_IRQS_LEGACY + 31)
+#define MX53_INT_UART2 (NR_IRQS_LEGACY + 32)
+#define MX53_INT_UART3 (NR_IRQS_LEGACY + 33)
+#define MX53_INT_RTC (NR_IRQS_LEGACY + 34)
+#define MX53_INT_PTP (NR_IRQS_LEGACY + 35)
+#define MX53_INT_ECSPI1 (NR_IRQS_LEGACY + 36)
+#define MX53_INT_ECSPI2 (NR_IRQS_LEGACY + 37)
+#define MX53_INT_CSPI (NR_IRQS_LEGACY + 38)
+#define MX53_INT_GPT (NR_IRQS_LEGACY + 39)
+#define MX53_INT_EPIT1 (NR_IRQS_LEGACY + 40)
+#define MX53_INT_EPIT2 (NR_IRQS_LEGACY + 41)
+#define MX53_INT_GPIO1_INT7 (NR_IRQS_LEGACY + 42)
+#define MX53_INT_GPIO1_INT6 (NR_IRQS_LEGACY + 43)
+#define MX53_INT_GPIO1_INT5 (NR_IRQS_LEGACY + 44)
+#define MX53_INT_GPIO1_INT4 (NR_IRQS_LEGACY + 45)
+#define MX53_INT_GPIO1_INT3 (NR_IRQS_LEGACY + 46)
+#define MX53_INT_GPIO1_INT2 (NR_IRQS_LEGACY + 47)
+#define MX53_INT_GPIO1_INT1 (NR_IRQS_LEGACY + 48)
+#define MX53_INT_GPIO1_INT0 (NR_IRQS_LEGACY + 49)
+#define MX53_INT_GPIO1_LOW (NR_IRQS_LEGACY + 50)
+#define MX53_INT_GPIO1_HIGH (NR_IRQS_LEGACY + 51)
+#define MX53_INT_GPIO2_LOW (NR_IRQS_LEGACY + 52)
+#define MX53_INT_GPIO2_HIGH (NR_IRQS_LEGACY + 53)
+#define MX53_INT_GPIO3_LOW (NR_IRQS_LEGACY + 54)
+#define MX53_INT_GPIO3_HIGH (NR_IRQS_LEGACY + 55)
+#define MX53_INT_GPIO4_LOW (NR_IRQS_LEGACY + 56)
+#define MX53_INT_GPIO4_HIGH (NR_IRQS_LEGACY + 57)
+#define MX53_INT_WDOG1 (NR_IRQS_LEGACY + 58)
+#define MX53_INT_WDOG2 (NR_IRQS_LEGACY + 59)
+#define MX53_INT_KPP (NR_IRQS_LEGACY + 60)
+#define MX53_INT_PWM1 (NR_IRQS_LEGACY + 61)
+#define MX53_INT_I2C1 (NR_IRQS_LEGACY + 62)
+#define MX53_INT_I2C2 (NR_IRQS_LEGACY + 63)
+#define MX53_INT_I2C3 (NR_IRQS_LEGACY + 64)
+#define MX53_INT_MLB (NR_IRQS_LEGACY + 65)
+#define MX53_INT_ASRC (NR_IRQS_LEGACY + 66)
+#define MX53_INT_SPDIF (NR_IRQS_LEGACY + 67)
+#define MX53_INT_SIM_DAT (NR_IRQS_LEGACY + 68)
+#define MX53_INT_IIM (NR_IRQS_LEGACY + 69)
+#define MX53_INT_ATA (NR_IRQS_LEGACY + 70)
+#define MX53_INT_CCM1 (NR_IRQS_LEGACY + 71)
+#define MX53_INT_CCM2 (NR_IRQS_LEGACY + 72)
+#define MX53_INT_GPC1 (NR_IRQS_LEGACY + 73)
+#define MX53_INT_GPC2 (NR_IRQS_LEGACY + 74)
+#define MX53_INT_SRC (NR_IRQS_LEGACY + 75)
+#define MX53_INT_NM (NR_IRQS_LEGACY + 76)
+#define MX53_INT_PMU (NR_IRQS_LEGACY + 77)
+#define MX53_INT_CTI_IRQ (NR_IRQS_LEGACY + 78)
+#define MX53_INT_CTI1_TG0 (NR_IRQS_LEGACY + 79)
+#define MX53_INT_CTI1_TG1 (NR_IRQS_LEGACY + 80)
+#define MX53_INT_ESAI (NR_IRQS_LEGACY + 81)
+#define MX53_INT_CAN1 (NR_IRQS_LEGACY + 82)
+#define MX53_INT_CAN2 (NR_IRQS_LEGACY + 83)
+#define MX53_INT_GPU2_IRQ (NR_IRQS_LEGACY + 84)
+#define MX53_INT_GPU2_BUSY (NR_IRQS_LEGACY + 85)
+#define MX53_INT_UART5 (NR_IRQS_LEGACY + 86)
+#define MX53_INT_FEC (NR_IRQS_LEGACY + 87)
+#define MX53_INT_OWIRE (NR_IRQS_LEGACY + 88)
+#define MX53_INT_CTI1_TG2 (NR_IRQS_LEGACY + 89)
+#define MX53_INT_SJC (NR_IRQS_LEGACY + 90)
+#define MX53_INT_TVE (NR_IRQS_LEGACY + 92)
+#define MX53_INT_FIRI (NR_IRQS_LEGACY + 93)
+#define MX53_INT_PWM2 (NR_IRQS_LEGACY + 94)
+#define MX53_INT_SLIM_EXP (NR_IRQS_LEGACY + 95)
+#define MX53_INT_SSI3 (NR_IRQS_LEGACY + 96)
+#define MX53_INT_EMI_BOOT (NR_IRQS_LEGACY + 97)
+#define MX53_INT_CTI1_TG3 (NR_IRQS_LEGACY + 98)
+#define MX53_INT_SMC_RX (NR_IRQS_LEGACY + 99)
+#define MX53_INT_VPU_IDLE (NR_IRQS_LEGACY + 100)
+#define MX53_INT_EMI_NFC (NR_IRQS_LEGACY + 101)
+#define MX53_INT_GPU_IDLE (NR_IRQS_LEGACY + 102)
+#define MX53_INT_GPIO5_LOW (NR_IRQS_LEGACY + 103)
+#define MX53_INT_GPIO5_HIGH (NR_IRQS_LEGACY + 104)
+#define MX53_INT_GPIO6_LOW (NR_IRQS_LEGACY + 105)
+#define MX53_INT_GPIO6_HIGH (NR_IRQS_LEGACY + 106)
+#define MX53_INT_GPIO7_LOW (NR_IRQS_LEGACY + 107)
+#define MX53_INT_GPIO7_HIGH (NR_IRQS_LEGACY + 108)
#endif /* ifndef __MACH_MX53_H__ */
diff --git a/arch/arm/plat-mxc/include/mach/mxc_ehci.h b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
index 9ffd1bbe615f..7eb9d1329671 100644
--- a/arch/arm/plat-mxc/include/mach/mxc_ehci.h
+++ b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
@@ -20,13 +20,15 @@
#define MXC_EHCI_INTERFACE_MASK (0xf)
#define MXC_EHCI_POWER_PINS_ENABLED (1 << 5)
-#define MXC_EHCI_TTL_ENABLED (1 << 6)
-
-#define MXC_EHCI_INTERNAL_PHY (1 << 7)
-#define MXC_EHCI_IPPUE_DOWN (1 << 8)
-#define MXC_EHCI_IPPUE_UP (1 << 9)
-#define MXC_EHCI_WAKEUP_ENABLED (1 << 10)
-#define MXC_EHCI_ITC_NO_THRESHOLD (1 << 11)
+#define MXC_EHCI_PWR_PIN_ACTIVE_HIGH (1 << 6)
+#define MXC_EHCI_OC_PIN_ACTIVE_LOW (1 << 7)
+#define MXC_EHCI_TTL_ENABLED (1 << 8)
+
+#define MXC_EHCI_INTERNAL_PHY (1 << 9)
+#define MXC_EHCI_IPPUE_DOWN (1 << 10)
+#define MXC_EHCI_IPPUE_UP (1 << 11)
+#define MXC_EHCI_WAKEUP_ENABLED (1 << 12)
+#define MXC_EHCI_ITC_NO_THRESHOLD (1 << 13)
#define MXC_USBCTRL_OFFSET 0
#define MXC_USB_PHY_CTR_FUNC_OFFSET 0x8
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index 00e8e659e667..a17abcf98325 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -160,7 +160,8 @@ static const char *clock_event_mode_label[] = {
[CLOCK_EVT_MODE_PERIODIC] = "CLOCK_EVT_MODE_PERIODIC",
[CLOCK_EVT_MODE_ONESHOT] = "CLOCK_EVT_MODE_ONESHOT",
[CLOCK_EVT_MODE_SHUTDOWN] = "CLOCK_EVT_MODE_SHUTDOWN",
- [CLOCK_EVT_MODE_UNUSED] = "CLOCK_EVT_MODE_UNUSED"
+ [CLOCK_EVT_MODE_UNUSED] = "CLOCK_EVT_MODE_UNUSED",
+ [CLOCK_EVT_MODE_RESUME] = "CLOCK_EVT_MODE_RESUME",
};
#endif /* DEBUG */
diff --git a/arch/arm/plat-mxc/tzic.c b/arch/arm/plat-mxc/tzic.c
index 98308ec1f321..c2193178210b 100644
--- a/arch/arm/plat-mxc/tzic.c
+++ b/arch/arm/plat-mxc/tzic.c
@@ -15,6 +15,8 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
#include <asm/mach/irq.h>
#include <asm/exception.h>
@@ -49,6 +51,7 @@
#define TZIC_ID0 0x0FD0 /* Indentification Register 0 */
void __iomem *tzic_base; /* Used as irq controller base in entry-macro.S */
+static struct irq_domain *domain;
#define TZIC_NUM_IRQS 128
@@ -77,15 +80,14 @@ static int tzic_set_irq_fiq(unsigned int irq, unsigned int type)
static void tzic_irq_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- int idx = gc->irq_base >> 5;
+ int idx = d->hwirq >> 5;
__raw_writel(gc->wake_active, tzic_base + TZIC_WAKEUP0(idx));
}
static void tzic_irq_resume(struct irq_data *d)
{
- struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- int idx = gc->irq_base >> 5;
+ int idx = d->hwirq >> 5;
__raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(idx)),
tzic_base + TZIC_WAKEUP0(idx));
@@ -102,11 +104,10 @@ static struct mxc_extra_irq tzic_extra_irq = {
#endif
};
-static __init void tzic_init_gc(unsigned int irq_start)
+static __init void tzic_init_gc(int idx, unsigned int irq_start)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
- int idx = irq_start >> 5;
gc = irq_alloc_generic_chip("tzic", 1, irq_start, tzic_base,
handle_level_irq);
@@ -140,7 +141,8 @@ asmlinkage void __exception_irq_entry tzic_handle_irq(struct pt_regs *regs)
while (stat) {
handled = 1;
irqofs = fls(stat) - 1;
- handle_IRQ(irqofs + i * 32, regs);
+ handle_IRQ(irq_find_mapping(domain,
+ irqofs + i * 32), regs);
stat &= ~(1 << irqofs);
}
}
@@ -154,6 +156,8 @@ asmlinkage void __exception_irq_entry tzic_handle_irq(struct pt_regs *regs)
*/
void __init tzic_init_irq(void __iomem *irqbase)
{
+ struct device_node *np;
+ int irq_base;
int i;
tzic_base = irqbase;
@@ -175,12 +179,20 @@ void __init tzic_init_irq(void __iomem *irqbase)
/* all IRQ no FIQ Warning :: No selection */
- for (i = 0; i < TZIC_NUM_IRQS; i += 32)
- tzic_init_gc(i);
+ irq_base = irq_alloc_descs(-1, 0, TZIC_NUM_IRQS, numa_node_id());
+ WARN_ON(irq_base < 0);
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,tzic");
+ domain = irq_domain_add_legacy(np, TZIC_NUM_IRQS, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ WARN_ON(!domain);
+
+ for (i = 0; i < 4; i++, irq_base += 32)
+ tzic_init_gc(i, irq_base);
#ifdef CONFIG_FIQ
/* Initialize FIQ */
- init_FIQ();
+ init_FIQ(FIQ_START);
#endif
pr_info("TrustZone Interrupt Controller (TZIC) initialized\n");
@@ -190,6 +202,10 @@ void __init tzic_init_irq(void __iomem *irqbase)
* tzic_enable_wake() - enable wakeup interrupt
*
* @return 0 if successful; non-zero otherwise
+ *
+ * This function provides an interrupt synchronization point that is required
+ * by tzic enabled platforms before entering imx specific low power modes (ie,
+ * those low power modes beyond the WAIT_CLOCKED basic ARM WFI only mode).
*/
int tzic_enable_wake(void)
{
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index ad95c7a5d009..dd36eba9506c 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -29,7 +29,7 @@ config ARCH_OMAP2PLUS
select USE_OF
select PROC_DEVICETREE if PROC_FS
help
- "Systems based on OMAP2, OMAP3 or OMAP4"
+ "Systems based on OMAP2, OMAP3, OMAP4 or OMAP5"
endchoice
@@ -45,31 +45,30 @@ config OMAP_DEBUG_LEDS
depends on OMAP_DEBUG_DEVICES
default y if LEDS_CLASS
-config OMAP_SMARTREFLEX
- bool "SmartReflex support"
- depends on (ARCH_OMAP3 || ARCH_OMAP4) && PM
+config POWER_AVS_OMAP
+ bool "AVS(Adaptive Voltage Scaling) support for OMAP IP versions 1&2"
+ depends on POWER_AVS && (ARCH_OMAP3 || ARCH_OMAP4) && PM
help
- Say Y if you want to enable SmartReflex.
-
- SmartReflex can perform continuous dynamic voltage
- scaling around the nominal operating point voltage
- according to silicon characteristics and operating
- conditions. Enabling SmartReflex reduces power
- consumption.
+ Say Y to enable AVS(Adaptive Voltage Scaling)
+ support on OMAP containing the version 1 or
+ version 2 of the SmartReflex IP.
+ V1 is the 65nm version used in OMAP3430.
+ V2 is the update for the 45nm version of the IP used in OMAP3630
+ and OMAP4430
Please note, that by default SmartReflex is only
- initialized. To enable the automatic voltage
- compensation for vdd mpu and vdd core from user space,
+ initialized and not enabled. To enable the automatic voltage
+ compensation for vdd mpu and vdd core from user space,
user must write 1 to
- /debug/voltage/vdd_<X>/smartreflex/autocomp,
- where X is mpu or core for OMAP3.
+ /debug/smartreflex/sr_<X>/autocomp,
+ where X is mpu_iva or core for OMAP3.
Optionally autocompensation can be enabled in the kernel
by default during system init via the enable_on_init flag
which an be passed as platform data to the smartreflex driver.
-config OMAP_SMARTREFLEX_CLASS3
+config POWER_AVS_OMAP_CLASS3
bool "Class 3 mode of Smartreflex Implementation"
- depends on OMAP_SMARTREFLEX && TWL4030_CORE
+ depends on POWER_AVS_OMAP && TWL4030_CORE
help
Say Y to enable Class 3 implementation of Smartreflex
@@ -150,7 +149,7 @@ config OMAP_32K_TIMER
This timer saves power compared to the OMAP_MPU_TIMER, and has
support for no tick during idle. The 32KHz timer provides less
intra-tick resolution than OMAP_MPU_TIMER. The 32KHz timer is
- currently only available for OMAP16XX, 24XX, 34XX and OMAP4.
+ currently only available for OMAP16XX, 24XX, 34XX and OMAP4/5.
config OMAP3_L2_AUX_SECURE_SAVE_RESTORE
bool "OMAP3 HS/EMU save and restore for L2 AUX control register"
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index ed8605f01155..961bf859bc0c 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -4,15 +4,13 @@
# Common support
obj-y := common.o sram.o clock.o devices.o dma.o mux.o \
- usb.o fb.o counter_32k.o
+ fb.o counter_32k.o
obj-m :=
obj-n :=
obj- :=
# omap_device support (OMAP2+ only at the moment)
-obj-$(CONFIG_ARCH_OMAP2) += omap_device.o
-obj-$(CONFIG_ARCH_OMAP3) += omap_device.o
-obj-$(CONFIG_ARCH_OMAP4) += omap_device.o
+obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_device.o
obj-$(CONFIG_OMAP_DM_TIMER) += dmtimer.o
obj-$(CONFIG_OMAP_DEBUG_DEVICES) += debug-devices.o
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index 0a9b9a970113..89a3723b3538 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -77,3 +77,12 @@ void __init omap_init_consistent_dma_size(void)
init_consistent_dma_size(CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE << 20);
#endif
}
+
+/*
+ * Stub function for OMAP2 so that common files
+ * continue to build when custom builds are used
+ */
+int __weak omap_secure_ram_reserve_memblock(void)
+{
+ return 0;
+}
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 2132c4f389e1..dbf1e03029a5 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -29,7 +29,10 @@
#include <plat/clock.h>
/* OMAP2_32KSYNCNT_CR_OFF: offset of 32ksync counter register */
-#define OMAP2_32KSYNCNT_CR_OFF 0x10
+#define OMAP2_32KSYNCNT_REV_OFF 0x0
+#define OMAP2_32KSYNCNT_REV_SCHEME (0x3 << 30)
+#define OMAP2_32KSYNCNT_CR_OFF_LOW 0x10
+#define OMAP2_32KSYNCNT_CR_OFF_HIGH 0x30
/*
* 32KHz clocksource ... always available, on pretty most chips except
@@ -84,9 +87,16 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
int ret;
/*
- * 32k sync Counter register offset is at 0x10
+ * 32k sync Counter IP register offsets vary between the
+ * highlander version and the legacy ones.
+ * The 'SCHEME' bits(30-31) of the revision register is used
+ * to identify the version.
*/
- sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF;
+ if (__raw_readl(vbase + OMAP2_32KSYNCNT_REV_OFF) &
+ OMAP2_32KSYNCNT_REV_SCHEME)
+ sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF_HIGH;
+ else
+ sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF_LOW;
/*
* 120000 rough estimate from the calculations in
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index cb16ade437cb..7fe626761e53 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -573,22 +573,25 @@ EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
static inline void omap_enable_channel_irq(int lch)
{
- u32 status;
-
/* Clear CSR */
if (cpu_class_is_omap1())
- status = p->dma_read(CSR, lch);
- else if (cpu_class_is_omap2())
+ p->dma_read(CSR, lch);
+ else
p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
/* Enable some nice interrupts. */
p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
}
-static void omap_disable_channel_irq(int lch)
+static inline void omap_disable_channel_irq(int lch)
{
- if (cpu_class_is_omap2())
- p->dma_write(0, CICR, lch);
+ /* disable channel interrupts */
+ p->dma_write(0, CICR, lch);
+ /* Clear CSR */
+ if (cpu_class_is_omap1())
+ p->dma_read(CSR, lch);
+ else
+ p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
}
void omap_enable_dma_irq(int lch, u16 bits)
@@ -632,14 +635,14 @@ static inline void disable_lnk(int lch)
l = p->dma_read(CLNK_CTRL, lch);
/* Disable interrupts */
+ omap_disable_channel_irq(lch);
+
if (cpu_class_is_omap1()) {
- p->dma_write(0, CICR, lch);
/* Set the STOP_LNK bit */
l |= 1 << 14;
}
if (cpu_class_is_omap2()) {
- omap_disable_channel_irq(lch);
/* Clear the ENABLE_LNK bit */
l &= ~(1 << 15);
}
@@ -657,6 +660,9 @@ static inline void omap2_enable_irq_lch(int lch)
return;
spin_lock_irqsave(&dma_chan_lock, flags);
+ /* clear IRQ STATUS */
+ p->dma_write(1 << lch, IRQSTATUS_L0, lch);
+ /* Enable interrupt */
val = p->dma_read(IRQENABLE_L0, lch);
val |= 1 << lch;
p->dma_write(val, IRQENABLE_L0, lch);
@@ -672,9 +678,12 @@ static inline void omap2_disable_irq_lch(int lch)
return;
spin_lock_irqsave(&dma_chan_lock, flags);
+ /* Disable interrupt */
val = p->dma_read(IRQENABLE_L0, lch);
val &= ~(1 << lch);
p->dma_write(val, IRQENABLE_L0, lch);
+ /* clear IRQ STATUS */
+ p->dma_write(1 << lch, IRQSTATUS_L0, lch);
spin_unlock_irqrestore(&dma_chan_lock, flags);
}
@@ -745,11 +754,8 @@ int omap_request_dma(int dev_id, const char *dev_name,
}
if (cpu_class_is_omap2()) {
- omap2_enable_irq_lch(free_ch);
omap_enable_channel_irq(free_ch);
- /* Clear the CSR register and IRQ status register */
- p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, free_ch);
- p->dma_write(1 << free_ch, IRQSTATUS_L0, 0);
+ omap2_enable_irq_lch(free_ch);
}
*dma_ch_out = free_ch;
@@ -768,27 +774,19 @@ void omap_free_dma(int lch)
return;
}
- if (cpu_class_is_omap1()) {
- /* Disable all DMA interrupts for the channel. */
- p->dma_write(0, CICR, lch);
- /* Make sure the DMA transfer is stopped. */
- p->dma_write(0, CCR, lch);
- }
-
- if (cpu_class_is_omap2()) {
+ /* Disable interrupt for logical channel */
+ if (cpu_class_is_omap2())
omap2_disable_irq_lch(lch);
- /* Clear the CSR register and IRQ status register */
- p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
- p->dma_write(1 << lch, IRQSTATUS_L0, lch);
+ /* Disable all DMA interrupts for the channel. */
+ omap_disable_channel_irq(lch);
- /* Disable all DMA interrupts for the channel. */
- p->dma_write(0, CICR, lch);
+ /* Make sure the DMA transfer is stopped. */
+ p->dma_write(0, CCR, lch);
- /* Make sure the DMA transfer is stopped. */
- p->dma_write(0, CCR, lch);
+ /* Clear registers */
+ if (cpu_class_is_omap2())
omap_clear_dma(lch);
- }
spin_lock_irqsave(&dma_chan_lock, flags);
dma_chan[lch].dev_id = -1;
@@ -943,8 +941,7 @@ void omap_stop_dma(int lch)
u32 l;
/* Disable all interrupts on the channel */
- if (cpu_class_is_omap1())
- p->dma_write(0, CICR, lch);
+ omap_disable_channel_irq(lch);
l = p->dma_read(CCR, lch);
if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index 3b0cfeb33d05..626ad8cad7a9 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -37,14 +37,16 @@
#include <linux/module.h>
#include <linux/io.h>
-#include <linux/slab.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <plat/dmtimer.h>
+#include <plat/omap-pm.h>
#include <mach/hardware.h>
+static u32 omap_reserved_systimers;
static LIST_HEAD(omap_timer_list);
static DEFINE_SPINLOCK(dm_timer_lock);
@@ -133,17 +135,22 @@ static void omap_dm_timer_reset(struct omap_dm_timer *timer)
int omap_dm_timer_prepare(struct omap_dm_timer *timer)
{
- struct dmtimer_platform_data *pdata = timer->pdev->dev.platform_data;
int ret;
- timer->fclk = clk_get(&timer->pdev->dev, "fck");
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
- timer->fclk = NULL;
- dev_err(&timer->pdev->dev, ": No fclk handle.\n");
- return -EINVAL;
+ /*
+ * FIXME: OMAP1 devices do not use the clock framework for dmtimers so
+ * do not call clk_get() for these devices.
+ */
+ if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
+ timer->fclk = clk_get(&timer->pdev->dev, "fck");
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(timer->fclk))) {
+ timer->fclk = NULL;
+ dev_err(&timer->pdev->dev, ": No fclk handle.\n");
+ return -EINVAL;
+ }
}
- if (pdata->needs_manual_reset)
+ if (timer->capability & OMAP_TIMER_NEEDS_RESET)
omap_dm_timer_reset(timer);
ret = omap_dm_timer_set_source(timer, OMAP_TIMER_SRC_32_KHZ);
@@ -152,6 +159,21 @@ int omap_dm_timer_prepare(struct omap_dm_timer *timer)
return ret;
}
+static inline u32 omap_dm_timer_reserved_systimer(int id)
+{
+ return (omap_reserved_systimers & (1 << (id - 1))) ? 1 : 0;
+}
+
+int omap_dm_timer_reserve_systimer(int id)
+{
+ if (omap_dm_timer_reserved_systimer(id))
+ return -ENODEV;
+
+ omap_reserved_systimers |= (1 << (id - 1));
+
+ return 0;
+}
+
struct omap_dm_timer *omap_dm_timer_request(void)
{
struct omap_dm_timer *timer = NULL, *t;
@@ -325,10 +347,9 @@ int omap_dm_timer_start(struct omap_dm_timer *timer)
omap_dm_timer_enable(timer);
- if (timer->loses_context) {
- u32 ctx_loss_cnt_after =
- timer->get_context_loss_count(&timer->pdev->dev);
- if (ctx_loss_cnt_after != timer->ctx_loss_count)
+ if (!(timer->capability & OMAP_TIMER_ALWON)) {
+ if (omap_pm_get_dev_context_loss_count(&timer->pdev->dev) !=
+ timer->ctx_loss_count)
omap_timer_restore_context(timer);
}
@@ -347,20 +368,18 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_start);
int omap_dm_timer_stop(struct omap_dm_timer *timer)
{
unsigned long rate = 0;
- struct dmtimer_platform_data *pdata;
if (unlikely(!timer))
return -EINVAL;
- pdata = timer->pdev->dev.platform_data;
- if (!pdata->needs_manual_reset)
+ if (!(timer->capability & OMAP_TIMER_NEEDS_RESET))
rate = clk_get_rate(timer->fclk);
__omap_dm_timer_stop(timer, timer->posted, rate);
- if (timer->loses_context && timer->get_context_loss_count)
+ if (!(timer->capability & OMAP_TIMER_ALWON))
timer->ctx_loss_count =
- timer->get_context_loss_count(&timer->pdev->dev);
+ omap_pm_get_dev_context_loss_count(&timer->pdev->dev);
/*
* Since the register values are computed and written within
@@ -378,6 +397,8 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_stop);
int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
{
int ret;
+ char *parent_name = NULL;
+ struct clk *fclk, *parent;
struct dmtimer_platform_data *pdata;
if (unlikely(!timer))
@@ -388,7 +409,49 @@ int omap_dm_timer_set_source(struct omap_dm_timer *timer, int source)
if (source < 0 || source >= 3)
return -EINVAL;
- ret = pdata->set_timer_src(timer->pdev, source);
+ /*
+ * FIXME: Used for OMAP1 devices only because they do not currently
+ * use the clock framework to set the parent clock. To be removed
+ * once OMAP1 migrated to using clock framework for dmtimers
+ */
+ if (pdata->set_timer_src)
+ return pdata->set_timer_src(timer->pdev, source);
+
+ fclk = clk_get(&timer->pdev->dev, "fck");
+ if (IS_ERR_OR_NULL(fclk)) {
+ pr_err("%s: fck not found\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (source) {
+ case OMAP_TIMER_SRC_SYS_CLK:
+ parent_name = "timer_sys_ck";
+ break;
+
+ case OMAP_TIMER_SRC_32_KHZ:
+ parent_name = "timer_32k_ck";
+ break;
+
+ case OMAP_TIMER_SRC_EXT_CLK:
+ parent_name = "timer_ext_ck";
+ break;
+ }
+
+ parent = clk_get(&timer->pdev->dev, parent_name);
+ if (IS_ERR_OR_NULL(parent)) {
+ pr_err("%s: %s not found\n", __func__, parent_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = clk_set_parent(fclk, parent);
+ if (IS_ERR_VALUE(ret))
+ pr_err("%s: failed to set %s as parent\n", __func__,
+ parent_name);
+
+ clk_put(parent);
+out:
+ clk_put(fclk);
return ret;
}
@@ -431,10 +494,9 @@ int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload,
omap_dm_timer_enable(timer);
- if (timer->loses_context) {
- u32 ctx_loss_cnt_after =
- timer->get_context_loss_count(&timer->pdev->dev);
- if (ctx_loss_cnt_after != timer->ctx_loss_count)
+ if (!(timer->capability & OMAP_TIMER_ALWON)) {
+ if (omap_pm_get_dev_context_loss_count(&timer->pdev->dev) !=
+ timer->ctx_loss_count)
omap_timer_restore_context(timer);
}
@@ -627,68 +689,57 @@ EXPORT_SYMBOL_GPL(omap_dm_timers_active);
*/
static int __devinit omap_dm_timer_probe(struct platform_device *pdev)
{
- int ret;
unsigned long flags;
struct omap_dm_timer *timer;
- struct resource *mem, *irq, *ioarea;
+ struct resource *mem, *irq;
+ struct device *dev = &pdev->dev;
struct dmtimer_platform_data *pdata = pdev->dev.platform_data;
if (!pdata) {
- dev_err(&pdev->dev, "%s: no platform data.\n", __func__);
+ dev_err(dev, "%s: no platform data.\n", __func__);
return -ENODEV;
}
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (unlikely(!irq)) {
- dev_err(&pdev->dev, "%s: no IRQ resource.\n", __func__);
+ dev_err(dev, "%s: no IRQ resource.\n", __func__);
return -ENODEV;
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!mem)) {
- dev_err(&pdev->dev, "%s: no memory resource.\n", __func__);
+ dev_err(dev, "%s: no memory resource.\n", __func__);
return -ENODEV;
}
- ioarea = request_mem_region(mem->start, resource_size(mem),
- pdev->name);
- if (!ioarea) {
- dev_err(&pdev->dev, "%s: region already claimed.\n", __func__);
- return -EBUSY;
- }
-
- timer = kzalloc(sizeof(struct omap_dm_timer), GFP_KERNEL);
+ timer = devm_kzalloc(dev, sizeof(struct omap_dm_timer), GFP_KERNEL);
if (!timer) {
- dev_err(&pdev->dev, "%s: no memory for omap_dm_timer.\n",
- __func__);
- ret = -ENOMEM;
- goto err_free_ioregion;
+ dev_err(dev, "%s: memory alloc failed!\n", __func__);
+ return -ENOMEM;
}
- timer->io_base = ioremap(mem->start, resource_size(mem));
+ timer->io_base = devm_request_and_ioremap(dev, mem);
if (!timer->io_base) {
- dev_err(&pdev->dev, "%s: ioremap failed.\n", __func__);
- ret = -ENOMEM;
- goto err_free_mem;
+ dev_err(dev, "%s: region already claimed.\n", __func__);
+ return -ENOMEM;
}
timer->id = pdev->id;
timer->irq = irq->start;
- timer->reserved = pdata->reserved;
+ timer->reserved = omap_dm_timer_reserved_systimer(timer->id);
timer->pdev = pdev;
- timer->loses_context = pdata->loses_context;
- timer->get_context_loss_count = pdata->get_context_loss_count;
+ timer->capability = pdata->timer_capability;
/* Skip pm_runtime_enable for OMAP1 */
- if (!pdata->needs_manual_reset) {
- pm_runtime_enable(&pdev->dev);
- pm_runtime_irq_safe(&pdev->dev);
+ if (!(timer->capability & OMAP_TIMER_NEEDS_RESET)) {
+ pm_runtime_enable(dev);
+ pm_runtime_irq_safe(dev);
}
if (!timer->reserved) {
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_sync(dev);
__omap_dm_timer_init_regs(timer);
- pm_runtime_put(&pdev->dev);
+ pm_runtime_put(dev);
}
/* add the timer element to the list */
@@ -696,17 +747,9 @@ static int __devinit omap_dm_timer_probe(struct platform_device *pdev)
list_add_tail(&timer->node, &omap_timer_list);
spin_unlock_irqrestore(&dm_timer_lock, flags);
- dev_dbg(&pdev->dev, "Device Probed.\n");
+ dev_dbg(dev, "Device Probed.\n");
return 0;
-
-err_free_mem:
- kfree(timer);
-
-err_free_ioregion:
- release_mem_region(mem->start, resource_size(mem));
-
- return ret;
}
/**
@@ -727,7 +770,6 @@ static int __devexit omap_dm_timer_remove(struct platform_device *pdev)
list_for_each_entry(timer, &omap_timer_list, node)
if (timer->pdev->id == pdev->id) {
list_del(&timer->node);
- kfree(timer);
ret = 0;
break;
}
diff --git a/arch/arm/plat-omap/include/plat/board.h b/arch/arm/plat-omap/include/plat/board.h
index 4814c5b65306..e62f20a5c0af 100644
--- a/arch/arm/plat-omap/include/plat/board.h
+++ b/arch/arm/plat-omap/include/plat/board.h
@@ -57,44 +57,6 @@ struct omap_camera_sensor_config {
int (*power_off)(void * data);
};
-struct omap_usb_config {
- /* Configure drivers according to the connectors on your board:
- * - "A" connector (rectagular)
- * ... for host/OHCI use, set "register_host".
- * - "B" connector (squarish) or "Mini-B"
- * ... for device/gadget use, set "register_dev".
- * - "Mini-AB" connector (very similar to Mini-B)
- * ... for OTG use as device OR host, initialize "otg"
- */
- unsigned register_host:1;
- unsigned register_dev:1;
- u8 otg; /* port number, 1-based: usb1 == 2 */
-
- u8 hmc_mode;
-
- /* implicitly true if otg: host supports remote wakeup? */
- u8 rwc;
-
- /* signaling pins used to talk to transceiver on usbN:
- * 0 == usbN unused
- * 2 == usb0-only, using internal transceiver
- * 3 == 3 wire bidirectional
- * 4 == 4 wire bidirectional
- * 6 == 6 wire unidirectional (or TLL)
- */
- u8 pins[3];
-
- struct platform_device *udc_device;
- struct platform_device *ohci_device;
- struct platform_device *otg_device;
-
- u32 (*usb0_init)(unsigned nwires, unsigned is_device);
- u32 (*usb1_init)(unsigned nwires);
- u32 (*usb2_init)(unsigned nwires, unsigned alt_pingroup);
-
- int (*ocpi_enable)(void);
-};
-
struct omap_lcd_config {
char panel_name[16];
char ctrl_name[16];
diff --git a/arch/arm/plat-omap/include/plat/clkdev_omap.h b/arch/arm/plat-omap/include/plat/clkdev_omap.h
index d0ed8c443a63..025d85a3ee86 100644
--- a/arch/arm/plat-omap/include/plat/clkdev_omap.h
+++ b/arch/arm/plat-omap/include/plat/clkdev_omap.h
@@ -39,6 +39,7 @@ struct omap_clk {
#define CK_443X (1 << 11)
#define CK_TI816X (1 << 12)
#define CK_446X (1 << 13)
+#define CK_AM33XX (1 << 14) /* AM33xx specific clocks */
#define CK_1710 (1 << 15) /* 1710 extra for rate selection */
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index d0ef57c1d71b..656b9862279e 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -156,7 +156,6 @@ struct dpll_data {
u8 min_divider;
u16 max_divider;
u8 modes;
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
void __iomem *autoidle_reg;
void __iomem *idlest_reg;
u32 autoidle_mask;
@@ -167,7 +166,6 @@ struct dpll_data {
u8 auto_recal_bit;
u8 recal_en_bit;
u8 recal_st_bit;
-# endif
u8 flags;
};
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index de6c0a08f461..68b180edcfff 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -9,7 +9,7 @@
*
* Written by Tony Lindgren <tony.lindgren@nokia.com>
*
- * Added OMAP4 specific defines - Santosh Shilimkar<santosh.shilimkar@ti.com>
+ * Added OMAP4/5 specific defines - Santosh Shilimkar<santosh.shilimkar@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -70,6 +70,7 @@ unsigned int omap_rev(void);
* cpu_is_omap443x(): True for OMAP4430
* cpu_is_omap446x(): True for OMAP4460
* cpu_is_omap447x(): True for OMAP4470
+ * soc_is_omap543x(): True for OMAP5430, OMAP5432
*/
#define GET_OMAP_CLASS (omap_rev() & 0xff)
@@ -122,6 +123,7 @@ IS_OMAP_CLASS(24xx, 0x24)
IS_OMAP_CLASS(34xx, 0x34)
IS_OMAP_CLASS(44xx, 0x44)
IS_AM_CLASS(35xx, 0x35)
+IS_OMAP_CLASS(54xx, 0x54)
IS_AM_CLASS(33xx, 0x33)
IS_TI_CLASS(81xx, 0x81)
@@ -133,6 +135,7 @@ IS_OMAP_SUBCLASS(363x, 0x363)
IS_OMAP_SUBCLASS(443x, 0x443)
IS_OMAP_SUBCLASS(446x, 0x446)
IS_OMAP_SUBCLASS(447x, 0x447)
+IS_OMAP_SUBCLASS(543x, 0x543)
IS_TI_SUBCLASS(816x, 0x816)
IS_TI_SUBCLASS(814x, 0x814)
@@ -150,12 +153,14 @@ IS_AM_SUBCLASS(335x, 0x335)
#define cpu_is_ti816x() 0
#define cpu_is_ti814x() 0
#define soc_is_am35xx() 0
-#define cpu_is_am33xx() 0
-#define cpu_is_am335x() 0
+#define soc_is_am33xx() 0
+#define soc_is_am335x() 0
#define cpu_is_omap44xx() 0
#define cpu_is_omap443x() 0
#define cpu_is_omap446x() 0
#define cpu_is_omap447x() 0
+#define soc_is_omap54xx() 0
+#define soc_is_omap543x() 0
#if defined(MULTI_OMAP1)
# if defined(CONFIG_ARCH_OMAP730)
@@ -238,9 +243,7 @@ IS_AM_SUBCLASS(335x, 0x335)
/*
* Macros to detect individual cpu types.
* These are only rarely needed.
- * cpu_is_omap330(): True for OMAP330
- * cpu_is_omap730(): True for OMAP730
- * cpu_is_omap850(): True for OMAP850
+ * cpu_is_omap310(): True for OMAP310
* cpu_is_omap1510(): True for OMAP1510
* cpu_is_omap1610(): True for OMAP1610
* cpu_is_omap1611(): True for OMAP1611
@@ -262,8 +265,6 @@ static inline int is_omap ##type (void) \
}
IS_OMAP_TYPE(310, 0x0310)
-IS_OMAP_TYPE(730, 0x0730)
-IS_OMAP_TYPE(850, 0x0850)
IS_OMAP_TYPE(1510, 0x1510)
IS_OMAP_TYPE(1610, 0x1610)
IS_OMAP_TYPE(1611, 0x1611)
@@ -277,8 +278,6 @@ IS_OMAP_TYPE(2430, 0x2430)
IS_OMAP_TYPE(3430, 0x3430)
#define cpu_is_omap310() 0
-#define cpu_is_omap730() 0
-#define cpu_is_omap850() 0
#define cpu_is_omap1510() 0
#define cpu_is_omap1610() 0
#define cpu_is_omap5912() 0
@@ -291,22 +290,13 @@ IS_OMAP_TYPE(3430, 0x3430)
#define cpu_is_omap2430() 0
#define cpu_is_omap3430() 0
#define cpu_is_omap3630() 0
+#define soc_is_omap5430() 0
/*
* Whether we have MULTI_OMAP1 or not, we still need to distinguish
- * between 730 vs 850, 330 vs. 1510 and 1611B/5912 vs. 1710.
+ * between 310 vs. 1510 and 1611B/5912 vs. 1710.
*/
-#if defined(CONFIG_ARCH_OMAP730)
-# undef cpu_is_omap730
-# define cpu_is_omap730() is_omap730()
-#endif
-
-#if defined(CONFIG_ARCH_OMAP850)
-# undef cpu_is_omap850
-# define cpu_is_omap850() is_omap850()
-#endif
-
#if defined(CONFIG_ARCH_OMAP15XX)
# undef cpu_is_omap310
# undef cpu_is_omap1510
@@ -344,8 +334,6 @@ IS_OMAP_TYPE(3430, 0x3430)
# undef cpu_is_ti816x
# undef cpu_is_ti814x
# undef soc_is_am35xx
-# undef cpu_is_am33xx
-# undef cpu_is_am335x
# define cpu_is_omap3430() is_omap3430()
# undef cpu_is_omap3630
# define cpu_is_omap3630() is_omap363x()
@@ -353,8 +341,13 @@ IS_OMAP_TYPE(3430, 0x3430)
# define cpu_is_ti816x() is_ti816x()
# define cpu_is_ti814x() is_ti814x()
# define soc_is_am35xx() is_am35xx()
-# define cpu_is_am33xx() is_am33xx()
-# define cpu_is_am335x() is_am335x()
+#endif
+
+# if defined(CONFIG_SOC_AM33XX)
+# undef soc_is_am33xx
+# undef soc_is_am335x
+# define soc_is_am33xx() is_am33xx()
+# define soc_is_am335x() is_am335x()
#endif
# if defined(CONFIG_ARCH_OMAP4)
@@ -368,11 +361,18 @@ IS_OMAP_TYPE(3430, 0x3430)
# define cpu_is_omap447x() is_omap447x()
# endif
+# if defined(CONFIG_SOC_OMAP5)
+# undef soc_is_omap54xx
+# undef soc_is_omap543x
+# define soc_is_omap54xx() is_omap54xx()
+# define soc_is_omap543x() is_omap543x()
+#endif
+
/* Macros to detect if we have OMAP1 or OMAP2 */
#define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \
cpu_is_omap16xx())
#define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \
- cpu_is_omap44xx())
+ cpu_is_omap44xx() || soc_is_omap54xx())
/* Various silicon revisions for omap2 */
#define OMAP242X_CLASS 0x24200024
@@ -408,7 +408,7 @@ IS_OMAP_TYPE(3430, 0x3430)
#define AM35XX_REV_ES1_0 AM35XX_CLASS
#define AM35XX_REV_ES1_1 (AM35XX_CLASS | (0x1 << 8))
-#define AM335X_CLASS 0x33500034
+#define AM335X_CLASS 0x33500033
#define AM335X_REV_ES1_0 AM335X_CLASS
#define OMAP443X_CLASS 0x44300044
@@ -425,9 +425,14 @@ IS_OMAP_TYPE(3430, 0x3430)
#define OMAP447X_CLASS 0x44700044
#define OMAP4470_REV_ES1_0 (OMAP447X_CLASS | (0x10 << 8))
+#define OMAP54XX_CLASS 0x54000054
+#define OMAP5430_REV_ES1_0 (OMAP54XX_CLASS | (0x30 << 16) | (0x10 << 8))
+#define OMAP5432_REV_ES1_0 (OMAP54XX_CLASS | (0x32 << 16) | (0x10 << 8))
+
void omap2xxx_check_revision(void);
void omap3xxx_check_revision(void);
void omap4xxx_check_revision(void);
+void omap5xxx_check_revision(void);
void omap3xxx_check_features(void);
void ti81xx_check_features(void);
void omap4xxx_check_features(void);
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index 5da73562e486..19e7fa577bd0 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -55,23 +55,17 @@
#define OMAP_TIMER_TRIGGER_OVERFLOW 0x01
#define OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
-/*
- * IP revision identifier so that Highlander IP
- * in OMAP4 can be distinguished.
- */
-#define OMAP_TIMER_IP_VERSION_1 0x1
-
/* timer capabilities used in hwmod database */
#define OMAP_TIMER_SECURE 0x80000000
#define OMAP_TIMER_ALWON 0x40000000
#define OMAP_TIMER_HAS_PWM 0x20000000
+#define OMAP_TIMER_NEEDS_RESET 0x10000000
struct omap_timer_capability_dev_attr {
u32 timer_capability;
};
struct omap_dm_timer;
-struct clk;
struct timer_regs {
u32 tidr;
@@ -96,16 +90,12 @@ struct timer_regs {
};
struct dmtimer_platform_data {
+ /* set_timer_src - Only used for OMAP1 devices */
int (*set_timer_src)(struct platform_device *pdev, int source);
- int timer_ip_version;
- u32 needs_manual_reset:1;
- bool reserved;
-
- bool loses_context;
-
- int (*get_context_loss_count)(struct device *dev);
+ u32 timer_capability;
};
+int omap_dm_timer_reserve_systimer(int id);
struct omap_dm_timer *omap_dm_timer_request(void);
struct omap_dm_timer *omap_dm_timer_request_specific(int timer_id);
int omap_dm_timer_free(struct omap_dm_timer *timer);
@@ -272,13 +262,11 @@ struct omap_dm_timer {
unsigned reserved:1;
unsigned posted:1;
struct timer_regs context;
- bool loses_context;
int ctx_loss_count;
int revision;
+ u32 capability;
struct platform_device *pdev;
struct list_head node;
-
- int (*get_context_loss_count)(struct device *dev);
};
int omap_dm_timer_prepare(struct omap_dm_timer *timer);
diff --git a/arch/arm/plat-omap/include/plat/dsp.h b/arch/arm/plat-omap/include/plat/dsp.h
index 9c604b390f9f..5927709b1908 100644
--- a/arch/arm/plat-omap/include/plat/dsp.h
+++ b/arch/arm/plat-omap/include/plat/dsp.h
@@ -18,6 +18,9 @@ struct omap_dsp_platform_data {
u32 (*dsp_cm_read)(s16 , u16);
u32 (*dsp_cm_rmw_bits)(u32, u32, s16, s16);
+ void (*set_bootaddr)(u32);
+ void (*set_bootmode)(u8);
+
phys_addr_t phys_mempool_base;
phys_addr_t phys_mempool_size;
};
diff --git a/arch/arm/plat-omap/include/plat/hardware.h b/arch/arm/plat-omap/include/plat/hardware.h
index e897978371c2..ddbde38e1e33 100644
--- a/arch/arm/plat-omap/include/plat/hardware.h
+++ b/arch/arm/plat-omap/include/plat/hardware.h
@@ -288,5 +288,6 @@
#include <plat/omap44xx.h>
#include <plat/ti81xx.h>
#include <plat/am33xx.h>
+#include <plat/omap54xx.h>
#endif /* __ASM_ARCH_OMAP_HARDWARE_H */
diff --git a/arch/arm/plat-omap/include/plat/multi.h b/arch/arm/plat-omap/include/plat/multi.h
index 999ffba2690c..045e320f1067 100644
--- a/arch/arm/plat-omap/include/plat/multi.h
+++ b/arch/arm/plat-omap/include/plat/multi.h
@@ -99,4 +99,13 @@
# endif
#endif
+#ifdef CONFIG_SOC_OMAP5
+# ifdef OMAP_NAME
+# undef MULTI_OMAP2
+# define MULTI_OMAP2
+# else
+# define OMAP_NAME omap5
+# endif
+#endif
+
#endif /* __PLAT_OMAP_MULTI_H */
diff --git a/arch/arm/plat-omap/include/plat/mux.h b/arch/arm/plat-omap/include/plat/mux.h
index aeba71796ad9..323948959200 100644
--- a/arch/arm/plat-omap/include/plat/mux.h
+++ b/arch/arm/plat-omap/include/plat/mux.h
@@ -99,7 +99,7 @@
/*
* OMAP730/850 has a slightly different config for the pin mux.
- * - config regs are the OMAP7XX_IO_CONF_x regs (see omap730.h) regs and
+ * - config regs are the OMAP7XX_IO_CONF_x regs (see omap7xx.h) regs and
* not the FUNC_MUX_CTRL_x regs from hardware.h
* - for pull-up/down, only has one enable bit which is is in the same register
* as mux config
diff --git a/arch/arm/plat-omap/include/plat/omap-secure.h b/arch/arm/plat-omap/include/plat/omap-secure.h
index 8c7994ce9869..0e4acd2d2deb 100644
--- a/arch/arm/plat-omap/include/plat/omap-secure.h
+++ b/arch/arm/plat-omap/include/plat/omap-secure.h
@@ -3,12 +3,7 @@
#include <linux/types.h>
-#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
extern int omap_secure_ram_reserve_memblock(void);
-#else
-static inline void omap_secure_ram_reserve_memblock(void)
-{ }
-#endif
#ifdef CONFIG_OMAP4_ERRATA_I688
extern int omap_barrier_reserve_memblock(void);
diff --git a/arch/arm/plat-omap/include/plat/omap54xx.h b/arch/arm/plat-omap/include/plat/omap54xx.h
new file mode 100644
index 000000000000..a2582bb3cab3
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/omap54xx.h
@@ -0,0 +1,32 @@
+/*:
+ * Address mappings and base address for OMAP5 interconnects
+ * and peripherals.
+ *
+ * Copyright (C) 2012 Texas Instruments
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Sricharan <r.sricharan@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_SOC_OMAP54XX_H
+#define __ASM_SOC_OMAP54XX_H
+
+/*
+ * Please place only base defines here and put the rest in device
+ * specific headers.
+ */
+#define L4_54XX_BASE 0x4a000000
+#define L4_WK_54XX_BASE 0x4ae00000
+#define L4_PER_54XX_BASE 0x48000000
+#define L3_54XX_BASE 0x44000000
+#define OMAP54XX_32KSYNCT_BASE 0x4ae04000
+#define OMAP54XX_CM_CORE_AON_BASE 0x4a004000
+#define OMAP54XX_CM_CORE_BASE 0x4a008000
+#define OMAP54XX_PRM_BASE 0x4ae06000
+#define OMAP54XX_PRCM_MPU_BASE 0x48243000
+#define OMAP54XX_SCM_BASE 0x4a002000
+#define OMAP54XX_CTRL_BASE 0x4a002800
+
+#endif /* __ASM_SOC_OMAP555554XX_H */
diff --git a/arch/arm/plat-omap/include/plat/omap730.h b/arch/arm/plat-omap/include/plat/omap730.h
deleted file mode 100644
index 14272bc1a6fd..000000000000
--- a/arch/arm/plat-omap/include/plat/omap730.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* arch/arm/plat-omap/include/mach/omap730.h
- *
- * Hardware definitions for TI OMAP730 processor.
- *
- * Cleanup for Linux-2.6 by Dirk Behme <dirk.behme@de.bosch.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __ASM_ARCH_OMAP730_H
-#define __ASM_ARCH_OMAP730_H
-
-/*
- * ----------------------------------------------------------------------------
- * Base addresses
- * ----------------------------------------------------------------------------
- */
-
-/* Syntax: XX_BASE = Virtual base address, XX_START = Physical base address */
-
-#define OMAP730_DSP_BASE 0xE0000000
-#define OMAP730_DSP_SIZE 0x50000
-#define OMAP730_DSP_START 0xE0000000
-
-#define OMAP730_DSPREG_BASE 0xE1000000
-#define OMAP730_DSPREG_SIZE SZ_128K
-#define OMAP730_DSPREG_START 0xE1000000
-
-/*
- * ----------------------------------------------------------------------------
- * OMAP730 specific configuration registers
- * ----------------------------------------------------------------------------
- */
-#define OMAP730_CONFIG_BASE 0xfffe1000
-#define OMAP730_IO_CONF_0 0xfffe1070
-#define OMAP730_IO_CONF_1 0xfffe1074
-#define OMAP730_IO_CONF_2 0xfffe1078
-#define OMAP730_IO_CONF_3 0xfffe107c
-#define OMAP730_IO_CONF_4 0xfffe1080
-#define OMAP730_IO_CONF_5 0xfffe1084
-#define OMAP730_IO_CONF_6 0xfffe1088
-#define OMAP730_IO_CONF_7 0xfffe108c
-#define OMAP730_IO_CONF_8 0xfffe1090
-#define OMAP730_IO_CONF_9 0xfffe1094
-#define OMAP730_IO_CONF_10 0xfffe1098
-#define OMAP730_IO_CONF_11 0xfffe109c
-#define OMAP730_IO_CONF_12 0xfffe10a0
-#define OMAP730_IO_CONF_13 0xfffe10a4
-
-#define OMAP730_MODE_1 0xfffe1010
-#define OMAP730_MODE_2 0xfffe1014
-
-/* CSMI specials: in terms of base + offset */
-#define OMAP730_MODE2_OFFSET 0x14
-
-/*
- * ----------------------------------------------------------------------------
- * OMAP730 traffic controller configuration registers
- * ----------------------------------------------------------------------------
- */
-#define OMAP730_FLASH_CFG_0 0xfffecc10
-#define OMAP730_FLASH_ACFG_0 0xfffecc50
-#define OMAP730_FLASH_CFG_1 0xfffecc14
-#define OMAP730_FLASH_ACFG_1 0xfffecc54
-
-/*
- * ----------------------------------------------------------------------------
- * OMAP730 DSP control registers
- * ----------------------------------------------------------------------------
- */
-#define OMAP730_ICR_BASE 0xfffbb800
-#define OMAP730_DSP_M_CTL 0xfffbb804
-#define OMAP730_DSP_MMU_BASE 0xfffed200
-
-/*
- * ----------------------------------------------------------------------------
- * OMAP730 PCC_UPLD configuration registers
- * ----------------------------------------------------------------------------
- */
-#define OMAP730_PCC_UPLD_CTRL_BASE (0xfffe0900)
-#define OMAP730_PCC_UPLD_CTRL (OMAP730_PCC_UPLD_CTRL_BASE + 0x00)
-
-#endif /* __ASM_ARCH_OMAP730_H */
-
diff --git a/arch/arm/plat-omap/include/plat/omap850.h b/arch/arm/plat-omap/include/plat/omap850.h
deleted file mode 100644
index c33f67981712..000000000000
--- a/arch/arm/plat-omap/include/plat/omap850.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* arch/arm/plat-omap/include/mach/omap850.h
- *
- * Hardware definitions for TI OMAP850 processor.
- *
- * Derived from omap730.h by Zebediah C. McClure <zmc@lurian.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef __ASM_ARCH_OMAP850_H
-#define __ASM_ARCH_OMAP850_H
-
-/*
- * ----------------------------------------------------------------------------
- * Base addresses
- * ----------------------------------------------------------------------------
- */
-
-/* Syntax: XX_BASE = Virtual base address, XX_START = Physical base address */
-
-#define OMAP850_DSP_BASE 0xE0000000
-#define OMAP850_DSP_SIZE 0x50000
-#define OMAP850_DSP_START 0xE0000000
-
-#define OMAP850_DSPREG_BASE 0xE1000000
-#define OMAP850_DSPREG_SIZE SZ_128K
-#define OMAP850_DSPREG_START 0xE1000000
-
-/*
- * ----------------------------------------------------------------------------
- * OMAP850 specific configuration registers
- * ----------------------------------------------------------------------------
- */
-#define OMAP850_CONFIG_BASE 0xfffe1000
-#define OMAP850_IO_CONF_0 0xfffe1070
-#define OMAP850_IO_CONF_1 0xfffe1074
-#define OMAP850_IO_CONF_2 0xfffe1078
-#define OMAP850_IO_CONF_3 0xfffe107c
-#define OMAP850_IO_CONF_4 0xfffe1080
-#define OMAP850_IO_CONF_5 0xfffe1084
-#define OMAP850_IO_CONF_6 0xfffe1088
-#define OMAP850_IO_CONF_7 0xfffe108c
-#define OMAP850_IO_CONF_8 0xfffe1090
-#define OMAP850_IO_CONF_9 0xfffe1094
-#define OMAP850_IO_CONF_10 0xfffe1098
-#define OMAP850_IO_CONF_11 0xfffe109c
-#define OMAP850_IO_CONF_12 0xfffe10a0
-#define OMAP850_IO_CONF_13 0xfffe10a4
-
-#define OMAP850_MODE_1 0xfffe1010
-#define OMAP850_MODE_2 0xfffe1014
-
-/* CSMI specials: in terms of base + offset */
-#define OMAP850_MODE2_OFFSET 0x14
-
-/*
- * ----------------------------------------------------------------------------
- * OMAP850 traffic controller configuration registers
- * ----------------------------------------------------------------------------
- */
-#define OMAP850_FLASH_CFG_0 0xfffecc10
-#define OMAP850_FLASH_ACFG_0 0xfffecc50
-#define OMAP850_FLASH_CFG_1 0xfffecc14
-#define OMAP850_FLASH_ACFG_1 0xfffecc54
-
-/*
- * ----------------------------------------------------------------------------
- * OMAP850 DSP control registers
- * ----------------------------------------------------------------------------
- */
-#define OMAP850_ICR_BASE 0xfffbb800
-#define OMAP850_DSP_M_CTL 0xfffbb804
-#define OMAP850_DSP_MMU_BASE 0xfffed200
-
-/*
- * ----------------------------------------------------------------------------
- * OMAP850 PCC_UPLD configuration registers
- * ----------------------------------------------------------------------------
- */
-#define OMAP850_PCC_UPLD_CTRL_BASE (0xfffe0900)
-#define OMAP850_PCC_UPLD_CTRL (OMAP850_PCC_UPLD_CTRL_BASE + 0x00)
-
-#endif /* __ASM_ARCH_OMAP850_H */
-
diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h
index c835b7194ff5..6132972aff37 100644
--- a/arch/arm/plat-omap/include/plat/omap_hwmod.h
+++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h
@@ -41,6 +41,7 @@ struct omap_device;
extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type1;
extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
+extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
/*
* OCP SYSCONFIG bit shifts/masks TYPE1. These are for IPs compliant
@@ -69,6 +70,17 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type2;
#define SYSC_TYPE2_SIDLEMODE_MASK (0x3 << SYSC_TYPE2_SIDLEMODE_SHIFT)
#define SYSC_TYPE2_MIDLEMODE_SHIFT 4
#define SYSC_TYPE2_MIDLEMODE_MASK (0x3 << SYSC_TYPE2_MIDLEMODE_SHIFT)
+#define SYSC_TYPE2_DMADISABLE_SHIFT 16
+#define SYSC_TYPE2_DMADISABLE_MASK (0x1 << SYSC_TYPE2_DMADISABLE_SHIFT)
+
+/*
+ * OCP SYSCONFIG bit shifts/masks TYPE3.
+ * This is applicable for some IPs present in AM33XX
+ */
+#define SYSC_TYPE3_SIDLEMODE_SHIFT 0
+#define SYSC_TYPE3_SIDLEMODE_MASK (0x3 << SYSC_TYPE3_SIDLEMODE_SHIFT)
+#define SYSC_TYPE3_MIDLEMODE_SHIFT 2
+#define SYSC_TYPE3_MIDLEMODE_MASK (0x3 << SYSC_TYPE3_MIDLEMODE_SHIFT)
/* OCP SYSSTATUS bit shifts/masks */
#define SYSS_RESETDONE_SHIFT 0
@@ -283,6 +295,7 @@ struct omap_hwmod_ocp_if {
#define SYSS_HAS_RESET_STATUS (1 << 7)
#define SYSC_NO_CACHE (1 << 8) /* XXX SW flag, belongs elsewhere */
#define SYSC_HAS_RESET_STATUS (1 << 9)
+#define SYSC_HAS_DMADISABLE (1 << 10)
/* omap_hwmod_sysconfig.clockact flags */
#define CLOCKACT_TEST_BOTH 0x0
@@ -298,6 +311,7 @@ struct omap_hwmod_ocp_if {
* @enwkup_shift: Offset of the enawakeup bit
* @srst_shift: Offset of the softreset bit
* @autoidle_shift: Offset of the autoidle bit
+ * @dmadisable_shift: Offset of the dmadisable bit
*/
struct omap_hwmod_sysc_fields {
u8 midle_shift;
@@ -306,6 +320,7 @@ struct omap_hwmod_sysc_fields {
u8 enwkup_shift;
u8 srst_shift;
u8 autoidle_shift;
+ u8 dmadisable_shift;
};
/**
@@ -374,11 +389,13 @@ struct omap_hwmod_omap2_prcm {
* struct omap_hwmod_omap4_prcm - OMAP4-specific PRCM data
* @clkctrl_reg: PRCM address of the clock control register
* @rstctrl_reg: address of the XXX_RSTCTRL register located in the PRM
+ * @rstst_reg: (AM33XX only) address of the XXX_RSTST register in the PRM
* @submodule_wkdep_bit: bit shift of the WKDEP range
*/
struct omap_hwmod_omap4_prcm {
u16 clkctrl_offs;
u16 rstctrl_offs;
+ u16 rstst_offs;
u16 context_offs;
u8 submodule_wkdep_bit;
u8 modulemode;
@@ -629,6 +646,10 @@ int omap_hwmod_no_setup_reset(struct omap_hwmod *oh);
int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx);
+extern void __init omap_hwmod_init(void);
+
+const char *omap_hwmod_get_main_clk(struct omap_hwmod *oh);
+
/*
* Chip variant-specific hwmod init routines - XXX should be converted
* to use initcalls once the initial boot ordering is straightened out
diff --git a/arch/arm/plat-omap/include/plat/sdrc.h b/arch/arm/plat-omap/include/plat/sdrc.h
index 9bb978ecd884..36d6a7666216 100644
--- a/arch/arm/plat-omap/include/plat/sdrc.h
+++ b/arch/arm/plat-omap/include/plat/sdrc.h
@@ -123,7 +123,7 @@ struct omap_sdrc_params {
u32 mr;
};
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
+#ifdef CONFIG_SOC_HAS_OMAP2_SDRC
void omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
struct omap_sdrc_params *sdrc_cs1);
#else
diff --git a/arch/arm/plat-omap/include/plat/serial.h b/arch/arm/plat-omap/include/plat/serial.h
index b073e5f2b190..65fce44dce34 100644
--- a/arch/arm/plat-omap/include/plat/serial.h
+++ b/arch/arm/plat-omap/include/plat/serial.h
@@ -60,6 +60,17 @@
/* AM3505/3517 UART4 */
#define AM35XX_UART4_BASE 0x4809E000 /* Only on AM3505/3517 */
+/* AM33XX serial port */
+#define AM33XX_UART1_BASE 0x44E09000
+
+/* OMAP5 serial ports */
+#define OMAP5_UART1_BASE OMAP2_UART1_BASE
+#define OMAP5_UART2_BASE OMAP2_UART2_BASE
+#define OMAP5_UART3_BASE OMAP4_UART3_BASE
+#define OMAP5_UART4_BASE OMAP4_UART4_BASE
+#define OMAP5_UART5_BASE 0x48066000
+#define OMAP5_UART6_BASE 0x48068000
+
/* External port on Zoom2/3 */
#define ZOOM_UART_BASE 0x10000000
#define ZOOM_UART_VIRT 0xfa400000
@@ -93,6 +104,9 @@
#define TI81XXUART1 81
#define TI81XXUART2 82
#define TI81XXUART3 83
+#define AM33XXUART1 84
+#define OMAP5UART3 OMAP4UART3
+#define OMAP5UART4 OMAP4UART4
#define ZOOM_UART 95 /* Only on zoom2/3 */
/* This is only used by 8250.c for omap1510 */
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h
index cc3f11ba7a99..b8d19a136781 100644
--- a/arch/arm/plat-omap/include/plat/uncompress.h
+++ b/arch/arm/plat-omap/include/plat/uncompress.h
@@ -95,6 +95,9 @@ static inline void flush(void)
_DEBUG_LL_ENTRY(mach, OMAP4_UART##p##_BASE, OMAP_PORT_SHIFT, \
OMAP4UART##p)
+#define DEBUG_LL_OMAP5(p, mach) \
+ _DEBUG_LL_ENTRY(mach, OMAP5_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ OMAP5UART##p)
/* Zoom2/3 shift is different for UART1 and external port */
#define DEBUG_LL_ZOOM(mach) \
_DEBUG_LL_ENTRY(mach, ZOOM_UART_BASE, ZOOM_PORT_SHIFT, ZOOM_UART)
@@ -103,6 +106,10 @@ static inline void flush(void)
_DEBUG_LL_ENTRY(mach, TI81XX_UART##p##_BASE, OMAP_PORT_SHIFT, \
TI81XXUART##p)
+#define DEBUG_LL_AM33XX(p, mach) \
+ _DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \
+ AM33XXUART##p)
+
static inline void __arch_decomp_setup(unsigned long arch_id)
{
int port = 0;
@@ -173,6 +180,9 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
DEBUG_LL_OMAP4(3, omap_4430sdp);
DEBUG_LL_OMAP4(3, omap4_panda);
+ /* omap5 based boards using UART3 */
+ DEBUG_LL_OMAP5(3, omap5_sevm);
+
/* zoom2/3 external uart */
DEBUG_LL_ZOOM(omap_zoom2);
DEBUG_LL_ZOOM(omap_zoom3);
@@ -183,6 +193,8 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
/* TI8148 base boards using UART1 */
DEBUG_LL_TI81XX(1, ti8148evm);
+ /* AM33XX base boards using UART1 */
+ DEBUG_LL_AM33XX(1, am335xevm);
} while (0);
}
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h
index 762eeb0626c1..548a4c8d63df 100644
--- a/arch/arm/plat-omap/include/plat/usb.h
+++ b/arch/arm/plat-omap/include/plat/usb.h
@@ -44,6 +44,8 @@ struct usbhs_omap_board_data {
struct regulator *regulator[OMAP3_HS_USB_PORTS];
};
+#ifdef CONFIG_ARCH_OMAP2PLUS
+
struct ehci_hcd_omap_platform_data {
enum usbhs_omap_port_mode port_mode[OMAP3_HS_USB_PORTS];
int reset_gpio_port[OMAP3_HS_USB_PORTS];
@@ -64,26 +66,6 @@ struct usbhs_omap_platform_data {
};
/*-------------------------------------------------------------------------*/
-#define OMAP1_OTG_BASE 0xfffb0400
-#define OMAP1_UDC_BASE 0xfffb4000
-#define OMAP1_OHCI_BASE 0xfffba000
-
-#define OMAP2_OHCI_BASE 0x4805e000
-#define OMAP2_UDC_BASE 0x4805e200
-#define OMAP2_OTG_BASE 0x4805e300
-
-#ifdef CONFIG_ARCH_OMAP1
-
-#define OTG_BASE OMAP1_OTG_BASE
-#define UDC_BASE OMAP1_UDC_BASE
-#define OMAP_OHCI_BASE OMAP1_OHCI_BASE
-
-#else
-
-#define OTG_BASE OMAP2_OTG_BASE
-#define UDC_BASE OMAP2_UDC_BASE
-#define OMAP_OHCI_BASE OMAP2_OHCI_BASE
-
struct omap_musb_board_data {
u8 interface_type;
u8 mode;
@@ -107,44 +89,6 @@ extern int omap4430_phy_init(struct device *dev);
extern int omap4430_phy_exit(struct device *dev);
extern int omap4430_phy_suspend(struct device *dev, int suspend);
-/*
- * NOTE: Please update omap USB drivers to use ioremap + read/write
- */
-
-#define OMAP2_L4_IO_OFFSET 0xb2000000
-#define OMAP2_L4_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_L4_IO_OFFSET)
-
-static inline u8 omap_readb(u32 pa)
-{
- return __raw_readb(OMAP2_L4_IO_ADDRESS(pa));
-}
-
-static inline u16 omap_readw(u32 pa)
-{
- return __raw_readw(OMAP2_L4_IO_ADDRESS(pa));
-}
-
-static inline u32 omap_readl(u32 pa)
-{
- return __raw_readl(OMAP2_L4_IO_ADDRESS(pa));
-}
-
-static inline void omap_writeb(u8 v, u32 pa)
-{
- __raw_writeb(v, OMAP2_L4_IO_ADDRESS(pa));
-}
-
-
-static inline void omap_writew(u16 v, u32 pa)
-{
- __raw_writew(v, OMAP2_L4_IO_ADDRESS(pa));
-}
-
-static inline void omap_writel(u32 v, u32 pa)
-{
- __raw_writel(v, OMAP2_L4_IO_ADDRESS(pa));
-}
-
#endif
extern void am35x_musb_reset(void);
@@ -153,142 +97,6 @@ extern void am35x_musb_clear_irq(void);
extern void am35x_set_mode(u8 musb_mode);
extern void ti81xx_musb_phy_power(u8 on);
-/*
- * FIXME correct answer depends on hmc_mode,
- * as does (on omap1) any nonzero value for config->otg port number
- */
-#ifdef CONFIG_USB_GADGET_OMAP
-#define is_usb0_device(config) 1
-#else
-#define is_usb0_device(config) 0
-#endif
-
-void omap_otg_init(struct omap_usb_config *config);
-
-#if defined(CONFIG_USB) || defined(CONFIG_USB_MODULE)
-void omap1_usb_init(struct omap_usb_config *pdata);
-#else
-static inline void omap1_usb_init(struct omap_usb_config *pdata)
-{
-}
-#endif
-
-#if defined(CONFIG_ARCH_OMAP_OTG) || defined(CONFIG_ARCH_OMAP_OTG_MODULE)
-void omap2_usbfs_init(struct omap_usb_config *pdata);
-#else
-static inline void omap2_usbfs_init(struct omap_usb_config *pdata)
-{
-}
-#endif
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * OTG and transceiver registers, for OMAPs starting with ARM926
- */
-#define OTG_REV (OTG_BASE + 0x00)
-#define OTG_SYSCON_1 (OTG_BASE + 0x04)
-# define USB2_TRX_MODE(w) (((w)>>24)&0x07)
-# define USB1_TRX_MODE(w) (((w)>>20)&0x07)
-# define USB0_TRX_MODE(w) (((w)>>16)&0x07)
-# define OTG_IDLE_EN (1 << 15)
-# define HST_IDLE_EN (1 << 14)
-# define DEV_IDLE_EN (1 << 13)
-# define OTG_RESET_DONE (1 << 2)
-# define OTG_SOFT_RESET (1 << 1)
-#define OTG_SYSCON_2 (OTG_BASE + 0x08)
-# define OTG_EN (1 << 31)
-# define USBX_SYNCHRO (1 << 30)
-# define OTG_MST16 (1 << 29)
-# define SRP_GPDATA (1 << 28)
-# define SRP_GPDVBUS (1 << 27)
-# define SRP_GPUVBUS(w) (((w)>>24)&0x07)
-# define A_WAIT_VRISE(w) (((w)>>20)&0x07)
-# define B_ASE_BRST(w) (((w)>>16)&0x07)
-# define SRP_DPW (1 << 14)
-# define SRP_DATA (1 << 13)
-# define SRP_VBUS (1 << 12)
-# define OTG_PADEN (1 << 10)
-# define HMC_PADEN (1 << 9)
-# define UHOST_EN (1 << 8)
-# define HMC_TLLSPEED (1 << 7)
-# define HMC_TLLATTACH (1 << 6)
-# define OTG_HMC(w) (((w)>>0)&0x3f)
-#define OTG_CTRL (OTG_BASE + 0x0c)
-# define OTG_USB2_EN (1 << 29)
-# define OTG_USB2_DP (1 << 28)
-# define OTG_USB2_DM (1 << 27)
-# define OTG_USB1_EN (1 << 26)
-# define OTG_USB1_DP (1 << 25)
-# define OTG_USB1_DM (1 << 24)
-# define OTG_USB0_EN (1 << 23)
-# define OTG_USB0_DP (1 << 22)
-# define OTG_USB0_DM (1 << 21)
-# define OTG_ASESSVLD (1 << 20)
-# define OTG_BSESSEND (1 << 19)
-# define OTG_BSESSVLD (1 << 18)
-# define OTG_VBUSVLD (1 << 17)
-# define OTG_ID (1 << 16)
-# define OTG_DRIVER_SEL (1 << 15)
-# define OTG_A_SETB_HNPEN (1 << 12)
-# define OTG_A_BUSREQ (1 << 11)
-# define OTG_B_HNPEN (1 << 9)
-# define OTG_B_BUSREQ (1 << 8)
-# define OTG_BUSDROP (1 << 7)
-# define OTG_PULLDOWN (1 << 5)
-# define OTG_PULLUP (1 << 4)
-# define OTG_DRV_VBUS (1 << 3)
-# define OTG_PD_VBUS (1 << 2)
-# define OTG_PU_VBUS (1 << 1)
-# define OTG_PU_ID (1 << 0)
-#define OTG_IRQ_EN (OTG_BASE + 0x10) /* 16-bit */
-# define DRIVER_SWITCH (1 << 15)
-# define A_VBUS_ERR (1 << 13)
-# define A_REQ_TMROUT (1 << 12)
-# define A_SRP_DETECT (1 << 11)
-# define B_HNP_FAIL (1 << 10)
-# define B_SRP_TMROUT (1 << 9)
-# define B_SRP_DONE (1 << 8)
-# define B_SRP_STARTED (1 << 7)
-# define OPRT_CHG (1 << 0)
-#define OTG_IRQ_SRC (OTG_BASE + 0x14) /* 16-bit */
- // same bits as in IRQ_EN
-#define OTG_OUTCTRL (OTG_BASE + 0x18) /* 16-bit */
-# define OTGVPD (1 << 14)
-# define OTGVPU (1 << 13)
-# define OTGPUID (1 << 12)
-# define USB2VDR (1 << 10)
-# define USB2PDEN (1 << 9)
-# define USB2PUEN (1 << 8)
-# define USB1VDR (1 << 6)
-# define USB1PDEN (1 << 5)
-# define USB1PUEN (1 << 4)
-# define USB0VDR (1 << 2)
-# define USB0PDEN (1 << 1)
-# define USB0PUEN (1 << 0)
-#define OTG_TEST (OTG_BASE + 0x20) /* 16-bit */
-#define OTG_VENDOR_CODE (OTG_BASE + 0xfc) /* 16-bit */
-
-/*-------------------------------------------------------------------------*/
-
-/* OMAP1 */
-#define USB_TRANSCEIVER_CTRL (0xfffe1000 + 0x0064)
-# define CONF_USB2_UNI_R (1 << 8)
-# define CONF_USB1_UNI_R (1 << 7)
-# define CONF_USB_PORT0_R(x) (((x)>>4)&0x7)
-# define CONF_USB0_ISOLATE_R (1 << 3)
-# define CONF_USB_PWRDN_DM_R (1 << 2)
-# define CONF_USB_PWRDN_DP_R (1 << 1)
-
-/* OMAP2 */
-# define USB_UNIDIR 0x0
-# define USB_UNIDIR_TLL 0x1
-# define USB_BIDIR 0x2
-# define USB_BIDIR_TLL 0x3
-# define USBTXWRMODEI(port, x) ((x) << (22 - (port * 2)))
-# define USBT2TLL5PI (1 << 17)
-# define USB0PUENACTLOI (1 << 16)
-# define USBSTANDBYCTRL (1 << 15)
/* AM35x */
/* USB 2.0 PHY Control */
#define CONF2_PHY_GPIOMODE (1 << 23)
diff --git a/arch/arm/plat-omap/include/plat/voltage.h b/arch/arm/plat-omap/include/plat/voltage.h
index 0a6a482ec014..5be4d5def427 100644
--- a/arch/arm/plat-omap/include/plat/voltage.h
+++ b/arch/arm/plat-omap/include/plat/voltage.h
@@ -11,10 +11,29 @@
#ifndef __ARCH_ARM_OMAP_VOLTAGE_H
#define __ARCH_ARM_OMAP_VOLTAGE_H
+/**
+ * struct omap_volt_data - Omap voltage specific data.
+ * @voltage_nominal: The possible voltage value in uV
+ * @sr_efuse_offs: The offset of the efuse register(from system
+ * control module base address) from where to read
+ * the n-target value for the smartreflex module.
+ * @sr_errminlimit: Error min limit value for smartreflex. This value
+ * differs at differnet opp and thus is linked
+ * with voltage.
+ * @vp_errorgain: Error gain value for the voltage processor. This
+ * field also differs according to the voltage/opp.
+ */
+struct omap_volt_data {
+ u32 volt_nominal;
+ u32 sr_efuse_offs;
+ u8 sr_errminlimit;
+ u8 vp_errgain;
+};
struct voltagedomain;
struct voltagedomain *voltdm_lookup(const char *name);
int voltdm_scale(struct voltagedomain *voltdm, unsigned long target_volt);
unsigned long voltdm_get_voltage(struct voltagedomain *voltdm);
-
+struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
+ unsigned long volt);
#endif
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index ad32621aa52e..5e13c3884aa4 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -282,6 +282,8 @@ static int omap_mbox_startup(struct omap_mbox *mbox)
}
mbox->rxq = mq;
mq->mbox = mbox;
+
+ omap_mbox_enable_irq(mbox, IRQ_RX);
}
mutex_unlock(&mbox_configured_lock);
return 0;
@@ -305,6 +307,7 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
mutex_lock(&mbox_configured_lock);
if (!--mbox->use_count) {
+ omap_mbox_disable_irq(mbox, IRQ_RX);
free_irq(mbox->irq, mbox);
tasklet_kill(&mbox->txq->tasklet);
flush_work_sync(&mbox->rxq->work);
@@ -338,13 +341,15 @@ struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
if (!mbox)
return ERR_PTR(-ENOENT);
- ret = omap_mbox_startup(mbox);
- if (ret)
- return ERR_PTR(-ENODEV);
-
if (nb)
blocking_notifier_chain_register(&mbox->notifier, nb);
+ ret = omap_mbox_startup(mbox);
+ if (ret) {
+ blocking_notifier_chain_unregister(&mbox->notifier, nb);
+ return ERR_PTR(-ENODEV);
+ }
+
return mbox;
}
EXPORT_SYMBOL(omap_mbox_get);
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 477363c163ec..766181cb5c95 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -6,8 +6,8 @@
* Copyright (C) 2005 Nokia Corporation
* Written by Tony Lindgren <tony@atomide.com>
*
- * Copyright (C) 2009 Texas Instruments
- * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Copyright (C) 2009-2012 Texas Instruments
+ * Added OMAP4/5 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -44,6 +44,7 @@
#else
#define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000)
#endif
+#define OMAP5_SRAM_PA 0x40300000
#if defined(CONFIG_ARCH_OMAP2PLUS)
#define SRAM_BOOTLOADER_SZ 0x00
@@ -85,7 +86,7 @@ static int is_sram_locked(void)
__raw_writel(0xCFDE, OMAP24XX_VA_READPERM0); /* all i-read */
__raw_writel(0xCFDE, OMAP24XX_VA_WRITEPERM0); /* all i-write */
}
- if (cpu_is_omap34xx() && !cpu_is_am33xx()) {
+ if (cpu_is_omap34xx()) {
__raw_writel(0xFFFF, OMAP34XX_VA_REQINFOPERM0); /* all q-vects */
__raw_writel(0xFFFF, OMAP34XX_VA_READPERM0); /* all i-read */
__raw_writel(0xFFFF, OMAP34XX_VA_WRITEPERM0); /* all i-write */
@@ -118,12 +119,15 @@ static void __init omap_detect_sram(void)
} else if (cpu_is_omap44xx()) {
omap_sram_start = OMAP4_SRAM_PUB_PA;
omap_sram_size = 0xa000; /* 40K */
+ } else if (soc_is_omap54xx()) {
+ omap_sram_start = OMAP5_SRAM_PA;
+ omap_sram_size = SZ_128K; /* 128KB */
} else {
omap_sram_start = OMAP2_SRAM_PUB_PA;
omap_sram_size = 0x800; /* 2K */
}
} else {
- if (cpu_is_am33xx()) {
+ if (soc_is_am33xx()) {
omap_sram_start = AM33XX_SRAM_PA;
omap_sram_size = 0x10000; /* 64K */
} else if (cpu_is_omap34xx()) {
@@ -132,6 +136,9 @@ static void __init omap_detect_sram(void)
} else if (cpu_is_omap44xx()) {
omap_sram_start = OMAP4_SRAM_PA;
omap_sram_size = 0xe000; /* 56K */
+ } else if (soc_is_omap54xx()) {
+ omap_sram_start = OMAP5_SRAM_PA;
+ omap_sram_size = SZ_128K; /* 128KB */
} else {
omap_sram_start = OMAP2_SRAM_PA;
if (cpu_is_omap242x())
@@ -386,7 +393,7 @@ int __init omap_sram_init(void)
omap242x_sram_init();
else if (cpu_is_omap2430())
omap243x_sram_init();
- else if (cpu_is_am33xx())
+ else if (soc_is_am33xx())
am33xx_sram_init();
else if (cpu_is_omap34xx())
omap34xx_sram_init();
diff --git a/arch/arm/plat-omap/usb.c b/arch/arm/plat-omap/usb.c
deleted file mode 100644
index daa0327381b5..000000000000
--- a/arch/arm/plat-omap/usb.c
+++ /dev/null
@@ -1,145 +0,0 @@
- /*
- * arch/arm/plat-omap/usb.c -- platform level USB initialization
- *
- * Copyright (C) 2004 Texas Instruments, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#undef DEBUG
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <plat/usb.h>
-#include <plat/board.h>
-
-#include <mach/hardware.h>
-
-#ifdef CONFIG_ARCH_OMAP_OTG
-
-void __init
-omap_otg_init(struct omap_usb_config *config)
-{
- u32 syscon;
- int alt_pingroup = 0;
-
- /* NOTE: no bus or clock setup (yet?) */
-
- syscon = omap_readl(OTG_SYSCON_1) & 0xffff;
- if (!(syscon & OTG_RESET_DONE))
- pr_debug("USB resets not complete?\n");
-
- //omap_writew(0, OTG_IRQ_EN);
-
- /* pin muxing and transceiver pinouts */
- if (config->pins[0] > 2) /* alt pingroup 2 */
- alt_pingroup = 1;
- syscon |= config->usb0_init(config->pins[0], is_usb0_device(config));
- syscon |= config->usb1_init(config->pins[1]);
- syscon |= config->usb2_init(config->pins[2], alt_pingroup);
- pr_debug("OTG_SYSCON_1 = %08x\n", omap_readl(OTG_SYSCON_1));
- omap_writel(syscon, OTG_SYSCON_1);
-
- syscon = config->hmc_mode;
- syscon |= USBX_SYNCHRO | (4 << 16) /* B_ASE0_BRST */;
-#ifdef CONFIG_USB_OTG
- if (config->otg)
- syscon |= OTG_EN;
-#endif
- if (cpu_class_is_omap1())
- pr_debug("USB_TRANSCEIVER_CTRL = %03x\n",
- omap_readl(USB_TRANSCEIVER_CTRL));
- pr_debug("OTG_SYSCON_2 = %08x\n", omap_readl(OTG_SYSCON_2));
- omap_writel(syscon, OTG_SYSCON_2);
-
- printk("USB: hmc %d", config->hmc_mode);
- if (!alt_pingroup)
- printk(", usb2 alt %d wires", config->pins[2]);
- else if (config->pins[0])
- printk(", usb0 %d wires%s", config->pins[0],
- is_usb0_device(config) ? " (dev)" : "");
- if (config->pins[1])
- printk(", usb1 %d wires", config->pins[1]);
- if (!alt_pingroup && config->pins[2])
- printk(", usb2 %d wires", config->pins[2]);
- if (config->otg)
- printk(", Mini-AB on usb%d", config->otg - 1);
- printk("\n");
-
- if (cpu_class_is_omap1()) {
- u16 w;
-
- /* leave USB clocks/controllers off until needed */
- w = omap_readw(ULPD_SOFT_REQ);
- w &= ~SOFT_USB_CLK_REQ;
- omap_writew(w, ULPD_SOFT_REQ);
-
- w = omap_readw(ULPD_CLOCK_CTRL);
- w &= ~USB_MCLK_EN;
- w |= DIS_USB_PVCI_CLK;
- omap_writew(w, ULPD_CLOCK_CTRL);
- }
- syscon = omap_readl(OTG_SYSCON_1);
- syscon |= HST_IDLE_EN|DEV_IDLE_EN|OTG_IDLE_EN;
-
-#ifdef CONFIG_USB_GADGET_OMAP
- if (config->otg || config->register_dev) {
- struct platform_device *udc_device = config->udc_device;
- int status;
-
- syscon &= ~DEV_IDLE_EN;
- udc_device->dev.platform_data = config;
- status = platform_device_register(udc_device);
- if (status)
- pr_debug("can't register UDC device, %d\n", status);
- }
-#endif
-
-#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
- if (config->otg || config->register_host) {
- struct platform_device *ohci_device = config->ohci_device;
- int status;
-
- syscon &= ~HST_IDLE_EN;
- ohci_device->dev.platform_data = config;
- status = platform_device_register(ohci_device);
- if (status)
- pr_debug("can't register OHCI device, %d\n", status);
- }
-#endif
-
-#ifdef CONFIG_USB_OTG
- if (config->otg) {
- struct platform_device *otg_device = config->otg_device;
- int status;
-
- syscon &= ~OTG_IDLE_EN;
- otg_device->dev.platform_data = config;
- status = platform_device_register(otg_device);
- if (status)
- pr_debug("can't register OTG device, %d\n", status);
- }
-#endif
- pr_debug("OTG_SYSCON_1 = %08x\n", omap_readl(OTG_SYSCON_1));
- omap_writel(syscon, OTG_SYSCON_1);
-}
-
-#else
-void omap_otg_init(struct omap_usb_config *config) {}
-#endif
diff --git a/arch/arm/plat-s3c24xx/irq.c b/arch/arm/plat-s3c24xx/irq.c
index bc42c04091fd..fe57bbbf166b 100644
--- a/arch/arm/plat-s3c24xx/irq.c
+++ b/arch/arm/plat-s3c24xx/irq.c
@@ -533,7 +533,7 @@ void __init s3c24xx_init_irq(void)
int i;
#ifdef CONFIG_FIQ
- init_FIQ();
+ init_FIQ(FIQ_START);
#endif
irqdbf("s3c2410_init_irq: clearing interrupt status flags\n");
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index a2fae4ea0936..7aca31c1df1f 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -78,6 +78,10 @@ config S5P_HRT
# clock options
+config SAMSUNG_CLOCK
+ bool
+ default y if !COMMON_CLK
+
config SAMSUNG_CLKSRC
bool
help
@@ -491,14 +495,6 @@ config S5P_SLEEP
Internal config node to apply common S5P sleep management code.
Can be selected by S5P and newer SoCs with similar sleep procedure.
-comment "Power Domain"
-
-config SAMSUNG_PD
- bool "Samsung Power Domain"
- depends on PM_RUNTIME
- help
- Say Y here if you want to control Power Domain by Runtime PM.
-
config DEBUG_S3C_UART
depends on PLAT_SAMSUNG
int
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 860b2db4db15..b78717496677 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -15,8 +15,8 @@ obj-y += init.o cpu.o
obj-$(CONFIG_ARCH_USES_GETTIMEOFFSET) += time.o
obj-$(CONFIG_S5P_HRT) += s5p-time.o
-obj-y += clock.o
-obj-y += pwm-clock.o
+obj-$(CONFIG_SAMSUNG_CLOCK) += clock.o
+obj-$(CONFIG_SAMSUNG_CLOCK) += pwm-clock.o
obj-$(CONFIG_SAMSUNG_CLKSRC) += clock-clksrc.o
obj-$(CONFIG_S5P_CLOCK) += s5p-clock.o
@@ -60,10 +60,6 @@ obj-$(CONFIG_SAMSUNG_WAKEMASK) += wakeup-mask.o
obj-$(CONFIG_S5P_PM) += s5p-pm.o s5p-irq-pm.o
obj-$(CONFIG_S5P_SLEEP) += s5p-sleep.o
-# PD support
-
-obj-$(CONFIG_SAMSUNG_PD) += pd.o
-
# PWM support
obj-$(CONFIG_HAVE_PWM) += pwm.o
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index 33ecd0c9f0c3..b1e05ccff3ac 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -157,11 +157,13 @@ int s3c_adc_start(struct s3c_adc_client *client,
return -EINVAL;
}
- if (client->is_ts && adc->ts_pend)
- return -EAGAIN;
-
spin_lock_irqsave(&adc->lock, flags);
+ if (client->is_ts && adc->ts_pend) {
+ spin_unlock_irqrestore(&adc->lock, flags);
+ return -EAGAIN;
+ }
+
client->channel = channel;
client->nr_samples = nr_samples;
diff --git a/arch/arm/plat-samsung/devs.c b/arch/arm/plat-samsung/devs.c
index 1d214cb9d770..74e31ce35538 100644
--- a/arch/arm/plat-samsung/devs.c
+++ b/arch/arm/plat-samsung/devs.c
@@ -126,7 +126,8 @@ struct platform_device s3c_device_adc = {
#ifdef CONFIG_CPU_S3C2440
static struct resource s3c_camif_resource[] = {
[0] = DEFINE_RES_MEM(S3C2440_PA_CAMIF, S3C2440_SZ_CAMIF),
- [1] = DEFINE_RES_IRQ(IRQ_CAM),
+ [1] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_C),
+ [2] = DEFINE_RES_IRQ(IRQ_S3C2440_CAM_P),
};
struct platform_device s3c_device_camif = {
@@ -1512,7 +1513,7 @@ static struct resource s3c64xx_spi0_resource[] = {
};
struct platform_device s3c64xx_device_spi0 = {
- .name = "s3c64xx-spi",
+ .name = "s3c6410-spi",
.id = 0,
.num_resources = ARRAY_SIZE(s3c64xx_spi0_resource),
.resource = s3c64xx_spi0_resource,
@@ -1522,13 +1523,10 @@ struct platform_device s3c64xx_device_spi0 = {
},
};
-void __init s3c64xx_spi0_set_platdata(struct s3c64xx_spi_info *pd,
- int src_clk_nr, int num_cs)
+void __init s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
+ int num_cs)
{
- if (!pd) {
- pr_err("%s:Need to pass platform data\n", __func__);
- return;
- }
+ struct s3c64xx_spi_info pd;
/* Reject invalid configuration */
if (!num_cs || src_clk_nr < 0) {
@@ -1536,12 +1534,11 @@ void __init s3c64xx_spi0_set_platdata(struct s3c64xx_spi_info *pd,
return;
}
- pd->num_cs = num_cs;
- pd->src_clk_nr = src_clk_nr;
- if (!pd->cfg_gpio)
- pd->cfg_gpio = s3c64xx_spi0_cfg_gpio;
+ pd.num_cs = num_cs;
+ pd.src_clk_nr = src_clk_nr;
+ pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi0_cfg_gpio;
- s3c_set_platdata(pd, sizeof(*pd), &s3c64xx_device_spi0);
+ s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi0);
}
#endif /* CONFIG_S3C64XX_DEV_SPI0 */
@@ -1554,7 +1551,7 @@ static struct resource s3c64xx_spi1_resource[] = {
};
struct platform_device s3c64xx_device_spi1 = {
- .name = "s3c64xx-spi",
+ .name = "s3c6410-spi",
.id = 1,
.num_resources = ARRAY_SIZE(s3c64xx_spi1_resource),
.resource = s3c64xx_spi1_resource,
@@ -1564,26 +1561,20 @@ struct platform_device s3c64xx_device_spi1 = {
},
};
-void __init s3c64xx_spi1_set_platdata(struct s3c64xx_spi_info *pd,
- int src_clk_nr, int num_cs)
+void __init s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
+ int num_cs)
{
- if (!pd) {
- pr_err("%s:Need to pass platform data\n", __func__);
- return;
- }
-
/* Reject invalid configuration */
if (!num_cs || src_clk_nr < 0) {
pr_err("%s: Invalid SPI configuration\n", __func__);
return;
}
- pd->num_cs = num_cs;
- pd->src_clk_nr = src_clk_nr;
- if (!pd->cfg_gpio)
- pd->cfg_gpio = s3c64xx_spi1_cfg_gpio;
+ pd.num_cs = num_cs;
+ pd.src_clk_nr = src_clk_nr;
+ pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi1_cfg_gpio;
- s3c_set_platdata(pd, sizeof(*pd), &s3c64xx_device_spi1);
+ s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi1);
}
#endif /* CONFIG_S3C64XX_DEV_SPI1 */
@@ -1596,7 +1587,7 @@ static struct resource s3c64xx_spi2_resource[] = {
};
struct platform_device s3c64xx_device_spi2 = {
- .name = "s3c64xx-spi",
+ .name = "s3c6410-spi",
.id = 2,
.num_resources = ARRAY_SIZE(s3c64xx_spi2_resource),
.resource = s3c64xx_spi2_resource,
@@ -1606,13 +1597,10 @@ struct platform_device s3c64xx_device_spi2 = {
},
};
-void __init s3c64xx_spi2_set_platdata(struct s3c64xx_spi_info *pd,
- int src_clk_nr, int num_cs)
+void __init s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
+ int num_cs)
{
- if (!pd) {
- pr_err("%s:Need to pass platform data\n", __func__);
- return;
- }
+ struct s3c64xx_spi_info pd;
/* Reject invalid configuration */
if (!num_cs || src_clk_nr < 0) {
@@ -1620,11 +1608,10 @@ void __init s3c64xx_spi2_set_platdata(struct s3c64xx_spi_info *pd,
return;
}
- pd->num_cs = num_cs;
- pd->src_clk_nr = src_clk_nr;
- if (!pd->cfg_gpio)
- pd->cfg_gpio = s3c64xx_spi2_cfg_gpio;
+ pd.num_cs = num_cs;
+ pd.src_clk_nr = src_clk_nr;
+ pd.cfg_gpio = (cfg_gpio) ? cfg_gpio : s3c64xx_spi2_cfg_gpio;
- s3c_set_platdata(pd, sizeof(*pd), &s3c64xx_device_spi2);
+ s3c_set_platdata(&pd, sizeof(pd), &s3c64xx_device_spi2);
}
#endif /* CONFIG_S3C64XX_DEV_SPI2 */
diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c
index eb9f4f534006..c38d75489240 100644
--- a/arch/arm/plat-samsung/dma-ops.c
+++ b/arch/arm/plat-samsung/dma-ops.c
@@ -19,72 +19,79 @@
#include <mach/dma.h>
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
- struct samsung_dma_info *info)
+ struct samsung_dma_req *param)
{
- struct dma_chan *chan;
dma_cap_mask_t mask;
- struct dma_slave_config slave_config;
void *filter_param;
dma_cap_zero(mask);
- dma_cap_set(info->cap, mask);
+ dma_cap_set(param->cap, mask);
/*
* If a dma channel property of a device node from device tree is
* specified, use that as the fliter parameter.
*/
- filter_param = (dma_ch == DMACH_DT_PROP) ? (void *)info->dt_dmach_prop :
- (void *)dma_ch;
- chan = dma_request_channel(mask, pl330_filter, filter_param);
+ filter_param = (dma_ch == DMACH_DT_PROP) ?
+ (void *)param->dt_dmach_prop : (void *)dma_ch;
+ return (unsigned)dma_request_channel(mask, pl330_filter, filter_param);
+}
+
+static int samsung_dmadev_release(unsigned ch, void *param)
+{
+ dma_release_channel((struct dma_chan *)ch);
- if (info->direction == DMA_DEV_TO_MEM) {
+ return 0;
+}
+
+static int samsung_dmadev_config(unsigned ch,
+ struct samsung_dma_config *param)
+{
+ struct dma_chan *chan = (struct dma_chan *)ch;
+ struct dma_slave_config slave_config;
+
+ if (param->direction == DMA_DEV_TO_MEM) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
- slave_config.direction = info->direction;
- slave_config.src_addr = info->fifo;
- slave_config.src_addr_width = info->width;
+ slave_config.direction = param->direction;
+ slave_config.src_addr = param->fifo;
+ slave_config.src_addr_width = param->width;
slave_config.src_maxburst = 1;
dmaengine_slave_config(chan, &slave_config);
- } else if (info->direction == DMA_MEM_TO_DEV) {
+ } else if (param->direction == DMA_MEM_TO_DEV) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
- slave_config.direction = info->direction;
- slave_config.dst_addr = info->fifo;
- slave_config.dst_addr_width = info->width;
+ slave_config.direction = param->direction;
+ slave_config.dst_addr = param->fifo;
+ slave_config.dst_addr_width = param->width;
slave_config.dst_maxburst = 1;
dmaengine_slave_config(chan, &slave_config);
+ } else {
+ pr_warn("unsupported direction\n");
+ return -EINVAL;
}
- return (unsigned)chan;
-}
-
-static int samsung_dmadev_release(unsigned ch,
- struct s3c2410_dma_client *client)
-{
- dma_release_channel((struct dma_chan *)ch);
-
return 0;
}
static int samsung_dmadev_prepare(unsigned ch,
- struct samsung_dma_prep_info *info)
+ struct samsung_dma_prep *param)
{
struct scatterlist sg;
struct dma_chan *chan = (struct dma_chan *)ch;
struct dma_async_tx_descriptor *desc;
- switch (info->cap) {
+ switch (param->cap) {
case DMA_SLAVE:
sg_init_table(&sg, 1);
- sg_dma_len(&sg) = info->len;
- sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)),
- info->len, offset_in_page(info->buf));
- sg_dma_address(&sg) = info->buf;
+ sg_dma_len(&sg) = param->len;
+ sg_set_page(&sg, pfn_to_page(PFN_DOWN(param->buf)),
+ param->len, offset_in_page(param->buf));
+ sg_dma_address(&sg) = param->buf;
desc = dmaengine_prep_slave_sg(chan,
- &sg, 1, info->direction, DMA_PREP_INTERRUPT);
+ &sg, 1, param->direction, DMA_PREP_INTERRUPT);
break;
case DMA_CYCLIC:
- desc = dmaengine_prep_dma_cyclic(chan,
- info->buf, info->len, info->period, info->direction);
+ desc = dmaengine_prep_dma_cyclic(chan, param->buf,
+ param->len, param->period, param->direction);
break;
default:
dev_err(&chan->dev->device, "unsupported format\n");
@@ -96,8 +103,8 @@ static int samsung_dmadev_prepare(unsigned ch,
return -EFAULT;
}
- desc->callback = info->fp;
- desc->callback_param = info->fp_param;
+ desc->callback = param->fp;
+ desc->callback_param = param->fp_param;
dmaengine_submit((struct dma_async_tx_descriptor *)desc);
@@ -119,6 +126,7 @@ static inline int samsung_dmadev_flush(unsigned ch)
static struct samsung_dma_ops dmadev_ops = {
.request = samsung_dmadev_request,
.release = samsung_dmadev_release,
+ .config = samsung_dmadev_config,
.prepare = samsung_dmadev_prepare,
.trigger = samsung_dmadev_trigger,
.started = NULL,
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 0721293fad63..ace4451b7651 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -132,6 +132,10 @@ IS_SAMSUNG_CPU(exynos5250, EXYNOS5250_SOC_ID, EXYNOS5_SOC_MASK)
#define IODESC_ENT(x) { (unsigned long)S3C24XX_VA_##x, __phys_to_pfn(S3C24XX_PA_##x), S3C24XX_SZ_##x, MT_DEVICE }
+#ifndef KHZ
+#define KHZ (1000)
+#endif
+
#ifndef MHZ
#define MHZ (1000*1000)
#endif
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 61ca2f356c52..5da4b4f38f40 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -131,7 +131,6 @@ extern struct platform_device exynos4_device_ohci;
extern struct platform_device exynos4_device_pcm0;
extern struct platform_device exynos4_device_pcm1;
extern struct platform_device exynos4_device_pcm2;
-extern struct platform_device exynos4_device_pd[];
extern struct platform_device exynos4_device_spdif;
extern struct platform_device exynos_device_drm;
diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
index 71a6827c7706..f5144cdd3001 100644
--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
@@ -16,7 +16,13 @@
#include <linux/dmaengine.h>
#include <mach/dma.h>
-struct samsung_dma_prep_info {
+struct samsung_dma_req {
+ enum dma_transaction_type cap;
+ struct property *dt_dmach_prop;
+ struct s3c2410_dma_client *client;
+};
+
+struct samsung_dma_prep {
enum dma_transaction_type cap;
enum dma_transfer_direction direction;
dma_addr_t buf;
@@ -26,19 +32,17 @@ struct samsung_dma_prep_info {
void *fp_param;
};
-struct samsung_dma_info {
- enum dma_transaction_type cap;
+struct samsung_dma_config {
enum dma_transfer_direction direction;
enum dma_slave_buswidth width;
dma_addr_t fifo;
- struct s3c2410_dma_client *client;
- struct property *dt_dmach_prop;
};
struct samsung_dma_ops {
- unsigned (*request)(enum dma_ch ch, struct samsung_dma_info *info);
- int (*release)(unsigned ch, struct s3c2410_dma_client *client);
- int (*prepare)(unsigned ch, struct samsung_dma_prep_info *info);
+ unsigned (*request)(enum dma_ch ch, struct samsung_dma_req *param);
+ int (*release)(unsigned ch, void *param);
+ int (*config)(unsigned ch, struct samsung_dma_config *param);
+ int (*prepare)(unsigned ch, struct samsung_dma_prep *param);
int (*trigger)(unsigned ch);
int (*started)(unsigned ch);
int (*flush)(unsigned ch);
diff --git a/arch/arm/plat-samsung/include/plat/fb.h b/arch/arm/plat-samsung/include/plat/fb.h
index 536002ff2ab8..b885322717a1 100644
--- a/arch/arm/plat-samsung/include/plat/fb.h
+++ b/arch/arm/plat-samsung/include/plat/fb.h
@@ -43,7 +43,6 @@ struct s3c_fb_pd_win {
* @setup_gpio: Setup the external GPIO pins to the right state to transfer
* the data from the display system to the connected display
* device.
- * @default_win: default window layer number to be used for UI layer.
* @vidcon0: The base vidcon0 values to control the panel data format.
* @vidcon1: The base vidcon1 values to control the panel data output.
* @vtiming: Video timing when connected to a RGB type panel.
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg.h b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
index df8155b9d4d1..08740eed050c 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg.h
@@ -24,7 +24,7 @@
#ifndef __PLAT_GPIO_CFG_H
#define __PLAT_GPIO_CFG_H __FILE__
-#include<linux/types.h>
+#include <linux/types.h>
typedef unsigned int __bitwise__ samsung_gpio_pull_t;
typedef unsigned int __bitwise__ s5p_gpio_drvstr_t;
diff --git a/arch/arm/plat-samsung/include/plat/map-s3c.h b/arch/arm/plat-samsung/include/plat/map-s3c.h
index 7d048759b772..c0c70a895ca8 100644
--- a/arch/arm/plat-samsung/include/plat/map-s3c.h
+++ b/arch/arm/plat-samsung/include/plat/map-s3c.h
@@ -22,7 +22,7 @@
#define S3C24XX_VA_WATCHDOG S3C_VA_WATCHDOG
#define S3C2412_VA_SSMC S3C_ADDR_CPU(0x00000000)
-#define S3C2412_VA_EBI S3C_ADDR_CPU(0x00010000)
+#define S3C2412_VA_EBI S3C_ADDR_CPU(0x00100000)
#define S3C2410_PA_UART (0x50000000)
#define S3C24XX_PA_UART S3C2410_PA_UART
diff --git a/arch/arm/plat-samsung/include/plat/pd.h b/arch/arm/plat-samsung/include/plat/pd.h
deleted file mode 100644
index abb4bc32716a..000000000000
--- a/arch/arm/plat-samsung/include/plat/pd.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* linux/arch/arm/plat-samsung/include/plat/pd.h
- *
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_PLAT_SAMSUNG_PD_H
-#define __ASM_PLAT_SAMSUNG_PD_H __FILE__
-
-struct samsung_pd_info {
- int (*enable)(struct device *dev);
- int (*disable)(struct device *dev);
- void __iomem *base;
-};
-
-enum exynos4_pd_block {
- PD_MFC,
- PD_G3D,
- PD_LCD0,
- PD_LCD1,
- PD_TV,
- PD_CAM,
- PD_GPS
-};
-
-#endif /* __ASM_PLAT_SAMSUNG_PD_H */
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index fa95e9a00972..ceba18d23a5a 100644
--- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -18,7 +18,6 @@ struct platform_device;
* @fb_delay: Slave specific feedback delay.
* Refer to FB_CLK_SEL register definition in SPI chapter.
* @line: Custom 'identity' of the CS line.
- * @set_level: CS line control.
*
* This is per SPI-Slave Chipselect information.
* Allocate and initialize one in machine init code and make the
@@ -27,57 +26,41 @@ struct platform_device;
struct s3c64xx_spi_csinfo {
u8 fb_delay;
unsigned line;
- void (*set_level)(unsigned line_id, int lvl);
};
/**
* struct s3c64xx_spi_info - SPI Controller defining structure
* @src_clk_nr: Clock source index for the CLK_CFG[SPI_CLKSEL] field.
- * @clk_from_cmu: If the SPI clock/prescalar control block is present
- * by the platform's clock-management-unit and not in SPI controller.
* @num_cs: Number of CS this controller emulates.
* @cfg_gpio: Configure pins for this SPI controller.
- * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6
- * @rx_lvl_offset: Depends on tx fifo_lvl field and bus number
- * @high_speed: If the controller supports HIGH_SPEED_EN bit
- * @tx_st_done: Depends on tx fifo_lvl field
*/
struct s3c64xx_spi_info {
int src_clk_nr;
- bool clk_from_cmu;
-
int num_cs;
-
- int (*cfg_gpio)(struct platform_device *pdev);
-
- /* Following two fields are for future compatibility */
- int fifo_lvl_mask;
- int rx_lvl_offset;
- int high_speed;
- int tx_st_done;
+ int (*cfg_gpio)(void);
};
/**
* s3c64xx_spi_set_platdata - SPI Controller configure callback by the board
* initialization code.
- * @pd: SPI platform data to set.
+ * @cfg_gpio: Pointer to gpio setup function.
* @src_clk_nr: Clock the SPI controller is to use to generate SPI clocks.
* @num_cs: Number of elements in the 'cs' array.
*
* Call this from machine init code for each SPI Controller that
* has some chips attached to it.
*/
-extern void s3c64xx_spi0_set_platdata(struct s3c64xx_spi_info *pd,
- int src_clk_nr, int num_cs);
-extern void s3c64xx_spi1_set_platdata(struct s3c64xx_spi_info *pd,
- int src_clk_nr, int num_cs);
-extern void s3c64xx_spi2_set_platdata(struct s3c64xx_spi_info *pd,
- int src_clk_nr, int num_cs);
+extern void s3c64xx_spi0_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
+ int num_cs);
+extern void s3c64xx_spi1_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
+ int num_cs);
+extern void s3c64xx_spi2_set_platdata(int (*cfg_gpio)(void), int src_clk_nr,
+ int num_cs);
/* defined by architecture to configure gpio */
-extern int s3c64xx_spi0_cfg_gpio(struct platform_device *dev);
-extern int s3c64xx_spi1_cfg_gpio(struct platform_device *dev);
-extern int s3c64xx_spi2_cfg_gpio(struct platform_device *dev);
+extern int s3c64xx_spi0_cfg_gpio(void);
+extern int s3c64xx_spi1_cfg_gpio(void);
+extern int s3c64xx_spi2_cfg_gpio(void);
extern struct s3c64xx_spi_info s3c64xx_spi0_pdata;
extern struct s3c64xx_spi_info s3c64xx_spi1_pdata;
diff --git a/arch/arm/plat-samsung/include/plat/watchdog-reset.h b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
index f19aff19205c..bc4db9b04e36 100644
--- a/arch/arm/plat-samsung/include/plat/watchdog-reset.h
+++ b/arch/arm/plat-samsung/include/plat/watchdog-reset.h
@@ -25,7 +25,7 @@ static inline void arch_wdt_reset(void)
__raw_writel(0, S3C2410_WTCON); /* disable watchdog, to be safe */
- if (s3c2410_wdtclk)
+ if (!IS_ERR(s3c2410_wdtclk))
clk_enable(s3c2410_wdtclk);
/* put initial values into count and data */
diff --git a/arch/arm/plat-samsung/pd.c b/arch/arm/plat-samsung/pd.c
deleted file mode 100644
index 312b510d86b7..000000000000
--- a/arch/arm/plat-samsung/pd.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/* linux/arch/arm/plat-samsung/pd.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * Samsung Power domain support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/pm_runtime.h>
-
-#include <plat/pd.h>
-
-static int samsung_pd_probe(struct platform_device *pdev)
-{
- struct samsung_pd_info *pdata = pdev->dev.platform_data;
- struct device *dev = &pdev->dev;
-
- if (!pdata) {
- dev_err(dev, "no device data specified\n");
- return -ENOENT;
- }
-
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
-
- dev_info(dev, "power domain registered\n");
- return 0;
-}
-
-static int __devexit samsung_pd_remove(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
-
- pm_runtime_disable(dev);
- return 0;
-}
-
-static int samsung_pd_runtime_suspend(struct device *dev)
-{
- struct samsung_pd_info *pdata = dev->platform_data;
- int ret = 0;
-
- if (pdata->disable)
- ret = pdata->disable(dev);
-
- dev_dbg(dev, "suspended\n");
- return ret;
-}
-
-static int samsung_pd_runtime_resume(struct device *dev)
-{
- struct samsung_pd_info *pdata = dev->platform_data;
- int ret = 0;
-
- if (pdata->enable)
- ret = pdata->enable(dev);
-
- dev_dbg(dev, "resumed\n");
- return ret;
-}
-
-static const struct dev_pm_ops samsung_pd_pm_ops = {
- .runtime_suspend = samsung_pd_runtime_suspend,
- .runtime_resume = samsung_pd_runtime_resume,
-};
-
-static struct platform_driver samsung_pd_driver = {
- .driver = {
- .name = "samsung-pd",
- .owner = THIS_MODULE,
- .pm = &samsung_pd_pm_ops,
- },
- .probe = samsung_pd_probe,
- .remove = __devexit_p(samsung_pd_remove),
-};
-
-static int __init samsung_pd_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&samsung_pd_driver);
- if (ret)
- printk(KERN_ERR "%s: failed to add PD driver\n", __func__);
-
- return ret;
-}
-arch_initcall(samsung_pd_init);
diff --git a/arch/arm/plat-samsung/pwm.c b/arch/arm/plat-samsung/pwm.c
index c559d8438c70..d3583050fb05 100644
--- a/arch/arm/plat-samsung/pwm.c
+++ b/arch/arm/plat-samsung/pwm.c
@@ -36,7 +36,6 @@ struct pwm_device {
unsigned int duty_ns;
unsigned char tcon_base;
- unsigned char running;
unsigned char use_count;
unsigned char pwm_id;
};
@@ -116,7 +115,6 @@ int pwm_enable(struct pwm_device *pwm)
local_irq_restore(flags);
- pwm->running = 1;
return 0;
}
@@ -134,8 +132,6 @@ void pwm_disable(struct pwm_device *pwm)
__raw_writel(tcon, S3C2410_TCON);
local_irq_restore(flags);
-
- pwm->running = 0;
}
EXPORT_SYMBOL(pwm_disable);
diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c
index 781494912827..f99448c48d30 100644
--- a/arch/arm/plat-samsung/s3c-dma-ops.c
+++ b/arch/arm/plat-samsung/s3c-dma-ops.c
@@ -36,30 +36,26 @@ static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param,
}
static unsigned s3c_dma_request(enum dma_ch dma_ch,
- struct samsung_dma_info *info)
+ struct samsung_dma_req *param)
{
struct cb_data *data;
- if (s3c2410_dma_request(dma_ch, info->client, NULL) < 0) {
- s3c2410_dma_free(dma_ch, info->client);
+ if (s3c2410_dma_request(dma_ch, param->client, NULL) < 0) {
+ s3c2410_dma_free(dma_ch, param->client);
return 0;
}
+ if (param->cap == DMA_CYCLIC)
+ s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR);
+
data = kzalloc(sizeof(struct cb_data), GFP_KERNEL);
data->ch = dma_ch;
list_add_tail(&data->node, &dma_list);
- s3c2410_dma_devconfig(dma_ch, info->direction, info->fifo);
-
- if (info->cap == DMA_CYCLIC)
- s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR);
-
- s3c2410_dma_config(dma_ch, info->width);
-
return (unsigned)dma_ch;
}
-static int s3c_dma_release(unsigned ch, struct s3c2410_dma_client *client)
+static int s3c_dma_release(unsigned ch, void *param)
{
struct cb_data *data;
@@ -68,16 +64,24 @@ static int s3c_dma_release(unsigned ch, struct s3c2410_dma_client *client)
break;
list_del(&data->node);
- s3c2410_dma_free(ch, client);
+ s3c2410_dma_free(ch, param);
kfree(data);
return 0;
}
-static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info)
+static int s3c_dma_config(unsigned ch, struct samsung_dma_config *param)
+{
+ s3c2410_dma_devconfig(ch, param->direction, param->fifo);
+ s3c2410_dma_config(ch, param->width);
+
+ return 0;
+}
+
+static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep *param)
{
struct cb_data *data;
- int len = (info->cap == DMA_CYCLIC) ? info->period : info->len;
+ int len = (param->cap == DMA_CYCLIC) ? param->period : param->len;
list_for_each_entry(data, &dma_list, node)
if (data->ch == ch)
@@ -85,11 +89,11 @@ static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info)
if (!data->fp) {
s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb);
- data->fp = info->fp;
- data->fp_param = info->fp_param;
+ data->fp = param->fp;
+ data->fp_param = param->fp_param;
}
- s3c2410_dma_enqueue(ch, (void *)data, info->buf, len);
+ s3c2410_dma_enqueue(ch, (void *)data, param->buf, len);
return 0;
}
@@ -117,6 +121,7 @@ static inline int s3c_dma_stop(unsigned ch)
static struct samsung_dma_ops s3c_dma_ops = {
.request = s3c_dma_request,
.release = s3c_dma_release,
+ .config = s3c_dma_config,
.prepare = s3c_dma_prepare,
.trigger = s3c_dma_trigger,
.started = s3c_dma_started,
diff --git a/arch/arm/plat-samsung/s5p-clock.c b/arch/arm/plat-samsung/s5p-clock.c
index 031a61899bef..48a159911037 100644
--- a/arch/arm/plat-samsung/s5p-clock.c
+++ b/arch/arm/plat-samsung/s5p-clock.c
@@ -37,6 +37,7 @@ struct clk clk_ext_xtal_mux = {
struct clk clk_xusbxti = {
.name = "xusbxti",
.id = -1,
+ .rate = 24000000,
};
struct clk s5p_clk_27m = {
diff --git a/arch/arm/plat-versatile/Kconfig b/arch/arm/plat-versatile/Kconfig
index 81ee7cc34457..8d5c10a5084d 100644
--- a/arch/arm/plat-versatile/Kconfig
+++ b/arch/arm/plat-versatile/Kconfig
@@ -1,5 +1,8 @@
if PLAT_VERSATILE
+config PLAT_VERSATILE_CLOCK
+ bool
+
config PLAT_VERSATILE_CLCD
bool
diff --git a/arch/arm/plat-versatile/Makefile b/arch/arm/plat-versatile/Makefile
index a5cb1945bdcc..272769a8a7d6 100644
--- a/arch/arm/plat-versatile/Makefile
+++ b/arch/arm/plat-versatile/Makefile
@@ -1,4 +1,4 @@
-obj-y := clock.o
+obj-$(CONFIG_PLAT_VERSATILE_CLOCK) += clock.o
obj-$(CONFIG_PLAT_VERSATILE_CLCD) += clcd.o
obj-$(CONFIG_PLAT_VERSATILE_FPGA_IRQ) += fpga-irq.o
obj-$(CONFIG_PLAT_VERSATILE_LEDS) += leds.o
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index fef96f47876c..9b765107e15c 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -352,6 +352,11 @@ config MEM_MT48H32M16LFCJ_75
depends on (BFIN526_EZBRD)
default y
+config MEM_MT47H64M16
+ bool
+ depends on (BFIN609_EZKIT)
+ default y
+
source "arch/blackfin/mach-bf518/Kconfig"
source "arch/blackfin/mach-bf527/Kconfig"
source "arch/blackfin/mach-bf533/Kconfig"
@@ -399,8 +404,9 @@ config ROM_BASE
hex "Kernel ROM Base"
depends on ROMKERNEL
default "0x20040040"
- range 0x20000000 0x20400000 if !(BF54x || BF561)
+ range 0x20000000 0x20400000 if !(BF54x || BF561 || BF60x)
range 0x20000000 0x30000000 if (BF54x || BF561)
+ range 0xB0000000 0xC0000000 if (BF60x)
help
Make sure your ROM base does not include any file-header
information that is prepended to the kernel.
@@ -1009,6 +1015,12 @@ config HAVE_PWM
choice
prompt "Uncached DMA region"
default DMA_UNCACHED_1M
+config DMA_UNCACHED_32M
+ bool "Enable 32M DMA region"
+config DMA_UNCACHED_16M
+ bool "Enable 16M DMA region"
+config DMA_UNCACHED_8M
+ bool "Enable 8M DMA region"
config DMA_UNCACHED_4M
bool "Enable 4M DMA region"
config DMA_UNCACHED_2M
@@ -1038,7 +1050,7 @@ config BFIN_EXTMEM_ICACHEABLE
config BFIN_L2_ICACHEABLE
bool "Enable ICACHE for L2 SRAM"
depends on BFIN_ICACHE
- depends on BF54x || BF561
+ depends on (BF54x || BF561 || BF60x) && !SMP
default n
config BFIN_DCACHE
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig
index be9526bee4fb..f4b02350e415 100644
--- a/arch/blackfin/configs/BF609-EZKIT_defconfig
+++ b/arch/blackfin/configs/BF609-EZKIT_defconfig
@@ -90,6 +90,7 @@ CONFIG_INPUT_BFIN_ROTARY=y
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
CONFIG_BFIN_SIMPLE_TIMER=m
+# CONFIG_BFIN_CRC is not set
CONFIG_BFIN_LINKPORT=y
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_BFIN=y
@@ -153,3 +154,4 @@ CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_ARC4=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DEV_BFIN_CRC=y
diff --git a/arch/blackfin/include/asm/bfin-global.h b/arch/blackfin/include/asm/bfin-global.h
index 608be5e6d25c..dc47d79287f9 100644
--- a/arch/blackfin/include/asm/bfin-global.h
+++ b/arch/blackfin/include/asm/bfin-global.h
@@ -14,7 +14,13 @@
#include <linux/linkage.h>
#include <linux/types.h>
-#if defined(CONFIG_DMA_UNCACHED_4M)
+#if defined(CONFIG_DMA_UNCACHED_32M)
+# define DMA_UNCACHED_REGION (32 * 1024 * 1024)
+#elif defined(CONFIG_DMA_UNCACHED_16M)
+# define DMA_UNCACHED_REGION (16 * 1024 * 1024)
+#elif defined(CONFIG_DMA_UNCACHED_8M)
+# define DMA_UNCACHED_REGION (8 * 1024 * 1024)
+#elif defined(CONFIG_DMA_UNCACHED_4M)
# define DMA_UNCACHED_REGION (4 * 1024 * 1024)
#elif defined(CONFIG_DMA_UNCACHED_2M)
# define DMA_UNCACHED_REGION (2 * 1024 * 1024)
diff --git a/arch/blackfin/include/asm/bfin_crc.h b/arch/blackfin/include/asm/bfin_crc.h
index 3deb4452ceed..75cef4dc85a1 100644
--- a/arch/blackfin/include/asm/bfin_crc.h
+++ b/arch/blackfin/include/asm/bfin_crc.h
@@ -79,20 +79,6 @@ struct crc_register {
u32 revid;
};
-struct bfin_crc {
- struct miscdevice mdev;
- struct list_head list;
- int irq;
- int dma_ch_src;
- int dma_ch_dest;
- volatile struct crc_register *regs;
- struct crc_info *info;
- struct mutex mutex;
- struct completion c;
- unsigned short opmode;
- char name[20];
-};
-
/* CRC_STATUS Masks */
#define CMPERR 0x00000002 /* Compare error */
#define DCNTEXP 0x00000010 /* datacnt register expired */
diff --git a/arch/blackfin/include/asm/bfin_serial.h b/arch/blackfin/include/asm/bfin_serial.h
index 8597158010b5..2d90d62edc97 100644
--- a/arch/blackfin/include/asm/bfin_serial.h
+++ b/arch/blackfin/include/asm/bfin_serial.h
@@ -282,7 +282,7 @@ struct bfin_uart_regs {
#define UART_GET_GCTL(p) UART_GET_CTL(p)
#define UART_GET_LCR(p) UART_GET_CTL(p)
#define UART_GET_MCR(p) UART_GET_CTL(p)
-#if ANOMALY_05001001
+#if ANOMALY_16000030
#define UART_GET_STAT(p) \
({ \
u32 __ret; \
diff --git a/arch/blackfin/include/asm/bfin_simple_timer.h b/arch/blackfin/include/asm/bfin_simple_timer.h
index aadfb1ad1fac..b2d5e733079e 100644
--- a/arch/blackfin/include/asm/bfin_simple_timer.h
+++ b/arch/blackfin/include/asm/bfin_simple_timer.h
@@ -17,5 +17,11 @@
#define BFIN_SIMPLE_TIMER_START _IO(BFIN_SIMPLE_TIMER_IOCTL_MAGIC, 6)
#define BFIN_SIMPLE_TIMER_STOP _IO(BFIN_SIMPLE_TIMER_IOCTL_MAGIC, 8)
#define BFIN_SIMPLE_TIMER_READ _IO(BFIN_SIMPLE_TIMER_IOCTL_MAGIC, 10)
+#define BFIN_SIMPLE_TIMER_READ_COUNTER _IO(BFIN_SIMPLE_TIMER_IOCTL_MAGIC, 11)
+
+#define BFIN_SIMPLE_TIMER_MODE_PWM_ONESHOT 0
+#define BFIN_SIMPLE_TIMER_MODE_PWMOUT_CONT 1
+#define BFIN_SIMPLE_TIMER_MODE_WDTH_CAP 2
+#define BFIN_SIMPLE_TIMER_MODE_PWMOUT_CONT_NOIRQ 3
#endif
diff --git a/arch/blackfin/include/asm/bfin_twi.h b/arch/blackfin/include/asm/bfin_twi.h
index 2f3339a47626..f4a072787436 100644
--- a/arch/blackfin/include/asm/bfin_twi.h
+++ b/arch/blackfin/include/asm/bfin_twi.h
@@ -66,9 +66,9 @@ struct bfin_twi_iface {
#define DEFINE_TWI_REG(reg_name, reg) \
static inline u16 read_##reg_name(struct bfin_twi_iface *iface) \
- { return iface->regs_base->reg; } \
+ { return bfin_read16(&iface->regs_base->reg); } \
static inline void write_##reg_name(struct bfin_twi_iface *iface, u16 v) \
- { iface->regs_base->reg = v; }
+ { bfin_write16(&iface->regs_base->reg, v); }
DEFINE_TWI_REG(CLKDIV, clkdiv)
DEFINE_TWI_REG(CONTROL, control)
@@ -84,7 +84,7 @@ DEFINE_TWI_REG(FIFO_CTL, fifo_ctl)
DEFINE_TWI_REG(FIFO_STAT, fifo_stat)
DEFINE_TWI_REG(XMT_DATA8, xmt_data8)
DEFINE_TWI_REG(XMT_DATA16, xmt_data16)
-#if !ANOMALY_05001001
+#if !ANOMALY_16000030
DEFINE_TWI_REG(RCV_DATA8, rcv_data8)
DEFINE_TWI_REG(RCV_DATA16, rcv_data16)
#else
@@ -94,7 +94,7 @@ static inline u16 read_RCV_DATA8(struct bfin_twi_iface *iface)
unsigned long flags;
flags = hard_local_irq_save();
- ret = iface->regs_base->rcv_data8;
+ ret = bfin_read16(&iface->regs_base->rcv_data8);
hard_local_irq_restore(flags);
return ret;
@@ -106,7 +106,7 @@ static inline u16 read_RCV_DATA16(struct bfin_twi_iface *iface)
unsigned long flags;
flags = hard_local_irq_save();
- ret = iface->regs_base->rcv_data16;
+ ret = bfin_read16(&iface->regs_base->rcv_data16);
hard_local_irq_restore(flags);
return ret;
diff --git a/arch/blackfin/include/asm/context.S b/arch/blackfin/include/asm/context.S
index 1f9060395a0a..507e7aa6a561 100644
--- a/arch/blackfin/include/asm/context.S
+++ b/arch/blackfin/include/asm/context.S
@@ -396,3 +396,12 @@
call \func;
#endif
.endm
+
+#if defined(CONFIG_BFIN_SCRATCH_REG_RETN)
+# define EX_SCRATCH_REG RETN
+#elif defined(CONFIG_BFIN_SCRATCH_REG_RETE)
+# define EX_SCRATCH_REG RETE
+#else
+# define EX_SCRATCH_REG CYCLES
+#endif
+
diff --git a/arch/blackfin/include/asm/dpmc.h b/arch/blackfin/include/asm/dpmc.h
index e91eae8330a6..2673b11376f4 100644
--- a/arch/blackfin/include/asm/dpmc.h
+++ b/arch/blackfin/include/asm/dpmc.h
@@ -280,7 +280,7 @@
PM_POP_SYNC(9)
#endif
-#ifdef EBIU_AMBCTL
+#ifdef EBIU_AMGCTL
PM_SYS_POP(9, EBIU_AMBCTL1)
PM_SYS_POP(8, EBIU_AMBCTL0)
PM_SYS_POP16(7, EBIU_AMGCTL)
diff --git a/arch/blackfin/include/asm/gpio.h b/arch/blackfin/include/asm/gpio.h
index 3d84d96f7c2c..98d0133346b5 100644
--- a/arch/blackfin/include/asm/gpio.h
+++ b/arch/blackfin/include/asm/gpio.h
@@ -141,6 +141,8 @@ static inline void bfin_pm_standby_restore(void)
void bfin_gpio_pm_hibernate_restore(void);
void bfin_gpio_pm_hibernate_suspend(void);
+void bfin_pint_suspend(void);
+void bfin_pint_resume(void);
# if !BFIN_GPIO_PINT
int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl);
diff --git a/arch/blackfin/include/asm/irq.h b/arch/blackfin/include/asm/irq.h
index 89de539ed010..4ae1144a4578 100644
--- a/arch/blackfin/include/asm/irq.h
+++ b/arch/blackfin/include/asm/irq.h
@@ -20,6 +20,16 @@
/* SYS_IRQS and NR_IRQS are defined in <mach-bf5xx/irq.h> */
#include <mach/irq.h>
+/*
+ * pm save bfin pint registers
+ */
+struct bfin_pm_pint_save {
+ u32 mask_set;
+ u32 assign;
+ u32 edge_set;
+ u32 invert_set;
+};
+
#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
#else
diff --git a/arch/blackfin/include/asm/mem_init.h b/arch/blackfin/include/asm/mem_init.h
index 237579935e29..f019e9bcefe9 100644
--- a/arch/blackfin/include/asm/mem_init.h
+++ b/arch/blackfin/include/asm/mem_init.h
@@ -6,6 +6,9 @@
* Licensed under the GPL-2 or later.
*/
+#ifndef __MEM_INIT_H__
+#define __MEM_INIT_H__
+
#if defined(EBIU_SDGCTL)
#if defined(CONFIG_MEM_MT48LC16M16A2TG_75) || \
defined(CONFIG_MEM_MT48LC64M4A2FB_7E) || \
@@ -277,3 +280,212 @@
#else
#define PLL_BYPASS 0
#endif
+
+#ifdef CONFIG_BF60x
+
+/* DMC status bits */
+#define IDLE 0x1
+#define MEMINITDONE 0x4
+#define SRACK 0x8
+#define PDACK 0x10
+#define DPDACK 0x20
+#define DLLCALDONE 0x2000
+#define PENDREF 0xF0000
+#define PHYRDPHASE 0xF00000
+#define PHYRDPHASE_OFFSET 20
+
+/* DMC control bits */
+#define LPDDR 0x2
+#define INIT 0x4
+#define SRREQ 0x8
+#define PDREQ 0x10
+#define DPDREQ 0x20
+#define PREC 0x40
+#define ADDRMODE 0x100
+#define RDTOWR 0xE00
+#define PPREF 0x1000
+#define DLLCAL 0x2000
+
+/* DMC DLL control bits */
+#define DLLCALRDCNT 0xFF
+#define DATACYC 0xF00
+#define DATACYC_OFFSET 8
+
+/* CGU Divisor bits */
+#define CSEL_OFFSET 0
+#define S0SEL_OFFSET 5
+#define SYSSEL_OFFSET 8
+#define S1SEL_OFFSET 13
+#define DSEL_OFFSET 16
+#define OSEL_OFFSET 22
+#define ALGN 0x20000000
+#define UPDT 0x40000000
+#define LOCK 0x80000000
+
+/* CGU Status bits */
+#define PLLEN 0x1
+#define PLLBP 0x2
+#define PLOCK 0x4
+#define CLKSALGN 0x8
+
+/* CGU Control bits */
+#define MSEL_MASK 0x7F00
+#define DF_MASK 0x1
+
+struct ddr_config {
+ u32 ddr_clk;
+ u32 dmc_ddrctl;
+ u32 dmc_ddrcfg;
+ u32 dmc_ddrtr0;
+ u32 dmc_ddrtr1;
+ u32 dmc_ddrtr2;
+ u32 dmc_ddrmr;
+ u32 dmc_ddrmr1;
+};
+
+#if defined(CONFIG_MEM_MT47H64M16)
+static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) = {
+ [0] = {
+ .ddr_clk = 125,
+ .dmc_ddrctl = 0x00000904,
+ .dmc_ddrcfg = 0x00000422,
+ .dmc_ddrtr0 = 0x20705212,
+ .dmc_ddrtr1 = 0x201003CF,
+ .dmc_ddrtr2 = 0x00320107,
+ .dmc_ddrmr = 0x00000422,
+ .dmc_ddrmr1 = 0x4,
+ },
+ [1] = {
+ .ddr_clk = 133,
+ .dmc_ddrctl = 0x00000904,
+ .dmc_ddrcfg = 0x00000422,
+ .dmc_ddrtr0 = 0x20806313,
+ .dmc_ddrtr1 = 0x2013040D,
+ .dmc_ddrtr2 = 0x00320108,
+ .dmc_ddrmr = 0x00000632,
+ .dmc_ddrmr1 = 0x4,
+ },
+ [2] = {
+ .ddr_clk = 150,
+ .dmc_ddrctl = 0x00000904,
+ .dmc_ddrcfg = 0x00000422,
+ .dmc_ddrtr0 = 0x20A07323,
+ .dmc_ddrtr1 = 0x20160492,
+ .dmc_ddrtr2 = 0x00320209,
+ .dmc_ddrmr = 0x00000632,
+ .dmc_ddrmr1 = 0x4,
+ },
+ [3] = {
+ .ddr_clk = 166,
+ .dmc_ddrctl = 0x00000904,
+ .dmc_ddrcfg = 0x00000422,
+ .dmc_ddrtr0 = 0x20A07323,
+ .dmc_ddrtr1 = 0x2016050E,
+ .dmc_ddrtr2 = 0x00320209,
+ .dmc_ddrmr = 0x00000632,
+ .dmc_ddrmr1 = 0x4,
+ },
+ [4] = {
+ .ddr_clk = 200,
+ .dmc_ddrctl = 0x00000904,
+ .dmc_ddrcfg = 0x00000422,
+ .dmc_ddrtr0 = 0x20a07323,
+ .dmc_ddrtr1 = 0x2016050f,
+ .dmc_ddrtr2 = 0x00320509,
+ .dmc_ddrmr = 0x00000632,
+ .dmc_ddrmr1 = 0x4,
+ },
+ [5] = {
+ .ddr_clk = 225,
+ .dmc_ddrctl = 0x00000904,
+ .dmc_ddrcfg = 0x00000422,
+ .dmc_ddrtr0 = 0x20E0A424,
+ .dmc_ddrtr1 = 0x302006DB,
+ .dmc_ddrtr2 = 0x0032020D,
+ .dmc_ddrmr = 0x00000842,
+ .dmc_ddrmr1 = 0x4,
+ },
+ [6] = {
+ .ddr_clk = 250,
+ .dmc_ddrctl = 0x00000904,
+ .dmc_ddrcfg = 0x00000422,
+ .dmc_ddrtr0 = 0x20E0A424,
+ .dmc_ddrtr1 = 0x3020079E,
+ .dmc_ddrtr2 = 0x0032020D,
+ .dmc_ddrmr = 0x00000842,
+ .dmc_ddrmr1 = 0x4,
+ },
+};
+#endif
+
+static inline void dmc_enter_self_refresh(void)
+{
+ if (bfin_read_DMC0_STAT() & MEMINITDONE) {
+ bfin_write_DMC0_CTL(bfin_read_DMC0_CTL() | SRREQ);
+ while (!(bfin_read_DMC0_STAT() & SRACK))
+ continue;
+ }
+}
+
+static inline void dmc_exit_self_refresh(void)
+{
+ if (bfin_read_DMC0_STAT() & MEMINITDONE) {
+ bfin_write_DMC0_CTL(bfin_read_DMC0_CTL() & ~SRREQ);
+ while (bfin_read_DMC0_STAT() & SRACK)
+ continue;
+ }
+}
+
+static inline void init_cgu(u32 cgu_div, u32 cgu_ctl)
+{
+ dmc_enter_self_refresh();
+
+ /* Don't set the same value of MSEL and DF to CGU_CTL */
+ if ((bfin_read32(CGU0_CTL) & (MSEL_MASK | DF_MASK))
+ != cgu_ctl) {
+ bfin_write32(CGU0_DIV, cgu_div);
+ bfin_write32(CGU0_CTL, cgu_ctl);
+ while ((bfin_read32(CGU0_STAT) & (CLKSALGN | PLLBP)) ||
+ !(bfin_read32(CGU0_STAT) & PLOCK))
+ continue;
+ }
+
+ bfin_write32(CGU0_DIV, cgu_div | UPDT);
+ while (bfin_read32(CGU0_STAT) & CLKSALGN)
+ continue;
+
+ dmc_exit_self_refresh();
+}
+
+static inline void init_dmc(u32 dmc_clk)
+{
+ int i, dlldatacycle, dll_ctl;
+
+ for (i = 0; i < 7; i++) {
+ if (ddr_config_table[i].ddr_clk == dmc_clk) {
+ bfin_write_DMC0_CFG(ddr_config_table[i].dmc_ddrcfg);
+ bfin_write_DMC0_TR0(ddr_config_table[i].dmc_ddrtr0);
+ bfin_write_DMC0_TR1(ddr_config_table[i].dmc_ddrtr1);
+ bfin_write_DMC0_TR2(ddr_config_table[i].dmc_ddrtr2);
+ bfin_write_DMC0_MR(ddr_config_table[i].dmc_ddrmr);
+ bfin_write_DMC0_EMR1(ddr_config_table[i].dmc_ddrmr1);
+ bfin_write_DMC0_CTL(ddr_config_table[i].dmc_ddrctl);
+ break;
+ }
+ }
+
+ while (!(bfin_read_DMC0_STAT() & MEMINITDONE))
+ continue;
+
+ dlldatacycle = (bfin_read_DMC0_STAT() & PHYRDPHASE) >> PHYRDPHASE_OFFSET;
+ dll_ctl = bfin_read_DMC0_DLLCTL();
+ dll_ctl &= ~DATACYC;
+ bfin_write_DMC0_DLLCTL(dll_ctl | (dlldatacycle << DATACYC_OFFSET));
+
+ while (!(bfin_read_DMC0_STAT() & DLLCALDONE))
+ continue;
+}
+#endif
+
+#endif /*__MEM_INIT_H__*/
+
diff --git a/arch/blackfin/include/asm/traps.h b/arch/blackfin/include/asm/traps.h
index 70c4e511cae6..cec771b8100c 100644
--- a/arch/blackfin/include/asm/traps.h
+++ b/arch/blackfin/include/asm/traps.h
@@ -125,5 +125,7 @@
level " for Supervisor use: Supervisor only registers, all MMRs, and Supervisor\n" \
level " only instructions.\n"
+extern void double_fault_c(struct pt_regs *fp);
+
#endif /* __ASSEMBLY__ */
#endif /* _BFIN_TRAPS_H */
diff --git a/arch/blackfin/kernel/bfin_dma.c b/arch/blackfin/kernel/bfin_dma.c
index c166939ffb2b..4a32f2dd5ddc 100644
--- a/arch/blackfin/kernel/bfin_dma.c
+++ b/arch/blackfin/kernel/bfin_dma.c
@@ -45,7 +45,7 @@ static int __init blackfin_dma_init(void)
atomic_set(&dma_ch[i].chan_status, 0);
dma_ch[i].regs = dma_io_base_addr[i];
}
-#ifdef CH_MEM_STREAM3_SRC
+#if defined(CH_MEM_STREAM3_SRC) && defined(CONFIG_BF60x)
/* Mark MEMDMA Channel 3 as requested since we're using it internally */
request_dma(CH_MEM_STREAM3_DEST, "Blackfin dma_memcpy");
request_dma(CH_MEM_STREAM3_SRC, "Blackfin dma_memcpy");
@@ -361,7 +361,7 @@ void __init early_dma_memcpy_done(void)
__builtin_bfin_ssync();
}
-#ifdef CH_MEM_STREAM3_SRC
+#if defined(CH_MEM_STREAM3_SRC) && defined(CONFIG_BF60x)
#define bfin_read_MDMA_S_CONFIG bfin_read_MDMA_S3_CONFIG
#define bfin_write_MDMA_S_CONFIG bfin_write_MDMA_S3_CONFIG
#define bfin_write_MDMA_S_START_ADDR bfin_write_MDMA_S3_START_ADDR
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 3e366dc2d6e1..34e96ce02aa9 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -58,12 +58,20 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
#ifdef CONFIG_ROMKERNEL
/* Cover kernel XIP flash area */
+#ifdef CONFIG_BF60x
+ addr = CONFIG_ROM_BASE & ~(16 * 1024 * 1024 - 1);
+ d_tbl[i_d].addr = addr;
+ d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_16MB;
+ i_tbl[i_i].addr = addr;
+ i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_16MB;
+#else
addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
d_tbl[i_d].addr = addr;
d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
i_tbl[i_i].addr = addr;
i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
#endif
+#endif
/* Cover L1 memory. One 4M area for code and data each is enough. */
if (cpu == 0) {
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index f0d1118f1825..e7be6532d6a0 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -122,12 +122,13 @@ void __dma_sync(dma_addr_t addr, size_t size,
EXPORT_SYMBOL(__dma_sync);
int
-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
enum dma_data_direction direction)
{
+ struct scatterlist *sg;
int i;
- for (i = 0; i < nents; i++, sg++) {
+ for_each_sg(sg_list, sg, nents, i) {
sg->dma_address = (dma_addr_t) sg_virt(sg);
__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
}
@@ -136,12 +137,13 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
}
EXPORT_SYMBOL(dma_map_sg);
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list,
int nelems, enum dma_data_direction direction)
{
+ struct scatterlist *sg;
int i;
- for (i = 0; i < nelems; i++, sg++) {
+ for_each_sg(sg_list, sg, nelems, i) {
sg->dma_address = (dma_addr_t) sg_virt(sg);
__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
}
diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c
index af732eb3a687..fc179ca07799 100644
--- a/arch/blackfin/mach-bf527/boards/ezkit.c
+++ b/arch/blackfin/mach-bf527/boards/ezkit.c
@@ -114,9 +114,9 @@ static struct musb_hdrc_config musb_config = {
};
static struct musb_hdrc_platform_data musb_plat = {
-#if defined(CONFIG_USB_MUSB_OTG)
+#if defined(CONFIG_USB_MUSB_HDRC) && defined(CONFIG_USB_GADGET_MUSB_HDRC)
.mode = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+#elif defined(CONFIG_USB_MUSB_HDRC)
.mode = MUSB_HOST,
#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
.mode = MUSB_PERIPHERAL,
diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c
index c9d9473a5ab2..5ed654ae66e1 100644
--- a/arch/blackfin/mach-bf537/boards/stamp.c
+++ b/arch/blackfin/mach-bf537/boards/stamp.c
@@ -873,7 +873,7 @@ static struct adf702x_platform_data adf7021_platform_data = {
};
static inline void adf702x_mac_init(void)
{
- random_ether_addr(adf7021_platform_data.mac_addr);
+ eth_random_addr(adf7021_platform_data.mac_addr);
}
#else
static inline void adf702x_mac_init(void) {}
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index 3bd75bae750d..c4d07f040947 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -635,9 +635,9 @@ static struct musb_hdrc_config musb_config = {
};
static struct musb_hdrc_platform_data musb_plat = {
-#if defined(CONFIG_USB_MUSB_OTG)
+#if defined(CONFIG_USB_MUSB_HDRC) && defined(CONFIG_USB_GADGET_MUSB_HDRC)
.mode = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_HDRC_HCD)
+#elif defined(CONFIG_USB_MUSB_HDRC)
.mode = MUSB_HOST,
#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
.mode = MUSB_PERIPHERAL,
diff --git a/arch/blackfin/mach-bf548/include/mach/gpio.h b/arch/blackfin/mach-bf548/include/mach/gpio.h
index 35c8ced46158..be9edb28f96b 100644
--- a/arch/blackfin/mach-bf548/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf548/include/mach/gpio.h
@@ -171,6 +171,8 @@
#define MAX_BLACKFIN_GPIOS 160
#define BFIN_GPIO_PINT 1
+#define NR_PINT_SYS_IRQS 4
+#define NR_PINTS 160
#ifndef __ASSEMBLY__
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c
index 838978808a15..7c36777c6455 100644
--- a/arch/blackfin/mach-bf561/boards/ezkit.c
+++ b/arch/blackfin/mach-bf561/boards/ezkit.c
@@ -452,18 +452,21 @@ static struct v4l2_input adv7183_inputs[] = {
.name = "Composite",
.type = V4L2_INPUT_TYPE_CAMERA,
.std = V4L2_STD_ALL,
+ .capabilities = V4L2_IN_CAP_STD,
},
{
.index = 1,
.name = "S-Video",
.type = V4L2_INPUT_TYPE_CAMERA,
.std = V4L2_STD_ALL,
+ .capabilities = V4L2_IN_CAP_STD,
},
{
.index = 2,
.name = "Component",
.type = V4L2_INPUT_TYPE_CAMERA,
.std = V4L2_STD_ALL,
+ .capabilities = V4L2_IN_CAP_STD,
},
};
diff --git a/arch/blackfin/mach-bf609/Kconfig b/arch/blackfin/mach-bf609/Kconfig
index 2cb727243778..101b33ee9bba 100644
--- a/arch/blackfin/mach-bf609/Kconfig
+++ b/arch/blackfin/mach-bf609/Kconfig
@@ -51,6 +51,14 @@ config PINT5_ASSIGN
endmenu
+config SEC_IRQ_PRIORITY_LEVELS
+ int "SEC interrupt priority levels"
+ default 7
+ range 0 7
+ help
+ Devide the total number of interrupt priority levels into sub-levels.
+ There is 2 ^ (SEC_IRQ_PRIORITY_LEVELS + 1) different levels.
+
endmenu
endif
diff --git a/arch/blackfin/mach-bf609/Makefile b/arch/blackfin/mach-bf609/Makefile
index 2a27f8174543..234fe1b4bb0e 100644
--- a/arch/blackfin/mach-bf609/Makefile
+++ b/arch/blackfin/mach-bf609/Makefile
@@ -2,5 +2,5 @@
# arch/blackfin/mach-bf609/Makefile
#
-obj-y := dma.o clock.o
-obj-$(CONFIG_PM) += pm.o hibernate.o
+obj-y := dma.o clock.o ints-priority.o
+obj-$(CONFIG_PM) += pm.o dpm.o
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c
index ac64f47217c1..c2cf1ae31189 100644
--- a/arch/blackfin/mach-bf609/boards/ezkit.c
+++ b/arch/blackfin/mach-bf609/boards/ezkit.c
@@ -677,11 +677,28 @@ int bf609_nor_flash_init(struct platform_device *dev)
return 0;
}
+void bf609_nor_flash_exit(struct platform_device *dev)
+{
+ const unsigned short pins[] = {
+ P_A3, P_A4, P_A5, P_A6, P_A7, P_A8, P_A9, P_A10, P_A11, P_A12,
+ P_A13, P_A14, P_A15, P_A16, P_A17, P_A18, P_A19, P_A20, P_A21,
+ P_A22, P_A23, P_A24, P_A25, P_NORCK, 0,
+ };
+
+ peripheral_free_list(pins);
+
+ bfin_write32(SMC_GCTL, 0);
+}
+
static struct physmap_flash_data ezkit_flash_data = {
.width = 2,
.parts = ezkit_partitions,
- .init = bf609_nor_flash_init,
+ .init = bf609_nor_flash_init,
+ .exit = bf609_nor_flash_exit,
.nr_parts = ARRAY_SIZE(ezkit_partitions),
+#ifdef CONFIG_ROMKERNEL
+ .probe_type = "map_rom",
+#endif
};
static struct resource ezkit_flash_resource = {
@@ -739,7 +756,7 @@ static struct bfin6xx_spi_chip spidev_chip_info = {
};
#endif
-#if defined(CONFIG_SND_BF6XX_I2S) || defined(CONFIG_SND_BF6XX_I2S_MODULE)
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
static struct platform_device bfin_i2s_pcm = {
.name = "bfin-i2s-pcm-audio",
.id = -1,
@@ -825,6 +842,12 @@ static struct adau1761_platform_data adau1761_info = {
static const unsigned short ppi_req[] = {
P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+ P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
+ P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15,
+#if !defined(CONFIG_VIDEO_VS6624) && !defined(CONFIG_VIDEO_VS6624_MODULE)
+ P_PPI0_D16, P_PPI0_D17, P_PPI0_D18, P_PPI0_D19,
+ P_PPI0_D20, P_PPI0_D21, P_PPI0_D22, P_PPI0_D23,
+#endif
P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
0,
};
@@ -855,7 +878,7 @@ static struct bcap_route vs6624_routes[] = {
},
};
-static const unsigned vs6624_ce_pin = GPIO_PD1;
+static const unsigned vs6624_ce_pin = GPIO_PE4;
static struct bfin_capture_config bfin_capture_data = {
.card_name = "BF609",
@@ -871,7 +894,128 @@ static struct bfin_capture_config bfin_capture_data = {
.ppi_info = &ppi_info,
.ppi_control = (PACK_EN | DLEN_8 | EPPI_CTL_FS1HI_FS2HI
| EPPI_CTL_POLC3 | EPPI_CTL_SYNC2 | EPPI_CTL_NON656),
- .blank_clocks = 8,
+ .blank_pixels = 4,
+};
+#endif
+
+#if defined(CONFIG_VIDEO_ADV7842) \
+ || defined(CONFIG_VIDEO_ADV7842_MODULE)
+#include <media/adv7842.h>
+
+static struct v4l2_input adv7842_inputs[] = {
+ {
+ .index = 0,
+ .name = "Composite",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .std = V4L2_STD_ALL,
+ .capabilities = V4L2_IN_CAP_STD,
+ },
+ {
+ .index = 1,
+ .name = "S-Video",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .std = V4L2_STD_ALL,
+ .capabilities = V4L2_IN_CAP_STD,
+ },
+ {
+ .index = 2,
+ .name = "Component",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS,
+ },
+ {
+ .index = 3,
+ .name = "VGA",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS,
+ },
+ {
+ .index = 4,
+ .name = "HDMI",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS,
+ },
+};
+
+static struct bcap_route adv7842_routes[] = {
+ {
+ .input = 3,
+ .output = 0,
+ .ppi_control = (PACK_EN | DLEN_8 | EPPI_CTL_FLDSEL
+ | EPPI_CTL_ACTIVE656),
+ },
+ {
+ .input = 4,
+ .output = 0,
+ },
+ {
+ .input = 2,
+ .output = 0,
+ },
+ {
+ .input = 1,
+ .output = 0,
+ },
+ {
+ .input = 0,
+ .output = 1,
+ .ppi_control = (EPPI_CTL_SPLTWRD | PACK_EN | DLEN_16
+ | EPPI_CTL_FS1LO_FS2LO | EPPI_CTL_POLC2
+ | EPPI_CTL_SYNC2 | EPPI_CTL_NON656),
+ },
+};
+
+static struct adv7842_output_format adv7842_opf[] = {
+ {
+ .op_ch_sel = ADV7842_OP_CH_SEL_BRG,
+ .op_format_sel = ADV7842_OP_FORMAT_SEL_SDR_ITU656_8,
+ .op_656_range = 1,
+ .blank_data = 1,
+ .insert_av_codes = 1,
+ },
+ {
+ .op_ch_sel = ADV7842_OP_CH_SEL_RGB,
+ .op_format_sel = ADV7842_OP_FORMAT_SEL_SDR_ITU656_16,
+ .op_656_range = 1,
+ .blank_data = 1,
+ },
+};
+
+static struct adv7842_platform_data adv7842_data = {
+ .opf = adv7842_opf,
+ .num_opf = ARRAY_SIZE(adv7842_opf),
+ .ain_sel = ADV7842_AIN10_11_12_NC_SYNC_4_1,
+ .prim_mode = ADV7842_PRIM_MODE_SDP,
+ .vid_std_select = ADV7842_SDP_VID_STD_CVBS_SD_4x1,
+ .inp_color_space = ADV7842_INP_COLOR_SPACE_AUTO,
+ .i2c_sdp_io = 0x40,
+ .i2c_sdp = 0x41,
+ .i2c_cp = 0x42,
+ .i2c_vdp = 0x43,
+ .i2c_afe = 0x44,
+ .i2c_hdmi = 0x45,
+ .i2c_repeater = 0x46,
+ .i2c_edid = 0x47,
+ .i2c_infoframe = 0x48,
+ .i2c_cec = 0x49,
+ .i2c_avlink = 0x4a,
+ .i2c_ex = 0x26,
+};
+
+static struct bfin_capture_config bfin_capture_data = {
+ .card_name = "BF609",
+ .inputs = adv7842_inputs,
+ .num_inputs = ARRAY_SIZE(adv7842_inputs),
+ .routes = adv7842_routes,
+ .i2c_adapter_id = 0,
+ .board_info = {
+ .type = "adv7842",
+ .addr = 0x20,
+ .platform_data = (void *)&adv7842_data,
+ },
+ .ppi_info = &ppi_info,
+ .ppi_control = (PACK_EN | DLEN_8 | EPPI_CTL_FLDSEL
+ | EPPI_CTL_ACTIVE656),
};
#endif
@@ -883,6 +1027,80 @@ static struct platform_device bfin_capture_device = {
};
#endif
+#if defined(CONFIG_VIDEO_BLACKFIN_DISPLAY) \
+ || defined(CONFIG_VIDEO_BLACKFIN_DISPLAY_MODULE)
+#include <linux/videodev2.h>
+#include <media/blackfin/bfin_display.h>
+#include <media/blackfin/ppi.h>
+
+static const unsigned short ppi_req_disp[] = {
+ P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3,
+ P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7,
+ P_PPI0_D8, P_PPI0_D9, P_PPI0_D10, P_PPI0_D11,
+ P_PPI0_D12, P_PPI0_D13, P_PPI0_D14, P_PPI0_D15,
+ P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2,
+ 0,
+};
+
+static const struct ppi_info ppi_info = {
+ .type = PPI_TYPE_EPPI3,
+ .dma_ch = CH_EPPI0_CH0,
+ .irq_err = IRQ_EPPI0_STAT,
+ .base = (void __iomem *)EPPI0_STAT,
+ .pin_req = ppi_req_disp,
+};
+
+#if defined(CONFIG_VIDEO_ADV7511) \
+ || defined(CONFIG_VIDEO_ADV7511_MODULE)
+#include <media/adv7511.h>
+
+static struct v4l2_output adv7511_outputs[] = {
+ {
+ .index = 0,
+ .name = "HDMI",
+ .type = V4L2_INPUT_TYPE_CAMERA,
+ .capabilities = V4L2_OUT_CAP_CUSTOM_TIMINGS,
+ },
+};
+
+static struct disp_route adv7511_routes[] = {
+ {
+ .output = 0,
+ },
+};
+
+static struct adv7511_platform_data adv7511_data = {
+ .edid_addr = 0x7e,
+ .i2c_ex = 0x25,
+};
+
+static struct bfin_display_config bfin_display_data = {
+ .card_name = "BF609",
+ .outputs = adv7511_outputs,
+ .num_outputs = ARRAY_SIZE(adv7511_outputs),
+ .routes = adv7511_routes,
+ .i2c_adapter_id = 0,
+ .board_info = {
+ .type = "adv7511",
+ .addr = 0x39,
+ .platform_data = (void *)&adv7511_data,
+ },
+ .ppi_info = &ppi_info,
+ .ppi_control = (EPPI_CTL_SPLTWRD | PACK_EN | DLEN_16
+ | EPPI_CTL_FS1LO_FS2LO | EPPI_CTL_POLC3
+ | EPPI_CTL_IFSGEN | EPPI_CTL_SYNC2
+ | EPPI_CTL_NON656 | EPPI_CTL_DIR),
+};
+#endif
+
+static struct platform_device bfin_display_device = {
+ .name = "bfin_display",
+ .dev = {
+ .platform_data = &bfin_display_data,
+ },
+};
+#endif
+
#if defined(CONFIG_BFIN_CRC)
#define BFIN_CRC_NAME "bfin-crc"
@@ -947,6 +1165,39 @@ static struct platform_device bfin_crc1_device = {
};
#endif
+#if defined(CONFIG_CRYPTO_DEV_BFIN_CRC)
+#define BFIN_CRYPTO_CRC_NAME "bfin-hmac-crc"
+#define BFIN_CRYPTO_CRC_POLY_DATA 0x5c5c5c5c
+
+static struct resource bfin_crypto_crc_resources[] = {
+ {
+ .start = REG_CRC0_CTL,
+ .end = REG_CRC0_REVID+4,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_CRC0_DCNTEXP,
+ .end = IRQ_CRC0_DCNTEXP,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .start = CH_MEM_STREAM0_SRC_CRC0,
+ .end = CH_MEM_STREAM0_SRC_CRC0,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device bfin_crypto_crc_device = {
+ .name = BFIN_CRYPTO_CRC_NAME,
+ .id = 0,
+ .num_resources = ARRAY_SIZE(bfin_crypto_crc_resources),
+ .resource = bfin_crypto_crc_resources,
+ .dev = {
+ .platform_data = (void *)BFIN_CRYPTO_CRC_POLY_DATA,
+ },
+};
+#endif
+
#if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE)
static const struct ad7877_platform_data bfin_ad7877_ts_info = {
.model = 7877,
@@ -963,6 +1214,28 @@ static const struct ad7877_platform_data bfin_ad7877_ts_info = {
};
#endif
+#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+
+static struct gpio_keys_button bfin_gpio_keys_table[] = {
+ {BTN_0, GPIO_PB10, 1, "gpio-keys: BTN0"},
+ {BTN_1, GPIO_PE1, 1, "gpio-keys: BTN1"},
+};
+
+static struct gpio_keys_platform_data bfin_gpio_keys_data = {
+ .buttons = bfin_gpio_keys_table,
+ .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table),
+};
+
+static struct platform_device bfin_device_gpiokeys = {
+ .name = "gpio-keys",
+ .dev = {
+ .platform_data = &bfin_gpio_keys_data,
+ },
+};
+#endif
+
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) \
|| defined(CONFIG_MTD_M25P80_MODULE)
@@ -981,10 +1254,10 @@ static struct spi_board_info bfin_spi_board_info[] __initdata = {
{
.modalias = "ad7877",
.platform_data = &bfin_ad7877_ts_info,
- .irq = IRQ_PB4, /* old boards (<=Rev 1.3) use IRQ_PJ11 */
+ .irq = IRQ_PD9,
.max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
- .chip_select = 2,
+ .chip_select = 4,
},
#endif
#if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE)
@@ -1050,7 +1323,7 @@ static struct resource bfin_spi1_resource[] = {
/* SPI controller data */
static struct bfin6xx_spi_master bf60x_spi_master_info0 = {
- .num_chipselect = 4,
+ .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
@@ -1065,7 +1338,7 @@ static struct platform_device bf60x_spi_master0 = {
};
static struct bfin6xx_spi_master bf60x_spi_master_info1 = {
- .num_chipselect = 4,
+ .num_chipselect = MAX_CTRL_CS + MAX_BLACKFIN_GPIOS,
.pin_req = {P_SPI1_SCK, P_SPI1_MISO, P_SPI1_MOSI, 0},
};
@@ -1146,6 +1419,11 @@ static struct i2c_board_info __initdata bfin_i2c_board_info0[] = {
.platform_data = (void *)&adau1761_info
},
#endif
+#if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE)
+ {
+ I2C_BOARD_INFO("ssm2602", 0x1b),
+ },
+#endif
};
static struct i2c_board_info __initdata bfin_i2c_board_info1[] = {
@@ -1261,6 +1539,9 @@ static struct platform_device *ezkit_devices[] __initdata = {
&bfin_crc0_device,
&bfin_crc1_device,
#endif
+#if defined(CONFIG_CRYPTO_DEV_BFIN_CRC)
+ &bfin_crypto_crc_device,
+#endif
#if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
&bfin_device_gpiokeys,
@@ -1269,7 +1550,7 @@ static struct platform_device *ezkit_devices[] __initdata = {
#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
&ezkit_flash_device,
#endif
-#if defined(CONFIG_SND_BF6XX_I2S) || defined(CONFIG_SND_BF6XX_I2S_MODULE)
+#if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE)
&bfin_i2s_pcm,
#endif
#if defined(CONFIG_SND_BF6XX_SOC_I2S) || \
@@ -1284,6 +1565,11 @@ static struct platform_device *ezkit_devices[] __initdata = {
|| defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE)
&bfin_capture_device,
#endif
+#if defined(CONFIG_VIDEO_BLACKFIN_DISPLAY) \
+ || defined(CONFIG_VIDEO_BLACKFIN_DISPLAY_MODULE)
+ &bfin_display_device,
+#endif
+
};
static int __init ezkit_init(void)
diff --git a/arch/blackfin/mach-bf609/clock.c b/arch/blackfin/mach-bf609/clock.c
index 7f8f529693ae..437d56c82281 100644
--- a/arch/blackfin/mach-bf609/clock.c
+++ b/arch/blackfin/mach-bf609/clock.c
@@ -97,9 +97,10 @@ int wait_for_pll_align(void)
while (i-- && (bfin_read32(CGU0_STAT) & CGU0_STAT_CLKSALGN));
if (bfin_read32(CGU0_STAT) & CGU0_STAT_CLKSALGN) {
- printk(KERN_DEBUG "fail to align clk\n");
+ printk(KERN_CRIT "fail to align clk\n");
return -1;
}
+
return 0;
}
diff --git a/arch/blackfin/mach-bf609/dpm.S b/arch/blackfin/mach-bf609/dpm.S
new file mode 100644
index 000000000000..54d50c689db1
--- /dev/null
+++ b/arch/blackfin/mach-bf609/dpm.S
@@ -0,0 +1,157 @@
+#include <linux/linkage.h>
+#include <asm/blackfin.h>
+#include <asm/dpmc.h>
+
+#include <asm/context.S>
+
+#define PM_STACK (COREA_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
+
+.section .l1.text
+ENTRY(_enter_hibernate)
+ /* switch stack to L1 scratch, prepare for ddr srfr */
+ P0.H = HI(PM_STACK);
+ P0.L = LO(PM_STACK);
+ SP = P0;
+
+ call _bf609_ddr_sr;
+ call _bfin_hibernate_syscontrol;
+
+ P0.H = HI(DPM0_RESTORE4);
+ P0.L = LO(DPM0_RESTORE4);
+ P1.H = _bf609_pm_data;
+ P1.L = _bf609_pm_data;
+ [P0] = P1;
+
+ P0.H = HI(DPM0_CTL);
+ P0.L = LO(DPM0_CTL);
+ R3.H = HI(0x00000010);
+ R3.L = LO(0x00000010);
+
+ bfin_init_pm_bench_cycles;
+
+ [P0] = R3;
+
+ SSYNC;
+ENDPROC(_enter_hibernate)
+
+/* DPM wake up interrupt won't wake up core on bf60x if its core IMASK
+ * is disabled. This behavior differ from bf5xx serial processor.
+ */
+ENTRY(_dummy_deepsleep)
+ [--sp] = SYSCFG;
+ [--sp] = (R7:0,P5:0);
+ cli r0;
+
+ /* get wake up interrupt ID */
+ P0.l = LO(SEC_SCI_BASE + SEC_CSID);
+ P0.h = HI(SEC_SCI_BASE + SEC_CSID);
+ R0 = [P0];
+
+ /* ACK wake up interrupt in SEC */
+ P1.l = LO(SEC_END);
+ P1.h = HI(SEC_END);
+
+ [P1] = R0;
+ SSYNC;
+
+ /* restore EVT 11 entry */
+ p0.h = hi(EVT11);
+ p0.l = lo(EVT11);
+ p1.h = _evt_evt11;
+ p1.l = _evt_evt11;
+
+ [p0] = p1;
+ SSYNC;
+
+ (R7:0,P5:0) = [sp++];
+ SYSCFG = [sp++];
+ RTI;
+ENDPROC(_dummy_deepsleep)
+
+ENTRY(_enter_deepsleep)
+ LINK 0xC;
+ [--sp] = (R7:0,P5:0);
+
+ /* Change EVT 11 entry to dummy handler for wake up event */
+ p0.h = hi(EVT11);
+ p0.l = lo(EVT11);
+ p1.h = _dummy_deepsleep;
+ p1.l = _dummy_deepsleep;
+
+ [p0] = p1;
+
+ P0.H = HI(PM_STACK);
+ P0.L = LO(PM_STACK);
+
+ EX_SCRATCH_REG = SP;
+ SP = P0;
+
+ SSYNC;
+
+ /* should put ddr to self refresh mode before sleep */
+ call _bf609_ddr_sr;
+
+ /* Set DPM controller to deep sleep mode */
+ P0.H = HI(DPM0_CTL);
+ P0.L = LO(DPM0_CTL);
+ R3.H = HI(0x00000008);
+ R3.L = LO(0x00000008);
+ [P0] = R3;
+ CSYNC;
+
+ /* Enable evt 11 in IMASK before idle, otherwise core doesn't wake up. */
+ r0.l = 0x800;
+ r0.h = 0;
+ sti r0;
+ SSYNC;
+
+ bfin_init_pm_bench_cycles;
+
+ /* Fall into deep sleep in idle*/
+ idle;
+ SSYNC;
+
+ /* Restore PLL after wake up from deep sleep */
+ call _bf609_resume_ccbuf;
+
+ /* turn ddr out of self refresh mode */
+ call _bf609_ddr_sr_exit;
+
+ SP = EX_SCRATCH_REG;
+
+ (R7:0,P5:0) = [SP++];
+ UNLINK;
+ RTS;
+ENDPROC(_enter_deepsleep)
+
+.section .text
+ENTRY(_bf609_hibernate)
+ bfin_cpu_reg_save;
+ bfin_core_mmr_save;
+
+ P0.H = _bf609_pm_data;
+ P0.L = _bf609_pm_data;
+ R1.H = 0xDEAD;
+ R1.L = 0xBEEF;
+ R2.H = .Lpm_resume_here;
+ R2.L = .Lpm_resume_here;
+ [P0++] = R1;
+ [P0++] = R2;
+ [P0++] = SP;
+
+ P1.H = _enter_hibernate;
+ P1.L = _enter_hibernate;
+
+ call (P1);
+.Lpm_resume_here:
+
+ bfin_core_mmr_restore;
+ bfin_cpu_reg_restore;
+
+ [--sp] = RETI; /* Clear Global Interrupt Disable */
+ SP += 4;
+
+ RTS;
+
+ENDPROC(_bf609_hibernate)
+
diff --git a/arch/blackfin/mach-bf609/hibernate.S b/arch/blackfin/mach-bf609/hibernate.S
deleted file mode 100644
index d37a532519c8..000000000000
--- a/arch/blackfin/mach-bf609/hibernate.S
+++ /dev/null
@@ -1,65 +0,0 @@
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-#include <asm/dpmc.h>
-
-#define PM_STACK (COREA_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12)
-
-.section .l1.text
-ENTRY(_enter_hibernate)
- /* switch stack to L1 scratch, prepare for ddr srfr */
- P0.H = HI(PM_STACK);
- P0.L = LO(PM_STACK);
- SP = P0;
-
- call _bf609_ddr_sr;
- call _bfin_hibernate_syscontrol;
-
- P0.H = HI(DPM0_RESTORE4);
- P0.L = LO(DPM0_RESTORE4);
- P1.H = _bf609_pm_data;
- P1.L = _bf609_pm_data;
- [P0] = P1;
-
- P0.H = HI(DPM0_CTL);
- P0.L = LO(DPM0_CTL);
- R3.H = HI(0x00000010);
- R3.L = LO(0x00000010);
-
- bfin_init_pm_bench_cycles;
-
- [P0] = R3;
-
- SSYNC;
-ENDPROC(_enter_hibernate_mode)
-
-.section .text
-ENTRY(_bf609_hibernate)
- bfin_cpu_reg_save;
- bfin_core_mmr_save;
-
- P0.H = _bf609_pm_data;
- P0.L = _bf609_pm_data;
- R1.H = 0xDEAD;
- R1.L = 0xBEEF;
- R2.H = .Lpm_resume_here;
- R2.L = .Lpm_resume_here;
- [P0++] = R1;
- [P0++] = R2;
- [P0++] = SP;
-
- P1.H = _enter_hibernate;
- P1.L = _enter_hibernate;
-
- call (P1);
-.Lpm_resume_here:
-
- bfin_core_mmr_restore;
- bfin_cpu_reg_restore;
-
- [--sp] = RETI; /* Clear Global Interrupt Disable */
- SP += 4;
-
- RTS;
-
-ENDPROC(_bf609_hibernate)
-
diff --git a/arch/blackfin/mach-bf609/include/mach/anomaly.h b/arch/blackfin/mach-bf609/include/mach/anomaly.h
index bdd39aefb565..7a07374308ac 100644
--- a/arch/blackfin/mach-bf609/include/mach/anomaly.h
+++ b/arch/blackfin/mach-bf609/include/mach/anomaly.h
@@ -5,126 +5,99 @@
* and can be replaced with that version at any time
* DO NOT EDIT THIS FILE
*
- * Copyright 2004-2011 Analog Devices Inc.
+ * Copyright 2004-2012 Analog Devices Inc.
* Licensed under the Clear BSD license.
*/
/* This file should be up to date with:
+ * - Revision A, 15/06/2012; ADSP-BF609 Blackfin Processor Anomaly List
*/
#if __SILICON_REVISION__ < 0
-# error will not work on BF506 silicon version
+# error will not work on BF609 silicon version
#endif
#ifndef _MACH_ANOMALY_H_
#define _MACH_ANOMALY_H_
-/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
-#define ANOMALY_05000074 (1)
-/* DMA_RUN Bit Is Not Valid after a Peripheral Receive Channel DMA Stops */
-#define ANOMALY_05000119 (1)
-/* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
-#define ANOMALY_05000122 (1)
-/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
-#define ANOMALY_05000245 (1)
-/* Incorrect Timer Pulse Width in Single-Shot PWM_OUT Mode with External Clock */
-#define ANOMALY_05000254 (1)
-/* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
-#define ANOMALY_05000265 (1)
-/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
-#define ANOMALY_05000310 (1)
-/* PPI Underflow Error Goes Undetected in ITU-R 656 Mode */
-#define ANOMALY_05000366 (1)
-/* Speculative Fetches Can Cause Undesired External FIFO Operations */
-#define ANOMALY_05000416 (1)
-/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
-#define ANOMALY_05000426 (1)
+/* TRU_STAT.ADDRERR and TRU_ERRADDR.ADDR May Not Reflect the Correct Status */
+#define ANOMALY_16000003 (1)
+/* The EPPI Data Enable (DEN) Signal is Not Functional */
+#define ANOMALY_16000004 (1)
+/* Using L1 Instruction Cache with Parity Enabled is Unreliable */
+#define ANOMALY_16000005 (1)
+/* SEQSTAT.SYSNMI Clears Upon Entering the NMI ISR */
+#define ANOMALY_16000006 (1)
+/* DDR2 Memory Reads May Fail Intermittently */
+#define ANOMALY_16000007 (1)
+/* Instruction Memory Stalls Can Cause IFLUSH to Fail */
+#define ANOMALY_16000008 (1)
+/* TestSET Instruction Cannot Be Interrupted */
+#define ANOMALY_16000009 (1)
/* IFLUSH Instruction at End of Hardware Loop Causes Infinite Stall */
-#define ANOMALY_05000443 (1)
-/* UART IrDA Receiver Fails on Extended Bit Pulses */
-#define ANOMALY_05000447 (1)
+#define ANOMALY_16000010 (1)
/* False Hardware Error when RETI Points to Invalid Memory */
-#define ANOMALY_05000461 (1)
-/* PLL Latches Incorrect Settings During Reset */
-#define ANOMALY_05000469 (1)
-/* Incorrect Default MSEL Value in PLL_CTL */
-#define ANOMALY_05000472 (1)
-/* Interrupted SPORT Receive Data Register Read Results In Underflow when SLEN > 15 */
-#define ANOMALY_05000473 (1)
-/* TESTSET Instruction Cannot Be Interrupted */
-#define ANOMALY_05000477 (1)
-/* Reads of ITEST_COMMAND and ITEST_DATA Registers Cause Cache Corruption */
-#define ANOMALY_05000481 (1)
-/* IFLUSH sucks at life */
-#define ANOMALY_05000491 (1)
-/* Tempopary anomaly ID for data loss in MMR read operation if interrupted */
-#define ANOMALY_05001001 (__SILICON_REVISION__ < 1)
+#define ANOMALY_16000011 (1)
+/* Speculative Fetches of Indirect-Pointer Instructions Can Cause False Hardware Errors */
+#define ANOMALY_16000012 (1)
+/* False Hardware Errors Caused by Fetches at the Boundary of Reserved Memory */
+#define ANOMALY_16000013 (1)
+/* False Hardware Error from an Access in the Shadow of a Conditional Branch */
+#define ANOMALY_16000014 (1)
+/* Multi-Issue Instruction with dsp32shiftimm in slot1 and P-reg Store in slot2 Not Supported */
+#define ANOMALY_16000015 (1)
+/* Speculative Fetches Can Cause Undesired External FIFO Operations */
+#define ANOMALY_16000017 (1)
+/* RSI Boot Cleanup Routine Does Not Clear Registers */
+#define ANOMALY_16000018 (1)
+/* SPI Master Boot Device Auto-detection Frequency is Set Incorrectly */
+#define ANOMALY_16000019 (1)
+/* rom_SysControl() Fails to Set DDR0_CTL.INIT for Wakeup From Hibernate */
+#define ANOMALY_16000020 (1)
+/* rom_SysControl() Fails to Save and Restore DDR0_PHYCTL3 for Hibernate/Wakeup Sequence */
+#define ANOMALY_16000021 (1)
+/* Boot Code Fails to Enable Parity Fault Detection */
+#define ANOMALY_16000022 (1)
+/* USB DMA interrupt status do not show the DMA channel interrupt in the DMA ISR */
+#define ANOMALY_16000027 (1)
+/* Interrupted Core Reads of MMRs May Cause Data Loss */
+#define ANOMALY_16000030 (1)
/* Anomalies that don't exist on this proc */
-#define ANOMALY_05000099 (0)
-#define ANOMALY_05000120 (0)
-#define ANOMALY_05000125 (0)
-#define ANOMALY_05000149 (0)
#define ANOMALY_05000158 (0)
-#define ANOMALY_05000171 (0)
-#define ANOMALY_05000179 (0)
-#define ANOMALY_05000182 (0)
-#define ANOMALY_05000183 (0)
#define ANOMALY_05000189 (0)
#define ANOMALY_05000198 (0)
-#define ANOMALY_05000202 (0)
-#define ANOMALY_05000215 (0)
-#define ANOMALY_05000219 (0)
#define ANOMALY_05000220 (0)
-#define ANOMALY_05000227 (0)
#define ANOMALY_05000230 (0)
#define ANOMALY_05000231 (0)
-#define ANOMALY_05000233 (0)
-#define ANOMALY_05000234 (0)
-#define ANOMALY_05000242 (0)
#define ANOMALY_05000244 (0)
-#define ANOMALY_05000248 (0)
-#define ANOMALY_05000250 (0)
-#define ANOMALY_05000257 (0)
-#define ANOMALY_05000261 (0)
#define ANOMALY_05000263 (0)
-#define ANOMALY_05000266 (0)
#define ANOMALY_05000273 (0)
#define ANOMALY_05000274 (0)
#define ANOMALY_05000278 (0)
#define ANOMALY_05000281 (0)
-#define ANOMALY_05000283 (0)
-#define ANOMALY_05000285 (0)
#define ANOMALY_05000287 (0)
-#define ANOMALY_05000301 (0)
-#define ANOMALY_05000305 (0)
-#define ANOMALY_05000307 (0)
#define ANOMALY_05000311 (0)
#define ANOMALY_05000312 (0)
-#define ANOMALY_05000315 (0)
#define ANOMALY_05000323 (0)
-#define ANOMALY_05000353 (1)
-#define ANOMALY_05000357 (0)
-#define ANOMALY_05000362 (1)
#define ANOMALY_05000363 (0)
-#define ANOMALY_05000364 (0)
-#define ANOMALY_05000371 (0)
#define ANOMALY_05000380 (0)
-#define ANOMALY_05000386 (0)
-#define ANOMALY_05000389 (0)
-#define ANOMALY_05000400 (0)
-#define ANOMALY_05000402 (0)
-#define ANOMALY_05000412 (0)
-#define ANOMALY_05000432 (0)
-#define ANOMALY_05000440 (0)
#define ANOMALY_05000448 (0)
-#define ANOMALY_05000456 (0)
#define ANOMALY_05000450 (0)
-#define ANOMALY_05000465 (0)
-#define ANOMALY_05000467 (0)
-#define ANOMALY_05000474 (0)
-#define ANOMALY_05000475 (0)
+#define ANOMALY_05000456 (0)
#define ANOMALY_05000480 (0)
-#define ANOMALY_05000485 (0)
+#define ANOMALY_05000481 (1)
+
+/* Reuse BF5xx anomalies IDs for the same anomaly in BF60x */
+#define ANOMALY_05000491 ANOMALY_16000008
+#define ANOMALY_05000477 ANOMALY_16000009
+#define ANOMALY_05000443 ANOMALY_16000010
+#define ANOMALY_05000461 ANOMALY_16000011
+#define ANOMALY_05000426 ANOMALY_16000012
+#define ANOMALY_05000310 ANOMALY_16000013
+#define ANOMALY_05000245 ANOMALY_16000014
+#define ANOMALY_05000074 ANOMALY_16000015
+#define ANOMALY_05000416 ANOMALY_16000017
+
#endif
diff --git a/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h b/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h
index 6aac38544cc9..f1a6afae1a71 100644
--- a/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h
+++ b/arch/blackfin/mach-bf609/include/mach/defBF60x_base.h
@@ -2665,7 +2665,6 @@
#define DEVSZ_1G 0x400 /* DMC External Bank Size = 1Gbit */
#define DEVSZ_2G 0x500 /* DMC External Bank Size = 2Gbit */
-
/* =========================
L2CTL Registers
========================= */
diff --git a/arch/blackfin/mach-bf609/include/mach/gpio.h b/arch/blackfin/mach-bf609/include/mach/gpio.h
index 127586b1e04a..c32c8cc8db2e 100644
--- a/arch/blackfin/mach-bf609/include/mach/gpio.h
+++ b/arch/blackfin/mach-bf609/include/mach/gpio.h
@@ -123,6 +123,8 @@
#define BFIN_GPIO_PINT 1
+#define NR_PINT_SYS_IRQS 6
+#define NR_PINTS 112
#ifndef __ASSEMBLY__
diff --git a/arch/blackfin/mach-bf609/include/mach/irq.h b/arch/blackfin/mach-bf609/include/mach/irq.h
index 0004552433b2..23e74cdeeee8 100644
--- a/arch/blackfin/mach-bf609/include/mach/irq.h
+++ b/arch/blackfin/mach-bf609/include/mach/irq.h
@@ -293,9 +293,13 @@
#define NR_MACH_IRQS (IRQ_PG15 + 1)
+#define SEC_SCTL_PRIO_OFFSET 8
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
+extern u8 sec_int_priority[];
+
/*
* bfin pint registers layout
*/
diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h
index 036d9bdc889e..3ca0fb965636 100644
--- a/arch/blackfin/mach-bf609/include/mach/pm.h
+++ b/arch/blackfin/mach-bf609/include/mach/pm.h
@@ -11,11 +11,14 @@
#include <linux/suspend.h>
-int bfin609_pm_enter(suspend_state_t state);
-int bf609_pm_prepare(void);
-void bf609_pm_finish(void);
+extern int bfin609_pm_enter(suspend_state_t state);
+extern int bf609_pm_prepare(void);
+extern void bf609_pm_finish(void);
void bf609_hibernate(void);
void bfin_sec_raise_irq(unsigned int sid);
void coreb_enable(void);
+
+int bf609_nor_flash_init(void);
+void bf609_nor_flash_exit(void);
#endif
diff --git a/arch/blackfin/mach-bf609/ints-priority.c b/arch/blackfin/mach-bf609/ints-priority.c
new file mode 100644
index 000000000000..f68abb9aa79e
--- /dev/null
+++ b/arch/blackfin/mach-bf609/ints-priority.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2007-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ * Set up the interrupt priorities
+ */
+
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <asm/blackfin.h>
+
+u8 sec_int_priority[] = {
+ 255, /* IRQ_SEC_ERR */
+ 255, /* IRQ_CGU_EVT */
+ 254, /* IRQ_WATCH0 */
+ 254, /* IRQ_WATCH1 */
+ 253, /* IRQ_L2CTL0_ECC_ERR */
+ 253, /* IRQ_L2CTL0_ECC_WARN */
+ 253, /* IRQ_C0_DBL_FAULT */
+ 253, /* IRQ_C1_DBL_FAULT */
+ 252, /* IRQ_C0_HW_ERR */
+ 252, /* IRQ_C1_HW_ERR */
+ 255, /* IRQ_C0_NMI_L1_PARITY_ERR */
+ 255, /* IRQ_C1_NMI_L1_PARITY_ERR */
+
+ 50, /* IRQ_TIMER0 */
+ 50, /* IRQ_TIMER1 */
+ 50, /* IRQ_TIMER2 */
+ 50, /* IRQ_TIMER3 */
+ 50, /* IRQ_TIMER4 */
+ 50, /* IRQ_TIMER5 */
+ 50, /* IRQ_TIMER6 */
+ 50, /* IRQ_TIMER7 */
+ 50, /* IRQ_TIMER_STAT */
+ 0, /* IRQ_PINT0 */
+ 0, /* IRQ_PINT1 */
+ 0, /* IRQ_PINT2 */
+ 0, /* IRQ_PINT3 */
+ 0, /* IRQ_PINT4 */
+ 0, /* IRQ_PINT5 */
+ 0, /* IRQ_CNT */
+ 50, /* RQ_PWM0_TRIP */
+ 50, /* IRQ_PWM0_SYNC */
+ 50, /* IRQ_PWM1_TRIP */
+ 50, /* IRQ_PWM1_SYNC */
+ 0, /* IRQ_TWI0 */
+ 0, /* IRQ_TWI1 */
+ 10, /* IRQ_SOFT0 */
+ 10, /* IRQ_SOFT1 */
+ 10, /* IRQ_SOFT2 */
+ 10, /* IRQ_SOFT3 */
+ 0, /* IRQ_ACM_EVT_MISS */
+ 0, /* IRQ_ACM_EVT_COMPLETE */
+ 0, /* IRQ_CAN0_RX */
+ 0, /* IRQ_CAN0_TX */
+ 0, /* IRQ_CAN0_STAT */
+ 100, /* IRQ_SPORT0_TX */
+ 100, /* IRQ_SPORT0_TX_STAT */
+ 100, /* IRQ_SPORT0_RX */
+ 100, /* IRQ_SPORT0_RX_STAT */
+ 100, /* IRQ_SPORT1_TX */
+ 100, /* IRQ_SPORT1_TX_STAT */
+ 100, /* IRQ_SPORT1_RX */
+ 100, /* IRQ_SPORT1_RX_STAT */
+ 100, /* IRQ_SPORT2_TX */
+ 100, /* IRQ_SPORT2_TX_STAT */
+ 100, /* IRQ_SPORT2_RX */
+ 100, /* IRQ_SPORT2_RX_STAT */
+ 0, /* IRQ_SPI0_TX */
+ 0, /* IRQ_SPI0_RX */
+ 0, /* IRQ_SPI0_STAT */
+ 0, /* IRQ_SPI1_TX */
+ 0, /* IRQ_SPI1_RX */
+ 0, /* IRQ_SPI1_STAT */
+ 0, /* IRQ_RSI */
+ 0, /* IRQ_RSI_INT0 */
+ 0, /* IRQ_RSI_INT1 */
+ 0, /* DMA11 Data (SDU) */
+ 0, /* DMA12 Data (Reserved) */
+ 0, /* Reserved */
+ 0, /* Reserved */
+ 30, /* IRQ_EMAC0_STAT */
+ 0, /* EMAC0 Power (Reserved) */
+ 30, /* IRQ_EMAC1_STAT */
+ 0, /* EMAC1 Power (Reserved) */
+ 0, /* IRQ_LP0 */
+ 0, /* IRQ_LP0_STAT */
+ 0, /* IRQ_LP1 */
+ 0, /* IRQ_LP1_STAT */
+ 0, /* IRQ_LP2 */
+ 0, /* IRQ_LP2_STAT */
+ 0, /* IRQ_LP3 */
+ 0, /* IRQ_LP3_STAT */
+ 0, /* IRQ_UART0_TX */
+ 0, /* IRQ_UART0_RX */
+ 0, /* IRQ_UART0_STAT */
+ 0, /* IRQ_UART1_TX */
+ 0, /* IRQ_UART1_RX */
+ 0, /* IRQ_UART1_STAT */
+ 0, /* IRQ_MDMA0_SRC_CRC0 */
+ 0, /* IRQ_MDMA0_DEST_CRC0 */
+ 0, /* IRQ_CRC0_DCNTEXP */
+ 0, /* IRQ_CRC0_ERR */
+ 0, /* IRQ_MDMA1_SRC_CRC1 */
+ 0, /* IRQ_MDMA1_DEST_CRC1 */
+ 0, /* IRQ_CRC1_DCNTEXP */
+ 0, /* IRQ_CRC1_ERR */
+ 0, /* IRQ_MDMA2_SRC */
+ 0, /* IRQ_MDMA2_DEST */
+ 0, /* IRQ_MDMA3_SRC */
+ 0, /* IRQ_MDMA3_DEST */
+ 120, /* IRQ_EPPI0_CH0 */
+ 120, /* IRQ_EPPI0_CH1 */
+ 120, /* IRQ_EPPI0_STAT */
+ 120, /* IRQ_EPPI2_CH0 */
+ 120, /* IRQ_EPPI2_CH1 */
+ 120, /* IRQ_EPPI2_STAT */
+ 120, /* IRQ_EPPI1_CH0 */
+ 120, /* IRQ_EPPI1_CH1 */
+ 120, /* IRQ_EPPI1_STAT */
+ 120, /* IRQ_PIXC_CH0 */
+ 120, /* IRQ_PIXC_CH1 */
+ 120, /* IRQ_PIXC_CH2 */
+ 120, /* IRQ_PIXC_STAT */
+ 120, /* IRQ_PVP_CPDOB */
+ 120, /* IRQ_PVP_CPDOC */
+ 120, /* IRQ_PVP_CPSTAT */
+ 120, /* IRQ_PVP_CPCI */
+ 120, /* IRQ_PVP_STAT0 */
+ 120, /* IRQ_PVP_MPDO */
+ 120, /* IRQ_PVP_MPDI */
+ 120, /* IRQ_PVP_MPSTAT */
+ 120, /* IRQ_PVP_MPCI */
+ 120, /* IRQ_PVP_CPDOA */
+ 120, /* IRQ_PVP_STAT1 */
+ 0, /* IRQ_USB_STAT */
+ 0, /* IRQ_USB_DMA */
+ 0, /* IRQ_TRU_INT0 */
+ 0, /* IRQ_TRU_INT1 */
+ 0, /* IRQ_TRU_INT2 */
+ 0, /* IRQ_TRU_INT3 */
+ 0, /* IRQ_DMAC0_ERROR */
+ 0, /* IRQ_CGU0_ERROR */
+ 0, /* Reserved */
+ 0, /* IRQ_DPM */
+ 0, /* Reserved */
+ 0, /* IRQ_SWU0 */
+ 0, /* IRQ_SWU1 */
+ 0, /* IRQ_SWU2 */
+ 0, /* IRQ_SWU3 */
+ 0, /* IRQ_SWU4 */
+ 0, /* IRQ_SWU4 */
+ 0, /* IRQ_SWU6 */
+};
+
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c
index b76966eb16ad..dacafc163f76 100644
--- a/arch/blackfin/mach-bf609/pm.c
+++ b/arch/blackfin/mach-bf609/pm.c
@@ -11,13 +11,14 @@
#include <linux/interrupt.h>
#include <linux/gpio.h>
#include <linux/irq.h>
-
#include <linux/delay.h>
+#include <linux/syscore_ops.h>
#include <asm/dpmc.h>
#include <asm/pm.h>
#include <mach/pm.h>
#include <asm/blackfin.h>
+#include <asm/mem_init.h>
/***********************************************************/
/* */
@@ -132,60 +133,30 @@ void bfin_cpu_suspend(void)
}
__attribute__((l1_text))
-void bfin_deepsleep(unsigned long mask)
+void bf609_ddr_sr(void)
{
- uint32_t dpm0_ctl;
-
- bfin_write32(DPM0_WAKE_EN, 0x10);
- bfin_write32(DPM0_WAKE_POL, 0x10);
- dpm0_ctl = 0x00000008;
- bfin_write32(DPM0_CTL, dpm0_ctl);
- SSYNC();
- __asm__ __volatile__( \
- ".align 8;" \
- "idle;" \
- : : \
- );
-#ifdef CONFIG_BFIN_PM_WAKEUP_TIME_BENCH
- __asm__ __volatile__(
- "R0 = 0;"
- "CYCLES = R0;"
- "CYCLES2 = R0;"
- "R0 = SYSCFG;"
- "BITSET(R0, 1);"
- "SYSCFG = R0;"
- : : : "R0"
- );
-#endif
-
+ dmc_enter_self_refresh();
}
__attribute__((l1_text))
-void bf609_ddr_sr(void)
+void bf609_ddr_sr_exit(void)
{
- uint32_t reg;
-
- reg = bfin_read_DMC0_CTL();
- reg |= 0x8;
- bfin_write_DMC0_CTL(reg);
+ dmc_exit_self_refresh();
- while (!(bfin_read_DMC0_STAT() & 0x8))
+ /* After wake up from deep sleep and exit DDR from self refress mode,
+ * should wait till CGU PLL is locked.
+ */
+ while (bfin_read32(CGU0_STAT) & CLKSALGN)
continue;
}
__attribute__((l1_text))
-void bf609_ddr_sr_exit(void)
+void bf609_resume_ccbuf(void)
{
- uint32_t reg;
- while (!(bfin_read_DMC0_STAT() & 0x1))
- continue;
+ bfin_write32(DPM0_CCBF_EN, 3);
+ bfin_write32(DPM0_CTL, 2);
- reg = bfin_read_DMC0_CTL();
- reg &= ~0x8;
- bfin_write_DMC0_CTL(reg);
-
- while ((bfin_read_DMC0_STAT() & 0x8))
- continue;
+ while ((bfin_read32(DPM0_STAT) & 0xf) != 1);
}
__attribute__((l1_text))
@@ -203,20 +174,25 @@ void bfin_hibernate_syscontrol(void)
bfin_write32(DPM0_RESTORE5, bfin_read32(DPM0_RESTORE5) | 4);
}
-#ifndef CONFIG_BF60x
-# define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
-#else
-# define SIC_SYSIRQ(irq) ((irq) - IVG15)
-#endif
-void bfin_hibernate(unsigned long mask)
+#define IRQ_SID(irq) ((irq) - IVG15)
+asmlinkage void enter_deepsleep(void);
+
+__attribute__((l1_text))
+void bfin_deepsleep(unsigned long mask, unsigned long pol_mask)
{
- bfin_write32(DPM0_WAKE_EN, 0x10);
- bfin_write32(DPM0_WAKE_POL, 0x10);
+ bfin_write32(DPM0_WAKE_EN, mask);
+ bfin_write32(DPM0_WAKE_POL, pol_mask);
+ SSYNC();
+ enter_deepsleep();
+}
+
+void bfin_hibernate(unsigned long mask, unsigned long pol_mask)
+{
+ bfin_write32(DPM0_WAKE_EN, mask);
+ bfin_write32(DPM0_WAKE_POL, pol_mask);
bfin_write32(DPM0_PGCNTR, 0x0000FFFF);
bfin_write32(DPM0_HIB_DIS, 0xFFFF);
- printk(KERN_DEBUG "hibernate: restore %x pgcnt %x\n", bfin_read32(DPM0_RESTORE0), bfin_read32(DPM0_PGCNTR));
-
bf609_hibernate();
}
@@ -290,10 +266,11 @@ void bf609_cpu_pm_enter(suspend_state_t state)
printk(KERN_DEBUG "Unable to get irq wake\n");
if (state == PM_SUSPEND_STANDBY)
- bfin_deepsleep(wakeup);
+ bfin_deepsleep(wakeup, wakeup_pol);
else {
- bfin_hibernate(wakeup);
+ bfin_hibernate(wakeup, wakeup_pol);
}
+
}
int bf609_cpu_pm_prepare(void)
@@ -312,20 +289,36 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
.finish = bf609_cpu_pm_finish,
};
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+static int smc_pm_syscore_suspend(void)
+{
+ bf609_nor_flash_exit();
+ return 0;
+}
+
+static void smc_pm_syscore_resume(void)
+{
+ bf609_nor_flash_init();
+}
+
+static struct syscore_ops smc_pm_syscore_ops = {
+ .suspend = smc_pm_syscore_suspend,
+ .resume = smc_pm_syscore_resume,
+};
+#endif
+
static irqreturn_t test_isr(int irq, void *dev_id)
{
printk(KERN_DEBUG "gpio irq %d\n", irq);
+ if (irq == 231)
+ bfin_sec_raise_irq(IRQ_SID(IRQ_SOFT1));
return IRQ_HANDLED;
}
static irqreturn_t dpm0_isr(int irq, void *dev_id)
{
- uint32_t wake_stat;
-
- wake_stat = bfin_read32(DPM0_WAKE_STAT);
- printk(KERN_DEBUG "enter %s wake stat %08x\n", __func__, wake_stat);
-
- bfin_write32(DPM0_WAKE_STAT, wake_stat);
+ bfin_write32(DPM0_WAKE_STAT, bfin_read32(DPM0_WAKE_STAT));
+ bfin_write32(CGU0_STAT, bfin_read32(CGU0_STAT));
return IRQ_HANDLED;
}
@@ -334,7 +327,11 @@ static int __init bf609_init_pm(void)
int irq;
int error;
-#if CONFIG_PM_BFIN_WAKE_PE12
+#if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
+ register_syscore_ops(&smc_pm_syscore_ops);
+#endif
+
+#ifdef CONFIG_PM_BFIN_WAKE_PE12
irq = gpio_to_irq(GPIO_PE12);
if (irq < 0) {
error = irq;
@@ -342,16 +339,19 @@ static int __init bf609_init_pm(void)
GPIO_PE12, error);
}
- error = request_irq(irq, test_isr, IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, "gpiope12", NULL);
+ error = request_irq(irq, test_isr, IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND
+ | IRQF_FORCE_RESUME, "gpiope12", NULL);
if(error < 0)
printk(KERN_DEBUG "Unable to get irq\n");
#endif
- error = request_irq(IRQ_CGU_EVT, dpm0_isr, IRQF_NO_SUSPEND, "cgu0 event", NULL);
+ error = request_irq(IRQ_CGU_EVT, dpm0_isr, IRQF_NO_SUSPEND |
+ IRQF_FORCE_RESUME, "cgu0 event", NULL);
if(error < 0)
printk(KERN_DEBUG "Unable to get irq\n");
- error = request_irq(IRQ_DPM, dpm0_isr, IRQF_NO_SUSPEND, "dpm0 event", NULL);
+ error = request_irq(IRQ_DPM, dpm0_isr, IRQF_NO_SUSPEND |
+ IRQF_FORCE_RESUME, "dpm0 event", NULL);
if (error < 0)
printk(KERN_DEBUG "Unable to get irq\n");
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
index 7ad2407d1571..2308ce52f849 100644
--- a/arch/blackfin/mach-common/clocks-init.c
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -16,23 +16,14 @@
#include <asm/dpmc.h>
#ifdef CONFIG_BF60x
-#define CSEL_P 0
-#define S0SEL_P 5
-#define SYSSEL_P 8
-#define S1SEL_P 13
-#define DSEL_P 16
-#define OSEL_P 22
-#define ALGN_P 29
-#define UPDT_P 30
-#define LOCK_P 31
#define CGU_CTL_VAL ((CONFIG_VCO_MULT << 8) | CLKIN_HALF)
#define CGU_DIV_VAL \
- ((CONFIG_CCLK_DIV << CSEL_P) | \
- (CONFIG_SCLK_DIV << SYSSEL_P) | \
- (CONFIG_SCLK0_DIV << S0SEL_P) | \
- (CONFIG_SCLK1_DIV << S1SEL_P) | \
- (CONFIG_DCLK_DIV << DSEL_P))
+ ((CONFIG_CCLK_DIV << CSEL_OFFSET) | \
+ (CONFIG_SCLK_DIV << SYSSEL_OFFSET) | \
+ (CONFIG_SCLK0_DIV << S0SEL_OFFSET) | \
+ (CONFIG_SCLK1_DIV << S1SEL_OFFSET) | \
+ (CONFIG_DCLK_DIV << DSEL_OFFSET))
#define CONFIG_BFIN_DCLK (((CONFIG_CLKIN_HZ * CONFIG_VCO_MULT) / CONFIG_DCLK_DIV) / 1000000)
#if ((CONFIG_BFIN_DCLK != 125) && \
@@ -41,89 +32,7 @@
(CONFIG_BFIN_DCLK != 225) && (CONFIG_BFIN_DCLK != 250))
#error "DCLK must be in (125, 133, 150, 166, 200, 225, 250)MHz"
#endif
-struct ddr_config {
- u32 ddr_clk;
- u32 dmc_ddrctl;
- u32 dmc_ddrcfg;
- u32 dmc_ddrtr0;
- u32 dmc_ddrtr1;
- u32 dmc_ddrtr2;
- u32 dmc_ddrmr;
- u32 dmc_ddrmr1;
-};
-struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) = {
- [0] = {
- .ddr_clk = 125,
- .dmc_ddrctl = 0x00000904,
- .dmc_ddrcfg = 0x00000422,
- .dmc_ddrtr0 = 0x20705212,
- .dmc_ddrtr1 = 0x201003CF,
- .dmc_ddrtr2 = 0x00320107,
- .dmc_ddrmr = 0x00000422,
- .dmc_ddrmr1 = 0x4,
- },
- [1] = {
- .ddr_clk = 133,
- .dmc_ddrctl = 0x00000904,
- .dmc_ddrcfg = 0x00000422,
- .dmc_ddrtr0 = 0x20806313,
- .dmc_ddrtr1 = 0x2013040D,
- .dmc_ddrtr2 = 0x00320108,
- .dmc_ddrmr = 0x00000632,
- .dmc_ddrmr1 = 0x4,
- },
- [2] = {
- .ddr_clk = 150,
- .dmc_ddrctl = 0x00000904,
- .dmc_ddrcfg = 0x00000422,
- .dmc_ddrtr0 = 0x20A07323,
- .dmc_ddrtr1 = 0x20160492,
- .dmc_ddrtr2 = 0x00320209,
- .dmc_ddrmr = 0x00000632,
- .dmc_ddrmr1 = 0x4,
- },
- [3] = {
- .ddr_clk = 166,
- .dmc_ddrctl = 0x00000904,
- .dmc_ddrcfg = 0x00000422,
- .dmc_ddrtr0 = 0x20A07323,
- .dmc_ddrtr1 = 0x2016050E,
- .dmc_ddrtr2 = 0x00320209,
- .dmc_ddrmr = 0x00000632,
- .dmc_ddrmr1 = 0x4,
- },
- [4] = {
- .ddr_clk = 200,
- .dmc_ddrctl = 0x00000904,
- .dmc_ddrcfg = 0x00000422,
- .dmc_ddrtr0 = 0x20a07323,
- .dmc_ddrtr1 = 0x2016050f,
- .dmc_ddrtr2 = 0x00320509,
- .dmc_ddrmr = 0x00000632,
- .dmc_ddrmr1 = 0x4,
- },
- [5] = {
- .ddr_clk = 225,
- .dmc_ddrctl = 0x00000904,
- .dmc_ddrcfg = 0x00000422,
- .dmc_ddrtr0 = 0x20E0A424,
- .dmc_ddrtr1 = 0x302006DB,
- .dmc_ddrtr2 = 0x0032020D,
- .dmc_ddrmr = 0x00000842,
- .dmc_ddrmr1 = 0x4,
- },
- [6] = {
- .ddr_clk = 250,
- .dmc_ddrctl = 0x00000904,
- .dmc_ddrcfg = 0x00000422,
- .dmc_ddrtr0 = 0x20E0A424,
- .dmc_ddrtr1 = 0x3020079E,
- .dmc_ddrtr2 = 0x0032020D,
- .dmc_ddrmr = 0x00000842,
- .dmc_ddrmr1 = 0x4,
- },
-};
#else
#define SDGCTL_WIDTH (1 << 31) /* SDRAM external data path width */
#define PLL_CTL_VAL \
@@ -144,43 +53,9 @@ void init_clocks(void)
* in the middle of reprogramming things, and that'll screw us up.
* For example, any automatic DMAs left by U-Boot for splash screens.
*/
-
#ifdef CONFIG_BF60x
- int i, dlldatacycle, dll_ctl;
- bfin_write32(CGU0_DIV, CGU_DIV_VAL);
- bfin_write32(CGU0_CTL, CGU_CTL_VAL);
- while ((bfin_read32(CGU0_STAT) & 0x8) || !(bfin_read32(CGU0_STAT) & 0x4))
- continue;
-
- bfin_write32(CGU0_DIV, CGU_DIV_VAL | (1 << UPDT_P));
- while (bfin_read32(CGU0_STAT) & (1 << 3))
- continue;
-
- for (i = 0; i < 7; i++) {
- if (ddr_config_table[i].ddr_clk == CONFIG_BFIN_DCLK) {
- bfin_write_DDR0_CFG(ddr_config_table[i].dmc_ddrcfg);
- bfin_write_DDR0_TR0(ddr_config_table[i].dmc_ddrtr0);
- bfin_write_DDR0_TR1(ddr_config_table[i].dmc_ddrtr1);
- bfin_write_DDR0_TR2(ddr_config_table[i].dmc_ddrtr2);
- bfin_write_DDR0_MR(ddr_config_table[i].dmc_ddrmr);
- bfin_write_DDR0_EMR1(ddr_config_table[i].dmc_ddrmr1);
- bfin_write_DDR0_CTL(ddr_config_table[i].dmc_ddrctl);
- break;
- }
- }
-
- do_sync();
- while (!(bfin_read_DDR0_STAT() & 0x4))
- continue;
-
- dlldatacycle = (bfin_read_DDR0_STAT() & 0x00f00000) >> 20;
- dll_ctl = bfin_read_DDR0_DLLCTL();
- dll_ctl &= 0x0ff;
- bfin_write_DDR0_DLLCTL(dll_ctl | (dlldatacycle << 8));
-
- do_sync();
- while (!(bfin_read_DDR0_STAT() & 0x2000))
- continue;
+ init_cgu(CGU_DIV_VAL, CGU_CTL_VAL);
+ init_dmc(CONFIG_BFIN_DCLK);
#else
size_t i;
for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 6e87dc13f6bf..c854a27cbeab 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -64,7 +64,8 @@ static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
/* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
#if ANOMALY_05000273 || ANOMALY_05000274 || \
- (!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
+ (!(defined(CONFIG_BF54x) || defined(CONFIG_BF60x)) \
+ && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
min_cclk = sclk * 2;
#else
min_cclk = sclk;
@@ -173,7 +174,7 @@ static int bfin_target(struct cpufreq_policy *poli,
#else
ret = cpu_set_cclk(cpu, freqs.new * 1000);
if (ret != 0) {
- pr_debug("cpufreq set freq failed %d\n", ret);
+ WARN_ONCE(ret, "cpufreq set freq failed %d\n", ret);
break;
}
#endif
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 04c2fbe41a7f..1c3d2c5bb0bb 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -25,13 +25,6 @@
#include <asm/context.S>
-#if defined(CONFIG_BFIN_SCRATCH_REG_RETN)
-# define EX_SCRATCH_REG RETN
-#elif defined(CONFIG_BFIN_SCRATCH_REG_RETE)
-# define EX_SCRATCH_REG RETE
-#else
-# define EX_SCRATCH_REG CYCLES
-#endif
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
.section .l1.text
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 2729cba715b0..7ca09ec2ca53 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -26,8 +26,9 @@
#include <asm/gpio.h>
#include <asm/irq_handler.h>
#include <asm/dpmc.h>
+#include <asm/traps.h>
-#ifndef CONFIG_BF60x
+#ifndef SEC_GCTL
# define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
#else
# define SIC_SYSIRQ(irq) ((irq) - IVG15)
@@ -56,7 +57,7 @@ unsigned long bfin_sic_iwr[3]; /* Up to 3 SIC_IWRx registers */
unsigned vr_wakeup;
#endif
-#ifndef CONFIG_BF60x
+#ifndef SEC_GCTL
static struct ivgx {
/* irq number for request_irq, available in mach-bf5xx/irq.h */
unsigned int irqno;
@@ -143,7 +144,7 @@ static void bfin_core_unmask_irq(struct irq_data *d)
void bfin_internal_mask_irq(unsigned int irq)
{
unsigned long flags = hard_local_irq_save();
-#ifndef CONFIG_BF60x
+#ifndef SEC_GCTL
#ifdef SIC_IMASK0
unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
@@ -175,7 +176,7 @@ void bfin_internal_unmask_irq(unsigned int irq)
{
unsigned long flags = hard_local_irq_save();
-#ifndef CONFIG_BF60x
+#ifndef SEC_GCTL
#ifdef SIC_IMASK0
unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
@@ -199,7 +200,7 @@ void bfin_internal_unmask_irq(unsigned int irq)
hard_local_irq_restore(flags);
}
-#ifdef CONFIG_BF60x
+#ifdef SEC_GCTL
static void bfin_sec_preflow_handler(struct irq_data *d)
{
unsigned long flags = hard_local_irq_save();
@@ -310,7 +311,24 @@ static void bfin_sec_disable(struct irq_data *d)
hard_local_irq_restore(flags);
}
-static void bfin_sec_raise_irq(unsigned int sid)
+static void bfin_sec_set_priority(unsigned int sec_int_levels, u8 *sec_int_priority)
+{
+ unsigned long flags = hard_local_irq_save();
+ uint32_t reg_sctl;
+ int i;
+
+ bfin_write_SEC_SCI(0, SEC_CPLVL, sec_int_levels);
+
+ for (i = 0; i < SYS_IRQS - BFIN_IRQ(0); i++) {
+ reg_sctl = bfin_read_SEC_SCTL(i) & ~SEC_SCTL_PRIO;
+ reg_sctl |= sec_int_priority[i] << SEC_SCTL_PRIO_OFFSET;
+ bfin_write_SEC_SCTL(i, reg_sctl);
+ }
+
+ hard_local_irq_restore(flags);
+}
+
+void bfin_sec_raise_irq(unsigned int sid)
{
unsigned long flags = hard_local_irq_save();
@@ -396,24 +414,34 @@ void handle_sec_fault(unsigned int irq, struct irq_desc *desc)
raw_spin_unlock(&desc->lock);
}
-static int sec_suspend(void)
+void handle_core_fault(unsigned int irq, struct irq_desc *desc)
{
- return 0;
-}
+ struct pt_regs *fp = get_irq_regs();
-static void sec_resume(void)
-{
- bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
- udelay(100);
- bfin_write_SEC_GCTL(SEC_GCTL_EN);
- bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
-}
+ raw_spin_lock(&desc->lock);
-static struct syscore_ops sec_pm_syscore_ops = {
- .suspend = sec_suspend,
- .resume = sec_resume,
-};
+ switch (irq) {
+ case IRQ_C0_DBL_FAULT:
+ double_fault_c(fp);
+ break;
+ case IRQ_C0_HW_ERR:
+ dump_bfin_process(fp);
+ dump_bfin_mem(fp);
+ show_regs(fp);
+ printk(KERN_NOTICE "Kernel Stack\n");
+ show_stack(current, NULL);
+ print_modules();
+ panic("Kernel core hardware error");
+ break;
+ case IRQ_C0_NMI_L1_PARITY_ERR:
+ panic("NMI occurs unexpectedly");
+ break;
+ default:
+ panic("Core 1 fault occurs unexpectedly");
+ }
+ raw_spin_unlock(&desc->lock);
+}
#endif
#ifdef CONFIG_SMP
@@ -437,7 +465,7 @@ static void bfin_internal_unmask_irq_chip(struct irq_data *d)
}
#endif
-#if defined(CONFIG_PM) && !defined(CONFIG_BF60x)
+#if defined(CONFIG_PM) && !defined(SEC_GCTL)
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
u32 bank, bit, wakeup = 0;
@@ -496,7 +524,10 @@ static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
return bfin_internal_set_wake(d->irq, state);
}
#else
-# define bfin_internal_set_wake(irq, state)
+inline int bfin_internal_set_wake(unsigned int irq, unsigned int state)
+{
+ return 0;
+}
# define bfin_internal_set_wake_chip NULL
#endif
@@ -518,7 +549,7 @@ static struct irq_chip bfin_internal_irqchip = {
.irq_set_wake = bfin_internal_set_wake_chip,
};
-#ifdef CONFIG_BF60x
+#ifdef SEC_GCTL
static struct irq_chip bfin_sec_irqchip = {
.name = "SEC",
.irq_mask_ack = bfin_sec_mask_ack_irq,
@@ -868,14 +899,6 @@ void bfin_demux_gpio_irq(unsigned int inta_irq,
#else
-# ifndef CONFIG_BF60x
-#define NR_PINT_SYS_IRQS 4
-#define NR_PINTS 160
-# else
-#define NR_PINT_SYS_IRQS 6
-#define NR_PINTS 112
-#endif
-
#define NR_PINT_BITS 32
#define IRQ_NOT_AVAIL 0xFF
@@ -897,29 +920,21 @@ static struct bfin_pint_regs * const pint[NR_PINT_SYS_IRQS] = {
#endif
};
-#ifndef CONFIG_BF60x
inline unsigned int get_irq_base(u32 bank, u8 bmap)
{
unsigned int irq_base;
+#ifndef CONFIG_BF60x
if (bank < 2) { /*PA-PB */
irq_base = IRQ_PA0 + bmap * 16;
} else { /*PC-PJ */
irq_base = IRQ_PC0 + bmap * 16;
}
-
- return irq_base;
-}
#else
-inline unsigned int get_irq_base(u32 bank, u8 bmap)
-{
- unsigned int irq_base;
-
irq_base = IRQ_PA0 + bank * 16 + bmap * 16;
-
+#endif
return irq_base;
}
-#endif
/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
void init_pint_lut(void)
@@ -1089,6 +1104,9 @@ static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
}
#ifdef CONFIG_PM
+static struct bfin_pm_pint_save save_pint_reg[NR_PINT_SYS_IRQS];
+static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS];
+
static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
u32 pint_irq;
@@ -1124,6 +1142,59 @@ static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
return 0;
}
+
+void bfin_pint_suspend(void)
+{
+ u32 bank;
+
+ for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
+ save_pint_reg[bank].mask_set = pint[bank]->mask_set;
+ save_pint_reg[bank].assign = pint[bank]->assign;
+ save_pint_reg[bank].edge_set = pint[bank]->edge_set;
+ save_pint_reg[bank].invert_set = pint[bank]->invert_set;
+ }
+}
+
+void bfin_pint_resume(void)
+{
+ u32 bank;
+
+ for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {
+ pint[bank]->mask_set = save_pint_reg[bank].mask_set;
+ pint[bank]->assign = save_pint_reg[bank].assign;
+ pint[bank]->edge_set = save_pint_reg[bank].edge_set;
+ pint[bank]->invert_set = save_pint_reg[bank].invert_set;
+ }
+}
+
+#ifdef SEC_GCTL
+static int sec_suspend(void)
+{
+ u32 bank;
+
+ for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
+ save_pint_sec_ctl[bank] = bfin_read_SEC_SCTL(bank + SIC_SYSIRQ(IRQ_PINT0));
+ return 0;
+}
+
+static void sec_resume(void)
+{
+ u32 bank;
+
+ bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
+ udelay(100);
+ bfin_write_SEC_GCTL(SEC_GCTL_EN);
+ bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
+
+ for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
+ bfin_write_SEC_SCTL(bank + SIC_SYSIRQ(IRQ_PINT0), save_pint_sec_ctl[bank]);
+}
+
+static struct syscore_ops sec_pm_syscore_ops = {
+ .suspend = sec_suspend,
+ .resume = sec_resume,
+};
+#endif
#else
# define bfin_gpio_set_wake NULL
#endif
@@ -1230,6 +1301,7 @@ void __cpuinit init_exception_vectors(void)
CSYNC();
}
+#ifndef SEC_GCTL
/*
* This function should be called during kernel startup to initialize
* the BFin IRQ handling routines.
@@ -1240,7 +1312,6 @@ int __init init_arch_irq(void)
int irq;
unsigned long ilat = 0;
-#ifndef CONFIG_BF60x
/* Disable all the peripheral intrs - page 4-29 HW Ref manual */
#ifdef SIC_IMASK0
bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
@@ -1255,9 +1326,6 @@ int __init init_arch_irq(void)
#else
bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
#endif
-#else /* CONFIG_BF60x */
- bfin_write_SEC_GCTL(SEC_GCTL_RESET);
-#endif
local_irq_disable();
@@ -1267,10 +1335,6 @@ int __init init_arch_irq(void)
pint[1]->assign = CONFIG_PINT1_ASSIGN;
pint[2]->assign = CONFIG_PINT2_ASSIGN;
pint[3]->assign = CONFIG_PINT3_ASSIGN;
-# ifdef CONFIG_BF60x
- pint[4]->assign = CONFIG_PINT4_ASSIGN;
- pint[5]->assign = CONFIG_PINT5_ASSIGN;
-# endif
# endif
/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
init_pint_lut();
@@ -1283,7 +1347,6 @@ int __init init_arch_irq(void)
irq_set_chip(irq, &bfin_internal_irqchip);
switch (irq) {
-#ifndef CONFIG_BF60x
#if BFIN_GPIO_PINT
case IRQ_PINT0:
case IRQ_PINT1:
@@ -1319,7 +1382,6 @@ int __init init_arch_irq(void)
irq_set_handler(irq, handle_percpu_irq);
break;
#endif
-#endif
#ifdef CONFIG_TICKSOURCE_CORETMR
case IRQ_CORETMR:
@@ -1349,8 +1411,7 @@ int __init init_arch_irq(void)
init_mach_irq();
-#ifndef CONFIG_BF60x
-#if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)) && !defined(CONFIG_BF60x)
+#if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
handle_level_irq);
@@ -1360,28 +1421,6 @@ int __init init_arch_irq(void)
irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
handle_level_irq);
-#else
- for (irq = BFIN_IRQ(0); irq <= SYS_IRQS; irq++) {
- if (irq < CORE_IRQS) {
- irq_set_chip(irq, &bfin_sec_irqchip);
- __irq_set_handler(irq, handle_sec_fault, 0, NULL);
- } else if (irq >= BFIN_IRQ(21) && irq <= BFIN_IRQ(26)) {
- irq_set_chip(irq, &bfin_sec_irqchip);
- irq_set_chained_handler(irq, bfin_demux_gpio_irq);
- } else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
- irq_set_chip(irq, &bfin_sec_irqchip);
- irq_set_handler(irq, handle_percpu_irq);
- } else {
- irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
- handle_fasteoi_irq);
- __irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
- }
- }
- for (irq = GPIO_IRQ_BASE;
- irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
- irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
- handle_level_irq);
-#endif
bfin_write_IMASK(0);
CSYNC();
ilat = bfin_read_ILAT();
@@ -1393,7 +1432,6 @@ int __init init_arch_irq(void)
/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
* local_irq_enable()
*/
-#ifndef CONFIG_BF60x
program_IAR();
/* Therefore it's better to setup IARs before interrupts enabled */
search_IAR();
@@ -1427,23 +1465,6 @@ int __init init_arch_irq(void)
#else
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
-#else /* CONFIG_BF60x */
- /* Enable interrupts IVG7-15 */
- bfin_irq_flags |= IMASK_IVG15 |
- IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
- IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
-
-
- bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
- bfin_sec_enable_sci(SIC_SYSIRQ(IRQ_WATCH0));
- bfin_sec_enable_ssi(SIC_SYSIRQ(IRQ_WATCH0));
- bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
- udelay(100);
- bfin_write_SEC_GCTL(SEC_GCTL_EN);
- bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
- init_software_driven_irq();
- register_syscore_ops(&sec_pm_syscore_ops);
-#endif
return 0;
}
@@ -1452,14 +1473,11 @@ __attribute__((l1_text))
#endif
static int vec_to_irq(int vec)
{
-#ifndef CONFIG_BF60x
struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
unsigned long sic_status[3];
-#endif
if (likely(vec == EVT_IVTMR_P))
return IRQ_CORETMR;
-#ifndef CONFIG_BF60x
#ifdef SIC_ISR
sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
#else
@@ -1488,11 +1506,119 @@ static int vec_to_irq(int vec)
#endif
return ivg->irqno;
}
-#else
- /* for bf60x read */
+}
+
+#else /* SEC_GCTL */
+
+/*
+ * This function should be called during kernel startup to initialize
+ * the BFin IRQ handling routines.
+ */
+
+int __init init_arch_irq(void)
+{
+ int irq;
+ unsigned long ilat = 0;
+
+ bfin_write_SEC_GCTL(SEC_GCTL_RESET);
+
+ local_irq_disable();
+
+#if BFIN_GPIO_PINT
+# ifdef CONFIG_PINTx_REASSIGN
+ pint[0]->assign = CONFIG_PINT0_ASSIGN;
+ pint[1]->assign = CONFIG_PINT1_ASSIGN;
+ pint[2]->assign = CONFIG_PINT2_ASSIGN;
+ pint[3]->assign = CONFIG_PINT3_ASSIGN;
+ pint[4]->assign = CONFIG_PINT4_ASSIGN;
+ pint[5]->assign = CONFIG_PINT5_ASSIGN;
+# endif
+ /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
+ init_pint_lut();
+#endif
+
+ for (irq = 0; irq <= SYS_IRQS; irq++) {
+ if (irq <= IRQ_CORETMR) {
+ irq_set_chip(irq, &bfin_core_irqchip);
+#ifdef CONFIG_TICKSOURCE_CORETMR
+ if (irq == IRQ_CORETMR)
+# ifdef CONFIG_SMP
+ irq_set_handler(irq, handle_percpu_irq);
+# else
+ irq_set_handler(irq, handle_simple_irq);
+# endif
+#endif
+ } else if (irq < BFIN_IRQ(0)) {
+ irq_set_chip_and_handler(irq, &bfin_internal_irqchip,
+ handle_simple_irq);
+ } else if (irq == IRQ_SEC_ERR) {
+ irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
+ handle_sec_fault);
+ } else if (irq < CORE_IRQS && irq >= IRQ_C0_DBL_FAULT) {
+ irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
+ handle_core_fault);
+ } else if (irq >= BFIN_IRQ(21) && irq <= BFIN_IRQ(26)) {
+ irq_set_chip(irq, &bfin_sec_irqchip);
+ irq_set_chained_handler(irq, bfin_demux_gpio_irq);
+ } else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
+ irq_set_chip(irq, &bfin_sec_irqchip);
+ irq_set_handler(irq, handle_percpu_irq);
+ } else {
+ irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
+ handle_fasteoi_irq);
+ __irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
+ }
+ }
+ for (irq = GPIO_IRQ_BASE;
+ irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
+ irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
+ handle_level_irq);
+
+ bfin_write_IMASK(0);
+ CSYNC();
+ ilat = bfin_read_ILAT();
+ CSYNC();
+ bfin_write_ILAT(ilat);
+ CSYNC();
+
+ printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
+
+ bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
+
+ bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
+
+ /* Enable interrupts IVG7-15 */
+ bfin_irq_flags |= IMASK_IVG15 |
+ IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
+ IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
+
+
+ bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
+ bfin_sec_enable_sci(SIC_SYSIRQ(IRQ_WATCH0));
+ bfin_sec_enable_ssi(SIC_SYSIRQ(IRQ_WATCH0));
+ bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
+ udelay(100);
+ bfin_write_SEC_GCTL(SEC_GCTL_EN);
+ bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
+ bfin_write_SEC_SCI(1, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
+
+ init_software_driven_irq();
+ register_syscore_ops(&sec_pm_syscore_ops);
+
+ return 0;
+}
+
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+static int vec_to_irq(int vec)
+{
+ if (likely(vec == EVT_IVTMR_P))
+ return IRQ_CORETMR;
+
return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
-#endif /* end of CONFIG_BF60x */
}
+#endif /* SEC_GCTL */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
@@ -1514,6 +1640,10 @@ int __ipipe_get_irq_priority(unsigned irq)
if (irq <= IRQ_CORETMR)
return irq;
+#ifdef SEC_GCTL
+ if (irq >= BFIN_IRQ(0))
+ return IVG11;
+#else
for (ient = 0; ient < NR_PERI_INTS; ient++) {
struct ivgx *ivg = ivg_table + ient;
if (ivg->irqno == irq) {
@@ -1524,6 +1654,7 @@ int __ipipe_get_irq_priority(unsigned irq)
}
}
}
+#endif
return IVG15;
}
@@ -1536,8 +1667,6 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
struct ipipe_domain *this_domain = __ipipe_current_domain;
- struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
- struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
int irq, s = 0;
irq = vec_to_irq(vec);
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index ca6655e0d653..87bfe549ad3f 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -172,6 +172,10 @@ int bfin_pm_suspend_mem_enter(void)
bfin_gpio_pm_hibernate_suspend();
+#if BFIN_GPIO_PINT
+ bfin_pint_suspend();
+#endif
+
#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
flushinv_all_dcache();
#endif
@@ -190,6 +194,10 @@ int bfin_pm_suspend_mem_enter(void)
_enable_icplb();
_enable_dcplb();
+#if BFIN_GPIO_PINT
+ bfin_pint_resume();
+#endif
+
bfin_gpio_pm_hibernate_restore();
blackfin_dma_resume();
diff --git a/arch/c6x/boot/dts/evmc6678.dts b/arch/c6x/boot/dts/evmc6678.dts
new file mode 100644
index 000000000000..ab686301d321
--- /dev/null
+++ b/arch/c6x/boot/dts/evmc6678.dts
@@ -0,0 +1,83 @@
+/*
+ * arch/c6x/boot/dts/evmc6678.dts
+ *
+ * EVMC6678 Evaluation Platform For TMS320C6678
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated
+ *
+ * Author: Ken Cox <jkc@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+/dts-v1/;
+
+/include/ "tms320c6678.dtsi"
+
+/ {
+ model = "Advantech EVMC6678";
+ compatible = "advantech,evmc6678";
+
+ chosen {
+ bootargs = "root=/dev/nfs ip=dhcp rw";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x80000000 0x20000000>;
+ };
+
+ soc {
+ megamod_pic: interrupt-controller@1800000 {
+ interrupts = < 12 13 14 15 >;
+ };
+
+ timer8: timer@2280000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 66 >;
+ };
+
+ timer9: timer@2290000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 68 >;
+ };
+
+ timer10: timer@22A0000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 70 >;
+ };
+
+ timer11: timer@22B0000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 72 >;
+ };
+
+ timer12: timer@22C0000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 74 >;
+ };
+
+ timer13: timer@22D0000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 76 >;
+ };
+
+ timer14: timer@22E0000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 78 >;
+ };
+
+ timer15: timer@22F0000 {
+ interrupt-parent = <&megamod_pic>;
+ interrupts = < 80 >;
+ };
+
+ clock-controller@2310000 {
+ clock-frequency = <100000000>;
+ };
+ };
+};
diff --git a/arch/c6x/boot/dts/tms320c6678.dtsi b/arch/c6x/boot/dts/tms320c6678.dtsi
new file mode 100644
index 000000000000..386196e5eae7
--- /dev/null
+++ b/arch/c6x/boot/dts/tms320c6678.dtsi
@@ -0,0 +1,146 @@
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ reg = <0>;
+ model = "ti,c66x";
+ };
+ cpu@1 {
+ device_type = "cpu";
+ reg = <1>;
+ model = "ti,c66x";
+ };
+ cpu@2 {
+ device_type = "cpu";
+ reg = <2>;
+ model = "ti,c66x";
+ };
+ cpu@3 {
+ device_type = "cpu";
+ reg = <3>;
+ model = "ti,c66x";
+ };
+ cpu@4 {
+ device_type = "cpu";
+ reg = <4>;
+ model = "ti,c66x";
+ };
+ cpu@5 {
+ device_type = "cpu";
+ reg = <5>;
+ model = "ti,c66x";
+ };
+ cpu@6 {
+ device_type = "cpu";
+ reg = <6>;
+ model = "ti,c66x";
+ };
+ cpu@7 {
+ device_type = "cpu";
+ reg = <7>;
+ model = "ti,c66x";
+ };
+ };
+
+ soc {
+ compatible = "simple-bus";
+ model = "tms320c6678";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ core_pic: interrupt-controller {
+ compatible = "ti,c64x+core-pic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ megamod_pic: interrupt-controller@1800000 {
+ compatible = "ti,c64x+megamod-pic";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ reg = <0x1800000 0x1000>;
+ interrupt-parent = <&core_pic>;
+ };
+
+ cache-controller@1840000 {
+ compatible = "ti,c64x+cache";
+ reg = <0x01840000 0x8400>;
+ };
+
+ timer8: timer@2280000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x01 >;
+ reg = <0x2280000 0x40>;
+ };
+
+ timer9: timer@2290000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x02 >;
+ reg = <0x2290000 0x40>;
+ };
+
+ timer10: timer@22A0000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x04 >;
+ reg = <0x22A0000 0x40>;
+ };
+
+ timer11: timer@22B0000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x08 >;
+ reg = <0x22B0000 0x40>;
+ };
+
+ timer12: timer@22C0000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x10 >;
+ reg = <0x22C0000 0x40>;
+ };
+
+ timer13: timer@22D0000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x20 >;
+ reg = <0x22D0000 0x40>;
+ };
+
+ timer14: timer@22E0000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x40 >;
+ reg = <0x22E0000 0x40>;
+ };
+
+ timer15: timer@22F0000 {
+ compatible = "ti,c64x+timer64";
+ ti,core-mask = < 0x80 >;
+ reg = <0x22F0000 0x40>;
+ };
+
+ clock-controller@2310000 {
+ compatible = "ti,c6678-pll", "ti,c64x+pll";
+ reg = <0x02310000 0x200>;
+ ti,c64x+pll-bypass-delay = <200>;
+ ti,c64x+pll-reset-delay = <12000>;
+ ti,c64x+pll-lock-delay = <80000>;
+ };
+
+ device-state-controller@2620000 {
+ compatible = "ti,c64x+dscr";
+ reg = <0x02620000 0x1000>;
+
+ ti,dscr-devstat = <0x20>;
+ ti,dscr-silicon-rev = <0x18 28 0xf>;
+
+ ti,dscr-mac-fuse-regs = <0x110 1 2 3 4
+ 0x114 5 6 0 0>;
+
+ };
+ };
+};
diff --git a/arch/c6x/configs/evmc6678_defconfig b/arch/c6x/configs/evmc6678_defconfig
new file mode 100644
index 000000000000..5f126d4905b1
--- /dev/null
+++ b/arch/c6x/configs/evmc6678_defconfig
@@ -0,0 +1,42 @@
+CONFIG_SOC_TMS320C6678=y
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_SPARSE_IRQ=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_FUTEX is not set
+# CONFIG_SLUB_DEBUG is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE=""
+# CONFIG_CMDLINE_FORCE is not set
+CONFIG_BOARD_EVM6678=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=2
+CONFIG_BLK_DEV_RAM_SIZE=17000
+CONFIG_MISC_DEVICES=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_CRC16=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
index ab4577f93d96..1324e62bd4ef 100644
--- a/arch/c6x/include/asm/irq.h
+++ b/arch/c6x/include/asm/irq.h
@@ -34,8 +34,6 @@
*/
#define NR_PRIORITY_IRQS 16
-#define NR_IRQS_LEGACY NR_PRIORITY_IRQS
-
/* Total number of virq in the platform */
#define NR_IRQS 256
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
index c90fb5e82ad7..247e0eb5e467 100644
--- a/arch/c6x/kernel/irq.c
+++ b/arch/c6x/kernel/irq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Texas Instruments Incorporated
+ * Copyright (C) 2011-2012 Texas Instruments Incorporated
*
* This borrows heavily from powerpc version, which is:
*
@@ -35,9 +35,7 @@ static DEFINE_RAW_SPINLOCK(core_irq_lock);
static void mask_core_irq(struct irq_data *data)
{
- unsigned int prio = data->irq;
-
- BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS);
+ unsigned int prio = data->hwirq;
raw_spin_lock(&core_irq_lock);
and_creg(IER, ~(1 << prio));
@@ -46,7 +44,7 @@ static void mask_core_irq(struct irq_data *data)
static void unmask_core_irq(struct irq_data *data)
{
- unsigned int prio = data->irq;
+ unsigned int prio = data->hwirq;
raw_spin_lock(&core_irq_lock);
or_creg(IER, 1 << prio);
@@ -59,15 +57,15 @@ static struct irq_chip core_chip = {
.irq_unmask = unmask_core_irq,
};
+static int prio_to_virq[NR_PRIORITY_IRQS];
+
asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
- BUG_ON(prio < 4 || prio >= NR_PRIORITY_IRQS);
-
- generic_handle_irq(prio);
+ generic_handle_irq(prio_to_virq[prio]);
irq_exit();
@@ -82,6 +80,8 @@ static int core_domain_map(struct irq_domain *h, unsigned int virq,
if (hw < 4 || hw >= NR_PRIORITY_IRQS)
return -EINVAL;
+ prio_to_virq[hw] = virq;
+
irq_set_status_flags(virq, IRQ_LEVEL);
irq_set_chip_and_handler(virq, &core_chip, handle_level_irq);
return 0;
@@ -102,9 +102,8 @@ void __init init_IRQ(void)
np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
if (np != NULL) {
/* create the core host */
- core_domain = irq_domain_add_legacy(np, NR_PRIORITY_IRQS,
- 0, 0, &core_domain_ops,
- NULL);
+ core_domain = irq_domain_add_linear(np, NR_PRIORITY_IRQS,
+ &core_domain_ops, NULL);
if (core_domain)
irq_set_default_host(core_domain);
of_node_put(np);
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
index ce46186600c5..f4e72bd8c103 100644
--- a/arch/c6x/kernel/setup.c
+++ b/arch/c6x/kernel/setup.c
@@ -143,6 +143,10 @@ static void __init get_cpuinfo(void)
p->cpu_name = "C64x+";
p->cpu_voltage = "1.2";
break;
+ case 21:
+ p->cpu_name = "C66X";
+ p->cpu_voltage = "1.2";
+ break;
default:
p->cpu_name = "unknown";
break;
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
index 3d8f3c22a94f..3998b24e26f2 100644
--- a/arch/c6x/kernel/signal.c
+++ b/arch/c6x/kernel/signal.c
@@ -249,8 +249,6 @@ static void handle_signal(int sig,
siginfo_t *info, struct k_sigaction *ka,
struct pt_regs *regs, int syscall)
{
- int ret;
-
/* Are we from a system call? */
if (syscall) {
/* If so, check system call restarting.. */
diff --git a/arch/c6x/kernel/soc.c b/arch/c6x/kernel/soc.c
index 0748c94ebef6..3ac74080fded 100644
--- a/arch/c6x/kernel/soc.c
+++ b/arch/c6x/kernel/soc.c
@@ -80,7 +80,7 @@ int soc_mac_addr(unsigned int index, u8 *addr)
if (have_fuse_mac)
memcpy(addr, c6x_fuse_mac, 6);
else
- random_ether_addr(addr);
+ eth_random_addr(addr);
}
/* adjust for specific EMAC device */
diff --git a/arch/c6x/platforms/Kconfig b/arch/c6x/platforms/Kconfig
index 401ee678fd01..c4a0fad89aaf 100644
--- a/arch/c6x/platforms/Kconfig
+++ b/arch/c6x/platforms/Kconfig
@@ -14,3 +14,7 @@ config SOC_TMS320C6472
config SOC_TMS320C6474
bool "TMS320C6474"
default n
+
+config SOC_TMS320C6678
+ bool "TMS320C6678"
+ default n
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
index c1c4e2ae3f85..74e3371eb824 100644
--- a/arch/c6x/platforms/megamod-pic.c
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -243,27 +243,37 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
* as their interrupt parent.
*/
for (i = 0; i < NR_COMBINERS; i++) {
+ struct irq_data *irq_data;
+ irq_hw_number_t hwirq;
irq = irq_of_parse_and_map(np, i);
if (irq == NO_IRQ)
continue;
+ irq_data = irq_get_irq_data(irq);
+ if (!irq_data) {
+ pr_err("%s: combiner-%d no irq_data for virq %d!\n",
+ np->full_name, i, irq);
+ continue;
+ }
+
+ hwirq = irq_data->hwirq;
+
/*
- * We count on the core priority interrupts (4 - 15) being
- * direct mapped. Check that device tree provided something
- * in that range.
+ * Check that device tree provided something in the range
+ * of the core priority interrupts (4 - 15).
*/
- if (irq < 4 || irq >= NR_PRIORITY_IRQS) {
- pr_err("%s: combiner-%d virq %d out of range!\n",
- np->full_name, i, irq);
+ if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
+ pr_err("%s: combiner-%d core irq %ld out of range!\n",
+ np->full_name, i, hwirq);
continue;
}
/* record the mapping */
- mapping[irq - 4] = i;
+ mapping[hwirq - 4] = i;
- pr_debug("%s: combiner-%d cascading to virq %d\n",
- np->full_name, i, irq);
+ pr_debug("%s: combiner-%d cascading to hwirq %ld\n",
+ np->full_name, i, hwirq);
cascade_data[i].pic = pic;
cascade_data[i].index = i;
diff --git a/arch/c6x/platforms/plldata.c b/arch/c6x/platforms/plldata.c
index 2cfd6f42968f..755359eb6286 100644
--- a/arch/c6x/platforms/plldata.c
+++ b/arch/c6x/platforms/plldata.c
@@ -335,6 +335,68 @@ static void __init c6474_setup_clocks(struct device_node *node)
}
#endif /* CONFIG_SOC_TMS320C6474 */
+#ifdef CONFIG_SOC_TMS320C6678
+static struct clk_lookup c6678_clks[] = {
+ CLK(NULL, "pll1", &c6x_soc_pll1.sysclks[0]),
+ CLK(NULL, "pll1_refclk", &c6x_soc_pll1.sysclks[1]),
+ CLK(NULL, "pll1_sysclk2", &c6x_soc_pll1.sysclks[2]),
+ CLK(NULL, "pll1_sysclk3", &c6x_soc_pll1.sysclks[3]),
+ CLK(NULL, "pll1_sysclk4", &c6x_soc_pll1.sysclks[4]),
+ CLK(NULL, "pll1_sysclk5", &c6x_soc_pll1.sysclks[5]),
+ CLK(NULL, "pll1_sysclk6", &c6x_soc_pll1.sysclks[6]),
+ CLK(NULL, "pll1_sysclk7", &c6x_soc_pll1.sysclks[7]),
+ CLK(NULL, "pll1_sysclk8", &c6x_soc_pll1.sysclks[8]),
+ CLK(NULL, "pll1_sysclk9", &c6x_soc_pll1.sysclks[9]),
+ CLK(NULL, "pll1_sysclk10", &c6x_soc_pll1.sysclks[10]),
+ CLK(NULL, "pll1_sysclk11", &c6x_soc_pll1.sysclks[11]),
+ CLK(NULL, "core", &c6x_core_clk),
+ CLK("", NULL, NULL)
+};
+
+static void __init c6678_setup_clocks(struct device_node *node)
+{
+ struct pll_data *pll = &c6x_soc_pll1;
+ struct clk *sysclks = pll->sysclks;
+
+ pll->flags = PLL_HAS_MUL;
+
+ sysclks[1].flags |= FIXED_DIV_PLL;
+ sysclks[1].div = 1;
+
+ sysclks[2].div = PLLDIV2;
+
+ sysclks[3].flags |= FIXED_DIV_PLL;
+ sysclks[3].div = 2;
+
+ sysclks[4].flags |= FIXED_DIV_PLL;
+ sysclks[4].div = 3;
+
+ sysclks[5].div = PLLDIV5;
+
+ sysclks[6].flags |= FIXED_DIV_PLL;
+ sysclks[6].div = 64;
+
+ sysclks[7].flags |= FIXED_DIV_PLL;
+ sysclks[7].div = 6;
+
+ sysclks[8].div = PLLDIV8;
+
+ sysclks[9].flags |= FIXED_DIV_PLL;
+ sysclks[9].div = 12;
+
+ sysclks[10].flags |= FIXED_DIV_PLL;
+ sysclks[10].div = 3;
+
+ sysclks[11].flags |= FIXED_DIV_PLL;
+ sysclks[11].div = 6;
+
+ c6x_core_clk.parent = &sysclks[0];
+ c6x_i2c_clk.parent = &sysclks[7];
+
+ c6x_clks_init(c6678_clks);
+}
+#endif /* CONFIG_SOC_TMS320C6678 */
+
static struct of_device_id c6x_clkc_match[] __initdata = {
#ifdef CONFIG_SOC_TMS320C6455
{ .compatible = "ti,c6455-pll", .data = c6455_setup_clocks },
@@ -348,6 +410,9 @@ static struct of_device_id c6x_clkc_match[] __initdata = {
#ifdef CONFIG_SOC_TMS320C6474
{ .compatible = "ti,c6474-pll", .data = c6474_setup_clocks },
#endif
+#ifdef CONFIG_SOC_TMS320C6678
+ { .compatible = "ti,c6678-pll", .data = c6678_setup_clocks },
+#endif
{ .compatible = "ti,c64x+pll" },
{}
};
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index bc0cfdad1cbc..5b1ee82f63c5 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -6,11 +6,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *b)
{
}
-char * __devinit pcibios_setup(char *str)
-{
- return NULL;
-}
-
void pcibios_set_master(struct pci_dev *dev)
{
u8 lat;
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index 6b0b82ff4419..d04ed14bbf0c 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -268,7 +268,7 @@ static void __init pci_fixup_umc_ide(struct pci_dev *d)
d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
}
-static void __init pci_fixup_ide_bases(struct pci_dev *d)
+static void __devinit pci_fixup_ide_bases(struct pci_dev *d)
{
int i;
@@ -287,7 +287,7 @@ static void __init pci_fixup_ide_bases(struct pci_dev *d)
}
}
-static void __init pci_fixup_ide_trash(struct pci_dev *d)
+static void __devinit pci_fixup_ide_trash(struct pci_dev *d)
{
int i;
diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h
index a09230a08e02..62ef17676b40 100644
--- a/arch/h8300/include/asm/pgtable.h
+++ b/arch/h8300/include/asm/pgtable.h
@@ -70,4 +70,7 @@ extern int is_in_rom(unsigned long);
#define VMALLOC_END 0xffffffff
#define arch_enter_lazy_cpu_mode() do {} while (0)
+
+#include <asm-generic/pgtable.h>
+
#endif /* _H8300_PGTABLE_H */
diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h
index 356068cd0879..8725d1ad4272 100644
--- a/arch/h8300/include/asm/uaccess.h
+++ b/arch/h8300/include/asm/uaccess.h
@@ -100,7 +100,6 @@ extern int __put_user_bad(void);
break; \
default: \
__gu_err = __get_user_bad(); \
- __gu_val = 0; \
break; \
} \
(x) = __gu_val; \
@@ -159,4 +158,6 @@ clear_user(void *to, unsigned long n)
return 0;
}
+#define __clear_user clear_user
+
#endif /* _H8300_UACCESS_H */
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index fca10378701b..5adaadaf9218 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -447,7 +447,7 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-statis void do_signal(struct pt_regs *regs)
+static void do_signal(struct pt_regs *regs)
{
siginfo_t info;
int signr;
diff --git a/arch/h8300/kernel/time.c b/arch/h8300/kernel/time.c
index 32263a138aa6..e0f74191d553 100644
--- a/arch/h8300/kernel/time.c
+++ b/arch/h8300/kernel/time.c
@@ -27,6 +27,7 @@
#include <linux/profile.h>
#include <asm/io.h>
+#include <asm/irq_regs.h>
#include <asm/timer.h>
#define TICK_SIZE (tick_nsec / 1000)
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index f7264621e58d..149fbefc1a4d 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -180,9 +180,7 @@ void __cpuinit start_secondary(void)
notify_cpu_starting(cpu);
- ipi_call_lock();
set_cpu_online(cpu, true);
- ipi_call_unlock();
local_irq_enable();
diff --git a/arch/ia64/include/asm/iommu.h b/arch/ia64/include/asm/iommu.h
index b6a809fa2995..105c93b00b1b 100644
--- a/arch/ia64/include/asm/iommu.h
+++ b/arch/ia64/include/asm/iommu.h
@@ -11,12 +11,10 @@ extern void no_iommu_init(void);
extern int force_iommu, no_iommu;
extern int iommu_pass_through;
extern int iommu_detected;
-extern int iommu_group_mf;
#else
#define iommu_pass_through (0)
#define no_iommu (1)
#define iommu_detected (0)
-#define iommu_group_mf (0)
#endif
extern void iommu_dma_init(void);
extern void machvec_init(const char *name);
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index b9f82c84f093..ec6c6b301238 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -26,6 +26,7 @@
/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_IOAPIC
+#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_DEVICE_ASSIGNMENT
/* Architectural interrupt line count. */
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 7f4a0ed24152..5b7791dd3965 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -12,7 +12,7 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(strlen);
-#include<asm/pgtable.h>
+#include <asm/pgtable.h>
EXPORT_SYMBOL_GPL(empty_zero_page);
#include <asm/checksum.h>
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index 7cdc89b2483c..1ddcfe5ef353 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -32,7 +32,6 @@ int force_iommu __read_mostly;
#endif
int iommu_pass_through;
-int iommu_group_mf;
/* Dummy device used for NULL arguments (normally ISA). Better would
be probably a smaller DMA mask, but this is bug-to-bug compatible
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 1113b8aba07f..963d2db53bfa 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -382,7 +382,6 @@ smp_callin (void)
set_numa_node(cpu_to_node_map[cpuid]);
set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));
- ipi_call_lock_irq();
spin_lock(&vector_lock);
/* Setup the per cpu irq handling data structures */
__setup_vector_irq(cpuid);
@@ -390,7 +389,6 @@ smp_callin (void)
set_cpu_online(cpuid, true);
per_cpu(cpu_state, cpuid) = CPU_ONLINE;
spin_unlock(&vector_lock);
- ipi_call_unlock_irq();
smp_setup_percpu_timer();
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index 9806e55f91be..df5351e3eed7 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -19,6 +19,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
+ depends on BROKEN
depends on HAVE_KVM && MODULES && EXPERIMENTAL
# for device assignment:
depends on PCI
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index f0b9cac82414..176a12cd56de 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -20,9 +20,9 @@
*/
-#include<linux/kernel.h>
-#include<linux/module.h>
-#include<asm/fpswa.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/fpswa.h>
#include "vcpu.h"
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 02d29c2a132a..8443daf4f515 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -72,6 +72,10 @@ mapped_kernel_page_is_present (unsigned long address)
return pte_present(pte);
}
+# define VM_READ_BIT 0
+# define VM_WRITE_BIT 1
+# define VM_EXEC_BIT 2
+
void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
@@ -81,6 +85,12 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
struct siginfo si;
unsigned long mask;
int fault;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
+
+ flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
/* mmap_sem is performance critical.... */
prefetchw(&mm->mmap_sem);
@@ -109,6 +119,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if (notify_page_fault(regs, TRAP_BRKPT))
return;
+retry:
down_read(&mm->mmap_sem);
vma = find_vma_prev(mm, address, &prev_vma);
@@ -130,10 +141,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/* OK, we've got a good vm_area for this memory area. Check the access permissions: */
-# define VM_READ_BIT 0
-# define VM_WRITE_BIT 1
-# define VM_EXEC_BIT 2
-
# if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
|| (1 << VM_EXEC_BIT) != VM_EXEC)
# error File is out of sync with <linux/mm.h>. Please update.
@@ -142,9 +149,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
goto bad_area;
- mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
- | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
-
if ((vma->vm_flags & mask) != mask)
goto bad_area;
@@ -153,7 +157,11 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
* sure we exit gracefully rather than endlessly redo the
* fault.
*/
- fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
/*
* We ran out of memory, or some other thing happened
@@ -168,10 +176,24 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
}
BUG();
}
- if (fault & VM_FAULT_MAJOR)
- current->maj_flt++;
- else
- current->min_flt++;
+
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (fault & VM_FAULT_MAJOR)
+ current->maj_flt++;
+ else
+ current->min_flt++;
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+ /* No need to up_read(&mm->mmap_sem) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
+ }
+ }
+
up_read(&mm->mmap_sem);
return;
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 524df4295c90..81acc7a57f3e 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -351,6 +351,8 @@ pci_acpi_scan_root(struct acpi_pci_root *root)
#endif
INIT_LIST_HEAD(&info.resources);
+ /* insert busn resource at first */
+ pci_add_resource(&info.resources, &root->secondary);
acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
&windows);
if (windows) {
@@ -384,7 +386,7 @@ pci_acpi_scan_root(struct acpi_pci_root *root)
return NULL;
}
- pbus->subordinate = pci_scan_child_bus(pbus);
+ pci_scan_child_bus(pbus);
return pbus;
out3:
@@ -496,15 +498,6 @@ pcibios_align_resource (void *data, const struct resource *res,
return res->start;
}
-/*
- * PCI BIOS setup, always defaults to SAL interface
- */
-char * __init
-pcibios_setup (char *str)
-{
- return str;
-}
-
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile
index 177716b1d613..01729c2979ba 100644
--- a/arch/m32r/boot/compressed/Makefile
+++ b/arch/m32r/boot/compressed/Makefile
@@ -43,9 +43,9 @@ endif
OBJCOPYFLAGS += -R .empty_zero_page
-suffix_$(CONFIG_KERNEL_GZIP) = gz
-suffix_$(CONFIG_KERNEL_BZIP2) = bz2
-suffix_$(CONFIG_KERNEL_LZMA) = lzma
+suffix-$(CONFIG_KERNEL_GZIP) = gz
+suffix-$(CONFIG_KERNEL_BZIP2) = bz2
+suffix-$(CONFIG_KERNEL_LZMA) = lzma
$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
$(call if_changed,ld)
diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c
index 370d60881977..28a09529f206 100644
--- a/arch/m32r/boot/compressed/misc.c
+++ b/arch/m32r/boot/compressed/misc.c
@@ -28,7 +28,7 @@ static unsigned long free_mem_ptr;
static unsigned long free_mem_end_ptr;
#ifdef CONFIG_KERNEL_BZIP2
-static void *memset(void *s, int c, size_t n)
+void *memset(void *s, int c, size_t n)
{
char *ss = s;
@@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n)
#endif
#ifdef CONFIG_KERNEL_GZIP
+void *memcpy(void *dest, const void *src, size_t n)
+{
+ char *d = dest;
+ const char *s = src;
+ while (n--)
+ *d++ = *s++;
+
+ return dest;
+}
+
#define BOOT_HEAP_SIZE 0x10000
#include "../../../../lib/decompress_inflate.c"
#endif
diff --git a/arch/m32r/include/asm/ptrace.h b/arch/m32r/include/asm/ptrace.h
index 527527584dd0..4313aa62b51b 100644
--- a/arch/m32r/include/asm/ptrace.h
+++ b/arch/m32r/include/asm/ptrace.h
@@ -113,9 +113,6 @@ struct pt_regs {
#define PTRACE_OLDSETOPTIONS 21
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD 0x00000001
-
#ifdef __KERNEL__
#include <asm/m32r.h> /* M32R_PSW_BSM, M32R_PSW_BPM */
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index cf7829a61551..c689b828dfe2 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -79,11 +79,6 @@ static __inline__ int cpu_number_map(int cpu)
return cpu;
}
-static __inline__ unsigned int num_booting_cpus(void)
-{
- return cpumask_weight(&cpu_callout_map);
-}
-
extern void smp_send_timer(void);
extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
diff --git a/arch/m32r/kernel/ptrace.c b/arch/m32r/kernel/ptrace.c
index 4c03361537aa..51f5e9aa4901 100644
--- a/arch/m32r/kernel/ptrace.c
+++ b/arch/m32r/kernel/ptrace.c
@@ -591,17 +591,16 @@ void user_enable_single_step(struct task_struct *child)
if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
!= sizeof(insn))
- return -EIO;
+ return;
compute_next_pc(insn, pc, &next_pc, child);
if (next_pc & 0x80000000)
- return -EIO;
+ return;
if (embed_debug_trap(child, next_pc))
- return -EIO;
+ return;
invalidate_cache();
- return 0;
}
void user_disable_single_step(struct task_struct *child)
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index f3fb2c029cfc..d0f60b97bbc5 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -286,7 +286,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
case -ERESTARTNOINTR:
regs->r0 = regs->orig_r0;
if (prev_insn(regs) < 0)
- return -EFAULT;
+ return;
}
}
diff --git a/arch/m68k/Kconfig.bus b/arch/m68k/Kconfig.bus
index 3adb499584fb..ffc0601a2a19 100644
--- a/arch/m68k/Kconfig.bus
+++ b/arch/m68k/Kconfig.bus
@@ -48,6 +48,13 @@ config ISA
config GENERIC_ISA_DMA
def_bool ISA
+config PCI
+ bool "PCI support"
+ depends on M54xx
+ help
+ Enable the PCI bus. Support for the PCI bus hardware built into the
+ ColdFire 547x and 548x processors.
+
source "drivers/pci/Kconfig"
source "drivers/zorro/Kconfig"
diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
index 2b53254ad994..43a9f8f1b8eb 100644
--- a/arch/m68k/Kconfig.cpu
+++ b/arch/m68k/Kconfig.cpu
@@ -23,7 +23,7 @@ config M68KCLASSIC
config COLDFIRE
bool "Coldfire CPU family support"
select GENERIC_GPIO
- select ARCH_REQUIRE_GPIOLIB
+ select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_CUSTOM_GPIO_H
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_MULDIV64
@@ -167,6 +167,14 @@ config M5249
help
Motorola ColdFire 5249 processor support.
+config M525x
+ bool "MCF525x"
+ depends on !MMU
+ select COLDFIRE_SW_A7
+ select HAVE_MBAR
+ help
+ Freescale (Motorola) Coldfire 5251/5253 processor support.
+
config M527x
bool
@@ -253,6 +261,14 @@ config M548x
help
Freescale ColdFire 5480/5481/5482/5483/5484/5485 processor support.
+config M5441x
+ bool "MCF5441x"
+ depends on !MMU
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CACHE_CB
+ help
+ Freescale Coldfire 54410/54415/54416/54417/54418 processor support.
+
endif # COLDFIRE
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index b7f2e2d5cd2e..7636751f2f87 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -41,6 +41,7 @@ cpuflags-$(CONFIG_M68030) :=
cpuflags-$(CONFIG_M68020) :=
cpuflags-$(CONFIG_M68360) := -m68332
cpuflags-$(CONFIG_M68000) := -m68000
+cpuflags-$(CONFIG_M5441x) := $(call cc-option,-mcpu=54455,-mcfv4e)
cpuflags-$(CONFIG_M54xx) := $(call cc-option,-mcpu=5475,-m5200)
cpuflags-$(CONFIG_M5407) := $(call cc-option,-mcpu=5407,-m5200)
cpuflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307)
@@ -50,6 +51,7 @@ cpuflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307)
cpuflags-$(CONFIG_M5272) := $(call cc-option,-mcpu=5272,-m5307)
cpuflags-$(CONFIG_M5271) := $(call cc-option,-mcpu=5271,-m5307)
cpuflags-$(CONFIG_M523x) := $(call cc-option,-mcpu=523x,-m5307)
+cpuflags-$(CONFIG_M525x) := $(call cc-option,-mcpu=5253,-m5200)
cpuflags-$(CONFIG_M5249) := $(call cc-option,-mcpu=5249,-m5200)
cpuflags-$(CONFIG_M520x) := $(call cc-option,-mcpu=5208,-m5200)
cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 8104bd874649..fa2c3d681d84 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -16,7 +16,48 @@
#define DCACHE_MAX_ADDR 0
#define DCACHE_SETMASK 0
#endif
+#ifndef CACHE_MODE
+#define CACHE_MODE 0
+#define CACR_ICINVA 0
+#define CACR_DCINVA 0
+#define CACR_BCINVA 0
+#endif
+
+/*
+ * ColdFire architecture has no way to clear individual cache lines, so we
+ * are stuck invalidating all the cache entries when we want a clear operation.
+ */
+static inline void clear_cf_icache(unsigned long start, unsigned long end)
+{
+ __asm__ __volatile__ (
+ "movec %0,%%cacr\n\t"
+ "nop"
+ :
+ : "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA));
+}
+
+static inline void clear_cf_dcache(unsigned long start, unsigned long end)
+{
+ __asm__ __volatile__ (
+ "movec %0,%%cacr\n\t"
+ "nop"
+ :
+ : "r" (CACHE_MODE | CACR_DCINVA));
+}
+static inline void clear_cf_bcache(unsigned long start, unsigned long end)
+{
+ __asm__ __volatile__ (
+ "movec %0,%%cacr\n\t"
+ "nop"
+ :
+ : "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA | CACR_DCINVA));
+}
+
+/*
+ * Use the ColdFire cpushl instruction to push (and invalidate) cache lines.
+ * The start and end addresses are cache line numbers not memory addresses.
+ */
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
unsigned long set;
diff --git a/arch/m68k/include/asm/dma.h b/arch/m68k/include/asm/dma.h
index 6fbdfe895104..0ff3fc6a6d9a 100644
--- a/arch/m68k/include/asm/dma.h
+++ b/arch/m68k/include/asm/dma.h
@@ -33,7 +33,9 @@
* Set number of channels of DMA on ColdFire for different implementations.
*/
#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
- defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
+ defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
+ defined(CONFIG_M528x) || defined(CONFIG_M525x)
+
#define MAX_M68K_DMA_CHANNELS 4
#elif defined(CONFIG_M5272)
#define MAX_M68K_DMA_CHANNELS 1
@@ -486,6 +488,10 @@ static __inline__ int get_dma_residue(unsigned int dmanr)
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
+#ifdef CONFIG_PCI
+extern int isa_dma_bridge_buggy;
+#else
#define isa_dma_bridge_buggy (0)
+#endif
#endif /* _M68K_DMA_H */
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h
index 00d0071de4c3..4395ffc51fdb 100644
--- a/arch/m68k/include/asm/gpio.h
+++ b/arch/m68k/include/asm/gpio.h
@@ -17,170 +17,9 @@
#define coldfire_gpio_h
#include <linux/io.h>
-#include <asm-generic/gpio.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
-
-/*
- * The Freescale Coldfire family is quite varied in how they implement GPIO.
- * Some parts have 8 bit ports, some have 16bit and some have 32bit; some have
- * only one port, others have multiple ports; some have a single data latch
- * for both input and output, others have a separate pin data register to read
- * input; some require a read-modify-write access to change an output, others
- * have set and clear registers for some of the outputs; Some have all the
- * GPIOs in a single control area, others have some GPIOs implemented in
- * different modules.
- *
- * This implementation attempts accommodate the differences while presenting
- * a generic interface that will optimize to as few instructions as possible.
- */
-#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
- defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
- defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M532x) || defined(CONFIG_M54xx)
-
-/* These parts have GPIO organized by 8 bit ports */
-
-#define MCFGPIO_PORTTYPE u8
-#define MCFGPIO_PORTSIZE 8
-#define mcfgpio_read(port) __raw_readb(port)
-#define mcfgpio_write(data, port) __raw_writeb(data, port)
-
-#elif defined(CONFIG_M5307) || defined(CONFIG_M5407) || defined(CONFIG_M5272)
-
-/* These parts have GPIO organized by 16 bit ports */
-
-#define MCFGPIO_PORTTYPE u16
-#define MCFGPIO_PORTSIZE 16
-#define mcfgpio_read(port) __raw_readw(port)
-#define mcfgpio_write(data, port) __raw_writew(data, port)
-
-#elif defined(CONFIG_M5249)
-
-/* These parts have GPIO organized by 32 bit ports */
-
-#define MCFGPIO_PORTTYPE u32
-#define MCFGPIO_PORTSIZE 32
-#define mcfgpio_read(port) __raw_readl(port)
-#define mcfgpio_write(data, port) __raw_writel(data, port)
-
-#endif
-
-#define mcfgpio_bit(gpio) (1 << ((gpio) % MCFGPIO_PORTSIZE))
-#define mcfgpio_port(gpio) ((gpio) / MCFGPIO_PORTSIZE)
-
-#if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
- defined(CONFIG_M527x) || defined(CONFIG_M528x) || defined(CONFIG_M532x)
-/*
- * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses
- * read-modify-write to change an output and a GPIO module which has separate
- * set/clr registers to directly change outputs with a single write access.
- */
-#if defined(CONFIG_M528x)
-/*
- * The 528x also has GPIOs in other modules (GPT, QADC) which use
- * read-modify-write as well as those controlled by the EPORT and GPIO modules.
- */
-#define MCFGPIO_SCR_START 40
-#else
-#define MCFGPIO_SCR_START 8
-#endif
-
-#define MCFGPIO_SETR_PORT(gpio) (MCFGPIO_SETR + \
- mcfgpio_port(gpio - MCFGPIO_SCR_START))
-
-#define MCFGPIO_CLRR_PORT(gpio) (MCFGPIO_CLRR + \
- mcfgpio_port(gpio - MCFGPIO_SCR_START))
-#else
-
-#define MCFGPIO_SCR_START MCFGPIO_PIN_MAX
-/* with MCFGPIO_SCR == MCFGPIO_PIN_MAX, these will be optimized away */
-#define MCFGPIO_SETR_PORT(gpio) 0
-#define MCFGPIO_CLRR_PORT(gpio) 0
-
-#endif
-/*
- * Coldfire specific helper functions
- */
-
-/* return the port pin data register for a gpio */
-static inline u32 __mcf_gpio_ppdr(unsigned gpio)
-{
-#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
- defined(CONFIG_M5307) || defined(CONFIG_M5407)
- return MCFSIM_PADAT;
-#elif defined(CONFIG_M5272)
- if (gpio < 16)
- return MCFSIM_PADAT;
- else if (gpio < 32)
- return MCFSIM_PBDAT;
- else
- return MCFSIM_PCDAT;
-#elif defined(CONFIG_M5249)
- if (gpio < 32)
- return MCFSIM2_GPIOREAD;
- else
- return MCFSIM2_GPIO1READ;
-#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
- defined(CONFIG_M527x) || defined(CONFIG_M528x) || defined(CONFIG_M532x)
- if (gpio < 8)
- return MCFEPORT_EPPDR;
-#if defined(CONFIG_M528x)
- else if (gpio < 16)
- return MCFGPTA_GPTPORT;
- else if (gpio < 24)
- return MCFGPTB_GPTPORT;
- else if (gpio < 32)
- return MCFQADC_PORTQA;
- else if (gpio < 40)
- return MCFQADC_PORTQB;
-#endif
- else
- return MCFGPIO_PPDR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
-#else
- return 0;
-#endif
-}
-
-/* return the port output data register for a gpio */
-static inline u32 __mcf_gpio_podr(unsigned gpio)
-{
-#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
- defined(CONFIG_M5307) || defined(CONFIG_M5407)
- return MCFSIM_PADAT;
-#elif defined(CONFIG_M5272)
- if (gpio < 16)
- return MCFSIM_PADAT;
- else if (gpio < 32)
- return MCFSIM_PBDAT;
- else
- return MCFSIM_PCDAT;
-#elif defined(CONFIG_M5249)
- if (gpio < 32)
- return MCFSIM2_GPIOWRITE;
- else
- return MCFSIM2_GPIO1WRITE;
-#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
- defined(CONFIG_M527x) || defined(CONFIG_M528x) || defined(CONFIG_M532x)
- if (gpio < 8)
- return MCFEPORT_EPDR;
-#if defined(CONFIG_M528x)
- else if (gpio < 16)
- return MCFGPTA_GPTPORT;
- else if (gpio < 24)
- return MCFGPTB_GPTPORT;
- else if (gpio < 32)
- return MCFQADC_PORTQA;
- else if (gpio < 40)
- return MCFQADC_PORTQB;
-#endif
- else
- return MCFGPIO_PODR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
-#else
- return 0;
-#endif
-}
-
+#include <asm/mcfgpio.h>
/*
* The Generic GPIO functions
*
@@ -191,7 +30,7 @@ static inline u32 __mcf_gpio_podr(unsigned gpio)
static inline int gpio_get_value(unsigned gpio)
{
if (__builtin_constant_p(gpio) && gpio < MCFGPIO_PIN_MAX)
- return mcfgpio_read(__mcf_gpio_ppdr(gpio)) & mcfgpio_bit(gpio);
+ return mcfgpio_read(__mcfgpio_ppdr(gpio)) & mcfgpio_bit(gpio);
else
return __gpio_get_value(gpio);
}
@@ -204,12 +43,12 @@ static inline void gpio_set_value(unsigned gpio, int value)
MCFGPIO_PORTTYPE data;
local_irq_save(flags);
- data = mcfgpio_read(__mcf_gpio_podr(gpio));
+ data = mcfgpio_read(__mcfgpio_podr(gpio));
if (value)
data |= mcfgpio_bit(gpio);
else
data &= ~mcfgpio_bit(gpio);
- mcfgpio_write(data, __mcf_gpio_podr(gpio));
+ mcfgpio_write(data, __mcfgpio_podr(gpio));
local_irq_restore(flags);
} else {
if (value)
@@ -225,8 +64,14 @@ static inline void gpio_set_value(unsigned gpio, int value)
static inline int gpio_to_irq(unsigned gpio)
{
- return (gpio < MCFGPIO_IRQ_MAX) ? gpio + MCFGPIO_IRQ_VECBASE
- : __gpio_to_irq(gpio);
+#if defined(MCFGPIO_IRQ_MIN)
+ if ((gpio >= MCFGPIO_IRQ_MIN) && (gpio < MCFGPIO_IRQ_MAX))
+#else
+ if (gpio < MCFGPIO_IRQ_MAX)
+#endif
+ return gpio + MCFGPIO_IRQ_VECBASE;
+ else
+ return __gpio_to_irq(gpio);
}
static inline int irq_to_gpio(unsigned irq)
diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
index fa4324bcf566..a6686d26fe17 100644
--- a/arch/m68k/include/asm/io_mm.h
+++ b/arch/m68k/include/asm/io_mm.h
@@ -65,7 +65,53 @@
-#ifdef CONFIG_ISA
+#if defined(CONFIG_PCI) && defined(CONFIG_COLDFIRE)
+
+#define HAVE_ARCH_PIO_SIZE
+#define PIO_OFFSET 0
+#define PIO_MASK 0xffff
+#define PIO_RESERVED 0x10000
+
+u8 mcf_pci_inb(u32 addr);
+u16 mcf_pci_inw(u32 addr);
+u32 mcf_pci_inl(u32 addr);
+void mcf_pci_insb(u32 addr, u8 *buf, u32 len);
+void mcf_pci_insw(u32 addr, u16 *buf, u32 len);
+void mcf_pci_insl(u32 addr, u32 *buf, u32 len);
+
+void mcf_pci_outb(u8 v, u32 addr);
+void mcf_pci_outw(u16 v, u32 addr);
+void mcf_pci_outl(u32 v, u32 addr);
+void mcf_pci_outsb(u32 addr, const u8 *buf, u32 len);
+void mcf_pci_outsw(u32 addr, const u16 *buf, u32 len);
+void mcf_pci_outsl(u32 addr, const u32 *buf, u32 len);
+
+#define inb mcf_pci_inb
+#define inb_p mcf_pci_inb
+#define inw mcf_pci_inw
+#define inw_p mcf_pci_inw
+#define inl mcf_pci_inl
+#define inl_p mcf_pci_inl
+#define insb mcf_pci_insb
+#define insw mcf_pci_insw
+#define insl mcf_pci_insl
+
+#define outb mcf_pci_outb
+#define outb_p mcf_pci_outb
+#define outw mcf_pci_outw
+#define outw_p mcf_pci_outw
+#define outl mcf_pci_outl
+#define outl_p mcf_pci_outl
+#define outsb mcf_pci_outsb
+#define outsw mcf_pci_outsw
+#define outsl mcf_pci_outsl
+
+#define readb(addr) in_8(addr)
+#define writeb(v, addr) out_8((addr), (v))
+#define readw(addr) in_le16(addr)
+#define writew(v, addr) out_le16((addr), (v))
+
+#elif defined(CONFIG_ISA)
#if MULTI_ISA == 0
#undef MULTI_ISA
@@ -340,4 +386,6 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int
*/
#define xlate_dev_kmem_ptr(p) p
+#define ioport_map(port, nr) ((void __iomem *)(port))
+
#endif /* _IO_H */
diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h
index 17f2aab9cf97..db3f8ee4a6c6 100644
--- a/arch/m68k/include/asm/m520xsim.h
+++ b/arch/m68k/include/asm/m520xsim.h
@@ -42,6 +42,9 @@
#define MCFINTC1_SIMR (0)
#define MCFINTC1_CIMR (0)
#define MCFINTC1_ICR0 (0)
+#define MCFINTC2_SIMR (0)
+#define MCFINTC2_CIMR (0)
+#define MCFINTC2_ICR0 (0)
#define MCFINT_VECBASE 64
#define MCFINT_UART0 26 /* Interrupt number for UART0 */
@@ -62,6 +65,7 @@
#define MCF_IRQ_FECENTC0 (MCFINT_VECBASE + MCFINT_FECENTC0)
#define MCF_IRQ_QSPI (MCFINT_VECBASE + MCFINT_QSPI)
+#define MCF_IRQ_PIT1 (MCFINT_VECBASE + MCFINT_PIT1)
/*
* SDRAM configuration registers.
@@ -186,5 +190,15 @@
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
+/*
+ * Power Management.
+ */
+#define MCFPM_WCR 0xfc040013
+#define MCFPM_PPMSR0 0xfc04002c
+#define MCFPM_PPMCR0 0xfc04002d
+#define MCFPM_PPMHR0 0xfc040030
+#define MCFPM_PPMLR0 0xfc040034
+#define MCFPM_LPCR 0xfc0a0007
+
/****************************************************************************/
#endif /* m520xsim_h */
diff --git a/arch/m68k/include/asm/m523xsim.h b/arch/m68k/include/asm/m523xsim.h
index 075062d4eecd..91d3abc3f2a5 100644
--- a/arch/m68k/include/asm/m523xsim.h
+++ b/arch/m68k/include/asm/m523xsim.h
@@ -52,6 +52,7 @@
#define MCF_IRQ_FECENTC0 (MCFINT_VECBASE + MCFINT_FECENTC0)
#define MCF_IRQ_QSPI (MCFINT_VECBASE + MCFINT_QSPI)
+#define MCF_IRQ_PIT1 (MCFINT_VECBASE + MCFINT_PIT1)
/*
* SDRAM configuration registers.
diff --git a/arch/m68k/include/asm/m525xsim.h b/arch/m68k/include/asm/m525xsim.h
new file mode 100644
index 000000000000..6da24f653902
--- /dev/null
+++ b/arch/m68k/include/asm/m525xsim.h
@@ -0,0 +1,194 @@
+/****************************************************************************/
+
+/*
+ * m525xsim.h -- ColdFire 525x System Integration Module support.
+ *
+ * (C) Copyright 2012, Steven king <sfking@fdwdc.com>
+ * (C) Copyright 2002, Greg Ungerer (gerg@snapgear.com)
+ */
+
+/****************************************************************************/
+#ifndef m525xsim_h
+#define m525xsim_h
+/****************************************************************************/
+
+#define CPU_NAME "COLDFIRE(m525x)"
+#define CPU_INSTR_PER_JIFFY 3
+#define MCF_BUSCLK (MCF_CLK / 2)
+
+#include <asm/m52xxacr.h>
+
+/*
+ * The 525x has a second MBAR region, define its address.
+ */
+#define MCF_MBAR2 0x80000000
+
+/*
+ * Define the 525x SIM register set addresses.
+ */
+#define MCFSIM_RSR 0x00 /* Reset Status reg (r/w) */
+#define MCFSIM_SYPCR 0x01 /* System Protection reg (r/w)*/
+#define MCFSIM_SWIVR 0x02 /* SW Watchdog intr reg (r/w) */
+#define MCFSIM_SWSR 0x03 /* SW Watchdog service (r/w) */
+#define MCFSIM_MPARK 0x0C /* BUS Master Control Reg*/
+#define MCFSIM_IPR 0x40 /* Interrupt Pend reg (r/w) */
+#define MCFSIM_IMR 0x44 /* Interrupt Mask reg (r/w) */
+#define MCFSIM_ICR0 0x4c /* Intr Ctrl reg 0 (r/w) */
+#define MCFSIM_ICR1 0x4d /* Intr Ctrl reg 1 (r/w) */
+#define MCFSIM_ICR2 0x4e /* Intr Ctrl reg 2 (r/w) */
+#define MCFSIM_ICR3 0x4f /* Intr Ctrl reg 3 (r/w) */
+#define MCFSIM_ICR4 0x50 /* Intr Ctrl reg 4 (r/w) */
+#define MCFSIM_ICR5 0x51 /* Intr Ctrl reg 5 (r/w) */
+#define MCFSIM_ICR6 0x52 /* Intr Ctrl reg 6 (r/w) */
+#define MCFSIM_ICR7 0x53 /* Intr Ctrl reg 7 (r/w) */
+#define MCFSIM_ICR8 0x54 /* Intr Ctrl reg 8 (r/w) */
+#define MCFSIM_ICR9 0x55 /* Intr Ctrl reg 9 (r/w) */
+#define MCFSIM_ICR10 0x56 /* Intr Ctrl reg 10 (r/w) */
+#define MCFSIM_ICR11 0x57 /* Intr Ctrl reg 11 (r/w) */
+
+#define MCFSIM_CSAR0 0x80 /* CS 0 Address 0 reg (r/w) */
+#define MCFSIM_CSMR0 0x84 /* CS 0 Mask 0 reg (r/w) */
+#define MCFSIM_CSCR0 0x8a /* CS 0 Control reg (r/w) */
+#define MCFSIM_CSAR1 0x8c /* CS 1 Address reg (r/w) */
+#define MCFSIM_CSMR1 0x90 /* CS 1 Mask reg (r/w) */
+#define MCFSIM_CSCR1 0x96 /* CS 1 Control reg (r/w) */
+#define MCFSIM_CSAR2 0x98 /* CS 2 Address reg (r/w) */
+#define MCFSIM_CSMR2 0x9c /* CS 2 Mask reg (r/w) */
+#define MCFSIM_CSCR2 0xa2 /* CS 2 Control reg (r/w) */
+#define MCFSIM_CSAR3 0xa4 /* CS 3 Address reg (r/w) */
+#define MCFSIM_CSMR3 0xa8 /* CS 3 Mask reg (r/w) */
+#define MCFSIM_CSCR3 0xae /* CS 3 Control reg (r/w) */
+#define MCFSIM_CSAR4 0xb0 /* CS 4 Address reg (r/w) */
+#define MCFSIM_CSMR4 0xb4 /* CS 4 Mask reg (r/w) */
+#define MCFSIM_CSCR4 0xba /* CS 4 Control reg (r/w) */
+
+#define MCFSIM_DCR (MCF_MBAR + 0x100) /* DRAM Control */
+#define MCFSIM_DACR0 (MCF_MBAR + 0x108) /* DRAM 0 Addr/Ctrl */
+#define MCFSIM_DMR0 (MCF_MBAR + 0x10c) /* DRAM 0 Mask */
+
+/*
+ * Secondary Interrupt Controller (in MBAR2)
+*/
+#define MCFINTC2_INTBASE (MCF_MBAR2 + 0x168) /* Base Vector Reg */
+#define MCFINTC2_INTPRI1 (MCF_MBAR2 + 0x140) /* 0-7 priority */
+#define MCFINTC2_INTPRI2 (MCF_MBAR2 + 0x144) /* 8-15 priority */
+#define MCFINTC2_INTPRI3 (MCF_MBAR2 + 0x148) /* 16-23 priority */
+#define MCFINTC2_INTPRI4 (MCF_MBAR2 + 0x14c) /* 24-31 priority */
+#define MCFINTC2_INTPRI5 (MCF_MBAR2 + 0x150) /* 32-39 priority */
+#define MCFINTC2_INTPRI6 (MCF_MBAR2 + 0x154) /* 40-47 priority */
+#define MCFINTC2_INTPRI7 (MCF_MBAR2 + 0x158) /* 48-55 priority */
+#define MCFINTC2_INTPRI8 (MCF_MBAR2 + 0x15c) /* 56-63 priority */
+
+#define MCFINTC2_INTPRI_REG(i) (MCFINTC2_INTPRI1 + \
+ ((((i) - MCFINTC2_VECBASE) / 8) * 4))
+#define MCFINTC2_INTPRI_BITS(b, i) ((b) << (((i) % 8) * 4))
+
+/*
+ * Timer module.
+ */
+#define MCFTIMER_BASE1 (MCF_MBAR + 0x140) /* Base of TIMER1 */
+#define MCFTIMER_BASE2 (MCF_MBAR + 0x180) /* Base of TIMER2 */
+
+/*
+ * UART module.
+ */
+#define MCFUART_BASE0 (MCF_MBAR + 0x1c0) /* Base address UART0 */
+#define MCFUART_BASE1 (MCF_MBAR + 0x200) /* Base address UART1 */
+
+/*
+ * QSPI module.
+ */
+#define MCFQSPI_BASE (MCF_MBAR + 0x300) /* Base address QSPI */
+#define MCFQSPI_SIZE 0x40 /* Register set size */
+
+
+#define MCFQSPI_CS0 15
+#define MCFQSPI_CS1 16
+#define MCFQSPI_CS2 24
+#define MCFQSPI_CS3 28
+
+/*
+ * I2C module.
+ */
+#define MCFI2C_BASE0 (MCF_MBAR + 0x280) /* Base addreess I2C0 */
+#define MCFI2C_SIZE0 0x20 /* Register set size */
+
+#define MCFI2C_BASE1 (MCF_MBAR2 + 0x440) /* Base addreess I2C1 */
+#define MCFI2C_SIZE1 0x20 /* Register set size */
+/*
+ * DMA unit base addresses.
+ */
+#define MCFDMA_BASE0 (MCF_MBAR + 0x300) /* Base address DMA 0 */
+#define MCFDMA_BASE1 (MCF_MBAR + 0x340) /* Base address DMA 1 */
+#define MCFDMA_BASE2 (MCF_MBAR + 0x380) /* Base address DMA 2 */
+#define MCFDMA_BASE3 (MCF_MBAR + 0x3C0) /* Base address DMA 3 */
+
+/*
+ * Some symbol defines for the above...
+ */
+#define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */
+#define MCFSIM_TIMER1ICR MCFSIM_ICR1 /* Timer 1 ICR */
+#define MCFSIM_TIMER2ICR MCFSIM_ICR2 /* Timer 2 ICR */
+#define MCFSIM_I2CICR MCFSIM_ICR3 /* I2C ICR */
+#define MCFSIM_UART1ICR MCFSIM_ICR4 /* UART 1 ICR */
+#define MCFSIM_UART2ICR MCFSIM_ICR5 /* UART 2 ICR */
+#define MCFSIM_DMA0ICR MCFSIM_ICR6 /* DMA 0 ICR */
+#define MCFSIM_DMA1ICR MCFSIM_ICR7 /* DMA 1 ICR */
+#define MCFSIM_DMA2ICR MCFSIM_ICR8 /* DMA 2 ICR */
+#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
+#define MCFSIM_QSPIICR MCFSIM_ICR10 /* QSPI ICR */
+
+/*
+ * Define system peripheral IRQ usage.
+ */
+#define MCF_IRQ_QSPI 28 /* QSPI, Level 4 */
+#define MCF_IRQ_I2C0 29
+#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
+#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
+
+#define MCF_IRQ_UART0 73 /* UART0 */
+#define MCF_IRQ_UART1 74 /* UART1 */
+
+/*
+ * Define the base interrupt for the second interrupt controller.
+ * We set it to 128, out of the way of the base interrupts, and plenty
+ * of room for its 64 interrupts.
+ */
+#define MCFINTC2_VECBASE 128
+
+#define MCF_IRQ_GPIO0 (MCFINTC2_VECBASE + 32)
+#define MCF_IRQ_GPIO1 (MCFINTC2_VECBASE + 33)
+#define MCF_IRQ_GPIO2 (MCFINTC2_VECBASE + 34)
+#define MCF_IRQ_GPIO3 (MCFINTC2_VECBASE + 35)
+#define MCF_IRQ_GPIO4 (MCFINTC2_VECBASE + 36)
+#define MCF_IRQ_GPIO5 (MCFINTC2_VECBASE + 37)
+#define MCF_IRQ_GPIO6 (MCFINTC2_VECBASE + 38)
+
+#define MCF_IRQ_USBWUP (MCFINTC2_VECBASE + 40)
+#define MCF_IRQ_I2C1 (MCFINTC2_VECBASE + 62)
+
+/*
+ * General purpose IO registers (in MBAR2).
+ */
+#define MCFSIM2_GPIOREAD (MCF_MBAR2 + 0x000) /* GPIO read values */
+#define MCFSIM2_GPIOWRITE (MCF_MBAR2 + 0x004) /* GPIO write values */
+#define MCFSIM2_GPIOENABLE (MCF_MBAR2 + 0x008) /* GPIO enabled */
+#define MCFSIM2_GPIOFUNC (MCF_MBAR2 + 0x00C) /* GPIO function */
+#define MCFSIM2_GPIO1READ (MCF_MBAR2 + 0x0B0) /* GPIO1 read values */
+#define MCFSIM2_GPIO1WRITE (MCF_MBAR2 + 0x0B4) /* GPIO1 write values */
+#define MCFSIM2_GPIO1ENABLE (MCF_MBAR2 + 0x0B8) /* GPIO1 enabled */
+#define MCFSIM2_GPIO1FUNC (MCF_MBAR2 + 0x0BC) /* GPIO1 function */
+
+#define MCFSIM2_GPIOINTSTAT (MCF_MBAR2 + 0xc0) /* GPIO intr status */
+#define MCFSIM2_GPIOINTCLEAR (MCF_MBAR2 + 0xc0) /* GPIO intr clear */
+#define MCFSIM2_GPIOINTENABLE (MCF_MBAR2 + 0xc4) /* GPIO intr enable */
+
+/*
+ * Generic GPIO support
+ */
+#define MCFGPIO_PIN_MAX 64
+#define MCFGPIO_IRQ_MAX 7
+#define MCFGPIO_IRQ_VECBASE MCF_IRQ_GPIO0
+
+/****************************************************************************/
+#endif /* m525xsim_h */
diff --git a/arch/m68k/include/asm/m527xsim.h b/arch/m68k/include/asm/m527xsim.h
index 83db8106f50a..71aa5104d3d6 100644
--- a/arch/m68k/include/asm/m527xsim.h
+++ b/arch/m68k/include/asm/m527xsim.h
@@ -60,6 +60,7 @@
#define MCF_IRQ_FECENTC1 (MCFINT2_VECBASE + MCFINT2_FECENTC1)
#define MCF_IRQ_QSPI (MCFINT_VECBASE + MCFINT_QSPI)
+#define MCF_IRQ_PIT1 (MCFINT_VECBASE + MCFINT_PIT1)
/*
* SDRAM configuration registers.
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index 497c31c803ff..4acb3c0a642e 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -52,7 +52,7 @@
#define MCF_IRQ_FECENTC0 (MCFINT_VECBASE + MCFINT_FECENTC0)
#define MCF_IRQ_QSPI (MCFINT_VECBASE + MCFINT_QSPI)
-
+#define MCF_IRQ_PIT1 (MCFINT_VECBASE + MCFINT_PIT1)
/*
* SDRAM configuration registers.
*/
diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m532xsim.h
index 29b66e21413a..5ca7b298c6eb 100644
--- a/arch/m68k/include/asm/m532xsim.h
+++ b/arch/m68k/include/asm/m532xsim.h
@@ -82,6 +82,9 @@
#define MCFINTC1_SIMR 0xFC04C01C
#define MCFINTC1_CIMR 0xFC04C01D
#define MCFINTC1_ICR0 0xFC04C040
+#define MCFINTC2_SIMR (0)
+#define MCFINTC2_CIMR (0)
+#define MCFINTC2_ICR0 (0)
#define MCFSIM_ICR_TIMER1 (0xFC048040+32)
#define MCFSIM_ICR_TIMER2 (0xFC048040+33)
@@ -135,6 +138,20 @@
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
+
+/*
+ * Power Management
+ */
+#define MCFPM_WCR 0xfc040013
+#define MCFPM_PPMSR0 0xfc04002c
+#define MCFPM_PPMCR0 0xfc04002d
+#define MCFPM_PPMSR1 0xfc04002e
+#define MCFPM_PPMCR1 0xfc04002f
+#define MCFPM_PPMHR0 0xfc040030
+#define MCFPM_PPMLR0 0xfc040034
+#define MCFPM_PPMHR1 0xfc040038
+#define MCFPM_LPCR 0xec090007
+
/*********************************************************************
*
* Inter-IC (I2C) Module
diff --git a/arch/m68k/include/asm/m5441xsim.h b/arch/m68k/include/asm/m5441xsim.h
new file mode 100644
index 000000000000..cc798ab9524b
--- /dev/null
+++ b/arch/m68k/include/asm/m5441xsim.h
@@ -0,0 +1,276 @@
+/*
+ * m5441xsim.h -- Coldfire 5441x register definitions
+ *
+ * (C) Copyright 2012, Steven King <sfking@fdwdc.com>
+*/
+
+#ifndef m5441xsim_h
+#define m5441xsim_h
+
+#define CPU_NAME "COLDFIRE(m5441x)"
+#define CPU_INSTR_PER_JIFFY 2
+#define MCF_BUSCLK (MCF_CLK / 2)
+
+#include <asm/m54xxacr.h>
+
+/*
+ * Reset Controller Module.
+ */
+
+#define MCF_RCR 0xec090000
+#define MCF_RSR 0xec090001
+
+#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
+#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
+
+/*
+ * Interrupt Controller Modules.
+ */
+/* the 5441x have 3 interrupt controllers, each control 64 interrupts */
+#define MCFINT_VECBASE 64
+#define MCFINT0_VECBASE MCFINT_VECBASE
+#define MCFINT1_VECBASE (MCFINT0_VECBASE + 64)
+#define MCFINT2_VECBASE (MCFINT1_VECBASE + 64)
+
+/* interrupt controller 0 */
+#define MCFINTC0_SIMR 0xfc04801c
+#define MCFINTC0_CIMR 0xfc04801d
+#define MCFINTC0_ICR0 0xfc048040
+/* interrupt controller 1 */
+#define MCFINTC1_SIMR 0xfc04c01c
+#define MCFINTC1_CIMR 0xfc04c01d
+#define MCFINTC1_ICR0 0xfc04c040
+/* interrupt controller 2 */
+#define MCFINTC2_SIMR 0xfc05001c
+#define MCFINTC2_CIMR 0xfc05001d
+#define MCFINTC2_ICR0 0xfc050040
+
+/* on interrupt controller 0 */
+#define MCFINT0_EPORT0 1
+#define MCFINT0_UART0 26
+#define MCFINT0_UART1 27
+#define MCFINT0_UART2 28
+#define MCFINT0_UART3 29
+#define MCFINT0_I2C0 30
+#define MCFINT0_DSPI0 31
+
+#define MCFINT0_TIMER0 32
+#define MCFINT0_TIMER1 33
+#define MCFINT0_TIMER2 34
+#define MCFINT0_TIMER3 35
+
+#define MCFINT0_FECRX0 36
+#define MCFINT0_FECTX0 40
+#define MCFINT0_FECENTC0 42
+
+#define MCFINT0_FECRX1 49
+#define MCFINT0_FECTX1 53
+#define MCFINT0_FECENTC1 55
+
+/* on interrupt controller 1 */
+#define MCFINT1_UART4 48
+#define MCFINT1_UART5 49
+#define MCFINT1_UART6 50
+#define MCFINT1_UART7 51
+#define MCFINT1_UART8 52
+#define MCFINT1_UART9 53
+#define MCFINT1_DSPI1 54
+#define MCFINT1_DSPI2 55
+#define MCFINT1_DSPI3 56
+#define MCFINT1_I2C1 57
+#define MCFINT1_I2C2 58
+#define MCFINT1_I2C3 59
+#define MCFINT1_I2C4 60
+#define MCFINT1_I2C5 61
+
+/* on interrupt controller 2 */
+#define MCFINT2_PIT0 13
+#define MCFINT2_PIT1 14
+#define MCFINT2_PIT2 15
+#define MCFINT2_PIT3 16
+#define MCFINT2_RTC 26
+
+/*
+ * PIT timer module.
+ */
+#define MCFPIT_BASE0 0xFC080000 /* Base address of TIMER0 */
+#define MCFPIT_BASE1 0xFC084000 /* Base address of TIMER1 */
+#define MCFPIT_BASE2 0xFC088000 /* Base address of TIMER2 */
+#define MCFPIT_BASE3 0xFC08C000 /* Base address of TIMER3 */
+
+
+#define MCF_IRQ_PIT1 (MCFINT2_VECBASE + MCFINT2_PIT1)
+
+/*
+ * Power Management
+ */
+#define MCFPM_WCR 0xfc040013
+#define MCFPM_PPMSR0 0xfc04002c
+#define MCFPM_PPMCR0 0xfc04002d
+#define MCFPM_PPMSR1 0xfc04002e
+#define MCFPM_PPMCR1 0xfc04002f
+#define MCFPM_PPMHR0 0xfc040030
+#define MCFPM_PPMLR0 0xfc040034
+#define MCFPM_PPMHR1 0xfc040038
+#define MCFPM_PPMLR1 0xfc04003c
+#define MCFPM_LPCR 0xec090007
+/*
+ * UART module.
+ */
+#define MCFUART_BASE0 0xfc060000 /* Base address of UART0 */
+#define MCFUART_BASE1 0xfc064000 /* Base address of UART1 */
+#define MCFUART_BASE2 0xfc068000 /* Base address of UART2 */
+#define MCFUART_BASE3 0xfc06c000 /* Base address of UART3 */
+#define MCFUART_BASE4 0xec060000 /* Base address of UART4 */
+#define MCFUART_BASE5 0xec064000 /* Base address of UART5 */
+#define MCFUART_BASE6 0xec068000 /* Base address of UART6 */
+#define MCFUART_BASE7 0xec06c000 /* Base address of UART7 */
+#define MCFUART_BASE8 0xec070000 /* Base address of UART8 */
+#define MCFUART_BASE9 0xec074000 /* Base address of UART9 */
+
+#define MCF_IRQ_UART0 (MCFINT0_VECBASE + MCFINT0_UART0)
+#define MCF_IRQ_UART1 (MCFINT0_VECBASE + MCFINT0_UART1)
+#define MCF_IRQ_UART2 (MCFINT0_VECBASE + MCFINT0_UART2)
+#define MCF_IRQ_UART3 (MCFINT0_VECBASE + MCFINT0_UART3)
+#define MCF_IRQ_UART4 (MCFINT1_VECBASE + MCFINT1_UART4)
+#define MCF_IRQ_UART5 (MCFINT1_VECBASE + MCFINT1_UART5)
+#define MCF_IRQ_UART6 (MCFINT1_VECBASE + MCFINT1_UART6)
+#define MCF_IRQ_UART7 (MCFINT1_VECBASE + MCFINT1_UART7)
+#define MCF_IRQ_UART8 (MCFINT1_VECBASE + MCFINT1_UART8)
+#define MCF_IRQ_UART9 (MCFINT1_VECBASE + MCFINT1_UART9)
+/*
+ * FEC modules.
+ */
+#define MCFFEC_BASE0 0xfc0d4000
+#define MCFFEC_SIZE0 0x800
+#define MCF_IRQ_FECRX0 (MCFINT0_VECBASE + MCFINT0_FECRX0)
+#define MCF_IRQ_FECTX0 (MCFINT0_VECBASE + MCFINT0_FECTX0)
+#define MCF_IRQ_FECENTC0 (MCFINT0_VECBASE + MCFINT0_FECENTC0)
+
+#define MCFFEC_BASE1 0xfc0d8000
+#define MCFFEC_SIZE1 0x800
+#define MCF_IRQ_FECRX1 (MCFINT0_VECBASE + MCFINT0_FECRX1)
+#define MCF_IRQ_FECTX1 (MCFINT0_VECBASE + MCFINT0_FECTX1)
+#define MCF_IRQ_FECENTC1 (MCFINT0_VECBASE + MCFINT0_FECENTC1)
+/*
+ * I2C modules.
+ */
+#define MCFI2C_BASE0 0xfc058000
+#define MCFI2C_SIZE0 0x20
+#define MCFI2C_BASE1 0xfc038000
+#define MCFI2C_SIZE1 0x20
+#define MCFI2C_BASE2 0xec010000
+#define MCFI2C_SIZE2 0x20
+#define MCFI2C_BASE3 0xec014000
+#define MCFI2C_SIZE3 0x20
+#define MCFI2C_BASE4 0xec018000
+#define MCFI2C_SIZE4 0x20
+#define MCFI2C_BASE5 0xec01c000
+#define MCFI2C_SIZE5 0x20
+
+#define MCF_IRQ_I2C0 (MCFINT0_VECBASE + MCFINT0_I2C0)
+#define MCF_IRQ_I2C1 (MCFINT1_VECBASE + MCFINT1_I2C1)
+#define MCF_IRQ_I2C2 (MCFINT1_VECBASE + MCFINT1_I2C2)
+#define MCF_IRQ_I2C3 (MCFINT1_VECBASE + MCFINT1_I2C3)
+#define MCF_IRQ_I2C4 (MCFINT1_VECBASE + MCFINT1_I2C4)
+#define MCF_IRQ_I2C5 (MCFINT1_VECBASE + MCFINT1_I2C5)
+/*
+ * EPORT Module.
+ */
+#define MCFEPORT_EPPAR 0xfc090000
+#define MCFEPORT_EPIER 0xfc090003
+#define MCFEPORT_EPFR 0xfc090006
+/*
+ * RTC Module.
+ */
+#define MCFRTC_BASE 0xfc0a8000
+#define MCFRTC_SIZE (0xfc0a8840 - 0xfc0a8000)
+#define MCF_IRQ_RTC (MCFINT2_VECBASE + MCFINT2_RTC)
+
+/*
+ * GPIO Module.
+ */
+#define MCFGPIO_PODR_A 0xec094000
+#define MCFGPIO_PODR_B 0xec094001
+#define MCFGPIO_PODR_C 0xec094002
+#define MCFGPIO_PODR_D 0xec094003
+#define MCFGPIO_PODR_E 0xec094004
+#define MCFGPIO_PODR_F 0xec094005
+#define MCFGPIO_PODR_G 0xec094006
+#define MCFGPIO_PODR_H 0xec094007
+#define MCFGPIO_PODR_I 0xec094008
+#define MCFGPIO_PODR_J 0xec094009
+#define MCFGPIO_PODR_K 0xec09400a
+
+#define MCFGPIO_PDDR_A 0xec09400c
+#define MCFGPIO_PDDR_B 0xec09400d
+#define MCFGPIO_PDDR_C 0xec09400e
+#define MCFGPIO_PDDR_D 0xec09400f
+#define MCFGPIO_PDDR_E 0xec094010
+#define MCFGPIO_PDDR_F 0xec094011
+#define MCFGPIO_PDDR_G 0xec094012
+#define MCFGPIO_PDDR_H 0xec094013
+#define MCFGPIO_PDDR_I 0xec094014
+#define MCFGPIO_PDDR_J 0xec094015
+#define MCFGPIO_PDDR_K 0xec094016
+
+#define MCFGPIO_PPDSDR_A 0xec094018
+#define MCFGPIO_PPDSDR_B 0xec094019
+#define MCFGPIO_PPDSDR_C 0xec09401a
+#define MCFGPIO_PPDSDR_D 0xec09401b
+#define MCFGPIO_PPDSDR_E 0xec09401c
+#define MCFGPIO_PPDSDR_F 0xec09401d
+#define MCFGPIO_PPDSDR_G 0xec09401e
+#define MCFGPIO_PPDSDR_H 0xec09401f
+#define MCFGPIO_PPDSDR_I 0xec094020
+#define MCFGPIO_PPDSDR_J 0xec094021
+#define MCFGPIO_PPDSDR_K 0xec094022
+
+#define MCFGPIO_PCLRR_A 0xec094024
+#define MCFGPIO_PCLRR_B 0xec094025
+#define MCFGPIO_PCLRR_C 0xec094026
+#define MCFGPIO_PCLRR_D 0xec094027
+#define MCFGPIO_PCLRR_E 0xec094028
+#define MCFGPIO_PCLRR_F 0xec094029
+#define MCFGPIO_PCLRR_G 0xec09402a
+#define MCFGPIO_PCLRR_H 0xec09402b
+#define MCFGPIO_PCLRR_I 0xec09402c
+#define MCFGPIO_PCLRR_J 0xec09402d
+#define MCFGPIO_PCLRR_K 0xec09402e
+
+#define MCFGPIO_PAR_FBCTL 0xec094048
+#define MCFGPIO_PAR_BE 0xec094049
+#define MCFGPIO_PAR_CS 0xec09404a
+#define MCFGPIO_PAR_CANI2C 0xec09404b
+#define MCFGPIO_PAR_IRQ0H 0xec09404c
+#define MCFGPIO_PAR_IRQ0L 0xec09404d
+#define MCFGPIO_PAR_DSPIOWH 0xec09404e
+#define MCFGPIO_PAR_DSPIOWL 0xec09404f
+#define MCFGPIO_PAR_TIMER 0xec094050
+#define MCFGPIO_PAR_UART2 0xec094051
+#define MCFGPIO_PAR_UART1 0xec094052
+#define MCFGPIO_PAR_UART0 0xec094053
+#define MCFGPIO_PAR_SDHCH 0xec094054
+#define MCFGPIO_PAR_SDHCL 0xec094055
+#define MCFGPIO_PAR_SIMP0H 0xec094056
+#define MCFGPIO_PAR_SIMP0L 0xec094057
+#define MCFGPIO_PAR_SSI0H 0xec094058
+#define MCFGPIO_PAR_SSI0L 0xec094059
+#define MCFGPIO_PAR_DEBUGH1 0xec09405a
+#define MCFGPIO_PAR_DEBUGH0 0xec09405b
+#define MCFGPIO_PAR_DEBUGl 0xec09405c
+#define MCFGPIO_PAR_FEC 0xec09405e
+
+/* generalization for generic gpio support */
+#define MCFGPIO_PODR MCFGPIO_PODR_A
+#define MCFGPIO_PDDR MCFGPIO_PDDR_A
+#define MCFGPIO_PPDR MCFGPIO_PPDSDR_A
+#define MCFGPIO_SETR MCFGPIO_PPDSDR_A
+#define MCFGPIO_CLRR MCFGPIO_PCLRR_A
+
+#define MCFGPIO_IRQ_MIN 17
+#define MCFGPIO_IRQ_MAX 24
+#define MCFGPIO_IRQ_VECBASE (MCFINT_VECBASE - MCFGPIO_IRQ_MIN)
+#define MCFGPIO_PIN_MAX 87
+
+#endif /* m5441xsim_h */
diff --git a/arch/m68k/include/asm/m54xxacr.h b/arch/m68k/include/asm/m54xxacr.h
index 47906aafbf67..192bbfeabf70 100644
--- a/arch/m68k/include/asm/m54xxacr.h
+++ b/arch/m68k/include/asm/m54xxacr.h
@@ -55,6 +55,10 @@
#define ICACHE_SIZE 0x8000 /* instruction - 32k */
#define DCACHE_SIZE 0x8000 /* data - 32k */
+#elif defined(CONFIG_M5441x)
+
+#define ICACHE_SIZE 0x2000 /* instruction - 8k */
+#define DCACHE_SIZE 0x2000 /* data - 8k */
#endif
#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */
diff --git a/arch/m68k/include/asm/m54xxpci.h b/arch/m68k/include/asm/m54xxpci.h
new file mode 100644
index 000000000000..6fbf54f72f2e
--- /dev/null
+++ b/arch/m68k/include/asm/m54xxpci.h
@@ -0,0 +1,138 @@
+/****************************************************************************/
+
+/*
+ * m54xxpci.h -- ColdFire 547x and 548x PCI bus support
+ *
+ * (C) Copyright 2011, Greg Ungerer <gerg@uclinux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/****************************************************************************/
+#ifndef M54XXPCI_H
+#define M54XXPCI_H
+/****************************************************************************/
+
+/*
+ * The core set of PCI support registers are mapped into the MBAR region.
+ */
+#define PCIIDR (CONFIG_MBAR + 0xb00) /* PCI device/vendor ID */
+#define PCISCR (CONFIG_MBAR + 0xb04) /* PCI status/command */
+#define PCICCRIR (CONFIG_MBAR + 0xb08) /* PCI class/revision */
+#define PCICR1 (CONFIG_MBAR + 0xb0c) /* PCI configuration 1 */
+#define PCIBAR0 (CONFIG_MBAR + 0xb10) /* PCI base address 0 */
+#define PCIBAR1 (CONFIG_MBAR + 0xb14) /* PCI base address 1 */
+#define PCICCPR (CONFIG_MBAR + 0xb28) /* PCI cardbus CIS pointer */
+#define PCISID (CONFIG_MBAR + 0xb2c) /* PCI subsystem IDs */
+#define PCIERBAR (CONFIG_MBAR + 0xb30) /* PCI expansion ROM */
+#define PCICPR (CONFIG_MBAR + 0xb34) /* PCI capabilities pointer */
+#define PCICR2 (CONFIG_MBAR + 0xb3c) /* PCI configuration 2 */
+
+#define PCIGSCR (CONFIG_MBAR + 0xb60) /* Global status/control */
+#define PCITBATR0 (CONFIG_MBAR + 0xb64) /* Target base translation 0 */
+#define PCITBATR1 (CONFIG_MBAR + 0xb68) /* Target base translation 1 */
+#define PCITCR (CONFIG_MBAR + 0xb6c) /* Target control */
+#define PCIIW0BTAR (CONFIG_MBAR + 0xb70) /* Initiator window 0 */
+#define PCIIW1BTAR (CONFIG_MBAR + 0xb74) /* Initiator window 1 */
+#define PCIIW2BTAR (CONFIG_MBAR + 0xb78) /* Initiator window 2 */
+#define PCIIWCR (CONFIG_MBAR + 0xb80) /* Initiator window config */
+#define PCIICR (CONFIG_MBAR + 0xb84) /* Initiator control */
+#define PCIISR (CONFIG_MBAR + 0xb88) /* Initiator status */
+#define PCICAR (CONFIG_MBAR + 0xbf8) /* Configuration address */
+
+#define PCITPSR (CONFIG_MBAR + 0x8400) /* TX packet size */
+#define PCITSAR (CONFIG_MBAR + 0x8404) /* TX start address */
+#define PCITTCR (CONFIG_MBAR + 0x8408) /* TX transaction control */
+#define PCITER (CONFIG_MBAR + 0x840c) /* TX enables */
+#define PCITNAR (CONFIG_MBAR + 0x8410) /* TX next address */
+#define PCITLWR (CONFIG_MBAR + 0x8414) /* TX last word */
+#define PCITDCR (CONFIG_MBAR + 0x8418) /* TX done counts */
+#define PCITSR (CONFIG_MBAR + 0x841c) /* TX status */
+#define PCITFDR (CONFIG_MBAR + 0x8440) /* TX FIFO data */
+#define PCITFSR (CONFIG_MBAR + 0x8444) /* TX FIFO status */
+#define PCITFCR (CONFIG_MBAR + 0x8448) /* TX FIFO control */
+#define PCITFAR (CONFIG_MBAR + 0x844c) /* TX FIFO alarm */
+#define PCITFRPR (CONFIG_MBAR + 0x8450) /* TX FIFO read pointer */
+#define PCITFWPR (CONFIG_MBAR + 0x8454) /* TX FIFO write pointer */
+
+#define PCIRPSR (CONFIG_MBAR + 0x8480) /* RX packet size */
+#define PCIRSAR (CONFIG_MBAR + 0x8484) /* RX start address */
+#define PCIRTCR (CONFIG_MBAR + 0x8488) /* RX transaction control */
+#define PCIRER (CONFIG_MBAR + 0x848c) /* RX enables */
+#define PCIRNAR (CONFIG_MBAR + 0x8490) /* RX next address */
+#define PCIRDCR (CONFIG_MBAR + 0x8498) /* RX done counts */
+#define PCIRSR (CONFIG_MBAR + 0x849c) /* RX status */
+#define PCIRFDR (CONFIG_MBAR + 0x84c0) /* RX FIFO data */
+#define PCIRFSR (CONFIG_MBAR + 0x84c4) /* RX FIFO status */
+#define PCIRFCR (CONFIG_MBAR + 0x84c8) /* RX FIFO control */
+#define PCIRFAR (CONFIG_MBAR + 0x84cc) /* RX FIFO alarm */
+#define PCIRFRPR (CONFIG_MBAR + 0x84d0) /* RX FIFO read pointer */
+#define PCIRFWPR (CONFIG_MBAR + 0x84d4) /* RX FIFO write pointer */
+
+#define PACR (CONFIG_MBAR + 0xc00) /* PCI arbiter control */
+#define PASR (COFNIG_MBAR + 0xc04) /* PCI arbiter status */
+
+/*
+ * Definitions for the Global status and control register.
+ */
+#define PCIGSCR_PE 0x20000000 /* Parity error detected */
+#define PCIGSCR_SE 0x10000000 /* System error detected */
+#define PCIGSCR_XCLKBIN 0x07000000 /* XLB2CLKIN mask */
+#define PCIGSCR_PEE 0x00002000 /* Parity error intr enable */
+#define PCIGSCR_SEE 0x00001000 /* System error intr enable */
+#define PCIGSCR_RESET 0x00000001 /* Reset bit */
+
+/*
+ * Bit definitions for the PCICAR configuration address register.
+ */
+#define PCICAR_E 0x80000000 /* Enable config space */
+#define PCICAR_BUSN 16 /* Move bus bits */
+#define PCICAR_DEVFNN 8 /* Move devfn bits */
+#define PCICAR_DWORDN 0 /* Move dword bits */
+
+/*
+ * The initiator windows hold the memory and IO mapping information.
+ * This macro creates the register values from the desired addresses.
+ */
+#define WXBTAR(hostaddr, pciaddr, size) \
+ (((hostaddr) & 0xff000000) | \
+ ((((size) - 1) & 0xff000000) >> 8) | \
+ (((pciaddr) & 0xff000000) >> 16))
+
+#define PCIIWCR_W0_MEM 0x00000000 /* Window 0 is memory */
+#define PCIIWCR_W0_IO 0x08000000 /* Window 0 is IO */
+#define PCIIWCR_W0_MRD 0x00000000 /* Window 0 memory read */
+#define PCIIWCR_W0_MRDL 0x02000000 /* Window 0 memory read line */
+#define PCIIWCR_W0_MRDM 0x04000000 /* Window 0 memory read mult */
+#define PCIIWCR_W0_E 0x01000000 /* Window 0 enable */
+
+#define PCIIWCR_W1_MEM 0x00000000 /* Window 0 is memory */
+#define PCIIWCR_W1_IO 0x00080000 /* Window 0 is IO */
+#define PCIIWCR_W1_MRD 0x00000000 /* Window 0 memory read */
+#define PCIIWCR_W1_MRDL 0x00020000 /* Window 0 memory read line */
+#define PCIIWCR_W1_MRDM 0x00040000 /* Window 0 memory read mult */
+#define PCIIWCR_W1_E 0x00010000 /* Window 0 enable */
+
+/*
+ * Bit definitions for the PCIBATR registers.
+ */
+#define PCITBATR0_E 0x00000001 /* Enable window 0 */
+#define PCITBATR1_E 0x00000001 /* Enable window 1 */
+
+/*
+ * PCI arbiter support definitions and macros.
+ */
+#define PACR_INTMPRI 0x00000001
+#define PACR_EXTMPRI(x) (((x) & 0x1f) << 1)
+#define PACR_INTMINTE 0x00010000
+#define PACR_EXTMINTE(x) (((x) & 0x1f) << 17)
+#define PACR_PKMD 0x40000000
+#define PACR_DS 0x80000000
+
+#define PCICR1_CL(x) ((x) & 0xf) /* Cacheline size field */
+#define PCICR1_LT(x) (((x) & 0xff) << 8) /* Latency timer field */
+
+/****************************************************************************/
+#endif /* M54XXPCI_H */
diff --git a/arch/m68k/include/asm/m54xxsim.h b/arch/m68k/include/asm/m54xxsim.h
index ae56b8848a9d..d3c5e0dbdadf 100644
--- a/arch/m68k/include/asm/m54xxsim.h
+++ b/arch/m68k/include/asm/m54xxsim.h
@@ -81,4 +81,7 @@
#define MCF_PAR_PSC_RTS_RTS (0x30)
#define MCF_PAR_PSC_CANRX (0x40)
+#define MCF_PAR_PCIBG (CONFIG_MBAR + 0xa48) /* PCI bus grant */
+#define MCF_PAR_PCIBR (CONFIG_MBAR + 0xa4a) /* PCI */
+
#endif /* m54xxsim_h */
diff --git a/arch/m68k/include/asm/mcfne.h b/arch/m68k/include/asm/mcf8390.h
index bf638be0958c..a72a20819a54 100644
--- a/arch/m68k/include/asm/mcfne.h
+++ b/arch/m68k/include/asm/mcf8390.h
@@ -1,7 +1,7 @@
/****************************************************************************/
/*
- * mcfne.h -- NE2000 in ColdFire eval boards.
+ * mcf8390.h -- NS8390 support for ColdFire eval boards.
*
* (C) Copyright 1999-2000, Greg Ungerer (gerg@snapgear.com)
* (C) Copyright 2000, Lineo (www.lineo.com)
@@ -14,8 +14,8 @@
*/
/****************************************************************************/
-#ifndef mcfne_h
-#define mcfne_h
+#ifndef mcf8390_h
+#define mcf8390_h
/****************************************************************************/
@@ -37,6 +37,7 @@
#if defined(CONFIG_ARN5206)
#define NE2000_ADDR 0x40000300
#define NE2000_ODDOFFSET 0x00010000
+#define NE2000_ADDRSIZE 0x00020000
#define NE2000_IRQ_VECTOR 0xf0
#define NE2000_IRQ_PRIORITY 2
#define NE2000_IRQ_LEVEL 4
@@ -46,6 +47,7 @@
#if defined(CONFIG_M5206eC3)
#define NE2000_ADDR 0x40000300
#define NE2000_ODDOFFSET 0x00010000
+#define NE2000_ADDRSIZE 0x00020000
#define NE2000_IRQ_VECTOR 0x1c
#define NE2000_IRQ_PRIORITY 2
#define NE2000_IRQ_LEVEL 4
@@ -54,6 +56,7 @@
#if defined(CONFIG_M5206e) && defined(CONFIG_NETtel)
#define NE2000_ADDR 0x30000300
+#define NE2000_ADDRSIZE 0x00001000
#define NE2000_IRQ_VECTOR 25
#define NE2000_IRQ_PRIORITY 1
#define NE2000_IRQ_LEVEL 3
@@ -63,6 +66,7 @@
#if defined(CONFIG_M5307C3)
#define NE2000_ADDR 0x40000300
#define NE2000_ODDOFFSET 0x00010000
+#define NE2000_ADDRSIZE 0x00020000
#define NE2000_IRQ_VECTOR 0x1b
#define NE2000_BYTE volatile unsigned short
#endif
@@ -70,6 +74,7 @@
#if defined(CONFIG_M5272) && defined(CONFIG_NETtel)
#define NE2000_ADDR 0x30600300
#define NE2000_ODDOFFSET 0x00008000
+#define NE2000_ADDRSIZE 0x00010000
#define NE2000_IRQ_VECTOR 67
#undef BSWAP
#define BSWAP(w) (w)
@@ -82,6 +87,7 @@
#define NE2000_ADDR0 0x30600300
#define NE2000_ADDR1 0x30800300
#define NE2000_ODDOFFSET 0x00008000
+#define NE2000_ADDRSIZE 0x00010000
#define NE2000_IRQ_VECTOR0 27
#define NE2000_IRQ_VECTOR1 29
#undef BSWAP
@@ -94,6 +100,7 @@
#if defined(CONFIG_M5307) && defined(CONFIG_SECUREEDGEMP3)
#define NE2000_ADDR 0x30600300
#define NE2000_ODDOFFSET 0x00008000
+#define NE2000_ADDRSIZE 0x00010000
#define NE2000_IRQ_VECTOR 27
#undef BSWAP
#define BSWAP(w) (w)
@@ -105,6 +112,7 @@
#if defined(CONFIG_ARN5307)
#define NE2000_ADDR 0xfe600300
#define NE2000_ODDOFFSET 0x00010000
+#define NE2000_ADDRSIZE 0x00020000
#define NE2000_IRQ_VECTOR 0x1b
#define NE2000_IRQ_PRIORITY 2
#define NE2000_IRQ_LEVEL 3
@@ -114,129 +122,10 @@
#if defined(CONFIG_M5407C3)
#define NE2000_ADDR 0x40000300
#define NE2000_ODDOFFSET 0x00010000
+#define NE2000_ADDRSIZE 0x00020000
#define NE2000_IRQ_VECTOR 0x1b
#define NE2000_BYTE volatile unsigned short
#endif
/****************************************************************************/
-
-/*
- * Side-band address space for odd address requires re-mapping
- * many of the standard ISA access functions.
- */
-#ifdef NE2000_ODDOFFSET
-
-#undef outb
-#undef outb_p
-#undef inb
-#undef inb_p
-#undef outsb
-#undef outsw
-#undef insb
-#undef insw
-
-#define outb ne2000_outb
-#define inb ne2000_inb
-#define outb_p ne2000_outb
-#define inb_p ne2000_inb
-#define outsb ne2000_outsb
-#define outsw ne2000_outsw
-#define insb ne2000_insb
-#define insw ne2000_insw
-
-
-#ifndef COLDFIRE_NE2000_FUNCS
-
-void ne2000_outb(unsigned int val, unsigned int addr);
-int ne2000_inb(unsigned int addr);
-void ne2000_insb(unsigned int addr, void *vbuf, int unsigned long len);
-void ne2000_insw(unsigned int addr, void *vbuf, unsigned long len);
-void ne2000_outsb(unsigned int addr, void *vbuf, unsigned long len);
-void ne2000_outsw(unsigned int addr, void *vbuf, unsigned long len);
-
-#else
-
-/*
- * This macro converts a conventional register address into the
- * real memory pointer of the mapped NE2000 device.
- * On most NE2000 implementations on ColdFire boards the chip is
- * mapped in kinda funny, due to its ISA heritage.
- */
-#define NE2000_PTR(addr) ((addr&0x1)?(NE2000_ODDOFFSET+addr-1):(addr))
-#define NE2000_DATA_PTR(addr) (addr)
-
-
-void ne2000_outb(unsigned int val, unsigned int addr)
-{
- NE2000_BYTE *rp;
-
- rp = (NE2000_BYTE *) NE2000_PTR(addr);
- *rp = RSWAP(val);
-}
-
-int ne2000_inb(unsigned int addr)
-{
- NE2000_BYTE *rp, val;
-
- rp = (NE2000_BYTE *) NE2000_PTR(addr);
- val = *rp;
- return((int) ((NE2000_BYTE) RSWAP(val)));
-}
-
-void ne2000_insb(unsigned int addr, void *vbuf, int unsigned long len)
-{
- NE2000_BYTE *rp, val;
- unsigned char *buf;
-
- buf = (unsigned char *) vbuf;
- rp = (NE2000_BYTE *) NE2000_DATA_PTR(addr);
- for (; (len > 0); len--) {
- val = *rp;
- *buf++ = RSWAP(val);
- }
-}
-
-void ne2000_insw(unsigned int addr, void *vbuf, unsigned long len)
-{
- volatile unsigned short *rp;
- unsigned short w, *buf;
-
- buf = (unsigned short *) vbuf;
- rp = (volatile unsigned short *) NE2000_DATA_PTR(addr);
- for (; (len > 0); len--) {
- w = *rp;
- *buf++ = BSWAP(w);
- }
-}
-
-void ne2000_outsb(unsigned int addr, const void *vbuf, unsigned long len)
-{
- NE2000_BYTE *rp, val;
- unsigned char *buf;
-
- buf = (unsigned char *) vbuf;
- rp = (NE2000_BYTE *) NE2000_DATA_PTR(addr);
- for (; (len > 0); len--) {
- val = *buf++;
- *rp = RSWAP(val);
- }
-}
-
-void ne2000_outsw(unsigned int addr, const void *vbuf, unsigned long len)
-{
- volatile unsigned short *rp;
- unsigned short w, *buf;
-
- buf = (unsigned short *) vbuf;
- rp = (volatile unsigned short *) NE2000_DATA_PTR(addr);
- for (; (len > 0); len--) {
- w = *buf++;
- *rp = BSWAP(w);
- }
-}
-
-#endif /* COLDFIRE_NE2000_FUNCS */
-#endif /* NE2000_OFFOFFSET */
-
-/****************************************************************************/
-#endif /* mcfne_h */
+#endif /* mcf8390_h */
diff --git a/arch/m68k/include/asm/mcfclk.h b/arch/m68k/include/asm/mcfclk.h
new file mode 100644
index 000000000000..b676a02bb392
--- /dev/null
+++ b/arch/m68k/include/asm/mcfclk.h
@@ -0,0 +1,43 @@
+/*
+ * mcfclk.h -- coldfire specific clock structure
+ */
+
+
+#ifndef mcfclk_h
+#define mcfclk_h
+
+struct clk;
+
+#ifdef MCFPM_PPMCR0
+struct clk_ops {
+ void (*enable)(struct clk *);
+ void (*disable)(struct clk *);
+};
+
+struct clk {
+ const char *name;
+ struct clk_ops *clk_ops;
+ unsigned long rate;
+ unsigned long enabled;
+ u8 slot;
+};
+
+extern struct clk *mcf_clks[];
+extern struct clk_ops clk_ops0;
+#ifdef MCFPM_PPMCR1
+extern struct clk_ops clk_ops1;
+#endif /* MCFPM_PPMCR1 */
+
+#define DEFINE_CLK(clk_bank, clk_name, clk_slot, clk_rate) \
+static struct clk __clk_##clk_bank##_##clk_slot = { \
+ .name = clk_name, \
+ .clk_ops = &clk_ops##clk_bank, \
+ .rate = clk_rate, \
+ .slot = clk_slot, \
+}
+
+void __clk_init_enabled(struct clk *);
+void __clk_init_disabled(struct clk *);
+#endif /* MCFPM_PPMCR0 */
+
+#endif /* mcfclk_h */
diff --git a/arch/m68k/include/asm/mcfgpio.h b/arch/m68k/include/asm/mcfgpio.h
index fe468eaa51e0..fa1059f50dfc 100644
--- a/arch/m68k/include/asm/mcfgpio.h
+++ b/arch/m68k/include/asm/mcfgpio.h
@@ -16,82 +16,289 @@
#ifndef mcfgpio_h
#define mcfgpio_h
-#include <linux/io.h>
+#ifdef CONFIG_GPIOLIB
#include <asm-generic/gpio.h>
+#else
+
+int __mcfgpio_get_value(unsigned gpio);
+void __mcfgpio_set_value(unsigned gpio, int value);
+int __mcfgpio_direction_input(unsigned gpio);
+int __mcfgpio_direction_output(unsigned gpio, int value);
+int __mcfgpio_request(unsigned gpio);
+void __mcfgpio_free(unsigned gpio);
+
+/* our alternate 'gpiolib' functions */
+static inline int __gpio_get_value(unsigned gpio)
+{
+ if (gpio < MCFGPIO_PIN_MAX)
+ return __mcfgpio_get_value(gpio);
+ else
+ return -EINVAL;
+}
+
+static inline void __gpio_set_value(unsigned gpio, int value)
+{
+ if (gpio < MCFGPIO_PIN_MAX)
+ __mcfgpio_set_value(gpio, value);
+}
+
+static inline int __gpio_cansleep(unsigned gpio)
+{
+ if (gpio < MCFGPIO_PIN_MAX)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static inline int __gpio_to_irq(unsigned gpio)
+{
+ return -EINVAL;
+}
+
+static inline int gpio_direction_input(unsigned gpio)
+{
+ if (gpio < MCFGPIO_PIN_MAX)
+ return __mcfgpio_direction_input(gpio);
+ else
+ return -EINVAL;
+}
+
+static inline int gpio_direction_output(unsigned gpio, int value)
+{
+ if (gpio < MCFGPIO_PIN_MAX)
+ return __mcfgpio_direction_output(gpio, value);
+ else
+ return -EINVAL;
+}
+
+static inline int gpio_request(unsigned gpio, const char *label)
+{
+ if (gpio < MCFGPIO_PIN_MAX)
+ return __mcfgpio_request(gpio);
+ else
+ return -EINVAL;
+}
+
+static inline void gpio_free(unsigned gpio)
+{
+ if (gpio < MCFGPIO_PIN_MAX)
+ __mcfgpio_free(gpio);
+}
+
+#endif /* CONFIG_GPIOLIB */
-struct mcf_gpio_chip {
- struct gpio_chip gpio_chip;
- void __iomem *pddr;
- void __iomem *podr;
- void __iomem *ppdr;
- void __iomem *setr;
- void __iomem *clrr;
- const u8 *gpio_to_pinmux;
-};
-
-extern struct mcf_gpio_chip mcf_gpio_chips[];
-extern unsigned int mcf_gpio_chips_size;
-
-int mcf_gpio_direction_input(struct gpio_chip *, unsigned);
-int mcf_gpio_get_value(struct gpio_chip *, unsigned);
-int mcf_gpio_direction_output(struct gpio_chip *, unsigned, int);
-void mcf_gpio_set_value(struct gpio_chip *, unsigned, int);
-void mcf_gpio_set_value_fast(struct gpio_chip *, unsigned, int);
-int mcf_gpio_request(struct gpio_chip *, unsigned);
-void mcf_gpio_free(struct gpio_chip *, unsigned);
/*
- * Define macros to ease the pain of setting up the GPIO tables. There
- * are two cases we need to deal with here, they cover all currently
- * available ColdFire GPIO hardware. There are of course minor differences
- * in the layout and number of bits in each ColdFire part, but the macros
- * take all that in.
+ * The Freescale Coldfire family is quite varied in how they implement GPIO.
+ * Some parts have 8 bit ports, some have 16bit and some have 32bit; some have
+ * only one port, others have multiple ports; some have a single data latch
+ * for both input and output, others have a separate pin data register to read
+ * input; some require a read-modify-write access to change an output, others
+ * have set and clear registers for some of the outputs; Some have all the
+ * GPIOs in a single control area, others have some GPIOs implemented in
+ * different modules.
*
- * Firstly is the conventional GPIO registers where we toggle individual
- * bits in a register, preserving the other bits in the register. For
- * lack of a better term I have called this the slow method.
+ * This implementation attempts accommodate the differences while presenting
+ * a generic interface that will optimize to as few instructions as possible.
+ */
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
+ defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M532x) || defined(CONFIG_M54xx) || \
+ defined(CONFIG_M5441x)
+
+/* These parts have GPIO organized by 8 bit ports */
+
+#define MCFGPIO_PORTTYPE u8
+#define MCFGPIO_PORTSIZE 8
+#define mcfgpio_read(port) __raw_readb(port)
+#define mcfgpio_write(data, port) __raw_writeb(data, port)
+
+#elif defined(CONFIG_M5307) || defined(CONFIG_M5407) || defined(CONFIG_M5272)
+
+/* These parts have GPIO organized by 16 bit ports */
+
+#define MCFGPIO_PORTTYPE u16
+#define MCFGPIO_PORTSIZE 16
+#define mcfgpio_read(port) __raw_readw(port)
+#define mcfgpio_write(data, port) __raw_writew(data, port)
+
+#elif defined(CONFIG_M5249) || defined(CONFIG_M525x)
+
+/* These parts have GPIO organized by 32 bit ports */
+
+#define MCFGPIO_PORTTYPE u32
+#define MCFGPIO_PORTSIZE 32
+#define mcfgpio_read(port) __raw_readl(port)
+#define mcfgpio_write(data, port) __raw_writel(data, port)
+
+#endif
+
+#define mcfgpio_bit(gpio) (1 << ((gpio) % MCFGPIO_PORTSIZE))
+#define mcfgpio_port(gpio) ((gpio) / MCFGPIO_PORTSIZE)
+
+#if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+/*
+ * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses
+ * read-modify-write to change an output and a GPIO module which has separate
+ * set/clr registers to directly change outputs with a single write access.
+ */
+#if defined(CONFIG_M528x)
+/*
+ * The 528x also has GPIOs in other modules (GPT, QADC) which use
+ * read-modify-write as well as those controlled by the EPORT and GPIO modules.
*/
-#define MCFGPS(mlabel, mbase, mngpio, mpddr, mpodr, mppdr) \
- { \
- .gpio_chip = { \
- .label = #mlabel, \
- .request = mcf_gpio_request, \
- .free = mcf_gpio_free, \
- .direction_input = mcf_gpio_direction_input, \
- .direction_output = mcf_gpio_direction_output,\
- .get = mcf_gpio_get_value, \
- .set = mcf_gpio_set_value, \
- .base = mbase, \
- .ngpio = mngpio, \
- }, \
- .pddr = (void __iomem *) mpddr, \
- .podr = (void __iomem *) mpodr, \
- .ppdr = (void __iomem *) mppdr, \
- }
+#define MCFGPIO_SCR_START 40
+#elif defined(CONFIGM5441x)
+/* The m5441x EPORT doesn't have its own GPIO port, uses PORT C */
+#define MCFGPIO_SCR_START 0
+#else
+#define MCFGPIO_SCR_START 8
+#endif
+#define MCFGPIO_SETR_PORT(gpio) (MCFGPIO_SETR + \
+ mcfgpio_port(gpio - MCFGPIO_SCR_START))
+
+#define MCFGPIO_CLRR_PORT(gpio) (MCFGPIO_CLRR + \
+ mcfgpio_port(gpio - MCFGPIO_SCR_START))
+#else
+
+#define MCFGPIO_SCR_START MCFGPIO_PIN_MAX
+/* with MCFGPIO_SCR == MCFGPIO_PIN_MAX, these will be optimized away */
+#define MCFGPIO_SETR_PORT(gpio) 0
+#define MCFGPIO_CLRR_PORT(gpio) 0
+
+#endif
/*
- * Secondly is the faster case, where we have set and clear registers
- * that allow us to set or clear a bit with a single write, not having
- * to worry about preserving other bits.
+ * Coldfire specific helper functions
*/
-#define MCFGPF(mlabel, mbase, mngpio) \
- { \
- .gpio_chip = { \
- .label = #mlabel, \
- .request = mcf_gpio_request, \
- .free = mcf_gpio_free, \
- .direction_input = mcf_gpio_direction_input, \
- .direction_output = mcf_gpio_direction_output,\
- .get = mcf_gpio_get_value, \
- .set = mcf_gpio_set_value_fast, \
- .base = mbase, \
- .ngpio = mngpio, \
- }, \
- .pddr = (void __iomem *) MCFGPIO_PDDR_##mlabel, \
- .podr = (void __iomem *) MCFGPIO_PODR_##mlabel, \
- .ppdr = (void __iomem *) MCFGPIO_PPDSDR_##mlabel, \
- .setr = (void __iomem *) MCFGPIO_PPDSDR_##mlabel, \
- .clrr = (void __iomem *) MCFGPIO_PCLRR_##mlabel, \
- }
+/* return the port pin data register for a gpio */
+static inline u32 __mcfgpio_ppdr(unsigned gpio)
+{
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
+ defined(CONFIG_M5307) || defined(CONFIG_M5407)
+ return MCFSIM_PADAT;
+#elif defined(CONFIG_M5272)
+ if (gpio < 16)
+ return MCFSIM_PADAT;
+ else if (gpio < 32)
+ return MCFSIM_PBDAT;
+ else
+ return MCFSIM_PCDAT;
+#elif defined(CONFIG_M5249) || defined(CONFIG_M525x)
+ if (gpio < 32)
+ return MCFSIM2_GPIOREAD;
+ else
+ return MCFSIM2_GPIO1READ;
+#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if !defined(CONFIG_M5441x)
+ if (gpio < 8)
+ return MCFEPORT_EPPDR;
+#if defined(CONFIG_M528x)
+ else if (gpio < 16)
+ return MCFGPTA_GPTPORT;
+ else if (gpio < 24)
+ return MCFGPTB_GPTPORT;
+ else if (gpio < 32)
+ return MCFQADC_PORTQA;
+ else if (gpio < 40)
+ return MCFQADC_PORTQB;
+#endif /* defined(CONFIG_M528x) */
+ else
+#endif /* !defined(CONFIG_M5441x) */
+ return MCFGPIO_PPDR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
+#else
+ return 0;
#endif
+}
+
+/* return the port output data register for a gpio */
+static inline u32 __mcfgpio_podr(unsigned gpio)
+{
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
+ defined(CONFIG_M5307) || defined(CONFIG_M5407)
+ return MCFSIM_PADAT;
+#elif defined(CONFIG_M5272)
+ if (gpio < 16)
+ return MCFSIM_PADAT;
+ else if (gpio < 32)
+ return MCFSIM_PBDAT;
+ else
+ return MCFSIM_PCDAT;
+#elif defined(CONFIG_M5249) || defined(CONFIG_M525x)
+ if (gpio < 32)
+ return MCFSIM2_GPIOWRITE;
+ else
+ return MCFSIM2_GPIO1WRITE;
+#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if !defined(CONFIG_M5441x)
+ if (gpio < 8)
+ return MCFEPORT_EPDR;
+#if defined(CONFIG_M528x)
+ else if (gpio < 16)
+ return MCFGPTA_GPTPORT;
+ else if (gpio < 24)
+ return MCFGPTB_GPTPORT;
+ else if (gpio < 32)
+ return MCFQADC_PORTQA;
+ else if (gpio < 40)
+ return MCFQADC_PORTQB;
+#endif /* defined(CONFIG_M528x) */
+ else
+#endif /* !defined(CONFIG_M5441x) */
+ return MCFGPIO_PODR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
+#else
+ return 0;
+#endif
+}
+
+/* return the port direction data register for a gpio */
+static inline u32 __mcfgpio_pddr(unsigned gpio)
+{
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
+ defined(CONFIG_M5307) || defined(CONFIG_M5407)
+ return MCFSIM_PADDR;
+#elif defined(CONFIG_M5272)
+ if (gpio < 16)
+ return MCFSIM_PADDR;
+ else if (gpio < 32)
+ return MCFSIM_PBDDR;
+ else
+ return MCFSIM_PCDDR;
+#elif defined(CONFIG_M5249) || defined(CONFIG_M525x)
+ if (gpio < 32)
+ return MCFSIM2_GPIOENABLE;
+ else
+ return MCFSIM2_GPIO1ENABLE;
+#elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M532x) || defined(CONFIG_M5441x)
+#if !defined(CONFIG_M5441x)
+ if (gpio < 8)
+ return MCFEPORT_EPDDR;
+#if defined(CONFIG_M528x)
+ else if (gpio < 16)
+ return MCFGPTA_GPTDDR;
+ else if (gpio < 24)
+ return MCFGPTB_GPTDDR;
+ else if (gpio < 32)
+ return MCFQADC_DDRQA;
+ else if (gpio < 40)
+ return MCFQADC_DDRQB;
+#endif /* defined(CONFIG_M528x) */
+ else
+#endif /* !defined(CONFIG_M5441x) */
+ return MCFGPIO_PDDR + mcfgpio_port(gpio - MCFGPIO_SCR_START);
+#else
+ return 0;
+#endif
+}
+
+#endif /* mcfgpio_h */
diff --git a/arch/m68k/include/asm/mcfsim.h b/arch/m68k/include/asm/mcfsim.h
index ebd0304054ad..7a83e619e73b 100644
--- a/arch/m68k/include/asm/mcfsim.h
+++ b/arch/m68k/include/asm/mcfsim.h
@@ -27,6 +27,9 @@
#elif defined(CONFIG_M5249)
#include <asm/m5249sim.h>
#include <asm/mcfintc.h>
+#elif defined(CONFIG_M525x)
+#include <asm/m525xsim.h>
+#include <asm/mcfintc.h>
#elif defined(CONFIG_M527x)
#include <asm/m527xsim.h>
#elif defined(CONFIG_M5272)
@@ -43,6 +46,8 @@
#include <asm/mcfintc.h>
#elif defined(CONFIG_M54xx)
#include <asm/m54xxsim.h>
+#elif defined(CONFIG_M5441x)
+#include <asm/m5441xsim.h>
#endif
/****************************************************************************/
diff --git a/arch/m68k/include/asm/mcftimer.h b/arch/m68k/include/asm/mcftimer.h
index 351c27237874..da2fa43c2e45 100644
--- a/arch/m68k/include/asm/mcftimer.h
+++ b/arch/m68k/include/asm/mcftimer.h
@@ -19,7 +19,7 @@
#define MCFTIMER_TRR 0x04 /* Timer Reference (r/w) */
#define MCFTIMER_TCR 0x08 /* Timer Capture reg (r/w) */
#define MCFTIMER_TCN 0x0C /* Timer Counter reg (r/w) */
-#if defined(CONFIG_M532x)
+#if defined(CONFIG_M532x) || defined(CONFIG_M5441x)
#define MCFTIMER_TER 0x03 /* Timer Event reg (r/w) */
#else
#define MCFTIMER_TER 0x11 /* Timer Event reg (r/w) */
diff --git a/arch/m68k/include/asm/mcfuart.h b/arch/m68k/include/asm/mcfuart.h
index 2d3bc774b3c5..b40c20f66647 100644
--- a/arch/m68k/include/asm/mcfuart.h
+++ b/arch/m68k/include/asm/mcfuart.h
@@ -43,8 +43,8 @@ struct mcf_platform_uart {
#define MCFUART_UFPD 0x30 /* Frac Prec. Divider (r/w) */
#endif
#if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
- defined(CONFIG_M5249) || defined(CONFIG_M5307) || \
- defined(CONFIG_M5407)
+ defined(CONFIG_M5249) || defined(CONFIG_M525x) || \
+ defined(CONFIG_M5307) || defined(CONFIG_M5407)
#define MCFUART_UIVR 0x30 /* Interrupt Vector (r/w) */
#endif
#define MCFUART_UIPR 0x34 /* Input Port (r) */
diff --git a/arch/m68k/include/asm/pci.h b/arch/m68k/include/asm/pci.h
index 4ad0aea48ab4..848c3dfaad50 100644
--- a/arch/m68k/include/asm/pci.h
+++ b/arch/m68k/include/asm/pci.h
@@ -2,6 +2,7 @@
#define _ASM_M68K_PCI_H
#include <asm-generic/pci-dma-compat.h>
+#include <asm-generic/pci.h>
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
@@ -9,4 +10,9 @@
*/
#define PCI_DMA_BUS_IS_PHYS (1)
+#define pcibios_assign_all_busses() 1
+
+#define PCIBIOS_MIN_IO 0x00000100
+#define PCIBIOS_MIN_MEM 0x02000000
+
#endif /* _ASM_M68K_PCI_H */
diff --git a/arch/m68k/include/asm/pinmux.h b/arch/m68k/include/asm/pinmux.h
deleted file mode 100644
index 119ee686dbd1..000000000000
--- a/arch/m68k/include/asm/pinmux.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Coldfire generic GPIO pinmux support.
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef pinmux_h
-#define pinmux_h
-
-#define MCFPINMUX_NONE -1
-
-extern int mcf_pinmux_request(unsigned, unsigned);
-extern void mcf_pinmux_release(unsigned, unsigned);
-
-static inline int mcf_pinmux_is_valid(unsigned pinmux)
-{
- return pinmux != MCFPINMUX_NONE;
-}
-
-#endif
-
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 5c7070e21eb7..068ad49210d6 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -18,6 +18,7 @@ obj-y += setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o
+obj-$(CONFIG_PCI) += pcibios.o
ifndef CONFIG_MMU_SUN3
obj-y += dma.o
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index f6daf6e15d2e..e546a5534dd4 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -16,7 +16,7 @@
#include <asm/pgalloc.h>
-#ifdef CONFIG_MMU
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t flag)
@@ -96,7 +96,7 @@ void dma_free_coherent(struct device *dev, size_t size,
free_pages((unsigned long)vaddr, get_order(size));
}
-#endif /* CONFIG_MMU */
+#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
EXPORT_SYMBOL(dma_alloc_coherent);
EXPORT_SYMBOL(dma_free_coherent);
@@ -105,6 +105,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
switch (dir) {
+ case DMA_BIDIRECTIONAL:
case DMA_TO_DEVICE:
cache_push(handle, size);
break;
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index b8daf64e347d..165ee9f9d5c9 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -1,5 +1,451 @@
-#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
-#include "entry_mm.S"
+/* -*- mode: asm -*-
+ *
+ * linux/arch/m68k/kernel/entry.S
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file README.legal in the main directory of this archive
+ * for more details.
+ *
+ * Linux/m68k support by Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ *
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ */
+
+/*
+ * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
+ * all pointers that used to be 'current' are now entry
+ * number 0 in the 'current_set' list.
+ *
+ * 6/05/00 RZ: addedd writeback completion after return from sighandler
+ * for 68040
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/traps.h>
+#include <asm/unistd.h>
+#include <asm/asm-offsets.h>
+#include <asm/entry.h>
+
+.globl system_call, buserr, trap, resume
+.globl sys_call_table
+.globl sys_fork, sys_clone, sys_vfork
+.globl ret_from_interrupt, bad_interrupt
+.globl auto_irqhandler_fixup
+.globl user_irqvec_fixup
+
+.text
+ENTRY(sys_fork)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr m68k_fork
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_clone)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr m68k_clone
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_vfork)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr m68k_vfork
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_sigreturn)
+ SAVE_SWITCH_STACK
+ jbsr do_sigreturn
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_rt_sigreturn)
+ SAVE_SWITCH_STACK
+ jbsr do_rt_sigreturn
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(buserr)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %sp,%sp@- | stack frame pointer argument
+ jbsr buserr_c
+ addql #4,%sp
+ jra ret_from_exception
+
+ENTRY(trap)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %sp,%sp@- | stack frame pointer argument
+ jbsr trap_c
+ addql #4,%sp
+ jra ret_from_exception
+
+ | After a fork we jump here directly from resume,
+ | so that %d1 contains the previous task
+ | schedule_tail now used regardless of CONFIG_SMP
+ENTRY(ret_from_fork)
+ movel %d1,%sp@-
+ jsr schedule_tail
+ addql #4,%sp
+ jra ret_from_exception
+
+#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
+
+#ifdef TRAP_DBG_INTERRUPT
+
+.globl dbginterrupt
+ENTRY(dbginterrupt)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %sp,%sp@- /* stack frame pointer argument */
+ jsr dbginterrupt_c
+ addql #4,%sp
+ jra ret_from_exception
+#endif
+
+ENTRY(reschedule)
+ /* save top of frame */
+ pea %sp@
+ jbsr set_esp0
+ addql #4,%sp
+ pea ret_from_exception
+ jmp schedule
+
+ENTRY(ret_from_user_signal)
+ moveq #__NR_sigreturn,%d0
+ trap #0
+
+ENTRY(ret_from_user_rt_signal)
+ movel #__NR_rt_sigreturn,%d0
+ trap #0
+
#else
-#include "entry_no.S"
+
+do_trace_entry:
+ movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
+ subql #4,%sp
+ SAVE_SWITCH_STACK
+ jbsr syscall_trace
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+ movel %sp@(PT_OFF_ORIG_D0),%d0
+ cmpl #NR_syscalls,%d0
+ jcs syscall
+badsys:
+ movel #-ENOSYS,%sp@(PT_OFF_D0)
+ jra ret_from_syscall
+
+do_trace_exit:
+ subql #4,%sp
+ SAVE_SWITCH_STACK
+ jbsr syscall_trace
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+ jra .Lret_from_exception
+
+ENTRY(ret_from_signal)
+ movel %curptr@(TASK_STACK),%a1
+ tstb %a1@(TINFO_FLAGS+2)
+ jge 1f
+ jbsr syscall_trace
+1: RESTORE_SWITCH_STACK
+ addql #4,%sp
+/* on 68040 complete pending writebacks if any */
+#ifdef CONFIG_M68040
+ bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
+ subql #7,%d0 | bus error frame ?
+ jbne 1f
+ movel %sp,%sp@-
+ jbsr berr_040cleanup
+ addql #4,%sp
+1:
+#endif
+ jra .Lret_from_exception
+
+ENTRY(system_call)
+ SAVE_ALL_SYS
+
+ GET_CURRENT(%d1)
+ movel %d1,%a1
+
+ | save top of frame
+ movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+
+ | syscall trace?
+ tstb %a1@(TINFO_FLAGS+2)
+ jmi do_trace_entry
+ cmpl #NR_syscalls,%d0
+ jcc badsys
+syscall:
+ jbsr @(sys_call_table,%d0:l:4)@(0)
+ movel %d0,%sp@(PT_OFF_D0) | save the return value
+ret_from_syscall:
+ |oriw #0x0700,%sr
+ movel %curptr@(TASK_STACK),%a1
+ movew %a1@(TINFO_FLAGS+2),%d0
+ jne syscall_exit_work
+1: RESTORE_ALL
+
+syscall_exit_work:
+ btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
+ bnes 1b | if so, skip resched, signals
+ lslw #1,%d0
+ jcs do_trace_exit
+ jmi do_delayed_trace
+ lslw #8,%d0
+ jne do_signal_return
+ pea resume_userspace
+ jra schedule
+
+
+ENTRY(ret_from_exception)
+.Lret_from_exception:
+ btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
+ bnes 1f | if so, skip resched, signals
+ | only allow interrupts when we are really the last one on the
+ | kernel stack, otherwise stack overflow can occur during
+ | heavy interrupt load
+ andw #ALLOWINT,%sr
+
+resume_userspace:
+ movel %curptr@(TASK_STACK),%a1
+ moveb %a1@(TINFO_FLAGS+3),%d0
+ jne exit_work
+1: RESTORE_ALL
+
+exit_work:
+ | save top of frame
+ movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+ lslb #1,%d0
+ jne do_signal_return
+ pea resume_userspace
+ jra schedule
+
+
+do_signal_return:
+ |andw #ALLOWINT,%sr
+ subql #4,%sp | dummy return address
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ bsrl do_notify_resume
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+ jbra resume_userspace
+
+do_delayed_trace:
+ bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
+ pea 1 | send SIGTRAP
+ movel %curptr,%sp@-
+ pea LSIGTRAP
+ jbsr send_sig
+ addql #8,%sp
+ addql #4,%sp
+ jbra resume_userspace
+
+
+/* This is the main interrupt handler for autovector interrupts */
+
+ENTRY(auto_inthandler)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %d0,%a1
+ addqb #1,%a1@(TINFO_PREEMPT+1)
+ | put exception # in d0
+ bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
+ subw #VEC_SPUR,%d0
+
+ movel %sp,%sp@-
+ movel %d0,%sp@- | put vector # on stack
+auto_irqhandler_fixup = . + 2
+ jsr do_IRQ | process the IRQ
+ addql #8,%sp | pop parameters off stack
+
+ret_from_interrupt:
+ movel %curptr@(TASK_STACK),%a1
+ subqb #1,%a1@(TINFO_PREEMPT+1)
+ jeq ret_from_last_interrupt
+2: RESTORE_ALL
+
+ ALIGN
+ret_from_last_interrupt:
+ moveq #(~ALLOWINT>>8)&0xff,%d0
+ andb %sp@(PT_OFF_SR),%d0
+ jne 2b
+
+ /* check if we need to do software interrupts */
+ tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
+ jeq .Lret_from_exception
+ pea ret_from_exception
+ jra do_softirq
+
+/* Handler for user defined interrupt vectors */
+
+ENTRY(user_inthandler)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %d0,%a1
+ addqb #1,%a1@(TINFO_PREEMPT+1)
+ | put exception # in d0
+ bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
+user_irqvec_fixup = . + 2
+ subw #VEC_USER,%d0
+
+ movel %sp,%sp@-
+ movel %d0,%sp@- | put vector # on stack
+ jsr do_IRQ | process the IRQ
+ addql #8,%sp | pop parameters off stack
+
+ movel %curptr@(TASK_STACK),%a1
+ subqb #1,%a1@(TINFO_PREEMPT+1)
+ jeq ret_from_last_interrupt
+ RESTORE_ALL
+
+/* Handler for uninitialized and spurious interrupts */
+
+ENTRY(bad_inthandler)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %d0,%a1
+ addqb #1,%a1@(TINFO_PREEMPT+1)
+
+ movel %sp,%sp@-
+ jsr handle_badint
+ addql #4,%sp
+
+ movel %curptr@(TASK_STACK),%a1
+ subqb #1,%a1@(TINFO_PREEMPT+1)
+ jeq ret_from_last_interrupt
+ RESTORE_ALL
+
+
+resume:
+ /*
+ * Beware - when entering resume, prev (the current task) is
+ * in a0, next (the new task) is in a1,so don't change these
+ * registers until their contents are no longer needed.
+ */
+
+ /* save sr */
+ movew %sr,%a0@(TASK_THREAD+THREAD_SR)
+
+ /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
+ movec %sfc,%d0
+ movew %d0,%a0@(TASK_THREAD+THREAD_FS)
+
+ /* save usp */
+ /* it is better to use a movel here instead of a movew 8*) */
+ movec %usp,%d0
+ movel %d0,%a0@(TASK_THREAD+THREAD_USP)
+
+ /* save non-scratch registers on stack */
+ SAVE_SWITCH_STACK
+
+ /* save current kernel stack pointer */
+ movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
+
+ /* save floating point context */
+#ifndef CONFIG_M68KFPU_EMU_ONLY
+#ifdef CONFIG_M68KFPU_EMU
+ tstl m68k_fputype
+ jeq 3f
+#endif
+ fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
+
+#if defined(CONFIG_M68060)
+#if !defined(CPU_M68060_ONLY)
+ btst #3,m68k_cputype+3
+ beqs 1f
+#endif
+ /* The 060 FPU keeps status in bits 15-8 of the first longword */
+ tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
+ jeq 3f
+#if !defined(CPU_M68060_ONLY)
+ jra 2f
+#endif
+#endif /* CONFIG_M68060 */
+#if !defined(CPU_M68060_ONLY)
+1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
+ jeq 3f
+#endif
+2: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
+ fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
+3:
+#endif /* CONFIG_M68KFPU_EMU_ONLY */
+ /* Return previous task in %d1 */
+ movel %curptr,%d1
+
+ /* switch to new task (a1 contains new task) */
+ movel %a1,%curptr
+
+ /* restore floating point context */
+#ifndef CONFIG_M68KFPU_EMU_ONLY
+#ifdef CONFIG_M68KFPU_EMU
+ tstl m68k_fputype
+ jeq 4f
+#endif
+#if defined(CONFIG_M68060)
+#if !defined(CPU_M68060_ONLY)
+ btst #3,m68k_cputype+3
+ beqs 1f
+#endif
+ /* The 060 FPU keeps status in bits 15-8 of the first longword */
+ tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
+ jeq 3f
+#if !defined(CPU_M68060_ONLY)
+ jra 2f
+#endif
+#endif /* CONFIG_M68060 */
+#if !defined(CPU_M68060_ONLY)
+1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
+ jeq 3f
#endif
+2: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
+ fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
+3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
+4:
+#endif /* CONFIG_M68KFPU_EMU_ONLY */
+
+ /* restore the kernel stack pointer */
+ movel %a1@(TASK_THREAD+THREAD_KSP),%sp
+
+ /* restore non-scratch registers */
+ RESTORE_SWITCH_STACK
+
+ /* restore user stack pointer */
+ movel %a1@(TASK_THREAD+THREAD_USP),%a0
+ movel %a0,%usp
+
+ /* restore fs (sfc,%dfc) */
+ movew %a1@(TASK_THREAD+THREAD_FS),%a0
+ movec %a0,%sfc
+ movec %a0,%dfc
+
+ /* restore status register */
+ movew %a1@(TASK_THREAD+THREAD_SR),%sr
+
+ rts
+
+#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
diff --git a/arch/m68k/kernel/entry_mm.S b/arch/m68k/kernel/entry_mm.S
deleted file mode 100644
index f29e73ca9dbb..000000000000
--- a/arch/m68k/kernel/entry_mm.S
+++ /dev/null
@@ -1,419 +0,0 @@
-/* -*- mode: asm -*-
- *
- * linux/arch/m68k/kernel/entry.S
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file README.legal in the main directory of this archive
- * for more details.
- *
- * Linux/m68k support by Hamish Macdonald
- *
- * 68060 fixes by Jesper Skov
- *
- */
-
-/*
- * entry.S contains the system-call and fault low-level handling routines.
- * This also contains the timer-interrupt handler, as well as all interrupts
- * and faults that can result in a task-switch.
- *
- * NOTE: This code handles signal-recognition, which happens every time
- * after a timer-interrupt and after each system call.
- *
- */
-
-/*
- * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
- * all pointers that used to be 'current' are now entry
- * number 0 in the 'current_set' list.
- *
- * 6/05/00 RZ: addedd writeback completion after return from sighandler
- * for 68040
- */
-
-#include <linux/linkage.h>
-#include <asm/entry.h>
-#include <asm/errno.h>
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/traps.h>
-#include <asm/unistd.h>
-
-#include <asm/asm-offsets.h>
-
-.globl system_call, buserr, trap, resume
-.globl sys_call_table
-.globl sys_fork, sys_clone, sys_vfork
-.globl ret_from_interrupt, bad_interrupt
-.globl auto_irqhandler_fixup
-.globl user_irqvec_fixup
-
-.text
-ENTRY(buserr)
- SAVE_ALL_INT
- GET_CURRENT(%d0)
- movel %sp,%sp@- | stack frame pointer argument
- bsrl buserr_c
- addql #4,%sp
- jra .Lret_from_exception
-
-ENTRY(trap)
- SAVE_ALL_INT
- GET_CURRENT(%d0)
- movel %sp,%sp@- | stack frame pointer argument
- bsrl trap_c
- addql #4,%sp
- jra .Lret_from_exception
-
- | After a fork we jump here directly from resume,
- | so that %d1 contains the previous task
- | schedule_tail now used regardless of CONFIG_SMP
-ENTRY(ret_from_fork)
- movel %d1,%sp@-
- jsr schedule_tail
- addql #4,%sp
- jra .Lret_from_exception
-
-do_trace_entry:
- movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
- subql #4,%sp
- SAVE_SWITCH_STACK
- jbsr syscall_trace
- RESTORE_SWITCH_STACK
- addql #4,%sp
- movel %sp@(PT_OFF_ORIG_D0),%d0
- cmpl #NR_syscalls,%d0
- jcs syscall
-badsys:
- movel #-ENOSYS,%sp@(PT_OFF_D0)
- jra ret_from_syscall
-
-do_trace_exit:
- subql #4,%sp
- SAVE_SWITCH_STACK
- jbsr syscall_trace
- RESTORE_SWITCH_STACK
- addql #4,%sp
- jra .Lret_from_exception
-
-ENTRY(ret_from_signal)
- movel %curptr@(TASK_STACK),%a1
- tstb %a1@(TINFO_FLAGS+2)
- jge 1f
- jbsr syscall_trace
-1: RESTORE_SWITCH_STACK
- addql #4,%sp
-/* on 68040 complete pending writebacks if any */
-#ifdef CONFIG_M68040
- bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
- subql #7,%d0 | bus error frame ?
- jbne 1f
- movel %sp,%sp@-
- jbsr berr_040cleanup
- addql #4,%sp
-1:
-#endif
- jra .Lret_from_exception
-
-ENTRY(system_call)
- SAVE_ALL_SYS
-
- GET_CURRENT(%d1)
- movel %d1,%a1
-
- | save top of frame
- movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
-
- | syscall trace?
- tstb %a1@(TINFO_FLAGS+2)
- jmi do_trace_entry
- cmpl #NR_syscalls,%d0
- jcc badsys
-syscall:
- jbsr @(sys_call_table,%d0:l:4)@(0)
- movel %d0,%sp@(PT_OFF_D0) | save the return value
-ret_from_syscall:
- |oriw #0x0700,%sr
- movel %curptr@(TASK_STACK),%a1
- movew %a1@(TINFO_FLAGS+2),%d0
- jne syscall_exit_work
-1: RESTORE_ALL
-
-syscall_exit_work:
- btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
- bnes 1b | if so, skip resched, signals
- lslw #1,%d0
- jcs do_trace_exit
- jmi do_delayed_trace
- lslw #8,%d0
- jne do_signal_return
- pea resume_userspace
- jra schedule
-
-
-ENTRY(ret_from_exception)
-.Lret_from_exception:
- btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
- bnes 1f | if so, skip resched, signals
- | only allow interrupts when we are really the last one on the
- | kernel stack, otherwise stack overflow can occur during
- | heavy interrupt load
- andw #ALLOWINT,%sr
-
-resume_userspace:
- movel %curptr@(TASK_STACK),%a1
- moveb %a1@(TINFO_FLAGS+3),%d0
- jne exit_work
-1: RESTORE_ALL
-
-exit_work:
- | save top of frame
- movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
- lslb #1,%d0
- jne do_signal_return
- pea resume_userspace
- jra schedule
-
-
-do_signal_return:
- |andw #ALLOWINT,%sr
- subql #4,%sp | dummy return address
- SAVE_SWITCH_STACK
- pea %sp@(SWITCH_STACK_SIZE)
- bsrl do_notify_resume
- addql #4,%sp
- RESTORE_SWITCH_STACK
- addql #4,%sp
- jbra resume_userspace
-
-do_delayed_trace:
- bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
- pea 1 | send SIGTRAP
- movel %curptr,%sp@-
- pea LSIGTRAP
- jbsr send_sig
- addql #8,%sp
- addql #4,%sp
- jbra resume_userspace
-
-
-/* This is the main interrupt handler for autovector interrupts */
-
-ENTRY(auto_inthandler)
- SAVE_ALL_INT
- GET_CURRENT(%d0)
- movel %d0,%a1
- addqb #1,%a1@(TINFO_PREEMPT+1)
- | put exception # in d0
- bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
- subw #VEC_SPUR,%d0
-
- movel %sp,%sp@-
- movel %d0,%sp@- | put vector # on stack
-auto_irqhandler_fixup = . + 2
- jsr do_IRQ | process the IRQ
- addql #8,%sp | pop parameters off stack
-
-ret_from_interrupt:
- movel %curptr@(TASK_STACK),%a1
- subqb #1,%a1@(TINFO_PREEMPT+1)
- jeq ret_from_last_interrupt
-2: RESTORE_ALL
-
- ALIGN
-ret_from_last_interrupt:
- moveq #(~ALLOWINT>>8)&0xff,%d0
- andb %sp@(PT_OFF_SR),%d0
- jne 2b
-
- /* check if we need to do software interrupts */
- tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
- jeq .Lret_from_exception
- pea ret_from_exception
- jra do_softirq
-
-/* Handler for user defined interrupt vectors */
-
-ENTRY(user_inthandler)
- SAVE_ALL_INT
- GET_CURRENT(%d0)
- movel %d0,%a1
- addqb #1,%a1@(TINFO_PREEMPT+1)
- | put exception # in d0
- bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
-user_irqvec_fixup = . + 2
- subw #VEC_USER,%d0
-
- movel %sp,%sp@-
- movel %d0,%sp@- | put vector # on stack
- jsr do_IRQ | process the IRQ
- addql #8,%sp | pop parameters off stack
-
- movel %curptr@(TASK_STACK),%a1
- subqb #1,%a1@(TINFO_PREEMPT+1)
- jeq ret_from_last_interrupt
- RESTORE_ALL
-
-/* Handler for uninitialized and spurious interrupts */
-
-ENTRY(bad_inthandler)
- SAVE_ALL_INT
- GET_CURRENT(%d0)
- movel %d0,%a1
- addqb #1,%a1@(TINFO_PREEMPT+1)
-
- movel %sp,%sp@-
- jsr handle_badint
- addql #4,%sp
-
- movel %curptr@(TASK_STACK),%a1
- subqb #1,%a1@(TINFO_PREEMPT+1)
- jeq ret_from_last_interrupt
- RESTORE_ALL
-
-
-ENTRY(sys_fork)
- SAVE_SWITCH_STACK
- pea %sp@(SWITCH_STACK_SIZE)
- jbsr m68k_fork
- addql #4,%sp
- RESTORE_SWITCH_STACK
- rts
-
-ENTRY(sys_clone)
- SAVE_SWITCH_STACK
- pea %sp@(SWITCH_STACK_SIZE)
- jbsr m68k_clone
- addql #4,%sp
- RESTORE_SWITCH_STACK
- rts
-
-ENTRY(sys_vfork)
- SAVE_SWITCH_STACK
- pea %sp@(SWITCH_STACK_SIZE)
- jbsr m68k_vfork
- addql #4,%sp
- RESTORE_SWITCH_STACK
- rts
-
-ENTRY(sys_sigreturn)
- SAVE_SWITCH_STACK
- jbsr do_sigreturn
- RESTORE_SWITCH_STACK
- rts
-
-ENTRY(sys_rt_sigreturn)
- SAVE_SWITCH_STACK
- jbsr do_rt_sigreturn
- RESTORE_SWITCH_STACK
- rts
-
-resume:
- /*
- * Beware - when entering resume, prev (the current task) is
- * in a0, next (the new task) is in a1,so don't change these
- * registers until their contents are no longer needed.
- */
-
- /* save sr */
- movew %sr,%a0@(TASK_THREAD+THREAD_SR)
-
- /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
- movec %sfc,%d0
- movew %d0,%a0@(TASK_THREAD+THREAD_FS)
-
- /* save usp */
- /* it is better to use a movel here instead of a movew 8*) */
- movec %usp,%d0
- movel %d0,%a0@(TASK_THREAD+THREAD_USP)
-
- /* save non-scratch registers on stack */
- SAVE_SWITCH_STACK
-
- /* save current kernel stack pointer */
- movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
-
- /* save floating point context */
-#ifndef CONFIG_M68KFPU_EMU_ONLY
-#ifdef CONFIG_M68KFPU_EMU
- tstl m68k_fputype
- jeq 3f
-#endif
- fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
-
-#if defined(CONFIG_M68060)
-#if !defined(CPU_M68060_ONLY)
- btst #3,m68k_cputype+3
- beqs 1f
-#endif
- /* The 060 FPU keeps status in bits 15-8 of the first longword */
- tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
- jeq 3f
-#if !defined(CPU_M68060_ONLY)
- jra 2f
-#endif
-#endif /* CONFIG_M68060 */
-#if !defined(CPU_M68060_ONLY)
-1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
- jeq 3f
-#endif
-2: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
- fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
-3:
-#endif /* CONFIG_M68KFPU_EMU_ONLY */
- /* Return previous task in %d1 */
- movel %curptr,%d1
-
- /* switch to new task (a1 contains new task) */
- movel %a1,%curptr
-
- /* restore floating point context */
-#ifndef CONFIG_M68KFPU_EMU_ONLY
-#ifdef CONFIG_M68KFPU_EMU
- tstl m68k_fputype
- jeq 4f
-#endif
-#if defined(CONFIG_M68060)
-#if !defined(CPU_M68060_ONLY)
- btst #3,m68k_cputype+3
- beqs 1f
-#endif
- /* The 060 FPU keeps status in bits 15-8 of the first longword */
- tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
- jeq 3f
-#if !defined(CPU_M68060_ONLY)
- jra 2f
-#endif
-#endif /* CONFIG_M68060 */
-#if !defined(CPU_M68060_ONLY)
-1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
- jeq 3f
-#endif
-2: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
- fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
-3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
-4:
-#endif /* CONFIG_M68KFPU_EMU_ONLY */
-
- /* restore the kernel stack pointer */
- movel %a1@(TASK_THREAD+THREAD_KSP),%sp
-
- /* restore non-scratch registers */
- RESTORE_SWITCH_STACK
-
- /* restore user stack pointer */
- movel %a1@(TASK_THREAD+THREAD_USP),%a0
- movel %a0,%usp
-
- /* restore fs (sfc,%dfc) */
- movew %a1@(TASK_THREAD+THREAD_FS),%a0
- movec %a0,%sfc
- movec %a0,%dfc
-
- /* restore status register */
- movew %a1@(TASK_THREAD+THREAD_SR),%sr
-
- rts
-
diff --git a/arch/m68k/kernel/entry_no.S b/arch/m68k/kernel/entry_no.S
deleted file mode 100644
index d80cba45589f..000000000000
--- a/arch/m68k/kernel/entry_no.S
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * linux/arch/m68knommu/kernel/entry.S
- *
- * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
- * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
- * Kenneth Albanowski <kjahds@kjahds.com>,
- * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
- *
- * Based on:
- *
- * linux/arch/m68k/kernel/entry.S
- *
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file README.legal in the main directory of this archive
- * for more details.
- *
- * Linux/m68k support by Hamish Macdonald
- *
- * 68060 fixes by Jesper Skov
- * ColdFire support by Greg Ungerer (gerg@snapgear.com)
- * 5307 fixes by David W. Miller
- * linux 2.4 support David McCullough <davidm@snapgear.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/errno.h>
-#include <asm/setup.h>
-#include <asm/segment.h>
-#include <asm/asm-offsets.h>
-#include <asm/entry.h>
-#include <asm/unistd.h>
-
-.text
-
-.globl buserr
-.globl trap
-.globl ret_from_exception
-.globl ret_from_signal
-.globl sys_fork
-.globl sys_clone
-.globl sys_vfork
-
-ENTRY(buserr)
- SAVE_ALL_INT
- GET_CURRENT(%d0)
- movel %sp,%sp@- /* stack frame pointer argument */
- jsr buserr_c
- addql #4,%sp
- jra ret_from_exception
-
-ENTRY(trap)
- SAVE_ALL_INT
- GET_CURRENT(%d0)
- movel %sp,%sp@- /* stack frame pointer argument */
- jsr trap_c
- addql #4,%sp
- jra ret_from_exception
-
-#ifdef TRAP_DBG_INTERRUPT
-
-.globl dbginterrupt
-ENTRY(dbginterrupt)
- SAVE_ALL_INT
- GET_CURRENT(%d0)
- movel %sp,%sp@- /* stack frame pointer argument */
- jsr dbginterrupt_c
- addql #4,%sp
- jra ret_from_exception
-#endif
-
-ENTRY(reschedule)
- /* save top of frame */
- pea %sp@
- jbsr set_esp0
- addql #4,%sp
- pea ret_from_exception
- jmp schedule
-
-ENTRY(ret_from_fork)
- movel %d1,%sp@-
- jsr schedule_tail
- addql #4,%sp
- jra ret_from_exception
-
-ENTRY(sys_fork)
- SAVE_SWITCH_STACK
- pea %sp@(SWITCH_STACK_SIZE)
- jbsr m68k_fork
- addql #4,%sp
- RESTORE_SWITCH_STACK
- rts
-
-ENTRY(sys_vfork)
- SAVE_SWITCH_STACK
- pea %sp@(SWITCH_STACK_SIZE)
- jbsr m68k_vfork
- addql #4,%sp
- RESTORE_SWITCH_STACK
- rts
-
-ENTRY(sys_clone)
- SAVE_SWITCH_STACK
- pea %sp@(SWITCH_STACK_SIZE)
- jbsr m68k_clone
- addql #4,%sp
- RESTORE_SWITCH_STACK
- rts
-
-ENTRY(sys_sigreturn)
- SAVE_SWITCH_STACK
- jbsr do_sigreturn
- RESTORE_SWITCH_STACK
- rts
-
-ENTRY(sys_rt_sigreturn)
- SAVE_SWITCH_STACK
- jbsr do_rt_sigreturn
- RESTORE_SWITCH_STACK
- rts
-
-ENTRY(ret_from_user_signal)
- moveq #__NR_sigreturn,%d0
- trap #0
-
-ENTRY(ret_from_user_rt_signal)
- movel #__NR_rt_sigreturn,%d0
- trap #0
-
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
index 34849c4c6e3d..eb46fd6038ca 100644
--- a/arch/m68k/kernel/module.c
+++ b/arch/m68k/kernel/module.c
@@ -47,7 +47,7 @@ int apply_relocate(Elf32_Shdr *sechdrs,
*location += sym->st_value;
break;
case R_68K_PC32:
- /* Add the value, subtract its postition */
+ /* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
default:
@@ -87,7 +87,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
*location = rel[i].r_addend + sym->st_value;
break;
case R_68K_PC32:
- /* Add the value, subtract its postition */
+ /* Add the value, subtract its position */
*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
break;
default:
diff --git a/arch/m68k/kernel/pcibios.c b/arch/m68k/kernel/pcibios.c
new file mode 100644
index 000000000000..b2988aa1840b
--- /dev/null
+++ b/arch/m68k/kernel/pcibios.c
@@ -0,0 +1,109 @@
+/*
+ * pci.c -- basic PCI support code
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * (C) Copyright 2011, Greg Ungerer <gerg@uclinux.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+/*
+ * From arch/i386/kernel/pci-i386.c:
+ *
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff..
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ resource_size_t start = res->start;
+
+ if ((res->flags & IORESOURCE_IO) && (start & 0x300))
+ start = (start + 0x3ff) & ~0x3ff;
+
+ start = (start + align - 1) & ~(align - 1);
+
+ return start;
+}
+
+/*
+ * This is taken from the ARM code for this.
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ struct resource *r;
+ u16 cmd, newcmd;
+ int idx;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ newcmd = cmd;
+
+ for (idx = 0; idx < 6; idx++) {
+ /* Only set up the requested stuff */
+ if (!(mask & (1 << idx)))
+ continue;
+
+ r = dev->resource + idx;
+ if (!r->start && r->end) {
+ pr_err(KERN_ERR "PCI: Device %s not available because of resource collisions\n",
+ pci_name(dev));
+ return -EINVAL;
+ }
+ if (r->flags & IORESOURCE_IO)
+ newcmd |= PCI_COMMAND_IO;
+ if (r->flags & IORESOURCE_MEM)
+ newcmd |= PCI_COMMAND_MEMORY;
+ }
+
+ /*
+ * Bridges (eg, cardbus bridges) need to be fully enabled
+ */
+ if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+ newcmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+
+
+ if (newcmd != cmd) {
+ pr_info("PCI: enabling device %s (0x%04x -> 0x%04x)\n",
+ pci_name(dev), cmd, newcmd);
+ pci_write_config_word(dev, PCI_COMMAND, newcmd);
+ }
+ return 0;
+}
+
+void pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 32);
+ }
+}
+
+char __devinit *pcibios_setup(char *str)
+{
+ return str;
+}
+
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 250b8b786f4f..51bc9d258ede 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -203,7 +203,7 @@ static inline void pushcl040(unsigned long paddr)
void cache_clear (unsigned long paddr, int len)
{
if (CPU_IS_COLDFIRE) {
- flush_cf_bcache(0, DCACHE_MAX_ADDR);
+ clear_cf_bcache(0, DCACHE_MAX_ADDR);
} else if (CPU_IS_040_OR_060) {
int tmp;
diff --git a/arch/m68k/platform/coldfire/Makefile b/arch/m68k/platform/coldfire/Makefile
index 76d389d9a84e..02591a109f8c 100644
--- a/arch/m68k/platform/coldfire/Makefile
+++ b/arch/m68k/platform/coldfire/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_M5206e) += m5206.o timers.o intc.o reset.o
obj-$(CONFIG_M520x) += m520x.o pit.o intc-simr.o reset.o
obj-$(CONFIG_M523x) += m523x.o pit.o dma_timer.o intc-2.o reset.o
obj-$(CONFIG_M5249) += m5249.o timers.o intc.o intc-5249.o reset.o
+obj-$(CONFIG_M525x) += m525x.o timers.o intc.o intc-525x.o reset.o
obj-$(CONFIG_M527x) += m527x.o pit.o intc-2.o reset.o
obj-$(CONFIG_M5272) += m5272.o intc-5272.o timers.o
obj-$(CONFIG_M528x) += m528x.o pit.o intc-2.o reset.o
@@ -27,10 +28,14 @@ obj-$(CONFIG_M5307) += m5307.o timers.o intc.o reset.o
obj-$(CONFIG_M532x) += m532x.o timers.o intc-simr.o reset.o
obj-$(CONFIG_M5407) += m5407.o timers.o intc.o reset.o
obj-$(CONFIG_M54xx) += m54xx.o sltimers.o intc-2.o
+obj-$(CONFIG_M5441x) += m5441x.o pit.o intc-simr.o reset.o
obj-$(CONFIG_NETtel) += nettel.o
obj-$(CONFIG_CLEOPATRA) += nettel.o
obj-$(CONFIG_FIREBEE) += firebee.o
+obj-$(CONFIG_MCF8390) += mcf8390.o
-obj-y += pinmux.o gpio.o
+obj-$(CONFIG_PCI) += pci.o
+
+obj-y += gpio.o
extra-y := head.o
diff --git a/arch/m68k/platform/coldfire/clk.c b/arch/m68k/platform/coldfire/clk.c
index 44da406897e5..75f9ee967ea7 100644
--- a/arch/m68k/platform/coldfire/clk.c
+++ b/arch/m68k/platform/coldfire/clk.c
@@ -10,11 +10,17 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/err.h>
#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfclk.h>
/***************************************************************************/
-
+#ifndef MCFPM_PPMCR0
struct clk *clk_get(struct device *dev, const char *id)
{
return NULL;
@@ -42,11 +48,107 @@ unsigned long clk_get_rate(struct clk *clk)
return MCF_CLK;
}
EXPORT_SYMBOL(clk_get_rate);
+#else
+static DEFINE_SPINLOCK(clk_lock);
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+ const char *clk_name = dev ? dev_name(dev) : id ? id : NULL;
+ struct clk *clk;
+ unsigned i;
+
+ for (i = 0; (clk = mcf_clks[i]) != NULL; ++i)
+ if (!strcmp(clk->name, clk_name))
+ return clk;
+ pr_warn("clk_get: didn't find clock %s\n", clk_name);
+ return ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(clk_get);
+
+int clk_enable(struct clk *clk)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&clk_lock, flags);
+ if ((clk->enabled++ == 0) && clk->clk_ops)
+ clk->clk_ops->enable(clk);
+ spin_unlock_irqrestore(&clk_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&clk_lock, flags);
+ if ((--clk->enabled == 0) && clk->clk_ops)
+ clk->clk_ops->disable(clk);
+ spin_unlock_irqrestore(&clk_lock, flags);
+}
+EXPORT_SYMBOL(clk_disable);
+
+void clk_put(struct clk *clk)
+{
+ if (clk->enabled != 0)
+ pr_warn("clk_put %s still enabled\n", clk->name);
+}
+EXPORT_SYMBOL(clk_put);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+EXPORT_SYMBOL(clk_get_rate);
+
+/***************************************************************************/
+
+void __clk_init_enabled(struct clk *clk)
+{
+ clk->enabled = 1;
+ clk->clk_ops->enable(clk);
+}
+
+void __clk_init_disabled(struct clk *clk)
+{
+ clk->enabled = 0;
+ clk->clk_ops->disable(clk);
+}
+
+static void __clk_enable0(struct clk *clk)
+{
+ __raw_writeb(clk->slot, MCFPM_PPMCR0);
+}
+
+static void __clk_disable0(struct clk *clk)
+{
+ __raw_writeb(clk->slot, MCFPM_PPMSR0);
+}
+
+struct clk_ops clk_ops0 = {
+ .enable = __clk_enable0,
+ .disable = __clk_disable0,
+};
+
+#ifdef MCFPM_PPMCR1
+static void __clk_enable1(struct clk *clk)
+{
+ __raw_writeb(clk->slot, MCFPM_PPMCR1);
+}
+
+static void __clk_disable1(struct clk *clk)
+{
+ __raw_writeb(clk->slot, MCFPM_PPMSR1);
+}
+
+struct clk_ops clk_ops1 = {
+ .enable = __clk_enable1,
+ .disable = __clk_disable1,
+};
+#endif /* MCFPM_PPMCR1 */
+#endif /* MCFPM_PPMCR0 */
struct clk *devm_clk_get(struct device *dev, const char *id)
{
return NULL;
}
EXPORT_SYMBOL(devm_clk_get);
-
-/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/device.c b/arch/m68k/platform/coldfire/device.c
index 3aa77ddea89d..81f0fb5e51cf 100644
--- a/arch/m68k/platform/coldfire/device.c
+++ b/arch/m68k/platform/coldfire/device.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
+#include <linux/fec.h>
#include <asm/traps.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
@@ -20,7 +21,7 @@
#include <asm/mcfqspi.h>
/*
- * All current ColdFire parts contain from 2, 3 or 4 UARTS.
+ * All current ColdFire parts contain from 2, 3, 4 or 10 UARTS.
*/
static struct mcf_platform_uart mcf_uart_platform_data[] = {
{
@@ -43,6 +44,42 @@ static struct mcf_platform_uart mcf_uart_platform_data[] = {
.irq = MCF_IRQ_UART3,
},
#endif
+#ifdef MCFUART_BASE4
+ {
+ .mapbase = MCFUART_BASE4,
+ .irq = MCF_IRQ_UART4,
+ },
+#endif
+#ifdef MCFUART_BASE5
+ {
+ .mapbase = MCFUART_BASE5,
+ .irq = MCF_IRQ_UART5,
+ },
+#endif
+#ifdef MCFUART_BASE6
+ {
+ .mapbase = MCFUART_BASE6,
+ .irq = MCF_IRQ_UART6,
+ },
+#endif
+#ifdef MCFUART_BASE7
+ {
+ .mapbase = MCFUART_BASE7,
+ .irq = MCF_IRQ_UART7,
+ },
+#endif
+#ifdef MCFUART_BASE8
+ {
+ .mapbase = MCFUART_BASE8,
+ .irq = MCF_IRQ_UART8,
+ },
+#endif
+#ifdef MCFUART_BASE9
+ {
+ .mapbase = MCFUART_BASE9,
+ .irq = MCF_IRQ_UART9,
+ },
+#endif
{ },
};
@@ -53,6 +90,18 @@ static struct platform_device mcf_uart = {
};
#ifdef CONFIG_FEC
+
+#ifdef CONFIG_M5441x
+#define FEC_NAME "enet-fec"
+static struct fec_platform_data fec_pdata = {
+ .phy = PHY_INTERFACE_MODE_RMII,
+};
+#define FEC_PDATA (&fec_pdata)
+#else
+#define FEC_NAME "fec"
+#define FEC_PDATA NULL
+#endif
+
/*
* Some ColdFire cores contain the Fast Ethernet Controller (FEC)
* block. It is Freescale's own hardware block. Some ColdFires
@@ -82,10 +131,11 @@ static struct resource mcf_fec0_resources[] = {
};
static struct platform_device mcf_fec0 = {
- .name = "fec",
+ .name = FEC_NAME,
.id = 0,
.num_resources = ARRAY_SIZE(mcf_fec0_resources),
.resource = mcf_fec0_resources,
+ .dev.platform_data = FEC_PDATA,
};
#ifdef MCFFEC_BASE1
@@ -113,10 +163,11 @@ static struct resource mcf_fec1_resources[] = {
};
static struct platform_device mcf_fec1 = {
- .name = "fec",
+ .name = FEC_NAME,
.id = 1,
.num_resources = ARRAY_SIZE(mcf_fec1_resources),
.resource = mcf_fec1_resources,
+ .dev.platform_data = FEC_PDATA,
};
#endif /* MCFFEC_BASE1 */
#endif /* CONFIG_FEC */
diff --git a/arch/m68k/platform/coldfire/gpio.c b/arch/m68k/platform/coldfire/gpio.c
index 4c8c42450a4e..9cd2b5c70519 100644
--- a/arch/m68k/platform/coldfire/gpio.c
+++ b/arch/m68k/platform/coldfire/gpio.c
@@ -14,119 +14,161 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
-#include <asm/gpio.h>
-#include <asm/pinmux.h>
+#include <linux/io.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
#include <asm/mcfgpio.h>
-#define MCF_CHIP(chip) container_of(chip, struct mcf_gpio_chip, gpio_chip)
+int __mcfgpio_get_value(unsigned gpio)
+{
+ return mcfgpio_read(__mcfgpio_ppdr(gpio)) & mcfgpio_bit(gpio);
+}
+EXPORT_SYMBOL(__mcfgpio_get_value);
+
+void __mcfgpio_set_value(unsigned gpio, int value)
+{
+ if (gpio < MCFGPIO_SCR_START) {
+ unsigned long flags;
+ MCFGPIO_PORTTYPE data;
+
+ local_irq_save(flags);
+ data = mcfgpio_read(__mcfgpio_podr(gpio));
+ if (value)
+ data |= mcfgpio_bit(gpio);
+ else
+ data &= ~mcfgpio_bit(gpio);
+ mcfgpio_write(data, __mcfgpio_podr(gpio));
+ local_irq_restore(flags);
+ } else {
+ if (value)
+ mcfgpio_write(mcfgpio_bit(gpio),
+ MCFGPIO_SETR_PORT(gpio));
+ else
+ mcfgpio_write(~mcfgpio_bit(gpio),
+ MCFGPIO_CLRR_PORT(gpio));
+ }
+}
+EXPORT_SYMBOL(__mcfgpio_set_value);
-int mcf_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+int __mcfgpio_direction_input(unsigned gpio)
{
unsigned long flags;
MCFGPIO_PORTTYPE dir;
- struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
local_irq_save(flags);
- dir = mcfgpio_read(mcf_chip->pddr);
- dir &= ~mcfgpio_bit(chip->base + offset);
- mcfgpio_write(dir, mcf_chip->pddr);
+ dir = mcfgpio_read(__mcfgpio_pddr(gpio));
+ dir &= ~mcfgpio_bit(gpio);
+ mcfgpio_write(dir, __mcfgpio_pddr(gpio));
local_irq_restore(flags);
return 0;
}
+EXPORT_SYMBOL(__mcfgpio_direction_input);
-int mcf_gpio_get_value(struct gpio_chip *chip, unsigned offset)
-{
- struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
-
- return mcfgpio_read(mcf_chip->ppdr) & mcfgpio_bit(chip->base + offset);
-}
-
-int mcf_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
+int __mcfgpio_direction_output(unsigned gpio, int value)
{
unsigned long flags;
MCFGPIO_PORTTYPE data;
- struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
local_irq_save(flags);
- /* write the value to the output latch */
- data = mcfgpio_read(mcf_chip->podr);
+ data = mcfgpio_read(__mcfgpio_pddr(gpio));
if (value)
- data |= mcfgpio_bit(chip->base + offset);
+ data |= mcfgpio_bit(gpio);
else
- data &= ~mcfgpio_bit(chip->base + offset);
- mcfgpio_write(data, mcf_chip->podr);
-
- /* now set the direction to output */
- data = mcfgpio_read(mcf_chip->pddr);
- data |= mcfgpio_bit(chip->base + offset);
- mcfgpio_write(data, mcf_chip->pddr);
+ data &= mcfgpio_bit(gpio);
+ mcfgpio_write(data, __mcfgpio_pddr(gpio));
+
+ /* now set the data to output */
+ if (gpio < MCFGPIO_SCR_START) {
+ data = mcfgpio_read(__mcfgpio_podr(gpio));
+ if (value)
+ data |= mcfgpio_bit(gpio);
+ else
+ data &= ~mcfgpio_bit(gpio);
+ mcfgpio_write(data, __mcfgpio_podr(gpio));
+ } else {
+ if (value)
+ mcfgpio_write(mcfgpio_bit(gpio),
+ MCFGPIO_SETR_PORT(gpio));
+ else
+ mcfgpio_write(~mcfgpio_bit(gpio),
+ MCFGPIO_CLRR_PORT(gpio));
+ }
local_irq_restore(flags);
+ return 0;
+}
+EXPORT_SYMBOL(__mcfgpio_direction_output);
+int __mcfgpio_request(unsigned gpio)
+{
return 0;
}
+EXPORT_SYMBOL(__mcfgpio_request);
-void mcf_gpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
+void __mcfgpio_free(unsigned gpio)
{
- struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
+ __mcfgpio_direction_input(gpio);
+}
+EXPORT_SYMBOL(__mcfgpio_free);
- unsigned long flags;
- MCFGPIO_PORTTYPE data;
+#ifdef CONFIG_GPIOLIB
- local_irq_save(flags);
- data = mcfgpio_read(mcf_chip->podr);
- if (value)
- data |= mcfgpio_bit(chip->base + offset);
- else
- data &= ~mcfgpio_bit(chip->base + offset);
- mcfgpio_write(data, mcf_chip->podr);
- local_irq_restore(flags);
+int mcfgpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ return __mcfgpio_direction_input(offset);
}
-void mcf_gpio_set_value_fast(struct gpio_chip *chip, unsigned offset, int value)
+int mcfgpio_get_value(struct gpio_chip *chip, unsigned offset)
{
- struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
-
- if (value)
- mcfgpio_write(mcfgpio_bit(chip->base + offset), mcf_chip->setr);
- else
- mcfgpio_write(~mcfgpio_bit(chip->base + offset), mcf_chip->clrr);
+ return __mcfgpio_get_value(offset);
}
-int mcf_gpio_request(struct gpio_chip *chip, unsigned offset)
+int mcfgpio_direction_output(struct gpio_chip *chip, unsigned offset, int value)
{
- struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
-
- return mcf_chip->gpio_to_pinmux ?
- mcf_pinmux_request(mcf_chip->gpio_to_pinmux[offset], 0) : 0;
+ return __mcfgpio_direction_output(offset, value);
}
-void mcf_gpio_free(struct gpio_chip *chip, unsigned offset)
+void mcfgpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
{
- struct mcf_gpio_chip *mcf_chip = MCF_CHIP(chip);
+ __mcfgpio_set_value(offset, value);
+}
- mcf_gpio_direction_input(chip, offset);
+int mcfgpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return __mcfgpio_request(offset);
+}
- if (mcf_chip->gpio_to_pinmux)
- mcf_pinmux_release(mcf_chip->gpio_to_pinmux[offset], 0);
+void mcfgpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ __mcfgpio_free(offset);
}
-struct bus_type mcf_gpio_subsys = {
+struct bus_type mcfgpio_subsys = {
.name = "gpio",
.dev_name = "gpio",
};
-static int __init mcf_gpio_sysinit(void)
-{
- unsigned int i = 0;
+static struct gpio_chip mcfgpio_chip = {
+ .label = "mcfgpio",
+ .request = mcfgpio_request,
+ .free = mcfgpio_free,
+ .direction_input = mcfgpio_direction_input,
+ .direction_output = mcfgpio_direction_output,
+ .get = mcfgpio_get_value,
+ .set = mcfgpio_set_value,
+ .base = 0,
+ .ngpio = MCFGPIO_PIN_MAX,
+};
- while (i < mcf_gpio_chips_size)
- gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]);
- return subsys_system_register(&mcf_gpio_subsys, NULL);
+static int __init mcfgpio_sysinit(void)
+{
+ gpiochip_add(&mcfgpio_chip);
+ return subsys_system_register(&mcfgpio_subsys, NULL);
}
-core_initcall(mcf_gpio_sysinit);
+core_initcall(mcfgpio_sysinit);
+#endif
diff --git a/arch/m68k/platform/coldfire/head.S b/arch/m68k/platform/coldfire/head.S
index c3db70ed33b3..4e0c9eb3bd1f 100644
--- a/arch/m68k/platform/coldfire/head.S
+++ b/arch/m68k/platform/coldfire/head.S
@@ -31,9 +31,9 @@
.endm
#elif defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \
- defined(CONFIG_M5249) || defined(CONFIG_M527x) || \
- defined(CONFIG_M528x) || defined(CONFIG_M5307) || \
- defined(CONFIG_M5407)
+ defined(CONFIG_M5249) || defined(CONFIG_M525x) || \
+ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+ defined(CONFIG_M5307) || defined(CONFIG_M5407)
/*
* Not all these devices have exactly the same DRAM controller,
* but the DCMR register is virtually identical - give or take
diff --git a/arch/m68k/platform/coldfire/intc-525x.c b/arch/m68k/platform/coldfire/intc-525x.c
new file mode 100644
index 000000000000..b23204d059ac
--- /dev/null
+++ b/arch/m68k/platform/coldfire/intc-525x.c
@@ -0,0 +1,91 @@
+/*
+ * intc2.c -- support for the 2nd INTC controller of the 525x
+ *
+ * (C) Copyright 2012, Steven King <sfking@fdwdc.com>
+ * (C) Copyright 2009, Greg Ungerer <gerg@snapgear.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+
+static void intc2_irq_gpio_mask(struct irq_data *d)
+{
+ u32 imr = readl(MCFSIM2_GPIOINTENABLE);
+ u32 type = irqd_get_trigger_type(d);
+ int irq = d->irq - MCF_IRQ_GPIO0;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ imr &= ~(0x001 << irq);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ imr &= ~(0x100 << irq);
+ writel(imr, MCFSIM2_GPIOINTENABLE);
+}
+
+static void intc2_irq_gpio_unmask(struct irq_data *d)
+{
+ u32 imr = readl(MCFSIM2_GPIOINTENABLE);
+ u32 type = irqd_get_trigger_type(d);
+ int irq = d->irq - MCF_IRQ_GPIO0;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ imr |= (0x001 << irq);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ imr |= (0x100 << irq);
+ writel(imr, MCFSIM2_GPIOINTENABLE);
+}
+
+static void intc2_irq_gpio_ack(struct irq_data *d)
+{
+ u32 imr = 0;
+ u32 type = irqd_get_trigger_type(d);
+ int irq = d->irq - MCF_IRQ_GPIO0;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ imr |= (0x001 << irq);
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ imr |= (0x100 << irq);
+ writel(imr, MCFSIM2_GPIOINTCLEAR);
+}
+
+static int intc2_irq_gpio_set_type(struct irq_data *d, unsigned int f)
+{
+ if (f & ~IRQ_TYPE_EDGE_BOTH)
+ return -EINVAL;
+ return 0;
+}
+
+static struct irq_chip intc2_irq_gpio_chip = {
+ .name = "CF-INTC2",
+ .irq_mask = intc2_irq_gpio_mask,
+ .irq_unmask = intc2_irq_gpio_unmask,
+ .irq_ack = intc2_irq_gpio_ack,
+ .irq_set_type = intc2_irq_gpio_set_type,
+};
+
+static int __init mcf_intc2_init(void)
+{
+ int irq;
+
+ /* set the interrupt base for the second interrupt controller */
+ writel(MCFINTC2_VECBASE, MCFINTC2_INTBASE);
+
+ /* GPIO interrupt sources */
+ for (irq = MCF_IRQ_GPIO0; (irq <= MCF_IRQ_GPIO6); irq++) {
+ irq_set_chip(irq, &intc2_irq_gpio_chip);
+ irq_set_handler(irq, handle_edge_irq);
+ }
+
+ return 0;
+}
+
+arch_initcall(mcf_intc2_init);
diff --git a/arch/m68k/platform/coldfire/intc-simr.c b/arch/m68k/platform/coldfire/intc-simr.c
index 650d52e2927e..7cf2c156f72d 100644
--- a/arch/m68k/platform/coldfire/intc-simr.c
+++ b/arch/m68k/platform/coldfire/intc-simr.c
@@ -59,16 +59,18 @@ static unsigned int inline irq2ebit(unsigned int irq)
#endif
/*
- * There maybe one or two interrupt control units, each has 64
- * interrupts. If there is no second unit then MCFINTC1_* defines
- * will be 0 (and code for them optimized away).
+ * There maybe one, two or three interrupt control units, each has 64
+ * interrupts. If there is no second or third unit then MCFINTC1_* or
+ * MCFINTC2_* defines will be 0 (and code for them optimized away).
*/
static void intc_irq_mask(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
- if (MCFINTC1_SIMR && (irq > 64))
+ if (MCFINTC2_SIMR && (irq > 128))
+ __raw_writeb(irq - 128, MCFINTC2_SIMR);
+ else if (MCFINTC1_SIMR && (irq > 64))
__raw_writeb(irq - 64, MCFINTC1_SIMR);
else
__raw_writeb(irq, MCFINTC0_SIMR);
@@ -78,7 +80,9 @@ static void intc_irq_unmask(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
- if (MCFINTC1_CIMR && (irq > 64))
+ if (MCFINTC2_CIMR && (irq > 128))
+ __raw_writeb(irq - 128, MCFINTC2_CIMR);
+ else if (MCFINTC1_CIMR && (irq > 64))
__raw_writeb(irq - 64, MCFINTC1_CIMR);
else
__raw_writeb(irq, MCFINTC0_CIMR);
@@ -99,9 +103,11 @@ static unsigned int intc_irq_startup(struct irq_data *d)
unsigned int ebit = irq2ebit(irq);
u8 v;
+#if defined(MCFEPORT_EPDDR)
/* Set EPORT line as input */
v = __raw_readb(MCFEPORT_EPDDR);
__raw_writeb(v & ~(0x1 << ebit), MCFEPORT_EPDDR);
+#endif
/* Set EPORT line as interrupt source */
v = __raw_readb(MCFEPORT_EPIER);
@@ -109,12 +115,13 @@ static unsigned int intc_irq_startup(struct irq_data *d)
}
irq -= MCFINT_VECBASE;
- if (MCFINTC1_ICR0 && (irq > 64))
+ if (MCFINTC2_ICR0 && (irq > 128))
+ __raw_writeb(5, MCFINTC2_ICR0 + irq - 128);
+ else if (MCFINTC1_ICR0 && (irq > 64))
__raw_writeb(5, MCFINTC1_ICR0 + irq - 64);
else
__raw_writeb(5, MCFINTC0_ICR0 + irq);
-
intc_irq_unmask(d);
return 0;
}
@@ -175,8 +182,11 @@ void __init init_IRQ(void)
__raw_writeb(0xff, MCFINTC0_SIMR);
if (MCFINTC1_SIMR)
__raw_writeb(0xff, MCFINTC1_SIMR);
+ if (MCFINTC2_SIMR)
+ __raw_writeb(0xff, MCFINTC2_SIMR);
- eirq = MCFINT_VECBASE + 64 + (MCFINTC1_ICR0 ? 64 : 0);
+ eirq = MCFINT_VECBASE + 64 + (MCFINTC1_ICR0 ? 64 : 0) +
+ (MCFINTC2_ICR0 ? 64 : 0);
for (irq = MCFINT_VECBASE; (irq < eirq); irq++) {
if ((irq >= EINT1) && (irq <= EINT7))
irq_set_chip(irq, &intc_irq_chip_edge_port);
diff --git a/arch/m68k/platform/coldfire/m5206.c b/arch/m68k/platform/coldfire/m5206.c
index a8b81df653f0..6bfbeebd231b 100644
--- a/arch/m68k/platform/coldfire/m5206.c
+++ b/arch/m68k/platform/coldfire/m5206.c
@@ -16,15 +16,6 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-/***************************************************************************/
-
-struct mcf_gpio_chip mcf_gpio_chips[] = {
- MCFGPS(PP, 0, 8, MCFSIM_PADDR, MCFSIM_PADAT, MCFSIM_PADAT),
-};
-
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m520x.c b/arch/m68k/platform/coldfire/m520x.c
index 3264b8883d5f..ea1be0e98ad6 100644
--- a/arch/m68k/platform/coldfire/m520x.c
+++ b/arch/m68k/platform/coldfire/m520x.c
@@ -19,22 +19,102 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
-#include <asm/mcfgpio.h>
+#include <asm/mcfclk.h>
/***************************************************************************/
-struct mcf_gpio_chip mcf_gpio_chips[] = {
- MCFGPS(PIRQ, 0, 8, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
- MCFGPF(CS, 9, 3),
- MCFGPF(FECI2C, 16, 4),
- MCFGPF(QSPI, 24, 4),
- MCFGPF(TIMER, 32, 4),
- MCFGPF(UART, 40, 8),
- MCFGPF(FECH, 48, 8),
- MCFGPF(FECL, 56, 8),
+DEFINE_CLK(0, "flexbus", 2, MCF_CLK);
+DEFINE_CLK(0, "fec.0", 12, MCF_CLK);
+DEFINE_CLK(0, "edma", 17, MCF_CLK);
+DEFINE_CLK(0, "intc.0", 18, MCF_CLK);
+DEFINE_CLK(0, "iack.0", 21, MCF_CLK);
+DEFINE_CLK(0, "mcfi2c.0", 22, MCF_CLK);
+DEFINE_CLK(0, "mcfqspi.0", 23, MCF_CLK);
+DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfuart.2", 26, MCF_BUSCLK);
+DEFINE_CLK(0, "mcftmr.0", 28, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.1", 29, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.2", 30, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.3", 31, MCF_CLK);
+
+DEFINE_CLK(0, "mcfpit.0", 32, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.1", 33, MCF_CLK);
+DEFINE_CLK(0, "mcfeport.0", 34, MCF_CLK);
+DEFINE_CLK(0, "mcfwdt.0", 35, MCF_CLK);
+DEFINE_CLK(0, "pll.0", 36, MCF_CLK);
+DEFINE_CLK(0, "sys.0", 40, MCF_BUSCLK);
+DEFINE_CLK(0, "gpio.0", 41, MCF_BUSCLK);
+DEFINE_CLK(0, "sdram.0", 42, MCF_CLK);
+
+struct clk *mcf_clks[] = {
+ &__clk_0_2, /* flexbus */
+ &__clk_0_12, /* fec.0 */
+ &__clk_0_17, /* edma */
+ &__clk_0_18, /* intc.0 */
+ &__clk_0_21, /* iack.0 */
+ &__clk_0_22, /* mcfi2c.0 */
+ &__clk_0_23, /* mcfqspi.0 */
+ &__clk_0_24, /* mcfuart.0 */
+ &__clk_0_25, /* mcfuart.1 */
+ &__clk_0_26, /* mcfuart.2 */
+ &__clk_0_28, /* mcftmr.0 */
+ &__clk_0_29, /* mcftmr.1 */
+ &__clk_0_30, /* mcftmr.2 */
+ &__clk_0_31, /* mcftmr.3 */
+
+ &__clk_0_32, /* mcfpit.0 */
+ &__clk_0_33, /* mcfpit.1 */
+ &__clk_0_34, /* mcfeport.0 */
+ &__clk_0_35, /* mcfwdt.0 */
+ &__clk_0_36, /* pll.0 */
+ &__clk_0_40, /* sys.0 */
+ &__clk_0_41, /* gpio.0 */
+ &__clk_0_42, /* sdram.0 */
+NULL,
};
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
+static struct clk * const enable_clks[] __initconst = {
+ &__clk_0_2, /* flexbus */
+ &__clk_0_18, /* intc.0 */
+ &__clk_0_21, /* iack.0 */
+ &__clk_0_24, /* mcfuart.0 */
+ &__clk_0_25, /* mcfuart.1 */
+ &__clk_0_26, /* mcfuart.2 */
+
+ &__clk_0_32, /* mcfpit.0 */
+ &__clk_0_33, /* mcfpit.1 */
+ &__clk_0_34, /* mcfeport.0 */
+ &__clk_0_36, /* pll.0 */
+ &__clk_0_40, /* sys.0 */
+ &__clk_0_41, /* gpio.0 */
+ &__clk_0_42, /* sdram.0 */
+};
+
+static struct clk * const disable_clks[] __initconst = {
+ &__clk_0_12, /* fec.0 */
+ &__clk_0_17, /* edma */
+ &__clk_0_22, /* mcfi2c.0 */
+ &__clk_0_23, /* mcfqspi.0 */
+ &__clk_0_28, /* mcftmr.0 */
+ &__clk_0_29, /* mcftmr.1 */
+ &__clk_0_30, /* mcftmr.2 */
+ &__clk_0_31, /* mcftmr.3 */
+ &__clk_0_35, /* mcfwdt.0 */
+};
+
+
+static void __init m520x_clk_init(void)
+{
+ unsigned i;
+
+ /* make sure these clocks are enabled */
+ for (i = 0; i < ARRAY_SIZE(enable_clks); ++i)
+ __clk_init_enabled(enable_clks[i]);
+ /* make sure these clocks are disabled */
+ for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
+ __clk_init_disabled(disable_clks[i]);
+}
/***************************************************************************/
@@ -93,6 +173,7 @@ static void __init m520x_fec_init(void)
void __init config_BSP(char *commandp, int size)
{
mach_sched_init = hw_timer_init;
+ m520x_clk_init();
m520x_uarts_init();
m520x_fec_init();
#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
diff --git a/arch/m68k/platform/coldfire/m523x.c b/arch/m68k/platform/coldfire/m523x.c
index 5d57a4249412..d47dfd8f50a2 100644
--- a/arch/m68k/platform/coldfire/m523x.c
+++ b/arch/m68k/platform/coldfire/m523x.c
@@ -19,28 +19,6 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-/***************************************************************************/
-
-struct mcf_gpio_chip mcf_gpio_chips[] = {
- MCFGPS(PIRQ, 1, 7, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
- MCFGPF(ADDR, 13, 3),
- MCFGPF(DATAH, 16, 8),
- MCFGPF(DATAL, 24, 8),
- MCFGPF(BUSCTL, 32, 8),
- MCFGPF(BS, 40, 4),
- MCFGPF(CS, 49, 7),
- MCFGPF(SDRAM, 56, 6),
- MCFGPF(FECI2C, 64, 4),
- MCFGPF(UARTH, 72, 2),
- MCFGPF(UARTL, 80, 8),
- MCFGPF(QSPI, 88, 5),
- MCFGPF(TIMER, 96, 8),
- MCFGPF(ETPU, 104, 3),
-};
-
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m5249.c b/arch/m68k/platform/coldfire/m5249.c
index fdfa1edfd1ac..300e729a58d0 100644
--- a/arch/m68k/platform/coldfire/m5249.c
+++ b/arch/m68k/platform/coldfire/m5249.c
@@ -16,16 +16,6 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-/***************************************************************************/
-
-struct mcf_gpio_chip mcf_gpio_chips[] = {
- MCFGPS(GPIO0, 0, 32, MCFSIM2_GPIOENABLE, MCFSIM2_GPIOWRITE, MCFSIM2_GPIOREAD),
- MCFGPS(GPIO1, 32, 32, MCFSIM2_GPIO1ENABLE, MCFSIM2_GPIO1WRITE, MCFSIM2_GPIO1READ),
-};
-
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m525x.c b/arch/m68k/platform/coldfire/m525x.c
new file mode 100644
index 000000000000..8ce905f9b84f
--- /dev/null
+++ b/arch/m68k/platform/coldfire/m525x.c
@@ -0,0 +1,66 @@
+/***************************************************************************/
+
+/*
+ * 525x.c
+ *
+ * Copyright (C) 2012, Steven King <sfking@fdwdc.com>
+ */
+
+/***************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <asm/machdep.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+
+/***************************************************************************/
+
+static void __init m525x_qspi_init(void)
+{
+#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
+ /* set the GPIO function for the qspi cs gpios */
+ /* FIXME: replace with pinmux/pinctl support */
+ u32 f = readl(MCFSIM2_GPIOFUNC);
+ f |= (1 << MCFQSPI_CS2) | (1 << MCFQSPI_CS1) | (1 << MCFQSPI_CS0);
+ writel(f, MCFSIM2_GPIOFUNC);
+
+ /* QSPI irq setup */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL4 | MCFSIM_ICR_PRI0,
+ MCF_MBAR + MCFSIM_QSPIICR);
+ mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI);
+#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */
+}
+
+static void __init m525x_i2c_init(void)
+{
+#if IS_ENABLED(CONFIG_I2C_COLDFIRE)
+ u32 r;
+
+ /* first I2C controller uses regular irq setup */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
+ MCF_MBAR + MCFSIM_I2CICR);
+ mcf_mapirq2imr(MCF_IRQ_I2C0, MCFINTC_I2C);
+
+ /* second I2C controller is completely different */
+ r = readl(MCFINTC2_INTPRI_REG(MCF_IRQ_I2C1));
+ r &= ~MCFINTC2_INTPRI_BITS(0xf, MCF_IRQ_I2C1);
+ r |= MCFINTC2_INTPRI_BITS(0x5, MCF_IRQ_I2C1);
+ writel(r, MCFINTC2_INTPRI_REG(MCF_IRQ_I2C1));
+#endif /* IS_ENABLED(CONFIG_I2C_COLDFIRE) */
+}
+
+/***************************************************************************/
+
+void __init config_BSP(char *commandp, int size)
+{
+ mach_sched_init = hw_timer_init;
+
+ m525x_qspi_init();
+ m525x_i2c_init();
+}
+
+/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m5272.c b/arch/m68k/platform/coldfire/m5272.c
index 43e36060da18..e68bc7a148eb 100644
--- a/arch/m68k/platform/coldfire/m5272.c
+++ b/arch/m68k/platform/coldfire/m5272.c
@@ -19,7 +19,6 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
-#include <asm/mcfgpio.h>
/***************************************************************************/
@@ -31,16 +30,6 @@ unsigned char ledbank = 0xff;
/***************************************************************************/
-struct mcf_gpio_chip mcf_gpio_chips[] = {
- MCFGPS(PA, 0, 16, MCFSIM_PADDR, MCFSIM_PADAT, MCFSIM_PADAT),
- MCFGPS(PB, 16, 16, MCFSIM_PBDDR, MCFSIM_PBDAT, MCFSIM_PBDAT),
- MCFGPS(Pc, 32, 16, MCFSIM_PCDDR, MCFSIM_PCDAT, MCFSIM_PCDAT),
-};
-
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
-
-/***************************************************************************/
-
static void __init m5272_uarts_init(void)
{
u32 v;
diff --git a/arch/m68k/platform/coldfire/m527x.c b/arch/m68k/platform/coldfire/m527x.c
index 9b0b66aabd1b..b3cb378c5e94 100644
--- a/arch/m68k/platform/coldfire/m527x.c
+++ b/arch/m68k/platform/coldfire/m527x.c
@@ -20,49 +20,6 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
-#include <asm/mcfgpio.h>
-
-/***************************************************************************/
-
-struct mcf_gpio_chip mcf_gpio_chips[] = {
-#if defined(CONFIG_M5271)
- MCFGPS(PIRQ, 1, 7, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
- MCFGPF(ADDR, 13, 3),
- MCFGPF(DATAH, 16, 8),
- MCFGPF(DATAL, 24, 8),
- MCFGPF(BUSCTL, 32, 8),
- MCFGPF(BS, 40, 4),
- MCFGPF(CS, 49, 7),
- MCFGPF(SDRAM, 56, 6),
- MCFGPF(FECI2C, 64, 4),
- MCFGPF(UARTH, 72, 2),
- MCFGPF(UARTL, 80, 8),
- MCFGPF(QSPI, 88, 5),
- MCFGPF(TIMER, 96, 8),
-#elif defined(CONFIG_M5275)
- MCFGPS(PIRQ, 1, 7, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
- MCFGPF(BUSCTL, 8, 8),
- MCFGPF(ADDR, 21, 3),
- MCFGPF(CS, 25, 7),
- MCFGPF(FEC0H, 32, 8),
- MCFGPF(FEC0L, 40, 8),
- MCFGPF(FECI2C, 48, 6),
- MCFGPF(QSPI, 56, 7),
- MCFGPF(SDRAM, 64, 8),
- MCFGPF(TIMERH, 72, 4),
- MCFGPF(TIMERL, 80, 4),
- MCFGPF(UARTL, 88, 8),
- MCFGPF(FEC1H, 96, 8),
- MCFGPF(FEC1L, 104, 8),
- MCFGPF(BS, 114, 2),
- MCFGPF(IRQ, 121, 7),
- MCFGPF(USBH, 128, 1),
- MCFGPF(USBL, 136, 8),
- MCFGPF(UARTH, 144, 4),
-#endif
-};
-
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m528x.c b/arch/m68k/platform/coldfire/m528x.c
index 7ed1276b29dc..f1319e5d2546 100644
--- a/arch/m68k/platform/coldfire/m528x.c
+++ b/arch/m68k/platform/coldfire/m528x.c
@@ -21,37 +21,6 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
-#include <asm/mcfgpio.h>
-
-/***************************************************************************/
-
-struct mcf_gpio_chip mcf_gpio_chips[] = {
- MCFGPS(NQ, 1, 7, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
- MCFGPS(TA, 8, 4, MCFGPTA_GPTDDR, MCFGPTA_GPTPORT, MCFGPTB_GPTPORT),
- MCFGPS(TB, 16, 4, MCFGPTB_GPTDDR, MCFGPTB_GPTPORT, MCFGPTB_GPTPORT),
- MCFGPS(QA, 24, 4, MCFQADC_DDRQA, MCFQADC_PORTQA, MCFQADC_PORTQA),
- MCFGPS(QB, 32, 4, MCFQADC_DDRQB, MCFQADC_PORTQB, MCFQADC_PORTQB),
- MCFGPF(A, 40, 8),
- MCFGPF(B, 48, 8),
- MCFGPF(C, 56, 8),
- MCFGPF(D, 64, 8),
- MCFGPF(E, 72, 8),
- MCFGPF(F, 80, 8),
- MCFGPF(G, 88, 8),
- MCFGPF(H, 96, 8),
- MCFGPF(J, 104, 8),
- MCFGPF(DD, 112, 8),
- MCFGPF(EH, 120, 8),
- MCFGPF(EL, 128, 8),
- MCFGPF(AS, 136, 6),
- MCFGPF(QS, 144, 7),
- MCFGPF(SD, 152, 6),
- MCFGPF(TC, 160, 4),
- MCFGPF(TD, 168, 4),
- MCFGPF(UA, 176, 4),
-};
-
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
/***************************************************************************/
@@ -74,7 +43,7 @@ static void __init m528x_uarts_init(void)
/* make sure PUAPAR is set for UART0 and UART1 */
port = readb(MCF5282_GPIO_PUAPAR);
port |= 0x03 | (0x03 << 2);
- writeb(port, MCF5282_GPIO_PUAPAR);
+ writeb(port, MCFGPIO_PUAPAR);
}
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m5307.c b/arch/m68k/platform/coldfire/m5307.c
index 93b484976ab3..a568d2870d15 100644
--- a/arch/m68k/platform/coldfire/m5307.c
+++ b/arch/m68k/platform/coldfire/m5307.c
@@ -16,7 +16,6 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
#include <asm/mcfwdebug.h>
/***************************************************************************/
@@ -29,14 +28,6 @@ unsigned char ledbank = 0xff;
/***************************************************************************/
-struct mcf_gpio_chip mcf_gpio_chips[] = {
- MCFGPS(PP, 0, 16, MCFSIM_PADDR, MCFSIM_PADAT, MCFSIM_PADAT),
-};
-
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
-
-/***************************************************************************/
-
void __init config_BSP(char *commandp, int size)
{
#if defined(CONFIG_NETtel) || \
diff --git a/arch/m68k/platform/coldfire/m532x.c b/arch/m68k/platform/coldfire/m532x.c
index 5394223639f8..4819a44991ed 100644
--- a/arch/m68k/platform/coldfire/m532x.c
+++ b/arch/m68k/platform/coldfire/m532x.c
@@ -26,32 +26,144 @@
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/mcfdma.h>
-#include <asm/mcfgpio.h>
#include <asm/mcfwdebug.h>
+#include <asm/mcfclk.h>
/***************************************************************************/
-struct mcf_gpio_chip mcf_gpio_chips[] = {
- MCFGPS(PIRQ, 0, 8, MCFEPORT_EPDDR, MCFEPORT_EPDR, MCFEPORT_EPPDR),
- MCFGPF(FECH, 8, 8),
- MCFGPF(FECL, 16, 8),
- MCFGPF(SSI, 24, 5),
- MCFGPF(BUSCTL, 32, 4),
- MCFGPF(BE, 40, 4),
- MCFGPF(CS, 49, 5),
- MCFGPF(PWM, 58, 4),
- MCFGPF(FECI2C, 64, 4),
- MCFGPF(UART, 72, 8),
- MCFGPF(QSPI, 80, 6),
- MCFGPF(TIMER, 88, 4),
- MCFGPF(LCDDATAH, 96, 2),
- MCFGPF(LCDDATAM, 104, 8),
- MCFGPF(LCDDATAL, 112, 8),
- MCFGPF(LCDCTLH, 120, 1),
- MCFGPF(LCDCTLL, 128, 8),
+DEFINE_CLK(0, "flexbus", 2, MCF_CLK);
+DEFINE_CLK(0, "mcfcan.0", 8, MCF_CLK);
+DEFINE_CLK(0, "fec.0", 12, MCF_CLK);
+DEFINE_CLK(0, "edma", 17, MCF_CLK);
+DEFINE_CLK(0, "intc.0", 18, MCF_CLK);
+DEFINE_CLK(0, "intc.1", 19, MCF_CLK);
+DEFINE_CLK(0, "iack.0", 21, MCF_CLK);
+DEFINE_CLK(0, "mcfi2c.0", 22, MCF_CLK);
+DEFINE_CLK(0, "mcfqspi.0", 23, MCF_CLK);
+DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfuart.2", 26, MCF_BUSCLK);
+DEFINE_CLK(0, "mcftmr.0", 28, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.1", 29, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.2", 30, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.3", 31, MCF_CLK);
+
+DEFINE_CLK(0, "mcfpit.0", 32, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.1", 33, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.2", 34, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.3", 35, MCF_CLK);
+DEFINE_CLK(0, "mcfpwm.0", 36, MCF_CLK);
+DEFINE_CLK(0, "mcfeport.0", 37, MCF_CLK);
+DEFINE_CLK(0, "mcfwdt.0", 38, MCF_CLK);
+DEFINE_CLK(0, "sys.0", 40, MCF_BUSCLK);
+DEFINE_CLK(0, "gpio.0", 41, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfrtc.0", 42, MCF_CLK);
+DEFINE_CLK(0, "mcflcd.0", 43, MCF_CLK);
+DEFINE_CLK(0, "mcfusb-otg.0", 44, MCF_CLK);
+DEFINE_CLK(0, "mcfusb-host.0", 45, MCF_CLK);
+DEFINE_CLK(0, "sdram.0", 46, MCF_CLK);
+DEFINE_CLK(0, "ssi.0", 47, MCF_CLK);
+DEFINE_CLK(0, "pll.0", 48, MCF_CLK);
+
+DEFINE_CLK(1, "mdha.0", 32, MCF_CLK);
+DEFINE_CLK(1, "skha.0", 33, MCF_CLK);
+DEFINE_CLK(1, "rng.0", 34, MCF_CLK);
+
+struct clk *mcf_clks[] = {
+ &__clk_0_2, /* flexbus */
+ &__clk_0_8, /* mcfcan.0 */
+ &__clk_0_12, /* fec.0 */
+ &__clk_0_17, /* edma */
+ &__clk_0_18, /* intc.0 */
+ &__clk_0_19, /* intc.1 */
+ &__clk_0_21, /* iack.0 */
+ &__clk_0_22, /* mcfi2c.0 */
+ &__clk_0_23, /* mcfqspi.0 */
+ &__clk_0_24, /* mcfuart.0 */
+ &__clk_0_25, /* mcfuart.1 */
+ &__clk_0_26, /* mcfuart.2 */
+ &__clk_0_28, /* mcftmr.0 */
+ &__clk_0_29, /* mcftmr.1 */
+ &__clk_0_30, /* mcftmr.2 */
+ &__clk_0_31, /* mcftmr.3 */
+
+ &__clk_0_32, /* mcfpit.0 */
+ &__clk_0_33, /* mcfpit.1 */
+ &__clk_0_34, /* mcfpit.2 */
+ &__clk_0_35, /* mcfpit.3 */
+ &__clk_0_36, /* mcfpwm.0 */
+ &__clk_0_37, /* mcfeport.0 */
+ &__clk_0_38, /* mcfwdt.0 */
+ &__clk_0_40, /* sys.0 */
+ &__clk_0_41, /* gpio.0 */
+ &__clk_0_42, /* mcfrtc.0 */
+ &__clk_0_43, /* mcflcd.0 */
+ &__clk_0_44, /* mcfusb-otg.0 */
+ &__clk_0_45, /* mcfusb-host.0 */
+ &__clk_0_46, /* sdram.0 */
+ &__clk_0_47, /* ssi.0 */
+ &__clk_0_48, /* pll.0 */
+
+ &__clk_1_32, /* mdha.0 */
+ &__clk_1_33, /* skha.0 */
+ &__clk_1_34, /* rng.0 */
+ NULL,
+};
+
+static struct clk * const enable_clks[] __initconst = {
+ &__clk_0_2, /* flexbus */
+ &__clk_0_18, /* intc.0 */
+ &__clk_0_19, /* intc.1 */
+ &__clk_0_21, /* iack.0 */
+ &__clk_0_24, /* mcfuart.0 */
+ &__clk_0_25, /* mcfuart.1 */
+ &__clk_0_26, /* mcfuart.2 */
+
+ &__clk_0_32, /* mcfpit.0 */
+ &__clk_0_33, /* mcfpit.1 */
+ &__clk_0_37, /* mcfeport.0 */
+ &__clk_0_40, /* sys.0 */
+ &__clk_0_41, /* gpio.0 */
+ &__clk_0_46, /* sdram.0 */
+ &__clk_0_48, /* pll.0 */
+};
+
+static struct clk * const disable_clks[] __initconst = {
+ &__clk_0_8, /* mcfcan.0 */
+ &__clk_0_12, /* fec.0 */
+ &__clk_0_17, /* edma */
+ &__clk_0_22, /* mcfi2c.0 */
+ &__clk_0_23, /* mcfqspi.0 */
+ &__clk_0_28, /* mcftmr.0 */
+ &__clk_0_29, /* mcftmr.1 */
+ &__clk_0_30, /* mcftmr.2 */
+ &__clk_0_31, /* mcftmr.3 */
+ &__clk_0_34, /* mcfpit.2 */
+ &__clk_0_35, /* mcfpit.3 */
+ &__clk_0_36, /* mcfpwm.0 */
+ &__clk_0_38, /* mcfwdt.0 */
+ &__clk_0_42, /* mcfrtc.0 */
+ &__clk_0_43, /* mcflcd.0 */
+ &__clk_0_44, /* mcfusb-otg.0 */
+ &__clk_0_45, /* mcfusb-host.0 */
+ &__clk_0_47, /* ssi.0 */
+ &__clk_1_32, /* mdha.0 */
+ &__clk_1_33, /* skha.0 */
+ &__clk_1_34, /* rng.0 */
};
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
+
+static void __init m532x_clk_init(void)
+{
+ unsigned i;
+
+ /* make sure these clocks are enabled */
+ for (i = 0; i < ARRAY_SIZE(enable_clks); ++i)
+ __clk_init_enabled(enable_clks[i]);
+ /* make sure these clocks are disabled */
+ for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
+ __clk_init_disabled(disable_clks[i]);
+}
/***************************************************************************/
@@ -98,8 +210,8 @@ void __init config_BSP(char *commandp, int size)
memset(commandp, 0, size);
}
#endif
-
mach_sched_init = hw_timer_init;
+ m532x_clk_init();
m532x_uarts_init();
m532x_fec_init();
#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
diff --git a/arch/m68k/platform/coldfire/m5407.c b/arch/m68k/platform/coldfire/m5407.c
index faa6680b3404..bb6c746ae819 100644
--- a/arch/m68k/platform/coldfire/m5407.c
+++ b/arch/m68k/platform/coldfire/m5407.c
@@ -16,15 +16,6 @@
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
-#include <asm/mcfgpio.h>
-
-/***************************************************************************/
-
-struct mcf_gpio_chip mcf_gpio_chips[] = {
- MCFGPS(PP, 0, 16, MCFSIM_PADDR, MCFSIM_PADAT, MCFSIM_PADAT),
-};
-
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
/***************************************************************************/
diff --git a/arch/m68k/platform/coldfire/m5441x.c b/arch/m68k/platform/coldfire/m5441x.c
new file mode 100644
index 000000000000..98a13cce93d8
--- /dev/null
+++ b/arch/m68k/platform/coldfire/m5441x.c
@@ -0,0 +1,261 @@
+/*
+ * m5441x.c -- support for Coldfire m5441x processors
+ *
+ * (C) Copyright Steven King <sfking@fdwdc.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <asm/machdep.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfuart.h>
+#include <asm/mcfdma.h>
+#include <asm/mcfclk.h>
+
+DEFINE_CLK(0, "flexbus", 2, MCF_CLK);
+DEFINE_CLK(0, "mcfcan.0", 8, MCF_CLK);
+DEFINE_CLK(0, "mcfcan.1", 9, MCF_CLK);
+DEFINE_CLK(0, "mcfi2c.1", 14, MCF_CLK);
+DEFINE_CLK(0, "mcfdspi.1", 15, MCF_CLK);
+DEFINE_CLK(0, "edma", 17, MCF_CLK);
+DEFINE_CLK(0, "intc.0", 18, MCF_CLK);
+DEFINE_CLK(0, "intc.1", 19, MCF_CLK);
+DEFINE_CLK(0, "intc.2", 20, MCF_CLK);
+DEFINE_CLK(0, "mcfi2c.0", 22, MCF_CLK);
+DEFINE_CLK(0, "mcfdspi.0", 23, MCF_CLK);
+DEFINE_CLK(0, "mcfuart.0", 24, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfuart.1", 25, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfuart.2", 26, MCF_BUSCLK);
+DEFINE_CLK(0, "mcfuart.3", 27, MCF_BUSCLK);
+DEFINE_CLK(0, "mcftmr.0", 28, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.1", 29, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.2", 30, MCF_CLK);
+DEFINE_CLK(0, "mcftmr.3", 31, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.0", 32, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.1", 33, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.2", 34, MCF_CLK);
+DEFINE_CLK(0, "mcfpit.3", 35, MCF_CLK);
+DEFINE_CLK(0, "mcfeport.0", 37, MCF_CLK);
+DEFINE_CLK(0, "mcfadc.0", 38, MCF_CLK);
+DEFINE_CLK(0, "mcfdac.0", 39, MCF_CLK);
+DEFINE_CLK(0, "mcfrtc.0", 42, MCF_CLK);
+DEFINE_CLK(0, "mcfsim.0", 43, MCF_CLK);
+DEFINE_CLK(0, "mcfusb-otg.0", 44, MCF_CLK);
+DEFINE_CLK(0, "mcfusb-host.0", 45, MCF_CLK);
+DEFINE_CLK(0, "mcfddr-sram.0", 46, MCF_CLK);
+DEFINE_CLK(0, "mcfssi.0", 47, MCF_CLK);
+DEFINE_CLK(0, "pll.0", 48, MCF_CLK);
+DEFINE_CLK(0, "mcfrng.0", 49, MCF_CLK);
+DEFINE_CLK(0, "mcfssi.1", 50, MCF_CLK);
+DEFINE_CLK(0, "mcfsdhc.0", 51, MCF_CLK);
+DEFINE_CLK(0, "enet-fec.0", 53, MCF_CLK);
+DEFINE_CLK(0, "enet-fec.1", 54, MCF_CLK);
+DEFINE_CLK(0, "switch.0", 55, MCF_CLK);
+DEFINE_CLK(0, "switch.1", 56, MCF_CLK);
+DEFINE_CLK(0, "nand.0", 63, MCF_CLK);
+
+DEFINE_CLK(1, "mcfow.0", 2, MCF_CLK);
+DEFINE_CLK(1, "mcfi2c.2", 4, MCF_CLK);
+DEFINE_CLK(1, "mcfi2c.3", 5, MCF_CLK);
+DEFINE_CLK(1, "mcfi2c.4", 6, MCF_CLK);
+DEFINE_CLK(1, "mcfi2c.5", 7, MCF_CLK);
+DEFINE_CLK(1, "mcfuart.4", 24, MCF_BUSCLK);
+DEFINE_CLK(1, "mcfuart.5", 25, MCF_BUSCLK);
+DEFINE_CLK(1, "mcfuart.6", 26, MCF_BUSCLK);
+DEFINE_CLK(1, "mcfuart.7", 27, MCF_BUSCLK);
+DEFINE_CLK(1, "mcfuart.8", 28, MCF_BUSCLK);
+DEFINE_CLK(1, "mcfuart.9", 29, MCF_BUSCLK);
+DEFINE_CLK(1, "mcfpwm.0", 34, MCF_BUSCLK);
+DEFINE_CLK(1, "sys.0", 36, MCF_BUSCLK);
+DEFINE_CLK(1, "gpio.0", 37, MCF_BUSCLK);
+
+struct clk *mcf_clks[] = {
+ &__clk_0_2,
+ &__clk_0_8,
+ &__clk_0_9,
+ &__clk_0_14,
+ &__clk_0_15,
+ &__clk_0_17,
+ &__clk_0_18,
+ &__clk_0_19,
+ &__clk_0_20,
+ &__clk_0_22,
+ &__clk_0_23,
+ &__clk_0_24,
+ &__clk_0_25,
+ &__clk_0_26,
+ &__clk_0_27,
+ &__clk_0_28,
+ &__clk_0_29,
+ &__clk_0_30,
+ &__clk_0_31,
+ &__clk_0_32,
+ &__clk_0_33,
+ &__clk_0_34,
+ &__clk_0_35,
+ &__clk_0_37,
+ &__clk_0_38,
+ &__clk_0_39,
+ &__clk_0_42,
+ &__clk_0_43,
+ &__clk_0_44,
+ &__clk_0_45,
+ &__clk_0_46,
+ &__clk_0_47,
+ &__clk_0_48,
+ &__clk_0_49,
+ &__clk_0_50,
+ &__clk_0_51,
+ &__clk_0_53,
+ &__clk_0_54,
+ &__clk_0_55,
+ &__clk_0_56,
+ &__clk_0_63,
+
+ &__clk_1_2,
+ &__clk_1_4,
+ &__clk_1_5,
+ &__clk_1_6,
+ &__clk_1_7,
+ &__clk_1_24,
+ &__clk_1_25,
+ &__clk_1_26,
+ &__clk_1_27,
+ &__clk_1_28,
+ &__clk_1_29,
+ &__clk_1_34,
+ &__clk_1_36,
+ &__clk_1_37,
+ NULL,
+};
+
+
+static struct clk * const enable_clks[] __initconst = {
+ /* make sure these clocks are enabled */
+ &__clk_0_18, /* intc0 */
+ &__clk_0_19, /* intc0 */
+ &__clk_0_20, /* intc0 */
+ &__clk_0_24, /* uart0 */
+ &__clk_0_25, /* uart1 */
+ &__clk_0_26, /* uart2 */
+ &__clk_0_27, /* uart3 */
+
+ &__clk_0_33, /* pit.1 */
+ &__clk_0_37, /* eport */
+ &__clk_0_48, /* pll */
+
+ &__clk_1_36, /* CCM/reset module/Power management */
+ &__clk_1_37, /* gpio */
+};
+static struct clk * const disable_clks[] __initconst = {
+ &__clk_0_8, /* can.0 */
+ &__clk_0_9, /* can.1 */
+ &__clk_0_14, /* i2c.1 */
+ &__clk_0_15, /* dspi.1 */
+ &__clk_0_17, /* eDMA */
+ &__clk_0_22, /* i2c.0 */
+ &__clk_0_23, /* dspi.0 */
+ &__clk_0_28, /* tmr.1 */
+ &__clk_0_29, /* tmr.2 */
+ &__clk_0_30, /* tmr.2 */
+ &__clk_0_31, /* tmr.3 */
+ &__clk_0_32, /* pit.0 */
+ &__clk_0_34, /* pit.2 */
+ &__clk_0_35, /* pit.3 */
+ &__clk_0_38, /* adc */
+ &__clk_0_39, /* dac */
+ &__clk_0_44, /* usb otg */
+ &__clk_0_45, /* usb host */
+ &__clk_0_47, /* ssi.0 */
+ &__clk_0_49, /* rng */
+ &__clk_0_50, /* ssi.1 */
+ &__clk_0_51, /* eSDHC */
+ &__clk_0_53, /* enet-fec */
+ &__clk_0_54, /* enet-fec */
+ &__clk_0_55, /* switch.0 */
+ &__clk_0_56, /* switch.1 */
+
+ &__clk_1_2, /* 1-wire */
+ &__clk_1_4, /* i2c.2 */
+ &__clk_1_5, /* i2c.3 */
+ &__clk_1_6, /* i2c.4 */
+ &__clk_1_7, /* i2c.5 */
+ &__clk_1_24, /* uart 4 */
+ &__clk_1_25, /* uart 5 */
+ &__clk_1_26, /* uart 6 */
+ &__clk_1_27, /* uart 7 */
+ &__clk_1_28, /* uart 8 */
+ &__clk_1_29, /* uart 9 */
+};
+
+static void __init m5441x_clk_init(void)
+{
+ unsigned i;
+
+ for (i = 0; i < ARRAY_SIZE(enable_clks); ++i)
+ __clk_init_enabled(enable_clks[i]);
+ /* make sure these clocks are disabled */
+ for (i = 0; i < ARRAY_SIZE(disable_clks); ++i)
+ __clk_init_disabled(disable_clks[i]);
+}
+
+static void __init m5441x_uarts_init(void)
+{
+ __raw_writeb(0x0f, MCFGPIO_PAR_UART0);
+ __raw_writeb(0x00, MCFGPIO_PAR_UART1);
+ __raw_writeb(0x00, MCFGPIO_PAR_UART2);
+}
+
+static void __init m5441x_fec_init(void)
+{
+ __raw_writeb(0x03, MCFGPIO_PAR_FEC);
+}
+
+void __init config_BSP(char *commandp, int size)
+{
+ m5441x_clk_init();
+ mach_sched_init = hw_timer_init;
+ m5441x_uarts_init();
+ m5441x_fec_init();
+}
+
+
+#if IS_ENABLED(CONFIG_RTC_DRV_M5441x)
+static struct resource m5441x_rtc_resources[] = {
+ {
+ .start = MCFRTC_BASE,
+ .end = MCFRTC_BASE + MCFRTC_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_RTC,
+ .end = MCF_IRQ_RTC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device m5441x_rtc = {
+ .name = "mcfrtc",
+ .id = 0,
+ .resource = m5441x_rtc_resources,
+ .num_resources = ARRAY_SIZE(m5441x_rtc_resources),
+};
+#endif
+
+static struct platform_device *m5441x_devices[] __initdata = {
+#if IS_ENABLED(CONFIG_RTC_DRV_M5441x)
+ &m5441x_rtc,
+#endif
+};
+
+static int __init init_BSP(void)
+{
+ platform_add_devices(m5441x_devices, ARRAY_SIZE(m5441x_devices));
+ return 0;
+}
+
+arch_initcall(init_BSP);
diff --git a/arch/m68k/platform/coldfire/m54xx.c b/arch/m68k/platform/coldfire/m54xx.c
index 20672dadb252..2081c6cbb3de 100644
--- a/arch/m68k/platform/coldfire/m54xx.c
+++ b/arch/m68k/platform/coldfire/m54xx.c
@@ -21,19 +21,12 @@
#include <asm/m54xxsim.h>
#include <asm/mcfuart.h>
#include <asm/m54xxgpt.h>
-#include <asm/mcfgpio.h>
#ifdef CONFIG_MMU
#include <asm/mmu_context.h>
#endif
/***************************************************************************/
-struct mcf_gpio_chip mcf_gpio_chips[] = { };
-
-unsigned int mcf_gpio_chips_size = ARRAY_SIZE(mcf_gpio_chips);
-
-/***************************************************************************/
-
static void __init m54xx_uarts_init(void)
{
/* enable io pins */
diff --git a/arch/m68k/platform/coldfire/mcf8390.c b/arch/m68k/platform/coldfire/mcf8390.c
new file mode 100644
index 000000000000..23a6874a3248
--- /dev/null
+++ b/arch/m68k/platform/coldfire/mcf8390.c
@@ -0,0 +1,38 @@
+/*
+ * mcf8390.c -- platform support for 8390 ethernet on many boards
+ *
+ * (C) Copyright 2012, Greg Ungerer <gerg@uclinux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/resource.h>
+#include <linux/platform_device.h>
+#include <asm/mcf8390.h>
+
+static struct resource mcf8390_resources[] = {
+ {
+ .start = NE2000_ADDR,
+ .end = NE2000_ADDR + NE2000_ADDRSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = NE2000_IRQ_VECTOR,
+ .end = NE2000_IRQ_VECTOR,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static int __init mcf8390_platform_init(void)
+{
+ platform_device_register_simple("mcf8390", -1, mcf8390_resources,
+ ARRAY_SIZE(mcf8390_resources));
+ return 0;
+}
+
+arch_initcall(mcf8390_platform_init);
diff --git a/arch/m68k/platform/coldfire/pci.c b/arch/m68k/platform/coldfire/pci.c
new file mode 100644
index 000000000000..553210d3d4c1
--- /dev/null
+++ b/arch/m68k/platform/coldfire/pci.c
@@ -0,0 +1,327 @@
+/*
+ * pci.c -- PCI bus support for ColdFire processors
+ *
+ * (C) Copyright 2012, Greg Ungerer <gerg@uclinux.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/m54xxpci.h>
+
+/*
+ * Memory and IO mappings. We use a 1:1 mapping for local host memory to
+ * PCI bus memory (no reason not to really). IO space doesn't matter, we
+ * always use access functions for that. The device configuration space is
+ * mapped over the IO map space when we enable it in the PCICAR register.
+ */
+#define PCI_MEM_PA 0xf0000000 /* Host physical address */
+#define PCI_MEM_BA 0xf0000000 /* Bus physical address */
+#define PCI_MEM_SIZE 0x08000000 /* 128 MB */
+#define PCI_MEM_MASK (PCI_MEM_SIZE - 1)
+
+#define PCI_IO_PA 0xf8000000 /* Host physical address */
+#define PCI_IO_BA 0x00000000 /* Bus physical address */
+#define PCI_IO_SIZE 0x00010000 /* 64k */
+#define PCI_IO_MASK (PCI_IO_SIZE - 1)
+
+static struct pci_bus *rootbus;
+static unsigned long iospace;
+
+/*
+ * We need to be carefull probing on bus 0 (directly connected to host
+ * bridge). We should only acccess the well defined possible devices in
+ * use, ignore aliases and the like.
+ */
+static unsigned char mcf_host_slot2sid[32] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 2, 0, 3, 4, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static unsigned char mcf_host_irq[] = {
+ 0, 69, 69, 71, 71,
+};
+
+
+static inline void syncio(void)
+{
+ /* The ColdFire "nop" instruction waits for all bus IO to complete */
+ __asm__ __volatile__ ("nop");
+}
+
+/*
+ * Configuration space access functions. Configuration space access is
+ * through the IO mapping window, enabling it via the PCICAR register.
+ */
+static unsigned long mcf_mk_pcicar(int bus, unsigned int devfn, int where)
+{
+ return (bus << PCICAR_BUSN) | (devfn << PCICAR_DEVFNN) | (where & 0xfc);
+}
+
+static int mcf_pci_readconfig(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ unsigned long addr;
+
+ *value = 0xffffffff;
+
+ if (bus->number == 0) {
+ if (mcf_host_slot2sid[PCI_SLOT(devfn)] == 0)
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ syncio();
+ addr = mcf_mk_pcicar(bus->number, devfn, where);
+ __raw_writel(PCICAR_E | addr, PCICAR);
+ addr = iospace + (where & 0x3);
+
+ switch (size) {
+ case 1:
+ *value = __raw_readb(addr);
+ break;
+ case 2:
+ *value = le16_to_cpu(__raw_readw(addr));
+ break;
+ default:
+ *value = le32_to_cpu(__raw_readl(addr));
+ break;
+ }
+
+ syncio();
+ __raw_writel(0, PCICAR);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int mcf_pci_writeconfig(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ unsigned long addr;
+
+ if (bus->number == 0) {
+ if (mcf_host_slot2sid[PCI_SLOT(devfn)] == 0)
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ syncio();
+ addr = mcf_mk_pcicar(bus->number, devfn, where);
+ __raw_writel(PCICAR_E | addr, PCICAR);
+ addr = iospace + (where & 0x3);
+
+ switch (size) {
+ case 1:
+ __raw_writeb(value, addr);
+ break;
+ case 2:
+ __raw_writew(cpu_to_le16(value), addr);
+ break;
+ default:
+ __raw_writel(cpu_to_le32(value), addr);
+ break;
+ }
+
+ syncio();
+ __raw_writel(0, PCICAR);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops mcf_pci_ops = {
+ .read = mcf_pci_readconfig,
+ .write = mcf_pci_writeconfig,
+};
+
+/*
+ * IO address space access functions. Pretty strait forward, these are
+ * directly mapped in to the IO mapping window. And that is mapped into
+ * virtual address space.
+ */
+u8 mcf_pci_inb(u32 addr)
+{
+ return __raw_readb(iospace + (addr & PCI_IO_MASK));
+}
+EXPORT_SYMBOL(mcf_pci_inb);
+
+u16 mcf_pci_inw(u32 addr)
+{
+ return le16_to_cpu(__raw_readw(iospace + (addr & PCI_IO_MASK)));
+}
+EXPORT_SYMBOL(mcf_pci_inw);
+
+u32 mcf_pci_inl(u32 addr)
+{
+ return le32_to_cpu(__raw_readl(iospace + (addr & PCI_IO_MASK)));
+}
+EXPORT_SYMBOL(mcf_pci_inl);
+
+void mcf_pci_insb(u32 addr, u8 *buf, u32 len)
+{
+ for (; len; len--)
+ *buf++ = mcf_pci_inb(addr);
+}
+EXPORT_SYMBOL(mcf_pci_insb);
+
+void mcf_pci_insw(u32 addr, u16 *buf, u32 len)
+{
+ for (; len; len--)
+ *buf++ = mcf_pci_inw(addr);
+}
+EXPORT_SYMBOL(mcf_pci_insw);
+
+void mcf_pci_insl(u32 addr, u32 *buf, u32 len)
+{
+ for (; len; len--)
+ *buf++ = mcf_pci_inl(addr);
+}
+EXPORT_SYMBOL(mcf_pci_insl);
+
+void mcf_pci_outb(u8 v, u32 addr)
+{
+ __raw_writeb(v, iospace + (addr & PCI_IO_MASK));
+}
+EXPORT_SYMBOL(mcf_pci_outb);
+
+void mcf_pci_outw(u16 v, u32 addr)
+{
+ __raw_writew(cpu_to_le16(v), iospace + (addr & PCI_IO_MASK));
+}
+EXPORT_SYMBOL(mcf_pci_outw);
+
+void mcf_pci_outl(u32 v, u32 addr)
+{
+ __raw_writel(cpu_to_le32(v), iospace + (addr & PCI_IO_MASK));
+}
+EXPORT_SYMBOL(mcf_pci_outl);
+
+void mcf_pci_outsb(u32 addr, const u8 *buf, u32 len)
+{
+ for (; len; len--)
+ mcf_pci_outb(*buf++, addr);
+}
+EXPORT_SYMBOL(mcf_pci_outsb);
+
+void mcf_pci_outsw(u32 addr, const u16 *buf, u32 len)
+{
+ for (; len; len--)
+ mcf_pci_outw(*buf++, addr);
+}
+EXPORT_SYMBOL(mcf_pci_outsw);
+
+void mcf_pci_outsl(u32 addr, const u32 *buf, u32 len)
+{
+ for (; len; len--)
+ mcf_pci_outl(*buf++, addr);
+}
+EXPORT_SYMBOL(mcf_pci_outsl);
+
+/*
+ * Initialize the PCI bus registers, and scan the bus.
+ */
+static struct resource mcf_pci_mem = {
+ .name = "PCI Memory space",
+ .start = PCI_MEM_PA,
+ .end = PCI_MEM_PA + PCI_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct resource mcf_pci_io = {
+ .name = "PCI IO space",
+ .start = 0x400,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_IO,
+};
+
+/*
+ * Interrupt mapping and setting.
+ */
+static int mcf_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ int sid;
+
+ sid = mcf_host_slot2sid[slot];
+ if (sid)
+ return mcf_host_irq[sid];
+ return 0;
+}
+
+static int __init mcf_pci_init(void)
+{
+ pr_info("ColdFire: PCI bus initialization...\n");
+
+ /* Reset the external PCI bus */
+ __raw_writel(PCIGSCR_RESET, PCIGSCR);
+ __raw_writel(0, PCITCR);
+
+ request_resource(&iomem_resource, &mcf_pci_mem);
+ request_resource(&iomem_resource, &mcf_pci_io);
+
+ /* Configure PCI arbiter */
+ __raw_writel(PACR_INTMPRI | PACR_INTMINTE | PACR_EXTMPRI(0x1f) |
+ PACR_EXTMINTE(0x1f), PACR);
+
+ /* Set required multi-function pins for PCI bus use */
+ __raw_writew(0x3ff, MCF_PAR_PCIBG);
+ __raw_writew(0x3ff, MCF_PAR_PCIBR);
+
+ /* Set up config space for local host bus controller */
+ __raw_writel(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_INVALIDATE, PCISCR);
+ __raw_writel(PCICR1_LT(32) | PCICR1_CL(8), PCICR1);
+ __raw_writel(0, PCICR2);
+
+ /*
+ * Set up the initiator windows for memory and IO mapping.
+ * These give the CPU bus access onto the PCI bus. One for each of
+ * PCI memory and IO address spaces.
+ */
+ __raw_writel(WXBTAR(PCI_MEM_PA, PCI_MEM_BA, PCI_MEM_SIZE),
+ PCIIW0BTAR);
+ __raw_writel(WXBTAR(PCI_IO_PA, PCI_IO_BA, PCI_IO_SIZE),
+ PCIIW1BTAR);
+ __raw_writel(PCIIWCR_W0_MEM /*| PCIIWCR_W0_MRDL*/ | PCIIWCR_W0_E |
+ PCIIWCR_W1_IO | PCIIWCR_W1_E, PCIIWCR);
+
+ /*
+ * Set up the target windows for access from the PCI bus back to the
+ * CPU bus. All we need is access to system RAM (for mastering).
+ */
+ __raw_writel(CONFIG_RAMBASE, PCIBAR1);
+ __raw_writel(CONFIG_RAMBASE | PCITBATR1_E, PCITBATR1);
+
+ /* Keep a virtual mapping to IO/config space active */
+ iospace = (unsigned long) ioremap(PCI_IO_PA, PCI_IO_SIZE);
+ if (iospace == 0)
+ return -ENODEV;
+ pr_info("Coldfire: PCI IO/config window mapped to 0x%x\n",
+ (u32) iospace);
+
+ /* Turn of PCI reset, and wait for devices to settle */
+ __raw_writel(0, PCIGSCR);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(200));
+
+ rootbus = pci_scan_bus(0, &mcf_pci_ops, NULL);
+ rootbus->resource[0] = &mcf_pci_io;
+ rootbus->resource[1] = &mcf_pci_mem;
+
+ pci_fixup_irqs(pci_common_swizzle, mcf_pci_map_irq);
+ pci_bus_size_bridges(rootbus);
+ pci_bus_assign_resources(rootbus);
+ pci_enable_bridges(rootbus);
+ pci_bus_add_devices(rootbus);
+ return 0;
+}
+
+subsys_initcall(mcf_pci_init);
diff --git a/arch/m68k/platform/coldfire/pinmux.c b/arch/m68k/platform/coldfire/pinmux.c
deleted file mode 100644
index 8c62b825939f..000000000000
--- a/arch/m68k/platform/coldfire/pinmux.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Coldfire generic GPIO pinmux support.
- *
- * (C) Copyright 2009, Steven King <sfking@fdwdc.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-
-#include <asm/pinmux.h>
-
-int mcf_pinmux_request(unsigned pinmux, unsigned func)
-{
- return 0;
-}
-
-void mcf_pinmux_release(unsigned pinmux, unsigned func)
-{
-}
diff --git a/arch/m68k/platform/coldfire/pit.c b/arch/m68k/platform/coldfire/pit.c
index e62dbbcb10f6..e8f3b97b0f77 100644
--- a/arch/m68k/platform/coldfire/pit.c
+++ b/arch/m68k/platform/coldfire/pit.c
@@ -93,7 +93,7 @@ struct clock_event_device cf_pit_clockevent = {
.set_mode = init_cf_pit_timer,
.set_next_event = cf_pit_next_event,
.shift = 32,
- .irq = MCFINT_VECBASE + MCFINT_PIT1,
+ .irq = MCF_IRQ_PIT1,
};
@@ -159,7 +159,7 @@ void hw_timer_init(irq_handler_t handler)
clockevent_delta2ns(0x3f, &cf_pit_clockevent);
clockevents_register_device(&cf_pit_clockevent);
- setup_irq(MCFINT_VECBASE + MCFINT_PIT1, &pit_irq);
+ setup_irq(MCF_IRQ_PIT1, &pit_irq);
clocksource_register_hz(&pit_clk, FREQ);
}
diff --git a/arch/m68k/platform/coldfire/timers.c b/arch/m68k/platform/coldfire/timers.c
index ed96ce50d79f..0a273e75408c 100644
--- a/arch/m68k/platform/coldfire/timers.c
+++ b/arch/m68k/platform/coldfire/timers.c
@@ -36,7 +36,7 @@
*/
void coldfire_profile_init(void);
-#if defined(CONFIG_M532x)
+#if defined(CONFIG_M532x) || defined(CONFIG_M5441x)
#define __raw_readtrr __raw_readl
#define __raw_writetrr __raw_writel
#else
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index ed22bfc5db14..4dbb5055d04b 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -192,11 +192,6 @@ void pcibios_set_master(struct pci_dev *dev)
/* No special bus mastering setup handling */
}
-char __devinit *pcibios_setup(char *str)
-{
- return str;
-}
-
/*
* Reads the interrupt pin to determine if interrupt is use by card.
* If the interrupt is used, then gets the interrupt line from the
@@ -249,8 +244,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.specifier[1],
- oirq.controller ? oirq.controller->full_name :
- "<default>");
+ of_node_full_name(oirq.controller));
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
@@ -1493,8 +1487,7 @@ static void __devinit pcibios_scan_phb(struct pci_controller *hose)
struct pci_bus *bus;
struct device_node *node = hose->dn;
- pr_debug("PCI: Scanning PHB %s\n",
- node ? node->full_name : "<NO NAME>");
+ pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));
pcibios_setup_phb_resources(hose, &resources);
@@ -1506,10 +1499,10 @@ static void __devinit pcibios_scan_phb(struct pci_controller *hose)
pci_free_resource_list(&resources);
return;
}
- bus->secondary = hose->first_busno;
+ bus->busn_res.start = hose->first_busno;
hose->bus = bus;
- hose->last_busno = bus->subordinate;
+ hose->last_busno = bus->busn_res.end;
}
static int __init pcibios_init(void)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 09ab87ee6fef..b3e10fdd3898 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -288,6 +288,7 @@ config MIPS_MALTA
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
select SYS_HAS_CPU_MIPS64_R1
+ select SYS_HAS_CPU_MIPS64_R2
select SYS_HAS_CPU_NEVADA
select SYS_HAS_CPU_RM7000
select SYS_HAS_EARLY_PRINTK
@@ -1423,6 +1424,7 @@ config CPU_SB1
config CPU_CAVIUM_OCTEON
bool "Cavium Octeon processor"
depends on SYS_HAS_CPU_CAVIUM_OCTEON
+ select ARCH_SPARSEMEM_ENABLE
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_SMP
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 1a24d317e7a3..1bbc24b08685 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -310,10 +310,10 @@ static void __init cpmac_get_mac(int instance, unsigned char *dev_addr)
&dev_addr[4], &dev_addr[5]) != 6) {
pr_warning("cannot parse mac address, "
"using random address\n");
- random_ether_addr(dev_addr);
+ eth_random_addr(dev_addr);
}
} else
- random_ether_addr(dev_addr);
+ eth_random_addr(dev_addr);
}
/*****************************************************************************
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index 6210b8d84109..b311be45a720 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -21,6 +21,7 @@ config BCM47XX_BCMA
select BCMA
select BCMA_HOST_SOC
select BCMA_DRIVER_MIPS
+ select BCMA_HOST_PCI if PCI
select BCMA_DRIVER_PCI_HOSTMODE if PCI
default y
help
diff --git a/arch/mips/bcm63xx/dev-pcmcia.c b/arch/mips/bcm63xx/dev-pcmcia.c
index de4d917fd54d..a551bab5ecb9 100644
--- a/arch/mips/bcm63xx/dev-pcmcia.c
+++ b/arch/mips/bcm63xx/dev-pcmcia.c
@@ -79,11 +79,11 @@ static int __init config_pcmcia_cs(unsigned int cs,
return ret;
}
-static const __initdata struct {
+static const struct {
unsigned int cs;
unsigned int base;
unsigned int size;
-} pcmcia_cs[3] = {
+} pcmcia_cs[3] __initconst = {
{
.cs = MPI_CS_PCMCIA_COMMON,
.base = BCM_PCMCIA_COMMON_BASE_PA,
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index f9e275a50d98..2f4f6d5e05b6 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -82,10 +82,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
help
Lock the kernel's implementation of memcpy() into L2.
-config ARCH_SPARSEMEM_ENABLE
- def_bool y
- select SPARSEMEM_STATIC
-
config IOMMU_HELPER
bool
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 4b93048044eb..ee1fb9f7f517 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -185,7 +185,6 @@ static void __cpuinit octeon_init_secondary(void)
octeon_init_cvmcount();
octeon_irq_setup_secondary();
- raw_local_irq_enable();
}
/**
@@ -233,6 +232,7 @@ static void octeon_smp_finish(void)
/* to generate the first CPU timer interrupt */
write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+ local_irq_enable();
}
/**
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 2e1ad4c652b7..82ad35ce2b45 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -17,7 +17,6 @@
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
-#include <asm/bug.h>
#include <asm/byteorder.h> /* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 285a41fa0b18..eee10dc07ac1 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -8,6 +8,7 @@
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
+#include <linux/bug.h>
#include <linux/irqflags.h>
#include <asm/war.h>
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index f9fa2a479dd0..95e40c1e8ed1 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -94,6 +94,7 @@
#define PRID_IMP_24KE 0x9600
#define PRID_IMP_74K 0x9700
#define PRID_IMP_1004K 0x9900
+#define PRID_IMP_M14KC 0x9c00
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -260,12 +261,12 @@ enum cpu_type_enum {
*/
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
- CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC,
+ CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_M14KC,
/*
* MIPS64 class processors
*/
- CPU_5KC, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
+ CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
CPU_XLR, CPU_XLP,
@@ -288,7 +289,7 @@ enum cpu_type_enum {
#define MIPS_CPU_ISA_M64R2 0x00000100
#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \
- MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 )
+ MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2)
#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 86548da650e7..991b659e2548 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -206,7 +206,7 @@
#define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100
#define GIC_VPE_EIC_SS(intr) \
- (GIC_EIC_SHADOW_SET_BASE + (4 * intr))
+ (GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
#define GIC_VPE_EIC_VEC_BASE 0x0800
#define GIC_VPE_EIC_VEC(intr) \
@@ -330,6 +330,17 @@ struct gic_intr_map {
#define GIC_FLAG_TRANSPARENT 0x02
};
+/*
+ * This is only used in EIC mode. This helps to figure out which
+ * shared interrupts we need to process when we get a vector interrupt.
+ */
+#define GIC_MAX_SHARED_INTR 0x5
+struct gic_shared_intr_map {
+ unsigned int num_shared_intr;
+ unsigned int intr_list[GIC_MAX_SHARED_INTR];
+ unsigned int local_intr_mask;
+};
+
extern void gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
unsigned int intrmap_size, unsigned int irqbase);
@@ -338,5 +349,7 @@ extern unsigned int gic_get_int(void);
extern void gic_send_ipi(unsigned int intr);
extern unsigned int plat_ipi_call_int_xlate(unsigned int);
extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
+extern void gic_bind_eic_interrupt(int irq, int set);
+extern unsigned int gic_get_timer_pending(void);
#endif /* _ASM_GICREGS_H */
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index 7ebfc392e58d..ab84064283db 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -251,7 +251,7 @@ struct f_format { /* FPU register format */
unsigned int func : 6;
};
-struct ma_format { /* FPU multipy and add format (MIPS IV) */
+struct ma_format { /* FPU multiply and add format (MIPS IV) */
unsigned int opcode : 6;
unsigned int fr : 5;
unsigned int ft : 5;
@@ -324,7 +324,7 @@ struct f_format { /* FPU register format */
unsigned int opcode : 6;
};
-struct ma_format { /* FPU multipy and add format (MIPS IV) */
+struct ma_format { /* FPU multiply and add format (MIPS IV) */
unsigned int fmt : 2;
unsigned int func : 4;
unsigned int fd : 5;
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index a58f22998a86..29d9c23c20c7 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <asm/addrspace.h>
+#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index fb698dc09bc9..78dbb8a86da2 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -136,6 +136,7 @@ extern void free_irqno(unsigned int irq);
* IE7. Since R2 their number has to be read from the c0_intctl register.
*/
#define CP0_LEGACY_COMPARE_IRQ 7
+#define CP0_LEGACY_PERFCNT_IRQ 7
extern int cp0_compare_irq;
extern int cp0_compare_irq_shift;
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 94d4faad29a1..fdcd78ca1b03 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -99,7 +99,7 @@
#define CKCTL_6368_USBH_CLK_EN (1 << 15)
#define CKCTL_6368_DISABLE_GLESS_EN (1 << 16)
#define CKCTL_6368_NAND_CLK_EN (1 << 17)
-#define CKCTL_6368_IPSEC_CLK_EN (1 << 17)
+#define CKCTL_6368_IPSEC_CLK_EN (1 << 18)
#define CKCTL_6368_ALL_SAFE_EN (CKCTL_6368_SWPKT_USB_EN | \
CKCTL_6368_SWPKT_SAR_EN | \
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h
index d11aa02a956a..5447d9fc4219 100644
--- a/arch/mips/include/asm/mips-boards/maltaint.h
+++ b/arch/mips/include/asm/mips-boards/maltaint.h
@@ -86,6 +86,16 @@
#define GIC_CPU_INT4 4 /* . */
#define GIC_CPU_INT5 5 /* Core Interrupt 5 */
+/* MALTA GIC local interrupts */
+#define GIC_INT_TMR (GIC_CPU_INT5)
+#define GIC_INT_PERFCTR (GIC_CPU_INT5)
+
+/* GIC constants */
+/* Add 2 to convert non-eic hw int # to eic vector # */
+#define GIC_CPU_TO_VEC_OFFSET (2)
+/* If we map an intr to pin X, GIC will actually generate vector X+1 */
+#define GIC_PIN_TO_VEC_OFFSET (1)
+
#define GIC_EXT_INTR(x) x
/* External Interrupts used for IPI */
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index c9420aa97e32..e71ff4c317f2 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -48,7 +48,7 @@
#define CP0_VPECONF0 $1, 2
#define CP0_VPECONF1 $1, 3
#define CP0_YQMASK $1, 4
-#define CP0_VPESCHEDULE $1, 5
+#define CP0_VPESCHEDULE $1, 5
#define CP0_VPESCHEFBK $1, 6
#define CP0_TCSTATUS $2, 1
#define CP0_TCBIND $2, 2
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 5d33621b5658..4f8ddba8c360 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -22,7 +22,7 @@ struct task_struct;
* switch_to(n) should switch tasks to task nr n, first
* checking that n isn't the current task, in which case it does nothing.
*/
-extern asmlinkage void *resume(void *last, void *next, void *next_ti);
+extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu);
extern unsigned int ll_bit;
extern struct task_struct *ll_task;
@@ -66,11 +66,13 @@ do { \
#define switch_to(prev, next, last) \
do { \
+ u32 __usedfpu; \
__mips_mt_fpaff_switch_to(prev); \
if (cpu_has_dsp) \
__save_dsp(prev); \
__clear_software_ll_bit(); \
- (last) = resume(prev, next, task_thread_info(next)); \
+ __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \
+ (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
} while (0)
#define finish_arch_switch(prev) \
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index e2eca7d10598..ca97e0ecb64b 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -60,6 +60,8 @@ struct thread_info {
register struct thread_info *__current_thread_info __asm__("$28");
#define current_thread_info() __current_thread_info
+#endif /* !__ASSEMBLY__ */
+
/* thread information allocation */
#if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
#define THREAD_SIZE_ORDER (1)
@@ -85,8 +87,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define STACK_WARN (THREAD_SIZE / 8)
-#endif /* !__ASSEMBLY__ */
-
#define PREEMPT_ACTIVE 0x10000000
/*
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 6ae7ce4ac63e..f4630e1082ab 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -4,7 +4,7 @@
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
- * Copyright (C) 2001, 2004 MIPS Inc.
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -199,6 +199,7 @@ void __init check_wait(void)
cpu_wait = rm7k_wait_irqoff;
break;
+ case CPU_M14KC:
case CPU_24K:
case CPU_34K:
case CPU_1004K:
@@ -810,6 +811,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_5KC;
__cpu_name[cpu] = "MIPS 5Kc";
break;
+ case PRID_IMP_5KE:
+ c->cputype = CPU_5KE;
+ __cpu_name[cpu] = "MIPS 5KE";
+ break;
case PRID_IMP_20KC:
c->cputype = CPU_20KC;
__cpu_name[cpu] = "MIPS 20Kc";
@@ -831,6 +836,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_74K;
__cpu_name[cpu] = "MIPS 74Kc";
break;
+ case PRID_IMP_M14KC:
+ c->cputype = CPU_M14KC;
+ __cpu_name[cpu] = "MIPS M14Kc";
+ break;
case PRID_IMP_1004K:
c->cputype = CPU_1004K;
__cpu_name[cpu] = "MIPS 1004Kc";
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 57ba13edb03a..3fc1691110dc 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -5,7 +5,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05 by Ralf Baechle
+ * Copyright (C) 1996, 97, 98, 99, 2000, 01, 03, 04, 05, 12 by Ralf Baechle
* Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
*/
#include <linux/interrupt.h>
@@ -35,6 +35,12 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(kernel_thread);
/*
+ * Functions that operate on entire pages. Mostly used by memory management.
+ */
+EXPORT_SYMBOL(clear_page);
+EXPORT_SYMBOL(copy_page);
+
+/*
* Userspace access stuff.
*/
EXPORT_SYMBOL(__copy_user);
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index ce89c8061708..0441f54b2a6a 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -31,7 +31,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti)
+ * struct thread_info *next_ti, int usedfpu)
*/
.align 7
LEAF(resume)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index f29099b104c4..eb5e394a4650 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -162,11 +162,6 @@ static unsigned int counters_total_to_per_cpu(unsigned int counters)
return counters >> vpe_shift();
}
-static unsigned int counters_per_cpu_to_total(unsigned int counters)
-{
- return counters << vpe_shift();
-}
-
#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id() 0
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 293898391e67..9c51be5a163a 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -43,7 +43,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti) )
+ * struct thread_info *next_ti, int usedfpu)
*/
LEAF(resume)
mfc0 t1, CP0_STATUS
@@ -51,18 +51,9 @@ LEAF(resume)
cpu_save_nonscratch a0
sw ra, THREAD_REG31(a0)
- /*
- * check if we need to save FPU registers
- */
- lw t3, TASK_THREAD_INFO(a0)
- lw t0, TI_FLAGS(t3)
- li t1, _TIF_USEDFPU
- and t2, t0, t1
- beqz t2, 1f
- nor t1, zero, t1
+ beqz a3, 1f
- and t0, t0, t1
- sw t0, TI_FLAGS(t3)
+ PTR_L t3, TASK_THREAD_INFO(a0)
/*
* clear saved user stack CU1 bit
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 9414f9354469..42d2a3938420 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -41,7 +41,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti)
+ * struct thread_info *next_ti, int usedfpu)
*/
.align 5
LEAF(resume)
@@ -53,16 +53,10 @@
/*
* check if we need to save FPU registers
*/
- PTR_L t3, TASK_THREAD_INFO(a0)
- LONG_L t0, TI_FLAGS(t3)
- li t1, _TIF_USEDFPU
- and t2, t0, t1
- beqz t2, 1f
- nor t1, zero, t1
- and t0, t0, t1
- LONG_S t0, TI_FLAGS(t3)
+ beqz a3, 1f
+ PTR_L t3, TASK_THREAD_INFO(a0)
/*
* clear saved user stack CU1 bit
*/
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 3046e2986006..8e393b8443f7 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -15,7 +15,6 @@
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
-#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/reboot.h>
@@ -197,13 +196,6 @@ static void bmips_init_secondary(void)
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
#endif
-
- /* make sure there won't be a timer interrupt for a little while */
- write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
-
- irq_enable_hazard();
- set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
- irq_enable_hazard();
}
/*
@@ -212,6 +204,13 @@ static void bmips_init_secondary(void)
static void bmips_smp_finish(void)
{
pr_info("SMP: CPU%d is running\n", smp_processor_id());
+
+ /* make sure there won't be a timer interrupt for a little while */
+ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+
+ irq_enable_hazard();
+ set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE);
+ irq_enable_hazard();
}
/*
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 48650c818040..1268392f1d27 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -122,13 +122,21 @@ asmlinkage __cpuinit void start_secondary(void)
notify_cpu_starting(cpu);
- mp_ops->smp_finish();
+ set_cpu_online(cpu, true);
+
set_cpu_sibling_map(cpu);
cpu_set(cpu, cpu_callin_map);
synchronise_count_slave();
+ /*
+ * irq will be enabled in ->smp_finish(), enabling it too early
+ * is dangerous.
+ */
+ WARN_ON_ONCE(!irqs_disabled());
+ mp_ops->smp_finish();
+
cpu_idle();
}
@@ -196,8 +204,6 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
while (!cpu_isset(cpu, cpu_callin_map))
udelay(100);
- set_cpu_online(cpu, true);
-
return 0;
}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f5dd38f1d015..15b5f3cfd20c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -322,7 +322,7 @@ int __init smtc_build_cpu_map(int start_cpu_slot)
/*
* Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
+ * Make sure all CPUs are in a sensible state before we boot any of the
* secondaries.
*
* For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
@@ -340,12 +340,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
/*
* TCContext gets an offset from the base of the IPIQ array
* to be used in low-level code to detect the presence of
- * an active IPI queue
+ * an active IPI queue.
*/
write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
/* Bind tc to vpe */
write_tc_c0_tcbind(vpe);
- /* In general, all TCs should have the same cpu_data indications */
+ /* In general, all TCs should have the same cpu_data indications. */
memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
if (cpu_data[0].cputype == CPU_34K ||
@@ -358,8 +358,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
}
/*
- * Tweak to get Count registes in as close a sync as possible.
- * Value seems good for 34K-class cores.
+ * Tweak to get Count registes in as close a sync as possible. The
+ * value seems good for 34K-class cores.
*/
#define CP0_SKEW 8
@@ -615,7 +615,6 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
void smtc_init_secondary(void)
{
- local_irq_enable();
}
void smtc_smp_finish(void)
@@ -631,6 +630,8 @@ void smtc_smp_finish(void)
if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+ local_irq_enable();
+
printk("TC %d going on-line as CPU %d\n",
cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 99f913c8d7a6..842d55e411fd 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -111,7 +111,6 @@ void __cpuinit synchronise_count_master(void)
void __cpuinit synchronise_count_slave(void)
{
int i;
- unsigned long flags;
unsigned int initcount;
int ncpus;
@@ -123,8 +122,6 @@ void __cpuinit synchronise_count_slave(void)
return;
#endif
- local_irq_save(flags);
-
/*
* Not every cpu is online at the time this gets called,
* so we first wait for the master to say everyone is ready
@@ -154,7 +151,5 @@ void __cpuinit synchronise_count_slave(void)
}
/* Arrange for an interrupt in a short while */
write_c0_compare(read_c0_count() + COUNTON);
-
- local_irq_restore(flags);
}
#undef NR_LOOPS
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 2d0c2a277f52..c3c293543703 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -132,6 +132,9 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
+ if (!task)
+ task = current;
+
if (raw_show_trace || !__kernel_text_address(pc)) {
show_raw_backtrace(sp);
return;
@@ -1249,6 +1252,7 @@ static inline void parity_protection_init(void)
break;
case CPU_5KC:
+ case CPU_5KE:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
@@ -1498,6 +1502,7 @@ extern void flush_tlb_handlers(void);
* Timer interrupt
*/
int cp0_compare_irq;
+EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;
/*
@@ -1597,7 +1602,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
cp0_perfcount_irq = -1;
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
- cp0_compare_irq_shift = cp0_compare_irq;
+ cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
cp0_perfcount_irq = -1;
}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 924da5eb7031..df243a64f430 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
#include <asm/asm-offsets.h>
#include <asm/page.h>
+#include <asm/thread_info.h>
#include <asm-generic/vmlinux.lds.h>
#undef mips
@@ -72,7 +73,7 @@ SECTIONS
.data : { /* Data */
. = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
- INIT_TASK_DATA(PAGE_SIZE)
+ INIT_TASK_DATA(THREAD_SIZE)
NOSAVE_DATA
CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 4aa20280613e..fd6203f14f1f 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -3,8 +3,8 @@
#
obj-y += cache.o dma-default.o extable.o fault.o \
- gup.o init.o mmap.o page.o tlbex.o \
- tlbex-fault.o uasm.o
+ gup.o init.o mmap.o page.o page-funcs.o \
+ tlbex.o tlbex-fault.o uasm.o
obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
obj-$(CONFIG_64BIT) += pgtable-64.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 5109be96d98d..f092c265dc63 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -977,7 +977,7 @@ static void __cpuinit probe_pcache(void)
c->icache.linesz = 2 << lsize;
else
c->icache.linesz = lsize;
- c->icache.sets = 64 << ((config1 >> 22) & 7);
+ c->icache.sets = 32 << (((config1 >> 22) + 1) & 7);
c->icache.ways = 1 + ((config1 >> 16) & 7);
icache_size = c->icache.sets *
@@ -997,7 +997,7 @@ static void __cpuinit probe_pcache(void)
c->dcache.linesz = 2 << lsize;
else
c->dcache.linesz= lsize;
- c->dcache.sets = 64 << ((config1 >> 13) & 7);
+ c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7);
c->dcache.ways = 1 + ((config1 >> 7) & 7);
dcache_size = c->dcache.sets *
@@ -1051,6 +1051,7 @@ static void __cpuinit probe_pcache(void)
case CPU_R14000:
break;
+ case CPU_M14KC:
case CPU_24K:
case CPU_34K:
case CPU_74K:
diff --git a/arch/mips/mm/page-funcs.S b/arch/mips/mm/page-funcs.S
new file mode 100644
index 000000000000..48a6b38ff13e
--- /dev/null
+++ b/arch/mips/mm/page-funcs.S
@@ -0,0 +1,50 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Micro-assembler generated clear_page/copy_page functions.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Copyright (C) 2012 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+#define cpu_clear_page_function_name clear_page_cpu
+#define cpu_copy_page_function_name copy_page_cpu
+#else
+#define cpu_clear_page_function_name clear_page
+#define cpu_copy_page_function_name copy_page
+#endif
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x058 bytes
+ * R4600 v1.7: 0x05c bytes
+ * R4600 v2.0: 0x060 bytes
+ * With prefetching, 16 word strides 0x120 bytes
+ */
+EXPORT(__clear_page_start)
+LEAF(cpu_clear_page_function_name)
+1: j 1b /* Dummy, will be replaced. */
+ .space 288
+END(cpu_clear_page_function_name)
+EXPORT(__clear_page_end)
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache: 0x11c bytes
+ * R4600 v1.7: 0x080 bytes
+ * R4600 v2.0: 0x07c bytes
+ * With prefetching, 16 word strides 0x540 bytes
+ */
+EXPORT(__copy_page_start)
+LEAF(cpu_copy_page_function_name)
+1: j 1b /* Dummy, will be replaced. */
+ .space 1344
+END(cpu_copy_page_function_name)
+EXPORT(__copy_page_end)
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index cc0b626858b3..98f530e18216 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -6,6 +6,7 @@
* Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2007 Maciej W. Rozycki
* Copyright (C) 2008 Thiemo Seufer
+ * Copyright (C) 2012 MIPS Technologies, Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
@@ -71,45 +72,6 @@ static struct uasm_reloc __cpuinitdata relocs[5];
#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache: 0x058 bytes
- * R4600 v1.7: 0x05c bytes
- * R4600 v2.0: 0x060 bytes
- * With prefetching, 16 word strides 0x120 bytes
- */
-
-static u32 clear_page_array[0x120 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
-#else
-void clear_page(void *page) __attribute__((alias("clear_page_array")));
-#endif
-
-EXPORT_SYMBOL(clear_page);
-
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache: 0x11c bytes
- * R4600 v1.7: 0x080 bytes
- * R4600 v2.0: 0x07c bytes
- * With prefetching, 16 word strides 0x540 bytes
- */
-static u32 copy_page_array[0x540 / 4];
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-void
-copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
-#else
-void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
-#endif
-
-EXPORT_SYMBOL(copy_page);
-
-
static int pref_bias_clear_store __cpuinitdata;
static int pref_bias_copy_load __cpuinitdata;
static int pref_bias_copy_store __cpuinitdata;
@@ -282,10 +244,15 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
}
}
+extern u32 __clear_page_start;
+extern u32 __clear_page_end;
+extern u32 __copy_page_start;
+extern u32 __copy_page_end;
+
void __cpuinit build_clear_page(void)
{
int off;
- u32 *buf = (u32 *)&clear_page_array;
+ u32 *buf = &__clear_page_start;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
@@ -356,17 +323,17 @@ void __cpuinit build_clear_page(void)
uasm_i_jr(&buf, RA);
uasm_i_nop(&buf);
- BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));
+ BUG_ON(buf > &__clear_page_end);
uasm_resolve_relocs(relocs, labels);
pr_debug("Synthesized clear page handler (%u instructions).\n",
- (u32)(buf - clear_page_array));
+ (u32)(buf - &__clear_page_start));
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
- for (i = 0; i < (buf - clear_page_array); i++)
- pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
+ for (i = 0; i < (buf - &__clear_page_start); i++)
+ pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
pr_debug("\t.set pop\n");
}
@@ -427,7 +394,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
void __cpuinit build_copy_page(void)
{
int off;
- u32 *buf = (u32 *)&copy_page_array;
+ u32 *buf = &__copy_page_start;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
int i;
@@ -595,21 +562,23 @@ void __cpuinit build_copy_page(void)
uasm_i_jr(&buf, RA);
uasm_i_nop(&buf);
- BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));
+ BUG_ON(buf > &__copy_page_end);
uasm_resolve_relocs(relocs, labels);
pr_debug("Synthesized copy page handler (%u instructions).\n",
- (u32)(buf - copy_page_array));
+ (u32)(buf - &__copy_page_start));
pr_debug("\t.set push\n");
pr_debug("\t.set noreorder\n");
- for (i = 0; i < (buf - copy_page_array); i++)
- pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
+ for (i = 0; i < (buf - &__copy_page_start); i++)
+ pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
pr_debug("\t.set pop\n");
}
#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+extern void clear_page_cpu(void *page);
+extern void copy_page_cpu(void *to, void *from);
/*
* Pad descriptors to cacheline, since each is exclusively owned by a
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0bc485b3cd60..03eb0ef91580 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -9,6 +9,7 @@
* Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2008, 2009 Cavium Networks, Inc.
+ * Copyright (C) 2011 MIPS Technologies, Inc.
*
* ... and the days got worse and worse and now you see
* I've gone completly out of my mind.
@@ -494,6 +495,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_R14000:
case CPU_4KC:
case CPU_4KEC:
+ case CPU_M14KC:
case CPU_SB1:
case CPU_SB1A:
case CPU_4KSC:
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index bf80921f2f56..284dea54faf5 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -241,8 +241,9 @@ void __init mips_pcibios_init(void)
return;
}
- if (controller->io_resource->start < 0x00001000UL) /* FIXME */
- controller->io_resource->start = 0x00001000UL;
+ /* Change start address to avoid conflicts with ACPI and SMB devices */
+ if (controller->io_resource->start < 0x00002000UL)
+ controller->io_resource->start = 0x00002000UL;
iomem_resource.end &= 0xfffffffffULL; /* 64 GB */
ioport_resource.end = controller->io_resource->end;
@@ -253,7 +254,7 @@ void __init mips_pcibios_init(void)
}
/* Enable PCI 2.1 compatibility in PIIX4 */
-static void __init quirk_dlcsetup(struct pci_dev *dev)
+static void __devinit quirk_dlcsetup(struct pci_dev *dev)
{
u8 odlc, ndlc;
(void) pci_read_config_byte(dev, 0x82, &odlc);
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index b7f37d4982fa..2e28f653f66d 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -111,7 +111,7 @@ static void __init pci_clock_check(void)
unsigned int __iomem *jmpr_p =
(unsigned int *) ioremap(MALTA_JMPRS_REG, sizeof(unsigned int));
int jmpr = (__raw_readl(jmpr_p) >> 2) & 0x07;
- static const int pciclocks[] __initdata = {
+ static const int pciclocks[] __initconst = {
33, 20, 25, 30, 12, 16, 37, 10
};
int pciclock = pciclocks[jmpr];
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index acb677a1227c..b3df7c2aad1e 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -82,8 +82,10 @@ void __init prom_free_prom_memory(void)
void xlp_mmu_init(void)
{
+ /* enable extended TLB and Large Fixed TLB */
write_c0_config6(read_c0_config6() | 0x24);
- current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
+ /* set page mask of Fixed TLB in config7 */
write_c0_config7(PM_DEFAULT_MASK >>
(13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
}
@@ -100,6 +102,10 @@ void __init prom_init(void)
nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
#ifdef CONFIG_SMP
nlm_wakeup_secondary_cpus(0xffffffff);
+
+ /* update TLB size after waking up threads */
+ current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
register_smp_ops(&nlm_smp_ops);
#endif
}
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index d1f2d4c52d42..b6e378211a2c 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -78,6 +78,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
switch (current_cpu_type()) {
case CPU_5KC:
+ case CPU_M14KC:
case CPU_20KC:
case CPU_24K:
case CPU_25KF:
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index baba3bcaa3c2..4d80a856048d 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -322,6 +322,10 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.num_counters = counters;
switch (current_cpu_type()) {
+ case CPU_M14KC:
+ op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
+ break;
+
case CPU_20KC:
op_model_mipsxx_ops.cpu_type = "mips/20K";
break;
diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c
index d5d4c018fb04..0857ab8c3919 100644
--- a/arch/mips/pci/fixup-fuloong2e.c
+++ b/arch/mips/pci/fixup-fuloong2e.c
@@ -48,7 +48,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
return 0;
}
-static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
@@ -60,7 +60,7 @@ static void __init loongson2e_nec_fixup(struct pci_dev *pdev)
pci_write_config_dword(pdev, 0xe4, 1 << 5);
}
-static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func0_fixup(struct pci_dev *pdev)
{
unsigned char c;
@@ -135,7 +135,7 @@ static void __init loongson2e_686b_func0_fixup(struct pci_dev *pdev)
printk(KERN_INFO"via686b fix: ISA bridge done\n");
}
-static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func1_fixup(struct pci_dev *pdev)
{
printk(KERN_INFO"via686b fix: IDE\n");
@@ -168,19 +168,19 @@ static void __init loongson2e_686b_func1_fixup(struct pci_dev *pdev)
printk(KERN_INFO"via686b fix: IDE done\n");
}
-static void __init loongson2e_686b_func2_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func2_fixup(struct pci_dev *pdev)
{
/* irq routing */
pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 10);
}
-static void __init loongson2e_686b_func3_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func3_fixup(struct pci_dev *pdev)
{
/* irq routing */
pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, 11);
}
-static void __init loongson2e_686b_func5_fixup(struct pci_dev *pdev)
+static void __devinit loongson2e_686b_func5_fixup(struct pci_dev *pdev)
{
unsigned int val;
unsigned char c;
diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c
index 4b9768d5d729..a7b917dcf604 100644
--- a/arch/mips/pci/fixup-lemote2f.c
+++ b/arch/mips/pci/fixup-lemote2f.c
@@ -96,21 +96,21 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
}
/* CS5536 SPEC. fixup */
-static void __init loongson_cs5536_isa_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_isa_fixup(struct pci_dev *pdev)
{
/* the uart1 and uart2 interrupt in PIC is enabled as default */
pci_write_config_dword(pdev, PCI_UART1_INT_REG, 1);
pci_write_config_dword(pdev, PCI_UART2_INT_REG, 1);
}
-static void __init loongson_cs5536_ide_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ide_fixup(struct pci_dev *pdev)
{
/* setting the mutex pin as IDE function */
pci_write_config_dword(pdev, PCI_IDE_CFG_REG,
CS5536_IDE_FLASH_SIGNATURE);
}
-static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_acc_fixup(struct pci_dev *pdev)
{
/* enable the AUDIO interrupt in PIC */
pci_write_config_dword(pdev, PCI_ACC_INT_REG, 1);
@@ -118,14 +118,14 @@ static void __init loongson_cs5536_acc_fixup(struct pci_dev *pdev)
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xc0);
}
-static void __init loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ohci_fixup(struct pci_dev *pdev)
{
/* enable the OHCI interrupt in PIC */
/* THE OHCI, EHCI, UDC, OTG are shared with interrupt in PIC */
pci_write_config_dword(pdev, PCI_OHCI_INT_REG, 1);
}
-static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
+static void __devinit loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
{
u32 hi, lo;
@@ -137,7 +137,7 @@ static void __init loongson_cs5536_ehci_fixup(struct pci_dev *pdev)
pci_write_config_dword(pdev, PCI_EHCI_FLADJ_REG, 0x2000);
}
-static void __init loongson_nec_fixup(struct pci_dev *pdev)
+static void __devinit loongson_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c
index 0f48498bc231..70073c98ed32 100644
--- a/arch/mips/pci/fixup-malta.c
+++ b/arch/mips/pci/fixup-malta.c
@@ -49,10 +49,10 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
return 0;
}
-static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
+static void __devinit malta_piix_func0_fixup(struct pci_dev *pdev)
{
unsigned char reg_val;
- static int piixirqmap[16] __initdata = { /* PIIX PIRQC[A:D] irq mappings */
+ static int piixirqmap[16] __devinitdata = { /* PIIX PIRQC[A:D] irq mappings */
0, 0, 0, 3,
4, 5, 6, 7,
0, 9, 10, 11,
@@ -83,7 +83,7 @@ static void __init malta_piix_func0_fixup(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0,
malta_piix_func0_fixup);
-static void __init malta_piix_func1_fixup(struct pci_dev *pdev)
+static void __devinit malta_piix_func1_fixup(struct pci_dev *pdev)
{
unsigned char reg_val;
diff --git a/arch/mips/pci/fixup-mpc30x.c b/arch/mips/pci/fixup-mpc30x.c
index e08f49cb6875..8e4f8288eca2 100644
--- a/arch/mips/pci/fixup-mpc30x.c
+++ b/arch/mips/pci/fixup-mpc30x.c
@@ -22,13 +22,13 @@
#include <asm/vr41xx/mpc30x.h>
-static const int internal_func_irqs[] __initdata = {
+static const int internal_func_irqs[] __initconst = {
VRC4173_CASCADE_IRQ,
VRC4173_AC97_IRQ,
VRC4173_USB_IRQ,
};
-static const int irq_tab_mpc30x[] __initdata = {
+static const int irq_tab_mpc30x[] __initconst = {
[12] = VRC4173_PCMCIA1_IRQ,
[13] = VRC4173_PCMCIA2_IRQ,
[29] = MQ200_IRQ,
diff --git a/arch/mips/pci/fixup-sb1250.c b/arch/mips/pci/fixup-sb1250.c
index f0bb9146e6c0..d02900a72916 100644
--- a/arch/mips/pci/fixup-sb1250.c
+++ b/arch/mips/pci/fixup-sb1250.c
@@ -15,7 +15,7 @@
* Set the BCM1250, etc. PCI host bridge's TRDY timeout
* to the finite max.
*/
-static void __init quirk_sb1250_pci(struct pci_dev *dev)
+static void __devinit quirk_sb1250_pci(struct pci_dev *dev)
{
pci_write_config_byte(dev, 0x40, 0xff);
}
@@ -25,7 +25,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_PCI,
/*
* The BCM1250, etc. PCI/HT bridge reports as a host bridge.
*/
-static void __init quirk_sb1250_ht(struct pci_dev *dev)
+static void __devinit quirk_sb1250_ht(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
@@ -35,7 +35,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIBYTE, PCI_DEVICE_ID_BCM1250_HT,
/*
* Set the SP1011 HT/PCI bridge's TRDY timeout to the finite max.
*/
-static void __init quirk_sp1011(struct pci_dev *dev)
+static void __devinit quirk_sp1011(struct pci_dev *dev)
{
pci_write_config_byte(dev, 0x64, 0xff);
}
diff --git a/arch/mips/pci/ops-tx4927.c b/arch/mips/pci/ops-tx4927.c
index a1e7e6d80c8c..bc13e29d2bb3 100644
--- a/arch/mips/pci/ops-tx4927.c
+++ b/arch/mips/pci/ops-tx4927.c
@@ -495,7 +495,7 @@ irqreturn_t tx4927_pcierr_interrupt(int irq, void *dev_id)
}
#ifdef CONFIG_TOSHIBA_FPCIB0
-static void __init tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __devinit tx4927_quirk_slc90e66_bridge(struct pci_dev *dev)
{
struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(dev->bus);
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 0fbe4c0c170a..fdc24440294c 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -212,7 +212,7 @@ static inline void pci_enable_swapping(struct pci_dev *dev)
bridge->b_widget.w_tflush; /* Flush */
}
-static void __init pci_fixup_ioc3(struct pci_dev *d)
+static void __devinit pci_fixup_ioc3(struct pci_dev *d)
{
pci_disable_swapping(d);
}
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index ea453532a33c..075d87acd12a 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -129,7 +129,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
/* setup reset gpio used by pci */
reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
- if (reset_gpio > 0)
+ if (gpio_is_valid(reset_gpio))
devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
/* enable auto-switching between PCI and EBU */
@@ -192,7 +192,7 @@ static int __devinit ltq_pci_startup(struct platform_device *pdev)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
/* toggle reset pin */
- if (reset_gpio > 0) {
+ if (gpio_is_valid(reset_gpio)) {
__gpio_set_value(reset_gpio, 0);
wmb();
mdelay(1);
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c
index 1644805a6730..172af1cd5867 100644
--- a/arch/mips/pci/pci-xlr.c
+++ b/arch/mips/pci/pci-xlr.c
@@ -41,6 +41,7 @@
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/console.h>
+#include <linux/pci_regs.h>
#include <asm/io.h>
@@ -156,35 +157,55 @@ struct pci_controller nlm_pci_controller = {
.io_offset = 0x00000000UL,
};
+/*
+ * The top level PCIe links on the XLS PCIe controller appear as
+ * bridges. Given a device, this function finds which link it is
+ * on.
+ */
+static struct pci_dev *xls_get_pcie_link(const struct pci_dev *dev)
+{
+ struct pci_bus *bus, *p;
+
+ /* Find the bridge on bus 0 */
+ bus = dev->bus;
+ for (p = bus->parent; p && p->number != 0; p = p->parent)
+ bus = p;
+
+ return p ? bus->self : NULL;
+}
+
static int get_irq_vector(const struct pci_dev *dev)
{
+ struct pci_dev *lnk;
+
if (!nlm_chip_is_xls())
- return PIC_PCIX_IRQ; /* for XLR just one IRQ*/
+ return PIC_PCIX_IRQ; /* for XLR just one IRQ */
/*
* For XLS PCIe, there is an IRQ per Link, find out which
* link the device is on to assign interrupts
- */
- if (dev->bus->self == NULL)
+ */
+ lnk = xls_get_pcie_link(dev);
+ if (lnk == NULL)
return 0;
- switch (dev->bus->self->devfn) {
- case 0x0:
+ switch (PCI_SLOT(lnk->devfn)) {
+ case 0:
return PIC_PCIE_LINK0_IRQ;
- case 0x8:
+ case 1:
return PIC_PCIE_LINK1_IRQ;
- case 0x10:
+ case 2:
if (nlm_chip_is_xls_b())
return PIC_PCIE_XLSB0_LINK2_IRQ;
else
return PIC_PCIE_LINK2_IRQ;
- case 0x18:
+ case 3:
if (nlm_chip_is_xls_b())
return PIC_PCIE_XLSB0_LINK3_IRQ;
else
return PIC_PCIE_LINK3_IRQ;
}
- WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
+ WARN(1, "Unexpected devfn %d\n", lnk->devfn);
return 0;
}
@@ -202,7 +223,27 @@ void arch_teardown_msi_irq(unsigned int irq)
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
struct msi_msg msg;
+ struct pci_dev *lnk;
int irq, ret;
+ u16 val;
+
+ /* MSI not supported on XLR */
+ if (!nlm_chip_is_xls())
+ return 1;
+
+ /*
+ * Enable MSI on the XLS PCIe controller bridge which was disabled
+ * at enumeration, the bridge MSI capability is at 0x50
+ */
+ lnk = xls_get_pcie_link(dev);
+ if (lnk == NULL)
+ return 1;
+
+ pci_read_config_word(lnk, 0x50 + PCI_MSI_FLAGS, &val);
+ if ((val & PCI_MSI_FLAGS_ENABLE) == 0) {
+ val |= PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(lnk, 0x50 + PCI_MSI_FLAGS, val);
+ }
irq = get_irq_vector(dev);
if (irq <= 0)
@@ -327,7 +368,7 @@ static int __init pcibios_init(void)
}
} else {
/* XLR PCI controller ACK */
- irq_set_handler_data(PIC_PCIE_XLSB0_LINK3_IRQ, xlr_pci_ack);
+ irq_set_handler_data(PIC_PCIX_IRQ, xlr_pci_ack);
}
return 0;
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 271e8c4a54c7..690356808f8a 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -102,7 +102,7 @@ static void __devinit pcibios_scanbus(struct pci_controller *hose)
need_domain_info = need_domain_info || hose->index;
hose->need_domain_info = need_domain_info;
if (bus) {
- next_busno = bus->subordinate + 1;
+ next_busno = bus->busn_res.end + 1;
/* Don't allow 8-bit bus number overflow inside the hose -
reserve some space for bridges. */
if (next_busno > 224) {
@@ -348,9 +348,9 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
-char * (*pcibios_plat_setup)(char *str) __devinitdata;
+char * (*pcibios_plat_setup)(char *str) __initdata;
-char *__devinit pcibios_setup(char *str)
+char *__init pcibios_setup(char *str)
{
if (pcibios_plat_setup)
return pcibios_plat_setup(str);
diff --git a/arch/mips/pmc-sierra/yosemite/ht.c b/arch/mips/pmc-sierra/yosemite/ht.c
index 63be40e470db..14dc9c8fff0e 100644
--- a/arch/mips/pmc-sierra/yosemite/ht.c
+++ b/arch/mips/pmc-sierra/yosemite/ht.c
@@ -395,17 +395,6 @@ void __init pcibios_init(void)
pci_scan_bus(3, &titan_pci_ops, NULL);
}
-/*
- * for parsing "pci=" kernel boot arguments.
- */
-char *pcibios_setup(char *str)
-{
- printk(KERN_INFO "rr: pcibios_setup\n");
- /* Nothing to do for now. */
-
- return str;
-}
-
unsigned __init int pcibios_assign_all_busses(void)
{
/* We want to use the PCI bus detection done by PMON */
diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c
index b71fae231049..5edab2bc6fc0 100644
--- a/arch/mips/pmc-sierra/yosemite/smp.c
+++ b/arch/mips/pmc-sierra/yosemite/smp.c
@@ -115,11 +115,11 @@ static void yos_send_ipi_mask(const struct cpumask *mask, unsigned int action)
*/
static void __cpuinit yos_init_secondary(void)
{
- set_c0_status(ST0_CO | ST0_IE | ST0_IM);
}
static void __cpuinit yos_smp_finish(void)
{
+ set_c0_status(ST0_CO | ST0_IM | ST0_IE);
}
/* Hook for after all CPUs are online */
diff --git a/arch/mips/powertv/asic/asic-calliope.c b/arch/mips/powertv/asic/asic-calliope.c
index 0a170e0ffeaa..7773f3d956b0 100644
--- a/arch/mips/powertv/asic/asic-calliope.c
+++ b/arch/mips/powertv/asic/asic-calliope.c
@@ -28,7 +28,7 @@
#define CALLIOPE_ADDR(x) (CALLIOPE_IO_BASE + (x))
-const struct register_map calliope_register_map __initdata = {
+const struct register_map calliope_register_map __initconst = {
.eic_slow0_strt_add = {.phys = CALLIOPE_ADDR(0x800000)},
.eic_cfg_bits = {.phys = CALLIOPE_ADDR(0x800038)},
.eic_ready_status = {.phys = CALLIOPE_ADDR(0x80004c)},
diff --git a/arch/mips/powertv/asic/asic-cronus.c b/arch/mips/powertv/asic/asic-cronus.c
index bbc0c122be5e..da076db7b7ed 100644
--- a/arch/mips/powertv/asic/asic-cronus.c
+++ b/arch/mips/powertv/asic/asic-cronus.c
@@ -28,7 +28,7 @@
#define CRONUS_ADDR(x) (CRONUS_IO_BASE + (x))
-const struct register_map cronus_register_map __initdata = {
+const struct register_map cronus_register_map __initconst = {
.eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)},
.eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)},
.eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)},
diff --git a/arch/mips/powertv/asic/asic-gaia.c b/arch/mips/powertv/asic/asic-gaia.c
index 91dda682752c..47683b370e74 100644
--- a/arch/mips/powertv/asic/asic-gaia.c
+++ b/arch/mips/powertv/asic/asic-gaia.c
@@ -23,7 +23,7 @@
#include <linux/init.h>
#include <asm/mach-powertv/asic.h>
-const struct register_map gaia_register_map __initdata = {
+const struct register_map gaia_register_map __initconst = {
.eic_slow0_strt_add = {.phys = GAIA_IO_BASE + 0x000000},
.eic_cfg_bits = {.phys = GAIA_IO_BASE + 0x000038},
.eic_ready_status = {.phys = GAIA_IO_BASE + 0x00004C},
diff --git a/arch/mips/powertv/asic/asic-zeus.c b/arch/mips/powertv/asic/asic-zeus.c
index 4a05bb096476..6ff4b10f09da 100644
--- a/arch/mips/powertv/asic/asic-zeus.c
+++ b/arch/mips/powertv/asic/asic-zeus.c
@@ -28,7 +28,7 @@
#define ZEUS_ADDR(x) (ZEUS_IO_BASE + (x))
-const struct register_map zeus_register_map __initdata = {
+const struct register_map zeus_register_map __initconst = {
.eic_slow0_strt_add = {.phys = ZEUS_ADDR(0x000000)},
.eic_cfg_bits = {.phys = ZEUS_ADDR(0x000038)},
.eic_ready_status = {.phys = ZEUS_ADDR(0x00004c)},
diff --git a/arch/mips/powertv/powertv_setup.c b/arch/mips/powertv/powertv_setup.c
index 3933c373a438..820b8480f222 100644
--- a/arch/mips/powertv/powertv_setup.c
+++ b/arch/mips/powertv/powertv_setup.c
@@ -254,7 +254,7 @@ early_param("rfmac", rfmac_param);
* Generates an Ethernet MAC address that is highly likely to be unique for
* this particular system on a network with other systems of the same type.
*
- * The problem we are solving is that, when random_ether_addr() is used to
+ * The problem we are solving is that, when eth_random_addr() is used to
* generate MAC addresses at startup, there isn't much entropy for the random
* number generator to use and the addresses it produces are fairly likely to
* be the same as those of other identical systems on the same local network.
@@ -269,7 +269,7 @@ early_param("rfmac", rfmac_param);
* Still, this does give us something to work with.
*
* The approach we take is:
- * 1. If we can't get the RF MAC Address, just call random_ether_addr.
+ * 1. If we can't get the RF MAC Address, just call eth_random_addr.
* 2. Use the 24-bit NIC-specific bits of the RF MAC address as the last 24
* bits of the new address. This is very likely to be unique, except for
* the current box.
@@ -299,7 +299,7 @@ void platform_random_ether_addr(u8 addr[ETH_ALEN])
if (!have_rfmac) {
pr_warning("rfmac not available on command line; "
"generating random MAC address\n");
- random_ether_addr(addr);
+ eth_random_addr(addr);
}
else {
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index 682efb0c108d..125db323ab1e 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -256,7 +256,7 @@ static irqreturn_t i8259_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int __init
+static int __devinit
txx9_i8259_irq_setup(int irq)
{
int err;
@@ -269,7 +269,7 @@ txx9_i8259_irq_setup(int irq)
return err;
}
-static void __init quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __devinit quirk_slc90e66_bridge(struct pci_dev *dev)
{
int irq; /* PCI/ISA Bridge interrupt */
u8 reg_64;
@@ -398,9 +398,9 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return txx9_board_vec->pci_map_irq(dev, slot, pin);
}
-char * (*txx9_board_pcibios_setup)(char *str) __devinitdata;
+char * (*txx9_board_pcibios_setup)(char *str) __initdata;
-char *__devinit txx9_pcibios_setup(char *str)
+char *__init txx9_pcibios_setup(char *str)
{
if (txx9_board_pcibios_setup && !txx9_board_pcibios_setup(str))
return NULL;
diff --git a/arch/mn10300/include/asm/ptrace.h b/arch/mn10300/include/asm/ptrace.h
index 55b79ef10028..44251b974f1d 100644
--- a/arch/mn10300/include/asm/ptrace.h
+++ b/arch/mn10300/include/asm/ptrace.h
@@ -81,9 +81,6 @@ struct pt_regs {
#define PTRACE_GETFPREGS 14
#define PTRACE_SETFPREGS 15
-/* options set using PTRACE_SETOPTIONS */
-#define PTRACE_O_TRACESYSGOOD 0x00000001
-
#ifdef __KERNEL__
#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 08251d6f6b11..ac519bbd42ff 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -123,7 +123,7 @@ static inline unsigned long current_stack_pointer(void)
}
#ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_info(struct thread_info *ti);
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
diff --git a/arch/mn10300/include/asm/timex.h b/arch/mn10300/include/asm/timex.h
index bd4e90dfe6c2..f8e66425cbf8 100644
--- a/arch/mn10300/include/asm/timex.h
+++ b/arch/mn10300/include/asm/timex.h
@@ -11,7 +11,6 @@
#ifndef _ASM_TIMEX_H
#define _ASM_TIMEX_H
-#include <asm/hardirq.h>
#include <unit/timex.h>
#define TICK_SIZE (tick_nsec / 1000)
@@ -30,16 +29,6 @@ static inline cycles_t get_cycles(void)
extern int init_clockevents(void);
extern int init_clocksource(void);
-static inline void setup_jiffies_interrupt(int irq,
- struct irqaction *action)
-{
- u16 tmp;
- setup_irq(irq, action);
- set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
- GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
- tmp = GxICR(irq);
-}
-
#endif /* __KERNEL__ */
#endif /* _ASM_TIMEX_H */
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c
index 69cae0260786..ccce35e3e179 100644
--- a/arch/mn10300/kernel/cevt-mn10300.c
+++ b/arch/mn10300/kernel/cevt-mn10300.c
@@ -70,6 +70,16 @@ static void event_handler(struct clock_event_device *dev)
{
}
+static inline void setup_jiffies_interrupt(int irq,
+ struct irqaction *action)
+{
+ u16 tmp;
+ setup_irq(irq, action);
+ set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+ GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+ tmp = GxICR(irq);
+}
+
int __init init_clockevents(void)
{
struct clock_event_device *cd;
diff --git a/arch/mn10300/kernel/internal.h b/arch/mn10300/kernel/internal.h
index a5ac755dd69f..2df440105a80 100644
--- a/arch/mn10300/kernel/internal.h
+++ b/arch/mn10300/kernel/internal.h
@@ -9,6 +9,8 @@
* 2 of the Licence, or (at your option) any later version.
*/
+#include <linux/irqreturn.h>
+
struct clocksource;
struct clock_event_device;
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index 2381df83bd00..35932a8de8b8 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -170,9 +170,9 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
case TM12IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER9
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
case TM9IRQ:
-#elif CONFIG_MN10300_TTYSM1_TIMER3
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index 6ab0bee2a54f..4d584ae29ae1 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -459,10 +459,11 @@ static int handle_signal(int sig,
else
ret = setup_frame(sig, ka, oldset, regs);
if (ret)
- return;
+ return ret;
signal_delivered(sig, info, ka, regs,
- test_thread_flag(TIF_SINGLESTEP));
+ test_thread_flag(TIF_SINGLESTEP));
+ return 0;
}
/*
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
index 090d35d36973..e62c223e4c45 100644
--- a/arch/mn10300/kernel/smp.c
+++ b/arch/mn10300/kernel/smp.c
@@ -876,9 +876,7 @@ static void __init smp_online(void)
notify_cpu_starting(cpu);
- ipi_call_lock();
set_cpu_online(cpu, true);
- ipi_call_unlock();
local_irq_enable();
}
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index 94a9c6d53e1b..b900e5afa0ae 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -26,6 +26,7 @@
#include <linux/kdebug.h>
#include <linux/bug.h>
#include <linux/irq.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/io.h>
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index 159acb02cfd4..e244ebe637e1 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -15,6 +15,7 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/gfp.h>
+#include <linux/export.h>
#include <asm/io.h>
static unsigned long pci_sram_allocated = 0xbc000000;
diff --git a/arch/mn10300/unit-asb2303/include/unit/timex.h b/arch/mn10300/unit-asb2303/include/unit/timex.h
index cc18fe7d8b90..c37f9832cf17 100644
--- a/arch/mn10300/unit-asb2303/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2303/include/unit/timex.h
@@ -11,10 +11,6 @@
#ifndef _ASM_UNIT_TIMEX_H
#define _ASM_UNIT_TIMEX_H
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
#include <asm/timer-regs.h>
#include <unit/clock.h>
#include <asm/param.h>
diff --git a/arch/mn10300/unit-asb2303/smc91111.c b/arch/mn10300/unit-asb2303/smc91111.c
index 43c246439413..53677694b165 100644
--- a/arch/mn10300/unit-asb2303/smc91111.c
+++ b/arch/mn10300/unit-asb2303/smc91111.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <asm/io.h>
+#include <asm/irq.h>
#include <asm/timex.h>
#include <asm/processor.h>
#include <asm/intctl-regs.h>
diff --git a/arch/mn10300/unit-asb2305/include/unit/timex.h b/arch/mn10300/unit-asb2305/include/unit/timex.h
index 758af30d1a16..4cefc224f448 100644
--- a/arch/mn10300/unit-asb2305/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2305/include/unit/timex.h
@@ -11,10 +11,6 @@
#ifndef _ASM_UNIT_TIMEX_H
#define _ASM_UNIT_TIMEX_H
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
#include <asm/timer-regs.h>
#include <unit/clock.h>
#include <asm/param.h>
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c
index e1becd6b7571..bc4adfaf815c 100644
--- a/arch/mn10300/unit-asb2305/unit-init.c
+++ b/arch/mn10300/unit-asb2305/unit-init.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/io.h>
+#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/processor.h>
#include <asm/intctl-regs.h>
diff --git a/arch/mn10300/unit-asb2364/include/unit/timex.h b/arch/mn10300/unit-asb2364/include/unit/timex.h
index ddb7ed010706..42f32db75087 100644
--- a/arch/mn10300/unit-asb2364/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2364/include/unit/timex.h
@@ -11,10 +11,6 @@
#ifndef _ASM_UNIT_TIMEX_H
#define _ASM_UNIT_TIMEX_H
-#ifndef __ASSEMBLY__
-#include <linux/irq.h>
-#endif /* __ASSEMBLY__ */
-
#include <asm/timer-regs.h>
#include <unit/clock.h>
#include <asm/param.h>
diff --git a/arch/parisc/include/asm/compat_rt_sigframe.h b/arch/parisc/include/asm/compat_rt_sigframe.h
index 81bec28bdc48..b3f95a7f18b4 100644
--- a/arch/parisc/include/asm/compat_rt_sigframe.h
+++ b/arch/parisc/include/asm/compat_rt_sigframe.h
@@ -1,6 +1,6 @@
-#include<linux/compat.h>
-#include<linux/compat_siginfo.h>
-#include<asm/compat_ucontext.h>
+#include <linux/compat.h>
+#include <linux/compat_siginfo.h>
+#include <asm/compat_ucontext.h>
#ifndef _ASM_PARISC_COMPAT_RT_SIGFRAME_H
#define _ASM_PARISC_COMPAT_RT_SIGFRAME_H
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 24644aca10cb..60309051875e 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -139,11 +139,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
}
-char *pcibios_setup(char *str)
-{
- return str;
-}
-
/*
* Called by pci_set_master() - a driver interface.
*
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index a47828d31fe6..6266730efd61 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -300,9 +300,7 @@ smp_cpu_init(int cpunum)
notify_cpu_starting(cpunum);
- ipi_call_lock();
set_cpu_online(cpunum, true);
- ipi_call_unlock();
/* Initialise the idle task for this CPU */
atomic_inc(&init_mm.mm_count);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 050cb371a69e..9a5d3cdc3e12 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -653,7 +653,7 @@ config SBUS
config FSL_SOC
bool
select HAVE_CAN_FLEXCAN if NET && CAN
- select PPC_CLOCK if CAN_FLEXCAN
+ select PPC_CLOCK
config FSL_PCI
bool
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index e5f26890a69e..5416e28a7538 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -331,4 +331,13 @@ config STRICT_DEVMEM
If you are unsure, say Y.
+config FAIL_IOMMU
+ bool "Fault-injection capability for IOMMU"
+ depends on FAULT_INJECTION
+ help
+ Provide fault-injection capability for IOMMU. Each device can
+ be selectively enabled via the fail_iommu property.
+
+ If you are unsure, say N.
+
endmenu
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index e8461cb18d04..b7d833382be4 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -62,26 +62,45 @@ libfdtheader := fdt.h libfdt.h libfdt_internal.h
$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o): \
$(addprefix $(obj)/,$(libfdtheader))
-src-wlib := string.S crt0.S crtsavres.S stdio.c main.c \
+src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
$(libfdt) libfdt-wrapper.c \
ns16550.c serial.c simple_alloc.c div64.S util.S \
- gunzip_util.c elf_util.c $(zlib) devtree.c oflib.c ofconsole.c \
- 4xx.c ebony.c mv64x60.c mpsc.c mv64x60_i2c.c cuboot.c bamboo.c \
- cpm-serial.c stdlib.c mpc52xx-psc.c planetcore.c uartlite.c \
- fsl-soc.c mpc8xx.c pq2.c ugecon.c
-src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c \
- cuboot-ebony.c cuboot-hotfoot.c epapr.c treeboot-ebony.c \
- prpmc2800.c \
- ps3-head.S ps3-hvcall.S ps3.c treeboot-bamboo.c cuboot-8xx.c \
- cuboot-pq2.c cuboot-sequoia.c treeboot-walnut.c \
- cuboot-bamboo.c cuboot-mpc7448hpc2.c cuboot-taishan.c \
- fixed-head.S ep88xc.c ep405.c cuboot-c2k.c \
- cuboot-katmai.c cuboot-rainier.c redboot-8xx.c ep8248e.c \
- cuboot-warp.c cuboot-85xx-cpm2.c cuboot-yosemite.c simpleboot.c \
- virtex405-head.S virtex.c redboot-83xx.c cuboot-sam440ep.c \
- cuboot-acadia.c cuboot-amigaone.c cuboot-kilauea.c \
- gamecube-head.S gamecube.c wii-head.S wii.c treeboot-iss4xx.c \
- treeboot-currituck.c
+ gunzip_util.c elf_util.c $(zlib) devtree.c stdlib.c \
+ oflib.c ofconsole.c cuboot.c mpsc.c cpm-serial.c \
+ uartlite.c mpc52xx-psc.c
+src-wlib-$(CONFIG_40x) += 4xx.c planetcore.c
+src-wlib-$(CONFIG_44x) += 4xx.c ebony.c bamboo.c
+src-wlib-$(CONFIG_8xx) += mpc8xx.c planetcore.c
+src-wlib-$(CONFIG_PPC_82xx) += pq2.c fsl-soc.c planetcore.c
+src-wlib-$(CONFIG_EMBEDDED6xx) += mv64x60.c mv64x60_i2c.c ugecon.c
+
+src-plat-y := of.c
+src-plat-$(CONFIG_40x) += fixed-head.S ep405.c cuboot-hotfoot.c \
+ treeboot-walnut.c cuboot-acadia.c \
+ cuboot-kilauea.c simpleboot.c \
+ virtex405-head.S virtex.c
+src-plat-$(CONFIG_44x) += treeboot-ebony.c cuboot-ebony.c treeboot-bamboo.c \
+ cuboot-bamboo.c cuboot-sam440ep.c \
+ cuboot-sequoia.c cuboot-rainier.c \
+ cuboot-taishan.c cuboot-katmai.c \
+ cuboot-warp.c cuboot-yosemite.c \
+ treeboot-iss4xx.c treeboot-currituck.c \
+ simpleboot.c fixed-head.S virtex.c
+src-plat-$(CONFIG_8xx) += cuboot-8xx.c fixed-head.S ep88xc.c redboot-8xx.c
+src-plat-$(CONFIG_PPC_MPC52xx) += cuboot-52xx.c
+src-plat-$(CONFIG_PPC_82xx) += cuboot-pq2.c fixed-head.S ep8248e.c cuboot-824x.c
+src-plat-$(CONFIG_PPC_83xx) += cuboot-83xx.c fixed-head.S redboot-83xx.c
+src-plat-$(CONFIG_FSL_SOC_BOOKE) += cuboot-85xx.c cuboot-85xx-cpm2.c
+src-plat-$(CONFIG_EMBEDDED6xx) += cuboot-pq2.c cuboot-mpc7448hpc2.c \
+ cuboot-c2k.c gamecube-head.S \
+ gamecube.c wii-head.S wii.c holly.c \
+ prpmc2800.c
+src-plat-$(CONFIG_AMIGAONE) += cuboot-amigaone.c
+src-plat-$(CONFIG_PPC_PS3) += ps3-head.S ps3-hvcall.S ps3.c
+src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c
+
+src-wlib := $(sort $(src-wlib-y))
+src-plat := $(sort $(src-plat-y))
src-boot := $(src-wlib) $(src-plat) empty.c
src-boot := $(addprefix $(obj)/, $(src-boot))
@@ -257,7 +276,6 @@ image-$(CONFIG_TQM8548) += cuImage.tqm8548
image-$(CONFIG_TQM8555) += cuImage.tqm8555
image-$(CONFIG_TQM8560) += cuImage.tqm8560
image-$(CONFIG_SBC8548) += cuImage.sbc8548
-image-$(CONFIG_SBC8560) += cuImage.sbc8560
image-$(CONFIG_KSI8560) += cuImage.ksi8560
# Board ports in arch/powerpc/platform/embedded6xx/Kconfig
@@ -412,4 +430,3 @@ $(wrapper-installed): $(DESTDIR)$(WRAPPER_BINDIR) $(srctree)/$(obj)/wrapper | $(
$(call cmd,install_wrapper)
$(obj)/bootwrapper_install: $(all-installed)
-
diff --git a/arch/powerpc/boot/dts/bsc9131rdb.dts b/arch/powerpc/boot/dts/bsc9131rdb.dts
new file mode 100644
index 000000000000..e13d2d4877b0
--- /dev/null
+++ b/arch/powerpc/boot/dts/bsc9131rdb.dts
@@ -0,0 +1,34 @@
+/*
+ * BSC9131 RDB Device Tree Source
+ *
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+/include/ "fsl/bsc9131si-pre.dtsi"
+
+/ {
+ model = "fsl,bsc9131rdb";
+ compatible = "fsl,bsc9131rdb";
+
+ memory {
+ device_type = "memory";
+ };
+
+ board_ifc: ifc: ifc@ff71e000 {
+ /* NAND Flash on board */
+ ranges = <0x0 0x0 0x0 0xff800000 0x00004000>;
+ reg = <0x0 0xff71e000 0x0 0x2000>;
+ };
+
+ board_soc: soc: soc@ff700000 {
+ ranges = <0x0 0x0 0xff700000 0x100000>;
+ };
+};
+
+/include/ "bsc9131rdb.dtsi"
+/include/ "fsl/bsc9131si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/bsc9131rdb.dtsi b/arch/powerpc/boot/dts/bsc9131rdb.dtsi
new file mode 100644
index 000000000000..638adda2c218
--- /dev/null
+++ b/arch/powerpc/boot/dts/bsc9131rdb.dtsi
@@ -0,0 +1,142 @@
+/*
+ * BSC9131 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&board_ifc {
+
+ nand@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc-nand";
+ reg = <0x0 0x0 0x4000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 3MB for u-boot Bootloader Image */
+ reg = <0x0 0x00300000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@300000 {
+ /* 1MB for DTB Image */
+ reg = <0x00300000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@400000 {
+ /* 8MB for Linux Kernel Image */
+ reg = <0x00400000 0x00800000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@c00000 {
+ /* Rest space for Root file System Image */
+ reg = <0x00c00000 0x07400000>;
+ label = "NAND RFS Image";
+ };
+ };
+};
+
+&board_soc {
+ /* BSC9131RDB does not have any device on i2c@3100 */
+ i2c@3100 {
+ status = "disabled";
+ };
+
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl12801";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+
+ /* 512KB for u-boot Bootloader Image */
+ partition@0 {
+ reg = <0x0 0x00080000>;
+ label = "SPI Flash U-Boot Image";
+ read-only;
+ };
+
+ /* 512KB for DTB Image */
+ partition@80000 {
+ reg = <0x00080000 0x00080000>;
+ label = "SPI Flash DTB Image";
+ };
+
+ /* 4MB for Linux Kernel Image */
+ partition@100000 {
+ reg = <0x00100000 0x00400000>;
+ label = "SPI Flash Kernel Image";
+ };
+
+ /*11MB for RFS Image */
+ partition@500000 {
+ reg = <0x00500000 0x00B00000>;
+ label = "SPI Flash RFS Image";
+ };
+
+ };
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupts = <3 1 0 0>;
+ reg = <0x0>;
+ };
+
+ phy1: ethernet-phy@1 {
+ interrupts = <2 1 0 0>;
+ reg = <0x3>;
+ };
+ };
+
+ sdhci@2e000 {
+ status = "disabled";
+ };
+
+ enet0: ethernet@b0000 {
+ phy-handle = <&phy0>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ enet1: ethernet@b1000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
new file mode 100644
index 000000000000..5180d9d37989
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/bsc9131si-post.dtsi
@@ -0,0 +1,193 @@
+/*
+ * BSC9131 Silicon/SoC Device Tree Source (post include)
+ *
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&ifc {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "fsl,ifc", "simple-bus";
+ interrupts = <16 2 0 0 20 2 0 0>;
+};
+
+&soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ device_type = "soc";
+ compatible = "fsl,bsc9131-immr", "simple-bus";
+ bus-frequency = <0>; // Filled out by uboot.
+
+ ecm-law@0 {
+ compatible = "fsl,ecm-law";
+ reg = <0x0 0x1000>;
+ fsl,num-laws = <12>;
+ };
+
+ ecm@1000 {
+ compatible = "fsl,bsc9131-ecm", "fsl,ecm";
+ reg = <0x1000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+ memory-controller@2000 {
+ compatible = "fsl,bsc9131-memory-controller";
+ reg = <0x2000 0x1000>;
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-i2c-0.dtsi"
+ i2c@3000 {
+ interrupts = <17 2 0 0>;
+ };
+
+/include/ "pq3-i2c-1.dtsi"
+ i2c@3100 {
+ interrupts = <17 2 0 0>;
+ };
+
+/include/ "pq3-duart-0.dtsi"
+ serial0: serial@4500 {
+ interrupts = <18 2 0 0>;
+ };
+
+ serial1: serial@4600 {
+ interrupts = <18 2 0 0 >;
+ };
+/include/ "pq3-espi-0.dtsi"
+ spi0: spi@7000 {
+ fsl,espi-num-chipselects = <1>;
+ interrupts = <22 0x2 0 0>;
+ };
+
+/include/ "pq3-gpio-0.dtsi"
+ gpio-controller@f000 {
+ interrupts = <19 0x2 0 0>;
+ };
+
+ L2: l2-cache-controller@20000 {
+ compatible = "fsl,bsc9131-l2-cache-controller";
+ reg = <0x20000 0x1000>;
+ cache-line-size = <32>; // 32 bytes
+ cache-size = <0x40000>; // L2,256K
+ interrupts = <16 2 0 0>;
+ };
+
+/include/ "pq3-dma-0.dtsi"
+
+dma@21300 {
+
+ dma-channel@0 {
+ interrupts = <62 2 0 0>;
+ };
+
+ dma-channel@80 {
+ interrupts = <63 2 0 0>;
+ };
+
+ dma-channel@100 {
+ interrupts = <64 2 0 0>;
+ };
+
+ dma-channel@180 {
+ interrupts = <65 2 0 0>;
+ };
+};
+
+/include/ "pq3-usb2-dr-0.dtsi"
+usb@22000 {
+ compatible = "fsl-usb2-dr","fsl-usb2-dr-v2.2";
+ interrupts = <40 0x2 0 0>;
+};
+
+/include/ "pq3-esdhc-0.dtsi"
+ sdhc@2e000 {
+ fsl,sdhci-auto-cmd12;
+ interrupts = <41 0x2 0 0>;
+ };
+
+/include/ "pq3-sec4.4-0.dtsi"
+crypto@30000 {
+ interrupts = <57 2 0 0>;
+
+ sec_jr0: jr@1000 {
+ interrupts = <58 2 0 0>;
+ };
+
+ sec_jr1: jr@2000 {
+ interrupts = <59 2 0 0>;
+ };
+
+ sec_jr2: jr@3000 {
+ interrupts = <60 2 0 0>;
+ };
+
+ sec_jr3: jr@4000 {
+ interrupts = <61 2 0 0>;
+ };
+};
+
+/include/ "pq3-mpic.dtsi"
+
+timer@41100 {
+ compatible = "fsl,mpic-v1.2-msgr", "fsl,mpic-msg";
+ reg = <0x41400 0x200>;
+ interrupts = <
+ 0xb0 2
+ 0xb1 2
+ 0xb2 2
+ 0xb3 2>;
+};
+
+/include/ "pq3-etsec2-0.dtsi"
+enet0: ethernet@b0000 {
+ queue-group@b0000 {
+ fsl,rx-bit-map = <0xff>;
+ fsl,tx-bit-map = <0xff>;
+ interrupts = <26 2 0 0 27 2 0 0 28 2 0 0>;
+ };
+};
+
+/include/ "pq3-etsec2-1.dtsi"
+enet1: ethernet@b1000 {
+ queue-group@b1000 {
+ fsl,rx-bit-map = <0xff>;
+ fsl,tx-bit-map = <0xff>;
+ interrupts = <33 2 0 0 34 2 0 0 35 2 0 0>;
+ };
+};
+
+global-utilities@e0000 {
+ compatible = "fsl,bsc9131-guts";
+ reg = <0xe0000 0x1000>;
+ fsl,has-rstcr;
+ };
+};
diff --git a/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi b/arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi
index 00c8e70e7b90..743e4aeda349 100644
--- a/arch/powerpc/boot/dts/fsl/p3060si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/bsc9131si-pre.dtsi
@@ -1,7 +1,7 @@
/*
- * P3060 Silicon/SoC Device Tree Source (pre include)
+ * BSC9131 Silicon/SoC Device Tree Source (pre include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -34,92 +34,26 @@
/dts-v1/;
/ {
- compatible = "fsl,P3060";
+ compatible = "fsl,BSC9131";
#address-cells = <2>;
#size-cells = <2>;
interrupt-parent = <&mpic>;
aliases {
- ccsr = &soc;
- dcsr = &dcsr;
-
serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
- serial3 = &serial3;
- pci0 = &pci0;
- pci1 = &pci1;
- usb0 = &usb0;
- usb1 = &usb1;
- dma0 = &dma0;
- dma1 = &dma1;
- msi0 = &msi0;
- msi1 = &msi1;
- msi2 = &msi2;
-
- crypto = &crypto;
- sec_jr0 = &sec_jr0;
- sec_jr1 = &sec_jr1;
- sec_jr2 = &sec_jr2;
- sec_jr3 = &sec_jr3;
- rtic_a = &rtic_a;
- rtic_b = &rtic_b;
- rtic_c = &rtic_c;
- rtic_d = &rtic_d;
- sec_mon = &sec_mon;
+ ethernet0 = &enet0;
+ ethernet1 = &enet1;
};
cpus {
#address-cells = <1>;
#size-cells = <0>;
- cpu0: PowerPC,e500mc@0 {
- device_type = "cpu";
- reg = <0>;
- next-level-cache = <&L2_0>;
- L2_0: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu1: PowerPC,e500mc@1 {
- device_type = "cpu";
- reg = <1>;
- next-level-cache = <&L2_1>;
- L2_1: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu4: PowerPC,e500mc@4 {
- device_type = "cpu";
- reg = <4>;
- next-level-cache = <&L2_4>;
- L2_4: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu5: PowerPC,e500mc@5 {
- device_type = "cpu";
- reg = <5>;
- next-level-cache = <&L2_5>;
- L2_5: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu6: PowerPC,e500mc@6 {
- device_type = "cpu";
- reg = <6>;
- next-level-cache = <&L2_6>;
- L2_6: l2-cache {
- next-level-cache = <&cpc>;
- };
- };
- cpu7: PowerPC,e500mc@7 {
+ PowerPC,BSC9131@0 {
device_type = "cpu";
- reg = <7>;
- next-level-cache = <&L2_7>;
- L2_7: l2-cache {
- next-level-cache = <&cpc>;
- };
+ compatible = "fsl,e500v2";
+ reg = <0x0>;
+ next-level-cache = <&L2>;
};
};
};
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
index 4252ef85fb7a..adb82fd9057f 100644
--- a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P1021/P1012 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -213,6 +213,20 @@
interrupt-parent = <&qeic>;
};
+ ucc@2600 {
+ cell-index = <7>;
+ reg = <0x2600 0x200>;
+ interrupts = <42>;
+ interrupt-parent = <&qeic>;
+ };
+
+ ucc@2200 {
+ cell-index = <3>;
+ reg = <0x2200 0x200>;
+ interrupts = <34>;
+ interrupt-parent = <&qeic>;
+ };
+
muram@10000 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
deleted file mode 100644
index b3e56929eee2..000000000000
--- a/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * P3060 Silicon/SoC Device Tree Source (post include)
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-&lbc {
- compatible = "fsl,p3060-elbc", "fsl,elbc", "simple-bus";
- interrupts = <25 2 0 0>;
- #address-cells = <2>;
- #size-cells = <1>;
-};
-
-/* controller at 0x200000 */
-&pci0 {
- compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0x0 0xff>;
- clock-frequency = <33333333>;
- interrupts = <16 2 1 15>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 15>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 40 1 0 0
- 0000 0 0 2 &mpic 1 1 0 0
- 0000 0 0 3 &mpic 2 1 0 0
- 0000 0 0 4 &mpic 3 1 0 0
- >;
- };
-};
-
-/* controller at 0x201000 */
-&pci1 {
- compatible = "fsl,p3060-pcie", "fsl,qoriq-pcie-v2.2";
- device_type = "pci";
- #size-cells = <2>;
- #address-cells = <3>;
- bus-range = <0 0xff>;
- clock-frequency = <33333333>;
- interrupts = <16 2 1 14>;
- pcie@0 {
- reg = <0 0 0 0 0>;
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- device_type = "pci";
- interrupts = <16 2 1 14>;
- interrupt-map-mask = <0xf800 0 0 7>;
- interrupt-map = <
- /* IDSEL 0x0 */
- 0000 0 0 1 &mpic 41 1 0 0
- 0000 0 0 2 &mpic 5 1 0 0
- 0000 0 0 3 &mpic 6 1 0 0
- 0000 0 0 4 &mpic 7 1 0 0
- >;
- };
-};
-
-&rio {
- compatible = "fsl,srio";
- interrupts = <16 2 1 11>;
- #address-cells = <2>;
- #size-cells = <2>;
- fsl,srio-rmu-handle = <&rmu>;
- ranges;
-
- port1 {
- #address-cells = <2>;
- #size-cells = <2>;
- cell-index = <1>;
- };
-
- port2 {
- #address-cells = <2>;
- #size-cells = <2>;
- cell-index = <2>;
- };
-};
-
-&dcsr {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,dcsr", "simple-bus";
-
- dcsr-epu@0 {
- compatible = "fsl,dcsr-epu";
- interrupts = <52 2 0 0
- 84 2 0 0
- 85 2 0 0>;
- reg = <0x0 0x1000>;
- };
- dcsr-npc {
- compatible = "fsl,dcsr-npc";
- reg = <0x1000 0x1000 0x1000000 0x8000>;
- };
- dcsr-nxc@2000 {
- compatible = "fsl,dcsr-nxc";
- reg = <0x2000 0x1000>;
- };
- dcsr-corenet {
- compatible = "fsl,dcsr-corenet";
- reg = <0x8000 0x1000 0xB0000 0x1000>;
- };
- dcsr-dpaa@9000 {
- compatible = "fsl,p3060-dcsr-dpaa", "fsl,dcsr-dpaa";
- reg = <0x9000 0x1000>;
- };
- dcsr-ocn@11000 {
- compatible = "fsl,p3060-dcsr-ocn", "fsl,dcsr-ocn";
- reg = <0x11000 0x1000>;
- };
- dcsr-ddr@12000 {
- compatible = "fsl,dcsr-ddr";
- dev-handle = <&ddr1>;
- reg = <0x12000 0x1000>;
- };
- dcsr-nal@18000 {
- compatible = "fsl,p3060-dcsr-nal", "fsl,dcsr-nal";
- reg = <0x18000 0x1000>;
- };
- dcsr-rcpm@22000 {
- compatible = "fsl,p3060-dcsr-rcpm", "fsl,dcsr-rcpm";
- reg = <0x22000 0x1000>;
- };
- dcsr-cpu-sb-proxy@40000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu0>;
- reg = <0x40000 0x1000>;
- };
- dcsr-cpu-sb-proxy@41000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu1>;
- reg = <0x41000 0x1000>;
- };
- dcsr-cpu-sb-proxy@44000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu4>;
- reg = <0x44000 0x1000>;
- };
- dcsr-cpu-sb-proxy@45000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu5>;
- reg = <0x45000 0x1000>;
- };
- dcsr-cpu-sb-proxy@46000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu6>;
- reg = <0x46000 0x1000>;
- };
- dcsr-cpu-sb-proxy@47000 {
- compatible = "fsl,dcsr-e500mc-sb-proxy", "fsl,dcsr-cpu-sb-proxy";
- cpu-handle = <&cpu7>;
- reg = <0x47000 0x1000>;
- };
-
-};
-
-&soc {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
-
- soc-sram-error {
- compatible = "fsl,soc-sram-error";
- interrupts = <16 2 1 29>;
- };
-
- corenet-law@0 {
- compatible = "fsl,corenet-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <32>;
- };
-
- ddr1: memory-controller@8000 {
- compatible = "fsl,qoriq-memory-controller-v4.4", "fsl,qoriq-memory-controller";
- reg = <0x8000 0x1000>;
- interrupts = <16 2 1 23>;
- };
-
- cpc: l3-cache-controller@10000 {
- compatible = "fsl,p3060-l3-cache-controller", "cache";
- reg = <0x10000 0x1000
- 0x11000 0x1000>;
- interrupts = <16 2 1 27
- 16 2 1 26>;
- };
-
- corenet-cf@18000 {
- compatible = "fsl,corenet-cf";
- reg = <0x18000 0x1000>;
- interrupts = <16 2 1 31>;
- fsl,ccf-num-csdids = <32>;
- fsl,ccf-num-snoopids = <32>;
- };
-
- iommu@20000 {
- compatible = "fsl,pamu-v1.0", "fsl,pamu";
- reg = <0x20000 0x5000>;
- interrupts = <
- 24 2 0 0
- 16 2 1 30>;
- };
-
-/include/ "qoriq-rmu-0.dtsi"
-/include/ "qoriq-mpic.dtsi"
-
- guts: global-utilities@e0000 {
- compatible = "fsl,qoriq-device-config-1.0";
- reg = <0xe0000 0xe00>;
- fsl,has-rstcr;
- #sleep-cells = <1>;
- fsl,liodn-bits = <12>;
- };
-
- pins: global-utilities@e0e00 {
- compatible = "fsl,qoriq-pin-control-1.0";
- reg = <0xe0e00 0x200>;
- #sleep-cells = <2>;
- };
-
- clockgen: global-utilities@e1000 {
- compatible = "fsl,p3060-clockgen", "fsl,qoriq-clockgen-1.0";
- reg = <0xe1000 0x1000>;
- clock-frequency = <0>;
- };
-
- rcpm: global-utilities@e2000 {
- compatible = "fsl,qoriq-rcpm-1.0";
- reg = <0xe2000 0x1000>;
- #sleep-cells = <1>;
- };
-
- sfp: sfp@e8000 {
- compatible = "fsl,p3060-sfp", "fsl,qoriq-sfp-1.0";
- reg = <0xe8000 0x1000>;
- };
-
- serdes: serdes@ea000 {
- compatible = "fsl,p3060-serdes";
- reg = <0xea000 0x1000>;
- };
-
-/include/ "qoriq-dma-0.dtsi"
-/include/ "qoriq-dma-1.dtsi"
-/include/ "qoriq-espi-0.dtsi"
- spi@110000 {
- fsl,espi-num-chipselects = <4>;
- };
-
-/include/ "qoriq-i2c-0.dtsi"
-/include/ "qoriq-i2c-1.dtsi"
-/include/ "qoriq-duart-0.dtsi"
-/include/ "qoriq-duart-1.dtsi"
-/include/ "qoriq-gpio-0.dtsi"
-/include/ "qoriq-usb2-mph-0.dtsi"
- usb@210000 {
- compatible = "fsl-usb2-mph-v2.2", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
- };
-/include/ "qoriq-usb2-dr-0.dtsi"
- usb@211000 {
- compatible = "fsl-usb2-dr-v2.2", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
- };
-/include/ "qoriq-sec4.1-0.dtsi"
-};
diff --git a/arch/powerpc/boot/dts/mgcoge.dts b/arch/powerpc/boot/dts/mgcoge.dts
index ededaf5ac015..d72fb5e219d0 100644
--- a/arch/powerpc/boot/dts/mgcoge.dts
+++ b/arch/powerpc/boot/dts/mgcoge.dts
@@ -222,6 +222,29 @@
interrupt-parent = <&PIC>;
usb-clock = <5>;
};
+ spi@11aa0 {
+ cell-index = <0>;
+ compatible = "fsl,spi", "fsl,cpm2-spi";
+ reg = <0x11a80 0x40 0x89fc 0x2>;
+ interrupts = <2 8>;
+ interrupt-parent = <&PIC>;
+ gpios = < &cpm2_pio_d 19 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ds3106@1 {
+ compatible = "gen,spidev";
+ reg = <0>;
+ spi-max-frequency = <8000000>;
+ };
+ };
+
+ };
+
+ cpm2_pio_d: gpio-controller@10d60 {
+ #gpio-cells = <2>;
+ compatible = "fsl,cpm2-pario-bank";
+ reg = <0x10d60 0x14>;
+ gpio-controller;
};
cpm2_pio_c: gpio-controller@10d40 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dtsi b/arch/powerpc/boot/dts/mpc8536ds.dtsi
index cc46dbd9746d..d304a2d68c62 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8536ds.dtsi
@@ -203,6 +203,14 @@
reg = <1>;
device_type = "ethernet-phy";
};
+ sgmii_phy0: sgmii-phy@0 {
+ interrupts = <6 1 0 0>;
+ reg = <0x1d>;
+ };
+ sgmii_phy1: sgmii-phy@1 {
+ interrupts = <6 1 0 0>;
+ reg = <0x1c>;
+ };
tbi0: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
diff --git a/arch/powerpc/boot/dts/mpc8544ds.dtsi b/arch/powerpc/boot/dts/mpc8544ds.dtsi
index 270f64b90f4e..77ebc9f1d37c 100644
--- a/arch/powerpc/boot/dts/mpc8544ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8544ds.dtsi
@@ -51,6 +51,15 @@
device_type = "ethernet-phy";
};
+ sgmii_phy0: sgmii-phy@0 {
+ interrupts = <6 1 0 0>;
+ reg = <0x1c>;
+ };
+ sgmii_phy1: sgmii-phy@1 {
+ interrupts = <6 1 0 0>;
+ reg = <0x1d>;
+ };
+
tbi0: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dtsi b/arch/powerpc/boot/dts/mpc8572ds.dtsi
index 14178944e220..357490bb84da 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8572ds.dtsi
@@ -169,6 +169,23 @@
reg = <0x3>;
};
+ sgmii_phy0: sgmii-phy@0 {
+ interrupts = <6 1 0 0>;
+ reg = <0x1c>;
+ };
+ sgmii_phy1: sgmii-phy@1 {
+ interrupts = <6 1 0 0>;
+ reg = <0x1d>;
+ };
+ sgmii_phy2: sgmii-phy@2 {
+ interrupts = <7 1 0 0>;
+ reg = <0x1e>;
+ };
+ sgmii_phy3: sgmii-phy@3 {
+ interrupts = <7 1 0 0>;
+ reg = <0x1f>;
+ };
+
tbi0: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
index d34d12712125..ef9ef56b3eeb 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core0.dts
@@ -67,10 +67,10 @@
msi@41600 {
msi-available-ranges = <0 0x80>;
interrupts = <
- 0xe0 0
- 0xe1 0
- 0xe2 0
- 0xe3 0>;
+ 0xe0 0 0 0
+ 0xe1 0 0 0
+ 0xe2 0 0 0
+ 0xe3 0 0 0>;
};
timer@42100 {
status = "disabled";
diff --git a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
index d6a8fafc0d0d..24564ee108e5 100644
--- a/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
+++ b/arch/powerpc/boot/dts/mpc8572ds_camp_core1.dts
@@ -67,9 +67,6 @@
ethernet@24000 {
status = "disabled";
};
- mdio@24520 {
- status = "disabled";
- };
ptp_clock@24e00 {
status = "disabled";
};
@@ -100,10 +97,10 @@
msi@41600 {
msi-available-ranges = <0x80 0x80>;
interrupts = <
- 0xe4 0
- 0xe5 0
- 0xe6 0
- 0xe7 0>;
+ 0xe4 0 0 0
+ 0xe5 0 0 0
+ 0xe6 0 0 0
+ 0xe7 0 0 0>;
};
global-utilities@e0000 {
status = "disabled";
diff --git a/arch/powerpc/boot/dts/p1010rdb.dtsi b/arch/powerpc/boot/dts/p1010rdb.dtsi
index 49776143a1b8..ec7c27a64671 100644
--- a/arch/powerpc/boot/dts/p1010rdb.dtsi
+++ b/arch/powerpc/boot/dts/p1010rdb.dtsi
@@ -126,12 +126,24 @@
&board_soc {
i2c@3000 {
+ eeprom@50 {
+ compatible = "st,24c256";
+ reg = <0x50>;
+ };
+
rtc@68 {
compatible = "pericom,pt7c4338";
reg = <0x68>;
};
};
+ i2c@3100 {
+ eeprom@52 {
+ compatible = "atmel,24c01";
+ reg = <0x52>;
+ };
+ };
+
spi@7000 {
flash@0 {
#address-cells = <1>;
diff --git a/arch/powerpc/boot/dts/p1021rdb.dtsi b/arch/powerpc/boot/dts/p1021rdb-pc.dtsi
index b973461ab751..c13abfbbe2e2 100644
--- a/arch/powerpc/boot/dts/p1021rdb.dtsi
+++ b/arch/powerpc/boot/dts/p1021rdb-pc.dtsi
@@ -1,7 +1,7 @@
/*
* P1021 RDB Device Tree Source stub (no addresses or top-level ranges)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
diff --git a/arch/powerpc/boot/dts/p1021rdb.dts b/arch/powerpc/boot/dts/p1021rdb-pc_32b.dts
index 90b6b4caa273..7cefa12b629a 100644
--- a/arch/powerpc/boot/dts/p1021rdb.dts
+++ b/arch/powerpc/boot/dts/p1021rdb-pc_32b.dts
@@ -1,7 +1,7 @@
/*
* P1021 RDB Device Tree Source
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -92,5 +92,5 @@
};
};
-/include/ "p1021rdb.dtsi"
+/include/ "p1021rdb-pc.dtsi"
/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1021rdb_36b.dts b/arch/powerpc/boot/dts/p1021rdb-pc_36b.dts
index ea6d8b5fa10b..53d0c889039c 100644
--- a/arch/powerpc/boot/dts/p1021rdb_36b.dts
+++ b/arch/powerpc/boot/dts/p1021rdb-pc_36b.dts
@@ -1,7 +1,7 @@
/*
* P1021 RDB Device Tree Source (36-bit address map)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2012 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -92,5 +92,5 @@
};
};
-/include/ "p1021rdb.dtsi"
+/include/ "p1021rdb-pc.dtsi"
/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1022ds.dtsi b/arch/powerpc/boot/dts/p1022ds.dtsi
index 7cdb505036bb..c3344b04d8ff 100644
--- a/arch/powerpc/boot/dts/p1022ds.dtsi
+++ b/arch/powerpc/boot/dts/p1022ds.dtsi
@@ -33,22 +33,6 @@
*/
&board_lbc {
- /*
- * This node is used to access the pixis via "indirect" mode,
- * which is done by writing the pixis register index to chip
- * select 0 and the value to/from chip select 1. Indirect
- * mode is the only way to access the pixis when DIU video
- * is enabled. Note that this assumes that the first column
- * of the 'ranges' property above is the chip select number.
- */
- board-control@0,0 {
- compatible = "fsl,p1022ds-indirect-pixis";
- reg = <0x0 0x0 1 /* CS0 */
- 0x1 0x0 1>; /* CS1 */
- interrupt-parent = <&mpic>;
- interrupts = <8 0 0 0>;
- };
-
nor@0,0 {
#address-cells = <1>;
#size-cells = <1>;
@@ -161,6 +145,10 @@
* the clock is enabled.
*/
};
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
};
spi@7000 {
diff --git a/arch/powerpc/boot/dts/p1024rdb.dtsi b/arch/powerpc/boot/dts/p1024rdb.dtsi
new file mode 100644
index 000000000000..b05dcb40f800
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1024rdb.dtsi
@@ -0,0 +1,228 @@
+/*
+ * P1024 RDB Device Tree Source stub (no addresses or top-level ranges)
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+&lbc {
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "cfi-flash";
+ reg = <0x0 0x0 0x1000000>;
+ bank-width = <2>;
+ device-width = <1>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 256KB for Vitesse 7385 Switch firmware */
+ reg = <0x0 0x00040000>;
+ label = "NOR Vitesse-7385 Firmware";
+ read-only;
+ };
+
+ partition@40000 {
+ /* 256KB for DTB Image */
+ reg = <0x00040000 0x00040000>;
+ label = "NOR DTB Image";
+ };
+
+ partition@80000 {
+ /* 3.5 MB for Linux Kernel Image */
+ reg = <0x00080000 0x00380000>;
+ label = "NOR Linux Kernel Image";
+ };
+
+ partition@400000 {
+ /* 11MB for JFFS2 based Root file System */
+ reg = <0x00400000 0x00b00000>;
+ label = "NOR JFFS2 Root File System";
+ };
+
+ partition@f00000 {
+ /* This location must not be altered */
+ /* 512KB for u-boot Bootloader Image */
+ /* 512KB for u-boot Environment Variables */
+ reg = <0x00f00000 0x00100000>;
+ label = "NOR U-Boot Image";
+ read-only;
+ };
+ };
+
+ nand@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,p1020-fcm-nand",
+ "fsl,elbc-fcm-nand";
+ reg = <0x1 0x0 0x40000>;
+
+ partition@0 {
+ /* This location must not be altered */
+ /* 1MB for u-boot Bootloader Image */
+ reg = <0x0 0x00100000>;
+ label = "NAND U-Boot Image";
+ read-only;
+ };
+
+ partition@100000 {
+ /* 1MB for DTB Image */
+ reg = <0x00100000 0x00100000>;
+ label = "NAND DTB Image";
+ };
+
+ partition@200000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00200000 0x00400000>;
+ label = "NAND Linux Kernel Image";
+ };
+
+ partition@600000 {
+ /* 4MB for Compressed Root file System Image */
+ reg = <0x00600000 0x00400000>;
+ label = "NAND Compressed RFS Image";
+ };
+
+ partition@a00000 {
+ /* 15MB for JFFS2 based Root file System */
+ reg = <0x00a00000 0x00f00000>;
+ label = "NAND JFFS2 Root File System";
+ };
+
+ partition@1900000 {
+ /* 7MB for User Writable Area */
+ reg = <0x01900000 0x00700000>;
+ label = "NAND Writable User area";
+ };
+ };
+};
+
+&soc {
+ spi@7000 {
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,m25p80";
+ reg = <0>;
+ spi-max-frequency = <40000000>;
+
+ partition@0 {
+ /* 512KB for u-boot Bootloader Image */
+ reg = <0x0 0x00080000>;
+ label = "SPI U-Boot Image";
+ read-only;
+ };
+
+ partition@80000 {
+ /* 512KB for DTB Image */
+ reg = <0x00080000 0x00080000>;
+ label = "SPI DTB Image";
+ };
+
+ partition@100000 {
+ /* 4MB for Linux Kernel Image */
+ reg = <0x00100000 0x00400000>;
+ label = "SPI Linux Kernel Image";
+ };
+
+ partition@500000 {
+ /* 4MB for Compressed RFS Image */
+ reg = <0x00500000 0x00400000>;
+ label = "SPI Compressed RFS Image";
+ };
+
+ partition@900000 {
+ /* 7MB for JFFS2 based RFS */
+ reg = <0x00900000 0x00700000>;
+ label = "SPI JFFS2 RFS";
+ };
+ };
+ };
+
+ i2c@3000 {
+ rtc@68 {
+ compatible = "dallas,ds1339";
+ reg = <0x68>;
+ };
+ };
+
+ usb@22000 {
+ phy_type = "ulpi";
+ };
+
+ usb@23000 {
+ status = "disabled";
+ };
+
+ mdio@24000 {
+ phy0: ethernet-phy@0 {
+ interrupts = <3 1 0 0>;
+ reg = <0x0>;
+ };
+ phy1: ethernet-phy@1 {
+ interrupts = <2 1 0 0>;
+ reg = <0x1>;
+ };
+ phy2: ethernet-phy@2 {
+ interrupts = <1 1 0 0>;
+ reg = <0x2>;
+ };
+ };
+
+ mdio@25000 {
+ tbi0: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ mdio@26000 {
+ tbi1: tbi-phy@11 {
+ reg = <0x11>;
+ device_type = "tbi-phy";
+ };
+ };
+
+ ethernet@b0000 {
+ phy-handle = <&phy2>;
+ phy-connection-type = "rgmii-id";
+ };
+
+ ethernet@b1000 {
+ phy-handle = <&phy0>;
+ tbi-handle = <&tbi0>;
+ phy-connection-type = "sgmii";
+ };
+
+ ethernet@b2000 {
+ phy-handle = <&phy1>;
+ phy-connection-type = "rgmii-id";
+ };
+};
diff --git a/arch/powerpc/boot/dts/p1024rdb_32b.dts b/arch/powerpc/boot/dts/p1024rdb_32b.dts
new file mode 100644
index 000000000000..90e803e9ba5f
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1024rdb_32b.dts
@@ -0,0 +1,87 @@
+/*
+ * P1024 RDB 32Bit Physical Address Map Device Tree Source
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+ model = "fsl,P1024RDB";
+ compatible = "fsl,P1024RDB";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@ffe05000 {
+ reg = <0x0 0xffe05000 0 0x1000>;
+ ranges = <0x0 0x0 0x0 0xef000000 0x01000000
+ 0x1 0x0 0x0 0xff800000 0x00040000>;
+ };
+
+ soc: soc@ffe00000 {
+ ranges = <0x0 0x0 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@ffe09000 {
+ reg = <0x0 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0x0 0xa0000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0x0 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@ffe0a000 {
+ reg = <0x0 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0x0 0x80000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0x0 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1024rdb.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1024rdb_36b.dts b/arch/powerpc/boot/dts/p1024rdb_36b.dts
new file mode 100644
index 000000000000..3656825b65a1
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1024rdb_36b.dts
@@ -0,0 +1,87 @@
+/*
+ * P1024 RDB 36Bit Physical Address Map Device Tree Source
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/include/ "fsl/p1020si-pre.dtsi"
+/ {
+ model = "fsl,P1024RDB";
+ compatible = "fsl,P1024RDB";
+
+ memory {
+ device_type = "memory";
+ };
+
+ lbc: localbus@fffe05000 {
+ reg = <0xf 0xffe05000 0 0x1000>;
+ ranges = <0x0 0x0 0xf 0xef000000 0x01000000
+ 0x1 0x0 0xf 0xff800000 0x00040000>;
+ };
+
+ soc: soc@fffe00000 {
+ ranges = <0x0 0xf 0xffe00000 0x100000>;
+ };
+
+ pci0: pcie@fffe09000 {
+ reg = <0xf 0xffe09000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
+ pcie@0 {
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+
+ pci1: pcie@fffe0a000 {
+ reg = <0xf 0xffe0a000 0 0x1000>;
+ ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
+ 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
+ pcie@0 {
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ ranges = <0x2000000 0x0 0xe0000000
+ 0x2000000 0x0 0xe0000000
+ 0x0 0x20000000
+
+ 0x1000000 0x0 0x0
+ 0x1000000 0x0 0x0
+ 0x0 0x100000>;
+ };
+ };
+};
+
+/include/ "p1024rdb.dtsi"
+/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1025rdb.dtsi b/arch/powerpc/boot/dts/p1025rdb.dtsi
index cf3676fc714b..f50256482297 100644
--- a/arch/powerpc/boot/dts/p1025rdb.dtsi
+++ b/arch/powerpc/boot/dts/p1025rdb.dtsi
@@ -282,5 +282,45 @@
0x1 0x4 0x2 0x0 0x2 0x0 /* ENET5_RX_DV_SER5_CTS_B */
0x1 0x8 0x2 0x0 0x2 0x0>; /* ENET5_RX_ER_SER5_CD_B */
};
+
+ pio3: ucc_pin@03 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x0 0x16 0x2 0x0 0x2 0x0 /* SER7_CD_B*/
+ 0x0 0x12 0x2 0x0 0x2 0x0 /* SER7_CTS_B*/
+ 0x0 0x13 0x1 0x0 0x2 0x0 /* SER7_RTS_B*/
+ 0x0 0x14 0x2 0x0 0x2 0x0 /* SER7_RXD0*/
+ 0x0 0x15 0x1 0x0 0x2 0x0>; /* SER7_TXD0*/
+ };
+
+ pio4: ucc_pin@04 {
+ pio-map = <
+ /* port pin dir open_drain assignment has_irq */
+ 0x1 0x0 0x2 0x0 0x2 0x0 /* SER3_CD_B*/
+ 0x0 0x1c 0x2 0x0 0x2 0x0 /* SER3_CTS_B*/
+ 0x0 0x1d 0x1 0x0 0x2 0x0 /* SER3_RTS_B*/
+ 0x0 0x1e 0x2 0x0 0x2 0x0 /* SER3_RXD0*/
+ 0x0 0x1f 0x1 0x0 0x2 0x0>; /* SER3_TXD0*/
+ };
+ };
+};
+
+&qe {
+ serial2: ucc@2600 {
+ device_type = "serial";
+ compatible = "ucc_uart";
+ port-number = <0>;
+ rx-clock-name = "brg6";
+ tx-clock-name = "brg6";
+ pio-handle = <&pio3>;
+ };
+
+ serial3: ucc@2200 {
+ device_type = "serial";
+ compatible = "ucc_uart";
+ port-number = <1>;
+ rx-clock-name = "brg2";
+ tx-clock-name = "brg2";
+ pio-handle = <&pio4>;
};
};
diff --git a/arch/powerpc/boot/dts/p2020ds.dtsi b/arch/powerpc/boot/dts/p2020ds.dtsi
index d3b939c573b0..e699cf95b063 100644
--- a/arch/powerpc/boot/dts/p2020ds.dtsi
+++ b/arch/powerpc/boot/dts/p2020ds.dtsi
@@ -150,6 +150,16 @@
interrupts = <3 1 0 0>;
reg = <0x2>;
};
+
+ sgmii_phy1: sgmii-phy@1 {
+ interrupts = <5 1 0 0>;
+ reg = <0x1c>;
+ };
+ sgmii_phy2: sgmii-phy@2 {
+ interrupts = <5 1 0 0>;
+ reg = <0x1d>;
+ };
+
tbi0: tbi-phy@11 {
reg = <0x11>;
device_type = "tbi-phy";
diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts
index 153bc76bb48e..4d52bce1d5b0 100644
--- a/arch/powerpc/boot/dts/p2020rdb.dts
+++ b/arch/powerpc/boot/dts/p2020rdb.dts
@@ -34,7 +34,7 @@
/* NOR and NAND Flashes */
ranges = <0x0 0x0 0x0 0xef000000 0x01000000
- 0x1 0x0 0x0 0xff800000 0x00040000
+ 0x1 0x0 0x0 0xffa00000 0x00040000
0x2 0x0 0x0 0xffb00000 0x00020000>;
nor@0,0 {
diff --git a/arch/powerpc/boot/dts/p2041rdb.dts b/arch/powerpc/boot/dts/p2041rdb.dts
index 285213976a7f..baab0347dab0 100644
--- a/arch/powerpc/boot/dts/p2041rdb.dts
+++ b/arch/powerpc/boot/dts/p2041rdb.dts
@@ -121,7 +121,8 @@
lbc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x1000>;
- ranges = <0 0 0xf 0xe8000000 0x08000000>;
+ ranges = <0 0 0xf 0xe8000000 0x08000000
+ 1 0 0xf 0xffa00000 0x00040000>;
flash@0,0 {
compatible = "cfi-flash";
@@ -129,6 +130,44 @@
bank-width = <2>;
device-width = <2>;
};
+
+ nand@1,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "fsl,elbc-fcm-nand";
+ reg = <0x1 0x0 0x40000>;
+
+ partition@0 {
+ label = "NAND U-Boot Image";
+ reg = <0x0 0x02000000>;
+ read-only;
+ };
+
+ partition@2000000 {
+ label = "NAND Root File System";
+ reg = <0x02000000 0x10000000>;
+ };
+
+ partition@12000000 {
+ label = "NAND Compressed RFS Image";
+ reg = <0x12000000 0x08000000>;
+ };
+
+ partition@1a000000 {
+ label = "NAND Linux Kernel Image";
+ reg = <0x1a000000 0x04000000>;
+ };
+
+ partition@1e000000 {
+ label = "NAND DTB Image";
+ reg = <0x1e000000 0x01000000>;
+ };
+
+ partition@1f000000 {
+ label = "NAND Writable User area";
+ reg = <0x1f000000 0x01000000>;
+ };
+ };
};
pci0: pcie@ffe200000 {
diff --git a/arch/powerpc/boot/dts/p3060qds.dts b/arch/powerpc/boot/dts/p3060qds.dts
deleted file mode 100644
index 9ae875c8a211..000000000000
--- a/arch/powerpc/boot/dts/p3060qds.dts
+++ /dev/null
@@ -1,242 +0,0 @@
-/*
- * P3060QDS Device Tree Source
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/include/ "fsl/p3060si-pre.dtsi"
-
-/ {
- model = "fsl,P3060QDS";
- compatible = "fsl,P3060QDS";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- memory {
- device_type = "memory";
- };
-
- dcsr: dcsr@f00000000 {
- ranges = <0x00000000 0xf 0x00000000 0x01008000>;
- };
-
- soc: soc@ffe000000 {
- ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
- reg = <0xf 0xfe000000 0 0x00001000>;
- spi@110000 {
- flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "spansion,s25sl12801";
- reg = <0>;
- spi-max-frequency = <40000000>; /* input clock */
- partition@u-boot {
- label = "u-boot";
- reg = <0x00000000 0x00100000>;
- read-only;
- };
- partition@kernel {
- label = "kernel";
- reg = <0x00100000 0x00500000>;
- read-only;
- };
- partition@dtb {
- label = "dtb";
- reg = <0x00600000 0x00100000>;
- read-only;
- };
- partition@fs {
- label = "file system";
- reg = <0x00700000 0x00900000>;
- };
- };
- flash@1 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "spansion,en25q32b";
- reg = <1>;
- spi-max-frequency = <40000000>; /* input clock */
- partition@spi1 {
- label = "spi1";
- reg = <0x00000000 0x00400000>;
- };
- };
- flash@2 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "atmel,at45db081d";
- reg = <2>;
- spi-max-frequency = <40000000>; /* input clock */
- partition@spi1 {
- label = "spi2";
- reg = <0x00000000 0x00100000>;
- };
- };
- flash@3 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "spansion,sst25wf040";
- reg = <3>;
- spi-max-frequency = <40000000>; /* input clock */
- partition@spi3 {
- label = "spi3";
- reg = <0x00000000 0x00080000>;
- };
- };
- };
-
- i2c@118000 {
- eeprom@51 {
- compatible = "at24,24c256";
- reg = <0x51>;
- };
- eeprom@53 {
- compatible = "at24,24c256";
- reg = <0x53>;
- };
- rtc@68 {
- compatible = "dallas,ds3232";
- reg = <0x68>;
- interrupts = <0x1 0x1 0 0>;
- };
- };
-
- usb0: usb@210000 {
- phy_type = "ulpi";
- };
-
- usb1: usb@211000 {
- dr_mode = "host";
- phy_type = "ulpi";
- };
- };
-
- rio: rapidio@ffe0c0000 {
- reg = <0xf 0xfe0c0000 0 0x11000>;
-
- port1 {
- ranges = <0 0 0xc 0x20000000 0 0x10000000>;
- };
- port2 {
- ranges = <0 0 0xc 0x30000000 0 0x10000000>;
- };
- };
-
- lbc: localbus@ffe124000 {
- reg = <0xf 0xfe124000 0 0x1000>;
- ranges = <0 0 0xf 0xe8000000 0x08000000
- 2 0 0xf 0xffa00000 0x00040000
- 3 0 0xf 0xffdf0000 0x00008000>;
-
- flash@0,0 {
- compatible = "cfi-flash";
- reg = <0 0 0x08000000>;
- bank-width = <2>;
- device-width = <2>;
- };
-
- nand@2,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,elbc-fcm-nand";
- reg = <0x2 0x0 0x40000>;
-
- partition@0 {
- label = "NAND U-Boot Image";
- reg = <0x0 0x02000000>;
- read-only;
- };
-
- partition@2000000 {
- label = "NAND Root File System";
- reg = <0x02000000 0x10000000>;
- };
-
- partition@12000000 {
- label = "NAND Compressed RFS Image";
- reg = <0x12000000 0x08000000>;
- };
-
- partition@1a000000 {
- label = "NAND Linux Kernel Image";
- reg = <0x1a000000 0x04000000>;
- };
-
- partition@1e000000 {
- label = "NAND DTB Image";
- reg = <0x1e000000 0x01000000>;
- };
-
- partition@1f000000 {
- label = "NAND Writable User area";
- reg = <0x1f000000 0x21000000>;
- };
- };
-
- board-control@3,0 {
- compatible = "fsl,p3060qds-fpga", "fsl,fpga-qixis";
- reg = <3 0 0x100>;
- };
- };
-
- pci0: pcie@ffe200000 {
- reg = <0xf 0xfe200000 0 0x1000>;
- ranges = <0x02000000 0 0xe0000000 0xc 0x00000000 0x0 0x20000000
- 0x01000000 0 0x00000000 0xf 0xf8000000 0x0 0x00010000>;
- pcie@0 {
- ranges = <0x02000000 0 0xe0000000
- 0x02000000 0 0xe0000000
- 0 0x20000000
-
- 0x01000000 0 0x00000000
- 0x01000000 0 0x00000000
- 0 0x00010000>;
- };
- };
-
- pci1: pcie@ffe201000 {
- reg = <0xf 0xfe201000 0 0x1000>;
- ranges = <0x02000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
- 0x01000000 0x0 0x00000000 0xf 0xf8010000 0x0 0x00010000>;
- pcie@0 {
- ranges = <0x02000000 0 0xe0000000
- 0x02000000 0 0xe0000000
- 0 0x20000000
-
- 0x01000000 0 0x00000000
- 0x01000000 0 0x00000000
- 0 0x00010000>;
- };
- };
-};
-
-/include/ "fsl/p3060si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/sbc8560.dts b/arch/powerpc/boot/dts/sbc8560.dts
deleted file mode 100644
index 72078eb15616..000000000000
--- a/arch/powerpc/boot/dts/sbc8560.dts
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * SBC8560 Device Tree Source
- *
- * Copyright 2007 Wind River Systems Inc.
- *
- * Paul Gortmaker (see MAINTAINERS for contact information)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-/dts-v1/;
-
-/ {
- model = "SBC8560";
- compatible = "SBC8560";
- #address-cells = <1>;
- #size-cells = <1>;
-
- aliases {
- ethernet0 = &enet0;
- ethernet1 = &enet1;
- ethernet2 = &enet2;
- ethernet3 = &enet3;
- serial0 = &serial0;
- serial1 = &serial1;
- pci0 = &pci0;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- PowerPC,8560@0 {
- device_type = "cpu";
- reg = <0>;
- d-cache-line-size = <0x20>; // 32 bytes
- i-cache-line-size = <0x20>; // 32 bytes
- d-cache-size = <0x8000>; // L1, 32K
- i-cache-size = <0x8000>; // L1, 32K
- timebase-frequency = <0>; // From uboot
- bus-frequency = <0>;
- clock-frequency = <0>;
- next-level-cache = <&L2>;
- };
- };
-
- memory {
- device_type = "memory";
- reg = <0x00000000 0x20000000>;
- };
-
- soc@ff700000 {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- ranges = <0x0 0xff700000 0x00100000>;
- clock-frequency = <0>;
-
- ecm-law@0 {
- compatible = "fsl,ecm-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <8>;
- };
-
- ecm@1000 {
- compatible = "fsl,mpc8560-ecm", "fsl,ecm";
- reg = <0x1000 0x1000>;
- interrupts = <17 2>;
- interrupt-parent = <&mpic>;
- };
-
- memory-controller@2000 {
- compatible = "fsl,mpc8560-memory-controller";
- reg = <0x2000 0x1000>;
- interrupt-parent = <&mpic>;
- interrupts = <0x12 0x2>;
- };
-
- L2: l2-cache-controller@20000 {
- compatible = "fsl,mpc8560-l2-cache-controller";
- reg = <0x20000 0x1000>;
- cache-line-size = <0x20>; // 32 bytes
- cache-size = <0x40000>; // L2, 256K
- interrupt-parent = <&mpic>;
- interrupts = <0x10 0x2>;
- };
-
- i2c@3000 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <0>;
- compatible = "fsl-i2c";
- reg = <0x3000 0x100>;
- interrupts = <0x2b 0x2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- i2c@3100 {
- #address-cells = <1>;
- #size-cells = <0>;
- cell-index = <1>;
- compatible = "fsl-i2c";
- reg = <0x3100 0x100>;
- interrupts = <0x2b 0x2>;
- interrupt-parent = <&mpic>;
- dfsrr;
- };
-
- dma@21300 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8560-dma", "fsl,eloplus-dma";
- reg = <0x21300 0x4>;
- ranges = <0x0 0x21100 0x200>;
- cell-index = <0>;
- dma-channel@0 {
- compatible = "fsl,mpc8560-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x0 0x80>;
- cell-index = <0>;
- interrupt-parent = <&mpic>;
- interrupts = <20 2>;
- };
- dma-channel@80 {
- compatible = "fsl,mpc8560-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x80 0x80>;
- cell-index = <1>;
- interrupt-parent = <&mpic>;
- interrupts = <21 2>;
- };
- dma-channel@100 {
- compatible = "fsl,mpc8560-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x100 0x80>;
- cell-index = <2>;
- interrupt-parent = <&mpic>;
- interrupts = <22 2>;
- };
- dma-channel@180 {
- compatible = "fsl,mpc8560-dma-channel",
- "fsl,eloplus-dma-channel";
- reg = <0x180 0x80>;
- cell-index = <3>;
- interrupt-parent = <&mpic>;
- interrupts = <23 2>;
- };
- };
-
- enet0: ethernet@24000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <0>;
- device_type = "network";
- model = "TSEC";
- compatible = "gianfar";
- reg = <0x24000 0x1000>;
- ranges = <0x0 0x24000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <0x1d 0x2 0x1e 0x2 0x22 0x2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi0>;
- phy-handle = <&phy0>;
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-mdio";
- reg = <0x520 0x20>;
- phy0: ethernet-phy@19 {
- interrupt-parent = <&mpic>;
- interrupts = <0x6 0x1>;
- reg = <0x19>;
- device_type = "ethernet-phy";
- };
- phy1: ethernet-phy@1a {
- interrupt-parent = <&mpic>;
- interrupts = <0x7 0x1>;
- reg = <0x1a>;
- device_type = "ethernet-phy";
- };
- phy2: ethernet-phy@1b {
- interrupt-parent = <&mpic>;
- interrupts = <0x8 0x1>;
- reg = <0x1b>;
- device_type = "ethernet-phy";
- };
- phy3: ethernet-phy@1c {
- interrupt-parent = <&mpic>;
- interrupts = <0x8 0x1>;
- reg = <0x1c>;
- device_type = "ethernet-phy";
- };
- tbi0: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- enet1: ethernet@25000 {
- #address-cells = <1>;
- #size-cells = <1>;
- cell-index = <1>;
- device_type = "network";
- model = "TSEC";
- compatible = "gianfar";
- reg = <0x25000 0x1000>;
- ranges = <0x0 0x25000 0x1000>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- interrupts = <0x23 0x2 0x24 0x2 0x28 0x2>;
- interrupt-parent = <&mpic>;
- tbi-handle = <&tbi1>;
- phy-handle = <&phy1>;
-
- mdio@520 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "fsl,gianfar-tbi";
- reg = <0x520 0x20>;
-
- tbi1: tbi-phy@11 {
- reg = <0x11>;
- device_type = "tbi-phy";
- };
- };
- };
-
- mpic: pic@40000 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- compatible = "chrp,open-pic";
- reg = <0x40000 0x40000>;
- device_type = "open-pic";
- };
-
- cpm@919c0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "fsl,mpc8560-cpm", "fsl,cpm2";
- reg = <0x919c0 0x30>;
- ranges;
-
- muram@80000 {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges = <0x0 0x80000 0x10000>;
-
- data@0 {
- compatible = "fsl,cpm-muram-data";
- reg = <0x0 0x4000 0x9000 0x2000>;
- };
- };
-
- brg@919f0 {
- compatible = "fsl,mpc8560-brg",
- "fsl,cpm2-brg",
- "fsl,cpm-brg";
- reg = <0x919f0 0x10 0x915f0 0x10>;
- clock-frequency = <165000000>;
- };
-
- cpmpic: pic@90c00 {
- interrupt-controller;
- #address-cells = <0>;
- #interrupt-cells = <2>;
- interrupts = <0x2e 0x2>;
- interrupt-parent = <&mpic>;
- reg = <0x90c00 0x80>;
- compatible = "fsl,mpc8560-cpm-pic", "fsl,cpm2-pic";
- };
-
- enet2: ethernet@91320 {
- device_type = "network";
- compatible = "fsl,mpc8560-fcc-enet",
- "fsl,cpm2-fcc-enet";
- reg = <0x91320 0x20 0x88500 0x100 0x913b0 0x1>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- fsl,cpm-command = <0x16200300>;
- interrupts = <0x21 0x8>;
- interrupt-parent = <&cpmpic>;
- phy-handle = <&phy2>;
- };
-
- enet3: ethernet@91340 {
- device_type = "network";
- compatible = "fsl,mpc8560-fcc-enet",
- "fsl,cpm2-fcc-enet";
- reg = <0x91340 0x20 0x88600 0x100 0x913d0 0x1>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- fsl,cpm-command = <0x1a400300>;
- interrupts = <0x22 0x8>;
- interrupt-parent = <&cpmpic>;
- phy-handle = <&phy3>;
- };
- };
-
- global-utilities@e0000 {
- compatible = "fsl,mpc8560-guts";
- reg = <0xe0000 0x1000>;
- };
- };
-
- pci0: pci@ff708000 {
- #interrupt-cells = <1>;
- #size-cells = <2>;
- #address-cells = <3>;
- compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci";
- device_type = "pci";
- reg = <0xff708000 0x1000>;
- clock-frequency = <66666666>;
- interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
- interrupt-map = <
-
- /* IDSEL 0x02 */
- 0x1000 0x0 0x0 0x1 &mpic 0x2 0x1
- 0x1000 0x0 0x0 0x2 &mpic 0x3 0x1
- 0x1000 0x0 0x0 0x3 &mpic 0x4 0x1
- 0x1000 0x0 0x0 0x4 &mpic 0x5 0x1>;
-
- interrupt-parent = <&mpic>;
- interrupts = <0x18 0x2>;
- bus-range = <0x0 0x0>;
- ranges = <0x02000000 0x0 0x80000000 0x80000000 0x0 0x20000000
- 0x01000000 0x0 0x00000000 0xe2000000 0x0 0x00100000>;
- };
-
- localbus@ff705000 {
- compatible = "fsl,mpc8560-localbus", "simple-bus";
- #address-cells = <2>;
- #size-cells = <1>;
- reg = <0xff705000 0x100>; // BRx, ORx, etc.
-
- ranges = <
- 0x0 0x0 0xff800000 0x0800000 // 8MB boot flash
- 0x1 0x0 0xe4000000 0x4000000 // 64MB flash
- 0x3 0x0 0x20000000 0x4000000 // 64MB SDRAM
- 0x4 0x0 0x24000000 0x4000000 // 64MB SDRAM
- 0x5 0x0 0xfc000000 0x0c00000 // EPLD
- 0x6 0x0 0xe0000000 0x4000000 // 64MB flash
- 0x7 0x0 0x80000000 0x0200000 // ATM1,2
- >;
-
- epld@5,0 {
- compatible = "wrs,epld-localbus";
- #address-cells = <2>;
- #size-cells = <1>;
- reg = <0x5 0x0 0xc00000>;
- ranges = <
- 0x0 0x0 0x5 0x000000 0x1fff // LED disp.
- 0x1 0x0 0x5 0x100000 0x1fff // switches
- 0x2 0x0 0x5 0x200000 0x1fff // ID reg.
- 0x3 0x0 0x5 0x300000 0x1fff // status reg.
- 0x4 0x0 0x5 0x400000 0x1fff // reset reg.
- 0x5 0x0 0x5 0x500000 0x1fff // Wind port
- 0x7 0x0 0x5 0x700000 0x1fff // UART #1
- 0x8 0x0 0x5 0x800000 0x1fff // UART #2
- 0x9 0x0 0x5 0x900000 0x1fff // RTC
- 0xb 0x0 0x5 0xb00000 0x1fff // EEPROM
- >;
-
- bidr@2,0 {
- compatible = "wrs,sbc8560-bidr";
- reg = <0x2 0x0 0x10>;
- };
-
- bcsr@3,0 {
- compatible = "wrs,sbc8560-bcsr";
- reg = <0x3 0x0 0x10>;
- };
-
- brstcr@4,0 {
- compatible = "wrs,sbc8560-brstcr";
- reg = <0x4 0x0 0x10>;
- };
-
- serial0: serial@7,0 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x7 0x0 0x100>;
- clock-frequency = <1843200>;
- interrupts = <0x9 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- serial1: serial@8,0 {
- device_type = "serial";
- compatible = "ns16550";
- reg = <0x8 0x0 0x100>;
- clock-frequency = <1843200>;
- interrupts = <0xa 0x2>;
- interrupt-parent = <&mpic>;
- };
-
- rtc@9,0 {
- compatible = "m48t59";
- reg = <0x9 0x0 0x1fff>;
- };
- };
- };
-};
diff --git a/arch/powerpc/boot/flatdevtree_env.h b/arch/powerpc/boot/flatdevtree_env.h
deleted file mode 100644
index 66e0ebb1a364..000000000000
--- a/arch/powerpc/boot/flatdevtree_env.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * This file adds the header file glue so that the shared files
- * flatdevicetree.[ch] can compile and work in the powerpc bootwrapper.
- *
- * strncmp & strchr copied from <file:lib/string.c>
- * Copyright (C) 1991, 1992 Linus Torvalds
- *
- * Maintained by: Mark A. Greer <mgreer@mvista.com>
- */
-#ifndef _PPC_BOOT_FLATDEVTREE_ENV_H_
-#define _PPC_BOOT_FLATDEVTREE_ENV_H_
-
-#include <stdarg.h>
-#include <stddef.h>
-#include "types.h"
-#include "string.h"
-#include "stdio.h"
-#include "ops.h"
-
-#define be16_to_cpu(x) (x)
-#define cpu_to_be16(x) (x)
-#define be32_to_cpu(x) (x)
-#define cpu_to_be32(x) (x)
-#define be64_to_cpu(x) (x)
-#define cpu_to_be64(x) (x)
-
-#endif /* _PPC_BOOT_FLATDEVTREE_ENV_H_ */
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig
index 07e1bbadebfe..a0dfef1fcdb7 100644
--- a/arch/powerpc/configs/83xx/kmeter1_defconfig
+++ b/arch/powerpc/configs/83xx/kmeter1_defconfig
@@ -2,14 +2,14 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_SPARSE_IRQ=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EXPERT=y
-# CONFIG_HOTPLUG is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_PPC_CHRP is not set
@@ -31,11 +31,10 @@ CONFIG_IP_PNP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
+CONFIG_TIPC=y
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=y
CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
@@ -50,17 +49,15 @@ CONFIG_MTD_UBI_DEBUG=y
CONFIG_PROC_DEVICETREE=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_TUN=y
CONFIG_MII=y
-CONFIG_MARVELL_PHY=y
-CONFIG_NET_ETHERNET=y
+CONFIG_TUN=y
CONFIG_UCC_GETH=y
-# CONFIG_NETDEV_10000 is not set
-CONFIG_WAN=y
-CONFIG_HDLC=y
+CONFIG_MARVELL_PHY=y
CONFIG_PPP=y
CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=y
+CONFIG_WAN=y
+CONFIG_HDLC=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
@@ -77,10 +74,7 @@ CONFIG_UIO=y
# CONFIG_DNOTIFY is not set
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
+CONFIG_UBIFS_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
diff --git a/arch/powerpc/configs/85xx/sbc8560_defconfig b/arch/powerpc/configs/85xx/sbc8560_defconfig
deleted file mode 100644
index f7fdb0318e4c..000000000000
--- a/arch/powerpc/configs/85xx/sbc8560_defconfig
+++ /dev/null
@@ -1,65 +0,0 @@
-CONFIG_PPC_85xx=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_SBC8560=y
-CONFIG_BINFMT_MISC=y
-CONFIG_SPARSE_IRQ=y
-# CONFIG_SECCOMP is not set
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FW_LOADER is not set
-CONFIG_PROC_DEVICETREE=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_NETDEVICES=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_GIANFAR=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-# CONFIG_HW_RANDOM is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_M48T59=y
-CONFIG_INOTIFY=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_MSDOS_PARTITION is not set
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_DEBUG_MUTEXES=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_PPC_EARLY_DEBUG=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 91db656294e8..cbb98c1234fd 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -23,7 +23,6 @@ CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_P2041_RDB=y
CONFIG_P3041_DS=y
-CONFIG_P3060_QDS=y
CONFIG_P4080_DS=y
CONFIG_P5020_DS=y
CONFIG_HIGHMEM=y
@@ -32,10 +31,12 @@ CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_BINFMT_MISC=m
CONFIG_KEXEC=y
+CONFIG_IRQ_ALL_CPUS=y
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_FSL_LBC=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_MSI=y
# CONFIG_PCIEASPM is not set
CONFIG_RAPIDIO=y
CONFIG_FSL_RIO=y
@@ -76,6 +77,11 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_NAND_FSL_ELBC=y
CONFIG_MTD_M25P80=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
@@ -136,6 +142,8 @@ CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_OF=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_EDAC=y
CONFIG_EDAC_MM_EDAC=y
CONFIG_EDAC_MPC85XX=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 6798343580f0..dd89de8b0b7f 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -6,7 +6,9 @@ CONFIG_NR_CPUS=2
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_SPARSE_IRQ=y
+CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -18,11 +20,14 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
CONFIG_P5020_DS=y
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
+CONFIG_IRQ_ALL_CPUS=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_MSI=y
CONFIG_RAPIDIO=y
CONFIG_FSL_RIO=y
CONFIG_NET=y
@@ -51,12 +56,25 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSL_ELBC=y
+CONFIG_MTD_NAND_FSL_IFC=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_MISC_DEVICES=y
CONFIG_EEPROM_LEGACY=y
+CONFIG_ATA=y
+CONFIG_SATA_FSL=y
+CONFIG_SATA_SIL24=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
CONFIG_INPUT_FF_MEMLESS=m
@@ -66,39 +84,59 @@ CONFIG_INPUT_FF_MEMLESS=m
CONFIG_SERIO_LIBPS2=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_MANY_PORTS=y
CONFIG_SERIAL_8250_DETECT_IRQ=y
CONFIG_SERIAL_8250_RSA=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
+CONFIG_SPI=y
+CONFIG_SPI_GPIO=y
+CONFIG_SPI_FSL_SPI=y
+CONFIG_SPI_FSL_ESPI=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
-# CONFIG_HID_SUPPORT is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_FSL=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
CONFIG_DMADEVICES=y
CONFIG_FSL_DMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_NLS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=m
CONFIG_CRC_T10DIF=y
-CONFIG_CRC_ITU_T=m
CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_SHIRQ=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_IRQ_DOMAIN_DEBUG=y
+CONFIG_CRYPTO_NULL=y
CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_AES=y
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig
index 0d36b0e1e268..8fa84f156ef3 100644
--- a/arch/powerpc/configs/mgcoge_defconfig
+++ b/arch/powerpc/configs/mgcoge_defconfig
@@ -2,7 +2,6 @@ CONFIG_EXPERIMENTAL=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_SPARSE_IRQ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -12,6 +11,7 @@ CONFIG_KALLSYMS_ALL=y
# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
+CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_PPC_PMAC is not set
CONFIG_PPC_82xx=y
@@ -49,12 +49,9 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_NETDEVICES=y
-CONFIG_FIXED_PHY=y
-CONFIG_NET_ETHERNET=y
CONFIG_FS_ENET=y
CONFIG_FS_ENET_MDIO_FCC=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_FIXED_PHY=y
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
@@ -64,6 +61,8 @@ CONFIG_SERIAL_CPM_CONSOLE=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_CPM=y
+CONFIG_SPI=y
+CONFIG_SPI_FSL_SPI=y
# CONFIG_HWMON is not set
CONFIG_USB_GADGET=y
CONFIG_USB_FSL_USB2=y
@@ -80,8 +79,6 @@ CONFIG_SQUASHFS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
CONFIG_NLS_ISO8859_1=y
@@ -90,7 +87,6 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_DEBUG_INFO=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_BDI_SWITCH=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_PCBC=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 62bb723c5b54..03ee911c4577 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -74,6 +74,30 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_FTL=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSL_ELBC=y
+CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_M25P80=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index d1828427ae55..fdfa84dc908f 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -46,6 +46,7 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BINFMT_MISC=m
CONFIG_MATH_EMULATION=y
+CONFIG_IRQ_ALL_CPUS=y
CONFIG_FORCE_MAX_ZONEORDER=12
CONFIG_PCI=y
CONFIG_PCI_MSI=y
@@ -76,6 +77,30 @@ CONFIG_INET_ESP=y
CONFIG_IPV6=y
CONFIG_IP_SCTP=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_FTL=y
+CONFIG_MTD_GEN_PROBE=y
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_UTIL=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_OF_PARTS=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_FSL_ELBC=y
+CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_NAND_IDS=y
+CONFIG_MTD_NAND_ECC=y
+CONFIG_MTD_M25P80=y
CONFIG_PROC_DEVICETREE=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_NBD=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index c1442a3758ae..f2fe0c2b41e4 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -16,6 +16,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
@@ -489,3 +490,4 @@ CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=y
CONFIG_VHOST_NET=m
+CONFIG_BPF_JIT=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 6608232663cb..187fb8d53605 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -24,6 +24,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 5d7fbe1950f9..6e82f5f9a6fd 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -29,7 +29,7 @@
#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh)
#define PPC_STLCX stringify_in_c(stdcx.)
#define PPC_CNTLZL stringify_in_c(cntlzd)
-#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), (RS))
+#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), RS)
#define PPC_LR_STKOFF 16
#define PPC_MIN_STKFRM 112
#else /* 32-bit */
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 37c32aba79b7..a6f8c7a5cbb7 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -26,8 +26,8 @@ unsigned int create_branch(const unsigned int *addr,
unsigned long target, int flags);
unsigned int create_cond_branch(const unsigned int *addr,
unsigned long target, int flags);
-void patch_branch(unsigned int *addr, unsigned long target, int flags);
-void patch_instruction(unsigned int *addr, unsigned int instr);
+int patch_branch(unsigned int *addr, unsigned long target, int flags);
+int patch_instruction(unsigned int *addr, unsigned int instr);
int instr_is_relative_branch(unsigned int instr);
int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 63d5ca49cece..77e97dd0c15d 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -34,6 +34,9 @@ struct dev_archdata {
#ifdef CONFIG_EEH
struct eeh_dev *edev;
#endif
+#ifdef CONFIG_FAIL_IOMMU
+ int fail_iommu;
+#endif
};
struct pdev_archdata {
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index 976835d8f22e..bf2c06c33871 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -153,6 +153,8 @@
#define EV_HCALL_CLOBBERS2 EV_HCALL_CLOBBERS3, "r5"
#define EV_HCALL_CLOBBERS1 EV_HCALL_CLOBBERS2, "r4"
+extern bool epapr_paravirt_enabled;
+extern u32 epapr_hypercall_start[];
/*
* We use "uintptr_t" to define a register because it's guaranteed to be a
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index d58fc4e4149c..a43c1473915f 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -293,7 +293,7 @@ label##_hv: \
#define RUNLATCH_ON \
BEGIN_FTR_SECTION \
- clrrdi r3,r1,THREAD_SHIFT; \
+ CURRENT_THREAD_INFO(r3, r1); \
ld r4,TI_LOCAL_FLAGS(r3); \
andi. r0,r4,_TLF_RUNLATCH; \
beql ppc64_runlatch_on_trampoline; \
@@ -332,7 +332,7 @@ label##_common: \
#ifdef CONFIG_PPC_970_NAP
#define FINISH_NAP \
BEGIN_FTR_SECTION \
- clrrdi r11,r1,THREAD_SHIFT; \
+ CURRENT_THREAD_INFO(r11, r1); \
ld r9,TI_LOCAL_FLAGS(r11); \
andi. r10,r9,_TLF_NAPPING; \
bnel power4_fixup_nap; \
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 32b394f3b854..e45c4947a772 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -34,6 +34,8 @@ extern void __replay_interrupt(unsigned int vector);
extern void timer_interrupt(struct pt_regs *);
extern void performance_monitor_exception(struct pt_regs *regs);
+extern void WatchdogException(struct pt_regs *regs);
+extern void unknown_exception(struct pt_regs *regs);
#ifdef CONFIG_PPC64
#include <asm/paca.h>
@@ -86,8 +88,8 @@ static inline bool arch_irqs_disabled(void)
}
#ifdef CONFIG_PPC_BOOK3E
-#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory");
-#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory");
+#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory")
+#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory")
#else
#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
@@ -103,6 +105,11 @@ static inline void hard_irq_disable(void)
/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
#define hard_irq_disable hard_irq_disable
+static inline bool lazy_irq_pending(void)
+{
+ return !!(get_paca()->irq_happened & ~PACA_IRQ_HARD_DIS);
+}
+
/*
* This is called by asynchronous interrupts to conditionally
* re-enable hard interrupts when soft-disabled after having
@@ -120,6 +127,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !regs->softe;
}
+extern bool prep_irq_for_idle(void);
+
#else /* CONFIG_PPC64 */
#define SET_MSR_EE(x) mtmsr(x)
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h
index 0edb6842b13d..61e8490786b8 100644
--- a/arch/powerpc/include/asm/immap_qe.h
+++ b/arch/powerpc/include/asm/immap_qe.h
@@ -26,7 +26,9 @@
struct qe_iram {
__be32 iadd; /* I-RAM Address Register */
__be32 idata; /* I-RAM Data Register */
- u8 res0[0x78];
+ u8 res0[0x04];
+ __be32 iready; /* I-RAM Ready Register */
+ u8 res1[0x70];
} __attribute__ ((packed));
/* QE Interrupt Controller */
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index a3855b81eada..f94ef4213e9d 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -20,6 +20,14 @@ extern int check_legacy_ioport(unsigned long base_port);
#define _PNPWRP 0xa79
#define PNPBIOS_BASE 0xf000
+#if defined(CONFIG_PPC64) && defined(CONFIG_PCI)
+extern struct pci_dev *isa_bridge_pcidev;
+/*
+ * has legacy ISA devices ?
+ */
+#define arch_has_dev_port() (isa_bridge_pcidev != NULL)
+#endif
+
#include <linux/device.h>
#include <linux/io.h>
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 957a83f43646..cbfe678e3dbe 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -53,6 +53,16 @@ static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
*/
#define IOMAP_MAX_ORDER 13
+#define IOMMU_POOL_HASHBITS 2
+#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
+
+struct iommu_pool {
+ unsigned long start;
+ unsigned long end;
+ unsigned long hint;
+ spinlock_t lock;
+} ____cacheline_aligned_in_smp;
+
struct iommu_table {
unsigned long it_busno; /* Bus number this table belongs to */
unsigned long it_size; /* Size of iommu table in entries */
@@ -61,10 +71,10 @@ struct iommu_table {
unsigned long it_index; /* which iommu table this is */
unsigned long it_type; /* type: PCI or Virtual Bus */
unsigned long it_blocksize; /* Entries in each block (cacheline) */
- unsigned long it_hint; /* Hint for next alloc */
- unsigned long it_largehint; /* Hint for large allocs */
- unsigned long it_halfpoint; /* Breaking point for small/large allocs */
- spinlock_t it_lock; /* Protects it_map */
+ unsigned long poolsize;
+ unsigned long nr_pools;
+ struct iommu_pool large_pool;
+ struct iommu_pool pools[IOMMU_NR_POOLS];
unsigned long *it_map; /* A simple allocation bitmap for now */
};
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index b0c08b142770..0dd1d86d3e31 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -36,11 +36,8 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
#define SPAPR_TCE_SHIFT 12
#ifdef CONFIG_KVM_BOOK3S_64_HV
-/* For now use fixed-size 16MB page table */
-#define HPT_ORDER 24
-#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
-#define HPT_NPTE (HPT_NPTEG << 3) /* 8 PTEs per PTEG */
-#define HPT_HASH_MASK (HPT_NPTEG - 1)
+#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
+extern int kvm_hpt_order; /* order of preallocated HPTs */
#endif
#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 88609b23b775..bfcd00c1485d 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -74,6 +74,7 @@ struct kvmppc_host_state {
ulong vmhandler;
ulong scratch0;
ulong scratch1;
+ ulong sprg3;
u8 in_guest;
u8 restore_hid5;
u8 napping;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d848cdc49715..50ea12fd7bf5 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -237,6 +237,10 @@ struct kvm_arch {
unsigned long vrma_slb_v;
int rma_setup_done;
int using_mmu_notifiers;
+ u32 hpt_order;
+ atomic_t vcpus_running;
+ unsigned long hpt_npte;
+ unsigned long hpt_mask;
spinlock_t slot_phys_lock;
unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
int slot_npages[KVM_MEM_SLOTS_NUM];
@@ -414,7 +418,9 @@ struct kvm_vcpu_arch {
ulong mcsrr1;
ulong mcsr;
u32 dec;
+#ifdef CONFIG_BOOKE
u32 decar;
+#endif
u32 tbl;
u32 tbu;
u32 tcr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index f68c22fa2fce..0124937a23b9 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -119,7 +119,8 @@ extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
-extern long kvmppc_alloc_hpt(struct kvm *kvm);
+extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
+extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
struct kvm_userspace_memory_region *mem);
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index f0145522cfba..e8a26db2e8f3 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -163,12 +163,7 @@ extern u64 ppc64_rma_size;
* to think about, feedback welcome. --BenH.
*/
-/* There are #define as they have to be used in assembly
- *
- * WARNING: If you change this list, make sure to update the array of
- * names currently in arch/powerpc/mm/hugetlbpage.c or bad things will
- * happen
- */
+/* These are #defines as they have to be used in assembly */
#define MMU_PAGE_4K 0
#define MMU_PAGE_16K 1
#define MMU_PAGE_64K 2
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index ac39e6a3b25a..8cccbee61519 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -30,6 +30,7 @@ struct pci_controller {
int first_busno;
int last_busno;
int self_busno;
+ struct resource busn;
void __iomem *io_base_virt;
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 5c16b891d501..0bb23725b1e7 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -26,8 +26,13 @@
#include <asm/ptrace.h>
#include <asm/reg.h>
+/*
+ * Overload regs->result to specify whether we should use the MSR (result
+ * is zero) or the SIAR (result is non zero).
+ */
#define perf_arch_fetch_caller_regs(regs, __ip) \
do { \
+ (regs)->result = 0; \
(regs)->nip = __ip; \
(regs)->gpr[1] = *(unsigned long *)__get_SP(); \
asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d81f99430fe7..4c25319f2fbc 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -15,6 +15,72 @@
#include <linux/stringify.h>
#include <asm/asm-compat.h>
+#define __REG_R0 0
+#define __REG_R1 1
+#define __REG_R2 2
+#define __REG_R3 3
+#define __REG_R4 4
+#define __REG_R5 5
+#define __REG_R6 6
+#define __REG_R7 7
+#define __REG_R8 8
+#define __REG_R9 9
+#define __REG_R10 10
+#define __REG_R11 11
+#define __REG_R12 12
+#define __REG_R13 13
+#define __REG_R14 14
+#define __REG_R15 15
+#define __REG_R16 16
+#define __REG_R17 17
+#define __REG_R18 18
+#define __REG_R19 19
+#define __REG_R20 20
+#define __REG_R21 21
+#define __REG_R22 22
+#define __REG_R23 23
+#define __REG_R24 24
+#define __REG_R25 25
+#define __REG_R26 26
+#define __REG_R27 27
+#define __REG_R28 28
+#define __REG_R29 29
+#define __REG_R30 30
+#define __REG_R31 31
+
+#define __REGA0_0 0
+#define __REGA0_R1 1
+#define __REGA0_R2 2
+#define __REGA0_R3 3
+#define __REGA0_R4 4
+#define __REGA0_R5 5
+#define __REGA0_R6 6
+#define __REGA0_R7 7
+#define __REGA0_R8 8
+#define __REGA0_R9 9
+#define __REGA0_R10 10
+#define __REGA0_R11 11
+#define __REGA0_R12 12
+#define __REGA0_R13 13
+#define __REGA0_R14 14
+#define __REGA0_R15 15
+#define __REGA0_R16 16
+#define __REGA0_R17 17
+#define __REGA0_R18 18
+#define __REGA0_R19 19
+#define __REGA0_R20 20
+#define __REGA0_R21 21
+#define __REGA0_R22 22
+#define __REGA0_R23 23
+#define __REGA0_R24 24
+#define __REGA0_R25 25
+#define __REGA0_R26 26
+#define __REGA0_R27 27
+#define __REGA0_R28 28
+#define __REGA0_R29 29
+#define __REGA0_R30 30
+#define __REGA0_R31 31
+
/* sorted alphabetically */
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
@@ -107,12 +173,19 @@
#define PPC_INST_NEG 0x7c0000d0
#define PPC_INST_BRANCH 0x48000000
#define PPC_INST_BRANCH_COND 0x40800000
+#define PPC_INST_LBZCIX 0x7c0006aa
+#define PPC_INST_STBCIX 0x7c0007aa
/* macros to insert fields into opcodes */
-#define __PPC_RA(a) (((a) & 0x1f) << 16)
-#define __PPC_RB(b) (((b) & 0x1f) << 11)
-#define __PPC_RS(s) (((s) & 0x1f) << 21)
-#define __PPC_RT(s) __PPC_RS(s)
+#define ___PPC_RA(a) (((a) & 0x1f) << 16)
+#define ___PPC_RB(b) (((b) & 0x1f) << 11)
+#define ___PPC_RS(s) (((s) & 0x1f) << 21)
+#define ___PPC_RT(t) ___PPC_RS(t)
+#define __PPC_RA(a) ___PPC_RA(__REG_##a)
+#define __PPC_RA0(a) ___PPC_RA(__REGA0_##a)
+#define __PPC_RB(b) ___PPC_RB(__REG_##b)
+#define __PPC_RS(s) ___PPC_RS(__REG_##s)
+#define __PPC_RT(t) ___PPC_RT(__REG_##t)
#define __PPC_XA(a) ((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
#define __PPC_XB(b) ((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
#define __PPC_XS(s) ((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
@@ -141,13 +214,13 @@
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_INST_DCBZL | \
__PPC_RA(a) | __PPC_RB(b))
#define PPC_LDARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LDARX | \
- __PPC_RT(t) | __PPC_RA(a) | \
- __PPC_RB(b) | __PPC_EH(eh))
+ ___PPC_RT(t) | ___PPC_RA(a) | \
+ ___PPC_RB(b) | __PPC_EH(eh))
#define PPC_LWARX(t, a, b, eh) stringify_in_c(.long PPC_INST_LWARX | \
- __PPC_RT(t) | __PPC_RA(a) | \
- __PPC_RB(b) | __PPC_EH(eh))
+ ___PPC_RT(t) | ___PPC_RA(a) | \
+ ___PPC_RB(b) | __PPC_EH(eh))
#define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \
- __PPC_RB(b))
+ ___PPC_RB(b))
#define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \
__PPC_RA(a) | __PPC_RS(s))
#define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \
@@ -158,34 +231,39 @@
#define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI)
#define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI)
#define PPC_TLBILX(t, a, b) stringify_in_c(.long PPC_INST_TLBILX | \
- __PPC_T_TLB(t) | __PPC_RA(a) | __PPC_RB(b))
+ __PPC_T_TLB(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_TLBILX_ALL(a, b) PPC_TLBILX(0, a, b)
#define PPC_TLBILX_PID(a, b) PPC_TLBILX(1, a, b)
#define PPC_TLBILX_VA(a, b) PPC_TLBILX(3, a, b)
#define PPC_WAIT(w) stringify_in_c(.long PPC_INST_WAIT | \
__PPC_WC(w))
#define PPC_TLBIE(lp,a) stringify_in_c(.long PPC_INST_TLBIE | \
- __PPC_RB(a) | __PPC_RS(lp))
+ ___PPC_RB(a) | ___PPC_RS(lp))
#define PPC_TLBSRX_DOT(a,b) stringify_in_c(.long PPC_INST_TLBSRX_DOT | \
- __PPC_RA(a) | __PPC_RB(b))
+ __PPC_RA0(a) | __PPC_RB(b))
#define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \
- __PPC_RA(a) | __PPC_RB(b))
+ __PPC_RA0(a) | __PPC_RB(b))
#define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_INST_ERATWE | \
__PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
#define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_INST_ERATRE | \
__PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
#define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_INST_ERATILX | \
- __PPC_T_TLB(t) | __PPC_RA(a) | \
+ __PPC_T_TLB(t) | __PPC_RA0(a) | \
__PPC_RB(b))
#define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_INST_ERATIVAX | \
- __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
+ __PPC_RS(s) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_INST_ERATSX | \
- __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
+ __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \
- __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
+ __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
__PPC_RT(t) | __PPC_RB(b))
+/* PASemi instructions */
+#define LBZCIX(t,a,b) stringify_in_c(.long PPC_INST_LBZCIX | \
+ __PPC_RT(t) | __PPC_RA(a) | __PPC_RB(b))
+#define STBCIX(s,a,b) stringify_in_c(.long PPC_INST_STBCIX | \
+ __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b))
/*
* Define what the VSX XX1 form instructions will look like, then add
@@ -194,11 +272,11 @@
#define VSX_XX1(s, a, b) (__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))
#define VSX_XX3(t, a, b) (__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
#define STXVD2X(s, a, b) stringify_in_c(.long PPC_INST_STXVD2X | \
- VSX_XX1((s), (a), (b)))
+ VSX_XX1((s), a, b))
#define LXVD2X(s, a, b) stringify_in_c(.long PPC_INST_LXVD2X | \
- VSX_XX1((s), (a), (b)))
+ VSX_XX1((s), a, b))
#define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \
- VSX_XX3((t), (a), (b)))
+ VSX_XX3((t), a, b))
#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 15444204a3a1..ea2a86e8ff95 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -126,26 +126,26 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define REST_32VRS(n,b,base) REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
/* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,base,b)
+#define SAVE_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b)
#define SAVE_2VSRS(n,b,base) SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
#define SAVE_4VSRS(n,b,base) SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
#define SAVE_8VSRS(n,b,base) SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
#define SAVE_16VSRS(n,b,base) SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
#define SAVE_32VSRS(n,b,base) SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,base,b)
+#define REST_VSR(n,b,base) li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b)
#define REST_2VSRS(n,b,base) REST_VSR(n,b,base); REST_VSR(n+1,b,base)
#define REST_4VSRS(n,b,base) REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
#define REST_8VSRS(n,b,base) REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
#define REST_16VSRS(n,b,base) REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
#define REST_32VSRS(n,b,base) REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
/* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
-#define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,base,b)
+#define SAVE_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); STXVD2X(n+32,R##base,R##b)
#define SAVE_2VSRSU(n,b,base) SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
#define SAVE_4VSRSU(n,b,base) SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
#define SAVE_8VSRSU(n,b,base) SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
#define SAVE_16VSRSU(n,b,base) SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
#define SAVE_32VSRSU(n,b,base) SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
-#define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,base,b)
+#define REST_VSRU(n,b,base) li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,R##base,R##b)
#define REST_2VSRSU(n,b,base) REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
#define REST_4VSRSU(n,b,base) REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
#define REST_8VSRSU(n,b,base) REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
@@ -178,9 +178,24 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define HMT_HIGH or 3,3,3
#define HMT_EXTRA_HIGH or 7,7,7 # power7 only
+#ifdef CONFIG_PPC64
+#define ULONG_SIZE 8
+#else
+#define ULONG_SIZE 4
+#endif
+#define __VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
+#define VCPU_GPR(n) __VCPU_GPR(__REG_##n)
+
#ifdef __KERNEL__
#ifdef CONFIG_PPC64
+#define STACKFRAMESIZE 256
+#define __STK_REG(i) (112 + ((i)-14)*8)
+#define STK_REG(i) __STK_REG(__REG_##i)
+
+#define __STK_PARAM(i) (48 + ((i)-3)*8)
+#define STK_PARAM(i) __STK_PARAM(__REG_##i)
+
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
@@ -295,14 +310,14 @@ n:
*/
#ifdef __powerpc64__
#define LOAD_REG_IMMEDIATE(reg,expr) \
- lis (reg),(expr)@highest; \
- ori (reg),(reg),(expr)@higher; \
- rldicr (reg),(reg),32,31; \
- oris (reg),(reg),(expr)@h; \
- ori (reg),(reg),(expr)@l;
+ lis reg,(expr)@highest; \
+ ori reg,reg,(expr)@higher; \
+ rldicr reg,reg,32,31; \
+ oris reg,reg,(expr)@h; \
+ ori reg,reg,(expr)@l;
#define LOAD_REG_ADDR(reg,name) \
- ld (reg),name@got(r2)
+ ld reg,name@got(r2)
#define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name)
#define ADDROFF(name) 0
@@ -313,12 +328,12 @@ n:
#else /* 32-bit */
#define LOAD_REG_IMMEDIATE(reg,expr) \
- lis (reg),(expr)@ha; \
- addi (reg),(reg),(expr)@l;
+ lis reg,(expr)@ha; \
+ addi reg,reg,(expr)@l;
#define LOAD_REG_ADDR(reg,name) LOAD_REG_IMMEDIATE(reg, name)
-#define LOAD_REG_ADDRBASE(reg, name) lis (reg),name@ha
+#define LOAD_REG_ADDRBASE(reg, name) lis reg,name@ha
#define ADDROFF(name) name@l
/* offsets for stack frame layout */
@@ -372,9 +387,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#ifdef CONFIG_PPC64
#define MTOCRF(FXM, RS) \
BEGIN_FTR_SECTION_NESTED(848); \
- mtcrf (FXM), (RS); \
+ mtcrf (FXM), RS; \
FTR_SECTION_ELSE_NESTED(848); \
- mtocrf (FXM), (RS); \
+ mtocrf (FXM), RS; \
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
#endif
@@ -463,6 +478,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#ifdef CONFIG_PPC_BOOK3S_64
#define RFI rfid
#define MTMSRD(r) mtmsrd r
+#define MTMSR_EERI(reg) mtmsrd reg,1
#else
#define FIX_SRR1(ra, rb)
#ifndef CONFIG_40x
@@ -471,6 +487,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define RFI rfi; b . /* Prevent prefetch past rfi */
#endif
#define MTMSRD(r) mtmsr r
+#define MTMSR_EERI(reg) mtmsr reg
#define CLR_TOP32(r)
#endif
@@ -490,40 +507,46 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define cr7 7
-/* General Purpose Registers (GPRs) */
-
-#define r0 0
-#define r1 1
-#define r2 2
-#define r3 3
-#define r4 4
-#define r5 5
-#define r6 6
-#define r7 7
-#define r8 8
-#define r9 9
-#define r10 10
-#define r11 11
-#define r12 12
-#define r13 13
-#define r14 14
-#define r15 15
-#define r16 16
-#define r17 17
-#define r18 18
-#define r19 19
-#define r20 20
-#define r21 21
-#define r22 22
-#define r23 23
-#define r24 24
-#define r25 25
-#define r26 26
-#define r27 27
-#define r28 28
-#define r29 29
-#define r30 30
-#define r31 31
+/*
+ * General Purpose Registers (GPRs)
+ *
+ * The lower case r0-r31 should be used in preference to the upper
+ * case R0-R31 as they provide more error checking in the assembler.
+ * Use R0-31 only when really nessesary.
+ */
+
+#define r0 %r0
+#define r1 %r1
+#define r2 %r2
+#define r3 %r3
+#define r4 %r4
+#define r5 %r5
+#define r6 %r6
+#define r7 %r7
+#define r8 %r8
+#define r9 %r9
+#define r10 %r10
+#define r11 %r11
+#define r12 %r12
+#define r13 %r13
+#define r14 %r14
+#define r15 %r15
+#define r16 %r16
+#define r17 %r17
+#define r18 %r18
+#define r19 %r19
+#define r20 %r20
+#define r21 %r21
+#define r22 %r22
+#define r23 %r23
+#define r24 %r24
+#define r25 %r25
+#define r26 %r26
+#define r27 %r27
+#define r28 %r28
+#define r29 %r29
+#define r30 %r30
+#define r31 %r31
/* Floating Point Registers (FPRs) */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 413a5eaef56c..53b6dfa83344 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -389,10 +389,8 @@ extern int powersave_nap; /* set if nap mode can be used in idle loop */
#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);
-extern int pseries_notify_cpuidle_add_cpu(int cpu);
#else
static inline void update_smt_snooze_delay(int snooze) {}
-static inline int pseries_notify_cpuidle_add_cpu(int cpu) { return 0; }
#endif
extern void flush_instruction_cache(void);
diff --git a/arch/powerpc/include/asm/qe.h b/arch/powerpc/include/asm/qe.h
index 5e0b6d511e14..229571a49391 100644
--- a/arch/powerpc/include/asm/qe.h
+++ b/arch/powerpc/include/asm/qe.h
@@ -499,6 +499,7 @@ enum comm_dir {
/* I-RAM */
#define QE_IRAM_IADD_AIE 0x80000000 /* Auto Increment Enable */
#define QE_IRAM_IADD_BADDR 0x00080000 /* Base Address */
+#define QE_IRAM_READY 0x80000000 /* Ready */
/* UPC */
#define UPGCR_PROTOCOL 0x80000000 /* protocol ul2 or pl2 */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index f0cb7f461b9d..638608677e2a 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -491,6 +491,7 @@
#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
#define SPRN_SPRG2 0x112 /* Special Purpose Register General 2 */
#define SPRN_SPRG3 0x113 /* Special Purpose Register General 3 */
+#define SPRN_USPRG3 0x103 /* SPRG3 userspace read */
#define SPRN_SPRG4 0x114 /* Special Purpose Register General 4 */
#define SPRN_SPRG5 0x115 /* Special Purpose Register General 5 */
#define SPRN_SPRG6 0x116 /* Special Purpose Register General 6 */
@@ -753,14 +754,14 @@
* 64-bit server:
* - SPRG0 unused (reserved for HV on Power4)
* - SPRG2 scratch for exception vectors
- * - SPRG3 unused (user visible)
+ * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible)
* - HSPRG0 stores PACA in HV mode
* - HSPRG1 scratch for "HV" exceptions
*
* 64-bit embedded
* - SPRG0 generic exception scratch
* - SPRG2 TLB exception stack
- * - SPRG3 unused (user visible)
+ * - SPRG3 CPU and NUMA node for VDSO getcpu (user visible)
* - SPRG4 unused (user visible)
* - SPRG6 TLB miss scratch (user visible, sorry !)
* - SPRG7 critical exception scratch
@@ -1024,7 +1025,8 @@
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#define mfmsr() ({unsigned long rval; \
- asm volatile("mfmsr %0" : "=r" (rval)); rval;})
+ asm volatile("mfmsr %0" : "=r" (rval) : \
+ : "memory"); rval;})
#ifdef CONFIG_PPC_BOOK3S_64
#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
: : "r" (v) : "memory")
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 68831e9cf82f..faf93529cbf0 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -22,6 +22,12 @@
#define THREAD_SIZE (1 << THREAD_SHIFT)
+#ifdef CONFIG_PPC64
+#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
+#else
+#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#endif
+
#ifndef __ASSEMBLY__
#include <linux/cache.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index cbe2297d68b6..5712f06905a9 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -8,7 +8,7 @@
struct pt_regs;
-TRACE_EVENT(irq_entry,
+DECLARE_EVENT_CLASS(ppc64_interrupt_class,
TP_PROTO(struct pt_regs *regs),
@@ -25,55 +25,32 @@ TRACE_EVENT(irq_entry,
TP_printk("pt_regs=%p", __entry->regs)
);
-TRACE_EVENT(irq_exit,
+DEFINE_EVENT(ppc64_interrupt_class, irq_entry,
TP_PROTO(struct pt_regs *regs),
- TP_ARGS(regs),
-
- TP_STRUCT__entry(
- __field(struct pt_regs *, regs)
- ),
-
- TP_fast_assign(
- __entry->regs = regs;
- ),
-
- TP_printk("pt_regs=%p", __entry->regs)
+ TP_ARGS(regs)
);
-TRACE_EVENT(timer_interrupt_entry,
+DEFINE_EVENT(ppc64_interrupt_class, irq_exit,
TP_PROTO(struct pt_regs *regs),
- TP_ARGS(regs),
-
- TP_STRUCT__entry(
- __field(struct pt_regs *, regs)
- ),
-
- TP_fast_assign(
- __entry->regs = regs;
- ),
-
- TP_printk("pt_regs=%p", __entry->regs)
+ TP_ARGS(regs)
);
-TRACE_EVENT(timer_interrupt_exit,
+DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_entry,
TP_PROTO(struct pt_regs *regs),
- TP_ARGS(regs),
+ TP_ARGS(regs)
+);
- TP_STRUCT__entry(
- __field(struct pt_regs *, regs)
- ),
+DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
- TP_fast_assign(
- __entry->regs = regs;
- ),
+ TP_PROTO(struct pt_regs *regs),
- TP_printk("pt_regs=%p", __entry->regs)
+ TP_ARGS(regs)
);
#ifdef CONFIG_PPC_PSERIES
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index dc0419b66f17..50f261bc3e95 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -22,6 +22,8 @@ extern unsigned long vdso64_rt_sigtramp;
extern unsigned long vdso32_sigtramp;
extern unsigned long vdso32_rt_sigtramp;
+int __cpuinit vdso_getcpu_init(void);
+
#else /* __ASSEMBLY__ */
#ifdef __VDSO64__
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index b19adf751dd9..df81cb72d1e0 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -44,6 +44,8 @@
*/
#define VIO_CMO_MIN_ENT 1562624
+extern struct bus_type vio_bus_type;
+
struct iommu_table;
/*
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 83afacd3ba7b..bb282dd81612 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -128,6 +128,7 @@ ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
obj-y += ppc_save_regs.o
endif
+obj-$(CONFIG_EPAPR_PARAVIRT) += epapr_paravirt.o epapr_hcalls.o
obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o
# Disable GCOV in odd or sensitive code
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 52c7ad78242e..85b05c463fae 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -533,6 +533,7 @@ int main(void)
HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+ HSTATE_FIELD(HSTATE_SPRG3, sprg3);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
index ebc62f42a237..61f079e05b61 100644
--- a/arch/powerpc/kernel/cpu_setup_a2.S
+++ b/arch/powerpc/kernel/cpu_setup_a2.S
@@ -100,19 +100,19 @@ _icswx_skip_guest:
lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
mtspr SPRN_MMUCR0, r4
li r4,A2_IERAT_SIZE-1
- PPC_ERATWE(r4,r4,3)
+ PPC_ERATWE(R4,R4,3)
/* Now set the D-ERAT watermark to 31 */
lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
mtspr SPRN_MMUCR0, r4
li r4,A2_DERAT_SIZE-1
- PPC_ERATWE(r4,r4,3)
+ PPC_ERATWE(R4,R4,3)
/* And invalidate the beast just in case. That won't get rid of
* a bolted entry though it will be in LRU and so will go away eventually
* but let's not bother for now
*/
- PPC_ERATILX(0,0,0)
+ PPC_ERATILX(0,0,R0)
1:
blr
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index b1ec983dcec8..289be751cd75 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -11,6 +11,8 @@
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
+#include <linux/pci.h>
+#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>
@@ -205,7 +207,13 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init dma_init(void)
{
- dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+#ifdef CONFIG_PCI
+ dma_debug_add_bus(&pci_bus_type);
+#endif
+#ifdef CONFIG_IBMVIO
+ dma_debug_add_bus(&vio_bus_type);
+#endif
return 0;
}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index ba3aeb4bc06a..5207d5a405e2 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -92,7 +92,7 @@ crit_transfer_to_handler:
mfspr r8,SPRN_SPRG_THREAD
lwz r0,KSP_LIMIT(r8)
stw r0,SAVED_KSP_LIMIT(r11)
- rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r0, r1)
stw r0,KSP_LIMIT(r8)
/* fall through */
#endif
@@ -112,7 +112,7 @@ crit_transfer_to_handler:
mfspr r8,SPRN_SPRG_THREAD
lwz r0,KSP_LIMIT(r8)
stw r0,saved_ksp_limit@l(0)
- rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r0, r1)
stw r0,KSP_LIMIT(r8)
/* fall through */
#endif
@@ -158,7 +158,7 @@ transfer_to_handler:
tophys(r11,r11)
addi r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
- rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_CPU(r9)
slwi r9,r9,3
add r11,r11,r9
@@ -179,7 +179,7 @@ transfer_to_handler:
ble- stack_ovf /* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
- rlwinm r9,r1,0,0,31-THREAD_SHIFT
+ CURRENT_THREAD_INFO(r9, r1)
tophys(r9,r9) /* check local flags */
lwz r12,TI_LOCAL_FLAGS(r9)
mtcrf 0x01,r12
@@ -226,13 +226,7 @@ reenable_mmu: /* re-enable mmu so we can */
stw r3,16(r1)
stw r4,20(r1)
stw r5,24(r1)
- andi. r12,r12,MSR_PR
- b 11f
bl trace_hardirqs_off
- b 12f
-11:
- bl trace_hardirqs_off
-12:
lwz r5,24(r1)
lwz r4,20(r1)
lwz r3,16(r1)
@@ -333,7 +327,7 @@ _GLOBAL(DoSyscall)
mtmsr r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
- rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+ CURRENT_THREAD_INFO(r10, r1)
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
@@ -354,7 +348,7 @@ ret_from_syscall:
bl do_show_syscall_exit
#endif
mr r6,r3
- rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+ CURRENT_THREAD_INFO(r12, r1)
/* disable interrupts so current_thread_info()->flags can't change */
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
/* Note: We don't bother telling lockdep about it */
@@ -815,7 +809,7 @@ ret_from_except:
user_exc_return: /* r10 contains MSR_KERNEL here */
/* Check current_thread_info()->flags */
- rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_USER_WORK_MASK
bne do_work
@@ -835,7 +829,7 @@ restore_user:
/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
/* check current_thread_info->preempt_count */
- rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r9, r1)
lwz r0,TI_PREEMPT(r9)
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
@@ -852,7 +846,7 @@ resume_kernel:
bl trace_hardirqs_off
#endif
1: bl preempt_schedule_irq
- rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r9, r1)
lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b
@@ -1122,7 +1116,7 @@ ret_from_debug_exc:
lwz r10,SAVED_KSP_LIMIT(r1)
stw r10,KSP_LIMIT(r9)
lwz r9,THREAD_INFO-THREAD(r9)
- rlwinm r10,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r10, r1)
lwz r10,TI_PREEMPT(r10)
stw r10,TI_PREEMPT(r9)
RESTORE_xSRR(SRR0,SRR1);
@@ -1156,7 +1150,7 @@ load_dbcr0:
lis r11,global_dbcr0@ha
addi r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
- rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_CPU(r9)
slwi r9,r9,3
add r11,r11,r9
@@ -1197,7 +1191,7 @@ recheck:
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC
MTMSRD(r10) /* disable interrupts */
- rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
andi. r0,r9,_TIF_NEED_RESCHED
bne- do_resched
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ed1718feb9d9..4b01a25e29ef 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -146,7 +146,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
REST_2GPRS(7,r1)
addi r9,r1,STACK_FRAME_OVERHEAD
#endif
- clrrdi r11,r1,THREAD_SHIFT
+ CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
@@ -181,7 +181,7 @@ syscall_exit:
bl .do_show_syscall_exit
ld r3,RESULT(r1)
#endif
- clrrdi r12,r1,THREAD_SHIFT
+ CURRENT_THREAD_INFO(r12, r1)
ld r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
@@ -197,7 +197,16 @@ syscall_exit:
wrteei 0
#else
ld r10,PACAKMSR(r13)
- mtmsrd r10,1
+ /*
+ * For performance reasons we clear RI the same time that we
+ * clear EE. We only need to clear RI just before we restore r13
+ * below, but batching it with EE saves us one expensive mtmsrd call.
+ * We have to be careful to restore RI if we branch anywhere from
+ * here (eg syscall_exit_work).
+ */
+ li r9,MSR_RI
+ andc r11,r10,r9
+ mtmsrd r11,1
#endif /* CONFIG_PPC_BOOK3E */
ld r9,TI_FLAGS(r12)
@@ -214,17 +223,6 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
andi. r6,r8,MSR_PR
ld r4,_LINK(r1)
- /*
- * Clear RI before restoring r13. If we are returning to
- * userspace and we take an exception after restoring r13,
- * we end up corrupting the userspace r13 value.
- */
-#ifdef CONFIG_PPC_BOOK3S
- /* No MSR:RI on BookE */
- li r12,MSR_RI
- andc r11,r10,r12
- mtmsrd r11,1 /* clear MSR.RI */
-#endif /* CONFIG_PPC_BOOK3S */
beq- 1f
ACCOUNT_CPU_USER_EXIT(r11, r12)
@@ -262,7 +260,7 @@ syscall_dotrace:
ld r7,GPR7(r1)
ld r8,GPR8(r1)
addi r9,r1,STACK_FRAME_OVERHEAD
- clrrdi r10,r1,THREAD_SHIFT
+ CURRENT_THREAD_INFO(r10, r1)
ld r10,TI_FLAGS(r10)
b .Lsyscall_dotrace_cont
@@ -271,6 +269,9 @@ syscall_enosys:
b syscall_exit
syscall_exit_work:
+#ifdef CONFIG_PPC_BOOK3S
+ mtmsrd r10,1 /* Restore RI */
+#endif
/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
If TIF_NOERROR is set, just save r3 as it is. */
@@ -499,7 +500,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
2:
#endif /* !CONFIG_PPC_BOOK3S */
- clrrdi r7,r8,THREAD_SHIFT /* base of new stack */
+ CURRENT_THREAD_INFO(r7, r8) /* base of new stack */
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
because we don't need to leave the 288-byte ABI gap at the
top of the kernel stack. */
@@ -558,27 +559,54 @@ _GLOBAL(ret_from_except_lite)
mtmsrd r10,1 /* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
-#ifdef CONFIG_PREEMPT
- clrrdi r9,r1,THREAD_SHIFT /* current_thread_info() */
- li r0,_TIF_NEED_RESCHED /* bits to check */
+ CURRENT_THREAD_INFO(r9, r1)
ld r3,_MSR(r1)
ld r4,TI_FLAGS(r9)
- /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
- rlwimi r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
- and. r0,r4,r0 /* check NEED_RESCHED and maybe SIGPENDING */
- bne do_work
-
-#else /* !CONFIG_PREEMPT */
- ld r3,_MSR(r1) /* Returning to user mode? */
andi. r3,r3,MSR_PR
- beq restore /* if not, just restore regs and return */
+ beq resume_kernel
/* Check current_thread_info()->flags */
- clrrdi r9,r1,THREAD_SHIFT
- ld r4,TI_FLAGS(r9)
andi. r0,r4,_TIF_USER_WORK_MASK
- bne do_work
-#endif /* !CONFIG_PREEMPT */
+ beq restore
+
+ andi. r0,r4,_TIF_NEED_RESCHED
+ beq 1f
+ bl .restore_interrupts
+ bl .schedule
+ b .ret_from_except_lite
+
+1: bl .save_nvgprs
+ bl .restore_interrupts
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_notify_resume
+ b .ret_from_except
+
+resume_kernel:
+#ifdef CONFIG_PREEMPT
+ /* Check if we need to preempt */
+ andi. r0,r4,_TIF_NEED_RESCHED
+ beq+ restore
+ /* Check that preempt_count() == 0 and interrupts are enabled */
+ lwz r8,TI_PREEMPT(r9)
+ cmpwi cr1,r8,0
+ ld r0,SOFTE(r1)
+ cmpdi r0,0
+ crandc eq,cr1*4+eq,eq
+ bne restore
+
+ /*
+ * Here we are preempting the current task. We want to make
+ * sure we are soft-disabled first
+ */
+ SOFT_DISABLE_INTS(r3,r4)
+1: bl .preempt_schedule_irq
+
+ /* Re-test flags and eventually loop */
+ CURRENT_THREAD_INFO(r9, r1)
+ ld r4,TI_FLAGS(r9)
+ andi. r0,r4,_TIF_NEED_RESCHED
+ bne 1b
+#endif /* CONFIG_PREEMPT */
.globl fast_exc_return_irq
fast_exc_return_irq:
@@ -759,50 +787,6 @@ restore_check_irq_replay:
#endif /* CONFIG_PPC_BOOK3E */
1: b .ret_from_except /* What else to do here ? */
-
-
-3:
-do_work:
-#ifdef CONFIG_PREEMPT
- andi. r0,r3,MSR_PR /* Returning to user mode? */
- bne user_work
- /* Check that preempt_count() == 0 and interrupts are enabled */
- lwz r8,TI_PREEMPT(r9)
- cmpwi cr1,r8,0
- ld r0,SOFTE(r1)
- cmpdi r0,0
- crandc eq,cr1*4+eq,eq
- bne restore
-
- /*
- * Here we are preempting the current task. We want to make
- * sure we are soft-disabled first
- */
- SOFT_DISABLE_INTS(r3,r4)
-1: bl .preempt_schedule_irq
-
- /* Re-test flags and eventually loop */
- clrrdi r9,r1,THREAD_SHIFT
- ld r4,TI_FLAGS(r9)
- andi. r0,r4,_TIF_NEED_RESCHED
- bne 1b
- b restore
-
-user_work:
-#endif /* CONFIG_PREEMPT */
-
- andi. r0,r4,_TIF_NEED_RESCHED
- beq 1f
- bl .restore_interrupts
- bl .schedule
- b .ret_from_except_lite
-
-1: bl .save_nvgprs
- bl .restore_interrupts
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_notify_resume
- b .ret_from_except
-
unrecov_restore:
addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
new file mode 100644
index 000000000000..697b390ebfd8
--- /dev/null
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/threads.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+/* Hypercall entry point. Will be patched with device tree instructions. */
+.global epapr_hypercall_start
+epapr_hypercall_start:
+ li r3, -1
+ nop
+ nop
+ nop
+ blr
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
new file mode 100644
index 000000000000..028aeae370b6
--- /dev/null
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -0,0 +1,52 @@
+/*
+ * ePAPR para-virtualization support.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/of.h>
+#include <asm/epapr_hcalls.h>
+#include <asm/cacheflush.h>
+#include <asm/code-patching.h>
+
+bool epapr_paravirt_enabled;
+
+static int __init epapr_paravirt_init(void)
+{
+ struct device_node *hyper_node;
+ const u32 *insts;
+ int len, i;
+
+ hyper_node = of_find_node_by_path("/hypervisor");
+ if (!hyper_node)
+ return -ENODEV;
+
+ insts = of_get_property(hyper_node, "hcall-instructions", &len);
+ if (!insts)
+ return -ENODEV;
+
+ if (len % 4 || len > (4 * 4))
+ return -ENODEV;
+
+ for (i = 0; i < (len / 4); i++)
+ patch_instruction(epapr_hypercall_start + i, insts[i]);
+
+ epapr_paravirt_enabled = true;
+
+ return 0;
+}
+
+early_initcall(epapr_paravirt_init);
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 7215cc2495df..98be7f0cd227 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -222,7 +222,7 @@ exc_##n##_bad_stack: \
* interrupts happen before the wait instruction.
*/
#define CHECK_NAPPING() \
- clrrdi r11,r1,THREAD_SHIFT; \
+ CURRENT_THREAD_INFO(r11, r1); \
ld r10,TI_LOCAL_FLAGS(r11); \
andi. r9,r10,_TLF_NAPPING; \
beq+ 1f; \
@@ -903,7 +903,7 @@ skpinv: addi r6,r6,1 /* Increment */
bne 1b /* If not, repeat */
/* Invalidate all TLBs */
- PPC_TLBILX_ALL(0,0)
+ PPC_TLBILX_ALL(0,R0)
sync
isync
@@ -961,7 +961,7 @@ skpinv: addi r6,r6,1 /* Increment */
tlbwe
/* Invalidate TLB1 */
- PPC_TLBILX_ALL(0,0)
+ PPC_TLBILX_ALL(0,R0)
sync
isync
@@ -1020,7 +1020,7 @@ skpinv: addi r6,r6,1 /* Increment */
tlbwe
/* Invalidate TLB1 */
- PPC_TLBILX_ALL(0,0)
+ PPC_TLBILX_ALL(0,R0)
sync
isync
@@ -1138,7 +1138,7 @@ a2_tlbinit_after_iprot_flush:
tlbwe
#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
- PPC_TLBILX(0,0,0)
+ PPC_TLBILX(0,0,R0)
sync
isync
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 1c06d2971545..e894515e77bb 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -239,6 +239,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
* out of line to handle them
*/
. = 0xe00
+hv_exception_trampoline:
b h_data_storage_hv
. = 0xe20
b h_instr_storage_hv
@@ -851,7 +852,7 @@ BEGIN_FTR_SECTION
bne- do_ste_alloc /* If so handle it */
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
- clrrdi r11,r1,THREAD_SHIFT
+ CURRENT_THREAD_INFO(r11, r1)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
bne 77f /* then don't call hash_page now */
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index de369558bf0a..e0ada05f2df3 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -26,7 +26,7 @@
#include <asm/ptrace.h>
#ifdef CONFIG_VSX
-#define REST_32FPVSRS(n,c,base) \
+#define __REST_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
@@ -35,7 +35,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: REST_32VSRS(n,c,base); \
3:
-#define SAVE_32FPVSRS(n,c,base) \
+#define __SAVE_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
@@ -44,9 +44,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: SAVE_32VSRS(n,c,base); \
3:
#else
-#define REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
-#define SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
+#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
+#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
#endif
+#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
+#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)
/*
* This task wants to use the FPU now.
@@ -79,7 +81,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
beq 1f
toreal(r4)
addi r4,r4,THREAD /* want last_task_used_math->thread */
- SAVE_32FPVSRS(0, r5, r4)
+ SAVE_32FPVSRS(0, R5, R4)
mffs fr0
stfd fr0,THREAD_FPSCR(r4)
PPC_LL r5,PT_REGS(r4)
@@ -106,7 +108,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
lfd fr0,THREAD_FPSCR(r5)
MTFSF_L(fr0)
- REST_32FPVSRS(0, r4, r5)
+ REST_32FPVSRS(0, R4, R5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
fromreal(r4)
@@ -140,7 +142,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
addi r3,r3,THREAD /* want THREAD of task */
PPC_LL r5,PT_REGS(r3)
PPC_LCMPI 0,r5,0
- SAVE_32FPVSRS(0, r4 ,r3)
+ SAVE_32FPVSRS(0, R4 ,R3)
mffs fr0
stfd fr0,THREAD_FPSCR(r3)
beq 1f
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index bf99cfa6bbfe..91b46b7f6f0d 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -63,11 +63,9 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
return -EINVAL;
/* replace the text with the new text */
- if (probe_kernel_write((void *)ip, &new, MCOUNT_INSN_SIZE))
+ if (patch_instruction((unsigned int *)ip, new))
return -EPERM;
- flush_icache_range(ip, ip + 8);
-
return 0;
}
@@ -212,12 +210,9 @@ __ftrace_make_nop(struct module *mod,
*/
op = 0x48000008; /* b +8 */
- if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
+ if (patch_instruction((unsigned int *)ip, op))
return -EPERM;
-
- flush_icache_range(ip, ip + 8);
-
return 0;
}
@@ -245,9 +240,9 @@ __ftrace_make_nop(struct module *mod,
/*
* On PPC32 the trampoline looks like:
- * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
- * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
- * 0x7d, 0x69, 0x03, 0xa6 mtctr r11
+ * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
+ * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
+ * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
* 0x4e, 0x80, 0x04, 0x20 bctr
*/
@@ -262,9 +257,9 @@ __ftrace_make_nop(struct module *mod,
pr_devel(" %08x %08x ", jmp[0], jmp[1]);
/* verify that this is what we expect it to be */
- if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
- ((jmp[1] & 0xffff0000) != 0x396b0000) ||
- (jmp[2] != 0x7d6903a6) ||
+ if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
+ ((jmp[1] & 0xffff0000) != 0x398c0000) ||
+ (jmp[2] != 0x7d8903a6) ||
(jmp[3] != 0x4e800420)) {
printk(KERN_ERR "Not a trampoline\n");
return -EINVAL;
@@ -286,11 +281,9 @@ __ftrace_make_nop(struct module *mod,
op = PPC_INST_NOP;
- if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
+ if (patch_instruction((unsigned int *)ip, op))
return -EPERM;
- flush_icache_range(ip, ip + 8);
-
return 0;
}
#endif /* PPC64 */
@@ -426,11 +419,9 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
pr_devel("write to %lx\n", rec->ip);
- if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
+ if (patch_instruction((unsigned int *)ip, op))
return -EPERM;
- flush_icache_range(ip, ip + 8);
-
return 0;
}
#endif /* CONFIG_PPC64 */
@@ -484,6 +475,58 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
return ret;
}
+static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+{
+ unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
+ int ret;
+
+ ret = ftrace_update_record(rec, enable);
+
+ switch (ret) {
+ case FTRACE_UPDATE_IGNORE:
+ return 0;
+ case FTRACE_UPDATE_MAKE_CALL:
+ return ftrace_make_call(rec, ftrace_addr);
+ case FTRACE_UPDATE_MAKE_NOP:
+ return ftrace_make_nop(NULL, rec, ftrace_addr);
+ }
+
+ return 0;
+}
+
+void ftrace_replace_code(int enable)
+{
+ struct ftrace_rec_iter *iter;
+ struct dyn_ftrace *rec;
+ int ret;
+
+ for (iter = ftrace_rec_iter_start(); iter;
+ iter = ftrace_rec_iter_next(iter)) {
+ rec = ftrace_rec_iter_record(iter);
+ ret = __ftrace_replace_code(rec, enable);
+ if (ret) {
+ ftrace_bug(ret, rec->ip);
+ return;
+ }
+ }
+}
+
+void arch_ftrace_update_code(int command)
+{
+ if (command & FTRACE_UPDATE_CALLS)
+ ftrace_replace_code(1);
+ else if (command & FTRACE_DISABLE_CALLS)
+ ftrace_replace_code(0);
+
+ if (command & FTRACE_UPDATE_TRACE_FUNC)
+ ftrace_update_ftrace_func(ftrace_trace_function);
+
+ if (command & FTRACE_START_FUNC_RET)
+ ftrace_enable_ftrace_graph_caller();
+ else if (command & FTRACE_STOP_FUNC_RET)
+ ftrace_disable_ftrace_graph_caller();
+}
+
int __init ftrace_dyn_arch_init(void *data)
{
/* caller expects data to be zero */
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 1f4434a38608..0f59863c3ade 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -192,7 +192,7 @@ _ENTRY(__early_start)
li r0,0
stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)
- rlwinm r22,r1,0,0,31-THREAD_SHIFT /* current thread_info */
+ CURRENT_THREAD_INFO(r22, r1)
stw r24, TI_CPU(r22)
bl early_init
@@ -556,8 +556,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
/* SPE Unavailable */
START_EXCEPTION(SPEUnavailable)
NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
- bne load_up_spe
- addi r3,r1,STACK_FRAME_OVERHEAD
+ beq 1f
+ bl load_up_spe
+ b fast_exception_return
+1: addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0x2010, KernelSPE)
#else
EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
@@ -778,7 +780,7 @@ tlb_write_entry:
/* Note that the SPE support is closely modeled after the AltiVec
* support. Changes to one are likely to be applicable to the
* other! */
-load_up_spe:
+_GLOBAL(load_up_spe)
/*
* Disable SPE for the task which had SPE previously,
* and save its SPE registers in its thread_struct.
@@ -826,20 +828,7 @@ load_up_spe:
subi r4,r5,THREAD
stw r4,last_task_used_spe@l(r3)
#endif /* !CONFIG_SMP */
- /* restore registers and return */
-2: REST_4GPRS(3, r11)
- lwz r10,_CCR(r11)
- REST_GPR(1, r11)
- mtcr r10
- lwz r10,_LINK(r11)
- mtlr r10
- REST_GPR(10, r11)
- mtspr SPRN_SRR1,r9
- mtspr SPRN_SRR0,r12
- REST_GPR(9, r11)
- REST_GPR(12, r11)
- lwz r11,GPR11(r11)
- rfi
+ blr
/*
* SPE unavailable trap from kernel - print a message, but let
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 2bc0584be81c..f3a82dde61db 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -111,7 +111,7 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
* and the single_step_dabr_instruction(), then cleanup the breakpoint
* restoration variables to prevent dangling pointers.
*/
- if (bp->ctx->task)
+ if (bp->ctx && bp->ctx->task)
bp->ctx->task->thread.last_hit_ubp = NULL;
}
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 15c611de1ee2..1686916cc7f0 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -135,7 +135,7 @@ BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- rlwinm r9,r1,0,0,31-THREAD_SHIFT /* current thread_info */
+ CURRENT_THREAD_INFO(r9, r1)
lwz r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
ori r8,r8,_TLF_NAPPING /* so when we take an exception */
stw r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
@@ -158,7 +158,7 @@ _GLOBAL(power_save_ppc32_restore)
stw r9,_NIP(r11) /* make it do a blr */
#ifdef CONFIG_SMP
- rlwinm r12,r11,0,0,31-THREAD_SHIFT
+ CURRENT_THREAD_INFO(r12, r11)
lwz r11,TI_CPU(r12) /* get cpu number * 4 */
slwi r11,r11,2
#else
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
index ff007b59448d..4c7cb4008585 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -60,7 +60,7 @@ _GLOBAL(book3e_idle)
1: /* Let's set the _TLF_NAPPING flag so interrupts make us return
* to the right spot
*/
- clrrdi r11,r1,THREAD_SHIFT
+ CURRENT_THREAD_INFO(r11, r1)
ld r10,TI_LOCAL_FLAGS(r11)
ori r10,r10,_TLF_NAPPING
std r10,TI_LOCAL_FLAGS(r11)
diff --git a/arch/powerpc/kernel/idle_e500.S b/arch/powerpc/kernel/idle_e500.S
index 4f0ab85f3788..15448668988d 100644
--- a/arch/powerpc/kernel/idle_e500.S
+++ b/arch/powerpc/kernel/idle_e500.S
@@ -21,7 +21,7 @@
.text
_GLOBAL(e500_idle)
- rlwinm r3,r1,0,0,31-THREAD_SHIFT /* current thread_info */
+ CURRENT_THREAD_INFO(r3, r1)
lwz r4,TI_LOCAL_FLAGS(r3) /* set napping bit */
ori r4,r4,_TLF_NAPPING /* so when we take an exception */
stw r4,TI_LOCAL_FLAGS(r3) /* it will return to our caller */
@@ -96,7 +96,7 @@ _GLOBAL(power_save_ppc32_restore)
stw r9,_NIP(r11) /* make it do a blr */
#ifdef CONFIG_SMP
- rlwinm r12,r1,0,0,31-THREAD_SHIFT
+ CURRENT_THREAD_INFO(r12, r1)
lwz r11,TI_CPU(r12) /* get cpu number * 4 */
slwi r11,r11,2
#else
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index 2c71b0fc9f91..e3edaa189911 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -59,7 +59,7 @@ BEGIN_FTR_SECTION
DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
- clrrdi r9,r1,THREAD_SHIFT /* current thread_info */
+ CURRENT_THREAD_INFO(r9, r1)
ld r8,TI_LOCAL_FLAGS(r9) /* set napping bit */
ori r8,r8,_TLF_NAPPING /* so when we take an exception */
std r8,TI_LOCAL_FLAGS(r9) /* it will return to our caller */
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 359f078571c7..ff5a6ce027b8 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -33,6 +33,9 @@
#include <linux/bitmap.h>
#include <linux/iommu-helper.h>
#include <linux/crash_dump.h>
+#include <linux/hash.h>
+#include <linux/fault-inject.h>
+#include <linux/pci.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
@@ -40,6 +43,7 @@
#include <asm/machdep.h>
#include <asm/kdump.h>
#include <asm/fadump.h>
+#include <asm/vio.h>
#define DBG(...)
@@ -58,6 +62,114 @@ static int __init setup_iommu(char *str)
__setup("iommu=", setup_iommu);
+static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
+
+/*
+ * We precalculate the hash to avoid doing it on every allocation.
+ *
+ * The hash is important to spread CPUs across all the pools. For example,
+ * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
+ * with 4 pools all primary threads would map to the same pool.
+ */
+static int __init setup_iommu_pool_hash(void)
+{
+ unsigned int i;
+
+ for_each_possible_cpu(i)
+ per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
+
+ return 0;
+}
+subsys_initcall(setup_iommu_pool_hash);
+
+#ifdef CONFIG_FAIL_IOMMU
+
+static DECLARE_FAULT_ATTR(fail_iommu);
+
+static int __init setup_fail_iommu(char *str)
+{
+ return setup_fault_attr(&fail_iommu, str);
+}
+__setup("fail_iommu=", setup_fail_iommu);
+
+static bool should_fail_iommu(struct device *dev)
+{
+ return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
+}
+
+static int __init fail_iommu_debugfs(void)
+{
+ struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
+ NULL, &fail_iommu);
+
+ return IS_ERR(dir) ? PTR_ERR(dir) : 0;
+}
+late_initcall(fail_iommu_debugfs);
+
+static ssize_t fail_iommu_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
+}
+
+static ssize_t fail_iommu_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ int i;
+
+ if (count > 0 && sscanf(buf, "%d", &i) > 0)
+ dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
+
+ return count;
+}
+
+static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
+ fail_iommu_store);
+
+static int fail_iommu_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ if (action == BUS_NOTIFY_ADD_DEVICE) {
+ if (device_create_file(dev, &dev_attr_fail_iommu))
+ pr_warn("Unable to create IOMMU fault injection sysfs "
+ "entries\n");
+ } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+ device_remove_file(dev, &dev_attr_fail_iommu);
+ }
+
+ return 0;
+}
+
+static struct notifier_block fail_iommu_bus_notifier = {
+ .notifier_call = fail_iommu_bus_notify
+};
+
+static int __init fail_iommu_setup(void)
+{
+#ifdef CONFIG_PCI
+ bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
+#endif
+#ifdef CONFIG_IBMVIO
+ bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
+#endif
+
+ return 0;
+}
+/*
+ * Must execute after PCI and VIO subsystem have initialised but before
+ * devices are probed.
+ */
+arch_initcall(fail_iommu_setup);
+#else
+static inline bool should_fail_iommu(struct device *dev)
+{
+ return false;
+}
+#endif
+
static unsigned long iommu_range_alloc(struct device *dev,
struct iommu_table *tbl,
unsigned long npages,
@@ -71,6 +183,9 @@ static unsigned long iommu_range_alloc(struct device *dev,
int pass = 0;
unsigned long align_mask;
unsigned long boundary_size;
+ unsigned long flags;
+ unsigned int pool_nr;
+ struct iommu_pool *pool;
align_mask = 0xffffffffffffffffl >> (64 - align_order);
@@ -83,36 +198,49 @@ static unsigned long iommu_range_alloc(struct device *dev,
return DMA_ERROR_CODE;
}
- if (handle && *handle)
- start = *handle;
+ if (should_fail_iommu(dev))
+ return DMA_ERROR_CODE;
+
+ /*
+ * We don't need to disable preemption here because any CPU can
+ * safely use any IOMMU pool.
+ */
+ pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);
+
+ if (largealloc)
+ pool = &(tbl->large_pool);
else
- start = largealloc ? tbl->it_largehint : tbl->it_hint;
+ pool = &(tbl->pools[pool_nr]);
- /* Use only half of the table for small allocs (15 pages or less) */
- limit = largealloc ? tbl->it_size : tbl->it_halfpoint;
+ spin_lock_irqsave(&(pool->lock), flags);
- if (largealloc && start < tbl->it_halfpoint)
- start = tbl->it_halfpoint;
+again:
+ if ((pass == 0) && handle && *handle)
+ start = *handle;
+ else
+ start = pool->hint;
+
+ limit = pool->end;
/* The case below can happen if we have a small segment appended
* to a large, or when the previous alloc was at the very end of
* the available space. If so, go back to the initial start.
*/
if (start >= limit)
- start = largealloc ? tbl->it_largehint : tbl->it_hint;
-
- again:
+ start = pool->start;
if (limit + tbl->it_offset > mask) {
limit = mask - tbl->it_offset + 1;
/* If we're constrained on address range, first try
* at the masked hint to avoid O(n) search complexity,
- * but on second pass, start at 0.
+ * but on second pass, start at 0 in pool 0.
*/
- if ((start & mask) >= limit || pass > 0)
- start = 0;
- else
+ if ((start & mask) >= limit || pass > 0) {
+ pool = &(tbl->pools[0]);
+ start = pool->start;
+ } else {
start &= mask;
+ }
}
if (dev)
@@ -126,16 +254,25 @@ static unsigned long iommu_range_alloc(struct device *dev,
tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
align_mask);
if (n == -1) {
- if (likely(pass < 2)) {
- /* First failure, just rescan the half of the table.
- * Second failure, rescan the other half of the table.
- */
- start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
- limit = pass ? tbl->it_size : limit;
+ if (likely(pass == 0)) {
+ /* First try the pool from the start */
+ pool->hint = pool->start;
pass++;
goto again;
+
+ } else if (pass <= tbl->nr_pools) {
+ /* Now try scanning all the other pools */
+ spin_unlock(&(pool->lock));
+ pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
+ pool = &tbl->pools[pool_nr];
+ spin_lock(&(pool->lock));
+ pool->hint = pool->start;
+ pass++;
+ goto again;
+
} else {
- /* Third failure, give up */
+ /* Give up */
+ spin_unlock_irqrestore(&(pool->lock), flags);
return DMA_ERROR_CODE;
}
}
@@ -145,10 +282,10 @@ static unsigned long iommu_range_alloc(struct device *dev,
/* Bump the hint to a new block for small allocs. */
if (largealloc) {
/* Don't bump to new block to avoid fragmentation */
- tbl->it_largehint = end;
+ pool->hint = end;
} else {
/* Overflow will be taken care of at the next allocation */
- tbl->it_hint = (end + tbl->it_blocksize - 1) &
+ pool->hint = (end + tbl->it_blocksize - 1) &
~(tbl->it_blocksize - 1);
}
@@ -156,6 +293,8 @@ static unsigned long iommu_range_alloc(struct device *dev,
if (handle)
*handle = end;
+ spin_unlock_irqrestore(&(pool->lock), flags);
+
return n;
}
@@ -165,18 +304,14 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
unsigned long mask, unsigned int align_order,
struct dma_attrs *attrs)
{
- unsigned long entry, flags;
+ unsigned long entry;
dma_addr_t ret = DMA_ERROR_CODE;
int build_fail;
- spin_lock_irqsave(&(tbl->it_lock), flags);
-
entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
- if (unlikely(entry == DMA_ERROR_CODE)) {
- spin_unlock_irqrestore(&(tbl->it_lock), flags);
+ if (unlikely(entry == DMA_ERROR_CODE))
return DMA_ERROR_CODE;
- }
entry += tbl->it_offset; /* Offset into real TCE table */
ret = entry << IOMMU_PAGE_SHIFT; /* Set the return dma address */
@@ -193,8 +328,6 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
*/
if (unlikely(build_fail)) {
__iommu_free(tbl, ret, npages);
-
- spin_unlock_irqrestore(&(tbl->it_lock), flags);
return DMA_ERROR_CODE;
}
@@ -202,16 +335,14 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
if (ppc_md.tce_flush)
ppc_md.tce_flush(tbl);
- spin_unlock_irqrestore(&(tbl->it_lock), flags);
-
/* Make sure updates are seen by hardware */
mb();
return ret;
}
-static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
- unsigned int npages)
+static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
+ unsigned int npages)
{
unsigned long entry, free_entry;
@@ -231,20 +362,57 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index);
WARN_ON(1);
}
- return;
+
+ return false;
+ }
+
+ return true;
+}
+
+static struct iommu_pool *get_pool(struct iommu_table *tbl,
+ unsigned long entry)
+{
+ struct iommu_pool *p;
+ unsigned long largepool_start = tbl->large_pool.start;
+
+ /* The large pool is the last pool at the top of the table */
+ if (entry >= largepool_start) {
+ p = &tbl->large_pool;
+ } else {
+ unsigned int pool_nr = entry / tbl->poolsize;
+
+ BUG_ON(pool_nr > tbl->nr_pools);
+ p = &tbl->pools[pool_nr];
}
+ return p;
+}
+
+static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
+ unsigned int npages)
+{
+ unsigned long entry, free_entry;
+ unsigned long flags;
+ struct iommu_pool *pool;
+
+ entry = dma_addr >> IOMMU_PAGE_SHIFT;
+ free_entry = entry - tbl->it_offset;
+
+ pool = get_pool(tbl, free_entry);
+
+ if (!iommu_free_check(tbl, dma_addr, npages))
+ return;
+
ppc_md.tce_free(tbl, entry, npages);
+
+ spin_lock_irqsave(&(pool->lock), flags);
bitmap_clear(tbl->it_map, free_entry, npages);
+ spin_unlock_irqrestore(&(pool->lock), flags);
}
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
unsigned int npages)
{
- unsigned long flags;
-
- spin_lock_irqsave(&(tbl->it_lock), flags);
-
__iommu_free(tbl, dma_addr, npages);
/* Make sure TLB cache is flushed if the HW needs it. We do
@@ -253,8 +421,6 @@ static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
*/
if (ppc_md.tce_flush)
ppc_md.tce_flush(tbl);
-
- spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
@@ -263,7 +429,6 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct dma_attrs *attrs)
{
dma_addr_t dma_next = 0, dma_addr;
- unsigned long flags;
struct scatterlist *s, *outs, *segstart;
int outcount, incount, i, build_fail = 0;
unsigned int align;
@@ -285,8 +450,6 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
DBG("sg mapping %d elements:\n", nelems);
- spin_lock_irqsave(&(tbl->it_lock), flags);
-
max_seg_size = dma_get_max_seg_size(dev);
for_each_sg(sglist, s, nelems, i) {
unsigned long vaddr, npages, entry, slen;
@@ -369,8 +532,6 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
if (ppc_md.tce_flush)
ppc_md.tce_flush(tbl);
- spin_unlock_irqrestore(&(tbl->it_lock), flags);
-
DBG("mapped %d elements:\n", outcount);
/* For the sake of iommu_unmap_sg, we clear out the length in the
@@ -402,7 +563,6 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
if (s == outs)
break;
}
- spin_unlock_irqrestore(&(tbl->it_lock), flags);
return 0;
}
@@ -412,15 +572,12 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
struct dma_attrs *attrs)
{
struct scatterlist *sg;
- unsigned long flags;
BUG_ON(direction == DMA_NONE);
if (!tbl)
return;
- spin_lock_irqsave(&(tbl->it_lock), flags);
-
sg = sglist;
while (nelems--) {
unsigned int npages;
@@ -440,8 +597,6 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
*/
if (ppc_md.tce_flush)
ppc_md.tce_flush(tbl);
-
- spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
static void iommu_table_clear(struct iommu_table *tbl)
@@ -494,9 +649,8 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
unsigned long sz;
static int welcomed = 0;
struct page *page;
-
- /* Set aside 1/4 of the table for large allocations. */
- tbl->it_halfpoint = tbl->it_size * 3 / 4;
+ unsigned int i;
+ struct iommu_pool *p;
/* number of bytes needed for the bitmap */
sz = (tbl->it_size + 7) >> 3;
@@ -515,9 +669,28 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
if (tbl->it_offset == 0)
set_bit(0, tbl->it_map);
- tbl->it_hint = 0;
- tbl->it_largehint = tbl->it_halfpoint;
- spin_lock_init(&tbl->it_lock);
+ /* We only split the IOMMU table if we have 1GB or more of space */
+ if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
+ tbl->nr_pools = IOMMU_NR_POOLS;
+ else
+ tbl->nr_pools = 1;
+
+ /* We reserve the top 1/4 of the table for large allocations */
+ tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
+
+ for (i = 0; i < tbl->nr_pools; i++) {
+ p = &tbl->pools[i];
+ spin_lock_init(&(p->lock));
+ p->start = tbl->poolsize * i;
+ p->hint = p->start;
+ p->end = p->start + tbl->poolsize;
+ }
+
+ p = &tbl->large_pool;
+ spin_lock_init(&(p->lock));
+ p->start = tbl->poolsize * i;
+ p->hint = p->start;
+ p->end = tbl->it_size;
iommu_table_clear(tbl);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 7835a5e1ea5f..1f017bb7a7ce 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,7 +229,7 @@ notrace void arch_local_irq_restore(unsigned long en)
*/
if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
__hard_irq_disable();
-#ifdef CONFIG_TRACE_IRQFLAG
+#ifdef CONFIG_TRACE_IRQFLAGS
else {
/*
* We should already be hard disabled here. We had bugs
@@ -277,7 +277,7 @@ EXPORT_SYMBOL(arch_local_irq_restore);
* NOTE: This is called with interrupts hard disabled but not marked
* as such in paca->irq_happened, so we need to resync this.
*/
-void restore_interrupts(void)
+void notrace restore_interrupts(void)
{
if (irqs_disabled()) {
local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
@@ -286,6 +286,52 @@ void restore_interrupts(void)
__hard_irq_enable();
}
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+ /*
+ * First we need to hard disable to ensure no interrupt
+ * occurs before we effectively enter the low power state
+ */
+ hard_irq_disable();
+
+ /*
+ * If anything happened while we were soft-disabled,
+ * we return now and do not enter the low power state.
+ */
+ if (lazy_irq_pending())
+ return false;
+
+ /* Tell lockdep we are about to re-enable */
+ trace_hardirqs_on();
+
+ /*
+ * Mark interrupts as soft-enabled and clear the
+ * PACA_IRQ_HARD_DIS from the pending mask since we
+ * are about to hard enable as well as a side effect
+ * of entering the low power state.
+ */
+ local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ local_paca->soft_enabled = 1;
+
+ /* Tell the caller to enter the low power state */
+ return true;
+}
+
#endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec)
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 62bdf2389669..867db1de8949 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -31,6 +31,7 @@
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
+#include <asm/epapr_hcalls.h>
#define KVM_MAGIC_PAGE (-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
@@ -302,7 +303,7 @@ static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
if (imm_one) {
p[kvm_emulate_wrtee_reg_offs] =
- KVM_INST_LI | __PPC_RT(30) | MSR_EE;
+ KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
} else {
/* Make clobbered registers work too */
switch (get_rt(rt)) {
@@ -726,7 +727,7 @@ unsigned long kvm_hypercall(unsigned long *in,
unsigned long register r11 asm("r11") = nr;
unsigned long register r12 asm("r12");
- asm volatile("bl kvm_hypercall_start"
+ asm volatile("bl epapr_hypercall_start"
: "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
"=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
"=r"(r12)
@@ -747,29 +748,6 @@ unsigned long kvm_hypercall(unsigned long *in,
}
EXPORT_SYMBOL_GPL(kvm_hypercall);
-static int kvm_para_setup(void)
-{
- extern u32 kvm_hypercall_start;
- struct device_node *hyper_node;
- u32 *insts;
- int len, i;
-
- hyper_node = of_find_node_by_path("/hypervisor");
- if (!hyper_node)
- return -1;
-
- insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
- if (len % 4)
- return -1;
- if (len > (4 * 4))
- return -1;
-
- for (i = 0; i < (len / 4); i++)
- kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);
-
- return 0;
-}
-
static __init void kvm_free_tmp(void)
{
unsigned long start, end;
@@ -791,7 +769,7 @@ static int __init kvm_guest_init(void)
if (!kvm_para_available())
goto free_tmp;
- if (kvm_para_setup())
+ if (!epapr_paravirt_enabled)
goto free_tmp;
if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
index e291cf3cf954..e100ff324a85 100644
--- a/arch/powerpc/kernel/kvm_emul.S
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -24,16 +24,6 @@
#include <asm/page.h>
#include <asm/asm-offsets.h>
-/* Hypercall entry point. Will be patched with device tree instructions. */
-
-.global kvm_hypercall_start
-kvm_hypercall_start:
- li r3, -1
- nop
- nop
- nop
- blr
-
#define KVM_MAGIC_PAGE (-4096)
#ifdef CONFIG_64BIT
@@ -132,7 +122,7 @@ kvm_emulate_mtmsrd_len:
.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4
-#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
+#define MSR_SAFE_BITS (MSR_EE | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS
.global kvm_emulate_mtmsr
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 386d57f66f28..407e293aad2f 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -179,7 +179,7 @@ _GLOBAL(low_choose_750fx_pll)
mtspr SPRN_HID1,r4
/* Store new HID1 image */
- rlwinm r6,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r6, r1)
lwz r6,TI_CPU(r6)
slwi r6,r6,2
addis r6,r6,nap_save_hid1@ha
@@ -699,7 +699,7 @@ _GLOBAL(kernel_thread)
#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
/* Reset stack */
- rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
+ CURRENT_THREAD_INFO(r1, r1)
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
li r3,0
stw r3,0(r1) /* Zero the stack frame pointer */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 616921ef1439..565b78625a32 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -301,11 +301,6 @@ _GLOBAL(real_writeb)
#ifdef CONFIG_PPC_PASEMI
-/* No support in all binutils for these yet, so use defines */
-#define LBZCIX(RT,RA,RB) .long (0x7c0006aa|(RT<<21)|(RA<<16)|(RB << 11))
-#define STBCIX(RS,RA,RB) .long (0x7c0007aa|(RS<<21)|(RA<<16)|(RB << 11))
-
-
_GLOBAL(real_205_readb)
mfmsr r7
ori r0,r7,MSR_DR
@@ -314,7 +309,7 @@ _GLOBAL(real_205_readb)
mtmsrd r0
sync
isync
- LBZCIX(r3,0,r3)
+ LBZCIX(R3,R0,R3)
isync
mtmsrd r7
sync
@@ -329,7 +324,7 @@ _GLOBAL(real_205_writeb)
mtmsrd r0
sync
isync
- STBCIX(r3,0,r4)
+ STBCIX(R3,R0,R4)
isync
mtmsrd r7
sync
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 8e78e93c8185..2aa04f29e1de 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -200,11 +200,6 @@ int pcibios_add_platform_entries(struct pci_dev *pdev)
return device_create_file(&pdev->dev, &dev_attr_devspec);
}
-char __devinit *pcibios_setup(char *str)
-{
- return str;
-}
-
/*
* Reads the interrupt pin to determine if interrupt is use by card.
* If the interrupt is used, then gets the interrupt line from the
@@ -248,8 +243,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.specifier[1],
- oirq.controller ? oirq.controller->full_name :
- "<default>");
+ of_node_full_name(oirq.controller));
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
@@ -1628,8 +1622,7 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
struct device_node *node = hose->dn;
int mode;
- pr_debug("PCI: Scanning PHB %s\n",
- node ? node->full_name : "<NO NAME>");
+ pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));
/* Get some IO space for the new PHB */
pcibios_setup_phb_io_space(hose);
@@ -1637,6 +1630,11 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
/* Wire up PHB bus resources */
pcibios_setup_phb_resources(hose, &resources);
+ hose->busn.start = hose->first_busno;
+ hose->busn.end = hose->last_busno;
+ hose->busn.flags = IORESOURCE_BUS;
+ pci_add_resource(&resources, &hose->busn);
+
/* Create an empty bus for the toplevel */
bus = pci_create_root_bus(hose->parent, hose->first_busno,
hose->ops, hose, &resources);
@@ -1646,7 +1644,6 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
pci_free_resource_list(&resources);
return;
}
- bus->secondary = hose->first_busno;
hose->bus = bus;
/* Get probe mode and perform scan */
@@ -1654,13 +1651,14 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
if (node && ppc_md.pci_probe_mode)
mode = ppc_md.pci_probe_mode(bus);
pr_debug(" probe mode: %d\n", mode);
- if (mode == PCI_PROBE_DEVTREE) {
- bus->subordinate = hose->last_busno;
+ if (mode == PCI_PROBE_DEVTREE)
of_scan_bus(node, bus);
- }
- if (mode == PCI_PROBE_NORMAL)
- hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
+ if (mode == PCI_PROBE_NORMAL) {
+ pci_bus_update_busn_res_end(bus, 255);
+ hose->last_busno = pci_scan_child_bus(bus);
+ pci_bus_update_busn_res_end(bus, hose->last_busno);
+ }
/* Platform gets a chance to do some global fixups before
* we proceed to resource allocation
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 94a54f61d341..4ff190ff24a0 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -236,7 +236,7 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
bus = pci_bus_b(ln);
- if (in_bus >= bus->number && in_bus <= bus->subordinate)
+ if (in_bus >= bus->number && in_bus <= bus->busn_res.end)
break;
bus = NULL;
}
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 89dde171a6fa..30378a19f65d 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -198,7 +198,6 @@ EXPORT_SYMBOL(of_create_pci_dev);
/**
* of_scan_pci_bridge - Set up a PCI bridge and scan for child nodes
- * @node: device tree node of bridge
* @dev: pci_dev structure for the bridge
*
* of_scan_bus() calls this routine for each PCI bridge that it finds, and
@@ -240,7 +239,7 @@ void __devinit of_scan_pci_bridge(struct pci_dev *dev)
}
bus->primary = dev->bus->number;
- bus->subordinate = busrange[1];
+ pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
bus->bridge_ctl = 0;
/* parse ranges property */
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1b488e5305c5..0794a3017b1b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1312,7 +1312,7 @@ static struct opal_secondary_data {
extern char opal_secondary_entry;
-static void prom_query_opal(void)
+static void __init prom_query_opal(void)
{
long rc;
@@ -1436,7 +1436,7 @@ static void __init prom_opal_hold_cpus(void)
prom_debug("prom_opal_hold_cpus: end...\n");
}
-static void prom_opal_takeover(void)
+static void __init prom_opal_takeover(void)
{
struct opal_secondary_data *data = &RELOC(opal_secondary_data);
struct opal_takeover_args *args = &data->args;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index afd4f051f3f2..bdc499c17872 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -720,6 +720,33 @@ static int powerpc_debugfs_init(void)
arch_initcall(powerpc_debugfs_init);
#endif
+#ifdef CONFIG_BOOKE_WDT
+extern u32 booke_wdt_enabled;
+extern u32 booke_wdt_period;
+
+/* Checks wdt=x and wdt_period=xx command-line option */
+notrace int __init early_parse_wdt(char *p)
+{
+ if (p && strncmp(p, "0", 1) != 0)
+ booke_wdt_enabled = 1;
+
+ return 0;
+}
+early_param("wdt", early_parse_wdt);
+
+int __init early_parse_wdt_period(char *p)
+{
+ unsigned long ret;
+ if (p) {
+ if (!kstrtol(p, 0, &ret))
+ booke_wdt_period = ret;
+ }
+
+ return 0;
+}
+early_param("wdt_period", early_parse_wdt_period);
+#endif /* CONFIG_BOOKE_WDT */
+
void ppc_printk_progress(char *s, unsigned short hex)
{
pr_info("%s\n", s);
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index ec8a53fa9e8f..a8f54ecb091f 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -149,30 +149,6 @@ notrace void __init machine_init(u64 dt_ptr)
ppc_md.progress("id mach(): done", 0x200);
}
-#ifdef CONFIG_BOOKE_WDT
-extern u32 booke_wdt_enabled;
-extern u32 booke_wdt_period;
-
-/* Checks wdt=x and wdt_period=xx command-line option */
-notrace int __init early_parse_wdt(char *p)
-{
- if (p && strncmp(p, "0", 1) != 0)
- booke_wdt_enabled = 1;
-
- return 0;
-}
-early_param("wdt", early_parse_wdt);
-
-int __init early_parse_wdt_period (char *p)
-{
- if (p)
- booke_wdt_period = simple_strtoul(p, NULL, 0);
-
- return 0;
-}
-early_param("wdt_period", early_parse_wdt_period);
-#endif /* CONFIG_BOOKE_WDT */
-
/* Checks "l2cr=xxxx" command-line option */
int __init ppc_setup_l2cr(char *str)
{
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e4cb34322de4..0321007086f7 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -48,6 +48,7 @@
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
+#include <asm/vdso.h>
#include <asm/debug.h>
#ifdef DEBUG
@@ -570,8 +571,9 @@ void __devinit start_secondary(void *unused)
#ifdef CONFIG_PPC64
if (system_state == SYSTEM_RUNNING)
vdso_data->processorCount++;
+
+ vdso_getcpu_init();
#endif
- ipi_call_lock();
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
/* Update sibling maps */
@@ -601,7 +603,6 @@ void __devinit start_secondary(void *unused)
of_node_put(np);
}
of_node_put(l2_cache);
- ipi_call_unlock();
local_irq_enable();
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 9eb5b9b536a7..b67db22e102d 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -706,6 +706,34 @@ static void __init vdso_setup_syscall_map(void)
}
}
+#ifdef CONFIG_PPC64
+int __cpuinit vdso_getcpu_init(void)
+{
+ unsigned long cpu, node, val;
+
+ /*
+ * SPRG3 contains the CPU in the bottom 16 bits and the NUMA node in
+ * the next 16 bits. The VDSO uses this to implement getcpu().
+ */
+ cpu = get_cpu();
+ WARN_ON_ONCE(cpu > 0xffff);
+
+ node = cpu_to_node(cpu);
+ WARN_ON_ONCE(node > 0xffff);
+
+ val = (cpu & 0xfff) | ((node & 0xffff) << 16);
+ mtspr(SPRN_SPRG3, val);
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
+ get_paca()->kvm_hstate.sprg3 = val;
+#endif
+
+ put_cpu();
+
+ return 0;
+}
+/* We need to call this before SMP init */
+early_initcall(vdso_getcpu_init);
+#endif
static int __init vdso_init(void)
{
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile
index 9a7946c41738..53e6c9b979ec 100644
--- a/arch/powerpc/kernel/vdso32/Makefile
+++ b/arch/powerpc/kernel/vdso32/Makefile
@@ -1,7 +1,9 @@
# List of files in the vdso, has to be asm only for now
-obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o
+obj-vdso32-$(CONFIG_PPC64) = getcpu.o
+obj-vdso32 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o \
+ $(obj-vdso32-y)
# Build rules
diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S
new file mode 100644
index 000000000000..47afd08c90f7
--- /dev/null
+++ b/arch/powerpc/kernel/vdso32/getcpu.S
@@ -0,0 +1,45 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/ppc_asm.h>
+#include <asm/vdso.h>
+
+ .text
+/*
+ * Exact prototype of getcpu
+ *
+ * int __kernel_getcpu(unsigned *cpu, unsigned *node);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_getcpu)
+ .cfi_startproc
+ mfspr r5,SPRN_USPRG3
+ cmpdi cr0,r3,0
+ cmpdi cr1,r4,0
+ clrlwi r6,r5,16
+ rlwinm r7,r5,16,31-15,31-0
+ beq cr0,1f
+ stw r6,0(r3)
+1: beq cr1,2f
+ stw r7,0(r4)
+2: crclr cr0*4+so
+ li r3,0 /* always success */
+ blr
+ .cfi_endproc
+V_FUNCTION_END(__kernel_getcpu)
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 0546bcd49cd0..43200ba2e570 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -147,6 +147,9 @@ VERSION
__kernel_sync_dicache_p5;
__kernel_sigtramp32;
__kernel_sigtramp_rt32;
+#ifdef CONFIG_PPC64
+ __kernel_getcpu;
+#endif
local: *;
};
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile
index 8c500d8622e4..effca9404b17 100644
--- a/arch/powerpc/kernel/vdso64/Makefile
+++ b/arch/powerpc/kernel/vdso64/Makefile
@@ -1,6 +1,6 @@
# List of files in the vdso, has to be asm only for now
-obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o
+obj-vdso64 = sigtramp.o gettimeofday.o datapage.o cacheflush.o note.o getcpu.o
# Build rules
diff --git a/arch/powerpc/kernel/vdso64/getcpu.S b/arch/powerpc/kernel/vdso64/getcpu.S
new file mode 100644
index 000000000000..47afd08c90f7
--- /dev/null
+++ b/arch/powerpc/kernel/vdso64/getcpu.S
@@ -0,0 +1,45 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/ppc_asm.h>
+#include <asm/vdso.h>
+
+ .text
+/*
+ * Exact prototype of getcpu
+ *
+ * int __kernel_getcpu(unsigned *cpu, unsigned *node);
+ *
+ */
+V_FUNCTION_BEGIN(__kernel_getcpu)
+ .cfi_startproc
+ mfspr r5,SPRN_USPRG3
+ cmpdi cr0,r3,0
+ cmpdi cr1,r4,0
+ clrlwi r6,r5,16
+ rlwinm r7,r5,16,31-15,31-0
+ beq cr0,1f
+ stw r6,0(r3)
+1: beq cr1,2f
+ stw r7,0(r4)
+2: crclr cr0*4+so
+ li r3,0 /* always success */
+ blr
+ .cfi_endproc
+V_FUNCTION_END(__kernel_getcpu)
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index 0e615404e247..e6c1758f3588 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -146,6 +146,7 @@ VERSION
__kernel_sync_dicache;
__kernel_sync_dicache_p5;
__kernel_sigtramp_rt64;
+ __kernel_getcpu;
local: *;
};
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index cb87301ccd55..3052a931f2b5 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -37,8 +37,6 @@
#include <asm/page.h>
#include <asm/hvcall.h>
-static struct bus_type vio_bus_type;
-
static struct vio_dev vio_bus_device = { /* fake "parent" device */
.name = "vio",
.type = "",
@@ -625,7 +623,7 @@ struct dma_map_ops vio_dma_mapping_ops = {
* vio_cmo_set_dev_desired - Set desired entitlement for a device
*
* @viodev: struct vio_dev for device to alter
- * @new_desired: new desired entitlement level in bytes
+ * @desired: new desired entitlement level in bytes
*
* For use by devices to request a change to their entitlement at runtime or
* through sysfs. The desired entitlement level is changed and a balancing
@@ -1262,7 +1260,7 @@ static int vio_bus_remove(struct device *dev)
/**
* vio_register_driver: - Register a new vio driver
- * @drv: The vio_driver structure to be registered.
+ * @viodrv: The vio_driver structure to be registered.
*/
int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
const char *mod_name)
@@ -1282,7 +1280,7 @@ EXPORT_SYMBOL(__vio_register_driver);
/**
* vio_unregister_driver - Remove registration of vio driver.
- * @driver: The vio_driver struct to be removed form registration
+ * @viodrv: The vio_driver struct to be removed form registration
*/
void vio_unregister_driver(struct vio_driver *viodrv)
{
@@ -1296,8 +1294,7 @@ static void __devinit vio_dev_release(struct device *dev)
struct iommu_table *tbl = get_iommu_table_base(dev);
if (tbl)
- iommu_free_table(tbl, dev->of_node ?
- dev->of_node->full_name : dev_name(dev));
+ iommu_free_table(tbl, of_node_full_name(dev->of_node));
of_node_put(dev->of_node);
kfree(to_vio_dev(dev));
}
@@ -1397,21 +1394,27 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
viodev->name = of_node->name;
viodev->dev.of_node = of_node_get(of_node);
- if (firmware_has_feature(FW_FEATURE_CMO))
- vio_cmo_set_dma_ops(viodev);
- else
- set_dma_ops(&viodev->dev, &dma_iommu_ops);
- set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
set_dev_node(&viodev->dev, of_node_to_nid(of_node));
/* init generic 'struct device' fields: */
viodev->dev.parent = &vio_bus_device.dev;
viodev->dev.bus = &vio_bus_type;
viodev->dev.release = vio_dev_release;
- /* needed to ensure proper operation of coherent allocations
- * later, in case driver doesn't set it explicitly */
- dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
- dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
+
+ if (of_get_property(viodev->dev.of_node, "ibm,my-dma-window", NULL)) {
+ if (firmware_has_feature(FW_FEATURE_CMO))
+ vio_cmo_set_dma_ops(viodev);
+ else
+ set_dma_ops(&viodev->dev, &dma_iommu_ops);
+
+ set_iommu_table_base(&viodev->dev,
+ vio_build_iommu_table(viodev));
+
+ /* needed to ensure proper operation of coherent allocations
+ * later, in case driver doesn't set it explicitly */
+ dma_set_mask(&viodev->dev, DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64));
+ }
/* register with generic device framework */
if (device_register(&viodev->dev)) {
@@ -1491,12 +1494,18 @@ static int __init vio_bus_init(void)
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_init();
+ return 0;
+}
+postcore_initcall(vio_bus_init);
+
+static int __init vio_device_init(void)
+{
vio_bus_scan_register_devices("vdevice");
vio_bus_scan_register_devices("ibm,platform-facilities");
return 0;
}
-__initcall(vio_bus_init);
+device_initcall(vio_device_init);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1509,7 +1518,7 @@ static ssize_t devspec_show(struct device *dev,
{
struct device_node *of_node = dev->of_node;
- return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
+ return sprintf(buf, "%s\n", of_node_full_name(of_node));
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
@@ -1568,7 +1577,7 @@ static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static struct bus_type vio_bus_type = {
+struct bus_type vio_bus_type = {
.name = "vio",
.dev_attrs = vio_dev_attrs,
.uevent = vio_hotplug,
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80a577517584..d03eb6f7b058 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,56 +37,121 @@
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970 63
-long kvmppc_alloc_hpt(struct kvm *kvm)
+/* Power architecture requires HPT is at least 256kB */
+#define PPC_MIN_HPT_ORDER 18
+
+long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
unsigned long hpt;
- long lpid;
struct revmap_entry *rev;
struct kvmppc_linear_info *li;
+ long order = kvm_hpt_order;
- /* Allocate guest's hashed page table */
- li = kvm_alloc_hpt();
- if (li) {
- /* using preallocated memory */
- hpt = (ulong)li->base_virt;
- kvm->arch.hpt_li = li;
- } else {
- /* using dynamic memory */
+ if (htab_orderp) {
+ order = *htab_orderp;
+ if (order < PPC_MIN_HPT_ORDER)
+ order = PPC_MIN_HPT_ORDER;
+ }
+
+ /*
+ * If the user wants a different size from default,
+ * try first to allocate it from the kernel page allocator.
+ */
+ hpt = 0;
+ if (order != kvm_hpt_order) {
hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
- __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
+ __GFP_NOWARN, order - PAGE_SHIFT);
+ if (!hpt)
+ --order;
}
+ /* Next try to allocate from the preallocated pool */
if (!hpt) {
- pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
- return -ENOMEM;
+ li = kvm_alloc_hpt();
+ if (li) {
+ hpt = (ulong)li->base_virt;
+ kvm->arch.hpt_li = li;
+ order = kvm_hpt_order;
+ }
}
+
+ /* Lastly try successively smaller sizes from the page allocator */
+ while (!hpt && order > PPC_MIN_HPT_ORDER) {
+ hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
+ __GFP_NOWARN, order - PAGE_SHIFT);
+ if (!hpt)
+ --order;
+ }
+
+ if (!hpt)
+ return -ENOMEM;
+
kvm->arch.hpt_virt = hpt;
+ kvm->arch.hpt_order = order;
+ /* HPTEs are 2**4 bytes long */
+ kvm->arch.hpt_npte = 1ul << (order - 4);
+ /* 128 (2**7) bytes in each HPTEG */
+ kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;
/* Allocate reverse map array */
- rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
+ rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
if (!rev) {
pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
goto out_freehpt;
}
kvm->arch.revmap = rev;
+ kvm->arch.sdr1 = __pa(hpt) | (order - 18);
- lpid = kvmppc_alloc_lpid();
- if (lpid < 0)
- goto out_freeboth;
+ pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
+ hpt, order, kvm->arch.lpid);
- kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
- kvm->arch.lpid = lpid;
-
- pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
+ if (htab_orderp)
+ *htab_orderp = order;
return 0;
- out_freeboth:
- vfree(rev);
out_freehpt:
- free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
+ if (kvm->arch.hpt_li)
+ kvm_release_hpt(kvm->arch.hpt_li);
+ else
+ free_pages(hpt, order - PAGE_SHIFT);
return -ENOMEM;
}
+long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
+{
+ long err = -EBUSY;
+ long order;
+
+ mutex_lock(&kvm->lock);
+ if (kvm->arch.rma_setup_done) {
+ kvm->arch.rma_setup_done = 0;
+ /* order rma_setup_done vs. vcpus_running */
+ smp_mb();
+ if (atomic_read(&kvm->arch.vcpus_running)) {
+ kvm->arch.rma_setup_done = 1;
+ goto out;
+ }
+ }
+ if (kvm->arch.hpt_virt) {
+ order = kvm->arch.hpt_order;
+ /* Set the entire HPT to 0, i.e. invalid HPTEs */
+ memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
+ /*
+ * Set the whole last_vcpu array to an invalid vcpu number.
+ * This ensures that each vcpu will flush its TLB on next entry.
+ */
+ memset(kvm->arch.last_vcpu, 0xff, sizeof(kvm->arch.last_vcpu));
+ *htab_orderp = order;
+ err = 0;
+ } else {
+ err = kvmppc_alloc_hpt(kvm, htab_orderp);
+ order = *htab_orderp;
+ }
+ out:
+ mutex_unlock(&kvm->lock);
+ return err;
+}
+
void kvmppc_free_hpt(struct kvm *kvm)
{
kvmppc_free_lpid(kvm->arch.lpid);
@@ -94,7 +159,8 @@ void kvmppc_free_hpt(struct kvm *kvm)
if (kvm->arch.hpt_li)
kvm_release_hpt(kvm->arch.hpt_li);
else
- free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
+ free_pages(kvm->arch.hpt_virt,
+ kvm->arch.hpt_order - PAGE_SHIFT);
}
/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
@@ -119,6 +185,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
unsigned long psize;
unsigned long hp0, hp1;
long ret;
+ struct kvm *kvm = vcpu->kvm;
psize = 1ul << porder;
npages = memslot->npages >> (porder - PAGE_SHIFT);
@@ -127,8 +194,8 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
if (npages > 1ul << (40 - porder))
npages = 1ul << (40 - porder);
/* Can't use more than 1 HPTE per HPTEG */
- if (npages > HPT_NPTEG)
- npages = HPT_NPTEG;
+ if (npages > kvm->arch.hpt_mask + 1)
+ npages = kvm->arch.hpt_mask + 1;
hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
@@ -138,7 +205,7 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
for (i = 0; i < npages; ++i) {
addr = i << porder;
/* can't use hpt_hash since va > 64 bits */
- hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
+ hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
/*
* We assume that the hash table is empty and no
* vcpus are using it at this stage. Since we create
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3abe1b86e583..83e929e66f9d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -56,7 +56,7 @@
/* #define EXIT_DEBUG_INT */
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
-static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
+static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
@@ -1104,11 +1104,15 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
return -EINTR;
}
- /* On the first time here, set up VRMA or RMA */
+ atomic_inc(&vcpu->kvm->arch.vcpus_running);
+ /* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
+ smp_mb();
+
+ /* On the first time here, set up HTAB and VRMA or RMA */
if (!vcpu->kvm->arch.rma_setup_done) {
- r = kvmppc_hv_setup_rma(vcpu);
+ r = kvmppc_hv_setup_htab_rma(vcpu);
if (r)
- return r;
+ goto out;
}
flush_fp_to_thread(current);
@@ -1126,6 +1130,9 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvmppc_core_prepare_to_enter(vcpu);
}
} while (r == RESUME_GUEST);
+
+ out:
+ atomic_dec(&vcpu->kvm->arch.vcpus_running);
return r;
}
@@ -1341,7 +1348,7 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
{
}
-static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
+static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
int err = 0;
struct kvm *kvm = vcpu->kvm;
@@ -1360,6 +1367,15 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
if (kvm->arch.rma_setup_done)
goto out; /* another vcpu beat us to it */
+ /* Allocate hashed page table (if not done already) and reset it */
+ if (!kvm->arch.hpt_virt) {
+ err = kvmppc_alloc_hpt(kvm, NULL);
+ if (err) {
+ pr_err("KVM: Couldn't alloc HPT\n");
+ goto out;
+ }
+ }
+
/* Look up the memslot for guest physical address 0 */
memslot = gfn_to_memslot(kvm, 0);
@@ -1471,13 +1487,14 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
int kvmppc_core_init_vm(struct kvm *kvm)
{
- long r;
- unsigned long lpcr;
+ unsigned long lpcr, lpid;
- /* Allocate hashed page table */
- r = kvmppc_alloc_hpt(kvm);
- if (r)
- return r;
+ /* Allocate the guest's logical partition ID */
+
+ lpid = kvmppc_alloc_lpid();
+ if (lpid < 0)
+ return -ENOMEM;
+ kvm->arch.lpid = lpid;
INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
@@ -1487,7 +1504,6 @@ int kvmppc_core_init_vm(struct kvm *kvm)
if (cpu_has_feature(CPU_FTR_ARCH_201)) {
/* PPC970; HID4 is effectively the LPCR */
- unsigned long lpid = kvm->arch.lpid;
kvm->arch.host_lpid = 0;
kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index e1b60f56f2a1..fb4eac290fef 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -25,6 +25,9 @@ static void __init kvm_linear_init_one(ulong size, int count, int type);
static struct kvmppc_linear_info *kvm_alloc_linear(int type);
static void kvm_release_linear(struct kvmppc_linear_info *ri);
+int kvm_hpt_order = KVM_DEFAULT_HPT_ORDER;
+EXPORT_SYMBOL_GPL(kvm_hpt_order);
+
/*************** RMA *************/
/*
@@ -209,7 +212,7 @@ static void kvm_release_linear(struct kvmppc_linear_info *ri)
void __init kvm_linear_init(void)
{
/* HPT */
- kvm_linear_init_one(1 << HPT_ORDER, kvm_hpt_count, KVM_LINEAR_HPT);
+ kvm_linear_init_one(1 << kvm_hpt_order, kvm_hpt_count, KVM_LINEAR_HPT);
/* RMA */
/* Only do this on PPC970 in HV mode */
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index cec4daddbf31..5c70d19494f9 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -237,7 +237,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
/* Find and lock the HPTEG slot to use */
do_insert:
- if (pte_index >= HPT_NPTE)
+ if (pte_index >= kvm->arch.hpt_npte)
return H_PARAMETER;
if (likely((flags & H_EXACT) == 0)) {
pte_index &= ~7UL;
@@ -352,7 +352,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long v, r, rb;
struct revmap_entry *rev;
- if (pte_index >= HPT_NPTE)
+ if (pte_index >= kvm->arch.hpt_npte)
return H_PARAMETER;
hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
@@ -419,7 +419,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
i = 4;
break;
}
- if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) {
+ if (req != 1 || flags == 3 ||
+ pte_index >= kvm->arch.hpt_npte) {
/* parameter error */
args[j] = ((0xa0 | flags) << 56) + pte_index;
ret = H_PARAMETER;
@@ -521,7 +522,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
struct revmap_entry *rev;
unsigned long v, r, rb, mask, bits;
- if (pte_index >= HPT_NPTE)
+ if (pte_index >= kvm->arch.hpt_npte)
return H_PARAMETER;
hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
@@ -583,7 +584,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
int i, n = 1;
struct revmap_entry *rev = NULL;
- if (pte_index >= HPT_NPTE)
+ if (pte_index >= kvm->arch.hpt_npte)
return H_PARAMETER;
if (flags & H_READ_4) {
pte_index &= ~3;
@@ -678,7 +679,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
somask = (1UL << 28) - 1;
vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
}
- hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
+ hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
avpn = slb_v & ~(somask >> 16); /* also includes B */
avpn |= (eaddr & somask) >> 16;
@@ -723,7 +724,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
if (val & HPTE_V_SECONDARY)
break;
val |= HPTE_V_SECONDARY;
- hash = hash ^ HPT_HASH_MASK;
+ hash = hash ^ kvm->arch.hpt_mask;
}
return -1;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a84aafce2a12..5a84c8d3d040 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -72,9 +72,6 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
mtsrr1 r6
RFI
-#define ULONG_SIZE 8
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
-
/******************************************************************************
* *
* Entry code *
@@ -206,24 +203,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/* Load up FP, VMX and VSX registers */
bl kvmppc_load_fp
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+ ld r14, VCPU_GPR(R14)(r4)
+ ld r15, VCPU_GPR(R15)(r4)
+ ld r16, VCPU_GPR(R16)(r4)
+ ld r17, VCPU_GPR(R17)(r4)
+ ld r18, VCPU_GPR(R18)(r4)
+ ld r19, VCPU_GPR(R19)(r4)
+ ld r20, VCPU_GPR(R20)(r4)
+ ld r21, VCPU_GPR(R21)(r4)
+ ld r22, VCPU_GPR(R22)(r4)
+ ld r23, VCPU_GPR(R23)(r4)
+ ld r24, VCPU_GPR(R24)(r4)
+ ld r25, VCPU_GPR(R25)(r4)
+ ld r26, VCPU_GPR(R26)(r4)
+ ld r27, VCPU_GPR(R27)(r4)
+ ld r28, VCPU_GPR(R28)(r4)
+ ld r29, VCPU_GPR(R29)(r4)
+ ld r30, VCPU_GPR(R30)(r4)
+ ld r31, VCPU_GPR(R31)(r4)
BEGIN_FTR_SECTION
/* Switch DSCR to guest value */
@@ -547,21 +544,21 @@ fast_guest_return:
mtlr r5
mtcr r6
- ld r0, VCPU_GPR(r0)(r4)
- ld r1, VCPU_GPR(r1)(r4)
- ld r2, VCPU_GPR(r2)(r4)
- ld r3, VCPU_GPR(r3)(r4)
- ld r5, VCPU_GPR(r5)(r4)
- ld r6, VCPU_GPR(r6)(r4)
- ld r7, VCPU_GPR(r7)(r4)
- ld r8, VCPU_GPR(r8)(r4)
- ld r9, VCPU_GPR(r9)(r4)
- ld r10, VCPU_GPR(r10)(r4)
- ld r11, VCPU_GPR(r11)(r4)
- ld r12, VCPU_GPR(r12)(r4)
- ld r13, VCPU_GPR(r13)(r4)
-
- ld r4, VCPU_GPR(r4)(r4)
+ ld r0, VCPU_GPR(R0)(r4)
+ ld r1, VCPU_GPR(R1)(r4)
+ ld r2, VCPU_GPR(R2)(r4)
+ ld r3, VCPU_GPR(R3)(r4)
+ ld r5, VCPU_GPR(R5)(r4)
+ ld r6, VCPU_GPR(R6)(r4)
+ ld r7, VCPU_GPR(R7)(r4)
+ ld r8, VCPU_GPR(R8)(r4)
+ ld r9, VCPU_GPR(R9)(r4)
+ ld r10, VCPU_GPR(R10)(r4)
+ ld r11, VCPU_GPR(R11)(r4)
+ ld r12, VCPU_GPR(R12)(r4)
+ ld r13, VCPU_GPR(R13)(r4)
+
+ ld r4, VCPU_GPR(R4)(r4)
hrfid
b .
@@ -590,22 +587,22 @@ kvmppc_interrupt:
/* Save registers */
- std r0, VCPU_GPR(r0)(r9)
- std r1, VCPU_GPR(r1)(r9)
- std r2, VCPU_GPR(r2)(r9)
- std r3, VCPU_GPR(r3)(r9)
- std r4, VCPU_GPR(r4)(r9)
- std r5, VCPU_GPR(r5)(r9)
- std r6, VCPU_GPR(r6)(r9)
- std r7, VCPU_GPR(r7)(r9)
- std r8, VCPU_GPR(r8)(r9)
+ std r0, VCPU_GPR(R0)(r9)
+ std r1, VCPU_GPR(R1)(r9)
+ std r2, VCPU_GPR(R2)(r9)
+ std r3, VCPU_GPR(R3)(r9)
+ std r4, VCPU_GPR(R4)(r9)
+ std r5, VCPU_GPR(R5)(r9)
+ std r6, VCPU_GPR(R6)(r9)
+ std r7, VCPU_GPR(R7)(r9)
+ std r8, VCPU_GPR(R8)(r9)
ld r0, HSTATE_HOST_R2(r13)
- std r0, VCPU_GPR(r9)(r9)
- std r10, VCPU_GPR(r10)(r9)
- std r11, VCPU_GPR(r11)(r9)
+ std r0, VCPU_GPR(R9)(r9)
+ std r10, VCPU_GPR(R10)(r9)
+ std r11, VCPU_GPR(R11)(r9)
ld r3, HSTATE_SCRATCH0(r13)
lwz r4, HSTATE_SCRATCH1(r13)
- std r3, VCPU_GPR(r12)(r9)
+ std r3, VCPU_GPR(R12)(r9)
stw r4, VCPU_CR(r9)
/* Restore R1/R2 so we can handle faults */
@@ -626,7 +623,7 @@ kvmppc_interrupt:
GET_SCRATCH0(r3)
mflr r4
- std r3, VCPU_GPR(r13)(r9)
+ std r3, VCPU_GPR(R13)(r9)
std r4, VCPU_LR(r9)
/* Unset guest mode */
@@ -810,7 +807,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
lwz r3,VCORE_NAPPING_THREADS(r5)
lwz r4,VCPU_PTID(r9)
li r0,1
- sldi r0,r0,r4
+ sld r0,r0,r4
andc. r3,r3,r0 /* no sense IPI'ing ourselves */
beq 43f
mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
@@ -968,24 +965,24 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Save non-volatile GPRs */
- std r14, VCPU_GPR(r14)(r9)
- std r15, VCPU_GPR(r15)(r9)
- std r16, VCPU_GPR(r16)(r9)
- std r17, VCPU_GPR(r17)(r9)
- std r18, VCPU_GPR(r18)(r9)
- std r19, VCPU_GPR(r19)(r9)
- std r20, VCPU_GPR(r20)(r9)
- std r21, VCPU_GPR(r21)(r9)
- std r22, VCPU_GPR(r22)(r9)
- std r23, VCPU_GPR(r23)(r9)
- std r24, VCPU_GPR(r24)(r9)
- std r25, VCPU_GPR(r25)(r9)
- std r26, VCPU_GPR(r26)(r9)
- std r27, VCPU_GPR(r27)(r9)
- std r28, VCPU_GPR(r28)(r9)
- std r29, VCPU_GPR(r29)(r9)
- std r30, VCPU_GPR(r30)(r9)
- std r31, VCPU_GPR(r31)(r9)
+ std r14, VCPU_GPR(R14)(r9)
+ std r15, VCPU_GPR(R15)(r9)
+ std r16, VCPU_GPR(R16)(r9)
+ std r17, VCPU_GPR(R17)(r9)
+ std r18, VCPU_GPR(R18)(r9)
+ std r19, VCPU_GPR(R19)(r9)
+ std r20, VCPU_GPR(R20)(r9)
+ std r21, VCPU_GPR(R21)(r9)
+ std r22, VCPU_GPR(R22)(r9)
+ std r23, VCPU_GPR(R23)(r9)
+ std r24, VCPU_GPR(R24)(r9)
+ std r25, VCPU_GPR(R25)(r9)
+ std r26, VCPU_GPR(R26)(r9)
+ std r27, VCPU_GPR(R27)(r9)
+ std r28, VCPU_GPR(R28)(r9)
+ std r29, VCPU_GPR(R29)(r9)
+ std r30, VCPU_GPR(R30)(r9)
+ std r31, VCPU_GPR(R31)(r9)
/* Save SPRGs */
mfspr r3, SPRN_SPRG0
@@ -1067,6 +1064,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
mtspr SPRN_DABR,r5
mtspr SPRN_DABRX,r6
+ /* Restore SPRG3 */
+ ld r3,HSTATE_SPRG3(r13)
+ mtspr SPRN_SPRG3,r3
+
/*
* Reload DEC. HDEC interrupts were disabled when
* we reloaded the host's LPCR value.
@@ -1160,7 +1161,7 @@ kvmppc_hdsi:
andi. r0, r11, MSR_DR /* data relocation enabled? */
beq 3f
clrrdi r0, r4, 28
- PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
+ PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
bne 1f /* if no SLB entry found */
4: std r4, VCPU_FAULT_DAR(r9)
stw r6, VCPU_FAULT_DSISR(r9)
@@ -1234,7 +1235,7 @@ kvmppc_hisi:
andi. r0, r11, MSR_IR /* instruction relocation enabled? */
beq 3f
clrrdi r0, r10, 28
- PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
+ PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
bne 1f /* if no SLB entry found */
4:
/* Search the hash table. */
@@ -1278,7 +1279,7 @@ kvmppc_hisi:
*/
.globl hcall_try_real_mode
hcall_try_real_mode:
- ld r3,VCPU_GPR(r3)(r9)
+ ld r3,VCPU_GPR(R3)(r9)
andi. r0,r11,MSR_PR
bne hcall_real_cont
clrrdi r3,r3,2
@@ -1291,12 +1292,12 @@ hcall_try_real_mode:
add r3,r3,r4
mtctr r3
mr r3,r9 /* get vcpu pointer */
- ld r4,VCPU_GPR(r4)(r9)
+ ld r4,VCPU_GPR(R4)(r9)
bctrl
cmpdi r3,H_TOO_HARD
beq hcall_real_fallback
ld r4,HSTATE_KVM_VCPU(r13)
- std r3,VCPU_GPR(r3)(r4)
+ std r3,VCPU_GPR(R3)(r4)
ld r10,VCPU_PC(r4)
ld r11,VCPU_MSR(r4)
b fast_guest_return
@@ -1424,7 +1425,7 @@ _GLOBAL(kvmppc_h_cede)
li r0,0 /* set trap to 0 to say hcall is handled */
stw r0,VCPU_TRAP(r3)
li r0,H_SUCCESS
- std r0,VCPU_GPR(r3)(r3)
+ std r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
b 2f /* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
@@ -1443,7 +1444,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
addi r6,r5,VCORE_NAPPING_THREADS
31: lwarx r4,0,r6
or r4,r4,r0
- PPC_POPCNTW(r7,r4)
+ PPC_POPCNTW(R7,R4)
cmpw r7,r8
bge 2f
stwcx. r4,0,r6
@@ -1464,24 +1465,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
* DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
*/
/* Save non-volatile GPRs */
- std r14, VCPU_GPR(r14)(r3)
- std r15, VCPU_GPR(r15)(r3)
- std r16, VCPU_GPR(r16)(r3)
- std r17, VCPU_GPR(r17)(r3)
- std r18, VCPU_GPR(r18)(r3)
- std r19, VCPU_GPR(r19)(r3)
- std r20, VCPU_GPR(r20)(r3)
- std r21, VCPU_GPR(r21)(r3)
- std r22, VCPU_GPR(r22)(r3)
- std r23, VCPU_GPR(r23)(r3)
- std r24, VCPU_GPR(r24)(r3)
- std r25, VCPU_GPR(r25)(r3)
- std r26, VCPU_GPR(r26)(r3)
- std r27, VCPU_GPR(r27)(r3)
- std r28, VCPU_GPR(r28)(r3)
- std r29, VCPU_GPR(r29)(r3)
- std r30, VCPU_GPR(r30)(r3)
- std r31, VCPU_GPR(r31)(r3)
+ std r14, VCPU_GPR(R14)(r3)
+ std r15, VCPU_GPR(R15)(r3)
+ std r16, VCPU_GPR(R16)(r3)
+ std r17, VCPU_GPR(R17)(r3)
+ std r18, VCPU_GPR(R18)(r3)
+ std r19, VCPU_GPR(R19)(r3)
+ std r20, VCPU_GPR(R20)(r3)
+ std r21, VCPU_GPR(R21)(r3)
+ std r22, VCPU_GPR(R22)(r3)
+ std r23, VCPU_GPR(R23)(r3)
+ std r24, VCPU_GPR(R24)(r3)
+ std r25, VCPU_GPR(R25)(r3)
+ std r26, VCPU_GPR(R26)(r3)
+ std r27, VCPU_GPR(R27)(r3)
+ std r28, VCPU_GPR(R28)(r3)
+ std r29, VCPU_GPR(R29)(r3)
+ std r30, VCPU_GPR(R30)(r3)
+ std r31, VCPU_GPR(R31)(r3)
/* save FP state */
bl .kvmppc_save_fp
@@ -1513,24 +1514,24 @@ kvm_end_cede:
bl kvmppc_load_fp
/* Load NV GPRS */
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+ ld r14, VCPU_GPR(R14)(r4)
+ ld r15, VCPU_GPR(R15)(r4)
+ ld r16, VCPU_GPR(R16)(r4)
+ ld r17, VCPU_GPR(R17)(r4)
+ ld r18, VCPU_GPR(R18)(r4)
+ ld r19, VCPU_GPR(R19)(r4)
+ ld r20, VCPU_GPR(R20)(r4)
+ ld r21, VCPU_GPR(R21)(r4)
+ ld r22, VCPU_GPR(R22)(r4)
+ ld r23, VCPU_GPR(R23)(r4)
+ ld r24, VCPU_GPR(R24)(r4)
+ ld r25, VCPU_GPR(R25)(r4)
+ ld r26, VCPU_GPR(R26)(r4)
+ ld r27, VCPU_GPR(R27)(r4)
+ ld r28, VCPU_GPR(R28)(r4)
+ ld r29, VCPU_GPR(R29)(r4)
+ ld r30, VCPU_GPR(R30)(r4)
+ ld r31, VCPU_GPR(R31)(r4)
/* clear our bit in vcore->napping_threads */
33: ld r5,HSTATE_KVM_VCORE(r13)
@@ -1649,7 +1650,7 @@ BEGIN_FTR_SECTION
reg = 0
.rept 32
li r6,reg*16+VCPU_VSRS
- STXVD2X(reg,r6,r3)
+ STXVD2X(reg,R6,R3)
reg = reg + 1
.endr
FTR_SECTION_ELSE
@@ -1711,7 +1712,7 @@ BEGIN_FTR_SECTION
reg = 0
.rept 32
li r7,reg*16+VCPU_VSRS
- LXVD2X(reg,r7,r4)
+ LXVD2X(reg,R7,R4)
reg = reg + 1
.endr
FTR_SECTION_ELSE
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 3e35383bdb21..48cbbf862958 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,38 +25,30 @@
#include <asm/exception-64s.h>
#if defined(CONFIG_PPC_BOOK3S_64)
-
-#define ULONG_SIZE 8
#define FUNC(name) GLUE(.,name)
-
#elif defined(CONFIG_PPC_BOOK3S_32)
-
-#define ULONG_SIZE 4
#define FUNC(name) name
-
#endif /* CONFIG_PPC_BOOK3S_XX */
-
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_LOAD_NVGPRS(vcpu) \
- PPC_LL r14, VCPU_GPR(r14)(vcpu); \
- PPC_LL r15, VCPU_GPR(r15)(vcpu); \
- PPC_LL r16, VCPU_GPR(r16)(vcpu); \
- PPC_LL r17, VCPU_GPR(r17)(vcpu); \
- PPC_LL r18, VCPU_GPR(r18)(vcpu); \
- PPC_LL r19, VCPU_GPR(r19)(vcpu); \
- PPC_LL r20, VCPU_GPR(r20)(vcpu); \
- PPC_LL r21, VCPU_GPR(r21)(vcpu); \
- PPC_LL r22, VCPU_GPR(r22)(vcpu); \
- PPC_LL r23, VCPU_GPR(r23)(vcpu); \
- PPC_LL r24, VCPU_GPR(r24)(vcpu); \
- PPC_LL r25, VCPU_GPR(r25)(vcpu); \
- PPC_LL r26, VCPU_GPR(r26)(vcpu); \
- PPC_LL r27, VCPU_GPR(r27)(vcpu); \
- PPC_LL r28, VCPU_GPR(r28)(vcpu); \
- PPC_LL r29, VCPU_GPR(r29)(vcpu); \
- PPC_LL r30, VCPU_GPR(r30)(vcpu); \
- PPC_LL r31, VCPU_GPR(r31)(vcpu); \
+ PPC_LL r14, VCPU_GPR(R14)(vcpu); \
+ PPC_LL r15, VCPU_GPR(R15)(vcpu); \
+ PPC_LL r16, VCPU_GPR(R16)(vcpu); \
+ PPC_LL r17, VCPU_GPR(R17)(vcpu); \
+ PPC_LL r18, VCPU_GPR(R18)(vcpu); \
+ PPC_LL r19, VCPU_GPR(R19)(vcpu); \
+ PPC_LL r20, VCPU_GPR(R20)(vcpu); \
+ PPC_LL r21, VCPU_GPR(R21)(vcpu); \
+ PPC_LL r22, VCPU_GPR(R22)(vcpu); \
+ PPC_LL r23, VCPU_GPR(R23)(vcpu); \
+ PPC_LL r24, VCPU_GPR(R24)(vcpu); \
+ PPC_LL r25, VCPU_GPR(R25)(vcpu); \
+ PPC_LL r26, VCPU_GPR(R26)(vcpu); \
+ PPC_LL r27, VCPU_GPR(R27)(vcpu); \
+ PPC_LL r28, VCPU_GPR(R28)(vcpu); \
+ PPC_LL r29, VCPU_GPR(R29)(vcpu); \
+ PPC_LL r30, VCPU_GPR(R30)(vcpu); \
+ PPC_LL r31, VCPU_GPR(R31)(vcpu); \
/*****************************************************************************
* *
@@ -131,24 +123,24 @@ kvmppc_handler_highmem:
/* R7 = vcpu */
PPC_LL r7, GPR4(r1)
- PPC_STL r14, VCPU_GPR(r14)(r7)
- PPC_STL r15, VCPU_GPR(r15)(r7)
- PPC_STL r16, VCPU_GPR(r16)(r7)
- PPC_STL r17, VCPU_GPR(r17)(r7)
- PPC_STL r18, VCPU_GPR(r18)(r7)
- PPC_STL r19, VCPU_GPR(r19)(r7)
- PPC_STL r20, VCPU_GPR(r20)(r7)
- PPC_STL r21, VCPU_GPR(r21)(r7)
- PPC_STL r22, VCPU_GPR(r22)(r7)
- PPC_STL r23, VCPU_GPR(r23)(r7)
- PPC_STL r24, VCPU_GPR(r24)(r7)
- PPC_STL r25, VCPU_GPR(r25)(r7)
- PPC_STL r26, VCPU_GPR(r26)(r7)
- PPC_STL r27, VCPU_GPR(r27)(r7)
- PPC_STL r28, VCPU_GPR(r28)(r7)
- PPC_STL r29, VCPU_GPR(r29)(r7)
- PPC_STL r30, VCPU_GPR(r30)(r7)
- PPC_STL r31, VCPU_GPR(r31)(r7)
+ PPC_STL r14, VCPU_GPR(R14)(r7)
+ PPC_STL r15, VCPU_GPR(R15)(r7)
+ PPC_STL r16, VCPU_GPR(R16)(r7)
+ PPC_STL r17, VCPU_GPR(R17)(r7)
+ PPC_STL r18, VCPU_GPR(R18)(r7)
+ PPC_STL r19, VCPU_GPR(R19)(r7)
+ PPC_STL r20, VCPU_GPR(R20)(r7)
+ PPC_STL r21, VCPU_GPR(R21)(r7)
+ PPC_STL r22, VCPU_GPR(R22)(r7)
+ PPC_STL r23, VCPU_GPR(R23)(r7)
+ PPC_STL r24, VCPU_GPR(R24)(r7)
+ PPC_STL r25, VCPU_GPR(R25)(r7)
+ PPC_STL r26, VCPU_GPR(R26)(r7)
+ PPC_STL r27, VCPU_GPR(R27)(r7)
+ PPC_STL r28, VCPU_GPR(R28)(r7)
+ PPC_STL r29, VCPU_GPR(R29)(r7)
+ PPC_STL r30, VCPU_GPR(R30)(r7)
+ PPC_STL r31, VCPU_GPR(R31)(r7)
/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
mr r5, r12
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 3ff9013d6e79..ee02b30878ed 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -241,6 +241,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_PUT_TCE:
return kvmppc_h_pr_put_tce(vcpu);
case H_CEDE:
+ vcpu->arch.shared->msr |= MSR_EE;
kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 34187585c507..ab523f3c1731 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -37,7 +37,6 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define FUNC(name) GLUE(.,name)
-#define MTMSR_EERI(reg) mtmsrd (reg),1
.globl kvmppc_skip_interrupt
kvmppc_skip_interrupt:
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 798491a268b3..1abe4788191a 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -23,7 +23,6 @@
#define GET_SHADOW_VCPU(reg) \
mr reg, r13
-#define MTMSR_EERI(reg) mtmsrd (reg),1
#elif defined(CONFIG_PPC_BOOK3S_32)
@@ -31,7 +30,6 @@
tophys(reg, r2); \
lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \
tophys(reg, reg)
-#define MTMSR_EERI(reg) mtmsr (reg)
#endif
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 72f13f4a06e0..d25a097c852b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -612,6 +612,12 @@ static void kvmppc_fill_pt_regs(struct pt_regs *regs)
regs->link = lr;
}
+/*
+ * For interrupts needed to be handled by host interrupt handlers,
+ * corresponding host handler are called from here in similar way
+ * (but not exact) as they are called from low level handler
+ * (such as from arch/powerpc/kernel/head_fsl_booke.S).
+ */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
unsigned int exit_nr)
{
@@ -639,6 +645,17 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
kvmppc_fill_pt_regs(&regs);
performance_monitor_exception(&regs);
break;
+ case BOOKE_INTERRUPT_WATCHDOG:
+ kvmppc_fill_pt_regs(&regs);
+#ifdef CONFIG_BOOKE_WDT
+ WatchdogException(&regs);
+#else
+ unknown_exception(&regs);
+#endif
+ break;
+ case BOOKE_INTERRUPT_CRITICAL:
+ unknown_exception(&regs);
+ break;
}
}
@@ -683,6 +700,10 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
+ case BOOKE_INTERRUPT_WATCHDOG:
+ r = RESUME_GUEST;
+ break;
+
case BOOKE_INTERRUPT_DOORBELL:
kvmppc_account_exit(vcpu, DBELL_EXITS);
r = RESUME_GUEST;
@@ -1267,6 +1288,11 @@ void kvmppc_decrementer_func(unsigned long data)
{
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+ if (vcpu->arch.tcr & TCR_ARE) {
+ vcpu->arch.dec = vcpu->arch.decar;
+ kvmppc_emulate_dec(vcpu);
+ }
+
kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 6c76397f2af4..12834bb608ab 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -24,6 +24,7 @@
#include "booke.h"
#define OP_19_XOP_RFI 50
+#define OP_19_XOP_RFCI 51
#define OP_31_XOP_MFMSR 83
#define OP_31_XOP_WRTEE 131
@@ -36,6 +37,12 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
}
+static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pc = vcpu->arch.csrr0;
+ kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
+}
+
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
@@ -52,6 +59,12 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
*advance = 0;
break;
+ case OP_19_XOP_RFCI:
+ kvmppc_emul_rfci(vcpu);
+ kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS);
+ *advance = 0;
+ break;
+
default:
emulated = EMULATE_FAIL;
break;
@@ -113,6 +126,12 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_ESR:
vcpu->arch.shared->esr = spr_val;
break;
+ case SPRN_CSRR0:
+ vcpu->arch.csrr0 = spr_val;
+ break;
+ case SPRN_CSRR1:
+ vcpu->arch.csrr1 = spr_val;
+ break;
case SPRN_DBCR0:
vcpu->arch.dbcr0 = spr_val;
break;
@@ -129,6 +148,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
kvmppc_set_tcr(vcpu, spr_val);
break;
+ case SPRN_DECAR:
+ vcpu->arch.decar = spr_val;
+ break;
/*
* Note: SPRG4-7 are user-readable.
* These values are loaded into the real SPRGs when resuming the
@@ -229,6 +251,12 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
case SPRN_ESR:
*spr_val = vcpu->arch.shared->esr;
break;
+ case SPRN_CSRR0:
+ *spr_val = vcpu->arch.csrr0;
+ break;
+ case SPRN_CSRR1:
+ *spr_val = vcpu->arch.csrr1;
+ break;
case SPRN_DBCR0:
*spr_val = vcpu->arch.dbcr0;
break;
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 8feec2ff3928..bb46b32f9813 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -25,8 +25,6 @@
#include <asm/page.h>
#include <asm/asm-offsets.h>
-#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
-
/* The host stack layout: */
#define HOST_R1 0 /* Implied by stwu. */
#define HOST_CALLEE_LR 4
@@ -36,8 +34,9 @@
#define HOST_R2 12
#define HOST_CR 16
#define HOST_NV_GPRS 20
-#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
-#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
+#define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
+#define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n)
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
@@ -53,16 +52,21 @@
(1<<BOOKE_INTERRUPT_PROGRAM) | \
(1<<BOOKE_INTERRUPT_DTLB_MISS))
-.macro KVM_HANDLER ivor_nr
+.macro KVM_HANDLER ivor_nr scratch srr0
_GLOBAL(kvmppc_handler_\ivor_nr)
/* Get pointer to vcpu and record exit number. */
- mtspr SPRN_SPRG_WSCRATCH0, r4
+ mtspr \scratch , r4
mfspr r4, SPRN_SPRG_RVCPU
- stw r5, VCPU_GPR(r5)(r4)
- stw r6, VCPU_GPR(r6)(r4)
+ stw r3, VCPU_GPR(R3)(r4)
+ stw r5, VCPU_GPR(R5)(r4)
+ stw r6, VCPU_GPR(R6)(r4)
+ mfspr r3, \scratch
mfctr r5
- lis r6, kvmppc_resume_host@h
+ stw r3, VCPU_GPR(R4)(r4)
stw r5, VCPU_CTR(r4)
+ mfspr r3, \srr0
+ lis r6, kvmppc_resume_host@h
+ stw r3, VCPU_PC(r4)
li r5, \ivor_nr
ori r6, r6, kvmppc_resume_host@l
mtctr r6
@@ -70,42 +74,40 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
.endm
_GLOBAL(kvmppc_handlers_start)
-KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
-KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
-KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
-KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
-KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
-KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
-KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
-KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
-KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
-KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
-KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
-KVM_HANDLER BOOKE_INTERRUPT_FIT
-KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
-KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
-KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
-KVM_HANDLER BOOKE_INTERRUPT_DEBUG
-KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL
-KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA
-KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND
+KVM_HANDLER BOOKE_INTERRUPT_CRITICAL SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK SPRN_SPRG_RSCRATCH_MC SPRN_MCSRR0
+KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_PROGRAM SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_SYSCALL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_FIT SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_DEBUG SPRN_SPRG_RSCRATCH_CRIT SPRN_CSRR0
+KVM_HANDLER BOOKE_INTERRUPT_SPE_UNAVAIL SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_DATA SPRN_SPRG_RSCRATCH0 SPRN_SRR0
+KVM_HANDLER BOOKE_INTERRUPT_SPE_FP_ROUND SPRN_SPRG_RSCRATCH0 SPRN_SRR0
_GLOBAL(kvmppc_handler_len)
.long kvmppc_handler_1 - kvmppc_handler_0
-
/* Registers:
* SPRG_SCRATCH0: guest r4
* r4: vcpu pointer
* r5: KVM exit number
*/
_GLOBAL(kvmppc_resume_host)
- stw r3, VCPU_GPR(r3)(r4)
mfcr r3
stw r3, VCPU_CR(r4)
- stw r7, VCPU_GPR(r7)(r4)
- stw r8, VCPU_GPR(r8)(r4)
- stw r9, VCPU_GPR(r9)(r4)
+ stw r7, VCPU_GPR(R7)(r4)
+ stw r8, VCPU_GPR(R8)(r4)
+ stw r9, VCPU_GPR(R9)(r4)
li r6, 1
slw r6, r6, r5
@@ -135,23 +137,23 @@ _GLOBAL(kvmppc_resume_host)
isync
stw r9, VCPU_LAST_INST(r4)
- stw r15, VCPU_GPR(r15)(r4)
- stw r16, VCPU_GPR(r16)(r4)
- stw r17, VCPU_GPR(r17)(r4)
- stw r18, VCPU_GPR(r18)(r4)
- stw r19, VCPU_GPR(r19)(r4)
- stw r20, VCPU_GPR(r20)(r4)
- stw r21, VCPU_GPR(r21)(r4)
- stw r22, VCPU_GPR(r22)(r4)
- stw r23, VCPU_GPR(r23)(r4)
- stw r24, VCPU_GPR(r24)(r4)
- stw r25, VCPU_GPR(r25)(r4)
- stw r26, VCPU_GPR(r26)(r4)
- stw r27, VCPU_GPR(r27)(r4)
- stw r28, VCPU_GPR(r28)(r4)
- stw r29, VCPU_GPR(r29)(r4)
- stw r30, VCPU_GPR(r30)(r4)
- stw r31, VCPU_GPR(r31)(r4)
+ stw r15, VCPU_GPR(R15)(r4)
+ stw r16, VCPU_GPR(R16)(r4)
+ stw r17, VCPU_GPR(R17)(r4)
+ stw r18, VCPU_GPR(R18)(r4)
+ stw r19, VCPU_GPR(R19)(r4)
+ stw r20, VCPU_GPR(R20)(r4)
+ stw r21, VCPU_GPR(R21)(r4)
+ stw r22, VCPU_GPR(R22)(r4)
+ stw r23, VCPU_GPR(R23)(r4)
+ stw r24, VCPU_GPR(R24)(r4)
+ stw r25, VCPU_GPR(R25)(r4)
+ stw r26, VCPU_GPR(R26)(r4)
+ stw r27, VCPU_GPR(R27)(r4)
+ stw r28, VCPU_GPR(R28)(r4)
+ stw r29, VCPU_GPR(R29)(r4)
+ stw r30, VCPU_GPR(R30)(r4)
+ stw r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:
/* Also grab DEAR and ESR before the host can clobber them. */
@@ -169,22 +171,18 @@ _GLOBAL(kvmppc_resume_host)
..skip_esr:
/* Save remaining volatile guest register state to vcpu. */
- stw r0, VCPU_GPR(r0)(r4)
- stw r1, VCPU_GPR(r1)(r4)
- stw r2, VCPU_GPR(r2)(r4)
- stw r10, VCPU_GPR(r10)(r4)
- stw r11, VCPU_GPR(r11)(r4)
- stw r12, VCPU_GPR(r12)(r4)
- stw r13, VCPU_GPR(r13)(r4)
- stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
+ stw r0, VCPU_GPR(R0)(r4)
+ stw r1, VCPU_GPR(R1)(r4)
+ stw r2, VCPU_GPR(R2)(r4)
+ stw r10, VCPU_GPR(R10)(r4)
+ stw r11, VCPU_GPR(R11)(r4)
+ stw r12, VCPU_GPR(R12)(r4)
+ stw r13, VCPU_GPR(R13)(r4)
+ stw r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
mflr r3
stw r3, VCPU_LR(r4)
mfxer r3
stw r3, VCPU_XER(r4)
- mfspr r3, SPRN_SPRG_RSCRATCH0
- stw r3, VCPU_GPR(r4)(r4)
- mfspr r3, SPRN_SRR0
- stw r3, VCPU_PC(r4)
/* Restore host stack pointer and PID before IVPR, since the host
* exception handlers use them. */
@@ -214,28 +212,28 @@ _GLOBAL(kvmppc_resume_host)
/* Restore vcpu pointer and the nonvolatiles we used. */
mr r4, r14
- lwz r14, VCPU_GPR(r14)(r4)
+ lwz r14, VCPU_GPR(R14)(r4)
/* Sometimes instruction emulation must restore complete GPR state. */
andi. r5, r3, RESUME_FLAG_NV
beq ..skip_nv_load
- lwz r15, VCPU_GPR(r15)(r4)
- lwz r16, VCPU_GPR(r16)(r4)
- lwz r17, VCPU_GPR(r17)(r4)
- lwz r18, VCPU_GPR(r18)(r4)
- lwz r19, VCPU_GPR(r19)(r4)
- lwz r20, VCPU_GPR(r20)(r4)
- lwz r21, VCPU_GPR(r21)(r4)
- lwz r22, VCPU_GPR(r22)(r4)
- lwz r23, VCPU_GPR(r23)(r4)
- lwz r24, VCPU_GPR(r24)(r4)
- lwz r25, VCPU_GPR(r25)(r4)
- lwz r26, VCPU_GPR(r26)(r4)
- lwz r27, VCPU_GPR(r27)(r4)
- lwz r28, VCPU_GPR(r28)(r4)
- lwz r29, VCPU_GPR(r29)(r4)
- lwz r30, VCPU_GPR(r30)(r4)
- lwz r31, VCPU_GPR(r31)(r4)
+ lwz r15, VCPU_GPR(R15)(r4)
+ lwz r16, VCPU_GPR(R16)(r4)
+ lwz r17, VCPU_GPR(R17)(r4)
+ lwz r18, VCPU_GPR(R18)(r4)
+ lwz r19, VCPU_GPR(R19)(r4)
+ lwz r20, VCPU_GPR(R20)(r4)
+ lwz r21, VCPU_GPR(R21)(r4)
+ lwz r22, VCPU_GPR(R22)(r4)
+ lwz r23, VCPU_GPR(R23)(r4)
+ lwz r24, VCPU_GPR(R24)(r4)
+ lwz r25, VCPU_GPR(R25)(r4)
+ lwz r26, VCPU_GPR(R26)(r4)
+ lwz r27, VCPU_GPR(R27)(r4)
+ lwz r28, VCPU_GPR(R28)(r4)
+ lwz r29, VCPU_GPR(R29)(r4)
+ lwz r30, VCPU_GPR(R30)(r4)
+ lwz r31, VCPU_GPR(R31)(r4)
..skip_nv_load:
/* Should we return to the guest? */
@@ -257,43 +255,43 @@ heavyweight_exit:
/* We already saved guest volatile register state; now save the
* non-volatiles. */
- stw r15, VCPU_GPR(r15)(r4)
- stw r16, VCPU_GPR(r16)(r4)
- stw r17, VCPU_GPR(r17)(r4)
- stw r18, VCPU_GPR(r18)(r4)
- stw r19, VCPU_GPR(r19)(r4)
- stw r20, VCPU_GPR(r20)(r4)
- stw r21, VCPU_GPR(r21)(r4)
- stw r22, VCPU_GPR(r22)(r4)
- stw r23, VCPU_GPR(r23)(r4)
- stw r24, VCPU_GPR(r24)(r4)
- stw r25, VCPU_GPR(r25)(r4)
- stw r26, VCPU_GPR(r26)(r4)
- stw r27, VCPU_GPR(r27)(r4)
- stw r28, VCPU_GPR(r28)(r4)
- stw r29, VCPU_GPR(r29)(r4)
- stw r30, VCPU_GPR(r30)(r4)
- stw r31, VCPU_GPR(r31)(r4)
+ stw r15, VCPU_GPR(R15)(r4)
+ stw r16, VCPU_GPR(R16)(r4)
+ stw r17, VCPU_GPR(R17)(r4)
+ stw r18, VCPU_GPR(R18)(r4)
+ stw r19, VCPU_GPR(R19)(r4)
+ stw r20, VCPU_GPR(R20)(r4)
+ stw r21, VCPU_GPR(R21)(r4)
+ stw r22, VCPU_GPR(R22)(r4)
+ stw r23, VCPU_GPR(R23)(r4)
+ stw r24, VCPU_GPR(R24)(r4)
+ stw r25, VCPU_GPR(R25)(r4)
+ stw r26, VCPU_GPR(R26)(r4)
+ stw r27, VCPU_GPR(R27)(r4)
+ stw r28, VCPU_GPR(R28)(r4)
+ stw r29, VCPU_GPR(R29)(r4)
+ stw r30, VCPU_GPR(R30)(r4)
+ stw r31, VCPU_GPR(R31)(r4)
/* Load host non-volatile register state from host stack. */
- lwz r14, HOST_NV_GPR(r14)(r1)
- lwz r15, HOST_NV_GPR(r15)(r1)
- lwz r16, HOST_NV_GPR(r16)(r1)
- lwz r17, HOST_NV_GPR(r17)(r1)
- lwz r18, HOST_NV_GPR(r18)(r1)
- lwz r19, HOST_NV_GPR(r19)(r1)
- lwz r20, HOST_NV_GPR(r20)(r1)
- lwz r21, HOST_NV_GPR(r21)(r1)
- lwz r22, HOST_NV_GPR(r22)(r1)
- lwz r23, HOST_NV_GPR(r23)(r1)
- lwz r24, HOST_NV_GPR(r24)(r1)
- lwz r25, HOST_NV_GPR(r25)(r1)
- lwz r26, HOST_NV_GPR(r26)(r1)
- lwz r27, HOST_NV_GPR(r27)(r1)
- lwz r28, HOST_NV_GPR(r28)(r1)
- lwz r29, HOST_NV_GPR(r29)(r1)
- lwz r30, HOST_NV_GPR(r30)(r1)
- lwz r31, HOST_NV_GPR(r31)(r1)
+ lwz r14, HOST_NV_GPR(R14)(r1)
+ lwz r15, HOST_NV_GPR(R15)(r1)
+ lwz r16, HOST_NV_GPR(R16)(r1)
+ lwz r17, HOST_NV_GPR(R17)(r1)
+ lwz r18, HOST_NV_GPR(R18)(r1)
+ lwz r19, HOST_NV_GPR(R19)(r1)
+ lwz r20, HOST_NV_GPR(R20)(r1)
+ lwz r21, HOST_NV_GPR(R21)(r1)
+ lwz r22, HOST_NV_GPR(R22)(r1)
+ lwz r23, HOST_NV_GPR(R23)(r1)
+ lwz r24, HOST_NV_GPR(R24)(r1)
+ lwz r25, HOST_NV_GPR(R25)(r1)
+ lwz r26, HOST_NV_GPR(R26)(r1)
+ lwz r27, HOST_NV_GPR(R27)(r1)
+ lwz r28, HOST_NV_GPR(R28)(r1)
+ lwz r29, HOST_NV_GPR(R29)(r1)
+ lwz r30, HOST_NV_GPR(R30)(r1)
+ lwz r31, HOST_NV_GPR(R31)(r1)
/* Return to kvm_vcpu_run(). */
lwz r4, HOST_STACK_LR(r1)
@@ -321,44 +319,44 @@ _GLOBAL(__kvmppc_vcpu_run)
stw r5, HOST_CR(r1)
/* Save host non-volatile register state to stack. */
- stw r14, HOST_NV_GPR(r14)(r1)
- stw r15, HOST_NV_GPR(r15)(r1)
- stw r16, HOST_NV_GPR(r16)(r1)
- stw r17, HOST_NV_GPR(r17)(r1)
- stw r18, HOST_NV_GPR(r18)(r1)
- stw r19, HOST_NV_GPR(r19)(r1)
- stw r20, HOST_NV_GPR(r20)(r1)
- stw r21, HOST_NV_GPR(r21)(r1)
- stw r22, HOST_NV_GPR(r22)(r1)
- stw r23, HOST_NV_GPR(r23)(r1)
- stw r24, HOST_NV_GPR(r24)(r1)
- stw r25, HOST_NV_GPR(r25)(r1)
- stw r26, HOST_NV_GPR(r26)(r1)
- stw r27, HOST_NV_GPR(r27)(r1)
- stw r28, HOST_NV_GPR(r28)(r1)
- stw r29, HOST_NV_GPR(r29)(r1)
- stw r30, HOST_NV_GPR(r30)(r1)
- stw r31, HOST_NV_GPR(r31)(r1)
+ stw r14, HOST_NV_GPR(R14)(r1)
+ stw r15, HOST_NV_GPR(R15)(r1)
+ stw r16, HOST_NV_GPR(R16)(r1)
+ stw r17, HOST_NV_GPR(R17)(r1)
+ stw r18, HOST_NV_GPR(R18)(r1)
+ stw r19, HOST_NV_GPR(R19)(r1)
+ stw r20, HOST_NV_GPR(R20)(r1)
+ stw r21, HOST_NV_GPR(R21)(r1)
+ stw r22, HOST_NV_GPR(R22)(r1)
+ stw r23, HOST_NV_GPR(R23)(r1)
+ stw r24, HOST_NV_GPR(R24)(r1)
+ stw r25, HOST_NV_GPR(R25)(r1)
+ stw r26, HOST_NV_GPR(R26)(r1)
+ stw r27, HOST_NV_GPR(R27)(r1)
+ stw r28, HOST_NV_GPR(R28)(r1)
+ stw r29, HOST_NV_GPR(R29)(r1)
+ stw r30, HOST_NV_GPR(R30)(r1)
+ stw r31, HOST_NV_GPR(R31)(r1)
/* Load guest non-volatiles. */
- lwz r14, VCPU_GPR(r14)(r4)
- lwz r15, VCPU_GPR(r15)(r4)
- lwz r16, VCPU_GPR(r16)(r4)
- lwz r17, VCPU_GPR(r17)(r4)
- lwz r18, VCPU_GPR(r18)(r4)
- lwz r19, VCPU_GPR(r19)(r4)
- lwz r20, VCPU_GPR(r20)(r4)
- lwz r21, VCPU_GPR(r21)(r4)
- lwz r22, VCPU_GPR(r22)(r4)
- lwz r23, VCPU_GPR(r23)(r4)
- lwz r24, VCPU_GPR(r24)(r4)
- lwz r25, VCPU_GPR(r25)(r4)
- lwz r26, VCPU_GPR(r26)(r4)
- lwz r27, VCPU_GPR(r27)(r4)
- lwz r28, VCPU_GPR(r28)(r4)
- lwz r29, VCPU_GPR(r29)(r4)
- lwz r30, VCPU_GPR(r30)(r4)
- lwz r31, VCPU_GPR(r31)(r4)
+ lwz r14, VCPU_GPR(R14)(r4)
+ lwz r15, VCPU_GPR(R15)(r4)
+ lwz r16, VCPU_GPR(R16)(r4)
+ lwz r17, VCPU_GPR(R17)(r4)
+ lwz r18, VCPU_GPR(R18)(r4)
+ lwz r19, VCPU_GPR(R19)(r4)
+ lwz r20, VCPU_GPR(R20)(r4)
+ lwz r21, VCPU_GPR(R21)(r4)
+ lwz r22, VCPU_GPR(R22)(r4)
+ lwz r23, VCPU_GPR(R23)(r4)
+ lwz r24, VCPU_GPR(R24)(r4)
+ lwz r25, VCPU_GPR(R25)(r4)
+ lwz r26, VCPU_GPR(R26)(r4)
+ lwz r27, VCPU_GPR(R27)(r4)
+ lwz r28, VCPU_GPR(R28)(r4)
+ lwz r29, VCPU_GPR(R29)(r4)
+ lwz r30, VCPU_GPR(R30)(r4)
+ lwz r31, VCPU_GPR(R31)(r4)
#ifdef CONFIG_SPE
/* save host SPEFSCR and load guest SPEFSCR */
@@ -386,13 +384,13 @@ lightweight_exit:
#endif
/* Load some guest volatiles. */
- lwz r0, VCPU_GPR(r0)(r4)
- lwz r2, VCPU_GPR(r2)(r4)
- lwz r9, VCPU_GPR(r9)(r4)
- lwz r10, VCPU_GPR(r10)(r4)
- lwz r11, VCPU_GPR(r11)(r4)
- lwz r12, VCPU_GPR(r12)(r4)
- lwz r13, VCPU_GPR(r13)(r4)
+ lwz r0, VCPU_GPR(R0)(r4)
+ lwz r2, VCPU_GPR(R2)(r4)
+ lwz r9, VCPU_GPR(R9)(r4)
+ lwz r10, VCPU_GPR(R10)(r4)
+ lwz r11, VCPU_GPR(R11)(r4)
+ lwz r12, VCPU_GPR(R12)(r4)
+ lwz r13, VCPU_GPR(R13)(r4)
lwz r3, VCPU_LR(r4)
mtlr r3
lwz r3, VCPU_XER(r4)
@@ -411,7 +409,7 @@ lightweight_exit:
/* Can't switch the stack pointer until after IVPR is switched,
* because host interrupt handlers would get confused. */
- lwz r1, VCPU_GPR(r1)(r4)
+ lwz r1, VCPU_GPR(R1)(r4)
/*
* Host interrupt handlers may have clobbered these
@@ -449,10 +447,10 @@ lightweight_exit:
mtcr r5
mtsrr0 r6
mtsrr1 r7
- lwz r5, VCPU_GPR(r5)(r4)
- lwz r6, VCPU_GPR(r6)(r4)
- lwz r7, VCPU_GPR(r7)(r4)
- lwz r8, VCPU_GPR(r8)(r4)
+ lwz r5, VCPU_GPR(R5)(r4)
+ lwz r6, VCPU_GPR(R6)(r4)
+ lwz r7, VCPU_GPR(R7)(r4)
+ lwz r8, VCPU_GPR(R8)(r4)
/* Clear any debug events which occurred since we disabled MSR[DE].
* XXX This gives us a 3-instruction window in which a breakpoint
@@ -461,8 +459,8 @@ lightweight_exit:
ori r3, r3, 0xffff
mtspr SPRN_DBSR, r3
- lwz r3, VCPU_GPR(r3)(r4)
- lwz r4, VCPU_GPR(r4)(r4)
+ lwz r3, VCPU_GPR(R3)(r4)
+ lwz r4, VCPU_GPR(R4)(r4)
rfi
#ifdef CONFIG_SPE
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 6048a00515d7..d28c2d43ac1b 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -37,7 +37,6 @@
#define LONGBYTES (BITS_PER_LONG / 8)
-#define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES))
#define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES))
/* The host stack layout: */
@@ -67,15 +66,15 @@
*/
.macro kvm_handler_common intno, srr0, flags
/* Restore host stack pointer */
- PPC_STL r1, VCPU_GPR(r1)(r4)
- PPC_STL r2, VCPU_GPR(r2)(r4)
+ PPC_STL r1, VCPU_GPR(R1)(r4)
+ PPC_STL r2, VCPU_GPR(R2)(r4)
PPC_LL r1, VCPU_HOST_STACK(r4)
PPC_LL r2, HOST_R2(r1)
mfspr r10, SPRN_PID
lwz r8, VCPU_HOST_PID(r4)
PPC_LL r11, VCPU_SHARED(r4)
- PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+ PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
li r14, \intno
stw r10, VCPU_GUEST_PID(r4)
@@ -137,35 +136,31 @@
*/
mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
- PPC_STL r15, VCPU_GPR(r15)(r4)
- PPC_STL r16, VCPU_GPR(r16)(r4)
- PPC_STL r17, VCPU_GPR(r17)(r4)
- PPC_STL r18, VCPU_GPR(r18)(r4)
- PPC_STL r19, VCPU_GPR(r19)(r4)
+ PPC_STL r15, VCPU_GPR(R15)(r4)
+ PPC_STL r16, VCPU_GPR(R16)(r4)
+ PPC_STL r17, VCPU_GPR(R17)(r4)
+ PPC_STL r18, VCPU_GPR(R18)(r4)
+ PPC_STL r19, VCPU_GPR(R19)(r4)
mr r8, r3
- PPC_STL r20, VCPU_GPR(r20)(r4)
+ PPC_STL r20, VCPU_GPR(R20)(r4)
rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
- PPC_STL r21, VCPU_GPR(r21)(r4)
+ PPC_STL r21, VCPU_GPR(R21)(r4)
rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
- PPC_STL r22, VCPU_GPR(r22)(r4)
+ PPC_STL r22, VCPU_GPR(R22)(r4)
rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
- PPC_STL r23, VCPU_GPR(r23)(r4)
- PPC_STL r24, VCPU_GPR(r24)(r4)
- PPC_STL r25, VCPU_GPR(r25)(r4)
- PPC_STL r26, VCPU_GPR(r26)(r4)
- PPC_STL r27, VCPU_GPR(r27)(r4)
- PPC_STL r28, VCPU_GPR(r28)(r4)
- PPC_STL r29, VCPU_GPR(r29)(r4)
- PPC_STL r30, VCPU_GPR(r30)(r4)
- PPC_STL r31, VCPU_GPR(r31)(r4)
+ PPC_STL r23, VCPU_GPR(R23)(r4)
+ PPC_STL r24, VCPU_GPR(R24)(r4)
+ PPC_STL r25, VCPU_GPR(R25)(r4)
+ PPC_STL r26, VCPU_GPR(R26)(r4)
+ PPC_STL r27, VCPU_GPR(R27)(r4)
+ PPC_STL r28, VCPU_GPR(R28)(r4)
+ PPC_STL r29, VCPU_GPR(R29)(r4)
+ PPC_STL r30, VCPU_GPR(R30)(r4)
+ PPC_STL r31, VCPU_GPR(R31)(r4)
mtspr SPRN_EPLC, r8
/* disable preemption, so we are sure we hit the fixup handler */
-#ifdef CONFIG_PPC64
- clrrdi r8,r1,THREAD_SHIFT
-#else
- rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */
-#endif
+ CURRENT_THREAD_INFO(r8, r1)
li r7, 1
stw r7, TI_PREEMPT(r8)
@@ -211,24 +206,24 @@
.macro kvm_handler intno srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
GET_VCPU(r11, r10)
- PPC_STL r3, VCPU_GPR(r3)(r11)
+ PPC_STL r3, VCPU_GPR(R3)(r11)
mfspr r3, SPRN_SPRG_RSCRATCH0
- PPC_STL r4, VCPU_GPR(r4)(r11)
+ PPC_STL r4, VCPU_GPR(R4)(r11)
PPC_LL r4, THREAD_NORMSAVE(0)(r10)
- PPC_STL r5, VCPU_GPR(r5)(r11)
+ PPC_STL r5, VCPU_GPR(R5)(r11)
stw r13, VCPU_CR(r11)
mfspr r5, \srr0
- PPC_STL r3, VCPU_GPR(r10)(r11)
+ PPC_STL r3, VCPU_GPR(R10)(r11)
PPC_LL r3, THREAD_NORMSAVE(2)(r10)
- PPC_STL r6, VCPU_GPR(r6)(r11)
- PPC_STL r4, VCPU_GPR(r11)(r11)
+ PPC_STL r6, VCPU_GPR(R6)(r11)
+ PPC_STL r4, VCPU_GPR(R11)(r11)
mfspr r6, \srr1
- PPC_STL r7, VCPU_GPR(r7)(r11)
- PPC_STL r8, VCPU_GPR(r8)(r11)
- PPC_STL r9, VCPU_GPR(r9)(r11)
- PPC_STL r3, VCPU_GPR(r13)(r11)
+ PPC_STL r7, VCPU_GPR(R7)(r11)
+ PPC_STL r8, VCPU_GPR(R8)(r11)
+ PPC_STL r9, VCPU_GPR(R9)(r11)
+ PPC_STL r3, VCPU_GPR(R13)(r11)
mfctr r7
- PPC_STL r12, VCPU_GPR(r12)(r11)
+ PPC_STL r12, VCPU_GPR(R12)(r11)
PPC_STL r7, VCPU_CTR(r11)
mr r4, r11
kvm_handler_common \intno, \srr0, \flags
@@ -238,25 +233,25 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
mfspr r10, SPRN_SPRG_THREAD
GET_VCPU(r11, r10)
- PPC_STL r3, VCPU_GPR(r3)(r11)
+ PPC_STL r3, VCPU_GPR(R3)(r11)
mfspr r3, \scratch
- PPC_STL r4, VCPU_GPR(r4)(r11)
+ PPC_STL r4, VCPU_GPR(R4)(r11)
PPC_LL r4, GPR9(r8)
- PPC_STL r5, VCPU_GPR(r5)(r11)
+ PPC_STL r5, VCPU_GPR(R5)(r11)
stw r9, VCPU_CR(r11)
mfspr r5, \srr0
- PPC_STL r3, VCPU_GPR(r8)(r11)
+ PPC_STL r3, VCPU_GPR(R8)(r11)
PPC_LL r3, GPR10(r8)
- PPC_STL r6, VCPU_GPR(r6)(r11)
- PPC_STL r4, VCPU_GPR(r9)(r11)
+ PPC_STL r6, VCPU_GPR(R6)(r11)
+ PPC_STL r4, VCPU_GPR(R9)(r11)
mfspr r6, \srr1
PPC_LL r4, GPR11(r8)
- PPC_STL r7, VCPU_GPR(r7)(r11)
- PPC_STL r3, VCPU_GPR(r10)(r11)
+ PPC_STL r7, VCPU_GPR(R7)(r11)
+ PPC_STL r3, VCPU_GPR(R10)(r11)
mfctr r7
- PPC_STL r12, VCPU_GPR(r12)(r11)
- PPC_STL r13, VCPU_GPR(r13)(r11)
- PPC_STL r4, VCPU_GPR(r11)(r11)
+ PPC_STL r12, VCPU_GPR(R12)(r11)
+ PPC_STL r13, VCPU_GPR(R13)(r11)
+ PPC_STL r4, VCPU_GPR(R11)(r11)
PPC_STL r7, VCPU_CTR(r11)
mr r4, r11
kvm_handler_common \intno, \srr0, \flags
@@ -267,7 +262,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
- SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR)
+ SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
@@ -310,7 +305,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
_GLOBAL(kvmppc_resume_host)
/* Save remaining volatile guest register state to vcpu. */
mfspr r3, SPRN_VRSAVE
- PPC_STL r0, VCPU_GPR(r0)(r4)
+ PPC_STL r0, VCPU_GPR(R0)(r4)
mflr r5
mfspr r6, SPRN_SPRG4
PPC_STL r5, VCPU_LR(r4)
@@ -358,27 +353,27 @@ _GLOBAL(kvmppc_resume_host)
/* Restore vcpu pointer and the nonvolatiles we used. */
mr r4, r14
- PPC_LL r14, VCPU_GPR(r14)(r4)
+ PPC_LL r14, VCPU_GPR(R14)(r4)
andi. r5, r3, RESUME_FLAG_NV
beq skip_nv_load
- PPC_LL r15, VCPU_GPR(r15)(r4)
- PPC_LL r16, VCPU_GPR(r16)(r4)
- PPC_LL r17, VCPU_GPR(r17)(r4)
- PPC_LL r18, VCPU_GPR(r18)(r4)
- PPC_LL r19, VCPU_GPR(r19)(r4)
- PPC_LL r20, VCPU_GPR(r20)(r4)
- PPC_LL r21, VCPU_GPR(r21)(r4)
- PPC_LL r22, VCPU_GPR(r22)(r4)
- PPC_LL r23, VCPU_GPR(r23)(r4)
- PPC_LL r24, VCPU_GPR(r24)(r4)
- PPC_LL r25, VCPU_GPR(r25)(r4)
- PPC_LL r26, VCPU_GPR(r26)(r4)
- PPC_LL r27, VCPU_GPR(r27)(r4)
- PPC_LL r28, VCPU_GPR(r28)(r4)
- PPC_LL r29, VCPU_GPR(r29)(r4)
- PPC_LL r30, VCPU_GPR(r30)(r4)
- PPC_LL r31, VCPU_GPR(r31)(r4)
+ PPC_LL r15, VCPU_GPR(R15)(r4)
+ PPC_LL r16, VCPU_GPR(R16)(r4)
+ PPC_LL r17, VCPU_GPR(R17)(r4)
+ PPC_LL r18, VCPU_GPR(R18)(r4)
+ PPC_LL r19, VCPU_GPR(R19)(r4)
+ PPC_LL r20, VCPU_GPR(R20)(r4)
+ PPC_LL r21, VCPU_GPR(R21)(r4)
+ PPC_LL r22, VCPU_GPR(R22)(r4)
+ PPC_LL r23, VCPU_GPR(R23)(r4)
+ PPC_LL r24, VCPU_GPR(R24)(r4)
+ PPC_LL r25, VCPU_GPR(R25)(r4)
+ PPC_LL r26, VCPU_GPR(R26)(r4)
+ PPC_LL r27, VCPU_GPR(R27)(r4)
+ PPC_LL r28, VCPU_GPR(R28)(r4)
+ PPC_LL r29, VCPU_GPR(R29)(r4)
+ PPC_LL r30, VCPU_GPR(R30)(r4)
+ PPC_LL r31, VCPU_GPR(R31)(r4)
skip_nv_load:
/* Should we return to the guest? */
andi. r5, r3, RESUME_FLAG_HOST
@@ -396,23 +391,23 @@ heavyweight_exit:
* non-volatiles.
*/
- PPC_STL r15, VCPU_GPR(r15)(r4)
- PPC_STL r16, VCPU_GPR(r16)(r4)
- PPC_STL r17, VCPU_GPR(r17)(r4)
- PPC_STL r18, VCPU_GPR(r18)(r4)
- PPC_STL r19, VCPU_GPR(r19)(r4)
- PPC_STL r20, VCPU_GPR(r20)(r4)
- PPC_STL r21, VCPU_GPR(r21)(r4)
- PPC_STL r22, VCPU_GPR(r22)(r4)
- PPC_STL r23, VCPU_GPR(r23)(r4)
- PPC_STL r24, VCPU_GPR(r24)(r4)
- PPC_STL r25, VCPU_GPR(r25)(r4)
- PPC_STL r26, VCPU_GPR(r26)(r4)
- PPC_STL r27, VCPU_GPR(r27)(r4)
- PPC_STL r28, VCPU_GPR(r28)(r4)
- PPC_STL r29, VCPU_GPR(r29)(r4)
- PPC_STL r30, VCPU_GPR(r30)(r4)
- PPC_STL r31, VCPU_GPR(r31)(r4)
+ PPC_STL r15, VCPU_GPR(R15)(r4)
+ PPC_STL r16, VCPU_GPR(R16)(r4)
+ PPC_STL r17, VCPU_GPR(R17)(r4)
+ PPC_STL r18, VCPU_GPR(R18)(r4)
+ PPC_STL r19, VCPU_GPR(R19)(r4)
+ PPC_STL r20, VCPU_GPR(R20)(r4)
+ PPC_STL r21, VCPU_GPR(R21)(r4)
+ PPC_STL r22, VCPU_GPR(R22)(r4)
+ PPC_STL r23, VCPU_GPR(R23)(r4)
+ PPC_STL r24, VCPU_GPR(R24)(r4)
+ PPC_STL r25, VCPU_GPR(R25)(r4)
+ PPC_STL r26, VCPU_GPR(R26)(r4)
+ PPC_STL r27, VCPU_GPR(R27)(r4)
+ PPC_STL r28, VCPU_GPR(R28)(r4)
+ PPC_STL r29, VCPU_GPR(R29)(r4)
+ PPC_STL r30, VCPU_GPR(R30)(r4)
+ PPC_STL r31, VCPU_GPR(R31)(r4)
/* Load host non-volatile register state from host stack. */
PPC_LL r14, HOST_NV_GPR(r14)(r1)
@@ -478,24 +473,24 @@ _GLOBAL(__kvmppc_vcpu_run)
PPC_STL r31, HOST_NV_GPR(r31)(r1)
/* Load guest non-volatiles. */
- PPC_LL r14, VCPU_GPR(r14)(r4)
- PPC_LL r15, VCPU_GPR(r15)(r4)
- PPC_LL r16, VCPU_GPR(r16)(r4)
- PPC_LL r17, VCPU_GPR(r17)(r4)
- PPC_LL r18, VCPU_GPR(r18)(r4)
- PPC_LL r19, VCPU_GPR(r19)(r4)
- PPC_LL r20, VCPU_GPR(r20)(r4)
- PPC_LL r21, VCPU_GPR(r21)(r4)
- PPC_LL r22, VCPU_GPR(r22)(r4)
- PPC_LL r23, VCPU_GPR(r23)(r4)
- PPC_LL r24, VCPU_GPR(r24)(r4)
- PPC_LL r25, VCPU_GPR(r25)(r4)
- PPC_LL r26, VCPU_GPR(r26)(r4)
- PPC_LL r27, VCPU_GPR(r27)(r4)
- PPC_LL r28, VCPU_GPR(r28)(r4)
- PPC_LL r29, VCPU_GPR(r29)(r4)
- PPC_LL r30, VCPU_GPR(r30)(r4)
- PPC_LL r31, VCPU_GPR(r31)(r4)
+ PPC_LL r14, VCPU_GPR(R14)(r4)
+ PPC_LL r15, VCPU_GPR(R15)(r4)
+ PPC_LL r16, VCPU_GPR(R16)(r4)
+ PPC_LL r17, VCPU_GPR(R17)(r4)
+ PPC_LL r18, VCPU_GPR(R18)(r4)
+ PPC_LL r19, VCPU_GPR(R19)(r4)
+ PPC_LL r20, VCPU_GPR(R20)(r4)
+ PPC_LL r21, VCPU_GPR(R21)(r4)
+ PPC_LL r22, VCPU_GPR(R22)(r4)
+ PPC_LL r23, VCPU_GPR(R23)(r4)
+ PPC_LL r24, VCPU_GPR(R24)(r4)
+ PPC_LL r25, VCPU_GPR(R25)(r4)
+ PPC_LL r26, VCPU_GPR(R26)(r4)
+ PPC_LL r27, VCPU_GPR(R27)(r4)
+ PPC_LL r28, VCPU_GPR(R28)(r4)
+ PPC_LL r29, VCPU_GPR(R29)(r4)
+ PPC_LL r30, VCPU_GPR(R30)(r4)
+ PPC_LL r31, VCPU_GPR(R31)(r4)
lightweight_exit:
@@ -554,13 +549,13 @@ lightweight_exit:
lwz r7, VCPU_CR(r4)
PPC_LL r8, VCPU_PC(r4)
PPC_LD(r9, VCPU_SHARED_MSR, r11)
- PPC_LL r0, VCPU_GPR(r0)(r4)
- PPC_LL r1, VCPU_GPR(r1)(r4)
- PPC_LL r2, VCPU_GPR(r2)(r4)
- PPC_LL r10, VCPU_GPR(r10)(r4)
- PPC_LL r11, VCPU_GPR(r11)(r4)
- PPC_LL r12, VCPU_GPR(r12)(r4)
- PPC_LL r13, VCPU_GPR(r13)(r4)
+ PPC_LL r0, VCPU_GPR(R0)(r4)
+ PPC_LL r1, VCPU_GPR(R1)(r4)
+ PPC_LL r2, VCPU_GPR(R2)(r4)
+ PPC_LL r10, VCPU_GPR(R10)(r4)
+ PPC_LL r11, VCPU_GPR(R11)(r4)
+ PPC_LL r12, VCPU_GPR(R12)(r4)
+ PPC_LL r13, VCPU_GPR(R13)(r4)
mtlr r3
mtxer r5
mtctr r6
@@ -586,12 +581,12 @@ lightweight_exit:
mtcr r7
/* Finish loading guest volatiles and jump to guest. */
- PPC_LL r5, VCPU_GPR(r5)(r4)
- PPC_LL r6, VCPU_GPR(r6)(r4)
- PPC_LL r7, VCPU_GPR(r7)(r4)
- PPC_LL r8, VCPU_GPR(r8)(r4)
- PPC_LL r9, VCPU_GPR(r9)(r4)
-
- PPC_LL r3, VCPU_GPR(r3)(r4)
- PPC_LL r4, VCPU_GPR(r4)(r4)
+ PPC_LL r5, VCPU_GPR(R5)(r4)
+ PPC_LL r6, VCPU_GPR(R6)(r4)
+ PPC_LL r7, VCPU_GPR(R7)(r4)
+ PPC_LL r8, VCPU_GPR(R8)(r4)
+ PPC_LL r9, VCPU_GPR(R9)(r4)
+
+ PPC_LL r3, VCPU_GPR(R3)(r4)
+ PPC_LL r4, VCPU_GPR(R4)(r4)
rfi
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 8b99e076dc81..e04b0ef55ce0 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -269,6 +269,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
*spr_val = vcpu->arch.shared->mas7_3 >> 32;
break;
#endif
+ case SPRN_DECAR:
+ *spr_val = vcpu->arch.decar;
+ break;
case SPRN_TLB0CFG:
*spr_val = vcpu->arch.tlbcfg[0];
break;
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index fe6c1de6b701..1f89d26e65fb 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2010,2012 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Varun Sethi, <varun.sethi@freescale.com>
*
@@ -57,7 +57,8 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
struct kvm_book3e_206_tlb_entry *gtlbe)
{
unsigned int tid, ts;
- u32 val, eaddr, lpid;
+ gva_t eaddr;
+ u32 val, lpid;
unsigned long flags;
ts = get_tlb_ts(gtlbe);
@@ -183,6 +184,9 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
SPRN_EPCR_DUVD;
+#ifdef CONFIG_64BIT
+ vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
+#endif
vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
vcpu->arch.epsc = vcpu->arch.eplc;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index f90e86dea7a2..ee04abaefe23 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -59,11 +59,13 @@
#define OP_31_XOP_STHBRX 918
#define OP_LWZ 32
+#define OP_LD 58
#define OP_LWZU 33
#define OP_LBZ 34
#define OP_LBZU 35
#define OP_STW 36
#define OP_STWU 37
+#define OP_STD 62
#define OP_STB 38
#define OP_STBU 39
#define OP_LHZ 40
@@ -392,6 +394,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
+ /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
+ case OP_LD:
+ rt = get_rt(inst);
+ emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+ break;
+
case OP_LWZU:
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
@@ -412,6 +420,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
4, 1);
break;
+ /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
+ case OP_STD:
+ rs = get_rs(inst);
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
+ 8, 1);
+ break;
+
case OP_STWU:
emulated = kvmppc_handle_store(run, vcpu,
kvmppc_get_gpr(vcpu, rs),
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 1493c8de947b..87f4dc886076 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -246,6 +246,7 @@ int kvm_dev_ioctl_check_extension(long ext)
#endif
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
+ case KVM_CAP_PPC_ALLOC_HTAB:
r = 1;
break;
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -802,6 +803,23 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EFAULT;
break;
}
+
+ case KVM_PPC_ALLOCATE_HTAB: {
+ struct kvm *kvm = filp->private_data;
+ u32 htab_order;
+
+ r = -EFAULT;
+ if (get_user(htab_order, (u32 __user *)argp))
+ break;
+ r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
+ if (r)
+ break;
+ r = -EFAULT;
+ if (put_user(htab_order, (u32 __user *)argp))
+ break;
+ r = 0;
+ break;
+ }
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#ifdef CONFIG_PPC_BOOK3S_64
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 7735a2c2e6d9..746e0c895cd7 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -17,14 +17,15 @@ obj-$(CONFIG_HAS_IOMEM) += devres.o
obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
memcpy_64.o usercopy_64.o mem_64.o string.o \
checksum_wrappers_64.o hweight_64.o \
- copyuser_power7.o
+ copyuser_power7.o string_64.o copypage_power7.o \
+ memcpy_power7.o
obj-$(CONFIG_XMON) += sstep.o ldstfp.o
obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o
ifeq ($(CONFIG_PPC64),y)
obj-$(CONFIG_SMP) += locks.o
-obj-$(CONFIG_ALTIVEC) += copyuser_power7_vmx.o
+obj-$(CONFIG_ALTIVEC) += vmx-helper.o
endif
obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 18245af38aea..167f72555d60 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -65,9 +65,6 @@ _GLOBAL(csum_tcpudp_magic)
srwi r3,r3,16
blr
-#define STACKFRAMESIZE 256
-#define STK_REG(i) (112 + ((i)-14)*8)
-
/*
* Computes the checksum of a memory block at buff, length len,
* and adds in "sum" (32-bit).
@@ -114,9 +111,9 @@ _GLOBAL(csum_partial)
mtctr r6
stdu r1,-STACKFRAMESIZE(r1)
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
ld r6,0(r3)
ld r9,8(r3)
@@ -175,9 +172,9 @@ _GLOBAL(csum_partial)
adde r0,r0,r15
adde r0,r0,r16
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
andi. r4,r4,63
@@ -299,9 +296,9 @@ dest; sth r6,0(r4)
mtctr r6
stdu r1,-STACKFRAMESIZE(r1)
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
source; ld r6,0(r3)
source; ld r9,8(r3)
@@ -382,9 +379,9 @@ dest; std r16,56(r4)
adde r0,r0,r15
adde r0,r0,r16
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
addi r1,r1,STACKFRAMESIZE
andi. r5,r5,63
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 7c975d43e3f3..dd223b3eb333 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -13,17 +13,23 @@
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/code-patching.h>
+#include <asm/uaccess.h>
-void patch_instruction(unsigned int *addr, unsigned int instr)
+int patch_instruction(unsigned int *addr, unsigned int instr)
{
- *addr = instr;
+ int err;
+
+ err = __put_user(instr, addr);
+ if (err)
+ return err;
asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
+ return 0;
}
-void patch_branch(unsigned int *addr, unsigned long target, int flags)
+int patch_branch(unsigned int *addr, unsigned long target, int flags)
{
- patch_instruction(addr, create_branch(addr, target, flags));
+ return patch_instruction(addr, create_branch(addr, target, flags));
}
unsigned int create_branch(const unsigned int *addr,
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S
index 53dcb6b1b708..9f9434a85264 100644
--- a/arch/powerpc/lib/copypage_64.S
+++ b/arch/powerpc/lib/copypage_64.S
@@ -17,7 +17,11 @@ PPC64_CACHES:
.section ".text"
_GLOBAL(copy_page)
+BEGIN_FTR_SECTION
lis r5,PAGE_SIZE@h
+FTR_SECTION_ELSE
+ b .copypage_power7
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
ori r5,r5,PAGE_SIZE@l
BEGIN_FTR_SECTION
ld r10,PPC64_CACHES@toc(r2)
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
new file mode 100644
index 000000000000..0ef75bf0695c
--- /dev/null
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -0,0 +1,165 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/page.h>
+#include <asm/ppc_asm.h>
+
+_GLOBAL(copypage_power7)
+ /*
+ * We prefetch both the source and destination using enhanced touch
+ * instructions. We use a stream ID of 0 for the load side and
+ * 1 for the store side. Since source and destination are page
+ * aligned we don't need to clear the bottom 7 bits of either
+ * address.
+ */
+ ori r9,r3,1 /* stream=1 */
+
+#ifdef CONFIG_PPC_64K_PAGES
+ lis r7,0x0E01 /* depth=7, units=512 */
+#else
+ lis r7,0x0E00 /* depth=7 */
+ ori r7,r7,0x1000 /* units=32 */
+#endif
+ ori r10,r7,1 /* stream=1 */
+
+ lis r8,0x8000 /* GO=1 */
+ clrldi r8,r8,32
+
+.machine push
+.machine "power4"
+ dcbt r0,r4,0b01000
+ dcbt r0,r7,0b01010
+ dcbtst r0,r9,0b01000
+ dcbtst r0,r10,0b01010
+ eieio
+ dcbt r0,r8,0b01010 /* GO */
+.machine pop
+
+#ifdef CONFIG_ALTIVEC
+ mflr r0
+ std r3,48(r1)
+ std r4,56(r1)
+ std r0,16(r1)
+ stdu r1,-STACKFRAMESIZE(r1)
+ bl .enter_vmx_copy
+ cmpwi r3,0
+ ld r0,STACKFRAMESIZE+16(r1)
+ ld r3,STACKFRAMESIZE+48(r1)
+ ld r4,STACKFRAMESIZE+56(r1)
+ mtlr r0
+
+ li r0,(PAGE_SIZE/128)
+ mtctr r0
+
+ beq .Lnonvmx_copy
+
+ addi r1,r1,STACKFRAMESIZE
+
+ li r6,16
+ li r7,32
+ li r8,48
+ li r9,64
+ li r10,80
+ li r11,96
+ li r12,112
+
+ .align 5
+1: lvx vr7,r0,r4
+ lvx vr6,r4,r6
+ lvx vr5,r4,r7
+ lvx vr4,r4,r8
+ lvx vr3,r4,r9
+ lvx vr2,r4,r10
+ lvx vr1,r4,r11
+ lvx vr0,r4,r12
+ addi r4,r4,128
+ stvx vr7,r0,r3
+ stvx vr6,r3,r6
+ stvx vr5,r3,r7
+ stvx vr4,r3,r8
+ stvx vr3,r3,r9
+ stvx vr2,r3,r10
+ stvx vr1,r3,r11
+ stvx vr0,r3,r12
+ addi r3,r3,128
+ bdnz 1b
+
+ b .exit_vmx_copy /* tail call optimise */
+
+#else
+ li r0,(PAGE_SIZE/128)
+ mtctr r0
+
+ stdu r1,-STACKFRAMESIZE(r1)
+#endif
+
+.Lnonvmx_copy:
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
+ std r17,STK_REG(R17)(r1)
+ std r18,STK_REG(R18)(r1)
+ std r19,STK_REG(R19)(r1)
+ std r20,STK_REG(R20)(r1)
+
+1: ld r0,0(r4)
+ ld r5,8(r4)
+ ld r6,16(r4)
+ ld r7,24(r4)
+ ld r8,32(r4)
+ ld r9,40(r4)
+ ld r10,48(r4)
+ ld r11,56(r4)
+ ld r12,64(r4)
+ ld r14,72(r4)
+ ld r15,80(r4)
+ ld r16,88(r4)
+ ld r17,96(r4)
+ ld r18,104(r4)
+ ld r19,112(r4)
+ ld r20,120(r4)
+ addi r4,r4,128
+ std r0,0(r3)
+ std r5,8(r3)
+ std r6,16(r3)
+ std r7,24(r3)
+ std r8,32(r3)
+ std r9,40(r3)
+ std r10,48(r3)
+ std r11,56(r3)
+ std r12,64(r3)
+ std r14,72(r3)
+ std r15,80(r3)
+ std r16,88(r3)
+ std r17,96(r3)
+ std r18,104(r3)
+ std r19,112(r3)
+ std r20,120(r3)
+ addi r3,r3,128
+ bdnz 1b
+
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ ld r17,STK_REG(R17)(r1)
+ ld r18,STK_REG(R18)(r1)
+ ld r19,STK_REG(R19)(r1)
+ ld r20,STK_REG(R20)(r1)
+ addi r1,r1,STACKFRAMESIZE
+ blr
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 497db7b23bb1..f9ede7c6606e 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -19,9 +19,6 @@
*/
#include <asm/ppc_asm.h>
-#define STACKFRAMESIZE 256
-#define STK_REG(i) (112 + ((i)-14)*8)
-
.macro err1
100:
.section __ex_table,"a"
@@ -57,26 +54,26 @@
.Ldo_err4:
- ld r16,STK_REG(r16)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r14,STK_REG(r14)(r1)
+ ld r16,STK_REG(R16)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r14,STK_REG(R14)(r1)
.Ldo_err3:
- bl .exit_vmx_copy
+ bl .exit_vmx_usercopy
ld r0,STACKFRAMESIZE+16(r1)
mtlr r0
b .Lexit
#endif /* CONFIG_ALTIVEC */
.Ldo_err2:
- ld r22,STK_REG(r22)(r1)
- ld r21,STK_REG(r21)(r1)
- ld r20,STK_REG(r20)(r1)
- ld r19,STK_REG(r19)(r1)
- ld r18,STK_REG(r18)(r1)
- ld r17,STK_REG(r17)(r1)
- ld r16,STK_REG(r16)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r14,STK_REG(r14)(r1)
+ ld r22,STK_REG(R22)(r1)
+ ld r21,STK_REG(R21)(r1)
+ ld r20,STK_REG(R20)(r1)
+ ld r19,STK_REG(R19)(r1)
+ ld r18,STK_REG(R18)(r1)
+ ld r17,STK_REG(R17)(r1)
+ ld r16,STK_REG(R16)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r14,STK_REG(R14)(r1)
.Lexit:
addi r1,r1,STACKFRAMESIZE
.Ldo_err1:
@@ -137,15 +134,15 @@ err1; stw r0,0(r3)
mflr r0
stdu r1,-STACKFRAMESIZE(r1)
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
- std r17,STK_REG(r17)(r1)
- std r18,STK_REG(r18)(r1)
- std r19,STK_REG(r19)(r1)
- std r20,STK_REG(r20)(r1)
- std r21,STK_REG(r21)(r1)
- std r22,STK_REG(r22)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
+ std r17,STK_REG(R17)(r1)
+ std r18,STK_REG(R18)(r1)
+ std r19,STK_REG(R19)(r1)
+ std r20,STK_REG(R20)(r1)
+ std r21,STK_REG(R21)(r1)
+ std r22,STK_REG(R22)(r1)
std r0,STACKFRAMESIZE+16(r1)
srdi r6,r5,7
@@ -192,15 +189,15 @@ err2; std r21,120(r3)
clrldi r5,r5,(64-7)
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
- ld r17,STK_REG(r17)(r1)
- ld r18,STK_REG(r18)(r1)
- ld r19,STK_REG(r19)(r1)
- ld r20,STK_REG(r20)(r1)
- ld r21,STK_REG(r21)(r1)
- ld r22,STK_REG(r22)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ ld r17,STK_REG(R17)(r1)
+ ld r18,STK_REG(R18)(r1)
+ ld r19,STK_REG(R19)(r1)
+ ld r20,STK_REG(R20)(r1)
+ ld r21,STK_REG(R21)(r1)
+ ld r22,STK_REG(R22)(r1)
addi r1,r1,STACKFRAMESIZE
/* Up to 127B to go */
@@ -290,7 +287,7 @@ err1; stb r0,0(r3)
mflr r0
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
- bl .enter_vmx_copy
+ bl .enter_vmx_usercopy
cmpwi r3,0
ld r0,STACKFRAMESIZE+16(r1)
ld r3,STACKFRAMESIZE+48(r1)
@@ -298,6 +295,68 @@ err1; stb r0,0(r3)
ld r5,STACKFRAMESIZE+64(r1)
mtlr r0
+ /*
+ * We prefetch both the source and destination using enhanced touch
+ * instructions. We use a stream ID of 0 for the load side and
+ * 1 for the store side.
+ */
+ clrrdi r6,r4,7
+ clrrdi r9,r3,7
+ ori r9,r9,1 /* stream=1 */
+
+ srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
+ cmpldi r7,0x3FF
+ ble 1f
+ li r7,0x3FF
+1: lis r0,0x0E00 /* depth=7 */
+ sldi r7,r7,7
+ or r7,r7,r0
+ ori r10,r7,1 /* stream=1 */
+
+ lis r8,0x8000 /* GO=1 */
+ clrldi r8,r8,32
+
+.machine push
+.machine "power4"
+ dcbt r0,r6,0b01000
+ dcbt r0,r7,0b01010
+ dcbtst r0,r9,0b01000
+ dcbtst r0,r10,0b01010
+ eieio
+ dcbt r0,r8,0b01010 /* GO */
+.machine pop
+
+ /*
+ * We prefetch both the source and destination using enhanced touch
+ * instructions. We use a stream ID of 0 for the load side and
+ * 1 for the store side.
+ */
+ clrrdi r6,r4,7
+ clrrdi r9,r3,7
+ ori r9,r9,1 /* stream=1 */
+
+ srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
+ cmpldi cr1,r7,0x3FF
+ ble cr1,1f
+ li r7,0x3FF
+1: lis r0,0x0E00 /* depth=7 */
+ sldi r7,r7,7
+ or r7,r7,r0
+ ori r10,r7,1 /* stream=1 */
+
+ lis r8,0x8000 /* GO=1 */
+ clrldi r8,r8,32
+
+.machine push
+.machine "power4"
+ dcbt r0,r6,0b01000
+ dcbt r0,r7,0b01010
+ dcbtst r0,r9,0b01000
+ dcbtst r0,r10,0b01010
+ eieio
+ dcbt r0,r8,0b01010 /* GO */
+.machine pop
+
beq .Lunwind_stack_nonvmx_copy
/*
@@ -378,9 +437,9 @@ err3; stvx vr0,r3,r11
7: sub r5,r5,r6
srdi r6,r5,7
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
li r12,64
li r14,80
@@ -415,9 +474,9 @@ err4; stvx vr0,r3,r16
addi r3,r3,128
bdnz 8b
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
/* Up to 127B to go */
clrldi r5,r5,(64-7)
@@ -476,7 +535,7 @@ err3; lbz r0,0(r4)
err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
- b .exit_vmx_copy /* tail call optimise */
+ b .exit_vmx_usercopy /* tail call optimise */
.Lvmx_unaligned_copy:
/* Get the destination 16B aligned */
@@ -563,9 +622,9 @@ err3; stvx vr11,r3,r11
7: sub r5,r5,r6
srdi r6,r5,7
- std r14,STK_REG(r14)(r1)
- std r15,STK_REG(r15)(r1)
- std r16,STK_REG(r16)(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
li r12,64
li r14,80
@@ -608,9 +667,9 @@ err4; stvx vr15,r3,r16
addi r3,r3,128
bdnz 8b
- ld r14,STK_REG(r14)(r1)
- ld r15,STK_REG(r15)(r1)
- ld r16,STK_REG(r16)(r1)
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
/* Up to 127B to go */
clrldi r5,r5,(64-7)
@@ -679,5 +738,5 @@ err3; lbz r0,0(r4)
err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
- b .exit_vmx_copy /* tail call optimise */
+ b .exit_vmx_usercopy /* tail call optimise */
#endif /* CONFiG_ALTIVEC */
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index 1c893f05d224..b2c68ce139ae 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -41,12 +41,13 @@
#include <asm/ppc_asm.h>
.file "crtsavres.S"
- .section ".text"
#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
#ifndef CONFIG_PPC64
+ .section ".text"
+
/* Routines for saving integer registers, called by the compiler. */
/* Called with r11 pointing to the stack header word of the caller of the */
/* function, just beyond the end of the integer save area. */
@@ -232,6 +233,8 @@ _GLOBAL(_rest32gpr_31_x)
#else /* CONFIG_PPC64 */
+ .section ".text.save.restore","ax",@progbits
+
.globl _savegpr0_14
_savegpr0_14:
std r14,-144(r1)
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S
index fda27868cf8c..9b96ff2ecd4d 100644
--- a/arch/powerpc/lib/hweight_64.S
+++ b/arch/powerpc/lib/hweight_64.S
@@ -28,7 +28,7 @@ BEGIN_FTR_SECTION
nop
nop
FTR_SECTION_ELSE
- PPC_POPCNTB(r3,r3)
+ PPC_POPCNTB(R3,R3)
clrldi r3,r3,64-8
blr
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB)
@@ -42,14 +42,14 @@ BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
BEGIN_FTR_SECTION_NESTED(50)
- PPC_POPCNTB(r3,r3)
+ PPC_POPCNTB(R3,R3)
srdi r4,r3,8
add r3,r4,r3
clrldi r3,r3,64-8
blr
FTR_SECTION_ELSE_NESTED(50)
clrlwi r3,r3,16
- PPC_POPCNTW(r3,r3)
+ PPC_POPCNTW(R3,R3)
clrldi r3,r3,64-8
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50)
@@ -66,7 +66,7 @@ BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
BEGIN_FTR_SECTION_NESTED(51)
- PPC_POPCNTB(r3,r3)
+ PPC_POPCNTB(R3,R3)
srdi r4,r3,16
add r3,r4,r3
srdi r4,r3,8
@@ -74,7 +74,7 @@ FTR_SECTION_ELSE
clrldi r3,r3,64-8
blr
FTR_SECTION_ELSE_NESTED(51)
- PPC_POPCNTW(r3,r3)
+ PPC_POPCNTW(R3,R3)
clrldi r3,r3,64-8
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51)
@@ -93,7 +93,7 @@ BEGIN_FTR_SECTION
nop
FTR_SECTION_ELSE
BEGIN_FTR_SECTION_NESTED(52)
- PPC_POPCNTB(r3,r3)
+ PPC_POPCNTB(R3,R3)
srdi r4,r3,32
add r3,r4,r3
srdi r4,r3,16
@@ -103,7 +103,7 @@ FTR_SECTION_ELSE
clrldi r3,r3,64-8
blr
FTR_SECTION_ELSE_NESTED(52)
- PPC_POPCNTD(r3,r3)
+ PPC_POPCNTD(R3,R3)
clrldi r3,r3,64-8
blr
ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52)
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
index 6a85380520b6..85aec08ab234 100644
--- a/arch/powerpc/lib/ldstfp.S
+++ b/arch/powerpc/lib/ldstfp.S
@@ -330,13 +330,13 @@ _GLOBAL(do_lxvd2x)
MTMSRD(r7)
isync
beq cr7,1f
- STXVD2X(0,r1,r8)
+ STXVD2X(0,R1,R8)
1: li r9,-EFAULT
-2: LXVD2X(0,0,r4)
+2: LXVD2X(0,R0,R4)
li r9,0
3: beq cr7,4f
bl put_vsr
- LXVD2X(0,r1,r8)
+ LXVD2X(0,R1,R8)
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
MTMSRD(r6)
@@ -358,13 +358,13 @@ _GLOBAL(do_stxvd2x)
MTMSRD(r7)
isync
beq cr7,1f
- STXVD2X(0,r1,r8)
+ STXVD2X(0,R1,R8)
bl get_vsr
1: li r9,-EFAULT
-2: STXVD2X(0,0,r4)
+2: STXVD2X(0,R0,R4)
li r9,0
3: beq cr7,4f
- LXVD2X(0,r1,r8)
+ LXVD2X(0,R1,R8)
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
MTMSRD(r6)
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index 82fea3963e15..d2bbbc8d7dc0 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -11,7 +11,11 @@
.align 7
_GLOBAL(memcpy)
+BEGIN_FTR_SECTION
std r3,48(r1) /* save destination pointer for return value */
+FTR_SECTION_ELSE
+ b memcpy_power7
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_VMX_COPY)
PPC_MTOCRF(0x01,r5)
cmpldi cr1,r5,16
neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
new file mode 100644
index 000000000000..0efdc51bc716
--- /dev/null
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -0,0 +1,647 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+#include <asm/ppc_asm.h>
+
+_GLOBAL(memcpy_power7)
+#ifdef CONFIG_ALTIVEC
+ cmpldi r5,16
+ cmpldi cr1,r5,4096
+
+ std r3,48(r1)
+
+ blt .Lshort_copy
+ bgt cr1,.Lvmx_copy
+#else
+ cmpldi r5,16
+
+ std r3,48(r1)
+
+ blt .Lshort_copy
+#endif
+
+.Lnonvmx_copy:
+ /* Get the source 8B aligned */
+ neg r6,r4
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-3)
+
+ bf cr7*4+3,1f
+ lbz r0,0(r4)
+ addi r4,r4,1
+ stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+3: sub r5,r5,r6
+ cmpldi r5,128
+ blt 5f
+
+ mflr r0
+ stdu r1,-STACKFRAMESIZE(r1)
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
+ std r17,STK_REG(R17)(r1)
+ std r18,STK_REG(R18)(r1)
+ std r19,STK_REG(R19)(r1)
+ std r20,STK_REG(R20)(r1)
+ std r21,STK_REG(R21)(r1)
+ std r22,STK_REG(R22)(r1)
+ std r0,STACKFRAMESIZE+16(r1)
+
+ srdi r6,r5,7
+ mtctr r6
+
+ /* Now do cacheline (128B) sized loads and stores. */
+ .align 5
+4:
+ ld r0,0(r4)
+ ld r6,8(r4)
+ ld r7,16(r4)
+ ld r8,24(r4)
+ ld r9,32(r4)
+ ld r10,40(r4)
+ ld r11,48(r4)
+ ld r12,56(r4)
+ ld r14,64(r4)
+ ld r15,72(r4)
+ ld r16,80(r4)
+ ld r17,88(r4)
+ ld r18,96(r4)
+ ld r19,104(r4)
+ ld r20,112(r4)
+ ld r21,120(r4)
+ addi r4,r4,128
+ std r0,0(r3)
+ std r6,8(r3)
+ std r7,16(r3)
+ std r8,24(r3)
+ std r9,32(r3)
+ std r10,40(r3)
+ std r11,48(r3)
+ std r12,56(r3)
+ std r14,64(r3)
+ std r15,72(r3)
+ std r16,80(r3)
+ std r17,88(r3)
+ std r18,96(r3)
+ std r19,104(r3)
+ std r20,112(r3)
+ std r21,120(r3)
+ addi r3,r3,128
+ bdnz 4b
+
+ clrldi r5,r5,(64-7)
+
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+ ld r17,STK_REG(R17)(r1)
+ ld r18,STK_REG(R18)(r1)
+ ld r19,STK_REG(R19)(r1)
+ ld r20,STK_REG(R20)(r1)
+ ld r21,STK_REG(R21)(r1)
+ ld r22,STK_REG(R22)(r1)
+ addi r1,r1,STACKFRAMESIZE
+
+ /* Up to 127B to go */
+5: srdi r6,r5,4
+ mtocrf 0x01,r6
+
+6: bf cr7*4+1,7f
+ ld r0,0(r4)
+ ld r6,8(r4)
+ ld r7,16(r4)
+ ld r8,24(r4)
+ ld r9,32(r4)
+ ld r10,40(r4)
+ ld r11,48(r4)
+ ld r12,56(r4)
+ addi r4,r4,64
+ std r0,0(r3)
+ std r6,8(r3)
+ std r7,16(r3)
+ std r8,24(r3)
+ std r9,32(r3)
+ std r10,40(r3)
+ std r11,48(r3)
+ std r12,56(r3)
+ addi r3,r3,64
+
+ /* Up to 63B to go */
+7: bf cr7*4+2,8f
+ ld r0,0(r4)
+ ld r6,8(r4)
+ ld r7,16(r4)
+ ld r8,24(r4)
+ addi r4,r4,32
+ std r0,0(r3)
+ std r6,8(r3)
+ std r7,16(r3)
+ std r8,24(r3)
+ addi r3,r3,32
+
+ /* Up to 31B to go */
+8: bf cr7*4+3,9f
+ ld r0,0(r4)
+ ld r6,8(r4)
+ addi r4,r4,16
+ std r0,0(r3)
+ std r6,8(r3)
+ addi r3,r3,16
+
+9: clrldi r5,r5,(64-4)
+
+ /* Up to 15B to go */
+.Lshort_copy:
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+ lwz r0,0(r4) /* Less chance of a reject with word ops */
+ lwz r6,4(r4)
+ addi r4,r4,8
+ stw r0,0(r3)
+ stw r6,4(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+ lbz r0,0(r4)
+ stb r0,0(r3)
+
+15: ld r3,48(r1)
+ blr
+
+.Lunwind_stack_nonvmx_copy:
+ addi r1,r1,STACKFRAMESIZE
+ b .Lnonvmx_copy
+
+#ifdef CONFIG_ALTIVEC
+.Lvmx_copy:
+ mflr r0
+ std r4,56(r1)
+ std r5,64(r1)
+ std r0,16(r1)
+ stdu r1,-STACKFRAMESIZE(r1)
+ bl .enter_vmx_copy
+ cmpwi r3,0
+ ld r0,STACKFRAMESIZE+16(r1)
+ ld r3,STACKFRAMESIZE+48(r1)
+ ld r4,STACKFRAMESIZE+56(r1)
+ ld r5,STACKFRAMESIZE+64(r1)
+ mtlr r0
+
+ /*
+ * We prefetch both the source and destination using enhanced touch
+ * instructions. We use a stream ID of 0 for the load side and
+ * 1 for the store side.
+ */
+ clrrdi r6,r4,7
+ clrrdi r9,r3,7
+ ori r9,r9,1 /* stream=1 */
+
+ srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */
+ cmpldi cr1,r7,0x3FF
+ ble cr1,1f
+ li r7,0x3FF
+1: lis r0,0x0E00 /* depth=7 */
+ sldi r7,r7,7
+ or r7,r7,r0
+ ori r10,r7,1 /* stream=1 */
+
+ lis r8,0x8000 /* GO=1 */
+ clrldi r8,r8,32
+
+.machine push
+.machine "power4"
+ dcbt r0,r6,0b01000
+ dcbt r0,r7,0b01010
+ dcbtst r0,r9,0b01000
+ dcbtst r0,r10,0b01010
+ eieio
+ dcbt r0,r8,0b01010 /* GO */
+.machine pop
+
+ beq .Lunwind_stack_nonvmx_copy
+
+ /*
+ * If source and destination are not relatively aligned we use a
+ * slower permute loop.
+ */
+ xor r6,r4,r3
+ rldicl. r6,r6,0,(64-4)
+ bne .Lvmx_unaligned_copy
+
+ /* Get the destination 16B aligned */
+ neg r6,r3
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-4)
+
+ bf cr7*4+3,1f
+ lbz r0,0(r4)
+ addi r4,r4,1
+ stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+3: bf cr7*4+0,4f
+ ld r0,0(r4)
+ addi r4,r4,8
+ std r0,0(r3)
+ addi r3,r3,8
+
+4: sub r5,r5,r6
+
+ /* Get the desination 128B aligned */
+ neg r6,r3
+ srdi r7,r6,4
+ mtocrf 0x01,r7
+ clrldi r6,r6,(64-7)
+
+ li r9,16
+ li r10,32
+ li r11,48
+
+ bf cr7*4+3,5f
+ lvx vr1,r0,r4
+ addi r4,r4,16
+ stvx vr1,r0,r3
+ addi r3,r3,16
+
+5: bf cr7*4+2,6f
+ lvx vr1,r0,r4
+ lvx vr0,r4,r9
+ addi r4,r4,32
+ stvx vr1,r0,r3
+ stvx vr0,r3,r9
+ addi r3,r3,32
+
+6: bf cr7*4+1,7f
+ lvx vr3,r0,r4
+ lvx vr2,r4,r9
+ lvx vr1,r4,r10
+ lvx vr0,r4,r11
+ addi r4,r4,64
+ stvx vr3,r0,r3
+ stvx vr2,r3,r9
+ stvx vr1,r3,r10
+ stvx vr0,r3,r11
+ addi r3,r3,64
+
+7: sub r5,r5,r6
+ srdi r6,r5,7
+
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
+
+ li r12,64
+ li r14,80
+ li r15,96
+ li r16,112
+
+ mtctr r6
+
+ /*
+ * Now do cacheline sized loads and stores. By this stage the
+ * cacheline stores are also cacheline aligned.
+ */
+ .align 5
+8:
+ lvx vr7,r0,r4
+ lvx vr6,r4,r9
+ lvx vr5,r4,r10
+ lvx vr4,r4,r11
+ lvx vr3,r4,r12
+ lvx vr2,r4,r14
+ lvx vr1,r4,r15
+ lvx vr0,r4,r16
+ addi r4,r4,128
+ stvx vr7,r0,r3
+ stvx vr6,r3,r9
+ stvx vr5,r3,r10
+ stvx vr4,r3,r11
+ stvx vr3,r3,r12
+ stvx vr2,r3,r14
+ stvx vr1,r3,r15
+ stvx vr0,r3,r16
+ addi r3,r3,128
+ bdnz 8b
+
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+
+ /* Up to 127B to go */
+ clrldi r5,r5,(64-7)
+ srdi r6,r5,4
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+ lvx vr3,r0,r4
+ lvx vr2,r4,r9
+ lvx vr1,r4,r10
+ lvx vr0,r4,r11
+ addi r4,r4,64
+ stvx vr3,r0,r3
+ stvx vr2,r3,r9
+ stvx vr1,r3,r10
+ stvx vr0,r3,r11
+ addi r3,r3,64
+
+9: bf cr7*4+2,10f
+ lvx vr1,r0,r4
+ lvx vr0,r4,r9
+ addi r4,r4,32
+ stvx vr1,r0,r3
+ stvx vr0,r3,r9
+ addi r3,r3,32
+
+10: bf cr7*4+3,11f
+ lvx vr1,r0,r4
+ addi r4,r4,16
+ stvx vr1,r0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+11: clrldi r5,r5,(64-4)
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+ ld r0,0(r4)
+ addi r4,r4,8
+ std r0,0(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+ lbz r0,0(r4)
+ stb r0,0(r3)
+
+15: addi r1,r1,STACKFRAMESIZE
+ ld r3,48(r1)
+ b .exit_vmx_copy /* tail call optimise */
+
+.Lvmx_unaligned_copy:
+ /* Get the destination 16B aligned */
+ neg r6,r3
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-4)
+
+ bf cr7*4+3,1f
+ lbz r0,0(r4)
+ addi r4,r4,1
+ stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+3: bf cr7*4+0,4f
+ lwz r0,0(r4) /* Less chance of a reject with word ops */
+ lwz r7,4(r4)
+ addi r4,r4,8
+ stw r0,0(r3)
+ stw r7,4(r3)
+ addi r3,r3,8
+
+4: sub r5,r5,r6
+
+ /* Get the desination 128B aligned */
+ neg r6,r3
+ srdi r7,r6,4
+ mtocrf 0x01,r7
+ clrldi r6,r6,(64-7)
+
+ li r9,16
+ li r10,32
+ li r11,48
+
+ lvsl vr16,0,r4 /* Setup permute control vector */
+ lvx vr0,0,r4
+ addi r4,r4,16
+
+ bf cr7*4+3,5f
+ lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ addi r4,r4,16
+ stvx vr8,r0,r3
+ addi r3,r3,16
+ vor vr0,vr1,vr1
+
+5: bf cr7*4+2,6f
+ lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ lvx vr0,r4,r9
+ vperm vr9,vr1,vr0,vr16
+ addi r4,r4,32
+ stvx vr8,r0,r3
+ stvx vr9,r3,r9
+ addi r3,r3,32
+
+6: bf cr7*4+1,7f
+ lvx vr3,r0,r4
+ vperm vr8,vr0,vr3,vr16
+ lvx vr2,r4,r9
+ vperm vr9,vr3,vr2,vr16
+ lvx vr1,r4,r10
+ vperm vr10,vr2,vr1,vr16
+ lvx vr0,r4,r11
+ vperm vr11,vr1,vr0,vr16
+ addi r4,r4,64
+ stvx vr8,r0,r3
+ stvx vr9,r3,r9
+ stvx vr10,r3,r10
+ stvx vr11,r3,r11
+ addi r3,r3,64
+
+7: sub r5,r5,r6
+ srdi r6,r5,7
+
+ std r14,STK_REG(R14)(r1)
+ std r15,STK_REG(R15)(r1)
+ std r16,STK_REG(R16)(r1)
+
+ li r12,64
+ li r14,80
+ li r15,96
+ li r16,112
+
+ mtctr r6
+
+ /*
+ * Now do cacheline sized loads and stores. By this stage the
+ * cacheline stores are also cacheline aligned.
+ */
+ .align 5
+8:
+ lvx vr7,r0,r4
+ vperm vr8,vr0,vr7,vr16
+ lvx vr6,r4,r9
+ vperm vr9,vr7,vr6,vr16
+ lvx vr5,r4,r10
+ vperm vr10,vr6,vr5,vr16
+ lvx vr4,r4,r11
+ vperm vr11,vr5,vr4,vr16
+ lvx vr3,r4,r12
+ vperm vr12,vr4,vr3,vr16
+ lvx vr2,r4,r14
+ vperm vr13,vr3,vr2,vr16
+ lvx vr1,r4,r15
+ vperm vr14,vr2,vr1,vr16
+ lvx vr0,r4,r16
+ vperm vr15,vr1,vr0,vr16
+ addi r4,r4,128
+ stvx vr8,r0,r3
+ stvx vr9,r3,r9
+ stvx vr10,r3,r10
+ stvx vr11,r3,r11
+ stvx vr12,r3,r12
+ stvx vr13,r3,r14
+ stvx vr14,r3,r15
+ stvx vr15,r3,r16
+ addi r3,r3,128
+ bdnz 8b
+
+ ld r14,STK_REG(R14)(r1)
+ ld r15,STK_REG(R15)(r1)
+ ld r16,STK_REG(R16)(r1)
+
+ /* Up to 127B to go */
+ clrldi r5,r5,(64-7)
+ srdi r6,r5,4
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+ lvx vr3,r0,r4
+ vperm vr8,vr0,vr3,vr16
+ lvx vr2,r4,r9
+ vperm vr9,vr3,vr2,vr16
+ lvx vr1,r4,r10
+ vperm vr10,vr2,vr1,vr16
+ lvx vr0,r4,r11
+ vperm vr11,vr1,vr0,vr16
+ addi r4,r4,64
+ stvx vr8,r0,r3
+ stvx vr9,r3,r9
+ stvx vr10,r3,r10
+ stvx vr11,r3,r11
+ addi r3,r3,64
+
+9: bf cr7*4+2,10f
+ lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ lvx vr0,r4,r9
+ vperm vr9,vr1,vr0,vr16
+ addi r4,r4,32
+ stvx vr8,r0,r3
+ stvx vr9,r3,r9
+ addi r3,r3,32
+
+10: bf cr7*4+3,11f
+ lvx vr1,r0,r4
+ vperm vr8,vr0,vr1,vr16
+ addi r4,r4,16
+ stvx vr8,r0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+11: clrldi r5,r5,(64-4)
+ addi r4,r4,-16 /* Unwind the +16 load offset */
+ mtocrf 0x01,r5
+ bf cr7*4+0,12f
+ lwz r0,0(r4) /* Less chance of a reject with word ops */
+ lwz r6,4(r4)
+ addi r4,r4,8
+ stw r0,0(r3)
+ stw r6,4(r3)
+ addi r3,r3,8
+
+12: bf cr7*4+1,13f
+ lwz r0,0(r4)
+ addi r4,r4,4
+ stw r0,0(r3)
+ addi r3,r3,4
+
+13: bf cr7*4+2,14f
+ lhz r0,0(r4)
+ addi r4,r4,2
+ sth r0,0(r3)
+ addi r3,r3,2
+
+14: bf cr7*4+3,15f
+ lbz r0,0(r4)
+ stb r0,0(r3)
+
+15: addi r1,r1,STACKFRAMESIZE
+ ld r3,48(r1)
+ b .exit_vmx_copy /* tail call optimise */
+#endif /* CONFiG_ALTIVEC */
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 093d6316435c..1b5a0a09d609 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -119,6 +119,7 @@ _GLOBAL(memchr)
2: li r3,0
blr
+#ifdef CONFIG_PPC32
_GLOBAL(__clear_user)
addi r6,r3,-4
li r3,0
@@ -160,3 +161,4 @@ _GLOBAL(__clear_user)
PPC_LONG 1b,91b
PPC_LONG 8b,92b
.text
+#endif
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
new file mode 100644
index 000000000000..3b1e48049faf
--- /dev/null
+++ b/arch/powerpc/lib/string_64.S
@@ -0,0 +1,202 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2012
+ *
+ * Author: Anton Blanchard <anton@au.ibm.com>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+ .section ".toc","aw"
+PPC64_CACHES:
+ .tc ppc64_caches[TC],ppc64_caches
+ .section ".text"
+
+/**
+ * __clear_user: - Zero a block of memory in user space, with less checking.
+ * @to: Destination address, in user space.
+ * @n: Number of bytes to zero.
+ *
+ * Zero a block of memory in user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ *
+ * Returns number of bytes that could not be cleared.
+ * On success, this will be zero.
+ */
+
+ .macro err1
+100:
+ .section __ex_table,"a"
+ .align 3
+ .llong 100b,.Ldo_err1
+ .previous
+ .endm
+
+ .macro err2
+200:
+ .section __ex_table,"a"
+ .align 3
+ .llong 200b,.Ldo_err2
+ .previous
+ .endm
+
+ .macro err3
+300:
+ .section __ex_table,"a"
+ .align 3
+ .llong 300b,.Ldo_err3
+ .previous
+ .endm
+
+.Ldo_err1:
+ mr r3,r8
+
+.Ldo_err2:
+ mtctr r4
+1:
+err3; stb r0,0(r3)
+ addi r3,r3,1
+ addi r4,r4,-1
+ bdnz 1b
+
+.Ldo_err3:
+ mr r3,r4
+ blr
+
+_GLOBAL(__clear_user)
+ cmpdi r4,32
+ neg r6,r3
+ li r0,0
+ blt .Lshort_clear
+ mr r8,r3
+ mtocrf 0x01,r6
+ clrldi r6,r6,(64-3)
+
+ /* Get the destination 8 byte aligned */
+ bf cr7*4+3,1f
+err1; stb r0,0(r3)
+ addi r3,r3,1
+
+1: bf cr7*4+2,2f
+err1; sth r0,0(r3)
+ addi r3,r3,2
+
+2: bf cr7*4+1,3f
+err1; stw r0,0(r3)
+ addi r3,r3,4
+
+3: sub r4,r4,r6
+
+ cmpdi r4,32
+ cmpdi cr1,r4,512
+ blt .Lshort_clear
+ bgt cr1,.Llong_clear
+
+.Lmedium_clear:
+ srdi r6,r4,5
+ mtctr r6
+
+ /* Do 32 byte chunks */
+4:
+err2; std r0,0(r3)
+err2; std r0,8(r3)
+err2; std r0,16(r3)
+err2; std r0,24(r3)
+ addi r3,r3,32
+ addi r4,r4,-32
+ bdnz 4b
+
+.Lshort_clear:
+ /* up to 31 bytes to go */
+ cmpdi r4,16
+ blt 6f
+err2; std r0,0(r3)
+err2; std r0,8(r3)
+ addi r3,r3,16
+ addi r4,r4,-16
+
+ /* Up to 15 bytes to go */
+6: mr r8,r3
+ clrldi r4,r4,(64-4)
+ mtocrf 0x01,r4
+ bf cr7*4+0,7f
+err1; std r0,0(r3)
+ addi r3,r3,8
+
+7: bf cr7*4+1,8f
+err1; stw r0,0(r3)
+ addi r3,r3,4
+
+8: bf cr7*4+2,9f
+err1; sth r0,0(r3)
+ addi r3,r3,2
+
+9: bf cr7*4+3,10f
+err1; stb r0,0(r3)
+
+10: li r3,0
+ blr
+
+.Llong_clear:
+ ld r5,PPC64_CACHES@toc(r2)
+
+ bf cr7*4+0,11f
+err2; std r0,0(r3)
+ addi r3,r3,8
+ addi r4,r4,-8
+
+ /* Destination is 16 byte aligned, need to get it cacheline aligned */
+11: lwz r7,DCACHEL1LOGLINESIZE(r5)
+ lwz r9,DCACHEL1LINESIZE(r5)
+
+ /*
+ * With worst case alignment the long clear loop takes a minimum
+ * of 1 byte less than 2 cachelines.
+ */
+ sldi r10,r9,2
+ cmpd r4,r10
+ blt .Lmedium_clear
+
+ neg r6,r3
+ addi r10,r9,-1
+ and. r5,r6,r10
+ beq 13f
+
+ srdi r6,r5,4
+ mtctr r6
+ mr r8,r3
+12:
+err1; std r0,0(r3)
+err1; std r0,8(r3)
+ addi r3,r3,16
+ bdnz 12b
+
+ sub r4,r4,r5
+
+13: srd r6,r4,r7
+ mtctr r6
+ mr r8,r3
+14:
+err1; dcbz r0,r3
+ add r3,r3,r9
+ bdnz 14b
+
+ and r4,r4,r10
+
+ cmpdi r4,32
+ blt .Lshort_clear
+ b .Lmedium_clear
diff --git a/arch/powerpc/lib/copyuser_power7_vmx.c b/arch/powerpc/lib/vmx-helper.c
index bf2654f2b68e..3cf529ceec5b 100644
--- a/arch/powerpc/lib/copyuser_power7_vmx.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -22,7 +22,7 @@
#include <linux/hardirq.h>
#include <asm/switch_to.h>
-int enter_vmx_copy(void)
+int enter_vmx_usercopy(void)
{
if (in_interrupt())
return 0;
@@ -44,8 +44,31 @@ int enter_vmx_copy(void)
* This function must return 0 because we tail call optimise when calling
* from __copy_tofrom_user_power7 which returns 0 on success.
*/
-int exit_vmx_copy(void)
+int exit_vmx_usercopy(void)
{
pagefault_enable();
return 0;
}
+
+int enter_vmx_copy(void)
+{
+ if (in_interrupt())
+ return 0;
+
+ preempt_disable();
+
+ enable_kernel_altivec();
+
+ return 1;
+}
+
+/*
+ * All calls to this function will be optimised into tail calls. We are
+ * passed a pointer to the destination which we return as required by a
+ * memcpy implementation.
+ */
+void *exit_vmx_copy(void *dest)
+{
+ preempt_enable();
+ return dest;
+}
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index b13d58932bf6..115347f74ce5 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -184,7 +184,7 @@ _GLOBAL(add_hash_page)
add r3,r3,r0 /* note create_hpte trims to 24 bits */
#ifdef CONFIG_SMP
- rlwinm r8,r1,0,0,(31-THREAD_SHIFT) /* use cpu number to make tag */
+ CURRENT_THREAD_INFO(r8, r1) /* use cpu number to make tag */
lwz r8,TI_CPU(r8) /* to go in mmu_hash_lock */
oris r8,r8,12
#endif /* CONFIG_SMP */
@@ -545,7 +545,7 @@ _GLOBAL(flush_hash_pages)
#ifdef CONFIG_SMP
addis r9,r7,mmu_hash_lock@ha
addi r9,r9,mmu_hash_lock@l
- rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r8, r1)
add r8,r8,r7
lwz r8,TI_CPU(r8)
oris r8,r8,9
@@ -639,7 +639,7 @@ _GLOBAL(flush_hash_patch_B)
*/
_GLOBAL(_tlbie)
#ifdef CONFIG_SMP
- rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r8, r1)
lwz r8,TI_CPU(r8)
oris r8,r8,11
mfmsr r10
@@ -677,7 +677,7 @@ _GLOBAL(_tlbie)
*/
_GLOBAL(_tlbia)
#if defined(CONFIG_SMP)
- rlwinm r8,r1,0,0,(31-THREAD_SHIFT)
+ CURRENT_THREAD_INFO(r8, r1)
lwz r8,TI_CPU(r8)
oris r8,r8,10
mfmsr r10
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index a242b5d7cbe4..602aeb06d298 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -34,14 +34,6 @@
* | CR save area (SP + 8)
* SP ---> +-- Back chain (SP + 0)
*/
-#define STACKFRAMESIZE 256
-
-/* Save parameters offsets */
-#define STK_PARM(i) (STACKFRAMESIZE + 48 + ((i)-3)*8)
-
-/* Save non-volatile offsets */
-#define STK_REG(i) (112 + ((i)-14)*8)
-
#ifndef CONFIG_PPC_64K_PAGES
@@ -64,9 +56,9 @@ _GLOBAL(__hash_page_4K)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
/* Save all params that we need after a function call */
- std r6,STK_PARM(r6)(r1)
- std r8,STK_PARM(r8)(r1)
- std r9,STK_PARM(r9)(r1)
+ std r6,STK_PARAM(R6)(r1)
+ std r8,STK_PARAM(R8)(r1)
+ std r9,STK_PARAM(R9)(r1)
/* Save non-volatile registers.
* r31 will hold "old PTE"
@@ -75,11 +67,11 @@ _GLOBAL(__hash_page_4K)
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
*/
- std r27,STK_REG(r27)(r1)
- std r28,STK_REG(r28)(r1)
- std r29,STK_REG(r29)(r1)
- std r30,STK_REG(r30)(r1)
- std r31,STK_REG(r31)(r1)
+ std r27,STK_REG(R27)(r1)
+ std r28,STK_REG(R28)(r1)
+ std r29,STK_REG(R29)(r1)
+ std r30,STK_REG(R30)(r1)
+ std r31,STK_REG(R31)(r1)
/* Step 1:
*
@@ -162,7 +154,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
/* At this point, r3 contains new PP bits, save them in
* place of "access" in the param area (sic)
*/
- std r3,STK_PARM(r4)(r1)
+ std r3,STK_PARAM(R4)(r1)
/* Get htab_hash_mask */
ld r4,htab_hash_mask@got(2)
@@ -192,11 +184,11 @@ htab_insert_pte:
rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARM(r9)(r1) /* segment size */
+ ld r9,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert1)
bl . /* Patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -215,11 +207,11 @@ _GLOBAL(htab_call_hpte_insert1)
rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARM(r9)(r1) /* segment size */
+ ld r9,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert2)
bl . /* Patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -255,15 +247,15 @@ htab_pte_insert_ok:
* (maybe add eieio may be good still ?)
*/
htab_write_out_pte:
- ld r6,STK_PARM(r6)(r1)
+ ld r6,STK_PARAM(R6)(r1)
std r30,0(r6)
li r3, 0
htab_bail:
- ld r27,STK_REG(r27)(r1)
- ld r28,STK_REG(r28)(r1)
- ld r29,STK_REG(r29)(r1)
- ld r30,STK_REG(r30)(r1)
- ld r31,STK_REG(r31)(r1)
+ ld r27,STK_REG(R27)(r1)
+ ld r28,STK_REG(R28)(r1)
+ ld r29,STK_REG(R29)(r1)
+ ld r30,STK_REG(R30)(r1)
+ ld r31,STK_REG(R31)(r1)
addi r1,r1,STACKFRAMESIZE
ld r0,16(r1)
mtlr r0
@@ -288,8 +280,8 @@ htab_modify_pte:
/* Call ppc_md.hpte_updatepp */
mr r5,r29 /* va */
li r6,MMU_PAGE_4K /* page size */
- ld r7,STK_PARM(r9)(r1) /* segment size */
- ld r8,STK_PARM(r8)(r1) /* get "local" param */
+ ld r7,STK_PARAM(R9)(r1) /* segment size */
+ ld r8,STK_PARAM(R8)(r1) /* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
bl . /* Patched by htab_finish_init() */
@@ -312,7 +304,7 @@ htab_wrong_access:
htab_pte_insert_failure:
/* Bail out restoring old PTE */
- ld r6,STK_PARM(r6)(r1)
+ ld r6,STK_PARAM(R6)(r1)
std r31,0(r6)
li r3,-1
b htab_bail
@@ -340,9 +332,9 @@ _GLOBAL(__hash_page_4K)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
/* Save all params that we need after a function call */
- std r6,STK_PARM(r6)(r1)
- std r8,STK_PARM(r8)(r1)
- std r9,STK_PARM(r9)(r1)
+ std r6,STK_PARAM(R6)(r1)
+ std r8,STK_PARAM(R8)(r1)
+ std r9,STK_PARAM(R9)(r1)
/* Save non-volatile registers.
* r31 will hold "old PTE"
@@ -353,13 +345,13 @@ _GLOBAL(__hash_page_4K)
* r26 is the hidx mask
* r25 is the index in combo page
*/
- std r25,STK_REG(r25)(r1)
- std r26,STK_REG(r26)(r1)
- std r27,STK_REG(r27)(r1)
- std r28,STK_REG(r28)(r1)
- std r29,STK_REG(r29)(r1)
- std r30,STK_REG(r30)(r1)
- std r31,STK_REG(r31)(r1)
+ std r25,STK_REG(R25)(r1)
+ std r26,STK_REG(R26)(r1)
+ std r27,STK_REG(R27)(r1)
+ std r28,STK_REG(R28)(r1)
+ std r29,STK_REG(R29)(r1)
+ std r30,STK_REG(R30)(r1)
+ std r31,STK_REG(R31)(r1)
/* Step 1:
*
@@ -452,7 +444,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
/* At this point, r3 contains new PP bits, save them in
* place of "access" in the param area (sic)
*/
- std r3,STK_PARM(r4)(r1)
+ std r3,STK_PARAM(R4)(r1)
/* Get htab_hash_mask */
ld r4,htab_hash_mask@got(2)
@@ -473,7 +465,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
andis. r0,r31,_PAGE_COMBO@h
beq htab_inval_old_hpte
- ld r6,STK_PARM(r6)(r1)
+ ld r6,STK_PARAM(R6)(r1)
ori r26,r6,0x8000 /* Load the hidx mask */
ld r26,0(r26)
addi r5,r25,36 /* Check actual HPTE_SUB bit, this */
@@ -495,11 +487,11 @@ htab_special_pfn:
rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARM(r9)(r1) /* segment size */
+ ld r9,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert1)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -522,11 +514,11 @@ _GLOBAL(htab_call_hpte_insert1)
rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
- ld r9,STK_PARM(r9)(r1) /* segment size */
+ ld r9,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(htab_call_hpte_insert2)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -559,8 +551,8 @@ htab_inval_old_hpte:
mr r4,r31 /* PTE.pte */
li r5,0 /* PTE.hidx */
li r6,MMU_PAGE_64K /* psize */
- ld r7,STK_PARM(r9)(r1) /* ssize */
- ld r8,STK_PARM(r8)(r1) /* local */
+ ld r7,STK_PARAM(R9)(r1) /* ssize */
+ ld r8,STK_PARAM(R8)(r1) /* local */
bl .flush_hash_page
/* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
lis r0,_PAGE_HPTE_SUB@h
@@ -576,7 +568,7 @@ htab_pte_insert_ok:
/* Insert slot number & secondary bit in PTE second half,
* clear _PAGE_BUSY and set approriate HPTE slot bit
*/
- ld r6,STK_PARM(r6)(r1)
+ ld r6,STK_PARAM(R6)(r1)
li r0,_PAGE_BUSY
andc r30,r30,r0
/* HPTE SUB bit */
@@ -597,13 +589,13 @@ htab_pte_insert_ok:
std r30,0(r6)
li r3, 0
htab_bail:
- ld r25,STK_REG(r25)(r1)
- ld r26,STK_REG(r26)(r1)
- ld r27,STK_REG(r27)(r1)
- ld r28,STK_REG(r28)(r1)
- ld r29,STK_REG(r29)(r1)
- ld r30,STK_REG(r30)(r1)
- ld r31,STK_REG(r31)(r1)
+ ld r25,STK_REG(R25)(r1)
+ ld r26,STK_REG(R26)(r1)
+ ld r27,STK_REG(R27)(r1)
+ ld r28,STK_REG(R28)(r1)
+ ld r29,STK_REG(R29)(r1)
+ ld r30,STK_REG(R30)(r1)
+ ld r31,STK_REG(R31)(r1)
addi r1,r1,STACKFRAMESIZE
ld r0,16(r1)
mtlr r0
@@ -630,8 +622,8 @@ htab_modify_pte:
/* Call ppc_md.hpte_updatepp */
mr r5,r29 /* va */
li r6,MMU_PAGE_4K /* page size */
- ld r7,STK_PARM(r9)(r1) /* segment size */
- ld r8,STK_PARM(r8)(r1) /* get "local" param */
+ ld r7,STK_PARAM(R9)(r1) /* segment size */
+ ld r8,STK_PARAM(R8)(r1) /* get "local" param */
_GLOBAL(htab_call_hpte_updatepp)
bl . /* patched by htab_finish_init() */
@@ -644,7 +636,7 @@ _GLOBAL(htab_call_hpte_updatepp)
/* Clear the BUSY bit and Write out the PTE */
li r0,_PAGE_BUSY
andc r30,r30,r0
- ld r6,STK_PARM(r6)(r1)
+ ld r6,STK_PARAM(R6)(r1)
std r30,0(r6)
li r3,0
b htab_bail
@@ -657,7 +649,7 @@ htab_wrong_access:
htab_pte_insert_failure:
/* Bail out restoring old PTE */
- ld r6,STK_PARM(r6)(r1)
+ ld r6,STK_PARAM(R6)(r1)
std r31,0(r6)
li r3,-1
b htab_bail
@@ -677,9 +669,9 @@ _GLOBAL(__hash_page_64K)
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
/* Save all params that we need after a function call */
- std r6,STK_PARM(r6)(r1)
- std r8,STK_PARM(r8)(r1)
- std r9,STK_PARM(r9)(r1)
+ std r6,STK_PARAM(R6)(r1)
+ std r8,STK_PARAM(R8)(r1)
+ std r9,STK_PARAM(R9)(r1)
/* Save non-volatile registers.
* r31 will hold "old PTE"
@@ -688,11 +680,11 @@ _GLOBAL(__hash_page_64K)
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
*/
- std r27,STK_REG(r27)(r1)
- std r28,STK_REG(r28)(r1)
- std r29,STK_REG(r29)(r1)
- std r30,STK_REG(r30)(r1)
- std r31,STK_REG(r31)(r1)
+ std r27,STK_REG(R27)(r1)
+ std r28,STK_REG(R28)(r1)
+ std r29,STK_REG(R29)(r1)
+ std r30,STK_REG(R30)(r1)
+ std r31,STK_REG(R31)(r1)
/* Step 1:
*
@@ -780,7 +772,7 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
/* At this point, r3 contains new PP bits, save them in
* place of "access" in the param area (sic)
*/
- std r3,STK_PARM(r4)(r1)
+ std r3,STK_PARAM(R4)(r1)
/* Get htab_hash_mask */
ld r4,htab_hash_mask@got(2)
@@ -813,11 +805,11 @@ ht64_insert_pte:
rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_64K
- ld r9,STK_PARM(r9)(r1) /* segment size */
+ ld r9,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(ht64_call_hpte_insert1)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -836,11 +828,11 @@ _GLOBAL(ht64_call_hpte_insert1)
rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */
/* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
mr r4,r29 /* Retrieve va */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_64K
- ld r9,STK_PARM(r9)(r1) /* segment size */
+ ld r9,STK_PARAM(R9)(r1) /* segment size */
_GLOBAL(ht64_call_hpte_insert2)
bl . /* patched by htab_finish_init() */
cmpdi 0,r3,0
@@ -876,15 +868,15 @@ ht64_pte_insert_ok:
* (maybe add eieio may be good still ?)
*/
ht64_write_out_pte:
- ld r6,STK_PARM(r6)(r1)
+ ld r6,STK_PARAM(R6)(r1)
std r30,0(r6)
li r3, 0
ht64_bail:
- ld r27,STK_REG(r27)(r1)
- ld r28,STK_REG(r28)(r1)
- ld r29,STK_REG(r29)(r1)
- ld r30,STK_REG(r30)(r1)
- ld r31,STK_REG(r31)(r1)
+ ld r27,STK_REG(R27)(r1)
+ ld r28,STK_REG(R28)(r1)
+ ld r29,STK_REG(R29)(r1)
+ ld r30,STK_REG(R30)(r1)
+ ld r31,STK_REG(R31)(r1)
addi r1,r1,STACKFRAMESIZE
ld r0,16(r1)
mtlr r0
@@ -909,8 +901,8 @@ ht64_modify_pte:
/* Call ppc_md.hpte_updatepp */
mr r5,r29 /* va */
li r6,MMU_PAGE_64K
- ld r7,STK_PARM(r9)(r1) /* segment size */
- ld r8,STK_PARM(r8)(r1) /* get "local" param */
+ ld r7,STK_PARAM(R9)(r1) /* segment size */
+ ld r8,STK_PARAM(R8)(r1) /* get "local" param */
_GLOBAL(ht64_call_hpte_updatepp)
bl . /* patched by htab_finish_init() */
@@ -933,7 +925,7 @@ ht64_wrong_access:
ht64_pte_insert_failure:
/* Bail out restoring old PTE */
- ld r6,STK_PARM(r6)(r1)
+ ld r6,STK_PARAM(R6)(r1)
std r31,0(r6)
li r3,-1
b ht64_bail
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b6edbb3b4a54..39b159751c35 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -340,6 +340,8 @@ static int __init find_min_common_depth(void)
dbg("Using form 1 affinity\n");
form1_affinity = 1;
}
+
+ of_node_put(chosen);
}
}
@@ -635,11 +637,11 @@ static inline int __init read_usm_ranges(const u32 **usm)
*/
static void __init parse_drconf_memory(struct device_node *memory)
{
- const u32 *dm, *usm;
+ const u32 *uninitialized_var(dm), *usm;
unsigned int n, rc, ranges, is_kexec_kdump = 0;
unsigned long lmb_size, base, size, sz;
int nid;
- struct assoc_arrays aa;
+ struct assoc_arrays aa = { .arrays = NULL };
n = of_get_drconf_memory(memory, &dm);
if (!n)
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index ff672bd8fea9..f09d48e3268d 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -126,7 +126,7 @@ BEGIN_MMU_FTR_SECTION
/* Set the TLB reservation and search for existing entry. Then load
* the entry.
*/
- PPC_TLBSRX_DOT(0,r16)
+ PPC_TLBSRX_DOT(0,R16)
ldx r14,r14,r15 /* grab pgd entry */
beq normal_tlb_miss_done /* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
@@ -395,7 +395,7 @@ BEGIN_MMU_FTR_SECTION
/* Set the TLB reservation and search for existing entry. Then load
* the entry.
*/
- PPC_TLBSRX_DOT(0,r16)
+ PPC_TLBSRX_DOT(0,R16)
ld r14,0(r10)
beq normal_tlb_miss_done
MMU_FTR_SECTION_ELSE
@@ -528,7 +528,7 @@ BEGIN_MMU_FTR_SECTION
/* Search if we already have a TLB entry for that virtual address, and
* if we do, bail out.
*/
- PPC_TLBSRX_DOT(0,r16)
+ PPC_TLBSRX_DOT(0,R16)
beq virt_page_table_tlb_miss_done
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)
@@ -779,7 +779,7 @@ htw_tlb_miss:
*
* MAS1:IND should be already set based on MAS4
*/
- PPC_TLBSRX_DOT(0,r16)
+ PPC_TLBSRX_DOT(0,R16)
beq htw_tlb_miss_done
/* Now, we need to walk the page tables. First check if we are in
@@ -919,7 +919,7 @@ tlb_load_linear:
mtspr SPRN_MAS1,r15
/* Already somebody there ? */
- PPC_TLBSRX_DOT(0,r16)
+ PPC_TLBSRX_DOT(0,R16)
beq tlb_load_linear_done
/* Now we build the remaining MAS. MAS0 and 2 should be fine
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index 7c63c0ed4f1b..fab919fd1384 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -266,7 +266,7 @@ BEGIN_MMU_FTR_SECTION
andi. r3,r3,MMUCSR0_TLBFI@l
bne 1b
MMU_FTR_SECTION_ELSE
- PPC_TLBILX_ALL(0,0)
+ PPC_TLBILX_ALL(0,R0)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
msync
isync
@@ -279,7 +279,7 @@ BEGIN_MMU_FTR_SECTION
wrteei 0
mfspr r4,SPRN_MAS6 /* save MAS6 */
mtspr SPRN_MAS6,r3
- PPC_TLBILX_PID(0,0)
+ PPC_TLBILX_PID(0,R0)
mtspr SPRN_MAS6,r4 /* restore MAS6 */
wrtee r10
MMU_FTR_SECTION_ELSE
@@ -313,7 +313,7 @@ BEGIN_MMU_FTR_SECTION
mtspr SPRN_MAS1,r4
tlbwe
MMU_FTR_SECTION_ELSE
- PPC_TLBILX_VA(0,r3)
+ PPC_TLBILX_VA(0,R3)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
msync
isync
@@ -331,7 +331,7 @@ _GLOBAL(_tlbil_pid)
mfmsr r10
wrteei 0
mtspr SPRN_MAS6,r4
- PPC_TLBILX_PID(0,0)
+ PPC_TLBILX_PID(0,R0)
wrtee r10
msync
isync
@@ -343,14 +343,14 @@ _GLOBAL(_tlbil_pid_noind)
ori r4,r4,MAS6_SIND
wrteei 0
mtspr SPRN_MAS6,r4
- PPC_TLBILX_PID(0,0)
+ PPC_TLBILX_PID(0,R0)
wrtee r10
msync
isync
blr
_GLOBAL(_tlbil_all)
- PPC_TLBILX_ALL(0,0)
+ PPC_TLBILX_ALL(0,R0)
msync
isync
blr
@@ -364,7 +364,7 @@ _GLOBAL(_tlbil_va)
beq 1f
rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
- PPC_TLBILX_VA(0,r3)
+ PPC_TLBILX_VA(0,R3)
msync
isync
wrtee r10
@@ -379,7 +379,7 @@ _GLOBAL(_tlbivax_bcast)
beq 1f
rlwimi r4,r6,MAS6_SIND_SHIFT,MAS6_SIND
1: mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
- PPC_TLBIVAX(0,r3)
+ PPC_TLBIVAX(0,R3)
eieio
tlbsync
sync
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 5c3cf2d04e41..1fc8109bf2f9 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -75,23 +75,23 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define PPC_NOP() EMIT(PPC_INST_NOP)
#define PPC_BLR() EMIT(PPC_INST_BLR)
#define PPC_BLRL() EMIT(PPC_INST_BLRL)
-#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | __PPC_RT(r))
-#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | __PPC_RT(d) | \
- __PPC_RA(a) | IMM_L(i))
+#define PPC_MTLR(r) EMIT(PPC_INST_MTLR | ___PPC_RT(r))
+#define PPC_ADDI(d, a, i) EMIT(PPC_INST_ADDI | ___PPC_RT(d) | \
+ ___PPC_RA(a) | IMM_L(i))
#define PPC_MR(d, a) PPC_OR(d, a, a)
#define PPC_LI(r, i) PPC_ADDI(r, 0, i)
#define PPC_ADDIS(d, a, i) EMIT(PPC_INST_ADDIS | \
- __PPC_RS(d) | __PPC_RA(a) | IMM_L(i))
+ ___PPC_RS(d) | ___PPC_RA(a) | IMM_L(i))
#define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
-#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | __PPC_RS(r) | \
- __PPC_RA(base) | ((i) & 0xfffc))
-
-#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | __PPC_RT(r) | \
- __PPC_RA(base) | IMM_L(i))
-#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | __PPC_RT(r) | \
- __PPC_RA(base) | IMM_L(i))
-#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | __PPC_RT(r) | \
- __PPC_RA(base) | IMM_L(i))
+#define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
+ ___PPC_RA(base) | ((i) & 0xfffc))
+
+#define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
+ ___PPC_RA(base) | IMM_L(i))
+#define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
+ ___PPC_RA(base) | IMM_L(i))
+#define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
+ ___PPC_RA(base) | IMM_L(i))
/* Convenience helpers for the above with 'far' offsets: */
#define PPC_LD_OFFS(r, base, i) do { if ((i) < 32768) PPC_LD(r, base, i); \
else { PPC_ADDIS(r, base, IMM_HA(i)); \
@@ -105,52 +105,52 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
else { PPC_ADDIS(r, base, IMM_HA(i)); \
PPC_LHZ(r, r, IMM_L(i)); } } while(0)
-#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | __PPC_RA(a) | IMM_L(i))
-#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | __PPC_RA(a) | IMM_L(i))
-#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | __PPC_RA(a) | IMM_L(i))
-#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | __PPC_RA(a) | __PPC_RB(b))
-
-#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | __PPC_RT(d) | \
- __PPC_RB(a) | __PPC_RA(b))
-#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | __PPC_RT(d) | \
- __PPC_RA(a) | __PPC_RB(b))
-#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | __PPC_RT(d) | \
- __PPC_RA(a) | __PPC_RB(b))
-#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | __PPC_RT(d) | \
- __PPC_RA(a) | __PPC_RB(b))
-#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | __PPC_RT(d) | \
- __PPC_RA(a) | IMM_L(i))
-#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | __PPC_RT(d) | \
- __PPC_RA(a) | __PPC_RB(b))
-#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | __PPC_RA(d) | \
- __PPC_RS(a) | __PPC_RB(b))
-#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | __PPC_RA(d) | \
- __PPC_RS(a) | IMM_L(i))
-#define PPC_AND_DOT(d, a, b) EMIT(PPC_INST_ANDDOT | __PPC_RA(d) | \
- __PPC_RS(a) | __PPC_RB(b))
-#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | __PPC_RA(d) | \
- __PPC_RS(a) | __PPC_RB(b))
-#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | __PPC_RA(d) | \
- __PPC_RS(a) | IMM_L(i))
-#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | __PPC_RA(d) | \
- __PPC_RS(a) | IMM_L(i))
-#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | __PPC_RA(d) | \
- __PPC_RS(a) | __PPC_RB(s))
-#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | __PPC_RA(d) | \
- __PPC_RS(a) | __PPC_RB(s))
+#define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
+#define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
+#define PPC_CMPLWI(a, i) EMIT(PPC_INST_CMPLWI | ___PPC_RA(a) | IMM_L(i))
+#define PPC_CMPLW(a, b) EMIT(PPC_INST_CMPLW | ___PPC_RA(a) | ___PPC_RB(b))
+
+#define PPC_SUB(d, a, b) EMIT(PPC_INST_SUB | ___PPC_RT(d) | \
+ ___PPC_RB(a) | ___PPC_RA(b))
+#define PPC_ADD(d, a, b) EMIT(PPC_INST_ADD | ___PPC_RT(d) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_MUL(d, a, b) EMIT(PPC_INST_MULLW | ___PPC_RT(d) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_MULHWU(d, a, b) EMIT(PPC_INST_MULHWU | ___PPC_RT(d) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_MULI(d, a, i) EMIT(PPC_INST_MULLI | ___PPC_RT(d) | \
+ ___PPC_RA(a) | IMM_L(i))
+#define PPC_DIVWU(d, a, b) EMIT(PPC_INST_DIVWU | ___PPC_RT(d) | \
+ ___PPC_RA(a) | ___PPC_RB(b))
+#define PPC_AND(d, a, b) EMIT(PPC_INST_AND | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_ANDI(d, a, i) EMIT(PPC_INST_ANDI | ___PPC_RA(d) | \
+ ___PPC_RS(a) | IMM_L(i))
+#define PPC_AND_DOT(d, a, b) EMIT(PPC_INST_ANDDOT | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_OR(d, a, b) EMIT(PPC_INST_OR | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(b))
+#define PPC_ORI(d, a, i) EMIT(PPC_INST_ORI | ___PPC_RA(d) | \
+ ___PPC_RS(a) | IMM_L(i))
+#define PPC_ORIS(d, a, i) EMIT(PPC_INST_ORIS | ___PPC_RA(d) | \
+ ___PPC_RS(a) | IMM_L(i))
+#define PPC_SLW(d, a, s) EMIT(PPC_INST_SLW | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(s))
+#define PPC_SRW(d, a, s) EMIT(PPC_INST_SRW | ___PPC_RA(d) | \
+ ___PPC_RS(a) | ___PPC_RB(s))
/* slwi = rlwinm Rx, Ry, n, 0, 31-n */
-#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \
- __PPC_RS(a) | __PPC_SH(i) | \
+#define PPC_SLWI(d, a, i) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
__PPC_MB(0) | __PPC_ME(31-(i)))
/* srwi = rlwinm Rx, Ry, 32-n, n, 31 */
-#define PPC_SRWI(d, a, i) EMIT(PPC_INST_RLWINM | __PPC_RA(d) | \
- __PPC_RS(a) | __PPC_SH(32-(i)) | \
+#define PPC_SRWI(d, a, i) EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(32-(i)) | \
__PPC_MB(i) | __PPC_ME(31))
/* sldi = rldicr Rx, Ry, n, 63-n */
-#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | __PPC_RA(d) | \
- __PPC_RS(a) | __PPC_SH(i) | \
+#define PPC_SLDI(d, a, i) EMIT(PPC_INST_RLDICR | ___PPC_RA(d) | \
+ ___PPC_RS(a) | __PPC_SH(i) | \
__PPC_MB(63-(i)) | (((i) & 0x20) >> 4))
-#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | __PPC_RT(d) | __PPC_RA(a))
+#define PPC_NEG(d, a) EMIT(PPC_INST_NEG | ___PPC_RT(d) | ___PPC_RA(a))
/* Long jump; (unconditional 'branch') */
#define PPC_JMP(dest) EMIT(PPC_INST_BRANCH | \
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index 55ba3855a97f..7d3a3b5619a2 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -105,6 +105,7 @@ sk_load_byte_msh_positive_offset:
mr r4, r_addr; \
li r6, SIZE; \
bl skb_copy_bits; \
+ nop; \
/* R3 = 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
ld r0, 16(r1); \
@@ -156,6 +157,7 @@ bpf_slow_path_byte_msh:
mr r4, r_addr; \
li r5, SIZE; \
bl bpf_internal_load_pointer_neg_helper; \
+ nop; \
/* R3 != 0 on success */ \
addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
ld r0, 16(r1); \
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 2dc8b1484845..dd1130642d07 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -39,7 +39,7 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
/* Make stackframe */
if (ctx->seen & SEEN_DATAREF) {
/* If we call any helpers (for loads), save LR */
- EMIT(PPC_INST_MFLR | __PPC_RT(0));
+ EMIT(PPC_INST_MFLR | __PPC_RT(R0));
PPC_STD(0, 1, 16);
/* Back up non-volatile regs. */
@@ -56,7 +56,7 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
PPC_STD(i, 1, -(8*(32-i)));
}
}
- EMIT(PPC_INST_STDU | __PPC_RS(1) | __PPC_RA(1) |
+ EMIT(PPC_INST_STDU | __PPC_RS(R1) | __PPC_RA(R1) |
(-BPF_PPC_STACKFRAME & 0xfffc));
}
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index e8a18d1cc7c9..74d1e780748b 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -57,7 +57,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
lr = regs->link;
sp = regs->gpr[1];
- perf_callchain_store(entry, regs->nip);
+ perf_callchain_store(entry, perf_instruction_pointer(regs));
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return;
@@ -238,7 +238,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
struct signal_frame_64 __user *sigframe;
unsigned long __user *fp, *uregs;
- next_ip = regs->nip;
+ next_ip = perf_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
@@ -444,7 +444,7 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
long level = 0;
unsigned int __user *fp, *uregs;
- next_ip = regs->nip;
+ next_ip = perf_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 8f84bcba18da..77b49ddda9d3 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -73,7 +73,10 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
return 0;
}
-static inline void perf_read_regs(struct pt_regs *regs) { }
+static inline void perf_read_regs(struct pt_regs *regs)
+{
+ regs->result = 0;
+}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
return 0;
@@ -116,6 +119,26 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
*addrp = mfspr(SPRN_SDAR);
}
+static bool mmcra_sihv(unsigned long mmcra)
+{
+ unsigned long sihv = MMCRA_SIHV;
+
+ if (ppmu->flags & PPMU_ALT_SIPR)
+ sihv = POWER6_MMCRA_SIHV;
+
+ return !!(mmcra & sihv);
+}
+
+static bool mmcra_sipr(unsigned long mmcra)
+{
+ unsigned long sipr = MMCRA_SIPR;
+
+ if (ppmu->flags & PPMU_ALT_SIPR)
+ sipr = POWER6_MMCRA_SIPR;
+
+ return !!(mmcra & sipr);
+}
+
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
if (regs->msr & MSR_PR)
@@ -128,19 +151,9 @@ static inline u32 perf_flags_from_msr(struct pt_regs *regs)
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
- unsigned long sihv = MMCRA_SIHV;
- unsigned long sipr = MMCRA_SIPR;
+ unsigned long use_siar = regs->result;
- /* Not a PMU interrupt: Make up flags from regs->msr */
- if (TRAP(regs) != 0xf00)
- return perf_flags_from_msr(regs);
-
- /*
- * If we don't support continuous sampling and this
- * is not a marked event, same deal
- */
- if ((ppmu->flags & PPMU_NO_CONT_SAMPLING) &&
- !(mmcra & MMCRA_SAMPLE_ENABLE))
+ if (!use_siar)
return perf_flags_from_msr(regs);
/*
@@ -156,15 +169,10 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
return PERF_RECORD_MISC_USER;
}
- if (ppmu->flags & PPMU_ALT_SIPR) {
- sihv = POWER6_MMCRA_SIHV;
- sipr = POWER6_MMCRA_SIPR;
- }
-
/* PR has priority over HV, so order below is important */
- if (mmcra & sipr)
+ if (mmcra_sipr(mmcra))
return PERF_RECORD_MISC_USER;
- if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV))
+ if (mmcra_sihv(mmcra) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
return PERF_RECORD_MISC_KERNEL;
}
@@ -172,10 +180,45 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
/*
* Overload regs->dsisr to store MMCRA so we only need to read it once
* on each interrupt.
+ * Overload regs->result to specify whether we should use the MSR (result
+ * is zero) or the SIAR (result is non zero).
*/
static inline void perf_read_regs(struct pt_regs *regs)
{
- regs->dsisr = mfspr(SPRN_MMCRA);
+ unsigned long mmcra = mfspr(SPRN_MMCRA);
+ int marked = mmcra & MMCRA_SAMPLE_ENABLE;
+ int use_siar;
+
+ /*
+ * If this isn't a PMU exception (eg a software event) the SIAR is
+ * not valid. Use pt_regs.
+ *
+ * If it is a marked event use the SIAR.
+ *
+ * If the PMU doesn't update the SIAR for non marked events use
+ * pt_regs.
+ *
+ * If the PMU has HV/PR flags then check to see if they
+ * place the exception in userspace. If so, use pt_regs. In
+ * continuous sampling mode the SIAR and the PMU exception are
+ * not synchronised, so they may be many instructions apart.
+ * This can result in confusing backtraces. We still want
+ * hypervisor samples as well as samples in the kernel with
+ * interrupts off hence the userspace check.
+ */
+ if (TRAP(regs) != 0xf00)
+ use_siar = 0;
+ else if (marked)
+ use_siar = 1;
+ else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
+ use_siar = 0;
+ else if (!(ppmu->flags & PPMU_NO_SIPR) && mmcra_sipr(mmcra))
+ use_siar = 0;
+ else
+ use_siar = 1;
+
+ regs->dsisr = mmcra;
+ regs->result = use_siar;
}
/*
@@ -1329,18 +1372,12 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- unsigned long mmcra = regs->dsisr;
+ unsigned long use_siar = regs->result;
- /* Not a PMU interrupt */
- if (TRAP(regs) != 0xf00)
- return regs->nip;
-
- /* Processor doesn't support sampling non marked events */
- if ((ppmu->flags & PPMU_NO_CONT_SAMPLING) &&
- !(mmcra & MMCRA_SAMPLE_ENABLE))
+ if (use_siar)
+ return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
+ else
return regs->nip;
-
- return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
}
static bool pmc_overflow(unsigned long val)
diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c
index 583e67fee37e..9f6c33d63a42 100644
--- a/arch/powerpc/platforms/44x/currituck.c
+++ b/arch/powerpc/platforms/44x/currituck.c
@@ -160,7 +160,7 @@ static void __init ppc47x_setup_arch(void)
/* No need to check the DMA config as we /know/ our windows are all of
* RAM. Lets hope that doesn't change */
#ifdef CONFIG_SWIOTLB
- if (memblock_end_of_DRAM() > 0xffffffff) {
+ if ((memblock_end_of_DRAM() - 1) > 0xffffffff) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c
index 3661bcdc326a..cf964e19573a 100644
--- a/arch/powerpc/platforms/82xx/km82xx.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -128,6 +128,11 @@ static __initdata struct cpm_pin km82xx_pins[] = {
{3, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXP */
{3, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXN */
{3, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXD */
+
+ /* SPI */
+ {3, 16, CPM_PIN_INPUT | CPM_PIN_SECONDARY},/* SPI_MISO PD16 */
+ {3, 17, CPM_PIN_INPUT | CPM_PIN_SECONDARY},/* SPI_MOSI PD17 */
+ {3, 18, CPM_PIN_INPUT | CPM_PIN_SECONDARY},/* SPI_CLK PD18 */
};
static void __init init_ioports(void)
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index a266ba876863..89923d723349 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -3,7 +3,7 @@
* Author: Heiko Schocher <hs@denx.de>
*
* Description:
- * Keymile KMETER1 board specific routines.
+ * Keymile 83xx platform specific routines.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -70,54 +70,88 @@ static void __init mpc83xx_km_setup_arch(void)
for_each_node_by_name(np, "spi")
par_io_of_config(np);
- for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;)
+ for_each_node_by_name(np, "ucc")
par_io_of_config(np);
}
np = of_find_compatible_node(NULL, "network", "ucc_geth");
if (np != NULL) {
- uint svid;
+ /*
+ * handle mpc8360E Erratum QE_ENET10:
+ * RGMII AC values do not meet the specification
+ */
+ uint svid = mfspr(SPRN_SVR);
+ struct device_node *np_par;
+ struct resource res;
+ void __iomem *base;
+ int ret;
+
+ np_par = of_find_node_by_name(NULL, "par_io");
+ if (np_par == NULL) {
+ printk(KERN_WARNING "%s couldn;t find par_io node\n",
+ __func__);
+ return;
+ }
+ /* Map Parallel I/O ports registers */
+ ret = of_address_to_resource(np_par, 0, &res);
+ if (ret) {
+ printk(KERN_WARNING "%s couldn;t map par_io registers\n",
+ __func__);
+ return;
+ }
+
+ base = ioremap(res.start, res.end - res.start + 1);
+
+ /*
+ * set output delay adjustments to default values according
+ * table 5 in Errata Rev. 5, 9/2011:
+ *
+ * write 0b01 to UCC1 bits 18:19
+ * write 0b01 to UCC2 option 1 bits 4:5
+ * write 0b01 to UCC2 option 2 bits 16:17
+ */
+ clrsetbits_be32((base + 0xa8), 0x0c00f000, 0x04005000);
+
+ /*
+ * set output delay adjustments to default values according
+ * table 3-13 in Reference Manual Rev.3 05/2010:
+ *
+ * write 0b01 to UCC2 option 2 bits 16:17
+ * write 0b0101 to UCC1 bits 20:23
+ * write 0b0101 to UCC2 option 1 bits 24:27
+ */
+ clrsetbits_be32((base + 0xac), 0x0000cff0, 0x00004550);
- /* handle mpc8360ea rev.2.1 erratum 2: RGMII Timing */
- svid = mfspr(SPRN_SVR);
if (SVR_REV(svid) == 0x0021) {
- struct device_node *np_par;
- struct resource res;
- void __iomem *base;
- int ret;
-
- np_par = of_find_node_by_name(NULL, "par_io");
- if (np_par == NULL) {
- printk(KERN_WARNING "%s couldn;t find par_io node\n",
- __func__);
- return;
- }
- /* Map Parallel I/O ports registers */
- ret = of_address_to_resource(np_par, 0, &res);
- if (ret) {
- printk(KERN_WARNING "%s couldn;t map par_io registers\n",
- __func__);
- return;
- }
- base = ioremap(res.start, resource_size(&res));
+ /*
+ * UCC2 option 1: write 0b1010 to bits 24:27
+ * at address IMMRBAR+0x14AC
+ */
+ clrsetbits_be32((base + 0xac), 0x000000f0, 0x000000a0);
+ } else if (SVR_REV(svid) == 0x0020) {
+ /*
+ * UCC1: write 0b11 to bits 18:19
+ * at address IMMRBAR+0x14A8
+ */
+ setbits32((base + 0xa8), 0x00003000);
/*
- * IMMR + 0x14A8[4:5] = 11 (clk delay for UCC 2)
- * IMMR + 0x14A8[18:19] = 11 (clk delay for UCC 1)
+ * UCC2 option 1: write 0b11 to bits 4:5
+ * at address IMMRBAR+0x14A8
*/
- setbits32((base + 0xa8), 0x0c003000);
+ setbits32((base + 0xa8), 0x0c000000);
/*
- * IMMR + 0x14AC[20:27] = 10101010
- * (data delay for both UCC's)
+ * UCC2 option 2: write 0b11 to bits 16:17
+ * at address IMMRBAR+0x14AC
*/
- clrsetbits_be32((base + 0xac), 0xff0, 0xaa0);
- iounmap(base);
- of_node_put(np_par);
+ setbits32((base + 0xac), 0x0000c000);
}
+ iounmap(base);
+ of_node_put(np_par);
of_node_put(np);
}
-#endif /* CONFIG_QUICC_ENGINE */
+#endif /* CONFIG_QUICC_ENGINE */
}
machine_device_initcall(mpc83xx_km, mpc83xx_declare_of_platform_devices);
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index f000d81c4e31..159c01e91463 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -23,6 +23,15 @@ config FSL_85XX_CACHE_SRAM
cache-sram-size and cache-sram-offset kernel boot
parameters should be passed when this option is enabled.
+config BSC9131_RDB
+ bool "Freescale BSC9131RDB"
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for the Freescale BSC9131RDB board.
+ The BSC9131 is a heterogeneous SoC containing an e500v2 powerpc and a
+ StarCore SC3850 DSP
+ Manufacturer : Freescale Semiconductor, Inc
+
config MPC8540_ADS
bool "Freescale MPC8540 ADS"
select DEFAULT_UIMAGE
@@ -175,12 +184,6 @@ config SBC8548
help
This option enables support for the Wind River SBC8548 board
-config SBC8560
- bool "Wind River SBC8560"
- select DEFAULT_UIMAGE
- help
- This option enables support for the Wind River SBC8560 board
-
config GE_IMP3A
bool "GE Intelligent Platforms IMP3A"
select DEFAULT_UIMAGE
@@ -222,18 +225,6 @@ config P3041_DS
help
This option enables support for the P3041 DS board
-config P3060_QDS
- bool "Freescale P3060 QDS"
- select DEFAULT_UIMAGE
- select PPC_E500MC
- select PHYS_64BIT
- select SWIOTLB
- select GPIO_MPC8XXX
- select HAS_RAPIDIO
- select PPC_EPAPR_HV_PIC
- help
- This option enables support for the P3060 QDS board
-
config P4080_DS
bool "Freescale P4080 DS"
select DEFAULT_UIMAGE
@@ -263,6 +254,22 @@ config P5020_DS
help
This option enables support for the P5020 DS board
+config PPC_QEMU_E500
+ bool "QEMU generic e500 platform"
+ depends on EXPERIMENTAL
+ select DEFAULT_UIMAGE
+ help
+ This option enables support for running as a QEMU guest using
+ QEMU's generic e500 machine. This is not required if you're
+ using a QEMU machine that targets a specific board, such as
+ mpc8544ds.
+
+ Unlike most e500 boards that target a specific CPU, this
+ platform works with any e500-family CPU that QEMU supports.
+ Thus, you'll need to make sure CONFIG_PPC_E500MC is set or
+ unset based on the emulated CPU (or actual host CPU in the case
+ of KVM).
+
endif # FSL_SOC_BOOKE
config TQM85xx
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 2125d4ca068a..3dfe81175036 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-y += common.o
+obj-$(CONFIG_BSC9131_RDB) += bsc913x_rdb.o
obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o
obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o
@@ -17,14 +18,13 @@ obj-$(CONFIG_P1022_DS) += p1022_ds.o
obj-$(CONFIG_P1023_RDS) += p1023_rds.o
obj-$(CONFIG_P2041_RDB) += p2041_rdb.o corenet_ds.o
obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o
-obj-$(CONFIG_P3060_QDS) += p3060_qds.o corenet_ds.o
obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
obj-$(CONFIG_TQM85xx) += tqm85xx.o
-obj-$(CONFIG_SBC8560) += sbc8560.o
obj-$(CONFIG_SBC8548) += sbc8548.o
obj-$(CONFIG_SOCRATES) += socrates.o socrates_fpga_pic.o
obj-$(CONFIG_KSI8560) += ksi8560.o
obj-$(CONFIG_XES_MPC85xx) += xes_mpc85xx.o
obj-$(CONFIG_GE_IMP3A) += ge_imp3a.o
+obj-$(CONFIG_PPC_QEMU_E500) += qemu_e500.o
diff --git a/arch/powerpc/platforms/85xx/bsc913x_rdb.c b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
new file mode 100644
index 000000000000..9d57bedb940c
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/bsc913x_rdb.c
@@ -0,0 +1,67 @@
+/*
+ * BSC913xRDB Board Setup
+ *
+ * Author: Priyanka Jain <Priyanka.Jain@freescale.com>
+ *
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/of_platform.h>
+#include <linux/pci.h>
+#include <asm/mpic.h>
+#include <sysdev/fsl_soc.h>
+#include <asm/udbg.h>
+
+#include "mpc85xx.h"
+
+void __init bsc913x_rdb_pic_init(void)
+{
+ struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
+ MPIC_SINGLE_DEST_CPU,
+ 0, 256, " OpenPIC ");
+
+ if (!mpic)
+ pr_err("bsc913x: Failed to allocate MPIC structure\n");
+ else
+ mpic_init(mpic);
+}
+
+/*
+ * Setup the architecture
+ */
+static void __init bsc913x_rdb_setup_arch(void)
+{
+ if (ppc_md.progress)
+ ppc_md.progress("bsc913x_rdb_setup_arch()", 0);
+
+ pr_info("bsc913x board from Freescale Semiconductor\n");
+}
+
+machine_device_initcall(bsc9131_rdb, mpc85xx_common_publish_devices);
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+
+static int __init bsc9131_rdb_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,bsc9131rdb");
+}
+
+define_machine(bsc9131_rdb) {
+ .name = "BSC9131 RDB",
+ .probe = bsc9131_rdb_probe,
+ .setup_arch = bsc913x_rdb_setup_arch,
+ .init_IRQ = bsc913x_rdb_pic_init,
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index dd3617c531d7..925b02874233 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -77,7 +77,7 @@ void __init corenet_ds_setup_arch(void)
#endif
#ifdef CONFIG_SWIOTLB
- if (memblock_end_of_DRAM() > max) {
+ if ((memblock_end_of_DRAM() - 1) > max) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index 18014629416d..b6a728b0a8ca 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -125,7 +125,7 @@ static void __init ge_imp3a_setup_arch(void)
mpc85xx_smp_init();
#ifdef CONFIG_SWIOTLB
- if (memblock_end_of_DRAM() > max) {
+ if ((memblock_end_of_DRAM() - 1) > max) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index 585bd22b1406..767c7cf18a9c 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -75,7 +75,7 @@ static void __init mpc8536_ds_setup_arch(void)
#endif
#ifdef CONFIG_SWIOTLB
- if (memblock_end_of_DRAM() > max) {
+ if ((memblock_end_of_DRAM() - 1) > max) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index 1fd91e9e0ffb..6d3265fe7718 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -114,71 +114,53 @@ void __init mpc85xx_ds_pic_init(void)
}
#ifdef CONFIG_PCI
-static int primary_phb_addr;
extern int uli_exclude_device(struct pci_controller *hose,
u_char bus, u_char devfn);
+static struct device_node *pci_with_uli;
+
static int mpc85xx_exclude_device(struct pci_controller *hose,
u_char bus, u_char devfn)
{
- struct device_node* node;
- struct resource rsrc;
-
- node = hose->dn;
- of_address_to_resource(node, 0, &rsrc);
-
- if ((rsrc.start & 0xfffff) == primary_phb_addr) {
+ if (hose->dn == pci_with_uli)
return uli_exclude_device(hose, bus, devfn);
- }
return PCIBIOS_SUCCESSFUL;
}
#endif /* CONFIG_PCI */
-/*
- * Setup the architecture
- */
-static void __init mpc85xx_ds_setup_arch(void)
+static void __init mpc85xx_ds_pci_init(void)
{
#ifdef CONFIG_PCI
- struct device_node *np;
- struct pci_controller *hose;
-#endif
- dma_addr_t max = 0xffffffff;
+ struct device_node *node;
- if (ppc_md.progress)
- ppc_md.progress("mpc85xx_ds_setup_arch()", 0);
+ fsl_pci_init();
-#ifdef CONFIG_PCI
- for_each_node_by_type(np, "pci") {
- if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
- of_device_is_compatible(np, "fsl,mpc8548-pcie") ||
- of_device_is_compatible(np, "fsl,p2020-pcie")) {
- struct resource rsrc;
- of_address_to_resource(np, 0, &rsrc);
- if ((rsrc.start & 0xfffff) == primary_phb_addr)
- fsl_add_bridge(np, 1);
- else
- fsl_add_bridge(np, 0);
-
- hose = pci_find_hose_for_OF_device(np);
- max = min(max, hose->dma_window_base_cur +
- hose->dma_window_size);
+ /* See if we have a ULI under the primary */
+
+ node = of_find_node_by_name(NULL, "uli1575");
+ while ((pci_with_uli = of_get_parent(node))) {
+ of_node_put(node);
+ node = pci_with_uli;
+
+ if (pci_with_uli == fsl_pci_primary) {
+ ppc_md.pci_exclude_device = mpc85xx_exclude_device;
+ break;
}
}
-
- ppc_md.pci_exclude_device = mpc85xx_exclude_device;
#endif
+}
- mpc85xx_smp_init();
+/*
+ * Setup the architecture
+ */
+static void __init mpc85xx_ds_setup_arch(void)
+{
+ if (ppc_md.progress)
+ ppc_md.progress("mpc85xx_ds_setup_arch()", 0);
-#ifdef CONFIG_SWIOTLB
- if (memblock_end_of_DRAM() > max) {
- ppc_swiotlb_enable = 1;
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- }
-#endif
+ mpc85xx_ds_pci_init();
+ mpc85xx_smp_init();
printk("MPC85xx DS board from Freescale Semiconductor\n");
}
@@ -190,14 +172,7 @@ static int __init mpc8544_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "MPC8544DS")) {
-#ifdef CONFIG_PCI
- primary_phb_addr = 0xb000;
-#endif
- return 1;
- }
-
- return 0;
+ return !!of_flat_dt_is_compatible(root, "MPC8544DS");
}
machine_device_initcall(mpc8544_ds, mpc85xx_common_publish_devices);
@@ -215,14 +190,7 @@ static int __init mpc8572_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS")) {
-#ifdef CONFIG_PCI
- primary_phb_addr = 0x8000;
-#endif
- return 1;
- }
-
- return 0;
+ return !!of_flat_dt_is_compatible(root, "fsl,MPC8572DS");
}
/*
@@ -232,14 +200,7 @@ static int __init p2020_ds_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- if (of_flat_dt_is_compatible(root, "fsl,P2020DS")) {
-#ifdef CONFIG_PCI
- primary_phb_addr = 0x9000;
-#endif
- return 1;
- }
-
- return 0;
+ return !!of_flat_dt_is_compatible(root, "fsl,P2020DS");
}
define_machine(mpc8544_ds) {
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index d208ebccb91c..8e4b094c553b 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -359,7 +359,7 @@ static void __init mpc85xx_mds_setup_arch(void)
mpc85xx_mds_qe_init();
#ifdef CONFIG_SWIOTLB
- if (memblock_end_of_DRAM() > max) {
+ if ((memblock_end_of_DRAM() - 1) > max) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index 313fce4f5574..1910fdcb75b2 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -169,6 +169,7 @@ machine_device_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices);
machine_device_initcall(p1020_utm_pc, mpc85xx_common_publish_devices);
machine_device_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices);
machine_device_initcall(p1025_rdb, mpc85xx_common_publish_devices);
+machine_device_initcall(p1024_rdb, mpc85xx_common_publish_devices);
/*
* Called very early, device-tree isn't unflattened
@@ -237,6 +238,13 @@ static int __init p1020_utm_pc_probe(void)
return of_flat_dt_is_compatible(root, "fsl,P1020UTM-PC");
}
+static int __init p1024_rdb_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return of_flat_dt_is_compatible(root, "fsl,P1024RDB");
+}
+
define_machine(p2020_rdb) {
.name = "P2020 RDB",
.probe = p2020_rdb_probe,
@@ -348,3 +356,17 @@ define_machine(p1020_rdb_pc) {
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
+
+define_machine(p1024_rdb) {
+ .name = "P1024 RDB",
+ .probe = p1024_rdb_probe,
+ .setup_arch = mpc85xx_rdb_setup_arch,
+ .init_IRQ = mpc85xx_rdb_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index f700c81a1321..89ee02c54561 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -27,6 +27,7 @@
#include <sysdev/fsl_pci.h>
#include <asm/udbg.h>
#include <asm/fsl_guts.h>
+#include <asm/fsl_lbc.h>
#include "smp.h"
#include "mpc85xx.h"
@@ -142,17 +143,73 @@ static void p1022ds_set_gamma_table(enum fsl_diu_monitor_port port,
{
}
+struct fsl_law {
+ u32 lawbar;
+ u32 reserved1;
+ u32 lawar;
+ u32 reserved[5];
+};
+
+#define LAWBAR_MASK 0x00F00000
+#define LAWBAR_SHIFT 12
+
+#define LAWAR_EN 0x80000000
+#define LAWAR_TGT_MASK 0x01F00000
+#define LAW_TRGT_IF_LBC (0x04 << 20)
+
+#define LAWAR_MASK (LAWAR_EN | LAWAR_TGT_MASK)
+#define LAWAR_MATCH (LAWAR_EN | LAW_TRGT_IF_LBC)
+
+#define BR_BA 0xFFFF8000
+
+/*
+ * Map a BRx value to a physical address
+ *
+ * The localbus BRx registers only store the lower 32 bits of the address. To
+ * obtain the upper four bits, we need to scan the LAW table. The entry which
+ * maps to the localbus will contain the upper four bits.
+ */
+static phys_addr_t lbc_br_to_phys(const void *ecm, unsigned int count, u32 br)
+{
+#ifndef CONFIG_PHYS_64BIT
+ /*
+ * If we only have 32-bit addressing, then the BRx address *is* the
+ * physical address.
+ */
+ return br & BR_BA;
+#else
+ const struct fsl_law *law = ecm + 0xc08;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ u64 lawbar = in_be32(&law[i].lawbar);
+ u32 lawar = in_be32(&law[i].lawar);
+
+ if ((lawar & LAWAR_MASK) == LAWAR_MATCH)
+ /* Extract the upper four bits */
+ return (br & BR_BA) | ((lawbar & LAWBAR_MASK) << 12);
+ }
+
+ return 0;
+#endif
+}
+
/**
* p1022ds_set_monitor_port: switch the output to a different monitor port
- *
*/
static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
{
struct device_node *guts_node;
- struct device_node *indirect_node = NULL;
+ struct device_node *lbc_node = NULL;
+ struct device_node *law_node = NULL;
struct ccsr_guts __iomem *guts;
+ struct fsl_lbc_regs *lbc = NULL;
+ void *ecm = NULL;
u8 __iomem *lbc_lcs0_ba = NULL;
u8 __iomem *lbc_lcs1_ba = NULL;
+ phys_addr_t cs0_addr, cs1_addr;
+ const __be32 *iprop;
+ unsigned int num_laws;
u8 b;
/* Map the global utilities registers. */
@@ -168,24 +225,42 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
goto exit;
}
- indirect_node = of_find_compatible_node(NULL, NULL,
- "fsl,p1022ds-indirect-pixis");
- if (!indirect_node) {
- pr_err("p1022ds: missing pixis indirect mode node\n");
+ lbc_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
+ if (!lbc_node) {
+ pr_err("p1022ds: missing localbus node\n");
+ goto exit;
+ }
+
+ lbc = of_iomap(lbc_node, 0);
+ if (!lbc) {
+ pr_err("p1022ds: could not map localbus node\n");
+ goto exit;
+ }
+
+ law_node = of_find_compatible_node(NULL, NULL, "fsl,ecm-law");
+ if (!law_node) {
+ pr_err("p1022ds: missing local access window node\n");
goto exit;
}
- lbc_lcs0_ba = of_iomap(indirect_node, 0);
- if (!lbc_lcs0_ba) {
- pr_err("p1022ds: could not map localbus chip select 0\n");
+ ecm = of_iomap(law_node, 0);
+ if (!ecm) {
+ pr_err("p1022ds: could not map local access window node\n");
goto exit;
}
- lbc_lcs1_ba = of_iomap(indirect_node, 1);
- if (!lbc_lcs1_ba) {
- pr_err("p1022ds: could not map localbus chip select 1\n");
+ iprop = of_get_property(law_node, "fsl,num-laws", 0);
+ if (!iprop) {
+ pr_err("p1022ds: LAW node is missing fsl,num-laws property\n");
goto exit;
}
+ num_laws = be32_to_cpup(iprop);
+
+ cs0_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[0].br));
+ cs1_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[1].br));
+
+ lbc_lcs0_ba = ioremap(cs0_addr, 1);
+ lbc_lcs1_ba = ioremap(cs1_addr, 1);
/* Make sure we're in indirect mode first. */
if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
@@ -254,10 +329,15 @@ exit:
iounmap(lbc_lcs1_ba);
if (lbc_lcs0_ba)
iounmap(lbc_lcs0_ba);
+ if (lbc)
+ iounmap(lbc);
+ if (ecm)
+ iounmap(ecm);
if (guts)
iounmap(guts);
- of_node_put(indirect_node);
+ of_node_put(law_node);
+ of_node_put(lbc_node);
of_node_put(guts_node);
}
@@ -348,13 +428,7 @@ void __init p1022_ds_pic_init(void)
*/
static void __init disable_one_node(struct device_node *np, struct property *new)
{
- struct property *old;
-
- old = of_find_property(np, new->name, NULL);
- if (old)
- prom_update_property(np, new, old);
- else
- prom_add_property(np, new);
+ prom_update_property(np, new);
}
/* TRUE if there is a "video=fslfb" command-line parameter. */
@@ -450,7 +524,7 @@ static void __init p1022_ds_setup_arch(void)
mpc85xx_smp_init();
#ifdef CONFIG_SWIOTLB
- if (memblock_end_of_DRAM() > max) {
+ if ((memblock_end_of_DRAM() - 1) > max) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/85xx/p3060_qds.c b/arch/powerpc/platforms/85xx/p3060_qds.c
deleted file mode 100644
index 081cf4ac1881..000000000000
--- a/arch/powerpc/platforms/85xx/p3060_qds.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * P3060 QDS Setup
- *
- * Copyright 2011 Freescale Semiconductor Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/phy.h>
-#include <asm/machdep.h>
-#include <asm/udbg.h>
-#include <asm/mpic.h>
-#include <linux/of_platform.h>
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-#include <asm/ehv_pic.h>
-#include "corenet_ds.h"
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init p3060_qds_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
-#ifdef CONFIG_SMP
- extern struct smp_ops_t smp_85xx_ops;
-#endif
-
- if (of_flat_dt_is_compatible(root, "fsl,P3060QDS"))
- return 1;
-
- /* Check if we're running under the Freescale hypervisor */
- if (of_flat_dt_is_compatible(root, "fsl,P3060QDS-hv")) {
- ppc_md.init_IRQ = ehv_pic_init;
- ppc_md.get_irq = ehv_pic_get_irq;
- ppc_md.restart = fsl_hv_restart;
- ppc_md.power_off = fsl_hv_halt;
- ppc_md.halt = fsl_hv_halt;
-#ifdef CONFIG_SMP
- /*
- * Disable the timebase sync operations because we can't write
- * to the timebase registers under the hypervisor.
- */
- smp_85xx_ops.give_timebase = NULL;
- smp_85xx_ops.take_timebase = NULL;
-#endif
- return 1;
- }
-
- return 0;
-}
-
-define_machine(p3060_qds) {
- .name = "P3060 QDS",
- .probe = p3060_qds_probe,
- .setup_arch = corenet_ds_setup_arch,
- .init_IRQ = corenet_ds_pic_init,
-#ifdef CONFIG_PCI
- .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
-#endif
- .get_irq = mpic_get_coreint_irq,
- .restart = fsl_rstcr_restart,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
- .power_save = e500_idle,
-};
-
-machine_device_initcall(p3060_qds, corenet_ds_publish_devices);
-
-#ifdef CONFIG_SWIOTLB
-machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier);
-#endif
diff --git a/arch/powerpc/platforms/85xx/qemu_e500.c b/arch/powerpc/platforms/85xx/qemu_e500.c
new file mode 100644
index 000000000000..95a2e53af71b
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/qemu_e500.c
@@ -0,0 +1,72 @@
+/*
+ * Paravirt target for a generic QEMU e500 machine
+ *
+ * This is intended to be a flexible device-tree-driven platform, not fixed
+ * to a particular piece of hardware or a particular spec of virtual hardware,
+ * beyond the assumption of an e500-family CPU. Some things are still hardcoded
+ * here, such as MPIC, but this is a limitation of the current code rather than
+ * an interface contract with QEMU.
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of_fdt.h>
+#include <asm/machdep.h>
+#include <asm/time.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include "smp.h"
+#include "mpc85xx.h"
+
+void __init qemu_e500_pic_init(void)
+{
+ struct mpic *mpic;
+
+ mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU,
+ 0, 256, " OpenPIC ");
+
+ BUG_ON(mpic == NULL);
+ mpic_init(mpic);
+}
+
+static void __init qemu_e500_setup_arch(void)
+{
+ ppc_md.progress("qemu_e500_setup_arch()", 0);
+
+ fsl_pci_init();
+ mpc85xx_smp_init();
+}
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init qemu_e500_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ return !!of_flat_dt_is_compatible(root, "fsl,qemu-e500");
+}
+
+machine_device_initcall(qemu_e500, mpc85xx_common_publish_devices);
+
+define_machine(qemu_e500) {
+ .name = "QEMU e500",
+ .probe = qemu_e500_probe,
+ .setup_arch = qemu_e500_setup_arch,
+ .init_IRQ = qemu_e500_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
deleted file mode 100644
index b1be632ede43..000000000000
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Wind River SBC8560 setup and early boot code.
- *
- * Copyright 2007 Wind River Systems Inc.
- *
- * By Paul Gortmaker (see MAINTAINERS for contact information)
- *
- * Based largely on the MPC8560ADS support - Copyright 2005 Freescale Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/stddef.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/kdev_t.h>
-#include <linux/delay.h>
-#include <linux/seq_file.h>
-#include <linux/of_platform.h>
-
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/pci-bridge.h>
-#include <asm/mpic.h>
-#include <mm/mmu_decl.h>
-#include <asm/udbg.h>
-
-#include <sysdev/fsl_soc.h>
-#include <sysdev/fsl_pci.h>
-
-#include "mpc85xx.h"
-
-#ifdef CONFIG_CPM2
-#include <asm/cpm2.h>
-#include <sysdev/cpm2_pic.h>
-#endif
-
-static void __init sbc8560_pic_init(void)
-{
- struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
- 0, 256, " OpenPIC ");
- BUG_ON(mpic == NULL);
- mpic_init(mpic);
-
- mpc85xx_cpm2_pic_init();
-}
-
-/*
- * Setup the architecture
- */
-#ifdef CONFIG_CPM2
-struct cpm_pin {
- int port, pin, flags;
-};
-
-static const struct cpm_pin sbc8560_pins[] = {
- /* SCC1 */
- {3, 29, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
- {3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
-
- /* SCC2 */
- {3, 26, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {3, 27, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {3, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
-
- /* FCC2 */
- {1, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 20, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 22, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {1, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {1, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {1, 25, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {1, 26, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 28, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 29, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY},
- {1, 30, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 31, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {2, 18, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK14 */
- {2, 19, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* CLK13 */
-
- /* FCC3 */
- {1, 4, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {1, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {1, 6, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {1, 7, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {1, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 9, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 12, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 13, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 14, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {1, 15, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
- {1, 16, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {1, 17, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
- {2, 16, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* CLK16 */
- {2, 17, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* CLK15 */
-};
-
-static void __init init_ioports(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sbc8560_pins); i++) {
- const struct cpm_pin *pin = &sbc8560_pins[i];
- cpm2_set_pin(pin->port, pin->pin, pin->flags);
- }
-
- cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_RX);
- cpm2_clk_setup(CPM_CLK_SCC1, CPM_BRG1, CPM_CLK_TX);
- cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_RX);
- cpm2_clk_setup(CPM_CLK_SCC2, CPM_BRG2, CPM_CLK_TX);
- cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX);
- cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX);
- cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK15, CPM_CLK_RX);
- cpm2_clk_setup(CPM_CLK_FCC3, CPM_CLK16, CPM_CLK_TX);
-}
-#endif
-
-static void __init sbc8560_setup_arch(void)
-{
-#ifdef CONFIG_PCI
- struct device_node *np;
-#endif
-
- if (ppc_md.progress)
- ppc_md.progress("sbc8560_setup_arch()", 0);
-
-#ifdef CONFIG_CPM2
- cpm2_reset();
- init_ioports();
-#endif
-
-#ifdef CONFIG_PCI
- for_each_compatible_node(np, "pci", "fsl,mpc8540-pci")
- fsl_add_bridge(np, 1);
-#endif
-}
-
-static void sbc8560_show_cpuinfo(struct seq_file *m)
-{
- uint pvid, svid, phid1;
-
- pvid = mfspr(SPRN_PVR);
- svid = mfspr(SPRN_SVR);
-
- seq_printf(m, "Vendor\t\t: Wind River\n");
- seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
- seq_printf(m, "SVR\t\t: 0x%x\n", svid);
-
- /* Display cpu Pll setting */
- phid1 = mfspr(SPRN_HID1);
- seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
-}
-
-machine_device_initcall(sbc8560, mpc85xx_common_publish_devices);
-
-/*
- * Called very early, device-tree isn't unflattened
- */
-static int __init sbc8560_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
-
- return of_flat_dt_is_compatible(root, "SBC8560");
-}
-
-#ifdef CONFIG_RTC_DRV_M48T59
-static int __init sbc8560_rtc_init(void)
-{
- struct device_node *np;
- struct resource res;
- struct platform_device *rtc_dev;
-
- np = of_find_compatible_node(NULL, NULL, "m48t59");
- if (np == NULL) {
- printk("No RTC in DTB. Has it been eaten by wild dogs?\n");
- return -ENODEV;
- }
-
- of_address_to_resource(np, 0, &res);
- of_node_put(np);
-
- printk("Found RTC (m48t59) at i/o 0x%x\n", res.start);
-
- rtc_dev = platform_device_register_simple("rtc-m48t59", 0, &res, 1);
-
- if (IS_ERR(rtc_dev)) {
- printk("Registering sbc8560 RTC device failed\n");
- return PTR_ERR(rtc_dev);
- }
-
- return 0;
-}
-
-arch_initcall(sbc8560_rtc_init);
-
-#endif /* M48T59 */
-
-static __u8 __iomem *brstcr;
-
-static int __init sbc8560_bdrstcr_init(void)
-{
- struct device_node *np;
- struct resource res;
-
- np = of_find_compatible_node(NULL, NULL, "wrs,sbc8560-brstcr");
- if (np == NULL) {
- printk(KERN_WARNING "sbc8560: No board specific RSTCR in DTB.\n");
- return -ENODEV;
- }
-
- of_address_to_resource(np, 0, &res);
-
- printk(KERN_INFO "sbc8560: Found BRSTCR at %pR\n", &res);
-
- brstcr = ioremap(res.start, resource_size(&res));
- if(!brstcr)
- printk(KERN_WARNING "sbc8560: ioremap of brstcr failed.\n");
-
- of_node_put(np);
-
- return 0;
-}
-
-arch_initcall(sbc8560_bdrstcr_init);
-
-void sbc8560_rstcr_restart(char * cmd)
-{
- local_irq_disable();
- if(brstcr)
- clrbits8(brstcr, 0x80);
-
- while(1);
-}
-
-define_machine(sbc8560) {
- .name = "SBC8560",
- .probe = sbc8560_probe,
- .setup_arch = sbc8560_setup_arch,
- .init_IRQ = sbc8560_pic_init,
- .show_cpuinfo = sbc8560_show_cpuinfo,
- .get_irq = mpic_get_irq,
- .restart = sbc8560_rstcr_restart,
- .calibrate_decr = generic_calibrate_decr,
- .progress = udbg_progress,
-};
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index 4d786c25d3e5..3e70a2035e53 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -102,7 +102,7 @@ static void tqm85xx_show_cpuinfo(struct seq_file *m)
seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f));
}
-static void __init tqm85xx_ti1520_fixup(struct pci_dev *pdev)
+static void __devinit tqm85xx_ti1520_fixup(struct pci_dev *pdev)
{
unsigned int val;
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 1fca663f1b25..563aafa8629c 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -164,7 +164,7 @@ static void gef_ppc9a_show_cpuinfo(struct seq_file *m)
gef_ppc9a_get_vme_is_syscon() ? "yes" : "no");
}
-static void __init gef_ppc9a_nec_fixup(struct pci_dev *pdev)
+static void __devinit gef_ppc9a_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 14e0e576bcbd..cc6a91ae0889 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -152,7 +152,7 @@ static void gef_sbc310_show_cpuinfo(struct seq_file *m)
}
-static void __init gef_sbc310_nec_fixup(struct pci_dev *pdev)
+static void __devinit gef_sbc310_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 1638f43599f0..aead6b337f4a 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -141,7 +141,7 @@ static void gef_sbc610_show_cpuinfo(struct seq_file *m)
seq_printf(m, "SVR\t\t: 0x%x\n", svid);
}
-static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev)
+static void __devinit gef_sbc610_nec_fixup(struct pci_dev *pdev)
{
unsigned int val;
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
index 3755e61d7ecf..817245bc0219 100644
--- a/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
+++ b/arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
@@ -102,7 +102,7 @@ mpc86xx_hpcn_setup_arch(void)
#endif
#ifdef CONFIG_SWIOTLB
- if (memblock_end_of_DRAM() > max) {
+ if ((memblock_end_of_DRAM() - 1) > max) {
ppc_swiotlb_enable = 1;
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index a35ca44ade66..e7a896acd982 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -25,6 +25,7 @@ source "arch/powerpc/platforms/wsp/Kconfig"
config KVM_GUEST
bool "KVM Guest support"
default n
+ select EPAPR_PARAVIRT
---help---
This option enables various optimizations for running under the KVM
hypervisor. Overhead for the kernel when not running inside KVM should
@@ -32,6 +33,14 @@ config KVM_GUEST
In case of doubt, say Y
+config EPAPR_PARAVIRT
+ bool "ePAPR para-virtualization support"
+ default n
+ help
+ Enables ePAPR para-virtualization support for guests.
+
+ In case of doubt, say Y
+
config PPC_NATIVE
bool
depends on 6xx || PPC64
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 61c9550819a2..30fd01de6bed 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -159,6 +159,10 @@ config PPC_E500MC
bool "e500mc Support"
select PPC_FPU
depends on E500
+ help
+ This must be enabled for running on e500mc (and derivatives
+ such as e5500/e6500), and must be disabled for running on
+ e500v1 or e500v2.
config PPC_FPU
bool
diff --git a/arch/powerpc/platforms/cell/beat_hvCall.S b/arch/powerpc/platforms/cell/beat_hvCall.S
index 74c817448948..96c801907126 100644
--- a/arch/powerpc/platforms/cell/beat_hvCall.S
+++ b/arch/powerpc/platforms/cell/beat_hvCall.S
@@ -22,8 +22,6 @@
#include <asm/ppc_asm.h>
-#define STK_PARM(i) (48 + ((i)-3)*8)
-
/* Not implemented on Beat, now */
#define HCALL_INST_PRECALL
#define HCALL_INST_POSTCALL
@@ -74,7 +72,7 @@ _GLOBAL(beat_hcall_norets8)
mr r6,r7
mr r7,r8
mr r8,r9
- ld r10,STK_PARM(r10)(r1)
+ ld r10,STK_PARAM(R10)(r1)
HVSC /* invoke the hypervisor */
@@ -94,7 +92,7 @@ _GLOBAL(beat_hcall1)
HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARAM(R4)(r1) /* save ret buffer */
mr r11,r3
mr r3,r5
@@ -108,7 +106,7 @@ _GLOBAL(beat_hcall1)
HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARAM(R4)(r1)
std r4, 0(r12)
lwz r0,8(r1)
@@ -125,7 +123,7 @@ _GLOBAL(beat_hcall2)
HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARAM(R4)(r1) /* save ret buffer */
mr r11,r3
mr r3,r5
@@ -139,7 +137,7 @@ _GLOBAL(beat_hcall2)
HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARAM(R4)(r1)
std r4, 0(r12)
std r5, 8(r12)
@@ -157,7 +155,7 @@ _GLOBAL(beat_hcall3)
HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARAM(R4)(r1) /* save ret buffer */
mr r11,r3
mr r3,r5
@@ -171,7 +169,7 @@ _GLOBAL(beat_hcall3)
HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARAM(R4)(r1)
std r4, 0(r12)
std r5, 8(r12)
std r6, 16(r12)
@@ -190,7 +188,7 @@ _GLOBAL(beat_hcall4)
HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARAM(R4)(r1) /* save ret buffer */
mr r11,r3
mr r3,r5
@@ -204,7 +202,7 @@ _GLOBAL(beat_hcall4)
HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARAM(R4)(r1)
std r4, 0(r12)
std r5, 8(r12)
std r6, 16(r12)
@@ -224,7 +222,7 @@ _GLOBAL(beat_hcall5)
HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARAM(R4)(r1) /* save ret buffer */
mr r11,r3
mr r3,r5
@@ -238,7 +236,7 @@ _GLOBAL(beat_hcall5)
HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARAM(R4)(r1)
std r4, 0(r12)
std r5, 8(r12)
std r6, 16(r12)
@@ -259,7 +257,7 @@ _GLOBAL(beat_hcall6)
HCALL_INST_PRECALL
- std r4,STK_PARM(r4)(r1) /* save ret buffer */
+ std r4,STK_PARAM(R4)(r1) /* save ret buffer */
mr r11,r3
mr r3,r5
@@ -273,7 +271,7 @@ _GLOBAL(beat_hcall6)
HCALL_INST_POSTCALL
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARAM(R4)(r1)
std r4, 0(r12)
std r5, 8(r12)
std r6, 16(r12)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index b9f509a34c01..dca213666747 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -518,7 +518,6 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
__set_bit(0, window->table.it_map);
tce_build_cell(&window->table, window->table.it_offset, 1,
(unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
- window->table.it_hint = window->table.it_blocksize;
return window;
}
@@ -552,8 +551,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
iommu = cell_iommu_for_node(dev_to_node(dev));
if (iommu == NULL || list_empty(&iommu->windows)) {
printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
- dev->of_node ? dev->of_node->full_name : "?",
- dev_to_node(dev));
+ of_node_full_name(dev->of_node), dev_to_node(dev));
return NULL;
}
window = list_entry(iommu->windows.next, struct iommu_window, list);
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index efdacc829576..d17e98bc0c10 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
{
unsigned long ctrl, thread_switch_control;
- /*
- * We need to hard disable interrupts, the local_irq_enable() done by
- * our caller upon return will hard re-enable.
- */
- hard_irq_disable();
+ /* Ensure our interrupt state is properly tracked */
+ if (!prep_irq_for_idle())
+ return;
ctrl = mfspr(SPRN_CTRLF);
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
*/
ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
mtspr(SPRN_CTRLT, ctrl);
+
+ /* Re-enable interrupts in MSR */
+ __hard_irq_enable();
}
static int cbe_system_reset_exception(struct pt_regs *regs)
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 66519d263da7..d544d7816df3 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -317,28 +317,23 @@ out:
return ret;
}
-static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
+static int spufs_context_open(struct path *path)
{
int ret;
struct file *filp;
ret = get_unused_fd();
- if (ret < 0) {
- dput(dentry);
- mntput(mnt);
- goto out;
- }
+ if (ret < 0)
+ return ret;
- filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
+ filp = dentry_open(path, O_RDONLY, current_cred());
if (IS_ERR(filp)) {
put_unused_fd(ret);
- ret = PTR_ERR(filp);
- goto out;
+ return PTR_ERR(filp);
}
filp->f_op = &spufs_context_fops;
fd_install(ret, filp);
-out:
return ret;
}
@@ -453,6 +448,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
int affinity;
struct spu_gang *gang;
struct spu_context *neighbor;
+ struct path path = {.mnt = mnt, .dentry = dentry};
ret = -EPERM;
if ((flags & SPU_CREATE_NOSCHED) &&
@@ -495,11 +491,7 @@ spufs_create_context(struct inode *inode, struct dentry *dentry,
put_spu_context(neighbor);
}
- /*
- * get references for dget and mntget, will be released
- * in error path of *_open().
- */
- ret = spufs_context_open(dget(dentry), mntget(mnt));
+ ret = spufs_context_open(&path);
if (ret < 0) {
WARN_ON(spufs_rmdir(inode, dentry));
if (affinity)
@@ -556,28 +548,27 @@ out:
return ret;
}
-static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
+static int spufs_gang_open(struct path *path)
{
int ret;
struct file *filp;
ret = get_unused_fd();
- if (ret < 0) {
- dput(dentry);
- mntput(mnt);
- goto out;
- }
+ if (ret < 0)
+ return ret;
- filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
+ /*
+ * get references for dget and mntget, will be released
+ * in error path of *_open().
+ */
+ filp = dentry_open(path, O_RDONLY, current_cred());
if (IS_ERR(filp)) {
put_unused_fd(ret);
- ret = PTR_ERR(filp);
- goto out;
+ return PTR_ERR(filp);
}
filp->f_op = &simple_dir_operations;
fd_install(ret, filp);
-out:
return ret;
}
@@ -585,17 +576,14 @@ static int spufs_create_gang(struct inode *inode,
struct dentry *dentry,
struct vfsmount *mnt, umode_t mode)
{
+ struct path path = {.mnt = mnt, .dentry = dentry};
int ret;
ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
if (ret)
goto out;
- /*
- * get references for dget and mntget, will be released
- * in error path of *_open().
- */
- ret = spufs_gang_open(dget(dentry), mntget(mnt));
+ ret = spufs_gang_open(&path);
if (ret < 0) {
int err = simple_rmdir(inode, dentry);
WARN_ON(err);
diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S
index 77b48b2b9309..3cd262897c27 100644
--- a/arch/powerpc/platforms/powernv/opal-takeover.S
+++ b/arch/powerpc/platforms/powernv/opal-takeover.S
@@ -14,8 +14,6 @@
#include <asm/asm-offsets.h>
#include <asm/opal.h>
-#define STK_PARAM(i) (48 + ((i)-3)*8)
-
#define H_HAL_TAKEOVER 0x5124
#define H_HAL_TAKEOVER_QUERY_MAGIC -1
@@ -23,14 +21,14 @@
_GLOBAL(opal_query_takeover)
mfcr r0
stw r0,8(r1)
- std r3,STK_PARAM(r3)(r1)
- std r4,STK_PARAM(r4)(r1)
+ std r3,STK_PARAM(R3)(r1)
+ std r4,STK_PARAM(R4)(r1)
li r3,H_HAL_TAKEOVER
li r4,H_HAL_TAKEOVER_QUERY_MAGIC
HVSC
- ld r10,STK_PARAM(r3)(r1)
+ ld r10,STK_PARAM(R3)(r1)
std r4,0(r10)
- ld r10,STK_PARAM(r4)(r1)
+ ld r10,STK_PARAM(R4)(r1)
std r5,0(r10)
lwz r0,8(r1)
mtcrf 0xff,r0
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index fbdd74dac3ac..9cda6a1ad0cf 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -589,7 +589,7 @@ static int __devinit pnv_ioda_configure_pe(struct pnv_phb *phb,
dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
parent = pe->pbus->self;
- count = pe->pbus->subordinate - pe->pbus->secondary + 1;
+ count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
switch(count) {
case 1: bcomp = OpalPciBusAll; break;
case 2: bcomp = OpalPciBus7Bits; break;
@@ -816,11 +816,11 @@ static void __devinit pnv_ioda_setup_bus_PE(struct pci_dev *dev,
pe->pdev = NULL;
pe->tce32_seg = -1;
pe->mve_number = -1;
- pe->rid = bus->secondary << 8;
+ pe->rid = bus->busn_res.start << 8;
pe->dma_weight = 0;
- pe_info(pe, "Secondary busses %d..%d associated with PE\n",
- bus->secondary, bus->subordinate);
+ pe_info(pe, "Secondary busses %pR associated with PE\n",
+ &bus->busn_res);
if (pnv_ioda_configure_pe(phb, pe)) {
/* XXX What do we do here ? */
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 4cb375c0f8d1..fb506317ebb0 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -85,8 +85,10 @@ static int eeh_event_handler(void * dummy)
set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */
edev = handle_eeh_events(event);
- eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
- pci_dev_put(edev->pdev);
+ if (edev) {
+ eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
+ pci_dev_put(edev->pdev);
+ }
kfree(event);
mutex_unlock(&eeh_event_mutex);
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 8752f79a6af8..c33360ec4f4f 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -81,7 +81,7 @@ static int pseries_eeh_init(void)
ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
ibm_configure_pe = rtas_token("ibm,configure-pe");
- ibm_configure_bridge = rtas_token ("ibm,configure-bridge");
+ ibm_configure_bridge = rtas_token("ibm,configure-bridge");
/* necessary sanity check */
if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
@@ -89,7 +89,7 @@ static int pseries_eeh_init(void)
__func__);
return -EINVAL;
} else if (ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE) {
- pr_warning("%s: RTAS service <ibm, set-slot-reset> invalid\n",
+ pr_warning("%s: RTAS service <ibm,set-slot-reset> invalid\n",
__func__);
return -EINVAL;
} else if (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 3ce73d0052b1..444fe7759e55 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -13,8 +13,6 @@
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
-#define STK_PARM(i) (48 + ((i)-3)*8)
-
#ifdef CONFIG_TRACEPOINTS
.section ".toc","aw"
@@ -26,7 +24,7 @@ hcall_tracepoint_refcount:
.section ".text"
/*
- * precall must preserve all registers. use unused STK_PARM()
+ * precall must preserve all registers. use unused STK_PARAM()
* areas to save snapshots and opcode. We branch around this
* in early init (eg when populating the MMU hashtable) by using an
* unconditional cpu feature.
@@ -40,28 +38,28 @@ END_FTR_SECTION(0, 1); \
cmpdi r12,0; \
beq+ 1f; \
mflr r0; \
- std r3,STK_PARM(r3)(r1); \
- std r4,STK_PARM(r4)(r1); \
- std r5,STK_PARM(r5)(r1); \
- std r6,STK_PARM(r6)(r1); \
- std r7,STK_PARM(r7)(r1); \
- std r8,STK_PARM(r8)(r1); \
- std r9,STK_PARM(r9)(r1); \
- std r10,STK_PARM(r10)(r1); \
+ std r3,STK_PARAM(R3)(r1); \
+ std r4,STK_PARAM(R4)(r1); \
+ std r5,STK_PARAM(R5)(r1); \
+ std r6,STK_PARAM(R6)(r1); \
+ std r7,STK_PARAM(R7)(r1); \
+ std r8,STK_PARAM(R8)(r1); \
+ std r9,STK_PARAM(R9)(r1); \
+ std r10,STK_PARAM(R10)(r1); \
std r0,16(r1); \
- addi r4,r1,STK_PARM(FIRST_REG); \
+ addi r4,r1,STK_PARAM(FIRST_REG); \
stdu r1,-STACK_FRAME_OVERHEAD(r1); \
bl .__trace_hcall_entry; \
addi r1,r1,STACK_FRAME_OVERHEAD; \
ld r0,16(r1); \
- ld r3,STK_PARM(r3)(r1); \
- ld r4,STK_PARM(r4)(r1); \
- ld r5,STK_PARM(r5)(r1); \
- ld r6,STK_PARM(r6)(r1); \
- ld r7,STK_PARM(r7)(r1); \
- ld r8,STK_PARM(r8)(r1); \
- ld r9,STK_PARM(r9)(r1); \
- ld r10,STK_PARM(r10)(r1); \
+ ld r3,STK_PARAM(R3)(r1); \
+ ld r4,STK_PARAM(R4)(r1); \
+ ld r5,STK_PARAM(R5)(r1); \
+ ld r6,STK_PARAM(R6)(r1); \
+ ld r7,STK_PARAM(R7)(r1); \
+ ld r8,STK_PARAM(R8)(r1); \
+ ld r9,STK_PARAM(R9)(r1); \
+ ld r10,STK_PARAM(R10)(r1); \
mtlr r0; \
1:
@@ -79,8 +77,8 @@ END_FTR_SECTION(0, 1); \
cmpdi r12,0; \
beq+ 1f; \
mflr r0; \
- ld r6,STK_PARM(r3)(r1); \
- std r3,STK_PARM(r3)(r1); \
+ ld r6,STK_PARAM(R3)(r1); \
+ std r3,STK_PARAM(R3)(r1); \
mr r4,r3; \
mr r3,r6; \
std r0,16(r1); \
@@ -88,7 +86,7 @@ END_FTR_SECTION(0, 1); \
bl .__trace_hcall_exit; \
addi r1,r1,STACK_FRAME_OVERHEAD; \
ld r0,16(r1); \
- ld r3,STK_PARM(r3)(r1); \
+ ld r3,STK_PARAM(R3)(r1); \
mtlr r0; \
1:
@@ -114,7 +112,7 @@ _GLOBAL(plpar_hcall_norets)
mfcr r0
stw r0,8(r1)
- HCALL_INST_PRECALL(r4)
+ HCALL_INST_PRECALL(R4)
HVSC /* invoke the hypervisor */
@@ -130,9 +128,9 @@ _GLOBAL(plpar_hcall)
mfcr r0
stw r0,8(r1)
- HCALL_INST_PRECALL(r5)
+ HCALL_INST_PRECALL(R5)
- std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+ std r4,STK_PARAM(R4)(r1) /* Save ret buffer */
mr r4,r5
mr r5,r6
@@ -143,7 +141,7 @@ _GLOBAL(plpar_hcall)
HVSC /* invoke the hypervisor */
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARAM(R4)(r1)
std r4, 0(r12)
std r5, 8(r12)
std r6, 16(r12)
@@ -168,7 +166,7 @@ _GLOBAL(plpar_hcall_raw)
mfcr r0
stw r0,8(r1)
- std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+ std r4,STK_PARAM(R4)(r1) /* Save ret buffer */
mr r4,r5
mr r5,r6
@@ -179,7 +177,7 @@ _GLOBAL(plpar_hcall_raw)
HVSC /* invoke the hypervisor */
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARAM(R4)(r1)
std r4, 0(r12)
std r5, 8(r12)
std r6, 16(r12)
@@ -196,9 +194,9 @@ _GLOBAL(plpar_hcall9)
mfcr r0
stw r0,8(r1)
- HCALL_INST_PRECALL(r5)
+ HCALL_INST_PRECALL(R5)
- std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+ std r4,STK_PARAM(R4)(r1) /* Save ret buffer */
mr r4,r5
mr r5,r6
@@ -206,14 +204,14 @@ _GLOBAL(plpar_hcall9)
mr r7,r8
mr r8,r9
mr r9,r10
- ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */
- ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */
- ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */
+ ld r10,STK_PARAM(R11)(r1) /* put arg7 in R10 */
+ ld r11,STK_PARAM(R12)(r1) /* put arg8 in R11 */
+ ld r12,STK_PARAM(R13)(r1) /* put arg9 in R12 */
HVSC /* invoke the hypervisor */
mr r0,r12
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARAM(R4)(r1)
std r4, 0(r12)
std r5, 8(r12)
std r6, 16(r12)
@@ -238,7 +236,7 @@ _GLOBAL(plpar_hcall9_raw)
mfcr r0
stw r0,8(r1)
- std r4,STK_PARM(r4)(r1) /* Save ret buffer */
+ std r4,STK_PARAM(R4)(r1) /* Save ret buffer */
mr r4,r5
mr r5,r6
@@ -246,14 +244,14 @@ _GLOBAL(plpar_hcall9_raw)
mr r7,r8
mr r8,r9
mr r9,r10
- ld r10,STK_PARM(r11)(r1) /* put arg7 in R10 */
- ld r11,STK_PARM(r12)(r1) /* put arg8 in R11 */
- ld r12,STK_PARM(r13)(r1) /* put arg9 in R12 */
+ ld r10,STK_PARAM(R11)(r1) /* put arg7 in R10 */
+ ld r11,STK_PARAM(R12)(r1) /* put arg8 in R11 */
+ ld r12,STK_PARAM(R13)(r1) /* put arg9 in R12 */
HVSC /* invoke the hypervisor */
mr r0,r12
- ld r12,STK_PARM(r4)(r1)
+ ld r12,STK_PARAM(R4)(r1)
std r4, 0(r12)
std r5, 8(r12)
std r6, 16(r12)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 0915b1ad66ce..bca220f2873c 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -106,7 +106,7 @@ static int tce_build_pSeries(struct iommu_table *tbl, long index,
tcep++;
}
- if (tbl->it_type == TCE_PCI_SWINV_CREATE)
+ if (tbl->it_type & TCE_PCI_SWINV_CREATE)
tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
return 0;
}
@@ -121,7 +121,7 @@ static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
while (npages--)
*(tcep++) = 0;
- if (tbl->it_type == TCE_PCI_SWINV_FREE)
+ if (tbl->it_type & TCE_PCI_SWINV_FREE)
tce_invalidate_pSeries_sw(tbl, tces, tcep - 1);
}
@@ -192,12 +192,15 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long l, limit;
long tcenum_start = tcenum, npages_start = npages;
int ret = 0;
+ unsigned long flags;
if (npages == 1) {
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
}
+ local_irq_save(flags); /* to protect tcep and the page behind it */
+
tcep = __get_cpu_var(tce_page);
/* This is safe to do since interrupts are off when we're called
@@ -207,6 +210,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
tcep = (u64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
+ local_irq_restore(flags);
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
}
@@ -240,6 +244,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
tcenum += limit;
} while (npages > 0 && !rc);
+ local_irq_restore(flags);
+
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
tce_freemulti_pSeriesLP(tbl, tcenum_start,
@@ -707,6 +713,21 @@ static int __init disable_ddw_setup(char *str)
early_param("disable_ddw", disable_ddw_setup);
+static inline void __remove_ddw(struct device_node *np, const u32 *ddw_avail, u64 liobn)
+{
+ int ret;
+
+ ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
+ if (ret)
+ pr_warning("%s: failed to remove DMA window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddw_avail[2], liobn);
+ else
+ pr_debug("%s: successfully removed DMA window: rtas returned "
+ "%d to ibm,remove-pe-dma-window(%x) %llx\n",
+ np->full_name, ret, ddw_avail[2], liobn);
+}
+
static void remove_ddw(struct device_node *np)
{
struct dynamic_dma_window_prop *dwp;
@@ -736,15 +757,7 @@ static void remove_ddw(struct device_node *np)
pr_debug("%s successfully cleared tces in window.\n",
np->full_name);
- ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn);
- if (ret)
- pr_warning("%s: failed to remove direct window: rtas returned "
- "%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
- else
- pr_debug("%s: successfully removed direct window: rtas returned "
- "%d to ibm,remove-pe-dma-window(%x) %llx\n",
- np->full_name, ret, ddw_avail[2], liobn);
+ __remove_ddw(np, ddw_avail, liobn);
delprop:
ret = prom_remove_property(np, win64);
@@ -869,6 +882,35 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
return ret;
}
+static void restore_default_window(struct pci_dev *dev,
+ u32 ddw_restore_token, unsigned long liobn)
+{
+ struct eeh_dev *edev;
+ u32 cfg_addr;
+ u64 buid;
+ int ret;
+
+ /*
+ * Get the config address and phb buid of the PE window.
+ * Rely on eeh to retrieve this for us.
+ * Retrieve them from the pci device, not the node with the
+ * dma-window property
+ */
+ edev = pci_dev_to_eeh_dev(dev);
+ cfg_addr = edev->config_addr;
+ if (edev->pe_config_addr)
+ cfg_addr = edev->pe_config_addr;
+ buid = edev->phb->buid;
+
+ do {
+ ret = rtas_call(ddw_restore_token, 3, 1, NULL, cfg_addr,
+ BUID_HI(buid), BUID_LO(buid));
+ } while (rtas_busy_delay(ret));
+ dev_info(&dev->dev,
+ "ibm,reset-pe-dma-windows(%x) %x %x %x returned %d\n",
+ ddw_restore_token, cfg_addr, BUID_HI(buid), BUID_LO(buid), ret);
+}
+
/*
* If the PE supports dynamic dma windows, and there is space for a table
* that can map all pages in a linear offset, then setup such a table,
@@ -889,9 +931,13 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
u64 dma_addr, max_addr;
struct device_node *dn;
const u32 *uninitialized_var(ddw_avail);
+ const u32 *uninitialized_var(ddw_extensions);
+ u32 ddw_restore_token = 0;
struct direct_window *window;
struct property *win64;
struct dynamic_dma_window_prop *ddwprop;
+ const void *dma_window = NULL;
+ unsigned long liobn, offset, size;
mutex_lock(&direct_window_init_mutex);
@@ -911,7 +957,40 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (!ddw_avail || len < 3 * sizeof(u32))
goto out_unlock;
- /*
+ /*
+ * the extensions property is only required to exist in certain
+ * levels of firmware and later
+ * the ibm,ddw-extensions property is a list with the first
+ * element containing the number of extensions and each
+ * subsequent entry is a value corresponding to that extension
+ */
+ ddw_extensions = of_get_property(pdn, "ibm,ddw-extensions", &len);
+ if (ddw_extensions) {
+ /*
+ * each new defined extension length should be added to
+ * the top of the switch so the "earlier" entries also
+ * get picked up
+ */
+ switch (ddw_extensions[0]) {
+ /* ibm,reset-pe-dma-windows */
+ case 1:
+ ddw_restore_token = ddw_extensions[1];
+ break;
+ }
+ }
+
+ /*
+ * Only remove the existing DMA window if we can restore back to
+ * the default state. Removing the existing window maximizes the
+ * resources available to firmware for dynamic window creation.
+ */
+ if (ddw_restore_token) {
+ dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
+ of_parse_dma_window(pdn, dma_window, &liobn, &offset, &size);
+ __remove_ddw(pdn, ddw_avail, liobn);
+ }
+
+ /*
* Query if there is a second window of size to map the
* whole partition. Query returns number of windows, largest
* block assigned to PE (partition endpoint), and two bitmasks
@@ -920,7 +999,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dn = pci_device_to_OF_node(dev);
ret = query_ddw(dev, ddw_avail, &query);
if (ret != 0)
- goto out_unlock;
+ goto out_restore_window;
if (query.windows_available == 0) {
/*
@@ -929,7 +1008,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
* trading in for a larger page size.
*/
dev_dbg(&dev->dev, "no free dynamic windows");
- goto out_unlock;
+ goto out_restore_window;
}
if (query.page_size & 4) {
page_shift = 24; /* 16MB */
@@ -940,7 +1019,7 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
} else {
dev_dbg(&dev->dev, "no supported direct page size in mask %x",
query.page_size);
- goto out_unlock;
+ goto out_restore_window;
}
/* verify the window * number of ptes will map the partition */
/* check largest block * page size > max memory hotplug addr */
@@ -949,14 +1028,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dev_dbg(&dev->dev, "can't map partiton max 0x%llx with %u "
"%llu-sized pages\n", max_addr, query.largest_available_block,
1ULL << page_shift);
- goto out_unlock;
+ goto out_restore_window;
}
len = order_base_2(max_addr);
win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
if (!win64) {
dev_info(&dev->dev,
"couldn't allocate property for 64bit dma window\n");
- goto out_unlock;
+ goto out_restore_window;
}
win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
@@ -1018,6 +1097,10 @@ out_free_prop:
kfree(win64->value);
kfree(win64);
+out_restore_window:
+ if (ddw_restore_token)
+ restore_default_window(dev, ddw_restore_token, liobn);
+
out_unlock:
mutex_unlock(&direct_window_init_mutex);
return dma_addr;
@@ -1051,7 +1134,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
if (!pdn || !PCI_DN(pdn)) {
printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
"no DMA window found for pci dev=%s dn=%s\n",
- pci_name(dev), dn? dn->full_name : "<null>");
+ pci_name(dev), of_node_full_name(dn));
return;
}
pr_debug(" parent is %s\n", pdn->full_name);
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 029a562af373..dd30b12edfe4 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -67,7 +67,6 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
const char *name, u32 vd, char *value)
{
struct property *new_prop = *prop;
- struct property *old_prop;
int more = 0;
/* A negative 'vd' value indicates that only part of the new property
@@ -117,12 +116,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
}
if (!more) {
- old_prop = of_find_property(dn, new_prop->name, NULL);
- if (old_prop)
- prom_update_property(dn, new_prop, old_prop);
- else
- prom_add_property(dn, new_prop);
-
+ prom_update_property(dn, new_prop);
new_prop = NULL;
}
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 8b7bafa489c2..3ccebc83dc02 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -121,7 +121,7 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
if (!num)
return;
pcibios_setup_bus_devices(bus);
- max = bus->secondary;
+ max = bus->busn_res.start;
for (pass=0; pass < 2; pass++)
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 41a34bc4a9a2..455760b1fe6e 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -11,6 +11,7 @@
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
+#include <linux/notifier.h>
#include <asm/paca.h>
#include <asm/reg.h>
@@ -99,15 +100,18 @@ out:
static void check_and_cede_processor(void)
{
/*
- * Interrupts are soft-disabled at this point,
- * but not hard disabled. So an interrupt might have
- * occurred before entering NAP, and would be potentially
- * lost (edge events, decrementer events, etc...) unless
- * we first hard disable then check.
+ * Ensure our interrupt state is properly tracked,
+ * also checks if no interrupt has occurred while we
+ * were soft-disabled
*/
- hard_irq_disable();
- if (get_paca()->irq_happened == 0)
+ if (prep_irq_for_idle()) {
cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Ensure that H_CEDE returns with IRQs on */
+ if (WARN_ON(!(mfmsr() & MSR_EE)))
+ __hard_irq_enable();
+#endif
+ }
}
static int dedicated_cede_loop(struct cpuidle_device *dev,
@@ -186,17 +190,40 @@ static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
.enter = &shared_cede_loop },
};
-int pseries_notify_cpuidle_add_cpu(int cpu)
+static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
+ unsigned long action, void *hcpu)
{
+ int hotcpu = (unsigned long)hcpu;
struct cpuidle_device *dev =
- per_cpu_ptr(pseries_cpuidle_devices, cpu);
+ per_cpu_ptr(pseries_cpuidle_devices, hotcpu);
+
if (dev && cpuidle_get_driver()) {
- cpuidle_disable_device(dev);
- cpuidle_enable_device(dev);
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ cpuidle_pause_and_lock();
+ cpuidle_enable_device(dev);
+ cpuidle_resume_and_unlock();
+ break;
+
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ cpuidle_pause_and_lock();
+ cpuidle_disable_device(dev);
+ cpuidle_resume_and_unlock();
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
}
- return 0;
+ return NOTIFY_OK;
}
+static struct notifier_block setup_hotplug_notifier = {
+ .notifier_call = pseries_cpuidle_add_cpu_notifier,
+};
+
/*
* pseries_cpuidle_driver_init()
*/
@@ -321,6 +348,7 @@ static int __init pseries_processor_idle_init(void)
return retval;
}
+ register_cpu_notifier(&setup_hotplug_notifier);
printk(KERN_DEBUG "pseries_idle_driver registered\n");
return 0;
@@ -329,6 +357,7 @@ static int __init pseries_processor_idle_init(void)
static void __exit pseries_processor_idle_exit(void)
{
+ unregister_cpu_notifier(&setup_hotplug_notifier);
pseries_idle_devices_uninit();
cpuidle_unregister_driver(&pseries_idle_driver);
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 7b3bf76ef834..39f71fba9b38 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -432,7 +432,7 @@ static int do_update_property(char *buf, size_t bufsize)
unsigned char *value;
char *name, *end, *next_prop;
int rc, length;
- struct property *newprop, *oldprop;
+ struct property *newprop;
buf = parse_node(buf, bufsize, &np);
end = buf + bufsize;
@@ -443,6 +443,9 @@ static int do_update_property(char *buf, size_t bufsize)
if (!next_prop)
return -EINVAL;
+ if (!strlen(name))
+ return -ENODEV;
+
newprop = new_property(name, length, value, NULL);
if (!newprop)
return -ENOMEM;
@@ -450,18 +453,11 @@ static int do_update_property(char *buf, size_t bufsize)
if (!strcmp(name, "slb-size") || !strcmp(name, "ibm,slb-size"))
slb_set_size(*(int *)value);
- oldprop = of_find_property(np, name,NULL);
- if (!oldprop) {
- if (strlen(name))
- return prom_add_property(np, newprop);
- return -ENODEV;
- }
-
upd_value.node = np;
upd_value.property = newprop;
pSeries_reconfig_notify(PSERIES_UPDATE_PROPERTY, &upd_value);
- rc = prom_update_property(np, newprop, oldprop);
+ rc = prom_update_property(np, newprop);
if (rc)
return rc;
@@ -486,7 +482,7 @@ static int do_update_property(char *buf, size_t bufsize)
rc = pSeries_reconfig_notify(action, value);
if (rc) {
- prom_update_property(np, oldprop, newprop);
+ prom_update_property(np, newprop);
return rc;
}
}
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index e16bb8d48550..71706bc34a0d 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -147,7 +147,6 @@ static void __devinit smp_xics_setup_cpu(int cpu)
set_cpu_current_state(cpu, CPU_STATE_ONLINE);
set_default_offline_state(cpu);
#endif
- pseries_notify_cpuidle_add_cpu(cpu);
}
static int __devinit smp_pSeries_kick_cpu(int nr)
diff --git a/arch/powerpc/sysdev/6xx-suspend.S b/arch/powerpc/sysdev/6xx-suspend.S
index 21cda085d926..cf48e9cb2575 100644
--- a/arch/powerpc/sysdev/6xx-suspend.S
+++ b/arch/powerpc/sysdev/6xx-suspend.S
@@ -29,7 +29,7 @@ _GLOBAL(mpc6xx_enter_standby)
ori r5, r5, ret_from_standby@l
mtlr r5
- rlwinm r5, r1, 0, 0, 31-THREAD_SHIFT
+ CURRENT_THREAD_INFO(r5, r1)
lwz r6, TI_LOCAL_FLAGS(r5)
ori r6, r6, _TLF_SLEEPING
stw r6, TI_LOCAL_FLAGS(r5)
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 6073288fed29..a7b2a600d0a4 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -1,7 +1,7 @@
/*
* MPC83xx/85xx/86xx PCI/PCIE support routing.
*
- * Copyright 2007-2011 Freescale Semiconductor, Inc.
+ * Copyright 2007-2012 Freescale Semiconductor, Inc.
* Copyright 2008-2009 MontaVista Software, Inc.
*
* Initial author: Xianghua Xiao <x.xiao@freescale.com>
@@ -36,7 +36,7 @@
static int fsl_pcie_bus_fixup, is_mpc83xx_pci;
-static void __init quirk_fsl_pcie_header(struct pci_dev *dev)
+static void __devinit quirk_fsl_pcie_header(struct pci_dev *dev)
{
u8 progif;
@@ -807,3 +807,72 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose)
return 0;
}
+
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+static const struct of_device_id pci_ids[] = {
+ { .compatible = "fsl,mpc8540-pci", },
+ { .compatible = "fsl,mpc8548-pcie", },
+ { .compatible = "fsl,mpc8610-pci", },
+ { .compatible = "fsl,mpc8641-pcie", },
+ { .compatible = "fsl,p1022-pcie", },
+ { .compatible = "fsl,p1010-pcie", },
+ { .compatible = "fsl,p1023-pcie", },
+ { .compatible = "fsl,p4080-pcie", },
+ { .compatible = "fsl,qoriq-pcie-v2.3", },
+ { .compatible = "fsl,qoriq-pcie-v2.2", },
+ {},
+};
+
+struct device_node *fsl_pci_primary;
+
+void __devinit fsl_pci_init(void)
+{
+ struct device_node *node;
+ struct pci_controller *hose;
+ dma_addr_t max = 0xffffffff;
+
+ /* Callers can specify the primary bus using other means. */
+ if (!fsl_pci_primary) {
+ /* If a PCI host bridge contains an ISA node, it's primary. */
+ node = of_find_node_by_type(NULL, "isa");
+ while ((fsl_pci_primary = of_get_parent(node))) {
+ of_node_put(node);
+ node = fsl_pci_primary;
+
+ if (of_match_node(pci_ids, node))
+ break;
+ }
+ }
+
+ node = NULL;
+ for_each_node_by_type(node, "pci") {
+ if (of_match_node(pci_ids, node)) {
+ /*
+ * If there's no PCI host bridge with ISA, arbitrarily
+ * designate one as primary. This can go away once
+ * various bugs with primary-less systems are fixed.
+ */
+ if (!fsl_pci_primary)
+ fsl_pci_primary = node;
+
+ fsl_add_bridge(node, fsl_pci_primary == node);
+ hose = pci_find_hose_for_OF_device(node);
+ max = min(max, hose->dma_window_base_cur +
+ hose->dma_window_size);
+ }
+ }
+
+#ifdef CONFIG_SWIOTLB
+ /*
+ * if we couldn't map all of DRAM via the dma windows
+ * we need SWIOTLB to handle buffers located outside of
+ * dma capable memory region
+ */
+ if (memblock_end_of_DRAM() - 1 > max) {
+ ppc_swiotlb_enable = 1;
+ set_pci_dma_ops(&swiotlb_dma_ops);
+ ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
+ }
+#endif
+}
+#endif
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index a39ed5cc2c5a..baa0fd18289f 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -93,5 +93,13 @@ extern void fsl_pcibios_fixup_bus(struct pci_bus *bus);
extern int mpc83xx_add_bridge(struct device_node *dev);
u64 fsl_pci_immrbar_base(struct pci_controller *hose);
+extern struct device_node *fsl_pci_primary;
+
+#ifdef CONFIG_FSL_PCI
+void fsl_pci_init(void);
+#else
+static inline void fsl_pci_init(void) {}
+#endif
+
#endif /* __POWERPC_FSL_PCI_H */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 395af1347749..bfc6211e5422 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1211,7 +1211,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
if (of_get_property(node, "single-cpu-affinity", NULL))
flags |= MPIC_SINGLE_DEST_CPU;
if (of_device_is_compatible(node, "fsl,mpic"))
- flags |= MPIC_FSL;
+ flags |= MPIC_FSL | MPIC_LARGE_VECTORS;
mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
if (mpic == NULL)
@@ -1376,7 +1376,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
mpic->isu_mask = (1 << mpic->isu_shift) - 1;
mpic->irqhost = irq_domain_add_linear(mpic->node,
- last_irq + 1,
+ intvec_top,
&mpic_host_ops, mpic);
/*
diff --git a/arch/powerpc/sysdev/mv64x60_pci.c b/arch/powerpc/sysdev/mv64x60_pci.c
index b0037cefaada..364b14d4754b 100644
--- a/arch/powerpc/sysdev/mv64x60_pci.c
+++ b/arch/powerpc/sysdev/mv64x60_pci.c
@@ -104,7 +104,7 @@ subsys_initcall(mv64x60_sysfs_init);
#endif /* CONFIG_SYSFS */
-static void __init mv64x60_pci_fixup_early(struct pci_dev *dev)
+static void __devinit mv64x60_pci_fixup_early(struct pci_dev *dev)
{
/*
* Set the host bridge hdr_type to an invalid value so that
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 818e763f8265..b04367529729 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -395,6 +395,9 @@ static void qe_upload_microcode(const void *base,
for (i = 0; i < be32_to_cpu(ucode->count); i++)
out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+
+ /* Set I-RAM Ready Register */
+ out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
}
/*
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 0f3ab06d2222..eab3492a45c5 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -971,7 +971,7 @@ static int cpu_cmd(void)
/* print cpus waiting or in xmon */
printf("cpus stopped:");
count = 0;
- for (cpu = 0; cpu < NR_CPUS; ++cpu) {
+ for_each_possible_cpu(cpu) {
if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
if (count == 0)
printf(" %x", cpu);
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index f0b23fc759ba..4a67f2b5f6aa 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
@@ -1,6 +1,4 @@
/*
- * arch/s390/appldata/appldata.h
- *
* Definitions and interface for Linux - z/VM Monitor Stream.
*
* Copyright IBM Corp. 2003, 2008
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 24bff4f1cc52..bae0f402bf2a 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -1,6 +1,4 @@
/*
- * arch/s390/appldata/appldata_base.c
- *
* Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
* Exports appldata_register_ops() and appldata_unregister_ops() for the
* data gathering modules.
@@ -29,7 +27,7 @@
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/appldata.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -84,8 +82,7 @@ static struct ctl_table appldata_dir_table[] = {
/*
* Timer
*/
-static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
-static atomic_t appldata_expire_count = ATOMIC_INIT(0);
+static struct vtimer_list appldata_timer;
static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
@@ -115,10 +112,7 @@ static LIST_HEAD(appldata_ops_list);
*/
static void appldata_timer_function(unsigned long data)
{
- if (atomic_dec_and_test(&appldata_expire_count)) {
- atomic_set(&appldata_expire_count, num_online_cpus());
- queue_work(appldata_wq, (struct work_struct *) data);
- }
+ queue_work(appldata_wq, (struct work_struct *) data);
}
/*
@@ -131,7 +125,6 @@ static void appldata_work_fn(struct work_struct *work)
struct list_head *lh;
struct appldata_ops *ops;
- get_online_cpus();
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {
ops = list_entry(lh, struct appldata_ops, list);
@@ -140,7 +133,6 @@ static void appldata_work_fn(struct work_struct *work)
}
}
mutex_unlock(&appldata_ops_mutex);
- put_online_cpus();
}
/*
@@ -168,20 +160,6 @@ int appldata_diag(char record_nr, u16 function, unsigned long buffer,
/****************************** /proc stuff **********************************/
-/*
- * appldata_mod_vtimer_wrap()
- *
- * wrapper function for mod_virt_timer(), because smp_call_function_single()
- * accepts only one parameter.
- */
-static void __appldata_mod_vtimer_wrap(void *p) {
- struct {
- struct vtimer_list *timer;
- u64 expires;
- } *args = p;
- mod_virt_timer_periodic(args->timer, args->expires);
-}
-
#define APPLDATA_ADD_TIMER 0
#define APPLDATA_DEL_TIMER 1
#define APPLDATA_MOD_TIMER 2
@@ -192,49 +170,28 @@ static void __appldata_mod_vtimer_wrap(void *p) {
* Add, delete or modify virtual timers on all online cpus.
* The caller needs to get the appldata_timer_lock spinlock.
*/
-static void
-__appldata_vtimer_setup(int cmd)
+static void __appldata_vtimer_setup(int cmd)
{
- u64 per_cpu_interval;
- int i;
+ u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO;
switch (cmd) {
case APPLDATA_ADD_TIMER:
if (appldata_timer_active)
break;
- per_cpu_interval = (u64) (appldata_interval*1000 /
- num_online_cpus()) * TOD_MICRO;
- for_each_online_cpu(i) {
- per_cpu(appldata_timer, i).expires = per_cpu_interval;
- smp_call_function_single(i, add_virt_timer_periodic,
- &per_cpu(appldata_timer, i),
- 1);
- }
+ appldata_timer.expires = timer_interval;
+ add_virt_timer_periodic(&appldata_timer);
appldata_timer_active = 1;
break;
case APPLDATA_DEL_TIMER:
- for_each_online_cpu(i)
- del_virt_timer(&per_cpu(appldata_timer, i));
+ del_virt_timer(&appldata_timer);
if (!appldata_timer_active)
break;
appldata_timer_active = 0;
- atomic_set(&appldata_expire_count, num_online_cpus());
break;
case APPLDATA_MOD_TIMER:
- per_cpu_interval = (u64) (appldata_interval*1000 /
- num_online_cpus()) * TOD_MICRO;
if (!appldata_timer_active)
break;
- for_each_online_cpu(i) {
- struct {
- struct vtimer_list *timer;
- u64 expires;
- } args;
- args.timer = &per_cpu(appldata_timer, i);
- args.expires = per_cpu_interval;
- smp_call_function_single(i, __appldata_mod_vtimer_wrap,
- &args, 1);
- }
+ mod_virt_timer_periodic(&appldata_timer, timer_interval);
}
}
@@ -265,14 +222,12 @@ appldata_timer_handler(ctl_table *ctl, int write,
len = *lenp;
if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
return -EFAULT;
- get_online_cpus();
spin_lock(&appldata_timer_lock);
if (buf[0] == '1')
__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
else if (buf[0] == '0')
__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
spin_unlock(&appldata_timer_lock);
- put_online_cpus();
out:
*lenp = len;
*ppos += len;
@@ -305,20 +260,17 @@ appldata_interval_handler(ctl_table *ctl, int write,
goto out;
}
len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
+ if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
return -EFAULT;
- }
interval = 0;
sscanf(buf, "%i", &interval);
if (interval <= 0)
return -EINVAL;
- get_online_cpus();
spin_lock(&appldata_timer_lock);
appldata_interval = interval;
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
- put_online_cpus();
out:
*lenp = len;
*ppos += len;
@@ -485,14 +437,12 @@ static int appldata_freeze(struct device *dev)
int rc;
struct list_head *lh;
- get_online_cpus();
spin_lock(&appldata_timer_lock);
if (appldata_timer_active) {
__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
appldata_timer_suspended = 1;
}
spin_unlock(&appldata_timer_lock);
- put_online_cpus();
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {
@@ -516,14 +466,12 @@ static int appldata_restore(struct device *dev)
int rc;
struct list_head *lh;
- get_online_cpus();
spin_lock(&appldata_timer_lock);
if (appldata_timer_suspended) {
__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
appldata_timer_suspended = 0;
}
spin_unlock(&appldata_timer_lock);
- put_online_cpus();
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {
@@ -567,53 +515,6 @@ static struct platform_driver appldata_pdrv = {
/******************************* init / exit *********************************/
-static void __cpuinit appldata_online_cpu(int cpu)
-{
- init_virt_timer(&per_cpu(appldata_timer, cpu));
- per_cpu(appldata_timer, cpu).function = appldata_timer_function;
- per_cpu(appldata_timer, cpu).data = (unsigned long)
- &appldata_work;
- atomic_inc(&appldata_expire_count);
- spin_lock(&appldata_timer_lock);
- __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
- spin_unlock(&appldata_timer_lock);
-}
-
-static void __cpuinit appldata_offline_cpu(int cpu)
-{
- del_virt_timer(&per_cpu(appldata_timer, cpu));
- if (atomic_dec_and_test(&appldata_expire_count)) {
- atomic_set(&appldata_expire_count, num_online_cpus());
- queue_work(appldata_wq, &appldata_work);
- }
- spin_lock(&appldata_timer_lock);
- __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
- spin_unlock(&appldata_timer_lock);
-}
-
-static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
- unsigned long action,
- void *hcpu)
-{
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- appldata_online_cpu((long) hcpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- appldata_offline_cpu((long) hcpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata appldata_nb = {
- .notifier_call = appldata_cpu_notify,
-};
-
/*
* appldata_init()
*
@@ -621,7 +522,10 @@ static struct notifier_block __cpuinitdata appldata_nb = {
*/
static int __init appldata_init(void)
{
- int i, rc;
+ int rc;
+
+ appldata_timer.function = appldata_timer_function;
+ appldata_timer.data = (unsigned long) &appldata_work;
rc = platform_driver_register(&appldata_pdrv);
if (rc)
@@ -639,14 +543,6 @@ static int __init appldata_init(void)
goto out_device;
}
- get_online_cpus();
- for_each_online_cpu(i)
- appldata_online_cpu(i);
- put_online_cpus();
-
- /* Register cpu hotplug notifier */
- register_hotcpu_notifier(&appldata_nb);
-
appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
return 0;
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index f7d3dc555bdb..02d9a1cf5057 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -1,10 +1,8 @@
/*
- * arch/s390/appldata/appldata_mem.c
- *
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects data related to memory management.
*
- * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright IBM Corp. 2003, 2006
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
index 5da7c562a90b..1370e358d49a 100644
--- a/arch/s390/appldata/appldata_net_sum.c
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -1,11 +1,9 @@
/*
- * arch/s390/appldata/appldata_net_sum.c
- *
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects accumulated network statistics (Packets received/transmitted,
* dropped, errors, ...).
*
- * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright IBM Corp. 2003, 2006
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 4de031d6b76c..87521ba682e5 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -1,10 +1,8 @@
/*
- * arch/s390/appldata/appldata_os.c
- *
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects misc. OS related data (CPU utilization, running processes).
*
- * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright IBM Corp. 2003, 2006
*
* Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index a9ce135893f8..e402a9dd4eda 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -4,7 +4,7 @@
* s390 implementation of the AES Cipher Algorithm.
*
* s390 Version:
- * Copyright IBM Corp. 2005,2007
+ * Copyright IBM Corp. 2005, 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
* Sebastian Siewior (sebastian@breakpoint.cc> SW-Fallback
*
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index 9178db6db0a5..6c5cc6da7111 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -3,7 +3,7 @@
*
* Support for s390 cryptographic instructions.
*
- * Copyright IBM Corp. 2003,2007
+ * Copyright IBM Corp. 2003, 2007
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
*
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index a52bfd124d86..1eaa371ca3ee 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -3,7 +3,7 @@
*
* s390 implementation of the DES Cipher Algorithm.
*
- * Copyright IBM Corp. 2003,2011
+ * Copyright IBM Corp. 2003, 2011
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
*
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 0808fbf0f7d3..94a35a4c1b48 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2006,2007
+ * Copyright IBM Corp. 2006, 2007
* Author(s): Jan Glauber <jan.glauber@de.ibm.com>
* Driver for the s390 pseudo random number generator
*/
diff --git a/arch/s390/crypto/sha1_s390.c b/arch/s390/crypto/sha1_s390.c
index e9868c6e0a08..a1b3a9dc9d8a 100644
--- a/arch/s390/crypto/sha1_s390.c
+++ b/arch/s390/crypto/sha1_s390.c
@@ -8,7 +8,7 @@
* implementation written by Steve Reid.
*
* s390 Version:
- * Copyright IBM Corp. 2003,2007
+ * Copyright IBM Corp. 2003, 2007
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
*
diff --git a/arch/s390/crypto/sha256_s390.c b/arch/s390/crypto/sha256_s390.c
index 0317a3547cb9..9b853809a492 100644
--- a/arch/s390/crypto/sha256_s390.c
+++ b/arch/s390/crypto/sha256_s390.c
@@ -4,7 +4,7 @@
* s390 implementation of the SHA256 and SHA224 Secure Hash Algorithm.
*
* s390 Version:
- * Copyright IBM Corp. 2005,2011
+ * Copyright IBM Corp. 2005, 2011
* Author(s): Jan Glauber (jang@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index d9df5a060a83..f41e0ef7fdf9 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -1,8 +1,7 @@
/*
- * arch/s390/hypfs/hypfs.h
* Hypervisor filesystem for Linux on s390.
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index b478013b7fec..13e76dabbe8b 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -1,7 +1,7 @@
/*
* Hypervisor filesystem for Linux on s390 - debugfs interface
*
- * Copyright (C) IBM Corp. 2010
+ * Copyright IBM Corp. 2010
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 74c8f5e76ce4..7fd3690b6760 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -1,5 +1,4 @@
/*
- * arch/s390/hypfs/hypfs_diag.c
* Hypervisor filesystem for Linux on s390. Diag 204 and 224
* implementation.
*
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index e54796002f61..4f6afaa8bd8f 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -1,7 +1,7 @@
/*
* Hypervisor filesystem for Linux on s390. z/VM implementation.
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 73dae8b9b77a..6767b437a103 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -1,5 +1,4 @@
/*
- * arch/s390/hypfs/inode.c
* Hypervisor filesystem for Linux on s390.
*
* Copyright IBM Corp. 2006, 2008
@@ -103,6 +102,7 @@ static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
if (ret) {
struct hypfs_sb_info *hypfs_info = sb->s_fs_info;
+ ret->i_ino = get_next_ino();
ret->i_mode = mode;
ret->i_uid = hypfs_info->uid;
ret->i_gid = hypfs_info->gid;
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h
index 1ac80d6b0588..9819891ed7a2 100644
--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -1,7 +1,5 @@
/*
- * include/asm-s390/airq.h
- *
- * Copyright IBM Corp. 2002,2007
+ * Copyright IBM Corp. 2002, 2007
* Author(s): Ingo Adlung <adlung@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Arnd Bergmann <arndb@de.ibm.com>
diff --git a/arch/s390/include/asm/appldata.h b/arch/s390/include/asm/appldata.h
index 79283dac8281..f328294faeae 100644
--- a/arch/s390/include/asm/appldata.h
+++ b/arch/s390/include/asm/appldata.h
@@ -1,7 +1,5 @@
/*
- * include/asm-s390/appldata.h
- *
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006
*
* Author(s): Melissa Howland <melissah@us.ibm.com>
*/
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 748347baecb8..c797832daa5f 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -1,8 +1,5 @@
-#ifndef __ARCH_S390_ATOMIC__
-#define __ARCH_S390_ATOMIC__
-
/*
- * Copyright 1999,2009 IBM Corp.
+ * Copyright IBM Corp. 1999, 2009
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Denis Joseph Barrow,
* Arnd Bergmann <arndb@de.ibm.com>,
@@ -13,6 +10,9 @@
*
*/
+#ifndef __ARCH_S390_ATOMIC__
+#define __ARCH_S390_ATOMIC__
+
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index a6ff5a83e227..6f573890fb28 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -1,11 +1,6 @@
-#ifndef _S390_BITOPS_H
-#define _S390_BITOPS_H
-
/*
- * include/asm-s390/bitops.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/bitops.h"
@@ -13,6 +8,9 @@
*
*/
+#ifndef _S390_BITOPS_H
+#define _S390_BITOPS_H
+
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
diff --git a/arch/s390/include/asm/bugs.h b/arch/s390/include/asm/bugs.h
index 011f1e6a2a6c..0f5bd894f4dc 100644
--- a/arch/s390/include/asm/bugs.h
+++ b/arch/s390/include/asm/bugs.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/bugs.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/bugs.h"
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 2a30d5ac0667..4d7ccac5fd1d 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/cache.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
*
* Derived from "include/asm-i386/cache.h"
* Copyright (C) 1992, Linus Torvalds
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 9381c92cc779..1cb4bb3f32d9 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2002, 2009
+ * Copyright IBM Corp. 2002, 2009
*
* Author(s): Arnd Bergmann <arndb@de.ibm.com>
*
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index f2ef34f6d6e5..01a905eb11e0 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -6,14 +6,12 @@ struct ccw_driver;
/**
* struct ccwgroup_device - ccw group device
- * @creator_id: unique number of the driver
* @state: online/offline state
* @count: number of attached slave devices
* @dev: embedded device structure
* @cdev: variable number of slave devices, allocated as needed
*/
struct ccwgroup_device {
- unsigned long creator_id;
enum {
CCWGROUP_OFFLINE,
CCWGROUP_ONLINE,
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index 6c00f6800a34..4f57a4f3909a 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -1,18 +1,16 @@
-#ifndef _S390_CHECKSUM_H
-#define _S390_CHECKSUM_H
-
/*
- * include/asm-s390/checksum.h
* S390 fast network checksum routines
- * see also arch/S390/lib/checksum.c
*
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Ulrich Hild (first version)
* Martin Schwidefsky (heavily optimized CKSM version)
* D.J. Barrow (third attempt)
*/
+#ifndef _S390_CHECKSUM_H
+#define _S390_CHECKSUM_H
+
#include <asm/uaccess.h>
/*
diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h
index 8e88e2221771..e5bde9f9291f 100644
--- a/arch/s390/include/asm/chpid.h
+++ b/arch/s390/include/asm/chpid.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/cio/chpid.h
- *
* Copyright IBM Corp. 2007
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index 4943654ed7fd..bf115b49f444 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -1,7 +1,7 @@
/*
* ioctl interface for /dev/chsc
*
- * Copyright 2008 IBM Corp.
+ * Copyright IBM Corp. 2008
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*/
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 4c8d4d5b8bd2..77043aa44d67 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -1,7 +1,4 @@
/*
- * include/asm-s390/cio.h
- * include/asm-s390x/cio.h
- *
* Common interface for I/O on S/390
*/
#ifndef _ASM_S390_CIO_H_
diff --git a/arch/s390/include/asm/cpcmd.h b/arch/s390/include/asm/cpcmd.h
index 48a9eab16429..3dfadb5d648f 100644
--- a/arch/s390/include/asm/cpcmd.h
+++ b/arch/s390/include/asm/cpcmd.h
@@ -1,8 +1,6 @@
/*
- * arch/s390/kernel/cpcmd.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Christian Borntraeger (cborntra@de.ibm.com),
*/
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
index e0b69540216f..f5a8e2fcde0c 100644
--- a/arch/s390/include/asm/cpu.h
+++ b/arch/s390/include/asm/cpu.h
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2000,2009
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Hartmut Penner <hp@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Christian Ehrhardt <ehrhardt@de.ibm.com>,
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 718374de9c7f..8709bdef233c 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -1,7 +1,5 @@
/*
- * include/asm-s390/cputime.h
- *
- * (C) Copyright IBM Corp. 2004
+ * Copyright IBM Corp. 2004
*
* Author: Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
@@ -167,12 +165,14 @@ static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
}
struct s390_idle_data {
+ int nohz_delay;
unsigned int sequence;
unsigned long long idle_count;
- unsigned long long idle_enter;
- unsigned long long idle_exit;
unsigned long long idle_time;
- int nohz_delay;
+ unsigned long long clock_idle_enter;
+ unsigned long long clock_idle_exit;
+ unsigned long long timer_idle_enter;
+ unsigned long long timer_idle_exit;
};
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
diff --git a/arch/s390/include/asm/crw.h b/arch/s390/include/asm/crw.h
index 749a97e61bea..7c31d3e25cd1 100644
--- a/arch/s390/include/asm/crw.h
+++ b/arch/s390/include/asm/crw.h
@@ -1,6 +1,6 @@
/*
* Data definitions for channel report processing
- * Copyright IBM Corp. 2000,2009
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
diff --git a/arch/s390/include/asm/current.h b/arch/s390/include/asm/current.h
index 7a68084ec2f0..b80941f30df5 100644
--- a/arch/s390/include/asm/current.h
+++ b/arch/s390/include/asm/current.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/current.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/current.h"
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index 0be28efe5b66..38eca3ba40e2 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -1,8 +1,7 @@
/*
- * File...........: linux/drivers/s390/block/dasd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2000
* EMC Symmetrix ioctl Copyright EMC Corporation, 2008
* Author.........: Nigel Hislop <hislop_nigel@emc.com>
*
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 8a8245ed14d2..f39677e6ccde 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -1,9 +1,7 @@
/*
- * include/asm-s390/debug.h
* S/390 debug facility
*
- * Copyright (C) 1999, 2000 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
*/
#ifndef DEBUG_H
diff --git a/arch/s390/include/asm/delay.h b/arch/s390/include/asm/delay.h
index 0e3b35f96be1..3f6e4095f471 100644
--- a/arch/s390/include/asm/delay.h
+++ b/arch/s390/include/asm/delay.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/delay.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/delay.h"
diff --git a/arch/s390/include/asm/dma.h b/arch/s390/include/asm/dma.h
index 7425c6af6cd4..6fb6de4f15b0 100644
--- a/arch/s390/include/asm/dma.h
+++ b/arch/s390/include/asm/dma.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/dma.h
- *
* S390 version
*/
diff --git a/arch/s390/include/asm/ebcdic.h b/arch/s390/include/asm/ebcdic.h
index 7f6f641d32f4..c5befc5a3bf5 100644
--- a/arch/s390/include/asm/ebcdic.h
+++ b/arch/s390/include/asm/ebcdic.h
@@ -1,9 +1,8 @@
/*
- * include/asm-s390/ebcdic.h
* EBCDIC -> ASCII, ASCII -> EBCDIC conversion routines.
*
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 06151e6a3098..32e8449640fa 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/elf.h
- *
* S390 version
*
* Derived from "include/asm-i386/elf.h"
diff --git a/arch/s390/include/asm/errno.h b/arch/s390/include/asm/errno.h
index e41d5b37c4d6..395e97d8005e 100644
--- a/arch/s390/include/asm/errno.h
+++ b/arch/s390/include/asm/errno.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/errno.h
- *
* S390 version
*
*/
diff --git a/arch/s390/include/asm/etr.h b/arch/s390/include/asm/etr.h
index 538e1b36a726..a24b03b9fb64 100644
--- a/arch/s390/include/asm/etr.h
+++ b/arch/s390/include/asm/etr.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/etr.h
- *
* Copyright IBM Corp. 2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/arch/s390/include/asm/extmem.h b/arch/s390/include/asm/extmem.h
index 33837d756184..6276002d76ba 100644
--- a/arch/s390/include/asm/extmem.h
+++ b/arch/s390/include/asm/extmem.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390x/extmem.h
- *
* definitions for external memory segment support
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2003
*/
#ifndef _ASM_S390X_DCSS_H
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 510ba9ef4248..0c82ba86e997 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/hardirq.h
- *
* S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index aef0dde340d1..ea5a6e45fd93 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -1,10 +1,9 @@
/*
- * File...........: linux/include/asm-s390x/idals.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000a
-
+ * Copyright IBM Corp. 2000
+ *
* History of changes
* 07/24/00 new file
* 05/04/02 code restructuring.
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index f81a0975cbea..559e921a6bba 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/io.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/io.h"
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 38fdf451febb..37b9091ab8c0 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2006,2010
+ * Copyright IBM Corp. 2006, 2010
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index f4f38826eebb..694bcd6bd927 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -1,7 +1,5 @@
/*
- * include/asm-s390/kexec.h
- *
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005
*
* Author(s): Rolf Adelsberger <adelsberger@de.ibm.com>
*
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index a231a9439c4b..dcf6948a875c 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
- * Copyright (C) IBM Corporation, 2002, 2006
+ * Copyright IBM Corp. 2002, 2006
*
* 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
* Probes initial implementation ( includes suggestions from
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
index bdcbe0f8dd7b..d25da598ec62 100644
--- a/arch/s390/include/asm/kvm.h
+++ b/arch/s390/include/asm/kvm.h
@@ -1,7 +1,7 @@
#ifndef __LINUX_KVM_S390_H
#define __LINUX_KVM_S390_H
/*
- * asm-s390/kvm.h - KVM s390 specific structures and definitions
+ * KVM s390 specific structures and definitions
*
* Copyright IBM Corp. 2008
*
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index dd17537b9a9d..b7841546991f 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1,7 +1,7 @@
/*
- * asm-s390/kvm_host.h - definition for kernel virtual machines on s390
+ * definition for kernel virtual machines on s390
*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index a98832961035..da44867de60f 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -1,5 +1,5 @@
/*
- * asm-s390/kvm_para.h - definition for paravirtual devices on s390
+ * definition for paravirtual devices on s390
*
* Copyright IBM Corp. 2008
*
diff --git a/arch/s390/include/asm/kvm_virtio.h b/arch/s390/include/asm/kvm_virtio.h
index 72f614181eff..44a438ca9e72 100644
--- a/arch/s390/include/asm/kvm_virtio.h
+++ b/arch/s390/include/asm/kvm_virtio.h
@@ -1,5 +1,5 @@
/*
- * kvm_virtio.h - definition for virtio for kvm on s390
+ * definition for virtio for kvm on s390
*
* Copyright IBM Corp. 2008
*
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 47853debb3b9..aab5555bbbda 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
* Author(s): Hartmut Penner <hp@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Denis Joseph Barrow,
@@ -302,12 +302,7 @@ struct _lowcore {
*/
__u64 ipib; /* 0x0e00 */
__u32 ipib_checksum; /* 0x0e08 */
- /*
- * Because the vmcore_info pointer is not 8 byte aligned it never
- * should not be accessed directly. For accessing the pointer, first
- * copy it to a local pointer variable.
- */
- __u8 vmcore_info[8]; /* 0x0e0c */
+ __u64 vmcore_info; /* 0x0e0c */
__u8 pad_0x0e14[0x0e18-0x0e14]; /* 0x0e14 */
__u64 os_info; /* 0x0e18 */
__u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
diff --git a/arch/s390/include/asm/mathemu.h b/arch/s390/include/asm/mathemu.h
index e8dd1ba8edb0..614dfaf47f71 100644
--- a/arch/s390/include/asm/mathemu.h
+++ b/arch/s390/include/asm/mathemu.h
@@ -1,9 +1,8 @@
/*
- * arch/s390/kernel/mathemu.h
* IEEE floating point emulation.
*
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/arch/s390/include/asm/mman.h b/arch/s390/include/asm/mman.h
index d49760e63506..abc1932ac4e1 100644
--- a/arch/s390/include/asm/mman.h
+++ b/arch/s390/include/asm/mman.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/mman.h
- *
* S390 version
*
* Derived from "include/asm-i386/mman.h"
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 69bdf72e95ec..5c63615f1349 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/mmu_context.h
- *
* S390 version
*
* Derived from "include/asm-i386/mmu_context.h"
diff --git a/arch/s390/include/asm/monwriter.h b/arch/s390/include/asm/monwriter.h
index f0cbf96c52e6..f845c8e2f861 100644
--- a/arch/s390/include/asm/monwriter.h
+++ b/arch/s390/include/asm/monwriter.h
@@ -1,7 +1,5 @@
/*
- * include/asm-s390/monwriter.h
- *
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006
* Character device driver for writing z/VM APPLDATA monitor records
* Version 1.0
* Author(s): Melissa Howland <melissah@us.ibm.com>
diff --git a/arch/s390/include/asm/nmi.h b/arch/s390/include/asm/nmi.h
index f4b60441adca..35f8ec185616 100644
--- a/arch/s390/include/asm/nmi.h
+++ b/arch/s390/include/asm/nmi.h
@@ -1,7 +1,7 @@
/*
* Machine check handler definitions
*
- * Copyright IBM Corp. 2000,2009
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index f7ec548c2b9d..27ab3c7c1e8b 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/page.h
- *
* S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
* Author(s): Hartmut Penner (hp@de.ibm.com)
*/
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 43078c194394..590c3219c634 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/pgalloc.h
- *
* S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index b3227415abda..6bd7d7483017 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/pgtable.h
- *
* S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Ulrich Weigand (weigand@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 7be104c0f192..7bcc14e395f0 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/posix_types.h
- *
* S390 version
*
*/
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 20d0585cf905..c40fa91e38a8 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/processor.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
@@ -348,4 +346,14 @@ extern void (*s390_base_ext_handler_fn)(void);
".previous\n"
#endif
+extern int memcpy_real(void *, void *, size_t);
+extern void memcpy_absolute(void *, void *, size_t);
+
+#define mem_assign_absolute(dest, val) { \
+ __typeof__(dest) __tmp = (val); \
+ \
+ BUILD_BUG_ON(sizeof(__tmp) != sizeof(val)); \
+ memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
+}
+
#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index aeb77f017985..d5f08ea566ed 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/ptrace.h
- *
* S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*/
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index f039d86adf67..57d0d7e794b1 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -1,7 +1,5 @@
/*
- * linux/include/asm-s390/qdio.h
- *
- * Copyright 2000,2008 IBM Corp.
+ * Copyright IBM Corp. 2000, 2008
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h
index 2c7c898c03e4..3a896cf52589 100644
--- a/arch/s390/include/asm/qeth.h
+++ b/arch/s390/include/asm/qeth.h
@@ -1,9 +1,7 @@
/*
- * include/asm-s390/qeth.h
- *
* ioctl definitions for qeth driver
*
- * Copyright (C) 2004 IBM Corporation
+ * Copyright IBM Corp. 2004
*
* Author(s): Thomas Spatzier <tspat@de.ibm.com>
*
diff --git a/arch/s390/include/asm/reset.h b/arch/s390/include/asm/reset.h
index 3d6ad4ad2a3f..804578587a7a 100644
--- a/arch/s390/include/asm/reset.h
+++ b/arch/s390/include/asm/reset.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/reset.h
- *
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
diff --git a/arch/s390/include/asm/resource.h b/arch/s390/include/asm/resource.h
index 366c01de04f2..ec23d1c73c92 100644
--- a/arch/s390/include/asm/resource.h
+++ b/arch/s390/include/asm/resource.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/resource.h
- *
* S390 version
*
* Derived from "include/asm-i386/resources.h"
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index 1ceee10264c3..487f9b64efb9 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -2,10 +2,8 @@
#define _S390_RWSEM_H
/*
- * include/asm-s390/rwsem.h
- *
* S390 version
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2002
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index bf238c55740b..e62a555557ee 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/sclp.h
- *
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
@@ -55,5 +53,7 @@ int sclp_chp_configure(struct chp_id chpid);
int sclp_chp_deconfigure(struct chp_id chpid);
int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
+bool sclp_has_linemode(void);
+bool sclp_has_vt220(void);
#endif /* _ASM_S390_SCLP_H */
diff --git a/arch/s390/include/asm/scsw.h b/arch/s390/include/asm/scsw.h
index de389cb54d28..4071d00978cb 100644
--- a/arch/s390/include/asm/scsw.h
+++ b/arch/s390/include/asm/scsw.h
@@ -1,7 +1,7 @@
/*
* Helper functions for scsw access.
*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 40eb2ff88e9e..57e80534375a 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/setup.h
- *
* S390 version
- * Copyright IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
*/
#ifndef _ASM_S390_SETUP_H
diff --git a/arch/s390/include/asm/shmparam.h b/arch/s390/include/asm/shmparam.h
index c2e0c0508e73..e985182738f8 100644
--- a/arch/s390/include/asm/shmparam.h
+++ b/arch/s390/include/asm/shmparam.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/shmparam.h
- *
* S390 version
*
* Derived from "include/asm-i386/shmparam.h"
diff --git a/arch/s390/include/asm/sigcontext.h b/arch/s390/include/asm/sigcontext.h
index aeb6e0b13329..584787f6ce44 100644
--- a/arch/s390/include/asm/sigcontext.h
+++ b/arch/s390/include/asm/sigcontext.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/sigcontext.h
- *
* S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
*/
#ifndef _ASM_S390_SIGCONTEXT_H
diff --git a/arch/s390/include/asm/siginfo.h b/arch/s390/include/asm/siginfo.h
index e0ff1ab054be..91fd3e4b70ce 100644
--- a/arch/s390/include/asm/siginfo.h
+++ b/arch/s390/include/asm/siginfo.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/siginfo.h
- *
* S390 version
*
* Derived from "include/asm-i386/siginfo.h"
diff --git a/arch/s390/include/asm/signal.h b/arch/s390/include/asm/signal.h
index cdf5cb2fe03f..6d4d9d1faee9 100644
--- a/arch/s390/include/asm/signal.h
+++ b/arch/s390/include/asm/signal.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/signal.h
- *
* S390 version
*
* Derived from "include/asm-i386/signal.h"
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
new file mode 100644
index 000000000000..5a87d16d3e7c
--- /dev/null
+++ b/arch/s390/include/asm/sigp.h
@@ -0,0 +1,32 @@
+#ifndef __S390_ASM_SIGP_H
+#define __S390_ASM_SIGP_H
+
+/* SIGP order codes */
+#define SIGP_SENSE 1
+#define SIGP_EXTERNAL_CALL 2
+#define SIGP_EMERGENCY_SIGNAL 3
+#define SIGP_STOP 5
+#define SIGP_RESTART 6
+#define SIGP_STOP_AND_STORE_STATUS 9
+#define SIGP_INITIAL_CPU_RESET 11
+#define SIGP_SET_PREFIX 13
+#define SIGP_STORE_STATUS_AT_ADDRESS 14
+#define SIGP_SET_ARCHITECTURE 18
+#define SIGP_SENSE_RUNNING 21
+
+/* SIGP condition codes */
+#define SIGP_CC_ORDER_CODE_ACCEPTED 0
+#define SIGP_CC_STATUS_STORED 1
+#define SIGP_CC_BUSY 2
+#define SIGP_CC_NOT_OPERATIONAL 3
+
+/* SIGP cpu status bits */
+
+#define SIGP_STATUS_CHECK_STOP 0x00000010UL
+#define SIGP_STATUS_STOPPED 0x00000040UL
+#define SIGP_STATUS_EXT_CALL_PENDING 0x00000080UL
+#define SIGP_STATUS_INVALID_PARAMETER 0x00000100UL
+#define SIGP_STATUS_INCORRECT_STATE 0x00000200UL
+#define SIGP_STATUS_NOT_RUNNING 0x00000400UL
+
+#endif /* __S390_ASM_SIGP_H */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 0b6f586c1383..a0a8340daafa 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
* Author(s): Denis Joseph Barrow,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
diff --git a/arch/s390/include/asm/socket.h b/arch/s390/include/asm/socket.h
index c91b720965c0..69718cd6d635 100644
--- a/arch/s390/include/asm/socket.h
+++ b/arch/s390/include/asm/socket.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/socket.h
- *
* S390 version
*
* Derived from "include/asm-i386/socket.h"
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index fd94dfec8d08..701fe8c59e1f 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/spinlock.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Derived from "include/asm-i386/spinlock.h"
diff --git a/arch/s390/include/asm/stat.h b/arch/s390/include/asm/stat.h
index d92959eebb65..b4ca97d91466 100644
--- a/arch/s390/include/asm/stat.h
+++ b/arch/s390/include/asm/stat.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/stat.h
- *
* S390 version
*
* Derived from "include/asm-i386/stat.h"
diff --git a/arch/s390/include/asm/statfs.h b/arch/s390/include/asm/statfs.h
index 3be7fbd406c8..5acca0a34c20 100644
--- a/arch/s390/include/asm/statfs.h
+++ b/arch/s390/include/asm/statfs.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/statfs.h
- *
* S390 version
*
* Derived from "include/asm-i386/statfs.h"
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index 8cc160c9e1cb..1bd1352fa3b5 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/string.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
index a3e4ebb32090..da3bfe5cc161 100644
--- a/arch/s390/include/asm/swab.h
+++ b/arch/s390/include/asm/swab.h
@@ -2,10 +2,8 @@
#define _S390_SWAB_H
/*
- * include/asm-s390/swab.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/arch/s390/include/asm/sysinfo.h b/arch/s390/include/asm/sysinfo.h
index 79d3d6e2e9c5..282ee36f6162 100644
--- a/arch/s390/include/asm/sysinfo.h
+++ b/arch/s390/include/asm/sysinfo.h
@@ -1,7 +1,7 @@
/*
* definition for store system information stsi
*
- * Copyright IBM Corp. 2001,2008
+ * Copyright IBM Corp. 2001, 2008
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
diff --git a/arch/s390/include/asm/tape390.h b/arch/s390/include/asm/tape390.h
index 884fba48f1ff..b2bc4bab7929 100644
--- a/arch/s390/include/asm/tape390.h
+++ b/arch/s390/include/asm/tape390.h
@@ -1,10 +1,9 @@
/*************************************************************************
*
- * tape390.h
* enables user programs to display messages and control encryption
* on s390 tape devices
*
- * Copyright IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*
*************************************************************************/
diff --git a/arch/s390/include/asm/termios.h b/arch/s390/include/asm/termios.h
index bc3a35cefc96..cb9fe2786b81 100644
--- a/arch/s390/include/asm/termios.h
+++ b/arch/s390/include/asm/termios.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/termios.h
- *
* S390 version
*
* Derived from "include/asm-i386/termios.h"
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 4e40b25cd060..bb08e2afc5de 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/thread_info.h
- *
* S390 version
- * Copyright (C) IBM Corp. 2002,2006
+ * Copyright IBM Corp. 2002, 2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
deleted file mode 100644
index 15d647901e5c..000000000000
--- a/arch/s390/include/asm/timer.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * include/asm-s390/timer.h
- *
- * (C) Copyright IBM Corp. 2003,2006
- * Virtual CPU timer
- *
- * Author: Jan Glauber (jang@de.ibm.com)
- */
-
-#ifndef _ASM_S390_TIMER_H
-#define _ASM_S390_TIMER_H
-
-#include <linux/timer.h>
-
-#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
-
-struct vtimer_list {
- struct list_head entry;
-
- int cpu;
- __u64 expires;
- __u64 interval;
-
- void (*function)(unsigned long);
- unsigned long data;
-};
-
-/* the vtimer value will wrap after ca. 71 years */
-struct vtimer_queue {
- struct list_head list;
- spinlock_t lock;
- __u64 timer; /* last programmed timer */
- __u64 elapsed; /* elapsed time of timer expire values */
- __u64 idle_enter; /* cpu timer on idle enter */
- __u64 idle_exit; /* cpu timer on idle exit */
-};
-
-extern void init_virt_timer(struct vtimer_list *timer);
-extern void add_virt_timer(void *new);
-extern void add_virt_timer_periodic(void *new);
-extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
-extern int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires);
-extern int del_virt_timer(struct vtimer_list *timer);
-
-extern void init_cpu_vtimer(void);
-extern void vtime_init(void);
-
-extern void vtime_stop_cpu(void);
-extern void vtime_start_leave(void);
-
-#endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 239ece9e53c1..fba4d66788a2 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/timex.h
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
*
* Derived from "include/asm-i386/timex.h"
* Copyright (C) 1992, Linus Torvalds
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 6c8c35f8df14..6ba7c2c7217a 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/types.h
- *
* S390 version
*
* Derived from "include/asm-i386/types.h"
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 1f3a79bcd262..a8ab18b18b54 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -1,8 +1,6 @@
/*
- * include/asm-s390/uaccess.h
- *
* S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
@@ -381,8 +379,6 @@ clear_user(void __user *to, unsigned long n)
return n;
}
-extern int memcpy_real(void *, void *, size_t);
-extern void memcpy_absolute(void *, void *, size_t);
extern int copy_to_user_real(void __user *dest, void *src, size_t count);
extern int copy_from_user_real(void *dest, void __user *src, size_t count);
diff --git a/arch/s390/include/asm/ucontext.h b/arch/s390/include/asm/ucontext.h
index cfb874e66c9a..200e06325c6a 100644
--- a/arch/s390/include/asm/ucontext.h
+++ b/arch/s390/include/asm/ucontext.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/ucontext.h
- *
* S390 version
*
* Derived from "include/asm-i386/ucontext.h"
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 8a8008fe7b8f..2e37157ba6a9 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/unistd.h
- *
* S390 version
*
* Derived from "include/asm-i386/unistd.h"
diff --git a/arch/s390/include/asm/user.h b/arch/s390/include/asm/user.h
index 1b050e35fdc6..6ed1d1886333 100644
--- a/arch/s390/include/asm/user.h
+++ b/arch/s390/include/asm/user.h
@@ -1,6 +1,4 @@
/*
- * include/asm-s390/user.h
- *
* S390 version
*
* Derived from "include/asm-i386/usr.h"
diff --git a/arch/s390/include/asm/vtimer.h b/arch/s390/include/asm/vtimer.h
new file mode 100644
index 000000000000..bfe25d513ad2
--- /dev/null
+++ b/arch/s390/include/asm/vtimer.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright IBM Corp. 2003, 2012
+ * Virtual CPU timer
+ *
+ * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_TIMER_H
+#define _ASM_S390_TIMER_H
+
+#define VTIMER_MAX_SLICE (0x7fffffffffffffffULL)
+
+struct vtimer_list {
+ struct list_head entry;
+ u64 expires;
+ u64 interval;
+ void (*function)(unsigned long);
+ unsigned long data;
+};
+
+extern void init_virt_timer(struct vtimer_list *timer);
+extern void add_virt_timer(struct vtimer_list *timer);
+extern void add_virt_timer_periodic(struct vtimer_list *timer);
+extern int mod_virt_timer(struct vtimer_list *timer, u64 expires);
+extern int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires);
+extern int del_virt_timer(struct vtimer_list *timer);
+
+extern void init_cpu_vtimer(void);
+extern void vtime_init(void);
+
+extern void vtime_stop_cpu(void);
+
+#endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/asm/vtoc.h b/arch/s390/include/asm/vtoc.h
index 8406a2b3157a..221419de275e 100644
--- a/arch/s390/include/asm/vtoc.h
+++ b/arch/s390/include/asm/vtoc.h
@@ -1,9 +1,7 @@
/*
- * include/asm-s390/vtoc.h
- *
* This file contains volume label definitions for DASD devices.
*
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005
*
* Author(s): Volker Sameske <sameske@de.ibm.com>
*
diff --git a/arch/s390/include/asm/zcrypt.h b/arch/s390/include/asm/zcrypt.h
index 00d3bbd44117..e83fc116f5bf 100644
--- a/arch/s390/include/asm/zcrypt.h
+++ b/arch/s390/include/asm/zcrypt.h
@@ -3,7 +3,7 @@
*
* zcrypt 2.1.0 (user-visible header)
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 83e6edf5cf17..45ef1a7b08f9 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -9,7 +9,6 @@
#include <linux/kbuild.h>
#include <linux/sched.h>
#include <asm/cputime.h>
-#include <asm/timer.h>
#include <asm/vdso.h>
#include <asm/pgtable.h>
@@ -72,11 +71,10 @@ int main(void)
DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
BLANK();
/* idle data offsets */
- DEFINE(__IDLE_ENTER, offsetof(struct s390_idle_data, idle_enter));
- DEFINE(__IDLE_EXIT, offsetof(struct s390_idle_data, idle_exit));
- /* vtimer queue offsets */
- DEFINE(__VQ_IDLE_ENTER, offsetof(struct vtimer_queue, idle_enter));
- DEFINE(__VQ_IDLE_EXIT, offsetof(struct vtimer_queue, idle_exit));
+ DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
+ DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit));
+ DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter));
+ DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit));
/* lowcore offsets */
DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
@@ -131,6 +129,8 @@ int main(void)
DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack));
DEFINE(__LC_RESTART_STACK, offsetof(struct _lowcore, restart_stack));
DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
+ DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data));
+ DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source));
DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index c880ff72db44..797a823a2275 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -1,7 +1,7 @@
/*
* arch/s390/kernel/base.S
*
- * Copyright IBM Corp. 2006,2007
+ * Copyright IBM Corp. 2006, 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
*/
@@ -9,6 +9,7 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
+#include <asm/sigp.h>
#ifdef CONFIG_64BIT
@@ -100,7 +101,7 @@ ENTRY(diag308_reset)
.Lrestart_part2:
lhi %r0,0 # Load r0 with zero
lhi %r1,2 # Use mode 2 = ESAME (dump)
- sigp %r1,%r0,0x12 # Switch to ESAME mode
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to ESAME mode
sam64 # Switch to 64 bit addressing mode
larl %r4,.Lctlregs # Restore control registers
lctlg %c0,%c15,0(%r4)
diff --git a/arch/s390/kernel/bitmap.c b/arch/s390/kernel/bitmap.c
index 3ae4757b006a..102da5e23037 100644
--- a/arch/s390/kernel/bitmap.c
+++ b/arch/s390/kernel/bitmap.c
@@ -2,7 +2,7 @@
* Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
* See include/asm/{bitops.h|posix_types.h} for details
*
- * Copyright IBM Corp. 1999,2009
+ * Copyright IBM Corp. 1999, 2009
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
*/
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
index 914d49444f92..765fabdada9f 100644
--- a/arch/s390/kernel/compat_exec_domain.c
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -1,7 +1,7 @@
/*
* Support for 32-bit Linux for S390 personality.
*
- * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2000
* Author(s): Gerhard Tonn (ton@de.ibm.com)
*
*
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 65426525d9f2..d1225089a4bb 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -1,8 +1,6 @@
/*
- * arch/s390x/kernel/linux32.c
- *
* S390 version
- * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2000
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Gerhard Tonn (ton@de.ibm.com)
* Thomas Spatzier (tspat@de.ibm.com)
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 3c0c19830c37..a1e8a8694bb7 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -1,7 +1,5 @@
/*
- * arch/s390/kernel/compat_signal.c
- *
- * Copyright (C) IBM Corp. 2000,2006
+ * Copyright IBM Corp. 2000, 2006
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
* Gerhard Tonn (ton@de.ibm.com)
*
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index ff605a39cf43..e835d6d5b7fd 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1,8 +1,7 @@
/*
-* arch/s390/kernel/compat_wrapper.S
* wrapper for 31 bit compatible system calls.
*
-* Copyright (C) IBM Corp. 2000,2006
+* Copyright IBM Corp. 2000, 2006
* Author(s): Gerhard Tonn (ton@de.ibm.com),
* Thomas Spatzier (tspat@de.ibm.com)
*/
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index e3dd886e1b32..d7b0c4d27880 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/kernel/cpcmd.c
- *
* S390 version
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999, 2007
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Christian Borntraeger (cborntra@de.ibm.com),
*/
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
index 8cc7c9fa64f5..3819153de8bd 100644
--- a/arch/s390/kernel/crash.c
+++ b/arch/s390/kernel/crash.c
@@ -1,7 +1,5 @@
/*
- * arch/s390/kernel/crash.c
- *
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index 19e5e9eba546..21be961e8a43 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1,5 +1,4 @@
/*
- * arch/s390/kernel/debug.c
* S/390 debug facility
*
* Copyright IBM Corp. 1999, 2012
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 3221c6fca8bb..1f6b428e2762 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1,6 +1,4 @@
/*
- * arch/s390/kernel/dis.c
- *
* Disassemble s390 instructions.
*
* Copyright IBM Corp. 2007
@@ -613,6 +611,7 @@ static struct insn opcode_b2[] = {
{ "sie", 0x14, INSTR_S_RD },
{ "pc", 0x18, INSTR_S_RD },
{ "sac", 0x19, INSTR_S_RD },
+ { "servc", 0x20, INSTR_RRE_RR },
{ "cfc", 0x1a, INSTR_S_RD },
{ "ipte", 0x21, INSTR_RRE_RR },
{ "ipm", 0x22, INSTR_RRE_R0 },
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 6684fff17558..bc95a8ebd9cc 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -1,6 +1,4 @@
/*
- * arch/s390/kernel/early.c
- *
* Copyright IBM Corp. 2007, 2009
* Author(s): Hongjie Yang <hongjie@us.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index cc0dc609d738..b971c6be6298 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -1,10 +1,9 @@
/*
- * arch/s390/kernel/ebcdic.c
* ECBDIC -> ASCII, ASCII -> ECBDIC,
* upper to lower case (EBCDIC) conversion tables.
*
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Martin Peschke <peschke@fh-brandenburg.de>
*/
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 1ae93b573d7d..870bad6d56fc 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1,8 +1,7 @@
/*
- * arch/s390/kernel/entry.S
* S390 low-level entry points.
*
- * Copyright (C) IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Hartmut Penner (hp@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -18,6 +17,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
+#include <asm/sigp.h>
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 4
@@ -616,17 +616,13 @@ ext_skip:
* Load idle PSW. The second "half" of this function is in cleanup_idle.
*/
ENTRY(psw_idle)
- st %r4,__SF_EMPTY(%r15)
+ st %r3,__SF_EMPTY(%r15)
basr %r1,0
la %r1,psw_idle_lpsw+4-.(%r1)
st %r1,__SF_EMPTY+4(%r15)
oi __SF_EMPTY+4(%r15),0x80
- la %r1,.Lvtimer_max-psw_idle_lpsw-4(%r1)
- stck __IDLE_ENTER(%r2)
- ltr %r5,%r5
- stpt __VQ_IDLE_ENTER(%r3)
- jz psw_idle_lpsw
- spt 0(%r1)
+ stck __CLOCK_IDLE_ENTER(%r2)
+ stpt __TIMER_IDLE_ENTER(%r2)
psw_idle_lpsw:
lpsw __SF_EMPTY(%r15)
br %r14
@@ -723,15 +719,17 @@ ENTRY(restart_int_handler)
mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw
ahi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- lm %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu
+ l %r1,__LC_RESTART_FN # load fn, parm & source cpu
+ l %r2,__LC_RESTART_DATA
+ l %r3,__LC_RESTART_SOURCE
ltr %r3,%r3 # test source cpu address
jm 1f # negative -> skip source stop
-0: sigp %r4,%r3,1 # sigp sense to source cpu
+0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
brc 10,0b # wait for status stored
1: basr %r14,%r1 # call function
stap __SF_EMPTY(%r15) # store cpu address
lh %r3,__SF_EMPTY(%r15)
-2: sigp %r4,%r3,5 # sigp stop to current cpu
+2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
brc 2,2b
3: j 3b
@@ -883,33 +881,28 @@ cleanup_io_restore_insn:
cleanup_idle:
# copy interrupt clock & cpu timer
- mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
chi %r11,__LC_SAVE_AREA_ASYNC
je 0f
- mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0: # check if stck has been executed
cl %r9,BASED(cleanup_idle_insn)
jhe 1f
- mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
- mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
- j 2f
-1: # check if the cpu timer has been reprogrammed
- ltr %r5,%r5
- jz 2f
- spt __VQ_IDLE_ENTER(%r3)
-2: # account system time going idle
+ mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+ mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
+1: # account system time going idle
lm %r9,%r10,__LC_STEAL_TIMER
- ADD64 %r9,%r10,__IDLE_ENTER(%r2)
+ ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
stm %r9,%r10,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+ mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
lm %r9,%r10,__LC_SYSTEM_TIMER
ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
- SUB64 %r9,%r10,__VQ_IDLE_ENTER(%r3)
+ SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
stm %r9,%r10,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+ mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
# prepare return psw
n %r8,BASED(cleanup_idle_wait) # clear wait state bit
l %r9,24(%r11) # return from psw_idle
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index f66a229ab0b3..a5f4dc42a5db 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,7 +5,6 @@
#include <linux/signal.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>
-#include <asm/timer.h>
extern void (*pgm_check_table[128])(struct pt_regs *);
extern void *restart_stack;
@@ -17,8 +16,7 @@ void io_int_handler(void);
void mcck_int_handler(void);
void restart_int_handler(void);
void restart_call_handler(void);
-void psw_idle(struct s390_idle_data *, struct vtimer_queue *,
- unsigned long, int);
+void psw_idle(struct s390_idle_data *, unsigned long);
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 229fe1d07749..349b7eeb348a 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -1,8 +1,7 @@
/*
- * arch/s390/kernel/entry64.S
* S390 low-level entry points.
*
- * Copyright (C) IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Hartmut Penner (hp@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
@@ -18,6 +17,7 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
+#include <asm/sigp.h>
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
@@ -642,15 +642,11 @@ ext_skip:
* Load idle PSW. The second "half" of this function is in cleanup_idle.
*/
ENTRY(psw_idle)
- stg %r4,__SF_EMPTY(%r15)
+ stg %r3,__SF_EMPTY(%r15)
larl %r1,psw_idle_lpsw+4
stg %r1,__SF_EMPTY+8(%r15)
- larl %r1,.Lvtimer_max
- STCK __IDLE_ENTER(%r2)
- ltr %r5,%r5
- stpt __VQ_IDLE_ENTER(%r3)
- jz psw_idle_lpsw
- spt 0(%r1)
+ STCK __CLOCK_IDLE_ENTER(%r2)
+ stpt __TIMER_IDLE_ENTER(%r2)
psw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
br %r14
@@ -750,15 +746,17 @@ ENTRY(restart_int_handler)
mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
aghi %r15,-STACK_FRAME_OVERHEAD # create stack frame on stack
xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
- lmg %r1,%r3,__LC_RESTART_FN # load fn, parm & source cpu
+ lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
+ lg %r2,__LC_RESTART_DATA
+ lg %r3,__LC_RESTART_SOURCE
ltgr %r3,%r3 # test source cpu address
jm 1f # negative -> skip source stop
-0: sigp %r4,%r3,1 # sigp sense to source cpu
+0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
brc 10,0b # wait for status stored
1: basr %r14,%r1 # call function
stap __SF_EMPTY(%r15) # store cpu address
llgh %r3,__SF_EMPTY(%r15)
-2: sigp %r4,%r3,5 # sigp stop to current cpu
+2: sigp %r4,%r3,SIGP_STOP # sigp stop to current cpu
brc 2,2b
3: j 3b
@@ -916,33 +914,28 @@ cleanup_io_restore_insn:
cleanup_idle:
# copy interrupt clock & cpu timer
- mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
- mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0: # check if stck & stpt have been executed
clg %r9,BASED(cleanup_idle_insn)
jhe 1f
- mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
- mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
- j 2f
-1: # check if the cpu timer has been reprogrammed
- ltr %r5,%r5
- jz 2f
- spt __VQ_IDLE_ENTER(%r3)
-2: # account system time going idle
+ mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+ mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
+1: # account system time going idle
lg %r9,__LC_STEAL_TIMER
- alg %r9,__IDLE_ENTER(%r2)
+ alg %r9,__CLOCK_IDLE_ENTER(%r2)
slg %r9,__LC_LAST_UPDATE_CLOCK
stg %r9,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+ mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
lg %r9,__LC_SYSTEM_TIMER
alg %r9,__LC_LAST_UPDATE_TIMER
- slg %r9,__VQ_IDLE_ENTER(%r3)
+ slg %r9,__TIMER_IDLE_ENTER(%r2)
stg %r9,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+ mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
# prepare return psw
nihh %r8,0xfffd # clear wait state bit
lg %r9,48(%r11) # return from psw_idle
@@ -958,8 +951,6 @@ cleanup_idle_insn:
.quad __critical_start
.Lcritical_length:
.quad __critical_end - __critical_start
-.Lvtimer_max:
- .quad 0x7fffffffffffffff
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
@@ -974,7 +965,6 @@ ENTRY(sie64a)
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0
lmg %r0,%r13,0(%r3) # load guest gprs 0-13
- lg %r14,__LC_THREAD_INFO # pointer thread_info struct
sie_loop:
lg %r14,__LC_THREAD_INFO # pointer thread_info struct
tm __TI_flags+7(%r14),_TIF_EXIT_SIE
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 4939d15375aa..805b6686b641 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
*
* Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index d3f1ab7d90ad..a1372ae24ae1 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -1,7 +1,5 @@
/*
- * arch/s390/kernel/head31.S
- *
- * Copyright (C) IBM Corp. 2005,2010
+ * Copyright IBM Corp. 2005, 2010
*
* Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 99348c0eaa41..c108af28bbe8 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -1,7 +1,5 @@
/*
- * arch/s390/kernel/head64.S
- *
- * Copyright (C) IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
*
* Author(s): Hartmut Penner <hp@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index 796c976b5fdc..acaaaf4b7055 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -5,6 +5,8 @@
* Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
*/
+#include <asm/sigp.h>
+
#define DATAMOVER_ADDR 0x4000
#define COPY_PAGE_ADDR 0x6000
@@ -19,7 +21,7 @@
.align 2
.Lep_startup_kdump:
lhi %r1,2 # mode 2 = esame (dump)
- sigp %r1,%r0,0x12 # Switch to esame mode
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # Switch to esame mode
sam64 # Switch to 64 bit addressing
basr %r13,0
.Lbase:
@@ -88,7 +90,7 @@ startup_kdump_relocated:
sam31 # Switch to 31 bit addr mode
sr %r1,%r1 # Erase register r1
sr %r2,%r2 # Erase register r2
- sigp %r1,%r2,0x12 # Switch to 31 bit arch mode
+ sigp %r1,%r2,SIGP_SET_ARCHITECTURE # Switch to 31 bit arch mode
lpsw 0 # Start new kernel...
.align 8
.Lrestart_psw:
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 2f6cfd460cb6..e64d141555ce 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1,8 +1,7 @@
/*
- * arch/s390/kernel/ipl.c
* ipl/reipl/dump support for Linux on s390.
*
- * Copyright IBM Corp. 2005,2012
+ * Copyright IBM Corp. 2005, 2012
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
* Heiko Carstens <heiko.carstens@de.ibm.com>
* Volker Sameske <sameske@de.ibm.com>
@@ -1528,15 +1527,12 @@ static struct shutdown_action __refdata dump_action = {
static void dump_reipl_run(struct shutdown_trigger *trigger)
{
- struct {
- void *addr;
- __u32 csum;
- } __packed ipib;
+ unsigned long ipib = (unsigned long) reipl_block_actual;
+ unsigned int csum;
- ipib.csum = csum_partial(reipl_block_actual,
- reipl_block_actual->hdr.len, 0);
- ipib.addr = reipl_block_actual;
- memcpy_absolute(&S390_lowcore.ipib, &ipib, sizeof(ipib));
+ csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
+ mem_assign_absolute(S390_lowcore.ipib, ipib);
+ mem_assign_absolute(S390_lowcore.ipib_checksum, csum);
dump_run(trigger);
}
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index b4f4a7133fa1..dd7630d8aab7 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2004,2011
+ * Copyright IBM Corp. 2004, 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Holger Smolinski <Holger.Smolinski@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 64b761aef004..8aa634f5944b 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -15,7 +15,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
- * Copyright (C) IBM Corporation, 2002, 2006
+ * Copyright IBM Corp. 2002, 2006
*
* s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
*/
diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c
index 87f080b17af1..eca94e74d19a 100644
--- a/arch/s390/kernel/lgr.c
+++ b/arch/s390/kernel/lgr.c
@@ -45,7 +45,7 @@ struct lgr_info {
/*
* LGR globals
*/
-static void *lgr_page;
+static char lgr_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static struct lgr_info lgr_info_last;
static struct lgr_info lgr_info_cur;
static struct debug_info *lgr_dbf;
@@ -74,7 +74,7 @@ static void cpascii(char *dst, char *src, int size)
*/
static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
{
- struct sysinfo_1_1_1 *si = lgr_page;
+ struct sysinfo_1_1_1 *si = (void *) lgr_page;
if (stsi(si, 1, 1, 1) == -ENOSYS)
return;
@@ -91,7 +91,7 @@ static void lgr_stsi_1_1_1(struct lgr_info *lgr_info)
*/
static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
{
- struct sysinfo_2_2_2 *si = lgr_page;
+ struct sysinfo_2_2_2 *si = (void *) lgr_page;
if (stsi(si, 2, 2, 2) == -ENOSYS)
return;
@@ -105,7 +105,7 @@ static void lgr_stsi_2_2_2(struct lgr_info *lgr_info)
*/
static void lgr_stsi_3_2_2(struct lgr_info *lgr_info)
{
- struct sysinfo_3_2_2 *si = lgr_page;
+ struct sysinfo_3_2_2 *si = (void *) lgr_page;
int i;
if (stsi(si, 3, 2, 2) == -ENOSYS)
@@ -183,14 +183,9 @@ static void lgr_timer_set(void)
*/
static int __init lgr_init(void)
{
- lgr_page = (void *) __get_free_pages(GFP_KERNEL, 0);
- if (!lgr_page)
- return -ENOMEM;
lgr_dbf = debug_register("lgr", 1, 1, sizeof(struct lgr_info));
- if (!lgr_dbf) {
- free_page((unsigned long) lgr_page);
+ if (!lgr_dbf)
return -ENOMEM;
- }
debug_register_view(lgr_dbf, &debug_hex_ascii_view);
lgr_info_get(&lgr_info_last);
debug_event(lgr_dbf, 1, &lgr_info_last, sizeof(lgr_info_last));
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index cdacf8f91b2d..493304bdf1c7 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -1,7 +1,5 @@
/*
- * arch/s390/kernel/machine_kexec.c
- *
- * Copyright IBM Corp. 2005,2011
+ * Copyright IBM Corp. 2005, 2011
*
* Author(s): Rolf Adelsberger,
* Heiko Carstens <heiko.carstens@de.ibm.com>
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 7e2c38ba1373..4567ce20d900 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index f70cadec68fc..11332193db30 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
*
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index dfcb3436bad0..46412b1d7e1e 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -1,9 +1,8 @@
/*
- * arch/s390/kernel/module.c - Kernel module help for s390.
+ * Kernel module help for s390.
*
* S390 version
- * Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 2002, 2003
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 8c372ca61350..a6daa5c5cdb0 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -1,7 +1,7 @@
/*
* Machine check handler
*
- * Copyright IBM Corp. 2000,2009
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index 95fa5ac6c4ce..46480d81df00 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -60,7 +60,7 @@ void __init os_info_init(void)
os_info.version_minor = OS_INFO_VERSION_MINOR;
os_info.magic = OS_INFO_MAGIC;
os_info.csum = os_info_csum(&os_info);
- memcpy_absolute(&S390_lowcore.os_info, &ptr, sizeof(ptr));
+ mem_assign_absolute(S390_lowcore.os_info, (unsigned long) ptr);
}
#ifdef CONFIG_CRASH_DUMP
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 60055cefdd04..733175373a4c 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -1,7 +1,7 @@
/*
* This file handles the architecture dependent parts of process handling.
*
- * Copyright IBM Corp. 1999,2009
+ * Copyright IBM Corp. 1999, 2009
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Hartmut Penner <hp@de.ibm.com>,
* Denis Joseph Barrow,
@@ -25,8 +25,8 @@
#include <linux/module.h>
#include <asm/io.h>
#include <asm/processor.h>
+#include <asm/vtimer.h>
#include <asm/irq.h>
-#include <asm/timer.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/switch_to.h>
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 6e0073e43f54..572d4c9cb33b 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -1,6 +1,4 @@
/*
- * arch/s390/kernel/processor.c
- *
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
@@ -25,13 +23,15 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
*/
void __cpuinit cpu_init(void)
{
- struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
+ struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+ struct cpuid *id = &__get_cpu_var(cpu_id);
get_cpu_id(id);
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
+ memset(idle, 0, sizeof(*idle));
}
/*
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 4993e689b2c2..f4eb37680b91 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -1,7 +1,7 @@
/*
* Ptrace user space interface.
*
- * Copyright IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
* Author(s): Denis Joseph Barrow
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index ad67c214be04..dd8016b0477e 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -1,13 +1,12 @@
/*
- * arch/s390/kernel/reipl.S
- *
* S390 version
- * Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2000
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
+#include <asm/sigp.h>
#
# store_status: Empty implementation until kdump is supported on 31 bit
@@ -60,7 +59,7 @@ ENTRY(do_reipl_asm)
bas %r14,.Ldisab-.Lpg0(%r13)
.L003: st %r1,__LC_SUBCHANNEL_ID
lpsw 0
- sigp 0,0,0(6)
+ sigp 0,0,SIGP_RESTART
.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
lpsw .Ldispsw-.Lpg0(%r13)
.align 8
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 36b32658fb24..dc3b1273c4dc 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -1,11 +1,12 @@
/*
- * Copyright IBM Corp 2000,2011
+ * Copyright IBM Corp 2000, 2011
* Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
* Denis Joseph Barrow,
*/
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
+#include <asm/sigp.h>
#
# store_status
@@ -106,7 +107,7 @@ ENTRY(do_reipl_asm)
.L003: st %r1,__LC_SUBCHANNEL_ID
lhi %r1,0 # mode 0 = esa
slr %r0,%r0 # set cpuid to zero
- sigp %r1,%r0,0x12 # switch to esa mode
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esa mode
lpsw 0
.Ldisab: sll %r14,1
srl %r14,1 # need to kill hi bit to avoid specification exceptions.
diff --git a/arch/s390/kernel/relocate_kernel.S b/arch/s390/kernel/relocate_kernel.S
index c91d70aede91..f4e6f20e117a 100644
--- a/arch/s390/kernel/relocate_kernel.S
+++ b/arch/s390/kernel/relocate_kernel.S
@@ -1,7 +1,5 @@
/*
- * arch/s390/kernel/relocate_kernel.S
- *
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005
*
* Author(s): Rolf Adelsberger,
* Heiko Carstens <heiko.carstens@de.ibm.com>
@@ -9,6 +7,7 @@
*/
#include <linux/linkage.h>
+#include <asm/sigp.h>
/*
* moves the new kernel to its destination...
@@ -93,7 +92,7 @@ ENTRY(relocate_kernel)
.no_diag308:
sr %r1,%r1 # clear %r1
sr %r2,%r2 # clear %r2
- sigp %r1,%r2,0x12 # set cpuid to zero
+ sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
lpsw 0 # hopefully start new kernel...
.align 8
diff --git a/arch/s390/kernel/relocate_kernel64.S b/arch/s390/kernel/relocate_kernel64.S
index 7c3ce589a7f0..cfac28330b03 100644
--- a/arch/s390/kernel/relocate_kernel64.S
+++ b/arch/s390/kernel/relocate_kernel64.S
@@ -1,7 +1,5 @@
/*
- * arch/s390/kernel/relocate_kernel64.S
- *
- * (C) Copyright IBM Corp. 2005
+ * Copyright IBM Corp. 2005
*
* Author(s): Rolf Adelsberger,
* Heiko Carstens <heiko.carstens@de.ibm.com>
@@ -9,6 +7,7 @@
*/
#include <linux/linkage.h>
+#include <asm/sigp.h>
/*
* moves the new kernel to its destination...
@@ -45,7 +44,7 @@ ENTRY(relocate_kernel)
diag %r0,%r0,0x308
.back:
lhi %r1,1 # mode 1 = esame
- sigp %r1,%r0,0x12 # switch to esame mode
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE # switch to esame mode
sam64 # switch to 64 bit addressing mode
basr %r13,0
.back_base:
@@ -96,7 +95,7 @@ ENTRY(relocate_kernel)
sam31 # 31 bit mode
sr %r1,%r1 # erase register r1
sr %r2,%r2 # erase register r2
- sigp %r1,%r2,0x12 # set cpuid to zero
+ sigp %r1,%r2,SIGP_SET_ARCHITECTURE # set cpuid to zero
lpsw 0 # hopefully start new kernel...
.align 8
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 95792d846bb6..bf053898630d 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -1,7 +1,7 @@
/*
* Mini SCLP driver.
*
- * Copyright IBM Corp. 2004,2009
+ * Copyright IBM Corp. 2004, 2009
*
* Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 489d1d8d96b0..743c0f32fe3b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/kernel/setup.c
- *
* S390 version
- * Copyright (C) IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
* Author(s): Hartmut Penner (hp@de.ibm.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
@@ -63,6 +61,7 @@
#include <asm/kvm_virtio.h>
#include <asm/diag.h>
#include <asm/os_info.h>
+#include <asm/sclp.h>
#include "entry.h"
long psw_kernel_bits = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
@@ -138,9 +137,14 @@ __setup("condev=", condev_setup);
static void __init set_preferred_console(void)
{
- if (MACHINE_IS_KVM)
- add_preferred_console("hvc", 0, NULL);
- else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
+ if (MACHINE_IS_KVM) {
+ if (sclp_has_vt220())
+ add_preferred_console("ttyS", 1, NULL);
+ else if (sclp_has_linemode())
+ add_preferred_console("ttyS", 0, NULL);
+ else
+ add_preferred_console("hvc", 0, NULL);
+ } else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
add_preferred_console("ttyS", 0, NULL);
else if (CONSOLE_IS_3270)
add_preferred_console("tty3270", 0, NULL);
@@ -430,10 +434,11 @@ static void __init setup_lowcore(void)
lc->restart_source = -1UL;
/* Setup absolute zero lowcore */
- memcpy_absolute(&S390_lowcore.restart_stack, &lc->restart_stack,
- 4 * sizeof(unsigned long));
- memcpy_absolute(&S390_lowcore.restart_psw, &lc->restart_psw,
- sizeof(lc->restart_psw));
+ mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
+ mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
+ mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
+ mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
+ mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
@@ -598,9 +603,7 @@ static void __init setup_memory_end(void)
static void __init setup_vmcoreinfo(void)
{
#ifdef CONFIG_KEXEC
- unsigned long ptr = paddr_vmcoreinfo_note();
-
- memcpy_absolute(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
+ mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
#endif
}
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index ac565b44aabb..c13a2a37ef00 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -1,7 +1,5 @@
/*
- * arch/s390/kernel/signal.c
- *
- * Copyright (C) IBM Corp. 1999,2006
+ * Copyright IBM Corp. 1999, 2006
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*
* Based on Intel version
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 15cca26ccb6c..720fda1620f2 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,7 +1,7 @@
/*
* SMP related functions
*
- * Copyright IBM Corp. 1999,2012
+ * Copyright IBM Corp. 1999, 2012
* Author(s): Denis Joseph Barrow,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
@@ -38,40 +38,16 @@
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
+#include <asm/sigp.h>
#include "entry.h"
enum {
- sigp_sense = 1,
- sigp_external_call = 2,
- sigp_emergency_signal = 3,
- sigp_start = 4,
- sigp_stop = 5,
- sigp_restart = 6,
- sigp_stop_and_store_status = 9,
- sigp_initial_cpu_reset = 11,
- sigp_cpu_reset = 12,
- sigp_set_prefix = 13,
- sigp_store_status_at_address = 14,
- sigp_store_extended_status_at_address = 15,
- sigp_set_architecture = 18,
- sigp_conditional_emergency_signal = 19,
- sigp_sense_running = 21,
-};
-
-enum {
- sigp_order_code_accepted = 0,
- sigp_status_stored = 1,
- sigp_busy = 2,
- sigp_not_operational = 3,
-};
-
-enum {
ec_schedule = 0,
ec_call_function,
ec_call_function_single,
@@ -124,7 +100,7 @@ static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
while (1) {
cc = __pcpu_sigp(addr, order, parm, status);
- if (cc != sigp_busy)
+ if (cc != SIGP_CC_BUSY)
return cc;
cpu_relax();
}
@@ -136,7 +112,7 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
for (retry = 0; ; retry++) {
cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
- if (cc != sigp_busy)
+ if (cc != SIGP_CC_BUSY)
break;
if (retry >= 3)
udelay(10);
@@ -146,20 +122,19 @@ static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
static inline int pcpu_stopped(struct pcpu *pcpu)
{
- if (__pcpu_sigp(pcpu->address, sigp_sense,
- 0, &pcpu->status) != sigp_status_stored)
+ if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
+ 0, &pcpu->status) != SIGP_CC_STATUS_STORED)
return 0;
- /* Check for stopped and check stop state */
- return !!(pcpu->status & 0x50);
+ return !!(pcpu->status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}
static inline int pcpu_running(struct pcpu *pcpu)
{
- if (__pcpu_sigp(pcpu->address, sigp_sense_running,
- 0, &pcpu->status) != sigp_status_stored)
+ if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
+ 0, &pcpu->status) != SIGP_CC_STATUS_STORED)
return 1;
- /* Check for running status */
- return !(pcpu->status & 0x400);
+ /* Status stored condition code is equivalent to cpu not running. */
+ return 0;
}
/*
@@ -181,7 +156,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
set_bit(ec_bit, &pcpu->ec_mask);
order = pcpu_running(pcpu) ?
- sigp_external_call : sigp_emergency_signal;
+ SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
pcpu_sigp_retry(pcpu, order, 0);
}
@@ -214,7 +189,7 @@ static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
goto out;
#endif
lowcore_ptr[cpu] = lc;
- pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
+ pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
return 0;
out:
if (pcpu != &pcpu_devices[0]) {
@@ -229,7 +204,7 @@ out:
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
- pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
+ pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
lowcore_ptr[pcpu - pcpu_devices] = NULL;
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE) {
@@ -288,7 +263,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
lc->restart_fn = (unsigned long) func;
lc->restart_data = (unsigned long) data;
lc->restart_source = -1UL;
- pcpu_sigp_retry(pcpu, sigp_restart, 0);
+ pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
/*
@@ -298,26 +273,26 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
void *data, unsigned long stack)
{
struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
- struct {
- unsigned long stack;
- void *func;
- void *data;
- unsigned long source;
- } restart = { stack, func, data, stap() };
+ unsigned long source_cpu = stap();
__load_psw_mask(psw_kernel_bits);
- if (pcpu->address == restart.source)
+ if (pcpu->address == source_cpu)
func(data); /* should not return */
/* Stop target cpu (if func returns this stops the current cpu). */
- pcpu_sigp_retry(pcpu, sigp_stop, 0);
+ pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
/* Restart func on the target cpu and stop the current cpu. */
- memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
+ mem_assign_absolute(lc->restart_stack, stack);
+ mem_assign_absolute(lc->restart_fn, (unsigned long) func);
+ mem_assign_absolute(lc->restart_data, (unsigned long) data);
+ mem_assign_absolute(lc->restart_source, source_cpu);
asm volatile(
- "0: sigp 0,%0,6 # sigp restart to target cpu\n"
+ "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
- "1: sigp 0,%1,5 # sigp stop to current cpu\n"
+ "1: sigp 0,%1,%3 # sigp stop to current cpu\n"
" brc 2,1b # busy, try again\n"
- : : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
+ : : "d" (pcpu->address), "d" (source_cpu),
+ "K" (SIGP_RESTART), "K" (SIGP_STOP)
+ : "0", "1", "cc");
for (;;) ;
}
@@ -388,8 +363,8 @@ void smp_emergency_stop(cpumask_t *cpumask)
for_each_cpu(cpu, cpumask) {
struct pcpu *pcpu = pcpu_devices + cpu;
set_bit(ec_stop_cpu, &pcpu->ec_mask);
- while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
- 0, NULL) == sigp_busy &&
+ while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
+ 0, NULL) == SIGP_CC_BUSY &&
get_clock() < end)
cpu_relax();
}
@@ -425,7 +400,7 @@ void smp_send_stop(void)
/* stop all processors */
for_each_cpu(cpu, &cpumask) {
struct pcpu *pcpu = pcpu_devices + cpu;
- pcpu_sigp_retry(pcpu, sigp_stop, 0);
+ pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
while (!pcpu_stopped(pcpu))
cpu_relax();
}
@@ -436,7 +411,7 @@ void smp_send_stop(void)
*/
void smp_stop_cpu(void)
{
- pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
+ pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ;
}
@@ -590,7 +565,7 @@ static void __init smp_get_save_area(int cpu, u16 address)
}
#endif
/* Get the registers of a non-boot cpu. */
- __pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
+ __pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
}
@@ -599,8 +574,8 @@ int smp_store_status(int cpu)
struct pcpu *pcpu;
pcpu = pcpu_devices + cpu;
- if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
- 0, NULL) != sigp_order_code_accepted)
+ if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
+ 0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
return 0;
}
@@ -621,8 +596,8 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
use_sigp_detection = 1;
for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
- if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
- sigp_not_operational)
+ if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
+ SIGP_CC_NOT_OPERATIONAL)
continue;
info->cpu[info->configured].address = address;
info->configured++;
@@ -717,9 +692,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
init_cpu_vtimer();
pfault_init();
notify_cpu_starting(smp_processor_id());
- ipi_call_lock();
set_cpu_online(smp_processor_id(), true);
- ipi_call_unlock();
local_irq_enable();
/* cpu_idle will call schedule for us */
cpu_idle();
@@ -734,8 +707,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
pcpu = pcpu_devices + cpu;
if (pcpu->state != CPU_STATE_CONFIGURED)
return -EIO;
- if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
- sigp_order_code_accepted)
+ if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
+ SIGP_CC_ORDER_CODE_ACCEPTED)
return -EIO;
rc = pcpu_alloc_lowcore(pcpu, cpu);
@@ -795,7 +768,7 @@ void __cpu_die(unsigned int cpu)
void __noreturn cpu_die(void)
{
idle_task_exit();
- pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
+ pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ;
}
@@ -942,7 +915,7 @@ static ssize_t show_idle_count(struct device *dev,
do {
sequence = ACCESS_ONCE(idle->sequence);
idle_count = ACCESS_ONCE(idle->idle_count);
- if (ACCESS_ONCE(idle->idle_enter))
+ if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
} while ((sequence & 1) || (idle->sequence != sequence));
return sprintf(buf, "%llu\n", idle_count);
@@ -960,8 +933,8 @@ static ssize_t show_idle_time(struct device *dev,
now = get_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_time = ACCESS_ONCE(idle->idle_time);
- idle_enter = ACCESS_ONCE(idle->idle_enter);
- idle_exit = ACCESS_ONCE(idle->idle_exit);
+ idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+ idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (idle->sequence != sequence));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
@@ -984,14 +957,11 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
unsigned int cpu = (unsigned int)(long)hcpu;
struct cpu *c = &pcpu_devices[cpu].cpu;
struct device *s = &c->dev;
- struct s390_idle_data *idle;
int err = 0;
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- idle = &per_cpu(s390_idle, cpu);
- memset(idle, 0, sizeof(struct s390_idle_data));
err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
break;
case CPU_DEAD:
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 8841919ef7e6..1785cd82253c 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -1,9 +1,7 @@
/*
- * arch/s390/kernel/stacktrace.c
- *
* Stack trace management functions
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index dd70ef046058..d4ca4e0617b5 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -12,6 +12,7 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#include <asm/sigp.h>
/*
* Save register context in absolute 0 lowcore and call swsusp_save() to
@@ -163,7 +164,7 @@ ENTRY(swsusp_arch_resume)
diag %r0,%r0,0x308
restart_entry:
lhi %r1,1
- sigp %r1,%r0,0x12
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE
sam64
larl %r1,.Lnew_pgm_check_psw
lpswe 0(%r1)
@@ -179,7 +180,7 @@ pgm_check_entry:
larl %r4,.Lrestart_suspend_psw /* Set new restart PSW */
mvc __LC_RST_NEW_PSW(16,%r0),0(%r4)
3:
- sigp %r9,%r1,11 /* sigp initial cpu reset */
+ sigp %r9,%r1,SIGP_INITIAL_CPU_RESET /* sigp initial cpu reset */
brc 8,4f /* accepted */
brc 2,3b /* busy, try again */
@@ -190,16 +191,16 @@ pgm_check_entry:
larl %r3,_sclp_print_early
lghi %r1,0
sam31
- sigp %r1,%r0,0x12
+ sigp %r1,%r0,SIGP_SET_ARCHITECTURE
basr %r14,%r3
larl %r3,.Ldisabled_wait_31
lpsw 0(%r3)
4:
/* Switch to suspend CPU */
- sigp %r9,%r1,6 /* sigp restart to suspend CPU */
+ sigp %r9,%r1,SIGP_RESTART /* sigp restart to suspend CPU */
brc 2,4b /* busy, try again */
5:
- sigp %r9,%r2,5 /* sigp stop to current resume CPU */
+ sigp %r9,%r2,SIGP_STOP /* sigp stop to current resume CPU */
brc 2,5b /* busy, try again */
6: j 6b
@@ -207,7 +208,7 @@ restart_suspend:
larl %r1,.Lresume_cpu
llgh %r2,0(%r1)
7:
- sigp %r9,%r2,1 /* sigp sense, wait for resume CPU */
+ sigp %r9,%r2,SIGP_SENSE /* sigp sense, wait for resume CPU */
brc 8,7b /* accepted, status 0, still running */
brc 2,7b /* busy, try again */
tmll %r9,0x40 /* Test if resume CPU is stopped */
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 78ea1948ff51..b4a29eee41b8 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/kernel/sys_s390.c
- *
* S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Thomas Spatzier (tspat@de.ibm.com)
*
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index d4e1cb1dbcd1..dcec960fc724 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -1,5 +1,4 @@
/*
- * arch/s390/kernel/time.c
* Time of day based timer functions.
*
* S390 version
@@ -45,7 +44,7 @@
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
#include <asm/etr.h>
#include <asm/cio.h>
#include "entry.h"
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 4f8dc942257c..05151e06c388 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2007,2011
+ * Copyright IBM Corp. 2007, 2011
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 77cdf4234ebc..af2421a0f315 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/kernel/traps.c
- *
* S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2000
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
*
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 39ebff506946..4fc97b40a6e1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -1,71 +1,82 @@
/*
- * arch/s390/kernel/vtime.c
* Virtual cpu timer based timer functions.
*
- * S390 version
- * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2004, 2012
* Author(s): Jan Glauber <jan.glauber@de.ibm.com>
*/
-#include <linux/module.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/export.h>
#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/types.h>
#include <linux/timex.h>
-#include <linux/notifier.h>
-#include <linux/kernel_stat.h>
-#include <linux/rcupdate.h>
-#include <linux/posix-timers.h>
+#include <linux/types.h>
+#include <linux/time.h>
#include <linux/cpu.h>
-#include <linux/kprobes.h>
+#include <linux/smp.h>
-#include <asm/timer.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
+#include <asm/vtimer.h>
#include <asm/irq.h>
#include "entry.h"
-static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+static void virt_timer_expire(void);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
-static inline __u64 get_vtimer(void)
+static LIST_HEAD(virt_timer_list);
+static DEFINE_SPINLOCK(virt_timer_lock);
+static atomic64_t virt_timer_current;
+static atomic64_t virt_timer_elapsed;
+
+static inline u64 get_vtimer(void)
{
- __u64 timer;
+ u64 timer;
- asm volatile("STPT %0" : "=m" (timer));
+ asm volatile("stpt %0" : "=m" (timer));
return timer;
}
-static inline void set_vtimer(__u64 expires)
+static inline void set_vtimer(u64 expires)
{
- __u64 timer;
+ u64 timer;
- asm volatile (" STPT %0\n" /* Store current cpu timer value */
- " SPT %1" /* Set new value immediately afterwards */
- : "=m" (timer) : "m" (expires) );
+ asm volatile(
+ " stpt %0\n" /* Store current cpu timer value */
+ " spt %1" /* Set new value imm. afterwards */
+ : "=m" (timer) : "m" (expires));
S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
S390_lowcore.last_update_timer = expires;
}
+static inline int virt_timer_forward(u64 elapsed)
+{
+ BUG_ON(!irqs_disabled());
+
+ if (list_empty(&virt_timer_list))
+ return 0;
+ elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
+ return elapsed >= atomic64_read(&virt_timer_current);
+}
+
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
struct thread_info *ti = task_thread_info(tsk);
- __u64 timer, clock, user, system, steal;
+ u64 timer, clock, user, system, steal;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
- asm volatile (" STPT %0\n" /* Store current cpu timer value */
- " STCK %1" /* Store current tod clock value */
- : "=m" (S390_lowcore.last_update_timer),
- "=m" (S390_lowcore.last_update_clock) );
+ asm volatile(
+ " stpt %0\n" /* Store current cpu timer value */
+ " stck %1" /* Store current tod clock value */
+ : "=m" (S390_lowcore.last_update_timer),
+ "=m" (S390_lowcore.last_update_clock));
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
@@ -84,6 +95,8 @@ static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
S390_lowcore.steal_timer = 0;
account_steal_time(steal);
}
+
+ return virt_timer_forward(user + system);
}
void account_vtime(struct task_struct *prev, struct task_struct *next)
@@ -101,7 +114,8 @@ void account_vtime(struct task_struct *prev, struct task_struct *next)
void account_process_tick(struct task_struct *tsk, int user_tick)
{
- do_account_vtime(tsk, HARDIRQ_OFFSET);
+ if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+ virt_timer_expire();
}
/*
@@ -111,7 +125,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
void account_system_vtime(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
- __u64 timer, system;
+ u64 timer, system;
timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
@@ -121,13 +135,14 @@ void account_system_vtime(struct task_struct *tsk)
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
account_system_time(tsk, 0, system, system);
+
+ virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
void __kprobes vtime_stop_cpu(void)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
- struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
unsigned long long idle_time;
unsigned long psw_mask;
@@ -141,7 +156,7 @@ void __kprobes vtime_stop_cpu(void)
idle->nohz_delay = 0;
/* Call the assembler magic in entry.S */
- psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));
+ psw_idle(idle, psw_mask);
/* Reenable preemption tracer. */
start_critical_timings();
@@ -149,9 +164,9 @@ void __kprobes vtime_stop_cpu(void)
/* Account time spent with enabled wait psw loaded as idle time. */
idle->sequence++;
smp_wmb();
- idle_time = idle->idle_exit - idle->idle_enter;
+ idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
+ idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
- idle->idle_enter = idle->idle_exit = 0ULL;
idle->idle_count++;
account_idle_time(idle_time);
smp_wmb();
@@ -167,10 +182,10 @@ cputime64_t s390_get_idle_time(int cpu)
do {
now = get_clock();
sequence = ACCESS_ONCE(idle->sequence);
- idle_enter = ACCESS_ONCE(idle->idle_enter);
- idle_exit = ACCESS_ONCE(idle->idle_exit);
+ idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+ idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (idle->sequence != sequence));
- return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+ return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
/*
@@ -179,11 +194,11 @@ cputime64_t s390_get_idle_time(int cpu)
*/
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
- struct vtimer_list *event;
+ struct vtimer_list *tmp;
- list_for_each_entry(event, head, entry) {
- if (event->expires > timer->expires) {
- list_add_tail(&timer->entry, &event->entry);
+ list_for_each_entry(tmp, head, entry) {
+ if (tmp->expires > timer->expires) {
+ list_add_tail(&timer->entry, &tmp->entry);
return;
}
}
@@ -191,82 +206,45 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
}
/*
- * Do the callback functions of expired vtimer events.
- * Called from within the interrupt handler.
- */
-static void do_callbacks(struct list_head *cb_list)
-{
- struct vtimer_queue *vq;
- struct vtimer_list *event, *tmp;
-
- if (list_empty(cb_list))
- return;
-
- vq = &__get_cpu_var(virt_cpu_timer);
-
- list_for_each_entry_safe(event, tmp, cb_list, entry) {
- list_del_init(&event->entry);
- (event->function)(event->data);
- if (event->interval) {
- /* Recharge interval timer */
- event->expires = event->interval + vq->elapsed;
- spin_lock(&vq->lock);
- list_add_sorted(event, &vq->list);
- spin_unlock(&vq->lock);
- }
- }
-}
-
-/*
- * Handler for the virtual CPU timer.
+ * Handler for expired virtual CPU timer.
*/
-static void do_cpu_timer_interrupt(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
+static void virt_timer_expire(void)
{
- struct vtimer_queue *vq;
- struct vtimer_list *event, *tmp;
- struct list_head cb_list; /* the callback queue */
- __u64 elapsed, next;
-
- kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
- INIT_LIST_HEAD(&cb_list);
- vq = &__get_cpu_var(virt_cpu_timer);
-
- /* walk timer list, fire all expired events */
- spin_lock(&vq->lock);
-
- elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
- BUG_ON((s64) elapsed < 0);
- vq->elapsed = 0;
- list_for_each_entry_safe(event, tmp, &vq->list, entry) {
- if (event->expires < elapsed)
+ struct vtimer_list *timer, *tmp;
+ unsigned long elapsed;
+ LIST_HEAD(cb_list);
+
+ /* walk timer list, fire all expired timers */
+ spin_lock(&virt_timer_lock);
+ elapsed = atomic64_read(&virt_timer_elapsed);
+ list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
+ if (timer->expires < elapsed)
/* move expired timer to the callback queue */
- list_move_tail(&event->entry, &cb_list);
+ list_move_tail(&timer->entry, &cb_list);
else
- event->expires -= elapsed;
+ timer->expires -= elapsed;
}
- spin_unlock(&vq->lock);
-
- do_callbacks(&cb_list);
-
- /* next event is first in list */
- next = VTIMER_MAX_SLICE;
- spin_lock(&vq->lock);
- if (!list_empty(&vq->list)) {
- event = list_first_entry(&vq->list, struct vtimer_list, entry);
- next = event->expires;
+ if (!list_empty(&virt_timer_list)) {
+ timer = list_first_entry(&virt_timer_list,
+ struct vtimer_list, entry);
+ atomic64_set(&virt_timer_current, timer->expires);
+ }
+ atomic64_sub(elapsed, &virt_timer_elapsed);
+ spin_unlock(&virt_timer_lock);
+
+ /* Do callbacks and recharge periodic timers */
+ list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
+ list_del_init(&timer->entry);
+ timer->function(timer->data);
+ if (timer->interval) {
+ /* Recharge interval timer */
+ timer->expires = timer->interval +
+ atomic64_read(&virt_timer_elapsed);
+ spin_lock(&virt_timer_lock);
+ list_add_sorted(timer, &virt_timer_list);
+ spin_unlock(&virt_timer_lock);
+ }
}
- spin_unlock(&vq->lock);
- /*
- * To improve precision add the time spent by the
- * interrupt handler to the elapsed time.
- * Note: CPU timer counts down and we got an interrupt,
- * the current content is negative
- */
- elapsed = S390_lowcore.async_enter_timer - get_vtimer();
- set_vtimer(next - elapsed);
- vq->timer = next - elapsed;
- vq->elapsed = elapsed;
}
void init_virt_timer(struct vtimer_list *timer)
@@ -278,179 +256,108 @@ EXPORT_SYMBOL(init_virt_timer);
static inline int vtimer_pending(struct vtimer_list *timer)
{
- return (!list_empty(&timer->entry));
+ return !list_empty(&timer->entry);
}
-/*
- * this function should only run on the specified CPU
- */
static void internal_add_vtimer(struct vtimer_list *timer)
{
- struct vtimer_queue *vq;
- unsigned long flags;
- __u64 left, expires;
-
- vq = &per_cpu(virt_cpu_timer, timer->cpu);
- spin_lock_irqsave(&vq->lock, flags);
-
- BUG_ON(timer->cpu != smp_processor_id());
-
- if (list_empty(&vq->list)) {
- /* First timer on this cpu, just program it. */
- list_add(&timer->entry, &vq->list);
- set_vtimer(timer->expires);
- vq->timer = timer->expires;
- vq->elapsed = 0;
+ if (list_empty(&virt_timer_list)) {
+ /* First timer, just program it. */
+ atomic64_set(&virt_timer_current, timer->expires);
+ atomic64_set(&virt_timer_elapsed, 0);
+ list_add(&timer->entry, &virt_timer_list);
} else {
- /* Check progress of old timers. */
- expires = timer->expires;
- left = get_vtimer();
- if (likely((s64) expires < (s64) left)) {
+ /* Update timer against current base. */
+ timer->expires += atomic64_read(&virt_timer_elapsed);
+ if (likely((s64) timer->expires <
+ (s64) atomic64_read(&virt_timer_current)))
/* The new timer expires before the current timer. */
- set_vtimer(expires);
- vq->elapsed += vq->timer - left;
- vq->timer = expires;
- } else {
- vq->elapsed += vq->timer - left;
- vq->timer = left;
- }
- /* Insert new timer into per cpu list. */
- timer->expires += vq->elapsed;
- list_add_sorted(timer, &vq->list);
+ atomic64_set(&virt_timer_current, timer->expires);
+ /* Insert new timer into the list. */
+ list_add_sorted(timer, &virt_timer_list);
}
-
- spin_unlock_irqrestore(&vq->lock, flags);
- /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
- put_cpu();
}
-static inline void prepare_vtimer(struct vtimer_list *timer)
+static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
- BUG_ON(!timer->function);
- BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
- BUG_ON(vtimer_pending(timer));
- timer->cpu = get_cpu();
+ unsigned long flags;
+
+ timer->interval = periodic ? timer->expires : 0;
+ spin_lock_irqsave(&virt_timer_lock, flags);
+ internal_add_vtimer(timer);
+ spin_unlock_irqrestore(&virt_timer_lock, flags);
}
/*
* add_virt_timer - add an oneshot virtual CPU timer
*/
-void add_virt_timer(void *new)
+void add_virt_timer(struct vtimer_list *timer)
{
- struct vtimer_list *timer;
-
- timer = (struct vtimer_list *)new;
- prepare_vtimer(timer);
- timer->interval = 0;
- internal_add_vtimer(timer);
+ __add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);
/*
* add_virt_timer_int - add an interval virtual CPU timer
*/
-void add_virt_timer_periodic(void *new)
+void add_virt_timer_periodic(struct vtimer_list *timer)
{
- struct vtimer_list *timer;
-
- timer = (struct vtimer_list *)new;
- prepare_vtimer(timer);
- timer->interval = timer->expires;
- internal_add_vtimer(timer);
+ __add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
-static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
+static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
- struct vtimer_queue *vq;
unsigned long flags;
- int cpu;
+ int rc;
BUG_ON(!timer->function);
- BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
if (timer->expires == expires && vtimer_pending(timer))
return 1;
-
- cpu = get_cpu();
- vq = &per_cpu(virt_cpu_timer, cpu);
-
- /* disable interrupts before test if timer is pending */
- spin_lock_irqsave(&vq->lock, flags);
-
- /* if timer isn't pending add it on the current CPU */
- if (!vtimer_pending(timer)) {
- spin_unlock_irqrestore(&vq->lock, flags);
-
- if (periodic)
- timer->interval = expires;
- else
- timer->interval = 0;
- timer->expires = expires;
- timer->cpu = cpu;
- internal_add_vtimer(timer);
- return 0;
- }
-
- /* check if we run on the right CPU */
- BUG_ON(timer->cpu != cpu);
-
- list_del_init(&timer->entry);
+ spin_lock_irqsave(&virt_timer_lock, flags);
+ rc = vtimer_pending(timer);
+ if (rc)
+ list_del_init(&timer->entry);
+ timer->interval = periodic ? expires : 0;
timer->expires = expires;
- if (periodic)
- timer->interval = expires;
-
- /* the timer can't expire anymore so we can release the lock */
- spin_unlock_irqrestore(&vq->lock, flags);
internal_add_vtimer(timer);
- return 1;
+ spin_unlock_irqrestore(&virt_timer_lock, flags);
+ return rc;
}
/*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
* returns whether it has modified a pending timer (1) or not (0)
*/
-int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);
/*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
* returns whether it has modified a pending timer (1) or not (0)
*/
-int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);
/*
- * delete a virtual timer
+ * Delete a virtual timer.
*
* returns whether the deleted timer was pending (1) or not (0)
*/
int del_virt_timer(struct vtimer_list *timer)
{
unsigned long flags;
- struct vtimer_queue *vq;
- /* check if timer is pending */
if (!vtimer_pending(timer))
return 0;
-
- vq = &per_cpu(virt_cpu_timer, timer->cpu);
- spin_lock_irqsave(&vq->lock, flags);
-
- /* we don't interrupt a running timer, just let it expire! */
+ spin_lock_irqsave(&virt_timer_lock, flags);
list_del_init(&timer->entry);
-
- spin_unlock_irqrestore(&vq->lock, flags);
+ spin_unlock_irqrestore(&virt_timer_lock, flags);
return 1;
}
EXPORT_SYMBOL(del_virt_timer);
@@ -458,20 +365,10 @@ EXPORT_SYMBOL(del_virt_timer);
/*
* Start the virtual CPU timer on the current CPU.
*/
-void init_cpu_vtimer(void)
+void __cpuinit init_cpu_vtimer(void)
{
- struct vtimer_queue *vq;
-
- /* initialize per cpu vtimer structure */
- vq = &__get_cpu_var(virt_cpu_timer);
- INIT_LIST_HEAD(&vq->list);
- spin_lock_init(&vq->lock);
-
- /* enable cpu timer interrupts */
- __ctl_set_bit(0,10);
-
/* set initial cpu timer */
- set_vtimer(0x7fffffffffffffffULL);
+ set_vtimer(VTIMER_MAX_SLICE);
}
static int __cpuinit s390_nohz_notify(struct notifier_block *self,
@@ -493,12 +390,7 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self,
void __init vtime_init(void)
{
- /* request the cpu timer external interrupt */
- if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
- panic("Couldn't request external interrupt 0x1005");
-
/* Enable cpu timer interrupts on the boot cpu. */
init_cpu_vtimer();
cpu_notifier(s390_nohz_notify, 0);
}
-
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index b23d9ac77dfc..c88bb7793390 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -1,7 +1,7 @@
/*
- * diag.c - handling diagnose instructions
+ * handling diagnose instructions
*
- * Copyright IBM Corp. 2008,2011
+ * Copyright IBM Corp. 2008, 2011
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index c86f6ae43f76..4703f129e95e 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -1,7 +1,7 @@
/*
- * access.h - access guest memory
+ * access guest memory
*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 979cbe55bf5e..adae539f12e2 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -1,7 +1,7 @@
/*
- * intercept.c - in-kernel handling for sie intercepts
+ * in-kernel handling for sie intercepts
*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 2d9f9a72bb81..b7bc1aac8ed2 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1,5 +1,5 @@
/*
- * interrupt.c - handling kvm guest interrupts
+ * handling kvm guest interrupts
*
* Copyright IBM Corp. 2008
*
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 664766d0c83c..d470ccbfabae 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1,7 +1,7 @@
/*
- * s390host.c -- hosting zSeries kernel virtual machines
+ * hosting zSeries kernel virtual machines
*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -347,6 +347,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
vcpu->arch.guest_fpregs.fpc = 0;
asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
vcpu->arch.sie_block->gbea = 1;
+ atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 2294377975e8..d75bc5e92c5b 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -1,7 +1,7 @@
/*
- * kvm_s390.h - definition for kvm on s390
+ * definition for kvm on s390
*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 68a6b2ed16bf..60da903d6f3e 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -1,5 +1,5 @@
/*
- * priv.c - handling privileged instructions
+ * handling privileged instructions
*
* Copyright IBM Corp. 2008
*
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 0ad4cf238391..56f80e1f98f7 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -1,7 +1,7 @@
/*
- * sigp.c - handlinge interprocessor communication
+ * handling interprocessor communication
*
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (version 2 only)
@@ -15,38 +15,10 @@
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
+#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
-/* sigp order codes */
-#define SIGP_SENSE 0x01
-#define SIGP_EXTERNAL_CALL 0x02
-#define SIGP_EMERGENCY 0x03
-#define SIGP_START 0x04
-#define SIGP_STOP 0x05
-#define SIGP_RESTART 0x06
-#define SIGP_STOP_STORE_STATUS 0x09
-#define SIGP_INITIAL_CPU_RESET 0x0b
-#define SIGP_CPU_RESET 0x0c
-#define SIGP_SET_PREFIX 0x0d
-#define SIGP_STORE_STATUS_ADDR 0x0e
-#define SIGP_SET_ARCH 0x12
-#define SIGP_SENSE_RUNNING 0x15
-
-/* cpu status bits */
-#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
-#define SIGP_STAT_NOT_RUNNING 0x00000400UL
-#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
-#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
-#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
-#define SIGP_STAT_STOPPED 0x00000040UL
-#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
-#define SIGP_STAT_CHECK_STOP 0x00000010UL
-#define SIGP_STAT_INOPERATIVE 0x00000004UL
-#define SIGP_STAT_INVALID_ORDER 0x00000002UL
-#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
-
-
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
u64 *reg)
{
@@ -54,19 +26,23 @@ static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
int rc;
if (cpu_addr >= KVM_MAX_VCPUS)
- return 3; /* not operational */
+ return SIGP_CC_NOT_OPERATIONAL;
spin_lock(&fi->lock);
if (fi->local_int[cpu_addr] == NULL)
- rc = 3; /* not operational */
+ rc = SIGP_CC_NOT_OPERATIONAL;
else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
- & CPUSTAT_STOPPED)) {
- *reg &= 0xffffffff00000000UL;
- rc = 1; /* status stored */
- } else {
+ & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
+ rc = SIGP_CC_ORDER_CODE_ACCEPTED;
+ else {
*reg &= 0xffffffff00000000UL;
- *reg |= SIGP_STAT_STOPPED;
- rc = 1; /* status stored */
+ if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
+ & CPUSTAT_ECALL_PEND)
+ *reg |= SIGP_STATUS_EXT_CALL_PENDING;
+ if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
+ & CPUSTAT_STOPPED)
+ *reg |= SIGP_STATUS_STOPPED;
+ rc = SIGP_CC_STATUS_STORED;
}
spin_unlock(&fi->lock);
@@ -82,7 +58,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
int rc;
if (cpu_addr >= KVM_MAX_VCPUS)
- return 3; /* not operational */
+ return SIGP_CC_NOT_OPERATIONAL;
inti = kzalloc(sizeof(*inti), GFP_KERNEL);
if (!inti)
@@ -94,7 +70,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
spin_lock(&fi->lock);
li = fi->local_int[cpu_addr];
if (li == NULL) {
- rc = 3; /* not operational */
+ rc = SIGP_CC_NOT_OPERATIONAL;
kfree(inti);
goto unlock;
}
@@ -105,7 +81,7 @@ static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
if (waitqueue_active(&li->wq))
wake_up_interruptible(&li->wq);
spin_unlock_bh(&li->lock);
- rc = 0; /* order accepted */
+ rc = SIGP_CC_ORDER_CODE_ACCEPTED;
VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
spin_unlock(&fi->lock);
@@ -120,7 +96,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
int rc;
if (cpu_addr >= KVM_MAX_VCPUS)
- return 3; /* not operational */
+ return SIGP_CC_NOT_OPERATIONAL;
inti = kzalloc(sizeof(*inti), GFP_KERNEL);
if (!inti)
@@ -132,7 +108,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
spin_lock(&fi->lock);
li = fi->local_int[cpu_addr];
if (li == NULL) {
- rc = 3; /* not operational */
+ rc = SIGP_CC_NOT_OPERATIONAL;
kfree(inti);
goto unlock;
}
@@ -143,7 +119,7 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
if (waitqueue_active(&li->wq))
wake_up_interruptible(&li->wq);
spin_unlock_bh(&li->lock);
- rc = 0; /* order accepted */
+ rc = SIGP_CC_ORDER_CODE_ACCEPTED;
VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
spin_unlock(&fi->lock);
@@ -171,7 +147,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
out:
spin_unlock_bh(&li->lock);
- return 0; /* order accepted */
+ return SIGP_CC_ORDER_CODE_ACCEPTED;
}
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
@@ -181,12 +157,12 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
int rc;
if (cpu_addr >= KVM_MAX_VCPUS)
- return 3; /* not operational */
+ return SIGP_CC_NOT_OPERATIONAL;
spin_lock(&fi->lock);
li = fi->local_int[cpu_addr];
if (li == NULL) {
- rc = 3; /* not operational */
+ rc = SIGP_CC_NOT_OPERATIONAL;
goto unlock;
}
@@ -210,11 +186,11 @@ static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
switch (parameter & 0xff) {
case 0:
- rc = 3; /* not operational */
+ rc = SIGP_CC_NOT_OPERATIONAL;
break;
case 1:
case 2:
- rc = 0; /* order accepted */
+ rc = SIGP_CC_ORDER_CODE_ACCEPTED;
break;
default:
rc = -EOPNOTSUPP;
@@ -235,21 +211,23 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
address = address & 0x7fffe000u;
if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
- *reg |= SIGP_STAT_INVALID_PARAMETER;
- return 1; /* invalid parameter */
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_INVALID_PARAMETER;
+ return SIGP_CC_STATUS_STORED;
}
inti = kzalloc(sizeof(*inti), GFP_KERNEL);
if (!inti)
- return 2; /* busy */
+ return SIGP_CC_BUSY;
spin_lock(&fi->lock);
if (cpu_addr < KVM_MAX_VCPUS)
li = fi->local_int[cpu_addr];
if (li == NULL) {
- rc = 1; /* incorrect state */
- *reg &= SIGP_STAT_INCORRECT_STATE;
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_INCORRECT_STATE;
+ rc = SIGP_CC_STATUS_STORED;
kfree(inti);
goto out_fi;
}
@@ -257,8 +235,9 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
spin_lock_bh(&li->lock);
/* cpu must be in stopped state */
if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
- rc = 1; /* incorrect state */
- *reg &= SIGP_STAT_INCORRECT_STATE;
+ *reg &= 0xffffffff00000000UL;
+ *reg |= SIGP_STATUS_INCORRECT_STATE;
+ rc = SIGP_CC_STATUS_STORED;
kfree(inti);
goto out_li;
}
@@ -270,7 +249,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
atomic_set(&li->active, 1);
if (waitqueue_active(&li->wq))
wake_up_interruptible(&li->wq);
- rc = 0; /* order accepted */
+ rc = SIGP_CC_ORDER_CODE_ACCEPTED;
VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
@@ -287,21 +266,21 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
if (cpu_addr >= KVM_MAX_VCPUS)
- return 3; /* not operational */
+ return SIGP_CC_NOT_OPERATIONAL;
spin_lock(&fi->lock);
if (fi->local_int[cpu_addr] == NULL)
- rc = 3; /* not operational */
+ rc = SIGP_CC_NOT_OPERATIONAL;
else {
if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
& CPUSTAT_RUNNING) {
/* running */
- rc = 1;
+ rc = SIGP_CC_ORDER_CODE_ACCEPTED;
} else {
/* not running */
*reg &= 0xffffffff00000000UL;
- *reg |= SIGP_STAT_NOT_RUNNING;
- rc = 0;
+ *reg |= SIGP_STATUS_NOT_RUNNING;
+ rc = SIGP_CC_STATUS_STORED;
}
}
spin_unlock(&fi->lock);
@@ -314,23 +293,23 @@ static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
static int __sigp_restart(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
- int rc = 0;
struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
struct kvm_s390_local_interrupt *li;
+ int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
if (cpu_addr >= KVM_MAX_VCPUS)
- return 3; /* not operational */
+ return SIGP_CC_NOT_OPERATIONAL;
spin_lock(&fi->lock);
li = fi->local_int[cpu_addr];
if (li == NULL) {
- rc = 3; /* not operational */
+ rc = SIGP_CC_NOT_OPERATIONAL;
goto out;
}
spin_lock_bh(&li->lock);
if (li->action_bits & ACTION_STOP_ON_STOP)
- rc = 2; /* busy */
+ rc = SIGP_CC_BUSY;
else
VCPU_EVENT(vcpu, 4, "sigp restart %x to handle userspace",
cpu_addr);
@@ -375,7 +354,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
vcpu->stat.instruction_sigp_external_call++;
rc = __sigp_external_call(vcpu, cpu_addr);
break;
- case SIGP_EMERGENCY:
+ case SIGP_EMERGENCY_SIGNAL:
vcpu->stat.instruction_sigp_emergency++;
rc = __sigp_emergency(vcpu, cpu_addr);
break;
@@ -383,12 +362,12 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
vcpu->stat.instruction_sigp_stop++;
rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
break;
- case SIGP_STOP_STORE_STATUS:
+ case SIGP_STOP_AND_STORE_STATUS:
vcpu->stat.instruction_sigp_stop++;
rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
ACTION_STOP_ON_STOP);
break;
- case SIGP_SET_ARCH:
+ case SIGP_SET_ARCHITECTURE:
vcpu->stat.instruction_sigp_arch++;
rc = __sigp_set_arch(vcpu, parameter);
break;
@@ -405,7 +384,7 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
case SIGP_RESTART:
vcpu->stat.instruction_sigp_restart++;
rc = __sigp_restart(vcpu, cpu_addr);
- if (rc == 2) /* busy */
+ if (rc == SIGP_CC_BUSY)
break;
/* user space must know about restart */
default:
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 9f1f71e85778..42d0cf89121d 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -1,7 +1,7 @@
/*
* Precise Delay Loops for S390
*
- * Copyright IBM Corp. 1999,2008
+ * Copyright IBM Corp. 1999, 2008
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Heiko Carstens <heiko.carstens@de.ibm.com>,
*/
@@ -12,8 +12,8 @@
#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/interrupt.h>
+#include <asm/vtimer.h>
#include <asm/div64.h>
-#include <asm/timer.h>
void __delay(unsigned long loops)
{
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
index d9e62c0b576a..261152f83242 100644
--- a/arch/s390/lib/div64.c
+++ b/arch/s390/lib/div64.c
@@ -1,9 +1,7 @@
/*
- * arch/s390/lib/div64.c
- *
* __div64_32 implementation for 31 bit.
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 093eb694d9c1..f709983f41f8 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -1,8 +1,7 @@
/*
- * arch/s390/lib/spinlock.c
* Out of line spinlock code.
*
- * Copyright (C) IBM Corp. 2004, 2006
+ * Copyright IBM Corp. 2004, 2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 4143b7c19096..846ec64ab2c9 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -1,9 +1,8 @@
/*
- * arch/s390/lib/string.c
* Optimized string functions
*
* S390 version
- * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2004
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
index 1d2536cb630b..315dbe09983e 100644
--- a/arch/s390/lib/uaccess.h
+++ b/arch/s390/lib/uaccess.h
@@ -1,6 +1,4 @@
/*
- * arch/s390/uaccess.h
- *
* Copyright IBM Corp. 2007
*
*/
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 58a75a8ae90c..2443ae476e33 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -1,9 +1,7 @@
/*
- * arch/s390/lib/uaccess_mvcos.c
- *
* Optimized user space space access functions based on mvcos.
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Gerald Schaefer (gerald.schaefer@de.ibm.com)
*/
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 342ae35a5ba9..60ee2b883797 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -1,6 +1,4 @@
/*
- * arch/s390/lib/uaccess_pt.c
- *
* User access functions based on page table walks for enhanced
* system layout without hardware support.
*
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 57e94298539b..6fbd06338270 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -1,10 +1,8 @@
/*
- * arch/s390/lib/uaccess_std.c
- *
* Standard user space access functions based on mvcp/mvcs and doing
* interesting things in the secondary space mode.
*
- * Copyright (C) IBM Corp. 2006
+ * Copyright IBM Corp. 2006
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
* Gerald Schaefer (gerald.schaefer@de.ibm.com)
*/
diff --git a/arch/s390/math-emu/math.c b/arch/s390/math-emu/math.c
index cd4e9c168dd7..58bff541fde9 100644
--- a/arch/s390/math-emu/math.c
+++ b/arch/s390/math-emu/math.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/math-emu/math.c
- *
* S390 version
- * Copyright (C) 1999-2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2001
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*
* 'math.c' emulates IEEE instructions on a S390 processor
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index 1f1dba9dcf58..479e94282910 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -1,7 +1,7 @@
/*
* Collaborative memory management interface.
*
- * Copyright IBM Corp 2003,2010
+ * Copyright IBM Corp 2003, 2010
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
*
*/
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 075ddada4911..519bba716cc3 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -1,10 +1,9 @@
/*
- * File...........: arch/s390/mm/extmem.c
* Author(s)......: Carsten Otte <cotte@de.ibm.com>
* Rob M van der Heij <rvdheij@nl.ibm.com>
* Steven Shultz <shultzss@us.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation 2002-2004
+ * Copyright IBM Corp. 2002, 2004
*/
#define KMSG_COMPONENT "extmem"
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 72cec9ecd96c..6a12d1bb6e09 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/mm/fault.c
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com)
* Ulrich Weigand (uweigand@de.ibm.com)
*
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 900de2b3cf28..532525ec88c1 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -1,7 +1,7 @@
/*
* IBM System z Huge TLB Page Support for Kernel.
*
- * Copyright 2007 IBM Corp.
+ * Copyright IBM Corp. 2007
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 2bea0605856e..6adbc082618a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -1,8 +1,6 @@
/*
- * arch/s390/mm/init.c
- *
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Hartmut Penner (hp@de.ibm.com)
*
* Derived from "arch/i386/mm/init.c"
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 2857c48486ea..573384256c5c 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -1,6 +1,4 @@
/*
- * linux/arch/s390/mm/mmap.c
- *
* flexible mmap layout support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index a3db5a3ea083..1cab221077cc 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2007,2011
+ * Copyright IBM Corp. 2007, 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 71ae20df674e..6f896e75ab49 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -1,6 +1,4 @@
/*
- * arch/s390/mm/vmem.c
- *
* Copyright IBM Corp. 2006
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
index bc4b84a35cad..c82f62fb9c28 100644
--- a/arch/s390/oprofile/backtrace.c
+++ b/arch/s390/oprofile/backtrace.c
@@ -1,8 +1,6 @@
-/**
- * arch/s390/oprofile/backtrace.c
- *
+/*
* S390 Version
- * Copyright (C) 2005 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright IBM Corp. 2005
* Author(s): Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
*/
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index a4a89fa980d6..0cb385da202c 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -1,6 +1,4 @@
-/**
- * arch/s390/oprofile/hwsampler.c
- *
+/*
* Copyright IBM Corp. 2010
* Author: Heinz Graalfs <graalfs@de.ibm.com>
*/
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index 2297be406c61..a1e9d69a9c90 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -1,8 +1,6 @@
-/**
- * arch/s390/oprofile/init.c
- *
+/*
* S390 Version
- * Copyright (C) 2002-2011 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2002, 2011
* Author(s): Thomas Spatzier (tspat@de.ibm.com)
* Author(s): Mahesh Salgaonkar (mahesh@linux.vnet.ibm.com)
* Author(s): Heinz Graalfs (graalfs@linux.vnet.ibm.com)
diff --git a/arch/s390/oprofile/op_counter.h b/arch/s390/oprofile/op_counter.h
index 1a8d3ca09014..61b2531eef17 100644
--- a/arch/s390/oprofile/op_counter.h
+++ b/arch/s390/oprofile/op_counter.h
@@ -1,7 +1,5 @@
-/**
- * arch/s390/oprofile/op_counter.h
- *
- * Copyright (C) 2011 IBM Deutschland Entwicklung GmbH, IBM Corporation
+/*
+ * Copyright IBM Corp. 2011
* Author(s): Andreas Krebbel (krebbel@linux.vnet.ibm.com)
*
* @remark Copyright 2011 OProfile authors
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 31d9db7913e4..a24595d83ad6 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -60,6 +60,7 @@ config SUPERH32
config SUPERH64
def_bool ARCH = "sh64"
+ select KALLSYMS
config ARCH_DEFCONFIG
string
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 1f56b35d3248..7048c03490d9 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -44,6 +44,8 @@ config SH_7721_SOLUTION_ENGINE
config SH_7722_SOLUTION_ENGINE
bool "SolutionEngine7722"
select SOLUTION_ENGINE
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
depends on CPU_SUBTYPE_SH7722
help
Select 7722 SolutionEngine if configuring for a Hitachi SH772
@@ -80,6 +82,8 @@ config SH_7780_SOLUTION_ENGINE
config SH_7343_SOLUTION_ENGINE
bool "SolutionEngine7343"
select SOLUTION_ENGINE
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN
depends on CPU_SUBTYPE_SH7343
help
Select 7343 SolutionEngine if configuring for a Hitachi
@@ -295,6 +299,7 @@ config SH_X3PROTO
bool "SH-X3 Prototype board"
depends on CPU_SUBTYPE_SHX3
select NO_IOPORT if !PCI
+ select IRQ_DOMAIN
config SH_MAGIC_PANEL_R2
bool "Magic Panel R2"
diff --git a/arch/sh/boards/board-polaris.c b/arch/sh/boards/board-polaris.c
index 37d03c097ae9..0978ae2e4847 100644
--- a/arch/sh/boards/board-polaris.c
+++ b/arch/sh/boards/board-polaris.c
@@ -1,5 +1,5 @@
/*
- * June 2006 steve.glendinning@smsc.com
+ * June 2006 Steve Glendinning <steve.glendinning@shawell.net>
*
* Polaris-specific resource declaration
*
diff --git a/arch/sh/boards/mach-dreamcast/irq.c b/arch/sh/boards/mach-dreamcast/irq.c
index f63d323f411f..2789647abebe 100644
--- a/arch/sh/boards/mach-dreamcast/irq.c
+++ b/arch/sh/boards/mach-dreamcast/irq.c
@@ -8,10 +8,11 @@
* This file is part of the LinuxDC project (www.linuxdc.org)
* Released under the terms of the GNU GPL v2.0
*/
-
#include <linux/irq.h>
#include <linux/io.h>
-#include <asm/irq.h>
+#include <linux/irq.h>
+#include <linux/export.h>
+#include <linux/err.h>
#include <mach/sysasic.h>
/*
@@ -141,26 +142,15 @@ int systemasic_irq_demux(int irq)
void systemasic_irq_init(void)
{
- int i, nid = cpu_to_node(boot_cpu_data);
-
- /* Assign all virtual IRQs to the System ASIC int. handler */
- for (i = HW_EVENT_IRQ_BASE; i < HW_EVENT_IRQ_MAX; i++) {
- unsigned int irq;
-
- irq = create_irq_nr(i, nid);
- if (unlikely(irq == 0)) {
- pr_err("%s: failed hooking irq %d for systemasic\n",
- __func__, i);
- return;
- }
+ int irq_base, i;
- if (unlikely(irq != i)) {
- pr_err("%s: got irq %d but wanted %d, bailing.\n",
- __func__, irq, i);
- destroy_irq(irq);
- return;
- }
+ irq_base = irq_alloc_descs(HW_EVENT_IRQ_BASE, HW_EVENT_IRQ_BASE,
+ HW_EVENT_IRQ_MAX - HW_EVENT_IRQ_BASE, -1);
+ if (IS_ERR_VALUE(irq_base)) {
+ pr_err("%s: failed hooking irqs\n", __func__);
+ return;
+ }
+ for (i = HW_EVENT_IRQ_BASE; i < HW_EVENT_IRQ_MAX; i++)
irq_set_chip_and_handler(i, &systemasic_int, handle_level_irq);
- }
}
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index fd45ffc48340..7646bf0486c2 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -1,86 +1,129 @@
/*
- * linux/arch/sh/boards/se/7343/irq.c
+ * Hitachi UL SolutionEngine 7343 FPGA IRQ Support.
*
* Copyright (C) 2008 Yoshihiro Shimoda
+ * Copyright (C) 2012 Paul Mundt
*
- * Based on linux/arch/sh/boards/se/7722/irq.c
+ * Based on linux/arch/sh/boards/se/7343/irq.c
* Copyright (C) 2007 Nobuhiro Iwamatsu
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#define DRV_NAME "SE7343-FPGA"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#define irq_reg_readl ioread16
+#define irq_reg_writel iowrite16
+
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
#include <linux/io.h>
+#include <asm/sizes.h>
#include <mach-se/mach/se7343.h>
-unsigned int se7343_fpga_irq[SE7343_FPGA_IRQ_NR] = { 0, };
+#define PA_CPLD_BASE_ADDR 0x11400000
+#define PA_CPLD_ST_REG 0x08 /* CPLD Interrupt status register */
+#define PA_CPLD_IMSK_REG 0x0a /* CPLD Interrupt mask register */
-static void disable_se7343_irq(struct irq_data *data)
-{
- unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
- __raw_writew(__raw_readw(PA_CPLD_IMSK) | 1 << bit, PA_CPLD_IMSK);
-}
+static void __iomem *se7343_irq_regs;
+struct irq_domain *se7343_irq_domain;
-static void enable_se7343_irq(struct irq_data *data)
+static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
- __raw_writew(__raw_readw(PA_CPLD_IMSK) & ~(1 << bit), PA_CPLD_IMSK);
-}
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ unsigned long mask;
+ int bit;
-static struct irq_chip se7343_irq_chip __read_mostly = {
- .name = "SE7343-FPGA",
- .irq_mask = disable_se7343_irq,
- .irq_unmask = enable_se7343_irq,
-};
+ chip->irq_mask_ack(data);
-static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
+ mask = ioread16(se7343_irq_regs + PA_CPLD_ST_REG);
+
+ for_each_set_bit(bit, &mask, SE7343_FPGA_IRQ_NR)
+ generic_handle_irq(irq_linear_revmap(se7343_irq_domain, bit));
+
+ chip->irq_unmask(data);
+}
+
+static void __init se7343_domain_init(void)
{
- unsigned short intv = __raw_readw(PA_CPLD_ST);
- unsigned int ext_irq = 0;
+ int i;
- intv &= (1 << SE7343_FPGA_IRQ_NR) - 1;
+ se7343_irq_domain = irq_domain_add_linear(NULL, SE7343_FPGA_IRQ_NR,
+ &irq_domain_simple_ops, NULL);
+ if (unlikely(!se7343_irq_domain)) {
+ printk("Failed to get IRQ domain\n");
+ return;
+ }
- for (; intv; intv >>= 1, ext_irq++) {
- if (!(intv & 1))
- continue;
+ for (i = 0; i < SE7343_FPGA_IRQ_NR; i++) {
+ int irq = irq_create_mapping(se7343_irq_domain, i);
- generic_handle_irq(se7343_fpga_irq[ext_irq]);
+ if (unlikely(irq == 0)) {
+ printk("Failed to allocate IRQ %d\n", i);
+ return;
+ }
}
}
-/*
- * Initialize IRQ setting
- */
-void __init init_7343se_IRQ(void)
+static void __init se7343_gc_init(void)
{
- int i, irq;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int irq_base;
- __raw_writew(0, PA_CPLD_IMSK); /* disable all irqs */
- __raw_writew(0x2000, 0xb03fffec); /* mrshpc irq enable */
+ irq_base = irq_linear_revmap(se7343_irq_domain, 0);
- for (i = 0; i < SE7343_FPGA_IRQ_NR; i++) {
- irq = create_irq();
- if (irq < 0)
- return;
- se7343_fpga_irq[i] = irq;
+ gc = irq_alloc_generic_chip(DRV_NAME, 1, irq_base, se7343_irq_regs,
+ handle_level_irq);
+ if (unlikely(!gc))
+ return;
- irq_set_chip_and_handler_name(se7343_fpga_irq[i],
- &se7343_irq_chip,
- handle_level_irq,
- "level");
+ ct = gc->chip_types;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- irq_set_chip_data(se7343_fpga_irq[i], (void *)i);
- }
+ ct->regs.mask = PA_CPLD_IMSK_REG;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(SE7343_FPGA_IRQ_NR),
+ IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
irq_set_chained_handler(IRQ0_IRQ, se7343_irq_demux);
irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
+
irq_set_chained_handler(IRQ1_IRQ, se7343_irq_demux);
irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
+
irq_set_chained_handler(IRQ4_IRQ, se7343_irq_demux);
irq_set_irq_type(IRQ4_IRQ, IRQ_TYPE_LEVEL_LOW);
+
irq_set_chained_handler(IRQ5_IRQ, se7343_irq_demux);
irq_set_irq_type(IRQ5_IRQ, IRQ_TYPE_LEVEL_LOW);
}
+
+/*
+ * Initialize IRQ setting
+ */
+void __init init_7343se_IRQ(void)
+{
+ se7343_irq_regs = ioremap(PA_CPLD_BASE_ADDR, SZ_16);
+ if (unlikely(!se7343_irq_regs)) {
+ pr_err("Failed to remap CPLD\n");
+ return;
+ }
+
+ /*
+ * All FPGA IRQs disabled by default
+ */
+ iowrite16(0, se7343_irq_regs + PA_CPLD_IMSK_REG);
+
+ __raw_writew(0x2000, 0xb03fffec); /* mrshpc irq enable */
+
+ se7343_domain_init();
+ se7343_gc_init();
+}
diff --git a/arch/sh/boards/mach-se/7343/setup.c b/arch/sh/boards/mach-se/7343/setup.c
index d2370af56d77..8ce4f2a202a8 100644
--- a/arch/sh/boards/mach-se/7343/setup.c
+++ b/arch/sh/boards/mach-se/7343/setup.c
@@ -5,6 +5,7 @@
#include <linux/serial_reg.h>
#include <linux/usb/isp116x.h>
#include <linux/delay.h>
+#include <linux/irqdomain.h>
#include <asm/machvec.h>
#include <mach-se/mach/se7343.h>
#include <asm/heartbeat.h>
@@ -145,11 +146,12 @@ static struct platform_device *sh7343se_platform_devices[] __initdata = {
static int __init sh7343se_devices_setup(void)
{
/* Wire-up dynamic vectors */
- serial_platform_data[0].irq = se7343_fpga_irq[SE7343_FPGA_IRQ_UARTA];
- serial_platform_data[1].irq = se7343_fpga_irq[SE7343_FPGA_IRQ_UARTB];
-
+ serial_platform_data[0].irq = irq_find_mapping(se7343_irq_domain,
+ SE7343_FPGA_IRQ_UARTA);
+ serial_platform_data[1].irq = irq_find_mapping(se7343_irq_domain,
+ SE7343_FPGA_IRQ_UARTB);
usb_resources[2].start = usb_resources[2].end =
- se7343_fpga_irq[SE7343_FPGA_IRQ_USB];
+ irq_find_mapping(se7343_irq_domain, SE7343_FPGA_IRQ_USB);
return platform_add_devices(sh7343se_platform_devices,
ARRAY_SIZE(sh7343se_platform_devices));
diff --git a/arch/sh/boards/mach-se/7722/irq.c b/arch/sh/boards/mach-se/7722/irq.c
index aac92f21ebd2..f5e2af1bf040 100644
--- a/arch/sh/boards/mach-se/7722/irq.c
+++ b/arch/sh/boards/mach-se/7722/irq.c
@@ -1,79 +1,96 @@
/*
- * linux/arch/sh/boards/se/7722/irq.c
+ * Hitachi UL SolutionEngine 7722 FPGA IRQ Support.
*
* Copyright (C) 2007 Nobuhiro Iwamatsu
- *
- * Hitachi UL SolutionEngine 7722 Support.
+ * Copyright (C) 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
+#define DRV_NAME "SE7722-FPGA"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
+#define irq_reg_readl ioread16
+#define irq_reg_writel iowrite16
+
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <asm/irq.h>
-#include <asm/io.h>
+#include <linux/irqdomain.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <asm/sizes.h>
#include <mach-se/mach/se7722.h>
-unsigned int se7722_fpga_irq[SE7722_FPGA_IRQ_NR] = { 0, };
+#define IRQ01_BASE_ADDR 0x11800000
+#define IRQ01_MODE_REG 0
+#define IRQ01_STS_REG 4
+#define IRQ01_MASK_REG 8
-static void disable_se7722_irq(struct irq_data *data)
-{
- unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
- __raw_writew(__raw_readw(IRQ01_MASK) | 1 << bit, IRQ01_MASK);
-}
+static void __iomem *se7722_irq_regs;
+struct irq_domain *se7722_irq_domain;
-static void enable_se7722_irq(struct irq_data *data)
+static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
{
- unsigned int bit = (unsigned int)irq_data_get_irq_chip_data(data);
- __raw_writew(__raw_readw(IRQ01_MASK) & ~(1 << bit), IRQ01_MASK);
-}
+ struct irq_data *data = irq_get_irq_data(irq);
+ struct irq_chip *chip = irq_data_get_irq_chip(data);
+ unsigned long mask;
+ int bit;
-static struct irq_chip se7722_irq_chip __read_mostly = {
- .name = "SE7722-FPGA",
- .irq_mask = disable_se7722_irq,
- .irq_unmask = enable_se7722_irq,
-};
+ chip->irq_mask_ack(data);
-static void se7722_irq_demux(unsigned int irq, struct irq_desc *desc)
+ mask = ioread16(se7722_irq_regs + IRQ01_STS_REG);
+
+ for_each_set_bit(bit, &mask, SE7722_FPGA_IRQ_NR)
+ generic_handle_irq(irq_linear_revmap(se7722_irq_domain, bit));
+
+ chip->irq_unmask(data);
+}
+
+static void __init se7722_domain_init(void)
{
- unsigned short intv = __raw_readw(IRQ01_STS);
- unsigned int ext_irq = 0;
+ int i;
- intv &= (1 << SE7722_FPGA_IRQ_NR) - 1;
+ se7722_irq_domain = irq_domain_add_linear(NULL, SE7722_FPGA_IRQ_NR,
+ &irq_domain_simple_ops, NULL);
+ if (unlikely(!se7722_irq_domain)) {
+ printk("Failed to get IRQ domain\n");
+ return;
+ }
- for (; intv; intv >>= 1, ext_irq++) {
- if (!(intv & 1))
- continue;
+ for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) {
+ int irq = irq_create_mapping(se7722_irq_domain, i);
- generic_handle_irq(se7722_fpga_irq[ext_irq]);
+ if (unlikely(irq == 0)) {
+ printk("Failed to allocate IRQ %d\n", i);
+ return;
+ }
}
}
-/*
- * Initialize IRQ setting
- */
-void __init init_se7722_IRQ(void)
+static void __init se7722_gc_init(void)
{
- int i, irq;
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+ unsigned int irq_base;
- __raw_writew(0, IRQ01_MASK); /* disable all irqs */
- __raw_writew(0x2000, 0xb03fffec); /* mrshpc irq enable */
+ irq_base = irq_linear_revmap(se7722_irq_domain, 0);
- for (i = 0; i < SE7722_FPGA_IRQ_NR; i++) {
- irq = create_irq();
- if (irq < 0)
- return;
- se7722_fpga_irq[i] = irq;
+ gc = irq_alloc_generic_chip(DRV_NAME, 1, irq_base, se7722_irq_regs,
+ handle_level_irq);
+ if (unlikely(!gc))
+ return;
- irq_set_chip_and_handler_name(se7722_fpga_irq[i],
- &se7722_irq_chip,
- handle_level_irq,
- "level");
+ ct = gc->chip_types;
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- irq_set_chip_data(se7722_fpga_irq[i], (void *)i);
- }
+ ct->regs.mask = IRQ01_MASK_REG;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(SE7722_FPGA_IRQ_NR),
+ IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
irq_set_chained_handler(IRQ0_IRQ, se7722_irq_demux);
irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
@@ -81,3 +98,25 @@ void __init init_se7722_IRQ(void)
irq_set_chained_handler(IRQ1_IRQ, se7722_irq_demux);
irq_set_irq_type(IRQ1_IRQ, IRQ_TYPE_LEVEL_LOW);
}
+
+/*
+ * Initialize FPGA IRQs
+ */
+void __init init_se7722_IRQ(void)
+{
+ se7722_irq_regs = ioremap(IRQ01_BASE_ADDR, SZ_16);
+ if (unlikely(!se7722_irq_regs)) {
+ printk("Failed to remap IRQ01 regs\n");
+ return;
+ }
+
+ /*
+ * All FPGA IRQs disabled by default
+ */
+ iowrite16(0, se7722_irq_regs + IRQ01_MASK_REG);
+
+ __raw_writew(0x2000, 0xb03fffec); /* mrshpc irq enable */
+
+ se7722_domain_init();
+ se7722_gc_init();
+}
diff --git a/arch/sh/boards/mach-se/7722/setup.c b/arch/sh/boards/mach-se/7722/setup.c
index 8f7f0550cfde..e04e2bc46984 100644
--- a/arch/sh/boards/mach-se/7722/setup.c
+++ b/arch/sh/boards/mach-se/7722/setup.c
@@ -2,6 +2,7 @@
* linux/arch/sh/boards/se/7722/setup.c
*
* Copyright (C) 2007 Nobuhiro Iwamatsu
+ * Copyright (C) 2012 Paul Mundt
*
* Hitachi UL SolutionEngine 7722 Support.
*
@@ -15,6 +16,7 @@
#include <linux/ata_platform.h>
#include <linux/input.h>
#include <linux/input/sh_keysc.h>
+#include <linux/irqdomain.h>
#include <linux/smc91x.h>
#include <linux/sh_intc.h>
#include <mach-se/mach/se7722.h>
@@ -143,10 +145,10 @@ static int __init se7722_devices_setup(void)
/* Wire-up dynamic vectors */
cf_ide_resources[2].start = cf_ide_resources[2].end =
- se7722_fpga_irq[SE7722_FPGA_IRQ_MRSHPC0];
+ irq_find_mapping(se7722_irq_domain, SE7722_FPGA_IRQ_MRSHPC0);
smc91x_eth_resources[1].start = smc91x_eth_resources[1].end =
- se7722_fpga_irq[SE7722_FPGA_IRQ_SMC];
+ irq_find_mapping(se7722_irq_domain, SE7722_FPGA_IRQ_SMC);
return platform_add_devices(se7722_devices, ARRAY_SIZE(se7722_devices));
}
diff --git a/arch/sh/boards/mach-se/7724/irq.c b/arch/sh/boards/mach-se/7724/irq.c
index c6342ce7768d..5d1d3ec9a6cd 100644
--- a/arch/sh/boards/mach-se/7724/irq.c
+++ b/arch/sh/boards/mach-se/7724/irq.c
@@ -17,8 +17,10 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <asm/irq.h>
-#include <asm/io.h>
+#include <linux/export.h>
+#include <linux/topology.h>
+#include <linux/io.h>
+#include <linux/err.h>
#include <mach-se/mach/se7724.h>
struct fpga_irq {
@@ -111,7 +113,7 @@ static void se7724_irq_demux(unsigned int irq, struct irq_desc *desc)
*/
void __init init_se7724_IRQ(void)
{
- int i, nid = cpu_to_node(boot_cpu_data);
+ int irq_base, i;
__raw_writew(0xffff, IRQ0_MR); /* mask all */
__raw_writew(0xffff, IRQ1_MR); /* mask all */
@@ -121,28 +123,16 @@ void __init init_se7724_IRQ(void)
__raw_writew(0x0000, IRQ2_SR); /* clear irq */
__raw_writew(0x002a, IRQ_MODE); /* set irq type */
- for (i = 0; i < SE7724_FPGA_IRQ_NR; i++) {
- int irq, wanted;
-
- wanted = SE7724_FPGA_IRQ_BASE + i;
-
- irq = create_irq_nr(wanted, nid);
- if (unlikely(irq == 0)) {
- pr_err("%s: failed hooking irq %d for FPGA\n",
- __func__, wanted);
- return;
- }
-
- if (unlikely(irq != wanted)) {
- pr_err("%s: got irq %d but wanted %d, bailing.\n",
- __func__, irq, wanted);
- destroy_irq(irq);
- return;
- }
+ irq_base = irq_alloc_descs(SE7724_FPGA_IRQ_BASE, SE7724_FPGA_IRQ_BASE,
+ SE7724_FPGA_IRQ_NR, numa_node_id());
+ if (IS_ERR_VALUE(irq_base)) {
+ pr_err("%s: failed hooking irqs for FPGA\n", __func__);
+ return;
+ }
- irq_set_chip_and_handler_name(irq, &se7724_irq_chip,
+ for (i = 0; i < SE7724_FPGA_IRQ_NR; i++)
+ irq_set_chip_and_handler_name(irq_base + i, &se7724_irq_chip,
handle_level_irq, "level");
- }
irq_set_chained_handler(IRQ0_IRQ, se7724_irq_demux);
irq_set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
diff --git a/arch/sh/boards/mach-x3proto/gpio.c b/arch/sh/boards/mach-x3proto/gpio.c
index f33b2b57019c..3ea65e9b56e8 100644
--- a/arch/sh/boards/mach-x3proto/gpio.c
+++ b/arch/sh/boards/mach-x3proto/gpio.c
@@ -3,7 +3,7 @@
*
* Renesas SH-X3 Prototype Baseboard GPIO Support.
*
- * Copyright (C) 2010 Paul Mundt
+ * Copyright (C) 2010 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
@@ -17,6 +17,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
+#include <linux/irqdomain.h>
#include <linux/io.h>
#include <mach/ilsel.h>
#include <mach/hardware.h>
@@ -26,7 +27,7 @@
#define KEYDETR 0xb81c0004
static DEFINE_SPINLOCK(x3proto_gpio_lock);
-static unsigned int x3proto_gpio_irq_map[NR_BASEBOARD_GPIOS] = { 0, };
+static struct irq_domain *x3proto_irq_domain;
static int x3proto_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
@@ -49,7 +50,14 @@ static int x3proto_gpio_get(struct gpio_chip *chip, unsigned gpio)
static int x3proto_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
{
- return x3proto_gpio_irq_map[gpio];
+ int virq;
+
+ if (gpio < chip->ngpio)
+ virq = irq_create_mapping(x3proto_irq_domain, gpio);
+ else
+ virq = -ENXIO;
+
+ return virq;
}
static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -62,9 +70,8 @@ static void x3proto_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
chip->irq_mask_ack(data);
mask = __raw_readw(KEYDETR);
-
for_each_set_bit(pin, &mask, NR_BASEBOARD_GPIOS)
- generic_handle_irq(x3proto_gpio_to_irq(NULL, pin));
+ generic_handle_irq(irq_linear_revmap(x3proto_irq_domain, pin));
chip->irq_unmask(data);
}
@@ -78,10 +85,23 @@ struct gpio_chip x3proto_gpio_chip = {
.ngpio = NR_BASEBOARD_GPIOS,
};
+static int x3proto_gpio_irq_map(struct irq_domain *domain, unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler_name(virq, &dummy_irq_chip, handle_simple_irq,
+ "gpio");
+
+ return 0;
+}
+
+static struct irq_domain_ops x3proto_gpio_irq_ops = {
+ .map = x3proto_gpio_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
int __init x3proto_gpio_setup(void)
{
- int ilsel;
- int ret, i;
+ int ilsel, ret;
ilsel = ilsel_enable(ILSEL_KEY);
if (unlikely(ilsel < 0))
@@ -91,21 +111,10 @@ int __init x3proto_gpio_setup(void)
if (unlikely(ret))
goto err_gpio;
- for (i = 0; i < NR_BASEBOARD_GPIOS; i++) {
- unsigned long flags;
- int irq = create_irq();
-
- if (unlikely(irq < 0)) {
- ret = -EINVAL;
- goto err_irq;
- }
-
- spin_lock_irqsave(&x3proto_gpio_lock, flags);
- x3proto_gpio_irq_map[i] = irq;
- irq_set_chip_and_handler_name(irq, &dummy_irq_chip,
- handle_simple_irq, "gpio");
- spin_unlock_irqrestore(&x3proto_gpio_lock, flags);
- }
+ x3proto_irq_domain = irq_domain_add_linear(NULL, NR_BASEBOARD_GPIOS,
+ &x3proto_gpio_irq_ops, NULL);
+ if (unlikely(!x3proto_irq_domain))
+ goto err_irq;
pr_info("registering '%s' support, handling GPIOs %u -> %u, "
"bound to IRQ %u\n",
@@ -119,10 +128,6 @@ int __init x3proto_gpio_setup(void)
return 0;
err_irq:
- for (; i >= 0; --i)
- if (x3proto_gpio_irq_map[i])
- destroy_irq(x3proto_gpio_irq_map[i]);
-
ret = gpiochip_remove(&x3proto_gpio_chip);
if (unlikely(ret))
pr_err("Failed deregistering GPIO\n");
diff --git a/arch/sh/cchips/hd6446x/hd64461.c b/arch/sh/cchips/hd6446x/hd64461.c
index eb4ea4d44d59..e9735616bdc8 100644
--- a/arch/sh/cchips/hd6446x/hd64461.c
+++ b/arch/sh/cchips/hd6446x/hd64461.c
@@ -73,10 +73,7 @@ static void hd64461_irq_demux(unsigned int irq, struct irq_desc *desc)
int __init setup_hd64461(void)
{
- int i, nid = cpu_to_node(boot_cpu_data);
-
- if (!MACH_HD64461)
- return 0;
+ int irq_base, i;
printk(KERN_INFO
"HD64461 configured at 0x%x on irq %d(mapped into %d to %d)\n",
@@ -89,28 +86,16 @@ int __init setup_hd64461(void)
#endif
__raw_writew(0xffff, HD64461_NIMR);
- /* IRQ 80 -> 95 belongs to HD64461 */
- for (i = HD64461_IRQBASE; i < HD64461_IRQBASE + 16; i++) {
- unsigned int irq;
-
- irq = create_irq_nr(i, nid);
- if (unlikely(irq == 0)) {
- pr_err("%s: failed hooking irq %d for HD64461\n",
- __func__, i);
- return -EBUSY;
- }
-
- if (unlikely(irq != i)) {
- pr_err("%s: got irq %d but wanted %d, bailing.\n",
- __func__, irq, i);
- destroy_irq(irq);
- return -EINVAL;
- }
-
- irq_set_chip_and_handler(i, &hd64461_irq_chip,
- handle_level_irq);
+ irq_base = irq_alloc_descs(HD64461_IRQBASE, HD64461_IRQBASE, 16, -1);
+ if (IS_ERR_VALUE(irq_base)) {
+ pr_err("%s: failed hooking irqs for HD64461\n", __func__);
+ return irq_base;
}
+ for (i = 0; i < 16; i++)
+ irq_set_chip_and_handler(irq_base + i, &hd64461_irq_chip,
+ handle_level_irq);
+
irq_set_chained_handler(CONFIG_HD64461_IRQ, hd64461_irq_demux);
irq_set_irq_type(CONFIG_HD64461_IRQ, IRQ_TYPE_LEVEL_LOW);
diff --git a/arch/sh/drivers/pci/fixups-dreamcast.c b/arch/sh/drivers/pci/fixups-dreamcast.c
index edeea8960c30..a5fe1b54c952 100644
--- a/arch/sh/drivers/pci/fixups-dreamcast.c
+++ b/arch/sh/drivers/pci/fixups-dreamcast.c
@@ -28,7 +28,7 @@
#include <asm/irq.h>
#include <mach/pci.h>
-static void __init gapspci_fixup_resources(struct pci_dev *dev)
+static void __devinit gapspci_fixup_resources(struct pci_dev *dev)
{
struct pci_channel *p = dev->sysdata;
diff --git a/arch/sh/drivers/pci/fixups-sdk7786.c b/arch/sh/drivers/pci/fixups-sdk7786.c
index 0e18ee332553..36eb6fc3c18a 100644
--- a/arch/sh/drivers/pci/fixups-sdk7786.c
+++ b/arch/sh/drivers/pci/fixups-sdk7786.c
@@ -23,9 +23,9 @@
* Misconfigurations can be detected through the FPGA via the slot
* resistors to determine card presence. Hotplug remains unsupported.
*/
-static unsigned int slot4en __devinitdata;
+static unsigned int slot4en __initdata;
-char *__devinit pcibios_setup(char *str)
+char *__init pcibios_setup(char *str)
{
if (strcmp(str, "slot4en") == 0) {
slot4en = 1;
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 9d10a3cb8797..40db2d0aef3f 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -59,7 +59,7 @@ static void __devinit pcibios_scanbus(struct pci_channel *hose)
need_domain_info = need_domain_info || hose->index;
hose->need_domain_info = need_domain_info;
if (bus) {
- next_busno = bus->subordinate + 1;
+ next_busno = bus->busn_res.end + 1;
/* Don't allow 8-bit bus number overflow inside the hose -
reserve some space for bridges. */
if (next_busno > 224) {
@@ -197,11 +197,6 @@ void __init pcibios_update_irq(struct pci_dev *dev, int irq)
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
-char * __devinit __weak pcibios_setup(char *str)
-{
- return str;
-}
-
static void __init
pcibios_bus_report_status_early(struct pci_channel *hose,
int top_bus, int current_bus,
diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h
index 2b87d86bfc41..dcf278075429 100644
--- a/arch/sh/include/asm/bug.h
+++ b/arch/sh/include/asm/bug.h
@@ -110,6 +110,10 @@ do { \
#include <asm-generic/bug.h>
struct pt_regs;
+
+/* arch/sh/kernel/traps.c */
extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
+extern void die_if_kernel(const char *str, struct pt_regs *regs, long err);
+extern void die_if_no_fixup(const char *str, struct pt_regs *regs, long err);
#endif /* __ASM_SH_BUG_H */
diff --git a/arch/sh/include/asm/io_noioport.h b/arch/sh/include/asm/io_noioport.h
index e136d28d1d2e..4d48f1436a63 100644
--- a/arch/sh/include/asm/io_noioport.h
+++ b/arch/sh/include/asm/io_noioport.h
@@ -19,9 +19,20 @@ static inline u32 inl(unsigned long addr)
return -1;
}
-#define outb(x, y) BUG()
-#define outw(x, y) BUG()
-#define outl(x, y) BUG()
+static inline void outb(unsigned char x, unsigned long port)
+{
+ BUG();
+}
+
+static inline void outw(unsigned short x, unsigned long port)
+{
+ BUG();
+}
+
+static inline void outl(unsigned int x, unsigned long port)
+{
+ BUG();
+}
#define inb_p(addr) inb(addr)
#define inw_p(addr) inw(addr)
diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h
index a6201f10c273..8d6a831e7ba1 100644
--- a/arch/sh/include/asm/kdebug.h
+++ b/arch/sh/include/asm/kdebug.h
@@ -10,6 +10,8 @@ enum die_val {
DIE_SSTEP,
};
+/* arch/sh/kernel/dumpstack.c */
extern void printk_address(unsigned long address, int reliable);
+extern void dump_mem(const char *str, unsigned long bottom, unsigned long top);
#endif /* __ASM_SH_KDEBUG_H */
diff --git a/arch/sh/include/asm/siu.h b/arch/sh/include/asm/siu.h
index 1d95c78808d1..580b7ac228b7 100644
--- a/arch/sh/include/asm/siu.h
+++ b/arch/sh/include/asm/siu.h
@@ -14,7 +14,6 @@
struct device;
struct siu_platform {
- struct device *dma_dev;
unsigned int dma_slave_tx_a;
unsigned int dma_slave_rx_a;
unsigned int dma_slave_tx_b;
diff --git a/arch/sh/include/mach-se/mach/se7343.h b/arch/sh/include/mach-se/mach/se7343.h
index 50b5d575dff0..542521c970c6 100644
--- a/arch/sh/include/mach-se/mach/se7343.h
+++ b/arch/sh/include/mach-se/mach/se7343.h
@@ -50,9 +50,6 @@
#define PA_LED 0xb0C00000 /* LED */
#define LED_SHIFT 0
#define PA_DIPSW 0xb0900000 /* Dip switch 31 */
-#define PA_CPLD_MODESET 0xb1400004 /* CPLD Mode set register */
-#define PA_CPLD_ST 0xb1400008 /* CPLD Interrupt status register */
-#define PA_CPLD_IMSK 0xb140000a /* CPLD Interrupt mask register */
/* Area 5 */
#define PA_EXT5 0x14000000
#define PA_EXT5_SIZE 0x04000000
@@ -135,8 +132,10 @@
#define SE7343_FPGA_IRQ_NR 12
+struct irq_domain;
+
/* arch/sh/boards/se/7343/irq.c */
-extern unsigned int se7343_fpga_irq[];
+extern struct irq_domain *se7343_irq_domain;
void init_7343se_IRQ(void);
diff --git a/arch/sh/include/mach-se/mach/se7722.h b/arch/sh/include/mach-se/mach/se7722.h
index 201081ebdbce..637e7ac753f8 100644
--- a/arch/sh/include/mach-se/mach/se7722.h
+++ b/arch/sh/include/mach-se/mach/se7722.h
@@ -81,12 +81,6 @@
#define IRQ0_IRQ evt2irq(0x600)
#define IRQ1_IRQ evt2irq(0x620)
-#define IRQ01_MODE 0xb1800000
-#define IRQ01_STS 0xb1800004
-#define IRQ01_MASK 0xb1800008
-
-/* Bits in IRQ01_* registers */
-
#define SE7722_FPGA_IRQ_USB 0 /* IRQ0 */
#define SE7722_FPGA_IRQ_SMC 1 /* IRQ0 */
#define SE7722_FPGA_IRQ_MRSHPC0 2 /* IRQ1 */
@@ -95,8 +89,10 @@
#define SE7722_FPGA_IRQ_MRSHPC3 5 /* IRQ1 */
#define SE7722_FPGA_IRQ_NR 6
+struct irq_domain;
+
/* arch/sh/boards/se/7722/irq.c */
-extern unsigned int se7722_fpga_irq[];
+extern struct irq_domain *se7722_irq_domain;
void init_se7722_IRQ(void);
diff --git a/arch/sh/kernel/cpu/sh3/serial-sh7720.c b/arch/sh/kernel/cpu/sh3/serial-sh7720.c
index 8832c526cdf9..c4a0336660dd 100644
--- a/arch/sh/kernel/cpu/sh3/serial-sh7720.c
+++ b/arch/sh/kernel/cpu/sh3/serial-sh7720.c
@@ -2,7 +2,7 @@
#include <linux/serial_core.h>
#include <linux/io.h>
#include <cpu/serial.h>
-#include <asm/gpio.h>
+#include <cpu/gpio.h>
static void sh7720_sci_init_pins(struct uart_port *port, unsigned int cflag)
{
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index 0f5a21907da6..65786c7f5ded 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -512,7 +512,6 @@ static struct platform_device tmu2_device = {
};
static struct siu_platform siu_platform_data = {
- .dma_dev = &dma_device.dev,
.dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX,
.dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX,
.dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
diff --git a/arch/sh/kernel/cpu/sh5/unwind.c b/arch/sh/kernel/cpu/sh5/unwind.c
index b205b25eaf45..10aed41757fc 100644
--- a/arch/sh/kernel/cpu/sh5/unwind.c
+++ b/arch/sh/kernel/cpu/sh5/unwind.c
@@ -16,6 +16,8 @@
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/io.h>
+#include <asm/unwinder.h>
+#include <asm/stacktrace.h>
static u8 regcache[63];
@@ -199,8 +201,11 @@ static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
return 0;
}
-/* Don't put this on the stack since we'll want to call sh64_unwind
- * when we're close to underflowing the stack anyway. */
+/*
+ * Don't put this on the stack since we'll want to call in to
+ * sh64_unwinder_dump() when we're close to underflowing the stack
+ * anyway.
+ */
static struct pt_regs here_regs;
extern const char syscall_ret;
@@ -208,17 +213,19 @@ extern const char ret_from_syscall;
extern const char ret_from_exception;
extern const char ret_from_irq;
-static void sh64_unwind_inner(struct pt_regs *regs);
+static void sh64_unwind_inner(const struct stacktrace_ops *ops,
+ void *data, struct pt_regs *regs);
-static void unwind_nested (unsigned long pc, unsigned long fp)
+static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
+ unsigned long pc, unsigned long fp)
{
if ((fp >= __MEMORY_START) &&
- ((fp & 7) == 0)) {
- sh64_unwind_inner((struct pt_regs *) fp);
- }
+ ((fp & 7) == 0))
+ sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
}
-static void sh64_unwind_inner(struct pt_regs *regs)
+static void sh64_unwind_inner(const struct stacktrace_ops *ops,
+ void *data, struct pt_regs *regs)
{
unsigned long pc, fp;
int ofs = 0;
@@ -232,29 +239,29 @@ static void sh64_unwind_inner(struct pt_regs *regs)
int cond;
unsigned long next_fp, next_pc;
- if (pc == ((unsigned long) &syscall_ret & ~1)) {
+ if (pc == ((unsigned long)&syscall_ret & ~1)) {
printk("SYSCALL\n");
- unwind_nested(pc,fp);
+ unwind_nested(ops, data, pc, fp);
return;
}
- if (pc == ((unsigned long) &ret_from_syscall & ~1)) {
+ if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
printk("SYSCALL (PREEMPTED)\n");
- unwind_nested(pc,fp);
+ unwind_nested(ops, data, pc, fp);
return;
}
/* In this case, the PC is discovered by lookup_prev_stack_frame but
it has 4 taken off it to look like the 'caller' */
- if (pc == ((unsigned long) &ret_from_exception & ~1)) {
+ if (pc == ((unsigned long)&ret_from_exception & ~1)) {
printk("EXCEPTION\n");
- unwind_nested(pc,fp);
+ unwind_nested(ops, data, pc, fp);
return;
}
- if (pc == ((unsigned long) &ret_from_irq & ~1)) {
+ if (pc == ((unsigned long)&ret_from_irq & ~1)) {
printk("IRQ\n");
- unwind_nested(pc,fp);
+ unwind_nested(ops, data, pc, fp);
return;
}
@@ -263,8 +270,7 @@ static void sh64_unwind_inner(struct pt_regs *regs)
pc -= ofs;
- printk("[<%08lx>] ", pc);
- print_symbol("%s\n", pc);
+ ops->address(data, pc, 1);
if (first_pass) {
/* If the innermost frame is a leaf function, it's
@@ -287,10 +293,13 @@ static void sh64_unwind_inner(struct pt_regs *regs)
}
printk("\n");
-
}
-void sh64_unwind(struct pt_regs *regs)
+static void sh64_unwinder_dump(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *sp,
+ const struct stacktrace_ops *ops,
+ void *data)
{
if (!regs) {
/*
@@ -320,7 +329,17 @@ void sh64_unwind(struct pt_regs *regs)
);
}
- printk("\nCall Trace:\n");
- sh64_unwind_inner(regs);
+ sh64_unwind_inner(ops, data, regs);
}
+static struct unwinder sh64_unwinder = {
+ .name = "sh64-unwinder",
+ .dump = sh64_unwinder_dump,
+ .rating = 150,
+};
+
+static int __init sh64_unwinder_init(void)
+{
+ return unwinder_register(&sh64_unwinder);
+}
+early_initcall(sh64_unwinder_init);
diff --git a/arch/sh/kernel/dumpstack.c b/arch/sh/kernel/dumpstack.c
index 694158b9a50f..7617dc4129ac 100644
--- a/arch/sh/kernel/dumpstack.c
+++ b/arch/sh/kernel/dumpstack.c
@@ -2,13 +2,48 @@
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
* Copyright (C) 2009 Matt Fleming
+ * Copyright (C) 2002 - 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
#include <linux/kallsyms.h>
#include <linux/ftrace.h>
#include <linux/debug_locks.h>
+#include <linux/kdebug.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
#include <asm/unwinder.h>
#include <asm/stacktrace.h>
+void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+{
+ unsigned long p;
+ int i;
+
+ printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+
+ for (p = bottom & ~31; p < top; ) {
+ printk("%04lx: ", p & 0xffff);
+
+ for (i = 0; i < 8; i++, p += 4) {
+ unsigned int val;
+
+ if (p < bottom || p >= top)
+ printk(" ");
+ else {
+ if (__get_user(val, (unsigned int __user *)p)) {
+ printk("\n");
+ return;
+ }
+ printk("%08x ", val);
+ }
+ }
+ printk("\n");
+ }
+}
+
void printk_address(unsigned long address, int reliable)
{
printk(" [<%p>] %s%pS\n", (void *) address,
@@ -106,3 +141,26 @@ void show_trace(struct task_struct *tsk, unsigned long *sp,
debug_show_held_locks(tsk);
}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ unsigned long stack;
+
+ if (!tsk)
+ tsk = current;
+ if (tsk == current)
+ sp = (unsigned long *)current_stack_pointer;
+ else
+ sp = (unsigned long *)tsk->thread.sp;
+
+ stack = (unsigned long)sp;
+ dump_mem("Stack: ", stack, THREAD_SIZE +
+ (unsigned long)task_stack_page(tsk));
+ show_trace(tsk, sp, NULL);
+}
+
+void dump_stack(void)
+{
+ show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index dadce735f746..063af10ff3c1 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -231,16 +231,6 @@ void __init init_IRQ(void)
irq_ctx_init(smp_processor_id());
}
-#ifdef CONFIG_SPARSE_IRQ
-int __init arch_probe_nr_irqs(void)
-{
- /*
- * No pre-allocated IRQs.
- */
- return 0;
-}
-#endif
-
#ifdef CONFIG_HOTPLUG_CPU
static void route_irq(struct irq_data *data, unsigned int irq, unsigned int cpu)
{
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index a87e58a9e38f..72246bc06884 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -6,9 +6,80 @@
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/module.h>
#include <asm/unwinder.h>
#include <asm/traps.h>
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char *str, struct pt_regs *regs, long err)
+{
+ static int die_counter;
+
+ oops_enter();
+
+ spin_lock_irq(&die_lock);
+ console_verbose();
+ bust_spinlocks(1);
+
+ printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+ print_modules();
+ show_regs(regs);
+
+ printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
+ task_pid_nr(current), task_stack_page(current) + 1);
+
+ if (!user_mode(regs) || in_interrupt())
+ dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
+ (unsigned long)task_stack_page(current));
+
+ notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE);
+ spin_unlock_irq(&die_lock);
+ oops_exit();
+
+ if (kexec_should_crash(current))
+ crash_kexec(regs);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ do_exit(SIGSEGV);
+}
+
+void die_if_kernel(const char *str, struct pt_regs *regs, long err)
+{
+ if (!user_mode(regs))
+ die(str, regs, err);
+}
+
+/*
+ * try and fix up kernelspace address errors
+ * - userspace errors just cause EFAULT to be returned, resulting in SEGV
+ * - kernel/userspace interfaces cause a jump to an appropriate handler
+ * - other kernel errors are bad
+ */
+void die_if_no_fixup(const char *str, struct pt_regs *regs, long err)
+{
+ if (!user_mode(regs)) {
+ const struct exception_table_entry *fixup;
+ fixup = search_exception_tables(regs->pc);
+ if (fixup) {
+ regs->pc = fixup->fixup;
+ return;
+ }
+
+ die(str, regs, err);
+ }
+}
+
#ifdef CONFIG_GENERIC_BUG
static void handle_BUG(struct pt_regs *regs)
{
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index a37175deb73f..5f513a64dedf 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -16,13 +16,11 @@
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
-#include <linux/kexec.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
@@ -48,102 +46,6 @@
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
-{
- unsigned long p;
- int i;
-
- printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
-
- for (p = bottom & ~31; p < top; ) {
- printk("%04lx: ", p & 0xffff);
-
- for (i = 0; i < 8; i++, p += 4) {
- unsigned int val;
-
- if (p < bottom || p >= top)
- printk(" ");
- else {
- if (__get_user(val, (unsigned int __user *)p)) {
- printk("\n");
- return;
- }
- printk("%08x ", val);
- }
- }
- printk("\n");
- }
-}
-
-static DEFINE_SPINLOCK(die_lock);
-
-void die(const char * str, struct pt_regs * regs, long err)
-{
- static int die_counter;
-
- oops_enter();
-
- spin_lock_irq(&die_lock);
- console_verbose();
- bust_spinlocks(1);
-
- printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
- print_modules();
- show_regs(regs);
-
- printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
- task_pid_nr(current), task_stack_page(current) + 1);
-
- if (!user_mode(regs) || in_interrupt())
- dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
- (unsigned long)task_stack_page(current));
-
- notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
-
- bust_spinlocks(0);
- add_taint(TAINT_DIE);
- spin_unlock_irq(&die_lock);
- oops_exit();
-
- if (kexec_should_crash(current))
- crash_kexec(regs);
-
- if (in_interrupt())
- panic("Fatal exception in interrupt");
-
- if (panic_on_oops)
- panic("Fatal exception");
-
- do_exit(SIGSEGV);
-}
-
-static inline void die_if_kernel(const char *str, struct pt_regs *regs,
- long err)
-{
- if (!user_mode(regs))
- die(str, regs, err);
-}
-
-/*
- * try and fix up kernelspace address errors
- * - userspace errors just cause EFAULT to be returned, resulting in SEGV
- * - kernel/userspace interfaces cause a jump to an appropriate handler
- * - other kernel errors are bad
- */
-static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
-{
- if (!user_mode(regs)) {
- const struct exception_table_entry *fixup;
- fixup = search_exception_tables(regs->pc);
- if (fixup) {
- regs->pc = fixup->fixup;
- return;
- }
-
- die(str, regs, err);
- }
-}
-
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
@@ -900,26 +802,3 @@ void __init trap_init(void)
set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp)
-{
- unsigned long stack;
-
- if (!tsk)
- tsk = current;
- if (tsk == current)
- sp = (unsigned long *)current_stack_pointer;
- else
- sp = (unsigned long *)tsk->thread.sp;
-
- stack = (unsigned long)sp;
- dump_mem("Stack: ", stack, THREAD_SIZE +
- (unsigned long)task_stack_page(tsk));
- show_trace(tsk, sp, NULL);
-}
-
-void dump_stack(void)
-{
- show_stack(NULL, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index 8dae93ed8aff..f87d20da1791 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -27,283 +27,25 @@
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/io.h>
-#include <linux/atomic.h>
+#include <asm/alignment.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>
-#undef DEBUG_EXCEPTION
-#ifdef DEBUG_EXCEPTION
-/* implemented in ../lib/dbg.c */
-extern void show_excp_regs(char *fname, int trapnr, int signr,
- struct pt_regs *regs);
-#else
-#define show_excp_regs(a, b, c, d)
-#endif
-
-static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
- unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
-
-#define DO_ERROR(trapnr, signr, str, name, tsk) \
-asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
-{ \
- do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
-}
-
-static DEFINE_SPINLOCK(die_lock);
-
-void die(const char * str, struct pt_regs * regs, long err)
-{
- console_verbose();
- spin_lock_irq(&die_lock);
- printk("%s: %lx\n", str, (err & 0xffffff));
- show_regs(regs);
- spin_unlock_irq(&die_lock);
- do_exit(SIGSEGV);
-}
-
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-{
- if (!user_mode(regs))
- die(str, regs, err);
-}
-
-static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
-{
- if (!user_mode(regs)) {
- const struct exception_table_entry *fixup;
- fixup = search_exception_tables(regs->pc);
- if (fixup) {
- regs->pc = fixup->fixup;
- return;
- }
- die(str, regs, err);
- }
-}
-
-DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
-DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
-
-
-/* Implement misaligned load/store handling for kernel (and optionally for user
- mode too). Limitation : only SHmedia mode code is handled - there is no
- handling at all for misaligned accesses occurring in SHcompact code yet. */
-
-static int misaligned_fixup(struct pt_regs *regs);
-
-asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
-{
- if (misaligned_fixup(regs) < 0) {
- do_unhandled_exception(7, SIGSEGV, "address error(load)",
- "do_address_error_load",
- error_code, regs, current);
- }
- return;
-}
-
-asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
-{
- if (misaligned_fixup(regs) < 0) {
- do_unhandled_exception(8, SIGSEGV, "address error(store)",
- "do_address_error_store",
- error_code, regs, current);
- }
- return;
-}
-
-#if defined(CONFIG_SH64_ID2815_WORKAROUND)
-
-#define OPCODE_INVALID 0
-#define OPCODE_USER_VALID 1
-#define OPCODE_PRIV_VALID 2
-
-/* getcon/putcon - requires checking which control register is referenced. */
-#define OPCODE_CTRL_REG 3
-
-/* Table of valid opcodes for SHmedia mode.
- Form a 10-bit value by concatenating the major/minor opcodes i.e.
- opcode[31:26,20:16]. The 6 MSBs of this value index into the following
- array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
- LSBs==4'b0000 etc). */
-static unsigned long shmedia_opcode_table[64] = {
- 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
- 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
- 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
- 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
- 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
- 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
-};
-
-void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
-{
- /* Workaround SH5-101 cut2 silicon defect #2815 :
- in some situations, inter-mode branches from SHcompact -> SHmedia
- which should take ITLBMISS or EXECPROT exceptions at the target
- falsely take RESINST at the target instead. */
-
- unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
- unsigned long pc, aligned_pc;
- int get_user_error;
- int trapnr = 12;
- int signr = SIGILL;
- char *exception_name = "reserved_instruction";
-
- pc = regs->pc;
- if ((pc & 3) == 1) {
- /* SHmedia : check for defect. This requires executable vmas
- to be readable too. */
- aligned_pc = pc & ~3;
- if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
- get_user_error = -EFAULT;
- } else {
- get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
- }
- if (get_user_error >= 0) {
- unsigned long index, shift;
- unsigned long major, minor, combined;
- unsigned long reserved_field;
- reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
- major = (opcode >> 26) & 0x3f;
- minor = (opcode >> 16) & 0xf;
- combined = (major << 4) | minor;
- index = major;
- shift = minor << 1;
- if (reserved_field == 0) {
- int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
- switch (opcode_state) {
- case OPCODE_INVALID:
- /* Trap. */
- break;
- case OPCODE_USER_VALID:
- /* Restart the instruction : the branch to the instruction will now be from an RTE
- not from SHcompact so the silicon defect won't be triggered. */
- return;
- case OPCODE_PRIV_VALID:
- if (!user_mode(regs)) {
- /* Should only ever get here if a module has
- SHcompact code inside it. If so, the same fix up is needed. */
- return; /* same reason */
- }
- /* Otherwise, user mode trying to execute a privileged instruction -
- fall through to trap. */
- break;
- case OPCODE_CTRL_REG:
- /* If in privileged mode, return as above. */
- if (!user_mode(regs)) return;
- /* In user mode ... */
- if (combined == 0x9f) { /* GETCON */
- unsigned long regno = (opcode >> 20) & 0x3f;
- if (regno >= 62) {
- return;
- }
- /* Otherwise, reserved or privileged control register, => trap */
- } else if (combined == 0x1bf) { /* PUTCON */
- unsigned long regno = (opcode >> 4) & 0x3f;
- if (regno >= 62) {
- return;
- }
- /* Otherwise, reserved or privileged control register, => trap */
- } else {
- /* Trap */
- }
- break;
- default:
- /* Fall through to trap. */
- break;
- }
- }
- /* fall through to normal resinst processing */
- } else {
- /* Error trying to read opcode. This typically means a
- real fault, not a RESINST any more. So change the
- codes. */
- trapnr = 87;
- exception_name = "address error (exec)";
- signr = SIGSEGV;
- }
- }
-
- do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
-}
-
-#else /* CONFIG_SH64_ID2815_WORKAROUND */
-
-/* If the workaround isn't needed, this is just a straightforward reserved
- instruction */
-DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)
-
-#endif /* CONFIG_SH64_ID2815_WORKAROUND */
-
-/* Called with interrupts disabled */
-asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
-{
- show_excp_regs(__func__, -1, -1, regs);
- die_if_kernel("exception", regs, ex);
-}
-
-int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
-{
- /* Syscall debug */
- printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
-
- die_if_kernel("unknown trapa", regs, scId);
-
- return -ENOSYS;
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp)
-{
-#ifdef CONFIG_KALLSYMS
- extern void sh64_unwind(struct pt_regs *regs);
- struct pt_regs *regs;
-
- regs = tsk ? tsk->thread.kregs : NULL;
-
- sh64_unwind(regs);
-#else
- printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
-#endif
-}
-
-void show_task(unsigned long *sp)
-{
- show_stack(NULL, sp);
-}
-
-void dump_stack(void)
-{
- show_task(NULL);
-}
-/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
-EXPORT_SYMBOL(dump_stack);
-
-static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
- unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
-{
- show_excp_regs(fn_name, trapnr, signr, regs);
-
- if (user_mode(regs))
- force_sig(signr, tsk);
-
- die_if_no_fixup(str, regs, error_code);
-}
-
-static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
+static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
{
int get_user_error;
unsigned long aligned_pc;
- unsigned long opcode;
+ insn_size_t opcode;
if ((pc & 3) == 1) {
/* SHmedia */
aligned_pc = pc & ~3;
if (from_user_mode) {
- if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
+ if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) {
get_user_error = -EFAULT;
} else {
- get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
+ get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
*result_opcode = opcode;
}
return get_user_error;
@@ -311,7 +53,7 @@ static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int
/* If the fault was in the kernel, we can either read
* this directly, or if not, we fault.
*/
- *result_opcode = *(unsigned long *) aligned_pc;
+ *result_opcode = *(insn_size_t *)aligned_pc;
return 0;
}
} else if ((pc & 1) == 0) {
@@ -337,17 +79,23 @@ static int address_is_sign_extended(__u64 a)
#endif
}
+/* return -1 for fault, 0 for OK */
static int generate_and_check_address(struct pt_regs *regs,
- __u32 opcode,
+ insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
__u64 *address)
{
- /* return -1 for fault, 0 for OK */
-
__u64 base_address, addr;
int basereg;
+ switch (1 << width_shift) {
+ case 1: inc_unaligned_byte_access(); break;
+ case 2: inc_unaligned_word_access(); break;
+ case 4: inc_unaligned_dword_access(); break;
+ case 8: inc_unaligned_multi_access(); break;
+ }
+
basereg = (opcode >> 20) & 0x3f;
base_address = regs->regs[basereg];
if (displacement_not_indexed) {
@@ -364,28 +112,28 @@ static int generate_and_check_address(struct pt_regs *regs,
}
/* Check sign extended */
- if (!address_is_sign_extended(addr)) {
+ if (!address_is_sign_extended(addr))
return -1;
- }
/* Check accessible. For misaligned access in the kernel, assume the
address is always accessible (and if not, just fault when the
load/store gets done.) */
if (user_mode(regs)) {
- if (addr >= TASK_SIZE) {
+ inc_unaligned_user_access();
+
+ if (addr >= TASK_SIZE)
return -1;
- }
- /* Do access_ok check later - it depends on whether it's a load or a store. */
- }
+ } else
+ inc_unaligned_kernel_access();
*address = addr;
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
+ unaligned_fixups_notify(current, opcode, regs);
+
return 0;
}
-static int user_mode_unaligned_fixup_count = 10;
-static int user_mode_unaligned_fixup_enable = 1;
-static int kernel_mode_unaligned_fixup_count = 32;
-
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
unsigned short x;
@@ -415,7 +163,7 @@ static void misaligned_kernel_word_store(__u64 address, __u64 value)
}
static int misaligned_load(struct pt_regs *regs,
- __u32 opcode,
+ insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_sign_extend)
@@ -427,11 +175,8 @@ static int misaligned_load(struct pt_regs *regs,
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
- if (error < 0) {
+ if (error < 0)
return error;
- }
-
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
@@ -490,11 +235,10 @@ static int misaligned_load(struct pt_regs *regs,
}
return 0;
-
}
static int misaligned_store(struct pt_regs *regs,
- __u32 opcode,
+ insn_size_t opcode,
int displacement_not_indexed,
int width_shift)
{
@@ -505,11 +249,8 @@ static int misaligned_store(struct pt_regs *regs,
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
- if (error < 0) {
+ if (error < 0)
return error;
- }
-
- perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
@@ -563,13 +304,12 @@ static int misaligned_store(struct pt_regs *regs,
}
return 0;
-
}
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
error. */
static int misaligned_fpu_load(struct pt_regs *regs,
- __u32 opcode,
+ insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
@@ -581,11 +321,8 @@ static int misaligned_fpu_load(struct pt_regs *regs,
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
- if (error < 0) {
+ if (error < 0)
return error;
- }
-
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
@@ -641,12 +378,10 @@ static int misaligned_fpu_load(struct pt_regs *regs,
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
-
-
}
static int misaligned_fpu_store(struct pt_regs *regs,
- __u32 opcode,
+ insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
@@ -658,11 +393,8 @@ static int misaligned_fpu_store(struct pt_regs *regs,
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
- if (error < 0) {
+ if (error < 0)
return error;
- }
-
- perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
@@ -723,11 +455,13 @@ static int misaligned_fpu_store(struct pt_regs *regs,
static int misaligned_fixup(struct pt_regs *regs)
{
- unsigned long opcode;
+ insn_size_t opcode;
int error;
int major, minor;
+ unsigned int user_action;
- if (!user_mode_unaligned_fixup_enable)
+ user_action = unaligned_user_action();
+ if (!(user_action & UM_FIXUP))
return -1;
error = read_opcode(regs->pc, &opcode, user_mode(regs));
@@ -737,23 +471,6 @@ static int misaligned_fixup(struct pt_regs *regs)
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
- if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
- --user_mode_unaligned_fixup_count;
- /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
- printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
- current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
- } else if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
- --kernel_mode_unaligned_fixup_count;
- if (in_interrupt()) {
- printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
- (__u32)regs->pc, opcode);
- } else {
- printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
- current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
- }
- }
-
-
switch (major) {
case (0x84>>2): /* LD.W */
error = misaligned_load(regs, opcode, 1, 1, 1);
@@ -878,59 +595,202 @@ static int misaligned_fixup(struct pt_regs *regs)
regs->pc += 4; /* Skip the instruction that's just been emulated */
return 0;
}
+}
+
+static void do_unhandled_exception(int signr, char *str, unsigned long error,
+ struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ force_sig(signr, current);
+ die_if_no_fixup(str, regs, error);
}
-static ctl_table unaligned_table[] = {
- {
- .procname = "kernel_reports",
- .data = &kernel_mode_unaligned_fixup_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
- .procname = "user_reports",
- .data = &user_mode_unaligned_fixup_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
- .procname = "user_enable",
- .data = &user_mode_unaligned_fixup_enable,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec},
- {}
-};
+#define DO_ERROR(signr, str, name) \
+asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
+{ \
+ do_unhandled_exception(signr, str, error_code, regs); \
+}
-static ctl_table unaligned_root[] = {
- {
- .procname = "unaligned_fixup",
- .mode = 0555,
- .child = unaligned_table
- },
- {}
-};
+DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
+DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)
+
+#if defined(CONFIG_SH64_ID2815_WORKAROUND)
+
+#define OPCODE_INVALID 0
+#define OPCODE_USER_VALID 1
+#define OPCODE_PRIV_VALID 2
-static ctl_table sh64_root[] = {
- {
- .procname = "sh64",
- .mode = 0555,
- .child = unaligned_root
- },
- {}
+/* getcon/putcon - requires checking which control register is referenced. */
+#define OPCODE_CTRL_REG 3
+
+/* Table of valid opcodes for SHmedia mode.
+ Form a 10-bit value by concatenating the major/minor opcodes i.e.
+ opcode[31:26,20:16]. The 6 MSBs of this value index into the following
+ array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
+ LSBs==4'b0000 etc). */
+static unsigned long shmedia_opcode_table[64] = {
+ 0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
+ 0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
+ 0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
+ 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+ 0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
-static struct ctl_table_header *sysctl_header;
-static int __init init_sysctl(void)
+
+/* Workaround SH5-101 cut2 silicon defect #2815 :
+ in some situations, inter-mode branches from SHcompact -> SHmedia
+ which should take ITLBMISS or EXECPROT exceptions at the target
+ falsely take RESINST at the target instead. */
+void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
- sysctl_header = register_sysctl_table(sh64_root);
- return 0;
+ insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
+ unsigned long pc, aligned_pc;
+ unsigned long index, shift;
+ unsigned long major, minor, combined;
+ unsigned long reserved_field;
+ int opcode_state;
+ int get_user_error;
+ int signr = SIGILL;
+ char *exception_name = "reserved_instruction";
+
+ pc = regs->pc;
+
+ /* SHcompact is not handled */
+ if (unlikely((pc & 3) == 0))
+ goto out;
+
+ /* SHmedia : check for defect. This requires executable vmas
+ to be readable too. */
+ aligned_pc = pc & ~3;
+ if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t)))
+ get_user_error = -EFAULT;
+ else
+ get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
+
+ if (get_user_error < 0) {
+ /*
+ * Error trying to read opcode. This typically means a
+ * real fault, not a RESINST any more. So change the
+ * codes.
+ */
+ exception_name = "address error (exec)";
+ signr = SIGSEGV;
+ goto out;
+ }
+
+ /* These bits are currently reserved as zero in all valid opcodes */
+ reserved_field = opcode & 0xf;
+ if (unlikely(reserved_field))
+ goto out; /* invalid opcode */
+
+ major = (opcode >> 26) & 0x3f;
+ minor = (opcode >> 16) & 0xf;
+ combined = (major << 4) | minor;
+ index = major;
+ shift = minor << 1;
+ opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
+ switch (opcode_state) {
+ case OPCODE_INVALID:
+ /* Trap. */
+ break;
+ case OPCODE_USER_VALID:
+ /*
+ * Restart the instruction: the branch to the instruction
+ * will now be from an RTE not from SHcompact so the
+ * silicon defect won't be triggered.
+ */
+ return;
+ case OPCODE_PRIV_VALID:
+ if (!user_mode(regs)) {
+ /*
+ * Should only ever get here if a module has
+ * SHcompact code inside it. If so, the same fix
+ * up is needed.
+ */
+ return; /* same reason */
+ }
+
+ /*
+ * Otherwise, user mode trying to execute a privileged
+ * instruction - fall through to trap.
+ */
+ break;
+ case OPCODE_CTRL_REG:
+ /* If in privileged mode, return as above. */
+ if (!user_mode(regs))
+ return;
+
+ /* In user mode ... */
+ if (combined == 0x9f) { /* GETCON */
+ unsigned long regno = (opcode >> 20) & 0x3f;
+
+ if (regno >= 62)
+ return;
+
+ /* reserved/privileged control register => trap */
+ } else if (combined == 0x1bf) { /* PUTCON */
+ unsigned long regno = (opcode >> 4) & 0x3f;
+
+ if (regno >= 62)
+ return;
+
+ /* reserved/privileged control register => trap */
+ }
+
+ break;
+ default:
+ /* Fall through to trap. */
+ break;
+ }
+
+out:
+ do_unhandled_exception(signr, exception_name, error_code, regs);
}
-__initcall(init_sysctl);
+#else /* CONFIG_SH64_ID2815_WORKAROUND */
+/* If the workaround isn't needed, this is just a straightforward reserved
+ instruction */
+DO_ERROR(SIGILL, "reserved instruction", reserved_inst)
+
+#endif /* CONFIG_SH64_ID2815_WORKAROUND */
+
+/* Called with interrupts disabled */
+asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
+{
+ die_if_kernel("exception", regs, ex);
+}
+
+asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
+{
+ /* Syscall debug */
+ printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
+
+ die_if_kernel("unknown trapa", regs, scId);
+
+ return -ENOSYS;
+}
+
+/* Implement misaligned load/store handling for kernel (and optionally for user
+ mode too). Limitation : only SHmedia mode code is handled - there is no
+ handling at all for misaligned accesses occurring in SHcompact code yet. */
+
+asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
+{
+ if (misaligned_fixup(regs) < 0)
+ do_unhandled_exception(SIGSEGV, "address error(load)",
+ error_code, regs);
+}
+
+asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
+{
+ if (misaligned_fixup(regs) < 0)
+ do_unhandled_exception(SIGSEGV, "address error(store)",
+ error_code, regs);
+}
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
@@ -942,10 +802,9 @@ asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
of access we make to them - just go direct to their physical
addresses. */
exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
- if (exp_cause & ~4) {
+ if (exp_cause & ~4)
printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
(unsigned long)(exp_cause & 0xffffffff));
- }
show_state();
/* Clear all DEBUGINT causes */
poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
diff --git a/arch/sh/lib64/Makefile b/arch/sh/lib64/Makefile
index 1fee75aa1f98..69779ff741df 100644
--- a/arch/sh/lib64/Makefile
+++ b/arch/sh/lib64/Makefile
@@ -10,7 +10,7 @@
#
# Panic should really be compiled as PIC
-lib-y := udelay.o dbg.o panic.o memcpy.o memset.o \
+lib-y := udelay.o panic.o memcpy.o memset.o \
copy_user_memcpy.o copy_page.o strcpy.o strlen.o
# Extracted from libgcc
diff --git a/arch/sh/lib64/dbg.c b/arch/sh/lib64/dbg.c
deleted file mode 100644
index 6152a6a6d9c6..000000000000
--- a/arch/sh/lib64/dbg.c
+++ /dev/null
@@ -1,248 +0,0 @@
-/*--------------------------------------------------------------------------
---
--- Identity : Linux50 Debug Funcions
---
--- File : arch/sh/lib64/dbg.c
---
--- Copyright 2000, 2001 STMicroelectronics Limited.
--- Copyright 2004 Richard Curnow (evt_debug etc)
---
---------------------------------------------------------------------------*/
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <asm/mmu_context.h>
-
-typedef u64 regType_t;
-
-static regType_t getConfigReg(u64 id)
-{
- register u64 reg __asm__("r2");
- asm volatile ("getcfg %1, 0, %0":"=r" (reg):"r"(id));
- return (reg);
-}
-
-/* ======================================================================= */
-
-static char *szTab[] = { "4k", "64k", "1M", "512M" };
-static char *protTab[] = { "----",
- "---R",
- "--X-",
- "--XR",
- "-W--",
- "-W-R",
- "-WX-",
- "-WXR",
- "U---",
- "U--R",
- "U-X-",
- "U-XR",
- "UW--",
- "UW-R",
- "UWX-",
- "UWXR"
-};
-#define ITLB_BASE 0x00000000
-#define DTLB_BASE 0x00800000
-#define MAX_TLBs 64
-/* PTE High */
-#define GET_VALID(pte) ((pte) & 0x1)
-#define GET_SHARED(pte) ((pte) & 0x2)
-#define GET_ASID(pte) ((pte >> 2) & 0x0ff)
-#define GET_EPN(pte) ((pte) & 0xfffff000)
-
-/* PTE Low */
-#define GET_CBEHAVIOR(pte) ((pte) & 0x3)
-#define GET_PAGE_SIZE(pte) szTab[((pte >> 3) & 0x3)]
-#define GET_PROTECTION(pte) protTab[((pte >> 6) & 0xf)]
-#define GET_PPN(pte) ((pte) & 0xfffff000)
-
-#define PAGE_1K_MASK 0x00000000
-#define PAGE_4K_MASK 0x00000010
-#define PAGE_64K_MASK 0x00000080
-#define MMU_PAGESIZE_MASK (PAGE_64K_MASK | PAGE_4K_MASK)
-#define PAGE_1MB_MASK MMU_PAGESIZE_MASK
-#define PAGE_1K (1024)
-#define PAGE_4K (1024 * 4)
-#define PAGE_64K (1024 * 64)
-#define PAGE_1MB (1024 * 1024)
-
-#define HOW_TO_READ_TLB_CONTENT \
- "[ ID] PPN EPN ASID Share CB P.Size PROT.\n"
-
-void print_single_tlb(unsigned long tlb, int single_print)
-{
- regType_t pteH;
- regType_t pteL;
- unsigned int valid, shared, asid, epn, cb, ppn;
- char *pSize;
- char *pProt;
-
- /*
- ** in case of single print <single_print> is true, this implies:
- ** 1) print the TLB in any case also if NOT VALID
- ** 2) print out the header
- */
-
- pteH = getConfigReg(tlb);
- valid = GET_VALID(pteH);
- if (single_print)
- printk(HOW_TO_READ_TLB_CONTENT);
- else if (!valid)
- return;
-
- pteL = getConfigReg(tlb + 1);
-
- shared = GET_SHARED(pteH);
- asid = GET_ASID(pteH);
- epn = GET_EPN(pteH);
- cb = GET_CBEHAVIOR(pteL);
- pSize = GET_PAGE_SIZE(pteL);
- pProt = GET_PROTECTION(pteL);
- ppn = GET_PPN(pteL);
- printk("[%c%2ld] 0x%08x 0x%08x %03d %02x %02x %4s %s\n",
- ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP),
- ppn, epn, asid, shared, cb, pSize, pProt);
-}
-
-void print_dtlb(void)
-{
- int count;
- unsigned long tlb;
-
- printk(" ================= SH-5 D-TLBs Status ===================\n");
- printk(HOW_TO_READ_TLB_CONTENT);
- tlb = DTLB_BASE;
- for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
- print_single_tlb(tlb, 0);
- printk
- (" =============================================================\n");
-}
-
-void print_itlb(void)
-{
- int count;
- unsigned long tlb;
-
- printk(" ================= SH-5 I-TLBs Status ===================\n");
- printk(HOW_TO_READ_TLB_CONTENT);
- tlb = ITLB_BASE;
- for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
- print_single_tlb(tlb, 0);
- printk
- (" =============================================================\n");
-}
-
-void show_excp_regs(char *from, int trapnr, int signr, struct pt_regs *regs)
-{
-
- unsigned long long ah, al, bh, bl, ch, cl;
-
- printk("\n");
- printk("EXCEPTION - %s: task %d; Linux trap # %d; signal = %d\n",
- ((from) ? from : "???"), current->pid, trapnr, signr);
-
- asm volatile ("getcon " __EXPEVT ", %0":"=r"(ah));
- asm volatile ("getcon " __EXPEVT ", %0":"=r"(al));
- ah = (ah) >> 32;
- al = (al) & 0xffffffff;
- asm volatile ("getcon " __KCR1 ", %0":"=r"(bh));
- asm volatile ("getcon " __KCR1 ", %0":"=r"(bl));
- bh = (bh) >> 32;
- bl = (bl) & 0xffffffff;
- asm volatile ("getcon " __INTEVT ", %0":"=r"(ch));
- asm volatile ("getcon " __INTEVT ", %0":"=r"(cl));
- ch = (ch) >> 32;
- cl = (cl) & 0xffffffff;
- printk("EXPE: %08Lx%08Lx KCR1: %08Lx%08Lx INTE: %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- asm volatile ("getcon " __PEXPEVT ", %0":"=r"(ah));
- asm volatile ("getcon " __PEXPEVT ", %0":"=r"(al));
- ah = (ah) >> 32;
- al = (al) & 0xffffffff;
- asm volatile ("getcon " __PSPC ", %0":"=r"(bh));
- asm volatile ("getcon " __PSPC ", %0":"=r"(bl));
- bh = (bh) >> 32;
- bl = (bl) & 0xffffffff;
- asm volatile ("getcon " __PSSR ", %0":"=r"(ch));
- asm volatile ("getcon " __PSSR ", %0":"=r"(cl));
- ch = (ch) >> 32;
- cl = (cl) & 0xffffffff;
- printk("PEXP: %08Lx%08Lx PSPC: %08Lx%08Lx PSSR: %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->pc) >> 32;
- al = (regs->pc) & 0xffffffff;
- bh = (regs->regs[18]) >> 32;
- bl = (regs->regs[18]) & 0xffffffff;
- ch = (regs->regs[15]) >> 32;
- cl = (regs->regs[15]) & 0xffffffff;
- printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->sr) >> 32;
- al = (regs->sr) & 0xffffffff;
- asm volatile ("getcon " __TEA ", %0":"=r"(bh));
- asm volatile ("getcon " __TEA ", %0":"=r"(bl));
- bh = (bh) >> 32;
- bl = (bl) & 0xffffffff;
- asm volatile ("getcon " __KCR0 ", %0":"=r"(ch));
- asm volatile ("getcon " __KCR0 ", %0":"=r"(cl));
- ch = (ch) >> 32;
- cl = (cl) & 0xffffffff;
- printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[0]) >> 32;
- al = (regs->regs[0]) & 0xffffffff;
- bh = (regs->regs[1]) >> 32;
- bl = (regs->regs[1]) & 0xffffffff;
- ch = (regs->regs[2]) >> 32;
- cl = (regs->regs[2]) & 0xffffffff;
- printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[3]) >> 32;
- al = (regs->regs[3]) & 0xffffffff;
- bh = (regs->regs[4]) >> 32;
- bl = (regs->regs[4]) & 0xffffffff;
- ch = (regs->regs[5]) >> 32;
- cl = (regs->regs[5]) & 0xffffffff;
- printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[6]) >> 32;
- al = (regs->regs[6]) & 0xffffffff;
- bh = (regs->regs[7]) >> 32;
- bl = (regs->regs[7]) & 0xffffffff;
- ch = (regs->regs[8]) >> 32;
- cl = (regs->regs[8]) & 0xffffffff;
- printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
-
- ah = (regs->regs[9]) >> 32;
- al = (regs->regs[9]) & 0xffffffff;
- bh = (regs->regs[10]) >> 32;
- bl = (regs->regs[10]) & 0xffffffff;
- ch = (regs->regs[11]) >> 32;
- cl = (regs->regs[11]) & 0xffffffff;
- printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
- printk("....\n");
-
- ah = (regs->tregs[0]) >> 32;
- al = (regs->tregs[0]) & 0xffffffff;
- bh = (regs->tregs[1]) >> 32;
- bl = (regs->tregs[1]) & 0xffffffff;
- ch = (regs->tregs[2]) >> 32;
- cl = (regs->tregs[2]) & 0xffffffff;
- printk("T0 : %08Lx%08Lx T1 : %08Lx%08Lx T2 : %08Lx%08Lx\n",
- ah, al, bh, bl, ch, cl);
- printk("....\n");
-
- print_dtlb();
- print_itlb();
-}
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index 3aea25dc431a..ff1c40a31cbc 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -17,7 +17,7 @@
/**
* sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
*/
-int __init sh64_tlb_init(void)
+int __cpuinit sh64_tlb_init(void)
{
/* Assign some sane DTLB defaults */
cpu_data->dtlb.entries = 64;
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index 19f56058742b..21dcda75a520 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -91,14 +91,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
}
}
-/*
- * Other archs parse arguments here.
- */
-char * __devinit pcibios_setup(char *str)
-{
- return str;
-}
-
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
index 7a3be6f6737a..7bbdc26d9512 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
@@ -580,7 +580,7 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
printk("%s: Apply [%s:%x] imap --> [%s:%x]\n",
op->dev.of_node->full_name,
pp->full_name, this_orig_irq,
- (iret ? iret->full_name : "NULL"), irq);
+ of_node_full_name(iret), irq);
if (!iret)
break;
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index fdaf21811670..065b88c4f868 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -375,93 +375,6 @@ static void __devinit apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
*last_p = last;
}
-/* For PCI bus devices which lack a 'ranges' property we interrogate
- * the config space values to set the resources, just like the generic
- * Linux PCI probing code does.
- */
-static void __devinit pci_cfg_fake_ranges(struct pci_dev *dev,
- struct pci_bus *bus,
- struct pci_pbm_info *pbm)
-{
- struct pci_bus_region region;
- struct resource *res, res2;
- u8 io_base_lo, io_limit_lo;
- u16 mem_base_lo, mem_limit_lo;
- unsigned long base, limit;
-
- pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
- pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
- base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
- limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
-
- if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
- u16 io_base_hi, io_limit_hi;
-
- pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
- pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
- base |= (io_base_hi << 16);
- limit |= (io_limit_hi << 16);
- }
-
- res = bus->resource[0];
- if (base <= limit) {
- res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
- res2.flags = res->flags;
- region.start = base;
- region.end = limit + 0xfff;
- pcibios_bus_to_resource(dev, &res2, &region);
- if (!res->start)
- res->start = res2.start;
- if (!res->end)
- res->end = res2.end;
- }
-
- pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
- pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
- base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
- limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
-
- res = bus->resource[1];
- if (base <= limit) {
- res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) |
- IORESOURCE_MEM);
- region.start = base;
- region.end = limit + 0xfffff;
- pcibios_bus_to_resource(dev, res, &region);
- }
-
- pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
- pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
- base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
- limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
-
- if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
- u32 mem_base_hi, mem_limit_hi;
-
- pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
- pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
-
- /*
- * Some bridges set the base > limit by default, and some
- * (broken) BIOSes do not initialize them. If we find
- * this, just assume they are not being used.
- */
- if (mem_base_hi <= mem_limit_hi) {
- base |= ((long) mem_base_hi) << 32;
- limit |= ((long) mem_limit_hi) << 32;
- }
- }
-
- res = bus->resource[2];
- if (base <= limit) {
- res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) |
- IORESOURCE_MEM | IORESOURCE_PREFETCH);
- region.start = base;
- region.end = limit + 0xfffff;
- pcibios_bus_to_resource(dev, res, &region);
- }
-}
-
/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
* a proper 'ranges' property.
*/
@@ -535,7 +448,7 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
}
bus->primary = dev->bus->number;
- bus->subordinate = busrange[1];
+ pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
bus->bridge_ctl = 0;
/* parse ranges property, or cook one up by hand for Simba */
@@ -550,7 +463,7 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
apb_fake_ranges(dev, bus, pbm);
goto after_ranges;
} else if (ranges == NULL) {
- pci_cfg_fake_ranges(dev, bus, pbm);
+ pci_read_bridge_bases(bus);
goto after_ranges;
}
i = 1;
@@ -685,6 +598,10 @@ struct pci_bus * __devinit pci_scan_one_pbm(struct pci_pbm_info *pbm,
pbm->io_space.start);
pci_add_resource_offset(&resources, &pbm->mem_space,
pbm->mem_space.start);
+ pbm->busn.start = pbm->pci_first_busno;
+ pbm->busn.end = pbm->pci_last_busno;
+ pbm->busn.flags = IORESOURCE_BUS;
+ pci_add_resource(&resources, &pbm->busn);
bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
pbm, &resources);
if (!bus) {
@@ -693,8 +610,6 @@ struct pci_bus * __devinit pci_scan_one_pbm(struct pci_pbm_info *pbm,
pci_free_resource_list(&resources);
return NULL;
}
- bus->secondary = pbm->pci_first_busno;
- bus->subordinate = pbm->pci_last_busno;
pci_of_scan_bus(pbm, node, bus);
pci_bus_add_devices(bus);
@@ -747,11 +662,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return 0;
}
-char * __devinit pcibios_setup(char *str)
-{
- return str;
-}
-
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
/* If the user uses a host-bridge as the PCI device, he may use
diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h
index 6beb60df31d0..918a2031c8bb 100644
--- a/arch/sparc/kernel/pci_impl.h
+++ b/arch/sparc/kernel/pci_impl.h
@@ -97,6 +97,7 @@ struct pci_pbm_info {
/* PBM I/O and Memory space resources. */
struct resource io_space;
struct resource mem_space;
+ struct resource busn;
/* Base of PCI Config space, can be per-PBM or shared. */
unsigned long config_space;
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index ded3f6090c3f..521fdf1b20e5 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -767,14 +767,6 @@ static void watchdog_reset() {
}
#endif
-/*
- * Other archs parse arguments here.
- */
-char * __devinit pcibios_setup(char *str)
-{
- return str;
-}
-
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
@@ -884,11 +876,6 @@ void __init sun4m_pci_init_IRQ(void)
sparc_config.load_profile_irq = pcic_load_profile_irq;
}
-int pcibios_assign_resource(struct pci_dev *pdev, int resource)
-{
- return -ENXIO;
-}
-
/*
* This probably belongs here rather than ioport.c because
* we do not want this crud linked into SBus kernels.
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index f591598d92f6..781bcb10b8bd 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -103,8 +103,6 @@ void __cpuinit smp_callin(void)
if (cheetah_pcache_forced_on)
cheetah_enable_pcache();
- local_irq_enable();
-
callin_flag = 1;
__asm__ __volatile__("membar #Sync\n\t"
"flush %%g6" : : : "memory");
@@ -124,9 +122,8 @@ void __cpuinit smp_callin(void)
while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
rmb();
- ipi_call_lock_irq();
set_cpu_online(cpuid, true);
- ipi_call_unlock_irq();
+ local_irq_enable();
/* idle thread is expected to have preempt disabled */
preempt_disable();
@@ -1308,9 +1305,7 @@ int __cpu_disable(void)
mdelay(1);
local_irq_disable();
- ipi_call_lock();
set_cpu_online(cpu, false);
- ipi_call_unlock();
cpu_map_rebuild();
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 1a69244e785b..e9073e9501b3 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -96,6 +96,7 @@ static void bpf_flush_icache(void *start_, void *end_)
#define AND F3(2, 0x01)
#define ANDCC F3(2, 0x11)
#define OR F3(2, 0x02)
+#define XOR F3(2, 0x03)
#define SUB F3(2, 0x04)
#define SUBCC F3(2, 0x14)
#define MUL F3(2, 0x0a) /* umul */
@@ -462,6 +463,9 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ALU_OR_K: /* A |= K */
emit_alu_K(OR, K);
break;
+ case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
+ emit_alu_X(XOR);
+ break;
case BPF_S_ALU_LSH_X: /* A <<= X */
emit_alu_X(SLL);
break;
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index fe128816c448..932e4430f7f3 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -3,6 +3,8 @@
config TILE
def_bool y
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_API_DEBUG
select HAVE_KVM if !TILEGX
select GENERIC_FIND_FIRST_BIT
select USE_GENERIC_SMP_HELPERS
@@ -79,6 +81,9 @@ config ARCH_DMA_ADDR_T_64BIT
config NEED_DMA_MAP_STATE
def_bool y
+config ARCH_HAS_DMA_SET_COHERENT_MASK
+ bool
+
config LOCKDEP_SUPPORT
def_bool y
@@ -212,6 +217,22 @@ config HIGHMEM
If unsure, say "true".
+config ZONE_DMA
+ def_bool y
+
+config IOMMU_HELPER
+ bool
+
+config NEED_SG_DMA_LENGTH
+ bool
+
+config SWIOTLB
+ bool
+ default TILEGX
+ select IOMMU_HELPER
+ select NEED_SG_DMA_LENGTH
+ select ARCH_HAS_DMA_SET_COHERENT_MASK
+
# We do not currently support disabling NUMA.
config NUMA
bool # "NUMA Memory Allocation and Scheduler Support"
@@ -345,6 +366,8 @@ config KERNEL_PL
kernel will be built to run at. Generally you should use
the default value here.
+source "arch/tile/gxio/Kconfig"
+
endmenu # Tilera-specific configuration
menu "Bus options"
@@ -354,6 +377,9 @@ config PCI
default y
select PCI_DOMAINS
select GENERIC_PCI_IOMAP
+ select TILE_GXIO_TRIO if TILEGX
+ select ARCH_SUPPORTS_MSI if TILEGX
+ select PCI_MSI if TILEGX
---help---
Enable PCI root complex support, so PCIe endpoint devices can
be attached to the Tile chip. Many, but not all, PCI devices
@@ -370,6 +396,22 @@ config NO_IOPORT
source "drivers/pci/Kconfig"
+config TILE_USB
+ tristate "Tilera USB host adapter support"
+ default y
+ depends on USB
+ depends on TILEGX
+ select TILE_GXIO_USB_HOST
+ ---help---
+ Provides USB host adapter support for the built-in EHCI and OHCI
+ interfaces on TILE-Gx chips.
+
+# USB OHCI needs the bounce pool since tilegx will often have more
+# than 4GB of memory, but we don't currently use the IOTLB to present
+# a 32-bit address to OHCI. So we need to use a bounce pool instead.
+config NEED_BOUNCE_POOL
+ def_bool USB_OHCI_HCD
+
config HOTPLUG
bool "Support for hot-pluggable devices"
---help---
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index e20b0a0b64a1..55640cf92597 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -59,6 +59,8 @@ libs-y += $(LIBGCC_PATH)
# See arch/tile/Kbuild for content of core part of the kernel
core-y += arch/tile/
+core-$(CONFIG_TILE_GXIO) += arch/tile/gxio/
+
ifdef TILERA_ROOT
INSTALL_PATH ?= $(TILERA_ROOT)/tile/boot
endif
diff --git a/arch/tile/gxio/Kconfig b/arch/tile/gxio/Kconfig
new file mode 100644
index 000000000000..d221f8d6de8b
--- /dev/null
+++ b/arch/tile/gxio/Kconfig
@@ -0,0 +1,28 @@
+# Support direct access to TILE-Gx hardware from user space, via the
+# gxio library, or from kernel space, via kernel IORPC support.
+config TILE_GXIO
+ bool
+ depends on TILEGX
+
+# Support direct access to the common I/O DMA facility within the
+# TILE-Gx mPIPE and Trio hardware from kernel space.
+config TILE_GXIO_DMA
+ bool
+ select TILE_GXIO
+
+# Support direct access to the TILE-Gx mPIPE hardware from kernel space.
+config TILE_GXIO_MPIPE
+ bool
+ select TILE_GXIO
+ select TILE_GXIO_DMA
+
+# Support direct access to the TILE-Gx TRIO hardware from kernel space.
+config TILE_GXIO_TRIO
+ bool
+ select TILE_GXIO
+ select TILE_GXIO_DMA
+
+# Support direct access to the TILE-Gx USB hardware from kernel space.
+config TILE_GXIO_USB_HOST
+ bool
+ select TILE_GXIO
diff --git a/arch/tile/gxio/Makefile b/arch/tile/gxio/Makefile
new file mode 100644
index 000000000000..8684bcaa74ea
--- /dev/null
+++ b/arch/tile/gxio/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Tile-Gx device access support.
+#
+
+obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o
+obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o
+obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o
+obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o
+obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o
diff --git a/arch/tile/gxio/dma_queue.c b/arch/tile/gxio/dma_queue.c
new file mode 100644
index 000000000000..baa60357f8ba
--- /dev/null
+++ b/arch/tile/gxio/dma_queue.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/io.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <gxio/dma_queue.h>
+
+/* Wait for a memory read to complete. */
+#define wait_for_value(val) \
+ __asm__ __volatile__("move %0, %0" :: "r"(val))
+
+/* The index is in the low 16. */
+#define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1)
+
+/*
+ * The hardware descriptor-ring type.
+ * This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t)
+ * and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t).
+ * See those types for more documentation on the individual fields.
+ */
+typedef union {
+ struct {
+#ifndef __BIG_ENDIAN__
+ uint64_t ring_idx:16;
+ uint64_t count:16;
+ uint64_t gen:1;
+ uint64_t __reserved:31;
+#else
+ uint64_t __reserved:31;
+ uint64_t gen:1;
+ uint64_t count:16;
+ uint64_t ring_idx:16;
+#endif
+ };
+ uint64_t word;
+} __gxio_ring_t;
+
+void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
+ void *post_region_addr, unsigned int num_entries)
+{
+ /*
+ * Limit 65536 entry rings to 65535 credits because we only have a
+ * 16 bit completion counter.
+ */
+ int64_t credits = (num_entries < 65536) ? num_entries : 65535;
+
+ memset(dma_queue, 0, sizeof(*dma_queue));
+
+ dma_queue->post_region_addr = post_region_addr;
+ dma_queue->hw_complete_count = 0;
+ dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT;
+}
+
+EXPORT_SYMBOL_GPL(__gxio_dma_queue_init);
+
+void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue)
+{
+ __gxio_ring_t val;
+ uint64_t count;
+ uint64_t delta;
+ uint64_t new_count;
+
+ /*
+ * Read the 64-bit completion count without touching the cache, so
+ * we later avoid having to evict any sharers of this cache line
+ * when we update it below.
+ */
+ uint64_t orig_hw_complete_count =
+ cmpxchg(&dma_queue->hw_complete_count,
+ -1, -1);
+
+ /* Make sure the load completes before we access the hardware. */
+ wait_for_value(orig_hw_complete_count);
+
+ /* Read the 16-bit count of how many packets it has completed. */
+ val.word = __gxio_mmio_read(dma_queue->post_region_addr);
+ count = val.count;
+
+ /*
+ * Calculate the number of completions since we last updated the
+ * 64-bit counter. It's safe to ignore the high bits because the
+ * maximum credit value is 65535.
+ */
+ delta = (count - orig_hw_complete_count) & 0xffff;
+ if (delta == 0)
+ return;
+
+ /*
+ * Try to write back the count, advanced by delta. If we race with
+ * another thread, this might fail, in which case we return
+ * immediately on the assumption that some credits are (or at least
+ * were) available.
+ */
+ new_count = orig_hw_complete_count + delta;
+ if (cmpxchg(&dma_queue->hw_complete_count,
+ orig_hw_complete_count,
+ new_count) != orig_hw_complete_count)
+ return;
+
+ /*
+ * We succeeded in advancing the completion count; add back the
+ * corresponding number of egress credits.
+ */
+ __insn_fetchadd(&dma_queue->credits_and_next_index,
+ (delta << DMA_QUEUE_CREDIT_SHIFT));
+}
+
+EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits);
+
+/*
+ * A separate 'blocked' method for put() so that backtraces and
+ * profiles will clearly indicate that we're wasting time spinning on
+ * egress availability rather than actually posting commands.
+ */
+int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
+ int64_t modifier)
+{
+ int backoff = 16;
+ int64_t old;
+
+ do {
+ int i;
+ /* Back off to avoid spamming memory networks. */
+ for (i = backoff; i > 0; i--)
+ __insn_mfspr(SPR_PASS);
+
+ /* Check credits again. */
+ __gxio_dma_queue_update_credits(dma_queue);
+ old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
+ modifier);
+
+ /* Calculate bounded exponential backoff for next iteration. */
+ if (backoff < 256)
+ backoff *= 2;
+ } while (old + modifier < 0);
+
+ return old;
+}
+
+EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits);
+
+int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
+ unsigned int num, int wait)
+{
+ return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true);
+}
+
+EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux);
+
+int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
+ int64_t completion_slot, int update)
+{
+ if (update) {
+ if (ACCESS_ONCE(dma_queue->hw_complete_count) >
+ completion_slot)
+ return 1;
+
+ __gxio_dma_queue_update_credits(dma_queue);
+ }
+
+ return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
+}
+
+EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
diff --git a/arch/tile/gxio/iorpc_globals.c b/arch/tile/gxio/iorpc_globals.c
new file mode 100644
index 000000000000..e178e90805a2
--- /dev/null
+++ b/arch/tile/gxio/iorpc_globals.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#include "gxio/iorpc_globals.h"
+
+struct arm_pollfd_param {
+ union iorpc_pollfd pollfd;
+};
+
+int __iorpc_arm_pollfd(int fd, int pollfd_cookie)
+{
+ struct arm_pollfd_param temp;
+ struct arm_pollfd_param *params = &temp;
+
+ params->pollfd.kernel.cookie = pollfd_cookie;
+
+ return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ IORPC_OP_ARM_POLLFD);
+}
+
+EXPORT_SYMBOL(__iorpc_arm_pollfd);
+
+struct close_pollfd_param {
+ union iorpc_pollfd pollfd;
+};
+
+int __iorpc_close_pollfd(int fd, int pollfd_cookie)
+{
+ struct close_pollfd_param temp;
+ struct close_pollfd_param *params = &temp;
+
+ params->pollfd.kernel.cookie = pollfd_cookie;
+
+ return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ IORPC_OP_CLOSE_POLLFD);
+}
+
+EXPORT_SYMBOL(__iorpc_close_pollfd);
+
+struct get_mmio_base_param {
+ HV_PTE base;
+};
+
+int __iorpc_get_mmio_base(int fd, HV_PTE *base)
+{
+ int __result;
+ struct get_mmio_base_param temp;
+ struct get_mmio_base_param *params = &temp;
+
+ __result =
+ hv_dev_pread(fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ IORPC_OP_GET_MMIO_BASE);
+ *base = params->base;
+
+ return __result;
+}
+
+EXPORT_SYMBOL(__iorpc_get_mmio_base);
+
+struct check_mmio_offset_param {
+ unsigned long offset;
+ unsigned long size;
+};
+
+int __iorpc_check_mmio_offset(int fd, unsigned long offset, unsigned long size)
+{
+ struct check_mmio_offset_param temp;
+ struct check_mmio_offset_param *params = &temp;
+
+ params->offset = offset;
+ params->size = size;
+
+ return hv_dev_pwrite(fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ IORPC_OP_CHECK_MMIO_OFFSET);
+}
+
+EXPORT_SYMBOL(__iorpc_check_mmio_offset);
diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c
new file mode 100644
index 000000000000..31b87bf8c027
--- /dev/null
+++ b/arch/tile/gxio/iorpc_mpipe.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#include "gxio/iorpc_mpipe.h"
+
+struct alloc_buffer_stacks_param {
+ unsigned int count;
+ unsigned int first;
+ unsigned int flags;
+};
+
+int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags)
+{
+ struct alloc_buffer_stacks_param temp;
+ struct alloc_buffer_stacks_param *params = &temp;
+
+ params->count = count;
+ params->first = first;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_alloc_buffer_stacks);
+
+struct init_buffer_stack_aux_param {
+ union iorpc_mem_buffer buffer;
+ unsigned int stack;
+ unsigned int buffer_size_enum;
+};
+
+int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context,
+ void *mem_va, size_t mem_size,
+ unsigned int mem_flags, unsigned int stack,
+ unsigned int buffer_size_enum)
+{
+ int __result;
+ unsigned long long __cpa;
+ pte_t __pte;
+ struct init_buffer_stack_aux_param temp;
+ struct init_buffer_stack_aux_param *params = &temp;
+
+ __result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
+ if (__result != 0)
+ return __result;
+ params->buffer.kernel.cpa = __cpa;
+ params->buffer.kernel.size = mem_size;
+ params->buffer.kernel.pte = __pte;
+ params->buffer.kernel.flags = mem_flags;
+ params->stack = stack;
+ params->buffer_size_enum = buffer_size_enum;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_init_buffer_stack_aux);
+
+
+struct alloc_notif_rings_param {
+ unsigned int count;
+ unsigned int first;
+ unsigned int flags;
+};
+
+int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags)
+{
+ struct alloc_notif_rings_param temp;
+ struct alloc_notif_rings_param *params = &temp;
+
+ params->count = count;
+ params->first = first;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_alloc_notif_rings);
+
+struct init_notif_ring_aux_param {
+ union iorpc_mem_buffer buffer;
+ unsigned int ring;
+};
+
+int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+ size_t mem_size, unsigned int mem_flags,
+ unsigned int ring)
+{
+ int __result;
+ unsigned long long __cpa;
+ pte_t __pte;
+ struct init_notif_ring_aux_param temp;
+ struct init_notif_ring_aux_param *params = &temp;
+
+ __result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
+ if (__result != 0)
+ return __result;
+ params->buffer.kernel.cpa = __cpa;
+ params->buffer.kernel.size = mem_size;
+ params->buffer.kernel.pte = __pte;
+ params->buffer.kernel.flags = mem_flags;
+ params->ring = ring;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_init_notif_ring_aux);
+
+struct request_notif_ring_interrupt_param {
+ union iorpc_interrupt interrupt;
+ unsigned int ring;
+};
+
+int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context,
+ int inter_x, int inter_y,
+ int inter_ipi, int inter_event,
+ unsigned int ring)
+{
+ struct request_notif_ring_interrupt_param temp;
+ struct request_notif_ring_interrupt_param *params = &temp;
+
+ params->interrupt.kernel.x = inter_x;
+ params->interrupt.kernel.y = inter_y;
+ params->interrupt.kernel.ipi = inter_ipi;
+ params->interrupt.kernel.event = inter_event;
+ params->ring = ring;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_request_notif_ring_interrupt);
+
+struct enable_notif_ring_interrupt_param {
+ unsigned int ring;
+};
+
+int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context,
+ unsigned int ring)
+{
+ struct enable_notif_ring_interrupt_param temp;
+ struct enable_notif_ring_interrupt_param *params = &temp;
+
+ params->ring = ring;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_enable_notif_ring_interrupt);
+
+struct alloc_notif_groups_param {
+ unsigned int count;
+ unsigned int first;
+ unsigned int flags;
+};
+
+int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags)
+{
+ struct alloc_notif_groups_param temp;
+ struct alloc_notif_groups_param *params = &temp;
+
+ params->count = count;
+ params->first = first;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_alloc_notif_groups);
+
+struct init_notif_group_param {
+ unsigned int group;
+ gxio_mpipe_notif_group_bits_t bits;
+};
+
+int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context,
+ unsigned int group,
+ gxio_mpipe_notif_group_bits_t bits)
+{
+ struct init_notif_group_param temp;
+ struct init_notif_group_param *params = &temp;
+
+ params->group = group;
+ params->bits = bits;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_INIT_NOTIF_GROUP);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_init_notif_group);
+
+struct alloc_buckets_param {
+ unsigned int count;
+ unsigned int first;
+ unsigned int flags;
+};
+
+int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count,
+ unsigned int first, unsigned int flags)
+{
+ struct alloc_buckets_param temp;
+ struct alloc_buckets_param *params = &temp;
+
+ params->count = count;
+ params->first = first;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_ALLOC_BUCKETS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_alloc_buckets);
+
+struct init_bucket_param {
+ unsigned int bucket;
+ MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info;
+};
+
+int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket,
+ MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info)
+{
+ struct init_bucket_param temp;
+ struct init_bucket_param *params = &temp;
+
+ params->bucket = bucket;
+ params->bucket_info = bucket_info;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_INIT_BUCKET);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_init_bucket);
+
+struct alloc_edma_rings_param {
+ unsigned int count;
+ unsigned int first;
+ unsigned int flags;
+};
+
+int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags)
+{
+ struct alloc_edma_rings_param temp;
+ struct alloc_edma_rings_param *params = &temp;
+
+ params->count = count;
+ params->first = first;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_ALLOC_EDMA_RINGS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_alloc_edma_rings);
+
+struct init_edma_ring_aux_param {
+ union iorpc_mem_buffer buffer;
+ unsigned int ring;
+ unsigned int channel;
+};
+
+int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+ size_t mem_size, unsigned int mem_flags,
+ unsigned int ring, unsigned int channel)
+{
+ int __result;
+ unsigned long long __cpa;
+ pte_t __pte;
+ struct init_edma_ring_aux_param temp;
+ struct init_edma_ring_aux_param *params = &temp;
+
+ __result = va_to_cpa_and_pte(mem_va, &__cpa, &__pte);
+ if (__result != 0)
+ return __result;
+ params->buffer.kernel.cpa = __cpa;
+ params->buffer.kernel.size = mem_size;
+ params->buffer.kernel.pte = __pte;
+ params->buffer.kernel.flags = mem_flags;
+ params->ring = ring;
+ params->channel = channel;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_INIT_EDMA_RING_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_init_edma_ring_aux);
+
+
+int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob,
+ size_t blob_size)
+{
+ const void *params = blob;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params, blob_size,
+ GXIO_MPIPE_OP_COMMIT_RULES);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_commit_rules);
+
+struct register_client_memory_param {
+ unsigned int iotlb;
+ HV_PTE pte;
+ unsigned int flags;
+};
+
+int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context,
+ unsigned int iotlb, HV_PTE pte,
+ unsigned int flags)
+{
+ struct register_client_memory_param temp;
+ struct register_client_memory_param *params = &temp;
+
+ params->iotlb = iotlb;
+ params->pte = pte;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_register_client_memory);
+
+struct link_open_aux_param {
+ _gxio_mpipe_link_name_t name;
+ unsigned int flags;
+};
+
+int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
+ _gxio_mpipe_link_name_t name, unsigned int flags)
+{
+ struct link_open_aux_param temp;
+ struct link_open_aux_param *params = &temp;
+
+ params->name = name;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_LINK_OPEN_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_link_open_aux);
+
+struct link_close_aux_param {
+ int mac;
+};
+
+int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac)
+{
+ struct link_close_aux_param temp;
+ struct link_close_aux_param *params = &temp;
+
+ params->mac = mac;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_LINK_CLOSE_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_link_close_aux);
+
+
+struct get_timestamp_aux_param {
+ uint64_t sec;
+ uint64_t nsec;
+ uint64_t cycles;
+};
+
+int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
+ uint64_t * nsec, uint64_t * cycles)
+{
+ int __result;
+ struct get_timestamp_aux_param temp;
+ struct get_timestamp_aux_param *params = &temp;
+
+ __result =
+ hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ GXIO_MPIPE_OP_GET_TIMESTAMP_AUX);
+ *sec = params->sec;
+ *nsec = params->nsec;
+ *cycles = params->cycles;
+
+ return __result;
+}
+
+EXPORT_SYMBOL(gxio_mpipe_get_timestamp_aux);
+
+struct set_timestamp_aux_param {
+ uint64_t sec;
+ uint64_t nsec;
+ uint64_t cycles;
+};
+
+int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
+ uint64_t nsec, uint64_t cycles)
+{
+ struct set_timestamp_aux_param temp;
+ struct set_timestamp_aux_param *params = &temp;
+
+ params->sec = sec;
+ params->nsec = nsec;
+ params->cycles = cycles;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_SET_TIMESTAMP_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_set_timestamp_aux);
+
+struct adjust_timestamp_aux_param {
+ int64_t nsec;
+};
+
+int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
+ int64_t nsec)
+{
+ struct adjust_timestamp_aux_param temp;
+ struct adjust_timestamp_aux_param *params = &temp;
+
+ params->nsec = nsec;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
+
+struct arm_pollfd_param {
+ union iorpc_pollfd pollfd;
+};
+
+int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie)
+{
+ struct arm_pollfd_param temp;
+ struct arm_pollfd_param *params = &temp;
+
+ params->pollfd.kernel.cookie = pollfd_cookie;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_ARM_POLLFD);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_arm_pollfd);
+
+struct close_pollfd_param {
+ union iorpc_pollfd pollfd;
+};
+
+int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie)
+{
+ struct close_pollfd_param temp;
+ struct close_pollfd_param *params = &temp;
+
+ params->pollfd.kernel.cookie = pollfd_cookie;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_CLOSE_POLLFD);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_close_pollfd);
+
+struct get_mmio_base_param {
+ HV_PTE base;
+};
+
+int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base)
+{
+ int __result;
+ struct get_mmio_base_param temp;
+ struct get_mmio_base_param *params = &temp;
+
+ __result =
+ hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ GXIO_MPIPE_OP_GET_MMIO_BASE);
+ *base = params->base;
+
+ return __result;
+}
+
+EXPORT_SYMBOL(gxio_mpipe_get_mmio_base);
+
+struct check_mmio_offset_param {
+ unsigned long offset;
+ unsigned long size;
+};
+
+int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context,
+ unsigned long offset, unsigned long size)
+{
+ struct check_mmio_offset_param temp;
+ struct check_mmio_offset_param *params = &temp;
+
+ params->offset = offset;
+ params->size = size;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_CHECK_MMIO_OFFSET);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_check_mmio_offset);
diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c
new file mode 100644
index 000000000000..d0254aa60cba
--- /dev/null
+++ b/arch/tile/gxio/iorpc_mpipe_info.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#include "gxio/iorpc_mpipe_info.h"
+
+
+struct enumerate_aux_param {
+ _gxio_mpipe_link_name_t name;
+ _gxio_mpipe_link_mac_t mac;
+};
+
+int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
+ unsigned int idx,
+ _gxio_mpipe_link_name_t * name,
+ _gxio_mpipe_link_mac_t * mac)
+{
+ int __result;
+ struct enumerate_aux_param temp;
+ struct enumerate_aux_param *params = &temp;
+
+ __result =
+ hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ (((uint64_t) idx << 32) |
+ GXIO_MPIPE_INFO_OP_ENUMERATE_AUX));
+ *name = params->name;
+ *mac = params->mac;
+
+ return __result;
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_enumerate_aux);
+
+struct get_mmio_base_param {
+ HV_PTE base;
+};
+
+int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context,
+ HV_PTE *base)
+{
+ int __result;
+ struct get_mmio_base_param temp;
+ struct get_mmio_base_param *params = &temp;
+
+ __result =
+ hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ GXIO_MPIPE_INFO_OP_GET_MMIO_BASE);
+ *base = params->base;
+
+ return __result;
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_get_mmio_base);
+
+struct check_mmio_offset_param {
+ unsigned long offset;
+ unsigned long size;
+};
+
+int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context,
+ unsigned long offset, unsigned long size)
+{
+ struct check_mmio_offset_param temp;
+ struct check_mmio_offset_param *params = &temp;
+
+ params->offset = offset;
+ params->size = size;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_check_mmio_offset);
diff --git a/arch/tile/gxio/iorpc_trio.c b/arch/tile/gxio/iorpc_trio.c
new file mode 100644
index 000000000000..cef4b2209cda
--- /dev/null
+++ b/arch/tile/gxio/iorpc_trio.c
@@ -0,0 +1,327 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#include "gxio/iorpc_trio.h"
+
+struct alloc_asids_param {
+ unsigned int count;
+ unsigned int first;
+ unsigned int flags;
+};
+
+int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count,
+ unsigned int first, unsigned int flags)
+{
+ struct alloc_asids_param temp;
+ struct alloc_asids_param *params = &temp;
+
+ params->count = count;
+ params->first = first;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_TRIO_OP_ALLOC_ASIDS);
+}
+
+EXPORT_SYMBOL(gxio_trio_alloc_asids);
+
+
+struct alloc_memory_maps_param {
+ unsigned int count;
+ unsigned int first;
+ unsigned int flags;
+};
+
+int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags)
+{
+ struct alloc_memory_maps_param temp;
+ struct alloc_memory_maps_param *params = &temp;
+
+ params->count = count;
+ params->first = first;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_TRIO_OP_ALLOC_MEMORY_MAPS);
+}
+
+EXPORT_SYMBOL(gxio_trio_alloc_memory_maps);
+
+
+struct alloc_pio_regions_param {
+ unsigned int count;
+ unsigned int first;
+ unsigned int flags;
+};
+
+int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags)
+{
+ struct alloc_pio_regions_param temp;
+ struct alloc_pio_regions_param *params = &temp;
+
+ params->count = count;
+ params->first = first;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_TRIO_OP_ALLOC_PIO_REGIONS);
+}
+
+EXPORT_SYMBOL(gxio_trio_alloc_pio_regions);
+
+struct init_pio_region_aux_param {
+ unsigned int pio_region;
+ unsigned int mac;
+ uint32_t bus_address_hi;
+ unsigned int flags;
+};
+
+int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context,
+ unsigned int pio_region, unsigned int mac,
+ uint32_t bus_address_hi, unsigned int flags)
+{
+ struct init_pio_region_aux_param temp;
+ struct init_pio_region_aux_param *params = &temp;
+
+ params->pio_region = pio_region;
+ params->mac = mac;
+ params->bus_address_hi = bus_address_hi;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_TRIO_OP_INIT_PIO_REGION_AUX);
+}
+
+EXPORT_SYMBOL(gxio_trio_init_pio_region_aux);
+
+
+struct init_memory_map_mmu_aux_param {
+ unsigned int map;
+ unsigned long va;
+ uint64_t size;
+ unsigned int asid;
+ unsigned int mac;
+ uint64_t bus_address;
+ unsigned int node;
+ unsigned int order_mode;
+};
+
+int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context,
+ unsigned int map, unsigned long va,
+ uint64_t size, unsigned int asid,
+ unsigned int mac, uint64_t bus_address,
+ unsigned int node,
+ unsigned int order_mode)
+{
+ struct init_memory_map_mmu_aux_param temp;
+ struct init_memory_map_mmu_aux_param *params = &temp;
+
+ params->map = map;
+ params->va = va;
+ params->size = size;
+ params->asid = asid;
+ params->mac = mac;
+ params->bus_address = bus_address;
+ params->node = node;
+ params->order_mode = order_mode;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX);
+}
+
+EXPORT_SYMBOL(gxio_trio_init_memory_map_mmu_aux);
+
+struct get_port_property_param {
+ struct pcie_trio_ports_property trio_ports;
+};
+
+int gxio_trio_get_port_property(gxio_trio_context_t * context,
+ struct pcie_trio_ports_property *trio_ports)
+{
+ int __result;
+ struct get_port_property_param temp;
+ struct get_port_property_param *params = &temp;
+
+ __result =
+ hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ GXIO_TRIO_OP_GET_PORT_PROPERTY);
+ *trio_ports = params->trio_ports;
+
+ return __result;
+}
+
+EXPORT_SYMBOL(gxio_trio_get_port_property);
+
+struct config_legacy_intr_param {
+ union iorpc_interrupt interrupt;
+ unsigned int mac;
+ unsigned int intx;
+};
+
+int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x,
+ int inter_y, int inter_ipi, int inter_event,
+ unsigned int mac, unsigned int intx)
+{
+ struct config_legacy_intr_param temp;
+ struct config_legacy_intr_param *params = &temp;
+
+ params->interrupt.kernel.x = inter_x;
+ params->interrupt.kernel.y = inter_y;
+ params->interrupt.kernel.ipi = inter_ipi;
+ params->interrupt.kernel.event = inter_event;
+ params->mac = mac;
+ params->intx = intx;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_TRIO_OP_CONFIG_LEGACY_INTR);
+}
+
+EXPORT_SYMBOL(gxio_trio_config_legacy_intr);
+
+struct config_msi_intr_param {
+ union iorpc_interrupt interrupt;
+ unsigned int mac;
+ unsigned int mem_map;
+ uint64_t mem_map_base;
+ uint64_t mem_map_limit;
+ unsigned int asid;
+};
+
+int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x,
+ int inter_y, int inter_ipi, int inter_event,
+ unsigned int mac, unsigned int mem_map,
+ uint64_t mem_map_base, uint64_t mem_map_limit,
+ unsigned int asid)
+{
+ struct config_msi_intr_param temp;
+ struct config_msi_intr_param *params = &temp;
+
+ params->interrupt.kernel.x = inter_x;
+ params->interrupt.kernel.y = inter_y;
+ params->interrupt.kernel.ipi = inter_ipi;
+ params->interrupt.kernel.event = inter_event;
+ params->mac = mac;
+ params->mem_map = mem_map;
+ params->mem_map_base = mem_map_base;
+ params->mem_map_limit = mem_map_limit;
+ params->asid = asid;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_TRIO_OP_CONFIG_MSI_INTR);
+}
+
+EXPORT_SYMBOL(gxio_trio_config_msi_intr);
+
+
+struct set_mps_mrs_param {
+ uint16_t mps;
+ uint16_t mrs;
+ unsigned int mac;
+};
+
+int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps,
+ uint16_t mrs, unsigned int mac)
+{
+ struct set_mps_mrs_param temp;
+ struct set_mps_mrs_param *params = &temp;
+
+ params->mps = mps;
+ params->mrs = mrs;
+ params->mac = mac;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_TRIO_OP_SET_MPS_MRS);
+}
+
+EXPORT_SYMBOL(gxio_trio_set_mps_mrs);
+
+struct force_rc_link_up_param {
+ unsigned int mac;
+};
+
+int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac)
+{
+ struct force_rc_link_up_param temp;
+ struct force_rc_link_up_param *params = &temp;
+
+ params->mac = mac;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_TRIO_OP_FORCE_RC_LINK_UP);
+}
+
+EXPORT_SYMBOL(gxio_trio_force_rc_link_up);
+
+struct force_ep_link_up_param {
+ unsigned int mac;
+};
+
+int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac)
+{
+ struct force_ep_link_up_param temp;
+ struct force_ep_link_up_param *params = &temp;
+
+ params->mac = mac;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_TRIO_OP_FORCE_EP_LINK_UP);
+}
+
+EXPORT_SYMBOL(gxio_trio_force_ep_link_up);
+
+struct get_mmio_base_param {
+ HV_PTE base;
+};
+
+int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base)
+{
+ int __result;
+ struct get_mmio_base_param temp;
+ struct get_mmio_base_param *params = &temp;
+
+ __result =
+ hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ GXIO_TRIO_OP_GET_MMIO_BASE);
+ *base = params->base;
+
+ return __result;
+}
+
+EXPORT_SYMBOL(gxio_trio_get_mmio_base);
+
+struct check_mmio_offset_param {
+ unsigned long offset;
+ unsigned long size;
+};
+
+int gxio_trio_check_mmio_offset(gxio_trio_context_t * context,
+ unsigned long offset, unsigned long size)
+{
+ struct check_mmio_offset_param temp;
+ struct check_mmio_offset_param *params = &temp;
+
+ params->offset = offset;
+ params->size = size;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_TRIO_OP_CHECK_MMIO_OFFSET);
+}
+
+EXPORT_SYMBOL(gxio_trio_check_mmio_offset);
diff --git a/arch/tile/gxio/iorpc_usb_host.c b/arch/tile/gxio/iorpc_usb_host.c
new file mode 100644
index 000000000000..cf3c3cc12204
--- /dev/null
+++ b/arch/tile/gxio/iorpc_usb_host.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#include "gxio/iorpc_usb_host.h"
+
+struct cfg_interrupt_param {
+ union iorpc_interrupt interrupt;
+};
+
+int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x,
+ int inter_y, int inter_ipi, int inter_event)
+{
+ struct cfg_interrupt_param temp;
+ struct cfg_interrupt_param *params = &temp;
+
+ params->interrupt.kernel.x = inter_x;
+ params->interrupt.kernel.y = inter_y;
+ params->interrupt.kernel.ipi = inter_ipi;
+ params->interrupt.kernel.event = inter_event;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_USB_HOST_OP_CFG_INTERRUPT);
+}
+
+EXPORT_SYMBOL(gxio_usb_host_cfg_interrupt);
+
+struct register_client_memory_param {
+ HV_PTE pte;
+ unsigned int flags;
+};
+
+int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context,
+ HV_PTE pte, unsigned int flags)
+{
+ struct register_client_memory_param temp;
+ struct register_client_memory_param *params = &temp;
+
+ params->pte = pte;
+ params->flags = flags;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_USB_HOST_OP_REGISTER_CLIENT_MEMORY);
+}
+
+EXPORT_SYMBOL(gxio_usb_host_register_client_memory);
+
+struct get_mmio_base_param {
+ HV_PTE base;
+};
+
+int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context, HV_PTE *base)
+{
+ int __result;
+ struct get_mmio_base_param temp;
+ struct get_mmio_base_param *params = &temp;
+
+ __result =
+ hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
+ GXIO_USB_HOST_OP_GET_MMIO_BASE);
+ *base = params->base;
+
+ return __result;
+}
+
+EXPORT_SYMBOL(gxio_usb_host_get_mmio_base);
+
+struct check_mmio_offset_param {
+ unsigned long offset;
+ unsigned long size;
+};
+
+int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context,
+ unsigned long offset, unsigned long size)
+{
+ struct check_mmio_offset_param temp;
+ struct check_mmio_offset_param *params = &temp;
+
+ params->offset = offset;
+ params->size = size;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET);
+}
+
+EXPORT_SYMBOL(gxio_usb_host_check_mmio_offset);
diff --git a/arch/tile/gxio/kiorpc.c b/arch/tile/gxio/kiorpc.c
new file mode 100644
index 000000000000..c8096aa5a3fc
--- /dev/null
+++ b/arch/tile/gxio/kiorpc.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * TILE-Gx IORPC support for kernel I/O drivers.
+ */
+
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <gxio/iorpc_globals.h>
+#include <gxio/kiorpc.h>
+
+#ifdef DEBUG_IORPC
+#define TRACE(FMT, ...) pr_info(SIMPLE_MSG_LINE FMT, ## __VA_ARGS__)
+#else
+#define TRACE(...)
+#endif
+
+/* Create kernel-VA-space MMIO mapping for an on-chip IO device. */
+void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset,
+ unsigned long size)
+{
+ pgprot_t mmio_base, prot = { 0 };
+ unsigned long pfn;
+ int err;
+
+ /* Look up the shim's lotar and base PA. */
+ err = __iorpc_get_mmio_base(hv_fd, &mmio_base);
+ if (err) {
+ TRACE("get_mmio_base() failure: %d\n", err);
+ return NULL;
+ }
+
+ /* Make sure the HV driver approves of our offset and size. */
+ err = __iorpc_check_mmio_offset(hv_fd, offset, size);
+ if (err) {
+ TRACE("check_mmio_offset() failure: %d\n", err);
+ return NULL;
+ }
+
+ /*
+ * mmio_base contains a base pfn and homing coordinates. Turn
+ * it into an MMIO pgprot and offset pfn.
+ */
+ prot = hv_pte_set_lotar(prot, hv_pte_get_lotar(mmio_base));
+ pfn = pte_pfn(mmio_base) + PFN_DOWN(offset);
+
+ return ioremap_prot(PFN_PHYS(pfn), size, prot);
+}
+
+EXPORT_SYMBOL(iorpc_ioremap);
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
new file mode 100644
index 000000000000..e71c63390acc
--- /dev/null
+++ b/arch/tile/gxio/mpipe.c
@@ -0,0 +1,545 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Implementation of mpipe gxio calls.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include <gxio/iorpc_globals.h>
+#include <gxio/iorpc_mpipe.h>
+#include <gxio/iorpc_mpipe_info.h>
+#include <gxio/kiorpc.h>
+#include <gxio/mpipe.h>
+
+/* HACK: Avoid pointless "shadow" warnings. */
+#define link link_shadow
+
+int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
+{
+ char file[32];
+
+ int fd;
+ int i;
+
+ snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
+ fd = hv_dev_open((HV_VirtAddr) file, 0);
+ if (fd < 0) {
+ if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
+ return fd;
+ else
+ return -ENODEV;
+ }
+
+ context->fd = fd;
+
+ /* Map in the MMIO space. */
+ context->mmio_cfg_base = (void __force *)
+ iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
+ HV_MPIPE_CONFIG_MMIO_SIZE);
+ if (context->mmio_cfg_base == NULL)
+ goto cfg_failed;
+
+ context->mmio_fast_base = (void __force *)
+ iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET,
+ HV_MPIPE_FAST_MMIO_SIZE);
+ if (context->mmio_fast_base == NULL)
+ goto fast_failed;
+
+ /* Initialize the stacks. */
+ for (i = 0; i < 8; i++)
+ context->__stacks.stacks[i] = 255;
+
+ return 0;
+
+ fast_failed:
+ iounmap((void __force __iomem *)(context->mmio_cfg_base));
+ cfg_failed:
+ hv_dev_close(context->fd);
+ return -ENODEV;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_init);
+
+int gxio_mpipe_destroy(gxio_mpipe_context_t *context)
+{
+ iounmap((void __force __iomem *)(context->mmio_cfg_base));
+ iounmap((void __force __iomem *)(context->mmio_fast_base));
+ return hv_dev_close(context->fd);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_destroy);
+
+static int16_t gxio_mpipe_buffer_sizes[8] =
+ { 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };
+
+gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t
+ size)
+{
+ int i;
+ for (i = 0; i < 7; i++)
+ if (size <= gxio_mpipe_buffer_sizes[i])
+ break;
+ return i;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum);
+
+size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
+ buffer_size_enum)
+{
+ if (buffer_size_enum > 7)
+ buffer_size_enum = 7;
+
+ return gxio_mpipe_buffer_sizes[buffer_size_enum];
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size);
+
+size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers)
+{
+ const int BUFFERS_PER_LINE = 12;
+
+ /* Count the number of cachlines. */
+ unsigned long lines =
+ (buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;
+
+ /* Convert to bytes. */
+ return lines * CHIP_L2_LINE_SIZE();
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes);
+
+int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
+ unsigned int stack,
+ gxio_mpipe_buffer_size_enum_t
+ buffer_size_enum, void *mem, size_t mem_size,
+ unsigned int mem_flags)
+{
+ int result;
+
+ memset(mem, 0, mem_size);
+
+ result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size,
+ mem_flags, stack,
+ buffer_size_enum);
+ if (result < 0)
+ return result;
+
+ /* Save the stack. */
+ context->__stacks.stacks[buffer_size_enum] = stack;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack);
+
+int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
+ unsigned int ring,
+ void *mem, size_t mem_size,
+ unsigned int mem_flags)
+{
+ return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size,
+ mem_flags, ring);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring);
+
+int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context,
+ unsigned int group,
+ unsigned int ring,
+ unsigned int num_rings,
+ unsigned int bucket,
+ unsigned int num_buckets,
+ gxio_mpipe_bucket_mode_t mode)
+{
+ int i;
+ int result;
+
+ gxio_mpipe_bucket_info_t bucket_info = { {
+ .group = group,
+ .mode = mode,
+ }
+ };
+
+ gxio_mpipe_notif_group_bits_t bits = { {0} };
+
+ for (i = 0; i < num_rings; i++)
+ gxio_mpipe_notif_group_add_ring(&bits, ring + i);
+
+ result = gxio_mpipe_init_notif_group(context, group, bits);
+ if (result != 0)
+ return result;
+
+ for (i = 0; i < num_buckets; i++) {
+ bucket_info.notifring = ring + (i % num_rings);
+
+ result = gxio_mpipe_init_bucket(context, bucket + i,
+ bucket_info);
+ if (result != 0)
+ return result;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets);
+
+int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
+ unsigned int ring, unsigned int channel,
+ void *mem, size_t mem_size,
+ unsigned int mem_flags)
+{
+ memset(mem, 0, mem_size);
+
+ return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags,
+ ring, channel);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring);
+
+void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
+ gxio_mpipe_context_t *context)
+{
+ rules->context = context;
+ memset(&rules->list, 0, sizeof(rules->list));
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init);
+
+int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
+ unsigned int bucket, unsigned int num_buckets,
+ gxio_mpipe_rules_stacks_t *stacks)
+{
+ int i;
+ int stack = 255;
+
+ gxio_mpipe_rules_list_t *list = &rules->list;
+
+ /* Current rule. */
+ gxio_mpipe_rules_rule_t *rule =
+ (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
+
+ unsigned int head = list->tail;
+
+ /*
+ * Align next rule properly.
+ *Note that "dmacs_and_vlans" will also be aligned.
+ */
+ unsigned int pad = 0;
+ while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0)
+ pad++;
+
+ /*
+ * Verify room.
+ * ISSUE: Mark rules as broken on error?
+ */
+ if (head + pad + sizeof(*rule) >= sizeof(list->rules))
+ return GXIO_MPIPE_ERR_RULES_FULL;
+
+ /* Verify num_buckets is a power of 2. */
+ if (__builtin_popcount(num_buckets) != 1)
+ return GXIO_MPIPE_ERR_RULES_INVALID;
+
+ /* Add padding to previous rule. */
+ rule->size += pad;
+
+ /* Start a new rule. */
+ list->head = head + pad;
+
+ rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
+
+ /* Default some values. */
+ rule->headroom = 2;
+ rule->tailroom = 0;
+ rule->capacity = 16384;
+
+ /* Save the bucket info. */
+ rule->bucket_mask = num_buckets - 1;
+ rule->bucket_first = bucket;
+
+ for (i = 8 - 1; i >= 0; i--) {
+ int maybe =
+ stacks ? stacks->stacks[i] : rules->context->__stacks.
+ stacks[i];
+ if (maybe != 255)
+ stack = maybe;
+ rule->stacks.stacks[i] = stack;
+ }
+
+ if (stack == 255)
+ return GXIO_MPIPE_ERR_RULES_INVALID;
+
+ /* NOTE: Only entries at the end of the array can be 255. */
+ for (i = 8 - 1; i > 0; i--) {
+ if (rule->stacks.stacks[i] == 255) {
+ rule->stacks.stacks[i] = stack;
+ rule->capacity =
+ gxio_mpipe_buffer_size_enum_to_buffer_size(i -
+ 1);
+ }
+ }
+
+ rule->size = sizeof(*rule);
+ list->tail = list->head + rule->size;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin);
+
+int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
+ unsigned int channel)
+{
+ gxio_mpipe_rules_list_t *list = &rules->list;
+
+ gxio_mpipe_rules_rule_t *rule =
+ (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
+
+ /* Verify channel. */
+ if (channel >= 32)
+ return GXIO_MPIPE_ERR_RULES_INVALID;
+
+ /* Verify begun. */
+ if (list->tail == 0)
+ return GXIO_MPIPE_ERR_RULES_EMPTY;
+
+ rule->channel_bits |= (1UL << channel);
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel);
+
+int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom)
+{
+ gxio_mpipe_rules_list_t *list = &rules->list;
+
+ gxio_mpipe_rules_rule_t *rule =
+ (gxio_mpipe_rules_rule_t *) (list->rules + list->head);
+
+ /* Verify begun. */
+ if (list->tail == 0)
+ return GXIO_MPIPE_ERR_RULES_EMPTY;
+
+ rule->headroom = headroom;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom);
+
+int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules)
+{
+ gxio_mpipe_rules_list_t *list = &rules->list;
+ unsigned int size =
+ offsetof(gxio_mpipe_rules_list_t, rules) + list->tail;
+ return gxio_mpipe_commit_rules(rules->context, list, size);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit);
+
+int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
+ gxio_mpipe_context_t *context,
+ unsigned int ring,
+ void *mem, size_t mem_size, unsigned int mem_flags)
+{
+ /* The init call below will verify that "mem_size" is legal. */
+ unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t);
+
+ iqueue->context = context;
+ iqueue->idescs = (gxio_mpipe_idesc_t *)mem;
+ iqueue->ring = ring;
+ iqueue->num_entries = num_entries;
+ iqueue->mask_num_entries = num_entries - 1;
+ iqueue->log2_num_entries = __builtin_ctz(num_entries);
+ iqueue->head = 1;
+#ifdef __BIG_ENDIAN__
+ iqueue->swapped = 0;
+#endif
+
+ /* Initialize the "tail". */
+ __gxio_mmio_write(mem, iqueue->head);
+
+ return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size,
+ mem_flags);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
+
+int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
+ gxio_mpipe_context_t *context,
+ unsigned int edma_ring_id,
+ unsigned int channel,
+ void *mem, unsigned int mem_size,
+ unsigned int mem_flags)
+{
+ /* The init call below will verify that "mem_size" is legal. */
+ unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t);
+
+ /* Offset used to read number of completed commands. */
+ MPIPE_EDMA_POST_REGION_ADDR_t offset;
+
+ int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel,
+ mem, mem_size, mem_flags);
+ if (result < 0)
+ return result;
+
+ memset(equeue, 0, sizeof(*equeue));
+
+ offset.word = 0;
+ offset.region =
+ MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
+ MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
+ offset.ring = edma_ring_id;
+
+ __gxio_dma_queue_init(&equeue->dma_queue,
+ context->mmio_fast_base + offset.word,
+ num_entries);
+ equeue->edescs = mem;
+ equeue->mask_num_entries = num_entries - 1;
+ equeue->log2_num_entries = __builtin_ctz(num_entries);
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
+
+int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
+ const struct timespec *ts)
+{
+ cycles_t cycles = get_cycles();
+ return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
+ (uint64_t)ts->tv_nsec,
+ (uint64_t)cycles);
+}
+
+int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
+ struct timespec *ts)
+{
+ int ret;
+ cycles_t cycles_prev, cycles_now, clock_rate;
+ cycles_prev = get_cycles();
+ ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec,
+ (uint64_t *)&ts->tv_nsec,
+ (uint64_t *)&cycles_now);
+ if (ret < 0) {
+ return ret;
+ }
+
+ clock_rate = get_clock_rate();
+ ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate;
+ if (ts->tv_nsec < 0) {
+ ts->tv_nsec += 1000000000LL;
+ ts->tv_sec -= 1;
+ }
+ return ret;
+}
+
+int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
+{
+ return gxio_mpipe_adjust_timestamp_aux(context, delta);
+}
+
+/* Get our internal context used for link name access. This context is
+ * special in that it is not associated with an mPIPE service domain.
+ */
+static gxio_mpipe_context_t *_gxio_get_link_context(void)
+{
+ static gxio_mpipe_context_t context;
+ static gxio_mpipe_context_t *contextp;
+ static int tried_open = 0;
+ static DEFINE_MUTEX(mutex);
+
+ mutex_lock(&mutex);
+
+ if (!tried_open) {
+ int i = 0;
+ tried_open = 1;
+
+ /*
+ * "4" here is the maximum possible number of mPIPE shims; it's
+ * an exaggeration but we shouldn't ever go beyond 2 anyway.
+ */
+ for (i = 0; i < 4; i++) {
+ char file[80];
+
+ snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i);
+ context.fd = hv_dev_open((HV_VirtAddr) file, 0);
+ if (context.fd < 0)
+ continue;
+
+ contextp = &context;
+ break;
+ }
+ }
+
+ mutex_unlock(&mutex);
+
+ return contextp;
+}
+
+int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
+{
+ int rv;
+ _gxio_mpipe_link_name_t name;
+ _gxio_mpipe_link_mac_t mac;
+
+ gxio_mpipe_context_t *context = _gxio_get_link_context();
+ if (!context)
+ return GXIO_ERR_NO_DEVICE;
+
+ rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
+ if (rv >= 0) {
+ strncpy(link_name, name.name, sizeof(name.name));
+ memcpy(link_mac, mac.mac, sizeof(mac.mac));
+ }
+
+ return rv;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac);
+
+int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
+ gxio_mpipe_context_t *context, const char *link_name,
+ unsigned int flags)
+{
+ _gxio_mpipe_link_name_t name;
+ int rv;
+
+ strncpy(name.name, link_name, sizeof(name.name));
+ name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+
+ rv = gxio_mpipe_link_open_aux(context, name, flags);
+ if (rv < 0)
+ return rv;
+
+ link->context = context;
+ link->channel = rv >> 8;
+ link->mac = rv & 0xFF;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_open);
+
+int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
+{
+ return gxio_mpipe_link_close_aux(link->context, link->mac);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
diff --git a/arch/tile/gxio/trio.c b/arch/tile/gxio/trio.c
new file mode 100644
index 000000000000..69f0b8df3ce3
--- /dev/null
+++ b/arch/tile/gxio/trio.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Implementation of trio gxio calls.
+ */
+
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include <gxio/trio.h>
+#include <gxio/iorpc_globals.h>
+#include <gxio/iorpc_trio.h>
+#include <gxio/kiorpc.h>
+
+int gxio_trio_init(gxio_trio_context_t *context, unsigned int trio_index)
+{
+ char file[32];
+ int fd;
+
+ snprintf(file, sizeof(file), "trio/%d/iorpc", trio_index);
+ fd = hv_dev_open((HV_VirtAddr) file, 0);
+ if (fd < 0) {
+ context->fd = -1;
+
+ if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
+ return fd;
+ else
+ return -ENODEV;
+ }
+
+ context->fd = fd;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_trio_init);
diff --git a/arch/tile/gxio/usb_host.c b/arch/tile/gxio/usb_host.c
new file mode 100644
index 000000000000..66b002f54ecc
--- /dev/null
+++ b/arch/tile/gxio/usb_host.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ *
+ * Implementation of USB gxio calls.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+
+#include <gxio/iorpc_globals.h>
+#include <gxio/iorpc_usb_host.h>
+#include <gxio/kiorpc.h>
+#include <gxio/usb_host.h>
+
+int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
+ int is_ehci)
+{
+ char file[32];
+ int fd;
+
+ if (is_ehci)
+ snprintf(file, sizeof(file), "usb_host/%d/iorpc/ehci",
+ usb_index);
+ else
+ snprintf(file, sizeof(file), "usb_host/%d/iorpc/ohci",
+ usb_index);
+
+ fd = hv_dev_open((HV_VirtAddr) file, 0);
+ if (fd < 0) {
+ if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
+ return fd;
+ else
+ return -ENODEV;
+ }
+
+ context->fd = fd;
+
+ // Map in the MMIO space.
+ context->mmio_base =
+ (void __force *)iorpc_ioremap(fd, 0, HV_USB_HOST_MMIO_SIZE);
+
+ if (context->mmio_base == NULL) {
+ hv_dev_close(context->fd);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_usb_host_init);
+
+int gxio_usb_host_destroy(gxio_usb_host_context_t * context)
+{
+ iounmap((void __force __iomem *)(context->mmio_base));
+ hv_dev_close(context->fd);
+
+ context->mmio_base = NULL;
+ context->fd = -1;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(gxio_usb_host_destroy);
+
+void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context)
+{
+ return context->mmio_base;
+}
+
+EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_start);
+
+size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context)
+{
+ return HV_USB_HOST_MMIO_SIZE;
+}
+
+EXPORT_SYMBOL_GPL(gxio_usb_host_get_reg_len);
diff --git a/arch/tile/include/arch/mpipe.h b/arch/tile/include/arch/mpipe.h
new file mode 100644
index 000000000000..8a33912fd6cc
--- /dev/null
+++ b/arch/tile/include/arch/mpipe.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_MPIPE_H__
+#define __ARCH_MPIPE_H__
+
+#include <arch/abi.h>
+#include <arch/mpipe_def.h>
+
+#ifndef __ASSEMBLER__
+
+/*
+ * MMIO Ingress DMA Release Region Address.
+ * This is a description of the physical addresses used to manipulate ingress
+ * credit counters. Accesses to this address space should use an address of
+ * this form and a value like that specified in IDMA_RELEASE_REGION_VAL.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /* Reserved. */
+ uint_reg_t __reserved_0 : 3;
+ /* NotifRing to be released */
+ uint_reg_t ring : 8;
+ /* Bucket to be released */
+ uint_reg_t bucket : 13;
+ /* Enable NotifRing release */
+ uint_reg_t ring_enable : 1;
+ /* Enable Bucket release */
+ uint_reg_t bucket_enable : 1;
+ /*
+ * This field of the address selects the region (address space) to be
+ * accessed. For the iDMA release region, this field must be 4.
+ */
+ uint_reg_t region : 3;
+ /* Reserved. */
+ uint_reg_t __reserved_1 : 6;
+ /* This field of the address indexes the 32 entry service domain table. */
+ uint_reg_t svc_dom : 5;
+ /* Reserved. */
+ uint_reg_t __reserved_2 : 24;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved_2 : 24;
+ uint_reg_t svc_dom : 5;
+ uint_reg_t __reserved_1 : 6;
+ uint_reg_t region : 3;
+ uint_reg_t bucket_enable : 1;
+ uint_reg_t ring_enable : 1;
+ uint_reg_t bucket : 13;
+ uint_reg_t ring : 8;
+ uint_reg_t __reserved_0 : 3;
+#endif
+ };
+
+ uint_reg_t word;
+} MPIPE_IDMA_RELEASE_REGION_ADDR_t;
+
+/*
+ * MMIO Ingress DMA Release Region Value - Release NotifRing and/or Bucket.
+ * Provides release of the associated NotifRing. The address of the MMIO
+ * operation is described in IDMA_RELEASE_REGION_ADDR.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /*
+ * Number of packets being released. The load balancer's count of
+ * inflight packets will be decremented by this amount for the associated
+ * Bucket and/or NotifRing
+ */
+ uint_reg_t count : 16;
+ /* Reserved. */
+ uint_reg_t __reserved : 48;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved : 48;
+ uint_reg_t count : 16;
+#endif
+ };
+
+ uint_reg_t word;
+} MPIPE_IDMA_RELEASE_REGION_VAL_t;
+
+/*
+ * MMIO Buffer Stack Manager Region Address.
+ * This MMIO region is used for posting or fetching buffers to/from the
+ * buffer stack manager. On an MMIO load, this pops a buffer descriptor from
+ * the top of stack if one is available. On an MMIO store, this pushes a
+ * buffer to the stack. The value read or written is described in
+ * BSM_REGION_VAL.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /* Reserved. */
+ uint_reg_t __reserved_0 : 3;
+ /* BufferStack being accessed. */
+ uint_reg_t stack : 5;
+ /* Reserved. */
+ uint_reg_t __reserved_1 : 18;
+ /*
+ * This field of the address selects the region (address space) to be
+ * accessed. For the buffer stack manager region, this field must be 6.
+ */
+ uint_reg_t region : 3;
+ /* Reserved. */
+ uint_reg_t __reserved_2 : 6;
+ /* This field of the address indexes the 32 entry service domain table. */
+ uint_reg_t svc_dom : 5;
+ /* Reserved. */
+ uint_reg_t __reserved_3 : 24;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved_3 : 24;
+ uint_reg_t svc_dom : 5;
+ uint_reg_t __reserved_2 : 6;
+ uint_reg_t region : 3;
+ uint_reg_t __reserved_1 : 18;
+ uint_reg_t stack : 5;
+ uint_reg_t __reserved_0 : 3;
+#endif
+ };
+
+ uint_reg_t word;
+} MPIPE_BSM_REGION_ADDR_t;
+
+/*
+ * MMIO Buffer Stack Manager Region Value.
+ * This MMIO region is used for posting or fetching buffers to/from the
+ * buffer stack manager. On an MMIO load, this pops a buffer descriptor from
+ * the top of stack if one is available. On an MMIO store, this pushes a
+ * buffer to the stack. The address of the MMIO operation is described in
+ * BSM_REGION_ADDR.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /* Reserved. */
+ uint_reg_t __reserved_0 : 7;
+ /*
+ * Base virtual address of the buffer. Must be sign extended by consumer.
+ */
+ int_reg_t va : 35;
+ /* Reserved. */
+ uint_reg_t __reserved_1 : 6;
+ /*
+ * Index of the buffer stack to which this buffer belongs. Ignored on
+ * writes since the offset bits specify the stack being accessed.
+ */
+ uint_reg_t stack_idx : 5;
+ /* Reserved. */
+ uint_reg_t __reserved_2 : 5;
+ /*
+ * Reads as one to indicate that this is a hardware managed buffer.
+ * Ignored on writes since all buffers on a given stack are the same size.
+ */
+ uint_reg_t hwb : 1;
+ /*
+ * Encoded size of buffer (ignored on writes):
+ * 0 = 128 bytes
+ * 1 = 256 bytes
+ * 2 = 512 bytes
+ * 3 = 1024 bytes
+ * 4 = 1664 bytes
+ * 5 = 4096 bytes
+ * 6 = 10368 bytes
+ * 7 = 16384 bytes
+ */
+ uint_reg_t size : 3;
+ /*
+ * Valid indication for the buffer. Ignored on writes.
+ * 0 : Valid buffer descriptor popped from stack.
+ * 3 : Could not pop a buffer from the stack. Either the stack is empty,
+ * or the hardware's prefetch buffer is empty for this stack.
+ */
+ uint_reg_t c : 2;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t c : 2;
+ uint_reg_t size : 3;
+ uint_reg_t hwb : 1;
+ uint_reg_t __reserved_2 : 5;
+ uint_reg_t stack_idx : 5;
+ uint_reg_t __reserved_1 : 6;
+ int_reg_t va : 35;
+ uint_reg_t __reserved_0 : 7;
+#endif
+ };
+
+ uint_reg_t word;
+} MPIPE_BSM_REGION_VAL_t;
+
+/*
+ * MMIO Egress DMA Post Region Address.
+ * Used to post descriptor locations to the eDMA descriptor engine. The
+ * value to be written is described in EDMA_POST_REGION_VAL
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /* Reserved. */
+ uint_reg_t __reserved_0 : 3;
+ /* eDMA ring being accessed */
+ uint_reg_t ring : 5;
+ /* Reserved. */
+ uint_reg_t __reserved_1 : 18;
+ /*
+ * This field of the address selects the region (address space) to be
+ * accessed. For the egress DMA post region, this field must be 5.
+ */
+ uint_reg_t region : 3;
+ /* Reserved. */
+ uint_reg_t __reserved_2 : 6;
+ /* This field of the address indexes the 32 entry service domain table. */
+ uint_reg_t svc_dom : 5;
+ /* Reserved. */
+ uint_reg_t __reserved_3 : 24;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved_3 : 24;
+ uint_reg_t svc_dom : 5;
+ uint_reg_t __reserved_2 : 6;
+ uint_reg_t region : 3;
+ uint_reg_t __reserved_1 : 18;
+ uint_reg_t ring : 5;
+ uint_reg_t __reserved_0 : 3;
+#endif
+ };
+
+ uint_reg_t word;
+} MPIPE_EDMA_POST_REGION_ADDR_t;
+
+/*
+ * MMIO Egress DMA Post Region Value.
+ * Used to post descriptor locations to the eDMA descriptor engine. The
+ * address is described in EDMA_POST_REGION_ADDR.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /*
+ * For writes, this specifies the current ring tail pointer prior to any
+ * post. For example, to post 1 or more descriptors starting at location
+ * 23, this would contain 23 (not 24). On writes, this index must be
+ * masked based on the ring size. The new tail pointer after this post
+ * is COUNT+RING_IDX (masked by the ring size).
+ *
+ * For reads, this provides the hardware descriptor fetcher's head
+ * pointer. The descriptors prior to the head pointer, however, may not
+ * yet have been processed so this indicator is only used to determine
+ * how full the ring is and if software may post more descriptors.
+ */
+ uint_reg_t ring_idx : 16;
+ /*
+ * For writes, this specifies number of contiguous descriptors that are
+ * being posted. Software may post up to RingSize descriptors with a
+ * single MMIO store. A zero in this field on a write will "wake up" an
+ * eDMA ring and cause it fetch descriptors regardless of the hardware's
+ * current view of the state of the tail pointer.
+ *
+ * For reads, this field provides a rolling count of the number of
+ * descriptors that have been completely processed. This may be used by
+ * software to determine when buffers associated with a descriptor may be
+ * returned or reused. When the ring's flush bit is cleared by software
+ * (after having been set by HW or SW), the COUNT will be cleared.
+ */
+ uint_reg_t count : 16;
+ /*
+ * For writes, this specifies the generation number of the tail being
+ * posted. Note that if tail+cnt wraps to the beginning of the ring, the
+ * eDMA hardware assumes that the descriptors posted at the beginning of
+ * the ring are also valid so it is okay to post around the wrap point.
+ *
+ * For reads, this is the current generation number. Valid descriptors
+ * will have the inverse of this generation number.
+ */
+ uint_reg_t gen : 1;
+ /* Reserved. */
+ uint_reg_t __reserved : 31;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved : 31;
+ uint_reg_t gen : 1;
+ uint_reg_t count : 16;
+ uint_reg_t ring_idx : 16;
+#endif
+ };
+
+ uint_reg_t word;
+} MPIPE_EDMA_POST_REGION_VAL_t;
+
+/*
+ * Load Balancer Bucket Status Data.
+ * Read/Write data for load balancer Bucket-Status Table. 4160 entries
+ * indexed by LBL_INIT_CTL.IDX when LBL_INIT_CTL.STRUCT_SEL is BSTS_TBL
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /* NotifRing currently assigned to this bucket. */
+ uint_reg_t notifring : 8;
+ /* Current reference count. */
+ uint_reg_t count : 16;
+ /* Group associated with this bucket. */
+ uint_reg_t group : 5;
+ /* Mode select for this bucket. */
+ uint_reg_t mode : 3;
+ /* Reserved. */
+ uint_reg_t __reserved : 32;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved : 32;
+ uint_reg_t mode : 3;
+ uint_reg_t group : 5;
+ uint_reg_t count : 16;
+ uint_reg_t notifring : 8;
+#endif
+ };
+
+ uint_reg_t word;
+} MPIPE_LBL_INIT_DAT_BSTS_TBL_t;
+#endif /* !defined(__ASSEMBLER__) */
+
+#endif /* !defined(__ARCH_MPIPE_H__) */
diff --git a/arch/tile/include/arch/mpipe_constants.h b/arch/tile/include/arch/mpipe_constants.h
new file mode 100644
index 000000000000..410a0400e055
--- /dev/null
+++ b/arch/tile/include/arch/mpipe_constants.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+
+#ifndef __ARCH_MPIPE_CONSTANTS_H__
+#define __ARCH_MPIPE_CONSTANTS_H__
+
+#define MPIPE_NUM_CLASSIFIERS 10
+#define MPIPE_CLS_MHZ 1200
+
+#define MPIPE_NUM_EDMA_RINGS 32
+
+#define MPIPE_NUM_SGMII_MACS 16
+#define MPIPE_NUM_XAUI_MACS 4
+#define MPIPE_NUM_LOOPBACK_CHANNELS 4
+#define MPIPE_NUM_NON_LB_CHANNELS 28
+
+#define MPIPE_NUM_IPKT_BLOCKS 1536
+
+#define MPIPE_NUM_BUCKETS 4160
+
+#define MPIPE_NUM_NOTIF_RINGS 256
+
+#define MPIPE_NUM_NOTIF_GROUPS 32
+
+#define MPIPE_NUM_TLBS_PER_ASID 16
+#define MPIPE_TLB_IDX_WIDTH 4
+
+#define MPIPE_MMIO_NUM_SVC_DOM 32
+
+#endif /* __ARCH_MPIPE_CONSTANTS_H__ */
diff --git a/arch/tile/include/arch/mpipe_def.h b/arch/tile/include/arch/mpipe_def.h
new file mode 100644
index 000000000000..c3d30217fc66
--- /dev/null
+++ b/arch/tile/include/arch/mpipe_def.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_MPIPE_DEF_H__
+#define __ARCH_MPIPE_DEF_H__
+#define MPIPE_MMIO_ADDR__REGION_SHIFT 26
+#define MPIPE_MMIO_ADDR__REGION_VAL_CFG 0x0
+#define MPIPE_MMIO_ADDR__REGION_VAL_IDMA 0x4
+#define MPIPE_MMIO_ADDR__REGION_VAL_EDMA 0x5
+#define MPIPE_MMIO_ADDR__REGION_VAL_BSM 0x6
+#define MPIPE_BSM_REGION_VAL__VA_SHIFT 7
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_128 0x0
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_256 0x1
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_512 0x2
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1024 0x3
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1664 0x4
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_4096 0x5
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_10368 0x6
+#define MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_16384 0x7
+#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_DFA 0x0
+#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_FIXED 0x1
+#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_ALWAYS_PICK 0x2
+#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY 0x3
+#define MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY_RAND 0x7
+#define MPIPE_LBL_NR_STATE__FIRST_WORD 0x2138
+#endif /* !defined(__ARCH_MPIPE_DEF_H__) */
diff --git a/arch/tile/include/arch/mpipe_shm.h b/arch/tile/include/arch/mpipe_shm.h
new file mode 100644
index 000000000000..f2e9e122818d
--- /dev/null
+++ b/arch/tile/include/arch/mpipe_shm.h
@@ -0,0 +1,509 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+
+#ifndef __ARCH_MPIPE_SHM_H__
+#define __ARCH_MPIPE_SHM_H__
+
+#include <arch/abi.h>
+#include <arch/mpipe_shm_def.h>
+
+#ifndef __ASSEMBLER__
+/**
+ * MPIPE eDMA Descriptor.
+ * The eDMA descriptor is written by software and consumed by hardware. It
+ * is used to specify the location of egress packet data to be sent out of
+ * the chip via one of the packet interfaces.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+ /* Word 0 */
+
+#ifndef __BIG_ENDIAN__
+ /**
+ * Generation number. Used to indicate a valid descriptor in ring. When
+ * a new descriptor is written into the ring, software must toggle this
+ * bit. The net effect is that the GEN bit being written into new
+ * descriptors toggles each time the ring tail pointer wraps.
+ */
+ uint_reg_t gen : 1;
+ /** Reserved. Must be zero. */
+ uint_reg_t r0 : 7;
+ /** Checksum generation enabled for this transfer. */
+ uint_reg_t csum : 1;
+ /**
+ * Nothing to be sent. Used, for example, when software has dropped a
+ * packet but still wishes to return all of the associated buffers.
+ */
+ uint_reg_t ns : 1;
+ /**
+ * Notification interrupt will be delivered when packet has been egressed.
+ */
+ uint_reg_t notif : 1;
+ /**
+ * Boundary indicator. When 1, this transfer includes the EOP for this
+ * command. Must be clear on all but the last descriptor for an egress
+ * packet.
+ */
+ uint_reg_t bound : 1;
+ /** Reserved. Must be zero. */
+ uint_reg_t r1 : 4;
+ /**
+ * Number of bytes to be sent for this descriptor. When zero, no data
+ * will be moved and the buffer descriptor will be ignored. If the
+ * buffer descriptor indicates that it is chained, the low 7 bits of the
+ * VA indicate the offset within the first buffer (e.g. 127 bytes is the
+ * maximum offset into the first buffer). If the size exceeds a single
+ * buffer, subsequent buffer descriptors will be fetched prior to
+ * processing the next eDMA descriptor in the ring.
+ */
+ uint_reg_t xfer_size : 14;
+ /** Reserved. Must be zero. */
+ uint_reg_t r2 : 2;
+ /**
+ * Destination of checksum relative to CSUM_START relative to the first
+ * byte moved by this descriptor. Must be zero if CSUM=0 in this
+ * descriptor. Must be less than XFER_SIZE (e.g. the first byte of the
+ * CSUM_DEST must be within the span of this descriptor).
+ */
+ uint_reg_t csum_dest : 8;
+ /**
+ * Start byte of checksum relative to the first byte moved by this
+ * descriptor. If this is not the first descriptor for the egress
+ * packet, CSUM_START is still relative to the first byte in this
+ * descriptor. Must be zero if CSUM=0 in this descriptor.
+ */
+ uint_reg_t csum_start : 8;
+ /**
+ * Initial value for 16-bit 1's compliment checksum if enabled via CSUM.
+ * Specified in network order. That is, bits[7:0] will be added to the
+ * byte pointed to by CSUM_START and bits[15:8] will be added to the byte
+ * pointed to by CSUM_START+1 (with appropriate 1's compliment carries).
+ * Must be zero if CSUM=0 in this descriptor.
+ */
+ uint_reg_t csum_seed : 16;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t csum_seed : 16;
+ uint_reg_t csum_start : 8;
+ uint_reg_t csum_dest : 8;
+ uint_reg_t r2 : 2;
+ uint_reg_t xfer_size : 14;
+ uint_reg_t r1 : 4;
+ uint_reg_t bound : 1;
+ uint_reg_t notif : 1;
+ uint_reg_t ns : 1;
+ uint_reg_t csum : 1;
+ uint_reg_t r0 : 7;
+ uint_reg_t gen : 1;
+#endif
+
+ /* Word 1 */
+
+#ifndef __BIG_ENDIAN__
+ /** Virtual address. Must be sign extended by consumer. */
+ int_reg_t va : 42;
+ /** Reserved. */
+ uint_reg_t __reserved_0 : 6;
+ /** Index of the buffer stack to which this buffer belongs. */
+ uint_reg_t stack_idx : 5;
+ /** Reserved. */
+ uint_reg_t __reserved_1 : 3;
+ /**
+ * Instance ID. For devices that support more than one mPIPE instance,
+ * this field indicates the buffer owner. If the INST field does not
+ * match the mPIPE's instance number when a packet is egressed, buffers
+ * with HWB set will be returned to the other mPIPE instance.
+ */
+ uint_reg_t inst : 1;
+ /** Reserved. */
+ uint_reg_t __reserved_2 : 1;
+ /**
+ * Always set to one by hardware in iDMA packet descriptors. For eDMA,
+ * indicates whether the buffer will be released to the buffer stack
+ * manager. When 0, software is responsible for releasing the buffer.
+ */
+ uint_reg_t hwb : 1;
+ /**
+ * Encoded size of buffer. Set by the ingress hardware for iDMA packet
+ * descriptors. For eDMA descriptors, indicates the buffer size if .c
+ * indicates a chained packet. If an eDMA descriptor is not chained and
+ * the .hwb bit is not set, this field is ignored and the size is
+ * specified by the .xfer_size field.
+ * 0 = 128 bytes
+ * 1 = 256 bytes
+ * 2 = 512 bytes
+ * 3 = 1024 bytes
+ * 4 = 1664 bytes
+ * 5 = 4096 bytes
+ * 6 = 10368 bytes
+ * 7 = 16384 bytes
+ */
+ uint_reg_t size : 3;
+ /**
+ * Chaining configuration for the buffer. Indicates that an ingress
+ * packet or egress command is chained across multiple buffers, with each
+ * buffer's size indicated by the .size field.
+ */
+ uint_reg_t c : 2;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t c : 2;
+ uint_reg_t size : 3;
+ uint_reg_t hwb : 1;
+ uint_reg_t __reserved_2 : 1;
+ uint_reg_t inst : 1;
+ uint_reg_t __reserved_1 : 3;
+ uint_reg_t stack_idx : 5;
+ uint_reg_t __reserved_0 : 6;
+ int_reg_t va : 42;
+#endif
+
+ };
+
+ /** Word access */
+ uint_reg_t words[2];
+} MPIPE_EDMA_DESC_t;
+
+/**
+ * MPIPE Packet Descriptor.
+ * The packet descriptor is filled by the mPIPE's classification,
+ * load-balancing, and buffer management services. Some fields are consumed
+ * by mPIPE hardware, and others are consumed by Tile software.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+ /* Word 0 */
+
+#ifndef __BIG_ENDIAN__
+ /**
+ * Notification ring into which this packet descriptor is written.
+ * Typically written by load balancer, but can be overridden by
+ * classification program if NR is asserted.
+ */
+ uint_reg_t notif_ring : 8;
+ /** Source channel for this packet. Written by mPIPE DMA hardware. */
+ uint_reg_t channel : 5;
+ /** Reserved. */
+ uint_reg_t __reserved_0 : 1;
+ /**
+ * MAC Error.
+ * Generated by the MAC interface. Asserted if there was an overrun of
+ * the MAC's receive FIFO. This condition generally only occurs if the
+ * mPIPE clock is running too slowly.
+ */
+ uint_reg_t me : 1;
+ /**
+ * Truncation Error.
+ * Written by the iDMA hardware. Asserted if packet was truncated due to
+ * insufficient space in iPkt buffer
+ */
+ uint_reg_t tr : 1;
+ /**
+ * Written by the iDMA hardware. Indicates the number of bytes written
+ * to Tile memory. In general, this is the actual size of the packet as
+ * received from the MAC. But if the packet is truncated due to running
+ * out of buffers or due to the iPkt buffer filling up, then the L2_SIZE
+ * will be reduced to reflect the actual number of valid bytes written to
+ * Tile memory.
+ */
+ uint_reg_t l2_size : 14;
+ /**
+ * CRC Error.
+ * Generated by the MAC. Asserted if MAC indicated an L2 CRC error or
+ * other L2 error (bad length etc.) on the packet.
+ */
+ uint_reg_t ce : 1;
+ /**
+ * Cut Through.
+ * Written by the iDMA hardware. Asserted if packet was not completely
+ * received before being sent to classifier. L2_Size will indicate
+ * number of bytes received so far.
+ */
+ uint_reg_t ct : 1;
+ /**
+ * Written by the classification program. Used by the load balancer to
+ * select the ring into which this packet descriptor is written.
+ */
+ uint_reg_t bucket_id : 13;
+ /** Reserved. */
+ uint_reg_t __reserved_1 : 3;
+ /**
+ * Checksum.
+ * Written by classification program. When 1, the checksum engine will
+ * perform checksum based on the CSUM_SEED, CSUM_START, and CSUM_BYTES
+ * fields. The result will be placed in CSUM_VAL.
+ */
+ uint_reg_t cs : 1;
+ /**
+ * Notification Ring Select.
+ * Written by the classification program. When 1, the NotifRingIDX is
+ * set by classification program rather than being set by load balancer.
+ */
+ uint_reg_t nr : 1;
+ /**
+ * Written by classification program. Indicates whether packet and
+ * descriptor should both be dropped, both be delivered, or only the
+ * descriptor should be delivered.
+ */
+ uint_reg_t dest : 2;
+ /**
+ * General Purpose Sequence Number Enable.
+ * Written by the classification program. When 1, the GP_SQN_SEL field
+ * contains the sequence number selector and the GP_SQN field will be
+ * replaced with the associated sequence number. When clear, the GP_SQN
+ * field is left intact and be used as "Custom" bytes.
+ */
+ uint_reg_t sq : 1;
+ /**
+ * TimeStamp Enable.
+ * Enable TimeStamp insertion. When clear, timestamp field may be filled
+ * with custom data by classifier. When set, hardware inserts the
+ * timestamp when the start of packet is received from the MAC.
+ */
+ uint_reg_t ts : 1;
+ /**
+ * Packet Sequence Number Enable.
+ * Enable PacketSQN insertion. When clear, PacketSQN field may be filled
+ * with custom data by classifier. When set, hardware inserts the packet
+ * sequence number when the packet descriptor is written to a
+ * notification ring.
+ */
+ uint_reg_t ps : 1;
+ /**
+ * Buffer Error.
+ * Written by the iDMA hardware. Asserted if iDMA ran out of buffers
+ * while writing the packet. Software must still return any buffer
+ * descriptors whose C field indicates a valid descriptor was consumed.
+ */
+ uint_reg_t be : 1;
+ /**
+ * Written by the classification program. The associated counter is
+ * incremented when the packet is sent.
+ */
+ uint_reg_t ctr0 : 5;
+ /** Reserved. */
+ uint_reg_t __reserved_2 : 3;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved_2 : 3;
+ uint_reg_t ctr0 : 5;
+ uint_reg_t be : 1;
+ uint_reg_t ps : 1;
+ uint_reg_t ts : 1;
+ uint_reg_t sq : 1;
+ uint_reg_t dest : 2;
+ uint_reg_t nr : 1;
+ uint_reg_t cs : 1;
+ uint_reg_t __reserved_1 : 3;
+ uint_reg_t bucket_id : 13;
+ uint_reg_t ct : 1;
+ uint_reg_t ce : 1;
+ uint_reg_t l2_size : 14;
+ uint_reg_t tr : 1;
+ uint_reg_t me : 1;
+ uint_reg_t __reserved_0 : 1;
+ uint_reg_t channel : 5;
+ uint_reg_t notif_ring : 8;
+#endif
+
+ /* Word 1 */
+
+#ifndef __BIG_ENDIAN__
+ /**
+ * Written by the classification program. The associated counter is
+ * incremented when the packet is sent.
+ */
+ uint_reg_t ctr1 : 5;
+ /** Reserved. */
+ uint_reg_t __reserved_3 : 3;
+ /**
+ * Written by classification program. Indicates the start byte for
+ * checksum. Relative to 1st byte received from MAC.
+ */
+ uint_reg_t csum_start : 8;
+ /**
+ * Checksum seed written by classification program. Overwritten with
+ * resultant checksum if CS bit is asserted. The endianness of the CSUM
+ * value bits when viewed by Tile software match the packet byte order.
+ * That is, bits[7:0] of the resulting checksum value correspond to
+ * earlier (more significant) bytes in the packet. To avoid classifier
+ * software from having to byte swap the CSUM_SEED, the iDMA checksum
+ * engine byte swaps the classifier's result before seeding the checksum
+ * calculation. Thus, the CSUM_START byte of packet data is added to
+ * bits[15:8] of the CSUM_SEED field generated by the classifier. This
+ * byte swap will be visible to Tile software if the CS bit is clear.
+ */
+ uint_reg_t csum_seed_val : 16;
+ /**
+ * Written by the classification program. Not interpreted by mPIPE
+ * hardware.
+ */
+ uint_reg_t custom0 : 32;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t custom0 : 32;
+ uint_reg_t csum_seed_val : 16;
+ uint_reg_t csum_start : 8;
+ uint_reg_t __reserved_3 : 3;
+ uint_reg_t ctr1 : 5;
+#endif
+
+ /* Word 2 */
+
+#ifndef __BIG_ENDIAN__
+ /**
+ * Written by the classification program. Not interpreted by mPIPE
+ * hardware.
+ */
+ uint_reg_t custom1 : 64;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t custom1 : 64;
+#endif
+
+ /* Word 3 */
+
+#ifndef __BIG_ENDIAN__
+ /**
+ * Written by the classification program. Not interpreted by mPIPE
+ * hardware.
+ */
+ uint_reg_t custom2 : 64;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t custom2 : 64;
+#endif
+
+ /* Word 4 */
+
+#ifndef __BIG_ENDIAN__
+ /**
+ * Written by the classification program. Not interpreted by mPIPE
+ * hardware.
+ */
+ uint_reg_t custom3 : 64;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t custom3 : 64;
+#endif
+
+ /* Word 5 */
+
+#ifndef __BIG_ENDIAN__
+ /**
+ * Sequence number applied when packet is distributed. Classifier
+ * selects which sequence number is to be applied by writing the 13-bit
+ * SQN-selector into this field.
+ */
+ uint_reg_t gp_sqn : 16;
+ /**
+ * Written by notification hardware. The packet sequence number is
+ * incremented for each packet that wasn't dropped.
+ */
+ uint_reg_t packet_sqn : 48;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t packet_sqn : 48;
+ uint_reg_t gp_sqn : 16;
+#endif
+
+ /* Word 6 */
+
+#ifndef __BIG_ENDIAN__
+ /**
+ * Written by hardware when the start-of-packet is received by the mPIPE
+ * from the MAC. This is the nanoseconds part of the packet timestamp.
+ */
+ uint_reg_t time_stamp_ns : 32;
+ /**
+ * Written by hardware when the start-of-packet is received by the mPIPE
+ * from the MAC. This is the seconds part of the packet timestamp.
+ */
+ uint_reg_t time_stamp_sec : 32;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t time_stamp_sec : 32;
+ uint_reg_t time_stamp_ns : 32;
+#endif
+
+ /* Word 7 */
+
+#ifndef __BIG_ENDIAN__
+ /** Virtual address. Must be sign extended by consumer. */
+ int_reg_t va : 42;
+ /** Reserved. */
+ uint_reg_t __reserved_4 : 6;
+ /** Index of the buffer stack to which this buffer belongs. */
+ uint_reg_t stack_idx : 5;
+ /** Reserved. */
+ uint_reg_t __reserved_5 : 3;
+ /**
+ * Instance ID. For devices that support more than one mPIPE instance,
+ * this field indicates the buffer owner. If the INST field does not
+ * match the mPIPE's instance number when a packet is egressed, buffers
+ * with HWB set will be returned to the other mPIPE instance.
+ */
+ uint_reg_t inst : 1;
+ /** Reserved. */
+ uint_reg_t __reserved_6 : 1;
+ /**
+ * Always set to one by hardware in iDMA packet descriptors. For eDMA,
+ * indicates whether the buffer will be released to the buffer stack
+ * manager. When 0, software is responsible for releasing the buffer.
+ */
+ uint_reg_t hwb : 1;
+ /**
+ * Encoded size of buffer. Set by the ingress hardware for iDMA packet
+ * descriptors. For eDMA descriptors, indicates the buffer size if .c
+ * indicates a chained packet. If an eDMA descriptor is not chained and
+ * the .hwb bit is not set, this field is ignored and the size is
+ * specified by the .xfer_size field.
+ * 0 = 128 bytes
+ * 1 = 256 bytes
+ * 2 = 512 bytes
+ * 3 = 1024 bytes
+ * 4 = 1664 bytes
+ * 5 = 4096 bytes
+ * 6 = 10368 bytes
+ * 7 = 16384 bytes
+ */
+ uint_reg_t size : 3;
+ /**
+ * Chaining configuration for the buffer. Indicates that an ingress
+ * packet or egress command is chained across multiple buffers, with each
+ * buffer's size indicated by the .size field.
+ */
+ uint_reg_t c : 2;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t c : 2;
+ uint_reg_t size : 3;
+ uint_reg_t hwb : 1;
+ uint_reg_t __reserved_6 : 1;
+ uint_reg_t inst : 1;
+ uint_reg_t __reserved_5 : 3;
+ uint_reg_t stack_idx : 5;
+ uint_reg_t __reserved_4 : 6;
+ int_reg_t va : 42;
+#endif
+
+ };
+
+ /** Word access */
+ uint_reg_t words[8];
+} MPIPE_PDESC_t;
+#endif /* !defined(__ASSEMBLER__) */
+
+#endif /* !defined(__ARCH_MPIPE_SHM_H__) */
diff --git a/arch/tile/include/arch/mpipe_shm_def.h b/arch/tile/include/arch/mpipe_shm_def.h
new file mode 100644
index 000000000000..6124d39c8318
--- /dev/null
+++ b/arch/tile/include/arch/mpipe_shm_def.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_MPIPE_SHM_DEF_H__
+#define __ARCH_MPIPE_SHM_DEF_H__
+#define MPIPE_EDMA_DESC_WORD1__C_VAL_UNCHAINED 0x0
+#define MPIPE_EDMA_DESC_WORD1__C_VAL_CHAINED 0x1
+#define MPIPE_EDMA_DESC_WORD1__C_VAL_NOT_RDY 0x2
+#define MPIPE_EDMA_DESC_WORD1__C_VAL_INVALID 0x3
+#endif /* !defined(__ARCH_MPIPE_SHM_DEF_H__) */
diff --git a/arch/tile/include/arch/trio.h b/arch/tile/include/arch/trio.h
new file mode 100644
index 000000000000..d3000a871a21
--- /dev/null
+++ b/arch/tile/include/arch/trio.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_TRIO_H__
+#define __ARCH_TRIO_H__
+
+#include <arch/abi.h>
+#include <arch/trio_def.h>
+
+#ifndef __ASSEMBLER__
+
+/*
+ * Tile PIO Region Configuration - CFG Address Format.
+ * This register describes the address format for PIO accesses when the
+ * associated region is setup with TYPE=CFG.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /* Register Address (full byte address). */
+ uint_reg_t reg_addr : 12;
+ /* Function Number */
+ uint_reg_t fn : 3;
+ /* Device Number */
+ uint_reg_t dev : 5;
+ /* BUS Number */
+ uint_reg_t bus : 8;
+ /* Config Type: 0 for access to directly-attached device. 1 otherwise. */
+ uint_reg_t type : 1;
+ /* Reserved. */
+ uint_reg_t __reserved_0 : 1;
+ /*
+ * MAC select. This must match the configuration in
+ * TILE_PIO_REGION_SETUP.MAC.
+ */
+ uint_reg_t mac : 2;
+ /* Reserved. */
+ uint_reg_t __reserved_1 : 32;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved_1 : 32;
+ uint_reg_t mac : 2;
+ uint_reg_t __reserved_0 : 1;
+ uint_reg_t type : 1;
+ uint_reg_t bus : 8;
+ uint_reg_t dev : 5;
+ uint_reg_t fn : 3;
+ uint_reg_t reg_addr : 12;
+#endif
+ };
+
+ uint_reg_t word;
+} TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t;
+#endif /* !defined(__ASSEMBLER__) */
+
+#endif /* !defined(__ARCH_TRIO_H__) */
diff --git a/arch/tile/include/arch/trio_constants.h b/arch/tile/include/arch/trio_constants.h
new file mode 100644
index 000000000000..628b045436b8
--- /dev/null
+++ b/arch/tile/include/arch/trio_constants.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+
+#ifndef __ARCH_TRIO_CONSTANTS_H__
+#define __ARCH_TRIO_CONSTANTS_H__
+
+#define TRIO_NUM_ASIDS 16
+#define TRIO_NUM_TLBS_PER_ASID 16
+
+#define TRIO_NUM_TPIO_REGIONS 8
+#define TRIO_LOG2_NUM_TPIO_REGIONS 3
+
+#define TRIO_NUM_MAP_MEM_REGIONS 16
+#define TRIO_LOG2_NUM_MAP_MEM_REGIONS 4
+#define TRIO_NUM_MAP_SQ_REGIONS 8
+#define TRIO_LOG2_NUM_MAP_SQ_REGIONS 3
+
+#define TRIO_LOG2_NUM_SQ_FIFO_ENTRIES 6
+
+#define TRIO_NUM_PUSH_DMA_RINGS 32
+
+#define TRIO_NUM_PULL_DMA_RINGS 32
+
+#endif /* __ARCH_TRIO_CONSTANTS_H__ */
diff --git a/arch/tile/include/arch/trio_def.h b/arch/tile/include/arch/trio_def.h
new file mode 100644
index 000000000000..e80500317dc4
--- /dev/null
+++ b/arch/tile/include/arch/trio_def.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_TRIO_DEF_H__
+#define __ARCH_TRIO_DEF_H__
+#define TRIO_CFG_REGION_ADDR__REG_SHIFT 0
+#define TRIO_CFG_REGION_ADDR__INTFC_SHIFT 16
+#define TRIO_CFG_REGION_ADDR__INTFC_VAL_TRIO 0x0
+#define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE 0x1
+#define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD 0x2
+#define TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED 0x3
+#define TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT 18
+#define TRIO_CFG_REGION_ADDR__PROT_SHIFT 20
+#define TRIO_PIO_REGIONS_ADDR__REGION_SHIFT 32
+#define TRIO_MAP_MEM_REG_INT0 0x1000000000
+#define TRIO_MAP_MEM_REG_INT1 0x1000000008
+#define TRIO_MAP_MEM_REG_INT2 0x1000000010
+#define TRIO_MAP_MEM_REG_INT3 0x1000000018
+#define TRIO_MAP_MEM_REG_INT4 0x1000000020
+#define TRIO_MAP_MEM_REG_INT5 0x1000000028
+#define TRIO_MAP_MEM_REG_INT6 0x1000000030
+#define TRIO_MAP_MEM_REG_INT7 0x1000000038
+#define TRIO_MAP_MEM_LIM__ADDR_SHIFT 12
+#define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_UNORDERED 0x0
+#define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_STRICT 0x1
+#define TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_REL_ORD 0x2
+#define TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT 30
+#endif /* !defined(__ARCH_TRIO_DEF_H__) */
diff --git a/arch/tile/include/arch/trio_pcie_intfc.h b/arch/tile/include/arch/trio_pcie_intfc.h
new file mode 100644
index 000000000000..0487fdb9d581
--- /dev/null
+++ b/arch/tile/include/arch/trio_pcie_intfc.h
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_TRIO_PCIE_INTFC_H__
+#define __ARCH_TRIO_PCIE_INTFC_H__
+
+#include <arch/abi.h>
+#include <arch/trio_pcie_intfc_def.h>
+
+#ifndef __ASSEMBLER__
+
+/*
+ * Port Configuration.
+ * Configuration of the PCIe Port
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /* Provides the state of the strapping pins for this port. */
+ uint_reg_t strap_state : 3;
+ /* Reserved. */
+ uint_reg_t __reserved_0 : 1;
+ /*
+ * When 1, the device type will be overridden using OVD_DEV_TYPE_VAL.
+ * When 0, the device type is determined based on the STRAP_STATE.
+ */
+ uint_reg_t ovd_dev_type : 1;
+ /* Provides the device type when OVD_DEV_TYPE is 1. */
+ uint_reg_t ovd_dev_type_val : 4;
+ /* Determines how link is trained. */
+ uint_reg_t train_mode : 2;
+ /* Reserved. */
+ uint_reg_t __reserved_1 : 1;
+ /*
+ * For PCIe, used to flip physical RX lanes that were not properly wired.
+ * This is not the same as lane reversal which is handled automatically
+ * during link training. When 0, RX Lane0 must be wired to the link
+ * partner (either to its Lane0 or it's LaneN). When RX_LANE_FLIP is 1,
+ * the highest numbered lane for this port becomes Lane0 and Lane0 does
+ * NOT have to be wired to the link partner.
+ */
+ uint_reg_t rx_lane_flip : 1;
+ /*
+ * For PCIe, used to flip physical TX lanes that were not properly wired.
+ * This is not the same as lane reversal which is handled automatically
+ * during link training. When 0, TX Lane0 must be wired to the link
+ * partner (either to its Lane0 or it's LaneN). When TX_LANE_FLIP is 1,
+ * the highest numbered lane for this port becomes Lane0 and Lane0 does
+ * NOT have to be wired to the link partner.
+ */
+ uint_reg_t tx_lane_flip : 1;
+ /*
+ * For StreamIO port, configures the width of the port when TRAIN_MODE is
+ * not STRAP.
+ */
+ uint_reg_t stream_width : 2;
+ /*
+ * For StreamIO port, configures the rate of the port when TRAIN_MODE is
+ * not STRAP.
+ */
+ uint_reg_t stream_rate : 2;
+ /* Reserved. */
+ uint_reg_t __reserved_2 : 46;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved_2 : 46;
+ uint_reg_t stream_rate : 2;
+ uint_reg_t stream_width : 2;
+ uint_reg_t tx_lane_flip : 1;
+ uint_reg_t rx_lane_flip : 1;
+ uint_reg_t __reserved_1 : 1;
+ uint_reg_t train_mode : 2;
+ uint_reg_t ovd_dev_type_val : 4;
+ uint_reg_t ovd_dev_type : 1;
+ uint_reg_t __reserved_0 : 1;
+ uint_reg_t strap_state : 3;
+#endif
+ };
+
+ uint_reg_t word;
+} TRIO_PCIE_INTFC_PORT_CONFIG_t;
+
+/*
+ * Port Status.
+ * Status of the PCIe Port. This register applies to the StreamIO port when
+ * StreamIO is enabled.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /*
+ * Indicates the DL state of the port. When 1, the port is up and ready
+ * to receive traffic.
+ */
+ uint_reg_t dl_up : 1;
+ /*
+ * Indicates the number of times the link has gone down. Clears on read.
+ */
+ uint_reg_t dl_down_cnt : 7;
+ /* Indicates the SERDES PLL has spun up and is providing a valid clock. */
+ uint_reg_t clock_ready : 1;
+ /* Reserved. */
+ uint_reg_t __reserved_0 : 7;
+ /* Device revision ID. */
+ uint_reg_t device_rev : 8;
+ /* Link state (PCIe). */
+ uint_reg_t ltssm_state : 6;
+ /* Link power management state (PCIe). */
+ uint_reg_t pm_state : 3;
+ /* Reserved. */
+ uint_reg_t __reserved_1 : 31;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved_1 : 31;
+ uint_reg_t pm_state : 3;
+ uint_reg_t ltssm_state : 6;
+ uint_reg_t device_rev : 8;
+ uint_reg_t __reserved_0 : 7;
+ uint_reg_t clock_ready : 1;
+ uint_reg_t dl_down_cnt : 7;
+ uint_reg_t dl_up : 1;
+#endif
+ };
+
+ uint_reg_t word;
+} TRIO_PCIE_INTFC_PORT_STATUS_t;
+
+/*
+ * Transmit FIFO Control.
+ * Contains TX FIFO thresholds. These registers are for diagnostics purposes
+ * only. Changing these values causes undefined behavior.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /*
+ * Almost-Empty level for TX0 data. Typically set to at least
+ * roundup(38.0*M/N) where N=tclk frequency and M=MAC symbol rate in MHz
+ * for a x4 port (250MHz).
+ */
+ uint_reg_t tx0_data_ae_lvl : 7;
+ /* Reserved. */
+ uint_reg_t __reserved_0 : 1;
+ /* Almost-Empty level for TX1 data. */
+ uint_reg_t tx1_data_ae_lvl : 7;
+ /* Reserved. */
+ uint_reg_t __reserved_1 : 1;
+ /* Almost-Full level for TX0 data. */
+ uint_reg_t tx0_data_af_lvl : 7;
+ /* Reserved. */
+ uint_reg_t __reserved_2 : 1;
+ /* Almost-Full level for TX1 data. */
+ uint_reg_t tx1_data_af_lvl : 7;
+ /* Reserved. */
+ uint_reg_t __reserved_3 : 1;
+ /* Almost-Full level for TX0 info. */
+ uint_reg_t tx0_info_af_lvl : 5;
+ /* Reserved. */
+ uint_reg_t __reserved_4 : 3;
+ /* Almost-Full level for TX1 info. */
+ uint_reg_t tx1_info_af_lvl : 5;
+ /* Reserved. */
+ uint_reg_t __reserved_5 : 3;
+ /*
+ * This register provides performance adjustment for high bandwidth
+ * flows. The MAC will assert almost-full to TRIO if non-posted credits
+ * fall below this level. Note that setting this larger than the initial
+ * PORT_CREDIT.NPH value will cause READS to never be sent. If the
+ * initial credit value from the link partner is smaller than this value
+ * when the link comes up, the value will be reset to the initial credit
+ * value to prevent lockup.
+ */
+ uint_reg_t min_np_credits : 8;
+ /*
+ * This register provides performance adjustment for high bandwidth
+ * flows. The MAC will assert almost-full to TRIO if posted credits fall
+ * below this level. Note that setting this larger than the initial
+ * PORT_CREDIT.PH value will cause WRITES to never be sent. If the
+ * initial credit value from the link partner is smaller than this value
+ * when the link comes up, the value will be reset to the initial credit
+ * value to prevent lockup.
+ */
+ uint_reg_t min_p_credits : 8;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t min_p_credits : 8;
+ uint_reg_t min_np_credits : 8;
+ uint_reg_t __reserved_5 : 3;
+ uint_reg_t tx1_info_af_lvl : 5;
+ uint_reg_t __reserved_4 : 3;
+ uint_reg_t tx0_info_af_lvl : 5;
+ uint_reg_t __reserved_3 : 1;
+ uint_reg_t tx1_data_af_lvl : 7;
+ uint_reg_t __reserved_2 : 1;
+ uint_reg_t tx0_data_af_lvl : 7;
+ uint_reg_t __reserved_1 : 1;
+ uint_reg_t tx1_data_ae_lvl : 7;
+ uint_reg_t __reserved_0 : 1;
+ uint_reg_t tx0_data_ae_lvl : 7;
+#endif
+ };
+
+ uint_reg_t word;
+} TRIO_PCIE_INTFC_TX_FIFO_CTL_t;
+#endif /* !defined(__ASSEMBLER__) */
+
+#endif /* !defined(__ARCH_TRIO_PCIE_INTFC_H__) */
diff --git a/arch/tile/include/arch/trio_pcie_intfc_def.h b/arch/tile/include/arch/trio_pcie_intfc_def.h
new file mode 100644
index 000000000000..d3fd6781fb24
--- /dev/null
+++ b/arch/tile/include/arch/trio_pcie_intfc_def.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_TRIO_PCIE_INTFC_DEF_H__
+#define __ARCH_TRIO_PCIE_INTFC_DEF_H__
+#define TRIO_PCIE_INTFC_MAC_INT_STS 0x0000
+#define TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK 0xf000
+#define TRIO_PCIE_INTFC_PORT_CONFIG 0x0018
+#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_DISABLED 0x0
+#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT 0x1
+#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC 0x2
+#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1 0x3
+#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1 0x4
+#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_XLINK 0x5
+#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_STREAM_X1 0x6
+#define TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_STREAM_X4 0x7
+#define TRIO_PCIE_INTFC_PORT_STATUS 0x0020
+#define TRIO_PCIE_INTFC_TX_FIFO_CTL 0x0050
+#endif /* !defined(__ARCH_TRIO_PCIE_INTFC_DEF_H__) */
diff --git a/arch/tile/include/arch/trio_pcie_rc.h b/arch/tile/include/arch/trio_pcie_rc.h
new file mode 100644
index 000000000000..6a25d0aca857
--- /dev/null
+++ b/arch/tile/include/arch/trio_pcie_rc.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_TRIO_PCIE_RC_H__
+#define __ARCH_TRIO_PCIE_RC_H__
+
+#include <arch/abi.h>
+#include <arch/trio_pcie_rc_def.h>
+
+#ifndef __ASSEMBLER__
+
+/* Device Capabilities Register. */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /*
+ * Max_Payload_Size Supported, writablethrough the MAC_STANDARD interface
+ */
+ uint_reg_t mps_sup : 3;
+ /*
+ * This field is writable through the MAC_STANDARD interface. However,
+ * Phantom Function is not supported. Therefore, the application must
+ * not write any value other than 0x0 to this field.
+ */
+ uint_reg_t phantom_function_supported : 2;
+ /* This bit is writable through the MAC_STANDARD interface. */
+ uint_reg_t ext_tag_field_supported : 1;
+ /* Reserved. */
+ uint_reg_t __reserved_0 : 3;
+ /* Endpoint L1 Acceptable Latency Must be 0x0 for non-Endpoint devices. */
+ uint_reg_t l1_lat : 3;
+ /*
+ * Undefined since PCI Express 1.1 (Was Attention Button Present for PCI
+ * Express 1.0a)
+ */
+ uint_reg_t r1 : 1;
+ /*
+ * Undefined since PCI Express 1.1 (Was Attention Indicator Present for
+ * PCI Express 1.0a)
+ */
+ uint_reg_t r2 : 1;
+ /*
+ * Undefined since PCI Express 1.1 (Was Power Indicator Present for PCI
+ * Express 1.0a)
+ */
+ uint_reg_t r3 : 1;
+ /*
+ * Role-Based Error Reporting, writable through the MAC_STANDARD
+ * interface. Required to be set for device compliant to 1.1 spec and
+ * later.
+ */
+ uint_reg_t rer : 1;
+ /* Reserved. */
+ uint_reg_t __reserved_1 : 2;
+ /* Captured Slot Power Limit Value Upstream port only. */
+ uint_reg_t slot_pwr_lim : 8;
+ /* Captured Slot Power Limit Scale Upstream port only. */
+ uint_reg_t slot_pwr_scale : 2;
+ /* Reserved. */
+ uint_reg_t __reserved_2 : 4;
+ /* Endpoint L0s Acceptable LatencyMust be 0x0 for non-Endpoint devices. */
+ uint_reg_t l0s_lat : 1;
+ /* Reserved. */
+ uint_reg_t __reserved_3 : 31;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved_3 : 31;
+ uint_reg_t l0s_lat : 1;
+ uint_reg_t __reserved_2 : 4;
+ uint_reg_t slot_pwr_scale : 2;
+ uint_reg_t slot_pwr_lim : 8;
+ uint_reg_t __reserved_1 : 2;
+ uint_reg_t rer : 1;
+ uint_reg_t r3 : 1;
+ uint_reg_t r2 : 1;
+ uint_reg_t r1 : 1;
+ uint_reg_t l1_lat : 3;
+ uint_reg_t __reserved_0 : 3;
+ uint_reg_t ext_tag_field_supported : 1;
+ uint_reg_t phantom_function_supported : 2;
+ uint_reg_t mps_sup : 3;
+#endif
+ };
+
+ uint_reg_t word;
+} TRIO_PCIE_RC_DEVICE_CAP_t;
+
+/* Device Control Register. */
+
+__extension__
+typedef union
+{
+ struct
+ {
+#ifndef __BIG_ENDIAN__
+ /* Correctable Error Reporting Enable */
+ uint_reg_t cor_err_ena : 1;
+ /* Non-Fatal Error Reporting Enable */
+ uint_reg_t nf_err_ena : 1;
+ /* Fatal Error Reporting Enable */
+ uint_reg_t fatal_err_ena : 1;
+ /* Unsupported Request Reporting Enable */
+ uint_reg_t ur_ena : 1;
+ /* Relaxed orderring enable */
+ uint_reg_t ro_ena : 1;
+ /* Max Payload Size */
+ uint_reg_t max_payload_size : 3;
+ /* Extended Tag Field Enable */
+ uint_reg_t ext_tag : 1;
+ /* Phantom Function Enable */
+ uint_reg_t ph_fn_ena : 1;
+ /* AUX Power PM Enable */
+ uint_reg_t aux_pm_ena : 1;
+ /* Enable NoSnoop */
+ uint_reg_t no_snoop : 1;
+ /* Max read request size */
+ uint_reg_t max_read_req_sz : 3;
+ /* Reserved. */
+ uint_reg_t __reserved : 49;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t __reserved : 49;
+ uint_reg_t max_read_req_sz : 3;
+ uint_reg_t no_snoop : 1;
+ uint_reg_t aux_pm_ena : 1;
+ uint_reg_t ph_fn_ena : 1;
+ uint_reg_t ext_tag : 1;
+ uint_reg_t max_payload_size : 3;
+ uint_reg_t ro_ena : 1;
+ uint_reg_t ur_ena : 1;
+ uint_reg_t fatal_err_ena : 1;
+ uint_reg_t nf_err_ena : 1;
+ uint_reg_t cor_err_ena : 1;
+#endif
+ };
+
+ uint_reg_t word;
+} TRIO_PCIE_RC_DEVICE_CONTROL_t;
+#endif /* !defined(__ASSEMBLER__) */
+
+#endif /* !defined(__ARCH_TRIO_PCIE_RC_H__) */
diff --git a/arch/tile/include/arch/trio_pcie_rc_def.h b/arch/tile/include/arch/trio_pcie_rc_def.h
new file mode 100644
index 000000000000..74081a65b6f2
--- /dev/null
+++ b/arch/tile/include/arch/trio_pcie_rc_def.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_TRIO_PCIE_RC_DEF_H__
+#define __ARCH_TRIO_PCIE_RC_DEF_H__
+#define TRIO_PCIE_RC_DEVICE_CAP 0x0074
+#define TRIO_PCIE_RC_DEVICE_CONTROL 0x0078
+#define TRIO_PCIE_RC_DEVICE_ID_VEN_ID 0x0000
+#define TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT 16
+#define TRIO_PCIE_RC_REVISION_ID 0x0008
+#endif /* !defined(__ARCH_TRIO_PCIE_RC_DEF_H__) */
diff --git a/arch/tile/include/arch/trio_shm.h b/arch/tile/include/arch/trio_shm.h
new file mode 100644
index 000000000000..3382e38245af
--- /dev/null
+++ b/arch/tile/include/arch/trio_shm.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+
+#ifndef __ARCH_TRIO_SHM_H__
+#define __ARCH_TRIO_SHM_H__
+
+#include <arch/abi.h>
+#include <arch/trio_shm_def.h>
+
+#ifndef __ASSEMBLER__
+/**
+ * TRIO DMA Descriptor.
+ * The TRIO DMA descriptor is written by software and consumed by hardware.
+ * It is used to specify the location of transaction data in the IO and Tile
+ * domains.
+ */
+
+__extension__
+typedef union
+{
+ struct
+ {
+ /* Word 0 */
+
+#ifndef __BIG_ENDIAN__
+ /** Tile side virtual address. */
+ int_reg_t va : 42;
+ /**
+ * Encoded size of buffer used on push DMA when C=1:
+ * 0 = 128 bytes
+ * 1 = 256 bytes
+ * 2 = 512 bytes
+ * 3 = 1024 bytes
+ * 4 = 1664 bytes
+ * 5 = 4096 bytes
+ * 6 = 10368 bytes
+ * 7 = 16384 bytes
+ */
+ uint_reg_t bsz : 3;
+ /**
+ * Chaining designation. Always zero for pull DMA
+ * 0 : Unchained buffer pointer
+ * 1 : Chained buffer pointer. Next buffer descriptor (e.g. VA) stored
+ * in 1st 8-bytes in buffer. For chained buffers, first 8-bytes of each
+ * buffer contain the next buffer descriptor formatted exactly like a PDE
+ * buffer descriptor. This allows a chained PDE buffer to be sent using
+ * push DMA.
+ */
+ uint_reg_t c : 1;
+ /**
+ * Notification interrupt will be delivered when the transaction has
+ * completed (all data has been read from or written to the Tile-side
+ * buffer).
+ */
+ uint_reg_t notif : 1;
+ /**
+ * When 0, the XSIZE field specifies the total byte count for the
+ * transaction. When 1, the XSIZE field is encoded as 2^(N+14) for N in
+ * {0..6}:
+ * 0 = 16KB
+ * 1 = 32KB
+ * 2 = 64KB
+ * 3 = 128KB
+ * 4 = 256KB
+ * 5 = 512KB
+ * 6 = 1MB
+ * All other encodings of the XSIZE field are reserved when SMOD=1
+ */
+ uint_reg_t smod : 1;
+ /**
+ * Total number of bytes to move for this transaction. When SMOD=1,
+ * this field is encoded - see SMOD description.
+ */
+ uint_reg_t xsize : 14;
+ /** Reserved. */
+ uint_reg_t __reserved_0 : 1;
+ /**
+ * Generation number. Used to indicate a valid descriptor in ring. When
+ * a new descriptor is written into the ring, software must toggle this
+ * bit. The net effect is that the GEN bit being written into new
+ * descriptors toggles each time the ring tail pointer wraps.
+ */
+ uint_reg_t gen : 1;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t gen : 1;
+ uint_reg_t __reserved_0 : 1;
+ uint_reg_t xsize : 14;
+ uint_reg_t smod : 1;
+ uint_reg_t notif : 1;
+ uint_reg_t c : 1;
+ uint_reg_t bsz : 3;
+ int_reg_t va : 42;
+#endif
+
+ /* Word 1 */
+
+#ifndef __BIG_ENDIAN__
+ /** IO-side address */
+ uint_reg_t io_address : 64;
+#else /* __BIG_ENDIAN__ */
+ uint_reg_t io_address : 64;
+#endif
+
+ };
+
+ /** Word access */
+ uint_reg_t words[2];
+} TRIO_DMA_DESC_t;
+#endif /* !defined(__ASSEMBLER__) */
+
+#endif /* !defined(__ARCH_TRIO_SHM_H__) */
diff --git a/arch/tile/include/arch/trio_shm_def.h b/arch/tile/include/arch/trio_shm_def.h
new file mode 100644
index 000000000000..72a59c88b06a
--- /dev/null
+++ b/arch/tile/include/arch/trio_shm_def.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_TRIO_SHM_DEF_H__
+#define __ARCH_TRIO_SHM_DEF_H__
+#endif /* !defined(__ARCH_TRIO_SHM_DEF_H__) */
diff --git a/arch/tile/include/arch/usb_host.h b/arch/tile/include/arch/usb_host.h
new file mode 100644
index 000000000000..d09f32683962
--- /dev/null
+++ b/arch/tile/include/arch/usb_host.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_USB_HOST_H__
+#define __ARCH_USB_HOST_H__
+
+#include <arch/abi.h>
+#include <arch/usb_host_def.h>
+
+#ifndef __ASSEMBLER__
+#endif /* !defined(__ASSEMBLER__) */
+
+#endif /* !defined(__ARCH_USB_HOST_H__) */
diff --git a/arch/tile/include/arch/usb_host_def.h b/arch/tile/include/arch/usb_host_def.h
new file mode 100644
index 000000000000..aeed7753e8e1
--- /dev/null
+++ b/arch/tile/include/arch/usb_host_def.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* Machine-generated file; do not edit. */
+
+#ifndef __ARCH_USB_HOST_DEF_H__
+#define __ARCH_USB_HOST_DEF_H__
+#endif /* !defined(__ARCH_USB_HOST_DEF_H__) */
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 143473e3a0bb..fb7c65ae8de0 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -9,7 +9,6 @@ header-y += hardwall.h
generic-y += bug.h
generic-y += bugs.h
generic-y += cputime.h
-generic-y += device.h
generic-y += div64.h
generic-y += emergency-restart.h
generic-y += errno.h
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 392e5333dd8b..a9a529964e07 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -27,11 +27,17 @@
#define L2_CACHE_ALIGN(x) (((x)+(L2_CACHE_BYTES-1)) & -L2_CACHE_BYTES)
/*
- * TILE-Gx is fully coherent so we don't need to define ARCH_DMA_MINALIGN.
+ * TILEPro I/O is not always coherent (networking typically uses coherent
+ * I/O, but PCI traffic does not) and setting ARCH_DMA_MINALIGN to the
+ * L2 cacheline size helps ensure that kernel heap allocations are aligned.
+ * TILE-Gx I/O is always coherent when used on hash-for-home pages.
+ *
+ * However, it's possible at runtime to request not to use hash-for-home
+ * for the kernel heap, in which case the kernel will use flush-and-inval
+ * to manage coherence. As a result, we use L2_CACHE_BYTES for the
+ * DMA minimum alignment to avoid false sharing in the kernel heap.
*/
-#ifndef __tilegx__
#define ARCH_DMA_MINALIGN L2_CACHE_BYTES
-#endif
/* use the cache line size for the L2, which is where it counts */
#define SMP_CACHE_BYTES_SHIFT L2_CACHE_SHIFT
diff --git a/arch/tile/include/asm/checksum.h b/arch/tile/include/asm/checksum.h
index a120766c7264..b21a2fdec9f7 100644
--- a/arch/tile/include/asm/checksum.h
+++ b/arch/tile/include/asm/checksum.h
@@ -21,4 +21,22 @@
__wsum do_csum(const unsigned char *buff, int len);
#define do_csum do_csum
+/*
+ * Return the sum of all the 16-bit subwords in a long.
+ * This sums two subwords on a 32-bit machine, and four on 64 bits.
+ * The implementation does two vector adds to capture any overflow.
+ */
+static inline unsigned int csum_long(unsigned long x)
+{
+ unsigned long ret;
+#ifdef __tilegx__
+ ret = __insn_v2sadu(x, 0);
+ ret = __insn_v2sadu(ret, 0);
+#else
+ ret = __insn_sadh_u(x, 0);
+ ret = __insn_sadh_u(ret, 0);
+#endif
+ return ret;
+}
+
#endif /* _ASM_TILE_CHECKSUM_H */
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h
new file mode 100644
index 000000000000..5182705bd056
--- /dev/null
+++ b/arch/tile/include/asm/device.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ * Arch specific extensions to struct device
+ */
+
+#ifndef _ASM_TILE_DEVICE_H
+#define _ASM_TILE_DEVICE_H
+
+struct dev_archdata {
+ /* DMA operations on that device */
+ struct dma_map_ops *dma_ops;
+
+ /* Offset of the DMA address from the PA. */
+ dma_addr_t dma_offset;
+
+ /* Highest DMA address that can be generated by this device. */
+ dma_addr_t max_direct_dma_addr;
+};
+
+struct pdev_archdata {
+};
+
+#endif /* _ASM_TILE_DEVICE_H */
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index eaa06d175b39..4b6247d1a315 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -20,69 +20,80 @@
#include <linux/cache.h>
#include <linux/io.h>
-/*
- * Note that on x86 and powerpc, there is a "struct dma_mapping_ops"
- * that is used for all the DMA operations. For now, we don't have an
- * equivalent on tile, because we only have a single way of doing DMA.
- * (Tilera bug 7994 to use dma_mapping_ops.)
- */
+extern struct dma_map_ops *tile_dma_map_ops;
+extern struct dma_map_ops *gx_pci_dma_map_ops;
+extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dma_ops)
+ return dev->archdata.dma_ops;
+ else
+ return tile_dma_map_ops;
+}
+
+static inline dma_addr_t get_dma_offset(struct device *dev)
+{
+ return dev->archdata.dma_offset;
+}
+
+static inline void set_dma_offset(struct device *dev, dma_addr_t off)
+{
+ dev->archdata.dma_offset = off;
+}
-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction);
-extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction);
-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction);
-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nhwentries, enum dma_data_direction);
-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction);
-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
- size_t size, enum dma_data_direction);
-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction);
-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
- int nelems, enum dma_data_direction);
-
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
-extern void dma_sync_single_for_cpu(struct device *, dma_addr_t, size_t,
- enum dma_data_direction);
-extern void dma_sync_single_for_device(struct device *, dma_addr_t,
- size_t, enum dma_data_direction);
-extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t,
- unsigned long offset, size_t,
- enum dma_data_direction);
-extern void dma_sync_single_range_for_device(struct device *, dma_addr_t,
- unsigned long offset, size_t,
- enum dma_data_direction);
-extern void dma_cache_sync(struct device *dev, void *vaddr, size_t,
- enum dma_data_direction);
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr + get_dma_offset(dev);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+ return daddr - get_dma_offset(dev);
+}
+
+static inline void dma_mark_clean(void *addr, size_t size) {}
+
+#include <asm-generic/dma-mapping-common.h>
+
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+ dev->archdata.dma_ops = ops;
+}
+
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+{
+ if (!dev->dma_mask)
+ return 0;
+
+ return addr + size - 1 <= *dev->dma_mask;
+}
static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- return 0;
+ return get_dma_ops(dev)->mapping_error(dev, dma_addr);
}
static inline int
dma_supported(struct device *dev, u64 mask)
{
- return 1;
+ return get_dma_ops(dev)->dma_supported(dev, mask);
}
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ /* Handle legacy PCI devices with limited memory addressability. */
+ if ((dma_ops == gx_pci_dma_map_ops) && (mask <= DMA_BIT_MASK(32))) {
+ set_dma_ops(dev, gx_legacy_pci_dma_map_ops);
+ set_dma_offset(dev, 0);
+ if (mask > dev->archdata.max_direct_dma_addr)
+ mask = dev->archdata.max_direct_dma_addr;
+ }
+
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;
@@ -91,4 +102,43 @@ dma_set_mask(struct device *dev, u64 mask)
return 0;
}
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
+
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+
+ return cpu_addr;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+
+ dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+#define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
+#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
+
+/*
+ * dma_alloc_noncoherent() is #defined to return coherent memory,
+ * so there's no need to do any flushing here.
+ */
+static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+}
+
#endif /* _ASM_TILE_DMA_MAPPING_H */
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h
index c66f7933beaa..e16dbf929cb5 100644
--- a/arch/tile/include/asm/fixmap.h
+++ b/arch/tile/include/asm/fixmap.h
@@ -45,15 +45,23 @@
*
* TLB entries of such buffers will not be flushed across
* task switches.
- *
- * We don't bother with a FIX_HOLE since above the fixmaps
- * is unmapped memory in any case.
*/
enum fixed_addresses {
+#ifdef __tilegx__
+ /*
+ * TILEPro has unmapped memory above so the hole isn't needed,
+ * and in any case the hole pushes us over a single 16MB pmd.
+ */
+ FIX_HOLE,
+#endif
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
#endif
+#ifdef __tilegx__ /* see homecache.c */
+ FIX_HOMECACHE_BEGIN,
+ FIX_HOMECACHE_END = FIX_HOMECACHE_BEGIN+(NR_CPUS)-1,
+#endif
__end_of_permanent_fixed_addresses,
/*
diff --git a/arch/tile/include/asm/homecache.h b/arch/tile/include/asm/homecache.h
index a8243865d49e..7b7771328642 100644
--- a/arch/tile/include/asm/homecache.h
+++ b/arch/tile/include/asm/homecache.h
@@ -79,10 +79,17 @@ extern void homecache_change_page_home(struct page *, int order, int home);
/*
* Flush a page out of whatever cache(s) it is in.
* This is more than just finv, since it properly handles waiting
- * for the data to reach memory on tilepro, but it can be quite
- * heavyweight, particularly on hash-for-home memory.
+ * for the data to reach memory, but it can be quite
+ * heavyweight, particularly on incoherent or immutable memory.
*/
-extern void homecache_flush_cache(struct page *, int order);
+extern void homecache_finv_page(struct page *);
+
+/*
+ * Flush a page out of the specified home cache.
+ * Note that the specified home need not be the actual home of the page,
+ * as for example might be the case when coordinating with I/O devices.
+ */
+extern void homecache_finv_map_page(struct page *, int home);
/*
* Allocate a page with the given GFP flags, home, and optionally
@@ -104,10 +111,10 @@ extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
* routines use homecache_change_page_home() to reset the home
* back to the default before returning the page to the allocator.
*/
+void __homecache_free_pages(struct page *, unsigned int order);
void homecache_free_pages(unsigned long addr, unsigned int order);
-#define homecache_free_page(page) \
- homecache_free_pages((page), 0)
-
+#define __homecache_free_page(page) __homecache_free_pages((page), 0)
+#define homecache_free_page(page) homecache_free_pages((page), 0)
/*
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index d2152deb1f3c..2a9b293fece6 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -62,6 +62,92 @@ extern void iounmap(volatile void __iomem *addr);
#define mm_ptov(addr) ((void *)phys_to_virt(addr))
#define mm_vtop(addr) ((unsigned long)virt_to_phys(addr))
+#if CHIP_HAS_MMIO()
+
+/*
+ * We use inline assembly to guarantee that the compiler does not
+ * split an access into multiple byte-sized accesses as it might
+ * sometimes do if a register data structure is marked "packed".
+ * Obviously on tile we can't tolerate such an access being
+ * actually unaligned, but we want to avoid the case where the
+ * compiler conservatively would generate multiple accesses even
+ * for an aligned read or write.
+ */
+
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return *(const volatile u8 __force *)addr;
+}
+
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 ret;
+ asm volatile("ld2u %0, %1" : "=r" (ret) : "r" (addr));
+ barrier();
+ return le16_to_cpu(ret);
+}
+
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 ret;
+ /* Sign-extend to conform to u32 ABI sign-extension convention. */
+ asm volatile("ld4s %0, %1" : "=r" (ret) : "r" (addr));
+ barrier();
+ return le32_to_cpu(ret);
+}
+
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ u64 ret;
+ asm volatile("ld %0, %1" : "=r" (ret) : "r" (addr));
+ barrier();
+ return le64_to_cpu(ret);
+}
+
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+ *(volatile u8 __force *)addr = val;
+}
+
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("st2 %0, %1" :: "r" (addr), "r" (cpu_to_le16(val)));
+}
+
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("st4 %0, %1" :: "r" (addr), "r" (cpu_to_le32(val)));
+}
+
+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+{
+ asm volatile("st %0, %1" :: "r" (addr), "r" (cpu_to_le64(val)));
+}
+
+/*
+ * The on-chip I/O hardware on tilegx is configured with VA=PA for the
+ * kernel's PA range. The low-level APIs and field names use "va" and
+ * "void *" nomenclature, to be consistent with the general notion
+ * that the addresses in question are virtualizable, but in the kernel
+ * context we are actually manipulating PA values. (In other contexts,
+ * e.g. access from user space, we do in fact use real virtual addresses
+ * in the va fields.) To allow readers of the code to understand what's
+ * happening, we direct their attention to this comment by using the
+ * following two functions that just duplicate __va() and __pa().
+ */
+typedef unsigned long tile_io_addr_t;
+static inline tile_io_addr_t va_to_tile_io_addr(void *va)
+{
+ BUILD_BUG_ON(sizeof(phys_addr_t) != sizeof(tile_io_addr_t));
+ return __pa(va);
+}
+static inline void *tile_io_addr_to_va(tile_io_addr_t tile_io_addr)
+{
+ return __va(tile_io_addr);
+}
+
+#else /* CHIP_HAS_MMIO() */
+
#ifdef CONFIG_PCI
extern u8 _tile_readb(unsigned long addr);
@@ -73,10 +159,19 @@ extern void _tile_writew(u16 val, unsigned long addr);
extern void _tile_writel(u32 val, unsigned long addr);
extern void _tile_writeq(u64 val, unsigned long addr);
-#else
+#define __raw_readb(addr) _tile_readb((unsigned long)addr)
+#define __raw_readw(addr) _tile_readw((unsigned long)addr)
+#define __raw_readl(addr) _tile_readl((unsigned long)addr)
+#define __raw_readq(addr) _tile_readq((unsigned long)addr)
+#define __raw_writeb(val, addr) _tile_writeb(val, (unsigned long)addr)
+#define __raw_writew(val, addr) _tile_writew(val, (unsigned long)addr)
+#define __raw_writel(val, addr) _tile_writel(val, (unsigned long)addr)
+#define __raw_writeq(val, addr) _tile_writeq(val, (unsigned long)addr)
+
+#else /* CONFIG_PCI */
/*
- * The Tile architecture does not support IOMEM unless PCI is enabled.
+ * The tilepro architecture does not support IOMEM unless PCI is enabled.
* Unfortunately we can't yet simply not declare these methods,
* since some generic code that compiles into the kernel, but
* we never run, uses them unconditionally.
@@ -88,65 +183,58 @@ static inline int iomem_panic(void)
return 0;
}
-static inline u8 _tile_readb(unsigned long addr)
+static inline u8 readb(unsigned long addr)
{
return iomem_panic();
}
-static inline u16 _tile_readw(unsigned long addr)
+static inline u16 _readw(unsigned long addr)
{
return iomem_panic();
}
-static inline u32 _tile_readl(unsigned long addr)
+static inline u32 readl(unsigned long addr)
{
return iomem_panic();
}
-static inline u64 _tile_readq(unsigned long addr)
+static inline u64 readq(unsigned long addr)
{
return iomem_panic();
}
-static inline void _tile_writeb(u8 val, unsigned long addr)
+static inline void writeb(u8 val, unsigned long addr)
{
iomem_panic();
}
-static inline void _tile_writew(u16 val, unsigned long addr)
+static inline void writew(u16 val, unsigned long addr)
{
iomem_panic();
}
-static inline void _tile_writel(u32 val, unsigned long addr)
+static inline void writel(u32 val, unsigned long addr)
{
iomem_panic();
}
-static inline void _tile_writeq(u64 val, unsigned long addr)
+static inline void writeq(u64 val, unsigned long addr)
{
iomem_panic();
}
-#endif
+#endif /* CONFIG_PCI */
+
+#endif /* CHIP_HAS_MMIO() */
-#define readb(addr) _tile_readb((unsigned long)addr)
-#define readw(addr) _tile_readw((unsigned long)addr)
-#define readl(addr) _tile_readl((unsigned long)addr)
-#define readq(addr) _tile_readq((unsigned long)addr)
-#define writeb(val, addr) _tile_writeb(val, (unsigned long)addr)
-#define writew(val, addr) _tile_writew(val, (unsigned long)addr)
-#define writel(val, addr) _tile_writel(val, (unsigned long)addr)
-#define writeq(val, addr) _tile_writeq(val, (unsigned long)addr)
-
-#define __raw_readb readb
-#define __raw_readw readw
-#define __raw_readl readl
-#define __raw_readq readq
-#define __raw_writeb writeb
-#define __raw_writew writew
-#define __raw_writel writel
-#define __raw_writeq writeq
+#define readb __raw_readb
+#define readw __raw_readw
+#define readl __raw_readl
+#define readq __raw_readq
+#define writeb __raw_writeb
+#define writew __raw_writew
+#define writel __raw_writel
+#define writeq __raw_writeq
#define readb_relaxed readb
#define readw_relaxed readw
diff --git a/arch/tile/include/asm/memprof.h b/arch/tile/include/asm/memprof.h
deleted file mode 100644
index 359949be28c1..000000000000
--- a/arch/tile/include/asm/memprof.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2010 Tilera Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation, version 2.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for
- * more details.
- *
- * The hypervisor's memory controller profiling infrastructure allows
- * the programmer to find out what fraction of the available memory
- * bandwidth is being consumed at each memory controller. The
- * profiler provides start, stop, and clear operations to allows
- * profiling over a specific time window, as well as an interface for
- * reading the most recent profile values.
- *
- * This header declares IOCTL codes necessary to control memprof.
- */
-#ifndef _ASM_TILE_MEMPROF_H
-#define _ASM_TILE_MEMPROF_H
-
-#include <linux/ioctl.h>
-
-#define MEMPROF_IOCTL_TYPE 0xB4
-#define MEMPROF_IOCTL_START _IO(MEMPROF_IOCTL_TYPE, 0)
-#define MEMPROF_IOCTL_STOP _IO(MEMPROF_IOCTL_TYPE, 1)
-#define MEMPROF_IOCTL_CLEAR _IO(MEMPROF_IOCTL_TYPE, 2)
-
-#endif /* _ASM_TILE_MEMPROF_H */
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index 9d9131e5c552..dd033a4fd627 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -174,7 +174,9 @@ static inline __attribute_const__ int get_order(unsigned long size)
#define MEM_LOW_END (HALF_VA_SPACE - 1) /* low half */
#define MEM_HIGH_START (-HALF_VA_SPACE) /* high half */
#define PAGE_OFFSET MEM_HIGH_START
-#define _VMALLOC_START _AC(0xfffffff500000000, UL) /* 4 GB */
+#define FIXADDR_BASE _AC(0xfffffff400000000, UL) /* 4 GB */
+#define FIXADDR_TOP _AC(0xfffffff500000000, UL) /* 4 GB */
+#define _VMALLOC_START FIXADDR_TOP
#define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL) /* 4 GB */
#define MEM_SV_START _AC(0xfffffff700000000, UL) /* 256 MB */
#define MEM_SV_INTRPT MEM_SV_START
@@ -185,9 +187,6 @@ static inline __attribute_const__ int get_order(unsigned long size)
/* Highest DTLB address we will use */
#define KERNEL_HIGH_VADDR MEM_SV_START
-/* Since we don't currently provide any fixmaps, we use an impossible VA. */
-#define FIXADDR_TOP MEM_HV_START
-
#else /* !__tilegx__ */
/*
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
index 32e6cbe8dff3..302cdf71ceed 100644
--- a/arch/tile/include/asm/pci.h
+++ b/arch/tile/include/asm/pci.h
@@ -15,9 +15,13 @@
#ifndef _ASM_TILE_PCI_H
#define _ASM_TILE_PCI_H
+#include <linux/dma-mapping.h>
#include <linux/pci.h>
+#include <linux/numa.h>
#include <asm-generic/pci_iomap.h>
+#ifndef __tilegx__
+
/*
* Structure of a PCI controller (host bridge)
*/
@@ -41,21 +45,151 @@ struct pci_controller {
};
/*
+ * This flag tells if the platform is TILEmpower that needs
+ * special configuration for the PLX switch chip.
+ */
+extern int tile_plx_gen1;
+
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
+
+#define TILE_NUM_PCIE 2
+
+/*
* The hypervisor maps the entirety of CPA-space as bus addresses, so
* bus addresses are physical addresses. The networking and block
* device layers use this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS 1
+/* generic pci stuff */
+#include <asm-generic/pci.h>
+
+#else
+
+#include <asm/page.h>
+#include <gxio/trio.h>
+
+/**
+ * We reserve the hugepage-size address range at the top of the 64-bit address
+ * space to serve as the PCI window, emulating the BAR0 space of an endpoint
+ * device. This window is used by the chip-to-chip applications running on
+ * the RC node. The reason for carving out this window is that Mem-Maps that
+ * back up this window will not overlap with those that map the real physical
+ * memory.
+ */
+#define PCIE_HOST_BAR0_SIZE HPAGE_SIZE
+#define PCIE_HOST_BAR0_START HPAGE_MASK
+
+/**
+ * The first PAGE_SIZE of the above "BAR" window is mapped to the
+ * gxpci_host_regs structure.
+ */
+#define PCIE_HOST_REGS_SIZE PAGE_SIZE
+
+/*
+ * This is the PCI address where the Mem-Map interrupt regions start.
+ * We use the 2nd to the last huge page of the 64-bit address space.
+ * The last huge page is used for the rootcomplex "bar", for C2C purpose.
+ */
+#define MEM_MAP_INTR_REGIONS_BASE (HPAGE_MASK - HPAGE_SIZE)
+
+/*
+ * Each Mem-Map interrupt region occupies 4KB.
+ */
+#define MEM_MAP_INTR_REGION_SIZE (1 << TRIO_MAP_MEM_LIM__ADDR_SHIFT)
+
+/*
+ * Allocate the PCI BAR window right below 4GB.
+ */
+#define TILE_PCI_BAR_WINDOW_TOP (1ULL << 32)
+
+/*
+ * Allocate 1GB for the PCI BAR window.
+ */
+#define TILE_PCI_BAR_WINDOW_SIZE (1 << 30)
+
+/*
+ * This is the highest bus address targeting the host memory that
+ * can be generated by legacy PCI devices with 32-bit or less
+ * DMA capability, dictated by the BAR window size and location.
+ */
+#define TILE_PCI_MAX_DIRECT_DMA_ADDRESS \
+ (TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE - 1)
+
+/*
+ * We shift the PCI bus range for all the physical memory up by the whole PA
+ * range. The corresponding CPA of an incoming PCI request will be the PCI
+ * address minus TILE_PCI_MEM_MAP_BASE_OFFSET. This also implies
+ * that the 64-bit capable devices will be given DMA addresses as
+ * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit
+ * devices, we create a separate map region that handles the low
+ * 4GB.
+ */
+#define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH())
+
+/*
+ * Start of the PCI memory resource, which starts at the end of the
+ * maximum system physical RAM address.
+ */
+#define TILE_PCI_MEM_START (1ULL << CHIP_PA_WIDTH())
+
+/*
+ * Structure of a PCI controller (host bridge) on Gx.
+ */
+struct pci_controller {
+
+ /* Pointer back to the TRIO that this PCIe port is connected to. */
+ gxio_trio_context_t *trio;
+ int mac; /* PCIe mac index on the TRIO shim */
+ int trio_index; /* Index of TRIO shim that contains the MAC. */
+
+ int pio_mem_index; /* PIO region index for memory access */
+
+ /*
+ * Mem-Map regions for all the memory controllers so that Linux can
+ * map all of its physical memory space to the PCI bus.
+ */
+ int mem_maps[MAX_NUMNODES];
+
+ int index; /* PCI domain number */
+ struct pci_bus *root_bus;
+
+ /* PCI memory space resource for this controller. */
+ struct resource mem_space;
+ char mem_space_name[32];
+
+ uint64_t mem_offset; /* cpu->bus memory mapping offset. */
+
+ int first_busno;
+
+ struct pci_ops *ops;
+
+ /* Table that maps the INTx numbers to Linux irq numbers. */
+ int irq_intx_table[4];
+
+ /* Address ranges that are routed to this controller/bridge. */
+ struct resource mem_resources[3];
+};
+
+extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
+extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
+
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
+/*
+ * The PCI address space does not equal the physical memory address
+ * space (we have an IOMMU). The IDE and SCSI device layers use this
+ * boolean for bounce buffer decisions.
+ */
+#define PCI_DMA_BUS_IS_PHYS 0
+
+#endif /* __tilegx__ */
+
int __init tile_pci_init(void);
int __init pcibios_init(void);
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
-
void __devinit pcibios_fixup_bus(struct pci_bus *bus);
-#define TILE_NUM_PCIE 2
-
#define pci_domain_nr(bus) (((struct pci_controller *)(bus)->sysdata)->index)
/*
@@ -79,19 +213,10 @@ static inline int pcibios_assign_all_busses(void)
#define PCIBIOS_MIN_MEM 0
#define PCIBIOS_MIN_IO 0
-/*
- * This flag tells if the platform is TILEmpower that needs
- * special configuration for the PLX switch chip.
- */
-extern int tile_plx_gen1;
-
/* Use any cpu for PCI. */
#define cpumask_of_pcibus(bus) cpu_online_mask
/* implement the pci_ DMA API in terms of the generic device dma_ one */
#include <asm-generic/pci-dma-compat.h>
-/* generic pci stuff */
-#include <asm-generic/pci.h>
-
#endif /* _ASM_TILE_PCI_H */
diff --git a/arch/tile/include/gxio/common.h b/arch/tile/include/gxio/common.h
new file mode 100644
index 000000000000..724595a24d04
--- /dev/null
+++ b/arch/tile/include/gxio/common.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _GXIO_COMMON_H_
+#define _GXIO_COMMON_H_
+
+/*
+ * Routines shared between the various GXIO device components.
+ */
+
+#include <hv/iorpc.h>
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/io.h>
+
+/* Define the standard gxio MMIO functions using kernel functions. */
+#define __gxio_mmio_read8(addr) readb(addr)
+#define __gxio_mmio_read16(addr) readw(addr)
+#define __gxio_mmio_read32(addr) readl(addr)
+#define __gxio_mmio_read64(addr) readq(addr)
+#define __gxio_mmio_write8(addr, val) writeb((val), (addr))
+#define __gxio_mmio_write16(addr, val) writew((val), (addr))
+#define __gxio_mmio_write32(addr, val) writel((val), (addr))
+#define __gxio_mmio_write64(addr, val) writeq((val), (addr))
+#define __gxio_mmio_read(addr) __gxio_mmio_read64(addr)
+#define __gxio_mmio_write(addr, val) __gxio_mmio_write64((addr), (val))
+
+#endif /* !_GXIO_COMMON_H_ */
diff --git a/arch/tile/include/gxio/dma_queue.h b/arch/tile/include/gxio/dma_queue.h
new file mode 100644
index 000000000000..00654feb7db0
--- /dev/null
+++ b/arch/tile/include/gxio/dma_queue.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _GXIO_DMA_QUEUE_H_
+#define _GXIO_DMA_QUEUE_H_
+
+/*
+ * DMA queue management APIs shared between TRIO and mPIPE.
+ */
+
+#include "common.h"
+
+/* The credit counter lives in the high 32 bits. */
+#define DMA_QUEUE_CREDIT_SHIFT 32
+
+/*
+ * State object that tracks a DMA queue's head and tail indices, as
+ * well as the number of commands posted and completed. The
+ * structure is accessed via a thread-safe, lock-free algorithm.
+ */
+typedef struct {
+ /*
+ * Address of a MPIPE_EDMA_POST_REGION_VAL_t,
+ * TRIO_PUSH_DMA_REGION_VAL_t, or TRIO_PULL_DMA_REGION_VAL_t
+ * register. These register have identical encodings and provide
+ * information about how many commands have been processed.
+ */
+ void *post_region_addr;
+
+ /*
+ * A lazily-updated count of how many edescs the hardware has
+ * completed.
+ */
+ uint64_t hw_complete_count __attribute__ ((aligned(64)));
+
+ /*
+ * High 32 bits are a count of available egress command credits,
+ * low 24 bits are the next egress "slot".
+ */
+ int64_t credits_and_next_index;
+
+} __gxio_dma_queue_t;
+
+/* Initialize a dma queue. */
+extern void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
+ void *post_region_addr,
+ unsigned int num_entries);
+
+/*
+ * Update the "credits_and_next_index" and "hw_complete_count" fields
+ * based on pending hardware completions. Note that some other thread
+ * may have already done this and, importantly, may still be in the
+ * process of updating "credits_and_next_index".
+ */
+extern void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue);
+
+/* Wait for credits to become available. */
+extern int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
+ int64_t modifier);
+
+/* Reserve slots in the queue, optionally waiting for slots to become
+ * available, and optionally returning a "completion_slot" suitable for
+ * direct comparison to "hw_complete_count".
+ */
+static inline int64_t __gxio_dma_queue_reserve(__gxio_dma_queue_t *dma_queue,
+ unsigned int num, bool wait,
+ bool completion)
+{
+ uint64_t slot;
+
+ /*
+ * Try to reserve 'num' egress command slots. We do this by
+ * constructing a constant that subtracts N credits and adds N to
+ * the index, and using fetchaddgez to only apply it if the credits
+ * count doesn't go negative.
+ */
+ int64_t modifier = (((int64_t)(-num)) << DMA_QUEUE_CREDIT_SHIFT) | num;
+ int64_t old =
+ __insn_fetchaddgez(&dma_queue->credits_and_next_index,
+ modifier);
+
+ if (unlikely(old + modifier < 0)) {
+ /*
+ * We're out of credits. Try once to get more by checking for
+ * completed egress commands. If that fails, wait or fail.
+ */
+ __gxio_dma_queue_update_credits(dma_queue);
+ old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
+ modifier);
+ if (old + modifier < 0) {
+ if (wait)
+ old = __gxio_dma_queue_wait_for_credits
+ (dma_queue, modifier);
+ else
+ return GXIO_ERR_DMA_CREDITS;
+ }
+ }
+
+ /* The bottom 24 bits of old encode the "slot". */
+ slot = (old & 0xffffff);
+
+ if (completion) {
+ /*
+ * A "completion_slot" is a "slot" which can be compared to
+ * "hw_complete_count" at any time in the future. To convert
+ * "slot" into a "completion_slot", we access "hw_complete_count"
+ * once (knowing that we have reserved a slot, and thus, it will
+ * be "basically" accurate), and combine its high 40 bits with
+ * the 24 bit "slot", and handle "wrapping" by adding "1 << 24"
+ * if the result is LESS than "hw_complete_count".
+ */
+ uint64_t complete;
+ complete = ACCESS_ONCE(dma_queue->hw_complete_count);
+ slot |= (complete & 0xffffffffff000000);
+ if (slot < complete)
+ slot += 0x1000000;
+ }
+
+ /*
+ * If any of our slots mod 256 were equivalent to 0, go ahead and
+ * collect some egress credits, and update "hw_complete_count", and
+ * make sure the index doesn't overflow into the credits.
+ */
+ if (unlikely(((old + num) & 0xff) < num)) {
+ __gxio_dma_queue_update_credits(dma_queue);
+
+ /* Make sure the index doesn't overflow into the credits. */
+#ifdef __BIG_ENDIAN__
+ *(((uint8_t *)&dma_queue->credits_and_next_index) + 4) = 0;
+#else
+ *(((uint8_t *)&dma_queue->credits_and_next_index) + 3) = 0;
+#endif
+ }
+
+ return slot;
+}
+
+/* Non-inlinable "__gxio_dma_queue_reserve(..., true)". */
+extern int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
+ unsigned int num, int wait);
+
+/* Check whether a particular "completion slot" has completed.
+ *
+ * Note that this function requires a "completion slot", and thus
+ * cannot be used with the result of any "reserve_fast" function.
+ */
+extern int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
+ int64_t completion_slot, int update);
+
+#endif /* !_GXIO_DMA_QUEUE_H_ */
diff --git a/arch/tile/include/gxio/iorpc_globals.h b/arch/tile/include/gxio/iorpc_globals.h
new file mode 100644
index 000000000000..52c721f8dad9
--- /dev/null
+++ b/arch/tile/include/gxio/iorpc_globals.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#ifndef __IORPC_LINUX_RPC_H__
+#define __IORPC_LINUX_RPC_H__
+
+#include <hv/iorpc.h>
+
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/pgtable.h>
+
+#define IORPC_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000)
+#define IORPC_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001)
+#define IORPC_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
+#define IORPC_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
+
+int __iorpc_arm_pollfd(int fd, int pollfd_cookie);
+
+int __iorpc_close_pollfd(int fd, int pollfd_cookie);
+
+int __iorpc_get_mmio_base(int fd, HV_PTE *base);
+
+int __iorpc_check_mmio_offset(int fd, unsigned long offset, unsigned long size);
+
+#endif /* !__IORPC_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h
new file mode 100644
index 000000000000..9d50fce1b1a7
--- /dev/null
+++ b/arch/tile/include/gxio/iorpc_mpipe.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#ifndef __GXIO_MPIPE_LINUX_RPC_H__
+#define __GXIO_MPIPE_LINUX_RPC_H__
+
+#include <hv/iorpc.h>
+
+#include <hv/drv_mpipe_intf.h>
+#include <asm/page.h>
+#include <gxio/kiorpc.h>
+#include <gxio/mpipe.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/pgtable.h>
+
+#define GXIO_MPIPE_OP_ALLOC_BUFFER_STACKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1200)
+#define GXIO_MPIPE_OP_INIT_BUFFER_STACK_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x1201)
+
+#define GXIO_MPIPE_OP_ALLOC_NOTIF_RINGS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1203)
+#define GXIO_MPIPE_OP_INIT_NOTIF_RING_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x1204)
+#define GXIO_MPIPE_OP_REQUEST_NOTIF_RING_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1205)
+#define GXIO_MPIPE_OP_ENABLE_NOTIF_RING_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1206)
+#define GXIO_MPIPE_OP_ALLOC_NOTIF_GROUPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1207)
+#define GXIO_MPIPE_OP_INIT_NOTIF_GROUP IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1208)
+#define GXIO_MPIPE_OP_ALLOC_BUCKETS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1209)
+#define GXIO_MPIPE_OP_INIT_BUCKET IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120a)
+#define GXIO_MPIPE_OP_ALLOC_EDMA_RINGS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120b)
+#define GXIO_MPIPE_OP_INIT_EDMA_RING_AUX IORPC_OPCODE(IORPC_FORMAT_KERNEL_MEM, 0x120c)
+
+#define GXIO_MPIPE_OP_COMMIT_RULES IORPC_OPCODE(IORPC_FORMAT_NONE, 0x120f)
+#define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210)
+#define GXIO_MPIPE_OP_LINK_OPEN_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211)
+#define GXIO_MPIPE_OP_LINK_CLOSE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212)
+
+#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e)
+#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f)
+#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220)
+#define GXIO_MPIPE_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000)
+#define GXIO_MPIPE_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001)
+#define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
+#define GXIO_MPIPE_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
+
+int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags);
+
+int gxio_mpipe_init_buffer_stack_aux(gxio_mpipe_context_t * context,
+ void *mem_va, size_t mem_size,
+ unsigned int mem_flags, unsigned int stack,
+ unsigned int buffer_size_enum);
+
+
+int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags);
+
+int gxio_mpipe_init_notif_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+ size_t mem_size, unsigned int mem_flags,
+ unsigned int ring);
+
+int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t * context,
+ int inter_x, int inter_y,
+ int inter_ipi, int inter_event,
+ unsigned int ring);
+
+int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t * context,
+ unsigned int ring);
+
+int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags);
+
+int gxio_mpipe_init_notif_group(gxio_mpipe_context_t * context,
+ unsigned int group,
+ gxio_mpipe_notif_group_bits_t bits);
+
+int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t * context, unsigned int count,
+ unsigned int first, unsigned int flags);
+
+int gxio_mpipe_init_bucket(gxio_mpipe_context_t * context, unsigned int bucket,
+ MPIPE_LBL_INIT_DAT_BSTS_TBL_t bucket_info);
+
+int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags);
+
+int gxio_mpipe_init_edma_ring_aux(gxio_mpipe_context_t * context, void *mem_va,
+ size_t mem_size, unsigned int mem_flags,
+ unsigned int ring, unsigned int channel);
+
+
+int gxio_mpipe_commit_rules(gxio_mpipe_context_t * context, const void *blob,
+ size_t blob_size);
+
+int gxio_mpipe_register_client_memory(gxio_mpipe_context_t * context,
+ unsigned int iotlb, HV_PTE pte,
+ unsigned int flags);
+
+int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
+ _gxio_mpipe_link_name_t name, unsigned int flags);
+
+int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac);
+
+
+int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
+ uint64_t * nsec, uint64_t * cycles);
+
+int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
+ uint64_t nsec, uint64_t cycles);
+
+int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
+ int64_t nsec);
+
+int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
+
+int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
+
+int gxio_mpipe_get_mmio_base(gxio_mpipe_context_t * context, HV_PTE *base);
+
+int gxio_mpipe_check_mmio_offset(gxio_mpipe_context_t * context,
+ unsigned long offset, unsigned long size);
+
+#endif /* !__GXIO_MPIPE_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h
new file mode 100644
index 000000000000..0bcf3f71ce8b
--- /dev/null
+++ b/arch/tile/include/gxio/iorpc_mpipe_info.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#ifndef __GXIO_MPIPE_INFO_LINUX_RPC_H__
+#define __GXIO_MPIPE_INFO_LINUX_RPC_H__
+
+#include <hv/iorpc.h>
+
+#include <hv/drv_mpipe_intf.h>
+#include <asm/page.h>
+#include <gxio/kiorpc.h>
+#include <gxio/mpipe.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/pgtable.h>
+
+
+#define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251)
+#define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
+#define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
+
+
+int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
+ unsigned int idx,
+ _gxio_mpipe_link_name_t * name,
+ _gxio_mpipe_link_mac_t * mac);
+
+int gxio_mpipe_info_get_mmio_base(gxio_mpipe_info_context_t * context,
+ HV_PTE *base);
+
+int gxio_mpipe_info_check_mmio_offset(gxio_mpipe_info_context_t * context,
+ unsigned long offset, unsigned long size);
+
+#endif /* !__GXIO_MPIPE_INFO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h
new file mode 100644
index 000000000000..15fb77992083
--- /dev/null
+++ b/arch/tile/include/gxio/iorpc_trio.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#ifndef __GXIO_TRIO_LINUX_RPC_H__
+#define __GXIO_TRIO_LINUX_RPC_H__
+
+#include <hv/iorpc.h>
+
+#include <hv/drv_trio_intf.h>
+#include <gxio/trio.h>
+#include <gxio/kiorpc.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/pgtable.h>
+
+#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400)
+
+#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1402)
+
+#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e)
+#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140f)
+
+#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1417)
+#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1418)
+#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1419)
+#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x141a)
+
+#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141c)
+#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141d)
+#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e)
+#define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
+#define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
+
+int gxio_trio_alloc_asids(gxio_trio_context_t * context, unsigned int count,
+ unsigned int first, unsigned int flags);
+
+
+int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags);
+
+
+int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context,
+ unsigned int count, unsigned int first,
+ unsigned int flags);
+
+int gxio_trio_init_pio_region_aux(gxio_trio_context_t * context,
+ unsigned int pio_region, unsigned int mac,
+ uint32_t bus_address_hi, unsigned int flags);
+
+
+int gxio_trio_init_memory_map_mmu_aux(gxio_trio_context_t * context,
+ unsigned int map, unsigned long va,
+ uint64_t size, unsigned int asid,
+ unsigned int mac, uint64_t bus_address,
+ unsigned int node,
+ unsigned int order_mode);
+
+int gxio_trio_get_port_property(gxio_trio_context_t * context,
+ struct pcie_trio_ports_property *trio_ports);
+
+int gxio_trio_config_legacy_intr(gxio_trio_context_t * context, int inter_x,
+ int inter_y, int inter_ipi, int inter_event,
+ unsigned int mac, unsigned int intx);
+
+int gxio_trio_config_msi_intr(gxio_trio_context_t * context, int inter_x,
+ int inter_y, int inter_ipi, int inter_event,
+ unsigned int mac, unsigned int mem_map,
+ uint64_t mem_map_base, uint64_t mem_map_limit,
+ unsigned int asid);
+
+
+int gxio_trio_set_mps_mrs(gxio_trio_context_t * context, uint16_t mps,
+ uint16_t mrs, unsigned int mac);
+
+int gxio_trio_force_rc_link_up(gxio_trio_context_t * context, unsigned int mac);
+
+int gxio_trio_force_ep_link_up(gxio_trio_context_t * context, unsigned int mac);
+
+int gxio_trio_get_mmio_base(gxio_trio_context_t * context, HV_PTE *base);
+
+int gxio_trio_check_mmio_offset(gxio_trio_context_t * context,
+ unsigned long offset, unsigned long size);
+
+#endif /* !__GXIO_TRIO_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/iorpc_usb_host.h b/arch/tile/include/gxio/iorpc_usb_host.h
new file mode 100644
index 000000000000..8622e7d126ad
--- /dev/null
+++ b/arch/tile/include/gxio/iorpc_usb_host.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/* This file is machine-generated; DO NOT EDIT! */
+#ifndef __GXIO_USB_HOST_LINUX_RPC_H__
+#define __GXIO_USB_HOST_LINUX_RPC_H__
+
+#include <hv/iorpc.h>
+
+#include <hv/drv_usb_host_intf.h>
+#include <asm/page.h>
+#include <gxio/kiorpc.h>
+#include <gxio/usb_host.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/pgtable.h>
+
+#define GXIO_USB_HOST_OP_CFG_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1800)
+#define GXIO_USB_HOST_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1801)
+#define GXIO_USB_HOST_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
+#define GXIO_USB_HOST_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
+
+int gxio_usb_host_cfg_interrupt(gxio_usb_host_context_t * context, int inter_x,
+ int inter_y, int inter_ipi, int inter_event);
+
+int gxio_usb_host_register_client_memory(gxio_usb_host_context_t * context,
+ HV_PTE pte, unsigned int flags);
+
+int gxio_usb_host_get_mmio_base(gxio_usb_host_context_t * context,
+ HV_PTE *base);
+
+int gxio_usb_host_check_mmio_offset(gxio_usb_host_context_t * context,
+ unsigned long offset, unsigned long size);
+
+#endif /* !__GXIO_USB_HOST_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/kiorpc.h b/arch/tile/include/gxio/kiorpc.h
new file mode 100644
index 000000000000..ee5820979ff3
--- /dev/null
+++ b/arch/tile/include/gxio/kiorpc.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Support routines for kernel IORPC drivers.
+ */
+
+#ifndef _GXIO_KIORPC_H
+#define _GXIO_KIORPC_H
+
+#include <linux/types.h>
+#include <asm/page.h>
+#include <arch/chip.h>
+
+#if CHIP_HAS_MMIO()
+void __iomem *iorpc_ioremap(int hv_fd, resource_size_t offset,
+ unsigned long size);
+#endif
+
+#endif /* _GXIO_KIORPC_H */
diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h
new file mode 100644
index 000000000000..78c598618c97
--- /dev/null
+++ b/arch/tile/include/gxio/mpipe.h
@@ -0,0 +1,1736 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _GXIO_MPIPE_H_
+#define _GXIO_MPIPE_H_
+
+/*
+ *
+ * An API for allocating, configuring, and manipulating mPIPE hardware
+ * resources.
+ */
+
+#include "common.h"
+#include "dma_queue.h"
+
+#include <linux/time.h>
+
+#include <arch/mpipe_def.h>
+#include <arch/mpipe_shm.h>
+
+#include <hv/drv_mpipe_intf.h>
+#include <hv/iorpc.h>
+
+/*
+ *
+ * The TILE-Gx mPIPE&tm; shim provides Ethernet connectivity, packet
+ * classification, and packet load balancing services. The
+ * gxio_mpipe_ API, declared in <gxio/mpipe.h>, allows applications to
+ * allocate mPIPE IO channels, configure packet distribution
+ * parameters, and send and receive Ethernet packets. The API is
+ * designed to be a minimal wrapper around the mPIPE hardware, making
+ * system calls only where necessary to preserve inter-process
+ * protection guarantees.
+ *
+ * The APIs described below allow the programmer to allocate and
+ * configure mPIPE resources. As described below, the mPIPE is a
+ * single shared hardware device that provides partitionable resources
+ * that are shared between all applications in the system. The
+ * gxio_mpipe_ API allows userspace code to make resource request
+ * calls to the hypervisor, which in turns keeps track of the
+ * resources in use by all applications, maintains protection
+ * guarantees, and resets resources upon application shutdown.
+ *
+ * We strongly recommend reading the mPIPE section of the IO Device
+ * Guide (UG404) before working with this API. Most functions in the
+ * gxio_mpipe_ API are directly analogous to hardware interfaces and
+ * the documentation assumes that the reader understands those
+ * hardware interfaces.
+ *
+ * @section mpipe__ingress mPIPE Ingress Hardware Resources
+ *
+ * The mPIPE ingress hardware provides extensive hardware offload for
+ * tasks like packet header parsing, load balancing, and memory
+ * management. This section provides a brief introduction to the
+ * hardware components and the gxio_mpipe_ calls used to manage them;
+ * see the IO Device Guide for a much more detailed description of the
+ * mPIPE's capabilities.
+ *
+ * When a packet arrives at one of the mPIPE's Ethernet MACs, it is
+ * assigned a channel number indicating which MAC received it. It
+ * then proceeds through the following hardware pipeline:
+ *
+ * @subsection mpipe__classification Classification
+ *
+ * A set of classification processors run header parsing code on each
+ * incoming packet, extracting information including the destination
+ * MAC address, VLAN, Ethernet type, and five-tuple hash. Some of
+ * this information is then used to choose which buffer stack will be
+ * used to hold the packet, and which bucket will be used by the load
+ * balancer to determine which application will receive the packet.
+ *
+ * The rules by which the buffer stack and bucket are chosen can be
+ * configured via the @ref gxio_mpipe_classifier API. A given app can
+ * specify multiple rules, each one specifying a bucket range, and a
+ * set of buffer stacks, to be used for packets matching the rule.
+ * Each rule can optionally specify a restricted set of channels,
+ * VLANs, and/or dMACs, in which it is interested. By default, a
+ * given rule starts out matching all channels associated with the
+ * mPIPE context's set of open links; all VLANs; and all dMACs.
+ * Subsequent restrictions can then be added.
+ *
+ * @subsection mpipe__load_balancing Load Balancing
+ *
+ * The mPIPE load balancer is responsible for choosing the NotifRing
+ * to which the packet will be delivered. This decision is based on
+ * the bucket number indicated by the classification program. In
+ * general, the bucket number is based on some number of low bits of
+ * the packet's flow hash (applications that aren't interested in flow
+ * hashing use a single bucket). Each load balancer bucket keeps a
+ * record of the NotifRing to which packets directed to that bucket
+ * are currently being delivered. Based on the bucket's load
+ * balancing mode (@ref gxio_mpipe_bucket_mode_t), the load balancer
+ * either forwards the packet to the previously assigned NotifRing or
+ * decides to choose a new NotifRing. If a new NotifRing is required,
+ * the load balancer chooses the least loaded ring in the NotifGroup
+ * associated with the bucket.
+ *
+ * The load balancer is a shared resource. Each application needs to
+ * explicitly allocate NotifRings, NotifGroups, and buckets, using
+ * gxio_mpipe_alloc_notif_rings(), gxio_mpipe_alloc_notif_groups(),
+ * and gxio_mpipe_alloc_buckets(). Then the application needs to
+ * configure them using gxio_mpipe_init_notif_ring() and
+ * gxio_mpipe_init_notif_group_and_buckets().
+ *
+ * @subsection mpipe__buffers Buffer Selection and Packet Delivery
+ *
+ * Once the load balancer has chosen the destination NotifRing, the
+ * mPIPE DMA engine pops at least one buffer off of the 'buffer stack'
+ * chosen by the classification program and DMAs the packet data into
+ * that buffer. Each buffer stack provides a hardware-accelerated
+ * stack of data buffers with the same size. If the packet data is
+ * larger than the buffers provided by the chosen buffer stack, the
+ * mPIPE hardware pops off multiple buffers and chains the packet data
+ * through a multi-buffer linked list. Once the packet data is
+ * delivered to the buffer(s), the mPIPE hardware writes the
+ * ::gxio_mpipe_idesc_t metadata object (calculated by the classifier)
+ * into the NotifRing and increments the number of packets delivered
+ * to that ring.
+ *
+ * Applications can push buffers onto a buffer stack by calling
+ * gxio_mpipe_push_buffer() or by egressing a packet with the
+ * ::gxio_mpipe_edesc_t::hwb bit set, indicating that the egressed
+ * buffers should be returned to the stack.
+ *
+ * Applications can allocate and initialize buffer stacks with the
+ * gxio_mpipe_alloc_buffer_stacks() and gxio_mpipe_init_buffer_stack()
+ * APIs.
+ *
+ * The application must also register the memory pages that will hold
+ * packets. This requires calling gxio_mpipe_register_page() for each
+ * memory page that will hold packets allocated by the application for
+ * a given buffer stack. Since each buffer stack is limited to 16
+ * registered pages, it may be necessary to use huge pages, or even
+ * extremely huge pages, to hold all the buffers.
+ *
+ * @subsection mpipe__iqueue NotifRings
+ *
+ * Each NotifRing is a region of shared memory, allocated by the
+ * application, to which the mPIPE delivers packet descriptors
+ * (::gxio_mpipe_idesc_t). The application can allocate them via
+ * gxio_mpipe_alloc_notif_rings(). The application can then either
+ * explicitly initialize them with gxio_mpipe_init_notif_ring() and
+ * then read from them manually, or can make use of the convenience
+ * wrappers provided by @ref gxio_mpipe_wrappers.
+ *
+ * @section mpipe__egress mPIPE Egress Hardware
+ *
+ * Applications use eDMA rings to queue packets for egress. The
+ * application can allocate them via gxio_mpipe_alloc_edma_rings().
+ * The application can then either explicitly initialize them with
+ * gxio_mpipe_init_edma_ring() and then write to them manually, or
+ * can make use of the convenience wrappers provided by
+ * @ref gxio_mpipe_wrappers.
+ *
+ * @section gxio__shortcomings Plans for Future API Revisions
+ *
+ * The API defined here is only an initial version of the mPIPE API.
+ * Future plans include:
+ *
+ * - Higher level wrapper functions to provide common initialization
+ * patterns. This should help users start writing mPIPE programs
+ * without having to learn the details of the hardware.
+ *
+ * - Support for reset and deallocation of resources, including
+ * cleanup upon application shutdown.
+ *
+ * - Support for calling these APIs in the BME.
+ *
+ * - Support for IO interrupts.
+ *
+ * - Clearer definitions of thread safety guarantees.
+ *
+ * @section gxio__mpipe_examples Examples
+ *
+ * See the following mPIPE example programs for more information about
+ * allocating mPIPE resources and using them in real applications:
+ *
+ * - @ref mpipe/ingress/app.c : Receiving packets.
+ *
+ * - @ref mpipe/forward/app.c : Forwarding packets.
+ *
+ * Note that there are several more examples.
+ */
+
+/* Flags that can be passed to resource allocation functions. */
+enum gxio_mpipe_alloc_flags_e {
+ /* Require an allocation to start at a specified resource index. */
+ GXIO_MPIPE_ALLOC_FIXED = HV_MPIPE_ALLOC_FIXED,
+};
+
+/* Flags that can be passed to memory registration functions. */
+enum gxio_mpipe_mem_flags_e {
+ /* Do not fill L3 when writing, and invalidate lines upon egress. */
+ GXIO_MPIPE_MEM_FLAG_NT_HINT = IORPC_MEM_BUFFER_FLAG_NT_HINT,
+
+ /* L3 cache fills should only populate IO cache ways. */
+ GXIO_MPIPE_MEM_FLAG_IO_PIN = IORPC_MEM_BUFFER_FLAG_IO_PIN,
+};
+
+/* An ingress packet descriptor. When a packet arrives, the mPIPE
+ * hardware generates this structure and writes it into a NotifRing.
+ */
+typedef MPIPE_PDESC_t gxio_mpipe_idesc_t;
+
+/* An egress command descriptor. Applications write this structure
+ * into eDMA rings and the hardware performs the indicated operation
+ * (normally involving egressing some bytes). Note that egressing a
+ * single packet may involve multiple egress command descriptors.
+ */
+typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t;
+
+/* Get the "va" field from an "idesc".
+ *
+ * This is the address at which the ingress hardware copied the first
+ * byte of the packet.
+ *
+ * If the classifier detected a custom header, then this will point to
+ * the custom header, and gxio_mpipe_idesc_get_l2_start() will point
+ * to the actual L2 header.
+ *
+ * Note that this value may be misleading if "idesc->be" is set.
+ *
+ * @param idesc An ingress packet descriptor.
+ */
+static inline unsigned char *gxio_mpipe_idesc_get_va(gxio_mpipe_idesc_t *idesc)
+{
+ return (unsigned char *)(long)idesc->va;
+}
+
+/* Get the "xfer_size" from an "idesc".
+ *
+ * This is the actual number of packet bytes transferred into memory
+ * by the hardware.
+ *
+ * Note that this value may be misleading if "idesc->be" is set.
+ *
+ * @param idesc An ingress packet descriptor.
+ *
+ * ISSUE: Is this the best name for this?
+ * FIXME: Add more docs about chaining, clipping, etc.
+ */
+static inline unsigned int gxio_mpipe_idesc_get_xfer_size(gxio_mpipe_idesc_t
+ *idesc)
+{
+ return idesc->l2_size;
+}
+
+/* Get the "l2_offset" from an "idesc".
+ *
+ * Extremely customized classifiers might not support this function.
+ *
+ * This is the number of bytes between the "va" and the L2 header.
+ *
+ * The L2 header consists of a destination mac address, a source mac
+ * address, and an initial ethertype. Various initial ethertypes
+ * allow encoding extra information in the L2 header, often including
+ * a vlan, and/or a new ethertype.
+ *
+ * Note that the "l2_offset" will be non-zero if (and only if) the
+ * classifier processed a custom header for the packet.
+ *
+ * @param idesc An ingress packet descriptor.
+ */
+static inline uint8_t gxio_mpipe_idesc_get_l2_offset(gxio_mpipe_idesc_t *idesc)
+{
+ return (idesc->custom1 >> 32) & 0xFF;
+}
+
+/* Get the "l2_start" from an "idesc".
+ *
+ * This is simply gxio_mpipe_idesc_get_va() plus
+ * gxio_mpipe_idesc_get_l2_offset().
+ *
+ * @param idesc An ingress packet descriptor.
+ */
+static inline unsigned char *gxio_mpipe_idesc_get_l2_start(gxio_mpipe_idesc_t
+ *idesc)
+{
+ unsigned char *va = gxio_mpipe_idesc_get_va(idesc);
+ return va + gxio_mpipe_idesc_get_l2_offset(idesc);
+}
+
+/* Get the "l2_length" from an "idesc".
+ *
+ * This is simply gxio_mpipe_idesc_get_xfer_size() minus
+ * gxio_mpipe_idesc_get_l2_offset().
+ *
+ * @param idesc An ingress packet descriptor.
+ */
+static inline unsigned int gxio_mpipe_idesc_get_l2_length(gxio_mpipe_idesc_t
+ *idesc)
+{
+ unsigned int xfer_size = idesc->l2_size;
+ return xfer_size - gxio_mpipe_idesc_get_l2_offset(idesc);
+}
+
+/* A context object used to manage mPIPE hardware resources. */
+typedef struct {
+
+ /* File descriptor for calling up to Linux (and thus the HV). */
+ int fd;
+
+ /* The VA at which configuration registers are mapped. */
+ char *mmio_cfg_base;
+
+ /* The VA at which IDMA, EDMA, and buffer manager are mapped. */
+ char *mmio_fast_base;
+
+ /* The "initialized" buffer stacks. */
+ gxio_mpipe_rules_stacks_t __stacks;
+
+} gxio_mpipe_context_t;
+
+/* This is only used internally, but it's most easily made visible here. */
+typedef gxio_mpipe_context_t gxio_mpipe_info_context_t;
+
+/* Initialize an mPIPE context.
+ *
+ * This function allocates an mPIPE "service domain" and maps the MMIO
+ * registers into the caller's VA space.
+ *
+ * @param context Context object to be initialized.
+ * @param mpipe_instance Instance number of mPIPE shim to be controlled via
+ * context.
+ */
+extern int gxio_mpipe_init(gxio_mpipe_context_t *context,
+ unsigned int mpipe_instance);
+
+/* Destroy an mPIPE context.
+ *
+ * This function frees the mPIPE "service domain" and unmaps the MMIO
+ * registers from the caller's VA space.
+ *
+ * If a user process exits without calling this routine, the kernel
+ * will destroy the mPIPE context as part of process teardown.
+ *
+ * @param context Context object to be destroyed.
+ */
+extern int gxio_mpipe_destroy(gxio_mpipe_context_t *context);
+
+/*****************************************************************
+ * Buffer Stacks *
+ ******************************************************************/
+
+/* Allocate a set of buffer stacks.
+ *
+ * The return value is NOT interesting if count is zero.
+ *
+ * @param context An initialized mPIPE context.
+ * @param count Number of stacks required.
+ * @param first Index of first stack if ::GXIO_MPIPE_ALLOC_FIXED flag is set,
+ * otherwise ignored.
+ * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
+ * @return Index of first allocated buffer stack, or
+ * ::GXIO_MPIPE_ERR_NO_BUFFER_STACK if allocation failed.
+ */
+extern int gxio_mpipe_alloc_buffer_stacks(gxio_mpipe_context_t *context,
+ unsigned int count,
+ unsigned int first,
+ unsigned int flags);
+
+/* Enum codes for buffer sizes supported by mPIPE. */
+typedef enum {
+ /* 128 byte packet data buffer. */
+ GXIO_MPIPE_BUFFER_SIZE_128 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_128,
+ /* 256 byte packet data buffer. */
+ GXIO_MPIPE_BUFFER_SIZE_256 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_256,
+ /* 512 byte packet data buffer. */
+ GXIO_MPIPE_BUFFER_SIZE_512 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_512,
+ /* 1024 byte packet data buffer. */
+ GXIO_MPIPE_BUFFER_SIZE_1024 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1024,
+ /* 1664 byte packet data buffer. */
+ GXIO_MPIPE_BUFFER_SIZE_1664 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_1664,
+ /* 4096 byte packet data buffer. */
+ GXIO_MPIPE_BUFFER_SIZE_4096 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_4096,
+ /* 10368 byte packet data buffer. */
+ GXIO_MPIPE_BUFFER_SIZE_10368 =
+ MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_10368,
+ /* 16384 byte packet data buffer. */
+ GXIO_MPIPE_BUFFER_SIZE_16384 = MPIPE_BSM_INIT_DAT_1__SIZE_VAL_BSZ_16384
+} gxio_mpipe_buffer_size_enum_t;
+
+/* Convert a buffer size in bytes into a buffer size enum. */
+extern gxio_mpipe_buffer_size_enum_t
+gxio_mpipe_buffer_size_to_buffer_size_enum(size_t size);
+
+/* Convert a buffer size enum into a buffer size in bytes. */
+extern size_t
+gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
+ buffer_size_enum);
+
+/* Calculate the number of bytes required to store a given number of
+ * buffers in the memory registered with a buffer stack via
+ * gxio_mpipe_init_buffer_stack().
+ */
+extern size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers);
+
+/* Initialize a buffer stack. This function binds a region of memory
+ * to be used by the hardware for storing buffer addresses pushed via
+ * gxio_mpipe_push_buffer() or as the result of sending a buffer out
+ * the egress with the 'push to stack when done' bit set. Once this
+ * function returns, the memory region's contents may be arbitrarily
+ * modified by the hardware at any time and software should not access
+ * the memory region again.
+ *
+ * @param context An initialized mPIPE context.
+ * @param stack The buffer stack index.
+ * @param buffer_size_enum The size of each buffer in the buffer stack,
+ * as an enum.
+ * @param mem The address of the buffer stack. This memory must be
+ * physically contiguous and aligned to a 64kB boundary.
+ * @param mem_size The size of the buffer stack, in bytes.
+ * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
+ * @return Zero on success, ::GXIO_MPIPE_ERR_INVAL_BUFFER_SIZE if
+ * buffer_size_enum is invalid, ::GXIO_MPIPE_ERR_BAD_BUFFER_STACK if
+ * stack has not been allocated.
+ */
+extern int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
+ unsigned int stack,
+ gxio_mpipe_buffer_size_enum_t
+ buffer_size_enum, void *mem,
+ size_t mem_size,
+ unsigned int mem_flags);
+
+/* Push a buffer onto a previously initialized buffer stack.
+ *
+ * The size of the buffer being pushed must match the size that was
+ * registered with gxio_mpipe_init_buffer_stack(). All packet buffer
+ * addresses are 128-byte aligned; the low 7 bits of the specified
+ * buffer address will be ignored.
+ *
+ * @param context An initialized mPIPE context.
+ * @param stack The buffer stack index.
+ * @param buffer The buffer (the low seven bits are ignored).
+ */
+static inline void gxio_mpipe_push_buffer(gxio_mpipe_context_t *context,
+ unsigned int stack, void *buffer)
+{
+ MPIPE_BSM_REGION_ADDR_t offset = { {0} };
+ MPIPE_BSM_REGION_VAL_t val = { {0} };
+
+ /*
+ * The mmio_fast_base region starts at the IDMA region, so subtract
+ * off that initial offset.
+ */
+ offset.region =
+ MPIPE_MMIO_ADDR__REGION_VAL_BSM -
+ MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
+ offset.stack = stack;
+
+#if __SIZEOF_POINTER__ == 4
+ val.va = ((ulong) buffer) >> MPIPE_BSM_REGION_VAL__VA_SHIFT;
+#else
+ val.va = ((long)buffer) >> MPIPE_BSM_REGION_VAL__VA_SHIFT;
+#endif
+
+ __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word);
+}
+
+/* Pop a buffer off of a previously initialized buffer stack.
+ *
+ * @param context An initialized mPIPE context.
+ * @param stack The buffer stack index.
+ * @return The buffer, or NULL if the stack is empty.
+ */
+static inline void *gxio_mpipe_pop_buffer(gxio_mpipe_context_t *context,
+ unsigned int stack)
+{
+ MPIPE_BSM_REGION_ADDR_t offset = { {0} };
+
+ /*
+ * The mmio_fast_base region starts at the IDMA region, so subtract
+ * off that initial offset.
+ */
+ offset.region =
+ MPIPE_MMIO_ADDR__REGION_VAL_BSM -
+ MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
+ offset.stack = stack;
+
+ while (1) {
+ /*
+ * Case 1: val.c == ..._UNCHAINED, va is non-zero.
+ * Case 2: val.c == ..._INVALID, va is zero.
+ * Case 3: val.c == ..._NOT_RDY, va is zero.
+ */
+ MPIPE_BSM_REGION_VAL_t val;
+ val.word =
+ __gxio_mmio_read(context->mmio_fast_base +
+ offset.word);
+
+ /*
+ * Handle case 1 and 2 by returning the buffer (or NULL).
+ * Handle case 3 by waiting for the prefetch buffer to refill.
+ */
+ if (val.c != MPIPE_EDMA_DESC_WORD1__C_VAL_NOT_RDY)
+ return (void *)((unsigned long)val.
+ va << MPIPE_BSM_REGION_VAL__VA_SHIFT);
+ }
+}
+
+/*****************************************************************
+ * NotifRings *
+ ******************************************************************/
+
+/* Allocate a set of NotifRings.
+ *
+ * The return value is NOT interesting if count is zero.
+ *
+ * Note that NotifRings are allocated in chunks, so allocating one at
+ * a time is much less efficient than allocating several at once.
+ *
+ * @param context An initialized mPIPE context.
+ * @param count Number of NotifRings required.
+ * @param first Index of first NotifRing if ::GXIO_MPIPE_ALLOC_FIXED flag
+ * is set, otherwise ignored.
+ * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
+ * @return Index of first allocated buffer NotifRing, or
+ * ::GXIO_MPIPE_ERR_NO_NOTIF_RING if allocation failed.
+ */
+extern int gxio_mpipe_alloc_notif_rings(gxio_mpipe_context_t *context,
+ unsigned int count, unsigned int first,
+ unsigned int flags);
+
+/* Initialize a NotifRing, using the given memory and size.
+ *
+ * @param context An initialized mPIPE context.
+ * @param ring The NotifRing index.
+ * @param mem A physically contiguous region of memory to be filled
+ * with a ring of ::gxio_mpipe_idesc_t structures.
+ * @param mem_size Number of bytes in the ring. Must be 128, 512,
+ * 2048, or 65536 * sizeof(gxio_mpipe_idesc_t).
+ * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
+ *
+ * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_NOTIF_RING or
+ * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
+ */
+extern int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
+ unsigned int ring,
+ void *mem, size_t mem_size,
+ unsigned int mem_flags);
+
+/* Configure an interrupt to be sent to a tile on incoming NotifRing
+ * traffic. Once an interrupt is sent for a particular ring, no more
+ * will be sent until gxio_mica_enable_notif_ring_interrupt() is called.
+ *
+ * @param context An initialized mPIPE context.
+ * @param x X coordinate of interrupt target tile.
+ * @param y Y coordinate of interrupt target tile.
+ * @param i Index of the IPI register which will receive the interrupt.
+ * @param e Specific event which will be set in the target IPI register when
+ * the interrupt occurs.
+ * @param ring The NotifRing index.
+ * @return Zero on success, GXIO_ERR_INVAL if params are out of range.
+ */
+extern int gxio_mpipe_request_notif_ring_interrupt(gxio_mpipe_context_t
+ *context, int x, int y,
+ int i, int e,
+ unsigned int ring);
+
+/* Enable an interrupt on incoming NotifRing traffic.
+ *
+ * @param context An initialized mPIPE context.
+ * @param ring The NotifRing index.
+ * @return Zero on success, GXIO_ERR_INVAL if params are out of range.
+ */
+extern int gxio_mpipe_enable_notif_ring_interrupt(gxio_mpipe_context_t
+ *context, unsigned int ring);
+
+/* Map all of a client's memory via the given IOTLB.
+ * @param context An initialized mPIPE context.
+ * @param iotlb IOTLB index.
+ * @param pte Page table entry.
+ * @param flags Flags.
+ * @return Zero on success, or a negative error code.
+ */
+extern int gxio_mpipe_register_client_memory(gxio_mpipe_context_t *context,
+ unsigned int iotlb, HV_PTE pte,
+ unsigned int flags);
+
+/*****************************************************************
+ * Notif Groups *
+ ******************************************************************/
+
+/* Allocate a set of NotifGroups.
+ *
+ * The return value is NOT interesting if count is zero.
+ *
+ * @param context An initialized mPIPE context.
+ * @param count Number of NotifGroups required.
+ * @param first Index of first NotifGroup if ::GXIO_MPIPE_ALLOC_FIXED flag
+ * is set, otherwise ignored.
+ * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
+ * @return Index of first allocated buffer NotifGroup, or
+ * ::GXIO_MPIPE_ERR_NO_NOTIF_GROUP if allocation failed.
+ */
+extern int gxio_mpipe_alloc_notif_groups(gxio_mpipe_context_t *context,
+ unsigned int count,
+ unsigned int first,
+ unsigned int flags);
+
+/* Add a NotifRing to a NotifGroup. This only sets a bit in the
+ * application's 'group' object; the hardware NotifGroup can be
+ * initialized by passing 'group' to gxio_mpipe_init_notif_group() or
+ * gxio_mpipe_init_notif_group_and_buckets().
+ */
+static inline void
+gxio_mpipe_notif_group_add_ring(gxio_mpipe_notif_group_bits_t *bits, int ring)
+{
+ bits->ring_mask[ring / 64] |= (1ull << (ring % 64));
+}
+
+/* Set a particular NotifGroup bitmask. Since the load balancer
+ * makes decisions based on both bucket and NotifGroup state, most
+ * applications should use gxio_mpipe_init_notif_group_and_buckets()
+ * rather than using this function to configure just a NotifGroup.
+ */
+extern int gxio_mpipe_init_notif_group(gxio_mpipe_context_t *context,
+ unsigned int group,
+ gxio_mpipe_notif_group_bits_t bits);
+
+/*****************************************************************
+ * Load Balancer *
+ ******************************************************************/
+
+/* Allocate a set of load balancer buckets.
+ *
+ * The return value is NOT interesting if count is zero.
+ *
+ * Note that buckets are allocated in chunks, so allocating one at
+ * a time is much less efficient than allocating several at once.
+ *
+ * Note that the buckets are actually divided into two sub-ranges, of
+ * different sizes, and different chunk sizes, and the range you get
+ * by default is determined by the size of the request. Allocations
+ * cannot span the two sub-ranges.
+ *
+ * @param context An initialized mPIPE context.
+ * @param count Number of buckets required.
+ * @param first Index of first bucket if ::GXIO_MPIPE_ALLOC_FIXED flag is set,
+ * otherwise ignored.
+ * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
+ * @return Index of first allocated buffer bucket, or
+ * ::GXIO_MPIPE_ERR_NO_BUCKET if allocation failed.
+ */
+extern int gxio_mpipe_alloc_buckets(gxio_mpipe_context_t *context,
+ unsigned int count, unsigned int first,
+ unsigned int flags);
+
+/* The legal modes for gxio_mpipe_bucket_info_t and
+ * gxio_mpipe_init_notif_group_and_buckets().
+ *
+ * All modes except ::GXIO_MPIPE_BUCKET_ROUND_ROBIN expect that the user
+ * will allocate a power-of-two number of buckets and initialize them
+ * to the same mode. The classifier program then uses the appropriate
+ * number of low bits from the incoming packet's flow hash to choose a
+ * load balancer bucket. Based on that bucket's load balancing mode,
+ * reference count, and currently active NotifRing, the load balancer
+ * chooses the NotifRing to which the packet will be delivered.
+ */
+typedef enum {
+ /* All packets for a bucket go to the same NotifRing unless the
+ * NotifRing gets full, in which case packets will be dropped. If
+ * the bucket reference count ever reaches zero, a new NotifRing may
+ * be chosen.
+ */
+ GXIO_MPIPE_BUCKET_DYNAMIC_FLOW_AFFINITY =
+ MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_DFA,
+
+ /* All packets for a bucket always go to the same NotifRing.
+ */
+ GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY =
+ MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_FIXED,
+
+ /* All packets for a bucket go to the least full NotifRing in the
+ * group, providing load balancing round robin behavior.
+ */
+ GXIO_MPIPE_BUCKET_ROUND_ROBIN =
+ MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_ALWAYS_PICK,
+
+ /* All packets for a bucket go to the same NotifRing unless the
+ * NotifRing gets full, at which point the bucket starts using the
+ * least full NotifRing in the group. If all NotifRings in the
+ * group are full, packets will be dropped.
+ */
+ GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY =
+ MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY,
+
+ /* All packets for a bucket go to the same NotifRing unless the
+ * NotifRing gets full, or a random timer fires, at which point the
+ * bucket starts using the least full NotifRing in the group. If
+ * all NotifRings in the group are full, packets will be dropped.
+ * WARNING: This mode is BROKEN on chips with fewer than 64 tiles.
+ */
+ GXIO_MPIPE_BUCKET_PREFER_FLOW_LOCALITY =
+ MPIPE_LBL_INIT_DAT_BSTS_TBL__MODE_VAL_STICKY_RAND,
+
+} gxio_mpipe_bucket_mode_t;
+
+/* Copy a set of bucket initialization values into the mPIPE
+ * hardware. Since the load balancer makes decisions based on both
+ * bucket and NotifGroup state, most applications should use
+ * gxio_mpipe_init_notif_group_and_buckets() rather than using this
+ * function to configure a single bucket.
+ *
+ * @param context An initialized mPIPE context.
+ * @param bucket Bucket index to be initialized.
+ * @param bucket_info Initial reference count, NotifRing index, and mode.
+ * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_BUCKET on failure.
+ */
+extern int gxio_mpipe_init_bucket(gxio_mpipe_context_t *context,
+ unsigned int bucket,
+ gxio_mpipe_bucket_info_t bucket_info);
+
+/* Initializes a group and range of buckets and range of rings such
+ * that the load balancer runs a particular load balancing function.
+ *
+ * First, the group is initialized with the given rings.
+ *
+ * Second, each bucket is initialized with the mode and group, and a
+ * ring chosen round-robin from the given rings.
+ *
+ * Normally, the classifier picks a bucket, and then the load balancer
+ * picks a ring, based on the bucket's mode, group, and current ring,
+ * possibly updating the bucket's ring.
+ *
+ * @param context An initialized mPIPE context.
+ * @param group The group.
+ * @param ring The first ring.
+ * @param num_rings The number of rings.
+ * @param bucket The first bucket.
+ * @param num_buckets The number of buckets.
+ * @param mode The load balancing mode.
+ *
+ * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_BUCKET,
+ * ::GXIO_MPIPE_ERR_BAD_NOTIF_GROUP, or
+ * ::GXIO_MPIPE_ERR_BAD_NOTIF_RING on failure.
+ */
+extern int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t
+ *context,
+ unsigned int group,
+ unsigned int ring,
+ unsigned int num_rings,
+ unsigned int bucket,
+ unsigned int num_buckets,
+ gxio_mpipe_bucket_mode_t
+ mode);
+
+/* Return credits to a NotifRing and/or bucket.
+ *
+ * @param context An initialized mPIPE context.
+ * @param ring The NotifRing index, or -1.
+ * @param bucket The bucket, or -1.
+ * @param count The number of credits to return.
+ */
+static inline void gxio_mpipe_credit(gxio_mpipe_context_t *context,
+ int ring, int bucket, unsigned int count)
+{
+ /* NOTE: Fancy struct initialization would break "C89" header test. */
+
+ MPIPE_IDMA_RELEASE_REGION_ADDR_t offset = { {0} };
+ MPIPE_IDMA_RELEASE_REGION_VAL_t val = { {0} };
+
+ /*
+ * The mmio_fast_base region starts at the IDMA region, so subtract
+ * off that initial offset.
+ */
+ offset.region =
+ MPIPE_MMIO_ADDR__REGION_VAL_IDMA -
+ MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
+ offset.ring = ring;
+ offset.bucket = bucket;
+ offset.ring_enable = (ring >= 0);
+ offset.bucket_enable = (bucket >= 0);
+ val.count = count;
+
+ __gxio_mmio_write(context->mmio_fast_base + offset.word, val.word);
+}
+
+/*****************************************************************
+ * Egress Rings *
+ ******************************************************************/
+
+/* Allocate a set of eDMA rings.
+ *
+ * The return value is NOT interesting if count is zero.
+ *
+ * @param context An initialized mPIPE context.
+ * @param count Number of eDMA rings required.
+ * @param first Index of first eDMA ring if ::GXIO_MPIPE_ALLOC_FIXED flag
+ * is set, otherwise ignored.
+ * @param flags Flag bits from ::gxio_mpipe_alloc_flags_e.
+ * @return Index of first allocated buffer eDMA ring, or
+ * ::GXIO_MPIPE_ERR_NO_EDMA_RING if allocation failed.
+ */
+extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
+ unsigned int count, unsigned int first,
+ unsigned int flags);
+
+/* Initialize an eDMA ring, using the given memory and size.
+ *
+ * @param context An initialized mPIPE context.
+ * @param ring The eDMA ring index.
+ * @param channel The channel to use. This must be one of the channels
+ * associated with the context's set of open links.
+ * @param mem A physically contiguous region of memory to be filled
+ * with a ring of ::gxio_mpipe_edesc_t structures.
+ * @param mem_size Number of bytes in the ring. Must be 512, 2048,
+ * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)).
+ * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
+ *
+ * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or
+ * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
+ */
+extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
+ unsigned int ring, unsigned int channel,
+ void *mem, size_t mem_size,
+ unsigned int mem_flags);
+
+/*****************************************************************
+ * Classifier Program *
+ ******************************************************************/
+
+/*
+ *
+ * Functions for loading or configuring the mPIPE classifier program.
+ *
+ * The mPIPE classification processors all run a special "classifier"
+ * program which, for each incoming packet, parses the packet headers,
+ * encodes some packet metadata in the "idesc", and either drops the
+ * packet, or picks a notif ring to handle the packet, and a buffer
+ * stack to contain the packet, usually based on the channel, VLAN,
+ * dMAC, flow hash, and packet size, under the guidance of the "rules"
+ * API described below.
+ *
+ * @section gxio_mpipe_classifier_default Default Classifier
+ *
+ * The MDE provides a simple "default" classifier program. It is
+ * shipped as source in "$TILERA_ROOT/src/sys/mpipe/classifier.c",
+ * which serves as its official documentation. It is shipped as a
+ * binary program in "$TILERA_ROOT/tile/boot/classifier", which is
+ * automatically included in bootroms created by "tile-monitor", and
+ * is automatically loaded by the hypervisor at boot time.
+ *
+ * The L2 analysis handles LLC packets, SNAP packets, and "VLAN
+ * wrappers" (keeping the outer VLAN).
+ *
+ * The L3 analysis handles IPv4 and IPv6, dropping packets with bad
+ * IPv4 header checksums, requesting computation of a TCP/UDP checksum
+ * if appropriate, and hashing the dest and src IP addresses, plus the
+ * ports for TCP/UDP packets, into the flow hash. No special analysis
+ * is done for "fragmented" packets or "tunneling" protocols. Thus,
+ * the first fragment of a fragmented TCP/UDP packet is hashed using
+ * src/dest IP address and ports and all subsequent fragments are only
+ * hashed according to src/dest IP address.
+ *
+ * The L3 analysis handles other packets too, hashing the dMAC
+ * smac into a flow hash.
+ *
+ * The channel, VLAN, and dMAC used to pick a "rule" (see the
+ * "rules" APIs below), which in turn is used to pick a buffer stack
+ * (based on the packet size) and a bucket (based on the flow hash).
+ *
+ * To receive traffic matching a particular (channel/VLAN/dMAC
+ * pattern, an application should allocate its own buffer stacks and
+ * load balancer buckets, and map traffic to those stacks and buckets,
+ * as decribed by the "rules" API below.
+ *
+ * Various packet metadata is encoded in the idesc. The flow hash is
+ * four bytes at 0x0C. The VLAN is two bytes at 0x10. The ethtype is
+ * two bytes at 0x12. The l3 start is one byte at 0x14. The l4 start
+ * is one byte at 0x15 for IPv4 and IPv6 packets, and otherwise zero.
+ * The protocol is one byte at 0x16 for IPv4 and IPv6 packets, and
+ * otherwise zero.
+ *
+ * @section gxio_mpipe_classifier_custom Custom Classifiers.
+ *
+ * A custom classifier may be created using "tile-mpipe-cc" with a
+ * customized version of the default classifier sources.
+ *
+ * The custom classifier may be included in bootroms using the
+ * "--classifier" option to "tile-monitor", or loaded dynamically
+ * using gxio_mpipe_classifier_load_from_file().
+ *
+ * Be aware that "extreme" customizations may break the assumptions of
+ * the "rules" APIs described below, but simple customizations, such
+ * as adding new packet metadata, should be fine.
+ */
+
+/* A set of classifier rules, plus a context. */
+typedef struct {
+
+ /* The context. */
+ gxio_mpipe_context_t *context;
+
+ /* The actual rules. */
+ gxio_mpipe_rules_list_t list;
+
+} gxio_mpipe_rules_t;
+
+/* Initialize a classifier program rules list.
+ *
+ * This function can be called on a previously initialized rules list
+ * to discard any previously added rules.
+ *
+ * @param rules Rules list to initialize.
+ * @param context An initialized mPIPE context.
+ */
+extern void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
+ gxio_mpipe_context_t *context);
+
+/* Begin a new rule on the indicated rules list.
+ *
+ * Note that an empty rule matches all packets, but an empty rule list
+ * matches no packets.
+ *
+ * @param rules Rules list to which new rule is appended.
+ * @param bucket First load balancer bucket to which packets will be
+ * delivered.
+ * @param num_buckets Number of buckets (must be a power of two) across
+ * which packets will be distributed based on the "flow hash".
+ * @param stacks Either NULL, to assign each packet to the smallest
+ * initialized buffer stack which does not induce chaining (and to
+ * drop packets which exceed the largest initialized buffer stack
+ * buffer size), or an array, with each entry indicating which buffer
+ * stack should be used for packets up to that size (with 255
+ * indicating that those packets should be dropped).
+ * @return 0 on success, or a negative error code on failure.
+ */
+extern int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
+ unsigned int bucket,
+ unsigned int num_buckets,
+ gxio_mpipe_rules_stacks_t *stacks);
+
+/* Set the headroom of the current rule.
+ *
+ * @param rules Rules list whose current rule will be modified.
+ * @param headroom The headroom.
+ * @return 0 on success, or a negative error code on failure.
+ */
+extern int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules,
+ uint8_t headroom);
+
+/* Indicate that packets from a particular channel can be delivered
+ * to the buckets and buffer stacks associated with the current rule.
+ *
+ * Channels added must be associated with links opened by the mPIPE context
+ * used in gxio_mpipe_rules_init(). A rule with no channels is equivalent
+ * to a rule naming all such associated channels.
+ *
+ * @param rules Rules list whose current rule will be modified.
+ * @param channel The channel to add.
+ * @return 0 on success, or a negative error code on failure.
+ */
+extern int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
+ unsigned int channel);
+
+/* Commit rules.
+ *
+ * The rules are sent to the hypervisor, where they are combined with
+ * the rules from other apps, and used to program the hardware classifier.
+ *
+ * Note that if this function returns an error, then the rules will NOT
+ * have been committed, even if the error is due to interactions with
+ * rules from another app.
+ *
+ * @param rules Rules list to commit.
+ * @return 0 on success, or a negative error code on failure.
+ */
+extern int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules);
+
+/*****************************************************************
+ * Ingress Queue Wrapper *
+ ******************************************************************/
+
+/*
+ *
+ * Convenience functions for receiving packets from a NotifRing and
+ * sending packets via an eDMA ring.
+ *
+ * The mpipe ingress and egress hardware uses shared memory packet
+ * descriptors to describe packets that have arrived on ingress or
+ * are destined for egress. These descriptors are stored in shared
+ * memory ring buffers and written or read by hardware as necessary.
+ * The gxio library provides wrapper functions that manage the head and
+ * tail pointers for these rings, allowing the user to easily read or
+ * write packet descriptors.
+ *
+ * The initialization interface for ingress and egress rings is quite
+ * similar. For example, to create an ingress queue, the user passes
+ * a ::gxio_mpipe_iqueue_t state object, a ring number from
+ * gxio_mpipe_alloc_notif_rings(), and the address of memory to hold a
+ * ring buffer to the gxio_mpipe_iqueue_init() function. The function
+ * returns success when the state object has been initialized and the
+ * hardware configured to deliver packets to the specified ring
+ * buffer. Similarly, gxio_mpipe_equeue_init() takes a
+ * ::gxio_mpipe_equeue_t state object, a ring number from
+ * gxio_mpipe_alloc_edma_rings(), and a shared memory buffer.
+ *
+ * @section gxio_mpipe_iqueue Working with Ingress Queues
+ *
+ * Once initialized, the gxio_mpipe_iqueue_t API provides two flows
+ * for getting the ::gxio_mpipe_idesc_t packet descriptor associated
+ * with incoming packets. The simplest is to call
+ * gxio_mpipe_iqueue_get() or gxio_mpipe_iqueue_try_get(). These
+ * functions copy the oldest packet descriptor out of the NotifRing and
+ * into a descriptor provided by the caller. They also immediately
+ * inform the hardware that a descriptor has been processed.
+ *
+ * For applications with stringent performance requirements, higher
+ * efficiency can be achieved by avoiding the packet descriptor copy
+ * and processing multiple descriptors at once. The
+ * gxio_mpipe_iqueue_peek() and gxio_mpipe_iqueue_try_peek() functions
+ * allow such optimizations. These functions provide a pointer to the
+ * next valid ingress descriptor in the NotifRing's shared memory ring
+ * buffer, and a count of how many contiguous descriptors are ready to
+ * be processed. The application can then process any number of those
+ * descriptors in place, calling gxio_mpipe_iqueue_consume() to inform
+ * the hardware after each one has been processed.
+ *
+ * @section gxio_mpipe_equeue Working with Egress Queues
+ *
+ * Similarly, the egress queue API provides a high-performance
+ * interface plus a simple wrapper for use in posting
+ * ::gxio_mpipe_edesc_t egress packet descriptors. The simple
+ * version, gxio_mpipe_equeue_put(), allows the programmer to wait for
+ * an eDMA ring slot to become available and write a single descriptor
+ * into the ring.
+ *
+ * Alternatively, you can reserve slots in the eDMA ring using
+ * gxio_mpipe_equeue_reserve() or gxio_mpipe_equeue_try_reserve(), and
+ * then fill in each slot using gxio_mpipe_equeue_put_at(). This
+ * capability can be used to amortize the cost of reserving slots
+ * across several packets. It also allows gather operations to be
+ * performed on a shared equeue, by ensuring that the edescs for all
+ * the fragments are all contiguous in the eDMA ring.
+ *
+ * The gxio_mpipe_equeue_reserve() and gxio_mpipe_equeue_try_reserve()
+ * functions return a 63-bit "completion slot", which is actually a
+ * sequence number, the low bits of which indicate the ring buffer
+ * index and the high bits the number of times the application has
+ * gone around the egress ring buffer. The extra bits allow an
+ * application to check for egress completion by calling
+ * gxio_mpipe_equeue_is_complete() to see whether a particular 'slot'
+ * number has finished. Given the maximum packet rates of the Gx
+ * processor, the 63-bit slot number will never wrap.
+ *
+ * In practice, most applications use the ::gxio_mpipe_edesc_t::hwb
+ * bit to indicate that the buffers containing egress packet data
+ * should be pushed onto a buffer stack when egress is complete. Such
+ * applications generally do not need to know when an egress operation
+ * completes (since there is no need to free a buffer post-egress),
+ * and thus can use the optimized gxio_mpipe_equeue_reserve_fast() or
+ * gxio_mpipe_equeue_try_reserve_fast() functions, which return a 24
+ * bit "slot", instead of a 63-bit "completion slot".
+ *
+ * Once a slot has been "reserved", it MUST be filled. If the
+ * application reserves a slot and then decides that it does not
+ * actually need it, it can set the ::gxio_mpipe_edesc_t::ns (no send)
+ * bit on the descriptor passed to gxio_mpipe_equeue_put_at() to
+ * indicate that no data should be sent. This technique can also be
+ * used to drop an incoming packet, instead of forwarding it, since
+ * any buffer will still be pushed onto the buffer stack when the
+ * egress descriptor is processed.
+ */
+
+/* A convenient interface to a NotifRing, for use by a single thread.
+ */
+typedef struct {
+
+ /* The context. */
+ gxio_mpipe_context_t *context;
+
+ /* The actual NotifRing. */
+ gxio_mpipe_idesc_t *idescs;
+
+ /* The number of entries. */
+ unsigned long num_entries;
+
+ /* The number of entries minus one. */
+ unsigned long mask_num_entries;
+
+ /* The log2() of the number of entries. */
+ unsigned long log2_num_entries;
+
+ /* The next entry. */
+ unsigned int head;
+
+ /* The NotifRing id. */
+ unsigned int ring;
+
+#ifdef __BIG_ENDIAN__
+ /* The number of byteswapped entries. */
+ unsigned int swapped;
+#endif
+
+} gxio_mpipe_iqueue_t;
+
+/* Initialize an "iqueue".
+ *
+ * Takes the iqueue plus the same args as gxio_mpipe_init_notif_ring().
+ */
+extern int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
+ gxio_mpipe_context_t *context,
+ unsigned int ring,
+ void *mem, size_t mem_size,
+ unsigned int mem_flags);
+
+/* Advance over some old entries in an iqueue.
+ *
+ * Please see the documentation for gxio_mpipe_iqueue_consume().
+ *
+ * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
+ * @param count The number of entries to advance over.
+ */
+static inline void gxio_mpipe_iqueue_advance(gxio_mpipe_iqueue_t *iqueue,
+ int count)
+{
+ /* Advance with proper wrap. */
+ int head = iqueue->head + count;
+ iqueue->head =
+ (head & iqueue->mask_num_entries) +
+ (head >> iqueue->log2_num_entries);
+
+#ifdef __BIG_ENDIAN__
+ /* HACK: Track swapped entries. */
+ iqueue->swapped -= count;
+#endif
+}
+
+/* Release the ring and bucket for an old entry in an iqueue.
+ *
+ * Releasing the ring allows more packets to be delivered to the ring.
+ *
+ * Releasing the bucket allows flows using the bucket to be moved to a
+ * new ring when using GXIO_MPIPE_BUCKET_DYNAMIC_FLOW_AFFINITY.
+ *
+ * This function is shorthand for "gxio_mpipe_credit(iqueue->context,
+ * iqueue->ring, idesc->bucket_id, 1)", and it may be more convenient
+ * to make that underlying call, using those values, instead of
+ * tracking the entire "idesc".
+ *
+ * If packet processing is deferred, optimal performance requires that
+ * the releasing be deferred as well.
+ *
+ * Please see the documentation for gxio_mpipe_iqueue_consume().
+ *
+ * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
+ * @param idesc The descriptor which was processed.
+ */
+static inline void gxio_mpipe_iqueue_release(gxio_mpipe_iqueue_t *iqueue,
+ gxio_mpipe_idesc_t *idesc)
+{
+ gxio_mpipe_credit(iqueue->context, iqueue->ring, idesc->bucket_id, 1);
+}
+
+/* Consume a packet from an "iqueue".
+ *
+ * After processing packets peeked at via gxio_mpipe_iqueue_peek()
+ * or gxio_mpipe_iqueue_try_peek(), you must call this function, or
+ * gxio_mpipe_iqueue_advance() plus gxio_mpipe_iqueue_release(), to
+ * advance over those entries, and release their rings and buckets.
+ *
+ * You may call this function as each packet is processed, or you can
+ * wait until several packets have been processed.
+ *
+ * Note that if you are using a single bucket, and you are handling
+ * batches of N packets, then you can replace several calls to this
+ * function with calls to "gxio_mpipe_iqueue_advance(iqueue, N)" and
+ * "gxio_mpipe_credit(iqueue->context, iqueue->ring, bucket, N)".
+ *
+ * Note that if your classifier sets "idesc->nr", then you should
+ * explicitly call "gxio_mpipe_iqueue_advance(iqueue, idesc)" plus
+ * "gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, 1)", to
+ * avoid incorrectly crediting the (unused) bucket.
+ *
+ * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
+ * @param idesc The descriptor which was processed.
+ */
+static inline void gxio_mpipe_iqueue_consume(gxio_mpipe_iqueue_t *iqueue,
+ gxio_mpipe_idesc_t *idesc)
+{
+ gxio_mpipe_iqueue_advance(iqueue, 1);
+ gxio_mpipe_iqueue_release(iqueue, idesc);
+}
+
+/* Peek at the next packet(s) in an "iqueue", without waiting.
+ *
+ * If no packets are available, fills idesc_ref with NULL, and then
+ * returns ::GXIO_MPIPE_ERR_IQUEUE_EMPTY. Otherwise, fills idesc_ref
+ * with the address of the next valid packet descriptor, and returns
+ * the maximum number of valid descriptors which can be processed.
+ * You may process fewer descriptors if desired.
+ *
+ * Call gxio_mpipe_iqueue_consume() on each packet once it has been
+ * processed (or dropped), to allow more packets to be delivered.
+ *
+ * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
+ * @param idesc_ref A pointer to a packet descriptor pointer.
+ * @return The (positive) number of packets which can be processed,
+ * or ::GXIO_MPIPE_ERR_IQUEUE_EMPTY if no packets are available.
+ */
+static inline int gxio_mpipe_iqueue_try_peek(gxio_mpipe_iqueue_t *iqueue,
+ gxio_mpipe_idesc_t **idesc_ref)
+{
+ gxio_mpipe_idesc_t *next;
+
+ uint64_t head = iqueue->head;
+ uint64_t tail = __gxio_mmio_read(iqueue->idescs);
+
+ /* Available entries. */
+ uint64_t avail =
+ (tail >= head) ? (tail - head) : (iqueue->num_entries - head);
+
+ if (avail == 0) {
+ *idesc_ref = NULL;
+ return GXIO_MPIPE_ERR_IQUEUE_EMPTY;
+ }
+
+ next = &iqueue->idescs[head];
+
+ /* ISSUE: Is this helpful? */
+ __insn_prefetch(next);
+
+#ifdef __BIG_ENDIAN__
+ /* HACK: Swap new entries directly in memory. */
+ {
+ int i, j;
+ for (i = iqueue->swapped; i < avail; i++) {
+ for (j = 0; j < 8; j++)
+ next[i].words[j] =
+ __builtin_bswap64(next[i].words[j]);
+ }
+ iqueue->swapped = avail;
+ }
+#endif
+
+ *idesc_ref = next;
+
+ return avail;
+}
+
+/* Drop a packet by pushing its buffer (if appropriate).
+ *
+ * NOTE: The caller must still call gxio_mpipe_iqueue_consume() if idesc
+ * came from gxio_mpipe_iqueue_try_peek() or gxio_mpipe_iqueue_peek().
+ *
+ * @param iqueue An ingress queue initialized via gxio_mpipe_iqueue_init().
+ * @param idesc A packet descriptor.
+ */
+static inline void gxio_mpipe_iqueue_drop(gxio_mpipe_iqueue_t *iqueue,
+ gxio_mpipe_idesc_t *idesc)
+{
+ /* FIXME: Handle "chaining" properly. */
+
+ if (!idesc->be) {
+ unsigned char *va = gxio_mpipe_idesc_get_va(idesc);
+ gxio_mpipe_push_buffer(iqueue->context, idesc->stack_idx, va);
+ }
+}
+
+/*****************************************************************
+ * Egress Queue Wrapper *
+ ******************************************************************/
+
+/* A convenient, thread-safe interface to an eDMA ring. */
+typedef struct {
+
+ /* State object for tracking head and tail pointers. */
+ __gxio_dma_queue_t dma_queue;
+
+ /* The ring entries. */
+ gxio_mpipe_edesc_t *edescs;
+
+ /* The number of entries minus one. */
+ unsigned long mask_num_entries;
+
+ /* The log2() of the number of entries. */
+ unsigned long log2_num_entries;
+
+} gxio_mpipe_equeue_t;
+
+/* Initialize an "equeue".
+ *
+ * Takes the equeue plus the same args as gxio_mpipe_init_edma_ring().
+ */
+extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
+ gxio_mpipe_context_t *context,
+ unsigned int edma_ring_id,
+ unsigned int channel,
+ void *mem, unsigned int mem_size,
+ unsigned int mem_flags);
+
+/* Reserve completion slots for edescs.
+ *
+ * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
+ *
+ * This function is slower than gxio_mpipe_equeue_reserve_fast(), but
+ * returns a full 64 bit completion slot, which can be used with
+ * gxio_mpipe_equeue_is_complete().
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param num Number of slots to reserve (must be non-zero).
+ * @return The first reserved completion slot, or a negative error code.
+ */
+static inline int64_t gxio_mpipe_equeue_reserve(gxio_mpipe_equeue_t *equeue,
+ unsigned int num)
+{
+ return __gxio_dma_queue_reserve_aux(&equeue->dma_queue, num, true);
+}
+
+/* Reserve completion slots for edescs, if possible.
+ *
+ * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
+ *
+ * This function is slower than gxio_mpipe_equeue_try_reserve_fast(),
+ * but returns a full 64 bit completion slot, which can be used with
+ * gxio_mpipe_equeue_is_complete().
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param num Number of slots to reserve (must be non-zero).
+ * @return The first reserved completion slot, or a negative error code.
+ */
+static inline int64_t gxio_mpipe_equeue_try_reserve(gxio_mpipe_equeue_t
+ *equeue, unsigned int num)
+{
+ return __gxio_dma_queue_reserve_aux(&equeue->dma_queue, num, false);
+}
+
+/* Reserve slots for edescs.
+ *
+ * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
+ *
+ * This function is faster than gxio_mpipe_equeue_reserve(), but
+ * returns a 24 bit slot (instead of a 64 bit completion slot), which
+ * thus cannot be used with gxio_mpipe_equeue_is_complete().
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param num Number of slots to reserve (should be non-zero).
+ * @return The first reserved slot, or a negative error code.
+ */
+static inline int64_t gxio_mpipe_equeue_reserve_fast(gxio_mpipe_equeue_t
+ *equeue, unsigned int num)
+{
+ return __gxio_dma_queue_reserve(&equeue->dma_queue, num, true, false);
+}
+
+/* Reserve slots for edescs, if possible.
+ *
+ * Use gxio_mpipe_equeue_put_at() to actually populate the slots.
+ *
+ * This function is faster than gxio_mpipe_equeue_try_reserve(), but
+ * returns a 24 bit slot (instead of a 64 bit completion slot), which
+ * thus cannot be used with gxio_mpipe_equeue_is_complete().
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param num Number of slots to reserve (should be non-zero).
+ * @return The first reserved slot, or a negative error code.
+ */
+static inline int64_t gxio_mpipe_equeue_try_reserve_fast(gxio_mpipe_equeue_t
+ *equeue,
+ unsigned int num)
+{
+ return __gxio_dma_queue_reserve(&equeue->dma_queue, num, false, false);
+}
+
+/*
+ * HACK: This helper function tricks gcc 4.6 into avoiding saving
+ * a copy of "edesc->words[0]" on the stack for no obvious reason.
+ */
+
+static inline void gxio_mpipe_equeue_put_at_aux(gxio_mpipe_equeue_t *equeue,
+ uint_reg_t ew[2],
+ unsigned long slot)
+{
+ unsigned long edma_slot = slot & equeue->mask_num_entries;
+ gxio_mpipe_edesc_t *edesc_p = &equeue->edescs[edma_slot];
+
+ /*
+ * ISSUE: Could set eDMA ring to be on generation 1 at start, which
+ * would avoid the negation here, perhaps allowing "__insn_bfins()".
+ */
+ ew[0] |= !((slot >> equeue->log2_num_entries) & 1);
+
+ /*
+ * NOTE: We use "__gxio_mpipe_write()", plus the fact that the eDMA
+ * queue alignment restrictions ensure that these two words are on
+ * the same cacheline, to force proper ordering between the stores.
+ */
+ __gxio_mmio_write64(&edesc_p->words[1], ew[1]);
+ __gxio_mmio_write64(&edesc_p->words[0], ew[0]);
+}
+
+/* Post an edesc to a given slot in an equeue.
+ *
+ * This function copies the supplied edesc into entry "slot mod N" in
+ * the underlying ring, setting the "gen" bit to the appropriate value
+ * based on "(slot mod N*2)", where "N" is the size of the ring. Note
+ * that the higher bits of slot are unused, and thus, this function
+ * can handle "slots" as well as "completion slots".
+ *
+ * Normally this function is used to fill in slots reserved by
+ * gxio_mpipe_equeue_try_reserve(), gxio_mpipe_equeue_reserve(),
+ * gxio_mpipe_equeue_try_reserve_fast(), or
+ * gxio_mpipe_equeue_reserve_fast(),
+ *
+ * This function can also be used without "reserving" slots, if the
+ * application KNOWS that the ring can never overflow, for example, by
+ * pushing fewer buffers into the buffer stacks than there are total
+ * slots in the equeue, but this is NOT recommended.
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param edesc The egress descriptor to be posted.
+ * @param slot An egress slot (only the low bits are actually used).
+ */
+static inline void gxio_mpipe_equeue_put_at(gxio_mpipe_equeue_t *equeue,
+ gxio_mpipe_edesc_t edesc,
+ unsigned long slot)
+{
+ gxio_mpipe_equeue_put_at_aux(equeue, edesc.words, slot);
+}
+
+/* Post an edesc to the next slot in an equeue.
+ *
+ * This is a convenience wrapper around
+ * gxio_mpipe_equeue_reserve_fast() and gxio_mpipe_equeue_put_at().
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param edesc The egress descriptor to be posted.
+ * @return 0 on success.
+ */
+static inline int gxio_mpipe_equeue_put(gxio_mpipe_equeue_t *equeue,
+ gxio_mpipe_edesc_t edesc)
+{
+ int64_t slot = gxio_mpipe_equeue_reserve_fast(equeue, 1);
+ if (slot < 0)
+ return (int)slot;
+
+ gxio_mpipe_equeue_put_at(equeue, edesc, slot);
+
+ return 0;
+}
+
+/* Ask the mPIPE hardware to egress outstanding packets immediately.
+ *
+ * This call is not necessary, but may slightly reduce overall latency.
+ *
+ * Technically, you should flush all gxio_mpipe_equeue_put_at() writes
+ * to memory before calling this function, to ensure the descriptors
+ * are visible in memory before the mPIPE hardware actually looks for
+ * them. But this should be very rare, and the only side effect would
+ * be increased latency, so it is up to the caller to decide whether
+ * or not to flush memory.
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ */
+static inline void gxio_mpipe_equeue_flush(gxio_mpipe_equeue_t *equeue)
+{
+ /* Use "ring_idx = 0" and "count = 0" to "wake up" the eDMA ring. */
+ MPIPE_EDMA_POST_REGION_VAL_t val = { {0} };
+ /* Flush the write buffers. */
+ __insn_flushwb();
+ __gxio_mmio_write(equeue->dma_queue.post_region_addr, val.word);
+}
+
+/* Determine if a given edesc has been completed.
+ *
+ * Note that this function requires a "completion slot", and thus may
+ * NOT be used with a "slot" from gxio_mpipe_equeue_reserve_fast() or
+ * gxio_mpipe_equeue_try_reserve_fast().
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param completion_slot The completion slot used by the edesc.
+ * @param update If true, and the desc does not appear to have completed
+ * yet, then update any software cache of the hardware completion counter,
+ * and check again. This should normally be true.
+ * @return True iff the given edesc has been completed.
+ */
+static inline int gxio_mpipe_equeue_is_complete(gxio_mpipe_equeue_t *equeue,
+ int64_t completion_slot,
+ int update)
+{
+ return __gxio_dma_queue_is_complete(&equeue->dma_queue,
+ completion_slot, update);
+}
+
+/*****************************************************************
+ * Link Management *
+ ******************************************************************/
+
+/*
+ *
+ * Functions for manipulating and sensing the state and configuration
+ * of physical network links.
+ *
+ * @section gxio_mpipe_link_perm Link Permissions
+ *
+ * Opening a link (with gxio_mpipe_link_open()) requests a set of link
+ * permissions, which control what may be done with the link, and potentially
+ * what permissions may be granted to other processes.
+ *
+ * Data permission allows the process to receive packets from the link by
+ * specifying the link's channel number in mPIPE packet distribution rules,
+ * and to send packets to the link by using the link's channel number as
+ * the target for an eDMA ring.
+ *
+ * Stats permission allows the process to retrieve link attributes (such as
+ * the speeds it is capable of running at, or whether it is currently up), and
+ * to read and write certain statistics-related registers in the link's MAC.
+ *
+ * Control permission allows the process to retrieve and modify link attributes
+ * (so that it may, for example, bring the link up and take it down), and
+ * read and write many registers in the link's MAC and PHY.
+ *
+ * Any permission may be requested as shared, which allows other processes
+ * to also request shared permission, or exclusive, which prevents other
+ * processes from requesting it. In keeping with GXIO's typical usage in
+ * an embedded environment, the defaults for all permissions are shared.
+ *
+ * Permissions are granted on a first-come, first-served basis, so if two
+ * applications request an exclusive permission on the same link, the one
+ * to run first will win. Note, however, that some system components, like
+ * the kernel Ethernet driver, may get an opportunity to open links before
+ * any applications run.
+ *
+ * @section gxio_mpipe_link_names Link Names
+ *
+ * Link names are of the form gbe<em>number</em> (for Gigabit Ethernet),
+ * xgbe<em>number</em> (for 10 Gigabit Ethernet), loop<em>number</em> (for
+ * internal mPIPE loopback), or ilk<em>number</em>/<em>channel</em>
+ * (for Interlaken links); for instance, gbe0, xgbe1, loop3, and
+ * ilk0/12 are all possible link names. The correspondence between
+ * the link name and an mPIPE instance number or mPIPE channel number is
+ * system-dependent; all links will not exist on all systems, and the set
+ * of numbers used for a particular link type may not start at zero and may
+ * not be contiguous. Use gxio_mpipe_link_enumerate() to retrieve the set of
+ * links which exist on a system, and always use gxio_mpipe_link_instance()
+ * to determine which mPIPE controls a particular link.
+ *
+ * Note that in some cases, links may share hardware, such as PHYs, or
+ * internal mPIPE buffers; in these cases, only one of the links may be
+ * opened at a time. This is especially common with xgbe and gbe ports,
+ * since each xgbe port uses 4 SERDES lanes, each of which may also be
+ * configured as one gbe port.
+ *
+ * @section gxio_mpipe_link_states Link States
+ *
+ * The mPIPE link management model revolves around three different states,
+ * which are maintained for each link:
+ *
+ * 1. The <em>current</em> link state: is the link up now, and if so, at
+ * what speed?
+ *
+ * 2. The <em>desired</em> link state: what do we want the link state to be?
+ * The system is always working to make this state the current state;
+ * thus, if the desired state is up, and the link is down, we'll be
+ * constantly trying to bring it up, automatically.
+ *
+ * 3. The <em>possible</em> link state: what speeds are valid for this
+ * particular link? Or, in other words, what are the capabilities of
+ * the link hardware?
+ *
+ * These link states are not, strictly speaking, related to application
+ * state; they may be manipulated at any time, whether or not the link
+ * is currently being used for data transfer. However, for convenience,
+ * gxio_mpipe_link_open() and gxio_mpipe_link_close() (or application exit)
+ * can affect the link state. These implicit link management operations
+ * may be modified or disabled by the use of link open flags.
+ *
+ * From an application, you can use gxio_mpipe_link_get_attr()
+ * and gxio_mpipe_link_set_attr() to manipulate the link states.
+ * gxio_mpipe_link_get_attr() with ::GXIO_MPIPE_LINK_POSSIBLE_STATE
+ * gets you the possible link state. gxio_mpipe_link_get_attr() with
+ * ::GXIO_MPIPE_LINK_CURRENT_STATE gets you the current link state.
+ * Finally, gxio_mpipe_link_set_attr() and gxio_mpipe_link_get_attr()
+ * with ::GXIO_MPIPE_LINK_DESIRED_STATE allow you to modify or retrieve
+ * the desired link state.
+ *
+ * If you want to manage a link from a part of your application which isn't
+ * involved in packet processing, you can use the ::GXIO_MPIPE_LINK_NO_DATA
+ * flags on a gxio_mpipe_link_open() call. This opens the link, but does
+ * not request data permission, so it does not conflict with any exclusive
+ * permissions which may be held by other processes. You can then can use
+ * gxio_mpipe_link_get_attr() and gxio_mpipe_link_set_attr() on this link
+ * object to bring up or take down the link.
+ *
+ * Some links support link state bits which support various loopback
+ * modes. ::GXIO_MPIPE_LINK_LOOP_MAC tests datapaths within the Tile
+ * Processor itself; ::GXIO_MPIPE_LINK_LOOP_PHY tests the datapath between
+ * the Tile Processor and the external physical layer interface chip; and
+ * ::GXIO_MPIPE_LINK_LOOP_EXT tests the entire network datapath with the
+ * aid of an external loopback connector. In addition to enabling hardware
+ * testing, such configuration can be useful for software testing, as well.
+ *
+ * When LOOP_MAC or LOOP_PHY is enabled, packets transmitted on a channel
+ * will be received by that channel, instead of being emitted on the
+ * physical link, and packets received on the physical link will be ignored.
+ * Other than that, all standard GXIO operations work as you might expect.
+ * Note that loopback operation requires that the link be brought up using
+ * one or more of the GXIO_MPIPE_LINK_SPEED_xxx link state bits.
+ *
+ * Those familiar with previous versions of the MDE on TILEPro hardware
+ * will notice significant similarities between the NetIO link management
+ * model and the mPIPE link management model. However, the NetIO model
+ * was developed in stages, and some of its features -- for instance,
+ * the default setting of certain flags -- were shaped by the need to be
+ * compatible with previous versions of NetIO. Since the features provided
+ * by the mPIPE hardware and the mPIPE GXIO library are significantly
+ * different than those provided by NetIO, in some cases, we have made
+ * different choices in the mPIPE link management API. Thus, please read
+ * this documentation carefully before assuming that mPIPE link management
+ * operations are exactly equivalent to their NetIO counterparts.
+ */
+
+/* An object used to manage mPIPE link state and resources. */
+typedef struct {
+ /* The overall mPIPE context. */
+ gxio_mpipe_context_t *context;
+
+ /* The channel number used by this link. */
+ uint8_t channel;
+
+ /* The MAC index used by this link. */
+ uint8_t mac;
+} gxio_mpipe_link_t;
+
+/* Retrieve one of this system's legal link names, and its MAC address.
+ *
+ * @param index Link name index. If a system supports N legal link names,
+ * then indices between 0 and N - 1, inclusive, each correspond to one of
+ * those names. Thus, to retrieve all of a system's legal link names,
+ * call this function in a loop, starting with an index of zero, and
+ * incrementing it once per iteration until -1 is returned.
+ * @param link_name Pointer to the buffer which will receive the retrieved
+ * link name. The buffer should contain space for at least
+ * ::GXIO_MPIPE_LINK_NAME_LEN bytes; the returned name, including the
+ * terminating null byte, will be no longer than that.
+ * @param link_name Pointer to the buffer which will receive the retrieved
+ * MAC address. The buffer should contain space for at least 6 bytes.
+ * @return Zero if a link name was successfully retrieved; -1 if one was
+ * not.
+ */
+extern int gxio_mpipe_link_enumerate_mac(int index, char *link_name,
+ uint8_t *mac_addr);
+
+/* Open an mPIPE link.
+ *
+ * A link must be opened before it may be used to send or receive packets,
+ * and before its state may be examined or changed. Depending up on the
+ * link's intended use, one or more link permissions may be requested via
+ * the flags parameter; see @ref gxio_mpipe_link_perm. In addition, flags
+ * may request that the link's state be modified at open time. See @ref
+ * gxio_mpipe_link_states and @ref gxio_mpipe_link_open_flags for more detail.
+ *
+ * @param link A link state object, which will be initialized if this
+ * function completes successfully.
+ * @param context An initialized mPIPE context.
+ * @param link_name Name of the link.
+ * @param flags Zero or more @ref gxio_mpipe_link_open_flags, ORed together.
+ * @return 0 if the link was successfully opened, or a negative error code.
+ *
+ */
+extern int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
+ gxio_mpipe_context_t *context,
+ const char *link_name, unsigned int flags);
+
+/* Close an mPIPE link.
+ *
+ * Closing a link makes it available for use by other processes. Once
+ * a link has been closed, packets may no longer be sent on or received
+ * from the link, and its state may not be examined or changed.
+ *
+ * @param link A link state object, which will no longer be initialized
+ * if this function completes successfully.
+ * @return 0 if the link was successfully closed, or a negative error code.
+ *
+ */
+extern int gxio_mpipe_link_close(gxio_mpipe_link_t *link);
+
+/* Return a link's channel number.
+ *
+ * @param link A properly initialized link state object.
+ * @return The channel number for the link.
+ */
+static inline int gxio_mpipe_link_channel(gxio_mpipe_link_t *link)
+{
+ return link->channel;
+}
+
+///////////////////////////////////////////////////////////////////
+// Timestamp //
+///////////////////////////////////////////////////////////////////
+
+/* Get the timestamp of mPIPE when this routine is called.
+ *
+ * @param context An initialized mPIPE context.
+ * @param ts A timespec structure to store the current clock.
+ * @return If the call was successful, zero; otherwise, a negative error
+ * code.
+ */
+extern int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
+ struct timespec *ts);
+
+/* Set the timestamp of mPIPE.
+ *
+ * @param context An initialized mPIPE context.
+ * @param ts A timespec structure to store the requested clock.
+ * @return If the call was successful, zero; otherwise, a negative error
+ * code.
+ */
+extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
+ const struct timespec *ts);
+
+/* Adjust the timestamp of mPIPE.
+ *
+ * @param context An initialized mPIPE context.
+ * @param delta A signed time offset to adjust, in nanoseconds.
+ * The absolute value of this parameter must be less than or
+ * equal to 1000000000.
+ * @return If the call was successful, zero; otherwise, a negative error
+ * code.
+ */
+extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context,
+ int64_t delta);
+
+#endif /* !_GXIO_MPIPE_H_ */
diff --git a/arch/tile/include/gxio/trio.h b/arch/tile/include/gxio/trio.h
new file mode 100644
index 000000000000..77b80cdd46d8
--- /dev/null
+++ b/arch/tile/include/gxio/trio.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ *
+ * An API for allocating, configuring, and manipulating TRIO hardware
+ * resources
+ */
+
+/*
+ *
+ * The TILE-Gx TRIO shim provides connections to external devices via
+ * PCIe or other transaction IO standards. The gxio_trio_ API,
+ * declared in <gxio/trio.h>, allows applications to allocate and
+ * configure TRIO IO resources like DMA command rings, memory map
+ * windows, and device interrupts. The following sections introduce
+ * the various components of the API. We strongly recommend reading
+ * the TRIO section of the IO Device Guide (UG404) before working with
+ * this API.
+ *
+ * @section trio__ingress TRIO Ingress Hardware Resources
+ *
+ * The TRIO ingress hardware is responsible for examining incoming
+ * PCIe or StreamIO packets and choosing a processing mechanism based
+ * on the packets' bus address. The gxio_trio_ API can be used to
+ * configure different handlers for different ranges of bus address
+ * space. The user can configure "mapped memory" and "scatter queue"
+ * regions to match incoming packets within 4kB-aligned ranges of bus
+ * addresses. Each range specifies a different set of mapping
+ * parameters to be applied when handling the ingress packet. The
+ * following sections describe how to work with MapMem and scatter
+ * queue regions.
+ *
+ * @subsection trio__mapmem TRIO MapMem Regions
+ *
+ * TRIO mapped memory (or MapMem) regions allow the user to map
+ * incoming read and write requests directly to the application's
+ * memory space. MapMem regions are allocated via
+ * gxio_trio_alloc_memory_maps(). Given an integer MapMem number,
+ * applications can use gxio_trio_init_memory_map() to specify the
+ * range of bus addresses that will match the region and the range of
+ * virtual addresses to which those packets will be applied.
+ *
+ * As with many other gxio APIs, the programmer must be sure to
+ * register memory pages that will be used with MapMem regions. Pages
+ * can be registered with TRIO by allocating an ASID (address space
+ * identifier) and then using gxio_trio_register_page() to register up to
+ * 16 pages with the hardware. The initialization functions for
+ * resources that require registered memory (MapMem, scatter queues,
+ * push DMA, and pull DMA) then take an 'asid' parameter in order to
+ * configure which set of registered pages is used by each resource.
+ *
+ * @subsection trio__scatter_queue TRIO Scatter Queues
+ *
+ * The TRIO shim's scatter queue regions allow users to dynamically
+ * map buffers from a large address space into a small range of bus
+ * addresses. This is particularly helpful for PCIe endpoint devices,
+ * where the host generally limits the size of BARs to tens of
+ * megabytes.
+ *
+ * Each scatter queue consists of a memory map region, a queue of
+ * tile-side buffer VAs to be mapped to that region, and a bus-mapped
+ * "doorbell" register that the remote endpoint can write to trigger a
+ * dequeue of the current buffer VA, thus swapping in a new buffer.
+ * The VAs pushed onto a scatter queue must be 4kB aligned, so
+ * applications may need to use higher-level protocols to inform
+ * remote entities that they should apply some additional, sub-4kB
+ * offset when reading or writing the scatter queue region. For more
+ * information, see the IO Device Guide (UG404).
+ *
+ * @section trio__egress TRIO Egress Hardware Resources
+ *
+ * The TRIO shim supports two mechanisms for egress packet generation:
+ * programmed IO (PIO) and push/pull DMA. PIO allows applications to
+ * create MMIO mappings for PCIe or StreamIO address space, such that
+ * the application can generate word-sized read or write transactions
+ * by issuing load or store instructions. Push and pull DMA are tuned
+ * for larger transactions; they use specialized hardware engines to
+ * transfer large blocks of data at line rate.
+ *
+ * @subsection trio__pio TRIO Programmed IO
+ *
+ * Programmed IO allows applications to create MMIO mappings for PCIe
+ * or StreamIO address space. The hardware PIO regions support access
+ * to PCIe configuration, IO, and memory space, but the gxio_trio API
+ * only supports memory space accesses. PIO regions are allocated
+ * with gxio_trio_alloc_pio_regions() and initialized via
+ * gxio_trio_init_pio_region(). Once a region is bound to a range of
+ * bus address via the initialization function, the application can
+ * use gxio_trio_map_pio_region() to create MMIO mappings from its VA
+ * space onto the range of bus addresses supported by the PIO region.
+ *
+ * @subsection trio_dma TRIO Push and Pull DMA
+ *
+ * The TRIO push and pull DMA engines allow users to copy blocks of
+ * data between application memory and the bus. Push DMA generates
+ * write packets that copy from application memory to the bus and pull
+ * DMA generates read packets that copy from the bus into application
+ * memory. The DMA engines are managed via an API that is very
+ * similar to the mPIPE eDMA interface. For a detailed explanation of
+ * the eDMA queue API, see @ref gxio_mpipe_wrappers.
+ *
+ * Push and pull DMA queues are allocated via
+ * gxio_trio_alloc_push_dma_ring() / gxio_trio_alloc_pull_dma_ring().
+ * Once allocated, users generally use a ::gxio_trio_dma_queue_t
+ * object to manage the queue, providing easy wrappers for reserving
+ * command slots in the DMA command ring, filling those slots, and
+ * waiting for commands to complete. DMA queues can be initialized
+ * via gxio_trio_init_push_dma_queue() or
+ * gxio_trio_init_pull_dma_queue().
+ *
+ * See @ref trio/push_dma/app.c for an example of how to use push DMA.
+ *
+ * @section trio_shortcomings Plans for Future API Revisions
+ *
+ * The simulation framework is incomplete. Future features include:
+ *
+ * - Support for reset and deallocation of resources.
+ *
+ * - Support for pull DMA.
+ *
+ * - Support for interrupt regions and user-space interrupt delivery.
+ *
+ * - Support for getting BAR mappings and reserving regions of BAR
+ * address space.
+ */
+#ifndef _GXIO_TRIO_H_
+#define _GXIO_TRIO_H_
+
+#include <linux/types.h>
+
+#include "common.h"
+#include "dma_queue.h"
+
+#include <arch/trio_constants.h>
+#include <arch/trio.h>
+#include <arch/trio_pcie_intfc.h>
+#include <arch/trio_pcie_rc.h>
+#include <arch/trio_shm.h>
+#include <hv/drv_trio_intf.h>
+#include <hv/iorpc.h>
+
+/* A context object used to manage TRIO hardware resources. */
+typedef struct {
+
+ /* File descriptor for calling up to Linux (and thus the HV). */
+ int fd;
+
+ /* The VA at which the MAC MMIO registers are mapped. */
+ char *mmio_base_mac;
+
+ /* The VA at which the PIO config space are mapped for each PCIe MAC.
+ Gx36 has max 3 PCIe MACs per TRIO shim. */
+ char *mmio_base_pio_cfg[TILEGX_TRIO_PCIES];
+
+#ifdef USE_SHARED_PCIE_CONFIG_REGION
+ /* Index of the shared PIO region for PCI config access. */
+ int pio_cfg_index;
+#else
+ /* Index of the PIO region for PCI config access per MAC. */
+ int pio_cfg_index[TILEGX_TRIO_PCIES];
+#endif
+
+ /* The VA at which the push DMA MMIO registers are mapped. */
+ char *mmio_push_dma[TRIO_NUM_PUSH_DMA_RINGS];
+
+ /* The VA at which the pull DMA MMIO registers are mapped. */
+ char *mmio_pull_dma[TRIO_NUM_PUSH_DMA_RINGS];
+
+ /* Application space ID. */
+ unsigned int asid;
+
+} gxio_trio_context_t;
+
+/* Command descriptor for push or pull DMA. */
+typedef TRIO_DMA_DESC_t gxio_trio_dma_desc_t;
+
+/* A convenient, thread-safe interface to an eDMA ring. */
+typedef struct {
+
+ /* State object for tracking head and tail pointers. */
+ __gxio_dma_queue_t dma_queue;
+
+ /* The ring entries. */
+ gxio_trio_dma_desc_t *dma_descs;
+
+ /* The number of entries minus one. */
+ unsigned long mask_num_entries;
+
+ /* The log2() of the number of entries. */
+ unsigned int log2_num_entries;
+
+} gxio_trio_dma_queue_t;
+
+/* Initialize a TRIO context.
+ *
+ * This function allocates a TRIO "service domain" and maps the MMIO
+ * registers into the the caller's VA space.
+ *
+ * @param trio_index Which TRIO shim; Gx36 must pass 0.
+ * @param context Context object to be initialized.
+ */
+extern int gxio_trio_init(gxio_trio_context_t *context,
+ unsigned int trio_index);
+
+/* This indicates that an ASID hasn't been allocated. */
+#define GXIO_ASID_NULL -1
+
+/* Ordering modes for map memory regions and scatter queue regions. */
+typedef enum gxio_trio_order_mode_e {
+ /* Writes are not ordered. Reads always wait for previous writes. */
+ GXIO_TRIO_ORDER_MODE_UNORDERED =
+ TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_UNORDERED,
+ /* Both writes and reads wait for previous transactions to complete. */
+ GXIO_TRIO_ORDER_MODE_STRICT =
+ TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_STRICT,
+ /* Writes are ordered unless the incoming packet has the
+ relaxed-ordering attributes set. */
+ GXIO_TRIO_ORDER_MODE_OBEY_PACKET =
+ TRIO_MAP_MEM_SETUP__ORDER_MODE_VAL_REL_ORD
+} gxio_trio_order_mode_t;
+
+/* Initialize a memory mapping region.
+ *
+ * @param context An initialized TRIO context.
+ * @param map A Memory map region allocated by gxio_trio_alloc_memory_map().
+ * @param target_mem VA of backing memory, should be registered via
+ * gxio_trio_register_page() and aligned to 4kB.
+ * @param target_size Length of the memory mapping, must be a multiple
+ * of 4kB.
+ * @param asid ASID to be used for Tile-side address translation.
+ * @param mac MAC number.
+ * @param bus_address Bus address at which the mapping starts.
+ * @param order_mode Memory ordering mode for this mapping.
+ * @return Zero on success, else ::GXIO_TRIO_ERR_BAD_MEMORY_MAP,
+ * GXIO_TRIO_ERR_BAD_ASID, or ::GXIO_TRIO_ERR_BAD_BUS_RANGE.
+ */
+extern int gxio_trio_init_memory_map(gxio_trio_context_t *context,
+ unsigned int map, void *target_mem,
+ size_t target_size, unsigned int asid,
+ unsigned int mac, uint64_t bus_address,
+ gxio_trio_order_mode_t order_mode);
+
+/* Flags that can be passed to resource allocation functions. */
+enum gxio_trio_alloc_flags_e {
+ GXIO_TRIO_ALLOC_FIXED = HV_TRIO_ALLOC_FIXED,
+};
+
+/* Flags that can be passed to memory registration functions. */
+enum gxio_trio_mem_flags_e {
+ /* Do not fill L3 when writing, and invalidate lines upon egress. */
+ GXIO_TRIO_MEM_FLAG_NT_HINT = IORPC_MEM_BUFFER_FLAG_NT_HINT,
+
+ /* L3 cache fills should only populate IO cache ways. */
+ GXIO_TRIO_MEM_FLAG_IO_PIN = IORPC_MEM_BUFFER_FLAG_IO_PIN,
+};
+
+/* Flag indicating a request generator uses a special traffic
+ class. */
+#define GXIO_TRIO_FLAG_TRAFFIC_CLASS(N) HV_TRIO_FLAG_TC(N)
+
+/* Flag indicating a request generator uses a virtual function
+ number. */
+#define GXIO_TRIO_FLAG_VFUNC(N) HV_TRIO_FLAG_VFUNC(N)
+
+/*****************************************************************
+ * Memory Registration *
+ ******************************************************************/
+
+/* Allocate Application Space Identifiers (ASIDs). Each ASID can
+ * register up to 16 page translations. ASIDs are used by memory map
+ * regions, scatter queues, and DMA queues to translate application
+ * VAs into memory system PAs.
+ *
+ * @param context An initialized TRIO context.
+ * @param count Number of ASIDs required.
+ * @param first Index of first ASID if ::GXIO_TRIO_ALLOC_FIXED flag
+ * is set, otherwise ignored.
+ * @param flags Flag bits, including bits from ::gxio_trio_alloc_flags_e.
+ * @return Index of first ASID, or ::GXIO_TRIO_ERR_NO_ASID if allocation
+ * failed.
+ */
+extern int gxio_trio_alloc_asids(gxio_trio_context_t *context,
+ unsigned int count, unsigned int first,
+ unsigned int flags);
+
+#endif /* ! _GXIO_TRIO_H_ */
diff --git a/arch/tile/include/gxio/usb_host.h b/arch/tile/include/gxio/usb_host.h
new file mode 100644
index 000000000000..a60a126e4565
--- /dev/null
+++ b/arch/tile/include/gxio/usb_host.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _GXIO_USB_H_
+#define _GXIO_USB_H_
+
+#include "common.h"
+
+#include <hv/drv_usb_host_intf.h>
+#include <hv/iorpc.h>
+
+/*
+ *
+ * An API for manipulating general-purpose I/O pins.
+ */
+
+/*
+ *
+ * The USB shim allows access to the processor's Universal Serial Bus
+ * connections.
+ */
+
+/* A context object used to manage USB hardware resources. */
+typedef struct {
+
+ /* File descriptor for calling up to the hypervisor. */
+ int fd;
+
+ /* The VA at which our MMIO registers are mapped. */
+ char *mmio_base;
+} gxio_usb_host_context_t;
+
+/* Initialize a USB context.
+ *
+ * A properly initialized context must be obtained before any of the other
+ * gxio_usb_host routines may be used.
+ *
+ * @param context Pointer to a gxio_usb_host_context_t, which will be
+ * initialized by this routine, if it succeeds.
+ * @param usb_index Index of the USB shim to use.
+ * @param is_ehci Nonzero to use the EHCI interface; zero to use the OHCI
+ * intereface.
+ * @return Zero if the context was successfully initialized, else a
+ * GXIO_ERR_xxx error code.
+ */
+extern int gxio_usb_host_init(gxio_usb_host_context_t * context, int usb_index,
+ int is_ehci);
+
+/* Destroy a USB context.
+ *
+ * Once destroyed, a context may not be used with any gxio_usb_host routines
+ * other than gxio_usb_host_init(). After this routine returns, no further
+ * interrupts or signals requested on this context will be delivered. The
+ * state and configuration of the pins which had been attached to this
+ * context are unchanged by this operation.
+ *
+ * @param context Pointer to a gxio_usb_host_context_t.
+ * @return Zero if the context was successfully destroyed, else a
+ * GXIO_ERR_xxx error code.
+ */
+extern int gxio_usb_host_destroy(gxio_usb_host_context_t * context);
+
+/* Retrieve the address of the shim's MMIO registers.
+ *
+ * @param context Pointer to a properly initialized gxio_usb_host_context_t.
+ * @return The address of the shim's MMIO registers.
+ */
+extern void *gxio_usb_host_get_reg_start(gxio_usb_host_context_t * context);
+
+/* Retrieve the length of the shim's MMIO registers.
+ *
+ * @param context Pointer to a properly initialized gxio_usb_host_context_t.
+ * @return The length of the shim's MMIO registers.
+ */
+extern size_t gxio_usb_host_get_reg_len(gxio_usb_host_context_t * context);
+
+#endif /* _GXIO_USB_H_ */
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h
new file mode 100644
index 000000000000..6cdae3bf046e
--- /dev/null
+++ b/arch/tile/include/hv/drv_mpipe_intf.h
@@ -0,0 +1,602 @@
+/*
+ * Copyright 2011 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * Interface definitions for the mpipe driver.
+ */
+
+#ifndef _SYS_HV_DRV_MPIPE_INTF_H
+#define _SYS_HV_DRV_MPIPE_INTF_H
+
+#include <arch/mpipe.h>
+#include <arch/mpipe_constants.h>
+
+
+/** Number of buffer stacks (32). */
+#define HV_MPIPE_NUM_BUFFER_STACKS \
+ (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)
+
+/** Number of NotifRings (256). */
+#define HV_MPIPE_NUM_NOTIF_RINGS (MPIPE_NUM_NOTIF_RINGS)
+
+/** Number of NotifGroups (32). */
+#define HV_MPIPE_NUM_NOTIF_GROUPS (MPIPE_NUM_NOTIF_GROUPS)
+
+/** Number of buckets (4160). */
+#define HV_MPIPE_NUM_BUCKETS (MPIPE_NUM_BUCKETS)
+
+/** Number of "lo" buckets (4096). */
+#define HV_MPIPE_NUM_LO_BUCKETS 4096
+
+/** Number of "hi" buckets (64). */
+#define HV_MPIPE_NUM_HI_BUCKETS \
+ (HV_MPIPE_NUM_BUCKETS - HV_MPIPE_NUM_LO_BUCKETS)
+
+/** Number of edma rings (24). */
+#define HV_MPIPE_NUM_EDMA_RINGS \
+ (MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH)
+
+
+
+
+/** A flag bit indicating a fixed resource allocation. */
+#define HV_MPIPE_ALLOC_FIXED 0x01
+
+/** Offset for the config register MMIO region. */
+#define HV_MPIPE_CONFIG_MMIO_OFFSET \
+ (MPIPE_MMIO_ADDR__REGION_VAL_CFG << MPIPE_MMIO_ADDR__REGION_SHIFT)
+
+/** Size of the config register MMIO region. */
+#define HV_MPIPE_CONFIG_MMIO_SIZE (64 * 1024)
+
+/** Offset for the config register MMIO region. */
+#define HV_MPIPE_FAST_MMIO_OFFSET \
+ (MPIPE_MMIO_ADDR__REGION_VAL_IDMA << MPIPE_MMIO_ADDR__REGION_SHIFT)
+
+/** Size of the fast register MMIO region (IDMA, EDMA, buffer stack). */
+#define HV_MPIPE_FAST_MMIO_SIZE \
+ ((MPIPE_MMIO_ADDR__REGION_VAL_BSM + 1 - MPIPE_MMIO_ADDR__REGION_VAL_IDMA) \
+ << MPIPE_MMIO_ADDR__REGION_SHIFT)
+
+
+/*
+ * Each type of resource allocation comes in quantized chunks, where
+ * XXX_BITS is the number of chunks, and XXX_RES_PER_BIT is the number
+ * of resources in each chunk.
+ */
+
+/** Number of buffer stack chunks available (32). */
+#define HV_MPIPE_ALLOC_BUFFER_STACKS_BITS \
+ MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH
+
+/** Granularity of buffer stack allocation (1). */
+#define HV_MPIPE_ALLOC_BUFFER_STACKS_RES_PER_BIT \
+ (HV_MPIPE_NUM_BUFFER_STACKS / HV_MPIPE_ALLOC_BUFFER_STACKS_BITS)
+
+/** Number of NotifRing chunks available (32). */
+#define HV_MPIPE_ALLOC_NOTIF_RINGS_BITS \
+ MPIPE_MMIO_INIT_DAT_GX36_0__NOTIF_RING_MASK_WIDTH
+
+/** Granularity of NotifRing allocation (8). */
+#define HV_MPIPE_ALLOC_NOTIF_RINGS_RES_PER_BIT \
+ (HV_MPIPE_NUM_NOTIF_RINGS / HV_MPIPE_ALLOC_NOTIF_RINGS_BITS)
+
+/** Number of NotifGroup chunks available (32). */
+#define HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS \
+ HV_MPIPE_NUM_NOTIF_GROUPS
+
+/** Granularity of NotifGroup allocation (1). */
+#define HV_MPIPE_ALLOC_NOTIF_GROUPS_RES_PER_BIT \
+ (HV_MPIPE_NUM_NOTIF_GROUPS / HV_MPIPE_ALLOC_NOTIF_GROUPS_BITS)
+
+/** Number of lo bucket chunks available (16). */
+#define HV_MPIPE_ALLOC_LO_BUCKETS_BITS \
+ MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_LO_WIDTH
+
+/** Granularity of lo bucket allocation (256). */
+#define HV_MPIPE_ALLOC_LO_BUCKETS_RES_PER_BIT \
+ (HV_MPIPE_NUM_LO_BUCKETS / HV_MPIPE_ALLOC_LO_BUCKETS_BITS)
+
+/** Number of hi bucket chunks available (16). */
+#define HV_MPIPE_ALLOC_HI_BUCKETS_BITS \
+ MPIPE_MMIO_INIT_DAT_GX36_0__BUCKET_RELEASE_MASK_HI_WIDTH
+
+/** Granularity of hi bucket allocation (4). */
+#define HV_MPIPE_ALLOC_HI_BUCKETS_RES_PER_BIT \
+ (HV_MPIPE_NUM_HI_BUCKETS / HV_MPIPE_ALLOC_HI_BUCKETS_BITS)
+
+/** Number of eDMA ring chunks available (24). */
+#define HV_MPIPE_ALLOC_EDMA_RINGS_BITS \
+ MPIPE_MMIO_INIT_DAT_GX36_1__EDMA_POST_MASK_WIDTH
+
+/** Granularity of eDMA ring allocation (1). */
+#define HV_MPIPE_ALLOC_EDMA_RINGS_RES_PER_BIT \
+ (HV_MPIPE_NUM_EDMA_RINGS / HV_MPIPE_ALLOC_EDMA_RINGS_BITS)
+
+
+
+
+/** Bit vector encoding which NotifRings are in a NotifGroup. */
+typedef struct
+{
+ /** The actual bits. */
+ uint64_t ring_mask[4];
+
+} gxio_mpipe_notif_group_bits_t;
+
+
+/** Another name for MPIPE_LBL_INIT_DAT_BSTS_TBL_t. */
+typedef MPIPE_LBL_INIT_DAT_BSTS_TBL_t gxio_mpipe_bucket_info_t;
+
+
+
+/** Eight buffer stack ids. */
+typedef struct
+{
+ /** The stacks. */
+ uint8_t stacks[8];
+
+} gxio_mpipe_rules_stacks_t;
+
+
+/** A destination mac address. */
+typedef struct
+{
+ /** The octets. */
+ uint8_t octets[6];
+
+} gxio_mpipe_rules_dmac_t;
+
+
+/** A vlan. */
+typedef uint16_t gxio_mpipe_rules_vlan_t;
+
+
+
+/** Maximum number of characters in a link name. */
+#define GXIO_MPIPE_LINK_NAME_LEN 32
+
+
+/** Structure holding a link name. Only needed, and only typedef'ed,
+ * because the IORPC stub generator only handles types which are single
+ * words coming before the parameter name. */
+typedef struct
+{
+ /** The name itself. */
+ char name[GXIO_MPIPE_LINK_NAME_LEN];
+}
+_gxio_mpipe_link_name_t;
+
+/** Maximum number of characters in a symbol name. */
+#define GXIO_MPIPE_SYMBOL_NAME_LEN 128
+
+
+/** Structure holding a symbol name. Only needed, and only typedef'ed,
+ * because the IORPC stub generator only handles types which are single
+ * words coming before the parameter name. */
+typedef struct
+{
+ /** The name itself. */
+ char name[GXIO_MPIPE_SYMBOL_NAME_LEN];
+}
+_gxio_mpipe_symbol_name_t;
+
+
+/** Structure holding a MAC address. */
+typedef struct
+{
+ /** The address. */
+ uint8_t mac[6];
+}
+_gxio_mpipe_link_mac_t;
+
+
+
+/** Request shared data permission -- that is, the ability to send and
+ * receive packets -- on the specified link. Other processes may also
+ * request shared data permission on the same link.
+ *
+ * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
+ * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
+ */
+#define GXIO_MPIPE_LINK_DATA 0x00000001UL
+
+/** Do not request data permission on the specified link.
+ *
+ * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
+ * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
+ */
+#define GXIO_MPIPE_LINK_NO_DATA 0x00000002UL
+
+/** Request exclusive data permission -- that is, the ability to send and
+ * receive packets -- on the specified link. No other processes may
+ * request data permission on this link, and if any process already has
+ * data permission on it, this open will fail.
+ *
+ * No more than one of ::GXIO_MPIPE_LINK_DATA, ::GXIO_MPIPE_LINK_NO_DATA,
+ * or ::GXIO_MPIPE_LINK_EXCL_DATA may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_DATA is assumed.
+ */
+#define GXIO_MPIPE_LINK_EXCL_DATA 0x00000004UL
+
+/** Request shared stats permission -- that is, the ability to read and write
+ * registers which contain link statistics, and to get link attributes --
+ * on the specified link. Other processes may also request shared stats
+ * permission on the same link.
+ *
+ * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
+ * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
+ */
+#define GXIO_MPIPE_LINK_STATS 0x00000008UL
+
+/** Do not request stats permission on the specified link.
+ *
+ * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
+ * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
+ */
+#define GXIO_MPIPE_LINK_NO_STATS 0x00000010UL
+
+/** Request exclusive stats permission -- that is, the ability to read and
+ * write registers which contain link statistics, and to get link
+ * attributes -- on the specified link. No other processes may request
+ * stats permission on this link, and if any process already
+ * has stats permission on it, this open will fail.
+ *
+ * Requesting exclusive stats permission is normally a very bad idea, since
+ * it prevents programs like mpipe-stat from providing information on this
+ * link. Applications should only do this if they use MAC statistics
+ * registers, and cannot tolerate any of the clear-on-read registers being
+ * reset by other statistics programs.
+ *
+ * No more than one of ::GXIO_MPIPE_LINK_STATS, ::GXIO_MPIPE_LINK_NO_STATS,
+ * or ::GXIO_MPIPE_LINK_EXCL_STATS may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_STATS is assumed.
+ */
+#define GXIO_MPIPE_LINK_EXCL_STATS 0x00000020UL
+
+/** Request shared control permission -- that is, the ability to modify link
+ * attributes, and read and write MAC and MDIO registers -- on the
+ * specified link. Other processes may also request shared control
+ * permission on the same link.
+ *
+ * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
+ * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
+ */
+#define GXIO_MPIPE_LINK_CTL 0x00000040UL
+
+/** Do not request control permission on the specified link.
+ *
+ * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
+ * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
+ */
+#define GXIO_MPIPE_LINK_NO_CTL 0x00000080UL
+
+/** Request exclusive control permission -- that is, the ability to modify
+ * link attributes, and read and write MAC and MDIO registers -- on the
+ * specified link. No other processes may request control permission on
+ * this link, and if any process already has control permission on it,
+ * this open will fail.
+ *
+ * Requesting exclusive control permission is not always a good idea, since
+ * it prevents programs like mpipe-link from configuring the link.
+ *
+ * No more than one of ::GXIO_MPIPE_LINK_CTL, ::GXIO_MPIPE_LINK_NO_CTL,
+ * or ::GXIO_MPIPE_LINK_EXCL_CTL may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_CTL is assumed.
+ */
+#define GXIO_MPIPE_LINK_EXCL_CTL 0x00000100UL
+
+/** Set the desired state of the link to up, allowing any speeds which are
+ * supported by the link hardware, as part of this open operation; do not
+ * change the desired state of the link when it is closed or the process
+ * exits. No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
+ * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
+ * ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
+ */
+#define GXIO_MPIPE_LINK_AUTO_UP 0x00000200UL
+
+/** Set the desired state of the link to up, allowing any speeds which are
+ * supported by the link hardware, as part of this open operation; when the
+ * link is closed or this process exits, if no other process has the link
+ * open, set the desired state of the link to down. No more than one of
+ * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
+ * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
+ * specifed in a gxio_mpipe_link_open() call. If none are specified,
+ * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
+ */
+#define GXIO_MPIPE_LINK_AUTO_UPDOWN 0x00000400UL
+
+/** Do not change the desired state of the link as part of the open
+ * operation; when the link is closed or this process exits, if no other
+ * process has the link open, set the desired state of the link to down.
+ * No more than one of ::GXIO_MPIPE_LINK_AUTO_UP,
+ * ::GXIO_MPIPE_LINK_AUTO_UPDOWN, ::GXIO_MPIPE_LINK_AUTO_DOWN, or
+ * ::GXIO_MPIPE_LINK_AUTO_NONE may be specifed in a gxio_mpipe_link_open()
+ * call. If none are specified, ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
+ */
+#define GXIO_MPIPE_LINK_AUTO_DOWN 0x00000800UL
+
+/** Do not change the desired state of the link as part of the open
+ * operation; do not change the desired state of the link when it is
+ * closed or the process exits. No more than one of
+ * ::GXIO_MPIPE_LINK_AUTO_UP, ::GXIO_MPIPE_LINK_AUTO_UPDOWN,
+ * ::GXIO_MPIPE_LINK_AUTO_DOWN, or ::GXIO_MPIPE_LINK_AUTO_NONE may be
+ * specifed in a gxio_mpipe_link_open() call. If none are specified,
+ * ::GXIO_MPIPE_LINK_AUTO_UPDOWN is assumed.
+ */
+#define GXIO_MPIPE_LINK_AUTO_NONE 0x00001000UL
+
+/** Request that this open call not complete until the network link is up.
+ * The process will wait as long as necessary for this to happen;
+ * applications which wish to abandon waiting for the link after a
+ * specific time period should not specify this flag when opening a link,
+ * but should instead call gxio_mpipe_link_wait() afterward. The link
+ * must be opened with stats permission. Note that this flag by itself
+ * does not change the desired link state; if other open flags or previous
+ * link state changes have not requested a desired state of up, the open
+ * call will never complete. This flag is not available to kernel
+ * clients.
+ */
+#define GXIO_MPIPE_LINK_WAIT 0x00002000UL
+
+
+/*
+ * Note: link attributes must fit in 24 bits, since we use the top 8 bits
+ * of the IORPC offset word for the channel number.
+ */
+
+/** Determine whether jumbo frames may be received. If this attribute's
+ * value value is nonzero, the MAC will accept frames of up to 10240 bytes.
+ * If the value is zero, the MAC will only accept frames of up to 1544
+ * bytes. The default value is zero. */
+#define GXIO_MPIPE_LINK_RECEIVE_JUMBO 0x010000
+
+/** Determine whether to send pause frames on this link if the mPIPE packet
+ * FIFO is nearly full. If the value is zero, pause frames are not sent.
+ * If the value is nonzero, it is the delay value which will be sent in any
+ * pause frames which are output, in units of 512 bit times.
+ *
+ * Bear in mind that in almost all circumstances, the mPIPE packet FIFO
+ * will never fill up, since mPIPE will empty it as fast as or faster than
+ * the incoming data rate, by either delivering or dropping packets. The
+ * only situation in which this is not true is if the memory and cache
+ * subsystem is extremely heavily loaded, and mPIPE cannot perform DMA of
+ * packet data to memory in a timely fashion. In particular, pause frames
+ * will <em>not</em> be sent if packets cannot be delivered because
+ * NotifRings are full, buckets are full, or buffers are not available in
+ * a buffer stack. */
+#define GXIO_MPIPE_LINK_SEND_PAUSE 0x020000
+
+/** Determine whether to suspend output on the receipt of pause frames.
+ * If the value is nonzero, mPIPE shim will suspend output on the link's
+ * channel when a pause frame is received. If the value is zero, pause
+ * frames will be ignored. The default value is zero. */
+#define GXIO_MPIPE_LINK_RECEIVE_PAUSE 0x030000
+
+/** Interface MAC address. The value is a 6-byte MAC address, in the least
+ * significant 48 bits of the value; in other words, an address which would
+ * be printed as '12:34:56:78:90:AB' in IEEE 802 canonical format would
+ * be returned as 0x12345678ab.
+ *
+ * Depending upon the overall system design, a MAC address may or may not
+ * be available for each interface. Note that the interface's MAC address
+ * does not limit the packets received on its channel, although the
+ * classifier's rules could be configured to do that. Similarly, the MAC
+ * address is not used when transmitting packets, although applications
+ * could certainly decide to use the assigned address as a source MAC
+ * address when doing so. This attribute may only be retrieved with
+ * gxio_mpipe_link_get_attr(); it may not be modified.
+ */
+#define GXIO_MPIPE_LINK_MAC 0x040000
+
+/** Determine whether to discard egress packets on link down. If this value
+ * is nonzero, packets sent on this link while the link is down will be
+ * discarded. If this value is zero, no packets will be sent on this link
+ * while it is down. The default value is one. */
+#define GXIO_MPIPE_LINK_DISCARD_IF_DOWN 0x050000
+
+/** Possible link state. The value is a combination of link state flags,
+ * ORed together, that indicate link modes which are actually supported by
+ * the hardware. This attribute may only be retrieved with
+ * gxio_mpipe_link_get_attr(); it may not be modified. */
+#define GXIO_MPIPE_LINK_POSSIBLE_STATE 0x060000
+
+/** Current link state. The value is a combination of link state flags,
+ * ORed together, that indicate the current state of the hardware. If the
+ * link is down, the value ANDed with ::GXIO_MPIPE_LINK_SPEED will be zero;
+ * if the link is up, the value ANDed with ::GXIO_MPIPE_LINK_SPEED will
+ * result in exactly one of the speed values, indicating the current speed.
+ * This attribute may only be retrieved with gxio_mpipe_link_get_attr(); it
+ * may not be modified. */
+#define GXIO_MPIPE_LINK_CURRENT_STATE 0x070000
+
+/** Desired link state. The value is a conbination of flags, which specify
+ * the desired state for the link. With gxio_mpipe_link_set_attr(), this
+ * will, in the background, attempt to bring up the link using whichever of
+ * the requested flags are reasonable, or take down the link if the flags
+ * are zero. The actual link up or down operation may happen after this
+ * call completes. If the link state changes in the future, the system
+ * will continue to try to get back to the desired link state; for
+ * instance, if the link is brought up successfully, and then the network
+ * cable is disconnected, the link will go down. However, the desired
+ * state of the link is still up, so if the cable is reconnected, the link
+ * will be brought up again.
+ *
+ * With gxio_mpipe_link_set_attr(), this will indicate the desired state
+ * for the link, as set with a previous gxio_mpipe_link_set_attr() call,
+ * or implicitly by a gxio_mpipe_link_open() or link close operation.
+ * This may not reflect the current state of the link; to get that, use
+ * ::GXIO_MPIPE_LINK_CURRENT_STATE.
+ */
+#define GXIO_MPIPE_LINK_DESIRED_STATE 0x080000
+
+
+
+/** Link can run, should run, or is running at 10 Mbps. */
+#define GXIO_MPIPE_LINK_10M 0x0000000000000001UL
+
+/** Link can run, should run, or is running at 100 Mbps. */
+#define GXIO_MPIPE_LINK_100M 0x0000000000000002UL
+
+/** Link can run, should run, or is running at 1 Gbps. */
+#define GXIO_MPIPE_LINK_1G 0x0000000000000004UL
+
+/** Link can run, should run, or is running at 10 Gbps. */
+#define GXIO_MPIPE_LINK_10G 0x0000000000000008UL
+
+/** Link can run, should run, or is running at 20 Gbps. */
+#define GXIO_MPIPE_LINK_20G 0x0000000000000010UL
+
+/** Link can run, should run, or is running at 25 Gbps. */
+#define GXIO_MPIPE_LINK_25G 0x0000000000000020UL
+
+/** Link can run, should run, or is running at 50 Gbps. */
+#define GXIO_MPIPE_LINK_50G 0x0000000000000040UL
+
+/** Link should run at the highest speed supported by the link and by
+ * the device connected to the link. Only usable as a value for
+ * the link's desired state; never returned as a value for the current
+ * or possible states. */
+#define GXIO_MPIPE_LINK_ANYSPEED 0x0000000000000800UL
+
+/** All legal link speeds. This value is provided for use in extracting
+ * the speed-related subset of the link state flags; it is not intended
+ * to be set directly as a value for one of the GXIO_MPIPE_LINK_xxx_STATE
+ * attributes. A link is up or is requested to be up if its current or
+ * desired state, respectively, ANDED with this value, is nonzero. */
+#define GXIO_MPIPE_LINK_SPEED_MASK 0x0000000000000FFFUL
+
+/** Link can run, should run, or is running in MAC loopback mode. This
+ * loops transmitted packets back to the receiver, inside the Tile
+ * Processor. */
+#define GXIO_MPIPE_LINK_LOOP_MAC 0x0000000000001000UL
+
+/** Link can run, should run, or is running in PHY loopback mode. This
+ * loops transmitted packets back to the receiver, inside the external
+ * PHY chip. */
+#define GXIO_MPIPE_LINK_LOOP_PHY 0x0000000000002000UL
+
+/** Link can run, should run, or is running in external loopback mode.
+ * This requires that an external loopback plug be installed on the
+ * Ethernet port. Note that only some links require that this be
+ * configured via the gxio_mpipe_link routines; other links can do
+ * external loopack with the plug and no special configuration. */
+#define GXIO_MPIPE_LINK_LOOP_EXT 0x0000000000004000UL
+
+/** All legal loopback types. */
+#define GXIO_MPIPE_LINK_LOOP_MASK 0x000000000000F000UL
+
+/** Link can run, should run, or is running in full-duplex mode.
+ * If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are
+ * specified in a set of desired state flags, both are assumed. */
+#define GXIO_MPIPE_LINK_FDX 0x0000000000010000UL
+
+/** Link can run, should run, or is running in half-duplex mode.
+ * If neither ::GXIO_MPIPE_LINK_FDX nor ::GXIO_MPIPE_LINK_HDX are
+ * specified in a set of desired state flags, both are assumed. */
+#define GXIO_MPIPE_LINK_HDX 0x0000000000020000UL
+
+
+/** An individual rule. */
+typedef struct
+{
+ /** The total size. */
+ uint16_t size;
+
+ /** The priority. */
+ int16_t priority;
+
+ /** The "headroom" in each buffer. */
+ uint8_t headroom;
+
+ /** The "tailroom" in each buffer. */
+ uint8_t tailroom;
+
+ /** The "capacity" of the largest buffer. */
+ uint16_t capacity;
+
+ /** The mask for converting a flow hash into a bucket. */
+ uint16_t bucket_mask;
+
+ /** The offset for converting a flow hash into a bucket. */
+ uint16_t bucket_first;
+
+ /** The buffer stack ids. */
+ gxio_mpipe_rules_stacks_t stacks;
+
+ /** The actual channels. */
+ uint32_t channel_bits;
+
+ /** The number of dmacs. */
+ uint16_t num_dmacs;
+
+ /** The number of vlans. */
+ uint16_t num_vlans;
+
+ /** The actual dmacs and vlans. */
+ uint8_t dmacs_and_vlans[];
+
+} gxio_mpipe_rules_rule_t;
+
+
+/** A list of classifier rules. */
+typedef struct
+{
+ /** The offset to the end of the current rule. */
+ uint16_t tail;
+
+ /** The offset to the start of the current rule. */
+ uint16_t head;
+
+ /** The actual rules. */
+ uint8_t rules[4096 - 4];
+
+} gxio_mpipe_rules_list_t;
+
+
+
+
+/** mPIPE statistics structure. These counters include all relevant
+ * events occurring on all links within the mPIPE shim. */
+typedef struct
+{
+ /** Number of ingress packets dropped for any reason. */
+ uint64_t ingress_drops;
+ /** Number of ingress packets dropped because a buffer stack was empty. */
+ uint64_t ingress_drops_no_buf;
+ /** Number of ingress packets dropped or truncated due to lack of space in
+ * the iPkt buffer. */
+ uint64_t ingress_drops_ipkt;
+ /** Number of ingress packets dropped by the classifier or load balancer */
+ uint64_t ingress_drops_cls_lb;
+ /** Total number of ingress packets. */
+ uint64_t ingress_packets;
+ /** Total number of egress packets. */
+ uint64_t egress_packets;
+ /** Total number of ingress bytes. */
+ uint64_t ingress_bytes;
+ /** Total number of egress bytes. */
+ uint64_t egress_bytes;
+}
+gxio_mpipe_stats_t;
+
+
+#endif /* _SYS_HV_DRV_MPIPE_INTF_H */
diff --git a/arch/tile/include/hv/drv_trio_intf.h b/arch/tile/include/hv/drv_trio_intf.h
new file mode 100644
index 000000000000..ef9f3f52ee27
--- /dev/null
+++ b/arch/tile/include/hv/drv_trio_intf.h
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * Interface definitions for the trio driver.
+ */
+
+#ifndef _SYS_HV_DRV_TRIO_INTF_H
+#define _SYS_HV_DRV_TRIO_INTF_H
+
+#include <arch/trio.h>
+
+/** The vendor ID for all Tilera processors. */
+#define TILERA_VENDOR_ID 0x1a41
+
+/** The device ID for the Gx36 processor. */
+#define TILERA_GX36_DEV_ID 0x0200
+
+/** Device ID for our internal bridge when running as RC. */
+#define TILERA_GX36_RC_DEV_ID 0x2000
+
+/** Maximum number of TRIO interfaces. */
+#define TILEGX_NUM_TRIO 2
+
+/** Gx36 has max 3 PCIe MACs per TRIO interface. */
+#define TILEGX_TRIO_PCIES 3
+
+/** Specify port properties for a PCIe MAC. */
+struct pcie_port_property
+{
+ /** If true, the link can be configured in PCIe root complex mode. */
+ uint8_t allow_rc: 1;
+
+ /** If true, the link can be configured in PCIe endpoint mode. */
+ uint8_t allow_ep: 1;
+
+ /** If true, the link can be configured in StreamIO mode. */
+ uint8_t allow_sio: 1;
+
+ /** If true, the link is allowed to support 1-lane operation. Software
+ * will not consider it an error if the link comes up as a x1 link. */
+ uint8_t allow_x1: 1;
+
+ /** If true, the link is allowed to support 2-lane operation. Software
+ * will not consider it an error if the link comes up as a x2 link. */
+ uint8_t allow_x2: 1;
+
+ /** If true, the link is allowed to support 4-lane operation. Software
+ * will not consider it an error if the link comes up as a x4 link. */
+ uint8_t allow_x4: 1;
+
+ /** If true, the link is allowed to support 8-lane operation. Software
+ * will not consider it an error if the link comes up as a x8 link. */
+ uint8_t allow_x8: 1;
+
+ /** Reserved. */
+ uint8_t reserved: 1;
+
+};
+
+/** Configurations can be issued to configure a char stream interrupt. */
+typedef enum pcie_stream_intr_config_sel_e
+{
+ /** Interrupt configuration for memory map regions. */
+ MEM_MAP_SEL,
+
+ /** Interrupt configuration for push DMAs. */
+ PUSH_DMA_SEL,
+
+ /** Interrupt configuration for pull DMAs. */
+ PULL_DMA_SEL,
+}
+pcie_stream_intr_config_sel_t;
+
+
+/** The mmap file offset (PA) of the TRIO config region. */
+#define HV_TRIO_CONFIG_OFFSET \
+ ((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_CFG << \
+ TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT)
+
+/** The maximum size of the TRIO config region. */
+#define HV_TRIO_CONFIG_SIZE \
+ (1ULL << TRIO_CFG_REGION_ADDR__REGION_SHIFT)
+
+/** Size of the config region mapped into client. We can't use
+ * TRIO_MMIO_ADDRESS_SPACE__OFFSET_WIDTH because it
+ * will require the kernel to allocate 4GB VA space
+ * from the VMALLOC region which has a total range
+ * of 4GB.
+ */
+#define HV_TRIO_CONFIG_IOREMAP_SIZE \
+ ((uint64_t) 1 << TRIO_CFG_REGION_ADDR__PROT_SHIFT)
+
+/** The mmap file offset (PA) of a scatter queue region. */
+#define HV_TRIO_SQ_OFFSET(queue) \
+ (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_MAP_SQ << \
+ TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
+ ((queue) << TRIO_MAP_SQ_REGION_ADDR__SQ_SEL_SHIFT))
+
+/** The maximum size of a scatter queue region. */
+#define HV_TRIO_SQ_SIZE \
+ (1ULL << TRIO_MAP_SQ_REGION_ADDR__SQ_SEL_SHIFT)
+
+
+/** The "hardware MMIO region" of the first PIO region. */
+#define HV_TRIO_FIRST_PIO_REGION 8
+
+/** The mmap file offset (PA) of a PIO region. */
+#define HV_TRIO_PIO_OFFSET(region) \
+ (((unsigned long long)(region) + HV_TRIO_FIRST_PIO_REGION) \
+ << TRIO_PIO_REGIONS_ADDR__REGION_SHIFT)
+
+/** The maximum size of a PIO region. */
+#define HV_TRIO_PIO_SIZE (1ULL << TRIO_PIO_REGIONS_ADDR__ADDR_WIDTH)
+
+
+/** The mmap file offset (PA) of a push DMA region. */
+#define HV_TRIO_PUSH_DMA_OFFSET(ring) \
+ (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_PUSH_DMA << \
+ TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
+ ((ring) << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT))
+
+/** The mmap file offset (PA) of a pull DMA region. */
+#define HV_TRIO_PULL_DMA_OFFSET(ring) \
+ (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_PULL_DMA << \
+ TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
+ ((ring) << TRIO_PULL_DMA_REGION_ADDR__RING_SEL_SHIFT))
+
+/** The maximum size of a DMA region. */
+#define HV_TRIO_DMA_REGION_SIZE \
+ (1ULL << TRIO_PUSH_DMA_REGION_ADDR__RING_SEL_SHIFT)
+
+
+/** The mmap file offset (PA) of a Mem-Map interrupt region. */
+#define HV_TRIO_MEM_MAP_INTR_OFFSET(map) \
+ (((unsigned long long)TRIO_MMIO_ADDRESS_SPACE__REGION_VAL_MAP_MEM << \
+ TRIO_MMIO_ADDRESS_SPACE__REGION_SHIFT) | \
+ ((map) << TRIO_MAP_MEM_REGION_ADDR__MAP_SEL_SHIFT))
+
+/** The maximum size of a Mem-Map interrupt region. */
+#define HV_TRIO_MEM_MAP_INTR_SIZE \
+ (1ULL << TRIO_MAP_MEM_REGION_ADDR__MAP_SEL_SHIFT)
+
+
+/** A flag bit indicating a fixed resource allocation. */
+#define HV_TRIO_ALLOC_FIXED 0x01
+
+/** TRIO requires that all mappings have 4kB aligned start addresses. */
+#define HV_TRIO_PAGE_SHIFT 12
+
+/** TRIO requires that all mappings have 4kB aligned start addresses. */
+#define HV_TRIO_PAGE_SIZE (1ull << HV_TRIO_PAGE_SHIFT)
+
+
+/* Specify all PCIe port properties for a TRIO. */
+struct pcie_trio_ports_property
+{
+ struct pcie_port_property ports[TILEGX_TRIO_PCIES];
+};
+
+/* Flags indicating traffic class. */
+#define HV_TRIO_FLAG_TC_SHIFT 4
+#define HV_TRIO_FLAG_TC_RMASK 0xf
+#define HV_TRIO_FLAG_TC(N) \
+ ((((N) & HV_TRIO_FLAG_TC_RMASK) + 1) << HV_TRIO_FLAG_TC_SHIFT)
+
+/* Flags indicating virtual functions. */
+#define HV_TRIO_FLAG_VFUNC_SHIFT 8
+#define HV_TRIO_FLAG_VFUNC_RMASK 0xff
+#define HV_TRIO_FLAG_VFUNC(N) \
+ ((((N) & HV_TRIO_FLAG_VFUNC_RMASK) + 1) << HV_TRIO_FLAG_VFUNC_SHIFT)
+
+
+/* Flag indicating an ordered PIO region. */
+#define HV_TRIO_PIO_FLAG_ORDERED (1 << 16)
+
+/* Flags indicating special types of PIO regions. */
+#define HV_TRIO_PIO_FLAG_SPACE_SHIFT 17
+#define HV_TRIO_PIO_FLAG_SPACE_MASK (0x3 << HV_TRIO_PIO_FLAG_SPACE_SHIFT)
+#define HV_TRIO_PIO_FLAG_CONFIG_SPACE (0x1 << HV_TRIO_PIO_FLAG_SPACE_SHIFT)
+#define HV_TRIO_PIO_FLAG_IO_SPACE (0x2 << HV_TRIO_PIO_FLAG_SPACE_SHIFT)
+
+
+#endif /* _SYS_HV_DRV_TRIO_INTF_H */
diff --git a/arch/tile/include/hv/drv_usb_host_intf.h b/arch/tile/include/hv/drv_usb_host_intf.h
new file mode 100644
index 000000000000..24ce774a3f1d
--- /dev/null
+++ b/arch/tile/include/hv/drv_usb_host_intf.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/**
+ * Interface definitions for the USB host driver.
+ */
+
+#ifndef _SYS_HV_DRV_USB_HOST_INTF_H
+#define _SYS_HV_DRV_USB_HOST_INTF_H
+
+#include <arch/usb_host.h>
+
+
+/** Offset for the EHCI register MMIO region. */
+#define HV_USB_HOST_MMIO_OFFSET_EHCI ((uint64_t) USB_HOST_HCCAPBASE_REG)
+
+/** Offset for the OHCI register MMIO region. */
+#define HV_USB_HOST_MMIO_OFFSET_OHCI ((uint64_t) USB_HOST_OHCD_HC_REVISION_REG)
+
+/** Size of the register MMIO region. This turns out to be the same for
+ * both EHCI and OHCI. */
+#define HV_USB_HOST_MMIO_SIZE ((uint64_t) 0x1000)
+
+/** The number of service domains supported by the USB host shim. */
+#define HV_USB_HOST_NUM_SVC_DOM 1
+
+
+#endif /* _SYS_HV_DRV_USB_HOST_INTF_H */
diff --git a/arch/tile/include/hv/iorpc.h b/arch/tile/include/hv/iorpc.h
new file mode 100644
index 000000000000..89c72a5d9341
--- /dev/null
+++ b/arch/tile/include/hv/iorpc.h
@@ -0,0 +1,714 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _HV_IORPC_H_
+#define _HV_IORPC_H_
+
+/**
+ *
+ * Error codes and struct definitions for the IO RPC library.
+ *
+ * The hypervisor's IO RPC component provides a convenient way for
+ * driver authors to proxy system calls between user space, linux, and
+ * the hypervisor driver. The core of the system is a set of Python
+ * files that take ".idl" files as input and generates the following
+ * source code:
+ *
+ * - _rpc_call() routines for use in userspace IO libraries. These
+ * routines take an argument list specified in the .idl file, pack the
+ * arguments in to a buffer, and read or write that buffer via the
+ * Linux iorpc driver.
+ *
+ * - dispatch_read() and dispatch_write() routines that hypervisor
+ * drivers can use to implement most of their dev_pread() and
+ * dev_pwrite() methods. These routines decode the incoming parameter
+ * blob, permission check and translate parameters where appropriate,
+ * and then invoke a callback routine for whichever RPC call has
+ * arrived. The driver simply implements the set of callback
+ * routines.
+ *
+ * The IO RPC system also includes the Linux 'iorpc' driver, which
+ * proxies calls between the userspace library and the hypervisor
+ * driver. The Linux driver is almost entirely device agnostic; it
+ * watches for special flags indicating cases where a memory buffer
+ * address might need to be translated, etc. As a result, driver
+ * writers can avoid many of the problem cases related to registering
+ * hardware resources like memory pages or interrupts. However, the
+ * drivers must be careful to obey the conventions documented below in
+ * order to work properly with the generic Linux iorpc driver.
+ *
+ * @section iorpc_domains Service Domains
+ *
+ * All iorpc-based drivers must support a notion of service domains.
+ * A service domain is basically an application context - state
+ * indicating resources that are allocated to that particular app
+ * which it may access and (perhaps) other applications may not
+ * access. Drivers can support any number of service domains they
+ * choose. In some cases the design is limited by a number of service
+ * domains supported by the IO hardware; in other cases the service
+ * domains are a purely software concept and the driver chooses a
+ * maximum number of domains based on how much state memory it is
+ * willing to preallocate.
+ *
+ * For example, the mPIPE driver only supports as many service domains
+ * as are supported by the mPIPE hardware. This limitation is
+ * required because the hardware implements its own MMIO protection
+ * scheme to allow large MMIO mappings while still protecting small
+ * register ranges within the page that should only be accessed by the
+ * hypervisor.
+ *
+ * In contrast, drivers with no hardware service domain limitations
+ * (for instance the TRIO shim) can implement an arbitrary number of
+ * service domains. In these cases, each service domain is limited to
+ * a carefully restricted set of legal MMIO addresses if necessary to
+ * keep one application from corrupting another application's state.
+ *
+ * @section iorpc_conventions System Call Conventions
+ *
+ * The driver's open routine is responsible for allocating a new
+ * service domain for each hv_dev_open() call. By convention, the
+ * return value from open() should be the service domain number on
+ * success, or GXIO_ERR_NO_SVC_DOM if no more service domains are
+ * available.
+ *
+ * The implementations of hv_dev_pread() and hv_dev_pwrite() are
+ * responsible for validating the devhdl value passed up by the
+ * client. Since the device handle returned by hv_dev_open() should
+ * embed the positive service domain number, drivers should make sure
+ * that DRV_HDL2BITS(devhdl) is a legal service domain. If the client
+ * passes an illegal service domain number, the routine should return
+ * GXIO_ERR_INVAL_SVC_DOM. Once the service domain number has been
+ * validated, the driver can copy to/from the client buffer and call
+ * the dispatch_read() or dispatch_write() methods created by the RPC
+ * generator.
+ *
+ * The hv_dev_close() implementation should reset all service domain
+ * state and put the service domain back on a free list for
+ * reallocation by a future application. In most cases, this will
+ * require executing a hardware reset or drain flow and denying any
+ * MMIO regions that were created for the service domain.
+ *
+ * @section iorpc_data Special Data Types
+ *
+ * The .idl file syntax allows the creation of syscalls with special
+ * parameters that require permission checks or translations as part
+ * of the system call path. Because of limitations in the code
+ * generator, APIs are generally limited to just one of these special
+ * parameters per system call, and they are sometimes required to be
+ * the first or last parameter to the call. Special parameters
+ * include:
+ *
+ * @subsection iorpc_mem_buffer MEM_BUFFER
+ *
+ * The MEM_BUFFER() datatype allows user space to "register" memory
+ * buffers with a device. Registering memory accomplishes two tasks:
+ * Linux keeps track of all buffers that might be modified by a
+ * hardware device, and the hardware device drivers bind registered
+ * buffers to particular hardware resources like ingress NotifRings.
+ * The MEM_BUFFER() idl syntax can take extra flags like ALIGN_64KB,
+ * ALIGN_SELF_SIZE, and FLAGS indicating that memory buffers must have
+ * certain alignment or that the user should be able to pass a "memory
+ * flags" word specifying attributes like nt_hint or IO cache pinning.
+ * The parser will accept multiple MEM_BUFFER() flags.
+ *
+ * Implementations must obey the following conventions when
+ * registering memory buffers via the iorpc flow. These rules are a
+ * result of the Linux driver implementation, which needs to keep
+ * track of how many times a particular page has been registered with
+ * the hardware so that it can release the page when all those
+ * registrations are cleared.
+ *
+ * - Memory registrations that refer to a resource which has already
+ * been bound must return GXIO_ERR_ALREADY_INIT. Thus, it is an
+ * error to register memory twice without resetting (i.e. closing) the
+ * resource in between. This convention keeps the Linux driver from
+ * having to track which particular devices a page is bound to.
+ *
+ * - At present, a memory registration is only cleared when the
+ * service domain is reset. In this case, the Linux driver simply
+ * closes the HV device file handle and then decrements the reference
+ * counts of all pages that were previously registered with the
+ * device.
+ *
+ * - In the future, we may add a mechanism for unregistering memory.
+ * One possible implementation would require that the user specify
+ * which buffer is currently registered. The HV would then verify
+ * that that page was actually the one currently mapped and return
+ * success or failure to Linux, which would then only decrement the
+ * page reference count if the addresses were mapped. Another scheme
+ * might allow Linux to pass a token to the HV to be returned when the
+ * resource is unmapped.
+ *
+ * @subsection iorpc_interrupt INTERRUPT
+ *
+ * The INTERRUPT .idl datatype allows the client to bind hardware
+ * interrupts to a particular combination of IPI parameters - CPU, IPI
+ * PL, and event bit number. This data is passed via a special
+ * datatype so that the Linux driver can validate the CPU and PL and
+ * the HV generic iorpc code can translate client CPUs to real CPUs.
+ *
+ * @subsection iorpc_pollfd_setup POLLFD_SETUP
+ *
+ * The POLLFD_SETUP .idl datatype allows the client to set up hardware
+ * interrupt bindings which are received by Linux but which are made
+ * visible to user processes as state transitions on a file descriptor;
+ * this allows user processes to use Linux primitives, such as poll(), to
+ * await particular hardware events. This data is passed via a special
+ * datatype so that the Linux driver may recognize the pollable file
+ * descriptor and translate it to a set of interrupt target information,
+ * and so that the HV generic iorpc code can translate client CPUs to real
+ * CPUs.
+ *
+ * @subsection iorpc_pollfd POLLFD
+ *
+ * The POLLFD .idl datatype allows manipulation of hardware interrupt
+ * bindings set up via the POLLFD_SETUP datatype; common operations are
+ * resetting the state of the requested interrupt events, and unbinding any
+ * bound interrupts. This data is passed via a special datatype so that
+ * the Linux driver may recognize the pollable file descriptor and
+ * translate it to an interrupt identifier previously supplied by the
+ * hypervisor as the result of an earlier pollfd_setup operation.
+ *
+ * @subsection iorpc_blob BLOB
+ *
+ * The BLOB .idl datatype allows the client to write an arbitrary
+ * length string of bytes up to the hypervisor driver. This can be
+ * useful for passing up large, arbitrarily structured data like
+ * classifier programs. The iorpc stack takes care of validating the
+ * buffer VA and CPA as the data passes up to the hypervisor. Unlike
+ * MEM_BUFFER(), the buffer is not registered - Linux does not bump
+ * page refcounts and the HV driver should not reuse the buffer once
+ * the system call is complete.
+ *
+ * @section iorpc_translation Translating User Space Calls
+ *
+ * The ::iorpc_offset structure describes the formatting of the offset
+ * that is passed to pread() or pwrite() as part of the generated RPC code.
+ * When the user calls up to Linux, the rpc code fills in all the fields of
+ * the offset, including a 16-bit opcode, a 16 bit format indicator, and 32
+ * bits of user-specified "sub-offset". The opcode indicates which syscall
+ * is being requested. The format indicates whether there is a "prefix
+ * struct" at the start of the memory buffer passed to pwrite(), and if so
+ * what data is in that prefix struct. These prefix structs are used to
+ * implement special datatypes like MEM_BUFFER() and INTERRUPT - we arrange
+ * to put data that needs translation and permission checks at the start of
+ * the buffer so that the Linux driver and generic portions of the HV iorpc
+ * code can easily access the data. The 32 bits of user-specified
+ * "sub-offset" are most useful for pread() calls where the user needs to
+ * also pass in a few bits indicating which register to read, etc.
+ *
+ * The Linux iorpc driver watches for system calls that contain prefix
+ * structs so that it can translate parameters and bump reference
+ * counts as appropriate. It does not (currently) have any knowledge
+ * of the per-device opcodes - it doesn't care what operation you're
+ * doing to mPIPE, so long as it can do all the generic book-keeping.
+ * The hv/iorpc.h header file defines all of the generic encoding bits
+ * needed to translate iorpc calls without knowing which particular
+ * opcode is being issued.
+ *
+ * @section iorpc_globals Global iorpc Calls
+ *
+ * Implementing mmap() required adding some special iorpc syscalls
+ * that are only called by the Linux driver, never by userspace.
+ * These include get_mmio_base() and check_mmio_offset(). These
+ * routines are described in globals.idl and must be included in every
+ * iorpc driver. By providing these routines in every driver, Linux's
+ * mmap implementation can easily get the PTE bits it needs and
+ * validate the PA offset without needing to know the per-device
+ * opcodes to perform those tasks.
+ *
+ * @section iorpc_kernel Supporting gxio APIs in the Kernel
+ *
+ * The iorpc code generator also supports generation of kernel code
+ * implementing the gxio APIs. This capability is currently used by
+ * the mPIPE network driver, and will likely be used by the TRIO root
+ * complex and endpoint drivers and perhaps an in-kernel crypto
+ * driver. Each driver that wants to instantiate iorpc calls in the
+ * kernel needs to generate a kernel version of the generate rpc code
+ * and (probably) copy any related gxio source files into the kernel.
+ * The mPIPE driver provides a good example of this pattern.
+ */
+
+#ifdef __KERNEL__
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#if defined(__HV__)
+#include <hv/hypervisor.h>
+#elif defined(__KERNEL__)
+#include "hypervisor.h"
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+
+/** Code indicating translation services required within the RPC path.
+ * These indicate whether there is a translatable struct at the start
+ * of the RPC buffer and what information that struct contains.
+ */
+enum iorpc_format_e
+{
+ /** No translation required, no prefix struct. */
+ IORPC_FORMAT_NONE,
+
+ /** No translation required, no prefix struct, no access to this
+ * operation from user space. */
+ IORPC_FORMAT_NONE_NOUSER,
+
+ /** Prefix struct contains user VA and size. */
+ IORPC_FORMAT_USER_MEM,
+
+ /** Prefix struct contains CPA, size, and homing bits. */
+ IORPC_FORMAT_KERNEL_MEM,
+
+ /** Prefix struct contains interrupt. */
+ IORPC_FORMAT_KERNEL_INTERRUPT,
+
+ /** Prefix struct contains user-level interrupt. */
+ IORPC_FORMAT_USER_INTERRUPT,
+
+ /** Prefix struct contains pollfd_setup (interrupt information). */
+ IORPC_FORMAT_KERNEL_POLLFD_SETUP,
+
+ /** Prefix struct contains user-level pollfd_setup (file descriptor). */
+ IORPC_FORMAT_USER_POLLFD_SETUP,
+
+ /** Prefix struct contains pollfd (interrupt cookie). */
+ IORPC_FORMAT_KERNEL_POLLFD,
+
+ /** Prefix struct contains user-level pollfd (file descriptor). */
+ IORPC_FORMAT_USER_POLLFD,
+};
+
+
+/** Generate an opcode given format and code. */
+#define IORPC_OPCODE(FORMAT, CODE) (((FORMAT) << 16) | (CODE))
+
+/** The offset passed through the read() and write() system calls
+ combines an opcode with 32 bits of user-specified offset. */
+union iorpc_offset
+{
+#ifndef __BIG_ENDIAN__
+ uint64_t offset; /**< All bits. */
+
+ struct
+ {
+ uint16_t code; /**< RPC code. */
+ uint16_t format; /**< iorpc_format_e */
+ uint32_t sub_offset; /**< caller-specified offset. */
+ };
+
+ uint32_t opcode; /**< Opcode combines code & format. */
+#else
+ uint64_t offset; /**< All bits. */
+
+ struct
+ {
+ uint32_t sub_offset; /**< caller-specified offset. */
+ uint16_t format; /**< iorpc_format_e */
+ uint16_t code; /**< RPC code. */
+ };
+
+ struct
+ {
+ uint32_t padding;
+ uint32_t opcode; /**< Opcode combines code & format. */
+ };
+#endif
+};
+
+
+/** Homing and cache hinting bits that can be used by IO devices. */
+struct iorpc_mem_attr
+{
+ unsigned int lotar_x:4; /**< lotar X bits (or Gx page_mask). */
+ unsigned int lotar_y:4; /**< lotar Y bits (or Gx page_offset). */
+ unsigned int hfh:1; /**< Uses hash-for-home. */
+ unsigned int nt_hint:1; /**< Non-temporal hint. */
+ unsigned int io_pin:1; /**< Only fill 'IO' cache ways. */
+};
+
+/** Set the nt_hint bit. */
+#define IORPC_MEM_BUFFER_FLAG_NT_HINT (1 << 0)
+
+/** Set the IO pin bit. */
+#define IORPC_MEM_BUFFER_FLAG_IO_PIN (1 << 1)
+
+
+/** A structure used to describe memory registration. Different
+ protection levels describe memory differently, so this union
+ contains all the different possible descriptions. As a request
+ moves up the call chain, each layer translates from one
+ description format to the next. In particular, the Linux iorpc
+ driver translates user VAs into CPAs and homing parameters. */
+union iorpc_mem_buffer
+{
+ struct
+ {
+ uint64_t va; /**< User virtual address. */
+ uint64_t size; /**< Buffer size. */
+ unsigned int flags; /**< nt_hint, IO pin. */
+ }
+ user; /**< Buffer as described by user apps. */
+
+ struct
+ {
+ unsigned long long cpa; /**< Client physical address. */
+#if defined(__KERNEL__) || defined(__HV__)
+ size_t size; /**< Buffer size. */
+ HV_PTE pte; /**< PTE describing memory homing. */
+#else
+ uint64_t size;
+ uint64_t pte;
+#endif
+ unsigned int flags; /**< nt_hint, IO pin. */
+ }
+ kernel; /**< Buffer as described by kernel. */
+
+ struct
+ {
+ unsigned long long pa; /**< Physical address. */
+ size_t size; /**< Buffer size. */
+ struct iorpc_mem_attr attr; /**< Homing and locality hint bits. */
+ }
+ hv; /**< Buffer parameters for HV driver. */
+};
+
+
+/** A structure used to describe interrupts. The format differs slightly
+ * for user and kernel interrupts. As with the mem_buffer_t, translation
+ * between the formats is done at each level. */
+union iorpc_interrupt
+{
+ struct
+ {
+ int cpu; /**< CPU. */
+ int event; /**< evt_num */
+ }
+ user; /**< Interrupt as described by user applications. */
+
+ struct
+ {
+ int x; /**< X coord. */
+ int y; /**< Y coord. */
+ int ipi; /**< int_num */
+ int event; /**< evt_num */
+ }
+ kernel; /**< Interrupt as described by the kernel. */
+
+};
+
+
+/** A structure used to describe interrupts used with poll(). The format
+ * differs significantly for requests from user to kernel, and kernel to
+ * hypervisor. As with the mem_buffer_t, translation between the formats
+ * is done at each level. */
+union iorpc_pollfd_setup
+{
+ struct
+ {
+ int fd; /**< Pollable file descriptor. */
+ }
+ user; /**< pollfd_setup as described by user applications. */
+
+ struct
+ {
+ int x; /**< X coord. */
+ int y; /**< Y coord. */
+ int ipi; /**< int_num */
+ int event; /**< evt_num */
+ }
+ kernel; /**< pollfd_setup as described by the kernel. */
+
+};
+
+
+/** A structure used to describe previously set up interrupts used with
+ * poll(). The format differs significantly for requests from user to
+ * kernel, and kernel to hypervisor. As with the mem_buffer_t, translation
+ * between the formats is done at each level. */
+union iorpc_pollfd
+{
+ struct
+ {
+ int fd; /**< Pollable file descriptor. */
+ }
+ user; /**< pollfd as described by user applications. */
+
+ struct
+ {
+ int cookie; /**< hv cookie returned by the pollfd_setup operation. */
+ }
+ kernel; /**< pollfd as described by the kernel. */
+
+};
+
+
+/** The various iorpc devices use error codes from -1100 to -1299.
+ *
+ * This range is distinct from netio (-700 to -799), the hypervisor
+ * (-800 to -899), tilepci (-900 to -999), ilib (-1000 to -1099),
+ * gxcr (-1300 to -1399) and gxpci (-1400 to -1499).
+ */
+enum gxio_err_e {
+
+ /** Largest iorpc error number. */
+ GXIO_ERR_MAX = -1101,
+
+
+ /********************************************************/
+ /* Generic Error Codes */
+ /********************************************************/
+
+ /** Bad RPC opcode - possible version incompatibility. */
+ GXIO_ERR_OPCODE = -1101,
+
+ /** Invalid parameter. */
+ GXIO_ERR_INVAL = -1102,
+
+ /** Memory buffer did not meet alignment requirements. */
+ GXIO_ERR_ALIGNMENT = -1103,
+
+ /** Memory buffers must be coherent and cacheable. */
+ GXIO_ERR_COHERENCE = -1104,
+
+ /** Resource already initialized. */
+ GXIO_ERR_ALREADY_INIT = -1105,
+
+ /** No service domains available. */
+ GXIO_ERR_NO_SVC_DOM = -1106,
+
+ /** Illegal service domain number. */
+ GXIO_ERR_INVAL_SVC_DOM = -1107,
+
+ /** Illegal MMIO address. */
+ GXIO_ERR_MMIO_ADDRESS = -1108,
+
+ /** Illegal interrupt binding. */
+ GXIO_ERR_INTERRUPT = -1109,
+
+ /** Unreasonable client memory. */
+ GXIO_ERR_CLIENT_MEMORY = -1110,
+
+ /** No more IOTLB entries. */
+ GXIO_ERR_IOTLB_ENTRY = -1111,
+
+ /** Invalid memory size. */
+ GXIO_ERR_INVAL_MEMORY_SIZE = -1112,
+
+ /** Unsupported operation. */
+ GXIO_ERR_UNSUPPORTED_OP = -1113,
+
+ /** Insufficient DMA credits. */
+ GXIO_ERR_DMA_CREDITS = -1114,
+
+ /** Operation timed out. */
+ GXIO_ERR_TIMEOUT = -1115,
+
+ /** No such device or object. */
+ GXIO_ERR_NO_DEVICE = -1116,
+
+ /** Device or resource busy. */
+ GXIO_ERR_BUSY = -1117,
+
+ /** I/O error. */
+ GXIO_ERR_IO = -1118,
+
+ /** Permissions error. */
+ GXIO_ERR_PERM = -1119,
+
+
+
+ /********************************************************/
+ /* Test Device Error Codes */
+ /********************************************************/
+
+ /** Illegal register number. */
+ GXIO_TEST_ERR_REG_NUMBER = -1120,
+
+ /** Illegal buffer slot. */
+ GXIO_TEST_ERR_BUFFER_SLOT = -1121,
+
+
+ /********************************************************/
+ /* MPIPE Error Codes */
+ /********************************************************/
+
+
+ /** Invalid buffer size. */
+ GXIO_MPIPE_ERR_INVAL_BUFFER_SIZE = -1131,
+
+ /** Cannot allocate buffer stack. */
+ GXIO_MPIPE_ERR_NO_BUFFER_STACK = -1140,
+
+ /** Invalid buffer stack number. */
+ GXIO_MPIPE_ERR_BAD_BUFFER_STACK = -1141,
+
+ /** Cannot allocate NotifRing. */
+ GXIO_MPIPE_ERR_NO_NOTIF_RING = -1142,
+
+ /** Invalid NotifRing number. */
+ GXIO_MPIPE_ERR_BAD_NOTIF_RING = -1143,
+
+ /** Cannot allocate NotifGroup. */
+ GXIO_MPIPE_ERR_NO_NOTIF_GROUP = -1144,
+
+ /** Invalid NotifGroup number. */
+ GXIO_MPIPE_ERR_BAD_NOTIF_GROUP = -1145,
+
+ /** Cannot allocate bucket. */
+ GXIO_MPIPE_ERR_NO_BUCKET = -1146,
+
+ /** Invalid bucket number. */
+ GXIO_MPIPE_ERR_BAD_BUCKET = -1147,
+
+ /** Cannot allocate eDMA ring. */
+ GXIO_MPIPE_ERR_NO_EDMA_RING = -1148,
+
+ /** Invalid eDMA ring number. */
+ GXIO_MPIPE_ERR_BAD_EDMA_RING = -1149,
+
+ /** Invalid channel number. */
+ GXIO_MPIPE_ERR_BAD_CHANNEL = -1150,
+
+ /** Bad configuration. */
+ GXIO_MPIPE_ERR_BAD_CONFIG = -1151,
+
+ /** Empty iqueue. */
+ GXIO_MPIPE_ERR_IQUEUE_EMPTY = -1152,
+
+ /** Empty rules. */
+ GXIO_MPIPE_ERR_RULES_EMPTY = -1160,
+
+ /** Full rules. */
+ GXIO_MPIPE_ERR_RULES_FULL = -1161,
+
+ /** Corrupt rules. */
+ GXIO_MPIPE_ERR_RULES_CORRUPT = -1162,
+
+ /** Invalid rules. */
+ GXIO_MPIPE_ERR_RULES_INVALID = -1163,
+
+ /** Classifier is too big. */
+ GXIO_MPIPE_ERR_CLASSIFIER_TOO_BIG = -1170,
+
+ /** Classifier is too complex. */
+ GXIO_MPIPE_ERR_CLASSIFIER_TOO_COMPLEX = -1171,
+
+ /** Classifier has bad header. */
+ GXIO_MPIPE_ERR_CLASSIFIER_BAD_HEADER = -1172,
+
+ /** Classifier has bad contents. */
+ GXIO_MPIPE_ERR_CLASSIFIER_BAD_CONTENTS = -1173,
+
+ /** Classifier encountered invalid symbol. */
+ GXIO_MPIPE_ERR_CLASSIFIER_INVAL_SYMBOL = -1174,
+
+ /** Classifier encountered invalid bounds. */
+ GXIO_MPIPE_ERR_CLASSIFIER_INVAL_BOUNDS = -1175,
+
+ /** Classifier encountered invalid relocation. */
+ GXIO_MPIPE_ERR_CLASSIFIER_INVAL_RELOCATION = -1176,
+
+ /** Classifier encountered undefined symbol. */
+ GXIO_MPIPE_ERR_CLASSIFIER_UNDEF_SYMBOL = -1177,
+
+
+ /********************************************************/
+ /* TRIO Error Codes */
+ /********************************************************/
+
+ /** Cannot allocate memory map region. */
+ GXIO_TRIO_ERR_NO_MEMORY_MAP = -1180,
+
+ /** Invalid memory map region number. */
+ GXIO_TRIO_ERR_BAD_MEMORY_MAP = -1181,
+
+ /** Cannot allocate scatter queue. */
+ GXIO_TRIO_ERR_NO_SCATTER_QUEUE = -1182,
+
+ /** Invalid scatter queue number. */
+ GXIO_TRIO_ERR_BAD_SCATTER_QUEUE = -1183,
+
+ /** Cannot allocate push DMA ring. */
+ GXIO_TRIO_ERR_NO_PUSH_DMA_RING = -1184,
+
+ /** Invalid push DMA ring index. */
+ GXIO_TRIO_ERR_BAD_PUSH_DMA_RING = -1185,
+
+ /** Cannot allocate pull DMA ring. */
+ GXIO_TRIO_ERR_NO_PULL_DMA_RING = -1186,
+
+ /** Invalid pull DMA ring index. */
+ GXIO_TRIO_ERR_BAD_PULL_DMA_RING = -1187,
+
+ /** Cannot allocate PIO region. */
+ GXIO_TRIO_ERR_NO_PIO = -1188,
+
+ /** Invalid PIO region index. */
+ GXIO_TRIO_ERR_BAD_PIO = -1189,
+
+ /** Cannot allocate ASID. */
+ GXIO_TRIO_ERR_NO_ASID = -1190,
+
+ /** Invalid ASID. */
+ GXIO_TRIO_ERR_BAD_ASID = -1191,
+
+
+ /********************************************************/
+ /* MICA Error Codes */
+ /********************************************************/
+
+ /** No such accelerator type. */
+ GXIO_MICA_ERR_BAD_ACCEL_TYPE = -1220,
+
+ /** Cannot allocate context. */
+ GXIO_MICA_ERR_NO_CONTEXT = -1221,
+
+ /** PKA command queue is full, can't add another command. */
+ GXIO_MICA_ERR_PKA_CMD_QUEUE_FULL = -1222,
+
+ /** PKA result queue is empty, can't get a result from the queue. */
+ GXIO_MICA_ERR_PKA_RESULT_QUEUE_EMPTY = -1223,
+
+ /********************************************************/
+ /* GPIO Error Codes */
+ /********************************************************/
+
+ /** Pin not available. Either the physical pin does not exist, or
+ * it is reserved by the hypervisor for system usage. */
+ GXIO_GPIO_ERR_PIN_UNAVAILABLE = -1240,
+
+ /** Pin busy. The pin exists, and is available for use via GXIO, but
+ * it has been attached by some other process or driver. */
+ GXIO_GPIO_ERR_PIN_BUSY = -1241,
+
+ /** Cannot access unattached pin. One or more of the pins being
+ * manipulated by this call are not attached to the requesting
+ * context. */
+ GXIO_GPIO_ERR_PIN_UNATTACHED = -1242,
+
+ /** Invalid I/O mode for pin. The wiring of the pin in the system
+ * is such that the I/O mode or electrical control parameters
+ * requested could cause damage. */
+ GXIO_GPIO_ERR_PIN_INVALID_MODE = -1243,
+
+ /** Smallest iorpc error number. */
+ GXIO_ERR_MIN = -1299
+};
+
+
+#endif /* !_HV_IORPC_H_ */
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 5de99248d8df..5334be8e2538 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -14,4 +14,9 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel_$(BITS).o
+ifdef CONFIG_TILEGX
+obj-$(CONFIG_PCI) += pci_gx.o
+else
obj-$(CONFIG_PCI) += pci.o
+endif
+obj-$(CONFIG_TILE_USB) += usb.o
diff --git a/arch/tile/kernel/backtrace.c b/arch/tile/kernel/backtrace.c
index 9092ce8aa6b4..f8b74ca83b92 100644
--- a/arch/tile/kernel/backtrace.c
+++ b/arch/tile/kernel/backtrace.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <asm/byteorder.h>
#include <asm/backtrace.h>
#include <asm/tile-desc.h>
#include <arch/abi.h>
@@ -336,8 +337,12 @@ static void find_caller_pc_and_caller_sp(CallerLocation *location,
bytes_to_prefetch / sizeof(tile_bundle_bits);
}
- /* Decode the next bundle. */
- bundle.bits = prefetched_bundles[next_bundle++];
+ /*
+ * Decode the next bundle.
+ * TILE always stores instruction bundles in little-endian
+ * mode, even when the chip is running in big-endian mode.
+ */
+ bundle.bits = le64_to_cpu(prefetched_bundles[next_bundle++]);
bundle.num_insns =
parse_insn_tile(bundle.bits, pc, bundle.insns);
num_info_ops = bt_get_info_ops(&bundle, info_operands);
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index b3ed19f8779c..b9fe80ec1089 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
+#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
@@ -22,13 +23,18 @@
/* Generic DMA mapping functions: */
/*
- * Allocate what Linux calls "coherent" memory, which for us just
- * means uncached.
+ * Allocate what Linux calls "coherent" memory. On TILEPro this is
+ * uncached memory; on TILE-Gx it is hash-for-home memory.
*/
-void *dma_alloc_coherent(struct device *dev,
- size_t size,
- dma_addr_t *dma_handle,
- gfp_t gfp)
+#ifdef __tilepro__
+#define PAGE_HOME_DMA PAGE_HOME_UNCACHED
+#else
+#define PAGE_HOME_DMA PAGE_HOME_HASH
+#endif
+
+static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
int node = dev_to_node(dev);
@@ -39,39 +45,42 @@ void *dma_alloc_coherent(struct device *dev,
gfp |= __GFP_ZERO;
/*
- * By forcing NUMA node 0 for 32-bit masks we ensure that the
- * high 32 bits of the resulting PA will be zero. If the mask
- * size is, e.g., 24, we may still not be able to guarantee a
- * suitable memory address, in which case we will return NULL.
- * But such devices are uncommon.
+ * If the mask specifies that the memory be in the first 4 GB, then
+ * we force the allocation to come from the DMA zone. We also
+ * force the node to 0 since that's the only node where the DMA
+ * zone isn't empty. If the mask size is smaller than 32 bits, we
+ * may still not be able to guarantee a suitable memory address, in
+ * which case we will return NULL. But such devices are uncommon.
*/
- if (dma_mask <= DMA_BIT_MASK(32))
+ if (dma_mask <= DMA_BIT_MASK(32)) {
+ gfp |= GFP_DMA;
node = 0;
+ }
- pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED);
+ pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
if (pg == NULL)
return NULL;
addr = page_to_phys(pg);
if (addr + size > dma_mask) {
- homecache_free_pages(addr, order);
+ __homecache_free_pages(pg, order);
return NULL;
}
*dma_handle = addr;
+
return page_address(pg);
}
-EXPORT_SYMBOL(dma_alloc_coherent);
/*
- * Free memory that was allocated with dma_alloc_coherent.
+ * Free memory that was allocated with tile_dma_alloc_coherent.
*/
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle)
+static void tile_dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
homecache_free_pages((unsigned long)vaddr, get_order(size));
}
-EXPORT_SYMBOL(dma_free_coherent);
/*
* The map routines "map" the specified address range for DMA
@@ -87,52 +96,285 @@ EXPORT_SYMBOL(dma_free_coherent);
* can count on nothing having been touched.
*/
-/* Flush a PA range from cache page by page. */
-static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size)
+/* Set up a single page for DMA access. */
+static void __dma_prep_page(struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+ /*
+ * Flush the page from cache if necessary.
+ * On tilegx, data is delivered to hash-for-home L3; on tilepro,
+ * data is delivered direct to memory.
+ *
+ * NOTE: If we were just doing DMA_TO_DEVICE we could optimize
+ * this to be a "flush" not a "finv" and keep some of the
+ * state in cache across the DMA operation, but it doesn't seem
+ * worth creating the necessary flush_buffer_xxx() infrastructure.
+ */
+ int home = page_home(page);
+ switch (home) {
+ case PAGE_HOME_HASH:
+#ifdef __tilegx__
+ return;
+#endif
+ break;
+ case PAGE_HOME_UNCACHED:
+#ifdef __tilepro__
+ return;
+#endif
+ break;
+ case PAGE_HOME_IMMUTABLE:
+ /* Should be going to the device only. */
+ BUG_ON(direction == DMA_FROM_DEVICE ||
+ direction == DMA_BIDIRECTIONAL);
+ return;
+ case PAGE_HOME_INCOHERENT:
+ /* Incoherent anyway, so no need to work hard here. */
+ return;
+ default:
+ BUG_ON(home < 0 || home >= NR_CPUS);
+ break;
+ }
+ homecache_finv_page(page);
+
+#ifdef DEBUG_ALIGNMENT
+ /* Warn if the region isn't cacheline aligned. */
+ if (offset & (L2_CACHE_BYTES - 1) || (size & (L2_CACHE_BYTES - 1)))
+ pr_warn("Unaligned DMA to non-hfh memory: PA %#llx/%#lx\n",
+ PFN_PHYS(page_to_pfn(page)) + offset, size);
+#endif
+}
+
+/* Make the page ready to be read by the core. */
+static void __dma_complete_page(struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction)
+{
+#ifdef __tilegx__
+ switch (page_home(page)) {
+ case PAGE_HOME_HASH:
+ /* I/O device delivered data the way the cpu wanted it. */
+ break;
+ case PAGE_HOME_INCOHERENT:
+ /* Incoherent anyway, so no need to work hard here. */
+ break;
+ case PAGE_HOME_IMMUTABLE:
+ /* Extra read-only copies are not a problem. */
+ break;
+ default:
+ /* Flush the bogus hash-for-home I/O entries to memory. */
+ homecache_finv_map_page(page, PAGE_HOME_HASH);
+ break;
+ }
+#endif
+}
+
+static void __dma_prep_pa_range(dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
{
struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
- size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1));
+ unsigned long offset = dma_addr & (PAGE_SIZE - 1);
+ size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));
+
+ while (size != 0) {
+ __dma_prep_page(page, offset, bytes, direction);
+ size -= bytes;
+ ++page;
+ offset = 0;
+ bytes = min((size_t)PAGE_SIZE, size);
+ }
+}
- while ((ssize_t)size > 0) {
- /* Flush the page. */
- homecache_flush_cache(page++, 0);
+static void __dma_complete_pa_range(dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
+ unsigned long offset = dma_addr & (PAGE_SIZE - 1);
+ size_t bytes = min(size, (size_t)(PAGE_SIZE - offset));
+
+ while (size != 0) {
+ __dma_complete_page(page, offset, bytes, direction);
+ size -= bytes;
+ ++page;
+ offset = 0;
+ bytes = min((size_t)PAGE_SIZE, size);
+ }
+}
+
+static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+
+ WARN_ON(nents == 0 || sglist->length == 0);
- /* Figure out if we need to continue on the next page. */
- size -= bytesleft;
- bytesleft = PAGE_SIZE;
+ for_each_sg(sglist, sg, nents, i) {
+ sg->dma_address = sg_phys(sg);
+ __dma_prep_pa_range(sg->dma_address, sg->length, direction);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
}
+
+ return nents;
}
-/*
- * dma_map_single can be passed any memory address, and there appear
- * to be no alignment constraints.
- *
- * There is a chance that the start of the buffer will share a cache
- * line with some other data that has been touched in the meantime.
- */
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
- enum dma_data_direction direction)
+static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+ for_each_sg(sglist, sg, nents, i) {
+ sg->dma_address = sg_phys(sg);
+ __dma_complete_pa_range(sg->dma_address, sg->length,
+ direction);
+ }
+}
+
+static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
- dma_addr_t dma_addr = __pa(ptr);
+ BUG_ON(!valid_dma_direction(direction));
+
+ BUG_ON(offset + size > PAGE_SIZE);
+ __dma_prep_page(page, offset, size, direction);
+ return page_to_pa(page) + offset;
+}
+
+static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
BUG_ON(!valid_dma_direction(direction));
- WARN_ON(size == 0);
- __dma_map_pa_range(dma_addr, size);
+ __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
+ dma_address & PAGE_OFFSET, size, direction);
+}
- return dma_addr;
+static void tile_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(!valid_dma_direction(direction));
+
+ __dma_complete_pa_range(dma_handle, size, direction);
+}
+
+static void tile_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ __dma_prep_pa_range(dma_handle, size, direction);
}
-EXPORT_SYMBOL(dma_map_single);
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction direction)
+static void tile_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
{
+ struct scatterlist *sg;
+ int i;
+
+ BUG_ON(!valid_dma_direction(direction));
+ WARN_ON(nelems == 0 || sglist->length == 0);
+
+ for_each_sg(sglist, sg, nelems, i) {
+ dma_sync_single_for_cpu(dev, sg->dma_address,
+ sg_dma_len(sg), direction);
+ }
+}
+
+static void tile_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist, int nelems,
+ enum dma_data_direction direction)
+{
+ struct scatterlist *sg;
+ int i;
+
BUG_ON(!valid_dma_direction(direction));
+ WARN_ON(nelems == 0 || sglist->length == 0);
+
+ for_each_sg(sglist, sg, nelems, i) {
+ dma_sync_single_for_device(dev, sg->dma_address,
+ sg_dma_len(sg), direction);
+ }
+}
+
+static inline int
+tile_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int
+tile_dma_supported(struct device *dev, u64 mask)
+{
+ return 1;
+}
+
+static struct dma_map_ops tile_default_dma_map_ops = {
+ .alloc = tile_dma_alloc_coherent,
+ .free = tile_dma_free_coherent,
+ .map_page = tile_dma_map_page,
+ .unmap_page = tile_dma_unmap_page,
+ .map_sg = tile_dma_map_sg,
+ .unmap_sg = tile_dma_unmap_sg,
+ .sync_single_for_cpu = tile_dma_sync_single_for_cpu,
+ .sync_single_for_device = tile_dma_sync_single_for_device,
+ .sync_sg_for_cpu = tile_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = tile_dma_sync_sg_for_device,
+ .mapping_error = tile_dma_mapping_error,
+ .dma_supported = tile_dma_supported
+};
+
+struct dma_map_ops *tile_dma_map_ops = &tile_default_dma_map_ops;
+EXPORT_SYMBOL(tile_dma_map_ops);
+
+/* Generic PCI DMA mapping functions */
+
+static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
+{
+ int node = dev_to_node(dev);
+ int order = get_order(size);
+ struct page *pg;
+ dma_addr_t addr;
+
+ gfp |= __GFP_ZERO;
+
+ pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_DMA);
+ if (pg == NULL)
+ return NULL;
+
+ addr = page_to_phys(pg);
+
+ *dma_handle = phys_to_dma(dev, addr);
+
+ return page_address(pg);
+}
+
+/*
+ * Free memory that was allocated with tile_pci_dma_alloc_coherent.
+ */
+static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ homecache_free_pages((unsigned long)vaddr, get_order(size));
}
-EXPORT_SYMBOL(dma_unmap_single);
-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction)
+static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;
@@ -143,73 +385,103 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
for_each_sg(sglist, sg, nents, i) {
sg->dma_address = sg_phys(sg);
- __dma_map_pa_range(sg->dma_address, sg->length);
+ __dma_prep_pa_range(sg->dma_address, sg->length, direction);
+
+ sg->dma_address = phys_to_dma(dev, sg->dma_address);
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->dma_length = sg->length;
+#endif
}
return nents;
}
-EXPORT_SYMBOL(dma_map_sg);
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
- enum dma_data_direction direction)
+static void tile_pci_dma_unmap_sg(struct device *dev,
+ struct scatterlist *sglist, int nents,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
+ struct scatterlist *sg;
+ int i;
+
BUG_ON(!valid_dma_direction(direction));
+ for_each_sg(sglist, sg, nents, i) {
+ sg->dma_address = sg_phys(sg);
+ __dma_complete_pa_range(sg->dma_address, sg->length,
+ direction);
+ }
}
-EXPORT_SYMBOL(dma_unmap_sg);
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
BUG_ON(!valid_dma_direction(direction));
BUG_ON(offset + size > PAGE_SIZE);
- homecache_flush_cache(page, 0);
+ __dma_prep_page(page, offset, size, direction);
- return page_to_pa(page) + offset;
+ return phys_to_dma(dev, page_to_pa(page) + offset);
}
-EXPORT_SYMBOL(dma_map_page);
-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
- enum dma_data_direction direction)
+static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+ size_t size,
+ enum dma_data_direction direction,
+ struct dma_attrs *attrs)
{
BUG_ON(!valid_dma_direction(direction));
+
+ dma_address = dma_to_phys(dev, dma_address);
+
+ __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
+ dma_address & PAGE_OFFSET, size, direction);
}
-EXPORT_SYMBOL(dma_unmap_page);
-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction direction)
{
BUG_ON(!valid_dma_direction(direction));
+
+ dma_handle = dma_to_phys(dev, dma_handle);
+
+ __dma_complete_pa_range(dma_handle, size, direction);
}
-EXPORT_SYMBOL(dma_sync_single_for_cpu);
-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
- size_t size, enum dma_data_direction direction)
+static void tile_pci_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle,
+ size_t size,
+ enum dma_data_direction
+ direction)
{
- unsigned long start = PFN_DOWN(dma_handle);
- unsigned long end = PFN_DOWN(dma_handle + size - 1);
- unsigned long i;
+ dma_handle = dma_to_phys(dev, dma_handle);
- BUG_ON(!valid_dma_direction(direction));
- for (i = start; i <= end; ++i)
- homecache_flush_cache(pfn_to_page(i), 0);
+ __dma_prep_pa_range(dma_handle, size, direction);
}
-EXPORT_SYMBOL(dma_sync_single_for_device);
-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
- enum dma_data_direction direction)
+static void tile_pci_dma_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sglist,
+ int nelems,
+ enum dma_data_direction direction)
{
+ struct scatterlist *sg;
+ int i;
+
BUG_ON(!valid_dma_direction(direction));
- WARN_ON(nelems == 0 || sg[0].length == 0);
+ WARN_ON(nelems == 0 || sglist->length == 0);
+
+ for_each_sg(sglist, sg, nelems, i) {
+ dma_sync_single_for_cpu(dev, sg->dma_address,
+ sg_dma_len(sg), direction);
+ }
}
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-/*
- * Flush and invalidate cache for scatterlist.
- */
-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
- int nelems, enum dma_data_direction direction)
+static void tile_pci_dma_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sglist,
+ int nelems,
+ enum dma_data_direction direction)
{
struct scatterlist *sg;
int i;
@@ -222,31 +494,93 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
sg_dma_len(sg), direction);
}
}
-EXPORT_SYMBOL(dma_sync_sg_for_device);
-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline int
+tile_pci_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
- dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
+ return 0;
}
-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
-void dma_sync_single_range_for_device(struct device *dev,
- dma_addr_t dma_handle,
- unsigned long offset, size_t size,
- enum dma_data_direction direction)
+static inline int
+tile_pci_dma_supported(struct device *dev, u64 mask)
{
- dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
+ return 1;
}
-EXPORT_SYMBOL(dma_sync_single_range_for_device);
-/*
- * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
- * need to do any flushing here.
- */
-void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
- enum dma_data_direction direction)
+static struct dma_map_ops tile_pci_default_dma_map_ops = {
+ .alloc = tile_pci_dma_alloc_coherent,
+ .free = tile_pci_dma_free_coherent,
+ .map_page = tile_pci_dma_map_page,
+ .unmap_page = tile_pci_dma_unmap_page,
+ .map_sg = tile_pci_dma_map_sg,
+ .unmap_sg = tile_pci_dma_unmap_sg,
+ .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
+ .sync_single_for_device = tile_pci_dma_sync_single_for_device,
+ .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
+ .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
+ .mapping_error = tile_pci_dma_mapping_error,
+ .dma_supported = tile_pci_dma_supported
+};
+
+struct dma_map_ops *gx_pci_dma_map_ops = &tile_pci_default_dma_map_ops;
+EXPORT_SYMBOL(gx_pci_dma_map_ops);
+
+/* PCI DMA mapping functions for legacy PCI devices */
+
+#ifdef CONFIG_SWIOTLB
+static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
{
+ gfp |= GFP_DMA;
+ return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+}
+
+static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs)
+{
+ swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
+static struct dma_map_ops pci_swiotlb_dma_ops = {
+ .alloc = tile_swiotlb_alloc_coherent,
+ .free = tile_swiotlb_free_coherent,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .dma_supported = swiotlb_dma_supported,
+ .mapping_error = swiotlb_dma_mapping_error,
+};
+
+struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
+#else
+struct dma_map_ops *gx_legacy_pci_dma_map_ops;
+#endif
+EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
+
+#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
+int dma_set_coherent_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *dma_ops = get_dma_ops(dev);
+
+ /* Handle legacy PCI devices with limited memory addressability. */
+ if (((dma_ops == gx_pci_dma_map_ops) ||
+ (dma_ops == gx_legacy_pci_dma_map_ops)) &&
+ (mask <= DMA_BIT_MASK(32))) {
+ if (mask > dev->archdata.max_direct_dma_addr)
+ mask = dev->archdata.max_direct_dma_addr;
+ }
+
+ if (!dma_supported(dev, mask))
+ return -EIO;
+ dev->coherent_dma_mask = mask;
+ return 0;
}
-EXPORT_SYMBOL(dma_cache_sync);
+EXPORT_SYMBOL(dma_set_coherent_mask);
+#endif
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index b56d12bf5900..0fdd99d0d8b7 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -310,6 +310,7 @@ int __init pcibios_init(void)
if (pci_scan_flags[i] == 0 && controllers[i].ops != NULL) {
struct pci_controller *controller = &controllers[i];
struct pci_bus *bus;
+ LIST_HEAD(resources);
if (tile_init_irqs(i, controller)) {
pr_err("PCI: Could not initialize IRQs\n");
@@ -327,9 +328,11 @@ int __init pcibios_init(void)
* This is inlined in linux/pci.h and calls into
* pci_scan_bus_parented() in probe.c.
*/
- bus = pci_scan_bus(0, controller->ops, controller);
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ bus = pci_scan_root_bus(NULL, 0, controller->ops, controller, &resources);
controller->root_bus = bus;
- controller->last_busno = bus->subordinate;
+ controller->last_busno = bus->busn_res.end;
}
}
@@ -366,7 +369,7 @@ int __init pcibios_init(void)
*/
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
(PCI_SLOT(dev->devfn) == 0)) {
- next_bus = dev->subordinate;
+ next_bus = dev->busn_res.end;
controllers[i].mem_resources[0] =
*next_bus->resource[0];
controllers[i].mem_resources[1] =
@@ -401,16 +404,6 @@ void pcibios_set_master(struct pci_dev *dev)
}
/*
- * This can be called from the generic PCI layer, but doesn't need to
- * do anything.
- */
-char __devinit *pcibios_setup(char *str)
-{
- /* Nothing needs to be done. */
- return str;
-}
-
-/*
* This is called from the generic Linux layer.
*/
void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
new file mode 100644
index 000000000000..fa75264a82ae
--- /dev/null
+++ b/arch/tile/kernel/pci_gx.c
@@ -0,0 +1,1543 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mmzone.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/capability.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/msi.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/byteorder.h>
+
+#include <gxio/iorpc_globals.h>
+#include <gxio/kiorpc.h>
+#include <gxio/trio.h>
+#include <gxio/iorpc_trio.h>
+#include <hv/drv_trio_intf.h>
+
+#include <arch/sim.h>
+
+/*
+ * This file containes the routines to search for PCI buses,
+ * enumerate the buses, and configure any attached devices.
+ */
+
+#define DEBUG_PCI_CFG 0
+
+#if DEBUG_PCI_CFG
+#define TRACE_CFG_WR(size, val, bus, dev, func, offset) \
+ pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \
+ size, val, bus, dev, func, offset & 0xFFF);
+#define TRACE_CFG_RD(size, val, bus, dev, func, offset) \
+ pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \
+ size, val, bus, dev, func, offset & 0xFFF);
+#else
+#define TRACE_CFG_WR(...)
+#define TRACE_CFG_RD(...)
+#endif
+
+static int __devinitdata pci_probe = 1;
+
+/* Information on the PCIe RC ports configuration. */
+static int __devinitdata pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
+
+/*
+ * On some platforms with one or more Gx endpoint ports, we need to
+ * delay the PCIe RC port probe for a few seconds to work around
+ * a HW PCIe link-training bug. The exact delay is specified with
+ * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
+ * where T is the TRIO instance number, P is the port number and S is
+ * the delay in seconds. If the delay is not provided, the value
+ * will be DEFAULT_RC_DELAY.
+ */
+static int __devinitdata rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
+
+/* Default number of seconds that the PCIe RC port probe can be delayed. */
+#define DEFAULT_RC_DELAY 10
+
+/* Max number of seconds that the PCIe RC port probe can be delayed. */
+#define MAX_RC_DELAY 20
+
+/* Array of the PCIe ports configuration info obtained from the BIB. */
+struct pcie_port_property pcie_ports[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
+
+/* All drivers share the TRIO contexts defined here. */
+gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
+
+/* Pointer to an array of PCIe RC controllers. */
+struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
+int num_rc_controllers;
+static int num_ep_controllers;
+
+static struct pci_ops tile_cfg_ops;
+
+/* Mask of CPUs that should receive PCIe interrupts. */
+static struct cpumask intr_cpus_map;
+
+/*
+ * We don't need to worry about the alignment of resources.
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ return res->start;
+}
+EXPORT_SYMBOL(pcibios_align_resource);
+
+
+/*
+ * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
+ * For now, we simply send interrupts to non-dataplane CPUs.
+ * We may implement methods to allow user to specify the target CPUs,
+ * e.g. via boot arguments.
+ */
+static int tile_irq_cpu(int irq)
+{
+ unsigned int count;
+ int i = 0;
+ int cpu;
+
+ count = cpumask_weight(&intr_cpus_map);
+ if (unlikely(count == 0)) {
+ pr_warning("intr_cpus_map empty, interrupts will be"
+ " delievered to dataplane tiles\n");
+ return irq % (smp_height * smp_width);
+ }
+
+ count = irq % count;
+ for_each_cpu(cpu, &intr_cpus_map) {
+ if (i++ == count)
+ break;
+ }
+ return cpu;
+}
+
+/*
+ * Open a file descriptor to the TRIO shim.
+ */
+static int __devinit tile_pcie_open(int trio_index)
+{
+ gxio_trio_context_t *context = &trio_contexts[trio_index];
+ int ret;
+
+ /*
+ * This opens a file descriptor to the TRIO shim.
+ */
+ ret = gxio_trio_init(context, trio_index);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Allocate an ASID for the kernel.
+ */
+ ret = gxio_trio_alloc_asids(context, 1, 0, 0);
+ if (ret < 0) {
+ pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
+ trio_index);
+ goto asid_alloc_failure;
+ }
+
+ context->asid = ret;
+
+#ifdef USE_SHARED_PCIE_CONFIG_REGION
+ /*
+ * Alloc a PIO region for config access, shared by all MACs per TRIO.
+ * This shouldn't fail since the kernel is supposed to the first
+ * client of the TRIO's PIO regions.
+ */
+ ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0);
+ if (ret < 0) {
+ pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n",
+ trio_index);
+ goto pio_alloc_failure;
+ }
+
+ context->pio_cfg_index = ret;
+
+ /*
+ * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter
+ * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR.
+ */
+ ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index,
+ 0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
+ if (ret < 0) {
+ pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n",
+ trio_index);
+ goto pio_alloc_failure;
+ }
+#endif
+
+ return ret;
+
+asid_alloc_failure:
+#ifdef USE_SHARED_PCIE_CONFIG_REGION
+pio_alloc_failure:
+#endif
+ hv_dev_close(context->fd);
+
+ return ret;
+}
+
+static void
+tilegx_legacy_irq_ack(struct irq_data *d)
+{
+ __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
+}
+
+static void
+tilegx_legacy_irq_mask(struct irq_data *d)
+{
+ __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
+}
+
+static void
+tilegx_legacy_irq_unmask(struct irq_data *d)
+{
+ __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
+}
+
+static struct irq_chip tilegx_legacy_irq_chip = {
+ .name = "tilegx_legacy_irq",
+ .irq_ack = tilegx_legacy_irq_ack,
+ .irq_mask = tilegx_legacy_irq_mask,
+ .irq_unmask = tilegx_legacy_irq_unmask,
+
+ /* TBD: support set_affinity. */
+};
+
+/*
+ * This is a wrapper function of the kernel level-trigger interrupt
+ * handler handle_level_irq() for PCI legacy interrupts. The TRIO
+ * is configured such that only INTx Assert interrupts are proxied
+ * to Linux which just calls handle_level_irq() after clearing the
+ * MAC INTx Assert status bit associated with this interrupt.
+ */
+static void
+trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct pci_controller *controller = irq_desc_get_handler_data(desc);
+ gxio_trio_context_t *trio_context = controller->trio;
+ uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
+ int mac = controller->mac;
+ unsigned int reg_offset;
+ uint64_t level_mask;
+
+ handle_level_irq(irq, desc);
+
+ /*
+ * Clear the INTx Level status, otherwise future interrupts are
+ * not sent.
+ */
+ reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
+ TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+ level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx;
+
+ __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask);
+}
+
+/*
+ * Create kernel irqs and set up the handlers for the legacy interrupts.
+ * Also some minimum initialization for the MSI support.
+ */
+static int __devinit tile_init_irqs(struct pci_controller *controller)
+{
+ int i;
+ int j;
+ int irq;
+ int result;
+
+ cpumask_copy(&intr_cpus_map, cpu_online_mask);
+
+
+ for (i = 0; i < 4; i++) {
+ gxio_trio_context_t *context = controller->trio;
+ int cpu;
+
+ /* Ask the kernel to allocate an IRQ. */
+ irq = create_irq();
+ if (irq < 0) {
+ pr_err("PCI: no free irq vectors, failed for %d\n", i);
+
+ goto free_irqs;
+ }
+ controller->irq_intx_table[i] = irq;
+
+ /* Distribute the 4 IRQs to different tiles. */
+ cpu = tile_irq_cpu(irq);
+
+ /* Configure the TRIO intr binding for this IRQ. */
+ result = gxio_trio_config_legacy_intr(context, cpu_x(cpu),
+ cpu_y(cpu), KERNEL_PL,
+ irq, controller->mac, i);
+ if (result < 0) {
+ pr_err("PCI: MAC intx config failed for %d\n", i);
+
+ goto free_irqs;
+ }
+
+ /*
+ * Register the IRQ handler with the kernel.
+ */
+ irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
+ trio_handle_level_irq);
+ irq_set_chip_data(irq, (void *)(uint64_t)i);
+ irq_set_handler_data(irq, controller);
+ }
+
+ return 0;
+
+free_irqs:
+ for (j = 0; j < i; j++)
+ destroy_irq(controller->irq_intx_table[j]);
+
+ return -1;
+}
+
+/*
+ * Find valid controllers and fill in pci_controller structs for each
+ * of them.
+ *
+ * Returns the number of controllers discovered.
+ */
+int __init tile_pci_init(void)
+{
+ int num_trio_shims = 0;
+ int ctl_index = 0;
+ int i, j;
+
+ if (!pci_probe) {
+ pr_info("PCI: disabled by boot argument\n");
+ return 0;
+ }
+
+ pr_info("PCI: Searching for controllers...\n");
+
+ /*
+ * We loop over all the TRIO shims.
+ */
+ for (i = 0; i < TILEGX_NUM_TRIO; i++) {
+ int ret;
+
+ ret = tile_pcie_open(i);
+ if (ret < 0)
+ continue;
+
+ num_trio_shims++;
+ }
+
+ if (num_trio_shims == 0 || sim_is_simulator())
+ return 0;
+
+ /*
+ * Now determine which PCIe ports are configured to operate in RC mode.
+ * We look at the Board Information Block first and then see if there
+ * are any overriding configuration by the HW strapping pin.
+ */
+ for (i = 0; i < TILEGX_NUM_TRIO; i++) {
+ gxio_trio_context_t *context = &trio_contexts[i];
+ int ret;
+
+ if (context->fd < 0)
+ continue;
+
+ ret = hv_dev_pread(context->fd, 0,
+ (HV_VirtAddr)&pcie_ports[i][0],
+ sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES,
+ GXIO_TRIO_OP_GET_PORT_PROPERTY);
+ if (ret < 0) {
+ pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
+ " on TRIO %d\n", ret, i);
+ continue;
+ }
+
+ for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
+ if (pcie_ports[i][j].allow_rc) {
+ pcie_rc[i][j] = 1;
+ num_rc_controllers++;
+ }
+ else if (pcie_ports[i][j].allow_ep) {
+ num_ep_controllers++;
+ }
+ }
+ }
+
+ /*
+ * Return if no PCIe ports are configured to operate in RC mode.
+ */
+ if (num_rc_controllers == 0)
+ return 0;
+
+ /*
+ * Set the TRIO pointer and MAC index for each PCIe RC port.
+ */
+ for (i = 0; i < TILEGX_NUM_TRIO; i++) {
+ for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
+ if (pcie_rc[i][j]) {
+ pci_controllers[ctl_index].trio =
+ &trio_contexts[i];
+ pci_controllers[ctl_index].mac = j;
+ pci_controllers[ctl_index].trio_index = i;
+ ctl_index++;
+ if (ctl_index == num_rc_controllers)
+ goto out;
+ }
+ }
+ }
+
+out:
+ /*
+ * Configure each PCIe RC port.
+ */
+ for (i = 0; i < num_rc_controllers; i++) {
+ /*
+ * Configure the PCIe MAC to run in RC mode.
+ */
+
+ struct pci_controller *controller = &pci_controllers[i];
+
+ controller->index = i;
+ controller->ops = &tile_cfg_ops;
+
+ /*
+ * The PCI memory resource is located above the PA space.
+ * For every host bridge, the BAR window or the MMIO aperture
+ * is in range [3GB, 4GB - 1] of a 4GB space beyond the
+ * PA space.
+ */
+
+ controller->mem_offset = TILE_PCI_MEM_START +
+ (i * TILE_PCI_BAR_WINDOW_TOP);
+ controller->mem_space.start = controller->mem_offset +
+ TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE;
+ controller->mem_space.end = controller->mem_offset +
+ TILE_PCI_BAR_WINDOW_TOP - 1;
+ controller->mem_space.flags = IORESOURCE_MEM;
+ snprintf(controller->mem_space_name,
+ sizeof(controller->mem_space_name),
+ "PCI mem domain %d", i);
+ controller->mem_space.name = controller->mem_space_name;
+ }
+
+ return num_rc_controllers;
+}
+
+/*
+ * (pin - 1) converts from the PCI standard's [1:4] convention to
+ * a normal [0:3] range.
+ */
+static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
+{
+ struct pci_controller *controller =
+ (struct pci_controller *)dev->sysdata;
+ return controller->irq_intx_table[pin - 1];
+}
+
+
+static void __devinit fixup_read_and_payload_sizes(struct pci_controller *
+ controller)
+{
+ gxio_trio_context_t *trio_context = controller->trio;
+ struct pci_bus *root_bus = controller->root_bus;
+ TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control;
+ TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap;
+ unsigned int reg_offset;
+ struct pci_bus *child;
+ int mac;
+ int err;
+
+ mac = controller->mac;
+
+ /*
+ * Set our max read request size to be 4KB.
+ */
+ reg_offset =
+ (TRIO_PCIE_RC_DEVICE_CONTROL <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
+ TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+ dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
+ reg_offset);
+ dev_control.max_read_req_sz = 5;
+ __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
+ dev_control.word);
+
+ /*
+ * Set the max payload size supported by this Gx PCIe MAC.
+ * Though Gx PCIe supports Max Payload Size of up to 1024 bytes,
+ * experiments have shown that setting MPS to 256 yields the
+ * best performance.
+ */
+ reg_offset =
+ (TRIO_PCIE_RC_DEVICE_CAP <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
+ TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+ rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
+ reg_offset);
+ rc_dev_cap.mps_sup = 1;
+ __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
+ rc_dev_cap.word);
+
+ /* Configure PCI Express MPS setting. */
+ list_for_each_entry(child, &root_bus->children, node) {
+ struct pci_dev *self = child->self;
+ if (!self)
+ continue;
+
+ pcie_bus_configure_settings(child, self->pcie_mpss);
+ }
+
+ /*
+ * Set the mac_config register in trio based on the MPS/MRS of the link.
+ */
+ reg_offset =
+ (TRIO_PCIE_RC_DEVICE_CONTROL <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
+ TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+ dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
+ reg_offset);
+
+ err = gxio_trio_set_mps_mrs(trio_context,
+ dev_control.max_payload_size,
+ dev_control.max_read_req_sz,
+ mac);
+ if (err < 0) {
+ pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, "
+ "MAC %d on TRIO %d\n",
+ mac, controller->trio_index);
+ }
+}
+
+static int __devinit setup_pcie_rc_delay(char *str)
+{
+ unsigned long delay = 0;
+ unsigned long trio_index;
+ unsigned long mac;
+
+ if (str == NULL || !isdigit(*str))
+ return -EINVAL;
+ trio_index = simple_strtoul(str, (char **)&str, 10);
+ if (trio_index >= TILEGX_NUM_TRIO)
+ return -EINVAL;
+
+ if (*str != ',')
+ return -EINVAL;
+
+ str++;
+ if (!isdigit(*str))
+ return -EINVAL;
+ mac = simple_strtoul(str, (char **)&str, 10);
+ if (mac >= TILEGX_TRIO_PCIES)
+ return -EINVAL;
+
+ if (*str != '\0') {
+ if (*str != ',')
+ return -EINVAL;
+
+ str++;
+ if (!isdigit(*str))
+ return -EINVAL;
+ delay = simple_strtoul(str, (char **)&str, 10);
+ if (delay > MAX_RC_DELAY)
+ return -EINVAL;
+ }
+
+ rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
+ pr_info("Delaying PCIe RC link training for %u sec"
+ " on MAC %lu on TRIO %lu\n", rc_delay[trio_index][mac],
+ mac, trio_index);
+ return 0;
+}
+early_param("pcie_rc_delay", setup_pcie_rc_delay);
+
+/*
+ * PCI initialization entry point, called by subsys_initcall.
+ */
+int __init pcibios_init(void)
+{
+ resource_size_t offset;
+ LIST_HEAD(resources);
+ int next_busno;
+ int i;
+
+ tile_pci_init();
+
+ if (num_rc_controllers == 0 && num_ep_controllers == 0)
+ return 0;
+
+ /*
+ * We loop over all the TRIO shims and set up the MMIO mappings.
+ */
+ for (i = 0; i < TILEGX_NUM_TRIO; i++) {
+ gxio_trio_context_t *context = &trio_contexts[i];
+
+ if (context->fd < 0)
+ continue;
+
+ /*
+ * Map in the MMIO space for the MAC.
+ */
+ offset = 0;
+ context->mmio_base_mac =
+ iorpc_ioremap(context->fd, offset,
+ HV_TRIO_CONFIG_IOREMAP_SIZE);
+ if (context->mmio_base_mac == NULL) {
+ pr_err("PCI: MAC map failure on TRIO %d\n", i);
+
+ hv_dev_close(context->fd);
+ context->fd = -1;
+ continue;
+ }
+ }
+
+ /*
+ * Delay a bit in case devices aren't ready. Some devices are
+ * known to require at least 20ms here, but we use a more
+ * conservative value.
+ */
+ msleep(250);
+
+ /* Scan all of the recorded PCI controllers. */
+ for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
+ struct pci_controller *controller = &pci_controllers[i];
+ gxio_trio_context_t *trio_context = controller->trio;
+ TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
+ TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
+ TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
+ struct pci_bus *bus;
+ unsigned int reg_offset;
+ unsigned int class_code_revision;
+ int trio_index;
+ int mac;
+ int ret;
+
+ if (trio_context->fd < 0)
+ continue;
+
+ trio_index = controller->trio_index;
+ mac = controller->mac;
+
+ /*
+ * Check the port strap state which will override the BIB
+ * setting.
+ */
+
+ reg_offset =
+ (TRIO_PCIE_INTFC_PORT_CONFIG <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
+ TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+ port_config.word =
+ __gxio_mmio_read(trio_context->mmio_base_mac +
+ reg_offset);
+
+ if ((port_config.strap_state !=
+ TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC) &&
+ (port_config.strap_state !=
+ TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1)) {
+ /*
+ * If this is really intended to be an EP port,
+ * record it so that the endpoint driver will know about it.
+ */
+ if (port_config.strap_state ==
+ TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT ||
+ port_config.strap_state ==
+ TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1)
+ pcie_ports[trio_index][mac].allow_ep = 1;
+
+ continue;
+ }
+
+ /*
+ * Delay the RC link training if needed.
+ */
+ if (rc_delay[trio_index][mac])
+ msleep(rc_delay[trio_index][mac] * 1000);
+
+ ret = gxio_trio_force_rc_link_up(trio_context, mac);
+ if (ret < 0)
+ pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
+ "MAC %d on TRIO %d\n", mac, trio_index);
+
+ pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i,
+ trio_index, controller->mac);
+
+ /*
+ * Wait a bit here because some EP devices take longer
+ * to come up.
+ */
+ msleep(1000);
+
+ /*
+ * Check for PCIe link-up status.
+ */
+
+ reg_offset =
+ (TRIO_PCIE_INTFC_PORT_STATUS <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
+ TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+ port_status.word =
+ __gxio_mmio_read(trio_context->mmio_base_mac +
+ reg_offset);
+ if (!port_status.dl_up) {
+ pr_err("PCI: link is down, MAC %d on TRIO %d\n",
+ mac, trio_index);
+ continue;
+ }
+
+ /*
+ * Ensure that the link can come out of L1 power down state.
+ * Strictly speaking, this is needed only in the case of
+ * heavy RC-initiated DMAs.
+ */
+ reg_offset =
+ (TRIO_PCIE_INTFC_TX_FIFO_CTL <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
+ TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+ tx_fifo_ctl.word =
+ __gxio_mmio_read(trio_context->mmio_base_mac +
+ reg_offset);
+ tx_fifo_ctl.min_p_credits = 0;
+ __gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
+ tx_fifo_ctl.word);
+
+ /*
+ * Change the device ID so that Linux bus crawl doesn't confuse
+ * the internal bridge with any Tilera endpoints.
+ */
+
+ reg_offset =
+ (TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
+ TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+ __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
+ (TILERA_GX36_RC_DEV_ID <<
+ TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
+ TILERA_VENDOR_ID);
+
+ /*
+ * Set the internal P2P bridge class code.
+ */
+
+ reg_offset =
+ (TRIO_PCIE_RC_REVISION_ID <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
+ TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+ class_code_revision =
+ __gxio_mmio_read32(trio_context->mmio_base_mac +
+ reg_offset);
+ class_code_revision = (class_code_revision & 0xff ) |
+ (PCI_CLASS_BRIDGE_PCI << 16);
+
+ __gxio_mmio_write32(trio_context->mmio_base_mac +
+ reg_offset, class_code_revision);
+
+#ifdef USE_SHARED_PCIE_CONFIG_REGION
+
+ /*
+ * Map in the MMIO space for the PIO region.
+ */
+ offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
+ (((unsigned long long)mac) <<
+ TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
+
+#else
+
+ /*
+ * Alloc a PIO region for PCI config access per MAC.
+ */
+ ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
+ if (ret < 0) {
+ pr_err("PCI: PCI CFG PIO alloc failure for mac %d "
+ "on TRIO %d, give up\n", mac, trio_index);
+
+ continue;
+ }
+
+ trio_context->pio_cfg_index[mac] = ret;
+
+ /*
+ * For PIO CFG, the bus_address_hi parameter is 0.
+ */
+ ret = gxio_trio_init_pio_region_aux(trio_context,
+ trio_context->pio_cfg_index[mac],
+ mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
+ if (ret < 0) {
+ pr_err("PCI: PCI CFG PIO init failure for mac %d "
+ "on TRIO %d, give up\n", mac, trio_index);
+
+ continue;
+ }
+
+ offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
+ (((unsigned long long)mac) <<
+ TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
+
+#endif
+
+ trio_context->mmio_base_pio_cfg[mac] =
+ iorpc_ioremap(trio_context->fd, offset,
+ (1 << TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT));
+ if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
+ pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
+ mac, trio_index);
+
+ continue;
+ }
+
+ /*
+ * Initialize the PCIe interrupts.
+ */
+ if (tile_init_irqs(controller)) {
+ pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
+ mac, trio_index);
+
+ continue;
+ }
+
+ /*
+ * The PCI memory resource is located above the PA space.
+ * The memory range for the PCI root bus should not overlap
+ * with the physical RAM
+ */
+ pci_add_resource_offset(&resources, &controller->mem_space,
+ controller->mem_offset);
+
+ controller->first_busno = next_busno;
+ bus = pci_scan_root_bus(NULL, next_busno, controller->ops,
+ controller, &resources);
+ controller->root_bus = bus;
+ next_busno = bus->subordinate + 1;
+
+ }
+
+ /* Do machine dependent PCI interrupt routing */
+ pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
+
+ /*
+ * This comes from the generic Linux PCI driver.
+ *
+ * It allocates all of the resources (I/O memory, etc)
+ * associated with the devices read in above.
+ */
+
+ pci_assign_unassigned_resources();
+
+ /* Record the I/O resources in the PCI controller structure. */
+ for (i = 0; i < num_rc_controllers; i++) {
+ struct pci_controller *controller = &pci_controllers[i];
+ gxio_trio_context_t *trio_context = controller->trio;
+ struct pci_bus *root_bus = pci_controllers[i].root_bus;
+ struct pci_bus *next_bus;
+ uint32_t bus_address_hi;
+ struct pci_dev *dev;
+ int ret;
+ int j;
+
+ /*
+ * Skip controllers that are not properly initialized or
+ * have down links.
+ */
+ if (root_bus == NULL)
+ continue;
+
+ /* Configure the max_payload_size values for this domain. */
+ fixup_read_and_payload_sizes(controller);
+
+ list_for_each_entry(dev, &root_bus->devices, bus_list) {
+ /* Find the PCI host controller, ie. the 1st bridge. */
+ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
+ (PCI_SLOT(dev->devfn) == 0)) {
+ next_bus = dev->subordinate;
+ pci_controllers[i].mem_resources[0] =
+ *next_bus->resource[0];
+ pci_controllers[i].mem_resources[1] =
+ *next_bus->resource[1];
+ pci_controllers[i].mem_resources[2] =
+ *next_bus->resource[2];
+
+ break;
+ }
+ }
+
+ if (pci_controllers[i].mem_resources[1].flags & IORESOURCE_MEM)
+ bus_address_hi =
+ pci_controllers[i].mem_resources[1].start >> 32;
+ else if (pci_controllers[i].mem_resources[2].flags & IORESOURCE_PREFETCH)
+ bus_address_hi =
+ pci_controllers[i].mem_resources[2].start >> 32;
+ else {
+ /* This is unlikely. */
+ pr_err("PCI: no memory resources on TRIO %d mac %d\n",
+ controller->trio_index, controller->mac);
+ continue;
+ }
+
+ /*
+ * Alloc a PIO region for PCI memory access for each RC port.
+ */
+ ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
+ if (ret < 0) {
+ pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, "
+ "give up\n", controller->trio_index,
+ controller->mac);
+
+ continue;
+ }
+
+ controller->pio_mem_index = ret;
+
+ /*
+ * For PIO MEM, the bus_address_hi parameter is hard-coded 0
+ * because we always assign 32-bit PCI bus BAR ranges.
+ */
+ ret = gxio_trio_init_pio_region_aux(trio_context,
+ controller->pio_mem_index,
+ controller->mac,
+ 0,
+ 0);
+ if (ret < 0) {
+ pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, "
+ "give up\n", controller->trio_index,
+ controller->mac);
+
+ continue;
+ }
+
+ /*
+ * Configure a Mem-Map region for each memory controller so
+ * that Linux can map all of its PA space to the PCI bus.
+ * Use the IOMMU to handle hash-for-home memory.
+ */
+ for_each_online_node(j) {
+ unsigned long start_pfn = node_start_pfn[j];
+ unsigned long end_pfn = node_end_pfn[j];
+ unsigned long nr_pages = end_pfn - start_pfn;
+
+ ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
+ 0);
+ if (ret < 0) {
+ pr_err("PCI: Mem-Map alloc failure on TRIO %d "
+ "mac %d for MC %d, give up\n",
+ controller->trio_index,
+ controller->mac, j);
+
+ goto alloc_mem_map_failed;
+ }
+
+ controller->mem_maps[j] = ret;
+
+ /*
+ * Initialize the Mem-Map and the I/O MMU so that all
+ * the physical memory can be accessed by the endpoint
+ * devices. The base bus address is set to the base CPA
+ * of this memory controller plus an offset (see pci.h).
+ * The region's base VA is set to the base CPA. The
+ * I/O MMU table essentially translates the CPA to
+ * the real PA. Implicitly, for node 0, we create
+ * a separate Mem-Map region that serves as the inbound
+ * window for legacy 32-bit devices. This is a direct
+ * map of the low 4GB CPA space.
+ */
+ ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
+ controller->mem_maps[j],
+ start_pfn << PAGE_SHIFT,
+ nr_pages << PAGE_SHIFT,
+ trio_context->asid,
+ controller->mac,
+ (start_pfn << PAGE_SHIFT) +
+ TILE_PCI_MEM_MAP_BASE_OFFSET,
+ j,
+ GXIO_TRIO_ORDER_MODE_UNORDERED);
+ if (ret < 0) {
+ pr_err("PCI: Mem-Map init failure on TRIO %d "
+ "mac %d for MC %d, give up\n",
+ controller->trio_index,
+ controller->mac, j);
+
+ goto alloc_mem_map_failed;
+ }
+ continue;
+
+alloc_mem_map_failed:
+ break;
+ }
+
+ }
+
+ return 0;
+}
+subsys_initcall(pcibios_init);
+
+/* Note: to be deleted after Linux 3.6 merge. */
+void __devinit pcibios_fixup_bus(struct pci_bus *bus)
+{
+}
+
+/*
+ * This can be called from the generic PCI layer, but doesn't need to
+ * do anything.
+ */
+char __devinit *pcibios_setup(char *str)
+{
+ if (!strcmp(str, "off")) {
+ pci_probe = 0;
+ return NULL;
+ }
+ return str;
+}
+
+/*
+ * This is called from the generic Linux layer.
+ */
+void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/*
+ * Enable memory address decoding, as appropriate, for the
+ * device described by the 'dev' struct. The I/O decoding
+ * is disabled, though the TILE-Gx supports I/O addressing.
+ *
+ * This is called from the generic PCI layer, and can be called
+ * for bridges or endpoints.
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ return pci_enable_resources(dev, mask);
+}
+
+/* Called for each device after PCI setup is done. */
+static void __init
+pcibios_fixup_final(struct pci_dev *pdev)
+{
+ set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
+ set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
+ pdev->dev.archdata.max_direct_dma_addr =
+ TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
+
+/* Map a PCI MMIO bus address into VA space. */
+void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
+{
+ struct pci_controller *controller = NULL;
+ resource_size_t bar_start;
+ resource_size_t bar_end;
+ resource_size_t offset;
+ resource_size_t start;
+ resource_size_t end;
+ int trio_fd;
+ int i, j;
+
+ start = phys_addr;
+ end = phys_addr + size - 1;
+
+ /*
+ * In the following, each PCI controller's mem_resources[1]
+ * represents its (non-prefetchable) PCI memory resource and
+ * mem_resources[2] refers to its prefetchable PCI memory resource.
+ * By searching phys_addr in each controller's mem_resources[], we can
+ * determine the controller that should accept the PCI memory access.
+ */
+
+ for (i = 0; i < num_rc_controllers; i++) {
+ /*
+ * Skip controllers that are not properly initialized or
+ * have down links.
+ */
+ if (pci_controllers[i].root_bus == NULL)
+ continue;
+
+ for (j = 1; j < 3; j++) {
+ bar_start =
+ pci_controllers[i].mem_resources[j].start;
+ bar_end =
+ pci_controllers[i].mem_resources[j].end;
+
+ if ((start >= bar_start) && (end <= bar_end)) {
+
+ controller = &pci_controllers[i];
+
+ goto got_it;
+ }
+ }
+ }
+
+ if (controller == NULL)
+ return NULL;
+
+got_it:
+ trio_fd = controller->trio->fd;
+
+ /* Convert the resource start to the bus address offset. */
+ start = phys_addr - controller->mem_offset;
+
+ offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;
+
+ /*
+ * We need to keep the PCI bus address's in-page offset in the VA.
+ */
+ return iorpc_ioremap(trio_fd, offset, size) +
+ (phys_addr & (PAGE_SIZE - 1));
+}
+EXPORT_SYMBOL(ioremap);
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+ iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+/****************************************************************
+ *
+ * Tile PCI config space read/write routines
+ *
+ ****************************************************************/
+
+/*
+ * These are the normal read and write ops
+ * These are expanded with macros from pci_bus_read_config_byte() etc.
+ *
+ * devfn is the combined PCI device & function.
+ *
+ * offset is in bytes, from the start of config space for the
+ * specified bus & device.
+ */
+
+static int __devinit tile_cfg_read(struct pci_bus *bus,
+ unsigned int devfn,
+ int offset,
+ int size,
+ u32 *val)
+{
+ struct pci_controller *controller = bus->sysdata;
+ gxio_trio_context_t *trio_context = controller->trio;
+ int busnum = bus->number & 0xff;
+ int device = PCI_SLOT(devfn);
+ int function = PCI_FUNC(devfn);
+ int config_type = 1;
+ TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
+ void *mmio_addr;
+
+ /*
+ * Map all accesses to the local device on root bus into the
+ * MMIO space of the MAC. Accesses to the downstream devices
+ * go to the PIO space.
+ */
+ if (pci_is_root_bus(bus)) {
+ if (device == 0) {
+ /*
+ * This is the internal downstream P2P bridge,
+ * access directly.
+ */
+ unsigned int reg_offset;
+
+ reg_offset = ((offset & 0xFFF) <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
+ << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (controller->mac <<
+ TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+ mmio_addr = trio_context->mmio_base_mac + reg_offset;
+
+ goto valid_device;
+
+ } else {
+ /*
+ * We fake an empty device for (device > 0),
+ * since there is only one device on bus 0.
+ */
+ goto invalid_device;
+ }
+ }
+
+ /*
+ * Accesses to the directly attached device have to be
+ * sent as type-0 configs.
+ */
+
+ if (busnum == (controller->first_busno + 1)) {
+ /*
+ * There is only one device off of our built-in P2P bridge.
+ */
+ if (device != 0)
+ goto invalid_device;
+
+ config_type = 0;
+ }
+
+ cfg_addr.word = 0;
+ cfg_addr.reg_addr = (offset & 0xFFF);
+ cfg_addr.fn = function;
+ cfg_addr.dev = device;
+ cfg_addr.bus = busnum;
+ cfg_addr.type = config_type;
+
+ /*
+ * Note that we don't set the mac field in cfg_addr because the
+ * mapping is per port.
+ */
+
+ mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
+ cfg_addr.word;
+
+valid_device:
+
+ switch (size) {
+ case 4:
+ *val = __gxio_mmio_read32(mmio_addr);
+ break;
+
+ case 2:
+ *val = __gxio_mmio_read16(mmio_addr);
+ break;
+
+ case 1:
+ *val = __gxio_mmio_read8(mmio_addr);
+ break;
+
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+
+ TRACE_CFG_RD(size, *val, busnum, device, function, offset);
+
+ return 0;
+
+invalid_device:
+
+ switch (size) {
+ case 4:
+ *val = 0xFFFFFFFF;
+ break;
+
+ case 2:
+ *val = 0xFFFF;
+ break;
+
+ case 1:
+ *val = 0xFF;
+ break;
+
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+
+ return 0;
+}
+
+
+/*
+ * See tile_cfg_read() for relevent comments.
+ * Note that "val" is the value to write, not a pointer to that value.
+ */
+static int __devinit tile_cfg_write(struct pci_bus *bus,
+ unsigned int devfn,
+ int offset,
+ int size,
+ u32 val)
+{
+ struct pci_controller *controller = bus->sysdata;
+ gxio_trio_context_t *trio_context = controller->trio;
+ int busnum = bus->number & 0xff;
+ int device = PCI_SLOT(devfn);
+ int function = PCI_FUNC(devfn);
+ int config_type = 1;
+ TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
+ void *mmio_addr;
+ u32 val_32 = (u32)val;
+ u16 val_16 = (u16)val;
+ u8 val_8 = (u8)val;
+
+ /*
+ * Map all accesses to the local device on root bus into the
+ * MMIO space of the MAC. Accesses to the downstream devices
+ * go to the PIO space.
+ */
+ if (pci_is_root_bus(bus)) {
+ if (device == 0) {
+ /*
+ * This is the internal downstream P2P bridge,
+ * access directly.
+ */
+ unsigned int reg_offset;
+
+ reg_offset = ((offset & 0xFFF) <<
+ TRIO_CFG_REGION_ADDR__REG_SHIFT) |
+ (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
+ << TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
+ (controller->mac <<
+ TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
+
+ mmio_addr = trio_context->mmio_base_mac + reg_offset;
+
+ goto valid_device;
+
+ } else {
+ /*
+ * We fake an empty device for (device > 0),
+ * since there is only one device on bus 0.
+ */
+ goto invalid_device;
+ }
+ }
+
+ /*
+ * Accesses to the directly attached device have to be
+ * sent as type-0 configs.
+ */
+
+ if (busnum == (controller->first_busno + 1)) {
+ /*
+ * There is only one device off of our built-in P2P bridge.
+ */
+ if (device != 0)
+ goto invalid_device;
+
+ config_type = 0;
+ }
+
+ cfg_addr.word = 0;
+ cfg_addr.reg_addr = (offset & 0xFFF);
+ cfg_addr.fn = function;
+ cfg_addr.dev = device;
+ cfg_addr.bus = busnum;
+ cfg_addr.type = config_type;
+
+ /*
+ * Note that we don't set the mac field in cfg_addr because the
+ * mapping is per port.
+ */
+
+ mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
+ cfg_addr.word;
+
+valid_device:
+
+ switch (size) {
+ case 4:
+ __gxio_mmio_write32(mmio_addr, val_32);
+ TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
+ break;
+
+ case 2:
+ __gxio_mmio_write16(mmio_addr, val_16);
+ TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
+ break;
+
+ case 1:
+ __gxio_mmio_write8(mmio_addr, val_8);
+ TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
+ break;
+
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+
+invalid_device:
+
+ return 0;
+}
+
+
+static struct pci_ops tile_cfg_ops = {
+ .read = tile_cfg_read,
+ .write = tile_cfg_write,
+};
+
+
+/*
+ * MSI support starts here.
+ */
+static unsigned int
+tilegx_msi_startup(struct irq_data *d)
+{
+ if (d->msi_desc)
+ unmask_msi_irq(d);
+
+ return 0;
+}
+
+static void
+tilegx_msi_ack(struct irq_data *d)
+{
+ __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
+}
+
+static void
+tilegx_msi_mask(struct irq_data *d)
+{
+ mask_msi_irq(d);
+ __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
+}
+
+static void
+tilegx_msi_unmask(struct irq_data *d)
+{
+ __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
+ unmask_msi_irq(d);
+}
+
+static struct irq_chip tilegx_msi_chip = {
+ .name = "tilegx_msi",
+ .irq_startup = tilegx_msi_startup,
+ .irq_ack = tilegx_msi_ack,
+ .irq_mask = tilegx_msi_mask,
+ .irq_unmask = tilegx_msi_unmask,
+
+ /* TBD: support set_affinity. */
+};
+
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+{
+ struct pci_controller *controller;
+ gxio_trio_context_t *trio_context;
+ struct msi_msg msg;
+ int default_irq;
+ uint64_t mem_map_base;
+ uint64_t mem_map_limit;
+ u64 msi_addr;
+ int mem_map;
+ int cpu;
+ int irq;
+ int ret;
+
+ irq = create_irq();
+ if (irq < 0)
+ return irq;
+
+ /*
+ * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
+ * devices that are not capable of generating a 64-bit message address.
+ * These devices will fall back to using the legacy interrupts.
+ * Most PCIe endpoint devices do support 64-bit message addressing.
+ */
+ if (desc->msi_attrib.is_64 == 0) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "64-bit MSI message address not supported, "
+ "falling back to legacy interrupts.\n");
+
+ ret = -ENOMEM;
+ goto is_64_failure;
+ }
+
+ default_irq = desc->msi_attrib.default_irq;
+ controller = irq_get_handler_data(default_irq);
+
+ BUG_ON(!controller);
+
+ trio_context = controller->trio;
+
+ /*
+ * Allocate the Mem-Map that will accept the MSI write and
+ * trigger the TILE-side interrupts.
+ */
+ mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
+ if (mem_map < 0) {
+ dev_printk(KERN_INFO, &pdev->dev,
+ "%s Mem-Map alloc failure. "
+ "Failed to initialize MSI interrupts. "
+ "Falling back to legacy interrupts.\n",
+ desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
+
+ ret = -ENOMEM;
+ goto msi_mem_map_alloc_failure;
+ }
+
+ /* We try to distribute different IRQs to different tiles. */
+ cpu = tile_irq_cpu(irq);
+
+ /*
+ * Now call up to the HV to configure the Mem-Map interrupt and
+ * set up the IPI binding.
+ */
+ mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
+ mem_map * MEM_MAP_INTR_REGION_SIZE;
+ mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
+
+ ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
+ KERNEL_PL, irq, controller->mac,
+ mem_map, mem_map_base, mem_map_limit,
+ trio_context->asid);
+ if (ret < 0) {
+ dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n");
+
+ goto hv_msi_config_failure;
+ }
+
+ irq_set_msi_desc(irq, desc);
+
+ msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 - TRIO_MAP_MEM_REG_INT0;
+
+ msg.address_hi = msi_addr >> 32;
+ msg.address_lo = msi_addr & 0xffffffff;
+
+ msg.data = mem_map;
+
+ write_msi_msg(irq, &msg);
+ irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
+ irq_set_handler_data(irq, controller);
+
+ return 0;
+
+hv_msi_config_failure:
+ /* Free mem-map */
+msi_mem_map_alloc_failure:
+is_64_failure:
+ destroy_irq(irq);
+ return ret;
+}
+
+void arch_teardown_msi_irq(unsigned int irq)
+{
+ destroy_irq(irq);
+}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index dd87f3420390..6a649a4462d3 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -23,6 +23,7 @@
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
+#include <linux/swiotlb.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/highmem.h>
@@ -109,7 +110,7 @@ static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
};
static nodemask_t __initdata isolnodes;
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && !defined(__tilegx__)
enum { DEFAULT_PCI_RESERVE_MB = 64 };
static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
unsigned long __initdata pci_reserve_start_pfn = -1U;
@@ -160,7 +161,7 @@ static int __init setup_isolnodes(char *str)
}
early_param("isolnodes", setup_isolnodes);
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && !defined(__tilegx__)
static int __init setup_pci_reserve(char* str)
{
unsigned long mb;
@@ -171,7 +172,7 @@ static int __init setup_pci_reserve(char* str)
pci_reserve_mb = mb;
pr_info("Reserving %dMB for PCIE root complex mappings\n",
- pci_reserve_mb);
+ pci_reserve_mb);
return 0;
}
early_param("pci_reserve", setup_pci_reserve);
@@ -411,7 +412,7 @@ static void __init setup_memory(void)
continue;
}
#endif
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && !defined(__tilegx__)
/*
* Blocks that overlap the pci reserved region must
* have enough space to hold the maximum percpu data
@@ -604,11 +605,9 @@ static void __init setup_bootmem_allocator_node(int i)
/* Free all the space back into the allocator. */
free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
-#if defined(CONFIG_PCI)
+#if defined(CONFIG_PCI) && !defined(__tilegx__)
/*
- * Throw away any memory aliased by the PCI region. FIXME: this
- * is a temporary hack to work around bug 10502, and needs to be
- * fixed properly.
+ * Throw away any memory aliased by the PCI region.
*/
if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
@@ -658,6 +657,8 @@ static void __init zone_sizes_init(void)
unsigned long zones_size[MAX_NR_ZONES] = { 0 };
int size = percpu_size();
int num_cpus = smp_height * smp_width;
+ const unsigned long dma_end = (1UL << (32 - PAGE_SHIFT));
+
int i;
for (i = 0; i < num_cpus; ++i)
@@ -729,6 +730,14 @@ static void __init zone_sizes_init(void)
zones_size[ZONE_NORMAL] = end - start;
#endif
+ if (start < dma_end) {
+ zones_size[ZONE_DMA] = min(zones_size[ZONE_NORMAL],
+ dma_end - start);
+ zones_size[ZONE_NORMAL] -= zones_size[ZONE_DMA];
+ } else {
+ zones_size[ZONE_DMA] = 0;
+ }
+
/* Take zone metadata from controller 0 if we're isolnode. */
if (node_isset(i, isolnodes))
NODE_DATA(i)->bdata = &bootmem_node_data[0];
@@ -738,7 +747,7 @@ static void __init zone_sizes_init(void)
PFN_UP(node_percpu[i]));
/* Track the type of memory on each node */
- if (zones_size[ZONE_NORMAL])
+ if (zones_size[ZONE_NORMAL] || zones_size[ZONE_DMA])
node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
if (end != start)
@@ -1343,7 +1352,7 @@ void __init setup_arch(char **cmdline_p)
setup_cpu_maps();
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && !defined(__tilegx__)
/*
* Initialize the PCI structures. This is done before memory
* setup so that we know whether or not a pci_reserve region
@@ -1372,6 +1381,10 @@ void __init setup_arch(char **cmdline_p)
* any memory using the bootmem allocator.
*/
+#ifdef CONFIG_SWIOTLB
+ swiotlb_init(0);
+#endif
+
paging_init();
setup_numa_mapping();
zone_sizes_init();
@@ -1522,11 +1535,10 @@ static struct resource code_resource = {
};
/*
- * We reserve all resources above 4GB so that PCI won't try to put
- * mappings above 4GB; the standard allows that for some devices but
- * the probing code trunates values to 32 bits.
+ * On Pro, we reserve all resources above 4GB so that PCI won't try to put
+ * mappings above 4GB.
*/
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && !defined(__tilegx__)
static struct resource* __init
insert_non_bus_resource(void)
{
@@ -1571,8 +1583,7 @@ static int __init request_standard_resources(void)
int i;
enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };
- iomem_resource.end = -1LL;
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && !defined(__tilegx__)
insert_non_bus_resource();
#endif
@@ -1580,7 +1591,7 @@ static int __init request_standard_resources(void)
u64 start_pfn = node_start_pfn[i];
u64 end_pfn = node_end_pfn[i];
-#ifdef CONFIG_PCI
+#if defined(CONFIG_PCI) && !defined(__tilegx__)
if (start_pfn <= pci_reserve_start_pfn &&
end_pfn > pci_reserve_start_pfn) {
if (end_pfn > pci_reserve_end_pfn)
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 84873fbe8f27..e686c5ac90be 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -198,17 +198,7 @@ void __cpuinit online_secondary(void)
notify_cpu_starting(smp_processor_id());
- /*
- * We need to hold call_lock, so there is no inconsistency
- * between the time smp_call_function() determines number of
- * IPI recipients, and the time when the determination is made
- * for which cpus receive the IPI. Holding this
- * lock helps us to not include this cpu in a currently in progress
- * smp_call_function().
- */
- ipi_call_lock();
set_cpu_online(smp_processor_id(), 1);
- ipi_call_unlock();
__get_cpu_var(cpu_state) = CPU_ONLINE;
/* Set up tile-specific state for this cpu. */
diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c
new file mode 100644
index 000000000000..5af8debc6a71
--- /dev/null
+++ b/arch/tile/kernel/usb.c
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Register the Tile-Gx USB interfaces as platform devices.
+ *
+ * The actual USB driver is just some glue (in
+ * drivers/usb/host/[eo]hci-tilegx.c) which makes the registers available
+ * to the standard kernel EHCI and OHCI drivers.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/usb/tilegx.h>
+#include <linux/types.h>
+
+static u64 ehci_dmamask = DMA_BIT_MASK(32);
+
+#define USB_HOST_DEF(unit, type, dmamask) \
+ static struct \
+ tilegx_usb_platform_data tilegx_usb_platform_data_ ## type ## \
+ hci ## unit = { \
+ .dev_index = unit, \
+ }; \
+ \
+ static struct platform_device tilegx_usb_ ## type ## hci ## unit = { \
+ .name = "tilegx-" #type "hci", \
+ .id = unit, \
+ .dev = { \
+ .dma_mask = dmamask, \
+ .coherent_dma_mask = DMA_BIT_MASK(32), \
+ .platform_data = \
+ &tilegx_usb_platform_data_ ## type ## hci ## \
+ unit, \
+ }, \
+ };
+
+USB_HOST_DEF(0, e, &ehci_dmamask)
+USB_HOST_DEF(0, o, NULL)
+USB_HOST_DEF(1, e, &ehci_dmamask)
+USB_HOST_DEF(1, o, NULL)
+
+#undef USB_HOST_DEF
+
+static struct platform_device *tilegx_usb_devices[] __initdata = {
+ &tilegx_usb_ehci0,
+ &tilegx_usb_ehci1,
+ &tilegx_usb_ohci0,
+ &tilegx_usb_ohci1,
+};
+
+/** Add our set of possible USB devices. */
+static int __init tilegx_usb_init(void)
+{
+ platform_add_devices(tilegx_usb_devices,
+ ARRAY_SIZE(tilegx_usb_devices));
+
+ return 0;
+}
+arch_initcall(tilegx_usb_init);
diff --git a/arch/tile/lib/checksum.c b/arch/tile/lib/checksum.c
index e4bab5bd3f31..c3ca3e64d9d9 100644
--- a/arch/tile/lib/checksum.c
+++ b/arch/tile/lib/checksum.c
@@ -16,19 +16,6 @@
#include <net/checksum.h>
#include <linux/module.h>
-static inline unsigned int longto16(unsigned long x)
-{
- unsigned long ret;
-#ifdef __tilegx__
- ret = __insn_v2sadu(x, 0);
- ret = __insn_v2sadu(ret, 0);
-#else
- ret = __insn_sadh_u(x, 0);
- ret = __insn_sadh_u(ret, 0);
-#endif
- return ret;
-}
-
__wsum do_csum(const unsigned char *buff, int len)
{
int odd, count;
@@ -94,7 +81,7 @@ __wsum do_csum(const unsigned char *buff, int len)
}
if (len & 1)
result += *buff;
- result = longto16(result);
+ result = csum_long(result);
if (odd)
result = swab16(result);
out:
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index dbcbdf7b8aa8..5f7868dcd6d4 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -64,10 +64,6 @@ early_param("noallocl2", set_noallocl2);
#endif
-/* Provide no-op versions of these routines to keep flush_remote() cleaner. */
-#define mark_caches_evicted_start() 0
-#define mark_caches_evicted_finish(mask, timestamp) do {} while (0)
-
/*
* Update the irq_stat for cpus that we are going to interrupt
@@ -107,7 +103,6 @@ static void hv_flush_update(const struct cpumask *cache_cpumask,
* there's never any good reason for hv_flush_remote() to fail.
* - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
* is the type that Linux wants to pass around anyway.
- * - Centralizes the mark_caches_evicted() handling.
* - Canonicalizes that lengths of zero make cpumasks NULL.
* - Handles deferring TLB flushes for dataplane tiles.
* - Tracks remote interrupts in the per-cpu irq_cpustat_t.
@@ -126,7 +121,6 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
HV_Remote_ASID *asids, int asidcount)
{
int rc;
- int timestamp = 0; /* happy compiler */
struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
struct cpumask *cache_cpumask, *tlb_cpumask;
HV_PhysAddr cache_pa;
@@ -157,15 +151,11 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
asids, asidcount);
cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
- if (cache_control & HV_FLUSH_EVICT_L2)
- timestamp = mark_caches_evicted_start();
rc = hv_flush_remote(cache_pa, cache_control,
cpumask_bits(cache_cpumask),
tlb_va, tlb_length, tlb_pgsize,
cpumask_bits(tlb_cpumask),
asids, asidcount);
- if (cache_control & HV_FLUSH_EVICT_L2)
- mark_caches_evicted_finish(cache_cpumask, timestamp);
if (rc == 0)
return;
cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
@@ -180,85 +170,86 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
panic("Unsafe to continue.");
}
-void flush_remote_page(struct page *page, int order)
+static void homecache_finv_page_va(void* va, int home)
{
- int i, pages = (1 << order);
- for (i = 0; i < pages; ++i, ++page) {
- void *p = kmap_atomic(page);
- int hfh = 0;
- int home = page_home(page);
-#if CHIP_HAS_CBOX_HOME_MAP()
- if (home == PAGE_HOME_HASH)
- hfh = 1;
- else
-#endif
- BUG_ON(home < 0 || home >= NR_CPUS);
- finv_buffer_remote(p, PAGE_SIZE, hfh);
- kunmap_atomic(p);
+ if (home == smp_processor_id()) {
+ finv_buffer_local(va, PAGE_SIZE);
+ } else if (home == PAGE_HOME_HASH) {
+ finv_buffer_remote(va, PAGE_SIZE, 1);
+ } else {
+ BUG_ON(home < 0 || home >= NR_CPUS);
+ finv_buffer_remote(va, PAGE_SIZE, 0);
}
}
-void homecache_evict(const struct cpumask *mask)
+void homecache_finv_map_page(struct page *page, int home)
{
- flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
+ unsigned long flags;
+ unsigned long va;
+ pte_t *ptep;
+ pte_t pte;
+
+ if (home == PAGE_HOME_UNCACHED)
+ return;
+ local_irq_save(flags);
+#ifdef CONFIG_HIGHMEM
+ va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
+ (KM_TYPE_NR * smp_processor_id()));
+#else
+ va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
+#endif
+ ptep = virt_to_pte(NULL, (unsigned long)va);
+ pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
+ __set_pte(ptep, pte_set_home(pte, home));
+ homecache_finv_page_va((void *)va, home);
+ __pte_clear(ptep);
+ hv_flush_page(va, PAGE_SIZE);
+#ifdef CONFIG_HIGHMEM
+ kmap_atomic_idx_pop();
+#endif
+ local_irq_restore(flags);
}
-/*
- * Return a mask of the cpus whose caches currently own these pages.
- * The return value is whether the pages are all coherently cached
- * (i.e. none are immutable, incoherent, or uncached).
- */
-static int homecache_mask(struct page *page, int pages,
- struct cpumask *home_mask)
+static void homecache_finv_page_home(struct page *page, int home)
{
- int i;
- int cached_coherently = 1;
- cpumask_clear(home_mask);
- for (i = 0; i < pages; ++i) {
- int home = page_home(&page[i]);
- if (home == PAGE_HOME_IMMUTABLE ||
- home == PAGE_HOME_INCOHERENT) {
- cpumask_copy(home_mask, cpu_possible_mask);
- return 0;
- }
-#if CHIP_HAS_CBOX_HOME_MAP()
- if (home == PAGE_HOME_HASH) {
- cpumask_or(home_mask, home_mask, &hash_for_home_map);
- continue;
- }
-#endif
- if (home == PAGE_HOME_UNCACHED) {
- cached_coherently = 0;
- continue;
- }
- BUG_ON(home < 0 || home >= NR_CPUS);
- cpumask_set_cpu(home, home_mask);
- }
- return cached_coherently;
+ if (!PageHighMem(page) && home == page_home(page))
+ homecache_finv_page_va(page_address(page), home);
+ else
+ homecache_finv_map_page(page, home);
}
-/*
- * Return the passed length, or zero if it's long enough that we
- * believe we should evict the whole L2 cache.
- */
-static unsigned long cache_flush_length(unsigned long length)
+static inline bool incoherent_home(int home)
{
- return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
+ return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT;
}
-/* Flush a page out of whatever cache(s) it is in. */
-void homecache_flush_cache(struct page *page, int order)
+static void homecache_finv_page_internal(struct page *page, int force_map)
{
- int pages = 1 << order;
- int length = cache_flush_length(pages * PAGE_SIZE);
- unsigned long pfn = page_to_pfn(page);
- struct cpumask home_mask;
-
- homecache_mask(page, pages, &home_mask);
- flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
- sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
+ int home = page_home(page);
+ if (home == PAGE_HOME_UNCACHED)
+ return;
+ if (incoherent_home(home)) {
+ int cpu;
+ for_each_cpu(cpu, &cpu_cacheable_map)
+ homecache_finv_map_page(page, cpu);
+ } else if (force_map) {
+ /* Force if, e.g., the normal mapping is migrating. */
+ homecache_finv_map_page(page, home);
+ } else {
+ homecache_finv_page_home(page, home);
+ }
+ sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}
+void homecache_finv_page(struct page *page)
+{
+ homecache_finv_page_internal(page, 0);
+}
+
+void homecache_evict(const struct cpumask *mask)
+{
+ flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
+}
/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
@@ -441,15 +432,8 @@ struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
return page;
}
-void homecache_free_pages(unsigned long addr, unsigned int order)
+void __homecache_free_pages(struct page *page, unsigned int order)
{
- struct page *page;
-
- if (addr == 0)
- return;
-
- VM_BUG_ON(!virt_addr_valid((void *)addr));
- page = virt_to_page((void *)addr);
if (put_page_testzero(page)) {
homecache_change_page_home(page, order, initial_page_home());
if (order == 0) {
@@ -460,3 +444,13 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
}
}
}
+EXPORT_SYMBOL(__homecache_free_pages);
+
+void homecache_free_pages(unsigned long addr, unsigned int order)
+{
+ if (addr != 0) {
+ VM_BUG_ON(!virt_addr_valid((void *)addr));
+ __homecache_free_pages(virt_to_page((void *)addr), order);
+ }
+}
+EXPORT_SYMBOL(homecache_free_pages);
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 630dd2ce2afe..ef29d6c5e10e 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -150,7 +150,21 @@ void __init shatter_pmd(pmd_t *pmd)
assign_pte(pmd, pte);
}
-#ifdef CONFIG_HIGHMEM
+#ifdef __tilegx__
+static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
+{
+ pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
+ if (pud_none(*pud))
+ assign_pmd(pud, alloc_pmd());
+ return pmd_offset(pud, va);
+}
+#else
+static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
+{
+ return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
+}
+#endif
+
/*
* This function initializes a certain range of kernel virtual memory
* with new bootmem page tables, everywhere page tables are missing in
@@ -163,24 +177,17 @@ void __init shatter_pmd(pmd_t *pmd)
* checking the pgd every time.
*/
static void __init page_table_range_init(unsigned long start,
- unsigned long end, pgd_t *pgd_base)
+ unsigned long end, pgd_t *pgd)
{
- pgd_t *pgd;
- int pgd_idx;
unsigned long vaddr;
-
- vaddr = start;
- pgd_idx = pgd_index(vaddr);
- pgd = pgd_base + pgd_idx;
-
- for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
- pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr);
+ start = round_down(start, PMD_SIZE);
+ end = round_up(end, PMD_SIZE);
+ for (vaddr = start; vaddr < end; vaddr += PMD_SIZE) {
+ pmd_t *pmd = get_pmd(pgd, vaddr);
if (pmd_none(*pmd))
assign_pte(pmd, alloc_pte());
- vaddr += PMD_SIZE;
}
}
-#endif /* CONFIG_HIGHMEM */
#if CHIP_HAS_CBOX_HOME_MAP()
@@ -404,21 +411,6 @@ static inline pgprot_t ktext_set_nocache(pgprot_t prot)
return prot;
}
-#ifndef __tilegx__
-static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
-{
- return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
-}
-#else
-static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
-{
- pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
- if (pud_none(*pud))
- assign_pmd(pud, alloc_pmd());
- return pmd_offset(pud, va);
-}
-#endif
-
/* Temporary page table we use for staging. */
static pgd_t pgtables[PTRS_PER_PGD]
__attribute__((aligned(HV_PAGE_TABLE_ALIGN)));
@@ -741,16 +733,15 @@ static void __init set_non_bootmem_pages_init(void)
for_each_zone(z) {
unsigned long start, end;
int nid = z->zone_pgdat->node_id;
+#ifdef CONFIG_HIGHMEM
int idx = zone_idx(z);
+#endif
start = z->zone_start_pfn;
- if (start == 0)
- continue; /* bootmem */
end = start + z->spanned_pages;
- if (idx == ZONE_NORMAL) {
- BUG_ON(start != node_start_pfn[nid]);
- start = node_free_pfn[nid];
- }
+ start = max(start, node_free_pfn[nid]);
+ start = max(start, max_low_pfn);
+
#ifdef CONFIG_HIGHMEM
if (idx == ZONE_HIGHMEM)
totalhigh_pages += z->spanned_pages;
@@ -779,9 +770,6 @@ static void __init set_non_bootmem_pages_init(void)
*/
void __init paging_init(void)
{
-#ifdef CONFIG_HIGHMEM
- unsigned long vaddr, end;
-#endif
#ifdef __tilegx__
pud_t *pud;
#endif
@@ -789,14 +777,14 @@ void __init paging_init(void)
kernel_physical_mapping_init(pgd_base);
-#ifdef CONFIG_HIGHMEM
/*
* Fixed mappings, only the page table structure has to be
* created - mappings will be set by set_fixmap():
*/
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
- page_table_range_init(vaddr, end, pgd_base);
+ page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1),
+ FIXADDR_TOP, pgd_base);
+
+#ifdef CONFIG_HIGHMEM
permanent_kmaps_init(pgd_base);
#endif
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 345edfed9fcd..de0de0c0e8a1 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -575,13 +575,6 @@ void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
}
EXPORT_SYMBOL(ioremap_prot);
-/* Map a PCI MMIO bus address into VA space. */
-void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
-{
- panic("ioremap for PCI MMIO is not supported");
-}
-EXPORT_SYMBOL(ioremap);
-
/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index 88e466b159dc..43b39d61b538 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -705,7 +705,6 @@ static void stack_proc(void *arg)
struct task_struct *from = current, *to = arg;
to->thread.saved_task = from;
- rcu_switch_from(from);
switch_to(from, to, from);
}
diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c
index 0d60c5685c26..458d324f062d 100644
--- a/arch/um/drivers/net_kern.c
+++ b/arch/um/drivers/net_kern.c
@@ -339,7 +339,7 @@ static int setup_etheraddr(char *str, unsigned char *addr, char *name)
random:
printk(KERN_INFO
"Choosing a random ethernet address for device %s\n", name);
- random_ether_addr(addr);
+ eth_random_addr(addr);
return 1;
}
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index 03c9ff808b5a..b0a47433341e 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -238,7 +238,6 @@ config I2C_BATTERY_BQ27200
config I2C_EEPROM_AT24
tristate "I2C EEPROMs AT24 support"
select I2C_PUV3
- select MISC_DEVICES
select EEPROM_AT24
config LCD_BACKLIGHT
diff --git a/arch/unicore32/kernel/pci.c b/arch/unicore32/kernel/pci.c
index 2fc2b1ba825e..46cb6c9de6c9 100644
--- a/arch/unicore32/kernel/pci.c
+++ b/arch/unicore32/kernel/pci.c
@@ -296,7 +296,7 @@ static int __init pci_common_init(void)
}
subsys_initcall(pci_common_init);
-char * __devinit pcibios_setup(char *str)
+char * __init pcibios_setup(char *str)
{
if (!strcmp(str, "debug")) {
debug_pci = 1;
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 1f2521434554..b0c5276861ec 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -49,6 +49,9 @@ else
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
+ # Use -mpreferred-stack-boundary=3 if supported.
+ KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
+
# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index daeca56211e3..673ac9b63d6b 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -38,7 +38,7 @@
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
int err = 0;
- bool ia32 = is_ia32_task();
+ bool ia32 = test_thread_flag(TIF_IA32);
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 49331bedc158..70780689599a 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -75,23 +75,54 @@ static inline int alternatives_text_reserved(void *start, void *end)
}
#endif /* CONFIG_SMP */
+#define OLDINSTR(oldinstr) "661:\n\t" oldinstr "\n662:\n"
+
+#define b_replacement(number) "663"#number
+#define e_replacement(number) "664"#number
+
+#define alt_slen "662b-661b"
+#define alt_rlen(number) e_replacement(number)"f-"b_replacement(number)"f"
+
+#define ALTINSTR_ENTRY(feature, number) \
+ " .long 661b - .\n" /* label */ \
+ " .long " b_replacement(number)"f - .\n" /* new instruction */ \
+ " .word " __stringify(feature) "\n" /* feature bit */ \
+ " .byte " alt_slen "\n" /* source len */ \
+ " .byte " alt_rlen(number) "\n" /* replacement len */
+
+#define DISCARD_ENTRY(number) /* rlen <= slen */ \
+ " .byte 0xff + (" alt_rlen(number) ") - (" alt_slen ")\n"
+
+#define ALTINSTR_REPLACEMENT(newinstr, feature, number) /* replacement */ \
+ b_replacement(number)":\n\t" newinstr "\n" e_replacement(number) ":\n\t"
+
/* alternative assembly primitive: */
#define ALTERNATIVE(oldinstr, newinstr, feature) \
- \
- "661:\n\t" oldinstr "\n662:\n" \
- ".section .altinstructions,\"a\"\n" \
- " .long 661b - .\n" /* label */ \
- " .long 663f - .\n" /* new instruction */ \
- " .word " __stringify(feature) "\n" /* feature bit */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .discard,\"aw\",@progbits\n" \
- " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
- ".previous\n" \
- ".section .altinstr_replacement, \"ax\"\n" \
- "663:\n\t" newinstr "\n664:\n" /* replacement */ \
- ".previous"
+ OLDINSTR(oldinstr) \
+ ".section .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature, 1) \
+ ".previous\n" \
+ ".section .discard,\"aw\",@progbits\n" \
+ DISCARD_ENTRY(1) \
+ ".previous\n" \
+ ".section .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
+ ".previous"
+
+#define ALTERNATIVE_2(oldinstr, newinstr1, feature1, newinstr2, feature2)\
+ OLDINSTR(oldinstr) \
+ ".section .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(feature1, 1) \
+ ALTINSTR_ENTRY(feature2, 2) \
+ ".previous\n" \
+ ".section .discard,\"aw\",@progbits\n" \
+ DISCARD_ENTRY(1) \
+ DISCARD_ENTRY(2) \
+ ".previous\n" \
+ ".section .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
+ ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
+ ".previous"
/*
* This must be included *after* the definition of ALTERNATIVE due to
@@ -140,6 +171,19 @@ static inline int alternatives_text_reserved(void *start, void *end)
: output : [old] "i" (oldfunc), [new] "i" (newfunc), ## input)
/*
+ * Like alternative_call, but there are two features and respective functions.
+ * If CPU has feature2, function2 is used.
+ * Otherwise, if CPU has feature1, function1 is used.
+ * Otherwise, old function is used.
+ */
+#define alternative_call_2(oldfunc, newfunc1, feature1, newfunc2, feature2, \
+ output, input...) \
+ asm volatile (ALTERNATIVE_2("call %P[old]", "call %P[new1]", feature1,\
+ "call %P[new2]", feature2) \
+ : output : [old] "i" (oldfunc), [new1] "i" (newfunc1), \
+ [new2] "i" (newfunc2), ## input)
+
+/*
* use this macro(s) if you need more than one output parameter
* in alternative_io
*/
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index 49ad773f4b9f..b3341e9cd8fd 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -26,10 +26,31 @@ struct amd_l3_cache {
u8 subcaches[4];
};
+struct threshold_block {
+ unsigned int block;
+ unsigned int bank;
+ unsigned int cpu;
+ u32 address;
+ u16 interrupt_enable;
+ bool interrupt_capable;
+ u16 threshold_limit;
+ struct kobject kobj;
+ struct list_head miscj;
+};
+
+struct threshold_bank {
+ struct kobject *kobj;
+ struct threshold_block *blocks;
+
+ /* initialized to the number of CPUs on the node sharing this bank */
+ atomic_t cpus;
+};
+
struct amd_northbridge {
struct pci_dev *misc;
struct pci_dev *link;
struct amd_l3_cache l3_cache;
+ struct threshold_bank *bank4;
};
struct amd_northbridge_info {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index eaff4790ed96..3ea51a84a0e4 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -306,7 +306,8 @@ struct apic {
unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
unsigned long (*check_apicid_present)(int apicid);
- void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+ void (*vector_allocation_domain)(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask);
void (*init_apic_ldr)(void);
void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
@@ -331,9 +332,9 @@ struct apic {
unsigned long (*set_apic_id)(unsigned int id);
unsigned long apic_id_mask;
- unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
- unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
- const struct cpumask *andmask);
+ int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+ const struct cpumask *andmask,
+ unsigned int *apicid);
/* ipi */
void (*send_IPI_mask)(const struct cpumask *mask, int vector);
@@ -464,6 +465,8 @@ static inline u32 safe_apic_wait_icr_idle(void)
return apic->safe_wait_icr_idle();
}
+extern void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v));
+
#else /* CONFIG_X86_LOCAL_APIC */
static inline u32 apic_read(u32 reg) { return 0; }
@@ -473,6 +476,7 @@ static inline u64 apic_icr_read(void) { return 0; }
static inline void apic_icr_write(u32 low, u32 high) { }
static inline void apic_wait_icr_idle(void) { }
static inline u32 safe_apic_wait_icr_idle(void) { return 0; }
+static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
#endif /* CONFIG_X86_LOCAL_APIC */
@@ -537,6 +541,11 @@ static inline const struct cpumask *default_target_cpus(void)
#endif
}
+static inline const struct cpumask *online_target_cpus(void)
+{
+ return cpu_online_mask;
+}
+
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
@@ -586,21 +595,50 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
#endif
-static inline unsigned int
-default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask,
+ unsigned int *apicid)
{
- return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+ unsigned long cpu_mask = cpumask_bits(cpumask)[0] &
+ cpumask_bits(andmask)[0] &
+ cpumask_bits(cpu_online_mask)[0] &
+ APIC_ALL_CPUS;
+
+ if (likely(cpu_mask)) {
+ *apicid = (unsigned int)cpu_mask;
+ return 0;
+ } else {
+ return -EINVAL;
+ }
}
-static inline unsigned int
+extern int
default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+ const struct cpumask *andmask,
+ unsigned int *apicid);
+
+static inline void
+flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask)
{
- unsigned long mask1 = cpumask_bits(cpumask)[0];
- unsigned long mask2 = cpumask_bits(andmask)[0];
- unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+ /* Careful. Some cpus do not strictly honor the set of cpus
+ * specified in the interrupt destination when using lowest
+ * priority interrupt delivery mode.
+ *
+ * In particular there was a hyperthreading cpu observed to
+ * deliver interrupts to the wrong hyperthread when only one
+ * hyperthread was specified in the interrupt desitination.
+ */
+ cpumask_clear(retmask);
+ cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
+}
- return (unsigned int)(mask1 & mask2 & mask3);
+static inline void
+default_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask)
+{
+ cpumask_copy(retmask, cpumask_of(cpu));
}
static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index a6983b277220..72f5009deb5a 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -264,6 +264,13 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
* This operation is non-atomic and can be reordered.
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
+ *
+ * Note: the operation is performed atomically with respect to
+ * the local CPU, but not other CPUs. Portable code should not
+ * rely on this behaviour.
+ * KVM relies on this behaviour on x86 for modifying memory that is also
+ * accessed from a hypervisor on the same CPU if running in a VM: don't change
+ * this without also updating arch/x86/kernel/kvm.c
*/
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 340ee49961a6..f91e80f4f180 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -176,7 +176,7 @@
#define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */
#define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTS (7*32+ 7) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */
#define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */
/* Virtualization flags: Linux defined, word 8 */
diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
index cc70c1c78ca4..75ce3f47d204 100644
--- a/arch/x86/include/asm/emergency-restart.h
+++ b/arch/x86/include/asm/emergency-restart.h
@@ -4,9 +4,7 @@
enum reboot_type {
BOOT_TRIPLE = 't',
BOOT_KBD = 'k',
-#ifdef CONFIG_X86_32
BOOT_BIOS = 'b',
-#endif
BOOT_ACPI = 'a',
BOOT_EFI = 'e',
BOOT_CF9 = 'p',
diff --git a/arch/x86/include/asm/floppy.h b/arch/x86/include/asm/floppy.h
index dbe82a5c5eac..d3d74698dce9 100644
--- a/arch/x86/include/asm/floppy.h
+++ b/arch/x86/include/asm/floppy.h
@@ -99,7 +99,7 @@ static irqreturn_t floppy_hardint(int irq, void *dev_id)
virtual_dma_residue += virtual_dma_count;
virtual_dma_count = 0;
#ifdef TRACE_FLPY_INT
- printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
+ printk(KERN_DEBUG "count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 7a15153c675d..b518c7509933 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -49,6 +49,7 @@ extern const struct hypervisor_x86 *x86_hyper;
extern const struct hypervisor_x86 x86_hyper_vmware;
extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_kvm;
static inline bool hypervisor_x2apic_available(void)
{
diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
index dffc38ee6255..345c99cef152 100644
--- a/arch/x86/include/asm/iommu.h
+++ b/arch/x86/include/asm/iommu.h
@@ -5,7 +5,6 @@ extern struct dma_map_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
extern int iommu_pass_through;
-extern int iommu_group_mf;
/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index e7d1c194d272..246617efd67f 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -12,6 +12,7 @@
/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
#define __KVM_HAVE_IOAPIC
+#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_DEVICE_ASSIGNMENT
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 1ac46c22dd50..c764f43b71c5 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -192,8 +192,8 @@ struct x86_emulate_ops {
struct x86_instruction_info *info,
enum x86_intercept_stage stage);
- bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
- u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+ void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
+ u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
};
typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -280,9 +280,9 @@ struct x86_emulate_ctxt {
u8 modrm_seg;
bool rip_relative;
unsigned long _eip;
+ struct operand memop;
/* Fields above regs are cleared together. */
unsigned long regs[NR_VCPU_REGS];
- struct operand memop;
struct operand *memopp;
struct fetch_cache fetch;
struct read_cache io_read;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index db7c1f2709a2..09155d64cf7e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -48,12 +48,13 @@
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
+#define CR3_PCID_ENABLED_RESERVED_BITS 0xFFFFFF0000000000ULL
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
0xFFFFFF0000000000ULL)
#define CR4_RESERVED_BITS \
(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
- | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
+ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
| X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_RDWRGSFS \
| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
@@ -175,6 +176,13 @@ enum {
/* apic attention bits */
#define KVM_APIC_CHECK_VAPIC 0
+/*
+ * The following bit is set with PV-EOI, unset on EOI.
+ * We detect PV-EOI changes by guest by comparing
+ * this bit with PV-EOI in guest memory.
+ * See the implementation in apic_update_pv_eoi.
+ */
+#define KVM_APIC_PV_EOI_PENDING 1
/*
* We don't want allocation failures within the mmu code, so we preallocate
@@ -313,8 +321,8 @@ struct kvm_pmu {
u64 counter_bitmask[2];
u64 global_ctrl_mask;
u8 version;
- struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
- struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
+ struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
+ struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
struct irq_work irq_work;
u64 reprogram_pmi;
};
@@ -484,6 +492,11 @@ struct kvm_vcpu_arch {
u64 length;
u64 status;
} osvw;
+
+ struct {
+ u64 msr_val;
+ struct gfn_to_hva_cache data;
+ } pv_eoi;
};
struct kvm_lpage_info {
@@ -661,6 +674,7 @@ struct kvm_x86_ops {
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
int (*get_lpage_level)(void);
bool (*rdtscp_supported)(void);
+ bool (*invpcid_supported)(void);
void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
@@ -802,7 +816,20 @@ int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
-int kvm_pic_set_irq(void *opaque, int irq, int level);
+static inline int __kvm_irq_line_state(unsigned long *irq_state,
+ int irq_source_id, int level)
+{
+ /* Logical OR for level trig interrupt */
+ if (level)
+ __set_bit(irq_source_id, irq_state);
+ else
+ __clear_bit(irq_source_id, irq_state);
+
+ return !!(*irq_state);
+}
+
+int kvm_pic_set_irq(struct kvm_pic *pic, int irq, int irq_source_id, int level);
+void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
void kvm_inject_nmi(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 63ab1661d00e..2f7712e08b1e 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -22,6 +22,7 @@
#define KVM_FEATURE_CLOCKSOURCE2 3
#define KVM_FEATURE_ASYNC_PF 4
#define KVM_FEATURE_STEAL_TIME 5
+#define KVM_FEATURE_PV_EOI 6
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.
@@ -37,6 +38,7 @@
#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
#define MSR_KVM_STEAL_TIME 0x4b564d03
+#define MSR_KVM_PV_EOI_EN 0x4b564d04
struct kvm_steal_time {
__u64 steal;
@@ -89,6 +91,11 @@ struct kvm_vcpu_pv_apf_data {
__u32 enabled;
};
+#define KVM_PV_EOI_BIT 0
+#define KVM_PV_EOI_MASK (0x1 << KVM_PV_EOI_BIT)
+#define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
+#define KVM_PV_EOI_DISABLED 0x0
+
#ifdef __KERNEL__
#include <asm/processor.h>
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 084ef95274cd..813ed103f45e 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -115,8 +115,8 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
extern unsigned long long native_read_tsc(void);
-extern int native_rdmsr_safe_regs(u32 regs[8]);
-extern int native_wrmsr_safe_regs(u32 regs[8]);
+extern int rdmsr_safe_regs(u32 regs[8]);
+extern int wrmsr_safe_regs(u32 regs[8]);
static __always_inline unsigned long long __native_read_tsc(void)
{
@@ -187,43 +187,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
return err;
}
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
- u32 gprs[8] = { 0 };
- int err;
-
- gprs[1] = msr;
- gprs[7] = 0x9c5a203a;
-
- err = native_rdmsr_safe_regs(gprs);
-
- *p = gprs[0] | ((u64)gprs[2] << 32);
-
- return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
- u32 gprs[8] = { 0 };
-
- gprs[0] = (u32)val;
- gprs[1] = msr;
- gprs[2] = val >> 32;
- gprs[7] = 0x9c5a203a;
-
- return native_wrmsr_safe_regs(gprs);
-}
-
-static inline int rdmsr_safe_regs(u32 regs[8])
-{
- return native_rdmsr_safe_regs(regs);
-}
-
-static inline int wrmsr_safe_regs(u32 regs[8])
-{
- return native_wrmsr_safe_regs(regs);
-}
-
#define rdtscl(low) \
((low) = (u32)__native_read_tsc())
@@ -237,6 +200,8 @@ do { \
(high) = (u32)(_l >> 32); \
} while (0)
+#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))
+
#define rdtscp(low, high, aux) \
do { \
unsigned long long _val = native_read_tscp(&(aux)); \
@@ -248,8 +213,7 @@ do { \
#endif /* !CONFIG_PARAVIRT */
-
-#define checking_wrmsrl(msr, val) wrmsr_safe((msr), (u32)(val), \
+#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
(u32)((val) >> 32))
#define write_tsc(val1, val2) wrmsr(MSR_IA32_TSC, (val1), (val2))
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index dc580c42851c..c0fa356e90de 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -44,28 +44,14 @@ struct nmiaction {
const char *name;
};
-#define register_nmi_handler(t, fn, fg, n) \
+#define register_nmi_handler(t, fn, fg, n, init...) \
({ \
- static struct nmiaction fn##_na = { \
+ static struct nmiaction init fn##_na = { \
.handler = (fn), \
.name = (n), \
.flags = (fg), \
}; \
- __register_nmi_handler((t), &fn##_na); \
-})
-
-/*
- * For special handlers that register/unregister in the
- * init section only. This should be considered rare.
- */
-#define register_nmi_handler_initonly(t, fn, fg, n) \
-({ \
- static struct nmiaction fn##_na __initdata = { \
- .handler = (fn), \
- .name = (n), \
- .flags = (fg), \
- }; \
- __register_nmi_handler((t), &fn##_na); \
+ __register_nmi_handler((t), &fn##_na); \
})
int __register_nmi_handler(unsigned int, struct nmiaction *);
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 6cbbabf52707..0b47ddb6f00b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -128,21 +128,11 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
-static inline int paravirt_rdmsr_regs(u32 *regs)
-{
- return PVOP_CALL1(int, pv_cpu_ops.rdmsr_regs, regs);
-}
-
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}
-static inline int paravirt_wrmsr_regs(u32 *regs)
-{
- return PVOP_CALL1(int, pv_cpu_ops.wrmsr_regs, regs);
-}
-
/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2) \
do { \
@@ -176,9 +166,6 @@ do { \
_err; \
})
-#define rdmsr_safe_regs(regs) paravirt_rdmsr_regs(regs)
-#define wrmsr_safe_regs(regs) paravirt_wrmsr_regs(regs)
-
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
int err;
@@ -186,32 +173,6 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
*p = paravirt_read_msr(msr, &err);
return err;
}
-static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
-{
- u32 gprs[8] = { 0 };
- int err;
-
- gprs[1] = msr;
- gprs[7] = 0x9c5a203a;
-
- err = paravirt_rdmsr_regs(gprs);
-
- *p = gprs[0] | ((u64)gprs[2] << 32);
-
- return err;
-}
-
-static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
-{
- u32 gprs[8] = { 0 };
-
- gprs[0] = (u32)val;
- gprs[1] = msr;
- gprs[2] = val >> 32;
- gprs[7] = 0x9c5a203a;
-
- return paravirt_wrmsr_regs(gprs);
-}
static inline u64 paravirt_read_tsc(void)
{
@@ -252,6 +213,8 @@ do { \
high = _l >> 32; \
} while (0)
+#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))
+
static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 8e8b9a4987ee..8613cbb7ba41 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -153,9 +153,7 @@ struct pv_cpu_ops {
/* MSR, PMC and TSR operations.
err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
u64 (*read_msr)(unsigned int msr, int *err);
- int (*rdmsr_regs)(u32 *regs);
int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
- int (*wrmsr_regs)(u32 *regs);
u64 (*read_tsc)(void);
u64 (*read_pmc)(int counter);
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b3a531746026..73e8eeff22ee 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -7,9 +7,13 @@
#undef DEBUG
#ifdef DEBUG
-#define DBG(x...) printk(x)
+#define DBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
#else
-#define DBG(x...)
+#define DBG(fmt, ...) \
+do { \
+ if (0) \
+ printk(fmt, ##__VA_ARGS__); \
+} while (0)
#endif
#define PCI_PROBE_BIOS 0x0001
@@ -100,6 +104,7 @@ struct pci_raw_ops {
extern const struct pci_raw_ops *raw_pci_ops;
extern const struct pci_raw_ops *raw_pci_ext_ops;
+extern const struct pci_raw_ops pci_mmcfg;
extern const struct pci_raw_ops pci_direct_conf1;
extern bool port_cf9_safe;
@@ -135,6 +140,12 @@ struct pci_mmcfg_region {
extern int __init pci_mmcfg_arch_init(void);
extern void __init pci_mmcfg_arch_free(void);
+extern int __devinit pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg);
+extern void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg);
+extern int __devinit pci_mmconfig_insert(struct device *dev,
+ u16 seg, u8 start,
+ u8 end, phys_addr_t addr);
+extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end);
extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
extern struct list_head pci_mmcfg_list;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 588f52ea810e..c78f14a0df00 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -5,11 +5,10 @@
* Performance event hw details:
*/
-#define X86_PMC_MAX_GENERIC 32
-#define X86_PMC_MAX_FIXED 3
+#define INTEL_PMC_MAX_GENERIC 32
+#define INTEL_PMC_MAX_FIXED 3
+#define INTEL_PMC_IDX_FIXED 32
-#define X86_PMC_IDX_GENERIC 0
-#define X86_PMC_IDX_FIXED 32
#define X86_PMC_IDX_MAX 64
#define MSR_ARCH_PERFMON_PERFCTR0 0xc1
@@ -48,8 +47,7 @@
(X86_RAW_EVENT_MASK | \
AMD64_EVENTSEL_EVENT)
#define AMD64_NUM_COUNTERS 4
-#define AMD64_NUM_COUNTERS_F15H 6
-#define AMD64_NUM_COUNTERS_MAX AMD64_NUM_COUNTERS_F15H
+#define AMD64_NUM_COUNTERS_CORE 6
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)
@@ -121,16 +119,16 @@ struct x86_pmu_capability {
/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0 0x309
-#define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0)
+#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS (INTEL_PMC_IDX_FIXED + 0)
/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a
-#define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1)
+#define INTEL_PMC_IDX_FIXED_CPU_CYCLES (INTEL_PMC_IDX_FIXED + 1)
/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b
-#define X86_PMC_IDX_FIXED_REF_CYCLES (X86_PMC_IDX_FIXED + 2)
-#define X86_PMC_MSK_FIXED_REF_CYCLES (1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
+#define INTEL_PMC_IDX_FIXED_REF_CYCLES (INTEL_PMC_IDX_FIXED + 2)
+#define INTEL_PMC_MSK_FIXED_REF_CYCLES (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)
/*
* We model BTS tracing as another fixed-mode PMC.
@@ -139,7 +137,7 @@ struct x86_pmu_capability {
* values are used by actual fixed events and higher values are used
* to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
*/
-#define X86_PMC_IDX_FIXED_BTS (X86_PMC_IDX_FIXED + 16)
+#define INTEL_PMC_IDX_FIXED_BTS (INTEL_PMC_IDX_FIXED + 16)
/*
* IBS cpuid feature detection
@@ -234,6 +232,7 @@ struct perf_guest_switch_msr {
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
+extern void perf_check_microcode(void);
#else
static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
@@ -247,6 +246,7 @@ static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
}
static inline void perf_events_lapic_init(void) { }
+static inline void perf_check_microcode(void) { }
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 98391db840c6..f2b489cf1602 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -2,9 +2,9 @@
#define _ASM_X86_PGTABLE_2LEVEL_H
#define pte_ERROR(e) \
- printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
+ pr_err("%s:%d: bad pte %08lx\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+ pr_err("%s:%d: bad pgd %08lx\n", __FILE__, __LINE__, pgd_val(e))
/*
* Certain architectures need to do special things when PTEs
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index cb00ccc7d571..4cc9f2b7cdc3 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -9,13 +9,13 @@
*/
#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%08lx%08lx).\n", \
+ pr_err("%s:%d: bad pte %p(%08lx%08lx)\n", \
__FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016Lx).\n", \
+ pr_err("%s:%d: bad pmd %p(%016Lx)\n", \
__FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016Lx).\n", \
+ pr_err("%s:%d: bad pgd %p(%016Lx)\n", \
__FILE__, __LINE__, &(e), pgd_val(e))
/* Rules for using set_pte: the pte being assigned *must* be
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 975f709e09ae..8251be02301e 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -26,16 +26,16 @@ extern pgd_t init_level4_pgt[];
extern void paging_init(void);
#define pte_ERROR(e) \
- printk("%s:%d: bad pte %p(%016lx).\n", \
+ pr_err("%s:%d: bad pte %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
- printk("%s:%d: bad pmd %p(%016lx).\n", \
+ pr_err("%s:%d: bad pmd %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e) \
- printk("%s:%d: bad pud %p(%016lx).\n", \
+ pr_err("%s:%d: bad pud %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e) \
- printk("%s:%d: bad pgd %p(%016lx).\n", \
+ pr_err("%s:%d: bad pgd %p(%016lx)\n", \
__FILE__, __LINE__, &(e), pgd_val(e))
struct mm_struct;
diff --git a/arch/x86/include/asm/processor-flags.h b/arch/x86/include/asm/processor-flags.h
index f8ab3eaad128..aea1d1d848c7 100644
--- a/arch/x86/include/asm/processor-flags.h
+++ b/arch/x86/include/asm/processor-flags.h
@@ -44,6 +44,7 @@
*/
#define X86_CR3_PWT 0x00000008 /* Page Write Through */
#define X86_CR3_PCD 0x00000010 /* Page Cache Disable */
+#define X86_CR3_PCID_MASK 0x00000fff /* PCID Mask */
/*
* Intel CPU features in CR4
@@ -61,6 +62,7 @@
#define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */
#define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */
#define X86_CR4_RDWRGSFS 0x00010000 /* enable RDWRGSFS support */
+#define X86_CR4_PCIDE 0x00020000 /* enable PCID support */
#define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */
#define X86_CR4_SMEP 0x00100000 /* enable SMEP support */
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index fce3f4ae5bd6..fe1ec5bcd846 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -21,8 +21,9 @@ struct real_mode_header {
u32 wakeup_header;
#endif
/* APM/BIOS reboot */
-#ifdef CONFIG_X86_32
u32 machine_real_restart_asm;
+#ifdef CONFIG_X86_64
+ u32 machine_real_restart_seg;
#endif
};
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
index 92f297069e87..a82c4f1b4d83 100644
--- a/arch/x86/include/asm/reboot.h
+++ b/arch/x86/include/asm/reboot.h
@@ -18,8 +18,8 @@ extern struct machine_ops machine_ops;
void native_machine_crash_shutdown(struct pt_regs *regs);
void native_machine_shutdown(void);
-void machine_real_restart(unsigned int type);
-/* These must match dispatch_table in reboot_32.S */
+void __noreturn machine_real_restart(unsigned int type);
+/* These must match dispatch in arch/x86/realmore/rm/reboot.S */
#define MRR_BIOS 0
#define MRR_APM 1
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index f48394513c37..2ffa95dc2333 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -169,11 +169,6 @@ void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
void smp_store_cpu_info(int id);
#define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
-/* We don't mark CPUs online until __cpu_up(), so we need another measure */
-static inline int num_booting_cpus(void)
-{
- return cpumask_weight(cpu_callout_mask);
-}
#else /* !CONFIG_SMP */
#define wbinvd_on_cpu(cpu) wbinvd()
static inline int wbinvd_on_all_cpus(void)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 8e796fbbf9c6..d8def8b3dba0 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -17,6 +17,8 @@
/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
+__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
@@ -26,9 +28,16 @@ copy_user_generic(void *to, const void *from, unsigned len)
{
unsigned ret;
- alternative_call(copy_user_generic_unrolled,
+ /*
+ * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
+ * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
+ * Otherwise, use copy_user_generic_unrolled.
+ */
+ alternative_call_2(copy_user_generic_unrolled,
copy_user_generic_string,
X86_FEATURE_REP_GOOD,
+ copy_user_enhanced_fast_string,
+ X86_FEATURE_ERMS,
ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
"=d" (len)),
"1" (to), "2" (from), "3" (len)
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 1e9bed14f7ae..f3971bbcd1de 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -48,7 +48,7 @@ struct arch_uprobe_task {
#endif
};
-extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm);
+extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr);
extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs);
extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 6149b476d9df..a06983cdc125 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -140,6 +140,9 @@
#define IPI_RESET_LIMIT 1
/* after this # consecutive successes, bump up the throttle if it was lowered */
#define COMPLETE_THRESHOLD 5
+/* after this # of giveups (fall back to kernel IPI's) disable the use of
+ the BAU for a period of time */
+#define GIVEUP_LIMIT 100
#define UV_LB_SUBNODEID 0x10
@@ -166,7 +169,6 @@
#define FLUSH_RETRY_TIMEOUT 2
#define FLUSH_GIVEUP 3
#define FLUSH_COMPLETE 4
-#define FLUSH_RETRY_BUSYBUG 5
/*
* tuning the action when the numalink network is extremely delayed
@@ -175,7 +177,7 @@
microseconds */
#define CONGESTED_REPS 10 /* long delays averaged over
this many broadcasts */
-#define CONGESTED_PERIOD 30 /* time for the bau to be
+#define DISABLED_PERIOD 10 /* time for the bau to be
disabled, in seconds */
/* see msg_type: */
#define MSG_NOOP 0
@@ -520,6 +522,12 @@ struct ptc_stats {
unsigned long s_uv2_wars; /* uv2 workaround, perm. busy */
unsigned long s_uv2_wars_hw; /* uv2 workaround, hiwater */
unsigned long s_uv2_war_waits; /* uv2 workaround, long waits */
+ unsigned long s_overipilimit; /* over the ipi reset limit */
+ unsigned long s_giveuplimit; /* disables, over giveup limit*/
+ unsigned long s_enters; /* entries to the driver */
+ unsigned long s_ipifordisabled; /* fall back to IPI; disabled */
+ unsigned long s_plugged; /* plugged by h/w bug*/
+ unsigned long s_congested; /* giveup on long wait */
/* destination statistics */
unsigned long d_alltlb; /* times all tlb's on this
cpu were flushed */
@@ -586,8 +594,8 @@ struct bau_control {
int timeout_tries;
int ipi_attempts;
int conseccompletes;
- int baudisabled;
- int set_bau_off;
+ short nobau;
+ short baudisabled;
short cpu;
short osnode;
short uvhub_cpu;
@@ -596,14 +604,16 @@ struct bau_control {
short cpus_in_socket;
short cpus_in_uvhub;
short partition_base_pnode;
- short using_desc; /* an index, like uvhub_cpu */
- unsigned int inuse_map;
+ short busy; /* all were busy (war) */
unsigned short message_number;
unsigned short uvhub_quiesce;
short socket_acknowledge_count[DEST_Q_SIZE];
cycles_t send_message;
+ cycles_t period_end;
+ cycles_t period_time;
spinlock_t uvhub_lock;
spinlock_t queue_lock;
+ spinlock_t disable_lock;
/* tunables */
int max_concurr;
int max_concurr_const;
@@ -614,9 +624,9 @@ struct bau_control {
int complete_threshold;
int cong_response_us;
int cong_reps;
- int cong_period;
- unsigned long clocks_per_100_usec;
- cycles_t period_time;
+ cycles_t disabled_period;
+ int period_giveups;
+ int giveup_limit;
long period_requests;
struct hub_and_pnode *thp;
};
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 31f180c21ce9..74fcb963595b 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -60,6 +60,7 @@
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
#define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
+#define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
#define PIN_BASED_EXT_INTR_MASK 0x00000001
@@ -281,6 +282,7 @@ enum vmcs_field {
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
+#define EXIT_REASON_INVPCID 58
/*
* Interruption-information format
@@ -404,6 +406,7 @@ enum vmcs_field {
#define VMX_EPTP_WB_BIT (1ull << 14)
#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
#define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
+#define VMX_EPT_AD_BIT (1ull << 21)
#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
@@ -415,11 +418,14 @@ enum vmcs_field {
#define VMX_EPT_MAX_GAW 0x4
#define VMX_EPT_MT_EPTE_SHIFT 3
#define VMX_EPT_GAW_EPTP_SHIFT 3
+#define VMX_EPT_AD_ENABLE_BIT (1ull << 6)
#define VMX_EPT_DEFAULT_MT 0x6ull
#define VMX_EPT_READABLE_MASK 0x1ull
#define VMX_EPT_WRITABLE_MASK 0x2ull
#define VMX_EPT_EXECUTABLE_MASK 0x4ull
#define VMX_EPT_IPAT_BIT (1ull << 6)
+#define VMX_EPT_ACCESS_BIT (1ull << 8)
+#define VMX_EPT_DIRTY_BIT (1ull << 9)
#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
diff --git a/arch/x86/include/asm/x2apic.h b/arch/x86/include/asm/x2apic.h
index 92e54abf89e0..f90f0a587c66 100644
--- a/arch/x86/include/asm/x2apic.h
+++ b/arch/x86/include/asm/x2apic.h
@@ -9,15 +9,6 @@
#include <asm/ipi.h>
#include <linux/cpumask.h>
-/*
- * Need to use more than cpu 0, because we need more vectors
- * when MSI-X are used.
- */
-static const struct cpumask *x2apic_target_cpus(void)
-{
- return cpu_online_mask;
-}
-
static int x2apic_apic_id_valid(int apicid)
{
return 1;
@@ -28,15 +19,6 @@ static int x2apic_apic_id_registered(void)
return 1;
}
-/*
- * For now each logical cpu is in its own vector allocation domain.
- */
-static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- cpumask_clear(retmask);
- cpumask_set_cpu(cpu, retmask);
-}
-
static void
__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
{
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index c090af10ac7d..38155f667144 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -156,7 +156,6 @@ struct x86_cpuinit_ops {
/**
* struct x86_platform_ops - platform specific runtime functions
* @calibrate_tsc: calibrate TSC
- * @wallclock_init: init the wallclock device
* @get_wallclock: get time from HW clock like RTC etc.
* @set_wallclock: set time back to HW clock
* @is_untracked_pat_range exclude from PAT logic
@@ -164,10 +163,10 @@ struct x86_cpuinit_ops {
* @i8042_detect pre-detect if i8042 controller exists
* @save_sched_clock_state: save state for sched_clock() on suspend
* @restore_sched_clock_state: restore state for sched_clock() on resume
+ * @apic_post_init: adjust apic if neeeded
*/
struct x86_platform_ops {
unsigned long (*calibrate_tsc)(void);
- void (*wallclock_init)(void);
unsigned long (*get_wallclock)(void);
int (*set_wallclock)(unsigned long nowtime);
void (*iommu_shutdown)(void);
@@ -177,6 +176,7 @@ struct x86_platform_ops {
int (*i8042_detect)(void);
void (*save_sched_clock_state)(void);
void (*restore_sched_clock_state)(void);
+ void (*apic_post_init)(void);
};
struct pci_dev;
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 5728852fb90f..59c226d120cd 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -48,6 +48,7 @@
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>
#include <xen/interface/platform.h>
+#include <xen/interface/xen-mca.h>
/*
* The hypercall asms have to meet several constraints:
@@ -302,6 +303,13 @@ HYPERVISOR_set_timer_op(u64 timeout)
}
static inline int
+HYPERVISOR_mca(struct xen_mc *mc_op)
+{
+ mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
+ return _hypercall1(int, mca, mc_op);
+}
+
+static inline int
HYPERVISOR_dom0_op(struct xen_platform_op *platform_op)
{
platform_op->interface_version = XENPF_INTERFACE_VERSION;
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 8afb69319815..b2297e58c6ed 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -422,12 +422,14 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
return 0;
}
- if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
+ if (intsrc->source_irq == 0) {
if (acpi_skip_timer_override) {
- printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
+ printk(PREFIX "BIOS IRQ0 override ignored.\n");
return 0;
}
- if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
+
+ if ((intsrc->global_irq == 2) && acpi_fix_pin2_polarity
+ && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
}
@@ -1334,17 +1336,12 @@ static int __init dmi_disable_acpi(const struct dmi_system_id *d)
}
/*
- * Force ignoring BIOS IRQ0 pin2 override
+ * Force ignoring BIOS IRQ0 override
*/
static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
{
- /*
- * The ati_ixp4x0_rev() early PCI quirk should have set
- * the acpi_skip_timer_override flag already:
- */
if (!acpi_skip_timer_override) {
- WARN(1, KERN_ERR "ati_ixp4x0 quirk not complete.\n");
- pr_notice("%s detected: Ignoring BIOS IRQ0 pin2 override\n",
+ pr_notice("%s detected: Ignoring BIOS IRQ0 override\n",
d->ident);
acpi_skip_timer_override = 1;
}
@@ -1438,7 +1435,7 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
* is enabled. This input is incorrectly designated the
* ISA IRQ 0 via an interrupt source override even though
* it is wired to the output of the master 8259A and INTIN0
- * is not connected at all. Force ignoring BIOS IRQ0 pin2
+ * is not connected at all. Force ignoring BIOS IRQ0
* override in that cases.
*/
{
@@ -1473,6 +1470,14 @@ static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
},
},
+ {
+ .callback = dmi_ignore_irq0_timer_override,
+ .ident = "FUJITSU SIEMENS",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
+ },
+ },
{}
};
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 1f84794f0759..931280ff8299 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "SMP alternatives: " fmt
+
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
@@ -63,8 +65,11 @@ static int __init setup_noreplace_paravirt(char *str)
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif
-#define DPRINTK(fmt, args...) if (debug_alternative) \
- printk(KERN_DEBUG fmt, args)
+#define DPRINTK(fmt, ...) \
+do { \
+ if (debug_alternative) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+} while (0)
/*
* Each GENERIC_NOPX is of X bytes, and defined as an array of bytes
@@ -428,7 +433,7 @@ void alternatives_smp_switch(int smp)
* If this still occurs then you should see a hang
* or crash shortly after this line:
*/
- printk("lockdep: fixing up alternatives.\n");
+ pr_info("lockdep: fixing up alternatives\n");
#endif
if (noreplace_smp || smp_alt_once || skip_smp_alternatives)
@@ -444,14 +449,14 @@ void alternatives_smp_switch(int smp)
if (smp == smp_mode) {
/* nothing */
} else if (smp) {
- printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
+ pr_info("switching to SMP code\n");
clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
list_for_each_entry(mod, &smp_alt_modules, next)
alternatives_smp_lock(mod->locks, mod->locks_end,
mod->text, mod->text_end);
} else {
- printk(KERN_INFO "SMP alternatives: switching to UP code\n");
+ pr_info("switching to UP code\n");
set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
list_for_each_entry(mod, &smp_alt_modules, next)
@@ -546,7 +551,7 @@ void __init alternative_instructions(void)
#ifdef CONFIG_SMP
if (smp_alt_once) {
if (1 == num_possible_cpus()) {
- printk(KERN_INFO "SMP alternatives: switching to UP code\n");
+ pr_info("switching to UP code\n");
set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
@@ -664,7 +669,7 @@ static int __kprobes stop_machine_text_poke(void *data)
struct text_poke_param *p;
int i;
- if (atomic_dec_and_test(&stop_machine_first)) {
+ if (atomic_xchg(&stop_machine_first, 0)) {
for (i = 0; i < tpp->nparams; i++) {
p = &tpp->params[i];
text_poke(p->addr, p->opcode, p->len);
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index be16854591cc..aadf3359e2a7 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -2,6 +2,9 @@
* Shared support code for AMD K8 northbridges and derivates.
* Copyright 2006 Andi Kleen, SUSE Labs. Subject to GPLv2.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -16,6 +19,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
{}
};
EXPORT_SYMBOL(amd_nb_misc_ids);
@@ -258,7 +262,7 @@ void amd_flush_garts(void)
}
spin_unlock_irqrestore(&gart_lock, flags);
if (!flushed)
- printk("nothing to flush?\n");
+ pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
@@ -269,11 +273,10 @@ static __init int init_amd_nbs(void)
err = amd_cache_northbridges();
if (err < 0)
- printk(KERN_NOTICE "AMD NB: Cannot enumerate AMD northbridges.\n");
+ pr_notice("Cannot enumerate AMD northbridges\n");
if (amd_cache_gart() < 0)
- printk(KERN_NOTICE "AMD NB: Cannot initialize GART flush words, "
- "GART support disabled.\n");
+ pr_notice("Cannot initialize GART flush words, GART support disabled\n");
return err;
}
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 39a222e094af..98e24131ff3a 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2123,6 +2123,42 @@ void default_init_apic_ldr(void)
apic_write(APIC_LDR, val);
}
+int default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask,
+ unsigned int *apicid)
+{
+ unsigned int cpu;
+
+ for_each_cpu_and(cpu, cpumask, andmask) {
+ if (cpumask_test_cpu(cpu, cpu_online_mask))
+ break;
+ }
+
+ if (likely(cpu < nr_cpu_ids)) {
+ *apicid = per_cpu(x86_cpu_to_apicid, cpu);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Override the generic EOI implementation with an optimized version.
+ * Only called during early boot when only one CPU is active and with
+ * interrupts disabled, so we know this does not race with actual APIC driver
+ * use.
+ */
+void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
+{
+ struct apic **drv;
+
+ for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
+ /* Should happen once for each apic */
+ WARN_ON((*drv)->eoi_write == eoi_write);
+ (*drv)->eoi_write = eoi_write;
+ }
+}
+
/*
* Power management
*/
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 0e881c46e8c8..00c77cf78e9e 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -36,25 +36,6 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 1;
}
-static const struct cpumask *flat_target_cpus(void)
-{
- return cpu_online_mask;
-}
-
-static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- /* Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- cpumask_clear(retmask);
- cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
/*
* Set up the logical destination ID.
*
@@ -92,7 +73,7 @@ static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
}
static void
- flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
+flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
{
unsigned long mask = cpumask_bits(cpumask)[0];
int cpu = smp_processor_id();
@@ -186,7 +167,7 @@ static struct apic apic_flat = {
.irq_delivery_mode = dest_LowestPrio,
.irq_dest_mode = 1, /* logical */
- .target_cpus = flat_target_cpus,
+ .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = NULL,
@@ -210,8 +191,7 @@ static struct apic apic_flat = {
.set_apic_id = set_apic_id,
.apic_id_mask = 0xFFu << 24,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.send_IPI_mask = flat_send_IPI_mask,
.send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
@@ -262,17 +242,6 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 0;
}
-static const struct cpumask *physflat_target_cpus(void)
-{
- return cpu_online_mask;
-}
-
-static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- cpumask_clear(retmask);
- cpumask_set_cpu(cpu, retmask);
-}
-
static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
{
default_send_IPI_mask_sequence_phys(cpumask, vector);
@@ -294,38 +263,6 @@ static void physflat_send_IPI_all(int vector)
physflat_send_IPI_mask(cpu_online_mask, vector);
}
-static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
- int cpu;
-
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- cpu = cpumask_first(cpumask);
- if ((unsigned)cpu < nr_cpu_ids)
- return per_cpu(x86_cpu_to_apicid, cpu);
- else
- return BAD_APICID;
-}
-
-static unsigned int
-physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- int cpu;
-
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- for_each_cpu_and(cpu, cpumask, andmask) {
- if (cpumask_test_cpu(cpu, cpu_online_mask))
- break;
- }
- return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
static int physflat_probe(void)
{
if (apic == &apic_physflat || num_possible_cpus() > 8)
@@ -345,13 +282,13 @@ static struct apic apic_physflat = {
.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 0, /* physical */
- .target_cpus = physflat_target_cpus,
+ .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = 0,
.check_apicid_used = NULL,
.check_apicid_present = NULL,
- .vector_allocation_domain = physflat_vector_allocation_domain,
+ .vector_allocation_domain = default_vector_allocation_domain,
/* not needed, but shouldn't hurt: */
.init_apic_ldr = flat_init_apic_ldr,
@@ -370,8 +307,7 @@ static struct apic apic_physflat = {
.set_apic_id = set_apic_id,
.apic_id_mask = 0xFFu << 24,
- .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = physflat_send_IPI_mask,
.send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index a6e4c6e06c08..e145f28b4099 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -100,12 +100,12 @@ static unsigned long noop_check_apicid_present(int bit)
return physid_isset(bit, phys_cpu_present_map);
}
-static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
+static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask)
{
if (cpu != 0)
pr_warning("APIC: Vector allocated for non-BSP cpu\n");
- cpumask_clear(retmask);
- cpumask_set_cpu(cpu, retmask);
+ cpumask_copy(retmask, cpumask_of(cpu));
}
static u32 noop_apic_read(u32 reg)
@@ -159,8 +159,7 @@ struct apic apic_noop = {
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.send_IPI_mask = noop_send_IPI_mask,
.send_IPI_mask_allbutself = noop_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 6ec6d5d297c3..bc552cff2578 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -72,17 +72,6 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
return initial_apic_id >> index_msb;
}
-static const struct cpumask *numachip_target_cpus(void)
-{
- return cpu_online_mask;
-}
-
-static void numachip_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- cpumask_clear(retmask);
- cpumask_set_cpu(cpu, retmask);
-}
-
static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
union numachip_csr_g3_ext_irq_gen int_gen;
@@ -157,38 +146,6 @@ static void numachip_send_IPI_self(int vector)
__default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
}
-static unsigned int numachip_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
- int cpu;
-
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- cpu = cpumask_first(cpumask);
- if (likely((unsigned)cpu < nr_cpu_ids))
- return per_cpu(x86_cpu_to_apicid, cpu);
-
- return BAD_APICID;
-}
-
-static unsigned int
-numachip_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- int cpu;
-
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- for_each_cpu_and(cpu, cpumask, andmask) {
- if (cpumask_test_cpu(cpu, cpu_online_mask))
- break;
- }
- return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
static int __init numachip_probe(void)
{
return apic == &apic_numachip;
@@ -253,13 +210,13 @@ static struct apic apic_numachip __refconst = {
.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 0, /* physical */
- .target_cpus = numachip_target_cpus,
+ .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = 0,
.check_apicid_used = NULL,
.check_apicid_present = NULL,
- .vector_allocation_domain = numachip_vector_allocation_domain,
+ .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = flat_init_apic_ldr,
.ioapic_phys_id_map = NULL,
@@ -277,8 +234,7 @@ static struct apic apic_numachip __refconst = {
.set_apic_id = set_apic_id,
.apic_id_mask = 0xffU << 24,
- .cpu_mask_to_apicid = numachip_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = numachip_cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = numachip_send_IPI_mask,
.send_IPI_mask_allbutself = numachip_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 31fbdbfbf960..d50e3640d5ae 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -26,15 +26,6 @@ static int bigsmp_apic_id_registered(void)
return 1;
}
-static const struct cpumask *bigsmp_target_cpus(void)
-{
-#ifdef CONFIG_SMP
- return cpu_online_mask;
-#else
- return cpumask_of(0);
-#endif
-}
-
static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
return 0;
@@ -105,32 +96,6 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
return 1;
}
-/* As we are using single CPU as destination, pick only one CPU here */
-static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
- int cpu = cpumask_first(cpumask);
-
- if (cpu < nr_cpu_ids)
- return cpu_physical_id(cpu);
- return BAD_APICID;
-}
-
-static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- int cpu;
-
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- for_each_cpu_and(cpu, cpumask, andmask) {
- if (cpumask_test_cpu(cpu, cpu_online_mask))
- return cpu_physical_id(cpu);
- }
- return BAD_APICID;
-}
-
static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
return cpuid_apic >> index_msb;
@@ -177,12 +142,6 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
{ } /* NULL entry stops DMI scanning */
};
-static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- cpumask_clear(retmask);
- cpumask_set_cpu(cpu, retmask);
-}
-
static int probe_bigsmp(void)
{
if (def_to_bigsmp)
@@ -205,13 +164,13 @@ static struct apic apic_bigsmp = {
/* phys delivery to target CPU: */
.irq_dest_mode = 0,
- .target_cpus = bigsmp_target_cpus,
+ .target_cpus = default_target_cpus,
.disable_esr = 1,
.dest_logical = 0,
.check_apicid_used = bigsmp_check_apicid_used,
.check_apicid_present = bigsmp_check_apicid_present,
- .vector_allocation_domain = bigsmp_vector_allocation_domain,
+ .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = bigsmp_init_apic_ldr,
.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
@@ -229,8 +188,7 @@ static struct apic apic_bigsmp = {
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = bigsmp_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = bigsmp_cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = bigsmp_send_IPI_mask,
.send_IPI_mask_allbutself = NULL,
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index db4ab1be3c79..0874799a98c6 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -394,21 +394,6 @@ static void es7000_enable_apic_mode(void)
WARN(1, "Command failed, status = %x\n", mip_status);
}
-static void es7000_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- /* Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- cpumask_clear(retmask);
- cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
-
static void es7000_wait_for_init_deassert(atomic_t *deassert)
{
while (!atomic_read(deassert))
@@ -540,45 +525,49 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
return 1;
}
-static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+es7000_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
{
unsigned int round = 0;
- int cpu, uninitialized_var(apicid);
+ unsigned int cpu, uninitialized_var(apicid);
/*
* The cpus in the mask must all be on the apic cluster.
*/
- for_each_cpu(cpu, cpumask) {
+ for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
WARN(1, "Not a valid mask!");
- return BAD_APICID;
+ return -EINVAL;
}
- apicid = new_apicid;
+ apicid |= new_apicid;
round++;
}
- return apicid;
+ if (!round)
+ return -EINVAL;
+ *dest_id = apicid;
+ return 0;
}
-static unsigned int
+static int
es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
- const struct cpumask *andmask)
+ const struct cpumask *andmask,
+ unsigned int *apicid)
{
- int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
cpumask_var_t cpumask;
+ *apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
- return apicid;
+ return 0;
cpumask_and(cpumask, inmask, andmask);
- cpumask_and(cpumask, cpumask, cpu_online_mask);
- apicid = es7000_cpu_mask_to_apicid(cpumask);
+ es7000_cpu_mask_to_apicid(cpumask, apicid);
free_cpumask_var(cpumask);
- return apicid;
+ return 0;
}
static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
@@ -638,7 +627,7 @@ static struct apic __refdata apic_es7000_cluster = {
.check_apicid_used = es7000_check_apicid_used,
.check_apicid_present = es7000_check_apicid_present,
- .vector_allocation_domain = es7000_vector_allocation_domain,
+ .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = es7000_init_apic_ldr_cluster,
.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
@@ -656,7 +645,6 @@ static struct apic __refdata apic_es7000_cluster = {
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
.send_IPI_mask = es7000_send_IPI_mask,
@@ -705,7 +693,7 @@ static struct apic __refdata apic_es7000 = {
.check_apicid_used = es7000_check_apicid_used,
.check_apicid_present = es7000_check_apicid_present,
- .vector_allocation_domain = es7000_vector_allocation_domain,
+ .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = es7000_init_apic_ldr,
.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
@@ -723,7 +711,6 @@ static struct apic __refdata apic_es7000 = {
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
.send_IPI_mask = es7000_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 5f0ff597437c..406eee784684 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -448,8 +448,8 @@ static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pi
entry = alloc_irq_pin_list(node);
if (!entry) {
- printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
- node, apic, pin);
+ pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
+ node, apic, pin);
return -ENOMEM;
}
entry->apic = apic;
@@ -661,7 +661,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
ioapic_mask_entry(apic, pin);
entry = ioapic_read_entry(apic, pin);
if (entry.irr)
- printk(KERN_ERR "Unable to reset IRR for apic: %d, pin :%d\n",
+ pr_err("Unable to reset IRR for apic: %d, pin :%d\n",
mpc_ioapic_id(apic), pin);
}
@@ -895,7 +895,7 @@ static int irq_polarity(int idx)
}
case 2: /* reserved */
{
- printk(KERN_WARNING "broken BIOS!!\n");
+ pr_warn("broken BIOS!!\n");
polarity = 1;
break;
}
@@ -906,7 +906,7 @@ static int irq_polarity(int idx)
}
default: /* invalid */
{
- printk(KERN_WARNING "broken BIOS!!\n");
+ pr_warn("broken BIOS!!\n");
polarity = 1;
break;
}
@@ -948,7 +948,7 @@ static int irq_trigger(int idx)
}
default:
{
- printk(KERN_WARNING "broken BIOS!!\n");
+ pr_warn("broken BIOS!!\n");
trigger = 1;
break;
}
@@ -962,7 +962,7 @@ static int irq_trigger(int idx)
}
case 2: /* reserved */
{
- printk(KERN_WARNING "broken BIOS!!\n");
+ pr_warn("broken BIOS!!\n");
trigger = 1;
break;
}
@@ -973,7 +973,7 @@ static int irq_trigger(int idx)
}
default: /* invalid */
{
- printk(KERN_WARNING "broken BIOS!!\n");
+ pr_warn("broken BIOS!!\n");
trigger = 0;
break;
}
@@ -991,7 +991,7 @@ static int pin_2_irq(int idx, int apic, int pin)
* Debugging check, we are in big trouble if this message pops up!
*/
if (mp_irqs[idx].dstirq != pin)
- printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
+ pr_err("broken BIOS or MPTABLE parser, ayiee!!\n");
if (test_bit(bus, mp_bus_not_pci)) {
irq = mp_irqs[idx].srcbusirq;
@@ -1112,8 +1112,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
* 0x80, because int 0x80 is hm, kind of importantish. ;)
*/
static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
- static int current_offset = VECTOR_OFFSET_START % 8;
- unsigned int old_vector;
+ static int current_offset = VECTOR_OFFSET_START % 16;
int cpu, err;
cpumask_var_t tmp_mask;
@@ -1123,35 +1122,45 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
return -ENOMEM;
- old_vector = cfg->vector;
- if (old_vector) {
- cpumask_and(tmp_mask, mask, cpu_online_mask);
- cpumask_and(tmp_mask, cfg->domain, tmp_mask);
- if (!cpumask_empty(tmp_mask)) {
- free_cpumask_var(tmp_mask);
- return 0;
- }
- }
-
/* Only try and allocate irqs on cpus that are present */
err = -ENOSPC;
- for_each_cpu_and(cpu, mask, cpu_online_mask) {
- int new_cpu;
- int vector, offset;
+ cpumask_clear(cfg->old_domain);
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+ while (cpu < nr_cpu_ids) {
+ int new_cpu, vector, offset;
- apic->vector_allocation_domain(cpu, tmp_mask);
+ apic->vector_allocation_domain(cpu, tmp_mask, mask);
+
+ if (cpumask_subset(tmp_mask, cfg->domain)) {
+ err = 0;
+ if (cpumask_equal(tmp_mask, cfg->domain))
+ break;
+ /*
+ * New cpumask using the vector is a proper subset of
+ * the current in use mask. So cleanup the vector
+ * allocation for the members that are not used anymore.
+ */
+ cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
+ cfg->move_in_progress = 1;
+ cpumask_and(cfg->domain, cfg->domain, tmp_mask);
+ break;
+ }
vector = current_vector;
offset = current_offset;
next:
- vector += 8;
+ vector += 16;
if (vector >= first_system_vector) {
- /* If out of vectors on large boxen, must share them. */
- offset = (offset + 1) % 8;
+ offset = (offset + 1) % 16;
vector = FIRST_EXTERNAL_VECTOR + offset;
}
- if (unlikely(current_vector == vector))
+
+ if (unlikely(current_vector == vector)) {
+ cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
+ cpumask_andnot(tmp_mask, mask, cfg->old_domain);
+ cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
continue;
+ }
if (test_bit(vector, used_vectors))
goto next;
@@ -1162,7 +1171,7 @@ next:
/* Found one! */
current_vector = vector;
current_offset = offset;
- if (old_vector) {
+ if (cfg->vector) {
cfg->move_in_progress = 1;
cpumask_copy(cfg->old_domain, cfg->domain);
}
@@ -1346,18 +1355,18 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
if (!IO_APIC_IRQ(irq))
return;
- /*
- * For legacy irqs, cfg->domain starts with cpu 0 for legacy
- * controllers like 8259. Now that IO-APIC can handle this irq, update
- * the cfg->domain.
- */
- if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
- apic->vector_allocation_domain(0, cfg->domain);
if (assign_irq_vector(irq, cfg, apic->target_cpus()))
return;
- dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+ if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(),
+ &dest)) {
+ pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
+ mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
+ __clear_irq_vector(irq, cfg);
+
+ return;
+ }
apic_printk(APIC_VERBOSE,KERN_DEBUG
"IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@@ -1366,7 +1375,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
cfg->vector, irq, attr->trigger, attr->polarity, dest);
if (setup_ioapic_entry(irq, &entry, dest, cfg->vector, attr)) {
- pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
+ pr_warn("Failed to setup ioapic entry for ioapic %d, pin %d\n",
mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
__clear_irq_vector(irq, cfg);
@@ -1469,9 +1478,10 @@ void setup_IO_APIC_irq_extra(u32 gsi)
* Set up the timer pin, possibly with the 8259A-master behind.
*/
static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
- unsigned int pin, int vector)
+ unsigned int pin, int vector)
{
struct IO_APIC_route_entry entry;
+ unsigned int dest;
if (irq_remapping_enabled)
return;
@@ -1482,9 +1492,13 @@ static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
* We use logical delivery to get the timer IRQ
* to the first CPU.
*/
+ if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
+ apic->target_cpus(), &dest)))
+ dest = BAD_APICID;
+
entry.dest_mode = apic->irq_dest_mode;
entry.mask = 0; /* don't mask IRQ for edge */
- entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
+ entry.dest = dest;
entry.delivery_mode = apic->irq_delivery_mode;
entry.polarity = 0;
entry.trigger = 0;
@@ -1521,7 +1535,6 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
reg_03.raw = io_apic_read(ioapic_idx, 3);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- printk("\n");
printk(KERN_DEBUG "IO APIC #%d......\n", mpc_ioapic_id(ioapic_idx));
printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
@@ -1578,7 +1591,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
i,
ir_entry->index
);
- printk("%1d %1d %1d %1d %1d "
+ pr_cont("%1d %1d %1d %1d %1d "
"%1d %1d %X %02X\n",
ir_entry->format,
ir_entry->mask,
@@ -1598,7 +1611,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
i,
entry.dest
);
- printk("%1d %1d %1d %1d %1d "
+ pr_cont("%1d %1d %1d %1d %1d "
"%1d %1d %02X\n",
entry.mask,
entry.trigger,
@@ -1651,8 +1664,8 @@ __apicdebuginit(void) print_IO_APICs(void)
continue;
printk(KERN_DEBUG "IRQ%d ", irq);
for_each_irq_pin(entry, cfg->irq_2_pin)
- printk("-> %d:%d", entry->apic, entry->pin);
- printk("\n");
+ pr_cont("-> %d:%d", entry->apic, entry->pin);
+ pr_cont("\n");
}
printk(KERN_INFO ".................................... done.\n");
@@ -1665,9 +1678,9 @@ __apicdebuginit(void) print_APIC_field(int base)
printk(KERN_DEBUG);
for (i = 0; i < 8; i++)
- printk(KERN_CONT "%08x", apic_read(base + i*0x10));
+ pr_cont("%08x", apic_read(base + i*0x10));
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
__apicdebuginit(void) print_local_APIC(void *dummy)
@@ -1769,7 +1782,7 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
}
}
- printk("\n");
+ pr_cont("\n");
}
__apicdebuginit(void) print_local_APICs(int maxcpu)
@@ -2065,7 +2078,7 @@ void __init setup_ioapic_ids_from_mpc_nocheck(void)
reg_00.raw = io_apic_read(ioapic_idx, 0);
raw_spin_unlock_irqrestore(&ioapic_lock, flags);
if (reg_00.bits.ID != mpc_ioapic_id(ioapic_idx))
- printk("could not set ID!\n");
+ pr_cont("could not set ID!\n");
else
apic_printk(APIC_VERBOSE, " ok.\n");
}
@@ -2210,71 +2223,6 @@ void send_cleanup_vector(struct irq_cfg *cfg)
cfg->move_in_progress = 0;
}
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
-{
- int apic, pin;
- struct irq_pin_list *entry;
- u8 vector = cfg->vector;
-
- for_each_irq_pin(entry, cfg->irq_2_pin) {
- unsigned int reg;
-
- apic = entry->apic;
- pin = entry->pin;
- /*
- * With interrupt-remapping, destination information comes
- * from interrupt-remapping table entry.
- */
- if (!irq_remapped(cfg))
- io_apic_write(apic, 0x11 + pin*2, dest);
- reg = io_apic_read(apic, 0x10 + pin*2);
- reg &= ~IO_APIC_REDIR_VECTOR_MASK;
- reg |= vector;
- io_apic_modify(apic, 0x10 + pin*2, reg);
- }
-}
-
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
- unsigned int *dest_id)
-{
- struct irq_cfg *cfg = data->chip_data;
-
- if (!cpumask_intersects(mask, cpu_online_mask))
- return -1;
-
- if (assign_irq_vector(data->irq, data->chip_data, mask))
- return -1;
-
- cpumask_copy(data->affinity, mask);
-
- *dest_id = apic->cpu_mask_to_apicid_and(mask, cfg->domain);
- return 0;
-}
-
-static int
-ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
- bool force)
-{
- unsigned int dest, irq = data->irq;
- unsigned long flags;
- int ret;
-
- raw_spin_lock_irqsave(&ioapic_lock, flags);
- ret = __ioapic_set_affinity(data, mask, &dest);
- if (!ret) {
- /* Only the high 8 bits are valid. */
- dest = SET_APIC_LOGICAL_ID(dest);
- __target_IO_APIC_irq(irq, dest, data->chip_data);
- }
- raw_spin_unlock_irqrestore(&ioapic_lock, flags);
- return ret;
-}
-
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
unsigned vector, me;
@@ -2362,6 +2310,87 @@ void irq_force_complete_move(int irq)
static inline void irq_complete_move(struct irq_cfg *cfg) { }
#endif
+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
+{
+ int apic, pin;
+ struct irq_pin_list *entry;
+ u8 vector = cfg->vector;
+
+ for_each_irq_pin(entry, cfg->irq_2_pin) {
+ unsigned int reg;
+
+ apic = entry->apic;
+ pin = entry->pin;
+ /*
+ * With interrupt-remapping, destination information comes
+ * from interrupt-remapping table entry.
+ */
+ if (!irq_remapped(cfg))
+ io_apic_write(apic, 0x11 + pin*2, dest);
+ reg = io_apic_read(apic, 0x10 + pin*2);
+ reg &= ~IO_APIC_REDIR_VECTOR_MASK;
+ reg |= vector;
+ io_apic_modify(apic, 0x10 + pin*2, reg);
+ }
+}
+
+/*
+ * Either sets data->affinity to a valid value, and returns
+ * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
+ * leaves data->affinity untouched.
+ */
+int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ unsigned int *dest_id)
+{
+ struct irq_cfg *cfg = data->chip_data;
+ unsigned int irq = data->irq;
+ int err;
+
+ if (!config_enabled(CONFIG_SMP))
+ return -1;
+
+ if (!cpumask_intersects(mask, cpu_online_mask))
+ return -EINVAL;
+
+ err = assign_irq_vector(irq, cfg, mask);
+ if (err)
+ return err;
+
+ err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
+ if (err) {
+ if (assign_irq_vector(irq, cfg, data->affinity))
+ pr_err("Failed to recover vector for irq %d\n", irq);
+ return err;
+ }
+
+ cpumask_copy(data->affinity, mask);
+
+ return 0;
+}
+
+static int
+ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ unsigned int dest, irq = data->irq;
+ unsigned long flags;
+ int ret;
+
+ if (!config_enabled(CONFIG_SMP))
+ return -1;
+
+ raw_spin_lock_irqsave(&ioapic_lock, flags);
+ ret = __ioapic_set_affinity(data, mask, &dest);
+ if (!ret) {
+ /* Only the high 8 bits are valid. */
+ dest = SET_APIC_LOGICAL_ID(dest);
+ __target_IO_APIC_irq(irq, dest, data->chip_data);
+ ret = IRQ_SET_MASK_OK_NOCOPY;
+ }
+ raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+ return ret;
+}
+
static void ack_apic_edge(struct irq_data *data)
{
irq_complete_move(data->chip_data);
@@ -2541,9 +2570,7 @@ static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
chip->irq_ack = ir_ack_apic_edge;
chip->irq_eoi = ir_ack_apic_level;
-#ifdef CONFIG_SMP
chip->irq_set_affinity = set_remapped_irq_affinity;
-#endif
}
#endif /* CONFIG_IRQ_REMAP */
@@ -2554,9 +2581,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_unmask = unmask_ioapic_irq,
.irq_ack = ack_apic_edge,
.irq_eoi = ack_apic_level,
-#ifdef CONFIG_SMP
.irq_set_affinity = ioapic_set_affinity,
-#endif
.irq_retrigger = ioapic_retrigger_irq,
};
@@ -3038,7 +3063,10 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
if (err)
return err;
- dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+ err = apic->cpu_mask_to_apicid_and(cfg->domain,
+ apic->target_cpus(), &dest);
+ if (err)
+ return err;
if (irq_remapped(cfg)) {
compose_remapped_msi_msg(pdev, irq, dest, msg, hpet_id);
@@ -3072,7 +3100,6 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
return err;
}
-#ifdef CONFIG_SMP
static int
msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
{
@@ -3092,9 +3119,8 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
__write_msi_msg(data->msi_desc, &msg);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
-#endif /* CONFIG_SMP */
/*
* IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
@@ -3105,9 +3131,7 @@ static struct irq_chip msi_chip = {
.irq_unmask = unmask_msi_irq,
.irq_mask = mask_msi_irq,
.irq_ack = ack_apic_edge,
-#ifdef CONFIG_SMP
.irq_set_affinity = msi_set_affinity,
-#endif
.irq_retrigger = ioapic_retrigger_irq,
};
@@ -3192,7 +3216,6 @@ void native_teardown_msi_irq(unsigned int irq)
}
#ifdef CONFIG_DMAR_TABLE
-#ifdef CONFIG_SMP
static int
dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
@@ -3214,19 +3237,15 @@ dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
dmar_msi_write(irq, &msg);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
-#endif /* CONFIG_SMP */
-
static struct irq_chip dmar_msi_type = {
.name = "DMAR_MSI",
.irq_unmask = dmar_msi_unmask,
.irq_mask = dmar_msi_mask,
.irq_ack = ack_apic_edge,
-#ifdef CONFIG_SMP
.irq_set_affinity = dmar_msi_set_affinity,
-#endif
.irq_retrigger = ioapic_retrigger_irq,
};
@@ -3247,7 +3266,6 @@ int arch_setup_dmar_msi(unsigned int irq)
#ifdef CONFIG_HPET_TIMER
-#ifdef CONFIG_SMP
static int hpet_msi_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
@@ -3267,19 +3285,15 @@ static int hpet_msi_set_affinity(struct irq_data *data,
hpet_msi_write(data->handler_data, &msg);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
-#endif /* CONFIG_SMP */
-
static struct irq_chip hpet_msi_type = {
.name = "HPET_MSI",
.irq_unmask = hpet_msi_unmask,
.irq_mask = hpet_msi_mask,
.irq_ack = ack_apic_edge,
-#ifdef CONFIG_SMP
.irq_set_affinity = hpet_msi_set_affinity,
-#endif
.irq_retrigger = ioapic_retrigger_irq,
};
@@ -3314,8 +3328,6 @@ int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
*/
#ifdef CONFIG_HT_IRQ
-#ifdef CONFIG_SMP
-
static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
struct ht_irq_msg msg;
@@ -3340,25 +3352,23 @@ ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
return -1;
target_ht_irq(data->irq, dest, cfg->vector);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
-#endif
-
static struct irq_chip ht_irq_chip = {
.name = "PCI-HT",
.irq_mask = mask_ht_irq,
.irq_unmask = unmask_ht_irq,
.irq_ack = ack_apic_edge,
-#ifdef CONFIG_SMP
.irq_set_affinity = ht_set_affinity,
-#endif
.irq_retrigger = ioapic_retrigger_irq,
};
int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
struct irq_cfg *cfg;
+ struct ht_irq_msg msg;
+ unsigned dest;
int err;
if (disable_apic)
@@ -3366,36 +3376,37 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
cfg = irq_cfg(irq);
err = assign_irq_vector(irq, cfg, apic->target_cpus());
- if (!err) {
- struct ht_irq_msg msg;
- unsigned dest;
+ if (err)
+ return err;
+
+ err = apic->cpu_mask_to_apicid_and(cfg->domain,
+ apic->target_cpus(), &dest);
+ if (err)
+ return err;
- dest = apic->cpu_mask_to_apicid_and(cfg->domain,
- apic->target_cpus());
+ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
- msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
+ msg.address_lo =
+ HT_IRQ_LOW_BASE |
+ HT_IRQ_LOW_DEST_ID(dest) |
+ HT_IRQ_LOW_VECTOR(cfg->vector) |
+ ((apic->irq_dest_mode == 0) ?
+ HT_IRQ_LOW_DM_PHYSICAL :
+ HT_IRQ_LOW_DM_LOGICAL) |
+ HT_IRQ_LOW_RQEOI_EDGE |
+ ((apic->irq_delivery_mode != dest_LowestPrio) ?
+ HT_IRQ_LOW_MT_FIXED :
+ HT_IRQ_LOW_MT_ARBITRATED) |
+ HT_IRQ_LOW_IRQ_MASKED;
- msg.address_lo =
- HT_IRQ_LOW_BASE |
- HT_IRQ_LOW_DEST_ID(dest) |
- HT_IRQ_LOW_VECTOR(cfg->vector) |
- ((apic->irq_dest_mode == 0) ?
- HT_IRQ_LOW_DM_PHYSICAL :
- HT_IRQ_LOW_DM_LOGICAL) |
- HT_IRQ_LOW_RQEOI_EDGE |
- ((apic->irq_delivery_mode != dest_LowestPrio) ?
- HT_IRQ_LOW_MT_FIXED :
- HT_IRQ_LOW_MT_ARBITRATED) |
- HT_IRQ_LOW_IRQ_MASKED;
+ write_ht_irq_msg(irq, &msg);
- write_ht_irq_msg(irq, &msg);
+ irq_set_chip_and_handler_name(irq, &ht_irq_chip,
+ handle_edge_irq, "edge");
- irq_set_chip_and_handler_name(irq, &ht_irq_chip,
- handle_edge_irq, "edge");
+ dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
- dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
- }
- return err;
+ return 0;
}
#endif /* CONFIG_HT_IRQ */
@@ -3563,7 +3574,8 @@ static int __init io_apic_get_unique_id(int ioapic, int apic_id)
/* Sanity check */
if (reg_00.bits.ID != apic_id) {
- printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
+ pr_err("IOAPIC[%d]: Unable to change apic_id!\n",
+ ioapic);
return -1;
}
}
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index f00a68cca37a..d661ee95cabf 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -406,16 +406,13 @@ static inline int numaq_check_phys_apicid_present(int phys_apicid)
* We use physical apicids here, not logical, so just return the default
* physical broadcast to stop people from breaking us
*/
-static unsigned int numaq_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
- return 0x0F;
-}
-
-static inline unsigned int
+static int
numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+ const struct cpumask *andmask,
+ unsigned int *apicid)
{
- return 0x0F;
+ *apicid = 0x0F;
+ return 0;
}
/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
@@ -441,20 +438,6 @@ static int probe_numaq(void)
return found_numaq;
}
-static void numaq_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- /* Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- cpumask_clear(retmask);
- cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
static void numaq_setup_portio_remap(void)
{
int num_quads = num_online_nodes();
@@ -491,7 +474,7 @@ static struct apic __refdata apic_numaq = {
.check_apicid_used = numaq_check_apicid_used,
.check_apicid_present = numaq_check_apicid_present,
- .vector_allocation_domain = numaq_vector_allocation_domain,
+ .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = numaq_init_apic_ldr,
.ioapic_phys_id_map = numaq_ioapic_phys_id_map,
@@ -509,7 +492,6 @@ static struct apic __refdata apic_numaq = {
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
- .cpu_mask_to_apicid = numaq_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = numaq_cpu_mask_to_apicid_and,
.send_IPI_mask = numaq_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 1b291da09e60..eb35ef9ee63f 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -66,21 +66,6 @@ static void setup_apic_flat_routing(void)
#endif
}
-static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- /*
- * Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- cpumask_clear(retmask);
- cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
/* should be called last. */
static int probe_default(void)
{
@@ -105,7 +90,7 @@ static struct apic apic_default = {
.check_apicid_used = default_check_apicid_used,
.check_apicid_present = default_check_apicid_present,
- .vector_allocation_domain = default_vector_allocation_domain,
+ .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = default_init_apic_ldr,
.ioapic_phys_id_map = default_ioapic_phys_id_map,
@@ -123,8 +108,7 @@ static struct apic apic_default = {
.set_apic_id = NULL,
.apic_id_mask = 0x0F << 24,
- .cpu_mask_to_apicid = default_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
.send_IPI_mask = default_send_IPI_mask_logical,
.send_IPI_mask_allbutself = default_send_IPI_mask_allbutself_logical,
@@ -208,6 +192,9 @@ void __init default_setup_apic_routing(void)
if (apic->setup_apic_routing)
apic->setup_apic_routing();
+
+ if (x86_platform.apic_post_init)
+ x86_platform.apic_post_init();
}
void __init generic_apic_probe(void)
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index 3fe986698929..1793dba7a741 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -23,11 +23,6 @@
#include <asm/ipi.h>
#include <asm/setup.h>
-static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
-{
- return hard_smp_processor_id() >> index_msb;
-}
-
/*
* Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
*/
@@ -48,10 +43,8 @@ void __init default_setup_apic_routing(void)
}
}
- if (is_vsmp_box()) {
- /* need to update phys_pkg_id */
- apic->phys_pkg_id = apicid_phys_pkg_id;
- }
+ if (x86_platform.apic_post_init)
+ x86_platform.apic_post_init();
}
/* Same for both flat and physical. */
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 659897c00755..77c95c0e1bf7 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -26,6 +26,8 @@
*
*/
+#define pr_fmt(fmt) "summit: %s: " fmt, __func__
+
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/io.h>
@@ -235,8 +237,8 @@ static int summit_apic_id_registered(void)
static void summit_setup_apic_routing(void)
{
- printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
- nr_ioapics);
+ pr_info("Enabling APIC mode: Summit. Using %d I/O APICs\n",
+ nr_ioapics);
}
static int summit_cpu_present_to_apicid(int mps_cpu)
@@ -263,43 +265,48 @@ static int summit_check_phys_apicid_present(int physical_apicid)
return 1;
}
-static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static inline int
+summit_cpu_mask_to_apicid(const struct cpumask *cpumask, unsigned int *dest_id)
{
unsigned int round = 0;
- int cpu, apicid = 0;
+ unsigned int cpu, apicid = 0;
/*
* The cpus in the mask must all be on the apic cluster.
*/
- for_each_cpu(cpu, cpumask) {
+ for_each_cpu_and(cpu, cpumask, cpu_online_mask) {
int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
- printk("%s: Not a valid mask!\n", __func__);
- return BAD_APICID;
+ pr_err("Not a valid mask!\n");
+ return -EINVAL;
}
apicid |= new_apicid;
round++;
}
- return apicid;
+ if (!round)
+ return -EINVAL;
+ *dest_id = apicid;
+ return 0;
}
-static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
- const struct cpumask *andmask)
+static int
+summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+ const struct cpumask *andmask,
+ unsigned int *apicid)
{
- int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
cpumask_var_t cpumask;
+ *apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
- return apicid;
+ return 0;
cpumask_and(cpumask, inmask, andmask);
- cpumask_and(cpumask, cpumask, cpu_online_mask);
- apicid = summit_cpu_mask_to_apicid(cpumask);
+ summit_cpu_mask_to_apicid(cpumask, apicid);
free_cpumask_var(cpumask);
- return apicid;
+ return 0;
}
/*
@@ -320,20 +327,6 @@ static int probe_summit(void)
return 0;
}
-static void summit_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- /* Careful. Some cpus do not strictly honor the set of cpus
- * specified in the interrupt destination when using lowest
- * priority interrupt delivery mode.
- *
- * In particular there was a hyperthreading cpu observed to
- * deliver interrupts to the wrong hyperthread when only one
- * hyperthread was specified in the interrupt desitination.
- */
- cpumask_clear(retmask);
- cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
#ifdef CONFIG_X86_SUMMIT_NUMA
static struct rio_table_hdr *rio_table_hdr;
static struct scal_detail *scal_devs[MAX_NUMNODES];
@@ -355,7 +348,7 @@ static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
}
}
if (i == rio_table_hdr->num_rio_dev) {
- printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__);
+ pr_err("Couldn't find owner Cyclone for Winnipeg!\n");
return last_bus;
}
@@ -366,7 +359,7 @@ static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
}
}
if (i == rio_table_hdr->num_scal_dev) {
- printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__);
+ pr_err("Couldn't find owner Twister for Cyclone!\n");
return last_bus;
}
@@ -396,7 +389,7 @@ static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
num_buses = 9;
break;
default:
- printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__);
+ pr_info("Unsupported Winnipeg type!\n");
return last_bus;
}
@@ -411,13 +404,15 @@ static int build_detail_arrays(void)
int i, scal_detail_size, rio_detail_size;
if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) {
- printk(KERN_WARNING "%s: MAX_NUMNODES too low! Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev);
+ pr_warn("MAX_NUMNODES too low! Defined as %d, but system has %d nodes\n",
+ MAX_NUMNODES, rio_table_hdr->num_scal_dev);
return 0;
}
switch (rio_table_hdr->version) {
default:
- printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version);
+ pr_warn("Invalid Rio Grande Table Version: %d\n",
+ rio_table_hdr->version);
return 0;
case 2:
scal_detail_size = 11;
@@ -462,7 +457,7 @@ void setup_summit(void)
offset = *((unsigned short *)(ptr + offset));
}
if (!rio_table_hdr) {
- printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__);
+ pr_err("Unable to locate Rio Grande Table in EBDA - bailing!\n");
return;
}
@@ -509,7 +504,7 @@ static struct apic apic_summit = {
.check_apicid_used = summit_check_apicid_used,
.check_apicid_present = summit_check_apicid_present,
- .vector_allocation_domain = summit_vector_allocation_domain,
+ .vector_allocation_domain = flat_vector_allocation_domain,
.init_apic_ldr = summit_init_apic_ldr,
.ioapic_phys_id_map = summit_ioapic_phys_id_map,
@@ -527,7 +522,6 @@ static struct apic apic_summit = {
.set_apic_id = NULL,
.apic_id_mask = 0xFF << 24,
- .cpu_mask_to_apicid = summit_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = summit_cpu_mask_to_apicid_and,
.send_IPI_mask = summit_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index ff35cff0e1a7..c88baa4ff0e5 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -81,7 +81,7 @@ static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
}
static void
- x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}
@@ -96,36 +96,37 @@ static void x2apic_send_IPI_all(int vector)
__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}
-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
+static int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+ const struct cpumask *andmask,
+ unsigned int *apicid)
{
- /*
- * We're using fixed IRQ delivery, can only return one logical APIC ID.
- * May as well be the first.
- */
- int cpu = cpumask_first(cpumask);
+ u32 dest = 0;
+ u16 cluster;
+ int i;
- if ((unsigned)cpu < nr_cpu_ids)
- return per_cpu(x86_cpu_to_logical_apicid, cpu);
- else
- return BAD_APICID;
-}
+ for_each_cpu_and(i, cpumask, andmask) {
+ if (!cpumask_test_cpu(i, cpu_online_mask))
+ continue;
+ dest = per_cpu(x86_cpu_to_logical_apicid, i);
+ cluster = x2apic_cluster(i);
+ break;
+ }
-static unsigned int
-x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- int cpu;
+ if (!dest)
+ return -EINVAL;
- /*
- * We're using fixed IRQ delivery, can only return one logical APIC ID.
- * May as well be the first.
- */
- for_each_cpu_and(cpu, cpumask, andmask) {
- if (cpumask_test_cpu(cpu, cpu_online_mask))
- break;
+ for_each_cpu_and(i, cpumask, andmask) {
+ if (!cpumask_test_cpu(i, cpu_online_mask))
+ continue;
+ if (cluster != x2apic_cluster(i))
+ continue;
+ dest |= per_cpu(x86_cpu_to_logical_apicid, i);
}
- return per_cpu(x86_cpu_to_logical_apicid, cpu);
+ *apicid = dest;
+
+ return 0;
}
static void init_x2apic_ldr(void)
@@ -208,6 +209,32 @@ static int x2apic_cluster_probe(void)
return 0;
}
+static const struct cpumask *x2apic_cluster_target_cpus(void)
+{
+ return cpu_all_mask;
+}
+
+/*
+ * Each x2apic cluster is an allocation domain.
+ */
+static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask)
+{
+ /*
+ * To minimize vector pressure, default case of boot, device bringup
+ * etc will use a single cpu for the interrupt destination.
+ *
+ * On explicit migration requests coming from irqbalance etc,
+ * interrupts will be routed to the x2apic cluster (cluster-id
+ * derived from the first cpu in the mask) members specified
+ * in the mask.
+ */
+ if (mask == x2apic_cluster_target_cpus())
+ cpumask_copy(retmask, cpumask_of(cpu));
+ else
+ cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
+}
+
static struct apic apic_x2apic_cluster = {
.name = "cluster x2apic",
@@ -219,13 +246,13 @@ static struct apic apic_x2apic_cluster = {
.irq_delivery_mode = dest_LowestPrio,
.irq_dest_mode = 1, /* logical */
- .target_cpus = x2apic_target_cpus,
+ .target_cpus = x2apic_cluster_target_cpus,
.disable_esr = 0,
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = NULL,
.check_apicid_present = NULL,
- .vector_allocation_domain = x2apic_vector_allocation_domain,
+ .vector_allocation_domain = cluster_vector_allocation_domain,
.init_apic_ldr = init_x2apic_ldr,
.ioapic_phys_id_map = NULL,
@@ -243,7 +270,6 @@ static struct apic apic_x2apic_cluster = {
.set_apic_id = x2apic_set_apic_id,
.apic_id_mask = 0xFFFFFFFFu,
- .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
.send_IPI_mask = x2apic_send_IPI_mask,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index c17e982db275..e03a1e180e81 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -76,38 +76,6 @@ static void x2apic_send_IPI_all(int vector)
__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}
-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- int cpu = cpumask_first(cpumask);
-
- if ((unsigned)cpu < nr_cpu_ids)
- return per_cpu(x86_cpu_to_apicid, cpu);
- else
- return BAD_APICID;
-}
-
-static unsigned int
-x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
-{
- int cpu;
-
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- for_each_cpu_and(cpu, cpumask, andmask) {
- if (cpumask_test_cpu(cpu, cpu_online_mask))
- break;
- }
-
- return per_cpu(x86_cpu_to_apicid, cpu);
-}
-
static void init_x2apic_ldr(void)
{
}
@@ -131,13 +99,13 @@ static struct apic apic_x2apic_phys = {
.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 0, /* physical */
- .target_cpus = x2apic_target_cpus,
+ .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = 0,
.check_apicid_used = NULL,
.check_apicid_present = NULL,
- .vector_allocation_domain = x2apic_vector_allocation_domain,
+ .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = init_x2apic_ldr,
.ioapic_phys_id_map = NULL,
@@ -155,8 +123,7 @@ static struct apic apic_x2apic_phys = {
.set_apic_id = x2apic_set_apic_id,
.apic_id_mask = 0xFFFFFFFFu,
- .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
- .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
+ .cpu_mask_to_apicid_and = default_cpu_mask_to_apicid_and,
.send_IPI_mask = x2apic_send_IPI_mask,
.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index c6d03f7a4401..8cfade9510a4 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -185,17 +185,6 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);
-static const struct cpumask *uv_target_cpus(void)
-{
- return cpu_online_mask;
-}
-
-static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
- cpumask_clear(retmask);
- cpumask_set_cpu(cpu, retmask);
-}
-
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
{
#ifdef CONFIG_SMP
@@ -280,25 +269,12 @@ static void uv_init_apic_ldr(void)
{
}
-static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
- /*
- * We're using fixed IRQ delivery, can only return one phys APIC ID.
- * May as well be the first.
- */
- int cpu = cpumask_first(cpumask);
-
- if ((unsigned)cpu < nr_cpu_ids)
- return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
- else
- return BAD_APICID;
-}
-
-static unsigned int
+static int
uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
- const struct cpumask *andmask)
+ const struct cpumask *andmask,
+ unsigned int *apicid)
{
- int cpu;
+ int unsigned cpu;
/*
* We're using fixed IRQ delivery, can only return one phys APIC ID.
@@ -308,7 +284,13 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
if (cpumask_test_cpu(cpu, cpu_online_mask))
break;
}
- return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+
+ if (likely(cpu < nr_cpu_ids)) {
+ *apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
+ return 0;
+ }
+
+ return -EINVAL;
}
static unsigned int x2apic_get_apic_id(unsigned long x)
@@ -362,13 +344,13 @@ static struct apic __refdata apic_x2apic_uv_x = {
.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 0, /* physical */
- .target_cpus = uv_target_cpus,
+ .target_cpus = online_target_cpus,
.disable_esr = 0,
.dest_logical = APIC_DEST_LOGICAL,
.check_apicid_used = NULL,
.check_apicid_present = NULL,
- .vector_allocation_domain = uv_vector_allocation_domain,
+ .vector_allocation_domain = default_vector_allocation_domain,
.init_apic_ldr = uv_init_apic_ldr,
.ioapic_phys_id_map = NULL,
@@ -386,7 +368,6 @@ static struct apic __refdata apic_x2apic_uv_x = {
.set_apic_id = set_apic_id,
.apic_id_mask = 0xFFFFFFFFu,
- .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
.cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
.send_IPI_mask = uv_send_IPI_mask,
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 07b0c0db466c..d65464e43503 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -201,6 +201,8 @@
* http://www.microsoft.com/whdc/archive/amp_12.mspx]
*/
+#define pr_fmt(fmt) "apm: " fmt
+
#include <linux/module.h>
#include <linux/poll.h>
@@ -485,11 +487,11 @@ static void apm_error(char *str, int err)
if (error_table[i].key == err)
break;
if (i < ERROR_COUNT)
- printk(KERN_NOTICE "apm: %s: %s\n", str, error_table[i].msg);
+ pr_notice("%s: %s\n", str, error_table[i].msg);
else if (err < 0)
- printk(KERN_NOTICE "apm: %s: linux error code %i\n", str, err);
+ pr_notice("%s: linux error code %i\n", str, err);
else
- printk(KERN_NOTICE "apm: %s: unknown error code %#2.2x\n",
+ pr_notice("%s: unknown error code %#2.2x\n",
str, err);
}
@@ -1184,7 +1186,7 @@ static void queue_event(apm_event_t event, struct apm_user *sender)
static int notified;
if (notified++ == 0)
- printk(KERN_ERR "apm: an event queue overflowed\n");
+ pr_err("an event queue overflowed\n");
if (++as->event_tail >= APM_MAX_EVENTS)
as->event_tail = 0;
}
@@ -1447,7 +1449,7 @@ static void apm_mainloop(void)
static int check_apm_user(struct apm_user *as, const char *func)
{
if (as == NULL || as->magic != APM_BIOS_MAGIC) {
- printk(KERN_ERR "apm: %s passed bad filp\n", func);
+ pr_err("%s passed bad filp\n", func);
return 1;
}
return 0;
@@ -1586,7 +1588,7 @@ static int do_release(struct inode *inode, struct file *filp)
as1 = as1->next)
;
if (as1 == NULL)
- printk(KERN_ERR "apm: filp not in user list\n");
+ pr_err("filp not in user list\n");
else
as1->next = as->next;
}
@@ -1600,11 +1602,9 @@ static int do_open(struct inode *inode, struct file *filp)
struct apm_user *as;
as = kmalloc(sizeof(*as), GFP_KERNEL);
- if (as == NULL) {
- printk(KERN_ERR "apm: cannot allocate struct of size %d bytes\n",
- sizeof(*as));
+ if (as == NULL)
return -ENOMEM;
- }
+
as->magic = APM_BIOS_MAGIC;
as->event_tail = as->event_head = 0;
as->suspends_pending = as->standbys_pending = 0;
@@ -2313,16 +2313,16 @@ static int __init apm_init(void)
}
if (apm_info.disabled) {
- printk(KERN_NOTICE "apm: disabled on user request.\n");
+ pr_notice("disabled on user request.\n");
return -ENODEV;
}
if ((num_online_cpus() > 1) && !power_off && !smp) {
- printk(KERN_NOTICE "apm: disabled - APM is not SMP safe.\n");
+ pr_notice("disabled - APM is not SMP safe.\n");
apm_info.disabled = 1;
return -ENODEV;
}
if (!acpi_disabled) {
- printk(KERN_NOTICE "apm: overridden by ACPI.\n");
+ pr_notice("overridden by ACPI.\n");
apm_info.disabled = 1;
return -ENODEV;
}
@@ -2356,8 +2356,7 @@ static int __init apm_init(void)
kapmd_task = kthread_create(apm, NULL, "kapmd");
if (IS_ERR(kapmd_task)) {
- printk(KERN_ERR "apm: disabled - Unable to start kernel "
- "thread.\n");
+ pr_err("disabled - Unable to start kernel thread\n");
err = PTR_ERR(kapmd_task);
kapmd_task = NULL;
remove_proc_entry("apm", NULL);
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 6ab6aa2fdfdd..bac4c3804cc7 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -32,7 +32,9 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.o
ifdef CONFIG_PERF_EVENTS
obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_p4.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
endif
obj-$(CONFIG_X86_MCE) += mcheck/
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 146bb6218eec..9d92e19039f0 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -19,6 +19,39 @@
#include "cpu.h"
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ u32 gprs[8] = { 0 };
+ int err;
+
+ WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+ gprs[1] = msr;
+ gprs[7] = 0x9c5a203a;
+
+ err = rdmsr_safe_regs(gprs);
+
+ *p = gprs[0] | ((u64)gprs[2] << 32);
+
+ return err;
+}
+
+static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
+{
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
+ u32 gprs[8] = { 0 };
+
+ WARN_ONCE((c->x86 != 0xf), "%s should only be used on K8!\n", __func__);
+
+ gprs[0] = (u32)val;
+ gprs[1] = msr;
+ gprs[2] = val >> 32;
+ gprs[7] = 0x9c5a203a;
+
+ return wrmsr_safe_regs(gprs);
+}
+
#ifdef CONFIG_X86_32
/*
* B step AMD K6 before B 9730xxxx have hardware bugs that can cause
@@ -586,9 +619,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
u64 val;
- if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+ if (!rdmsrl_safe(0xc0011005, &val)) {
val |= 1ULL << 54;
- wrmsrl_amd_safe(0xc0011005, val);
+ wrmsrl_safe(0xc0011005, val);
rdmsrl(0xc0011005, val);
if (val & (1ULL << 54)) {
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
@@ -679,7 +712,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
err = rdmsrl_safe(MSR_AMD64_MCx_MASK(4), &mask);
if (err == 0) {
mask |= (1 << 10);
- checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
+ wrmsrl_safe(MSR_AMD64_MCx_MASK(4), mask);
}
}
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 46674fbb62ba..c97bb7b5a9f8 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -55,8 +55,8 @@ static void __init check_fpu(void)
if (!boot_cpu_data.hard_math) {
#ifndef CONFIG_MATH_EMULATION
- printk(KERN_EMERG "No coprocessor found and no math emulation present.\n");
- printk(KERN_EMERG "Giving up.\n");
+ pr_emerg("No coprocessor found and no math emulation present\n");
+ pr_emerg("Giving up\n");
for (;;) ;
#endif
return;
@@ -86,7 +86,7 @@ static void __init check_fpu(void)
boot_cpu_data.fdiv_bug = fdiv_bug;
if (boot_cpu_data.fdiv_bug)
- printk(KERN_WARNING "Hmm, FPU with FDIV bug.\n");
+ pr_warn("Hmm, FPU with FDIV bug\n");
}
static void __init check_hlt(void)
@@ -94,16 +94,16 @@ static void __init check_hlt(void)
if (boot_cpu_data.x86 >= 5 || paravirt_enabled())
return;
- printk(KERN_INFO "Checking 'hlt' instruction... ");
+ pr_info("Checking 'hlt' instruction... ");
if (!boot_cpu_data.hlt_works_ok) {
- printk("disabled\n");
+ pr_cont("disabled\n");
return;
}
halt();
halt();
halt();
halt();
- printk(KERN_CONT "OK.\n");
+ pr_cont("OK\n");
}
/*
@@ -116,7 +116,7 @@ static void __init check_popad(void)
#ifndef CONFIG_X86_POPAD_OK
int res, inp = (int) &res;
- printk(KERN_INFO "Checking for popad bug... ");
+ pr_info("Checking for popad bug... ");
__asm__ __volatile__(
"movl $12345678,%%eax; movl $0,%%edi; pusha; popa; movl (%%edx,%%edi),%%ecx "
: "=&a" (res)
@@ -127,9 +127,9 @@ static void __init check_popad(void)
* CPU hard. Too bad.
*/
if (res != 12345678)
- printk(KERN_CONT "Buggy.\n");
+ pr_cont("Buggy\n");
else
- printk(KERN_CONT "OK.\n");
+ pr_cont("OK\n");
#endif
}
@@ -161,7 +161,7 @@ void __init check_bugs(void)
{
identify_boot_cpu();
#ifndef CONFIG_SMP
- printk(KERN_INFO "CPU: ");
+ pr_info("CPU: ");
print_cpu_info(&boot_cpu_data);
#endif
check_config();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6b9333b429ba..5bbc082c47ad 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -947,7 +947,7 @@ static void __cpuinit __print_cpu_msr(void)
index_max = msr_range_array[i].max;
for (index = index_min; index < index_max; index++) {
- if (rdmsrl_amd_safe(index, &val))
+ if (rdmsrl_safe(index, &val))
continue;
printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
}
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 755f64fb0743..a8f8fa9769d6 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -37,6 +37,9 @@ static const __initconst struct hypervisor_x86 * const hypervisors[] =
#endif
&x86_hyper_vmware,
&x86_hyper_ms_hyperv,
+#ifdef CONFIG_KVM_GUEST
+ &x86_hyper_kvm,
+#endif
};
const struct hypervisor_x86 *x86_hyper;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index da27c5d2168a..5e095f873e3e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -7,6 +7,9 @@
* Copyright 2008 Intel Corporation
* Author: Andi Kleen
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/thread_info.h>
#include <linux/capability.h>
#include <linux/miscdevice.h>
@@ -57,8 +60,6 @@ static DEFINE_MUTEX(mce_chrdev_read_mutex);
int mce_disabled __read_mostly;
-#define MISC_MCELOG_MINOR 227
-
#define SPINUNIT 100 /* 100ns */
atomic_t mce_entry;
@@ -210,7 +211,7 @@ static void drain_mcelog_buffer(void)
cpu_relax();
if (!m->finished && retries >= 4) {
- pr_err("MCE: skipping error being logged currently!\n");
+ pr_err("skipping error being logged currently!\n");
break;
}
}
@@ -1167,8 +1168,9 @@ int memory_failure(unsigned long pfn, int vector, int flags)
{
/* mce_severity() should not hand us an ACTION_REQUIRED error */
BUG_ON(flags & MF_ACTION_REQUIRED);
- printk(KERN_ERR "Uncorrected memory error in page 0x%lx ignored\n"
- "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n", pfn);
+ pr_err("Uncorrected memory error in page 0x%lx ignored\n"
+ "Rebuild kernel with CONFIG_MEMORY_FAILURE=y for smarter handling\n",
+ pfn);
return 0;
}
@@ -1186,6 +1188,7 @@ void mce_notify_process(void)
{
unsigned long pfn;
struct mce_info *mi = mce_find_info();
+ int flags = MF_ACTION_REQUIRED;
if (!mi)
mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
@@ -1200,8 +1203,9 @@ void mce_notify_process(void)
* doomed. We still need to mark the page as poisoned and alert any
* other users of the page.
*/
- if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
- mi->restartable == 0) {
+ if (!mi->restartable)
+ flags |= MF_MUST_KILL;
+ if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
pr_err("Memory error not recovered");
force_sig(SIGBUS, current);
}
@@ -1358,11 +1362,10 @@ static int __cpuinit __mcheck_cpu_cap_init(void)
b = cap & MCG_BANKCNT_MASK;
if (!banks)
- printk(KERN_INFO "mce: CPU supports %d MCE banks\n", b);
+ pr_info("CPU supports %d MCE banks\n", b);
if (b > MAX_NR_BANKS) {
- printk(KERN_WARNING
- "MCE: Using only %u machine check banks out of %u\n",
+ pr_warn("Using only %u machine check banks out of %u\n",
MAX_NR_BANKS, b);
b = MAX_NR_BANKS;
}
@@ -1419,7 +1422,7 @@ static void __mcheck_cpu_init_generic(void)
static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
- pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
+ pr_info("unknown CPU type - not enabling MCE support\n");
return -EOPNOTSUPP;
}
@@ -1574,7 +1577,7 @@ static void __mcheck_cpu_init_timer(void)
/* Handle unconfigured int18 (should never happen) */
static void unexpected_machine_check(struct pt_regs *regs, long error_code)
{
- printk(KERN_ERR "CPU#%d: Unexpected int18 (Machine Check).\n",
+ pr_err("CPU#%d: Unexpected int18 (Machine Check)\n",
smp_processor_id());
}
@@ -1893,8 +1896,7 @@ static int __init mcheck_enable(char *str)
get_option(&str, &monarch_timeout);
}
} else {
- printk(KERN_INFO "mce argument %s ignored. Please use /sys\n",
- str);
+ pr_info("mce argument %s ignored. Please use /sys\n", str);
return 0;
}
return 1;
@@ -2342,7 +2344,7 @@ static __init int mcheck_init_device(void)
return err;
}
-device_initcall(mcheck_init_device);
+device_initcall_sync(mcheck_init_device);
/*
* Old style boot options parsing. Only for compatibility.
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index f4873a64f46d..c4e916d77378 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -1,15 +1,17 @@
/*
- * (c) 2005, 2006 Advanced Micro Devices, Inc.
+ * (c) 2005-2012 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*
* Written by Jacob Shin - AMD, Inc.
*
- * Support : jacob.shin@amd.com
+ * Support: borislav.petkov@amd.com
*
* April 2006
* - added support for AMD Family 0x10 processors
+ * May 2012
+ * - major scrubbing
*
* All MC4_MISCi registers are shared between multi-cores
*/
@@ -25,6 +27,7 @@
#include <linux/cpu.h>
#include <linux/smp.h>
+#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
@@ -45,23 +48,15 @@
#define MASK_BLKPTR_LO 0xFF000000
#define MCG_XBLK_ADDR 0xC0000400
-struct threshold_block {
- unsigned int block;
- unsigned int bank;
- unsigned int cpu;
- u32 address;
- u16 interrupt_enable;
- bool interrupt_capable;
- u16 threshold_limit;
- struct kobject kobj;
- struct list_head miscj;
+static const char * const th_names[] = {
+ "load_store",
+ "insn_fetch",
+ "combined_unit",
+ "",
+ "northbridge",
+ "execution_unit",
};
-struct threshold_bank {
- struct kobject *kobj;
- struct threshold_block *blocks;
- cpumask_var_t cpus;
-};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
static unsigned char shared_bank[NR_BANKS] = {
@@ -84,6 +79,26 @@ struct thresh_restart {
u16 old_limit;
};
+static const char * const bank4_names(struct threshold_block *b)
+{
+ switch (b->address) {
+ /* MSR4_MISC0 */
+ case 0x00000413:
+ return "dram";
+
+ case 0xc0000408:
+ return "ht_links";
+
+ case 0xc0000409:
+ return "l3_cache";
+
+ default:
+ WARN(1, "Funny MSR: 0x%08x\n", b->address);
+ return "";
+ }
+};
+
+
static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
/*
@@ -224,8 +239,6 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
if (!block)
per_cpu(bank_map, cpu) |= (1 << bank);
- if (shared_bank[bank] && c->cpu_core_id)
- break;
memset(&b, 0, sizeof(b));
b.cpu = cpu;
@@ -326,7 +339,7 @@ struct threshold_attr {
#define SHOW_FIELDS(name) \
static ssize_t show_ ## name(struct threshold_block *b, char *buf) \
{ \
- return sprintf(buf, "%lx\n", (unsigned long) b->name); \
+ return sprintf(buf, "%lu\n", (unsigned long) b->name); \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
@@ -377,38 +390,21 @@ store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
return size;
}
-struct threshold_block_cross_cpu {
- struct threshold_block *tb;
- long retval;
-};
-
-static void local_error_count_handler(void *_tbcc)
-{
- struct threshold_block_cross_cpu *tbcc = _tbcc;
- struct threshold_block *b = tbcc->tb;
- u32 low, high;
-
- rdmsr(b->address, low, high);
- tbcc->retval = (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
-}
-
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
- struct threshold_block_cross_cpu tbcc = { .tb = b, };
+ u32 lo, hi;
- smp_call_function_single(b->cpu, local_error_count_handler, &tbcc, 1);
- return sprintf(buf, "%lx\n", tbcc.retval);
-}
-
-static ssize_t store_error_count(struct threshold_block *b,
- const char *buf, size_t count)
-{
- struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
+ rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);
- smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
- return 1;
+ return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
+ (THRESHOLD_MAX - b->threshold_limit)));
}
+static struct threshold_attr error_count = {
+ .attr = {.name = __stringify(error_count), .mode = 0444 },
+ .show = show_error_count,
+};
+
#define RW_ATTR(val) \
static struct threshold_attr val = { \
.attr = {.name = __stringify(val), .mode = 0644 }, \
@@ -418,7 +414,6 @@ static struct threshold_attr val = { \
RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
-RW_ATTR(error_count);
static struct attribute *default_attrs[] = {
&threshold_limit.attr,
@@ -517,7 +512,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
err = kobject_init_and_add(&b->kobj, &threshold_ktype,
per_cpu(threshold_banks, cpu)[bank]->kobj,
- "misc%i", block);
+ (bank == 4 ? bank4_names(b) : th_names[bank]));
if (err)
goto out_free;
recurse:
@@ -548,98 +543,91 @@ out_free:
return err;
}
-static __cpuinit long
-local_allocate_threshold_blocks(int cpu, unsigned int bank)
+static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
{
- return allocate_threshold_blocks(cpu, bank, 0,
- MSR_IA32_MC0_MISC + bank * 4);
+ struct list_head *head = &b->blocks->miscj;
+ struct threshold_block *pos = NULL;
+ struct threshold_block *tmp = NULL;
+ int err = 0;
+
+ err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
+ if (err)
+ return err;
+
+ list_for_each_entry_safe(pos, tmp, head, miscj) {
+
+ err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
+ if (err) {
+ list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
+ kobject_del(&pos->kobj);
+
+ return err;
+ }
+ }
+ return err;
}
-/* symlinks sibling shared banks to first core. first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
- int i, err = 0;
- struct threshold_bank *b = NULL;
struct device *dev = per_cpu(mce_device, cpu);
- char name[32];
+ struct amd_northbridge *nb = NULL;
+ struct threshold_bank *b = NULL;
+ const char *name = th_names[bank];
+ int err = 0;
- sprintf(name, "threshold_bank%i", bank);
+ if (shared_bank[bank]) {
-#ifdef CONFIG_SMP
- if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
- i = cpumask_first(cpu_llc_shared_mask(cpu));
+ nb = node_to_amd_nb(amd_get_nb_id(cpu));
+ WARN_ON(!nb);
- /* first core not up yet */
- if (cpu_data(i).cpu_core_id)
- goto out;
+ /* threshold descriptor already initialized on this node? */
+ if (nb->bank4) {
+ /* yes, use it */
+ b = nb->bank4;
+ err = kobject_add(b->kobj, &dev->kobj, name);
+ if (err)
+ goto out;
- /* already linked */
- if (per_cpu(threshold_banks, cpu)[bank])
- goto out;
+ per_cpu(threshold_banks, cpu)[bank] = b;
+ atomic_inc(&b->cpus);
- b = per_cpu(threshold_banks, i)[bank];
+ err = __threshold_add_blocks(b);
- if (!b)
goto out;
-
- err = sysfs_create_link(&dev->kobj, b->kobj, name);
- if (err)
- goto out;
-
- cpumask_copy(b->cpus, cpu_llc_shared_mask(cpu));
- per_cpu(threshold_banks, cpu)[bank] = b;
-
- goto out;
+ }
}
-#endif
b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
if (!b) {
err = -ENOMEM;
goto out;
}
- if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
- kfree(b);
- err = -ENOMEM;
- goto out;
- }
b->kobj = kobject_create_and_add(name, &dev->kobj);
- if (!b->kobj)
+ if (!b->kobj) {
+ err = -EINVAL;
goto out_free;
-
-#ifndef CONFIG_SMP
- cpumask_setall(b->cpus);
-#else
- cpumask_set_cpu(cpu, b->cpus);
-#endif
+ }
per_cpu(threshold_banks, cpu)[bank] = b;
- err = local_allocate_threshold_blocks(cpu, bank);
- if (err)
- goto out_free;
-
- for_each_cpu(i, b->cpus) {
- if (i == cpu)
- continue;
-
- dev = per_cpu(mce_device, i);
- if (dev)
- err = sysfs_create_link(&dev->kobj,b->kobj, name);
- if (err)
- goto out;
+ if (shared_bank[bank]) {
+ atomic_set(&b->cpus, 1);
- per_cpu(threshold_banks, i)[bank] = b;
+ /* nb is already initialized, see above */
+ WARN_ON(nb->bank4);
+ nb->bank4 = b;
}
- goto out;
+ err = allocate_threshold_blocks(cpu, bank, 0,
+ MSR_IA32_MC0_MISC + bank * 4);
+ if (!err)
+ goto out;
-out_free:
- per_cpu(threshold_banks, cpu)[bank] = NULL;
- free_cpumask_var(b->cpus);
+ out_free:
kfree(b);
-out:
+
+ out:
return err;
}
@@ -660,12 +648,6 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
return err;
}
-/*
- * let's be hotplug friendly.
- * in case of multiple core processors, the first core always takes ownership
- * of shared sysfs dir/files, and rest of the cores will be symlinked to it.
- */
-
static void deallocate_threshold_block(unsigned int cpu,
unsigned int bank)
{
@@ -686,41 +668,42 @@ static void deallocate_threshold_block(unsigned int cpu,
per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
+static void __threshold_remove_blocks(struct threshold_bank *b)
+{
+ struct threshold_block *pos = NULL;
+ struct threshold_block *tmp = NULL;
+
+ kobject_del(b->kobj);
+
+ list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
+ kobject_del(&pos->kobj);
+}
+
static void threshold_remove_bank(unsigned int cpu, int bank)
{
+ struct amd_northbridge *nb;
struct threshold_bank *b;
- struct device *dev;
- char name[32];
- int i = 0;
b = per_cpu(threshold_banks, cpu)[bank];
if (!b)
return;
+
if (!b->blocks)
goto free_out;
- sprintf(name, "threshold_bank%i", bank);
-
-#ifdef CONFIG_SMP
- /* sibling symlink */
- if (shared_bank[bank] && b->blocks->cpu != cpu) {
- dev = per_cpu(mce_device, cpu);
- sysfs_remove_link(&dev->kobj, name);
- per_cpu(threshold_banks, cpu)[bank] = NULL;
-
- return;
- }
-#endif
-
- /* remove all sibling symlinks before unregistering */
- for_each_cpu(i, b->cpus) {
- if (i == cpu)
- continue;
-
- dev = per_cpu(mce_device, i);
- if (dev)
- sysfs_remove_link(&dev->kobj, name);
- per_cpu(threshold_banks, i)[bank] = NULL;
+ if (shared_bank[bank]) {
+ if (!atomic_dec_and_test(&b->cpus)) {
+ __threshold_remove_blocks(b);
+ per_cpu(threshold_banks, cpu)[bank] = NULL;
+ return;
+ } else {
+ /*
+ * the last CPU on this node using the shared bank is
+ * going away, remove that bank now.
+ */
+ nb = node_to_amd_nb(amd_get_nb_id(cpu));
+ nb->bank4 = NULL;
+ }
}
deallocate_threshold_block(cpu, bank);
@@ -728,7 +711,6 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
free_out:
kobject_del(b->kobj);
kobject_put(b->kobj);
- free_cpumask_var(b->cpus);
kfree(b);
per_cpu(threshold_banks, cpu)[bank] = NULL;
}
@@ -777,4 +759,24 @@ static __init int threshold_init_device(void)
return 0;
}
-device_initcall(threshold_init_device);
+/*
+ * there are 3 funcs which need to be _initcalled in a logic sequence:
+ * 1. xen_late_init_mcelog
+ * 2. mcheck_init_device
+ * 3. threshold_init_device
+ *
+ * xen_late_init_mcelog must register xen_mce_chrdev_device before
+ * native mce_chrdev_device registration if running under xen platform;
+ *
+ * mcheck_init_device should be inited before threshold_init_device to
+ * initialize mce_device, otherwise a NULL ptr dereference will cause panic.
+ *
+ * so we use following _initcalls
+ * 1. device_initcall(xen_late_init_mcelog);
+ * 2. device_initcall_sync(mcheck_init_device);
+ * 3. late_initcall(threshold_init_device);
+ *
+ * when running under xen, the initcall order is 1,2,3;
+ * on baremetal, we skip 1 and we do only 2 and 3.
+ */
+late_initcall(threshold_init_device);
diff --git a/arch/x86/kernel/cpu/mkcapflags.pl b/arch/x86/kernel/cpu/mkcapflags.pl
index dfea390e1608..c7b3fe2d72e0 100644
--- a/arch/x86/kernel/cpu/mkcapflags.pl
+++ b/arch/x86/kernel/cpu/mkcapflags.pl
@@ -1,4 +1,4 @@
-#!/usr/bin/perl
+#!/usr/bin/perl -w
#
# Generate the x86_cap_flags[] array from include/asm-x86/cpufeature.h
#
@@ -11,22 +11,35 @@ open(OUT, "> $out\0") or die "$0: cannot create: $out: $!\n";
print OUT "#include <asm/cpufeature.h>\n\n";
print OUT "const char * const x86_cap_flags[NCAPINTS*32] = {\n";
+%features = ();
+$err = 0;
+
while (defined($line = <IN>)) {
if ($line =~ /^\s*\#\s*define\s+(X86_FEATURE_(\S+))\s+(.*)$/) {
$macro = $1;
- $feature = $2;
+ $feature = "\L$2";
$tail = $3;
if ($tail =~ /\/\*\s*\"([^"]*)\".*\*\//) {
- $feature = $1;
+ $feature = "\L$1";
}
- if ($feature ne '') {
- printf OUT "\t%-32s = \"%s\",\n",
- "[$macro]", "\L$feature";
+ next if ($feature eq '');
+
+ if ($features{$feature}++) {
+ print STDERR "$in: duplicate feature name: $feature\n";
+ $err++;
}
+ printf OUT "\t%-32s = \"%s\",\n", "[$macro]", $feature;
}
}
print OUT "};\n";
close(IN);
close(OUT);
+
+if ($err) {
+ unlink($out);
+ exit(1);
+}
+
+exit(0);
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index bdda2e6c673b..35ffda5d0727 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -258,11 +258,11 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
/* Compute the maximum size with which we can make a range: */
if (range_startk)
- max_align = ffs(range_startk) - 1;
+ max_align = __ffs(range_startk);
else
- max_align = 32;
+ max_align = BITS_PER_LONG - 1;
- align = fls(range_sizek) - 1;
+ align = __fls(range_sizek);
if (align > max_align)
align = max_align;
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index 75772ae6c65f..e9fe907cd249 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -361,11 +361,7 @@ static void __init print_mtrr_state(void)
}
pr_debug("MTRR variable ranges %sabled:\n",
mtrr_state.enabled & 2 ? "en" : "dis");
- if (size_or_mask & 0xffffffffUL)
- high_width = ffs(size_or_mask & 0xffffffffUL) - 1;
- else
- high_width = ffs(size_or_mask>>32) + 32 - 1;
- high_width = (high_width - (32 - PAGE_SHIFT) + 3) / 4;
+ high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
for (i = 0; i < num_var_ranges; ++i) {
if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c4706cf9c011..29557aa06dda 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -35,17 +35,6 @@
#include "perf_event.h"
-#if 0
-#undef wrmsrl
-#define wrmsrl(msr, val) \
-do { \
- trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
- (unsigned long)(val)); \
- native_write_msr((msr), (u32)((u64)(val)), \
- (u32)((u64)(val) >> 32)); \
-} while (0)
-#endif
-
struct x86_pmu x86_pmu __read_mostly;
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
@@ -74,7 +63,7 @@ u64 x86_perf_event_update(struct perf_event *event)
int idx = hwc->idx;
s64 delta;
- if (idx == X86_PMC_IDX_FIXED_BTS)
+ if (idx == INTEL_PMC_IDX_FIXED_BTS)
return 0;
/*
@@ -86,7 +75,7 @@ u64 x86_perf_event_update(struct perf_event *event)
*/
again:
prev_raw_count = local64_read(&hwc->prev_count);
- rdmsrl(hwc->event_base, new_raw_count);
+ rdpmcl(hwc->event_base_rdpmc, new_raw_count);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
@@ -189,7 +178,7 @@ static void release_pmc_hardware(void) {}
static bool check_hw_exists(void)
{
- u64 val, val_new = 0;
+ u64 val, val_new = ~0;
int i, reg, ret = 0;
/*
@@ -222,8 +211,9 @@ static bool check_hw_exists(void)
* that don't trap on the MSR access and always return 0s.
*/
val = 0xabcdUL;
- ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
- ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
+ reg = x86_pmu_event_addr(0);
+ ret = wrmsrl_safe(reg, val);
+ ret |= rdmsrl_safe(reg, &val_new);
if (ret || val != val_new)
goto msr_fail;
@@ -240,6 +230,7 @@ bios_fail:
msr_fail:
printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
+ printk(KERN_ERR "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new);
return false;
}
@@ -388,7 +379,7 @@ int x86_pmu_hw_config(struct perf_event *event)
int precise = 0;
/* Support for constant skid */
- if (x86_pmu.pebs_active) {
+ if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
precise++;
/* Support for IP fixup */
@@ -637,8 +628,8 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
c = sched->constraints[sched->state.event];
/* Prefer fixed purpose counters */
- if (x86_pmu.num_counters_fixed) {
- idx = X86_PMC_IDX_FIXED;
+ if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
+ idx = INTEL_PMC_IDX_FIXED;
for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
if (!__test_and_set_bit(idx, sched->state.used))
goto done;
@@ -646,7 +637,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
}
/* Grab the first unused counter starting with idx */
idx = sched->state.counter;
- for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_FIXED) {
+ for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
if (!__test_and_set_bit(idx, sched->state.used))
goto done;
}
@@ -704,8 +695,8 @@ static bool perf_sched_next_event(struct perf_sched *sched)
/*
* Assign a counter for each event.
*/
-static int perf_assign_events(struct event_constraint **constraints, int n,
- int wmin, int wmax, int *assign)
+int perf_assign_events(struct event_constraint **constraints, int n,
+ int wmin, int wmax, int *assign)
{
struct perf_sched sched;
@@ -824,15 +815,17 @@ static inline void x86_assign_hw_event(struct perf_event *event,
hwc->last_cpu = smp_processor_id();
hwc->last_tag = ++cpuc->tags[i];
- if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
+ if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
hwc->config_base = 0;
hwc->event_base = 0;
- } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
+ } else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
- hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
+ hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
+ hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
} else {
hwc->config_base = x86_pmu_config_addr(hwc->idx);
hwc->event_base = x86_pmu_event_addr(hwc->idx);
+ hwc->event_base_rdpmc = hwc->idx;
}
}
@@ -930,7 +923,7 @@ int x86_perf_event_set_period(struct perf_event *event)
s64 period = hwc->sample_period;
int ret = 0, idx = hwc->idx;
- if (idx == X86_PMC_IDX_FIXED_BTS)
+ if (idx == INTEL_PMC_IDX_FIXED_BTS)
return 0;
/*
@@ -1316,7 +1309,6 @@ static struct attribute_group x86_pmu_format_group = {
static int __init init_hw_perf_events(void)
{
struct x86_pmu_quirk *quirk;
- struct event_constraint *c;
int err;
pr_info("Performance Events: ");
@@ -1347,21 +1339,8 @@ static int __init init_hw_perf_events(void)
for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
quirk->func();
- if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
- WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
- x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
- x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
- }
- x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-
- if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
- WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
- x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
- x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
- }
-
- x86_pmu.intel_ctrl |=
- ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+ if (!x86_pmu.intel_ctrl)
+ x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
perf_events_lapic_init();
register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1370,22 +1349,6 @@ static int __init init_hw_perf_events(void)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
0, x86_pmu.num_counters, 0);
- if (x86_pmu.event_constraints) {
- /*
- * event on fixed counter2 (REF_CYCLES) only works on this
- * counter, so do not extend mask to generic counters
- */
- for_each_event_constraint(c, x86_pmu.event_constraints) {
- if (c->cmask != X86_RAW_EVENT_MASK
- || c->idxmsk64 == X86_PMC_MSK_FIXED_REF_CYCLES) {
- continue;
- }
-
- c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
- c->weight += x86_pmu.num_counters;
- }
- }
-
x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
@@ -1620,8 +1583,8 @@ static int x86_pmu_event_idx(struct perf_event *event)
if (!x86_pmu.attr_rdpmc)
return 0;
- if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
- idx -= X86_PMC_IDX_FIXED;
+ if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
+ idx -= INTEL_PMC_IDX_FIXED;
idx |= 1 << 30;
}
@@ -1649,7 +1612,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- unsigned long val = simple_strtoul(buf, NULL, 0);
+ unsigned long val;
+ ssize_t ret;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
if (!!val != !!x86_pmu.attr_rdpmc) {
x86_pmu.attr_rdpmc = !!val;
@@ -1682,13 +1650,20 @@ static void x86_pmu_flush_branch_stack(void)
x86_pmu.flush_branch_stack();
}
+void perf_check_microcode(void)
+{
+ if (x86_pmu.check_microcode)
+ x86_pmu.check_microcode();
+}
+EXPORT_SYMBOL_GPL(perf_check_microcode);
+
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
- .attr_groups = x86_pmu_attr_groups,
+ .attr_groups = x86_pmu_attr_groups,
- .event_init = x86_pmu_event_init,
+ .event_init = x86_pmu_event_init,
.add = x86_pmu_add,
.del = x86_pmu_del,
@@ -1696,11 +1671,11 @@ static struct pmu pmu = {
.stop = x86_pmu_stop,
.read = x86_pmu_read,
- .start_txn = x86_pmu_start_txn,
- .cancel_txn = x86_pmu_cancel_txn,
- .commit_txn = x86_pmu_commit_txn,
+ .start_txn = x86_pmu_start_txn,
+ .cancel_txn = x86_pmu_cancel_txn,
+ .commit_txn = x86_pmu_commit_txn,
- .event_idx = x86_pmu_event_idx,
+ .event_idx = x86_pmu_event_idx,
.flush_branch_stack = x86_pmu_flush_branch_stack,
};
@@ -1863,7 +1838,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
} else {
- if (user_mode(regs))
+ if (!kernel_ip(regs->ip))
misc |= PERF_RECORD_MISC_USER;
else
misc |= PERF_RECORD_MISC_KERNEL;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 7241e2fc3c17..a15df4be151f 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -14,6 +14,18 @@
#include <linux/perf_event.h>
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val) \
+do { \
+ unsigned int _msr = (msr); \
+ u64 _val = (val); \
+ trace_printk("wrmsrl(%x, %Lx)\n", (unsigned int)(_msr), \
+ (unsigned long long)(_val)); \
+ native_write_msr((_msr), (u32)(_val), (u32)(_val >> 32)); \
+} while (0)
+#endif
+
/*
* | NHM/WSM | SNB |
* register -------------------------------
@@ -57,7 +69,7 @@ struct amd_nb {
};
/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS 4
+#define MAX_PEBS_EVENTS 8
/*
* A debug store configuration.
@@ -349,6 +361,8 @@ struct x86_pmu {
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
+
+ void (*check_microcode)(void);
void (*flush_branch_stack)(void);
/*
@@ -360,12 +374,16 @@ struct x86_pmu {
/*
* Intel DebugStore bits
*/
- int bts, pebs;
- int bts_active, pebs_active;
+ int bts :1,
+ bts_active :1,
+ pebs :1,
+ pebs_active :1,
+ pebs_broken :1;
int pebs_record_size;
void (*drain_pebs)(struct pt_regs *regs);
struct event_constraint *pebs_constraints;
void (*pebs_aliases)(struct perf_event *event);
+ int max_pebs_events;
/*
* Intel LBR
@@ -468,6 +486,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
void x86_pmu_enable_all(int added);
+int perf_assign_events(struct event_constraint **constraints, int n,
+ int wmin, int wmax, int *assign);
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
void x86_pmu_stop(struct perf_event *event, int flags);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 11a4eb9131d5..4528ae7b6ec4 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -366,7 +366,7 @@ static void amd_pmu_cpu_starting(int cpu)
cpuc->perf_ctr_virt_mask = AMD_PERFMON_EVENTSEL_HOSTONLY;
- if (boot_cpu_data.x86_max_cores < 2 || boot_cpu_data.x86 == 0x15)
+ if (boot_cpu_data.x86_max_cores < 2)
return;
nb_id = amd_get_nb_id(cpu);
@@ -422,35 +422,6 @@ static struct attribute *amd_format_attr[] = {
NULL,
};
-static __initconst const struct x86_pmu amd_pmu = {
- .name = "AMD",
- .handle_irq = x86_pmu_handle_irq,
- .disable_all = x86_pmu_disable_all,
- .enable_all = x86_pmu_enable_all,
- .enable = x86_pmu_enable_event,
- .disable = x86_pmu_disable_event,
- .hw_config = amd_pmu_hw_config,
- .schedule_events = x86_schedule_events,
- .eventsel = MSR_K7_EVNTSEL0,
- .perfctr = MSR_K7_PERFCTR0,
- .event_map = amd_pmu_event_map,
- .max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = AMD64_NUM_COUNTERS,
- .cntval_bits = 48,
- .cntval_mask = (1ULL << 48) - 1,
- .apic = 1,
- /* use highest bit to detect overflow */
- .max_period = (1ULL << 47) - 1,
- .get_event_constraints = amd_get_event_constraints,
- .put_event_constraints = amd_put_event_constraints,
-
- .format_attrs = amd_format_attr,
-
- .cpu_prepare = amd_pmu_cpu_prepare,
- .cpu_starting = amd_pmu_cpu_starting,
- .cpu_dead = amd_pmu_cpu_dead,
-};
-
/* AMD Family 15h */
#define AMD_EVENT_TYPE_MASK 0x000000F0ULL
@@ -597,8 +568,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
}
}
-static __initconst const struct x86_pmu amd_pmu_f15h = {
- .name = "AMD Family 15h",
+static __initconst const struct x86_pmu amd_pmu = {
+ .name = "AMD",
.handle_irq = x86_pmu_handle_irq,
.disable_all = x86_pmu_disable_all,
.enable_all = x86_pmu_enable_all,
@@ -606,50 +577,68 @@ static __initconst const struct x86_pmu amd_pmu_f15h = {
.disable = x86_pmu_disable_event,
.hw_config = amd_pmu_hw_config,
.schedule_events = x86_schedule_events,
- .eventsel = MSR_F15H_PERF_CTL,
- .perfctr = MSR_F15H_PERF_CTR,
+ .eventsel = MSR_K7_EVNTSEL0,
+ .perfctr = MSR_K7_PERFCTR0,
.event_map = amd_pmu_event_map,
.max_events = ARRAY_SIZE(amd_perfmon_event_map),
- .num_counters = AMD64_NUM_COUNTERS_F15H,
+ .num_counters = AMD64_NUM_COUNTERS,
.cntval_bits = 48,
.cntval_mask = (1ULL << 48) - 1,
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
- .get_event_constraints = amd_get_event_constraints_f15h,
- /* nortbridge counters not yet implemented: */
-#if 0
+ .get_event_constraints = amd_get_event_constraints,
.put_event_constraints = amd_put_event_constraints,
+ .format_attrs = amd_format_attr,
+
.cpu_prepare = amd_pmu_cpu_prepare,
- .cpu_dead = amd_pmu_cpu_dead,
-#endif
.cpu_starting = amd_pmu_cpu_starting,
- .format_attrs = amd_format_attr,
+ .cpu_dead = amd_pmu_cpu_dead,
};
+static int setup_event_constraints(void)
+{
+ if (boot_cpu_data.x86 >= 0x15)
+ x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
+ return 0;
+}
+
+static int setup_perfctr_core(void)
+{
+ if (!cpu_has_perfctr_core) {
+ WARN(x86_pmu.get_event_constraints == amd_get_event_constraints_f15h,
+ KERN_ERR "Odd, counter constraints enabled but no core perfctrs detected!");
+ return -ENODEV;
+ }
+
+ WARN(x86_pmu.get_event_constraints == amd_get_event_constraints,
+ KERN_ERR "hw perf events core counters need constraints handler!");
+
+ /*
+ * If core performance counter extensions exists, we must use
+ * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
+ * x86_pmu_addr_offset().
+ */
+ x86_pmu.eventsel = MSR_F15H_PERF_CTL;
+ x86_pmu.perfctr = MSR_F15H_PERF_CTR;
+ x86_pmu.num_counters = AMD64_NUM_COUNTERS_CORE;
+
+ printk(KERN_INFO "perf: AMD core performance counters detected\n");
+
+ return 0;
+}
+
__init int amd_pmu_init(void)
{
/* Performance-monitoring supported from K7 and later: */
if (boot_cpu_data.x86 < 6)
return -ENODEV;
- /*
- * If core performance counter extensions exists, it must be
- * family 15h, otherwise fail. See x86_pmu_addr_offset().
- */
- switch (boot_cpu_data.x86) {
- case 0x15:
- if (!cpu_has_perfctr_core)
- return -ENODEV;
- x86_pmu = amd_pmu_f15h;
- break;
- default:
- if (cpu_has_perfctr_core)
- return -ENODEV;
- x86_pmu = amd_pmu;
- break;
- }
+ x86_pmu = amd_pmu;
+
+ setup_event_constraints();
+ setup_perfctr_core();
/* Events are common for all AMDs */
memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 187c294bc658..7a8b9d0abcaa 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -5,6 +5,8 @@
* among events on a single PMU.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -21,14 +23,14 @@
*/
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
- [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
- [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
- [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
- [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
- [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
- [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
- [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
+ [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = 0x0300, /* pseudo-encoding */
};
static struct event_constraint intel_core_event_constraints[] __read_mostly =
@@ -747,7 +749,7 @@ static void intel_pmu_disable_all(void)
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
- if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
+ if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
intel_pmu_disable_bts();
intel_pmu_pebs_disable_all();
@@ -763,9 +765,9 @@ static void intel_pmu_enable_all(int added)
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
- if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
+ if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
struct perf_event *event =
- cpuc->events[X86_PMC_IDX_FIXED_BTS];
+ cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
if (WARN_ON_ONCE(!event))
return;
@@ -871,7 +873,7 @@ static inline void intel_pmu_ack_status(u64 ack)
static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
- int idx = hwc->idx - X86_PMC_IDX_FIXED;
+ int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
u64 ctrl_val, mask;
mask = 0xfULL << (idx * 4);
@@ -886,7 +888,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+ if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
intel_pmu_drain_bts_buffer();
return;
@@ -915,7 +917,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
- int idx = hwc->idx - X86_PMC_IDX_FIXED;
+ int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
u64 ctrl_val, bits, mask;
/*
@@ -949,7 +951,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
- if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
+ if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
if (!__this_cpu_read(cpu_hw_events.enabled))
return;
@@ -1000,14 +1002,14 @@ static void intel_pmu_reset(void)
local_irq_save(flags);
- printk("clearing PMU state on CPU#%d\n", smp_processor_id());
+ pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
- checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
- checking_wrmsrl(x86_pmu_event_addr(idx), 0ull);
+ wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
+ wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
}
for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
- checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+ wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
if (ds)
ds->bts_index = ds->bts_buffer_base;
@@ -1707,16 +1709,61 @@ static __init void intel_clovertown_quirk(void)
* But taken together it might just make sense to not enable PEBS on
* these chips.
*/
- printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+ pr_warn("PEBS disabled due to CPU errata\n");
x86_pmu.pebs = 0;
x86_pmu.pebs_constraints = NULL;
}
+static int intel_snb_pebs_broken(int cpu)
+{
+ u32 rev = UINT_MAX; /* default to broken for unknown models */
+
+ switch (cpu_data(cpu).x86_model) {
+ case 42: /* SNB */
+ rev = 0x28;
+ break;
+
+ case 45: /* SNB-EP */
+ switch (cpu_data(cpu).x86_mask) {
+ case 6: rev = 0x618; break;
+ case 7: rev = 0x70c; break;
+ }
+ }
+
+ return (cpu_data(cpu).microcode < rev);
+}
+
+static void intel_snb_check_microcode(void)
+{
+ int pebs_broken = 0;
+ int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if ((pebs_broken = intel_snb_pebs_broken(cpu)))
+ break;
+ }
+ put_online_cpus();
+
+ if (pebs_broken == x86_pmu.pebs_broken)
+ return;
+
+ /*
+ * Serialized by the microcode lock..
+ */
+ if (x86_pmu.pebs_broken) {
+ pr_info("PEBS enabled due to microcode update\n");
+ x86_pmu.pebs_broken = 0;
+ } else {
+ pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
+ x86_pmu.pebs_broken = 1;
+ }
+}
+
static __init void intel_sandybridge_quirk(void)
{
- printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
- x86_pmu.pebs = 0;
- x86_pmu.pebs_constraints = NULL;
+ x86_pmu.check_microcode = intel_snb_check_microcode;
+ intel_snb_check_microcode();
}
static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
@@ -1736,8 +1783,8 @@ static __init void intel_arch_events_quirk(void)
/* disable event that reported as not presend by cpuid */
for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
- printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n",
- intel_arch_events_map[bit].name);
+ pr_warn("CPUID marked event: \'%s\' unavailable\n",
+ intel_arch_events_map[bit].name);
}
}
@@ -1756,7 +1803,7 @@ static __init void intel_nehalem_quirk(void)
intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
ebx.split.no_branch_misses_retired = 0;
x86_pmu.events_maskl = ebx.full;
- printk(KERN_INFO "CPU erratum AAJ80 worked around\n");
+ pr_info("CPU erratum AAJ80 worked around\n");
}
}
@@ -1765,6 +1812,7 @@ __init int intel_pmu_init(void)
union cpuid10_edx edx;
union cpuid10_eax eax;
union cpuid10_ebx ebx;
+ struct event_constraint *c;
unsigned int unused;
int version;
@@ -1800,6 +1848,8 @@ __init int intel_pmu_init(void)
x86_pmu.events_maskl = ebx.full;
x86_pmu.events_mask_len = eax.split.mask_length;
+ x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
/*
* Quirk: v2 perfmon does not report fixed-purpose events, so
* assume at least 3 events:
@@ -1951,5 +2001,37 @@ __init int intel_pmu_init(void)
}
}
+ if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
+ WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+ x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+ x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
+ }
+ x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+
+ if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+ WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+ x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+ x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
+ }
+
+ x86_pmu.intel_ctrl |=
+ ((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+
+ if (x86_pmu.event_constraints) {
+ /*
+ * event on fixed counter2 (REF_CYCLES) only works on this
+ * counter, so do not extend mask to generic counters
+ */
+ for_each_event_constraint(c, x86_pmu.event_constraints) {
+ if (c->cmask != X86_RAW_EVENT_MASK
+ || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+ continue;
+ }
+
+ c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+ c->weight += x86_pmu.num_counters;
+ }
+ }
+
return 0;
}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 35e2192df9f4..629ae0b7ad90 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -248,7 +248,7 @@ void reserve_ds_buffers(void)
*/
struct event_constraint bts_constraint =
- EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
+ EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
void intel_pmu_enable_bts(u64 config)
{
@@ -295,7 +295,7 @@ int intel_pmu_drain_bts_buffer(void)
u64 to;
u64 flags;
};
- struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
+ struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
struct bts_record *at, *top;
struct perf_output_handle handle;
struct perf_event_header header;
@@ -620,7 +620,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
* Should not happen, we program the threshold at 1 and do not
* set a reset value.
*/
- WARN_ON_ONCE(n > 1);
+ WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
at += n - 1;
__intel_pmu_pebs_event(event, iregs, at);
@@ -651,10 +651,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
* Should not happen, we program the threshold at 1 and do not
* set a reset value.
*/
- WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+ WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
for ( ; at < top; at++) {
- for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+ for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
event = cpuc->events[bit];
if (!test_bit(bit, cpuc->active_mask))
continue;
@@ -670,7 +670,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
break;
}
- if (!event || bit >= MAX_PEBS_EVENTS)
+ if (!event || bit >= x86_pmu.max_pebs_events)
continue;
__intel_pmu_pebs_event(event, iregs, at);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
new file mode 100644
index 000000000000..19faffc60886
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -0,0 +1,1850 @@
+#include "perf_event_intel_uncore.h"
+
+static struct intel_uncore_type *empty_uncore[] = { NULL, };
+static struct intel_uncore_type **msr_uncores = empty_uncore;
+static struct intel_uncore_type **pci_uncores = empty_uncore;
+/* pci bus to socket mapping */
+static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
+
+static DEFINE_RAW_SPINLOCK(uncore_box_lock);
+
+/* mask of cpus that collect uncore events */
+static cpumask_t uncore_cpu_mask;
+
+/* constraint for the fixed counter */
+static struct event_constraint constraint_fixed =
+ EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL);
+static struct event_constraint constraint_empty =
+ EVENT_CONSTRAINT(0, 0, 0);
+
+DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
+DEFINE_UNCORE_FORMAT_ATTR(tid_en, tid_en, "config:19");
+DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
+DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(cmask8, cmask, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(thresh5, thresh, "config:24-28");
+DEFINE_UNCORE_FORMAT_ATTR(occ_sel, occ_sel, "config:14-15");
+DEFINE_UNCORE_FORMAT_ATTR(occ_invert, occ_invert, "config:30");
+DEFINE_UNCORE_FORMAT_ATTR(occ_edge, occ_edge, "config:14-51");
+DEFINE_UNCORE_FORMAT_ATTR(filter_tid, filter_tid, "config1:0-4");
+DEFINE_UNCORE_FORMAT_ATTR(filter_nid, filter_nid, "config1:10-17");
+DEFINE_UNCORE_FORMAT_ATTR(filter_state, filter_state, "config1:18-22");
+DEFINE_UNCORE_FORMAT_ATTR(filter_opc, filter_opc, "config1:23-31");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand0, filter_brand0, "config1:0-7");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand1, filter_brand1, "config1:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand2, filter_brand2, "config1:16-23");
+DEFINE_UNCORE_FORMAT_ATTR(filter_brand3, filter_brand3, "config1:24-31");
+
+/* Sandy Bridge-EP uncore support */
+static struct intel_uncore_type snbep_uncore_cbox;
+static struct intel_uncore_type snbep_uncore_pcu;
+
+static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int box_ctl = uncore_pci_box_ctl(box);
+ u32 config;
+
+ pci_read_config_dword(pdev, box_ctl, &config);
+ config |= SNBEP_PMON_BOX_CTL_FRZ;
+ pci_write_config_dword(pdev, box_ctl, config);
+}
+
+static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int box_ctl = uncore_pci_box_ctl(box);
+ u32 config;
+
+ pci_read_config_dword(pdev, box_ctl, &config);
+ config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+ pci_write_config_dword(pdev, box_ctl, config);
+}
+
+static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, hwc->config_base, hwc->config |
+ SNBEP_PMON_CTL_EN);
+}
+
+static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+
+ pci_write_config_dword(pdev, hwc->config_base, hwc->config);
+}
+
+static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count;
+
+ pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
+ pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
+ return count;
+}
+
+static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL,
+ SNBEP_PMON_BOX_CTL_INT);
+}
+
+static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+ u64 config;
+ unsigned msr;
+
+ msr = uncore_msr_box_ctl(box);
+ if (msr) {
+ rdmsrl(msr, config);
+ config |= SNBEP_PMON_BOX_CTL_FRZ;
+ wrmsrl(msr, config);
+ return;
+ }
+}
+
+static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ u64 config;
+ unsigned msr;
+
+ msr = uncore_msr_box_ctl(box);
+ if (msr) {
+ rdmsrl(msr, config);
+ config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+ wrmsrl(msr, config);
+ return;
+ }
+}
+
+static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE)
+ wrmsrl(reg1->reg, reg1->config);
+
+ wrmsrl(hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ wrmsrl(hwc->config_base, hwc->config);
+}
+
+static u64 snbep_uncore_msr_read_counter(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count;
+
+ rdmsrl(hwc->event_base, count);
+ return count;
+}
+
+static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+ if (msr)
+ wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
+}
+
+static struct event_constraint *
+snbep_uncore_get_constraint(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct intel_uncore_extra_reg *er;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+ unsigned long flags;
+ bool ok = false;
+
+ if (reg1->idx == EXTRA_REG_NONE || (box->phys_id >= 0 && reg1->alloc))
+ return NULL;
+
+ er = &box->shared_regs[reg1->idx];
+ raw_spin_lock_irqsave(&er->lock, flags);
+ if (!atomic_read(&er->ref) || er->config1 == reg1->config) {
+ atomic_inc(&er->ref);
+ er->config1 = reg1->config;
+ ok = true;
+ }
+ raw_spin_unlock_irqrestore(&er->lock, flags);
+
+ if (ok) {
+ if (box->phys_id >= 0)
+ reg1->alloc = 1;
+ return NULL;
+ }
+ return &constraint_empty;
+}
+
+static void snbep_uncore_put_constraint(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct intel_uncore_extra_reg *er;
+ struct hw_perf_event_extra *reg1 = &event->hw.extra_reg;
+
+ if (box->phys_id < 0 || !reg1->alloc)
+ return;
+
+ er = &box->shared_regs[reg1->idx];
+ atomic_dec(&er->ref);
+ reg1->alloc = 0;
+}
+
+static int snbep_uncore_hw_config(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+
+ if (box->pmu->type == &snbep_uncore_cbox) {
+ reg1->reg = SNBEP_C0_MSR_PMON_BOX_FILTER +
+ SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
+ reg1->config = event->attr.config1 &
+ SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK;
+ } else if (box->pmu->type == &snbep_uncore_pcu) {
+ reg1->reg = SNBEP_PCU_MSR_PMON_BOX_FILTER;
+ reg1->config = event->attr.config1 &
+ SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK;
+ } else {
+ return 0;
+ }
+ reg1->idx = 0;
+ return 0;
+}
+
+static struct attribute *snbep_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_ubox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh5.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_cbox_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_tid_en.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh8.attr,
+ &format_attr_filter_tid.attr,
+ &format_attr_filter_nid.attr,
+ &format_attr_filter_state.attr,
+ &format_attr_filter_opc.attr,
+ NULL,
+};
+
+static struct attribute *snbep_uncore_pcu_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_occ_sel.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_thresh5.attr,
+ &format_attr_occ_invert.attr,
+ &format_attr_occ_edge.attr,
+ &format_attr_filter_brand0.attr,
+ &format_attr_filter_brand1.attr,
+ &format_attr_filter_brand2.attr,
+ &format_attr_filter_brand3.attr,
+ NULL,
+};
+
+static struct uncore_event_desc snbep_uncore_imc_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_read, "event=0x04,umask=0x03"),
+ INTEL_UNCORE_EVENT_DESC(cas_count_write, "event=0x04,umask=0x0c"),
+ { /* end: all zeroes */ },
+};
+
+static struct uncore_event_desc snbep_uncore_qpi_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
+ INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
+ INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
+ INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
+ { /* end: all zeroes */ },
+};
+
+static struct attribute_group snbep_uncore_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_ubox_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_ubox_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_cbox_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_cbox_formats_attr,
+};
+
+static struct attribute_group snbep_uncore_pcu_format_group = {
+ .name = "format",
+ .attrs = snbep_uncore_pcu_formats_attr,
+};
+
+static struct intel_uncore_ops snbep_uncore_msr_ops = {
+ .init_box = snbep_uncore_msr_init_box,
+ .disable_box = snbep_uncore_msr_disable_box,
+ .enable_box = snbep_uncore_msr_enable_box,
+ .disable_event = snbep_uncore_msr_disable_event,
+ .enable_event = snbep_uncore_msr_enable_event,
+ .read_counter = snbep_uncore_msr_read_counter,
+ .get_constraint = snbep_uncore_get_constraint,
+ .put_constraint = snbep_uncore_put_constraint,
+ .hw_config = snbep_uncore_hw_config,
+};
+
+static struct intel_uncore_ops snbep_uncore_pci_ops = {
+ .init_box = snbep_uncore_pci_init_box,
+ .disable_box = snbep_uncore_pci_disable_box,
+ .enable_box = snbep_uncore_pci_enable_box,
+ .disable_event = snbep_uncore_pci_disable_event,
+ .enable_event = snbep_uncore_pci_enable_event,
+ .read_counter = snbep_uncore_pci_read_counter,
+};
+
+static struct event_constraint snbep_uncore_cbox_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x01, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x02, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x04, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x05, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x07, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x13, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x1b, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0x1c, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0x1d, 0xc),
+ UNCORE_EVENT_CONSTRAINT(0x1e, 0xc),
+ EVENT_CONSTRAINT_OVERLAP(0x1f, 0xe, 0xff),
+ UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x35, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x36, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x38, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x39, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x3b, 0x1),
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint snbep_uncore_r2pcie_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x12, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ EVENT_CONSTRAINT_END
+};
+
+static struct event_constraint snbep_uncore_r3qpi_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x10, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x11, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x12, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x13, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x20, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x21, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x22, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x23, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x24, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x25, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x26, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x30, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x31, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x32, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x33, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x34, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x36, 0x3),
+ UNCORE_EVENT_CONSTRAINT(0x37, 0x3),
+ EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snbep_uncore_ubox = {
+ .name = "ubox",
+ .num_counters = 2,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .fixed_ctr_bits = 48,
+ .perf_ctr = SNBEP_U_MSR_PMON_CTR0,
+ .event_ctl = SNBEP_U_MSR_PMON_CTL0,
+ .event_mask = SNBEP_U_MSR_PMON_RAW_EVENT_MASK,
+ .fixed_ctr = SNBEP_U_MSR_PMON_UCLK_FIXED_CTR,
+ .fixed_ctl = SNBEP_U_MSR_PMON_UCLK_FIXED_CTL,
+ .ops = &snbep_uncore_msr_ops,
+ .format_group = &snbep_uncore_ubox_format_group,
+};
+
+static struct intel_uncore_type snbep_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 4,
+ .num_boxes = 8,
+ .perf_ctr_bits = 44,
+ .event_ctl = SNBEP_C0_MSR_PMON_CTL0,
+ .perf_ctr = SNBEP_C0_MSR_PMON_CTR0,
+ .event_mask = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_C0_MSR_PMON_BOX_CTL,
+ .msr_offset = SNBEP_CBO_MSR_OFFSET,
+ .num_shared_regs = 1,
+ .constraints = snbep_uncore_cbox_constraints,
+ .ops = &snbep_uncore_msr_ops,
+ .format_group = &snbep_uncore_cbox_format_group,
+};
+
+static struct intel_uncore_type snbep_uncore_pcu = {
+ .name = "pcu",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCU_MSR_PMON_CTR0,
+ .event_ctl = SNBEP_PCU_MSR_PMON_CTL0,
+ .event_mask = SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCU_MSR_PMON_BOX_CTL,
+ .num_shared_regs = 1,
+ .ops = &snbep_uncore_msr_ops,
+ .format_group = &snbep_uncore_pcu_format_group,
+};
+
+static struct intel_uncore_type *snbep_msr_uncores[] = {
+ &snbep_uncore_ubox,
+ &snbep_uncore_cbox,
+ &snbep_uncore_pcu,
+ NULL,
+};
+
+#define SNBEP_UNCORE_PCI_COMMON_INIT() \
+ .perf_ctr = SNBEP_PCI_PMON_CTR0, \
+ .event_ctl = SNBEP_PCI_PMON_CTL0, \
+ .event_mask = SNBEP_PMON_RAW_EVENT_MASK, \
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL, \
+ .ops = &snbep_uncore_pci_ops, \
+ .format_group = &snbep_uncore_format_group
+
+static struct intel_uncore_type snbep_uncore_ha = {
+ .name = "ha",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_imc = {
+ .name = "imc",
+ .num_counters = 4,
+ .num_boxes = 4,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
+ .fixed_ctr = SNBEP_MC_CHy_PCI_PMON_FIXED_CTR,
+ .fixed_ctl = SNBEP_MC_CHy_PCI_PMON_FIXED_CTL,
+ .event_descs = snbep_uncore_imc_events,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_qpi = {
+ .name = "qpi",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .event_descs = snbep_uncore_qpi_events,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+
+static struct intel_uncore_type snbep_uncore_r2pcie = {
+ .name = "r2pcie",
+ .num_counters = 4,
+ .num_boxes = 1,
+ .perf_ctr_bits = 44,
+ .constraints = snbep_uncore_r2pcie_constraints,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type snbep_uncore_r3qpi = {
+ .name = "r3qpi",
+ .num_counters = 3,
+ .num_boxes = 2,
+ .perf_ctr_bits = 44,
+ .constraints = snbep_uncore_r3qpi_constraints,
+ SNBEP_UNCORE_PCI_COMMON_INIT(),
+};
+
+static struct intel_uncore_type *snbep_pci_uncores[] = {
+ &snbep_uncore_ha,
+ &snbep_uncore_imc,
+ &snbep_uncore_qpi,
+ &snbep_uncore_r2pcie,
+ &snbep_uncore_r3qpi,
+ NULL,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
+ { /* Home Agent */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
+ .driver_data = (unsigned long)&snbep_uncore_ha,
+ },
+ { /* MC Channel 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
+ .driver_data = (unsigned long)&snbep_uncore_imc,
+ },
+ { /* MC Channel 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
+ .driver_data = (unsigned long)&snbep_uncore_imc,
+ },
+ { /* MC Channel 2 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
+ .driver_data = (unsigned long)&snbep_uncore_imc,
+ },
+ { /* MC Channel 3 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
+ .driver_data = (unsigned long)&snbep_uncore_imc,
+ },
+ { /* QPI Port 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
+ .driver_data = (unsigned long)&snbep_uncore_qpi,
+ },
+ { /* QPI Port 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
+ .driver_data = (unsigned long)&snbep_uncore_qpi,
+ },
+ { /* P2PCIe */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
+ .driver_data = (unsigned long)&snbep_uncore_r2pcie,
+ },
+ { /* R3QPI Link 0 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
+ .driver_data = (unsigned long)&snbep_uncore_r3qpi,
+ },
+ { /* R3QPI Link 1 */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
+ .driver_data = (unsigned long)&snbep_uncore_r3qpi,
+ },
+ { /* end: all zeroes */ }
+};
+
+static struct pci_driver snbep_uncore_pci_driver = {
+ .name = "snbep_uncore",
+ .id_table = snbep_uncore_pci_ids,
+};
+
+/*
+ * build pci bus to socket mapping
+ */
+static void snbep_pci2phy_map_init(void)
+{
+ struct pci_dev *ubox_dev = NULL;
+ int i, bus, nodeid;
+ u32 config;
+
+ while (1) {
+ /* find the UBOX device */
+ ubox_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX,
+ ubox_dev);
+ if (!ubox_dev)
+ break;
+ bus = ubox_dev->bus->number;
+ /* get the Node ID of the local register */
+ pci_read_config_dword(ubox_dev, 0x40, &config);
+ nodeid = config;
+ /* get the Node ID mapping */
+ pci_read_config_dword(ubox_dev, 0x54, &config);
+ /*
+ * every three bits in the Node ID mapping register maps
+ * to a particular node.
+ */
+ for (i = 0; i < 8; i++) {
+ if (nodeid == ((config >> (3 * i)) & 0x7)) {
+ pcibus_to_physid[bus] = i;
+ break;
+ }
+ }
+ };
+ return;
+}
+/* end of Sandy Bridge-EP uncore support */
+
+
+/* Sandy Bridge uncore support */
+static void snb_uncore_msr_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+ wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+ else
+ wrmsrl(hwc->config_base, SNB_UNC_CTL_EN);
+}
+
+static void snb_uncore_msr_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ wrmsrl(event->hw.config_base, 0);
+}
+
+static u64 snb_uncore_msr_read_counter(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ u64 count;
+ rdmsrl(event->hw.event_base, count);
+ return count;
+}
+
+static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0) {
+ wrmsrl(SNB_UNC_PERF_GLOBAL_CTL,
+ SNB_UNC_GLOBAL_CTL_EN | SNB_UNC_GLOBAL_CTL_CORE_ALL);
+ }
+}
+
+static struct attribute *snb_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_cmask5.attr,
+ NULL,
+};
+
+static struct attribute_group snb_uncore_format_group = {
+ .name = "format",
+ .attrs = snb_uncore_formats_attr,
+};
+
+static struct intel_uncore_ops snb_uncore_msr_ops = {
+ .init_box = snb_uncore_msr_init_box,
+ .disable_event = snb_uncore_msr_disable_event,
+ .enable_event = snb_uncore_msr_enable_event,
+ .read_counter = snb_uncore_msr_read_counter,
+};
+
+static struct event_constraint snb_uncore_cbox_constraints[] = {
+ UNCORE_EVENT_CONSTRAINT(0x80, 0x1),
+ UNCORE_EVENT_CONSTRAINT(0x83, 0x1),
+ EVENT_CONSTRAINT_END
+};
+
+static struct intel_uncore_type snb_uncore_cbox = {
+ .name = "cbox",
+ .num_counters = 2,
+ .num_boxes = 4,
+ .perf_ctr_bits = 44,
+ .fixed_ctr_bits = 48,
+ .perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
+ .event_ctl = SNB_UNC_CBO_0_PERFEVTSEL0,
+ .fixed_ctr = SNB_UNC_FIXED_CTR,
+ .fixed_ctl = SNB_UNC_FIXED_CTR_CTRL,
+ .single_fixed = 1,
+ .event_mask = SNB_UNC_RAW_EVENT_MASK,
+ .msr_offset = SNB_UNC_CBO_MSR_OFFSET,
+ .constraints = snb_uncore_cbox_constraints,
+ .ops = &snb_uncore_msr_ops,
+ .format_group = &snb_uncore_format_group,
+};
+
+static struct intel_uncore_type *snb_msr_uncores[] = {
+ &snb_uncore_cbox,
+ NULL,
+};
+/* end of Sandy Bridge uncore support */
+
+/* Nehalem uncore support */
+static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(NHM_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
+{
+ wrmsrl(NHM_UNC_PERF_GLOBAL_CTL,
+ NHM_UNC_GLOBAL_CTL_EN_PC_ALL | NHM_UNC_GLOBAL_CTL_EN_FC);
+}
+
+static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (hwc->idx < UNCORE_PMC_IDX_FIXED)
+ wrmsrl(hwc->config_base, hwc->config | SNB_UNC_CTL_EN);
+ else
+ wrmsrl(hwc->config_base, NHM_UNC_FIXED_CTR_CTL_EN);
+}
+
+static struct attribute *nhm_uncore_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_umask.attr,
+ &format_attr_edge.attr,
+ &format_attr_inv.attr,
+ &format_attr_cmask8.attr,
+ NULL,
+};
+
+static struct attribute_group nhm_uncore_format_group = {
+ .name = "format",
+ .attrs = nhm_uncore_formats_attr,
+};
+
+static struct uncore_event_desc nhm_uncore_events[] = {
+ INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
+ INTEL_UNCORE_EVENT_DESC(qmc_writes_full_any, "event=0x2f,umask=0x0f"),
+ INTEL_UNCORE_EVENT_DESC(qmc_normal_reads_any, "event=0x2c,umask=0x0f"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_reads, "event=0x20,umask=0x01"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_ioh_writes, "event=0x20,umask=0x02"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_remote_reads, "event=0x20,umask=0x04"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_remote_writes, "event=0x20,umask=0x08"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_local_reads, "event=0x20,umask=0x10"),
+ INTEL_UNCORE_EVENT_DESC(qhl_request_local_writes, "event=0x20,umask=0x20"),
+ { /* end: all zeroes */ },
+};
+
+static struct intel_uncore_ops nhm_uncore_msr_ops = {
+ .disable_box = nhm_uncore_msr_disable_box,
+ .enable_box = nhm_uncore_msr_enable_box,
+ .disable_event = snb_uncore_msr_disable_event,
+ .enable_event = nhm_uncore_msr_enable_event,
+ .read_counter = snb_uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type nhm_uncore = {
+ .name = "",
+ .num_counters = 8,
+ .num_boxes = 1,
+ .perf_ctr_bits = 48,
+ .fixed_ctr_bits = 48,
+ .event_ctl = NHM_UNC_PERFEVTSEL0,
+ .perf_ctr = NHM_UNC_UNCORE_PMC0,
+ .fixed_ctr = NHM_UNC_FIXED_CTR,
+ .fixed_ctl = NHM_UNC_FIXED_CTR_CTRL,
+ .event_mask = NHM_UNC_RAW_EVENT_MASK,
+ .event_descs = nhm_uncore_events,
+ .ops = &nhm_uncore_msr_ops,
+ .format_group = &nhm_uncore_format_group,
+};
+
+static struct intel_uncore_type *nhm_msr_uncores[] = {
+ &nhm_uncore,
+ NULL,
+};
+/* end of Nehalem uncore support */
+
+static void uncore_assign_hw_event(struct intel_uncore_box *box,
+ struct perf_event *event, int idx)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ hwc->idx = idx;
+ hwc->last_tag = ++box->tags[idx];
+
+ if (hwc->idx == UNCORE_PMC_IDX_FIXED) {
+ hwc->event_base = uncore_fixed_ctr(box);
+ hwc->config_base = uncore_fixed_ctl(box);
+ return;
+ }
+
+ hwc->config_base = uncore_event_ctl(box, hwc->idx);
+ hwc->event_base = uncore_perf_ctr(box, hwc->idx);
+}
+
+static void uncore_perf_event_update(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ u64 prev_count, new_count, delta;
+ int shift;
+
+ if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+ shift = 64 - uncore_fixed_ctr_bits(box);
+ else
+ shift = 64 - uncore_perf_ctr_bits(box);
+
+ /* the hrtimer might modify the previous event value */
+again:
+ prev_count = local64_read(&event->hw.prev_count);
+ new_count = uncore_read_counter(box, event);
+ if (local64_xchg(&event->hw.prev_count, new_count) != prev_count)
+ goto again;
+
+ delta = (new_count << shift) - (prev_count << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+}
+
+/*
+ * The overflow interrupt is unavailable for SandyBridge-EP, is broken
+ * for SandyBridge. So we use hrtimer to periodically poll the counter
+ * to avoid overflow.
+ */
+static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
+{
+ struct intel_uncore_box *box;
+ unsigned long flags;
+ int bit;
+
+ box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
+ if (!box->n_active || box->cpu != smp_processor_id())
+ return HRTIMER_NORESTART;
+ /*
+ * disable local interrupt to prevent uncore_pmu_event_start/stop
+ * to interrupt the update process
+ */
+ local_irq_save(flags);
+
+ for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
+ uncore_perf_event_update(box, box->events[bit]);
+
+ local_irq_restore(flags);
+
+ hrtimer_forward_now(hrtimer, ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL));
+ return HRTIMER_RESTART;
+}
+
+static void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
+{
+ __hrtimer_start_range_ns(&box->hrtimer,
+ ns_to_ktime(UNCORE_PMU_HRTIMER_INTERVAL), 0,
+ HRTIMER_MODE_REL_PINNED, 0);
+}
+
+static void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
+{
+ hrtimer_cancel(&box->hrtimer);
+}
+
+static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
+{
+ hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ box->hrtimer.function = uncore_pmu_hrtimer;
+}
+
+struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
+ int cpu)
+{
+ struct intel_uncore_box *box;
+ int i, size;
+
+ size = sizeof(*box) + type->num_shared_regs *
+ sizeof(struct intel_uncore_extra_reg);
+
+ box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
+ if (!box)
+ return NULL;
+
+ for (i = 0; i < type->num_shared_regs; i++)
+ raw_spin_lock_init(&box->shared_regs[i].lock);
+
+ uncore_pmu_init_hrtimer(box);
+ atomic_set(&box->refcnt, 1);
+ box->cpu = -1;
+ box->phys_id = -1;
+
+ return box;
+}
+
+static struct intel_uncore_box *
+uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
+{
+ static struct intel_uncore_box *box;
+
+ box = *per_cpu_ptr(pmu->box, cpu);
+ if (box)
+ return box;
+
+ raw_spin_lock(&uncore_box_lock);
+ list_for_each_entry(box, &pmu->box_list, list) {
+ if (box->phys_id == topology_physical_package_id(cpu)) {
+ atomic_inc(&box->refcnt);
+ *per_cpu_ptr(pmu->box, cpu) = box;
+ break;
+ }
+ }
+ raw_spin_unlock(&uncore_box_lock);
+
+ return *per_cpu_ptr(pmu->box, cpu);
+}
+
+static struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct intel_uncore_pmu, pmu);
+}
+
+static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
+{
+ /*
+ * perf core schedules event on the basis of cpu, uncore events are
+ * collected by one of the cpus inside a physical package.
+ */
+ return uncore_pmu_to_box(uncore_event_to_pmu(event),
+ smp_processor_id());
+}
+
+static int uncore_collect_events(struct intel_uncore_box *box,
+ struct perf_event *leader, bool dogrp)
+{
+ struct perf_event *event;
+ int n, max_count;
+
+ max_count = box->pmu->type->num_counters;
+ if (box->pmu->type->fixed_ctl)
+ max_count++;
+
+ if (box->n_events >= max_count)
+ return -EINVAL;
+
+ n = box->n_events;
+ box->event_list[n] = leader;
+ n++;
+ if (!dogrp)
+ return n;
+
+ list_for_each_entry(event, &leader->sibling_list, group_entry) {
+ if (event->state <= PERF_EVENT_STATE_OFF)
+ continue;
+
+ if (n >= max_count)
+ return -EINVAL;
+
+ box->event_list[n] = event;
+ n++;
+ }
+ return n;
+}
+
+static struct event_constraint *
+uncore_get_event_constraint(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ struct intel_uncore_type *type = box->pmu->type;
+ struct event_constraint *c;
+
+ if (type->ops->get_constraint) {
+ c = type->ops->get_constraint(box, event);
+ if (c)
+ return c;
+ }
+
+ if (event->hw.config == ~0ULL)
+ return &constraint_fixed;
+
+ if (type->constraints) {
+ for_each_event_constraint(c, type->constraints) {
+ if ((event->hw.config & c->cmask) == c->code)
+ return c;
+ }
+ }
+
+ return &type->unconstrainted;
+}
+
+static void uncore_put_event_constraint(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ if (box->pmu->type->ops->put_constraint)
+ box->pmu->type->ops->put_constraint(box, event);
+}
+
+static int uncore_assign_events(struct intel_uncore_box *box,
+ int assign[], int n)
+{
+ unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
+ struct event_constraint *c, *constraints[UNCORE_PMC_IDX_MAX];
+ int i, wmin, wmax, ret = 0;
+ struct hw_perf_event *hwc;
+
+ bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
+
+ for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
+ c = uncore_get_event_constraint(box, box->event_list[i]);
+ constraints[i] = c;
+ wmin = min(wmin, c->weight);
+ wmax = max(wmax, c->weight);
+ }
+
+ /* fastpath, try to reuse previous register */
+ for (i = 0; i < n; i++) {
+ hwc = &box->event_list[i]->hw;
+ c = constraints[i];
+
+ /* never assigned */
+ if (hwc->idx == -1)
+ break;
+
+ /* constraint still honored */
+ if (!test_bit(hwc->idx, c->idxmsk))
+ break;
+
+ /* not already used */
+ if (test_bit(hwc->idx, used_mask))
+ break;
+
+ __set_bit(hwc->idx, used_mask);
+ if (assign)
+ assign[i] = hwc->idx;
+ }
+ /* slow path */
+ if (i != n)
+ ret = perf_assign_events(constraints, n, wmin, wmax, assign);
+
+ if (!assign || ret) {
+ for (i = 0; i < n; i++)
+ uncore_put_event_constraint(box, box->event_list[i]);
+ }
+ return ret ? -EINVAL : 0;
+}
+
+static void uncore_pmu_event_start(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ int idx = event->hw.idx;
+
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
+ return;
+
+ event->hw.state = 0;
+ box->events[idx] = event;
+ box->n_active++;
+ __set_bit(idx, box->active_mask);
+
+ local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
+ uncore_enable_event(box, event);
+
+ if (box->n_active == 1) {
+ uncore_enable_box(box);
+ uncore_pmu_start_hrtimer(box);
+ }
+}
+
+static void uncore_pmu_event_stop(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
+ uncore_disable_event(box, event);
+ box->n_active--;
+ box->events[hwc->idx] = NULL;
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (box->n_active == 0) {
+ uncore_disable_box(box);
+ uncore_pmu_cancel_hrtimer(box);
+ }
+ }
+
+ if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ /*
+ * Drain the remaining delta count out of a event
+ * that we are disabling:
+ */
+ uncore_perf_event_update(box, event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+}
+
+static int uncore_pmu_event_add(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ struct hw_perf_event *hwc = &event->hw;
+ int assign[UNCORE_PMC_IDX_MAX];
+ int i, n, ret;
+
+ if (!box)
+ return -ENODEV;
+
+ ret = n = uncore_collect_events(box, event, false);
+ if (ret < 0)
+ return ret;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (!(flags & PERF_EF_START))
+ hwc->state |= PERF_HES_ARCH;
+
+ ret = uncore_assign_events(box, assign, n);
+ if (ret)
+ return ret;
+
+ /* save events moving to new counters */
+ for (i = 0; i < box->n_events; i++) {
+ event = box->event_list[i];
+ hwc = &event->hw;
+
+ if (hwc->idx == assign[i] &&
+ hwc->last_tag == box->tags[assign[i]])
+ continue;
+ /*
+ * Ensure we don't accidentally enable a stopped
+ * counter simply because we rescheduled.
+ */
+ if (hwc->state & PERF_HES_STOPPED)
+ hwc->state |= PERF_HES_ARCH;
+
+ uncore_pmu_event_stop(event, PERF_EF_UPDATE);
+ }
+
+ /* reprogram moved events into new counters */
+ for (i = 0; i < n; i++) {
+ event = box->event_list[i];
+ hwc = &event->hw;
+
+ if (hwc->idx != assign[i] ||
+ hwc->last_tag != box->tags[assign[i]])
+ uncore_assign_hw_event(box, event, assign[i]);
+ else if (i < box->n_events)
+ continue;
+
+ if (hwc->state & PERF_HES_ARCH)
+ continue;
+
+ uncore_pmu_event_start(event, 0);
+ }
+ box->n_events = n;
+
+ return 0;
+}
+
+static void uncore_pmu_event_del(struct perf_event *event, int flags)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ int i;
+
+ uncore_pmu_event_stop(event, PERF_EF_UPDATE);
+
+ for (i = 0; i < box->n_events; i++) {
+ if (event == box->event_list[i]) {
+ uncore_put_event_constraint(box, event);
+
+ while (++i < box->n_events)
+ box->event_list[i - 1] = box->event_list[i];
+
+ --box->n_events;
+ break;
+ }
+ }
+
+ event->hw.idx = -1;
+ event->hw.last_tag = ~0ULL;
+}
+
+static void uncore_pmu_event_read(struct perf_event *event)
+{
+ struct intel_uncore_box *box = uncore_event_to_box(event);
+ uncore_perf_event_update(box, event);
+}
+
+/*
+ * validation ensures the group can be loaded onto the
+ * PMU if it was the only group available.
+ */
+static int uncore_validate_group(struct intel_uncore_pmu *pmu,
+ struct perf_event *event)
+{
+ struct perf_event *leader = event->group_leader;
+ struct intel_uncore_box *fake_box;
+ int ret = -EINVAL, n;
+
+ fake_box = uncore_alloc_box(pmu->type, smp_processor_id());
+ if (!fake_box)
+ return -ENOMEM;
+
+ fake_box->pmu = pmu;
+ /*
+ * the event is not yet connected with its
+ * siblings therefore we must first collect
+ * existing siblings, then add the new event
+ * before we can simulate the scheduling
+ */
+ n = uncore_collect_events(fake_box, leader, true);
+ if (n < 0)
+ goto out;
+
+ fake_box->n_events = n;
+ n = uncore_collect_events(fake_box, event, false);
+ if (n < 0)
+ goto out;
+
+ fake_box->n_events = n;
+
+ ret = uncore_assign_events(fake_box, NULL, n);
+out:
+ kfree(fake_box);
+ return ret;
+}
+
+int uncore_pmu_event_init(struct perf_event *event)
+{
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ struct hw_perf_event *hwc = &event->hw;
+ int ret;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ pmu = uncore_event_to_pmu(event);
+ /* no device found for this pmu */
+ if (pmu->func_id < 0)
+ return -ENOENT;
+
+ /*
+ * Uncore PMU does measure at all privilege level all the time.
+ * So it doesn't make sense to specify any exclude bits.
+ */
+ if (event->attr.exclude_user || event->attr.exclude_kernel ||
+ event->attr.exclude_hv || event->attr.exclude_idle)
+ return -EINVAL;
+
+ /* Sampling not supported yet */
+ if (hwc->sample_period)
+ return -EINVAL;
+
+ /*
+ * Place all uncore events for a particular physical package
+ * onto a single cpu
+ */
+ if (event->cpu < 0)
+ return -EINVAL;
+ box = uncore_pmu_to_box(pmu, event->cpu);
+ if (!box || box->cpu < 0)
+ return -EINVAL;
+ event->cpu = box->cpu;
+
+ event->hw.idx = -1;
+ event->hw.last_tag = ~0ULL;
+ event->hw.extra_reg.idx = EXTRA_REG_NONE;
+
+ if (event->attr.config == UNCORE_FIXED_EVENT) {
+ /* no fixed counter */
+ if (!pmu->type->fixed_ctl)
+ return -EINVAL;
+ /*
+ * if there is only one fixed counter, only the first pmu
+ * can access the fixed counter
+ */
+ if (pmu->type->single_fixed && pmu->pmu_idx > 0)
+ return -EINVAL;
+ hwc->config = ~0ULL;
+ } else {
+ hwc->config = event->attr.config & pmu->type->event_mask;
+ if (pmu->type->ops->hw_config) {
+ ret = pmu->type->ops->hw_config(box, event);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (event->group_leader != event)
+ ret = uncore_validate_group(pmu, event);
+ else
+ ret = 0;
+
+ return ret;
+}
+
+static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu)
+{
+ int ret;
+
+ pmu->pmu = (struct pmu) {
+ .attr_groups = pmu->type->attr_groups,
+ .task_ctx_nr = perf_invalid_context,
+ .event_init = uncore_pmu_event_init,
+ .add = uncore_pmu_event_add,
+ .del = uncore_pmu_event_del,
+ .start = uncore_pmu_event_start,
+ .stop = uncore_pmu_event_stop,
+ .read = uncore_pmu_event_read,
+ };
+
+ if (pmu->type->num_boxes == 1) {
+ if (strlen(pmu->type->name) > 0)
+ sprintf(pmu->name, "uncore_%s", pmu->type->name);
+ else
+ sprintf(pmu->name, "uncore");
+ } else {
+ sprintf(pmu->name, "uncore_%s_%d", pmu->type->name,
+ pmu->pmu_idx);
+ }
+
+ ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
+ return ret;
+}
+
+static void __init uncore_type_exit(struct intel_uncore_type *type)
+{
+ int i;
+
+ for (i = 0; i < type->num_boxes; i++)
+ free_percpu(type->pmus[i].box);
+ kfree(type->pmus);
+ type->pmus = NULL;
+ kfree(type->attr_groups[1]);
+ type->attr_groups[1] = NULL;
+}
+
+static void uncore_types_exit(struct intel_uncore_type **types)
+{
+ int i;
+ for (i = 0; types[i]; i++)
+ uncore_type_exit(types[i]);
+}
+
+static int __init uncore_type_init(struct intel_uncore_type *type)
+{
+ struct intel_uncore_pmu *pmus;
+ struct attribute_group *events_group;
+ struct attribute **attrs;
+ int i, j;
+
+ pmus = kzalloc(sizeof(*pmus) * type->num_boxes, GFP_KERNEL);
+ if (!pmus)
+ return -ENOMEM;
+
+ type->unconstrainted = (struct event_constraint)
+ __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
+ 0, type->num_counters, 0);
+
+ for (i = 0; i < type->num_boxes; i++) {
+ pmus[i].func_id = -1;
+ pmus[i].pmu_idx = i;
+ pmus[i].type = type;
+ INIT_LIST_HEAD(&pmus[i].box_list);
+ pmus[i].box = alloc_percpu(struct intel_uncore_box *);
+ if (!pmus[i].box)
+ goto fail;
+ }
+
+ if (type->event_descs) {
+ i = 0;
+ while (type->event_descs[i].attr.attr.name)
+ i++;
+
+ events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
+ sizeof(*events_group), GFP_KERNEL);
+ if (!events_group)
+ goto fail;
+
+ attrs = (struct attribute **)(events_group + 1);
+ events_group->name = "events";
+ events_group->attrs = attrs;
+
+ for (j = 0; j < i; j++)
+ attrs[j] = &type->event_descs[j].attr.attr;
+
+ type->attr_groups[1] = events_group;
+ }
+
+ type->pmus = pmus;
+ return 0;
+fail:
+ uncore_type_exit(type);
+ return -ENOMEM;
+}
+
+static int __init uncore_types_init(struct intel_uncore_type **types)
+{
+ int i, ret;
+
+ for (i = 0; types[i]; i++) {
+ ret = uncore_type_init(types[i]);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+fail:
+ while (--i >= 0)
+ uncore_type_exit(types[i]);
+ return ret;
+}
+
+static struct pci_driver *uncore_pci_driver;
+static bool pcidrv_registered;
+
+/*
+ * add a pci uncore device
+ */
+static int __devinit uncore_pci_add(struct intel_uncore_type *type,
+ struct pci_dev *pdev)
+{
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, phys_id;
+
+ phys_id = pcibus_to_physid[pdev->bus->number];
+ if (phys_id < 0)
+ return -ENODEV;
+
+ box = uncore_alloc_box(type, 0);
+ if (!box)
+ return -ENOMEM;
+
+ /*
+ * for performance monitoring unit with multiple boxes,
+ * each box has a different function id.
+ */
+ for (i = 0; i < type->num_boxes; i++) {
+ pmu = &type->pmus[i];
+ if (pmu->func_id == pdev->devfn)
+ break;
+ if (pmu->func_id < 0) {
+ pmu->func_id = pdev->devfn;
+ break;
+ }
+ pmu = NULL;
+ }
+
+ if (!pmu) {
+ kfree(box);
+ return -EINVAL;
+ }
+
+ box->phys_id = phys_id;
+ box->pci_dev = pdev;
+ box->pmu = pmu;
+ uncore_box_init(box);
+ pci_set_drvdata(pdev, box);
+
+ raw_spin_lock(&uncore_box_lock);
+ list_add_tail(&box->list, &pmu->box_list);
+ raw_spin_unlock(&uncore_box_lock);
+
+ return 0;
+}
+
+static void uncore_pci_remove(struct pci_dev *pdev)
+{
+ struct intel_uncore_box *box = pci_get_drvdata(pdev);
+ struct intel_uncore_pmu *pmu = box->pmu;
+ int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
+
+ if (WARN_ON_ONCE(phys_id != box->phys_id))
+ return;
+
+ raw_spin_lock(&uncore_box_lock);
+ list_del(&box->list);
+ raw_spin_unlock(&uncore_box_lock);
+
+ for_each_possible_cpu(cpu) {
+ if (*per_cpu_ptr(pmu->box, cpu) == box) {
+ *per_cpu_ptr(pmu->box, cpu) = NULL;
+ atomic_dec(&box->refcnt);
+ }
+ }
+
+ WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
+ kfree(box);
+}
+
+static int __devinit uncore_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct intel_uncore_type *type;
+
+ type = (struct intel_uncore_type *)id->driver_data;
+ return uncore_pci_add(type, pdev);
+}
+
+static int __init uncore_pci_init(void)
+{
+ int ret;
+
+ switch (boot_cpu_data.x86_model) {
+ case 45: /* Sandy Bridge-EP */
+ pci_uncores = snbep_pci_uncores;
+ uncore_pci_driver = &snbep_uncore_pci_driver;
+ snbep_pci2phy_map_init();
+ break;
+ default:
+ return 0;
+ }
+
+ ret = uncore_types_init(pci_uncores);
+ if (ret)
+ return ret;
+
+ uncore_pci_driver->probe = uncore_pci_probe;
+ uncore_pci_driver->remove = uncore_pci_remove;
+
+ ret = pci_register_driver(uncore_pci_driver);
+ if (ret == 0)
+ pcidrv_registered = true;
+ else
+ uncore_types_exit(pci_uncores);
+
+ return ret;
+}
+
+static void __init uncore_pci_exit(void)
+{
+ if (pcidrv_registered) {
+ pcidrv_registered = false;
+ pci_unregister_driver(uncore_pci_driver);
+ uncore_types_exit(pci_uncores);
+ }
+}
+
+static void __cpuinit uncore_cpu_dying(int cpu)
+{
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, j;
+
+ for (i = 0; msr_uncores[i]; i++) {
+ type = msr_uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ box = *per_cpu_ptr(pmu->box, cpu);
+ *per_cpu_ptr(pmu->box, cpu) = NULL;
+ if (box && atomic_dec_and_test(&box->refcnt))
+ kfree(box);
+ }
+ }
+}
+
+static int __cpuinit uncore_cpu_starting(int cpu)
+{
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box, *exist;
+ int i, j, k, phys_id;
+
+ phys_id = topology_physical_package_id(cpu);
+
+ for (i = 0; msr_uncores[i]; i++) {
+ type = msr_uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ box = *per_cpu_ptr(pmu->box, cpu);
+ /* called by uncore_cpu_init? */
+ if (box && box->phys_id >= 0) {
+ uncore_box_init(box);
+ continue;
+ }
+
+ for_each_online_cpu(k) {
+ exist = *per_cpu_ptr(pmu->box, k);
+ if (exist && exist->phys_id == phys_id) {
+ atomic_inc(&exist->refcnt);
+ *per_cpu_ptr(pmu->box, cpu) = exist;
+ kfree(box);
+ box = NULL;
+ break;
+ }
+ }
+
+ if (box) {
+ box->phys_id = phys_id;
+ uncore_box_init(box);
+ }
+ }
+ }
+ return 0;
+}
+
+static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
+{
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, j;
+
+ for (i = 0; msr_uncores[i]; i++) {
+ type = msr_uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ if (pmu->func_id < 0)
+ pmu->func_id = j;
+
+ box = uncore_alloc_box(type, cpu);
+ if (!box)
+ return -ENOMEM;
+
+ box->pmu = pmu;
+ box->phys_id = phys_id;
+ *per_cpu_ptr(pmu->box, cpu) = box;
+ }
+ }
+ return 0;
+}
+
+static void __cpuinit uncore_change_context(struct intel_uncore_type **uncores,
+ int old_cpu, int new_cpu)
+{
+ struct intel_uncore_type *type;
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_box *box;
+ int i, j;
+
+ for (i = 0; uncores[i]; i++) {
+ type = uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ if (old_cpu < 0)
+ box = uncore_pmu_to_box(pmu, new_cpu);
+ else
+ box = uncore_pmu_to_box(pmu, old_cpu);
+ if (!box)
+ continue;
+
+ if (old_cpu < 0) {
+ WARN_ON_ONCE(box->cpu != -1);
+ box->cpu = new_cpu;
+ continue;
+ }
+
+ WARN_ON_ONCE(box->cpu != old_cpu);
+ if (new_cpu >= 0) {
+ uncore_pmu_cancel_hrtimer(box);
+ perf_pmu_migrate_context(&pmu->pmu,
+ old_cpu, new_cpu);
+ box->cpu = new_cpu;
+ } else {
+ box->cpu = -1;
+ }
+ }
+ }
+}
+
+static void __cpuinit uncore_event_exit_cpu(int cpu)
+{
+ int i, phys_id, target;
+
+ /* if exiting cpu is used for collecting uncore events */
+ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
+ return;
+
+ /* find a new cpu to collect uncore events */
+ phys_id = topology_physical_package_id(cpu);
+ target = -1;
+ for_each_online_cpu(i) {
+ if (i == cpu)
+ continue;
+ if (phys_id == topology_physical_package_id(i)) {
+ target = i;
+ break;
+ }
+ }
+
+ /* migrate uncore events to the new cpu */
+ if (target >= 0)
+ cpumask_set_cpu(target, &uncore_cpu_mask);
+
+ uncore_change_context(msr_uncores, cpu, target);
+ uncore_change_context(pci_uncores, cpu, target);
+}
+
+static void __cpuinit uncore_event_init_cpu(int cpu)
+{
+ int i, phys_id;
+
+ phys_id = topology_physical_package_id(cpu);
+ for_each_cpu(i, &uncore_cpu_mask) {
+ if (phys_id == topology_physical_package_id(i))
+ return;
+ }
+
+ cpumask_set_cpu(cpu, &uncore_cpu_mask);
+
+ uncore_change_context(msr_uncores, -1, cpu);
+ uncore_change_context(pci_uncores, -1, cpu);
+}
+
+static int __cpuinit uncore_cpu_notifier(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ /* allocate/free data structure for uncore box */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ uncore_cpu_prepare(cpu, -1);
+ break;
+ case CPU_STARTING:
+ uncore_cpu_starting(cpu);
+ break;
+ case CPU_UP_CANCELED:
+ case CPU_DYING:
+ uncore_cpu_dying(cpu);
+ break;
+ default:
+ break;
+ }
+
+ /* select the cpu that collects uncore events */
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DOWN_FAILED:
+ case CPU_STARTING:
+ uncore_event_init_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ uncore_event_exit_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block uncore_cpu_nb __cpuinitdata = {
+ .notifier_call = uncore_cpu_notifier,
+ /*
+ * to migrate uncore events, our notifier should be executed
+ * before perf core's notifier.
+ */
+ .priority = CPU_PRI_PERF + 1,
+};
+
+static void __init uncore_cpu_setup(void *dummy)
+{
+ uncore_cpu_starting(smp_processor_id());
+}
+
+static int __init uncore_cpu_init(void)
+{
+ int ret, cpu, max_cores;
+
+ max_cores = boot_cpu_data.x86_max_cores;
+ switch (boot_cpu_data.x86_model) {
+ case 26: /* Nehalem */
+ case 30:
+ case 37: /* Westmere */
+ case 44:
+ msr_uncores = nhm_msr_uncores;
+ break;
+ case 42: /* Sandy Bridge */
+ if (snb_uncore_cbox.num_boxes > max_cores)
+ snb_uncore_cbox.num_boxes = max_cores;
+ msr_uncores = snb_msr_uncores;
+ break;
+ case 45: /* Sandy Birdge-EP */
+ if (snbep_uncore_cbox.num_boxes > max_cores)
+ snbep_uncore_cbox.num_boxes = max_cores;
+ msr_uncores = snbep_msr_uncores;
+ break;
+ default:
+ return 0;
+ }
+
+ ret = uncore_types_init(msr_uncores);
+ if (ret)
+ return ret;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu) {
+ int i, phys_id = topology_physical_package_id(cpu);
+
+ for_each_cpu(i, &uncore_cpu_mask) {
+ if (phys_id == topology_physical_package_id(i)) {
+ phys_id = -1;
+ break;
+ }
+ }
+ if (phys_id < 0)
+ continue;
+
+ uncore_cpu_prepare(cpu, phys_id);
+ uncore_event_init_cpu(cpu);
+ }
+ on_each_cpu(uncore_cpu_setup, NULL, 1);
+
+ register_cpu_notifier(&uncore_cpu_nb);
+
+ put_online_cpus();
+
+ return 0;
+}
+
+static int __init uncore_pmus_register(void)
+{
+ struct intel_uncore_pmu *pmu;
+ struct intel_uncore_type *type;
+ int i, j;
+
+ for (i = 0; msr_uncores[i]; i++) {
+ type = msr_uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ uncore_pmu_register(pmu);
+ }
+ }
+
+ for (i = 0; pci_uncores[i]; i++) {
+ type = pci_uncores[i];
+ for (j = 0; j < type->num_boxes; j++) {
+ pmu = &type->pmus[j];
+ uncore_pmu_register(pmu);
+ }
+ }
+
+ return 0;
+}
+
+static int __init intel_uncore_init(void)
+{
+ int ret;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return -ENODEV;
+
+ ret = uncore_pci_init();
+ if (ret)
+ goto fail;
+ ret = uncore_cpu_init();
+ if (ret) {
+ uncore_pci_exit();
+ goto fail;
+ }
+
+ uncore_pmus_register();
+ return 0;
+fail:
+ return ret;
+}
+device_initcall(intel_uncore_init);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
new file mode 100644
index 000000000000..b13e9ea81def
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -0,0 +1,424 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/perf_event.h>
+#include "perf_event.h"
+
+#define UNCORE_PMU_NAME_LEN 32
+#define UNCORE_BOX_HASH_SIZE 8
+
+#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC)
+
+#define UNCORE_FIXED_EVENT 0xff
+#define UNCORE_PMC_IDX_MAX_GENERIC 8
+#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
+#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1)
+
+#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
+
+/* SNB event control */
+#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
+#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
+#define SNB_UNC_CTL_EDGE_DET (1 << 18)
+#define SNB_UNC_CTL_EN (1 << 22)
+#define SNB_UNC_CTL_INVERT (1 << 23)
+#define SNB_UNC_CTL_CMASK_MASK 0x1f000000
+#define NHM_UNC_CTL_CMASK_MASK 0xff000000
+#define NHM_UNC_FIXED_CTR_CTL_EN (1 << 0)
+
+#define SNB_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
+ SNB_UNC_CTL_UMASK_MASK | \
+ SNB_UNC_CTL_EDGE_DET | \
+ SNB_UNC_CTL_INVERT | \
+ SNB_UNC_CTL_CMASK_MASK)
+
+#define NHM_UNC_RAW_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
+ SNB_UNC_CTL_UMASK_MASK | \
+ SNB_UNC_CTL_EDGE_DET | \
+ SNB_UNC_CTL_INVERT | \
+ NHM_UNC_CTL_CMASK_MASK)
+
+/* SNB global control register */
+#define SNB_UNC_PERF_GLOBAL_CTL 0x391
+#define SNB_UNC_FIXED_CTR_CTRL 0x394
+#define SNB_UNC_FIXED_CTR 0x395
+
+/* SNB uncore global control */
+#define SNB_UNC_GLOBAL_CTL_CORE_ALL ((1 << 4) - 1)
+#define SNB_UNC_GLOBAL_CTL_EN (1 << 29)
+
+/* SNB Cbo register */
+#define SNB_UNC_CBO_0_PERFEVTSEL0 0x700
+#define SNB_UNC_CBO_0_PER_CTR0 0x706
+#define SNB_UNC_CBO_MSR_OFFSET 0x10
+
+/* NHM global control register */
+#define NHM_UNC_PERF_GLOBAL_CTL 0x391
+#define NHM_UNC_FIXED_CTR 0x394
+#define NHM_UNC_FIXED_CTR_CTRL 0x395
+
+/* NHM uncore global control */
+#define NHM_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 8) - 1)
+#define NHM_UNC_GLOBAL_CTL_EN_FC (1ULL << 32)
+
+/* NHM uncore register */
+#define NHM_UNC_PERFEVTSEL0 0x3c0
+#define NHM_UNC_UNCORE_PMC0 0x3b0
+
+/* SNB-EP Box level control */
+#define SNBEP_PMON_BOX_CTL_RST_CTRL (1 << 0)
+#define SNBEP_PMON_BOX_CTL_RST_CTRS (1 << 1)
+#define SNBEP_PMON_BOX_CTL_FRZ (1 << 8)
+#define SNBEP_PMON_BOX_CTL_FRZ_EN (1 << 16)
+#define SNBEP_PMON_BOX_CTL_INT (SNBEP_PMON_BOX_CTL_RST_CTRL | \
+ SNBEP_PMON_BOX_CTL_RST_CTRS | \
+ SNBEP_PMON_BOX_CTL_FRZ_EN)
+/* SNB-EP event control */
+#define SNBEP_PMON_CTL_EV_SEL_MASK 0x000000ff
+#define SNBEP_PMON_CTL_UMASK_MASK 0x0000ff00
+#define SNBEP_PMON_CTL_RST (1 << 17)
+#define SNBEP_PMON_CTL_EDGE_DET (1 << 18)
+#define SNBEP_PMON_CTL_EV_SEL_EXT (1 << 21) /* only for QPI */
+#define SNBEP_PMON_CTL_EN (1 << 22)
+#define SNBEP_PMON_CTL_INVERT (1 << 23)
+#define SNBEP_PMON_CTL_TRESH_MASK 0xff000000
+#define SNBEP_PMON_RAW_EVENT_MASK (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_UMASK_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_INVERT | \
+ SNBEP_PMON_CTL_TRESH_MASK)
+
+/* SNB-EP Ubox event control */
+#define SNBEP_U_MSR_PMON_CTL_TRESH_MASK 0x1f000000
+#define SNBEP_U_MSR_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PMON_CTL_UMASK_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_INVERT | \
+ SNBEP_U_MSR_PMON_CTL_TRESH_MASK)
+
+#define SNBEP_CBO_PMON_CTL_TID_EN (1 << 19)
+#define SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK (SNBEP_PMON_RAW_EVENT_MASK | \
+ SNBEP_CBO_PMON_CTL_TID_EN)
+
+/* SNB-EP PCU event control */
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK 0x0000c000
+#define SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK 0x1f000000
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT (1 << 30)
+#define SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET (1 << 31)
+#define SNBEP_PCU_MSR_PMON_RAW_EVENT_MASK \
+ (SNBEP_PMON_CTL_EV_SEL_MASK | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
+ SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_INVERT | \
+ SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
+ SNBEP_PCU_MSR_PMON_CTL_OCC_EDGE_DET)
+
+/* SNB-EP pci control register */
+#define SNBEP_PCI_PMON_BOX_CTL 0xf4
+#define SNBEP_PCI_PMON_CTL0 0xd8
+/* SNB-EP pci counter register */
+#define SNBEP_PCI_PMON_CTR0 0xa0
+
+/* SNB-EP home agent register */
+#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH0 0x40
+#define SNBEP_HA_PCI_PMON_BOX_ADDRMATCH1 0x44
+#define SNBEP_HA_PCI_PMON_BOX_OPCODEMATCH 0x48
+/* SNB-EP memory controller register */
+#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTL 0xf0
+#define SNBEP_MC_CHy_PCI_PMON_FIXED_CTR 0xd0
+/* SNB-EP QPI register */
+#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH0 0x228
+#define SNBEP_Q_Py_PCI_PMON_PKT_MATCH1 0x22c
+#define SNBEP_Q_Py_PCI_PMON_PKT_MASK0 0x238
+#define SNBEP_Q_Py_PCI_PMON_PKT_MASK1 0x23c
+
+/* SNB-EP Ubox register */
+#define SNBEP_U_MSR_PMON_CTR0 0xc16
+#define SNBEP_U_MSR_PMON_CTL0 0xc10
+
+#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTL 0xc08
+#define SNBEP_U_MSR_PMON_UCLK_FIXED_CTR 0xc09
+
+/* SNB-EP Cbo register */
+#define SNBEP_C0_MSR_PMON_CTR0 0xd16
+#define SNBEP_C0_MSR_PMON_CTL0 0xd10
+#define SNBEP_C0_MSR_PMON_BOX_CTL 0xd04
+#define SNBEP_C0_MSR_PMON_BOX_FILTER 0xd14
+#define SNBEP_CB0_MSR_PMON_BOX_FILTER_MASK 0xfffffc1f
+#define SNBEP_CBO_MSR_OFFSET 0x20
+
+/* SNB-EP PCU register */
+#define SNBEP_PCU_MSR_PMON_CTR0 0xc36
+#define SNBEP_PCU_MSR_PMON_CTL0 0xc30
+#define SNBEP_PCU_MSR_PMON_BOX_CTL 0xc24
+#define SNBEP_PCU_MSR_PMON_BOX_FILTER 0xc34
+#define SNBEP_PCU_MSR_PMON_BOX_FILTER_MASK 0xffffffff
+#define SNBEP_PCU_MSR_CORE_C3_CTR 0x3fc
+#define SNBEP_PCU_MSR_CORE_C6_CTR 0x3fd
+
+struct intel_uncore_ops;
+struct intel_uncore_pmu;
+struct intel_uncore_box;
+struct uncore_event_desc;
+
+struct intel_uncore_type {
+ const char *name;
+ int num_counters;
+ int num_boxes;
+ int perf_ctr_bits;
+ int fixed_ctr_bits;
+ unsigned perf_ctr;
+ unsigned event_ctl;
+ unsigned event_mask;
+ unsigned fixed_ctr;
+ unsigned fixed_ctl;
+ unsigned box_ctl;
+ unsigned msr_offset;
+ unsigned num_shared_regs:8;
+ unsigned single_fixed:1;
+ struct event_constraint unconstrainted;
+ struct event_constraint *constraints;
+ struct intel_uncore_pmu *pmus;
+ struct intel_uncore_ops *ops;
+ struct uncore_event_desc *event_descs;
+ const struct attribute_group *attr_groups[3];
+};
+
+#define format_group attr_groups[0]
+
+struct intel_uncore_ops {
+ void (*init_box)(struct intel_uncore_box *);
+ void (*disable_box)(struct intel_uncore_box *);
+ void (*enable_box)(struct intel_uncore_box *);
+ void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
+ void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
+ u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
+ int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
+ struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
+ struct perf_event *);
+ void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
+};
+
+struct intel_uncore_pmu {
+ struct pmu pmu;
+ char name[UNCORE_PMU_NAME_LEN];
+ int pmu_idx;
+ int func_id;
+ struct intel_uncore_type *type;
+ struct intel_uncore_box ** __percpu box;
+ struct list_head box_list;
+};
+
+struct intel_uncore_extra_reg {
+ raw_spinlock_t lock;
+ u64 config1;
+ atomic_t ref;
+};
+
+struct intel_uncore_box {
+ int phys_id;
+ int n_active; /* number of active events */
+ int n_events;
+ int cpu; /* cpu to collect events */
+ unsigned long flags;
+ atomic_t refcnt;
+ struct perf_event *events[UNCORE_PMC_IDX_MAX];
+ struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
+ unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
+ u64 tags[UNCORE_PMC_IDX_MAX];
+ struct pci_dev *pci_dev;
+ struct intel_uncore_pmu *pmu;
+ struct hrtimer hrtimer;
+ struct list_head list;
+ struct intel_uncore_extra_reg shared_regs[0];
+};
+
+#define UNCORE_BOX_FLAG_INITIATED 0
+
+struct uncore_event_desc {
+ struct kobj_attribute attr;
+ const char *config;
+};
+
+#define INTEL_UNCORE_EVENT_DESC(_name, _config) \
+{ \
+ .attr = __ATTR(_name, 0444, uncore_event_show, NULL), \
+ .config = _config, \
+}
+
+#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
+static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+static struct kobj_attribute format_attr_##_var = \
+ __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
+
+
+static ssize_t uncore_event_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct uncore_event_desc *event =
+ container_of(attr, struct uncore_event_desc, attr);
+ return sprintf(buf, "%s", event->config);
+}
+
+static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
+{
+ return box->pmu->type->box_ctl;
+}
+
+static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
+{
+ return box->pmu->type->fixed_ctl;
+}
+
+static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
+{
+ return box->pmu->type->fixed_ctr;
+}
+
+static inline
+unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
+{
+ return idx * 4 + box->pmu->type->event_ctl;
+}
+
+static inline
+unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+ return idx * 8 + box->pmu->type->perf_ctr;
+}
+
+static inline
+unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
+{
+ if (!box->pmu->type->box_ctl)
+ return 0;
+ return box->pmu->type->box_ctl +
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
+{
+ if (!box->pmu->type->fixed_ctl)
+ return 0;
+ return box->pmu->type->fixed_ctl +
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
+{
+ return box->pmu->type->fixed_ctr +
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
+{
+ return idx + box->pmu->type->event_ctl +
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+ return idx + box->pmu->type->perf_ctr +
+ box->pmu->type->msr_offset * box->pmu->pmu_idx;
+}
+
+static inline
+unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
+{
+ if (box->pci_dev)
+ return uncore_pci_fixed_ctl(box);
+ else
+ return uncore_msr_fixed_ctl(box);
+}
+
+static inline
+unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
+{
+ if (box->pci_dev)
+ return uncore_pci_fixed_ctr(box);
+ else
+ return uncore_msr_fixed_ctr(box);
+}
+
+static inline
+unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
+{
+ if (box->pci_dev)
+ return uncore_pci_event_ctl(box, idx);
+ else
+ return uncore_msr_event_ctl(box, idx);
+}
+
+static inline
+unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
+{
+ if (box->pci_dev)
+ return uncore_pci_perf_ctr(box, idx);
+ else
+ return uncore_msr_perf_ctr(box, idx);
+}
+
+static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
+{
+ return box->pmu->type->perf_ctr_bits;
+}
+
+static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
+{
+ return box->pmu->type->fixed_ctr_bits;
+}
+
+static inline int uncore_num_counters(struct intel_uncore_box *box)
+{
+ return box->pmu->type->num_counters;
+}
+
+static inline void uncore_disable_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->type->ops->disable_box)
+ box->pmu->type->ops->disable_box(box);
+}
+
+static inline void uncore_enable_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->type->ops->enable_box)
+ box->pmu->type->ops->enable_box(box);
+}
+
+static inline void uncore_disable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ box->pmu->type->ops->disable_event(box, event);
+}
+
+static inline void uncore_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ box->pmu->type->ops->enable_event(box, event);
+}
+
+static inline u64 uncore_read_counter(struct intel_uncore_box *box,
+ struct perf_event *event)
+{
+ return box->pmu->type->ops->read_counter(box, event);
+}
+
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+ if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+ if (box->pmu->type->ops->init_box)
+ box->pmu->type->ops->init_box(box);
+ }
+}
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 47124a73dd73..92c7e39a079f 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -895,8 +895,8 @@ static void p4_pmu_disable_pebs(void)
* So at moment let leave metrics turned on forever -- it's
* ok for now but need to be revisited!
*
- * (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)0);
- * (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
+ * (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)0);
+ * (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)0);
*/
}
@@ -909,7 +909,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
* state we need to clear P4_CCCR_OVF, otherwise interrupt get
* asserted again and again
*/
- (void)checking_wrmsrl(hwc->config_base,
+ (void)wrmsrl_safe(hwc->config_base,
(u64)(p4_config_unpack_cccr(hwc->config)) &
~P4_CCCR_ENABLE & ~P4_CCCR_OVF & ~P4_CCCR_RESERVED);
}
@@ -943,8 +943,8 @@ static void p4_pmu_enable_pebs(u64 config)
bind = &p4_pebs_bind_map[idx];
- (void)checking_wrmsrl(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
- (void)checking_wrmsrl(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
+ (void)wrmsrl_safe(MSR_IA32_PEBS_ENABLE, (u64)bind->metric_pebs);
+ (void)wrmsrl_safe(MSR_P4_PEBS_MATRIX_VERT, (u64)bind->metric_vert);
}
static void p4_pmu_enable_event(struct perf_event *event)
@@ -978,8 +978,8 @@ static void p4_pmu_enable_event(struct perf_event *event)
*/
p4_pmu_enable_pebs(hwc->config);
- (void)checking_wrmsrl(escr_addr, escr_conf);
- (void)checking_wrmsrl(hwc->config_base,
+ (void)wrmsrl_safe(escr_addr, escr_conf);
+ (void)wrmsrl_safe(hwc->config_base,
(cccr & ~P4_CCCR_RESERVED) | P4_CCCR_ENABLE);
}
@@ -1325,7 +1325,7 @@ __init int p4_pmu_init(void)
unsigned int low, high;
/* If we get stripped -- indexing fails */
- BUILD_BUG_ON(ARCH_P4_MAX_CCCR > X86_PMC_MAX_GENERIC);
+ BUILD_BUG_ON(ARCH_P4_MAX_CCCR > INTEL_PMC_MAX_GENERIC);
rdmsr(MSR_IA32_MISC_ENABLE, low, high);
if (!(low & (1 << 7))) {
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index 32bcfc7dd230..e4dd0f7a0453 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -71,7 +71,7 @@ p6_pmu_disable_event(struct perf_event *event)
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- (void)checking_wrmsrl(hwc->config_base, val);
+ (void)wrmsrl_safe(hwc->config_base, val);
}
static void p6_pmu_enable_event(struct perf_event *event)
@@ -84,7 +84,7 @@ static void p6_pmu_enable_event(struct perf_event *event)
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL_ENABLE;
- (void)checking_wrmsrl(hwc->config_base, val);
+ (void)wrmsrl_safe(hwc->config_base, val);
}
PMU_FORMAT_ATTR(event, "config:0-7" );
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index addf9e82a7f2..ee8e9abc859f 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,7 +31,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
const struct cpuid_bit *cb;
static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
- { X86_FEATURE_DTS, CR_EAX, 0, 0x00000006, 0 },
+ { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
{ X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
{ X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 571246d81edf..ae42418bc50f 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -27,8 +27,8 @@ static int die_counter;
void printk_address(unsigned long address, int reliable)
{
- printk(" [<%p>] %s%pB\n", (void *) address,
- reliable ? "" : "? ", (void *) address);
+ pr_cont(" [<%p>] %s%pB\n",
+ (void *)address, reliable ? "" : "? ", (void *)address);
}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -271,6 +271,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
return 1;
+ print_modules();
show_regs(regs);
#ifdef CONFIG_X86_32
if (user_mode_vm(regs)) {
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index e0b1d783daab..1038a417ea53 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -73,11 +73,11 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (kstack_end(stack))
break;
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- printk(KERN_CONT "\n");
- printk(KERN_CONT " %08lx", *stack++);
+ pr_cont("\n");
+ pr_cont(" %08lx", *stack++);
touch_nmi_watchdog();
}
- printk(KERN_CONT "\n");
+ pr_cont("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
@@ -86,12 +86,11 @@ void show_regs(struct pt_regs *regs)
{
int i;
- print_modules();
__show_regs(regs, !user_mode_vm(regs));
- printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
- TASK_COMM_LEN, current->comm, task_pid_nr(current),
- current_thread_info(), current, task_thread_info(current));
+ pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
+ TASK_COMM_LEN, current->comm, task_pid_nr(current),
+ current_thread_info(), current, task_thread_info(current));
/*
* When in-kernel, we also print out the stack and code at the
* time of the fault..
@@ -102,10 +101,10 @@ void show_regs(struct pt_regs *regs)
unsigned char c;
u8 *ip;
- printk(KERN_EMERG "Stack:\n");
+ pr_emerg("Stack:\n");
show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
- printk(KERN_EMERG "Code: ");
+ pr_emerg("Code:");
ip = (u8 *)regs->ip - code_prologue;
if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
@@ -116,16 +115,16 @@ void show_regs(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
- printk(KERN_CONT " Bad EIP value.");
+ pr_cont(" Bad EIP value.");
break;
}
if (ip == (u8 *)regs->ip)
- printk(KERN_CONT "<%02x> ", c);
+ pr_cont(" <%02x>", c);
else
- printk(KERN_CONT "%02x ", c);
+ pr_cont(" %02x", c);
}
}
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
int is_valid_bugaddr(unsigned long ip)
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 791b76122aa8..b653675d5288 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -228,20 +228,20 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
if (stack >= irq_stack && stack <= irq_stack_end) {
if (stack == irq_stack_end) {
stack = (unsigned long *) (irq_stack_end[-1]);
- printk(KERN_CONT " <EOI> ");
+ pr_cont(" <EOI> ");
}
} else {
if (((long) stack & (THREAD_SIZE-1)) == 0)
break;
}
if (i && ((i % STACKSLOTS_PER_LINE) == 0))
- printk(KERN_CONT "\n");
- printk(KERN_CONT " %016lx", *stack++);
+ pr_cont("\n");
+ pr_cont(" %016lx", *stack++);
touch_nmi_watchdog();
}
preempt_enable();
- printk(KERN_CONT "\n");
+ pr_cont("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}
@@ -254,10 +254,9 @@ void show_regs(struct pt_regs *regs)
sp = regs->sp;
printk("CPU %d ", cpu);
- print_modules();
__show_regs(regs, 1);
- printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
- cur->comm, cur->pid, task_thread_info(cur), cur);
+ printk(KERN_DEFAULT "Process %s (pid: %d, threadinfo %p, task %p)\n",
+ cur->comm, cur->pid, task_thread_info(cur), cur);
/*
* When in-kernel, we also print out the stack and code at the
@@ -284,16 +283,16 @@ void show_regs(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
- printk(KERN_CONT " Bad RIP value.");
+ pr_cont(" Bad RIP value.");
break;
}
if (ip == (u8 *)regs->ip)
- printk(KERN_CONT "<%02x> ", c);
+ pr_cont("<%02x> ", c);
else
- printk(KERN_CONT "%02x ", c);
+ pr_cont("%02x ", c);
}
}
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
int is_valid_bugaddr(unsigned long ip)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 7d65133b51be..111f6bbd8b38 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1758,10 +1758,30 @@ end_repeat_nmi:
*/
call save_paranoid
DEFAULT_FRAME 0
+
+ /*
+ * Save off the CR2 register. If we take a page fault in the NMI then
+ * it could corrupt the CR2 value. If the NMI preempts a page fault
+ * handler before it was able to read the CR2 register, and then the
+ * NMI itself takes a page fault, the page fault that was preempted
+ * will read the information from the NMI page fault and not the
+ * origin fault. Save it off and restore it if it changes.
+ * Use the r12 callee-saved register.
+ */
+ movq %cr2, %r12
+
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp,%rdi
movq $-1,%rsi
call do_nmi
+
+ /* Did the NMI take a page fault? Restore cr2 if it did */
+ movq %cr2, %rcx
+ cmpq %rcx, %r12
+ je 1f
+ movq %r12, %cr2
+1:
+
testl %ebx,%ebx /* swapgs needed? */
jnz nmi_restore
nmi_swapgs:
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3dafc6003b7c..1f5f1d5d2a02 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -294,9 +294,9 @@ void fixup_irqs(void)
raw_spin_unlock(&desc->lock);
if (break_affinity && set_affinity)
- printk("Broke affinity for irq %i\n", irq);
+ pr_notice("Broke affinity for irq %i\n", irq);
else if (!set_affinity)
- printk("Cannot set affinity for irq %i\n", irq);
+ pr_notice("Cannot set affinity for irq %i\n", irq);
}
/*
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 8bfb6146f753..3f61904365cf 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -444,12 +444,12 @@ void kgdb_roundup_cpus(unsigned long flags)
/**
* kgdb_arch_handle_exception - Handle architecture specific GDB packets.
- * @vector: The error vector of the exception that happened.
+ * @e_vector: The error vector of the exception that happened.
* @signo: The signal number of the exception that happened.
* @err_code: The error code of the exception that happened.
- * @remcom_in_buffer: The buffer of the packet we have read.
- * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
- * @regs: The &struct pt_regs of the current process.
+ * @remcomInBuffer: The buffer of the packet we have read.
+ * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
+ * @linux_regs: The &struct pt_regs of the current process.
*
* This function MUST handle the 'c' and 's' command packets,
* as well packets to set / remove a hardware breakpoint, if used.
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index e554e5ad2fe8..c1d61ee4b4f1 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -39,6 +39,9 @@
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
+#include <asm/apic.h>
+#include <asm/apicdef.h>
+#include <asm/hypervisor.h>
static int kvmapf = 1;
@@ -283,6 +286,22 @@ static void kvm_register_steal_time(void)
cpu, __pa(st));
}
+static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
+
+static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
+{
+ /**
+ * This relies on __test_and_clear_bit to modify the memory
+ * in a way that is atomic with respect to the local CPU.
+ * The hypervisor only accesses this memory from the local CPU so
+ * there's no need for lock or memory barriers.
+ * An optimization barrier is implied in apic write.
+ */
+ if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
+ return;
+ apic_write(APIC_EOI, APIC_EOI_ACK);
+}
+
void __cpuinit kvm_guest_cpu_init(void)
{
if (!kvm_para_available())
@@ -300,11 +319,20 @@ void __cpuinit kvm_guest_cpu_init(void)
smp_processor_id());
}
+ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
+ unsigned long pa;
+ /* Size alignment is implied but just to make it explicit. */
+ BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
+ __get_cpu_var(kvm_apic_eoi) = 0;
+ pa = __pa(&__get_cpu_var(kvm_apic_eoi)) | KVM_MSR_ENABLED;
+ wrmsrl(MSR_KVM_PV_EOI_EN, pa);
+ }
+
if (has_steal_clock)
kvm_register_steal_time();
}
-static void kvm_pv_disable_apf(void *unused)
+static void kvm_pv_disable_apf(void)
{
if (!__get_cpu_var(apf_reason).enabled)
return;
@@ -316,11 +344,23 @@ static void kvm_pv_disable_apf(void *unused)
smp_processor_id());
}
+static void kvm_pv_guest_cpu_reboot(void *unused)
+{
+ /*
+ * We disable PV EOI before we load a new kernel by kexec,
+ * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
+ * New kernel can re-enable when it boots.
+ */
+ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+ wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+ kvm_pv_disable_apf();
+}
+
static int kvm_pv_reboot_notify(struct notifier_block *nb,
unsigned long code, void *unused)
{
if (code == SYS_RESTART)
- on_each_cpu(kvm_pv_disable_apf, NULL, 1);
+ on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
return NOTIFY_DONE;
}
@@ -371,7 +411,9 @@ static void __cpuinit kvm_guest_cpu_online(void *dummy)
static void kvm_guest_cpu_offline(void *dummy)
{
kvm_disable_steal_time();
- kvm_pv_disable_apf(NULL);
+ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+ wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+ kvm_pv_disable_apf();
apf_task_wake_all();
}
@@ -424,6 +466,9 @@ void __init kvm_guest_init(void)
pv_time_ops.steal_clock = kvm_steal_clock;
}
+ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+ apic_set_eoi_write(kvm_guest_apic_eoi_write);
+
#ifdef CONFIG_SMP
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
register_cpu_notifier(&kvm_cpu_notifier);
@@ -432,6 +477,19 @@ void __init kvm_guest_init(void)
#endif
}
+static bool __init kvm_detect(void)
+{
+ if (!kvm_para_available())
+ return false;
+ return true;
+}
+
+const struct hypervisor_x86 x86_hyper_kvm __refconst = {
+ .name = "KVM",
+ .detect = kvm_detect,
+};
+EXPORT_SYMBOL_GPL(x86_hyper_kvm);
+
static __init int activate_jump_labels(void)
{
if (has_steal_clock) {
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index fbdfc6917180..4873e62db6a1 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -87,6 +87,7 @@
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
+#include <asm/perf_event.h>
MODULE_DESCRIPTION("Microcode Update Driver");
MODULE_AUTHOR("Tigran Aivazian <tigran@aivazian.fsnet.co.uk>");
@@ -277,7 +278,6 @@ static int reload_for_cpu(int cpu)
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
int err = 0;
- mutex_lock(&microcode_mutex);
if (uci->valid) {
enum ucode_state ustate;
@@ -288,7 +288,6 @@ static int reload_for_cpu(int cpu)
if (ustate == UCODE_ERROR)
err = -EINVAL;
}
- mutex_unlock(&microcode_mutex);
return err;
}
@@ -298,19 +297,31 @@ static ssize_t reload_store(struct device *dev,
const char *buf, size_t size)
{
unsigned long val;
- int cpu = dev->id;
- ssize_t ret = 0;
+ int cpu;
+ ssize_t ret = 0, tmp_ret;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
- if (val == 1) {
- get_online_cpus();
- if (cpu_online(cpu))
- ret = reload_for_cpu(cpu);
- put_online_cpus();
+ if (val != 1)
+ return size;
+
+ get_online_cpus();
+ mutex_lock(&microcode_mutex);
+ for_each_online_cpu(cpu) {
+ tmp_ret = reload_for_cpu(cpu);
+ if (tmp_ret != 0)
+ pr_warn("Error reloading microcode on CPU %d\n", cpu);
+
+ /* save retval of the first encountered reload error */
+ if (!ret)
+ ret = tmp_ret;
}
+ if (!ret)
+ perf_check_microcode();
+ mutex_unlock(&microcode_mutex);
+ put_online_cpus();
if (!ret)
ret = size;
@@ -339,7 +350,6 @@ static DEVICE_ATTR(version, 0400, version_show, NULL);
static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
static struct attribute *mc_default_attrs[] = {
- &dev_attr_reload.attr,
&dev_attr_version.attr,
&dev_attr_processor_flags.attr,
NULL
@@ -504,7 +514,7 @@ static struct notifier_block __refdata mc_cpu_notifier = {
#ifdef MODULE
/* Autoload on Intel and AMD systems */
-static const struct x86_cpu_id microcode_id[] = {
+static const struct x86_cpu_id __initconst microcode_id[] = {
#ifdef CONFIG_MICROCODE_INTEL
{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, },
#endif
@@ -516,6 +526,16 @@ static const struct x86_cpu_id microcode_id[] = {
MODULE_DEVICE_TABLE(x86cpu, microcode_id);
#endif
+static struct attribute *cpu_root_microcode_attrs[] = {
+ &dev_attr_reload.attr,
+ NULL
+};
+
+static struct attribute_group cpu_root_microcode_group = {
+ .name = "microcode",
+ .attrs = cpu_root_microcode_attrs,
+};
+
static int __init microcode_init(void)
{
struct cpuinfo_x86 *c = &cpu_data(0);
@@ -540,16 +560,25 @@ static int __init microcode_init(void)
mutex_lock(&microcode_mutex);
error = subsys_interface_register(&mc_cpu_interface);
-
+ if (!error)
+ perf_check_microcode();
mutex_unlock(&microcode_mutex);
put_online_cpus();
if (error)
goto out_pdev;
+ error = sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpu_root_microcode_group);
+
+ if (error) {
+ pr_err("Error creating microcode group!\n");
+ goto out_driver;
+ }
+
error = microcode_dev_init();
if (error)
- goto out_driver;
+ goto out_ucode_group;
register_syscore_ops(&mc_syscore_ops);
register_hotcpu_notifier(&mc_cpu_notifier);
@@ -559,7 +588,11 @@ static int __init microcode_init(void)
return 0;
-out_driver:
+ out_ucode_group:
+ sysfs_remove_group(&cpu_subsys.dev_root->kobj,
+ &cpu_root_microcode_group);
+
+ out_driver:
get_online_cpus();
mutex_lock(&microcode_mutex);
@@ -568,7 +601,7 @@ out_driver:
mutex_unlock(&microcode_mutex);
put_online_cpus();
-out_pdev:
+ out_pdev:
platform_device_unregister(microcode_pdev);
return error;
@@ -584,6 +617,9 @@ static void __exit microcode_exit(void)
unregister_hotcpu_notifier(&mc_cpu_notifier);
unregister_syscore_ops(&mc_syscore_ops);
+ sysfs_remove_group(&cpu_subsys.dev_root->kobj,
+ &cpu_root_microcode_group);
+
get_online_cpus();
mutex_lock(&microcode_mutex);
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index f21fd94ac897..216a4d754b0c 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -15,6 +15,9 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
@@ -30,9 +33,14 @@
#include <asm/pgtable.h>
#if 0
-#define DEBUGP printk
+#define DEBUGP(fmt, ...) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
-#define DEBUGP(fmt...)
+#define DEBUGP(fmt, ...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+} while (0)
#endif
void *module_alloc(unsigned long size)
@@ -56,8 +64,8 @@ int apply_relocate(Elf32_Shdr *sechdrs,
Elf32_Sym *sym;
uint32_t *location;
- DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
+ DEBUGP("Applying relocate section %u to %u\n",
+ relsec, sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -73,11 +81,11 @@ int apply_relocate(Elf32_Shdr *sechdrs,
*location += sym->st_value;
break;
case R_386_PC32:
- /* Add the value, subtract its postition */
+ /* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
default:
- printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+ pr_err("%s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
@@ -97,8 +105,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
void *loc;
u64 val;
- DEBUGP("Applying relocate section %u to %u\n", relsec,
- sechdrs[relsec].sh_info);
+ DEBUGP("Applying relocate section %u to %u\n",
+ relsec, sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -110,8 +118,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
+ ELF64_R_SYM(rel[i].r_info);
DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
- (int)ELF64_R_TYPE(rel[i].r_info),
- sym->st_value, rel[i].r_addend, (u64)loc);
+ (int)ELF64_R_TYPE(rel[i].r_info),
+ sym->st_value, rel[i].r_addend, (u64)loc);
val = sym->st_value + rel[i].r_addend;
@@ -140,7 +148,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
#endif
break;
default:
- printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
+ pr_err("%s: Unknown rela relocation: %llu\n",
me->name, ELF64_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
@@ -148,9 +156,9 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
return 0;
overflow:
- printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
+ pr_err("overflow in relocation type %d val %Lx\n",
(int)ELF64_R_TYPE(rel[i].r_info), val);
- printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
+ pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
me->name);
return -ENOEXEC;
}
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index a0b2f84457be..f84f5c57de35 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -365,8 +365,9 @@ static __kprobes void default_do_nmi(struct pt_regs *regs)
#ifdef CONFIG_X86_32
/*
* For i386, NMIs use the same stack as the kernel, and we can
- * add a workaround to the iret problem in C. Simply have 3 states
- * the NMI can be in.
+ * add a workaround to the iret problem in C (preventing nested
+ * NMIs if an NMI takes a trap). Simply have 3 states the NMI
+ * can be in:
*
* 1) not running
* 2) executing
@@ -383,32 +384,50 @@ static __kprobes void default_do_nmi(struct pt_regs *regs)
* If an NMI hits a breakpoint that executes an iret, another
* NMI can preempt it. We do not want to allow this new NMI
* to run, but we want to execute it when the first one finishes.
- * We set the state to "latched", and the first NMI will perform
- * an cmpxchg on the state, and if it doesn't successfully
- * reset the state to "not running" it will restart the next
- * NMI.
+ * We set the state to "latched", and the exit of the first NMI will
+ * perform a dec_return, if the result is zero (NOT_RUNNING), then
+ * it will simply exit the NMI handler. If not, the dec_return
+ * would have set the state to NMI_EXECUTING (what we want it to
+ * be when we are running). In this case, we simply jump back
+ * to rerun the NMI handler again, and restart the 'latched' NMI.
+ *
+ * No trap (breakpoint or page fault) should be hit before nmi_restart,
+ * thus there is no race between the first check of state for NOT_RUNNING
+ * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs
+ * at this point.
+ *
+ * In case the NMI takes a page fault, we need to save off the CR2
+ * because the NMI could have preempted another page fault and corrupt
+ * the CR2 that is about to be read. As nested NMIs must be restarted
+ * and they can not take breakpoints or page faults, the update of the
+ * CR2 must be done before converting the nmi state back to NOT_RUNNING.
+ * Otherwise, there would be a race of another nested NMI coming in
+ * after setting state to NOT_RUNNING but before updating the nmi_cr2.
*/
enum nmi_states {
- NMI_NOT_RUNNING,
+ NMI_NOT_RUNNING = 0,
NMI_EXECUTING,
NMI_LATCHED,
};
static DEFINE_PER_CPU(enum nmi_states, nmi_state);
+static DEFINE_PER_CPU(unsigned long, nmi_cr2);
#define nmi_nesting_preprocess(regs) \
do { \
- if (__get_cpu_var(nmi_state) != NMI_NOT_RUNNING) { \
- __get_cpu_var(nmi_state) = NMI_LATCHED; \
+ if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
+ this_cpu_write(nmi_state, NMI_LATCHED); \
return; \
} \
- nmi_restart: \
- __get_cpu_var(nmi_state) = NMI_EXECUTING; \
- } while (0)
+ this_cpu_write(nmi_state, NMI_EXECUTING); \
+ this_cpu_write(nmi_cr2, read_cr2()); \
+ } while (0); \
+ nmi_restart:
#define nmi_nesting_postprocess() \
do { \
- if (cmpxchg(&__get_cpu_var(nmi_state), \
- NMI_EXECUTING, NMI_NOT_RUNNING) != NMI_EXECUTING) \
+ if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
+ write_cr2(this_cpu_read(nmi_cr2)); \
+ if (this_cpu_dec_return(nmi_state)) \
goto nmi_restart; \
} while (0)
#else /* x86_64 */
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index 149b8d9c6ad4..6d9582ec0324 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -42,7 +42,8 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs)
static void __init init_nmi_testsuite(void)
{
/* trap all the unknown NMIs we may generate */
- register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
+ register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
+ __initdata);
}
static void __init cleanup_nmi_testsuite(void)
@@ -64,8 +65,8 @@ static void __init test_nmi_ipi(struct cpumask *mask)
{
unsigned long timeout;
- if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback,
- NMI_FLAG_FIRST, "nmi_selftest")) {
+ if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
+ NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
nmi_fail = FAILURE;
return;
}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 9ce885996fd7..17fff18a1031 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -352,9 +352,7 @@ struct pv_cpu_ops pv_cpu_ops = {
#endif
.wbinvd = native_wbinvd,
.read_msr = native_read_msr_safe,
- .rdmsr_regs = native_rdmsr_safe_regs,
.write_msr = native_write_msr_safe,
- .wrmsr_regs = native_wrmsr_safe_regs,
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
.read_tscp = native_read_tscp,
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index b72838bae64a..299d49302e7d 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -22,6 +22,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) "Calgary: " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -245,7 +247,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
offset = iommu_area_alloc(tbl->it_map, tbl->it_size, 0,
npages, 0, boundary_size, 0);
if (offset == ~0UL) {
- printk(KERN_WARNING "Calgary: IOMMU full.\n");
+ pr_warn("IOMMU full\n");
spin_unlock_irqrestore(&tbl->it_lock, flags);
if (panic_on_overflow)
panic("Calgary: fix the allocator.\n");
@@ -271,8 +273,8 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
entry = iommu_range_alloc(dev, tbl, npages);
if (unlikely(entry == DMA_ERROR_CODE)) {
- printk(KERN_WARNING "Calgary: failed to allocate %u pages in "
- "iommu %p\n", npages, tbl);
+ pr_warn("failed to allocate %u pages in iommu %p\n",
+ npages, tbl);
return DMA_ERROR_CODE;
}
@@ -561,8 +563,7 @@ static void calgary_tce_cache_blast(struct iommu_table *tbl)
i++;
} while ((val & 0xff) != 0xff && i < 100);
if (i == 100)
- printk(KERN_WARNING "Calgary: PCI bus not quiesced, "
- "continuing anyway\n");
+ pr_warn("PCI bus not quiesced, continuing anyway\n");
/* invalidate TCE cache */
target = calgary_reg(bbar, tar_offset(tbl->it_busno));
@@ -604,8 +605,7 @@ begin:
i++;
} while ((val64 & 0xff) != 0xff && i < 100);
if (i == 100)
- printk(KERN_WARNING "CalIOC2: PCI bus not quiesced, "
- "continuing anyway\n");
+ pr_warn("CalIOC2: PCI bus not quiesced, continuing anyway\n");
/* 3. poll Page Migration DEBUG for SoftStopFault */
target = calgary_reg(bbar, phb_offset(bus) | PHB_PAGE_MIG_DEBUG);
@@ -617,8 +617,7 @@ begin:
if (++count < 100)
goto begin;
else {
- printk(KERN_WARNING "CalIOC2: too many SoftStopFaults, "
- "aborting TCE cache flush sequence!\n");
+ pr_warn("CalIOC2: too many SoftStopFaults, aborting TCE cache flush sequence!\n");
return; /* pray for the best */
}
}
@@ -840,8 +839,8 @@ static void calgary_dump_error_regs(struct iommu_table *tbl)
plssr = be32_to_cpu(readl(target));
/* If no error, the agent ID in the CSR is not valid */
- printk(KERN_EMERG "Calgary: DMA error on Calgary PHB 0x%x, "
- "0x%08x@CSR 0x%08x@PLSSR\n", tbl->it_busno, csr, plssr);
+ pr_emerg("DMA error on Calgary PHB 0x%x, 0x%08x@CSR 0x%08x@PLSSR\n",
+ tbl->it_busno, csr, plssr);
}
static void calioc2_dump_error_regs(struct iommu_table *tbl)
@@ -867,22 +866,21 @@ static void calioc2_dump_error_regs(struct iommu_table *tbl)
target = calgary_reg(bbar, phboff | 0x800);
mck = be32_to_cpu(readl(target));
- printk(KERN_EMERG "Calgary: DMA error on CalIOC2 PHB 0x%x\n",
- tbl->it_busno);
+ pr_emerg("DMA error on CalIOC2 PHB 0x%x\n", tbl->it_busno);
- printk(KERN_EMERG "Calgary: 0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n",
- csr, plssr, csmr, mck);
+ pr_emerg("0x%08x@CSR 0x%08x@PLSSR 0x%08x@CSMR 0x%08x@MCK\n",
+ csr, plssr, csmr, mck);
/* dump rest of error regs */
- printk(KERN_EMERG "Calgary: ");
+ pr_emerg("");
for (i = 0; i < ARRAY_SIZE(errregs); i++) {
/* err regs are at 0x810 - 0x870 */
erroff = (0x810 + (i * 0x10));
target = calgary_reg(bbar, phboff | erroff);
errregs[i] = be32_to_cpu(readl(target));
- printk("0x%08x@0x%lx ", errregs[i], erroff);
+ pr_cont("0x%08x@0x%lx ", errregs[i], erroff);
}
- printk("\n");
+ pr_cont("\n");
/* root complex status */
target = calgary_reg(bbar, phboff | PHB_ROOT_COMPLEX_STATUS);
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index c0f420f76cd3..de2b7ad70273 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -45,15 +45,6 @@ int iommu_detected __read_mostly = 0;
*/
int iommu_pass_through __read_mostly;
-/*
- * Group multi-function PCI devices into a single device-group for the
- * iommu_device_group interface. This tells the iommu driver to pretend
- * it cannot distinguish between functions of a device, exposing only one
- * group for the device. Useful for disallowing use of individual PCI
- * functions from userspace drivers.
- */
-int iommu_group_mf __read_mostly;
-
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
/* Dummy device used for NULL arguments (normally ISA). */
@@ -194,8 +185,6 @@ static __init int iommu_setup(char *p)
#endif
if (!strncmp(p, "pt", 2))
iommu_pass_through = 1;
- if (!strncmp(p, "group_mf", 8))
- iommu_group_mf = 1;
gart_parse_options(p);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 735279e54e59..ef6a8456f719 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -145,16 +147,14 @@ void show_regs_common(void)
/* Board Name is optional */
board = dmi_get_system_info(DMI_BOARD_NAME);
- printk(KERN_CONT "\n");
- printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
- current->pid, current->comm, print_tainted(),
- init_utsname()->release,
- (int)strcspn(init_utsname()->version, " "),
- init_utsname()->version);
- printk(KERN_CONT " %s %s", vendor, product);
- if (board)
- printk(KERN_CONT "/%s", board);
- printk(KERN_CONT "\n");
+ printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
+ current->pid, current->comm, print_tainted(),
+ init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version,
+ vendor, product,
+ board ? "/" : "",
+ board ? board : "");
}
void flush_thread(void)
@@ -645,7 +645,7 @@ static void amd_e400_idle(void)
amd_e400_c1e_detected = true;
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halt in AMD C1E");
- printk(KERN_INFO "System has AMD C1E enabled\n");
+ pr_info("System has AMD C1E enabled\n");
}
}
@@ -659,8 +659,7 @@ static void amd_e400_idle(void)
*/
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
&cpu);
- printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
- cpu);
+ pr_info("Switch to broadcast mode on CPU%d\n", cpu);
}
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
@@ -681,8 +680,7 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
if (pm_idle == poll_idle && smp_num_siblings > 1) {
- printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
- " performance may degrade.\n");
+ pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
}
#endif
if (pm_idle)
@@ -692,11 +690,11 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
/*
* One CPU supports mwait => All CPUs supports mwait
*/
- printk(KERN_INFO "using mwait in idle threads.\n");
+ pr_info("using mwait in idle threads\n");
pm_idle = mwait_idle;
} else if (cpu_has_amd_erratum(amd_erratum_400)) {
/* E400: APIC timer interrupt does not wake up CPU from C1e */
- printk(KERN_INFO "using AMD E400 aware idle routine\n");
+ pr_info("using AMD E400 aware idle routine\n");
pm_idle = amd_e400_idle;
} else
pm_idle = default_idle;
@@ -715,7 +713,7 @@ static int __init idle_setup(char *str)
return -EINVAL;
if (!strcmp(str, "poll")) {
- printk("using polling idle threads.\n");
+ pr_info("using polling idle threads\n");
pm_idle = poll_idle;
boot_option_idle_override = IDLE_POLL;
} else if (!strcmp(str, "mwait")) {
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 61cdf7fdf099..0a980c9d7cb8 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -117,10 +117,10 @@ void release_thread(struct task_struct *dead_task)
{
if (dead_task->mm) {
if (dead_task->mm->context.size) {
- printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
- dead_task->comm,
- dead_task->mm->context.ldt,
- dead_task->mm->context.size);
+ pr_warn("WARNING: dead process %8s still has LDT? <%p/%d>\n",
+ dead_task->comm,
+ dead_task->mm->context.ldt,
+ dead_task->mm->context.size);
BUG();
}
}
@@ -466,7 +466,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
task->thread.gs = addr;
if (doit) {
load_gs_index(0);
- ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
+ ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
}
}
put_cpu();
@@ -494,7 +494,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
/* set the selector to 0 to not confuse
__switch_to */
loadsegment(fs, 0);
- ret = checking_wrmsrl(MSR_FS_BASE, addr);
+ ret = wrmsrl_safe(MSR_FS_BASE, addr);
}
}
put_cpu();
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 03920a15a632..1b27de563561 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -512,7 +512,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
/* Set correct numa_node information for AMD NB functions */
-static void __init quirk_amd_nb_node(struct pci_dev *dev)
+static void __devinit quirk_amd_nb_node(struct pci_dev *dev)
{
struct pci_dev *nb_ht;
unsigned int devfn;
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 25b48edb847c..52190a938b4a 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/init.h>
@@ -20,14 +22,12 @@
#include <asm/virtext.h>
#include <asm/cpu.h>
#include <asm/nmi.h>
+#include <asm/smp.h>
-#ifdef CONFIG_X86_32
-# include <linux/ctype.h>
-# include <linux/mc146818rtc.h>
-# include <asm/realmode.h>
-#else
-# include <asm/x86_init.h>
-#endif
+#include <linux/ctype.h>
+#include <linux/mc146818rtc.h>
+#include <asm/realmode.h>
+#include <asm/x86_init.h>
/*
* Power off function, if any
@@ -49,7 +49,7 @@ int reboot_force;
*/
static int reboot_default = 1;
-#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
+#ifdef CONFIG_SMP
static int reboot_cpu = -1;
#endif
@@ -67,8 +67,8 @@ bool port_cf9_safe = false;
* reboot=b[ios] | s[mp] | t[riple] | k[bd] | e[fi] [, [w]arm | [c]old] | p[ci]
* warm Don't set the cold reboot flag
* cold Set the cold reboot flag
- * bios Reboot by jumping through the BIOS (only for X86_32)
- * smp Reboot by executing reset on BSP or other CPU (only for X86_32)
+ * bios Reboot by jumping through the BIOS
+ * smp Reboot by executing reset on BSP or other CPU
* triple Force a triple fault (init)
* kbd Use the keyboard controller. cold reset (default)
* acpi Use the RESET_REG in the FADT
@@ -95,7 +95,6 @@ static int __init reboot_setup(char *str)
reboot_mode = 0;
break;
-#ifdef CONFIG_X86_32
#ifdef CONFIG_SMP
case 's':
if (isdigit(*(str+1))) {
@@ -112,7 +111,6 @@ static int __init reboot_setup(char *str)
#endif /* CONFIG_SMP */
case 'b':
-#endif
case 'a':
case 'k':
case 't':
@@ -138,7 +136,6 @@ static int __init reboot_setup(char *str)
__setup("reboot=", reboot_setup);
-#ifdef CONFIG_X86_32
/*
* Reboot options and system auto-detection code provided by
* Dell Inc. so their systems "just work". :-)
@@ -152,16 +149,14 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
{
if (reboot_type != BOOT_BIOS) {
reboot_type = BOOT_BIOS;
- printk(KERN_INFO "%s series board detected. Selecting BIOS-method for reboots.\n", d->ident);
+ pr_info("%s series board detected. Selecting %s-method for reboots.\n",
+ "BIOS", d->ident);
}
return 0;
}
-void machine_real_restart(unsigned int type)
+void __noreturn machine_real_restart(unsigned int type)
{
- void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int))
- real_mode_header->machine_real_restart_asm;
-
local_irq_disable();
/*
@@ -181,25 +176,28 @@ void machine_real_restart(unsigned int type)
/*
* Switch back to the initial page table.
*/
+#ifdef CONFIG_X86_32
load_cr3(initial_page_table);
-
- /*
- * Write 0x1234 to absolute memory location 0x472. The BIOS reads
- * this on booting to tell it to "Bypass memory test (also warm
- * boot)". This seems like a fairly standard thing that gets set by
- * REBOOT.COM programs, and the previous reset routine did this
- * too. */
- *((unsigned short *)0x472) = reboot_mode;
+#else
+ write_cr3(real_mode_header->trampoline_pgd);
+#endif
/* Jump to the identity-mapped low memory code */
- restart_lowmem(type);
+#ifdef CONFIG_X86_32
+ asm volatile("jmpl *%0" : :
+ "rm" (real_mode_header->machine_real_restart_asm),
+ "a" (type));
+#else
+ asm volatile("ljmpl *%0" : :
+ "m" (real_mode_header->machine_real_restart_asm),
+ "D" (type));
+#endif
+ unreachable();
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(machine_real_restart);
#endif
-#endif /* CONFIG_X86_32 */
-
/*
* Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
*/
@@ -207,8 +205,8 @@ static int __init set_pci_reboot(const struct dmi_system_id *d)
{
if (reboot_type != BOOT_CF9) {
reboot_type = BOOT_CF9;
- printk(KERN_INFO "%s series board detected. "
- "Selecting PCI-method for reboots.\n", d->ident);
+ pr_info("%s series board detected. Selecting %s-method for reboots.\n",
+ "PCI", d->ident);
}
return 0;
}
@@ -217,17 +215,16 @@ static int __init set_kbd_reboot(const struct dmi_system_id *d)
{
if (reboot_type != BOOT_KBD) {
reboot_type = BOOT_KBD;
- printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
+ pr_info("%s series board detected. Selecting %s-method for reboot.\n",
+ "KBD", d->ident);
}
return 0;
}
/*
- * This is a single dmi_table handling all reboot quirks. Note that
- * REBOOT_BIOS is only available for 32bit
+ * This is a single dmi_table handling all reboot quirks.
*/
static struct dmi_system_id __initdata reboot_dmi_table[] = {
-#ifdef CONFIG_X86_32
{ /* Handle problems with rebooting on Dell E520's */
.callback = set_bios_reboot,
.ident = "Dell E520",
@@ -377,7 +374,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
-#endif /* CONFIG_X86_32 */
{ /* Handle reboot issue on Acer Aspire one */
.callback = set_kbd_reboot,
@@ -451,6 +447,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
},
},
+ { /* Handle problems with rebooting on the Precision M6600. */
+ .callback = set_pci_reboot,
+ .ident = "Dell OptiPlex 990",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"),
+ },
+ },
{ }
};
@@ -576,13 +580,11 @@ static void native_machine_emergency_restart(void)
reboot_type = BOOT_KBD;
break;
-#ifdef CONFIG_X86_32
case BOOT_BIOS:
machine_real_restart(MRR_BIOS);
reboot_type = BOOT_KBD;
break;
-#endif
case BOOT_ACPI:
acpi_reboot();
@@ -624,12 +626,10 @@ void native_machine_shutdown(void)
/* The boot cpu is always logical cpu 0 */
int reboot_cpu_id = 0;
-#ifdef CONFIG_X86_32
/* See if there has been given a command line override */
if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
cpu_online(reboot_cpu))
reboot_cpu_id = reboot_cpu;
-#endif
/* Make certain the cpu I'm about to reboot on is online */
if (!cpu_online(reboot_cpu_id))
@@ -670,7 +670,7 @@ static void __machine_emergency_restart(int emergency)
static void native_machine_restart(char *__unused)
{
- printk("machine restart\n");
+ pr_notice("machine restart\n");
if (!reboot_force)
machine_shutdown();
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 16be6dc14db1..f4b9b80e1b95 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1031,8 +1031,6 @@ void __init setup_arch(char **cmdline_p)
x86_init.timers.wallclock_init();
- x86_platform.wallclock_init();
-
mcheck_init();
arch_init_ideal_nops();
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 21af737053aa..b280908a376e 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -6,6 +6,9 @@
* 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes
* 2000-2002 x86-64 support by Andi Kleen
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
@@ -814,7 +817,7 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where)
me->comm, me->pid, where, frame,
regs->ip, regs->sp, regs->orig_ax);
print_vma_addr(" in ", regs->ip);
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
force_sig(SIGSEGV, me);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 7bd8a0823654..c1a310fb8309 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1,4 +1,4 @@
-/*
+ /*
* x86 SMP booting functions
*
* (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
@@ -39,6 +39,8 @@
* Glauber Costa : i386 and x86_64 integration
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
@@ -184,7 +186,7 @@ static void __cpuinit smp_callin(void)
* boards)
*/
- pr_debug("CALLIN, before setup_local_APIC().\n");
+ pr_debug("CALLIN, before setup_local_APIC()\n");
if (apic->smp_callin_clear_local_apic)
apic->smp_callin_clear_local_apic();
setup_local_APIC();
@@ -255,22 +257,13 @@ notrace static void __cpuinit start_secondary(void *unused)
check_tsc_sync_target();
/*
- * We need to hold call_lock, so there is no inconsistency
- * between the time smp_call_function() determines number of
- * IPI recipients, and the time when the determination is made
- * for which cpus receive the IPI. Holding this
- * lock helps us to not include this cpu in a currently in progress
- * smp_call_function().
- *
* We need to hold vector_lock so there the set of online cpus
* does not change while we are assigning vectors to cpus. Holding
* this lock ensures we don't half assign or remove an irq from a cpu.
*/
- ipi_call_lock();
lock_vector_lock();
set_cpu_online(smp_processor_id(), true);
unlock_vector_lock();
- ipi_call_unlock();
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
x86_platform.nmi_init();
@@ -432,17 +425,16 @@ static void impress_friends(void)
/*
* Allow the user to impress friends.
*/
- pr_debug("Before bogomips.\n");
+ pr_debug("Before bogomips\n");
for_each_possible_cpu(cpu)
if (cpumask_test_cpu(cpu, cpu_callout_mask))
bogosum += cpu_data(cpu).loops_per_jiffy;
- printk(KERN_INFO
- "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+ pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
num_online_cpus(),
bogosum/(500000/HZ),
(bogosum/(5000/HZ))%100);
- pr_debug("Before bogocount - setting activated=1.\n");
+ pr_debug("Before bogocount - setting activated=1\n");
}
void __inquire_remote_apic(int apicid)
@@ -452,18 +444,17 @@ void __inquire_remote_apic(int apicid)
int timeout;
u32 status;
- printk(KERN_INFO "Inquiring remote APIC 0x%x...\n", apicid);
+ pr_info("Inquiring remote APIC 0x%x...\n", apicid);
for (i = 0; i < ARRAY_SIZE(regs); i++) {
- printk(KERN_INFO "... APIC 0x%x %s: ", apicid, names[i]);
+ pr_info("... APIC 0x%x %s: ", apicid, names[i]);
/*
* Wait for idle.
*/
status = safe_apic_wait_icr_idle();
if (status)
- printk(KERN_CONT
- "a previous APIC delivery may have failed\n");
+ pr_cont("a previous APIC delivery may have failed\n");
apic_icr_write(APIC_DM_REMRD | regs[i], apicid);
@@ -476,10 +467,10 @@ void __inquire_remote_apic(int apicid)
switch (status) {
case APIC_ICR_RR_VALID:
status = apic_read(APIC_RRR);
- printk(KERN_CONT "%08x\n", status);
+ pr_cont("%08x\n", status);
break;
default:
- printk(KERN_CONT "failed\n");
+ pr_cont("failed\n");
}
}
}
@@ -513,12 +504,12 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
apic_write(APIC_ESR, 0);
accept_status = (apic_read(APIC_ESR) & 0xEF);
}
- pr_debug("NMI sent.\n");
+ pr_debug("NMI sent\n");
if (send_status)
- printk(KERN_ERR "APIC never delivered???\n");
+ pr_err("APIC never delivered???\n");
if (accept_status)
- printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
+ pr_err("APIC delivery error (%lx)\n", accept_status);
return (send_status | accept_status);
}
@@ -540,7 +531,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
apic_read(APIC_ESR);
}
- pr_debug("Asserting INIT.\n");
+ pr_debug("Asserting INIT\n");
/*
* Turn INIT on target chip
@@ -556,7 +547,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
mdelay(10);
- pr_debug("Deasserting INIT.\n");
+ pr_debug("Deasserting INIT\n");
/* Target chip */
/* Send IPI */
@@ -589,14 +580,14 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
/*
* Run STARTUP IPI loop.
*/
- pr_debug("#startup loops: %d.\n", num_starts);
+ pr_debug("#startup loops: %d\n", num_starts);
for (j = 1; j <= num_starts; j++) {
- pr_debug("Sending STARTUP #%d.\n", j);
+ pr_debug("Sending STARTUP #%d\n", j);
if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
apic_write(APIC_ESR, 0);
apic_read(APIC_ESR);
- pr_debug("After apic_write.\n");
+ pr_debug("After apic_write\n");
/*
* STARTUP IPI
@@ -613,7 +604,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
*/
udelay(300);
- pr_debug("Startup point 1.\n");
+ pr_debug("Startup point 1\n");
pr_debug("Waiting for send to finish...\n");
send_status = safe_apic_wait_icr_idle();
@@ -628,12 +619,12 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
if (send_status || accept_status)
break;
}
- pr_debug("After Startup.\n");
+ pr_debug("After Startup\n");
if (send_status)
- printk(KERN_ERR "APIC never delivered???\n");
+ pr_err("APIC never delivered???\n");
if (accept_status)
- printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);
+ pr_err("APIC delivery error (%lx)\n", accept_status);
return (send_status | accept_status);
}
@@ -647,11 +638,11 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
if (system_state == SYSTEM_BOOTING) {
if (node != current_node) {
if (current_node > (-1))
- pr_cont(" Ok.\n");
+ pr_cont(" OK\n");
current_node = node;
pr_info("Booting Node %3d, Processors ", node);
}
- pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " Ok.\n" : "");
+ pr_cont(" #%d%s", cpu, cpu == (nr_cpu_ids - 1) ? " OK\n" : "");
return;
} else
pr_info("Booting Node %d Processor %d APIC 0x%x\n",
@@ -731,9 +722,9 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
/*
* allow APs to start initializing.
*/
- pr_debug("Before Callout %d.\n", cpu);
+ pr_debug("Before Callout %d\n", cpu);
cpumask_set_cpu(cpu, cpu_callout_mask);
- pr_debug("After Callout %d.\n", cpu);
+ pr_debug("After Callout %d\n", cpu);
/*
* Wait 5s total for a response
@@ -761,7 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
pr_err("CPU%d: Stuck ??\n", cpu);
else
/* trampoline code not run */
- pr_err("CPU%d: Not responding.\n", cpu);
+ pr_err("CPU%d: Not responding\n", cpu);
if (apic->inquire_remote_apic)
apic->inquire_remote_apic(apicid);
}
@@ -806,7 +797,7 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
!physid_isset(apicid, phys_cpu_present_map) ||
!apic->apic_id_valid(apicid)) {
- printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
+ pr_err("%s: bad cpu %d\n", __func__, cpu);
return -EINVAL;
}
@@ -887,9 +878,8 @@ static int __init smp_sanity_check(unsigned max_cpus)
unsigned int cpu;
unsigned nr;
- printk(KERN_WARNING
- "More than 8 CPUs detected - skipping them.\n"
- "Use CONFIG_X86_BIGSMP.\n");
+ pr_warn("More than 8 CPUs detected - skipping them\n"
+ "Use CONFIG_X86_BIGSMP\n");
nr = 0;
for_each_present_cpu(cpu) {
@@ -910,8 +900,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
#endif
if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
- printk(KERN_WARNING
- "weird, boot CPU (#%d) not listed by the BIOS.\n",
+ pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
hard_smp_processor_id());
physid_set(hard_smp_processor_id(), phys_cpu_present_map);
@@ -923,11 +912,10 @@ static int __init smp_sanity_check(unsigned max_cpus)
*/
if (!smp_found_config && !acpi_lapic) {
preempt_enable();
- printk(KERN_NOTICE "SMP motherboard not detected.\n");
+ pr_notice("SMP motherboard not detected\n");
disable_smp();
if (APIC_init_uniprocessor())
- printk(KERN_NOTICE "Local APIC not detected."
- " Using dummy APIC emulation.\n");
+ pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
return -1;
}
@@ -936,9 +924,8 @@ static int __init smp_sanity_check(unsigned max_cpus)
* CPU too, but we do it for the sake of robustness anyway.
*/
if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
- printk(KERN_NOTICE
- "weird, boot CPU (#%d) not listed by the BIOS.\n",
- boot_cpu_physical_apicid);
+ pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
+ boot_cpu_physical_apicid);
physid_set(hard_smp_processor_id(), phys_cpu_present_map);
}
preempt_enable();
@@ -951,8 +938,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
if (!disable_apic) {
pr_err("BIOS bug, local APIC #%d not detected!...\n",
boot_cpu_physical_apicid);
- pr_err("... forcing use of dummy APIC emulation."
- "(tell your hw vendor)\n");
+ pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
}
smpboot_clear_io_apic();
disable_ioapic_support();
@@ -965,7 +951,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
* If SMP should be disabled, then really disable it!
*/
if (!max_cpus) {
- printk(KERN_INFO "SMP mode deactivated.\n");
+ pr_info("SMP mode deactivated\n");
smpboot_clear_io_apic();
connect_bsp_APIC();
@@ -1017,7 +1003,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
if (smp_sanity_check(max_cpus) < 0) {
- printk(KERN_INFO "SMP disabled\n");
+ pr_info("SMP disabled\n");
disable_smp();
goto out;
}
@@ -1055,7 +1041,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
* Set up local APIC timer on boot CPU.
*/
- printk(KERN_INFO "CPU%d: ", 0);
+ pr_info("CPU%d: ", 0);
print_cpu_info(&cpu_data(0));
x86_init.timers.setup_percpu_clockev();
@@ -1105,7 +1091,7 @@ void __init native_smp_prepare_boot_cpu(void)
void __init native_smp_cpus_done(unsigned int max_cpus)
{
- pr_debug("Boot done.\n");
+ pr_debug("Boot done\n");
nmi_selftest();
impress_friends();
@@ -1166,8 +1152,7 @@ __init void prefill_possible_map(void)
/* nr_cpu_ids could be reduced via nr_cpus= */
if (possible > nr_cpu_ids) {
- printk(KERN_WARNING
- "%d Processors exceeds NR_CPUS limit of %d\n",
+ pr_warn("%d Processors exceeds NR_CPUS limit of %d\n",
possible, nr_cpu_ids);
possible = nr_cpu_ids;
}
@@ -1176,13 +1161,12 @@ __init void prefill_possible_map(void)
if (!setup_max_cpus)
#endif
if (possible > i) {
- printk(KERN_WARNING
- "%d Processors exceeds max_cpus limit of %u\n",
+ pr_warn("%d Processors exceeds max_cpus limit of %u\n",
possible, setup_max_cpus);
possible = i;
}
- printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+ pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
possible, max_t(int, possible - num_processors, 0));
for (i = 0; i < possible; i++)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 05b31d92f69c..b481341c9369 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -9,6 +9,9 @@
/*
* Handle hardware traps and faults.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
@@ -143,12 +146,11 @@ trap_signal:
#ifdef CONFIG_X86_64
if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
printk_ratelimit()) {
- printk(KERN_INFO
- "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
- tsk->comm, tsk->pid, str,
- regs->ip, regs->sp, error_code);
+ pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
+ tsk->comm, tsk->pid, str,
+ regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
- printk("\n");
+ pr_cont("\n");
}
#endif
@@ -269,12 +271,11 @@ do_general_protection(struct pt_regs *regs, long error_code)
if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
printk_ratelimit()) {
- printk(KERN_INFO
- "%s[%d] general protection ip:%lx sp:%lx error:%lx",
+ pr_info("%s[%d] general protection ip:%lx sp:%lx error:%lx",
tsk->comm, task_pid_nr(tsk),
regs->ip, regs->sp, error_code);
print_vma_addr(" in ", regs->ip);
- printk("\n");
+ pr_cont("\n");
}
force_sig(SIGSEGV, tsk);
@@ -570,7 +571,7 @@ do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
conditional_sti(regs);
#if 0
/* No need to warn about this any longer. */
- printk(KERN_INFO "Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
+ pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index fc0a147e3727..cfa5d4f7ca56 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
@@ -84,8 +86,7 @@ EXPORT_SYMBOL_GPL(check_tsc_unstable);
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
- printk(KERN_WARNING "notsc: Kernel compiled with CONFIG_X86_TSC, "
- "cannot disable TSC completely.\n");
+ pr_warn("Kernel compiled with CONFIG_X86_TSC, cannot disable TSC completely\n");
tsc_disabled = 1;
return 1;
}
@@ -373,7 +374,7 @@ static unsigned long quick_pit_calibrate(void)
goto success;
}
}
- printk("Fast TSC calibration failed\n");
+ pr_err("Fast TSC calibration failed\n");
return 0;
success:
@@ -392,7 +393,7 @@ success:
*/
delta *= PIT_TICK_RATE;
do_div(delta, i*256*1000);
- printk("Fast TSC calibration using PIT\n");
+ pr_info("Fast TSC calibration using PIT\n");
return delta;
}
@@ -487,9 +488,8 @@ unsigned long native_calibrate_tsc(void)
* use the reference value, as it is more precise.
*/
if (delta >= 90 && delta <= 110) {
- printk(KERN_INFO
- "TSC: PIT calibration matches %s. %d loops\n",
- hpet ? "HPET" : "PMTIMER", i + 1);
+ pr_info("PIT calibration matches %s. %d loops\n",
+ hpet ? "HPET" : "PMTIMER", i + 1);
return tsc_ref_min;
}
@@ -511,38 +511,36 @@ unsigned long native_calibrate_tsc(void)
*/
if (tsc_pit_min == ULONG_MAX) {
/* PIT gave no useful value */
- printk(KERN_WARNING "TSC: Unable to calibrate against PIT\n");
+ pr_warn("Unable to calibrate against PIT\n");
/* We don't have an alternative source, disable TSC */
if (!hpet && !ref1 && !ref2) {
- printk("TSC: No reference (HPET/PMTIMER) available\n");
+ pr_notice("No reference (HPET/PMTIMER) available\n");
return 0;
}
/* The alternative source failed as well, disable TSC */
if (tsc_ref_min == ULONG_MAX) {
- printk(KERN_WARNING "TSC: HPET/PMTIMER calibration "
- "failed.\n");
+ pr_warn("HPET/PMTIMER calibration failed\n");
return 0;
}
/* Use the alternative source */
- printk(KERN_INFO "TSC: using %s reference calibration\n",
- hpet ? "HPET" : "PMTIMER");
+ pr_info("using %s reference calibration\n",
+ hpet ? "HPET" : "PMTIMER");
return tsc_ref_min;
}
/* We don't have an alternative source, use the PIT calibration value */
if (!hpet && !ref1 && !ref2) {
- printk(KERN_INFO "TSC: Using PIT calibration value\n");
+ pr_info("Using PIT calibration value\n");
return tsc_pit_min;
}
/* The alternative source failed, use the PIT calibration value */
if (tsc_ref_min == ULONG_MAX) {
- printk(KERN_WARNING "TSC: HPET/PMTIMER calibration failed. "
- "Using PIT calibration\n");
+ pr_warn("HPET/PMTIMER calibration failed. Using PIT calibration.\n");
return tsc_pit_min;
}
@@ -551,9 +549,9 @@ unsigned long native_calibrate_tsc(void)
* the PIT value as we know that there are PMTIMERs around
* running at double speed. At least we let the user know:
*/
- printk(KERN_WARNING "TSC: PIT calibration deviates from %s: %lu %lu.\n",
- hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
- printk(KERN_INFO "TSC: Using PIT calibration value\n");
+ pr_warn("PIT calibration deviates from %s: %lu %lu\n",
+ hpet ? "HPET" : "PMTIMER", tsc_pit_min, tsc_ref_min);
+ pr_info("Using PIT calibration value\n");
return tsc_pit_min;
}
@@ -785,7 +783,7 @@ void mark_tsc_unstable(char *reason)
tsc_unstable = 1;
sched_clock_stable = 0;
disable_sched_clock_irqtime();
- printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
+ pr_info("Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */
if (clocksource_tsc.mult)
clocksource_mark_unstable(&clocksource_tsc);
@@ -912,9 +910,9 @@ static void tsc_refine_calibration_work(struct work_struct *work)
goto out;
tsc_khz = freq;
- printk(KERN_INFO "Refined TSC clocksource calibration: "
- "%lu.%03lu MHz.\n", (unsigned long)tsc_khz / 1000,
- (unsigned long)tsc_khz % 1000);
+ pr_info("Refined TSC clocksource calibration: %lu.%03lu MHz\n",
+ (unsigned long)tsc_khz / 1000,
+ (unsigned long)tsc_khz % 1000);
out:
clocksource_register_khz(&clocksource_tsc, tsc_khz);
@@ -970,9 +968,9 @@ void __init tsc_init(void)
return;
}
- printk("Detected %lu.%03lu MHz processor.\n",
- (unsigned long)cpu_khz / 1000,
- (unsigned long)cpu_khz % 1000);
+ pr_info("Detected %lu.%03lu MHz processor\n",
+ (unsigned long)cpu_khz / 1000,
+ (unsigned long)cpu_khz % 1000);
/*
* Secondary CPUs do not run through tsc_init(), so set up
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index dc4e910a7d96..36fd42091fa7 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -409,9 +409,10 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,
* arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
* @mm: the probed address space.
* @arch_uprobe: the probepoint information.
+ * @addr: virtual address at which to install the probepoint
* Return 0 on success or a -ve number on error.
*/
-int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm)
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr)
{
int ret;
struct insn insn;
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 255f58ae71e8..54abcc0baf23 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -28,6 +28,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
@@ -137,14 +139,14 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
local_irq_enable();
if (!current->thread.vm86_info) {
- printk("no vm86_info: BAD\n");
+ pr_alert("no vm86_info: BAD\n");
do_exit(SIGSEGV);
}
set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
if (tmp) {
- printk("vm86: could not access userspace vm86_info\n");
+ pr_alert("could not access userspace vm86_info\n");
do_exit(SIGSEGV);
}
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 8eeb55a551b4..992f890283e9 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -16,6 +16,7 @@
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>
#include <linux/smp.h>
+#include <linux/irq.h>
#include <asm/apic.h>
#include <asm/pci-direct.h>
@@ -95,6 +96,18 @@ static void __init set_vsmp_pv_ops(void)
ctl = readl(address + 4);
printk(KERN_INFO "vSMP CTL: capabilities:0x%08x control:0x%08x\n",
cap, ctl);
+
+ /* If possible, let the vSMP foundation route the interrupt optimally */
+#ifdef CONFIG_SMP
+ if (cap & ctl & BIT(8)) {
+ ctl &= ~BIT(8);
+#ifdef CONFIG_PROC_FS
+ /* Don't let users change irq affinity via procfs */
+ no_irq_affinity = 1;
+#endif
+ }
+#endif
+
if (cap & ctl & (1 << 4)) {
/* Setup irq ops and turn on vSMP IRQ fastpath handling */
pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
@@ -102,12 +115,11 @@ static void __init set_vsmp_pv_ops(void)
pv_irq_ops.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
pv_irq_ops.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
pv_init_ops.patch = vsmp_patch;
-
ctl &= ~(1 << 4);
- writel(ctl, address + 4);
- ctl = readl(address + 4);
- printk(KERN_INFO "vSMP CTL: control set to:0x%08x\n", ctl);
}
+ writel(ctl, address + 4);
+ ctl = readl(address + 4);
+ pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
early_iounmap(address, 8);
}
@@ -187,12 +199,36 @@ static void __init vsmp_cap_cpus(void)
#endif
}
+static int apicid_phys_pkg_id(int initial_apic_id, int index_msb)
+{
+ return hard_smp_processor_id() >> index_msb;
+}
+
+/*
+ * In vSMP, all cpus should be capable of handling interrupts, regardless of
+ * the APIC used.
+ */
+static void fill_vector_allocation_domain(int cpu, struct cpumask *retmask,
+ const struct cpumask *mask)
+{
+ cpumask_setall(retmask);
+}
+
+static void vsmp_apic_post_init(void)
+{
+ /* need to update phys_pkg_id */
+ apic->phys_pkg_id = apicid_phys_pkg_id;
+ apic->vector_allocation_domain = fill_vector_allocation_domain;
+}
+
void __init vsmp_init(void)
{
detect_vsmp_box();
if (!is_vsmp_box())
return;
+ x86_platform.apic_post_init = vsmp_apic_post_init;
+
vsmp_cap_cpus();
set_vsmp_pv_ops();
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 7515cf0e1805..8d141b309046 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -18,6 +18,8 @@
* use the vDSO.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/time.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -111,18 +113,13 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
const char *message)
{
- static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
- struct task_struct *tsk;
-
- if (!show_unhandled_signals || !__ratelimit(&rs))
+ if (!show_unhandled_signals)
return;
- tsk = current;
-
- printk("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
- level, tsk->comm, task_pid_nr(tsk),
- message, regs->ip, regs->cs,
- regs->sp, regs->ax, regs->si, regs->di);
+ pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+ level, current->comm, task_pid_nr(current),
+ message, regs->ip, regs->cs,
+ regs->sp, regs->ax, regs->si, regs->di);
}
static int addr_to_vsyscall_nr(unsigned long addr)
@@ -139,6 +136,19 @@ static int addr_to_vsyscall_nr(unsigned long addr)
return nr;
}
+#ifdef CONFIG_SECCOMP
+static int vsyscall_seccomp(struct task_struct *tsk, int syscall_nr)
+{
+ if (!seccomp_mode(&tsk->seccomp))
+ return 0;
+ task_pt_regs(tsk)->orig_ax = syscall_nr;
+ task_pt_regs(tsk)->ax = syscall_nr;
+ return __secure_computing(syscall_nr);
+}
+#else
+#define vsyscall_seccomp(_tsk, _nr) 0
+#endif
+
static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
/*
@@ -174,6 +184,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
int vsyscall_nr;
int prev_sig_on_uaccess_error;
long ret;
+ int skip;
/*
* No point in checking CS -- the only way to get here is a user mode
@@ -205,9 +216,6 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
}
tsk = current;
- if (seccomp_mode(&tsk->seccomp))
- do_exit(SIGKILL);
-
/*
* With a real vsyscall, page faults cause SIGSEGV. We want to
* preserve that behavior to make writing exploits harder.
@@ -222,8 +230,13 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
* address 0".
*/
ret = -EFAULT;
+ skip = 0;
switch (vsyscall_nr) {
case 0:
+ skip = vsyscall_seccomp(tsk, __NR_gettimeofday);
+ if (skip)
+ break;
+
if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
!write_ok_or_segv(regs->si, sizeof(struct timezone)))
break;
@@ -234,6 +247,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
break;
case 1:
+ skip = vsyscall_seccomp(tsk, __NR_time);
+ if (skip)
+ break;
+
if (!write_ok_or_segv(regs->di, sizeof(time_t)))
break;
@@ -241,6 +258,10 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
break;
case 2:
+ skip = vsyscall_seccomp(tsk, __NR_getcpu);
+ if (skip)
+ break;
+
if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
!write_ok_or_segv(regs->si, sizeof(unsigned)))
break;
@@ -253,6 +274,12 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
+ if (skip) {
+ if ((long)regs->ax <= 0L) /* seccomp errno emulation */
+ goto do_ret;
+ goto done; /* seccomp trace/trap */
+ }
+
if (ret == -EFAULT) {
/* Bad news -- userspace fed a bad pointer to a vsyscall. */
warn_bad_vsyscall(KERN_INFO, regs,
@@ -271,10 +298,11 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
regs->ax = ret;
+do_ret:
/* Emulate a ret instruction. */
regs->ip = caller;
regs->sp += 8;
-
+done:
return true;
sigsegv:
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 9796c2f3d074..6020f6f5927c 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -28,6 +28,7 @@ EXPORT_SYMBOL(__put_user_8);
EXPORT_SYMBOL(copy_user_generic_string);
EXPORT_SYMBOL(copy_user_generic_unrolled);
+EXPORT_SYMBOL(copy_user_enhanced_fast_string);
EXPORT_SYMBOL(__copy_user_nocache);
EXPORT_SYMBOL(_copy_from_user);
EXPORT_SYMBOL(_copy_to_user);
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 35c5e543f550..9f3167e891ef 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -29,7 +29,6 @@ void __init x86_init_uint_noop(unsigned int unused) { }
void __init x86_init_pgd_noop(pgd_t *unused) { }
int __init iommu_init_noop(void) { return 0; }
void iommu_shutdown_noop(void) { }
-void wallclock_init_noop(void) { }
/*
* The platform setup functions are preset with the default functions
@@ -101,7 +100,6 @@ static int default_i8042_detect(void) { return 1; };
struct x86_platform_ops x86_platform = {
.calibrate_tsc = native_calibrate_tsc,
- .wallclock_init = wallclock_init_noop,
.get_wallclock = mach_get_cmos_time,
.set_wallclock = mach_set_rtc_mmss,
.iommu_shutdown = iommu_shutdown_noop,
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index bd18149b2b0f..3d3e20709119 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -3,6 +3,9 @@
*
* Author: Suresh Siddha <suresh.b.siddha@intel.com>
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/i387.h>
@@ -162,7 +165,7 @@ int save_i387_xstate(void __user *buf)
BUG_ON(sig_xstate_size < xstate_size);
if ((unsigned long)buf % 64)
- printk("save_i387_xstate: bad fpstate %p\n", buf);
+ pr_err("%s: bad fpstate %p\n", __func__, buf);
if (!used_math())
return 0;
@@ -422,7 +425,7 @@ static void __init xstate_enable_boot_cpu(void)
pcntxt_mask = eax + ((u64)edx << 32);
if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
- printk(KERN_ERR "FP/SSE not shown under xsave features 0x%llx\n",
+ pr_err("FP/SSE not shown under xsave features 0x%llx\n",
pcntxt_mask);
BUG();
}
@@ -445,9 +448,8 @@ static void __init xstate_enable_boot_cpu(void)
setup_xstate_init();
- printk(KERN_INFO "xsave/xrstor: enabled xstate_bv 0x%llx, "
- "cntxt size 0x%x\n",
- pcntxt_mask, xstate_size);
+ pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n",
+ pcntxt_mask, xstate_size);
}
/*
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7df1c6d839fb..0595f1397b7c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -201,6 +201,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_lm = 0;
#endif
unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
+ unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
/* cpuid 1.edx */
const u32 kvm_supported_word0_x86_features =
@@ -228,7 +229,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
0 /* DS-CPL, VMX, SMX, EST */ |
0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
- 0 /* Reserved, DCA */ | F(XMM4_1) |
+ F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
F(F16C) | F(RDRAND);
@@ -248,7 +249,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
/* cpuid 7.0.ebx */
const u32 kvm_supported_word9_x86_features =
F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
- F(BMI2) | F(ERMS) | F(RTM);
+ F(BMI2) | F(ERMS) | f_invpcid | F(RTM);
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -409,6 +410,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
(1 << KVM_FEATURE_NOP_IO_DELAY) |
(1 << KVM_FEATURE_CLOCKSOURCE2) |
(1 << KVM_FEATURE_ASYNC_PF) |
+ (1 << KVM_FEATURE_PV_EOI) |
(1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
if (sched_info_on())
@@ -639,33 +641,37 @@ static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}
-void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
- u32 function, index;
+ u32 function = *eax, index = *ecx;
struct kvm_cpuid_entry2 *best;
- function = kvm_register_read(vcpu, VCPU_REGS_RAX);
- index = kvm_register_read(vcpu, VCPU_REGS_RCX);
- kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
- kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
- kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
- kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
best = kvm_find_cpuid_entry(vcpu, function, index);
if (!best)
best = check_cpuid_limit(vcpu, function, index);
if (best) {
- kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
- kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
- kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
- kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
- }
+ *eax = best->eax;
+ *ebx = best->ebx;
+ *ecx = best->ecx;
+ *edx = best->edx;
+ } else
+ *eax = *ebx = *ecx = *edx = 0;
+}
+
+void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
+{
+ u32 function, eax, ebx, ecx, edx;
+
+ function = eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
+ ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
+ kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
+ kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
+ kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
+ kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
kvm_x86_ops->skip_emulated_instruction(vcpu);
- trace_kvm_cpuid(function,
- kvm_register_read(vcpu, VCPU_REGS_RAX),
- kvm_register_read(vcpu, VCPU_REGS_RBX),
- kvm_register_read(vcpu, VCPU_REGS_RCX),
- kvm_register_read(vcpu, VCPU_REGS_RDX));
+ trace_kvm_cpuid(function, eax, ebx, ecx, edx);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 26d1fb437eb5..a10e46016851 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -17,6 +17,7 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries);
+void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
@@ -51,4 +52,12 @@ static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
return best && (best->ecx & bit(X86_FEATURE_OSVW));
}
+static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ return best && (best->ecx & bit(X86_FEATURE_PCID));
+}
+
#endif
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f95d242ee9f7..97d9a9914ba8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -433,11 +433,32 @@ static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
return ctxt->ops->intercept(ctxt, &info, stage);
}
+static void assign_masked(ulong *dest, ulong src, ulong mask)
+{
+ *dest = (*dest & ~mask) | (src & mask);
+}
+
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
+static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
+{
+ u16 sel;
+ struct desc_struct ss;
+
+ if (ctxt->mode == X86EMUL_MODE_PROT64)
+ return ~0UL;
+ ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
+ return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
+}
+
+static int stack_size(struct x86_emulate_ctxt *ctxt)
+{
+ return (__fls(stack_mask(ctxt)) + 1) >> 3;
+}
+
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
@@ -958,6 +979,12 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
op->orig_val = op->val;
}
+static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
+{
+ if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
+ ctxt->modrm_seg = VCPU_SREG_SS;
+}
+
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
@@ -1061,15 +1088,20 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
modrm_ea += insn_fetch(s32, ctxt);
- else
+ else {
modrm_ea += ctxt->regs[base_reg];
+ adjust_modrm_seg(ctxt, base_reg);
+ }
if (index_reg != 4)
modrm_ea += ctxt->regs[index_reg] << scale;
} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->rip_relative = 1;
- } else
- modrm_ea += ctxt->regs[ctxt->modrm_rm];
+ } else {
+ base_reg = ctxt->modrm_rm;
+ modrm_ea += ctxt->regs[base_reg];
+ adjust_modrm_seg(ctxt, base_reg);
+ }
switch (ctxt->modrm_mod) {
case 0:
if (ctxt->modrm_rm == 5)
@@ -1264,7 +1296,8 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
/* allowed just for 8 bytes segments */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
- u16 selector, struct desc_struct *desc)
+ u16 selector, struct desc_struct *desc,
+ ulong *desc_addr_p)
{
struct desc_ptr dt;
u16 index = selector >> 3;
@@ -1275,7 +1308,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
- addr = dt.address + index * 8;
+ *desc_addr_p = addr = dt.address + index * 8;
return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
@@ -1302,11 +1335,12 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
- struct desc_struct seg_desc;
+ struct desc_struct seg_desc, old_desc;
u8 dpl, rpl, cpl;
unsigned err_vec = GP_VECTOR;
u32 err_code = 0;
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+ ulong desc_addr;
int ret;
memset(&seg_desc, 0, sizeof seg_desc);
@@ -1324,8 +1358,14 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
goto load;
}
- /* NULL selector is not valid for TR, CS and SS */
- if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+ rpl = selector & 3;
+ cpl = ctxt->ops->cpl(ctxt);
+
+ /* NULL selector is not valid for TR, CS and SS (except for long mode) */
+ if ((seg == VCPU_SREG_CS
+ || (seg == VCPU_SREG_SS
+ && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
+ || seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
@@ -1336,7 +1376,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (null_selector) /* for NULL selector skip all following checks */
goto load;
- ret = read_segment_descriptor(ctxt, selector, &seg_desc);
+ ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -1352,9 +1392,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
goto exception;
}
- rpl = selector & 3;
dpl = seg_desc.dpl;
- cpl = ctxt->ops->cpl(ctxt);
switch (seg) {
case VCPU_SREG_SS:
@@ -1384,6 +1422,12 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
case VCPU_SREG_TR:
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
goto exception;
+ old_desc = seg_desc;
+ seg_desc.type |= 2; /* busy */
+ ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
+ sizeof(seg_desc), &ctxt->exception);
+ if (ret != X86EMUL_CONTINUE)
+ return ret;
break;
case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
@@ -1474,17 +1518,22 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
-static int em_push(struct x86_emulate_ctxt *ctxt)
+static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
struct segmented_address addr;
- register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
+ register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
addr.seg = VCPU_SREG_SS;
+ return segmented_write(ctxt, addr, data, bytes);
+}
+
+static int em_push(struct x86_emulate_ctxt *ctxt)
+{
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
- return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
+ return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1556,6 +1605,33 @@ static int em_popf(struct x86_emulate_ctxt *ctxt)
return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
+static int em_enter(struct x86_emulate_ctxt *ctxt)
+{
+ int rc;
+ unsigned frame_size = ctxt->src.val;
+ unsigned nesting_level = ctxt->src2.val & 31;
+
+ if (nesting_level)
+ return X86EMUL_UNHANDLEABLE;
+
+ rc = push(ctxt, &ctxt->regs[VCPU_REGS_RBP], stack_size(ctxt));
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ assign_masked(&ctxt->regs[VCPU_REGS_RBP], ctxt->regs[VCPU_REGS_RSP],
+ stack_mask(ctxt));
+ assign_masked(&ctxt->regs[VCPU_REGS_RSP],
+ ctxt->regs[VCPU_REGS_RSP] - frame_size,
+ stack_mask(ctxt));
+ return X86EMUL_CONTINUE;
+}
+
+static int em_leave(struct x86_emulate_ctxt *ctxt)
+{
+ assign_masked(&ctxt->regs[VCPU_REGS_RSP], ctxt->regs[VCPU_REGS_RBP],
+ stack_mask(ctxt));
+ return emulate_pop(ctxt, &ctxt->regs[VCPU_REGS_RBP], ctxt->op_bytes);
+}
+
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
@@ -1993,8 +2069,8 @@ static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
u32 eax, ebx, ecx, edx;
eax = ecx = 0;
- return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)
- && ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
+ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+ return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
@@ -2013,32 +2089,31 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
eax = 0x00000000;
ecx = 0x00000000;
- if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
- /*
- * Intel ("GenuineIntel")
- * remark: Intel CPUs only support "syscall" in 64bit
- * longmode. Also an 64bit guest with a
- * 32bit compat-app running will #UD !! While this
- * behaviour can be fixed (by emulating) into AMD
- * response - CPUs of AMD can't behave like Intel.
- */
- if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
- ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
- edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
- return false;
+ ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+ /*
+ * Intel ("GenuineIntel")
+ * remark: Intel CPUs only support "syscall" in 64bit
+ * longmode. Also an 64bit guest with a
+ * 32bit compat-app running will #UD !! While this
+ * behaviour can be fixed (by emulating) into AMD
+ * response - CPUs of AMD can't behave like Intel.
+ */
+ if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
+ ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
+ edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
+ return false;
- /* AMD ("AuthenticAMD") */
- if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
- ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
- edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
- return true;
-
- /* AMD ("AMDisbetter!") */
- if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
- ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
- edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
- return true;
- }
+ /* AMD ("AuthenticAMD") */
+ if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
+ ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
+ edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+ return true;
+
+ /* AMD ("AMDisbetter!") */
+ if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
+ ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
+ edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
+ return true;
/* default: (not Intel, not AMD), apply Intel's stricter rules... */
return false;
@@ -2547,13 +2622,14 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
ulong old_tss_base =
ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
u32 desc_limit;
+ ulong desc_addr;
/* FIXME: old_tss_base == ~0 ? */
- ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
+ ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
+ ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
@@ -2948,6 +3024,24 @@ static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
+static int em_lldt(struct x86_emulate_ctxt *ctxt)
+{
+ u16 sel = ctxt->src.val;
+
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
+}
+
+static int em_ltr(struct x86_emulate_ctxt *ctxt)
+{
+ u16 sel = ctxt->src.val;
+
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
+}
+
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
@@ -2989,11 +3083,42 @@ static int em_vmcall(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
+ void (*get)(struct x86_emulate_ctxt *ctxt,
+ struct desc_ptr *ptr))
+{
+ struct desc_ptr desc_ptr;
+
+ if (ctxt->mode == X86EMUL_MODE_PROT64)
+ ctxt->op_bytes = 8;
+ get(ctxt, &desc_ptr);
+ if (ctxt->op_bytes == 2) {
+ ctxt->op_bytes = 4;
+ desc_ptr.address &= 0x00ffffff;
+ }
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
+ return segmented_write(ctxt, ctxt->dst.addr.mem,
+ &desc_ptr, 2 + ctxt->op_bytes);
+}
+
+static int em_sgdt(struct x86_emulate_ctxt *ctxt)
+{
+ return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
+}
+
+static int em_sidt(struct x86_emulate_ctxt *ctxt)
+{
+ return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
+}
+
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
struct desc_ptr desc_ptr;
int rc;
+ if (ctxt->mode == X86EMUL_MODE_PROT64)
+ ctxt->op_bytes = 8;
rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
ctxt->op_bytes);
@@ -3021,6 +3146,8 @@ static int em_lidt(struct x86_emulate_ctxt *ctxt)
struct desc_ptr desc_ptr;
int rc;
+ if (ctxt->mode == X86EMUL_MODE_PROT64)
+ ctxt->op_bytes = 8;
rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
ctxt->op_bytes);
@@ -3143,6 +3270,42 @@ static int em_bsr(struct x86_emulate_ctxt *ctxt)
return X86EMUL_CONTINUE;
}
+static int em_cpuid(struct x86_emulate_ctxt *ctxt)
+{
+ u32 eax, ebx, ecx, edx;
+
+ eax = ctxt->regs[VCPU_REGS_RAX];
+ ecx = ctxt->regs[VCPU_REGS_RCX];
+ ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+ ctxt->regs[VCPU_REGS_RAX] = eax;
+ ctxt->regs[VCPU_REGS_RBX] = ebx;
+ ctxt->regs[VCPU_REGS_RCX] = ecx;
+ ctxt->regs[VCPU_REGS_RDX] = edx;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_lahf(struct x86_emulate_ctxt *ctxt)
+{
+ ctxt->regs[VCPU_REGS_RAX] &= ~0xff00UL;
+ ctxt->regs[VCPU_REGS_RAX] |= (ctxt->eflags & 0xff) << 8;
+ return X86EMUL_CONTINUE;
+}
+
+static int em_bswap(struct x86_emulate_ctxt *ctxt)
+{
+ switch (ctxt->op_bytes) {
+#ifdef CONFIG_X86_64
+ case 8:
+ asm("bswap %0" : "+r"(ctxt->dst.val));
+ break;
+#endif
+ default:
+ asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
+ break;
+ }
+ return X86EMUL_CONTINUE;
+}
+
static bool valid_cr(int nr)
{
switch (nr) {
@@ -3424,14 +3587,14 @@ static struct opcode group5[] = {
static struct opcode group6[] = {
DI(Prot, sldt),
DI(Prot, str),
- DI(Prot | Priv, lldt),
- DI(Prot | Priv, ltr),
+ II(Prot | Priv | SrcMem16, em_lldt, lldt),
+ II(Prot | Priv | SrcMem16, em_ltr, ltr),
N, N, N, N,
};
static struct group_dual group7 = { {
- DI(Mov | DstMem | Priv, sgdt),
- DI(Mov | DstMem | Priv, sidt),
+ II(Mov | DstMem | Priv, em_sgdt, sgdt),
+ II(Mov | DstMem | Priv, em_sidt, sidt),
II(SrcMem | Priv, em_lgdt, lgdt),
II(SrcMem | Priv, em_lidt, lidt),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
@@ -3538,7 +3701,7 @@ static struct opcode opcode_table[256] = {
D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
I(SrcImmFAddr | No64, em_call_far), N,
II(ImplicitOps | Stack, em_pushf, pushf),
- II(ImplicitOps | Stack, em_popf, popf), N, N,
+ II(ImplicitOps | Stack, em_popf, popf), N, I(ImplicitOps, em_lahf),
/* 0xA0 - 0xA7 */
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
@@ -3561,7 +3724,8 @@ static struct opcode opcode_table[256] = {
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
- N, N, N, I(ImplicitOps | Stack, em_ret_far),
+ I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
+ N, I(ImplicitOps | Stack, em_ret_far),
D(ImplicitOps), DI(SrcImmByte, intn),
D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
@@ -3635,7 +3799,7 @@ static struct opcode twobyte_table[256] = {
X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
/* 0xA0 - 0xA7 */
I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
- DI(ImplicitOps, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
+ II(ImplicitOps, em_cpuid, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
D(DstMem | SrcReg | Src2ImmByte | ModRM),
D(DstMem | SrcReg | Src2CL | ModRM), N, N,
/* 0xA8 - 0xAF */
@@ -3658,11 +3822,12 @@ static struct opcode twobyte_table[256] = {
I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
- /* 0xC0 - 0xCF */
+ /* 0xC0 - 0xC7 */
D2bv(DstMem | SrcReg | ModRM | Lock),
N, D(DstMem | SrcReg | ModRM | Mov),
N, N, N, GD(0, &group9),
- N, N, N, N, N, N, N, N,
+ /* 0xC8 - 0xCF */
+ X8(I(DstReg, em_bswap)),
/* 0xD0 - 0xDF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0xE0 - 0xEF */
@@ -4426,12 +4591,12 @@ twobyte_insn:
break;
case 0xb6 ... 0xb7: /* movzx */
ctxt->dst.bytes = ctxt->op_bytes;
- ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
+ ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
: (u16) ctxt->src.val;
break;
case 0xbe ... 0xbf: /* movsx */
ctxt->dst.bytes = ctxt->op_bytes;
- ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
+ ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
(s16) ctxt->src.val;
break;
case 0xc0 ... 0xc1: /* xadd */
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 81cf4fa4a2be..1df8fb9e1d5d 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -188,14 +188,15 @@ void kvm_pic_update_irq(struct kvm_pic *s)
pic_unlock(s);
}
-int kvm_pic_set_irq(void *opaque, int irq, int level)
+int kvm_pic_set_irq(struct kvm_pic *s, int irq, int irq_source_id, int level)
{
- struct kvm_pic *s = opaque;
int ret = -1;
pic_lock(s);
if (irq >= 0 && irq < PIC_NUM_PINS) {
- ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, level);
+ int irq_level = __kvm_irq_line_state(&s->irq_states[irq],
+ irq_source_id, level);
+ ret = pic_set_irq1(&s->pics[irq >> 3], irq & 7, irq_level);
pic_update_irq(s);
trace_kvm_pic_set_irq(irq >> 3, irq & 7, s->pics[irq >> 3].elcr,
s->pics[irq >> 3].imr, ret == 0);
@@ -205,6 +206,16 @@ int kvm_pic_set_irq(void *opaque, int irq, int level)
return ret;
}
+void kvm_pic_clear_all(struct kvm_pic *s, int irq_source_id)
+{
+ int i;
+
+ pic_lock(s);
+ for (i = 0; i < PIC_NUM_PINS; i++)
+ __clear_bit(irq_source_id, &s->irq_states[i]);
+ pic_unlock(s);
+}
+
/*
* acknowledge interrupt 'irq'
*/
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 93c15743f1ee..ce878788a39f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -107,6 +107,16 @@ static inline void apic_clear_vector(int vec, void *bitmap)
clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
+static inline int __apic_test_and_set_vector(int vec, void *bitmap)
+{
+ return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+}
+
+static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
+{
+ return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
+}
+
static inline int apic_hw_enabled(struct kvm_lapic *apic)
{
return (apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
@@ -210,6 +220,16 @@ static int find_highest_vector(void *bitmap)
return fls(word[word_offset << 2]) - 1 + (word_offset << 5);
}
+static u8 count_vectors(void *bitmap)
+{
+ u32 *word = bitmap;
+ int word_offset;
+ u8 count = 0;
+ for (word_offset = 0; word_offset < MAX_APIC_VECTOR >> 5; ++word_offset)
+ count += hweight32(word[word_offset << 2]);
+ return count;
+}
+
static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
{
apic->irr_pending = true;
@@ -242,6 +262,27 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
apic->irr_pending = true;
}
+static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
+{
+ if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
+ ++apic->isr_count;
+ BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
+ /*
+ * ISR (in service register) bit is set when injecting an interrupt.
+ * The highest vector is injected. Thus the latest bit set matches
+ * the highest bit in ISR.
+ */
+ apic->highest_isr_cache = vec;
+}
+
+static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
+{
+ if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
+ --apic->isr_count;
+ BUG_ON(apic->isr_count < 0);
+ apic->highest_isr_cache = -1;
+}
+
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
@@ -270,9 +311,61 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
irq->level, irq->trig_mode);
}
+static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
+{
+
+ return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
+ sizeof(val));
+}
+
+static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
+{
+
+ return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
+ sizeof(*val));
+}
+
+static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
+}
+
+static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
+{
+ u8 val;
+ if (pv_eoi_get_user(vcpu, &val) < 0)
+ apic_debug("Can't read EOI MSR value: 0x%llx\n",
+ (unsigned long long)vcpi->arch.pv_eoi.msr_val);
+ return val & 0x1;
+}
+
+static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
+{
+ if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
+ apic_debug("Can't set EOI MSR value: 0x%llx\n",
+ (unsigned long long)vcpi->arch.pv_eoi.msr_val);
+ return;
+ }
+ __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
+}
+
+static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
+{
+ if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
+ apic_debug("Can't clear EOI MSR value: 0x%llx\n",
+ (unsigned long long)vcpi->arch.pv_eoi.msr_val);
+ return;
+ }
+ __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
+}
+
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
int result;
+ if (!apic->isr_count)
+ return -1;
+ if (likely(apic->highest_isr_cache != -1))
+ return apic->highest_isr_cache;
result = find_highest_vector(apic->regs + APIC_ISR);
ASSERT(result == -1 || result >= 16);
@@ -482,17 +575,20 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}
-static void apic_set_eoi(struct kvm_lapic *apic)
+static int apic_set_eoi(struct kvm_lapic *apic)
{
int vector = apic_find_highest_isr(apic);
+
+ trace_kvm_eoi(apic, vector);
+
/*
* Not every write EOI will has corresponding ISR,
* one example is when Kernel check timer on setup_IO_APIC
*/
if (vector == -1)
- return;
+ return vector;
- apic_clear_vector(vector, apic->regs + APIC_ISR);
+ apic_clear_isr(vector, apic);
apic_update_ppr(apic);
if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
@@ -505,6 +601,7 @@ static void apic_set_eoi(struct kvm_lapic *apic)
kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
}
kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+ return vector;
}
static void apic_send_ipi(struct kvm_lapic *apic)
@@ -1081,10 +1178,13 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
}
apic->irr_pending = false;
+ apic->isr_count = 0;
+ apic->highest_isr_cache = -1;
update_divide_count(apic);
atomic_set(&apic->lapic_timer.pending, 0);
if (kvm_vcpu_is_bsp(vcpu))
vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
+ vcpu->arch.pv_eoi.msr_val = 0;
apic_update_ppr(apic);
vcpu->arch.apic_arb_prio = 0;
@@ -1248,7 +1348,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
if (vector == -1)
return -1;
- apic_set_vector(vector, apic->regs + APIC_ISR);
+ apic_set_isr(vector, apic);
apic_update_ppr(apic);
apic_clear_irr(vector, apic);
return vector;
@@ -1267,6 +1367,8 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
update_divide_count(apic);
start_apic_timer(apic);
apic->irr_pending = true;
+ apic->isr_count = count_vectors(apic->regs + APIC_ISR);
+ apic->highest_isr_cache = -1;
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
@@ -1283,11 +1385,51 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
+/*
+ * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
+ *
+ * Detect whether guest triggered PV EOI since the
+ * last entry. If yes, set EOI on guests's behalf.
+ * Clear PV EOI in guest memory in any case.
+ */
+static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
+ struct kvm_lapic *apic)
+{
+ bool pending;
+ int vector;
+ /*
+ * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
+ * and KVM_PV_EOI_ENABLED in guest memory as follows:
+ *
+ * KVM_APIC_PV_EOI_PENDING is unset:
+ * -> host disabled PV EOI.
+ * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
+ * -> host enabled PV EOI, guest did not execute EOI yet.
+ * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
+ * -> host enabled PV EOI, guest executed EOI.
+ */
+ BUG_ON(!pv_eoi_enabled(vcpu));
+ pending = pv_eoi_get_pending(vcpu);
+ /*
+ * Clear pending bit in any case: it will be set again on vmentry.
+ * While this might not be ideal from performance point of view,
+ * this makes sure pv eoi is only enabled when we know it's safe.
+ */
+ pv_eoi_clr_pending(vcpu);
+ if (pending)
+ return;
+ vector = apic_set_eoi(apic);
+ trace_kvm_pv_eoi(apic, vector);
+}
+
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
u32 data;
void *vapic;
+ if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
+ apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
+
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
@@ -1298,17 +1440,44 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
+/*
+ * apic_sync_pv_eoi_to_guest - called before vmentry
+ *
+ * Detect whether it's safe to enable PV EOI and
+ * if yes do so.
+ */
+static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
+ struct kvm_lapic *apic)
+{
+ if (!pv_eoi_enabled(vcpu) ||
+ /* IRR set or many bits in ISR: could be nested. */
+ apic->irr_pending ||
+ /* Cache not set: could be safe but we don't bother. */
+ apic->highest_isr_cache == -1 ||
+ /* Need EOI to update ioapic. */
+ kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) {
+ /*
+ * PV EOI was disabled by apic_sync_pv_eoi_from_guest
+ * so we need not do anything here.
+ */
+ return;
+ }
+
+ pv_eoi_set_pending(apic->vcpu);
+}
+
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
u32 data, tpr;
int max_irr, max_isr;
- struct kvm_lapic *apic;
+ struct kvm_lapic *apic = vcpu->arch.apic;
void *vapic;
+ apic_sync_pv_eoi_to_guest(vcpu, apic);
+
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
- apic = vcpu->arch.apic;
tpr = apic_get_reg(apic, APIC_TASKPRI) & 0xff;
max_irr = apic_find_highest_irr(apic);
if (max_irr < 0)
@@ -1394,3 +1563,16 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
return 0;
}
+
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
+{
+ u64 addr = data & ~KVM_MSR_ENABLED;
+ if (!IS_ALIGNED(addr, 4))
+ return 1;
+
+ vcpu->arch.pv_eoi.msr_val = data;
+ if (!pv_eoi_enabled(vcpu))
+ return 0;
+ return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+ addr);
+}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 6f4ce2575d09..4af5405ae1e2 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -13,6 +13,15 @@ struct kvm_lapic {
u32 divide_count;
struct kvm_vcpu *vcpu;
bool irr_pending;
+ /* Number of bits set in ISR. */
+ s16 isr_count;
+ /* The highest vector set in ISR; if -1 - invalid, must scan ISR. */
+ int highest_isr_cache;
+ /**
+ * APIC register page. The layout matches the register layout seen by
+ * the guest 1:1, because it is accessed by the vmx microcode.
+ * Note: Only one register, the TPR, is used by the microcode.
+ */
void *regs;
gpa_t vapic_addr;
struct page *vapic_page;
@@ -60,4 +69,6 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
{
return vcpu->arch.hv_vapic & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE;
}
+
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
#endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index be3cea4407ff..01ca00423938 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -90,7 +90,7 @@ module_param(dbg, bool, 0644);
#define PTE_PREFETCH_NUM 8
-#define PT_FIRST_AVAIL_BITS_SHIFT 9
+#define PT_FIRST_AVAIL_BITS_SHIFT 10
#define PT64_SECOND_AVAIL_BITS_SHIFT 52
#define PT64_LEVEL_BITS 9
@@ -145,7 +145,8 @@ module_param(dbg, bool, 0644);
#define CREATE_TRACE_POINTS
#include "mmutrace.h"
-#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
+#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
@@ -188,6 +189,7 @@ static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;
static void mmu_spte_set(u64 *sptep, u64 spte);
+static void mmu_free_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
{
@@ -444,8 +446,22 @@ static bool __check_direct_spte_mmio_pf(u64 spte)
}
#endif
+static bool spte_is_locklessly_modifiable(u64 spte)
+{
+ return !(~spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE));
+}
+
static bool spte_has_volatile_bits(u64 spte)
{
+ /*
+ * Always atomicly update spte if it can be updated
+ * out of mmu-lock, it can ensure dirty bit is not lost,
+ * also, it can help us to get a stable is_writable_pte()
+ * to ensure tlb flush is not missed.
+ */
+ if (spte_is_locklessly_modifiable(spte))
+ return true;
+
if (!shadow_accessed_mask)
return false;
@@ -478,34 +494,47 @@ static void mmu_spte_set(u64 *sptep, u64 new_spte)
/* Rules for using mmu_spte_update:
* Update the state bits, it means the mapped pfn is not changged.
+ *
+ * Whenever we overwrite a writable spte with a read-only one we
+ * should flush remote TLBs. Otherwise rmap_write_protect
+ * will find a read-only spte, even though the writable spte
+ * might be cached on a CPU's TLB, the return value indicates this
+ * case.
*/
-static void mmu_spte_update(u64 *sptep, u64 new_spte)
+static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
- u64 mask, old_spte = *sptep;
+ u64 old_spte = *sptep;
+ bool ret = false;
WARN_ON(!is_rmap_spte(new_spte));
- if (!is_shadow_present_pte(old_spte))
- return mmu_spte_set(sptep, new_spte);
-
- new_spte |= old_spte & shadow_dirty_mask;
-
- mask = shadow_accessed_mask;
- if (is_writable_pte(old_spte))
- mask |= shadow_dirty_mask;
+ if (!is_shadow_present_pte(old_spte)) {
+ mmu_spte_set(sptep, new_spte);
+ return ret;
+ }
- if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
+ if (!spte_has_volatile_bits(old_spte))
__update_clear_spte_fast(sptep, new_spte);
else
old_spte = __update_clear_spte_slow(sptep, new_spte);
+ /*
+ * For the spte updated out of mmu-lock is safe, since
+ * we always atomicly update it, see the comments in
+ * spte_has_volatile_bits().
+ */
+ if (is_writable_pte(old_spte) && !is_writable_pte(new_spte))
+ ret = true;
+
if (!shadow_accessed_mask)
- return;
+ return ret;
if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
kvm_set_pfn_accessed(spte_to_pfn(old_spte));
if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
kvm_set_pfn_dirty(spte_to_pfn(old_spte));
+
+ return ret;
}
/*
@@ -652,8 +681,7 @@ static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
mmu_page_header_cache);
}
-static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
- size_t size)
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
void *p;
@@ -664,8 +692,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
- return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache,
- sizeof(struct pte_list_desc));
+ return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}
static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
@@ -1051,35 +1078,82 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
rmap_remove(kvm, sptep);
}
-static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
+
+static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
+{
+ if (is_large_pte(*sptep)) {
+ WARN_ON(page_header(__pa(sptep))->role.level ==
+ PT_PAGE_TABLE_LEVEL);
+ drop_spte(kvm, sptep);
+ --kvm->stat.lpages;
+ return true;
+ }
+
+ return false;
+}
+
+static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
+{
+ if (__drop_large_spte(vcpu->kvm, sptep))
+ kvm_flush_remote_tlbs(vcpu->kvm);
+}
+
+/*
+ * Write-protect on the specified @sptep, @pt_protect indicates whether
+ * spte writ-protection is caused by protecting shadow page table.
+ * @flush indicates whether tlb need be flushed.
+ *
+ * Note: write protection is difference between drity logging and spte
+ * protection:
+ * - for dirty logging, the spte can be set to writable at anytime if
+ * its dirty bitmap is properly set.
+ * - for spte protection, the spte can be writable only after unsync-ing
+ * shadow page.
+ *
+ * Return true if the spte is dropped.
+ */
+static bool
+spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect)
+{
+ u64 spte = *sptep;
+
+ if (!is_writable_pte(spte) &&
+ !(pt_protect && spte_is_locklessly_modifiable(spte)))
+ return false;
+
+ rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+
+ if (__drop_large_spte(kvm, sptep)) {
+ *flush |= true;
+ return true;
+ }
+
+ if (pt_protect)
+ spte &= ~SPTE_MMU_WRITEABLE;
+ spte = spte & ~PT_WRITABLE_MASK;
+
+ *flush |= mmu_spte_update(sptep, spte);
+ return false;
+}
+
+static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
+ int level, bool pt_protect)
{
u64 *sptep;
struct rmap_iterator iter;
- int write_protected = 0;
+ bool flush = false;
for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
BUG_ON(!(*sptep & PT_PRESENT_MASK));
- rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
-
- if (!is_writable_pte(*sptep)) {
- sptep = rmap_get_next(&iter);
- continue;
- }
-
- if (level == PT_PAGE_TABLE_LEVEL) {
- mmu_spte_update(sptep, *sptep & ~PT_WRITABLE_MASK);
- sptep = rmap_get_next(&iter);
- } else {
- BUG_ON(!is_large_pte(*sptep));
- drop_spte(kvm, sptep);
- --kvm->stat.lpages;
+ if (spte_write_protect(kvm, sptep, &flush, pt_protect)) {
sptep = rmap_get_first(*rmapp, &iter);
+ continue;
}
- write_protected = 1;
+ sptep = rmap_get_next(&iter);
}
- return write_protected;
+ return flush;
}
/**
@@ -1100,26 +1174,26 @@ void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
while (mask) {
rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
- __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);
+ __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL, false);
/* clear the first set bit */
mask &= mask - 1;
}
}
-static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
{
struct kvm_memory_slot *slot;
unsigned long *rmapp;
int i;
- int write_protected = 0;
+ bool write_protected = false;
slot = gfn_to_memslot(kvm, gfn);
for (i = PT_PAGE_TABLE_LEVEL;
i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
rmapp = __gfn_to_rmap(gfn, i, slot);
- write_protected |= __rmap_write_protect(kvm, rmapp, i);
+ write_protected |= __rmap_write_protect(kvm, rmapp, i, true);
}
return write_protected;
@@ -1238,11 +1312,12 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
unsigned long data)
{
u64 *sptep;
- struct rmap_iterator iter;
+ struct rmap_iterator uninitialized_var(iter);
int young = 0;
/*
- * Emulate the accessed bit for EPT, by checking if this page has
+ * In case of absence of EPT Access and Dirty Bits supports,
+ * emulate the accessed bit for EPT, by checking if this page has
* an EPT mapping, and clearing it if it does. On the next access,
* a new EPT mapping will be established.
* This has some overhead, but not as much as the cost of swapping
@@ -1253,11 +1328,12 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
for (sptep = rmap_get_first(*rmapp, &iter); sptep;
sptep = rmap_get_next(&iter)) {
- BUG_ON(!(*sptep & PT_PRESENT_MASK));
+ BUG_ON(!is_shadow_present_pte(*sptep));
- if (*sptep & PT_ACCESSED_MASK) {
+ if (*sptep & shadow_accessed_mask) {
young = 1;
- clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)sptep);
+ clear_bit((ffs(shadow_accessed_mask) - 1),
+ (unsigned long *)sptep);
}
}
@@ -1281,9 +1357,9 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
for (sptep = rmap_get_first(*rmapp, &iter); sptep;
sptep = rmap_get_next(&iter)) {
- BUG_ON(!(*sptep & PT_PRESENT_MASK));
+ BUG_ON(!is_shadow_present_pte(*sptep));
- if (*sptep & PT_ACCESSED_MASK) {
+ if (*sptep & shadow_accessed_mask) {
young = 1;
break;
}
@@ -1401,12 +1477,10 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
u64 *parent_pte, int direct)
{
struct kvm_mmu_page *sp;
- sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache,
- sizeof *sp);
- sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+ sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
+ sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
if (!direct)
- sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
- PAGE_SIZE);
+ sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
bitmap_zero(sp->slot_bitmap, KVM_MEM_SLOTS_NUM);
@@ -1701,7 +1775,7 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
kvm_mmu_pages_init(parent, &parents, &pages);
while (mmu_unsync_walk(parent, &pages)) {
- int protected = 0;
+ bool protected = false;
for_each_sp(pages, sp, parents, i)
protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
@@ -1866,15 +1940,6 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
mmu_spte_set(sptep, spte);
}
-static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
-{
- if (is_large_pte(*sptep)) {
- drop_spte(vcpu->kvm, sptep);
- --vcpu->kvm->stat.lpages;
- kvm_flush_remote_tlbs(vcpu->kvm);
- }
-}
-
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
unsigned direct_access)
{
@@ -2243,7 +2308,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
gfn_t gfn, pfn_t pfn, bool speculative,
bool can_unsync, bool host_writable)
{
- u64 spte, entry = *sptep;
+ u64 spte;
int ret = 0;
if (set_mmio_spte(sptep, gfn, pfn, pte_access))
@@ -2257,8 +2322,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
spte |= shadow_x_mask;
else
spte |= shadow_nx_mask;
+
if (pte_access & ACC_USER_MASK)
spte |= shadow_user_mask;
+
if (level > PT_PAGE_TABLE_LEVEL)
spte |= PT_PAGE_SIZE_MASK;
if (tdp_enabled)
@@ -2283,7 +2350,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
goto done;
}
- spte |= PT_WRITABLE_MASK;
+ spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
if (!vcpu->arch.mmu.direct_map
&& !(pte_access & ACC_WRITE_MASK)) {
@@ -2312,8 +2379,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
__func__, gfn);
ret = 1;
pte_access &= ~ACC_WRITE_MASK;
- if (is_writable_pte(spte))
- spte &= ~PT_WRITABLE_MASK;
+ spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
}
}
@@ -2321,14 +2387,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
mark_page_dirty(vcpu->kvm, gfn);
set_pte:
- mmu_spte_update(sptep, spte);
- /*
- * If we overwrite a writable spte with a read-only one we
- * should flush remote TLBs. Otherwise rmap_write_protect
- * will find a read-only spte, even though the writable spte
- * might be cached on a CPU's TLB.
- */
- if (is_writable_pte(entry) && !is_writable_pte(*sptep))
+ if (mmu_spte_update(sptep, spte))
kvm_flush_remote_tlbs(vcpu->kvm);
done:
return ret;
@@ -2403,6 +2462,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
+ mmu_free_roots(vcpu);
}
static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
@@ -2625,18 +2685,116 @@ exit:
return ret;
}
+static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code)
+{
+ /*
+ * #PF can be fast only if the shadow page table is present and it
+ * is caused by write-protect, that means we just need change the
+ * W bit of the spte which can be done out of mmu-lock.
+ */
+ if (!(error_code & PFERR_PRESENT_MASK) ||
+ !(error_code & PFERR_WRITE_MASK))
+ return false;
+
+ return true;
+}
+
+static bool
+fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 spte)
+{
+ struct kvm_mmu_page *sp = page_header(__pa(sptep));
+ gfn_t gfn;
+
+ WARN_ON(!sp->role.direct);
+
+ /*
+ * The gfn of direct spte is stable since it is calculated
+ * by sp->gfn.
+ */
+ gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
+
+ if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
+ mark_page_dirty(vcpu->kvm, gfn);
+
+ return true;
+}
+
+/*
+ * Return value:
+ * - true: let the vcpu to access on the same address again.
+ * - false: let the real page fault path to fix it.
+ */
+static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
+ u32 error_code)
+{
+ struct kvm_shadow_walk_iterator iterator;
+ bool ret = false;
+ u64 spte = 0ull;
+
+ if (!page_fault_can_be_fast(vcpu, error_code))
+ return false;
+
+ walk_shadow_page_lockless_begin(vcpu);
+ for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
+ if (!is_shadow_present_pte(spte) || iterator.level < level)
+ break;
+
+ /*
+ * If the mapping has been changed, let the vcpu fault on the
+ * same address again.
+ */
+ if (!is_rmap_spte(spte)) {
+ ret = true;
+ goto exit;
+ }
+
+ if (!is_last_spte(spte, level))
+ goto exit;
+
+ /*
+ * Check if it is a spurious fault caused by TLB lazily flushed.
+ *
+ * Need not check the access of upper level table entries since
+ * they are always ACC_ALL.
+ */
+ if (is_writable_pte(spte)) {
+ ret = true;
+ goto exit;
+ }
+
+ /*
+ * Currently, to simplify the code, only the spte write-protected
+ * by dirty-log can be fast fixed.
+ */
+ if (!spte_is_locklessly_modifiable(spte))
+ goto exit;
+
+ /*
+ * Currently, fast page fault only works for direct mapping since
+ * the gfn is not stable for indirect shadow page.
+ * See Documentation/virtual/kvm/locking.txt to get more detail.
+ */
+ ret = fast_pf_fix_direct_spte(vcpu, iterator.sptep, spte);
+exit:
+ trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
+ spte, ret);
+ walk_shadow_page_lockless_end(vcpu);
+
+ return ret;
+}
+
static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
gva_t gva, pfn_t *pfn, bool write, bool *writable);
-static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
- bool prefault)
+static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
+ gfn_t gfn, bool prefault)
{
int r;
int level;
int force_pt_level;
pfn_t pfn;
unsigned long mmu_seq;
- bool map_writable;
+ bool map_writable, write = error_code & PFERR_WRITE_MASK;
force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
if (likely(!force_pt_level)) {
@@ -2653,6 +2811,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn,
} else
level = PT_PAGE_TABLE_LEVEL;
+ if (fast_page_fault(vcpu, v, level, error_code))
+ return 0;
+
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
@@ -3041,7 +3202,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
gfn = gva >> PAGE_SHIFT;
return nonpaging_map(vcpu, gva & PAGE_MASK,
- error_code & PFERR_WRITE_MASK, gfn, prefault);
+ error_code, gfn, prefault);
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
@@ -3121,6 +3282,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
} else
level = PT_PAGE_TABLE_LEVEL;
+ if (fast_page_fault(vcpu, gpa, level, error_code))
+ return 0;
+
mmu_seq = vcpu->kvm->mmu_notifier_seq;
smp_rmb();
@@ -3885,6 +4049,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
struct kvm_mmu_page *sp;
+ bool flush = false;
list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
int i;
@@ -3899,16 +4064,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
!is_last_spte(pt[i], sp->role.level))
continue;
- if (is_large_pte(pt[i])) {
- drop_spte(kvm, &pt[i]);
- --kvm->stat.lpages;
- continue;
- }
-
- /* avoid RMW */
- if (is_writable_pte(pt[i]))
- mmu_spte_update(&pt[i],
- pt[i] & ~PT_WRITABLE_MASK);
+ spte_write_protect(kvm, &pt[i], &flush, false);
}
}
kvm_flush_remote_tlbs(kvm);
@@ -3934,6 +4090,9 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
{
struct kvm_mmu_page *page;
+ if (list_empty(&kvm->arch.active_mmu_pages))
+ return;
+
page = container_of(kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
@@ -3942,7 +4101,6 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
struct kvm *kvm;
- struct kvm *kvm_freed = NULL;
int nr_to_scan = sc->nr_to_scan;
if (nr_to_scan == 0)
@@ -3954,22 +4112,30 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
int idx;
LIST_HEAD(invalid_list);
+ /*
+ * n_used_mmu_pages is accessed without holding kvm->mmu_lock
+ * here. We may skip a VM instance errorneosly, but we do not
+ * want to shrink a VM that only started to populate its MMU
+ * anyway.
+ */
+ if (kvm->arch.n_used_mmu_pages > 0) {
+ if (!nr_to_scan--)
+ break;
+ continue;
+ }
+
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
- if (!kvm_freed && nr_to_scan > 0 &&
- kvm->arch.n_used_mmu_pages > 0) {
- kvm_mmu_remove_some_alloc_mmu_pages(kvm,
- &invalid_list);
- kvm_freed = kvm;
- }
- nr_to_scan--;
+ kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
kvm_mmu_commit_zap_page(kvm, &invalid_list);
+
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
+
+ list_move_tail(&kvm->vm_list, &vm_list);
+ break;
}
- if (kvm_freed)
- list_move_tail(&kvm_freed->vm_list, &vm_list);
raw_spin_unlock(&kvm_lock);
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 89fb0e81322a..cd6e98333ba3 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -54,8 +54,8 @@
*/
TRACE_EVENT(
kvm_mmu_pagetable_walk,
- TP_PROTO(u64 addr, int write_fault, int user_fault, int fetch_fault),
- TP_ARGS(addr, write_fault, user_fault, fetch_fault),
+ TP_PROTO(u64 addr, u32 pferr),
+ TP_ARGS(addr, pferr),
TP_STRUCT__entry(
__field(__u64, addr)
@@ -64,8 +64,7 @@ TRACE_EVENT(
TP_fast_assign(
__entry->addr = addr;
- __entry->pferr = (!!write_fault << 1) | (!!user_fault << 2)
- | (!!fetch_fault << 4);
+ __entry->pferr = pferr;
),
TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
@@ -243,6 +242,44 @@ TRACE_EVENT(
TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
__entry->access)
);
+
+#define __spte_satisfied(__spte) \
+ (__entry->retry && is_writable_pte(__entry->__spte))
+
+TRACE_EVENT(
+ fast_page_fault,
+ TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+ u64 *sptep, u64 old_spte, bool retry),
+ TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
+
+ TP_STRUCT__entry(
+ __field(int, vcpu_id)
+ __field(gva_t, gva)
+ __field(u32, error_code)
+ __field(u64 *, sptep)
+ __field(u64, old_spte)
+ __field(u64, new_spte)
+ __field(bool, retry)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->gva = gva;
+ __entry->error_code = error_code;
+ __entry->sptep = sptep;
+ __entry->old_spte = old_spte;
+ __entry->new_spte = *sptep;
+ __entry->retry = retry;
+ ),
+
+ TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx"
+ " new %llx spurious %d fixed %d", __entry->vcpu_id,
+ __entry->gva, __print_flags(__entry->error_code, "|",
+ kvm_mmu_trace_pferr_flags), __entry->sptep,
+ __entry->old_spte, __entry->new_spte,
+ __spte_satisfied(old_spte), __spte_satisfied(new_spte)
+ )
+);
#endif /* _TRACE_KVMMMU_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 34f970937ef1..bb7cf01cae76 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -154,8 +154,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
const int fetch_fault = access & PFERR_FETCH_MASK;
u16 errcode = 0;
- trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
- fetch_fault);
+ trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
eperm = false;
walker->level = mmu->root_level;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 2e88438ffd83..9b7ec1150ab0 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -80,10 +80,10 @@ static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
- if (idx < X86_PMC_IDX_FIXED)
+ if (idx < INTEL_PMC_IDX_FIXED)
return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
else
- return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
+ return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
}
void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
@@ -291,7 +291,7 @@ static void reprogram_idx(struct kvm_pmu *pmu, int idx)
if (pmc_is_gp(pmc))
reprogram_gp_counter(pmc, pmc->eventsel);
else {
- int fidx = idx - X86_PMC_IDX_FIXED;
+ int fidx = idx - INTEL_PMC_IDX_FIXED;
reprogram_fixed_counter(pmc,
fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
}
@@ -452,7 +452,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
return;
pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
- X86_PMC_MAX_GENERIC);
+ INTEL_PMC_MAX_GENERIC);
pmu->counter_bitmask[KVM_PMC_GP] =
((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
bitmap_len = (entry->eax >> 24) & 0xff;
@@ -462,13 +462,13 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
pmu->nr_arch_fixed_counters = 0;
} else {
pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
- X86_PMC_MAX_FIXED);
+ INTEL_PMC_MAX_FIXED);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
}
pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
- (((1ull << pmu->nr_arch_fixed_counters) - 1) << X86_PMC_IDX_FIXED);
+ (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
}
@@ -478,15 +478,15 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
struct kvm_pmu *pmu = &vcpu->arch.pmu;
memset(pmu, 0, sizeof(*pmu));
- for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+ for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
pmu->gp_counters[i].type = KVM_PMC_GP;
pmu->gp_counters[i].vcpu = vcpu;
pmu->gp_counters[i].idx = i;
}
- for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
+ for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
pmu->fixed_counters[i].type = KVM_PMC_FIXED;
pmu->fixed_counters[i].vcpu = vcpu;
- pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
+ pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
}
init_irq_work(&pmu->irq_work, trigger_pmi);
kvm_pmu_cpuid_update(vcpu);
@@ -498,13 +498,13 @@ void kvm_pmu_reset(struct kvm_vcpu *vcpu)
int i;
irq_work_sync(&pmu->irq_work);
- for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
+ for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
struct kvm_pmc *pmc = &pmu->gp_counters[i];
stop_counter(pmc);
pmc->counter = pmc->eventsel = 0;
}
- for (i = 0; i < X86_PMC_MAX_FIXED; i++)
+ for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
stop_counter(&pmu->fixed_counters[i]);
pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f75af406b268..baead950d6c8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3185,8 +3185,8 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
break;
case MSR_IA32_DEBUGCTLMSR:
if (!boot_cpu_has(X86_FEATURE_LBRV)) {
- pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
- __func__, data);
+ vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
+ __func__, data);
break;
}
if (data & DEBUGCTL_RESERVED_BITS)
@@ -3205,7 +3205,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
case MSR_VM_CR:
return svm_set_vm_cr(vcpu, data);
case MSR_VM_IGNNE:
- pr_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
+ vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
default:
return kvm_set_msr_common(vcpu, ecx, data);
@@ -4044,6 +4044,11 @@ static bool svm_rdtscp_supported(void)
return false;
}
+static bool svm_invpcid_supported(void)
+{
+ return false;
+}
+
static bool svm_has_wbinvd_exit(void)
{
return true;
@@ -4312,6 +4317,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.cpuid_update = svm_cpuid_update,
.rdtscp_supported = svm_rdtscp_supported,
+ .invpcid_supported = svm_invpcid_supported,
.set_supported_cpuid = svm_set_supported_cpuid,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 911d2641f14c..a71faf727ff3 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -517,6 +517,40 @@ TRACE_EVENT(kvm_apic_accept_irq,
__entry->coalesced ? " (coalesced)" : "")
);
+TRACE_EVENT(kvm_eoi,
+ TP_PROTO(struct kvm_lapic *apic, int vector),
+ TP_ARGS(apic, vector),
+
+ TP_STRUCT__entry(
+ __field( __u32, apicid )
+ __field( int, vector )
+ ),
+
+ TP_fast_assign(
+ __entry->apicid = apic->vcpu->vcpu_id;
+ __entry->vector = vector;
+ ),
+
+ TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
+);
+
+TRACE_EVENT(kvm_pv_eoi,
+ TP_PROTO(struct kvm_lapic *apic, int vector),
+ TP_ARGS(apic, vector),
+
+ TP_STRUCT__entry(
+ __field( __u32, apicid )
+ __field( int, vector )
+ ),
+
+ TP_fast_assign(
+ __entry->apicid = apic->vcpu->vcpu_id;
+ __entry->vector = vector;
+ ),
+
+ TP_printk("apicid %x vector %d", __entry->apicid, __entry->vector)
+);
+
/*
* Tracepoint for nested VMRUN
*/
@@ -710,16 +744,6 @@ TRACE_EVENT(kvm_skinit,
__entry->rip, __entry->slb)
);
-#define __print_insn(insn, ilen) ({ \
- int i; \
- const char *ret = p->buffer + p->len; \
- \
- for (i = 0; i < ilen; ++i) \
- trace_seq_printf(p, " %02x", insn[i]); \
- trace_seq_printf(p, "%c", 0); \
- ret; \
- })
-
#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
#define KVM_EMUL_INSN_F_CS_D (1 << 2)
@@ -786,7 +810,7 @@ TRACE_EVENT(kvm_emulate_insn,
TP_printk("%x:%llx:%s (%s)%s",
__entry->csbase, __entry->rip,
- __print_insn(__entry->insn, __entry->len),
+ __print_hex(__entry->insn, __entry->len),
__print_symbolic(__entry->flags,
kvm_trace_symbol_emul_flags),
__entry->failed ? " failed" : ""
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 32eb58866292..c39b60707e02 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -71,7 +71,10 @@ static bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
enable_unrestricted_guest, bool, S_IRUGO);
-static bool __read_mostly emulate_invalid_guest_state = 0;
+static bool __read_mostly enable_ept_ad_bits = 1;
+module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
+
+static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);
static bool __read_mostly vmm_exclusive = 1;
@@ -615,6 +618,10 @@ static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
+static void vmx_set_segment(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg);
+static void vmx_get_segment(struct kvm_vcpu *vcpu,
+ struct kvm_segment *var, int seg);
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -789,6 +796,11 @@ static inline bool cpu_has_vmx_ept_4levels(void)
return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}
+static inline bool cpu_has_vmx_ept_ad_bits(void)
+{
+ return vmx_capability.ept & VMX_EPT_AD_BIT;
+}
+
static inline bool cpu_has_vmx_invept_individual_addr(void)
{
return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
@@ -849,6 +861,12 @@ static inline bool cpu_has_vmx_rdtscp(void)
SECONDARY_EXEC_RDTSCP;
}
+static inline bool cpu_has_vmx_invpcid(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_ENABLE_INVPCID;
+}
+
static inline bool cpu_has_virtual_nmis(void)
{
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
@@ -1739,6 +1757,11 @@ static bool vmx_rdtscp_supported(void)
return cpu_has_vmx_rdtscp();
}
+static bool vmx_invpcid_supported(void)
+{
+ return cpu_has_vmx_invpcid() && enable_ept;
+}
+
/*
* Swap MSR entry in host/guest MSR entry array.
*/
@@ -2458,7 +2481,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST |
SECONDARY_EXEC_PAUSE_LOOP_EXITING |
- SECONDARY_EXEC_RDTSCP;
+ SECONDARY_EXEC_RDTSCP |
+ SECONDARY_EXEC_ENABLE_INVPCID;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -2645,8 +2669,12 @@ static __init int hardware_setup(void)
!cpu_has_vmx_ept_4levels()) {
enable_ept = 0;
enable_unrestricted_guest = 0;
+ enable_ept_ad_bits = 0;
}
+ if (!cpu_has_vmx_ept_ad_bits())
+ enable_ept_ad_bits = 0;
+
if (!cpu_has_vmx_unrestricted_guest())
enable_unrestricted_guest = 0;
@@ -2770,6 +2798,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
{
unsigned long flags;
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct kvm_segment var;
if (enable_unrestricted_guest)
return;
@@ -2813,20 +2842,23 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
if (emulate_invalid_guest_state)
goto continue_rmode;
- vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
- vmcs_write32(GUEST_SS_LIMIT, 0xffff);
- vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
+ vmx_get_segment(vcpu, &var, VCPU_SREG_SS);
+ vmx_set_segment(vcpu, &var, VCPU_SREG_SS);
+
+ vmx_get_segment(vcpu, &var, VCPU_SREG_CS);
+ vmx_set_segment(vcpu, &var, VCPU_SREG_CS);
+
+ vmx_get_segment(vcpu, &var, VCPU_SREG_ES);
+ vmx_set_segment(vcpu, &var, VCPU_SREG_ES);
+
+ vmx_get_segment(vcpu, &var, VCPU_SREG_DS);
+ vmx_set_segment(vcpu, &var, VCPU_SREG_DS);
- vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
- vmcs_write32(GUEST_CS_LIMIT, 0xffff);
- if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
- vmcs_writel(GUEST_CS_BASE, 0xf0000);
- vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
+ vmx_get_segment(vcpu, &var, VCPU_SREG_GS);
+ vmx_set_segment(vcpu, &var, VCPU_SREG_GS);
- fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
- fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
- fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
- fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
+ vmx_get_segment(vcpu, &var, VCPU_SREG_FS);
+ vmx_set_segment(vcpu, &var, VCPU_SREG_FS);
continue_rmode:
kvm_mmu_reset_context(vcpu);
@@ -3027,6 +3059,8 @@ static u64 construct_eptp(unsigned long root_hpa)
/* TODO write the value reading from MSR */
eptp = VMX_EPT_DEFAULT_MT |
VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
+ if (enable_ept_ad_bits)
+ eptp |= VMX_EPT_AD_ENABLE_BIT;
eptp |= (root_hpa & PAGE_MASK);
return eptp;
@@ -3153,11 +3187,22 @@ static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations
+ * fail; use the cache instead.
+ */
+ if (unlikely(vmx->emulation_required && emulate_invalid_guest_state)) {
+ return vmx->cpl;
+ }
+
if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
__set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
- to_vmx(vcpu)->cpl = __vmx_get_cpl(vcpu);
+ vmx->cpl = __vmx_get_cpl(vcpu);
}
- return to_vmx(vcpu)->cpl;
+
+ return vmx->cpl;
}
@@ -3165,7 +3210,7 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
u32 ar;
- if (var->unusable)
+ if (var->unusable || !var->present)
ar = 1 << 16;
else {
ar = var->type & 15;
@@ -3177,8 +3222,6 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var)
ar |= (var->db & 1) << 14;
ar |= (var->g & 1) << 15;
}
- if (ar == 0) /* a 0 value means unusable */
- ar = AR_UNUSABLE_MASK;
return ar;
}
@@ -3229,6 +3272,44 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
vmcs_write32(sf->ar_bytes, ar);
__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
+
+ /*
+ * Fix segments for real mode guest in hosts that don't have
+ * "unrestricted_mode" or it was disabled.
+ * This is done to allow migration of the guests from hosts with
+ * unrestricted guest like Westmere to older host that don't have
+ * unrestricted guest like Nehelem.
+ */
+ if (!enable_unrestricted_guest && vmx->rmode.vm86_active) {
+ switch (seg) {
+ case VCPU_SREG_CS:
+ vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
+ vmcs_write32(GUEST_CS_LIMIT, 0xffff);
+ if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
+ vmcs_writel(GUEST_CS_BASE, 0xf0000);
+ vmcs_write16(GUEST_CS_SELECTOR,
+ vmcs_readl(GUEST_CS_BASE) >> 4);
+ break;
+ case VCPU_SREG_ES:
+ fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
+ break;
+ case VCPU_SREG_DS:
+ fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
+ break;
+ case VCPU_SREG_GS:
+ fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
+ break;
+ case VCPU_SREG_FS:
+ fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
+ break;
+ case VCPU_SREG_SS:
+ vmcs_write16(GUEST_SS_SELECTOR,
+ vmcs_readl(GUEST_SS_BASE) >> 4);
+ vmcs_write32(GUEST_SS_LIMIT, 0xffff);
+ vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
+ break;
+ }
+ }
}
static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -3731,6 +3812,8 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
if (!enable_ept) {
exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
enable_unrestricted_guest = 0;
+ /* Enable INVPCID for non-ept guests may cause performance regression. */
+ exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
}
if (!enable_unrestricted_guest)
exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
@@ -4489,7 +4572,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
break;
}
vcpu->run->exit_reason = 0;
- pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
+ vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
(int)(exit_qualification >> 4) & 3, cr);
return 0;
}
@@ -4769,6 +4852,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
gpa_t gpa;
+ u32 error_code;
int gla_validity;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -4793,7 +4877,13 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
trace_kvm_page_fault(gpa, exit_qualification);
- return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
+
+ /* It is a write fault? */
+ error_code = exit_qualification & (1U << 1);
+ /* ept page table is present? */
+ error_code |= (exit_qualification >> 3) & 0x1;
+
+ return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}
static u64 ept_rsvd_mask(u64 spte, int level)
@@ -4908,15 +4998,18 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
int ret = 1;
u32 cpu_exec_ctrl;
bool intr_window_requested;
+ unsigned count = 130;
cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
- while (!guest_state_valid(vcpu)) {
- if (intr_window_requested
- && (kvm_get_rflags(&vmx->vcpu) & X86_EFLAGS_IF))
+ while (!guest_state_valid(vcpu) && count-- != 0) {
+ if (intr_window_requested && vmx_interrupt_allowed(vcpu))
return handle_interrupt_window(&vmx->vcpu);
+ if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
+ return 1;
+
err = emulate_instruction(vcpu, 0);
if (err == EMULATE_DO_MMIO) {
@@ -4924,8 +5017,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
goto out;
}
- if (err != EMULATE_DONE)
+ if (err != EMULATE_DONE) {
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
return 0;
+ }
if (signal_pending(current))
goto out;
@@ -4933,7 +5030,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
schedule();
}
- vmx->emulation_required = 0;
+ vmx->emulation_required = !guest_state_valid(vcpu);
out:
return ret;
}
@@ -6467,6 +6564,23 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
}
}
}
+
+ exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ /* Exposing INVPCID only when PCID is exposed */
+ best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
+ if (vmx_invpcid_supported() &&
+ best && (best->ecx & bit(X86_FEATURE_INVPCID)) &&
+ guest_cpuid_has_pcid(vcpu)) {
+ exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ exec_control);
+ } else {
+ exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ exec_control);
+ if (best)
+ best->ecx &= ~bit(X86_FEATURE_INVPCID);
+ }
}
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
@@ -7201,6 +7315,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.cpuid_update = vmx_cpuid_update,
.rdtscp_supported = vmx_rdtscp_supported,
+ .invpcid_supported = vmx_invpcid_supported,
.set_supported_cpuid = vmx_set_supported_cpuid,
@@ -7230,23 +7345,21 @@ static int __init vmx_init(void)
if (!vmx_io_bitmap_a)
return -ENOMEM;
+ r = -ENOMEM;
+
vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_io_bitmap_b) {
- r = -ENOMEM;
+ if (!vmx_io_bitmap_b)
goto out;
- }
vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy) {
- r = -ENOMEM;
+ if (!vmx_msr_bitmap_legacy)
goto out1;
- }
+
vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode) {
- r = -ENOMEM;
+ if (!vmx_msr_bitmap_longmode)
goto out2;
- }
+
/*
* Allow direct access to the PC debug port (it is often used for I/O
@@ -7275,8 +7388,10 @@ static int __init vmx_init(void)
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
if (enable_ept) {
- kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
- VMX_EPT_EXECUTABLE_MASK);
+ kvm_mmu_set_mask_ptes(0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+ 0ull, VMX_EPT_EXECUTABLE_MASK);
ept_set_mmio_spte_mask();
kvm_enable_tdp();
} else
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index be6d54929fa7..59b59508ff07 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -528,6 +528,9 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
return 1;
}
+ if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
+ return 1;
+
kvm_x86_ops->set_cr0(vcpu, cr0);
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
@@ -604,10 +607,20 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
kvm_read_cr3(vcpu)))
return 1;
+ if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
+ if (!guest_cpuid_has_pcid(vcpu))
+ return 1;
+
+ /* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
+ if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
+ return 1;
+ }
+
if (kvm_x86_ops->set_cr4(vcpu, cr4))
return 1;
- if ((cr4 ^ old_cr4) & pdptr_bits)
+ if (((cr4 ^ old_cr4) & pdptr_bits) ||
+ (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
@@ -626,8 +639,12 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
}
if (is_long_mode(vcpu)) {
- if (cr3 & CR3_L_MODE_RESERVED_BITS)
- return 1;
+ if (kvm_read_cr4(vcpu) & X86_CR4_PCIDE) {
+ if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
+ return 1;
+ } else
+ if (cr3 & CR3_L_MODE_RESERVED_BITS)
+ return 1;
} else {
if (is_pae(vcpu)) {
if (cr3 & CR3_PAE_RESERVED_BITS)
@@ -795,6 +812,7 @@ static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
+ MSR_KVM_PV_EOI_EN,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
MSR_STAR,
#ifdef CONFIG_X86_64
@@ -1437,8 +1455,8 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
break;
}
default:
- pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
- "data 0x%llx\n", msr, data);
+ vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+ "data 0x%llx\n", msr, data);
return 1;
}
return 0;
@@ -1470,8 +1488,8 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case HV_X64_MSR_TPR:
return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
default:
- pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
- "data 0x%llx\n", msr, data);
+ vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+ "data 0x%llx\n", msr, data);
return 1;
}
@@ -1551,15 +1569,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
if (data != 0) {
- pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
- data);
+ vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
+ data);
return 1;
}
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) {
- pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
- "0x%llx\n", data);
+ vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
+ "0x%llx\n", data);
return 1;
}
break;
@@ -1574,8 +1592,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
thus reserved and should throw a #GP */
return 1;
}
- pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
- __func__, data);
+ vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
+ __func__, data);
break;
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
@@ -1653,6 +1671,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
+ case MSR_KVM_PV_EOI_EN:
+ if (kvm_lapic_enable_pv_eoi(vcpu, data))
+ return 1;
+ break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
@@ -1671,8 +1693,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
if (data != 0)
- pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
- "0x%x data 0x%llx\n", msr, data);
+ vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
+ "0x%x data 0x%llx\n", msr, data);
break;
/* at least RHEL 4 unconditionally writes to the perfctr registers,
* so we ignore writes to make it happy.
@@ -1681,8 +1703,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
- pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
- "0x%x data 0x%llx\n", msr, data);
+ vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
+ "0x%x data 0x%llx\n", msr, data);
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
@@ -1693,8 +1715,8 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
return kvm_pmu_set_msr(vcpu, msr, data);
if (pr || data != 0)
- pr_unimpl(vcpu, "disabled perfctr wrmsr: "
- "0x%x data 0x%llx\n", msr, data);
+ vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
+ "0x%x data 0x%llx\n", msr, data);
break;
case MSR_K7_CLK_CTL:
/*
@@ -1720,7 +1742,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
- pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
+ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
@@ -1738,12 +1760,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (!ignore_msrs) {
- pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
- msr, data);
+ vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
+ msr, data);
return 1;
} else {
- pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
- msr, data);
+ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
+ msr, data);
break;
}
}
@@ -1846,7 +1868,7 @@ static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
data = kvm->arch.hv_hypercall;
break;
default:
- pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+ vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
}
@@ -1877,7 +1899,7 @@ static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
data = vcpu->arch.hv_vapic;
break;
default:
- pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+ vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
}
*pdata = data;
@@ -2030,10 +2052,10 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_get_msr(vcpu, msr, pdata);
if (!ignore_msrs) {
- pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
+ vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
return 1;
} else {
- pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
+ vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
data = 0;
}
break;
@@ -4116,7 +4138,7 @@ static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
value = kvm_get_cr8(vcpu);
break;
default:
- vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
+ kvm_err("%s: unexpected cr %u\n", __func__, cr);
return 0;
}
@@ -4145,7 +4167,7 @@ static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
res = kvm_set_cr8(vcpu, val);
break;
default:
- vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
+ kvm_err("%s: unexpected cr %u\n", __func__, cr);
res = -1;
}
@@ -4297,26 +4319,10 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
}
-static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
+static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
- struct kvm_cpuid_entry2 *cpuid = NULL;
-
- if (eax && ecx)
- cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
- *eax, *ecx);
-
- if (cpuid) {
- *eax = cpuid->eax;
- *ecx = cpuid->ecx;
- if (ebx)
- *ebx = cpuid->ebx;
- if (edx)
- *edx = cpuid->edx;
- return true;
- }
-
- return false;
+ kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
}
static struct x86_emulate_ops emulate_ops = {
@@ -5296,8 +5302,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
r = kvm_mmu_reload(vcpu);
if (unlikely(r)) {
- kvm_x86_ops->cancel_injection(vcpu);
- goto out;
+ goto cancel_injection;
}
preempt_disable();
@@ -5322,9 +5327,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
smp_wmb();
local_irq_enable();
preempt_enable();
- kvm_x86_ops->cancel_injection(vcpu);
r = 1;
- goto out;
+ goto cancel_injection;
}
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -5388,9 +5392,16 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (unlikely(vcpu->arch.tsc_always_catchup))
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
- kvm_lapic_sync_from_vapic(vcpu);
+ if (vcpu->arch.apic_attention)
+ kvm_lapic_sync_from_vapic(vcpu);
r = kvm_x86_ops->handle_exit(vcpu);
+ return r;
+
+cancel_injection:
+ kvm_x86_ops->cancel_injection(vcpu);
+ if (unlikely(vcpu->arch.apic_attention))
+ kvm_lapic_sync_from_vapic(vcpu);
out:
return r;
}
@@ -6304,7 +6315,7 @@ void kvm_arch_free_memslot(struct kvm_memory_slot *free,
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
- vfree(free->arch.lpage_info[i]);
+ kvm_kvfree(free->arch.lpage_info[i]);
free->arch.lpage_info[i] = NULL;
}
}
@@ -6323,7 +6334,7 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
slot->base_gfn, level) + 1;
slot->arch.lpage_info[i] =
- vzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
+ kvm_kvzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
if (!slot->arch.lpage_info[i])
goto out_free;
@@ -6350,7 +6361,7 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
out_free:
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
- vfree(slot->arch.lpage_info[i]);
+ kvm_kvfree(slot->arch.lpage_info[i]);
slot->arch.lpage_info[i] = NULL;
}
return -ENOMEM;
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 459b58a8a15c..25b7ae8d058a 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -115,7 +115,7 @@ EXPORT_SYMBOL(csum_partial_copy_to_user);
* @src: source address
* @dst: destination address
* @len: number of bytes to be copied.
- * @isum: initial sum that is added into the result (32bit unfolded)
+ * @sum: initial sum that is added into the result (32bit unfolded)
*
* Returns an 32bit unfolded checksum of the buffer.
*/
diff --git a/arch/x86/lib/msr-reg-export.c b/arch/x86/lib/msr-reg-export.c
index a311cc59b65d..8d6ef78b5d01 100644
--- a/arch/x86/lib/msr-reg-export.c
+++ b/arch/x86/lib/msr-reg-export.c
@@ -1,5 +1,5 @@
#include <linux/module.h>
#include <asm/msr.h>
-EXPORT_SYMBOL(native_rdmsr_safe_regs);
-EXPORT_SYMBOL(native_wrmsr_safe_regs);
+EXPORT_SYMBOL(rdmsr_safe_regs);
+EXPORT_SYMBOL(wrmsr_safe_regs);
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index 69fa10623f21..f6d13eefad10 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -6,13 +6,13 @@
#ifdef CONFIG_X86_64
/*
- * int native_{rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
+ * int {rdmsr,wrmsr}_safe_regs(u32 gprs[8]);
*
* reg layout: u32 gprs[eax, ecx, edx, ebx, esp, ebp, esi, edi]
*
*/
.macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
CFI_STARTPROC
pushq_cfi %rbx
pushq_cfi %rbp
@@ -45,13 +45,13 @@ ENTRY(native_\op\()_safe_regs)
_ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
.endm
#else /* X86_32 */
.macro op_safe_regs op
-ENTRY(native_\op\()_safe_regs)
+ENTRY(\op\()_safe_regs)
CFI_STARTPROC
pushl_cfi %ebx
pushl_cfi %ebp
@@ -92,7 +92,7 @@ ENTRY(native_\op\()_safe_regs)
_ASM_EXTABLE(1b, 3b)
CFI_ENDPROC
-ENDPROC(native_\op\()_safe_regs)
+ENDPROC(\op\()_safe_regs)
.endm
#endif
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index bc4e9d84157f..e0e6990723e9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -385,7 +385,7 @@ void free_initmem(void)
}
#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
+void __init free_initrd_mem(unsigned long start, unsigned long end)
{
/*
* end could be not aligned, and We can not align that,
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 0597f95b6da6..33643a8bcbbb 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -309,6 +309,10 @@ void bpf_jit_compile(struct sk_filter *fp)
else
EMIT1_off32(0x0d, K); /* or imm32,%eax */
break;
+ case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
+ seen |= SEEN_XREG;
+ EMIT2(0x31, 0xd8); /* xor %ebx,%eax */
+ break;
case BPF_S_ALU_LSH_X: /* A <<= X; */
seen |= SEEN_XREG;
EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 303f08637826..b2b94438ff05 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -312,7 +312,7 @@ static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
goto fail;
}
/* both registers must be reserved */
- if (num_counters == AMD64_NUM_COUNTERS_F15H) {
+ if (num_counters == AMD64_NUM_COUNTERS_CORE) {
msrs->counters[i].addr = MSR_F15H_PERF_CTR + (i << 1);
msrs->controls[i].addr = MSR_F15H_PERF_CTL + (i << 1);
} else {
@@ -514,7 +514,7 @@ static int op_amd_init(struct oprofile_operations *ops)
ops->create_files = setup_ibs_files;
if (boot_cpu_data.x86 == 0x15) {
- num_counters = AMD64_NUM_COUNTERS_F15H;
+ num_counters = AMD64_NUM_COUNTERS_CORE;
} else {
num_counters = AMD64_NUM_COUNTERS;
}
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index fc09c2754e08..505acdd6d600 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -12,8 +12,13 @@ struct pci_root_info {
char name[16];
unsigned int res_num;
struct resource *res;
- int busnum;
struct pci_sysdata sd;
+#ifdef CONFIG_PCI_MMCONFIG
+ bool mcfg_added;
+ u16 segment;
+ u8 start_bus;
+ u8 end_bus;
+#endif
};
static bool pci_use_crs = true;
@@ -120,6 +125,81 @@ void __init pci_acpi_crs_quirks(void)
pci_use_crs ? "nocrs" : "use_crs");
}
+#ifdef CONFIG_PCI_MMCONFIG
+static int __devinit check_segment(u16 seg, struct device *dev, char *estr)
+{
+ if (seg) {
+ dev_err(dev,
+ "%s can't access PCI configuration "
+ "space under this host bridge.\n",
+ estr);
+ return -EIO;
+ }
+
+ /*
+ * Failure in adding MMCFG information is not fatal,
+ * just can't access extended configuration space of
+ * devices under this host bridge.
+ */
+ dev_warn(dev,
+ "%s can't access extended PCI configuration "
+ "space under this bridge.\n",
+ estr);
+
+ return 0;
+}
+
+static int __devinit setup_mcfg_map(struct pci_root_info *info,
+ u16 seg, u8 start, u8 end,
+ phys_addr_t addr)
+{
+ int result;
+ struct device *dev = &info->bridge->dev;
+
+ info->start_bus = start;
+ info->end_bus = end;
+ info->mcfg_added = false;
+
+ /* return success if MMCFG is not in use */
+ if (raw_pci_ext_ops && raw_pci_ext_ops != &pci_mmcfg)
+ return 0;
+
+ if (!(pci_probe & PCI_PROBE_MMCONF))
+ return check_segment(seg, dev, "MMCONFIG is disabled,");
+
+ result = pci_mmconfig_insert(dev, seg, start, end, addr);
+ if (result == 0) {
+ /* enable MMCFG if it hasn't been enabled yet */
+ if (raw_pci_ext_ops == NULL)
+ raw_pci_ext_ops = &pci_mmcfg;
+ info->mcfg_added = true;
+ } else if (result != -EEXIST)
+ return check_segment(seg, dev,
+ "fail to add MMCONFIG information,");
+
+ return 0;
+}
+
+static void teardown_mcfg_map(struct pci_root_info *info)
+{
+ if (info->mcfg_added) {
+ pci_mmconfig_delete(info->segment, info->start_bus,
+ info->end_bus);
+ info->mcfg_added = false;
+ }
+}
+#else
+static int __devinit setup_mcfg_map(struct pci_root_info *info,
+ u16 seg, u8 start, u8 end,
+ phys_addr_t addr)
+{
+ return 0;
+}
+static void teardown_mcfg_map(struct pci_root_info *info)
+{
+}
+#endif
+
static acpi_status
resource_to_addr(struct acpi_resource *resource,
struct acpi_resource_address64 *addr)
@@ -234,13 +314,6 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
}
info->res_num++;
- if (addr.translation_offset)
- dev_info(&info->bridge->dev, "host bridge window %pR "
- "(PCI address [%#llx-%#llx])\n",
- res, res->start - addr.translation_offset,
- res->end - addr.translation_offset);
- else
- dev_info(&info->bridge->dev, "host bridge window %pR\n", res);
return AE_OK;
}
@@ -332,8 +405,11 @@ static void __release_pci_root_info(struct pci_root_info *info)
free_pci_root_info_res(info);
+ teardown_mcfg_map(info);
+
kfree(info);
}
+
static void release_pci_root_info(struct pci_host_bridge *bridge)
{
struct pci_root_info *info = bridge->release_data;
@@ -347,7 +423,9 @@ probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
{
size_t size;
+ sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
info->bridge = device;
+
info->res_num = 0;
acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
info);
@@ -360,8 +438,6 @@ probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
if (!info->res)
return;
- sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum);
-
acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
info);
}
@@ -373,7 +449,7 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
int domain = root->segment;
int busnum = root->secondary.start;
LIST_HEAD(resources);
- struct pci_bus *bus;
+ struct pci_bus *bus = NULL;
struct pci_sysdata *sd;
int node;
#ifdef CONFIG_ACPI_NUMA
@@ -426,6 +502,8 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
} else {
probe_pci_root_info(info, device, busnum, domain);
+ /* insert busn res at first */
+ pci_add_resource(&resources, &root->secondary);
/*
* _CRS with no apertures is normal, so only fall back to
* defaults or native bridge info if we're ignoring _CRS.
@@ -437,10 +515,13 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
x86_pci_root_bus_resources(busnum, &resources);
}
- bus = pci_create_root_bus(NULL, busnum, &pci_root_ops, sd,
- &resources);
+ if (!setup_mcfg_map(info, domain, (u8)root->secondary.start,
+ (u8)root->secondary.end, root->mcfg_addr))
+ bus = pci_create_root_bus(NULL, busnum, &pci_root_ops,
+ sd, &resources);
+
if (bus) {
- bus->subordinate = pci_scan_child_bus(bus);
+ pci_scan_child_bus(bus);
pci_set_host_bridge_release(
to_pci_host_bridge(bus->bridge),
release_pci_root_info, info);
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 5aed49bff058..e9e6ed5cdf94 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -121,7 +121,6 @@ static int __init early_fill_mp_bus_info(void)
link = (reg >> 8) & 0x03;
info = alloc_pci_root_info(min_bus, max_bus, node, link);
- sprintf(info->name, "PCI Bus #%02x", min_bus);
}
/* get the default node and link for left over res */
@@ -300,9 +299,9 @@ static int __init early_fill_mp_bus_info(void)
int busnum;
struct pci_root_res *root_res;
- busnum = info->bus_min;
- printk(KERN_DEBUG "bus: [%02x, %02x] on node %x link %x\n",
- info->bus_min, info->bus_max, info->node, info->link);
+ busnum = info->busn.start;
+ printk(KERN_DEBUG "bus: %pR on node %x link %x\n",
+ &info->busn, info->node, info->link);
list_for_each_entry(root_res, &info->resources, list)
printk(KERN_DEBUG "bus: %02x %pR\n",
busnum, &root_res->res);
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 306579f7d0fd..d37e2fec97e5 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -14,7 +14,7 @@ static struct pci_root_info *x86_find_pci_root_info(int bus)
return NULL;
list_for_each_entry(info, &pci_root_infos, list)
- if (info->bus_min == bus)
+ if (info->busn.start == bus)
return info;
return NULL;
@@ -24,6 +24,8 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
{
struct pci_root_info *info = x86_find_pci_root_info(bus);
struct pci_root_res *root_res;
+ struct pci_host_bridge_window *window;
+ bool found = false;
if (!info)
goto default_resources;
@@ -31,6 +33,16 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
printk(KERN_DEBUG "PCI: root bus %02x: hardware-probed resources\n",
bus);
+ /* already added by acpi ? */
+ list_for_each_entry(window, resources, list)
+ if (window->res->flags & IORESOURCE_BUS) {
+ found = true;
+ break;
+ }
+
+ if (!found)
+ pci_add_resource(resources, &info->busn);
+
list_for_each_entry(root_res, &info->resources, list) {
struct resource *res;
struct resource *root;
@@ -66,9 +78,13 @@ struct pci_root_info __init *alloc_pci_root_info(int bus_min, int bus_max,
if (!info)
return info;
+ sprintf(info->name, "PCI Bus #%02x", bus_min);
+
INIT_LIST_HEAD(&info->resources);
- info->bus_min = bus_min;
- info->bus_max = bus_max;
+ info->busn.name = info->name;
+ info->busn.start = bus_min;
+ info->busn.end = bus_max;
+ info->busn.flags = IORESOURCE_BUS;
info->node = node;
info->link = link;
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h
index 226a466b2b2b..ff8f65b04574 100644
--- a/arch/x86/pci/bus_numa.h
+++ b/arch/x86/pci/bus_numa.h
@@ -13,8 +13,7 @@ struct pci_root_info {
struct list_head list;
char name[12];
struct list_head resources;
- int bus_min;
- int bus_max;
+ struct resource busn;
int node;
int link;
};
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 0ad990a20d4a..720e973fc34a 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -494,7 +494,7 @@ int __init pcibios_init(void)
return 0;
}
-char * __devinit pcibios_setup(char *str)
+char * __init pcibios_setup(char *str)
{
if (!strcmp(str, "off")) {
pci_probe = 0;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 301e325992f6..937bcece7006 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -17,6 +17,8 @@
#include <linux/bitmap.h>
#include <linux/dmi.h>
#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
#include <asm/acpi.h>
@@ -24,7 +26,9 @@
#define PREFIX "PCI: "
/* Indicate if the mmcfg resources have been placed into the resource table. */
-static int __initdata pci_mmcfg_resources_inserted;
+static bool pci_mmcfg_running_state;
+static bool pci_mmcfg_arch_init_failed;
+static DEFINE_MUTEX(pci_mmcfg_lock);
LIST_HEAD(pci_mmcfg_list);
@@ -45,24 +49,25 @@ static __init void free_all_mmcfg(void)
pci_mmconfig_remove(cfg);
}
-static __init void list_add_sorted(struct pci_mmcfg_region *new)
+static __devinit void list_add_sorted(struct pci_mmcfg_region *new)
{
struct pci_mmcfg_region *cfg;
/* keep list sorted by segment and starting bus number */
- list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+ list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list) {
if (cfg->segment > new->segment ||
(cfg->segment == new->segment &&
cfg->start_bus >= new->start_bus)) {
- list_add_tail(&new->list, &cfg->list);
+ list_add_tail_rcu(&new->list, &cfg->list);
return;
}
}
- list_add_tail(&new->list, &pci_mmcfg_list);
+ list_add_tail_rcu(&new->list, &pci_mmcfg_list);
}
-static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
- int end, u64 addr)
+static __devinit struct pci_mmcfg_region *pci_mmconfig_alloc(int segment,
+ int start,
+ int end, u64 addr)
{
struct pci_mmcfg_region *new;
struct resource *res;
@@ -79,8 +84,6 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
new->start_bus = start;
new->end_bus = end;
- list_add_sorted(new);
-
res = &new->res;
res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
res->end = addr + PCI_MMCFG_BUS_OFFSET(end + 1) - 1;
@@ -89,9 +92,25 @@ static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
"PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
res->name = new->name;
- printk(KERN_INFO PREFIX "MMCONFIG for domain %04x [bus %02x-%02x] at "
- "%pR (base %#lx)\n", segment, start, end, &new->res,
- (unsigned long) addr);
+ return new;
+}
+
+static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
+ int end, u64 addr)
+{
+ struct pci_mmcfg_region *new;
+
+ new = pci_mmconfig_alloc(segment, start, end, addr);
+ if (new) {
+ mutex_lock(&pci_mmcfg_lock);
+ list_add_sorted(new);
+ mutex_unlock(&pci_mmcfg_lock);
+
+ pr_info(PREFIX
+ "MMCONFIG for domain %04x [bus %02x-%02x] at %pR "
+ "(base %#lx)\n",
+ segment, start, end, &new->res, (unsigned long)addr);
+ }
return new;
}
@@ -100,7 +119,7 @@ struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
struct pci_mmcfg_region *cfg;
- list_for_each_entry(cfg, &pci_mmcfg_list, list)
+ list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
if (cfg->segment == segment &&
cfg->start_bus <= bus && bus <= cfg->end_bus)
return cfg;
@@ -343,8 +362,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
name = pci_mmcfg_probes[i].probe();
if (name)
- printk(KERN_INFO PREFIX "%s with MMCONFIG support\n",
- name);
+ pr_info(PREFIX "%s with MMCONFIG support\n", name);
}
/* some end_bus_number is crazy, fix it */
@@ -353,19 +371,8 @@ static int __init pci_mmcfg_check_hostbridge(void)
return !list_empty(&pci_mmcfg_list);
}
-static void __init pci_mmcfg_insert_resources(void)
-{
- struct pci_mmcfg_region *cfg;
-
- list_for_each_entry(cfg, &pci_mmcfg_list, list)
- insert_resource(&iomem_resource, &cfg->res);
-
- /* Mark that the resources have been inserted. */
- pci_mmcfg_resources_inserted = 1;
-}
-
-static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
- void *data)
+static acpi_status __devinit check_mcfg_resource(struct acpi_resource *res,
+ void *data)
{
struct resource *mcfg_res = data;
struct acpi_resource_address64 address;
@@ -401,8 +408,8 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
return AE_OK;
}
-static acpi_status __init find_mboard_resource(acpi_handle handle, u32 lvl,
- void *context, void **rv)
+static acpi_status __devinit find_mboard_resource(acpi_handle handle, u32 lvl,
+ void *context, void **rv)
{
struct resource *mcfg_res = context;
@@ -415,7 +422,7 @@ static acpi_status __init find_mboard_resource(acpi_handle handle, u32 lvl,
return AE_OK;
}
-static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
+static int __devinit is_acpi_reserved(u64 start, u64 end, unsigned not_used)
{
struct resource mcfg_res;
@@ -434,13 +441,15 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type);
-static int __init is_mmconf_reserved(check_reserved_t is_reserved,
- struct pci_mmcfg_region *cfg, int with_e820)
+static int __ref is_mmconf_reserved(check_reserved_t is_reserved,
+ struct pci_mmcfg_region *cfg,
+ struct device *dev, int with_e820)
{
u64 addr = cfg->res.start;
u64 size = resource_size(&cfg->res);
u64 old_size = size;
- int valid = 0, num_buses;
+ int num_buses;
+ char *method = with_e820 ? "E820" : "ACPI motherboard resources";
while (!is_reserved(addr, addr + size, E820_RESERVED)) {
size >>= 1;
@@ -448,30 +457,76 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
break;
}
- if (size >= (16UL<<20) || size == old_size) {
- printk(KERN_INFO PREFIX "MMCONFIG at %pR reserved in %s\n",
- &cfg->res,
- with_e820 ? "E820" : "ACPI motherboard resources");
- valid = 1;
-
- if (old_size != size) {
- /* update end_bus */
- cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
- num_buses = cfg->end_bus - cfg->start_bus + 1;
- cfg->res.end = cfg->res.start +
- PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
- snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
- "PCI MMCONFIG %04x [bus %02x-%02x]",
- cfg->segment, cfg->start_bus, cfg->end_bus);
- printk(KERN_INFO PREFIX
- "MMCONFIG for %04x [bus%02x-%02x] "
- "at %pR (base %#lx) (size reduced!)\n",
- cfg->segment, cfg->start_bus, cfg->end_bus,
- &cfg->res, (unsigned long) cfg->address);
- }
+ if (size < (16UL<<20) && size != old_size)
+ return 0;
+
+ if (dev)
+ dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
+ &cfg->res, method);
+ else
+ pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n",
+ &cfg->res, method);
+
+ if (old_size != size) {
+ /* update end_bus */
+ cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
+ num_buses = cfg->end_bus - cfg->start_bus + 1;
+ cfg->res.end = cfg->res.start +
+ PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+ snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
+ "PCI MMCONFIG %04x [bus %02x-%02x]",
+ cfg->segment, cfg->start_bus, cfg->end_bus);
+
+ if (dev)
+ dev_info(dev,
+ "MMCONFIG "
+ "at %pR (base %#lx) (size reduced!)\n",
+ &cfg->res, (unsigned long) cfg->address);
+ else
+ pr_info(PREFIX
+ "MMCONFIG for %04x [bus%02x-%02x] "
+ "at %pR (base %#lx) (size reduced!)\n",
+ cfg->segment, cfg->start_bus, cfg->end_bus,
+ &cfg->res, (unsigned long) cfg->address);
}
- return valid;
+ return 1;
+}
+
+static int __ref pci_mmcfg_check_reserved(struct device *dev,
+ struct pci_mmcfg_region *cfg, int early)
+{
+ if (!early && !acpi_disabled) {
+ if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
+ return 1;
+
+ if (dev)
+ dev_info(dev, FW_INFO
+ "MMCONFIG at %pR not reserved in "
+ "ACPI motherboard resources\n",
+ &cfg->res);
+ else
+ pr_info(FW_INFO PREFIX
+ "MMCONFIG at %pR not reserved in "
+ "ACPI motherboard resources\n",
+ &cfg->res);
+ }
+
+ /*
+ * e820_all_mapped() is marked as __init.
+ * All entries from ACPI MCFG table have been checked at boot time.
+ * For MCFG information constructed from hotpluggable host bridge's
+ * _CBA method, just assume it's reserved.
+ */
+ if (pci_mmcfg_running_state)
+ return 1;
+
+ /* Don't try to do this check unless configuration
+ type 1 is available. how about type 2 ?*/
+ if (raw_pci_ops)
+ return is_mmconf_reserved(e820_all_mapped, cfg, dev, 1);
+
+ return 0;
}
static void __init pci_mmcfg_reject_broken(int early)
@@ -479,38 +534,14 @@ static void __init pci_mmcfg_reject_broken(int early)
struct pci_mmcfg_region *cfg;
list_for_each_entry(cfg, &pci_mmcfg_list, list) {
- int valid = 0;
-
- if (!early && !acpi_disabled) {
- valid = is_mmconf_reserved(is_acpi_reserved, cfg, 0);
-
- if (valid)
- continue;
- else
- printk(KERN_ERR FW_BUG PREFIX
- "MMCONFIG at %pR not reserved in "
- "ACPI motherboard resources\n",
- &cfg->res);
+ if (pci_mmcfg_check_reserved(NULL, cfg, early) == 0) {
+ pr_info(PREFIX "not using MMCONFIG\n");
+ free_all_mmcfg();
+ return;
}
-
- /* Don't try to do this check unless configuration
- type 1 is available. how about type 2 ?*/
- if (raw_pci_ops)
- valid = is_mmconf_reserved(e820_all_mapped, cfg, 1);
-
- if (!valid)
- goto reject;
}
-
- return;
-
-reject:
- printk(KERN_INFO PREFIX "not using MMCONFIG\n");
- free_all_mmcfg();
}
-static int __initdata known_bridge;
-
static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
struct acpi_mcfg_allocation *cfg)
{
@@ -529,7 +560,7 @@ static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
return 0;
}
- printk(KERN_ERR PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
+ pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
"is above 4GB, ignored\n", cfg->pci_segment,
cfg->start_bus_number, cfg->end_bus_number, cfg->address);
return -EINVAL;
@@ -556,7 +587,7 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
i -= sizeof(struct acpi_mcfg_allocation);
};
if (entries == 0) {
- printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
+ pr_err(PREFIX "MMCONFIG has no entries\n");
return -ENODEV;
}
@@ -570,8 +601,7 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
cfg->end_bus_number, cfg->address) == NULL) {
- printk(KERN_WARNING PREFIX
- "no memory for MCFG entries\n");
+ pr_warn(PREFIX "no memory for MCFG entries\n");
free_all_mmcfg();
return -ENOMEM;
}
@@ -582,28 +612,7 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
static void __init __pci_mmcfg_init(int early)
{
- /* MMCONFIG disabled */
- if ((pci_probe & PCI_PROBE_MMCONF) == 0)
- return;
-
- /* MMCONFIG already enabled */
- if (!early && !(pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF))
- return;
-
- /* for late to exit */
- if (known_bridge)
- return;
-
- if (early) {
- if (pci_mmcfg_check_hostbridge())
- known_bridge = 1;
- }
-
- if (!known_bridge)
- acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
-
pci_mmcfg_reject_broken(early);
-
if (list_empty(&pci_mmcfg_list))
return;
@@ -620,33 +629,48 @@ static void __init __pci_mmcfg_init(int early)
if (pci_mmcfg_arch_init())
pci_probe = (pci_probe & ~PCI_PROBE_MASK) | PCI_PROBE_MMCONF;
else {
- /*
- * Signal not to attempt to insert mmcfg resources because
- * the architecture mmcfg setup could not initialize.
- */
- pci_mmcfg_resources_inserted = 1;
+ free_all_mmcfg();
+ pci_mmcfg_arch_init_failed = true;
}
}
+static int __initdata known_bridge;
+
void __init pci_mmcfg_early_init(void)
{
- __pci_mmcfg_init(1);
+ if (pci_probe & PCI_PROBE_MMCONF) {
+ if (pci_mmcfg_check_hostbridge())
+ known_bridge = 1;
+ else
+ acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+ __pci_mmcfg_init(1);
+ }
}
void __init pci_mmcfg_late_init(void)
{
- __pci_mmcfg_init(0);
+ /* MMCONFIG disabled */
+ if ((pci_probe & PCI_PROBE_MMCONF) == 0)
+ return;
+
+ if (known_bridge)
+ return;
+
+ /* MMCONFIG hasn't been enabled yet, try again */
+ if (pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF) {
+ acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
+ __pci_mmcfg_init(0);
+ }
}
static int __init pci_mmcfg_late_insert_resources(void)
{
- /*
- * If resources are already inserted or we are not using MMCONFIG,
- * don't insert the resources.
- */
- if ((pci_mmcfg_resources_inserted == 1) ||
- (pci_probe & PCI_PROBE_MMCONF) == 0 ||
- list_empty(&pci_mmcfg_list))
+ struct pci_mmcfg_region *cfg;
+
+ pci_mmcfg_running_state = true;
+
+ /* If we are not using MMCONFIG, don't insert the resources. */
+ if ((pci_probe & PCI_PROBE_MMCONF) == 0)
return 1;
/*
@@ -654,7 +678,9 @@ static int __init pci_mmcfg_late_insert_resources(void)
* marked so it won't cause request errors when __request_region is
* called.
*/
- pci_mmcfg_insert_resources();
+ list_for_each_entry(cfg, &pci_mmcfg_list, list)
+ if (!cfg->res.parent)
+ insert_resource(&iomem_resource, &cfg->res);
return 0;
}
@@ -665,3 +691,101 @@ static int __init pci_mmcfg_late_insert_resources(void)
* with other system resources.
*/
late_initcall(pci_mmcfg_late_insert_resources);
+
+/* Add MMCFG information for host bridges */
+int __devinit pci_mmconfig_insert(struct device *dev,
+ u16 seg, u8 start, u8 end,
+ phys_addr_t addr)
+{
+ int rc;
+ struct resource *tmp = NULL;
+ struct pci_mmcfg_region *cfg;
+
+ if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
+ return -ENODEV;
+
+ if (start > end)
+ return -EINVAL;
+
+ mutex_lock(&pci_mmcfg_lock);
+ cfg = pci_mmconfig_lookup(seg, start);
+ if (cfg) {
+ if (cfg->end_bus < end)
+ dev_info(dev, FW_INFO
+ "MMCONFIG for "
+ "domain %04x [bus %02x-%02x] "
+ "only partially covers this bridge\n",
+ cfg->segment, cfg->start_bus, cfg->end_bus);
+ mutex_unlock(&pci_mmcfg_lock);
+ return -EEXIST;
+ }
+
+ if (!addr) {
+ mutex_unlock(&pci_mmcfg_lock);
+ return -EINVAL;
+ }
+
+ rc = -EBUSY;
+ cfg = pci_mmconfig_alloc(seg, start, end, addr);
+ if (cfg == NULL) {
+ dev_warn(dev, "fail to add MMCONFIG (out of memory)\n");
+ rc = -ENOMEM;
+ } else if (!pci_mmcfg_check_reserved(dev, cfg, 0)) {
+ dev_warn(dev, FW_BUG "MMCONFIG %pR isn't reserved\n",
+ &cfg->res);
+ } else {
+ /* Insert resource if it's not in boot stage */
+ if (pci_mmcfg_running_state)
+ tmp = insert_resource_conflict(&iomem_resource,
+ &cfg->res);
+
+ if (tmp) {
+ dev_warn(dev,
+ "MMCONFIG %pR conflicts with "
+ "%s %pR\n",
+ &cfg->res, tmp->name, tmp);
+ } else if (pci_mmcfg_arch_map(cfg)) {
+ dev_warn(dev, "fail to map MMCONFIG %pR.\n",
+ &cfg->res);
+ } else {
+ list_add_sorted(cfg);
+ dev_info(dev, "MMCONFIG at %pR (base %#lx)\n",
+ &cfg->res, (unsigned long)addr);
+ cfg = NULL;
+ rc = 0;
+ }
+ }
+
+ if (cfg) {
+ if (cfg->res.parent)
+ release_resource(&cfg->res);
+ kfree(cfg);
+ }
+
+ mutex_unlock(&pci_mmcfg_lock);
+
+ return rc;
+}
+
+/* Delete MMCFG information for host bridges */
+int pci_mmconfig_delete(u16 seg, u8 start, u8 end)
+{
+ struct pci_mmcfg_region *cfg;
+
+ mutex_lock(&pci_mmcfg_lock);
+ list_for_each_entry_rcu(cfg, &pci_mmcfg_list, list)
+ if (cfg->segment == seg && cfg->start_bus == start &&
+ cfg->end_bus == end) {
+ list_del_rcu(&cfg->list);
+ synchronize_rcu();
+ pci_mmcfg_arch_unmap(cfg);
+ if (cfg->res.parent)
+ release_resource(&cfg->res);
+ mutex_unlock(&pci_mmcfg_lock);
+ kfree(cfg);
+ return 0;
+ }
+ mutex_unlock(&pci_mmcfg_lock);
+
+ return -ENOENT;
+}
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index 5372e86834c0..db63ac23e3d9 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/init.h>
+#include <linux/rcupdate.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
#include <acpi/acpi.h>
@@ -60,9 +61,12 @@ err: *value = -1;
return -EINVAL;
}
+ rcu_read_lock();
base = get_base_addr(seg, bus, devfn);
- if (!base)
+ if (!base) {
+ rcu_read_unlock();
goto err;
+ }
raw_spin_lock_irqsave(&pci_config_lock, flags);
@@ -80,6 +84,7 @@ err: *value = -1;
break;
}
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+ rcu_read_unlock();
return 0;
}
@@ -93,9 +98,12 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
if ((bus > 255) || (devfn > 255) || (reg > 4095))
return -EINVAL;
+ rcu_read_lock();
base = get_base_addr(seg, bus, devfn);
- if (!base)
+ if (!base) {
+ rcu_read_unlock();
return -EINVAL;
+ }
raw_spin_lock_irqsave(&pci_config_lock, flags);
@@ -113,11 +121,12 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
break;
}
raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+ rcu_read_unlock();
return 0;
}
-static const struct pci_raw_ops pci_mmcfg = {
+const struct pci_raw_ops pci_mmcfg = {
.read = pci_mmcfg_read,
.write = pci_mmcfg_write,
};
@@ -132,3 +141,18 @@ int __init pci_mmcfg_arch_init(void)
void __init pci_mmcfg_arch_free(void)
{
}
+
+int __devinit pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
+{
+ return 0;
+}
+
+void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
+{
+ unsigned long flags;
+
+ /* Invalidate the cached mmcfg map entry. */
+ raw_spin_lock_irqsave(&pci_config_lock, flags);
+ mmcfg_last_accessed_device = 0;
+ raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+}
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
index 915a493502cb..d4ebd07c306d 100644
--- a/arch/x86/pci/mmconfig_64.c
+++ b/arch/x86/pci/mmconfig_64.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/bitmap.h>
+#include <linux/rcupdate.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
@@ -34,9 +35,12 @@ err: *value = -1;
return -EINVAL;
}
+ rcu_read_lock();
addr = pci_dev_base(seg, bus, devfn);
- if (!addr)
+ if (!addr) {
+ rcu_read_unlock();
goto err;
+ }
switch (len) {
case 1:
@@ -49,6 +53,7 @@ err: *value = -1;
*value = mmio_config_readl(addr + reg);
break;
}
+ rcu_read_unlock();
return 0;
}
@@ -62,9 +67,12 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
if (unlikely((bus > 255) || (devfn > 255) || (reg > 4095)))
return -EINVAL;
+ rcu_read_lock();
addr = pci_dev_base(seg, bus, devfn);
- if (!addr)
+ if (!addr) {
+ rcu_read_unlock();
return -EINVAL;
+ }
switch (len) {
case 1:
@@ -77,16 +85,17 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
mmio_config_writel(addr + reg, value);
break;
}
+ rcu_read_unlock();
return 0;
}
-static const struct pci_raw_ops pci_mmcfg = {
+const struct pci_raw_ops pci_mmcfg = {
.read = pci_mmcfg_read,
.write = pci_mmcfg_write,
};
-static void __iomem * __init mcfg_ioremap(struct pci_mmcfg_region *cfg)
+static void __iomem * __devinit mcfg_ioremap(struct pci_mmcfg_region *cfg)
{
void __iomem *addr;
u64 start, size;
@@ -105,16 +114,14 @@ int __init pci_mmcfg_arch_init(void)
{
struct pci_mmcfg_region *cfg;
- list_for_each_entry(cfg, &pci_mmcfg_list, list) {
- cfg->virt = mcfg_ioremap(cfg);
- if (!cfg->virt) {
- printk(KERN_ERR PREFIX "can't map MMCONFIG at %pR\n",
- &cfg->res);
+ list_for_each_entry(cfg, &pci_mmcfg_list, list)
+ if (pci_mmcfg_arch_map(cfg)) {
pci_mmcfg_arch_free();
return 0;
}
- }
+
raw_pci_ext_ops = &pci_mmcfg;
+
return 1;
}
@@ -122,10 +129,25 @@ void __init pci_mmcfg_arch_free(void)
{
struct pci_mmcfg_region *cfg;
- list_for_each_entry(cfg, &pci_mmcfg_list, list) {
- if (cfg->virt) {
- iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
- cfg->virt = NULL;
- }
+ list_for_each_entry(cfg, &pci_mmcfg_list, list)
+ pci_mmcfg_arch_unmap(cfg);
+}
+
+int __devinit pci_mmcfg_arch_map(struct pci_mmcfg_region *cfg)
+{
+ cfg->virt = mcfg_ioremap(cfg);
+ if (!cfg->virt) {
+ pr_err(PREFIX "can't map MMCONFIG at %pR\n", &cfg->res);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void pci_mmcfg_arch_unmap(struct pci_mmcfg_region *cfg)
+{
+ if (cfg && cfg->virt) {
+ iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
+ cfg->virt = NULL;
}
}
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 140942f66b31..e14a2ff708b5 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -264,7 +264,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
static void __devinit mrst_power_off_unused_dev(struct pci_dev *dev)
{
- pci_set_power_state(dev, PCI_D3cold);
+ pci_set_power_state(dev, PCI_D3hot);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 23e5b9d7977b..599be499fdf7 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -203,7 +203,7 @@ static int xo15_sci_remove(struct acpi_device *device, int type)
return 0;
}
-static int xo15_sci_resume(struct acpi_device *device)
+static int xo15_sci_resume(struct device *dev)
{
/* Enable all EC events */
olpc_ec_mask_write(EC_SCI_SRC_ALL);
@@ -215,6 +215,8 @@ static int xo15_sci_resume(struct acpi_device *device)
return 0;
}
+static SIMPLE_DEV_PM_OPS(xo15_sci_pm, NULL, xo15_sci_resume);
+
static const struct acpi_device_id xo15_sci_device_ids[] = {
{"XO15EC", 0},
{"", 0},
@@ -227,8 +229,8 @@ static struct acpi_driver xo15_sci_drv = {
.ops = {
.add = xo15_sci_add,
.remove = xo15_sci_remove,
- .resume = xo15_sci_resume,
},
+ .drv.pm = &xo15_sci_pm,
};
static int __init xo15_sci_init(void)
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 59880afa851f..71b5d5a07d7b 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1,7 +1,7 @@
/*
* SGI UltraViolet TLB flush routines.
*
- * (c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
+ * (c) 2008-2012 Cliff Wickman <cpw@sgi.com>, SGI.
*
* This code is released under the GNU General Public License version 2 or
* later.
@@ -38,8 +38,7 @@ static int timeout_base_ns[] = {
static int timeout_us;
static int nobau;
-static int baudisabled;
-static spinlock_t disable_lock;
+static int nobau_perm;
static cycles_t congested_cycles;
/* tunables: */
@@ -47,12 +46,13 @@ static int max_concurr = MAX_BAU_CONCURRENT;
static int max_concurr_const = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
+static int giveup_limit = GIVEUP_LIMIT;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_respns_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
-static int congested_period = CONGESTED_PERIOD;
+static int disabled_period = DISABLED_PERIOD;
static struct tunables tunables[] = {
{&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
@@ -63,7 +63,8 @@ static struct tunables tunables[] = {
{&complete_threshold, COMPLETE_THRESHOLD},
{&congested_respns_us, CONGESTED_RESPONSE_US},
{&congested_reps, CONGESTED_REPS},
- {&congested_period, CONGESTED_PERIOD}
+ {&disabled_period, DISABLED_PERIOD},
+ {&giveup_limit, GIVEUP_LIMIT}
};
static struct dentry *tunables_dir;
@@ -120,6 +121,40 @@ static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
+static void
+set_bau_on(void)
+{
+ int cpu;
+ struct bau_control *bcp;
+
+ if (nobau_perm) {
+ pr_info("BAU not initialized; cannot be turned on\n");
+ return;
+ }
+ nobau = 0;
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->nobau = 0;
+ }
+ pr_info("BAU turned on\n");
+ return;
+}
+
+static void
+set_bau_off(void)
+{
+ int cpu;
+ struct bau_control *bcp;
+
+ nobau = 1;
+ for_each_present_cpu(cpu) {
+ bcp = &per_cpu(bau_control, cpu);
+ bcp->nobau = 1;
+ }
+ pr_info("BAU turned off\n");
+ return;
+}
+
/*
* Determine the first node on a uvhub. 'Nodes' are used for kernel
* memory allocation.
@@ -278,7 +313,7 @@ static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
* Both sockets dump their completed count total into
* the message's count.
*/
- smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
+ *sp = 0;
asp = (struct atomic_short *)&msg->acknowledge_count;
msg_ack_count = atom_asr(socket_ack_count, asp);
@@ -491,16 +526,15 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
}
/*
- * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
+ * UV2 could have an extra bit of status in the ACTIVATION_STATUS_2 register.
+ * But not currently used.
*/
static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
{
unsigned long descriptor_status;
- unsigned long descriptor_status2;
- descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
- descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
- descriptor_status = (descriptor_status << 1) | descriptor_status2;
+ descriptor_status =
+ ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK) << 1;
return descriptor_status;
}
@@ -531,87 +565,11 @@ int normal_busy(struct bau_control *bcp)
*/
int handle_uv2_busy(struct bau_control *bcp)
{
- int busy_one = bcp->using_desc;
- int normal = bcp->uvhub_cpu;
- int selected = -1;
- int i;
- unsigned long descriptor_status;
- unsigned long status;
- int mmr_offset;
- struct bau_desc *bau_desc_old;
- struct bau_desc *bau_desc_new;
- struct bau_control *hmaster = bcp->uvhub_master;
struct ptc_stats *stat = bcp->statp;
- cycles_t ttm;
stat->s_uv2_wars++;
- spin_lock(&hmaster->uvhub_lock);
- /* try for the original first */
- if (busy_one != normal) {
- if (!normal_busy(bcp))
- selected = normal;
- }
- if (selected < 0) {
- /* can't use the normal, select an alternate */
- mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
- descriptor_status = read_lmmr(mmr_offset);
-
- /* scan available descriptors 32-63 */
- for (i = 0; i < UV_CPUS_PER_AS; i++) {
- if ((hmaster->inuse_map & (1 << i)) == 0) {
- status = ((descriptor_status >>
- (i * UV_ACT_STATUS_SIZE)) &
- UV_ACT_STATUS_MASK) << 1;
- if (status != UV2H_DESC_BUSY) {
- selected = i + UV_CPUS_PER_AS;
- break;
- }
- }
- }
- }
-
- if (busy_one != normal)
- /* mark the busy alternate as not in-use */
- hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
-
- if (selected >= 0) {
- /* switch to the selected descriptor */
- if (selected != normal) {
- /* set the selected alternate as in-use */
- hmaster->inuse_map |=
- (1 << (selected - UV_CPUS_PER_AS));
- if (selected > stat->s_uv2_wars_hw)
- stat->s_uv2_wars_hw = selected;
- }
- bau_desc_old = bcp->descriptor_base;
- bau_desc_old += (ITEMS_PER_DESC * busy_one);
- bcp->using_desc = selected;
- bau_desc_new = bcp->descriptor_base;
- bau_desc_new += (ITEMS_PER_DESC * selected);
- *bau_desc_new = *bau_desc_old;
- } else {
- /*
- * All are busy. Wait for the normal one for this cpu to
- * free up.
- */
- stat->s_uv2_war_waits++;
- spin_unlock(&hmaster->uvhub_lock);
- ttm = get_cycles();
- do {
- cpu_relax();
- } while (normal_busy(bcp));
- spin_lock(&hmaster->uvhub_lock);
- /* switch to the original descriptor */
- bcp->using_desc = normal;
- bau_desc_old = bcp->descriptor_base;
- bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
- bcp->using_desc = (ITEMS_PER_DESC * normal);
- bau_desc_new = bcp->descriptor_base;
- bau_desc_new += (ITEMS_PER_DESC * normal);
- *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
- }
- spin_unlock(&hmaster->uvhub_lock);
- return FLUSH_RETRY_BUSYBUG;
+ bcp->busy = 1;
+ return FLUSH_GIVEUP;
}
static int uv2_wait_completion(struct bau_desc *bau_desc,
@@ -620,7 +578,7 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
{
unsigned long descriptor_stat;
cycles_t ttm;
- int desc = bcp->using_desc;
+ int desc = bcp->uvhub_cpu;
long busy_reps = 0;
struct ptc_stats *stat = bcp->statp;
@@ -628,24 +586,38 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
/* spin on the status MMR, waiting for it to go idle */
while (descriptor_stat != UV2H_DESC_IDLE) {
- /*
- * Our software ack messages may be blocked because
- * there are no swack resources available. As long
- * as none of them has timed out hardware will NACK
- * our message and its state will stay IDLE.
- */
- if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
- (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
+ if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) {
+ /*
+ * A h/w bug on the destination side may
+ * have prevented the message being marked
+ * pending, thus it doesn't get replied to
+ * and gets continually nacked until it times
+ * out with a SOURCE_TIMEOUT.
+ */
stat->s_stimeout++;
return FLUSH_GIVEUP;
- } else if (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) {
- stat->s_strongnacks++;
- bcp->conseccompletes = 0;
- return FLUSH_GIVEUP;
} else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
+ ttm = get_cycles();
+
+ /*
+ * Our retries may be blocked by all destination
+ * swack resources being consumed, and a timeout
+ * pending. In that case hardware returns the
+ * ERROR that looks like a destination timeout.
+ * Without using the extended status we have to
+ * deduce from the short time that this was a
+ * strong nack.
+ */
+ if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
+ bcp->conseccompletes = 0;
+ stat->s_plugged++;
+ /* FLUSH_RETRY_PLUGGED causes hang on boot */
+ return FLUSH_GIVEUP;
+ }
stat->s_dtimeout++;
bcp->conseccompletes = 0;
- return FLUSH_RETRY_TIMEOUT;
+ /* FLUSH_RETRY_TIMEOUT causes hang on boot */
+ return FLUSH_GIVEUP;
} else {
busy_reps++;
if (busy_reps > 1000000) {
@@ -653,9 +625,8 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
busy_reps = 0;
ttm = get_cycles();
if ((ttm - bcp->send_message) >
- (bcp->clocks_per_100_usec)) {
+ bcp->timeout_interval)
return handle_uv2_busy(bcp);
- }
}
/*
* descriptor_stat is still BUSY
@@ -679,7 +650,7 @@ static int wait_completion(struct bau_desc *bau_desc,
{
int right_shift;
unsigned long mmr_offset;
- int desc = bcp->using_desc;
+ int desc = bcp->uvhub_cpu;
if (desc < UV_CPUS_PER_AS) {
mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
@@ -758,33 +729,31 @@ static void destination_timeout(struct bau_desc *bau_desc,
}
/*
- * Completions are taking a very long time due to a congested numalink
- * network.
+ * Stop all cpus on a uvhub from using the BAU for a period of time.
+ * This is reversed by check_enable.
*/
-static void disable_for_congestion(struct bau_control *bcp,
- struct ptc_stats *stat)
+static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
{
- /* let only one cpu do this disabling */
- spin_lock(&disable_lock);
-
- if (!baudisabled && bcp->period_requests &&
- ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
- int tcpu;
- struct bau_control *tbcp;
- /* it becomes this cpu's job to turn on the use of the
- BAU again */
- baudisabled = 1;
- bcp->set_bau_off = 1;
- bcp->set_bau_on_time = get_cycles();
- bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
+ int tcpu;
+ struct bau_control *tbcp;
+ struct bau_control *hmaster;
+ cycles_t tm1;
+
+ hmaster = bcp->uvhub_master;
+ spin_lock(&hmaster->disable_lock);
+ if (!bcp->baudisabled) {
stat->s_bau_disabled++;
+ tm1 = get_cycles();
for_each_present_cpu(tcpu) {
tbcp = &per_cpu(bau_control, tcpu);
- tbcp->baudisabled = 1;
+ if (tbcp->uvhub_master == hmaster) {
+ tbcp->baudisabled = 1;
+ tbcp->set_bau_on_time =
+ tm1 + bcp->disabled_period;
+ }
}
}
-
- spin_unlock(&disable_lock);
+ spin_unlock(&hmaster->disable_lock);
}
static void count_max_concurr(int stat, struct bau_control *bcp,
@@ -815,16 +784,30 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
bcp->period_requests++;
bcp->period_time += elapsed;
if ((elapsed > congested_cycles) &&
- (bcp->period_requests > bcp->cong_reps))
- disable_for_congestion(bcp, stat);
+ (bcp->period_requests > bcp->cong_reps) &&
+ ((bcp->period_time / bcp->period_requests) >
+ congested_cycles)) {
+ stat->s_congested++;
+ disable_for_period(bcp, stat);
+ }
}
} else
stat->s_requestor--;
if (completion_status == FLUSH_COMPLETE && try > 1)
stat->s_retriesok++;
- else if (completion_status == FLUSH_GIVEUP)
+ else if (completion_status == FLUSH_GIVEUP) {
stat->s_giveup++;
+ if (get_cycles() > bcp->period_end)
+ bcp->period_giveups = 0;
+ bcp->period_giveups++;
+ if (bcp->period_giveups == 1)
+ bcp->period_end = get_cycles() + bcp->disabled_period;
+ if (bcp->period_giveups > bcp->giveup_limit) {
+ disable_for_period(bcp, stat);
+ stat->s_giveuplimit++;
+ }
+ }
}
/*
@@ -868,7 +851,8 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
* Returns 1 if it gives up entirely and the original cpu mask is to be
* returned to the kernel.
*/
-int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
+int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp,
+ struct bau_desc *bau_desc)
{
int seq_number = 0;
int completion_stat = 0;
@@ -881,24 +865,23 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
struct bau_control *hmaster = bcp->uvhub_master;
struct uv1_bau_msg_header *uv1_hdr = NULL;
struct uv2_bau_msg_header *uv2_hdr = NULL;
- struct bau_desc *bau_desc;
- if (bcp->uvhub_version == 1)
+ if (bcp->uvhub_version == 1) {
+ uv1 = 1;
uv1_throttle(hmaster, stat);
+ }
while (hmaster->uvhub_quiesce)
cpu_relax();
time1 = get_cycles();
+ if (uv1)
+ uv1_hdr = &bau_desc->header.uv1_hdr;
+ else
+ uv2_hdr = &bau_desc->header.uv2_hdr;
+
do {
- bau_desc = bcp->descriptor_base;
- bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
- if (bcp->uvhub_version == 1) {
- uv1 = 1;
- uv1_hdr = &bau_desc->header.uv1_hdr;
- } else
- uv2_hdr = &bau_desc->header.uv2_hdr;
- if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
+ if (try == 0) {
if (uv1)
uv1_hdr->msg_type = MSG_REGULAR;
else
@@ -916,25 +899,24 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
uv1_hdr->sequence = seq_number;
else
uv2_hdr->sequence = seq_number;
- index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
+ index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
bcp->send_message = get_cycles();
write_mmr_activation(index);
try++;
completion_stat = wait_completion(bau_desc, bcp, try);
- /* UV2: wait_completion() may change the bcp->using_desc */
handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
bcp->ipi_attempts = 0;
+ stat->s_overipilimit++;
completion_stat = FLUSH_GIVEUP;
break;
}
cpu_relax();
} while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
- (completion_stat == FLUSH_RETRY_BUSYBUG) ||
(completion_stat == FLUSH_RETRY_TIMEOUT));
time2 = get_cycles();
@@ -955,28 +937,33 @@ int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
}
/*
- * The BAU is disabled. When the disabled time period has expired, the cpu
- * that disabled it must re-enable it.
- * Return 0 if it is re-enabled for all cpus.
+ * The BAU is disabled for this uvhub. When the disabled time period has
+ * expired re-enable it.
+ * Return 0 if it is re-enabled for all cpus on this uvhub.
*/
static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
{
int tcpu;
struct bau_control *tbcp;
+ struct bau_control *hmaster;
- if (bcp->set_bau_off) {
- if (get_cycles() >= bcp->set_bau_on_time) {
- stat->s_bau_reenabled++;
- baudisabled = 0;
- for_each_present_cpu(tcpu) {
- tbcp = &per_cpu(bau_control, tcpu);
+ hmaster = bcp->uvhub_master;
+ spin_lock(&hmaster->disable_lock);
+ if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
+ stat->s_bau_reenabled++;
+ for_each_present_cpu(tcpu) {
+ tbcp = &per_cpu(bau_control, tcpu);
+ if (tbcp->uvhub_master == hmaster) {
tbcp->baudisabled = 0;
tbcp->period_requests = 0;
tbcp->period_time = 0;
+ tbcp->period_giveups = 0;
}
- return 0;
}
+ spin_unlock(&hmaster->disable_lock);
+ return 0;
}
+ spin_unlock(&hmaster->disable_lock);
return -1;
}
@@ -1078,18 +1065,32 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
struct cpumask *flush_mask;
struct ptc_stats *stat;
struct bau_control *bcp;
-
- /* kernel was booted 'nobau' */
- if (nobau)
- return cpumask;
+ unsigned long descriptor_status;
+ unsigned long status;
bcp = &per_cpu(bau_control, cpu);
stat = bcp->statp;
+ stat->s_enters++;
+
+ if (bcp->nobau)
+ return cpumask;
+
+ if (bcp->busy) {
+ descriptor_status =
+ read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
+ status = ((descriptor_status >> (bcp->uvhub_cpu *
+ UV_ACT_STATUS_SIZE)) & UV_ACT_STATUS_MASK) << 1;
+ if (status == UV2H_DESC_BUSY)
+ return cpumask;
+ bcp->busy = 0;
+ }
/* bau was disabled due to slow response */
if (bcp->baudisabled) {
- if (check_enable(bcp, stat))
+ if (check_enable(bcp, stat)) {
+ stat->s_ipifordisabled++;
return cpumask;
+ }
}
/*
@@ -1105,7 +1106,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
stat->s_ntargself++;
bau_desc = bcp->descriptor_base;
- bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
+ bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu);
bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
return NULL;
@@ -1118,25 +1119,27 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
* uv_flush_send_and_wait returns 0 if all cpu's were messaged,
* or 1 if it gave up and the original cpumask should be returned.
*/
- if (!uv_flush_send_and_wait(flush_mask, bcp))
+ if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc))
return NULL;
else
return cpumask;
}
/*
- * Search the message queue for any 'other' message with the same software
- * acknowledge resource bit vector.
+ * Search the message queue for any 'other' unprocessed message with the
+ * same software acknowledge resource bit vector as the 'msg' message.
*/
struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
- struct bau_control *bcp, unsigned char swack_vec)
+ struct bau_control *bcp)
{
struct bau_pq_entry *msg_next = msg + 1;
+ unsigned char swack_vec = msg->swack_vec;
if (msg_next > bcp->queue_last)
msg_next = bcp->queue_first;
- while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
- if (msg_next->swack_vec == swack_vec)
+ while (msg_next != msg) {
+ if ((msg_next->canceled == 0) && (msg_next->replied_to == 0) &&
+ (msg_next->swack_vec == swack_vec))
return msg_next;
msg_next++;
if (msg_next > bcp->queue_last)
@@ -1165,32 +1168,30 @@ void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
* This message was assigned a swack resource, but no
* reserved acknowlegment is pending.
* The bug has prevented this message from setting the MMR.
- * And no other message has used the same sw_ack resource.
- * Do the requested shootdown but do not reply to the msg.
- * (the 0 means make no acknowledge)
*/
- bau_process_message(mdp, bcp, 0);
- return;
- }
-
- /*
- * Some message has set the MMR 'pending' bit; it might have been
- * another message. Look for that message.
- */
- other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
- if (other_msg) {
- /* There is another. Do not ack the current one. */
- bau_process_message(mdp, bcp, 0);
/*
- * Let the natural processing of that message acknowledge
- * it. Don't get the processing of sw_ack's out of order.
+ * Some message has set the MMR 'pending' bit; it might have
+ * been another message. Look for that message.
*/
- return;
+ other_msg = find_another_by_swack(msg, bcp);
+ if (other_msg) {
+ /*
+ * There is another. Process this one but do not
+ * ack it.
+ */
+ bau_process_message(mdp, bcp, 0);
+ /*
+ * Let the natural processing of that other message
+ * acknowledge it. Don't get the processing of sw_ack's
+ * out of order.
+ */
+ return;
+ }
}
/*
- * There is no other message using this sw_ack, so it is safe to
- * acknowledge it.
+ * Either the MMR shows this one pending a reply or there is no
+ * other message using this sw_ack, so it is safe to acknowledge it.
*/
bau_process_message(mdp, bcp, 1);
@@ -1295,7 +1296,8 @@ static void __init enable_timeouts(void)
*/
mmr_image |= (1L << SOFTACK_MSHIFT);
if (is_uv2_hub()) {
- mmr_image |= (1L << UV2_EXT_SHFT);
+ /* hw bug workaround; do not use extended status */
+ mmr_image &= ~(1L << UV2_EXT_SHFT);
}
write_mmr_misc_control(pnode, mmr_image);
}
@@ -1338,29 +1340,34 @@ static inline unsigned long long usec_2_cycles(unsigned long microsec)
static int ptc_seq_show(struct seq_file *file, void *data)
{
struct ptc_stats *stat;
+ struct bau_control *bcp;
int cpu;
cpu = *(loff_t *)data;
if (!cpu) {
seq_printf(file,
- "# cpu sent stime self locals remotes ncpus localhub ");
+ "# cpu bauoff sent stime self locals remotes ncpus localhub ");
seq_printf(file,
"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
seq_printf(file,
- "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries rok ");
+ "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries ");
+ seq_printf(file,
+ "rok resetp resett giveup sto bz throt disable ");
seq_printf(file,
- "resetp resett giveup sto bz throt swack recv rtime ");
+ "enable wars warshw warwaits enters ipidis plugged ");
seq_printf(file,
- "all one mult none retry canc nocan reset rcan ");
+ "ipiover glim cong swack recv rtime all one mult ");
seq_printf(file,
- "disable enable wars warshw warwaits\n");
+ "none retry canc nocan reset rcan\n");
}
if (cpu < num_possible_cpus() && cpu_online(cpu)) {
- stat = &per_cpu(ptcstats, cpu);
+ bcp = &per_cpu(bau_control, cpu);
+ stat = bcp->statp;
/* source side statistics */
seq_printf(file,
- "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
- cpu, stat->s_requestor, cycles_2_us(stat->s_time),
+ "cpu %d %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+ cpu, bcp->nobau, stat->s_requestor,
+ cycles_2_us(stat->s_time),
stat->s_ntargself, stat->s_ntarglocals,
stat->s_ntargremotes, stat->s_ntargcpu,
stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
@@ -1374,20 +1381,23 @@ static int ptc_seq_show(struct seq_file *file, void *data)
stat->s_resets_plug, stat->s_resets_timeout,
stat->s_giveup, stat->s_stimeout,
stat->s_busy, stat->s_throttles);
+ seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+ stat->s_bau_disabled, stat->s_bau_reenabled,
+ stat->s_uv2_wars, stat->s_uv2_wars_hw,
+ stat->s_uv2_war_waits, stat->s_enters,
+ stat->s_ipifordisabled, stat->s_plugged,
+ stat->s_overipilimit, stat->s_giveuplimit,
+ stat->s_congested);
/* destination side statistics */
seq_printf(file,
- "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
+ "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
stat->d_requestee, cycles_2_us(stat->d_time),
stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
stat->d_nomsg, stat->d_retries, stat->d_canceled,
stat->d_nocanceled, stat->d_resets,
stat->d_rcanceled);
- seq_printf(file, "%ld %ld %ld %ld %ld\n",
- stat->s_bau_disabled, stat->s_bau_reenabled,
- stat->s_uv2_wars, stat->s_uv2_wars_hw,
- stat->s_uv2_war_waits);
}
return 0;
}
@@ -1401,13 +1411,14 @@ static ssize_t tunables_read(struct file *file, char __user *userbuf,
char *buf;
int ret;
- buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
- "max_concur plugged_delay plugsb4reset",
- "timeoutsb4reset ipi_reset_limit complete_threshold",
- "congested_response_us congested_reps congested_period",
+ buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d %d\n",
+ "max_concur plugged_delay plugsb4reset timeoutsb4reset",
+ "ipi_reset_limit complete_threshold congested_response_us",
+ "congested_reps disabled_period giveup_limit",
max_concurr, plugged_delay, plugsb4reset,
timeoutsb4reset, ipi_reset_limit, complete_threshold,
- congested_respns_us, congested_reps, congested_period);
+ congested_respns_us, congested_reps, disabled_period,
+ giveup_limit);
if (!buf)
return -ENOMEM;
@@ -1438,6 +1449,14 @@ static ssize_t ptc_proc_write(struct file *file, const char __user *user,
return -EFAULT;
optstr[count - 1] = '\0';
+ if (!strcmp(optstr, "on")) {
+ set_bau_on();
+ return count;
+ } else if (!strcmp(optstr, "off")) {
+ set_bau_off();
+ return count;
+ }
+
if (strict_strtol(optstr, 10, &input_arg) < 0) {
printk(KERN_DEBUG "%s is invalid\n", optstr);
return -EINVAL;
@@ -1570,7 +1589,8 @@ static ssize_t tunables_write(struct file *file, const char __user *user,
bcp->complete_threshold = complete_threshold;
bcp->cong_response_us = congested_respns_us;
bcp->cong_reps = congested_reps;
- bcp->cong_period = congested_period;
+ bcp->disabled_period = sec_2_cycles(disabled_period);
+ bcp->giveup_limit = giveup_limit;
}
return count;
}
@@ -1699,6 +1719,10 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
* fairness chaining multilevel count replied_to
*/
} else {
+ /*
+ * BIOS uses legacy mode, but UV2 hardware always
+ * uses native mode for selective broadcasts.
+ */
uv2_hdr = &bd2->header.uv2_hdr;
uv2_hdr->swack_flag = 1;
uv2_hdr->base_dest_nasid =
@@ -1811,8 +1835,8 @@ static int calculate_destination_timeout(void)
index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
- base = timeout_base_ns[index];
- ts_ns = base * mult1 * mult2;
+ ts_ns = timeout_base_ns[index];
+ ts_ns *= (mult1 * mult2);
ret = ts_ns / 1000;
} else {
/* 4 bits 0/1 for 10/80us base, 3 bits of multiplier */
@@ -1836,6 +1860,8 @@ static void __init init_per_cpu_tunables(void)
for_each_present_cpu(cpu) {
bcp = &per_cpu(bau_control, cpu);
bcp->baudisabled = 0;
+ if (nobau)
+ bcp->nobau = 1;
bcp->statp = &per_cpu(ptcstats, cpu);
/* time interval to catch a hardware stay-busy bug */
bcp->timeout_interval = usec_2_cycles(2*timeout_us);
@@ -1848,10 +1874,11 @@ static void __init init_per_cpu_tunables(void)
bcp->complete_threshold = complete_threshold;
bcp->cong_response_us = congested_respns_us;
bcp->cong_reps = congested_reps;
- bcp->cong_period = congested_period;
- bcp->clocks_per_100_usec = usec_2_cycles(100);
+ bcp->disabled_period = sec_2_cycles(disabled_period);
+ bcp->giveup_limit = giveup_limit;
spin_lock_init(&bcp->queue_lock);
spin_lock_init(&bcp->uvhub_lock);
+ spin_lock_init(&bcp->disable_lock);
}
}
@@ -1972,7 +1999,6 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
}
bcp->uvhub_master = *hmasterp;
bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
- bcp->using_desc = bcp->uvhub_cpu;
if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
printk(KERN_EMERG "%d cpus per uvhub invalid\n",
bcp->uvhub_cpu);
@@ -2069,16 +2095,12 @@ static int __init uv_bau_init(void)
if (!is_uv_system())
return 0;
- if (nobau)
- return 0;
-
for_each_possible_cpu(cur_cpu) {
mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
}
nuvhubs = uv_num_possible_blades();
- spin_lock_init(&disable_lock);
congested_cycles = usec_2_cycles(congested_respns_us);
uv_base_pnode = 0x7fffffff;
@@ -2091,7 +2113,8 @@ static int __init uv_bau_init(void)
enable_timeouts();
if (init_per_cpu(nuvhubs, uv_base_pnode)) {
- nobau = 1;
+ set_bau_off();
+ nobau_perm = 1;
return 0;
}
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index f25c2765a5c9..acf7752da952 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -135,6 +135,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
int mmr_pnode, err;
+ unsigned int dest;
BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
sizeof(unsigned long));
@@ -143,6 +144,10 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
if (err != 0)
return err;
+ err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
+ if (err != 0)
+ return err;
+
if (limit == UV_AFFINITY_CPU)
irq_set_status_flags(irq, IRQ_NO_BALANCING);
else
@@ -159,7 +164,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
entry->polarity = 0;
entry->trigger = 0;
entry->mask = 0;
- entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
+ entry->dest = dest;
mmr_pnode = uv_blade_to_pnode(mmr_blade);
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@@ -222,7 +227,7 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
- return 0;
+ return IRQ_SET_MASK_OK_NOCOPY;
}
/*
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 5b84a2d30888..b2d534cab25f 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -22,7 +22,7 @@ wakeup-objs += video-bios.o
realmode-y += header.o
realmode-y += trampoline_$(BITS).o
realmode-y += stack.o
-realmode-$(CONFIG_X86_32) += reboot_32.o
+realmode-y += reboot.o
realmode-$(CONFIG_ACPI_SLEEP) += $(wakeup-objs)
targets += $(realmode-y)
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
index fadf48378ada..a28221d94e69 100644
--- a/arch/x86/realmode/rm/header.S
+++ b/arch/x86/realmode/rm/header.S
@@ -6,6 +6,7 @@
#include <linux/linkage.h>
#include <asm/page_types.h>
+#include <asm/segment.h>
#include "realmode.h"
@@ -28,8 +29,9 @@ GLOBAL(real_mode_header)
.long pa_wakeup_header
#endif
/* APM/BIOS reboot */
-#ifdef CONFIG_X86_32
.long pa_machine_real_restart_asm
+#ifdef CONFIG_X86_64
+ .long __KERNEL32_CS
#endif
END(real_mode_header)
diff --git a/arch/x86/realmode/rm/reboot_32.S b/arch/x86/realmode/rm/reboot.S
index 114044876b3d..f932ea61d1c8 100644
--- a/arch/x86/realmode/rm/reboot_32.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -2,6 +2,8 @@
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/page_types.h>
+#include <asm/processor-flags.h>
+#include <asm/msr-index.h>
#include "realmode.h"
/*
@@ -12,13 +14,35 @@
* doesn't work with at least one type of 486 motherboard. It is easy
* to stop this code working; hence the copious comments.
*
- * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax.
+ * This code is called with the restart type (0 = BIOS, 1 = APM) in
+ * the primary argument register (%eax for 32 bit, %edi for 64 bit).
*/
.section ".text32", "ax"
.code32
-
- .balign 16
ENTRY(machine_real_restart_asm)
+
+#ifdef CONFIG_X86_64
+ /* Switch to trampoline GDT as it is guaranteed < 4 GiB */
+ movl $__KERNEL_DS, %eax
+ movl %eax, %ds
+ lgdtl pa_tr_gdt
+
+ /* Disable paging to drop us out of long mode */
+ movl %cr0, %eax
+ andl $~X86_CR0_PG, %eax
+ movl %eax, %cr0
+ ljmpl $__KERNEL32_CS, $pa_machine_real_restart_paging_off
+
+GLOBAL(machine_real_restart_paging_off)
+ xorl %eax, %eax
+ xorl %edx, %edx
+ movl $MSR_EFER, %ecx
+ wrmsr
+
+ movl %edi, %eax
+
+#endif /* CONFIG_X86_64 */
+
/* Set up the IDT for real mode. */
lidtl pa_machine_real_restart_idt
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 66e6d9359826..0faad646f5fd 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -205,9 +205,9 @@ void syscall32_cpu_init(void)
{
/* Load these always in case some future AMD CPU supports
SYSENTER from compat mode too. */
- checking_wrmsrl(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
- checking_wrmsrl(MSR_IA32_SYSENTER_ESP, 0ULL);
- checking_wrmsrl(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
+ wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+ wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
wrmsrl(MSR_CSTAR, ia32_cstar_target);
}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index ff962d4b821e..bf4bda6d3e9a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/syscore_ops.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
@@ -38,6 +39,7 @@
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
+#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvm.h>
@@ -107,7 +109,7 @@ EXPORT_SYMBOL_GPL(xen_have_vector_callback);
* Point at some empty memory to start with. We map the real shared_info
* page as soon as fixmap is up and running.
*/
-struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
+struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;
/*
* Flag to determine whether vcpu info placement is available on all
@@ -124,6 +126,19 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
*/
static int have_vcpu_info_placement = 1;
+struct tls_descs {
+ struct desc_struct desc[3];
+};
+
+/*
+ * Updating the 3 TLS descriptors in the GDT on every task switch is
+ * surprisingly expensive so we avoid updating them if they haven't
+ * changed. Since Xen writes different descriptors than the one
+ * passed in the update_descriptor hypercall we keep shadow copies to
+ * compare against.
+ */
+static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);
+
static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
@@ -341,9 +356,7 @@ static void __init xen_init_cpuid_mask(void)
unsigned int xsave_mask;
cpuid_leaf1_edx_mask =
- ~((1 << X86_FEATURE_MCE) | /* disable MCE */
- (1 << X86_FEATURE_MCA) | /* disable MCA */
- (1 << X86_FEATURE_MTRR) | /* disable MTRR */
+ ~((1 << X86_FEATURE_MTRR) | /* disable MTRR */
(1 << X86_FEATURE_ACC)); /* thermal monitoring */
if (!xen_initial_domain())
@@ -540,12 +553,28 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
BUG();
}
+static inline bool desc_equal(const struct desc_struct *d1,
+ const struct desc_struct *d2)
+{
+ return d1->a == d2->a && d1->b == d2->b;
+}
+
static void load_TLS_descriptor(struct thread_struct *t,
unsigned int cpu, unsigned int i)
{
- struct desc_struct *gdt = get_cpu_gdt_table(cpu);
- xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
- struct multicall_space mc = __xen_mc_entry(0);
+ struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
+ struct desc_struct *gdt;
+ xmaddr_t maddr;
+ struct multicall_space mc;
+
+ if (desc_equal(shadow, &t->tls_array[i]))
+ return;
+
+ *shadow = t->tls_array[i];
+
+ gdt = get_cpu_gdt_table(cpu);
+ maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
+ mc = __xen_mc_entry(0);
MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}
@@ -627,8 +656,8 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
/*
* Look for known traps using IST, and substitute them
* appropriately. The debugger ones are the only ones we care
- * about. Xen will handle faults like double_fault and
- * machine_check, so we should never see them. Warn if
+ * about. Xen will handle faults like double_fault,
+ * so we should never see them. Warn if
* there's an unexpected IST-using fault handler.
*/
if (addr == (unsigned long)debug)
@@ -643,7 +672,11 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
return 0;
#ifdef CONFIG_X86_MCE
} else if (addr == (unsigned long)machine_check) {
- return 0;
+ /*
+ * when xen hypervisor inject vMCE to guest,
+ * use native mce handler to handle it
+ */
+ ;
#endif
} else {
/* Some other trap using IST? */
@@ -1124,9 +1157,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.wbinvd = native_wbinvd,
.read_msr = native_read_msr_safe,
- .rdmsr_regs = native_rdmsr_safe_regs,
.write_msr = xen_write_msr_safe,
- .wrmsr_regs = native_wrmsr_safe_regs,
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
@@ -1439,64 +1470,155 @@ asmlinkage void __init xen_start_kernel(void)
#endif
}
-static int init_hvm_pv_info(int *major, int *minor)
-{
- uint32_t eax, ebx, ecx, edx, pages, msr, base;
- u64 pfn;
-
- base = xen_cpuid_base();
- cpuid(base + 1, &eax, &ebx, &ecx, &edx);
-
- *major = eax >> 16;
- *minor = eax & 0xffff;
- printk(KERN_INFO "Xen version %d.%d.\n", *major, *minor);
-
- cpuid(base + 2, &pages, &msr, &ecx, &edx);
-
- pfn = __pa(hypercall_page);
- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
-
- xen_setup_features();
-
- pv_info.name = "Xen HVM";
-
- xen_domain_type = XEN_HVM_DOMAIN;
+#ifdef CONFIG_XEN_PVHVM
+/*
+ * The pfn containing the shared_info is located somewhere in RAM. This
+ * will cause trouble if the current kernel is doing a kexec boot into a
+ * new kernel. The new kernel (and its startup code) can not know where
+ * the pfn is, so it can not reserve the page. The hypervisor will
+ * continue to update the pfn, and as a result memory corruption occours
+ * in the new kernel.
+ *
+ * One way to work around this issue is to allocate a page in the
+ * xen-platform pci device's BAR memory range. But pci init is done very
+ * late and the shared_info page is already in use very early to read
+ * the pvclock. So moving the pfn from RAM to MMIO is racy because some
+ * code paths on other vcpus could access the pfn during the small
+ * window when the old pfn is moved to the new pfn. There is even a
+ * small window were the old pfn is not backed by a mfn, and during that
+ * time all reads return -1.
+ *
+ * Because it is not known upfront where the MMIO region is located it
+ * can not be used right from the start in xen_hvm_init_shared_info.
+ *
+ * To minimise trouble the move of the pfn is done shortly before kexec.
+ * This does not eliminate the race because all vcpus are still online
+ * when the syscore_ops will be called. But hopefully there is no work
+ * pending at this point in time. Also the syscore_op is run last which
+ * reduces the risk further.
+ */
- return 0;
-}
+static struct shared_info *xen_hvm_shared_info;
-void __ref xen_hvm_init_shared_info(void)
+static void xen_hvm_connect_shared_info(unsigned long pfn)
{
- int cpu;
struct xen_add_to_physmap xatp;
- static struct shared_info *shared_info_page = 0;
- if (!shared_info_page)
- shared_info_page = (struct shared_info *)
- extend_brk(PAGE_SIZE, PAGE_SIZE);
xatp.domid = DOMID_SELF;
xatp.idx = 0;
xatp.space = XENMAPSPACE_shared_info;
- xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
+ xatp.gpfn = pfn;
if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
BUG();
- HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;
+}
+static void xen_hvm_set_shared_info(struct shared_info *sip)
+{
+ int cpu;
+
+ HYPERVISOR_shared_info = sip;
/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
* page, we use it in the event channel upcall and in some pvclock
* related functions. We don't need the vcpu_info placement
* optimizations because we don't use any pv_mmu or pv_irq op on
* HVM.
- * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
- * online but xen_hvm_init_shared_info is run at resume time too and
+ * When xen_hvm_set_shared_info is run at boot time only vcpu 0 is
+ * online but xen_hvm_set_shared_info is run at resume time too and
* in that case multiple vcpus might be online. */
for_each_online_cpu(cpu) {
per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}
}
-#ifdef CONFIG_XEN_PVHVM
+/* Reconnect the shared_info pfn to a mfn */
+void xen_hvm_resume_shared_info(void)
+{
+ xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
+}
+
+#ifdef CONFIG_KEXEC
+static struct shared_info *xen_hvm_shared_info_kexec;
+static unsigned long xen_hvm_shared_info_pfn_kexec;
+
+/* Remember a pfn in MMIO space for kexec reboot */
+void __devinit xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn)
+{
+ xen_hvm_shared_info_kexec = sip;
+ xen_hvm_shared_info_pfn_kexec = pfn;
+}
+
+static void xen_hvm_syscore_shutdown(void)
+{
+ struct xen_memory_reservation reservation = {
+ .domid = DOMID_SELF,
+ .nr_extents = 1,
+ };
+ unsigned long prev_pfn;
+ int rc;
+
+ if (!xen_hvm_shared_info_kexec)
+ return;
+
+ prev_pfn = __pa(xen_hvm_shared_info) >> PAGE_SHIFT;
+ set_xen_guest_handle(reservation.extent_start, &prev_pfn);
+
+ /* Move pfn to MMIO, disconnects previous pfn from mfn */
+ xen_hvm_connect_shared_info(xen_hvm_shared_info_pfn_kexec);
+
+ /* Update pointers, following hypercall is also a memory barrier */
+ xen_hvm_set_shared_info(xen_hvm_shared_info_kexec);
+
+ /* Allocate new mfn for previous pfn */
+ do {
+ rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+ if (rc == 0)
+ msleep(123);
+ } while (rc == 0);
+
+ /* Make sure the previous pfn is really connected to a (new) mfn */
+ BUG_ON(rc != 1);
+}
+
+static struct syscore_ops xen_hvm_syscore_ops = {
+ .shutdown = xen_hvm_syscore_shutdown,
+};
+#endif
+
+/* Use a pfn in RAM, may move to MMIO before kexec. */
+static void __init xen_hvm_init_shared_info(void)
+{
+ /* Remember pointer for resume */
+ xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE);
+ xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT);
+ xen_hvm_set_shared_info(xen_hvm_shared_info);
+}
+
+static void __init init_hvm_pv_info(void)
+{
+ int major, minor;
+ uint32_t eax, ebx, ecx, edx, pages, msr, base;
+ u64 pfn;
+
+ base = xen_cpuid_base();
+ cpuid(base + 1, &eax, &ebx, &ecx, &edx);
+
+ major = eax >> 16;
+ minor = eax & 0xffff;
+ printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
+
+ cpuid(base + 2, &pages, &msr, &ecx, &edx);
+
+ pfn = __pa(hypercall_page);
+ wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+
+ xen_setup_features();
+
+ pv_info.name = "Xen HVM";
+
+ xen_domain_type = XEN_HVM_DOMAIN;
+}
+
static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
@@ -1519,14 +1641,12 @@ static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
static void __init xen_hvm_guest_init(void)
{
- int r;
- int major, minor;
-
- r = init_hvm_pv_info(&major, &minor);
- if (r < 0)
- return;
+ init_hvm_pv_info();
xen_hvm_init_shared_info();
+#ifdef CONFIG_KEXEC
+ register_syscore_ops(&xen_hvm_syscore_ops);
+#endif
if (xen_feature(XENFEAT_hvm_callback_vector))
xen_have_vector_callback = 1;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3a73785631ce..27336dfcda8e 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -308,8 +308,20 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
- if (!xen_batched_set_pte(ptep, pteval))
- native_set_pte(ptep, pteval);
+ if (!xen_batched_set_pte(ptep, pteval)) {
+ /*
+ * Could call native_set_pte() here and trap and
+ * emulate the PTE write but with 32-bit guests this
+ * needs two traps (one for each of the two 32-bit
+ * words in the PTE) so do one hypercall directly
+ * instead.
+ */
+ struct mmu_update u;
+
+ u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+ u.val = pte_val_ma(pteval);
+ HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
+ }
}
static void xen_set_pte(pte_t *ptep, pte_t pteval)
@@ -1416,13 +1428,28 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
}
#endif /* CONFIG_X86_64 */
-/* Init-time set_pte while constructing initial pagetables, which
- doesn't allow RO pagetable pages to be remapped RW */
+/*
+ * Init-time set_pte while constructing initial pagetables, which
+ * doesn't allow RO page table pages to be remapped RW.
+ *
+ * If there is no MFN for this PFN then this page is initially
+ * ballooned out so clear the PTE (as in decrease_reservation() in
+ * drivers/xen/balloon.c).
+ *
+ * Many of these PTE updates are done on unpinned and writable pages
+ * and doing a hypercall for these is unnecessary and expensive. At
+ * this point it is not possible to tell if a page is pinned or not,
+ * so always write the PTE directly and rely on Xen trapping and
+ * emulating any updates as necessary.
+ */
static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
- pte = mask_rw_pte(ptep, pte);
+ if (pte_mfn(pte) != INVALID_P2M_ENTRY)
+ pte = mask_rw_pte(ptep, pte);
+ else
+ pte = __pte_ma(0);
- xen_set_pte(ptep, pte);
+ native_set_pte(ptep, pte);
}
static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index a4790bf22c59..ead85576d54a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -157,25 +157,24 @@ static unsigned long __init xen_populate_chunk(
unsigned long dest_pfn;
for (i = 0, entry = list; i < map_size; i++, entry++) {
- unsigned long credits = credits_left;
unsigned long s_pfn;
unsigned long e_pfn;
unsigned long pfns;
long capacity;
- if (credits <= 0)
+ if (credits_left <= 0)
break;
if (entry->type != E820_RAM)
continue;
- e_pfn = PFN_UP(entry->addr + entry->size);
+ e_pfn = PFN_DOWN(entry->addr + entry->size);
/* We only care about E820 after the xen_start_info->nr_pages */
if (e_pfn <= max_pfn)
continue;
- s_pfn = PFN_DOWN(entry->addr);
+ s_pfn = PFN_UP(entry->addr);
/* If the E820 falls within the nr_pages, we want to start
* at the nr_pages PFN.
* If that would mean going past the E820 entry, skip it
@@ -184,23 +183,19 @@ static unsigned long __init xen_populate_chunk(
capacity = e_pfn - max_pfn;
dest_pfn = max_pfn;
} else {
- /* last_pfn MUST be within E820_RAM regions */
- if (*last_pfn && e_pfn >= *last_pfn)
- s_pfn = *last_pfn;
capacity = e_pfn - s_pfn;
dest_pfn = s_pfn;
}
- /* If we had filled this E820_RAM entry, go to the next one. */
- if (capacity <= 0)
- continue;
- if (credits > capacity)
- credits = capacity;
+ if (credits_left < capacity)
+ capacity = credits_left;
- pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
+ pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
done += pfns;
- credits_left -= pfns;
*last_pfn = (dest_pfn + pfns);
+ if (pfns < capacity)
+ break;
+ credits_left -= pfns;
}
return done;
}
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index afb250d22a6b..f58dca7a6e52 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -80,9 +80,7 @@ static void __cpuinit cpu_bringup(void)
notify_cpu_starting(cpu);
- ipi_call_lock();
set_cpu_online(cpu, true);
- ipi_call_unlock();
this_cpu_write(cpu_state, CPU_ONLINE);
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 45329c8c226e..ae8a00c39de4 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled)
{
#ifdef CONFIG_XEN_PVHVM
int cpu;
- xen_hvm_init_shared_info();
+ xen_hvm_resume_shared_info();
xen_callback_vector();
xen_unplug_emulated_devices();
if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 202d4c150154..1e4329e04e0f 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -41,7 +41,7 @@ void xen_enable_syscall(void);
void xen_vcpu_restore(void);
void xen_callback_vector(void);
-void xen_hvm_init_shared_info(void);
+void xen_hvm_resume_shared_info(void);
void xen_unplug_emulated_devices(void);
void __init xen_build_dynamic_phys_to_machine(void);
diff --git a/arch/xtensa/kernel/pci.c b/arch/xtensa/kernel/pci.c
index eb30e356f5be..69759e9cb3ea 100644
--- a/arch/xtensa/kernel/pci.c
+++ b/arch/xtensa/kernel/pci.c
@@ -46,7 +46,6 @@
* pcibios_fixups
* pcibios_align_resource
* pcibios_fixup_bus
- * pcibios_setup
* pci_bus_add_device
* pci_mmap_page_range
*/
@@ -187,7 +186,7 @@ static int __init pcibios_init(void)
bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno,
pci_ctrl->ops, pci_ctrl, &resources);
pci_ctrl->bus = bus;
- pci_ctrl->last_busno = bus->subordinate;
+ pci_ctrl->last_busno = bus->busn_res.end;
if (next_busno <= pci_ctrl->last_busno)
next_busno = pci_ctrl->last_busno+1;
}
@@ -206,11 +205,6 @@ void __init pcibios_fixup_bus(struct pci_bus *bus)
}
}
-char __init *pcibios_setup(char *str)
-{
- return str;
-}
-
void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 9b306e550e3f..2c8d6a3d250a 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -277,7 +277,7 @@ void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
/* Don't leak any random bits. */
- memset(elfregs, 0, sizeof (elfregs));
+ memset(elfregs, 0, sizeof(*elfregs));
/* Note: PS.EXCM is not set while user task is running; its
* being set in regs->ps is for exception handling convenience.
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 02cf6335e9bd..e7dee617358e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -125,12 +125,8 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q)
blkg->pd[i] = pd;
pd->blkg = blkg;
- }
-
- /* invoke per-policy init */
- for (i = 0; i < BLKCG_MAX_POLS; i++) {
- struct blkcg_policy *pol = blkcg_policy[i];
+ /* invoke per-policy init */
if (blkcg_policy_enabled(blkg->q, pol))
pol->pd_init_fn(blkg);
}
@@ -245,10 +241,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup_create);
static void blkg_destroy(struct blkcg_gq *blkg)
{
- struct request_queue *q = blkg->q;
struct blkcg *blkcg = blkg->blkcg;
- lockdep_assert_held(q->queue_lock);
+ lockdep_assert_held(blkg->q->queue_lock);
lockdep_assert_held(&blkcg->lock);
/* Something wrong if we are trying to remove same group twice */
diff --git a/block/blk-core.c b/block/blk-core.c
index 3c923a7aeb56..93eb3e4f88ce 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -361,9 +361,10 @@ EXPORT_SYMBOL(blk_put_queue);
*/
void blk_drain_queue(struct request_queue *q, bool drain_all)
{
+ int i;
+
while (true) {
bool drain = false;
- int i;
spin_lock_irq(q->queue_lock);
@@ -408,6 +409,18 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
break;
msleep(10);
}
+
+ /*
+ * With queue marked dead, any woken up waiter will fail the
+ * allocation path, so the wakeup chaining is lost and we're
+ * left with hung waiters. We need to wake up those waiters.
+ */
+ if (q->request_fn) {
+ spin_lock_irq(q->queue_lock);
+ for (i = 0; i < ARRAY_SIZE(q->rq.wait); i++)
+ wake_up_all(&q->rq.wait[i]);
+ spin_unlock_irq(q->queue_lock);
+ }
}
/**
@@ -467,7 +480,6 @@ void blk_cleanup_queue(struct request_queue *q)
/* mark @q DEAD, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-
spin_lock_irq(lock);
/*
@@ -485,10 +497,6 @@ void blk_cleanup_queue(struct request_queue *q)
queue_flag_set(QUEUE_FLAG_NOMERGES, q);
queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
queue_flag_set(QUEUE_FLAG_DEAD, q);
-
- if (q->queue_lock != &q->__queue_lock)
- q->queue_lock = &q->__queue_lock;
-
spin_unlock_irq(lock);
mutex_unlock(&q->sysfs_lock);
@@ -499,6 +507,11 @@ void blk_cleanup_queue(struct request_queue *q)
del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
blk_sync_queue(q);
+ spin_lock_irq(lock);
+ if (q->queue_lock != &q->__queue_lock)
+ q->queue_lock = &q->__queue_lock;
+ spin_unlock_irq(lock);
+
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
diff --git a/block/blk-exec.c b/block/blk-exec.c
index fb2cbd551621..8b6dc5bd4dd0 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -43,6 +43,9 @@ static void blk_end_sync_rq(struct request *rq, int error)
* Description:
* Insert a fully prepared request at the back of the I/O scheduler queue
* for execution. Don't wait for completion.
+ *
+ * Note:
+ * This function will invoke @done directly if the queue is dead.
*/
void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
struct request *rq, int at_head,
@@ -51,18 +54,20 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
WARN_ON(irqs_disabled());
+
+ rq->rq_disk = bd_disk;
+ rq->end_io = done;
+
spin_lock_irq(q->queue_lock);
if (unlikely(blk_queue_dead(q))) {
- spin_unlock_irq(q->queue_lock);
rq->errors = -ENXIO;
if (rq->end_io)
rq->end_io(rq, rq->errors);
+ spin_unlock_irq(q->queue_lock);
return;
}
- rq->rq_disk = bd_disk;
- rq->end_io = done;
__elv_add_request(q, rq, where);
__blk_run_queue(q);
/* the queue is stopped so it won't be run */
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 780354888958..6e4744cbfb56 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -197,44 +197,3 @@ void blk_add_timer(struct request *req)
mod_timer(&q->timeout, expiry);
}
-/**
- * blk_abort_queue -- Abort all request on given queue
- * @queue: pointer to queue
- *
- */
-void blk_abort_queue(struct request_queue *q)
-{
- unsigned long flags;
- struct request *rq, *tmp;
- LIST_HEAD(list);
-
- /*
- * Not a request based block device, nothing to abort
- */
- if (!q->request_fn)
- return;
-
- spin_lock_irqsave(q->queue_lock, flags);
-
- elv_abort_queue(q);
-
- /*
- * Splice entries to local list, to avoid deadlocking if entries
- * get readded to the timeout list by error handling
- */
- list_splice_init(&q->timeout_list, &list);
-
- list_for_each_entry_safe(rq, tmp, &list, timeout_list)
- blk_abort_request(rq);
-
- /*
- * Occasionally, blk_abort_request() will return without
- * deleting the element from the list. Make sure we add those back
- * instead of leaving them on the local stack list.
- */
- list_splice(&list, &q->timeout_list);
-
- spin_unlock_irqrestore(q->queue_lock, flags);
-
-}
-EXPORT_SYMBOL_GPL(blk_abort_queue);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 673c977cc2bf..fb52df9744f5 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -17,8 +17,6 @@
#include "blk.h"
#include "blk-cgroup.h"
-static struct blkcg_policy blkcg_policy_cfq __maybe_unused;
-
/*
* tunables
*/
@@ -418,11 +416,6 @@ static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}
-static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
-{
- return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
-}
-
static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
return pd_to_blkg(&cfqg->pd);
@@ -572,6 +565,13 @@ static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }
#ifdef CONFIG_CFQ_GROUP_IOSCHED
+static struct blkcg_policy blkcg_policy_cfq;
+
+static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
+{
+ return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
+}
+
static inline void cfqg_get(struct cfq_group *cfqg)
{
return blkg_get(cfqg_to_blkg(cfqg));
@@ -3951,10 +3951,11 @@ static void cfq_exit_queue(struct elevator_queue *e)
cfq_shutdown_timer_wq(cfqd);
-#ifndef CONFIG_CFQ_GROUP_IOSCHED
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ blkcg_deactivate_policy(q, &blkcg_policy_cfq);
+#else
kfree(cfqd->root_group);
#endif
- blkcg_deactivate_policy(q, &blkcg_policy_cfq);
kfree(cfqd);
}
@@ -4194,14 +4195,15 @@ static int __init cfq_init(void)
#ifdef CONFIG_CFQ_GROUP_IOSCHED
if (!cfq_group_idle)
cfq_group_idle = 1;
-#else
- cfq_group_idle = 0;
-#endif
ret = blkcg_policy_register(&blkcg_policy_cfq);
if (ret)
return ret;
+#else
+ cfq_group_idle = 0;
+#endif
+ ret = -ENOMEM;
cfq_pool = KMEM_CACHE(cfq_queue, 0);
if (!cfq_pool)
goto err_pol_unreg;
@@ -4215,13 +4217,17 @@ static int __init cfq_init(void)
err_free_pool:
kmem_cache_destroy(cfq_pool);
err_pol_unreg:
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
return ret;
}
static void __exit cfq_exit(void)
{
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
blkcg_policy_unregister(&blkcg_policy_cfq);
+#endif
elv_unregister(&iosched_cfq);
kmem_cache_destroy(cfq_pool);
}
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 260fa80ef575..9a87daa6f4fb 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -721,11 +721,14 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
break;
}
+ if (capable(CAP_SYS_RAWIO))
+ return 0;
+
/* In particular, rule out all resets and host-specific ioctls. */
printk_ratelimited(KERN_WARNING
"%s: sending ioctl %x to a partition!\n", current->comm, cmd);
- return capable(CAP_SYS_RAWIO) ? 0 : -ENOIOCTLCMD;
+ return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 5a37eadb4e56..ba2c611154af 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -496,9 +496,12 @@ static void crypto_netlink_rcv(struct sk_buff *skb)
static int __init crypto_user_init(void)
{
+ struct netlink_kernel_cfg cfg = {
+ .input = crypto_netlink_rcv,
+ };
+
crypto_nlsk = netlink_kernel_create(&init_net, NETLINK_CRYPTO,
- 0, crypto_netlink_rcv,
- NULL, THIS_MODULE);
+ THIS_MODULE, &cfg);
if (!crypto_nlsk)
return -ENOMEM;
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 6512b20aeccd..ff9f6bd48301 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -61,7 +61,6 @@ static int acpi_ac_open_fs(struct inode *inode, struct file *file);
static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device, int type);
-static int acpi_ac_resume(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id ac_device_ids[] = {
@@ -70,6 +69,9 @@ static const struct acpi_device_id ac_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, ac_device_ids);
+static int acpi_ac_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
+
static struct acpi_driver acpi_ac_driver = {
.name = "ac",
.class = ACPI_AC_CLASS,
@@ -78,9 +80,9 @@ static struct acpi_driver acpi_ac_driver = {
.ops = {
.add = acpi_ac_add,
.remove = acpi_ac_remove,
- .resume = acpi_ac_resume,
.notify = acpi_ac_notify,
},
+ .drv.pm = &acpi_ac_pm,
};
struct acpi_ac {
@@ -309,13 +311,18 @@ static int acpi_ac_add(struct acpi_device *device)
return result;
}
-static int acpi_ac_resume(struct acpi_device *device)
+static int acpi_ac_resume(struct device *dev)
{
struct acpi_ac *ac;
unsigned old_state;
- if (!device || !acpi_driver_data(device))
+
+ if (!dev)
return -EINVAL;
- ac = acpi_driver_data(device);
+
+ ac = acpi_driver_data(to_acpi_device(dev));
+ if (!ac)
+ return -EINVAL;
+
old_state = ac->state;
if (acpi_ac_get_state(ac))
return 0;
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index a43fa1a57d57..1502c50273b5 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -36,6 +36,7 @@
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
static DEFINE_MUTEX(isolated_cpus_lock);
+static DEFINE_MUTEX(round_robin_lock);
static unsigned long power_saving_mwait_eax;
@@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index)
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return;
- mutex_lock(&isolated_cpus_lock);
+ mutex_lock(&round_robin_lock);
cpumask_clear(tmp);
for_each_cpu(cpu, pad_busy_cpus)
cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
@@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index)
if (cpumask_empty(tmp))
cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
if (cpumask_empty(tmp)) {
- mutex_unlock(&isolated_cpus_lock);
+ mutex_unlock(&round_robin_lock);
return;
}
for_each_cpu(cpu, tmp) {
@@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index)
tsk_in_cpu[tsk_index] = preferred_cpu;
cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
cpu_weight[preferred_cpu]++;
- mutex_unlock(&isolated_cpus_lock);
+ mutex_unlock(&round_robin_lock);
set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
}
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 0ed85cac3231..615996a36bed 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -95,18 +95,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags)
return_ACPI_STATUS(status);
}
- if (sleep_state != ACPI_STATE_S5) {
- /*
- * Disable BM arbitration. This feature is contained within an
- * optional register (PM2 Control), so ignore a BAD_ADDRESS
- * exception.
- */
- status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
- if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
- return_ACPI_STATUS(status);
- }
- }
-
/*
* 1) Disable/Clear all GPEs
* 2) Enable all wakeup GPEs
@@ -364,16 +352,6 @@ acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags)
[ACPI_EVENT_POWER_BUTTON].
status_register_id, ACPI_CLEAR_STATUS);
- /*
- * Enable BM arbitration. This feature is contained within an
- * optional register (PM2 Control), so ignore a BAD_ADDRESS
- * exception.
- */
- status = acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
- if (ACPI_FAILURE(status) && (status != AE_BAD_ADDRESS)) {
- return_ACPI_STATUS(status);
- }
-
acpi_hw_execute_sleep_method(METHOD_PATHNAME__SST, ACPI_SST_WORKING);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 23ce09686418..fe6626035495 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -638,7 +638,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
/* Create the new outer package and populate it */
status =
- acpi_ns_wrap_with_package(data, *elements,
+ acpi_ns_wrap_with_package(data, return_object,
return_object_ptr);
if (ACPI_FAILURE(status)) {
return (status);
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 5577762daee1..6686b1eaf13e 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- return acpi_os_map_generic_address(&entry->register_region);
+ return apei_map_generic_address(&entry->register_region);
return 0;
}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- acpi_os_unmap_generic_address(&entry->register_region);
+ apei_unmap_generic_address(&entry->register_region);
return 0;
}
@@ -606,6 +606,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
return 0;
}
+int apei_map_generic_address(struct acpi_generic_address *reg)
+{
+ int rc;
+ u32 access_bit_width;
+ u64 address;
+
+ rc = apei_check_gar(reg, &address, &access_bit_width);
+ if (rc)
+ return rc;
+ return acpi_os_map_generic_address(reg);
+}
+EXPORT_SYMBOL_GPL(apei_map_generic_address);
+
/* read GAR in interrupt (including NMI) or process context */
int apei_read(u64 *val, struct acpi_generic_address *reg)
{
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index cca240a33038..f220d642136e 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -7,6 +7,8 @@
#define APEI_INTERNAL_H
#include <linux/cper.h>
+#include <linux/acpi.h>
+#include <linux/acpi_io.h>
struct apei_exec_context;
@@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
/* IP has been set in instruction function */
#define APEI_EXEC_SET_IP 1
+int apei_map_generic_address(struct acpi_generic_address *reg);
+
+static inline void apei_unmap_generic_address(struct acpi_generic_address *reg)
+{
+ acpi_os_unmap_generic_address(reg);
+}
+
int apei_read(u64 *val, struct acpi_generic_address *reg);
int apei_write(u64 val, struct acpi_generic_address *reg);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 9b3cac0abecc..1599566ed1fe 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
- rc = acpi_os_map_generic_address(&generic->error_status_address);
+ rc = apei_map_generic_address(&generic->error_status_address);
if (rc)
goto err_free;
error_block_length = generic->error_block_length;
@@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ghes;
err_unmap:
- acpi_os_unmap_generic_address(&generic->error_status_address);
+ apei_unmap_generic_address(&generic->error_status_address);
err_free:
kfree(ghes);
return ERR_PTR(rc);
@@ -330,7 +330,7 @@ err_free:
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
- acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
+ apei_unmap_generic_address(&ghes->generic->error_status_address);
}
enum {
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7dd3f9fb9f3f..023f9c8534d0 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -1044,17 +1044,24 @@ static int acpi_battery_remove(struct acpi_device *device, int type)
}
/* this is needed to learn about changes made in suspended state */
-static int acpi_battery_resume(struct acpi_device *device)
+static int acpi_battery_resume(struct device *dev)
{
struct acpi_battery *battery;
- if (!device)
+
+ if (!dev)
return -EINVAL;
- battery = acpi_driver_data(device);
+
+ battery = acpi_driver_data(to_acpi_device(dev));
+ if (!battery)
+ return -EINVAL;
+
battery->update_time = 0;
acpi_battery_update(battery);
return 0;
}
+static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
+
static struct acpi_driver acpi_battery_driver = {
.name = "battery",
.class = ACPI_BATTERY_CLASS,
@@ -1062,10 +1069,10 @@ static struct acpi_driver acpi_battery_driver = {
.flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
.ops = {
.add = acpi_battery_add,
- .resume = acpi_battery_resume,
.remove = acpi_battery_remove,
.notify = acpi_battery_notify,
},
+ .drv.pm = &acpi_battery_pm,
};
static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index d27d072472f9..79d4c22f7a6d 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -76,19 +76,21 @@ MODULE_DEVICE_TABLE(acpi, button_device_ids);
static int acpi_button_add(struct acpi_device *device);
static int acpi_button_remove(struct acpi_device *device, int type);
-static int acpi_button_resume(struct acpi_device *device);
static void acpi_button_notify(struct acpi_device *device, u32 event);
+static int acpi_button_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
+
static struct acpi_driver acpi_button_driver = {
.name = "button",
.class = ACPI_BUTTON_CLASS,
.ids = button_device_ids,
.ops = {
.add = acpi_button_add,
- .resume = acpi_button_resume,
.remove = acpi_button_remove,
.notify = acpi_button_notify,
},
+ .drv.pm = &acpi_button_pm,
};
struct acpi_button {
@@ -308,8 +310,9 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
}
}
-static int acpi_button_resume(struct acpi_device *device)
+static int acpi_button_resume(struct device *dev)
{
+ struct acpi_device *device = to_acpi_device(dev);
struct acpi_button *button = acpi_driver_data(device);
if (button->type == ACPI_BUTTON_TYPE_LID)
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 0f0356ca1a9e..669d9ee80d16 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -46,8 +46,6 @@ MODULE_LICENSE("GPL");
static int acpi_fan_add(struct acpi_device *device);
static int acpi_fan_remove(struct acpi_device *device, int type);
-static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state);
-static int acpi_fan_resume(struct acpi_device *device);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
@@ -55,6 +53,10 @@ static const struct acpi_device_id fan_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
+static int acpi_fan_suspend(struct device *dev);
+static int acpi_fan_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
+
static struct acpi_driver acpi_fan_driver = {
.name = "fan",
.class = ACPI_FAN_CLASS,
@@ -62,9 +64,8 @@ static struct acpi_driver acpi_fan_driver = {
.ops = {
.add = acpi_fan_add,
.remove = acpi_fan_remove,
- .suspend = acpi_fan_suspend,
- .resume = acpi_fan_resume,
},
+ .drv.pm = &acpi_fan_pm,
};
/* thermal cooling device callbacks */
@@ -183,24 +184,24 @@ static int acpi_fan_remove(struct acpi_device *device, int type)
return 0;
}
-static int acpi_fan_suspend(struct acpi_device *device, pm_message_t state)
+static int acpi_fan_suspend(struct device *dev)
{
- if (!device)
+ if (!dev)
return -EINVAL;
- acpi_bus_set_power(device->handle, ACPI_STATE_D0);
+ acpi_bus_set_power(to_acpi_device(dev)->handle, ACPI_STATE_D0);
return AE_OK;
}
-static int acpi_fan_resume(struct acpi_device *device)
+static int acpi_fan_resume(struct device *dev)
{
int result;
- if (!device)
+ if (!dev)
return -EINVAL;
- result = acpi_bus_update_power(device->handle, NULL);
+ result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL);
if (result)
printk(KERN_ERR PREFIX "Error updating fan power state\n");
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 7aff6312ce7c..ec54014c321c 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -505,6 +505,8 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root;
+ root->mcfg_addr = acpi_pci_root_get_mcfg_addr(device->handle);
+
/*
* All supported architectures that use ACPI have support for
* PCI domains, so we indicate this in _OSC support capabilities.
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index eb6408741a8f..215ecd097408 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -60,7 +60,6 @@ ACPI_MODULE_NAME("power");
static int acpi_power_add(struct acpi_device *device);
static int acpi_power_remove(struct acpi_device *device, int type);
-static int acpi_power_resume(struct acpi_device *device);
static const struct acpi_device_id power_device_ids[] = {
{ACPI_POWER_HID, 0},
@@ -68,6 +67,9 @@ static const struct acpi_device_id power_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, power_device_ids);
+static int acpi_power_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume);
+
static struct acpi_driver acpi_power_driver = {
.name = "power",
.class = ACPI_POWER_CLASS,
@@ -75,8 +77,8 @@ static struct acpi_driver acpi_power_driver = {
.ops = {
.add = acpi_power_add,
.remove = acpi_power_remove,
- .resume = acpi_power_resume,
},
+ .drv.pm = &acpi_power_pm,
};
/*
@@ -773,14 +775,16 @@ static int acpi_power_remove(struct acpi_device *device, int type)
return 0;
}
-static int acpi_power_resume(struct acpi_device *device)
+static int acpi_power_resume(struct device *dev)
{
int result = 0, state;
+ struct acpi_device *device;
struct acpi_power_resource *resource;
- if (!device)
+ if (!dev)
return -EINVAL;
+ device = to_acpi_device(dev);
resource = acpi_driver_data(device);
if (!resource)
return -EINVAL;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index c850de4c9a14..eff722278ff5 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -189,10 +189,12 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
* }
*
- * Ignores apic_id and always return 0 for CPU0's handle.
+ * Ignores apic_id and always returns 0 for the processor
+ * handle with acpi id 0 if nr_cpu_ids is 1.
+ * This should be the case if SMP tables are not found.
* Return -1 for other CPU's handle.
*/
- if (acpi_id == 0)
+ if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
return apic_id;
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 0734086537b8..7048b97853e0 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -93,6 +93,9 @@ static const struct acpi_device_id processor_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
+ acpi_processor_suspend, acpi_processor_resume);
+
static struct acpi_driver acpi_processor_driver = {
.name = "processor",
.class = ACPI_PROCESSOR_CLASS,
@@ -100,10 +103,9 @@ static struct acpi_driver acpi_processor_driver = {
.ops = {
.add = acpi_processor_add,
.remove = acpi_processor_remove,
- .suspend = acpi_processor_suspend,
- .resume = acpi_processor_resume,
.notify = acpi_processor_notify,
},
+ .drv.pm = &acpi_processor_pm,
};
#define INSTALL_NOTIFY_HANDLER 1
@@ -427,18 +429,11 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
* Initialize missing things
*/
if (pr->flags.need_hotplug_init) {
- struct cpuidle_driver *idle_driver =
- cpuidle_get_driver();
-
printk(KERN_INFO "Will online and init hotplugged "
"CPU: %d\n", pr->id);
WARN(acpi_processor_start(pr), "Failed to start CPU:"
" %d\n", pr->id);
pr->flags.need_hotplug_init = 0;
- if (idle_driver && !strcmp(idle_driver->name,
- "intel_idle")) {
- intel_idle_cpu_init(pr->id);
- }
/* Normal CPU soft online event */
} else {
acpi_processor_ppc_has_changed(pr, 0);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f3decb30223f..e589c1985248 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -221,9 +221,6 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
#endif
-/*
- * Suspend / resume control
- */
static u32 saved_bm_rld;
static void acpi_idle_bm_rld_save(void)
@@ -240,13 +237,13 @@ static void acpi_idle_bm_rld_restore(void)
acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}
-int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
+int acpi_processor_suspend(struct device *dev)
{
acpi_idle_bm_rld_save();
return 0;
}
-int acpi_processor_resume(struct acpi_device * device)
+int acpi_processor_resume(struct device *dev)
{
acpi_idle_bm_rld_restore();
return 0;
@@ -586,7 +583,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
*/
cx->valid = 1;
- cx->latency_ticks = cx->latency;
/*
* On older chipsets, BM_RLD needs to be set
* in order for Bus Master activity to wake the
@@ -619,7 +615,6 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
if (!cx->address)
break;
cx->valid = 1;
- cx->latency_ticks = cx->latency; /* Normalize latency */
break;
case ACPI_STATE_C3:
@@ -754,6 +749,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
local_irq_disable();
+
lapic_timer_state_broadcast(pr, cx, 1);
kt1 = ktime_get_real();
acpi_idle_do_entry(cx);
@@ -764,7 +760,6 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
dev->last_residency = (int)idle_time;
local_irq_enable();
- cx->usage++;
lapic_timer_state_broadcast(pr, cx, 0);
return index;
@@ -823,6 +818,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
local_irq_disable();
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -866,10 +862,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
- cx->usage++;
-
lapic_timer_state_broadcast(pr, cx, 0);
- cx->time += idle_time;
return index;
}
@@ -909,12 +902,13 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
local_irq_disable();
acpi_safe_halt();
local_irq_enable();
- return -EINVAL;
+ return -EBUSY;
}
}
local_irq_disable();
+
if (cx->entry_method != ACPI_CSTATE_FFH) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -986,10 +980,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
if (cx->entry_method != ACPI_CSTATE_FFH)
current_thread_info()->status |= TS_POLLING;
- cx->usage++;
-
lapic_timer_state_broadcast(pr, cx, 0);
- cx->time += idle_time;
return index;
}
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 6e36d0c0057c..c0b9aa5faf4c 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -988,16 +988,18 @@ static void acpi_sbs_rmdirs(void)
#endif
}
-static int acpi_sbs_resume(struct acpi_device *device)
+static int acpi_sbs_resume(struct device *dev)
{
struct acpi_sbs *sbs;
- if (!device)
+ if (!dev)
return -EINVAL;
- sbs = device->driver_data;
+ sbs = to_acpi_device(dev)->driver_data;
acpi_sbs_callback(sbs);
return 0;
}
+static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
+
static struct acpi_driver acpi_sbs_driver = {
.name = "sbs",
.class = ACPI_SBS_CLASS,
@@ -1005,8 +1007,8 @@ static struct acpi_driver acpi_sbs_driver = {
.ops = {
.add = acpi_sbs_add,
.remove = acpi_sbs_remove,
- .resume = acpi_sbs_resume,
},
+ .drv.pm = &acpi_sbs_pm,
};
static int __init acpi_sbs_init(void)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index c8a1f3b68110..fdda49336560 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -290,26 +290,6 @@ static void acpi_device_release(struct device *dev)
kfree(acpi_dev);
}
-static int acpi_device_suspend(struct device *dev, pm_message_t state)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_driver *acpi_drv = acpi_dev->driver;
-
- if (acpi_drv && acpi_drv->ops.suspend)
- return acpi_drv->ops.suspend(acpi_dev, state);
- return 0;
-}
-
-static int acpi_device_resume(struct device *dev)
-{
- struct acpi_device *acpi_dev = to_acpi_device(dev);
- struct acpi_driver *acpi_drv = acpi_dev->driver;
-
- if (acpi_drv && acpi_drv->ops.resume)
- return acpi_drv->ops.resume(acpi_dev);
- return 0;
-}
-
static int acpi_bus_match(struct device *dev, struct device_driver *drv)
{
struct acpi_device *acpi_dev = to_acpi_device(dev);
@@ -441,8 +421,6 @@ static int acpi_device_remove(struct device * dev)
struct bus_type acpi_bus_type = {
.name = "acpi",
- .suspend = acpi_device_suspend,
- .resume = acpi_device_resume,
.match = acpi_bus_match,
.probe = acpi_device_probe,
.remove = acpi_device_remove,
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 1784cb30e7cf..028dd425702c 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -716,8 +716,9 @@ int acpi_suspend(u32 acpi_state)
* @dev: device to examine; its driver model wakeup flags control
* whether it should be able to wake up the system
* @d_min_p: used to store the upper limit of allowed states range
- * Return value: preferred power state of the device on success, -ENODEV on
- * failure (ie. if there's no 'struct acpi_device' for @dev)
+ * @d_max_in: specify the lowest allowed states
+ * Return value: preferred power state of the device on success, -ENODEV
+ * (ie. if there's no 'struct acpi_device' for @dev) or -EINVAL on failure
*
* Find the lowest power (highest number) ACPI device power state that
* device @dev can be in while the system is in the sleep state represented
@@ -732,13 +733,15 @@ int acpi_suspend(u32 acpi_state)
* via @wake.
*/
-int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
+int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p, int d_max_in)
{
acpi_handle handle = DEVICE_ACPI_HANDLE(dev);
struct acpi_device *adev;
char acpi_method[] = "_SxD";
unsigned long long d_min, d_max;
+ if (d_max_in < ACPI_STATE_D0 || d_max_in > ACPI_STATE_D3)
+ return -EINVAL;
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &adev))) {
printk(KERN_DEBUG "ACPI handle has no context!\n");
return -ENODEV;
@@ -746,8 +749,10 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
acpi_method[2] = '0' + acpi_target_sleep_state;
/*
- * If the sleep state is S0, we will return D3, but if the device has
- * _S0W, we will use the value from _S0W
+ * If the sleep state is S0, the lowest limit from ACPI is D3,
+ * but if the device has _S0W, we will use the value from _S0W
+ * as the lowest limit from ACPI. Finally, we will constrain
+ * the lowest limit with the specified one.
*/
d_min = ACPI_STATE_D0;
d_max = ACPI_STATE_D3;
@@ -791,8 +796,17 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
}
}
+ if (d_max_in < d_min)
+ return -EINVAL;
if (d_min_p)
*d_min_p = d_min;
+ /* constrain d_max with specified lowest limit (max number) */
+ if (d_max > d_max_in) {
+ for (d_max = d_max_in; d_max > d_min; d_max--) {
+ if (adev->power.states[d_max].flags.valid)
+ break;
+ }
+ }
return d_max;
}
EXPORT_SYMBOL(acpi_pm_device_sleep_state);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 9f66181c814e..240a24400976 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
{
int result = 0;
- if (!strncmp(val, "enable", strlen("enable") - 1)) {
+ if (!strncmp(val, "enable", strlen("enable"))) {
result = acpi_debug_trace(trace_method_name, trace_debug_level,
trace_debug_layer, 0);
if (result)
@@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp)
goto exit;
}
- if (!strncmp(val, "disable", strlen("disable") - 1)) {
+ if (!strncmp(val, "disable", strlen("disable"))) {
int name = 0;
result = acpi_debug_trace((char *)&name, trace_debug_level,
trace_debug_layer, 0);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 7dbebea1ec31..21dd4c268aef 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -98,7 +98,6 @@ MODULE_PARM_DESC(psv, "Disable or override all passive trip points.");
static int acpi_thermal_add(struct acpi_device *device);
static int acpi_thermal_remove(struct acpi_device *device, int type);
-static int acpi_thermal_resume(struct acpi_device *device);
static void acpi_thermal_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id thermal_device_ids[] = {
@@ -107,6 +106,9 @@ static const struct acpi_device_id thermal_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
+static int acpi_thermal_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
+
static struct acpi_driver acpi_thermal_driver = {
.name = "thermal",
.class = ACPI_THERMAL_CLASS,
@@ -114,9 +116,9 @@ static struct acpi_driver acpi_thermal_driver = {
.ops = {
.add = acpi_thermal_add,
.remove = acpi_thermal_remove,
- .resume = acpi_thermal_resume,
.notify = acpi_thermal_notify,
},
+ .drv.pm = &acpi_thermal_pm,
};
struct acpi_thermal_state {
@@ -1041,16 +1043,17 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
return 0;
}
-static int acpi_thermal_resume(struct acpi_device *device)
+static int acpi_thermal_resume(struct device *dev)
{
- struct acpi_thermal *tz = NULL;
+ struct acpi_thermal *tz;
int i, j, power_state, result;
-
- if (!device || !acpi_driver_data(device))
+ if (!dev)
return -EINVAL;
- tz = acpi_driver_data(device);
+ tz = acpi_driver_data(to_acpi_device(dev));
+ if (!tz)
+ return -EINVAL;
for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) {
if (!(&tz->trips.active[i]))
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a576575617d7..1e0a9e17c31d 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
struct acpi_object_list args = { 1, &arg0 };
+ if (!video->cap._DOS)
+ return 0;
if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
return -EINVAL;
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
index aa0b1f160528..0b6f0b28a487 100644
--- a/drivers/amba/tegra-ahb.c
+++ b/drivers/amba/tegra-ahb.c
@@ -264,11 +264,6 @@ static int __devinit tegra_ahb_probe(struct platform_device *pdev)
return 0;
}
-static int __devexit tegra_ahb_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
{ .compatible = "nvidia,tegra30-ahb", },
{ .compatible = "nvidia,tegra20-ahb", },
@@ -277,7 +272,6 @@ static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
static struct platform_driver tegra_ahb_driver = {
.probe = tegra_ahb_probe,
- .remove = __devexit_p(tegra_ahb_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 95a656f33a23..fadd5866d40f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -80,6 +80,8 @@ const struct ata_port_operations ata_base_port_ops = {
.prereset = ata_std_prereset,
.postreset = ata_std_postreset,
.error_handler = ata_std_error_handler,
+ .sched_eh = ata_std_sched_eh,
+ .end_eh = ata_std_end_eh,
};
const struct ata_port_operations sata_port_ops = {
@@ -6644,6 +6646,8 @@ struct ata_port_operations ata_dummy_port_ops = {
.qc_prep = ata_noop_qc_prep,
.qc_issue = ata_dummy_qc_issue,
.error_handler = ata_dummy_error_handler,
+ .sched_eh = ata_std_sched_eh,
+ .end_eh = ata_std_end_eh,
};
const struct ata_port_info ata_dummy_port_info = {
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 938b6996c38f..7d4535e989bf 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -793,12 +793,12 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
ata_for_each_link(link, ap, HOST_FIRST)
memset(&link->eh_info, 0, sizeof(link->eh_info));
- /* Clear host_eh_scheduled while holding ap->lock such
- * that if exception occurs after this point but
- * before EH completion, SCSI midlayer will
+ /* end eh (clear host_eh_scheduled) while holding
+ * ap->lock such that if exception occurs after this
+ * point but before EH completion, SCSI midlayer will
* re-initiate EH.
*/
- host->host_eh_scheduled = 0;
+ ap->ops->end_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
ata_eh_release(ap);
@@ -986,16 +986,13 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
}
/**
- * ata_port_schedule_eh - schedule error handling without a qc
- * @ap: ATA port to schedule EH for
- *
- * Schedule error handling for @ap. EH will kick in as soon as
- * all commands are drained.
+ * ata_std_sched_eh - non-libsas ata_ports issue eh with this common routine
+ * @ap: ATA port to schedule EH for
*
- * LOCKING:
+ * LOCKING: inherited from ata_port_schedule_eh
* spin_lock_irqsave(host lock)
*/
-void ata_port_schedule_eh(struct ata_port *ap)
+void ata_std_sched_eh(struct ata_port *ap)
{
WARN_ON(!ap->ops->error_handler);
@@ -1007,6 +1004,44 @@ void ata_port_schedule_eh(struct ata_port *ap)
DPRINTK("port EH scheduled\n");
}
+EXPORT_SYMBOL_GPL(ata_std_sched_eh);
+
+/**
+ * ata_std_end_eh - non-libsas ata_ports complete eh with this common routine
+ * @ap: ATA port to end EH for
+ *
+ * In the libata object model there is a 1:1 mapping of ata_port to
+ * shost, so host fields can be directly manipulated under ap->lock, in
+ * the libsas case we need to hold a lock at the ha->level to coordinate
+ * these events.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_std_end_eh(struct ata_port *ap)
+{
+ struct Scsi_Host *host = ap->scsi_host;
+
+ host->host_eh_scheduled = 0;
+}
+EXPORT_SYMBOL(ata_std_end_eh);
+
+
+/**
+ * ata_port_schedule_eh - schedule error handling without a qc
+ * @ap: ATA port to schedule EH for
+ *
+ * Schedule error handling for @ap. EH will kick in as soon as
+ * all commands are drained.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_port_schedule_eh(struct ata_port *ap)
+{
+ /* see: ata_std_sched_eh, unless you know better */
+ ap->ops->sched_eh(ap);
+}
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 765c3a28077a..d91a3a0b2325 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -227,33 +227,24 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev)
static int dev_rmdir(const char *name)
{
- struct nameidata nd;
+ struct path parent;
struct dentry *dentry;
int err;
- err = kern_path_parent(name, &nd);
- if (err)
- return err;
-
- mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
- dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
- if (!IS_ERR(dentry)) {
- if (dentry->d_inode) {
- if (dentry->d_inode->i_private == &thread)
- err = vfs_rmdir(nd.path.dentry->d_inode,
- dentry);
- else
- err = -EPERM;
- } else {
- err = -ENOENT;
- }
- dput(dentry);
+ dentry = kern_path_locked(name, &parent);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ if (dentry->d_inode) {
+ if (dentry->d_inode->i_private == &thread)
+ err = vfs_rmdir(parent.dentry->d_inode, dentry);
+ else
+ err = -EPERM;
} else {
- err = PTR_ERR(dentry);
+ err = -ENOENT;
}
-
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
- path_put(&nd.path);
+ dput(dentry);
+ mutex_unlock(&parent.dentry->d_inode->i_mutex);
+ path_put(&parent);
return err;
}
@@ -305,50 +296,43 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta
static int handle_remove(const char *nodename, struct device *dev)
{
- struct nameidata nd;
+ struct path parent;
struct dentry *dentry;
- struct kstat stat;
int deleted = 1;
int err;
- err = kern_path_parent(nodename, &nd);
- if (err)
- return err;
+ dentry = kern_path_locked(nodename, &parent);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
- mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
- dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
- if (!IS_ERR(dentry)) {
- if (dentry->d_inode) {
- err = vfs_getattr(nd.path.mnt, dentry, &stat);
- if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
- struct iattr newattrs;
- /*
- * before unlinking this node, reset permissions
- * of possible references like hardlinks
- */
- newattrs.ia_uid = 0;
- newattrs.ia_gid = 0;
- newattrs.ia_mode = stat.mode & ~0777;
- newattrs.ia_valid =
- ATTR_UID|ATTR_GID|ATTR_MODE;
- mutex_lock(&dentry->d_inode->i_mutex);
- notify_change(dentry, &newattrs);
- mutex_unlock(&dentry->d_inode->i_mutex);
- err = vfs_unlink(nd.path.dentry->d_inode,
- dentry);
- if (!err || err == -ENOENT)
- deleted = 1;
- }
- } else {
- err = -ENOENT;
+ if (dentry->d_inode) {
+ struct kstat stat;
+ err = vfs_getattr(parent.mnt, dentry, &stat);
+ if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
+ struct iattr newattrs;
+ /*
+ * before unlinking this node, reset permissions
+ * of possible references like hardlinks
+ */
+ newattrs.ia_uid = 0;
+ newattrs.ia_gid = 0;
+ newattrs.ia_mode = stat.mode & ~0777;
+ newattrs.ia_valid =
+ ATTR_UID|ATTR_GID|ATTR_MODE;
+ mutex_lock(&dentry->d_inode->i_mutex);
+ notify_change(dentry, &newattrs);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+ err = vfs_unlink(parent.dentry->d_inode, dentry);
+ if (!err || err == -ENOENT)
+ deleted = 1;
}
- dput(dentry);
} else {
- err = PTR_ERR(dentry);
+ err = -ENOENT;
}
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ dput(dentry);
+ mutex_unlock(&parent.dentry->d_inode->i_mutex);
- path_put(&nd.path);
+ path_put(&parent);
if (deleted && strchr(nodename, '/'))
delete_path(nodename);
return err;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 83aa694a8efe..ba3487c9835b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -75,19 +75,6 @@ static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
start_latency_ns, "start");
}
-static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
- save_state_latency_ns, "state save");
-}
-
-static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
- restore_state_latency_ns,
- "state restore");
-}
-
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
bool ret = false;
@@ -139,6 +126,19 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
genpd->status = GPD_STATE_ACTIVE;
}
+static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
+{
+ s64 usecs64;
+
+ if (!genpd->cpu_data)
+ return;
+
+ usecs64 = genpd->power_on_latency_ns;
+ do_div(usecs64, NSEC_PER_USEC);
+ usecs64 += genpd->cpu_data->saved_exit_latency;
+ genpd->cpu_data->idle_state->exit_latency = usecs64;
+}
+
/**
* __pm_genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
@@ -146,7 +146,7 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
* Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
-int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
__releases(&genpd->lock) __acquires(&genpd->lock)
{
struct gpd_link *link;
@@ -176,6 +176,13 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
return 0;
}
+ if (genpd->cpu_data) {
+ cpuidle_pause_and_lock();
+ genpd->cpu_data->idle_state->disabled = true;
+ cpuidle_resume_and_unlock();
+ goto out;
+ }
+
/*
* The list is guaranteed not to change while the loop below is being
* executed, unless one of the masters' .power_on() callbacks fiddles
@@ -215,6 +222,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
if (elapsed_ns > genpd->power_on_latency_ns) {
genpd->power_on_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
+ genpd_recalc_cpu_exit_latency(genpd);
if (genpd->name)
pr_warning("%s: Power-on latency exceeded, "
"new value %lld ns\n", genpd->name,
@@ -222,6 +230,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
}
}
+ out:
genpd_set_active(genpd);
return 0;
@@ -251,6 +260,19 @@ int pm_genpd_poweron(struct generic_pm_domain *genpd)
#ifdef CONFIG_PM_RUNTIME
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+ save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+ restore_state_latency_ns,
+ "state restore");
+}
+
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
unsigned long val, void *ptr)
{
@@ -275,7 +297,7 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
pdd = dev->power.subsys_data ?
dev->power.subsys_data->domain_data : NULL;
- if (pdd) {
+ if (pdd && pdd->dev) {
to_gpd_data(pdd)->td.constraint_changed = true;
genpd = dev_to_genpd(dev);
} else {
@@ -339,19 +361,16 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
+ bool need_restore = gpd_data->need_restore;
- if (!gpd_data->need_restore)
- return;
-
+ gpd_data->need_restore = false;
mutex_unlock(&genpd->lock);
genpd_start_dev(genpd, dev);
- genpd_restore_dev(genpd, dev);
- genpd_stop_dev(genpd, dev);
+ if (need_restore)
+ genpd_restore_dev(genpd, dev);
mutex_lock(&genpd->lock);
-
- gpd_data->need_restore = false;
}
/**
@@ -458,6 +477,21 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
}
+ if (genpd->cpu_data) {
+ /*
+ * If cpu_data is set, cpuidle should turn the domain off when
+ * the CPU in it is idle. In that case we don't decrement the
+ * subdomain counts of the master domains, so that power is not
+ * removed from the current domain prematurely as a result of
+ * cutting off the masters' power.
+ */
+ genpd->status = GPD_STATE_POWER_OFF;
+ cpuidle_pause_and_lock();
+ genpd->cpu_data->idle_state->disabled = false;
+ cpuidle_resume_and_unlock();
+ goto out;
+ }
+
if (genpd->power_off) {
ktime_t time_start;
s64 elapsed_ns;
@@ -595,7 +629,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
/* If power.irq_safe, the PM domain is never powered off. */
if (dev->power.irq_safe)
- goto out;
+ return genpd_start_dev(genpd, dev);
mutex_lock(&genpd->lock);
ret = __pm_genpd_poweron(genpd);
@@ -628,9 +662,6 @@ static int pm_genpd_runtime_resume(struct device *dev)
wake_up_all(&genpd->status_wait_queue);
mutex_unlock(&genpd->lock);
- out:
- genpd_start_dev(genpd, dev);
-
return 0;
}
@@ -1235,6 +1266,27 @@ static void pm_genpd_complete(struct device *dev)
#endif /* CONFIG_PM_SLEEP */
+static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
+{
+ struct generic_pm_domain_data *gpd_data;
+
+ gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
+ if (!gpd_data)
+ return NULL;
+
+ mutex_init(&gpd_data->lock);
+ gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
+ dev_pm_qos_add_notifier(dev, &gpd_data->nb);
+ return gpd_data;
+}
+
+static void __pm_genpd_free_dev_data(struct device *dev,
+ struct generic_pm_domain_data *gpd_data)
+{
+ dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
+ kfree(gpd_data);
+}
+
/**
* __pm_genpd_add_device - Add a device to an I/O PM domain.
* @genpd: PM domain to add the device to.
@@ -1244,7 +1296,7 @@ static void pm_genpd_complete(struct device *dev)
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
struct gpd_timing_data *td)
{
- struct generic_pm_domain_data *gpd_data;
+ struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
struct pm_domain_data *pdd;
int ret = 0;
@@ -1253,14 +1305,10 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
return -EINVAL;
- gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
- if (!gpd_data)
+ gpd_data_new = __pm_genpd_alloc_dev_data(dev);
+ if (!gpd_data_new)
return -ENOMEM;
- mutex_init(&gpd_data->lock);
- gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
- dev_pm_qos_add_notifier(dev, &gpd_data->nb);
-
genpd_acquire_lock(genpd);
if (genpd->prepared_count > 0) {
@@ -1274,35 +1322,42 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
goto out;
}
+ ret = dev_pm_get_subsys_data(dev);
+ if (ret)
+ goto out;
+
genpd->device_count++;
genpd->max_off_time_changed = true;
- dev_pm_get_subsys_data(dev);
-
- mutex_lock(&gpd_data->lock);
spin_lock_irq(&dev->power.lock);
+
dev->pm_domain = &genpd->domain;
- dev->power.subsys_data->domain_data = &gpd_data->base;
- gpd_data->base.dev = dev;
- list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
- gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+ if (dev->power.subsys_data->domain_data) {
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ } else {
+ gpd_data = gpd_data_new;
+ dev->power.subsys_data->domain_data = &gpd_data->base;
+ }
+ gpd_data->refcount++;
if (td)
gpd_data->td = *td;
+ spin_unlock_irq(&dev->power.lock);
+
+ mutex_lock(&gpd_data->lock);
+ gpd_data->base.dev = dev;
+ list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+ gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = -1;
- spin_unlock_irq(&dev->power.lock);
mutex_unlock(&gpd_data->lock);
- genpd_release_lock(genpd);
-
- return 0;
-
out:
genpd_release_lock(genpd);
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- kfree(gpd_data);
+ if (gpd_data != gpd_data_new)
+ __pm_genpd_free_dev_data(dev, gpd_data_new);
+
return ret;
}
@@ -1348,6 +1403,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
{
struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
+ bool remove = false;
int ret = 0;
dev_dbg(dev, "%s()\n", __func__);
@@ -1368,22 +1424,28 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
spin_lock_irq(&dev->power.lock);
+
dev->pm_domain = NULL;
pdd = dev->power.subsys_data->domain_data;
list_del_init(&pdd->list_node);
- dev->power.subsys_data->domain_data = NULL;
+ gpd_data = to_gpd_data(pdd);
+ if (--gpd_data->refcount == 0) {
+ dev->power.subsys_data->domain_data = NULL;
+ remove = true;
+ }
+
spin_unlock_irq(&dev->power.lock);
- gpd_data = to_gpd_data(pdd);
mutex_lock(&gpd_data->lock);
pdd->dev = NULL;
mutex_unlock(&gpd_data->lock);
genpd_release_lock(genpd);
- dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
- kfree(gpd_data);
dev_pm_put_subsys_data(dev);
+ if (remove)
+ __pm_genpd_free_dev_data(dev, gpd_data);
+
return 0;
out:
@@ -1541,33 +1603,52 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
* @dev: Device to add the callbacks to.
* @ops: Set of callbacks to add.
* @td: Timing data to add to the device along with the callbacks (optional).
+ *
+ * Every call to this routine should be balanced with a call to
+ * __pm_genpd_remove_callbacks() and they must not be nested.
*/
int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
struct gpd_timing_data *td)
{
- struct pm_domain_data *pdd;
+ struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
int ret = 0;
- if (!(dev && dev->power.subsys_data && ops))
+ if (!(dev && ops))
return -EINVAL;
+ gpd_data_new = __pm_genpd_alloc_dev_data(dev);
+ if (!gpd_data_new)
+ return -ENOMEM;
+
pm_runtime_disable(dev);
device_pm_lock();
- pdd = dev->power.subsys_data->domain_data;
- if (pdd) {
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+ ret = dev_pm_get_subsys_data(dev);
+ if (ret)
+ goto out;
- gpd_data->ops = *ops;
- if (td)
- gpd_data->td = *td;
+ spin_lock_irq(&dev->power.lock);
+
+ if (dev->power.subsys_data->domain_data) {
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
} else {
- ret = -EINVAL;
+ gpd_data = gpd_data_new;
+ dev->power.subsys_data->domain_data = &gpd_data->base;
}
+ gpd_data->refcount++;
+ gpd_data->ops = *ops;
+ if (td)
+ gpd_data->td = *td;
+
+ spin_unlock_irq(&dev->power.lock);
+ out:
device_pm_unlock();
pm_runtime_enable(dev);
+ if (gpd_data != gpd_data_new)
+ __pm_genpd_free_dev_data(dev, gpd_data_new);
+
return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
@@ -1576,10 +1657,13 @@ EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
* __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
* @dev: Device to remove the callbacks from.
* @clear_td: If set, clear the device's timing data too.
+ *
+ * This routine can only be called after pm_genpd_add_callbacks().
*/
int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
- struct pm_domain_data *pdd;
+ struct generic_pm_domain_data *gpd_data = NULL;
+ bool remove = false;
int ret = 0;
if (!(dev && dev->power.subsys_data))
@@ -1588,24 +1672,118 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
pm_runtime_disable(dev);
device_pm_lock();
- pdd = dev->power.subsys_data->domain_data;
- if (pdd) {
- struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+ spin_lock_irq(&dev->power.lock);
- gpd_data->ops = (struct gpd_dev_ops){ 0 };
+ if (dev->power.subsys_data->domain_data) {
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ gpd_data->ops = (struct gpd_dev_ops){ NULL };
if (clear_td)
gpd_data->td = (struct gpd_timing_data){ 0 };
+
+ if (--gpd_data->refcount == 0) {
+ dev->power.subsys_data->domain_data = NULL;
+ remove = true;
+ }
} else {
ret = -EINVAL;
}
+ spin_unlock_irq(&dev->power.lock);
+
device_pm_unlock();
pm_runtime_enable(dev);
- return ret;
+ if (ret)
+ return ret;
+
+ dev_pm_put_subsys_data(dev);
+ if (remove)
+ __pm_genpd_free_dev_data(dev, gpd_data);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+{
+ struct cpuidle_driver *cpuidle_drv;
+ struct gpd_cpu_data *cpu_data;
+ struct cpuidle_state *idle_state;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd) || state < 0)
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->cpu_data) {
+ ret = -EEXIST;
+ goto out;
+ }
+ cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
+ if (!cpu_data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ cpuidle_drv = cpuidle_driver_ref();
+ if (!cpuidle_drv) {
+ ret = -ENODEV;
+ goto out;
+ }
+ if (cpuidle_drv->state_count <= state) {
+ ret = -EINVAL;
+ goto err;
+ }
+ idle_state = &cpuidle_drv->states[state];
+ if (!idle_state->disabled) {
+ ret = -EAGAIN;
+ goto err;
+ }
+ cpu_data->idle_state = idle_state;
+ cpu_data->saved_exit_latency = idle_state->exit_latency;
+ genpd->cpu_data = cpu_data;
+ genpd_recalc_cpu_exit_latency(genpd);
+
+ out:
+ genpd_release_lock(genpd);
+ return ret;
+
+ err:
+ cpuidle_driver_unref();
+ goto out;
+}
+
+int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+ struct gpd_cpu_data *cpu_data;
+ struct cpuidle_state *idle_state;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd))
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ cpu_data = genpd->cpu_data;
+ if (!cpu_data) {
+ ret = -ENODEV;
+ goto out;
+ }
+ idle_state = cpu_data->idle_state;
+ if (!idle_state->disabled) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ idle_state->exit_latency = cpu_data->saved_exit_latency;
+ cpuidle_driver_unref();
+ genpd->cpu_data = NULL;
+ kfree(cpu_data);
+
+ out:
+ genpd_release_lock(genpd);
+ return ret;
+}
+
/* Default device callbacks for generic PM domains. */
/**
@@ -1615,16 +1793,24 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
static int pm_genpd_default_save_state(struct device *dev)
{
int (*cb)(struct device *__dev);
- struct device_driver *drv = dev->driver;
cb = dev_gpd_data(dev)->ops.save_state;
if (cb)
return cb(dev);
- if (drv && drv->pm && drv->pm->runtime_suspend)
- return drv->pm->runtime_suspend(dev);
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_suspend;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_suspend;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_suspend;
+ else
+ cb = NULL;
- return 0;
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_suspend;
+
+ return cb ? cb(dev) : 0;
}
/**
@@ -1634,16 +1820,24 @@ static int pm_genpd_default_save_state(struct device *dev)
static int pm_genpd_default_restore_state(struct device *dev)
{
int (*cb)(struct device *__dev);
- struct device_driver *drv = dev->driver;
cb = dev_gpd_data(dev)->ops.restore_state;
if (cb)
return cb(dev);
- if (drv && drv->pm && drv->pm->runtime_resume)
- return drv->pm->runtime_resume(dev);
+ if (dev->type && dev->type->pm)
+ cb = dev->type->pm->runtime_resume;
+ else if (dev->class && dev->class->pm)
+ cb = dev->class->pm->runtime_resume;
+ else if (dev->bus && dev->bus->pm)
+ cb = dev->bus->pm->runtime_resume;
+ else
+ cb = NULL;
- return 0;
+ if (!cb && dev->driver && dev->driver->pm)
+ cb = dev->driver->pm->runtime_resume;
+
+ return cb ? cb(dev) : 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e0fb5b0435a3..0113adc310dc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -28,7 +28,7 @@
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
-
+#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"
@@ -45,10 +45,10 @@ typedef int (*pm_callback_t)(struct device *);
*/
LIST_HEAD(dpm_list);
-LIST_HEAD(dpm_prepared_list);
-LIST_HEAD(dpm_suspended_list);
-LIST_HEAD(dpm_late_early_list);
-LIST_HEAD(dpm_noirq_list);
+static LIST_HEAD(dpm_prepared_list);
+static LIST_HEAD(dpm_suspended_list);
+static LIST_HEAD(dpm_late_early_list);
+static LIST_HEAD(dpm_noirq_list);
struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
@@ -166,7 +166,7 @@ static ktime_t initcall_debug_start(struct device *dev)
{
ktime_t calltime = ktime_set(0, 0);
- if (initcall_debug) {
+ if (pm_print_times_enabled) {
pr_info("calling %s+ @ %i, parent: %s\n",
dev_name(dev), task_pid_nr(current),
dev->parent ? dev_name(dev->parent) : "none");
@@ -181,7 +181,7 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
{
ktime_t delta, rettime;
- if (initcall_debug) {
+ if (pm_print_times_enabled) {
rettime = ktime_get();
delta = ktime_sub(rettime, calltime);
pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
@@ -467,6 +467,7 @@ static void dpm_resume_noirq(pm_message_t state)
mutex_unlock(&dpm_list_mtx);
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
+ cpuidle_resume();
}
/**
@@ -867,6 +868,7 @@ static int dpm_suspend_noirq(pm_message_t state)
ktime_t starttime = ktime_get();
int error = 0;
+ cpuidle_pause();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
while (!list_empty(&dpm_late_early_list)) {
@@ -989,8 +991,16 @@ static int dpm_suspend_late(pm_message_t state)
int dpm_suspend_end(pm_message_t state)
{
int error = dpm_suspend_late(state);
+ if (error)
+ return error;
+
+ error = dpm_suspend_noirq(state);
+ if (error) {
+ dpm_resume_early(state);
+ return error;
+ }
- return error ? : dpm_suspend_noirq(state);
+ return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);
@@ -1031,7 +1041,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_children(dev, async);
if (async_error)
- return 0;
+ goto Complete;
pm_runtime_get_noresume(dev);
if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
@@ -1040,7 +1050,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (pm_wakeup_pending()) {
pm_runtime_put_sync(dev);
async_error = -EBUSY;
- return 0;
+ goto Complete;
}
device_lock(dev);
@@ -1097,6 +1107,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
}
device_unlock(dev);
+
+ Complete:
complete_all(&dev->power.completion);
if (error) {
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index fd849a2c4fa8..74a67e0019a2 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -462,7 +462,7 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
static void __dev_pm_qos_drop_user_request(struct device *dev)
{
dev_pm_qos_remove_request(dev->power.pq_req);
- dev->power.pq_req = 0;
+ dev->power.pq_req = NULL;
}
/**
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 48be2ad4dd2c..b91dc6f1e914 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -474,6 +474,8 @@ static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL);
#endif
+#ifdef CONFIG_PM_SLEEP
+
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -500,6 +502,8 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(async, 0644, async_show, async_store);
+
+#endif
#endif /* CONFIG_PM_ADVANCED_DEBUG */
static struct attribute *power_attrs[] = {
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index b986b8660b0c..80f9ab9c3aa4 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -95,6 +95,9 @@ struct regmap {
/* if set, converts bulk rw to single rw */
bool use_single_rw;
+
+ struct rb_root range_tree;
+ void *selector_work_buf; /* Scratch buffer used for selector */
};
struct regcache_ops {
@@ -115,6 +118,20 @@ bool regmap_precious(struct regmap *map, unsigned int reg);
int _regmap_write(struct regmap *map, unsigned int reg,
unsigned int val);
+struct regmap_range_node {
+ struct rb_node node;
+
+ unsigned int range_min;
+ unsigned int range_max;
+
+ unsigned int selector_reg;
+ unsigned int selector_mask;
+ int selector_shift;
+
+ unsigned int window_start;
+ unsigned int window_len;
+};
+
#ifdef CONFIG_DEBUG_FS
extern void regmap_debugfs_initcall(void);
extern void regmap_debugfs_init(struct regmap *map, const char *name);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4fac4b9be88f..a89734621e51 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -24,14 +24,18 @@ struct regmap_irq_chip_data {
struct mutex lock;
struct regmap *map;
- struct regmap_irq_chip *chip;
+ const struct regmap_irq_chip *chip;
int irq_base;
struct irq_domain *domain;
+ int irq;
+ int wake_count;
+
unsigned int *status_buf;
unsigned int *mask_buf;
unsigned int *mask_buf_def;
+ unsigned int *wake_buf;
unsigned int irq_reg_stride;
};
@@ -71,6 +75,16 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
d->chip->mask_base + (i * map->reg_stride));
}
+ /* If we've changed our wakeup count propagate it to the parent */
+ if (d->wake_count < 0)
+ for (i = d->wake_count; i < 0; i++)
+ irq_set_irq_wake(d->irq, 0);
+ else if (d->wake_count > 0)
+ for (i = 0; i < d->wake_count; i++)
+ irq_set_irq_wake(d->irq, 1);
+
+ d->wake_count = 0;
+
mutex_unlock(&d->lock);
}
@@ -92,18 +106,41 @@ static void regmap_irq_disable(struct irq_data *data)
d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}
+static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ struct regmap *map = d->map;
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
+
+ if (!d->chip->wake_base)
+ return -EINVAL;
+
+ if (on) {
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ &= ~irq_data->mask;
+ d->wake_count++;
+ } else {
+ d->wake_buf[irq_data->reg_offset / map->reg_stride]
+ |= irq_data->mask;
+ d->wake_count--;
+ }
+
+ return 0;
+}
+
static struct irq_chip regmap_irq_chip = {
.name = "regmap",
.irq_bus_lock = regmap_irq_lock,
.irq_bus_sync_unlock = regmap_irq_sync_unlock,
.irq_disable = regmap_irq_disable,
.irq_enable = regmap_irq_enable,
+ .irq_set_wake = regmap_irq_set_wake,
};
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
struct regmap_irq_chip_data *data = d;
- struct regmap_irq_chip *chip = data->chip;
+ const struct regmap_irq_chip *chip = data->chip;
struct regmap *map = data->map;
int ret, i;
bool handled = false;
@@ -195,7 +232,7 @@ static struct irq_domain_ops regmap_domain_ops = {
* register values used by the IRQ controller over suspend and resume.
*/
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
- int irq_base, struct regmap_irq_chip *chip,
+ int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data)
{
struct regmap_irq_chip_data *d;
@@ -240,6 +277,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
if (!d->mask_buf_def)
goto err_alloc;
+ if (chip->wake_base) {
+ d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->wake_buf)
+ goto err_alloc;
+ }
+
+ d->irq = irq;
d->map = map;
d->chip = chip;
d->irq_base = irq_base;
@@ -294,6 +339,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
err_domain:
/* Should really dispose of the domain but... */
err_alloc:
+ kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->status_buf);
@@ -315,6 +361,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
free_irq(irq, d);
/* We should unmap the domain but... */
+ kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
kfree(d->status_buf);
@@ -346,6 +393,10 @@ EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
*/
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
+ /* Handle holes in the IRQ list */
+ if (!data->chip->irqs[irq].mask)
+ return -EINVAL;
+
return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index febd6de6c8ac..f05fc74dd84a 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -37,7 +37,7 @@ static int regmap_mmio_gather_write(void *context,
BUG_ON(reg_size != 4);
- offset = be32_to_cpup(reg);
+ offset = *(u32 *)reg;
while (val_size) {
switch (ctx->val_bytes) {
@@ -45,14 +45,14 @@ static int regmap_mmio_gather_write(void *context,
writeb(*(u8 *)val, ctx->regs + offset);
break;
case 2:
- writew(be16_to_cpup(val), ctx->regs + offset);
+ writew(*(u16 *)val, ctx->regs + offset);
break;
case 4:
- writel(be32_to_cpup(val), ctx->regs + offset);
+ writel(*(u32 *)val, ctx->regs + offset);
break;
#ifdef CONFIG_64BIT
case 8:
- writeq(be64_to_cpup(val), ctx->regs + offset);
+ writeq(*(u64 *)val, ctx->regs + offset);
break;
#endif
default:
@@ -83,7 +83,7 @@ static int regmap_mmio_read(void *context,
BUG_ON(reg_size != 4);
- offset = be32_to_cpup(reg);
+ offset = *(u32 *)reg;
while (val_size) {
switch (ctx->val_bytes) {
@@ -91,14 +91,14 @@ static int regmap_mmio_read(void *context,
*(u8 *)val = readb(ctx->regs + offset);
break;
case 2:
- *(u16 *)val = cpu_to_be16(readw(ctx->regs + offset));
+ *(u16 *)val = readw(ctx->regs + offset);
break;
case 4:
- *(u32 *)val = cpu_to_be32(readl(ctx->regs + offset));
+ *(u32 *)val = readl(ctx->regs + offset);
break;
#ifdef CONFIG_64BIT
case 8:
- *(u64 *)val = cpu_to_be32(readq(ctx->regs + offset));
+ *(u64 *)val = readq(ctx->regs + offset);
break;
#endif
default:
@@ -124,9 +124,11 @@ static struct regmap_bus regmap_mmio = {
.gather_write = regmap_mmio_gather_write,
.read = regmap_mmio_read,
.free_context = regmap_mmio_free_context,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};
-struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
+static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
const struct regmap_config *config)
{
struct regmap_mmio_context *ctx;
@@ -162,7 +164,15 @@ struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
if (config->reg_stride < min_stride)
return ERR_PTR(-EINVAL);
- ctx = kzalloc(GFP_KERNEL, sizeof(*ctx));
+ switch (config->reg_format_endian) {
+ case REGMAP_ENDIAN_DEFAULT:
+ case REGMAP_ENDIAN_NATIVE:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index c89aa01fb1de..c241ae2f2f10 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -15,12 +15,25 @@
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
+#include <linux/rbtree.h>
#define CREATE_TRACE_POINTS
#include <trace/events/regmap.h>
#include "internal.h"
+/*
+ * Sometimes for failures during very early init the trace
+ * infrastructure isn't available early enough to be used. For this
+ * sort of problem defining LOG_DEVICE will add printks for basic
+ * register I/O on a specific device.
+ */
+#undef LOG_DEVICE
+
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change);
+
bool regmap_writeable(struct regmap *map, unsigned int reg)
{
if (map->max_register && reg > map->max_register)
@@ -119,13 +132,19 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
b[0] = val << shift;
}
-static void regmap_format_16(void *buf, unsigned int val, unsigned int shift)
+static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
__be16 *b = buf;
b[0] = cpu_to_be16(val << shift);
}
+static void regmap_format_16_native(void *buf, unsigned int val,
+ unsigned int shift)
+{
+ *(u16 *)buf = val << shift;
+}
+
static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
u8 *b = buf;
@@ -137,13 +156,19 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
b[2] = val;
}
-static void regmap_format_32(void *buf, unsigned int val, unsigned int shift)
+static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
__be32 *b = buf;
b[0] = cpu_to_be32(val << shift);
}
+static void regmap_format_32_native(void *buf, unsigned int val,
+ unsigned int shift)
+{
+ *(u32 *)buf = val << shift;
+}
+
static unsigned int regmap_parse_8(void *buf)
{
u8 *b = buf;
@@ -151,7 +176,7 @@ static unsigned int regmap_parse_8(void *buf)
return b[0];
}
-static unsigned int regmap_parse_16(void *buf)
+static unsigned int regmap_parse_16_be(void *buf)
{
__be16 *b = buf;
@@ -160,6 +185,11 @@ static unsigned int regmap_parse_16(void *buf)
return b[0];
}
+static unsigned int regmap_parse_16_native(void *buf)
+{
+ return *(u16 *)buf;
+}
+
static unsigned int regmap_parse_24(void *buf)
{
u8 *b = buf;
@@ -170,7 +200,7 @@ static unsigned int regmap_parse_24(void *buf)
return ret;
}
-static unsigned int regmap_parse_32(void *buf)
+static unsigned int regmap_parse_32_be(void *buf)
{
__be32 *b = buf;
@@ -179,6 +209,11 @@ static unsigned int regmap_parse_32(void *buf)
return b[0];
}
+static unsigned int regmap_parse_32_native(void *buf)
+{
+ return *(u32 *)buf;
+}
+
static void regmap_lock_mutex(struct regmap *map)
{
mutex_lock(&map->mutex);
@@ -208,6 +243,67 @@ static void dev_get_regmap_release(struct device *dev, void *res)
*/
}
+static bool _regmap_range_add(struct regmap *map,
+ struct regmap_range_node *data)
+{
+ struct rb_root *root = &map->range_tree;
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ while (*new) {
+ struct regmap_range_node *this =
+ container_of(*new, struct regmap_range_node, node);
+
+ parent = *new;
+ if (data->range_max < this->range_min)
+ new = &((*new)->rb_left);
+ else if (data->range_min > this->range_max)
+ new = &((*new)->rb_right);
+ else
+ return false;
+ }
+
+ rb_link_node(&data->node, parent, new);
+ rb_insert_color(&data->node, root);
+
+ return true;
+}
+
+static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
+ unsigned int reg)
+{
+ struct rb_node *node = map->range_tree.rb_node;
+
+ while (node) {
+ struct regmap_range_node *this =
+ container_of(node, struct regmap_range_node, node);
+
+ if (reg < this->range_min)
+ node = node->rb_left;
+ else if (reg > this->range_max)
+ node = node->rb_right;
+ else
+ return this;
+ }
+
+ return NULL;
+}
+
+static void regmap_range_exit(struct regmap *map)
+{
+ struct rb_node *next;
+ struct regmap_range_node *range_node;
+
+ next = rb_first(&map->range_tree);
+ while (next) {
+ range_node = rb_entry(next, struct regmap_range_node, node);
+ next = rb_next(&range_node->node);
+ rb_erase(&range_node->node, &map->range_tree);
+ kfree(range_node);
+ }
+
+ kfree(map->selector_work_buf);
+}
+
/**
* regmap_init(): Initialise register map
*
@@ -227,6 +323,8 @@ struct regmap *regmap_init(struct device *dev,
{
struct regmap *map, **m;
int ret = -EINVAL;
+ enum regmap_endian reg_endian, val_endian;
+ int i, j;
if (!bus || !config)
goto err;
@@ -275,6 +373,18 @@ struct regmap *regmap_init(struct device *dev,
map->read_flag_mask = bus->read_flag_mask;
}
+ reg_endian = config->reg_format_endian;
+ if (reg_endian == REGMAP_ENDIAN_DEFAULT)
+ reg_endian = bus->reg_format_endian_default;
+ if (reg_endian == REGMAP_ENDIAN_DEFAULT)
+ reg_endian = REGMAP_ENDIAN_BIG;
+
+ val_endian = config->val_format_endian;
+ if (val_endian == REGMAP_ENDIAN_DEFAULT)
+ val_endian = bus->val_format_endian_default;
+ if (val_endian == REGMAP_ENDIAN_DEFAULT)
+ val_endian = REGMAP_ENDIAN_BIG;
+
switch (config->reg_bits + map->reg_shift) {
case 2:
switch (config->val_bits) {
@@ -321,11 +431,29 @@ struct regmap *regmap_init(struct device *dev,
break;
case 16:
- map->format.format_reg = regmap_format_16;
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_16_be;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_reg = regmap_format_16_native;
+ break;
+ default:
+ goto err_map;
+ }
break;
case 32:
- map->format.format_reg = regmap_format_32;
+ switch (reg_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_reg = regmap_format_32_be;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_reg = regmap_format_32_native;
+ break;
+ default:
+ goto err_map;
+ }
break;
default:
@@ -338,21 +466,47 @@ struct regmap *regmap_init(struct device *dev,
map->format.parse_val = regmap_parse_8;
break;
case 16:
- map->format.format_val = regmap_format_16;
- map->format.parse_val = regmap_parse_16;
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_16_be;
+ map->format.parse_val = regmap_parse_16_be;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_val = regmap_format_16_native;
+ map->format.parse_val = regmap_parse_16_native;
+ break;
+ default:
+ goto err_map;
+ }
break;
case 24:
+ if (val_endian != REGMAP_ENDIAN_BIG)
+ goto err_map;
map->format.format_val = regmap_format_24;
map->format.parse_val = regmap_parse_24;
break;
case 32:
- map->format.format_val = regmap_format_32;
- map->format.parse_val = regmap_parse_32;
+ switch (val_endian) {
+ case REGMAP_ENDIAN_BIG:
+ map->format.format_val = regmap_format_32_be;
+ map->format.parse_val = regmap_parse_32_be;
+ break;
+ case REGMAP_ENDIAN_NATIVE:
+ map->format.format_val = regmap_format_32_native;
+ map->format.parse_val = regmap_parse_32_native;
+ break;
+ default:
+ goto err_map;
+ }
break;
}
- if (map->format.format_write)
+ if (map->format.format_write) {
+ if ((reg_endian != REGMAP_ENDIAN_BIG) ||
+ (val_endian != REGMAP_ENDIAN_BIG))
+ goto err_map;
map->use_single_rw = true;
+ }
if (!map->format.format_write &&
!(map->format.format_reg && map->format.format_val))
@@ -364,27 +518,88 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
- regmap_debugfs_init(map, config->name);
+ map->range_tree = RB_ROOT;
+ for (i = 0; i < config->n_ranges; i++) {
+ const struct regmap_range_cfg *range_cfg = &config->ranges[i];
+ struct regmap_range_node *new;
+
+ /* Sanity check */
+ if (range_cfg->range_max < range_cfg->range_min ||
+ range_cfg->range_max > map->max_register ||
+ range_cfg->selector_reg > map->max_register ||
+ range_cfg->window_len == 0)
+ goto err_range;
+
+ /* Make sure, that this register range has no selector
+ or data window within its boundary */
+ for (j = 0; j < config->n_ranges; j++) {
+ unsigned sel_reg = config->ranges[j].selector_reg;
+ unsigned win_min = config->ranges[j].window_start;
+ unsigned win_max = win_min +
+ config->ranges[j].window_len - 1;
+
+ if (range_cfg->range_min <= sel_reg &&
+ sel_reg <= range_cfg->range_max) {
+ goto err_range;
+ }
+
+ if (!(win_max < range_cfg->range_min ||
+ win_min > range_cfg->range_max)) {
+ goto err_range;
+ }
+ }
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (new == NULL) {
+ ret = -ENOMEM;
+ goto err_range;
+ }
+
+ new->range_min = range_cfg->range_min;
+ new->range_max = range_cfg->range_max;
+ new->selector_reg = range_cfg->selector_reg;
+ new->selector_mask = range_cfg->selector_mask;
+ new->selector_shift = range_cfg->selector_shift;
+ new->window_start = range_cfg->window_start;
+ new->window_len = range_cfg->window_len;
+
+ if (_regmap_range_add(map, new) == false) {
+ kfree(new);
+ goto err_range;
+ }
+
+ if (map->selector_work_buf == NULL) {
+ map->selector_work_buf =
+ kzalloc(map->format.buf_size, GFP_KERNEL);
+ if (map->selector_work_buf == NULL) {
+ ret = -ENOMEM;
+ goto err_range;
+ }
+ }
+ }
ret = regcache_init(map, config);
if (ret < 0)
- goto err_debugfs;
+ goto err_range;
+
+ regmap_debugfs_init(map, config->name);
/* Add a devres resource for dev_get_regmap() */
m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
if (!m) {
ret = -ENOMEM;
- goto err_cache;
+ goto err_debugfs;
}
*m = map;
devres_add(dev, m);
return map;
-err_cache:
- regcache_exit(map);
err_debugfs:
regmap_debugfs_exit(map);
+ regcache_exit(map);
+err_range:
+ regmap_range_exit(map);
kfree(map->work_buf);
err_map:
kfree(map);
@@ -481,6 +696,7 @@ void regmap_exit(struct regmap *map)
{
regcache_exit(map);
regmap_debugfs_exit(map);
+ regmap_range_exit(map);
if (map->bus->free_context)
map->bus->free_context(map->bus_context);
kfree(map->work_buf);
@@ -526,6 +742,57 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
+static int _regmap_select_page(struct regmap *map, unsigned int *reg,
+ unsigned int val_num)
+{
+ struct regmap_range_node *range;
+ void *orig_work_buf;
+ unsigned int win_offset;
+ unsigned int win_page;
+ bool page_chg;
+ int ret;
+
+ range = _regmap_range_lookup(map, *reg);
+ if (range) {
+ win_offset = (*reg - range->range_min) % range->window_len;
+ win_page = (*reg - range->range_min) / range->window_len;
+
+ if (val_num > 1) {
+ /* Bulk write shouldn't cross range boundary */
+ if (*reg + val_num - 1 > range->range_max)
+ return -EINVAL;
+
+ /* ... or single page boundary */
+ if (val_num > range->window_len - win_offset)
+ return -EINVAL;
+ }
+
+ /* It is possible to have selector register inside data window.
+ In that case, selector register is located on every page and
+ it needs no page switching, when accessed alone. */
+ if (val_num > 1 ||
+ range->window_start + win_offset != range->selector_reg) {
+ /* Use separate work_buf during page switching */
+ orig_work_buf = map->work_buf;
+ map->work_buf = map->selector_work_buf;
+
+ ret = _regmap_update_bits(map, range->selector_reg,
+ range->selector_mask,
+ win_page << range->selector_shift,
+ &page_chg);
+
+ map->work_buf = orig_work_buf;
+
+ if (ret < 0)
+ return ret;
+ }
+
+ *reg = range->window_start + win_offset;
+ }
+
+ return 0;
+}
+
static int _regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
@@ -563,6 +830,10 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
}
}
+ ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
+ if (ret < 0)
+ return ret;
+
map->format.format_reg(map->work_buf, reg, map->reg_shift);
u8[0] |= map->write_flag_mask;
@@ -623,9 +894,18 @@ int _regmap_write(struct regmap *map, unsigned int reg,
}
}
+#ifdef LOG_DEVICE
+ if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ dev_info(map->dev, "%x <= %x\n", reg, val);
+#endif
+
trace_regmap_reg_write(map->dev, reg, val);
if (map->format.format_write) {
+ ret = _regmap_select_page(map, &reg, 1);
+ if (ret < 0)
+ return ret;
+
map->format.format_write(map, reg, val);
trace_regmap_hw_write_start(map->dev, reg, 1);
@@ -783,6 +1063,10 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
u8 *u8 = map->work_buf;
int ret;
+ ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
+ if (ret < 0)
+ return ret;
+
map->format.format_reg(map->work_buf, reg, map->reg_shift);
/*
@@ -826,6 +1110,12 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
if (ret == 0) {
*val = map->format.parse_val(map->work_buf);
+
+#ifdef LOG_DEVICE
+ if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
+ dev_info(map->dev, "%x => %x\n", reg, *val);
+#endif
+
trace_regmap_reg_read(map->dev, reg, *val);
}
@@ -982,11 +1272,9 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
int ret;
unsigned int tmp, orig;
- map->lock(map);
-
ret = _regmap_read(map, reg, &orig);
if (ret != 0)
- goto out;
+ return ret;
tmp = orig & ~mask;
tmp |= val & mask;
@@ -998,9 +1286,6 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
*change = false;
}
-out:
- map->unlock(map);
-
return ret;
}
@@ -1018,7 +1303,13 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val)
{
bool change;
- return _regmap_update_bits(map, reg, mask, val, &change);
+ int ret;
+
+ map->lock(map);
+ ret = _regmap_update_bits(map, reg, mask, val, &change);
+ map->unlock(map);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits);
@@ -1038,7 +1329,12 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
unsigned int mask, unsigned int val,
bool *change)
{
- return _regmap_update_bits(map, reg, mask, val, change);
+ int ret;
+
+ map->lock(map);
+ ret = _regmap_update_bits(map, reg, mask, val, change);
+ map->unlock(map);
+ return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index fb7c80fb721e..06b3207adebd 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -46,6 +46,25 @@ config BCMA_DRIVER_MIPS
If unsure, say N
+config BCMA_SFLASH
+ bool
+ depends on BCMA_DRIVER_MIPS && BROKEN
+ default y
+
+config BCMA_NFLASH
+ bool
+ depends on BCMA_DRIVER_MIPS && BROKEN
+ default y
+
+config BCMA_DRIVER_GMAC_CMN
+ bool "BCMA Broadcom GBIT MAC COMMON core driver"
+ depends on BCMA
+ help
+ Driver for the Broadcom GBIT MAC COMMON core attached to Broadcom
+ specific Advanced Microcontroller Bus.
+
+ If unsure, say N
+
config BCMA_DEBUG
bool "BCMA debugging"
depends on BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
index 82de24e5340c..8ad42d41b2f2 100644
--- a/drivers/bcma/Makefile
+++ b/drivers/bcma/Makefile
@@ -1,8 +1,11 @@
bcma-y += main.o scan.o core.o sprom.o
bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o
+bcma-$(CONFIG_BCMA_SFLASH) += driver_chipcommon_sflash.o
+bcma-$(CONFIG_BCMA_NFLASH) += driver_chipcommon_nflash.o
bcma-y += driver_pci.o
bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE) += driver_pci_host.o
bcma-$(CONFIG_BCMA_DRIVER_MIPS) += driver_mips.o
+bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN) += driver_gmac_cmn.o
bcma-$(CONFIG_BCMA_HOST_PCI) += host_pci.o
bcma-$(CONFIG_BCMA_HOST_SOC) += host_soc.o
obj-$(CONFIG_BCMA) += bcma.o
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index b81755bb4798..3cf9cc923cd2 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -10,6 +10,15 @@
#define BCMA_CORE_SIZE 0x1000
+#define bcma_err(bus, fmt, ...) \
+ pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+#define bcma_warn(bus, fmt, ...) \
+ pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+#define bcma_info(bus, fmt, ...) \
+ pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+#define bcma_debug(bus, fmt, ...) \
+ pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+
struct bcma_bus;
/* main.c */
@@ -42,6 +51,28 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc);
u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc);
+#ifdef CONFIG_BCMA_SFLASH
+/* driver_chipcommon_sflash.c */
+int bcma_sflash_init(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
+{
+ bcma_err(cc->core->bus, "Serial flash not supported\n");
+ return 0;
+}
+#endif /* CONFIG_BCMA_SFLASH */
+
+#ifdef CONFIG_BCMA_NFLASH
+/* driver_chipcommon_nflash.c */
+int bcma_nflash_init(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
+{
+ bcma_err(cc->core->bus, "NAND flash not supported\n");
+ return 0;
+}
+#endif /* CONFIG_BCMA_NFLASH */
+
#ifdef CONFIG_BCMA_HOST_PCI
/* host_pci.c */
extern int __init bcma_host_pci_init(void);
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index bc6e89212ad3..63c8b470536f 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -75,7 +75,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
udelay(10);
}
if (i)
- pr_err("HT force timeout\n");
+ bcma_err(core->bus, "HT force timeout\n");
break;
case BCMA_CLKMODE_DYNAMIC:
bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
@@ -102,9 +102,9 @@ void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
udelay(10);
}
if (i)
- pr_err("PLL enable timeout\n");
+ bcma_err(core->bus, "PLL enable timeout\n");
} else {
- pr_warn("Disabling PLL not supported yet!\n");
+ bcma_warn(core->bus, "Disabling PLL not supported yet!\n");
}
}
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
@@ -120,8 +120,8 @@ u32 bcma_core_dma_translation(struct bcma_device *core)
else
return BCMA_DMA_TRANSLATION_DMA32_CMT;
default:
- pr_err("DMA translation unknown for host %d\n",
- core->bus->hosttype);
+ bcma_err(core->bus, "DMA translation unknown for host %d\n",
+ core->bus->hosttype);
}
return BCMA_DMA_TRANSLATION_NONE;
}
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index e9f1b3fd252c..a4c3ebcc4c86 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -44,7 +44,7 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
if (cc->capabilities & BCMA_CC_CAP_PMU)
bcma_pmu_init(cc);
if (cc->capabilities & BCMA_CC_CAP_PCTL)
- pr_err("Power control not implemented!\n");
+ bcma_err(cc->core->bus, "Power control not implemented!\n");
if (cc->core->id.rev >= 16) {
if (cc->core->bus->sprom.leddc_on_time &&
@@ -137,8 +137,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
| BCMA_CC_CORECTL_UARTCLKEN);
}
} else {
- pr_err("serial not supported on this device ccrev: 0x%x\n",
- ccrev);
+ bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n", ccrev);
return;
}
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
new file mode 100644
index 000000000000..574d62435bc2
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -0,0 +1,19 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon NAND flash interface
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_driver_chipcommon.h>
+#include <linux/delay.h>
+
+#include "bcma_private.h"
+
+/* Initialize NAND flash access */
+int bcma_nflash_init(struct bcma_drv_cc *cc)
+{
+ bcma_err(cc->core->bus, "NAND flash support is broken\n");
+ return 0;
+}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index 61ce4054b3c3..44326178db29 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -3,7 +3,8 @@
* ChipCommon Power Management Unit driver
*
* Copyright 2009, Michael Buesch <m@bues.ch>
- * Copyright 2007, Broadcom Corporation
+ * Copyright 2007, 2011, Broadcom Corporation
+ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
@@ -54,39 +55,19 @@ void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
}
EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
-static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
-{
- struct bcma_bus *bus = cc->core->bus;
-
- switch (bus->chipinfo.id) {
- case 0x4313:
- case 0x4331:
- case 43224:
- case 43225:
- break;
- default:
- pr_err("PLL init unknown for device 0x%04X\n",
- bus->chipinfo.id);
- }
-}
-
static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
u32 min_msk = 0, max_msk = 0;
switch (bus->chipinfo.id) {
- case 0x4313:
+ case BCMA_CHIP_ID_BCM4313:
min_msk = 0x200D;
max_msk = 0xFFFF;
break;
- case 0x4331:
- case 43224:
- case 43225:
- break;
default:
- pr_err("PMU resource config unknown for device 0x%04X\n",
- bus->chipinfo.id);
+ bcma_debug(bus, "PMU resource config unknown or not needed for device 0x%04X\n",
+ bus->chipinfo.id);
}
/* Set the resource masks. */
@@ -94,22 +75,9 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
if (max_msk)
bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
-}
-
-void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
-{
- struct bcma_bus *bus = cc->core->bus;
- switch (bus->chipinfo.id) {
- case 0x4313:
- case 0x4331:
- case 43224:
- case 43225:
- break;
- default:
- pr_err("PMU switch/regulators init unknown for device "
- "0x%04X\n", bus->chipinfo.id);
- }
+ /* Add some delay; allow resources to come up and settle. */
+ mdelay(2);
}
/* Disable to allow reading SPROM. Don't know the adventages of enabling it. */
@@ -123,8 +91,11 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
val |= BCMA_CHIPCTL_4331_EXTPA_EN;
if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
+ else if (bus->chipinfo.rev > 0)
+ val |= BCMA_CHIPCTL_4331_EXTPA_EN2;
} else {
val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
+ val &= ~BCMA_CHIPCTL_4331_EXTPA_EN2;
val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
}
bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
@@ -135,28 +106,38 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
- case 0x4313:
- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
+ case BCMA_CHIP_ID_BCM4313:
+ /* enable 12 mA drive strenth for 4313 and set chipControl
+ register bit 1 */
+ bcma_chipco_chipctl_maskset(cc, 0,
+ BCMA_CCTRL_4313_12MA_LED_DRIVE,
+ BCMA_CCTRL_4313_12MA_LED_DRIVE);
break;
- case 0x4331:
- case 43431:
+ case BCMA_CHIP_ID_BCM4331:
+ case BCMA_CHIP_ID_BCM43431:
/* Ext PA lines must be enabled for tx on BCM4331 */
bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
break;
- case 43224:
+ case BCMA_CHIP_ID_BCM43224:
+ case BCMA_CHIP_ID_BCM43421:
+ /* enable 12 mA drive strenth for 43224 and set chipControl
+ register bit 15 */
if (bus->chipinfo.rev == 0) {
- pr_err("Workarounds for 43224 rev 0 not fully "
- "implemented\n");
- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x00F000F0);
+ bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
+ BCMA_CCTRL_43224_GPIO_TOGGLE,
+ BCMA_CCTRL_43224_GPIO_TOGGLE);
+ bcma_chipco_chipctl_maskset(cc, 0,
+ BCMA_CCTRL_43224A0_12MA_LED_DRIVE,
+ BCMA_CCTRL_43224A0_12MA_LED_DRIVE);
} else {
- bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
+ bcma_chipco_chipctl_maskset(cc, 0,
+ BCMA_CCTRL_43224B0_12MA_LED_DRIVE,
+ BCMA_CCTRL_43224B0_12MA_LED_DRIVE);
}
break;
- case 43225:
- break;
default:
- pr_err("Workarounds unknown for device 0x%04X\n",
- bus->chipinfo.id);
+ bcma_debug(bus, "Workarounds unknown or not needed for device 0x%04X\n",
+ bus->chipinfo.id);
}
}
@@ -167,8 +148,8 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
- pr_debug("Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
- pmucap);
+ bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
+ cc->pmu.rev, pmucap);
if (cc->pmu.rev == 1)
bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
@@ -177,12 +158,7 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
BCMA_CC_PMU_CTL_NOILPONW);
- if (cc->core->id.id == 0x4329 && cc->core->id.rev == 2)
- pr_err("Fix for 4329b0 bad LPOM state not implemented!\n");
-
- bcma_pmu_pll_init(cc);
bcma_pmu_resources_init(cc);
- bcma_pmu_swreg_init(cc);
bcma_pmu_workarounds(cc);
}
@@ -191,23 +167,22 @@ u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
- case 0x4716:
- case 0x4748:
- case 47162:
- case 0x4313:
- case 0x5357:
- case 0x4749:
- case 53572:
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM4748:
+ case BCMA_CHIP_ID_BCM47162:
+ case BCMA_CHIP_ID_BCM4313:
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
+ case BCMA_CHIP_ID_BCM53572:
/* always 20Mhz */
return 20000 * 1000;
- case 0x5356:
- case 0x5300:
+ case BCMA_CHIP_ID_BCM5356:
+ case BCMA_CHIP_ID_BCM4706:
/* always 25Mhz */
return 25000 * 1000;
default:
- pr_warn("No ALP clock specified for %04X device, "
- "pmu rev. %d, using default %d Hz\n",
- bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
+ bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
+ bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
}
return BCMA_CC_PMU_ALP_CLOCK;
}
@@ -224,7 +199,8 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
BUG_ON(!m || m > 4);
- if (bus->chipinfo.id == 0x5357 || bus->chipinfo.id == 0x4749) {
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) {
/* Detect failure in clock setting */
tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
if (tmp & 0x40000)
@@ -250,33 +226,62 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
return (fc / div) * 1000000;
}
+static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
+{
+ u32 tmp, ndiv, p1div, p2div;
+ u32 clock;
+
+ BUG_ON(!m || m > 4);
+
+ /* Get N, P1 and P2 dividers to determine CPU clock */
+ tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PMU6_4706_PROCPLL_OFF);
+ ndiv = (tmp & BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK)
+ >> BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT;
+ p1div = (tmp & BCMA_CC_PMU6_4706_PROC_P1DIV_MASK)
+ >> BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT;
+ p2div = (tmp & BCMA_CC_PMU6_4706_PROC_P2DIV_MASK)
+ >> BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT;
+
+ tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
+ if (tmp & BCMA_CC_CHIPST_4706_PKG_OPTION)
+ /* Low cost bonding: Fixed reference clock 25MHz and m = 4 */
+ clock = (25000000 / 4) * ndiv * p2div / p1div;
+ else
+ /* Fixed reference clock 25MHz and m = 2 */
+ clock = (25000000 / 2) * ndiv * p2div / p1div;
+
+ if (m == BCMA_CC_PMU5_MAINPLL_SSB)
+ clock = clock / 4;
+
+ return clock;
+}
+
/* query bus clock frequency for PMU-enabled chipcommon */
u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
- case 0x4716:
- case 0x4748:
- case 47162:
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM4748:
+ case BCMA_CHIP_ID_BCM47162:
return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
- case 0x5356:
+ case BCMA_CHIP_ID_BCM5356:
return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
- case 0x5357:
- case 0x4749:
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
- case 0x5300:
- return bcma_pmu_clock(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
- BCMA_CC_PMU5_MAINPLL_SSB);
- case 53572:
+ case BCMA_CHIP_ID_BCM4706:
+ return bcma_pmu_clock_bcm4706(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_SSB);
+ case BCMA_CHIP_ID_BCM53572:
return 75000000;
default:
- pr_warn("No backplane clock specified for %04X device, "
- "pmu rev. %d, using default %d Hz\n",
- bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
+ bcma_warn(bus, "No backplane clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
+ bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
}
return BCMA_CC_PMU_HT_CLOCK;
}
@@ -286,17 +291,21 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
- if (bus->chipinfo.id == 53572)
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572)
return 300000000;
if (cc->pmu.rev >= 5) {
u32 pll;
switch (bus->chipinfo.id) {
- case 0x5356:
+ case BCMA_CHIP_ID_BCM4706:
+ return bcma_pmu_clock_bcm4706(cc,
+ BCMA_CC_PMU4706_MAINPLL_PLL0,
+ BCMA_CC_PMU5_MAINPLL_CPU);
+ case BCMA_CHIP_ID_BCM5356:
pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
break;
- case 0x5357:
- case 0x4749:
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
break;
default:
@@ -304,10 +313,188 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
break;
}
- /* TODO: if (bus->chipinfo.id == 0x5300)
- return si_4706_pmu_clock(sih, osh, cc, PMU4706_MAINPLL_PLL0, PMU5_MAINPLL_CPU); */
return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
}
return bcma_pmu_get_clockcontrol(cc);
}
+
+static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
+ u32 value)
+{
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+}
+
+void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
+{
+ u32 tmp = 0;
+ u8 phypll_offset = 0;
+ u8 bcm5357_bcm43236_p1div[] = {0x1, 0x5, 0x5};
+ u8 bcm5357_bcm43236_ndiv[] = {0x30, 0xf6, 0xfc};
+ struct bcma_bus *bus = cc->core->bus;
+
+ switch (bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM5357:
+ case BCMA_CHIP_ID_BCM4749:
+ case BCMA_CHIP_ID_BCM53572:
+ /* 5357[ab]0, 43236[ab]0, and 6362b0 */
+
+ /* BCM5357 needs to touch PLL1_PLLCTL[02],
+ so offset PLL0_PLLCTL[02] by 6 */
+ phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
+
+ /* RMW only the P1 divider */
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+ BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
+ tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+ tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
+ tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+
+ /* RMW only the int feedback divider */
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+ BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
+ tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+ tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
+ tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
+ bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+
+ tmp = 1 << 10;
+ break;
+
+ case BCMA_CHIP_ID_BCM4331:
+ case BCMA_CHIP_ID_BCM43431:
+ if (spuravoid == 2) {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11500014);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x0FC00a08);
+ } else if (spuravoid == 1) {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11500014);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x0F600a08);
+ } else {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11100014);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x03000a08);
+ }
+ tmp = 1 << 10;
+ break;
+
+ case BCMA_CHIP_ID_BCM43224:
+ case BCMA_CHIP_ID_BCM43225:
+ case BCMA_CHIP_ID_BCM43421:
+ if (spuravoid == 1) {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11500010);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x000C0C06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x0F600a08);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x2001E920);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ } else {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11100010);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x000c0c06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x03000a08);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x200005c0);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ }
+ tmp = 1 << 10;
+ break;
+
+ case BCMA_CHIP_ID_BCM4716:
+ case BCMA_CHIP_ID_BCM4748:
+ case BCMA_CHIP_ID_BCM47162:
+ if (spuravoid == 1) {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11500060);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x080C0C06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x0F600000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x2001E924);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ } else {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11100060);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x080c0c06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x03000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x200005c0);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ }
+
+ tmp = 3 << 9;
+ break;
+
+ case BCMA_CHIP_ID_BCM43227:
+ case BCMA_CHIP_ID_BCM43228:
+ case BCMA_CHIP_ID_BCM43428:
+ /* LCNXN */
+ /* PLL Settings for spur avoidance on/off mode,
+ no on2 support for 43228A0 */
+ if (spuravoid == 1) {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x01100014);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x040C0C06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x03140A08);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00333333);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x202C2820);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ } else {
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+ 0x11100014);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+ 0x040c0c06);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+ 0x03000a08);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+ 0x00000000);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+ 0x200005c0);
+ bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+ 0x88888815);
+ }
+ tmp = 1 << 10;
+ break;
+ default:
+ bcma_err(bus, "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
+ bus->chipinfo.id);
+ break;
+ }
+
+ tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL);
+ bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp);
+}
+EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
new file mode 100644
index 000000000000..6e157a58a1d7
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -0,0 +1,19 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon serial flash interface
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_driver_chipcommon.h>
+#include <linux/delay.h>
+
+#include "bcma_private.h"
+
+/* Initialize serial flash access */
+int bcma_sflash_init(struct bcma_drv_cc *cc)
+{
+ bcma_err(cc->core->bus, "Serial flash support is broken\n");
+ return 0;
+}
diff --git a/drivers/bcma/driver_gmac_cmn.c b/drivers/bcma/driver_gmac_cmn.c
new file mode 100644
index 000000000000..834225f65e8f
--- /dev/null
+++ b/drivers/bcma/driver_gmac_cmn.c
@@ -0,0 +1,14 @@
+/*
+ * Broadcom specific AMBA
+ * GBIT MAC COMMON Core
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/bcma/bcma.h>
+
+void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
+{
+ mutex_init(&gc->phy_mutex);
+}
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index c3e9dff4224e..b013b049476d 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -22,15 +22,15 @@
/* The 47162a0 hangs when reading MIPS DMP registers registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
- return dev->bus->chipinfo.id == 47162 && dev->bus->chipinfo.rev == 0 &&
- dev->id.id == BCMA_CORE_MIPS_74K;
+ return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
+ dev->bus->chipinfo.rev == 0 && dev->id.id == BCMA_CORE_MIPS_74K;
}
/* The 5357b0 hangs when reading USB20H DMP registers */
static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev)
{
- return (dev->bus->chipinfo.id == 0x5357 ||
- dev->bus->chipinfo.id == 0x4749) &&
+ return (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
+ dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) &&
dev->bus->chipinfo.pkg == 11 &&
dev->id.id == BCMA_CORE_USB20_HOST;
}
@@ -143,8 +143,8 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
1 << irqflag);
}
- pr_info("set_irq: core 0x%04x, irq %d => %d\n",
- dev->id.id, oldirq + 2, irq + 2);
+ bcma_info(bus, "set_irq: core 0x%04x, irq %d => %d\n",
+ dev->id.id, oldirq + 2, irq + 2);
}
static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
@@ -173,7 +173,7 @@ u32 bcma_cpu_clock(struct bcma_drv_mips *mcore)
if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
return bcma_pmu_get_clockcpu(&bus->drv_cc);
- pr_err("No PMU available, need this to get the cpu clock\n");
+ bcma_err(bus, "No PMU available, need this to get the cpu clock\n");
return 0;
}
EXPORT_SYMBOL(bcma_cpu_clock);
@@ -185,10 +185,11 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
case BCMA_CC_FLASHT_ATSER:
- pr_err("Serial flash not supported.\n");
+ bcma_debug(bus, "Found serial flash\n");
+ bcma_sflash_init(&bus->drv_cc);
break;
case BCMA_CC_FLASHT_PARA:
- pr_info("found parallel flash.\n");
+ bcma_debug(bus, "Found parallel flash\n");
bus->drv_cc.pflash.window = 0x1c000000;
bus->drv_cc.pflash.window_size = 0x02000000;
@@ -199,7 +200,15 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
bus->drv_cc.pflash.buswidth = 2;
break;
default:
- pr_err("flash not supported.\n");
+ bcma_err(bus, "Flash type not supported\n");
+ }
+
+ if (bus->drv_cc.core->id.rev == 38 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+ if (bus->drv_cc.capabilities & BCMA_CC_CAP_NFLASH) {
+ bcma_debug(bus, "Found NAND flash\n");
+ bcma_nflash_init(&bus->drv_cc);
+ }
}
}
@@ -209,7 +218,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
struct bcma_device *core;
bus = mcore->core->bus;
- pr_info("Initializing MIPS core...\n");
+ bcma_info(bus, "Initializing MIPS core...\n");
if (!mcore->setup_done)
mcore->assigned_irqs = 1;
@@ -244,7 +253,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
break;
}
}
- pr_info("IRQ reconfiguration done\n");
+ bcma_info(bus, "IRQ reconfiguration done\n");
bcma_core_mips_dump_irq(bus);
if (mcore->setup_done)
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index b9a86edfec39..cbae2c231336 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -36,7 +36,7 @@ bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
return false;
if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
- pr_info("This PCI core is disabled and not working\n");
+ bcma_info(bus, "This PCI core is disabled and not working\n");
return false;
}
@@ -215,7 +215,8 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
} else {
writel(val, mmio);
- if (chipid == 0x4716 || chipid == 0x4748)
+ if (chipid == BCMA_CHIP_ID_BCM4716 ||
+ chipid == BCMA_CHIP_ID_BCM4748)
readl(mmio);
}
@@ -340,6 +341,7 @@ static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
*/
static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
+ struct bcma_bus *bus = pc->core->bus;
u8 cap_ptr, root_ctrl, root_cap, dev;
u16 val16;
int i;
@@ -378,7 +380,8 @@ static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
udelay(10);
}
if (val16 == 0x1)
- pr_err("PCI: Broken device in slot %d\n", dev);
+ bcma_err(bus, "PCI: Broken device in slot %d\n",
+ dev);
}
}
}
@@ -391,11 +394,11 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
u32 pci_membase_1G;
unsigned long io_map_base;
- pr_info("PCIEcore in host mode found\n");
+ bcma_info(bus, "PCIEcore in host mode found\n");
pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
if (!pc_host) {
- pr_err("can not allocate memory");
+ bcma_err(bus, "can not allocate memory");
return;
}
@@ -434,13 +437,14 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
* as mips can't generate 64-bit address on the
* backplane.
*/
- if (bus->chipinfo.id == 0x4716 || bus->chipinfo.id == 0x4748) {
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
BCMA_SOC_PCI_MEM_SZ - 1;
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
- } else if (bus->chipinfo.id == 0x5300) {
+ } else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 6c05cf470f96..11b32d2642df 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -18,7 +18,7 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
core->wrap);
core->bus->mapped_core = core;
- pr_debug("Switched to core: 0x%X\n", core->id.id);
+ bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
}
/* Provides access to the requested core. Returns base offset that has to be
@@ -188,7 +188,7 @@ static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
/* SSB needed additional powering up, do we have any AMBA PCI cards? */
if (!pci_is_pcie(dev))
- pr_err("PCI card detected, report problems.\n");
+ bcma_err(bus, "PCI card detected, report problems.\n");
/* Map MMIO */
err = -ENOMEM;
@@ -268,6 +268,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 7e138ec21357..758af9ccdef0 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -61,6 +61,13 @@ static struct bus_type bcma_bus_type = {
.dev_attrs = bcma_device_attrs,
};
+static u16 bcma_cc_core_id(struct bcma_bus *bus)
+{
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
+ return BCMA_CORE_4706_CHIPCOMMON;
+ return BCMA_CORE_CHIPCOMMON;
+}
+
struct bcma_device *bcma_find_core(struct bcma_bus *bus, u16 coreid)
{
struct bcma_device *core;
@@ -91,10 +98,12 @@ static int bcma_register_cores(struct bcma_bus *bus)
list_for_each_entry(core, &bus->cores, list) {
/* We support that cores ourself */
switch (core->id.id) {
+ case BCMA_CORE_4706_CHIPCOMMON:
case BCMA_CORE_CHIPCOMMON:
case BCMA_CORE_PCI:
case BCMA_CORE_PCIE:
case BCMA_CORE_MIPS_74K:
+ case BCMA_CORE_4706_MAC_GBIT_COMMON:
continue;
}
@@ -118,8 +127,9 @@ static int bcma_register_cores(struct bcma_bus *bus)
err = device_register(&core->dev);
if (err) {
- pr_err("Could not register dev for core 0x%03X\n",
- core->id.id);
+ bcma_err(bus,
+ "Could not register dev for core 0x%03X\n",
+ core->id.id);
continue;
}
core->dev_registered = true;
@@ -151,12 +161,12 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
if (err) {
- pr_err("Failed to scan: %d\n", err);
+ bcma_err(bus, "Failed to scan: %d\n", err);
return -1;
}
/* Init CC core */
- core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ core = bcma_find_core(bus, bcma_cc_core_id(bus));
if (core) {
bus->drv_cc.core = core;
bcma_core_chipcommon_init(&bus->drv_cc);
@@ -176,17 +186,24 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
bcma_core_pci_init(&bus->drv_pci);
}
+ /* Init GBIT MAC COMMON core */
+ core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
+ if (core) {
+ bus->drv_gmac_cmn.core = core;
+ bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
+ }
+
/* Try to get SPROM */
err = bcma_sprom_get(bus);
if (err == -ENOENT) {
- pr_err("No SPROM available\n");
+ bcma_err(bus, "No SPROM available\n");
} else if (err)
- pr_err("Failed to get SPROM: %d\n", err);
+ bcma_err(bus, "Failed to get SPROM: %d\n", err);
/* Register found cores */
bcma_register_cores(bus);
- pr_info("Bus registered\n");
+ bcma_info(bus, "Bus registered\n");
return 0;
}
@@ -207,14 +224,14 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
bcma_init_bus(bus);
match.manuf = BCMA_MANUF_BCM;
- match.id = BCMA_CORE_CHIPCOMMON;
+ match.id = bcma_cc_core_id(bus);
match.class = BCMA_CL_SIM;
match.rev = BCMA_ANY_REV;
/* Scan for chip common core */
err = bcma_bus_scan_early(bus, &match, core_cc);
if (err) {
- pr_err("Failed to scan for common core: %d\n", err);
+ bcma_err(bus, "Failed to scan for common core: %d\n", err);
return -1;
}
@@ -226,12 +243,12 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
/* Scan for mips core */
err = bcma_bus_scan_early(bus, &match, core_mips);
if (err) {
- pr_err("Failed to scan for mips core: %d\n", err);
+ bcma_err(bus, "Failed to scan for mips core: %d\n", err);
return -1;
}
/* Init CC core */
- core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ core = bcma_find_core(bus, bcma_cc_core_id(bus));
if (core) {
bus->drv_cc.core = core;
bcma_core_chipcommon_init(&bus->drv_cc);
@@ -244,7 +261,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
bcma_core_mips_init(&bus->drv_mips);
}
- pr_info("Early bus registered\n");
+ bcma_info(bus, "Early bus registered\n");
return 0;
}
@@ -270,8 +287,7 @@ int bcma_bus_resume(struct bcma_bus *bus)
struct bcma_device *core;
/* Init CC core */
- core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
- if (core) {
+ if (bus->drv_cc.core) {
bus->drv_cc.setup_done = false;
bcma_core_chipcommon_init(&bus->drv_cc);
}
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 5ed0718fc660..5672b13d0951 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -21,6 +21,7 @@ struct bcma_device_id_name {
};
static const struct bcma_device_id_name bcma_arm_device_names[] = {
+ { BCMA_CORE_4706_MAC_GBIT_COMMON, "BCM4706 GBit MAC Common" },
{ BCMA_CORE_ARM_1176, "ARM 1176" },
{ BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
{ BCMA_CORE_ARM_CM3, "ARM CM3" },
@@ -28,6 +29,11 @@ static const struct bcma_device_id_name bcma_arm_device_names[] = {
static const struct bcma_device_id_name bcma_bcm_device_names[] = {
{ BCMA_CORE_OOB_ROUTER, "OOB Router" },
+ { BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
+ { BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
+ { BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
+ { BCMA_CORE_AMEMC, "AMEMC (DDR)" },
+ { BCMA_CORE_ALTA, "ALTA (I2S)" },
{ BCMA_CORE_INVALID, "Invalid" },
{ BCMA_CORE_CHIPCOMMON, "ChipCommon" },
{ BCMA_CORE_ILINE20, "ILine 20" },
@@ -289,11 +295,15 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
/* check if component is a core at all */
if (wrappers[0] + wrappers[1] == 0) {
- /* we could save addrl of the router
- if (cid == BCMA_CORE_OOB_ROUTER)
- */
- bcma_erom_skip_component(bus, eromptr);
- return -ENXIO;
+ /* Some specific cores don't need wrappers */
+ switch (core->id.id) {
+ case BCMA_CORE_4706_MAC_GBIT_COMMON:
+ /* Not used yet: case BCMA_CORE_OOB_ROUTER: */
+ break;
+ default:
+ bcma_erom_skip_component(bus, eromptr);
+ return -ENXIO;
+ }
}
if (bcma_erom_is_bridge(bus, eromptr)) {
@@ -334,7 +344,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
if (tmp <= 0) {
return -EILSEQ;
} else {
- pr_info("Bridge found\n");
+ bcma_info(bus, "Bridge found\n");
return -ENXIO;
}
}
@@ -421,8 +431,8 @@ void bcma_init_bus(struct bcma_bus *bus)
chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
- pr_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
- chipinfo->id, chipinfo->rev, chipinfo->pkg);
+ bcma_info(bus, "Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
+ chipinfo->id, chipinfo->rev, chipinfo->pkg);
bus->init_done = true;
}
@@ -476,13 +486,12 @@ int bcma_bus_scan(struct bcma_bus *bus)
other_core = bcma_find_core_reverse(bus, core->id.id);
core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1;
- pr_info("Core %d found: %s "
- "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
- core->core_index, bcma_device_name(&core->id),
- core->id.manuf, core->id.id, core->id.rev,
- core->id.class);
+ bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
+ core->core_index, bcma_device_name(&core->id),
+ core->id.manuf, core->id.id, core->id.rev,
+ core->id.class);
- list_add(&core->list, &bus->cores);
+ list_add_tail(&core->list, &bus->cores);
}
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
@@ -532,13 +541,12 @@ int __init bcma_bus_scan_early(struct bcma_bus *bus,
core->core_index = core_num++;
bus->nr_cores++;
- pr_info("Core %d found: %s "
- "(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
- core->core_index, bcma_device_name(&core->id),
- core->id.manuf, core->id.id, core->id.rev,
- core->id.class);
+ bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
+ core->core_index, bcma_device_name(&core->id),
+ core->id.manuf, core->id.id, core->id.rev,
+ core->id.class);
- list_add(&core->list, &bus->cores);
+ list_add_tail(&core->list, &bus->cores);
err = 0;
break;
}
diff --git a/drivers/bcma/scan.h b/drivers/bcma/scan.h
index 113e6a66884c..30eb475e4d19 100644
--- a/drivers/bcma/scan.h
+++ b/drivers/bcma/scan.h
@@ -27,7 +27,7 @@
#define SCAN_CIB_NMW 0x0007C000
#define SCAN_CIB_NMW_SHIFT 14
#define SCAN_CIB_NSW 0x00F80000
-#define SCAN_CIB_NSW_SHIFT 17
+#define SCAN_CIB_NSW_SHIFT 19
#define SCAN_CIB_REV 0xFF000000
#define SCAN_CIB_REV_SHIFT 24
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index f16f42d36071..26823d97fd9f 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -60,11 +60,11 @@ static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
if (err)
goto fail;
- pr_debug("Using SPROM revision %d provided by"
- " platform.\n", bus->sprom.revision);
+ bcma_debug(bus, "Using SPROM revision %d provided by platform.\n",
+ bus->sprom.revision);
return 0;
fail:
- pr_warn("Using fallback SPROM failed (err %d)\n", err);
+ bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
return err;
}
@@ -468,11 +468,11 @@ static bool bcma_sprom_ext_available(struct bcma_bus *bus)
/* older chipcommon revisions use chip status register */
chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
switch (bus->chipinfo.id) {
- case 0x4313:
+ case BCMA_CHIP_ID_BCM4313:
present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT;
break;
- case 0x4331:
+ case BCMA_CHIP_ID_BCM4331:
present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT;
break;
@@ -494,16 +494,16 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
switch (bus->chipinfo.id) {
- case 0x4313:
+ case BCMA_CHIP_ID_BCM4313:
present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT;
break;
- case 0x4331:
+ case BCMA_CHIP_ID_BCM4331:
present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
break;
- case 43224:
- case 43225:
+ case BCMA_CHIP_ID_BCM43224:
+ case BCMA_CHIP_ID_BCM43225:
/* for these chips OTP is always available */
present = true;
break;
@@ -579,13 +579,15 @@ int bcma_sprom_get(struct bcma_bus *bus)
if (!sprom)
return -ENOMEM;
- if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
- pr_debug("SPROM offset 0x%x\n", offset);
+ bcma_debug(bus, "SPROM offset 0x%x\n", offset);
bcma_sprom_read(bus, offset, sprom);
- if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
+ if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
+ bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
err = bcma_sprom_valid(sprom);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b5c5ff53cb57..fcb956bb4b4c 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
first_word = 0;
spin_lock_irq(&b->bm_lock);
}
-
/* last page (respectively only page, for first page == last page) */
last_word = MLPP(el >> LN2_BPL);
- bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
+
+ /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples).
+ * ==> e = 32767, el = 32768, last_page = 2,
+ * and now last_word = 0.
+ * We do not want to touch last_page in this case,
+ * as we did not allocate it, it is not present in bitmap->bm_pages.
+ */
+ if (last_word)
+ bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
/* possibly trailing bits.
* example: (e & 63) == 63, el will be e+1.
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9c5c84946b05..8e93a6ac9bb6 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
- D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+ if (req->rq_state & RQ_LOCAL_ABORTED) {
+ _req_may_be_done(req, m);
+ break;
+ }
__drbd_chk_io_error(mdev, false);
goto_queue_for_net_read:
+ D_ASSERT(!(req->rq_state & RQ_NET_MASK));
+
/* no point in retrying if there is no good remote data,
* or we have no connection. */
if (mdev->state.pdsk != D_UP_TO_DATE) {
@@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s
return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr);
}
+static void maybe_pull_ahead(struct drbd_conf *mdev)
+{
+ int congested = 0;
+
+ /* If I don't even have good local storage, we can not reasonably try
+ * to pull ahead of the peer. We also need the local reference to make
+ * sure mdev->act_log is there.
+ * Note: caller has to make sure that net_conf is there.
+ */
+ if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+ return;
+
+ if (mdev->net_conf->cong_fill &&
+ atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
+ dev_info(DEV, "Congestion-fill threshold reached\n");
+ congested = 1;
+ }
+
+ if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
+ dev_info(DEV, "Congestion-extents threshold reached\n");
+ congested = 1;
+ }
+
+ if (congested) {
+ queue_barrier(mdev); /* last barrier, after mirrored writes */
+
+ if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
+ _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
+ else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
+ _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
+ }
+ put_ldev(mdev);
+}
+
static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time)
{
const int rw = bio_rw(bio);
@@ -972,29 +1011,8 @@ allocate_barrier:
_req_mod(req, queue_for_send_oos);
if (remote &&
- mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) {
- int congested = 0;
-
- if (mdev->net_conf->cong_fill &&
- atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) {
- dev_info(DEV, "Congestion-fill threshold reached\n");
- congested = 1;
- }
-
- if (mdev->act_log->used >= mdev->net_conf->cong_extents) {
- dev_info(DEV, "Congestion-extents threshold reached\n");
- congested = 1;
- }
-
- if (congested) {
- queue_barrier(mdev); /* last barrier, after mirrored writes */
-
- if (mdev->net_conf->on_congestion == OC_PULL_AHEAD)
- _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL);
- else /*mdev->net_conf->on_congestion == OC_DISCONNECT */
- _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL);
- }
- }
+ mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96)
+ maybe_pull_ahead(mdev);
spin_unlock_irq(&mdev->req_lock);
kfree(b); /* if someone else has beaten us to it... */
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index cce7df367b79..553f43a90953 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message)
if (drive == current_reqD)
drive = current_drive;
+ __cancel_delayed_work(&fd_timeout);
if (drive < 0 || drive >= N_DRIVE) {
delay = 20UL * HZ;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index bbca966f8f66..3bba65510d23 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1597,14 +1597,12 @@ static int loop_add(struct loop_device **l, int i)
struct gendisk *disk;
int err;
+ err = -ENOMEM;
lo = kzalloc(sizeof(*lo), GFP_KERNEL);
- if (!lo) {
- err = -ENOMEM;
+ if (!lo)
goto out;
- }
- err = idr_pre_get(&loop_index_idr, GFP_KERNEL);
- if (err < 0)
+ if (!idr_pre_get(&loop_index_idr, GFP_KERNEL))
goto out_free_dev;
if (i >= 0) {
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 76fa3deaee84..1788f491e0fb 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -780,9 +780,9 @@ static const struct block_device_operations mg_disk_ops = {
.getgeo = mg_getgeo
};
-static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
+static int mg_suspend(struct device *dev)
{
- struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
+ struct mg_drv_data *prv_data = dev->platform_data;
struct mg_host *host = prv_data->host;
if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
@@ -804,9 +804,9 @@ static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
return 0;
}
-static int mg_resume(struct platform_device *plat_dev)
+static int mg_resume(struct device *dev)
{
- struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
+ struct mg_drv_data *prv_data = dev->platform_data;
struct mg_host *host = prv_data->host;
if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
@@ -825,6 +825,8 @@ static int mg_resume(struct platform_device *plat_dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(mg_pm, mg_suspend, mg_resume);
+
static int mg_probe(struct platform_device *plat_dev)
{
struct mg_host *host;
@@ -1074,11 +1076,10 @@ static int mg_remove(struct platform_device *plat_dev)
static struct platform_driver mg_disk_driver = {
.probe = mg_probe,
.remove = mg_remove,
- .suspend = mg_suspend,
- .resume = mg_resume,
.driver = {
.name = MG_DEV_NAME,
.owner = THIS_MODULE,
+ .pm = &mg_pm,
}
};
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 264bc77dcb91..a8fddeb3d638 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -37,6 +37,7 @@
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
#include <linux/export.h>
+#include <linux/debugfs.h>
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -85,6 +86,7 @@ static int instance;
* allocated in mtip_init().
*/
static int mtip_major;
+static struct dentry *dfs_parent;
static DEFINE_SPINLOCK(rssd_index_lock);
static DEFINE_IDA(rssd_index_ida);
@@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
}
/*
- * Sysfs register/status dump.
+ * Sysfs status dump.
*
* @dev Pointer to the device structure, passed by the kernrel.
* @attr Pointer to the device_attribute structure passed by the kernel.
@@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
* return value
* The size, in bytes, of the data copied into buf.
*/
-static ssize_t mtip_hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_status(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- u32 group_allocated;
struct driver_data *dd = dev_to_disk(dev)->private_data;
int size = 0;
+
+ if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "thermal_shutdown\n");
+ else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "write_protect\n");
+ else
+ size += sprintf(buf, "%s", "online\n");
+
+ return size;
+}
+
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
+
+static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
+ size_t len, loff_t *offset)
+{
+ struct driver_data *dd = (struct driver_data *)f->private_data;
+ char buf[MTIP_DFS_MAX_BUF_SIZE];
+ u32 group_allocated;
+ int size = *offset;
int n;
- size += sprintf(&buf[size], "Hardware\n--------\n");
- size += sprintf(&buf[size], "S ACTive : [ 0x");
+ if (!len || size)
+ return 0;
+
+ if (size < 0)
+ return -EINVAL;
+
+ size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->s_active[n]));
size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "Command Issue : [ 0x");
+ size += sprintf(&buf[size], "H/ Command Issue : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->cmd_issue[n]));
size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "Completed : [ 0x");
+ size += sprintf(&buf[size], "H/ Completed : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
size += sprintf(&buf[size], "%08X ",
readl(dd->port->completed[n]));
size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
+ size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n",
readl(dd->port->mmio + PORT_IRQ_STAT));
- size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
+ size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n",
readl(dd->mmio + HOST_IRQ_STAT));
size += sprintf(&buf[size], "\n");
- size += sprintf(&buf[size], "Local\n-----\n");
- size += sprintf(&buf[size], "Allocated : [ 0x");
+ size += sprintf(&buf[size], "L/ Allocated : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
@@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
}
size += sprintf(&buf[size], "]\n");
- size += sprintf(&buf[size], "Commands in Q: [ 0x");
+ size += sprintf(&buf[size], "L/ Commands in Q : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--) {
if (sizeof(long) > sizeof(u32))
@@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
}
size += sprintf(&buf[size], "]\n");
- return size;
+ *offset = size <= len ? size : len;
+ size = copy_to_user(ubuf, buf, *offset);
+ if (size)
+ return -EFAULT;
+
+ return *offset;
}
-static ssize_t mtip_hw_show_status(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
+ size_t len, loff_t *offset)
{
- struct driver_data *dd = dev_to_disk(dev)->private_data;
- int size = 0;
+ struct driver_data *dd = (struct driver_data *)f->private_data;
+ char buf[MTIP_DFS_MAX_BUF_SIZE];
+ int size = *offset;
- if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
- size += sprintf(buf, "%s", "thermal_shutdown\n");
- else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
- size += sprintf(buf, "%s", "write_protect\n");
- else
- size += sprintf(buf, "%s", "online\n");
-
- return size;
-}
+ if (!len || size)
+ return 0;
-static ssize_t mtip_hw_show_flags(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct driver_data *dd = dev_to_disk(dev)->private_data;
- int size = 0;
+ if (size < 0)
+ return -EINVAL;
- size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
+ size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
dd->port->flags);
- size += sprintf(&buf[size], "Flag in dd struct : [ %08lX ]\n",
+ size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
dd->dd_flag);
- return size;
+ *offset = size <= len ? size : len;
+ size = copy_to_user(ubuf, buf, *offset);
+ if (size)
+ return -EFAULT;
+
+ return *offset;
}
-static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
-static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
+static const struct file_operations mtip_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mtip_hw_read_registers,
+ .llseek = no_llseek,
+};
+
+static const struct file_operations mtip_flags_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = mtip_hw_read_flags,
+ .llseek = no_llseek,
+};
/*
* Create the sysfs related attributes.
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
if (!kobj || !dd)
return -EINVAL;
- if (sysfs_create_file(kobj, &dev_attr_registers.attr))
- dev_warn(&dd->pdev->dev,
- "Error creating 'registers' sysfs entry\n");
if (sysfs_create_file(kobj, &dev_attr_status.attr))
dev_warn(&dd->pdev->dev,
"Error creating 'status' sysfs entry\n");
- if (sysfs_create_file(kobj, &dev_attr_flags.attr))
- dev_warn(&dd->pdev->dev,
- "Error creating 'flags' sysfs entry\n");
return 0;
}
@@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
if (!kobj || !dd)
return -EINVAL;
- sysfs_remove_file(kobj, &dev_attr_registers.attr);
sysfs_remove_file(kobj, &dev_attr_status.attr);
- sysfs_remove_file(kobj, &dev_attr_flags.attr);
return 0;
}
+static int mtip_hw_debugfs_init(struct driver_data *dd)
+{
+ if (!dfs_parent)
+ return -1;
+
+ dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent);
+ if (IS_ERR_OR_NULL(dd->dfs_node)) {
+ dev_warn(&dd->pdev->dev,
+ "Error creating node %s under debugfs\n",
+ dd->disk->disk_name);
+ dd->dfs_node = NULL;
+ return -1;
+ }
+
+ debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd,
+ &mtip_flags_fops);
+ debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd,
+ &mtip_regs_fops);
+
+ return 0;
+}
+
+static void mtip_hw_debugfs_exit(struct driver_data *dd)
+{
+ debugfs_remove_recursive(dd->dfs_node);
+}
+
+
/*
* Perform any init/resume time hardware setup
*
@@ -3730,6 +3784,7 @@ skip_create_disk:
mtip_hw_sysfs_init(dd, kobj);
kobject_put(kobj);
}
+ mtip_hw_debugfs_init(dd);
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -3755,6 +3810,8 @@ start_service_thread:
return rv;
kthread_run_error:
+ mtip_hw_debugfs_exit(dd);
+
/* Delete our gendisk. This also removes the device from /dev */
del_gendisk(dd->disk);
@@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd)
kobject_put(kobj);
}
}
+ mtip_hw_debugfs_exit(dd);
/*
* Delete our gendisk structure. This also removes the device
@@ -4152,10 +4210,20 @@ static int __init mtip_init(void)
}
mtip_major = error;
+ if (!dfs_parent) {
+ dfs_parent = debugfs_create_dir("rssd", NULL);
+ if (IS_ERR_OR_NULL(dfs_parent)) {
+ printk(KERN_WARNING "Error creating debugfs parent\n");
+ dfs_parent = NULL;
+ }
+ }
+
/* Register our PCI operations. */
error = pci_register_driver(&mtip_pci_driver);
- if (error)
+ if (error) {
+ debugfs_remove(dfs_parent);
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+ }
return error;
}
@@ -4172,6 +4240,8 @@ static int __init mtip_init(void)
*/
static void __exit mtip_exit(void)
{
+ debugfs_remove_recursive(dfs_parent);
+
/* Release the allocated major block device number. */
unregister_blkdev(mtip_major, MTIP_DRV_NAME);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b2c88da26b2a..f51fc23d17bb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -26,7 +26,6 @@
#include <linux/ata.h>
#include <linux/interrupt.h>
#include <linux/genhd.h>
-#include <linux/version.h>
/* Offset of Subsystem Device ID in pci confoguration space */
#define PCI_SUBSYSTEM_DEVICEID 0x2E
@@ -111,6 +110,8 @@
#define dbg_printk(format, arg...)
#endif
+#define MTIP_DFS_MAX_BUF_SIZE 1024
+
#define __force_bit2int (unsigned int __force)
enum {
@@ -447,6 +448,8 @@ struct driver_data {
unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+
+ struct dentry *dfs_node;
};
#endif
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 65665c9c42c6..8f428a8ab003 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -499,7 +499,7 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
/ sizeof (*ondisk))
return -EINVAL;
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
- snap_count * sizeof (*ondisk),
+ snap_count * sizeof(u64),
gfp_flags);
if (!header->snapc)
return -ENOMEM;
@@ -977,7 +977,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
op = (void *)(replyhead + 1);
rc = le32_to_cpu(replyhead->result);
bytes = le64_to_cpu(op->extent.length);
- read_op = (le32_to_cpu(op->op) == CEPH_OSD_OP_READ);
+ read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);
dout("rbd_req_cb bytes=%lld readop=%d rc=%d\n", bytes, read_op, rc);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index aa2712060bfb..9a72277a31df 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,6 +513,44 @@ static void process_page(unsigned long data)
}
}
+struct mm_plug_cb {
+ struct blk_plug_cb cb;
+ struct cardinfo *card;
+};
+
+static void mm_unplug(struct blk_plug_cb *cb)
+{
+ struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+
+ spin_lock_irq(&mmcb->card->lock);
+ activate(mmcb->card);
+ spin_unlock_irq(&mmcb->card->lock);
+ kfree(mmcb);
+}
+
+static int mm_check_plugged(struct cardinfo *card)
+{
+ struct blk_plug *plug = current->plug;
+ struct mm_plug_cb *mmcb;
+
+ if (!plug)
+ return 0;
+
+ list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
+ if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
+ return 1;
+ }
+ /* Not currently on the callback list */
+ mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
+ if (!mmcb)
+ return 0;
+
+ mmcb->card = card;
+ mmcb->cb.callback = mm_unplug;
+ list_add(&mmcb->cb.list, &plug->cb_list);
+ return 1;
+}
+
static void mm_make_request(struct request_queue *q, struct bio *bio)
{
struct cardinfo *card = q->queuedata;
@@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
+ if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card))
+ activate(card);
spin_unlock_irq(&card->lock);
return;
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 773cf27dc23f..9ad3b5ec1dc1 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
@@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
break;
case BLKIF_OP_DISCARD:
dst->u.discard.flag = src->u.discard.flag;
+ dst->u.discard.id = src->u.discard.id;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 60eed4bdd2e4..e4fb3374dcd2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info)
return free;
}
-static void add_id_to_freelist(struct blkfront_info *info,
+static int add_id_to_freelist(struct blkfront_info *info,
unsigned long id)
{
+ if (info->shadow[id].req.u.rw.id != id)
+ return -EINVAL;
+ if (info->shadow[id].request == NULL)
+ return -EINVAL;
info->shadow[id].req.u.rw.id = info->shadow_free;
info->shadow[id].request = NULL;
info->shadow_free = id;
+ return 0;
}
+static const char *op_name(int op)
+{
+ static const char *const names[] = {
+ [BLKIF_OP_READ] = "read",
+ [BLKIF_OP_WRITE] = "write",
+ [BLKIF_OP_WRITE_BARRIER] = "barrier",
+ [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
+ [BLKIF_OP_DISCARD] = "discard" };
+
+ if (op < 0 || op >= ARRAY_SIZE(names))
+ return "unknown";
+
+ if (!names[op])
+ return "reserved";
+
+ return names[op];
+}
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
unsigned int end = minor + nr;
@@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
bret = RING_GET_RESPONSE(&info->ring, i);
id = bret->id;
+ /*
+ * The backend has messed up and given us an id that we would
+ * never have given to it (we stamp it up to BLK_RING_SIZE -
+ * look in get_id_from_freelist.
+ */
+ if (id >= BLK_RING_SIZE) {
+ WARN(1, "%s: response to %s has incorrect id (%ld)\n",
+ info->gd->disk_name, op_name(bret->operation), id);
+ /* We can't safely get the 'struct request' as
+ * the id is busted. */
+ continue;
+ }
req = info->shadow[id].request;
if (bret->operation != BLKIF_OP_DISCARD)
blkif_completion(&info->shadow[id]);
- add_id_to_freelist(info, id);
+ if (add_id_to_freelist(info, id)) {
+ WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
+ info->gd->disk_name, op_name(bret->operation), id);
+ continue;
+ }
error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
- printk(KERN_WARNING "blkfront: %s: discard op failed\n",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
@@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
- printk(KERN_WARNING "blkfront: %s: write %s op failed\n",
- info->flush_op == BLKIF_OP_WRITE_BARRIER ?
- "barrier" : "flush disk cache",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
- printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
- info->flush_op == BLKIF_OP_WRITE_BARRIER ?
- "barrier" : "flush disk cache",
- info->gd->disk_name);
+ printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
+ info->gd->disk_name, op_name(bret->operation));
error = -EOPNOTSUPP;
}
if (unlikely(error)) {
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 5ccf142ef0b8..e9f203eadb1f 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -81,6 +81,18 @@ config BT_HCIUART_LL
Say Y here to compile support for HCILL protocol.
+config BT_HCIUART_3WIRE
+ bool "Three-wire UART (H5) protocol support"
+ depends on BT_HCIUART
+ help
+ The HCI Three-wire UART Transport Layer makes it possible to
+ user the Bluetooth HCI over a serial port interface. The HCI
+ Three-wire UART Transport Layer assumes that the UART
+ communication may have bit errors, overrun errors or burst
+ errors and thereby making CTS/RTS lines unnecessary.
+
+ Say Y here to compile support for Three-wire UART protocol.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index f4460f4f4b78..4afae20df512 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -28,4 +28,5 @@ hci_uart-$(CONFIG_BT_HCIUART_H4) += hci_h4.o
hci_uart-$(CONFIG_BT_HCIUART_BCSP) += hci_bcsp.o
hci_uart-$(CONFIG_BT_HCIUART_LL) += hci_ll.o
hci_uart-$(CONFIG_BT_HCIUART_ATH3K) += hci_ath.o
+hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o
hci_uart-objs := $(hci_uart-y)
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 1fcd92380356..66c3a6770c41 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -231,12 +231,12 @@ static void bluecard_write_wakeup(bluecard_info_t *info)
}
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
- register unsigned int offset;
- register unsigned char command;
- register unsigned long ready_bit;
+ unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int offset;
+ unsigned char command;
+ unsigned long ready_bit;
register struct sk_buff *skb;
- register int len;
+ int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
@@ -621,7 +621,6 @@ static int bluecard_hci_flush(struct hci_dev *hdev)
static int bluecard_hci_open(struct hci_dev *hdev)
{
bluecard_info_t *info = hci_get_drvdata(hdev);
- unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
@@ -630,6 +629,8 @@ static int bluecard_hci_open(struct hci_dev *hdev)
return 0;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
+ unsigned int iobase = info->p_dev->resource[0]->start;
+
/* Enable LED */
outb(0x08 | 0x20, iobase + 0x30);
}
@@ -641,7 +642,6 @@ static int bluecard_hci_open(struct hci_dev *hdev)
static int bluecard_hci_close(struct hci_dev *hdev)
{
bluecard_info_t *info = hci_get_drvdata(hdev);
- unsigned int iobase = info->p_dev->resource[0]->start;
if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags)))
return 0;
@@ -649,6 +649,8 @@ static int bluecard_hci_close(struct hci_dev *hdev)
bluecard_hci_flush(hdev);
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
+ unsigned int iobase = info->p_dev->resource[0]->start;
+
/* Disable LED */
outb(0x00, iobase + 0x30);
}
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 609861a53c28..29caaed2d715 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -470,7 +470,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
hdev->flush = bpa10x_flush;
hdev->send = bpa10x_send_frame;
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
err = hci_register_dev(hdev);
if (err < 0) {
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 308c8599ab55..8925b6d672a6 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -186,9 +186,9 @@ static void bt3c_write_wakeup(bt3c_info_t *info)
return;
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
- register int len;
+ int len;
if (!pcmcia_dev_present(info->p_dev))
break;
@@ -664,7 +664,7 @@ static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
{
int *try = priv_data;
- if (try == 0)
+ if (!try)
p_dev->io_lines = 16;
if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index dc304def8400..3a4343b3bd6d 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -47,10 +47,11 @@ EXPORT_SYMBOL_GPL(btmrvl_interrupt);
bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
- struct hci_ev_cmd_complete *ec;
- u16 opcode, ocf, ogf;
if (hdr->evt == HCI_EV_CMD_COMPLETE) {
+ struct hci_ev_cmd_complete *ec;
+ u16 opcode, ocf, ogf;
+
ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE);
opcode = __le16_to_cpu(ec->opcode);
ocf = hci_opcode_ocf(opcode);
@@ -64,7 +65,8 @@ bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb)
}
if (ogf == OGF) {
- BT_DBG("vendor event skipped: ogf 0x%4.4x", ogf);
+ BT_DBG("vendor event skipped: ogf 0x%4.4x ocf 0x%4.4x",
+ ogf, ocf);
kfree_skb(skb);
return false;
}
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 0cd61d9f07cd..6a9e9717d3ab 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -110,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8787 Bluetooth AMP device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
/* Marvell SD8797 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
@@ -565,8 +568,9 @@ static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv)
if (type == HCI_EVENT_PKT) {
if (btmrvl_check_evtpkt(priv, skb))
hci_recv_frame(skb);
- } else
+ } else {
hci_recv_frame(skb);
+ }
hdev->stat.byte_rx += buf_len;
break;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index c4fc2f3fc32c..21e803a6a281 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -140,9 +140,9 @@ static void btuart_write_wakeup(btuart_info_t *info)
}
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
- register int len;
+ int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
@@ -593,7 +593,7 @@ static int btuart_check_config(struct pcmcia_device *p_dev, void *priv_data)
{
int *try = priv_data;
- if (try == 0)
+ if (!try)
p_dev->io_lines = 16;
if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 83ebb241bfcc..e27221411036 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -21,15 +21,7 @@
*
*/
-#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/skbuff.h>
-
#include <linux/usb.h>
#include <net/bluetooth/bluetooth.h>
@@ -1028,7 +1020,7 @@ static int btusb_probe(struct usb_interface *intf,
data->isoc = usb_ifnum_to_if(data->udev, 1);
if (!reset)
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
if (!disable_scofix)
@@ -1040,7 +1032,7 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
}
if (id->driver_info & BTUSB_CSR) {
@@ -1048,7 +1040,7 @@ static int btusb_probe(struct usb_interface *intf,
/* Old firmware would otherwise execute USB reset */
if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
}
if (id->driver_info & BTUSB_SNIFFER) {
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 6e8d96189684..97a7784db4a2 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -144,9 +144,9 @@ static void dtl1_write_wakeup(dtl1_info_t *info)
}
do {
- register unsigned int iobase = info->p_dev->resource[0]->start;
+ unsigned int iobase = info->p_dev->resource[0]->start;
register struct sk_buff *skb;
- register int len;
+ int len;
clear_bit(XMIT_WAKEUP, &(info->tx_state));
@@ -586,29 +586,31 @@ static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data)
static int dtl1_config(struct pcmcia_device *link)
{
dtl1_info_t *info = link->priv;
- int i;
+ int ret;
/* Look for a generic full-sized window */
link->resource[0]->end = 8;
- if (pcmcia_loop_config(link, dtl1_confcheck, NULL) < 0)
+ ret = pcmcia_loop_config(link, dtl1_confcheck, NULL);
+ if (ret)
goto failed;
- i = pcmcia_request_irq(link, dtl1_interrupt);
- if (i != 0)
+ ret = pcmcia_request_irq(link, dtl1_interrupt);
+ if (ret)
goto failed;
- i = pcmcia_enable_device(link);
- if (i != 0)
+ ret = pcmcia_enable_device(link);
+ if (ret)
goto failed;
- if (dtl1_open(info) != 0)
+ ret = dtl1_open(info);
+ if (ret)
goto failed;
return 0;
failed:
dtl1_detach(link);
- return -ENODEV;
+ return ret;
}
static const struct pcmcia_device_id dtl1_ids[] = {
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 661a8dc4d2f8..57e502e06080 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -552,7 +552,7 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp)
static int bcsp_recv(struct hci_uart *hu, void *data, int count)
{
struct bcsp_struct *bcsp = hu->priv;
- register unsigned char *ptr;
+ unsigned char *ptr;
BT_DBG("hu %p count %d rx_state %d rx_count %ld",
hu, count, bcsp->rx_state, bcsp->rx_count);
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 748329468d26..c60623f206d4 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -126,7 +126,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
static inline int h4_check_data_len(struct h4_struct *h4, int len)
{
- register int room = skb_tailroom(h4->rx_skb);
+ int room = skb_tailroom(h4->rx_skb);
BT_DBG("len %d room %d", len, room);
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
new file mode 100644
index 000000000000..b6154d5a07a5
--- /dev/null
+++ b/drivers/bluetooth/hci_h5.c
@@ -0,0 +1,747 @@
+/*
+ *
+ * Bluetooth HCI Three-wire UART driver
+ *
+ * Copyright (C) 2012 Intel Corporation
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "hci_uart.h"
+
+#define HCI_3WIRE_ACK_PKT 0
+#define HCI_3WIRE_LINK_PKT 15
+
+/* Sliding window size */
+#define H5_TX_WIN_MAX 4
+
+#define H5_ACK_TIMEOUT msecs_to_jiffies(250)
+#define H5_SYNC_TIMEOUT msecs_to_jiffies(100)
+
+/*
+ * Maximum Three-wire packet:
+ * 4 byte header + max value for 12-bit length + 2 bytes for CRC
+ */
+#define H5_MAX_LEN (4 + 0xfff + 2)
+
+/* Convenience macros for reading Three-wire header values */
+#define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07)
+#define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07)
+#define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01)
+#define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01)
+#define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f)
+#define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0xff) + ((hdr)[2] << 4))
+
+#define SLIP_DELIMITER 0xc0
+#define SLIP_ESC 0xdb
+#define SLIP_ESC_DELIM 0xdc
+#define SLIP_ESC_ESC 0xdd
+
+/* H5 state flags */
+enum {
+ H5_RX_ESC, /* SLIP escape mode */
+ H5_TX_ACK_REQ, /* Pending ack to send */
+};
+
+struct h5 {
+ struct sk_buff_head unack; /* Unack'ed packets queue */
+ struct sk_buff_head rel; /* Reliable packets queue */
+ struct sk_buff_head unrel; /* Unreliable packets queue */
+
+ unsigned long flags;
+
+ struct sk_buff *rx_skb; /* Receive buffer */
+ size_t rx_pending; /* Expecting more bytes */
+ u8 rx_ack; /* Last ack number received */
+
+ int (*rx_func) (struct hci_uart *hu, u8 c);
+
+ struct timer_list timer; /* Retransmission timer */
+
+ u8 tx_seq; /* Next seq number to send */
+ u8 tx_ack; /* Next ack number to send */
+ u8 tx_win; /* Sliding window size */
+
+ enum {
+ H5_UNINITIALIZED,
+ H5_INITIALIZED,
+ H5_ACTIVE,
+ } state;
+
+ enum {
+ H5_AWAKE,
+ H5_SLEEPING,
+ H5_WAKING_UP,
+ } sleep;
+};
+
+static void h5_reset_rx(struct h5 *h5);
+
+static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
+{
+ struct h5 *h5 = hu->priv;
+ struct sk_buff *nskb;
+
+ nskb = alloc_skb(3, GFP_ATOMIC);
+ if (!nskb)
+ return;
+
+ bt_cb(nskb)->pkt_type = HCI_3WIRE_LINK_PKT;
+
+ memcpy(skb_put(nskb, len), data, len);
+
+ skb_queue_tail(&h5->unrel, nskb);
+}
+
+static u8 h5_cfg_field(struct h5 *h5)
+{
+ u8 field = 0;
+
+ /* Sliding window size (first 3 bits) */
+ field |= (h5->tx_win & 7);
+
+ return field;
+}
+
+static void h5_timed_event(unsigned long arg)
+{
+ const unsigned char sync_req[] = { 0x01, 0x7e };
+ unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
+ struct hci_uart *hu = (struct hci_uart *) arg;
+ struct h5 *h5 = hu->priv;
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ BT_DBG("%s", hu->hdev->name);
+
+ if (h5->state == H5_UNINITIALIZED)
+ h5_link_control(hu, sync_req, sizeof(sync_req));
+
+ if (h5->state == H5_INITIALIZED) {
+ conf_req[2] = h5_cfg_field(h5);
+ h5_link_control(hu, conf_req, sizeof(conf_req));
+ }
+
+ if (h5->state != H5_ACTIVE) {
+ mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
+ goto wakeup;
+ }
+
+ if (h5->sleep != H5_AWAKE) {
+ h5->sleep = H5_SLEEPING;
+ goto wakeup;
+ }
+
+ BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);
+
+ spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
+
+ while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
+ h5->tx_seq = (h5->tx_seq - 1) & 0x07;
+ skb_queue_head(&h5->rel, skb);
+ }
+
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+
+wakeup:
+ hci_uart_tx_wakeup(hu);
+}
+
+static int h5_open(struct hci_uart *hu)
+{
+ struct h5 *h5;
+ const unsigned char sync[] = { 0x01, 0x7e };
+
+ BT_DBG("hu %p", hu);
+
+ h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
+ if (!h5)
+ return -ENOMEM;
+
+ hu->priv = h5;
+
+ skb_queue_head_init(&h5->unack);
+ skb_queue_head_init(&h5->rel);
+ skb_queue_head_init(&h5->unrel);
+
+ h5_reset_rx(h5);
+
+ init_timer(&h5->timer);
+ h5->timer.function = h5_timed_event;
+ h5->timer.data = (unsigned long) hu;
+
+ h5->tx_win = H5_TX_WIN_MAX;
+
+ set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
+
+ /* Send initial sync request */
+ h5_link_control(hu, sync, sizeof(sync));
+ mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
+
+ return 0;
+}
+
+static int h5_close(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+
+ skb_queue_purge(&h5->unack);
+ skb_queue_purge(&h5->rel);
+ skb_queue_purge(&h5->unrel);
+
+ del_timer(&h5->timer);
+
+ kfree(h5);
+
+ return 0;
+}
+
+static void h5_pkt_cull(struct h5 *h5)
+{
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ int i, to_remove;
+ u8 seq;
+
+ spin_lock_irqsave(&h5->unack.lock, flags);
+
+ to_remove = skb_queue_len(&h5->unack);
+ if (to_remove == 0)
+ goto unlock;
+
+ seq = h5->tx_seq;
+
+ while (to_remove > 0) {
+ if (h5->rx_ack == seq)
+ break;
+
+ to_remove--;
+ seq = (seq - 1) % 8;
+ }
+
+ if (seq != h5->rx_ack)
+ BT_ERR("Controller acked invalid packet");
+
+ i = 0;
+ skb_queue_walk_safe(&h5->unack, skb, tmp) {
+ if (i++ >= to_remove)
+ break;
+
+ __skb_unlink(skb, &h5->unack);
+ kfree_skb(skb);
+ }
+
+ if (skb_queue_empty(&h5->unack))
+ del_timer(&h5->timer);
+
+unlock:
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+}
+
+static void h5_handle_internal_rx(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char sync_req[] = { 0x01, 0x7e };
+ const unsigned char sync_rsp[] = { 0x02, 0x7d };
+ unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
+ const unsigned char conf_rsp[] = { 0x04, 0x7b };
+ const unsigned char wakeup_req[] = { 0x05, 0xfa };
+ const unsigned char woken_req[] = { 0x06, 0xf9 };
+ const unsigned char sleep_req[] = { 0x07, 0x78 };
+ const unsigned char *hdr = h5->rx_skb->data;
+ const unsigned char *data = &h5->rx_skb->data[4];
+
+ BT_DBG("%s", hu->hdev->name);
+
+ if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
+ return;
+
+ if (H5_HDR_LEN(hdr) < 2)
+ return;
+
+ conf_req[2] = h5_cfg_field(h5);
+
+ if (memcmp(data, sync_req, 2) == 0) {
+ h5_link_control(hu, sync_rsp, 2);
+ } else if (memcmp(data, sync_rsp, 2) == 0) {
+ h5->state = H5_INITIALIZED;
+ h5_link_control(hu, conf_req, 3);
+ } else if (memcmp(data, conf_req, 2) == 0) {
+ h5_link_control(hu, conf_rsp, 2);
+ h5_link_control(hu, conf_req, 3);
+ } else if (memcmp(data, conf_rsp, 2) == 0) {
+ if (H5_HDR_LEN(hdr) > 2)
+ h5->tx_win = (data[2] & 7);
+ BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
+ h5->state = H5_ACTIVE;
+ hci_uart_init_ready(hu);
+ return;
+ } else if (memcmp(data, sleep_req, 2) == 0) {
+ BT_DBG("Peer went to sleep");
+ h5->sleep = H5_SLEEPING;
+ return;
+ } else if (memcmp(data, woken_req, 2) == 0) {
+ BT_DBG("Peer woke up");
+ h5->sleep = H5_AWAKE;
+ } else if (memcmp(data, wakeup_req, 2) == 0) {
+ BT_DBG("Peer requested wakeup");
+ h5_link_control(hu, woken_req, 2);
+ h5->sleep = H5_AWAKE;
+ } else {
+ BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
+ return;
+ }
+
+ hci_uart_tx_wakeup(hu);
+}
+
+static void h5_complete_rx_pkt(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+
+ if (H5_HDR_RELIABLE(hdr)) {
+ h5->tx_ack = (h5->tx_ack + 1) % 8;
+ set_bit(H5_TX_ACK_REQ, &h5->flags);
+ hci_uart_tx_wakeup(hu);
+ }
+
+ h5->rx_ack = H5_HDR_ACK(hdr);
+
+ h5_pkt_cull(h5);
+
+ switch (H5_HDR_PKT_TYPE(hdr)) {
+ case HCI_EVENT_PKT:
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr);
+
+ /* Remove Three-wire header */
+ skb_pull(h5->rx_skb, 4);
+
+ hci_recv_frame(h5->rx_skb);
+ h5->rx_skb = NULL;
+
+ break;
+
+ default:
+ h5_handle_internal_rx(hu);
+ break;
+ }
+
+ h5_reset_rx(h5);
+}
+
+static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+
+ h5_complete_rx_pkt(hu);
+ h5_reset_rx(h5);
+
+ return 0;
+}
+
+static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+
+ if (H5_HDR_CRC(hdr)) {
+ h5->rx_func = h5_rx_crc;
+ h5->rx_pending = 2;
+ } else {
+ h5_complete_rx_pkt(hu);
+ h5_reset_rx(h5);
+ }
+
+ return 0;
+}
+
+static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+ const unsigned char *hdr = h5->rx_skb->data;
+
+ BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
+ hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
+ H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
+ H5_HDR_LEN(hdr));
+
+ if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
+ BT_ERR("Invalid header checksum");
+ h5_reset_rx(h5);
+ return 0;
+ }
+
+ if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
+ BT_ERR("Out-of-order packet arrived (%u != %u)",
+ H5_HDR_SEQ(hdr), h5->tx_ack);
+ h5_reset_rx(h5);
+ return 0;
+ }
+
+ if (h5->state != H5_ACTIVE &&
+ H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
+ BT_ERR("Non-link packet received in non-active state");
+ h5_reset_rx(h5);
+ }
+
+ h5->rx_func = h5_rx_payload;
+ h5->rx_pending = H5_HDR_LEN(hdr);
+
+ return 0;
+}
+
+static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (c == SLIP_DELIMITER)
+ return 1;
+
+ h5->rx_func = h5_rx_3wire_hdr;
+ h5->rx_pending = 4;
+
+ h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
+ if (!h5->rx_skb) {
+ BT_ERR("Can't allocate mem for new packet");
+ h5_reset_rx(h5);
+ return -ENOMEM;
+ }
+
+ h5->rx_skb->dev = (void *) hu->hdev;
+
+ return 0;
+}
+
+static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (c == SLIP_DELIMITER)
+ h5->rx_func = h5_rx_pkt_start;
+
+ return 1;
+}
+
+static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
+{
+ const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
+ const u8 *byte = &c;
+
+ if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
+ set_bit(H5_RX_ESC, &h5->flags);
+ return;
+ }
+
+ if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
+ switch (c) {
+ case SLIP_ESC_DELIM:
+ byte = &delim;
+ break;
+ case SLIP_ESC_ESC:
+ byte = &esc;
+ break;
+ default:
+ BT_ERR("Invalid esc byte 0x%02hhx", c);
+ h5_reset_rx(h5);
+ return;
+ }
+ }
+
+ memcpy(skb_put(h5->rx_skb, 1), byte, 1);
+ h5->rx_pending--;
+
+ BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
+}
+
+static void h5_reset_rx(struct h5 *h5)
+{
+ if (h5->rx_skb) {
+ kfree_skb(h5->rx_skb);
+ h5->rx_skb = NULL;
+ }
+
+ h5->rx_func = h5_rx_delimiter;
+ h5->rx_pending = 0;
+ clear_bit(H5_RX_ESC, &h5->flags);
+}
+
+static int h5_recv(struct hci_uart *hu, void *data, int count)
+{
+ struct h5 *h5 = hu->priv;
+ unsigned char *ptr = data;
+
+ BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
+ count);
+
+ while (count > 0) {
+ int processed;
+
+ if (h5->rx_pending > 0) {
+ if (*ptr == SLIP_DELIMITER) {
+ BT_ERR("Too short H5 packet");
+ h5_reset_rx(h5);
+ continue;
+ }
+
+ h5_unslip_one_byte(h5, *ptr);
+
+ ptr++; count--;
+ continue;
+ }
+
+ processed = h5->rx_func(hu, *ptr);
+ if (processed < 0)
+ return processed;
+
+ ptr += processed;
+ count -= processed;
+ }
+
+ return 0;
+}
+
+static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
+{
+ struct h5 *h5 = hu->priv;
+
+ if (skb->len > 0xfff) {
+ BT_ERR("Packet too long (%u bytes)", skb->len);
+ kfree_skb(skb);
+ return 0;
+ }
+
+ if (h5->state != H5_ACTIVE) {
+ BT_ERR("Ignoring HCI data in non-active state");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_ACLDATA_PKT:
+ case HCI_COMMAND_PKT:
+ skb_queue_tail(&h5->rel, skb);
+ break;
+
+ case HCI_SCODATA_PKT:
+ skb_queue_tail(&h5->unrel, skb);
+ break;
+
+ default:
+ BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
+ kfree_skb(skb);
+ break;
+ }
+
+ return 0;
+}
+
+static void h5_slip_delim(struct sk_buff *skb)
+{
+ const char delim = SLIP_DELIMITER;
+
+ memcpy(skb_put(skb, 1), &delim, 1);
+}
+
+static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
+{
+ const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
+ const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };
+
+ switch (c) {
+ case SLIP_DELIMITER:
+ memcpy(skb_put(skb, 2), &esc_delim, 2);
+ break;
+ case SLIP_ESC:
+ memcpy(skb_put(skb, 2), &esc_esc, 2);
+ break;
+ default:
+ memcpy(skb_put(skb, 1), &c, 1);
+ }
+}
+
+static bool valid_packet_type(u8 type)
+{
+ switch (type) {
+ case HCI_ACLDATA_PKT:
+ case HCI_COMMAND_PKT:
+ case HCI_SCODATA_PKT:
+ case HCI_3WIRE_LINK_PKT:
+ case HCI_3WIRE_ACK_PKT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
+ const u8 *data, size_t len)
+{
+ struct h5 *h5 = hu->priv;
+ struct sk_buff *nskb;
+ u8 hdr[4];
+ int i;
+
+ if (!valid_packet_type(pkt_type)) {
+ BT_ERR("Unknown packet type %u", pkt_type);
+ return NULL;
+ }
+
+ /*
+ * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
+ * (because bytes 0xc0 and 0xdb are escaped, worst case is when
+ * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
+ * delimiters at start and end).
+ */
+ nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
+ if (!nskb)
+ return NULL;
+
+ bt_cb(nskb)->pkt_type = pkt_type;
+
+ h5_slip_delim(nskb);
+
+ hdr[0] = h5->tx_ack << 3;
+ clear_bit(H5_TX_ACK_REQ, &h5->flags);
+
+ /* Reliable packet? */
+ if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
+ hdr[0] |= 1 << 7;
+ hdr[0] |= h5->tx_seq;
+ h5->tx_seq = (h5->tx_seq + 1) % 8;
+ }
+
+ hdr[1] = pkt_type | ((len & 0x0f) << 4);
+ hdr[2] = len >> 4;
+ hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);
+
+ BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
+ hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
+ H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
+ H5_HDR_LEN(hdr));
+
+ for (i = 0; i < 4; i++)
+ h5_slip_one_byte(nskb, hdr[i]);
+
+ for (i = 0; i < len; i++)
+ h5_slip_one_byte(nskb, data[i]);
+
+ h5_slip_delim(nskb);
+
+ return nskb;
+}
+
+static struct sk_buff *h5_dequeue(struct hci_uart *hu)
+{
+ struct h5 *h5 = hu->priv;
+ unsigned long flags;
+ struct sk_buff *skb, *nskb;
+
+ if (h5->sleep != H5_AWAKE) {
+ const unsigned char wakeup_req[] = { 0x05, 0xfa };
+
+ if (h5->sleep == H5_WAKING_UP)
+ return NULL;
+
+ h5->sleep = H5_WAKING_UP;
+ BT_DBG("Sending wakeup request");
+
+ mod_timer(&h5->timer, jiffies + HZ / 100);
+ return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
+ }
+
+ if ((skb = skb_dequeue(&h5->unrel)) != NULL) {
+ nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
+ skb->data, skb->len);
+ if (nskb) {
+ kfree_skb(skb);
+ return nskb;
+ }
+
+ skb_queue_head(&h5->unrel, skb);
+ BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ }
+
+ spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
+
+ if (h5->unack.qlen >= h5->tx_win)
+ goto unlock;
+
+ if ((skb = skb_dequeue(&h5->rel)) != NULL) {
+ nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
+ skb->data, skb->len);
+ if (nskb) {
+ __skb_queue_tail(&h5->unack, skb);
+ mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+ return nskb;
+ }
+
+ skb_queue_head(&h5->rel, skb);
+ BT_ERR("Could not dequeue pkt because alloc_skb failed");
+ }
+
+unlock:
+ spin_unlock_irqrestore(&h5->unack.lock, flags);
+
+ if (test_bit(H5_TX_ACK_REQ, &h5->flags))
+ return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);
+
+ return NULL;
+}
+
+static int h5_flush(struct hci_uart *hu)
+{
+ BT_DBG("hu %p", hu);
+ return 0;
+}
+
+static struct hci_uart_proto h5p = {
+ .id = HCI_UART_3WIRE,
+ .open = h5_open,
+ .close = h5_close,
+ .recv = h5_recv,
+ .enqueue = h5_enqueue,
+ .dequeue = h5_dequeue,
+ .flush = h5_flush,
+};
+
+int __init h5_init(void)
+{
+ int err = hci_uart_register_proto(&h5p);
+
+ if (!err)
+ BT_INFO("HCI Three-wire UART (H5) protocol initialized");
+ else
+ BT_ERR("HCI Three-wire UART (H5) protocol init failed");
+
+ return err;
+}
+
+int __exit h5_deinit(void)
+{
+ return hci_uart_unregister_proto(&h5p);
+}
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index e564579a6115..74e0966b3ead 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -156,6 +156,35 @@ restart:
return 0;
}
+static void hci_uart_init_work(struct work_struct *work)
+{
+ struct hci_uart *hu = container_of(work, struct hci_uart, init_ready);
+ int err;
+
+ if (!test_and_clear_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return;
+
+ err = hci_register_dev(hu->hdev);
+ if (err < 0) {
+ BT_ERR("Can't register HCI device");
+ hci_free_dev(hu->hdev);
+ hu->hdev = NULL;
+ hu->proto->close(hu);
+ }
+
+ set_bit(HCI_UART_REGISTERED, &hu->flags);
+}
+
+int hci_uart_init_ready(struct hci_uart *hu)
+{
+ if (!test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return -EALREADY;
+
+ schedule_work(&hu->init_ready);
+
+ return 0;
+}
+
/* ------- Interface to HCI layer ------ */
/* Initialize device */
static int hci_uart_open(struct hci_dev *hdev)
@@ -264,6 +293,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
hu->tty = tty;
tty->receive_room = 65536;
+ INIT_WORK(&hu->init_ready, hci_uart_init_work);
+
spin_lock_init(&hu->rx_lock);
/* Flush any pending characters in the driver and line discipline. */
@@ -286,28 +317,30 @@ static int hci_uart_tty_open(struct tty_struct *tty)
static void hci_uart_tty_close(struct tty_struct *tty)
{
struct hci_uart *hu = (void *)tty->disc_data;
+ struct hci_dev *hdev;
BT_DBG("tty %p", tty);
/* Detach from the tty */
tty->disc_data = NULL;
- if (hu) {
- struct hci_dev *hdev = hu->hdev;
+ if (!hu)
+ return;
- if (hdev)
- hci_uart_close(hdev);
+ hdev = hu->hdev;
+ if (hdev)
+ hci_uart_close(hdev);
- if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
- if (hdev) {
+ if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
+ if (hdev) {
+ if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
- hci_free_dev(hdev);
- }
- hu->proto->close(hu);
+ hci_free_dev(hdev);
}
-
- kfree(hu);
+ hu->proto->close(hu);
}
+
+ kfree(hu);
}
/* hci_uart_tty_wakeup()
@@ -394,19 +427,24 @@ static int hci_uart_register_dev(struct hci_uart *hu)
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags))
- set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
+ set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags))
hdev->dev_type = HCI_AMP;
else
hdev->dev_type = HCI_BREDR;
+ if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags))
+ return 0;
+
if (hci_register_dev(hdev) < 0) {
BT_ERR("Can't register HCI device");
hci_free_dev(hdev);
return -ENODEV;
}
+ set_bit(HCI_UART_REGISTERED, &hu->flags);
+
return 0;
}
@@ -558,6 +596,9 @@ static int __init hci_uart_init(void)
#ifdef CONFIG_BT_HCIUART_ATH3K
ath_init();
#endif
+#ifdef CONFIG_BT_HCIUART_3WIRE
+ h5_init();
+#endif
return 0;
}
@@ -578,6 +619,9 @@ static void __exit hci_uart_exit(void)
#ifdef CONFIG_BT_HCIUART_ATH3K
ath_deinit();
#endif
+#ifdef CONFIG_BT_HCIUART_3WIRE
+ h5_deinit();
+#endif
/* Release tty registration of line discipline */
if ((err = tty_unregister_ldisc(N_HCI)))
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index b874c0efde24..ff6d589c34a5 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -348,7 +348,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb)
static inline int ll_check_data_len(struct ll_struct *ll, int len)
{
- register int room = skb_tailroom(ll->rx_skb);
+ int room = skb_tailroom(ll->rx_skb);
BT_DBG("len %d room %d", len, room);
@@ -374,11 +374,11 @@ static inline int ll_check_data_len(struct ll_struct *ll, int len)
static int ll_recv(struct hci_uart *hu, void *data, int count)
{
struct ll_struct *ll = hu->priv;
- register char *ptr;
+ char *ptr;
struct hci_event_hdr *eh;
struct hci_acl_hdr *ah;
struct hci_sco_hdr *sh;
- register int len, type, dlen;
+ int len, type, dlen;
BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 6cf6ab22ad21..fffa61ff5cb1 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -47,6 +47,7 @@
#define HCI_UART_RAW_DEVICE 0
#define HCI_UART_RESET_ON_INIT 1
#define HCI_UART_CREATE_AMP 2
+#define HCI_UART_INIT_PENDING 3
struct hci_uart;
@@ -66,6 +67,8 @@ struct hci_uart {
unsigned long flags;
unsigned long hdev_flags;
+ struct work_struct init_ready;
+
struct hci_uart_proto *proto;
void *priv;
@@ -76,6 +79,7 @@ struct hci_uart {
/* HCI_UART proto flag bits */
#define HCI_UART_PROTO_SET 0
+#define HCI_UART_REGISTERED 1
/* TX states */
#define HCI_UART_SENDING 1
@@ -84,6 +88,7 @@ struct hci_uart {
int hci_uart_register_proto(struct hci_uart_proto *p);
int hci_uart_unregister_proto(struct hci_uart_proto *p);
int hci_uart_tx_wakeup(struct hci_uart *hu);
+int hci_uart_init_ready(struct hci_uart *hu);
#ifdef CONFIG_BT_HCIUART_H4
int h4_init(void);
@@ -104,3 +109,8 @@ int ll_deinit(void);
int ath_init(void);
int ath_deinit(void);
#endif
+
+#ifdef CONFIG_BT_HCIUART_3WIRE
+int h5_init(void);
+int h5_deinit(void);
+#endif
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 1412565c01af..d706bd0e9e80 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -162,22 +162,24 @@ static int __exit omap_rng_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
-static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message)
+static int omap_rng_suspend(struct device *dev)
{
omap_rng_write_reg(RNG_MASK_REG, 0x0);
return 0;
}
-static int omap_rng_resume(struct platform_device *pdev)
+static int omap_rng_resume(struct device *dev)
{
omap_rng_write_reg(RNG_MASK_REG, 0x1);
return 0;
}
+static SIMPLE_DEV_PM_OPS(omap_rng_pm, omap_rng_suspend, omap_rng_resume);
+#define OMAP_RNG_PM (&omap_rng_pm)
+
#else
-#define omap_rng_suspend NULL
-#define omap_rng_resume NULL
+#define OMAP_RNG_PM NULL
#endif
@@ -188,11 +190,10 @@ static struct platform_driver omap_rng_driver = {
.driver = {
.name = "omap_rng",
.owner = THIS_MODULE,
+ .pm = OMAP_RNG_PM,
},
.probe = omap_rng_probe,
.remove = __exit_p(omap_rng_remove),
- .suspend = omap_rng_suspend,
- .resume = omap_rng_resume
};
static int __init omap_rng_init(void)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 1e638fff40ea..83f85cf7fb1b 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2503,18 +2503,6 @@ static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
cleanup_one_si(info);
}
-#ifdef CONFIG_PM
-static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- return 0;
-}
-
-static int ipmi_pci_resume(struct pci_dev *pdev)
-{
- return 0;
-}
-#endif
-
static struct pci_device_id ipmi_pci_devices[] = {
{ PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
{ PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) },
@@ -2527,10 +2515,6 @@ static struct pci_driver ipmi_pci_driver = {
.id_table = ipmi_pci_devices,
.probe = ipmi_pci_probe,
.remove = __devexit_p(ipmi_pci_remove),
-#ifdef CONFIG_PM
- .suspend = ipmi_pci_suspend,
- .resume = ipmi_pci_resume,
-#endif
};
#endif /* CONFIG_PCI */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 7ed356e52035..37b8be7cba95 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -141,17 +141,6 @@
#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
-/* These are here until the real ones get into the watchdog.h interface. */
-#ifndef WDIOC_GETTIMEOUT
-#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
-#endif
-#ifndef WDIOC_SET_PRETIMEOUT
-#define WDIOC_SET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 21, int)
-#endif
-#ifndef WDIOC_GET_PRETIMEOUT
-#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
-#endif
-
static DEFINE_MUTEX(ipmi_watchdog_mutex);
static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -732,7 +721,6 @@ static int ipmi_ioctl(struct file *file,
return -EFAULT;
return 0;
- case WDIOC_SET_PRETIMEOUT:
case WDIOC_SETPRETIMEOUT:
i = copy_from_user(&val, argp, sizeof(int));
if (i)
@@ -740,7 +728,6 @@ static int ipmi_ioctl(struct file *file,
pretimeout = val;
return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
- case WDIOC_GET_PRETIMEOUT:
case WDIOC_GETPRETIMEOUT:
i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
if (i)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 67c3371723cc..e5eedfa24c91 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -27,14 +27,16 @@
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <asm/uaccess.h>
-#include <asm/io.h>
#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
+#define DEVPORT_MINOR 4
+
static inline unsigned long size_inside_page(unsigned long start,
unsigned long size)
{
@@ -894,6 +896,13 @@ static int __init chr_dev_init(void)
for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
if (!devlist[minor].name)
continue;
+
+ /*
+ * Create /dev/port?
+ */
+ if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
+ continue;
+
device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
NULL, devlist[minor].name);
}
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index 45713f0e7d61..f87780502b41 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1459,7 +1459,7 @@ static int __devexit sonypi_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int old_camera_power;
-static int sonypi_suspend(struct platform_device *dev, pm_message_t state)
+static int sonypi_suspend(struct device *dev)
{
old_camera_power = sonypi_device.camera_power;
sonypi_disable();
@@ -1467,14 +1467,16 @@ static int sonypi_suspend(struct platform_device *dev, pm_message_t state)
return 0;
}
-static int sonypi_resume(struct platform_device *dev)
+static int sonypi_resume(struct device *dev)
{
sonypi_enable(old_camera_power);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(sonypi_pm, sonypi_suspend, sonypi_resume);
+#define SONYPI_PM (&sonypi_pm)
#else
-#define sonypi_suspend NULL
-#define sonypi_resume NULL
+#define SONYPI_PM NULL
#endif
static void sonypi_shutdown(struct platform_device *dev)
@@ -1486,12 +1488,11 @@ static struct platform_driver sonypi_driver = {
.driver = {
.name = "sonypi",
.owner = THIS_MODULE,
+ .pm = SONYPI_PM,
},
.probe = sonypi_probe,
.remove = __devexit_p(sonypi_remove),
.shutdown = sonypi_shutdown,
- .suspend = sonypi_suspend,
- .resume = sonypi_resume,
};
static struct platform_device *sonypi_platform_device;
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index ad7c7320dd1b..817f0ee202b6 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -827,10 +827,10 @@ EXPORT_SYMBOL_GPL(tpm_pcr_extend);
int tpm_do_selftest(struct tpm_chip *chip)
{
int rc;
- u8 digest[TPM_DIGEST_SIZE];
unsigned int loops;
unsigned int delay_msec = 1000;
unsigned long duration;
+ struct tpm_cmd_t cmd;
duration = tpm_calc_ordinal_duration(chip,
TPM_ORD_CONTINUE_SELFTEST);
@@ -845,7 +845,15 @@ int tpm_do_selftest(struct tpm_chip *chip)
return rc;
do {
- rc = __tpm_pcr_read(chip, 0, digest);
+ /* Attempt to read a PCR value */
+ cmd.header.in = pcrread_header;
+ cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0);
+ rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE);
+
+ if (rc < TPM_HEADER_SIZE)
+ return -EFAULT;
+
+ rc = be32_to_cpu(cmd.header.out.return_code);
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
dev_info(chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
@@ -1274,7 +1282,7 @@ static struct tpm_input_header savestate_header = {
* We are about to suspend. Save the TPM state
* so that it can be restored.
*/
-int tpm_pm_suspend(struct device *dev, pm_message_t pm_state)
+int tpm_pm_suspend(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
struct tpm_cmd_t cmd;
@@ -1322,6 +1330,9 @@ EXPORT_SYMBOL_GPL(tpm_pm_resume);
void tpm_dev_vendor_release(struct tpm_chip *chip)
{
+ if (!chip)
+ return;
+
if (chip->vendor.release)
chip->vendor.release(chip->dev);
@@ -1339,6 +1350,9 @@ void tpm_dev_release(struct device *dev)
{
struct tpm_chip *chip = dev_get_drvdata(dev);
+ if (!chip)
+ return;
+
tpm_dev_vendor_release(chip);
chip->release(dev);
@@ -1405,15 +1419,12 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
"unable to misc_register %s, minor %d\n",
chip->vendor.miscdev.name,
chip->vendor.miscdev.minor);
- put_device(chip->dev);
- return NULL;
+ goto put_device;
}
if (sysfs_create_group(&dev->kobj, chip->vendor.attr_group)) {
misc_deregister(&chip->vendor.miscdev);
- put_device(chip->dev);
-
- return NULL;
+ goto put_device;
}
chip->bios_dir = tpm_bios_log_setup(devname);
@@ -1425,6 +1436,8 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,
return chip;
+put_device:
+ put_device(chip->dev);
out_free:
kfree(chip);
kfree(devname);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index b1c5280ac159..917f727e6740 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -299,7 +299,7 @@ extern ssize_t tpm_write(struct file *, const char __user *, size_t,
loff_t *);
extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
extern void tpm_remove_hardware(struct device *);
-extern int tpm_pm_suspend(struct device *, pm_message_t);
+extern int tpm_pm_suspend(struct device *);
extern int tpm_pm_resume(struct device *);
extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
wait_queue_head_t *);
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index c64a1bc65349..678d57019dc4 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -168,22 +168,14 @@ static void atml_plat_remove(void)
}
}
-static int tpm_atml_suspend(struct platform_device *dev, pm_message_t msg)
-{
- return tpm_pm_suspend(&dev->dev, msg);
-}
+static SIMPLE_DEV_PM_OPS(tpm_atml_pm, tpm_pm_suspend, tpm_pm_resume);
-static int tpm_atml_resume(struct platform_device *dev)
-{
- return tpm_pm_resume(&dev->dev);
-}
static struct platform_driver atml_drv = {
.driver = {
.name = "tpm_atmel",
.owner = THIS_MODULE,
+ .pm = &tpm_atml_pm,
},
- .suspend = tpm_atml_suspend,
- .resume = tpm_atml_resume,
};
static int __init init_atmel(void)
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 76da32e11f18..3251a44e8ceb 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -4,8 +4,8 @@
* SLD 9630 TT 1.1 and SLB 9635 TT 1.2 Trusted Platform Module
* Specifications at www.trustedcomputinggroup.org
*
- * Copyright (C) 2005, Marcel Selhorst <m.selhorst@sirrix.com>
- * Sirrix AG - security technologies, http://www.sirrix.com and
+ * Copyright (C) 2005, Marcel Selhorst <tpmdd@selhorst.net>
+ * Sirrix AG - security technologies <tpmdd@sirrix.com> and
* Applied Data Security Group, Ruhr-University Bochum, Germany
* Project-Homepage: http://www.trust.rub.de/projects/linux-device-driver-infineon-tpm/
*
@@ -671,7 +671,7 @@ static void __exit cleanup_inf(void)
module_init(init_inf);
module_exit(cleanup_inf);
-MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
+MODULE_AUTHOR("Marcel Selhorst <tpmdd@sirrix.com>");
MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
MODULE_VERSION("1.9.2");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 4d2464871ada..640c9a427b59 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -274,22 +274,13 @@ static void tpm_nsc_remove(struct device *dev)
}
}
-static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg)
-{
- return tpm_pm_suspend(&dev->dev, msg);
-}
-
-static int tpm_nsc_resume(struct platform_device *dev)
-{
- return tpm_pm_resume(&dev->dev);
-}
+static SIMPLE_DEV_PM_OPS(tpm_nsc_pm, tpm_pm_suspend, tpm_pm_resume);
static struct platform_driver nsc_drv = {
- .suspend = tpm_nsc_suspend,
- .resume = tpm_nsc_resume,
.driver = {
.name = "tpm_nsc",
.owner = THIS_MODULE,
+ .pm = &tpm_nsc_pm,
},
};
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index d2a70cae76df..89682fa8801e 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -750,7 +750,7 @@ static int __devinit tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
static int tpm_tis_pnp_suspend(struct pnp_dev *dev, pm_message_t msg)
{
- return tpm_pm_suspend(&dev->dev, msg);
+ return tpm_pm_suspend(&dev->dev);
}
static int tpm_tis_pnp_resume(struct pnp_dev *dev)
@@ -806,27 +806,25 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id,
sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444);
MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe");
#endif
-static int tpm_tis_suspend(struct platform_device *dev, pm_message_t msg)
-{
- return tpm_pm_suspend(&dev->dev, msg);
-}
-static int tpm_tis_resume(struct platform_device *dev)
+static int tpm_tis_resume(struct device *dev)
{
- struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+ struct tpm_chip *chip = dev_get_drvdata(dev);
if (chip->vendor.irq)
tpm_tis_reenable_interrupts(chip);
- return tpm_pm_resume(&dev->dev);
+ return tpm_pm_resume(dev);
}
+
+static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume);
+
static struct platform_driver tis_drv = {
.driver = {
.name = "tpm_tis",
.owner = THIS_MODULE,
+ .pm = &tpm_tis_pm,
},
- .suspend = tpm_tis_suspend,
- .resume = tpm_tis_resume,
};
static struct platform_device *pdev;
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 4864407e3fc4..3f99b9099658 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -34,4 +34,11 @@ config COMMON_CLK_DEBUG
clk_flags, clk_prepare_count, clk_enable_count &
clk_notifier_count.
+config COMMON_CLK_WM831X
+ tristate "Clock driver for WM831x/2x PMICs"
+ depends on MFD_WM831X
+ ---help---
+ Supports the clocking subsystem of the WM831x/2x series of
+ PMICs from Wolfson Microlectronics.
+
endmenu
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b9a5158a30b1..5869ea387054 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,7 +1,15 @@
-
+# common clock types
obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
clk-mux.o clk-divider.o clk-fixed-factor.o
# SoCs specific
+obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
+obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_ARCH_MXS) += mxs/
+obj-$(CONFIG_ARCH_SOCFPGA) += socfpga/
obj-$(CONFIG_PLAT_SPEAR) += spear/
+obj-$(CONFIG_ARCH_U300) += clk-u300.o
+obj-$(CONFIG_ARCH_INTEGRATOR) += versatile/
+
+# Chip specific
+obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8ea11b444528..a9204c69148d 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -30,18 +30,89 @@
#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
#define div_mask(d) ((1 << (d->width)) - 1)
+#define is_power_of_two(i) !(i & ~i)
+
+static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
+{
+ unsigned int maxdiv = 0;
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div > maxdiv)
+ maxdiv = clkt->div;
+ return maxdiv;
+}
+
+static unsigned int _get_maxdiv(struct clk_divider *divider)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return div_mask(divider);
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << div_mask(divider);
+ if (divider->table)
+ return _get_table_maxdiv(divider->table);
+ return div_mask(divider) + 1;
+}
+
+static unsigned int _get_table_div(const struct clk_div_table *table,
+ unsigned int val)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->val == val)
+ return clkt->div;
+ return 0;
+}
+
+static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return val;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return 1 << val;
+ if (divider->table)
+ return _get_table_div(divider->table, val);
+ return val + 1;
+}
+
+static unsigned int _get_table_val(const struct clk_div_table *table,
+ unsigned int div)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return clkt->val;
+ return 0;
+}
+
+static unsigned int _get_val(struct clk_divider *divider, u8 div)
+{
+ if (divider->flags & CLK_DIVIDER_ONE_BASED)
+ return div;
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return __ffs(div);
+ if (divider->table)
+ return _get_table_val(divider->table, div);
+ return div - 1;
+}
static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
- unsigned int div;
+ unsigned int div, val;
- div = readl(divider->reg) >> divider->shift;
- div &= div_mask(divider);
+ val = readl(divider->reg) >> divider->shift;
+ val &= div_mask(divider);
- if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
- div++;
+ div = _get_div(divider, val);
+ if (!div) {
+ WARN(1, "%s: Invalid divisor for clock %s\n", __func__,
+ __clk_get_name(hw->clk));
+ return parent_rate;
+ }
return parent_rate / div;
}
@@ -52,6 +123,26 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
*/
#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1)
+static bool _is_valid_table_div(const struct clk_div_table *table,
+ unsigned int div)
+{
+ const struct clk_div_table *clkt;
+
+ for (clkt = table; clkt->div; clkt++)
+ if (clkt->div == div)
+ return true;
+ return false;
+}
+
+static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+{
+ if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+ return is_power_of_two(div);
+ if (divider->table)
+ return _is_valid_table_div(divider->table, div);
+ return true;
+}
+
static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
unsigned long *best_parent_rate)
{
@@ -62,10 +153,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
if (!rate)
rate = 1;
- maxdiv = (1 << divider->width);
-
- if (divider->flags & CLK_DIVIDER_ONE_BASED)
- maxdiv--;
+ maxdiv = _get_maxdiv(divider);
if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
parent_rate = *best_parent_rate;
@@ -82,6 +170,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
maxdiv = min(ULONG_MAX / rate, maxdiv);
for (i = 1; i <= maxdiv; i++) {
+ if (!_is_valid_div(divider, i))
+ continue;
parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
MULT_ROUND_UP(rate, i));
now = parent_rate / i;
@@ -93,9 +183,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
}
if (!bestdiv) {
- bestdiv = (1 << divider->width);
- if (divider->flags & CLK_DIVIDER_ONE_BASED)
- bestdiv--;
+ bestdiv = _get_maxdiv(divider);
*best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
}
@@ -115,24 +203,22 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
- unsigned int div;
+ unsigned int div, value;
unsigned long flags = 0;
u32 val;
div = parent_rate / rate;
+ value = _get_val(divider, div);
- if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
- div--;
-
- if (div > div_mask(divider))
- div = div_mask(divider);
+ if (value > div_mask(divider))
+ value = div_mask(divider);
if (divider->lock)
spin_lock_irqsave(divider->lock, flags);
val = readl(divider->reg);
val &= ~(div_mask(divider) << divider->shift);
- val |= div << divider->shift;
+ val |= value << divider->shift;
writel(val, divider->reg);
if (divider->lock)
@@ -148,22 +234,11 @@ const struct clk_ops clk_divider_ops = {
};
EXPORT_SYMBOL_GPL(clk_divider_ops);
-/**
- * clk_register_divider - register a divider clock with the clock framework
- * @dev: device registering this clock
- * @name: name of this clock
- * @parent_name: name of clock's parent
- * @flags: framework-specific flags
- * @reg: register address to adjust divider
- * @shift: number of bits to shift the bitfield
- * @width: width of the bitfield
- * @clk_divider_flags: divider-specific flags for this clock
- * @lock: shared register lock for this clock
- */
-struct clk *clk_register_divider(struct device *dev, const char *name,
+static struct clk *_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, spinlock_t *lock)
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock)
{
struct clk_divider *div;
struct clk *clk;
@@ -178,7 +253,7 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
init.name = name;
init.ops = &clk_divider_ops;
- init.flags = flags;
+ init.flags = flags | CLK_IS_BASIC;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
@@ -189,6 +264,7 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
div->flags = clk_divider_flags;
div->lock = lock;
div->hw.init = &init;
+ div->table = table;
/* register the clock */
clk = clk_register(dev, &div->hw);
@@ -198,3 +274,48 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
return clk;
}
+
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+struct clk *clk_register_divider(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, spinlock_t *lock)
+{
+ return _register_divider(dev, name, parent_name, flags, reg, shift,
+ width, clk_divider_flags, NULL, lock);
+}
+
+/**
+ * clk_register_divider_table - register a table based divider clock with
+ * the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+struct clk *clk_register_divider_table(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock)
+{
+ return _register_divider(dev, name, parent_name, flags, reg, shift,
+ width, clk_divider_flags, table, lock);
+}
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index c8c003e217ad..a4899855c0f6 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -82,7 +82,7 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
init.name = name;
init.ops = &clk_fixed_factor_ops;
- init.flags = flags;
+ init.flags = flags | CLK_IS_BASIC;
init.parent_names = &parent_name;
init.num_parents = 1;
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index cbd246229786..f5ec0eebd4d7 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
+#include <linux/of.h>
/*
* DOC: basic fixed-rate clock that cannot gate
@@ -63,7 +64,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
init.name = name;
init.ops = &clk_fixed_rate_ops;
- init.flags = flags;
+ init.flags = flags | CLK_IS_BASIC;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
@@ -79,3 +80,25 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
return clk;
}
+
+#ifdef CONFIG_OF
+/**
+ * of_fixed_clk_setup() - Setup function for simple fixed rate clock
+ */
+void __init of_fixed_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = node->name;
+ u32 rate;
+
+ if (of_property_read_u32(node, "clock-frequency", &rate))
+ return;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
+ if (clk)
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+}
+EXPORT_SYMBOL_GPL(of_fixed_clk_setup);
+#endif
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 578465e04be6..15114febfd92 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -130,7 +130,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
init.name = name;
init.ops = &clk_gate_ops;
- init.flags = flags;
+ init.flags = flags | CLK_IS_BASIC;
init.parent_names = (parent_name ? &parent_name: NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
new file mode 100644
index 000000000000..52fecadf004a
--- /dev/null
+++ b/drivers/clk/clk-highbank.c
@@ -0,0 +1,346 @@
+/*
+ * Copyright 2011-2012 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+extern void __iomem *sregs_base;
+
+#define HB_PLL_LOCK_500 0x20000000
+#define HB_PLL_LOCK 0x10000000
+#define HB_PLL_DIVF_SHIFT 20
+#define HB_PLL_DIVF_MASK 0x0ff00000
+#define HB_PLL_DIVQ_SHIFT 16
+#define HB_PLL_DIVQ_MASK 0x00070000
+#define HB_PLL_DIVR_SHIFT 8
+#define HB_PLL_DIVR_MASK 0x00001f00
+#define HB_PLL_RANGE_SHIFT 4
+#define HB_PLL_RANGE_MASK 0x00000070
+#define HB_PLL_BYPASS 0x00000008
+#define HB_PLL_RESET 0x00000004
+#define HB_PLL_EXT_BYPASS 0x00000002
+#define HB_PLL_EXT_ENA 0x00000001
+
+#define HB_PLL_VCO_MIN_FREQ 2133000000
+#define HB_PLL_MAX_FREQ HB_PLL_VCO_MIN_FREQ
+#define HB_PLL_MIN_FREQ (HB_PLL_VCO_MIN_FREQ / 64)
+
+#define HB_A9_BCLK_DIV_MASK 0x00000006
+#define HB_A9_BCLK_DIV_SHIFT 1
+#define HB_A9_PCLK_DIV 0x00000001
+
+struct hb_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+ char *parent_name;
+};
+#define to_hb_clk(p) container_of(p, struct hb_clk, hw)
+
+static int clk_pll_prepare(struct clk_hw *hwclk)
+ {
+ struct hb_clk *hbclk = to_hb_clk(hwclk);
+ u32 reg;
+
+ reg = readl(hbclk->reg);
+ reg &= ~HB_PLL_RESET;
+ writel(reg, hbclk->reg);
+
+ while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
+ ;
+ while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
+ ;
+
+ return 0;
+}
+
+static void clk_pll_unprepare(struct clk_hw *hwclk)
+{
+ struct hb_clk *hbclk = to_hb_clk(hwclk);
+ u32 reg;
+
+ reg = readl(hbclk->reg);
+ reg |= HB_PLL_RESET;
+ writel(reg, hbclk->reg);
+}
+
+static int clk_pll_enable(struct clk_hw *hwclk)
+{
+ struct hb_clk *hbclk = to_hb_clk(hwclk);
+ u32 reg;
+
+ reg = readl(hbclk->reg);
+ reg |= HB_PLL_EXT_ENA;
+ writel(reg, hbclk->reg);
+
+ return 0;
+}
+
+static void clk_pll_disable(struct clk_hw *hwclk)
+{
+ struct hb_clk *hbclk = to_hb_clk(hwclk);
+ u32 reg;
+
+ reg = readl(hbclk->reg);
+ reg &= ~HB_PLL_EXT_ENA;
+ writel(reg, hbclk->reg);
+}
+
+static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct hb_clk *hbclk = to_hb_clk(hwclk);
+ unsigned long divf, divq, vco_freq, reg;
+
+ reg = readl(hbclk->reg);
+ if (reg & HB_PLL_EXT_BYPASS)
+ return parent_rate;
+
+ divf = (reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT;
+ divq = (reg & HB_PLL_DIVQ_MASK) >> HB_PLL_DIVQ_SHIFT;
+ vco_freq = parent_rate * (divf + 1);
+
+ return vco_freq / (1 << divq);
+}
+
+static void clk_pll_calc(unsigned long rate, unsigned long ref_freq,
+ u32 *pdivq, u32 *pdivf)
+{
+ u32 divq, divf;
+ unsigned long vco_freq;
+
+ if (rate < HB_PLL_MIN_FREQ)
+ rate = HB_PLL_MIN_FREQ;
+ if (rate > HB_PLL_MAX_FREQ)
+ rate = HB_PLL_MAX_FREQ;
+
+ for (divq = 1; divq <= 6; divq++) {
+ if ((rate * (1 << divq)) >= HB_PLL_VCO_MIN_FREQ)
+ break;
+ }
+
+ vco_freq = rate * (1 << divq);
+ divf = (vco_freq + (ref_freq / 2)) / ref_freq;
+ divf--;
+
+ *pdivq = divq;
+ *pdivf = divf;
+}
+
+static long clk_pll_round_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u32 divq, divf;
+ unsigned long ref_freq = *parent_rate;
+
+ clk_pll_calc(rate, ref_freq, &divq, &divf);
+
+ return (ref_freq * (divf + 1)) / (1 << divq);
+}
+
+static int clk_pll_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hb_clk *hbclk = to_hb_clk(hwclk);
+ u32 divq, divf;
+ u32 reg;
+
+ clk_pll_calc(rate, parent_rate, &divq, &divf);
+
+ reg = readl(hbclk->reg);
+ if (divf != ((reg & HB_PLL_DIVF_MASK) >> HB_PLL_DIVF_SHIFT)) {
+ /* Need to re-lock PLL, so put it into bypass mode */
+ reg |= HB_PLL_EXT_BYPASS;
+ writel(reg | HB_PLL_EXT_BYPASS, hbclk->reg);
+
+ writel(reg | HB_PLL_RESET, hbclk->reg);
+ reg &= ~(HB_PLL_DIVF_MASK | HB_PLL_DIVQ_MASK);
+ reg |= (divf << HB_PLL_DIVF_SHIFT) | (divq << HB_PLL_DIVQ_SHIFT);
+ writel(reg | HB_PLL_RESET, hbclk->reg);
+ writel(reg, hbclk->reg);
+
+ while ((readl(hbclk->reg) & HB_PLL_LOCK) == 0)
+ ;
+ while ((readl(hbclk->reg) & HB_PLL_LOCK_500) == 0)
+ ;
+ reg |= HB_PLL_EXT_ENA;
+ reg &= ~HB_PLL_EXT_BYPASS;
+ } else {
+ reg &= ~HB_PLL_DIVQ_MASK;
+ reg |= divq << HB_PLL_DIVQ_SHIFT;
+ }
+ writel(reg, hbclk->reg);
+
+ return 0;
+}
+
+static const struct clk_ops clk_pll_ops = {
+ .prepare = clk_pll_prepare,
+ .unprepare = clk_pll_unprepare,
+ .enable = clk_pll_enable,
+ .disable = clk_pll_disable,
+ .recalc_rate = clk_pll_recalc_rate,
+ .round_rate = clk_pll_round_rate,
+ .set_rate = clk_pll_set_rate,
+};
+
+static unsigned long clk_cpu_periphclk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct hb_clk *hbclk = to_hb_clk(hwclk);
+ u32 div = (readl(hbclk->reg) & HB_A9_PCLK_DIV) ? 8 : 4;
+ return parent_rate / div;
+}
+
+static const struct clk_ops a9periphclk_ops = {
+ .recalc_rate = clk_cpu_periphclk_recalc_rate,
+};
+
+static unsigned long clk_cpu_a9bclk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct hb_clk *hbclk = to_hb_clk(hwclk);
+ u32 div = (readl(hbclk->reg) & HB_A9_BCLK_DIV_MASK) >> HB_A9_BCLK_DIV_SHIFT;
+
+ return parent_rate / (div + 2);
+}
+
+static const struct clk_ops a9bclk_ops = {
+ .recalc_rate = clk_cpu_a9bclk_recalc_rate,
+};
+
+static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk,
+ unsigned long parent_rate)
+{
+ struct hb_clk *hbclk = to_hb_clk(hwclk);
+ u32 div;
+
+ div = readl(hbclk->reg) & 0x1f;
+ div++;
+ div *= 2;
+
+ return parent_rate / div;
+}
+
+static long clk_periclk_round_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ u32 div;
+
+ div = *parent_rate / rate;
+ div++;
+ div &= ~0x1;
+
+ return *parent_rate / div;
+}
+
+static int clk_periclk_set_rate(struct clk_hw *hwclk, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hb_clk *hbclk = to_hb_clk(hwclk);
+ u32 div;
+
+ div = parent_rate / rate;
+ if (div & 0x1)
+ return -EINVAL;
+
+ writel(div >> 1, hbclk->reg);
+ return 0;
+}
+
+static const struct clk_ops periclk_ops = {
+ .recalc_rate = clk_periclk_recalc_rate,
+ .round_rate = clk_periclk_round_rate,
+ .set_rate = clk_periclk_set_rate,
+};
+
+static __init struct clk *hb_clk_init(struct device_node *node, const struct clk_ops *ops)
+{
+ u32 reg;
+ struct clk *clk;
+ struct hb_clk *hb_clk;
+ const char *clk_name = node->name;
+ const char *parent_name;
+ struct clk_init_data init;
+ int rc;
+
+ rc = of_property_read_u32(node, "reg", &reg);
+ if (WARN_ON(rc))
+ return NULL;
+
+ hb_clk = kzalloc(sizeof(*hb_clk), GFP_KERNEL);
+ if (WARN_ON(!hb_clk))
+ return NULL;
+
+ hb_clk->reg = sregs_base + reg;
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ init.name = clk_name;
+ init.ops = ops;
+ init.flags = 0;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ hb_clk->hw.init = &init;
+
+ clk = clk_register(NULL, &hb_clk->hw);
+ if (WARN_ON(IS_ERR(clk))) {
+ kfree(hb_clk);
+ return NULL;
+ }
+ rc = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+ return clk;
+}
+
+static void __init hb_pll_init(struct device_node *node)
+{
+ hb_clk_init(node, &clk_pll_ops);
+}
+
+static void __init hb_a9periph_init(struct device_node *node)
+{
+ hb_clk_init(node, &a9periphclk_ops);
+}
+
+static void __init hb_a9bus_init(struct device_node *node)
+{
+ struct clk *clk = hb_clk_init(node, &a9bclk_ops);
+ clk_prepare_enable(clk);
+}
+
+static void __init hb_emmc_init(struct device_node *node)
+{
+ hb_clk_init(node, &periclk_ops);
+}
+
+static const __initconst struct of_device_id clk_match[] = {
+ { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
+ { .compatible = "calxeda,hb-pll-clock", .data = hb_pll_init, },
+ { .compatible = "calxeda,hb-a9periph-clock", .data = hb_a9periph_init, },
+ { .compatible = "calxeda,hb-a9bus-clock", .data = hb_a9bus_init, },
+ { .compatible = "calxeda,hb-emmc-clock", .data = hb_emmc_init, },
+ {}
+};
+
+void __init highbank_clocks_init(void)
+{
+ of_clk_init(clk_match);
+}
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index fd36a8ea73d9..508c032edce4 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -106,7 +106,7 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
init.name = name;
init.ops = &clk_mux_ops;
- init.flags = flags;
+ init.flags = flags | CLK_IS_BASIC;
init.parent_names = parent_names;
init.num_parents = num_parents;
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
new file mode 100644
index 000000000000..517a8ff7121e
--- /dev/null
+++ b/drivers/clk/clk-nomadik.c
@@ -0,0 +1,47 @@
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+
+/*
+ * The Nomadik clock tree is described in the STN8815A12 DB V4.2
+ * reference manual for the chip, page 94 ff.
+ */
+
+void __init nomadik_clk_init(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+ clk_register_clkdev(clk, NULL, "gpio.0");
+ clk_register_clkdev(clk, NULL, "gpio.1");
+ clk_register_clkdev(clk, NULL, "gpio.2");
+ clk_register_clkdev(clk, NULL, "gpio.3");
+ clk_register_clkdev(clk, NULL, "rng");
+
+ /*
+ * The 2.4 MHz TIMCLK reference clock is active at boot time, this is
+ * actually the MXTALCLK @19.2 MHz divided by 8. This clock is used
+ * by the timers and watchdog. See page 105 ff.
+ */
+ clk = clk_register_fixed_rate(NULL, "TIMCLK", NULL, CLK_IS_ROOT,
+ 2400000);
+ clk_register_clkdev(clk, NULL, "mtu0");
+ clk_register_clkdev(clk, NULL, "mtu1");
+
+ /*
+ * At boot time, PLL2 is set to generate a set of fixed clocks,
+ * one of them is CLK48, the 48 MHz clock, routed to the UART, MMC/SD
+ * I2C, IrDA, USB and SSP blocks.
+ */
+ clk = clk_register_fixed_rate(NULL, "CLK48", NULL, CLK_IS_ROOT,
+ 48000000);
+ clk_register_clkdev(clk, NULL, "uart0");
+ clk_register_clkdev(clk, NULL, "uart1");
+ clk_register_clkdev(clk, NULL, "mmci");
+ clk_register_clkdev(clk, NULL, "ssp");
+ clk_register_clkdev(clk, NULL, "nmk-i2c.0");
+ clk_register_clkdev(clk, NULL, "nmk-i2c.1");
+}
diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c
new file mode 100644
index 000000000000..a15f7928fb11
--- /dev/null
+++ b/drivers/clk/clk-u300.c
@@ -0,0 +1,746 @@
+/*
+ * U300 clock implementation
+ * Copyright (C) 2007-2012 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <mach/syscon.h>
+
+/*
+ * The clocking hierarchy currently looks like this.
+ * NOTE: the idea is NOT to show how the clocks are routed on the chip!
+ * The ideas is to show dependencies, so a clock higher up in the
+ * hierarchy has to be on in order for another clock to be on. Now,
+ * both CPU and DMA can actually be on top of the hierarchy, and that
+ * is not modeled currently. Instead we have the backbone AMBA bus on
+ * top. This bus cannot be programmed in any way but conceptually it
+ * needs to be active for the bridges and devices to transport data.
+ *
+ * Please be aware that a few clocks are hw controlled, which mean that
+ * the hw itself can turn on/off or change the rate of the clock when
+ * needed!
+ *
+ * AMBA bus
+ * |
+ * +- CPU
+ * +- FSMC NANDIF NAND Flash interface
+ * +- SEMI Shared Memory interface
+ * +- ISP Image Signal Processor (U335 only)
+ * +- CDS (U335 only)
+ * +- DMA Direct Memory Access Controller
+ * +- AAIF APP/ACC Inteface (Mobile Scalable Link, MSL)
+ * +- APEX
+ * +- VIDEO_ENC AVE2/3 Video Encoder
+ * +- XGAM Graphics Accelerator Controller
+ * +- AHB
+ * |
+ * +- ahb:0 AHB Bridge
+ * | |
+ * | +- ahb:1 INTCON Interrupt controller
+ * | +- ahb:3 MSPRO Memory Stick Pro controller
+ * | +- ahb:4 EMIF External Memory interface
+ * |
+ * +- fast:0 FAST bridge
+ * | |
+ * | +- fast:1 MMCSD MMC/SD card reader controller
+ * | +- fast:2 I2S0 PCM I2S channel 0 controller
+ * | +- fast:3 I2S1 PCM I2S channel 1 controller
+ * | +- fast:4 I2C0 I2C channel 0 controller
+ * | +- fast:5 I2C1 I2C channel 1 controller
+ * | +- fast:6 SPI SPI controller
+ * | +- fast:7 UART1 Secondary UART (U335 only)
+ * |
+ * +- slow:0 SLOW bridge
+ * |
+ * +- slow:1 SYSCON (not possible to control)
+ * +- slow:2 WDOG Watchdog
+ * +- slow:3 UART0 primary UART
+ * +- slow:4 TIMER_APP Application timer - used in Linux
+ * +- slow:5 KEYPAD controller
+ * +- slow:6 GPIO controller
+ * +- slow:7 RTC controller
+ * +- slow:8 BT Bus Tracer (not used currently)
+ * +- slow:9 EH Event Handler (not used currently)
+ * +- slow:a TIMER_ACC Access style timer (not used currently)
+ * +- slow:b PPM (U335 only, what is that?)
+ */
+
+/* Global syscon virtual base */
+static void __iomem *syscon_vbase;
+
+/**
+ * struct clk_syscon - U300 syscon clock
+ * @hw: corresponding clock hardware entry
+ * @hw_ctrld: whether this clock is hardware controlled (for refcount etc)
+ * and does not need any magic pokes to be enabled/disabled
+ * @reset: state holder, whether this block's reset line is asserted or not
+ * @res_reg: reset line enable/disable flag register
+ * @res_bit: bit for resetting or taking this consumer out of reset
+ * @en_reg: clock line enable/disable flag register
+ * @en_bit: bit for enabling/disabling this consumer clock line
+ * @clk_val: magic value to poke in the register to enable/disable
+ * this one clock
+ */
+struct clk_syscon {
+ struct clk_hw hw;
+ bool hw_ctrld;
+ bool reset;
+ void __iomem *res_reg;
+ u8 res_bit;
+ void __iomem *en_reg;
+ u8 en_bit;
+ u16 clk_val;
+};
+
+#define to_syscon(_hw) container_of(_hw, struct clk_syscon, hw)
+
+static DEFINE_SPINLOCK(syscon_resetreg_lock);
+
+/*
+ * Reset control functions. We remember if a block has been
+ * taken out of reset and don't remove the reset assertion again
+ * and vice versa. Currently we only remove resets so the
+ * enablement function is defined out.
+ */
+static void syscon_block_reset_enable(struct clk_syscon *sclk)
+{
+ unsigned long iflags;
+ u16 val;
+
+ /* Not all blocks support resetting */
+ if (!sclk->res_reg)
+ return;
+ spin_lock_irqsave(&syscon_resetreg_lock, iflags);
+ val = readw(sclk->res_reg);
+ val |= BIT(sclk->res_bit);
+ writew(val, sclk->res_reg);
+ spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
+ sclk->reset = true;
+}
+
+static void syscon_block_reset_disable(struct clk_syscon *sclk)
+{
+ unsigned long iflags;
+ u16 val;
+
+ /* Not all blocks support resetting */
+ if (!sclk->res_reg)
+ return;
+ spin_lock_irqsave(&syscon_resetreg_lock, iflags);
+ val = readw(sclk->res_reg);
+ val &= ~BIT(sclk->res_bit);
+ writew(val, sclk->res_reg);
+ spin_unlock_irqrestore(&syscon_resetreg_lock, iflags);
+ sclk->reset = false;
+}
+
+static int syscon_clk_prepare(struct clk_hw *hw)
+{
+ struct clk_syscon *sclk = to_syscon(hw);
+
+ /* If the block is in reset, bring it out */
+ if (sclk->reset)
+ syscon_block_reset_disable(sclk);
+ return 0;
+}
+
+static void syscon_clk_unprepare(struct clk_hw *hw)
+{
+ struct clk_syscon *sclk = to_syscon(hw);
+
+ /* Please don't force the console into reset */
+ if (sclk->clk_val == U300_SYSCON_SBCER_UART_CLK_EN)
+ return;
+ /* When unpreparing, force block into reset */
+ if (!sclk->reset)
+ syscon_block_reset_enable(sclk);
+}
+
+static int syscon_clk_enable(struct clk_hw *hw)
+{
+ struct clk_syscon *sclk = to_syscon(hw);
+
+ /* Don't touch the hardware controlled clocks */
+ if (sclk->hw_ctrld)
+ return 0;
+ /* These cannot be controlled */
+ if (sclk->clk_val == 0xFFFFU)
+ return 0;
+
+ writew(sclk->clk_val, syscon_vbase + U300_SYSCON_SBCER);
+ return 0;
+}
+
+static void syscon_clk_disable(struct clk_hw *hw)
+{
+ struct clk_syscon *sclk = to_syscon(hw);
+
+ /* Don't touch the hardware controlled clocks */
+ if (sclk->hw_ctrld)
+ return;
+ if (sclk->clk_val == 0xFFFFU)
+ return;
+ /* Please don't disable the console port */
+ if (sclk->clk_val == U300_SYSCON_SBCER_UART_CLK_EN)
+ return;
+
+ writew(sclk->clk_val, syscon_vbase + U300_SYSCON_SBCDR);
+}
+
+static int syscon_clk_is_enabled(struct clk_hw *hw)
+{
+ struct clk_syscon *sclk = to_syscon(hw);
+ u16 val;
+
+ /* If no enable register defined, it's always-on */
+ if (!sclk->en_reg)
+ return 1;
+
+ val = readw(sclk->en_reg);
+ val &= BIT(sclk->en_bit);
+
+ return val ? 1 : 0;
+}
+
+static u16 syscon_get_perf(void)
+{
+ u16 val;
+
+ val = readw(syscon_vbase + U300_SYSCON_CCR);
+ val &= U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
+ return val;
+}
+
+static unsigned long
+syscon_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_syscon *sclk = to_syscon(hw);
+ u16 perf = syscon_get_perf();
+
+ switch(sclk->clk_val) {
+ case U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN:
+ case U300_SYSCON_SBCER_I2C0_CLK_EN:
+ case U300_SYSCON_SBCER_I2C1_CLK_EN:
+ case U300_SYSCON_SBCER_MMC_CLK_EN:
+ case U300_SYSCON_SBCER_SPI_CLK_EN:
+ /* The FAST clocks have one progression */
+ switch(perf) {
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
+ return 13000000;
+ default:
+ return parent_rate; /* 26 MHz */
+ }
+ case U300_SYSCON_SBCER_DMAC_CLK_EN:
+ case U300_SYSCON_SBCER_NANDIF_CLK_EN:
+ case U300_SYSCON_SBCER_XGAM_CLK_EN:
+ /* AMBA interconnect peripherals */
+ switch(perf) {
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
+ return 6500000;
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
+ return 26000000;
+ default:
+ return parent_rate; /* 52 MHz */
+ }
+ case U300_SYSCON_SBCER_SEMI_CLK_EN:
+ case U300_SYSCON_SBCER_EMIF_CLK_EN:
+ /* EMIF speeds */
+ switch(perf) {
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
+ return 13000000;
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
+ return 52000000;
+ default:
+ return 104000000;
+ }
+ case U300_SYSCON_SBCER_CPU_CLK_EN:
+ /* And the fast CPU clock */
+ switch(perf) {
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
+ return 13000000;
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
+ return 52000000;
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
+ return 104000000;
+ default:
+ return parent_rate; /* 208 MHz */
+ }
+ default:
+ /*
+ * The SLOW clocks and default just inherit the rate of
+ * their parent (typically PLL13 13 MHz).
+ */
+ return parent_rate;
+ }
+}
+
+static long
+syscon_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_syscon *sclk = to_syscon(hw);
+
+ if (sclk->clk_val != U300_SYSCON_SBCER_CPU_CLK_EN)
+ return *prate;
+ /* We really only support setting the rate of the CPU clock */
+ if (rate <= 13000000)
+ return 13000000;
+ if (rate <= 52000000)
+ return 52000000;
+ if (rate <= 104000000)
+ return 104000000;
+ return 208000000;
+}
+
+static int syscon_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_syscon *sclk = to_syscon(hw);
+ u16 val;
+
+ /* We only support setting the rate of the CPU clock */
+ if (sclk->clk_val != U300_SYSCON_SBCER_CPU_CLK_EN)
+ return -EINVAL;
+ switch (rate) {
+ case 13000000:
+ val = U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER;
+ break;
+ case 52000000:
+ val = U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE;
+ break;
+ case 104000000:
+ val = U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH;
+ break;
+ case 208000000:
+ val = U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST;
+ break;
+ default:
+ return -EINVAL;
+ }
+ val |= readw(syscon_vbase + U300_SYSCON_CCR) &
+ ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK ;
+ writew(val, syscon_vbase + U300_SYSCON_CCR);
+ return 0;
+}
+
+static const struct clk_ops syscon_clk_ops = {
+ .prepare = syscon_clk_prepare,
+ .unprepare = syscon_clk_unprepare,
+ .enable = syscon_clk_enable,
+ .disable = syscon_clk_disable,
+ .is_enabled = syscon_clk_is_enabled,
+ .recalc_rate = syscon_clk_recalc_rate,
+ .round_rate = syscon_clk_round_rate,
+ .set_rate = syscon_clk_set_rate,
+};
+
+static struct clk * __init
+syscon_clk_register(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ bool hw_ctrld,
+ void __iomem *res_reg, u8 res_bit,
+ void __iomem *en_reg, u8 en_bit,
+ u16 clk_val)
+{
+ struct clk *clk;
+ struct clk_syscon *sclk;
+ struct clk_init_data init;
+
+ sclk = kzalloc(sizeof(struct clk_syscon), GFP_KERNEL);
+ if (!sclk) {
+ pr_err("could not allocate syscon clock %s\n",
+ name);
+ return ERR_PTR(-ENOMEM);
+ }
+ init.name = name;
+ init.ops = &syscon_clk_ops;
+ init.flags = flags;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ sclk->hw.init = &init;
+ sclk->hw_ctrld = hw_ctrld;
+ /* Assume the block is in reset at registration */
+ sclk->reset = true;
+ sclk->res_reg = res_reg;
+ sclk->res_bit = res_bit;
+ sclk->en_reg = en_reg;
+ sclk->en_bit = en_bit;
+ sclk->clk_val = clk_val;
+
+ clk = clk_register(dev, &sclk->hw);
+ if (IS_ERR(clk))
+ kfree(sclk);
+
+ return clk;
+}
+
+/**
+ * struct clk_mclk - U300 MCLK clock (MMC/SD clock)
+ * @hw: corresponding clock hardware entry
+ * @is_mspro: if this is the memory stick clock rather than MMC/SD
+ */
+struct clk_mclk {
+ struct clk_hw hw;
+ bool is_mspro;
+};
+
+#define to_mclk(_hw) container_of(_hw, struct clk_mclk, hw)
+
+static int mclk_clk_prepare(struct clk_hw *hw)
+{
+ struct clk_mclk *mclk = to_mclk(hw);
+ u16 val;
+
+ /* The MMC and MSPRO clocks need some special set-up */
+ if (!mclk->is_mspro) {
+ /* Set default MMC clock divisor to 18.9 MHz */
+ writew(0x0054U, syscon_vbase + U300_SYSCON_MMF0R);
+ val = readw(syscon_vbase + U300_SYSCON_MMCR);
+ /* Disable the MMC feedback clock */
+ val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
+ /* Disable MSPRO frequency */
+ val &= ~U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
+ writew(val, syscon_vbase + U300_SYSCON_MMCR);
+ } else {
+ val = readw(syscon_vbase + U300_SYSCON_MMCR);
+ /* Disable the MMC feedback clock */
+ val &= ~U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE;
+ /* Enable MSPRO frequency */
+ val |= U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE;
+ writew(val, syscon_vbase + U300_SYSCON_MMCR);
+ }
+
+ return 0;
+}
+
+static unsigned long
+mclk_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u16 perf = syscon_get_perf();
+
+ switch (perf) {
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW_POWER:
+ /*
+ * Here, the 208 MHz PLL gets shut down and the always
+ * on 13 MHz PLL used for RTC etc kicks into use
+ * instead.
+ */
+ return 13000000;
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_LOW:
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_INTERMEDIATE:
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_HIGH:
+ case U300_SYSCON_CCR_CLKING_PERFORMANCE_BEST:
+ {
+ /*
+ * This clock is under program control. The register is
+ * divided in two nybbles, bit 7-4 gives cycles-1 to count
+ * high, bit 3-0 gives cycles-1 to count low. Distribute
+ * these with no more than 1 cycle difference between
+ * low and high and add low and high to get the actual
+ * divisor. The base PLL is 208 MHz. Writing 0x00 will
+ * divide by 1 and 1 so the highest frequency possible
+ * is 104 MHz.
+ *
+ * e.g. 0x54 =>
+ * f = 208 / ((5+1) + (4+1)) = 208 / 11 = 18.9 MHz
+ */
+ u16 val = readw(syscon_vbase + U300_SYSCON_MMF0R) &
+ U300_SYSCON_MMF0R_MASK;
+ switch (val) {
+ case 0x0054:
+ return 18900000;
+ case 0x0044:
+ return 20800000;
+ case 0x0043:
+ return 23100000;
+ case 0x0033:
+ return 26000000;
+ case 0x0032:
+ return 29700000;
+ case 0x0022:
+ return 34700000;
+ case 0x0021:
+ return 41600000;
+ case 0x0011:
+ return 52000000;
+ case 0x0000:
+ return 104000000;
+ default:
+ break;
+ }
+ }
+ default:
+ break;
+ }
+ return parent_rate;
+}
+
+static long
+mclk_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ if (rate <= 18900000)
+ return 18900000;
+ if (rate <= 20800000)
+ return 20800000;
+ if (rate <= 23100000)
+ return 23100000;
+ if (rate <= 26000000)
+ return 26000000;
+ if (rate <= 29700000)
+ return 29700000;
+ if (rate <= 34700000)
+ return 34700000;
+ if (rate <= 41600000)
+ return 41600000;
+ /* Highest rate */
+ return 52000000;
+}
+
+static int mclk_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ u16 val;
+ u16 reg;
+
+ switch (rate) {
+ case 18900000:
+ val = 0x0054;
+ break;
+ case 20800000:
+ val = 0x0044;
+ break;
+ case 23100000:
+ val = 0x0043;
+ break;
+ case 26000000:
+ val = 0x0033;
+ break;
+ case 29700000:
+ val = 0x0032;
+ break;
+ case 34700000:
+ val = 0x0022;
+ break;
+ case 41600000:
+ val = 0x0021;
+ break;
+ case 52000000:
+ val = 0x0011;
+ break;
+ case 104000000:
+ val = 0x0000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = readw(syscon_vbase + U300_SYSCON_MMF0R) &
+ ~U300_SYSCON_MMF0R_MASK;
+ writew(reg | val, syscon_vbase + U300_SYSCON_MMF0R);
+ return 0;
+}
+
+static const struct clk_ops mclk_ops = {
+ .prepare = mclk_clk_prepare,
+ .recalc_rate = mclk_clk_recalc_rate,
+ .round_rate = mclk_clk_round_rate,
+ .set_rate = mclk_clk_set_rate,
+};
+
+static struct clk * __init
+mclk_clk_register(struct device *dev, const char *name,
+ const char *parent_name, bool is_mspro)
+{
+ struct clk *clk;
+ struct clk_mclk *mclk;
+ struct clk_init_data init;
+
+ mclk = kzalloc(sizeof(struct clk_mclk), GFP_KERNEL);
+ if (!mclk) {
+ pr_err("could not allocate MMC/SD clock %s\n",
+ name);
+ return ERR_PTR(-ENOMEM);
+ }
+ init.name = "mclk";
+ init.ops = &mclk_ops;
+ init.flags = 0;
+ init.parent_names = (parent_name ? &parent_name : NULL);
+ init.num_parents = (parent_name ? 1 : 0);
+ mclk->hw.init = &init;
+ mclk->is_mspro = is_mspro;
+
+ clk = clk_register(dev, &mclk->hw);
+ if (IS_ERR(clk))
+ kfree(mclk);
+
+ return clk;
+}
+
+void __init u300_clk_init(void __iomem *base)
+{
+ u16 val;
+ struct clk *clk;
+
+ syscon_vbase = base;
+
+ /* Set system to run at PLL208, max performance, a known state. */
+ val = readw(syscon_vbase + U300_SYSCON_CCR);
+ val &= ~U300_SYSCON_CCR_CLKING_PERFORMANCE_MASK;
+ writew(val, syscon_vbase + U300_SYSCON_CCR);
+ /* Wait for the PLL208 to lock if not locked in yet */
+ while (!(readw(syscon_vbase + U300_SYSCON_CSR) &
+ U300_SYSCON_CSR_PLL208_LOCK_IND));
+
+ /* Power management enable */
+ val = readw(syscon_vbase + U300_SYSCON_PMCR);
+ val |= U300_SYSCON_PMCR_PWR_MGNT_ENABLE;
+ writew(val, syscon_vbase + U300_SYSCON_PMCR);
+
+ /* These are always available (RTC and PLL13) */
+ clk = clk_register_fixed_rate(NULL, "app_32_clk", NULL,
+ CLK_IS_ROOT, 32768);
+ /* The watchdog sits directly on the 32 kHz clock */
+ clk_register_clkdev(clk, NULL, "coh901327_wdog");
+ clk = clk_register_fixed_rate(NULL, "pll13", NULL,
+ CLK_IS_ROOT, 13000000);
+
+ /* These derive from PLL208 */
+ clk = clk_register_fixed_rate(NULL, "pll208", NULL,
+ CLK_IS_ROOT, 208000000);
+ clk = clk_register_fixed_factor(NULL, "app_208_clk", "pll208",
+ 0, 1, 1);
+ clk = clk_register_fixed_factor(NULL, "app_104_clk", "pll208",
+ 0, 1, 2);
+ clk = clk_register_fixed_factor(NULL, "app_52_clk", "pll208",
+ 0, 1, 4);
+ /* The 52 MHz is divided down to 26 MHz */
+ clk = clk_register_fixed_factor(NULL, "app_26_clk", "app_52_clk",
+ 0, 1, 2);
+
+ /* Directly on the AMBA interconnect */
+ clk = syscon_clk_register(NULL, "cpu_clk", "app_208_clk", 0, true,
+ syscon_vbase + U300_SYSCON_RRR, 3,
+ syscon_vbase + U300_SYSCON_CERR, 3,
+ U300_SYSCON_SBCER_CPU_CLK_EN);
+ clk = syscon_clk_register(NULL, "dmac_clk", "app_52_clk", 0, true,
+ syscon_vbase + U300_SYSCON_RRR, 4,
+ syscon_vbase + U300_SYSCON_CERR, 4,
+ U300_SYSCON_SBCER_DMAC_CLK_EN);
+ clk_register_clkdev(clk, NULL, "dma");
+ clk = syscon_clk_register(NULL, "fsmc_clk", "app_52_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RRR, 6,
+ syscon_vbase + U300_SYSCON_CERR, 6,
+ U300_SYSCON_SBCER_NANDIF_CLK_EN);
+ clk_register_clkdev(clk, NULL, "fsmc-nand");
+ clk = syscon_clk_register(NULL, "xgam_clk", "app_52_clk", 0, true,
+ syscon_vbase + U300_SYSCON_RRR, 8,
+ syscon_vbase + U300_SYSCON_CERR, 8,
+ U300_SYSCON_SBCER_XGAM_CLK_EN);
+ clk_register_clkdev(clk, NULL, "xgam");
+ clk = syscon_clk_register(NULL, "semi_clk", "app_104_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RRR, 9,
+ syscon_vbase + U300_SYSCON_CERR, 9,
+ U300_SYSCON_SBCER_SEMI_CLK_EN);
+ clk_register_clkdev(clk, NULL, "semi");
+
+ /* AHB bridge clocks */
+ clk = syscon_clk_register(NULL, "ahb_subsys_clk", "app_52_clk", 0, true,
+ syscon_vbase + U300_SYSCON_RRR, 10,
+ syscon_vbase + U300_SYSCON_CERR, 10,
+ U300_SYSCON_SBCER_AHB_SUBSYS_BRIDGE_CLK_EN);
+ clk = syscon_clk_register(NULL, "intcon_clk", "ahb_subsys_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RRR, 12,
+ syscon_vbase + U300_SYSCON_CERR, 12,
+ /* Cannot be enabled, just taken out of reset */
+ 0xFFFFU);
+ clk_register_clkdev(clk, NULL, "intcon");
+ clk = syscon_clk_register(NULL, "emif_clk", "ahb_subsys_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RRR, 5,
+ syscon_vbase + U300_SYSCON_CERR, 5,
+ U300_SYSCON_SBCER_EMIF_CLK_EN);
+ clk_register_clkdev(clk, NULL, "pl172");
+
+ /* FAST bridge clocks */
+ clk = syscon_clk_register(NULL, "fast_clk", "app_26_clk", 0, true,
+ syscon_vbase + U300_SYSCON_RFR, 0,
+ syscon_vbase + U300_SYSCON_CEFR, 0,
+ U300_SYSCON_SBCER_FAST_BRIDGE_CLK_EN);
+ clk = syscon_clk_register(NULL, "i2c0_p_clk", "fast_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RFR, 1,
+ syscon_vbase + U300_SYSCON_CEFR, 1,
+ U300_SYSCON_SBCER_I2C0_CLK_EN);
+ clk_register_clkdev(clk, NULL, "stu300.0");
+ clk = syscon_clk_register(NULL, "i2c1_p_clk", "fast_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RFR, 2,
+ syscon_vbase + U300_SYSCON_CEFR, 2,
+ U300_SYSCON_SBCER_I2C1_CLK_EN);
+ clk_register_clkdev(clk, NULL, "stu300.1");
+ clk = syscon_clk_register(NULL, "mmc_p_clk", "fast_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RFR, 5,
+ syscon_vbase + U300_SYSCON_CEFR, 5,
+ U300_SYSCON_SBCER_MMC_CLK_EN);
+ clk_register_clkdev(clk, "apb_pclk", "mmci");
+ clk = syscon_clk_register(NULL, "spi_p_clk", "fast_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RFR, 6,
+ syscon_vbase + U300_SYSCON_CEFR, 6,
+ U300_SYSCON_SBCER_SPI_CLK_EN);
+ /* The SPI has no external clock for the outward bus, uses the pclk */
+ clk_register_clkdev(clk, NULL, "pl022");
+ clk_register_clkdev(clk, "apb_pclk", "pl022");
+
+ /* SLOW bridge clocks */
+ clk = syscon_clk_register(NULL, "slow_clk", "pll13", 0, true,
+ syscon_vbase + U300_SYSCON_RSR, 0,
+ syscon_vbase + U300_SYSCON_CESR, 0,
+ U300_SYSCON_SBCER_SLOW_BRIDGE_CLK_EN);
+ clk = syscon_clk_register(NULL, "uart0_clk", "slow_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RSR, 1,
+ syscon_vbase + U300_SYSCON_CESR, 1,
+ U300_SYSCON_SBCER_UART_CLK_EN);
+ /* Same clock is used for APB and outward bus */
+ clk_register_clkdev(clk, NULL, "uart0");
+ clk_register_clkdev(clk, "apb_pclk", "uart0");
+ clk = syscon_clk_register(NULL, "gpio_clk", "slow_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RSR, 4,
+ syscon_vbase + U300_SYSCON_CESR, 4,
+ U300_SYSCON_SBCER_GPIO_CLK_EN);
+ clk_register_clkdev(clk, NULL, "u300-gpio");
+ clk = syscon_clk_register(NULL, "keypad_clk", "slow_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RSR, 5,
+ syscon_vbase + U300_SYSCON_CESR, 6,
+ U300_SYSCON_SBCER_KEYPAD_CLK_EN);
+ clk_register_clkdev(clk, NULL, "coh901461-keypad");
+ clk = syscon_clk_register(NULL, "rtc_clk", "slow_clk", 0, true,
+ syscon_vbase + U300_SYSCON_RSR, 6,
+ /* No clock enable register bit */
+ NULL, 0, 0xFFFFU);
+ clk_register_clkdev(clk, NULL, "rtc-coh901331");
+ clk = syscon_clk_register(NULL, "app_tmr_clk", "slow_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RSR, 7,
+ syscon_vbase + U300_SYSCON_CESR, 7,
+ U300_SYSCON_SBCER_APP_TMR_CLK_EN);
+ clk_register_clkdev(clk, NULL, "apptimer");
+ clk = syscon_clk_register(NULL, "acc_tmr_clk", "slow_clk", 0, false,
+ syscon_vbase + U300_SYSCON_RSR, 8,
+ syscon_vbase + U300_SYSCON_CESR, 8,
+ U300_SYSCON_SBCER_ACC_TMR_CLK_EN);
+ clk_register_clkdev(clk, NULL, "timer");
+
+ /* Then this special MMC/SD clock */
+ clk = mclk_clk_register(NULL, "mmc_clk", "mmc_p_clk", false);
+ clk_register_clkdev(clk, NULL, "mmci");
+}
diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
new file mode 100644
index 000000000000..e7b7765e85f3
--- /dev/null
+++ b/drivers/clk/clk-wm831x.c
@@ -0,0 +1,428 @@
+/*
+ * WM831x clock control
+ *
+ * Copyright 2011-2 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/wm831x/core.h>
+
+struct wm831x_clk {
+ struct wm831x *wm831x;
+ struct clk_hw xtal_hw;
+ struct clk_hw fll_hw;
+ struct clk_hw clkout_hw;
+ struct clk *xtal;
+ struct clk *fll;
+ struct clk *clkout;
+ bool xtal_ena;
+};
+
+static int wm831x_xtal_is_enabled(struct clk_hw *hw)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ xtal_hw);
+
+ return clkdata->xtal_ena;
+}
+
+static unsigned long wm831x_xtal_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ xtal_hw);
+
+ if (clkdata->xtal_ena)
+ return 32768;
+ else
+ return 0;
+}
+
+static const struct clk_ops wm831x_xtal_ops = {
+ .is_enabled = wm831x_xtal_is_enabled,
+ .recalc_rate = wm831x_xtal_recalc_rate,
+};
+
+static struct clk_init_data wm831x_xtal_init = {
+ .name = "xtal",
+ .ops = &wm831x_xtal_ops,
+ .flags = CLK_IS_ROOT,
+};
+
+static const unsigned long wm831x_fll_auto_rates[] = {
+ 2048000,
+ 11289600,
+ 12000000,
+ 12288000,
+ 19200000,
+ 22579600,
+ 24000000,
+ 24576000,
+};
+
+static int wm831x_fll_is_enabled(struct clk_hw *hw)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ fll_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+ int ret;
+
+ ret = wm831x_reg_read(wm831x, WM831X_FLL_CONTROL_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Unable to read FLL_CONTROL_1: %d\n",
+ ret);
+ return true;
+ }
+
+ return (ret & WM831X_FLL_ENA) != 0;
+}
+
+static int wm831x_fll_prepare(struct clk_hw *hw)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ fll_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+ int ret;
+
+ ret = wm831x_set_bits(wm831x, WM831X_FLL_CONTROL_2,
+ WM831X_FLL_ENA, WM831X_FLL_ENA);
+ if (ret != 0)
+ dev_crit(wm831x->dev, "Failed to enable FLL: %d\n", ret);
+
+ usleep_range(2000, 2000);
+
+ return ret;
+}
+
+static void wm831x_fll_unprepare(struct clk_hw *hw)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ fll_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+ int ret;
+
+ ret = wm831x_set_bits(wm831x, WM831X_FLL_CONTROL_2, WM831X_FLL_ENA, 0);
+ if (ret != 0)
+ dev_crit(wm831x->dev, "Failed to disaable FLL: %d\n", ret);
+}
+
+static unsigned long wm831x_fll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ fll_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+ int ret;
+
+ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n",
+ ret);
+ return 0;
+ }
+
+ if (ret & WM831X_FLL_AUTO)
+ return wm831x_fll_auto_rates[ret & WM831X_FLL_AUTO_FREQ_MASK];
+
+ dev_err(wm831x->dev, "FLL only supported in AUTO mode\n");
+
+ return 0;
+}
+
+static long wm831x_fll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *unused)
+{
+ int best = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
+ if (abs(wm831x_fll_auto_rates[i] - rate) <
+ abs(wm831x_fll_auto_rates[best] - rate))
+ best = i;
+
+ return wm831x_fll_auto_rates[best];
+}
+
+static int wm831x_fll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ fll_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wm831x_fll_auto_rates); i++)
+ if (wm831x_fll_auto_rates[i] == rate)
+ break;
+ if (i == ARRAY_SIZE(wm831x_fll_auto_rates))
+ return -EINVAL;
+
+ if (wm831x_fll_is_enabled(hw))
+ return -EPERM;
+
+ return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_2,
+ WM831X_FLL_AUTO_FREQ_MASK, i);
+}
+
+static const char *wm831x_fll_parents[] = {
+ "xtal",
+ "clkin",
+};
+
+static u8 wm831x_fll_get_parent(struct clk_hw *hw)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ fll_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+ int ret;
+
+ /* AUTO mode is always clocked from the crystal */
+ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n",
+ ret);
+ return 0;
+ }
+
+ if (ret & WM831X_FLL_AUTO)
+ return 0;
+
+ ret = wm831x_reg_read(wm831x, WM831X_FLL_CONTROL_5);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Unable to read FLL_CONTROL_5: %d\n",
+ ret);
+ return 0;
+ }
+
+ switch (ret & WM831X_FLL_CLK_SRC_MASK) {
+ case 0:
+ return 0;
+ case 1:
+ return 1;
+ default:
+ dev_err(wm831x->dev, "Unsupported FLL clock source %d\n",
+ ret & WM831X_FLL_CLK_SRC_MASK);
+ return 0;
+ }
+}
+
+static const struct clk_ops wm831x_fll_ops = {
+ .is_enabled = wm831x_fll_is_enabled,
+ .prepare = wm831x_fll_prepare,
+ .unprepare = wm831x_fll_unprepare,
+ .round_rate = wm831x_fll_round_rate,
+ .recalc_rate = wm831x_fll_recalc_rate,
+ .set_rate = wm831x_fll_set_rate,
+ .get_parent = wm831x_fll_get_parent,
+};
+
+static struct clk_init_data wm831x_fll_init = {
+ .name = "fll",
+ .ops = &wm831x_fll_ops,
+ .parent_names = wm831x_fll_parents,
+ .num_parents = ARRAY_SIZE(wm831x_fll_parents),
+ .flags = CLK_SET_RATE_GATE,
+};
+
+static int wm831x_clkout_is_enabled(struct clk_hw *hw)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ clkout_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+ int ret;
+
+ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
+ ret);
+ return true;
+ }
+
+ return (ret & WM831X_CLKOUT_ENA) != 0;
+}
+
+static int wm831x_clkout_prepare(struct clk_hw *hw)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ clkout_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+ int ret;
+
+ ret = wm831x_reg_unlock(wm831x);
+ if (ret != 0) {
+ dev_crit(wm831x->dev, "Failed to lock registers: %d\n", ret);
+ return ret;
+ }
+
+ ret = wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1,
+ WM831X_CLKOUT_ENA, WM831X_CLKOUT_ENA);
+ if (ret != 0)
+ dev_crit(wm831x->dev, "Failed to enable CLKOUT: %d\n", ret);
+
+ wm831x_reg_lock(wm831x);
+
+ return ret;
+}
+
+static void wm831x_clkout_unprepare(struct clk_hw *hw)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ clkout_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+ int ret;
+
+ ret = wm831x_reg_unlock(wm831x);
+ if (ret != 0) {
+ dev_crit(wm831x->dev, "Failed to lock registers: %d\n", ret);
+ return;
+ }
+
+ ret = wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1,
+ WM831X_CLKOUT_ENA, 0);
+ if (ret != 0)
+ dev_crit(wm831x->dev, "Failed to disable CLKOUT: %d\n", ret);
+
+ wm831x_reg_lock(wm831x);
+}
+
+static const char *wm831x_clkout_parents[] = {
+ "xtal",
+ "fll",
+};
+
+static u8 wm831x_clkout_get_parent(struct clk_hw *hw)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ clkout_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+ int ret;
+
+ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_1);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n",
+ ret);
+ return 0;
+ }
+
+ if (ret & WM831X_CLKOUT_SRC)
+ return 0;
+ else
+ return 1;
+}
+
+static int wm831x_clkout_set_parent(struct clk_hw *hw, u8 parent)
+{
+ struct wm831x_clk *clkdata = container_of(hw, struct wm831x_clk,
+ clkout_hw);
+ struct wm831x *wm831x = clkdata->wm831x;
+
+ return wm831x_set_bits(wm831x, WM831X_CLOCK_CONTROL_1,
+ WM831X_CLKOUT_SRC,
+ parent << WM831X_CLKOUT_SRC_SHIFT);
+}
+
+static const struct clk_ops wm831x_clkout_ops = {
+ .is_enabled = wm831x_clkout_is_enabled,
+ .prepare = wm831x_clkout_prepare,
+ .unprepare = wm831x_clkout_unprepare,
+ .get_parent = wm831x_clkout_get_parent,
+ .set_parent = wm831x_clkout_set_parent,
+};
+
+static struct clk_init_data wm831x_clkout_init = {
+ .name = "clkout",
+ .ops = &wm831x_clkout_ops,
+ .parent_names = wm831x_clkout_parents,
+ .num_parents = ARRAY_SIZE(wm831x_clkout_parents),
+ .flags = CLK_SET_RATE_PARENT,
+};
+
+static __devinit int wm831x_clk_probe(struct platform_device *pdev)
+{
+ struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
+ struct wm831x_clk *clkdata;
+ int ret;
+
+ clkdata = devm_kzalloc(&pdev->dev, sizeof(*clkdata), GFP_KERNEL);
+ if (!clkdata)
+ return -ENOMEM;
+
+ /* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
+ ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
+ if (ret < 0) {
+ dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_2: %d\n",
+ ret);
+ return ret;
+ }
+ clkdata->xtal_ena = ret & WM831X_XTAL_ENA;
+
+ clkdata->xtal_hw.init = &wm831x_xtal_init;
+ clkdata->xtal = clk_register(&pdev->dev, &clkdata->xtal_hw);
+ if (!clkdata->xtal)
+ return -EINVAL;
+
+ clkdata->fll_hw.init = &wm831x_fll_init;
+ clkdata->fll = clk_register(&pdev->dev, &clkdata->fll_hw);
+ if (!clkdata->fll) {
+ ret = -EINVAL;
+ goto err_xtal;
+ }
+
+ clkdata->clkout_hw.init = &wm831x_clkout_init;
+ clkdata->clkout = clk_register(&pdev->dev, &clkdata->clkout_hw);
+ if (!clkdata->clkout) {
+ ret = -EINVAL;
+ goto err_fll;
+ }
+
+ dev_set_drvdata(&pdev->dev, clkdata);
+
+ return 0;
+
+err_fll:
+ clk_unregister(clkdata->fll);
+err_xtal:
+ clk_unregister(clkdata->xtal);
+ return ret;
+}
+
+static int __devexit wm831x_clk_remove(struct platform_device *pdev)
+{
+ struct wm831x_clk *clkdata = dev_get_drvdata(&pdev->dev);
+
+ clk_unregister(clkdata->clkout);
+ clk_unregister(clkdata->fll);
+ clk_unregister(clkdata->xtal);
+
+ return 0;
+}
+
+static struct platform_driver wm831x_clk_driver = {
+ .probe = wm831x_clk_probe,
+ .remove = __devexit_p(wm831x_clk_remove),
+ .driver = {
+ .name = "wm831x-clk",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(wm831x_clk_driver);
+
+/* Module information */
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM831x clock driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm831x-clk");
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index dcbe05616090..c87fdd710560 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/of.h>
static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);
@@ -1067,26 +1068,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent)
old_parent = clk->parent;
- /* find index of new parent clock using cached parent ptrs */
- if (clk->parents)
- for (i = 0; i < clk->num_parents; i++)
- if (clk->parents[i] == parent)
- break;
- else
+ if (!clk->parents)
clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
/*
- * find index of new parent clock using string name comparison
- * also try to cache the parent to avoid future calls to __clk_lookup
+ * find index of new parent clock using cached parent ptrs,
+ * or if not yet cached, use string name comparison and cache
+ * them now to avoid future calls to __clk_lookup.
*/
- if (i == clk->num_parents)
- for (i = 0; i < clk->num_parents; i++)
- if (!strcmp(clk->parent_names[i], parent->name)) {
- if (clk->parents)
- clk->parents[i] = __clk_lookup(parent->name);
- break;
- }
+ for (i = 0; i < clk->num_parents; i++) {
+ if (clk->parents && clk->parents[i] == parent)
+ break;
+ else if (!strcmp(clk->parent_names[i], parent->name)) {
+ if (clk->parents)
+ clk->parents[i] = __clk_lookup(parent->name);
+ break;
+ }
+ }
if (i == clk->num_parents) {
pr_debug("%s: clock %s is not a possible parent of clock %s\n",
@@ -1237,8 +1236,8 @@ int __clk_init(struct device *dev, struct clk *clk)
* If clk->parents is not NULL we skip this entire block. This allows
* for clock drivers to statically initialize clk->parents.
*/
- if (clk->num_parents && !clk->parents) {
- clk->parents = kmalloc((sizeof(struct clk*) * clk->num_parents),
+ if (clk->num_parents > 1 && !clk->parents) {
+ clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
GFP_KERNEL);
/*
* __clk_lookup returns NULL for parents that have not been
@@ -1552,3 +1551,142 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
+
+#ifdef CONFIG_OF
+/**
+ * struct of_clk_provider - Clock provider registration structure
+ * @link: Entry in global list of clock providers
+ * @node: Pointer to device tree node of clock provider
+ * @get: Get clock callback. Returns NULL or a struct clk for the
+ * given clock specifier
+ * @data: context pointer to be passed into @get callback
+ */
+struct of_clk_provider {
+ struct list_head link;
+
+ struct device_node *node;
+ struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
+ void *data;
+};
+
+static LIST_HEAD(of_clk_providers);
+static DEFINE_MUTEX(of_clk_lock);
+
+struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ return data;
+}
+EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
+
+/**
+ * of_clk_add_provider() - Register a clock provider for a node
+ * @np: Device node pointer associated with clock provider
+ * @clk_src_get: callback for decoding clock
+ * @data: context pointer for @clk_src_get callback.
+ */
+int of_clk_add_provider(struct device_node *np,
+ struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
+ void *data),
+ void *data)
+{
+ struct of_clk_provider *cp;
+
+ cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ cp->node = of_node_get(np);
+ cp->data = data;
+ cp->get = clk_src_get;
+
+ mutex_lock(&of_clk_lock);
+ list_add(&cp->link, &of_clk_providers);
+ mutex_unlock(&of_clk_lock);
+ pr_debug("Added clock from %s\n", np->full_name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_clk_add_provider);
+
+/**
+ * of_clk_del_provider() - Remove a previously registered clock provider
+ * @np: Device node pointer associated with clock provider
+ */
+void of_clk_del_provider(struct device_node *np)
+{
+ struct of_clk_provider *cp;
+
+ mutex_lock(&of_clk_lock);
+ list_for_each_entry(cp, &of_clk_providers, link) {
+ if (cp->node == np) {
+ list_del(&cp->link);
+ of_node_put(cp->node);
+ kfree(cp);
+ break;
+ }
+ }
+ mutex_unlock(&of_clk_lock);
+}
+EXPORT_SYMBOL_GPL(of_clk_del_provider);
+
+struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
+{
+ struct of_clk_provider *provider;
+ struct clk *clk = ERR_PTR(-ENOENT);
+
+ /* Check if we have such a provider in our array */
+ mutex_lock(&of_clk_lock);
+ list_for_each_entry(provider, &of_clk_providers, link) {
+ if (provider->node == clkspec->np)
+ clk = provider->get(clkspec, provider->data);
+ if (!IS_ERR(clk))
+ break;
+ }
+ mutex_unlock(&of_clk_lock);
+
+ return clk;
+}
+
+const char *of_clk_get_parent_name(struct device_node *np, int index)
+{
+ struct of_phandle_args clkspec;
+ const char *clk_name;
+ int rc;
+
+ if (index < 0)
+ return NULL;
+
+ rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
+ &clkspec);
+ if (rc)
+ return NULL;
+
+ if (of_property_read_string_index(clkspec.np, "clock-output-names",
+ clkspec.args_count ? clkspec.args[0] : 0,
+ &clk_name) < 0)
+ clk_name = clkspec.np->name;
+
+ of_node_put(clkspec.np);
+ return clk_name;
+}
+EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
+
+/**
+ * of_clk_init() - Scan and init clock providers from the DT
+ * @matches: array of compatible values and init functions for providers.
+ *
+ * This function scans the device tree for matching clock providers and
+ * calls their initialization functions
+ */
+void __init of_clk_init(const struct of_device_id *matches)
+{
+ struct device_node *np;
+
+ for_each_matching_node(np, matches) {
+ const struct of_device_id *match = of_match_node(matches, np);
+ of_clk_init_cb_t clk_init_cb = match->data;
+ clk_init_cb(np);
+ }
+}
+#endif
diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c
index c535cf8c5770..d423c9bdd71a 100644
--- a/drivers/clk/clkdev.c
+++ b/drivers/clk/clkdev.c
@@ -19,10 +19,80 @@
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/of.h>
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+struct clk *of_clk_get(struct device_node *np, int index)
+{
+ struct of_phandle_args clkspec;
+ struct clk *clk;
+ int rc;
+
+ if (index < 0)
+ return ERR_PTR(-EINVAL);
+
+ rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
+ &clkspec);
+ if (rc)
+ return ERR_PTR(rc);
+
+ clk = of_clk_get_from_provider(&clkspec);
+ of_node_put(clkspec.np);
+ return clk;
+}
+EXPORT_SYMBOL(of_clk_get);
+
+/**
+ * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
+ * @np: pointer to clock consumer node
+ * @name: name of consumer's clock input, or NULL for the first clock reference
+ *
+ * This function parses the clocks and clock-names properties,
+ * and uses them to look up the struct clk from the registered list of clock
+ * providers.
+ */
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+{
+ struct clk *clk = ERR_PTR(-ENOENT);
+
+ /* Walk up the tree of devices looking for a clock that matches */
+ while (np) {
+ int index = 0;
+
+ /*
+ * For named clocks, first look up the name in the
+ * "clock-names" property. If it cannot be found, then
+ * index will be an error code, and of_clk_get() will fail.
+ */
+ if (name)
+ index = of_property_match_string(np, "clock-names", name);
+ clk = of_clk_get(np, index);
+ if (!IS_ERR(clk))
+ break;
+ else if (name && index >= 0) {
+ pr_err("ERROR: could not get clock %s:%s(%i)\n",
+ np->full_name, name ? name : "", index);
+ return clk;
+ }
+
+ /*
+ * No matching clock found on this node. If the parent node
+ * has a "clock-ranges" property, then we can try one of its
+ * clocks.
+ */
+ np = np->parent;
+ if (np && !of_get_property(np, "clock-ranges", NULL))
+ break;
+ }
+
+ return clk;
+}
+EXPORT_SYMBOL(of_clk_get_by_name);
+#endif
+
/*
* Find the correct struct clk for the device and connection ID.
* We do slightly fuzzy matching here:
@@ -83,6 +153,13 @@ EXPORT_SYMBOL(clk_get_sys);
struct clk *clk_get(struct device *dev, const char *con_id)
{
const char *dev_id = dev ? dev_name(dev) : NULL;
+ struct clk *clk;
+
+ if (dev) {
+ clk = of_clk_get_by_name(dev->of_node, con_id);
+ if (!IS_ERR(clk) && __clk_get(clk))
+ return clk;
+ }
return clk_get_sys(dev_id, con_id);
}
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
index db2391c054ee..844043ad0fe4 100644
--- a/drivers/clk/mxs/clk-imx23.c
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -106,7 +106,7 @@ static struct clk_lookup lcdif_lookups[] = {
static struct clk_lookup gpmi_lookups[] = {
{ .dev_id = "imx23-gpmi-nand", },
- { .dev_id = "8000c000.gpmi", },
+ { .dev_id = "8000c000.gpmi-nand", },
};
static const char *sel_pll[] __initconst = { "pll", "ref_xtal", };
@@ -189,6 +189,7 @@ int __init mx23_clocks_init(void)
}
clk_register_clkdev(clks[clk32k], NULL, "timrot");
+ clk_register_clkdev(clks[pwm], NULL, "80064000.pwm");
clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
index 7fad6c8c13d2..e3aab67b3eb7 100644
--- a/drivers/clk/mxs/clk-imx28.c
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -112,11 +112,11 @@ static void __init clk_misc_init(void)
/*
* 480 MHz seems too high to be ssp clock source directly,
- * so set frac0 to get a 288 MHz ref_io0.
+ * so set frac0 to get a 288 MHz ref_io0 and ref_io1.
*/
val = readl_relaxed(FRAC0);
- val &= ~(0x3f << BP_FRAC0_IO0FRAC);
- val |= 30 << BP_FRAC0_IO0FRAC;
+ val &= ~((0x3f << BP_FRAC0_IO0FRAC) | (0x3f << BP_FRAC0_IO1FRAC));
+ val |= (30 << BP_FRAC0_IO0FRAC) | (30 << BP_FRAC0_IO1FRAC);
writel_relaxed(val, FRAC0);
}
@@ -174,7 +174,7 @@ static struct clk_lookup lcdif_lookups[] = {
static struct clk_lookup gpmi_lookups[] = {
{ .dev_id = "imx28-gpmi-nand", },
- { .dev_id = "8000c000.gpmi", },
+ { .dev_id = "8000c000.gpmi-nand", },
};
static struct clk_lookup fec_lookups[] = {
@@ -314,6 +314,7 @@ int __init mx28_clocks_init(void)
clk_register_clkdev(clks[clk32k], NULL, "timrot");
clk_register_clkdev(clks[enet_out], NULL, "enet_out");
+ clk_register_clkdev(clks[pwm], NULL, "80064000.pwm");
clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
@@ -328,6 +329,10 @@ int __init mx28_clocks_init(void)
clk_register_clkdevs(clks[fec], fec_lookups, ARRAY_SIZE(fec_lookups));
clk_register_clkdevs(clks[can0], can0_lookups, ARRAY_SIZE(can0_lookups));
clk_register_clkdevs(clks[can1], can1_lookups, ARRAY_SIZE(can1_lookups));
+ clk_register_clkdev(clks[usb0_pwr], NULL, "8007c000.usbphy");
+ clk_register_clkdev(clks[usb1_pwr], NULL, "8007e000.usbphy");
+ clk_register_clkdev(clks[usb0], NULL, "80080000.usb");
+ clk_register_clkdev(clks[usb1], NULL, "80090000.usb");
for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
clk_prepare_enable(clks[clks_init_on[i]]);
diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile
new file mode 100644
index 000000000000..0303c0b99cd0
--- /dev/null
+++ b/drivers/clk/socfpga/Makefile
@@ -0,0 +1 @@
+obj-y += clk.o
diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
new file mode 100644
index 000000000000..2c855a6394ff
--- /dev/null
+++ b/drivers/clk/socfpga/clk.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012 Altera Corporation <www.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+
+#define SOCFPGA_OSC1_CLK 10000000
+#define SOCFPGA_MPU_CLK 800000000
+#define SOCFPGA_MAIN_QSPI_CLK 432000000
+#define SOCFPGA_MAIN_NAND_SDMMC_CLK 250000000
+#define SOCFPGA_S2F_USR_CLK 125000000
+
+void __init socfpga_init_clocks(void)
+{
+ struct clk *clk;
+
+ clk = clk_register_fixed_rate(NULL, "osc1_clk", NULL, CLK_IS_ROOT, SOCFPGA_OSC1_CLK);
+ clk_register_clkdev(clk, "osc1_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "mpu_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK);
+ clk_register_clkdev(clk, "mpu_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "main_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK/2);
+ clk_register_clkdev(clk, "main_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "dbg_base_clk", NULL, CLK_IS_ROOT, SOCFPGA_MPU_CLK/2);
+ clk_register_clkdev(clk, "dbg_base_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "main_qspi_clk", NULL, CLK_IS_ROOT, SOCFPGA_MAIN_QSPI_CLK);
+ clk_register_clkdev(clk, "main_qspi_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "main_nand_sdmmc_clk", NULL, CLK_IS_ROOT, SOCFPGA_MAIN_NAND_SDMMC_CLK);
+ clk_register_clkdev(clk, "main_nand_sdmmc_clk", NULL);
+
+ clk = clk_register_fixed_rate(NULL, "s2f_usr_clk", NULL, CLK_IS_ROOT, SOCFPGA_S2F_USR_CLK);
+ clk_register_clkdev(clk, "s2f_usr_clk", NULL);
+}
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 8f05652d53e6..0fcec2aae19c 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -345,31 +345,30 @@ static struct frac_rate_tbl gen_rtbl[] = {
/* clock parents */
static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
-static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", };
-static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
-static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+static const char *uart0_parents[] = { "pll5_clk", "uart_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
"osc_25m_clk", };
-static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
- "gmac_phy_synth_gate_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
-static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
"i2s_src_pad_clk", };
-static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
"pll3_clk", };
static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
"pll2_clk", };
static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
- "ras_pll2_clk", "ras_synth0_clk", };
+ "ras_pll2_clk", "ras_syn0_clk", };
static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
- "ras_pll2_clk", "ras_synth0_clk", };
-static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", };
-static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", };
-static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk",
+ "ras_pll2_clk", "ras_syn0_clk", };
+static const char *uart_parents[] = { "ras_apb_clk", "gen_syn3_clk", };
+static const char *i2c_parents[] = { "ras_apb_clk", "gen_syn1_clk", };
+static const char *ssp1_parents[] = { "ras_apb_clk", "gen_syn1_clk",
"ras_plclk0_clk", };
-static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", };
-static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", };
+static const char *pci_parents[] = { "ras_pll3_clk", "gen_syn2_clk", };
+static const char *tdm_parents[] = { "ras_pll3_clk", "gen_syn1_clk", };
void __init spear1310_clk_init(void)
{
@@ -390,9 +389,9 @@ void __init spear1310_clk_init(void)
25000000);
clk_register_clkdev(clk, "osc_25m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
- CLK_IS_ROOT, 125000000);
- clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+ clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+ 125000000);
+ clk_register_clkdev(clk, "gmii_pad_clk", NULL);
clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
CLK_IS_ROOT, 12288000);
@@ -406,34 +405,34 @@ void __init spear1310_clk_init(void)
/* clock derived from 24 or 25 MHz osc clk */
/* vco-pll */
- clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco1_mux_clk", NULL);
- clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
+ clk_register_clkdev(clk, "vco1_mclk", NULL);
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk",
0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco1_clk", NULL);
clk_register_clkdev(clk1, "pll1_clk", NULL);
- clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco2_mux_clk", NULL);
- clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
+ clk_register_clkdev(clk, "vco2_mclk", NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk",
0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco2_clk", NULL);
clk_register_clkdev(clk1, "pll2_clk", NULL);
- clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco3_mux_clk", NULL);
- clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
+ clk_register_clkdev(clk, "vco3_mclk", NULL);
+ clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk",
0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco3_clk", NULL);
@@ -473,7 +472,7 @@ void __init spear1310_clk_init(void)
/* peripherals */
clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
128);
- clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+ clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -500,177 +499,176 @@ void __init spear1310_clk_init(void)
clk_register_clkdev(clk, "apb_clk", NULL);
/* gpt clocks */
- clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt0_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt0");
- clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt1");
- clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt2");
- clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt3_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt3");
/* others */
- clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
- "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL,
- aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "uart_synth_clk", NULL);
- clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+ clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1310_UART_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart0_mclk", NULL);
- clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "e0000000.serial");
- clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+ clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
"vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
- clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+ clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b3000000.sdhci");
- clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
- "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL,
- aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
- clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+ clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1310_CFXD_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+ clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b2800000.cf");
clk_register_clkdev(clk, NULL, "arasan_xd");
- clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
- "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL,
- aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "c3_synth_clk", NULL);
- clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+ clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1310_C3_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "c3_syn_clk", NULL);
+ clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+ clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "c3_mux_clk", NULL);
+ clk_register_clkdev(clk, "c3_mclk", NULL);
- clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "c3");
/* gmac */
- clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
- gmac_phy_input_parents,
+ clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
ARRAY_SIZE(gmac_phy_input_parents), 0,
SPEAR1310_GMAC_CLK_CFG,
SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+ clk_register_clkdev(clk, "phy_input_mclk", NULL);
- clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
- "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT,
- NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
- clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+ clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+ 0, SPEAR1310_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+ ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "phy_syn_clk", NULL);
+ clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+ clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
ARRAY_SIZE(gmac_phy_parents), 0,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "stmmacphy.0");
/* clcd */
- clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+ clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
ARRAY_SIZE(clcd_synth_parents), 0,
SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+ clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
- clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+ clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
ARRAY_SIZE(clcd_rtbl), &_lock);
- clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+ clk_register_clkdev(clk, "clcd_syn_clk", NULL);
- clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+ clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
ARRAY_SIZE(clcd_pixel_parents), 0,
SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
- clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, "clcd_clk", NULL);
/* i2s */
- clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+ clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
0, &_lock);
clk_register_clkdev(clk, "i2s_src_clk", NULL);
- clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+ clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
- clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+ clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
&_lock);
clk_register_clkdev(clk, "i2s_ref_clk", NULL);
- clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
- clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
+ clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk",
"i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
&i2s_sclk_masks, i2s_sclk_rtbl,
ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
- clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+ clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
/* clock derived from ahb clk */
clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -747,13 +745,13 @@ void __init spear1310_clk_init(void)
&_lock);
clk_register_clkdev(clk, "sysram1_clk", NULL);
- clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+ clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "adc_synth_clk", NULL);
- clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "adc_syn_clk", NULL);
+ clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "adc_clk");
@@ -790,37 +788,37 @@ void __init spear1310_clk_init(void)
clk_register_clkdev(clk, NULL, "e0300000.kbd");
/* RAS clks */
- clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
- gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
- 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
+ clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+ ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
- clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
- gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
- 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
+ clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+ ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1310_PLL_CFG,
+ SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
- clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+ clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn0_clk", NULL);
- clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+ clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn1_clk", NULL);
- clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+ clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn2_clk", NULL);
- clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+ clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn3_clk", NULL);
clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
@@ -847,7 +845,7 @@ void __init spear1310_clk_init(void)
&_lock);
clk_register_clkdev(clk, "ras_pll3_clk", NULL);
- clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0,
+ clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_pad_clk", 0,
SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, "ras_tx125_clk", NULL);
@@ -912,7 +910,7 @@ void __init spear1310_clk_init(void)
&_lock);
clk_register_clkdev(clk, NULL, "5c700000.eth");
- clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk",
+ clk = clk_register_mux(NULL, "smii_rgmii_phy_mclk",
smii_rgmii_phy_parents,
ARRAY_SIZE(smii_rgmii_phy_parents), 0,
SPEAR1310_RAS_CTRL_REG1,
@@ -922,184 +920,184 @@ void __init spear1310_clk_init(void)
clk_register_clkdev(clk, NULL, "stmmacphy.2");
clk_register_clkdev(clk, NULL, "stmmacphy.4");
- clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents,
+ clk = clk_register_mux(NULL, "rmii_phy_mclk", rmii_phy_parents,
ARRAY_SIZE(rmii_phy_parents), 0,
SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
SPEAR1310_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "stmmacphy.3");
- clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart1_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
0, &_lock);
- clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart1_mclk", NULL);
- clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5c800000.serial");
- clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart2_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
0, &_lock);
- clk_register_clkdev(clk, "uart2_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart2_mclk", NULL);
- clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart2_clk", "uart2_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5c900000.serial");
- clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart3_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
0, &_lock);
- clk_register_clkdev(clk, "uart3_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart3_mclk", NULL);
- clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart3_clk", "uart3_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5ca00000.serial");
- clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart4_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
0, &_lock);
- clk_register_clkdev(clk, "uart4_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart4_mclk", NULL);
- clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart4_clk", "uart4_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5cb00000.serial");
- clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart5_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
0, &_lock);
- clk_register_clkdev(clk, "uart5_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart5_mclk", NULL);
- clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart5_clk", "uart5_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5cc00000.serial");
- clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c1_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c1_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c1_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5cd00000.i2c");
- clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c2_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c2_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c2_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5ce00000.i2c");
- clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c3_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c3_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c3_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5cf00000.i2c");
- clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c4_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c4_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c4_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5d000000.i2c");
- clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c5_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c5_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c5_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5d100000.i2c");
- clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c6_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c6_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c6_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5d200000.i2c");
- clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents,
+ clk = clk_register_mux(NULL, "i2c7_mclk", i2c_parents,
ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "i2c7_mux_clk", NULL);
+ clk_register_clkdev(clk, "i2c7_mclk", NULL);
- clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5d300000.i2c");
- clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents,
+ clk = clk_register_mux(NULL, "ssp1_mclk", ssp1_parents,
ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "ssp1_mux_clk", NULL);
+ clk_register_clkdev(clk, "ssp1_mclk", NULL);
- clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "5d400000.spi");
- clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents,
+ clk = clk_register_mux(NULL, "pci_mclk", pci_parents,
ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "pci_mux_clk", NULL);
+ clk_register_clkdev(clk, "pci_mclk", NULL);
- clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0,
+ clk = clk_register_gate(NULL, "pci_clk", "pci_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "pci");
- clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents,
+ clk = clk_register_mux(NULL, "tdm1_mclk", tdm_parents,
ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "tdm1_mux_clk", NULL);
+ clk_register_clkdev(clk, "tdm1_mclk", NULL);
- clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
- clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents,
+ clk = clk_register_mux(NULL, "tdm2_mclk", tdm_parents,
ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "tdm2_mux_clk", NULL);
+ clk_register_clkdev(clk, "tdm2_mclk", NULL);
- clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0,
+ clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mclk", 0,
SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index e3ea72162236..2352cee7f645 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -369,27 +369,25 @@ static struct frac_rate_tbl gen_rtbl[] = {
/* clock parents */
static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
-static const char *sys_parents[] = { "none", "pll1_clk", "none", "none",
- "sys_synth_clk", "none", "pll2_clk", "pll3_clk", };
-static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", };
+static const char *sys_parents[] = { "pll1_clk", "pll1_clk", "pll1_clk",
+ "pll1_clk", "sys_synth_clk", "sys_synth_clk", "pll2_clk", "pll3_clk", };
+static const char *ahb_parents[] = { "cpu_div3_clk", "amba_syn_clk", };
static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
- "uart0_synth_gate_clk", };
+ "uart0_syn_gclk", };
static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
- "uart1_synth_gate_clk", };
-static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
-static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
+ "uart1_syn_gclk", };
+static const char *c3_parents[] = { "pll5_clk", "c3_syn_gclk", };
+static const char *gmac_phy_input_parents[] = { "gmii_pad_clk", "pll2_clk",
"osc_25m_clk", };
-static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
- "gmac_phy_synth_gate_clk", };
+static const char *gmac_phy_parents[] = { "phy_input_mclk", "phy_syn_gclk", };
static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
-static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
+static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_syn_clk", };
static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
"i2s_src_pad_clk", };
-static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
-static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk",
-};
-static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };
+static const char *i2s_ref_parents[] = { "i2s_src_mclk", "i2s_prs1_clk", };
+static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_syn2_clk", };
+static const char *spdif_in_parents[] = { "pll2_clk", "gen_syn3_clk", };
static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
"pll3_clk", };
@@ -415,9 +413,9 @@ void __init spear1340_clk_init(void)
25000000);
clk_register_clkdev(clk, "osc_25m_clk", NULL);
- clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
- CLK_IS_ROOT, 125000000);
- clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
+ clk = clk_register_fixed_rate(NULL, "gmii_pad_clk", NULL, CLK_IS_ROOT,
+ 125000000);
+ clk_register_clkdev(clk, "gmii_pad_clk", NULL);
clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
CLK_IS_ROOT, 12288000);
@@ -431,35 +429,35 @@ void __init spear1340_clk_init(void)
/* clock derived from 24 or 25 MHz osc clk */
/* vco-pll */
- clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco1_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco1_mux_clk", NULL);
- clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
- 0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
+ clk_register_clkdev(clk, "vco1_mclk", NULL);
+ clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mclk", 0,
+ SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco1_clk", NULL);
clk_register_clkdev(clk1, "pll1_clk", NULL);
- clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco2_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco2_mux_clk", NULL);
- clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
- 0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
+ clk_register_clkdev(clk, "vco2_mclk", NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mclk", 0,
+ SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco2_clk", NULL);
clk_register_clkdev(clk1, "pll2_clk", NULL);
- clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
+ clk = clk_register_mux(NULL, "vco3_mclk", vco_parents,
ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "vco3_mux_clk", NULL);
- clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
- 0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
+ clk_register_clkdev(clk, "vco3_mclk", NULL);
+ clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mclk", 0,
+ SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco3_clk", NULL);
clk_register_clkdev(clk1, "pll3_clk", NULL);
@@ -498,7 +496,7 @@ void __init spear1340_clk_init(void)
/* peripherals */
clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
128);
- clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
+ clk = clk_register_gate(NULL, "thermal_gclk", "thermal_clk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_thermal");
@@ -509,23 +507,23 @@ void __init spear1340_clk_init(void)
clk_register_clkdev(clk, "ddr_clk", NULL);
/* clock derived from pll1 clk */
- clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0,
+ clk = clk_register_frac("sys_syn_clk", "vco1div2_clk", 0,
SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
ARRAY_SIZE(sys_synth_rtbl), &_lock);
- clk_register_clkdev(clk, "sys_synth_clk", NULL);
+ clk_register_clkdev(clk, "sys_syn_clk", NULL);
- clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0,
+ clk = clk_register_frac("amba_syn_clk", "vco1div2_clk", 0,
SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
ARRAY_SIZE(amba_synth_rtbl), &_lock);
- clk_register_clkdev(clk, "amba_synth_clk", NULL);
+ clk_register_clkdev(clk, "amba_syn_clk", NULL);
- clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents,
+ clk = clk_register_mux(NULL, "sys_mclk", sys_parents,
ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
SPEAR1340_SCLK_SRC_SEL_SHIFT,
SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
clk_register_clkdev(clk, "sys_clk", NULL);
- clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1,
+ clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mclk", 0, 1,
2);
clk_register_clkdev(clk, "cpu_clk", NULL);
@@ -548,194 +546,193 @@ void __init spear1340_clk_init(void)
clk_register_clkdev(clk, "apb_clk", NULL);
/* gpt clocks */
- clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt0_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt0_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt0");
- clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt1");
- clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt2");
- clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
+ clk = clk_register_mux(NULL, "gpt3_mclk", gpt_parents,
ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt3_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "gpt3");
/* others */
- clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk",
+ clk = clk_register_aux("uart0_syn_clk", "uart0_syn_gclk",
"vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "uart0_synth_clk", NULL);
- clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "uart0_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart0_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart0_mclk", NULL);
- clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
+ clk = clk_register_gate(NULL, "uart0_clk", "uart0_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "e0000000.serial");
- clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk",
+ clk = clk_register_aux("uart1_syn_clk", "uart1_syn_gclk",
"vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "uart1_synth_clk", NULL);
- clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "uart1_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart1_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents,
+ clk = clk_register_mux(NULL, "uart1_mclk", uart1_parents,
ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "uart1_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart1_mclk", NULL);
- clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
- SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
+ clk = clk_register_gate(NULL, "uart1_clk", "uart1_mclk", 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b4100000.serial");
- clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
+ clk = clk_register_aux("sdhci_syn_clk", "sdhci_syn_gclk",
"vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
- clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "sdhci_syn_clk", NULL);
+ clk_register_clkdev(clk1, "sdhci_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_syn_gclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b3000000.sdhci");
- clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
- "vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL,
- aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
- clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
+ clk = clk_register_aux("cfxd_syn_clk", "cfxd_syn_gclk", "vco1div2_clk",
+ 0, SPEAR1340_CFXD_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "cfxd_syn_clk", NULL);
+ clk_register_clkdev(clk1, "cfxd_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_syn_gclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b2800000.cf");
clk_register_clkdev(clk, NULL, "arasan_xd");
- clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
- "vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL,
- aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "c3_synth_clk", NULL);
- clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
+ clk = clk_register_aux("c3_syn_clk", "c3_syn_gclk", "vco1div2_clk", 0,
+ SPEAR1340_C3_CLK_SYNT, NULL, aux_rtbl,
+ ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "c3_syn_clk", NULL);
+ clk_register_clkdev(clk1, "c3_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
+ clk = clk_register_mux(NULL, "c3_mclk", c3_parents,
ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "c3_mux_clk", NULL);
+ clk_register_clkdev(clk, "c3_mclk", NULL);
- clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "c3_clk", "c3_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "c3");
/* gmac */
- clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
- gmac_phy_input_parents,
+ clk = clk_register_mux(NULL, "phy_input_mclk", gmac_phy_input_parents,
ARRAY_SIZE(gmac_phy_input_parents), 0,
SPEAR1340_GMAC_CLK_CFG,
SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
+ clk_register_clkdev(clk, "phy_input_mclk", NULL);
- clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
- "gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT,
- NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
- clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
+ clk = clk_register_aux("phy_syn_clk", "phy_syn_gclk", "phy_input_mclk",
+ 0, SPEAR1340_GMAC_CLK_SYNT, NULL, gmac_rtbl,
+ ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
+ clk_register_clkdev(clk, "phy_syn_clk", NULL);
+ clk_register_clkdev(clk1, "phy_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
+ clk = clk_register_mux(NULL, "phy_mclk", gmac_phy_parents,
ARRAY_SIZE(gmac_phy_parents), 0,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "stmmacphy.0");
/* clcd */
- clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
+ clk = clk_register_mux(NULL, "clcd_syn_mclk", clcd_synth_parents,
ARRAY_SIZE(clcd_synth_parents), 0,
SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
+ clk_register_clkdev(clk, "clcd_syn_mclk", NULL);
- clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
+ clk = clk_register_frac("clcd_syn_clk", "clcd_syn_mclk", 0,
SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
ARRAY_SIZE(clcd_rtbl), &_lock);
- clk_register_clkdev(clk, "clcd_synth_clk", NULL);
+ clk_register_clkdev(clk, "clcd_syn_clk", NULL);
- clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
+ clk = clk_register_mux(NULL, "clcd_pixel_mclk", clcd_pixel_parents,
ARRAY_SIZE(clcd_pixel_parents), 0,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
- clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, "clcd_clk", NULL);
/* i2s */
- clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
+ clk = clk_register_mux(NULL, "i2s_src_mclk", i2s_src_parents,
ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
0, &_lock);
clk_register_clkdev(clk, "i2s_src_clk", NULL);
- clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
+ clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mclk", 0,
SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
- clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
+ clk = clk_register_mux(NULL, "i2s_ref_mclk", i2s_ref_parents,
ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
&_lock);
clk_register_clkdev(clk, "i2s_ref_clk", NULL);
- clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
+ clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
- clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
- "i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG,
- &i2s_sclk_masks, i2s_sclk_rtbl,
- ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
+ clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gclk", "i2s_ref_mclk",
+ 0, SPEAR1340_I2S_CLK_CFG, &i2s_sclk_masks,
+ i2s_sclk_rtbl, ARRAY_SIZE(i2s_sclk_rtbl), &_lock,
+ &clk1);
clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
- clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
+ clk_register_clkdev(clk1, "i2s_sclk_gclk", NULL);
/* clock derived from ahb clk */
clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
@@ -744,7 +741,7 @@ void __init spear1340_clk_init(void)
clk_register_clkdev(clk, NULL, "e0280000.i2c");
clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
- SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
+ SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "b4000000.i2c");
@@ -800,13 +797,13 @@ void __init spear1340_clk_init(void)
&_lock);
clk_register_clkdev(clk, "sysram1_clk", NULL);
- clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
+ clk = clk_register_aux("adc_syn_clk", "adc_syn_gclk", "ahb_clk",
0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "adc_synth_clk", NULL);
- clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "adc_syn_clk", NULL);
+ clk_register_clkdev(clk1, "adc_syn_gclk", NULL);
- clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
+ clk = clk_register_gate(NULL, "adc_clk", "adc_syn_gclk", 0,
SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "adc_clk");
@@ -843,39 +840,39 @@ void __init spear1340_clk_init(void)
clk_register_clkdev(clk, NULL, "e0300000.kbd");
/* RAS clks */
- clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
- gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
- 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
+ clk = clk_register_mux(NULL, "gen_syn0_1_mclk", gen_synth0_1_parents,
+ ARRAY_SIZE(gen_synth0_1_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn0_1_clk", NULL);
- clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
- gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
- 0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
+ clk = clk_register_mux(NULL, "gen_syn2_3_mclk", gen_synth2_3_parents,
+ ARRAY_SIZE(gen_synth2_3_parents), 0, SPEAR1340_PLL_CFG,
+ SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn2_3_clk", NULL);
- clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
+ clk = clk_register_frac("gen_syn0_clk", "gen_syn0_1_clk", 0,
SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth0_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn0_clk", NULL);
- clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
+ clk = clk_register_frac("gen_syn1_clk", "gen_syn0_1_clk", 0,
SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth1_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn1_clk", NULL);
- clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
+ clk = clk_register_frac("gen_syn2_clk", "gen_syn2_3_clk", 0,
SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth2_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn2_clk", NULL);
- clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
+ clk = clk_register_frac("gen_syn3_clk", "gen_syn2_3_clk", 0,
SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
&_lock);
- clk_register_clkdev(clk, "gen_synth3_clk", NULL);
+ clk_register_clkdev(clk, "gen_syn3_clk", NULL);
- clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0,
+ clk = clk_register_gate(NULL, "mali_clk", "gen_syn3_clk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "mali");
@@ -890,74 +887,74 @@ void __init spear1340_clk_init(void)
&_lock);
clk_register_clkdev(clk, NULL, "spear_cec.1");
- clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents,
+ clk = clk_register_mux(NULL, "spdif_out_mclk", spdif_out_parents,
ARRAY_SIZE(spdif_out_parents), 0,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "spdif_out_mux_clk", NULL);
+ clk_register_clkdev(clk, "spdif_out_mclk", NULL);
- clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0,
+ clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, NULL, "spdif-out");
- clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents,
+ clk = clk_register_mux(NULL, "spdif_in_mclk", spdif_in_parents,
ARRAY_SIZE(spdif_in_parents), 0,
SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "spdif_in_mux_clk", NULL);
+ clk_register_clkdev(clk, "spdif_in_mclk", NULL);
- clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0,
+ clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spdif-in");
- clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0,
+ clk = clk_register_gate(NULL, "acp_clk", "acp_mclk", 0,
SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "acp_clk");
- clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0,
+ clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "plgpio");
- clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0,
+ clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, NULL, "video_dec");
- clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0,
+ clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
0, &_lock);
clk_register_clkdev(clk, NULL, "video_enc");
- clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0,
+ clk = clk_register_gate(NULL, "video_in_clk", "video_in_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_vip");
- clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0,
+ clk = clk_register_gate(NULL, "cam0_clk", "cam0_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_camif.0");
- clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "cam1_clk", "cam1_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_camif.1");
- clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0,
+ clk = clk_register_gate(NULL, "cam2_clk", "cam2_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_camif.2");
- clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "cam3_clk", "cam3_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "spear_camif.3");
- clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0,
+ clk = clk_register_gate(NULL, "pwm_clk", "pwm_mclk", 0,
SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
&_lock);
clk_register_clkdev(clk, NULL, "pwm");
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 01dd6daff2a1..c3157454bb3f 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -122,12 +122,12 @@ static struct gpt_rate_tbl gpt_rtbl[] = {
};
/* clock parents */
-static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
-static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
+static const char *uart0_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk",
};
-static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", };
-static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", };
-static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
+static const char *gpt0_parents[] = { "pll3_clk", "gpt0_syn_clk", };
+static const char *gpt1_parents[] = { "pll3_clk", "gpt1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
"pll2_clk", };
@@ -137,7 +137,7 @@ static void __init spear300_clk_init(void)
{
struct clk *clk;
- clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+ clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
1, 1);
clk_register_clkdev(clk, NULL, "60000000.clcd");
@@ -219,15 +219,11 @@ static void __init spear310_clk_init(void)
#define SPEAR320_UARTX_PCLK_VAL_SYNTH1 0x0
#define SPEAR320_UARTX_PCLK_VAL_APB 0x1
-static const char *i2s_ref_parents[] = { "ras_pll2_clk",
- "ras_gen2_synth_gate_clk", };
-static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
- "ras_gen3_synth_gate_clk",
-};
+static const char *i2s_ref_parents[] = { "ras_pll2_clk", "ras_syn2_gclk", };
+static const char *sdhci_parents[] = { "ras_pll3_clk", "ras_syn3_gclk", };
static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
- "ras_gen0_synth_gate_clk", };
-static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk",
-};
+ "ras_syn0_gclk", };
+static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
static void __init spear320_clk_init(void)
{
@@ -237,7 +233,7 @@ static void __init spear320_clk_init(void)
CLK_IS_ROOT, 125000000);
clk_register_clkdev(clk, "smii_125m_pad", NULL);
- clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
+ clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0,
1, 1);
clk_register_clkdev(clk, NULL, "90000000.clcd");
@@ -363,9 +359,9 @@ void __init spear3xx_clk_init(void)
clk_register_clkdev(clk, NULL, "fc900000.rtc");
/* clock derived from 24 MHz osc clk */
- clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+ clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
48000000);
- clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+ clk_register_clkdev(clk, "pll3_clk", NULL);
clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
1);
@@ -392,98 +388,98 @@ void __init spear3xx_clk_init(void)
HCLK_RATIO_MASK, 0, &_lock);
clk_register_clkdev(clk, "ahb_clk", NULL);
- clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
- "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "uart_synth_clk", NULL);
- clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+ clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+ UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
+ clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents,
ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "uart0_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart0_mclk", NULL);
- clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0,
- PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "uart0", "uart0_mclk", 0, PERIP1_CLK_ENB,
+ UART_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "d0000000.serial");
- clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
- "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "firda_synth_clk", NULL);
- clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+ clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", 0,
+ FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "firda_syn_clk", NULL);
+ clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+ clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "firda_mux_clk", NULL);
+ clk_register_clkdev(clk, "firda_mclk", NULL);
- clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+ clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "firda");
/* gpt clocks */
- clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
- gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl,
+ ARRAY_SIZE(gpt_rtbl), &_lock);
clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt0");
- clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
- gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
- clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents,
+ clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl,
+ ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents,
ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt1");
- clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
- gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
- clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+ clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl,
+ ARRAY_SIZE(gpt_rtbl), &_lock);
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
- clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt2");
/* general synths clocks */
- clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk",
- "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gen0_synth_clk", NULL);
- clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL);
-
- clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk",
- "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gen1_synth_clk", NULL);
- clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL);
-
- clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents,
+ clk = clk_register_aux("gen0_syn_clk", "gen0_syn_gclk", "pll1_clk",
+ 0, GEN0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "gen0_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen0_syn_gclk", NULL);
+
+ clk = clk_register_aux("gen1_syn_clk", "gen1_syn_gclk", "pll1_clk",
+ 0, GEN1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "gen1_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen1_syn_gclk", NULL);
+
+ clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents,
ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
&_lock);
- clk_register_clkdev(clk, "gen2_3_parent_clk", NULL);
+ clk_register_clkdev(clk, "gen2_3_par_clk", NULL);
- clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk",
- "gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
+ clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk",
+ "gen2_3_par_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gen2_synth_clk", NULL);
- clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "gen2_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen2_syn_gclk", NULL);
- clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk",
- "gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
+ clk = clk_register_aux("gen3_syn_clk", "gen3_syn_gclk",
+ "gen2_3_par_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "gen3_synth_clk", NULL);
- clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "gen3_syn_clk", NULL);
+ clk_register_clkdev(clk1, "gen3_syn_gclk", NULL);
/* clock derived from pll3 clk */
- clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0,
- PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "usbh_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+ USBH_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, "usbh_clk", NULL);
clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
@@ -494,8 +490,8 @@ void __init spear3xx_clk_init(void)
1);
clk_register_clkdev(clk, "usbh.1_clk", NULL);
- clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
- PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+ USBD_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "designware_udc");
/* clock derived from ahb clk */
@@ -579,29 +575,25 @@ void __init spear3xx_clk_init(void)
RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, "ras_pll2_clk", NULL);
- clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0,
+ clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL);
-
- clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk",
- "gen0_synth_gate_clk", 0, RAS_CLK_ENB,
- RAS_SYNT0_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL);
-
- clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk",
- "gen1_synth_gate_clk", 0, RAS_CLK_ENB,
- RAS_SYNT1_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL);
-
- clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk",
- "gen2_synth_gate_clk", 0, RAS_CLK_ENB,
- RAS_SYNT2_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL);
-
- clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
- "gen3_synth_gate_clk", 0, RAS_CLK_ENB,
- RAS_SYNT3_CLK_ENB, 0, &_lock);
- clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);
+ clk_register_clkdev(clk, "ras_pll3_clk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn0_gclk", "gen0_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT0_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn0_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn1_gclk", "gen1_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT1_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn1_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn2_gclk", "gen2_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT2_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn2_gclk", NULL);
+
+ clk = clk_register_gate(NULL, "ras_syn3_gclk", "gen3_syn_gclk", 0,
+ RAS_CLK_ENB, RAS_SYNT3_CLK_ENB, 0, &_lock);
+ clk_register_clkdev(clk, "ras_syn3_gclk", NULL);
if (of_machine_is_compatible("st,spear300"))
spear300_clk_init();
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index 61026ae564ab..a98d0866f541 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -97,13 +97,12 @@ static struct aux_rate_tbl aux_rtbl[] = {
{.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
};
-static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", };
-static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
-};
-static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
-static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", };
-static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
-static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
+static const char *clcd_parents[] = { "pll3_clk", "clcd_syn_gclk", };
+static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", };
+static const char *uart_parents[] = { "pll3_clk", "uart_syn_gclk", };
+static const char *gpt0_1_parents[] = { "pll3_clk", "gpt0_1_syn_clk", };
+static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", };
+static const char *gpt3_parents[] = { "pll3_clk", "gpt3_syn_clk", };
static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
"pll2_clk", };
@@ -136,9 +135,9 @@ void __init spear6xx_clk_init(void)
clk_register_clkdev(clk, NULL, "rtc-spear");
/* clock derived from 30 MHz osc clk */
- clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
+ clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0,
48000000);
- clk_register_clkdev(clk, "pll3_48m_clk", NULL);
+ clk_register_clkdev(clk, "pll3_clk", NULL);
clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
@@ -146,9 +145,9 @@ void __init spear6xx_clk_init(void)
clk_register_clkdev(clk, "vco1_clk", NULL);
clk_register_clkdev(clk1, "pll1_clk", NULL);
- clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
- "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
- ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
+ clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "osc_30m_clk",
+ 0, PLL2_CTR, PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
+ &_lock, &clk1, NULL);
clk_register_clkdev(clk, "vco2_clk", NULL);
clk_register_clkdev(clk1, "pll2_clk", NULL);
@@ -165,111 +164,111 @@ void __init spear6xx_clk_init(void)
HCLK_RATIO_MASK, 0, &_lock);
clk_register_clkdev(clk, "ahb_clk", NULL);
- clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
- "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "uart_synth_clk", NULL);
- clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
+ clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0,
+ UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "uart_syn_clk", NULL);
+ clk_register_clkdev(clk1, "uart_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents,
+ clk = clk_register_mux(NULL, "uart_mclk", uart_parents,
ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "uart_mux_clk", NULL);
+ clk_register_clkdev(clk, "uart_mclk", NULL);
- clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0,
- PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "uart0", "uart_mclk", 0, PERIP1_CLK_ENB,
+ UART0_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "d0000000.serial");
- clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0,
- PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "uart1", "uart_mclk", 0, PERIP1_CLK_ENB,
+ UART1_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "d0080000.serial");
- clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
- "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "firda_synth_clk", NULL);
- clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
+ clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk",
+ 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "firda_syn_clk", NULL);
+ clk_register_clkdev(clk1, "firda_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
+ clk = clk_register_mux(NULL, "firda_mclk", firda_parents,
ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "firda_mux_clk", NULL);
+ clk_register_clkdev(clk, "firda_mclk", NULL);
- clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
+ clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", 0,
PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "firda");
- clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk",
- "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl,
- ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
- clk_register_clkdev(clk, "clcd_synth_clk", NULL);
- clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL);
+ clk = clk_register_aux("clcd_syn_clk", "clcd_syn_gclk", "pll1_clk",
+ 0, CLCD_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl),
+ &_lock, &clk1);
+ clk_register_clkdev(clk, "clcd_syn_clk", NULL);
+ clk_register_clkdev(clk1, "clcd_syn_gclk", NULL);
- clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents,
+ clk = clk_register_mux(NULL, "clcd_mclk", clcd_parents,
ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "clcd_mux_clk", NULL);
+ clk_register_clkdev(clk, "clcd_mclk", NULL);
- clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0,
+ clk = clk_register_gate(NULL, "clcd_clk", "clcd_mclk", 0,
PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "clcd");
/* gpt clocks */
- clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
+ clk = clk_register_gpt("gpt0_1_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
- clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL);
+ clk_register_clkdev(clk, "gpt0_1_syn_clk", NULL);
- clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents,
+ clk = clk_register_mux(NULL, "gpt0_mclk", gpt0_1_parents,
ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt0");
- clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents,
+ clk = clk_register_mux(NULL, "gpt1_mclk", gpt0_1_parents,
ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
+ clk_register_clkdev(clk, "gpt1_mclk", NULL);
- clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
+ clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", 0,
PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt1");
- clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
+ clk = clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
- clk_register_clkdev(clk, "gpt2_synth_clk", NULL);
+ clk_register_clkdev(clk, "gpt2_syn_clk", NULL);
- clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
+ clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents,
ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
+ clk_register_clkdev(clk, "gpt2_mclk", NULL);
- clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
+ clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", 0,
PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt2");
- clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
+ clk = clk_register_gpt("gpt3_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
- clk_register_clkdev(clk, "gpt3_synth_clk", NULL);
+ clk_register_clkdev(clk, "gpt3_syn_clk", NULL);
- clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents,
+ clk = clk_register_mux(NULL, "gpt3_mclk", gpt3_parents,
ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
- clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
+ clk_register_clkdev(clk, "gpt3_mclk", NULL);
- clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
+ clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mclk", 0,
PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "gpt3");
/* clock derived from pll3 clk */
- clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0,
+ clk = clk_register_gate(NULL, "usbh0_clk", "pll3_clk", 0,
PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "usbh.0_clk");
- clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0,
+ clk = clk_register_gate(NULL, "usbh1_clk", "pll3_clk", 0,
PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "usbh.1_clk");
- clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
- PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
+ clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB,
+ USBD_CLK_ENB, 0, &_lock);
clk_register_clkdev(clk, NULL, "designware_udc");
/* clock derived from ahb clk */
@@ -278,9 +277,8 @@ void __init spear6xx_clk_init(void)
clk_register_clkdev(clk, "ahbmult2_clk", NULL);
clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
- ARRAY_SIZE(ddr_parents),
- 0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0,
- &_lock);
+ ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
+ MCTR_CLK_MASK, 0, &_lock);
clk_register_clkdev(clk, "ddr_clk", NULL);
clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
new file mode 100644
index 000000000000..50cf6a2ee693
--- /dev/null
+++ b/drivers/clk/versatile/Makefile
@@ -0,0 +1,3 @@
+# Makefile for Versatile-specific clocks
+obj-$(CONFIG_ICST) += clk-icst.o
+obj-$(CONFIG_ARCH_INTEGRATOR) += clk-integrator.o
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
new file mode 100644
index 000000000000..f555b50a5fa5
--- /dev/null
+++ b/drivers/clk/versatile/clk-icst.c
@@ -0,0 +1,100 @@
+/*
+ * Driver for the ICST307 VCO clock found in the ARM Reference designs.
+ * We wrap the custom interface from <asm/hardware/icst.h> into the generic
+ * clock framework.
+ *
+ * TODO: when all ARM reference designs are migrated to generic clocks, the
+ * ICST clock code from the ARM tree should probably be merged into this
+ * file.
+ */
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+
+#include "clk-icst.h"
+
+/**
+ * struct clk_icst - ICST VCO clock wrapper
+ * @hw: corresponding clock hardware entry
+ * @params: parameters for this ICST instance
+ * @rate: current rate
+ * @setvco: function to commit ICST settings to hardware
+ */
+struct clk_icst {
+ struct clk_hw hw;
+ const struct icst_params *params;
+ unsigned long rate;
+ struct icst_vco (*getvco)(void);
+ void (*setvco)(struct icst_vco);
+};
+
+#define to_icst(_hw) container_of(_hw, struct clk_icst, hw)
+
+static unsigned long icst_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_icst *icst = to_icst(hw);
+ struct icst_vco vco;
+
+ vco = icst->getvco();
+ icst->rate = icst_hz(icst->params, vco);
+ return icst->rate;
+}
+
+static long icst_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct clk_icst *icst = to_icst(hw);
+ struct icst_vco vco;
+
+ vco = icst_hz_to_vco(icst->params, rate);
+ return icst_hz(icst->params, vco);
+}
+
+static int icst_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_icst *icst = to_icst(hw);
+ struct icst_vco vco;
+
+ vco = icst_hz_to_vco(icst->params, rate);
+ icst->rate = icst_hz(icst->params, vco);
+ icst->setvco(vco);
+ return 0;
+}
+
+static const struct clk_ops icst_ops = {
+ .recalc_rate = icst_recalc_rate,
+ .round_rate = icst_round_rate,
+ .set_rate = icst_set_rate,
+};
+
+struct clk * __init icst_clk_register(struct device *dev,
+ const struct clk_icst_desc *desc)
+{
+ struct clk *clk;
+ struct clk_icst *icst;
+ struct clk_init_data init;
+
+ icst = kzalloc(sizeof(struct clk_icst), GFP_KERNEL);
+ if (!icst) {
+ pr_err("could not allocate ICST clock!\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ init.name = "icst";
+ init.ops = &icst_ops;
+ init.flags = CLK_IS_ROOT;
+ init.parent_names = NULL;
+ init.num_parents = 0;
+ icst->hw.init = &init;
+ icst->params = desc->params;
+ icst->getvco = desc->getvco;
+ icst->setvco = desc->setvco;
+
+ clk = clk_register(dev, &icst->hw);
+ if (IS_ERR(clk))
+ kfree(icst);
+
+ return clk;
+}
diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h
new file mode 100644
index 000000000000..71b4c56c1410
--- /dev/null
+++ b/drivers/clk/versatile/clk-icst.h
@@ -0,0 +1,10 @@
+#include <asm/hardware/icst.h>
+
+struct clk_icst_desc {
+ const struct icst_params *params;
+ struct icst_vco (*getvco)(void);
+ void (*setvco)(struct icst_vco);
+};
+
+struct clk *icst_clk_register(struct device *dev,
+ const struct clk_icst_desc *desc);
diff --git a/drivers/clk/versatile/clk-integrator.c b/drivers/clk/versatile/clk-integrator.c
new file mode 100644
index 000000000000..a5053921bf7f
--- /dev/null
+++ b/drivers/clk/versatile/clk-integrator.c
@@ -0,0 +1,111 @@
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+
+#include <mach/hardware.h>
+#include <mach/platform.h>
+
+#include "clk-icst.h"
+
+/*
+ * Implementation of the ARM Integrator/AP and Integrator/CP clock tree.
+ * Inspired by portions of:
+ * plat-versatile/clock.c and plat-versatile/include/plat/clock.h
+ */
+#define CM_LOCK (__io_address(INTEGRATOR_HDR_BASE)+INTEGRATOR_HDR_LOCK_OFFSET)
+#define CM_AUXOSC (__io_address(INTEGRATOR_HDR_BASE)+0x1c)
+
+/**
+ * cp_auxvco_get() - get ICST VCO settings for the Integrator/CP
+ * @vco: ICST VCO parameters to update with hardware status
+ */
+static struct icst_vco cp_auxvco_get(void)
+{
+ u32 val;
+ struct icst_vco vco;
+
+ val = readl(CM_AUXOSC);
+ vco.v = val & 0x1ff;
+ vco.r = (val >> 9) & 0x7f;
+ vco.s = (val >> 16) & 03;
+ return vco;
+}
+
+/**
+ * cp_auxvco_set() - commit changes to Integrator/CP ICST VCO
+ * @vco: ICST VCO parameters to commit
+ */
+static void cp_auxvco_set(struct icst_vco vco)
+{
+ u32 val;
+
+ val = readl(CM_AUXOSC) & ~0x7ffff;
+ val |= vco.v | (vco.r << 9) | (vco.s << 16);
+
+ /* This magic unlocks the CM VCO so it can be controlled */
+ writel(0xa05f, CM_LOCK);
+ writel(val, CM_AUXOSC);
+ /* This locks the CM again */
+ writel(0, CM_LOCK);
+}
+
+static const struct icst_params cp_auxvco_params = {
+ .ref = 24000000,
+ .vco_max = ICST525_VCO_MAX_5V,
+ .vco_min = ICST525_VCO_MIN,
+ .vd_min = 8,
+ .vd_max = 263,
+ .rd_min = 3,
+ .rd_max = 65,
+ .s2div = icst525_s2div,
+ .idx2s = icst525_idx2s,
+};
+
+static const struct clk_icst_desc __initdata cp_icst_desc = {
+ .params = &cp_auxvco_params,
+ .getvco = cp_auxvco_get,
+ .setvco = cp_auxvco_set,
+};
+
+/*
+ * integrator_clk_init() - set up the integrator clock tree
+ * @is_cp: pass true if it's the Integrator/CP else AP is assumed
+ */
+void __init integrator_clk_init(bool is_cp)
+{
+ struct clk *clk;
+
+ /* APB clock dummy */
+ clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
+ clk_register_clkdev(clk, "apb_pclk", NULL);
+
+ /* UART reference clock */
+ clk = clk_register_fixed_rate(NULL, "uartclk", NULL, CLK_IS_ROOT,
+ 14745600);
+ clk_register_clkdev(clk, NULL, "uart0");
+ clk_register_clkdev(clk, NULL, "uart1");
+ if (is_cp)
+ clk_register_clkdev(clk, NULL, "mmci");
+
+ /* 24 MHz clock */
+ clk = clk_register_fixed_rate(NULL, "clk24mhz", NULL, CLK_IS_ROOT,
+ 24000000);
+ clk_register_clkdev(clk, NULL, "kmi0");
+ clk_register_clkdev(clk, NULL, "kmi1");
+ if (!is_cp)
+ clk_register_clkdev(clk, NULL, "ap_timer");
+
+ if (!is_cp)
+ return;
+
+ /* 1 MHz clock */
+ clk = clk_register_fixed_rate(NULL, "clk1mhz", NULL, CLK_IS_ROOT,
+ 1000000);
+ clk_register_clkdev(clk, NULL, "sp804");
+
+ /* ICST VCO clock used on the Integrator/CP CLCD */
+ clk = icst_clk_register(NULL, &cp_icst_desc);
+ clk_register_clkdev(clk, NULL, "clcd");
+}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 99c6b203e6cd..d53cd0afc200 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -16,6 +16,12 @@ config CLKSRC_MMIO
config DW_APB_TIMER
bool
+config DW_APB_TIMER_OF
+ bool
+
+config ARMADA_370_XP_TIMER
+ bool
+
config CLKSRC_DBX500_PRCMU
bool "Clocksource PRCMU Timer"
depends on UX500_SOC_DB8500
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index dd3e661a124d..b65d0c56ab35 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -10,4 +10,6 @@ obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
obj-$(CONFIG_CLKBLD_I8253) += i8253.o
obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
-obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o \ No newline at end of file
+obj-$(CONFIG_DW_APB_TIMER_OF) += dw_apb_timer_of.o
+obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
+obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
diff --git a/arch/arm/mach-picoxcell/time.c b/drivers/clocksource/dw_apb_timer_of.c
index 2ecba6743b8e..f7dba5b79b44 100644
--- a/arch/arm/mach-picoxcell/time.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -1,11 +1,20 @@
/*
+ * Copyright (C) 2012 Altera Corporation
* Copyright (c) 2011 Picochip Ltd., Jamie Iles
*
+ * Modified from mach-picoxcell/time.c
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * All enquiries to support@picochip.com
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/dw_apb_timer.h>
#include <linux/of.h>
@@ -15,8 +24,6 @@
#include <asm/mach/time.h>
#include <asm/sched_clock.h>
-#include "common.h"
-
static void timer_get_base_and_rate(struct device_node *np,
void __iomem **base, u32 *rate)
{
@@ -25,11 +32,12 @@ static void timer_get_base_and_rate(struct device_node *np,
if (!*base)
panic("Unable to map regs for %s", np->name);
- if (of_property_read_u32(np, "clock-freq", rate))
- panic("No clock-freq property for %s", np->name);
+ if (of_property_read_u32(np, "clock-freq", rate) &&
+ of_property_read_u32(np, "clock-frequency", rate))
+ panic("No clock-frequency property for %s", np->name);
}
-static void picoxcell_add_clockevent(struct device_node *event_timer)
+static void add_clockevent(struct device_node *event_timer)
{
void __iomem *iobase;
struct dw_apb_clock_event_device *ced;
@@ -49,7 +57,7 @@ static void picoxcell_add_clockevent(struct device_node *event_timer)
dw_apb_clockevent_register(ced);
}
-static void picoxcell_add_clocksource(struct device_node *source_timer)
+static void add_clocksource(struct device_node *source_timer)
{
void __iomem *iobase;
struct dw_apb_clocksource *cs;
@@ -67,55 +75,57 @@ static void picoxcell_add_clocksource(struct device_node *source_timer)
static void __iomem *sched_io_base;
-static u32 picoxcell_read_sched_clock(void)
+static u32 read_sched_clock(void)
{
return __raw_readl(sched_io_base);
}
-static const struct of_device_id picoxcell_rtc_ids[] __initconst = {
+static const struct of_device_id sptimer_ids[] __initconst = {
{ .compatible = "picochip,pc3x2-rtc" },
+ { .compatible = "snps,dw-apb-timer-sp" },
{ /* Sentinel */ },
};
-static void picoxcell_init_sched_clock(void)
+static void init_sched_clock(void)
{
struct device_node *sched_timer;
u32 rate;
- sched_timer = of_find_matching_node(NULL, picoxcell_rtc_ids);
+ sched_timer = of_find_matching_node(NULL, sptimer_ids);
if (!sched_timer)
panic("No RTC for sched clock to use");
timer_get_base_and_rate(sched_timer, &sched_io_base, &rate);
of_node_put(sched_timer);
- setup_sched_clock(picoxcell_read_sched_clock, 32, rate);
+ setup_sched_clock(read_sched_clock, 32, rate);
}
-static const struct of_device_id picoxcell_timer_ids[] __initconst = {
+static const struct of_device_id osctimer_ids[] __initconst = {
{ .compatible = "picochip,pc3x2-timer" },
+ { .compatible = "snps,dw-apb-timer-osc" },
{},
};
-static void __init picoxcell_timer_init(void)
+static void __init timer_init(void)
{
struct device_node *event_timer, *source_timer;
- event_timer = of_find_matching_node(NULL, picoxcell_timer_ids);
+ event_timer = of_find_matching_node(NULL, osctimer_ids);
if (!event_timer)
panic("No timer for clockevent");
- picoxcell_add_clockevent(event_timer);
+ add_clockevent(event_timer);
- source_timer = of_find_matching_node(event_timer, picoxcell_timer_ids);
+ source_timer = of_find_matching_node(event_timer, osctimer_ids);
if (!source_timer)
panic("No timer for clocksource");
- picoxcell_add_clocksource(source_timer);
+ add_clocksource(source_timer);
of_node_put(source_timer);
- picoxcell_init_sched_clock();
+ init_sched_clock();
}
-struct sys_timer picoxcell_timer = {
- .init = picoxcell_timer_init,
+struct sys_timer dw_apb_timer = {
+ .init = timer_init,
};
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
new file mode 100644
index 000000000000..4674f94957cd
--- /dev/null
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -0,0 +1,226 @@
+/*
+ * Marvell Armada 370/XP SoC timer handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * Timer 0 is used as free-running clocksource, while timer 1 is
+ * used as clock_event_device.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <asm/sched_clock.h>
+
+/*
+ * Timer block registers.
+ */
+#define TIMER_CTRL_OFF 0x0000
+#define TIMER0_EN 0x0001
+#define TIMER0_RELOAD_EN 0x0002
+#define TIMER0_25MHZ 0x0800
+#define TIMER0_DIV(div) ((div) << 19)
+#define TIMER1_EN 0x0004
+#define TIMER1_RELOAD_EN 0x0008
+#define TIMER1_25MHZ 0x1000
+#define TIMER1_DIV(div) ((div) << 22)
+#define TIMER_EVENTS_STATUS 0x0004
+#define TIMER0_CLR_MASK (~0x1)
+#define TIMER1_CLR_MASK (~0x100)
+#define TIMER0_RELOAD_OFF 0x0010
+#define TIMER0_VAL_OFF 0x0014
+#define TIMER1_RELOAD_OFF 0x0018
+#define TIMER1_VAL_OFF 0x001c
+
+/* Global timers are connected to the coherency fabric clock, and the
+ below divider reduces their incrementing frequency. */
+#define TIMER_DIVIDER_SHIFT 5
+#define TIMER_DIVIDER (1 << TIMER_DIVIDER_SHIFT)
+
+/*
+ * SoC-specific data.
+ */
+static void __iomem *timer_base;
+static int timer_irq;
+
+/*
+ * Number of timer ticks per jiffy.
+ */
+static u32 ticks_per_jiffy;
+
+static u32 notrace armada_370_xp_read_sched_clock(void)
+{
+ return ~readl(timer_base + TIMER0_VAL_OFF);
+}
+
+/*
+ * Clockevent handling.
+ */
+static int
+armada_370_xp_clkevt_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ u32 u;
+
+ /*
+ * Clear clockevent timer interrupt.
+ */
+ writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
+
+ /*
+ * Setup new clockevent timer value.
+ */
+ writel(delta, timer_base + TIMER1_VAL_OFF);
+
+ /*
+ * Enable the timer.
+ */
+ u = readl(timer_base + TIMER_CTRL_OFF);
+ u = ((u & ~TIMER1_RELOAD_EN) | TIMER1_EN |
+ TIMER1_DIV(TIMER_DIVIDER_SHIFT));
+ writel(u, timer_base + TIMER_CTRL_OFF);
+
+ return 0;
+}
+
+static void
+armada_370_xp_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev)
+{
+ u32 u;
+
+ if (mode == CLOCK_EVT_MODE_PERIODIC) {
+ /*
+ * Setup timer to fire at 1/HZ intervals.
+ */
+ writel(ticks_per_jiffy - 1, timer_base + TIMER1_RELOAD_OFF);
+ writel(ticks_per_jiffy - 1, timer_base + TIMER1_VAL_OFF);
+
+ /*
+ * Enable timer.
+ */
+ u = readl(timer_base + TIMER_CTRL_OFF);
+
+ writel((u | TIMER1_EN | TIMER1_RELOAD_EN |
+ TIMER1_DIV(TIMER_DIVIDER_SHIFT)),
+ timer_base + TIMER_CTRL_OFF);
+ } else {
+ /*
+ * Disable timer.
+ */
+ u = readl(timer_base + TIMER_CTRL_OFF);
+ writel(u & ~TIMER1_EN, timer_base + TIMER_CTRL_OFF);
+
+ /*
+ * ACK pending timer interrupt.
+ */
+ writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
+
+ }
+}
+
+static struct clock_event_device armada_370_xp_clkevt = {
+ .name = "armada_370_xp_tick",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .shift = 32,
+ .rating = 300,
+ .set_next_event = armada_370_xp_clkevt_next_event,
+ .set_mode = armada_370_xp_clkevt_mode,
+};
+
+static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id)
+{
+ /*
+ * ACK timer interrupt and call event handler.
+ */
+
+ writel(TIMER1_CLR_MASK, timer_base + TIMER_EVENTS_STATUS);
+ armada_370_xp_clkevt.event_handler(&armada_370_xp_clkevt);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction armada_370_xp_timer_irq = {
+ .name = "armada_370_xp_tick",
+ .flags = IRQF_DISABLED | IRQF_TIMER,
+ .handler = armada_370_xp_timer_interrupt
+};
+
+void __init armada_370_xp_timer_init(void)
+{
+ u32 u;
+ struct device_node *np;
+ unsigned int timer_clk;
+ int ret;
+ np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer");
+ timer_base = of_iomap(np, 0);
+ WARN_ON(!timer_base);
+
+ if (of_find_property(np, "marvell,timer-25Mhz", NULL)) {
+ /* The fixed 25MHz timer is available so let's use it */
+ u = readl(timer_base + TIMER_CTRL_OFF);
+ writel(u | TIMER0_25MHZ | TIMER1_25MHZ,
+ timer_base + TIMER_CTRL_OFF);
+ timer_clk = 25000000;
+ } else {
+ u32 clk = 0;
+ ret = of_property_read_u32(np, "clock-frequency", &clk);
+ WARN_ON(!clk || ret < 0);
+ u = readl(timer_base + TIMER_CTRL_OFF);
+ writel(u & ~(TIMER0_25MHZ | TIMER1_25MHZ),
+ timer_base + TIMER_CTRL_OFF);
+ timer_clk = clk / TIMER_DIVIDER;
+ }
+
+ /* We use timer 0 as clocksource, and timer 1 for
+ clockevents */
+ timer_irq = irq_of_parse_and_map(np, 1);
+
+ ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
+
+ /*
+ * Set scale and timer for sched_clock.
+ */
+ setup_sched_clock(armada_370_xp_read_sched_clock, 32, timer_clk);
+
+ /*
+ * Setup free-running clocksource timer (interrupts
+ * disabled).
+ */
+ writel(0xffffffff, timer_base + TIMER0_VAL_OFF);
+ writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF);
+
+ u = readl(timer_base + TIMER_CTRL_OFF);
+
+ writel((u | TIMER0_EN | TIMER0_RELOAD_EN |
+ TIMER0_DIV(TIMER_DIVIDER_SHIFT)), timer_base + TIMER_CTRL_OFF);
+
+ clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
+ "armada_370_xp_clocksource",
+ timer_clk, 300, 32, clocksource_mmio_readl_down);
+
+ /*
+ * Setup clockevent timer (interrupt-driven).
+ */
+ setup_irq(timer_irq, &armada_370_xp_timer_irq);
+ armada_370_xp_clkevt.cpumask = cpumask_of(0);
+ clockevents_config_and_register(&armada_370_xp_clkevt,
+ timer_clk, 1, 0xfffffffe);
+}
+
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 77e1e6cd66ce..3e92b7d3fcd2 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -46,7 +46,7 @@ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
static inline void get_seq(__u32 *ts, int *cpu)
{
preempt_disable();
- *ts = __this_cpu_inc_return(proc_event_counts) -1;
+ *ts = __this_cpu_inc_return(proc_event_counts) - 1;
*cpu = smp_processor_id();
preempt_enable();
}
@@ -62,8 +62,8 @@ void proc_fork_connector(struct task_struct *task)
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
- ev = (struct proc_event*)msg->data;
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -93,8 +93,8 @@ void proc_exec_connector(struct task_struct *task)
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
- ev = (struct proc_event*)msg->data;
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -119,8 +119,8 @@ void proc_id_connector(struct task_struct *task, int which_id)
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
- ev = (struct proc_event*)msg->data;
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
ev->what = which_id;
ev->event_data.id.process_pid = task->pid;
ev->event_data.id.process_tgid = task->tgid;
@@ -134,7 +134,7 @@ void proc_id_connector(struct task_struct *task, int which_id)
ev->event_data.id.e.egid = cred->egid;
} else {
rcu_read_unlock();
- return;
+ return;
}
rcu_read_unlock();
get_seq(&msg->seq, &ev->cpu);
@@ -241,8 +241,8 @@ void proc_exit_connector(struct task_struct *task)
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
- ev = (struct proc_event*)msg->data;
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
get_seq(&msg->seq, &ev->cpu);
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -276,8 +276,8 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
if (atomic_read(&proc_event_num_listeners) < 1)
return;
- msg = (struct cn_msg*)buffer;
- ev = (struct proc_event*)msg->data;
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
msg->seq = rcvd_seq;
ktime_get_ts(&ts); /* get high res monotonic timestamp */
put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
@@ -303,7 +303,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
if (msg->len != sizeof(*mc_op))
return;
- mc_op = (enum proc_cn_mcast_op*)msg->data;
+ mc_op = (enum proc_cn_mcast_op *)msg->data;
switch (*mc_op) {
case PROC_CN_MCAST_LISTEN:
atomic_inc(&proc_event_num_listeners);
@@ -325,11 +325,11 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
*/
static int __init cn_proc_init(void)
{
- int err;
-
- if ((err = cn_add_callback(&cn_proc_event_id, "cn_proc",
- &cn_proc_mcast_ctl))) {
- printk(KERN_WARNING "cn_proc failed to register\n");
+ int err = cn_add_callback(&cn_proc_event_id,
+ "cn_proc",
+ &cn_proc_mcast_ctl);
+ if (err) {
+ pr_warn("cn_proc failed to register\n");
return err;
}
return 0;
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index c42c9d517790..1f8bf054d11c 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -1,5 +1,5 @@
/*
- * cn_queue.c
+ * cn_queue.c
*
* 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
@@ -34,13 +34,14 @@
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
struct cb_id *id,
- void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
+ void (*callback)(struct cn_msg *,
+ struct netlink_skb_parms *))
{
struct cn_callback_entry *cbq;
cbq = kzalloc(sizeof(*cbq), GFP_KERNEL);
if (!cbq) {
- printk(KERN_ERR "Failed to create new callback queue.\n");
+ pr_err("Failed to create new callback queue.\n");
return NULL;
}
@@ -71,7 +72,8 @@ int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
struct cb_id *id,
- void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
+ void (*callback)(struct cn_msg *,
+ struct netlink_skb_parms *))
{
struct cn_callback_entry *cbq, *__cbq;
int found = 0;
@@ -149,7 +151,7 @@ void cn_queue_free_dev(struct cn_queue_dev *dev)
spin_unlock_bh(&dev->queue_lock);
while (atomic_read(&dev->refcnt)) {
- printk(KERN_INFO "Waiting for %s to become free: refcnt=%d.\n",
+ pr_info("Waiting for %s to become free: refcnt=%d.\n",
dev->name, atomic_read(&dev->refcnt));
msleep(1000);
}
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index dde6a0fad408..82fa4f0f91d6 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -1,5 +1,5 @@
/*
- * connector.c
+ * connector.c
*
* 2004+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
* All rights reserved.
@@ -101,19 +101,19 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
if (!skb)
return -ENOMEM;
- nlh = NLMSG_PUT(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh));
+ nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh), 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
- data = NLMSG_DATA(nlh);
+ data = nlmsg_data(nlh);
memcpy(data, msg, sizeof(*data) + msg->len);
NETLINK_CB(skb).dst_group = group;
return netlink_broadcast(dev->nls, skb, 0, group, gfp_mask);
-
-nlmsg_failure:
- kfree_skb(skb);
- return -EINVAL;
}
EXPORT_SYMBOL_GPL(cn_netlink_send);
@@ -185,7 +185,8 @@ static void cn_rx_skb(struct sk_buff *__skb)
* May sleep.
*/
int cn_add_callback(struct cb_id *id, const char *name,
- void (*callback)(struct cn_msg *, struct netlink_skb_parms *))
+ void (*callback)(struct cn_msg *,
+ struct netlink_skb_parms *))
{
int err;
struct cn_dev *dev = &cdev;
@@ -251,15 +252,20 @@ static const struct file_operations cn_file_ops = {
.release = single_release
};
+static struct cn_dev cdev = {
+ .input = cn_rx_skb,
+};
+
static int __devinit cn_init(void)
{
struct cn_dev *dev = &cdev;
-
- dev->input = cn_rx_skb;
+ struct netlink_kernel_cfg cfg = {
+ .groups = CN_NETLINK_USERS + 0xf,
+ .input = dev->input,
+ };
dev->nls = netlink_kernel_create(&init_net, NETLINK_CONNECTOR,
- CN_NETLINK_USERS + 0xf,
- dev->input, NULL, THIS_MODULE);
+ THIS_MODULE, &cfg);
if (!dev->nls)
return -EIO;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7f2f149ae40f..fb8a5279c5d8 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -138,7 +138,7 @@ void disable_cpufreq(void)
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
struct cpufreq_policy *data;
unsigned long flags;
@@ -162,7 +162,7 @@ struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
if (!data)
goto err_out_put_module;
- if (!kobject_get(&data->kobj))
+ if (!sysfs && !kobject_get(&data->kobj))
goto err_out_put_module;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
@@ -175,16 +175,35 @@ err_out_unlock:
err_out:
return NULL;
}
+
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
+{
+ return __cpufreq_cpu_get(cpu, false);
+}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
+static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
+{
+ return __cpufreq_cpu_get(cpu, true);
+}
-void cpufreq_cpu_put(struct cpufreq_policy *data)
+static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
- kobject_put(&data->kobj);
+ if (!sysfs)
+ kobject_put(&data->kobj);
module_put(cpufreq_driver->owner);
}
+
+void cpufreq_cpu_put(struct cpufreq_policy *data)
+{
+ __cpufreq_cpu_put(data, false);
+}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
+static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
+{
+ __cpufreq_cpu_put(data, true);
+}
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
@@ -617,7 +636,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
+ policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -631,7 +650,7 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
unlock_policy_rwsem_read(policy->cpu);
fail:
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_put_sysfs(policy);
no_policy:
return ret;
}
@@ -642,7 +661,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get(policy->cpu);
+ policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -656,7 +675,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
unlock_policy_rwsem_write(policy->cpu);
fail:
- cpufreq_cpu_put(policy);
+ cpufreq_cpu_put_sysfs(policy);
no_policy:
return ret;
}
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index b243a7ee01f6..af2d81e10f71 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -62,8 +62,18 @@ static int exynos_target(struct cpufreq_policy *policy,
goto out;
}
- if (cpufreq_frequency_table_target(policy, freq_table,
- freqs.old, relation, &old_index)) {
+ /*
+ * The policy max have been changed so that we cannot get proper
+ * old_index with cpufreq_frequency_table_target(). Thus, ignore
+ * policy and get the index from the raw freqeuncy table.
+ */
+ for (old_index = 0;
+ freq_table[old_index].frequency != CPUFREQ_TABLE_END;
+ old_index++)
+ if (freq_table[old_index].frequency == freqs.old)
+ break;
+
+ if (freq_table[old_index].frequency == CPUFREQ_TABLE_END) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 50d2f15a3c8a..bcc053bc02c4 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -153,7 +153,7 @@ static int s3c2416_cpufreq_enter_dvs(struct s3c2416_data *s3c_freq, int idx)
if (s3c_freq->vddarm) {
dvfs = &s3c2416_dvfs_table[idx];
- pr_debug("cpufreq: setting regultor to %d-%d\n",
+ pr_debug("cpufreq: setting regulator to %d-%d\n",
dvfs->vddarm_min, dvfs->vddarm_max);
ret = regulator_set_voltage(s3c_freq->vddarm,
dvfs->vddarm_min,
@@ -186,7 +186,7 @@ static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
if (s3c_freq->vddarm) {
dvfs = &s3c2416_dvfs_table[idx];
- pr_debug("cpufreq: setting regultor to %d-%d\n",
+ pr_debug("cpufreq: setting regulator to %d-%d\n",
dvfs->vddarm_min, dvfs->vddarm_max);
ret = regulator_set_voltage(s3c_freq->vddarm,
dvfs->vddarm_min,
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index 7432b3a72cd4..e29b59aa68a8 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -203,7 +203,7 @@ static unsigned int speedstep_detect_chipset(void)
if (speedstep_chipset_dev) {
/* speedstep.c causes lockups on Dell Inspirons 8000 and
* 8100 which use a pretty old revision of the 82815
- * host brige. Abort on these systems.
+ * host bridge. Abort on these systems.
*/
static struct pci_dev *hostbridge;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index d90519cec880..d6a533e68e0f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -201,6 +201,22 @@ void cpuidle_resume_and_unlock(void)
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
+/* Currently used in suspend/resume path to suspend cpuidle */
+void cpuidle_pause(void)
+{
+ mutex_lock(&cpuidle_lock);
+ cpuidle_uninstall_idle_handler();
+ mutex_unlock(&cpuidle_lock);
+}
+
+/* Currently used in suspend/resume path to resume cpuidle */
+void cpuidle_resume(void)
+{
+ mutex_lock(&cpuidle_lock);
+ cpuidle_install_idle_handler();
+ mutex_unlock(&cpuidle_lock);
+}
+
/**
* cpuidle_wrap_enter - performs timekeeping and irqen around enter function
* @dev: pointer to a valid cpuidle_device object
@@ -265,7 +281,7 @@ static void poll_idle_init(struct cpuidle_driver *drv)
state->power_usage = -1;
state->flags = 0;
state->enter = poll_idle;
- state->disable = 0;
+ state->disabled = false;
}
#else
static void poll_idle_init(struct cpuidle_driver *drv) {}
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 40cd3f3024df..58bf3b1ac9c4 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -16,6 +16,7 @@
static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
+int cpuidle_driver_refcount;
static void __cpuidle_register_driver(struct cpuidle_driver *drv)
{
@@ -89,8 +90,34 @@ void cpuidle_unregister_driver(struct cpuidle_driver *drv)
}
spin_lock(&cpuidle_driver_lock);
- cpuidle_curr_driver = NULL;
+
+ if (!WARN_ON(cpuidle_driver_refcount > 0))
+ cpuidle_curr_driver = NULL;
+
spin_unlock(&cpuidle_driver_lock);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
+
+struct cpuidle_driver *cpuidle_driver_ref(void)
+{
+ struct cpuidle_driver *drv;
+
+ spin_lock(&cpuidle_driver_lock);
+
+ drv = cpuidle_curr_driver;
+ cpuidle_driver_refcount++;
+
+ spin_unlock(&cpuidle_driver_lock);
+ return drv;
+}
+
+void cpuidle_driver_unref(void)
+{
+ spin_lock(&cpuidle_driver_lock);
+
+ if (!WARN_ON(cpuidle_driver_refcount <= 0))
+ cpuidle_driver_refcount--;
+
+ spin_unlock(&cpuidle_driver_lock);
+}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 06335756ea14..5b1f2c372c1f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -281,7 +281,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* unless the timer is happening really really soon.
*/
if (data->expected_us > 5 &&
- drv->states[CPUIDLE_DRIVER_STATE_START].disable == 0)
+ !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
+ dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
data->last_state_idx = CPUIDLE_DRIVER_STATE_START;
/*
@@ -290,8 +291,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
*/
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
+ struct cpuidle_state_usage *su = &dev->states_usage[i];
- if (s->disable)
+ if (s->disabled || su->disable)
continue;
if (s->target_residency > data->predicted_us)
continue;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 88032b4dc6d2..5f809e337b89 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -217,7 +217,8 @@ struct cpuidle_state_attr {
struct attribute attr;
ssize_t (*show)(struct cpuidle_state *, \
struct cpuidle_state_usage *, char *);
- ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
+ ssize_t (*store)(struct cpuidle_state *, \
+ struct cpuidle_state_usage *, const char *, size_t);
};
#define define_one_state_ro(_name, show) \
@@ -233,21 +234,22 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
return sprintf(buf, "%u\n", state->_name);\
}
-#define define_store_state_function(_name) \
+#define define_store_state_ull_function(_name) \
static ssize_t store_state_##_name(struct cpuidle_state *state, \
+ struct cpuidle_state_usage *state_usage, \
const char *buf, size_t size) \
{ \
- long value; \
+ unsigned long long value; \
int err; \
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
- err = kstrtol(buf, 0, &value); \
+ err = kstrtoull(buf, 0, &value); \
if (err) \
return err; \
if (value) \
- state->disable = 1; \
+ state_usage->_name = 1; \
else \
- state->disable = 0; \
+ state_usage->_name = 0; \
return size; \
}
@@ -273,8 +275,8 @@ define_show_state_ull_function(usage)
define_show_state_ull_function(time)
define_show_state_str_function(name)
define_show_state_str_function(desc)
-define_show_state_function(disable)
-define_store_state_function(disable)
+define_show_state_ull_function(disable)
+define_store_state_ull_function(disable)
define_one_state_ro(name, show_state_name);
define_one_state_ro(desc, show_state_desc);
@@ -318,10 +320,11 @@ static ssize_t cpuidle_state_store(struct kobject *kobj,
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
+ struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
struct cpuidle_state_attr *cattr = attr_to_stateattr(attr);
if (cattr->store)
- ret = cattr->store(state, buf, size);
+ ret = cattr->store(state, state_usage, buf, size);
return ret;
}
diff --git a/drivers/crypto/tegra-aes.c b/drivers/crypto/tegra-aes.c
index 422a9766c7c9..ac236f6724f4 100644
--- a/drivers/crypto/tegra-aes.c
+++ b/drivers/crypto/tegra-aes.c
@@ -572,7 +572,7 @@ static void aes_workqueue_handler(struct work_struct *work)
struct tegra_aes_dev *dd = aes_dev;
int ret;
- ret = clk_enable(dd->aes_clk);
+ ret = clk_prepare_enable(dd->aes_clk);
if (ret)
BUG_ON("clock enable failed");
@@ -581,7 +581,7 @@ static void aes_workqueue_handler(struct work_struct *work)
ret = tegra_aes_handle_req(dd);
} while (!ret);
- clk_disable(dd->aes_clk);
+ clk_disable_unprepare(dd->aes_clk);
}
static irqreturn_t aes_irq(int irq, void *dev_id)
@@ -673,7 +673,7 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
/* take mutex to access the aes hw */
mutex_lock(&aes_lock);
- ret = clk_enable(dd->aes_clk);
+ ret = clk_prepare_enable(dd->aes_clk);
if (ret)
return ret;
@@ -700,7 +700,7 @@ static int tegra_aes_get_random(struct crypto_rng *tfm, u8 *rdata,
}
out:
- clk_disable(dd->aes_clk);
+ clk_disable_unprepare(dd->aes_clk);
mutex_unlock(&aes_lock);
dev_dbg(dd->dev, "%s: done\n", __func__);
@@ -758,7 +758,7 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
dd->flags = FLAGS_ENCRYPT | FLAGS_RNG;
- ret = clk_enable(dd->aes_clk);
+ ret = clk_prepare_enable(dd->aes_clk);
if (ret)
return ret;
@@ -788,7 +788,7 @@ static int tegra_aes_rng_reset(struct crypto_rng *tfm, u8 *seed,
memcpy(dd->dt, dt, DEFAULT_RNG_BLK_SZ);
out:
- clk_disable(dd->aes_clk);
+ clk_disable_unprepare(dd->aes_clk);
mutex_unlock(&aes_lock);
dev_dbg(dd->dev, "%s: done\n", __func__);
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 7cac12793a4b..1c307e1b840c 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1661,27 +1661,26 @@ static void ux500_cryp_shutdown(struct platform_device *pdev)
}
-static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state)
+static int ux500_cryp_suspend(struct device *dev)
{
int ret;
+ struct platform_device *pdev = to_platform_device(dev);
struct cryp_device_data *device_data;
struct resource *res_irq;
struct cryp_ctx *temp_ctx = NULL;
- dev_dbg(&pdev->dev, "[%s]", __func__);
+ dev_dbg(dev, "[%s]", __func__);
/* Handle state? */
device_data = platform_get_drvdata(pdev);
if (!device_data) {
- dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
- __func__);
+ dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
return -ENOMEM;
}
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res_irq)
- dev_err(&pdev->dev, "[%s]: IORESOURCE_IRQ, unavailable",
- __func__);
+ dev_err(dev, "[%s]: IORESOURCE_IRQ, unavailable", __func__);
else
disable_irq(res_irq->start);
@@ -1692,32 +1691,32 @@ static int ux500_cryp_suspend(struct platform_device *pdev, pm_message_t state)
if (device_data->current_ctx == ++temp_ctx) {
if (down_interruptible(&driver_data.device_allocation))
- dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
- "failed", __func__);
- ret = cryp_disable_power(&pdev->dev, device_data, false);
+ dev_dbg(dev, "[%s]: down_interruptible() failed",
+ __func__);
+ ret = cryp_disable_power(dev, device_data, false);
} else
- ret = cryp_disable_power(&pdev->dev, device_data, true);
+ ret = cryp_disable_power(dev, device_data, true);
if (ret)
- dev_err(&pdev->dev, "[%s]: cryp_disable_power()", __func__);
+ dev_err(dev, "[%s]: cryp_disable_power()", __func__);
return ret;
}
-static int ux500_cryp_resume(struct platform_device *pdev)
+static int ux500_cryp_resume(struct device *dev)
{
int ret = 0;
+ struct platform_device *pdev = to_platform_device(dev);
struct cryp_device_data *device_data;
struct resource *res_irq;
struct cryp_ctx *temp_ctx = NULL;
- dev_dbg(&pdev->dev, "[%s]", __func__);
+ dev_dbg(dev, "[%s]", __func__);
device_data = platform_get_drvdata(pdev);
if (!device_data) {
- dev_err(&pdev->dev, "[%s]: platform_get_drvdata() failed!",
- __func__);
+ dev_err(dev, "[%s]: platform_get_drvdata() failed!", __func__);
return -ENOMEM;
}
@@ -1730,11 +1729,10 @@ static int ux500_cryp_resume(struct platform_device *pdev)
if (!device_data->current_ctx)
up(&driver_data.device_allocation);
else
- ret = cryp_enable_power(&pdev->dev, device_data, true);
+ ret = cryp_enable_power(dev, device_data, true);
if (ret)
- dev_err(&pdev->dev, "[%s]: cryp_enable_power() failed!",
- __func__);
+ dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
else {
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (res_irq)
@@ -1744,15 +1742,16 @@ static int ux500_cryp_resume(struct platform_device *pdev)
return ret;
}
+static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
+
static struct platform_driver cryp_driver = {
.probe = ux500_cryp_probe,
.remove = ux500_cryp_remove,
.shutdown = ux500_cryp_shutdown,
- .suspend = ux500_cryp_suspend,
- .resume = ux500_cryp_resume,
.driver = {
.owner = THIS_MODULE,
.name = "cryp1"
+ .pm = &ux500_cryp_pm,
}
};
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 6dbb9ec709a3..08d5032cb564 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -1894,19 +1894,17 @@ static void ux500_hash_shutdown(struct platform_device *pdev)
/**
* ux500_hash_suspend - Function that suspends the hash device.
- * @pdev: The platform device.
- * @state: -
+ * @dev: Device to suspend.
*/
-static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
+static int ux500_hash_suspend(struct device *dev)
{
int ret;
struct hash_device_data *device_data;
struct hash_ctx *temp_ctx = NULL;
- device_data = platform_get_drvdata(pdev);
+ device_data = dev_get_drvdata(dev);
if (!device_data) {
- dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
- __func__);
+ dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__);
return -ENOMEM;
}
@@ -1917,33 +1915,32 @@ static int ux500_hash_suspend(struct platform_device *pdev, pm_message_t state)
if (device_data->current_ctx == ++temp_ctx) {
if (down_interruptible(&driver_data.device_allocation))
- dev_dbg(&pdev->dev, "[%s]: down_interruptible() "
- "failed", __func__);
+ dev_dbg(dev, "[%s]: down_interruptible() failed",
+ __func__);
ret = hash_disable_power(device_data, false);
} else
ret = hash_disable_power(device_data, true);
if (ret)
- dev_err(&pdev->dev, "[%s]: hash_disable_power()", __func__);
+ dev_err(dev, "[%s]: hash_disable_power()", __func__);
return ret;
}
/**
* ux500_hash_resume - Function that resume the hash device.
- * @pdev: The platform device.
+ * @dev: Device to resume.
*/
-static int ux500_hash_resume(struct platform_device *pdev)
+static int ux500_hash_resume(struct device *dev)
{
int ret = 0;
struct hash_device_data *device_data;
struct hash_ctx *temp_ctx = NULL;
- device_data = platform_get_drvdata(pdev);
+ device_data = dev_get_drvdata(dev);
if (!device_data) {
- dev_err(&pdev->dev, "[%s] platform_get_drvdata() failed!",
- __func__);
+ dev_err(dev, "[%s] platform_get_drvdata() failed!", __func__);
return -ENOMEM;
}
@@ -1958,21 +1955,21 @@ static int ux500_hash_resume(struct platform_device *pdev)
ret = hash_enable_power(device_data, true);
if (ret)
- dev_err(&pdev->dev, "[%s]: hash_enable_power() failed!",
- __func__);
+ dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
return ret;
}
+static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
+
static struct platform_driver hash_driver = {
.probe = ux500_hash_probe,
.remove = ux500_hash_remove,
.shutdown = ux500_hash_shutdown,
- .suspend = ux500_hash_suspend,
- .resume = ux500_hash_resume,
.driver = {
.owner = THIS_MODULE,
.name = "hash1",
+ .pm = &ux500_hash_pm,
}
};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index aadeb5be9dba..d45cf1bcbde5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -148,6 +148,20 @@ config TXX9_DMAC
Support the TXx9 SoC internal DMA controller. This can be
integrated in chips such as the Toshiba TX4927/38/39.
+config TEGRA20_APB_DMA
+ bool "NVIDIA Tegra20 APB DMA support"
+ depends on ARCH_TEGRA
+ select DMA_ENGINE
+ help
+ Support for the NVIDIA Tegra20 APB DMA controller driver. The
+ DMA controller is having multiple DMA channel which can be
+ configured for different peripherals like audio, UART, SPI,
+ I2C etc which is in APB bus.
+ This DMA controller transfers data from memory to peripheral fifo
+ or vice versa. It does not support memory to memory data transfer.
+
+
+
config SH_DMAE
tristate "Renesas SuperH DMAC support"
depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE)
@@ -237,7 +251,7 @@ config IMX_DMA
config MXS_DMA
bool "MXS DMA support"
- depends on SOC_IMX23 || SOC_IMX28
+ depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q
select STMP_DEVICE
select DMA_ENGINE
help
@@ -260,6 +274,16 @@ config DMA_SA11X0
SA-1110 SoCs. This DMA engine can only be used with on-chip
devices.
+config MMP_TDMA
+ bool "MMP Two-Channel DMA support"
+ depends on ARCH_MMP
+ select DMA_ENGINE
+ help
+ Support the MMP Two-Channel DMA engine.
+ This engine used for MMP Audio DMA and pxa910 SQU.
+
+ Say Y here if you enabled MMP ADMA, otherwise say N.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 86b795baba98..640356add0a3 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
-obj-$(CONFIG_SH_DMAE) += shdma.o
+obj-$(CONFIG_SH_DMAE) += sh/
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
@@ -23,8 +23,10 @@ obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
+obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 7292aa87b2dd..3934fcc4e00b 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -9,10 +9,9 @@
* (at your option) any later version.
*
*
- * This supports the Atmel AHB DMA Controller,
- *
- * The driver has currently been tested with the Atmel AT91SAM9RL
- * and AT91SAM9G45 series.
+ * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
+ * The only Atmel DMA Controller that is not covered by this driver is the one
+ * found on AT91SAM9263.
*/
#include <linux/clk.h>
@@ -1217,7 +1216,7 @@ static const struct platform_device_id atdma_devtypes[] = {
}
};
-static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
+static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
struct platform_device *pdev)
{
if (pdev->dev.of_node) {
@@ -1255,7 +1254,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
int irq;
int err;
int i;
- struct at_dma_platform_data *plat_dat;
+ const struct at_dma_platform_data *plat_dat;
/* setup platform data for each SoC */
dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e67b4e06a918..aa384e53b7ac 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1438,34 +1438,32 @@ static int __init coh901318_probe(struct platform_device *pdev)
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io)
- goto err_get_resource;
+ return -ENODEV;
/* Map DMA controller registers to virtual memory */
- if (request_mem_region(io->start,
- resource_size(io),
- pdev->dev.driver->name) == NULL) {
- err = -EBUSY;
- goto err_request_mem;
- }
+ if (devm_request_mem_region(&pdev->dev,
+ io->start,
+ resource_size(io),
+ pdev->dev.driver->name) == NULL)
+ return -ENOMEM;
pdata = pdev->dev.platform_data;
if (!pdata)
- goto err_no_platformdata;
+ return -ENODEV;
- base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) +
- pdata->max_channels *
- sizeof(struct coh901318_chan),
- GFP_KERNEL);
+ base = devm_kzalloc(&pdev->dev,
+ ALIGN(sizeof(struct coh901318_base), 4) +
+ pdata->max_channels *
+ sizeof(struct coh901318_chan),
+ GFP_KERNEL);
if (!base)
- goto err_alloc_coh_dma_channels;
+ return -ENOMEM;
base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
- base->virtbase = ioremap(io->start, resource_size(io));
- if (!base->virtbase) {
- err = -ENOMEM;
- goto err_no_ioremap;
- }
+ base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io));
+ if (!base->virtbase)
+ return -ENOMEM;
base->dev = &pdev->dev;
base->platform = pdata;
@@ -1474,25 +1472,20 @@ static int __init coh901318_probe(struct platform_device *pdev)
COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
- platform_set_drvdata(pdev, base);
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- goto err_no_irq;
-
- err = request_irq(irq, dma_irq_handler, IRQF_DISABLED,
- "coh901318", base);
- if (err) {
- dev_crit(&pdev->dev,
- "Cannot allocate IRQ for DMA controller!\n");
- goto err_request_irq;
- }
+ return irq;
+
+ err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED,
+ "coh901318", base);
+ if (err)
+ return err;
err = coh901318_pool_create(&base->pool, &pdev->dev,
sizeof(struct coh901318_lli),
32);
if (err)
- goto err_pool_create;
+ return err;
/* init channels for device transfers */
coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
@@ -1538,6 +1531,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
if (err)
goto err_register_memcpy;
+ platform_set_drvdata(pdev, base);
dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
(u32) base->virtbase);
@@ -1547,19 +1541,6 @@ static int __init coh901318_probe(struct platform_device *pdev)
dma_async_device_unregister(&base->dma_slave);
err_register_slave:
coh901318_pool_destroy(&base->pool);
- err_pool_create:
- free_irq(platform_get_irq(pdev, 0), base);
- err_request_irq:
- err_no_irq:
- iounmap(base->virtbase);
- err_no_ioremap:
- kfree(base);
- err_alloc_coh_dma_channels:
- err_no_platformdata:
- release_mem_region(pdev->resource->start,
- resource_size(pdev->resource));
- err_request_mem:
- err_get_resource:
return err;
}
@@ -1570,11 +1551,6 @@ static int __exit coh901318_remove(struct platform_device *pdev)
dma_async_device_unregister(&base->dma_memcpy);
dma_async_device_unregister(&base->dma_slave);
coh901318_pool_destroy(&base->pool);
- free_irq(platform_get_irq(pdev, 0), base);
- iounmap(base->virtbase);
- kfree(base);
- release_mem_region(pdev->resource->start,
- resource_size(pdev->resource));
return 0;
}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 2397f6f451b1..3491654cdf7b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -45,6 +45,8 @@
* See Documentation/dmaengine.txt for more details
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -261,7 +263,7 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
do {
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
- printk(KERN_ERR "dma_sync_wait_timeout!\n");
+ pr_err("%s: timeout!\n", __func__);
return DMA_ERROR;
}
} while (status == DMA_IN_PROGRESS);
@@ -312,7 +314,7 @@ static int __init dma_channel_table_init(void)
}
if (err) {
- pr_err("dmaengine: initialization failure\n");
+ pr_err("initialization failure\n");
for_each_dma_cap_mask(cap, dma_cap_mask_all)
if (channel_table[cap])
free_percpu(channel_table[cap]);
@@ -520,12 +522,12 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
err = dma_chan_get(chan);
if (err == -ENODEV) {
- pr_debug("%s: %s module removed\n", __func__,
- dma_chan_name(chan));
+ pr_debug("%s: %s module removed\n",
+ __func__, dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else if (err)
pr_debug("%s: failed to get %s: (%d)\n",
- __func__, dma_chan_name(chan), err);
+ __func__, dma_chan_name(chan), err);
else
break;
if (--device->privatecnt == 0)
@@ -535,7 +537,9 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
}
mutex_unlock(&dma_list_mutex);
- pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
+ pr_debug("%s: %s (%s)\n",
+ __func__,
+ chan ? "success" : "fail",
chan ? dma_chan_name(chan) : NULL);
return chan;
@@ -579,7 +583,7 @@ void dmaengine_get(void)
break;
} else if (err)
pr_err("%s: failed to get %s: (%d)\n",
- __func__, dma_chan_name(chan), err);
+ __func__, dma_chan_name(chan), err);
}
}
@@ -1015,7 +1019,7 @@ dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
while (tx->cookie == -EBUSY) {
if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
pr_err("%s timeout waiting for descriptor submission\n",
- __func__);
+ __func__);
return DMA_ERROR;
}
cpu_relax();
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 721296157577..d3c5a5a88f1e 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -105,13 +105,13 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
spin_lock_irqsave(&dwc->lock, flags);
list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
+ i++;
if (async_tx_test_ack(&desc->txd)) {
list_del(&desc->desc_node);
ret = desc;
break;
}
dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
- i++;
}
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -191,6 +191,42 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
/*----------------------------------------------------------------------*/
+static inline unsigned int dwc_fast_fls(unsigned long long v)
+{
+ /*
+ * We can be a lot more clever here, but this should take care
+ * of the most common optimization.
+ */
+ if (!(v & 7))
+ return 3;
+ else if (!(v & 3))
+ return 2;
+ else if (!(v & 1))
+ return 1;
+ return 0;
+}
+
+static void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
+{
+ dev_err(chan2dev(&dwc->chan),
+ " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
+ channel_readl(dwc, SAR),
+ channel_readl(dwc, DAR),
+ channel_readl(dwc, LLP),
+ channel_readl(dwc, CTL_HI),
+ channel_readl(dwc, CTL_LO));
+}
+
+
+static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
+{
+ channel_clear_bit(dw, CH_EN, dwc->mask);
+ while (dma_readl(dw, CH_EN) & dwc->mask)
+ cpu_relax();
+}
+
+/*----------------------------------------------------------------------*/
+
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
@@ -200,13 +236,7 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
"BUG: Attempted to start non-idle channel\n");
- dev_err(chan2dev(&dwc->chan),
- " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
- channel_readl(dwc, SAR),
- channel_readl(dwc, DAR),
- channel_readl(dwc, LLP),
- channel_readl(dwc, CTL_HI),
- channel_readl(dwc, CTL_LO));
+ dwc_dump_chan_regs(dwc);
/* The tasklet will hopefully advance the queue... */
return;
@@ -290,9 +320,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
"BUG: XFER bit set, but channel not idle!\n");
/* Try to continue after resetting the channel... */
- channel_clear_bit(dw, CH_EN, dwc->mask);
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ dwc_chan_disable(dw, dwc);
}
/*
@@ -337,7 +365,8 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
return;
}
- dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
+ dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
+ (unsigned long long)llp);
list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
/* check first descriptors addr */
@@ -373,9 +402,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
"BUG: All descriptors done, but channel not idle!\n");
/* Try to continue after resetting the channel... */
- channel_clear_bit(dw, CH_EN, dwc->mask);
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ dwc_chan_disable(dw, dwc);
if (!list_empty(&dwc->queue)) {
list_move(dwc->queue.next, &dwc->active_list);
@@ -384,12 +411,11 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
spin_unlock_irqrestore(&dwc->lock, flags);
}
-static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
+static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
{
dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
" desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
- lli->sar, lli->dar, lli->llp,
- lli->ctlhi, lli->ctllo);
+ lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo);
}
static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -487,17 +513,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
spin_lock_irqsave(&dwc->lock, flags);
- dev_err(chan2dev(&dwc->chan),
- " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
- channel_readl(dwc, SAR),
- channel_readl(dwc, DAR),
- channel_readl(dwc, LLP),
- channel_readl(dwc, CTL_HI),
- channel_readl(dwc, CTL_LO));
+ dwc_dump_chan_regs(dwc);
- channel_clear_bit(dw, CH_EN, dwc->mask);
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ dwc_chan_disable(dw, dwc);
/* make sure DMA does not restart by loading a new list */
channel_writel(dwc, LLP, 0);
@@ -527,7 +545,7 @@ static void dw_dma_tasklet(unsigned long data)
status_xfer = dma_readl(dw, RAW.XFER);
status_err = dma_readl(dw, RAW.ERROR);
- dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);
+ dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);
for (i = 0; i < dw->dma.chancnt; i++) {
dwc = &dw->chan[i];
@@ -551,7 +569,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
struct dw_dma *dw = dev_id;
u32 status;
- dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
+ dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__,
dma_readl(dw, STATUS_INT));
/*
@@ -597,12 +615,12 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
* for DMA. But this is hard to do in a race-free manner.
*/
if (list_empty(&dwc->active_list)) {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
+ dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__,
desc->txd.cookie);
list_add_tail(&desc->desc_node, &dwc->active_list);
dwc_dostart(dwc, dwc_first_active(dwc));
} else {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+ dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__,
desc->txd.cookie);
list_add_tail(&desc->desc_node, &dwc->queue);
@@ -627,26 +645,17 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
unsigned int dst_width;
u32 ctllo;
- dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
- dest, src, len, flags);
+ dev_vdbg(chan2dev(chan),
+ "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
+ (unsigned long long)dest, (unsigned long long)src,
+ len, flags);
if (unlikely(!len)) {
- dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
+ dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
return NULL;
}
- /*
- * We can be a lot more clever here, but this should take care
- * of the most common optimization.
- */
- if (!((src | dest | len) & 7))
- src_width = dst_width = 3;
- else if (!((src | dest | len) & 3))
- src_width = dst_width = 2;
- else if (!((src | dest | len) & 1))
- src_width = dst_width = 1;
- else
- src_width = dst_width = 0;
+ src_width = dst_width = dwc_fast_fls(src | dest | len);
ctllo = DWC_DEFAULT_CTLLO(chan)
| DWC_CTLL_DST_WIDTH(dst_width)
@@ -720,7 +729,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct scatterlist *sg;
size_t total_len = 0;
- dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
+ dev_vdbg(chan2dev(chan), "%s\n", __func__);
if (unlikely(!dws || !sg_len))
return NULL;
@@ -746,14 +755,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
- if (!((mem | len) & 7))
- mem_width = 3;
- else if (!((mem | len) & 3))
- mem_width = 2;
- else if (!((mem | len) & 1))
- mem_width = 1;
- else
- mem_width = 0;
+ mem_width = dwc_fast_fls(mem | len);
slave_sg_todev_fill_desc:
desc = dwc_desc_get(dwc);
@@ -813,14 +815,7 @@ slave_sg_todev_fill_desc:
mem = sg_dma_address(sg);
len = sg_dma_len(sg);
- if (!((mem | len) & 7))
- mem_width = 3;
- else if (!((mem | len) & 3))
- mem_width = 2;
- else if (!((mem | len) & 1))
- mem_width = 1;
- else
- mem_width = 0;
+ mem_width = dwc_fast_fls(mem | len);
slave_sg_fromdev_fill_desc:
desc = dwc_desc_get(dwc);
@@ -950,9 +945,7 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
} else if (cmd == DMA_TERMINATE_ALL) {
spin_lock_irqsave(&dwc->lock, flags);
- channel_clear_bit(dw, CH_EN, dwc->mask);
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ dwc_chan_disable(dw, dwc);
dwc->paused = false;
@@ -1014,7 +1007,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
int i;
unsigned long flags;
- dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
+ dev_vdbg(chan2dev(chan), "%s\n", __func__);
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1057,8 +1050,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
spin_unlock_irqrestore(&dwc->lock, flags);
- dev_dbg(chan2dev(chan),
- "alloc_chan_resources allocated %d descriptors\n", i);
+ dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
return i;
}
@@ -1071,7 +1063,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
unsigned long flags;
LIST_HEAD(list);
- dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
+ dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
dwc->descs_allocated);
/* ASSERT: channel is idle */
@@ -1097,7 +1089,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
kfree(desc);
}
- dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
+ dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -1126,13 +1118,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
if (dma_readl(dw, CH_EN) & dwc->mask) {
dev_err(chan2dev(&dwc->chan),
"BUG: Attempted to start non-idle channel\n");
- dev_err(chan2dev(&dwc->chan),
- " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
- channel_readl(dwc, SAR),
- channel_readl(dwc, DAR),
- channel_readl(dwc, LLP),
- channel_readl(dwc, CTL_HI),
- channel_readl(dwc, CTL_LO));
+ dwc_dump_chan_regs(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return -EBUSY;
}
@@ -1167,9 +1153,7 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
- channel_clear_bit(dw, CH_EN, dwc->mask);
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ dwc_chan_disable(dw, dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
}
@@ -1308,9 +1292,9 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
sizeof(last->lli), DMA_TO_DEVICE);
- dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
- "period %zu periods %d\n", buf_addr, buf_len,
- period_len, periods);
+ dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
+ "period %zu periods %d\n", (unsigned long long)buf_addr,
+ buf_len, period_len, periods);
cdesc->periods = periods;
dwc->cdesc = cdesc;
@@ -1340,16 +1324,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
int i;
unsigned long flags;
- dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
+ dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__);
if (!cdesc)
return;
spin_lock_irqsave(&dwc->lock, flags);
- channel_clear_bit(dw, CH_EN, dwc->mask);
- while (dma_readl(dw, CH_EN) & dwc->mask)
- cpu_relax();
+ dwc_chan_disable(dw, dwc);
dma_writel(dw, CLEAR.ERROR, dwc->mask);
dma_writel(dw, CLEAR.XFER, dwc->mask);
@@ -1386,7 +1368,7 @@ static void dw_dma_off(struct dw_dma *dw)
dw->chan[i].initialized = false;
}
-static int __init dw_probe(struct platform_device *pdev)
+static int __devinit dw_probe(struct platform_device *pdev)
{
struct dw_dma_platform_data *pdata;
struct resource *io;
@@ -1432,9 +1414,15 @@ static int __init dw_probe(struct platform_device *pdev)
}
clk_prepare_enable(dw->clk);
+ /* Calculate all channel mask before DMA setup */
+ dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
+
/* force dma off, just in case */
dw_dma_off(dw);
+ /* disable BLOCK interrupts as well */
+ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
+
err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
if (err)
goto err_irq;
@@ -1443,8 +1431,6 @@ static int __init dw_probe(struct platform_device *pdev)
tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
- dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
-
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < pdata->nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
@@ -1474,17 +1460,13 @@ static int __init dw_probe(struct platform_device *pdev)
channel_clear_bit(dw, CH_EN, dwc->mask);
}
- /* Clear/disable all interrupts on all channels. */
+ /* Clear all interrupts on all channels. */
dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
+ dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
- channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
- channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
- channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
- channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
-
dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
if (pdata->is_private)
@@ -1523,7 +1505,7 @@ err_kfree:
return err;
}
-static int __exit dw_remove(struct platform_device *pdev)
+static int __devexit dw_remove(struct platform_device *pdev)
{
struct dw_dma *dw = platform_get_drvdata(pdev);
struct dw_dma_chan *dwc, *_dwc;
@@ -1602,7 +1584,7 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
static struct platform_driver dw_driver = {
- .remove = __exit_p(dw_remove),
+ .remove = __devexit_p(dw_remove),
.shutdown = dw_shutdown,
.driver = {
.name = "dw_dmac",
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index f298f69ecbf9..50830bee087a 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -82,7 +82,7 @@ struct dw_dma_regs {
DW_REG(ID);
DW_REG(TEST);
- /* optional encoded params, 0x3c8..0x3 */
+ /* optional encoded params, 0x3c8..0x3f7 */
};
/* Bitfields in CTL_LO */
@@ -219,9 +219,9 @@ static inline struct dw_dma *to_dw_dma(struct dma_device *ddev)
/* LLI == Linked List Item; a.k.a. DMA block descriptor */
struct dw_lli {
/* values that are not changed by hardware */
- dma_addr_t sar;
- dma_addr_t dar;
- dma_addr_t llp; /* chain to next lli */
+ u32 sar;
+ u32 dar;
+ u32 llp; /* chain to next lli */
u32 ctllo;
/* values that may get written back: */
u32 ctlhi;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 5ec72044ea4c..c7573e50aa14 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1663,7 +1663,6 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
static int __init ipu_probe(struct platform_device *pdev)
{
- struct ipu_platform_data *pdata = pdev->dev.platform_data;
struct resource *mem_ipu, *mem_ic;
int ret;
@@ -1671,7 +1670,7 @@ static int __init ipu_probe(struct platform_device *pdev)
mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!pdata || !mem_ipu || !mem_ic)
+ if (!mem_ipu || !mem_ic)
return -EINVAL;
ipu_data.dev = &pdev->dev;
@@ -1688,10 +1687,9 @@ static int __init ipu_probe(struct platform_device *pdev)
goto err_noirq;
ipu_data.irq_err = ret;
- ipu_data.irq_base = pdata->irq_base;
- dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n",
- ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base);
+ dev_dbg(&pdev->dev, "fn irq %u, err irq %u\n",
+ ipu_data.irq_fn, ipu_data.irq_err);
/* Remap IPU common registers */
ipu_data.reg_ipu = ioremap(mem_ipu->start, resource_size(mem_ipu));
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index a71f55e72be9..fa95bcc3de1f 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <mach/ipu.h>
@@ -354,10 +355,12 @@ static struct irq_chip ipu_irq_chip = {
/* Install the IRQ handler */
int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
{
- struct ipu_platform_data *pdata = dev->dev.platform_data;
- unsigned int irq, irq_base, i;
+ unsigned int irq, i;
+ int irq_base = irq_alloc_descs(-1, 0, CONFIG_MX3_IPU_IRQS,
+ numa_node_id());
- irq_base = pdata->irq_base;
+ if (irq_base < 0)
+ return irq_base;
for (i = 0; i < IPU_IRQ_NR_BANKS; i++)
irq_bank[i].ipu = ipu;
@@ -387,15 +390,16 @@ int __init ipu_irq_attach_irq(struct ipu *ipu, struct platform_device *dev)
irq_set_handler_data(ipu->irq_err, ipu);
irq_set_chained_handler(ipu->irq_err, ipu_irq_err);
+ ipu->irq_base = irq_base;
+
return 0;
}
void ipu_irq_detach_irq(struct ipu *ipu, struct platform_device *dev)
{
- struct ipu_platform_data *pdata = dev->dev.platform_data;
unsigned int irq, irq_base;
- irq_base = pdata->irq_base;
+ irq_base = ipu->irq_base;
irq_set_chained_handler(ipu->irq_fn, NULL);
irq_set_handler_data(ipu->irq_fn, NULL);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
new file mode 100644
index 000000000000..8a15cf2163dc
--- /dev/null
+++ b/drivers/dma/mmp_tdma.c
@@ -0,0 +1,610 @@
+/*
+ * Driver For Marvell Two-channel DMA Engine
+ *
+ * Copyright: Marvell International Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+#include <mach/regs-icu.h>
+#include <mach/sram.h>
+
+#include "dmaengine.h"
+
+/*
+ * Two-Channel DMA registers
+ */
+#define TDBCR 0x00 /* Byte Count */
+#define TDSAR 0x10 /* Src Addr */
+#define TDDAR 0x20 /* Dst Addr */
+#define TDNDPR 0x30 /* Next Desc */
+#define TDCR 0x40 /* Control */
+#define TDCP 0x60 /* Priority*/
+#define TDCDPR 0x70 /* Current Desc */
+#define TDIMR 0x80 /* Int Mask */
+#define TDISR 0xa0 /* Int Status */
+
+/* Two-Channel DMA Control Register */
+#define TDCR_SSZ_8_BITS (0x0 << 22) /* Sample Size */
+#define TDCR_SSZ_12_BITS (0x1 << 22)
+#define TDCR_SSZ_16_BITS (0x2 << 22)
+#define TDCR_SSZ_20_BITS (0x3 << 22)
+#define TDCR_SSZ_24_BITS (0x4 << 22)
+#define TDCR_SSZ_32_BITS (0x5 << 22)
+#define TDCR_SSZ_SHIFT (0x1 << 22)
+#define TDCR_SSZ_MASK (0x7 << 22)
+#define TDCR_SSPMOD (0x1 << 21) /* SSP MOD */
+#define TDCR_ABR (0x1 << 20) /* Channel Abort */
+#define TDCR_CDE (0x1 << 17) /* Close Desc Enable */
+#define TDCR_PACKMOD (0x1 << 16) /* Pack Mode (ADMA Only) */
+#define TDCR_CHANACT (0x1 << 14) /* Channel Active */
+#define TDCR_FETCHND (0x1 << 13) /* Fetch Next Desc */
+#define TDCR_CHANEN (0x1 << 12) /* Channel Enable */
+#define TDCR_INTMODE (0x1 << 10) /* Interrupt Mode */
+#define TDCR_CHAINMOD (0x1 << 9) /* Chain Mode */
+#define TDCR_BURSTSZ_MSK (0x7 << 6) /* Burst Size */
+#define TDCR_BURSTSZ_4B (0x0 << 6)
+#define TDCR_BURSTSZ_8B (0x1 << 6)
+#define TDCR_BURSTSZ_16B (0x3 << 6)
+#define TDCR_BURSTSZ_32B (0x6 << 6)
+#define TDCR_BURSTSZ_64B (0x7 << 6)
+#define TDCR_BURSTSZ_SQU_32B (0x7 << 6)
+#define TDCR_BURSTSZ_128B (0x5 << 6)
+#define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */
+#define TDCR_DSTDIR_ADDR_HOLD (0x2 << 4) /* Dst Addr Hold */
+#define TDCR_DSTDIR_ADDR_INC (0x0 << 4) /* Dst Addr Increment */
+#define TDCR_SRCDIR_MSK (0x3 << 2) /* Src Direction */
+#define TDCR_SRCDIR_ADDR_HOLD (0x2 << 2) /* Src Addr Hold */
+#define TDCR_SRCDIR_ADDR_INC (0x0 << 2) /* Src Addr Increment */
+#define TDCR_DSTDESCCONT (0x1 << 1)
+#define TDCR_SRCDESTCONT (0x1 << 0)
+
+/* Two-Channel DMA Int Mask Register */
+#define TDIMR_COMP (0x1 << 0)
+
+/* Two-Channel DMA Int Status Register */
+#define TDISR_COMP (0x1 << 0)
+
+/*
+ * Two-Channel DMA Descriptor Struct
+ * NOTE: desc's buf must be aligned to 16 bytes.
+ */
+struct mmp_tdma_desc {
+ u32 byte_cnt;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 nxt_desc;
+};
+
+enum mmp_tdma_type {
+ MMP_AUD_TDMA = 0,
+ PXA910_SQU,
+};
+
+#define TDMA_ALIGNMENT 3
+#define TDMA_MAX_XFER_BYTES SZ_64K
+
+struct mmp_tdma_chan {
+ struct device *dev;
+ struct dma_chan chan;
+ struct dma_async_tx_descriptor desc;
+ struct tasklet_struct tasklet;
+
+ struct mmp_tdma_desc *desc_arr;
+ phys_addr_t desc_arr_phys;
+ int desc_num;
+ enum dma_transfer_direction dir;
+ dma_addr_t dev_addr;
+ u32 burst_sz;
+ enum dma_slave_buswidth buswidth;
+ enum dma_status status;
+
+ int idx;
+ enum mmp_tdma_type type;
+ int irq;
+ unsigned long reg_base;
+
+ size_t buf_len;
+ size_t period_len;
+ size_t pos;
+};
+
+#define TDMA_CHANNEL_NUM 2
+struct mmp_tdma_device {
+ struct device *dev;
+ void __iomem *base;
+ struct dma_device device;
+ struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM];
+ int irq;
+};
+
+#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
+
+static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
+{
+ writel(phys, tdmac->reg_base + TDNDPR);
+ writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND,
+ tdmac->reg_base + TDCR);
+}
+
+static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac)
+{
+ /* enable irq */
+ writel(TDIMR_COMP, tdmac->reg_base + TDIMR);
+ /* enable dma chan */
+ writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
+ tdmac->reg_base + TDCR);
+ tdmac->status = DMA_IN_PROGRESS;
+}
+
+static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac)
+{
+ writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
+ tdmac->reg_base + TDCR);
+ tdmac->status = DMA_SUCCESS;
+}
+
+static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac)
+{
+ writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN,
+ tdmac->reg_base + TDCR);
+ tdmac->status = DMA_IN_PROGRESS;
+}
+
+static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac)
+{
+ writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN,
+ tdmac->reg_base + TDCR);
+ tdmac->status = DMA_PAUSED;
+}
+
+static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
+{
+ unsigned int tdcr;
+
+ mmp_tdma_disable_chan(tdmac);
+
+ if (tdmac->dir == DMA_MEM_TO_DEV)
+ tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC;
+ else if (tdmac->dir == DMA_DEV_TO_MEM)
+ tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC;
+
+ if (tdmac->type == MMP_AUD_TDMA) {
+ tdcr |= TDCR_PACKMOD;
+
+ switch (tdmac->burst_sz) {
+ case 4:
+ tdcr |= TDCR_BURSTSZ_4B;
+ break;
+ case 8:
+ tdcr |= TDCR_BURSTSZ_8B;
+ break;
+ case 16:
+ tdcr |= TDCR_BURSTSZ_16B;
+ break;
+ case 32:
+ tdcr |= TDCR_BURSTSZ_32B;
+ break;
+ case 64:
+ tdcr |= TDCR_BURSTSZ_64B;
+ break;
+ case 128:
+ tdcr |= TDCR_BURSTSZ_128B;
+ break;
+ default:
+ dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n");
+ return -EINVAL;
+ }
+
+ switch (tdmac->buswidth) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ tdcr |= TDCR_SSZ_8_BITS;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ tdcr |= TDCR_SSZ_16_BITS;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ tdcr |= TDCR_SSZ_32_BITS;
+ break;
+ default:
+ dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n");
+ return -EINVAL;
+ }
+ } else if (tdmac->type == PXA910_SQU) {
+ tdcr |= TDCR_BURSTSZ_SQU_32B;
+ tdcr |= TDCR_SSPMOD;
+ }
+
+ writel(tdcr, tdmac->reg_base + TDCR);
+ return 0;
+}
+
+static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac)
+{
+ u32 reg = readl(tdmac->reg_base + TDISR);
+
+ if (reg & TDISR_COMP) {
+ /* clear irq */
+ reg &= ~TDISR_COMP;
+ writel(reg, tdmac->reg_base + TDISR);
+
+ return 0;
+ }
+ return -EAGAIN;
+}
+
+static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id)
+{
+ struct mmp_tdma_chan *tdmac = dev_id;
+
+ if (mmp_tdma_clear_chan_irq(tdmac) == 0) {
+ tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len;
+ tasklet_schedule(&tdmac->tasklet);
+ return IRQ_HANDLED;
+ } else
+ return IRQ_NONE;
+}
+
+static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id)
+{
+ struct mmp_tdma_device *tdev = dev_id;
+ int i, ret;
+ int irq_num = 0;
+
+ for (i = 0; i < TDMA_CHANNEL_NUM; i++) {
+ struct mmp_tdma_chan *tdmac = tdev->tdmac[i];
+
+ ret = mmp_tdma_chan_handler(irq, tdmac);
+ if (ret == IRQ_HANDLED)
+ irq_num++;
+ }
+
+ if (irq_num)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+static void dma_do_tasklet(unsigned long data)
+{
+ struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data;
+
+ if (tdmac->desc.callback)
+ tdmac->desc.callback(tdmac->desc.callback_param);
+
+}
+
+static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
+{
+ struct gen_pool *gpool;
+ int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
+
+ gpool = sram_get_gpool("asram");
+ if (tdmac->desc_arr)
+ gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
+ size);
+ tdmac->desc_arr = NULL;
+
+ return;
+}
+
+static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan);
+
+ mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys);
+
+ return 0;
+}
+
+static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+ int ret;
+
+ dma_async_tx_descriptor_init(&tdmac->desc, chan);
+ tdmac->desc.tx_submit = mmp_tdma_tx_submit;
+
+ if (tdmac->irq) {
+ ret = devm_request_irq(tdmac->dev, tdmac->irq,
+ mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac);
+ if (ret)
+ return ret;
+ }
+ return 1;
+}
+
+static void mmp_tdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+ if (tdmac->irq)
+ devm_free_irq(tdmac->dev, tdmac->irq, tdmac);
+ mmp_tdma_free_descriptor(tdmac);
+ return;
+}
+
+struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
+{
+ struct gen_pool *gpool;
+ int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);
+
+ gpool = sram_get_gpool("asram");
+ if (!gpool)
+ return NULL;
+
+ tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size);
+ if (!tdmac->desc_arr)
+ return NULL;
+
+ tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool,
+ (unsigned long)tdmac->desc_arr);
+
+ return tdmac->desc_arr;
+}
+
+static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ void *context)
+{
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+ struct mmp_tdma_desc *desc;
+ int num_periods = buf_len / period_len;
+ int i = 0, buf = 0;
+
+ if (tdmac->status != DMA_SUCCESS)
+ return NULL;
+
+ if (period_len > TDMA_MAX_XFER_BYTES) {
+ dev_err(tdmac->dev,
+ "maximum period size exceeded: %d > %d\n",
+ period_len, TDMA_MAX_XFER_BYTES);
+ goto err_out;
+ }
+
+ tdmac->status = DMA_IN_PROGRESS;
+ tdmac->desc_num = num_periods;
+ desc = mmp_tdma_alloc_descriptor(tdmac);
+ if (!desc)
+ goto err_out;
+
+ while (buf < buf_len) {
+ desc = &tdmac->desc_arr[i];
+
+ if (i + 1 == num_periods)
+ desc->nxt_desc = tdmac->desc_arr_phys;
+ else
+ desc->nxt_desc = tdmac->desc_arr_phys +
+ sizeof(*desc) * (i + 1);
+
+ if (direction == DMA_MEM_TO_DEV) {
+ desc->src_addr = dma_addr;
+ desc->dst_addr = tdmac->dev_addr;
+ } else {
+ desc->src_addr = tdmac->dev_addr;
+ desc->dst_addr = dma_addr;
+ }
+ desc->byte_cnt = period_len;
+ dma_addr += period_len;
+ buf += period_len;
+ i++;
+ }
+
+ tdmac->buf_len = buf_len;
+ tdmac->period_len = period_len;
+ tdmac->pos = 0;
+
+ return &tdmac->desc;
+
+err_out:
+ tdmac->status = DMA_ERROR;
+ return NULL;
+}
+
+static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+ struct dma_slave_config *dmaengine_cfg = (void *)arg;
+ int ret = 0;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ mmp_tdma_disable_chan(tdmac);
+ break;
+ case DMA_PAUSE:
+ mmp_tdma_pause_chan(tdmac);
+ break;
+ case DMA_RESUME:
+ mmp_tdma_resume_chan(tdmac);
+ break;
+ case DMA_SLAVE_CONFIG:
+ if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ tdmac->dev_addr = dmaengine_cfg->src_addr;
+ tdmac->burst_sz = dmaengine_cfg->src_maxburst;
+ tdmac->buswidth = dmaengine_cfg->src_addr_width;
+ } else {
+ tdmac->dev_addr = dmaengine_cfg->dst_addr;
+ tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
+ tdmac->buswidth = dmaengine_cfg->dst_addr_width;
+ }
+ tdmac->dir = dmaengine_cfg->direction;
+ return mmp_tdma_config_chan(tdmac);
+ default:
+ ret = -ENOSYS;
+ }
+
+ return ret;
+}
+
+static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+ dma_set_residue(txstate, tdmac->buf_len - tdmac->pos);
+
+ return tdmac->status;
+}
+
+static void mmp_tdma_issue_pending(struct dma_chan *chan)
+{
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+ mmp_tdma_enable_chan(tdmac);
+}
+
+static int __devexit mmp_tdma_remove(struct platform_device *pdev)
+{
+ struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
+
+ dma_async_device_unregister(&tdev->device);
+ return 0;
+}
+
+static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
+ int idx, int irq, int type)
+{
+ struct mmp_tdma_chan *tdmac;
+
+ if (idx >= TDMA_CHANNEL_NUM) {
+ dev_err(tdev->dev, "too many channels for device!\n");
+ return -EINVAL;
+ }
+
+ /* alloc channel */
+ tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL);
+ if (!tdmac) {
+ dev_err(tdev->dev, "no free memory for DMA channels!\n");
+ return -ENOMEM;
+ }
+ if (irq)
+ tdmac->irq = irq + idx;
+ tdmac->dev = tdev->dev;
+ tdmac->chan.device = &tdev->device;
+ tdmac->idx = idx;
+ tdmac->type = type;
+ tdmac->reg_base = (unsigned long)tdev->base + idx * 4;
+ tdmac->status = DMA_SUCCESS;
+ tdev->tdmac[tdmac->idx] = tdmac;
+ tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
+
+ /* add the channel to tdma_chan list */
+ list_add_tail(&tdmac->chan.device_node,
+ &tdev->device.channels);
+
+ return 0;
+}
+
+static int __devinit mmp_tdma_probe(struct platform_device *pdev)
+{
+ const struct platform_device_id *id = platform_get_device_id(pdev);
+ enum mmp_tdma_type type = id->driver_data;
+ struct mmp_tdma_device *tdev;
+ struct resource *iores;
+ int i, ret;
+ int irq = 0;
+ int chan_num = TDMA_CHANNEL_NUM;
+
+ /* always have couple channels */
+ tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
+ if (!tdev)
+ return -ENOMEM;
+
+ tdev->dev = &pdev->dev;
+ iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!iores)
+ return -EINVAL;
+
+ if (resource_size(iores) != chan_num)
+ tdev->irq = iores->start;
+ else
+ irq = iores->start;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -EINVAL;
+
+ tdev->base = devm_request_and_ioremap(&pdev->dev, iores);
+ if (!tdev->base)
+ return -EADDRNOTAVAIL;
+
+ if (tdev->irq) {
+ ret = devm_request_irq(&pdev->dev, tdev->irq,
+ mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
+ if (ret)
+ return ret;
+ }
+
+ dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
+ dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
+
+ INIT_LIST_HEAD(&tdev->device.channels);
+
+ /* initialize channel parameters */
+ for (i = 0; i < chan_num; i++) {
+ ret = mmp_tdma_chan_init(tdev, i, irq, type);
+ if (ret)
+ return ret;
+ }
+
+ tdev->device.dev = &pdev->dev;
+ tdev->device.device_alloc_chan_resources =
+ mmp_tdma_alloc_chan_resources;
+ tdev->device.device_free_chan_resources =
+ mmp_tdma_free_chan_resources;
+ tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
+ tdev->device.device_tx_status = mmp_tdma_tx_status;
+ tdev->device.device_issue_pending = mmp_tdma_issue_pending;
+ tdev->device.device_control = mmp_tdma_control;
+ tdev->device.copy_align = TDMA_ALIGNMENT;
+
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ platform_set_drvdata(pdev, tdev);
+
+ ret = dma_async_device_register(&tdev->device);
+ if (ret) {
+ dev_err(tdev->device.dev, "unable to register\n");
+ return ret;
+ }
+
+ dev_info(tdev->device.dev, "initialized\n");
+ return 0;
+}
+
+static const struct platform_device_id mmp_tdma_id_table[] = {
+ { "mmp-adma", MMP_AUD_TDMA },
+ { "pxa910-squ", PXA910_SQU },
+ { },
+};
+
+static struct platform_driver mmp_tdma_driver = {
+ .driver = {
+ .name = "mmp-tdma",
+ .owner = THIS_MODULE,
+ },
+ .id_table = mmp_tdma_id_table,
+ .probe = mmp_tdma_probe,
+ .remove = __devexit_p(mmp_tdma_remove),
+};
+
+module_platform_driver(mmp_tdma_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MMP Two-Channel DMA Driver");
+MODULE_ALIAS("platform:mmp-tdma");
+MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
+MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index c96ab15319f2..7f41b25805fa 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -29,7 +29,6 @@
#include <linux/of_device.h>
#include <asm/irq.h>
-#include <mach/mxs.h>
#include "dmaengine.h"
@@ -201,6 +200,7 @@ int mxs_dma_is_apbh(struct dma_chan *chan)
return dma_is_apbh(mxs_dma);
}
+EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);
int mxs_dma_is_apbx(struct dma_chan *chan)
{
@@ -209,6 +209,7 @@ int mxs_dma_is_apbx(struct dma_chan *chan)
return !dma_is_apbh(mxs_dma);
}
+EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile
new file mode 100644
index 000000000000..54ae9572b0ac
--- /dev/null
+++ b/drivers/dma/sh/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_SH_DMAE) += shdma-base.o
+obj-$(CONFIG_SH_DMAE) += shdma.o
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
new file mode 100644
index 000000000000..27f5c781fd73
--- /dev/null
+++ b/drivers/dma/sh/shdma-base.c
@@ -0,0 +1,934 @@
+/*
+ * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
+ *
+ * extracted from shdma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/shdma-base.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "../dmaengine.h"
+
+/* DMA descriptor control */
+enum shdma_desc_status {
+ DESC_IDLE,
+ DESC_PREPARED,
+ DESC_SUBMITTED,
+ DESC_COMPLETED, /* completed, have to call callback */
+ DESC_WAITING, /* callback called, waiting for ack / re-submit */
+};
+
+#define NR_DESCS_PER_CHANNEL 32
+
+#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
+#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
+
+/*
+ * For slave DMA we assume, that there is a finite number of DMA slaves in the
+ * system, and that each such slave can only use a finite number of channels.
+ * We use slave channel IDs to make sure, that no such slave channel ID is
+ * allocated more than once.
+ */
+static unsigned int slave_num = 256;
+module_param(slave_num, uint, 0444);
+
+/* A bitmask with slave_num bits */
+static unsigned long *shdma_slave_used;
+
+/* Called under spin_lock_irq(&schan->chan_lock") */
+static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
+{
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_desc *sdesc;
+
+ /* DMA work check */
+ if (ops->channel_busy(schan))
+ return;
+
+ /* Find the first not transferred descriptor */
+ list_for_each_entry(sdesc, &schan->ld_queue, node)
+ if (sdesc->mark == DESC_SUBMITTED) {
+ ops->start_xfer(schan, sdesc);
+ break;
+ }
+}
+
+static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct shdma_desc *chunk, *c, *desc =
+ container_of(tx, struct shdma_desc, async_tx),
+ *last = desc;
+ struct shdma_chan *schan = to_shdma_chan(tx->chan);
+ dma_async_tx_callback callback = tx->callback;
+ dma_cookie_t cookie;
+ bool power_up;
+
+ spin_lock_irq(&schan->chan_lock);
+
+ power_up = list_empty(&schan->ld_queue);
+
+ cookie = dma_cookie_assign(tx);
+
+ /* Mark all chunks of this descriptor as submitted, move to the queue */
+ list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
+ /*
+ * All chunks are on the global ld_free, so, we have to find
+ * the end of the chain ourselves
+ */
+ if (chunk != desc && (chunk->mark == DESC_IDLE ||
+ chunk->async_tx.cookie > 0 ||
+ chunk->async_tx.cookie == -EBUSY ||
+ &chunk->node == &schan->ld_free))
+ break;
+ chunk->mark = DESC_SUBMITTED;
+ /* Callback goes to the last chunk */
+ chunk->async_tx.callback = NULL;
+ chunk->cookie = cookie;
+ list_move_tail(&chunk->node, &schan->ld_queue);
+ last = chunk;
+
+ dev_dbg(schan->dev, "submit #%d@%p on %d\n",
+ tx->cookie, &last->async_tx, schan->id);
+ }
+
+ last->async_tx.callback = callback;
+ last->async_tx.callback_param = tx->callback_param;
+
+ if (power_up) {
+ int ret;
+ schan->pm_state = SHDMA_PM_BUSY;
+
+ ret = pm_runtime_get(schan->dev);
+
+ spin_unlock_irq(&schan->chan_lock);
+ if (ret < 0)
+ dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+
+ pm_runtime_barrier(schan->dev);
+
+ spin_lock_irq(&schan->chan_lock);
+
+ /* Have we been reset, while waiting? */
+ if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
+ struct shdma_dev *sdev =
+ to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ dev_dbg(schan->dev, "Bring up channel %d\n",
+ schan->id);
+ /*
+ * TODO: .xfer_setup() might fail on some platforms.
+ * Make it int then, on error remove chunks from the
+ * queue again
+ */
+ ops->setup_xfer(schan, schan->slave_id);
+
+ if (schan->pm_state == SHDMA_PM_PENDING)
+ shdma_chan_xfer_ld_queue(schan);
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ }
+ } else {
+ /*
+ * Tell .device_issue_pending() not to run the queue, interrupts
+ * will do it anyway
+ */
+ schan->pm_state = SHDMA_PM_PENDING;
+ }
+
+ spin_unlock_irq(&schan->chan_lock);
+
+ return cookie;
+}
+
+/* Called with desc_lock held */
+static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
+{
+ struct shdma_desc *sdesc;
+
+ list_for_each_entry(sdesc, &schan->ld_free, node)
+ if (sdesc->mark != DESC_PREPARED) {
+ BUG_ON(sdesc->mark != DESC_IDLE);
+ list_del(&sdesc->node);
+ return sdesc;
+ }
+
+ return NULL;
+}
+
+static int shdma_setup_slave(struct shdma_chan *schan, int slave_id)
+{
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ int ret;
+
+ if (slave_id < 0 || slave_id >= slave_num)
+ return -EINVAL;
+
+ if (test_and_set_bit(slave_id, shdma_slave_used))
+ return -EBUSY;
+
+ ret = ops->set_slave(schan, slave_id, false);
+ if (ret < 0) {
+ clear_bit(slave_id, shdma_slave_used);
+ return ret;
+ }
+
+ schan->slave_id = slave_id;
+
+ return 0;
+}
+
+/*
+ * This is the standard shdma filter function to be used as a replacement to the
+ * "old" method, using the .private pointer. If for some reason you allocate a
+ * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter
+ * parameter. If this filter is used, the slave driver, after calling
+ * dma_request_channel(), will also have to call dmaengine_slave_config() with
+ * .slave_id, .direction, and either .src_addr or .dst_addr set.
+ * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
+ * capability! If this becomes a requirement, hardware glue drivers, using this
+ * services would have to provide their own filters, which first would check
+ * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
+ * this, and only then, in case of a match, call this common filter.
+ */
+bool shdma_chan_filter(struct dma_chan *chan, void *arg)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ int slave_id = (int)arg;
+ int ret;
+
+ if (slave_id < 0)
+ /* No slave requested - arbitrary channel */
+ return true;
+
+ if (slave_id >= slave_num)
+ return false;
+
+ ret = ops->set_slave(schan, slave_id, true);
+ if (ret < 0)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL(shdma_chan_filter);
+
+static int shdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_desc *desc;
+ struct shdma_slave *slave = chan->private;
+ int ret, i;
+
+ /*
+ * This relies on the guarantee from dmaengine that alloc_chan_resources
+ * never runs concurrently with itself or free_chan_resources.
+ */
+ if (slave) {
+ /* Legacy mode: .private is set in filter */
+ ret = shdma_setup_slave(schan, slave->slave_id);
+ if (ret < 0)
+ goto esetslave;
+ } else {
+ schan->slave_id = -EINVAL;
+ }
+
+ schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
+ sdev->desc_size, GFP_KERNEL);
+ if (!schan->desc) {
+ ret = -ENOMEM;
+ goto edescalloc;
+ }
+ schan->desc_num = NR_DESCS_PER_CHANNEL;
+
+ for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
+ desc = ops->embedded_desc(schan->desc, i);
+ dma_async_tx_descriptor_init(&desc->async_tx,
+ &schan->dma_chan);
+ desc->async_tx.tx_submit = shdma_tx_submit;
+ desc->mark = DESC_IDLE;
+
+ list_add(&desc->node, &schan->ld_free);
+ }
+
+ return NR_DESCS_PER_CHANNEL;
+
+edescalloc:
+ if (slave)
+esetslave:
+ clear_bit(slave->slave_id, shdma_slave_used);
+ chan->private = NULL;
+ return ret;
+}
+
+static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
+{
+ struct shdma_desc *desc, *_desc;
+ /* Is the "exposed" head of a chain acked? */
+ bool head_acked = false;
+ dma_cookie_t cookie = 0;
+ dma_async_tx_callback callback = NULL;
+ void *param = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->chan_lock, flags);
+ list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
+ struct dma_async_tx_descriptor *tx = &desc->async_tx;
+
+ BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
+ BUG_ON(desc->mark != DESC_SUBMITTED &&
+ desc->mark != DESC_COMPLETED &&
+ desc->mark != DESC_WAITING);
+
+ /*
+ * queue is ordered, and we use this loop to (1) clean up all
+ * completed descriptors, and to (2) update descriptor flags of
+ * any chunks in a (partially) completed chain
+ */
+ if (!all && desc->mark == DESC_SUBMITTED &&
+ desc->cookie != cookie)
+ break;
+
+ if (tx->cookie > 0)
+ cookie = tx->cookie;
+
+ if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
+ if (schan->dma_chan.completed_cookie != desc->cookie - 1)
+ dev_dbg(schan->dev,
+ "Completing cookie %d, expected %d\n",
+ desc->cookie,
+ schan->dma_chan.completed_cookie + 1);
+ schan->dma_chan.completed_cookie = desc->cookie;
+ }
+
+ /* Call callback on the last chunk */
+ if (desc->mark == DESC_COMPLETED && tx->callback) {
+ desc->mark = DESC_WAITING;
+ callback = tx->callback;
+ param = tx->callback_param;
+ dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
+ tx->cookie, tx, schan->id);
+ BUG_ON(desc->chunks != 1);
+ break;
+ }
+
+ if (tx->cookie > 0 || tx->cookie == -EBUSY) {
+ if (desc->mark == DESC_COMPLETED) {
+ BUG_ON(tx->cookie < 0);
+ desc->mark = DESC_WAITING;
+ }
+ head_acked = async_tx_test_ack(tx);
+ } else {
+ switch (desc->mark) {
+ case DESC_COMPLETED:
+ desc->mark = DESC_WAITING;
+ /* Fall through */
+ case DESC_WAITING:
+ if (head_acked)
+ async_tx_ack(&desc->async_tx);
+ }
+ }
+
+ dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
+ tx, tx->cookie);
+
+ if (((desc->mark == DESC_COMPLETED ||
+ desc->mark == DESC_WAITING) &&
+ async_tx_test_ack(&desc->async_tx)) || all) {
+ /* Remove from ld_queue list */
+ desc->mark = DESC_IDLE;
+
+ list_move(&desc->node, &schan->ld_free);
+
+ if (list_empty(&schan->ld_queue)) {
+ dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
+ pm_runtime_put(schan->dev);
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+ }
+ }
+ }
+
+ if (all && !callback)
+ /*
+ * Terminating and the loop completed normally: forgive
+ * uncompleted cookies
+ */
+ schan->dma_chan.completed_cookie = schan->dma_chan.cookie;
+
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ if (callback)
+ callback(param);
+
+ return callback;
+}
+
+/*
+ * shdma_chan_ld_cleanup - Clean up link descriptors
+ *
+ * Clean up the ld_queue of DMA channel.
+ */
+static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
+{
+ while (__ld_cleanup(schan, all))
+ ;
+}
+
+/*
+ * shdma_free_chan_resources - Free all resources of the channel.
+ */
+static void shdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(chan->device);
+ const struct shdma_ops *ops = sdev->ops;
+ LIST_HEAD(list);
+
+ /* Protect against ISR */
+ spin_lock_irq(&schan->chan_lock);
+ ops->halt_channel(schan);
+ spin_unlock_irq(&schan->chan_lock);
+
+ /* Now no new interrupts will occur */
+
+ /* Prepared and not submitted descriptors can still be on the queue */
+ if (!list_empty(&schan->ld_queue))
+ shdma_chan_ld_cleanup(schan, true);
+
+ if (schan->slave_id >= 0) {
+ /* The caller is holding dma_list_mutex */
+ clear_bit(schan->slave_id, shdma_slave_used);
+ chan->private = NULL;
+ }
+
+ spin_lock_irq(&schan->chan_lock);
+
+ list_splice_init(&schan->ld_free, &list);
+ schan->desc_num = 0;
+
+ spin_unlock_irq(&schan->chan_lock);
+
+ kfree(schan->desc);
+}
+
+/**
+ * shdma_add_desc - get, set up and return one transfer descriptor
+ * @schan: DMA channel
+ * @flags: DMA transfer flags
+ * @dst: destination DMA address, incremented when direction equals
+ * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
+ * @src: source DMA address, incremented when direction equals
+ * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
+ * @len: DMA transfer length
+ * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
+ * @direction: needed for slave DMA to decide which address to keep constant,
+ * equals DMA_MEM_TO_MEM for MEMCPY
+ * Returns 0 or an error
+ * Locks: called with desc_lock held
+ */
+static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
+ unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
+ struct shdma_desc **first, enum dma_transfer_direction direction)
+{
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_desc *new;
+ size_t copy_size = *len;
+
+ if (!copy_size)
+ return NULL;
+
+ /* Allocate the link descriptor from the free list */
+ new = shdma_get_desc(schan);
+ if (!new) {
+ dev_err(schan->dev, "No free link descriptor available\n");
+ return NULL;
+ }
+
+ ops->desc_setup(schan, new, *src, *dst, &copy_size);
+
+ if (!*first) {
+ /* First desc */
+ new->async_tx.cookie = -EBUSY;
+ *first = new;
+ } else {
+ /* Other desc - invisible to the user */
+ new->async_tx.cookie = -EINVAL;
+ }
+
+ dev_dbg(schan->dev,
+ "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
+ copy_size, *len, *src, *dst, &new->async_tx,
+ new->async_tx.cookie);
+
+ new->mark = DESC_PREPARED;
+ new->async_tx.flags = flags;
+ new->direction = direction;
+
+ *len -= copy_size;
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
+ *src += copy_size;
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
+ *dst += copy_size;
+
+ return new;
+}
+
+/*
+ * shdma_prep_sg - prepare transfer descriptors from an SG list
+ *
+ * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
+ * converted to scatter-gather to guarantee consistent locking and a correct
+ * list manipulation. For slave DMA direction carries the usual meaning, and,
+ * logically, the SG list is RAM and the addr variable contains slave address,
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
+ * and the SG list contains only one element and points at the source buffer.
+ */
+static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
+ struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
+ enum dma_transfer_direction direction, unsigned long flags)
+{
+ struct scatterlist *sg;
+ struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
+ LIST_HEAD(tx_list);
+ int chunks = 0;
+ unsigned long irq_flags;
+ int i;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);
+
+ /* Have to lock the whole loop to protect against concurrent release */
+ spin_lock_irqsave(&schan->chan_lock, irq_flags);
+
+ /*
+ * Chaining:
+ * first descriptor is what user is dealing with in all API calls, its
+ * cookie is at first set to -EBUSY, at tx-submit to a positive
+ * number
+ * if more than one chunk is needed further chunks have cookie = -EINVAL
+ * the last chunk, if not equal to the first, has cookie = -ENOSPC
+ * all chunks are linked onto the tx_list head with their .node heads
+ * only during this function, then they are immediately spliced
+ * back onto the free list in form of a chain
+ */
+ for_each_sg(sgl, sg, sg_len, i) {
+ dma_addr_t sg_addr = sg_dma_address(sg);
+ size_t len = sg_dma_len(sg);
+
+ if (!len)
+ goto err_get_desc;
+
+ do {
+ dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
+ i, sg, len, (unsigned long long)sg_addr);
+
+ if (direction == DMA_DEV_TO_MEM)
+ new = shdma_add_desc(schan, flags,
+ &sg_addr, addr, &len, &first,
+ direction);
+ else
+ new = shdma_add_desc(schan, flags,
+ addr, &sg_addr, &len, &first,
+ direction);
+ if (!new)
+ goto err_get_desc;
+
+ new->chunks = chunks--;
+ list_add_tail(&new->node, &tx_list);
+ } while (len);
+ }
+
+ if (new != first)
+ new->async_tx.cookie = -ENOSPC;
+
+ /* Put them back on the free list, so, they don't get lost */
+ list_splice_tail(&tx_list, &schan->ld_free);
+
+ spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
+
+ return &first->async_tx;
+
+err_get_desc:
+ list_for_each_entry(new, &tx_list, node)
+ new->mark = DESC_IDLE;
+ list_splice(&tx_list, &schan->ld_free);
+
+ spin_unlock_irqrestore(&schan->chan_lock, irq_flags);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor *shdma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct scatterlist sg;
+
+ if (!chan || !len)
+ return NULL;
+
+ BUG_ON(!schan->desc_num);
+
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
+ offset_in_page(dma_src));
+ sg_dma_address(&sg) = dma_src;
+ sg_dma_len(&sg) = len;
+
+ return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
+}
+
+static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags, void *context)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
+ const struct shdma_ops *ops = sdev->ops;
+ int slave_id = schan->slave_id;
+ dma_addr_t slave_addr;
+
+ if (!chan)
+ return NULL;
+
+ BUG_ON(!schan->desc_num);
+
+ /* Someone calling slave DMA on a generic channel? */
+ if (slave_id < 0 || !sg_len) {
+ dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
+ __func__, sg_len, slave_id);
+ return NULL;
+ }
+
+ slave_addr = ops->slave_addr(schan);
+
+ return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
+ direction, flags);
+}
+
+static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ struct shdma_dev *sdev = to_shdma_dev(chan->device);
+ const struct shdma_ops *ops = sdev->ops;
+ struct dma_slave_config *config;
+ unsigned long flags;
+ int ret;
+
+ if (!chan)
+ return -EINVAL;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ spin_lock_irqsave(&schan->chan_lock, flags);
+ ops->halt_channel(schan);
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ shdma_chan_ld_cleanup(schan, true);
+ break;
+ case DMA_SLAVE_CONFIG:
+ /*
+ * So far only .slave_id is used, but the slave drivers are
+ * encouraged to also set a transfer direction and an address.
+ */
+ if (!arg)
+ return -EINVAL;
+ /*
+ * We could lock this, but you shouldn't be configuring the
+ * channel, while using it...
+ */
+ config = (struct dma_slave_config *)arg;
+ ret = shdma_setup_slave(schan, config->slave_id);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static void shdma_issue_pending(struct dma_chan *chan)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+
+ spin_lock_irq(&schan->chan_lock);
+ if (schan->pm_state == SHDMA_PM_ESTABLISHED)
+ shdma_chan_xfer_ld_queue(schan);
+ else
+ schan->pm_state = SHDMA_PM_PENDING;
+ spin_unlock_irq(&schan->chan_lock);
+}
+
+static enum dma_status shdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct shdma_chan *schan = to_shdma_chan(chan);
+ enum dma_status status;
+ unsigned long flags;
+
+ shdma_chan_ld_cleanup(schan, false);
+
+ spin_lock_irqsave(&schan->chan_lock, flags);
+
+ status = dma_cookie_status(chan, cookie, txstate);
+
+ /*
+ * If we don't find cookie on the queue, it has been aborted and we have
+ * to report error
+ */
+ if (status != DMA_SUCCESS) {
+ struct shdma_desc *sdesc;
+ status = DMA_ERROR;
+ list_for_each_entry(sdesc, &schan->ld_queue, node)
+ if (sdesc->cookie == cookie) {
+ status = DMA_IN_PROGRESS;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&schan->chan_lock, flags);
+
+ return status;
+}
+
+/* Called from error IRQ or NMI */
+bool shdma_reset(struct shdma_dev *sdev)
+{
+ const struct shdma_ops *ops = sdev->ops;
+ struct shdma_chan *schan;
+ unsigned int handled = 0;
+ int i;
+
+ /* Reset all channels */
+ shdma_for_each_chan(schan, sdev, i) {
+ struct shdma_desc *sdesc;
+ LIST_HEAD(dl);
+
+ if (!schan)
+ continue;
+
+ spin_lock(&schan->chan_lock);
+
+ /* Stop the channel */
+ ops->halt_channel(schan);
+
+ list_splice_init(&schan->ld_queue, &dl);
+
+ if (!list_empty(&dl)) {
+ dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
+ pm_runtime_put(schan->dev);
+ }
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+
+ spin_unlock(&schan->chan_lock);
+
+ /* Complete all */
+ list_for_each_entry(sdesc, &dl, node) {
+ struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
+ sdesc->mark = DESC_IDLE;
+ if (tx->callback)
+ tx->callback(tx->callback_param);
+ }
+
+ spin_lock(&schan->chan_lock);
+ list_splice(&dl, &schan->ld_free);
+ spin_unlock(&schan->chan_lock);
+
+ handled++;
+ }
+
+ return !!handled;
+}
+EXPORT_SYMBOL(shdma_reset);
+
+static irqreturn_t chan_irq(int irq, void *dev)
+{
+ struct shdma_chan *schan = dev;
+ const struct shdma_ops *ops =
+ to_shdma_dev(schan->dma_chan.device)->ops;
+ irqreturn_t ret;
+
+ spin_lock(&schan->chan_lock);
+
+ ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
+
+ spin_unlock(&schan->chan_lock);
+
+ return ret;
+}
+
+static irqreturn_t chan_irqt(int irq, void *dev)
+{
+ struct shdma_chan *schan = dev;
+ const struct shdma_ops *ops =
+ to_shdma_dev(schan->dma_chan.device)->ops;
+ struct shdma_desc *sdesc;
+
+ spin_lock_irq(&schan->chan_lock);
+ list_for_each_entry(sdesc, &schan->ld_queue, node) {
+ if (sdesc->mark == DESC_SUBMITTED &&
+ ops->desc_completed(schan, sdesc)) {
+ dev_dbg(schan->dev, "done #%d@%p\n",
+ sdesc->async_tx.cookie, &sdesc->async_tx);
+ sdesc->mark = DESC_COMPLETED;
+ break;
+ }
+ }
+ /* Next desc */
+ shdma_chan_xfer_ld_queue(schan);
+ spin_unlock_irq(&schan->chan_lock);
+
+ shdma_chan_ld_cleanup(schan, false);
+
+ return IRQ_HANDLED;
+}
+
+int shdma_request_irq(struct shdma_chan *schan, int irq,
+ unsigned long flags, const char *name)
+{
+ int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
+ flags, name, schan);
+
+ schan->irq = ret < 0 ? ret : irq;
+
+ return ret;
+}
+EXPORT_SYMBOL(shdma_request_irq);
+
+void shdma_free_irq(struct shdma_chan *schan)
+{
+ if (schan->irq >= 0)
+ free_irq(schan->irq, schan);
+}
+EXPORT_SYMBOL(shdma_free_irq);
+
+void shdma_chan_probe(struct shdma_dev *sdev,
+ struct shdma_chan *schan, int id)
+{
+ schan->pm_state = SHDMA_PM_ESTABLISHED;
+
+ /* reference struct dma_device */
+ schan->dma_chan.device = &sdev->dma_dev;
+ dma_cookie_init(&schan->dma_chan);
+
+ schan->dev = sdev->dma_dev.dev;
+ schan->id = id;
+
+ if (!schan->max_xfer_len)
+ schan->max_xfer_len = PAGE_SIZE;
+
+ spin_lock_init(&schan->chan_lock);
+
+ /* Init descripter manage list */
+ INIT_LIST_HEAD(&schan->ld_queue);
+ INIT_LIST_HEAD(&schan->ld_free);
+
+ /* Add the channel to DMA device channel list */
+ list_add_tail(&schan->dma_chan.device_node,
+ &sdev->dma_dev.channels);
+ sdev->schan[sdev->dma_dev.chancnt++] = schan;
+}
+EXPORT_SYMBOL(shdma_chan_probe);
+
+void shdma_chan_remove(struct shdma_chan *schan)
+{
+ list_del(&schan->dma_chan.device_node);
+}
+EXPORT_SYMBOL(shdma_chan_remove);
+
+int shdma_init(struct device *dev, struct shdma_dev *sdev,
+ int chan_num)
+{
+ struct dma_device *dma_dev = &sdev->dma_dev;
+
+ /*
+ * Require all call-backs for now, they can trivially be made optional
+ * later as required
+ */
+ if (!sdev->ops ||
+ !sdev->desc_size ||
+ !sdev->ops->embedded_desc ||
+ !sdev->ops->start_xfer ||
+ !sdev->ops->setup_xfer ||
+ !sdev->ops->set_slave ||
+ !sdev->ops->desc_setup ||
+ !sdev->ops->slave_addr ||
+ !sdev->ops->channel_busy ||
+ !sdev->ops->halt_channel ||
+ !sdev->ops->desc_completed)
+ return -EINVAL;
+
+ sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
+ if (!sdev->schan)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+
+ /* Common and MEMCPY operations */
+ dma_dev->device_alloc_chan_resources
+ = shdma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = shdma_free_chan_resources;
+ dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
+ dma_dev->device_tx_status = shdma_tx_status;
+ dma_dev->device_issue_pending = shdma_issue_pending;
+
+ /* Compulsory for DMA_SLAVE fields */
+ dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
+ dma_dev->device_control = shdma_control;
+
+ dma_dev->dev = dev;
+
+ return 0;
+}
+EXPORT_SYMBOL(shdma_init);
+
+void shdma_cleanup(struct shdma_dev *sdev)
+{
+ kfree(sdev->schan);
+}
+EXPORT_SYMBOL(shdma_cleanup);
+
+static int __init shdma_enter(void)
+{
+ shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
+ sizeof(long), GFP_KERNEL);
+ if (!shdma_slave_used)
+ return -ENOMEM;
+ return 0;
+}
+module_init(shdma_enter);
+
+static void __exit shdma_exit(void)
+{
+ kfree(shdma_slave_used);
+}
+module_exit(shdma_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SH-DMA driver base library");
+MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
new file mode 100644
index 000000000000..027c9be97654
--- /dev/null
+++ b/drivers/dma/sh/shdma.c
@@ -0,0 +1,943 @@
+/*
+ * Renesas SuperH DMA Engine support
+ *
+ * base is drivers/dma/flsdma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * - DMA of SuperH does not have Hardware DMA chain mode.
+ * - MAX DMA size is 16MB.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/spinlock.h>
+#include <linux/rculist.h>
+
+#include "../dmaengine.h"
+#include "shdma.h"
+
+#define SH_DMAE_DRV_NAME "sh-dma-engine"
+
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE 2
+#define SH_DMA_SLAVE_NUMBER 256
+#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
+
+/*
+ * Used for write-side mutual exclusion for the global device list,
+ * read-side synchronization by way of RCU, and per-controller data.
+ */
+static DEFINE_SPINLOCK(sh_dmae_lock);
+static LIST_HEAD(sh_dmae_devices);
+
+static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, shdev->chan_reg +
+ shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset);
+}
+
+static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
+{
+ __raw_writel(data, sh_dc->base + reg / sizeof(u32));
+}
+
+static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
+{
+ return __raw_readl(sh_dc->base + reg / sizeof(u32));
+}
+
+static u16 dmaor_read(struct sh_dmae_device *shdev)
+{
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ return __raw_readl(addr);
+ else
+ return __raw_readw(addr);
+}
+
+static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
+{
+ u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
+
+ if (shdev->pdata->dmaor_is_32bit)
+ __raw_writel(data, addr);
+ else
+ __raw_writew(data, addr);
+}
+
+static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
+}
+
+static u32 chcr_read(struct sh_dmae_chan *sh_dc)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
+}
+
+/*
+ * Reset DMA controller
+ *
+ * SH7780 has two DMAOR register
+ */
+static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
+{
+ unsigned short dmaor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sh_dmae_lock, flags);
+
+ dmaor = dmaor_read(shdev);
+ dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
+
+ spin_unlock_irqrestore(&sh_dmae_lock, flags);
+}
+
+static int sh_dmae_rst(struct sh_dmae_device *shdev)
+{
+ unsigned short dmaor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sh_dmae_lock, flags);
+
+ dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+
+ if (shdev->pdata->chclr_present) {
+ int i;
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+ if (sh_chan)
+ chclr_write(sh_chan, 0);
+ }
+ }
+
+ dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
+
+ dmaor = dmaor_read(shdev);
+
+ spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
+ if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
+ dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
+ return -EIO;
+ }
+ if (shdev->pdata->dmaor_init & ~dmaor)
+ dev_warn(shdev->shdma_dev.dma_dev.dev,
+ "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+ dmaor, shdev->pdata->dmaor_init);
+ return 0;
+}
+
+static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
+{
+ u32 chcr = chcr_read(sh_chan);
+
+ if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
+ return true; /* working */
+
+ return false; /* waiting */
+}
+
+static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
+ ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
+
+ if (cnt >= pdata->ts_shift_num)
+ cnt = 0;
+
+ return pdata->ts_shift[cnt];
+}
+
+static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ int i;
+
+ for (i = 0; i < pdata->ts_shift_num; i++)
+ if (pdata->ts_shift[i] == l2size)
+ break;
+
+ if (i == pdata->ts_shift_num)
+ i = 0;
+
+ return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
+ ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
+}
+
+static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
+{
+ sh_dmae_writel(sh_chan, hw->sar, SAR);
+ sh_dmae_writel(sh_chan, hw->dar, DAR);
+ sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
+}
+
+static void dmae_start(struct sh_dmae_chan *sh_chan)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ if (shdev->pdata->needs_tend_set)
+ sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
+
+ chcr |= CHCR_DE | shdev->chcr_ie_bit;
+ chcr_write(sh_chan, chcr & ~CHCR_TE);
+}
+
+static void dmae_init(struct sh_dmae_chan *sh_chan)
+{
+ /*
+ * Default configuration for dual address memory-memory transfer.
+ * 0x400 represents auto-request.
+ */
+ u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
+ LOG2_DEFAULT_XFER_SIZE);
+ sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
+ chcr_write(sh_chan, chcr);
+}
+
+static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
+{
+ /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
+ if (dmae_is_busy(sh_chan))
+ return -EBUSY;
+
+ sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
+ chcr_write(sh_chan, val);
+
+ return 0;
+}
+
+static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
+ u16 __iomem *addr = shdev->dmars;
+ unsigned int shift = chan_pdata->dmars_bit;
+
+ if (dmae_is_busy(sh_chan))
+ return -EBUSY;
+
+ if (pdata->no_dmars)
+ return 0;
+
+ /* in the case of a missing DMARS resource use first memory window */
+ if (!addr)
+ addr = (u16 __iomem *)shdev->chan_reg;
+ addr += chan_pdata->dmars / sizeof(u16);
+
+ __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
+ addr);
+
+ return 0;
+}
+
+static void sh_dmae_start_xfer(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
+ sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
+ sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
+ /* Get the ld start address from ld_queue */
+ dmae_set_reg(sh_chan, &sh_desc->hw);
+ dmae_start(sh_chan);
+}
+
+static bool sh_dmae_channel_busy(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ return dmae_is_busy(sh_chan);
+}
+
+static void sh_dmae_setup_xfer(struct shdma_chan *schan,
+ int slave_id)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+
+ if (slave_id >= 0) {
+ const struct sh_dmae_slave_config *cfg =
+ sh_chan->config;
+
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else {
+ dmae_init(sh_chan);
+ }
+}
+
+static const struct sh_dmae_slave_config *dmae_find_slave(
+ struct sh_dmae_chan *sh_chan, int slave_id)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ struct sh_dmae_pdata *pdata = shdev->pdata;
+ const struct sh_dmae_slave_config *cfg;
+ int i;
+
+ if (slave_id >= SH_DMA_SLAVE_NUMBER)
+ return NULL;
+
+ for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+ if (cfg->slave_id == slave_id)
+ return cfg;
+
+ return NULL;
+}
+
+static int sh_dmae_set_slave(struct shdma_chan *schan,
+ int slave_id, bool try)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
+ if (!cfg)
+ return -ENODEV;
+
+ if (!try)
+ sh_chan->config = cfg;
+
+ return 0;
+}
+
+static void dmae_halt(struct sh_dmae_chan *sh_chan)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
+ u32 chcr = chcr_read(sh_chan);
+
+ chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
+ chcr_write(sh_chan, chcr);
+}
+
+static int sh_dmae_desc_setup(struct shdma_chan *schan,
+ struct shdma_desc *sdesc,
+ dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+
+ if (*len > schan->max_xfer_len)
+ *len = schan->max_xfer_len;
+
+ sh_desc->hw.sar = src;
+ sh_desc->hw.dar = dst;
+ sh_desc->hw.tcr = *len;
+
+ return 0;
+}
+
+static void sh_dmae_halt(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+ dmae_halt(sh_chan);
+}
+
+static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+ shdma_chan);
+
+ if (!(chcr_read(sh_chan) & CHCR_TE))
+ return false;
+
+ /* DMA stop */
+ dmae_halt(sh_chan);
+
+ return true;
+}
+
+/* Called from error IRQ or NMI */
+static bool sh_dmae_reset(struct sh_dmae_device *shdev)
+{
+ bool ret;
+
+ /* halt the dma controller */
+ sh_dmae_ctl_stop(shdev);
+
+ /* We cannot detect, which channel caused the error, have to reset all */
+ ret = shdma_reset(&shdev->shdma_dev);
+
+ sh_dmae_rst(shdev);
+
+ return ret;
+}
+
+static irqreturn_t sh_dmae_err(int irq, void *data)
+{
+ struct sh_dmae_device *shdev = data;
+
+ if (!(dmaor_read(shdev) & DMAOR_AE))
+ return IRQ_NONE;
+
+ sh_dmae_reset(shdev);
+ return IRQ_HANDLED;
+}
+
+static bool sh_dmae_desc_completed(struct shdma_chan *schan,
+ struct shdma_desc *sdesc)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+ struct sh_dmae_desc *sh_desc = container_of(sdesc,
+ struct sh_dmae_desc, shdma_desc);
+ u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+ u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
+
+ return (sdesc->direction == DMA_DEV_TO_MEM &&
+ (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
+ (sdesc->direction != DMA_DEV_TO_MEM &&
+ (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
+}
+
+static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
+{
+ /* Fast path out if NMIF is not asserted for this controller */
+ if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
+ return false;
+
+ return sh_dmae_reset(shdev);
+}
+
+static int sh_dmae_nmi_handler(struct notifier_block *self,
+ unsigned long cmd, void *data)
+{
+ struct sh_dmae_device *shdev;
+ int ret = NOTIFY_DONE;
+ bool triggered;
+
+ /*
+ * Only concern ourselves with NMI events.
+ *
+ * Normally we would check the die chain value, but as this needs
+ * to be architecture independent, check for NMI context instead.
+ */
+ if (!in_nmi())
+ return NOTIFY_DONE;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
+ /*
+ * Only stop if one of the controllers has NMIF asserted,
+ * we do not want to interfere with regular address error
+ * handling or NMI events that don't concern the DMACs.
+ */
+ triggered = sh_dmae_nmi_notify(shdev);
+ if (triggered == true)
+ ret = NOTIFY_OK;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
+ .notifier_call = sh_dmae_nmi_handler,
+
+ /* Run before NMI debug handler and KGDB */
+ .priority = 1,
+};
+
+static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
+ int irq, unsigned long flags)
+{
+ const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+ struct shdma_dev *sdev = &shdev->shdma_dev;
+ struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
+ struct sh_dmae_chan *sh_chan;
+ struct shdma_chan *schan;
+ int err;
+
+ sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
+ if (!sh_chan) {
+ dev_err(sdev->dma_dev.dev,
+ "No free memory for allocating dma channels!\n");
+ return -ENOMEM;
+ }
+
+ schan = &sh_chan->shdma_chan;
+ schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
+
+ shdma_chan_probe(sdev, schan, id);
+
+ sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
+
+ /* set up channel irq */
+ if (pdev->id >= 0)
+ snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+ "sh-dmae%d.%d", pdev->id, id);
+ else
+ snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
+ "sh-dma%d", id);
+
+ err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
+ if (err) {
+ dev_err(sdev->dma_dev.dev,
+ "DMA channel %d request_irq error %d\n",
+ id, err);
+ goto err_no_irq;
+ }
+
+ shdev->chan[id] = sh_chan;
+ return 0;
+
+err_no_irq:
+ /* remove from dmaengine device node */
+ shdma_chan_remove(schan);
+ kfree(sh_chan);
+ return err;
+}
+
+static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
+{
+ struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+ struct shdma_chan *schan;
+ int i;
+
+ shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+ BUG_ON(!schan);
+
+ shdma_free_irq(&sh_chan->shdma_chan);
+
+ shdma_chan_remove(schan);
+ kfree(sh_chan);
+ }
+ dma_dev->chancnt = 0;
+}
+
+static void sh_dmae_shutdown(struct platform_device *pdev)
+{
+ struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+ sh_dmae_ctl_stop(shdev);
+}
+
+static int sh_dmae_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int sh_dmae_runtime_resume(struct device *dev)
+{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+ return sh_dmae_rst(shdev);
+}
+
+#ifdef CONFIG_PM
+static int sh_dmae_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int sh_dmae_resume(struct device *dev)
+{
+ struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = sh_dmae_rst(shdev);
+ if (ret < 0)
+ dev_err(dev, "Failed to reset!\n");
+
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+
+ if (!sh_chan->shdma_chan.desc_num)
+ continue;
+
+ if (sh_chan->shdma_chan.slave_id >= 0) {
+ const struct sh_dmae_slave_config *cfg = sh_chan->config;
+ dmae_set_dmars(sh_chan, cfg->mid_rid);
+ dmae_set_chcr(sh_chan, cfg->chcr);
+ } else {
+ dmae_init(sh_chan);
+ }
+ }
+
+ return 0;
+}
+#else
+#define sh_dmae_suspend NULL
+#define sh_dmae_resume NULL
+#endif
+
+const struct dev_pm_ops sh_dmae_pm = {
+ .suspend = sh_dmae_suspend,
+ .resume = sh_dmae_resume,
+ .runtime_suspend = sh_dmae_runtime_suspend,
+ .runtime_resume = sh_dmae_runtime_resume,
+};
+
+static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
+{
+ struct sh_dmae_chan *sh_chan = container_of(schan,
+ struct sh_dmae_chan, shdma_chan);
+
+ /*
+ * Implicit BUG_ON(!sh_chan->config)
+ * This is an exclusive slave DMA operation, may only be called after a
+ * successful slave configuration.
+ */
+ return sh_chan->config->addr;
+}
+
+static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
+{
+ return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
+}
+
+static const struct shdma_ops sh_dmae_shdma_ops = {
+ .desc_completed = sh_dmae_desc_completed,
+ .halt_channel = sh_dmae_halt,
+ .channel_busy = sh_dmae_channel_busy,
+ .slave_addr = sh_dmae_slave_addr,
+ .desc_setup = sh_dmae_desc_setup,
+ .set_slave = sh_dmae_set_slave,
+ .setup_xfer = sh_dmae_setup_xfer,
+ .start_xfer = sh_dmae_start_xfer,
+ .embedded_desc = sh_dmae_embedded_desc,
+ .chan_irq = sh_dmae_chan_irq,
+};
+
+static int __devinit sh_dmae_probe(struct platform_device *pdev)
+{
+ struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
+ unsigned long irqflags = IRQF_DISABLED,
+ chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+ int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+ int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
+ struct sh_dmae_device *shdev;
+ struct dma_device *dma_dev;
+ struct resource *chan, *dmars, *errirq_res, *chanirq_res;
+
+ /* get platform data */
+ if (!pdata || !pdata->channel_num)
+ return -ENODEV;
+
+ chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ /* DMARS area is optional */
+ dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ /*
+ * IRQ resources:
+ * 1. there always must be at least one IRQ IO-resource. On SH4 it is
+ * the error IRQ, in which case it is the only IRQ in this resource:
+ * start == end. If it is the only IRQ resource, all channels also
+ * use the same IRQ.
+ * 2. DMA channel IRQ resources can be specified one per resource or in
+ * ranges (start != end)
+ * 3. iff all events (channels and, optionally, error) on this
+ * controller use the same IRQ, only one IRQ resource can be
+ * specified, otherwise there must be one IRQ per channel, even if
+ * some of them are equal
+ * 4. if all IRQs on this controller are equal or if some specific IRQs
+ * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
+ * requested with the IRQF_SHARED flag
+ */
+ errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!chan || !errirq_res)
+ return -ENODEV;
+
+ if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
+ dev_err(&pdev->dev, "DMAC register region already claimed\n");
+ return -EBUSY;
+ }
+
+ if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
+ dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
+ err = -EBUSY;
+ goto ermrdmars;
+ }
+
+ err = -ENOMEM;
+ shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
+ if (!shdev) {
+ dev_err(&pdev->dev, "Not enough memory\n");
+ goto ealloc;
+ }
+
+ dma_dev = &shdev->shdma_dev.dma_dev;
+
+ shdev->chan_reg = ioremap(chan->start, resource_size(chan));
+ if (!shdev->chan_reg)
+ goto emapchan;
+ if (dmars) {
+ shdev->dmars = ioremap(dmars->start, resource_size(dmars));
+ if (!shdev->dmars)
+ goto emapdmars;
+ }
+
+ if (!pdata->slave_only)
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ if (pdata->slave && pdata->slave_num)
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+ /* Default transfer size of 32 bytes requires 32-byte alignment */
+ dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
+
+ shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
+ shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
+ err = shdma_init(&pdev->dev, &shdev->shdma_dev,
+ pdata->channel_num);
+ if (err < 0)
+ goto eshdma;
+
+ /* platform data */
+ shdev->pdata = pdev->dev.platform_data;
+
+ if (pdata->chcr_offset)
+ shdev->chcr_offset = pdata->chcr_offset;
+ else
+ shdev->chcr_offset = CHCR;
+
+ if (pdata->chcr_ie_bit)
+ shdev->chcr_ie_bit = pdata->chcr_ie_bit;
+ else
+ shdev->chcr_ie_bit = CHCR_IE;
+
+ platform_set_drvdata(pdev, shdev);
+
+ pm_runtime_enable(&pdev->dev);
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
+
+ spin_lock_irq(&sh_dmae_lock);
+ list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ /* reset dma controller - only needed as a test */
+ err = sh_dmae_rst(shdev);
+ if (err)
+ goto rst_err;
+
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+ chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+
+ if (!chanirq_res)
+ chanirq_res = errirq_res;
+ else
+ irqres++;
+
+ if (chanirq_res == errirq_res ||
+ (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
+ irqflags = IRQF_SHARED;
+
+ errirq = errirq_res->start;
+
+ err = request_irq(errirq, sh_dmae_err, irqflags,
+ "DMAC Address Error", shdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "DMA failed requesting irq #%d, error %d\n",
+ errirq, err);
+ goto eirq_err;
+ }
+
+#else
+ chanirq_res = errirq_res;
+#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
+
+ if (chanirq_res->start == chanirq_res->end &&
+ !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
+ /* Special case - all multiplexed */
+ for (; irq_cnt < pdata->channel_num; irq_cnt++) {
+ if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
+ chan_irq[irq_cnt] = chanirq_res->start;
+ chan_flag[irq_cnt] = IRQF_SHARED;
+ } else {
+ irq_cap = 1;
+ break;
+ }
+ }
+ } else {
+ do {
+ for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+ if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
+ irq_cap = 1;
+ break;
+ }
+
+ if ((errirq_res->flags & IORESOURCE_BITS) ==
+ IORESOURCE_IRQ_SHAREABLE)
+ chan_flag[irq_cnt] = IRQF_SHARED;
+ else
+ chan_flag[irq_cnt] = IRQF_DISABLED;
+ dev_dbg(&pdev->dev,
+ "Found IRQ %d for channel %d\n",
+ i, irq_cnt);
+ chan_irq[irq_cnt++] = i;
+ }
+
+ if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
+ break;
+
+ chanirq_res = platform_get_resource(pdev,
+ IORESOURCE_IRQ, ++irqres);
+ } while (irq_cnt < pdata->channel_num && chanirq_res);
+ }
+
+ /* Create DMA Channel */
+ for (i = 0; i < irq_cnt; i++) {
+ err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
+ if (err)
+ goto chan_probe_err;
+ }
+
+ if (irq_cap)
+ dev_notice(&pdev->dev, "Attempting to register %d DMA "
+ "channels when a maximum of %d are supported.\n",
+ pdata->channel_num, SH_DMAE_MAX_CHANNELS);
+
+ pm_runtime_put(&pdev->dev);
+
+ err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
+ if (err < 0)
+ goto edmadevreg;
+
+ return err;
+
+edmadevreg:
+ pm_runtime_get(&pdev->dev);
+
+chan_probe_err:
+ sh_dmae_chan_remove(shdev);
+
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
+ free_irq(errirq, shdev);
+eirq_err:
+#endif
+rst_err:
+ spin_lock_irq(&sh_dmae_lock);
+ list_del_rcu(&shdev->node);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ platform_set_drvdata(pdev, NULL);
+ shdma_cleanup(&shdev->shdma_dev);
+eshdma:
+ if (dmars)
+ iounmap(shdev->dmars);
+emapdmars:
+ iounmap(shdev->chan_reg);
+ synchronize_rcu();
+emapchan:
+ kfree(shdev);
+ealloc:
+ if (dmars)
+ release_mem_region(dmars->start, resource_size(dmars));
+ermrdmars:
+ release_mem_region(chan->start, resource_size(chan));
+
+ return err;
+}
+
+static int __devexit sh_dmae_remove(struct platform_device *pdev)
+{
+ struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
+ struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
+ struct resource *res;
+ int errirq = platform_get_irq(pdev, 0);
+
+ dma_async_device_unregister(dma_dev);
+
+ if (errirq > 0)
+ free_irq(errirq, shdev);
+
+ spin_lock_irq(&sh_dmae_lock);
+ list_del_rcu(&shdev->node);
+ spin_unlock_irq(&sh_dmae_lock);
+
+ pm_runtime_disable(&pdev->dev);
+
+ sh_dmae_chan_remove(shdev);
+ shdma_cleanup(&shdev->shdma_dev);
+
+ if (shdev->dmars)
+ iounmap(shdev->dmars);
+ iounmap(shdev->chan_reg);
+
+ platform_set_drvdata(pdev, NULL);
+
+ synchronize_rcu();
+ kfree(shdev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res)
+ release_mem_region(res->start, resource_size(res));
+
+ return 0;
+}
+
+static struct platform_driver sh_dmae_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .pm = &sh_dmae_pm,
+ .name = SH_DMAE_DRV_NAME,
+ },
+ .remove = __devexit_p(sh_dmae_remove),
+ .shutdown = sh_dmae_shutdown,
+};
+
+static int __init sh_dmae_init(void)
+{
+ /* Wire up NMI handling */
+ int err = register_die_notifier(&sh_dmae_nmi_notifier);
+ if (err)
+ return err;
+
+ return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
+}
+module_init(sh_dmae_init);
+
+static void __exit sh_dmae_exit(void)
+{
+ platform_driver_unregister(&sh_dmae_driver);
+
+ unregister_die_notifier(&sh_dmae_nmi_notifier);
+}
+module_exit(sh_dmae_exit);
+
+MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
+MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
diff --git a/drivers/dma/shdma.h b/drivers/dma/sh/shdma.h
index 0b1d2c105f02..9314e93225db 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -13,42 +13,29 @@
#ifndef __DMA_SHDMA_H
#define __DMA_SHDMA_H
+#include <linux/sh_dma.h>
+#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
-#define SH_DMAC_MAX_CHANNELS 20
-#define SH_DMA_SLAVE_NUMBER 256
-#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
+#define SH_DMAE_MAX_CHANNELS 20
+#define SH_DMAE_TCR_MAX 0x00FFFFFF /* 16MB */
struct device;
-enum dmae_pm_state {
- DMAE_PM_ESTABLISHED,
- DMAE_PM_BUSY,
- DMAE_PM_PENDING,
-};
-
struct sh_dmae_chan {
- spinlock_t desc_lock; /* Descriptor operation lock */
- struct list_head ld_queue; /* Link descriptors queue */
- struct list_head ld_free; /* Link descriptors free */
- struct dma_chan common; /* DMA common channel */
- struct device *dev; /* Channel device */
- struct tasklet_struct tasklet; /* Tasklet */
- int descs_allocated; /* desc count */
+ struct shdma_chan shdma_chan;
+ const struct sh_dmae_slave_config *config; /* Slave DMA configuration */
int xmit_shift; /* log_2(bytes_per_xfer) */
- int irq;
- int id; /* Raw id of this channel */
u32 __iomem *base;
char dev_id[16]; /* unique name per DMAC of channel */
int pm_error;
- enum dmae_pm_state pm_state;
};
struct sh_dmae_device {
- struct dma_device common;
- struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS];
+ struct shdma_dev shdma_dev;
+ struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS];
struct sh_dmae_pdata *pdata;
struct list_head node;
u32 __iomem *chan_reg;
@@ -57,10 +44,21 @@ struct sh_dmae_device {
u32 chcr_ie_bit;
};
-#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
+struct sh_dmae_regs {
+ u32 sar; /* SAR / source address */
+ u32 dar; /* DAR / destination address */
+ u32 tcr; /* TCR / transfer count */
+};
+
+struct sh_dmae_desc {
+ struct sh_dmae_regs hw;
+ struct shdma_desc shdma_desc;
+};
+
+#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan)
#define to_sh_desc(lh) container_of(lh, struct sh_desc, node)
#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
-#define to_sh_dev(chan) container_of(chan->common.device,\
- struct sh_dmae_device, common)
+#define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\
+ struct sh_dmae_device, shdma_dev.dma_dev)
#endif /* __DMA_SHDMA_H */
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
deleted file mode 100644
index 19d7a8d3975d..000000000000
--- a/drivers/dma/shdma.c
+++ /dev/null
@@ -1,1524 +0,0 @@
-/*
- * Renesas SuperH DMA Engine support
- *
- * base is drivers/dma/flsdma.c
- *
- * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
- * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
- * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * - DMA of SuperH does not have Hardware DMA chain mode.
- * - MAX DMA size is 16MB.
- *
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/dmaengine.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/sh_dma.h>
-#include <linux/notifier.h>
-#include <linux/kdebug.h>
-#include <linux/spinlock.h>
-#include <linux/rculist.h>
-
-#include "dmaengine.h"
-#include "shdma.h"
-
-/* DMA descriptor control */
-enum sh_dmae_desc_status {
- DESC_IDLE,
- DESC_PREPARED,
- DESC_SUBMITTED,
- DESC_COMPLETED, /* completed, have to call callback */
- DESC_WAITING, /* callback called, waiting for ack / re-submit */
-};
-
-#define NR_DESCS_PER_CHANNEL 32
-/* Default MEMCPY transfer size = 2^2 = 4 bytes */
-#define LOG2_DEFAULT_XFER_SIZE 2
-
-/*
- * Used for write-side mutual exclusion for the global device list,
- * read-side synchronization by way of RCU, and per-controller data.
- */
-static DEFINE_SPINLOCK(sh_dmae_lock);
-static LIST_HEAD(sh_dmae_devices);
-
-/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
-static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
-
-static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
-
-static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
-{
- struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
-
- __raw_writel(data, shdev->chan_reg +
- shdev->pdata->channel[sh_dc->id].chclr_offset);
-}
-
-static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
-{
- __raw_writel(data, sh_dc->base + reg / sizeof(u32));
-}
-
-static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
-{
- return __raw_readl(sh_dc->base + reg / sizeof(u32));
-}
-
-static u16 dmaor_read(struct sh_dmae_device *shdev)
-{
- u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
-
- if (shdev->pdata->dmaor_is_32bit)
- return __raw_readl(addr);
- else
- return __raw_readw(addr);
-}
-
-static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
-{
- u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32);
-
- if (shdev->pdata->dmaor_is_32bit)
- __raw_writel(data, addr);
- else
- __raw_writew(data, addr);
-}
-
-static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
-{
- struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
-
- __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32));
-}
-
-static u32 chcr_read(struct sh_dmae_chan *sh_dc)
-{
- struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
-
- return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32));
-}
-
-/*
- * Reset DMA controller
- *
- * SH7780 has two DMAOR register
- */
-static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
-{
- unsigned short dmaor;
- unsigned long flags;
-
- spin_lock_irqsave(&sh_dmae_lock, flags);
-
- dmaor = dmaor_read(shdev);
- dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
-
- spin_unlock_irqrestore(&sh_dmae_lock, flags);
-}
-
-static int sh_dmae_rst(struct sh_dmae_device *shdev)
-{
- unsigned short dmaor;
- unsigned long flags;
-
- spin_lock_irqsave(&sh_dmae_lock, flags);
-
- dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
-
- if (shdev->pdata->chclr_present) {
- int i;
- for (i = 0; i < shdev->pdata->channel_num; i++) {
- struct sh_dmae_chan *sh_chan = shdev->chan[i];
- if (sh_chan)
- chclr_write(sh_chan, 0);
- }
- }
-
- dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
-
- dmaor = dmaor_read(shdev);
-
- spin_unlock_irqrestore(&sh_dmae_lock, flags);
-
- if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
- dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
- return -EIO;
- }
- if (shdev->pdata->dmaor_init & ~dmaor)
- dev_warn(shdev->common.dev,
- "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
- dmaor, shdev->pdata->dmaor_init);
- return 0;
-}
-
-static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
-{
- u32 chcr = chcr_read(sh_chan);
-
- if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
- return true; /* working */
-
- return false; /* waiting */
-}
-
-static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
-{
- struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
- int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
- ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
-
- if (cnt >= pdata->ts_shift_num)
- cnt = 0;
-
- return pdata->ts_shift[cnt];
-}
-
-static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
-{
- struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
- int i;
-
- for (i = 0; i < pdata->ts_shift_num; i++)
- if (pdata->ts_shift[i] == l2size)
- break;
-
- if (i == pdata->ts_shift_num)
- i = 0;
-
- return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
- ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
-}
-
-static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
-{
- sh_dmae_writel(sh_chan, hw->sar, SAR);
- sh_dmae_writel(sh_chan, hw->dar, DAR);
- sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
-}
-
-static void dmae_start(struct sh_dmae_chan *sh_chan)
-{
- struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- u32 chcr = chcr_read(sh_chan);
-
- if (shdev->pdata->needs_tend_set)
- sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);
-
- chcr |= CHCR_DE | shdev->chcr_ie_bit;
- chcr_write(sh_chan, chcr & ~CHCR_TE);
-}
-
-static void dmae_halt(struct sh_dmae_chan *sh_chan)
-{
- struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- u32 chcr = chcr_read(sh_chan);
-
- chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
- chcr_write(sh_chan, chcr);
-}
-
-static void dmae_init(struct sh_dmae_chan *sh_chan)
-{
- /*
- * Default configuration for dual address memory-memory transfer.
- * 0x400 represents auto-request.
- */
- u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
- LOG2_DEFAULT_XFER_SIZE);
- sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
- chcr_write(sh_chan, chcr);
-}
-
-static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
-{
- /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
- if (dmae_is_busy(sh_chan))
- return -EBUSY;
-
- sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
- chcr_write(sh_chan, val);
-
- return 0;
-}
-
-static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
-{
- struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
- const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
- u16 __iomem *addr = shdev->dmars;
- unsigned int shift = chan_pdata->dmars_bit;
-
- if (dmae_is_busy(sh_chan))
- return -EBUSY;
-
- if (pdata->no_dmars)
- return 0;
-
- /* in the case of a missing DMARS resource use first memory window */
- if (!addr)
- addr = (u16 __iomem *)shdev->chan_reg;
- addr += chan_pdata->dmars / sizeof(u16);
-
- __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
- addr);
-
- return 0;
-}
-
-static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
-{
- struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
- struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan);
- struct sh_dmae_slave *param = tx->chan->private;
- dma_async_tx_callback callback = tx->callback;
- dma_cookie_t cookie;
- bool power_up;
-
- spin_lock_irq(&sh_chan->desc_lock);
-
- if (list_empty(&sh_chan->ld_queue))
- power_up = true;
- else
- power_up = false;
-
- cookie = dma_cookie_assign(tx);
-
- /* Mark all chunks of this descriptor as submitted, move to the queue */
- list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
- /*
- * All chunks are on the global ld_free, so, we have to find
- * the end of the chain ourselves
- */
- if (chunk != desc && (chunk->mark == DESC_IDLE ||
- chunk->async_tx.cookie > 0 ||
- chunk->async_tx.cookie == -EBUSY ||
- &chunk->node == &sh_chan->ld_free))
- break;
- chunk->mark = DESC_SUBMITTED;
- /* Callback goes to the last chunk */
- chunk->async_tx.callback = NULL;
- chunk->cookie = cookie;
- list_move_tail(&chunk->node, &sh_chan->ld_queue);
- last = chunk;
- }
-
- last->async_tx.callback = callback;
- last->async_tx.callback_param = tx->callback_param;
-
- dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n",
- tx->cookie, &last->async_tx, sh_chan->id,
- desc->hw.sar, desc->hw.tcr, desc->hw.dar);
-
- if (power_up) {
- sh_chan->pm_state = DMAE_PM_BUSY;
-
- pm_runtime_get(sh_chan->dev);
-
- spin_unlock_irq(&sh_chan->desc_lock);
-
- pm_runtime_barrier(sh_chan->dev);
-
- spin_lock_irq(&sh_chan->desc_lock);
-
- /* Have we been reset, while waiting? */
- if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) {
- dev_dbg(sh_chan->dev, "Bring up channel %d\n",
- sh_chan->id);
- if (param) {
- const struct sh_dmae_slave_config *cfg =
- param->config;
-
- dmae_set_dmars(sh_chan, cfg->mid_rid);
- dmae_set_chcr(sh_chan, cfg->chcr);
- } else {
- dmae_init(sh_chan);
- }
-
- if (sh_chan->pm_state == DMAE_PM_PENDING)
- sh_chan_xfer_ld_queue(sh_chan);
- sh_chan->pm_state = DMAE_PM_ESTABLISHED;
- }
- } else {
- sh_chan->pm_state = DMAE_PM_PENDING;
- }
-
- spin_unlock_irq(&sh_chan->desc_lock);
-
- return cookie;
-}
-
-/* Called with desc_lock held */
-static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
-{
- struct sh_desc *desc;
-
- list_for_each_entry(desc, &sh_chan->ld_free, node)
- if (desc->mark != DESC_PREPARED) {
- BUG_ON(desc->mark != DESC_IDLE);
- list_del(&desc->node);
- return desc;
- }
-
- return NULL;
-}
-
-static const struct sh_dmae_slave_config *sh_dmae_find_slave(
- struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
-{
- struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
- struct sh_dmae_pdata *pdata = shdev->pdata;
- int i;
-
- if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
- return NULL;
-
- for (i = 0; i < pdata->slave_num; i++)
- if (pdata->slave[i].slave_id == param->slave_id)
- return pdata->slave + i;
-
- return NULL;
-}
-
-static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
-{
- struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
- struct sh_desc *desc;
- struct sh_dmae_slave *param = chan->private;
- int ret;
-
- /*
- * This relies on the guarantee from dmaengine that alloc_chan_resources
- * never runs concurrently with itself or free_chan_resources.
- */
- if (param) {
- const struct sh_dmae_slave_config *cfg;
-
- cfg = sh_dmae_find_slave(sh_chan, param);
- if (!cfg) {
- ret = -EINVAL;
- goto efindslave;
- }
-
- if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) {
- ret = -EBUSY;
- goto etestused;
- }
-
- param->config = cfg;
- }
-
- while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) {
- desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL);
- if (!desc)
- break;
- dma_async_tx_descriptor_init(&desc->async_tx,
- &sh_chan->common);
- desc->async_tx.tx_submit = sh_dmae_tx_submit;
- desc->mark = DESC_IDLE;
-
- list_add(&desc->node, &sh_chan->ld_free);
- sh_chan->descs_allocated++;
- }
-
- if (!sh_chan->descs_allocated) {
- ret = -ENOMEM;
- goto edescalloc;
- }
-
- return sh_chan->descs_allocated;
-
-edescalloc:
- if (param)
- clear_bit(param->slave_id, sh_dmae_slave_used);
-etestused:
-efindslave:
- chan->private = NULL;
- return ret;
-}
-
-/*
- * sh_dma_free_chan_resources - Free all resources of the channel.
- */
-static void sh_dmae_free_chan_resources(struct dma_chan *chan)
-{
- struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
- struct sh_desc *desc, *_desc;
- LIST_HEAD(list);
-
- /* Protect against ISR */
- spin_lock_irq(&sh_chan->desc_lock);
- dmae_halt(sh_chan);
- spin_unlock_irq(&sh_chan->desc_lock);
-
- /* Now no new interrupts will occur */
-
- /* Prepared and not submitted descriptors can still be on the queue */
- if (!list_empty(&sh_chan->ld_queue))
- sh_dmae_chan_ld_cleanup(sh_chan, true);
-
- if (chan->private) {
- /* The caller is holding dma_list_mutex */
- struct sh_dmae_slave *param = chan->private;
- clear_bit(param->slave_id, sh_dmae_slave_used);
- chan->private = NULL;
- }
-
- spin_lock_irq(&sh_chan->desc_lock);
-
- list_splice_init(&sh_chan->ld_free, &list);
- sh_chan->descs_allocated = 0;
-
- spin_unlock_irq(&sh_chan->desc_lock);
-
- list_for_each_entry_safe(desc, _desc, &list, node)
- kfree(desc);
-}
-
-/**
- * sh_dmae_add_desc - get, set up and return one transfer descriptor
- * @sh_chan: DMA channel
- * @flags: DMA transfer flags
- * @dest: destination DMA address, incremented when direction equals
- * DMA_DEV_TO_MEM
- * @src: source DMA address, incremented when direction equals
- * DMA_MEM_TO_DEV
- * @len: DMA transfer length
- * @first: if NULL, set to the current descriptor and cookie set to -EBUSY
- * @direction: needed for slave DMA to decide which address to keep constant,
- * equals DMA_MEM_TO_MEM for MEMCPY
- * Returns 0 or an error
- * Locks: called with desc_lock held
- */
-static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
- unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
- struct sh_desc **first, enum dma_transfer_direction direction)
-{
- struct sh_desc *new;
- size_t copy_size;
-
- if (!*len)
- return NULL;
-
- /* Allocate the link descriptor from the free list */
- new = sh_dmae_get_desc(sh_chan);
- if (!new) {
- dev_err(sh_chan->dev, "No free link descriptor available\n");
- return NULL;
- }
-
- copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1);
-
- new->hw.sar = *src;
- new->hw.dar = *dest;
- new->hw.tcr = copy_size;
-
- if (!*first) {
- /* First desc */
- new->async_tx.cookie = -EBUSY;
- *first = new;
- } else {
- /* Other desc - invisible to the user */
- new->async_tx.cookie = -EINVAL;
- }
-
- dev_dbg(sh_chan->dev,
- "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n",
- copy_size, *len, *src, *dest, &new->async_tx,
- new->async_tx.cookie, sh_chan->xmit_shift);
-
- new->mark = DESC_PREPARED;
- new->async_tx.flags = flags;
- new->direction = direction;
-
- *len -= copy_size;
- if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
- *src += copy_size;
- if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
- *dest += copy_size;
-
- return new;
-}
-
-/*
- * sh_dmae_prep_sg - prepare transfer descriptors from an SG list
- *
- * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
- * converted to scatter-gather to guarantee consistent locking and a correct
- * list manipulation. For slave DMA direction carries the usual meaning, and,
- * logically, the SG list is RAM and the addr variable contains slave address,
- * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
- * and the SG list contains only one element and points at the source buffer.
- */
-static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
- struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
- enum dma_transfer_direction direction, unsigned long flags)
-{
- struct scatterlist *sg;
- struct sh_desc *first = NULL, *new = NULL /* compiler... */;
- LIST_HEAD(tx_list);
- int chunks = 0;
- unsigned long irq_flags;
- int i;
-
- if (!sg_len)
- return NULL;
-
- for_each_sg(sgl, sg, sg_len, i)
- chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) /
- (SH_DMA_TCR_MAX + 1);
-
- /* Have to lock the whole loop to protect against concurrent release */
- spin_lock_irqsave(&sh_chan->desc_lock, irq_flags);
-
- /*
- * Chaining:
- * first descriptor is what user is dealing with in all API calls, its
- * cookie is at first set to -EBUSY, at tx-submit to a positive
- * number
- * if more than one chunk is needed further chunks have cookie = -EINVAL
- * the last chunk, if not equal to the first, has cookie = -ENOSPC
- * all chunks are linked onto the tx_list head with their .node heads
- * only during this function, then they are immediately spliced
- * back onto the free list in form of a chain
- */
- for_each_sg(sgl, sg, sg_len, i) {
- dma_addr_t sg_addr = sg_dma_address(sg);
- size_t len = sg_dma_len(sg);
-
- if (!len)
- goto err_get_desc;
-
- do {
- dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
- i, sg, len, (unsigned long long)sg_addr);
-
- if (direction == DMA_DEV_TO_MEM)
- new = sh_dmae_add_desc(sh_chan, flags,
- &sg_addr, addr, &len, &first,
- direction);
- else
- new = sh_dmae_add_desc(sh_chan, flags,
- addr, &sg_addr, &len, &first,
- direction);
- if (!new)
- goto err_get_desc;
-
- new->chunks = chunks--;
- list_add_tail(&new->node, &tx_list);
- } while (len);
- }
-
- if (new != first)
- new->async_tx.cookie = -ENOSPC;
-
- /* Put them back on the free list, so, they don't get lost */
- list_splice_tail(&tx_list, &sh_chan->ld_free);
-
- spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
-
- return &first->async_tx;
-
-err_get_desc:
- list_for_each_entry(new, &tx_list, node)
- new->mark = DESC_IDLE;
- list_splice(&tx_list, &sh_chan->ld_free);
-
- spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags);
-
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
- size_t len, unsigned long flags)
-{
- struct sh_dmae_chan *sh_chan;
- struct scatterlist sg;
-
- if (!chan || !len)
- return NULL;
-
- sh_chan = to_sh_chan(chan);
-
- sg_init_table(&sg, 1);
- sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
- offset_in_page(dma_src));
- sg_dma_address(&sg) = dma_src;
- sg_dma_len(&sg) = len;
-
- return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
- flags);
-}
-
-static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_transfer_direction direction, unsigned long flags,
- void *context)
-{
- struct sh_dmae_slave *param;
- struct sh_dmae_chan *sh_chan;
- dma_addr_t slave_addr;
-
- if (!chan)
- return NULL;
-
- sh_chan = to_sh_chan(chan);
- param = chan->private;
-
- /* Someone calling slave DMA on a public channel? */
- if (!param || !sg_len) {
- dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n",
- __func__, param, sg_len, param ? param->slave_id : -1);
- return NULL;
- }
-
- slave_addr = param->config->addr;
-
- /*
- * if (param != NULL), this is a successfully requested slave channel,
- * therefore param->config != NULL too.
- */
- return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
- direction, flags);
-}
-
-static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
- unsigned long arg)
-{
- struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
- unsigned long flags;
-
- /* Only supports DMA_TERMINATE_ALL */
- if (cmd != DMA_TERMINATE_ALL)
- return -ENXIO;
-
- if (!chan)
- return -EINVAL;
-
- spin_lock_irqsave(&sh_chan->desc_lock, flags);
- dmae_halt(sh_chan);
-
- if (!list_empty(&sh_chan->ld_queue)) {
- /* Record partial transfer */
- struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
- struct sh_desc, node);
- desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
- sh_chan->xmit_shift;
- }
- spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
-
- sh_dmae_chan_ld_cleanup(sh_chan, true);
-
- return 0;
-}
-
-static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
-{
- struct sh_desc *desc, *_desc;
- /* Is the "exposed" head of a chain acked? */
- bool head_acked = false;
- dma_cookie_t cookie = 0;
- dma_async_tx_callback callback = NULL;
- void *param = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&sh_chan->desc_lock, flags);
- list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) {
- struct dma_async_tx_descriptor *tx = &desc->async_tx;
-
- BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
- BUG_ON(desc->mark != DESC_SUBMITTED &&
- desc->mark != DESC_COMPLETED &&
- desc->mark != DESC_WAITING);
-
- /*
- * queue is ordered, and we use this loop to (1) clean up all
- * completed descriptors, and to (2) update descriptor flags of
- * any chunks in a (partially) completed chain
- */
- if (!all && desc->mark == DESC_SUBMITTED &&
- desc->cookie != cookie)
- break;
-
- if (tx->cookie > 0)
- cookie = tx->cookie;
-
- if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
- if (sh_chan->common.completed_cookie != desc->cookie - 1)
- dev_dbg(sh_chan->dev,
- "Completing cookie %d, expected %d\n",
- desc->cookie,
- sh_chan->common.completed_cookie + 1);
- sh_chan->common.completed_cookie = desc->cookie;
- }
-
- /* Call callback on the last chunk */
- if (desc->mark == DESC_COMPLETED && tx->callback) {
- desc->mark = DESC_WAITING;
- callback = tx->callback;
- param = tx->callback_param;
- dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n",
- tx->cookie, tx, sh_chan->id);
- BUG_ON(desc->chunks != 1);
- break;
- }
-
- if (tx->cookie > 0 || tx->cookie == -EBUSY) {
- if (desc->mark == DESC_COMPLETED) {
- BUG_ON(tx->cookie < 0);
- desc->mark = DESC_WAITING;
- }
- head_acked = async_tx_test_ack(tx);
- } else {
- switch (desc->mark) {
- case DESC_COMPLETED:
- desc->mark = DESC_WAITING;
- /* Fall through */
- case DESC_WAITING:
- if (head_acked)
- async_tx_ack(&desc->async_tx);
- }
- }
-
- dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n",
- tx, tx->cookie);
-
- if (((desc->mark == DESC_COMPLETED ||
- desc->mark == DESC_WAITING) &&
- async_tx_test_ack(&desc->async_tx)) || all) {
- /* Remove from ld_queue list */
- desc->mark = DESC_IDLE;
-
- list_move(&desc->node, &sh_chan->ld_free);
-
- if (list_empty(&sh_chan->ld_queue)) {
- dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
- pm_runtime_put(sh_chan->dev);
- }
- }
- }
-
- if (all && !callback)
- /*
- * Terminating and the loop completed normally: forgive
- * uncompleted cookies
- */
- sh_chan->common.completed_cookie = sh_chan->common.cookie;
-
- spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
-
- if (callback)
- callback(param);
-
- return callback;
-}
-
-/*
- * sh_chan_ld_cleanup - Clean up link descriptors
- *
- * This function cleans up the ld_queue of DMA channel.
- */
-static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
-{
- while (__ld_cleanup(sh_chan, all))
- ;
-}
-
-/* Called under spin_lock_irq(&sh_chan->desc_lock) */
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
-{
- struct sh_desc *desc;
-
- /* DMA work check */
- if (dmae_is_busy(sh_chan))
- return;
-
- /* Find the first not transferred descriptor */
- list_for_each_entry(desc, &sh_chan->ld_queue, node)
- if (desc->mark == DESC_SUBMITTED) {
- dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
- desc->async_tx.cookie, sh_chan->id,
- desc->hw.tcr, desc->hw.sar, desc->hw.dar);
- /* Get the ld start address from ld_queue */
- dmae_set_reg(sh_chan, &desc->hw);
- dmae_start(sh_chan);
- break;
- }
-}
-
-static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan)
-{
- struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
-
- spin_lock_irq(&sh_chan->desc_lock);
- if (sh_chan->pm_state == DMAE_PM_ESTABLISHED)
- sh_chan_xfer_ld_queue(sh_chan);
- else
- sh_chan->pm_state = DMAE_PM_PENDING;
- spin_unlock_irq(&sh_chan->desc_lock);
-}
-
-static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
-{
- struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
- enum dma_status status;
- unsigned long flags;
-
- sh_dmae_chan_ld_cleanup(sh_chan, false);
-
- spin_lock_irqsave(&sh_chan->desc_lock, flags);
-
- status = dma_cookie_status(chan, cookie, txstate);
-
- /*
- * If we don't find cookie on the queue, it has been aborted and we have
- * to report error
- */
- if (status != DMA_SUCCESS) {
- struct sh_desc *desc;
- status = DMA_ERROR;
- list_for_each_entry(desc, &sh_chan->ld_queue, node)
- if (desc->cookie == cookie) {
- status = DMA_IN_PROGRESS;
- break;
- }
- }
-
- spin_unlock_irqrestore(&sh_chan->desc_lock, flags);
-
- return status;
-}
-
-static irqreturn_t sh_dmae_interrupt(int irq, void *data)
-{
- irqreturn_t ret = IRQ_NONE;
- struct sh_dmae_chan *sh_chan = data;
- u32 chcr;
-
- spin_lock(&sh_chan->desc_lock);
-
- chcr = chcr_read(sh_chan);
-
- if (chcr & CHCR_TE) {
- /* DMA stop */
- dmae_halt(sh_chan);
-
- ret = IRQ_HANDLED;
- tasklet_schedule(&sh_chan->tasklet);
- }
-
- spin_unlock(&sh_chan->desc_lock);
-
- return ret;
-}
-
-/* Called from error IRQ or NMI */
-static bool sh_dmae_reset(struct sh_dmae_device *shdev)
-{
- unsigned int handled = 0;
- int i;
-
- /* halt the dma controller */
- sh_dmae_ctl_stop(shdev);
-
- /* We cannot detect, which channel caused the error, have to reset all */
- for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
- struct sh_dmae_chan *sh_chan = shdev->chan[i];
- struct sh_desc *desc;
- LIST_HEAD(dl);
-
- if (!sh_chan)
- continue;
-
- spin_lock(&sh_chan->desc_lock);
-
- /* Stop the channel */
- dmae_halt(sh_chan);
-
- list_splice_init(&sh_chan->ld_queue, &dl);
-
- if (!list_empty(&dl)) {
- dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id);
- pm_runtime_put(sh_chan->dev);
- }
- sh_chan->pm_state = DMAE_PM_ESTABLISHED;
-
- spin_unlock(&sh_chan->desc_lock);
-
- /* Complete all */
- list_for_each_entry(desc, &dl, node) {
- struct dma_async_tx_descriptor *tx = &desc->async_tx;
- desc->mark = DESC_IDLE;
- if (tx->callback)
- tx->callback(tx->callback_param);
- }
-
- spin_lock(&sh_chan->desc_lock);
- list_splice(&dl, &sh_chan->ld_free);
- spin_unlock(&sh_chan->desc_lock);
-
- handled++;
- }
-
- sh_dmae_rst(shdev);
-
- return !!handled;
-}
-
-static irqreturn_t sh_dmae_err(int irq, void *data)
-{
- struct sh_dmae_device *shdev = data;
-
- if (!(dmaor_read(shdev) & DMAOR_AE))
- return IRQ_NONE;
-
- sh_dmae_reset(data);
- return IRQ_HANDLED;
-}
-
-static void dmae_do_tasklet(unsigned long data)
-{
- struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
- struct sh_desc *desc;
- u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
- u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
-
- spin_lock_irq(&sh_chan->desc_lock);
- list_for_each_entry(desc, &sh_chan->ld_queue, node) {
- if (desc->mark == DESC_SUBMITTED &&
- ((desc->direction == DMA_DEV_TO_MEM &&
- (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
- (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
- dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
- desc->async_tx.cookie, &desc->async_tx,
- desc->hw.dar);
- desc->mark = DESC_COMPLETED;
- break;
- }
- }
- /* Next desc */
- sh_chan_xfer_ld_queue(sh_chan);
- spin_unlock_irq(&sh_chan->desc_lock);
-
- sh_dmae_chan_ld_cleanup(sh_chan, false);
-}
-
-static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
-{
- /* Fast path out if NMIF is not asserted for this controller */
- if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
- return false;
-
- return sh_dmae_reset(shdev);
-}
-
-static int sh_dmae_nmi_handler(struct notifier_block *self,
- unsigned long cmd, void *data)
-{
- struct sh_dmae_device *shdev;
- int ret = NOTIFY_DONE;
- bool triggered;
-
- /*
- * Only concern ourselves with NMI events.
- *
- * Normally we would check the die chain value, but as this needs
- * to be architecture independent, check for NMI context instead.
- */
- if (!in_nmi())
- return NOTIFY_DONE;
-
- rcu_read_lock();
- list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
- /*
- * Only stop if one of the controllers has NMIF asserted,
- * we do not want to interfere with regular address error
- * handling or NMI events that don't concern the DMACs.
- */
- triggered = sh_dmae_nmi_notify(shdev);
- if (triggered == true)
- ret = NOTIFY_OK;
- }
- rcu_read_unlock();
-
- return ret;
-}
-
-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
- .notifier_call = sh_dmae_nmi_handler,
-
- /* Run before NMI debug handler and KGDB */
- .priority = 1,
-};
-
-static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
- int irq, unsigned long flags)
-{
- int err;
- const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
- struct platform_device *pdev = to_platform_device(shdev->common.dev);
- struct sh_dmae_chan *new_sh_chan;
-
- /* alloc channel */
- new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
- if (!new_sh_chan) {
- dev_err(shdev->common.dev,
- "No free memory for allocating dma channels!\n");
- return -ENOMEM;
- }
-
- new_sh_chan->pm_state = DMAE_PM_ESTABLISHED;
-
- /* reference struct dma_device */
- new_sh_chan->common.device = &shdev->common;
- dma_cookie_init(&new_sh_chan->common);
-
- new_sh_chan->dev = shdev->common.dev;
- new_sh_chan->id = id;
- new_sh_chan->irq = irq;
- new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
-
- /* Init DMA tasklet */
- tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
- (unsigned long)new_sh_chan);
-
- spin_lock_init(&new_sh_chan->desc_lock);
-
- /* Init descripter manage list */
- INIT_LIST_HEAD(&new_sh_chan->ld_queue);
- INIT_LIST_HEAD(&new_sh_chan->ld_free);
-
- /* Add the channel to DMA device channel list */
- list_add_tail(&new_sh_chan->common.device_node,
- &shdev->common.channels);
- shdev->common.chancnt++;
-
- if (pdev->id >= 0)
- snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
- "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
- else
- snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
- "sh-dma%d", new_sh_chan->id);
-
- /* set up channel irq */
- err = request_irq(irq, &sh_dmae_interrupt, flags,
- new_sh_chan->dev_id, new_sh_chan);
- if (err) {
- dev_err(shdev->common.dev, "DMA channel %d request_irq error "
- "with return %d\n", id, err);
- goto err_no_irq;
- }
-
- shdev->chan[id] = new_sh_chan;
- return 0;
-
-err_no_irq:
- /* remove from dmaengine device node */
- list_del(&new_sh_chan->common.device_node);
- kfree(new_sh_chan);
- return err;
-}
-
-static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
-{
- int i;
-
- for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
- if (shdev->chan[i]) {
- struct sh_dmae_chan *sh_chan = shdev->chan[i];
-
- free_irq(sh_chan->irq, sh_chan);
-
- list_del(&sh_chan->common.device_node);
- kfree(sh_chan);
- shdev->chan[i] = NULL;
- }
- }
- shdev->common.chancnt = 0;
-}
-
-static int __init sh_dmae_probe(struct platform_device *pdev)
-{
- struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
- unsigned long irqflags = IRQF_DISABLED,
- chan_flag[SH_DMAC_MAX_CHANNELS] = {};
- int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
- int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
- struct sh_dmae_device *shdev;
- struct resource *chan, *dmars, *errirq_res, *chanirq_res;
-
- /* get platform data */
- if (!pdata || !pdata->channel_num)
- return -ENODEV;
-
- chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* DMARS area is optional */
- dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- /*
- * IRQ resources:
- * 1. there always must be at least one IRQ IO-resource. On SH4 it is
- * the error IRQ, in which case it is the only IRQ in this resource:
- * start == end. If it is the only IRQ resource, all channels also
- * use the same IRQ.
- * 2. DMA channel IRQ resources can be specified one per resource or in
- * ranges (start != end)
- * 3. iff all events (channels and, optionally, error) on this
- * controller use the same IRQ, only one IRQ resource can be
- * specified, otherwise there must be one IRQ per channel, even if
- * some of them are equal
- * 4. if all IRQs on this controller are equal or if some specific IRQs
- * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
- * requested with the IRQF_SHARED flag
- */
- errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!chan || !errirq_res)
- return -ENODEV;
-
- if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
- dev_err(&pdev->dev, "DMAC register region already claimed\n");
- return -EBUSY;
- }
-
- if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
- dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
- err = -EBUSY;
- goto ermrdmars;
- }
-
- err = -ENOMEM;
- shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
- if (!shdev) {
- dev_err(&pdev->dev, "Not enough memory\n");
- goto ealloc;
- }
-
- shdev->chan_reg = ioremap(chan->start, resource_size(chan));
- if (!shdev->chan_reg)
- goto emapchan;
- if (dmars) {
- shdev->dmars = ioremap(dmars->start, resource_size(dmars));
- if (!shdev->dmars)
- goto emapdmars;
- }
-
- /* platform data */
- shdev->pdata = pdata;
-
- if (pdata->chcr_offset)
- shdev->chcr_offset = pdata->chcr_offset;
- else
- shdev->chcr_offset = CHCR;
-
- if (pdata->chcr_ie_bit)
- shdev->chcr_ie_bit = pdata->chcr_ie_bit;
- else
- shdev->chcr_ie_bit = CHCR_IE;
-
- platform_set_drvdata(pdev, shdev);
-
- shdev->common.dev = &pdev->dev;
-
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-
- spin_lock_irq(&sh_dmae_lock);
- list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
- spin_unlock_irq(&sh_dmae_lock);
-
- /* reset dma controller - only needed as a test */
- err = sh_dmae_rst(shdev);
- if (err)
- goto rst_err;
-
- INIT_LIST_HEAD(&shdev->common.channels);
-
- if (!pdata->slave_only)
- dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
- if (pdata->slave && pdata->slave_num)
- dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
-
- shdev->common.device_alloc_chan_resources
- = sh_dmae_alloc_chan_resources;
- shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources;
- shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy;
- shdev->common.device_tx_status = sh_dmae_tx_status;
- shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
-
- /* Compulsory for DMA_SLAVE fields */
- shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
- shdev->common.device_control = sh_dmae_control;
-
- /* Default transfer size of 32 bytes requires 32-byte alignment */
- shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
-
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
- chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
-
- if (!chanirq_res)
- chanirq_res = errirq_res;
- else
- irqres++;
-
- if (chanirq_res == errirq_res ||
- (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
- irqflags = IRQF_SHARED;
-
- errirq = errirq_res->start;
-
- err = request_irq(errirq, sh_dmae_err, irqflags,
- "DMAC Address Error", shdev);
- if (err) {
- dev_err(&pdev->dev,
- "DMA failed requesting irq #%d, error %d\n",
- errirq, err);
- goto eirq_err;
- }
-
-#else
- chanirq_res = errirq_res;
-#endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */
-
- if (chanirq_res->start == chanirq_res->end &&
- !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
- /* Special case - all multiplexed */
- for (; irq_cnt < pdata->channel_num; irq_cnt++) {
- if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
- chan_irq[irq_cnt] = chanirq_res->start;
- chan_flag[irq_cnt] = IRQF_SHARED;
- } else {
- irq_cap = 1;
- break;
- }
- }
- } else {
- do {
- for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
- if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
- irq_cap = 1;
- break;
- }
-
- if ((errirq_res->flags & IORESOURCE_BITS) ==
- IORESOURCE_IRQ_SHAREABLE)
- chan_flag[irq_cnt] = IRQF_SHARED;
- else
- chan_flag[irq_cnt] = IRQF_DISABLED;
- dev_dbg(&pdev->dev,
- "Found IRQ %d for channel %d\n",
- i, irq_cnt);
- chan_irq[irq_cnt++] = i;
- }
-
- if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
- break;
-
- chanirq_res = platform_get_resource(pdev,
- IORESOURCE_IRQ, ++irqres);
- } while (irq_cnt < pdata->channel_num && chanirq_res);
- }
-
- /* Create DMA Channel */
- for (i = 0; i < irq_cnt; i++) {
- err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
- if (err)
- goto chan_probe_err;
- }
-
- if (irq_cap)
- dev_notice(&pdev->dev, "Attempting to register %d DMA "
- "channels when a maximum of %d are supported.\n",
- pdata->channel_num, SH_DMAC_MAX_CHANNELS);
-
- pm_runtime_put(&pdev->dev);
-
- dma_async_device_register(&shdev->common);
-
- return err;
-
-chan_probe_err:
- sh_dmae_chan_remove(shdev);
-
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
- free_irq(errirq, shdev);
-eirq_err:
-#endif
-rst_err:
- spin_lock_irq(&sh_dmae_lock);
- list_del_rcu(&shdev->node);
- spin_unlock_irq(&sh_dmae_lock);
-
- pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
- if (dmars)
- iounmap(shdev->dmars);
-
- platform_set_drvdata(pdev, NULL);
-emapdmars:
- iounmap(shdev->chan_reg);
- synchronize_rcu();
-emapchan:
- kfree(shdev);
-ealloc:
- if (dmars)
- release_mem_region(dmars->start, resource_size(dmars));
-ermrdmars:
- release_mem_region(chan->start, resource_size(chan));
-
- return err;
-}
-
-static int __exit sh_dmae_remove(struct platform_device *pdev)
-{
- struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
- struct resource *res;
- int errirq = platform_get_irq(pdev, 0);
-
- dma_async_device_unregister(&shdev->common);
-
- if (errirq > 0)
- free_irq(errirq, shdev);
-
- spin_lock_irq(&sh_dmae_lock);
- list_del_rcu(&shdev->node);
- spin_unlock_irq(&sh_dmae_lock);
-
- /* channel data remove */
- sh_dmae_chan_remove(shdev);
-
- pm_runtime_disable(&pdev->dev);
-
- if (shdev->dmars)
- iounmap(shdev->dmars);
- iounmap(shdev->chan_reg);
-
- platform_set_drvdata(pdev, NULL);
-
- synchronize_rcu();
- kfree(shdev);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
- release_mem_region(res->start, resource_size(res));
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res)
- release_mem_region(res->start, resource_size(res));
-
- return 0;
-}
-
-static void sh_dmae_shutdown(struct platform_device *pdev)
-{
- struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
- sh_dmae_ctl_stop(shdev);
-}
-
-static int sh_dmae_runtime_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int sh_dmae_runtime_resume(struct device *dev)
-{
- struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-
- return sh_dmae_rst(shdev);
-}
-
-#ifdef CONFIG_PM
-static int sh_dmae_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int sh_dmae_resume(struct device *dev)
-{
- struct sh_dmae_device *shdev = dev_get_drvdata(dev);
- int i, ret;
-
- ret = sh_dmae_rst(shdev);
- if (ret < 0)
- dev_err(dev, "Failed to reset!\n");
-
- for (i = 0; i < shdev->pdata->channel_num; i++) {
- struct sh_dmae_chan *sh_chan = shdev->chan[i];
- struct sh_dmae_slave *param = sh_chan->common.private;
-
- if (!sh_chan->descs_allocated)
- continue;
-
- if (param) {
- const struct sh_dmae_slave_config *cfg = param->config;
- dmae_set_dmars(sh_chan, cfg->mid_rid);
- dmae_set_chcr(sh_chan, cfg->chcr);
- } else {
- dmae_init(sh_chan);
- }
- }
-
- return 0;
-}
-#else
-#define sh_dmae_suspend NULL
-#define sh_dmae_resume NULL
-#endif
-
-const struct dev_pm_ops sh_dmae_pm = {
- .suspend = sh_dmae_suspend,
- .resume = sh_dmae_resume,
- .runtime_suspend = sh_dmae_runtime_suspend,
- .runtime_resume = sh_dmae_runtime_resume,
-};
-
-static struct platform_driver sh_dmae_driver = {
- .remove = __exit_p(sh_dmae_remove),
- .shutdown = sh_dmae_shutdown,
- .driver = {
- .owner = THIS_MODULE,
- .name = "sh-dma-engine",
- .pm = &sh_dmae_pm,
- },
-};
-
-static int __init sh_dmae_init(void)
-{
- /* Wire up NMI handling */
- int err = register_die_notifier(&sh_dmae_nmi_notifier);
- if (err)
- return err;
-
- return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
-}
-module_init(sh_dmae_init);
-
-static void __exit sh_dmae_exit(void)
-{
- platform_driver_unregister(&sh_dmae_driver);
-
- unregister_die_notifier(&sh_dmae_nmi_notifier);
-}
-module_exit(sh_dmae_exit);
-
-MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
-MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:sh-dma-engine");
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
new file mode 100644
index 000000000000..d52dbc6c54ab
--- /dev/null
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -0,0 +1,1415 @@
+/*
+ * DMA driver for Nvidia's Tegra20 APB DMA controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <mach/clk.h>
+#include "dmaengine.h"
+
+#define TEGRA_APBDMA_GENERAL 0x0
+#define TEGRA_APBDMA_GENERAL_ENABLE BIT(31)
+
+#define TEGRA_APBDMA_CONTROL 0x010
+#define TEGRA_APBDMA_IRQ_MASK 0x01c
+#define TEGRA_APBDMA_IRQ_MASK_SET 0x020
+
+/* CSR register */
+#define TEGRA_APBDMA_CHAN_CSR 0x00
+#define TEGRA_APBDMA_CSR_ENB BIT(31)
+#define TEGRA_APBDMA_CSR_IE_EOC BIT(30)
+#define TEGRA_APBDMA_CSR_HOLD BIT(29)
+#define TEGRA_APBDMA_CSR_DIR BIT(28)
+#define TEGRA_APBDMA_CSR_ONCE BIT(27)
+#define TEGRA_APBDMA_CSR_FLOW BIT(21)
+#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16
+#define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC
+
+/* STATUS register */
+#define TEGRA_APBDMA_CHAN_STATUS 0x004
+#define TEGRA_APBDMA_STATUS_BUSY BIT(31)
+#define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30)
+#define TEGRA_APBDMA_STATUS_HALT BIT(29)
+#define TEGRA_APBDMA_STATUS_PING_PONG BIT(28)
+#define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2
+#define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC
+
+/* AHB memory address */
+#define TEGRA_APBDMA_CHAN_AHBPTR 0x010
+
+/* AHB sequence register */
+#define TEGRA_APBDMA_CHAN_AHBSEQ 0x14
+#define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31)
+#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28)
+#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28)
+#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28)
+#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28)
+#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28)
+#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27)
+#define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24)
+#define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24)
+#define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24)
+#define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19)
+#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16
+#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0
+
+/* APB address */
+#define TEGRA_APBDMA_CHAN_APBPTR 0x018
+
+/* APB sequence register */
+#define TEGRA_APBDMA_CHAN_APBSEQ 0x01c
+#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28)
+#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28)
+#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28)
+#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28)
+#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28)
+#define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27)
+#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16)
+
+/*
+ * If any burst is in flight and DMA paused then this is the time to complete
+ * on-flight burst and update DMA status register.
+ */
+#define TEGRA_APBDMA_BURST_COMPLETE_TIME 20
+
+/* Channel base address offset from APBDMA base address */
+#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000
+
+/* DMA channel register space size */
+#define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20
+
+struct tegra_dma;
+
+/*
+ * tegra_dma_chip_data Tegra chip specific DMA data
+ * @nr_channels: Number of channels available in the controller.
+ * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
+ */
+struct tegra_dma_chip_data {
+ int nr_channels;
+ int max_dma_count;
+};
+
+/* DMA channel registers */
+struct tegra_dma_channel_regs {
+ unsigned long csr;
+ unsigned long ahb_ptr;
+ unsigned long apb_ptr;
+ unsigned long ahb_seq;
+ unsigned long apb_seq;
+};
+
+/*
+ * tegra_dma_sg_req: Dma request details to configure hardware. This
+ * contains the details for one transfer to configure DMA hw.
+ * The client's request for data transfer can be broken into multiple
+ * sub-transfer as per requester details and hw support.
+ * This sub transfer get added in the list of transfer and point to Tegra
+ * DMA descriptor which manages the transfer details.
+ */
+struct tegra_dma_sg_req {
+ struct tegra_dma_channel_regs ch_regs;
+ int req_len;
+ bool configured;
+ bool last_sg;
+ bool half_done;
+ struct list_head node;
+ struct tegra_dma_desc *dma_desc;
+};
+
+/*
+ * tegra_dma_desc: Tegra DMA descriptors which manages the client requests.
+ * This descriptor keep track of transfer status, callbacks and request
+ * counts etc.
+ */
+struct tegra_dma_desc {
+ struct dma_async_tx_descriptor txd;
+ int bytes_requested;
+ int bytes_transferred;
+ enum dma_status dma_status;
+ struct list_head node;
+ struct list_head tx_list;
+ struct list_head cb_node;
+ int cb_count;
+};
+
+struct tegra_dma_channel;
+
+typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
+ bool to_terminate);
+
+/* tegra_dma_channel: Channel specific information */
+struct tegra_dma_channel {
+ struct dma_chan dma_chan;
+ bool config_init;
+ int id;
+ int irq;
+ unsigned long chan_base_offset;
+ spinlock_t lock;
+ bool busy;
+ struct tegra_dma *tdma;
+ bool cyclic;
+
+ /* Different lists for managing the requests */
+ struct list_head free_sg_req;
+ struct list_head pending_sg_req;
+ struct list_head free_dma_desc;
+ struct list_head cb_desc;
+
+ /* ISR handler and tasklet for bottom half of isr handling */
+ dma_isr_handler isr_handler;
+ struct tasklet_struct tasklet;
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ /* Channel-slave specific configuration */
+ struct dma_slave_config dma_sconfig;
+};
+
+/* tegra_dma: Tegra DMA specific information */
+struct tegra_dma {
+ struct dma_device dma_dev;
+ struct device *dev;
+ struct clk *dma_clk;
+ spinlock_t global_lock;
+ void __iomem *base_addr;
+ struct tegra_dma_chip_data *chip_data;
+
+ /* Some register need to be cache before suspend */
+ u32 reg_gen;
+
+ /* Last member of the structure */
+ struct tegra_dma_channel channels[0];
+};
+
+static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
+{
+ writel(val, tdma->base_addr + reg);
+}
+
+static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
+{
+ return readl(tdma->base_addr + reg);
+}
+
+static inline void tdc_write(struct tegra_dma_channel *tdc,
+ u32 reg, u32 val)
+{
+ writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+}
+
+static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
+{
+ return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg);
+}
+
+static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
+{
+ return container_of(dc, struct tegra_dma_channel, dma_chan);
+}
+
+static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
+ struct dma_async_tx_descriptor *td)
+{
+ return container_of(td, struct tegra_dma_desc, txd);
+}
+
+static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
+{
+ return &tdc->dma_chan.dev->device;
+}
+
+static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
+static int tegra_dma_runtime_suspend(struct device *dev);
+static int tegra_dma_runtime_resume(struct device *dev);
+
+/* Get DMA desc from free list, if not there then allocate it. */
+static struct tegra_dma_desc *tegra_dma_desc_get(
+ struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_desc *dma_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+
+ /* Do not allocate if desc are waiting for ack */
+ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
+ if (async_tx_test_ack(&dma_desc->txd)) {
+ list_del(&dma_desc->node);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return dma_desc;
+ }
+ }
+
+ spin_unlock_irqrestore(&tdc->lock, flags);
+
+ /* Allocate DMA desc */
+ dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC);
+ if (!dma_desc) {
+ dev_err(tdc2dev(tdc), "dma_desc alloc failed\n");
+ return NULL;
+ }
+
+ dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
+ dma_desc->txd.tx_submit = tegra_dma_tx_submit;
+ dma_desc->txd.flags = 0;
+ return dma_desc;
+}
+
+static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
+ struct tegra_dma_desc *dma_desc)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ if (!list_empty(&dma_desc->tx_list))
+ list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
+ list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+}
+
+static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
+ struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_sg_req *sg_req = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ if (!list_empty(&tdc->free_sg_req)) {
+ sg_req = list_first_entry(&tdc->free_sg_req,
+ typeof(*sg_req), node);
+ list_del(&sg_req->node);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return sg_req;
+ }
+ spin_unlock_irqrestore(&tdc->lock, flags);
+
+ sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC);
+ if (!sg_req)
+ dev_err(tdc2dev(tdc), "sg_req alloc failed\n");
+ return sg_req;
+}
+
+static int tegra_dma_slave_config(struct dma_chan *dc,
+ struct dma_slave_config *sconfig)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ if (!list_empty(&tdc->pending_sg_req)) {
+ dev_err(tdc2dev(tdc), "Configuration not allowed\n");
+ return -EBUSY;
+ }
+
+ memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
+ tdc->config_init = true;
+ return 0;
+}
+
+static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
+ bool wait_for_burst_complete)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ spin_lock(&tdma->global_lock);
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
+ if (wait_for_burst_complete)
+ udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+}
+
+static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma *tdma = tdc->tdma;
+
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
+ spin_unlock(&tdma->global_lock);
+}
+
+static void tegra_dma_stop(struct tegra_dma_channel *tdc)
+{
+ u32 csr;
+ u32 status;
+
+ /* Disable interrupts */
+ csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
+ csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
+
+ /* Disable DMA */
+ csr &= ~TEGRA_APBDMA_CSR_ENB;
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);
+
+ /* Clear interrupt status if it is there */
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
+ dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
+ }
+ tdc->busy = false;
+}
+
+static void tegra_dma_start(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *sg_req)
+{
+ struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;
+
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
+
+ /* Start DMA */
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
+ ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
+}
+
+static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *nsg_req)
+{
+ unsigned long status;
+
+ /*
+ * The DMA controller reloads the new configuration for next transfer
+ * after last burst of current transfer completes.
+ * If there is no IEC status then this makes sure that last burst
+ * has not be completed. There may be case that last burst is on
+ * flight and so it can complete but because DMA is paused, it
+ * will not generates interrupt as well as not reload the new
+ * configuration.
+ * If there is already IEC status then interrupt handler need to
+ * load new configuration.
+ */
+ tegra_dma_global_pause(tdc, false);
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+
+ /*
+ * If interrupt is pending then do nothing as the ISR will handle
+ * the programing for new request.
+ */
+ if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
+ dev_err(tdc2dev(tdc),
+ "Skipping new configuration as interrupt is pending\n");
+ tegra_dma_global_resume(tdc);
+ return;
+ }
+
+ /* Safe to program new configuration */
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
+ nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
+ nsg_req->configured = true;
+
+ tegra_dma_global_resume(tdc);
+}
+
+static void tdc_start_head_req(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_sg_req *sg_req;
+
+ if (list_empty(&tdc->pending_sg_req))
+ return;
+
+ sg_req = list_first_entry(&tdc->pending_sg_req,
+ typeof(*sg_req), node);
+ tegra_dma_start(tdc, sg_req);
+ sg_req->configured = true;
+ tdc->busy = true;
+}
+
+static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_sg_req *hsgreq;
+ struct tegra_dma_sg_req *hnsgreq;
+
+ if (list_empty(&tdc->pending_sg_req))
+ return;
+
+ hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
+ if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
+ hnsgreq = list_first_entry(&hsgreq->node,
+ typeof(*hnsgreq), node);
+ tegra_dma_configure_for_next(tdc, hnsgreq);
+ }
+}
+
+static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *sg_req, unsigned long status)
+{
+ return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
+}
+
+static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
+{
+ struct tegra_dma_sg_req *sgreq;
+ struct tegra_dma_desc *dma_desc;
+
+ while (!list_empty(&tdc->pending_sg_req)) {
+ sgreq = list_first_entry(&tdc->pending_sg_req,
+ typeof(*sgreq), node);
+ list_del(&sgreq->node);
+ list_add_tail(&sgreq->node, &tdc->free_sg_req);
+ if (sgreq->last_sg) {
+ dma_desc = sgreq->dma_desc;
+ dma_desc->dma_status = DMA_ERROR;
+ list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
+
+ /* Add in cb list if it is not there. */
+ if (!dma_desc->cb_count)
+ list_add_tail(&dma_desc->cb_node,
+ &tdc->cb_desc);
+ dma_desc->cb_count++;
+ }
+ }
+ tdc->isr_handler = NULL;
+}
+
+static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
+ struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
+{
+ struct tegra_dma_sg_req *hsgreq = NULL;
+
+ if (list_empty(&tdc->pending_sg_req)) {
+ dev_err(tdc2dev(tdc), "Dma is running without req\n");
+ tegra_dma_stop(tdc);
+ return false;
+ }
+
+ /*
+ * Check that head req on list should be in flight.
+ * If it is not in flight then abort transfer as
+ * looping of transfer can not continue.
+ */
+ hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
+ if (!hsgreq->configured) {
+ tegra_dma_stop(tdc);
+ dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n");
+ tegra_dma_abort_all(tdc);
+ return false;
+ }
+
+ /* Configure next request */
+ if (!to_terminate)
+ tdc_configure_next_head_desc(tdc);
+ return true;
+}
+
+static void handle_once_dma_done(struct tegra_dma_channel *tdc,
+ bool to_terminate)
+{
+ struct tegra_dma_sg_req *sgreq;
+ struct tegra_dma_desc *dma_desc;
+
+ tdc->busy = false;
+ sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
+ dma_desc = sgreq->dma_desc;
+ dma_desc->bytes_transferred += sgreq->req_len;
+
+ list_del(&sgreq->node);
+ if (sgreq->last_sg) {
+ dma_desc->dma_status = DMA_SUCCESS;
+ dma_cookie_complete(&dma_desc->txd);
+ if (!dma_desc->cb_count)
+ list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
+ dma_desc->cb_count++;
+ list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
+ }
+ list_add_tail(&sgreq->node, &tdc->free_sg_req);
+
+ /* Do not start DMA if it is going to be terminate */
+ if (to_terminate || list_empty(&tdc->pending_sg_req))
+ return;
+
+ tdc_start_head_req(tdc);
+ return;
+}
+
+static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
+ bool to_terminate)
+{
+ struct tegra_dma_sg_req *sgreq;
+ struct tegra_dma_desc *dma_desc;
+ bool st;
+
+ sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
+ dma_desc = sgreq->dma_desc;
+ dma_desc->bytes_transferred += sgreq->req_len;
+
+ /* Callback need to be call */
+ if (!dma_desc->cb_count)
+ list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
+ dma_desc->cb_count++;
+
+ /* If not last req then put at end of pending list */
+ if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
+ list_del(&sgreq->node);
+ list_add_tail(&sgreq->node, &tdc->pending_sg_req);
+ sgreq->configured = false;
+ st = handle_continuous_head_request(tdc, sgreq, to_terminate);
+ if (!st)
+ dma_desc->dma_status = DMA_ERROR;
+ }
+ return;
+}
+
+static void tegra_dma_tasklet(unsigned long data)
+{
+ struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
+ dma_async_tx_callback callback = NULL;
+ void *callback_param = NULL;
+ struct tegra_dma_desc *dma_desc;
+ unsigned long flags;
+ int cb_count;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ while (!list_empty(&tdc->cb_desc)) {
+ dma_desc = list_first_entry(&tdc->cb_desc,
+ typeof(*dma_desc), cb_node);
+ list_del(&dma_desc->cb_node);
+ callback = dma_desc->txd.callback;
+ callback_param = dma_desc->txd.callback_param;
+ cb_count = dma_desc->cb_count;
+ dma_desc->cb_count = 0;
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ while (cb_count-- && callback)
+ callback(callback_param);
+ spin_lock_irqsave(&tdc->lock, flags);
+ }
+ spin_unlock_irqrestore(&tdc->lock, flags);
+}
+
+static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
+{
+ struct tegra_dma_channel *tdc = dev_id;
+ unsigned long status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
+ tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
+ tdc->isr_handler(tdc, false);
+ tasklet_schedule(&tdc->tasklet);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ dev_info(tdc2dev(tdc),
+ "Interrupt already served status 0x%08lx\n", status);
+ return IRQ_NONE;
+}
+
+static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ dma_desc->dma_status = DMA_IN_PROGRESS;
+ cookie = dma_cookie_assign(&dma_desc->txd);
+ list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return cookie;
+}
+
+static void tegra_dma_issue_pending(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ if (list_empty(&tdc->pending_sg_req)) {
+ dev_err(tdc2dev(tdc), "No DMA request\n");
+ goto end;
+ }
+ if (!tdc->busy) {
+ tdc_start_head_req(tdc);
+
+ /* Continuous single mode: Configure next req */
+ if (tdc->cyclic) {
+ /*
+ * Wait for 1 burst time for configure DMA for
+ * next transfer.
+ */
+ udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
+ tdc_configure_next_head_desc(tdc);
+ }
+ }
+end:
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return;
+}
+
+static void tegra_dma_terminate_all(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_sg_req *sgreq;
+ struct tegra_dma_desc *dma_desc;
+ unsigned long flags;
+ unsigned long status;
+ bool was_busy;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ if (list_empty(&tdc->pending_sg_req)) {
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return;
+ }
+
+ if (!tdc->busy)
+ goto skip_dma_stop;
+
+ /* Pause DMA before checking the queue status */
+ tegra_dma_global_pause(tdc, true);
+
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
+ dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
+ tdc->isr_handler(tdc, true);
+ status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
+ }
+
+ was_busy = tdc->busy;
+ tegra_dma_stop(tdc);
+
+ if (!list_empty(&tdc->pending_sg_req) && was_busy) {
+ sgreq = list_first_entry(&tdc->pending_sg_req,
+ typeof(*sgreq), node);
+ sgreq->dma_desc->bytes_transferred +=
+ get_current_xferred_count(tdc, sgreq, status);
+ }
+ tegra_dma_global_resume(tdc);
+
+skip_dma_stop:
+ tegra_dma_abort_all(tdc);
+
+ while (!list_empty(&tdc->cb_desc)) {
+ dma_desc = list_first_entry(&tdc->cb_desc,
+ typeof(*dma_desc), cb_node);
+ list_del(&dma_desc->cb_node);
+ dma_desc->cb_count = 0;
+ }
+ spin_unlock_irqrestore(&tdc->lock, flags);
+}
+
+static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sg_req;
+ enum dma_status ret;
+ unsigned long flags;
+ unsigned int residual;
+
+ spin_lock_irqsave(&tdc->lock, flags);
+
+ ret = dma_cookie_status(dc, cookie, txstate);
+ if (ret == DMA_SUCCESS) {
+ dma_set_residue(txstate, 0);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return ret;
+ }
+
+ /* Check on wait_ack desc status */
+ list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
+ if (dma_desc->txd.cookie == cookie) {
+ residual = dma_desc->bytes_requested -
+ (dma_desc->bytes_transferred %
+ dma_desc->bytes_requested);
+ dma_set_residue(txstate, residual);
+ ret = dma_desc->dma_status;
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return ret;
+ }
+ }
+
+ /* Check in pending list */
+ list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
+ dma_desc = sg_req->dma_desc;
+ if (dma_desc->txd.cookie == cookie) {
+ residual = dma_desc->bytes_requested -
+ (dma_desc->bytes_transferred %
+ dma_desc->bytes_requested);
+ dma_set_residue(txstate, residual);
+ ret = dma_desc->dma_status;
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return ret;
+ }
+ }
+
+ dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie);
+ spin_unlock_irqrestore(&tdc->lock, flags);
+ return ret;
+}
+
+static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ return tegra_dma_slave_config(dc,
+ (struct dma_slave_config *)arg);
+
+ case DMA_TERMINATE_ALL:
+ tegra_dma_terminate_all(dc);
+ return 0;
+
+ default:
+ break;
+ }
+
+ return -ENXIO;
+}
+
+static inline int get_bus_width(struct tegra_dma_channel *tdc,
+ enum dma_slave_buswidth slave_bw)
+{
+ switch (slave_bw) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
+ case DMA_SLAVE_BUSWIDTH_8_BYTES:
+ return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
+ default:
+ dev_warn(tdc2dev(tdc),
+ "slave bw is not supported, using 32bits\n");
+ return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
+ }
+}
+
+static inline int get_burst_size(struct tegra_dma_channel *tdc,
+ u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
+{
+ int burst_byte;
+ int burst_ahb_width;
+
+ /*
+ * burst_size from client is in terms of the bus_width.
+ * convert them into AHB memory width which is 4 byte.
+ */
+ burst_byte = burst_size * slave_bw;
+ burst_ahb_width = burst_byte / 4;
+
+ /* If burst size is 0 then calculate the burst size based on length */
+ if (!burst_ahb_width) {
+ if (len & 0xF)
+ return TEGRA_APBDMA_AHBSEQ_BURST_1;
+ else if ((len >> 4) & 0x1)
+ return TEGRA_APBDMA_AHBSEQ_BURST_4;
+ else
+ return TEGRA_APBDMA_AHBSEQ_BURST_8;
+ }
+ if (burst_ahb_width < 4)
+ return TEGRA_APBDMA_AHBSEQ_BURST_1;
+ else if (burst_ahb_width < 8)
+ return TEGRA_APBDMA_AHBSEQ_BURST_4;
+ else
+ return TEGRA_APBDMA_AHBSEQ_BURST_8;
+}
+
+static int get_transfer_param(struct tegra_dma_channel *tdc,
+ enum dma_transfer_direction direction, unsigned long *apb_addr,
+ unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size,
+ enum dma_slave_buswidth *slave_bw)
+{
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ *apb_addr = tdc->dma_sconfig.dst_addr;
+ *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
+ *burst_size = tdc->dma_sconfig.dst_maxburst;
+ *slave_bw = tdc->dma_sconfig.dst_addr_width;
+ *csr = TEGRA_APBDMA_CSR_DIR;
+ return 0;
+
+ case DMA_DEV_TO_MEM:
+ *apb_addr = tdc->dma_sconfig.src_addr;
+ *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
+ *burst_size = tdc->dma_sconfig.src_maxburst;
+ *slave_bw = tdc->dma_sconfig.src_addr_width;
+ *csr = 0;
+ return 0;
+
+ default:
+ dev_err(tdc2dev(tdc), "Dma direction is not supported\n");
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
+ struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc;
+ unsigned int i;
+ struct scatterlist *sg;
+ unsigned long csr, ahb_seq, apb_ptr, apb_seq;
+ struct list_head req_list;
+ struct tegra_dma_sg_req *sg_req = NULL;
+ u32 burst_size;
+ enum dma_slave_buswidth slave_bw;
+ int ret;
+
+ if (!tdc->config_init) {
+ dev_err(tdc2dev(tdc), "dma channel is not configured\n");
+ return NULL;
+ }
+ if (sg_len < 1) {
+ dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
+ return NULL;
+ }
+
+ ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+ &burst_size, &slave_bw);
+ if (ret < 0)
+ return NULL;
+
+ INIT_LIST_HEAD(&req_list);
+
+ ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
+ ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
+ TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
+ ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
+
+ csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW;
+ csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+ if (flags & DMA_PREP_INTERRUPT)
+ csr |= TEGRA_APBDMA_CSR_IE_EOC;
+
+ apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
+
+ dma_desc = tegra_dma_desc_get(tdc);
+ if (!dma_desc) {
+ dev_err(tdc2dev(tdc), "Dma descriptors not available\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&dma_desc->tx_list);
+ INIT_LIST_HEAD(&dma_desc->cb_node);
+ dma_desc->cb_count = 0;
+ dma_desc->bytes_requested = 0;
+ dma_desc->bytes_transferred = 0;
+ dma_desc->dma_status = DMA_IN_PROGRESS;
+
+ /* Make transfer requests */
+ for_each_sg(sgl, sg, sg_len, i) {
+ u32 len, mem;
+
+ mem = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((len & 3) || (mem & 3) ||
+ (len > tdc->tdma->chip_data->max_dma_count)) {
+ dev_err(tdc2dev(tdc),
+ "Dma length/memory address is not supported\n");
+ tegra_dma_desc_put(tdc, dma_desc);
+ return NULL;
+ }
+
+ sg_req = tegra_dma_sg_req_get(tdc);
+ if (!sg_req) {
+ dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+ tegra_dma_desc_put(tdc, dma_desc);
+ return NULL;
+ }
+
+ ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
+ dma_desc->bytes_requested += len;
+
+ sg_req->ch_regs.apb_ptr = apb_ptr;
+ sg_req->ch_regs.ahb_ptr = mem;
+ sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+ sg_req->ch_regs.apb_seq = apb_seq;
+ sg_req->ch_regs.ahb_seq = ahb_seq;
+ sg_req->configured = false;
+ sg_req->last_sg = false;
+ sg_req->dma_desc = dma_desc;
+ sg_req->req_len = len;
+
+ list_add_tail(&sg_req->node, &dma_desc->tx_list);
+ }
+ sg_req->last_sg = true;
+ if (flags & DMA_CTRL_ACK)
+ dma_desc->txd.flags = DMA_CTRL_ACK;
+
+ /*
+ * Make sure that mode should not be conflicting with currently
+ * configured mode.
+ */
+ if (!tdc->isr_handler) {
+ tdc->isr_handler = handle_once_dma_done;
+ tdc->cyclic = false;
+ } else {
+ if (tdc->cyclic) {
+ dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
+ tegra_dma_desc_put(tdc, dma_desc);
+ return NULL;
+ }
+ }
+
+ return &dma_desc->txd;
+}
+
+struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
+ struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ void *context)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+ struct tegra_dma_desc *dma_desc = NULL;
+ struct tegra_dma_sg_req *sg_req = NULL;
+ unsigned long csr, ahb_seq, apb_ptr, apb_seq;
+ int len;
+ size_t remain_len;
+ dma_addr_t mem = buf_addr;
+ u32 burst_size;
+ enum dma_slave_buswidth slave_bw;
+ int ret;
+
+ if (!buf_len || !period_len) {
+ dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
+ return NULL;
+ }
+
+ if (!tdc->config_init) {
+ dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
+ return NULL;
+ }
+
+ /*
+ * We allow to take more number of requests till DMA is
+ * not started. The driver will loop over all requests.
+ * Once DMA is started then new requests can be queued only after
+ * terminating the DMA.
+ */
+ if (tdc->busy) {
+ dev_err(tdc2dev(tdc), "Request not allowed when dma running\n");
+ return NULL;
+ }
+
+ /*
+ * We only support cycle transfer when buf_len is multiple of
+ * period_len.
+ */
+ if (buf_len % period_len) {
+ dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
+ return NULL;
+ }
+
+ len = period_len;
+ if ((len & 3) || (buf_addr & 3) ||
+ (len > tdc->tdma->chip_data->max_dma_count)) {
+ dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
+ return NULL;
+ }
+
+ ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
+ &burst_size, &slave_bw);
+ if (ret < 0)
+ return NULL;
+
+
+ ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
+ ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
+ TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
+ ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;
+
+ csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC;
+ csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
+
+ apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
+
+ dma_desc = tegra_dma_desc_get(tdc);
+ if (!dma_desc) {
+ dev_err(tdc2dev(tdc), "not enough descriptors available\n");
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&dma_desc->tx_list);
+ INIT_LIST_HEAD(&dma_desc->cb_node);
+ dma_desc->cb_count = 0;
+
+ dma_desc->bytes_transferred = 0;
+ dma_desc->bytes_requested = buf_len;
+ remain_len = buf_len;
+
+ /* Split transfer equal to period size */
+ while (remain_len) {
+ sg_req = tegra_dma_sg_req_get(tdc);
+ if (!sg_req) {
+ dev_err(tdc2dev(tdc), "Dma sg-req not available\n");
+ tegra_dma_desc_put(tdc, dma_desc);
+ return NULL;
+ }
+
+ ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
+ sg_req->ch_regs.apb_ptr = apb_ptr;
+ sg_req->ch_regs.ahb_ptr = mem;
+ sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC);
+ sg_req->ch_regs.apb_seq = apb_seq;
+ sg_req->ch_regs.ahb_seq = ahb_seq;
+ sg_req->configured = false;
+ sg_req->half_done = false;
+ sg_req->last_sg = false;
+ sg_req->dma_desc = dma_desc;
+ sg_req->req_len = len;
+
+ list_add_tail(&sg_req->node, &dma_desc->tx_list);
+ remain_len -= len;
+ mem += len;
+ }
+ sg_req->last_sg = true;
+ dma_desc->txd.flags = 0;
+
+ /*
+ * Make sure that mode should not be conflicting with currently
+ * configured mode.
+ */
+ if (!tdc->isr_handler) {
+ tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
+ tdc->cyclic = true;
+ } else {
+ if (!tdc->cyclic) {
+ dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
+ tegra_dma_desc_put(tdc, dma_desc);
+ return NULL;
+ }
+ }
+
+ return &dma_desc->txd;
+}
+
+static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ dma_cookie_init(&tdc->dma_chan);
+ tdc->config_init = false;
+ return 0;
+}
+
+static void tegra_dma_free_chan_resources(struct dma_chan *dc)
+{
+ struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+
+ struct tegra_dma_desc *dma_desc;
+ struct tegra_dma_sg_req *sg_req;
+ struct list_head dma_desc_list;
+ struct list_head sg_req_list;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&dma_desc_list);
+ INIT_LIST_HEAD(&sg_req_list);
+
+ dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
+
+ if (tdc->busy)
+ tegra_dma_terminate_all(dc);
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ list_splice_init(&tdc->pending_sg_req, &sg_req_list);
+ list_splice_init(&tdc->free_sg_req, &sg_req_list);
+ list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
+ INIT_LIST_HEAD(&tdc->cb_desc);
+ tdc->config_init = false;
+ spin_unlock_irqrestore(&tdc->lock, flags);
+
+ while (!list_empty(&dma_desc_list)) {
+ dma_desc = list_first_entry(&dma_desc_list,
+ typeof(*dma_desc), node);
+ list_del(&dma_desc->node);
+ kfree(dma_desc);
+ }
+
+ while (!list_empty(&sg_req_list)) {
+ sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
+ list_del(&sg_req->node);
+ kfree(sg_req);
+ }
+}
+
+/* Tegra20 specific DMA controller information */
+static struct tegra_dma_chip_data tegra20_dma_chip_data = {
+ .nr_channels = 16,
+ .max_dma_count = 1024UL * 64,
+};
+
+#if defined(CONFIG_OF)
+/* Tegra30 specific DMA controller information */
+static struct tegra_dma_chip_data tegra30_dma_chip_data = {
+ .nr_channels = 32,
+ .max_dma_count = 1024UL * 64,
+};
+
+static const struct of_device_id tegra_dma_of_match[] __devinitconst = {
+ {
+ .compatible = "nvidia,tegra30-apbdma",
+ .data = &tegra30_dma_chip_data,
+ }, {
+ .compatible = "nvidia,tegra20-apbdma",
+ .data = &tegra20_dma_chip_data,
+ }, {
+ },
+};
+MODULE_DEVICE_TABLE(of, tegra_dma_of_match);
+#endif
+
+static int __devinit tegra_dma_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct tegra_dma *tdma;
+ int ret;
+ int i;
+ struct tegra_dma_chip_data *cdata = NULL;
+
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_device(of_match_ptr(tegra_dma_of_match),
+ &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ cdata = match->data;
+ } else {
+ /* If no device tree then fallback to tegra20 */
+ cdata = &tegra20_dma_chip_data;
+ }
+
+ tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels *
+ sizeof(struct tegra_dma_channel), GFP_KERNEL);
+ if (!tdma) {
+ dev_err(&pdev->dev, "Error: memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ tdma->dev = &pdev->dev;
+ tdma->chip_data = cdata;
+ platform_set_drvdata(pdev, tdma);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No mem resource for DMA\n");
+ return -EINVAL;
+ }
+
+ tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res);
+ if (!tdma->base_addr) {
+ dev_err(&pdev->dev,
+ "Cannot request memregion/iomap dma address\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tdma->dma_clk)) {
+ dev_err(&pdev->dev, "Error: Missing controller clock\n");
+ return PTR_ERR(tdma->dma_clk);
+ }
+
+ spin_lock_init(&tdma->global_lock);
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_dma_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "dma_runtime_resume failed %d\n",
+ ret);
+ goto err_pm_disable;
+ }
+ }
+
+ /* Reset DMA controller */
+ tegra_periph_reset_assert(tdma->dma_clk);
+ udelay(2);
+ tegra_periph_reset_deassert(tdma->dma_clk);
+
+ /* Enable global DMA registers */
+ tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
+ tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
+ tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);
+
+ INIT_LIST_HEAD(&tdma->dma_dev.channels);
+ for (i = 0; i < cdata->nr_channels; i++) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+ char irq_name[30];
+
+ tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
+ i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+ if (!res) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
+ goto err_irq;
+ }
+ tdc->irq = res->start;
+ snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i);
+ ret = devm_request_irq(&pdev->dev, tdc->irq,
+ tegra_dma_isr, 0, irq_name, tdc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "request_irq failed with err %d channel %d\n",
+ i, ret);
+ goto err_irq;
+ }
+
+ tdc->dma_chan.device = &tdma->dma_dev;
+ dma_cookie_init(&tdc->dma_chan);
+ list_add_tail(&tdc->dma_chan.device_node,
+ &tdma->dma_dev.channels);
+ tdc->tdma = tdma;
+ tdc->id = i;
+
+ tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
+ (unsigned long)tdc);
+ spin_lock_init(&tdc->lock);
+
+ INIT_LIST_HEAD(&tdc->pending_sg_req);
+ INIT_LIST_HEAD(&tdc->free_sg_req);
+ INIT_LIST_HEAD(&tdc->free_dma_desc);
+ INIT_LIST_HEAD(&tdc->cb_desc);
+ }
+
+ dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
+
+ tdma->dma_dev.dev = &pdev->dev;
+ tdma->dma_dev.device_alloc_chan_resources =
+ tegra_dma_alloc_chan_resources;
+ tdma->dma_dev.device_free_chan_resources =
+ tegra_dma_free_chan_resources;
+ tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
+ tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
+ tdma->dma_dev.device_control = tegra_dma_device_control;
+ tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
+ tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;
+
+ ret = dma_async_device_register(&tdma->dma_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Tegra20 APB DMA driver registration failed %d\n", ret);
+ goto err_irq;
+ }
+
+ dev_info(&pdev->dev, "Tegra20 APB DMA driver register %d channels\n",
+ cdata->nr_channels);
+ return 0;
+
+err_irq:
+ while (--i >= 0) {
+ struct tegra_dma_channel *tdc = &tdma->channels[i];
+ tasklet_kill(&tdc->tasklet);
+ }
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_dma_runtime_suspend(&pdev->dev);
+ return ret;
+}
+
+static int __devexit tegra_dma_remove(struct platform_device *pdev)
+{
+ struct tegra_dma *tdma = platform_get_drvdata(pdev);
+ int i;
+ struct tegra_dma_channel *tdc;
+
+ dma_async_device_unregister(&tdma->dma_dev);
+
+ for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
+ tdc = &tdma->channels[i];
+ tasklet_kill(&tdc->tasklet);
+ }
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_dma_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+static int tegra_dma_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_dma *tdma = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(tdma->dma_clk);
+ return 0;
+}
+
+static int tegra_dma_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct tegra_dma *tdma = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = clk_prepare_enable(tdma->dma_clk);
+ if (ret < 0) {
+ dev_err(dev, "clk_enable failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_dma_dev_pm_ops __devinitconst = {
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = tegra_dma_runtime_suspend,
+ .runtime_resume = tegra_dma_runtime_resume,
+#endif
+};
+
+static struct platform_driver tegra_dmac_driver = {
+ .driver = {
+ .name = "tegra-apbdma",
+ .owner = THIS_MODULE,
+ .pm = &tegra_dma_dev_pm_ops,
+ .of_match_table = of_match_ptr(tegra_dma_of_match),
+ },
+ .probe = tegra_dma_probe,
+ .remove = __devexit_p(tegra_dma_remove),
+};
+
+module_platform_driver(tegra_dmac_driver);
+
+MODULE_ALIAS("platform:tegra20-apbdma");
+MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index c4067d0141f7..542f0c04b695 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -136,7 +136,7 @@ config GPIO_MPC8XXX
config GPIO_MSM_V1
tristate "Qualcomm MSM GPIO v1"
- depends on GPIOLIB && ARCH_MSM
+ depends on GPIOLIB && ARCH_MSM && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
help
Say yes here to support the GPIO interface on ARM v6 based
Qualcomm MSM chips. Most of the pins on the MSM can be
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
index 9e9947cb86a3..1077754f8289 100644
--- a/drivers/gpio/devres.c
+++ b/drivers/gpio/devres.c
@@ -98,6 +98,7 @@ int devm_gpio_request_one(struct device *dev, unsigned gpio,
return 0;
}
+EXPORT_SYMBOL(devm_gpio_request_one);
/**
* devm_gpio_free - free an interrupt
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index c337143b18f8..04691d3abe60 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -33,8 +34,6 @@
#include <asm-generic/bug.h>
#include <asm/mach/irq.h>
-#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START)
-
enum mxc_gpio_hwtype {
IMX1_GPIO, /* runs on i.mx1 */
IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
@@ -61,7 +60,7 @@ struct mxc_gpio_port {
void __iomem *base;
int irq;
int irq_high;
- int virtual_irq_start;
+ struct irq_domain *domain;
struct bgpio_chip bgc;
u32 both_edges;
};
@@ -144,14 +143,15 @@ static LIST_HEAD(mxc_gpio_ports);
static int gpio_set_irq_type(struct irq_data *d, u32 type)
{
- u32 gpio = irq_to_gpio(d->irq);
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
u32 bit, val;
+ u32 gpio_idx = d->hwirq;
+ u32 gpio = port->bgc.gc.base + gpio_idx;
int edge;
void __iomem *reg = port->base;
- port->both_edges &= ~(1 << (gpio & 31));
+ port->both_edges &= ~(1 << gpio_idx);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
edge = GPIO_INT_RISE_EDGE;
@@ -168,7 +168,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
edge = GPIO_INT_HIGH_LEV;
pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
}
- port->both_edges |= 1 << (gpio & 31);
+ port->both_edges |= 1 << gpio_idx;
break;
case IRQ_TYPE_LEVEL_LOW:
edge = GPIO_INT_LOW_LEV;
@@ -180,11 +180,11 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
return -EINVAL;
}
- reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
- bit = gpio & 0xf;
+ reg += GPIO_ICR1 + ((gpio_idx & 0x10) >> 2); /* ICR1 or ICR2 */
+ bit = gpio_idx & 0xf;
val = readl(reg) & ~(0x3 << (bit << 1));
writel(val | (edge << (bit << 1)), reg);
- writel(1 << (gpio & 0x1f), port->base + GPIO_ISR);
+ writel(1 << gpio_idx, port->base + GPIO_ISR);
return 0;
}
@@ -217,15 +217,13 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
/* handle 32 interrupts in one status register */
static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
{
- u32 gpio_irq_no_base = port->virtual_irq_start;
-
while (irq_stat != 0) {
int irqoffset = fls(irq_stat) - 1;
if (port->both_edges & (1 << irqoffset))
mxc_flip_edge(port, irqoffset);
- generic_handle_irq(gpio_irq_no_base + irqoffset);
+ generic_handle_irq(irq_find_mapping(port->domain, irqoffset));
irq_stat &= ~(1 << irqoffset);
}
@@ -276,10 +274,9 @@ static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
*/
static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
{
- u32 gpio = irq_to_gpio(d->irq);
- u32 gpio_idx = gpio & 0x1F;
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mxc_gpio_port *port = gc->private;
+ u32 gpio_idx = d->hwirq;
if (enable) {
if (port->irq_high && (gpio_idx >= 16))
@@ -296,12 +293,12 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
return 0;
}
-static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port)
+static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
- gc = irq_alloc_generic_chip("gpio-mxc", 1, port->virtual_irq_start,
+ gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base,
port->base, handle_level_irq);
gc->private = port;
@@ -352,7 +349,7 @@ static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
struct mxc_gpio_port *port =
container_of(bgc, struct mxc_gpio_port, bgc);
- return port->virtual_irq_start + offset;
+ return irq_find_mapping(port->domain, offset);
}
static int __devinit mxc_gpio_probe(struct platform_device *pdev)
@@ -360,6 +357,7 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mxc_gpio_port *port;
struct resource *iores;
+ int irq_base;
int err;
mxc_gpio_get_hw(pdev);
@@ -398,10 +396,12 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
writel(~0, port->base + GPIO_ISR);
if (mxc_gpio_hwtype == IMX21_GPIO) {
- /* setup one handler for all GPIO interrupts */
- if (pdev->id == 0)
- irq_set_chained_handler(port->irq,
- mx2_gpio_irq_handler);
+ /*
+ * Setup one handler for all GPIO interrupts. Actually setting
+ * the handler is needed only once, but doing it for every port
+ * is more robust and easier.
+ */
+ irq_set_chained_handler(port->irq, mx2_gpio_irq_handler);
} else {
/* setup one handler for each entry */
irq_set_chained_handler(port->irq, mx3_gpio_irq_handler);
@@ -430,20 +430,30 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev)
if (err)
goto out_bgpio_remove;
- /*
- * In dt case, we use gpio number range dynamically
- * allocated by gpio core.
- */
- port->virtual_irq_start = MXC_GPIO_IRQ_START + (np ? port->bgc.gc.base :
- pdev->id * 32);
+ irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
+ if (irq_base < 0) {
+ err = irq_base;
+ goto out_gpiochip_remove;
+ }
+
+ port->domain = irq_domain_add_legacy(np, 32, irq_base, 0,
+ &irq_domain_simple_ops, NULL);
+ if (!port->domain) {
+ err = -ENODEV;
+ goto out_irqdesc_free;
+ }
/* gpio-mxc can be a generic irq chip */
- mxc_gpio_init_gc(port);
+ mxc_gpio_init_gc(port, irq_base);
list_add_tail(&port->node, &mxc_gpio_ports);
return 0;
+out_irqdesc_free:
+ irq_free_descs(irq_base, 32);
+out_gpiochip_remove:
+ WARN_ON(gpiochip_remove(&port->bgc.gc) < 0);
out_bgpio_remove:
bgpio_remove(&port->bgc);
out_iounmap:
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index c4ed1722734c..4fbc208c32cf 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -174,12 +174,22 @@ static inline void _gpio_dbck_enable(struct gpio_bank *bank)
if (bank->dbck_enable_mask && !bank->dbck_enabled) {
clk_enable(bank->dbck);
bank->dbck_enabled = true;
+
+ __raw_writel(bank->dbck_enable_mask,
+ bank->base + bank->regs->debounce_en);
}
}
static inline void _gpio_dbck_disable(struct gpio_bank *bank)
{
if (bank->dbck_enable_mask && bank->dbck_enabled) {
+ /*
+ * Disable debounce before cutting it's clock. If debounce is
+ * enabled but the clock is not, GPIO module seems to be unable
+ * to detect events and generate interrupts at least on OMAP3.
+ */
+ __raw_writel(0, bank->base + bank->regs->debounce_en);
+
clk_disable(bank->dbck);
bank->dbck_enabled = false;
}
@@ -1081,7 +1091,6 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
bank->is_mpuio = pdata->is_mpuio;
bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
bank->loses_context = pdata->loses_context;
- bank->get_context_loss_count = pdata->get_context_loss_count;
bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
bank->chip.of_node = of_node_get(node);
@@ -1135,6 +1144,9 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
omap_gpio_chip_init(bank);
omap_gpio_show_rev(bank);
+ if (bank->loses_context)
+ bank->get_context_loss_count = pdata->get_context_loss_count;
+
pm_runtime_put(bank->dev);
list_add_tail(&bank->node, &omap_gpio_list);
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
index 38416be8ba11..6064fb376e11 100644
--- a/drivers/gpio/gpio-sta2x11.c
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -383,8 +383,9 @@ static int __devinit gsta_probe(struct platform_device *dev)
}
spin_lock_init(&chip->lock);
gsta_gpio_setup(chip);
- for (i = 0; i < GSTA_NR_GPIO; i++)
- gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
+ if (gpio_pdata)
+ for (i = 0; i < GSTA_NR_GPIO; i++)
+ gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
/* 384 was used in previous code: be compatible for other drivers */
err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index c1ad2884f2ed..11f29c82253c 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -149,6 +149,9 @@ static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
tps65910_gpio->gpio_chip.dev = &pdev->dev;
+#ifdef CONFIG_OF_GPIO
+ tps65910_gpio->gpio_chip.of_node = tps65910->dev->of_node;
+#endif
if (pdata && pdata->gpio_base)
tps65910_gpio->gpio_chip.base = pdata->gpio_base;
else
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 92ea5350dfe9..aa61ad2fcaaa 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -89,8 +89,11 @@ static int wm8994_gpio_direction_out(struct gpio_chip *chip,
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
struct wm8994 *wm8994 = wm8994_gpio->wm8994;
+ if (value)
+ value = WM8994_GPN_LVL;
+
return wm8994_set_bits(wm8994, WM8994_GPIO_1 + offset,
- WM8994_GPN_DIR, 0);
+ WM8994_GPN_DIR | WM8994_GPN_LVL, value);
}
static void wm8994_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5873e481e5d2..a8743c399e83 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid,
return true;
}
+static bool valid_inferred_mode(const struct drm_connector *connector,
+ const struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+ bool ok = false;
+
+ list_for_each_entry(m, &connector->probed_modes, head) {
+ if (mode->hdisplay == m->hdisplay &&
+ mode->vdisplay == m->vdisplay &&
+ drm_mode_vrefresh(mode) == drm_mode_vrefresh(m))
+ return false; /* duplicated */
+ if (mode->hdisplay <= m->hdisplay &&
+ mode->vdisplay <= m->vdisplay)
+ ok = true;
+ }
+ return ok;
+}
+
static int
drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct detailed_timing *timing)
@@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid,
struct drm_device *dev = connector->dev;
for (i = 0; i < drm_num_dmt_modes; i++) {
- if (mode_in_range(drm_dmt_modes + i, edid, timing)) {
+ if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
+ valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
fixup_mode_1366x768(newmode);
- if (!mode_in_range(newmode, edid, timing)) {
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
@@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
fixup_mode_1366x768(newmode);
- if (!mode_in_range(newmode, edid, timing)) {
+ if (!mode_in_range(newmode, edid, timing) ||
+ !valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 9764045428ce..b7e7b49d8f62 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -78,21 +78,6 @@ static int cdv_backlight_combination_mode(struct drm_device *dev)
return REG_READ(BLC_PWM_CTL2) & PWM_LEGACY_MODE;
}
-static int cdv_get_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(bd);
- u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-
- if (cdv_backlight_combination_mode(dev)) {
- u8 lbpc;
-
- val &= ~1;
- pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
- val *= lbpc;
- }
- return val;
-}
-
static u32 cdv_get_max_backlight(struct drm_device *dev)
{
u32 max = REG_READ(BLC_PWM_CTL);
@@ -110,6 +95,22 @@ static u32 cdv_get_max_backlight(struct drm_device *dev)
return max;
}
+static int cdv_get_brightness(struct backlight_device *bd)
+{
+ struct drm_device *dev = bl_get_data(bd);
+ u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+
+ if (cdv_backlight_combination_mode(dev)) {
+ u8 lbpc;
+
+ val &= ~1;
+ pci_read_config_byte(dev->pdev, 0xF4, &lbpc);
+ val *= lbpc;
+ }
+ return (val * 100)/cdv_get_max_backlight(dev);
+
+}
+
static int cdv_set_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(bd);
@@ -120,6 +121,9 @@ static int cdv_set_brightness(struct backlight_device *bd)
if (level < 1)
level = 1;
+ level *= cdv_get_max_backlight(dev);
+ level /= 100;
+
if (cdv_backlight_combination_mode(dev)) {
u32 max = cdv_get_max_backlight(dev);
u8 lbpc;
@@ -157,7 +161,6 @@ static int cdv_backlight_init(struct drm_device *dev)
cdv_backlight_device->props.brightness =
cdv_get_brightness(cdv_backlight_device);
- cdv_backlight_device->props.max_brightness = cdv_get_max_backlight(dev);
backlight_update_status(cdv_backlight_device);
dev_priv->backlight_device = cdv_backlight_device;
return 0;
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index 4f186eca3a30..c430bd424681 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -144,6 +144,8 @@ struct opregion_asle {
#define ASLE_CBLV_VALID (1<<31)
+static struct psb_intel_opregion *system_opregion;
+
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -205,7 +207,7 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
struct drm_psb_private *dev_priv = dev->dev_private;
struct opregion_asle *asle = dev_priv->opregion.asle;
- if (asle) {
+ if (asle && system_opregion ) {
/* Don't do this on Medfield or other non PC like devices, they
use the bit for something different altogether */
psb_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);
@@ -221,7 +223,6 @@ void psb_intel_opregion_enable_asle(struct drm_device *dev)
#define ACPI_EV_LID (1<<1)
#define ACPI_EV_DOCK (1<<2)
-static struct psb_intel_opregion *system_opregion;
static int psb_intel_opregion_video_event(struct notifier_block *nb,
unsigned long val, void *data)
@@ -266,9 +267,6 @@ void psb_intel_opregion_init(struct drm_device *dev)
system_opregion = opregion;
register_acpi_notifier(&psb_intel_opregion_notifier);
}
-
- if (opregion->asle)
- psb_intel_opregion_enable_asle(dev);
}
void psb_intel_opregion_fini(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/opregion.h b/drivers/gpu/drm/gma500/opregion.h
index 72dc6b921265..4a90f8b0e16c 100644
--- a/drivers/gpu/drm/gma500/opregion.h
+++ b/drivers/gpu/drm/gma500/opregion.h
@@ -27,6 +27,7 @@ extern void psb_intel_opregion_asle_intr(struct drm_device *dev);
extern void psb_intel_opregion_init(struct drm_device *dev);
extern void psb_intel_opregion_fini(struct drm_device *dev);
extern int psb_intel_opregion_setup(struct drm_device *dev);
+extern void psb_intel_opregion_enable_asle(struct drm_device *dev);
#else
@@ -46,4 +47,8 @@ extern inline int psb_intel_opregion_setup(struct drm_device *dev)
{
return 0;
}
+
+extern inline void psb_intel_opregion_enable_asle(struct drm_device *dev)
+{
+}
#endif
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index eff039bf92d4..5971bc82b765 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -144,6 +144,10 @@ static int psb_backlight_init(struct drm_device *dev)
psb_backlight_device->props.max_brightness = 100;
backlight_update_status(psb_backlight_device);
dev_priv->backlight_device = psb_backlight_device;
+
+ /* This must occur after the backlight is properly initialised */
+ psb_lid_timer_init(dev_priv);
+
return 0;
}
@@ -354,13 +358,6 @@ static int psb_chip_setup(struct drm_device *dev)
return 0;
}
-/* Not exactly an erratum more an irritation */
-static void psb_chip_errata(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- psb_lid_timer_init(dev_priv);
-}
-
static void psb_chip_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -379,7 +376,6 @@ const struct psb_ops psb_chip_ops = {
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,
.chip_teardown = psb_chip_teardown,
- .errata = psb_chip_errata,
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index caba6e08693c..a8858a907f47 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -374,6 +374,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
if (ret)
return ret;
+ psb_intel_opregion_enable_asle(dev);
#if 0
/*enable runtime pm at last*/
pm_runtime_enable(&dev->pdev->dev);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index f94792626b94..36822b924eb1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base,
}
}
+static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
+{
+ struct apertures_struct *ap;
+ struct pci_dev *pdev = dev_priv->dev->pdev;
+ bool primary;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ ap->ranges[0].base = dev_priv->dev->agp->base;
+ ap->ranges[0].size =
+ dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+ primary =
+ pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+
+ remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
+
+ kfree(ap);
+}
+
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto free_priv;
}
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
+ goto put_bridge;
+ }
+
+ i915_kick_out_firmware_fb(dev_priv);
+
pci_set_master(dev->pdev);
/* overlay on gen2 is broken and can't address above 1G */
@@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge;
}
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto out_rmmap;
- }
-
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_mapping =
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 59d44937dd9f..84b648a7ddd8 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
rdev->vm_manager.enabled = false;
/* mark first vm as always in use, it's the system one */
+ /* allocate enough for 2 full VM pts */
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- rdev->vm_manager.max_pfn * 8,
+ rdev->vm_manager.max_pfn * 8 * 2,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
@@ -633,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
- vm->last_pfn = 0;
+ /* SI requires equal sized PTs for all VMs, so always set
+ * last_pfn to max_pfn. cayman allows variable sized
+ * pts so we can grow then as needed. Once we switch
+ * to two level pts we can unify this again.
+ */
+ if (rdev->family >= CHIP_TAHITI)
+ vm->last_pfn = rdev->vm_manager.max_pfn;
+ else
+ vm->last_pfn = 0;
/* map the ib pool buffer at 0 in virtual address space, set
* read only
*/
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f28bd4b7ef98..21ec9f5653ce 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
break;
}
drm_gem_object_unreference_unlocked(gobj);
- r = radeon_gem_handle_lockup(robj->rdev, r);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
- if (robj->rdev->asic->ioctl_wait_idle)
- robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
+ if (rdev->asic->ioctl_wait_idle)
+ robj->rdev->asic->ioctl_wait_idle(rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
- r = radeon_gem_handle_lockup(robj->rdev, r);
+ r = radeon_gem_handle_lockup(rdev, r);
return r;
}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index c7b61f16ecfd..0b0279291a73 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev)
WREG32(0x15DC, 0);
/* empty context1-15 */
- /* FIXME start with 1G, once using 2 level pt switch to full
+ /* FIXME start with 4G, once using 2 level pt switch to full
* vm size space
*/
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
- WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE);
+ WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index bef04c192768..fbf49503508d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -53,6 +53,27 @@ config HIDRAW
If unsure, say Y.
+config UHID
+ tristate "User-space I/O driver support for HID subsystem"
+ depends on HID
+ default n
+ ---help---
+ Say Y here if you want to provide HID I/O Drivers from user-space.
+ This allows to write I/O drivers in user-space and feed the data from
+ the device into the kernel. The kernel parses the HID reports, loads the
+ corresponding HID Device Driver or provides input devices on top of your
+ user-space device.
+
+ This driver cannot be used to parse HID-reports in user-space and write
+ special HID-drivers. You should use hidraw for that.
+ Instead, this driver allows to write the transport-layer driver in
+ user-space like USB-HID and Bluetooth-HID do in kernel-space.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called uhid.
+
config HID_GENERIC
tristate "Generic HID driver"
depends on HID
@@ -193,10 +214,12 @@ config HID_EZKEY
Support for Ezkey BTC 8193 keyboard.
config HID_HOLTEK
- tristate "Holtek On Line Grip based game controller support"
+ tristate "Holtek HID devices"
depends on USB_HID
---help---
- Say Y here if you have a Holtek On Line Grip based game controller.
+ Support for Holtek based devices:
+ - Holtek On Line Grip based game controller
+ - Trust GXT 18 Gaming Keyboard
config HOLTEK_FF
bool "Holtek On Line Grip force feedback support"
@@ -261,6 +284,19 @@ config HID_LCPOWER
---help---
Support for LC-Power RC1000MCE RF remote control.
+config HID_LENOVO_TPKBD
+ tristate "Lenovo ThinkPad USB Keyboard with TrackPoint"
+ depends on USB_HID
+ select NEW_LEDS
+ select LEDS_CLASS
+ ---help---
+ Support for the Lenovo ThinkPad USB Keyboard with TrackPoint.
+
+ Say Y here if you have a Lenovo ThinkPad USB Keyboard with TrackPoint
+ and would like to use device-specific features like changing the
+ sensitivity of the trackpoint, using the microphone mute button or
+ controlling the mute and microphone mute LEDs.
+
config HID_LOGITECH
tristate "Logitech devices" if EXPERT
depends on USB_HID
@@ -386,6 +422,7 @@ config HID_MULTITOUCH
- Unitec Panels
- XAT optical touch panels
- Xiroku optical touch panels
+ - Zytronic touch panels
If unsure, say N.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index ca6cc9f0485c..f975485f88b2 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -8,6 +8,7 @@ ifdef CONFIG_DEBUG_FS
endif
obj-$(CONFIG_HID) += hid.o
+obj-$(CONFIG_UHID) += uhid.o
obj-$(CONFIG_HID_GENERIC) += hid-generic.o
@@ -48,12 +49,14 @@ obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
+obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
+obj-$(CONFIG_HID_LENOVO_TPKBD) += hid-lenovo-tpkbd.o
obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
@@ -69,7 +72,8 @@ obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
- hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-pyra.o
+ hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-pyra.o \
+ hid-roccat-savu.o
obj-$(CONFIG_HID_SAITEK) += hid-saitek.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index fa10f847f7db..585344b6d338 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -517,6 +517,12 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
+ .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
index b99af346fdff..a2abb8e15727 100644
--- a/drivers/hid/hid-chicony.c
+++ b/drivers/hid/hid-chicony.c
@@ -60,6 +60,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static const struct hid_device_id ch_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ }
};
MODULE_DEVICE_TABLE(hid, ch_devices);
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6ac0286b5375..500844f04f93 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1194,8 +1194,10 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
goto out;
}
- for (a = 0; a < report->maxfield; a++)
- hid_input_field(hid, report->field[a], cdata, interrupt);
+ if (hid->claimed != HID_CLAIMED_HIDRAW) {
+ for (a = 0; a < report->maxfield; a++)
+ hid_input_field(hid, report->field[a], cdata, interrupt);
+ }
if (hid->claimed & HID_CLAIMED_INPUT)
hidinput_report_event(hid, report);
@@ -1243,6 +1245,10 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
goto unlock;
}
+ /* Avoid unnecessary overhead if debugfs is disabled */
+ if (list_empty(&hid->debug_list))
+ goto nomem;
+
buf = kmalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
if (!buf)
@@ -1373,8 +1379,10 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
hdev->claimed |= HID_CLAIMED_HIDRAW;
- if (!hdev->claimed) {
- hid_err(hdev, "claimed by neither input, hiddev nor hidraw\n");
+ /* Drivers with the ->raw_event callback set are not required to connect
+ * to any other listener. */
+ if (!hdev->claimed && !hdev->driver->raw_event) {
+ hid_err(hdev, "device has no listeners, quitting\n");
return -ENODEV;
}
@@ -1503,6 +1511,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -1518,10 +1529,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
@@ -1536,6 +1549,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
@@ -1544,6 +1558,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
@@ -1617,6 +1632,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
@@ -1995,6 +2011,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MCT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2089,6 +2106,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ }
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 2f0be4c66af7..9e43aaca9774 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -129,6 +129,8 @@ static const struct hid_device_id cp_devices[] = {
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3),
.driver_data = CP_RDESC_SWAPPED_MIN_MAX },
+ { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4),
+ .driver_data = CP_RDESC_SWAPPED_MIN_MAX },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE),
.driver_data = CP_2WHEEL_MOUSE_HACK },
{ }
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
new file mode 100644
index 000000000000..e0a5d1739fc3
--- /dev/null
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -0,0 +1,183 @@
+/*
+ * HID driver for Holtek keyboard
+ * Copyright (c) 2012 Tom Harwood
+*/
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+
+/* Holtek based keyboards (USB ID 04d9:a055) have the following issues:
+ * - The report descriptor specifies an excessively large number of consumer
+ * usages (2^15), which is more than HID_MAX_USAGES. This prevents proper
+ * parsing of the report descriptor.
+ * - The report descriptor reports on caps/scroll/num lock key presses, but
+ * doesn't have an LED output usage block.
+ *
+ * The replacement descriptor below fixes the number of consumer usages,
+ * and provides an LED output usage block. LED output events are redirected
+ * to the boot interface.
+ */
+
+static __u8 holtek_kbd_rdesc_fixed[] = {
+ /* Original report descriptor, with reduced number of consumer usages */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x80, /* Usage (Sys Control), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x19, 0x81, /* Usage Minimum (Sys Power Down), */
+ 0x29, 0x83, /* Usage Maximum (Sys Wake Up), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x05, /* Report Size (5), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0C, /* Usage Page (Consumer), */
+ 0x09, 0x01, /* Usage (Consumer Control), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x19, 0x00, /* Usage Minimum (00h), */
+ 0x2A, 0xFF, 0x2F, /* Usage Maximum (0x2FFF), previously 0x7FFF */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x26, 0xFF, 0x2F, /* Logical Maximum (0x2FFF),previously 0x7FFF*/
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x81, 0x00, /* Input, */
+ 0xC0, /* End Collection, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x03, /* Report ID (3), */
+ 0x95, 0x38, /* Report Count (56), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0xE0, /* Usage Minimum (KB Leftcontrol), */
+ 0x29, 0xE7, /* Usage Maximum (KB Right GUI), */
+ 0x19, 0x00, /* Usage Minimum (None), */
+ 0x29, 0x2F, /* Usage Maximum (KB Lboxbracket And Lbrace),*/
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x04, /* Report ID (4), */
+ 0x95, 0x38, /* Report Count (56), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x05, 0x07, /* Usage Page (Keyboard), */
+ 0x19, 0x30, /* Usage Minimum (KB Rboxbracket And Rbrace),*/
+ 0x29, 0x67, /* Usage Maximum (KP Equals), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection */
+
+ /* LED usage for the boot protocol interface */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x06, /* Usage (Keyboard), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x05, 0x08, /* Usage Page (LED), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x03, /* Usage Maximum (03h), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x91, 0x02, /* Output (Variable), */
+ 0x95, 0x05, /* Report Count (5), */
+ 0x91, 0x01, /* Output (Constant), */
+ 0xC0, /* End Collection */
+};
+
+static __u8 *holtek_kbd_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+
+ if (intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ rdesc = holtek_kbd_rdesc_fixed;
+ *rsize = sizeof(holtek_kbd_rdesc_fixed);
+ }
+ return rdesc;
+}
+
+static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code,
+ int value)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct usb_device *usb_dev = hid_to_usb_dev(hid);
+
+ /* Locate the boot interface, to receive the LED change events */
+ struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+
+ struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
+ struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+ struct hid_input, list);
+
+ return boot_hid_input->input->event(boot_hid_input->input, type, code,
+ value);
+}
+
+static int holtek_kbd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ int ret = hid_parse(hdev);
+
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ if (!ret && intf->cur_altsetting->desc.bInterfaceNumber == 1) {
+ struct hid_input *hidinput;
+ list_for_each_entry(hidinput, &hdev->inputs, list) {
+ hidinput->input->event = holtek_kbd_input_event;
+ }
+ }
+
+ return ret;
+}
+
+static const struct hid_device_id holtek_kbd_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT,
+ USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, holtek_kbd_devices);
+
+static struct hid_driver holtek_kbd_driver = {
+ .name = "holtek_kbd",
+ .id_table = holtek_kbd_devices,
+ .report_fixup = holtek_kbd_report_fixup,
+ .probe = holtek_kbd_probe
+};
+
+static int __init holtek_kbd_init(void)
+{
+ return hid_register_driver(&holtek_kbd_driver);
+}
+
+static void __exit holtek_kbd_exit(void)
+{
+ hid_unregister_driver(&holtek_kbd_driver);
+}
+
+module_exit(holtek_kbd_exit);
+module_init(holtek_kbd_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index d1cdd2d28409..41c34f21bd00 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -125,6 +125,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI 0x024c
#define USB_DEVICE_ID_APPLE_WELLSPRING6_ISO 0x024d
#define USB_DEVICE_ID_APPLE_WELLSPRING6_JIS 0x024e
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
@@ -205,6 +208,7 @@
#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123
+#define USB_DEVICE_ID_CHICONY_AK1D 0x1125
#define USB_VENDOR_ID_CHUNGHWAT 0x2247
#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
@@ -234,6 +238,7 @@
#define USB_DEVICE_ID_CYPRESS_BARCODE_1 0xde61
#define USB_DEVICE_ID_CYPRESS_BARCODE_2 0xde64
#define USB_DEVICE_ID_CYPRESS_BARCODE_3 0xbca1
+#define USB_DEVICE_ID_CYPRESS_BARCODE_4 0xed81
#define USB_DEVICE_ID_CYPRESS_TRUETOUCH 0xc001
#define USB_VENDOR_ID_DEALEXTREAME 0x10c5
@@ -407,6 +412,9 @@
#define USB_VENDOR_ID_HOLTEK 0x1241
#define USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP 0x5015
+#define USB_VENDOR_ID_HOLTEK_ALT 0x04d9
+#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD 0xa055
+
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -476,6 +484,9 @@
#define USB_DEVICE_ID_LD_HYBRID 0x2090
#define USB_DEVICE_ID_LD_HEATCONTROL 0x20A0
+#define USB_VENDOR_ID_LENOVO 0x17ef
+#define USB_DEVICE_ID_LENOVO_TPKBD 0x6009
+
#define USB_VENDOR_ID_LG 0x1fd2
#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
@@ -518,6 +529,9 @@
#define USB_DEVICE_ID_CRYSTALTOUCH 0x0006
#define USB_DEVICE_ID_CRYSTALTOUCH_DUAL 0x0007
+#define USB_VENDOR_ID_MADCATZ 0x0738
+#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
@@ -567,6 +581,9 @@
#define USB_VENDOR_ID_NINTENDO 0x057e
#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
+#define USB_VENDOR_ID_NOVATEK 0x0603
+#define USB_DEVICE_ID_NOVATEK_PCT 0x0600
+
#define USB_VENDOR_ID_NTRIG 0x1b96
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1 0x0003
@@ -644,6 +661,7 @@
#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRED 0x2c24
#define USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS 0x2cf6
+#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
@@ -653,6 +671,9 @@
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
#define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600
+#define USB_VENDOR_ID_SENNHEISER 0x1395
+#define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c
+
#define USB_VENDOR_ID_SIGMA_MICRO 0x1c4f
#define USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD 0x0002
@@ -802,6 +823,9 @@
#define USB_VENDOR_ID_ZYDACRON 0x13EC
#define USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL 0x0006
+#define USB_VENDOR_ID_ZYTRONIC 0x14c8
+#define USB_DEVICE_ID_ZYTRONIC_ZXY100 0x0005
+
#define USB_VENDOR_ID_PRIMAX 0x0461
#define USB_DEVICE_ID_PRIMAX_KEYBOARD 0x4e05
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 132b0019365e..811bfad64609 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -301,6 +301,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{}
};
@@ -834,6 +837,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
break;
+ case HID_UP_HPVENDOR2:
+ set_bit(EV_REP, input->evbit);
+ switch (usage->hid & HID_USAGE) {
+ case 0x003: map_key_clear(KEY_BRIGHTNESSDOWN); break;
+ case 0x004: map_key_clear(KEY_BRIGHTNESSUP); break;
+ default: goto ignore;
+ }
+ break;
+
case HID_UP_MSVENDOR:
goto ignore;
diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
new file mode 100644
index 000000000000..77d2df04c97b
--- /dev/null
+++ b/drivers/hid/hid-lenovo-tpkbd.c
@@ -0,0 +1,564 @@
+/*
+ * HID driver for Lenovo ThinkPad USB Keyboard with TrackPoint
+ *
+ * Copyright (c) 2012 Bernhard Seibold
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/usb.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include "usbhid/usbhid.h"
+
+#include "hid-ids.h"
+
+/* This is only used for the trackpoint part of the driver, hence _tp */
+struct tpkbd_data_pointer {
+ int led_state;
+ struct led_classdev led_mute;
+ struct led_classdev led_micmute;
+ int press_to_select;
+ int dragging;
+ int release_to_select;
+ int select_right;
+ int sensitivity;
+ int press_speed;
+};
+
+#define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c))
+
+static int tpkbd_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi, struct hid_field *field,
+ struct hid_usage *usage, unsigned long **bit, int *max)
+{
+ struct usbhid_device *uhdev;
+
+ uhdev = (struct usbhid_device *) hdev->driver_data;
+ if (uhdev->ifnum == 1 && usage->hid == (HID_UP_BUTTON | 0x0010)) {
+ map_key_clear(KEY_MICMUTE);
+ return 1;
+ }
+ return 0;
+}
+
+#undef map_key_clear
+
+static int tpkbd_features_set(struct hid_device *hdev)
+{
+ struct hid_report *report;
+ struct tpkbd_data_pointer *data_pointer;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+ report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[4];
+
+ report->field[0]->value[0] = data_pointer->press_to_select ? 0x01 : 0x02;
+ report->field[0]->value[0] |= data_pointer->dragging ? 0x04 : 0x08;
+ report->field[0]->value[0] |= data_pointer->release_to_select ? 0x10 : 0x20;
+ report->field[0]->value[0] |= data_pointer->select_right ? 0x80 : 0x40;
+ report->field[1]->value[0] = 0x03; // unknown setting, imitate windows driver
+ report->field[2]->value[0] = data_pointer->sensitivity;
+ report->field[3]->value[0] = data_pointer->press_speed;
+
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+ return 0;
+}
+
+static ssize_t pointer_press_to_select_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select);
+}
+
+static ssize_t pointer_press_to_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->press_to_select = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_dragging_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->dragging);
+}
+
+static ssize_t pointer_dragging_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->dragging = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_release_to_select_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select);
+}
+
+static ssize_t pointer_release_to_select_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->release_to_select = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_select_right_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right);
+}
+
+static ssize_t pointer_select_right_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value))
+ return -EINVAL;
+ if (value < 0 || value > 1)
+ return -EINVAL;
+
+ data_pointer->select_right = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_sensitivity_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ data_pointer->sensitivity);
+}
+
+static ssize_t pointer_sensitivity_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
+ return -EINVAL;
+
+ data_pointer->sensitivity = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static ssize_t pointer_press_speed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ data_pointer->press_speed);
+}
+
+static ssize_t pointer_press_speed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int value;
+
+ hdev = container_of(dev, struct hid_device, dev);
+ if (hdev == NULL)
+ return -ENODEV;
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (kstrtoint(buf, 10, &value) || value < 1 || value > 255)
+ return -EINVAL;
+
+ data_pointer->press_speed = value;
+ tpkbd_features_set(hdev);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_pointer_press_to_select =
+ __ATTR(press_to_select, S_IWUSR | S_IRUGO,
+ pointer_press_to_select_show,
+ pointer_press_to_select_store);
+
+static struct device_attribute dev_attr_pointer_dragging =
+ __ATTR(dragging, S_IWUSR | S_IRUGO,
+ pointer_dragging_show,
+ pointer_dragging_store);
+
+static struct device_attribute dev_attr_pointer_release_to_select =
+ __ATTR(release_to_select, S_IWUSR | S_IRUGO,
+ pointer_release_to_select_show,
+ pointer_release_to_select_store);
+
+static struct device_attribute dev_attr_pointer_select_right =
+ __ATTR(select_right, S_IWUSR | S_IRUGO,
+ pointer_select_right_show,
+ pointer_select_right_store);
+
+static struct device_attribute dev_attr_pointer_sensitivity =
+ __ATTR(sensitivity, S_IWUSR | S_IRUGO,
+ pointer_sensitivity_show,
+ pointer_sensitivity_store);
+
+static struct device_attribute dev_attr_pointer_press_speed =
+ __ATTR(press_speed, S_IWUSR | S_IRUGO,
+ pointer_press_speed_show,
+ pointer_press_speed_store);
+
+static struct attribute *tpkbd_attributes_pointer[] = {
+ &dev_attr_pointer_press_to_select.attr,
+ &dev_attr_pointer_dragging.attr,
+ &dev_attr_pointer_release_to_select.attr,
+ &dev_attr_pointer_select_right.attr,
+ &dev_attr_pointer_sensitivity.attr,
+ &dev_attr_pointer_press_speed.attr,
+ NULL
+};
+
+static const struct attribute_group tpkbd_attr_group_pointer = {
+ .attrs = tpkbd_attributes_pointer,
+};
+
+static enum led_brightness tpkbd_led_brightness_get(
+ struct led_classdev *led_cdev)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct tpkbd_data_pointer *data_pointer;
+ int led_nr = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (led_cdev == &data_pointer->led_micmute)
+ led_nr = 1;
+
+ return data_pointer->led_state & (1 << led_nr)
+ ? LED_FULL
+ : LED_OFF;
+}
+
+static void tpkbd_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct device *dev;
+ struct hid_device *hdev;
+ struct hid_report *report;
+ struct tpkbd_data_pointer *data_pointer;
+ int led_nr = 0;
+
+ dev = led_cdev->dev->parent;
+ hdev = container_of(dev, struct hid_device, dev);
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ if (led_cdev == &data_pointer->led_micmute)
+ led_nr = 1;
+
+ if (value == LED_OFF)
+ data_pointer->led_state &= ~(1 << led_nr);
+ else
+ data_pointer->led_state |= 1 << led_nr;
+
+ report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3];
+ report->field[0]->value[0] = (data_pointer->led_state >> 0) & 1;
+ report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1;
+ usbhid_submit_report(hdev, report, USB_DIR_OUT);
+}
+
+static int tpkbd_probe_tp(struct hid_device *hdev)
+{
+ struct device *dev = &hdev->dev;
+ struct tpkbd_data_pointer *data_pointer;
+ size_t name_sz = strlen(dev_name(dev)) + 16;
+ char *name_mute, *name_micmute;
+ int ret;
+
+ if (sysfs_create_group(&hdev->dev.kobj,
+ &tpkbd_attr_group_pointer)) {
+ hid_warn(hdev, "Could not create sysfs group\n");
+ }
+
+ data_pointer = kzalloc(sizeof(struct tpkbd_data_pointer), GFP_KERNEL);
+ if (data_pointer == NULL) {
+ hid_err(hdev, "Could not allocate memory for driver data\n");
+ return -ENOMEM;
+ }
+
+ // set same default values as windows driver
+ data_pointer->sensitivity = 0xa0;
+ data_pointer->press_speed = 0x38;
+
+ name_mute = kzalloc(name_sz, GFP_KERNEL);
+ if (name_mute == NULL) {
+ hid_err(hdev, "Could not allocate memory for led data\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev));
+
+ name_micmute = kzalloc(name_sz, GFP_KERNEL);
+ if (name_micmute == NULL) {
+ hid_err(hdev, "Could not allocate memory for led data\n");
+ ret = -ENOMEM;
+ goto err2;
+ }
+ snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev));
+
+ hid_set_drvdata(hdev, data_pointer);
+
+ data_pointer->led_mute.name = name_mute;
+ data_pointer->led_mute.brightness_get = tpkbd_led_brightness_get;
+ data_pointer->led_mute.brightness_set = tpkbd_led_brightness_set;
+ data_pointer->led_mute.dev = dev;
+ led_classdev_register(dev, &data_pointer->led_mute);
+
+ data_pointer->led_micmute.name = name_micmute;
+ data_pointer->led_micmute.brightness_get = tpkbd_led_brightness_get;
+ data_pointer->led_micmute.brightness_set = tpkbd_led_brightness_set;
+ data_pointer->led_micmute.dev = dev;
+ led_classdev_register(dev, &data_pointer->led_micmute);
+
+ tpkbd_features_set(hdev);
+
+ return 0;
+
+err2:
+ kfree(name_mute);
+err:
+ kfree(data_pointer);
+ return ret;
+}
+
+static int tpkbd_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+ struct usbhid_device *uhdev;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "hid_parse failed\n");
+ goto err_free;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (ret) {
+ hid_err(hdev, "hid_hw_start failed\n");
+ goto err_free;
+ }
+
+ uhdev = (struct usbhid_device *) hdev->driver_data;
+
+ if (uhdev->ifnum == 1)
+ return tpkbd_probe_tp(hdev);
+
+ return 0;
+err_free:
+ return ret;
+}
+
+static void tpkbd_remove_tp(struct hid_device *hdev)
+{
+ struct tpkbd_data_pointer *data_pointer;
+
+ sysfs_remove_group(&hdev->dev.kobj,
+ &tpkbd_attr_group_pointer);
+
+ data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev);
+
+ led_classdev_unregister(&data_pointer->led_micmute);
+ led_classdev_unregister(&data_pointer->led_mute);
+
+ hid_set_drvdata(hdev, NULL);
+ kfree(data_pointer);
+}
+
+static void tpkbd_remove(struct hid_device *hdev)
+{
+ struct usbhid_device *uhdev;
+
+ uhdev = (struct usbhid_device *) hdev->driver_data;
+ if (uhdev->ifnum == 1)
+ tpkbd_remove_tp(hdev);
+
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id tpkbd_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, tpkbd_devices);
+
+static struct hid_driver tpkbd_driver = {
+ .name = "lenovo_tpkbd",
+ .id_table = tpkbd_devices,
+ .input_mapping = tpkbd_input_mapping,
+ .probe = tpkbd_probe,
+ .remove = tpkbd_remove,
+};
+
+static int __init tpkbd_init(void)
+{
+ return hid_register_driver(&tpkbd_driver);
+}
+
+static void __exit tpkbd_exit(void)
+{
+ hid_unregister_driver(&tpkbd_driver);
+}
+
+module_init(tpkbd_init);
+module_exit(tpkbd_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 40ac6654f1d1..73647266daad 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -16,6 +16,7 @@
#include <linux/device.h>
#include <linux/hid.h>
+#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
@@ -48,10 +49,6 @@ static bool scroll_acceleration = false;
module_param(scroll_acceleration, bool, 0644);
MODULE_PARM_DESC(scroll_acceleration, "Accelerate sequential scroll events");
-static bool report_touches = true;
-module_param(report_touches, bool, 0644);
-MODULE_PARM_DESC(report_touches, "Emit touch records (otherwise, only use them for emulation)");
-
static bool report_undeciphered;
module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
@@ -72,15 +69,6 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define SCROLL_ACCEL_DEFAULT 7
-/* Single touch emulation should only begin when no touches are currently down.
- * This is true when single_touch_id is equal to NO_TOUCHES. If multiple touches
- * are down and the touch providing for single touch emulation is lifted,
- * single_touch_id is equal to SINGLE_TOUCH_UP. While single touch emulation is
- * occurring, single_touch_id corresponds with the tracking id of the touch used.
- */
-#define NO_TOUCHES -1
-#define SINGLE_TOUCH_UP -2
-
/* Touch surface information. Dimension is in hundredths of a mm, min and max
* are in units. */
#define MOUSE_DIMENSION_X (float)9056
@@ -129,7 +117,6 @@ struct magicmouse_sc {
u8 size;
} touches[16];
int tracking_ids[16];
- int single_touch_id;
};
static int magicmouse_firm_touch(struct magicmouse_sc *msc)
@@ -268,16 +255,14 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
}
}
- if (down) {
+ if (down)
msc->ntouches++;
- if (msc->single_touch_id == NO_TOUCHES)
- msc->single_touch_id = id;
- } else if (msc->single_touch_id == id)
- msc->single_touch_id = SINGLE_TOUCH_UP;
+
+ input_mt_slot(input, id);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, down);
/* Generate the input events for this touch. */
- if (report_touches && down) {
- input_report_abs(input, ABS_MT_TRACKING_ID, id);
+ if (down) {
input_report_abs(input, ABS_MT_TOUCH_MAJOR, touch_major << 2);
input_report_abs(input, ABS_MT_TOUCH_MINOR, touch_minor << 2);
input_report_abs(input, ABS_MT_ORIENTATION, -orientation);
@@ -290,8 +275,6 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
else /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_event(input, EV_MSC, MSC_RAW, tdata[8]);
}
-
- input_mt_sync(input);
}
}
@@ -312,12 +295,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 9 + 4);
- /* We don't need an MT sync here because trackpad emits a
- * BTN_TOUCH event in a new frame when all touches are released.
- */
- if (msc->ntouches == 0)
- msc->single_touch_id = NO_TOUCHES;
-
clicks = data[1];
/* The following bits provide a device specific timestamp. They
@@ -335,9 +312,6 @@ static int magicmouse_raw_event(struct hid_device *hdev,
for (ii = 0; ii < npoints; ii++)
magicmouse_emit_touch(msc, ii, data + ii * 8 + 6);
- if (report_touches && msc->ntouches == 0)
- input_mt_sync(input);
-
/* When emulating three-button mode, it is important
* to have the current touch information before
* generating a click event.
@@ -370,25 +344,17 @@ static int magicmouse_raw_event(struct hid_device *hdev,
input_report_rel(input, REL_Y, y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_report_key(input, BTN_MOUSE, clicks & 1);
- input_report_key(input, BTN_TOUCH, msc->ntouches > 0);
- input_report_key(input, BTN_TOOL_FINGER, msc->ntouches == 1);
- input_report_key(input, BTN_TOOL_DOUBLETAP, msc->ntouches == 2);
- input_report_key(input, BTN_TOOL_TRIPLETAP, msc->ntouches == 3);
- input_report_key(input, BTN_TOOL_QUADTAP, msc->ntouches == 4);
- if (msc->single_touch_id >= 0) {
- input_report_abs(input, ABS_X,
- msc->touches[msc->single_touch_id].x);
- input_report_abs(input, ABS_Y,
- msc->touches[msc->single_touch_id].y);
- }
+ input_mt_report_pointer_emulation(input, true);
}
input_sync(input);
return 1;
}
-static void magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
+static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
{
+ int error;
+
__set_bit(EV_KEY, input->evbit);
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
@@ -417,62 +383,66 @@ static void magicmouse_setup_input(struct input_dev *input, struct hid_device *h
__set_bit(BTN_TOOL_DOUBLETAP, input->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, input->keybit);
__set_bit(BTN_TOOL_QUADTAP, input->keybit);
+ __set_bit(BTN_TOOL_QUINTTAP, input->keybit);
__set_bit(BTN_TOUCH, input->keybit);
__set_bit(INPUT_PROP_POINTER, input->propbit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
- if (report_touches) {
- __set_bit(EV_ABS, input->evbit);
-
- input_set_abs_params(input, ABS_MT_TRACKING_ID, 0, 15, 0, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
- 4, 0);
- input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
- 4, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
-
- /* Note: Touch Y position from the device is inverted relative
- * to how pointer motion is reported (and relative to how USB
- * HID recommends the coordinates work). This driver keeps
- * the origin at the same position, and just uses the additive
- * inverse of the reported Y.
- */
- if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
- input_set_abs_params(input, ABS_MT_POSITION_X,
- MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y,
- MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
-
- input_abs_set_res(input, ABS_MT_POSITION_X,
- MOUSE_RES_X);
- input_abs_set_res(input, ABS_MT_POSITION_Y,
- MOUSE_RES_Y);
- } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
- input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
- TRACKPAD_MAX_X, 4, 0);
- input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
- TRACKPAD_MAX_Y, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_X,
- TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y,
- TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
-
- input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
- input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
- input_abs_set_res(input, ABS_MT_POSITION_X,
- TRACKPAD_RES_X);
- input_abs_set_res(input, ABS_MT_POSITION_Y,
- TRACKPAD_RES_Y);
- }
- input_set_events_per_packet(input, 60);
+ __set_bit(EV_ABS, input->evbit);
+
+ error = input_mt_init_slots(input, 16);
+ if (error)
+ return error;
+ input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
+ 4, 0);
+ input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
+ 4, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
+
+ /* Note: Touch Y position from the device is inverted relative
+ * to how pointer motion is reported (and relative to how USB
+ * HID recommends the coordinates work). This driver keeps
+ * the origin at the same position, and just uses the additive
+ * inverse of the reported Y.
+ */
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ MOUSE_MIN_Y, MOUSE_MAX_Y, 4, 0);
+
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ MOUSE_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ MOUSE_RES_Y);
+ } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
+ TRACKPAD_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
+ TRACKPAD_MAX_Y, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ TRACKPAD_MIN_X, TRACKPAD_MAX_X, 4, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ TRACKPAD_MIN_Y, TRACKPAD_MAX_Y, 4, 0);
+
+ input_abs_set_res(input, ABS_X, TRACKPAD_RES_X);
+ input_abs_set_res(input, ABS_Y, TRACKPAD_RES_Y);
+ input_abs_set_res(input, ABS_MT_POSITION_X,
+ TRACKPAD_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y,
+ TRACKPAD_RES_Y);
}
+ input_set_events_per_packet(input, 60);
+
if (report_undeciphered) {
__set_bit(EV_MSC, input->evbit);
__set_bit(MSC_RAW, input->mscbit);
}
+
+ return 0;
}
static int magicmouse_input_mapping(struct hid_device *hdev,
@@ -511,8 +481,6 @@ static int magicmouse_probe(struct hid_device *hdev,
msc->quirks = id->driver_data;
hid_set_drvdata(hdev, msc);
- msc->single_touch_id = NO_TOUCHES;
-
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "magicmouse hid parse failed\n");
@@ -528,8 +496,13 @@ static int magicmouse_probe(struct hid_device *hdev,
/* We do this after hid-input is done parsing reports so that
* hid-input uses the most natural button and axis IDs.
*/
- if (msc->input)
- magicmouse_setup_input(msc->input, hdev);
+ if (msc->input) {
+ ret = magicmouse_setup_input(msc->input, hdev);
+ if (ret) {
+ hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
+ goto err_stop_hw;
+ }
+ }
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6e3332a99976..59c8b5c1d2de 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -83,6 +83,7 @@ struct mt_device {
unsigned last_field_index; /* last field index of the report */
unsigned last_slot_field; /* the last field of a slot */
__s8 inputmode; /* InputMode HID feature, -1 if non-existent */
+ __s8 inputmode_index; /* InputMode HID feature index in the report */
__s8 maxcontact_report_id; /* Maximum Contact Number HID feature,
-1 if non-existent */
__u8 num_received; /* how many contacts we received */
@@ -260,10 +261,20 @@ static void mt_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
struct mt_device *td = hid_get_drvdata(hdev);
+ int i;
switch (usage->hid) {
case HID_DG_INPUTMODE:
td->inputmode = field->report->id;
+ td->inputmode_index = 0; /* has to be updated below */
+
+ for (i=0; i < field->maxusage; i++) {
+ if (field->usage[i].hid == usage->hid) {
+ td->inputmode_index = i;
+ break;
+ }
+ }
+
break;
case HID_DG_CONTACTMAX:
td->maxcontact_report_id = field->report->id;
@@ -618,7 +629,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
re = &(hdev->report_enum[HID_FEATURE_REPORT]);
r = re->report_id_hash[td->inputmode];
if (r) {
- r->field[0]->value[0] = 0x02;
+ r->field[0]->value[td->inputmode_index] = 0x02;
usbhid_submit_report(hdev, r, USB_DIR_OUT);
}
}
@@ -951,6 +962,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_PANASONIC,
USB_DEVICE_ID_PANABOARD_UBT880) },
+ /* Novatek Panel */
+ { .driver_data = MT_CLS_DEFAULT,
+ MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
+ USB_DEVICE_ID_NOVATEK_PCT) },
+
/* PenMount panels */
{ .driver_data = MT_CLS_CONFIDENCE,
MT_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
@@ -1048,6 +1064,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
USB_DEVICE_ID_XIROKU_CSR2) },
+ /* Zytronic panels */
+ { .driver_data = MT_CLS_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_ZYTRONIC,
+ USB_DEVICE_ID_ZYTRONIC_ZXY100) },
+
/* Generic MT device */
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
{ }
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 45c3433f7986..27c8ebdfad01 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -1846,7 +1846,7 @@ static void picolcd_debug_out_report(struct picolcd_data *data,
#define BUFF_SZ 256
/* Avoid unnecessary overhead if debugfs is disabled */
- if (!hdev->debug_events)
+ if (list_empty(&hdev->debug_list))
return;
buff = kmalloc(BUFF_SZ, GFP_ATOMIC);
@@ -2613,11 +2613,7 @@ static int picolcd_probe(struct hid_device *hdev,
goto err_cleanup_data;
}
- /* We don't use hidinput but hid_hw_start() fails if nothing is
- * claimed. So spoof claimed input. */
- hdev->claimed = HID_CLAIMED_INPUT;
error = hid_hw_start(hdev, 0);
- hdev->claimed = 0;
if (error) {
hid_err(hdev, "hardware start failed\n");
goto err_cleanup_data;
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index 093bfad00b02..327f9b8ed1f4 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -39,7 +39,7 @@ static ssize_t arvo_sysfs_show_mode_key(struct device *dev,
int retval;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, ARVO_COMMAND_MODE_KEY,
+ retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_MODE_KEY,
&temp_buf, sizeof(struct arvo_mode_key));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -67,7 +67,7 @@ static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
temp_buf.state = state;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_COMMAND_MODE_KEY,
+ retval = roccat_common2_send(usb_dev, ARVO_COMMAND_MODE_KEY,
&temp_buf, sizeof(struct arvo_mode_key));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -87,7 +87,7 @@ static ssize_t arvo_sysfs_show_key_mask(struct device *dev,
int retval;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, ARVO_COMMAND_KEY_MASK,
+ retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_KEY_MASK,
&temp_buf, sizeof(struct arvo_key_mask));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -115,7 +115,7 @@ static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
temp_buf.key_mask = key_mask;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_COMMAND_KEY_MASK,
+ retval = roccat_common2_send(usb_dev, ARVO_COMMAND_KEY_MASK,
&temp_buf, sizeof(struct arvo_key_mask));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -130,7 +130,7 @@ static int arvo_get_actual_profile(struct usb_device *usb_dev)
struct arvo_actual_profile temp_buf;
int retval;
- retval = roccat_common_receive(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_receive(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
&temp_buf, sizeof(struct arvo_actual_profile));
if (retval)
@@ -170,7 +170,7 @@ static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
temp_buf.actual_profile = profile;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_send(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
&temp_buf, sizeof(struct arvo_actual_profile));
if (!retval) {
arvo->actual_profile = profile;
@@ -194,7 +194,7 @@ static ssize_t arvo_sysfs_write(struct file *fp,
return -EINVAL;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, command, buf, real_size);
+ retval = roccat_common2_send(usb_dev, command, buf, real_size);
mutex_unlock(&arvo->arvo_lock);
return (retval ? retval : real_size);
@@ -217,7 +217,7 @@ static ssize_t arvo_sysfs_read(struct file *fp,
return -EINVAL;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, command, buf, real_size);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
mutex_unlock(&arvo->arvo_lock);
return (retval ? retval : real_size);
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index a6d93992c75a..74f704032627 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -16,12 +16,12 @@
#include <linux/module.h>
#include "hid-roccat-common.h"
-static inline uint16_t roccat_common_feature_report(uint8_t report_id)
+static inline uint16_t roccat_common2_feature_report(uint8_t report_id)
{
return 0x300 | report_id;
}
-int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
+int roccat_common2_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size)
{
char *buf;
@@ -34,16 +34,16 @@ int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
HID_REQ_GET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- roccat_common_feature_report(report_id),
+ roccat_common2_feature_report(report_id),
0, buf, size, USB_CTRL_SET_TIMEOUT);
memcpy(data, buf, size);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
-EXPORT_SYMBOL_GPL(roccat_common_receive);
+EXPORT_SYMBOL_GPL(roccat_common2_receive);
-int roccat_common_send(struct usb_device *usb_dev, uint report_id,
+int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size)
{
char *buf;
@@ -56,13 +56,71 @@ int roccat_common_send(struct usb_device *usb_dev, uint report_id,
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- roccat_common_feature_report(report_id),
+ roccat_common2_feature_report(report_id),
0, buf, size, USB_CTRL_SET_TIMEOUT);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
}
-EXPORT_SYMBOL_GPL(roccat_common_send);
+EXPORT_SYMBOL_GPL(roccat_common2_send);
+
+enum roccat_common2_control_states {
+ ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD = 0,
+ ROCCAT_COMMON_CONTROL_STATUS_OK = 1,
+ ROCCAT_COMMON_CONTROL_STATUS_INVALID = 2,
+ ROCCAT_COMMON_CONTROL_STATUS_WAIT = 3,
+};
+
+static int roccat_common2_receive_control_status(struct usb_device *usb_dev)
+{
+ int retval;
+ struct roccat_common2_control control;
+
+ do {
+ msleep(50);
+ retval = roccat_common2_receive(usb_dev,
+ ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
+
+ if (retval)
+ return retval;
+
+ switch (control.value) {
+ case ROCCAT_COMMON_CONTROL_STATUS_OK:
+ return 0;
+ case ROCCAT_COMMON_CONTROL_STATUS_WAIT:
+ msleep(500);
+ continue;
+ case ROCCAT_COMMON_CONTROL_STATUS_INVALID:
+
+ case ROCCAT_COMMON_CONTROL_STATUS_OVERLOAD:
+ /* seems to be critical - replug necessary */
+ return -EINVAL;
+ default:
+ dev_err(&usb_dev->dev,
+ "roccat_common2_receive_control_status: "
+ "unknown response value 0x%x\n",
+ control.value);
+ return -EINVAL;
+ }
+
+ } while (1);
+}
+
+int roccat_common2_send_with_status(struct usb_device *usb_dev,
+ uint command, void const *buf, uint size)
+{
+ int retval;
+
+ retval = roccat_common2_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
+
+ msleep(100);
+
+ return roccat_common2_receive_control_status(usb_dev);
+}
+EXPORT_SYMBOL_GPL(roccat_common2_send_with_status);
MODULE_AUTHOR("Stefan Achatz");
MODULE_DESCRIPTION("USB Roccat common driver");
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index 9a5bc61f9699..a97746a63b70 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -15,9 +15,21 @@
#include <linux/usb.h>
#include <linux/types.h>
-int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
+enum roccat_common2_commands {
+ ROCCAT_COMMON_COMMAND_CONTROL = 0x4,
+};
+
+struct roccat_common2_control {
+ uint8_t command;
+ uint8_t value;
+ uint8_t request; /* always 0 on requesting write check */
+} __packed;
+
+int roccat_common2_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size);
-int roccat_common_send(struct usb_device *usb_dev, uint report_id,
+int roccat_common2_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size);
+int roccat_common2_send_with_status(struct usb_device *usb_dev,
+ uint command, void const *buf, uint size);
#endif
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 0e4a0ab47142..5669916c2943 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -36,51 +36,7 @@ static void isku_profile_activated(struct isku_device *isku, uint new_profile)
static int isku_receive(struct usb_device *usb_dev, uint command,
void *buf, uint size)
{
- return roccat_common_receive(usb_dev, command, buf, size);
-}
-
-static int isku_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct isku_control control;
-
- do {
- msleep(50);
- retval = isku_receive(usb_dev, ISKU_COMMAND_CONTROL,
- &control, sizeof(struct isku_control));
-
- if (retval)
- return retval;
-
- switch (control.value) {
- case ISKU_CONTROL_VALUE_STATUS_OK:
- return 0;
- case ISKU_CONTROL_VALUE_STATUS_WAIT:
- continue;
- case ISKU_CONTROL_VALUE_STATUS_INVALID:
- /* seems to be critical - replug necessary */
- case ISKU_CONTROL_VALUE_STATUS_OVERLOAD:
- return -EINVAL;
- default:
- hid_err(usb_dev, "isku_receive_control_status: "
- "unknown response value 0x%x\n",
- control.value);
- return -EINVAL;
- }
-
- } while (1);
-}
-
-static int isku_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
-
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
-
- return isku_receive_control_status(usb_dev);
+ return roccat_common2_receive(usb_dev, command, buf, size);
}
static int isku_get_actual_profile(struct usb_device *usb_dev)
@@ -100,7 +56,8 @@ static int isku_set_actual_profile(struct usb_device *usb_dev, int new_profile)
buf.command = ISKU_COMMAND_ACTUAL_PROFILE;
buf.size = sizeof(struct isku_actual_profile);
buf.actual_profile = new_profile;
- return isku_send(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE, &buf,
+ return roccat_common2_send_with_status(usb_dev,
+ ISKU_COMMAND_ACTUAL_PROFILE, &buf,
sizeof(struct isku_actual_profile));
}
@@ -197,7 +154,8 @@ static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&isku->isku_lock);
- retval = isku_send(usb_dev, command, (void *)buf, real_size);
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ (void *)buf, real_size);
mutex_unlock(&isku->isku_lock);
return retval ? retval : real_size;
diff --git a/drivers/hid/hid-roccat-isku.h b/drivers/hid/hid-roccat-isku.h
index 075f6efaec58..605b3ce21638 100644
--- a/drivers/hid/hid-roccat-isku.h
+++ b/drivers/hid/hid-roccat-isku.h
@@ -25,13 +25,6 @@ struct isku_control {
uint8_t request;
} __packed;
-enum isku_control_values {
- ISKU_CONTROL_VALUE_STATUS_OVERLOAD = 0,
- ISKU_CONTROL_VALUE_STATUS_OK = 1,
- ISKU_CONTROL_VALUE_STATUS_INVALID = 2,
- ISKU_CONTROL_VALUE_STATUS_WAIT = 3,
-};
-
struct isku_actual_profile {
uint8_t command; /* ISKU_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 40090d602158..9ce2d0b615a4 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -138,7 +138,7 @@ static int kone_check_write(struct usb_device *usb_dev)
return 0;
/* unknown answer */
- hid_err(usb_dev, "got retval %d when checking write\n", data);
+ dev_err(&usb_dev->dev, "got retval %d when checking write\n", data);
return -EIO;
}
@@ -503,7 +503,7 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
retval = kone_set_settings(usb_dev, &kone->settings);
if (retval) {
- hid_err(usb_dev, "couldn't set tcu state\n");
+ dev_err(&usb_dev->dev, "couldn't set tcu state\n");
/*
* try to reread valid settings into buffer overwriting
* first error code
@@ -519,7 +519,7 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
retval = size;
exit_no_settings:
- hid_err(usb_dev, "couldn't read settings\n");
+ dev_err(&usb_dev->dev, "couldn't read settings\n");
exit_unlock:
mutex_unlock(&kone->kone_lock);
return retval;
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 59e47770fa10..f5602fec4865 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -39,88 +39,26 @@ static void koneplus_profile_activated(struct koneplus_device *koneplus,
static int koneplus_send_control(struct usb_device *usb_dev, uint value,
enum koneplus_control_requests request)
{
- struct koneplus_control control;
+ struct roccat_common2_control control;
if ((request == KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
value > 4)
return -EINVAL;
- control.command = KONEPLUS_COMMAND_CONTROL;
+ control.command = ROCCAT_COMMON_COMMAND_CONTROL;
control.value = value;
control.request = request;
- return roccat_common_send(usb_dev, KONEPLUS_COMMAND_CONTROL,
- &control, sizeof(struct koneplus_control));
-}
-
-static int koneplus_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct koneplus_control control;
-
- do {
- retval = roccat_common_receive(usb_dev, KONEPLUS_COMMAND_CONTROL,
- &control, sizeof(struct koneplus_control));
-
- /* check if we get a completely wrong answer */
- if (retval)
- return retval;
-
- if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OK)
- return 0;
-
- /* indicates that hardware needs some more time to complete action */
- if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_WAIT) {
- msleep(500); /* windows driver uses 1000 */
- continue;
- }
-
- /* seems to be critical - replug necessary */
- if (control.value == KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
- return -EINVAL;
-
- hid_err(usb_dev, "koneplus_receive_control_status: "
- "unknown response value 0x%x\n", control.value);
- return -EINVAL;
- } while (1);
-}
-
-static int koneplus_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
-
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
-
- return koneplus_receive_control_status(usb_dev);
-}
-
-static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
- enum koneplus_control_requests request)
-{
- int retval;
-
- retval = koneplus_send_control(usb_dev, number, request);
- if (retval)
- return retval;
-
- /* allow time to settle things - windows driver uses 500 */
- msleep(100);
-
- retval = koneplus_receive_control_status(usb_dev);
- if (retval)
- return retval;
-
- return 0;
+ return roccat_common2_send_with_status(usb_dev,
+ ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
}
static int koneplus_get_info(struct usb_device *usb_dev,
struct koneplus_info *buf)
{
- return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_INFO,
+ return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_INFO,
buf, sizeof(struct koneplus_info));
}
@@ -129,19 +67,20 @@ static int koneplus_get_profile_settings(struct usb_device *usb_dev,
{
int retval;
- retval = koneplus_select_profile(usb_dev, number,
+ retval = koneplus_send_control(usb_dev, number,
KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct koneplus_profile_settings));
}
static int koneplus_set_profile_settings(struct usb_device *usb_dev,
struct koneplus_profile_settings const *settings)
{
- return koneplus_send(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_send_with_status(usb_dev,
+ KONEPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct koneplus_profile_settings));
}
@@ -150,19 +89,20 @@ static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
{
int retval;
- retval = koneplus_select_profile(usb_dev, number,
+ retval = koneplus_send_control(usb_dev, number,
KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct koneplus_profile_buttons));
}
static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
struct koneplus_profile_buttons const *buttons)
{
- return koneplus_send(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_send_with_status(usb_dev,
+ KONEPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct koneplus_profile_buttons));
}
@@ -172,7 +112,7 @@ static int koneplus_get_actual_profile(struct usb_device *usb_dev)
struct koneplus_actual_profile buf;
int retval;
- retval = roccat_common_receive(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_receive(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct koneplus_actual_profile));
return retval ? retval : buf.actual_profile;
@@ -187,7 +127,8 @@ static int koneplus_set_actual_profile(struct usb_device *usb_dev,
buf.size = sizeof(struct koneplus_actual_profile);
buf.actual_profile = new_profile;
- return koneplus_send(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
+ return roccat_common2_send_with_status(usb_dev,
+ KONEPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct koneplus_actual_profile));
}
@@ -208,7 +149,7 @@ static ssize_t koneplus_sysfs_read(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&koneplus->koneplus_lock);
- retval = roccat_common_receive(usb_dev, command, buf, real_size);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
mutex_unlock(&koneplus->koneplus_lock);
if (retval)
@@ -231,7 +172,8 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
return -EINVAL;
mutex_lock(&koneplus->koneplus_lock);
- retval = koneplus_send(usb_dev, command, buf, real_size);
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ buf, real_size);
mutex_unlock(&koneplus->koneplus_lock);
if (retval)
diff --git a/drivers/hid/hid-roccat-koneplus.h b/drivers/hid/hid-roccat-koneplus.h
index c03332a4fa9a..7074b2a4b94b 100644
--- a/drivers/hid/hid-roccat-koneplus.h
+++ b/drivers/hid/hid-roccat-koneplus.h
@@ -20,32 +20,11 @@ struct koneplus_talk {
uint8_t data[14];
} __packed;
-/*
- * case 1: writes request 80 and reads value 1
- *
- */
-struct koneplus_control {
- uint8_t command; /* KONEPLUS_COMMAND_CONTROL */
- /*
- * value is profile number in range 0-4 for requesting settings and buttons
- * 1 if status ok for requesting status
- */
- uint8_t value;
- uint8_t request;
-} __attribute__ ((__packed__));
-
enum koneplus_control_requests {
- KONEPLUS_CONTROL_REQUEST_STATUS = 0x00,
KONEPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x80,
KONEPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x90,
};
-enum koneplus_control_values {
- KONEPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0,
- KONEPLUS_CONTROL_REQUEST_STATUS_OK = 1,
- KONEPLUS_CONTROL_REQUEST_STATUS_WAIT = 3,
-};
-
struct koneplus_actual_profile {
uint8_t command; /* KONEPLUS_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
@@ -137,7 +116,6 @@ struct koneplus_tcu_image {
} __attribute__ ((__packed__));
enum koneplus_commands {
- KONEPLUS_COMMAND_CONTROL = 0x4,
KONEPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
KONEPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
KONEPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 112d934132c8..ca6527ac655d 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -47,69 +47,23 @@ static int kovaplus_send_control(struct usb_device *usb_dev, uint value,
enum kovaplus_control_requests request)
{
int retval;
- struct kovaplus_control control;
+ struct roccat_common2_control control;
if ((request == KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS) &&
value > 4)
return -EINVAL;
- control.command = KOVAPLUS_COMMAND_CONTROL;
+ control.command = ROCCAT_COMMON_COMMAND_CONTROL;
control.value = value;
control.request = request;
- retval = roccat_common_send(usb_dev, KOVAPLUS_COMMAND_CONTROL,
- &control, sizeof(struct kovaplus_control));
+ retval = roccat_common2_send(usb_dev, ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
return retval;
}
-static int kovaplus_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct kovaplus_control control;
-
- do {
- retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_CONTROL,
- &control, sizeof(struct kovaplus_control));
-
- /* check if we get a completely wrong answer */
- if (retval)
- return retval;
-
- if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OK)
- return 0;
-
- /* indicates that hardware needs some more time to complete action */
- if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT) {
- msleep(500); /* windows driver uses 1000 */
- continue;
- }
-
- /* seems to be critical - replug necessary */
- if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
- return -EINVAL;
-
- hid_err(usb_dev, "roccat_common_receive_control_status: "
- "unknown response value 0x%x\n", control.value);
- return -EINVAL;
- } while (1);
-}
-
-static int kovaplus_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
-
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
-
- msleep(100);
-
- return kovaplus_receive_control_status(usb_dev);
-}
-
static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
enum kovaplus_control_requests request)
{
@@ -119,7 +73,7 @@ static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
static int kovaplus_get_info(struct usb_device *usb_dev,
struct kovaplus_info *buf)
{
- return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
+ return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
buf, sizeof(struct kovaplus_info));
}
@@ -133,14 +87,15 @@ static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct kovaplus_profile_settings));
}
static int kovaplus_set_profile_settings(struct usb_device *usb_dev,
struct kovaplus_profile_settings const *settings)
{
- return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_send_with_status(usb_dev,
+ KOVAPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct kovaplus_profile_settings));
}
@@ -154,14 +109,15 @@ static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct kovaplus_profile_buttons));
}
static int kovaplus_set_profile_buttons(struct usb_device *usb_dev,
struct kovaplus_profile_buttons const *buttons)
{
- return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_send_with_status(usb_dev,
+ KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct kovaplus_profile_buttons));
}
@@ -171,7 +127,7 @@ static int kovaplus_get_actual_profile(struct usb_device *usb_dev)
struct kovaplus_actual_profile buf;
int retval;
- retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common2_receive(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
return retval ? retval : buf.actual_profile;
@@ -186,7 +142,8 @@ static int kovaplus_set_actual_profile(struct usb_device *usb_dev,
buf.size = sizeof(struct kovaplus_actual_profile);
buf.actual_profile = new_profile;
- return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
+ return roccat_common2_send_with_status(usb_dev,
+ KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
}
diff --git a/drivers/hid/hid-roccat-kovaplus.h b/drivers/hid/hid-roccat-kovaplus.h
index fb2aed44a8e0..f82daa1cdcb9 100644
--- a/drivers/hid/hid-roccat-kovaplus.h
+++ b/drivers/hid/hid-roccat-kovaplus.h
@@ -14,27 +14,13 @@
#include <linux/types.h>
-struct kovaplus_control {
- uint8_t command; /* KOVAPLUS_COMMAND_CONTROL */
- uint8_t value;
- uint8_t request;
-} __packed;
-
enum kovaplus_control_requests {
- /* read after write; value = 1 */
- KOVAPLUS_CONTROL_REQUEST_STATUS = 0x0,
/* write; value = profile number range 0-4 */
KOVAPLUS_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
/* write; value = profile number range 0-4 */
KOVAPLUS_CONTROL_REQUEST_PROFILE_BUTTONS = 0x20,
};
-enum kovaplus_control_values {
- KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD = 0, /* supposed */
- KOVAPLUS_CONTROL_REQUEST_STATUS_OK = 1,
- KOVAPLUS_CONTROL_REQUEST_STATUS_WAIT = 3, /* supposed */
-};
-
struct kovaplus_actual_profile {
uint8_t command; /* KOVAPLUS_COMMAND_ACTUAL_PROFILE */
uint8_t size; /* always 3 */
@@ -75,7 +61,6 @@ struct kovaplus_a {
} __packed;
enum kovaplus_commands {
- KOVAPLUS_COMMAND_CONTROL = 0x4,
KOVAPLUS_COMMAND_ACTUAL_PROFILE = 0x5,
KOVAPLUS_COMMAND_PROFILE_SETTINGS = 0x6,
KOVAPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index df05c1b1064f..1317c177a3e2 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -42,43 +42,19 @@ static void profile_activated(struct pyra_device *pyra,
static int pyra_send_control(struct usb_device *usb_dev, int value,
enum pyra_control_requests request)
{
- struct pyra_control control;
+ struct roccat_common2_control control;
if ((request == PYRA_CONTROL_REQUEST_PROFILE_SETTINGS ||
request == PYRA_CONTROL_REQUEST_PROFILE_BUTTONS) &&
(value < 0 || value > 4))
return -EINVAL;
- control.command = PYRA_COMMAND_CONTROL;
+ control.command = ROCCAT_COMMON_COMMAND_CONTROL;
control.value = value;
control.request = request;
- return roccat_common_send(usb_dev, PYRA_COMMAND_CONTROL,
- &control, sizeof(struct pyra_control));
-}
-
-static int pyra_receive_control_status(struct usb_device *usb_dev)
-{
- int retval;
- struct pyra_control control;
-
- do {
- msleep(10);
- retval = roccat_common_receive(usb_dev, PYRA_COMMAND_CONTROL,
- &control, sizeof(struct pyra_control));
-
- /* requested too early, try again */
- } while (retval == -EPROTO);
-
- if (!retval && control.command == PYRA_COMMAND_CONTROL &&
- control.request == PYRA_CONTROL_REQUEST_STATUS &&
- control.value == 1)
- return 0;
- else {
- hid_err(usb_dev, "receive control status: unknown response 0x%x 0x%x\n",
- control.request, control.value);
- return retval ? retval : -EINVAL;
- }
+ return roccat_common2_send(usb_dev, ROCCAT_COMMON_COMMAND_CONTROL,
+ &control, sizeof(struct roccat_common2_control));
}
static int pyra_get_profile_settings(struct usb_device *usb_dev,
@@ -89,7 +65,7 @@ static int pyra_get_profile_settings(struct usb_device *usb_dev,
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct pyra_profile_settings));
}
@@ -101,51 +77,44 @@ static int pyra_get_profile_buttons(struct usb_device *usb_dev,
PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct pyra_profile_buttons));
}
static int pyra_get_settings(struct usb_device *usb_dev,
struct pyra_settings *buf)
{
- return roccat_common_receive(usb_dev, PYRA_COMMAND_SETTINGS,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_SETTINGS,
buf, sizeof(struct pyra_settings));
}
static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
{
- return roccat_common_receive(usb_dev, PYRA_COMMAND_INFO,
+ return roccat_common2_receive(usb_dev, PYRA_COMMAND_INFO,
buf, sizeof(struct pyra_info));
}
-static int pyra_send(struct usb_device *usb_dev, uint command,
- void const *buf, uint size)
-{
- int retval;
- retval = roccat_common_send(usb_dev, command, buf, size);
- if (retval)
- return retval;
- return pyra_receive_control_status(usb_dev);
-}
-
static int pyra_set_profile_settings(struct usb_device *usb_dev,
struct pyra_profile_settings const *settings)
{
- return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS, settings,
+ return roccat_common2_send_with_status(usb_dev,
+ PYRA_COMMAND_PROFILE_SETTINGS, settings,
sizeof(struct pyra_profile_settings));
}
static int pyra_set_profile_buttons(struct usb_device *usb_dev,
struct pyra_profile_buttons const *buttons)
{
- return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS, buttons,
+ return roccat_common2_send_with_status(usb_dev,
+ PYRA_COMMAND_PROFILE_BUTTONS, buttons,
sizeof(struct pyra_profile_buttons));
}
static int pyra_set_settings(struct usb_device *usb_dev,
struct pyra_settings const *settings)
{
- return pyra_send(usb_dev, PYRA_COMMAND_SETTINGS, settings,
+ return roccat_common2_send_with_status(usb_dev,
+ PYRA_COMMAND_SETTINGS, settings,
sizeof(struct pyra_settings));
}
diff --git a/drivers/hid/hid-roccat-pyra.h b/drivers/hid/hid-roccat-pyra.h
index 0442d7fa2dcf..eada7830fa99 100644
--- a/drivers/hid/hid-roccat-pyra.h
+++ b/drivers/hid/hid-roccat-pyra.h
@@ -20,18 +20,7 @@ struct pyra_b {
uint8_t unknown; /* 1 */
} __attribute__ ((__packed__));
-struct pyra_control {
- uint8_t command; /* PYRA_COMMAND_CONTROL */
- /*
- * value is profile number for request_settings and request_buttons
- * 1 if status ok for request_status
- */
- uint8_t value; /* Range 0-4 */
- uint8_t request;
-} __attribute__ ((__packed__));
-
enum pyra_control_requests {
- PYRA_CONTROL_REQUEST_STATUS = 0x00,
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS = 0x10,
PYRA_CONTROL_REQUEST_PROFILE_BUTTONS = 0x20
};
@@ -75,7 +64,6 @@ struct pyra_info {
} __attribute__ ((__packed__));
enum pyra_commands {
- PYRA_COMMAND_CONTROL = 0x4,
PYRA_COMMAND_SETTINGS = 0x5,
PYRA_COMMAND_PROFILE_SETTINGS = 0x6,
PYRA_COMMAND_PROFILE_BUTTONS = 0x7,
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
new file mode 100644
index 000000000000..014afba407e0
--- /dev/null
+++ b/drivers/hid/hid-roccat-savu.c
@@ -0,0 +1,316 @@
+/*
+ * Roccat Savu driver for Linux
+ *
+ * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/* Roccat Savu is a gamer mouse with macro keys that can be configured in
+ * 5 profiles.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-savu.h"
+
+static struct class *savu_class;
+
+static ssize_t savu_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&savu->savu_lock);
+ retval = roccat_common2_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&savu->savu_lock);
+
+ return retval ? retval : real_size;
+}
+
+static ssize_t savu_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct savu_device *savu = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&savu->savu_lock);
+ retval = roccat_common2_send_with_status(usb_dev, command,
+ (void *)buf, real_size);
+ mutex_unlock(&savu->savu_lock);
+
+ return retval ? retval : real_size;
+}
+
+#define SAVU_SYSFS_W(thingy, THINGY) \
+static ssize_t savu_sysfs_write_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return savu_sysfs_write(fp, kobj, buf, off, count, \
+ SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
+}
+
+#define SAVU_SYSFS_R(thingy, THINGY) \
+static ssize_t savu_sysfs_read_ ## thingy(struct file *fp, \
+ struct kobject *kobj, struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return savu_sysfs_read(fp, kobj, buf, off, count, \
+ SAVU_SIZE_ ## THINGY, SAVU_COMMAND_ ## THINGY); \
+}
+
+#define SAVU_SYSFS_RW(thingy, THINGY) \
+SAVU_SYSFS_W(thingy, THINGY) \
+SAVU_SYSFS_R(thingy, THINGY)
+
+#define SAVU_BIN_ATTRIBUTE_RW(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = SAVU_SIZE_ ## THINGY, \
+ .read = savu_sysfs_read_ ## thingy, \
+ .write = savu_sysfs_write_ ## thingy \
+}
+
+#define SAVU_BIN_ATTRIBUTE_R(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = SAVU_SIZE_ ## THINGY, \
+ .read = savu_sysfs_read_ ## thingy, \
+}
+
+#define SAVU_BIN_ATTRIBUTE_W(thingy, THINGY) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = SAVU_SIZE_ ## THINGY, \
+ .write = savu_sysfs_write_ ## thingy \
+}
+
+SAVU_SYSFS_W(control, CONTROL)
+SAVU_SYSFS_RW(profile, PROFILE)
+SAVU_SYSFS_RW(general, GENERAL)
+SAVU_SYSFS_RW(buttons, BUTTONS)
+SAVU_SYSFS_RW(macro, MACRO)
+SAVU_SYSFS_R(info, INFO)
+SAVU_SYSFS_RW(sensor, SENSOR)
+
+static struct bin_attribute savu_bin_attributes[] = {
+ SAVU_BIN_ATTRIBUTE_W(control, CONTROL),
+ SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE),
+ SAVU_BIN_ATTRIBUTE_RW(general, GENERAL),
+ SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS),
+ SAVU_BIN_ATTRIBUTE_RW(macro, MACRO),
+ SAVU_BIN_ATTRIBUTE_R(info, INFO),
+ SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR),
+ __ATTR_NULL
+};
+
+static int savu_init_savu_device_struct(struct usb_device *usb_dev,
+ struct savu_device *savu)
+{
+ mutex_init(&savu->savu_lock);
+
+ return 0;
+}
+
+static int savu_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct savu_device *savu;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE) {
+ hid_set_drvdata(hdev, NULL);
+ return 0;
+ }
+
+ savu = kzalloc(sizeof(*savu), GFP_KERNEL);
+ if (!savu) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, savu);
+
+ retval = savu_init_savu_device_struct(usb_dev, savu);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct savu_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(savu_class, hdev,
+ sizeof(struct savu_roccat_report));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ savu->chrdev_minor = retval;
+ savu->roccat_claimed = 1;
+ }
+
+ return 0;
+exit_free:
+ kfree(savu);
+ return retval;
+}
+
+static void savu_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct savu_device *savu;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE)
+ return;
+
+ savu = hid_get_drvdata(hdev);
+ if (savu->roccat_claimed)
+ roccat_disconnect(savu->chrdev_minor);
+ kfree(savu);
+}
+
+static int savu_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = savu_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install mouse\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void savu_remove(struct hid_device *hdev)
+{
+ savu_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void savu_report_to_chrdev(struct savu_device const *savu,
+ u8 const *data)
+{
+ struct savu_roccat_report roccat_report;
+ struct savu_mouse_report_special const *special_report;
+
+ if (data[0] != SAVU_MOUSE_REPORT_NUMBER_SPECIAL)
+ return;
+
+ special_report = (struct savu_mouse_report_special const *)data;
+
+ roccat_report.type = special_report->type;
+ roccat_report.data[0] = special_report->data[0];
+ roccat_report.data[1] = special_report->data[1];
+ roccat_report_event(savu->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+}
+
+static int savu_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct savu_device *savu = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != USB_INTERFACE_PROTOCOL_MOUSE)
+ return 0;
+
+ if (savu == NULL)
+ return 0;
+
+ if (savu->roccat_claimed)
+ savu_report_to_chrdev(savu, data);
+
+ return 0;
+}
+
+static const struct hid_device_id savu_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, savu_devices);
+
+static struct hid_driver savu_driver = {
+ .name = "savu",
+ .id_table = savu_devices,
+ .probe = savu_probe,
+ .remove = savu_remove,
+ .raw_event = savu_raw_event
+};
+
+static int __init savu_init(void)
+{
+ int retval;
+
+ savu_class = class_create(THIS_MODULE, "savu");
+ if (IS_ERR(savu_class))
+ return PTR_ERR(savu_class);
+ savu_class->dev_bin_attrs = savu_bin_attributes;
+
+ retval = hid_register_driver(&savu_driver);
+ if (retval)
+ class_destroy(savu_class);
+ return retval;
+}
+
+static void __exit savu_exit(void)
+{
+ hid_unregister_driver(&savu_driver);
+ class_destroy(savu_class);
+}
+
+module_init(savu_init);
+module_exit(savu_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Savu driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-savu.h b/drivers/hid/hid-roccat-savu.h
new file mode 100644
index 000000000000..9120ba72087f
--- /dev/null
+++ b/drivers/hid/hid-roccat-savu.h
@@ -0,0 +1,87 @@
+#ifndef __HID_ROCCAT_SAVU_H
+#define __HID_ROCCAT_SAVU_H
+
+/*
+ * Copyright (c) 2012 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+enum {
+ SAVU_SIZE_CONTROL = 0x03,
+ SAVU_SIZE_PROFILE = 0x03,
+ SAVU_SIZE_GENERAL = 0x10,
+ SAVU_SIZE_BUTTONS = 0x2f,
+ SAVU_SIZE_MACRO = 0x0823,
+ SAVU_SIZE_INFO = 0x08,
+ SAVU_SIZE_SENSOR = 0x04,
+};
+
+enum savu_control_requests {
+ SAVU_CONTROL_REQUEST_GENERAL = 0x80,
+ SAVU_CONTROL_REQUEST_BUTTONS = 0x90,
+};
+
+enum savu_commands {
+ SAVU_COMMAND_CONTROL = 0x4,
+ SAVU_COMMAND_PROFILE = 0x5,
+ SAVU_COMMAND_GENERAL = 0x6,
+ SAVU_COMMAND_BUTTONS = 0x7,
+ SAVU_COMMAND_MACRO = 0x8,
+ SAVU_COMMAND_INFO = 0x9,
+ SAVU_COMMAND_SENSOR = 0xc,
+};
+
+struct savu_mouse_report_special {
+ uint8_t report_number; /* always 3 */
+ uint8_t zero;
+ uint8_t type;
+ uint8_t data[2];
+} __packed;
+
+enum {
+ SAVU_MOUSE_REPORT_NUMBER_SPECIAL = 3,
+};
+
+enum savu_mouse_report_button_types {
+ /* data1 = new profile range 1-5 */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_PROFILE = 0x20,
+
+ /* data1 = button number range 1-24; data2 = action */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_QUICKLAUNCH = 0x60,
+
+ /* data1 = button number range 1-24; data2 = action */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_TIMER = 0x80,
+
+ /* data1 = setting number range 1-5 */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_CPI = 0xb0,
+
+ /* data1 and data2 = range 0x1-0xb */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_SENSITIVITY = 0xc0,
+
+ /* data1 = 22 = next track...
+ * data2 = action
+ */
+ SAVU_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 0xf0,
+};
+
+struct savu_roccat_report {
+ uint8_t type;
+ uint8_t data[2];
+} __packed;
+
+struct savu_device {
+ int roccat_claimed;
+ int chrdev_minor;
+
+ struct mutex savu_lock;
+};
+
+#endif
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
index aa958706c0e5..0a1805c9b0e5 100644
--- a/drivers/hid/hid-wiimote-ext.c
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -77,7 +77,7 @@ static __u16 wiiext_keymap[] = {
BTN_TR, /* WIIEXT_KEY_RT */
};
-/* diable all extensions */
+/* disable all extensions */
static void ext_disable(struct wiimote_ext *ext)
{
unsigned long flags;
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 36fa77b40ffb..3b6f7bf5a77e 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -96,6 +96,7 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
}
kfree(list->buffer[list->tail].value);
+ list->buffer[list->tail].value = NULL;
list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1);
}
out:
@@ -300,6 +301,7 @@ static int hidraw_release(struct inode * inode, struct file * file)
struct hidraw *dev;
struct hidraw_list *list = file->private_data;
int ret;
+ int i;
mutex_lock(&minors_lock);
if (!hidraw_table[minor]) {
@@ -317,6 +319,9 @@ static int hidraw_release(struct inode * inode, struct file * file)
kfree(list->hidraw);
}
}
+
+ for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
+ kfree(list->buffer[i].value);
kfree(list);
ret = 0;
unlock:
@@ -446,12 +451,17 @@ int hidraw_report_event(struct hid_device *hid, u8 *data, int len)
int ret = 0;
list_for_each_entry(list, &dev->list, node) {
+ int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
+
+ if (new_head == list->tail)
+ continue;
+
if (!(list->buffer[list->head].value = kmemdup(data, len, GFP_ATOMIC))) {
ret = -ENOMEM;
break;
}
list->buffer[list->head].len = len;
- list->head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
+ list->head = new_head;
kill_fasync(&list->fasync, SIGIO, POLL_IN);
}
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
new file mode 100644
index 000000000000..714cd8cc9579
--- /dev/null
+++ b/drivers/hid/uhid.c
@@ -0,0 +1,572 @@
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/uhid.h>
+#include <linux/wait.h>
+
+#define UHID_NAME "uhid"
+#define UHID_BUFSIZE 32
+
+struct uhid_device {
+ struct mutex devlock;
+ bool running;
+
+ __u8 *rd_data;
+ uint rd_size;
+
+ struct hid_device *hid;
+ struct uhid_event input_buf;
+
+ wait_queue_head_t waitq;
+ spinlock_t qlock;
+ __u8 head;
+ __u8 tail;
+ struct uhid_event *outq[UHID_BUFSIZE];
+
+ struct mutex report_lock;
+ wait_queue_head_t report_wait;
+ atomic_t report_done;
+ atomic_t report_id;
+ struct uhid_event report_buf;
+};
+
+static struct miscdevice uhid_misc;
+
+static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
+{
+ __u8 newhead;
+
+ newhead = (uhid->head + 1) % UHID_BUFSIZE;
+
+ if (newhead != uhid->tail) {
+ uhid->outq[uhid->head] = ev;
+ uhid->head = newhead;
+ wake_up_interruptible(&uhid->waitq);
+ } else {
+ hid_warn(uhid->hid, "Output queue is full\n");
+ kfree(ev);
+ }
+}
+
+static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
+{
+ unsigned long flags;
+ struct uhid_event *ev;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = event;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ return 0;
+}
+
+static int uhid_hid_start(struct hid_device *hid)
+{
+ struct uhid_device *uhid = hid->driver_data;
+
+ return uhid_queue_event(uhid, UHID_START);
+}
+
+static void uhid_hid_stop(struct hid_device *hid)
+{
+ struct uhid_device *uhid = hid->driver_data;
+
+ hid->claimed = 0;
+ uhid_queue_event(uhid, UHID_STOP);
+}
+
+static int uhid_hid_open(struct hid_device *hid)
+{
+ struct uhid_device *uhid = hid->driver_data;
+
+ return uhid_queue_event(uhid, UHID_OPEN);
+}
+
+static void uhid_hid_close(struct hid_device *hid)
+{
+ struct uhid_device *uhid = hid->driver_data;
+
+ uhid_queue_event(uhid, UHID_CLOSE);
+}
+
+static int uhid_hid_input(struct input_dev *input, unsigned int type,
+ unsigned int code, int value)
+{
+ struct hid_device *hid = input_get_drvdata(input);
+ struct uhid_device *uhid = hid->driver_data;
+ unsigned long flags;
+ struct uhid_event *ev;
+
+ ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = UHID_OUTPUT_EV;
+ ev->u.output_ev.type = type;
+ ev->u.output_ev.code = code;
+ ev->u.output_ev.value = value;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ return 0;
+}
+
+static int uhid_hid_parse(struct hid_device *hid)
+{
+ struct uhid_device *uhid = hid->driver_data;
+
+ return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
+}
+
+static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
+ __u8 *buf, size_t count, unsigned char rtype)
+{
+ struct uhid_device *uhid = hid->driver_data;
+ __u8 report_type;
+ struct uhid_event *ev;
+ unsigned long flags;
+ int ret;
+ size_t uninitialized_var(len);
+ struct uhid_feature_answer_req *req;
+
+ if (!uhid->running)
+ return -EIO;
+
+ switch (rtype) {
+ case HID_FEATURE_REPORT:
+ report_type = UHID_FEATURE_REPORT;
+ break;
+ case HID_OUTPUT_REPORT:
+ report_type = UHID_OUTPUT_REPORT;
+ break;
+ case HID_INPUT_REPORT:
+ report_type = UHID_INPUT_REPORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = mutex_lock_interruptible(&uhid->report_lock);
+ if (ret)
+ return ret;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ ev->type = UHID_FEATURE;
+ ev->u.feature.id = atomic_inc_return(&uhid->report_id);
+ ev->u.feature.rnum = rnum;
+ ev->u.feature.rtype = report_type;
+
+ atomic_set(&uhid->report_done, 0);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ ret = wait_event_interruptible_timeout(uhid->report_wait,
+ atomic_read(&uhid->report_done), 5 * HZ);
+
+ /*
+ * Make sure "uhid->running" is cleared on shutdown before
+ * "uhid->report_done" is set.
+ */
+ smp_rmb();
+ if (!ret || !uhid->running) {
+ ret = -EIO;
+ } else if (ret < 0) {
+ ret = -ERESTARTSYS;
+ } else {
+ spin_lock_irqsave(&uhid->qlock, flags);
+ req = &uhid->report_buf.u.feature_answer;
+
+ if (req->err) {
+ ret = -EIO;
+ } else {
+ ret = 0;
+ len = min(count,
+ min_t(size_t, req->size, UHID_DATA_MAX));
+ memcpy(buf, req->data, len);
+ }
+
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+ }
+
+ atomic_set(&uhid->report_done, 1);
+
+unlock:
+ mutex_unlock(&uhid->report_lock);
+ return ret ? ret : len;
+}
+
+static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
+ unsigned char report_type)
+{
+ struct uhid_device *uhid = hid->driver_data;
+ __u8 rtype;
+ unsigned long flags;
+ struct uhid_event *ev;
+
+ switch (report_type) {
+ case HID_FEATURE_REPORT:
+ rtype = UHID_FEATURE_REPORT;
+ break;
+ case HID_OUTPUT_REPORT:
+ rtype = UHID_OUTPUT_REPORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (count < 1 || count > UHID_DATA_MAX)
+ return -EINVAL;
+
+ ev = kzalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return -ENOMEM;
+
+ ev->type = UHID_OUTPUT;
+ ev->u.output.size = count;
+ ev->u.output.rtype = rtype;
+ memcpy(ev->u.output.data, buf, count);
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid_queue(uhid, ev);
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+
+ return count;
+}
+
+static struct hid_ll_driver uhid_hid_driver = {
+ .start = uhid_hid_start,
+ .stop = uhid_hid_stop,
+ .open = uhid_hid_open,
+ .close = uhid_hid_close,
+ .hidinput_input_event = uhid_hid_input,
+ .parse = uhid_hid_parse,
+};
+
+static int uhid_dev_create(struct uhid_device *uhid,
+ const struct uhid_event *ev)
+{
+ struct hid_device *hid;
+ int ret;
+
+ if (uhid->running)
+ return -EALREADY;
+
+ uhid->rd_size = ev->u.create.rd_size;
+ if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE)
+ return -EINVAL;
+
+ uhid->rd_data = kmalloc(uhid->rd_size, GFP_KERNEL);
+ if (!uhid->rd_data)
+ return -ENOMEM;
+
+ if (copy_from_user(uhid->rd_data, ev->u.create.rd_data,
+ uhid->rd_size)) {
+ ret = -EFAULT;
+ goto err_free;
+ }
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid)) {
+ ret = PTR_ERR(hid);
+ goto err_free;
+ }
+
+ strncpy(hid->name, ev->u.create.name, 127);
+ hid->name[127] = 0;
+ strncpy(hid->phys, ev->u.create.phys, 63);
+ hid->phys[63] = 0;
+ strncpy(hid->uniq, ev->u.create.uniq, 63);
+ hid->uniq[63] = 0;
+
+ hid->ll_driver = &uhid_hid_driver;
+ hid->hid_get_raw_report = uhid_hid_get_raw;
+ hid->hid_output_raw_report = uhid_hid_output_raw;
+ hid->bus = ev->u.create.bus;
+ hid->vendor = ev->u.create.vendor;
+ hid->product = ev->u.create.product;
+ hid->version = ev->u.create.version;
+ hid->country = ev->u.create.country;
+ hid->driver_data = uhid;
+ hid->dev.parent = uhid_misc.this_device;
+
+ uhid->hid = hid;
+ uhid->running = true;
+
+ ret = hid_add_device(hid);
+ if (ret) {
+ hid_err(hid, "Cannot register HID device\n");
+ goto err_hid;
+ }
+
+ return 0;
+
+err_hid:
+ hid_destroy_device(hid);
+ uhid->hid = NULL;
+ uhid->running = false;
+err_free:
+ kfree(uhid->rd_data);
+ return ret;
+}
+
+static int uhid_dev_destroy(struct uhid_device *uhid)
+{
+ if (!uhid->running)
+ return -EINVAL;
+
+ /* clear "running" before setting "report_done" */
+ uhid->running = false;
+ smp_wmb();
+ atomic_set(&uhid->report_done, 1);
+ wake_up_interruptible(&uhid->report_wait);
+
+ hid_destroy_device(uhid->hid);
+ kfree(uhid->rd_data);
+
+ return 0;
+}
+
+static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
+{
+ if (!uhid->running)
+ return -EINVAL;
+
+ hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
+ min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);
+
+ return 0;
+}
+
+static int uhid_dev_feature_answer(struct uhid_device *uhid,
+ struct uhid_event *ev)
+{
+ unsigned long flags;
+
+ if (!uhid->running)
+ return -EINVAL;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+
+ /* id for old report; drop it silently */
+ if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
+ goto unlock;
+ if (atomic_read(&uhid->report_done))
+ goto unlock;
+
+ memcpy(&uhid->report_buf, ev, sizeof(*ev));
+ atomic_set(&uhid->report_done, 1);
+ wake_up_interruptible(&uhid->report_wait);
+
+unlock:
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+ return 0;
+}
+
+static int uhid_char_open(struct inode *inode, struct file *file)
+{
+ struct uhid_device *uhid;
+
+ uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
+ if (!uhid)
+ return -ENOMEM;
+
+ mutex_init(&uhid->devlock);
+ mutex_init(&uhid->report_lock);
+ spin_lock_init(&uhid->qlock);
+ init_waitqueue_head(&uhid->waitq);
+ init_waitqueue_head(&uhid->report_wait);
+ uhid->running = false;
+ atomic_set(&uhid->report_done, 1);
+
+ file->private_data = uhid;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static int uhid_char_release(struct inode *inode, struct file *file)
+{
+ struct uhid_device *uhid = file->private_data;
+ unsigned int i;
+
+ uhid_dev_destroy(uhid);
+
+ for (i = 0; i < UHID_BUFSIZE; ++i)
+ kfree(uhid->outq[i]);
+
+ kfree(uhid);
+
+ return 0;
+}
+
+static ssize_t uhid_char_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct uhid_device *uhid = file->private_data;
+ int ret;
+ unsigned long flags;
+ size_t len;
+
+ /* they need at least the "type" member of uhid_event */
+ if (count < sizeof(__u32))
+ return -EINVAL;
+
+try_again:
+ if (file->f_flags & O_NONBLOCK) {
+ if (uhid->head == uhid->tail)
+ return -EAGAIN;
+ } else {
+ ret = wait_event_interruptible(uhid->waitq,
+ uhid->head != uhid->tail);
+ if (ret)
+ return ret;
+ }
+
+ ret = mutex_lock_interruptible(&uhid->devlock);
+ if (ret)
+ return ret;
+
+ if (uhid->head == uhid->tail) {
+ mutex_unlock(&uhid->devlock);
+ goto try_again;
+ } else {
+ len = min(count, sizeof(**uhid->outq));
+ if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
+ ret = -EFAULT;
+ } else {
+ kfree(uhid->outq[uhid->tail]);
+ uhid->outq[uhid->tail] = NULL;
+
+ spin_lock_irqsave(&uhid->qlock, flags);
+ uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
+ spin_unlock_irqrestore(&uhid->qlock, flags);
+ }
+ }
+
+ mutex_unlock(&uhid->devlock);
+ return ret ? ret : len;
+}
+
+static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct uhid_device *uhid = file->private_data;
+ int ret;
+ size_t len;
+
+ /* we need at least the "type" member of uhid_event */
+ if (count < sizeof(__u32))
+ return -EINVAL;
+
+ ret = mutex_lock_interruptible(&uhid->devlock);
+ if (ret)
+ return ret;
+
+ memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
+ len = min(count, sizeof(uhid->input_buf));
+ if (copy_from_user(&uhid->input_buf, buffer, len)) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ switch (uhid->input_buf.type) {
+ case UHID_CREATE:
+ ret = uhid_dev_create(uhid, &uhid->input_buf);
+ break;
+ case UHID_DESTROY:
+ ret = uhid_dev_destroy(uhid);
+ break;
+ case UHID_INPUT:
+ ret = uhid_dev_input(uhid, &uhid->input_buf);
+ break;
+ case UHID_FEATURE_ANSWER:
+ ret = uhid_dev_feature_answer(uhid, &uhid->input_buf);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+unlock:
+ mutex_unlock(&uhid->devlock);
+
+ /* return "count" not "len" to not confuse the caller */
+ return ret ? ret : count;
+}
+
+static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
+{
+ struct uhid_device *uhid = file->private_data;
+
+ poll_wait(file, &uhid->waitq, wait);
+
+ if (uhid->head != uhid->tail)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static const struct file_operations uhid_fops = {
+ .owner = THIS_MODULE,
+ .open = uhid_char_open,
+ .release = uhid_char_release,
+ .read = uhid_char_read,
+ .write = uhid_char_write,
+ .poll = uhid_char_poll,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice uhid_misc = {
+ .fops = &uhid_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = UHID_NAME,
+};
+
+static int __init uhid_init(void)
+{
+ return misc_register(&uhid_misc);
+}
+
+static void __exit uhid_exit(void)
+{
+ misc_deregister(&uhid_misc);
+}
+
+module_init(uhid_init);
+module_exit(uhid_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
+MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 482f936fc29b..dedd8e4e5c6d 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -84,7 +84,7 @@ static int hid_start_in(struct hid_device *hid)
spin_lock_irqsave(&usbhid->lock, flags);
if (hid->open > 0 &&
!test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
- !test_bit(HID_REPORTED_IDLE, &usbhid->iofl) &&
+ !test_bit(HID_SUSPENDED, &usbhid->iofl) &&
!test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
rc = usb_submit_urb(usbhid->urbin, GFP_ATOMIC);
if (rc != 0) {
@@ -207,15 +207,27 @@ static int usbhid_restart_out_queue(struct usbhid_device *usbhid)
int kicked;
int r;
- if (!hid)
+ if (!hid || test_bit(HID_RESET_PENDING, &usbhid->iofl) ||
+ test_bit(HID_SUSPENDED, &usbhid->iofl))
return 0;
if ((kicked = (usbhid->outhead != usbhid->outtail))) {
hid_dbg(hid, "Kicking head %d tail %d", usbhid->outhead, usbhid->outtail);
+ /* Try to wake up from autosuspend... */
r = usb_autopm_get_interface_async(usbhid->intf);
if (r < 0)
return r;
+
+ /*
+ * If still suspended, don't submit. Submission will
+ * occur if/when resume drains the queue.
+ */
+ if (test_bit(HID_SUSPENDED, &usbhid->iofl)) {
+ usb_autopm_put_interface_no_suspend(usbhid->intf);
+ return r;
+ }
+
/* Asynchronously flush queue. */
set_bit(HID_OUT_RUNNING, &usbhid->iofl);
if (hid_submit_out(hid)) {
@@ -234,15 +246,27 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
int r;
WARN_ON(hid == NULL);
- if (!hid)
+ if (!hid || test_bit(HID_RESET_PENDING, &usbhid->iofl) ||
+ test_bit(HID_SUSPENDED, &usbhid->iofl))
return 0;
if ((kicked = (usbhid->ctrlhead != usbhid->ctrltail))) {
hid_dbg(hid, "Kicking head %d tail %d", usbhid->ctrlhead, usbhid->ctrltail);
+ /* Try to wake up from autosuspend... */
r = usb_autopm_get_interface_async(usbhid->intf);
if (r < 0)
return r;
+
+ /*
+ * If still suspended, don't submit. Submission will
+ * occur if/when resume drains the queue.
+ */
+ if (test_bit(HID_SUSPENDED, &usbhid->iofl)) {
+ usb_autopm_put_interface_no_suspend(usbhid->intf);
+ return r;
+ }
+
/* Asynchronously flush queue. */
set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
if (hid_submit_ctrl(hid)) {
@@ -331,9 +355,12 @@ static int hid_submit_out(struct hid_device *hid)
usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) +
1 + (report->id > 0);
usbhid->urbout->dev = hid_to_usb_dev(hid);
- memcpy(usbhid->outbuf, raw_report,
- usbhid->urbout->transfer_buffer_length);
- kfree(raw_report);
+ if (raw_report) {
+ memcpy(usbhid->outbuf, raw_report,
+ usbhid->urbout->transfer_buffer_length);
+ kfree(raw_report);
+ usbhid->out[usbhid->outtail].raw_report = NULL;
+ }
dbg_hid("submitting out urb\n");
@@ -362,8 +389,11 @@ static int hid_submit_ctrl(struct hid_device *hid)
if (dir == USB_DIR_OUT) {
usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
usbhid->urbctrl->transfer_buffer_length = len;
- memcpy(usbhid->ctrlbuf, raw_report, len);
- kfree(raw_report);
+ if (raw_report) {
+ memcpy(usbhid->ctrlbuf, raw_report, len);
+ kfree(raw_report);
+ usbhid->ctrl[usbhid->ctrltail].raw_report = NULL;
+ }
} else {
int maxpacket, padlen;
@@ -407,16 +437,6 @@ static int hid_submit_ctrl(struct hid_device *hid)
* Output interrupt completion handler.
*/
-static int irq_out_pump_restart(struct hid_device *hid)
-{
- struct usbhid_device *usbhid = hid->driver_data;
-
- if (usbhid->outhead != usbhid->outtail)
- return hid_submit_out(hid);
- else
- return -1;
-}
-
static void hid_irq_out(struct urb *urb)
{
struct hid_device *hid = urb->context;
@@ -441,15 +461,17 @@ static void hid_irq_out(struct urb *urb)
spin_lock_irqsave(&usbhid->lock, flags);
- if (unplug)
+ if (unplug) {
usbhid->outtail = usbhid->outhead;
- else
+ } else {
usbhid->outtail = (usbhid->outtail + 1) & (HID_OUTPUT_FIFO_SIZE - 1);
- if (!irq_out_pump_restart(hid)) {
- /* Successfully submitted next urb in queue */
- spin_unlock_irqrestore(&usbhid->lock, flags);
- return;
+ if (usbhid->outhead != usbhid->outtail &&
+ hid_submit_out(hid) == 0) {
+ /* Successfully submitted next urb in queue */
+ spin_unlock_irqrestore(&usbhid->lock, flags);
+ return;
+ }
}
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
@@ -461,15 +483,6 @@ static void hid_irq_out(struct urb *urb)
/*
* Control pipe completion handler.
*/
-static int ctrl_pump_restart(struct hid_device *hid)
-{
- struct usbhid_device *usbhid = hid->driver_data;
-
- if (usbhid->ctrlhead != usbhid->ctrltail)
- return hid_submit_ctrl(hid);
- else
- return -1;
-}
static void hid_ctrl(struct urb *urb)
{
@@ -498,15 +511,17 @@ static void hid_ctrl(struct urb *urb)
hid_warn(urb->dev, "ctrl urb status %d received\n", status);
}
- if (unplug)
+ if (unplug) {
usbhid->ctrltail = usbhid->ctrlhead;
- else
+ } else {
usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
- if (!ctrl_pump_restart(hid)) {
- /* Successfully submitted next urb in queue */
- spin_unlock(&usbhid->lock);
- return;
+ if (usbhid->ctrlhead != usbhid->ctrltail &&
+ hid_submit_ctrl(hid) == 0) {
+ /* Successfully submitted next urb in queue */
+ spin_unlock(&usbhid->lock);
+ return;
+ }
}
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
@@ -540,49 +555,36 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->out[usbhid->outhead].report = report;
usbhid->outhead = head;
- /* Try to awake from autosuspend... */
- if (usb_autopm_get_interface_async(usbhid->intf) < 0)
- return;
+ /* If the queue isn't running, restart it */
+ if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
+ usbhid_restart_out_queue(usbhid);
- /*
- * But if still suspended, leave urb enqueued, don't submit.
- * Submission will occur if/when resume() drains the queue.
- */
- if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
- return;
+ /* Otherwise see if an earlier request has timed out */
+ } else if (time_after(jiffies, usbhid->last_out + HZ * 5)) {
+
+ /* Prevent autosuspend following the unlink */
+ usb_autopm_get_interface_no_resume(usbhid->intf);
- if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
- if (hid_submit_out(hid)) {
- clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
- usb_autopm_put_interface_async(usbhid->intf);
- }
- wake_up(&usbhid->wait);
- } else {
/*
- * the queue is known to run
- * but an earlier request may be stuck
- * we may need to time out
- * no race because the URB is blocked under
- * spinlock
+ * Prevent resubmission in case the URB completes
+ * before we can unlink it. We don't want to cancel
+ * the wrong transfer!
*/
- if (time_after(jiffies, usbhid->last_out + HZ * 5)) {
- usb_block_urb(usbhid->urbout);
- /* drop lock to not deadlock if the callback is called */
- spin_unlock(&usbhid->lock);
- usb_unlink_urb(usbhid->urbout);
- spin_lock(&usbhid->lock);
- usb_unblock_urb(usbhid->urbout);
- /*
- * if the unlinking has already completed
- * the pump will have been stopped
- * it must be restarted now
- */
- if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl))
- if (!irq_out_pump_restart(hid))
- set_bit(HID_OUT_RUNNING, &usbhid->iofl);
+ usb_block_urb(usbhid->urbout);
+ /* Drop lock to avoid deadlock if the callback runs */
+ spin_unlock(&usbhid->lock);
- }
+ usb_unlink_urb(usbhid->urbout);
+ spin_lock(&usbhid->lock);
+ usb_unblock_urb(usbhid->urbout);
+
+ /* Unlink might have stopped the queue */
+ if (!test_bit(HID_OUT_RUNNING, &usbhid->iofl))
+ usbhid_restart_out_queue(usbhid);
+
+ /* Now we can allow autosuspend again */
+ usb_autopm_put_interface_async(usbhid->intf);
}
return;
}
@@ -604,47 +606,36 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->ctrl[usbhid->ctrlhead].dir = dir;
usbhid->ctrlhead = head;
- /* Try to awake from autosuspend... */
- if (usb_autopm_get_interface_async(usbhid->intf) < 0)
- return;
+ /* If the queue isn't running, restart it */
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
+ usbhid_restart_ctrl_queue(usbhid);
- /*
- * If already suspended, leave urb enqueued, but don't submit.
- * Submission will occur if/when resume() drains the queue.
- */
- if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
- return;
+ /* Otherwise see if an earlier request has timed out */
+ } else if (time_after(jiffies, usbhid->last_ctrl + HZ * 5)) {
+
+ /* Prevent autosuspend following the unlink */
+ usb_autopm_get_interface_no_resume(usbhid->intf);
- if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
- if (hid_submit_ctrl(hid)) {
- clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- usb_autopm_put_interface_async(usbhid->intf);
- }
- wake_up(&usbhid->wait);
- } else {
/*
- * the queue is known to run
- * but an earlier request may be stuck
- * we may need to time out
- * no race because the URB is blocked under
- * spinlock
+ * Prevent resubmission in case the URB completes
+ * before we can unlink it. We don't want to cancel
+ * the wrong transfer!
*/
- if (time_after(jiffies, usbhid->last_ctrl + HZ * 5)) {
- usb_block_urb(usbhid->urbctrl);
- /* drop lock to not deadlock if the callback is called */
- spin_unlock(&usbhid->lock);
- usb_unlink_urb(usbhid->urbctrl);
- spin_lock(&usbhid->lock);
- usb_unblock_urb(usbhid->urbctrl);
- /*
- * if the unlinking has already completed
- * the pump will have been stopped
- * it must be restarted now
- */
- if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
- if (!ctrl_pump_restart(hid))
- set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- }
+ usb_block_urb(usbhid->urbctrl);
+
+ /* Drop lock to avoid deadlock if the callback runs */
+ spin_unlock(&usbhid->lock);
+
+ usb_unlink_urb(usbhid->urbctrl);
+ spin_lock(&usbhid->lock);
+ usb_unblock_urb(usbhid->urbctrl);
+
+ /* Unlink might have stopped the queue */
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
+
+ /* Now we can allow autosuspend again */
+ usb_autopm_put_interface_async(usbhid->intf);
}
}
@@ -1002,9 +993,10 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
static void usbhid_restart_queues(struct usbhid_device *usbhid)
{
- if (usbhid->urbout)
+ if (usbhid->urbout && !test_bit(HID_OUT_RUNNING, &usbhid->iofl))
usbhid_restart_out_queue(usbhid);
- usbhid_restart_ctrl_queue(usbhid);
+ if (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
+ usbhid_restart_ctrl_queue(usbhid);
}
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
@@ -1471,11 +1463,38 @@ void usbhid_put_power(struct hid_device *hid)
#ifdef CONFIG_PM
+static int hid_resume_common(struct hid_device *hid, bool driver_suspended)
+{
+ struct usbhid_device *usbhid = hid->driver_data;
+ int status;
+
+ spin_lock_irq(&usbhid->lock);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
+ usbhid_mark_busy(usbhid);
+
+ if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
+ test_bit(HID_RESET_PENDING, &usbhid->iofl))
+ schedule_work(&usbhid->reset_work);
+ usbhid->retry_delay = 0;
+
+ usbhid_restart_queues(usbhid);
+ spin_unlock_irq(&usbhid->lock);
+
+ status = hid_start_in(hid);
+ if (status < 0)
+ hid_io_error(hid);
+
+ if (driver_suspended && hid->driver && hid->driver->resume)
+ status = hid->driver->resume(hid);
+ return status;
+}
+
static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
struct hid_device *hid = usb_get_intfdata(intf);
struct usbhid_device *usbhid = hid->driver_data;
int status;
+ bool driver_suspended = false;
if (PMSG_IS_AUTO(message)) {
spin_lock_irq(&usbhid->lock); /* Sync with error handler */
@@ -1486,13 +1505,14 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
&& !test_bit(HID_KEYS_PRESSED, &usbhid->iofl)
&& (!usbhid->ledcount || ignoreled))
{
- set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
+ set_bit(HID_SUSPENDED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
if (hid->driver && hid->driver->suspend) {
status = hid->driver->suspend(hid, message);
if (status < 0)
- return status;
+ goto failed;
}
+ driver_suspended = true;
} else {
usbhid_mark_busy(usbhid);
spin_unlock_irq(&usbhid->lock);
@@ -1505,11 +1525,14 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
if (status < 0)
return status;
}
+ driver_suspended = true;
spin_lock_irq(&usbhid->lock);
- set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
+ set_bit(HID_SUSPENDED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
- if (usbhid_wait_io(hid) < 0)
- return -EIO;
+ if (usbhid_wait_io(hid) < 0) {
+ status = -EIO;
+ goto failed;
+ }
}
hid_cancel_delayed_stuff(usbhid);
@@ -1517,14 +1540,15 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
if (PMSG_IS_AUTO(message) && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
/* lost race against keypresses */
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_mark_busy(usbhid);
- return -EBUSY;
+ status = -EBUSY;
+ goto failed;
}
dev_dbg(&intf->dev, "suspend\n");
return 0;
+
+ failed:
+ hid_resume_common(hid, driver_suspended);
+ return status;
}
static int hid_resume(struct usb_interface *intf)
@@ -1536,23 +1560,7 @@ static int hid_resume(struct usb_interface *intf)
if (!test_bit(HID_STARTED, &usbhid->iofl))
return 0;
- clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
- usbhid_mark_busy(usbhid);
-
- if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
- test_bit(HID_RESET_PENDING, &usbhid->iofl))
- schedule_work(&usbhid->reset_work);
- usbhid->retry_delay = 0;
- status = hid_start_in(hid);
- if (status < 0)
- hid_io_error(hid);
- usbhid_restart_queues(usbhid);
-
- if (status >= 0 && hid->driver && hid->driver->resume) {
- int ret = hid->driver->resume(hid);
- if (ret < 0)
- status = ret;
- }
+ status = hid_resume_common(hid, true);
dev_dbg(&intf->dev, "resume status %d\n", status);
return 0;
}
@@ -1563,7 +1571,7 @@ static int hid_reset_resume(struct usb_interface *intf)
struct usbhid_device *usbhid = hid->driver_data;
int status;
- clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
+ clear_bit(HID_SUSPENDED, &usbhid->iofl);
status = hid_post_reset(intf);
if (status >= 0 && hid->driver && hid->driver->reset_resume) {
int ret = hid->driver->reset_resume(hid);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0597ee604f6e..903eef3d3e10 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -76,6 +76,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_1, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SYMBOL, USB_DEVICE_ID_SYMBOL_SCANNER_2, HID_QUIRK_NOGET },
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 1883d7b94870..bd87a61e5303 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -53,7 +53,6 @@ struct usb_interface *usbhid_find_interface(int minor);
#define HID_CLEAR_HALT 6
#define HID_DISCONNECTED 7
#define HID_STARTED 8
-#define HID_REPORTED_IDLE 9
#define HID_KEYS_PRESSED 10
#define HID_NO_BANDWIDTH 11
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6f1d167cb1ea..b0a2e4c37e12 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -314,6 +314,16 @@ config SENSORS_DS1621
This driver can also be built as a module. If so, the module
will be called ds1621.
+config SENSORS_DA9052_ADC
+ tristate "Dialog DA9052/DA9053 ADC"
+ depends on PMIC_DA9052
+ help
+ Say y here to support the ADC found on Dialog Semiconductor
+ DA9052-BC and DA9053-AA/Bx PMICs.
+
+ This driver can also be built as module. If so, the module
+ will be called da9052-hwmon.
+
config SENSORS_EXYNOS4_TMU
tristate "Temperature sensor on Samsung EXYNOS4"
depends on ARCH_EXYNOS4
@@ -433,6 +443,16 @@ config SENSORS_GPIO_FAN
This driver can also be built as a module. If so, the module
will be called gpio-fan.
+config SENSORS_HIH6130
+ tristate "Honeywell Humidicon HIH-6130 humidity/temperature sensor"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for Honeywell Humidicon
+ HIH-6130 and HIH-6131 Humidicon humidity sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called hih6130.
+
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
depends on X86 && PCI && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e1eeac13b851..7aa98119c4ab 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
+obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
obj-$(CONFIG_SENSORS_DS620) += ds620.o
obj-$(CONFIG_SENSORS_DS1621) += ds1621.o
@@ -58,6 +59,7 @@ obj-$(CONFIG_SENSORS_G760A) += g760a.o
obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
+obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index a72bf25601a4..d4419b47f3d4 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1513,10 +1513,10 @@ LEAVE_UPDATE:
return NULL;
}
-#ifdef CONFIG_PM
-static int abituguru_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int abituguru_suspend(struct device *dev)
{
- struct abituguru_data *data = platform_get_drvdata(pdev);
+ struct abituguru_data *data = dev_get_drvdata(dev);
/*
* make sure all communications with the uguru are done and no new
* ones are started
@@ -1525,29 +1525,30 @@ static int abituguru_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int abituguru_resume(struct platform_device *pdev)
+static int abituguru_resume(struct device *dev)
{
- struct abituguru_data *data = platform_get_drvdata(pdev);
+ struct abituguru_data *data = dev_get_drvdata(dev);
/* See if the uGuru is still ready */
if (inb_p(data->addr + ABIT_UGURU_DATA) != ABIT_UGURU_STATUS_INPUT)
data->uguru_ready = 0;
mutex_unlock(&data->update_lock);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(abituguru_pm, abituguru_suspend, abituguru_resume);
+#define ABIT_UGURU_PM &abituguru_pm
#else
-#define abituguru_suspend NULL
-#define abituguru_resume NULL
+#define ABIT_UGURU_PM NULL
#endif /* CONFIG_PM */
static struct platform_driver abituguru_driver = {
.driver = {
.owner = THIS_MODULE,
.name = ABIT_UGURU_NAME,
+ .pm = ABIT_UGURU_PM,
},
.probe = abituguru_probe,
.remove = __devexit_p(abituguru_remove),
- .suspend = abituguru_suspend,
- .resume = abituguru_resume,
};
static int __init abituguru_detect(void)
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index a5bc4287daa6..5d582aebff87 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1141,10 +1141,10 @@ LEAVE_UPDATE:
return NULL;
}
-#ifdef CONFIG_PM
-static int abituguru3_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int abituguru3_suspend(struct device *dev)
{
- struct abituguru3_data *data = platform_get_drvdata(pdev);
+ struct abituguru3_data *data = dev_get_drvdata(dev);
/*
* make sure all communications with the uguru3 are done and no new
* ones are started
@@ -1153,26 +1153,27 @@ static int abituguru3_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int abituguru3_resume(struct platform_device *pdev)
+static int abituguru3_resume(struct device *dev)
{
- struct abituguru3_data *data = platform_get_drvdata(pdev);
+ struct abituguru3_data *data = dev_get_drvdata(dev);
mutex_unlock(&data->update_lock);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(abituguru3_pm, abituguru3_suspend, abituguru3_resume);
+#define ABIT_UGURU3_PM &abituguru3_pm
#else
-#define abituguru3_suspend NULL
-#define abituguru3_resume NULL
+#define ABIT_UGURU3_PM NULL
#endif /* CONFIG_PM */
static struct platform_driver abituguru3_driver = {
.driver = {
.owner = THIS_MODULE,
.name = ABIT_UGURU3_NAME,
+ .pm = ABIT_UGURU3_PM
},
.probe = abituguru3_probe,
.remove = __devexit_p(abituguru3_remove),
- .suspend = abituguru3_suspend,
- .resume = abituguru3_resume
};
static int __init abituguru3_dmi_detect(void)
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 34ad5a27a7e9..563c02904ddf 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -101,7 +101,7 @@ struct acpi_power_meter_resource {
unsigned long sensors_last_updated;
struct sensor_device_attribute sensors[NUM_SENSORS];
int num_sensors;
- int trip[2];
+ s64 trip[2];
int num_domain_devices;
struct acpi_device **domain_devices;
struct kobject *holders_dir;
@@ -237,7 +237,7 @@ static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
if (res)
return res;
- temp /= 1000;
+ temp = DIV_ROUND_CLOSEST(temp, 1000);
if (temp > resource->caps.max_cap || temp < resource->caps.min_cap)
return -EINVAL;
arg0.integer.value = temp;
@@ -307,9 +307,7 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
if (res)
return res;
- temp /= 1000;
- if (temp < 0)
- return -EINVAL;
+ temp = DIV_ROUND_CLOSEST(temp, 1000);
mutex_lock(&resource->lock);
resource->trip[attr->index - 7] = temp;
@@ -929,20 +927,25 @@ static int acpi_power_meter_remove(struct acpi_device *device, int type)
return 0;
}
-static int acpi_power_meter_resume(struct acpi_device *device)
+static int acpi_power_meter_resume(struct device *dev)
{
struct acpi_power_meter_resource *resource;
- if (!device || !acpi_driver_data(device))
+ if (!dev)
+ return -EINVAL;
+
+ resource = acpi_driver_data(to_acpi_device(dev));
+ if (!resource)
return -EINVAL;
- resource = acpi_driver_data(device);
free_capabilities(resource);
read_capabilities(resource);
return 0;
}
+static SIMPLE_DEV_PM_OPS(acpi_power_meter_pm, NULL, acpi_power_meter_resume);
+
static struct acpi_driver acpi_power_meter_driver = {
.name = "power_meter",
.class = ACPI_POWER_METER_CLASS,
@@ -950,9 +953,9 @@ static struct acpi_driver acpi_power_meter_driver = {
.ops = {
.add = acpi_power_meter_add,
.remove = acpi_power_meter_remove,
- .resume = acpi_power_meter_resume,
.notify = acpi_power_meter_notify,
},
+ .drv.pm = &acpi_power_meter_pm,
};
/* Module init/exit routines */
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 4394e7e99c46..fd1d1b15854e 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -366,11 +366,11 @@ static int adm1021_probe(struct i2c_client *client,
struct adm1021_data *data;
int err;
- data = kzalloc(sizeof(struct adm1021_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct adm1021_data),
+ GFP_KERNEL);
if (!data) {
- pr_debug("adm1021: detect failed, kzalloc failed!\n");
- err = -ENOMEM;
- goto error0;
+ pr_debug("adm1021: detect failed, devm_kzalloc failed!\n");
+ return -ENOMEM;
}
i2c_set_clientdata(client, data);
@@ -384,21 +384,18 @@ static int adm1021_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &adm1021_group);
if (err)
- goto error1;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto error3;
+ goto error;
}
return 0;
-error3:
+error:
sysfs_remove_group(&client->dev.kobj, &adm1021_group);
-error1:
- kfree(data);
-error0:
return err;
}
@@ -418,7 +415,6 @@ static int adm1021_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &adm1021_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index b8557f9857d2..7e16e5d07bc6 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -477,11 +477,10 @@ static int adm1025_probe(struct i2c_client *client,
int err;
u8 config;
- data = kzalloc(sizeof(struct adm1025_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct adm1025_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -492,7 +491,7 @@ static int adm1025_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &adm1025_group);
if (err)
- goto exit_free;
+ return err;
/* Pin 11 is either in4 (+12V) or VID4 */
config = i2c_smbus_read_byte_data(client, ADM1025_REG_CONFIG);
@@ -513,9 +512,6 @@ static int adm1025_probe(struct i2c_client *client,
exit_remove:
sysfs_remove_group(&client->dev.kobj, &adm1025_group);
sysfs_remove_group(&client->dev.kobj, &adm1025_group_in4);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -569,7 +565,6 @@ static int adm1025_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &adm1025_group);
sysfs_remove_group(&client->dev.kobj, &adm1025_group_in4);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 1003219b9f90..0f068e7297ee 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -1834,11 +1834,10 @@ static int adm1026_probe(struct i2c_client *client,
struct adm1026_data *data;
int err;
- data = kzalloc(sizeof(struct adm1026_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct adm1026_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -1852,7 +1851,7 @@ static int adm1026_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &adm1026_group);
if (err)
- goto exitfree;
+ return err;
if (data->config1 & CFG1_AIN8_9)
err = sysfs_create_group(&client->dev.kobj,
&adm1026_group_in8_9);
@@ -1877,9 +1876,6 @@ exitremove:
sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9);
else
sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3);
-exitfree:
- kfree(data);
-exit:
return err;
}
@@ -1892,7 +1888,6 @@ static int adm1026_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &adm1026_group_in8_9);
else
sysfs_remove_group(&client->dev.kobj, &adm1026_group_temp3);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 44e1fd7f3d81..c6a4631e833f 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -954,11 +954,10 @@ static int adm1031_probe(struct i2c_client *client,
struct adm1031_data *data;
int err;
- data = kzalloc(sizeof(struct adm1031_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct adm1031_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->chip_type = id->driver_data;
@@ -975,7 +974,7 @@ static int adm1031_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &adm1031_group);
if (err)
- goto exit_free;
+ return err;
if (data->chip_type == adm1031) {
err = sysfs_create_group(&client->dev.kobj, &adm1031_group_opt);
@@ -994,9 +993,6 @@ static int adm1031_probe(struct i2c_client *client,
exit_remove:
sysfs_remove_group(&client->dev.kobj, &adm1031_group);
sysfs_remove_group(&client->dev.kobj, &adm1031_group_opt);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -1007,7 +1003,6 @@ static int adm1031_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &adm1031_group);
sysfs_remove_group(&client->dev.kobj, &adm1031_group_opt);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index c3c2865a8967..5a78d102a0fa 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -650,11 +650,9 @@ static int adm9240_probe(struct i2c_client *new_client,
struct adm9240_data *data;
int err;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(new_client, data);
mutex_init(&data->update_lock);
@@ -664,7 +662,7 @@ static int adm9240_probe(struct i2c_client *new_client,
/* populate sysfs filesystem */
err = sysfs_create_group(&new_client->dev.kobj, &adm9240_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -676,9 +674,6 @@ static int adm9240_probe(struct i2c_client *new_client,
exit_remove:
sysfs_remove_group(&new_client->dev.kobj, &adm9240_group);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -689,7 +684,6 @@ static int adm9240_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &adm9240_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index df29d13a5349..861c756e9536 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -1260,7 +1260,7 @@ static int adt7475_probe(struct i2c_client *client,
int i, ret = 0, revision;
u8 config2, config3;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -1344,7 +1344,7 @@ static int adt7475_probe(struct i2c_client *client,
ret = sysfs_create_group(&client->dev.kobj, &adt7475_attr_group);
if (ret)
- goto efree;
+ return ret;
/* Features that can be disabled individually */
if (data->has_fan4) {
@@ -1410,8 +1410,6 @@ static int adt7475_probe(struct i2c_client *client,
eremove:
adt7475_remove_files(client, data);
-efree:
- kfree(data);
return ret;
}
@@ -1421,7 +1419,6 @@ static int adt7475_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
adt7475_remove_files(client, data);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 2cde9ecf7731..4d937a18fadb 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -54,7 +54,7 @@
#define APPLESMC_MAX_DATA_LENGTH 32
/* wait up to 32 ms for a status change. */
-#define APPLESMC_MIN_WAIT 0x0040
+#define APPLESMC_MIN_WAIT 0x0010
#define APPLESMC_MAX_WAIT 0x8000
#define APPLESMC_STATUS_MASK 0x0f
@@ -80,6 +80,8 @@
#define FANS_MANUAL "FS! " /* r-w ui16 */
#define FAN_ID_FMT "F%dID" /* r-o char[16] */
+#define TEMP_SENSOR_TYPE "sp78"
+
/* List of keys used to read/write fan speeds */
static const char *const fan_speed_fmt[] = {
"F%dAc", /* actual speed */
@@ -96,10 +98,6 @@ static const char *const fan_speed_fmt[] = {
#define APPLESMC_INPUT_FUZZ 4 /* input event threshold */
#define APPLESMC_INPUT_FLAT 4
-#define SENSOR_X 0
-#define SENSOR_Y 1
-#define SENSOR_Z 2
-
#define to_index(attr) (to_sensor_dev_attr(attr)->index & 0xffff)
#define to_option(attr) (to_sensor_dev_attr(attr)->index >> 16)
@@ -135,11 +133,13 @@ static struct applesmc_registers {
unsigned int temp_count; /* number of temperature registers */
unsigned int temp_begin; /* temperature lower index bound */
unsigned int temp_end; /* temperature upper index bound */
+ unsigned int index_count; /* size of temperature index array */
int num_light_sensors; /* number of light sensors */
bool has_accelerometer; /* has motion sensor */
bool has_key_backlight; /* has keyboard backlight */
bool init_complete; /* true when fully initialized */
struct applesmc_entry *cache; /* cached key entries */
+ const char **index; /* temperature key index */
} smcreg = {
.mutex = __MUTEX_INITIALIZER(smcreg.mutex),
};
@@ -432,30 +432,19 @@ static int applesmc_has_key(const char *key, bool *value)
}
/*
- * applesmc_read_motion_sensor - Read motion sensor (X, Y or Z).
+ * applesmc_read_s16 - Read 16-bit signed big endian register
*/
-static int applesmc_read_motion_sensor(int index, s16 *value)
+static int applesmc_read_s16(const char *key, s16 *value)
{
u8 buffer[2];
int ret;
- switch (index) {
- case SENSOR_X:
- ret = applesmc_read_key(MOTION_SENSOR_X_KEY, buffer, 2);
- break;
- case SENSOR_Y:
- ret = applesmc_read_key(MOTION_SENSOR_Y_KEY, buffer, 2);
- break;
- case SENSOR_Z:
- ret = applesmc_read_key(MOTION_SENSOR_Z_KEY, buffer, 2);
- break;
- default:
- ret = -EINVAL;
- }
+ ret = applesmc_read_key(key, buffer, 2);
+ if (ret)
+ return ret;
*value = ((s16)buffer[0] << 8) | buffer[1];
-
- return ret;
+ return 0;
}
/*
@@ -482,6 +471,30 @@ static void applesmc_device_init(void)
pr_warn("failed to init the device\n");
}
+static int applesmc_init_index(struct applesmc_registers *s)
+{
+ const struct applesmc_entry *entry;
+ unsigned int i;
+
+ if (s->index)
+ return 0;
+
+ s->index = kcalloc(s->temp_count, sizeof(s->index[0]), GFP_KERNEL);
+ if (!s->index)
+ return -ENOMEM;
+
+ for (i = s->temp_begin; i < s->temp_end; i++) {
+ entry = applesmc_get_entry_by_index(i);
+ if (IS_ERR(entry))
+ continue;
+ if (strcmp(entry->type, TEMP_SENSOR_TYPE))
+ continue;
+ s->index[s->index_count++] = entry->key;
+ }
+
+ return 0;
+}
+
/*
* applesmc_init_smcreg_try - Try to initialize register cache. Idempotent.
*/
@@ -517,6 +530,10 @@ static int applesmc_init_smcreg_try(void)
return ret;
s->temp_count = s->temp_end - s->temp_begin;
+ ret = applesmc_init_index(s);
+ if (ret)
+ return ret;
+
ret = applesmc_has_key(LIGHT_SENSOR_LEFT_KEY, &left_light_sensor);
if (ret)
return ret;
@@ -533,8 +550,8 @@ static int applesmc_init_smcreg_try(void)
s->num_light_sensors = left_light_sensor + right_light_sensor;
s->init_complete = true;
- pr_info("key=%d fan=%d temp=%d acc=%d lux=%d kbd=%d\n",
- s->key_count, s->fan_count, s->temp_count,
+ pr_info("key=%d fan=%d temp=%d index=%d acc=%d lux=%d kbd=%d\n",
+ s->key_count, s->fan_count, s->temp_count, s->index_count,
s->has_accelerometer,
s->num_light_sensors,
s->has_key_backlight);
@@ -542,6 +559,15 @@ static int applesmc_init_smcreg_try(void)
return 0;
}
+static void applesmc_destroy_smcreg(void)
+{
+ kfree(smcreg.index);
+ smcreg.index = NULL;
+ kfree(smcreg.cache);
+ smcreg.cache = NULL;
+ smcreg.init_complete = false;
+}
+
/*
* applesmc_init_smcreg - Initialize register cache.
*
@@ -562,19 +588,11 @@ static int applesmc_init_smcreg(void)
msleep(INIT_WAIT_MSECS);
}
- kfree(smcreg.cache);
- smcreg.cache = NULL;
+ applesmc_destroy_smcreg();
return ret;
}
-static void applesmc_destroy_smcreg(void)
-{
- kfree(smcreg.cache);
- smcreg.cache = NULL;
- smcreg.init_complete = false;
-}
-
/* Device model stuff */
static int applesmc_probe(struct platform_device *dev)
{
@@ -624,8 +642,8 @@ static struct platform_driver applesmc_driver = {
*/
static void applesmc_calibrate(void)
{
- applesmc_read_motion_sensor(SENSOR_X, &rest_x);
- applesmc_read_motion_sensor(SENSOR_Y, &rest_y);
+ applesmc_read_s16(MOTION_SENSOR_X_KEY, &rest_x);
+ applesmc_read_s16(MOTION_SENSOR_Y_KEY, &rest_y);
rest_x = -rest_x;
}
@@ -634,9 +652,9 @@ static void applesmc_idev_poll(struct input_polled_dev *dev)
struct input_dev *idev = dev->input;
s16 x, y;
- if (applesmc_read_motion_sensor(SENSOR_X, &x))
+ if (applesmc_read_s16(MOTION_SENSOR_X_KEY, &x))
return;
- if (applesmc_read_motion_sensor(SENSOR_Y, &y))
+ if (applesmc_read_s16(MOTION_SENSOR_Y_KEY, &y))
return;
x = -x;
@@ -659,13 +677,13 @@ static ssize_t applesmc_position_show(struct device *dev,
int ret;
s16 x, y, z;
- ret = applesmc_read_motion_sensor(SENSOR_X, &x);
+ ret = applesmc_read_s16(MOTION_SENSOR_X_KEY, &x);
if (ret)
goto out;
- ret = applesmc_read_motion_sensor(SENSOR_Y, &y);
+ ret = applesmc_read_s16(MOTION_SENSOR_Y_KEY, &y);
if (ret)
goto out;
- ret = applesmc_read_motion_sensor(SENSOR_Z, &z);
+ ret = applesmc_read_s16(MOTION_SENSOR_Z_KEY, &z);
if (ret)
goto out;
@@ -718,44 +736,27 @@ out:
static ssize_t applesmc_show_sensor_label(struct device *dev,
struct device_attribute *devattr, char *sysfsbuf)
{
- int index = smcreg.temp_begin + to_index(devattr);
- const struct applesmc_entry *entry;
+ const char *key = smcreg.index[to_index(devattr)];
- entry = applesmc_get_entry_by_index(index);
- if (IS_ERR(entry))
- return PTR_ERR(entry);
-
- return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", entry->key);
+ return snprintf(sysfsbuf, PAGE_SIZE, "%s\n", key);
}
/* Displays degree Celsius * 1000 */
static ssize_t applesmc_show_temperature(struct device *dev,
struct device_attribute *devattr, char *sysfsbuf)
{
- int index = smcreg.temp_begin + to_index(devattr);
- const struct applesmc_entry *entry;
+ const char *key = smcreg.index[to_index(devattr)];
int ret;
- u8 buffer[2];
- unsigned int temp;
-
- entry = applesmc_get_entry_by_index(index);
- if (IS_ERR(entry))
- return PTR_ERR(entry);
- if (entry->len > 2)
- return -EINVAL;
+ s16 value;
+ int temp;
- ret = applesmc_read_entry(entry, buffer, entry->len);
+ ret = applesmc_read_s16(key, &value);
if (ret)
return ret;
- if (entry->len == 2) {
- temp = buffer[0] * 1000;
- temp += (buffer[1] >> 6) * 250;
- } else {
- temp = buffer[0] * 4000;
- }
+ temp = 250 * (value >> 6);
- return snprintf(sysfsbuf, PAGE_SIZE, "%u\n", temp);
+ return snprintf(sysfsbuf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t applesmc_show_fan_speed(struct device *dev,
@@ -1265,7 +1266,7 @@ static int __init applesmc_init(void)
if (ret)
goto out_info;
- ret = applesmc_create_nodes(temp_group, smcreg.temp_count);
+ ret = applesmc_create_nodes(temp_group, smcreg.index_count);
if (ret)
goto out_fans;
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 7caa242915a6..b867aab78049 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -1109,7 +1109,8 @@ asc7621_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- data = kzalloc(sizeof(struct asc7621_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct asc7621_data),
+ GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -1143,7 +1144,6 @@ exit_remove:
&(asc7621_params[i].sda.dev_attr));
}
- kfree(data);
return err;
}
@@ -1192,7 +1192,6 @@ static int asc7621_remove(struct i2c_client *client)
&(asc7621_params[i].sda.dev_attr));
}
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index 58af6aa93530..aecb9ea7beb5 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -345,11 +345,10 @@ static int atxp1_probe(struct i2c_client *new_client,
struct atxp1_data *data;
int err;
- data = kzalloc(sizeof(struct atxp1_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(struct atxp1_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
/* Get VRM */
data->vrm = vid_which_vrm();
@@ -362,7 +361,7 @@ static int atxp1_probe(struct i2c_client *new_client,
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &atxp1_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -377,9 +376,6 @@ static int atxp1_probe(struct i2c_client *new_client,
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &atxp1_group);
-exit_free:
- kfree(data);
-exit:
return err;
};
@@ -390,8 +386,6 @@ static int atxp1_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &atxp1_group);
- kfree(data);
-
return 0;
};
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 7f1feb2f467a..637c51c11b44 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -693,7 +693,7 @@ static void __cpuinit get_core_online(unsigned int cpu)
* sensors. We check this bit only, all the early CPUs
* without thermal sensors will be filtered out.
*/
- if (!cpu_has(c, X86_FEATURE_DTS))
+ if (!cpu_has(c, X86_FEATURE_DTHERM))
return;
if (!pdev) {
@@ -794,7 +794,7 @@ static struct notifier_block coretemp_cpu_notifier __refdata = {
};
static const struct x86_cpu_id coretemp_ids[] = {
- { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS },
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM },
{}
};
MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
new file mode 100644
index 000000000000..fc65f2d3ec91
--- /dev/null
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -0,0 +1,344 @@
+/*
+ * HWMON Driver for Dialog DA9052
+ *
+ * Copyright(c) 2012 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+struct da9052_hwmon {
+ struct da9052 *da9052;
+ struct device *class_device;
+ struct mutex hwmon_lock;
+};
+
+static const char * const input_names[] = {
+ [DA9052_ADC_VDDOUT] = "VDDOUT",
+ [DA9052_ADC_ICH] = "CHARGING CURRENT",
+ [DA9052_ADC_TBAT] = "BATTERY TEMP",
+ [DA9052_ADC_VBAT] = "BATTERY VOLTAGE",
+ [DA9052_ADC_IN4] = "ADC IN4",
+ [DA9052_ADC_IN5] = "ADC IN5",
+ [DA9052_ADC_IN6] = "ADC IN6",
+ [DA9052_ADC_TJUNC] = "BATTERY JUNCTION TEMP",
+ [DA9052_ADC_VBBAT] = "BACK-UP BATTERY VOLTAGE",
+};
+
+/* Conversion function for VDDOUT and VBAT */
+static inline int volt_reg_to_mV(int value)
+{
+ return DIV_ROUND_CLOSEST(value * 1000, 512) + 2500;
+}
+
+/* Conversion function for ADC channels 4, 5 and 6 */
+static inline int input_reg_to_mV(int value)
+{
+ return DIV_ROUND_CLOSEST(value * 2500, 1023);
+}
+
+/* Conversion function for VBBAT */
+static inline int vbbat_reg_to_mV(int value)
+{
+ return DIV_ROUND_CLOSEST(value * 2500, 512);
+}
+
+static int da9052_enable_vddout_channel(struct da9052 *da9052)
+{
+ int ret;
+
+ ret = da9052_reg_read(da9052, DA9052_ADC_CONT_REG);
+ if (ret < 0)
+ return ret;
+
+ ret |= DA9052_ADCCONT_AUTOVDDEN;
+
+ return da9052_reg_write(da9052, DA9052_ADC_CONT_REG, ret);
+}
+
+static int da9052_disable_vddout_channel(struct da9052 *da9052)
+{
+ int ret;
+
+ ret = da9052_reg_read(da9052, DA9052_ADC_CONT_REG);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~DA9052_ADCCONT_AUTOVDDEN;
+
+ return da9052_reg_write(da9052, DA9052_ADC_CONT_REG, ret);
+}
+
+static ssize_t da9052_read_vddout(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret, vdd;
+
+ mutex_lock(&hwmon->hwmon_lock);
+
+ ret = da9052_enable_vddout_channel(hwmon->da9052);
+ if (ret < 0)
+ goto hwmon_err;
+
+ vdd = da9052_reg_read(hwmon->da9052, DA9052_VDD_RES_REG);
+ if (vdd < 0) {
+ ret = vdd;
+ goto hwmon_err_release;
+ }
+
+ ret = da9052_disable_vddout_channel(hwmon->da9052);
+ if (ret < 0)
+ goto hwmon_err;
+
+ mutex_unlock(&hwmon->hwmon_lock);
+ return sprintf(buf, "%d\n", volt_reg_to_mV(vdd));
+
+hwmon_err_release:
+ da9052_disable_vddout_channel(hwmon->da9052);
+hwmon_err:
+ mutex_unlock(&hwmon->hwmon_lock);
+ return ret;
+}
+
+static ssize_t da9052_read_ich(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret;
+
+ ret = da9052_reg_read(hwmon->da9052, DA9052_ICHG_AV_REG);
+ if (ret < 0)
+ return ret;
+
+ /* Equivalent to 3.9mA/bit in register ICHG_AV */
+ return sprintf(buf, "%d\n", DIV_ROUND_CLOSEST(ret * 39, 10));
+}
+
+static ssize_t da9052_read_tbat(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", da9052_adc_read_temp(hwmon->da9052));
+}
+
+static ssize_t da9052_read_vbat(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret;
+
+ ret = da9052_adc_manual_read(hwmon->da9052, DA9052_ADC_VBAT);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", volt_reg_to_mV(ret));
+}
+
+static ssize_t da9052_read_misc_channel(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int channel = to_sensor_dev_attr(devattr)->index;
+ int ret;
+
+ ret = da9052_adc_manual_read(hwmon->da9052, channel);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", input_reg_to_mV(ret));
+}
+
+static ssize_t da9052_read_tjunc(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int tjunc;
+ int toffset;
+
+ tjunc = da9052_reg_read(hwmon->da9052, DA9052_TJUNC_RES_REG);
+ if (tjunc < 0)
+ return tjunc;
+
+ toffset = da9052_reg_read(hwmon->da9052, DA9052_T_OFFSET_REG);
+ if (toffset < 0)
+ return toffset;
+
+ /*
+ * Degrees celsius = 1.708 * (TJUNC_RES - T_OFFSET) - 108.8
+ * T_OFFSET is a trim value used to improve accuracy of the result
+ */
+ return sprintf(buf, "%d\n", 1708 * (tjunc - toffset) - 108800);
+}
+
+static ssize_t da9052_read_vbbat(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct da9052_hwmon *hwmon = dev_get_drvdata(dev);
+ int ret;
+
+ ret = da9052_adc_manual_read(hwmon->da9052, DA9052_ADC_VBBAT);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d\n", vbbat_reg_to_mV(ret));
+}
+
+static ssize_t da9052_hwmon_show_name(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ return sprintf(buf, "da9052-hwmon\n");
+}
+
+static ssize_t show_label(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ return sprintf(buf, "%s\n",
+ input_names[to_sensor_dev_attr(devattr)->index]);
+}
+
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, da9052_read_vddout, NULL,
+ DA9052_ADC_VDDOUT);
+static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_VDDOUT);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, da9052_read_vbat, NULL,
+ DA9052_ADC_VBAT);
+static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_VBAT);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, da9052_read_misc_channel, NULL,
+ DA9052_ADC_IN4);
+static SENSOR_DEVICE_ATTR(in4_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_IN4);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, da9052_read_misc_channel, NULL,
+ DA9052_ADC_IN5);
+static SENSOR_DEVICE_ATTR(in5_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_IN5);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, da9052_read_misc_channel, NULL,
+ DA9052_ADC_IN6);
+static SENSOR_DEVICE_ATTR(in6_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_IN6);
+static SENSOR_DEVICE_ATTR(in9_input, S_IRUGO, da9052_read_vbbat, NULL,
+ DA9052_ADC_VBBAT);
+static SENSOR_DEVICE_ATTR(in9_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_VBBAT);
+
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, da9052_read_ich, NULL,
+ DA9052_ADC_ICH);
+static SENSOR_DEVICE_ATTR(curr1_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_ICH);
+
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, da9052_read_tbat, NULL,
+ DA9052_ADC_TBAT);
+static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_TBAT);
+static SENSOR_DEVICE_ATTR(temp8_input, S_IRUGO, da9052_read_tjunc, NULL,
+ DA9052_ADC_TJUNC);
+static SENSOR_DEVICE_ATTR(temp8_label, S_IRUGO, show_label, NULL,
+ DA9052_ADC_TJUNC);
+
+static DEVICE_ATTR(name, S_IRUGO, da9052_hwmon_show_name, NULL);
+
+static struct attribute *da9052_attr[] = {
+ &dev_attr_name.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in0_label.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in3_label.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in4_label.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in5_label.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in6_label.dev_attr.attr,
+ &sensor_dev_attr_in9_input.dev_attr.attr,
+ &sensor_dev_attr_in9_label.dev_attr.attr,
+ &sensor_dev_attr_curr1_input.dev_attr.attr,
+ &sensor_dev_attr_curr1_label.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_label.dev_attr.attr,
+ &sensor_dev_attr_temp8_input.dev_attr.attr,
+ &sensor_dev_attr_temp8_label.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group da9052_attr_group = {.attrs = da9052_attr};
+
+static int __devinit da9052_hwmon_probe(struct platform_device *pdev)
+{
+ struct da9052_hwmon *hwmon;
+ int ret;
+
+ hwmon = devm_kzalloc(&pdev->dev, sizeof(struct da9052_hwmon),
+ GFP_KERNEL);
+ if (!hwmon)
+ return -ENOMEM;
+
+ mutex_init(&hwmon->hwmon_lock);
+ hwmon->da9052 = dev_get_drvdata(pdev->dev.parent);
+
+ platform_set_drvdata(pdev, hwmon);
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &da9052_attr_group);
+ if (ret)
+ goto err_mem;
+
+ hwmon->class_device = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(hwmon->class_device)) {
+ ret = PTR_ERR(hwmon->class_device);
+ goto err_sysfs;
+ }
+
+ return 0;
+
+err_sysfs:
+ sysfs_remove_group(&pdev->dev.kobj, &da9052_attr_group);
+err_mem:
+ return ret;
+}
+
+static int __devexit da9052_hwmon_remove(struct platform_device *pdev)
+{
+ struct da9052_hwmon *hwmon = platform_get_drvdata(pdev);
+
+ hwmon_device_unregister(hwmon->class_device);
+ sysfs_remove_group(&pdev->dev.kobj, &da9052_attr_group);
+
+ return 0;
+}
+
+static struct platform_driver da9052_hwmon_driver = {
+ .probe = da9052_hwmon_probe,
+ .remove = __devexit_p(da9052_hwmon_remove),
+ .driver = {
+ .name = "da9052-hwmon",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(da9052_hwmon_driver);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 HWMON driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-hwmon");
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index f647a3307ebc..1c568736baff 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -249,11 +249,10 @@ static int ds1621_probe(struct i2c_client *client,
struct ds1621_data *data;
int err;
- data = kzalloc(sizeof(struct ds1621_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct ds1621_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -264,7 +263,7 @@ static int ds1621_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &ds1621_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -276,9 +275,6 @@ static int ds1621_probe(struct i2c_client *client,
exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &ds1621_group);
- exit_free:
- kfree(data);
- exit:
return err;
}
@@ -289,8 +285,6 @@ static int ds1621_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &ds1621_group);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index e7d234b59312..7bb8e888692c 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -732,6 +732,6 @@ static struct i2c_driver emc2103_driver = {
module_i2c_driver(emc2103_driver);
-MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
+MODULE_AUTHOR("Steve Glendinning <steve.glendinning@shawell.net>");
MODULE_DESCRIPTION("SMSC EMC2103 hwmon driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 840f5112e602..ada12a98a97c 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -492,11 +492,10 @@ static int emc6w201_probe(struct i2c_client *client,
struct emc6w201_data *data;
int err;
- data = kzalloc(sizeof(struct emc6w201_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct emc6w201_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -504,7 +503,7 @@ static int emc6w201_probe(struct i2c_client *client,
/* Create sysfs attribute */
err = sysfs_create_group(&client->dev.kobj, &emc6w201_group);
if (err)
- goto exit_free;
+ return err;
/* Expose as a hwmon device */
data->hwmon_dev = hwmon_device_register(&client->dev);
@@ -517,9 +516,6 @@ static int emc6w201_probe(struct i2c_client *client,
exit_remove:
sysfs_remove_group(&client->dev.kobj, &emc6w201_group);
- exit_free:
- kfree(data);
- exit:
return err;
}
@@ -529,7 +525,6 @@ static int emc6w201_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &emc6w201_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/exynos4_tmu.c b/drivers/hwmon/exynos4_tmu.c
index f2359a0093bd..e912059140cd 100644
--- a/drivers/hwmon/exynos4_tmu.c
+++ b/drivers/hwmon/exynos4_tmu.c
@@ -475,35 +475,39 @@ static int __devexit exynos4_tmu_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int exynos4_tmu_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int exynos4_tmu_suspend(struct device *dev)
{
- exynos4_tmu_control(pdev, false);
+ exynos4_tmu_control(to_platform_device(dev), false);
return 0;
}
-static int exynos4_tmu_resume(struct platform_device *pdev)
+static int exynos4_tmu_resume(struct device *dev)
{
+ struct platform_device *pdev = to_platform_device(dev);
+
exynos4_tmu_initialize(pdev);
exynos4_tmu_control(pdev, true);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(exynos4_tmu_pm,
+ exynos4_tmu_suspend, exynos4_tmu_resume);
+#define EXYNOS4_TMU_PM &exynos4_tmu_pm
#else
-#define exynos4_tmu_suspend NULL
-#define exynos4_tmu_resume NULL
+#define EXYNOS4_TMU_PM NULL
#endif
static struct platform_driver exynos4_tmu_driver = {
.driver = {
.name = "exynos4-tmu",
.owner = THIS_MODULE,
+ .pm = EXYNOS4_TMU_PM,
},
.probe = exynos4_tmu_probe,
.remove = __devexit_p(exynos4_tmu_remove),
- .suspend = exynos4_tmu_suspend,
- .resume = exynos4_tmu_resume,
};
module_platform_driver(exynos4_tmu_driver);
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 3e4da620e9c7..4dd7723d257f 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1386,20 +1386,20 @@ static int __devinit f71805f_probe(struct platform_device *pdev)
"f71872f",
};
- data = kzalloc(sizeof(struct f71805f_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct f71805f_data),
+ GFP_KERNEL);
if (!data) {
- err = -ENOMEM;
pr_err("Out of memory\n");
- goto exit;
+ return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start + ADDR_REG_OFFSET, 2, DRVNAME)) {
- err = -EBUSY;
+ if (!devm_request_region(&pdev->dev, res->start + ADDR_REG_OFFSET, 2,
+ DRVNAME)) {
dev_err(&pdev->dev, "Failed to request region 0x%lx-0x%lx\n",
(unsigned long)(res->start + ADDR_REG_OFFSET),
(unsigned long)(res->start + ADDR_REG_OFFSET + 1));
- goto exit_free;
+ return -EBUSY;
}
data->addr = res->start;
data->name = names[sio_data->kind];
@@ -1427,7 +1427,7 @@ static int __devinit f71805f_probe(struct platform_device *pdev)
/* Register sysfs interface files */
err = sysfs_create_group(&pdev->dev.kobj, &f71805f_group);
if (err)
- goto exit_release_region;
+ return err;
if (data->has_in & (1 << 4)) { /* in4 */
err = sysfs_create_group(&pdev->dev.kobj,
&f71805f_group_optin[0]);
@@ -1487,19 +1487,12 @@ exit_remove_files:
for (i = 0; i < 4; i++)
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_optin[i]);
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_pwm_freq);
-exit_release_region:
- release_region(res->start + ADDR_REG_OFFSET, 2);
-exit_free:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-exit:
return err;
}
static int __devexit f71805f_remove(struct platform_device *pdev)
{
struct f71805f_data *data = platform_get_drvdata(pdev);
- struct resource *res;
int i;
hwmon_device_unregister(data->hwmon_dev);
@@ -1507,11 +1500,6 @@ static int __devexit f71805f_remove(struct platform_device *pdev)
for (i = 0; i < 4; i++)
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_optin[i]);
sysfs_remove_group(&pdev->dev.kobj, &f71805f_group_pwm_freq);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start + ADDR_REG_OFFSET, 2);
return 0;
}
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 6b13f1a4dc27..2764b78a784b 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -67,7 +67,8 @@ static ssize_t show_power(struct device *dev,
REG_TDP_LIMIT3, &val);
tdp_limit = val >> 16;
- curr_pwr_watts = (tdp_limit + data->base_tdp) << running_avg_range;
+ curr_pwr_watts = ((u64)(tdp_limit +
+ data->base_tdp)) << running_avg_range;
curr_pwr_watts -= running_avg_capture;
curr_pwr_watts *= data->tdp_to_watts;
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 764a083ac7a7..2c74673f48e5 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -544,11 +544,10 @@ static int gl518_probe(struct i2c_client *client,
struct gl518_data *data;
int err, revision;
- data = kzalloc(sizeof(struct gl518_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct gl518_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
revision = gl518_read_value(client, GL518_REG_REVISION);
@@ -562,7 +561,7 @@ static int gl518_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &gl518_group);
if (err)
- goto exit_free;
+ return err;
if (data->type == gl518sm_r80) {
err = sysfs_create_group(&client->dev.kobj, &gl518_group_r80);
if (err)
@@ -581,9 +580,6 @@ exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &gl518_group);
if (data->type == gl518sm_r80)
sysfs_remove_group(&client->dev.kobj, &gl518_group_r80);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -617,7 +613,6 @@ static int gl518_remove(struct i2c_client *client)
if (data->type == gl518sm_r80)
sysfs_remove_group(&client->dev.kobj, &gl518_group_r80);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 5ff452b6a4d0..a21ff252f2f1 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -779,11 +779,10 @@ static int gl520_probe(struct i2c_client *client,
struct gl520_data *data;
int err;
- data = kzalloc(sizeof(struct gl520_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct gl520_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -794,7 +793,7 @@ static int gl520_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &gl520_group);
if (err)
- goto exit_free;
+ return err;
if (data->two_temps)
err = sysfs_create_group(&client->dev.kobj, &gl520_group_temp2);
@@ -816,9 +815,6 @@ exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &gl520_group);
sysfs_remove_group(&client->dev.kobj, &gl520_group_in4);
sysfs_remove_group(&client->dev.kobj, &gl520_group_temp2);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -870,7 +866,6 @@ static int gl520_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &gl520_group_in4);
sysfs_remove_group(&client->dev.kobj, &gl520_group_temp2);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 2ce8c44a0e07..2f4b01bda87c 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -41,7 +41,7 @@ struct gpio_fan_data {
int num_speed;
struct gpio_fan_speed *speed;
int speed_index;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
int resume_speed;
#endif
bool pwm_enable;
@@ -95,17 +95,17 @@ static int fan_alarm_init(struct gpio_fan_data *fan_data,
fan_data->alarm = alarm;
- err = gpio_request(alarm->gpio, "GPIO fan alarm");
+ err = devm_gpio_request(&pdev->dev, alarm->gpio, "GPIO fan alarm");
if (err)
return err;
err = gpio_direction_input(alarm->gpio);
if (err)
- goto err_free_gpio;
+ return err;
err = device_create_file(&pdev->dev, &dev_attr_fan1_alarm);
if (err)
- goto err_free_gpio;
+ return err;
/*
* If the alarm GPIO don't support interrupts, just leave
@@ -117,8 +117,8 @@ static int fan_alarm_init(struct gpio_fan_data *fan_data,
INIT_WORK(&fan_data->alarm_work, fan_alarm_notify);
irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH);
- err = request_irq(alarm_irq, fan_alarm_irq_handler, IRQF_SHARED,
- "GPIO fan alarm", fan_data);
+ err = devm_request_irq(&pdev->dev, alarm_irq, fan_alarm_irq_handler,
+ IRQF_SHARED, "GPIO fan alarm", fan_data);
if (err)
goto err_free_sysfs;
@@ -126,21 +126,14 @@ static int fan_alarm_init(struct gpio_fan_data *fan_data,
err_free_sysfs:
device_remove_file(&pdev->dev, &dev_attr_fan1_alarm);
-err_free_gpio:
- gpio_free(alarm->gpio);
-
return err;
}
static void fan_alarm_free(struct gpio_fan_data *fan_data)
{
struct platform_device *pdev = fan_data->pdev;
- int alarm_irq = gpio_to_irq(fan_data->alarm->gpio);
- if (alarm_irq >= 0)
- free_irq(alarm_irq, fan_data);
device_remove_file(&pdev->dev, &dev_attr_fan1_alarm);
- gpio_free(fan_data->alarm->gpio);
}
/*
@@ -365,15 +358,14 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
int i, err;
for (i = 0; i < num_ctrl; i++) {
- err = gpio_request(ctrl[i], "GPIO fan control");
+ err = devm_gpio_request(&pdev->dev, ctrl[i],
+ "GPIO fan control");
if (err)
- goto err_free_gpio;
+ return err;
err = gpio_direction_output(ctrl[i], gpio_get_value(ctrl[i]));
- if (err) {
- gpio_free(ctrl[i]);
- goto err_free_gpio;
- }
+ if (err)
+ return err;
}
fan_data->num_ctrl = num_ctrl;
@@ -382,32 +374,18 @@ static int fan_ctrl_init(struct gpio_fan_data *fan_data,
fan_data->speed = pdata->speed;
fan_data->pwm_enable = true; /* Enable manual fan speed control. */
fan_data->speed_index = get_fan_speed_index(fan_data);
- if (fan_data->speed_index < 0) {
- err = -ENODEV;
- goto err_free_gpio;
- }
+ if (fan_data->speed_index < 0)
+ return -ENODEV;
err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
- if (err)
- goto err_free_gpio;
-
- return 0;
-
-err_free_gpio:
- for (i = i - 1; i >= 0; i--)
- gpio_free(ctrl[i]);
-
return err;
}
static void fan_ctrl_free(struct gpio_fan_data *fan_data)
{
struct platform_device *pdev = fan_data->pdev;
- int i;
sysfs_remove_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
- for (i = 0; i < fan_data->num_ctrl; i++)
- gpio_free(fan_data->ctrl[i]);
}
/*
@@ -431,7 +409,8 @@ static int __devinit gpio_fan_probe(struct platform_device *pdev)
if (!pdata)
return -EINVAL;
- fan_data = kzalloc(sizeof(struct gpio_fan_data), GFP_KERNEL);
+ fan_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_fan_data),
+ GFP_KERNEL);
if (!fan_data)
return -ENOMEM;
@@ -443,7 +422,7 @@ static int __devinit gpio_fan_probe(struct platform_device *pdev)
if (pdata->alarm) {
err = fan_alarm_init(fan_data, pdata->alarm);
if (err)
- goto err_free_data;
+ return err;
}
/* Configure control GPIOs if available. */
@@ -480,10 +459,6 @@ err_free_ctrl:
err_free_alarm:
if (fan_data->alarm)
fan_alarm_free(fan_data);
-err_free_data:
- platform_set_drvdata(pdev, NULL);
- kfree(fan_data);
-
return err;
}
@@ -497,15 +472,14 @@ static int __devexit gpio_fan_remove(struct platform_device *pdev)
fan_alarm_free(fan_data);
if (fan_data->ctrl)
fan_ctrl_free(fan_data);
- kfree(fan_data);
return 0;
}
-#ifdef CONFIG_PM
-static int gpio_fan_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int gpio_fan_suspend(struct device *dev)
{
- struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
if (fan_data->ctrl) {
fan_data->resume_speed = fan_data->speed_index;
@@ -515,27 +489,28 @@ static int gpio_fan_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int gpio_fan_resume(struct platform_device *pdev)
+static int gpio_fan_resume(struct device *dev)
{
- struct gpio_fan_data *fan_data = platform_get_drvdata(pdev);
+ struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
if (fan_data->ctrl)
set_fan_speed(fan_data, fan_data->resume_speed);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(gpio_fan_pm, gpio_fan_suspend, gpio_fan_resume);
+#define GPIO_FAN_PM &gpio_fan_pm
#else
-#define gpio_fan_suspend NULL
-#define gpio_fan_resume NULL
+#define GPIO_FAN_PM NULL
#endif
static struct platform_driver gpio_fan_driver = {
.probe = gpio_fan_probe,
.remove = __devexit_p(gpio_fan_remove),
- .suspend = gpio_fan_suspend,
- .resume = gpio_fan_resume,
.driver = {
.name = "gpio-fan",
+ .pm = GPIO_FAN_PM,
},
};
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
new file mode 100644
index 000000000000..e8ee75f55472
--- /dev/null
+++ b/drivers/hwmon/hih6130.c
@@ -0,0 +1,293 @@
+/* Honeywell HIH-6130/HIH-6131 humidity and temperature sensor driver
+ *
+ * Copyright (C) 2012 Iain Paton <ipaton0@gmail.com>
+ *
+ * heavily based on the sht21 driver
+ * Copyright (C) 2010 Urs Fleisch <urs.fleisch@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Data sheets available (2012-06-22) at
+ * http://sensing.honeywell.com/index.php?ci_id=3106&la_id=1&defId=44872
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+
+/**
+ * struct hih6130 - HIH-6130 device specific data
+ * @hwmon_dev: device registered with hwmon
+ * @lock: mutex to protect measurement values
+ * @valid: only false before first measurement is taken
+ * @last_update: time of last update (jiffies)
+ * @temperature: cached temperature measurement value
+ * @humidity: cached humidity measurement value
+ */
+struct hih6130 {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ bool valid;
+ unsigned long last_update;
+ int temperature;
+ int humidity;
+};
+
+/**
+ * hih6130_temp_ticks_to_millicelsius() - convert raw temperature ticks to
+ * milli celsius
+ * @ticks: temperature ticks value received from sensor
+ */
+static inline int hih6130_temp_ticks_to_millicelsius(int ticks)
+{
+
+ ticks = ticks >> 2;
+ /*
+ * from data sheet section 5.0
+ * Formula T = ( ticks / ( 2^14 - 2 ) ) * 165 -40
+ */
+ return (DIV_ROUND_CLOSEST(ticks * 1650, 16382) - 400) * 100;
+}
+
+/**
+ * hih6130_rh_ticks_to_per_cent_mille() - convert raw humidity ticks to
+ * one-thousandths of a percent relative humidity
+ * @ticks: humidity ticks value received from sensor
+ */
+static inline int hih6130_rh_ticks_to_per_cent_mille(int ticks)
+{
+
+ ticks &= ~0xC000; /* clear status bits */
+ /*
+ * from data sheet section 4.0
+ * Formula RH = ( ticks / ( 2^14 -2 ) ) * 100
+ */
+ return DIV_ROUND_CLOSEST(ticks * 1000, 16382) * 100;
+}
+
+/**
+ * hih6130_update_measurements() - get updated measurements from device
+ * @client: I2C client device
+ *
+ * Returns 0 on success, else negative errno.
+ */
+static int hih6130_update_measurements(struct i2c_client *client)
+{
+ int ret = 0;
+ int t;
+ struct hih6130 *hih6130 = i2c_get_clientdata(client);
+ unsigned char tmp[4];
+ struct i2c_msg msgs[1] = {
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = 4,
+ .buf = tmp,
+ }
+ };
+
+ mutex_lock(&hih6130->lock);
+
+ /*
+ * While the measurement can be completed in ~40ms the sensor takes
+ * much longer to react to a change in external conditions. How quickly
+ * it reacts depends on airflow and other factors outwith our control.
+ * The datasheet specifies maximum 'Response time' for humidity at 8s
+ * and temperature at 30s under specified conditions.
+ * We therefore choose to only read the sensor at most once per second.
+ * This trades off pointless activity polling the sensor much faster
+ * than it can react against better response times in conditions more
+ * favourable than specified in the datasheet.
+ */
+ if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) {
+
+ /* write to slave address, no data, to request a measurement */
+ ret = i2c_master_send(client, tmp, 0);
+ if (ret < 0)
+ goto out;
+
+ /* measurement cycle time is ~36.65msec */
+ msleep(40);
+
+ ret = i2c_transfer(client->adapter, msgs, 1);
+ if (ret < 0)
+ goto out;
+
+ if ((tmp[0] & 0xC0) != 0) {
+ dev_err(&client->dev, "Error while reading measurement result\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ t = (tmp[0] << 8) + tmp[1];
+ hih6130->humidity = hih6130_rh_ticks_to_per_cent_mille(t);
+
+ t = (tmp[2] << 8) + tmp[3];
+ hih6130->temperature = hih6130_temp_ticks_to_millicelsius(t);
+
+ hih6130->last_update = jiffies;
+ hih6130->valid = true;
+ }
+out:
+ mutex_unlock(&hih6130->lock);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+/**
+ * hih6130_show_temperature() - show temperature measurement value in sysfs
+ * @dev: device
+ * @attr: device attribute
+ * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
+ *
+ * Will be called on read access to temp1_input sysfs attribute.
+ * Returns number of bytes written into buffer, negative errno on error.
+ */
+static ssize_t hih6130_show_temperature(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hih6130 *hih6130 = i2c_get_clientdata(client);
+ int ret = hih6130_update_measurements(client);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", hih6130->temperature);
+}
+
+/**
+ * hih6130_show_humidity() - show humidity measurement value in sysfs
+ * @dev: device
+ * @attr: device attribute
+ * @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
+ *
+ * Will be called on read access to humidity1_input sysfs attribute.
+ * Returns number of bytes written into buffer, negative errno on error.
+ */
+static ssize_t hih6130_show_humidity(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct hih6130 *hih6130 = i2c_get_clientdata(client);
+ int ret = hih6130_update_measurements(client);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", hih6130->humidity);
+}
+
+/* sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, hih6130_show_temperature,
+ NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, hih6130_show_humidity,
+ NULL, 0);
+
+static struct attribute *hih6130_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_humidity1_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hih6130_attr_group = {
+ .attrs = hih6130_attributes,
+};
+
+/**
+ * hih6130_probe() - probe device
+ * @client: I2C client device
+ * @id: device ID
+ *
+ * Called by the I2C core when an entry in the ID table matches a
+ * device's name.
+ * Returns 0 on success.
+ */
+static int __devinit hih6130_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct hih6130 *hih6130;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(&client->dev, "adapter does not support true I2C\n");
+ return -ENODEV;
+ }
+
+ hih6130 = devm_kzalloc(&client->dev, sizeof(*hih6130), GFP_KERNEL);
+ if (!hih6130)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, hih6130);
+
+ mutex_init(&hih6130->lock);
+
+ err = sysfs_create_group(&client->dev.kobj, &hih6130_attr_group);
+ if (err) {
+ dev_dbg(&client->dev, "could not create sysfs files\n");
+ return err;
+ }
+
+ hih6130->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(hih6130->hwmon_dev)) {
+ dev_dbg(&client->dev, "unable to register hwmon device\n");
+ err = PTR_ERR(hih6130->hwmon_dev);
+ goto fail_remove_sysfs;
+ }
+
+ return 0;
+
+fail_remove_sysfs:
+ sysfs_remove_group(&client->dev.kobj, &hih6130_attr_group);
+ return err;
+}
+
+/**
+ * hih6130_remove() - remove device
+ * @client: I2C client device
+ */
+static int __devexit hih6130_remove(struct i2c_client *client)
+{
+ struct hih6130 *hih6130 = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(hih6130->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &hih6130_attr_group);
+
+ return 0;
+}
+
+/* Device ID table */
+static const struct i2c_device_id hih6130_id[] = {
+ { "hih6130", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, hih6130_id);
+
+static struct i2c_driver hih6130_driver = {
+ .driver.name = "hih6130",
+ .probe = hih6130_probe,
+ .remove = __devexit_p(hih6130_remove),
+ .id_table = hih6130_id,
+};
+
+module_i2c_driver(hih6130_driver);
+
+MODULE_AUTHOR("Iain Paton <ipaton0@gmail.com>");
+MODULE_DESCRIPTION("Honeywell HIH-6130 humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index e7701d99f8e8..f1de3979181f 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -2341,7 +2341,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
/* Start monitoring */
it87_write_value(data, IT87_REG_CONFIG,
- (it87_read_value(data, IT87_REG_CONFIG) & 0x36)
+ (it87_read_value(data, IT87_REG_CONFIG) & 0x3e)
| (update_vbat ? 0x41 : 0x01));
}
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 7356b5ec8f67..f2fe8078633b 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -33,9 +33,6 @@ static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
-/* PCI-IDs for Northbridge devices not used anywhere else */
-#define PCI_DEVICE_ID_AMD_15H_M10H_NB_F3 0x1403
-
/* CPUID function 0x80000001, ebx */
#define CPUID_PKGTYPE_MASK 0xf0000000
#define CPUID_PKGTYPE_F 0x00000000
@@ -213,7 +210,7 @@ static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
- { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 35aac82ee8eb..49a69c5b3b8d 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -183,21 +183,17 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
u8 model, stepping;
struct k8temp_data *data;
- data = kzalloc(sizeof(struct k8temp_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&pdev->dev, sizeof(struct k8temp_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
model = boot_cpu_data.x86_model;
stepping = boot_cpu_data.x86_mask;
/* feature available since SH-C0, exclude older revisions */
- if (((model == 4) && (stepping == 0)) ||
- ((model == 5) && (stepping <= 1))) {
- err = -ENODEV;
- goto exit_free;
- }
+ if ((model == 4 && stepping == 0) ||
+ (model == 5 && stepping <= 1))
+ return -ENODEV;
/*
* AMD NPT family 0fh, i.e. RevF and RevG:
@@ -224,8 +220,7 @@ static int __devinit k8temp_probe(struct pci_dev *pdev,
if (scfg & (SEL_PLACE | SEL_CORE)) {
dev_err(&pdev->dev, "Configuration bit(s) stuck at 1!\n");
- err = -ENODEV;
- goto exit_free;
+ return -ENODEV;
}
scfg |= (SEL_PLACE | SEL_CORE);
@@ -307,10 +302,6 @@ exit_remove:
device_remove_file(&pdev->dev,
&sensor_dev_attr_temp4_input.dev_attr);
device_remove_file(&pdev->dev, &dev_attr_name);
-exit_free:
- pci_set_drvdata(pdev, NULL);
- kfree(data);
-exit:
return err;
}
@@ -328,8 +319,6 @@ static void __devexit k8temp_remove(struct pci_dev *pdev)
device_remove_file(&pdev->dev,
&sensor_dev_attr_temp4_input.dev_attr);
device_remove_file(&pdev->dev, &dev_attr_name);
- pci_set_drvdata(pdev, NULL);
- kfree(data);
}
static struct pci_driver k8temp_driver = {
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 602a0f0b0de8..eed4d9401788 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -1108,11 +1108,9 @@ static int lm63_probe(struct i2c_client *client,
struct lm63_data *data;
int err;
- data = kzalloc(sizeof(struct lm63_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct lm63_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->valid = 0;
@@ -1129,7 +1127,7 @@ static int lm63_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm63_group);
if (err)
- goto exit_free;
+ return err;
if (data->config & 0x04) { /* tachometer enabled */
err = sysfs_create_group(&client->dev.kobj, &lm63_group_fan1);
if (err)
@@ -1161,9 +1159,6 @@ exit_remove_files:
device_remove_file(&client->dev, &dev_attr_temp2_type);
sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
}
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -1179,7 +1174,6 @@ static int lm63_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
}
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index a83f206af244..291edfff55bf 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -156,7 +156,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA))
return -EIO;
- data = kzalloc(sizeof(struct lm75_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct lm75_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -174,7 +174,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
status = lm75_read_value(client, LM75_REG_CONF);
if (status < 0) {
dev_dbg(&client->dev, "Can't read config? %d\n", status);
- goto exit_free;
+ return status;
}
data->orig_conf = status;
new = status & ~clr_mask;
@@ -186,7 +186,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register sysfs hooks */
status = sysfs_create_group(&client->dev.kobj, &lm75_group);
if (status)
- goto exit_free;
+ return status;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -201,8 +201,6 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
exit_remove:
sysfs_remove_group(&client->dev.kobj, &lm75_group);
-exit_free:
- kfree(data);
return status;
}
@@ -213,7 +211,6 @@ static int lm75_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm75_group);
lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 0fca8613e7d8..f82acf67acf5 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -267,10 +267,9 @@ static const struct attribute_group lm77_group = {
};
/* Return 0 if detection is successful, -ENODEV otherwise */
-static int lm77_detect(struct i2c_client *new_client,
- struct i2c_board_info *info)
+static int lm77_detect(struct i2c_client *client, struct i2c_board_info *info)
{
- struct i2c_adapter *adapter = new_client->adapter;
+ struct i2c_adapter *adapter = client->adapter;
int i, cur, conf, hyst, crit, min, max;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA |
@@ -292,18 +291,18 @@ static int lm77_detect(struct i2c_client *new_client,
*/
/* addresses cycling */
- cur = i2c_smbus_read_word_data(new_client, 0);
- conf = i2c_smbus_read_byte_data(new_client, 1);
- hyst = i2c_smbus_read_word_data(new_client, 2);
- crit = i2c_smbus_read_word_data(new_client, 3);
- min = i2c_smbus_read_word_data(new_client, 4);
- max = i2c_smbus_read_word_data(new_client, 5);
+ cur = i2c_smbus_read_word_data(client, 0);
+ conf = i2c_smbus_read_byte_data(client, 1);
+ hyst = i2c_smbus_read_word_data(client, 2);
+ crit = i2c_smbus_read_word_data(client, 3);
+ min = i2c_smbus_read_word_data(client, 4);
+ max = i2c_smbus_read_word_data(client, 5);
for (i = 8; i <= 0xff; i += 8) {
- if (i2c_smbus_read_byte_data(new_client, i + 1) != conf
- || i2c_smbus_read_word_data(new_client, i + 2) != hyst
- || i2c_smbus_read_word_data(new_client, i + 3) != crit
- || i2c_smbus_read_word_data(new_client, i + 4) != min
- || i2c_smbus_read_word_data(new_client, i + 5) != max)
+ if (i2c_smbus_read_byte_data(client, i + 1) != conf
+ || i2c_smbus_read_word_data(client, i + 2) != hyst
+ || i2c_smbus_read_word_data(client, i + 3) != crit
+ || i2c_smbus_read_word_data(client, i + 4) != min
+ || i2c_smbus_read_word_data(client, i + 5) != max)
return -ENODEV;
}
@@ -320,17 +319,17 @@ static int lm77_detect(struct i2c_client *new_client,
return -ENODEV;
/* 0x06 and 0x07 return the last read value */
- cur = i2c_smbus_read_word_data(new_client, 0);
- if (i2c_smbus_read_word_data(new_client, 6) != cur
- || i2c_smbus_read_word_data(new_client, 7) != cur)
+ cur = i2c_smbus_read_word_data(client, 0);
+ if (i2c_smbus_read_word_data(client, 6) != cur
+ || i2c_smbus_read_word_data(client, 7) != cur)
return -ENODEV;
- hyst = i2c_smbus_read_word_data(new_client, 2);
- if (i2c_smbus_read_word_data(new_client, 6) != hyst
- || i2c_smbus_read_word_data(new_client, 7) != hyst)
+ hyst = i2c_smbus_read_word_data(client, 2);
+ if (i2c_smbus_read_word_data(client, 6) != hyst
+ || i2c_smbus_read_word_data(client, 7) != hyst)
return -ENODEV;
- min = i2c_smbus_read_word_data(new_client, 4);
- if (i2c_smbus_read_word_data(new_client, 6) != min
- || i2c_smbus_read_word_data(new_client, 7) != min)
+ min = i2c_smbus_read_word_data(client, 4);
+ if (i2c_smbus_read_word_data(client, 6) != min
+ || i2c_smbus_read_word_data(client, 7) != min)
return -ENODEV;
strlcpy(info->type, "lm77", I2C_NAME_SIZE);
@@ -338,31 +337,29 @@ static int lm77_detect(struct i2c_client *new_client,
return 0;
}
-static int lm77_probe(struct i2c_client *new_client,
- const struct i2c_device_id *id)
+static int lm77_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ struct device *dev = &client->dev;
struct lm77_data *data;
int err;
- data = kzalloc(sizeof(struct lm77_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(dev, sizeof(struct lm77_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- i2c_set_clientdata(new_client, data);
+ i2c_set_clientdata(client, data);
data->valid = 0;
mutex_init(&data->update_lock);
/* Initialize the LM77 chip */
- lm77_init_client(new_client);
+ lm77_init_client(client);
/* Register sysfs hooks */
- err = sysfs_create_group(&new_client->dev.kobj, &lm77_group);
+ err = sysfs_create_group(&dev->kobj, &lm77_group);
if (err)
- goto exit_free;
+ return err;
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
+ data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
@@ -371,10 +368,7 @@ static int lm77_probe(struct i2c_client *new_client,
return 0;
exit_remove:
- sysfs_remove_group(&new_client->dev.kobj, &lm77_group);
-exit_free:
- kfree(data);
-exit:
+ sysfs_remove_group(&dev->kobj, &lm77_group);
return err;
}
@@ -383,7 +377,6 @@ static int lm77_remove(struct i2c_client *client)
struct lm77_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm77_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index f6bc414e1e91..c6ffafe600ad 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -660,7 +660,7 @@ static int lm78_i2c_probe(struct i2c_client *client,
struct lm78_data *data;
int err;
- data = kzalloc(sizeof(struct lm78_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct lm78_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -674,20 +674,18 @@ static int lm78_i2c_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm78_group);
if (err)
- goto ERROR3;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto ERROR4;
+ goto error;
}
return 0;
-ERROR4:
+error:
sysfs_remove_group(&client->dev.kobj, &lm78_group);
-ERROR3:
- kfree(data);
return err;
}
@@ -697,7 +695,6 @@ static int lm78_i2c_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm78_group);
- kfree(data);
return 0;
}
@@ -844,16 +841,14 @@ static int __devinit lm78_isa_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start + LM78_ADDR_REG_OFFSET, 2, "lm78")) {
- err = -EBUSY;
- goto exit;
- }
+ if (!devm_request_region(&pdev->dev, res->start + LM78_ADDR_REG_OFFSET,
+ 2, "lm78"))
+ return -EBUSY;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(struct lm78_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- data = kzalloc(sizeof(struct lm78_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit_release_region;
- }
mutex_init(&data->lock);
data->isa_addr = res->start;
platform_set_drvdata(pdev, data);
@@ -888,25 +883,16 @@ static int __devinit lm78_isa_probe(struct platform_device *pdev)
exit_remove_files:
sysfs_remove_group(&pdev->dev.kobj, &lm78_group);
device_remove_file(&pdev->dev, &dev_attr_name);
- kfree(data);
- exit_release_region:
- release_region(res->start + LM78_ADDR_REG_OFFSET, 2);
- exit:
return err;
}
static int __devexit lm78_isa_remove(struct platform_device *pdev)
{
struct lm78_data *data = platform_get_drvdata(pdev);
- struct resource *res;
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &lm78_group);
device_remove_file(&pdev->dev, &dev_attr_name);
- kfree(data);
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start + LM78_ADDR_REG_OFFSET, 2);
return 0;
}
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index e2c43e1774be..28a8b71f4571 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -543,11 +543,9 @@ static int lm80_probe(struct i2c_client *client,
struct lm80_data *data;
int err;
- data = kzalloc(sizeof(struct lm80_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct lm80_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -562,7 +560,7 @@ static int lm80_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm80_group);
if (err)
- goto error_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -574,9 +572,6 @@ static int lm80_probe(struct i2c_client *client,
error_remove:
sysfs_remove_group(&client->dev.kobj, &lm80_group);
-error_free:
- kfree(data);
-exit:
return err;
}
@@ -587,7 +582,6 @@ static int lm80_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm80_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index cd45b9d85584..e998034f1f11 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -343,11 +343,10 @@ static int lm83_probe(struct i2c_client *new_client,
struct lm83_data *data;
int err;
- data = kzalloc(sizeof(struct lm83_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(struct lm83_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(new_client, data);
data->valid = 0;
@@ -362,7 +361,7 @@ static int lm83_probe(struct i2c_client *new_client,
err = sysfs_create_group(&new_client->dev.kobj, &lm83_group);
if (err)
- goto exit_free;
+ return err;
if (id->driver_data == lm83) {
err = sysfs_create_group(&new_client->dev.kobj,
@@ -382,9 +381,6 @@ static int lm83_probe(struct i2c_client *new_client,
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &lm83_group);
sysfs_remove_group(&new_client->dev.kobj, &lm83_group_opt);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -396,7 +392,6 @@ static int lm83_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &lm83_group);
sysfs_remove_group(&client->dev.kobj, &lm83_group_opt);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 864c7d999e0c..9f2dd77e1e0e 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -1387,7 +1387,7 @@ static int lm85_probe(struct i2c_client *client,
struct lm85_data *data;
int err;
- data = kzalloc(sizeof(struct lm85_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct lm85_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1419,7 +1419,7 @@ static int lm85_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm85_group);
if (err)
- goto err_kfree;
+ return err;
/* minctl and temp_off exist on all chips except emc6d103s */
if (data->type != emc6d103s) {
@@ -1466,8 +1466,6 @@ static int lm85_probe(struct i2c_client *client,
/* Error out and cleanup code */
err_remove_files:
lm85_remove_files(client, data);
- err_kfree:
- kfree(data);
return err;
}
@@ -1476,7 +1474,6 @@ static int lm85_remove(struct i2c_client *client)
struct lm85_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
lm85_remove_files(client, data);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 314d147bf1ac..16e45d702152 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -898,11 +898,9 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct lm87_data *data;
int err;
- data = kzalloc(sizeof(struct lm87_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct lm87_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->valid = 0;
@@ -923,7 +921,7 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm87_group);
if (err)
- goto exit_free;
+ goto exit_stop;
if (data->channel & CHAN_NO_FAN(0)) {
err = sysfs_create_group(&client->dev.kobj, &lm87_group_in6);
@@ -972,10 +970,8 @@ static int lm87_probe(struct i2c_client *client, const struct i2c_device_id *id)
exit_remove:
lm87_remove_files(client);
-exit_free:
+exit_stop:
lm87_write_value(client, LM87_REG_CONFIG, data->config);
- kfree(data);
-exit:
return err;
}
@@ -987,7 +983,6 @@ static int lm87_remove(struct i2c_client *client)
lm87_remove_files(client);
lm87_write_value(client, LM87_REG_CONFIG, data->config);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 22b14a68e35e..863412a02bdd 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1399,11 +1399,10 @@ static int lm90_probe(struct i2c_client *client,
struct lm90_data *data;
int err;
- data = kzalloc(sizeof(struct lm90_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct lm90_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -1474,8 +1473,6 @@ exit_remove_files:
lm90_remove_files(client, data);
exit_restore:
lm90_restore_conf(client, data);
- kfree(data);
-exit:
return err;
}
@@ -1487,7 +1484,6 @@ static int lm90_remove(struct i2c_client *client)
lm90_remove_files(client, data);
lm90_restore_conf(client, data);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index fdc691a4028f..2282d77e83e8 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -373,11 +373,10 @@ static int lm92_probe(struct i2c_client *new_client,
struct lm92_data *data;
int err;
- data = kzalloc(sizeof(struct lm92_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(struct lm92_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(new_client, data);
data->valid = 0;
@@ -389,7 +388,7 @@ static int lm92_probe(struct i2c_client *new_client,
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &lm92_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -401,9 +400,6 @@ static int lm92_probe(struct i2c_client *new_client,
exit_remove:
sysfs_remove_group(&new_client->dev.kobj, &lm92_group);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -414,7 +410,6 @@ static int lm92_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm92_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 67e8fe256e02..bf946187bd37 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -2738,15 +2738,13 @@ static int lm93_probe(struct i2c_client *client,
} else {
dev_dbg(&client->dev, "detect failed, "
"smbus byte and/or word data not supported!\n");
- err = -ENODEV;
- goto err_out;
+ return -ENODEV;
}
- data = kzalloc(sizeof(struct lm93_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct lm93_data), GFP_KERNEL);
if (!data) {
dev_dbg(&client->dev, "out of memory!\n");
- err = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
}
i2c_set_clientdata(client, data);
@@ -2760,7 +2758,7 @@ static int lm93_probe(struct i2c_client *client,
err = sysfs_create_group(&client->dev.kobj, &lm93_attr_grp);
if (err)
- goto err_free;
+ return err;
/* Register hwmon driver class */
data->hwmon_dev = hwmon_device_register(&client->dev);
@@ -2770,9 +2768,6 @@ static int lm93_probe(struct i2c_client *client,
err = PTR_ERR(data->hwmon_dev);
dev_err(&client->dev, "error registering hwmon device.\n");
sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
-err_free:
- kfree(data);
-err_out:
return err;
}
@@ -2783,7 +2778,6 @@ static int lm93_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm93_attr_grp);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index 362a40eb6129..f3978a46e844 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -168,7 +168,7 @@ static int __devinit max1111_probe(struct spi_device *spi)
if (err < 0)
return err;
- data = kzalloc(sizeof(struct max1111_data), GFP_KERNEL);
+ data = devm_kzalloc(&spi->dev, sizeof(struct max1111_data), GFP_KERNEL);
if (data == NULL) {
dev_err(&spi->dev, "failed to allocate memory\n");
return -ENOMEM;
@@ -176,7 +176,7 @@ static int __devinit max1111_probe(struct spi_device *spi)
err = setup_transfer(data);
if (err)
- goto err_free_data;
+ return err;
mutex_init(&data->drvdata_lock);
@@ -186,7 +186,7 @@ static int __devinit max1111_probe(struct spi_device *spi)
err = sysfs_create_group(&spi->dev.kobj, &max1111_attr_group);
if (err) {
dev_err(&spi->dev, "failed to create attribute group\n");
- goto err_free_data;
+ return err;
}
data->hwmon_dev = hwmon_device_register(&spi->dev);
@@ -203,8 +203,6 @@ static int __devinit max1111_probe(struct spi_device *spi)
err_remove:
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
-err_free_data:
- kfree(data);
return err;
}
@@ -215,7 +213,6 @@ static int __devexit max1111_remove(struct spi_device *spi)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&spi->dev.kobj, &max1111_attr_group);
mutex_destroy(&data->drvdata_lock);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index ecac04a7b7d6..6c11ec214071 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -267,11 +267,10 @@ static int max1619_probe(struct i2c_client *new_client,
struct max1619_data *data;
int err;
- data = kzalloc(sizeof(struct max1619_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(struct max1619_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(new_client, data);
data->valid = 0;
@@ -283,7 +282,7 @@ static int max1619_probe(struct i2c_client *new_client,
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &max1619_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -295,9 +294,6 @@ static int max1619_probe(struct i2c_client *new_client,
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &max1619_group);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -323,7 +319,6 @@ static int max1619_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &max1619_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index de8f7adaccbd..6e60036abfa7 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -548,11 +548,10 @@ static int max6639_probe(struct i2c_client *client,
struct max6639_data *data;
int err;
- data = kzalloc(sizeof(struct max6639_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct max6639_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -560,12 +559,12 @@ static int max6639_probe(struct i2c_client *client,
/* Initialize the max6639 chip */
err = max6639_init_client(client);
if (err < 0)
- goto error_free;
+ return err;
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &max6639_group);
if (err)
- goto error_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -579,9 +578,6 @@ static int max6639_probe(struct i2c_client *client,
error_remove:
sysfs_remove_group(&client->dev.kobj, &max6639_group);
-error_free:
- kfree(data);
-exit:
return err;
}
@@ -592,7 +588,6 @@ static int max6639_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &max6639_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 4298909a41fd..bf236c0782b7 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -286,11 +286,10 @@ static int max6642_probe(struct i2c_client *new_client,
struct max6642_data *data;
int err;
- data = kzalloc(sizeof(struct max6642_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&new_client->dev, sizeof(struct max6642_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(new_client, data);
mutex_init(&data->update_lock);
@@ -301,7 +300,7 @@ static int max6642_probe(struct i2c_client *new_client,
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &max6642_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -313,9 +312,6 @@ static int max6642_probe(struct i2c_client *new_client,
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &max6642_group);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -326,7 +322,6 @@ static int max6642_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &max6642_group);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index 33a8a7f15e18..f739f83bafb9 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -545,7 +545,8 @@ static int max6650_probe(struct i2c_client *client,
struct max6650_data *data;
int err;
- data = kzalloc(sizeof(struct max6650_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct max6650_data),
+ GFP_KERNEL);
if (!data) {
dev_err(&client->dev, "out of memory.\n");
return -ENOMEM;
@@ -560,11 +561,11 @@ static int max6650_probe(struct i2c_client *client,
*/
err = max6650_init_client(client);
if (err)
- goto err_free;
+ return err;
err = sysfs_create_group(&client->dev.kobj, &max6650_attr_grp);
if (err)
- goto err_free;
+ return err;
/* 3 additional fan inputs for the MAX6651 */
if (data->nr_fans == 4) {
err = sysfs_create_group(&client->dev.kobj, &max6651_attr_grp);
@@ -582,8 +583,6 @@ static int max6650_probe(struct i2c_client *client,
sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
err_remove:
sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
-err_free:
- kfree(data);
return err;
}
@@ -595,7 +594,6 @@ static int max6650_remove(struct i2c_client *client)
if (data->nr_fans == 4)
sysfs_remove_group(&client->dev.kobj, &max6651_attr_grp);
sysfs_remove_group(&client->dev.kobj, &max6650_attr_grp);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/mc13783-adc.c b/drivers/hwmon/mc13783-adc.c
index ce86c5e3c2c2..cf47a59657a9 100644
--- a/drivers/hwmon/mc13783-adc.c
+++ b/drivers/hwmon/mc13783-adc.c
@@ -179,7 +179,7 @@ static int __init mc13783_adc_probe(struct platform_device *pdev)
const struct platform_device_id *id = platform_get_device_id(pdev);
char *dash;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -194,7 +194,7 @@ static int __init mc13783_adc_probe(struct platform_device *pdev)
/* Register sysfs hooks */
ret = sysfs_create_group(&pdev->dev.kobj, &mc13783_group_base);
if (ret)
- goto out_err_create_base;
+ return ret;
if (id->driver_data & MC13783_ADC_16CHANS) {
ret = sysfs_create_group(&pdev->dev.kobj,
@@ -230,11 +230,6 @@ out_err_create_ts:
out_err_create_16chans:
sysfs_remove_group(&pdev->dev.kobj, &mc13783_group_base);
-out_err_create_base:
-
- platform_set_drvdata(pdev, NULL);
- kfree(priv);
-
return ret;
}
@@ -253,9 +248,6 @@ static int __devexit mc13783_adc_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &mc13783_group_base);
- platform_set_drvdata(pdev, NULL);
- kfree(priv);
-
return 0;
}
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 6da9696e1827..74a6c58d0218 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -351,7 +351,7 @@ static int __devinit ntc_thermistor_probe(struct platform_device *pdev)
data->dev = &pdev->dev;
data->pdata = pdata;
- strncpy(data->name, pdev->id_entry->name, PLATFORM_NAME_SIZE);
+ strlcpy(data->name, pdev->id_entry->name, sizeof(data->name));
switch (pdev->id_entry->driver_data) {
case TYPE_NCPXXWB473:
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 79ba48c8c116..91d5b2a21dd9 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -1230,7 +1230,7 @@ static int __devinit pc87360_probe(struct platform_device *pdev)
int use_thermistors = 0;
struct device *dev = &pdev->dev;
- data = kzalloc(sizeof(struct pc87360_data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct pc87360_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1269,15 +1269,12 @@ static int __devinit pc87360_probe(struct platform_device *pdev)
for (i = 0; i < LDNI_MAX; i++) {
data->address[i] = extra_isa[i];
if (data->address[i]
- && !request_region(extra_isa[i], PC87360_EXTENT,
- pc87360_driver.driver.name)) {
+ && !devm_request_region(dev, extra_isa[i], PC87360_EXTENT,
+ pc87360_driver.driver.name)) {
dev_err(dev, "Region 0x%x-0x%x already "
"in use!\n", extra_isa[i],
extra_isa[i]+PC87360_EXTENT-1);
- for (i--; i >= 0; i--)
- release_region(extra_isa[i], PC87360_EXTENT);
- err = -EBUSY;
- goto ERROR1;
+ return -EBUSY;
}
}
@@ -1325,13 +1322,13 @@ static int __devinit pc87360_probe(struct platform_device *pdev)
if (data->innr) {
err = sysfs_create_group(&dev->kobj, &pc8736x_vin_group);
if (err)
- goto ERROR3;
+ goto error;
}
if (data->innr == 14) {
err = sysfs_create_group(&dev->kobj, &pc8736x_therm_group);
if (err)
- goto ERROR3;
+ goto error;
}
/* create device attr-files for varying sysfs groups */
@@ -1341,11 +1338,11 @@ static int __devinit pc87360_probe(struct platform_device *pdev)
err = sysfs_create_group(&dev->kobj,
&pc8736x_temp_attr_group[i]);
if (err)
- goto ERROR3;
+ goto error;
}
err = device_create_file(dev, &dev_attr_alarms_temp);
if (err)
- goto ERROR3;
+ goto error;
}
for (i = 0; i < data->fannr; i++) {
@@ -1353,49 +1350,37 @@ static int __devinit pc87360_probe(struct platform_device *pdev)
err = sysfs_create_group(&dev->kobj,
&pc8736x_fan_attr_group[i]);
if (err)
- goto ERROR3;
+ goto error;
}
if (FAN_CONFIG_CONTROL(data->fan_conf, i)) {
err = device_create_file(dev, &pwm[i].dev_attr);
if (err)
- goto ERROR3;
+ goto error;
}
}
err = device_create_file(dev, &dev_attr_name);
if (err)
- goto ERROR3;
+ goto error;
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto ERROR3;
+ goto error;
}
return 0;
-ERROR3:
+error:
pc87360_remove_files(dev);
- for (i = 0; i < 3; i++) {
- if (data->address[i])
- release_region(data->address[i], PC87360_EXTENT);
- }
-ERROR1:
- kfree(data);
return err;
}
static int __devexit pc87360_remove(struct platform_device *pdev)
{
struct pc87360_data *data = platform_get_drvdata(pdev);
- int i;
hwmon_device_unregister(data->hwmon_dev);
pc87360_remove_files(&pdev->dev);
- for (i = 0; i < 3; i++) {
- if (data->address[i])
- release_region(data->address[i], PC87360_EXTENT);
- }
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 37059a3755e9..f185b1fa53e5 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -956,44 +956,28 @@ static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
* Device detection, attach and detach
*/
-static void pc87427_release_regions(struct platform_device *pdev, int count)
-{
- struct resource *res;
- int i;
-
- for (i = 0; i < count; i++) {
- res = platform_get_resource(pdev, IORESOURCE_IO, i);
- release_region(res->start, resource_size(res));
- }
-}
-
static int __devinit pc87427_request_regions(struct platform_device *pdev,
int count)
{
struct resource *res;
- int i, err = 0;
+ int i;
for (i = 0; i < count; i++) {
res = platform_get_resource(pdev, IORESOURCE_IO, i);
if (!res) {
- err = -ENOENT;
dev_err(&pdev->dev, "Missing resource #%d\n", i);
- break;
+ return -ENOENT;
}
- if (!request_region(res->start, resource_size(res), DRVNAME)) {
- err = -EBUSY;
+ if (!devm_request_region(&pdev->dev, res->start,
+ resource_size(res), DRVNAME)) {
dev_err(&pdev->dev,
"Failed to request region 0x%lx-0x%lx\n",
(unsigned long)res->start,
(unsigned long)res->end);
- break;
+ return -EBUSY;
}
}
-
- if (err && i)
- pc87427_release_regions(pdev, i);
-
- return err;
+ return 0;
}
static void __devinit pc87427_init_device(struct device *dev)
@@ -1094,11 +1078,11 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
struct pc87427_data *data;
int i, err, res_count;
- data = kzalloc(sizeof(struct pc87427_data), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, sizeof(struct pc87427_data),
+ GFP_KERNEL);
if (!data) {
- err = -ENOMEM;
pr_err("Out of memory\n");
- goto exit;
+ return -ENOMEM;
}
data->address[0] = sio_data->address[0];
@@ -1107,7 +1091,7 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
err = pc87427_request_regions(pdev, res_count);
if (err)
- goto exit_kfree;
+ return err;
mutex_init(&data->lock);
data->name = "pc87427";
@@ -1117,7 +1101,7 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
/* Register sysfs hooks */
err = device_create_file(&pdev->dev, &dev_attr_name);
if (err)
- goto exit_release_region;
+ return err;
for (i = 0; i < 8; i++) {
if (!(data->fan_enabled & (1 << i)))
continue;
@@ -1154,28 +1138,15 @@ static int __devinit pc87427_probe(struct platform_device *pdev)
exit_remove_files:
pc87427_remove_files(&pdev->dev);
-exit_release_region:
- pc87427_release_regions(pdev, res_count);
-exit_kfree:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-exit:
return err;
}
static int __devexit pc87427_remove(struct platform_device *pdev)
{
struct pc87427_data *data = platform_get_drvdata(pdev);
- int res_count;
-
- res_count = (data->address[0] != 0) + (data->address[1] != 0);
hwmon_device_unregister(data->hwmon_dev);
pc87427_remove_files(&pdev->dev);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
- pc87427_release_regions(pdev, res_count);
return 0;
}
diff --git a/drivers/hwmon/pcf8591.c b/drivers/hwmon/pcf8591.c
index 4174c7463d70..825883d29002 100644
--- a/drivers/hwmon/pcf8591.c
+++ b/drivers/hwmon/pcf8591.c
@@ -200,11 +200,10 @@ static int pcf8591_probe(struct i2c_client *client,
struct pcf8591_data *data;
int err;
- data = kzalloc(sizeof(struct pcf8591_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct pcf8591_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -215,7 +214,7 @@ static int pcf8591_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &pcf8591_attr_group);
if (err)
- goto exit_kfree;
+ return err;
/* Register input2 if not in "two differential inputs" mode */
if (input_mode != 3) {
@@ -242,9 +241,6 @@ static int pcf8591_probe(struct i2c_client *client,
exit_sysfs_remove:
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group);
-exit_kfree:
- kfree(data);
-exit:
return err;
}
@@ -255,7 +251,6 @@ static int pcf8591_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group_opt);
sysfs_remove_group(&client->dev.kobj, &pcf8591_attr_group);
- kfree(i2c_get_clientdata(client));
return 0;
}
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index f6c26d19f521..b7975f858cff 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -288,7 +288,7 @@ static int __devinit s3c_hwmon_probe(struct platform_device *dev)
return -EINVAL;
}
- hwmon = kzalloc(sizeof(struct s3c_hwmon), GFP_KERNEL);
+ hwmon = devm_kzalloc(&dev->dev, sizeof(struct s3c_hwmon), GFP_KERNEL);
if (hwmon == NULL) {
dev_err(&dev->dev, "no memory\n");
return -ENOMEM;
@@ -303,8 +303,7 @@ static int __devinit s3c_hwmon_probe(struct platform_device *dev)
hwmon->client = s3c_adc_register(dev, NULL, NULL, 0);
if (IS_ERR(hwmon->client)) {
dev_err(&dev->dev, "cannot register adc\n");
- ret = PTR_ERR(hwmon->client);
- goto err_mem;
+ return PTR_ERR(hwmon->client);
}
/* add attributes for our adc devices. */
@@ -363,8 +362,6 @@ static int __devinit s3c_hwmon_probe(struct platform_device *dev)
err_registered:
s3c_adc_release(hwmon->client);
- err_mem:
- kfree(hwmon);
return ret;
}
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 6c4d8eb9b7ca..8275f0e14eb7 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -593,17 +593,14 @@ static int __devinit sis5595_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, SIS5595_EXTENT,
- sis5595_driver.driver.name)) {
- err = -EBUSY;
- goto exit;
- }
+ if (!devm_request_region(&pdev->dev, res->start, SIS5595_EXTENT,
+ sis5595_driver.driver.name))
+ return -EBUSY;
- data = kzalloc(sizeof(struct sis5595_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit_release;
- }
+ data = devm_kzalloc(&pdev->dev, sizeof(struct sis5595_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
mutex_init(&data->lock);
mutex_init(&data->update_lock);
@@ -636,7 +633,7 @@ static int __devinit sis5595_probe(struct platform_device *pdev)
/* Register sysfs hooks */
err = sysfs_create_group(&pdev->dev.kobj, &sis5595_group);
if (err)
- goto exit_free;
+ return err;
if (data->maxins == 4) {
err = sysfs_create_group(&pdev->dev.kobj, &sis5595_group_in4);
if (err)
@@ -659,11 +656,6 @@ exit_remove_files:
sysfs_remove_group(&pdev->dev.kobj, &sis5595_group);
sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_in4);
sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_temp1);
-exit_free:
- kfree(data);
-exit_release:
- release_region(res->start, SIS5595_EXTENT);
-exit:
return err;
}
@@ -676,10 +668,6 @@ static int __devexit sis5595_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_in4);
sysfs_remove_group(&pdev->dev.kobj, &sis5595_group_temp1);
- release_region(data->addr, SIS5595_EXTENT);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index c5f6be478bad..65b07de11a0f 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -231,13 +231,9 @@ static const struct attribute_group smsc47b397_group = {
static int __devexit smsc47b397_remove(struct platform_device *pdev)
{
struct smsc47b397_data *data = platform_get_drvdata(pdev);
- struct resource *res;
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &smsc47b397_group);
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, SMSC_EXTENT);
- kfree(data);
return 0;
}
@@ -261,19 +257,17 @@ static int __devinit smsc47b397_probe(struct platform_device *pdev)
int err = 0;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, SMSC_EXTENT,
- smsc47b397_driver.driver.name)) {
+ if (!devm_request_region(dev, res->start, SMSC_EXTENT,
+ smsc47b397_driver.driver.name)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
(unsigned long)res->start,
(unsigned long)res->start + SMSC_EXTENT - 1);
return -EBUSY;
}
- data = kzalloc(sizeof(struct smsc47b397_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto error_release;
- }
+ data = devm_kzalloc(dev, sizeof(struct smsc47b397_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
data->addr = res->start;
data->name = "smsc47b397";
@@ -283,7 +277,7 @@ static int __devinit smsc47b397_probe(struct platform_device *pdev)
err = sysfs_create_group(&dev->kobj, &smsc47b397_group);
if (err)
- goto error_free;
+ return err;
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -295,10 +289,6 @@ static int __devinit smsc47b397_probe(struct platform_device *pdev)
error_remove:
sysfs_remove_group(&dev->kobj, &smsc47b397_group);
-error_free:
- kfree(data);
-error_release:
- release_region(res->start, SMSC_EXTENT);
return err;
}
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index b5aa38dd7ab9..dba0c567e7a1 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -584,18 +584,17 @@ static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
#define CHECK 1
#define REQUEST 2
-#define RELEASE 3
/*
* This function can be used to:
* - test for resource conflicts with ACPI
* - request the resources
- * - release the resources
* We only allocate the I/O ports we really need, to minimize the risk of
* conflicts with ACPI or with other drivers.
*/
-static int smsc47m1_handle_resources(unsigned short address, enum chips type,
- int action, struct device *dev)
+static int __init smsc47m1_handle_resources(unsigned short address,
+ enum chips type, int action,
+ struct device *dev)
{
static const u8 ports_m1[] = {
/* register, region length */
@@ -642,21 +641,13 @@ static int smsc47m1_handle_resources(unsigned short address, enum chips type,
break;
case REQUEST:
/* Request the resources */
- if (!request_region(start, len, DRVNAME)) {
- dev_err(dev, "Region 0x%hx-0x%hx already in "
- "use!\n", start, start + len);
-
- /* Undo all requests */
- for (i -= 2; i >= 0; i -= 2)
- release_region(address + ports[i],
- ports[i + 1]);
+ if (!devm_request_region(dev, start, len, DRVNAME)) {
+ dev_err(dev,
+ "Region 0x%hx-0x%hx already in use!\n",
+ start, start + len);
return -EBUSY;
}
break;
- case RELEASE:
- /* Release the resources */
- release_region(start, len);
- break;
}
}
@@ -694,11 +685,9 @@ static int __init smsc47m1_probe(struct platform_device *pdev)
if (err < 0)
return err;
- data = kzalloc(sizeof(struct smsc47m1_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto error_release;
- }
+ data = devm_kzalloc(dev, sizeof(struct smsc47m1_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
data->addr = res->start;
data->type = sio_data->type;
@@ -733,8 +722,7 @@ static int __init smsc47m1_probe(struct platform_device *pdev)
}
if (!(fan1 || fan2 || fan3 || pwm1 || pwm2 || pwm3)) {
dev_warn(dev, "Device not configured, will not use\n");
- err = -ENODEV;
- goto error_free;
+ return -ENODEV;
}
/*
@@ -810,27 +798,16 @@ static int __init smsc47m1_probe(struct platform_device *pdev)
error_remove_files:
smsc47m1_remove_files(dev);
-error_free:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-error_release:
- smsc47m1_handle_resources(res->start, sio_data->type, RELEASE, dev);
return err;
}
static int __exit smsc47m1_remove(struct platform_device *pdev)
{
struct smsc47m1_data *data = platform_get_drvdata(pdev);
- struct resource *res;
hwmon_device_unregister(data->hwmon_dev);
smsc47m1_remove_files(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- smsc47m1_handle_resources(res->start, data->type, RELEASE, &pdev->dev);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 4705a8bf11c2..36a3478d0799 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -554,11 +554,10 @@ static int smsc47m192_probe(struct i2c_client *client,
int config;
int err;
- data = kzalloc(sizeof(struct smsc47m192_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct smsc47m192_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->vrm = vid_which_vrm();
@@ -570,7 +569,7 @@ static int smsc47m192_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group);
if (err)
- goto exit_free;
+ return err;
/* Pin 110 is either in4 (+12V) or VID4 */
config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG);
@@ -592,9 +591,6 @@ static int smsc47m192_probe(struct i2c_client *client,
exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group);
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -606,8 +602,6 @@ static int smsc47m192_remove(struct i2c_client *client)
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group);
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index add9f019b24f..080c26370480 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -361,12 +361,10 @@ static int thmc50_probe(struct i2c_client *client,
struct thmc50_data *data;
int err;
- data = kzalloc(sizeof(struct thmc50_data), GFP_KERNEL);
- if (!data) {
- pr_debug("thmc50: detect failed, kzalloc failed!\n");
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct thmc50_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->type = id->driver_data;
@@ -377,7 +375,7 @@ static int thmc50_probe(struct i2c_client *client,
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &thmc50_group);
if (err)
- goto exit_free;
+ return err;
/* Register ADM1022 sysfs hooks */
if (data->has_temp3) {
@@ -400,9 +398,6 @@ exit_remove_sysfs:
sysfs_remove_group(&client->dev.kobj, &temp3_group);
exit_remove_sysfs_thmc50:
sysfs_remove_group(&client->dev.kobj, &thmc50_group);
-exit_free:
- kfree(data);
-exit:
return err;
}
@@ -415,8 +410,6 @@ static int thmc50_remove(struct i2c_client *client)
if (data->has_temp3)
sysfs_remove_group(&client->dev.kobj, &temp3_group);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 0d466b9d8908..4e1ff82c63e0 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -159,17 +159,16 @@ static int __devinit tmp102_probe(struct i2c_client *client,
return -ENODEV;
}
- tmp102 = kzalloc(sizeof(*tmp102), GFP_KERNEL);
- if (!tmp102) {
- dev_dbg(&client->dev, "kzalloc failed\n");
+ tmp102 = devm_kzalloc(&client->dev, sizeof(*tmp102), GFP_KERNEL);
+ if (!tmp102)
return -ENOMEM;
- }
+
i2c_set_clientdata(client, tmp102);
status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
if (status < 0) {
dev_err(&client->dev, "error reading config register\n");
- goto fail_free;
+ return status;
}
tmp102->config_orig = status;
status = i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
@@ -213,9 +212,6 @@ fail_remove_sysfs:
fail_restore_config:
i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
tmp102->config_orig);
-fail_free:
- kfree(tmp102);
-
return status;
}
@@ -236,8 +232,6 @@ static int __devexit tmp102_remove(struct i2c_client *client)
config | TMP102_CONF_SD);
}
- kfree(tmp102);
-
return 0;
}
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ea54c3384671..e62054875164 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -594,7 +594,6 @@ static int tmp401_remove(struct i2c_client *client)
&tmp411_attr[i].dev_attr);
}
- kfree(data);
return 0;
}
@@ -605,7 +604,8 @@ static int tmp401_probe(struct i2c_client *client,
struct tmp401_data *data;
const char *names[] = { "TMP401", "TMP411" };
- data = kzalloc(sizeof(struct tmp401_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct tmp401_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -646,7 +646,7 @@ static int tmp401_probe(struct i2c_client *client,
return 0;
exit_remove:
- tmp401_remove(client); /* will also free data for us */
+ tmp401_remove(client);
return err;
}
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 8fac87a38544..6a8ded29f1ed 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -267,7 +267,8 @@ static int tmp421_probe(struct i2c_client *client,
struct tmp421_data *data;
int err;
- data = kzalloc(sizeof(struct tmp421_data), GFP_KERNEL);
+ data = devm_kzalloc(&client->dev, sizeof(struct tmp421_data),
+ GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -277,11 +278,11 @@ static int tmp421_probe(struct i2c_client *client,
err = tmp421_init_client(client);
if (err)
- goto exit_free;
+ return err;
err = sysfs_create_group(&client->dev.kobj, &tmp421_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -293,10 +294,6 @@ static int tmp421_probe(struct i2c_client *client,
exit_remove:
sysfs_remove_group(&client->dev.kobj, &tmp421_group);
-
-exit_free:
- kfree(data);
-
return err;
}
@@ -307,8 +304,6 @@ static int tmp421_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &tmp421_group);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 288135d85e11..299399aa30fe 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -690,18 +690,17 @@ static int __devinit via686a_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, VIA686A_EXTENT,
- via686a_driver.driver.name)) {
+ if (!devm_request_region(&pdev->dev, res->start, VIA686A_EXTENT,
+ via686a_driver.driver.name)) {
dev_err(&pdev->dev, "Region 0x%lx-0x%lx already in use!\n",
(unsigned long)res->start, (unsigned long)res->end);
return -ENODEV;
}
- data = kzalloc(sizeof(struct via686a_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit_release;
- }
+ data = devm_kzalloc(&pdev->dev, sizeof(struct via686a_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
platform_set_drvdata(pdev, data);
data->addr = res->start;
@@ -714,7 +713,7 @@ static int __devinit via686a_probe(struct platform_device *pdev)
/* Register sysfs hooks */
err = sysfs_create_group(&pdev->dev.kobj, &via686a_group);
if (err)
- goto exit_free;
+ return err;
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->hwmon_dev)) {
@@ -726,10 +725,6 @@ static int __devinit via686a_probe(struct platform_device *pdev)
exit_remove_files:
sysfs_remove_group(&pdev->dev.kobj, &via686a_group);
-exit_free:
- kfree(data);
-exit_release:
- release_region(res->start, VIA686A_EXTENT);
return err;
}
@@ -740,10 +735,6 @@ static int __devexit via686a_remove(struct platform_device *pdev)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &via686a_group);
- release_region(data->addr, VIA686A_EXTENT);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
return 0;
}
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index c2c5c72fb8f0..f2c61153dba9 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -1148,19 +1148,18 @@ static int __devinit vt1211_probe(struct platform_device *pdev)
struct resource *res;
int i, err;
- data = kzalloc(sizeof(struct vt1211_data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(struct vt1211_data), GFP_KERNEL);
if (!data) {
- err = -ENOMEM;
dev_err(dev, "Out of memory\n");
- goto EXIT;
+ return -ENOMEM;
}
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, resource_size(res), DRVNAME)) {
- err = -EBUSY;
+ if (!devm_request_region(dev, res->start, resource_size(res),
+ DRVNAME)) {
dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
(unsigned long)res->start, (unsigned long)res->end);
- goto EXIT_KFREE;
+ return -EBUSY;
}
data->addr = res->start;
data->name = DRVNAME;
@@ -1215,26 +1214,15 @@ EXIT_DEV_REMOVE:
dev_err(dev, "Sysfs interface creation failed (%d)\n", err);
EXIT_DEV_REMOVE_SILENT:
vt1211_remove_sysfs(pdev);
- release_region(res->start, resource_size(res));
-EXIT_KFREE:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-EXIT:
return err;
}
static int __devexit vt1211_remove(struct platform_device *pdev)
{
struct vt1211_data *data = platform_get_drvdata(pdev);
- struct resource *res;
hwmon_device_unregister(data->hwmon_dev);
vt1211_remove_sysfs(pdev);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, resource_size(res));
return 0;
}
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 54922ed12978..1821b7423d5b 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -599,6 +599,7 @@ static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr)
reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
| ((data->fan_div[1] << 4) & 0x70);
w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg);
+ break;
case 2:
reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
| (data->fan_div[2] & 0x7);
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 5ce54a297249..ab4825205a9d 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1359,19 +1359,17 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
};
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start, WINB_REGION_SIZE, DRVNAME)) {
+ if (!devm_request_region(dev, res->start, WINB_REGION_SIZE, DRVNAME)) {
dev_err(dev, "Failed to request region 0x%lx-0x%lx\n",
(unsigned long)res->start,
(unsigned long)(res->start + WINB_REGION_SIZE - 1));
- err = -EBUSY;
- goto ERROR0;
+ return -EBUSY;
}
- data = kzalloc(sizeof(struct w83627hf_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto ERROR1;
- }
+ data = devm_kzalloc(dev, sizeof(struct w83627hf_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
data->addr = res->start;
data->type = sio_data->type;
data->name = names[sio_data->type];
@@ -1391,7 +1389,7 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
/* Register common device attributes */
err = sysfs_create_group(&dev->kobj, &w83627hf_group);
if (err)
- goto ERROR3;
+ return err;
/* Register chip-specific device attributes */
if (data->type == w83627hf || data->type == w83697hf)
@@ -1419,7 +1417,7 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
&sensor_dev_attr_pwm1_freq.dev_attr))
|| (err = device_create_file(dev,
&sensor_dev_attr_pwm2_freq.dev_attr)))
- goto ERROR4;
+ goto error;
if (data->type != w83697hf)
if ((err = device_create_file(dev,
@@ -1454,7 +1452,7 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
&sensor_dev_attr_temp3_beep.dev_attr))
|| (err = device_create_file(dev,
&sensor_dev_attr_temp3_type.dev_attr)))
- goto ERROR4;
+ goto error;
if (data->type != w83697hf && data->vid != 0xff) {
/* Convert VID to voltage based on VRM */
@@ -1462,14 +1460,14 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
if ((err = device_create_file(dev, &dev_attr_cpu0_vid))
|| (err = device_create_file(dev, &dev_attr_vrm)))
- goto ERROR4;
+ goto error;
}
if (data->type == w83627thf || data->type == w83637hf
|| data->type == w83687thf) {
err = device_create_file(dev, &sensor_dev_attr_pwm3.dev_attr);
if (err)
- goto ERROR4;
+ goto error;
}
if (data->type == w83637hf || data->type == w83687thf)
@@ -1479,57 +1477,45 @@ static int __devinit w83627hf_probe(struct platform_device *pdev)
&sensor_dev_attr_pwm2_freq.dev_attr))
|| (err = device_create_file(dev,
&sensor_dev_attr_pwm3_freq.dev_attr)))
- goto ERROR4;
+ goto error;
if (data->type != w83627hf)
if ((err = device_create_file(dev,
&sensor_dev_attr_pwm1_enable.dev_attr))
|| (err = device_create_file(dev,
&sensor_dev_attr_pwm2_enable.dev_attr)))
- goto ERROR4;
+ goto error;
if (data->type == w83627thf || data->type == w83637hf
|| data->type == w83687thf) {
err = device_create_file(dev,
&sensor_dev_attr_pwm3_enable.dev_attr);
if (err)
- goto ERROR4;
+ goto error;
}
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto ERROR4;
+ goto error;
}
return 0;
- ERROR4:
+ error:
sysfs_remove_group(&dev->kobj, &w83627hf_group);
sysfs_remove_group(&dev->kobj, &w83627hf_group_opt);
- ERROR3:
- platform_set_drvdata(pdev, NULL);
- kfree(data);
- ERROR1:
- release_region(res->start, WINB_REGION_SIZE);
- ERROR0:
return err;
}
static int __devexit w83627hf_remove(struct platform_device *pdev)
{
struct w83627hf_data *data = platform_get_drvdata(pdev);
- struct resource *res;
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group);
sysfs_remove_group(&pdev->dev.kobj, &w83627hf_group_opt);
- platform_set_drvdata(pdev, NULL);
- kfree(data);
-
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- release_region(res->start, WINB_REGION_SIZE);
return 0;
}
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index b03d54a799e3..5a5046d94c3e 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -867,6 +867,7 @@ w83781d_detect_subclients(struct i2c_client *new_client)
struct i2c_adapter *adapter = new_client->adapter;
struct w83781d_data *data = i2c_get_clientdata(new_client);
enum chips kind = data->type;
+ int num_sc = 1;
id = i2c_adapter_id(adapter);
@@ -891,6 +892,7 @@ w83781d_detect_subclients(struct i2c_client *new_client)
}
if (kind != w83783s) {
+ num_sc = 2;
if (force_subclients[0] == id &&
force_subclients[1] == address) {
sc_addr[1] = force_subclients[3];
@@ -906,7 +908,7 @@ w83781d_detect_subclients(struct i2c_client *new_client)
}
}
- for (i = 0; i <= 1; i++) {
+ for (i = 0; i < num_sc; i++) {
data->lm75[i] = i2c_new_dummy(adapter, sc_addr[i]);
if (!data->lm75[i]) {
dev_err(&new_client->dev, "Subclient %d "
@@ -917,8 +919,6 @@ w83781d_detect_subclients(struct i2c_client *new_client)
goto ERROR_SC_3;
goto ERROR_SC_2;
}
- if (kind == w83783s)
- break;
}
return 0;
@@ -1213,11 +1213,9 @@ w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct w83781d_data *data;
int err;
- data = kzalloc(sizeof(struct w83781d_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto ERROR1;
- }
+ data = devm_kzalloc(dev, sizeof(struct w83781d_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->lock);
@@ -1229,7 +1227,7 @@ w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* attach secondary i2c lm75-like clients */
err = w83781d_detect_subclients(client);
if (err)
- goto ERROR3;
+ return err;
/* Initialize the chip */
w83781d_init_device(dev);
@@ -1237,25 +1235,22 @@ w83781d_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register sysfs hooks */
err = w83781d_create_files(dev, data->type, 0);
if (err)
- goto ERROR4;
+ goto exit_remove_files;
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
- goto ERROR4;
+ goto exit_remove_files;
}
return 0;
-ERROR4:
+ exit_remove_files:
w83781d_remove_files(dev);
if (data->lm75[0])
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1])
i2c_unregister_device(data->lm75[1]);
-ERROR3:
- kfree(data);
-ERROR1:
return err;
}
@@ -1273,8 +1268,6 @@ w83781d_remove(struct i2c_client *client)
if (data->lm75[1])
i2c_unregister_device(data->lm75[1]);
- kfree(data);
-
return 0;
}
@@ -1780,17 +1773,16 @@ w83781d_isa_probe(struct platform_device *pdev)
/* Reserve the ISA region */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!request_region(res->start + W83781D_ADDR_REG_OFFSET, 2,
- "w83781d")) {
- err = -EBUSY;
- goto exit;
- }
+ if (!devm_request_region(&pdev->dev,
+ res->start + W83781D_ADDR_REG_OFFSET, 2,
+ "w83781d"))
+ return -EBUSY;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(struct w83781d_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- data = kzalloc(sizeof(struct w83781d_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit_release_region;
- }
mutex_init(&data->lock);
data->isa_addr = res->start;
platform_set_drvdata(pdev, data);
@@ -1829,10 +1821,6 @@ w83781d_isa_probe(struct platform_device *pdev)
exit_remove_files:
w83781d_remove_files(&pdev->dev);
device_remove_file(&pdev->dev, &dev_attr_name);
- kfree(data);
- exit_release_region:
- release_region(res->start + W83781D_ADDR_REG_OFFSET, 2);
- exit:
return err;
}
@@ -1844,8 +1832,6 @@ w83781d_isa_remove(struct platform_device *pdev)
hwmon_device_unregister(data->hwmon_dev);
w83781d_remove_files(&pdev->dev);
device_remove_file(&pdev->dev, &dev_attr_name);
- release_region(data->isa_addr + W83781D_ADDR_REG_OFFSET, 2);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 2f446f92acf2..9ade4d4e2185 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -1384,18 +1384,17 @@ static int w83791d_probe(struct i2c_client *client,
(val1 >> 5) & 0x07, (val1 >> 1) & 0x0f, val1);
#endif
- data = kzalloc(sizeof(struct w83791d_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto error0;
- }
+ data = devm_kzalloc(&client->dev, sizeof(struct w83791d_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
err = w83791d_detect_subclients(client);
if (err)
- goto error1;
+ return err;
/* Initialize the chip */
w83791d_init_client(client);
@@ -1440,9 +1439,6 @@ error3:
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
-error1:
- kfree(data);
-error0:
return err;
}
@@ -1458,7 +1454,6 @@ static int w83791d_remove(struct i2c_client *client)
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index ffb5fdfecf0d..0ba5a2bd562e 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -1422,11 +1422,9 @@ w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct device *dev = &client->dev;
int i, val1, err;
- data = kzalloc(sizeof(struct w83792d_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto ERROR0;
- }
+ data = devm_kzalloc(dev, sizeof(struct w83792d_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->valid = 0;
@@ -1434,7 +1432,7 @@ w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
err = w83792d_detect_subclients(client);
if (err)
- goto ERROR1;
+ return err;
/* Initialize the chip */
w83792d_init_client(client);
@@ -1448,7 +1446,7 @@ w83792d_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register sysfs hooks */
err = sysfs_create_group(&dev->kobj, &w83792d_group);
if (err)
- goto ERROR3;
+ goto exit_i2c_unregister;
/*
* Read GPIO enable register to check if pins for fan 4,5 are used as
@@ -1493,14 +1491,11 @@ exit_remove_files:
sysfs_remove_group(&dev->kobj, &w83792d_group);
for (i = 0; i < ARRAY_SIZE(w83792d_group_fan); i++)
sysfs_remove_group(&dev->kobj, &w83792d_group_fan[i]);
-ERROR3:
+exit_i2c_unregister:
if (data->lm75[0] != NULL)
i2c_unregister_device(data->lm75[0]);
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
-ERROR1:
- kfree(data);
-ERROR0:
return err;
}
@@ -1521,7 +1516,6 @@ w83792d_remove(struct i2c_client *client)
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index d887cb3b72e8..b813c646c7ca 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -2157,11 +2157,9 @@ static int w83795_probe(struct i2c_client *client,
struct w83795_data *data;
int err;
- data = kzalloc(sizeof(struct w83795_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(dev, sizeof(struct w83795_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
i2c_set_clientdata(client, data);
data->chip_type = id->driver_data;
@@ -2247,8 +2245,6 @@ static int w83795_probe(struct i2c_client *client,
exit_remove:
w83795_handle_files(dev, device_remove_file_wrapper);
- kfree(data);
-exit:
return err;
}
@@ -2258,7 +2254,6 @@ static int w83795_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
w83795_handle_files(&client->dev, device_remove_file_wrapper);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 5f14e3897058..39dbe990dc10 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -176,19 +176,18 @@ static int w83l785ts_detect(struct i2c_client *client,
return 0;
}
-static int w83l785ts_probe(struct i2c_client *new_client,
+static int w83l785ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct w83l785ts_data *data;
- int err = 0;
+ struct device *dev = &client->dev;
+ int err;
- data = kzalloc(sizeof(struct w83l785ts_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
+ data = devm_kzalloc(dev, sizeof(struct w83l785ts_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- i2c_set_clientdata(new_client, data);
+ i2c_set_clientdata(client, data);
data->valid = 0;
mutex_init(&data->update_lock);
@@ -200,18 +199,16 @@ static int w83l785ts_probe(struct i2c_client *new_client,
* Nothing yet, assume it is already started.
*/
- err = device_create_file(&new_client->dev,
- &sensor_dev_attr_temp1_input.dev_attr);
+ err = device_create_file(dev, &sensor_dev_attr_temp1_input.dev_attr);
if (err)
- goto exit_remove;
+ return err;
- err = device_create_file(&new_client->dev,
- &sensor_dev_attr_temp1_max.dev_attr);
+ err = device_create_file(dev, &sensor_dev_attr_temp1_max.dev_attr);
if (err)
goto exit_remove;
/* Register sysfs hooks */
- data->hwmon_dev = hwmon_device_register(&new_client->dev);
+ data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
@@ -220,12 +217,8 @@ static int w83l785ts_probe(struct i2c_client *new_client,
return 0;
exit_remove:
- device_remove_file(&new_client->dev,
- &sensor_dev_attr_temp1_input.dev_attr);
- device_remove_file(&new_client->dev,
- &sensor_dev_attr_temp1_max.dev_attr);
- kfree(data);
-exit:
+ device_remove_file(dev, &sensor_dev_attr_temp1_input.dev_attr);
+ device_remove_file(dev, &sensor_dev_attr_temp1_max.dev_attr);
return err;
}
@@ -239,7 +232,6 @@ static int w83l785ts_remove(struct i2c_client *client)
device_remove_file(&client->dev,
&sensor_dev_attr_temp1_max.dev_attr);
- kfree(data);
return 0;
}
diff --git a/drivers/hwmon/wm831x-hwmon.c b/drivers/hwmon/wm831x-hwmon.c
index 07cb25ae69be..d0db1f2738fb 100644
--- a/drivers/hwmon/wm831x-hwmon.c
+++ b/drivers/hwmon/wm831x-hwmon.c
@@ -163,7 +163,8 @@ static int __devinit wm831x_hwmon_probe(struct platform_device *pdev)
struct wm831x_hwmon *hwmon;
int ret;
- hwmon = kzalloc(sizeof(struct wm831x_hwmon), GFP_KERNEL);
+ hwmon = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_hwmon),
+ GFP_KERNEL);
if (!hwmon)
return -ENOMEM;
@@ -171,7 +172,7 @@ static int __devinit wm831x_hwmon_probe(struct platform_device *pdev)
ret = sysfs_create_group(&pdev->dev.kobj, &wm831x_attr_group);
if (ret)
- goto err;
+ return ret;
hwmon->classdev = hwmon_device_register(&pdev->dev);
if (IS_ERR(hwmon->classdev)) {
@@ -185,8 +186,6 @@ static int __devinit wm831x_hwmon_probe(struct platform_device *pdev)
err_sysfs:
sysfs_remove_group(&pdev->dev.kobj, &wm831x_attr_group);
-err:
- kfree(hwmon);
return ret;
}
@@ -196,8 +195,6 @@ static int __devexit wm831x_hwmon_remove(struct platform_device *pdev)
hwmon_device_unregister(hwmon->classdev);
sysfs_remove_group(&pdev->dev.kobj, &wm831x_attr_group);
- platform_set_drvdata(pdev, NULL);
- kfree(hwmon);
return 0;
}
diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c
index 61c9cf15fa52..1201a15784c3 100644
--- a/drivers/hwspinlock/hwspinlock_core.c
+++ b/drivers/hwspinlock/hwspinlock_core.c
@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
spin_lock_init(&hwlock->lock);
hwlock->bank = bank;
- ret = hwspin_lock_register_single(hwlock, i);
+ ret = hwspin_lock_register_single(hwlock, base_id + i);
if (ret)
goto reg_failed;
}
@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
reg_failed:
while (--i >= 0)
- hwspin_lock_unregister_single(i);
+ hwspin_lock_unregister_single(base_id + i);
return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 7244c8be6063..2e7530a4e7b8 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -133,7 +133,7 @@ config I2C_PIIX4
ATI IXP300
ATI IXP400
ATI SB600
- ATI SB700
+ ATI SB700/SP5100
ATI SB800
AMD Hudson-2
Serverworks OSB4
@@ -143,6 +143,10 @@ config I2C_PIIX4
Serverworks HT-1100
SMSC Victory66
+ Some AMD chipsets contain two PIIX4-compatible SMBus
+ controllers. This driver will attempt to use both controllers
+ on the SB700/SP5100, if they have been initialized by the BIOS.
+
This driver can also be built as a module. If so, the module
will be called i2c-piix4.
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index e66d248fc126..125cd8e0ad25 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -531,15 +531,7 @@ static struct pci_driver ali1535_driver = {
.remove = __devexit_p(ali1535_remove),
};
-static int __init i2c_ali1535_init(void)
-{
- return pci_register_driver(&ali1535_driver);
-}
-
-static void __exit i2c_ali1535_exit(void)
-{
- pci_unregister_driver(&ali1535_driver);
-}
+module_pci_driver(ali1535_driver);
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, "
"Philip Edelbrock <phil@netroedge.com>, "
@@ -547,6 +539,3 @@ MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, "
"and Dan Eaton <dan.eaton@rocketlogix.com>");
MODULE_DESCRIPTION("ALI1535 SMBus driver");
MODULE_LICENSE("GPL");
-
-module_init(i2c_ali1535_init);
-module_exit(i2c_ali1535_exit);
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 47ae0091e027..e02d9f86c6a0 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -431,18 +431,6 @@ static struct pci_driver ali1563_pci_driver = {
.remove = __devexit_p(ali1563_remove),
};
-static int __init ali1563_init(void)
-{
- return pci_register_driver(&ali1563_pci_driver);
-}
-
-module_init(ali1563_init);
-
-static void __exit ali1563_exit(void)
-{
- pci_unregister_driver(&ali1563_pci_driver);
-}
-
-module_exit(ali1563_exit);
+module_pci_driver(ali1563_pci_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 087ea9caa74d..ce8d26d053a5 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -513,21 +513,10 @@ static struct pci_driver ali15x3_driver = {
.remove = __devexit_p(ali15x3_remove),
};
-static int __init i2c_ali15x3_init(void)
-{
- return pci_register_driver(&ali15x3_driver);
-}
-
-static void __exit i2c_ali15x3_exit(void)
-{
- pci_unregister_driver(&ali15x3_driver);
-}
+module_pci_driver(ali15x3_driver);
MODULE_AUTHOR ("Frodo Looijaard <frodol@dds.nl>, "
"Philip Edelbrock <phil@netroedge.com>, "
"and Mark D. Studebaker <mdsxyz123@yahoo.com>");
MODULE_DESCRIPTION("ALI15X3 SMBus driver");
MODULE_LICENSE("GPL");
-
-module_init(i2c_ali15x3_init);
-module_exit(i2c_ali15x3_exit);
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index eb778bf15c18..304aa03b57b2 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -410,21 +410,10 @@ static struct pci_driver amd756_driver = {
.remove = __devexit_p(amd756_remove),
};
-static int __init amd756_init(void)
-{
- return pci_register_driver(&amd756_driver);
-}
-
-static void __exit amd756_exit(void)
-{
- pci_unregister_driver(&amd756_driver);
-}
+module_pci_driver(amd756_driver);
MODULE_AUTHOR("Merlin Hughes <merlin@merlin.org>");
MODULE_DESCRIPTION("AMD756/766/768/8111 and nVidia nForce SMBus driver");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(amd756_smbus);
-
-module_init(amd756_init)
-module_exit(amd756_exit)
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index e5ac53b99b04..0919ac1d99aa 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -491,15 +491,4 @@ static struct pci_driver amd8111_driver = {
.remove = __devexit_p(amd8111_remove),
};
-static int __init i2c_amd8111_init(void)
-{
- return pci_register_driver(&amd8111_driver);
-}
-
-static void __exit i2c_amd8111_exit(void)
-{
- pci_unregister_driver(&amd8111_driver);
-}
-
-module_init(i2c_amd8111_init);
-module_exit(i2c_amd8111_exit);
+module_pci_driver(amd8111_driver);
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 00e8f213f56e..92a1e2c15baa 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -374,17 +374,7 @@ static struct pci_driver dw_i2c_driver = {
},
};
-static int __init dw_i2c_init_driver(void)
-{
- return pci_register_driver(&dw_i2c_driver);
-}
-module_init(dw_i2c_init_driver);
-
-static void __exit dw_i2c_exit_driver(void)
-{
- pci_unregister_driver(&dw_i2c_driver);
-}
-module_exit(dw_i2c_exit_driver);
+module_pci_driver(dw_i2c_driver);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index 7eb19a5222f2..aedb94f34bf7 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -517,6 +517,6 @@ static struct usb_driver diolan_u2c_driver = {
module_usb_driver(diolan_u2c_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION(DRIVER_NAME " driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 2f74ae872e1e..259f7697bf25 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -953,17 +953,7 @@ static struct pci_driver pch_pcidriver = {
.resume = pch_i2c_resume
};
-static int __init pch_pci_init(void)
-{
- return pci_register_driver(&pch_pcidriver);
-}
-module_init(pch_pci_init);
-
-static void __exit pch_pci_exit(void)
-{
- pci_unregister_driver(&pch_pcidriver);
-}
-module_exit(pch_pci_exit);
+module_pci_driver(pch_pcidriver);
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index c527de17db4f..c9f95e1666a8 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -156,23 +156,8 @@ static struct pci_driver hydra_driver = {
.remove = __devexit_p(hydra_remove),
};
-static int __init i2c_hydra_init(void)
-{
- return pci_register_driver(&hydra_driver);
-}
-
-
-static void __exit i2c_hydra_exit(void)
-{
- pci_unregister_driver(&hydra_driver);
-}
-
-
+module_pci_driver(hydra_driver);
MODULE_AUTHOR("Geert Uytterhoeven <geert@linux-m68k.org>");
MODULE_DESCRIPTION("i2c for Apple Hydra Mac I/O");
MODULE_LICENSE("GPL");
-
-module_init(i2c_hydra_init);
-module_exit(i2c_hydra_exit);
-
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ae2945a5e007..898dcf9c7ade 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -60,10 +60,12 @@
Block process call transaction no
I2C block read transaction yes (doesn't use the block buffer)
Slave mode no
+ Interrupt processing yes
See the file Documentation/i2c/busses/i2c-i801 for details.
*/
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
@@ -76,6 +78,7 @@
#include <linux/io.h>
#include <linux/dmi.h>
#include <linux/slab.h>
+#include <linux/wait.h>
/* I801 SMBus address offsets */
#define SMBHSTSTS(p) (0 + (p)->smba)
@@ -91,8 +94,12 @@
/* PCI Address Constants */
#define SMBBAR 4
+#define SMBPCISTS 0x006
#define SMBHSTCFG 0x040
+/* Host status bits for SMBPCISTS */
+#define SMBPCISTS_INTS 0x08
+
/* Host configuration bits for SMBHSTCFG */
#define SMBHSTCFG_HST_EN 1
#define SMBHSTCFG_SMB_SMI_EN 2
@@ -102,12 +109,8 @@
#define SMBAUXCTL_CRC 1
#define SMBAUXCTL_E32B 2
-/* kill bit for SMBHSTCNT */
-#define SMBHSTCNT_KILL 2
-
/* Other settings */
#define MAX_RETRIES 400
-#define ENABLE_INT9 0 /* set to 0x01 to enable - untested */
/* I801 command constants */
#define I801_QUICK 0x00
@@ -117,10 +120,13 @@
#define I801_PROC_CALL 0x10 /* unimplemented */
#define I801_BLOCK_DATA 0x14
#define I801_I2C_BLOCK_DATA 0x18 /* ICH5 and later */
-#define I801_BLOCK_LAST 0x34
-#define I801_I2C_BLOCK_LAST 0x38 /* ICH5 and later */
-#define I801_START 0x40
-#define I801_PEC_EN 0x80 /* ICH3 and later */
+
+/* I801 Host Control register bits */
+#define SMBHSTCNT_INTREN 0x01
+#define SMBHSTCNT_KILL 0x02
+#define SMBHSTCNT_LAST_BYTE 0x20
+#define SMBHSTCNT_START 0x40
+#define SMBHSTCNT_PEC_EN 0x80 /* ICH3 and later */
/* I801 Hosts Status register bits */
#define SMBHSTSTS_BYTE_DONE 0x80
@@ -132,9 +138,11 @@
#define SMBHSTSTS_INTR 0x02
#define SMBHSTSTS_HOST_BUSY 0x01
-#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_FAILED | \
- SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | \
- SMBHSTSTS_INTR)
+#define STATUS_ERROR_FLAGS (SMBHSTSTS_FAILED | SMBHSTSTS_BUS_ERR | \
+ SMBHSTSTS_DEV_ERR)
+
+#define STATUS_FLAGS (SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR | \
+ STATUS_ERROR_FLAGS)
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
@@ -154,6 +162,17 @@ struct i801_priv {
unsigned char original_hstcfg;
struct pci_dev *pci_dev;
unsigned int features;
+
+ /* isr processing */
+ wait_queue_head_t waitq;
+ u8 status;
+
+ /* Command state used by isr for byte-by-byte block transactions */
+ u8 cmd;
+ bool is_read;
+ int count;
+ int len;
+ u8 *data;
};
static struct pci_driver i801_driver;
@@ -162,6 +181,7 @@ static struct pci_driver i801_driver;
#define FEATURE_BLOCK_BUFFER (1 << 1)
#define FEATURE_BLOCK_PROC (1 << 2)
#define FEATURE_I2C_BLOCK_READ (1 << 3)
+#define FEATURE_IRQ (1 << 4)
/* Not really a feature, but it's convenient to handle it as such */
#define FEATURE_IDF (1 << 15)
@@ -170,6 +190,7 @@ static const char *i801_feature_names[] = {
"Block buffer",
"Block process call",
"I2C block read",
+ "Interrupt",
};
static unsigned int disable_features;
@@ -205,13 +226,22 @@ static int i801_check_pre(struct i801_priv *priv)
return 0;
}
-/* Convert the status register to an error code, and clear it. */
-static int i801_check_post(struct i801_priv *priv, int status, int timeout)
+/*
+ * Convert the status register to an error code, and clear it.
+ * Note that status only contains the bits we want to clear, not the
+ * actual register value.
+ */
+static int i801_check_post(struct i801_priv *priv, int status)
{
int result = 0;
- /* If the SMBus is still busy, we give up */
- if (timeout) {
+ /*
+ * If the SMBus is still busy, we give up
+ * Note: This timeout condition only happens when using polling
+ * transactions. For interrupt operation, NAK/timeout is indicated by
+ * DEV_ERR.
+ */
+ if (unlikely(status < 0)) {
dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
/* try to stop the current command */
dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
@@ -244,64 +274,76 @@ static int i801_check_post(struct i801_priv *priv, int status, int timeout)
dev_dbg(&priv->pci_dev->dev, "Lost arbitration\n");
}
- if (result) {
- /* Clear error flags */
- outb_p(status & STATUS_FLAGS, SMBHSTSTS(priv));
- status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
- if (status) {
- dev_warn(&priv->pci_dev->dev, "Failed clearing status "
- "flags at end of transaction (%02x)\n",
- status);
- }
- }
+ /* Clear status flags except BYTE_DONE, to be cleared by caller */
+ outb_p(status, SMBHSTSTS(priv));
return result;
}
-static int i801_transaction(struct i801_priv *priv, int xact)
+/* Wait for BUSY being cleared and either INTR or an error flag being set */
+static int i801_wait_intr(struct i801_priv *priv)
{
- int status;
- int result;
int timeout = 0;
-
- result = i801_check_pre(priv);
- if (result < 0)
- return result;
-
- /* the current contents of SMBHSTCNT can be overwritten, since PEC,
- * INTREN, SMBSCMD are passed in xact */
- outb_p(xact | I801_START, SMBHSTCNT(priv));
+ int status;
/* We will always wait for a fraction of a second! */
do {
usleep_range(250, 500);
status = inb_p(SMBHSTSTS(priv));
- } while ((status & SMBHSTSTS_HOST_BUSY) && (timeout++ < MAX_RETRIES));
+ } while (((status & SMBHSTSTS_HOST_BUSY) ||
+ !(status & (STATUS_ERROR_FLAGS | SMBHSTSTS_INTR))) &&
+ (timeout++ < MAX_RETRIES));
- result = i801_check_post(priv, status, timeout > MAX_RETRIES);
- if (result < 0)
- return result;
-
- outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
- return 0;
+ if (timeout > MAX_RETRIES) {
+ dev_dbg(&priv->pci_dev->dev, "INTR Timeout!\n");
+ return -ETIMEDOUT;
+ }
+ return status & (STATUS_ERROR_FLAGS | SMBHSTSTS_INTR);
}
-/* wait for INTR bit as advised by Intel */
-static void i801_wait_hwpec(struct i801_priv *priv)
+/* Wait for either BYTE_DONE or an error flag being set */
+static int i801_wait_byte_done(struct i801_priv *priv)
{
int timeout = 0;
int status;
+ /* We will always wait for a fraction of a second! */
do {
usleep_range(250, 500);
status = inb_p(SMBHSTSTS(priv));
- } while ((!(status & SMBHSTSTS_INTR))
- && (timeout++ < MAX_RETRIES));
+ } while (!(status & (STATUS_ERROR_FLAGS | SMBHSTSTS_BYTE_DONE)) &&
+ (timeout++ < MAX_RETRIES));
- if (timeout > MAX_RETRIES)
- dev_dbg(&priv->pci_dev->dev, "PEC Timeout!\n");
+ if (timeout > MAX_RETRIES) {
+ dev_dbg(&priv->pci_dev->dev, "BYTE_DONE Timeout!\n");
+ return -ETIMEDOUT;
+ }
+ return status & STATUS_ERROR_FLAGS;
+}
- outb_p(status, SMBHSTSTS(priv));
+static int i801_transaction(struct i801_priv *priv, int xact)
+{
+ int status;
+ int result;
+
+ result = i801_check_pre(priv);
+ if (result < 0)
+ return result;
+
+ if (priv->features & FEATURE_IRQ) {
+ outb_p(xact | SMBHSTCNT_INTREN | SMBHSTCNT_START,
+ SMBHSTCNT(priv));
+ wait_event(priv->waitq, (status = priv->status));
+ priv->status = 0;
+ return i801_check_post(priv, status);
+ }
+
+ /* the current contents of SMBHSTCNT can be overwritten, since PEC,
+ * SMBSCMD are passed in xact */
+ outb_p(xact | SMBHSTCNT_START, SMBHSTCNT(priv));
+
+ status = i801_wait_intr(priv);
+ return i801_check_post(priv, status);
}
static int i801_block_transaction_by_block(struct i801_priv *priv,
@@ -321,8 +363,8 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
outb_p(data->block[i+1], SMBBLKDAT(priv));
}
- status = i801_transaction(priv, I801_BLOCK_DATA | ENABLE_INT9 |
- I801_PEC_EN * hwpec);
+ status = i801_transaction(priv, I801_BLOCK_DATA |
+ (hwpec ? SMBHSTCNT_PEC_EN : 0));
if (status)
return status;
@@ -338,6 +380,98 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
return 0;
}
+static void i801_isr_byte_done(struct i801_priv *priv)
+{
+ if (priv->is_read) {
+ /* For SMBus block reads, length is received with first byte */
+ if (((priv->cmd & 0x1c) == I801_BLOCK_DATA) &&
+ (priv->count == 0)) {
+ priv->len = inb_p(SMBHSTDAT0(priv));
+ if (priv->len < 1 || priv->len > I2C_SMBUS_BLOCK_MAX) {
+ dev_err(&priv->pci_dev->dev,
+ "Illegal SMBus block read size %d\n",
+ priv->len);
+ /* FIXME: Recover */
+ priv->len = I2C_SMBUS_BLOCK_MAX;
+ } else {
+ dev_dbg(&priv->pci_dev->dev,
+ "SMBus block read size is %d\n",
+ priv->len);
+ }
+ priv->data[-1] = priv->len;
+ }
+
+ /* Read next byte */
+ if (priv->count < priv->len)
+ priv->data[priv->count++] = inb(SMBBLKDAT(priv));
+ else
+ dev_dbg(&priv->pci_dev->dev,
+ "Discarding extra byte on block read\n");
+
+ /* Set LAST_BYTE for last byte of read transaction */
+ if (priv->count == priv->len - 1)
+ outb_p(priv->cmd | SMBHSTCNT_LAST_BYTE,
+ SMBHSTCNT(priv));
+ } else if (priv->count < priv->len - 1) {
+ /* Write next byte, except for IRQ after last byte */
+ outb_p(priv->data[++priv->count], SMBBLKDAT(priv));
+ }
+
+ /* Clear BYTE_DONE to continue with next byte */
+ outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
+}
+
+/*
+ * There are two kinds of interrupts:
+ *
+ * 1) i801 signals transaction completion with one of these interrupts:
+ * INTR - Success
+ * DEV_ERR - Invalid command, NAK or communication timeout
+ * BUS_ERR - SMI# transaction collision
+ * FAILED - transaction was canceled due to a KILL request
+ * When any of these occur, update ->status and wake up the waitq.
+ * ->status must be cleared before kicking off the next transaction.
+ *
+ * 2) For byte-by-byte (I2C read/write) transactions, one BYTE_DONE interrupt
+ * occurs for each byte of a byte-by-byte to prepare the next byte.
+ */
+static irqreturn_t i801_isr(int irq, void *dev_id)
+{
+ struct i801_priv *priv = dev_id;
+ u16 pcists;
+ u8 status;
+
+ /* Confirm this is our interrupt */
+ pci_read_config_word(priv->pci_dev, SMBPCISTS, &pcists);
+ if (!(pcists & SMBPCISTS_INTS))
+ return IRQ_NONE;
+
+ status = inb_p(SMBHSTSTS(priv));
+ if (status != 0x42)
+ dev_dbg(&priv->pci_dev->dev, "irq: status = %02x\n", status);
+
+ if (status & SMBHSTSTS_BYTE_DONE)
+ i801_isr_byte_done(priv);
+
+ /*
+ * Clear irq sources and report transaction result.
+ * ->status must be cleared before the next transaction is started.
+ */
+ status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS;
+ if (status) {
+ outb_p(status, SMBHSTSTS(priv));
+ priv->status |= status;
+ wake_up(&priv->waitq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * For "byte-by-byte" block transactions:
+ * I2C write uses cmd=I801_BLOCK_DATA, I2C_EN=1
+ * I2C read uses cmd=I801_I2C_BLOCK_DATA
+ */
static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
union i2c_smbus_data *data,
char read_write, int command,
@@ -347,7 +481,6 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
int smbcmd;
int status;
int result;
- int timeout;
result = i801_check_pre(priv);
if (result < 0)
@@ -360,36 +493,39 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
outb_p(data->block[1], SMBBLKDAT(priv));
}
+ if (command == I2C_SMBUS_I2C_BLOCK_DATA &&
+ read_write == I2C_SMBUS_READ)
+ smbcmd = I801_I2C_BLOCK_DATA;
+ else
+ smbcmd = I801_BLOCK_DATA;
+
+ if (priv->features & FEATURE_IRQ) {
+ priv->is_read = (read_write == I2C_SMBUS_READ);
+ if (len == 1 && priv->is_read)
+ smbcmd |= SMBHSTCNT_LAST_BYTE;
+ priv->cmd = smbcmd | SMBHSTCNT_INTREN;
+ priv->len = len;
+ priv->count = 0;
+ priv->data = &data->block[1];
+
+ outb_p(priv->cmd | SMBHSTCNT_START, SMBHSTCNT(priv));
+ wait_event(priv->waitq, (status = priv->status));
+ priv->status = 0;
+ return i801_check_post(priv, status);
+ }
+
for (i = 1; i <= len; i++) {
- if (i == len && read_write == I2C_SMBUS_READ) {
- if (command == I2C_SMBUS_I2C_BLOCK_DATA)
- smbcmd = I801_I2C_BLOCK_LAST;
- else
- smbcmd = I801_BLOCK_LAST;
- } else {
- if (command == I2C_SMBUS_I2C_BLOCK_DATA
- && read_write == I2C_SMBUS_READ)
- smbcmd = I801_I2C_BLOCK_DATA;
- else
- smbcmd = I801_BLOCK_DATA;
- }
- outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT(priv));
+ if (i == len && read_write == I2C_SMBUS_READ)
+ smbcmd |= SMBHSTCNT_LAST_BYTE;
+ outb_p(smbcmd, SMBHSTCNT(priv));
if (i == 1)
- outb_p(inb(SMBHSTCNT(priv)) | I801_START,
+ outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
SMBHSTCNT(priv));
- /* We will always wait for a fraction of a second! */
- timeout = 0;
- do {
- usleep_range(250, 500);
- status = inb_p(SMBHSTSTS(priv));
- } while ((!(status & SMBHSTSTS_BYTE_DONE))
- && (timeout++ < MAX_RETRIES));
-
- result = i801_check_post(priv, status, timeout > MAX_RETRIES);
- if (result < 0)
- return result;
+ status = i801_wait_byte_done(priv);
+ if (status)
+ goto exit;
if (i == 1 && read_write == I2C_SMBUS_READ
&& command != I2C_SMBUS_I2C_BLOCK_DATA) {
@@ -416,10 +552,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
outb_p(data->block[i+1], SMBBLKDAT(priv));
/* signals SMBBLKDAT ready */
- outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS(priv));
+ outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
}
- return 0;
+ status = i801_wait_intr(priv);
+exit:
+ return i801_check_post(priv, status);
}
static int i801_set_block_buffer_mode(struct i801_priv *priv)
@@ -474,9 +612,6 @@ static int i801_block_transaction(struct i801_priv *priv,
read_write,
command, hwpec);
- if (result == 0 && hwpec)
- i801_wait_hwpec(priv);
-
if (command == I2C_SMBUS_I2C_BLOCK_DATA
&& read_write == I2C_SMBUS_WRITE) {
/* restore saved configuration register value */
@@ -564,7 +699,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
ret = i801_block_transaction(priv, data, read_write, size,
hwpec);
else
- ret = i801_transaction(priv, xact | ENABLE_INT9);
+ ret = i801_transaction(priv, xact);
/* Some BIOSes don't like it when PEC is enabled at reboot or resume
time, so we forcibly disable it after every transaction. Turn off
@@ -799,6 +934,16 @@ static int __devinit i801_probe(struct pci_dev *dev,
break;
}
+ /* IRQ processing tested on CougarPoint PCH, ICH5, ICH7-M and ICH10 */
+ if (dev->device == PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS ||
+ dev->device == PCI_DEVICE_ID_INTEL_82801EB_3 ||
+ dev->device == PCI_DEVICE_ID_INTEL_ICH7_17 ||
+ dev->device == PCI_DEVICE_ID_INTEL_ICH8_5 ||
+ dev->device == PCI_DEVICE_ID_INTEL_ICH9_6 ||
+ dev->device == PCI_DEVICE_ID_INTEL_ICH10_4 ||
+ dev->device == PCI_DEVICE_ID_INTEL_ICH10_5)
+ priv->features |= FEATURE_IRQ;
+
/* Disable features on user request */
for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
if (priv->features & disable_features & (1 << i))
@@ -846,16 +991,30 @@ static int __devinit i801_probe(struct pci_dev *dev,
}
pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp);
- if (temp & SMBHSTCFG_SMB_SMI_EN)
+ if (temp & SMBHSTCFG_SMB_SMI_EN) {
dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n");
- else
- dev_dbg(&dev->dev, "SMBus using PCI Interrupt\n");
+ /* Disable SMBus interrupt feature if SMBus using SMI# */
+ priv->features &= ~FEATURE_IRQ;
+ }
/* Clear special mode bits */
if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
outb_p(inb_p(SMBAUXCTL(priv)) &
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
+ if (priv->features & FEATURE_IRQ) {
+ init_waitqueue_head(&priv->waitq);
+
+ err = request_irq(dev->irq, i801_isr, IRQF_SHARED,
+ i801_driver.name, priv);
+ if (err) {
+ dev_err(&dev->dev, "Failed to allocate irq %d: %d\n",
+ dev->irq, err);
+ goto exit_release;
+ }
+ dev_info(&dev->dev, "SMBus using PCI Interrupt\n");
+ }
+
/* set up the sysfs linkage to our parent device */
priv->adapter.dev.parent = &dev->dev;
@@ -867,14 +1026,18 @@ static int __devinit i801_probe(struct pci_dev *dev,
err = i2c_add_adapter(&priv->adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
- goto exit_release;
+ goto exit_free_irq;
}
i801_probe_optional_slaves(priv);
pci_set_drvdata(dev, priv);
+
return 0;
+exit_free_irq:
+ if (priv->features & FEATURE_IRQ)
+ free_irq(dev->irq, priv);
exit_release:
pci_release_region(dev, SMBBAR);
exit:
@@ -888,7 +1051,11 @@ static void __devexit i801_remove(struct pci_dev *dev)
i2c_del_adapter(&priv->adapter);
pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+
+ if (priv->features & FEATURE_IRQ)
+ free_irq(dev->irq, priv);
pci_release_region(dev, SMBBAR);
+
pci_set_drvdata(dev, NULL);
kfree(priv);
/*
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 8d6b504d65c4..370031ac8200 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -53,7 +53,6 @@
#include <linux/of_i2c.h>
#include <linux/pinctrl/consumer.h>
-#include <mach/irqs.h>
#include <mach/hardware.h>
#include <mach/i2c.h>
diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c
index 365bad5b890b..7c28f10f95ca 100644
--- a/drivers/i2c/busses/i2c-intel-mid.c
+++ b/drivers/i2c/busses/i2c-intel-mid.c
@@ -1116,18 +1116,7 @@ static struct pci_driver intel_mid_i2c_driver = {
.remove = __devexit_p(intel_mid_i2c_remove),
};
-static int __init intel_mid_i2c_init(void)
-{
- return pci_register_driver(&intel_mid_i2c_driver);
-}
-
-static void __exit intel_mid_i2c_exit(void)
-{
- pci_unregister_driver(&intel_mid_i2c_driver);
-}
-
-module_init(intel_mid_i2c_init);
-module_exit(intel_mid_i2c_exit);
+module_pci_driver(intel_mid_i2c_driver);
MODULE_AUTHOR("Ba Zheng <zheng.ba@intel.com>");
MODULE_DESCRIPTION("I2C driver for Moorestown Platform");
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index 43a96a123920..392303b4be07 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -453,16 +453,4 @@ static struct pci_driver nforce2_driver = {
.remove = __devexit_p(nforce2_remove),
};
-static int __init nforce2_init(void)
-{
- return pci_register_driver(&nforce2_driver);
-}
-
-static void __exit nforce2_exit(void)
-{
- pci_unregister_driver(&nforce2_driver);
-}
-
-module_init(nforce2_init);
-module_exit(nforce2_exit);
-
+module_pci_driver(nforce2_driver);
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 5267ab93d550..a92440dbef07 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -965,8 +965,7 @@ static int __devinit nmk_i2c_probe(struct platform_device *pdev)
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->algo = &nmk_i2c_algo;
- adap->timeout = pdata->timeout ? msecs_to_jiffies(pdata->timeout) :
- msecs_to_jiffies(20000);
+ adap->timeout = msecs_to_jiffies(pdata->timeout);
snprintf(adap->name, sizeof(adap->name),
"Nomadik I2C%d at %lx", pdev->id, (unsigned long)res->start);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 801df6000e9b..c2148332de0f 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -545,6 +545,8 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
if (dev->speed > 400)
w |= OMAP_I2C_CON_OPMODE_HS;
+ if (msg->flags & I2C_M_STOP)
+ stop = 1;
if (msg->flags & I2C_M_TEN)
w |= OMAP_I2C_CON_XA;
if (!(msg->flags & I2C_M_RD))
@@ -658,7 +660,8 @@ out:
static u32
omap_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) |
+ I2C_FUNC_PROTOCOL_MANGLING;
}
static inline void
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index eaaea73209c5..12edefd4183a 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -415,19 +415,8 @@ static struct pci_driver pasemi_smb_driver = {
.remove = __devexit_p(pasemi_smb_remove),
};
-static int __init pasemi_smb_init(void)
-{
- return pci_register_driver(&pasemi_smb_driver);
-}
-
-static void __exit pasemi_smb_exit(void)
-{
- pci_unregister_driver(&pasemi_smb_driver);
-}
+module_pci_driver(pasemi_smb_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient SMBus driver");
-
-module_init(pasemi_smb_init);
-module_exit(pasemi_smb_exit);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c14d48dd601a..ef511df2c965 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -21,11 +21,12 @@
Supports:
Intel PIIX4, 440MX
Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
- ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
+ ATI IXP200, IXP300, IXP400, SB600, SB700/SP5100, SB800
AMD Hudson-2
SMSC Victory66
- Note: we assume there can only be one device, with one SMBus interface.
+ Note: we assume there can only be one device, with one or more
+ SMBus interfaces.
*/
#include <linux/module.h>
@@ -94,10 +95,8 @@ MODULE_PARM_DESC(force_addr,
"Forcibly enable the PIIX4 at the given address. "
"EXTREMELY DANGEROUS!");
-static unsigned short piix4_smba;
static int srvrworks_csb5_delay;
static struct pci_driver piix4_driver;
-static struct i2c_adapter piix4_adapter;
static struct dmi_system_id __devinitdata piix4_dmi_blacklist[] = {
{
@@ -127,10 +126,15 @@ static struct dmi_system_id __devinitdata piix4_dmi_ibm[] = {
{ },
};
+struct i2c_piix4_adapdata {
+ unsigned short smba;
+};
+
static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id)
{
unsigned char temp;
+ unsigned short piix4_smba;
if ((PIIX4_dev->vendor == PCI_VENDOR_ID_SERVERWORKS) &&
(PIIX4_dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5))
@@ -206,7 +210,6 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
dev_err(&PIIX4_dev->dev,
"Host SMBus controller not enabled!\n");
release_region(piix4_smba, SMBIOSIZE);
- piix4_smba = 0;
return -ENODEV;
}
}
@@ -224,12 +227,13 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
"SMBus Host Controller at 0x%x, revision %d\n",
piix4_smba, temp);
- return 0;
+ return piix4_smba;
}
static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
const struct pci_device_id *id)
{
+ unsigned short piix4_smba;
unsigned short smba_idx = 0xcd6;
u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c;
@@ -273,7 +277,6 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
dev_err(&PIIX4_dev->dev, "SMBus I2C bus config region "
"0x%x already in use!\n", piix4_smba + i2ccfg_offset);
release_region(piix4_smba, SMBIOSIZE);
- piix4_smba = 0;
return -EBUSY;
}
i2ccfg = inb_p(piix4_smba + i2ccfg_offset);
@@ -288,30 +291,72 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
"SMBus Host Controller at 0x%x, revision %d\n",
piix4_smba, i2ccfg >> 4);
- return 0;
+ return piix4_smba;
+}
+
+static int __devinit piix4_setup_aux(struct pci_dev *PIIX4_dev,
+ const struct pci_device_id *id,
+ unsigned short base_reg_addr)
+{
+ /* Set up auxiliary SMBus controllers found on some
+ * AMD chipsets e.g. SP5100 (SB700 derivative) */
+
+ unsigned short piix4_smba;
+
+ /* Read address of auxiliary SMBus controller */
+ pci_read_config_word(PIIX4_dev, base_reg_addr, &piix4_smba);
+ if ((piix4_smba & 1) == 0) {
+ dev_dbg(&PIIX4_dev->dev,
+ "Auxiliary SMBus controller not enabled\n");
+ return -ENODEV;
+ }
+
+ piix4_smba &= 0xfff0;
+ if (piix4_smba == 0) {
+ dev_dbg(&PIIX4_dev->dev,
+ "Auxiliary SMBus base address uninitialized\n");
+ return -ENODEV;
+ }
+
+ if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
+ return -ENODEV;
+
+ if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
+ dev_err(&PIIX4_dev->dev, "Auxiliary SMBus region 0x%x "
+ "already in use!\n", piix4_smba);
+ return -EBUSY;
+ }
+
+ dev_info(&PIIX4_dev->dev,
+ "Auxiliary SMBus Host Controller at 0x%x\n",
+ piix4_smba);
+
+ return piix4_smba;
}
-static int piix4_transaction(void)
+static int piix4_transaction(struct i2c_adapter *piix4_adapter)
{
+ struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(piix4_adapter);
+ unsigned short piix4_smba = adapdata->smba;
int temp;
int result = 0;
int timeout = 0;
- dev_dbg(&piix4_adapter.dev, "Transaction (pre): CNT=%02x, CMD=%02x, "
+ dev_dbg(&piix4_adapter->dev, "Transaction (pre): CNT=%02x, CMD=%02x, "
"ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT),
inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0),
inb_p(SMBHSTDAT1));
/* Make sure the SMBus host is ready to start transmitting */
if ((temp = inb_p(SMBHSTSTS)) != 0x00) {
- dev_dbg(&piix4_adapter.dev, "SMBus busy (%02x). "
+ dev_dbg(&piix4_adapter->dev, "SMBus busy (%02x). "
"Resetting...\n", temp);
outb_p(temp, SMBHSTSTS);
if ((temp = inb_p(SMBHSTSTS)) != 0x00) {
- dev_err(&piix4_adapter.dev, "Failed! (%02x)\n", temp);
+ dev_err(&piix4_adapter->dev, "Failed! (%02x)\n", temp);
return -EBUSY;
} else {
- dev_dbg(&piix4_adapter.dev, "Successful!\n");
+ dev_dbg(&piix4_adapter->dev, "Successful!\n");
}
}
@@ -330,35 +375,35 @@ static int piix4_transaction(void)
/* If the SMBus is still busy, we give up */
if (timeout == MAX_TIMEOUT) {
- dev_err(&piix4_adapter.dev, "SMBus Timeout!\n");
+ dev_err(&piix4_adapter->dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
}
if (temp & 0x10) {
result = -EIO;
- dev_err(&piix4_adapter.dev, "Error: Failed bus transaction\n");
+ dev_err(&piix4_adapter->dev, "Error: Failed bus transaction\n");
}
if (temp & 0x08) {
result = -EIO;
- dev_dbg(&piix4_adapter.dev, "Bus collision! SMBus may be "
+ dev_dbg(&piix4_adapter->dev, "Bus collision! SMBus may be "
"locked until next hard reset. (sorry!)\n");
/* Clock stops and slave is stuck in mid-transmission */
}
if (temp & 0x04) {
result = -ENXIO;
- dev_dbg(&piix4_adapter.dev, "Error: no response!\n");
+ dev_dbg(&piix4_adapter->dev, "Error: no response!\n");
}
if (inb_p(SMBHSTSTS) != 0x00)
outb_p(inb(SMBHSTSTS), SMBHSTSTS);
if ((temp = inb_p(SMBHSTSTS)) != 0x00) {
- dev_err(&piix4_adapter.dev, "Failed reset at end of "
+ dev_err(&piix4_adapter->dev, "Failed reset at end of "
"transaction (%02x)\n", temp);
}
- dev_dbg(&piix4_adapter.dev, "Transaction (post): CNT=%02x, CMD=%02x, "
+ dev_dbg(&piix4_adapter->dev, "Transaction (post): CNT=%02x, CMD=%02x, "
"ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT),
inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0),
inb_p(SMBHSTDAT1));
@@ -370,6 +415,8 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size, union i2c_smbus_data * data)
{
+ struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
+ unsigned short piix4_smba = adapdata->smba;
int i, len;
int status;
@@ -426,7 +473,7 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
outb_p((size & 0x1C) + (ENABLE_INT9 & 1), SMBHSTCNT);
- status = piix4_transaction();
+ status = piix4_transaction(adap);
if (status)
return status;
@@ -466,12 +513,6 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = piix4_func,
};
-static struct i2c_adapter piix4_adapter = {
- .owner = THIS_MODULE,
- .class = I2C_CLASS_HWMON | I2C_CLASS_SPD,
- .algo = &smbus_algorithm,
-};
-
static DEFINE_PCI_DEVICE_TABLE(piix4_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
@@ -496,6 +537,57 @@ static DEFINE_PCI_DEVICE_TABLE(piix4_ids) = {
MODULE_DEVICE_TABLE (pci, piix4_ids);
+static struct i2c_adapter *piix4_main_adapter;
+static struct i2c_adapter *piix4_aux_adapter;
+
+static int __devinit piix4_add_adapter(struct pci_dev *dev,
+ unsigned short smba,
+ struct i2c_adapter **padap)
+{
+ struct i2c_adapter *adap;
+ struct i2c_piix4_adapdata *adapdata;
+ int retval;
+
+ adap = kzalloc(sizeof(*adap), GFP_KERNEL);
+ if (adap == NULL) {
+ release_region(smba, SMBIOSIZE);
+ return -ENOMEM;
+ }
+
+ adap->owner = THIS_MODULE;
+ adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+ adap->algo = &smbus_algorithm;
+
+ adapdata = kzalloc(sizeof(*adapdata), GFP_KERNEL);
+ if (adapdata == NULL) {
+ kfree(adap);
+ release_region(smba, SMBIOSIZE);
+ return -ENOMEM;
+ }
+
+ adapdata->smba = smba;
+
+ /* set up the sysfs linkage to our parent device */
+ adap->dev.parent = &dev->dev;
+
+ snprintf(adap->name, sizeof(adap->name),
+ "SMBus PIIX4 adapter at %04x", smba);
+
+ i2c_set_adapdata(adap, adapdata);
+
+ retval = i2c_add_adapter(adap);
+ if (retval) {
+ dev_err(&dev->dev, "Couldn't register adapter!\n");
+ kfree(adapdata);
+ kfree(adap);
+ release_region(smba, SMBIOSIZE);
+ return retval;
+ }
+
+ *padap = adap;
+ return 0;
+}
+
static int __devinit piix4_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
@@ -510,30 +602,52 @@ static int __devinit piix4_probe(struct pci_dev *dev,
else
retval = piix4_setup(dev, id);
- if (retval)
+ /* If no main SMBus found, give up */
+ if (retval < 0)
return retval;
- /* set up the sysfs linkage to our parent device */
- piix4_adapter.dev.parent = &dev->dev;
-
- snprintf(piix4_adapter.name, sizeof(piix4_adapter.name),
- "SMBus PIIX4 adapter at %04x", piix4_smba);
+ /* Try to register main SMBus adapter, give up if we can't */
+ retval = piix4_add_adapter(dev, retval, &piix4_main_adapter);
+ if (retval < 0)
+ return retval;
- if ((retval = i2c_add_adapter(&piix4_adapter))) {
- dev_err(&dev->dev, "Couldn't register adapter!\n");
- release_region(piix4_smba, SMBIOSIZE);
- piix4_smba = 0;
+ /* Check for auxiliary SMBus on some AMD chipsets */
+ if (dev->vendor == PCI_VENDOR_ID_ATI &&
+ dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
+ dev->revision < 0x40) {
+ retval = piix4_setup_aux(dev, id, 0x58);
+ if (retval > 0) {
+ /* Try to add the aux adapter if it exists,
+ * piix4_add_adapter will clean up if this fails */
+ piix4_add_adapter(dev, retval, &piix4_aux_adapter);
+ }
}
- return retval;
+ return 0;
+}
+
+static void __devexit piix4_adap_remove(struct i2c_adapter *adap)
+{
+ struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
+
+ if (adapdata->smba) {
+ i2c_del_adapter(adap);
+ release_region(adapdata->smba, SMBIOSIZE);
+ kfree(adapdata);
+ kfree(adap);
+ }
}
static void __devexit piix4_remove(struct pci_dev *dev)
{
- if (piix4_smba) {
- i2c_del_adapter(&piix4_adapter);
- release_region(piix4_smba, SMBIOSIZE);
- piix4_smba = 0;
+ if (piix4_main_adapter) {
+ piix4_adap_remove(piix4_main_adapter);
+ piix4_main_adapter = NULL;
+ }
+
+ if (piix4_aux_adapter) {
+ piix4_adap_remove(piix4_aux_adapter);
+ piix4_aux_adapter = NULL;
}
}
@@ -544,20 +658,9 @@ static struct pci_driver piix4_driver = {
.remove = __devexit_p(piix4_remove),
};
-static int __init i2c_piix4_init(void)
-{
- return pci_register_driver(&piix4_driver);
-}
-
-static void __exit i2c_piix4_exit(void)
-{
- pci_unregister_driver(&piix4_driver);
-}
+module_pci_driver(piix4_driver);
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
"Philip Edelbrock <phil@netroedge.com>");
MODULE_DESCRIPTION("PIIX4 SMBus driver");
MODULE_LICENSE("GPL");
-
-module_init(i2c_piix4_init);
-module_exit(i2c_piix4_exit);
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 31c47e18d83c..5285f8565de4 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -227,28 +227,138 @@ static int __devexit i2c_powermac_remove(struct platform_device *dev)
return 0;
}
+static u32 __devinit i2c_powermac_get_addr(struct i2c_adapter *adap,
+ struct pmac_i2c_bus *bus,
+ struct device_node *node)
+{
+ const __be32 *prop;
+ int len;
+
+ /* First check for valid "reg" */
+ prop = of_get_property(node, "reg", &len);
+ if (prop && (len >= sizeof(int)))
+ return (be32_to_cpup(prop) & 0xff) >> 1;
+
+ /* Then check old-style "i2c-address" */
+ prop = of_get_property(node, "i2c-address", &len);
+ if (prop && (len >= sizeof(int)))
+ return (be32_to_cpup(prop) & 0xff) >> 1;
+
+ /* Now handle some devices with missing "reg" properties */
+ if (!strcmp(node->name, "cereal"))
+ return 0x60;
+ else if (!strcmp(node->name, "deq"))
+ return 0x34;
+
+ dev_warn(&adap->dev, "No i2c address for %s\n", node->full_name);
+
+ return 0xffffffff;
+}
+
+static void __devinit i2c_powermac_create_one(struct i2c_adapter *adap,
+ const char *type,
+ u32 addr)
+{
+ struct i2c_board_info info = {};
+ struct i2c_client *newdev;
+
+ strncpy(info.type, type, sizeof(info.type));
+ info.addr = addr;
+ newdev = i2c_new_device(adap, &info);
+ if (!newdev)
+ dev_err(&adap->dev,
+ "i2c-powermac: Failure to register missing %s\n",
+ type);
+}
+
+static void __devinit i2c_powermac_add_missing(struct i2c_adapter *adap,
+ struct pmac_i2c_bus *bus,
+ bool found_onyx)
+{
+ struct device_node *busnode = pmac_i2c_get_bus_node(bus);
+ int rc;
+
+ /* Check for the onyx audio codec */
+#define ONYX_REG_CONTROL 67
+ if (of_device_is_compatible(busnode, "k2-i2c") && !found_onyx) {
+ union i2c_smbus_data data;
+
+ rc = i2c_smbus_xfer(adap, 0x46, 0, I2C_SMBUS_READ,
+ ONYX_REG_CONTROL, I2C_SMBUS_BYTE_DATA,
+ &data);
+ if (rc >= 0)
+ i2c_powermac_create_one(adap, "MAC,pcm3052", 0x46);
+
+ rc = i2c_smbus_xfer(adap, 0x47, 0, I2C_SMBUS_READ,
+ ONYX_REG_CONTROL, I2C_SMBUS_BYTE_DATA,
+ &data);
+ if (rc >= 0)
+ i2c_powermac_create_one(adap, "MAC,pcm3052", 0x47);
+ }
+}
+
+static bool __devinit i2c_powermac_get_type(struct i2c_adapter *adap,
+ struct device_node *node,
+ u32 addr, char *type, int type_size)
+{
+ char tmp[16];
+
+ /* Note: we to _NOT_ want the standard
+ * i2c drivers to match with any of our powermac stuff
+ * unless they have been specifically modified to handle
+ * it on a case by case basis. For example, for thermal
+ * control, things like lm75 etc... shall match with their
+ * corresponding windfarm drivers, _NOT_ the generic ones,
+ * so we force a prefix of AAPL, onto the modalias to
+ * make that happen
+ */
+
+ /* First try proper modalias */
+ if (of_modalias_node(node, tmp, sizeof(tmp)) >= 0) {
+ snprintf(type, type_size, "MAC,%s", tmp);
+ return true;
+ }
+
+ /* Now look for known workarounds */
+ if (!strcmp(node->name, "deq")) {
+ /* Apple uses address 0x34 for TAS3001 and 0x35 for TAS3004 */
+ if (addr == 0x34) {
+ snprintf(type, type_size, "MAC,tas3001");
+ return true;
+ } else if (addr == 0x35) {
+ snprintf(type, type_size, "MAC,tas3004");
+ return true;
+ }
+ }
+
+ dev_err(&adap->dev, "i2c-powermac: modalias failure"
+ " on %s\n", node->full_name);
+ return false;
+}
+
static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
struct pmac_i2c_bus *bus)
{
struct i2c_client *newdev;
struct device_node *node;
+ bool found_onyx = 0;
+
+ /*
+ * In some cases we end up with the via-pmu node itself, in this
+ * case we skip this function completely as the device-tree will
+ * not contain anything useful.
+ */
+ if (!strcmp(adap->dev.of_node->name, "via-pmu"))
+ return;
for_each_child_of_node(adap->dev.of_node, node) {
struct i2c_board_info info = {};
- struct dev_archdata dev_ad = {};
- const __be32 *reg;
- char tmp[16];
u32 addr;
- int len;
/* Get address & channel */
- reg = of_get_property(node, "reg", &len);
- if (!reg || (len < sizeof(int))) {
- dev_err(&adap->dev, "i2c-powermac: invalid reg on %s\n",
- node->full_name);
+ addr = i2c_powermac_get_addr(adap, bus, node);
+ if (addr == 0xffffffff)
continue;
- }
- addr = be32_to_cpup(reg);
/* Multibus setup, check channel */
if (!pmac_i2c_match_adapter(node, adap))
@@ -257,27 +367,23 @@ static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
dev_dbg(&adap->dev, "i2c-powermac: register %s\n",
node->full_name);
- /* Make up a modalias. Note: we to _NOT_ want the standard
- * i2c drivers to match with any of our powermac stuff
- * unless they have been specifically modified to handle
- * it on a case by case basis. For example, for thermal
- * control, things like lm75 etc... shall match with their
- * corresponding windfarm drivers, _NOT_ the generic ones,
- * so we force a prefix of AAPL, onto the modalias to
- * make that happen
+ /*
+ * Keep track of some device existence to handle
+ * workarounds later.
*/
- if (of_modalias_node(node, tmp, sizeof(tmp)) < 0) {
- dev_err(&adap->dev, "i2c-powermac: modalias failure"
- " on %s\n", node->full_name);
+ if (of_device_is_compatible(node, "pcm3052"))
+ found_onyx = true;
+
+ /* Make up a modalias */
+ if (!i2c_powermac_get_type(adap, node, addr,
+ info.type, sizeof(info.type))) {
continue;
}
- snprintf(info.type, sizeof(info.type), "MAC,%s", tmp);
/* Fill out the rest of the info structure */
- info.addr = (addr & 0xff) >> 1;
+ info.addr = addr;
info.irq = irq_of_parse_and_map(node, 0);
info.of_node = of_node_get(node);
- info.archdata = &dev_ad;
newdev = i2c_new_device(adap, &info);
if (!newdev) {
@@ -292,6 +398,9 @@ static void __devinit i2c_powermac_register_devices(struct i2c_adapter *adap,
continue;
}
}
+
+ /* Additional workarounds */
+ i2c_powermac_add_missing(adap, bus, found_onyx);
}
static int __devinit i2c_powermac_probe(struct platform_device *dev)
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index a05817980556..4dc9bef17d77 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -163,17 +163,7 @@ static struct pci_driver ce4100_i2c_driver = {
.remove = __devexit_p(ce4100_i2c_remove),
};
-static int __init ce4100_i2c_init(void)
-{
- return pci_register_driver(&ce4100_i2c_driver);
-}
-module_init(ce4100_i2c_init);
-
-static void __exit ce4100_i2c_exit(void)
-{
- pci_unregister_driver(&ce4100_i2c_driver);
-}
-module_exit(ce4100_i2c_exit);
+module_pci_driver(ce4100_i2c_driver);
MODULE_DESCRIPTION("CE4100 PCI-I2C glue code for PXA's driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 15cf78f65ce0..5d6723b7525e 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -513,21 +513,8 @@ static struct pci_driver sis630_driver = {
.remove = __devexit_p(sis630_remove),
};
-static int __init i2c_sis630_init(void)
-{
- return pci_register_driver(&sis630_driver);
-}
-
-
-static void __exit i2c_sis630_exit(void)
-{
- pci_unregister_driver(&sis630_driver);
-}
-
+module_pci_driver(sis630_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Malysh <amalysh@web.de>");
MODULE_DESCRIPTION("SIS630 SMBus driver");
-
-module_init(i2c_sis630_init);
-module_exit(i2c_sis630_exit);
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index cc5d149413f7..7b72614a9bc0 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -324,21 +324,8 @@ static struct pci_driver sis96x_driver = {
.remove = __devexit_p(sis96x_remove),
};
-static int __init i2c_sis96x_init(void)
-{
- return pci_register_driver(&sis96x_driver);
-}
-
-static void __exit i2c_sis96x_exit(void)
-{
- pci_unregister_driver(&sis96x_driver);
-}
+module_pci_driver(sis96x_driver);
MODULE_AUTHOR("Mark M. Hoffman <mhoffman@lightlink.com>");
MODULE_DESCRIPTION("SiS96x SMBus driver");
MODULE_LICENSE("GPL");
-
-/* Register initialization functions using helper macros */
-module_init(i2c_sis96x_init);
-module_exit(i2c_sis96x_exit);
-
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 8b2e555a9563..3da7ee3eb505 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -341,7 +341,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
u32 val;
int err = 0;
- clk_enable(i2c_dev->clk);
+ clk_prepare_enable(i2c_dev->clk);
tegra_periph_reset_assert(i2c_dev->clk);
udelay(2);
@@ -372,7 +372,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (tegra_i2c_flush_fifos(i2c_dev))
err = -ETIMEDOUT;
- clk_disable(i2c_dev->clk);
+ clk_disable_unprepare(i2c_dev->clk);
if (i2c_dev->irq_disabled) {
i2c_dev->irq_disabled = 0;
@@ -546,14 +546,14 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
if (i2c_dev->is_suspended)
return -EBUSY;
- clk_enable(i2c_dev->clk);
+ clk_prepare_enable(i2c_dev->clk);
for (i = 0; i < num; i++) {
int stop = (i == (num - 1)) ? 1 : 0;
ret = tegra_i2c_xfer_msg(i2c_dev, &msgs[i], stop);
if (ret)
break;
}
- clk_disable(i2c_dev->clk);
+ clk_disable_unprepare(i2c_dev->clk);
return ret ?: i;
}
@@ -666,7 +666,7 @@ static int __devinit tegra_i2c_probe(struct platform_device *pdev)
goto err_free;
}
- clk_enable(i2c_dev->i2c_clk);
+ clk_prepare_enable(i2c_dev->i2c_clk);
i2c_set_adapdata(&i2c_dev->adapter, i2c_dev);
i2c_dev->adapter.owner = THIS_MODULE;
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index f07307ff360d..05106368d405 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -143,6 +143,7 @@ static const struct i2c_algorithm usb_algorithm = {
static const struct usb_device_id i2c_tiny_usb_table[] = {
{ USB_DEVICE(0x0403, 0xc631) }, /* FTDI */
{ USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */
+ { USB_DEVICE(0x1964, 0x0001) }, /* Robofuzz OSIF */
{ } /* Terminating entry */
};
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 713d31ade26b..7ffee71ca190 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -161,20 +161,8 @@ static struct pci_driver vt586b_driver = {
.remove = __devexit_p(vt586b_remove),
};
-static int __init i2c_vt586b_init(void)
-{
- return pci_register_driver(&vt586b_driver);
-}
-
-static void __exit i2c_vt586b_exit(void)
-{
- pci_unregister_driver(&vt586b_driver);
-}
-
+module_pci_driver(vt586b_driver);
MODULE_AUTHOR("Kyösti Mälkki <kmalkki@cc.hut.fi>");
MODULE_DESCRIPTION("i2c for Via vt82c586b southbridge");
MODULE_LICENSE("GPL");
-
-module_init(i2c_vt586b_init);
-module_exit(i2c_vt586b_exit);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index a6ad32bc0a96..26488aa893d5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2122,7 +2122,7 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
int try;
s32 res;
- flags &= I2C_M_TEN | I2C_CLIENT_PEC;
+ flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB;
if (adapter->algo->smbus_xfer) {
i2c_lock_adapter(adapter);
@@ -2140,11 +2140,17 @@ s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
break;
}
i2c_unlock_adapter(adapter);
- } else
- res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
- command, protocol, data);
- return res;
+ if (res != -EOPNOTSUPP || !adapter->algo->master_xfer)
+ return res;
+ /*
+ * Fall back to i2c_smbus_xfer_emulated if the adapter doesn't
+ * implement native support for the SMBus operation.
+ */
+ }
+
+ return i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
+ command, protocol, data);
}
EXPORT_SYMBOL(i2c_smbus_xfer);
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 9836d08f7a77..df3e0bf31eb3 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -245,18 +245,7 @@ int i2c_handle_smbus_alert(struct i2c_client *ara)
}
EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
-static int __init i2c_smbus_init(void)
-{
- return i2c_add_driver(&smbalert_driver);
-}
-
-static void __exit i2c_smbus_exit(void)
-{
- i2c_del_driver(&smbalert_driver);
-}
-
-module_init(i2c_smbus_init);
-module_exit(i2c_smbus_exit);
+module_i2c_driver(smbalert_driver);
MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("SMBus protocol extensions support");
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 8aacde1516ac..f8f72f39e0b5 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -396,6 +396,6 @@ static struct i2c_driver pca9541_driver = {
module_i2c_driver(pca9541_driver);
-MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
+MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
MODULE_DESCRIPTION("PCA9541 I2C master selector driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d0f59c3f87ef..fe95d5464a02 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -96,6 +96,7 @@ static const struct idle_cpu *icpu;
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
+static int intel_idle_cpu_init(int cpu);
static struct cpuidle_state *cpuidle_state_table;
@@ -302,22 +303,35 @@ static void __setup_broadcast_timer(void *arg)
clockevents_notify(reason, &cpu);
}
-static int setup_broadcast_cpuhp_notify(struct notifier_block *n,
- unsigned long action, void *hcpu)
+static int cpu_hotplug_notify(struct notifier_block *n,
+ unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
+ struct cpuidle_device *dev;
switch (action & 0xf) {
case CPU_ONLINE:
- smp_call_function_single(hotcpu, __setup_broadcast_timer,
- (void *)true, 1);
+
+ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
+ smp_call_function_single(hotcpu, __setup_broadcast_timer,
+ (void *)true, 1);
+
+ /*
+ * Some systems can hotplug a cpu at runtime after
+ * the kernel has booted, we have to initialize the
+ * driver in this case
+ */
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu);
+ if (!dev->registered)
+ intel_idle_cpu_init(hotcpu);
+
break;
}
return NOTIFY_OK;
}
-static struct notifier_block setup_broadcast_notifier = {
- .notifier_call = setup_broadcast_cpuhp_notify,
+static struct notifier_block cpu_hotplug_notifier = {
+ .notifier_call = cpu_hotplug_notify,
};
static void auto_demotion_disable(void *dummy)
@@ -405,10 +419,10 @@ static int intel_idle_probe(void)
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
- else {
+ else
on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
- register_cpu_notifier(&setup_broadcast_notifier);
- }
+
+ register_cpu_notifier(&cpu_hotplug_notifier);
pr_debug(PREFIX "v" INTEL_IDLE_VERSION
" model 0x%X\n", boot_cpu_data.x86_model);
@@ -494,7 +508,7 @@ static int intel_idle_cpuidle_driver_init(void)
* allocate, initialize, register cpuidle_devices
* @cpu: cpu/core to initialize
*/
-int intel_idle_cpu_init(int cpu)
+static int intel_idle_cpu_init(int cpu)
{
int cstate;
struct cpuidle_device *dev;
@@ -539,7 +553,6 @@ int intel_idle_cpu_init(int cpu)
return 0;
}
-EXPORT_SYMBOL_GPL(intel_idle_cpu_init);
static int __init intel_idle_init(void)
{
@@ -581,10 +594,10 @@ static void __exit intel_idle_exit(void)
intel_idle_cpuidle_devices_uninit();
cpuidle_unregister_driver(&intel_idle_driver);
- if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
+
+ if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
- unregister_cpu_notifier(&setup_broadcast_notifier);
- }
+ unregister_cpu_notifier(&cpu_hotplug_notifier);
return;
}
diff --git a/drivers/ieee802154/Kconfig b/drivers/ieee802154/Kconfig
index 15c064073701..1fc4eefc20ed 100644
--- a/drivers/ieee802154/Kconfig
+++ b/drivers/ieee802154/Kconfig
@@ -19,6 +19,7 @@ config IEEE802154_FAKEHARD
This driver can also be built as a module. To do so say M here.
The module will be called 'fakehard'.
+
config IEEE802154_FAKELB
depends on IEEE802154_DRIVERS && MAC802154
tristate "IEEE 802.15.4 loopback driver"
@@ -28,3 +29,8 @@ config IEEE802154_FAKELB
This driver can also be built as a module. To do so say M here.
The module will be called 'fakelb'.
+
+config IEEE802154_AT86RF230
+ depends on IEEE802154_DRIVERS && MAC802154
+ tristate "AT86RF230/231 transceiver driver"
+ depends on SPI
diff --git a/drivers/ieee802154/Makefile b/drivers/ieee802154/Makefile
index ea784ea6f0f8..4f4371d3aa7d 100644
--- a/drivers/ieee802154/Makefile
+++ b/drivers/ieee802154/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
+obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
diff --git a/drivers/ieee802154/at86rf230.c b/drivers/ieee802154/at86rf230.c
new file mode 100644
index 000000000000..5d309408395d
--- /dev/null
+++ b/drivers/ieee802154/at86rf230.c
@@ -0,0 +1,968 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/at86rf230.h>
+#include <linux/skbuff.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+
+struct at86rf230_local {
+ struct spi_device *spi;
+ int rstn, slp_tr, dig2;
+
+ u8 part;
+ u8 vers;
+
+ u8 buf[2];
+ struct mutex bmux;
+
+ struct work_struct irqwork;
+ struct completion tx_complete;
+
+ struct ieee802154_dev *dev;
+
+ spinlock_t lock;
+ bool irq_disabled;
+ bool is_tx;
+};
+
+#define RG_TRX_STATUS (0x01)
+#define SR_TRX_STATUS 0x01, 0x1f, 0
+#define SR_RESERVED_01_3 0x01, 0x20, 5
+#define SR_CCA_STATUS 0x01, 0x40, 6
+#define SR_CCA_DONE 0x01, 0x80, 7
+#define RG_TRX_STATE (0x02)
+#define SR_TRX_CMD 0x02, 0x1f, 0
+#define SR_TRAC_STATUS 0x02, 0xe0, 5
+#define RG_TRX_CTRL_0 (0x03)
+#define SR_CLKM_CTRL 0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
+#define SR_PAD_IO_CLKM 0x03, 0x30, 4
+#define SR_PAD_IO 0x03, 0xc0, 6
+#define RG_TRX_CTRL_1 (0x04)
+#define SR_IRQ_POLARITY 0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
+#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
+#define SR_RX_BL_CTRL 0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
+#define SR_PA_EXT_EN 0x04, 0x80, 7
+#define RG_PHY_TX_PWR (0x05)
+#define SR_TX_PWR 0x05, 0x0f, 0
+#define SR_PA_LT 0x05, 0x30, 4
+#define SR_PA_BUF_LT 0x05, 0xc0, 6
+#define RG_PHY_RSSI (0x06)
+#define SR_RSSI 0x06, 0x1f, 0
+#define SR_RND_VALUE 0x06, 0x60, 5
+#define SR_RX_CRC_VALID 0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL (0x07)
+#define SR_ED_LEVEL 0x07, 0xff, 0
+#define RG_PHY_CC_CCA (0x08)
+#define SR_CHANNEL 0x08, 0x1f, 0
+#define SR_CCA_MODE 0x08, 0x60, 5
+#define SR_CCA_REQUEST 0x08, 0x80, 7
+#define RG_CCA_THRES (0x09)
+#define SR_CCA_ED_THRES 0x09, 0x0f, 0
+#define SR_RESERVED_09_1 0x09, 0xf0, 4
+#define RG_RX_CTRL (0x0a)
+#define SR_PDT_THRES 0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
+#define RG_SFD_VALUE (0x0b)
+#define SR_SFD_VALUE 0x0b, 0xff, 0
+#define RG_TRX_CTRL_2 (0x0c)
+#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
+#define SR_RESERVED_0c_2 0x0c, 0x7c, 2
+#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
+#define RG_ANT_DIV (0x0d)
+#define SR_ANT_CTRL 0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
+#define SR_ANT_DIV_EN 0x0d, 0x08, 3
+#define SR_RESERVED_0d_2 0x0d, 0x70, 4
+#define SR_ANT_SEL 0x0d, 0x80, 7
+#define RG_IRQ_MASK (0x0e)
+#define SR_IRQ_MASK 0x0e, 0xff, 0
+#define RG_IRQ_STATUS (0x0f)
+#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
+#define SR_IRQ_5_AMI 0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
+#define RG_VREG_CTRL (0x10)
+#define SR_RESERVED_10_6 0x10, 0x03, 0
+#define SR_DVDD_OK 0x10, 0x04, 2
+#define SR_DVREG_EXT 0x10, 0x08, 3
+#define SR_RESERVED_10_3 0x10, 0x30, 4
+#define SR_AVDD_OK 0x10, 0x40, 6
+#define SR_AVREG_EXT 0x10, 0x80, 7
+#define RG_BATMON (0x11)
+#define SR_BATMON_VTH 0x11, 0x0f, 0
+#define SR_BATMON_HR 0x11, 0x10, 4
+#define SR_BATMON_OK 0x11, 0x20, 5
+#define SR_RESERVED_11_1 0x11, 0xc0, 6
+#define RG_XOSC_CTRL (0x12)
+#define SR_XTAL_TRIM 0x12, 0x0f, 0
+#define SR_XTAL_MODE 0x12, 0xf0, 4
+#define RG_RX_SYN (0x15)
+#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
+#define SR_RESERVED_15_2 0x15, 0x70, 4
+#define SR_RX_PDT_DIS 0x15, 0x80, 7
+#define RG_XAH_CTRL_1 (0x17)
+#define SR_RESERVED_17_8 0x17, 0x01, 0
+#define SR_AACK_PROM_MODE 0x17, 0x02, 1
+#define SR_AACK_ACK_TIME 0x17, 0x04, 2
+#define SR_RESERVED_17_5 0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
+#define SR_RESERVED_17_2 0x17, 0x40, 6
+#define SR_RESERVED_17_1 0x17, 0x80, 7
+#define RG_FTN_CTRL (0x18)
+#define SR_RESERVED_18_2 0x18, 0x7f, 0
+#define SR_FTN_START 0x18, 0x80, 7
+#define RG_PLL_CF (0x1a)
+#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
+#define SR_PLL_CF_START 0x1a, 0x80, 7
+#define RG_PLL_DCU (0x1b)
+#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2 0x1b, 0x40, 6
+#define SR_PLL_DCU_START 0x1b, 0x80, 7
+#define RG_PART_NUM (0x1c)
+#define SR_PART_NUM 0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM 0x1d, 0xff, 0
+#define RG_MAN_ID_0 (0x1e)
+#define SR_MAN_ID_0 0x1e, 0xff, 0
+#define RG_MAN_ID_1 (0x1f)
+#define SR_MAN_ID_1 0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0 (0x20)
+#define SR_SHORT_ADDR_0 0x20, 0xff, 0
+#define RG_SHORT_ADDR_1 (0x21)
+#define SR_SHORT_ADDR_1 0x21, 0xff, 0
+#define RG_PAN_ID_0 (0x22)
+#define SR_PAN_ID_0 0x22, 0xff, 0
+#define RG_PAN_ID_1 (0x23)
+#define SR_PAN_ID_1 0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0 0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1 0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2 0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3 0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4 0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5 0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
+#define RG_XAH_CTRL_0 (0x2c)
+#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0 0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1 0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
+#define SR_AACK_SET_PD 0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
+#define RG_CSMA_BE (0x2f)
+#define SR_MIN_BE 0x2f, 0x0f, 0
+#define SR_MAX_BE 0x2f, 0xf0, 4
+
+#define CMD_REG 0x80
+#define CMD_REG_MASK 0x3f
+#define CMD_WRITE 0x40
+#define CMD_FB 0x20
+
+#define IRQ_BAT_LOW (1 << 7)
+#define IRQ_TRX_UR (1 << 6)
+#define IRQ_AMI (1 << 5)
+#define IRQ_CCA_ED (1 << 4)
+#define IRQ_TRX_END (1 << 3)
+#define IRQ_RX_START (1 << 2)
+#define IRQ_PLL_UNL (1 << 1)
+#define IRQ_PLL_LOCK (1 << 0)
+
+#define STATE_P_ON 0x00 /* BUSY */
+#define STATE_BUSY_RX 0x01
+#define STATE_BUSY_TX 0x02
+#define STATE_FORCE_TRX_OFF 0x03
+#define STATE_FORCE_TX_ON 0x04 /* IDLE */
+/* 0x05 */ /* INVALID_PARAMETER */
+#define STATE_RX_ON 0x06
+/* 0x07 */ /* SUCCESS */
+#define STATE_TRX_OFF 0x08
+#define STATE_TX_ON 0x09
+/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP 0x0F
+#define STATE_BUSY_RX_AACK 0x11
+#define STATE_BUSY_TX_ARET 0x12
+#define STATE_BUSY_RX_AACK_ON 0x16
+#define STATE_BUSY_TX_ARET_ON 0x19
+#define STATE_RX_ON_NOCLK 0x1C
+#define STATE_RX_AACK_ON_NOCLK 0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+static int
+__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ };
+
+ buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+ buf[1] = data;
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ return status;
+}
+
+static int
+__at86rf230_read_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 *data)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+
+ buf[0] = (addr & CMD_REG_MASK) | CMD_REG;
+ buf[1] = 0xff;
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status == 0)
+ *data = buf[1];
+
+ return status;
+}
+
+static int
+at86rf230_read_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 *data)
+{
+ int status;
+
+ mutex_lock(&lp->bmux);
+ status = __at86rf230_read_subreg(lp, addr, mask, shift, data);
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_write_subreg(struct at86rf230_local *lp,
+ u8 addr, u8 mask, int shift, u8 data)
+{
+ int status;
+ u8 val;
+
+ mutex_lock(&lp->bmux);
+ status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val);
+ if (status)
+ goto out;
+
+ val &= ~mask;
+ val |= (data << shift) & mask;
+
+ status = __at86rf230_write(lp, addr, val);
+out:
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer_head = {
+ .len = 2,
+ .tx_buf = buf,
+
+ };
+ struct spi_transfer xfer_buf = {
+ .len = len,
+ .tx_buf = data,
+ };
+
+ mutex_lock(&lp->bmux);
+ buf[0] = CMD_WRITE | CMD_FB;
+ buf[1] = len + 2; /* 2 bytes for CRC that isn't written */
+
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head, &msg);
+ spi_message_add_tail(&xfer_buf, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ mutex_unlock(&lp->bmux);
+ return status;
+}
+
+static int
+at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi)
+{
+ u8 *buf = lp->buf;
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer_head = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ struct spi_transfer xfer_head1 = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ struct spi_transfer xfer_buf = {
+ .len = 0,
+ .rx_buf = data,
+ };
+
+ mutex_lock(&lp->bmux);
+
+ buf[0] = CMD_FB;
+ buf[1] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+
+ xfer_buf.len = *(buf + 1) + 1;
+ *len = buf[1];
+
+ buf[0] = CMD_FB;
+ buf[1] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer_head1, &msg);
+ spi_message_add_tail(&xfer_buf, &msg);
+
+ status = spi_sync(lp->spi, &msg);
+
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+ dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status) {
+ if (lqi && (*len > lp->buf[1]))
+ *lqi = data[lp->buf[1]];
+ }
+ mutex_unlock(&lp->bmux);
+
+ return status;
+}
+
+static int
+at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+{
+ might_sleep();
+ BUG_ON(!level);
+ *level = 0xbe;
+ return 0;
+}
+
+static int
+at86rf230_state(struct ieee802154_dev *dev, int state)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+ u8 val;
+ u8 desired_status;
+
+ might_sleep();
+
+ if (state == STATE_FORCE_TX_ON)
+ desired_status = STATE_TX_ON;
+ else if (state == STATE_FORCE_TRX_OFF)
+ desired_status = STATE_TRX_OFF;
+ else
+ desired_status = state;
+
+ do {
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+ if (rc)
+ goto err;
+ } while (val == STATE_TRANSITION_IN_PROGRESS);
+
+ if (val == desired_status)
+ return 0;
+
+ /* state is equal to phy states */
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state);
+ if (rc)
+ goto err;
+
+ do {
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+ if (rc)
+ goto err;
+ } while (val == STATE_TRANSITION_IN_PROGRESS);
+
+
+ if (val == desired_status)
+ return 0;
+
+ pr_err("unexpected state change: %d, asked for %d\n", val, state);
+ return -EBUSY;
+
+err:
+ pr_err("error: %d\n", rc);
+ return rc;
+}
+
+static int
+at86rf230_start(struct ieee802154_dev *dev)
+{
+ struct at86rf230_local *lp = dev->priv;
+ u8 rc;
+
+ rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
+ if (rc)
+ return rc;
+
+ return at86rf230_state(dev, STATE_RX_ON);
+}
+
+static void
+at86rf230_stop(struct ieee802154_dev *dev)
+{
+ at86rf230_state(dev, STATE_FORCE_TRX_OFF);
+}
+
+static int
+at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+
+ might_sleep();
+
+ if (page != 0 || channel < 11 || channel > 26) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+ msleep(1); /* Wait for PLL */
+ dev->phy->current_channel = channel;
+
+ return 0;
+}
+
+static int
+at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+ unsigned long flags;
+
+ spin_lock(&lp->lock);
+ if (lp->irq_disabled) {
+ spin_unlock(&lp->lock);
+ return -EBUSY;
+ }
+ spin_unlock(&lp->lock);
+
+ might_sleep();
+
+ rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
+ if (rc)
+ goto err;
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->is_tx = 1;
+ INIT_COMPLETION(lp->tx_complete);
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
+ if (rc)
+ goto err_rx;
+
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
+ if (rc)
+ goto err_rx;
+
+ rc = wait_for_completion_interruptible(&lp->tx_complete);
+ if (rc < 0)
+ goto err_rx;
+
+ rc = at86rf230_start(dev);
+
+ return rc;
+
+err_rx:
+ at86rf230_start(dev);
+err:
+ pr_err("error: %d\n", rc);
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->is_tx = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ return rc;
+}
+
+static int at86rf230_rx(struct at86rf230_local *lp)
+{
+ u8 len = 128, lqi = 0;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+
+ if (!skb)
+ return -ENOMEM;
+
+ if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi))
+ goto err;
+
+ if (len < 2)
+ goto err;
+
+ skb_trim(skb, len - 2); /* We do not put CRC into the frame */
+
+ ieee802154_rx_irqsafe(lp->dev, skb, lqi);
+
+ dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi);
+
+ return 0;
+err:
+ pr_debug("received frame is too small\n");
+
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+static struct ieee802154_ops at86rf230_ops = {
+ .owner = THIS_MODULE,
+ .xmit = at86rf230_xmit,
+ .ed = at86rf230_ed,
+ .set_channel = at86rf230_channel,
+ .start = at86rf230_start,
+ .stop = at86rf230_stop,
+};
+
+static void at86rf230_irqwork(struct work_struct *work)
+{
+ struct at86rf230_local *lp =
+ container_of(work, struct at86rf230_local, irqwork);
+ u8 status = 0, val;
+ int rc;
+ unsigned long flags;
+
+ rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val);
+ status |= val;
+
+ status &= ~IRQ_PLL_LOCK; /* ignore */
+ status &= ~IRQ_RX_START; /* ignore */
+ status &= ~IRQ_AMI; /* ignore */
+ status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
+
+ if (status & IRQ_TRX_END) {
+ spin_lock_irqsave(&lp->lock, flags);
+ status &= ~IRQ_TRX_END;
+ if (lp->is_tx) {
+ lp->is_tx = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+ complete(&lp->tx_complete);
+ } else {
+ spin_unlock_irqrestore(&lp->lock, flags);
+ at86rf230_rx(lp);
+ }
+ }
+
+ spin_lock_irqsave(&lp->lock, flags);
+ lp->irq_disabled = 0;
+ spin_unlock_irqrestore(&lp->lock, flags);
+
+ enable_irq(lp->spi->irq);
+}
+
+static irqreturn_t at86rf230_isr(int irq, void *data)
+{
+ struct at86rf230_local *lp = data;
+
+ disable_irq_nosync(irq);
+
+ spin_lock(&lp->lock);
+ lp->irq_disabled = 1;
+ spin_unlock(&lp->lock);
+
+ schedule_work(&lp->irqwork);
+
+ return IRQ_HANDLED;
+}
+
+
+static int at86rf230_hw_init(struct at86rf230_local *lp)
+{
+ u8 status;
+ int rc;
+
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+ if (status == STATE_P_ON) {
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
+ if (rc)
+ return rc;
+ msleep(1);
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+ }
+
+ rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, 0xff); /* IRQ_TRX_UR |
+ * IRQ_CCA_ED |
+ * IRQ_TRX_END |
+ * IRQ_PLL_UNL |
+ * IRQ_PLL_LOCK
+ */
+ if (rc)
+ return rc;
+
+ /* CLKM changes are applied immediately */
+ rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
+ if (rc)
+ return rc;
+
+ /* Turn CLKM Off */
+ rc = at86rf230_write_subreg(lp, SR_CLKM_CTRL, 0x00);
+ if (rc)
+ return rc;
+ /* Wait the next SLEEP cycle */
+ msleep(100);
+
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
+ if (rc)
+ return rc;
+ msleep(1);
+
+ rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
+ if (rc)
+ return rc;
+ dev_info(&lp->spi->dev, "Status: %02x\n", status);
+
+ rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
+ if (rc)
+ return rc;
+ if (!status) {
+ dev_err(&lp->spi->dev, "DVDD error\n");
+ return -EINVAL;
+ }
+
+ rc = at86rf230_read_subreg(lp, SR_AVDD_OK, &status);
+ if (rc)
+ return rc;
+ if (!status) {
+ dev_err(&lp->spi->dev, "AVDD error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int at86rf230_suspend(struct spi_device *spi, pm_message_t message)
+{
+ return 0;
+}
+
+static int at86rf230_resume(struct spi_device *spi)
+{
+ return 0;
+}
+
+static int at86rf230_fill_data(struct spi_device *spi)
+{
+ struct at86rf230_local *lp = spi_get_drvdata(spi);
+ struct at86rf230_platform_data *pdata = spi->dev.platform_data;
+
+ if (!pdata) {
+ dev_err(&spi->dev, "no platform_data\n");
+ return -EINVAL;
+ }
+
+ lp->rstn = pdata->rstn;
+ lp->slp_tr = pdata->slp_tr;
+ lp->dig2 = pdata->dig2;
+
+ return 0;
+}
+
+static int __devinit at86rf230_probe(struct spi_device *spi)
+{
+ struct ieee802154_dev *dev;
+ struct at86rf230_local *lp;
+ u8 man_id_0, man_id_1;
+ int rc;
+ const char *chip;
+ int supported = 0;
+
+ if (!spi->irq) {
+ dev_err(&spi->dev, "no IRQ specified\n");
+ return -EINVAL;
+ }
+
+ dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
+ if (!dev)
+ return -ENOMEM;
+
+ lp = dev->priv;
+ lp->dev = dev;
+
+ lp->spi = spi;
+
+ dev->priv = lp;
+ dev->parent = &spi->dev;
+ dev->extra_tx_headroom = 0;
+ /* We do support only 2.4 Ghz */
+ dev->phy->channels_supported[0] = 0x7FFF800;
+ dev->flags = IEEE802154_HW_OMIT_CKSUM;
+
+ mutex_init(&lp->bmux);
+ INIT_WORK(&lp->irqwork, at86rf230_irqwork);
+ spin_lock_init(&lp->lock);
+ init_completion(&lp->tx_complete);
+
+ spi_set_drvdata(spi, lp);
+
+ rc = at86rf230_fill_data(spi);
+ if (rc)
+ goto err_fill;
+
+ rc = gpio_request(lp->rstn, "rstn");
+ if (rc)
+ goto err_rstn;
+
+ if (gpio_is_valid(lp->slp_tr)) {
+ rc = gpio_request(lp->slp_tr, "slp_tr");
+ if (rc)
+ goto err_slp_tr;
+ }
+
+ rc = gpio_direction_output(lp->rstn, 1);
+ if (rc)
+ goto err_gpio_dir;
+
+ if (gpio_is_valid(lp->slp_tr)) {
+ rc = gpio_direction_output(lp->slp_tr, 0);
+ if (rc)
+ goto err_gpio_dir;
+ }
+
+ /* Reset */
+ msleep(1);
+ gpio_set_value(lp->rstn, 0);
+ msleep(1);
+ gpio_set_value(lp->rstn, 1);
+ msleep(1);
+
+ rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
+ if (rc)
+ goto err_gpio_dir;
+ rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
+ if (rc)
+ goto err_gpio_dir;
+
+ if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
+ dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
+ man_id_1, man_id_0);
+ rc = -EINVAL;
+ goto err_gpio_dir;
+ }
+
+ rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
+ if (rc)
+ goto err_gpio_dir;
+
+ switch (lp->part) {
+ case 2:
+ chip = "at86rf230";
+ /* supported = 1; FIXME: should be easy to support; */
+ break;
+ case 3:
+ chip = "at86rf231";
+ supported = 1;
+ break;
+ default:
+ chip = "UNKNOWN";
+ break;
+ }
+
+ dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
+ if (!supported) {
+ rc = -ENOTSUPP;
+ goto err_gpio_dir;
+ }
+
+ rc = at86rf230_hw_init(lp);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = request_irq(spi->irq, at86rf230_isr, IRQF_SHARED,
+ dev_name(&spi->dev), lp);
+ if (rc)
+ goto err_gpio_dir;
+
+ rc = ieee802154_register_device(lp->dev);
+ if (rc)
+ goto err_irq;
+
+ return rc;
+
+ ieee802154_unregister_device(lp->dev);
+err_irq:
+ free_irq(spi->irq, lp);
+ flush_work(&lp->irqwork);
+err_gpio_dir:
+ if (gpio_is_valid(lp->slp_tr))
+ gpio_free(lp->slp_tr);
+err_slp_tr:
+ gpio_free(lp->rstn);
+err_rstn:
+err_fill:
+ spi_set_drvdata(spi, NULL);
+ mutex_destroy(&lp->bmux);
+ ieee802154_free_device(lp->dev);
+ return rc;
+}
+
+static int __devexit at86rf230_remove(struct spi_device *spi)
+{
+ struct at86rf230_local *lp = spi_get_drvdata(spi);
+
+ ieee802154_unregister_device(lp->dev);
+
+ free_irq(spi->irq, lp);
+ flush_work(&lp->irqwork);
+
+ if (gpio_is_valid(lp->slp_tr))
+ gpio_free(lp->slp_tr);
+ gpio_free(lp->rstn);
+
+ spi_set_drvdata(spi, NULL);
+ mutex_destroy(&lp->bmux);
+ ieee802154_free_device(lp->dev);
+
+ dev_dbg(&spi->dev, "unregistered at86rf230\n");
+ return 0;
+}
+
+static struct spi_driver at86rf230_driver = {
+ .driver = {
+ .name = "at86rf230",
+ .owner = THIS_MODULE,
+ },
+ .probe = at86rf230_probe,
+ .remove = __devexit_p(at86rf230_remove),
+ .suspend = at86rf230_suspend,
+ .resume = at86rf230_resume,
+};
+
+static int __init at86rf230_init(void)
+{
+ return spi_register_driver(&at86rf230_driver);
+}
+module_init(at86rf230_init);
+
+static void __exit at86rf230_exit(void)
+{
+ spi_unregister_driver(&at86rf230_driver);
+}
+module_exit(at86rf230_exit);
+
+MODULE_DESCRIPTION("AT86RF230 Transceiver Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 6ef660c1332f..28058ae33d38 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -129,7 +129,7 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
dev_put(dev);
break;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
@@ -243,7 +243,7 @@ out:
return ret;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
static int addr6_resolve(struct sockaddr_in6 *src_in,
struct sockaddr_in6 *dst_in,
struct rdma_dev_addr *addr)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c889aaef3416..d67999f6e34a 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3848,24 +3848,28 @@ static int __init ib_cm_init(void)
INIT_LIST_HEAD(&cm.timewait_list);
ret = class_register(&cm_class);
- if (ret)
- return -ENOMEM;
+ if (ret) {
+ ret = -ENOMEM;
+ goto error1;
+ }
cm.wq = create_workqueue("ib_cm");
if (!cm.wq) {
ret = -ENOMEM;
- goto error1;
+ goto error2;
}
ret = ib_register_client(&cm_client);
if (ret)
- goto error2;
+ goto error3;
return 0;
-error2:
+error3:
destroy_workqueue(cm.wq);
-error1:
+error2:
class_unregister(&cm_class);
+error1:
+ idr_destroy(&cm.local_id_table);
return ret;
}
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 7da9b2102341..be068f47e47e 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -44,18 +44,6 @@
#define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
-#define CM_REQ_ATTR_ID cpu_to_be16(0x0010)
-#define CM_MRA_ATTR_ID cpu_to_be16(0x0011)
-#define CM_REJ_ATTR_ID cpu_to_be16(0x0012)
-#define CM_REP_ATTR_ID cpu_to_be16(0x0013)
-#define CM_RTU_ATTR_ID cpu_to_be16(0x0014)
-#define CM_DREQ_ATTR_ID cpu_to_be16(0x0015)
-#define CM_DREP_ATTR_ID cpu_to_be16(0x0016)
-#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017)
-#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018)
-#define CM_LAP_ATTR_ID cpu_to_be16(0x0019)
-#define CM_APR_ATTR_ID cpu_to_be16(0x001A)
-
enum cm_msg_sequence {
CM_MSG_SEQUENCE_REQ,
CM_MSG_SEQUENCE_LAP,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2e826f9702c6..5a335b5447c6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -99,6 +99,10 @@ struct rdma_bind_list {
unsigned short port;
};
+enum {
+ CMA_OPTION_AFONLY,
+};
+
/*
* Device removal can occur at anytime, so we need extra handling to
* serialize notifying the user of device removal with other callbacks.
@@ -137,9 +141,11 @@ struct rdma_id_private {
u32 qkey;
u32 qp_num;
pid_t owner;
+ u32 options;
u8 srq;
u8 tos;
u8 reuseaddr;
+ u8 afonly;
};
struct cma_multicast {
@@ -1297,8 +1303,10 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
} else {
cma_set_ip_ver(cma_data, 4);
cma_set_ip_ver(cma_mask, 0xF);
- cma_data->dst_addr.ip4.addr = ip4_addr;
- cma_mask->dst_addr.ip4.addr = htonl(~0);
+ if (!cma_any_addr(addr)) {
+ cma_data->dst_addr.ip4.addr = ip4_addr;
+ cma_mask->dst_addr.ip4.addr = htonl(~0);
+ }
}
break;
case AF_INET6:
@@ -1312,9 +1320,11 @@ static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
} else {
cma_set_ip_ver(cma_data, 6);
cma_set_ip_ver(cma_mask, 0xF);
- cma_data->dst_addr.ip6 = ip6_addr;
- memset(&cma_mask->dst_addr.ip6, 0xFF,
- sizeof cma_mask->dst_addr.ip6);
+ if (!cma_any_addr(addr)) {
+ cma_data->dst_addr.ip6 = ip6_addr;
+ memset(&cma_mask->dst_addr.ip6, 0xFF,
+ sizeof cma_mask->dst_addr.ip6);
+ }
}
break;
default:
@@ -1499,7 +1509,7 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
svc_id = cma_get_service_id(id_priv->id.ps, addr);
- if (cma_any_addr(addr))
+ if (cma_any_addr(addr) && !id_priv->afonly)
ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
else {
cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
@@ -1573,6 +1583,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
atomic_inc(&id_priv->refcount);
dev_id_priv->internal_id = 1;
+ dev_id_priv->afonly = id_priv->afonly;
ret = rdma_listen(id, id_priv->backlog);
if (ret)
@@ -2098,6 +2109,26 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
}
EXPORT_SYMBOL(rdma_set_reuseaddr);
+int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
+{
+ struct rdma_id_private *id_priv;
+ unsigned long flags;
+ int ret;
+
+ id_priv = container_of(id, struct rdma_id_private, id);
+ spin_lock_irqsave(&id_priv->lock, flags);
+ if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
+ id_priv->options |= (1 << CMA_OPTION_AFONLY);
+ id_priv->afonly = afonly;
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+ spin_unlock_irqrestore(&id_priv->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_set_afonly);
+
static void cma_bind_port(struct rdma_bind_list *bind_list,
struct rdma_id_private *id_priv)
{
@@ -2187,22 +2218,24 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
struct hlist_node *node;
addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
- if (cma_any_addr(addr) && !reuseaddr)
- return -EADDRNOTAVAIL;
-
hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
if (id_priv == cur_id)
continue;
- if ((cur_id->state == RDMA_CM_LISTEN) ||
- !reuseaddr || !cur_id->reuseaddr) {
- cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
- if (cma_any_addr(cur_addr))
- return -EADDRNOTAVAIL;
+ if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
+ cur_id->reuseaddr)
+ continue;
- if (!cma_addr_cmp(addr, cur_addr))
- return -EADDRINUSE;
- }
+ cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
+ if (id_priv->afonly && cur_id->afonly &&
+ (addr->sa_family != cur_addr->sa_family))
+ continue;
+
+ if (cma_any_addr(addr) || cma_any_addr(cur_addr))
+ return -EADDRNOTAVAIL;
+
+ if (!cma_addr_cmp(addr, cur_addr))
+ return -EADDRINUSE;
}
return 0;
}
@@ -2278,7 +2311,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
struct sockaddr *addr)
{
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
struct sockaddr_in6 *sin6;
if (addr->sa_family != AF_INET6)
@@ -2371,6 +2404,14 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
}
memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
+ if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
+ if (addr->sa_family == AF_INET)
+ id_priv->afonly = 1;
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (addr->sa_family == AF_INET6)
+ id_priv->afonly = init_net.ipv6.sysctl.bindv6only;
+#endif
+ }
ret = cma_get_port(id_priv);
if (ret)
goto err2;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index e497dfbee435..3ae2bfd31015 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -108,12 +108,14 @@ void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
unsigned char *prev_tail;
prev_tail = skb_tail_pointer(skb);
- *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
- len, NLM_F_MULTI);
+ *nlh = nlmsg_put(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
+ len, NLM_F_MULTI);
+ if (!*nlh)
+ goto out_nlmsg_trim;
(*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
- return NLMSG_DATA(*nlh);
+ return nlmsg_data(*nlh);
-nlmsg_failure:
+out_nlmsg_trim:
nlmsg_trim(skb, prev_tail);
return NULL;
}
@@ -171,8 +173,11 @@ static void ibnl_rcv(struct sk_buff *skb)
int __init ibnl_init(void)
{
- nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv,
- NULL, THIS_MODULE);
+ struct netlink_kernel_cfg cfg = {
+ .input = ibnl_rcv,
+ };
+
+ nls = netlink_kernel_create(&init_net, NETLINK_RDMA, THIS_MODULE, &cfg);
if (!nls) {
pr_warn("Failed to create netlink socket\n");
return -ENOMEM;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index fbbfa24cf572..a8905abc56e4 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -94,6 +94,12 @@ struct ib_sa_path_query {
struct ib_sa_query sa_query;
};
+struct ib_sa_guidinfo_query {
+ void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
+ void *context;
+ struct ib_sa_query sa_query;
+};
+
struct ib_sa_mcmember_query {
void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
void *context;
@@ -347,6 +353,34 @@ static const struct ib_field service_rec_table[] = {
.size_bits = 2*64 },
};
+#define GUIDINFO_REC_FIELD(field) \
+ .struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field), \
+ .struct_size_bytes = sizeof((struct ib_sa_guidinfo_rec *) 0)->field, \
+ .field_name = "sa_guidinfo_rec:" #field
+
+static const struct ib_field guidinfo_rec_table[] = {
+ { GUIDINFO_REC_FIELD(lid),
+ .offset_words = 0,
+ .offset_bits = 0,
+ .size_bits = 16 },
+ { GUIDINFO_REC_FIELD(block_num),
+ .offset_words = 0,
+ .offset_bits = 16,
+ .size_bits = 8 },
+ { GUIDINFO_REC_FIELD(res1),
+ .offset_words = 0,
+ .offset_bits = 24,
+ .size_bits = 8 },
+ { GUIDINFO_REC_FIELD(res2),
+ .offset_words = 1,
+ .offset_bits = 0,
+ .size_bits = 32 },
+ { GUIDINFO_REC_FIELD(guid_info_list),
+ .offset_words = 2,
+ .offset_bits = 0,
+ .size_bits = 512 },
+};
+
static void free_sm_ah(struct kref *kref)
{
struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
@@ -945,6 +979,105 @@ err1:
return ret;
}
+/* Support GuidInfoRecord */
+static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
+ int status,
+ struct ib_sa_mad *mad)
+{
+ struct ib_sa_guidinfo_query *query =
+ container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
+
+ if (mad) {
+ struct ib_sa_guidinfo_rec rec;
+
+ ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
+ mad->data, &rec);
+ query->callback(status, &rec, query->context);
+ } else
+ query->callback(status, NULL, query->context);
+}
+
+static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
+{
+ kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
+}
+
+int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
+ struct ib_device *device, u8 port_num,
+ struct ib_sa_guidinfo_rec *rec,
+ ib_sa_comp_mask comp_mask, u8 method,
+ int timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_sa_guidinfo_rec *resp,
+ void *context),
+ void *context,
+ struct ib_sa_query **sa_query)
+{
+ struct ib_sa_guidinfo_query *query;
+ struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+ struct ib_sa_port *port;
+ struct ib_mad_agent *agent;
+ struct ib_sa_mad *mad;
+ int ret;
+
+ if (!sa_dev)
+ return -ENODEV;
+
+ if (method != IB_MGMT_METHOD_GET &&
+ method != IB_MGMT_METHOD_SET &&
+ method != IB_SA_METHOD_DELETE) {
+ return -EINVAL;
+ }
+
+ port = &sa_dev->port[port_num - sa_dev->start_port];
+ agent = port->agent;
+
+ query = kmalloc(sizeof *query, gfp_mask);
+ if (!query)
+ return -ENOMEM;
+
+ query->sa_query.port = port;
+ ret = alloc_mad(&query->sa_query, gfp_mask);
+ if (ret)
+ goto err1;
+
+ ib_sa_client_get(client);
+ query->sa_query.client = client;
+ query->callback = callback;
+ query->context = context;
+
+ mad = query->sa_query.mad_buf->mad;
+ init_mad(mad, agent);
+
+ query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
+ query->sa_query.release = ib_sa_guidinfo_rec_release;
+
+ mad->mad_hdr.method = method;
+ mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
+ mad->sa_hdr.comp_mask = comp_mask;
+
+ ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
+ mad->data);
+
+ *sa_query = &query->sa_query;
+
+ ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
+ if (ret < 0)
+ goto err2;
+
+ return ret;
+
+err2:
+ *sa_query = NULL;
+ ib_sa_client_put(query->sa_query.client);
+ free_mad(&query->sa_query);
+
+err1:
+ kfree(query);
+ return ret;
+}
+EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
+
static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *mad_send_wc)
{
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 8002ae642cfe..893cb879462c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -909,6 +909,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
}
ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
break;
+ case RDMA_OPTION_ID_AFONLY:
+ if (optlen != sizeof(int)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
+ break;
default:
ret = -ENOSYS;
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 740dcc065cf2..77b6b182778a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1374,7 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
- l2t = t3_l2t_get(tdev, dst, NULL);
+ l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1942,7 +1942,8 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail3;
}
ep->dst = &rt->dst;
- ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
+ ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
+ &cm_id->remote_addr.sin_addr.s_addr);
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index b18870c455ad..51f42061dae9 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -548,8 +548,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
}
if (mpa_rev_to_use == 2) {
- mpa->private_data_size +=
- htons(sizeof(struct mpa_v2_conn_params));
+ mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
+ sizeof (struct mpa_v2_conn_params));
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
@@ -635,8 +635,8 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
mpa->flags |= MPA_ENHANCED_RDMA_CONN;
- mpa->private_data_size +=
- htons(sizeof(struct mpa_v2_conn_params));
+ mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
+ sizeof (struct mpa_v2_conn_params));
mpa_v2_params.ird = htons(((u16)ep->ird) |
(peer2peer ? MPA_V2_PEER2PEER_MODEL :
0));
@@ -715,8 +715,8 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
mpa->flags |= MPA_ENHANCED_RDMA_CONN;
- mpa->private_data_size +=
- htons(sizeof(struct mpa_v2_conn_params));
+ mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
+ sizeof (struct mpa_v2_conn_params));
mpa_v2_params.ird = htons((u16)ep->ird);
mpa_v2_params.ord = htons((u16)ep->ord);
if (peer2peer && (ep->mpa_attr.p2p_type !=
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 259b0670b51c..c27141fef1ab 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -147,47 +147,51 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
}
/*
- * Snoop SM MADs for port info and P_Key table sets, so we can
- * synthesize LID change and P_Key change events.
+ * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
+ * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
*/
static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad,
- u16 prev_lid)
+ u16 prev_lid)
{
- struct ib_event event;
+ struct ib_port_info *pinfo;
+ u16 lid;
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
- mad->mad_hdr.method == IB_MGMT_METHOD_SET) {
- if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) {
- struct ib_port_info *pinfo =
- (struct ib_port_info *) ((struct ib_smp *) mad)->data;
- u16 lid = be16_to_cpu(pinfo->lid);
+ mad->mad_hdr.method == IB_MGMT_METHOD_SET)
+ switch (mad->mad_hdr.attr_id) {
+ case IB_SMP_ATTR_PORT_INFO:
+ pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
+ lid = be16_to_cpu(pinfo->lid);
- update_sm_ah(to_mdev(ibdev), port_num,
+ update_sm_ah(dev, port_num,
be16_to_cpu(pinfo->sm_lid),
pinfo->neighbormtu_mastersmsl & 0xf);
- event.device = ibdev;
- event.element.port_num = port_num;
+ if (pinfo->clientrereg_resv_subnetto & 0x80)
+ mlx4_ib_dispatch_event(dev, port_num,
+ IB_EVENT_CLIENT_REREGISTER);
- if (pinfo->clientrereg_resv_subnetto & 0x80) {
- event.event = IB_EVENT_CLIENT_REREGISTER;
- ib_dispatch_event(&event);
- }
+ if (prev_lid != lid)
+ mlx4_ib_dispatch_event(dev, port_num,
+ IB_EVENT_LID_CHANGE);
+ break;
- if (prev_lid != lid) {
- event.event = IB_EVENT_LID_CHANGE;
- ib_dispatch_event(&event);
- }
- }
+ case IB_SMP_ATTR_PKEY_TABLE:
+ mlx4_ib_dispatch_event(dev, port_num,
+ IB_EVENT_PKEY_CHANGE);
+ break;
- if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) {
- event.device = ibdev;
- event.event = IB_EVENT_PKEY_CHANGE;
- event.element.port_num = port_num;
- ib_dispatch_event(&event);
+ case IB_SMP_ATTR_GUID_INFO:
+ /* paravirtualized master's guid is guid 0 -- does not change */
+ if (!mlx4_is_master(dev->dev))
+ mlx4_ib_dispatch_event(dev, port_num,
+ IB_EVENT_GID_CHANGE);
+ break;
+ default:
+ break;
}
- }
}
static void node_desc_override(struct ib_device *dev,
@@ -242,6 +246,25 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int err;
struct ib_port_attr pattr;
+ if (in_wc && in_wc->qp->qp_num) {
+ pr_debug("received MAD: slid:%d sqpn:%d "
+ "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
+ in_wc->slid, in_wc->src_qp,
+ in_wc->dlid_path_bits,
+ in_wc->qp->qp_num,
+ in_wc->wc_flags,
+ in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
+ be16_to_cpu(in_mad->mad_hdr.attr_id));
+ if (in_wc->wc_flags & IB_WC_GRH) {
+ pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
+ be64_to_cpu(in_grh->sgid.global.subnet_prefix),
+ be64_to_cpu(in_grh->sgid.global.interface_id));
+ pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
+ be64_to_cpu(in_grh->dgid.global.subnet_prefix),
+ be64_to_cpu(in_grh->dgid.global.interface_id));
+ }
+ }
+
slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
@@ -286,7 +309,8 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_FAILURE;
if (!out_mad->mad_hdr.status) {
- smp_snoop(ibdev, port_num, in_mad, prev_lid);
+ if (!(to_mdev(ibdev)->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV))
+ smp_snoop(ibdev, port_num, in_mad, prev_lid);
node_desc_override(ibdev, out_mad);
}
@@ -427,3 +451,64 @@ void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
ib_destroy_ah(dev->sm_ah[p]);
}
}
+
+void handle_port_mgmt_change_event(struct work_struct *work)
+{
+ struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
+ struct mlx4_ib_dev *dev = ew->ib_dev;
+ struct mlx4_eqe *eqe = &(ew->ib_eqe);
+ u8 port = eqe->event.port_mgmt_change.port;
+ u32 changed_attr;
+
+ switch (eqe->subtype) {
+ case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
+ changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
+
+ /* Update the SM ah - This should be done before handling
+ the other changed attributes so that MADs can be sent to the SM */
+ if (changed_attr & MSTR_SM_CHANGE_MASK) {
+ u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
+ u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
+ update_sm_ah(dev, port, lid, sl);
+ }
+
+ /* Check if it is a lid change event */
+ if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
+ mlx4_ib_dispatch_event(dev, port, IB_EVENT_LID_CHANGE);
+
+ /* Generate GUID changed event */
+ if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK)
+ mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+
+ if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
+ mlx4_ib_dispatch_event(dev, port,
+ IB_EVENT_CLIENT_REREGISTER);
+ break;
+
+ case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
+ mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
+ break;
+ case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
+ /* paravirtualized master's guid is guid 0 -- does not change */
+ if (!mlx4_is_master(dev->dev))
+ mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
+ break;
+ default:
+ pr_warn("Unsupported subtype 0x%x for "
+ "Port Management Change event\n", eqe->subtype);
+ }
+
+ kfree(ew);
+}
+
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+ enum ib_event_type type)
+{
+ struct ib_event event;
+
+ event.device = &dev->ib_dev;
+ event.element.port_num = port_num;
+ event.event = type;
+
+ ib_dispatch_event(&event);
+}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 3530c41fcd1f..fe2088cfa6ee 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -50,7 +50,7 @@
#include "mlx4_ib.h"
#include "user.h"
-#define DRV_NAME "mlx4_ib"
+#define DRV_NAME MLX4_IB_DRV_NAME
#define DRV_VERSION "1.0"
#define DRV_RELDATE "April 4, 2008"
@@ -157,7 +157,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
IB_ATOMIC_HCA : IB_ATOMIC_NONE;
- props->masked_atomic_cap = IB_ATOMIC_HCA;
+ props->masked_atomic_cap = props->atomic_cap;
props->max_pkeys = dev->dev->caps.pkey_table_len[1];
props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
@@ -718,26 +718,53 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
return ret;
}
+struct mlx4_ib_steering {
+ struct list_head list;
+ u64 reg_id;
+ union ib_gid gid;
+};
+
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
int err;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+ u64 reg_id;
+ struct mlx4_ib_steering *ib_steering = NULL;
+
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL);
+ if (!ib_steering)
+ return -ENOMEM;
+ }
- err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
- !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
- MLX4_PROT_IB_IPV6);
+ err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
+ !!(mqp->flags &
+ MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
+ MLX4_PROT_IB_IPV6, &reg_id);
if (err)
- return err;
+ goto err_malloc;
err = add_gid_entry(ibqp, gid);
if (err)
goto err_add;
+ if (ib_steering) {
+ memcpy(ib_steering->gid.raw, gid->raw, 16);
+ ib_steering->reg_id = reg_id;
+ mutex_lock(&mqp->mutex);
+ list_add(&ib_steering->list, &mqp->steering_rules);
+ mutex_unlock(&mqp->mutex);
+ }
return 0;
err_add:
- mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
+ mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+ MLX4_PROT_IB_IPV6, reg_id);
+err_malloc:
+ kfree(ib_steering);
+
return err;
}
@@ -765,9 +792,30 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
u8 mac[6];
struct net_device *ndev;
struct mlx4_ib_gid_entry *ge;
+ u64 reg_id = 0;
+
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ struct mlx4_ib_steering *ib_steering;
+
+ mutex_lock(&mqp->mutex);
+ list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
+ if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
+ list_del(&ib_steering->list);
+ break;
+ }
+ }
+ mutex_unlock(&mqp->mutex);
+ if (&ib_steering->list == &mqp->steering_rules) {
+ pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
+ return -EINVAL;
+ }
+ reg_id = ib_steering->reg_id;
+ kfree(ib_steering);
+ }
- err = mlx4_multicast_detach(mdev->dev,
- &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6);
+ err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
+ MLX4_PROT_IB_IPV6, reg_id);
if (err)
return err;
@@ -898,7 +946,6 @@ static void update_gids_task(struct work_struct *work)
union ib_gid *gids;
int err;
struct mlx4_dev *dev = gw->dev->dev;
- struct ib_event event;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -916,10 +963,7 @@ static void update_gids_task(struct work_struct *work)
pr_warn("set port command failed\n");
else {
memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
- event.device = &gw->dev->ib_dev;
- event.element.port_num = gw->port;
- event.event = IB_EVENT_GID_CHANGE;
- ib_dispatch_event(&event);
+ mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE);
}
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1111,7 +1155,8 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
sprintf(name, "mlx4-ib-%d-%d@%s",
i, j, dev->pdev->bus->name);
/* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(dev, name, &ibdev->eq_table[eq])) {
+ if (mlx4_assign_eq(dev, name, NULL,
+ &ibdev->eq_table[eq])) {
/* Use legacy (same as mlx4_en driver) */
pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
ibdev->eq_table[eq] =
@@ -1383,10 +1428,18 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
}
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
- enum mlx4_dev_event event, int port)
+ enum mlx4_dev_event event, unsigned long param)
{
struct ib_event ibev;
struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);
+ struct mlx4_eqe *eqe = NULL;
+ struct ib_event_work *ew;
+ int port = 0;
+
+ if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE)
+ eqe = (struct mlx4_eqe *)param;
+ else
+ port = (u8)param;
if (port > ibdev->num_ports)
return;
@@ -1405,6 +1458,19 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
ibev.event = IB_EVENT_DEVICE_FATAL;
break;
+ case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
+ ew = kmalloc(sizeof *ew, GFP_ATOMIC);
+ if (!ew) {
+ pr_err("failed to allocate memory for events work\n");
+ break;
+ }
+
+ INIT_WORK(&ew->work, handle_port_mgmt_change_event);
+ memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
+ ew->ib_dev = ibdev;
+ handle_port_mgmt_change_event(&ew->work);
+ return;
+
default:
return;
}
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index ff36655d23d3..c136bb618e29 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -44,6 +44,16 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
+#define MLX4_IB_DRV_NAME "mlx4_ib"
+
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+#define pr_fmt(fmt) "<" MLX4_IB_DRV_NAME "> %s: " fmt, __func__
+
+#define mlx4_ib_warn(ibdev, format, arg...) \
+ dev_warn((ibdev)->dma_device, MLX4_IB_DRV_NAME ": " format, ## arg)
+
enum {
MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
MLX4_IB_MAX_HEADROOM = 2048
@@ -163,6 +173,7 @@ struct mlx4_ib_qp {
u8 state;
int mlx_type;
struct list_head gid_list;
+ struct list_head steering_rules;
};
struct mlx4_ib_srq {
@@ -214,6 +225,12 @@ struct mlx4_ib_dev {
int eq_added;
};
+struct ib_event_work {
+ struct work_struct work;
+ struct mlx4_ib_dev *ib_dev;
+ struct mlx4_eqe ib_eqe;
+};
+
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
{
return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
@@ -371,4 +388,7 @@ static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
union ib_gid *gid);
+void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
+ enum ib_event_type type);
+
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8d4ed24aef93..a6d8ea060ea8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -495,6 +495,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
INIT_LIST_HEAD(&qp->gid_list);
+ INIT_LIST_HEAD(&qp->steering_rules);
qp->state = IB_QPS_RESET;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
@@ -1335,11 +1336,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+ pr_debug("qpn 0x%x: invalid attribute mask specified "
+ "for transition %d to %d. qp_type %d,"
+ " attr_mask 0x%x\n",
+ ibqp->qp_num, cur_state, new_state,
+ ibqp->qp_type, attr_mask);
goto out;
+ }
if ((attr_mask & IB_QP_PORT) &&
(attr->port_num == 0 || attr->port_num > dev->dev->caps.num_ports)) {
+ pr_debug("qpn 0x%x: invalid port number (%d) specified "
+ "for transition %d to %d. qp_type %d\n",
+ ibqp->qp_num, attr->port_num, cur_state,
+ new_state, ibqp->qp_type);
goto out;
}
@@ -1350,17 +1361,30 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_PKEY_INDEX) {
int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
+ if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p]) {
+ pr_debug("qpn 0x%x: invalid pkey index (%d) specified "
+ "for transition %d to %d. qp_type %d\n",
+ ibqp->qp_num, attr->pkey_index, cur_state,
+ new_state, ibqp->qp_type);
goto out;
+ }
}
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
attr->max_rd_atomic > dev->dev->caps.max_qp_init_rdma) {
+ pr_debug("qpn 0x%x: max_rd_atomic (%d) too large. "
+ "Transition %d to %d. qp_type %d\n",
+ ibqp->qp_num, attr->max_rd_atomic, cur_state,
+ new_state, ibqp->qp_type);
goto out;
}
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
attr->max_dest_rd_atomic > dev->dev->caps.max_qp_dest_rdma) {
+ pr_debug("qpn 0x%x: max_dest_rd_atomic (%d) too large. "
+ "Transition %d to %d. qp_type %d\n",
+ ibqp->qp_num, attr->max_dest_rd_atomic, cur_state,
+ new_state, ibqp->qp_type);
goto out;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 9601049e14d0..26a684536109 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -247,7 +247,8 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
spin_unlock(&dev->qp_table.lock);
if (!qp) {
- mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ mthca_warn(dev, "Async event %d for bogus QP %08x\n",
+ event_type, qpn);
return;
}
@@ -501,6 +502,7 @@ done:
qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
+ qp_init_attr->sq_sig_type = qp->sq_policy;
out_mailbox:
mthca_free_mailbox(dev, mailbox);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index b050e629e9c3..5a044526e4f4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -202,8 +202,7 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
return 0;
}
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
-defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q)
static int ocrdma_inet6addr_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
@@ -549,7 +548,7 @@ static struct ocrdma_driver ocrdma_drv = {
static void ocrdma_unregister_inet6addr_notifier(void)
{
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&ocrdma_inet6addr_notifier);
#endif
}
@@ -558,7 +557,7 @@ static int __init ocrdma_init_module(void)
{
int status;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
status = register_inet6addr_notifier(&ocrdma_inet6addr_notifier);
if (status)
return status;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 2e2e7aecc990..b2f9784beb4a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
attr->max_srq = (dev->attr.max_qp - 1);
- attr->max_srq_sge = attr->max_srq_sge;
+ attr->max_srq_sge = dev->attr.max_srq_sge;
attr->max_srq_wr = dev->attr.max_rqe;
attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
attr->max_fast_reg_page_list_len = 0;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 7e62f4137148..6e19ec844d99 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1,8 +1,8 @@
#ifndef _QIB_KERNEL_H
#define _QIB_KERNEL_H
/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -519,6 +519,7 @@ struct qib_pportdata {
struct qib_devdata *dd;
struct qib_chippport_specific *cpspec; /* chip-specific per-port */
struct kobject pport_kobj;
+ struct kobject pport_cc_kobj;
struct kobject sl2vl_kobj;
struct kobject diagc_kobj;
@@ -544,6 +545,7 @@ struct qib_pportdata {
/* read mostly */
struct qib_sdma_desc *sdma_descq;
+ struct workqueue_struct *qib_wq;
struct qib_sdma_state sdma_state;
dma_addr_t sdma_descq_phys;
volatile __le64 *sdma_head_dma; /* DMA'ed by chip */
@@ -637,6 +639,39 @@ struct qib_pportdata {
struct timer_list led_override_timer;
struct xmit_wait cong_stats;
struct timer_list symerr_clear_timer;
+
+ /* Synchronize access between driver writes and sysfs reads */
+ spinlock_t cc_shadow_lock
+ ____cacheline_aligned_in_smp;
+
+ /* Shadow copy of the congestion control table */
+ struct cc_table_shadow *ccti_entries_shadow;
+
+ /* Shadow copy of the congestion control entries */
+ struct ib_cc_congestion_setting_attr_shadow *congestion_entries_shadow;
+
+ /* List of congestion control table entries */
+ struct ib_cc_table_entry_shadow *ccti_entries;
+
+ /* 16 congestion entries with each entry corresponding to a SL */
+ struct ib_cc_congestion_entry_shadow *congestion_entries;
+
+ /* Total number of congestion control table entries */
+ u16 total_cct_entry;
+
+ /* Bit map identifying service level */
+ u16 cc_sl_control_map;
+
+ /* maximum congestion control table index */
+ u16 ccti_limit;
+
+ /* CA's max number of 64 entry units in the congestion control table */
+ u8 cc_max_table_entries;
+
+ /* Maximum number of congestion control entries that the agent expects
+ * the manager to send.
+ */
+ u8 cc_supported_table_entries;
};
/* Observers. Not to be taken lightly, possibly not to ship. */
@@ -1077,6 +1112,7 @@ extern u32 qib_cpulist_count;
extern unsigned long *qib_cpulist;
extern unsigned qib_wc_pat;
+extern unsigned qib_cc_table_size;
int qib_init(struct qib_devdata *, int);
int init_chip_wc_pat(struct qib_devdata *dd, u32);
int qib_enable_wc(struct qib_devdata *dd);
@@ -1267,6 +1303,11 @@ int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
/* ppd->sdma_lock should be locked before calling this. */
int qib_sdma_make_progress(struct qib_pportdata *dd);
+static inline int qib_sdma_empty(const struct qib_pportdata *ppd)
+{
+ return ppd->sdma_descq_added == ppd->sdma_descq_removed;
+}
+
/* must be called under qib_sdma_lock */
static inline u16 qib_sdma_descq_freecnt(const struct qib_pportdata *ppd)
{
diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c
index 9892456a4348..1686fd4bda87 100644
--- a/drivers/infiniband/hw/qib/qib_diag.c
+++ b/drivers/infiniband/hw/qib/qib_diag.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2010 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -53,6 +53,9 @@
#include "qib.h"
#include "qib_common.h"
+#undef pr_fmt
+#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
+
/*
* Each client that opens the diag device must read then write
* offset 0, to prevent lossage from random cat or od. diag_state
@@ -598,8 +601,8 @@ static ssize_t qib_diagpkt_write(struct file *fp,
}
tmpbuf = vmalloc(plen);
if (!tmpbuf) {
- qib_devinfo(dd->pcidev, "Unable to allocate tmp buffer, "
- "failing\n");
+ qib_devinfo(dd->pcidev,
+ "Unable to allocate tmp buffer, failing\n");
ret = -ENOMEM;
goto bail;
}
@@ -693,7 +696,7 @@ int qib_register_observer(struct qib_devdata *dd,
ret = -ENOMEM;
olp = vmalloc(sizeof *olp);
if (!olp) {
- printk(KERN_ERR QIB_DRV_NAME ": vmalloc for observer failed\n");
+ pr_err("vmalloc for observer failed\n");
goto bail;
}
if (olp) {
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 8895cfec5019..e41e7f7fc763 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -764,8 +764,9 @@ int qib_reset_device(int unit)
qib_devinfo(dd->pcidev, "Reset on unit %u requested\n", unit);
if (!dd->kregbase || !(dd->flags & QIB_PRESENT)) {
- qib_devinfo(dd->pcidev, "Invalid unit number %u or "
- "not initialized or not present\n", unit);
+ qib_devinfo(dd->pcidev,
+ "Invalid unit number %u or not initialized or not present\n",
+ unit);
ret = -ENXIO;
goto bail;
}
@@ -802,11 +803,13 @@ int qib_reset_device(int unit)
else
ret = -EAGAIN;
if (ret)
- qib_dev_err(dd, "Reinitialize unit %u after "
- "reset failed with %d\n", unit, ret);
+ qib_dev_err(dd,
+ "Reinitialize unit %u after reset failed with %d\n",
+ unit, ret);
else
- qib_devinfo(dd->pcidev, "Reinitialized unit %u after "
- "resetting\n", unit);
+ qib_devinfo(dd->pcidev,
+ "Reinitialized unit %u after resetting\n",
+ unit);
bail:
return ret;
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 92d9cfe98a68..4d5d71aaa2b4 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -160,10 +161,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
if (oguid > bguid[7]) {
if (bguid[6] == 0xff) {
if (bguid[5] == 0xff) {
- qib_dev_err(dd, "Can't set %s GUID"
- " from base, wraps to"
- " OUI!\n",
- qib_get_unit_name(t));
+ qib_dev_err(dd,
+ "Can't set %s GUID from base, wraps to OUI!\n",
+ qib_get_unit_name(t));
dd->base_guid = 0;
goto bail;
}
@@ -182,8 +182,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
len = sizeof(struct qib_flash);
buf = vmalloc(len);
if (!buf) {
- qib_dev_err(dd, "Couldn't allocate memory to read %u "
- "bytes from eeprom for GUID\n", len);
+ qib_dev_err(dd,
+ "Couldn't allocate memory to read %u bytes from eeprom for GUID\n",
+ len);
goto bail;
}
@@ -201,23 +202,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
csum = flash_csum(ifp, 0);
if (csum != ifp->if_csum) {
- qib_devinfo(dd->pcidev, "Bad I2C flash checksum: "
- "0x%x, not 0x%x\n", csum, ifp->if_csum);
+ qib_devinfo(dd->pcidev,
+ "Bad I2C flash checksum: 0x%x, not 0x%x\n",
+ csum, ifp->if_csum);
goto done;
}
if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
*(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
- qib_dev_err(dd, "Invalid GUID %llx from flash; ignoring\n",
- *(unsigned long long *) ifp->if_guid);
+ qib_dev_err(dd,
+ "Invalid GUID %llx from flash; ignoring\n",
+ *(unsigned long long *) ifp->if_guid);
/* don't allow GUID if all 0 or all 1's */
goto done;
}
/* complain, but allow it */
if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
- qib_devinfo(dd->pcidev, "Warning, GUID %llx is "
- "default, probably not correct!\n",
- *(unsigned long long *) ifp->if_guid);
+ qib_devinfo(dd->pcidev,
+ "Warning, GUID %llx is default, probably not correct!\n",
+ *(unsigned long long *) ifp->if_guid);
bguid = ifp->if_guid;
if (!bguid[0] && !bguid[1] && !bguid[2]) {
@@ -260,8 +263,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
memcpy(dd->serial, ifp->if_serial,
sizeof ifp->if_serial);
if (!strstr(ifp->if_comment, "Tested successfully"))
- qib_dev_err(dd, "Board SN %s did not pass functional "
- "test: %s\n", dd->serial, ifp->if_comment);
+ qib_dev_err(dd,
+ "Board SN %s did not pass functional test: %s\n",
+ dd->serial, ifp->if_comment);
memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
/*
@@ -323,8 +327,9 @@ int qib_update_eeprom_log(struct qib_devdata *dd)
buf = vmalloc(len);
ret = 1;
if (!buf) {
- qib_dev_err(dd, "Couldn't allocate memory to read %u "
- "bytes from eeprom for logging\n", len);
+ qib_dev_err(dd,
+ "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
+ len);
goto bail;
}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index a7403248d83d..faa44cb08071 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -49,6 +49,9 @@
#include "qib_common.h"
#include "qib_user_sdma.h"
+#undef pr_fmt
+#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
+
static int qib_open(struct inode *, struct file *);
static int qib_close(struct inode *, struct file *);
static ssize_t qib_write(struct file *, const char __user *, size_t, loff_t *);
@@ -315,8 +318,9 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
}
if (cnt > tidcnt) {
/* make sure it all fits in tid_pg_list */
- qib_devinfo(dd->pcidev, "Process tried to allocate %u "
- "TIDs, only trying max (%u)\n", cnt, tidcnt);
+ qib_devinfo(dd->pcidev,
+ "Process tried to allocate %u TIDs, only trying max (%u)\n",
+ cnt, tidcnt);
cnt = tidcnt;
}
pagep = (struct page **) rcd->tid_pg_list;
@@ -750,9 +754,9 @@ static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
ret = remap_pfn_range(vma, vma->vm_start, pfn,
len, vma->vm_page_prot);
if (ret)
- qib_devinfo(dd->pcidev, "%s ctxt%u mmap of %lx, %x "
- "bytes failed: %d\n", what, rcd->ctxt,
- pfn, len, ret);
+ qib_devinfo(dd->pcidev,
+ "%s ctxt%u mmap of %lx, %x bytes failed: %d\n",
+ what, rcd->ctxt, pfn, len, ret);
bail:
return ret;
}
@@ -771,8 +775,9 @@ static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
*/
sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE;
if ((vma->vm_end - vma->vm_start) > sz) {
- qib_devinfo(dd->pcidev, "FAIL mmap userreg: reqlen "
- "%lx > PAGE\n", vma->vm_end - vma->vm_start);
+ qib_devinfo(dd->pcidev,
+ "FAIL mmap userreg: reqlen %lx > PAGE\n",
+ vma->vm_end - vma->vm_start);
ret = -EFAULT;
} else {
phys = dd->physaddr + ureg;
@@ -802,8 +807,8 @@ static int mmap_piobufs(struct vm_area_struct *vma,
* for it.
*/
if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) {
- qib_devinfo(dd->pcidev, "FAIL mmap piobufs: "
- "reqlen %lx > PAGE\n",
+ qib_devinfo(dd->pcidev,
+ "FAIL mmap piobufs: reqlen %lx > PAGE\n",
vma->vm_end - vma->vm_start);
ret = -EINVAL;
goto bail;
@@ -847,8 +852,8 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
size = rcd->rcvegrbuf_size;
total_size = rcd->rcvegrbuf_chunks * size;
if ((vma->vm_end - vma->vm_start) > total_size) {
- qib_devinfo(dd->pcidev, "FAIL on egr bufs: "
- "reqlen %lx > actual %lx\n",
+ qib_devinfo(dd->pcidev,
+ "FAIL on egr bufs: reqlen %lx > actual %lx\n",
vma->vm_end - vma->vm_start,
(unsigned long) total_size);
ret = -EINVAL;
@@ -856,8 +861,9 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
}
if (vma->vm_flags & VM_WRITE) {
- qib_devinfo(dd->pcidev, "Can't map eager buffers as "
- "writable (flags=%lx)\n", vma->vm_flags);
+ qib_devinfo(dd->pcidev,
+ "Can't map eager buffers as writable (flags=%lx)\n",
+ vma->vm_flags);
ret = -EPERM;
goto bail;
}
@@ -1270,8 +1276,8 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
GFP_KERNEL);
if (!rcd || !ptmp) {
- qib_dev_err(dd, "Unable to allocate ctxtdata "
- "memory, failing open\n");
+ qib_dev_err(dd,
+ "Unable to allocate ctxtdata memory, failing open\n");
ret = -ENOMEM;
goto bailerr;
}
@@ -1560,10 +1566,10 @@ done_chk_sdma:
} else if (weight == 1 &&
test_bit(cpumask_first(tsk_cpus_allowed(current)),
qib_cpulist))
- qib_devinfo(dd->pcidev, "%s PID %u affinity "
- "set to cpu %d; already allocated\n",
- current->comm, current->pid,
- cpumask_first(tsk_cpus_allowed(current)));
+ qib_devinfo(dd->pcidev,
+ "%s PID %u affinity set to cpu %d; already allocated\n",
+ current->comm, current->pid,
+ cpumask_first(tsk_cpus_allowed(current)));
}
mutex_unlock(&qib_mutex);
@@ -2185,8 +2191,7 @@ int qib_cdev_init(int minor, const char *name,
cdev = cdev_alloc();
if (!cdev) {
- printk(KERN_ERR QIB_DRV_NAME
- ": Could not allocate cdev for minor %d, %s\n",
+ pr_err("Could not allocate cdev for minor %d, %s\n",
minor, name);
ret = -ENOMEM;
goto done;
@@ -2198,8 +2203,7 @@ int qib_cdev_init(int minor, const char *name,
ret = cdev_add(cdev, dev, 1);
if (ret < 0) {
- printk(KERN_ERR QIB_DRV_NAME
- ": Could not add cdev for minor %d, %s (err %d)\n",
+ pr_err("Could not add cdev for minor %d, %s (err %d)\n",
minor, name, -ret);
goto err_cdev;
}
@@ -2209,8 +2213,7 @@ int qib_cdev_init(int minor, const char *name,
goto done;
ret = PTR_ERR(device);
device = NULL;
- printk(KERN_ERR QIB_DRV_NAME ": Could not create "
- "device for minor %d, %s (err %d)\n",
+ pr_err("Could not create device for minor %d, %s (err %d)\n",
minor, name, -ret);
err_cdev:
cdev_del(cdev);
@@ -2245,16 +2248,14 @@ int __init qib_dev_init(void)
ret = alloc_chrdev_region(&qib_dev, 0, QIB_NMINORS, QIB_DRV_NAME);
if (ret < 0) {
- printk(KERN_ERR QIB_DRV_NAME ": Could not allocate "
- "chrdev region (err %d)\n", -ret);
+ pr_err("Could not allocate chrdev region (err %d)\n", -ret);
goto done;
}
qib_class = class_create(THIS_MODULE, "ipath");
if (IS_ERR(qib_class)) {
ret = PTR_ERR(qib_class);
- printk(KERN_ERR QIB_DRV_NAME ": Could not create "
- "device class (err %d)\n", -ret);
+ pr_err("Could not create device class (err %d)\n", -ret);
unregister_chrdev_region(qib_dev, QIB_NMINORS);
}
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index 05e0f17c5b44..cff8a6c32161 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -382,7 +383,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
&simple_dir_operations, dd);
if (ret) {
- printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
+ pr_err("create_file(%s) failed: %d\n", unit, ret);
goto bail;
}
@@ -390,21 +391,21 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp,
&cntr_ops[0], dd);
if (ret) {
- printk(KERN_ERR "create_file(%s/counters) failed: %d\n",
+ pr_err("create_file(%s/counters) failed: %d\n",
unit, ret);
goto bail;
}
ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp,
&cntr_ops[1], dd);
if (ret) {
- printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n",
+ pr_err("create_file(%s/counter_names) failed: %d\n",
unit, ret);
goto bail;
}
ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp,
&portcntr_ops[0], dd);
if (ret) {
- printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ pr_err("create_file(%s/%s) failed: %d\n",
unit, "portcounter_names", ret);
goto bail;
}
@@ -416,7 +417,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
&portcntr_ops[i], dd);
if (ret) {
- printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ pr_err("create_file(%s/%s) failed: %d\n",
unit, fname, ret);
goto bail;
}
@@ -426,7 +427,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp,
&qsfp_ops[i - 1], dd);
if (ret) {
- printk(KERN_ERR "create_file(%s/%s) failed: %d\n",
+ pr_err("create_file(%s/%s) failed: %d\n",
unit, fname, ret);
goto bail;
}
@@ -435,7 +436,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
&flash_ops, dd);
if (ret)
- printk(KERN_ERR "create_file(%s/flash) failed: %d\n",
+ pr_err("create_file(%s/flash) failed: %d\n",
unit, ret);
bail:
return ret;
@@ -486,7 +487,7 @@ static int remove_device_files(struct super_block *sb,
if (IS_ERR(dir)) {
ret = PTR_ERR(dir);
- printk(KERN_ERR "Lookup of %s failed\n", unit);
+ pr_err("Lookup of %s failed\n", unit);
goto bail;
}
@@ -532,7 +533,7 @@ static int qibfs_fill_super(struct super_block *sb, void *data, int silent)
ret = simple_fill_super(sb, QIBFS_MAGIC, files);
if (ret) {
- printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
+ pr_err("simple_fill_super failed: %d\n", ret);
goto bail;
}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 4d352b90750a..a099ac171e22 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -753,8 +753,8 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
if (!hwerrs)
return;
if (hwerrs == ~0ULL) {
- qib_dev_err(dd, "Read of hardware error status failed "
- "(all bits set); ignoring\n");
+ qib_dev_err(dd,
+ "Read of hardware error status failed (all bits set); ignoring\n");
return;
}
qib_stats.sps_hwerrs++;
@@ -779,13 +779,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
* or it's occurred within the last 5 seconds.
*/
if (hwerrs & ~(TXE_PIO_PARITY | RXEMEMPARITYERR_EAGERTID))
- qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
- "(cleared)\n", (unsigned long long) hwerrs);
+ qib_devinfo(dd->pcidev,
+ "Hardware error: hwerr=0x%llx (cleared)\n",
+ (unsigned long long) hwerrs);
if (hwerrs & ~IB_HWE_BITSEXTANT)
- qib_dev_err(dd, "hwerror interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (hwerrs & ~IB_HWE_BITSEXTANT));
+ qib_dev_err(dd,
+ "hwerror interrupt with unknown errors %llx set\n",
+ (unsigned long long)(hwerrs & ~IB_HWE_BITSEXTANT));
ctrl = qib_read_kreg32(dd, kr_control);
if ((ctrl & QLOGIC_IB_C_FREEZEMODE) && !dd->diag_client) {
@@ -815,8 +816,9 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
isfatal = 1;
- strlcat(msg, "[Memory BIST test failed, InfiniPath hardware"
- " unusable]", msgl);
+ strlcat(msg,
+ "[Memory BIST test failed, InfiniPath hardware unusable]",
+ msgl);
/* ignore from now on, so disable until driver reloaded */
dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
@@ -868,8 +870,9 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
*msg = 0; /* recovered from all of them */
if (isfatal && !dd->diag_client) {
- qib_dev_err(dd, "Fatal Hardware Error, no longer"
- " usable, SN %.16s\n", dd->serial);
+ qib_dev_err(dd,
+ "Fatal Hardware Error, no longer usable, SN %.16s\n",
+ dd->serial);
/*
* for /sys status file and user programs to print; if no
* trailing brace is copied, we'll know it was truncated.
@@ -1017,9 +1020,9 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
qib_inc_eeprom_err(dd, log_idx, 1);
if (errs & ~IB_E_BITSEXTANT)
- qib_dev_err(dd, "error interrupt with unknown errors "
- "%llx set\n",
- (unsigned long long) (errs & ~IB_E_BITSEXTANT));
+ qib_dev_err(dd,
+ "error interrupt with unknown errors %llx set\n",
+ (unsigned long long) (errs & ~IB_E_BITSEXTANT));
if (errs & E_SUM_ERRS) {
qib_disarm_6120_senderrbufs(ppd);
@@ -1089,8 +1092,8 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
}
if (errs & ERR_MASK(ResetNegated)) {
- qib_dev_err(dd, "Got reset, requires re-init "
- "(unload and reload driver)\n");
+ qib_dev_err(dd,
+ "Got reset, requires re-init (unload and reload driver)\n");
dd->flags &= ~QIB_INITTED; /* needs re-init */
/* mark as having had error */
*dd->devstatusp |= QIB_STATUS_HWERROR;
@@ -1541,8 +1544,9 @@ static noinline void unlikely_6120_intr(struct qib_devdata *dd, u64 istat)
qib_stats.sps_errints++;
estat = qib_read_kreg64(dd, kr_errstatus);
if (!estat)
- qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
- "but no error bits set!\n", istat);
+ qib_devinfo(dd->pcidev,
+ "error interrupt (%Lx), but no error bits set!\n",
+ istat);
handle_6120_errors(dd, estat);
}
@@ -1715,16 +1719,16 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
}
if (!dd->cspec->irq)
- qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
- "work\n");
+ qib_dev_err(dd,
+ "irq is 0, BIOS error? Interrupts won't work\n");
else {
int ret;
ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
QIB_DRV_NAME, dd);
if (ret)
- qib_dev_err(dd, "Couldn't setup interrupt "
- "(irq=%d): %d\n", dd->cspec->irq,
- ret);
+ qib_dev_err(dd,
+ "Couldn't setup interrupt (irq=%d): %d\n",
+ dd->cspec->irq, ret);
}
}
@@ -1759,8 +1763,9 @@ static void pe_boardname(struct qib_devdata *dd)
snprintf(dd->boardname, namelen, "%s", n);
if (dd->majrev != 4 || !dd->minrev || dd->minrev > 2)
- qib_dev_err(dd, "Unsupported InfiniPath hardware revision "
- "%u.%u!\n", dd->majrev, dd->minrev);
+ qib_dev_err(dd,
+ "Unsupported InfiniPath hardware revision %u.%u!\n",
+ dd->majrev, dd->minrev);
snprintf(dd->boardversion, sizeof(dd->boardversion),
"ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
@@ -1833,8 +1838,8 @@ static int qib_6120_setup_reset(struct qib_devdata *dd)
bail:
if (ret) {
if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
- qib_dev_err(dd, "Reset failed to setup PCIe or "
- "interrupts; continuing anyway\n");
+ qib_dev_err(dd,
+ "Reset failed to setup PCIe or interrupts; continuing anyway\n");
/* clear the reset error, init error/hwerror mask */
qib_6120_init_hwerrors(dd);
/* for Rev2 error interrupts; nop for rev 1 */
@@ -1876,8 +1881,9 @@ static void qib_6120_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
}
pa >>= 11;
if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
- qib_dev_err(dd, "Physical page address 0x%lx "
- "larger than supported\n", pa);
+ qib_dev_err(dd,
+ "Physical page address 0x%lx larger than supported\n",
+ pa);
return;
}
@@ -1941,8 +1947,9 @@ static void qib_6120_put_tid_2(struct qib_devdata *dd, u64 __iomem *tidptr,
}
pa >>= 11;
if (pa & ~QLOGIC_IB_RT_ADDR_MASK) {
- qib_dev_err(dd, "Physical page address 0x%lx "
- "larger than supported\n", pa);
+ qib_dev_err(dd,
+ "Physical page address 0x%lx larger than supported\n",
+ pa);
return;
}
@@ -2928,8 +2935,9 @@ static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
ppd->dd->unit, ppd->port);
} else if (!strncmp(what, "off", 3)) {
ppd->dd->cspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
- qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
- "(normal)\n", ppd->dd->unit, ppd->port);
+ qib_devinfo(ppd->dd->pcidev,
+ "Disabling IB%u:%u IBC loopback (normal)\n",
+ ppd->dd->unit, ppd->port);
} else
ret = -EINVAL;
if (!ret) {
@@ -3186,11 +3194,10 @@ static int qib_late_6120_initreg(struct qib_devdata *dd)
qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
val = qib_read_kreg64(dd, kr_sendpioavailaddr);
if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd, "Catastrophic software error, "
- "SendPIOAvailAddr written as %lx, "
- "read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
+ qib_dev_err(dd,
+ "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
ret = -EINVAL;
}
return ret;
@@ -3218,8 +3225,8 @@ static int init_6120_variables(struct qib_devdata *dd)
dd->revision = readq(&dd->kregbase[kr_revision]);
if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd, "Revision register read failure, "
- "giving up initialization\n");
+ qib_dev_err(dd,
+ "Revision register read failure, giving up initialization\n");
ret = -ENODEV;
goto bail;
}
@@ -3551,8 +3558,8 @@ struct qib_devdata *qib_init_iba6120_funcs(struct pci_dev *pdev,
goto bail;
if (qib_pcie_params(dd, 8, NULL, NULL))
- qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
- "continuing anyway\n");
+ qib_dev_err(dd,
+ "Failed to setup PCIe or interrupts; continuing anyway\n");
dd->cspec->irq = pdev->irq; /* save IRQ */
/* clear diagctrl register, in case diags were running and crashed */
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 86a0ba7ca0c2..64d0ecb90cdc 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1111,9 +1111,9 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
sdma_7220_errors(ppd, errs);
if (errs & ~IB_E_BITSEXTANT)
- qib_dev_err(dd, "error interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (errs & ~IB_E_BITSEXTANT));
+ qib_dev_err(dd,
+ "error interrupt with unknown errors %llx set\n",
+ (unsigned long long) (errs & ~IB_E_BITSEXTANT));
if (errs & E_SUM_ERRS) {
qib_disarm_7220_senderrbufs(ppd);
@@ -1192,8 +1192,8 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
}
if (errs & ERR_MASK(ResetNegated)) {
- qib_dev_err(dd, "Got reset, requires re-init "
- "(unload and reload driver)\n");
+ qib_dev_err(dd,
+ "Got reset, requires re-init (unload and reload driver)\n");
dd->flags &= ~QIB_INITTED; /* needs re-init */
/* mark as having had error */
*dd->devstatusp |= QIB_STATUS_HWERROR;
@@ -1305,8 +1305,8 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
if (!hwerrs)
goto bail;
if (hwerrs == ~0ULL) {
- qib_dev_err(dd, "Read of hardware error status failed "
- "(all bits set); ignoring\n");
+ qib_dev_err(dd,
+ "Read of hardware error status failed (all bits set); ignoring\n");
goto bail;
}
qib_stats.sps_hwerrs++;
@@ -1329,13 +1329,14 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
qib_inc_eeprom_err(dd, log_idx, 1);
if (hwerrs & ~(TXEMEMPARITYERR_PIOBUF | TXEMEMPARITYERR_PIOPBC |
RXE_PARITY))
- qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
- "(cleared)\n", (unsigned long long) hwerrs);
+ qib_devinfo(dd->pcidev,
+ "Hardware error: hwerr=0x%llx (cleared)\n",
+ (unsigned long long) hwerrs);
if (hwerrs & ~IB_HWE_BITSEXTANT)
- qib_dev_err(dd, "hwerror interrupt with unknown errors "
- "%llx set\n", (unsigned long long)
- (hwerrs & ~IB_HWE_BITSEXTANT));
+ qib_dev_err(dd,
+ "hwerror interrupt with unknown errors %llx set\n",
+ (unsigned long long) (hwerrs & ~IB_HWE_BITSEXTANT));
if (hwerrs & QLOGIC_IB_HWE_IB_UC_MEMORYPARITYERR)
qib_sd7220_clr_ibpar(dd);
@@ -1362,8 +1363,9 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
isfatal = 1;
- strlcat(msg, "[Memory BIST test failed, "
- "InfiniPath hardware unusable]", msgl);
+ strlcat(msg,
+ "[Memory BIST test failed, InfiniPath hardware unusable]",
+ msgl);
/* ignore from now on, so disable until driver reloaded */
dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
@@ -1409,8 +1411,9 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
qib_dev_err(dd, "%s hardware error\n", msg);
if (isfatal && !dd->diag_client) {
- qib_dev_err(dd, "Fatal Hardware Error, no longer"
- " usable, SN %.16s\n", dd->serial);
+ qib_dev_err(dd,
+ "Fatal Hardware Error, no longer usable, SN %.16s\n",
+ dd->serial);
/*
* For /sys status file and user programs to print; if no
* trailing brace is copied, we'll know it was truncated.
@@ -1918,8 +1921,9 @@ static noinline void unlikely_7220_intr(struct qib_devdata *dd, u64 istat)
qib_stats.sps_errints++;
estat = qib_read_kreg64(dd, kr_errstatus);
if (!estat)
- qib_devinfo(dd->pcidev, "error interrupt (%Lx), "
- "but no error bits set!\n", istat);
+ qib_devinfo(dd->pcidev,
+ "error interrupt (%Lx), but no error bits set!\n",
+ istat);
else
handle_7220_errors(dd, estat);
}
@@ -2023,17 +2027,18 @@ bail:
static void qib_setup_7220_interrupt(struct qib_devdata *dd)
{
if (!dd->cspec->irq)
- qib_dev_err(dd, "irq is 0, BIOS error? Interrupts won't "
- "work\n");
+ qib_dev_err(dd,
+ "irq is 0, BIOS error? Interrupts won't work\n");
else {
int ret = request_irq(dd->cspec->irq, qib_7220intr,
dd->msi_lo ? 0 : IRQF_SHARED,
QIB_DRV_NAME, dd);
if (ret)
- qib_dev_err(dd, "Couldn't setup %s interrupt "
- "(irq=%d): %d\n", dd->msi_lo ?
- "MSI" : "INTx", dd->cspec->irq, ret);
+ qib_dev_err(dd,
+ "Couldn't setup %s interrupt (irq=%d): %d\n",
+ dd->msi_lo ? "MSI" : "INTx",
+ dd->cspec->irq, ret);
}
}
@@ -2072,9 +2077,9 @@ static void qib_7220_boardname(struct qib_devdata *dd)
snprintf(dd->boardname, namelen, "%s", n);
if (dd->majrev != 5 || !dd->minrev || dd->minrev > 2)
- qib_dev_err(dd, "Unsupported InfiniPath hardware "
- "revision %u.%u!\n",
- dd->majrev, dd->minrev);
+ qib_dev_err(dd,
+ "Unsupported InfiniPath hardware revision %u.%u!\n",
+ dd->majrev, dd->minrev);
snprintf(dd->boardversion, sizeof(dd->boardversion),
"ChipABI %u.%u, %s, InfiniPath%u %u.%u, SW Compat %u\n",
@@ -2146,8 +2151,8 @@ static int qib_setup_7220_reset(struct qib_devdata *dd)
bail:
if (ret) {
if (qib_pcie_params(dd, dd->lbus_width, NULL, NULL))
- qib_dev_err(dd, "Reset failed to setup PCIe or "
- "interrupts; continuing anyway\n");
+ qib_dev_err(dd,
+ "Reset failed to setup PCIe or interrupts; continuing anyway\n");
/* hold IBC in reset, no sends, etc till later */
qib_write_kreg(dd, kr_control, 0ULL);
@@ -2187,8 +2192,9 @@ static void qib_7220_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
return;
}
if (chippa >= (1UL << IBA7220_TID_SZ_SHIFT)) {
- qib_dev_err(dd, "Physical page address 0x%lx "
- "larger than supported\n", pa);
+ qib_dev_err(dd,
+ "Physical page address 0x%lx larger than supported\n",
+ pa);
return;
}
@@ -2706,8 +2712,9 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
ppd->cpspec->ibcctrl &= ~SYM_MASK(IBCCtrl, Loopback);
/* enable heart beat again */
val = IBA7220_IBC_HRTBT_MASK << IBA7220_IBC_HRTBT_SHIFT;
- qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
- "(normal)\n", ppd->dd->unit, ppd->port);
+ qib_devinfo(ppd->dd->pcidev,
+ "Disabling IB%u:%u IBC loopback (normal)\n",
+ ppd->dd->unit, ppd->port);
} else
ret = -EINVAL;
if (!ret) {
@@ -3307,8 +3314,8 @@ static int qib_7220_intr_fallback(struct qib_devdata *dd)
if (!dd->msi_lo)
return 0;
- qib_devinfo(dd->pcidev, "MSI interrupt not detected,"
- " trying INTx interrupts\n");
+ qib_devinfo(dd->pcidev,
+ "MSI interrupt not detected, trying INTx interrupts\n");
qib_7220_free_irq(dd);
qib_enable_intx(dd->pcidev);
/*
@@ -3980,11 +3987,10 @@ static int qib_late_7220_initreg(struct qib_devdata *dd)
qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
val = qib_read_kreg64(dd, kr_sendpioavailaddr);
if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd, "Catastrophic software error, "
- "SendPIOAvailAddr written as %lx, "
- "read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
+ qib_dev_err(dd,
+ "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
ret = -EINVAL;
}
qib_register_observer(dd, &sendctrl_observer);
@@ -4014,8 +4020,8 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
dd->revision = readq(&dd->kregbase[kr_revision]);
if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd, "Revision register read failure, "
- "giving up initialization\n");
+ qib_dev_err(dd,
+ "Revision register read failure, giving up initialization\n");
ret = -ENODEV;
goto bail;
}
@@ -4613,8 +4619,8 @@ struct qib_devdata *qib_init_iba7220_funcs(struct pci_dev *pdev,
break;
}
if (qib_pcie_params(dd, minwidth, NULL, NULL))
- qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
- "continuing anyway\n");
+ qib_dev_err(dd,
+ "Failed to setup PCIe or interrupts; continuing anyway\n");
/* save IRQ for possible later use */
dd->cspec->irq = pdev->irq;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index c881e744c091..0d7280af99bc 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2008 - 2012 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -49,6 +50,10 @@
#include "qib_qsfp.h"
#include "qib_mad.h"
+#include "qib_verbs.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) QIB_DRV_NAME " " fmt
static void qib_setup_7322_setextled(struct qib_pportdata *, u32);
static void qib_7322_handle_hwerrors(struct qib_devdata *, char *, size_t);
@@ -1575,8 +1580,8 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
qib_stats.sps_errints++;
errs = qib_read_kreg64(dd, kr_errstatus);
if (!errs) {
- qib_devinfo(dd->pcidev, "device error interrupt, "
- "but no error bits set!\n");
+ qib_devinfo(dd->pcidev,
+ "device error interrupt, but no error bits set!\n");
goto done;
}
@@ -1622,8 +1627,8 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
if (errs & QIB_E_RESET) {
int pidx;
- qib_dev_err(dd, "Got reset, requires re-init "
- "(unload and reload driver)\n");
+ qib_dev_err(dd,
+ "Got reset, requires re-init (unload and reload driver)\n");
dd->flags &= ~QIB_INITTED; /* needs re-init */
/* mark as having had error */
*dd->devstatusp |= QIB_STATUS_HWERROR;
@@ -1760,9 +1765,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
ppd->dd->cspec->r1 ?
QDR_STATIC_ADAPT_DOWN_R1 :
QDR_STATIC_ADAPT_DOWN);
- printk(KERN_INFO QIB_DRV_NAME
- " IB%u:%u re-enabled QDR adaptation "
- "ibclt %x\n", ppd->dd->unit, ppd->port, ibclt);
+ pr_info(
+ "IB%u:%u re-enabled QDR adaptation ibclt %x\n",
+ ppd->dd->unit, ppd->port, ibclt);
}
}
}
@@ -1804,9 +1809,9 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
if (!*msg)
snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
"no others");
- qib_dev_porterr(dd, ppd->port, "error interrupt with unknown"
- " errors 0x%016Lx set (and %s)\n",
- (errs & ~QIB_E_P_BITSEXTANT), msg);
+ qib_dev_porterr(dd, ppd->port,
+ "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
+ (errs & ~QIB_E_P_BITSEXTANT), msg);
*msg = '\0';
}
@@ -2024,8 +2029,8 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
if (!hwerrs)
goto bail;
if (hwerrs == ~0ULL) {
- qib_dev_err(dd, "Read of hardware error status failed "
- "(all bits set); ignoring\n");
+ qib_dev_err(dd,
+ "Read of hardware error status failed (all bits set); ignoring\n");
goto bail;
}
qib_stats.sps_hwerrs++;
@@ -2039,8 +2044,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
/* no EEPROM logging, yet */
if (hwerrs)
- qib_devinfo(dd->pcidev, "Hardware error: hwerr=0x%llx "
- "(cleared)\n", (unsigned long long) hwerrs);
+ qib_devinfo(dd->pcidev,
+ "Hardware error: hwerr=0x%llx (cleared)\n",
+ (unsigned long long) hwerrs);
ctrl = qib_read_kreg32(dd, kr_control);
if ((ctrl & SYM_MASK(Control, FreezeMode)) && !dd->diag_client) {
@@ -2064,8 +2070,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
if (hwerrs & HWE_MASK(PowerOnBISTFailed)) {
isfatal = 1;
- strlcpy(msg, "[Memory BIST test failed, "
- "InfiniPath hardware unusable]", msgl);
+ strlcpy(msg,
+ "[Memory BIST test failed, InfiniPath hardware unusable]",
+ msgl);
/* ignore from now on, so disable until driver reloaded */
dd->cspec->hwerrmask &= ~HWE_MASK(PowerOnBISTFailed);
qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask);
@@ -2078,8 +2085,9 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
qib_dev_err(dd, "%s hardware error\n", msg);
if (isfatal && !dd->diag_client) {
- qib_dev_err(dd, "Fatal Hardware Error, no longer"
- " usable, SN %.16s\n", dd->serial);
+ qib_dev_err(dd,
+ "Fatal Hardware Error, no longer usable, SN %.16s\n",
+ dd->serial);
/*
* for /sys status file and user programs to print; if no
* trailing brace is copied, we'll know it was truncated.
@@ -2667,8 +2675,9 @@ static noinline void unknown_7322_ibits(struct qib_devdata *dd, u64 istat)
char msg[128];
kills = istat & ~QIB_I_BITSEXTANT;
- qib_dev_err(dd, "Clearing reserved interrupt(s) 0x%016llx:"
- " %s\n", (unsigned long long) kills, msg);
+ qib_dev_err(dd,
+ "Clearing reserved interrupt(s) 0x%016llx: %s\n",
+ (unsigned long long) kills, msg);
qib_write_kreg(dd, kr_intmask, (dd->cspec->int_enable_mask & ~kills));
}
@@ -3101,16 +3110,16 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
/* Try to get INTx interrupt */
try_intx:
if (!dd->pcidev->irq) {
- qib_dev_err(dd, "irq is 0, BIOS error? "
- "Interrupts won't work\n");
+ qib_dev_err(dd,
+ "irq is 0, BIOS error? Interrupts won't work\n");
goto bail;
}
ret = request_irq(dd->pcidev->irq, qib_7322intr,
IRQF_SHARED, QIB_DRV_NAME, dd);
if (ret) {
- qib_dev_err(dd, "Couldn't setup INTx "
- "interrupt (irq=%d): %d\n",
- dd->pcidev->irq, ret);
+ qib_dev_err(dd,
+ "Couldn't setup INTx interrupt (irq=%d): %d\n",
+ dd->pcidev->irq, ret);
goto bail;
}
dd->cspec->irq = dd->pcidev->irq;
@@ -3185,8 +3194,9 @@ try_intx:
* Shouldn't happen since the enable said we could
* have as many as we are trying to setup here.
*/
- qib_dev_err(dd, "Couldn't setup MSIx "
- "interrupt (vec=%d, irq=%d): %d\n", msixnum,
+ qib_dev_err(dd,
+ "Couldn't setup MSIx interrupt (vec=%d, irq=%d): %d\n",
+ msixnum,
dd->cspec->msix_entries[msixnum].msix.vector,
ret);
qib_7322_nomsix(dd);
@@ -3305,8 +3315,9 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
(unsigned)SYM_FIELD(dd->revision, Revision_R, SW));
if (qib_singleport && (features >> PORT_SPD_CAP_SHIFT) & PORT_SPD_CAP) {
- qib_devinfo(dd->pcidev, "IB%u: Forced to single port mode"
- " by module parameter\n", dd->unit);
+ qib_devinfo(dd->pcidev,
+ "IB%u: Forced to single port mode by module parameter\n",
+ dd->unit);
features &= PORT_SPD_CAP;
}
@@ -3400,8 +3411,8 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
if (val == dd->revision)
break;
if (i == 5) {
- qib_dev_err(dd, "Failed to initialize after reset, "
- "unusable\n");
+ qib_dev_err(dd,
+ "Failed to initialize after reset, unusable\n");
ret = 0;
goto bail;
}
@@ -3432,8 +3443,8 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
if (qib_pcie_params(dd, dd->lbus_width,
&dd->cspec->num_msix_entries,
dd->cspec->msix_entries))
- qib_dev_err(dd, "Reset failed to setup PCIe or interrupts; "
- "continuing anyway\n");
+ qib_dev_err(dd,
+ "Reset failed to setup PCIe or interrupts; continuing anyway\n");
qib_setup_7322_interrupt(dd, 1);
@@ -3474,8 +3485,9 @@ static void qib_7322_put_tid(struct qib_devdata *dd, u64 __iomem *tidptr,
return;
}
if (chippa >= (1UL << IBA7322_TID_SZ_SHIFT)) {
- qib_dev_err(dd, "Physical page address 0x%lx "
- "larger than supported\n", pa);
+ qib_dev_err(dd,
+ "Physical page address 0x%lx larger than supported\n",
+ pa);
return;
}
@@ -4029,8 +4041,9 @@ static int qib_7322_set_loopback(struct qib_pportdata *ppd, const char *what)
Loopback);
/* enable heart beat again */
val = IBA7322_IBC_HRTBT_RMASK << IBA7322_IBC_HRTBT_LSB;
- qib_devinfo(ppd->dd->pcidev, "Disabling IB%u:%u IBC loopback "
- "(normal)\n", ppd->dd->unit, ppd->port);
+ qib_devinfo(ppd->dd->pcidev,
+ "Disabling IB%u:%u IBC loopback (normal)\n",
+ ppd->dd->unit, ppd->port);
} else
ret = -EINVAL;
if (!ret) {
@@ -4714,8 +4727,8 @@ static void init_7322_cntrnames(struct qib_devdata *dd)
dd->pport[i].cpspec->portcntrs = kmalloc(dd->cspec->nportcntrs
* sizeof(u64), GFP_KERNEL);
if (!dd->pport[i].cpspec->portcntrs)
- qib_dev_err(dd, "Failed allocation for"
- " portcounters\n");
+ qib_dev_err(dd,
+ "Failed allocation for portcounters\n");
}
}
@@ -4865,8 +4878,8 @@ static int qib_7322_intr_fallback(struct qib_devdata *dd)
if (!dd->cspec->num_msix_entries)
return 0; /* already using INTx */
- qib_devinfo(dd->pcidev, "MSIx interrupt not detected,"
- " trying INTx interrupts\n");
+ qib_devinfo(dd->pcidev,
+ "MSIx interrupt not detected, trying INTx interrupts\n");
qib_7322_nomsix(dd);
qib_enable_intx(dd->pcidev);
qib_setup_7322_interrupt(dd, 0);
@@ -5151,15 +5164,11 @@ static void try_7322_ipg(struct qib_pportdata *ppd)
goto retry;
if (!ibp->smi_ah) {
- struct ib_ah_attr attr;
struct ib_ah *ah;
- memset(&attr, 0, sizeof attr);
- attr.dlid = be16_to_cpu(IB_LID_PERMISSIVE);
- attr.port_num = ppd->port;
- ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+ ah = qib_create_qp0_ah(ibp, be16_to_cpu(IB_LID_PERMISSIVE));
if (IS_ERR(ah))
- ret = -EINVAL;
+ ret = PTR_ERR(ah);
else {
send_buf->ah = ah;
ibp->smi_ah = to_iah(ah);
@@ -5844,22 +5853,21 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
{
struct qib_devdata *dd;
unsigned long val;
- char *n;
+ int ret;
+
if (strlen(str) >= MAX_ATTEN_LEN) {
- printk(KERN_INFO QIB_DRV_NAME " txselect_values string "
- "too long\n");
+ pr_info("txselect_values string too long\n");
return -ENOSPC;
}
- val = simple_strtoul(str, &n, 0);
- if (n == str || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
+ ret = kstrtoul(str, 0, &val);
+ if (ret || val >= (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
TXDDS_MFG_SZ)) {
- printk(KERN_INFO QIB_DRV_NAME
- "txselect_values must start with a number < %d\n",
+ pr_info("txselect_values must start with a number < %d\n",
TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ + TXDDS_MFG_SZ);
- return -EINVAL;
+ return ret ? ret : -EINVAL;
}
- strcpy(txselect_list, str);
+ strcpy(txselect_list, str);
list_for_each_entry(dd, &qib_dev_list, list)
if (dd->deviceid == PCI_DEVICE_ID_QLOGIC_IB_7322)
set_no_qsfp_atten(dd, 1);
@@ -5882,11 +5890,10 @@ static int qib_late_7322_initreg(struct qib_devdata *dd)
qib_write_kreg(dd, kr_sendpioavailaddr, dd->pioavailregs_phys);
val = qib_read_kreg64(dd, kr_sendpioavailaddr);
if (val != dd->pioavailregs_phys) {
- qib_dev_err(dd, "Catastrophic software error, "
- "SendPIOAvailAddr written as %lx, "
- "read back as %llx\n",
- (unsigned long) dd->pioavailregs_phys,
- (unsigned long long) val);
+ qib_dev_err(dd,
+ "Catastrophic software error, SendPIOAvailAddr written as %lx, read back as %llx\n",
+ (unsigned long) dd->pioavailregs_phys,
+ (unsigned long long) val);
ret = -EINVAL;
}
@@ -6098,8 +6105,8 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
dd->revision = readq(&dd->kregbase[kr_revision]);
if ((dd->revision & 0xffffffffU) == 0xffffffffU) {
- qib_dev_err(dd, "Revision register read failure, "
- "giving up initialization\n");
+ qib_dev_err(dd,
+ "Revision register read failure, giving up initialization\n");
ret = -ENODEV;
goto bail;
}
@@ -6265,9 +6272,9 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
*/
if (!(dd->flags & QIB_HAS_QSFP)) {
if (!IS_QMH(dd) && !IS_QME(dd))
- qib_devinfo(dd->pcidev, "IB%u:%u: "
- "Unknown mezzanine card type\n",
- dd->unit, ppd->port);
+ qib_devinfo(dd->pcidev,
+ "IB%u:%u: Unknown mezzanine card type\n",
+ dd->unit, ppd->port);
cp->h1_val = IS_QMH(dd) ? H1_FORCE_QMH : H1_FORCE_QME;
/*
* Choose center value as default tx serdes setting
@@ -6922,8 +6929,8 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
dd->cspec->msix_entries[i].msix.entry = i;
if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
- qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
- "continuing anyway\n");
+ qib_dev_err(dd,
+ "Failed to setup PCIe or interrupts; continuing anyway\n");
/* may be less than we wanted, if not enough available */
dd->cspec->num_msix_entries = tabsize;
@@ -7276,8 +7283,7 @@ static void find_best_ent(struct qib_pportdata *ppd,
ppd->cpspec->no_eep < (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ +
TXDDS_MFG_SZ)) {
idx = ppd->cpspec->no_eep - (TXDDS_TABLE_SZ + TXDDS_EXTRA_SZ);
- printk(KERN_INFO QIB_DRV_NAME
- " IB%u:%u use idx %u into txdds_mfg\n",
+ pr_info("IB%u:%u use idx %u into txdds_mfg\n",
ppd->dd->unit, ppd->port, idx);
*sdr_dds = &txdds_extra_mfg[idx];
*ddr_dds = &txdds_extra_mfg[idx];
@@ -7432,11 +7438,11 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
if (enable && !state) {
- printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
+ pr_info("IB%u:%u Turning LOS on\n",
ppd->dd->unit, ppd->port);
data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
} else if (!enable && state) {
- printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
+ pr_info("IB%u:%u Turning LOS off\n",
ppd->dd->unit, ppd->port);
data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
}
@@ -7672,8 +7678,7 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
}
}
if (chan_done) {
- printk(KERN_INFO QIB_DRV_NAME
- " Serdes %d calibration not done after .5 sec: 0x%x\n",
+ pr_info("Serdes %d calibration not done after .5 sec: 0x%x\n",
IBSD(ppd->hw_pidx), chan_done);
} else {
for (chan = 0; chan < SERDES_CHANS; ++chan) {
@@ -7681,9 +7686,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
(chan + (chan >> 1)),
25, 0, 0);
if ((~rxcaldone & (u32)BMASK(10, 10)) == 0)
- printk(KERN_INFO QIB_DRV_NAME
- " Serdes %d chan %d calibration "
- "failed\n", IBSD(ppd->hw_pidx), chan);
+ pr_info("Serdes %d chan %d calibration failed\n",
+ IBSD(ppd->hw_pidx), chan);
}
}
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index dc14e100a7f1..4443adfcd9ee 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -38,9 +38,14 @@
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
+#include <linux/printk.h>
#include "qib.h"
#include "qib_common.h"
+#include "qib_mad.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) QIB_DRV_NAME ": " fmt
/*
* min buffers we want to have per context, after driver
@@ -71,6 +76,9 @@ unsigned qib_n_krcv_queues;
module_param_named(krcvqs, qib_n_krcv_queues, uint, S_IRUGO);
MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
+unsigned qib_cc_table_size;
+module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
+MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
/*
* qib_wc_pat parameter:
* 0 is WC via MTRR
@@ -120,8 +128,8 @@ int qib_create_ctxts(struct qib_devdata *dd)
*/
dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
if (!dd->rcd) {
- qib_dev_err(dd, "Unable to allocate ctxtdata array, "
- "failing\n");
+ qib_dev_err(dd,
+ "Unable to allocate ctxtdata array, failing\n");
ret = -ENOMEM;
goto done;
}
@@ -137,8 +145,8 @@ int qib_create_ctxts(struct qib_devdata *dd)
ppd = dd->pport + (i % dd->num_pports);
rcd = qib_create_ctxtdata(ppd, i);
if (!rcd) {
- qib_dev_err(dd, "Unable to allocate ctxtdata"
- " for Kernel ctxt, failing\n");
+ qib_dev_err(dd,
+ "Unable to allocate ctxtdata for Kernel ctxt, failing\n");
ret = -ENOMEM;
goto done;
}
@@ -199,6 +207,7 @@ struct qib_ctxtdata *qib_create_ctxtdata(struct qib_pportdata *ppd, u32 ctxt)
void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
u8 hw_pidx, u8 port)
{
+ int size;
ppd->dd = dd;
ppd->hw_pidx = hw_pidx;
ppd->port = port; /* IB port number, not index */
@@ -210,6 +219,83 @@ void qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
init_timer(&ppd->symerr_clear_timer);
ppd->symerr_clear_timer.function = qib_clear_symerror_on_linkup;
ppd->symerr_clear_timer.data = (unsigned long)ppd;
+
+ ppd->qib_wq = NULL;
+
+ spin_lock_init(&ppd->cc_shadow_lock);
+
+ if (qib_cc_table_size < IB_CCT_MIN_ENTRIES)
+ goto bail;
+
+ ppd->cc_supported_table_entries = min(max_t(int, qib_cc_table_size,
+ IB_CCT_MIN_ENTRIES), IB_CCT_ENTRIES*IB_CC_TABLE_CAP_DEFAULT);
+
+ ppd->cc_max_table_entries =
+ ppd->cc_supported_table_entries/IB_CCT_ENTRIES;
+
+ size = IB_CC_TABLE_CAP_DEFAULT * sizeof(struct ib_cc_table_entry)
+ * IB_CCT_ENTRIES;
+ ppd->ccti_entries = kzalloc(size, GFP_KERNEL);
+ if (!ppd->ccti_entries) {
+ qib_dev_err(dd,
+ "failed to allocate congestion control table for port %d!\n",
+ port);
+ goto bail;
+ }
+
+ size = IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry);
+ ppd->congestion_entries = kzalloc(size, GFP_KERNEL);
+ if (!ppd->congestion_entries) {
+ qib_dev_err(dd,
+ "failed to allocate congestion setting list for port %d!\n",
+ port);
+ goto bail_1;
+ }
+
+ size = sizeof(struct cc_table_shadow);
+ ppd->ccti_entries_shadow = kzalloc(size, GFP_KERNEL);
+ if (!ppd->ccti_entries_shadow) {
+ qib_dev_err(dd,
+ "failed to allocate shadow ccti list for port %d!\n",
+ port);
+ goto bail_2;
+ }
+
+ size = sizeof(struct ib_cc_congestion_setting_attr);
+ ppd->congestion_entries_shadow = kzalloc(size, GFP_KERNEL);
+ if (!ppd->congestion_entries_shadow) {
+ qib_dev_err(dd,
+ "failed to allocate shadow congestion setting list for port %d!\n",
+ port);
+ goto bail_3;
+ }
+
+ return;
+
+bail_3:
+ kfree(ppd->ccti_entries_shadow);
+ ppd->ccti_entries_shadow = NULL;
+bail_2:
+ kfree(ppd->congestion_entries);
+ ppd->congestion_entries = NULL;
+bail_1:
+ kfree(ppd->ccti_entries);
+ ppd->ccti_entries = NULL;
+bail:
+ /* User is intentionally disabling the congestion control agent */
+ if (!qib_cc_table_size)
+ return;
+
+ if (qib_cc_table_size < IB_CCT_MIN_ENTRIES) {
+ qib_cc_table_size = 0;
+ qib_dev_err(dd,
+ "Congestion Control table size %d less than minimum %d for port %d\n",
+ qib_cc_table_size, IB_CCT_MIN_ENTRIES, port);
+ }
+
+ qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n",
+ port);
+ return;
}
static int init_pioavailregs(struct qib_devdata *dd)
@@ -221,8 +307,8 @@ static int init_pioavailregs(struct qib_devdata *dd)
&dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
GFP_KERNEL);
if (!dd->pioavailregs_dma) {
- qib_dev_err(dd, "failed to allocate PIOavail reg area "
- "in memory\n");
+ qib_dev_err(dd,
+ "failed to allocate PIOavail reg area in memory\n");
ret = -ENOMEM;
goto done;
}
@@ -277,15 +363,15 @@ static void init_shadow_tids(struct qib_devdata *dd)
pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
if (!pages) {
- qib_dev_err(dd, "failed to allocate shadow page * "
- "array, no expected sends!\n");
+ qib_dev_err(dd,
+ "failed to allocate shadow page * array, no expected sends!\n");
goto bail;
}
addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
if (!addrs) {
- qib_dev_err(dd, "failed to allocate shadow dma handle "
- "array, no expected sends!\n");
+ qib_dev_err(dd,
+ "failed to allocate shadow dma handle array, no expected sends!\n");
goto bail_free;
}
@@ -309,13 +395,13 @@ static int loadtime_init(struct qib_devdata *dd)
if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
- qib_dev_err(dd, "Driver only handles version %d, "
- "chip swversion is %d (%llx), failng\n",
- QIB_CHIP_SWVERSION,
- (int)(dd->revision >>
+ qib_dev_err(dd,
+ "Driver only handles version %d, chip swversion is %d (%llx), failng\n",
+ QIB_CHIP_SWVERSION,
+ (int)(dd->revision >>
QLOGIC_IB_R_SOFTWARE_SHIFT) &
- QLOGIC_IB_R_SOFTWARE_MASK,
- (unsigned long long) dd->revision);
+ QLOGIC_IB_R_SOFTWARE_MASK,
+ (unsigned long long) dd->revision);
ret = -ENOSYS;
goto done;
}
@@ -419,8 +505,8 @@ static void verify_interrupt(unsigned long opaque)
*/
if (dd->int_counter == 0) {
if (!dd->f_intr_fallback(dd))
- dev_err(&dd->pcidev->dev, "No interrupts detected, "
- "not usable.\n");
+ dev_err(&dd->pcidev->dev,
+ "No interrupts detected, not usable.\n");
else /* re-arm the timer to see if fallback works */
mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
}
@@ -483,6 +569,41 @@ static void init_piobuf_state(struct qib_devdata *dd)
}
/**
+ * qib_create_workqueues - create per port workqueues
+ * @dd: the qlogic_ib device
+ */
+static int qib_create_workqueues(struct qib_devdata *dd)
+{
+ int pidx;
+ struct qib_pportdata *ppd;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (!ppd->qib_wq) {
+ char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
+ snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
+ dd->unit, pidx);
+ ppd->qib_wq =
+ create_singlethread_workqueue(wq_name);
+ if (!ppd->qib_wq)
+ goto wq_error;
+ }
+ }
+ return 0;
+wq_error:
+ pr_err("create_singlethread_workqueue failed for port %d\n",
+ pidx + 1);
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+ if (ppd->qib_wq) {
+ destroy_workqueue(ppd->qib_wq);
+ ppd->qib_wq = NULL;
+ }
+ }
+ return -ENOMEM;
+}
+
+/**
* qib_init - do the actual initialization sequence on the chip
* @dd: the qlogic_ib device
* @reinit: reinitializing, so don't allocate new memory
@@ -547,8 +668,8 @@ int qib_init(struct qib_devdata *dd, int reinit)
if (!lastfail)
lastfail = qib_setup_eagerbufs(rcd);
if (lastfail) {
- qib_dev_err(dd, "failed to allocate kernel ctxt's "
- "rcvhdrq and/or egr bufs\n");
+ qib_dev_err(dd,
+ "failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
continue;
}
}
@@ -764,6 +885,11 @@ static void qib_shutdown_device(struct qib_devdata *dd)
* We can't count on interrupts since we are stopping.
*/
dd->f_quiet_serdes(ppd);
+
+ if (ppd->qib_wq) {
+ destroy_workqueue(ppd->qib_wq);
+ ppd->qib_wq = NULL;
+ }
}
qib_update_eeprom_log(dd);
@@ -893,8 +1019,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
/* 1 GiB/sec, slightly over IB SDR line rate */
if (lcnt < (emsecs * 1024U))
qib_dev_err(dd,
- "Performance problem: bandwidth to PIO buffers is "
- "only %u MiB/sec\n",
+ "Performance problem: bandwidth to PIO buffers is only %u MiB/sec\n",
lcnt / (u32) emsecs);
preempt_enable();
@@ -967,8 +1092,8 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
if (qib_cpulist)
qib_cpulist_count = count;
else
- qib_early_err(&pdev->dev, "Could not alloc cpulist "
- "info, cpu affinity might be wrong\n");
+ qib_early_err(&pdev->dev,
+ "Could not alloc cpulist info, cpu affinity might be wrong\n");
}
bail:
@@ -1057,21 +1182,20 @@ static int __init qlogic_ib_init(void)
*/
idr_init(&qib_unit_table);
if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) {
- printk(KERN_ERR QIB_DRV_NAME ": idr_pre_get() failed\n");
+ pr_err("idr_pre_get() failed\n");
ret = -ENOMEM;
goto bail_cq_wq;
}
ret = pci_register_driver(&qib_driver);
if (ret < 0) {
- printk(KERN_ERR QIB_DRV_NAME
- ": Unable to register driver: error %d\n", -ret);
+ pr_err("Unable to register driver: error %d\n", -ret);
goto bail_unit;
}
/* not fatal if it doesn't work */
if (qib_init_qibfs())
- printk(KERN_ERR QIB_DRV_NAME ": Unable to register ipathfs\n");
+ pr_err("Unable to register ipathfs\n");
goto bail; /* all OK */
bail_unit:
@@ -1095,9 +1219,9 @@ static void __exit qlogic_ib_cleanup(void)
ret = qib_exit_qibfs();
if (ret)
- printk(KERN_ERR QIB_DRV_NAME ": "
- "Unable to cleanup counter filesystem: "
- "error %d\n", -ret);
+ pr_err(
+ "Unable to cleanup counter filesystem: error %d\n",
+ -ret);
pci_unregister_driver(&qib_driver);
@@ -1121,10 +1245,24 @@ static void cleanup_device_data(struct qib_devdata *dd)
unsigned long flags;
/* users can't do anything more with chip */
- for (pidx = 0; pidx < dd->num_pports; ++pidx)
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
if (dd->pport[pidx].statusp)
*dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT;
+ spin_lock(&dd->pport[pidx].cc_shadow_lock);
+
+ kfree(dd->pport[pidx].congestion_entries);
+ dd->pport[pidx].congestion_entries = NULL;
+ kfree(dd->pport[pidx].ccti_entries);
+ dd->pport[pidx].ccti_entries = NULL;
+ kfree(dd->pport[pidx].ccti_entries_shadow);
+ dd->pport[pidx].ccti_entries_shadow = NULL;
+ kfree(dd->pport[pidx].congestion_entries_shadow);
+ dd->pport[pidx].congestion_entries_shadow = NULL;
+
+ spin_unlock(&dd->pport[pidx].cc_shadow_lock);
+ }
+
if (!qib_wc_pat)
qib_disable_wc(dd);
@@ -1223,9 +1361,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
#ifdef CONFIG_PCI_MSI
dd = qib_init_iba6120_funcs(pdev, ent);
#else
- qib_early_err(&pdev->dev, "QLogic PCIE device 0x%x cannot "
- "work if CONFIG_PCI_MSI is not enabled\n",
- ent->device);
+ qib_early_err(&pdev->dev,
+ "QLogic PCIE device 0x%x cannot work if CONFIG_PCI_MSI is not enabled\n",
+ ent->device);
dd = ERR_PTR(-ENODEV);
#endif
break;
@@ -1239,8 +1377,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
break;
default:
- qib_early_err(&pdev->dev, "Failing on unknown QLogic "
- "deviceid 0x%x\n", ent->device);
+ qib_early_err(&pdev->dev,
+ "Failing on unknown QLogic deviceid 0x%x\n",
+ ent->device);
ret = -ENODEV;
}
@@ -1249,6 +1388,10 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
if (ret)
goto bail; /* error already printed */
+ ret = qib_create_workqueues(dd);
+ if (ret)
+ goto bail;
+
/* do the generic initialization */
initfail = qib_init(dd, 0);
@@ -1293,9 +1436,9 @@ static int __devinit qib_init_one(struct pci_dev *pdev,
if (!qib_wc_pat) {
ret = qib_enable_wc(dd);
if (ret) {
- qib_dev_err(dd, "Write combining not enabled "
- "(err %d): performance may be poor\n",
- -ret);
+ qib_dev_err(dd,
+ "Write combining not enabled (err %d): performance may be poor\n",
+ -ret);
ret = 0;
}
}
@@ -1361,9 +1504,9 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
gfp_flags | __GFP_COMP);
if (!rcd->rcvhdrq) {
- qib_dev_err(dd, "attempt to allocate %d bytes "
- "for ctxt %u rcvhdrq failed\n",
- amt, rcd->ctxt);
+ qib_dev_err(dd,
+ "attempt to allocate %d bytes for ctxt %u rcvhdrq failed\n",
+ amt, rcd->ctxt);
goto bail;
}
@@ -1392,8 +1535,9 @@ int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd)
return 0;
bail_free:
- qib_dev_err(dd, "attempt to allocate 1 page for ctxt %u "
- "rcvhdrqtailaddr failed\n", rcd->ctxt);
+ qib_dev_err(dd,
+ "attempt to allocate 1 page for ctxt %u rcvhdrqtailaddr failed\n",
+ rcd->ctxt);
vfree(rcd->user_event_mask);
rcd->user_event_mask = NULL;
bail_free_hdrq:
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index 6ae57d23004a..f4918f2165ec 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -224,15 +224,15 @@ void qib_bad_intrstatus(struct qib_devdata *dd)
* We print the message and disable interrupts, in hope of
* having a better chance of debugging the problem.
*/
- qib_dev_err(dd, "Read of chip interrupt status failed"
- " disabling interrupts\n");
+ qib_dev_err(dd,
+ "Read of chip interrupt status failed disabling interrupts\n");
if (allbits++) {
/* disable interrupt delivery, something is very wrong */
if (allbits == 2)
dd->f_set_intr_state(dd, 0);
if (allbits == 3) {
- qib_dev_err(dd, "2nd bad interrupt status, "
- "unregistering interrupts\n");
+ qib_dev_err(dd,
+ "2nd bad interrupt status, unregistering interrupts\n");
dd->flags |= QIB_BADINTR;
dd->flags &= ~QIB_INITTED;
dd->f_free_irq(dd);
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 8fd19a47df0c..e9486c74c226 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -35,21 +35,41 @@
/**
* qib_alloc_lkey - allocate an lkey
- * @rkt: lkey table in which to allocate the lkey
* @mr: memory region that this lkey protects
+ * @dma_region: 0->normal key, 1->restricted DMA key
+ *
+ * Returns 0 if successful, otherwise returns -errno.
+ *
+ * Increments mr reference count as required.
+ *
+ * Sets the lkey field mr for non-dma regions.
*
- * Returns 1 if successful, otherwise returns 0.
*/
-int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
+int qib_alloc_lkey(struct qib_mregion *mr, int dma_region)
{
unsigned long flags;
u32 r;
u32 n;
- int ret;
+ int ret = 0;
+ struct qib_ibdev *dev = to_idev(mr->pd->device);
+ struct qib_lkey_table *rkt = &dev->lk_table;
spin_lock_irqsave(&rkt->lock, flags);
+ /* special case for dma_mr lkey == 0 */
+ if (dma_region) {
+ struct qib_mregion *tmr;
+
+ tmr = rcu_dereference(dev->dma_mr);
+ if (!tmr) {
+ qib_get_mr(mr);
+ rcu_assign_pointer(dev->dma_mr, mr);
+ mr->lkey_published = 1;
+ }
+ goto success;
+ }
+
/* Find the next available LKEY */
r = rkt->next;
n = r;
@@ -57,11 +77,8 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
if (rkt->table[r] == NULL)
break;
r = (r + 1) & (rkt->max - 1);
- if (r == n) {
- spin_unlock_irqrestore(&rkt->lock, flags);
- ret = 0;
+ if (r == n)
goto bail;
- }
}
rkt->next = (r + 1) & (rkt->max - 1);
/*
@@ -76,57 +93,58 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
mr->lkey |= 1 << 8;
rkt->gen++;
}
- rkt->table[r] = mr;
+ qib_get_mr(mr);
+ rcu_assign_pointer(rkt->table[r], mr);
+ mr->lkey_published = 1;
+success:
spin_unlock_irqrestore(&rkt->lock, flags);
-
- ret = 1;
-
-bail:
+out:
return ret;
+bail:
+ spin_unlock_irqrestore(&rkt->lock, flags);
+ ret = -ENOMEM;
+ goto out;
}
/**
* qib_free_lkey - free an lkey
- * @rkt: table from which to free the lkey
- * @lkey: lkey id to free
+ * @mr: mr to free from tables
*/
-int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
+void qib_free_lkey(struct qib_mregion *mr)
{
unsigned long flags;
u32 lkey = mr->lkey;
u32 r;
- int ret;
+ struct qib_ibdev *dev = to_idev(mr->pd->device);
+ struct qib_lkey_table *rkt = &dev->lk_table;
- spin_lock_irqsave(&dev->lk_table.lock, flags);
- if (lkey == 0) {
- if (dev->dma_mr && dev->dma_mr == mr) {
- ret = atomic_read(&dev->dma_mr->refcount);
- if (!ret)
- dev->dma_mr = NULL;
- } else
- ret = 0;
- } else {
+ spin_lock_irqsave(&rkt->lock, flags);
+ if (!mr->lkey_published)
+ goto out;
+ if (lkey == 0)
+ rcu_assign_pointer(dev->dma_mr, NULL);
+ else {
r = lkey >> (32 - ib_qib_lkey_table_size);
- ret = atomic_read(&dev->lk_table.table[r]->refcount);
- if (!ret)
- dev->lk_table.table[r] = NULL;
+ rcu_assign_pointer(rkt->table[r], NULL);
}
- spin_unlock_irqrestore(&dev->lk_table.lock, flags);
-
- if (ret)
- ret = -EBUSY;
- return ret;
+ qib_put_mr(mr);
+ mr->lkey_published = 0;
+out:
+ spin_unlock_irqrestore(&rkt->lock, flags);
}
/**
* qib_lkey_ok - check IB SGE for validity and initialize
* @rkt: table containing lkey to check SGE against
+ * @pd: protection domain
* @isge: outgoing internal SGE
* @sge: SGE to check
* @acc: access flags
*
* Return 1 if valid and successful, otherwise returns 0.
*
+ * increments the reference count upon success
+ *
* Check the IB SGE for validity and initialize our internal version
* of it.
*/
@@ -136,24 +154,25 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
struct qib_mregion *mr;
unsigned n, m;
size_t off;
- unsigned long flags;
/*
* We use LKEY == zero for kernel virtual addresses
* (see qib_get_dma_mr and qib_dma.c).
*/
- spin_lock_irqsave(&rkt->lock, flags);
+ rcu_read_lock();
if (sge->lkey == 0) {
struct qib_ibdev *dev = to_idev(pd->ibpd.device);
if (pd->user)
goto bail;
- if (!dev->dma_mr)
+ mr = rcu_dereference(dev->dma_mr);
+ if (!mr)
goto bail;
- atomic_inc(&dev->dma_mr->refcount);
- spin_unlock_irqrestore(&rkt->lock, flags);
+ if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
+ goto bail;
+ rcu_read_unlock();
- isge->mr = dev->dma_mr;
+ isge->mr = mr;
isge->vaddr = (void *) sge->addr;
isge->length = sge->length;
isge->sge_length = sge->length;
@@ -161,18 +180,18 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
isge->n = 0;
goto ok;
}
- mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
- if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
- mr->pd != &pd->ibpd))
+ mr = rcu_dereference(
+ rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
goto bail;
off = sge->addr - mr->user_base;
- if (unlikely(sge->addr < mr->user_base ||
- off + sge->length > mr->length ||
- (mr->access_flags & acc) != acc))
+ if (unlikely(sge->addr < mr->iova || off + sge->length > mr->length ||
+ (mr->access_flags & acc) == 0))
goto bail;
- atomic_inc(&mr->refcount);
- spin_unlock_irqrestore(&rkt->lock, flags);
+ if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
+ goto bail;
+ rcu_read_unlock();
off += mr->offset;
if (mr->page_shift) {
@@ -208,20 +227,22 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
ok:
return 1;
bail:
- spin_unlock_irqrestore(&rkt->lock, flags);
+ rcu_read_unlock();
return 0;
}
/**
* qib_rkey_ok - check the IB virtual address, length, and RKEY
- * @dev: infiniband device
- * @ss: SGE state
+ * @qp: qp for validation
+ * @sge: SGE state
* @len: length of data
* @vaddr: virtual address to place data
* @rkey: rkey to check
* @acc: access flags
*
* Return 1 if successful, otherwise 0.
+ *
+ * increments the reference count upon success
*/
int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc)
@@ -230,25 +251,26 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
struct qib_mregion *mr;
unsigned n, m;
size_t off;
- unsigned long flags;
/*
* We use RKEY == zero for kernel virtual addresses
* (see qib_get_dma_mr and qib_dma.c).
*/
- spin_lock_irqsave(&rkt->lock, flags);
+ rcu_read_lock();
if (rkey == 0) {
struct qib_pd *pd = to_ipd(qp->ibqp.pd);
struct qib_ibdev *dev = to_idev(pd->ibpd.device);
if (pd->user)
goto bail;
- if (!dev->dma_mr)
+ mr = rcu_dereference(dev->dma_mr);
+ if (!mr)
goto bail;
- atomic_inc(&dev->dma_mr->refcount);
- spin_unlock_irqrestore(&rkt->lock, flags);
+ if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
+ goto bail;
+ rcu_read_unlock();
- sge->mr = dev->dma_mr;
+ sge->mr = mr;
sge->vaddr = (void *) vaddr;
sge->length = len;
sge->sge_length = len;
@@ -257,16 +279,18 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
goto ok;
}
- mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
- if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
+ mr = rcu_dereference(
+ rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]);
+ if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
goto bail;
off = vaddr - mr->iova;
if (unlikely(vaddr < mr->iova || off + len > mr->length ||
(mr->access_flags & acc) == 0))
goto bail;
- atomic_inc(&mr->refcount);
- spin_unlock_irqrestore(&rkt->lock, flags);
+ if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
+ goto bail;
+ rcu_read_unlock();
off += mr->offset;
if (mr->page_shift) {
@@ -302,7 +326,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
ok:
return 1;
bail:
- spin_unlock_irqrestore(&rkt->lock, flags);
+ rcu_read_unlock();
return 0;
}
@@ -325,7 +349,9 @@ int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr)
if (pd->user || rkey == 0)
goto bail;
- mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
+ mr = rcu_dereference_protected(
+ rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))],
+ lockdep_is_held(&rkt->lock));
if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
goto bail;
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 43390217a026..19f1e6c45fb6 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -49,6 +49,18 @@ static int reply(struct ib_smp *smp)
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
+static int reply_failure(struct ib_smp *smp)
+{
+ /*
+ * The verbs framework will handle the directed/LID route
+ * packet changes.
+ */
+ smp->method = IB_MGMT_METHOD_GET_RESP;
+ if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
+ smp->status |= IB_SMP_DIRECTION;
+ return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY;
+}
+
static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
{
struct ib_mad_send_buf *send_buf;
@@ -90,14 +102,10 @@ static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len)
if (!ibp->sm_ah) {
if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) {
struct ib_ah *ah;
- struct ib_ah_attr attr;
- memset(&attr, 0, sizeof attr);
- attr.dlid = ibp->sm_lid;
- attr.port_num = ppd_from_ibp(ibp)->port;
- ah = ib_create_ah(ibp->qp0->ibqp.pd, &attr);
+ ah = qib_create_qp0_ah(ibp, ibp->sm_lid);
if (IS_ERR(ah))
- ret = -EINVAL;
+ ret = PTR_ERR(ah);
else {
send_buf->ah = ah;
ibp->sm_ah = to_iah(ah);
@@ -2051,6 +2059,298 @@ bail:
return ret;
}
+static int cc_get_classportinfo(struct ib_cc_mad *ccp,
+ struct ib_device *ibdev)
+{
+ struct ib_cc_classportinfo_attr *p =
+ (struct ib_cc_classportinfo_attr *)ccp->mgmt_data;
+
+ memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
+
+ p->base_version = 1;
+ p->class_version = 1;
+ p->cap_mask = 0;
+
+ /*
+ * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec.
+ */
+ p->resp_time_value = 18;
+
+ return reply((struct ib_smp *) ccp);
+}
+
+static int cc_get_congestion_info(struct ib_cc_mad *ccp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_cc_info_attr *p =
+ (struct ib_cc_info_attr *)ccp->mgmt_data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
+
+ p->congestion_info = 0;
+ p->control_table_cap = ppd->cc_max_table_entries;
+
+ return reply((struct ib_smp *) ccp);
+}
+
+static int cc_get_congestion_setting(struct ib_cc_mad *ccp,
+ struct ib_device *ibdev, u8 port)
+{
+ int i;
+ struct ib_cc_congestion_setting_attr *p =
+ (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ib_cc_congestion_entry_shadow *entries;
+
+ memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
+
+ spin_lock(&ppd->cc_shadow_lock);
+
+ entries = ppd->congestion_entries_shadow->entries;
+ p->port_control = cpu_to_be16(
+ ppd->congestion_entries_shadow->port_control);
+ p->control_map = cpu_to_be16(
+ ppd->congestion_entries_shadow->control_map);
+ for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
+ p->entries[i].ccti_increase = entries[i].ccti_increase;
+ p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer);
+ p->entries[i].trigger_threshold = entries[i].trigger_threshold;
+ p->entries[i].ccti_min = entries[i].ccti_min;
+ }
+
+ spin_unlock(&ppd->cc_shadow_lock);
+
+ return reply((struct ib_smp *) ccp);
+}
+
+static int cc_get_congestion_control_table(struct ib_cc_mad *ccp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_cc_table_attr *p =
+ (struct ib_cc_table_attr *)ccp->mgmt_data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
+ u32 max_cct_block;
+ u32 cct_entry;
+ struct ib_cc_table_entry_shadow *entries;
+ int i;
+
+ /* Is the table index more than what is supported? */
+ if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
+ goto bail;
+
+ memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data));
+
+ spin_lock(&ppd->cc_shadow_lock);
+
+ max_cct_block =
+ (ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES;
+ max_cct_block = max_cct_block ? max_cct_block - 1 : 0;
+
+ if (cct_block_index > max_cct_block) {
+ spin_unlock(&ppd->cc_shadow_lock);
+ goto bail;
+ }
+
+ ccp->attr_mod = cpu_to_be32(cct_block_index);
+
+ cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1);
+
+ cct_entry--;
+
+ p->ccti_limit = cpu_to_be16(cct_entry);
+
+ entries = &ppd->ccti_entries_shadow->
+ entries[IB_CCT_ENTRIES * cct_block_index];
+ cct_entry %= IB_CCT_ENTRIES;
+
+ for (i = 0; i <= cct_entry; i++)
+ p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry);
+
+ spin_unlock(&ppd->cc_shadow_lock);
+
+ return reply((struct ib_smp *) ccp);
+
+bail:
+ return reply_failure((struct ib_smp *) ccp);
+}
+
+static int cc_set_congestion_setting(struct ib_cc_mad *ccp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_cc_congestion_setting_attr *p =
+ (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ int i;
+
+ ppd->cc_sl_control_map = be16_to_cpu(p->control_map);
+
+ for (i = 0; i < IB_CC_CCS_ENTRIES; i++) {
+ ppd->congestion_entries[i].ccti_increase =
+ p->entries[i].ccti_increase;
+
+ ppd->congestion_entries[i].ccti_timer =
+ be16_to_cpu(p->entries[i].ccti_timer);
+
+ ppd->congestion_entries[i].trigger_threshold =
+ p->entries[i].trigger_threshold;
+
+ ppd->congestion_entries[i].ccti_min =
+ p->entries[i].ccti_min;
+ }
+
+ return reply((struct ib_smp *) ccp);
+}
+
+static int cc_set_congestion_control_table(struct ib_cc_mad *ccp,
+ struct ib_device *ibdev, u8 port)
+{
+ struct ib_cc_table_attr *p =
+ (struct ib_cc_table_attr *)ccp->mgmt_data;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 cct_block_index = be32_to_cpu(ccp->attr_mod);
+ u32 cct_entry;
+ struct ib_cc_table_entry_shadow *entries;
+ int i;
+
+ /* Is the table index more than what is supported? */
+ if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1)
+ goto bail;
+
+ /* If this packet is the first in the sequence then
+ * zero the total table entry count.
+ */
+ if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES)
+ ppd->total_cct_entry = 0;
+
+ cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES;
+
+ /* ccti_limit is 0 to 63 */
+ ppd->total_cct_entry += (cct_entry + 1);
+
+ if (ppd->total_cct_entry > ppd->cc_supported_table_entries)
+ goto bail;
+
+ ppd->ccti_limit = be16_to_cpu(p->ccti_limit);
+
+ entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index);
+
+ for (i = 0; i <= cct_entry; i++)
+ entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry);
+
+ spin_lock(&ppd->cc_shadow_lock);
+
+ ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1;
+ memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries,
+ (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry)));
+
+ ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED;
+ ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map;
+ memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries,
+ IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry));
+
+ spin_unlock(&ppd->cc_shadow_lock);
+
+ return reply((struct ib_smp *) ccp);
+
+bail:
+ return reply_failure((struct ib_smp *) ccp);
+}
+
+static int check_cc_key(struct qib_ibport *ibp,
+ struct ib_cc_mad *ccp, int mad_flags)
+{
+ return 0;
+}
+
+static int process_cc(struct ib_device *ibdev, int mad_flags,
+ u8 port, struct ib_mad *in_mad,
+ struct ib_mad *out_mad)
+{
+ struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ int ret;
+
+ *out_mad = *in_mad;
+
+ if (ccp->class_version != 2) {
+ ccp->status |= IB_SMP_UNSUP_VERSION;
+ ret = reply((struct ib_smp *)ccp);
+ goto bail;
+ }
+
+ ret = check_cc_key(ibp, ccp, mad_flags);
+ if (ret)
+ goto bail;
+
+ switch (ccp->method) {
+ case IB_MGMT_METHOD_GET:
+ switch (ccp->attr_id) {
+ case IB_CC_ATTR_CLASSPORTINFO:
+ ret = cc_get_classportinfo(ccp, ibdev);
+ goto bail;
+
+ case IB_CC_ATTR_CONGESTION_INFO:
+ ret = cc_get_congestion_info(ccp, ibdev, port);
+ goto bail;
+
+ case IB_CC_ATTR_CA_CONGESTION_SETTING:
+ ret = cc_get_congestion_setting(ccp, ibdev, port);
+ goto bail;
+
+ case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
+ ret = cc_get_congestion_control_table(ccp, ibdev, port);
+ goto bail;
+
+ /* FALLTHROUGH */
+ default:
+ ccp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_smp *) ccp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_SET:
+ switch (ccp->attr_id) {
+ case IB_CC_ATTR_CA_CONGESTION_SETTING:
+ ret = cc_set_congestion_setting(ccp, ibdev, port);
+ goto bail;
+
+ case IB_CC_ATTR_CONGESTION_CONTROL_TABLE:
+ ret = cc_set_congestion_control_table(ccp, ibdev, port);
+ goto bail;
+
+ /* FALLTHROUGH */
+ default:
+ ccp->status |= IB_SMP_UNSUP_METH_ATTR;
+ ret = reply((struct ib_smp *) ccp);
+ goto bail;
+ }
+
+ case IB_MGMT_METHOD_GET_RESP:
+ /*
+ * The ib_mad module will call us to process responses
+ * before checking for other consumers.
+ * Just tell the caller to process it normally.
+ */
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+
+ case IB_MGMT_METHOD_TRAP:
+ default:
+ ccp->status |= IB_SMP_UNSUP_METHOD;
+ ret = reply((struct ib_smp *) ccp);
+ }
+
+bail:
+ return ret;
+}
+
/**
* qib_process_mad - process an incoming MAD packet
* @ibdev: the infiniband device this packet came in on
@@ -2075,6 +2375,8 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
int ret;
+ struct qib_ibport *ibp = to_iport(ibdev, port);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
switch (in_mad->mad_hdr.mgmt_class) {
case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
@@ -2086,6 +2388,15 @@ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port,
ret = process_perf(ibdev, port, in_mad, out_mad);
goto bail;
+ case IB_MGMT_CLASS_CONG_MGMT:
+ if (!ppd->congestion_entries_shadow ||
+ !qib_cc_table_size) {
+ ret = IB_MAD_RESULT_SUCCESS;
+ goto bail;
+ }
+ ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad);
+ goto bail;
+
default:
ret = IB_MAD_RESULT_SUCCESS;
}
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
index ecc416cdbaaa..57bd3fa016bc 100644
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -31,6 +31,8 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#ifndef _QIB_MAD_H
+#define _QIB_MAD_H
#include <rdma/ib_pma.h>
@@ -223,6 +225,198 @@ struct ib_pma_portcounters_cong {
#define IB_PMA_SEL_CONG_ROUTING 0x08
/*
+ * Congestion control class attributes
+ */
+#define IB_CC_ATTR_CLASSPORTINFO cpu_to_be16(0x0001)
+#define IB_CC_ATTR_NOTICE cpu_to_be16(0x0002)
+#define IB_CC_ATTR_CONGESTION_INFO cpu_to_be16(0x0011)
+#define IB_CC_ATTR_CONGESTION_KEY_INFO cpu_to_be16(0x0012)
+#define IB_CC_ATTR_CONGESTION_LOG cpu_to_be16(0x0013)
+#define IB_CC_ATTR_SWITCH_CONGESTION_SETTING cpu_to_be16(0x0014)
+#define IB_CC_ATTR_SWITCH_PORT_CONGESTION_SETTING cpu_to_be16(0x0015)
+#define IB_CC_ATTR_CA_CONGESTION_SETTING cpu_to_be16(0x0016)
+#define IB_CC_ATTR_CONGESTION_CONTROL_TABLE cpu_to_be16(0x0017)
+#define IB_CC_ATTR_TIME_STAMP cpu_to_be16(0x0018)
+
+/* generalizations for threshold values */
+#define IB_CC_THRESHOLD_NONE 0x0
+#define IB_CC_THRESHOLD_MIN 0x1
+#define IB_CC_THRESHOLD_MAX 0xf
+
+/* CCA MAD header constants */
+#define IB_CC_MAD_LOGDATA_LEN 32
+#define IB_CC_MAD_MGMTDATA_LEN 192
+
+struct ib_cc_mad {
+ u8 base_version;
+ u8 mgmt_class;
+ u8 class_version;
+ u8 method;
+ __be16 status;
+ __be16 class_specific;
+ __be64 tid;
+ __be16 attr_id;
+ __be16 resv;
+ __be32 attr_mod;
+ __be64 cckey;
+
+ /* For CongestionLog attribute only */
+ u8 log_data[IB_CC_MAD_LOGDATA_LEN];
+
+ u8 mgmt_data[IB_CC_MAD_MGMTDATA_LEN];
+} __packed;
+
+/*
+ * Congestion Control class portinfo capability mask bits
+ */
+#define IB_CC_CPI_CM_TRAP_GEN cpu_to_be16(1 << 0)
+#define IB_CC_CPI_CM_GET_SET_NOTICE cpu_to_be16(1 << 1)
+#define IB_CC_CPI_CM_CAP2 cpu_to_be16(1 << 2)
+#define IB_CC_CPI_CM_ENHANCEDPORT0_CC cpu_to_be16(1 << 8)
+
+struct ib_cc_classportinfo_attr {
+ u8 base_version;
+ u8 class_version;
+ __be16 cap_mask;
+ u8 reserved[3];
+ u8 resp_time_value; /* only lower 5 bits */
+ union ib_gid redirect_gid;
+ __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
+ __be16 redirect_lid;
+ __be16 redirect_pkey;
+ __be32 redirect_qp; /* only lower 24 bits */
+ __be32 redirect_qkey;
+ union ib_gid trap_gid;
+ __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
+ __be16 trap_lid;
+ __be16 trap_pkey;
+ __be32 trap_hl_qp; /* 8, 24 bits respectively */
+ __be32 trap_qkey;
+} __packed;
+
+/* Congestion control traps */
+#define IB_CC_TRAP_KEY_VIOLATION 0x0000
+
+struct ib_cc_trap_key_violation_attr {
+ __be16 source_lid;
+ u8 method;
+ u8 reserved1;
+ __be16 attrib_id;
+ __be32 attrib_mod;
+ __be32 qp;
+ __be64 cckey;
+ u8 sgid[16];
+ u8 padding[24];
+} __packed;
+
+/* Congestion info flags */
+#define IB_CC_CI_FLAGS_CREDIT_STARVATION 0x1
+#define IB_CC_TABLE_CAP_DEFAULT 31
+
+struct ib_cc_info_attr {
+ __be16 congestion_info;
+ u8 control_table_cap; /* Multiple of 64 entry unit CCTs */
+} __packed;
+
+struct ib_cc_key_info_attr {
+ __be64 cckey;
+ u8 protect;
+ __be16 lease_period;
+ __be16 violations;
+} __packed;
+
+#define IB_CC_CL_CA_LOGEVENTS_LEN 208
+
+struct ib_cc_log_attr {
+ u8 log_type;
+ u8 congestion_flags;
+ __be16 threshold_event_counter;
+ __be16 threshold_congestion_event_map;
+ __be16 current_time_stamp;
+ u8 log_events[IB_CC_CL_CA_LOGEVENTS_LEN];
+} __packed;
+
+#define IB_CC_CLEC_SERVICETYPE_RC 0x0
+#define IB_CC_CLEC_SERVICETYPE_UC 0x1
+#define IB_CC_CLEC_SERVICETYPE_RD 0x2
+#define IB_CC_CLEC_SERVICETYPE_UD 0x3
+
+struct ib_cc_log_event {
+ u8 local_qp_cn_entry;
+ u8 remote_qp_number_cn_entry[3];
+ u8 sl_cn_entry:4;
+ u8 service_type_cn_entry:4;
+ __be32 remote_lid_cn_entry;
+ __be32 timestamp_cn_entry;
+} __packed;
+
+/* Sixteen congestion entries */
+#define IB_CC_CCS_ENTRIES 16
+
+/* Port control flags */
+#define IB_CC_CCS_PC_SL_BASED 0x01
+
+struct ib_cc_congestion_entry {
+ u8 ccti_increase;
+ __be16 ccti_timer;
+ u8 trigger_threshold;
+ u8 ccti_min; /* min CCTI for cc table */
+} __packed;
+
+struct ib_cc_congestion_entry_shadow {
+ u8 ccti_increase;
+ u16 ccti_timer;
+ u8 trigger_threshold;
+ u8 ccti_min; /* min CCTI for cc table */
+} __packed;
+
+struct ib_cc_congestion_setting_attr {
+ __be16 port_control;
+ __be16 control_map;
+ struct ib_cc_congestion_entry entries[IB_CC_CCS_ENTRIES];
+} __packed;
+
+struct ib_cc_congestion_setting_attr_shadow {
+ u16 port_control;
+ u16 control_map;
+ struct ib_cc_congestion_entry_shadow entries[IB_CC_CCS_ENTRIES];
+} __packed;
+
+#define IB_CC_TABLE_ENTRY_INCREASE_DEFAULT 1
+#define IB_CC_TABLE_ENTRY_TIMER_DEFAULT 1
+
+/* 64 Congestion Control table entries in a single MAD */
+#define IB_CCT_ENTRIES 64
+#define IB_CCT_MIN_ENTRIES (IB_CCT_ENTRIES * 2)
+
+struct ib_cc_table_entry {
+ __be16 entry; /* shift:2, multiplier:14 */
+};
+
+struct ib_cc_table_entry_shadow {
+ u16 entry; /* shift:2, multiplier:14 */
+};
+
+struct ib_cc_table_attr {
+ __be16 ccti_limit; /* max CCTI for cc table */
+ struct ib_cc_table_entry ccti_entries[IB_CCT_ENTRIES];
+} __packed;
+
+struct ib_cc_table_attr_shadow {
+ u16 ccti_limit; /* max CCTI for cc table */
+ struct ib_cc_table_entry_shadow ccti_entries[IB_CCT_ENTRIES];
+} __packed;
+
+#define CC_TABLE_SHADOW_MAX \
+ (IB_CC_TABLE_CAP_DEFAULT * IB_CCT_ENTRIES)
+
+struct cc_table_shadow {
+ u16 ccti_last_entry;
+ struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
+} __packed;
+
+#endif /* _QIB_MAD_H */
+/*
* The PortSamplesControl.CounterMasks field is an array of 3 bit fields
* which specify the N'th counter's capabilities. See ch. 16.1.3.2.
* We support 5 counters which only count the mandatory quantities.
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 08944e2ee334..e6687ded8210 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -47,6 +47,43 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
return container_of(ibfmr, struct qib_fmr, ibfmr);
}
+static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
+ int count)
+{
+ int m, i = 0;
+ int rval = 0;
+
+ m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
+ for (; i < m; i++) {
+ mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
+ if (!mr->map[i])
+ goto bail;
+ }
+ mr->mapsz = m;
+ init_completion(&mr->comp);
+ /* count returning the ptr to user */
+ atomic_set(&mr->refcount, 1);
+ mr->pd = pd;
+ mr->max_segs = count;
+out:
+ return rval;
+bail:
+ while (i)
+ kfree(mr->map[--i]);
+ rval = -ENOMEM;
+ goto out;
+}
+
+static void deinit_qib_mregion(struct qib_mregion *mr)
+{
+ int i = mr->mapsz;
+
+ mr->mapsz = 0;
+ while (i)
+ kfree(mr->map[--i]);
+}
+
+
/**
* qib_get_dma_mr - get a DMA memory region
* @pd: protection domain for this memory region
@@ -58,10 +95,9 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr)
*/
struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
{
- struct qib_ibdev *dev = to_idev(pd->device);
- struct qib_mr *mr;
+ struct qib_mr *mr = NULL;
struct ib_mr *ret;
- unsigned long flags;
+ int rval;
if (to_ipd(pd)->user) {
ret = ERR_PTR(-EPERM);
@@ -74,61 +110,64 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
goto bail;
}
- mr->mr.access_flags = acc;
- atomic_set(&mr->mr.refcount, 0);
+ rval = init_qib_mregion(&mr->mr, pd, 0);
+ if (rval) {
+ ret = ERR_PTR(rval);
+ goto bail;
+ }
+
- spin_lock_irqsave(&dev->lk_table.lock, flags);
- if (!dev->dma_mr)
- dev->dma_mr = &mr->mr;
- spin_unlock_irqrestore(&dev->lk_table.lock, flags);
+ rval = qib_alloc_lkey(&mr->mr, 1);
+ if (rval) {
+ ret = ERR_PTR(rval);
+ goto bail_mregion;
+ }
+ mr->mr.access_flags = acc;
ret = &mr->ibmr;
+done:
+ return ret;
+bail_mregion:
+ deinit_qib_mregion(&mr->mr);
bail:
- return ret;
+ kfree(mr);
+ goto done;
}
-static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table)
+static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
{
struct qib_mr *mr;
- int m, i = 0;
+ int rval = -ENOMEM;
+ int m;
/* Allocate struct plus pointers to first level page tables. */
m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
- mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+ mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
if (!mr)
- goto done;
-
- /* Allocate first level page tables. */
- for (; i < m; i++) {
- mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
- if (!mr->mr.map[i])
- goto bail;
- }
- mr->mr.mapsz = m;
- mr->mr.page_shift = 0;
- mr->mr.max_segs = count;
+ goto bail;
+ rval = init_qib_mregion(&mr->mr, pd, count);
+ if (rval)
+ goto bail;
/*
* ib_reg_phys_mr() will initialize mr->ibmr except for
* lkey and rkey.
*/
- if (!qib_alloc_lkey(lk_table, &mr->mr))
- goto bail;
+ rval = qib_alloc_lkey(&mr->mr, 0);
+ if (rval)
+ goto bail_mregion;
mr->ibmr.lkey = mr->mr.lkey;
mr->ibmr.rkey = mr->mr.lkey;
+done:
+ return mr;
- atomic_set(&mr->mr.refcount, 0);
- goto done;
-
+bail_mregion:
+ deinit_qib_mregion(&mr->mr);
bail:
- while (i)
- kfree(mr->mr.map[--i]);
kfree(mr);
- mr = NULL;
-
-done:
- return mr;
+ mr = ERR_PTR(rval);
+ goto done;
}
/**
@@ -148,19 +187,15 @@ struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd,
int n, m, i;
struct ib_mr *ret;
- mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table);
- if (mr == NULL) {
- ret = ERR_PTR(-ENOMEM);
+ mr = alloc_mr(num_phys_buf, pd);
+ if (IS_ERR(mr)) {
+ ret = (struct ib_mr *)mr;
goto bail;
}
- mr->mr.pd = pd;
mr->mr.user_base = *iova_start;
mr->mr.iova = *iova_start;
- mr->mr.length = 0;
- mr->mr.offset = 0;
mr->mr.access_flags = acc;
- mr->umem = NULL;
m = 0;
n = 0;
@@ -186,7 +221,6 @@ bail:
* @pd: protection domain for this memory region
* @start: starting userspace address
* @length: length of region to register
- * @virt_addr: virtual address to use (from HCA's point of view)
* @mr_access_flags: access flags for this memory region
* @udata: unused by the QLogic_IB driver
*
@@ -216,14 +250,13 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
list_for_each_entry(chunk, &umem->chunk_list, list)
n += chunk->nents;
- mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
- if (!mr) {
- ret = ERR_PTR(-ENOMEM);
+ mr = alloc_mr(n, pd);
+ if (IS_ERR(mr)) {
+ ret = (struct ib_mr *)mr;
ib_umem_release(umem);
goto bail;
}
- mr->mr.pd = pd;
mr->mr.user_base = start;
mr->mr.iova = virt_addr;
mr->mr.length = length;
@@ -271,21 +304,25 @@ bail:
int qib_dereg_mr(struct ib_mr *ibmr)
{
struct qib_mr *mr = to_imr(ibmr);
- struct qib_ibdev *dev = to_idev(ibmr->device);
- int ret;
- int i;
-
- ret = qib_free_lkey(dev, &mr->mr);
- if (ret)
- return ret;
-
- i = mr->mr.mapsz;
- while (i)
- kfree(mr->mr.map[--i]);
+ int ret = 0;
+ unsigned long timeout;
+
+ qib_free_lkey(&mr->mr);
+
+ qib_put_mr(&mr->mr); /* will set completion if last */
+ timeout = wait_for_completion_timeout(&mr->mr.comp,
+ 5 * HZ);
+ if (!timeout) {
+ qib_get_mr(&mr->mr);
+ ret = -EBUSY;
+ goto out;
+ }
+ deinit_qib_mregion(&mr->mr);
if (mr->umem)
ib_umem_release(mr->umem);
kfree(mr);
- return 0;
+out:
+ return ret;
}
/*
@@ -298,17 +335,9 @@ struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
struct qib_mr *mr;
- mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table);
- if (mr == NULL)
- return ERR_PTR(-ENOMEM);
-
- mr->mr.pd = pd;
- mr->mr.user_base = 0;
- mr->mr.iova = 0;
- mr->mr.length = 0;
- mr->mr.offset = 0;
- mr->mr.access_flags = 0;
- mr->umem = NULL;
+ mr = alloc_mr(max_page_list_len, pd);
+ if (IS_ERR(mr))
+ return (struct ib_mr *)mr;
return &mr->ibmr;
}
@@ -322,11 +351,11 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
if (size > PAGE_SIZE)
return ERR_PTR(-EINVAL);
- pl = kmalloc(sizeof *pl, GFP_KERNEL);
+ pl = kzalloc(sizeof *pl, GFP_KERNEL);
if (!pl)
return ERR_PTR(-ENOMEM);
- pl->page_list = kmalloc(size, GFP_KERNEL);
+ pl->page_list = kzalloc(size, GFP_KERNEL);
if (!pl->page_list)
goto err_free;
@@ -355,57 +384,47 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
struct ib_fmr_attr *fmr_attr)
{
struct qib_fmr *fmr;
- int m, i = 0;
+ int m;
struct ib_fmr *ret;
+ int rval = -ENOMEM;
/* Allocate struct plus pointers to first level page tables. */
m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
- fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+ fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
if (!fmr)
goto bail;
- /* Allocate first level page tables. */
- for (; i < m; i++) {
- fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
- GFP_KERNEL);
- if (!fmr->mr.map[i])
- goto bail;
- }
- fmr->mr.mapsz = m;
+ rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages);
+ if (rval)
+ goto bail;
/*
* ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
* rkey.
*/
- if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
- goto bail;
+ rval = qib_alloc_lkey(&fmr->mr, 0);
+ if (rval)
+ goto bail_mregion;
fmr->ibfmr.rkey = fmr->mr.lkey;
fmr->ibfmr.lkey = fmr->mr.lkey;
/*
* Resources are allocated but no valid mapping (RKEY can't be
* used).
*/
- fmr->mr.pd = pd;
- fmr->mr.user_base = 0;
- fmr->mr.iova = 0;
- fmr->mr.length = 0;
- fmr->mr.offset = 0;
fmr->mr.access_flags = mr_access_flags;
fmr->mr.max_segs = fmr_attr->max_pages;
fmr->mr.page_shift = fmr_attr->page_shift;
- atomic_set(&fmr->mr.refcount, 0);
ret = &fmr->ibfmr;
- goto done;
+done:
+ return ret;
+bail_mregion:
+ deinit_qib_mregion(&fmr->mr);
bail:
- while (i)
- kfree(fmr->mr.map[--i]);
kfree(fmr);
- ret = ERR_PTR(-ENOMEM);
-
-done:
- return ret;
+ ret = ERR_PTR(rval);
+ goto done;
}
/**
@@ -428,7 +447,8 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
u32 ps;
int ret;
- if (atomic_read(&fmr->mr.refcount))
+ i = atomic_read(&fmr->mr.refcount);
+ if (i > 2)
return -EBUSY;
if (list_len > fmr->mr.max_segs) {
@@ -490,16 +510,27 @@ int qib_unmap_fmr(struct list_head *fmr_list)
int qib_dealloc_fmr(struct ib_fmr *ibfmr)
{
struct qib_fmr *fmr = to_ifmr(ibfmr);
- int ret;
- int i;
+ int ret = 0;
+ unsigned long timeout;
+
+ qib_free_lkey(&fmr->mr);
+ qib_put_mr(&fmr->mr); /* will set completion if last */
+ timeout = wait_for_completion_timeout(&fmr->mr.comp,
+ 5 * HZ);
+ if (!timeout) {
+ qib_get_mr(&fmr->mr);
+ ret = -EBUSY;
+ goto out;
+ }
+ deinit_qib_mregion(&fmr->mr);
+ kfree(fmr);
+out:
+ return ret;
+}
- ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr);
- if (ret)
- return ret;
+void mr_rcu_callback(struct rcu_head *list)
+{
+ struct qib_mregion *mr = container_of(list, struct qib_mregion, list);
- i = fmr->mr.mapsz;
- while (i)
- kfree(fmr->mr.map[--i]);
- kfree(fmr);
- return 0;
+ complete(&mr->comp);
}
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 790646ef5106..062c301ebf53 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -224,8 +224,9 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
}
do_intx:
if (ret) {
- qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
- "falling back to INTx\n", tabsize, ret);
+ qib_dev_err(dd,
+ "pci_enable_msix %d vectors failed: %d, falling back to INTx\n",
+ tabsize, ret);
tabsize = 0;
}
for (i = 0; i < tabsize; i++)
@@ -251,8 +252,9 @@ static int qib_msi_setup(struct qib_devdata *dd, int pos)
ret = pci_enable_msi(pdev);
if (ret)
- qib_dev_err(dd, "pci_enable_msi failed: %d, "
- "interrupts may not work\n", ret);
+ qib_dev_err(dd,
+ "pci_enable_msi failed: %d, interrupts may not work\n",
+ ret);
/* continue even if it fails, we may still be OK... */
pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
@@ -358,8 +360,8 @@ int qib_reinit_intr(struct qib_devdata *dd)
pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
if (!pos) {
- qib_dev_err(dd, "Can't find MSI capability, "
- "can't restore MSI settings\n");
+ qib_dev_err(dd,
+ "Can't find MSI capability, can't restore MSI settings\n");
ret = 0;
/* nothing special for MSIx, just MSI */
goto bail;
@@ -471,8 +473,8 @@ void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
r = pci_enable_device(dd->pcidev);
if (r)
- qib_dev_err(dd, "pci_enable_device failed after "
- "reset: %d\n", r);
+ qib_dev_err(dd,
+ "pci_enable_device failed after reset: %d\n", r);
}
/* code to adjust PCIe capabilities. */
@@ -717,15 +719,16 @@ qib_pci_mmio_enabled(struct pci_dev *pdev)
if (words == ~0ULL)
ret = PCI_ERS_RESULT_NEED_RESET;
}
- qib_devinfo(pdev, "QIB mmio_enabled function called, "
- "read wordscntr %Lx, returning %d\n", words, ret);
+ qib_devinfo(pdev,
+ "QIB mmio_enabled function called, read wordscntr %Lx, returning %d\n",
+ words, ret);
return ret;
}
static pci_ers_result_t
qib_pci_slot_reset(struct pci_dev *pdev)
{
- qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
+ qib_devinfo(pdev, "QIB slot_reset function called, ignored\n");
return PCI_ERS_RESULT_CAN_RECOVER;
}
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 1ce56b51ab1a..4850d03870c2 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. * All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -250,23 +250,33 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
spin_lock_irqsave(&dev->qpt_lock, flags);
- if (ibp->qp0 == qp) {
+ if (rcu_dereference_protected(ibp->qp0,
+ lockdep_is_held(&dev->qpt_lock)) == qp) {
atomic_dec(&qp->refcount);
rcu_assign_pointer(ibp->qp0, NULL);
- } else if (ibp->qp1 == qp) {
+ } else if (rcu_dereference_protected(ibp->qp1,
+ lockdep_is_held(&dev->qpt_lock)) == qp) {
atomic_dec(&qp->refcount);
rcu_assign_pointer(ibp->qp1, NULL);
} else {
- struct qib_qp *q, **qpp;
+ struct qib_qp *q;
+ struct qib_qp __rcu **qpp;
qpp = &dev->qp_table[n];
- for (; (q = *qpp) != NULL; qpp = &q->next)
+ q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&dev->qpt_lock));
+ for (; q; qpp = &q->next) {
if (q == qp) {
atomic_dec(&qp->refcount);
- rcu_assign_pointer(*qpp, qp->next);
- qp->next = NULL;
+ *qpp = qp->next;
+ rcu_assign_pointer(qp->next, NULL);
+ q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&dev->qpt_lock));
break;
}
+ q = rcu_dereference_protected(*qpp,
+ lockdep_is_held(&dev->qpt_lock));
+ }
}
spin_unlock_irqrestore(&dev->qpt_lock, flags);
@@ -302,10 +312,12 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
spin_lock_irqsave(&dev->qpt_lock, flags);
for (n = 0; n < dev->qp_table_size; n++) {
- qp = dev->qp_table[n];
+ qp = rcu_dereference_protected(dev->qp_table[n],
+ lockdep_is_held(&dev->qpt_lock));
rcu_assign_pointer(dev->qp_table[n], NULL);
- for (; qp; qp = qp->next)
+ for (; qp; qp = rcu_dereference_protected(qp->next,
+ lockdep_is_held(&dev->qpt_lock)))
qp_inuse++;
}
spin_unlock_irqrestore(&dev->qpt_lock, flags);
@@ -337,7 +349,8 @@ struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
unsigned n = qpn_hash(dev, qpn);
rcu_read_lock();
- for (qp = dev->qp_table[n]; rcu_dereference(qp); qp = qp->next)
+ for (qp = rcu_dereference(dev->qp_table[n]); qp;
+ qp = rcu_dereference(qp->next))
if (qp->ibqp.qp_num == qpn)
break;
}
@@ -406,18 +419,9 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
unsigned n;
if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
- while (qp->s_rdma_read_sge.num_sge) {
- atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
- if (--qp->s_rdma_read_sge.num_sge)
- qp->s_rdma_read_sge.sge =
- *qp->s_rdma_read_sge.sg_list++;
- }
+ qib_put_ss(&qp->s_rdma_read_sge);
- while (qp->r_sge.num_sge) {
- atomic_dec(&qp->r_sge.sge.mr->refcount);
- if (--qp->r_sge.num_sge)
- qp->r_sge.sge = *qp->r_sge.sg_list++;
- }
+ qib_put_ss(&qp->r_sge);
if (clr_sends) {
while (qp->s_last != qp->s_head) {
@@ -427,7 +431,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
for (i = 0; i < wqe->wr.num_sge; i++) {
struct qib_sge *sge = &wqe->sg_list[i];
- atomic_dec(&sge->mr->refcount);
+ qib_put_mr(sge->mr);
}
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
@@ -437,7 +441,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
qp->s_last = 0;
}
if (qp->s_rdma_mr) {
- atomic_dec(&qp->s_rdma_mr->refcount);
+ qib_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
}
}
@@ -450,7 +454,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
e->rdma_sge.mr) {
- atomic_dec(&e->rdma_sge.mr->refcount);
+ qib_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
}
@@ -495,7 +499,7 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
if (!(qp->s_flags & QIB_S_BUSY)) {
qp->s_hdrwords = 0;
if (qp->s_rdma_mr) {
- atomic_dec(&qp->s_rdma_mr->refcount);
+ qib_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
}
if (qp->s_tx) {
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index b641416148eb..3ab341320ead 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
case OP(RDMA_READ_RESPONSE_ONLY):
e = &qp->s_ack_queue[qp->s_tail_ack_queue];
if (e->rdma_sge.mr) {
- atomic_dec(&e->rdma_sge.mr->refcount);
+ qib_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
/* FALLTHROUGH */
@@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
/* Copy SGE state in case we need to resend */
qp->s_rdma_mr = e->rdma_sge.mr;
if (qp->s_rdma_mr)
- atomic_inc(&qp->s_rdma_mr->refcount);
+ qib_get_mr(qp->s_rdma_mr);
qp->s_ack_rdma_sge.sge = e->rdma_sge;
qp->s_ack_rdma_sge.num_sge = 1;
qp->s_cur_sge = &qp->s_ack_rdma_sge;
@@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
qp->s_cur_sge = &qp->s_ack_rdma_sge;
qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
if (qp->s_rdma_mr)
- atomic_inc(&qp->s_rdma_mr->refcount);
+ qib_get_mr(qp->s_rdma_mr);
len = qp->s_ack_rdma_sge.sge.sge_length;
if (len > pmtu)
len = pmtu;
@@ -1012,7 +1012,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
for (i = 0; i < wqe->wr.num_sge; i++) {
struct qib_sge *sge = &wqe->sg_list[i];
- atomic_dec(&sge->mr->refcount);
+ qib_put_mr(sge->mr);
}
/* Post a send completion queue entry if requested. */
if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
@@ -1068,7 +1068,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
for (i = 0; i < wqe->wr.num_sge; i++) {
struct qib_sge *sge = &wqe->sg_list[i];
- atomic_dec(&sge->mr->refcount);
+ qib_put_mr(sge->mr);
}
/* Post a send completion queue entry if requested. */
if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
@@ -1730,7 +1730,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
if (unlikely(offset + len != e->rdma_sge.sge_length))
goto unlock_done;
if (e->rdma_sge.mr) {
- atomic_dec(&e->rdma_sge.mr->refcount);
+ qib_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
if (len != 0) {
@@ -2024,11 +2024,7 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
- while (qp->r_sge.num_sge) {
- atomic_dec(&qp->r_sge.sge.mr->refcount);
- if (--qp->r_sge.num_sge)
- qp->r_sge.sge = *qp->r_sge.sg_list++;
- }
+ qib_put_ss(&qp->r_sge);
qp->r_msn++;
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
break;
@@ -2116,7 +2112,7 @@ send_last:
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- atomic_dec(&e->rdma_sge.mr->refcount);
+ qib_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
reth = &ohdr->u.rc.reth;
@@ -2188,7 +2184,7 @@ send_last:
}
e = &qp->s_ack_queue[qp->r_head_ack_queue];
if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
- atomic_dec(&e->rdma_sge.mr->refcount);
+ qib_put_mr(e->rdma_sge.mr);
e->rdma_sge.mr = NULL;
}
ateth = &ohdr->u.atomic_eth;
@@ -2210,7 +2206,7 @@ send_last:
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
be64_to_cpu(ateth->compare_data),
sdata);
- atomic_dec(&qp->r_sge.sge.mr->refcount);
+ qib_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
e->opcode = opcode;
e->sent = 0;
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index c0ee7e095d81..357b6cfcd46c 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -110,7 +110,7 @@ bad_lkey:
while (j) {
struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;
- atomic_dec(&sge->mr->refcount);
+ qib_put_mr(sge->mr);
}
ss->num_sge = 0;
memset(&wc, 0, sizeof(wc));
@@ -501,7 +501,7 @@ again:
(u64) atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
sdata, wqe->wr.wr.atomic.swap);
- atomic_dec(&qp->r_sge.sge.mr->refcount);
+ qib_put_mr(qp->r_sge.sge.mr);
qp->r_sge.num_sge = 0;
goto send_comp;
@@ -525,7 +525,7 @@ again:
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (!release)
- atomic_dec(&sge->mr->refcount);
+ qib_put_mr(sge->mr);
if (--sqp->s_sge.num_sge)
*sge = *sqp->s_sge.sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
@@ -542,11 +542,7 @@ again:
sqp->s_len -= len;
}
if (release)
- while (qp->r_sge.num_sge) {
- atomic_dec(&qp->r_sge.sge.mr->refcount);
- if (--qp->r_sge.num_sge)
- qp->r_sge.sge = *qp->r_sge.sg_list++;
- }
+ qib_put_ss(&qp->r_sge);
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
goto send_comp;
@@ -782,7 +778,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
for (i = 0; i < wqe->wr.num_sge; i++) {
struct qib_sge *sge = &wqe->sg_list[i];
- atomic_dec(&sge->mr->refcount);
+ qib_put_mr(sge->mr);
}
if (qp->ibqp.qp_type == IB_QPT_UD ||
qp->ibqp.qp_type == IB_QPT_SMI ||
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index ac065dd6b693..a322d5171a2c 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -342,15 +342,17 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0, 0);
if (ret < 0)
- qib_dev_err(dd, "Failed checking TRIMDONE, chn %d"
- " (%s)\n", chn, where);
+ qib_dev_err(dd,
+ "Failed checking TRIMDONE, chn %d (%s)\n",
+ chn, where);
if (!(ret & 0x10)) {
int probe;
baduns |= (1 << chn);
- qib_dev_err(dd, "TRIMDONE cleared on chn %d (%02X)."
- " (%s)\n", chn, ret, where);
+ qib_dev_err(dd,
+ "TRIMDONE cleared on chn %d (%02X). (%s)\n",
+ chn, ret, where);
probe = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_PGUDP(0), 0, 0);
qib_dev_err(dd, "probe is %d (%02X)\n",
@@ -375,8 +377,8 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES,
IB_CTRL2(chn), 0x10, 0x10);
if (ret < 0)
- qib_dev_err(dd, "Failed re-setting "
- "TRIMDONE, chn %d (%s)\n",
+ qib_dev_err(dd,
+ "Failed re-setting TRIMDONE, chn %d (%s)\n",
chn, where);
}
}
@@ -1144,10 +1146,10 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- qib_dev_err(dd, "pre-read failed: elt %d,"
- " addr 0x%X, chnl %d\n",
- (sloc & 0xF),
- (sloc >> 9) & 0x3f, chnl);
+ qib_dev_err(dd,
+ "pre-read failed: elt %d, addr 0x%X, chnl %d\n",
+ (sloc & 0xF),
+ (sloc >> 9) & 0x3f, chnl);
return ret;
}
val = (ret & ~mask) | (val & mask);
@@ -1157,9 +1159,9 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- qib_dev_err(dd, "Global WR failed: elt %d,"
- " addr 0x%X, val %02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, val);
+ qib_dev_err(dd,
+ "Global WR failed: elt %d, addr 0x%X, val %02X\n",
+ (sloc & 0xF), (sloc >> 9) & 0x3f, val);
}
return ret;
}
@@ -1173,11 +1175,10 @@ static int ibsd_mod_allchnls(struct qib_devdata *dd, int loc, int val,
if (ret < 0) {
int sloc = loc >> EPB_ADDR_SHF;
- qib_dev_err(dd, "Write failed: elt %d,"
- " addr 0x%X, chnl %d, val 0x%02X,"
- " mask 0x%02X\n",
- (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
- val & 0xFF, mask & 0xFF);
+ qib_dev_err(dd,
+ "Write failed: elt %d, addr 0x%X, chnl %d, val 0x%02X, mask 0x%02X\n",
+ (sloc & 0xF), (sloc >> 9) & 0x3f, chnl,
+ val & 0xFF, mask & 0xFF);
break;
}
}
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 12a9604310d7..3fc514431212 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2007 - 2012 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -276,8 +277,8 @@ static int alloc_sdma(struct qib_pportdata *ppd)
GFP_KERNEL);
if (!ppd->sdma_descq) {
- qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
- "FIFO memory\n");
+ qib_dev_err(ppd->dd,
+ "failed to allocate SendDMA descriptor FIFO memory\n");
goto bail;
}
@@ -285,8 +286,8 @@ static int alloc_sdma(struct qib_pportdata *ppd)
ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
if (!ppd->sdma_head_dma) {
- qib_dev_err(ppd->dd, "failed to allocate SendDMA "
- "head memory\n");
+ qib_dev_err(ppd->dd,
+ "failed to allocate SendDMA head memory\n");
goto cleanup_descq;
}
ppd->sdma_head_dma[0] = 0;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index dd9cd49d0979..034cc821de5c 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -33,41 +34,7 @@
#include <linux/ctype.h>
#include "qib.h"
-
-/**
- * qib_parse_ushort - parse an unsigned short value in an arbitrary base
- * @str: the string containing the number
- * @valp: where to put the result
- *
- * Returns the number of bytes consumed, or negative value on error.
- */
-static int qib_parse_ushort(const char *str, unsigned short *valp)
-{
- unsigned long val;
- char *end;
- int ret;
-
- if (!isdigit(str[0])) {
- ret = -EINVAL;
- goto bail;
- }
-
- val = simple_strtoul(str, &end, 0);
-
- if (val > 0xffff) {
- ret = -EINVAL;
- goto bail;
- }
-
- *valp = val;
-
- ret = end + 1 - str;
- if (ret == 0)
- ret = -EINVAL;
-
-bail:
- return ret;
-}
+#include "qib_mad.h"
/* start of per-port functions */
/*
@@ -90,7 +57,11 @@ static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
int ret;
u16 val;
- ret = qib_parse_ushort(buf, &val);
+ ret = kstrtou16(buf, 0, &val);
+ if (ret) {
+ qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
+ return ret;
+ }
/*
* Set the "intentional" heartbeat enable per either of
@@ -99,10 +70,7 @@ static ssize_t store_hrtbt_enb(struct qib_pportdata *ppd, const char *buf,
* because entering loopback mode overrides it and automatically
* disables heartbeat.
*/
- if (ret >= 0)
- ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
- if (ret < 0)
- qib_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
+ ret = dd->f_set_ib_cfg(ppd, QIB_IB_CFG_HRTBT, val);
return ret < 0 ? ret : count;
}
@@ -126,12 +94,14 @@ static ssize_t store_led_override(struct qib_pportdata *ppd, const char *buf,
int ret;
u16 val;
- ret = qib_parse_ushort(buf, &val);
- if (ret > 0)
- qib_set_led_override(ppd, val);
- else
+ ret = kstrtou16(buf, 0, &val);
+ if (ret) {
qib_dev_err(dd, "attempt to set invalid LED override\n");
- return ret < 0 ? ret : count;
+ return ret;
+ }
+
+ qib_set_led_override(ppd, val);
+ return count;
}
static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
@@ -231,6 +201,98 @@ static struct attribute *port_default_attributes[] = {
NULL
};
+/*
+ * Start of per-port congestion control structures and support code
+ */
+
+/*
+ * Congestion control table size followed by table entries
+ */
+static ssize_t read_cc_table_bin(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ int ret;
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, pport_cc_kobj);
+
+ if (!qib_cc_table_size || !ppd->ccti_entries_shadow)
+ return -EINVAL;
+
+ ret = ppd->total_cct_entry * sizeof(struct ib_cc_table_entry_shadow)
+ + sizeof(__be16);
+
+ if (pos > ret)
+ return -EINVAL;
+
+ if (count > ret - pos)
+ count = ret - pos;
+
+ if (!count)
+ return count;
+
+ spin_lock(&ppd->cc_shadow_lock);
+ memcpy(buf, ppd->ccti_entries_shadow, count);
+ spin_unlock(&ppd->cc_shadow_lock);
+
+ return count;
+}
+
+static void qib_port_release(struct kobject *kobj)
+{
+ /* nothing to do since memory is freed by qib_free_devdata() */
+}
+
+static struct kobj_type qib_port_cc_ktype = {
+ .release = qib_port_release,
+};
+
+static struct bin_attribute cc_table_bin_attr = {
+ .attr = {.name = "cc_table_bin", .mode = 0444},
+ .read = read_cc_table_bin,
+ .size = PAGE_SIZE,
+};
+
+/*
+ * Congestion settings: port control, control map and an array of 16
+ * entries for the congestion entries - increase, timer, event log
+ * trigger threshold and the minimum injection rate delay.
+ */
+static ssize_t read_cc_setting_bin(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t pos, size_t count)
+{
+ int ret;
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, pport_cc_kobj);
+
+ if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
+ return -EINVAL;
+
+ ret = sizeof(struct ib_cc_congestion_setting_attr_shadow);
+
+ if (pos > ret)
+ return -EINVAL;
+ if (count > ret - pos)
+ count = ret - pos;
+
+ if (!count)
+ return count;
+
+ spin_lock(&ppd->cc_shadow_lock);
+ memcpy(buf, ppd->congestion_entries_shadow, count);
+ spin_unlock(&ppd->cc_shadow_lock);
+
+ return count;
+}
+
+static struct bin_attribute cc_setting_bin_attr = {
+ .attr = {.name = "cc_settings_bin", .mode = 0444},
+ .read = read_cc_setting_bin,
+ .size = PAGE_SIZE,
+};
+
+
static ssize_t qib_portattr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -253,10 +315,6 @@ static ssize_t qib_portattr_store(struct kobject *kobj,
return pattr->store(ppd, buf, len);
}
-static void qib_port_release(struct kobject *kobj)
-{
- /* nothing to do since memory is freed by qib_free_devdata() */
-}
static const struct sysfs_ops qib_port_ops = {
.show = qib_portattr_show,
@@ -411,12 +469,12 @@ static ssize_t diagc_attr_store(struct kobject *kobj, struct attribute *attr,
struct qib_pportdata *ppd =
container_of(kobj, struct qib_pportdata, diagc_kobj);
struct qib_ibport *qibp = &ppd->ibport_data;
- char *endp;
- long val = simple_strtol(buf, &endp, 0);
-
- if (val < 0 || endp == buf)
- return -EINVAL;
+ u32 val;
+ int ret;
+ ret = kstrtou32(buf, 0, &val);
+ if (ret)
+ return ret;
*(u32 *)((char *) qibp + dattr->counter) = val;
return size;
}
@@ -649,8 +707,9 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
int ret;
if (!port_num || port_num > dd->num_pports) {
- qib_dev_err(dd, "Skipping infiniband class with "
- "invalid port %u\n", port_num);
+ qib_dev_err(dd,
+ "Skipping infiniband class with invalid port %u\n",
+ port_num);
ret = -ENODEV;
goto bail;
}
@@ -659,8 +718,9 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
ret = kobject_init_and_add(&ppd->pport_kobj, &qib_port_ktype, kobj,
"linkcontrol");
if (ret) {
- qib_dev_err(dd, "Skipping linkcontrol sysfs info, "
- "(err %d) port %u\n", ret, port_num);
+ qib_dev_err(dd,
+ "Skipping linkcontrol sysfs info, (err %d) port %u\n",
+ ret, port_num);
goto bail;
}
kobject_uevent(&ppd->pport_kobj, KOBJ_ADD);
@@ -668,26 +728,70 @@ int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
ret = kobject_init_and_add(&ppd->sl2vl_kobj, &qib_sl2vl_ktype, kobj,
"sl2vl");
if (ret) {
- qib_dev_err(dd, "Skipping sl2vl sysfs info, "
- "(err %d) port %u\n", ret, port_num);
- goto bail_sl;
+ qib_dev_err(dd,
+ "Skipping sl2vl sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail_link;
}
kobject_uevent(&ppd->sl2vl_kobj, KOBJ_ADD);
ret = kobject_init_and_add(&ppd->diagc_kobj, &qib_diagc_ktype, kobj,
"diag_counters");
if (ret) {
- qib_dev_err(dd, "Skipping diag_counters sysfs info, "
- "(err %d) port %u\n", ret, port_num);
- goto bail_diagc;
+ qib_dev_err(dd,
+ "Skipping diag_counters sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail_sl;
}
kobject_uevent(&ppd->diagc_kobj, KOBJ_ADD);
+ if (!qib_cc_table_size || !ppd->congestion_entries_shadow)
+ return 0;
+
+ ret = kobject_init_and_add(&ppd->pport_cc_kobj, &qib_port_cc_ktype,
+ kobj, "CCMgtA");
+ if (ret) {
+ qib_dev_err(dd,
+ "Skipping Congestion Control sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail_diagc;
+ }
+
+ kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
+
+ ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
+ &cc_setting_bin_attr);
+ if (ret) {
+ qib_dev_err(dd,
+ "Skipping Congestion Control setting sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail_cc;
+ }
+
+ ret = sysfs_create_bin_file(&ppd->pport_cc_kobj,
+ &cc_table_bin_attr);
+ if (ret) {
+ qib_dev_err(dd,
+ "Skipping Congestion Control table sysfs info, (err %d) port %u\n",
+ ret, port_num);
+ goto bail_cc_entry_bin;
+ }
+
+ qib_devinfo(dd->pcidev,
+ "IB%u: Congestion Control Agent enabled for port %d\n",
+ dd->unit, port_num);
+
return 0;
+bail_cc_entry_bin:
+ sysfs_remove_bin_file(&ppd->pport_cc_kobj, &cc_setting_bin_attr);
+bail_cc:
+ kobject_put(&ppd->pport_cc_kobj);
bail_diagc:
- kobject_put(&ppd->sl2vl_kobj);
+ kobject_put(&ppd->diagc_kobj);
bail_sl:
+ kobject_put(&ppd->sl2vl_kobj);
+bail_link:
kobject_put(&ppd->pport_kobj);
bail:
return ret;
@@ -720,7 +824,15 @@ void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
for (i = 0; i < dd->num_pports; i++) {
ppd = &dd->pport[i];
- kobject_put(&ppd->pport_kobj);
+ if (qib_cc_table_size &&
+ ppd->congestion_entries_shadow) {
+ sysfs_remove_bin_file(&ppd->pport_cc_kobj,
+ &cc_setting_bin_attr);
+ sysfs_remove_bin_file(&ppd->pport_cc_kobj,
+ &cc_table_bin_attr);
+ kobject_put(&ppd->pport_cc_kobj);
+ }
kobject_put(&ppd->sl2vl_kobj);
+ kobject_put(&ppd->pport_kobj);
}
}
diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c
index ddde72e11edb..647f7beb1b0a 100644
--- a/drivers/infiniband/hw/qib/qib_twsi.c
+++ b/drivers/infiniband/hw/qib/qib_twsi.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -449,8 +450,9 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
goto failed_write;
ret = qib_twsi_wr(dd, addr, 0);
if (ret) {
- qib_dev_err(dd, "Failed to write interface"
- " write addr %02X\n", addr);
+ qib_dev_err(dd,
+ "Failed to write interface write addr %02X\n",
+ addr);
goto failed_write;
}
}
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index ce7387ff5d91..aa3a8035bb68 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -281,11 +281,7 @@ inv:
set_bit(QIB_R_REWIND_SGE, &qp->r_aflags);
qp->r_sge.num_sge = 0;
} else
- while (qp->r_sge.num_sge) {
- atomic_dec(&qp->r_sge.sge.mr->refcount);
- if (--qp->r_sge.num_sge)
- qp->r_sge.sge = *qp->r_sge.sg_list++;
- }
+ qib_put_ss(&qp->r_sge);
qp->r_state = OP(SEND_LAST);
switch (opcode) {
case OP(SEND_FIRST):
@@ -403,14 +399,9 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
wc.opcode = IB_WC_RECV;
-last_imm:
qib_copy_sge(&qp->r_sge, data, tlen, 0);
- while (qp->s_rdma_read_sge.num_sge) {
- atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
- if (--qp->s_rdma_read_sge.num_sge)
- qp->s_rdma_read_sge.sge =
- *qp->s_rdma_read_sge.sg_list++;
- }
+ qib_put_ss(&qp->s_rdma_read_sge);
+last_imm:
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.qp = &qp->ibqp;
@@ -493,13 +484,7 @@ rdma_last_imm:
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
- while (qp->s_rdma_read_sge.num_sge) {
- atomic_dec(&qp->s_rdma_read_sge.sge.mr->
- refcount);
- if (--qp->s_rdma_read_sge.num_sge)
- qp->s_rdma_read_sge.sge =
- *qp->s_rdma_read_sge.sg_list++;
- }
+ qib_put_ss(&qp->s_rdma_read_sge);
else {
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
@@ -509,6 +494,8 @@ rdma_last_imm:
}
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ qib_put_ss(&qp->r_sge);
goto last_imm;
case OP(RDMA_WRITE_LAST):
@@ -524,11 +511,7 @@ rdma_last:
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
qib_copy_sge(&qp->r_sge, data, tlen, 1);
- while (qp->r_sge.num_sge) {
- atomic_dec(&qp->r_sge.sge.mr->refcount);
- if (--qp->r_sge.num_sge)
- qp->r_sge.sge = *qp->r_sge.sg_list++;
- }
+ qib_put_ss(&qp->r_sge);
break;
default:
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index a468bf2d4465..d6c7fe7f88d5 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -194,11 +194,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
}
length -= len;
}
- while (qp->r_sge.num_sge) {
- atomic_dec(&qp->r_sge.sge.mr->refcount);
- if (--qp->r_sge.num_sge)
- qp->r_sge.sge = *qp->r_sge.sg_list++;
- }
+ qib_put_ss(&qp->r_sge);
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
goto bail_unlock;
wc.wr_id = qp->r_wr_id;
@@ -556,11 +552,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
} else
qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
- while (qp->r_sge.num_sge) {
- atomic_dec(&qp->r_sge.sge.mr->refcount);
- if (--qp->r_sge.num_sge)
- qp->r_sge.sge = *qp->r_sge.sg_list++;
- }
+ qib_put_ss(&qp->r_sge);
if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
return;
wc.wr_id = qp->r_wr_id;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 7b6c3bffa9d9..fc9b205c2412 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -183,7 +183,7 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release)
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (release)
- atomic_dec(&sge->mr->refcount);
+ qib_put_mr(sge->mr);
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
@@ -224,7 +224,7 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release)
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (release)
- atomic_dec(&sge->mr->refcount);
+ qib_put_mr(sge->mr);
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
@@ -333,7 +333,8 @@ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length)
* @qp: the QP to post on
* @wr: the work request to send
*/
-static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr)
+static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr,
+ int *scheduled)
{
struct qib_swqe *wqe;
u32 next;
@@ -435,11 +436,17 @@ bail_inval_free:
while (j) {
struct qib_sge *sge = &wqe->sg_list[--j];
- atomic_dec(&sge->mr->refcount);
+ qib_put_mr(sge->mr);
}
bail_inval:
ret = -EINVAL;
bail:
+ if (!ret && !wr->next &&
+ !qib_sdma_empty(
+ dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)) {
+ qib_schedule_send(qp);
+ *scheduled = 1;
+ }
spin_unlock_irqrestore(&qp->s_lock, flags);
return ret;
}
@@ -457,9 +464,10 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
{
struct qib_qp *qp = to_iqp(ibqp);
int err = 0;
+ int scheduled = 0;
for (; wr; wr = wr->next) {
- err = qib_post_one_send(qp, wr);
+ err = qib_post_one_send(qp, wr, &scheduled);
if (err) {
*bad_wr = wr;
goto bail;
@@ -467,7 +475,8 @@ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
/* Try to do the send work in the caller's context. */
- qib_do_send(&qp->s_work);
+ if (!scheduled)
+ qib_do_send(&qp->s_work);
bail:
return err;
@@ -978,7 +987,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
if (tx->mr) {
- atomic_dec(&tx->mr->refcount);
+ qib_put_mr(tx->mr);
tx->mr = NULL;
}
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
@@ -1336,7 +1345,7 @@ done:
}
qib_sendbuf_done(dd, pbufn);
if (qp->s_rdma_mr) {
- atomic_dec(&qp->s_rdma_mr->refcount);
+ qib_put_mr(qp->s_rdma_mr);
qp->s_rdma_mr = NULL;
}
if (qp->s_wqe) {
@@ -1845,6 +1854,23 @@ bail:
return ret;
}
+struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
+{
+ struct ib_ah_attr attr;
+ struct ib_ah *ah = ERR_PTR(-EINVAL);
+ struct qib_qp *qp0;
+
+ memset(&attr, 0, sizeof attr);
+ attr.dlid = dlid;
+ attr.port_num = ppd_from_ibp(ibp)->port;
+ rcu_read_lock();
+ qp0 = rcu_dereference(ibp->qp0);
+ if (qp0)
+ ah = ib_create_ah(qp0->ibqp.pd, &attr);
+ rcu_read_unlock();
+ return ah;
+}
+
/**
* qib_destroy_ah - destroy an address handle
* @ibah: the AH to destroy
@@ -2060,13 +2086,15 @@ int qib_register_ib_device(struct qib_devdata *dd)
spin_lock_init(&dev->lk_table.lock);
dev->lk_table.max = 1 << ib_qib_lkey_table_size;
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- dev->lk_table.table = (struct qib_mregion **)
+ dev->lk_table.table = (struct qib_mregion __rcu **)
__get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
if (dev->lk_table.table == NULL) {
ret = -ENOMEM;
goto err_lk;
}
- memset(dev->lk_table.table, 0, lk_tab_size);
+ RCU_INIT_POINTER(dev->dma_mr, NULL);
+ for (i = 0; i < dev->lk_table.max; i++)
+ RCU_INIT_POINTER(dev->lk_table.table[i], NULL);
INIT_LIST_HEAD(&dev->pending_mmaps);
spin_lock_init(&dev->pending_lock);
dev->mmap_offset = PAGE_SIZE;
@@ -2289,3 +2317,17 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
get_order(lk_tab_size));
kfree(dev->qp_table);
}
+
+/*
+ * This must be called with s_lock held.
+ */
+void qib_schedule_send(struct qib_qp *qp)
+{
+ if (qib_send_ok(qp)) {
+ struct qib_ibport *ibp =
+ to_iport(qp->ibqp.device, qp->port_num);
+ struct qib_pportdata *ppd = ppd_from_ibp(ibp);
+
+ queue_work(ppd->qib_wq, &qp->s_work);
+ }
+}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 487606024659..aff8b2c17886 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
- * All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -41,6 +41,7 @@
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
+#include <linux/completion.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
@@ -302,6 +303,9 @@ struct qib_mregion {
u32 max_segs; /* number of qib_segs in all the arrays */
u32 mapsz; /* size of the map array */
u8 page_shift; /* 0 - non unform/non powerof2 sizes */
+ u8 lkey_published; /* in global table */
+ struct completion comp; /* complete when refcount goes to zero */
+ struct rcu_head list;
atomic_t refcount;
struct qib_segarray *map[0]; /* the segments */
};
@@ -416,7 +420,7 @@ struct qib_qp {
/* read mostly fields above and below */
struct ib_ah_attr remote_ah_attr;
struct ib_ah_attr alt_ah_attr;
- struct qib_qp *next; /* link list for QPN hash table */
+ struct qib_qp __rcu *next; /* link list for QPN hash table */
struct qib_swqe *s_wq; /* send work queue */
struct qib_mmap_info *ip;
struct qib_ib_header *s_hdr; /* next packet header to send */
@@ -646,7 +650,7 @@ struct qib_lkey_table {
u32 next; /* next unused index (speeds search) */
u32 gen; /* generation count */
u32 max; /* size of the table */
- struct qib_mregion **table;
+ struct qib_mregion __rcu **table;
};
struct qib_opcode_stats {
@@ -655,8 +659,8 @@ struct qib_opcode_stats {
};
struct qib_ibport {
- struct qib_qp *qp0;
- struct qib_qp *qp1;
+ struct qib_qp __rcu *qp0;
+ struct qib_qp __rcu *qp1;
struct ib_mad_agent *send_agent; /* agent for SMI (traps) */
struct qib_ah *sm_ah;
struct qib_ah *smi_ah;
@@ -723,12 +727,13 @@ struct qib_ibport {
struct qib_opcode_stats opstats[128];
};
+
struct qib_ibdev {
struct ib_device ibdev;
struct list_head pending_mmaps;
spinlock_t mmap_offset_lock; /* protect mmap_offset */
u32 mmap_offset;
- struct qib_mregion *dma_mr;
+ struct qib_mregion __rcu *dma_mr;
/* QP numbers are shared by all IB ports */
struct qib_qpn_table qpn_table;
@@ -739,7 +744,7 @@ struct qib_ibdev {
struct list_head memwait; /* list for wait kernel memory */
struct list_head txreq_free;
struct timer_list mem_timer;
- struct qib_qp **qp_table;
+ struct qib_qp __rcu **qp_table;
struct qib_pio_header *pio_hdrs;
dma_addr_t pio_hdrs_phys;
/* list of QPs waiting for RNR timer */
@@ -832,11 +837,7 @@ extern struct workqueue_struct *qib_cq_wq;
/*
* This must be called with s_lock held.
*/
-static inline void qib_schedule_send(struct qib_qp *qp)
-{
- if (qib_send_ok(qp))
- queue_work(ib_wq, &qp->s_work);
-}
+void qib_schedule_send(struct qib_qp *qp);
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
@@ -933,6 +934,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
+
void qib_rc_rnr_retry(unsigned long arg);
void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr);
@@ -944,9 +947,9 @@ int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
int has_grh, void *data, u32 tlen, struct qib_qp *qp);
-int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr);
+int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
-int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr);
+void qib_free_lkey(struct qib_mregion *mr);
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
struct qib_sge *isge, struct ib_sge *sge, int acc);
@@ -1014,6 +1017,29 @@ int qib_unmap_fmr(struct list_head *fmr_list);
int qib_dealloc_fmr(struct ib_fmr *ibfmr);
+static inline void qib_get_mr(struct qib_mregion *mr)
+{
+ atomic_inc(&mr->refcount);
+}
+
+void mr_rcu_callback(struct rcu_head *list);
+
+static inline void qib_put_mr(struct qib_mregion *mr)
+{
+ if (unlikely(atomic_dec_and_test(&mr->refcount)))
+ call_rcu(&mr->list, mr_rcu_callback);
+}
+
+static inline void qib_put_ss(struct qib_sge_state *ss)
+{
+ while (ss->num_sge) {
+ qib_put_mr(ss->sge.mr);
+ if (--ss->num_sge)
+ ss->sge = *ss->sg_list++;
+ }
+}
+
+
void qib_release_mmap_info(struct kref *ref);
struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 561b8bca4060..1d7281c5a02e 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
+ * Copyright (c) 2012 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -102,10 +103,10 @@ int qib_enable_wc(struct qib_devdata *dd)
u64 atmp;
atmp = pioaddr & ~(piolen - 1);
if (atmp < addr || (atmp + piolen) > (addr + len)) {
- qib_dev_err(dd, "No way to align address/size "
- "(%llx/%llx), no WC mtrr\n",
- (unsigned long long) atmp,
- (unsigned long long) piolen << 1);
+ qib_dev_err(dd,
+ "No way to align address/size (%llx/%llx), no WC mtrr\n",
+ (unsigned long long) atmp,
+ (unsigned long long) piolen << 1);
ret = -ENODEV;
} else {
pioaddr = atmp;
@@ -120,8 +121,7 @@ int qib_enable_wc(struct qib_devdata *dd)
if (cookie < 0) {
{
qib_devinfo(dd->pcidev,
- "mtrr_add() WC for PIO bufs "
- "failed (%d)\n",
+ "mtrr_add() WC for PIO bufs failed (%d)\n",
cookie);
ret = -EINVAL;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 014504d8e43c..6d66ab0dd92a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1376,7 +1376,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
if (skb->protocol == htons(ETH_P_IP))
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6))
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
@@ -1397,7 +1397,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
int e = skb_queue_empty(&priv->cm.skb_queue);
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5c1bc995e560..f10221f40803 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -123,7 +123,7 @@ static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
skb_frag_size_set(frag, size);
skb->data_len += size;
- skb->truesize += size;
+ skb->truesize += PAGE_SIZE;
} else
skb_put(skb, length);
@@ -156,14 +156,18 @@ static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct sk_buff *skb;
int buf_size;
+ int tailroom;
u64 *mapping;
- if (ipoib_ud_need_sg(priv->max_ib_mtu))
+ if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
buf_size = IPOIB_UD_HEAD_SIZE;
- else
+ tailroom = 128; /* reserve some tailroom for IP/TCP headers */
+ } else {
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+ tailroom = 0;
+ }
- skb = dev_alloc_skb(buf_size + 4);
+ skb = dev_alloc_skb(buf_size + tailroom + 4);
if (unlikely(!skb))
return NULL;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 3974c290b667..bbee4b2d7a13 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -715,7 +715,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
rcu_read_lock();
if (likely(skb_dst(skb))) {
- n = dst_get_neighbour_noref(skb_dst(skb));
+ n = dst_neigh_lookup_skb(skb_dst(skb), skb);
if (!n) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
@@ -797,6 +797,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
unlock:
+ if (n)
+ neigh_release(n);
rcu_read_unlock();
return NETDEV_TX_OK;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 20ebc6fd1bb9..7cecb16d3d48 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -658,9 +658,15 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ struct dst_entry *dst = skb_dst(skb);
struct ipoib_mcast *mcast;
+ struct neighbour *n;
unsigned long flags;
+ n = NULL;
+ if (dst)
+ n = dst_neigh_lookup_skb(dst, skb);
+
spin_lock_irqsave(&priv->lock, flags);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
@@ -715,29 +721,28 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
out:
if (mcast && mcast->ah) {
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n = NULL;
-
- rcu_read_lock();
- if (dst)
- n = dst_get_neighbour_noref(dst);
- if (n && !*to_ipoib_neigh(n)) {
- struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
- skb->dev);
-
- if (neigh) {
- kref_get(&mcast->ah->ref);
- neigh->ah = mcast->ah;
- list_add_tail(&neigh->list, &mcast->neigh_list);
+ if (n) {
+ if (!*to_ipoib_neigh(n)) {
+ struct ipoib_neigh *neigh;
+
+ neigh = ipoib_neigh_alloc(n, skb->dev);
+ if (neigh) {
+ kref_get(&mcast->ah->ref);
+ neigh->ah = mcast->ah;
+ list_add_tail(&neigh->list,
+ &mcast->neigh_list);
+ }
}
+ neigh_release(n);
}
- rcu_read_unlock();
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
return;
}
unlock:
+ if (n)
+ neigh_release(n);
spin_unlock_irqrestore(&priv->lock, flags);
}
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 5f6b7f63cdef..7a0ce8d42887 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -1377,10 +1377,14 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
break;
case SRPT_STATE_NEED_DATA:
/* DMA_TO_DEVICE (write) - RDMA read error. */
+
+ /* XXX(hch): this is a horrible layering violation.. */
spin_lock_irqsave(&ioctx->cmd.t_state_lock, flags);
ioctx->cmd.transport_state |= CMD_T_LUN_STOP;
+ ioctx->cmd.transport_state &= ~CMD_T_ACTIVE;
spin_unlock_irqrestore(&ioctx->cmd.t_state_lock, flags);
- transport_generic_handle_data(&ioctx->cmd);
+
+ complete(&ioctx->cmd.transport_lun_stop_comp);
break;
case SRPT_STATE_CMD_RSP_SENT:
/*
@@ -1463,9 +1467,10 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
/**
* srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
*
- * Note: transport_generic_handle_data() is asynchronous so unmapping the
- * data that has been transferred via IB RDMA must be postponed until the
- * check_stop_free() callback.
+ * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping
+ * the data that has been transferred via IB RDMA had to be postponed until the
+ * check_stop_free() callback. None of this is nessecary anymore and needs to
+ * be cleaned up.
*/
static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx,
@@ -1477,7 +1482,7 @@ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
if (opcode == SRPT_RDMA_READ_LAST) {
if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
SRPT_STATE_DATA_IN))
- transport_generic_handle_data(&ioctx->cmd);
+ target_execute_cmd(&ioctx->cmd);
else
printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
__LINE__, srpt_get_cmd_state(ioctx));
diff --git a/drivers/input/joystick/as5011.c b/drivers/input/joystick/as5011.c
index 57d19d4e0a2d..c96653b58867 100644
--- a/drivers/input/joystick/as5011.c
+++ b/drivers/input/joystick/as5011.c
@@ -282,7 +282,8 @@ static int __devinit as5011_probe(struct i2c_client *client,
error = request_threaded_irq(as5011->button_irq,
NULL, as5011_button_interrupt,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"as5011_button", as5011);
if (error < 0) {
dev_err(&client->dev,
@@ -296,7 +297,7 @@ static int __devinit as5011_probe(struct i2c_client *client,
error = request_threaded_irq(as5011->axis_irq, NULL,
as5011_axis_interrupt,
- plat_data->axis_irqflags,
+ plat_data->axis_irqflags | IRQF_ONESHOT,
"as5011_joystick", as5011);
if (error) {
dev_err(&client->dev,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index ee16fb67b7ae..83811e45d633 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -142,6 +142,7 @@ static const struct xpad_device {
{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
{ 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+ { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
{ 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
@@ -164,6 +165,7 @@ static const struct xpad_device {
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -238,12 +240,14 @@ static struct usb_device_id xpad_table [] = {
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
+ { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
- XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
{ }
};
diff --git a/drivers/input/keyboard/lm8333.c b/drivers/input/keyboard/lm8333.c
index ca168a6679de..081fd9effa8c 100644
--- a/drivers/input/keyboard/lm8333.c
+++ b/drivers/input/keyboard/lm8333.c
@@ -91,7 +91,7 @@ static void lm8333_key_handler(struct lm8333 *lm8333)
return;
}
- for (i = 0; keys[i] && i < LM8333_FIFO_TRANSFER_SIZE; i++) {
+ for (i = 0; i < LM8333_FIFO_TRANSFER_SIZE && keys[i]; i++) {
pressed = keys[i] & 0x80;
code = keys[i] & 0x7f;
diff --git a/drivers/input/keyboard/mcs_touchkey.c b/drivers/input/keyboard/mcs_touchkey.c
index 64a0ca4c92f3..0d77f6c84950 100644
--- a/drivers/input/keyboard/mcs_touchkey.c
+++ b/drivers/input/keyboard/mcs_touchkey.c
@@ -178,7 +178,8 @@ static int __devinit mcs_touchkey_probe(struct i2c_client *client,
}
error = request_threaded_irq(client->irq, NULL, mcs_touchkey_interrupt,
- IRQF_TRIGGER_FALLING, client->dev.driver->name, data);
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
goto err_free_mem;
diff --git a/drivers/input/keyboard/mpr121_touchkey.c b/drivers/input/keyboard/mpr121_touchkey.c
index caa218a51b5a..7613f1cac951 100644
--- a/drivers/input/keyboard/mpr121_touchkey.c
+++ b/drivers/input/keyboard/mpr121_touchkey.c
@@ -248,7 +248,7 @@ static int __devinit mpr_touchkey_probe(struct i2c_client *client,
error = request_threaded_irq(client->irq, NULL,
mpr_touchkey_interrupt,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->dev.driver->name, mpr121);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c
index 0b7b2f891752..ca68f2992d72 100644
--- a/drivers/input/keyboard/qt1070.c
+++ b/drivers/input/keyboard/qt1070.c
@@ -201,7 +201,8 @@ static int __devinit qt1070_probe(struct i2c_client *client,
msleep(QT1070_RESET_TIME);
err = request_threaded_irq(client->irq, NULL, qt1070_interrupt,
- IRQF_TRIGGER_NONE, client->dev.driver->name, data);
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (err) {
dev_err(&client->dev, "fail to request irq\n");
goto err_free_mem;
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 3afea3f89718..c355cdde8d22 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -278,7 +278,8 @@ static int __devinit tca6416_keypad_probe(struct i2c_client *client,
error = request_threaded_irq(chip->irqnum, NULL,
tca6416_keys_isr,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
"tca6416-keypad", chip);
if (error) {
dev_dbg(&client->dev,
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
index 5f87b28b3192..893869b29ed9 100644
--- a/drivers/input/keyboard/tca8418_keypad.c
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -360,7 +360,7 @@ static int __devinit tca8418_keypad_probe(struct i2c_client *client,
client->irq = gpio_to_irq(client->irq);
error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, keypad_data);
if (error) {
dev_dbg(&client->dev,
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index 4ffe64d53107..2c1c9ed1bd9f 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -492,7 +492,7 @@ static int tegra_kbc_start(struct tegra_kbc *kbc)
unsigned int debounce_cnt;
u32 val = 0;
- clk_enable(kbc->clk);
+ clk_prepare_enable(kbc->clk);
/* Reset the KBC controller to clear all previous status.*/
tegra_periph_reset_assert(kbc->clk);
@@ -556,7 +556,7 @@ static void tegra_kbc_stop(struct tegra_kbc *kbc)
disable_irq(kbc->irq);
del_timer_sync(&kbc->timer);
- clk_disable(kbc->clk);
+ clk_disable_unprepare(kbc->clk);
}
static int tegra_kbc_open(struct input_dev *dev)
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index a4a445fb7020..4c34f21fbe2d 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -227,15 +227,15 @@ static int __devinit keypad_probe(struct platform_device *pdev)
goto error_clk;
}
- error = request_threaded_irq(kp->irq_press, NULL, keypad_irq, 0,
- dev_name(dev), kp);
+ error = request_threaded_irq(kp->irq_press, NULL, keypad_irq,
+ IRQF_ONESHOT, dev_name(dev), kp);
if (error < 0) {
dev_err(kp->dev, "Could not allocate keypad press key irq\n");
goto error_irq_press;
}
- error = request_threaded_irq(kp->irq_release, NULL, keypad_irq, 0,
- dev_name(dev), kp);
+ error = request_threaded_irq(kp->irq_release, NULL, keypad_irq,
+ IRQF_ONESHOT, dev_name(dev), kp);
if (error < 0) {
dev_err(kp->dev, "Could not allocate keypad release key irq\n");
goto error_irq_release;
diff --git a/drivers/input/misc/ad714x.c b/drivers/input/misc/ad714x.c
index 0ac75bbad4d6..2e5d5e1de647 100644
--- a/drivers/input/misc/ad714x.c
+++ b/drivers/input/misc/ad714x.c
@@ -972,6 +972,7 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
struct ad714x_platform_data *plat_data = dev->platform_data;
struct ad714x_chip *ad714x;
void *drv_mem;
+ unsigned long irqflags;
struct ad714x_button_drv *bt_drv;
struct ad714x_slider_drv *sd_drv;
@@ -1162,10 +1163,11 @@ struct ad714x_chip *ad714x_probe(struct device *dev, u16 bus_type, int irq,
alloc_idx++;
}
+ irqflags = plat_data->irqflags ?: IRQF_TRIGGER_FALLING;
+ irqflags |= IRQF_ONESHOT;
+
error = request_threaded_irq(ad714x->irq, NULL, ad714x_interrupt_thread,
- plat_data->irqflags ?
- plat_data->irqflags : IRQF_TRIGGER_FALLING,
- "ad714x_captouch", ad714x);
+ irqflags, "ad714x_captouch", ad714x);
if (error) {
dev_err(dev, "can't allocate irq %d\n", ad714x->irq);
goto err_unreg_dev;
diff --git a/drivers/input/misc/cma3000_d0x.c b/drivers/input/misc/cma3000_d0x.c
index a3735a01e9fd..df9b756594f8 100644
--- a/drivers/input/misc/cma3000_d0x.c
+++ b/drivers/input/misc/cma3000_d0x.c
@@ -58,7 +58,7 @@
/*
* Bit weights in mg for bit 0, other bits need
- * multipy factor 2^n. Eight bit is the sign bit.
+ * multiply factor 2^n. Eight bit is the sign bit.
*/
#define BIT_TO_2G 18
#define BIT_TO_8G 71
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index 35083c6836c3..c1313d8535c3 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -213,7 +213,8 @@ static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
/* REVISIT: flush the event queue? */
status = request_threaded_irq(keys->irq, NULL, dm355evm_keys_irq,
- IRQF_TRIGGER_FALLING, dev_name(&pdev->dev), keys);
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&pdev->dev), keys);
if (status < 0)
goto fail2;
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 2cf681d98c0d..d528c23e194f 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -79,6 +79,10 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI 0x0252
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO 0x0253
#define USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS 0x0254
+/* MacbookPro10,1 (unibody, June 2012) */
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI 0x0262
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_ISO 0x0263
+#define USB_DEVICE_ID_APPLE_WELLSPRING7_JIS 0x0264
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -128,6 +132,10 @@ static const struct usb_device_id bcm5974_table[] = {
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
+ /* MacbookPro10,1 */
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
+ BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
/* Terminating entry */
{}
};
@@ -354,6 +362,18 @@ static const struct bcm5974_config bcm5974_config_table[] = {
{ DIM_X, DIM_X / SN_COORD, -4620, 5140 },
{ DIM_Y, DIM_Y / SN_COORD, -150, 6600 }
},
+ {
+ USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI,
+ USB_DEVICE_ID_APPLE_WELLSPRING7_ISO,
+ USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
+ HAS_INTEGRATED_BUTTON,
+ 0x84, sizeof(struct bt_data),
+ 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS,
+ { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 },
+ { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 },
+ { DIM_X, DIM_X / SN_COORD, -4750, 5280 },
+ { DIM_Y, DIM_Y / SN_COORD, -150, 6730 }
+ },
{}
};
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index cad5602d3ce4..8b31473a81fe 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -216,7 +216,7 @@ static void wacom_retrieve_report_data(struct usb_interface *intf,
rep_data[0] = 12;
result = wacom_get_report(intf, WAC_HID_FEATURE_REPORT,
- rep_data[0], &rep_data, 2,
+ rep_data[0], rep_data, 2,
WAC_MSG_RETRIES);
if (result >= 0 && rep_data[1] > 2)
@@ -401,7 +401,9 @@ static int wacom_parse_hid(struct usb_interface *intf,
break;
case HID_USAGE_CONTACTMAX:
- wacom_retrieve_report_data(intf, features);
+ /* leave touch_max as is if predefined */
+ if (!features->touch_max)
+ wacom_retrieve_report_data(intf, features);
i++;
break;
}
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index e2482b40da51..bd4eb4277697 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -597,7 +597,7 @@ struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned int irq,
AD7879_TMR(ts->pen_down_acc_interval);
err = request_threaded_irq(ts->irq, NULL, ad7879_irq,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
dev_name(dev), ts);
if (err) {
dev_err(dev, "irq %d busy?\n", ts->irq);
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 42e645062c20..25fd0561a17d 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1149,7 +1149,8 @@ static int __devinit mxt_probe(struct i2c_client *client,
goto err_free_object;
error = request_threaded_irq(client->irq, NULL, mxt_interrupt,
- pdata->irqflags, client->dev.driver->name, data);
+ pdata->irqflags | IRQF_ONESHOT,
+ client->dev.driver->name, data);
if (error) {
dev_err(&client->dev, "Failed to register interrupt\n");
goto err_free_object;
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index f2d03c06c2da..5c487d23f11c 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -509,7 +509,8 @@ static int __devinit bu21013_probe(struct i2c_client *client,
input_set_drvdata(in_dev, bu21013_data);
error = request_threaded_irq(pdata->irq, NULL, bu21013_gpio_irq,
- IRQF_TRIGGER_FALLING | IRQF_SHARED,
+ IRQF_TRIGGER_FALLING | IRQF_SHARED |
+ IRQF_ONESHOT,
DRIVER_TP, bu21013_data);
if (error) {
dev_err(&client->dev, "request irq %d failed\n", pdata->irq);
diff --git a/drivers/input/touchscreen/cy8ctmg110_ts.c b/drivers/input/touchscreen/cy8ctmg110_ts.c
index 237753ad1031..464f1bf4b61d 100644
--- a/drivers/input/touchscreen/cy8ctmg110_ts.c
+++ b/drivers/input/touchscreen/cy8ctmg110_ts.c
@@ -251,7 +251,8 @@ static int __devinit cy8ctmg110_probe(struct i2c_client *client,
}
err = request_threaded_irq(client->irq, NULL, cy8ctmg110_irq_thread,
- IRQF_TRIGGER_RISING, "touch_reset_key", ts);
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "touch_reset_key", ts);
if (err < 0) {
dev_err(&client->dev,
"irq %d busy? error %d\n", client->irq, err);
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index 3cd7a837f82b..cf299377fc49 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -620,7 +620,7 @@ static int __devinit mrstouch_probe(struct platform_device *pdev)
MRST_PRESSURE_MIN, MRST_PRESSURE_MAX, 0, 0);
err = request_threaded_irq(tsdev->irq, NULL, mrstouch_pendet_irq,
- 0, "mrstouch", tsdev);
+ IRQF_ONESHOT, "mrstouch", tsdev);
if (err) {
dev_err(tsdev->dev, "unable to allocate irq\n");
goto err_free_mem;
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
index 72f6ba3a4709..953b4c105cad 100644
--- a/drivers/input/touchscreen/pixcir_i2c_ts.c
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -165,7 +165,7 @@ static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
input_set_drvdata(input, tsdata);
error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
- IRQF_TRIGGER_FALLING,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
client->name, tsdata);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 7e7488097359..368d2c6cf780 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -297,7 +297,7 @@ static int __devinit tsc_probe(struct platform_device *pdev)
goto error_clk;
}
- error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, 0,
+ error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, IRQF_ONESHOT,
dev_name(dev), ts);
if (error < 0) {
dev_err(ts->dev, "Could not allocate ts irq\n");
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index b6adeaee9cc5..5ce3fa8ce646 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -650,7 +650,8 @@ static int __devinit tsc2005_probe(struct spi_device *spi)
tsc2005_stop_scan(ts);
error = request_threaded_irq(spi->irq, NULL, tsc2005_irq_thread,
- IRQF_TRIGGER_RISING, "tsc2005", ts);
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "tsc2005", ts);
if (error) {
dev_err(&spi->dev, "Failed to request irq, err: %d\n", error);
goto err_free_mem;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 340893727538..9f69b561f5db 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -13,6 +13,10 @@ menuconfig IOMMU_SUPPORT
if IOMMU_SUPPORT
+config OF_IOMMU
+ def_bool y
+ depends on OF
+
# MSM IOMMU support
config MSM_IOMMU
bool "MSM IOMMU Support"
@@ -154,7 +158,7 @@ config TEGRA_IOMMU_GART
config TEGRA_IOMMU_SMMU
bool "Tegra SMMU IOMMU Support"
- depends on ARCH_TEGRA_3x_SOC
+ depends on ARCH_TEGRA_3x_SOC && TEGRA_AHB
select IOMMU_API
help
Enables support for remapping discontiguous physical memory
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 76e54ef796de..14a4d5fc94fa 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
+obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a2e418cba0ff..6d1cbdfc9b2a 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -83,6 +83,8 @@ static struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
+static struct dma_map_ops amd_iommu_dma_ops;
+
/*
* general struct to manage commands send to an IOMMU
*/
@@ -254,11 +256,21 @@ static bool check_device(struct device *dev)
return true;
}
+static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
+{
+ pci_dev_put(*from);
+ *from = to;
+}
+
+#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
+
static int iommu_init_device(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data;
+ struct iommu_group *group;
u16 alias;
+ int ret;
if (dev->archdata.iommu)
return 0;
@@ -279,8 +291,43 @@ static int iommu_init_device(struct device *dev)
return -ENOTSUPP;
}
dev_data->alias_data = alias_data;
+
+ dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
+ } else
+ dma_pdev = pci_dev_get(pdev);
+
+ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+
+ if (dma_pdev->multifunction &&
+ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
+ swap_pci_ref(&dma_pdev,
+ pci_get_slot(dma_pdev->bus,
+ PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
+ 0)));
+
+ while (!pci_is_root_bus(dma_pdev->bus)) {
+ if (pci_acs_path_enabled(dma_pdev->bus->self,
+ NULL, REQ_ACS_FLAGS))
+ break;
+
+ swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+ }
+
+ group = iommu_group_get(&dma_pdev->dev);
+ pci_dev_put(dma_pdev);
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return PTR_ERR(group);
}
+ ret = iommu_group_add_device(group, dev);
+
+ iommu_group_put(group);
+
+ if (ret)
+ return ret;
+
if (pci_iommuv2_capable(pdev)) {
struct amd_iommu *iommu;
@@ -309,6 +356,8 @@ static void iommu_ignore_device(struct device *dev)
static void iommu_uninit_device(struct device *dev)
{
+ iommu_group_remove_device(dev);
+
/*
* Nothing to do here - we keep dev_data around for unplugged devices
* and reuse it when the device is re-plugged - not doing so would
@@ -382,7 +431,6 @@ DECLARE_STATS_COUNTER(invalidate_iotlb);
DECLARE_STATS_COUNTER(invalidate_iotlb_all);
DECLARE_STATS_COUNTER(pri_requests);
-
static struct dentry *stats_dir;
static struct dentry *de_fflush;
@@ -402,7 +450,7 @@ static void amd_iommu_stats_init(void)
return;
de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
- (u32 *)&amd_iommu_unmap_flush);
+ &amd_iommu_unmap_flush);
amd_iommu_stats_add(&compl_wait);
amd_iommu_stats_add(&cnt_map_single);
@@ -2071,7 +2119,7 @@ out_err:
/* FIXME: Move this to PCI code */
#define PCI_PRI_TLP_OFF (1 << 15)
-bool pci_pri_tlp_required(struct pci_dev *pdev)
+static bool pci_pri_tlp_required(struct pci_dev *pdev)
{
u16 status;
int pos;
@@ -2252,6 +2300,18 @@ static int device_change_notifier(struct notifier_block *nb,
iommu_init_device(dev);
+ /*
+ * dev_data is still NULL and
+ * got initialized in iommu_init_device
+ */
+ dev_data = get_dev_data(dev);
+
+ if (iommu_pass_through || dev_data->iommu_v2) {
+ dev_data->passthrough = true;
+ attach_device(dev, pt_domain);
+ break;
+ }
+
domain = domain_for_device(dev);
/* allocate a protection domain if a device is added */
@@ -2267,6 +2327,10 @@ static int device_change_notifier(struct notifier_block *nb,
list_add_tail(&dma_domain->list, &iommu_pd_list);
spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+ dev_data = get_dev_data(dev);
+
+ dev->archdata.dma_ops = &amd_iommu_dma_ops;
+
break;
case BUS_NOTIFY_DEL_DEVICE:
@@ -2963,6 +3027,11 @@ int __init amd_iommu_init_dma_ops(void)
amd_iommu_stats_init();
+ if (amd_iommu_unmap_flush)
+ pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
+ else
+ pr_info("AMD-Vi: Lazy IO/TLB flushing enabled\n");
+
return 0;
free_domains:
@@ -3069,6 +3138,10 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
dom->priv = domain;
+ dom->geometry.aperture_start = 0;
+ dom->geometry.aperture_end = ~0ULL;
+ dom->geometry.force_aperture = true;
+
return 0;
out_free:
@@ -3227,26 +3300,6 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
-static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
-{
- struct iommu_dev_data *dev_data = dev->archdata.iommu;
- struct pci_dev *pdev = to_pci_dev(dev);
- u16 devid;
-
- if (!dev_data)
- return -ENODEV;
-
- if (pdev->is_virtfn || !iommu_group_mf)
- devid = dev_data->devid;
- else
- devid = calc_devid(pdev->bus->number,
- PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
-
- *groupid = amd_iommu_alias_table[devid];
-
- return 0;
-}
-
static struct iommu_ops amd_iommu_ops = {
.domain_init = amd_iommu_domain_init,
.domain_destroy = amd_iommu_domain_destroy,
@@ -3256,7 +3309,6 @@ static struct iommu_ops amd_iommu_ops = {
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
.domain_has_cap = amd_iommu_domain_has_cap,
- .device_group = amd_iommu_device_group,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 542024ba6dba..500e7f15f5c2 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -26,6 +26,8 @@
#include <linux/msi.h>
#include <linux/amd-iommu.h>
#include <linux/export.h>
+#include <linux/acpi.h>
+#include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -122,14 +124,14 @@ struct ivmd_header {
bool amd_iommu_dump;
-static int __initdata amd_iommu_detected;
+static bool amd_iommu_detected;
static bool __initdata amd_iommu_disabled;
u16 amd_iommu_last_bdf; /* largest PCI device id we have
to handle */
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
we find in ACPI */
-bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
+u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
@@ -149,11 +151,6 @@ bool amd_iommu_v2_present __read_mostly;
bool amd_iommu_force_isolation __read_mostly;
/*
- * The ACPI table parsing functions set this variable on an error
- */
-static int __initdata amd_iommu_init_err;
-
-/*
* List of protection domains - used during resume
*/
LIST_HEAD(amd_iommu_pd_list);
@@ -190,13 +187,23 @@ static u32 dev_table_size; /* size of the device table */
static u32 alias_table_size; /* size of the alias table */
static u32 rlookup_table_size; /* size if the rlookup table */
-/*
- * This function flushes all internal caches of
- * the IOMMU used by this driver.
- */
-extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+enum iommu_init_state {
+ IOMMU_START_STATE,
+ IOMMU_IVRS_DETECTED,
+ IOMMU_ACPI_FINISHED,
+ IOMMU_ENABLED,
+ IOMMU_PCI_INIT,
+ IOMMU_INTERRUPTS_EN,
+ IOMMU_DMA_OPS,
+ IOMMU_INITIALIZED,
+ IOMMU_NOT_FOUND,
+ IOMMU_INIT_ERROR,
+};
+
+static enum iommu_init_state init_state = IOMMU_START_STATE;
static int amd_iommu_enable_interrupts(void);
+static int __init iommu_go_to_state(enum iommu_init_state state);
static inline void update_last_devid(u16 devid)
{
@@ -321,23 +328,6 @@ static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
- static const char * const feat_str[] = {
- "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
- "IA", "GA", "HE", "PC", NULL
- };
- int i;
-
- printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx",
- dev_name(&iommu->dev->dev), iommu->cap_ptr);
-
- if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
- printk(KERN_CONT " extended features: ");
- for (i = 0; feat_str[i]; ++i)
- if (iommu_feature(iommu, (1ULL << i)))
- printk(KERN_CONT " %s", feat_str[i]);
- }
- printk(KERN_CONT "\n");
-
iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
}
@@ -358,7 +348,7 @@ static void iommu_disable(struct amd_iommu *iommu)
* mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
* the system has one.
*/
-static u8 * __init iommu_map_mmio_space(u64 address)
+static u8 __iomem * __init iommu_map_mmio_space(u64 address)
{
if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) {
pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n",
@@ -367,7 +357,7 @@ static u8 * __init iommu_map_mmio_space(u64 address)
return NULL;
}
- return ioremap_nocache(address, MMIO_REGION_LENGTH);
+ return (u8 __iomem *)ioremap_nocache(address, MMIO_REGION_LENGTH);
}
static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
@@ -463,11 +453,9 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
*/
for (i = 0; i < table->length; ++i)
checksum += p[i];
- if (checksum != 0) {
+ if (checksum != 0)
/* ACPI table corrupt */
- amd_iommu_init_err = -ENODEV;
- return 0;
- }
+ return -ENODEV;
p += IVRS_HEADER_LENGTH;
@@ -726,90 +714,6 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
}
/*
- * This function reads some important data from the IOMMU PCI space and
- * initializes the driver data structure with it. It reads the hardware
- * capabilities and the first/last device entries
- */
-static void __init init_iommu_from_pci(struct amd_iommu *iommu)
-{
- int cap_ptr = iommu->cap_ptr;
- u32 range, misc, low, high;
- int i, j;
-
- pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
- &iommu->cap);
- pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
- &range);
- pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
- &misc);
-
- iommu->first_device = calc_devid(MMIO_GET_BUS(range),
- MMIO_GET_FD(range));
- iommu->last_device = calc_devid(MMIO_GET_BUS(range),
- MMIO_GET_LD(range));
- iommu->evt_msi_num = MMIO_MSI_NUM(misc);
-
- if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
- amd_iommu_iotlb_sup = false;
-
- /* read extended feature bits */
- low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
- high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-
- iommu->features = ((u64)high << 32) | low;
-
- if (iommu_feature(iommu, FEATURE_GT)) {
- int glxval;
- u32 pasids;
- u64 shift;
-
- shift = iommu->features & FEATURE_PASID_MASK;
- shift >>= FEATURE_PASID_SHIFT;
- pasids = (1 << shift);
-
- amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
-
- glxval = iommu->features & FEATURE_GLXVAL_MASK;
- glxval >>= FEATURE_GLXVAL_SHIFT;
-
- if (amd_iommu_max_glx_val == -1)
- amd_iommu_max_glx_val = glxval;
- else
- amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
- }
-
- if (iommu_feature(iommu, FEATURE_GT) &&
- iommu_feature(iommu, FEATURE_PPR)) {
- iommu->is_iommu_v2 = true;
- amd_iommu_v2_present = true;
- }
-
- if (!is_rd890_iommu(iommu->dev))
- return;
-
- /*
- * Some rd890 systems may not be fully reconfigured by the BIOS, so
- * it's necessary for us to store this information so it can be
- * reprogrammed on resume
- */
-
- pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
- &iommu->stored_addr_lo);
- pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
- &iommu->stored_addr_hi);
-
- /* Low bit locks writes to configuration space */
- iommu->stored_addr_lo &= ~1;
-
- for (i = 0; i < 6; i++)
- for (j = 0; j < 0x12; j++)
- iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
-
- for (i = 0; i < 0x83; i++)
- iommu->stored_l2[i] = iommu_read_l2(iommu, i);
-}
-
-/*
* Takes a pointer to an AMD IOMMU entry in the ACPI table and
* initializes the hardware and our data structures with it.
*/
@@ -1025,13 +929,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
/*
* Copy data from ACPI table entry to the iommu struct
*/
- iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
- if (!iommu->dev)
- return 1;
-
- iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
- PCI_DEVFN(0, 0));
-
+ iommu->devid = h->devid;
iommu->cap_ptr = h->cap_ptr;
iommu->pci_seg = h->pci_seg;
iommu->mmio_phys = h->mmio_phys;
@@ -1049,20 +947,10 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
iommu->int_enabled = false;
- init_iommu_from_pci(iommu);
init_iommu_from_acpi(iommu, h);
init_iommu_devices(iommu);
- if (iommu_feature(iommu, FEATURE_PPR)) {
- iommu->ppr_log = alloc_ppr_log(iommu);
- if (!iommu->ppr_log)
- return -ENOMEM;
- }
-
- if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
- amd_iommu_np_cache = true;
-
- return pci_enable_device(iommu->dev);
+ return 0;
}
/*
@@ -1093,16 +981,12 @@ static int __init init_iommu_all(struct acpi_table_header *table)
h->mmio_phys);
iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
- if (iommu == NULL) {
- amd_iommu_init_err = -ENOMEM;
- return 0;
- }
+ if (iommu == NULL)
+ return -ENOMEM;
ret = init_iommu_one(iommu, h);
- if (ret) {
- amd_iommu_init_err = ret;
- return 0;
- }
+ if (ret)
+ return ret;
break;
default:
break;
@@ -1115,6 +999,148 @@ static int __init init_iommu_all(struct acpi_table_header *table)
return 0;
}
+static int iommu_init_pci(struct amd_iommu *iommu)
+{
+ int cap_ptr = iommu->cap_ptr;
+ u32 range, misc, low, high;
+
+ iommu->dev = pci_get_bus_and_slot(PCI_BUS(iommu->devid),
+ iommu->devid & 0xff);
+ if (!iommu->dev)
+ return -ENODEV;
+
+ pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
+ &iommu->cap);
+ pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
+ &range);
+ pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
+ &misc);
+
+ iommu->first_device = calc_devid(MMIO_GET_BUS(range),
+ MMIO_GET_FD(range));
+ iommu->last_device = calc_devid(MMIO_GET_BUS(range),
+ MMIO_GET_LD(range));
+
+ if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
+ amd_iommu_iotlb_sup = false;
+
+ /* read extended feature bits */
+ low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
+ high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
+
+ iommu->features = ((u64)high << 32) | low;
+
+ if (iommu_feature(iommu, FEATURE_GT)) {
+ int glxval;
+ u32 pasids;
+ u64 shift;
+
+ shift = iommu->features & FEATURE_PASID_MASK;
+ shift >>= FEATURE_PASID_SHIFT;
+ pasids = (1 << shift);
+
+ amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+ glxval = iommu->features & FEATURE_GLXVAL_MASK;
+ glxval >>= FEATURE_GLXVAL_SHIFT;
+
+ if (amd_iommu_max_glx_val == -1)
+ amd_iommu_max_glx_val = glxval;
+ else
+ amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+ }
+
+ if (iommu_feature(iommu, FEATURE_GT) &&
+ iommu_feature(iommu, FEATURE_PPR)) {
+ iommu->is_iommu_v2 = true;
+ amd_iommu_v2_present = true;
+ }
+
+ if (iommu_feature(iommu, FEATURE_PPR)) {
+ iommu->ppr_log = alloc_ppr_log(iommu);
+ if (!iommu->ppr_log)
+ return -ENOMEM;
+ }
+
+ if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+ amd_iommu_np_cache = true;
+
+ if (is_rd890_iommu(iommu->dev)) {
+ int i, j;
+
+ iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+ PCI_DEVFN(0, 0));
+
+ /*
+ * Some rd890 systems may not be fully reconfigured by the
+ * BIOS, so it's necessary for us to store this information so
+ * it can be reprogrammed on resume
+ */
+ pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
+ &iommu->stored_addr_lo);
+ pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
+ &iommu->stored_addr_hi);
+
+ /* Low bit locks writes to configuration space */
+ iommu->stored_addr_lo &= ~1;
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 0x12; j++)
+ iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
+
+ for (i = 0; i < 0x83; i++)
+ iommu->stored_l2[i] = iommu_read_l2(iommu, i);
+ }
+
+ return pci_enable_device(iommu->dev);
+}
+
+static void print_iommu_info(void)
+{
+ static const char * const feat_str[] = {
+ "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
+ "IA", "GA", "HE", "PC"
+ };
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ int i;
+
+ pr_info("AMD-Vi: Found IOMMU at %s cap 0x%hx\n",
+ dev_name(&iommu->dev->dev), iommu->cap_ptr);
+
+ if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
+ pr_info("AMD-Vi: Extended features: ");
+ for (i = 0; ARRAY_SIZE(feat_str); ++i) {
+ if (iommu_feature(iommu, (1ULL << i)))
+ pr_cont(" %s", feat_str[i]);
+ }
+ }
+ pr_cont("\n");
+ }
+}
+
+static int __init amd_iommu_init_pci(void)
+{
+ struct amd_iommu *iommu;
+ int ret = 0;
+
+ for_each_iommu(iommu) {
+ ret = iommu_init_pci(iommu);
+ if (ret)
+ break;
+ }
+
+ /* Make sure ACS will be enabled */
+ pci_request_acs();
+
+ ret = amd_iommu_init_devices();
+
+ print_iommu_info();
+
+ return ret;
+}
+
/****************************************************************************
*
* The following functions initialize the MSI interrupts for all IOMMUs
@@ -1217,7 +1243,7 @@ static int __init init_exclusion_range(struct ivmd_header *m)
/* called for unity map ACPI definition */
static int __init init_unity_map_range(struct ivmd_header *m)
{
- struct unity_map_entry *e = 0;
+ struct unity_map_entry *e = NULL;
char *s;
e = kzalloc(sizeof(*e), GFP_KERNEL);
@@ -1369,7 +1395,7 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
* This function finally enables all IOMMUs found in the system after
* they have been initialized
*/
-static void enable_iommus(void)
+static void early_enable_iommus(void)
{
struct amd_iommu *iommu;
@@ -1379,14 +1405,29 @@ static void enable_iommus(void)
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
- iommu_enable_ppr_log(iommu);
- iommu_enable_gt(iommu);
iommu_set_exclusion_range(iommu);
iommu_enable(iommu);
iommu_flush_all_caches(iommu);
}
}
+static void enable_iommus_v2(void)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ iommu_enable_ppr_log(iommu);
+ iommu_enable_gt(iommu);
+ }
+}
+
+static void enable_iommus(void)
+{
+ early_enable_iommus();
+
+ enable_iommus_v2();
+}
+
static void disable_iommus(void)
{
struct amd_iommu *iommu;
@@ -1481,16 +1522,23 @@ static void __init free_on_init_error(void)
* After everything is set up the IOMMUs are enabled and the necessary
* hotplug and suspend notifiers are registered.
*/
-int __init amd_iommu_init_hardware(void)
+static int __init early_amd_iommu_init(void)
{
+ struct acpi_table_header *ivrs_base;
+ acpi_size ivrs_size;
+ acpi_status status;
int i, ret = 0;
if (!amd_iommu_detected)
return -ENODEV;
- if (amd_iommu_dev_table != NULL) {
- /* Hardware already initialized */
- return 0;
+ status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+ if (status == AE_NOT_FOUND)
+ return -ENODEV;
+ else if (ACPI_FAILURE(status)) {
+ const char *err = acpi_format_exception(status);
+ pr_err("AMD-Vi: IVRS table error: %s\n", err);
+ return -EINVAL;
}
/*
@@ -1498,10 +1546,7 @@ int __init amd_iommu_init_hardware(void)
* we need to handle. Upon this information the shared data
* structures for the IOMMUs in the system will be allocated
*/
- if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
- return -ENODEV;
-
- ret = amd_iommu_init_err;
+ ret = find_last_devid_acpi(ivrs_base);
if (ret)
goto out;
@@ -1523,20 +1568,20 @@ int __init amd_iommu_init_hardware(void)
amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
get_order(alias_table_size));
if (amd_iommu_alias_table == NULL)
- goto free;
+ goto out;
/* IOMMU rlookup table - find the IOMMU for a specific device */
amd_iommu_rlookup_table = (void *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO,
get_order(rlookup_table_size));
if (amd_iommu_rlookup_table == NULL)
- goto free;
+ goto out;
amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
GFP_KERNEL | __GFP_ZERO,
get_order(MAX_DOMAIN_ID/8));
if (amd_iommu_pd_alloc_bitmap == NULL)
- goto free;
+ goto out;
/* init the device table */
init_device_table();
@@ -1559,38 +1604,18 @@ int __init amd_iommu_init_hardware(void)
* now the data structures are allocated and basically initialized
* start the real acpi table scan
*/
- ret = -ENODEV;
- if (acpi_table_parse("IVRS", init_iommu_all) != 0)
- goto free;
-
- if (amd_iommu_init_err) {
- ret = amd_iommu_init_err;
- goto free;
- }
-
- if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
- goto free;
-
- if (amd_iommu_init_err) {
- ret = amd_iommu_init_err;
- goto free;
- }
-
- ret = amd_iommu_init_devices();
+ ret = init_iommu_all(ivrs_base);
if (ret)
- goto free;
-
- enable_iommus();
-
- amd_iommu_init_notifier();
+ goto out;
- register_syscore_ops(&amd_iommu_syscore_ops);
+ ret = init_memory_definitions(ivrs_base);
+ if (ret)
+ goto out;
out:
- return ret;
-
-free:
- free_on_init_error();
+ /* Don't leak any ACPI memory */
+ early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+ ivrs_base = NULL;
return ret;
}
@@ -1610,26 +1635,29 @@ out:
return ret;
}
-/*
- * This is the core init function for AMD IOMMU hardware in the system.
- * This function is called from the generic x86 DMA layer initialization
- * code.
- *
- * The function calls amd_iommu_init_hardware() to setup and enable the
- * IOMMU hardware if this has not happened yet. After that the driver
- * registers for the DMA-API and for the IOMMU-API as necessary.
- */
-static int __init amd_iommu_init(void)
+static bool detect_ivrs(void)
{
- int ret = 0;
+ struct acpi_table_header *ivrs_base;
+ acpi_size ivrs_size;
+ acpi_status status;
- ret = amd_iommu_init_hardware();
- if (ret)
- goto out;
+ status = acpi_get_table_with_size("IVRS", 0, &ivrs_base, &ivrs_size);
+ if (status == AE_NOT_FOUND)
+ return false;
+ else if (ACPI_FAILURE(status)) {
+ const char *err = acpi_format_exception(status);
+ pr_err("AMD-Vi: IVRS table error: %s\n", err);
+ return false;
+ }
- ret = amd_iommu_enable_interrupts();
- if (ret)
- goto free;
+ early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size);
+
+ return true;
+}
+
+static int amd_iommu_init_dma(void)
+{
+ int ret;
if (iommu_pass_through)
ret = amd_iommu_init_passthrough();
@@ -1637,29 +1665,108 @@ static int __init amd_iommu_init(void)
ret = amd_iommu_init_dma_ops();
if (ret)
- goto free;
+ return ret;
amd_iommu_init_api();
- if (iommu_pass_through)
- goto out;
+ amd_iommu_init_notifier();
- if (amd_iommu_unmap_flush)
- printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
- else
- printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
+ return 0;
+}
- x86_platform.iommu_shutdown = disable_iommus;
+/****************************************************************************
+ *
+ * AMD IOMMU Initialization State Machine
+ *
+ ****************************************************************************/
+
+static int __init state_next(void)
+{
+ int ret = 0;
+
+ switch (init_state) {
+ case IOMMU_START_STATE:
+ if (!detect_ivrs()) {
+ init_state = IOMMU_NOT_FOUND;
+ ret = -ENODEV;
+ } else {
+ init_state = IOMMU_IVRS_DETECTED;
+ }
+ break;
+ case IOMMU_IVRS_DETECTED:
+ ret = early_amd_iommu_init();
+ init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
+ break;
+ case IOMMU_ACPI_FINISHED:
+ early_enable_iommus();
+ register_syscore_ops(&amd_iommu_syscore_ops);
+ x86_platform.iommu_shutdown = disable_iommus;
+ init_state = IOMMU_ENABLED;
+ break;
+ case IOMMU_ENABLED:
+ ret = amd_iommu_init_pci();
+ init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
+ enable_iommus_v2();
+ break;
+ case IOMMU_PCI_INIT:
+ ret = amd_iommu_enable_interrupts();
+ init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
+ break;
+ case IOMMU_INTERRUPTS_EN:
+ ret = amd_iommu_init_dma();
+ init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
+ break;
+ case IOMMU_DMA_OPS:
+ init_state = IOMMU_INITIALIZED;
+ break;
+ case IOMMU_INITIALIZED:
+ /* Nothing to do */
+ break;
+ case IOMMU_NOT_FOUND:
+ case IOMMU_INIT_ERROR:
+ /* Error states => do nothing */
+ ret = -EINVAL;
+ break;
+ default:
+ /* Unknown state */
+ BUG();
+ }
-out:
return ret;
+}
-free:
- disable_iommus();
+static int __init iommu_go_to_state(enum iommu_init_state state)
+{
+ int ret = 0;
+
+ while (init_state != state) {
+ ret = state_next();
+ if (init_state == IOMMU_NOT_FOUND ||
+ init_state == IOMMU_INIT_ERROR)
+ break;
+ }
+
+ return ret;
+}
+
+
+
+/*
+ * This is the core init function for AMD IOMMU hardware in the system.
+ * This function is called from the generic x86 DMA layer initialization
+ * code.
+ */
+static int __init amd_iommu_init(void)
+{
+ int ret;
- free_on_init_error();
+ ret = iommu_go_to_state(IOMMU_INITIALIZED);
+ if (ret) {
+ disable_iommus();
+ free_on_init_error();
+ }
- goto out;
+ return ret;
}
/****************************************************************************
@@ -1669,29 +1776,25 @@ free:
* IOMMUs
*
****************************************************************************/
-static int __init early_amd_iommu_detect(struct acpi_table_header *table)
-{
- return 0;
-}
-
int __init amd_iommu_detect(void)
{
+ int ret;
+
if (no_iommu || (iommu_detected && !gart_iommu_aperture))
return -ENODEV;
if (amd_iommu_disabled)
return -ENODEV;
- if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
- iommu_detected = 1;
- amd_iommu_detected = 1;
- x86_init.iommu.iommu_init = amd_iommu_init;
+ ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
+ if (ret)
+ return ret;
- /* Make sure ACS will be enabled */
- pci_request_acs();
- return 1;
- }
- return -ENODEV;
+ amd_iommu_detected = true;
+ iommu_detected = 1;
+ x86_init.iommu.iommu_init = amd_iommu_init;
+
+ return 0;
}
/****************************************************************************
@@ -1727,8 +1830,8 @@ __setup("amd_iommu=", parse_amd_iommu_options);
IOMMU_INIT_FINISH(amd_iommu_detect,
gart_iommu_hole_init,
- 0,
- 0);
+ NULL,
+ NULL);
bool amd_iommu_v2_supported(void)
{
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 24355559a2ad..d0dab865a8b8 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -487,7 +487,7 @@ struct amd_iommu {
/* physical address of MMIO space */
u64 mmio_phys;
/* virtual address of MMIO space */
- u8 *mmio_base;
+ u8 __iomem *mmio_base;
/* capabilities of that IOMMU read from ACPI */
u32 cap;
@@ -501,6 +501,9 @@ struct amd_iommu {
/* IOMMUv2 */
bool is_iommu_v2;
+ /* PCI device id of the IOMMU device */
+ u16 devid;
+
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
@@ -530,8 +533,6 @@ struct amd_iommu {
u32 evt_buf_size;
/* event buffer virtual address */
u8 *evt_buf;
- /* MSI number for event interrupt */
- u16 evt_msi_num;
/* Base of the PPR log, if present */
u8 *ppr_log;
@@ -652,7 +653,7 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
* If true, the addresses will be flushed on unmap time, not when
* they are reused
*/
-extern bool amd_iommu_unmap_flush;
+extern u32 amd_iommu_unmap_flush;
/* Smallest number of PASIDs supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasids;
@@ -664,6 +665,12 @@ extern bool amd_iommu_force_isolation;
/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;
+/*
+ * This function flushes all internal caches of
+ * the IOMMU used by this driver.
+ */
+extern void iommu_flush_all_caches(struct amd_iommu *iommu);
+
/* takes bus and device/function and returns the device id
* FIXME: should that be in generic PCI code? */
static inline u16 calc_devid(u8 bus, u8 devfn)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 036fe9bf157e..5208828792e6 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -81,7 +81,7 @@ struct fault {
u16 flags;
};
-struct device_state **state_table;
+static struct device_state **state_table;
static spinlock_t state_lock;
/* List and lock for all pasid_states */
@@ -681,6 +681,8 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
atomic_set(&pasid_state->count, 1);
init_waitqueue_head(&pasid_state->wq);
+ spin_lock_init(&pasid_state->lock);
+
pasid_state->task = task;
pasid_state->mm = get_task_mm(task);
pasid_state->device_state = dev_state;
@@ -924,7 +926,7 @@ static int __init amd_iommu_v2_init(void)
pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
if (!amd_iommu_v2_supported()) {
- pr_info("AMD IOMMUv2 functionality not available on this sytem\n");
+ pr_info("AMD IOMMUv2 functionality not available on this system\n");
/*
* Load anyway to provide the symbols to other modules
* which may use AMD IOMMUv2 optionally.
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3a74e4410fc0..86e2f4a62b9a 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -26,6 +26,8 @@
* These routines are used by both DMA-remapping and Interrupt-remapping
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
+
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
@@ -39,8 +41,6 @@
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
-#define PREFIX "DMAR: "
-
/* No locks are needed as DMA remapping hardware unit
* list is constructed at boot time and hotplug of
* these units are not supported by the architecture.
@@ -83,16 +83,12 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
* ignore it
*/
if (!bus) {
- printk(KERN_WARNING
- PREFIX "Device scope bus [%d] not found\n",
- scope->bus);
+ pr_warn("Device scope bus [%d] not found\n", scope->bus);
break;
}
pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
if (!pdev) {
- printk(KERN_WARNING PREFIX
- "Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, bus->number, path->dev, path->fn);
+ /* warning will be printed below */
break;
}
path ++;
@@ -100,9 +96,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
bus = pdev->subordinate;
}
if (!pdev) {
- printk(KERN_WARNING PREFIX
- "Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, scope->bus, path->dev, path->fn);
+ pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
+ segment, scope->bus, path->dev, path->fn);
*dev = NULL;
return 0;
}
@@ -110,9 +105,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
pdev->subordinate) || (scope->entry_type == \
ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
pci_dev_put(pdev);
- printk(KERN_WARNING PREFIX
- "Device scope type does not match for %s\n",
- pci_name(pdev));
+ pr_warn("Device scope type does not match for %s\n",
+ pci_name(pdev));
return -EINVAL;
}
*dev = pdev;
@@ -134,8 +128,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
(*cnt)++;
else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
- printk(KERN_WARNING PREFIX
- "Unsupported device scope\n");
+ pr_warn("Unsupported device scope\n");
}
start += scope->length;
}
@@ -261,25 +254,23 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
drhd = container_of(header, struct acpi_dmar_hardware_unit,
header);
- printk (KERN_INFO PREFIX
- "DRHD base: %#016Lx flags: %#x\n",
+ pr_info("DRHD base: %#016Lx flags: %#x\n",
(unsigned long long)drhd->address, drhd->flags);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
rmrr = container_of(header, struct acpi_dmar_reserved_memory,
header);
- printk (KERN_INFO PREFIX
- "RMRR base: %#016Lx end: %#016Lx\n",
+ pr_info("RMRR base: %#016Lx end: %#016Lx\n",
(unsigned long long)rmrr->base_address,
(unsigned long long)rmrr->end_address);
break;
case ACPI_DMAR_TYPE_ATSR:
atsr = container_of(header, struct acpi_dmar_atsr, header);
- printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+ pr_info("ATSR flags: %#x\n", atsr->flags);
break;
case ACPI_DMAR_HARDWARE_AFFINITY:
rhsa = container_of(header, struct acpi_dmar_rhsa, header);
- printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+ pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
(unsigned long long)rhsa->base_address,
rhsa->proximity_domain);
break;
@@ -299,7 +290,7 @@ static int __init dmar_table_detect(void)
&dmar_tbl_size);
if (ACPI_SUCCESS(status) && !dmar_tbl) {
- printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+ pr_warn("Unable to map DMAR\n");
status = AE_NOT_FOUND;
}
@@ -333,20 +324,18 @@ parse_dmar_table(void)
return -ENODEV;
if (dmar->width < PAGE_SHIFT - 1) {
- printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
+ pr_warn("Invalid DMAR haw\n");
return -EINVAL;
}
- printk (KERN_INFO PREFIX "Host address width %d\n",
- dmar->width + 1);
+ pr_info("Host address width %d\n", dmar->width + 1);
entry_header = (struct acpi_dmar_header *)(dmar + 1);
while (((unsigned long)entry_header) <
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
- printk(KERN_WARNING PREFIX
- "Invalid 0-length structure\n");
+ pr_warn("Invalid 0-length structure\n");
ret = -EINVAL;
break;
}
@@ -369,8 +358,7 @@ parse_dmar_table(void)
#endif
break;
default:
- printk(KERN_WARNING PREFIX
- "Unknown DMAR structure type %d\n",
+ pr_warn("Unknown DMAR structure type %d\n",
entry_header->type);
ret = 0; /* for forward compatibility */
break;
@@ -469,12 +457,12 @@ int __init dmar_table_init(void)
ret = parse_dmar_table();
if (ret) {
if (ret != -ENODEV)
- printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
+ pr_info("parse DMAR table failure.\n");
return ret;
}
if (list_empty(&dmar_drhd_units)) {
- printk(KERN_INFO PREFIX "No DMAR devices found\n");
+ pr_info("No DMAR devices found\n");
return -ENODEV;
}
@@ -506,8 +494,7 @@ int __init check_zero_address(void)
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
- printk(KERN_WARNING PREFIX
- "Invalid 0-length structure\n");
+ pr_warn("Invalid 0-length structure\n");
return 0;
}
@@ -558,8 +545,7 @@ int __init detect_intel_iommu(void)
if (ret && irq_remapping_enabled && cpu_has_x2apic &&
dmar->flags & 0x1)
- printk(KERN_INFO
- "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+ pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
@@ -579,14 +565,89 @@ int __init detect_intel_iommu(void)
}
+static void unmap_iommu(struct intel_iommu *iommu)
+{
+ iounmap(iommu->reg);
+ release_mem_region(iommu->reg_phys, iommu->reg_size);
+}
+
+/**
+ * map_iommu: map the iommu's registers
+ * @iommu: the iommu to map
+ * @phys_addr: the physical address of the base resgister
+ *
+ * Memory map the iommu's registers. Start w/ a single page, and
+ * possibly expand if that turns out to be insufficent.
+ */
+static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
+{
+ int map_size, err=0;
+
+ iommu->reg_phys = phys_addr;
+ iommu->reg_size = VTD_PAGE_SIZE;
+
+ if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
+ pr_err("IOMMU: can't reserve memory\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+ if (!iommu->reg) {
+ pr_err("IOMMU: can't map the region\n");
+ err = -ENOMEM;
+ goto release;
+ }
+
+ iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+ iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+ if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+ err = -EINVAL;
+ warn_invalid_dmar(phys_addr, " returns all ones");
+ goto unmap;
+ }
+
+ /* the registers might be more than one page */
+ map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+ cap_max_fault_reg_offset(iommu->cap));
+ map_size = VTD_PAGE_ALIGN(map_size);
+ if (map_size > iommu->reg_size) {
+ iounmap(iommu->reg);
+ release_mem_region(iommu->reg_phys, iommu->reg_size);
+ iommu->reg_size = map_size;
+ if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
+ iommu->name)) {
+ pr_err("IOMMU: can't reserve memory\n");
+ err = -EBUSY;
+ goto out;
+ }
+ iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+ if (!iommu->reg) {
+ pr_err("IOMMU: can't map the region\n");
+ err = -ENOMEM;
+ goto release;
+ }
+ }
+ err = 0;
+ goto out;
+
+unmap:
+ iounmap(iommu->reg);
+release:
+ release_mem_region(iommu->reg_phys, iommu->reg_size);
+out:
+ return err;
+}
+
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
- int map_size;
u32 ver;
static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
+ int err;
if (!drhd->reg_base_addr) {
warn_invalid_dmar(0, "");
@@ -600,30 +661,22 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->seq_id = iommu_allocated++;
sprintf (iommu->name, "dmar%d", iommu->seq_id);
- iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
- if (!iommu->reg) {
- printk(KERN_ERR "IOMMU: can't map the region\n");
+ err = map_iommu(iommu, drhd->reg_base_addr);
+ if (err) {
+ pr_err("IOMMU: failed to map %s\n", iommu->name);
goto error;
}
- iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
- iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
- if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
- warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
- goto err_unmap;
- }
+ err = -EINVAL;
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
- printk(KERN_ERR
- "Cannot get a valid agaw for iommu (seq_id = %d)\n",
- iommu->seq_id);
+ pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
goto err_unmap;
}
msagaw = iommu_calculate_max_sagaw(iommu);
if (msagaw < 0) {
- printk(KERN_ERR
- "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
+ pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto err_unmap;
}
@@ -632,19 +685,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->node = -1;
- /* the registers might be more than one page */
- map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
- cap_max_fault_reg_offset(iommu->cap));
- map_size = VTD_PAGE_ALIGN(map_size);
- if (map_size > VTD_PAGE_SIZE) {
- iounmap(iommu->reg);
- iommu->reg = ioremap(drhd->reg_base_addr, map_size);
- if (!iommu->reg) {
- printk(KERN_ERR "IOMMU: can't map the region\n");
- goto error;
- }
- }
-
ver = readl(iommu->reg + DMAR_VER_REG);
pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
iommu->seq_id,
@@ -659,10 +699,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0;
err_unmap:
- iounmap(iommu->reg);
+ unmap_iommu(iommu);
error:
kfree(iommu);
- return -1;
+ return err;
}
void free_iommu(struct intel_iommu *iommu)
@@ -673,7 +713,8 @@ void free_iommu(struct intel_iommu *iommu)
free_dmar_iommu(iommu);
if (iommu->reg)
- iounmap(iommu->reg);
+ unmap_iommu(iommu);
+
kfree(iommu);
}
@@ -710,7 +751,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
if (fault & DMA_FSTS_IQE) {
head = readl(iommu->reg + DMAR_IQH_REG);
if ((head >> DMAR_IQ_SHIFT) == index) {
- printk(KERN_ERR "VT-d detected invalid descriptor: "
+ pr_err("VT-d detected invalid descriptor: "
"low=%llx, high=%llx\n",
(unsigned long long)qi->desc[index].low,
(unsigned long long)qi->desc[index].high);
@@ -1129,15 +1170,14 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
reason = dmar_get_fault_reason(fault_reason, &fault_type);
if (fault_type == INTR_REMAP)
- printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
+ pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
"fault index %llx\n"
"INTR-REMAP:[fault reason %02d] %s\n",
(source_id >> 8), PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr >> 48,
fault_reason, reason);
else
- printk(KERN_ERR
- "DMAR:[%s] Request device [%02x:%02x.%d] "
+ pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
"fault addr %llx \n"
"DMAR:[fault reason %02d] %s\n",
(type ? "DMA Read" : "DMA Write"),
@@ -1157,8 +1197,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
if (fault_status)
- printk(KERN_ERR "DRHD: handling fault status reg %x\n",
- fault_status);
+ pr_err("DRHD: handling fault status reg %x\n", fault_status);
/* TBD: ignore advanced fault log currently */
if (!(fault_status & DMA_FSTS_PPF))
@@ -1224,7 +1263,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
irq = create_irq();
if (!irq) {
- printk(KERN_ERR "IOMMU: no free vectors\n");
+ pr_err("IOMMU: no free vectors\n");
return -EINVAL;
}
@@ -1241,7 +1280,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
if (ret)
- printk(KERN_ERR "IOMMU: can't request irq\n");
+ pr_err("IOMMU: can't request irq\n");
return ret;
}
@@ -1258,8 +1297,7 @@ int __init enable_drhd_fault_handling(void)
ret = dmar_set_interrupt(iommu);
if (ret) {
- printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
- " interrupt, ret %d\n",
+ pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
(unsigned long long)drhd->reg_base_addr, ret);
return -1;
}
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 9a114b9ff170..45350ff5e93c 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -317,7 +317,7 @@ static int default_fault_handler(enum exynos_sysmmu_inttype itype,
if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
itype = SYSMMU_FAULT_UNKNOWN;
- pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n",
+ pr_err("%s occurred at 0x%lx(Page table base: 0x%lx)\n",
sysmmu_fault_name[itype], fault_addr, pgtable_base);
ent = section_entry(__va(pgtable_base), fault_addr);
@@ -732,6 +732,10 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain)
spin_lock_init(&priv->pgtablelock);
INIT_LIST_HEAD(&priv->clients);
+ dom->geometry.aperture_start = 0;
+ dom->geometry.aperture_end = ~0UL;
+ dom->geometry.force_aperture = true;
+
domain->priv = priv;
return 0;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b12af2ff8c54..7469b5346643 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -661,7 +661,7 @@ static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
if (drhd->devices[i] &&
drhd->devices[i]->subordinate &&
drhd->devices[i]->subordinate->number <= bus &&
- drhd->devices[i]->subordinate->subordinate >= bus)
+ drhd->devices[i]->subordinate->busn_res.end >= bus)
return drhd->iommu;
}
@@ -3932,6 +3932,10 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
domain_update_iommu_cap(dmar_domain);
domain->priv = dmar_domain;
+ domain->geometry.aperture_start = 0;
+ domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw);
+ domain->geometry.force_aperture = true;
+
return 0;
}
@@ -4090,52 +4094,70 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
-/*
- * Group numbers are arbitrary. Device with the same group number
- * indicate the iommu cannot differentiate between them. To avoid
- * tracking used groups we just use the seg|bus|devfn of the lowest
- * level we're able to differentiate devices
- */
-static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct pci_dev *bridge;
- union {
- struct {
- u8 devfn;
- u8 bus;
- u16 segment;
- } pci;
- u32 group;
- } id;
+ pci_dev_put(*from);
+ *from = to;
+}
- if (iommu_no_mapping(dev))
- return -ENODEV;
+#define REQ_ACS_FLAGS (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
- id.pci.segment = pci_domain_nr(pdev->bus);
- id.pci.bus = pdev->bus->number;
- id.pci.devfn = pdev->devfn;
+static int intel_iommu_add_device(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *bridge, *dma_pdev;
+ struct iommu_group *group;
+ int ret;
- if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+ if (!device_to_iommu(pci_domain_nr(pdev->bus),
+ pdev->bus->number, pdev->devfn))
return -ENODEV;
bridge = pci_find_upstream_pcie_bridge(pdev);
if (bridge) {
- if (pci_is_pcie(bridge)) {
- id.pci.bus = bridge->subordinate->number;
- id.pci.devfn = 0;
- } else {
- id.pci.bus = bridge->bus->number;
- id.pci.devfn = bridge->devfn;
- }
+ if (pci_is_pcie(bridge))
+ dma_pdev = pci_get_domain_bus_and_slot(
+ pci_domain_nr(pdev->bus),
+ bridge->subordinate->number, 0);
+ else
+ dma_pdev = pci_dev_get(bridge);
+ } else
+ dma_pdev = pci_dev_get(pdev);
+
+ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev));
+
+ if (dma_pdev->multifunction &&
+ !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS))
+ swap_pci_ref(&dma_pdev,
+ pci_get_slot(dma_pdev->bus,
+ PCI_DEVFN(PCI_SLOT(dma_pdev->devfn),
+ 0)));
+
+ while (!pci_is_root_bus(dma_pdev->bus)) {
+ if (pci_acs_path_enabled(dma_pdev->bus->self,
+ NULL, REQ_ACS_FLAGS))
+ break;
+
+ swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self));
+ }
+
+ group = iommu_group_get(&dma_pdev->dev);
+ pci_dev_put(dma_pdev);
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group))
+ return PTR_ERR(group);
}
- if (!pdev->is_virtfn && iommu_group_mf)
- id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+ ret = iommu_group_add_device(group, dev);
- *groupid = id.group;
+ iommu_group_put(group);
+ return ret;
+}
- return 0;
+static void intel_iommu_remove_device(struct device *dev)
+{
+ iommu_group_remove_device(dev);
}
static struct iommu_ops intel_iommu_ops = {
@@ -4147,7 +4169,8 @@ static struct iommu_ops intel_iommu_ops = {
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
.domain_has_cap = intel_iommu_domain_has_cap,
- .device_group = intel_iommu_device_group,
+ .add_device = intel_iommu_add_device,
+ .remove_device = intel_iommu_remove_device,
.pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 6d347064b8b0..e0b18f3ae9a8 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -902,7 +902,6 @@ static int intel_setup_ioapic_entry(int irq,
return 0;
}
-#ifdef CONFIG_SMP
/*
* Migrate the IO-APIC irq in the presence of intr-remapping.
*
@@ -924,6 +923,10 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
struct irq_cfg *cfg = data->chip_data;
unsigned int dest, irq = data->irq;
struct irte irte;
+ int err;
+
+ if (!config_enabled(CONFIG_SMP))
+ return -EINVAL;
if (!cpumask_intersects(mask, cpu_online_mask))
return -EINVAL;
@@ -931,10 +934,16 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
if (get_irte(irq, &irte))
return -EBUSY;
- if (assign_irq_vector(irq, cfg, mask))
- return -EBUSY;
+ err = assign_irq_vector(irq, cfg, mask);
+ if (err)
+ return err;
- dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
+ err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
+ if (err) {
+ if (assign_irq_vector(irq, cfg, data->affinity))
+ pr_err("Failed to recover vector for irq %d\n", irq);
+ return err;
+ }
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
@@ -956,7 +965,6 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
cpumask_copy(data->affinity, mask);
return 0;
}
-#endif
static void intel_compose_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
@@ -1058,9 +1066,7 @@ struct irq_remap_ops intel_irq_remap_ops = {
.reenable = reenable_irq_remapping,
.enable_faulting = enable_drhd_fault_handling,
.setup_ioapic_entry = intel_setup_ioapic_entry,
-#ifdef CONFIG_SMP
.set_affinity = intel_ioapic_set_affinity,
-#endif
.free_irq = free_irte,
.compose_msi_msg = intel_compose_msi_msg,
.msi_alloc_irq = intel_msi_alloc_irq,
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8b9ded88e6f5..ddbdacad7768 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -26,60 +26,535 @@
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
+#include <linux/idr.h>
+#include <linux/notifier.h>
+#include <linux/err.h>
+
+static struct kset *iommu_group_kset;
+static struct ida iommu_group_ida;
+static struct mutex iommu_group_mutex;
+
+struct iommu_group {
+ struct kobject kobj;
+ struct kobject *devices_kobj;
+ struct list_head devices;
+ struct mutex mutex;
+ struct blocking_notifier_head notifier;
+ void *iommu_data;
+ void (*iommu_data_release)(void *iommu_data);
+ char *name;
+ int id;
+};
+
+struct iommu_device {
+ struct list_head list;
+ struct device *dev;
+ char *name;
+};
+
+struct iommu_group_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct iommu_group *group, char *buf);
+ ssize_t (*store)(struct iommu_group *group,
+ const char *buf, size_t count);
+};
+
+#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
+struct iommu_group_attribute iommu_group_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+#define to_iommu_group_attr(_attr) \
+ container_of(_attr, struct iommu_group_attribute, attr)
+#define to_iommu_group(_kobj) \
+ container_of(_kobj, struct iommu_group, kobj)
-static ssize_t show_iommu_group(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t iommu_group_attr_show(struct kobject *kobj,
+ struct attribute *__attr, char *buf)
{
- unsigned int groupid;
+ struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
+ struct iommu_group *group = to_iommu_group(kobj);
+ ssize_t ret = -EIO;
- if (iommu_device_group(dev, &groupid))
- return 0;
+ if (attr->show)
+ ret = attr->show(group, buf);
+ return ret;
+}
+
+static ssize_t iommu_group_attr_store(struct kobject *kobj,
+ struct attribute *__attr,
+ const char *buf, size_t count)
+{
+ struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
+ struct iommu_group *group = to_iommu_group(kobj);
+ ssize_t ret = -EIO;
+
+ if (attr->store)
+ ret = attr->store(group, buf, count);
+ return ret;
+}
+
+static const struct sysfs_ops iommu_group_sysfs_ops = {
+ .show = iommu_group_attr_show,
+ .store = iommu_group_attr_store,
+};
- return sprintf(buf, "%u", groupid);
+static int iommu_group_create_file(struct iommu_group *group,
+ struct iommu_group_attribute *attr)
+{
+ return sysfs_create_file(&group->kobj, &attr->attr);
}
-static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
-static int add_iommu_group(struct device *dev, void *data)
+static void iommu_group_remove_file(struct iommu_group *group,
+ struct iommu_group_attribute *attr)
+{
+ sysfs_remove_file(&group->kobj, &attr->attr);
+}
+
+static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
+{
+ return sprintf(buf, "%s\n", group->name);
+}
+
+static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
+
+static void iommu_group_release(struct kobject *kobj)
+{
+ struct iommu_group *group = to_iommu_group(kobj);
+
+ if (group->iommu_data_release)
+ group->iommu_data_release(group->iommu_data);
+
+ mutex_lock(&iommu_group_mutex);
+ ida_remove(&iommu_group_ida, group->id);
+ mutex_unlock(&iommu_group_mutex);
+
+ kfree(group->name);
+ kfree(group);
+}
+
+static struct kobj_type iommu_group_ktype = {
+ .sysfs_ops = &iommu_group_sysfs_ops,
+ .release = iommu_group_release,
+};
+
+/**
+ * iommu_group_alloc - Allocate a new group
+ * @name: Optional name to associate with group, visible in sysfs
+ *
+ * This function is called by an iommu driver to allocate a new iommu
+ * group. The iommu group represents the minimum granularity of the iommu.
+ * Upon successful return, the caller holds a reference to the supplied
+ * group in order to hold the group until devices are added. Use
+ * iommu_group_put() to release this extra reference count, allowing the
+ * group to be automatically reclaimed once it has no devices or external
+ * references.
+ */
+struct iommu_group *iommu_group_alloc(void)
+{
+ struct iommu_group *group;
+ int ret;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return ERR_PTR(-ENOMEM);
+
+ group->kobj.kset = iommu_group_kset;
+ mutex_init(&group->mutex);
+ INIT_LIST_HEAD(&group->devices);
+ BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
+
+ mutex_lock(&iommu_group_mutex);
+
+again:
+ if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
+ kfree(group);
+ mutex_unlock(&iommu_group_mutex);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
+ goto again;
+
+ mutex_unlock(&iommu_group_mutex);
+
+ ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
+ NULL, "%d", group->id);
+ if (ret) {
+ mutex_lock(&iommu_group_mutex);
+ ida_remove(&iommu_group_ida, group->id);
+ mutex_unlock(&iommu_group_mutex);
+ kfree(group);
+ return ERR_PTR(ret);
+ }
+
+ group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
+ if (!group->devices_kobj) {
+ kobject_put(&group->kobj); /* triggers .release & free */
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * The devices_kobj holds a reference on the group kobject, so
+ * as long as that exists so will the group. We can therefore
+ * use the devices_kobj for reference counting.
+ */
+ kobject_put(&group->kobj);
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_alloc);
+
+/**
+ * iommu_group_get_iommudata - retrieve iommu_data registered for a group
+ * @group: the group
+ *
+ * iommu drivers can store data in the group for use when doing iommu
+ * operations. This function provides a way to retrieve it. Caller
+ * should hold a group reference.
+ */
+void *iommu_group_get_iommudata(struct iommu_group *group)
+{
+ return group->iommu_data;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
+
+/**
+ * iommu_group_set_iommudata - set iommu_data for a group
+ * @group: the group
+ * @iommu_data: new data
+ * @release: release function for iommu_data
+ *
+ * iommu drivers can store data in the group for use when doing iommu
+ * operations. This function provides a way to set the data after
+ * the group has been allocated. Caller should hold a group reference.
+ */
+void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
+ void (*release)(void *iommu_data))
{
- unsigned int groupid;
+ group->iommu_data = iommu_data;
+ group->iommu_data_release = release;
+}
+EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
- if (iommu_device_group(dev, &groupid) == 0)
- return device_create_file(dev, &dev_attr_iommu_group);
+/**
+ * iommu_group_set_name - set name for a group
+ * @group: the group
+ * @name: name
+ *
+ * Allow iommu driver to set a name for a group. When set it will
+ * appear in a name attribute file under the group in sysfs.
+ */
+int iommu_group_set_name(struct iommu_group *group, const char *name)
+{
+ int ret;
+
+ if (group->name) {
+ iommu_group_remove_file(group, &iommu_group_attr_name);
+ kfree(group->name);
+ group->name = NULL;
+ if (!name)
+ return 0;
+ }
+
+ group->name = kstrdup(name, GFP_KERNEL);
+ if (!group->name)
+ return -ENOMEM;
+
+ ret = iommu_group_create_file(group, &iommu_group_attr_name);
+ if (ret) {
+ kfree(group->name);
+ group->name = NULL;
+ return ret;
+ }
return 0;
}
+EXPORT_SYMBOL_GPL(iommu_group_set_name);
+
+/**
+ * iommu_group_add_device - add a device to an iommu group
+ * @group: the group into which to add the device (reference should be held)
+ * @dev: the device
+ *
+ * This function is called by an iommu driver to add a device into a
+ * group. Adding a device increments the group reference count.
+ */
+int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+{
+ int ret, i = 0;
+ struct iommu_device *device;
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ device->dev = dev;
+
+ ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
+ if (ret) {
+ kfree(device);
+ return ret;
+ }
+
+ device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
+rename:
+ if (!device->name) {
+ sysfs_remove_link(&dev->kobj, "iommu_group");
+ kfree(device);
+ return -ENOMEM;
+ }
+
+ ret = sysfs_create_link_nowarn(group->devices_kobj,
+ &dev->kobj, device->name);
+ if (ret) {
+ kfree(device->name);
+ if (ret == -EEXIST && i >= 0) {
+ /*
+ * Account for the slim chance of collision
+ * and append an instance to the name.
+ */
+ device->name = kasprintf(GFP_KERNEL, "%s.%d",
+ kobject_name(&dev->kobj), i++);
+ goto rename;
+ }
+
+ sysfs_remove_link(&dev->kobj, "iommu_group");
+ kfree(device);
+ return ret;
+ }
+
+ kobject_get(group->devices_kobj);
+
+ dev->iommu_group = group;
+
+ mutex_lock(&group->mutex);
+ list_add_tail(&device->list, &group->devices);
+ mutex_unlock(&group->mutex);
+
+ /* Notify any listeners about change to group. */
+ blocking_notifier_call_chain(&group->notifier,
+ IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iommu_group_add_device);
+
+/**
+ * iommu_group_remove_device - remove a device from it's current group
+ * @dev: device to be removed
+ *
+ * This function is called by an iommu driver to remove the device from
+ * it's current group. This decrements the iommu group reference count.
+ */
+void iommu_group_remove_device(struct device *dev)
+{
+ struct iommu_group *group = dev->iommu_group;
+ struct iommu_device *tmp_device, *device = NULL;
+
+ /* Pre-notify listeners that a device is being removed. */
+ blocking_notifier_call_chain(&group->notifier,
+ IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
+
+ mutex_lock(&group->mutex);
+ list_for_each_entry(tmp_device, &group->devices, list) {
+ if (tmp_device->dev == dev) {
+ device = tmp_device;
+ list_del(&device->list);
+ break;
+ }
+ }
+ mutex_unlock(&group->mutex);
+
+ if (!device)
+ return;
+
+ sysfs_remove_link(group->devices_kobj, device->name);
+ sysfs_remove_link(&dev->kobj, "iommu_group");
+
+ kfree(device->name);
+ kfree(device);
+ dev->iommu_group = NULL;
+ kobject_put(group->devices_kobj);
+}
+EXPORT_SYMBOL_GPL(iommu_group_remove_device);
+
+/**
+ * iommu_group_for_each_dev - iterate over each device in the group
+ * @group: the group
+ * @data: caller opaque data to be passed to callback function
+ * @fn: caller supplied callback function
+ *
+ * This function is called by group users to iterate over group devices.
+ * Callers should hold a reference count to the group during callback.
+ * The group->mutex is held across callbacks, which will block calls to
+ * iommu_group_add/remove_device.
+ */
+int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+ int (*fn)(struct device *, void *))
+{
+ struct iommu_device *device;
+ int ret = 0;
+
+ mutex_lock(&group->mutex);
+ list_for_each_entry(device, &group->devices, list) {
+ ret = fn(device->dev, data);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&group->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
+
+/**
+ * iommu_group_get - Return the group for a device and increment reference
+ * @dev: get the group that this device belongs to
+ *
+ * This function is called by iommu drivers and users to get the group
+ * for the specified device. If found, the group is returned and the group
+ * reference in incremented, else NULL.
+ */
+struct iommu_group *iommu_group_get(struct device *dev)
+{
+ struct iommu_group *group = dev->iommu_group;
+
+ if (group)
+ kobject_get(group->devices_kobj);
+
+ return group;
+}
+EXPORT_SYMBOL_GPL(iommu_group_get);
+
+/**
+ * iommu_group_put - Decrement group reference
+ * @group: the group to use
+ *
+ * This function is called by iommu drivers and users to release the
+ * iommu group. Once the reference count is zero, the group is released.
+ */
+void iommu_group_put(struct iommu_group *group)
+{
+ if (group)
+ kobject_put(group->devices_kobj);
+}
+EXPORT_SYMBOL_GPL(iommu_group_put);
+
+/**
+ * iommu_group_register_notifier - Register a notifier for group changes
+ * @group: the group to watch
+ * @nb: notifier block to signal
+ *
+ * This function allows iommu group users to track changes in a group.
+ * See include/linux/iommu.h for actions sent via this notifier. Caller
+ * should hold a reference to the group throughout notifier registration.
+ */
+int iommu_group_register_notifier(struct iommu_group *group,
+ struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&group->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
+
+/**
+ * iommu_group_unregister_notifier - Unregister a notifier
+ * @group: the group to watch
+ * @nb: notifier block to signal
+ *
+ * Unregister a previously registered group notifier block.
+ */
+int iommu_group_unregister_notifier(struct iommu_group *group,
+ struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&group->notifier, nb);
+}
+EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
+
+/**
+ * iommu_group_id - Return ID for a group
+ * @group: the group to ID
+ *
+ * Return the unique ID for the group matching the sysfs group number.
+ */
+int iommu_group_id(struct iommu_group *group)
+{
+ return group->id;
+}
+EXPORT_SYMBOL_GPL(iommu_group_id);
-static int remove_iommu_group(struct device *dev)
+static int add_iommu_group(struct device *dev, void *data)
{
- unsigned int groupid;
+ struct iommu_ops *ops = data;
+
+ if (!ops->add_device)
+ return -ENODEV;
- if (iommu_device_group(dev, &groupid) == 0)
- device_remove_file(dev, &dev_attr_iommu_group);
+ WARN_ON(dev->iommu_group);
+
+ ops->add_device(dev);
return 0;
}
-static int iommu_device_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static int iommu_bus_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
{
struct device *dev = data;
+ struct iommu_ops *ops = dev->bus->iommu_ops;
+ struct iommu_group *group;
+ unsigned long group_action = 0;
+
+ /*
+ * ADD/DEL call into iommu driver ops if provided, which may
+ * result in ADD/DEL notifiers to group->notifier
+ */
+ if (action == BUS_NOTIFY_ADD_DEVICE) {
+ if (ops->add_device)
+ return ops->add_device(dev);
+ } else if (action == BUS_NOTIFY_DEL_DEVICE) {
+ if (ops->remove_device && dev->iommu_group) {
+ ops->remove_device(dev);
+ return 0;
+ }
+ }
+
+ /*
+ * Remaining BUS_NOTIFYs get filtered and republished to the
+ * group, if anyone is listening
+ */
+ group = iommu_group_get(dev);
+ if (!group)
+ return 0;
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
+ break;
+ case BUS_NOTIFY_BOUND_DRIVER:
+ group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
+ break;
+ case BUS_NOTIFY_UNBIND_DRIVER:
+ group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
+ break;
+ }
- if (action == BUS_NOTIFY_ADD_DEVICE)
- return add_iommu_group(dev, NULL);
- else if (action == BUS_NOTIFY_DEL_DEVICE)
- return remove_iommu_group(dev);
+ if (group_action)
+ blocking_notifier_call_chain(&group->notifier,
+ group_action, dev);
+ iommu_group_put(group);
return 0;
}
-static struct notifier_block iommu_device_nb = {
- .notifier_call = iommu_device_notifier,
+static struct notifier_block iommu_bus_nb = {
+ .notifier_call = iommu_bus_notifier,
};
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
- bus_register_notifier(bus, &iommu_device_nb);
- bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
+ bus_register_notifier(bus, &iommu_bus_nb);
+ bus_for_each_dev(bus, NULL, ops, add_iommu_group);
}
/**
@@ -192,6 +667,45 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
+/*
+ * IOMMU groups are really the natrual working unit of the IOMMU, but
+ * the IOMMU API works on domains and devices. Bridge that gap by
+ * iterating over the devices in a group. Ideally we'd have a single
+ * device which represents the requestor ID of the group, but we also
+ * allow IOMMU drivers to create policy defined minimum sets, where
+ * the physical hardware may be able to distiguish members, but we
+ * wish to group them at a higher level (ex. untrusted multi-function
+ * PCI devices). Thus we attach each device.
+ */
+static int iommu_group_do_attach_device(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+
+ return iommu_attach_device(domain, dev);
+}
+
+int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
+{
+ return iommu_group_for_each_dev(group, domain,
+ iommu_group_do_attach_device);
+}
+EXPORT_SYMBOL_GPL(iommu_attach_group);
+
+static int iommu_group_do_detach_device(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+
+ iommu_detach_device(domain, dev);
+
+ return 0;
+}
+
+void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
+{
+ iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
+}
+EXPORT_SYMBOL_GPL(iommu_detach_group);
+
phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
unsigned long iova)
{
@@ -336,11 +850,48 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
}
EXPORT_SYMBOL_GPL(iommu_unmap);
-int iommu_device_group(struct device *dev, unsigned int *groupid)
+static int __init iommu_init(void)
{
- if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
- return dev->bus->iommu_ops->device_group(dev, groupid);
+ iommu_group_kset = kset_create_and_add("iommu_groups",
+ NULL, kernel_kobj);
+ ida_init(&iommu_group_ida);
+ mutex_init(&iommu_group_mutex);
+
+ BUG_ON(!iommu_group_kset);
+
+ return 0;
+}
+subsys_initcall(iommu_init);
+
+int iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ struct iommu_domain_geometry *geometry;
+ int ret = 0;
+
+ switch (attr) {
+ case DOMAIN_ATTR_GEOMETRY:
+ geometry = data;
+ *geometry = domain->geometry;
+
+ break;
+ default:
+ if (!domain->ops->domain_get_attr)
+ return -EINVAL;
+
+ ret = domain->ops->domain_get_attr(domain, attr, data);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
+
+int iommu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ if (!domain->ops->domain_set_attr)
+ return -EINVAL;
- return -ENODEV;
+ return domain->ops->domain_set_attr(domain, attr, data);
}
-EXPORT_SYMBOL_GPL(iommu_device_group);
+EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index c5c274ab5c5a..67da6cff74e8 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -198,10 +198,10 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
/**
* alloc_iova - allocates an iova
- * @iovad - iova domain in question
- * @size - size of page frames to allocate
- * @limit_pfn - max limit address
- * @size_aligned - set if size_aligned address range is required
+ * @iovad: - iova domain in question
+ * @size: - size of page frames to allocate
+ * @limit_pfn: - max limit address
+ * @size_aligned: - set if size_aligned address range is required
* This function allocates an iova in the range limit_pfn to IOVA_START_PFN
* looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
* flag is set then the allocated address iova->pfn_lo will be naturally
@@ -238,8 +238,8 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
/**
* find_iova - find's an iova for a given pfn
- * @iovad - iova domain in question.
- * pfn - page frame number
+ * @iovad: - iova domain in question.
+ * @pfn: - page frame number
* This function finds and returns an iova belonging to the
* given doamin which matches the given pfn.
*/
@@ -260,7 +260,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
/* We are not holding the lock while this iova
* is referenced by the caller as the same thread
* which called this function also calls __free_iova()
- * and it is by desing that only one thread can possibly
+ * and it is by design that only one thread can possibly
* reference a particular iova and hence no conflict.
*/
return iova;
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 40cda8e98d87..151690db692c 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -1,6 +1,11 @@
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/cpumask.h>
#include <linux/errno.h>
+#include <linux/msi.h>
+
+#include <asm/hw_irq.h>
+#include <asm/irq_remapping.h>
#include "irq_remapping.h"
@@ -111,16 +116,15 @@ int setup_ioapic_remapped_entry(int irq,
vector, attr);
}
-#ifdef CONFIG_SMP
int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- if (!remap_ops || !remap_ops->set_affinity)
+ if (!config_enabled(CONFIG_SMP) || !remap_ops ||
+ !remap_ops->set_affinity)
return 0;
return remap_ops->set_affinity(data, mask, force);
}
-#endif
void free_remapped_irq(int irq)
{
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index be9d72950c51..b12974cc1dfe 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -59,11 +59,9 @@ struct irq_remap_ops {
unsigned int, int,
struct io_apic_irq_attr *);
-#ifdef CONFIG_SMP
/* Set the CPU affinity of a remapped interrupt */
int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
bool force);
-#endif
/* Free an IRQ */
int (*free_irq)(int);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index cee307e86606..6a8870a31668 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -226,6 +226,11 @@ static int msm_iommu_domain_init(struct iommu_domain *domain)
memset(priv->pgtable, 0, SZ_16K);
domain->priv = priv;
+
+ domain->geometry.aperture_start = 0;
+ domain->geometry.aperture_end = (1ULL << 32) - 1;
+ domain->geometry.force_aperture = true;
+
return 0;
fail_nomem:
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
new file mode 100644
index 000000000000..ee249bc959f8
--- /dev/null
+++ b/drivers/iommu/of_iommu.c
@@ -0,0 +1,90 @@
+/*
+ * OF helpers for IOMMU
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/export.h>
+#include <linux/limits.h>
+#include <linux/of.h>
+
+/**
+ * of_get_dma_window - Parse *dma-window property and returns 0 if found.
+ *
+ * @dn: device node
+ * @prefix: prefix for property name if any
+ * @index: index to start to parse
+ * @busno: Returns busno if supported. Otherwise pass NULL
+ * @addr: Returns address that DMA starts
+ * @size: Returns the range that DMA can handle
+ *
+ * This supports different formats flexibly. "prefix" can be
+ * configured if any. "busno" and "index" are optionally
+ * specified. Set 0(or NULL) if not used.
+ */
+int of_get_dma_window(struct device_node *dn, const char *prefix, int index,
+ unsigned long *busno, dma_addr_t *addr, size_t *size)
+{
+ const __be32 *dma_window, *end;
+ int bytes, cur_index = 0;
+ char propname[NAME_MAX], addrname[NAME_MAX], sizename[NAME_MAX];
+
+ if (!dn || !addr || !size)
+ return -EINVAL;
+
+ if (!prefix)
+ prefix = "";
+
+ snprintf(propname, sizeof(propname), "%sdma-window", prefix);
+ snprintf(addrname, sizeof(addrname), "%s#dma-address-cells", prefix);
+ snprintf(sizename, sizeof(sizename), "%s#dma-size-cells", prefix);
+
+ dma_window = of_get_property(dn, propname, &bytes);
+ if (!dma_window)
+ return -ENODEV;
+ end = dma_window + bytes / sizeof(*dma_window);
+
+ while (dma_window < end) {
+ u32 cells;
+ const void *prop;
+
+ /* busno is one cell if supported */
+ if (busno)
+ *busno = be32_to_cpup(dma_window++);
+
+ prop = of_get_property(dn, addrname, NULL);
+ if (!prop)
+ prop = of_get_property(dn, "#address-cells", NULL);
+
+ cells = prop ? be32_to_cpup(prop) : of_n_addr_cells(dn);
+ if (!cells)
+ return -EINVAL;
+ *addr = of_read_number(dma_window, cells);
+ dma_window += cells;
+
+ prop = of_get_property(dn, sizename, NULL);
+ cells = prop ? be32_to_cpup(prop) : of_n_size_cells(dn);
+ if (!cells)
+ return -EINVAL;
+ *size = of_read_number(dma_window, cells);
+ dma_window += cells;
+
+ if (cur_index++ == index)
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_get_dma_window);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index e70ee2b59df9..d0b1234581be 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1148,6 +1148,10 @@ static int omap_iommu_domain_init(struct iommu_domain *domain)
domain->priv = omap_domain;
+ domain->geometry.aperture_start = 0;
+ domain->geometry.aperture_end = (1ULL << 32) - 1;
+ domain->geometry.force_aperture = true;
+
return 0;
fail_nomem:
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 0c0a37792218..c16e8fc8a4bd 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -165,6 +165,11 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
return -EINVAL;
domain->priv = gart;
+ domain->geometry.aperture_start = gart->iovmm_base;
+ domain->geometry.aperture_end = gart->iovmm_base +
+ gart->page_count * GART_PAGE_SIZE - 1;
+ domain->geometry.force_aperture = true;
+
client = devm_kzalloc(gart->dev, sizeof(*c), GFP_KERNEL);
if (!client)
return -ENOMEM;
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index ecd679043d77..4ba325ab6262 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -30,12 +30,15 @@
#include <linux/sched.h>
#include <linux/iommu.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_iommu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <mach/iomap.h>
#include <mach/smmu.h>
+#include <mach/tegra-ahb.h>
/* bitmap of the page sizes currently supported */
#define SMMU_IOMMU_PGSIZES (SZ_4K)
@@ -111,12 +114,6 @@
#define SMMU_PDE_NEXT_SHIFT 28
-/* AHB Arbiter Registers */
-#define AHB_XBAR_CTRL 0xe0
-#define AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE 1
-#define AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT 17
-
-#define SMMU_NUM_ASIDS 4
#define SMMU_TLB_FLUSH_VA_SECTION__MASK 0xffc00000
#define SMMU_TLB_FLUSH_VA_SECTION__SHIFT 12 /* right shift */
#define SMMU_TLB_FLUSH_VA_GROUP__MASK 0xffffc000
@@ -136,6 +133,7 @@
#define SMMU_PAGE_SHIFT 12
#define SMMU_PAGE_SIZE (1 << SMMU_PAGE_SHIFT)
+#define SMMU_PAGE_MASK ((1 << SMMU_PAGE_SHIFT) - 1)
#define SMMU_PDIR_COUNT 1024
#define SMMU_PDIR_SIZE (sizeof(unsigned long) * SMMU_PDIR_COUNT)
@@ -177,6 +175,8 @@
#define SMMU_ASID_DISABLE 0
#define SMMU_ASID_ASID(n) ((n) & ~SMMU_ASID_ENABLE(0))
+#define NUM_SMMU_REG_BANKS 3
+
#define smmu_client_enable_hwgrp(c, m) smmu_client_set_hwgrp(c, m, 1)
#define smmu_client_disable_hwgrp(c) smmu_client_set_hwgrp(c, 0, 0)
#define __smmu_client_enable_hwgrp(c, m) __smmu_client_set_hwgrp(c, m, 1)
@@ -235,14 +235,12 @@ struct smmu_as {
* Per SMMU device - IOMMU device
*/
struct smmu_device {
- void __iomem *regs, *regs_ahbarb;
+ void __iomem *regs[NUM_SMMU_REG_BANKS];
unsigned long iovmm_base; /* remappable base address */
unsigned long page_count; /* total remappable size */
spinlock_t lock;
char *name;
struct device *dev;
- int num_as;
- struct smmu_as *as; /* Run-time allocated array */
struct page *avp_vector_page; /* dummy page shared by all AS's */
/*
@@ -252,29 +250,50 @@ struct smmu_device {
unsigned long translation_enable_1;
unsigned long translation_enable_2;
unsigned long asid_security;
+
+ struct device_node *ahb;
+
+ int num_as;
+ struct smmu_as as[0]; /* Run-time allocated array */
};
static struct smmu_device *smmu_handle; /* unique for a system */
/*
- * SMMU/AHB register accessors
+ * SMMU register accessors
*/
static inline u32 smmu_read(struct smmu_device *smmu, size_t offs)
{
- return readl(smmu->regs + offs);
-}
-static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
-{
- writel(val, smmu->regs + offs);
+ BUG_ON(offs < 0x10);
+ if (offs < 0x3c)
+ return readl(smmu->regs[0] + offs - 0x10);
+ BUG_ON(offs < 0x1f0);
+ if (offs < 0x200)
+ return readl(smmu->regs[1] + offs - 0x1f0);
+ BUG_ON(offs < 0x228);
+ if (offs < 0x284)
+ return readl(smmu->regs[2] + offs - 0x228);
+ BUG();
}
-static inline u32 ahb_read(struct smmu_device *smmu, size_t offs)
-{
- return readl(smmu->regs_ahbarb + offs);
-}
-static inline void ahb_write(struct smmu_device *smmu, u32 val, size_t offs)
+static inline void smmu_write(struct smmu_device *smmu, u32 val, size_t offs)
{
- writel(val, smmu->regs_ahbarb + offs);
+ BUG_ON(offs < 0x10);
+ if (offs < 0x3c) {
+ writel(val, smmu->regs[0] + offs - 0x10);
+ return;
+ }
+ BUG_ON(offs < 0x1f0);
+ if (offs < 0x200) {
+ writel(val, smmu->regs[1] + offs - 0x1f0);
+ return;
+ }
+ BUG_ON(offs < 0x228);
+ if (offs < 0x284) {
+ writel(val, smmu->regs[2] + offs - 0x228);
+ return;
+ }
+ BUG();
}
#define VA_PAGE_TO_PA(va, page) \
@@ -370,7 +389,7 @@ static void smmu_flush_regs(struct smmu_device *smmu, int enable)
FLUSH_SMMU_REGS(smmu);
}
-static void smmu_setup_regs(struct smmu_device *smmu)
+static int smmu_setup_regs(struct smmu_device *smmu)
{
int i;
u32 val;
@@ -398,10 +417,7 @@ static void smmu_setup_regs(struct smmu_device *smmu)
smmu_flush_regs(smmu, 1);
- val = ahb_read(smmu, AHB_XBAR_CTRL);
- val |= AHB_XBAR_CTRL_SMMU_INIT_DONE_DONE <<
- AHB_XBAR_CTRL_SMMU_INIT_DONE_SHIFT;
- ahb_write(smmu, val, AHB_XBAR_CTRL);
+ return tegra_ahb_enable_smmu(smmu->ahb);
}
static void flush_ptc_and_tlb(struct smmu_device *smmu,
@@ -537,33 +553,42 @@ static inline void put_signature(struct smmu_as *as,
#endif
/*
- * Caller must lock/unlock as
+ * Caller must not hold as->lock
*/
static int alloc_pdir(struct smmu_as *as)
{
- unsigned long *pdir;
- int pdn;
+ unsigned long *pdir, flags;
+ int pdn, err = 0;
u32 val;
struct smmu_device *smmu = as->smmu;
+ struct page *page;
+ unsigned int *cnt;
+
+ /*
+ * do the allocation, then grab as->lock
+ */
+ cnt = devm_kzalloc(smmu->dev,
+ sizeof(cnt[0]) * SMMU_PDIR_COUNT,
+ GFP_KERNEL);
+ page = alloc_page(GFP_KERNEL | __GFP_DMA);
- if (as->pdir_page)
- return 0;
+ spin_lock_irqsave(&as->lock, flags);
- as->pte_count = devm_kzalloc(smmu->dev,
- sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
- if (!as->pte_count) {
- dev_err(smmu->dev,
- "failed to allocate smmu_device PTE cunters\n");
- return -ENOMEM;
+ if (as->pdir_page) {
+ /* We raced, free the redundant */
+ err = -EAGAIN;
+ goto err_out;
}
- as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
- if (!as->pdir_page) {
- dev_err(smmu->dev,
- "failed to allocate smmu_device page directory\n");
- devm_kfree(smmu->dev, as->pte_count);
- as->pte_count = NULL;
- return -ENOMEM;
+
+ if (!page || !cnt) {
+ dev_err(smmu->dev, "failed to allocate at %s\n", __func__);
+ err = -ENOMEM;
+ goto err_out;
}
+
+ as->pdir_page = page;
+ as->pte_count = cnt;
+
SetPageReserved(as->pdir_page);
pdir = page_address(as->pdir_page);
@@ -579,7 +604,17 @@ static int alloc_pdir(struct smmu_as *as)
smmu_write(smmu, val, SMMU_TLB_FLUSH);
FLUSH_SMMU_REGS(as->smmu);
+ spin_unlock_irqrestore(&as->lock, flags);
+
return 0;
+
+err_out:
+ spin_unlock_irqrestore(&as->lock, flags);
+
+ devm_kfree(smmu->dev, cnt);
+ if (page)
+ __free_page(page);
+ return err;
}
static void __smmu_iommu_unmap(struct smmu_as *as, dma_addr_t iova)
@@ -771,30 +806,28 @@ out:
static int smmu_iommu_domain_init(struct iommu_domain *domain)
{
- int i;
+ int i, err = -ENODEV;
unsigned long flags;
struct smmu_as *as;
struct smmu_device *smmu = smmu_handle;
/* Look for a free AS with lock held */
for (i = 0; i < smmu->num_as; i++) {
- struct smmu_as *tmp = &smmu->as[i];
-
- spin_lock_irqsave(&tmp->lock, flags);
- if (!tmp->pdir_page) {
- as = tmp;
- goto found;
+ as = &smmu->as[i];
+ if (!as->pdir_page) {
+ err = alloc_pdir(as);
+ if (!err)
+ goto found;
}
- spin_unlock_irqrestore(&tmp->lock, flags);
+ if (err != -EAGAIN)
+ break;
}
- dev_err(smmu->dev, "no free AS\n");
- return -ENODEV;
+ if (i == smmu->num_as)
+ dev_err(smmu->dev, "no free AS\n");
+ return err;
found:
- if (alloc_pdir(as) < 0)
- goto err_alloc_pdir;
-
- spin_lock(&smmu->lock);
+ spin_lock_irqsave(&smmu->lock, flags);
/* Update PDIR register */
smmu_write(smmu, SMMU_PTB_ASID_CUR(as->asid), SMMU_PTB_ASID);
@@ -802,17 +835,18 @@ found:
SMMU_MK_PDIR(as->pdir_page, as->pdir_attr), SMMU_PTB_DATA);
FLUSH_SMMU_REGS(smmu);
- spin_unlock(&smmu->lock);
+ spin_unlock_irqrestore(&smmu->lock, flags);
- spin_unlock_irqrestore(&as->lock, flags);
domain->priv = as;
+ domain->geometry.aperture_start = smmu->iovmm_base;
+ domain->geometry.aperture_end = smmu->iovmm_base +
+ smmu->page_count * SMMU_PAGE_SIZE - 1;
+ domain->geometry.force_aperture = true;
+
dev_dbg(smmu->dev, "smmu_as@%p\n", as);
- return 0;
-err_alloc_pdir:
- spin_unlock_irqrestore(&as->lock, flags);
- return -ENODEV;
+ return 0;
}
static void smmu_iommu_domain_destroy(struct iommu_domain *domain)
@@ -873,65 +907,73 @@ static int tegra_smmu_resume(struct device *dev)
{
struct smmu_device *smmu = dev_get_drvdata(dev);
unsigned long flags;
+ int err;
spin_lock_irqsave(&smmu->lock, flags);
- smmu_setup_regs(smmu);
+ err = smmu_setup_regs(smmu);
spin_unlock_irqrestore(&smmu->lock, flags);
- return 0;
+ return err;
}
static int tegra_smmu_probe(struct platform_device *pdev)
{
struct smmu_device *smmu;
- struct resource *regs, *regs2, *window;
struct device *dev = &pdev->dev;
- int i, err = 0;
+ int i, asids, err = 0;
+ dma_addr_t uninitialized_var(base);
+ size_t bytes, uninitialized_var(size);
if (smmu_handle)
return -EIO;
BUILD_BUG_ON(PAGE_SHIFT != SMMU_PAGE_SHIFT);
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- window = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (!regs || !regs2 || !window) {
- dev_err(dev, "No SMMU resources\n");
+ if (of_property_read_u32(dev->of_node, "nvidia,#asids", &asids))
return -ENODEV;
- }
- smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
+ bytes = sizeof(*smmu) + asids * sizeof(*smmu->as);
+ smmu = devm_kzalloc(dev, bytes, GFP_KERNEL);
if (!smmu) {
dev_err(dev, "failed to allocate smmu_device\n");
return -ENOMEM;
}
- smmu->dev = dev;
- smmu->num_as = SMMU_NUM_ASIDS;
- smmu->iovmm_base = (unsigned long)window->start;
- smmu->page_count = resource_size(window) >> SMMU_PAGE_SHIFT;
- smmu->regs = devm_ioremap(dev, regs->start, resource_size(regs));
- smmu->regs_ahbarb = devm_ioremap(dev, regs2->start,
- resource_size(regs2));
- if (!smmu->regs || !smmu->regs_ahbarb) {
- dev_err(dev, "failed to remap SMMU registers\n");
- err = -ENXIO;
- goto fail;
+ for (i = 0; i < ARRAY_SIZE(smmu->regs); i++) {
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+ if (!res)
+ return -ENODEV;
+ smmu->regs[i] = devm_request_and_ioremap(&pdev->dev, res);
+ if (!smmu->regs[i])
+ return -EBUSY;
}
+ err = of_get_dma_window(dev->of_node, NULL, 0, NULL, &base, &size);
+ if (err)
+ return -ENODEV;
+
+ if (size & SMMU_PAGE_MASK)
+ return -EINVAL;
+
+ size >>= SMMU_PAGE_SHIFT;
+ if (!size)
+ return -EINVAL;
+
+ smmu->ahb = of_parse_phandle(dev->of_node, "nvidia,ahb", 0);
+ if (!smmu->ahb)
+ return -ENODEV;
+
+ smmu->dev = dev;
+ smmu->num_as = asids;
+ smmu->iovmm_base = base;
+ smmu->page_count = size;
+
smmu->translation_enable_0 = ~0;
smmu->translation_enable_1 = ~0;
smmu->translation_enable_2 = ~0;
smmu->asid_security = 0;
- smmu->as = devm_kzalloc(dev,
- sizeof(smmu->as[0]) * smmu->num_as, GFP_KERNEL);
- if (!smmu->as) {
- dev_err(dev, "failed to allocate smmu_as\n");
- err = -ENOMEM;
- goto fail;
- }
-
for (i = 0; i < smmu->num_as; i++) {
struct smmu_as *as = &smmu->as[i];
@@ -945,57 +987,28 @@ static int tegra_smmu_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&as->client);
}
spin_lock_init(&smmu->lock);
- smmu_setup_regs(smmu);
+ err = smmu_setup_regs(smmu);
+ if (err)
+ return err;
platform_set_drvdata(pdev, smmu);
smmu->avp_vector_page = alloc_page(GFP_KERNEL);
if (!smmu->avp_vector_page)
- goto fail;
+ return -ENOMEM;
smmu_handle = smmu;
return 0;
-
-fail:
- if (smmu->avp_vector_page)
- __free_page(smmu->avp_vector_page);
- if (smmu->regs)
- devm_iounmap(dev, smmu->regs);
- if (smmu->regs_ahbarb)
- devm_iounmap(dev, smmu->regs_ahbarb);
- if (smmu && smmu->as) {
- for (i = 0; i < smmu->num_as; i++) {
- if (smmu->as[i].pdir_page) {
- ClearPageReserved(smmu->as[i].pdir_page);
- __free_page(smmu->as[i].pdir_page);
- }
- }
- devm_kfree(dev, smmu->as);
- }
- devm_kfree(dev, smmu);
- return err;
}
static int tegra_smmu_remove(struct platform_device *pdev)
{
struct smmu_device *smmu = platform_get_drvdata(pdev);
- struct device *dev = smmu->dev;
+ int i;
smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
- platform_set_drvdata(pdev, NULL);
- if (smmu->as) {
- int i;
-
- for (i = 0; i < smmu->num_as; i++)
- free_pdir(&smmu->as[i]);
- devm_kfree(dev, smmu->as);
- }
- if (smmu->avp_vector_page)
- __free_page(smmu->avp_vector_page);
- if (smmu->regs)
- devm_iounmap(dev, smmu->regs);
- if (smmu->regs_ahbarb)
- devm_iounmap(dev, smmu->regs_ahbarb);
- devm_kfree(dev, smmu);
+ for (i = 0; i < smmu->num_as; i++)
+ free_pdir(&smmu->as[i]);
+ __free_page(smmu->avp_vector_page);
smmu_handle = NULL;
return 0;
}
@@ -1005,6 +1018,14 @@ const struct dev_pm_ops tegra_smmu_pm_ops = {
.resume = tegra_smmu_resume,
};
+#ifdef CONFIG_OF
+static struct of_device_id tegra_smmu_of_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra30-smmu", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_smmu_of_match);
+#endif
+
static struct platform_driver tegra_smmu_driver = {
.probe = tegra_smmu_probe,
.remove = tegra_smmu_remove,
@@ -1012,6 +1033,7 @@ static struct platform_driver tegra_smmu_driver = {
.owner = THIS_MODULE,
.name = "tegra-smmu",
.pm = &tegra_smmu_pm_ops,
+ .of_match_table = of_match_ptr(tegra_smmu_of_match),
},
};
@@ -1031,4 +1053,5 @@ module_exit(tegra_smmu_exit);
MODULE_DESCRIPTION("IOMMU API for SMMU in Tegra30");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_ALIAS("platform:tegra-smmu");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 27e4a3e21d64..68452b768da2 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -288,6 +288,7 @@ static inline void dump_rawmsg(enum debuglevel level, const char *tag,
* format CAPI IE as string
*/
+#ifdef CONFIG_GIGASET_DEBUG
static const char *format_ie(const char *ie)
{
static char result[3 * MAX_FMT_IE_LEN];
@@ -313,6 +314,7 @@ static const char *format_ie(const char *ie)
*--pout = 0;
return result;
}
+#endif
/*
* emit DATA_B3_CONF message
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index c65c3440cd70..114f3bcba1b0 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -2084,13 +2084,21 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* create the control pipes needed for register access */
hw->ctrl_in_pipe = usb_rcvctrlpipe(hw->dev, 0);
hw->ctrl_out_pipe = usb_sndctrlpipe(hw->dev, 0);
+
+ driver_info = (struct hfcsusb_vdata *)
+ hfcsusb_idtab[vend_idx].driver_info;
+
hw->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!hw->ctrl_urb) {
+ pr_warn("%s: No memory for control urb\n",
+ driver_info->vend_name);
+ kfree(hw);
+ return -ENOMEM;
+ }
- driver_info =
- (struct hfcsusb_vdata *)hfcsusb_idtab[vend_idx].driver_info;
- printk(KERN_DEBUG "%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
- hw->name, __func__, driver_info->vend_name,
- conf_str[small_match], ifnum, alt_used);
+ pr_info("%s: %s: detected \"%s\" (%s, if=%d alt=%d)\n",
+ hw->name, __func__, driver_info->vend_name,
+ conf_str[small_match], ifnum, alt_used);
if (setup_instance(hw, dev->dev.parent))
return -EIO;
diff --git a/drivers/isdn/hisax/hfc_usb.c b/drivers/isdn/hisax/hfc_usb.c
index 84f9c8103078..849a80752685 100644
--- a/drivers/isdn/hisax/hfc_usb.c
+++ b/drivers/isdn/hisax/hfc_usb.c
@@ -1483,13 +1483,21 @@ hfc_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
usb_rcvctrlpipe(context->dev, 0);
context->ctrl_out_pipe =
usb_sndctrlpipe(context->dev, 0);
+
+ driver_info = (hfcsusb_vdata *)
+ hfcusb_idtab[vend_idx].driver_info;
+
context->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
- driver_info =
- (hfcsusb_vdata *) hfcusb_idtab[vend_idx].
- driver_info;
- printk(KERN_INFO "HFC-S USB: detected \"%s\"\n",
- driver_info->vend_name);
+ if (!context->ctrl_urb) {
+ pr_warn("%s: No memory for control urb\n",
+ driver_info->vend_name);
+ kfree(context);
+ return -ENOMEM;
+ }
+
+ pr_info("HFC-S USB: detected \"%s\"\n",
+ driver_info->vend_name);
DBG(HFCUSB_DBG_INIT,
"HFC-S USB: Endpoint-Config: %s (if=%d alt=%d), E-Channel(%d)",
diff --git a/drivers/isdn/hisax/isurf.c b/drivers/isdn/hisax/isurf.c
index ea2717215296..c1530fe248c2 100644
--- a/drivers/isdn/hisax/isurf.c
+++ b/drivers/isdn/hisax/isurf.c
@@ -231,6 +231,11 @@ setup_isurf(struct IsdnCard *card)
}
pnp_disable_dev(pnp_d);
err = pnp_activate_dev(pnp_d);
+ if (err < 0) {
+ pr_warn("%s: pnp_activate_dev ret=%d\n",
+ __func__, err);
+ return 0;
+ }
cs->hw.isurf.reset = pnp_port_start(pnp_d, 0);
cs->hw.isurf.phymem = pnp_mem_start(pnp_d, 1);
cs->irq = pnp_irq(pnp_d, 0);
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 1a0ae4445ff2..5f21f629b7ae 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -135,8 +135,8 @@ send_layer2(struct mISDNstack *st, struct sk_buff *skb)
skb = NULL;
else if (*debug & DEBUG_SEND_ERR)
printk(KERN_DEBUG
- "%s ch%d mgr prim(%x) addr(%x) err %d\n",
- __func__, ch->nr, hh->prim, ch->addr, ret);
+ "%s mgr prim(%x) err %d\n",
+ __func__, hh->prim, ret);
}
out:
mutex_unlock(&st->lmutex);
diff --git a/drivers/leds/ledtrig-heartbeat.c b/drivers/leds/ledtrig-heartbeat.c
index 41dc76db4311..a019fbb70880 100644
--- a/drivers/leds/ledtrig-heartbeat.c
+++ b/drivers/leds/ledtrig-heartbeat.c
@@ -21,6 +21,8 @@
#include <linux/reboot.h>
#include "leds.h"
+static int panic_heartbeats;
+
struct heartbeat_trig_data {
unsigned int phase;
unsigned int period;
@@ -34,6 +36,11 @@ static void led_heartbeat_function(unsigned long data)
unsigned long brightness = LED_OFF;
unsigned long delay = 0;
+ if (unlikely(panic_heartbeats)) {
+ led_set_brightness(led_cdev, LED_OFF);
+ return;
+ }
+
/* acts like an actual heart beat -- ie thump-thump-pause... */
switch (heartbeat_data->phase) {
case 0:
@@ -111,12 +118,19 @@ static int heartbeat_reboot_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static int heartbeat_panic_notifier(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ panic_heartbeats = 1;
+ return NOTIFY_DONE;
+}
+
static struct notifier_block heartbeat_reboot_nb = {
.notifier_call = heartbeat_reboot_notifier,
};
static struct notifier_block heartbeat_panic_nb = {
- .notifier_call = heartbeat_reboot_notifier,
+ .notifier_call = heartbeat_panic_notifier,
};
static int __init heartbeat_trig_init(void)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d039de8322f0..b58b7a33914a 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1084,6 +1084,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->split_io = dm_rh_get_region_size(ms->rh);
ti->num_flush_requests = 1;
ti->num_discard_requests = 1;
+ ti->discard_zeroes_data_unsupported = 1;
ms->kmirrord_wq = alloc_workqueue("kmirrord",
WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
@@ -1214,7 +1215,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- if (!(bio->bi_rw & REQ_FLUSH))
+ if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
dm_rh_dec(ms->rh, map_context->ll);
return error;
}
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 7771ed212182..69732e03eb34 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
return;
}
+ if (bio->bi_rw & REQ_DISCARD)
+ return;
+
/* We must inform the log that the sync count has changed. */
log->type->set_region_sync(log, region, 0);
@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
struct bio *bio;
for (bio = bios->head; bio; bio = bio->bi_next) {
- if (bio->bi_rw & REQ_FLUSH)
+ if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
continue;
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 37fdaf81bd1f..68694da0d21d 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1245,7 +1245,10 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
cell_release_singleton(cell, bio);
cell_release_singleton(cell2, bio);
- remap_and_issue(tc, bio, lookup_result.block);
+ if ((!lookup_result.shared) && pool->pf.discard_passdown)
+ remap_and_issue(tc, bio, lookup_result.block);
+ else
+ bio_endio(bio, 0);
}
break;
@@ -2292,6 +2295,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
if (r)
return r;
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r) {
+ DMERR("%s: dm_pool_commit_metadata() failed, error = %d",
+ __func__, r);
+ return r;
+ }
+
r = dm_pool_reserve_metadata_snap(pool->pmd);
if (r)
DMWARN("reserve_metadata_snap message failed.");
@@ -2621,6 +2631,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (tc->pool->pf.discard_enabled) {
ti->discards_supported = 1;
ti->num_discard_requests = 1;
+ ti->discard_zeroes_data_unsupported = 1;
}
dm_put(pool_md);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 1c2f9048e1ae..d5ab4493c8be 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2931,6 +2931,7 @@ offset_store(struct md_rdev *rdev, const char *buf, size_t len)
* can be sane */
return -EBUSY;
rdev->data_offset = offset;
+ rdev->new_data_offset = offset;
return len;
}
@@ -3926,8 +3927,8 @@ array_state_show(struct mddev *mddev, char *page)
return sprintf(page, "%s\n", array_states[st]);
}
-static int do_md_stop(struct mddev * mddev, int ro, int is_open);
-static int md_set_readonly(struct mddev * mddev, int is_open);
+static int do_md_stop(struct mddev * mddev, int ro, struct block_device *bdev);
+static int md_set_readonly(struct mddev * mddev, struct block_device *bdev);
static int do_md_run(struct mddev * mddev);
static int restart_array(struct mddev *mddev);
@@ -3943,14 +3944,14 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
/* stopping an active array */
if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
- err = do_md_stop(mddev, 0, 0);
+ err = do_md_stop(mddev, 0, NULL);
break;
case inactive:
/* stopping an active array */
if (mddev->pers) {
if (atomic_read(&mddev->openers) > 0)
return -EBUSY;
- err = do_md_stop(mddev, 2, 0);
+ err = do_md_stop(mddev, 2, NULL);
} else
err = 0; /* already inactive */
break;
@@ -3958,7 +3959,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
break; /* not supported yet */
case readonly:
if (mddev->pers)
- err = md_set_readonly(mddev, 0);
+ err = md_set_readonly(mddev, NULL);
else {
mddev->ro = 1;
set_disk_ro(mddev->gendisk, 1);
@@ -3968,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
case read_auto:
if (mddev->pers) {
if (mddev->ro == 0)
- err = md_set_readonly(mddev, 0);
+ err = md_set_readonly(mddev, NULL);
else if (mddev->ro == 1)
err = restart_array(mddev);
if (err == 0) {
@@ -5351,15 +5352,17 @@ void md_stop(struct mddev *mddev)
}
EXPORT_SYMBOL_GPL(md_stop);
-static int md_set_readonly(struct mddev *mddev, int is_open)
+static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
{
int err = 0;
mutex_lock(&mddev->open_mutex);
- if (atomic_read(&mddev->openers) > is_open) {
+ if (atomic_read(&mddev->openers) > !!bdev) {
printk("md: %s still in use.\n",mdname(mddev));
err = -EBUSY;
goto out;
}
+ if (bdev)
+ sync_blockdev(bdev);
if (mddev->pers) {
__md_stop_writes(mddev);
@@ -5381,18 +5384,26 @@ out:
* 0 - completely stop and dis-assemble array
* 2 - stop but do not disassemble array
*/
-static int do_md_stop(struct mddev * mddev, int mode, int is_open)
+static int do_md_stop(struct mddev * mddev, int mode,
+ struct block_device *bdev)
{
struct gendisk *disk = mddev->gendisk;
struct md_rdev *rdev;
mutex_lock(&mddev->open_mutex);
- if (atomic_read(&mddev->openers) > is_open ||
+ if (atomic_read(&mddev->openers) > !!bdev ||
mddev->sysfs_active) {
printk("md: %s still in use.\n",mdname(mddev));
mutex_unlock(&mddev->open_mutex);
return -EBUSY;
}
+ if (bdev)
+ /* It is possible IO was issued on some other
+ * open file which was closed before we took ->open_mutex.
+ * As that was not the last close __blkdev_put will not
+ * have called sync_blockdev, so we must.
+ */
+ sync_blockdev(bdev);
if (mddev->pers) {
if (mddev->ro)
@@ -5466,7 +5477,7 @@ static void autorun_array(struct mddev *mddev)
err = do_md_run(mddev);
if (err) {
printk(KERN_WARNING "md: do_md_run() returned %d\n", err);
- do_md_stop(mddev, 0, 0);
+ do_md_stop(mddev, 0, NULL);
}
}
@@ -5784,8 +5795,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info)
super_types[mddev->major_version].
validate_super(mddev, rdev);
if ((info->state & (1<<MD_DISK_SYNC)) &&
- (!test_bit(In_sync, &rdev->flags) ||
- rdev->raid_disk != info->raid_disk)) {
+ rdev->raid_disk != info->raid_disk) {
/* This was a hot-add request, but events doesn't
* match, so reject it.
*/
@@ -6482,11 +6492,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
goto done_unlock;
case STOP_ARRAY:
- err = do_md_stop(mddev, 0, 1);
+ err = do_md_stop(mddev, 0, bdev);
goto done_unlock;
case STOP_ARRAY_RO:
- err = md_set_readonly(mddev, 1);
+ err = md_set_readonly(mddev, bdev);
goto done_unlock;
case BLKROSET:
@@ -6751,7 +6761,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev
thread->tsk = kthread_run(md_thread, thread,
"%s_%s",
mdname(thread->mddev),
- name ?: mddev->pers->name);
+ name);
if (IS_ERR(thread->tsk)) {
kfree(thread);
return NULL;
@@ -7298,6 +7308,7 @@ void md_do_sync(struct mddev *mddev)
int skipped = 0;
struct md_rdev *rdev;
char *desc;
+ struct blk_plug plug;
/* just incase thread restarts... */
if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
@@ -7447,6 +7458,7 @@ void md_do_sync(struct mddev *mddev)
}
mddev->curr_resync_completed = j;
+ blk_start_plug(&plug);
while (j < max_sectors) {
sector_t sectors;
@@ -7552,6 +7564,7 @@ void md_do_sync(struct mddev *mddev)
* this also signals 'finished resyncing' to md_stop
*/
out:
+ blk_finish_plug(&plug);
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 9339e67fcc79..61a1833ebaf3 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev)
}
{
- mddev->thread = md_register_thread(multipathd, mddev, NULL);
+ mddev->thread = md_register_thread(multipathd, mddev,
+ "multipath");
if (!mddev->thread) {
printk(KERN_ERR "multipath: couldn't allocate thread"
" for %s\n", mdname(mddev));
diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c
index 50ed53bf4aa2..fc90c11620ad 100644
--- a/drivers/md/persistent-data/dm-space-map-checker.c
+++ b/drivers/md/persistent-data/dm-space-map-checker.c
@@ -8,6 +8,7 @@
#include <linux/device-mapper.h>
#include <linux/export.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_DM_DEBUG_SPACE_MAPS
@@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm)
ca->nr = nr_blocks;
ca->nr_free = nr_blocks;
- ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL);
- if (!ca->counts)
- return -ENOMEM;
+
+ if (!nr_blocks)
+ ca->counts = NULL;
+ else {
+ ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks);
+ if (!ca->counts)
+ return -ENOMEM;
+ }
return 0;
}
+static void ca_destroy(struct count_array *ca)
+{
+ vfree(ca->counts);
+}
+
static int ca_load(struct count_array *ca, struct dm_space_map *sm)
{
int r;
@@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm)
static int ca_extend(struct count_array *ca, dm_block_t extra_blocks)
{
dm_block_t nr_blocks = ca->nr + extra_blocks;
- uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL);
+ uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks);
if (!counts)
return -ENOMEM;
- memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
- kfree(ca->counts);
+ if (ca->counts) {
+ memcpy(counts, ca->counts, sizeof(*counts) * ca->nr);
+ ca_destroy(ca);
+ }
ca->nr = nr_blocks;
ca->nr_free += extra_blocks;
ca->counts = counts;
@@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new)
return 0;
}
-static void ca_destroy(struct count_array *ca)
-{
- kfree(ca->counts);
-}
-
/*----------------------------------------------------------------*/
struct sm_checker {
@@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
int r;
struct sm_checker *smc;
- if (!sm)
- return NULL;
+ if (IS_ERR_OR_NULL(sm))
+ return ERR_PTR(-EINVAL);
smc = kmalloc(sizeof(*smc), GFP_KERNEL);
if (!smc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
memcpy(&smc->sm, &ops_, sizeof(smc->sm));
r = ca_create(&smc->old_counts, sm);
if (r) {
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_create(&smc->counts, sm);
if (r) {
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
smc->real_sm = sm;
@@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
ca_destroy(&smc->counts);
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_commit(&smc->old_counts, &smc->counts);
@@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm)
ca_destroy(&smc->counts);
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
return &smc->sm;
@@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm)
int r;
struct sm_checker *smc;
- if (!sm)
- return NULL;
+ if (IS_ERR_OR_NULL(sm))
+ return ERR_PTR(-EINVAL);
smc = kmalloc(sizeof(*smc), GFP_KERNEL);
if (!smc)
- return NULL;
+ return ERR_PTR(-ENOMEM);
memcpy(&smc->sm, &ops_, sizeof(smc->sm));
r = ca_create(&smc->old_counts, sm);
if (r) {
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
r = ca_create(&smc->counts, sm);
if (r) {
ca_destroy(&smc->old_counts);
kfree(smc);
- return NULL;
+ return ERR_PTR(r);
}
smc->real_sm = sm;
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index fc469ba9f627..3d0ed5332883 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm,
dm_block_t nr_blocks)
{
struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks);
- return dm_sm_checker_create_fresh(sm);
+ struct dm_space_map *smc;
+
+ if (IS_ERR_OR_NULL(sm))
+ return sm;
+
+ smc = dm_sm_checker_create_fresh(sm);
+ if (IS_ERR(smc))
+ dm_sm_destroy(sm);
+
+ return smc;
}
EXPORT_SYMBOL_GPL(dm_sm_disk_create);
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 400fe144c0cd..e5604b32d91f 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
void dm_tm_destroy(struct dm_transaction_manager *tm)
{
+ if (!tm->is_clone)
+ wipe_shadow_table(tm);
+
kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);
@@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
}
*sm = dm_sm_checker_create(inner);
- if (!*sm)
+ if (IS_ERR(*sm)) {
+ r = PTR_ERR(*sm);
goto bad2;
+ }
} else {
r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location,
@@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm,
}
*sm = dm_sm_checker_create(inner);
- if (!*sm)
+ if (IS_ERR(*sm)) {
+ r = PTR_ERR(*sm);
goto bad2;
+ }
}
return 0;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a9c7981ddd24..cacd008d6864 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
int bad_sectors;
int disk = start_disk + i;
- if (disk >= conf->raid_disks)
- disk -= conf->raid_disks;
+ if (disk >= conf->raid_disks * 2)
+ disk -= conf->raid_disks * 2;
rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (r1_bio->bios[disk] == IO_BLOCKED
@@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
struct md_rdev *blocked_rdev;
- int plugged;
int first_clone;
int sectors_handled;
int max_sectors;
@@ -1034,7 +1033,6 @@ read_again:
* the bad blocks. Each set of writes gets it's own r1bio
* with a set of bios attached.
*/
- plugged = mddev_check_plugged(mddev);
disks = conf->raid_disks * 2;
retry_write:
@@ -1191,6 +1189,8 @@ read_again:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
/* Mustn't call r1_bio_write_done before this next test,
* as it could result in the bio being freed.
@@ -1213,9 +1213,6 @@ read_again:
/* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
-
- if (do_sync || !bitmap || !plugged)
- md_wakeup_thread(mddev->thread);
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -1821,8 +1818,14 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
if (atomic_dec_and_test(&r1_bio->remaining)) {
/* if we're here, all write(s) have completed, so clean up */
- md_done_sync(mddev, r1_bio->sectors, 1);
- put_buf(r1_bio);
+ int s = r1_bio->sectors;
+ if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
+ test_bit(R1BIO_WriteError, &r1_bio->state))
+ reschedule_retry(r1_bio);
+ else {
+ put_buf(r1_bio);
+ md_done_sync(mddev, s, 1);
+ }
}
}
@@ -2488,9 +2491,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
*/
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
atomic_set(&r1_bio->remaining, read_targets);
- for (i = 0; i < conf->raid_disks * 2; i++) {
+ for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
+ read_targets--;
md_sync_acct(bio->bi_bdev, nr_sectors);
generic_make_request(bio);
}
@@ -2621,7 +2625,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
goto abort;
}
err = -ENOMEM;
- conf->thread = md_register_thread(raid1d, mddev, NULL);
+ conf->thread = md_register_thread(raid1d, mddev, "raid1");
if (!conf->thread) {
printk(KERN_ERR
"md/raid1:%s: couldn't allocate thread\n",
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 99ae6068e456..8da6282254c3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio)
const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
- int plugged;
int sectors_handled;
int max_sectors;
int sectors;
@@ -1239,7 +1238,6 @@ read_again:
* of r10_bios is recored in bio->bi_phys_segments just as with
* the read case.
*/
- plugged = mddev_check_plugged(mddev);
r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
raid10_find_phys(conf, r10_bio);
@@ -1396,6 +1394,8 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
if (!r10_bio->devs[i].repl_bio)
continue;
@@ -1423,6 +1423,8 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+ if (!mddev_check_plugged(mddev))
+ md_wakeup_thread(mddev->thread);
}
/* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1448,9 +1450,6 @@ retry_write:
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
-
- if (do_sync || !mddev->bitmap || !plugged)
- md_wakeup_thread(mddev->thread);
}
static void status(struct seq_file *seq, struct mddev *mddev)
@@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
if (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s<<9, conf->tmppage, WRITE)
+ s, conf->tmppage, WRITE)
== 0) {
/* Well, this device is dead */
printk(KERN_NOTICE
@@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
switch (r10_sync_page_io(rdev,
r10_bio->devs[sl].addr +
sect,
- s<<9, conf->tmppage,
+ s, conf->tmppage,
READ)) {
case 0:
/* Well, this device is dead */
@@ -2512,7 +2511,7 @@ read_more:
slot = r10_bio->read_slot;
printk_ratelimited(
KERN_ERR
- "md/raid10:%s: %s: redirecting"
+ "md/raid10:%s: %s: redirecting "
"sector %llu to another mirror\n",
mdname(mddev),
bdevname(rdev->bdev, b),
@@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev)
blk_start_plug(&plug);
for (;;) {
- flush_pending_writes(conf);
+ if (atomic_read(&mddev->plug_cnt) == 0)
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
/* want to reconstruct this device */
rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i);
+ if (sect >= mddev->resync_max_sectors) {
+ /* last stripe is not complete - don't
+ * try to recover this sector.
+ */
+ continue;
+ }
/* Unless we are doing a full sync, or a replacement
* we only need to recover the block if it is set in
* the bitmap
@@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
spin_lock_init(&conf->resync_lock);
init_waitqueue_head(&conf->wait_barrier);
- conf->thread = md_register_thread(raid10d, mddev, NULL);
+ conf->thread = md_register_thread(raid10d, mddev, "raid10");
if (!conf->thread)
goto out;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d26767246d26..04348d76bb30 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
BUG_ON(!list_empty(&sh->lru));
BUG_ON(atomic_read(&conf->active_stripes)==0);
if (test_bit(STRIPE_HANDLE, &sh->state)) {
- if (test_bit(STRIPE_DELAYED, &sh->state))
+ if (test_bit(STRIPE_DELAYED, &sh->state) &&
+ !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
list_add_tail(&sh->lru, &conf->delayed_list);
else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
sh->bm_seq - conf->seq_write > 0)
list_add_tail(&sh->lru, &conf->bitmap_list);
else {
+ clear_bit(STRIPE_DELAYED, &sh->state);
clear_bit(STRIPE_BIT_DELAY, &sh->state);
list_add_tail(&sh->lru, &conf->handle_list);
}
@@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
* a chance*/
md_check_recovery(conf->mddev);
}
+ /*
+ * Because md_wait_for_blocked_rdev
+ * will dec nr_pending, we must
+ * increment it first.
+ */
+ atomic_inc(&rdev->nr_pending);
md_wait_for_blocked_rdev(rdev, conf->mddev);
} else {
/* Acknowledged bad block - skip the write */
@@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
} else {
const char *bdn = bdevname(rdev->bdev, b);
int retry = 0;
+ int set_bad = 0;
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
@@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev),
(unsigned long long)s,
bdn);
- else if (conf->mddev->degraded >= conf->max_degraded)
+ else if (conf->mddev->degraded >= conf->max_degraded) {
+ set_bad = 1;
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error not correctable "
@@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev),
(unsigned long long)s,
bdn);
- else if (test_bit(R5_ReWrite, &sh->dev[i].flags))
+ } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) {
/* Oh, no!!! */
+ set_bad = 1;
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error NOT corrected!! "
@@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
mdname(conf->mddev),
(unsigned long long)s,
bdn);
- else if (atomic_read(&rdev->read_errors)
+ } else if (atomic_read(&rdev->read_errors)
> conf->max_nr_stripes)
printk(KERN_WARNING
"md/raid:%s: Too many read errors, failing device %s.\n",
@@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error)
else {
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
- md_error(conf->mddev, rdev);
+ if (!(set_bad
+ && test_bit(In_sync, &rdev->flags)
+ && rdev_set_badblocks(
+ rdev, sh->sector, STRIPE_SECTORS, 0)))
+ md_error(conf->mddev, rdev);
}
}
rdev_dec_pending(rdev, conf->mddev);
@@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh)
finish:
/* wait for this device to become unblocked */
- if (conf->mddev->external && unlikely(s.blocked_rdev))
- md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev);
+ if (unlikely(s.blocked_rdev)) {
+ if (conf->mddev->external)
+ md_wait_for_blocked_rdev(s.blocked_rdev,
+ conf->mddev);
+ else
+ /* Internal metadata will immediately
+ * be written by raid5d, so we don't
+ * need to wait here.
+ */
+ rdev_dec_pending(s.blocked_rdev,
+ conf->mddev);
+ }
if (s.handle_bad_blocks)
for (i = disks; i--; ) {
@@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
raid_bio->bi_next = (void*)rdev;
align_bi->bi_bdev = rdev->bdev;
align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
- /* No reshape active, so we can trust rdev->data_offset */
- align_bi->bi_sector += rdev->data_offset;
if (!bio_fits_rdev(align_bi) ||
is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
@@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
return 0;
}
+ /* No reshape active, so we can trust rdev->data_offset */
+ align_bi->bi_sector += rdev->data_offset;
+
spin_lock_irq(&conf->device_lock);
wait_event_lock_irq(conf->wait_for_stripe,
conf->quiesce == 0,
@@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
struct stripe_head *sh;
const int rw = bio_data_dir(bi);
int remaining;
- int plugged;
if (unlikely(bi->bi_rw & REQ_FLUSH)) {
md_flush_request(mddev, bi);
@@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
bi->bi_next = NULL;
bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
- plugged = mddev_check_plugged(mddev);
for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
DEFINE_WAIT(w);
int previous;
@@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ((bi->bi_rw & REQ_SYNC) &&
!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
atomic_inc(&conf->preread_active_stripes);
+ mddev_check_plugged(mddev);
release_stripe(sh);
} else {
/* cannot get stripe for read-ahead, just give-up */
@@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
finish_wait(&conf->wait_for_overlap, &w);
break;
}
-
}
- if (!plugged)
- md_wakeup_thread(mddev->thread);
spin_lock_irq(&conf->device_lock);
remaining = raid5_dec_bi_phys_segments(bi);
@@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
int raid_disk, memory, max_disks;
struct md_rdev *rdev;
struct disk_info *disk;
+ char pers_name[6];
if (mddev->new_level != 5
&& mddev->new_level != 4
@@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev)
printk(KERN_INFO "md/raid:%s: allocated %dkB\n",
mdname(mddev), memory);
- conf->thread = md_register_thread(raid5d, mddev, NULL);
+ sprintf(pers_name, "raid%d", mddev->new_level);
+ conf->thread = md_register_thread(raid5d, mddev, pers_name);
if (!conf->thread) {
printk(KERN_ERR
"md/raid:%s: couldn't allocate thread.\n",
@@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
conf->disks[rdev->saved_raid_disk].rdev == NULL)
- disk = rdev->saved_raid_disk;
- else
- disk = first;
- for ( ; disk <= last ; disk++) {
+ first = rdev->saved_raid_disk;
+
+ for (disk = first; disk <= last; disk++) {
p = conf->disks + disk;
if (p->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
@@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->saved_raid_disk != disk)
conf->fullsync = 1;
rcu_assign_pointer(p->rdev, rdev);
- break;
+ goto out;
}
+ }
+ for (disk = first; disk <= last; disk++) {
+ p = conf->disks + disk;
if (test_bit(WantReplacement, &p->rdev->flags) &&
p->replacement == NULL) {
clear_bit(In_sync, &rdev->flags);
@@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
break;
}
}
+out:
print_raid5_conf(conf);
return err;
}
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index 00a67326c193..39eab73b01ae 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -243,6 +243,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
if (minor == MAX_DVB_MINORS) {
kfree(dvbdevfops);
kfree(dvbdev);
+ up_write(&minor_rwsem);
mutex_unlock(&dvbdev_register_lock);
return -EINVAL;
}
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 342c2c8c1ddf..54ee34872d14 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -232,7 +232,7 @@ MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
static bool txandrx; /* default = 0 */
module_param(txandrx, bool, 0444);
-MODULE_PARM_DESC(invert, "Allow simultaneous TX and RX");
+MODULE_PARM_DESC(txandrx, "Allow simultaneous TX and RX");
static unsigned int wake_sc = 0x800F040C;
module_param(wake_sc, uint, 0644);
@@ -1032,6 +1032,8 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
data->dev->tx_ir = wbcir_tx;
data->dev->priv = data;
data->dev->dev.parent = &device->dev;
+ data->dev->timeout = MS_TO_NS(100);
+ data->dev->allowed_protos = RC_TYPE_ALL;
if (!request_region(data->wbase, WAKEUP_IOMEM_LEN, DRVNAME)) {
dev_err(dev, "Region 0x%lx-0x%lx already in use!\n",
diff --git a/drivers/media/video/cx231xx/cx231xx-audio.c b/drivers/media/video/cx231xx/cx231xx-audio.c
index 068f78dc5d13..b4c99c7270cf 100644
--- a/drivers/media/video/cx231xx/cx231xx-audio.c
+++ b/drivers/media/video/cx231xx/cx231xx-audio.c
@@ -307,7 +307,7 @@ static int cx231xx_init_audio_isoc(struct cx231xx *dev)
urb->context = dev;
urb->pipe = usb_rcvisocpipe(dev->udev,
dev->adev.end_point_addr);
- urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->interval = 1;
urb->complete = cx231xx_audio_isocirq;
@@ -368,7 +368,7 @@ static int cx231xx_init_audio_bulk(struct cx231xx *dev)
urb->context = dev;
urb->pipe = usb_rcvbulkpipe(dev->udev,
dev->adev.end_point_addr);
- urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = 0;
urb->transfer_buffer = dev->adev.transfer_buffer[i];
urb->complete = cx231xx_audio_bulkirq;
urb->transfer_buffer_length = sb_size;
diff --git a/drivers/media/video/cx231xx/cx231xx-vbi.c b/drivers/media/video/cx231xx/cx231xx-vbi.c
index 3d15314e1f88..ac7db52f404f 100644
--- a/drivers/media/video/cx231xx/cx231xx-vbi.c
+++ b/drivers/media/video/cx231xx/cx231xx-vbi.c
@@ -448,7 +448,7 @@ int cx231xx_init_vbi_isoc(struct cx231xx *dev, int max_packets,
return -ENOMEM;
}
dev->vbi_mode.bulk_ctl.urb[i] = urb;
- urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags = 0;
dev->vbi_mode.bulk_ctl.transfer_buffer[i] =
kzalloc(sb_size, GFP_KERNEL);
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 13739e002a63..080e11157e5f 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -127,22 +127,37 @@ struct cx23885_board cx23885_boards[] = {
},
[CX23885_BOARD_HAUPPAUGE_HVR1250] = {
.name = "Hauppauge WinTV-HVR1250",
+ .porta = CX23885_ANALOG_VIDEO,
.portc = CX23885_MPEG_DVB,
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .tuner_addr = 0x42, /* 0x84 >> 1 */
+ .tuner_bus = 1,
+#endif
+ .force_bff = 1,
.input = {{
+#ifdef MT2131_NO_ANALOG_SUPPORT_YET
.type = CX23885_VMUX_TELEVISION,
- .vmux = 0,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1,
+ .amux = CX25840_AUDIO8,
.gpio0 = 0xff00,
}, {
- .type = CX23885_VMUX_DEBUG,
- .vmux = 0,
- .gpio0 = 0xff01,
- }, {
+#endif
.type = CX23885_VMUX_COMPOSITE1,
- .vmux = 1,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN6_CH1,
+ .amux = CX25840_AUDIO7,
.gpio0 = 0xff02,
}, {
.type = CX23885_VMUX_SVIDEO,
- .vmux = 2,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
.gpio0 = 0xff02,
} },
},
@@ -267,7 +282,55 @@ struct cx23885_board cx23885_boards[] = {
},
[CX23885_BOARD_HAUPPAUGE_HVR1255] = {
.name = "Hauppauge WinTV-HVR1255",
+ .porta = CX23885_ANALOG_VIDEO,
+ .portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_addr = 0x42, /* 0x84 >> 1 */
+ .force_bff = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_COMPOSITE1,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN6_CH1,
+ .amux = CX25840_AUDIO7,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
+ } },
+ },
+ [CX23885_BOARD_HAUPPAUGE_HVR1255_22111] = {
+ .name = "Hauppauge WinTV-HVR1255",
+ .porta = CX23885_ANALOG_VIDEO,
.portc = CX23885_MPEG_DVB,
+ .tuner_type = TUNER_ABSENT,
+ .tuner_addr = 0x42, /* 0x84 >> 1 */
+ .force_bff = 1,
+ .input = {{
+ .type = CX23885_VMUX_TELEVISION,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN5_CH2 |
+ CX25840_VIN2_CH1 |
+ CX25840_DIF_ON,
+ .amux = CX25840_AUDIO8,
+ }, {
+ .type = CX23885_VMUX_SVIDEO,
+ .vmux = CX25840_VIN7_CH3 |
+ CX25840_VIN4_CH2 |
+ CX25840_VIN8_CH1 |
+ CX25840_SVIDEO_ON,
+ .amux = CX25840_AUDIO7,
+ } },
},
[CX23885_BOARD_HAUPPAUGE_HVR1210] = {
.name = "Hauppauge WinTV-HVR1210",
@@ -624,7 +687,7 @@ struct cx23885_subid cx23885_subids[] = {
}, {
.subvendor = 0x0070,
.subdevice = 0x2259,
- .card = CX23885_BOARD_HAUPPAUGE_HVR1255,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR1255_22111,
}, {
.subvendor = 0x0070,
.subdevice = 0x2291,
@@ -900,7 +963,7 @@ int cx23885_tuner_callback(void *priv, int component, int command, int arg)
struct cx23885_dev *dev = port->dev;
u32 bitmask = 0;
- if (command == XC2028_RESET_CLK)
+ if ((command == XC2028_RESET_CLK) || (command == XC2028_I2C_FLUSH))
return 0;
if (command != 0) {
@@ -1130,6 +1193,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* GPIO-5 RF Control: 0 = RF1 Terrestrial, 1 = RF2 Cable */
/* GPIO-6 I2C Gate which can isolate the demod from the bus */
@@ -1267,6 +1331,7 @@ int cx23885_ir_init(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1400:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
/* FIXME: Implement me */
break;
@@ -1424,6 +1489,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1511,6 +1577,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1275:
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1210:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
@@ -1526,10 +1593,10 @@ void cx23885_card_setup(struct cx23885_dev *dev)
*/
switch (dev->board) {
case CX23885_BOARD_TEVII_S470:
- case CX23885_BOARD_HAUPPAUGE_HVR1250:
/* Currently only enabled for the integrated IR controller */
if (!enable_885_ir)
break;
+ case CX23885_BOARD_HAUPPAUGE_HVR1250:
case CX23885_BOARD_HAUPPAUGE_HVR1800:
case CX23885_BOARD_HAUPPAUGE_HVR1800lp:
case CX23885_BOARD_HAUPPAUGE_HVR1700:
@@ -1539,6 +1606,8 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
case CX23885_BOARD_COMPRO_VIDEOMATE_E800:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1270:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_MYGICA_X8506:
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index a80a92c47455..cd542684ba02 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -712,6 +712,7 @@ static int dvb_register(struct cx23885_tsport *port)
}
break;
case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
i2c_bus = &dev->i2c_bus[0];
fe0->dvb.frontend = dvb_attach(s5h1411_attach,
&hcw_s5h1411_config,
@@ -721,6 +722,11 @@ static int dvb_register(struct cx23885_tsport *port)
0x60, &dev->i2c_bus[1].i2c_adap,
&hauppauge_tda18271_config);
}
+
+ tda18271_attach(&dev->ts1.analog_fe,
+ 0x60, &dev->i2c_bus[1].i2c_adap,
+ &hauppauge_tda18271_config);
+
break;
case CX23885_BOARD_HAUPPAUGE_HVR1800:
i2c_bus = &dev->i2c_bus[0];
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index c654bdc7ccb2..22f8e7fbd665 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -505,6 +505,9 @@ static int cx23885_video_mux(struct cx23885_dev *dev, unsigned int input)
if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1800) ||
(dev->board == CX23885_BOARD_MPX885) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1250) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111) ||
(dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)) {
/* Configure audio routing */
v4l2_subdev_call(dev->sd_cx25840, audio, s_routing,
@@ -1578,7 +1581,9 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
fe = vfe->dvb.frontend;
- if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850)
+ if ((dev->board == CX23885_BOARD_HAUPPAUGE_HVR1850) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255) ||
+ (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1255_22111))
fe = &dev->ts1.analog_fe;
if (fe && fe->ops.tuner_ops.set_analog_params) {
@@ -1608,6 +1613,8 @@ int cx23885_set_frequency(struct file *file, void *priv,
int ret;
switch (dev->board) {
+ case CX23885_BOARD_HAUPPAUGE_HVR1255:
+ case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
case CX23885_BOARD_HAUPPAUGE_HVR1850:
ret = cx23885_set_freq_via_ops(dev, f);
break;
diff --git a/drivers/media/video/cx23885/cx23885.h b/drivers/media/video/cx23885/cx23885.h
index d884784a1c85..13c37ec07ae7 100644
--- a/drivers/media/video/cx23885/cx23885.h
+++ b/drivers/media/video/cx23885/cx23885.h
@@ -90,6 +90,7 @@
#define CX23885_BOARD_MYGICA_X8507 33
#define CX23885_BOARD_TERRATEC_CINERGY_T_PCIE_DUAL 34
#define CX23885_BOARD_TEVII_S471 35
+#define CX23885_BOARD_HAUPPAUGE_HVR1255_22111 36
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/video/cx25821/cx25821-core.c b/drivers/media/video/cx25821/cx25821-core.c
index 83c1aa6b2e6c..f11f6f07e915 100644
--- a/drivers/media/video/cx25821/cx25821-core.c
+++ b/drivers/media/video/cx25821/cx25821-core.c
@@ -904,9 +904,6 @@ static int cx25821_dev_setup(struct cx25821_dev *dev)
list_add_tail(&dev->devlist, &cx25821_devlist);
mutex_unlock(&cx25821_devlist_mutex);
- strcpy(cx25821_boards[UNKNOWN_BOARD].name, "unknown");
- strcpy(cx25821_boards[CX25821_BOARD].name, "cx25821");
-
if (dev->pci->device != 0x8210) {
pr_info("%s(): Exiting. Incorrect Hardware device = 0x%02x\n",
__func__, dev->pci->device);
diff --git a/drivers/media/video/cx25821/cx25821.h b/drivers/media/video/cx25821/cx25821.h
index b9aa801b00a7..029f2934a6d8 100644
--- a/drivers/media/video/cx25821/cx25821.h
+++ b/drivers/media/video/cx25821/cx25821.h
@@ -187,7 +187,7 @@ enum port {
};
struct cx25821_board {
- char *name;
+ const char *name;
enum port porta;
enum port portb;
enum port portc;
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index fc1ff69cffd0..d8eac3e30a7e 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -84,7 +84,7 @@ MODULE_PARM_DESC(debug, "Debugging messages [0=Off (default) 1=On]");
/* ----------------------------------------------------------------------- */
-static void cx23885_std_setup(struct i2c_client *client);
+static void cx23888_std_setup(struct i2c_client *client);
int cx25840_write(struct i2c_client *client, u16 addr, u8 value)
{
@@ -638,10 +638,13 @@ static void cx23885_initialize(struct i2c_client *client)
finish_wait(&state->fw_wait, &wait);
destroy_workqueue(q);
- /* Call the cx23885 specific std setup func, we no longer rely on
+ /* Call the cx23888 specific std setup func, we no longer rely on
* the generic cx24840 func.
*/
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
+ else
+ cx25840_std_setup(client);
/* (re)set input */
set_input(client, state->vid_input, state->aud_input);
@@ -1103,9 +1106,23 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_write4(client, 0x410, 0xffff0dbf);
cx25840_write4(client, 0x414, 0x00137d03);
- cx25840_write4(client, 0x418, 0x01008080);
+
+ /* on the 887, 0x418 is HSCALE_CTRL, on the 888 it is
+ CHROMA_CTRL */
+ if (is_cx23888(state))
+ cx25840_write4(client, 0x418, 0x01008080);
+ else
+ cx25840_write4(client, 0x418, 0x01000000);
+
cx25840_write4(client, 0x41c, 0x00000000);
- cx25840_write4(client, 0x420, 0x001c3e0f);
+
+ /* on the 887, 0x420 is CHROMA_CTRL, on the 888 it is
+ CRUSH_CTRL */
+ if (is_cx23888(state))
+ cx25840_write4(client, 0x420, 0x001c3e0f);
+ else
+ cx25840_write4(client, 0x420, 0x001c8282);
+
cx25840_write4(client, 0x42c, 0x42600000);
cx25840_write4(client, 0x430, 0x0000039b);
cx25840_write4(client, 0x438, 0x00000000);
@@ -1233,7 +1250,7 @@ static int set_input(struct i2c_client *client, enum cx25840_video_input vid_inp
cx25840_write4(client, 0x8d0, 0x1f063870);
}
- if (is_cx2388x(state)) {
+ if (is_cx23888(state)) {
/* HVR1850 */
/* AUD_IO_CTRL - I2S Input, Parallel1*/
/* - Channel 1 src - Parallel1 (Merlin out) */
@@ -1298,8 +1315,8 @@ static int set_v4lstd(struct i2c_client *client)
}
cx25840_and_or(client, 0x400, ~0xf, fmt);
cx25840_and_or(client, 0x403, ~0x3, pal_m);
- if (is_cx2388x(state))
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
else
cx25840_std_setup(client);
if (!is_cx2583x(state))
@@ -1312,6 +1329,7 @@ static int set_v4lstd(struct i2c_client *client)
static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
+ struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
switch (ctrl->id) {
@@ -1324,12 +1342,20 @@ static int cx25840_s_ctrl(struct v4l2_ctrl *ctrl)
break;
case V4L2_CID_SATURATION:
- cx25840_write(client, 0x420, ctrl->val << 1);
- cx25840_write(client, 0x421, ctrl->val << 1);
+ if (is_cx23888(state)) {
+ cx25840_write(client, 0x418, ctrl->val << 1);
+ cx25840_write(client, 0x419, ctrl->val << 1);
+ } else {
+ cx25840_write(client, 0x420, ctrl->val << 1);
+ cx25840_write(client, 0x421, ctrl->val << 1);
+ }
break;
case V4L2_CID_HUE:
- cx25840_write(client, 0x422, ctrl->val);
+ if (is_cx23888(state))
+ cx25840_write(client, 0x41a, ctrl->val);
+ else
+ cx25840_write(client, 0x422, ctrl->val);
break;
default:
@@ -1354,11 +1380,21 @@ static int cx25840_s_mbus_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt
fmt->field = V4L2_FIELD_INTERLACED;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
- Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
- Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+ if (is_cx23888(state)) {
+ Vsrc = (cx25840_read(client, 0x42a) & 0x3f) << 4;
+ Vsrc |= (cx25840_read(client, 0x429) & 0xf0) >> 4;
+ } else {
+ Vsrc = (cx25840_read(client, 0x476) & 0x3f) << 4;
+ Vsrc |= (cx25840_read(client, 0x475) & 0xf0) >> 4;
+ }
- Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
- Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+ if (is_cx23888(state)) {
+ Hsrc = (cx25840_read(client, 0x426) & 0x3f) << 4;
+ Hsrc |= (cx25840_read(client, 0x425) & 0xf0) >> 4;
+ } else {
+ Hsrc = (cx25840_read(client, 0x472) & 0x3f) << 4;
+ Hsrc |= (cx25840_read(client, 0x471) & 0xf0) >> 4;
+ }
Vlines = fmt->height + (is_50Hz ? 4 : 7);
@@ -1782,8 +1818,8 @@ static int cx25840_s_video_routing(struct v4l2_subdev *sd,
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (is_cx2388x(state))
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
return set_input(client, input, state->aud_input);
}
@@ -1794,8 +1830,8 @@ static int cx25840_s_audio_routing(struct v4l2_subdev *sd,
struct cx25840_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (is_cx2388x(state))
- cx23885_std_setup(client);
+ if (is_cx23888(state))
+ cx23888_std_setup(client);
return set_input(client, state->vid_input, input);
}
@@ -4939,7 +4975,7 @@ void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
}
}
-static void cx23885_std_setup(struct i2c_client *client)
+static void cx23888_std_setup(struct i2c_client *client)
{
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
v4l2_std_id std = state->std;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 92da7c28b6f0..862c6575c557 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -2893,7 +2893,7 @@ static void request_module_async(struct work_struct *work)
if (dev->board.has_dvb)
request_module("em28xx-dvb");
- if (dev->board.has_ir_i2c && !disable_ir)
+ if (dev->board.ir_codes && !disable_ir)
request_module("em28xx-rc");
}
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index 6c31e46a1fd2..b9c6f17eabb2 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2070,10 +2070,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
set_gamma(gspca_dev, v4l2_ctrl_g_ctrl(sd->gamma));
set_redblue(gspca_dev, v4l2_ctrl_g_ctrl(sd->blue),
v4l2_ctrl_g_ctrl(sd->red));
- set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
- set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
- set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
- v4l2_ctrl_g_ctrl(sd->vflip));
+ if (sd->gain)
+ set_gain(gspca_dev, v4l2_ctrl_g_ctrl(sd->gain));
+ if (sd->exposure)
+ set_exposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure));
+ if (sd->hflip)
+ set_hvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip),
+ v4l2_ctrl_g_ctrl(sd->vflip));
reg_w1(gspca_dev, 0x1007, 0x20);
reg_w1(gspca_dev, 0x1061, 0x03);
@@ -2176,7 +2179,7 @@ static void sd_dqcallback(struct gspca_dev *gspca_dev)
struct sd *sd = (struct sd *) gspca_dev;
int avg_lum;
- if (!v4l2_ctrl_g_ctrl(sd->autogain))
+ if (sd->autogain == NULL || !v4l2_ctrl_g_ctrl(sd->autogain))
return;
avg_lum = atomic_read(&sd->avg_lum);
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index 4296a8350298..d2e6f82ecfac 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -43,6 +43,7 @@
#include <asm/fiq.h>
#include <mach/dma-mx1-mx2.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <mach/mx1_camera.h>
/*
diff --git a/drivers/media/video/mx2_camera.c b/drivers/media/video/mx2_camera.c
index 41f9a254b245..637bde8aca28 100644
--- a/drivers/media/video/mx2_camera.c
+++ b/drivers/media/video/mx2_camera.c
@@ -83,6 +83,7 @@
#define CSICR1_INV_DATA (1 << 3)
#define CSICR1_INV_PCLK (1 << 2)
#define CSICR1_REDGE (1 << 1)
+#define CSICR1_FMT_MASK (CSICR1_PACK_DIR | CSICR1_SWAP16_EN)
#define SHIFT_STATFF_LEVEL 22
#define SHIFT_RXFF_LEVEL 19
@@ -230,6 +231,7 @@ struct mx2_prp_cfg {
u32 src_pixel;
u32 ch1_pixel;
u32 irq_flags;
+ u32 csicr1;
};
/* prp resizing parameters */
@@ -330,6 +332,7 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
.ch1_pixel = 0x2ca00565, /* RGB565 */
.irq_flags = PRP_INTR_RDERR | PRP_INTR_CH1WERR |
PRP_INTR_CH1FC | PRP_INTR_LBOVF,
+ .csicr1 = 0,
}
},
{
@@ -343,6 +346,21 @@ static struct mx2_fmt_cfg mx27_emma_prp_table[] = {
.irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
PRP_INTR_CH2FC | PRP_INTR_LBOVF |
PRP_INTR_CH2OVF,
+ .csicr1 = CSICR1_PACK_DIR,
+ }
+ },
+ {
+ .in_fmt = V4L2_MBUS_FMT_UYVY8_2X8,
+ .out_fmt = V4L2_PIX_FMT_YUV420,
+ .cfg = {
+ .channel = 2,
+ .in_fmt = PRP_CNTL_DATA_IN_YUV422,
+ .out_fmt = PRP_CNTL_CH2_OUT_YUV420,
+ .src_pixel = 0x22000888, /* YUV422 (YUYV) */
+ .irq_flags = PRP_INTR_RDERR | PRP_INTR_CH2WERR |
+ PRP_INTR_CH2FC | PRP_INTR_LBOVF |
+ PRP_INTR_CH2OVF,
+ .csicr1 = CSICR1_SWAP16_EN,
}
},
};
@@ -1015,14 +1033,14 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
return ret;
}
+ csicr1 = (csicr1 & ~CSICR1_FMT_MASK) | pcdev->emma_prp->cfg.csicr1;
+
if (common_flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
csicr1 |= CSICR1_REDGE;
if (common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_SOF_POL;
if (common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_HSYNC_POL;
- if (pcdev->platform_flags & MX2_CAMERA_SWAP16)
- csicr1 |= CSICR1_SWAP16_EN;
if (pcdev->platform_flags & MX2_CAMERA_EXT_VSYNC)
csicr1 |= CSICR1_EXT_VSYNC;
if (pcdev->platform_flags & MX2_CAMERA_CCIR)
@@ -1033,8 +1051,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd)
csicr1 |= CSICR1_GCLK_MODE;
if (pcdev->platform_flags & MX2_CAMERA_INV_DATA)
csicr1 |= CSICR1_INV_DATA;
- if (pcdev->platform_flags & MX2_CAMERA_PACK_DIR_MSB)
- csicr1 |= CSICR1_PACK_DIR;
pcdev->csicr1 = csicr1;
@@ -1109,7 +1125,8 @@ static int mx2_camera_get_formats(struct soc_camera_device *icd,
return 0;
}
- if (code == V4L2_MBUS_FMT_YUYV8_2X8) {
+ if (code == V4L2_MBUS_FMT_YUYV8_2X8 ||
+ code == V4L2_MBUS_FMT_UYVY8_2X8) {
formats++;
if (xlate) {
/*
diff --git a/drivers/media/video/omap3isp/isppreview.c b/drivers/media/video/omap3isp/isppreview.c
index 8a4935ecc655..dd91da26f1b0 100644
--- a/drivers/media/video/omap3isp/isppreview.c
+++ b/drivers/media/video/omap3isp/isppreview.c
@@ -888,12 +888,12 @@ static const struct preview_update update_attrs[] = {
preview_config_contrast,
NULL,
offsetof(struct prev_params, contrast),
- 0, true,
+ 0, 0, true,
}, /* OMAP3ISP_PREV_BRIGHTNESS */ {
preview_config_brightness,
NULL,
offsetof(struct prev_params, brightness),
- 0, true,
+ 0, 0, true,
},
};
@@ -1102,7 +1102,7 @@ static void preview_config_input_size(struct isp_prev_device *prev, u32 active)
unsigned int elv = prev->crop.top + prev->crop.height - 1;
u32 features;
- if (format->code == V4L2_MBUS_FMT_Y10_1X10) {
+ if (format->code != V4L2_MBUS_FMT_Y10_1X10) {
sph -= 2;
eph += 2;
slv -= 2;
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index c370c2d87c17..b4c679b3fb0f 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -26,6 +26,7 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/mutex.h>
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index 354574591908..725812aa0c30 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -350,7 +350,8 @@ static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt,
if (pixm)
sizes[i] = max(size, pixm->plane_fmt[i].sizeimage);
else
- sizes[i] = size;
+ sizes[i] = max_t(u32, size, frame->payload[i]);
+
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
@@ -479,37 +480,39 @@ static int fimc_capture_set_default_format(struct fimc_dev *fimc);
static int fimc_capture_open(struct file *file)
{
struct fimc_dev *fimc = video_drvdata(file);
- int ret = v4l2_fh_open(file);
-
- if (ret)
- return ret;
+ int ret;
dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state);
- /* Return if the corresponding video mem2mem node is already opened. */
if (fimc_m2m_active(fimc))
return -EBUSY;
set_bit(ST_CAPT_BUSY, &fimc->state);
- pm_runtime_get_sync(&fimc->pdev->dev);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+ return ret;
- if (++fimc->vid_cap.refcnt == 1) {
- ret = fimc_pipeline_initialize(&fimc->pipeline,
- &fimc->vid_cap.vfd->entity, true);
- if (ret < 0) {
- dev_err(&fimc->pdev->dev,
- "Video pipeline initialization failed\n");
- pm_runtime_put_sync(&fimc->pdev->dev);
- fimc->vid_cap.refcnt--;
- v4l2_fh_release(file);
- clear_bit(ST_CAPT_BUSY, &fimc->state);
- return ret;
- }
- ret = fimc_capture_ctrls_create(fimc);
+ ret = v4l2_fh_open(file);
+ if (ret)
+ return ret;
- if (!ret && !fimc->vid_cap.user_subdev_api)
- ret = fimc_capture_set_default_format(fimc);
+ if (++fimc->vid_cap.refcnt != 1)
+ return 0;
+
+ ret = fimc_pipeline_initialize(&fimc->pipeline,
+ &fimc->vid_cap.vfd->entity, true);
+ if (ret < 0) {
+ clear_bit(ST_CAPT_BUSY, &fimc->state);
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ fimc->vid_cap.refcnt--;
+ v4l2_fh_release(file);
+ return ret;
}
+ ret = fimc_capture_ctrls_create(fimc);
+
+ if (!ret && !fimc->vid_cap.user_subdev_api)
+ ret = fimc_capture_set_default_format(fimc);
+
return ret;
}
@@ -818,9 +821,6 @@ static int fimc_cap_g_fmt_mplane(struct file *file, void *fh,
struct fimc_dev *fimc = video_drvdata(file);
struct fimc_ctx *ctx = fimc->vid_cap.ctx;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return -EINVAL;
-
return fimc_fill_format(&ctx->d_frame, f);
}
@@ -833,9 +833,6 @@ static int fimc_cap_try_fmt_mplane(struct file *file, void *fh,
struct v4l2_mbus_framefmt mf;
struct fimc_fmt *ffmt = NULL;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return -EINVAL;
-
if (pix->pixelformat == V4L2_PIX_FMT_JPEG) {
fimc_capture_try_format(ctx, &pix->width, &pix->height,
NULL, &pix->pixelformat,
@@ -887,8 +884,6 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
struct fimc_fmt *s_fmt = NULL;
int ret, i;
- if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- return -EINVAL;
if (vb2_is_busy(&fimc->vid_cap.vbq))
return -EBUSY;
@@ -924,10 +919,10 @@ static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f)
pix->width = mf->width;
pix->height = mf->height;
}
+
fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix);
for (i = 0; i < ff->fmt->colplanes; i++)
- ff->payload[i] =
- (pix->width * pix->height * ff->fmt->depth[i]) / 8;
+ ff->payload[i] = pix->plane_fmt[i].sizeimage;
set_frame_bounds(ff, pix->width, pix->height);
/* Reset the composition rectangle if not yet configured */
@@ -1045,18 +1040,22 @@ static int fimc_cap_streamon(struct file *file, void *priv,
{
struct fimc_dev *fimc = video_drvdata(file);
struct fimc_pipeline *p = &fimc->pipeline;
+ struct v4l2_subdev *sd = p->subdevs[IDX_SENSOR];
int ret;
if (fimc_capture_active(fimc))
return -EBUSY;
- media_entity_pipeline_start(&p->subdevs[IDX_SENSOR]->entity,
- p->m_pipeline);
+ ret = media_entity_pipeline_start(&sd->entity, p->m_pipeline);
+ if (ret < 0)
+ return ret;
if (fimc->vid_cap.user_subdev_api) {
ret = fimc_pipeline_validate(fimc);
- if (ret)
+ if (ret < 0) {
+ media_entity_pipeline_stop(&sd->entity);
return ret;
+ }
}
return vb2_streamon(&fimc->vid_cap.vbq, type);
}
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index 92fc5a20fb76..a4646ca1d56f 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -153,7 +153,7 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr",
+ .name = "YUV 4:2:0 non-contig. 2p, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12M,
.color = FIMC_FMT_YCBCR420,
.depth = { 8, 4 },
@@ -161,7 +161,7 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 2,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contiguous 3-planar, Y/Cb/Cr",
+ .name = "YUV 4:2:0 non-contig. 3p, Y/Cb/Cr",
.fourcc = V4L2_PIX_FMT_YUV420M,
.color = FIMC_FMT_YCBCR420,
.depth = { 8, 2, 2 },
@@ -169,7 +169,7 @@ static struct fimc_fmt fimc_formats[] = {
.colplanes = 3,
.flags = FMT_FLAGS_M2M,
}, {
- .name = "YUV 4:2:0 non-contiguous 2-planar, Y/CbCr, tiled",
+ .name = "YUV 4:2:0 non-contig. 2p, tiled",
.fourcc = V4L2_PIX_FMT_NV12MT,
.color = FIMC_FMT_YCBCR420,
.depth = { 8, 4 },
@@ -641,7 +641,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
if (!ctrls->ready)
return;
- mutex_lock(&ctrls->handler.lock);
+ mutex_lock(ctrls->handler.lock);
v4l2_ctrl_activate(ctrls->rotate, active);
v4l2_ctrl_activate(ctrls->hflip, active);
v4l2_ctrl_activate(ctrls->vflip, active);
@@ -660,7 +660,7 @@ void fimc_ctrls_activate(struct fimc_ctx *ctx, bool active)
ctx->hflip = 0;
ctx->vflip = 0;
}
- mutex_unlock(&ctrls->handler.lock);
+ mutex_unlock(ctrls->handler.lock);
}
/* Update maximum value of the alpha color control */
@@ -741,8 +741,8 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
pix->width = width;
for (i = 0; i < pix->num_planes; ++i) {
- u32 bpl = pix->plane_fmt[i].bytesperline;
- u32 *sizeimage = &pix->plane_fmt[i].sizeimage;
+ struct v4l2_plane_pix_format *plane_fmt = &pix->plane_fmt[i];
+ u32 bpl = plane_fmt->bytesperline;
if (fmt->colplanes > 1 && (bpl == 0 || bpl < pix->width))
bpl = pix->width; /* Planar */
@@ -754,8 +754,9 @@ void fimc_adjust_mplane_format(struct fimc_fmt *fmt, u32 width, u32 height,
if (i == 0) /* Same bytesperline for each plane. */
bytesperline = bpl;
- pix->plane_fmt[i].bytesperline = bytesperline;
- *sizeimage = (pix->width * pix->height * fmt->depth[i]) / 8;
+ plane_fmt->bytesperline = bytesperline;
+ plane_fmt->sizeimage = max((pix->width * pix->height *
+ fmt->depth[i]) / 8, plane_fmt->sizeimage);
}
}
diff --git a/drivers/media/video/s5p-fimc/fimc-lite.c b/drivers/media/video/s5p-fimc/fimc-lite.c
index 400d701aef04..74ff310db30c 100644
--- a/drivers/media/video/s5p-fimc/fimc-lite.c
+++ b/drivers/media/video/s5p-fimc/fimc-lite.c
@@ -451,34 +451,44 @@ static void fimc_lite_clear_event_counters(struct fimc_lite *fimc)
static int fimc_lite_open(struct file *file)
{
struct fimc_lite *fimc = video_drvdata(file);
- int ret = v4l2_fh_open(file);
+ int ret;
- if (ret)
- return ret;
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
set_bit(ST_FLITE_IN_USE, &fimc->state);
- pm_runtime_get_sync(&fimc->pdev->dev);
+ ret = pm_runtime_get_sync(&fimc->pdev->dev);
+ if (ret < 0)
+ goto done;
- if (++fimc->ref_count != 1 || fimc->out_path != FIMC_IO_DMA)
- return ret;
+ ret = v4l2_fh_open(file);
+ if (ret < 0)
+ goto done;
- ret = fimc_pipeline_initialize(&fimc->pipeline, &fimc->vfd->entity,
- true);
- if (ret < 0) {
- v4l2_err(fimc->vfd, "Video pipeline initialization failed\n");
- pm_runtime_put_sync(&fimc->pdev->dev);
- fimc->ref_count--;
- v4l2_fh_release(file);
- clear_bit(ST_FLITE_IN_USE, &fimc->state);
- }
+ if (++fimc->ref_count == 1 && fimc->out_path == FIMC_IO_DMA) {
+ ret = fimc_pipeline_initialize(&fimc->pipeline,
+ &fimc->vfd->entity, true);
+ if (ret < 0) {
+ pm_runtime_put_sync(&fimc->pdev->dev);
+ fimc->ref_count--;
+ v4l2_fh_release(file);
+ clear_bit(ST_FLITE_IN_USE, &fimc->state);
+ }
- fimc_lite_clear_event_counters(fimc);
+ fimc_lite_clear_event_counters(fimc);
+ }
+done:
+ mutex_unlock(&fimc->lock);
return ret;
}
static int fimc_lite_close(struct file *file)
{
struct fimc_lite *fimc = video_drvdata(file);
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
if (--fimc->ref_count == 0 && fimc->out_path == FIMC_IO_DMA) {
clear_bit(ST_FLITE_IN_USE, &fimc->state);
@@ -492,20 +502,39 @@ static int fimc_lite_close(struct file *file)
if (fimc->ref_count == 0)
vb2_queue_release(&fimc->vb_queue);
- return v4l2_fh_release(file);
+ ret = v4l2_fh_release(file);
+
+ mutex_unlock(&fimc->lock);
+ return ret;
}
static unsigned int fimc_lite_poll(struct file *file,
struct poll_table_struct *wait)
{
struct fimc_lite *fimc = video_drvdata(file);
- return vb2_poll(&fimc->vb_queue, file, wait);
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return POLL_ERR;
+
+ ret = vb2_poll(&fimc->vb_queue, file, wait);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
}
static int fimc_lite_mmap(struct file *file, struct vm_area_struct *vma)
{
struct fimc_lite *fimc = video_drvdata(file);
- return vb2_mmap(&fimc->vb_queue, vma);
+ int ret;
+
+ if (mutex_lock_interruptible(&fimc->lock))
+ return -ERESTARTSYS;
+
+ ret = vb2_mmap(&fimc->vb_queue, vma);
+ mutex_unlock(&fimc->lock);
+
+ return ret;
}
static const struct v4l2_file_operations fimc_lite_fops = {
@@ -762,7 +791,9 @@ static int fimc_lite_streamon(struct file *file, void *priv,
if (fimc_lite_active(fimc))
return -EBUSY;
- media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+ ret = media_entity_pipeline_start(&sensor->entity, p->m_pipeline);
+ if (ret < 0)
+ return ret;
ret = fimc_pipeline_validate(fimc);
if (ret) {
@@ -1508,7 +1539,7 @@ static int fimc_lite_suspend(struct device *dev)
return 0;
ret = fimc_lite_stop_capture(fimc, suspend);
- if (ret)
+ if (ret < 0 || !fimc_lite_active(fimc))
return ret;
return fimc_pipeline_shutdown(&fimc->pipeline);
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.c b/drivers/media/video/s5p-fimc/fimc-mdevice.c
index 6753c45631b8..52cef4865423 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.c
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.c
@@ -193,9 +193,13 @@ int __fimc_pipeline_shutdown(struct fimc_pipeline *p)
int fimc_pipeline_shutdown(struct fimc_pipeline *p)
{
- struct media_entity *me = &p->subdevs[IDX_SENSOR]->entity;
+ struct media_entity *me;
int ret;
+ if (!p || !p->subdevs[IDX_SENSOR])
+ return -EINVAL;
+
+ me = &p->subdevs[IDX_SENSOR]->entity;
mutex_lock(&me->parent->graph_mutex);
ret = __fimc_pipeline_shutdown(p);
mutex_unlock(&me->parent->graph_mutex);
@@ -498,12 +502,12 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
* @source: the source entity to create links to all fimc entities from
* @sensor: sensor subdev linked to FIMC[fimc_id] entity, may be null
* @pad: the source entity pad index
- * @fimc_id: index of the fimc device for which link should be enabled
+ * @link_mask: bitmask of the fimc devices for which link should be enabled
*/
static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
struct media_entity *source,
struct v4l2_subdev *sensor,
- int pad, int fimc_id)
+ int pad, int link_mask)
{
struct fimc_sensor_info *s_info;
struct media_entity *sink;
@@ -520,7 +524,7 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
if (!fmd->fimc[i]->variant->has_cam_if)
continue;
- flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+ flags = ((1 << i) & link_mask) ? MEDIA_LNK_FL_ENABLED : 0;
sink = &fmd->fimc[i]->vid_cap.subdev.entity;
ret = media_entity_create_link(source, pad, sink,
@@ -552,7 +556,10 @@ static int __fimc_md_create_fimc_sink_links(struct fimc_md *fmd,
if (!fmd->fimc_lite[i])
continue;
- flags = (i == fimc_id) ? MEDIA_LNK_FL_ENABLED : 0;
+ if (link_mask & (1 << (i + FIMC_MAX_DEVS)))
+ flags = MEDIA_LNK_FL_ENABLED;
+ else
+ flags = 0;
sink = &fmd->fimc_lite[i]->subdev.entity;
ret = media_entity_create_link(source, pad, sink,
@@ -614,9 +621,8 @@ static int fimc_md_create_links(struct fimc_md *fmd)
struct s5p_fimc_isp_info *pdata;
struct fimc_sensor_info *s_info;
struct media_entity *source, *sink;
- int i, pad, fimc_id = 0;
- int ret = 0;
- u32 flags;
+ int i, pad, fimc_id = 0, ret = 0;
+ u32 flags, link_mask = 0;
for (i = 0; i < fmd->num_sensors; i++) {
if (fmd->sensor[i].subdev == NULL)
@@ -668,19 +674,20 @@ static int fimc_md_create_links(struct fimc_md *fmd)
if (source == NULL)
continue;
+ link_mask = 1 << fimc_id++;
ret = __fimc_md_create_fimc_sink_links(fmd, source, sensor,
- pad, fimc_id++);
+ pad, link_mask);
}
- fimc_id = 0;
for (i = 0; i < ARRAY_SIZE(fmd->csis); i++) {
if (fmd->csis[i].sd == NULL)
continue;
source = &fmd->csis[i].sd->entity;
pad = CSIS_PAD_SOURCE;
+ link_mask = 1 << fimc_id++;
ret = __fimc_md_create_fimc_sink_links(fmd, source, NULL,
- pad, fimc_id++);
+ pad, link_mask);
}
/* Create immutable links between each FIMC's subdev and video node */
@@ -734,8 +741,8 @@ static void fimc_md_put_clocks(struct fimc_md *fmd)
}
static int __fimc_md_set_camclk(struct fimc_md *fmd,
- struct fimc_sensor_info *s_info,
- bool on)
+ struct fimc_sensor_info *s_info,
+ bool on)
{
struct s5p_fimc_isp_info *pdata = s_info->pdata;
struct fimc_camclk_info *camclk;
@@ -744,12 +751,10 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
if (WARN_ON(pdata->clk_id >= FIMC_MAX_CAMCLKS) || fmd == NULL)
return -EINVAL;
- if (s_info->clk_on == on)
- return 0;
camclk = &fmd->camclk[pdata->clk_id];
- dbg("camclk %d, f: %lu, clk: %p, on: %d",
- pdata->clk_id, pdata->clk_frequency, camclk, on);
+ dbg("camclk %d, f: %lu, use_count: %d, on: %d",
+ pdata->clk_id, pdata->clk_frequency, camclk->use_count, on);
if (on) {
if (camclk->use_count > 0 &&
@@ -760,11 +765,9 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
clk_set_rate(camclk->clock, pdata->clk_frequency);
camclk->frequency = pdata->clk_frequency;
ret = clk_enable(camclk->clock);
+ dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
+ clk_get_rate(camclk->clock));
}
- s_info->clk_on = 1;
- dbg("Enabled camclk %d: f: %lu", pdata->clk_id,
- clk_get_rate(camclk->clock));
-
return ret;
}
@@ -773,7 +776,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
if (--camclk->use_count == 0) {
clk_disable(camclk->clock);
- s_info->clk_on = 0;
dbg("Disabled camclk %d", pdata->clk_id);
}
return ret;
@@ -789,8 +791,6 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
* devices to which sensors can be attached, either directly or through
* the MIPI CSI receiver. The clock is allowed here to be used by
* multiple sensors concurrently if they use same frequency.
- * The per sensor subdev clk_on attribute helps to synchronize accesses
- * to the sclk_cam clocks from the video and media device nodes.
* This function should only be called when the graph mutex is held.
*/
int fimc_md_set_camclk(struct v4l2_subdev *sd, bool on)
diff --git a/drivers/media/video/s5p-fimc/fimc-mdevice.h b/drivers/media/video/s5p-fimc/fimc-mdevice.h
index 3b8a3492a176..1f5dbaff5442 100644
--- a/drivers/media/video/s5p-fimc/fimc-mdevice.h
+++ b/drivers/media/video/s5p-fimc/fimc-mdevice.h
@@ -47,7 +47,6 @@ struct fimc_camclk_info {
* @pdata: sensor's atrributes passed as media device's platform data
* @subdev: image sensor v4l2 subdev
* @host: fimc device the sensor is currently linked to
- * @clk_on: sclk_cam clock's state associated with this subdev
*
* This data structure applies to image sensor and the writeback subdevs.
*/
@@ -55,7 +54,6 @@ struct fimc_sensor_info {
struct s5p_fimc_isp_info *pdata;
struct v4l2_subdev *subdev;
struct fimc_dev *host;
- bool clk_on;
};
/**
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
index 4dd32fc8fd82..feea867f318c 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_dec.c
@@ -996,6 +996,7 @@ int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx)
for (i = 0; i < NUM_CTRLS; i++) {
if (IS_MFC51_PRIV(controls[i].id)) {
+ memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
cfg.ops = &s5p_mfc_dec_ctrl_ops;
cfg.id = controls[i].id;
cfg.min = controls[i].minimum;
diff --git a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
index 03d83340e7fb..158b78989b89 100644
--- a/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/video/s5p-mfc/s5p_mfc_enc.c
@@ -1773,6 +1773,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
}
for (i = 0; i < NUM_CTRLS; i++) {
if (IS_MFC51_PRIV(controls[i].id)) {
+ memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
cfg.ops = &s5p_mfc_enc_ctrl_ops;
cfg.id = controls[i].id;
cfg.min = controls[i].minimum;
diff --git a/drivers/media/video/smiapp/smiapp-core.c b/drivers/media/video/smiapp/smiapp-core.c
index e8c93c89265a..9cf5bda35fbe 100644
--- a/drivers/media/video/smiapp/smiapp-core.c
+++ b/drivers/media/video/smiapp/smiapp-core.c
@@ -31,6 +31,7 @@
#include <linux/device.h>
#include <linux/gpio.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 83dbb2ddff10..0cbada18f6f5 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -681,6 +681,7 @@ static void determine_valid_ioctls(struct video_device *vdev)
SET_VALID_IOCTL(ops, VIDIOC_G_DV_TIMINGS, vidioc_g_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_ENUM_DV_TIMINGS, vidioc_enum_dv_timings);
SET_VALID_IOCTL(ops, VIDIOC_QUERY_DV_TIMINGS, vidioc_query_dv_timings);
+ SET_VALID_IOCTL(ops, VIDIOC_DV_TIMINGS_CAP, vidioc_dv_timings_cap);
/* yes, really vidioc_subscribe_event */
SET_VALID_IOCTL(ops, VIDIOC_DQEVENT, vidioc_subscribe_event);
SET_VALID_IOCTL(ops, VIDIOC_SUBSCRIBE_EVENT, vidioc_subscribe_event);
diff --git a/drivers/media/video/zoran/zoran.h b/drivers/media/video/zoran/zoran.h
index d7166afc255e..ca2754a3cd63 100644
--- a/drivers/media/video/zoran/zoran.h
+++ b/drivers/media/video/zoran/zoran.h
@@ -172,8 +172,10 @@ struct zoran_jpg_settings {
struct v4l2_jpegcompression jpg_comp; /* JPEG-specific capture settings */
};
+struct zoran_fh;
+
struct zoran_mapping {
- struct file *file;
+ struct zoran_fh *fh;
int count;
};
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index c57310931810..c6ccdeb6d8d6 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -2811,7 +2811,7 @@ static void
zoran_vm_close (struct vm_area_struct *vma)
{
struct zoran_mapping *map = vma->vm_private_data;
- struct zoran_fh *fh = map->file->private_data;
+ struct zoran_fh *fh = map->fh;
struct zoran *zr = fh->zr;
int i;
@@ -2938,7 +2938,7 @@ zoran_mmap (struct file *file,
res = -ENOMEM;
goto mmap_unlock_and_return;
}
- map->file = file;
+ map->fh = fh;
map->count = 1;
vma->vm_ops = &zoran_vm_ops;
diff --git a/drivers/media/video/zoran/zr36016.c b/drivers/media/video/zoran/zr36016.c
index 21c088ea9046..b87ddba8608f 100644
--- a/drivers/media/video/zoran/zr36016.c
+++ b/drivers/media/video/zoran/zr36016.c
@@ -40,10 +40,10 @@
/* v4l API */
/* headerfile of this module */
-#include"zr36016.h"
+#include "zr36016.h"
/* codec io API */
-#include"videocodec.h"
+#include "videocodec.h"
/* it doesn't make sense to have more than 20 or so,
just to prevent some unwanted loops */
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index e129c820df7d..92144ed1ad46 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -286,6 +286,7 @@ config TWL6040_CORE
depends on I2C=y && GENERIC_HARDIRQS
select MFD_CORE
select REGMAP_I2C
+ select IRQ_DOMAIN
default n
help
Say yes here if you want support for Texas Instruments TWL6040 audio
diff --git a/drivers/mfd/ab5500-core.h b/drivers/mfd/ab5500-core.h
deleted file mode 100644
index 63b30b17e4f3..000000000000
--- a/drivers/mfd/ab5500-core.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright (C) 2011 ST-Ericsson
- * License terms: GNU General Public License (GPL) version 2
- * Shared definitions and data structures for the AB5500 MFD driver
- */
-
-/* Read/write operation values. */
-#define AB5500_PERM_RD (0x01)
-#define AB5500_PERM_WR (0x02)
-
-/* Read/write permissions. */
-#define AB5500_PERM_RO (AB5500_PERM_RD)
-#define AB5500_PERM_RW (AB5500_PERM_RD | AB5500_PERM_WR)
-
-#define AB5500_MASK_BASE (0x60)
-#define AB5500_MASK_END (0x79)
-#define AB5500_CHIP_ID (0x20)
-
-/**
- * struct ab5500_reg_range
- * @first: the first address of the range
- * @last: the last address of the range
- * @perm: access permissions for the range
- */
-struct ab5500_reg_range {
- u8 first;
- u8 last;
- u8 perm;
-};
-
-/**
- * struct ab5500_i2c_ranges
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_ranges {
- u8 nranges;
- u8 bankid;
- const struct ab5500_reg_range *range;
-};
-
-/**
- * struct ab5500_i2c_banks
- * @count: the number of ranges in the list
- * @range: the list of register ranges
- */
-struct ab5500_i2c_banks {
- u8 nbanks;
- const struct ab5500_i2c_ranges *bank;
-};
-
-/**
- * struct ab5500_bank
- * @slave_addr: I2C slave_addr found in AB5500 specification
- * @name: Documentation name of the bank. For reference
- */
-struct ab5500_bank {
- u8 slave_addr;
- const char *name;
-};
-
-static const struct ab5500_bank bankinfo[AB5500_NUM_BANKS] = {
- [AB5500_BANK_VIT_IO_I2C_CLK_TST_OTP] = {
- AB5500_ADDR_VIT_IO_I2C_CLK_TST_OTP, "VIT_IO_I2C_CLK_TST_OTP"},
- [AB5500_BANK_VDDDIG_IO_I2C_CLK_TST] = {
- AB5500_ADDR_VDDDIG_IO_I2C_CLK_TST, "VDDDIG_IO_I2C_CLK_TST"},
- [AB5500_BANK_VDENC] = {AB5500_ADDR_VDENC, "VDENC"},
- [AB5500_BANK_SIM_USBSIM] = {AB5500_ADDR_SIM_USBSIM, "SIM_USBSIM"},
- [AB5500_BANK_LED] = {AB5500_ADDR_LED, "LED"},
- [AB5500_BANK_ADC] = {AB5500_ADDR_ADC, "ADC"},
- [AB5500_BANK_RTC] = {AB5500_ADDR_RTC, "RTC"},
- [AB5500_BANK_STARTUP] = {AB5500_ADDR_STARTUP, "STARTUP"},
- [AB5500_BANK_DBI_ECI] = {AB5500_ADDR_DBI_ECI, "DBI-ECI"},
- [AB5500_BANK_CHG] = {AB5500_ADDR_CHG, "CHG"},
- [AB5500_BANK_FG_BATTCOM_ACC] = {
- AB5500_ADDR_FG_BATTCOM_ACC, "FG_BATCOM_ACC"},
- [AB5500_BANK_USB] = {AB5500_ADDR_USB, "USB"},
- [AB5500_BANK_IT] = {AB5500_ADDR_IT, "IT"},
- [AB5500_BANK_VIBRA] = {AB5500_ADDR_VIBRA, "VIBRA"},
- [AB5500_BANK_AUDIO_HEADSETUSB] = {
- AB5500_ADDR_AUDIO_HEADSETUSB, "AUDIO_HEADSETUSB"},
-};
-
-int ab5500_get_register_interruptible_raw(struct ab5500 *ab, u8 bank, u8 reg,
- u8 *value);
-int ab5500_mask_and_set_register_interruptible_raw(struct ab5500 *ab, u8 bank,
- u8 reg, u8 bitmask, u8 bitvalues);
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
index 3fcdab3eb8eb..03df422feb76 100644
--- a/drivers/mfd/mc13xxx-spi.c
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -49,10 +49,72 @@ static struct regmap_config mc13xxx_regmap_spi_config = {
.reg_bits = 7,
.pad_bits = 1,
.val_bits = 24,
+ .write_flag_mask = 0x80,
.max_register = MC13XXX_NUMREGS,
.cache_type = REGCACHE_NONE,
+ .use_single_rw = 1,
+};
+
+static int mc13xxx_spi_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ unsigned char w[4] = { *((unsigned char *) reg), 0, 0, 0};
+ unsigned char r[4];
+ unsigned char *p = val;
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_transfer t = {
+ .tx_buf = w,
+ .rx_buf = r,
+ .len = 4,
+ };
+
+ struct spi_message m;
+ int ret;
+
+ if (val_size != 3 || reg_size != 1)
+ return -ENOTSUPP;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ ret = spi_sync(spi, &m);
+
+ memcpy(p, &r[1], 3);
+
+ return ret;
+}
+
+static int mc13xxx_spi_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ if (count != 4)
+ return -ENOTSUPP;
+
+ return spi_write(spi, data, count);
+}
+
+/*
+ * We cannot use regmap-spi generic bus implementation here.
+ * The MC13783 chip will get corrupted if CS signal is deasserted
+ * and on i.Mx31 SoC (the target SoC for MC13783 PMIC) the SPI controller
+ * has the following errata (DSPhl22960):
+ * "The CSPI negates SS when the FIFO becomes empty with
+ * SSCTL= 0. Software cannot guarantee that the FIFO will not
+ * drain because of higher priority interrupts and the
+ * non-realtime characteristics of the operating system. As a
+ * result, the SS will negate before all of the data has been
+ * transferred to/from the peripheral."
+ * We workaround this by accessing the SPI controller with a
+ * single transfert.
+ */
+
+static struct regmap_bus regmap_mc13xxx_bus = {
+ .write = mc13xxx_spi_write,
+ .read = mc13xxx_spi_read,
};
static int mc13xxx_spi_probe(struct spi_device *spi)
@@ -73,12 +135,13 @@ static int mc13xxx_spi_probe(struct spi_device *spi)
dev_set_drvdata(&spi->dev, mc13xxx);
spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
- spi->bits_per_word = 32;
mc13xxx->dev = &spi->dev;
mutex_init(&mc13xxx->lock);
- mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
+ mc13xxx->regmap = regmap_init(&spi->dev, &regmap_mc13xxx_bus, &spi->dev,
+ &mc13xxx_regmap_spi_config);
+
if (IS_ERR(mc13xxx->regmap)) {
ret = PTR_ERR(mc13xxx->regmap);
dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 7e96bb229724..41088ecbb2a9 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -25,6 +25,7 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
+#include <linux/gpio.h>
#include <plat/cpu.h>
#include <plat/usb.h>
#include <linux/pm_runtime.h>
@@ -500,8 +501,21 @@ static void omap_usbhs_init(struct device *dev)
dev_dbg(dev, "starting TI HSUSB Controller\n");
pm_runtime_get_sync(dev);
- spin_lock_irqsave(&omap->lock, flags);
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+ GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+ GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+ /* Hold the PHY in RESET for enough time till DIR is high */
+ udelay(10);
+ }
+
+ spin_lock_irqsave(&omap->lock, flags);
omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
@@ -581,9 +595,39 @@ static void omap_usbhs_init(struct device *dev)
}
spin_unlock_irqrestore(&omap->lock, flags);
+
+ if (pdata->ehci_data->phy_reset) {
+ /* Hold the PHY in RESET for enough time till
+ * PHY is settled and ready
+ */
+ udelay(10);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_set_value_cansleep
+ (pdata->ehci_data->reset_gpio_port[0], 1);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_set_value_cansleep
+ (pdata->ehci_data->reset_gpio_port[1], 1);
+ }
+
pm_runtime_put_sync(dev);
}
+static void omap_usbhs_deinit(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ }
+}
+
/**
* usbhs_omap_probe - initialize TI-based HCDs
@@ -767,6 +811,7 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
goto end_probe;
err_alloc:
+ omap_usbhs_deinit(&pdev->dev);
iounmap(omap->tll_base);
err_tll:
@@ -818,6 +863,7 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
{
struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
+ omap_usbhs_deinit(&pdev->dev);
iounmap(omap->tll_base);
iounmap(omap->uhh_base);
clk_put(omap->init_60m_fclk);
diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c
index 00c0aba7eba0..c4a69f193a1d 100644
--- a/drivers/mfd/palmas.c
+++ b/drivers/mfd/palmas.c
@@ -356,7 +356,14 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
}
}
- ret = regmap_add_irq_chip(palmas->regmap[1], palmas->irq,
+ /* Change IRQ into clear on read mode for efficiency */
+ slave = PALMAS_BASE_TO_SLAVE(PALMAS_INTERRUPT_BASE);
+ addr = PALMAS_BASE_TO_REG(PALMAS_INTERRUPT_BASE, PALMAS_INT_CTRL);
+ reg = PALMAS_INT_CTRL_INT_CLEAR;
+
+ regmap_write(palmas->regmap[slave], addr, reg);
+
+ ret = regmap_add_irq_chip(palmas->regmap[slave], palmas->irq,
IRQF_ONESHOT | IRQF_TRIGGER_LOW, -1, &palmas_irq_chip,
&palmas->irq_data);
if (ret < 0)
@@ -441,6 +448,9 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c,
goto err;
}
+ children[PALMAS_PMIC_ID].platform_data = pdata->pmic_pdata;
+ children[PALMAS_PMIC_ID].pdata_size = sizeof(*pdata->pmic_pdata);
+
ret = mfd_add_devices(palmas->dev, -1,
children, ARRAY_SIZE(palmas_children),
NULL, regmap_irq_chip_get_base(palmas->irq_data));
@@ -472,6 +482,7 @@ static const struct i2c_device_id palmas_i2c_id[] = {
{ "twl6035", },
{ "twl6037", },
{ "tps65913", },
+ { /* end */ }
};
MODULE_DEVICE_TABLE(i2c, palmas_i2c_id);
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index db194e433c08..61c097a98f5d 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/err.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tps65217.h>
@@ -132,6 +133,61 @@ int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg,
}
EXPORT_SYMBOL_GPL(tps65217_clear_bits);
+#ifdef CONFIG_OF
+static struct of_regulator_match reg_matches[] = {
+ { .name = "dcdc1", .driver_data = (void *)TPS65217_DCDC_1 },
+ { .name = "dcdc2", .driver_data = (void *)TPS65217_DCDC_2 },
+ { .name = "dcdc3", .driver_data = (void *)TPS65217_DCDC_3 },
+ { .name = "ldo1", .driver_data = (void *)TPS65217_LDO_1 },
+ { .name = "ldo2", .driver_data = (void *)TPS65217_LDO_2 },
+ { .name = "ldo3", .driver_data = (void *)TPS65217_LDO_3 },
+ { .name = "ldo4", .driver_data = (void *)TPS65217_LDO_4 },
+};
+
+static struct tps65217_board *tps65217_parse_dt(struct i2c_client *client)
+{
+ struct device_node *node = client->dev.of_node;
+ struct tps65217_board *pdata;
+ struct device_node *regs;
+ int count = ARRAY_SIZE(reg_matches);
+ int ret, i;
+
+ regs = of_find_node_by_name(node, "regulators");
+ if (!regs)
+ return NULL;
+
+ ret = of_regulator_match(&client->dev, regs, reg_matches, count);
+ of_node_put(regs);
+ if ((ret < 0) || (ret > count))
+ return NULL;
+
+ count = ret;
+ pdata = devm_kzalloc(&client->dev, count * sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ for (i = 0; i < count; i++) {
+ if (!reg_matches[i].init_data || !reg_matches[i].of_node)
+ continue;
+
+ pdata->tps65217_init_data[i] = reg_matches[i].init_data;
+ pdata->of_node[i] = reg_matches[i].of_node;
+ }
+
+ return pdata;
+}
+
+static struct of_device_id tps65217_of_match[] = {
+ { .compatible = "ti,tps65217", },
+ { },
+};
+#else
+static struct tps65217_board *tps65217_parse_dt(struct i2c_client *client)
+{
+ return NULL;
+}
+#endif
+
static struct regmap_config tps65217_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -141,10 +197,14 @@ static int __devinit tps65217_probe(struct i2c_client *client,
const struct i2c_device_id *ids)
{
struct tps65217 *tps;
+ struct regulator_init_data *reg_data;
struct tps65217_board *pdata = client->dev.platform_data;
int i, ret;
unsigned int version;
+ if (!pdata && client->dev.of_node)
+ pdata = tps65217_parse_dt(client);
+
tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
if (!tps)
return -ENOMEM;
@@ -182,8 +242,9 @@ static int __devinit tps65217_probe(struct i2c_client *client,
}
pdev->dev.parent = tps->dev;
- platform_device_add_data(pdev, &pdata->tps65217_init_data[i],
- sizeof(pdata->tps65217_init_data[i]));
+ pdev->dev.of_node = pdata->of_node[i];
+ reg_data = pdata->tps65217_init_data[i];
+ platform_device_add_data(pdev, reg_data, sizeof(*reg_data));
tps->regulator_pdev[i] = pdev;
platform_device_add(pdev);
@@ -212,6 +273,8 @@ MODULE_DEVICE_TABLE(i2c, tps65217_id_table);
static struct i2c_driver tps65217_driver = {
.driver = {
.name = "tps65217",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tps65217_of_match),
},
.id_table = tps65217_id_table,
.probe = tps65217_probe,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 2661f6e366f9..154f3ef07631 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -511,7 +511,6 @@ config USB_SWITCH_FSA9480
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
-source "drivers/misc/iwmc3200top/Kconfig"
source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 456972faaeb3..b88df7a350b8 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
-obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index 85cc7710193c..9d5eed754666 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -180,7 +180,7 @@ static int cb710_suspend(struct pci_dev *pdev, pm_message_t state)
pci_save_state(pdev);
pci_disable_device(pdev);
if (state.event & PM_EVENT_SLEEP)
- pci_set_power_state(pdev, PCI_D3cold);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig
deleted file mode 100644
index 9e4b88fb57f1..000000000000
--- a/drivers/misc/iwmc3200top/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
-config IWMC3200TOP
- tristate "Intel Wireless MultiCom Top Driver"
- depends on MMC && EXPERIMENTAL
- select FW_LOADER
- ---help---
- Intel Wireless MultiCom 3200 Top driver is responsible for
- for firmware load and enabled coms enumeration
-
-config IWMC3200TOP_DEBUG
- bool "Enable full debug output of iwmc3200top Driver"
- depends on IWMC3200TOP
- ---help---
- Enable full debug output of iwmc3200top Driver
-
-config IWMC3200TOP_DEBUGFS
- bool "Enable Debugfs debugging interface for iwmc3200top"
- depends on IWMC3200TOP
- ---help---
- Enable creation of debugfs files for iwmc3200top
-
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile
deleted file mode 100644
index fbf53fb4634e..000000000000
--- a/drivers/misc/iwmc3200top/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
-# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
-# drivers/misc/iwmc3200top/Makefile
-#
-# Copyright (C) 2009 Intel Corporation. All rights reserved.
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License version
-# 2 as published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-#
-# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
-# -
-#
-#
-
-obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o
-iwmc3200top-objs := main.o fw-download.o
-iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
-iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c
deleted file mode 100644
index 62fbaec48207..000000000000
--- a/drivers/misc/iwmc3200top/debugfs.c
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/debufs.c
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio.h>
-#include <linux/debugfs.h>
-
-#include "iwmc3200top.h"
-#include "fw-msg.h"
-#include "log.h"
-#include "debugfs.h"
-
-
-
-/* Constants definition */
-#define HEXADECIMAL_RADIX 16
-
-/* Functions definition */
-
-
-#define DEBUGFS_ADD(name, parent) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \
- &iwmct_dbgfs_##name##_ops); \
-} while (0)
-
-#define DEBUGFS_RM(name) do { \
- debugfs_remove(name); \
- name = NULL; \
-} while (0)
-
-#define DEBUGFS_READ_FUNC(name) \
-ssize_t iwmct_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-ssize_t iwmct_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name) \
- static const struct file_operations iwmct_dbgfs_##name##_ops = { \
- .read = iwmct_dbgfs_##name##_read, \
- .open = iwmct_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
- };
-
-#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name) \
- static const struct file_operations iwmct_dbgfs_##name##_ops = { \
- .write = iwmct_dbgfs_##name##_write, \
- .open = iwmct_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
- };
-
-#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name) \
- DEBUGFS_WRITE_FUNC(name) \
- static const struct file_operations iwmct_dbgfs_##name##_ops = {\
- .write = iwmct_dbgfs_##name##_write, \
- .read = iwmct_dbgfs_##name##_read, \
- .open = iwmct_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
- };
-
-
-/* Debugfs file ops definitions */
-
-/*
- * Create the debugfs files and directories
- *
- */
-void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
-{
- struct iwmct_debugfs *dbgfs;
-
- dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
- if (!dbgfs) {
- LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
- sizeof(struct iwmct_debugfs));
- return;
- }
-
- priv->dbgfs = dbgfs;
- dbgfs->name = name;
- dbgfs->dir_drv = debugfs_create_dir(name, NULL);
- if (!dbgfs->dir_drv) {
- LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
- return;
- }
-
- return;
-}
-
-/**
- * Remove the debugfs files and directories
- *
- */
-void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
-{
- if (!dbgfs)
- return;
-
- DEBUGFS_RM(dbgfs->dir_drv);
- kfree(dbgfs);
- dbgfs = NULL;
-}
-
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h
deleted file mode 100644
index 71d45759b40f..000000000000
--- a/drivers/misc/iwmc3200top/debugfs.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/debufs.h
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#ifndef __DEBUGFS_H__
-#define __DEBUGFS_H__
-
-
-#ifdef CONFIG_IWMC3200TOP_DEBUGFS
-
-struct iwmct_debugfs {
- const char *name;
- struct dentry *dir_drv;
- struct dir_drv_files {
- } dbgfs_drv_files;
-};
-
-void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
-void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);
-
-#else /* CONFIG_IWMC3200TOP_DEBUGFS */
-
-struct iwmct_debugfs;
-
-static inline void
-iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
-{}
-
-static inline void
-iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
-{}
-
-#endif /* CONFIG_IWMC3200TOP_DEBUGFS */
-
-#endif /* __DEBUGFS_H__ */
-
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
deleted file mode 100644
index e27afde6e99f..000000000000
--- a/drivers/misc/iwmc3200top/fw-download.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/fw-download.c
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#include <linux/firmware.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/slab.h>
-#include <asm/unaligned.h>
-
-#include "iwmc3200top.h"
-#include "log.h"
-#include "fw-msg.h"
-
-#define CHECKSUM_BYTES_NUM sizeof(u32)
-
-/**
- init parser struct with file
- */
-static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
- size_t file_size, size_t block_size)
-{
- struct iwmct_parser *parser = &priv->parser;
- struct iwmct_fw_hdr *fw_hdr = &parser->versions;
-
- LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
-
- LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
-
- parser->file = file;
- parser->file_size = file_size;
- parser->cur_pos = 0;
- parser->entry_point = 0;
- parser->buf = kzalloc(block_size, GFP_KERNEL);
- if (!parser->buf) {
- LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
- return -ENOMEM;
- }
- parser->buf_size = block_size;
-
- /* extract fw versions */
- memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
- LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
- "top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
- fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
- fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
- fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
- fw_hdr->tic_name);
-
- parser->cur_pos += sizeof(struct iwmct_fw_hdr);
-
- LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
- return 0;
-}
-
-static bool iwmct_checksum(struct iwmct_priv *priv)
-{
- struct iwmct_parser *parser = &priv->parser;
- __le32 *file = (__le32 *)parser->file;
- int i, pad, steps;
- u32 accum = 0;
- u32 checksum;
- u32 mask = 0xffffffff;
-
- pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
- steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4;
-
- LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);
-
- for (i = 0; i < steps; i++)
- accum += le32_to_cpu(file[i]);
-
- if (pad) {
- mask <<= 8 * (4 - pad);
- accum += le32_to_cpu(file[steps]) & mask;
- }
-
- checksum = get_unaligned_le32((__le32 *)(parser->file +
- parser->file_size - CHECKSUM_BYTES_NUM));
-
- LOG_INFO(priv, FW_DOWNLOAD,
- "compare checksum accum=0x%x to checksum=0x%x\n",
- accum, checksum);
-
- return checksum == accum;
-}
-
-static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
- size_t *sec_size, __le32 *sec_addr)
-{
- struct iwmct_parser *parser = &priv->parser;
- struct iwmct_dbg *dbg = &priv->dbg;
- struct iwmct_fw_sec_hdr *sec_hdr;
-
- LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
-
- while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
- <= parser->file_size) {
-
- sec_hdr = (struct iwmct_fw_sec_hdr *)
- (parser->file + parser->cur_pos);
- parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
-
- LOG_INFO(priv, FW_DOWNLOAD,
- "sec hdr: type=%s addr=0x%x size=%d\n",
- sec_hdr->type, sec_hdr->target_addr,
- sec_hdr->data_size);
-
- if (strcmp(sec_hdr->type, "ENT") == 0)
- parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
- else if (strcmp(sec_hdr->type, "LBL") == 0)
- strcpy(dbg->label_fw, parser->file + parser->cur_pos);
- else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
- (priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
- ((strcmp(sec_hdr->type, "GPS") == 0) &&
- (priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
- ((strcmp(sec_hdr->type, "BTH") == 0) &&
- (priv->barker & BARKER_DNLOAD_BT_MSK))) {
- *sec_addr = sec_hdr->target_addr;
- *sec_size = le32_to_cpu(sec_hdr->data_size);
- *p_sec = parser->file + parser->cur_pos;
- parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
- return 1;
- } else if (strcmp(sec_hdr->type, "LOG") != 0)
- LOG_WARNING(priv, FW_DOWNLOAD,
- "skipping section type %s\n",
- sec_hdr->type);
-
- parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
- LOG_INFO(priv, FW_DOWNLOAD,
- "finished with section cur_pos=%zd\n", parser->cur_pos);
- }
-
- LOG_TRACE(priv, INIT, "<--\n");
- return 0;
-}
-
-static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
- size_t sec_size, __le32 addr)
-{
- struct iwmct_parser *parser = &priv->parser;
- struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
- const u8 *cur_block = p_sec;
- size_t sent = 0;
- int cnt = 0;
- int ret = 0;
- u32 cmd = 0;
-
- LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
- LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
- addr, sec_size);
-
- while (sent < sec_size) {
- int i;
- u32 chksm = 0;
- u32 reset = atomic_read(&priv->reset);
- /* actual FW data */
- u32 data_size = min(parser->buf_size - sizeof(*hdr),
- sec_size - sent);
- /* Pad to block size */
- u32 trans_size = (data_size + sizeof(*hdr) +
- IWMC_SDIO_BLK_SIZE - 1) &
- ~(IWMC_SDIO_BLK_SIZE - 1);
- ++cnt;
-
- /* in case of reset, interrupt FW DOWNLAOD */
- if (reset) {
- LOG_INFO(priv, FW_DOWNLOAD,
- "Reset detected. Abort FW download!!!");
- ret = -ECANCELED;
- goto exit;
- }
-
- memset(parser->buf, 0, parser->buf_size);
- cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
- cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
- cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
- cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
- hdr->data_size = cpu_to_le32(data_size);
- hdr->target_addr = addr;
-
- /* checksum is allowed for sizes divisible by 4 */
- if (data_size & 0x3)
- cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;
-
- memcpy(hdr->data, cur_block, data_size);
-
-
- if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {
-
- chksm = data_size + le32_to_cpu(addr) + cmd;
- for (i = 0; i < data_size >> 2; i++)
- chksm += ((u32 *)cur_block)[i];
-
- hdr->block_chksm = cpu_to_le32(chksm);
- LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
- hdr->block_chksm);
- }
-
- LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
- "sec_size=%zd, startAddress 0x%X\n",
- cnt, trans_size, sent, sec_size, addr);
-
- if (priv->dbg.dump)
- LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);
-
-
- hdr->cmd = cpu_to_le32(cmd);
- /* send it down */
- /* TODO: add more proper sending and error checking */
- ret = iwmct_tx(priv, parser->buf, trans_size);
- if (ret != 0) {
- LOG_INFO(priv, FW_DOWNLOAD,
- "iwmct_tx returned %d\n", ret);
- goto exit;
- }
-
- addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
- sent += data_size;
- cur_block = p_sec + sent;
-
- if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
- LOG_INFO(priv, FW_DOWNLOAD,
- "Block number limit is reached [%d]\n",
- priv->dbg.blocks);
- break;
- }
- }
-
- if (sent < sec_size)
- ret = -EINVAL;
-exit:
- LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
- return ret;
-}
-
-static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
-{
- struct iwmct_parser *parser = &priv->parser;
- struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
- int ret;
- u32 cmd;
-
- LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
-
- memset(parser->buf, 0, parser->buf_size);
- cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
- if (jump) {
- cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
- hdr->target_addr = cpu_to_le32(parser->entry_point);
- LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
- parser->entry_point);
- } else {
- cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
- LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
- }
-
- hdr->cmd = cpu_to_le32(cmd);
-
- LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
- /* send it down */
- /* TODO: add more proper sending and error checking */
- ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
- if (ret)
- LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
-
- LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
- return 0;
-}
-
-int iwmct_fw_load(struct iwmct_priv *priv)
-{
- const u8 *fw_name = FW_NAME(FW_API_VER);
- const struct firmware *raw;
- const u8 *pdata;
- size_t len;
- __le32 addr;
- int ret;
-
-
- LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
- priv->barker);
- LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
- LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
- LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
-
-
- /* get the firmware */
- ret = request_firmware(&raw, fw_name, &priv->func->dev);
- if (ret < 0) {
- LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
- fw_name, ret);
- goto exit;
- }
-
- if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
- LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller then (%zd) (%zd)\n",
- fw_name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
- goto exit;
- }
-
- LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
-
- /* clear parser struct */
- ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
- if (ret < 0) {
- LOG_ERROR(priv, FW_DOWNLOAD,
- "iwmct_parser_init failed: Reason %d\n", ret);
- goto exit;
- }
-
- if (!iwmct_checksum(priv)) {
- LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
- ret = -EINVAL;
- goto exit;
- }
-
- /* download firmware to device */
- while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
- ret = iwmct_download_section(priv, pdata, len, addr);
- if (ret) {
- LOG_ERROR(priv, FW_DOWNLOAD,
- "%s download section failed\n", fw_name);
- goto exit;
- }
- }
-
- ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
-
-exit:
- kfree(priv->parser.buf);
- release_firmware(raw);
- return ret;
-}
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h
deleted file mode 100644
index 9e26b75bd482..000000000000
--- a/drivers/misc/iwmc3200top/fw-msg.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/fw-msg.h
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#ifndef __FWMSG_H__
-#define __FWMSG_H__
-
-#define COMM_TYPE_D2H 0xFF
-#define COMM_TYPE_H2D 0xEE
-
-#define COMM_CATEGORY_OPERATIONAL 0x00
-#define COMM_CATEGORY_DEBUG 0x01
-#define COMM_CATEGORY_TESTABILITY 0x02
-#define COMM_CATEGORY_DIAGNOSTICS 0x03
-
-#define OP_DBG_ZSTR_MSG cpu_to_le16(0x1A)
-
-#define FW_LOG_SRC_MAX 32
-#define FW_LOG_SRC_ALL 255
-
-#define FW_STRING_TABLE_ADDR cpu_to_le32(0x0C000000)
-
-#define CMD_DBG_LOG_LEVEL cpu_to_le16(0x0001)
-#define CMD_TST_DEV_RESET cpu_to_le16(0x0060)
-#define CMD_TST_FUNC_RESET cpu_to_le16(0x0062)
-#define CMD_TST_IFACE_RESET cpu_to_le16(0x0064)
-#define CMD_TST_CPU_UTILIZATION cpu_to_le16(0x0065)
-#define CMD_TST_TOP_DEEP_SLEEP cpu_to_le16(0x0080)
-#define CMD_TST_WAKEUP cpu_to_le16(0x0081)
-#define CMD_TST_FUNC_WAKEUP cpu_to_le16(0x0082)
-#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST cpu_to_le16(0x0083)
-#define CMD_TST_GET_MEM_DUMP cpu_to_le16(0x0096)
-
-#define OP_OPR_ALIVE cpu_to_le16(0x0010)
-#define OP_OPR_CMD_ACK cpu_to_le16(0x001F)
-#define OP_OPR_CMD_NACK cpu_to_le16(0x0020)
-#define OP_TST_MEM_DUMP cpu_to_le16(0x0043)
-
-#define CMD_FLAG_PADDING_256 0x80
-
-#define FW_HCMD_BLOCK_SIZE 256
-
-struct msg_hdr {
- u8 type;
- u8 category;
- __le16 opcode;
- u8 seqnum;
- u8 flags;
- __le16 length;
-} __attribute__((__packed__));
-
-struct log_hdr {
- __le32 timestamp;
- u8 severity;
- u8 logsource;
- __le16 reserved;
-} __attribute__((__packed__));
-
-struct mdump_hdr {
- u8 dmpid;
- u8 frag;
- __le16 size;
- __le32 addr;
-} __attribute__((__packed__));
-
-struct top_msg {
- struct msg_hdr hdr;
- union {
- /* D2H messages */
- struct {
- struct log_hdr log_hdr;
- u8 data[1];
- } __attribute__((__packed__)) log;
-
- struct {
- struct log_hdr log_hdr;
- struct mdump_hdr md_hdr;
- u8 data[1];
- } __attribute__((__packed__)) mdump;
-
- /* H2D messages */
- struct {
- u8 logsource;
- u8 sevmask;
- } __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
- struct mdump_hdr mdump_req;
- } u;
-} __attribute__((__packed__));
-
-
-#endif /* __FWMSG_H__ */
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
deleted file mode 100644
index 620973ed8bf9..000000000000
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/iwmc3200top.h
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#ifndef __IWMC3200TOP_H__
-#define __IWMC3200TOP_H__
-
-#include <linux/workqueue.h>
-
-#define DRV_NAME "iwmc3200top"
-#define FW_API_VER 1
-#define _FW_NAME(api) DRV_NAME "." #api ".fw"
-#define FW_NAME(api) _FW_NAME(api)
-
-#define IWMC_SDIO_BLK_SIZE 256
-#define IWMC_DEFAULT_TR_BLK 64
-#define IWMC_SDIO_DATA_ADDR 0x0
-#define IWMC_SDIO_INTR_ENABLE_ADDR 0x14
-#define IWMC_SDIO_INTR_STATUS_ADDR 0x13
-#define IWMC_SDIO_INTR_CLEAR_ADDR 0x13
-#define IWMC_SDIO_INTR_GET_SIZE_ADDR 0x2C
-
-#define COMM_HUB_HEADER_LENGTH 16
-#define LOGGER_HEADER_LENGTH 10
-
-
-#define BARKER_DNLOAD_BT_POS 0
-#define BARKER_DNLOAD_BT_MSK BIT(BARKER_DNLOAD_BT_POS)
-#define BARKER_DNLOAD_GPS_POS 1
-#define BARKER_DNLOAD_GPS_MSK BIT(BARKER_DNLOAD_GPS_POS)
-#define BARKER_DNLOAD_TOP_POS 2
-#define BARKER_DNLOAD_TOP_MSK BIT(BARKER_DNLOAD_TOP_POS)
-#define BARKER_DNLOAD_RESERVED1_POS 3
-#define BARKER_DNLOAD_RESERVED1_MSK BIT(BARKER_DNLOAD_RESERVED1_POS)
-#define BARKER_DNLOAD_JUMP_POS 4
-#define BARKER_DNLOAD_JUMP_MSK BIT(BARKER_DNLOAD_JUMP_POS)
-#define BARKER_DNLOAD_SYNC_POS 5
-#define BARKER_DNLOAD_SYNC_MSK BIT(BARKER_DNLOAD_SYNC_POS)
-#define BARKER_DNLOAD_RESERVED2_POS 6
-#define BARKER_DNLOAD_RESERVED2_MSK (0x3 << BARKER_DNLOAD_RESERVED2_POS)
-#define BARKER_DNLOAD_BARKER_POS 8
-#define BARKER_DNLOAD_BARKER_MSK (0xffffff << BARKER_DNLOAD_BARKER_POS)
-
-#define IWMC_BARKER_REBOOT (0xdeadbe << BARKER_DNLOAD_BARKER_POS)
-/* whole field barker */
-#define IWMC_BARKER_ACK 0xfeedbabe
-
-#define IWMC_CMD_SIGNATURE 0xcbbc
-
-#define CMD_HDR_OPCODE_POS 0
-#define CMD_HDR_OPCODE_MSK_MSK (0xf << CMD_HDR_OPCODE_MSK_POS)
-#define CMD_HDR_RESPONSE_CODE_POS 4
-#define CMD_HDR_RESPONSE_CODE_MSK (0xf << CMD_HDR_RESPONSE_CODE_POS)
-#define CMD_HDR_USE_CHECKSUM_POS 8
-#define CMD_HDR_USE_CHECKSUM_MSK BIT(CMD_HDR_USE_CHECKSUM_POS)
-#define CMD_HDR_RESPONSE_REQUIRED_POS 9
-#define CMD_HDR_RESPONSE_REQUIRED_MSK BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
-#define CMD_HDR_DIRECT_ACCESS_POS 10
-#define CMD_HDR_DIRECT_ACCESS_MSK BIT(CMD_HDR_DIRECT_ACCESS_POS)
-#define CMD_HDR_RESERVED_POS 11
-#define CMD_HDR_RESERVED_MSK BIT(0x1f << CMD_HDR_RESERVED_POS)
-#define CMD_HDR_SIGNATURE_POS 16
-#define CMD_HDR_SIGNATURE_MSK BIT(0xffff << CMD_HDR_SIGNATURE_POS)
-
-enum {
- IWMC_OPCODE_PING = 0,
- IWMC_OPCODE_READ = 1,
- IWMC_OPCODE_WRITE = 2,
- IWMC_OPCODE_JUMP = 3,
- IWMC_OPCODE_REBOOT = 4,
- IWMC_OPCODE_PERSISTENT_WRITE = 5,
- IWMC_OPCODE_PERSISTENT_READ = 6,
- IWMC_OPCODE_READ_MODIFY_WRITE = 7,
- IWMC_OPCODE_LAST_COMMAND = 15
-};
-
-struct iwmct_fw_load_hdr {
- __le32 cmd;
- __le32 target_addr;
- __le32 data_size;
- __le32 block_chksm;
- u8 data[0];
-};
-
-/**
- * struct iwmct_fw_hdr
- * holds all sw components versions
- */
-struct iwmct_fw_hdr {
- u8 top_major;
- u8 top_minor;
- u8 top_revision;
- u8 gps_major;
- u8 gps_minor;
- u8 gps_revision;
- u8 bt_major;
- u8 bt_minor;
- u8 bt_revision;
- u8 tic_name[31];
-};
-
-/**
- * struct iwmct_fw_sec_hdr
- * @type: function type
- * @data_size: section's data size
- * @target_addr: download address
- */
-struct iwmct_fw_sec_hdr {
- u8 type[4];
- __le32 data_size;
- __le32 target_addr;
-};
-
-/**
- * struct iwmct_parser
- * @file: fw image
- * @file_size: fw size
- * @cur_pos: position in file
- * @buf: temp buf for download
- * @buf_size: size of buf
- * @entry_point: address to jump in fw kick-off
- */
-struct iwmct_parser {
- const u8 *file;
- size_t file_size;
- size_t cur_pos;
- u8 *buf;
- size_t buf_size;
- u32 entry_point;
- struct iwmct_fw_hdr versions;
-};
-
-
-struct iwmct_work_struct {
- struct list_head list;
- ssize_t iosize;
-};
-
-struct iwmct_dbg {
- int blocks;
- bool dump;
- bool jump;
- bool direct;
- bool checksum;
- bool fw_download;
- int block_size;
- int download_trans_blks;
-
- char label_fw[256];
-};
-
-struct iwmct_debugfs;
-
-struct iwmct_priv {
- struct sdio_func *func;
- struct iwmct_debugfs *dbgfs;
- struct iwmct_parser parser;
- atomic_t reset;
- atomic_t dev_sync;
- u32 trans_len;
- u32 barker;
- struct iwmct_dbg dbg;
-
- /* drivers work items */
- struct work_struct bus_rescan_worker;
- struct work_struct isr_worker;
-
- /* drivers wait queue */
- wait_queue_head_t wait_q;
-
- /* rx request list */
- struct list_head read_req_list;
-};
-
-extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
-extern int iwmct_fw_load(struct iwmct_priv *priv);
-
-extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
-extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
-extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
-extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);
-
-#endif /* __IWMC3200TOP_H__ */
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c
deleted file mode 100644
index a36a55a49cac..000000000000
--- a/drivers/misc/iwmc3200top/log.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/log.c
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/slab.h>
-#include <linux/ctype.h>
-#include "fw-msg.h"
-#include "iwmc3200top.h"
-#include "log.h"
-
-/* Maximal hexadecimal string size of the FW memdump message */
-#define LOG_MSG_SIZE_MAX 12400
-
-/* iwmct_logdefs is a global used by log macros */
-u8 iwmct_logdefs[LOG_SRC_MAX];
-static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
-
-
-static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
-{
- int i;
-
- if (src < size)
- logdefs[src] = logmask;
- else if (src == LOG_SRC_ALL)
- for (i = 0; i < size; i++)
- logdefs[i] = logmask;
- else
- return -1;
-
- return 0;
-}
-
-
-int iwmct_log_set_filter(u8 src, u8 logmask)
-{
- return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
-}
-
-
-int iwmct_log_set_fw_filter(u8 src, u8 logmask)
-{
- return _log_set_log_filter(iwmct_fw_logdefs,
- FW_LOG_SRC_MAX, src, logmask);
-}
-
-
-static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
- int ilen, char *pref)
-{
- int pos = 0;
- int i;
- int len;
-
- for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
- str[pos] = pref[i];
-
- for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
- len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);
-
- if (i < ilen)
- return -1;
-
- return 0;
-}
-
-/* NOTE: This function is not thread safe.
- Currently it's called only from sdio rx worker - no race there
-*/
-void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
-{
- struct top_msg *msg;
- static char logbuf[LOG_MSG_SIZE_MAX];
-
- msg = (struct top_msg *)buf;
-
- if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
- LOG_ERROR(priv, FW_MSG, "Log message from TOP "
- "is too short %d (expected %zd)\n",
- len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
- return;
- }
-
- if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
- BIT(msg->u.log.log_hdr.severity)) ||
- !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
- return;
-
- switch (msg->hdr.category) {
- case COMM_CATEGORY_TESTABILITY:
- if (!(iwmct_logdefs[LOG_SRC_TST] &
- BIT(msg->u.log.log_hdr.severity)))
- return;
- if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
- le16_to_cpu(msg->hdr.length) +
- sizeof(msg->hdr), "<TST>"))
- LOG_WARNING(priv, TST,
- "TOP TST message is too long, truncating...");
- LOG_WARNING(priv, TST, "%s\n", logbuf);
- break;
- case COMM_CATEGORY_DEBUG:
- if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
- LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
- ((u8 *)msg) + sizeof(msg->hdr)
- + sizeof(msg->u.log.log_hdr));
- else {
- if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
- le16_to_cpu(msg->hdr.length)
- + sizeof(msg->hdr),
- "<DBG>"))
- LOG_WARNING(priv, FW_MSG,
- "TOP DBG message is too long,"
- "truncating...");
- LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
- }
- break;
- default:
- break;
- }
-}
-
-static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
-{
- int i, pos, len;
- for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
- len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
- i, logdefs[i]);
- pos += len;
- }
- buf[pos-1] = '\n';
- buf[pos] = '\0';
-
- if (i < logdefsz)
- return -1;
- return 0;
-}
-
-int log_get_filter_str(char *buf, int size)
-{
- return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
-}
-
-int log_get_fw_filter_str(char *buf, int size)
-{
- return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
-}
-
-#define HEXADECIMAL_RADIX 16
-#define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */
-
-ssize_t show_iwmct_log_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwmct_priv *priv = dev_get_drvdata(d);
- char *str_buf;
- int buf_size;
- ssize_t ret;
-
- buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
- str_buf = kzalloc(buf_size, GFP_KERNEL);
- if (!str_buf) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to allocate %d bytes\n", buf_size);
- ret = -ENOMEM;
- goto exit;
- }
-
- if (log_get_filter_str(str_buf, buf_size) < 0) {
- ret = -EINVAL;
- goto exit;
- }
-
- ret = sprintf(buf, "%s", str_buf);
-
-exit:
- kfree(str_buf);
- return ret;
-}
-
-ssize_t store_iwmct_log_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwmct_priv *priv = dev_get_drvdata(d);
- char *token, *str_buf = NULL;
- long val;
- ssize_t ret = count;
- u8 src, mask;
-
- if (!count)
- goto exit;
-
- str_buf = kzalloc(count, GFP_KERNEL);
- if (!str_buf) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to allocate %zd bytes\n", count);
- ret = -ENOMEM;
- goto exit;
- }
-
- memcpy(str_buf, buf, count);
-
- while ((token = strsep(&str_buf, ",")) != NULL) {
- while (isspace(*token))
- ++token;
- if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to convert string to long %s\n",
- token);
- ret = -EINVAL;
- goto exit;
- }
-
- mask = val & 0xFF;
- src = (val & 0XFF00) >> 8;
- iwmct_log_set_filter(src, mask);
- }
-
-exit:
- kfree(str_buf);
- return ret;
-}
-
-ssize_t show_iwmct_log_level_fw(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwmct_priv *priv = dev_get_drvdata(d);
- char *str_buf;
- int buf_size;
- ssize_t ret;
-
- buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;
-
- str_buf = kzalloc(buf_size, GFP_KERNEL);
- if (!str_buf) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to allocate %d bytes\n", buf_size);
- ret = -ENOMEM;
- goto exit;
- }
-
- if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
- ret = -EINVAL;
- goto exit;
- }
-
- ret = sprintf(buf, "%s", str_buf);
-
-exit:
- kfree(str_buf);
- return ret;
-}
-
-ssize_t store_iwmct_log_level_fw(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwmct_priv *priv = dev_get_drvdata(d);
- struct top_msg cmd;
- char *token, *str_buf = NULL;
- ssize_t ret = count;
- u16 cmdlen = 0;
- int i;
- long val;
- u8 src, mask;
-
- if (!count)
- goto exit;
-
- str_buf = kzalloc(count, GFP_KERNEL);
- if (!str_buf) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to allocate %zd bytes\n", count);
- ret = -ENOMEM;
- goto exit;
- }
-
- memcpy(str_buf, buf, count);
-
- cmd.hdr.type = COMM_TYPE_H2D;
- cmd.hdr.category = COMM_CATEGORY_DEBUG;
- cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
-
- for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) &&
- (i < FW_LOG_SRC_MAX); i++) {
-
- while (isspace(*token))
- ++token;
-
- if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
- LOG_ERROR(priv, DEBUGFS,
- "failed to convert string to long %s\n",
- token);
- ret = -EINVAL;
- goto exit;
- }
-
- mask = val & 0xFF; /* LSB */
- src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
- iwmct_log_set_fw_filter(src, mask);
-
- cmd.u.logdefs[i].logsource = src;
- cmd.u.logdefs[i].sevmask = mask;
- }
-
- cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
- cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
-
- ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
- if (ret) {
- LOG_ERROR(priv, DEBUGFS,
- "Failed to send %d bytes of fwcmd, ret=%zd\n",
- cmdlen, ret);
- goto exit;
- } else
- LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
-
- ret = count;
-
-exit:
- kfree(str_buf);
- return ret;
-}
-
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
deleted file mode 100644
index 4434bb16cea7..000000000000
--- a/drivers/misc/iwmc3200top/log.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/log.h
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#ifndef __LOG_H__
-#define __LOG_H__
-
-
-/* log severity:
- * The log levels here match FW log levels
- * so values need to stay as is */
-#define LOG_SEV_CRITICAL 0
-#define LOG_SEV_ERROR 1
-#define LOG_SEV_WARNING 2
-#define LOG_SEV_INFO 3
-#define LOG_SEV_INFOEX 4
-
-/* Log levels not defined for FW */
-#define LOG_SEV_TRACE 5
-#define LOG_SEV_DUMP 6
-
-#define LOG_SEV_FW_FILTER_ALL \
- (BIT(LOG_SEV_CRITICAL) | \
- BIT(LOG_SEV_ERROR) | \
- BIT(LOG_SEV_WARNING) | \
- BIT(LOG_SEV_INFO) | \
- BIT(LOG_SEV_INFOEX))
-
-#define LOG_SEV_FILTER_ALL \
- (BIT(LOG_SEV_CRITICAL) | \
- BIT(LOG_SEV_ERROR) | \
- BIT(LOG_SEV_WARNING) | \
- BIT(LOG_SEV_INFO) | \
- BIT(LOG_SEV_INFOEX) | \
- BIT(LOG_SEV_TRACE) | \
- BIT(LOG_SEV_DUMP))
-
-/* log source */
-#define LOG_SRC_INIT 0
-#define LOG_SRC_DEBUGFS 1
-#define LOG_SRC_FW_DOWNLOAD 2
-#define LOG_SRC_FW_MSG 3
-#define LOG_SRC_TST 4
-#define LOG_SRC_IRQ 5
-
-#define LOG_SRC_MAX 6
-#define LOG_SRC_ALL 0xFF
-
-/**
- * Default intitialization runtime log level
- */
-#ifndef LOG_SEV_FILTER_RUNTIME
-#define LOG_SEV_FILTER_RUNTIME \
- (BIT(LOG_SEV_CRITICAL) | \
- BIT(LOG_SEV_ERROR) | \
- BIT(LOG_SEV_WARNING))
-#endif
-
-#ifndef FW_LOG_SEV_FILTER_RUNTIME
-#define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL
-#endif
-
-#ifdef CONFIG_IWMC3200TOP_DEBUG
-/**
- * Log macros
- */
-
-#define priv2dev(priv) (&(priv->func)->dev)
-
-#define LOG_CRITICAL(priv, src, fmt, args...) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \
- dev_crit(priv2dev(priv), "%s %d: " fmt, \
- __func__, __LINE__, ##args); \
-} while (0)
-
-#define LOG_ERROR(priv, src, fmt, args...) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \
- dev_err(priv2dev(priv), "%s %d: " fmt, \
- __func__, __LINE__, ##args); \
-} while (0)
-
-#define LOG_WARNING(priv, src, fmt, args...) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \
- dev_warn(priv2dev(priv), "%s %d: " fmt, \
- __func__, __LINE__, ##args); \
-} while (0)
-
-#define LOG_INFO(priv, src, fmt, args...) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \
- dev_info(priv2dev(priv), "%s %d: " fmt, \
- __func__, __LINE__, ##args); \
-} while (0)
-
-#define LOG_TRACE(priv, src, fmt, args...) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
- dev_dbg(priv2dev(priv), "%s %d: " fmt, \
- __func__, __LINE__, ##args); \
-} while (0)
-
-#define LOG_HEXDUMP(src, ptr, len) \
-do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
- 16, 1, ptr, len, false); \
-} while (0)
-
-void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
-
-extern u8 iwmct_logdefs[];
-
-int iwmct_log_set_filter(u8 src, u8 logmask);
-int iwmct_log_set_fw_filter(u8 src, u8 logmask);
-
-ssize_t show_iwmct_log_level(struct device *d,
- struct device_attribute *attr, char *buf);
-ssize_t store_iwmct_log_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count);
-ssize_t show_iwmct_log_level_fw(struct device *d,
- struct device_attribute *attr, char *buf);
-ssize_t store_iwmct_log_level_fw(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count);
-
-#else
-
-#define LOG_CRITICAL(priv, src, fmt, args...)
-#define LOG_ERROR(priv, src, fmt, args...)
-#define LOG_WARNING(priv, src, fmt, args...)
-#define LOG_INFO(priv, src, fmt, args...)
-#define LOG_TRACE(priv, src, fmt, args...)
-#define LOG_HEXDUMP(src, ptr, len)
-
-static inline void iwmct_log_top_message(struct iwmct_priv *priv,
- u8 *buf, int len) {}
-static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
-static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
-
-#endif /* CONFIG_IWMC3200TOP_DEBUG */
-
-int log_get_filter_str(char *buf, int size);
-int log_get_fw_filter_str(char *buf, int size);
-
-#endif /* __LOG_H__ */
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
deleted file mode 100644
index 701eb600b127..000000000000
--- a/drivers/misc/iwmc3200top/main.c
+++ /dev/null
@@ -1,662 +0,0 @@
-/*
- * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
- * drivers/misc/iwmc3200top/main.c
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
- * -
- *
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/debugfs.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio.h>
-
-#include "iwmc3200top.h"
-#include "log.h"
-#include "fw-msg.h"
-#include "debugfs.h"
-
-
-#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
-#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
-
-#define DRIVER_VERSION "0.1.62"
-
-MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR(DRIVER_COPYRIGHT);
-MODULE_FIRMWARE(FW_NAME(FW_API_VER));
-
-
-static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
-{
- return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
-
-}
-int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
-{
- int ret;
- sdio_claim_host(priv->func);
- ret = __iwmct_tx(priv, src, count);
- sdio_release_host(priv->func);
- return ret;
-}
-/*
- * This workers main task is to wait for OP_OPR_ALIVE
- * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
- * When OP_OPR_ALIVE received it will issue
- * a call to "bus_rescan_devices".
- */
-static void iwmct_rescan_worker(struct work_struct *ws)
-{
- struct iwmct_priv *priv;
- int ret;
-
- priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
-
- LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
-
- ret = bus_rescan_devices(priv->func->dev.bus);
- if (ret < 0)
- LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
-}
-
-static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
-{
- switch (msg->hdr.opcode) {
- case OP_OPR_ALIVE:
- LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
- schedule_work(&priv->bus_rescan_worker);
- break;
- default:
- LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
- msg->hdr.opcode);
- break;
- }
-}
-
-
-static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
-{
- struct top_msg *msg;
-
- msg = (struct top_msg *)buf;
-
- if (msg->hdr.type != COMM_TYPE_D2H) {
- LOG_ERROR(priv, FW_MSG,
- "Message from TOP with invalid message type 0x%X\n",
- msg->hdr.type);
- return;
- }
-
- if (len < sizeof(msg->hdr)) {
- LOG_ERROR(priv, FW_MSG,
- "Message from TOP is too short for message header "
- "received %d bytes, expected at least %zd bytes\n",
- len, sizeof(msg->hdr));
- return;
- }
-
- if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
- LOG_ERROR(priv, FW_MSG,
- "Message length (%d bytes) is shorter than "
- "in header (%d bytes)\n",
- len, le16_to_cpu(msg->hdr.length));
- return;
- }
-
- switch (msg->hdr.category) {
- case COMM_CATEGORY_OPERATIONAL:
- op_top_message(priv, (struct top_msg *)buf);
- break;
-
- case COMM_CATEGORY_DEBUG:
- case COMM_CATEGORY_TESTABILITY:
- case COMM_CATEGORY_DIAGNOSTICS:
- iwmct_log_top_message(priv, buf, len);
- break;
-
- default:
- LOG_ERROR(priv, FW_MSG,
- "Message from TOP with unknown category 0x%X\n",
- msg->hdr.category);
- break;
- }
-}
-
-int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
-{
- int ret;
- u8 *buf;
-
- LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
-
- /* add padding to 256 for IWMC */
- ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
-
- LOG_HEXDUMP(FW_MSG, cmd, len);
-
- if (len > FW_HCMD_BLOCK_SIZE) {
- LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n",
- len, FW_HCMD_BLOCK_SIZE);
- return -1;
- }
-
- buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
- if (!buf) {
- LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
- FW_HCMD_BLOCK_SIZE);
- return -1;
- }
-
- memcpy(buf, cmd, len);
- ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
-
- kfree(buf);
- return ret;
-}
-
-
-static void iwmct_irq_read_worker(struct work_struct *ws)
-{
- struct iwmct_priv *priv;
- struct iwmct_work_struct *read_req;
- __le32 *buf = NULL;
- int ret;
- int iosize;
- u32 barker;
- bool is_barker;
-
- priv = container_of(ws, struct iwmct_priv, isr_worker);
-
- LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
-
- /* --------------------- Handshake with device -------------------- */
- sdio_claim_host(priv->func);
-
- /* all list manipulations have to be protected by
- * sdio_claim_host/sdio_release_host */
- if (list_empty(&priv->read_req_list)) {
- LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
- goto exit_release;
- }
-
- read_req = list_entry(priv->read_req_list.next,
- struct iwmct_work_struct, list);
-
- list_del(&read_req->list);
- iosize = read_req->iosize;
- kfree(read_req);
-
- buf = kzalloc(iosize, GFP_KERNEL);
- if (!buf) {
- LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
- goto exit_release;
- }
-
- LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
- iosize, buf, priv->func->num);
-
- /* read from device */
- ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
- if (ret) {
- LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
- goto exit_release;
- }
-
- LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);
-
- barker = le32_to_cpu(buf[0]);
-
- /* Verify whether it's a barker and if not - treat as regular Rx */
- if (barker == IWMC_BARKER_ACK ||
- (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {
-
- /* Valid Barker is equal on first 4 dwords */
- is_barker = (buf[1] == buf[0]) &&
- (buf[2] == buf[0]) &&
- (buf[3] == buf[0]);
-
- if (!is_barker) {
- LOG_WARNING(priv, IRQ,
- "Potentially inconsistent barker "
- "%08X_%08X_%08X_%08X\n",
- le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
- le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
- }
- } else {
- is_barker = false;
- }
-
- /* Handle Top CommHub message */
- if (!is_barker) {
- sdio_release_host(priv->func);
- handle_top_message(priv, (u8 *)buf, iosize);
- goto exit;
- } else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
- if (atomic_read(&priv->dev_sync) == 0) {
- LOG_ERROR(priv, IRQ,
- "ACK barker arrived out-of-sync\n");
- goto exit_release;
- }
-
- /* Continuing to FW download (after Sync is completed)*/
- atomic_set(&priv->dev_sync, 0);
- LOG_INFO(priv, IRQ, "ACK barker arrived "
- "- starting FW download\n");
- } else { /* REBOOT barker */
- LOG_INFO(priv, IRQ, "Received reboot barker: %x\n", barker);
- priv->barker = barker;
-
- if (barker & BARKER_DNLOAD_SYNC_MSK) {
- /* Send the same barker back */
- ret = __iwmct_tx(priv, buf, iosize);
- if (ret) {
- LOG_ERROR(priv, IRQ,
- "error %d echoing barker\n", ret);
- goto exit_release;
- }
- LOG_INFO(priv, IRQ, "Echoing barker to device\n");
- atomic_set(&priv->dev_sync, 1);
- goto exit_release;
- }
-
- /* Continuing to FW download (without Sync) */
- LOG_INFO(priv, IRQ, "No sync requested "
- "- starting FW download\n");
- }
-
- sdio_release_host(priv->func);
-
- if (priv->dbg.fw_download)
- iwmct_fw_load(priv);
- else
- LOG_ERROR(priv, IRQ, "FW download not allowed\n");
-
- goto exit;
-
-exit_release:
- sdio_release_host(priv->func);
-exit:
- kfree(buf);
- LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
-}
-
-static void iwmct_irq(struct sdio_func *func)
-{
- struct iwmct_priv *priv;
- int val, ret;
- int iosize;
- int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
- struct iwmct_work_struct *read_req;
-
- priv = sdio_get_drvdata(func);
-
- LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
-
- /* read the function's status register */
- val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
-
- LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
-
- if (!val) {
- LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
- goto exit_clear_intr;
- }
-
-
- /*
- * read 2 bytes of the transaction size
- * IMPORTANT: sdio transaction size has to be read before clearing
- * sdio interrupt!!!
- */
- val = sdio_readb(priv->func, addr++, &ret);
- iosize = val;
- val = sdio_readb(priv->func, addr++, &ret);
- iosize += val << 8;
-
- LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
-
- if (iosize == 0) {
- LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
- goto exit_clear_intr;
- }
-
- /* allocate a work structure to pass iosize to the worker */
- read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
- if (!read_req) {
- LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
- goto exit_clear_intr;
- }
-
- INIT_LIST_HEAD(&read_req->list);
- read_req->iosize = iosize;
-
- list_add_tail(&priv->read_req_list, &read_req->list);
-
- /* clear the function's interrupt request bit (write 1 to clear) */
- sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
-
- schedule_work(&priv->isr_worker);
-
- LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
-
- return;
-
-exit_clear_intr:
- /* clear the function's interrupt request bit (write 1 to clear) */
- sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
-}
-
-
-static int blocks;
-module_param(blocks, int, 0604);
-MODULE_PARM_DESC(blocks, "max_blocks_to_send");
-
-static bool dump;
-module_param(dump, bool, 0604);
-MODULE_PARM_DESC(dump, "dump_hex_content");
-
-static bool jump = 1;
-module_param(jump, bool, 0604);
-
-static bool direct = 1;
-module_param(direct, bool, 0604);
-
-static bool checksum = 1;
-module_param(checksum, bool, 0604);
-
-static bool fw_download = 1;
-module_param(fw_download, bool, 0604);
-
-static int block_size = IWMC_SDIO_BLK_SIZE;
-module_param(block_size, int, 0404);
-
-static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
-module_param(download_trans_blks, int, 0604);
-
-static bool rubbish_barker;
-module_param(rubbish_barker, bool, 0604);
-
-#ifdef CONFIG_IWMC3200TOP_DEBUG
-static int log_level[LOG_SRC_MAX];
-static unsigned int log_level_argc;
-module_param_array(log_level, int, &log_level_argc, 0604);
-MODULE_PARM_DESC(log_level, "log_level");
-
-static int log_level_fw[FW_LOG_SRC_MAX];
-static unsigned int log_level_fw_argc;
-module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
-MODULE_PARM_DESC(log_level_fw, "log_level_fw");
-#endif
-
-void iwmct_dbg_init_params(struct iwmct_priv *priv)
-{
-#ifdef CONFIG_IWMC3200TOP_DEBUG
- int i;
-
- for (i = 0; i < log_level_argc; i++) {
- dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
- i, log_level[i]);
- iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
- log_level[i] & 0xFF);
- }
- for (i = 0; i < log_level_fw_argc; i++) {
- dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
- i, log_level_fw[i]);
- iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
- log_level_fw[i] & 0xFF);
- }
-#endif
-
- priv->dbg.blocks = blocks;
- LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
- priv->dbg.dump = (bool)dump;
- LOG_INFO(priv, INIT, "dump=%d\n", dump);
- priv->dbg.jump = (bool)jump;
- LOG_INFO(priv, INIT, "jump=%d\n", jump);
- priv->dbg.direct = (bool)direct;
- LOG_INFO(priv, INIT, "direct=%d\n", direct);
- priv->dbg.checksum = (bool)checksum;
- LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
- priv->dbg.fw_download = (bool)fw_download;
- LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
- priv->dbg.block_size = block_size;
- LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
- priv->dbg.download_trans_blks = download_trans_blks;
- LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
-}
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-static ssize_t show_iwmct_fw_version(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwmct_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "%s\n", priv->dbg.label_fw);
-}
-static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);
-
-#ifdef CONFIG_IWMC3200TOP_DEBUG
-static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
- show_iwmct_log_level, store_iwmct_log_level);
-static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
- show_iwmct_log_level_fw, store_iwmct_log_level_fw);
-#endif
-
-static struct attribute *iwmct_sysfs_entries[] = {
- &dev_attr_cc_label_fw.attr,
-#ifdef CONFIG_IWMC3200TOP_DEBUG
- &dev_attr_log_level.attr,
- &dev_attr_log_level_fw.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwmct_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwmct_sysfs_entries,
-};
-
-
-static int iwmct_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- struct iwmct_priv *priv;
- int ret;
- int val = 1;
- int addr = IWMC_SDIO_INTR_ENABLE_ADDR;
-
- dev_dbg(&func->dev, "enter iwmct_probe\n");
-
- dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n",
- jiffies_to_msecs(2147483647), HZ);
-
- priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&func->dev, "kzalloc error\n");
- return -ENOMEM;
- }
- priv->func = func;
- sdio_set_drvdata(func, priv);
-
- INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
- INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
-
- init_waitqueue_head(&priv->wait_q);
-
- sdio_claim_host(func);
- /* FIXME: Remove after it is fixed in the Boot ROM upgrade */
- func->enable_timeout = 10;
-
- /* In our HW, setting the block size also wakes up the boot rom. */
- ret = sdio_set_block_size(func, priv->dbg.block_size);
- if (ret) {
- LOG_ERROR(priv, INIT,
- "sdio_set_block_size() failure: %d\n", ret);
- goto error_sdio_enable;
- }
-
- ret = sdio_enable_func(func);
- if (ret) {
- LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
- goto error_sdio_enable;
- }
-
- /* init reset and dev_sync states */
- atomic_set(&priv->reset, 0);
- atomic_set(&priv->dev_sync, 0);
-
- /* init read req queue */
- INIT_LIST_HEAD(&priv->read_req_list);
-
- /* process configurable parameters */
- iwmct_dbg_init_params(priv);
- ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
- if (ret) {
- LOG_ERROR(priv, INIT, "Failed to register attributes and "
- "initialize module_params\n");
- goto error_dev_attrs;
- }
-
- iwmct_dbgfs_register(priv, DRV_NAME);
-
- if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
- LOG_INFO(priv, INIT,
- "Reducing transaction to 8 blocks = 2K (from %d)\n",
- priv->dbg.download_trans_blks);
- priv->dbg.download_trans_blks = 8;
- }
- priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
- LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);
-
- ret = sdio_claim_irq(func, iwmct_irq);
- if (ret) {
- LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
- goto error_claim_irq;
- }
-
-
- /* Enable function's interrupt */
- sdio_writeb(priv->func, val, addr, &ret);
- if (ret) {
- LOG_ERROR(priv, INIT, "Failure writing to "
- "Interrupt Enable Register (%d): %d\n", addr, ret);
- goto error_enable_int;
- }
-
- sdio_release_host(func);
-
- LOG_INFO(priv, INIT, "exit iwmct_probe\n");
-
- return ret;
-
-error_enable_int:
- sdio_release_irq(func);
-error_claim_irq:
- sdio_disable_func(func);
-error_dev_attrs:
- iwmct_dbgfs_unregister(priv->dbgfs);
- sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
-error_sdio_enable:
- sdio_release_host(func);
- return ret;
-}
-
-static void iwmct_remove(struct sdio_func *func)
-{
- struct iwmct_work_struct *read_req;
- struct iwmct_priv *priv = sdio_get_drvdata(func);
-
- LOG_INFO(priv, INIT, "enter\n");
-
- sdio_claim_host(func);
- sdio_release_irq(func);
- sdio_release_host(func);
-
- /* Make sure works are finished */
- flush_work_sync(&priv->bus_rescan_worker);
- flush_work_sync(&priv->isr_worker);
-
- sdio_claim_host(func);
- sdio_disable_func(func);
- sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
- iwmct_dbgfs_unregister(priv->dbgfs);
- sdio_release_host(func);
-
- /* free read requests */
- while (!list_empty(&priv->read_req_list)) {
- read_req = list_entry(priv->read_req_list.next,
- struct iwmct_work_struct, list);
-
- list_del(&read_req->list);
- kfree(read_req);
- }
-
- kfree(priv);
-}
-
-
-static const struct sdio_device_id iwmct_ids[] = {
- /* Intel Wireless MultiCom 3200 Top Driver */
- { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
- { }, /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(sdio, iwmct_ids);
-
-static struct sdio_driver iwmct_driver = {
- .probe = iwmct_probe,
- .remove = iwmct_remove,
- .name = DRV_NAME,
- .id_table = iwmct_ids,
-};
-
-static int __init iwmct_init(void)
-{
- int rc;
-
- /* Default log filter settings */
- iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
- iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
- iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
-
- rc = sdio_register_driver(&iwmct_driver);
-
- return rc;
-}
-
-static void __exit iwmct_exit(void)
-{
- sdio_unregister_driver(&iwmct_driver);
-}
-
-module_init(iwmct_init);
-module_exit(iwmct_exit);
-
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 7de13891e49e..783fcd7365bc 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -1147,7 +1147,7 @@ static int mei_pci_resume(struct device *device)
err = request_threaded_irq(pdev->irq,
NULL,
mei_interrupt_thread_handler,
- 0, mei_driver_name, dev);
+ IRQF_ONESHOT, mei_driver_name, dev);
else
err = request_threaded_irq(pdev->irq,
mei_interrupt_quick_handler,
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 17bbacb1b4b1..87b251ab6ec5 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -452,9 +452,9 @@ xpc_handle_activate_mq_msg_uv(struct xpc_partition *part,
if (msg->activate_gru_mq_desc_gpa !=
part_uv->activate_gru_mq_desc_gpa) {
- spin_lock_irqsave(&part_uv->flags_lock, irq_flags);
+ spin_lock(&part_uv->flags_lock);
part_uv->flags &= ~XPC_P_CACHED_ACTIVATE_GRU_MQ_DESC_UV;
- spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags);
+ spin_unlock(&part_uv->flags_lock);
part_uv->activate_gru_mq_desc_gpa =
msg->activate_gru_mq_desc_gpa;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 276d21ce6bc1..f1c84decb192 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -850,9 +850,7 @@ out:
goto retry;
if (!err)
mmc_blk_reset_success(md, type);
- spin_lock_irq(&md->lock);
- __blk_end_request(req, err, blk_rq_bytes(req));
- spin_unlock_irq(&md->lock);
+ blk_end_request(req, err, blk_rq_bytes(req));
return err ? 0 : 1;
}
@@ -934,9 +932,7 @@ out_retry:
if (!err)
mmc_blk_reset_success(md, type);
out:
- spin_lock_irq(&md->lock);
- __blk_end_request(req, err, blk_rq_bytes(req));
- spin_unlock_irq(&md->lock);
+ blk_end_request(req, err, blk_rq_bytes(req));
return err ? 0 : 1;
}
@@ -951,9 +947,7 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
if (ret)
ret = -EIO;
- spin_lock_irq(&md->lock);
- __blk_end_request_all(req, ret);
- spin_unlock_irq(&md->lock);
+ blk_end_request_all(req, ret);
return ret ? 0 : 1;
}
@@ -1252,14 +1246,10 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
blocks = mmc_sd_num_wr_blocks(card);
if (blocks != (u32)-1) {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, blocks << 9);
- spin_unlock_irq(&md->lock);
+ ret = blk_end_request(req, 0, blocks << 9);
}
} else {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
}
return ret;
}
@@ -1311,10 +1301,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* A block was successfully transferred.
*/
mmc_blk_reset_success(md, type);
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0,
+ ret = blk_end_request(req, 0,
brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
/*
* If the blk_end_request function returns non-zero even
* though all data has been transferred and no errors
@@ -1364,10 +1352,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
* time, so we only reach here after trying to
* read a single sector.
*/
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, -EIO,
+ ret = blk_end_request(req, -EIO,
brq->data.blksz);
- spin_unlock_irq(&md->lock);
if (!ret)
goto start_new_req;
break;
@@ -1388,12 +1374,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
return 1;
cmd_abort:
- spin_lock_irq(&md->lock);
if (mmc_card_removed(card))
req->cmd_flags |= REQ_QUIET;
while (ret)
- ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
- spin_unlock_irq(&md->lock);
+ ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
start_new_req:
if (rqc) {
@@ -1417,9 +1401,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
ret = mmc_blk_part_switch(card, md);
if (ret) {
if (req) {
- spin_lock_irq(&md->lock);
- __blk_end_request_all(req, -EIO);
- spin_unlock_irq(&md->lock);
+ blk_end_request_all(req, -EIO);
}
ret = 0;
goto out;
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index dca4428380f1..38ed210ce2f3 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -7,6 +7,6 @@ mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
- quirks.o cd-gpio.o
+ quirks.o slot-gpio.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
deleted file mode 100644
index f13e38deceac..000000000000
--- a/drivers/mmc/core/cd-gpio.c
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Generic GPIO card-detect helper
- *
- * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/mmc/cd-gpio.h>
-#include <linux/mmc/host.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-struct mmc_cd_gpio {
- unsigned int gpio;
- char label[0];
-};
-
-static irqreturn_t mmc_cd_gpio_irqt(int irq, void *dev_id)
-{
- /* Schedule a card detection after a debounce timeout */
- mmc_detect_change(dev_id, msecs_to_jiffies(100));
- return IRQ_HANDLED;
-}
-
-int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio)
-{
- size_t len = strlen(dev_name(host->parent)) + 4;
- struct mmc_cd_gpio *cd;
- int irq = gpio_to_irq(gpio);
- int ret;
-
- if (irq < 0)
- return irq;
-
- cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL);
- if (!cd)
- return -ENOMEM;
-
- snprintf(cd->label, len, "%s cd", dev_name(host->parent));
-
- ret = gpio_request_one(gpio, GPIOF_DIR_IN, cd->label);
- if (ret < 0)
- goto egpioreq;
-
- ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- cd->label, host);
- if (ret < 0)
- goto eirqreq;
-
- cd->gpio = gpio;
- host->hotplug.irq = irq;
- host->hotplug.handler_priv = cd;
-
- return 0;
-
-eirqreq:
- gpio_free(gpio);
-egpioreq:
- kfree(cd);
- return ret;
-}
-EXPORT_SYMBOL(mmc_cd_gpio_request);
-
-void mmc_cd_gpio_free(struct mmc_host *host)
-{
- struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
-
- if (!cd)
- return;
-
- free_irq(host->hotplug.irq, host);
- gpio_free(cd->gpio);
- kfree(cd);
-}
-EXPORT_SYMBOL(mmc_cd_gpio_free);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0b6141d29dbd..8ac5246e2ab2 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -404,6 +404,7 @@ int mmc_interrupt_hpi(struct mmc_card *card)
{
int err;
u32 status;
+ unsigned long prg_wait;
BUG_ON(!card);
@@ -419,30 +420,38 @@ int mmc_interrupt_hpi(struct mmc_card *card)
goto out;
}
- /*
- * If the card status is in PRG-state, we can send the HPI command.
- */
- if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
- do {
- /*
- * We don't know when the HPI command will finish
- * processing, so we need to resend HPI until out
- * of prg-state, and keep checking the card status
- * with SEND_STATUS. If a timeout error occurs when
- * sending the HPI command, we are already out of
- * prg-state.
- */
- err = mmc_send_hpi_cmd(card, &status);
- if (err)
- pr_debug("%s: abort HPI (%d error)\n",
- mmc_hostname(card->host), err);
+ switch (R1_CURRENT_STATE(status)) {
+ case R1_STATE_IDLE:
+ case R1_STATE_READY:
+ case R1_STATE_STBY:
+ /*
+ * In idle states, HPI is not needed and the caller
+ * can issue the next intended command immediately
+ */
+ goto out;
+ case R1_STATE_PRG:
+ break;
+ default:
+ /* In all other states, it's illegal to issue HPI */
+ pr_debug("%s: HPI cannot be sent. Card state=%d\n",
+ mmc_hostname(card->host), R1_CURRENT_STATE(status));
+ err = -EINVAL;
+ goto out;
+ }
- err = mmc_send_status(card, &status);
- if (err)
- break;
- } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
- } else
- pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));
+ err = mmc_send_hpi_cmd(card, &status);
+ if (err)
+ goto out;
+
+ prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
+ do {
+ err = mmc_send_status(card, &status);
+
+ if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
+ break;
+ if (time_after(jiffies, prg_wait))
+ err = -ETIMEDOUT;
+ } while (!err);
out:
mmc_release_host(card->host);
@@ -941,7 +950,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply)
return result;
}
-EXPORT_SYMBOL(mmc_regulator_get_ocrmask);
+EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
/**
* mmc_regulator_set_ocr - set regulator to match host->ios voltage
@@ -1011,7 +1020,30 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
"could not set regulator OCR (%d)\n", result);
return result;
}
-EXPORT_SYMBOL(mmc_regulator_set_ocr);
+EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
+
+int mmc_regulator_get_supply(struct mmc_host *mmc)
+{
+ struct device *dev = mmc_dev(mmc);
+ struct regulator *supply;
+ int ret;
+
+ supply = devm_regulator_get(dev, "vmmc");
+ mmc->supply.vmmc = supply;
+ mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");
+
+ if (IS_ERR(supply))
+ return PTR_ERR(supply);
+
+ ret = mmc_regulator_get_ocrmask(supply);
+ if (ret > 0)
+ mmc->ocr_avail = ret;
+ else
+ dev_warn(mmc_dev(mmc), "Failed getting OCR mask: %d\n", ret);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
#endif /* CONFIG_REGULATOR */
@@ -1180,6 +1212,9 @@ static void mmc_power_up(struct mmc_host *host)
host->ios.timing = MMC_TIMING_LEGACY;
mmc_set_ios(host);
+ /* Set signal voltage to 3.3V */
+ mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
+
/*
* This delay should be sufficient to allow the power supply
* to reach the minimum voltage.
@@ -1931,9 +1966,6 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
*/
mmc_hw_reset_for_init(host);
- /* Initialization should be done at 3.3 V I/O voltage. */
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
-
/*
* sdio_reset sends CMD52 to reset card. Since we do not know
* if the card is being re-initialized, just send it. CMD52
@@ -2075,6 +2107,7 @@ void mmc_rescan(struct work_struct *work)
void mmc_start_host(struct mmc_host *host)
{
host->f_init = max(freqs[0], host->f_min);
+ host->rescan_disable = 0;
mmc_power_up(host);
mmc_detect_change(host, 0);
}
@@ -2088,6 +2121,7 @@ void mmc_stop_host(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
#endif
+ host->rescan_disable = 1;
cancel_delayed_work_sync(&host->detect);
mmc_flush_scheduled_work();
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 91c84c7a1829..597f189b4427 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -32,6 +32,7 @@
static void mmc_host_classdev_release(struct device *dev)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ mutex_destroy(&host->slot.lock);
kfree(host);
}
@@ -312,6 +313,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
if (!host)
return NULL;
+ /* scanning will be enabled when we're ready */
+ host->rescan_disable = 1;
spin_lock(&mmc_host_lock);
err = idr_get_new(&mmc_host_idr, host, &host->index);
spin_unlock(&mmc_host_lock);
@@ -327,6 +330,9 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
mmc_host_clk_init(host);
+ mutex_init(&host->slot.lock);
+ host->slot.cd_irq = -EINVAL;
+
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 258b203397aa..396b25891bb9 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -717,10 +717,6 @@ static int mmc_select_powerclass(struct mmc_card *card,
card->ext_csd.generic_cmd6_time);
}
- if (err)
- pr_err("%s: power class selection for ext_csd_bus_width %d"
- " failed\n", mmc_hostname(card->host), bus_width);
-
return err;
}
@@ -822,9 +818,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (!mmc_host_is_spi(host))
mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
- /* Initialization should be done at 3.3 V I/O voltage. */
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
-
/*
* Since we're changing the OCR value, we seem to
* need to tell some cards to go back to the idle
@@ -1104,7 +1097,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
if (err)
- goto err;
+ pr_warning("%s: power class selection to bus width %d"
+ " failed\n", mmc_hostname(card->host),
+ 1 << bus_width);
}
/*
@@ -1136,7 +1131,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
ext_csd);
if (err)
- goto err;
+ pr_warning("%s: power class selection to "
+ "bus width %d failed\n",
+ mmc_hostname(card->host),
+ 1 << bus_width);
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
@@ -1164,7 +1162,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
ext_csd);
if (err)
- goto err;
+ pr_warning("%s: power class selection to "
+ "bus width %d ddr %d failed\n",
+ mmc_hostname(card->host),
+ 1 << bus_width, ddr);
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 69370f494e05..0ed2cc5f35b6 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -569,7 +569,6 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
cmd.opcode = opcode;
cmd.arg = card->rca << 16 | 1;
- cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index b2b43f624b9e..74972c241dff 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -244,7 +244,7 @@ static int mmc_read_ssr(struct mmc_card *card)
* bitfield positions accordingly.
*/
au = UNSTUFF_BITS(ssr, 428 - 384, 4);
- if (au > 0 || au <= 9) {
+ if (au > 0 && au <= 9) {
card->ssr.au = 1 << (au + 4);
es = UNSTUFF_BITS(ssr, 408 - 384, 16);
et = UNSTUFF_BITS(ssr, 402 - 384, 6);
@@ -290,8 +290,12 @@ static int mmc_read_switch(struct mmc_card *card)
return -ENOMEM;
}
- /* Find out the supported Bus Speed Modes. */
- err = mmc_sd_switch(card, 0, 0, 1, status);
+ /*
+ * Find out the card's support bits with a mode 0 operation.
+ * The argument does not matter, as the support bits do not
+ * change with the arguments.
+ */
+ err = mmc_sd_switch(card, 0, 0, 0, status);
if (err) {
/*
* If the host or the card can't do the switch,
@@ -312,46 +316,8 @@ static int mmc_read_switch(struct mmc_card *card)
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
-
- /* Find out Driver Strengths supported by the card */
- err = mmc_sd_switch(card, 0, 2, 1, status);
- if (err) {
- /*
- * If the host or the card can't do the switch,
- * fail more gracefully.
- */
- if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
- goto out;
-
- pr_warning("%s: problem reading "
- "Driver Strength.\n",
- mmc_hostname(card->host));
- err = 0;
-
- goto out;
- }
-
+ /* Driver Strengths supported by the card */
card->sw_caps.sd3_drv_type = status[9];
-
- /* Find out Current Limits supported by the card */
- err = mmc_sd_switch(card, 0, 3, 1, status);
- if (err) {
- /*
- * If the host or the card can't do the switch,
- * fail more gracefully.
- */
- if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)
- goto out;
-
- pr_warning("%s: problem reading "
- "Current Limit.\n",
- mmc_hostname(card->host));
- err = 0;
-
- goto out;
- }
-
- card->sw_caps.sd3_curr_limit = status[7];
}
out:
@@ -551,60 +517,80 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
return 0;
}
+/* Get host's max current setting at its current voltage */
+static u32 sd_get_host_max_current(struct mmc_host *host)
+{
+ u32 voltage, max_current;
+
+ voltage = 1 << host->ios.vdd;
+ switch (voltage) {
+ case MMC_VDD_165_195:
+ max_current = host->max_current_180;
+ break;
+ case MMC_VDD_29_30:
+ case MMC_VDD_30_31:
+ max_current = host->max_current_300;
+ break;
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ max_current = host->max_current_330;
+ break;
+ default:
+ max_current = 0;
+ }
+
+ return max_current;
+}
+
static int sd_set_current_limit(struct mmc_card *card, u8 *status)
{
- int current_limit = 0;
+ int current_limit = SD_SET_CURRENT_NO_CHANGE;
int err;
+ u32 max_current;
/*
* Current limit switch is only defined for SDR50, SDR104, and DDR50
- * bus speed modes. For other bus speed modes, we set the default
- * current limit of 200mA.
+ * bus speed modes. For other bus speed modes, we do not change the
+ * current limit.
*/
- if ((card->sd_bus_speed == UHS_SDR50_BUS_SPEED) ||
- (card->sd_bus_speed == UHS_SDR104_BUS_SPEED) ||
- (card->sd_bus_speed == UHS_DDR50_BUS_SPEED)) {
- if (card->host->caps & MMC_CAP_MAX_CURRENT_800) {
- if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800)
- current_limit = SD_SET_CURRENT_LIMIT_800;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_600)
- current_limit = SD_SET_CURRENT_LIMIT_600;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_400)
- current_limit = SD_SET_CURRENT_LIMIT_400;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_200)
- current_limit = SD_SET_CURRENT_LIMIT_200;
- } else if (card->host->caps & MMC_CAP_MAX_CURRENT_600) {
- if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600)
- current_limit = SD_SET_CURRENT_LIMIT_600;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_400)
- current_limit = SD_SET_CURRENT_LIMIT_400;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_200)
- current_limit = SD_SET_CURRENT_LIMIT_200;
- } else if (card->host->caps & MMC_CAP_MAX_CURRENT_400) {
- if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400)
- current_limit = SD_SET_CURRENT_LIMIT_400;
- else if (card->sw_caps.sd3_curr_limit &
- SD_MAX_CURRENT_200)
- current_limit = SD_SET_CURRENT_LIMIT_200;
- } else if (card->host->caps & MMC_CAP_MAX_CURRENT_200) {
- if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200)
- current_limit = SD_SET_CURRENT_LIMIT_200;
- }
- } else
+ if ((card->sd_bus_speed != UHS_SDR50_BUS_SPEED) &&
+ (card->sd_bus_speed != UHS_SDR104_BUS_SPEED) &&
+ (card->sd_bus_speed != UHS_DDR50_BUS_SPEED))
+ return 0;
+
+ /*
+ * Host has different current capabilities when operating at
+ * different voltages, so find out its max current first.
+ */
+ max_current = sd_get_host_max_current(card->host);
+
+ /*
+ * We only check host's capability here, if we set a limit that is
+ * higher than the card's maximum current, the card will be using its
+ * maximum current, e.g. if the card's maximum current is 300ma, and
+ * when we set current limit to 200ma, the card will draw 200ma, and
+ * when we set current limit to 400/600/800ma, the card will draw its
+ * maximum 300ma from the host.
+ */
+ if (max_current >= 800)
+ current_limit = SD_SET_CURRENT_LIMIT_800;
+ else if (max_current >= 600)
+ current_limit = SD_SET_CURRENT_LIMIT_600;
+ else if (max_current >= 400)
+ current_limit = SD_SET_CURRENT_LIMIT_400;
+ else if (max_current >= 200)
current_limit = SD_SET_CURRENT_LIMIT_200;
- err = mmc_sd_switch(card, 1, 3, current_limit, status);
- if (err)
- return err;
+ if (current_limit != SD_SET_CURRENT_NO_CHANGE) {
+ err = mmc_sd_switch(card, 1, 3, current_limit, status);
+ if (err)
+ return err;
- if (((status[15] >> 4) & 0x0F) != current_limit)
- pr_warning("%s: Problem setting current limit!\n",
- mmc_hostname(card->host));
+ if (((status[15] >> 4) & 0x0F) != current_limit)
+ pr_warning("%s: Problem setting current limit!\n",
+ mmc_hostname(card->host));
+
+ }
return 0;
}
@@ -726,6 +712,7 @@ struct device_type sd_type = {
int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
{
int err;
+ u32 max_current;
/*
* Since we're changing the OCR value, we seem to
@@ -753,9 +740,12 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))
ocr |= SD_OCR_S18R;
- /* If the host can supply more than 150mA, XPC should be set to 1. */
- if (host->caps & (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 |
- MMC_CAP_SET_XPC_180))
+ /*
+ * If the host can supply more than 150mA at current voltage,
+ * XPC should be set to 1.
+ */
+ max_current = sd_get_host_max_current(host);
+ if (max_current > 150)
ocr |= SD_OCR_XPC;
try_again:
@@ -911,9 +901,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
BUG_ON(!host);
WARN_ON(!host->claimed);
- /* The initialization should be done at 3.3 V I/O voltage. */
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
-
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
if (err)
return err;
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 41c5fd8848f4..d4619e2ec030 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -591,9 +591,6 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
* Inform the card of the voltage
*/
if (!powered_resume) {
- /* The initialization should be done at 3.3 V I/O voltage. */
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
-
err = mmc_send_io_op_cond(host, host->ocr, &ocr);
if (err)
goto err;
@@ -1006,10 +1003,6 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
* restore the correct voltage setting of the card.
*/
- /* The initialization should be done at 3.3 V I/O voltage. */
- if (!mmc_card_keep_power(host))
- mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
-
sdio_reset(host);
mmc_go_idle(host);
mmc_send_if_cond(host, host->ocr_avail);
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index f1c7ed8f4d85..8e94e555b788 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -313,7 +313,7 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
if (ret == -ENOENT) {
/* warn about unknown tuples */
- pr_warning("%s: queuing unknown"
+ pr_warn_ratelimited("%s: queuing unknown"
" CIS tuple 0x%02x (%u bytes)\n",
mmc_hostname(card->host),
tpl_code, tpl_link);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
new file mode 100644
index 000000000000..058242916cef
--- /dev/null
+++ b/drivers/mmc/core/slot-gpio.c
@@ -0,0 +1,188 @@
+/*
+ * Generic GPIO card-detect helper
+ *
+ * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+struct mmc_gpio {
+ int ro_gpio;
+ int cd_gpio;
+ char *ro_label;
+ char cd_label[0];
+};
+
+static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
+{
+ /* Schedule a card detection after a debounce timeout */
+ mmc_detect_change(dev_id, msecs_to_jiffies(100));
+ return IRQ_HANDLED;
+}
+
+static int mmc_gpio_alloc(struct mmc_host *host)
+{
+ size_t len = strlen(dev_name(host->parent)) + 4;
+ struct mmc_gpio *ctx;
+
+ mutex_lock(&host->slot.lock);
+
+ ctx = host->slot.handler_priv;
+ if (!ctx) {
+ /*
+ * devm_kzalloc() can be called after device_initialize(), even
+ * before device_add(), i.e., between mmc_alloc_host() and
+ * mmc_add_host()
+ */
+ ctx = devm_kzalloc(&host->class_dev, sizeof(*ctx) + 2 * len,
+ GFP_KERNEL);
+ if (ctx) {
+ ctx->ro_label = ctx->cd_label + len;
+ snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent));
+ snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent));
+ ctx->cd_gpio = -EINVAL;
+ ctx->ro_gpio = -EINVAL;
+ host->slot.handler_priv = ctx;
+ }
+ }
+
+ mutex_unlock(&host->slot.lock);
+
+ return ctx ? 0 : -ENOMEM;
+}
+
+int mmc_gpio_get_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ if (!ctx || !gpio_is_valid(ctx->ro_gpio))
+ return -ENOSYS;
+
+ return !gpio_get_value_cansleep(ctx->ro_gpio) ^
+ !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
+}
+EXPORT_SYMBOL(mmc_gpio_get_ro);
+
+int mmc_gpio_get_cd(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+
+ if (!ctx || !gpio_is_valid(ctx->cd_gpio))
+ return -ENOSYS;
+
+ return !gpio_get_value_cansleep(ctx->cd_gpio) ^
+ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
+}
+EXPORT_SYMBOL(mmc_gpio_get_cd);
+
+int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
+{
+ struct mmc_gpio *ctx;
+ int ret;
+
+ if (!gpio_is_valid(gpio))
+ return -EINVAL;
+
+ ret = mmc_gpio_alloc(host);
+ if (ret < 0)
+ return ret;
+
+ ctx = host->slot.handler_priv;
+
+ return gpio_request_one(gpio, GPIOF_DIR_IN, ctx->ro_label);
+}
+EXPORT_SYMBOL(mmc_gpio_request_ro);
+
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
+{
+ struct mmc_gpio *ctx;
+ int irq = gpio_to_irq(gpio);
+ int ret;
+
+ ret = mmc_gpio_alloc(host);
+ if (ret < 0)
+ return ret;
+
+ ctx = host->slot.handler_priv;
+
+ ret = gpio_request_one(gpio, GPIOF_DIR_IN, ctx->cd_label);
+ if (ret < 0)
+ /*
+ * don't bother freeing memory. It might still get used by other
+ * slot functions, in any case it will be freed, when the device
+ * is destroyed.
+ */
+ return ret;
+
+ /*
+ * Even if gpio_to_irq() returns a valid IRQ number, the platform might
+ * still prefer to poll, e.g., because that IRQ number is already used
+ * by another unit and cannot be shared.
+ */
+ if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
+ irq = -EINVAL;
+
+ if (irq >= 0) {
+ ret = request_threaded_irq(irq, NULL, mmc_gpio_cd_irqt,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ ctx->cd_label, host);
+ if (ret < 0)
+ irq = ret;
+ }
+
+ host->slot.cd_irq = irq;
+
+ if (irq < 0)
+ host->caps |= MMC_CAP_NEEDS_POLL;
+
+ ctx->cd_gpio = gpio;
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_gpio_request_cd);
+
+void mmc_gpio_free_ro(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ int gpio;
+
+ if (!ctx || !gpio_is_valid(ctx->ro_gpio))
+ return;
+
+ gpio = ctx->ro_gpio;
+ ctx->ro_gpio = -EINVAL;
+
+ gpio_free(gpio);
+}
+EXPORT_SYMBOL(mmc_gpio_free_ro);
+
+void mmc_gpio_free_cd(struct mmc_host *host)
+{
+ struct mmc_gpio *ctx = host->slot.handler_priv;
+ int gpio;
+
+ if (!ctx || !gpio_is_valid(ctx->cd_gpio))
+ return;
+
+ if (host->slot.cd_irq >= 0) {
+ free_irq(host->slot.cd_irq, host);
+ host->slot.cd_irq = -EINVAL;
+ }
+
+ gpio = ctx->cd_gpio;
+ ctx->cd_gpio = -EINVAL;
+
+ gpio_free(gpio);
+}
+EXPORT_SYMBOL(mmc_gpio_free_cd);
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index f2c115e06438..322412cec4ee 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -391,11 +391,17 @@ static int atmci_regs_show(struct seq_file *s, void *v)
clk_disable(host->mck);
spin_unlock_bh(&host->lock);
- seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n",
+ seq_printf(s, "MR:\t0x%08x%s%s ",
buf[ATMCI_MR / 4],
buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "",
- buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "",
- buf[ATMCI_MR / 4] & 0xff);
+ buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "");
+ if (host->caps.has_odd_clk_div)
+ seq_printf(s, "{CLKDIV,CLKODD}=%u\n",
+ ((buf[ATMCI_MR / 4] & 0xff) << 1)
+ | ((buf[ATMCI_MR / 4] >> 16) & 1));
+ else
+ seq_printf(s, "CLKDIV=%u\n",
+ (buf[ATMCI_MR / 4] & 0xff));
seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]);
seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]);
seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);
@@ -1685,7 +1691,6 @@ static void atmci_tasklet_func(unsigned long priv)
dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
host->cmd = NULL;
- host->data = NULL;
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
atmci_command_complete(host, mrq->stop);
@@ -1699,6 +1704,7 @@ static void atmci_tasklet_func(unsigned long priv)
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
}
+ host->data = NULL;
break;
case STATE_END_REQUEST:
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 1ca5e72ceb65..72dc3cde646d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -405,11 +405,23 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
static int dw_mci_idmac_init(struct dw_mci *host)
{
struct idmac_desc *p;
- int i;
+ int i, dma_support;
/* Number of descriptors in the ring buffer */
host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+ /* Check if Hardware Configuration Register has support for DMA */
+ dma_support = (mci_readl(host, HCON) >> 16) & 0x3;
+
+ if (!dma_support || dma_support > 2) {
+ dev_err(&host->dev,
+ "Host Controller does not support IDMA Tx.\n");
+ host->dma_ops = NULL;
+ return -ENODEV;
+ }
+
+ dev_info(&host->dev, "Using internal DMA controller.\n");
+
/* Forward link the descriptor list */
for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
@@ -1876,7 +1888,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
host->dma_ops = &dw_mci_idmac_ops;
- dev_info(&host->dev, "Using internal DMA controller.\n");
#endif
if (!host->dma_ops)
@@ -2175,7 +2186,7 @@ int dw_mci_resume(struct dw_mci *host)
return ret;
}
- if (host->dma_ops->init)
+ if (host->use_dma && host->dma_ops->init)
host->dma_ops->init(host);
/* Restore the old value at FIFOTH register */
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 277161d279b8..a51f9309ffbb 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -164,16 +164,23 @@ struct mxs_mmc_host {
spinlock_t lock;
int sdio_irq_en;
int wp_gpio;
+ bool wp_inverted;
};
static int mxs_mmc_get_ro(struct mmc_host *mmc)
{
struct mxs_mmc_host *host = mmc_priv(mmc);
+ int ret;
if (!gpio_is_valid(host->wp_gpio))
return -EINVAL;
- return gpio_get_value(host->wp_gpio);
+ ret = gpio_get_value(host->wp_gpio);
+
+ if (host->wp_inverted)
+ ret = !ret;
+
+ return ret;
}
static int mxs_mmc_get_cd(struct mmc_host *mmc)
@@ -707,6 +714,8 @@ static int mxs_mmc_probe(struct platform_device *pdev)
struct pinctrl *pinctrl;
int ret = 0, irq_err, irq_dma;
dma_cap_mask_t mask;
+ struct regulator *reg_vmmc;
+ enum of_gpio_flags flags;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -747,6 +756,16 @@ static int mxs_mmc_probe(struct platform_device *pdev)
host->mmc = mmc;
host->sdio_irq_en = 0;
+ reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc");
+ if (!IS_ERR(reg_vmmc)) {
+ ret = regulator_enable(reg_vmmc);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable vmmc regulator: %d\n", ret);
+ goto out_mmc_free;
+ }
+ }
+
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
@@ -785,7 +804,10 @@ static int mxs_mmc_probe(struct platform_device *pdev)
mmc->caps |= MMC_CAP_4_BIT_DATA;
else if (bus_width == 8)
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
- host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
+ host->wp_gpio = of_get_named_gpio_flags(np, "wp-gpios", 0,
+ &flags);
+ if (flags & OF_GPIO_ACTIVE_LOW)
+ host->wp_inverted = 1;
} else {
if (pdata->flags & SLOTF_8_BIT_CAPABLE)
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 389a3eedfc24..bc28627af66b 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1089,7 +1089,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
/* Disable the clocks */
pm_runtime_put_sync(host->dev);
if (host->dbclk)
- clk_disable(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
/* Turn the power off */
ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
@@ -1100,7 +1100,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
vdd);
pm_runtime_get_sync(host->dev);
if (host->dbclk)
- clk_enable(host->dbclk);
+ clk_prepare_enable(host->dbclk);
if (ret != 0)
goto err;
@@ -1899,7 +1899,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
if (IS_ERR(host->dbclk)) {
dev_warn(mmc_dev(host->mmc), "Failed to get debounce clk\n");
host->dbclk = NULL;
- } else if (clk_enable(host->dbclk) != 0) {
+ } else if (clk_prepare_enable(host->dbclk) != 0) {
dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
clk_put(host->dbclk);
host->dbclk = NULL;
@@ -1931,6 +1931,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
if (!res) {
dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n");
+ ret = -ENXIO;
goto err_irq;
}
host->dma_line_tx = res->start;
@@ -1938,6 +1939,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
if (!res) {
dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n");
+ ret = -ENXIO;
goto err_irq;
}
host->dma_line_rx = res->start;
@@ -2023,7 +2025,7 @@ err_irq:
pm_runtime_disable(host->dev);
clk_put(host->fclk);
if (host->dbclk) {
- clk_disable(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
clk_put(host->dbclk);
}
err1:
@@ -2058,7 +2060,7 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
pm_runtime_disable(host->dev);
clk_put(host->fclk);
if (host->dbclk) {
- clk_disable(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
clk_put(host->dbclk);
}
@@ -2116,7 +2118,7 @@ static int omap_hsmmc_suspend(struct device *dev)
}
if (host->dbclk)
- clk_disable(host->dbclk);
+ clk_disable_unprepare(host->dbclk);
err:
pm_runtime_put_sync(host->dev);
return ret;
@@ -2137,7 +2139,7 @@ static int omap_hsmmc_resume(struct device *dev)
pm_runtime_get_sync(host->dev);
if (host->dbclk)
- clk_enable(host->dbclk);
+ clk_prepare_enable(host->dbclk);
if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
omap_hsmmc_conf_bus_power(host);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index c3622a69f432..bd5a5cce122c 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -26,7 +26,6 @@
#include <mach/dma.h>
#include <mach/regs-sdi.h>
-#include <mach/regs-gpio.h>
#include <plat/mci.h>
@@ -1237,12 +1236,9 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
switch (ios->power_mode) {
case MMC_POWER_ON:
case MMC_POWER_UP:
- s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK);
- s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD);
- s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0);
- s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1);
- s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2);
- s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3);
+ /* Configure GPE5...GPE10 pins in SD mode */
+ s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2),
+ S3C_GPIO_PULL_NONE);
if (host->pdata->set_power)
host->pdata->set_power(ios->power_mode, ios->vdd);
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 177f697b5835..a6e53a1ebb08 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -20,11 +20,17 @@
*/
#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/module.h>
#include <linux/mmc/host.h>
#include "sdhci-pltfm.h"
+struct sdhci_dove_priv {
+ struct clk *clk;
+};
+
static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
{
u16 ret;
@@ -66,16 +72,57 @@ static struct sdhci_pltfm_data sdhci_dove_pdata = {
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_FORCE_DMA,
+ SDHCI_QUIRK_FORCE_DMA |
+ SDHCI_QUIRK_NO_HISPD_BIT,
};
static int __devinit sdhci_dove_probe(struct platform_device *pdev)
{
- return sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
+ struct sdhci_host *host;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_dove_priv *priv;
+ int ret;
+
+ ret = sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
+ if (ret)
+ goto sdhci_dove_register_fail;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "unable to allocate private data");
+ ret = -ENOMEM;
+ goto sdhci_dove_allocate_fail;
+ }
+
+ host = platform_get_drvdata(pdev);
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = priv;
+
+ priv->clk = clk_get(&pdev->dev, NULL);
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+ return 0;
+
+sdhci_dove_allocate_fail:
+ sdhci_pltfm_unregister(pdev);
+sdhci_dove_register_fail:
+ return ret;
}
static int __devexit sdhci_dove_remove(struct platform_device *pdev)
{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_dove_priv *priv = pltfm_host->priv;
+
+ if (priv->clk) {
+ if (!IS_ERR(priv->clk)) {
+ clk_disable_unprepare(priv->clk);
+ clk_put(priv->clk);
+ }
+ devm_kfree(&pdev->dev, priv->clk);
+ }
return sdhci_pltfm_unregister(pdev);
}
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index ebbe984e5d00..e23f8134591c 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -299,6 +299,8 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = pltfm_host->priv;
u32 new_val;
switch (reg) {
@@ -315,8 +317,11 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
SDHCI_CTRL_D3CD);
/* ensure the endianess */
new_val |= ESDHC_HOST_CONTROL_LE;
- /* DMA mode bits are shifted */
- new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
+ /* bits 8&9 are reserved on mx25 */
+ if (!is_imx25_esdhc(imx_data)) {
+ /* DMA mode bits are shifted */
+ new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5;
+ }
esdhc_clrset_le(host, 0xffff, new_val, reg);
return;
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 69ef0beae104..504da715a41a 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -157,6 +157,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
static const struct sdhci_pci_fixes sdhci_cafe = {
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index dbb75bfbcffb..b6ee8857e226 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -28,6 +28,9 @@
#include <linux/mmc/host.h>
#include <linux/platform_data/pxa_sdhci.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -121,6 +124,48 @@ static struct sdhci_ops pxav2_sdhci_ops = {
.platform_8bit_width = pxav2_mmc_set_width,
};
+#ifdef CONFIG_OF
+static const struct of_device_id sdhci_pxav2_of_match[] = {
+ {
+ .compatible = "mrvl,pxav2-mmc",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_pxav2_of_match);
+
+static struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev)
+{
+ struct sdhci_pxa_platdata *pdata;
+ struct device_node *np = dev->of_node;
+ u32 bus_width;
+ u32 clk_delay_cycles;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (of_find_property(np, "non-removable", NULL))
+ pdata->flags |= PXA_FLAG_CARD_PERMANENT;
+
+ of_property_read_u32(np, "bus-width", &bus_width);
+ if (bus_width == 8)
+ pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT;
+
+ of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
+ if (clk_delay_cycles > 0) {
+ pdata->clk_delay_sel = 1;
+ pdata->clk_delay_cycles = clk_delay_cycles;
+ }
+
+ return pdata;
+}
+#else
+static inline struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
@@ -128,6 +173,8 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sdhci_host *host = NULL;
struct sdhci_pxa *pxa = NULL;
+ const struct of_device_id *match;
+
int ret;
struct clk *clk;
@@ -156,6 +203,10 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
| SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+ match = of_match_device(of_match_ptr(sdhci_pxav2_of_match), &pdev->dev);
+ if (match) {
+ pdata = pxav2_get_mmc_pdata(dev);
+ }
if (pdata) {
if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
/* on-chip device */
@@ -218,6 +269,9 @@ static struct platform_driver sdhci_pxav2_driver = {
.driver = {
.name = "sdhci-pxav2",
.owner = THIS_MODULE,
+#ifdef CONFIG_OF
+ .of_match_table = sdhci_pxav2_of_match,
+#endif
.pm = SDHCI_PLTFM_PMOPS,
},
.probe = sdhci_pxav2_probe,
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index f29695683556..07fe3834fe0b 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -28,6 +28,9 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
#include "sdhci.h"
#include "sdhci-pltfm.h"
@@ -164,6 +167,46 @@ static struct sdhci_ops pxav3_sdhci_ops = {
.platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
};
+#ifdef CONFIG_OF
+static const struct of_device_id sdhci_pxav3_of_match[] = {
+ {
+ .compatible = "mrvl,pxav3-mmc",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_pxav3_of_match);
+
+static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
+{
+ struct sdhci_pxa_platdata *pdata;
+ struct device_node *np = dev->of_node;
+ u32 bus_width;
+ u32 clk_delay_cycles;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (of_find_property(np, "non-removable", NULL))
+ pdata->flags |= PXA_FLAG_CARD_PERMANENT;
+
+ of_property_read_u32(np, "bus-width", &bus_width);
+ if (bus_width == 8)
+ pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT;
+
+ of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
+ if (clk_delay_cycles > 0)
+ pdata->clk_delay_cycles = clk_delay_cycles;
+
+ return pdata;
+}
+#else
+static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
@@ -171,6 +214,8 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sdhci_host *host = NULL;
struct sdhci_pxa *pxa = NULL;
+ const struct of_device_id *match;
+
int ret;
struct clk *clk;
@@ -202,6 +247,10 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
+ match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev);
+ if (match)
+ pdata = pxav3_get_mmc_pdata(dev);
+
if (pdata) {
if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
/* on-chip device */
@@ -263,6 +312,9 @@ static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
static struct platform_driver sdhci_pxav3_driver = {
.driver = {
.name = "sdhci-pxav3",
+#ifdef CONFIG_OF
+ .of_match_table = sdhci_pxav3_of_match,
+#endif
.owner = THIS_MODULE,
.pm = SDHCI_PLTFM_PMOPS,
},
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index b38d8a78f6a0..0810ccc23d7e 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -223,6 +223,7 @@ static struct tegra_sdhci_platform_data * __devinit sdhci_tegra_dt_parse_pdata(
{
struct tegra_sdhci_platform_data *plat;
struct device_node *np = pdev->dev.of_node;
+ u32 bus_width;
if (!np)
return NULL;
@@ -236,7 +237,9 @@ static struct tegra_sdhci_platform_data * __devinit sdhci_tegra_dt_parse_pdata(
plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
- if (of_find_property(np, "support-8bit", NULL))
+
+ if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
+ bus_width == 8)
plat->is_8bit = 1;
return plat;
@@ -334,7 +337,7 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
rc = PTR_ERR(clk);
goto err_clk_get;
}
- clk_enable(clk);
+ clk_prepare_enable(clk);
pltfm_host->clk = clk;
host->mmc->pm_caps = plat->pm_flags;
@@ -349,7 +352,7 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
return 0;
err_add_host:
- clk_disable(pltfm_host->clk);
+ clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
err_clk_get:
if (gpio_is_valid(plat->wp_gpio))
@@ -390,7 +393,7 @@ static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
if (gpio_is_valid(plat->power_gpio))
gpio_free(plat->power_gpio);
- clk_disable(pltfm_host->clk);
+ clk_disable_unprepare(pltfm_host->clk);
clk_put(pltfm_host->clk);
sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index f4b8b4db3a9a..9a11dc39921c 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -27,6 +27,7 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include "sdhci.h"
@@ -244,6 +245,19 @@ static void sdhci_init(struct sdhci_host *host, int soft)
static void sdhci_reinit(struct sdhci_host *host)
{
sdhci_init(host, 0);
+ /*
+ * Retuning stuffs are affected by different cards inserted and only
+ * applicable to UHS-I cards. So reset these fields to their initial
+ * value when card is removed.
+ */
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
+ host->flags &= ~SDHCI_USING_RETUNING_TIMER;
+
+ del_timer_sync(&host->tuning_timer);
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+ host->mmc->max_blk_count =
+ (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
+ }
sdhci_enable_card_detection(host);
}
@@ -1245,6 +1259,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
struct sdhci_host *host;
bool present;
unsigned long flags;
+ u32 tuning_opcode;
host = mmc_priv(mmc);
@@ -1292,8 +1307,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
*/
if ((host->flags & SDHCI_NEEDS_RETUNING) &&
!(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
+ /* eMMC uses cmd21 while sd and sdio use cmd19 */
+ tuning_opcode = mmc->card->type == MMC_TYPE_MMC ?
+ MMC_SEND_TUNING_BLOCK_HS200 :
+ MMC_SEND_TUNING_BLOCK;
spin_unlock_irqrestore(&host->lock, flags);
- sdhci_execute_tuning(mmc, mrq->cmd->opcode);
+ sdhci_execute_tuning(mmc, tuning_opcode);
spin_lock_irqsave(&host->lock, flags);
/* Restore original mmc_request structure */
@@ -1663,11 +1682,15 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
pwr = sdhci_readb(host, SDHCI_POWER_CONTROL);
pwr &= ~SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->vmmc)
+ regulator_disable(host->vmmc);
/* Wait for 1ms as per the spec */
usleep_range(1000, 1500);
pwr |= SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->vmmc)
+ regulator_enable(host->vmmc);
pr_info(DRIVER_NAME ": Switching to 1.8V signalling "
"voltage failed, retrying with S18R set to 0\n");
@@ -1855,6 +1878,7 @@ out:
*/
if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
(host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+ host->flags |= SDHCI_USING_RETUNING_TIMER;
mod_timer(&host->tuning_timer, jiffies +
host->tuning_count * HZ);
/* Tuning mode 1 limits the maximum data length to 4MB */
@@ -1872,10 +1896,10 @@ out:
* try tuning again at a later time, when the re-tuning timer expires.
* So for these controllers, we return 0. Since there might be other
* controllers who do not have this capability, we return error for
- * them.
+ * them. SDHCI_USING_RETUNING_TIMER means the host is currently using
+ * a retuning timer to do the retuning for the card.
*/
- if (err && host->tuning_count &&
- host->tuning_mode == SDHCI_TUNING_MODE_1)
+ if (err && (host->flags & SDHCI_USING_RETUNING_TIMER))
err = 0;
sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
@@ -2382,7 +2406,6 @@ out:
int sdhci_suspend_host(struct sdhci_host *host)
{
int ret;
- bool has_tuning_timer;
if (host->ops->platform_suspend)
host->ops->platform_suspend(host);
@@ -2390,16 +2413,14 @@ int sdhci_suspend_host(struct sdhci_host *host)
sdhci_disable_card_detection(host);
/* Disable tuning since we are suspending */
- has_tuning_timer = host->version >= SDHCI_SPEC_300 &&
- host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1;
- if (has_tuning_timer) {
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
}
ret = mmc_suspend_host(host->mmc);
if (ret) {
- if (has_tuning_timer) {
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
host->flags |= SDHCI_NEEDS_RETUNING;
mod_timer(&host->tuning_timer, jiffies +
host->tuning_count * HZ);
@@ -2450,8 +2471,7 @@ int sdhci_resume_host(struct sdhci_host *host)
host->ops->platform_resume(host);
/* Set the re-tuning expiration flag */
- if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
- (host->tuning_mode == SDHCI_TUNING_MODE_1))
+ if (host->flags & SDHCI_USING_RETUNING_TIMER)
host->flags |= SDHCI_NEEDS_RETUNING;
return ret;
@@ -2490,8 +2510,7 @@ int sdhci_runtime_suspend_host(struct sdhci_host *host)
int ret = 0;
/* Disable tuning since we are suspending */
- if (host->version >= SDHCI_SPEC_300 &&
- host->tuning_mode == SDHCI_TUNING_MODE_1) {
+ if (host->flags & SDHCI_USING_RETUNING_TIMER) {
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
}
@@ -2532,8 +2551,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host)
sdhci_do_enable_preset_value(host, true);
/* Set the re-tuning expiration flag */
- if ((host->version >= SDHCI_SPEC_300) && host->tuning_count &&
- (host->tuning_mode == SDHCI_TUNING_MODE_1))
+ if (host->flags & SDHCI_USING_RETUNING_TIMER)
host->flags |= SDHCI_NEEDS_RETUNING;
spin_lock_irqsave(&host->lock, flags);
@@ -2584,7 +2602,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
int sdhci_add_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
- u32 caps[2];
+ u32 caps[2] = {0, 0};
u32 max_current_caps;
unsigned int ocr_avail;
int ret;
@@ -2614,8 +2632,10 @@ int sdhci_add_host(struct sdhci_host *host)
caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
sdhci_readl(host, SDHCI_CAPABILITIES);
- caps[1] = (host->version >= SDHCI_SPEC_300) ?
- sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0;
+ if (host->version >= SDHCI_SPEC_300)
+ caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ?
+ host->caps1 :
+ sdhci_readl(host, SDHCI_CAPABILITIES_1);
if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
host->flags |= SDHCI_USE_SDMA;
@@ -2779,7 +2799,7 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
- mmc_card_is_removable(mmc))
+ !(host->mmc->caps & MMC_CAP_NONREMOVABLE))
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
@@ -2837,6 +2857,30 @@ int sdhci_add_host(struct sdhci_host *host)
SDHCI_RETUNING_MODE_SHIFT;
ocr_avail = 0;
+
+ host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+ if (IS_ERR(host->vmmc)) {
+ pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
+ host->vmmc = NULL;
+ }
+
+#ifdef CONFIG_REGULATOR
+ if (host->vmmc) {
+ ret = regulator_is_supported_voltage(host->vmmc, 3300000,
+ 3300000);
+ if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330)))
+ caps[0] &= ~SDHCI_CAN_VDD_330;
+ ret = regulator_is_supported_voltage(host->vmmc, 3000000,
+ 3000000);
+ if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300)))
+ caps[0] &= ~SDHCI_CAN_VDD_300;
+ ret = regulator_is_supported_voltage(host->vmmc, 1800000,
+ 1800000);
+ if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180)))
+ caps[0] &= ~SDHCI_CAN_VDD_180;
+ }
+#endif /* CONFIG_REGULATOR */
+
/*
* According to SD Host Controller spec v3.00, if the Host System
* can afford more than 150mA, Host Driver should set XPC to 1. Also
@@ -2845,55 +2889,45 @@ int sdhci_add_host(struct sdhci_host *host)
* value.
*/
max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
+ if (!max_current_caps && host->vmmc) {
+ u32 curr = regulator_get_current_limit(host->vmmc);
+ if (curr > 0) {
+
+ /* convert to SDHCI_MAX_CURRENT format */
+ curr = curr/1000; /* convert to mA */
+ curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
+
+ curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
+ max_current_caps =
+ (curr << SDHCI_MAX_CURRENT_330_SHIFT) |
+ (curr << SDHCI_MAX_CURRENT_300_SHIFT) |
+ (curr << SDHCI_MAX_CURRENT_180_SHIFT);
+ }
+ }
if (caps[0] & SDHCI_CAN_VDD_330) {
- int max_current_330;
-
ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
- max_current_330 = ((max_current_caps &
+ mmc->max_current_330 = ((max_current_caps &
SDHCI_MAX_CURRENT_330_MASK) >>
SDHCI_MAX_CURRENT_330_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
-
- if (max_current_330 > 150)
- mmc->caps |= MMC_CAP_SET_XPC_330;
}
if (caps[0] & SDHCI_CAN_VDD_300) {
- int max_current_300;
-
ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
- max_current_300 = ((max_current_caps &
+ mmc->max_current_300 = ((max_current_caps &
SDHCI_MAX_CURRENT_300_MASK) >>
SDHCI_MAX_CURRENT_300_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
-
- if (max_current_300 > 150)
- mmc->caps |= MMC_CAP_SET_XPC_300;
}
if (caps[0] & SDHCI_CAN_VDD_180) {
- int max_current_180;
-
ocr_avail |= MMC_VDD_165_195;
- max_current_180 = ((max_current_caps &
+ mmc->max_current_180 = ((max_current_caps &
SDHCI_MAX_CURRENT_180_MASK) >>
SDHCI_MAX_CURRENT_180_SHIFT) *
SDHCI_MAX_CURRENT_MULTIPLIER;
-
- if (max_current_180 > 150)
- mmc->caps |= MMC_CAP_SET_XPC_180;
-
- /* Maximum current capabilities of the host at 1.8V */
- if (max_current_180 >= 800)
- mmc->caps |= MMC_CAP_MAX_CURRENT_800;
- else if (max_current_180 >= 600)
- mmc->caps |= MMC_CAP_MAX_CURRENT_600;
- else if (max_current_180 >= 400)
- mmc->caps |= MMC_CAP_MAX_CURRENT_400;
- else
- mmc->caps |= MMC_CAP_MAX_CURRENT_200;
}
mmc->ocr_avail = ocr_avail;
@@ -2992,13 +3026,10 @@ int sdhci_add_host(struct sdhci_host *host)
ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED,
mmc_hostname(mmc), host);
- if (ret)
+ if (ret) {
+ pr_err("%s: Failed to request IRQ %d: %d\n",
+ mmc_hostname(mmc), host->irq, ret);
goto untasklet;
-
- host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
- if (IS_ERR(host->vmmc)) {
- pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
- host->vmmc = NULL;
}
sdhci_init(host, 0);
@@ -3016,8 +3047,11 @@ int sdhci_add_host(struct sdhci_host *host)
host->led.brightness_set = sdhci_led_control;
ret = led_classdev_register(mmc_dev(mmc), &host->led);
- if (ret)
+ if (ret) {
+ pr_err("%s: Failed to register LED device: %d\n",
+ mmc_hostname(mmc), ret);
goto reset;
+ }
#endif
mmiowb();
@@ -3081,8 +3115,6 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
free_irq(host->irq, host);
del_timer_sync(&host->timer);
- if (host->version >= SDHCI_SPEC_300)
- del_timer_sync(&host->tuning_timer);
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index f761f23d2a28..97653ea8942b 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -205,6 +205,7 @@
#define SDHCI_CAPABILITIES_1 0x44
#define SDHCI_MAX_CURRENT 0x48
+#define SDHCI_MAX_CURRENT_LIMIT 0xFF
#define SDHCI_MAX_CURRENT_330_MASK 0x0000FF
#define SDHCI_MAX_CURRENT_330_SHIFT 0
#define SDHCI_MAX_CURRENT_300_MASK 0x00FF00
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 724b35e85a26..5d8142773fac 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -54,6 +54,8 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/mod_devicetable.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
@@ -211,8 +213,6 @@ struct sh_mmcif_host {
struct mmc_host *mmc;
struct mmc_request *mrq;
struct platform_device *pd;
- struct sh_dmae_slave dma_slave_tx;
- struct sh_dmae_slave dma_slave_rx;
struct clk *hclk;
unsigned int clk;
int bus_width;
@@ -371,56 +371,69 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
desc, cookie);
}
-static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
-{
- dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
- chan->private = arg;
- return true;
-}
-
static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
struct sh_mmcif_plat_data *pdata)
{
- struct sh_dmae_slave *tx, *rx;
+ struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
+ struct dma_slave_config cfg;
+ dma_cap_mask_t mask;
+ int ret;
+
host->dma_active = false;
+ if (!pdata)
+ return;
+
+ if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+ return;
+
/* We can only either use DMA for both Tx and Rx or not use it at all */
- if (pdata->dma) {
- dev_warn(&host->pd->dev,
- "Update your platform to use embedded DMA slave IDs\n");
- tx = &pdata->dma->chan_priv_tx;
- rx = &pdata->dma->chan_priv_rx;
- } else {
- tx = &host->dma_slave_tx;
- tx->slave_id = pdata->slave_id_tx;
- rx = &host->dma_slave_rx;
- rx->slave_id = pdata->slave_id_rx;
- }
- if (tx->slave_id > 0 && rx->slave_id > 0) {
- dma_cap_mask_t mask;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)pdata->slave_id_tx);
+ dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
+ host->chan_tx);
- host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx);
- dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
- host->chan_tx);
+ if (!host->chan_tx)
+ return;
- if (!host->chan_tx)
- return;
+ cfg.slave_id = pdata->slave_id_tx;
+ cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = res->start + MMCIF_CE_DATA;
+ cfg.src_addr = 0;
+ ret = dmaengine_slave_config(host->chan_tx, &cfg);
+ if (ret < 0)
+ goto ecfgtx;
- host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx);
- dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
- host->chan_rx);
+ host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
+ (void *)pdata->slave_id_rx);
+ dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
+ host->chan_rx);
- if (!host->chan_rx) {
- dma_release_channel(host->chan_tx);
- host->chan_tx = NULL;
- return;
- }
+ if (!host->chan_rx)
+ goto erqrx;
- init_completion(&host->dma_complete);
- }
+ cfg.slave_id = pdata->slave_id_rx;
+ cfg.direction = DMA_DEV_TO_MEM;
+ cfg.dst_addr = 0;
+ cfg.src_addr = res->start + MMCIF_CE_DATA;
+ ret = dmaengine_slave_config(host->chan_rx, &cfg);
+ if (ret < 0)
+ goto ecfgrx;
+
+ init_completion(&host->dma_complete);
+
+ return;
+
+ecfgrx:
+ dma_release_channel(host->chan_rx);
+ host->chan_rx = NULL;
+erqrx:
+ecfgtx:
+ dma_release_channel(host->chan_tx);
+ host->chan_tx = NULL;
}
static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
@@ -444,13 +457,14 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+ bool sup_pclk = p ? p->sup_pclk : false;
sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);
if (!clk)
return;
- if (p->sup_pclk && clk == host->clk)
+ if (sup_pclk && clk == host->clk)
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
else
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
@@ -892,21 +906,15 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
switch (mrq->cmd->opcode) {
/* MMCIF does not support SD/SDIO command */
- case SD_IO_SEND_OP_COND:
+ case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */
+ case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
+ if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR)
+ break;
case MMC_APP_CMD:
host->state = STATE_IDLE;
mrq->cmd->error = -ETIMEDOUT;
mmc_request_done(mmc, mrq);
return;
- case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
- if (!mrq->data) {
- /* send_if_cond cmd (not support) */
- host->state = STATE_IDLE;
- mrq->cmd->error = -ETIMEDOUT;
- mmc_request_done(mmc, mrq);
- return;
- }
- break;
default:
break;
}
@@ -916,10 +924,35 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
sh_mmcif_start_cmd(host, mrq);
}
+static int sh_mmcif_clk_update(struct sh_mmcif_host *host)
+{
+ int ret = clk_enable(host->hclk);
+
+ if (!ret) {
+ host->clk = clk_get_rate(host->hclk);
+ host->mmc->f_max = host->clk / 2;
+ host->mmc->f_min = host->clk / 512;
+ }
+
+ return ret;
+}
+
+static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios)
+{
+ struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
+ struct mmc_host *mmc = host->mmc;
+
+ if (pd && pd->set_pwr)
+ pd->set_pwr(host->pd, ios->power_mode != MMC_POWER_OFF);
+ if (!IS_ERR(mmc->supply.vmmc))
+ /* Errors ignored... */
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->power_mode ? ios->vdd : 0);
+}
+
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct sh_mmcif_host *host = mmc_priv(mmc);
- struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
@@ -937,6 +970,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
sh_mmcif_request_dma(host, host->pd->dev.platform_data);
host->card_present = true;
}
+ sh_mmcif_set_power(host, ios);
} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
/* clock stop */
sh_mmcif_clock_control(host, 0);
@@ -948,9 +982,10 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
if (host->power) {
pm_runtime_put(&host->pd->dev);
+ clk_disable(host->hclk);
host->power = false;
- if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
- p->down_pwr(host->pd);
+ if (ios->power_mode == MMC_POWER_OFF)
+ sh_mmcif_set_power(host, ios);
}
host->state = STATE_IDLE;
return;
@@ -958,8 +993,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->clock) {
if (!host->power) {
- if (p->set_pwr)
- p->set_pwr(host->pd, ios->power_mode);
+ sh_mmcif_clk_update(host);
pm_runtime_get_sync(&host->pd->dev);
host->power = true;
sh_mmcif_sync_reset(host);
@@ -975,8 +1009,12 @@ static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
struct sh_mmcif_host *host = mmc_priv(mmc);
struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
+ int ret = mmc_gpio_get_cd(mmc);
+
+ if (ret >= 0)
+ return ret;
- if (!p->get_cd)
+ if (!p || !p->get_cd)
return -ENOSYS;
else
return p->get_cd(host->pd);
@@ -1242,12 +1280,28 @@ static void mmcif_timeout_work(struct work_struct *work)
mmc_request_done(host->mmc, mrq);
}
+static void sh_mmcif_init_ocr(struct sh_mmcif_host *host)
+{
+ struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data;
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_get_supply(mmc);
+
+ if (!pd)
+ return;
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = pd->ocr;
+ else if (pd->ocr)
+ dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+}
+
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
int ret = 0, irq[2];
struct mmc_host *mmc;
struct sh_mmcif_host *host;
- struct sh_mmcif_plat_data *pd;
+ struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
struct resource *res;
void __iomem *reg;
char clk_name[8];
@@ -1268,42 +1322,26 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "ioremap error.\n");
return -ENOMEM;
}
- pd = pdev->dev.platform_data;
- if (!pd) {
- dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
- ret = -ENXIO;
- goto clean_up;
- }
+
mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
- goto clean_up;
+ goto ealloch;
}
host = mmc_priv(mmc);
host->mmc = mmc;
host->addr = reg;
host->timeout = 1000;
- snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
- host->hclk = clk_get(&pdev->dev, clk_name);
- if (IS_ERR(host->hclk)) {
- dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
- ret = PTR_ERR(host->hclk);
- goto clean_up1;
- }
- clk_enable(host->hclk);
- host->clk = clk_get_rate(host->hclk);
host->pd = pdev;
spin_lock_init(&host->lock);
mmc->ops = &sh_mmcif_ops;
- mmc->f_max = host->clk / 2;
- mmc->f_min = host->clk / 512;
- if (pd->ocr)
- mmc->ocr_avail = pd->ocr;
+ sh_mmcif_init_ocr(host);
+
mmc->caps = MMC_CAP_MMC_HIGHSPEED;
- if (pd->caps)
+ if (pd && pd->caps)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
@@ -1311,34 +1349,52 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
mmc->max_seg_size = mmc->max_req_size;
- sh_mmcif_sync_reset(host);
platform_set_drvdata(pdev, host);
pm_runtime_enable(&pdev->dev);
host->power = false;
+ snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
+ host->hclk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(host->hclk)) {
+ ret = PTR_ERR(host->hclk);
+ dev_err(&pdev->dev, "cannot get clock \"%s\": %d\n", clk_name, ret);
+ goto eclkget;
+ }
+ ret = sh_mmcif_clk_update(host);
+ if (ret < 0)
+ goto eclkupdate;
+
ret = pm_runtime_resume(&pdev->dev);
if (ret < 0)
- goto clean_up2;
+ goto eresume;
INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+ sh_mmcif_sync_reset(host);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
if (ret) {
dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
- goto clean_up3;
+ goto ereqirq0;
}
ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
if (ret) {
dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
- goto clean_up4;
+ goto ereqirq1;
+ }
+
+ if (pd && pd->use_cd_gpio) {
+ ret = mmc_gpio_request_cd(mmc, pd->cd_gpio);
+ if (ret < 0)
+ goto erqcd;
}
+ clk_disable(host->hclk);
ret = mmc_add_host(mmc);
if (ret < 0)
- goto clean_up5;
+ goto emmcaddh;
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
@@ -1347,33 +1403,42 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
return ret;
-clean_up5:
+emmcaddh:
+ if (pd && pd->use_cd_gpio)
+ mmc_gpio_free_cd(mmc);
+erqcd:
free_irq(irq[1], host);
-clean_up4:
+ereqirq1:
free_irq(irq[0], host);
-clean_up3:
+ereqirq0:
pm_runtime_suspend(&pdev->dev);
-clean_up2:
- pm_runtime_disable(&pdev->dev);
+eresume:
clk_disable(host->hclk);
-clean_up1:
+eclkupdate:
+ clk_put(host->hclk);
+eclkget:
+ pm_runtime_disable(&pdev->dev);
mmc_free_host(mmc);
-clean_up:
- if (reg)
- iounmap(reg);
+ealloch:
+ iounmap(reg);
return ret;
}
static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+ struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;
int irq[2];
host->dying = true;
+ clk_enable(host->hclk);
pm_runtime_get_sync(&pdev->dev);
dev_pm_qos_hide_latency_limit(&pdev->dev);
+ if (pd && pd->use_cd_gpio)
+ mmc_gpio_free_cd(host->mmc);
+
mmc_remove_host(host->mmc);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
@@ -1395,9 +1460,9 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- clk_disable(host->hclk);
mmc_free_host(host->mmc);
pm_runtime_put_sync(&pdev->dev);
+ clk_disable(host->hclk);
pm_runtime_disable(&pdev->dev);
return 0;
@@ -1406,24 +1471,18 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_mmcif_host *host = platform_get_drvdata(pdev);
+ struct sh_mmcif_host *host = dev_get_drvdata(dev);
int ret = mmc_suspend_host(host->mmc);
- if (!ret) {
+ if (!ret)
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
- clk_disable(host->hclk);
- }
return ret;
}
static int sh_mmcif_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct sh_mmcif_host *host = platform_get_drvdata(pdev);
-
- clk_enable(host->hclk);
+ struct sh_mmcif_host *host = dev_get_drvdata(dev);
return mmc_resume_host(host->mmc);
}
@@ -1432,6 +1491,12 @@ static int sh_mmcif_resume(struct device *dev)
#define sh_mmcif_resume NULL
#endif /* CONFIG_PM */
+static const struct of_device_id mmcif_of_match[] = {
+ { .compatible = "renesas,sh-mmcif" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mmcif_of_match);
+
static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
.suspend = sh_mmcif_suspend,
.resume = sh_mmcif_resume,
@@ -1443,6 +1508,8 @@ static struct platform_driver sh_mmcif_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = &sh_mmcif_dev_pm_ops,
+ .owner = THIS_MODULE,
+ .of_match_table = mmcif_of_match,
},
};
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 934b68e9efc3..0bdc146178db 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mmc/host.h>
@@ -39,22 +40,39 @@ struct sh_mobile_sdhi {
struct tmio_mmc_dma dma_priv;
};
+static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f)
+{
+ struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
+ int ret = clk_enable(priv->clk);
+ if (ret < 0)
+ return ret;
+
+ *f = clk_get_rate(priv->clk);
+ return 0;
+}
+
+static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = dev_get_drvdata(&pdev->dev);
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data);
+ clk_disable(priv->clk);
+}
+
static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state)
{
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
- if (p && p->set_pwr)
- p->set_pwr(pdev, state);
+ p->set_pwr(pdev, state);
}
static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
{
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
- if (p && p->get_cd)
- return p->get_cd(pdev);
- else
- return -ENOSYS;
+ return p->get_cd(pdev);
}
static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
@@ -116,12 +134,14 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
}
mmc_data = &priv->mmc_data;
- p->pdata = mmc_data;
- if (p->init) {
- ret = p->init(pdev, &sdhi_ops);
- if (ret)
- goto einit;
+ if (p) {
+ p->pdata = mmc_data;
+ if (p->init) {
+ ret = p->init(pdev, &sdhi_ops);
+ if (ret)
+ goto einit;
+ }
}
snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id);
@@ -132,9 +152,8 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
goto eclkget;
}
- mmc_data->hclk = clk_get_rate(priv->clk);
- mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
- mmc_data->get_cd = sh_mobile_sdhi_get_cd;
+ mmc_data->clk_enable = sh_mobile_sdhi_clk_enable;
+ mmc_data->clk_disable = sh_mobile_sdhi_clk_disable;
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
if (p) {
mmc_data->flags = p->tmio_flags;
@@ -142,13 +161,18 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
mmc_data->ocr_mask = p->tmio_ocr_mask;
mmc_data->capabilities |= p->tmio_caps;
+ mmc_data->capabilities2 |= p->tmio_caps2;
mmc_data->cd_gpio = p->cd_gpio;
+ if (p->set_pwr)
+ mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
+ if (p->get_cd)
+ mmc_data->get_cd = sh_mobile_sdhi_get_cd;
if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) {
- priv->param_tx.slave_id = p->dma_slave_tx;
- priv->param_rx.slave_id = p->dma_slave_rx;
- priv->dma_priv.chan_priv_tx = &priv->param_tx;
- priv->dma_priv.chan_priv_rx = &priv->param_rx;
+ priv->param_tx.shdma_slave.slave_id = p->dma_slave_tx;
+ priv->param_rx.shdma_slave.slave_id = p->dma_slave_rx;
+ priv->dma_priv.chan_priv_tx = &priv->param_tx.shdma_slave;
+ priv->dma_priv.chan_priv_rx = &priv->param_rx.shdma_slave;
priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
mmc_data->dma = &priv->dma_priv;
}
@@ -248,7 +272,7 @@ eirq_card_detect:
eprobe:
clk_put(priv->clk);
eclkget:
- if (p->cleanup)
+ if (p && p->cleanup)
p->cleanup(pdev);
einit:
kfree(priv);
@@ -263,7 +287,8 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
struct sh_mobile_sdhi_info *p = pdev->dev.platform_data;
int i = 0, irq;
- p->pdata = NULL;
+ if (p)
+ p->pdata = NULL;
tmio_mmc_host_remove(host);
@@ -276,7 +301,7 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev)
clk_put(priv->clk);
- if (p->cleanup)
+ if (p && p->cleanup)
p->cleanup(pdev);
kfree(priv);
@@ -291,11 +316,18 @@ static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
.runtime_resume = tmio_mmc_host_runtime_resume,
};
+static const struct of_device_id sh_mobile_sdhi_of_match[] = {
+ { .compatible = "renesas,shmobile-sdhi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match);
+
static struct platform_driver sh_mobile_sdhi_driver = {
.driver = {
.name = "sh_mobile_sdhi",
.owner = THIS_MODULE,
.pm = &tmio_mmc_dev_pm_ops,
+ .of_match_table = sh_mobile_sdhi_of_match,
},
.probe = sh_mobile_sdhi_probe,
.remove = __devexit_p(sh_mobile_sdhi_remove),
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 9a7996ade58e..0d8a9bbe30be 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -34,8 +34,9 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
-#include <linux/mmc/cd-gpio.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/mmc/tmio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
@@ -305,8 +306,8 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command
int c = cmd->opcode;
u32 irq_mask = TMIO_MASK_CMD;
- /* Command 12 is handled by hardware */
- if (cmd->opcode == 12 && !cmd->arg) {
+ /* CMD12 is handled by hardware */
+ if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) {
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001);
return 0;
}
@@ -449,7 +450,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
}
if (stop) {
- if (stop->opcode == 12 && !stop->arg)
+ if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg)
sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000);
else
BUG();
@@ -751,6 +752,34 @@ fail:
mmc_request_done(mmc, mrq);
}
+static int tmio_mmc_clk_update(struct mmc_host *mmc)
+{
+ struct tmio_mmc_host *host = mmc_priv(mmc);
+ struct tmio_mmc_data *pdata = host->pdata;
+ int ret;
+
+ if (!pdata->clk_enable)
+ return -ENOTSUPP;
+
+ ret = pdata->clk_enable(host->pdev, &mmc->f_max);
+ if (!ret)
+ mmc->f_min = mmc->f_max / 512;
+
+ return ret;
+}
+
+static void tmio_mmc_set_power(struct tmio_mmc_host *host, struct mmc_ios *ios)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, ios->power_mode != MMC_POWER_OFF);
+ if (!IS_ERR(mmc->supply.vmmc))
+ /* Errors ignored... */
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+ ios->power_mode ? ios->vdd : 0);
+}
+
/* Set MMC clock / power.
* Note: This controller uses a simple divider scheme therefore it cannot
* run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
@@ -797,32 +826,37 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
*/
if (ios->power_mode == MMC_POWER_ON && ios->clock) {
if (!host->power) {
+ tmio_mmc_clk_update(mmc);
pm_runtime_get_sync(dev);
host->power = true;
}
tmio_mmc_set_clock(host, ios->clock);
/* power up SD bus */
- if (host->set_pwr)
- host->set_pwr(host->pdev, 1);
+ tmio_mmc_set_power(host, ios);
/* start bus clock */
tmio_mmc_clk_start(host);
} else if (ios->power_mode != MMC_POWER_UP) {
- if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
- host->set_pwr(host->pdev, 0);
+ if (ios->power_mode == MMC_POWER_OFF)
+ tmio_mmc_set_power(host, ios);
if (host->power) {
+ struct tmio_mmc_data *pdata = host->pdata;
+ tmio_mmc_clk_stop(host);
host->power = false;
pm_runtime_put(dev);
+ if (pdata->clk_disable)
+ pdata->clk_disable(host->pdev);
}
- tmio_mmc_clk_stop(host);
}
- switch (ios->bus_width) {
- case MMC_BUS_WIDTH_1:
- sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
- break;
- case MMC_BUS_WIDTH_4:
- sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
- break;
+ if (host->power) {
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_1:
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0);
+ break;
+ case MMC_BUS_WIDTH_4:
+ sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0);
+ break;
+ }
}
/* Let things settle. delay taken from winCE driver */
@@ -841,6 +875,9 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
+ int ret = mmc_gpio_get_ro(mmc);
+ if (ret >= 0)
+ return ret;
return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) ||
(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT));
@@ -850,6 +887,9 @@ static int tmio_mmc_get_cd(struct mmc_host *mmc)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
struct tmio_mmc_data *pdata = host->pdata;
+ int ret = mmc_gpio_get_cd(mmc);
+ if (ret >= 0)
+ return ret;
if (!pdata->get_cd)
return -ENOSYS;
@@ -865,6 +905,19 @@ static const struct mmc_host_ops tmio_mmc_ops = {
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
};
+static void tmio_mmc_init_ocr(struct tmio_mmc_host *host)
+{
+ struct tmio_mmc_data *pdata = host->pdata;
+ struct mmc_host *mmc = host->mmc;
+
+ mmc_regulator_get_supply(mmc);
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = pdata->ocr_mask ? : MMC_VDD_32_33 | MMC_VDD_33_34;
+ else if (pdata->ocr_mask)
+ dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+}
+
int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
struct platform_device *pdev,
struct tmio_mmc_data *pdata)
@@ -904,18 +957,14 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
mmc->ops = &tmio_mmc_ops;
mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
- mmc->f_max = pdata->hclk;
- mmc->f_min = mmc->f_max / 512;
+ mmc->caps2 = pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- if (pdata->ocr_mask)
- mmc->ocr_avail = pdata->ocr_mask;
- else
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+ tmio_mmc_init_ocr(_host);
_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD ||
mmc->caps & MMC_CAP_NEEDS_POLL ||
@@ -927,6 +976,11 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
if (ret < 0)
goto pm_disable;
+ if (tmio_mmc_clk_update(mmc) < 0) {
+ mmc->f_max = pdata->hclk;
+ mmc->f_min = mmc->f_max / 512;
+ }
+
/*
* There are 4 different scenarios for the card detection:
* 1) an external gpio irq handles the cd (best for power savings)
@@ -937,7 +991,6 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
* While we increment the runtime PM counter for all scenarios when
* the mmc core activates us by calling an appropriate set_ios(), we
* must additionally ensure that in case 2) the tmio mmc hardware stays
- * additionally ensure that in case 2) the tmio mmc hardware stays
* powered on during runtime for the card detection to work.
*/
if (_host->native_hotplug)
@@ -948,6 +1001,17 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK);
tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL);
+
+ /* Unmask the IRQs we want to know about */
+ if (!_host->chan_rx)
+ irq_mask |= TMIO_MASK_READOP;
+ if (!_host->chan_tx)
+ irq_mask |= TMIO_MASK_WRITEOP;
+ if (!_host->native_hotplug)
+ irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
+
+ _host->sdcard_irq_mask &= ~irq_mask;
+
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
tmio_mmc_enable_sdio_irq(mmc, 0);
@@ -961,22 +1025,18 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
- mmc_add_host(mmc);
+ ret = mmc_add_host(mmc);
+ if (pdata->clk_disable)
+ pdata->clk_disable(pdev);
+ if (ret < 0) {
+ tmio_mmc_host_remove(_host);
+ return ret;
+ }
dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
- /* Unmask the IRQs we want to know about */
- if (!_host->chan_rx)
- irq_mask |= TMIO_MASK_READOP;
- if (!_host->chan_tx)
- irq_mask |= TMIO_MASK_WRITEOP;
- if (!_host->native_hotplug)
- irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
-
- tmio_mmc_enable_mmc_irqs(_host, irq_mask);
-
if (pdata->flags & TMIO_MMC_USE_GPIO_CD) {
- ret = mmc_cd_gpio_request(mmc, pdata->cd_gpio);
+ ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio);
if (ret < 0) {
tmio_mmc_host_remove(_host);
return ret;
@@ -1008,7 +1068,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
* This means we can miss a card-eject, but this is anyway
* possible, because of delayed processing of hotplug events.
*/
- mmc_cd_gpio_free(mmc);
+ mmc_gpio_free_cd(mmc);
if (!host->native_hotplug)
pm_runtime_get_sync(&pdev->dev);
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index a90bfe79916d..334da5f583c0 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -63,7 +63,7 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
struct super_block *sb;
int ret;
- sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, mtd);
+ sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, flags, mtd);
if (IS_ERR(sb))
goto out_error;
@@ -74,8 +74,6 @@ static struct dentry *mount_mtd_aux(struct file_system_type *fs_type, int flags,
pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
- sb->s_flags = flags;
-
ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (ret < 0) {
deactivate_locked_super(sb);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 41371ba1a811..f3f6cfedd69e 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -102,7 +102,7 @@ static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
static int cafe_device_ready(struct mtd_info *mtd)
{
struct cafe_priv *cafe = mtd->priv;
- int result = !!(cafe_readl(cafe, NAND_STATUS) | 0x40000000);
+ int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
cafe_writel(cafe, irqs, NAND_IRQ);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index a05b7b444d4f..a6cad5caba78 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -920,12 +920,12 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
*/
memset(chip->oob_poi, ~0, mtd->oobsize);
chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
-
- read_page_swap_end(this, buf, mtd->writesize,
- this->payload_virt, this->payload_phys,
- nfc_geo->payload_size,
- payload_virt, payload_phys);
}
+
+ read_page_swap_end(this, buf, mtd->writesize,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
exit_nfc:
return ret;
}
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index c58e6a93f445..6acc790c2fbb 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -273,6 +273,26 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
+static void memcpy32_fromio(void *trg, const void __iomem *src, size_t size)
+{
+ int i;
+ u32 *t = trg;
+ const __iomem u32 *s = src;
+
+ for (i = 0; i < (size >> 2); i++)
+ *t++ = __raw_readl(s++);
+}
+
+static void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+ int i;
+ u32 __iomem *t = trg;
+ const u32 *s = src;
+
+ for (i = 0; i < (size >> 2); i++)
+ __raw_writel(*s++, t++);
+}
+
static int check_int_v3(struct mxc_nand_host *host)
{
uint32_t tmp;
@@ -519,7 +539,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
wait_op_done(host, true);
- memcpy_fromio(host->data_buf, host->main_area0, 16);
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
}
/* Request the NANDFC to perform a read of the NAND device ID. */
@@ -535,7 +555,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
/* Wait for operation to complete */
wait_op_done(host, true);
- memcpy_fromio(host->data_buf, host->main_area0, 16);
+ memcpy32_fromio(host->data_buf, host->main_area0, 16);
if (this->options & NAND_BUSWIDTH_16) {
/* compress the ID info */
@@ -797,16 +817,16 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
if (bfrom) {
for (i = 0; i < n - 1; i++)
- memcpy_fromio(d + i * j, s + i * t, j);
+ memcpy32_fromio(d + i * j, s + i * t, j);
/* the last section */
- memcpy_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
+ memcpy32_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
} else {
for (i = 0; i < n - 1; i++)
- memcpy_toio(&s[i * t], &d[i * j], j);
+ memcpy32_toio(&s[i * t], &d[i * j], j);
/* the last section */
- memcpy_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
+ memcpy32_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
}
}
@@ -1070,7 +1090,8 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
host->devtype_data->send_page(mtd, NFC_OUTPUT);
- memcpy_fromio(host->data_buf, host->main_area0, mtd->writesize);
+ memcpy32_fromio(host->data_buf, host->main_area0,
+ mtd->writesize);
copy_spare(mtd, true);
break;
@@ -1086,7 +1107,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
break;
case NAND_CMD_PAGEPROG:
- memcpy_toio(host->main_area0, host->data_buf, mtd->writesize);
+ memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
copy_spare(mtd, false);
host->devtype_data->send_page(mtd, NFC_INPUT);
host->devtype_data->send_cmd(host, command, true);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index d47586cf64ce..a11253a0fcab 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3501,6 +3501,13 @@ int nand_scan_tail(struct mtd_info *mtd)
/* propagate ecc info to mtd_info */
mtd->ecclayout = chip->ecc.layout;
mtd->ecc_strength = chip->ecc.strength;
+ /*
+ * Initialize bitflip_threshold to its default prior scan_bbt() call.
+ * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+ * properly set.
+ */
+ if (!mtd->bitflip_threshold)
+ mtd->bitflip_threshold = mtd->ecc_strength;
/* Check, if we should skip the bad block table scan */
if (chip->options & NAND_SKIP_BBTSCAN)
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 6cc8fbfabb8e..cf0cd3146817 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -28,7 +28,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/string.h>
@@ -546,12 +546,6 @@ static char *get_partition_name(int i)
return kstrdup(buf, GFP_KERNEL);
}
-static uint64_t divide(uint64_t n, uint32_t d)
-{
- do_div(n, d);
- return n;
-}
-
/*
* Initialize the nandsim structure.
*
@@ -580,7 +574,7 @@ static int init_nandsim(struct mtd_info *mtd)
ns->geom.oobsz = mtd->oobsize;
ns->geom.secsz = mtd->erasesize;
ns->geom.pgszoob = ns->geom.pgsz + ns->geom.oobsz;
- ns->geom.pgnum = divide(ns->geom.totsz, ns->geom.pgsz);
+ ns->geom.pgnum = div_u64(ns->geom.totsz, ns->geom.pgsz);
ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
ns->geom.secshift = ffs(ns->geom.secsz) - 1;
ns->geom.pgshift = chip->page_shift;
@@ -921,7 +915,7 @@ static int setup_wear_reporting(struct mtd_info *mtd)
if (!rptwear)
return 0;
- wear_eb_count = divide(mtd->size, mtd->erasesize);
+ wear_eb_count = div_u64(mtd->size, mtd->erasesize);
mem = wear_eb_count * sizeof(unsigned long);
if (mem / sizeof(unsigned long) != wear_eb_count) {
NS_ERR("Too many erase blocks for wear reporting\n");
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 738ee8dc16cd..ea4b95b5451c 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -29,7 +29,7 @@ config MTD_UBI_WL_THRESHOLD
config MTD_UBI_BEB_RESERVE
int "Percentage of reserved eraseblocks for bad eraseblocks handling"
- default 1
+ default 2
range 0 25
help
If the MTD device admits of bad eraseblocks (e.g. NAND flash), UBI
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index acec85deb6af..fb5567878181 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -1026,7 +1026,7 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
{
int ubi_num;
- dbg_gen("dettach MTD device");
+ dbg_gen("detach MTD device");
err = get_user(ubi_num, (__user int32_t *)argp);
if (err) {
err = -EFAULT;
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index f6a7d7ac4b98..8bbfb444b895 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -92,7 +92,30 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
}
/**
- * ubi_calculate_rsvd_pool - calculate how many PEBs must be reserved for bad
+ * ubi_update_reserved - update bad eraseblock handling accounting data.
+ * @ubi: UBI device description object
+ *
+ * This function calculates the gap between current number of PEBs reserved for
+ * bad eraseblock handling and the required level of PEBs that must be
+ * reserved, and if necessary, reserves more PEBs to fill that gap, according
+ * to availability. Should be called with ubi->volumes_lock held.
+ */
+void ubi_update_reserved(struct ubi_device *ubi)
+{
+ int need = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
+
+ if (need <= 0 || ubi->avail_pebs == 0)
+ return;
+
+ need = min_t(int, need, ubi->avail_pebs);
+ ubi->avail_pebs -= need;
+ ubi->rsvd_pebs += need;
+ ubi->beb_rsvd_pebs += need;
+ ubi_msg("reserved more %d PEBs for bad PEB handling", need);
+}
+
+/**
+ * ubi_calculate_reserved - calculate how many PEBs must be reserved for bad
* eraseblock handling.
* @ubi: UBI device description object
*/
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index a1a81c9ea8ce..84f66e3fa05d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -647,6 +647,7 @@ int ubi_more_leb_change_data(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf,
int length);
int ubi_check_volume(struct ubi_device *ubi, int vol_id);
+void ubi_update_reserved(struct ubi_device *ubi);
void ubi_calculate_reserved(struct ubi_device *ubi);
int ubi_check_pattern(const void *buf, uint8_t patt, int size);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 0669cff8ac3c..9169e58c262e 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -443,15 +443,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
ubi->avail_pebs += reserved_pebs;
- i = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (i > 0) {
- i = ubi->avail_pebs >= i ? i : ubi->avail_pebs;
- ubi->avail_pebs -= i;
- ubi->rsvd_pebs += i;
- ubi->beb_rsvd_pebs += i;
- if (i > 0)
- ubi_msg("reserve more %d PEBs", i);
- }
+ ubi_update_reserved(ubi);
ubi->vol_count -= 1;
spin_unlock(&ubi->volumes_lock);
@@ -558,15 +550,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs += pebs;
ubi->avail_pebs -= pebs;
- pebs = ubi->beb_rsvd_level - ubi->beb_rsvd_pebs;
- if (pebs > 0) {
- pebs = ubi->avail_pebs >= pebs ? pebs : ubi->avail_pebs;
- ubi->avail_pebs -= pebs;
- ubi->rsvd_pebs += pebs;
- ubi->beb_rsvd_pebs += pebs;
- if (pebs > 0)
- ubi_msg("reserve more %d PEBs", pebs);
- }
+ ubi_update_reserved(ubi);
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = vol->eba_tbl[i];
kfree(vol->eba_tbl);
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index dd5e04813b76..545c09ed9079 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -936,7 +936,7 @@ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct cops_local *lp = netdev_priv(dev);
struct sockaddr_at *sa = (struct sockaddr_at *)&ifr->ifr_addr;
- struct atalk_addr *aa = (struct atalk_addr *)&lp->node_addr;
+ struct atalk_addr *aa = &lp->node_addr;
switch(cmd)
{
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 3463b469e657..a030e635f001 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2454,24 +2454,27 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
out:
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
return NETDEV_TX_OK;
}
-int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
int ret = RX_HANDLER_ANOTHER;
+ struct lacpdu *lacpdu, _lacpdu;
+
if (skb->protocol != PKT_TYPE_LACPDU)
return ret;
- if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+ lacpdu = skb_header_pointer(skb, 0, sizeof(_lacpdu), &_lacpdu);
+ if (!lacpdu)
return ret;
read_lock(&bond->lock);
- ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
+ ret = bond_3ad_rx_indication(lacpdu, slave, skb->len);
read_unlock(&bond->lock);
return ret;
}
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 5ee7e3c45db7..0cfaa4afdece 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -274,8 +274,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
void bond_3ad_handle_link_change(struct slave *slave, char link);
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
-int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave);
+int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
#endif //__BOND_3AD_H__
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 0f59c1564e53..e15cc11edbbe 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -342,27 +342,17 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
_unlock_rx_hashtbl_bh(bond);
}
-static int rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
- struct arp_pkt *arp;
+ struct arp_pkt *arp, _arp;
if (skb->protocol != cpu_to_be16(ETH_P_ARP))
goto out;
- arp = (struct arp_pkt *) skb->data;
- if (!arp) {
- pr_debug("Packet has no ARP data\n");
+ arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
+ if (!arp)
goto out;
- }
-
- if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
- goto out;
-
- if (skb->len < sizeof(struct arp_pkt)) {
- pr_debug("Packet is too small to be an ARP\n");
- goto out;
- }
if (arp->op_code == htons(ARPOP_REPLY)) {
/* update rx hash table for this ARP */
@@ -1356,12 +1346,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
}
+ read_unlock(&bond->curr_slave_lock);
+
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
- read_unlock(&bond->curr_slave_lock);
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 3680aa251dea..2cf084eb9d52 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -6,7 +6,7 @@
#include "bonding.h"
#include "bond_alb.h"
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_NET_NS)
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b9c2ae62166d..6fae5f3ec7f6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -395,8 +395,8 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
skb->dev = slave_dev;
BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
- sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
- skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
+ sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+ skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
if (unlikely(netpoll_tx_running(slave_dev)))
bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -1240,9 +1240,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
if (!np)
goto out;
- np->dev = slave->dev;
- strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
- err = __netpoll_setup(np);
+ err = __netpoll_setup(np, slave->dev);
if (err) {
kfree(np);
goto out;
@@ -1384,6 +1382,7 @@ static void bond_compute_features(struct bonding *bond)
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
int i;
+ unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
read_lock(&bond->lock);
@@ -1394,6 +1393,7 @@ static void bond_compute_features(struct bonding *bond)
vlan_features = netdev_increment_features(vlan_features,
slave->dev->vlan_features, BOND_VLAN_FEATURES);
+ dst_release_flag &= slave->dev->priv_flags;
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
}
@@ -1402,6 +1402,9 @@ done:
bond_dev->vlan_features = vlan_features;
bond_dev->hard_header_len = max_hard_header_len;
+ flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
+ bond_dev->priv_flags = flags | dst_release_flag;
+
read_unlock(&bond->lock);
netdev_change_features(bond_dev);
@@ -1445,8 +1448,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
struct sk_buff *skb = *pskb;
struct slave *slave;
struct bonding *bond;
- int (*recv_probe)(struct sk_buff *, struct bonding *,
- struct slave *);
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ struct slave *);
int ret = RX_HANDLER_ANOTHER;
skb = skb_share_check(skb, GFP_ATOMIC);
@@ -1463,15 +1466,10 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
recv_probe = ACCESS_ONCE(bond->recv_probe);
if (recv_probe) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (likely(nskb)) {
- ret = recv_probe(nskb, bond, slave);
- dev_kfree_skb(nskb);
- if (ret == RX_HANDLER_CONSUMED) {
- consume_skb(skb);
- return ret;
- }
+ ret = recv_probe(skb, bond, slave);
+ if (ret == RX_HANDLER_CONSUMED) {
+ consume_skb(skb);
+ return ret;
}
}
@@ -2738,25 +2736,31 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
}
}
-static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
- struct slave *slave)
+static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
- struct arphdr *arp;
+ struct arphdr *arp = (struct arphdr *)skb->data;
unsigned char *arp_ptr;
__be32 sip, tip;
+ int alen;
if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
return RX_HANDLER_ANOTHER;
read_lock(&bond->lock);
+ alen = arp_hdr_len(bond->dev);
pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
bond->dev->name, skb->dev->name);
- if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
- goto out_unlock;
+ if (alen > skb_headlen(skb)) {
+ arp = kmalloc(alen, GFP_ATOMIC);
+ if (!arp)
+ goto out_unlock;
+ if (skb_copy_bits(skb, 0, arp, alen) < 0)
+ goto out_unlock;
+ }
- arp = arp_hdr(skb);
if (arp->ar_hln != bond->dev->addr_len ||
skb->pkt_type == PACKET_OTHERHOST ||
skb->pkt_type == PACKET_LOOPBACK ||
@@ -2791,6 +2795,8 @@ static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
out_unlock:
read_unlock(&bond->lock);
+ if (arp != (struct arphdr *)skb->data)
+ kfree(arp);
return RX_HANDLER_ANOTHER;
}
@@ -3227,6 +3233,12 @@ static int bond_master_netdev_event(unsigned long event,
switch (event) {
case NETDEV_CHANGENAME:
return bond_event_changename(event_bond);
+ case NETDEV_UNREGISTER:
+ bond_remove_proc_entry(event_bond);
+ break;
+ case NETDEV_REGISTER:
+ bond_create_proc_entry(event_bond);
+ break;
default:
break;
}
@@ -3987,7 +3999,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
out:
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
return NETDEV_TX_OK;
@@ -4009,11 +4021,11 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
res = bond_dev_queue_xmit(bond, skb,
bond->curr_active_slave->dev);
+ read_unlock(&bond->curr_slave_lock);
+
if (res)
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
-
- read_unlock(&bond->curr_slave_lock);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -4052,7 +4064,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
if (res) {
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
}
return NETDEV_TX_OK;
@@ -4090,7 +4102,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
res = bond_dev_queue_xmit(bond, skb2, tx_dev);
if (res) {
- dev_kfree_skb(skb2);
+ kfree_skb(skb2);
continue;
}
}
@@ -4104,7 +4116,7 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
out:
if (res)
/* no suitable interface, frame not sent */
- dev_kfree_skb(skb);
+ kfree_skb(skb);
/* frame sent to all suitable interfaces */
return NETDEV_TX_OK;
@@ -4172,7 +4184,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
/*
* Save the original txq to restore before passing to the driver
*/
- qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
+ qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
if (unlikely(txq >= dev->real_num_tx_queues)) {
do {
@@ -4210,7 +4222,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
pr_err("%s: Error: Unknown bonding mode %d\n",
dev->name, bond->params.mode);
WARN_ON_ONCE(1);
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
}
@@ -4232,7 +4244,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (bond->slave_cnt)
ret = __bond_start_xmit(skb, dev);
else
- dev_kfree_skb(skb);
+ kfree_skb(skb);
read_unlock(&bond->lock);
@@ -4411,8 +4423,6 @@ static void bond_uninit(struct net_device *bond_dev)
bond_work_cancel_all(bond);
- bond_remove_proc_entry(bond);
-
bond_debug_unregister(bond);
__hw_addr_flush(&bond->mc_list);
@@ -4814,7 +4824,6 @@ static int bond_init(struct net_device *bond_dev)
bond_set_lockdep_class(bond_dev);
- bond_create_proc_entry(bond);
list_add_tail(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
@@ -4836,17 +4845,19 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
-static int bond_get_tx_queues(struct net *net, struct nlattr *tb[])
+static unsigned int bond_get_num_tx_queues(void)
{
return tx_queues;
}
static struct rtnl_link_ops bond_link_ops __read_mostly = {
- .kind = "bond",
- .priv_size = sizeof(struct bonding),
- .setup = bond_setup,
- .validate = bond_validate,
- .get_tx_queues = bond_get_tx_queues,
+ .kind = "bond",
+ .priv_size = sizeof(struct bonding),
+ .setup = bond_setup,
+ .validate = bond_validate,
+ .get_num_tx_queues = bond_get_num_tx_queues,
+ .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
+ as for TX queues */
};
/* Create a new bond based on the specified name and bonding parameters.
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 485bedb8278c..dc15d248443f 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1495,7 +1495,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
/* Check buffer length, valid ifname and queue id */
if (strlen(buffer) > IFNAMSIZ ||
!dev_valid_name(buffer) ||
- qid > bond->params.tx_queues)
+ qid > bond->dev->real_num_tx_queues)
goto err_no_cmd;
/* Get the pointer to that interface if it exists */
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4581aa5ccaba..f8af2fcd3d16 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -218,8 +218,8 @@ struct bonding {
struct slave *primary_slave;
bool force_primary;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
- int (*recv_probe)(struct sk_buff *, struct bonding *,
- struct slave *);
+ int (*recv_probe)(const struct sk_buff *, struct bonding *,
+ struct slave *);
rwlock_t lock;
rwlock_t curr_slave_lock;
u8 send_peer_notif;
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 4a27adb7ae67..0def8b3106f4 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -11,7 +11,6 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
@@ -20,7 +19,7 @@
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
-#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>
@@ -33,59 +32,46 @@ MODULE_DESCRIPTION("CAIF HSI driver");
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
(((pow)-((x)&((pow)-1)))))
-static int inactivity_timeout = 1000;
-module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
+static const struct cfhsi_config hsi_default_config = {
-static int aggregation_timeout = 1;
-module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
+ /* Inactivity timeout on HSI, ms */
+ .inactivity_timeout = HZ,
-/*
- * HSI padding options.
- * Warning: must be a base of 2 (& operation used) and can not be zero !
- */
-static int hsi_head_align = 4;
-module_param(hsi_head_align, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
+ /* Aggregation timeout (ms) of zero means no aggregation is done*/
+ .aggregation_timeout = 1,
-static int hsi_tail_align = 4;
-module_param(hsi_tail_align, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
-
-/*
- * HSI link layer flowcontrol thresholds.
- * Warning: A high threshold value migth increase throughput but it will at
- * the same time prevent channel prioritization and increase the risk of
- * flooding the modem. The high threshold should be above the low.
- */
-static int hsi_high_threshold = 100;
-module_param(hsi_high_threshold, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
+ /*
+ * HSI link layer flow-control thresholds.
+ * Threshold values for the HSI packet queue. Flow-control will be
+ * asserted when the number of packets exceeds q_high_mark. It will
+ * not be de-asserted before the number of packets drops below
+ * q_low_mark.
+ * Warning: A high threshold value might increase throughput but it
+ * will at the same time prevent channel prioritization and increase
+ * the risk of flooding the modem. The high threshold should be above
+ * the low.
+ */
+ .q_high_mark = 100,
+ .q_low_mark = 50,
-static int hsi_low_threshold = 50;
-module_param(hsi_low_threshold, int, S_IRUGO);
-MODULE_PARM_DESC(hsi_low_threshold, "HSI high threshold (FLOW ON).");
+ /*
+ * HSI padding options.
+ * Warning: must be a base of 2 (& operation used) and can not be zero !
+ */
+ .head_align = 4,
+ .tail_align = 4,
+};
#define ON 1
#define OFF 0
-/*
- * Threshold values for the HSI packet queue. Flowcontrol will be asserted
- * when the number of packets exceeds HIGH_WATER_MARK. It will not be
- * de-asserted before the number of packets drops below LOW_WATER_MARK.
- */
-#define LOW_WATER_MARK hsi_low_threshold
-#define HIGH_WATER_MARK hsi_high_threshold
-
static LIST_HEAD(cfhsi_list);
-static spinlock_t cfhsi_list_lock;
static void cfhsi_inactivity_tout(unsigned long arg)
{
struct cfhsi *cfhsi = (struct cfhsi *)arg;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
/* Schedule power down work queue. */
@@ -101,8 +87,8 @@ static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
int hpad, tpad, len;
info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
- tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+ tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
len = skb->len + hpad + tpad;
if (direction > 0)
@@ -115,7 +101,7 @@ static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
int i;
- if (cfhsi->aggregation_timeout < 0)
+ if (cfhsi->cfg.aggregation_timeout == 0)
return true;
for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
@@ -171,7 +157,7 @@ static void cfhsi_abort_tx(struct cfhsi *cfhsi)
cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
}
@@ -181,14 +167,14 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
size_t fifo_occupancy;
int ret;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
do {
- ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
&fifo_occupancy);
if (ret) {
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: can't get FIFO occupancy: %d.\n",
__func__, ret);
break;
@@ -198,11 +184,11 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
- cfhsi->dev);
+ ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
+ cfhsi->ops);
if (ret) {
clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: can't read data: %d.\n",
__func__, ret);
break;
@@ -213,13 +199,13 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
!test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
if (ret < 0) {
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: can't wait for flush complete: %d.\n",
__func__, ret);
break;
} else if (!ret) {
ret = -ETIMEDOUT;
- dev_warn(&cfhsi->ndev->dev,
+ netdev_warn(cfhsi->ndev,
"%s: timeout waiting for flush complete.\n",
__func__);
break;
@@ -246,14 +232,14 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Check if we can embed a CAIF frame. */
if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
struct caif_payload_info *info;
- int hpad = 0;
- int tpad = 0;
+ int hpad;
+ int tpad;
/* Calculate needed head alignment and tail alignment. */
info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
- tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+ tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
/* Check if frame still fits with added alignment. */
if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
@@ -282,8 +268,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
while (nfrms < CFHSI_MAX_PKTS) {
struct caif_payload_info *info;
- int hpad = 0;
- int tpad = 0;
+ int hpad;
+ int tpad;
if (!skb)
skb = cfhsi_dequeue(cfhsi);
@@ -294,8 +280,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Calculate needed head alignment and tail alignment. */
info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
- tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
+ hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
+ tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
/* Fill in CAIF frame length in descriptor. */
desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
@@ -348,7 +334,7 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
int len, res;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -366,22 +352,22 @@ static void cfhsi_start_tx(struct cfhsi *cfhsi)
cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
/* Start inactivity timer. */
mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
break;
}
/* Set up new transfer. */
- res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
if (WARN_ON(res < 0))
- dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
__func__, res);
} while (res < 0);
}
static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
- dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -392,7 +378,7 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
*/
spin_lock_bh(&cfhsi->lock);
if (cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
+ cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
cfhsi->cfdev.flowctrl) {
cfhsi->flow_off_sent = 0;
@@ -404,19 +390,19 @@ static void cfhsi_tx_done(struct cfhsi *cfhsi)
cfhsi_start_tx(cfhsi);
} else {
mod_timer(&cfhsi->aggregation_timer,
- jiffies + cfhsi->aggregation_timeout);
+ jiffies + cfhsi->cfg.aggregation_timeout);
spin_unlock_bh(&cfhsi->lock);
}
return;
}
-static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
+static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -433,7 +419,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
if ((desc->header & ~CFHSI_PIGGY_DESC) ||
(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
__func__);
return -EPROTO;
}
@@ -455,7 +441,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Sanity check length of CAIF frame. */
if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
__func__);
return -EPROTO;
}
@@ -463,7 +449,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Allocate SKB (OK even in IRQ context). */
skb = alloc_skb(len + 1, GFP_ATOMIC);
if (!skb) {
- dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+ netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
__func__);
return -ENOMEM;
}
@@ -477,8 +463,8 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
skb->dev = cfhsi->ndev;
/*
- * We are called from a arch specific platform device.
- * Unfortunately we don't know what context we're
+ * We are in a callback handler and
+ * unfortunately we don't know what context we're
* running in.
*/
if (in_interrupt())
@@ -504,7 +490,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
xfer_sz += CFHSI_DESC_SZ;
if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
- dev_err(&cfhsi->ndev->dev,
+ netdev_err(cfhsi->ndev,
"%s: Invalid payload len: %d, ignored.\n",
__func__, xfer_sz);
return -EPROTO;
@@ -551,7 +537,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Sanity check header and offset. */
if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
__func__);
return -EPROTO;
}
@@ -573,7 +559,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
struct sk_buff *skb;
u8 *dst = NULL;
u8 *pcffrm = NULL;
- int len = 0;
+ int len;
/* CAIF frame starts after head padding. */
pcffrm = pfrm + *pfrm + 1;
@@ -585,7 +571,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Sanity check length of CAIF frames. */
if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
+ netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
__func__);
return -EPROTO;
}
@@ -593,7 +579,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
/* Allocate SKB (OK even in IRQ context). */
skb = alloc_skb(len + 1, GFP_ATOMIC);
if (!skb) {
- dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
+ netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
__func__);
cfhsi->rx_state.nfrms = nfrms;
return -ENOMEM;
@@ -608,7 +594,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
skb->dev = cfhsi->ndev;
/*
- * We're called from a platform device,
+ * We're called in callback from HSI
* and don't know the context we're running in.
*/
if (in_interrupt())
@@ -639,7 +625,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
desc = (struct cfhsi_desc *)cfhsi->rx_buf;
- dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
@@ -647,7 +633,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
/* Update inactivity timer if pending. */
spin_lock_bh(&cfhsi->lock);
mod_timer_pending(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
@@ -680,12 +666,11 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
if (desc_pld_len < 0)
goto out_of_sync;
- if (desc_pld_len > 0)
+ if (desc_pld_len > 0) {
rx_len = desc_pld_len;
-
- if (desc_pld_len > 0 &&
- (piggy_desc->header & CFHSI_PIGGY_DESC))
- rx_len += CFHSI_DESC_SZ;
+ if (piggy_desc->header & CFHSI_PIGGY_DESC)
+ rx_len += CFHSI_DESC_SZ;
+ }
/*
* Copy needed information from the piggy-backed
@@ -693,8 +678,6 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
*/
memcpy(rx_buf, (u8 *)piggy_desc,
CFHSI_DESC_SHORT_SZ);
- if (desc_pld_len == -EPROTO)
- goto out_of_sync;
}
}
@@ -710,13 +693,13 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
/* Initiate next read */
if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
/* Set up new transfer. */
- dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
__func__);
- res = cfhsi->dev->cfhsi_rx(rx_ptr, rx_len,
- cfhsi->dev);
+ res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
+ cfhsi->ops);
if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
__func__, res);
cfhsi->ndev->stats.rx_errors++;
cfhsi->ndev->stats.rx_dropped++;
@@ -753,7 +736,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
return;
out_of_sync:
- dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
+ netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
cfhsi->rx_buf, CFHSI_DESC_SZ);
schedule_work(&cfhsi->out_of_sync_work);
@@ -763,18 +746,18 @@ static void cfhsi_rx_slowpath(unsigned long arg)
{
struct cfhsi *cfhsi = (struct cfhsi *)arg;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
cfhsi_rx_done(cfhsi);
}
-static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
+static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
@@ -807,9 +790,9 @@ static void cfhsi_wake_up(struct work_struct *work)
}
/* Activate wake line. */
- cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
- dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
__func__);
/* Wait for acknowledge. */
@@ -819,33 +802,33 @@ static void cfhsi_wake_up(struct work_struct *work)
&cfhsi->bits), ret);
if (unlikely(ret < 0)) {
/* Interrupted by signal. */
- dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
__func__, ret);
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
return;
} else if (!ret) {
bool ca_wake = false;
size_t fifo_occupancy = 0;
/* Wakeup timeout */
- dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
__func__);
/* Check FIFO to check if modem has sent something. */
- WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
&fifo_occupancy));
- dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
__func__, (unsigned) fifo_occupancy);
/* Check if we misssed the interrupt. */
- WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
&ca_wake));
if (ca_wake) {
- dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+ netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
__func__);
/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
@@ -856,11 +839,11 @@ static void cfhsi_wake_up(struct work_struct *work)
}
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
return;
}
wake_ack:
- dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
__func__);
/* Clear power up bit. */
@@ -868,11 +851,11 @@ wake_ack:
clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
/* Resume read operation. */
- dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
- res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);
+ netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
+ res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
if (WARN_ON(res < 0))
- dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);
+ netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
/* Clear power up acknowledment. */
clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -881,16 +864,16 @@ wake_ack:
/* Resume transmit if queues are not empty. */
if (!cfhsi_tx_queue_len(cfhsi)) {
- dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
__func__);
/* Start inactivity timer. */
mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->inactivity_timeout);
+ jiffies + cfhsi->cfg.inactivity_timeout);
spin_unlock_bh(&cfhsi->lock);
return;
}
- dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
+ netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
__func__);
spin_unlock_bh(&cfhsi->lock);
@@ -900,14 +883,14 @@ wake_ack:
if (likely(len > 0)) {
/* Set up new transfer. */
- res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
__func__, res);
cfhsi_abort_tx(cfhsi);
}
} else {
- dev_err(&cfhsi->ndev->dev,
+ netdev_err(cfhsi->ndev,
"%s: Failed to create HSI frame: %d.\n",
__func__, len);
}
@@ -921,13 +904,13 @@ static void cfhsi_wake_down(struct work_struct *work)
int retry = CFHSI_WAKE_TOUT;
cfhsi = container_of(work, struct cfhsi, wake_down_work);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
+ netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
return;
/* Deactivate wake line. */
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
/* Wait for acknowledge. */
ret = CFHSI_WAKE_TOUT;
@@ -936,26 +919,26 @@ static void cfhsi_wake_down(struct work_struct *work)
&cfhsi->bits), ret);
if (ret < 0) {
/* Interrupted by signal. */
- dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
+ netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
__func__, ret);
return;
} else if (!ret) {
bool ca_wake = true;
/* Timeout */
- dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
+ netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
/* Check if we misssed the interrupt. */
- WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
&ca_wake));
if (!ca_wake)
- dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
+ netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
__func__);
}
/* Check FIFO occupancy. */
while (retry) {
- WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
+ WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
&fifo_occupancy));
if (!fifo_occupancy)
@@ -967,14 +950,13 @@ static void cfhsi_wake_down(struct work_struct *work)
}
if (!retry)
- dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);
+ netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
/* Clear AWAKE condition. */
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
/* Cancel pending RX requests. */
- cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
-
+ cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}
static void cfhsi_out_of_sync(struct work_struct *work)
@@ -988,12 +970,12 @@ static void cfhsi_out_of_sync(struct work_struct *work)
rtnl_unlock();
}
-static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
+static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi = NULL;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
@@ -1007,12 +989,12 @@ static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}
-static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
+static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
struct cfhsi *cfhsi = NULL;
- cfhsi = container_of(drv, struct cfhsi, drv);
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
/* Initiating low power is only permitted by the host (us). */
@@ -1024,7 +1006,7 @@ static void cfhsi_aggregation_tout(unsigned long arg)
{
struct cfhsi *cfhsi = (struct cfhsi *)arg;
- dev_dbg(&cfhsi->ndev->dev, "%s.\n",
+ netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
cfhsi_start_tx(cfhsi);
@@ -1077,7 +1059,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
/* Send flow off if number of packets is above high water mark. */
if (!cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
+ cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
cfhsi->cfdev.flowctrl) {
cfhsi->flow_off_sent = 1;
cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -1114,9 +1096,9 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
WARN_ON(!len);
/* Set up new transfer. */
- res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
+ res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
if (WARN_ON(res < 0)) {
- dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
+ netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
__func__, res);
cfhsi_abort_tx(cfhsi);
}
@@ -1129,19 +1111,19 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static const struct net_device_ops cfhsi_ops;
+static const struct net_device_ops cfhsi_netdevops;
static void cfhsi_setup(struct net_device *dev)
{
int i;
struct cfhsi *cfhsi = netdev_priv(dev);
dev->features = 0;
- dev->netdev_ops = &cfhsi_ops;
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
dev->tx_queue_len = 0;
dev->destructor = free_netdev;
+ dev->netdev_ops = &cfhsi_netdevops;
for (i = 0; i < CFHSI_PRIO_LAST; ++i)
skb_queue_head_init(&cfhsi->qhead[i]);
cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
@@ -1149,43 +1131,7 @@ static void cfhsi_setup(struct net_device *dev)
cfhsi->cfdev.use_stx = false;
cfhsi->cfdev.use_fcs = false;
cfhsi->ndev = dev;
-}
-
-int cfhsi_probe(struct platform_device *pdev)
-{
- struct cfhsi *cfhsi = NULL;
- struct net_device *ndev;
-
- int res;
-
- ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
- if (!ndev)
- return -ENODEV;
-
- cfhsi = netdev_priv(ndev);
- cfhsi->ndev = ndev;
- cfhsi->pdev = pdev;
-
- /* Assign the HSI device. */
- cfhsi->dev = pdev->dev.platform_data;
-
- /* Assign the driver to this HSI device. */
- cfhsi->dev->drv = &cfhsi->drv;
-
- /* Register network device. */
- res = register_netdev(ndev);
- if (res) {
- dev_err(&ndev->dev, "%s: Registration error: %d.\n",
- __func__, res);
- free_netdev(ndev);
- return -ENODEV;
- }
- /* Add CAIF HSI device to list. */
- spin_lock(&cfhsi_list_lock);
- list_add_tail(&cfhsi->list, &cfhsi_list);
- spin_unlock(&cfhsi_list_lock);
-
- return res;
+ cfhsi->cfg = hsi_default_config;
}
static int cfhsi_open(struct net_device *ndev)
@@ -1201,9 +1147,6 @@ static int cfhsi_open(struct net_device *ndev)
/* Set flow info */
cfhsi->flow_off_sent = 0;
- cfhsi->q_low_mark = LOW_WATER_MARK;
- cfhsi->q_high_mark = HIGH_WATER_MARK;
-
/*
* Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1231,20 +1174,8 @@ static int cfhsi_open(struct net_device *ndev)
goto err_alloc_rx_flip;
}
- /* Pre-calculate inactivity timeout. */
- if (inactivity_timeout != -1) {
- cfhsi->inactivity_timeout =
- inactivity_timeout * HZ / 1000;
- if (!cfhsi->inactivity_timeout)
- cfhsi->inactivity_timeout = 1;
- else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
- cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
- } else {
- cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
- }
-
/* Initialize aggregation timeout */
- cfhsi->aggregation_timeout = aggregation_timeout;
+ cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
/* Initialize recieve vaiables. */
cfhsi->rx_ptr = cfhsi->rx_buf;
@@ -1254,10 +1185,10 @@ static int cfhsi_open(struct net_device *ndev)
spin_lock_init(&cfhsi->lock);
/* Set up the driver. */
- cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
- cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
- cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
- cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
+ cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
+ cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
+ cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
+ cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
/* Initialize the work queues. */
INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
@@ -1271,9 +1202,9 @@ static int cfhsi_open(struct net_device *ndev)
clear_bit(CFHSI_AWAKE, &cfhsi->bits);
/* Create work thread. */
- cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
+ cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
if (!cfhsi->wq) {
- dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n",
+ netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
__func__);
res = -ENODEV;
goto err_create_wq;
@@ -1298,9 +1229,9 @@ static int cfhsi_open(struct net_device *ndev)
cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
/* Activate HSI interface. */
- res = cfhsi->dev->cfhsi_up(cfhsi->dev);
+ res = cfhsi->ops->cfhsi_up(cfhsi->ops);
if (res) {
- dev_err(&cfhsi->ndev->dev,
+ netdev_err(cfhsi->ndev,
"%s: can't activate HSI interface: %d.\n",
__func__, res);
goto err_activate;
@@ -1309,14 +1240,14 @@ static int cfhsi_open(struct net_device *ndev)
/* Flush FIFO */
res = cfhsi_flush_fifo(cfhsi);
if (res) {
- dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n",
+ netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
__func__, res);
goto err_net_reg;
}
return res;
err_net_reg:
- cfhsi->dev->cfhsi_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_down(cfhsi->ops);
err_activate:
destroy_workqueue(cfhsi->wq);
err_create_wq:
@@ -1346,7 +1277,7 @@ static int cfhsi_close(struct net_device *ndev)
del_timer_sync(&cfhsi->aggregation_timer);
/* Cancel pending RX request (if any) */
- cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
+ cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
/* Destroy workqueue */
destroy_workqueue(cfhsi->wq);
@@ -1359,7 +1290,7 @@ static int cfhsi_close(struct net_device *ndev)
cfhsi_abort_tx(cfhsi);
/* Deactivate interface */
- cfhsi->dev->cfhsi_down(cfhsi->dev);
+ cfhsi->ops->cfhsi_down(cfhsi->ops);
/* Free buffers. */
kfree(tx_buf);
@@ -1368,85 +1299,184 @@ static int cfhsi_close(struct net_device *ndev)
return 0;
}
-static const struct net_device_ops cfhsi_ops = {
+static void cfhsi_uninit(struct net_device *dev)
+{
+ struct cfhsi *cfhsi = netdev_priv(dev);
+ ASSERT_RTNL();
+ symbol_put(cfhsi_get_device);
+ list_del(&cfhsi->list);
+}
+
+static const struct net_device_ops cfhsi_netdevops = {
+ .ndo_uninit = cfhsi_uninit,
.ndo_open = cfhsi_open,
.ndo_stop = cfhsi_close,
.ndo_start_xmit = cfhsi_xmit
};
-int cfhsi_remove(struct platform_device *pdev)
+static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
{
- struct list_head *list_node;
- struct list_head *n;
- struct cfhsi *cfhsi = NULL;
- struct cfhsi_dev *dev;
+ int i;
- dev = (struct cfhsi_dev *)pdev->dev.platform_data;
- spin_lock(&cfhsi_list_lock);
- list_for_each_safe(list_node, n, &cfhsi_list) {
- cfhsi = list_entry(list_node, struct cfhsi, list);
- /* Find the corresponding device. */
- if (cfhsi->dev == dev) {
- /* Remove from list. */
- list_del(list_node);
- spin_unlock(&cfhsi_list_lock);
- return 0;
- }
+ if (!data) {
+ pr_debug("no params data found\n");
+ return;
}
- spin_unlock(&cfhsi_list_lock);
- return -ENODEV;
+
+ i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
+ /*
+ * Inactivity timeout in millisecs. Lowest possible value is 1,
+ * and highest possible is NEXT_TIMER_MAX_DELTA.
+ */
+ if (data[i]) {
+ u32 inactivity_timeout = nla_get_u32(data[i]);
+ /* Pre-calculate inactivity timeout. */
+ cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
+ if (cfhsi->cfg.inactivity_timeout == 0)
+ cfhsi->cfg.inactivity_timeout = 1;
+ else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
+ cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
+ }
+
+ i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
+ if (data[i])
+ cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_HEAD_ALIGN;
+ if (data[i])
+ cfhsi->cfg.head_align = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_TAIL_ALIGN;
+ if (data[i])
+ cfhsi->cfg.tail_align = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
+ if (data[i])
+ cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
+
+ i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
+ if (data[i])
+ cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
+}
+
+static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ cfhsi_netlink_parms(data, netdev_priv(dev));
+ netdev_state_change(dev);
+ return 0;
}
-struct platform_driver cfhsi_plat_drv = {
- .probe = cfhsi_probe,
- .remove = cfhsi_remove,
- .driver = {
- .name = "cfhsi",
- .owner = THIS_MODULE,
- },
+static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
+ [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
+ [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
};
-static void __exit cfhsi_exit_module(void)
+static size_t caif_hsi_get_size(const struct net_device *dev)
+{
+ int i;
+ size_t s = 0;
+ for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
+ s += nla_total_size(caif_hsi_policy[i].len);
+ return s;
+}
+
+static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct cfhsi *cfhsi = netdev_priv(dev);
+
+ if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
+ cfhsi->cfg.inactivity_timeout) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
+ cfhsi->cfg.aggregation_timeout) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
+ cfhsi->cfg.head_align) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
+ cfhsi->cfg.tail_align) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
+ cfhsi->cfg.q_high_mark) ||
+ nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
+ cfhsi->cfg.q_low_mark))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
{
- struct list_head *list_node;
- struct list_head *n;
struct cfhsi *cfhsi = NULL;
+ struct cfhsi_ops *(*get_ops)(void);
- spin_lock(&cfhsi_list_lock);
- list_for_each_safe(list_node, n, &cfhsi_list) {
- cfhsi = list_entry(list_node, struct cfhsi, list);
+ ASSERT_RTNL();
- /* Remove from list. */
- list_del(list_node);
- spin_unlock(&cfhsi_list_lock);
+ cfhsi = netdev_priv(dev);
+ cfhsi_netlink_parms(data, cfhsi);
+ dev_net_set(cfhsi->ndev, src_net);
+
+ get_ops = symbol_get(cfhsi_get_ops);
+ if (!get_ops) {
+ pr_err("%s: failed to get the cfhsi_ops\n", __func__);
+ return -ENODEV;
+ }
- unregister_netdevice(cfhsi->ndev);
+ /* Assign the HSI device. */
+ cfhsi->ops = (*get_ops)();
+ if (!cfhsi->ops) {
+ pr_err("%s: failed to get the cfhsi_ops\n", __func__);
+ goto err;
+ }
- spin_lock(&cfhsi_list_lock);
+ /* Assign the driver to this HSI device. */
+ cfhsi->ops->cb_ops = &cfhsi->cb_ops;
+ if (register_netdevice(dev)) {
+ pr_warn("%s: caif_hsi device registration failed\n", __func__);
+ goto err;
}
- spin_unlock(&cfhsi_list_lock);
+ /* Add CAIF HSI device to list. */
+ list_add_tail(&cfhsi->list, &cfhsi_list);
- /* Unregister platform driver. */
- platform_driver_unregister(&cfhsi_plat_drv);
+ return 0;
+err:
+ symbol_put(cfhsi_get_ops);
+ return -ENODEV;
}
-static int __init cfhsi_init_module(void)
+static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
+ .kind = "cfhsi",
+ .priv_size = sizeof(struct cfhsi),
+ .setup = cfhsi_setup,
+ .maxtype = __IFLA_CAIF_HSI_MAX,
+ .policy = caif_hsi_policy,
+ .newlink = caif_hsi_newlink,
+ .changelink = caif_hsi_changelink,
+ .get_size = caif_hsi_get_size,
+ .fill_info = caif_hsi_fill_info,
+};
+
+static void __exit cfhsi_exit_module(void)
{
- int result;
+ struct list_head *list_node;
+ struct list_head *n;
+ struct cfhsi *cfhsi;
- /* Initialize spin lock. */
- spin_lock_init(&cfhsi_list_lock);
+ rtnl_link_unregister(&caif_hsi_link_ops);
- /* Register platform driver. */
- result = platform_driver_register(&cfhsi_plat_drv);
- if (result) {
- printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
- result);
- goto err_dev_register;
+ rtnl_lock();
+ list_for_each_safe(list_node, n, &cfhsi_list) {
+ cfhsi = list_entry(list_node, struct cfhsi, list);
+ unregister_netdev(cfhsi->ndev);
}
+ rtnl_unlock();
+}
- err_dev_register:
- return result;
+static int __init cfhsi_init_module(void)
+{
+ return rtnl_link_register(&caif_hsi_link_ops);
}
module_init(cfhsi_init_module);
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 6ea905c2cf6d..fcff73a73b1d 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -170,7 +170,7 @@ static const struct at91_devtype_data at91_devtype_data[] __devinitconst = {
},
};
-static struct can_bittiming_const at91_bittiming_const = {
+static const struct can_bittiming_const at91_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 4,
.tseg1_max = 16,
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 3f88473423e9..f2d6d258a286 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -44,7 +44,7 @@ struct bfin_can_priv {
/*
* bfin can timing parameters
*/
-static struct can_bittiming_const bfin_can_bittiming_const = {
+static const struct can_bittiming_const bfin_can_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -597,7 +597,7 @@ static int __devinit bfin_can_probe(struct platform_device *pdev)
dev_info(&pdev->dev,
"%s device registered"
"(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
- DRV_NAME, (void *)priv->membase, priv->rx_irq,
+ DRV_NAME, priv->membase, priv->rx_irq,
priv->tx_irq, priv->err_irq, priv->can.clock.freq);
return 0;
diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig
index ffb9773d102d..3b83bafcd947 100644
--- a/drivers/net/can/c_can/Kconfig
+++ b/drivers/net/can/c_can/Kconfig
@@ -1,15 +1,23 @@
menuconfig CAN_C_CAN
- tristate "Bosch C_CAN devices"
+ tristate "Bosch C_CAN/D_CAN devices"
depends on CAN_DEV && HAS_IOMEM
if CAN_C_CAN
config CAN_C_CAN_PLATFORM
- tristate "Generic Platform Bus based C_CAN driver"
+ tristate "Generic Platform Bus based C_CAN/D_CAN driver"
---help---
- This driver adds support for the C_CAN chips connected to
- the "platform bus" (Linux abstraction for directly to the
+ This driver adds support for the C_CAN/D_CAN chips connected
+ to the "platform bus" (Linux abstraction for directly to the
processor attached devices) which can be found on various
- boards from ST Microelectronics (http://www.st.com)
- like the SPEAr1310 and SPEAr320 evaluation boards.
+ boards from ST Microelectronics (http://www.st.com) like the
+ SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
+ boards like am335x, dm814x, dm813x and dm811x.
+
+config CAN_C_CAN_PCI
+ tristate "Generic PCI Bus based C_CAN/D_CAN driver"
+ depends on PCI
+ ---help---
+ This driver adds support for the C_CAN/D_CAN chips connected
+ to the PCI bus.
endif
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
index 9273f6d5c4b7..ad1cc842170a 100644
--- a/drivers/net/can/c_can/Makefile
+++ b/drivers/net/can/c_can/Makefile
@@ -4,5 +4,6 @@
obj-$(CONFIG_CAN_C_CAN) += c_can.o
obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
+obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 86cd532c78f9..4c538e388655 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -41,6 +41,10 @@
#include "c_can.h"
+/* Number of interface registers */
+#define IF_ENUM_REG_LEN 11
+#define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
+
/* control register */
#define CONTROL_TEST BIT(7)
#define CONTROL_CCE BIT(6)
@@ -185,7 +189,7 @@ enum c_can_bus_error_types {
C_CAN_ERROR_PASSIVE,
};
-static struct can_bittiming_const c_can_bittiming_const = {
+static const struct can_bittiming_const c_can_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
.tseg1_max = 16,
@@ -209,10 +213,10 @@ static inline int get_tx_echo_msg_obj(const struct c_can_priv *priv)
C_CAN_MSG_OBJ_TX_FIRST;
}
-static u32 c_can_read_reg32(struct c_can_priv *priv, void *reg)
+static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
{
- u32 val = priv->read_reg(priv, reg);
- val |= ((u32) priv->read_reg(priv, reg + 2)) << 16;
+ u32 val = priv->read_reg(priv, index);
+ val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
return val;
}
@@ -220,14 +224,14 @@ static void c_can_enable_all_interrupts(struct c_can_priv *priv,
int enable)
{
unsigned int cntrl_save = priv->read_reg(priv,
- &priv->regs->control);
+ C_CAN_CTRL_REG);
if (enable)
cntrl_save |= (CONTROL_SIE | CONTROL_EIE | CONTROL_IE);
else
cntrl_save &= ~(CONTROL_EIE | CONTROL_IE | CONTROL_SIE);
- priv->write_reg(priv, &priv->regs->control, cntrl_save);
+ priv->write_reg(priv, C_CAN_CTRL_REG, cntrl_save);
}
static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
@@ -235,7 +239,7 @@ static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
int count = MIN_TIMEOUT_VALUE;
while (count && priv->read_reg(priv,
- &priv->regs->ifregs[iface].com_req) &
+ C_CAN_IFACE(COMREQ_REG, iface)) &
IF_COMR_BUSY) {
count--;
udelay(1);
@@ -258,9 +262,9 @@ static inline void c_can_object_get(struct net_device *dev,
* register and message RAM must be complete in 6 CAN-CLK
* period.
*/
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
IFX_WRITE_LOW_16BIT(objno));
if (c_can_msg_obj_is_busy(priv, iface))
@@ -278,9 +282,9 @@ static inline void c_can_object_put(struct net_device *dev,
* register and message RAM must be complete in 6 CAN-CLK
* period.
*/
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_mask,
+ priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
(IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
- priv->write_reg(priv, &priv->regs->ifregs[iface].com_req,
+ priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
IFX_WRITE_LOW_16BIT(objno));
if (c_can_msg_obj_is_busy(priv, iface))
@@ -306,18 +310,18 @@ static void c_can_write_msg_object(struct net_device *dev,
flags |= IF_ARB_MSGVAL;
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, flags |
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
IFX_WRITE_HIGH_16BIT(id));
for (i = 0; i < frame->can_dlc; i += 2) {
- priv->write_reg(priv, &priv->regs->ifregs[iface].data[i / 2],
+ priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
frame->data[i] | (frame->data[i + 1] << 8));
}
/* enable interrupt for this message object */
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
frame->can_dlc);
c_can_object_put(dev, iface, objno, IF_COMM_ALL);
@@ -329,7 +333,7 @@ static inline void c_can_mark_rx_msg_obj(struct net_device *dev,
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
ctrl_mask & ~(IF_MCONT_MSGLST | IF_MCONT_INTPND));
c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
@@ -343,7 +347,7 @@ static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
struct c_can_priv *priv = netdev_priv(dev);
for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++) {
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
ctrl_mask & ~(IF_MCONT_MSGLST |
IF_MCONT_INTPND | IF_MCONT_NEWDAT));
c_can_object_put(dev, iface, i, IF_COMM_CONTROL);
@@ -356,7 +360,7 @@ static inline void c_can_activate_rx_msg_obj(struct net_device *dev,
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
ctrl_mask & ~(IF_MCONT_MSGLST |
IF_MCONT_INTPND | IF_MCONT_NEWDAT));
c_can_object_put(dev, iface, obj, IF_COMM_CONTROL);
@@ -374,7 +378,7 @@ static void c_can_handle_lost_msg_obj(struct net_device *dev,
c_can_object_get(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl,
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
IF_MCONT_CLR_MSGLST);
c_can_object_put(dev, 0, objno, IF_COMM_CONTROL);
@@ -410,8 +414,8 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
frame->can_dlc = get_can_dlc(ctrl & 0x0F);
- flags = priv->read_reg(priv, &priv->regs->ifregs[iface].arb2);
- val = priv->read_reg(priv, &priv->regs->ifregs[iface].arb1) |
+ flags = priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface));
+ val = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface)) |
(flags << 16);
if (flags & IF_ARB_MSGXTD)
@@ -424,7 +428,7 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, int ctrl)
else {
for (i = 0; i < frame->can_dlc; i += 2) {
data = priv->read_reg(priv,
- &priv->regs->ifregs[iface].data[i / 2]);
+ C_CAN_IFACE(DATA1_REG, iface) + i / 2);
frame->data[i] = data;
frame->data[i + 1] = data >> 8;
}
@@ -444,40 +448,40 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].mask1,
+ priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
IFX_WRITE_LOW_16BIT(mask));
- priv->write_reg(priv, &priv->regs->ifregs[iface].mask2,
+ priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
IFX_WRITE_HIGH_16BIT(mask));
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb1,
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
IFX_WRITE_LOW_16BIT(id));
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb2,
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
(IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, mcont);
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
- c_can_read_reg32(priv, &priv->regs->msgval1));
+ c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
}
static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
{
struct c_can_priv *priv = netdev_priv(dev);
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb1, 0);
- priv->write_reg(priv, &priv->regs->ifregs[iface].arb2, 0);
- priv->write_reg(priv, &priv->regs->ifregs[iface].msg_cntrl, 0);
+ priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
+ priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
+ priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
- c_can_read_reg32(priv, &priv->regs->msgval1));
+ c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
}
static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
{
- int val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+ int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
/*
* as transmission request register's bit n-1 corresponds to
@@ -540,12 +544,12 @@ static int c_can_set_bittiming(struct net_device *dev)
netdev_info(dev,
"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
- ctrl_save = priv->read_reg(priv, &priv->regs->control);
- priv->write_reg(priv, &priv->regs->control,
+ ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
+ priv->write_reg(priv, C_CAN_CTRL_REG,
ctrl_save | CONTROL_CCE | CONTROL_INIT);
- priv->write_reg(priv, &priv->regs->btr, reg_btr);
- priv->write_reg(priv, &priv->regs->brp_ext, reg_brpe);
- priv->write_reg(priv, &priv->regs->control, ctrl_save);
+ priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
+ priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
+ priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);
return 0;
}
@@ -587,36 +591,36 @@ static void c_can_chip_config(struct net_device *dev)
struct c_can_priv *priv = netdev_priv(dev);
/* enable automatic retransmission */
- priv->write_reg(priv, &priv->regs->control,
+ priv->write_reg(priv, C_CAN_CTRL_REG,
CONTROL_ENABLE_AR);
if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
(priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
/* loopback + silent mode : useful for hot self-test */
- priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
- priv->write_reg(priv, &priv->regs->test,
+ priv->write_reg(priv, C_CAN_TEST_REG,
TEST_LBACK | TEST_SILENT);
} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
/* loopback mode : useful for self-test function */
- priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
- priv->write_reg(priv, &priv->regs->test, TEST_LBACK);
+ priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
/* silent mode : bus-monitoring mode */
- priv->write_reg(priv, &priv->regs->control, CONTROL_EIE |
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_EIE |
CONTROL_SIE | CONTROL_IE | CONTROL_TEST);
- priv->write_reg(priv, &priv->regs->test, TEST_SILENT);
+ priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
} else
/* normal mode*/
- priv->write_reg(priv, &priv->regs->control,
+ priv->write_reg(priv, C_CAN_CTRL_REG,
CONTROL_EIE | CONTROL_SIE | CONTROL_IE);
/* configure message objects */
c_can_configure_msg_objects(dev);
/* set a `lec` value so that we can check for updates later */
- priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
/* set bittiming params */
c_can_set_bittiming(dev);
@@ -669,7 +673,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
unsigned int reg_err_counter;
struct c_can_priv *priv = netdev_priv(dev);
- reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
ERR_CNT_REC_SHIFT;
bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
@@ -697,12 +701,12 @@ static void c_can_do_tx(struct net_device *dev)
for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
msg_obj_no = get_tx_echo_msg_obj(priv);
- val = c_can_read_reg32(priv, &priv->regs->txrqst1);
+ val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
if (!(val & (1 << (msg_obj_no - 1)))) {
can_get_echo_skb(dev,
msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
stats->tx_bytes += priv->read_reg(priv,
- &priv->regs->ifregs[0].msg_cntrl)
+ C_CAN_IFACE(MSGCTRL_REG, 0))
& IF_MCONT_DLC_MASK;
stats->tx_packets++;
c_can_inval_msg_object(dev, 0, msg_obj_no);
@@ -744,11 +748,11 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
u32 num_rx_pkts = 0;
unsigned int msg_obj, msg_ctrl_save;
struct c_can_priv *priv = netdev_priv(dev);
- u32 val = c_can_read_reg32(priv, &priv->regs->intpnd1);
+ u32 val = c_can_read_reg32(priv, C_CAN_INTPND1_REG);
for (msg_obj = C_CAN_MSG_OBJ_RX_FIRST;
msg_obj <= C_CAN_MSG_OBJ_RX_LAST && quota > 0;
- val = c_can_read_reg32(priv, &priv->regs->intpnd1),
+ val = c_can_read_reg32(priv, C_CAN_INTPND1_REG),
msg_obj++) {
/*
* as interrupt pending register's bit n-1 corresponds to
@@ -758,7 +762,7 @@ static int c_can_do_rx_poll(struct net_device *dev, int quota)
c_can_object_get(dev, 0, msg_obj, IF_COMM_ALL &
~IF_COMM_TXRQST);
msg_ctrl_save = priv->read_reg(priv,
- &priv->regs->ifregs[0].msg_cntrl);
+ C_CAN_IFACE(MSGCTRL_REG, 0));
if (msg_ctrl_save & IF_MCONT_EOB)
return num_rx_pkts;
@@ -819,7 +823,7 @@ static int c_can_handle_state_change(struct net_device *dev,
return 0;
c_can_get_berr_counter(dev, &bec);
- reg_err_counter = priv->read_reg(priv, &priv->regs->err_cnt);
+ reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
ERR_CNT_RP_SHIFT;
@@ -935,7 +939,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
}
/* set a `lec` value so that we can check for updates later */
- priv->write_reg(priv, &priv->regs->status, LEC_UNUSED);
+ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
netif_receive_skb(skb);
stats->rx_packets++;
@@ -959,15 +963,15 @@ static int c_can_poll(struct napi_struct *napi, int quota)
/* status events have the highest priority */
if (irqstatus == STATUS_INTERRUPT) {
priv->current_status = priv->read_reg(priv,
- &priv->regs->status);
+ C_CAN_STS_REG);
/* handle Tx/Rx events */
if (priv->current_status & STATUS_TXOK)
- priv->write_reg(priv, &priv->regs->status,
+ priv->write_reg(priv, C_CAN_STS_REG,
priv->current_status & ~STATUS_TXOK);
if (priv->current_status & STATUS_RXOK)
- priv->write_reg(priv, &priv->regs->status,
+ priv->write_reg(priv, C_CAN_STS_REG,
priv->current_status & ~STATUS_RXOK);
/* handle state changes */
@@ -1033,7 +1037,7 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
struct net_device *dev = (struct net_device *)dev_id;
struct c_can_priv *priv = netdev_priv(dev);
- priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
+ priv->irqstatus = priv->read_reg(priv, C_CAN_INT_REG);
if (!priv->irqstatus)
return IRQ_NONE;
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 5f32d34af507..01a7049ab990 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -22,43 +22,129 @@
#ifndef C_CAN_H
#define C_CAN_H
-/* c_can IF registers */
-struct c_can_if_regs {
- u16 com_req;
- u16 com_mask;
- u16 mask1;
- u16 mask2;
- u16 arb1;
- u16 arb2;
- u16 msg_cntrl;
- u16 data[4];
- u16 _reserved[13];
+enum reg {
+ C_CAN_CTRL_REG = 0,
+ C_CAN_STS_REG,
+ C_CAN_ERR_CNT_REG,
+ C_CAN_BTR_REG,
+ C_CAN_INT_REG,
+ C_CAN_TEST_REG,
+ C_CAN_BRPEXT_REG,
+ C_CAN_IF1_COMREQ_REG,
+ C_CAN_IF1_COMMSK_REG,
+ C_CAN_IF1_MASK1_REG,
+ C_CAN_IF1_MASK2_REG,
+ C_CAN_IF1_ARB1_REG,
+ C_CAN_IF1_ARB2_REG,
+ C_CAN_IF1_MSGCTRL_REG,
+ C_CAN_IF1_DATA1_REG,
+ C_CAN_IF1_DATA2_REG,
+ C_CAN_IF1_DATA3_REG,
+ C_CAN_IF1_DATA4_REG,
+ C_CAN_IF2_COMREQ_REG,
+ C_CAN_IF2_COMMSK_REG,
+ C_CAN_IF2_MASK1_REG,
+ C_CAN_IF2_MASK2_REG,
+ C_CAN_IF2_ARB1_REG,
+ C_CAN_IF2_ARB2_REG,
+ C_CAN_IF2_MSGCTRL_REG,
+ C_CAN_IF2_DATA1_REG,
+ C_CAN_IF2_DATA2_REG,
+ C_CAN_IF2_DATA3_REG,
+ C_CAN_IF2_DATA4_REG,
+ C_CAN_TXRQST1_REG,
+ C_CAN_TXRQST2_REG,
+ C_CAN_NEWDAT1_REG,
+ C_CAN_NEWDAT2_REG,
+ C_CAN_INTPND1_REG,
+ C_CAN_INTPND2_REG,
+ C_CAN_MSGVAL1_REG,
+ C_CAN_MSGVAL2_REG,
};
-/* c_can hardware registers */
-struct c_can_regs {
- u16 control;
- u16 status;
- u16 err_cnt;
- u16 btr;
- u16 interrupt;
- u16 test;
- u16 brp_ext;
- u16 _reserved1;
- struct c_can_if_regs ifregs[2]; /* [0] = IF1 and [1] = IF2 */
- u16 _reserved2[8];
- u16 txrqst1;
- u16 txrqst2;
- u16 _reserved3[6];
- u16 newdat1;
- u16 newdat2;
- u16 _reserved4[6];
- u16 intpnd1;
- u16 intpnd2;
- u16 _reserved5[6];
- u16 msgval1;
- u16 msgval2;
- u16 _reserved6[6];
+static const u16 reg_map_c_can[] = {
+ [C_CAN_CTRL_REG] = 0x00,
+ [C_CAN_STS_REG] = 0x02,
+ [C_CAN_ERR_CNT_REG] = 0x04,
+ [C_CAN_BTR_REG] = 0x06,
+ [C_CAN_INT_REG] = 0x08,
+ [C_CAN_TEST_REG] = 0x0A,
+ [C_CAN_BRPEXT_REG] = 0x0C,
+ [C_CAN_IF1_COMREQ_REG] = 0x10,
+ [C_CAN_IF1_COMMSK_REG] = 0x12,
+ [C_CAN_IF1_MASK1_REG] = 0x14,
+ [C_CAN_IF1_MASK2_REG] = 0x16,
+ [C_CAN_IF1_ARB1_REG] = 0x18,
+ [C_CAN_IF1_ARB2_REG] = 0x1A,
+ [C_CAN_IF1_MSGCTRL_REG] = 0x1C,
+ [C_CAN_IF1_DATA1_REG] = 0x1E,
+ [C_CAN_IF1_DATA2_REG] = 0x20,
+ [C_CAN_IF1_DATA3_REG] = 0x22,
+ [C_CAN_IF1_DATA4_REG] = 0x24,
+ [C_CAN_IF2_COMREQ_REG] = 0x40,
+ [C_CAN_IF2_COMMSK_REG] = 0x42,
+ [C_CAN_IF2_MASK1_REG] = 0x44,
+ [C_CAN_IF2_MASK2_REG] = 0x46,
+ [C_CAN_IF2_ARB1_REG] = 0x48,
+ [C_CAN_IF2_ARB2_REG] = 0x4A,
+ [C_CAN_IF2_MSGCTRL_REG] = 0x4C,
+ [C_CAN_IF2_DATA1_REG] = 0x4E,
+ [C_CAN_IF2_DATA2_REG] = 0x50,
+ [C_CAN_IF2_DATA3_REG] = 0x52,
+ [C_CAN_IF2_DATA4_REG] = 0x54,
+ [C_CAN_TXRQST1_REG] = 0x80,
+ [C_CAN_TXRQST2_REG] = 0x82,
+ [C_CAN_NEWDAT1_REG] = 0x90,
+ [C_CAN_NEWDAT2_REG] = 0x92,
+ [C_CAN_INTPND1_REG] = 0xA0,
+ [C_CAN_INTPND2_REG] = 0xA2,
+ [C_CAN_MSGVAL1_REG] = 0xB0,
+ [C_CAN_MSGVAL2_REG] = 0xB2,
+};
+
+static const u16 reg_map_d_can[] = {
+ [C_CAN_CTRL_REG] = 0x00,
+ [C_CAN_STS_REG] = 0x04,
+ [C_CAN_ERR_CNT_REG] = 0x08,
+ [C_CAN_BTR_REG] = 0x0C,
+ [C_CAN_BRPEXT_REG] = 0x0E,
+ [C_CAN_INT_REG] = 0x10,
+ [C_CAN_TEST_REG] = 0x14,
+ [C_CAN_TXRQST1_REG] = 0x88,
+ [C_CAN_TXRQST2_REG] = 0x8A,
+ [C_CAN_NEWDAT1_REG] = 0x9C,
+ [C_CAN_NEWDAT2_REG] = 0x9E,
+ [C_CAN_INTPND1_REG] = 0xB0,
+ [C_CAN_INTPND2_REG] = 0xB2,
+ [C_CAN_MSGVAL1_REG] = 0xC4,
+ [C_CAN_MSGVAL2_REG] = 0xC6,
+ [C_CAN_IF1_COMREQ_REG] = 0x100,
+ [C_CAN_IF1_COMMSK_REG] = 0x102,
+ [C_CAN_IF1_MASK1_REG] = 0x104,
+ [C_CAN_IF1_MASK2_REG] = 0x106,
+ [C_CAN_IF1_ARB1_REG] = 0x108,
+ [C_CAN_IF1_ARB2_REG] = 0x10A,
+ [C_CAN_IF1_MSGCTRL_REG] = 0x10C,
+ [C_CAN_IF1_DATA1_REG] = 0x110,
+ [C_CAN_IF1_DATA2_REG] = 0x112,
+ [C_CAN_IF1_DATA3_REG] = 0x114,
+ [C_CAN_IF1_DATA4_REG] = 0x116,
+ [C_CAN_IF2_COMREQ_REG] = 0x120,
+ [C_CAN_IF2_COMMSK_REG] = 0x122,
+ [C_CAN_IF2_MASK1_REG] = 0x124,
+ [C_CAN_IF2_MASK2_REG] = 0x126,
+ [C_CAN_IF2_ARB1_REG] = 0x128,
+ [C_CAN_IF2_ARB2_REG] = 0x12A,
+ [C_CAN_IF2_MSGCTRL_REG] = 0x12C,
+ [C_CAN_IF2_DATA1_REG] = 0x130,
+ [C_CAN_IF2_DATA2_REG] = 0x132,
+ [C_CAN_IF2_DATA3_REG] = 0x134,
+ [C_CAN_IF2_DATA4_REG] = 0x136,
+};
+
+enum c_can_dev_id {
+ C_CAN_DEVTYPE,
+ D_CAN_DEVTYPE,
};
/* c_can private data structure */
@@ -69,9 +155,10 @@ struct c_can_priv {
int tx_object;
int current_status;
int last_status;
- u16 (*read_reg) (struct c_can_priv *priv, void *reg);
- void (*write_reg) (struct c_can_priv *priv, void *reg, u16 val);
- struct c_can_regs __iomem *regs;
+ u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
+ void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
+ void __iomem *base;
+ const u16 *regs;
unsigned long irq_flags; /* for request_irq() */
unsigned int tx_next;
unsigned int tx_echo;
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
new file mode 100644
index 000000000000..1011146ea513
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -0,0 +1,221 @@
+/*
+ * PCI bus driver for Bosch C_CAN/D_CAN controller
+ *
+ * Copyright (C) 2012 Federico Vaga <federico.vaga@gmail.com>
+ *
+ * Borrowed from c_can_platform.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+enum c_can_pci_reg_align {
+ C_CAN_REG_ALIGN_16,
+ C_CAN_REG_ALIGN_32,
+};
+
+struct c_can_pci_data {
+ /* Specify if is C_CAN or D_CAN */
+ enum c_can_dev_id type;
+ /* Set the register alignment in the memory */
+ enum c_can_pci_reg_align reg_align;
+ /* Set the frequency */
+ unsigned int freq;
+};
+
+/*
+ * 16-bit c_can registers can be arranged differently in the memory
+ * architecture of different implementations. For example: 16-bit
+ * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
+ * Handle the same by providing a common read/write interface.
+ */
+static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+ enum reg index)
+{
+ return readw(priv->base + priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+ enum reg index, u16 val)
+{
+ writew(val, priv->base + priv->regs[index]);
+}
+
+static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+ enum reg index)
+{
+ return readw(priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+ enum reg index, u16 val)
+{
+ writew(val, priv->base + 2 * priv->regs[index]);
+}
+
+static int __devinit c_can_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
+ struct c_can_priv *priv;
+ struct net_device *dev;
+ void __iomem *addr;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_enable_device FAILED\n");
+ goto out;
+ }
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "pci_request_regions FAILED\n");
+ goto out_disable_device;
+ }
+
+ pci_set_master(pdev);
+ pci_enable_msi(pdev);
+
+ addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+ if (!addr) {
+ dev_err(&pdev->dev,
+ "device has no PCI memory resources, "
+ "failing adapter\n");
+ ret = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ /* allocate the c_can device */
+ dev = alloc_c_can_dev();
+ if (!dev) {
+ ret = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ priv = netdev_priv(dev);
+ pci_set_drvdata(pdev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ dev->irq = pdev->irq;
+ priv->base = addr;
+
+ if (!c_can_pci_data->freq) {
+ dev_err(&pdev->dev, "no clock frequency defined\n");
+ ret = -ENODEV;
+ goto out_free_c_can;
+ } else {
+ priv->can.clock.freq = c_can_pci_data->freq;
+ }
+
+ /* Configure CAN type */
+ switch (c_can_pci_data->type) {
+ case C_CAN_DEVTYPE:
+ priv->regs = reg_map_c_can;
+ break;
+ case D_CAN_DEVTYPE:
+ priv->regs = reg_map_d_can;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_free_c_can;
+ }
+
+ /* Configure access to registers */
+ switch (c_can_pci_data->reg_align) {
+ case C_CAN_REG_ALIGN_32:
+ priv->read_reg = c_can_pci_read_reg_aligned_to_32bit;
+ priv->write_reg = c_can_pci_write_reg_aligned_to_32bit;
+ break;
+ case C_CAN_REG_ALIGN_16:
+ priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_free_c_can;
+ }
+
+ ret = register_c_can_dev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+ KBUILD_MODNAME, ret);
+ goto out_free_c_can;
+ }
+
+ dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+ KBUILD_MODNAME, priv->regs, dev->irq);
+
+ return 0;
+
+out_free_c_can:
+ pci_set_drvdata(pdev, NULL);
+ free_c_can_dev(dev);
+out_iounmap:
+ pci_iounmap(pdev, addr);
+out_release_regions:
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+out_disable_device:
+ pci_disable_device(pdev);
+out:
+ return ret;
+}
+
+static void __devexit c_can_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct c_can_priv *priv = netdev_priv(dev);
+
+ unregister_c_can_dev(dev);
+
+ pci_set_drvdata(pdev, NULL);
+ free_c_can_dev(dev);
+
+ pci_iounmap(pdev, priv->base);
+ pci_disable_msi(pdev);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct c_can_pci_data c_can_sta2x11= {
+ .type = C_CAN_DEVTYPE,
+ .reg_align = C_CAN_REG_ALIGN_32,
+ .freq = 52000000, /* 52 Mhz */
+};
+
+#define C_CAN_ID(_vend, _dev, _driverdata) { \
+ PCI_DEVICE(_vend, _dev), \
+ .driver_data = (unsigned long)&_driverdata, \
+}
+static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = {
+ C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN,
+ c_can_sta2x11),
+ {},
+};
+static struct pci_driver c_can_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = c_can_pci_tbl,
+ .probe = c_can_pci_probe,
+ .remove = __devexit_p(c_can_pci_remove),
+};
+
+module_pci_driver(c_can_pci_driver);
+
+MODULE_AUTHOR("Federico Vaga <federico.vaga@gmail.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PCI CAN bus driver for Bosch C_CAN/D_CAN controller");
+MODULE_DEVICE_TABLE(pci, c_can_pci_tbl);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 5e1a5ff6476e..f0921d16f0a9 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -42,27 +42,27 @@
* Handle the same by providing a common read/write interface.
*/
static u16 c_can_plat_read_reg_aligned_to_16bit(struct c_can_priv *priv,
- void *reg)
+ enum reg index)
{
- return readw(reg);
+ return readw(priv->base + priv->regs[index]);
}
static void c_can_plat_write_reg_aligned_to_16bit(struct c_can_priv *priv,
- void *reg, u16 val)
+ enum reg index, u16 val)
{
- writew(val, reg);
+ writew(val, priv->base + priv->regs[index]);
}
static u16 c_can_plat_read_reg_aligned_to_32bit(struct c_can_priv *priv,
- void *reg)
+ enum reg index)
{
- return readw(reg + (long)reg - (long)priv->regs);
+ return readw(priv->base + 2 * priv->regs[index]);
}
static void c_can_plat_write_reg_aligned_to_32bit(struct c_can_priv *priv,
- void *reg, u16 val)
+ enum reg index, u16 val)
{
- writew(val, reg + (long)reg - (long)priv->regs);
+ writew(val, priv->base + 2 * priv->regs[index]);
}
static int __devinit c_can_plat_probe(struct platform_device *pdev)
@@ -71,6 +71,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
void __iomem *addr;
struct net_device *dev;
struct c_can_priv *priv;
+ const struct platform_device_id *id;
struct resource *mem;
int irq;
#ifdef CONFIG_HAVE_CLK
@@ -115,26 +116,40 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
}
priv = netdev_priv(dev);
+ id = platform_get_device_id(pdev);
+ switch (id->driver_data) {
+ case C_CAN_DEVTYPE:
+ priv->regs = reg_map_c_can;
+ switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_32BIT:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
+ break;
+ case IORESOURCE_MEM_16BIT:
+ default:
+ priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ break;
+ }
+ break;
+ case D_CAN_DEVTYPE:
+ priv->regs = reg_map_d_can;
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+ priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
+ priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
+ break;
+ default:
+ ret = -EINVAL;
+ goto exit_free_device;
+ }
dev->irq = irq;
- priv->regs = addr;
+ priv->base = addr;
#ifdef CONFIG_HAVE_CLK
priv->can.clock.freq = clk_get_rate(clk);
priv->priv = clk;
#endif
- switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) {
- case IORESOURCE_MEM_32BIT:
- priv->read_reg = c_can_plat_read_reg_aligned_to_32bit;
- priv->write_reg = c_can_plat_write_reg_aligned_to_32bit;
- break;
- case IORESOURCE_MEM_16BIT:
- default:
- priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
- priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
- break;
- }
-
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -146,7 +161,7 @@ static int __devinit c_can_plat_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
- KBUILD_MODNAME, priv->regs, dev->irq);
+ KBUILD_MODNAME, priv->base, dev->irq);
return 0;
exit_free_device:
@@ -176,7 +191,7 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
free_c_can_dev(dev);
- iounmap(priv->regs);
+ iounmap(priv->base);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
@@ -188,6 +203,20 @@ static int __devexit c_can_plat_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id c_can_id_table[] = {
+ {
+ .name = KBUILD_MODNAME,
+ .driver_data = C_CAN_DEVTYPE,
+ }, {
+ .name = "c_can",
+ .driver_data = C_CAN_DEVTYPE,
+ }, {
+ .name = "d_can",
+ .driver_data = D_CAN_DEVTYPE,
+ }, {
+ }
+};
+
static struct platform_driver c_can_plat_driver = {
.driver = {
.name = KBUILD_MODNAME,
@@ -195,6 +224,7 @@ static struct platform_driver c_can_plat_driver = {
},
.probe = c_can_plat_probe,
.remove = __devexit_p(c_can_plat_remove),
+ .id_table = c_can_id_table,
};
module_platform_driver(c_can_plat_driver);
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index d42a6a7396f2..0f12abf6591c 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -90,7 +90,7 @@ static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = {
[CC770_OBJ_TX] = 0,
};
-static struct can_bittiming_const cc770_bittiming_const = {
+static const struct can_bittiming_const cc770_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -695,7 +695,7 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
netif_wake_queue(dev);
}
-irqreturn_t cc770_interrupt(int irq, void *dev_id)
+static irqreturn_t cc770_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *)dev_id;
struct cc770_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index f03d7a481a80..963e2ccd10db 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -33,6 +33,39 @@ MODULE_DESCRIPTION(MOD_DESC);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+/* CAN DLC to real data length conversion helpers */
+
+static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 12, 16, 20, 24, 32, 48, 64};
+
+/* get data length from can_dlc with sanitized can_dlc */
+u8 can_dlc2len(u8 can_dlc)
+{
+ return dlc2len[can_dlc & 0x0F];
+}
+EXPORT_SYMBOL_GPL(can_dlc2len);
+
+static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
+ 9, 9, 9, 9, /* 9 - 12 */
+ 10, 10, 10, 10, /* 13 - 16 */
+ 11, 11, 11, 11, /* 17 - 20 */
+ 12, 12, 12, 12, /* 21 - 24 */
+ 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
+ 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
+ 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
+ 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_len2dlc(u8 len)
+{
+ if (unlikely(len > 64))
+ return 0xF;
+
+ return len2dlc[len];
+}
+EXPORT_SYMBOL_GPL(can_len2dlc);
+
#ifdef CONFIG_CAN_CALC_BITTIMING
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
@@ -368,7 +401,7 @@ EXPORT_SYMBOL_GPL(can_free_echo_skb);
/*
* CAN device restart for bus-off recovery
*/
-void can_restart(unsigned long data)
+static void can_restart(unsigned long data)
{
struct net_device *dev = (struct net_device *)data;
struct can_priv *priv = netdev_priv(dev);
@@ -454,7 +487,7 @@ EXPORT_SYMBOL_GPL(can_bus_off);
static void can_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = sizeof(struct can_frame);
+ dev->mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 10;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 81d474102378..c5f143165f80 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -34,6 +34,7 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>
@@ -165,10 +166,21 @@ struct flexcan_regs {
u32 imask1; /* 0x28 */
u32 iflag2; /* 0x2c */
u32 iflag1; /* 0x30 */
- u32 _reserved2[19];
+ u32 crl2; /* 0x34 */
+ u32 esr2; /* 0x38 */
+ u32 imeur; /* 0x3c */
+ u32 lrfr; /* 0x40 */
+ u32 crcr; /* 0x44 */
+ u32 rxfgmask; /* 0x48 */
+ u32 rxfir; /* 0x4c */
+ u32 _reserved3[12];
struct flexcan_mb cantxfg[64];
};
+struct flexcan_devtype_data {
+ u32 hw_ver; /* hardware controller version */
+};
+
struct flexcan_priv {
struct can_priv can;
struct net_device *dev;
@@ -178,11 +190,21 @@ struct flexcan_priv {
u32 reg_esr;
u32 reg_ctrl_default;
- struct clk *clk;
+ struct clk *clk_ipg;
+ struct clk *clk_per;
struct flexcan_platform_data *pdata;
+ const struct flexcan_devtype_data *devtype_data;
+};
+
+static struct flexcan_devtype_data fsl_p1010_devtype_data = {
+ .hw_ver = 3,
+};
+
+static struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+ .hw_ver = 10,
};
-static struct can_bittiming_const flexcan_bittiming_const = {
+static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
.tseg1_max = 16,
@@ -750,6 +772,9 @@ static int flexcan_chip_start(struct net_device *dev)
flexcan_write(0x0, &regs->rx14mask);
flexcan_write(0x0, &regs->rx15mask);
+ if (priv->devtype_data->hw_ver >= 10)
+ flexcan_write(0x0, &regs->rxfgmask);
+
flexcan_transceiver_switch(priv, 1);
/* synchronize with the can bus */
@@ -804,7 +829,8 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- clk_prepare_enable(priv->clk);
+ clk_prepare_enable(priv->clk_ipg);
+ clk_prepare_enable(priv->clk_per);
err = open_candev(dev);
if (err)
@@ -826,7 +852,8 @@ static int flexcan_open(struct net_device *dev)
out_close:
close_candev(dev);
out:
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_per);
+ clk_disable_unprepare(priv->clk_ipg);
return err;
}
@@ -840,7 +867,8 @@ static int flexcan_close(struct net_device *dev)
flexcan_chip_stop(dev);
free_irq(dev->irq, dev);
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_per);
+ clk_disable_unprepare(priv->clk_ipg);
close_candev(dev);
@@ -879,7 +907,8 @@ static int __devinit register_flexcandev(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg, err;
- clk_prepare_enable(priv->clk);
+ clk_prepare_enable(priv->clk_ipg);
+ clk_prepare_enable(priv->clk_per);
/* select "bus clock", chip must be disabled */
flexcan_chip_disable(priv);
@@ -912,7 +941,8 @@ static int __devinit register_flexcandev(struct net_device *dev)
out:
/* disable core and turn off clocks */
flexcan_chip_disable(priv);
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_per);
+ clk_disable_unprepare(priv->clk_ipg);
return err;
}
@@ -922,12 +952,25 @@ static void __devexit unregister_flexcandev(struct net_device *dev)
unregister_candev(dev);
}
+static const struct of_device_id flexcan_of_match[] = {
+ { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+ { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+ { /* sentinel */ },
+};
+
+static const struct platform_device_id flexcan_id_table[] = {
+ { .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
+ { /* sentinel */ },
+};
+
static int __devinit flexcan_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id;
+ const struct flexcan_devtype_data *devtype_data;
struct net_device *dev;
struct flexcan_priv *priv;
struct resource *mem;
- struct clk *clk = NULL;
+ struct clk *clk_ipg = NULL, *clk_per = NULL;
struct pinctrl *pinctrl;
void __iomem *base;
resource_size_t mem_size;
@@ -938,23 +981,25 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
if (IS_ERR(pinctrl))
return PTR_ERR(pinctrl);
- if (pdev->dev.of_node) {
- const __be32 *clock_freq_p;
-
- clock_freq_p = of_get_property(pdev->dev.of_node,
- "clock-frequency", NULL);
- if (clock_freq_p)
- clock_freq = be32_to_cpup(clock_freq_p);
- }
+ if (pdev->dev.of_node)
+ of_property_read_u32(pdev->dev.of_node,
+ "clock-frequency", &clock_freq);
if (!clock_freq) {
- clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "no clock defined\n");
- err = PTR_ERR(clk);
+ clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(clk_ipg)) {
+ dev_err(&pdev->dev, "no ipg clock defined\n");
+ err = PTR_ERR(clk_ipg);
+ goto failed_clock;
+ }
+ clock_freq = clk_get_rate(clk_ipg);
+
+ clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(clk_per)) {
+ dev_err(&pdev->dev, "no per clock defined\n");
+ err = PTR_ERR(clk_per);
goto failed_clock;
}
- clock_freq = clk_get_rate(clk);
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -982,6 +1027,17 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
goto failed_alloc;
}
+ of_id = of_match_device(flexcan_of_match, &pdev->dev);
+ if (of_id) {
+ devtype_data = of_id->data;
+ } else if (pdev->id_entry->driver_data) {
+ devtype_data = (struct flexcan_devtype_data *)
+ pdev->id_entry->driver_data;
+ } else {
+ err = -ENODEV;
+ goto failed_devtype;
+ }
+
dev->netdev_ops = &flexcan_netdev_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -996,8 +1052,10 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
CAN_CTRLMODE_BERR_REPORTING;
priv->base = base;
priv->dev = dev;
- priv->clk = clk;
+ priv->clk_ipg = clk_ipg;
+ priv->clk_per = clk_per;
priv->pdata = pdev->dev.platform_data;
+ priv->devtype_data = devtype_data;
netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
@@ -1016,14 +1074,13 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
return 0;
failed_register:
+ failed_devtype:
free_candev(dev);
failed_alloc:
iounmap(base);
failed_map:
release_mem_region(mem->start, mem_size);
failed_get:
- if (clk)
- clk_put(clk);
failed_clock:
return err;
}
@@ -1041,20 +1098,46 @@ static int __devexit flexcan_remove(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
- if (priv->clk)
- clk_put(priv->clk);
-
free_candev(dev);
return 0;
}
-static struct of_device_id flexcan_of_match[] = {
- {
- .compatible = "fsl,p1010-flexcan",
- },
- {},
-};
+#ifdef CONFIG_PM
+static int flexcan_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ flexcan_chip_disable(priv);
+
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
+ }
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ return 0;
+}
+
+static int flexcan_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv = netdev_priv(dev);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ if (netif_running(dev)) {
+ netif_device_attach(dev);
+ netif_start_queue(dev);
+ }
+ flexcan_chip_enable(priv);
+
+ return 0;
+}
+#else
+#define flexcan_suspend NULL
+#define flexcan_resume NULL
+#endif
static struct platform_driver flexcan_driver = {
.driver = {
@@ -1064,6 +1147,9 @@ static struct platform_driver flexcan_driver = {
},
.probe = flexcan_probe,
.remove = __devexit_p(flexcan_remove),
+ .suspend = flexcan_suspend,
+ .resume = flexcan_resume,
+ .id_table = flexcan_id_table,
};
module_platform_driver(flexcan_driver);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 08c893cb7896..98ee43819911 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -116,6 +116,7 @@
#define ICAN3_BUSERR_QUOTA_MAX 255
/* Janz ICAN3 CAN Frame Conversion */
+#define ICAN3_SNGL 0x02
#define ICAN3_ECHO 0x10
#define ICAN3_EFF_RTR 0x40
#define ICAN3_SFF_RTR 0x10
@@ -220,6 +221,9 @@ struct ican3_dev {
/* old and new style host interface */
unsigned int iftype;
+ /* queue for echo packets */
+ struct sk_buff_head echoq;
+
/*
* Any function which changes the current DPM page must hold this
* lock while it is performing data accesses. This ensures that the
@@ -235,7 +239,6 @@ struct ican3_dev {
/* fast host interface */
unsigned int fastrx_start;
- unsigned int fastrx_int;
unsigned int fastrx_num;
unsigned int fasttx_start;
unsigned int fasttx_num;
@@ -454,7 +457,6 @@ static void __devinit ican3_init_fast_host_interface(struct ican3_dev *mod)
/* save the start recv page */
mod->fastrx_start = mod->free_page;
mod->fastrx_num = 0;
- mod->fastrx_int = 0;
/* build a single fast tohost queue descriptor */
memset(&desc, 0, sizeof(desc));
@@ -813,10 +815,10 @@ static void ican3_to_can_frame(struct ican3_dev *mod,
cf->can_id |= desc->data[0] << 3;
cf->can_id |= (desc->data[1] & 0xe0) >> 5;
- cf->can_dlc = desc->data[1] & ICAN3_CAN_DLC_MASK;
- memcpy(cf->data, &desc->data[2], sizeof(cf->data));
+ cf->can_dlc = get_can_dlc(desc->data[1] & ICAN3_CAN_DLC_MASK);
+ memcpy(cf->data, &desc->data[2], cf->can_dlc);
} else {
- cf->can_dlc = desc->data[0] & ICAN3_CAN_DLC_MASK;
+ cf->can_dlc = get_can_dlc(desc->data[0] & ICAN3_CAN_DLC_MASK);
if (desc->data[0] & ICAN3_EFF_RTR)
cf->can_id |= CAN_RTR_FLAG;
@@ -831,7 +833,7 @@ static void ican3_to_can_frame(struct ican3_dev *mod,
cf->can_id |= desc->data[3] >> 5; /* 2-0 */
}
- memcpy(cf->data, &desc->data[6], sizeof(cf->data));
+ memcpy(cf->data, &desc->data[6], cf->can_dlc);
}
}
@@ -847,6 +849,10 @@ static void can_frame_to_ican3(struct ican3_dev *mod,
desc->data[0] |= cf->can_dlc;
desc->data[1] |= ICAN3_ECHO;
+ /* support single transmission (no retries) mode */
+ if (mod->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ desc->data[1] |= ICAN3_SNGL;
+
if (cf->can_id & CAN_RTR_FLAG)
desc->data[0] |= ICAN3_EFF_RTR;
@@ -863,7 +869,7 @@ static void can_frame_to_ican3(struct ican3_dev *mod,
}
/* copy the data bits into the descriptor */
- memcpy(&desc->data[6], cf->data, sizeof(cf->data));
+ memcpy(&desc->data[6], cf->data, cf->can_dlc);
}
/*
@@ -909,8 +915,8 @@ static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg)
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+ stats->rx_over_errors++;
stats->rx_errors++;
- stats->rx_bytes += cf->can_dlc;
netif_rx(skb);
}
}
@@ -927,7 +933,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
struct net_device *dev = mod->ndev;
struct net_device_stats *stats = &dev->stats;
enum can_state state = mod->can.state;
- u8 status, isrc, rxerr, txerr;
+ u8 isrc, ecc, status, rxerr, txerr;
struct can_frame *cf;
struct sk_buff *skb;
@@ -943,15 +949,53 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
return -EINVAL;
}
- skb = alloc_can_err_skb(dev, &cf);
- if (skb == NULL)
- return -ENOMEM;
-
isrc = msg->data[0];
+ ecc = msg->data[2];
status = msg->data[3];
rxerr = msg->data[4];
txerr = msg->data[5];
+ /*
+ * This hardware lacks any support other than bus error messages to
+ * determine if packet transmission has failed.
+ *
+ * When TX errors happen, one echo skb needs to be dropped from the
+ * front of the queue.
+ *
+ * A small bit of code is duplicated here and below, to avoid error
+ * skb allocation when it will just be freed immediately.
+ */
+ if (isrc == CEVTIND_BEI) {
+ int ret;
+ dev_dbg(mod->dev, "bus error interrupt\n");
+
+ /* TX error */
+ if (!(ecc & ECC_DIR)) {
+ kfree_skb(skb_dequeue(&mod->echoq));
+ stats->tx_errors++;
+ } else {
+ stats->rx_errors++;
+ }
+
+ /*
+ * The controller automatically disables bus-error interrupts
+ * and therefore we must re-enable them.
+ */
+ ret = ican3_set_buserror(mod, 1);
+ if (ret) {
+ dev_err(mod->dev, "unable to re-enable bus-error\n");
+ return ret;
+ }
+
+ /* bus error reporting is off, return immediately */
+ if (!(mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
+ return 0;
+ }
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (skb == NULL)
+ return -ENOMEM;
+
/* data overrun interrupt */
if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) {
dev_dbg(mod->dev, "data overrun interrupt\n");
@@ -980,11 +1024,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
/* bus error interrupt */
if (isrc == CEVTIND_BEI) {
- u8 ecc = msg->data[2];
-
- dev_dbg(mod->dev, "bus error interrupt\n");
mod->can.can_stats.bus_error++;
- stats->rx_errors++;
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (ecc & ECC_MASK) {
@@ -1003,7 +1043,7 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
break;
}
- if ((ecc & ECC_DIR) == 0)
+ if (!(ecc & ECC_DIR))
cf->data[2] |= CAN_ERR_PROT_TX;
cf->data[6] = txerr;
@@ -1030,8 +1070,6 @@ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg)
}
mod->can.state = state;
- stats->rx_errors++;
- stats->rx_bytes += cf->can_dlc;
netif_rx(skb);
return 0;
}
@@ -1091,6 +1129,88 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
}
/*
+ * The ican3 needs to store all echo skbs, and therefore cannot
+ * use the generic infrastructure for this.
+ */
+static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
+{
+ struct sock *srcsk = skb->sk;
+
+ if (atomic_read(&skb->users) != 1) {
+ struct sk_buff *old_skb = skb;
+
+ skb = skb_clone(old_skb, GFP_ATOMIC);
+ kfree_skb(old_skb);
+ if (!skb)
+ return;
+ } else {
+ skb_orphan(skb);
+ }
+
+ skb->sk = srcsk;
+
+ /* save this skb for tx interrupt echo handling */
+ skb_queue_tail(&mod->echoq, skb);
+}
+
+static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
+{
+ struct sk_buff *skb = skb_dequeue(&mod->echoq);
+ struct can_frame *cf;
+ u8 dlc;
+
+ /* this should never trigger unless there is a driver bug */
+ if (!skb) {
+ netdev_err(mod->ndev, "BUG: echo skb not occupied\n");
+ return 0;
+ }
+
+ cf = (struct can_frame *)skb->data;
+ dlc = cf->can_dlc;
+
+ /* check flag whether this packet has to be looped back */
+ if (skb->pkt_type != PACKET_LOOPBACK) {
+ kfree_skb(skb);
+ return dlc;
+ }
+
+ skb->protocol = htons(ETH_P_CAN);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb->dev = mod->ndev;
+ netif_receive_skb(skb);
+ return dlc;
+}
+
+/*
+ * Compare an skb with an existing echo skb
+ *
+ * This function will be used on devices which have a hardware loopback.
+ * On these devices, this function can be used to compare a received skb
+ * with the saved echo skbs so that the hardware echo skb can be dropped.
+ *
+ * Returns true if the skb's are identical, false otherwise.
+ */
+static bool ican3_echo_skb_matches(struct ican3_dev *mod, struct sk_buff *skb)
+{
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ struct sk_buff *echo_skb = skb_peek(&mod->echoq);
+ struct can_frame *echo_cf;
+
+ if (!echo_skb)
+ return false;
+
+ echo_cf = (struct can_frame *)echo_skb->data;
+ if (cf->can_id != echo_cf->can_id)
+ return false;
+
+ if (cf->can_dlc != echo_cf->can_dlc)
+ return false;
+
+ return memcmp(cf->data, echo_cf->data, cf->can_dlc) == 0;
+}
+
+/*
* Check that there is room in the TX ring to transmit another skb
*
* LOCKING: must hold mod->lock
@@ -1100,6 +1220,10 @@ static bool ican3_txok(struct ican3_dev *mod)
struct ican3_fast_desc __iomem *desc;
u8 control;
+ /* check that we have echo queue space */
+ if (skb_queue_len(&mod->echoq) >= ICAN3_TX_BUFFERS)
+ return false;
+
/* copy the control bits of the descriptor */
ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16));
desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc));
@@ -1150,10 +1274,27 @@ static int ican3_recv_skb(struct ican3_dev *mod)
/* convert the ICAN3 frame into Linux CAN format */
ican3_to_can_frame(mod, &desc, cf);
- /* receive the skb, update statistics */
- netif_receive_skb(skb);
+ /*
+ * If this is an ECHO frame received from the hardware loopback
+ * feature, use the skb saved in the ECHO stack instead. This allows
+ * the Linux CAN core to support CAN_RAW_RECV_OWN_MSGS correctly.
+ *
+ * Since this is a confirmation of a successfully transmitted packet
+ * sent from this host, update the transmit statistics.
+ *
+ * Also, the netdevice queue needs to be allowed to send packets again.
+ */
+ if (ican3_echo_skb_matches(mod, skb)) {
+ stats->tx_packets++;
+ stats->tx_bytes += ican3_get_echo_skb(mod);
+ kfree_skb(skb);
+ goto err_noalloc;
+ }
+
+ /* update statistics, receive the skb */
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
err_noalloc:
/* toggle the valid bit and return the descriptor to the ring */
@@ -1176,13 +1317,13 @@ err_noalloc:
static int ican3_napi(struct napi_struct *napi, int budget)
{
struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi);
- struct ican3_msg msg;
unsigned long flags;
int received = 0;
int ret;
/* process all communication messages */
while (true) {
+ struct ican3_msg msg;
ret = ican3_recv_msg(mod, &msg);
if (ret)
break;
@@ -1325,7 +1466,7 @@ static int __devinit ican3_startup_module(struct ican3_dev *mod)
}
/* default to "bus errors enabled" */
- ret = ican3_set_buserror(mod, ICAN3_BUSERR_QUOTA_MAX);
+ ret = ican3_set_buserror(mod, 1);
if (ret) {
dev_err(mod->dev, "unable to set bus-error\n");
return ret;
@@ -1354,7 +1495,6 @@ static int __devinit ican3_startup_module(struct ican3_dev *mod)
static int ican3_open(struct net_device *ndev)
{
struct ican3_dev *mod = netdev_priv(ndev);
- u8 quota;
int ret;
/* open the CAN layer */
@@ -1364,19 +1504,6 @@ static int ican3_open(struct net_device *ndev)
return ret;
}
- /* set the bus error generation state appropriately */
- if (mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
- quota = ICAN3_BUSERR_QUOTA_MAX;
- else
- quota = 0;
-
- ret = ican3_set_buserror(mod, quota);
- if (ret) {
- dev_err(mod->dev, "unable to set bus-error\n");
- close_candev(ndev);
- return ret;
- }
-
/* bring the bus online */
ret = ican3_set_bus_state(mod, true);
if (ret) {
@@ -1408,6 +1535,9 @@ static int ican3_stop(struct net_device *ndev)
return ret;
}
+ /* drop all outstanding echo skbs */
+ skb_queue_purge(&mod->echoq);
+
/* close the CAN layer */
close_candev(ndev);
return 0;
@@ -1416,18 +1546,19 @@ static int ican3_stop(struct net_device *ndev)
static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ican3_dev *mod = netdev_priv(ndev);
- struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
struct ican3_fast_desc desc;
void __iomem *desc_addr;
unsigned long flags;
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
spin_lock_irqsave(&mod->lock, flags);
/* check that we can actually transmit */
if (!ican3_txok(mod)) {
- dev_err(mod->dev, "no free descriptors, stopping queue\n");
- netif_stop_queue(ndev);
+ dev_err(mod->dev, "BUG: no free descriptors\n");
spin_unlock_irqrestore(&mod->lock, flags);
return NETDEV_TX_BUSY;
}
@@ -1442,6 +1573,14 @@ static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
can_frame_to_ican3(mod, cf, &desc);
/*
+ * This hardware doesn't have TX-done notifications, so we'll try and
+ * emulate it the best we can using ECHO skbs. Add the skb to the ECHO
+ * stack. Upon packet reception, check if the ECHO skb and received
+ * skb match, and use that to wake the queue.
+ */
+ ican3_put_echo_skb(mod, skb);
+
+ /*
* the programming manual says that you must set the IVALID bit, then
* interrupt, then set the valid bit. Quite weird, but it seems to be
* required for this to work
@@ -1459,19 +1598,7 @@ static int ican3_xmit(struct sk_buff *skb, struct net_device *ndev)
mod->fasttx_num = (desc.control & DESC_WRAP) ? 0
: (mod->fasttx_num + 1);
- /* update statistics */
- stats->tx_packets++;
- stats->tx_bytes += cf->can_dlc;
- kfree_skb(skb);
-
- /*
- * This hardware doesn't have TX-done notifications, so we'll try and
- * emulate it the best we can using ECHO skbs. Get the next TX
- * descriptor, and see if we have room to send. If not, stop the queue.
- * It will be woken when the ECHO skb for the current packet is recv'd.
- */
-
- /* copy the control bits of the descriptor */
+ /* if there is no free descriptor space, stop the transmit queue */
if (!ican3_txok(mod))
netif_stop_queue(ndev);
@@ -1490,7 +1617,7 @@ static const struct net_device_ops ican3_netdev_ops = {
*/
/* This structure was stolen from drivers/net/can/sja1000/sja1000.c */
-static struct can_bittiming_const ican3_bittiming_const = {
+static const struct can_bittiming_const ican3_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
@@ -1667,6 +1794,7 @@ static int __devinit ican3_probe(struct platform_device *pdev)
mod->dev = &pdev->dev;
mod->num = pdata->modno;
netif_napi_add(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS);
+ skb_queue_head_init(&mod->echoq);
spin_lock_init(&mod->lock);
init_completion(&mod->termination_comp);
init_completion(&mod->buserror_comp);
@@ -1687,7 +1815,8 @@ static int __devinit ican3_probe(struct platform_device *pdev)
mod->can.do_set_mode = ican3_set_mode;
mod->can.do_get_berr_counter = ican3_get_berr_counter;
mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES
- | CAN_CTRLMODE_BERR_REPORTING;
+ | CAN_CTRLMODE_BERR_REPORTING
+ | CAN_CTRLMODE_ONE_SHOT;
/* find our IRQ number */
mod->irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 346785c56a25..a580db29e503 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -214,7 +214,7 @@ static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
module_param(mcp251x_enable_dma, int, S_IRUGO);
MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
-static struct can_bittiming_const mcp251x_bittiming_const = {
+static const struct can_bittiming_const mcp251x_bittiming_const = {
.name = DEVICE_NAME,
.tseg1_min = 3,
.tseg1_max = 16,
@@ -1020,8 +1020,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
GFP_DMA);
if (priv->spi_tx_buf) {
- priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf +
- (PAGE_SIZE / 2));
+ priv->spi_rx_buf = (priv->spi_tx_buf + (PAGE_SIZE / 2));
priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
(PAGE_SIZE / 2));
} else {
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5caa572d71e3..06adf881ea24 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -251,7 +251,7 @@ static struct of_device_id mpc5xxx_can_table[];
static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
- struct mpc5xxx_can_data *data;
+ const struct mpc5xxx_can_data *data;
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev;
struct mscan_priv *priv;
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 41a2a2dda7ea..2b104d5f422c 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -34,7 +34,7 @@
#include "mscan.h"
-static struct can_bittiming_const mscan_bittiming_const = {
+static const struct can_bittiming_const mscan_bittiming_const = {
.name = "mscan",
.tseg1_min = 4,
.tseg1_max = 16,
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 1226297e7676..48b3d62b34cb 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -184,7 +184,7 @@ struct pch_can_priv {
int use_msi;
};
-static struct can_bittiming_const pch_can_bittiming_const = {
+static const struct can_bittiming_const pch_can_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 2,
.tseg1_max = 16,
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 5e10472371ed..4c4f33d482d2 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -69,7 +69,7 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DRV_NAME "CAN netdevice driver");
-static struct can_bittiming_const sja1000_bittiming_const = {
+static const struct can_bittiming_const sja1000_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index a7c77c744ee9..f2a221e7b968 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -826,12 +826,12 @@ static __devinit int softing_pdev_probe(struct platform_device *pdev)
goto sysfs_failed;
}
- ret = -ENOMEM;
for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
card->net[j] = netdev =
softing_netdev_create(card, card->id.chip[j]);
if (!netdev) {
dev_alert(&pdev->dev, "failed to make can[%i]", j);
+ ret = -ENOMEM;
goto netdev_failed;
}
priv = netdev_priv(card->net[j]);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 4accd7ec6954..527dbcf95335 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -196,7 +196,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
#define HECC_CANGIM_SIL BIT(2) /* system interrupts to int line 1 */
/* CAN Bittiming constants as per HECC specs */
-static struct can_bittiming_const ti_hecc_bittiming_const = {
+static const struct can_bittiming_const ti_hecc_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 1,
.tseg1_max = 16,
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 7ae65fc80032..086fa321677a 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -889,7 +889,7 @@ static const struct net_device_ops ems_usb_netdev_ops = {
.ndo_start_xmit = ems_usb_start_xmit,
};
-static struct can_bittiming_const ems_usb_bittiming_const = {
+static const struct can_bittiming_const ems_usb_bittiming_const = {
.name = "ems_usb",
.tseg1_min = 1,
.tseg1_max = 16,
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 09b1da5bc512..bd36e5517173 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -871,7 +871,7 @@ static const struct net_device_ops esd_usb2_netdev_ops = {
.ndo_start_xmit = esd_usb2_start_xmit,
};
-static struct can_bittiming_const esd_usb2_bittiming_const = {
+static const struct can_bittiming_const esd_usb2_bittiming_const = {
.name = "esd_usb2",
.tseg1_min = ESD_USB2_TSEG1_MIN,
.tseg1_max = ESD_USB2_TSEG1_MAX,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index a948c5a89401..4c775b620be2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -45,7 +45,7 @@ struct peak_usb_adapter {
char *name;
u32 device_id;
struct can_clock clock;
- struct can_bittiming_const bittiming_const;
+ const struct can_bittiming_const bittiming_const;
unsigned int ctrl_count;
int (*intf_probe)(struct usb_interface *intf);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index ea2d94285936..4f93c0be0053 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -70,13 +70,12 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cfd->len;
- skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
skb->dev = dev;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -86,7 +85,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
int loop;
@@ -94,7 +93,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
stats->tx_packets++;
- stats->tx_bytes += cf->can_dlc;
+ stats->tx_bytes += cfd->len;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -108,7 +107,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += cf->can_dlc;
+ stats->rx_bytes += cfd->len;
}
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -133,14 +132,28 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static int vcan_change_mtu(struct net_device *dev, int new_mtu)
+{
+ /* Do not allow changing the MTU while running */
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ return -EINVAL;
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
static const struct net_device_ops vcan_netdev_ops = {
.ndo_start_xmit = vcan_tx,
+ .ndo_change_mtu = vcan_change_mtu,
};
static void vcan_setup(struct net_device *dev)
{
dev->type = ARPHRD_CAN;
- dev->mtu = sizeof(struct can_frame);
+ dev->mtu = CAN_MTU;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->tx_queue_len = 0;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 9c755db6b16d..f0c8bd54ce29 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1008,7 +1008,7 @@ e100_send_mdio_bit(unsigned char bit)
}
static unsigned char
-e100_receive_mdio_bit()
+e100_receive_mdio_bit(void)
{
unsigned char bit;
*R_NETWORK_MGM_CTRL = 0;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bab0158f1cc3..c260af5411d0 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -40,18 +40,6 @@
static int numdummies = 1;
-static int dummy_set_address(struct net_device *dev, void *p)
-{
- struct sockaddr *sa = p;
-
- if (!is_valid_ether_addr(sa->sa_data))
- return -EADDRNOTAVAIL;
-
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
- return 0;
-}
-
/* fake multicast ability */
static void set_multicast_list(struct net_device *dev)
{
@@ -75,10 +63,10 @@ static struct rtnl_link_stats64 *dummy_get_stats64(struct net_device *dev,
dstats = per_cpu_ptr(dev->dstats, i);
do {
- start = u64_stats_fetch_begin(&dstats->syncp);
+ start = u64_stats_fetch_begin_bh(&dstats->syncp);
tbytes = dstats->tx_bytes;
tpackets = dstats->tx_packets;
- } while (u64_stats_fetch_retry(&dstats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&dstats->syncp, start));
stats->tx_bytes += tbytes;
stats->tx_packets += tpackets;
}
@@ -118,7 +106,7 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = set_multicast_list,
- .ndo_set_mac_address = dummy_set_address,
+ .ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dummy_get_stats64,
};
@@ -134,6 +122,7 @@ static void dummy_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/3com/3c501.c b/drivers/net/ethernet/3com/3c501.c
index bf73e1a02293..2038eaabaea4 100644
--- a/drivers/net/ethernet/3com/3c501.c
+++ b/drivers/net/ethernet/3com/3c501.c
@@ -143,7 +143,7 @@ static int irq = 5;
static int mem_start;
/**
- * el1_probe: - probe for a 3c501
+ * el1_probe - probe for a 3c501
* @dev: The device structure passed in to probe.
*
* This can be called from two places. The network layer will probe using
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 2e538676924d..e1219e037c04 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -162,6 +162,20 @@ config MAC8390
and read the Ethernet-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
+config MCF8390
+ tristate "ColdFire NS8390 based Ethernet support"
+ depends on COLDFIRE
+ select CRC32
+ ---help---
+ This driver is for Ethernet devices using an NS8390-compatible
+ chipset on many common ColdFire CPU based boards. Many of the older
+ Freescale dev boards use this, and some other common boards like
+ some SnapGear routers do as well.
+
+ If you have one of these boards and want to use the network interface
+ on them then choose Y. To compile this driver as a module, choose M
+ here, the module will be called mcf8390.
+
config NE2000
tristate "NE2000/NE1000 support"
depends on (ISA || (Q40 && m) || M32R || MACH_TX49XX)
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index d13790b7fd27..f43038babf86 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
obj-$(CONFIG_HPLAN) += hp.o 8390p.o
obj-$(CONFIG_HYDRA) += hydra.o 8390.o
obj-$(CONFIG_LNE390) += lne390.o 8390.o
+obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 923959275a82..912ed7a5f33a 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -454,7 +454,7 @@ apne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int rin
buf[count-1] = inb(NE_BASE + NE_DATAPORT);
}
} else {
- ptrc = (char*)buf;
+ ptrc = buf;
for (cnt = 0; cnt < count; cnt++)
*ptrc++ = inb(NE_BASE + NE_DATAPORT);
}
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
new file mode 100644
index 000000000000..230efd6fa5d5
--- /dev/null
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -0,0 +1,480 @@
+/*
+ * Support for ColdFire CPU based boards using a NS8390 Ethernet device.
+ *
+ * Derived from the many other 8390 drivers.
+ *
+ * (C) Copyright 2012, Greg Ungerer <gerg@uclinux.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of the Linux
+ * distribution for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/io.h>
+#include <asm/mcf8390.h>
+
+static const char version[] =
+ "mcf8390.c: (15-06-2012) Greg Ungerer <gerg@uclinux.org>";
+
+#define NE_CMD 0x00
+#define NE_DATAPORT 0x10 /* NatSemi-defined port window offset */
+#define NE_RESET 0x1f /* Issue a read to reset ,a write to clear */
+#define NE_EN0_ISR 0x07
+#define NE_EN0_DCFG 0x0e
+#define NE_EN0_RSARLO 0x08
+#define NE_EN0_RSARHI 0x09
+#define NE_EN0_RCNTLO 0x0a
+#define NE_EN0_RXCR 0x0c
+#define NE_EN0_TXCR 0x0d
+#define NE_EN0_RCNTHI 0x0b
+#define NE_EN0_IMR 0x0f
+
+#define NESM_START_PG 0x40 /* First page of TX buffer */
+#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+#ifdef NE2000_ODDOFFSET
+/*
+ * A lot of the ColdFire boards use a separate address region for odd offset
+ * register addresses. The following functions convert and map as required.
+ * Note that the data port accesses are treated a little differently, and
+ * always accessed via the insX/outsX functions.
+ */
+static inline u32 NE_PTR(u32 addr)
+{
+ if (addr & 1)
+ return addr - 1 + NE2000_ODDOFFSET;
+ return addr;
+}
+
+static inline u32 NE_DATA_PTR(u32 addr)
+{
+ return addr;
+}
+
+void ei_outb(u32 val, u32 addr)
+{
+ NE2000_BYTE *rp;
+
+ rp = (NE2000_BYTE *) NE_PTR(addr);
+ *rp = RSWAP(val);
+}
+
+#define ei_inb ei_inb
+u8 ei_inb(u32 addr)
+{
+ NE2000_BYTE *rp, val;
+
+ rp = (NE2000_BYTE *) NE_PTR(addr);
+ val = *rp;
+ return (u8) (RSWAP(val) & 0xff);
+}
+
+void ei_insb(u32 addr, void *vbuf, int len)
+{
+ NE2000_BYTE *rp, val;
+ u8 *buf;
+
+ buf = (u8 *) vbuf;
+ rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ val = *rp;
+ *buf++ = RSWAP(val);
+ }
+}
+
+void ei_insw(u32 addr, void *vbuf, int len)
+{
+ volatile u16 *rp;
+ u16 w, *buf;
+
+ buf = (u16 *) vbuf;
+ rp = (volatile u16 *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ w = *rp;
+ *buf++ = BSWAP(w);
+ }
+}
+
+void ei_outsb(u32 addr, const void *vbuf, int len)
+{
+ NE2000_BYTE *rp, val;
+ u8 *buf;
+
+ buf = (u8 *) vbuf;
+ rp = (NE2000_BYTE *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ val = *buf++;
+ *rp = RSWAP(val);
+ }
+}
+
+void ei_outsw(u32 addr, const void *vbuf, int len)
+{
+ volatile u16 *rp;
+ u16 w, *buf;
+
+ buf = (u16 *) vbuf;
+ rp = (volatile u16 *) NE_DATA_PTR(addr);
+ for (; (len > 0); len--) {
+ w = *buf++;
+ *rp = BSWAP(w);
+ }
+}
+
+#else /* !NE2000_ODDOFFSET */
+
+#define ei_inb inb
+#define ei_outb outb
+#define ei_insb insb
+#define ei_insw insw
+#define ei_outsb outsb
+#define ei_outsw outsw
+
+#endif /* !NE2000_ODDOFFSET */
+
+#define ei_inb_p ei_inb
+#define ei_outb_p ei_outb
+
+#include "lib8390.c"
+
+/*
+ * Hard reset the card. This used to pause for the same period that a
+ * 8390 reset command required, but that shouldn't be necessary.
+ */
+static void mcf8390_reset_8390(struct net_device *dev)
+{
+ unsigned long reset_start_time = jiffies;
+ u32 addr = dev->base_addr;
+
+ if (ei_debug > 1)
+ netdev_dbg(dev, "resetting the 8390 t=%ld...\n", jiffies);
+
+ ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET);
+
+ ei_status.txing = 0;
+ ei_status.dmaing = 0;
+
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RESET) == 0) {
+ if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
+ netdev_warn(dev, "%s: did not complete\n", __func__);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RESET, addr + NE_EN0_ISR);
+}
+
+/*
+ * This *shouldn't* happen.
+ * If it does, it's the last thing you'll see
+ */
+static void mcf8390_dmaing_err(const char *func, struct net_device *dev,
+ struct ei_device *ei_local)
+{
+ netdev_err(dev, "%s: DMAing conflict [DMAstat:%d][irqlock:%d]\n",
+ func, ei_local->dmaing, ei_local->irqlock);
+}
+
+/*
+ * Grab the 8390 specific header. Similar to the block_input routine, but
+ * we don't need to be concerned with ring wrap as the header will be at
+ * the start of a page, so we optimize accordingly.
+ */
+static void mcf8390_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD);
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+ ei_outb(sizeof(struct e8390_pkt_hdr), addr + NE_EN0_RCNTLO);
+ ei_outb(0, addr + NE_EN0_RCNTHI);
+ ei_outb(0, addr + NE_EN0_RSARLO); /* On page boundary */
+ ei_outb(ring_page, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD);
+
+ ei_insw(addr + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr) >> 1);
+
+ outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+
+ hdr->count = cpu_to_le16(hdr->count);
+}
+
+/*
+ * Block input and output, similar to the Crynwr packet driver.
+ * If you are porting to a new ethercard, look at the packet driver source
+ * for hints. The NEx000 doesn't share the on-board packet memory --
+ * you have to put the packet out through the "remote DMA" dataport
+ * using z_writeb.
+ */
+static void mcf8390_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+ char *buf = skb->data;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, addr + NE_CMD);
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+ ei_outb(count & 0xff, addr + NE_EN0_RCNTLO);
+ ei_outb(count >> 8, addr + NE_EN0_RCNTHI);
+ ei_outb(ring_offset & 0xff, addr + NE_EN0_RSARLO);
+ ei_outb(ring_offset >> 8, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RREAD + E8390_START, addr + NE_CMD);
+
+ ei_insw(addr + NE_DATAPORT, buf, count >> 1);
+ if (count & 1)
+ buf[count - 1] = ei_inb(addr + NE_DATAPORT);
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+}
+
+static void mcf8390_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ const int start_page)
+{
+ struct ei_device *ei_local = netdev_priv(dev);
+ u32 addr = dev->base_addr;
+ unsigned long dma_start;
+
+ /* Make sure we transfer all bytes if 16bit IO writes */
+ if (count & 0x1)
+ count++;
+
+ if (ei_local->dmaing) {
+ mcf8390_dmaing_err(__func__, dev, ei_local);
+ return;
+ }
+
+ ei_local->dmaing |= 0x01;
+ /* We should already be in page 0, but to be safe... */
+ ei_outb(E8390_PAGE0 + E8390_START + E8390_NODMA, addr + NE_CMD);
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR);
+
+ /* Now the normal output. */
+ ei_outb(count & 0xff, addr + NE_EN0_RCNTLO);
+ ei_outb(count >> 8, addr + NE_EN0_RCNTHI);
+ ei_outb(0x00, addr + NE_EN0_RSARLO);
+ ei_outb(start_page, addr + NE_EN0_RSARHI);
+ ei_outb(E8390_RWRITE + E8390_START, addr + NE_CMD);
+
+ ei_outsw(addr + NE_DATAPORT, buf, count >> 1);
+
+ dma_start = jiffies;
+ while ((ei_inb(addr + NE_EN0_ISR) & ENISR_RDC) == 0) {
+ if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
+ netdev_err(dev, "timeout waiting for Tx RDC\n");
+ mcf8390_reset_8390(dev);
+ __NS8390_init(dev, 1);
+ break;
+ }
+ }
+
+ ei_outb(ENISR_RDC, addr + NE_EN0_ISR); /* Ack intr */
+ ei_local->dmaing &= ~0x01;
+}
+
+static const struct net_device_ops mcf8390_netdev_ops = {
+ .ndo_open = __ei_open,
+ .ndo_stop = __ei_close,
+ .ndo_start_xmit = __ei_start_xmit,
+ .ndo_tx_timeout = __ei_tx_timeout,
+ .ndo_get_stats = __ei_get_stats,
+ .ndo_set_rx_mode = __ei_set_multicast_list,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_change_mtu = eth_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = __ei_poll,
+#endif
+};
+
+static int mcf8390_init(struct net_device *dev)
+{
+ static u32 offsets[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ };
+ struct ei_device *ei_local = netdev_priv(dev);
+ unsigned char SA_prom[32];
+ u32 addr = dev->base_addr;
+ int start_page, stop_page;
+ int i, ret;
+
+ mcf8390_reset_8390(dev);
+
+ /*
+ * Read the 16 bytes of station address PROM.
+ * We must first initialize registers,
+ * similar to NS8390_init(eifdev, 0).
+ * We can't reliably read the SAPROM address without this.
+ * (I learned the hard way!).
+ */
+ {
+ static const struct {
+ u32 value;
+ u32 offset;
+ } program_seq[] = {
+ {E8390_NODMA + E8390_PAGE0 + E8390_STOP, NE_CMD},
+ /* Select page 0 */
+ {0x48, NE_EN0_DCFG}, /* 0x48: Set byte-wide access */
+ {0x00, NE_EN0_RCNTLO}, /* Clear the count regs */
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_IMR}, /* Mask completion irq */
+ {0xFF, NE_EN0_ISR},
+ {E8390_RXOFF, NE_EN0_RXCR}, /* 0x20 Set to monitor */
+ {E8390_TXOFF, NE_EN0_TXCR}, /* 0x02 and loopback mode */
+ {32, NE_EN0_RCNTLO},
+ {0x00, NE_EN0_RCNTHI},
+ {0x00, NE_EN0_RSARLO}, /* DMA starting at 0x0000 */
+ {0x00, NE_EN0_RSARHI},
+ {E8390_RREAD + E8390_START, NE_CMD},
+ };
+ for (i = 0; i < ARRAY_SIZE(program_seq); i++) {
+ ei_outb(program_seq[i].value,
+ addr + program_seq[i].offset);
+ }
+ }
+
+ for (i = 0; i < 16; i++) {
+ SA_prom[i] = ei_inb(addr + NE_DATAPORT);
+ ei_inb(addr + NE_DATAPORT);
+ }
+
+ /* We must set the 8390 for word mode. */
+ ei_outb(0x49, addr + NE_EN0_DCFG);
+ start_page = NESM_START_PG;
+ stop_page = NESM_STOP_PG;
+
+ /* Install the Interrupt handler */
+ ret = request_irq(dev->irq, __ei_interrupt, 0, dev->name, dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = SA_prom[i];
+
+ netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr);
+
+ ei_local->name = "mcf8390";
+ ei_local->tx_start_page = start_page;
+ ei_local->stop_page = stop_page;
+ ei_local->word16 = 1;
+ ei_local->rx_start_page = start_page + TX_PAGES;
+ ei_local->reset_8390 = mcf8390_reset_8390;
+ ei_local->block_input = mcf8390_block_input;
+ ei_local->block_output = mcf8390_block_output;
+ ei_local->get_8390_hdr = mcf8390_get_8390_hdr;
+ ei_local->reg_offset = offsets;
+
+ dev->netdev_ops = &mcf8390_netdev_ops;
+ __NS8390_init(dev, 0);
+ ret = register_netdev(dev);
+ if (ret) {
+ free_irq(dev->irq, dev);
+ return ret;
+ }
+
+ netdev_info(dev, "addr=0x%08x irq=%d, Ethernet Address %pM\n",
+ addr, dev->irq, dev->dev_addr);
+ return 0;
+}
+
+static int mcf8390_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct ei_device *ei_local;
+ struct resource *mem, *irq;
+ resource_size_t msize;
+ int ret;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (irq == NULL) {
+ dev_err(&pdev->dev, "no IRQ specified?\n");
+ return -ENXIO;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL) {
+ dev_err(&pdev->dev, "no memory address specified?\n");
+ return -ENXIO;
+ }
+ msize = resource_size(mem);
+ if (!request_mem_region(mem->start, msize, pdev->name))
+ return -EBUSY;
+
+ dev = ____alloc_ei_netdev(0);
+ if (dev == NULL) {
+ release_mem_region(mem->start, msize);
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+ ei_local = netdev_priv(dev);
+
+ dev->irq = irq->start;
+ dev->base_addr = mem->start;
+
+ ret = mcf8390_init(dev);
+ if (ret) {
+ release_mem_region(mem->start, msize);
+ free_netdev(dev);
+ return ret;
+ }
+ return 0;
+}
+
+static int mcf8390_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct resource *mem;
+
+ unregister_netdev(dev);
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem)
+ release_mem_region(mem->start, resource_size(mem));
+ free_netdev(dev);
+ return 0;
+}
+
+static struct platform_driver mcf8390_drv = {
+ .driver = {
+ .name = "mcf8390",
+ .owner = THIS_MODULE,
+ },
+ .probe = mcf8390_probe,
+ .remove = mcf8390_remove,
+};
+
+module_platform_driver(mcf8390_drv);
+
+MODULE_DESCRIPTION("MCF8390 ColdFire NS8390 driver");
+MODULE_AUTHOR("Greg Ungerer <gerg@uclinux.org>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mcf8390");
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 348501178089..9c77c736f171 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1014,7 +1014,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
struct greth_regs *regs;
greth = netdev_priv(dev);
- regs = (struct greth_regs *) greth->regs;
+ regs = greth->regs;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -1036,7 +1036,7 @@ static void greth_set_hash_filter(struct net_device *dev)
{
struct netdev_hw_addr *ha;
struct greth_private *greth = netdev_priv(dev);
- struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ struct greth_regs *regs = greth->regs;
u32 mc_filter[2];
unsigned int bitnr;
@@ -1055,7 +1055,7 @@ static void greth_set_multicast_list(struct net_device *dev)
{
int cfg;
struct greth_private *greth = netdev_priv(dev);
- struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ struct greth_regs *regs = greth->regs;
cfg = GRETH_REGLOAD(regs->control);
if (dev->flags & IFF_PROMISC)
@@ -1414,7 +1414,7 @@ static int __devinit greth_of_probe(struct platform_device *ofdev)
goto error1;
}
- regs = (struct greth_regs *) greth->regs;
+ regs = greth->regs;
greth->irq = ofdev->archdata.irqs[0];
dev_set_drvdata(greth->dev, dev);
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 75299f500ee5..7203b522f234 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -623,7 +623,7 @@ static int lance_rx(struct net_device *dev)
skb_put(skb, len); /* make room */
cp_from_buf(lp->type, skb->data,
- (char *)lp->rx_buf_ptr_cpu[entry], len);
+ lp->rx_buf_ptr_cpu[entry], len);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -919,7 +919,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
*lib_ptr(ib, btx_ring[entry].length, lp->type) = (-len);
*lib_ptr(ib, btx_ring[entry].misc, lp->type) = 0;
- cp_to_buf(lp->type, (char *)lp->tx_buf_ptr_cpu[entry], skb->data, len);
+ cp_to_buf(lp->type, lp->tx_buf_ptr_cpu[entry], skb->data, len);
/* Now, give the packet to the lance */
*lib_ptr(ib, btx_ring[entry].tmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index a6e2e840884e..5c728436b85e 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -873,10 +873,9 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
lp->rx_skbuff[i] = skb;
- if (skb) {
- skb->dev = dev;
+ if (skb)
rx_buff = skb->data;
- } else
+ else
rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
if (rx_buff == NULL)
lp->rx_ring[i].base = 0;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index ab7ff8645ab1..a92ddee7f665 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -228,7 +228,7 @@ static int __devinit mace_probe(struct platform_device *pdev)
* bits are reversed.
*/
- addr = (void *)MACE_PROM;
+ addr = MACE_PROM;
for (j = 0; j < 6; ++j) {
u8 v = bitrev8(addr[j<<4]);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index ff9c73859d45..21e261ffbe10 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -199,7 +199,7 @@ int atl1c_read_mac_addr(struct atl1c_hw *hw)
err = atl1c_get_permanent_address(hw);
if (err)
- random_ether_addr(hw->perm_mac_addr);
+ eth_random_addr(hw->perm_mac_addr);
memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
return err;
@@ -602,7 +602,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
int atl1c_phy_init(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
@@ -696,7 +696,7 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
/* select one link mode to get lower power consumption */
int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
int ret = 0;
u16 autoneg_advertised = ADVERTISED_10baseT_Half;
@@ -768,7 +768,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 master_ctrl, mac_ctrl, phy_ctrl;
u32 wol_ctrl, speed;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index 17d935bdde0a..21d8c4dbdbe1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -74,6 +74,8 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
#define L2CB_V10 0xc0
#define L2CB_V11 0xc1
+#define L2CB_V20 0xc0
+#define L2CB_V21 0xc1
/* register definition */
#define REG_DEVICE_CAP 0x5C
@@ -87,6 +89,9 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define LINK_CTRL_L1_EN 0x02
#define LINK_CTRL_EXT_SYNC 0x80
+#define REG_PCIE_IND_ACC_ADDR 0x80
+#define REG_PCIE_IND_ACC_DATA 0x84
+
#define REG_DEV_SERIALNUM_CTRL 0x200
#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */
#define REG_DEV_MAC_SEL_SHIFT 0
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 9cc15701101b..1bf5bbfe778e 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -166,7 +166,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
msleep(5);
}
-/*
+/**
* atl1c_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -179,7 +179,7 @@ static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -192,7 +192,7 @@ static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/*
+/**
* atl1c_irq_reset - reset interrupt confiure on the NIC
* @adapter: board private structure
*/
@@ -220,7 +220,7 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
return data;
}
-/*
+/**
* atl1c_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -261,7 +261,6 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
if ((phy_data & BMSR_LSTATUS) == 0) {
/* link down */
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
hw->hibernate = true;
if (atl1c_reset_mac(hw) != 0)
if (netif_msg_hw(adapter))
@@ -361,7 +360,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
}
-/*
+/**
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -374,7 +373,7 @@ static void atl1c_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->common_task);
}
-/*
+/**
* atl1c_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -453,7 +452,7 @@ static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-/*
+/**
* atl1c_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -518,7 +517,7 @@ static int atl1c_set_features(struct net_device *netdev,
return 0;
}
-/*
+/**
* atl1c_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -577,12 +576,6 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
atl1c_write_phy_reg(&adapter->hw, reg_num, val);
}
-/*
- * atl1c_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1c_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
@@ -633,12 +626,6 @@ out:
return retval;
}
-/*
- * atl1c_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -651,7 +638,7 @@ static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atl1c_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -740,6 +727,8 @@ static const struct atl1c_platform_patch plats[] __devinitdata = {
static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
{
+ struct pci_dev *pdev = hw->adapter->pdev;
+ u32 misc_ctrl;
int i = 0;
hw->msi_lnkpatch = false;
@@ -754,8 +743,20 @@ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
}
i++;
}
+
+ if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 &&
+ hw->revision_id == L2CB_V21) {
+ /* config acess mode */
+ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
+ REG_PCIE_DEV_MISC_CTRL);
+ pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl);
+ misc_ctrl &= ~0x100;
+ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
+ REG_PCIE_DEV_MISC_CTRL);
+ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl);
+ }
}
-/*
+/**
* atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
* @adapter: board private structure to initialize
*
@@ -781,7 +782,7 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
hw->device_id = pdev->device;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_id = pdev->subsystem_device;
- AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
+ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision);
hw->revision_id = revision & 0xFF;
/* before link up, we assume hibernate is true */
hw->hibernate = true;
@@ -853,7 +854,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
buffer_info->skb = NULL;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
}
-/*
+/**
* atl1c_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
*/
@@ -878,7 +879,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
tpd_ring->next_to_use = 0;
}
-/*
+/**
* atl1c_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
*/
@@ -931,7 +932,7 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -954,7 +955,7 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -989,12 +990,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
}
for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
tpd_ring[i].buffer_info =
- (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ (tpd_ring->buffer_info + count);
count += tpd_ring[i].count;
}
rfd_ring->buffer_info =
- (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
+ (tpd_ring->buffer_info + count);
count += rfd_ring->count;
rx_desc_count += rfd_ring->count;
@@ -1227,7 +1228,7 @@ static void atl1c_start_mac(struct atl1c_adapter *adapter)
*/
static int atl1c_reset_mac(struct atl1c_hw *hw)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
+ struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
u32 ctrl_data = 0;
@@ -1363,7 +1364,7 @@ static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
return;
}
-/*
+/**
* atl1c_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1477,7 +1478,7 @@ static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
}
}
-/*
+/**
* atl1c_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*
@@ -1531,8 +1532,7 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
{
- struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
- &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
@@ -1559,11 +1559,10 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
return true;
}
-/*
+/**
* atl1c_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1c_intr(int irq, void *data)
{
@@ -1814,9 +1813,8 @@ rrs_checked:
atl1c_alloc_rx_buffer(adapter);
}
-/*
+/**
* atl1c_clean - NAPI Rx polling callback
- * @adapter: board private structure
*/
static int atl1c_clean(struct napi_struct *napi, int budget)
{
@@ -2271,7 +2269,7 @@ static void atl1c_down(struct atl1c_adapter *adapter)
atl1c_reset_dma_ring(adapter);
}
-/*
+/**
* atl1c_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2310,7 +2308,7 @@ err_up:
return err;
}
-/*
+/**
* atl1c_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2433,7 +2431,7 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
return 0;
}
-/*
+/**
* atl1c_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1c_pci_tbl
@@ -2580,7 +2578,7 @@ err_dma:
return err;
}
-/*
+/**
* atl1c_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -2606,7 +2604,7 @@ static void __devexit atl1c_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-/*
+/**
* atl1c_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
@@ -2634,7 +2632,7 @@ static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-/*
+/**
* atl1c_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
@@ -2662,7 +2660,7 @@ static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/*
+/**
* atl1c_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
@@ -2705,7 +2703,7 @@ static struct pci_driver atl1c_driver = {
.driver.pm = &atl1c_pm_ops,
};
-/*
+/**
* atl1c_init_module - Driver Registration Routine
*
* atl1c_init_module is the first routine called when the driver is
@@ -2716,7 +2714,7 @@ static int __init atl1c_init_module(void)
return pci_register_driver(&atl1c_driver);
}
-/*
+/**
* atl1c_exit_module - Driver Exit Cleanup Routine
*
* atl1c_exit_module is called just before the driver is removed
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6e61f9f9ebb5..82b23861bf55 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -268,7 +268,7 @@ static int atl1e_set_eeprom(struct net_device *netdev,
if (eeprom_buff == NULL)
return -ENOMEM;
- ptr = (u32 *)eeprom_buff;
+ ptr = eeprom_buff;
if (eeprom->offset & 3) {
/* need read/modify/write of first changed EEPROM word */
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 1220e511ced6..a98acc8a956f 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -89,7 +89,7 @@ static const u16 atl1e_pay_load_size[] = {
128, 256, 512, 1024, 2048, 4096,
};
-/*
+/**
* atl1e_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -102,7 +102,7 @@ static inline void atl1e_irq_enable(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -114,7 +114,7 @@ static inline void atl1e_irq_disable(struct atl1e_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-/*
+/**
* atl1e_irq_reset - reset interrupt confiure on the NIC
* @adapter: board private structure
*/
@@ -126,7 +126,7 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
AT_WRITE_FLUSH(&adapter->hw);
}
-/*
+/**
* atl1e_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -210,7 +210,7 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_link_chg_task - deal with link change event Out of interrupt context
* @netdev: network interface device structure
*/
@@ -259,7 +259,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
cancel_work_sync(&adapter->link_chg_task);
}
-/*
+/**
* atl1e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -271,7 +271,7 @@ static void atl1e_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl1e_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -345,7 +345,7 @@ static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
atl1e_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-/*
+/**
* atl1e_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -397,7 +397,7 @@ static int atl1e_set_features(struct net_device *netdev,
return 0;
}
-/*
+/**
* atl1e_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -449,12 +449,6 @@ static void atl1e_mdio_write(struct net_device *netdev, int phy_id,
atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val);
}
-/*
- * atl1e_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1e_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
@@ -505,12 +499,6 @@ out:
}
-/*
- * atl1e_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -541,7 +529,7 @@ static void atl1e_setup_pcicmd(struct pci_dev *pdev)
msleep(1);
}
-/*
+/**
* atl1e_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
*
@@ -551,7 +539,7 @@ static int __devinit atl1e_alloc_queues(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_sw_init - Initialize general software structures (struct atl1e_adapter)
* @adapter: board private structure to initialize
*
@@ -635,14 +623,13 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
*/
static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
- &adapter->tx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_tx_buffer *tx_buffer = NULL;
struct pci_dev *pdev = adapter->pdev;
u16 index, ring_count;
@@ -679,14 +666,14 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
ring_count);
}
-/*
+/**
* atl1e_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
*/
static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter)
{
struct atl1e_rx_ring *rx_ring =
- (struct atl1e_rx_ring *)&adapter->rx_ring;
+ &adapter->rx_ring;
struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc;
u16 i, j;
@@ -762,7 +749,7 @@ static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -787,7 +774,7 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
}
}
-/*
+/**
* atl1e_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -884,14 +871,12 @@ failed:
return err;
}
-static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
+static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
- struct atl1e_rx_ring *rx_ring =
- (struct atl1e_rx_ring *)&adapter->rx_ring;
- struct atl1e_tx_ring *tx_ring =
- (struct atl1e_tx_ring *)&adapter->tx_ring;
+ struct atl1e_hw *hw = &adapter->hw;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_rx_page_desc *rx_page_desc = NULL;
int i, j;
@@ -932,7 +917,7 @@ static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_hw *hw = &adapter->hw;
u32 dev_ctrl_data = 0;
u32 max_pay_load = 0;
u32 jumbo_thresh = 0;
@@ -975,7 +960,7 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
{
- struct atl1e_hw *hw = (struct atl1e_hw *)&adapter->hw;
+ struct atl1e_hw *hw = &adapter->hw;
u32 rxf_len = 0;
u32 rxf_low = 0;
u32 rxf_high = 0;
@@ -1078,7 +1063,7 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
AT_WRITE_REG(hw, REG_MAC_CTRL, value);
}
-/*
+/**
* atl1e_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1148,7 +1133,7 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
return 0;
}
-/*
+/**
* atl1e_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*
@@ -1224,8 +1209,7 @@ static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter)
static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = (struct atl1e_tx_ring *)
- &adapter->tx_ring;
+ struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
struct atl1e_tx_buffer *tx_buffer = NULL;
u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX);
u16 next_to_clean = atomic_read(&tx_ring->next_to_clean);
@@ -1261,11 +1245,10 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
return true;
}
-/*
+/**
* atl1e_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1e_intr(int irq, void *data)
{
@@ -1384,15 +1367,14 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
(struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc;
u8 rx_using = rx_page_desc[que].rx_using;
- return (struct atl1e_rx_page *)&(rx_page_desc[que].rx_page[rx_using]);
+ return &(rx_page_desc[que].rx_page[rx_using]);
}
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
int *work_done, int work_to_do)
{
struct net_device *netdev = adapter->netdev;
- struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
- &adapter->rx_ring;
+ struct atl1e_rx_ring *rx_ring = &adapter->rx_ring;
struct atl1e_rx_page_desc *rx_page_desc =
(struct atl1e_rx_page_desc *) rx_ring->rx_page_desc;
struct sk_buff *skb = NULL;
@@ -1494,9 +1476,8 @@ fatal_err:
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl1e_clean - NAPI Rx polling callback
- * @adapter: board private structure
*/
static int atl1e_clean(struct napi_struct *napi, int budget)
{
@@ -1576,7 +1557,7 @@ static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
tx_ring->next_to_use = 0;
memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
- return (struct atl1e_tpd_desc *)&tx_ring->desc[next_to_use];
+ return &tx_ring->desc[next_to_use];
}
static struct atl1e_tx_buffer *
@@ -1961,7 +1942,7 @@ void atl1e_down(struct atl1e_adapter *adapter)
atl1e_clean_rx_ring(adapter);
}
-/*
+/**
* atl1e_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2007,7 +1988,7 @@ err_req_irq:
return err;
}
-/*
+/**
* atl1e_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2061,8 +2042,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
if (wufc) {
/* get link status */
- atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
- atl1e_read_phy_reg(hw, MII_BMSR, (u16 *)&mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
+ atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
mii_advertise_data = ADVERTISE_10HALF;
@@ -2086,7 +2067,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
msleep(100);
atl1e_read_phy_reg(hw, MII_BMSR,
- (u16 *)&mii_bmsr_data);
+ &mii_bmsr_data);
if (mii_bmsr_data & BMSR_LSTATUS)
break;
}
@@ -2243,7 +2224,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
return 0;
}
-/*
+/**
* atl1e_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1e_pci_tbl
@@ -2397,7 +2378,7 @@ err_dma:
return err;
}
-/*
+/**
* atl1e_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -2429,7 +2410,7 @@ static void __devexit atl1e_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-/*
+/**
* atl1e_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
* @state: The current pci connection state
@@ -2457,7 +2438,7 @@ atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
return PCI_ERS_RESULT_NEED_RESET;
}
-/*
+/**
* atl1e_io_slot_reset - called after the pci bus has been reset.
* @pdev: Pointer to PCI device
*
@@ -2484,7 +2465,7 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-/*
+/**
* atl1e_io_resume - called when traffic can start flowing again.
* @pdev: Pointer to PCI device
*
@@ -2528,7 +2509,7 @@ static struct pci_driver atl1e_driver = {
.err_handler = &atl1e_err_handler
};
-/*
+/**
* atl1e_init_module - Driver Registration Routine
*
* atl1e_init_module is the first routine called when the driver is
@@ -2539,7 +2520,7 @@ static int __init atl1e_init_module(void)
return pci_register_driver(&atl1e_driver);
}
-/*
+/**
* atl1e_exit_module - Driver Exit Cleanup Routine
*
* atl1e_exit_module is called just before the driver is removed
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
index 0ce60b6e7ef0..b5086f1e637f 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_param.c
@@ -168,7 +168,7 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
return -1;
}
-/*
+/**
* atl1e_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5d10884e5080..7bae2ad7a7c0 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -195,7 +195,7 @@ static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
return -1;
}
-/*
+/**
* atl1_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
@@ -538,7 +538,7 @@ static s32 atl1_read_mac_addr(struct atl1_hw *hw)
u16 i;
if (atl1_get_permanent_address(hw)) {
- random_ether_addr(hw->perm_mac_addr);
+ eth_random_addr(hw->perm_mac_addr);
ret = 1;
}
@@ -937,7 +937,7 @@ static void atl1_set_mac_addr(struct atl1_hw *hw)
iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
}
-/*
+/**
* atl1_sw_init - Initialize general software structures (struct atl1_adapter)
* @adapter: board private structure to initialize
*
@@ -1014,12 +1014,6 @@ static void mdio_write(struct net_device *netdev, int phy_id, int reg_num,
atl1_write_phy_reg(&adapter->hw, reg_num, val);
}
-/*
- * atl1_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
@@ -1036,7 +1030,7 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return retval;
}
-/*
+/**
* atl1_setup_mem_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -1061,7 +1055,7 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
goto err_nomem;
}
rfd_ring->buffer_info =
- (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
+ (tpd_ring->buffer_info + tpd_ring->count);
/*
* real ring DMA buffer
@@ -1147,7 +1141,7 @@ static void atl1_init_ring_ptrs(struct atl1_adapter *adapter)
atomic_set(&rrd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_clean_rx_ring - Free RFD Buffers
* @adapter: board private structure
*/
@@ -1187,7 +1181,7 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
atomic_set(&rrd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_clean_tx_ring - Free Tx Buffers
* @adapter: board private structure
*/
@@ -1227,7 +1221,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
atomic_set(&tpd_ring->next_to_clean, 0);
}
-/*
+/**
* atl1_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -1470,7 +1464,7 @@ static void set_flow_ctrl_new(struct atl1_hw *hw)
iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
}
-/*
+/**
* atl1_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -1844,7 +1838,7 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
}
}
-/*
+/**
* atl1_alloc_rx_buffers - Replace used receive buffers
* @adapter: address of board private structure
*/
@@ -2489,11 +2483,10 @@ static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
return 1;
}
-/*
+/**
* atl1_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl1_intr(int irq, void *data)
{
@@ -2574,7 +2567,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
}
-/*
+/**
* atl1_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -2693,7 +2686,7 @@ static void atl1_reset_dev_task(struct work_struct *work)
netif_device_attach(netdev);
}
-/*
+/**
* atl1_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -2727,7 +2720,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-/*
+/**
* atl1_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2762,7 +2755,7 @@ err_up:
return err;
}
-/*
+/**
* atl1_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -2930,7 +2923,7 @@ static const struct net_device_ops atl1_netdev_ops = {
#endif
};
-/*
+/**
* atl1_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl1_pci_tbl
@@ -3111,7 +3104,7 @@ err_request_regions:
return err;
}
-/*
+/**
* atl1_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -3158,7 +3151,7 @@ static struct pci_driver atl1_driver = {
.driver.pm = ATL1_PM_OPS,
};
-/*
+/**
* atl1_exit_module - Driver Exit Cleanup Routine
*
* atl1_exit_module is called just before the driver is removed
@@ -3169,7 +3162,7 @@ static void __exit atl1_exit_module(void)
pci_unregister_driver(&atl1_driver);
}
-/*
+/**
* atl1_init_module - Driver Registration Routine
*
* atl1_init_module is the first routine called when the driver is
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 6762dc406b25..57d64b80fd72 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -75,7 +75,7 @@ static void atl2_set_ethtool_ops(struct net_device *netdev);
static void atl2_check_options(struct atl2_adapter *adapter);
-/*
+/**
* atl2_sw_init - Initialize general software structures (struct atl2_adapter)
* @adapter: board private structure to initialize
*
@@ -123,7 +123,7 @@ static int __devinit atl2_sw_init(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -177,7 +177,7 @@ static void init_ring_ptrs(struct atl2_adapter *adapter)
adapter->txs_next_clear = 0;
}
-/*
+/**
* atl2_configure - Configure Transmit&Receive Unit after Reset
* @adapter: board private structure
*
@@ -283,7 +283,7 @@ static int atl2_configure(struct atl2_adapter *adapter)
return value;
}
-/*
+/**
* atl2_setup_ring_resources - allocate Tx / RX descriptor resources
* @adapter: board private structure
*
@@ -340,7 +340,7 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -350,7 +350,7 @@ static inline void atl2_irq_enable(struct atl2_adapter *adapter)
ATL2_WRITE_FLUSH(&adapter->hw);
}
-/*
+/**
* atl2_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -599,11 +599,10 @@ static inline void atl2_clear_phy_int(struct atl2_adapter *adapter)
spin_unlock(&adapter->stats_lock);
}
-/*
+/**
* atl2_intr - Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
*/
static irqreturn_t atl2_intr(int irq, void *data)
{
@@ -679,7 +678,7 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
netdev);
}
-/*
+/**
* atl2_free_ring_resources - Free Tx / RX descriptor Resources
* @adapter: board private structure
*
@@ -692,7 +691,7 @@ static void atl2_free_ring_resources(struct atl2_adapter *adapter)
adapter->ring_dma);
}
-/*
+/**
* atl2_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -798,7 +797,7 @@ static void atl2_free_irq(struct atl2_adapter *adapter)
#endif
}
-/*
+/**
* atl2_close - Disables a network interface
* @netdev: network interface device structure
*
@@ -918,7 +917,7 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-/*
+/**
* atl2_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -943,7 +942,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-/*
+/**
* atl2_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -969,12 +968,6 @@ static int atl2_set_mac(struct net_device *netdev, void *p)
return 0;
}
-/*
- * atl2_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
@@ -1011,12 +1004,6 @@ static int atl2_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return 0;
}
-/*
- * atl2_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
- */
static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
@@ -1033,7 +1020,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atl2_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
@@ -1045,7 +1032,7 @@ static void atl2_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->reset_task);
}
-/*
+/**
* atl2_watchdog - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -1070,7 +1057,7 @@ static void atl2_watchdog(unsigned long data)
}
}
-/*
+/**
* atl2_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
@@ -1274,9 +1261,8 @@ static int atl2_check_link(struct atl2_adapter *adapter)
return 0;
}
-/*
+/**
* atl2_link_chg_task - deal with link change event Out of interrupt context
- * @netdev: network interface device structure
*/
static void atl2_link_chg_task(struct work_struct *work)
{
@@ -1341,7 +1327,7 @@ static const struct net_device_ops atl2_netdev_ops = {
#endif
};
-/*
+/**
* atl2_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in atl2_pci_tbl
@@ -1501,7 +1487,7 @@ err_dma:
return err;
}
-/*
+/**
* atl2_remove - Device Removal Routine
* @pdev: PCI device information struct
*
@@ -1728,7 +1714,7 @@ static struct pci_driver atl2_driver = {
.shutdown = atl2_shutdown,
};
-/*
+/**
* atl2_init_module - Driver Registration Routine
*
* atl2_init_module is the first routine called when the driver is
@@ -1743,7 +1729,7 @@ static int __init atl2_init_module(void)
}
module_init(atl2_init_module);
-/*
+/**
* atl2_exit_module - Driver Exit Cleanup Routine
*
* atl2_exit_module is called just before the driver is removed
@@ -2360,7 +2346,7 @@ static s32 atl2_read_mac_addr(struct atl2_hw *hw)
{
if (get_permanent_address(hw)) {
/* for test */
- /* FIXME: shouldn't we use random_ether_addr() here? */
+ /* FIXME: shouldn't we use eth_random_addr() here? */
hw->perm_mac_addr[0] = 0x00;
hw->perm_mac_addr[1] = 0x13;
hw->perm_mac_addr[2] = 0x74;
@@ -2997,7 +2983,7 @@ static int __devinit atl2_validate_option(int *value, struct atl2_option *opt)
return -1;
}
-/*
+/**
* atl2_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index b4f3aa49a7fc..77ffbc4a5071 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -64,7 +64,7 @@ static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
}
-/*
+/**
* atlx_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
@@ -115,7 +115,7 @@ static void atlx_check_for_link(struct atlx_adapter *adapter)
schedule_work(&adapter->link_chg_task);
}
-/*
+/**
* atlx_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
@@ -162,7 +162,7 @@ static inline void atlx_imr_set(struct atlx_adapter *adapter,
ioread32(adapter->hw.hw_addr + REG_IMR);
}
-/*
+/**
* atlx_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
@@ -172,7 +172,7 @@ static void atlx_irq_enable(struct atlx_adapter *adapter)
adapter->int_enabled = true;
}
-/*
+/**
* atlx_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
@@ -193,7 +193,7 @@ static void atlx_clear_phy_int(struct atlx_adapter *adapter)
spin_unlock_irqrestore(&adapter->lock, flags);
}
-/*
+/**
* atlx_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 46b8b7d81633..9786c0e9890e 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -483,9 +483,11 @@ out:
static void b44_stats_update(struct b44 *bp)
{
unsigned long reg;
- u32 *val;
+ u64 *val;
val = &bp->hw_stats.tx_good_octets;
+ u64_stats_update_begin(&bp->hw_stats.syncp);
+
for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
@@ -496,6 +498,8 @@ static void b44_stats_update(struct b44 *bp)
for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
*val++ += br32(bp, reg);
}
+
+ u64_stats_update_end(&bp->hw_stats.syncp);
}
static void b44_link_report(struct b44 *bp)
@@ -656,7 +660,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
dma_unmap_single(bp->sdev->dma_dev, mapping,
RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
- skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+ skb = alloc_skb(RX_PKT_BUF_SZ, GFP_ATOMIC | GFP_DMA);
if (skb == NULL)
return -ENOMEM;
mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
@@ -967,7 +971,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_unmap_single(bp->sdev->dma_dev, mapping, len,
DMA_TO_DEVICE);
- bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+ bounce_skb = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb)
goto err_out;
@@ -1635,44 +1639,49 @@ static int b44_close(struct net_device *dev)
return 0;
}
-static struct net_device_stats *b44_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *b44_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *nstat)
{
struct b44 *bp = netdev_priv(dev);
- struct net_device_stats *nstat = &dev->stats;
struct b44_hw_stats *hwstat = &bp->hw_stats;
-
- /* Convert HW stats into netdevice stats. */
- nstat->rx_packets = hwstat->rx_pkts;
- nstat->tx_packets = hwstat->tx_pkts;
- nstat->rx_bytes = hwstat->rx_octets;
- nstat->tx_bytes = hwstat->tx_octets;
- nstat->tx_errors = (hwstat->tx_jabber_pkts +
- hwstat->tx_oversize_pkts +
- hwstat->tx_underruns +
- hwstat->tx_excessive_cols +
- hwstat->tx_late_cols);
- nstat->multicast = hwstat->tx_multicast_pkts;
- nstat->collisions = hwstat->tx_total_cols;
-
- nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
- hwstat->rx_undersize);
- nstat->rx_over_errors = hwstat->rx_missed_pkts;
- nstat->rx_frame_errors = hwstat->rx_align_errs;
- nstat->rx_crc_errors = hwstat->rx_crc_errs;
- nstat->rx_errors = (hwstat->rx_jabber_pkts +
- hwstat->rx_oversize_pkts +
- hwstat->rx_missed_pkts +
- hwstat->rx_crc_align_errs +
- hwstat->rx_undersize +
- hwstat->rx_crc_errs +
- hwstat->rx_align_errs +
- hwstat->rx_symbol_errs);
-
- nstat->tx_aborted_errors = hwstat->tx_underruns;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&hwstat->syncp);
+
+ /* Convert HW stats into rtnl_link_stats64 stats. */
+ nstat->rx_packets = hwstat->rx_pkts;
+ nstat->tx_packets = hwstat->tx_pkts;
+ nstat->rx_bytes = hwstat->rx_octets;
+ nstat->tx_bytes = hwstat->tx_octets;
+ nstat->tx_errors = (hwstat->tx_jabber_pkts +
+ hwstat->tx_oversize_pkts +
+ hwstat->tx_underruns +
+ hwstat->tx_excessive_cols +
+ hwstat->tx_late_cols);
+ nstat->multicast = hwstat->tx_multicast_pkts;
+ nstat->collisions = hwstat->tx_total_cols;
+
+ nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+ hwstat->rx_undersize);
+ nstat->rx_over_errors = hwstat->rx_missed_pkts;
+ nstat->rx_frame_errors = hwstat->rx_align_errs;
+ nstat->rx_crc_errors = hwstat->rx_crc_errs;
+ nstat->rx_errors = (hwstat->rx_jabber_pkts +
+ hwstat->rx_oversize_pkts +
+ hwstat->rx_missed_pkts +
+ hwstat->rx_crc_align_errs +
+ hwstat->rx_undersize +
+ hwstat->rx_crc_errs +
+ hwstat->rx_align_errs +
+ hwstat->rx_symbol_errs);
+
+ nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
- /* Carrier lost counter seems to be broken for some devices */
- nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
+ /* Carrier lost counter seems to be broken for some devices */
+ nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif
+ } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
return nstat;
}
@@ -1993,17 +2002,24 @@ static void b44_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct b44 *bp = netdev_priv(dev);
- u32 *val = &bp->hw_stats.tx_good_octets;
+ struct b44_hw_stats *hwstat = &bp->hw_stats;
+ u64 *data_src, *data_dst;
+ unsigned int start;
u32 i;
spin_lock_irq(&bp->lock);
-
b44_stats_update(bp);
+ spin_unlock_irq(&bp->lock);
- for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
- *data++ = *val++;
+ do {
+ data_src = &hwstat->tx_good_octets;
+ data_dst = data;
+ start = u64_stats_fetch_begin_bh(&hwstat->syncp);
- spin_unlock_irq(&bp->lock);
+ for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
+ *data_dst++ = *data_src++;
+
+ } while (u64_stats_fetch_retry_bh(&hwstat->syncp, start));
}
static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2113,7 +2129,7 @@ static const struct net_device_ops b44_netdev_ops = {
.ndo_open = b44_open,
.ndo_stop = b44_close,
.ndo_start_xmit = b44_start_xmit,
- .ndo_get_stats = b44_get_stats,
+ .ndo_get_stats64 = b44_get_stats64,
.ndo_set_rx_mode = b44_set_rx_mode,
.ndo_set_mac_address = b44_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
index e1905a49279f..8993d72f0420 100644
--- a/drivers/net/ethernet/broadcom/b44.h
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -338,9 +338,10 @@ struct ring_info {
* the layout
*/
struct b44_hw_stats {
-#define _B44(x) u32 x;
+#define _B44(x) u64 x;
B44_STAT_REG_DECLARE
#undef _B44
+ struct u64_stats_sync syncp;
};
struct ssb_device;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index ac7b74488531..79cebd8525ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
@@ -57,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.1"
-#define DRV_MODULE_RELDATE "Dec 18, 2011"
+#define DRV_MODULE_VERSION "2.2.3"
+#define DRV_MODULE_RELDATE "June 27, 2012"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -872,8 +873,7 @@ bnx2_alloc_mem(struct bnx2 *bp)
bnapi = &bp->bnx2_napi[i];
- sblk = (void *) (status_blk +
- BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+ sblk = (status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
bnapi->status_blk.msix = sblk;
bnapi->hw_tx_cons_ptr =
&sblk->status_tx_quick_consumer_index;
@@ -1972,22 +1972,26 @@ bnx2_remote_phy_event(struct bnx2 *bp)
switch (speed) {
case BNX2_LINK_STATUS_10HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_10FULL:
bp->line_speed = SPEED_10;
break;
case BNX2_LINK_STATUS_100HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_100BASE_T4:
case BNX2_LINK_STATUS_100FULL:
bp->line_speed = SPEED_100;
break;
case BNX2_LINK_STATUS_1000HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_1000FULL:
bp->line_speed = SPEED_1000;
break;
case BNX2_LINK_STATUS_2500HALF:
bp->duplex = DUPLEX_HALF;
+ /* fall through */
case BNX2_LINK_STATUS_2500FULL:
bp->line_speed = SPEED_2500;
break;
@@ -2473,6 +2477,7 @@ bnx2_dump_mcp_state(struct bnx2 *bp)
bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
pr_cont(" condition[%08x]\n",
bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
+ DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
DP_SHMEM_LINE(bp, 0x3cc);
DP_SHMEM_LINE(bp, 0x3dc);
DP_SHMEM_LINE(bp, 0x3ec);
@@ -5372,7 +5377,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
int k, last;
if (skb == NULL) {
- j++;
+ j = NEXT_TX_BD(j);
continue;
}
@@ -5384,8 +5389,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf->skb = NULL;
last = tx_buf->nr_frags;
- j++;
- for (k = 0; k < last; k++, j++) {
+ j = NEXT_TX_BD(j);
+ for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
@@ -6245,7 +6250,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
static int
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
- int cpus = num_online_cpus();
+ int cpus = netif_get_num_default_rss_queues();
int msix_vecs;
if (!bp->num_req_rx_rings)
@@ -6383,6 +6388,7 @@ bnx2_reset_task(struct work_struct *work)
{
struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
int rc;
+ u16 pcicmd;
rtnl_lock();
if (!netif_running(bp->dev)) {
@@ -6392,6 +6398,12 @@ bnx2_reset_task(struct work_struct *work)
bnx2_netif_stop(bp, true);
+ pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
+ if (!(pcicmd & PCI_COMMAND_MEMORY)) {
+ /* in case PCI block has reset */
+ pci_restore_state(bp->pdev);
+ pci_save_state(bp->pdev);
+ }
rc = bnx2_init_nic(bp, 1);
if (rc) {
netdev_err(bp->dev, "failed to reset NIC, closing\n");
@@ -6406,6 +6418,75 @@ bnx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
+#define BNX2_FTQ_ENTRY(ftq) { __stringify(ftq##FTQ_CTL), BNX2_##ftq##FTQ_CTL }
+
+static void
+bnx2_dump_ftq(struct bnx2 *bp)
+{
+ int i;
+ u32 reg, bdidx, cid, valid;
+ struct net_device *dev = bp->dev;
+ static const struct ftq_reg {
+ char *name;
+ u32 off;
+ } ftq_arr[] = {
+ BNX2_FTQ_ENTRY(RV2P_P),
+ BNX2_FTQ_ENTRY(RV2P_T),
+ BNX2_FTQ_ENTRY(RV2P_M),
+ BNX2_FTQ_ENTRY(TBDR_),
+ BNX2_FTQ_ENTRY(TDMA_),
+ BNX2_FTQ_ENTRY(TXP_),
+ BNX2_FTQ_ENTRY(TXP_),
+ BNX2_FTQ_ENTRY(TPAT_),
+ BNX2_FTQ_ENTRY(RXP_C),
+ BNX2_FTQ_ENTRY(RXP_),
+ BNX2_FTQ_ENTRY(COM_COMXQ_),
+ BNX2_FTQ_ENTRY(COM_COMTQ_),
+ BNX2_FTQ_ENTRY(COM_COMQ_),
+ BNX2_FTQ_ENTRY(CP_CPQ_),
+ };
+
+ netdev_err(dev, "<--- start FTQ dump --->\n");
+ for (i = 0; i < ARRAY_SIZE(ftq_arr); i++)
+ netdev_err(dev, "%s %08x\n", ftq_arr[i].name,
+ bnx2_reg_rd_ind(bp, ftq_arr[i].off));
+
+ netdev_err(dev, "CPU states:\n");
+ for (reg = BNX2_TXP_CPU_MODE; reg <= BNX2_CP_CPU_MODE; reg += 0x40000)
+ netdev_err(dev, "%06x mode %x state %x evt_mask %x pc %x pc %x instr %x\n",
+ reg, bnx2_reg_rd_ind(bp, reg),
+ bnx2_reg_rd_ind(bp, reg + 4),
+ bnx2_reg_rd_ind(bp, reg + 8),
+ bnx2_reg_rd_ind(bp, reg + 0x1c),
+ bnx2_reg_rd_ind(bp, reg + 0x1c),
+ bnx2_reg_rd_ind(bp, reg + 0x20));
+
+ netdev_err(dev, "<--- end FTQ dump --->\n");
+ netdev_err(dev, "<--- start TBDC dump --->\n");
+ netdev_err(dev, "TBDC free cnt: %ld\n",
+ REG_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
+ netdev_err(dev, "LINE CID BIDX CMD VALIDS\n");
+ for (i = 0; i < 0x20; i++) {
+ int j = 0;
+
+ REG_WR(bp, BNX2_TBDC_BD_ADDR, i);
+ REG_WR(bp, BNX2_TBDC_CAM_OPCODE,
+ BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ);
+ REG_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
+ while ((REG_RD(bp, BNX2_TBDC_COMMAND) &
+ BNX2_TBDC_COMMAND_CMD_REG_ARB) && j < 100)
+ j++;
+
+ cid = REG_RD(bp, BNX2_TBDC_CID);
+ bdidx = REG_RD(bp, BNX2_TBDC_BIDX);
+ valid = REG_RD(bp, BNX2_TBDC_CAM_OPCODE);
+ netdev_err(dev, "%02x %06x %04lx %02x [%x]\n",
+ i, cid, bdidx & BNX2_TBDC_BDIDX_BDIDX,
+ bdidx >> 24, (valid >> 8) & 0x0ff);
+ }
+ netdev_err(dev, "<--- end TBDC dump --->\n");
+}
+
static void
bnx2_dump_state(struct bnx2 *bp)
{
@@ -6435,6 +6516,7 @@ bnx2_tx_timeout(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
+ bnx2_dump_ftq(bp);
bnx2_dump_state(bp);
bnx2_dump_mcp_state(bp);
@@ -6628,6 +6710,7 @@ bnx2_close(struct net_device *dev)
bnx2_disable_int_sync(bp);
bnx2_napi_disable(bp);
+ netif_tx_disable(dev);
del_timer_sync(&bp->timer);
bnx2_shutdown_chip(bp);
bnx2_free_irq(bp);
@@ -7832,7 +7915,7 @@ bnx2_get_5709_media(struct bnx2 *bp)
else
strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
- if (PCI_FUNC(bp->pdev->devfn) == 0) {
+ if (bp->func == 0) {
switch (strap) {
case 0x4:
case 0x5:
@@ -8131,9 +8214,12 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
+ if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
+ bp->func = 1;
+
if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
BNX2_SHM_HDR_SIGNATURE_SIG) {
- u32 off = PCI_FUNC(pdev->devfn) << 2;
+ u32 off = bp->func << 2;
bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
} else
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index dc06bda73be7..af6451dec295 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -4642,6 +4642,47 @@ struct l2_fhdr {
#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH (0x3ffL<<22)
+/*
+ * tbdc definition
+ * offset: 0x5400
+ */
+#define BNX2_TBDC_COMMAND 0x5400
+#define BNX2_TBDC_COMMAND_CMD_ENABLED (1UL<<0)
+#define BNX2_TBDC_COMMAND_CMD_FLUSH (1UL<<1)
+#define BNX2_TBDC_COMMAND_CMD_SOFT_RST (1UL<<2)
+#define BNX2_TBDC_COMMAND_CMD_REG_ARB (1UL<<3)
+#define BNX2_TBDC_COMMAND_WRCHK_RANGE_ERROR (1UL<<4)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ONES_ERROR (1UL<<5)
+#define BNX2_TBDC_COMMAND_WRCHK_ALL_ZEROS_ERROR (1UL<<6)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ONES_ERROR (1UL<<7)
+#define BNX2_TBDC_COMMAND_WRCHK_ANY_ZEROS_ERROR (1UL<<8)
+
+#define BNX2_TBDC_STATUS 0x5404
+#define BNX2_TBDC_STATUS_FREE_CNT (0x3fUL<<0)
+
+#define BNX2_TBDC_BD_ADDR 0x5424
+
+#define BNX2_TBDC_BIDX 0x542c
+#define BNX2_TBDC_BDIDX_BDIDX (0xffffUL<<0)
+#define BNX2_TBDC_BDIDX_CMD (0xffUL<<24)
+
+#define BNX2_TBDC_CID 0x5430
+
+#define BNX2_TBDC_CAM_OPCODE 0x5434
+#define BNX2_TBDC_CAM_OPCODE_OPCODE (0x7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_SEARCH (0UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CACHE_WRITE (1UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_INVALIDATE (2UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_WRITE (4UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_CAM_READ (5UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_WRITE (6UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_OPCODE_RAM_READ (7UL<<0)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_BDIDX (1UL<<4)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CID (1UL<<5)
+#define BNX2_TBDC_CAM_OPCODE_SMASK_CMD (1UL<<6)
+#define BNX2_TBDC_CAM_OPCODE_WMT_FAILED (1UL<<7)
+#define BNX2_TBDC_CAM_OPCODE_CAM_VALIDS (0xffUL<<8)
+
/*
* tdma_reg definition
@@ -6930,6 +6971,8 @@ struct bnx2 {
struct bnx2_irq irq_tbl[BNX2_MAX_MSIX_VEC];
int irq_nvecs;
+ u8 func;
+
u8 num_tx_rings;
u8 num_rx_rings;
@@ -7314,6 +7357,8 @@ struct bnx2_rv2p_fw_file {
#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \
(msg))
+#define BNX2_BC_RESET_TYPE 0x000001c0
+
#define BNX2_BC_STATE 0x000001c4
#define BNX2_BC_STATE_ERR_MASK 0x0000ff00
#define BNX2_BC_STATE_SIGN 0x42530000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 7de824184979..77bcd4cb4ffb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.72.50-0"
-#define DRV_MODULE_RELDATE "2012/04/23"
+#define DRV_MODULE_VERSION "1.72.51-0"
+#define DRV_MODULE_RELDATE "2012/06/18"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -51,6 +51,7 @@
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x_sp.h"
@@ -248,13 +249,12 @@ enum {
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
-#define BNX2X_CNIC_START_ETH_CID 48
-enum {
+#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\
+ (bp)->max_cos)
/* iSCSI L2 */
- BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
+#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp))
/* FCoE L2 */
- BNX2X_FCOE_ETH_CID,
-};
+#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1)
/** Additional rings budgeting */
#ifdef BCM_CNIC
@@ -276,29 +276,30 @@ enum {
#define FIRST_TX_ONLY_COS_INDEX 1
#define FIRST_TX_COS_INDEX 0
-/* defines for decodeing the fastpath index and the cos index out of the
- * transmission queue index
- */
-#define MAX_TXQS_PER_COS FP_SB_MAX_E1x
-
-#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS)
-#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS)
-
/* rules for calculating the cids of tx-only connections */
-#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS)
-#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS)
+#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp))
+#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \
+ (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
/* fp index inside class of service range */
-#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS)
-
-/*
- * 0..15 eth cos0
- * 16..31 eth cos1 if applicable
- * 32..47 eth cos2 If applicable
- * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+#define FP_COS_TO_TXQ(fp, cos, bp) \
+ ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp))
+
+/* Indexes for transmission queues array:
+ * txdata for RSS i CoS j is at location i + (j * num of RSS)
+ * txdata for FCoE (if exist) is at location max cos * num of RSS
+ * txdata for FWD (if exist) is one location after FCoE
+ * txdata for OOO (if exist) is one location after FWD
*/
-#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos)
-#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
+enum {
+ FCOE_TXQ_IDX_OFFSET,
+ FWD_TXQ_IDX_OFFSET,
+ OOO_TXQ_IDX_OFFSET,
+};
+#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos)
+#ifdef BCM_CNIC
+#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET)
+#endif
/* fast path */
/*
@@ -453,6 +454,7 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
+ bool l4_rxhash;
u16 gro_size;
u16 full_page;
};
@@ -481,6 +483,8 @@ struct bnx2x_fp_txdata {
__le16 *tx_cons_sb;
int txq_index;
+ struct bnx2x_fastpath *parent_fp;
+ int tx_ring_size;
};
enum bnx2x_tpa_mode_t {
@@ -507,7 +511,7 @@ struct bnx2x_fastpath {
enum bnx2x_tpa_mode_t mode;
u8 max_cos; /* actual number of active tx coses */
- struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
+ struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS];
struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */
struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */
@@ -547,51 +551,45 @@ struct bnx2x_fastpath {
rx_calls;
/* TPA related */
- struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
+ struct bnx2x_agg_info *tpa_info;
u8 disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
u64 tpa_queue_used;
#endif
-
- struct tstorm_per_queue_stats old_tclient;
- struct ustorm_per_queue_stats old_uclient;
- struct xstorm_per_queue_stats old_xclient;
- struct bnx2x_eth_q_stats eth_q_stats;
- struct bnx2x_eth_q_stats_old eth_q_stats_old;
-
/* The size is calculated using the following:
sizeof name field from netdev structure +
4 ('-Xx-' string) +
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
-
- /* MACs object */
- struct bnx2x_vlan_mac_obj mac_obj;
-
- /* Queue State object */
- struct bnx2x_queue_sp_obj q_obj;
-
};
-#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
+#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
+#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index])
+#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
+#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
-/* FCoE L2 `fastpath' entry is right after the eth entries */
-#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
-#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
-#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
-#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
- txdata[FIRST_TX_COS_INDEX].var)
+#define FCOE_IDX_OFFSET 0
+
+#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \
+ FCOE_IDX_OFFSET)
+#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)])
+#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)])
+#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var)
+#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \
+ txdata_ptr[FIRST_TX_COS_INDEX] \
+ ->var)
#define IS_ETH_FP(fp) (fp->index < \
BNX2X_NUM_ETH_QUEUES(fp->bp))
#ifdef BCM_CNIC
-#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
-#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
+#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp))
+#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp))
#else
#define IS_FCOE_FP(fp) false
#define IS_FCOE_IDX(idx) false
@@ -616,6 +614,22 @@ struct bnx2x_fastpath {
#define TX_BD(x) ((x) & MAX_TX_BD)
#define TX_BD_POFF(x) ((x) & MAX_TX_DESC_CNT)
+/* number of NEXT_PAGE descriptors may be required during placement */
+#define NEXT_CNT_PER_TX_PKT(bds) \
+ (((bds) + MAX_TX_DESC_CNT - 1) / \
+ MAX_TX_DESC_CNT * NEXT_PAGE_TX_DESC_CNT)
+/* max BDs per tx packet w/o next_pages:
+ * START_BD - describes packed
+ * START_BD(splitted) - includes unpaged data segment for GSO
+ * PARSING_BD - for TSO and CSUM data
+ * Frag BDs - decribes pages for frags
+ */
+#define BDS_PER_TX_PKT 3
+#define MAX_BDS_PER_TX_PKT (MAX_SKB_FRAGS + BDS_PER_TX_PKT)
+/* max BDs per tx packet including next pages */
+#define MAX_DESC_PER_TX_PKT (MAX_BDS_PER_TX_PKT + \
+ NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))
+
/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
#define NUM_RX_RINGS 8
#define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
@@ -805,8 +819,11 @@ struct bnx2x_common {
#define CHIP_NUM_57810_MF 0x16ae
#define CHIP_NUM_57811 0x163d
#define CHIP_NUM_57811_MF 0x163e
-#define CHIP_NUM_57840 0x168d
-#define CHIP_NUM_57840_MF 0x16ab
+#define CHIP_NUM_57840_OBSOLETE 0x168d
+#define CHIP_NUM_57840_MF_OBSOLETE 0x16ab
+#define CHIP_NUM_57840_4_10 0x16a1
+#define CHIP_NUM_57840_2_20 0x16a2
+#define CHIP_NUM_57840_MF 0x16a4
#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
#define CHIP_IS_57711(bp) (CHIP_NUM(bp) == CHIP_NUM_57711)
#define CHIP_IS_57711E(bp) (CHIP_NUM(bp) == CHIP_NUM_57711E)
@@ -818,8 +835,12 @@ struct bnx2x_common {
#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
-#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
-#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
+#define CHIP_IS_57840(bp) \
+ ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_2_20) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_OBSOLETE))
+#define CHIP_IS_57840_MF(bp) ((CHIP_NUM(bp) == CHIP_NUM_57840_MF) || \
+ (CHIP_NUM(bp) == CHIP_NUM_57840_MF_OBSOLETE))
#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
CHIP_IS_57711E(bp))
#define CHIP_IS_E2(bp) (CHIP_IS_57712(bp) || \
@@ -978,8 +999,8 @@ union cdu_context {
};
/* CDU host DB constants */
-#define CDU_ILT_PAGE_SZ_HW 3
-#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
+#define CDU_ILT_PAGE_SZ_HW 2
+#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */
#define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
#ifdef BCM_CNIC
@@ -1182,11 +1203,31 @@ struct bnx2x_prev_path_list {
struct list_head list;
};
+struct bnx2x_sp_objs {
+ /* MACs object */
+ struct bnx2x_vlan_mac_obj mac_obj;
+
+ /* Queue State object */
+ struct bnx2x_queue_sp_obj q_obj;
+};
+
+struct bnx2x_fp_stats {
+ struct tstorm_per_queue_stats old_tclient;
+ struct ustorm_per_queue_stats old_uclient;
+ struct xstorm_per_queue_stats old_xclient;
+ struct bnx2x_eth_q_stats eth_q_stats;
+ struct bnx2x_eth_q_stats_old eth_q_stats_old;
+};
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
*/
struct bnx2x_fastpath *fp;
+ struct bnx2x_sp_objs *sp_objs;
+ struct bnx2x_fp_stats *fp_stats;
+ struct bnx2x_fp_txdata *bnx2x_txq;
+ int bnx2x_txq_size;
void __iomem *regview;
void __iomem *doorbells;
u16 db_size;
@@ -1301,7 +1342,9 @@ struct bnx2x {
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
#define BC_SUPPORTS_PFC_STATS (1 << 17)
+#define BC_SUPPORTS_FCOE_FEATURES (1 << 19)
#define USING_SINGLE_MSIX_FLAG (1 << 20)
+#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1377,6 +1420,7 @@ struct bnx2x {
#define BNX2X_MAX_COS 3
#define BNX2X_MAX_TX_COS 2
int num_queues;
+ int num_napi_queues;
int disable_tpa;
u32 rx_mode;
@@ -1389,6 +1433,7 @@ struct bnx2x {
u8 igu_dsb_id;
u8 igu_base_sb;
u8 igu_sb_cnt;
+
dma_addr_t def_status_blk_mapping;
struct bnx2x_slowpath *slowpath;
@@ -1420,7 +1465,11 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
- struct hw_context context;
+ /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ * context size we need 8 ILT entries.
+ */
+#define ILT_MAX_L2_LINES 8
+ struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
#define BP_ILT(bp) ((bp)->ilt)
@@ -1433,13 +1482,14 @@ struct bnx2x {
/*
* Maximum CID count that might be required by the bnx2x:
- * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI
*/
-#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
- NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \
+ + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \
+ + NON_ETH_CONTEXT_USE + CNIC_PRESENT)
#define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
ILT_PAGE_CIDS))
-#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
int qm_cid_count;
@@ -1598,6 +1648,8 @@ struct bnx2x {
extern int num_queues;
#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \
+ NON_ETH_CONTEXT_USE)
#define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp)
#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
@@ -1656,6 +1708,9 @@ struct bnx2x_func_init_params {
continue; \
else
+#define for_each_napi_rx_queue(bp, var) \
+ for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
+
/* Skip OOO FP */
#define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
@@ -1709,15 +1764,6 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
/**
- * Deletes all MACs configured for the specific MAC object.
- *
- * @param bp Function driver instance
- * @param mac_obj MAC object to cleanup
- *
- * @return zero if all MACs were cleaned
- */
-
-/**
* bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
*
* @bp: driver handle
@@ -1817,6 +1863,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define LOAD_NORMAL 0
#define LOAD_OPEN 1
#define LOAD_DIAG 2
+#define LOAD_LOOPBACK_EXT 3
#define UNLOAD_NORMAL 0
#define UNLOAD_CLOSE 1
#define UNLOAD_RECOVERY 2
@@ -1899,13 +1946,17 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define PCICFG_LINK_SPEED 0xf0000
#define PCICFG_LINK_SPEED_SHIFT 16
-
-#define BNX2X_NUM_TESTS 7
+#define BNX2X_NUM_TESTS_SF 7
+#define BNX2X_NUM_TESTS_MF 3
+#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \
+ BNX2X_NUM_TESTS_SF)
#define BNX2X_PHY_LOOPBACK 0
#define BNX2X_MAC_LOOPBACK 1
+#define BNX2X_EXT_LOOPBACK 2
#define BNX2X_PHY_LOOPBACK_FAILED 1
#define BNX2X_MAC_LOOPBACK_FAILED 2
+#define BNX2X_EXT_LOOPBACK_FAILED 3
#define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \
BNX2X_PHY_LOOPBACK_FAILED)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8098eea9704d..e879e19eb0d6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -40,12 +40,19 @@
* Makes sure the contents of the bp->fp[to].napi is kept
* intact. This is done by first copying the napi struct from
* the target to the source, and then mem copying the entire
- * source onto the target
+ * source onto the target. Update txdata pointers and related
+ * content.
*/
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
struct bnx2x_fastpath *from_fp = &bp->fp[from];
struct bnx2x_fastpath *to_fp = &bp->fp[to];
+ struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
+ struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
+ struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
+ struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
+ int old_max_eth_txqs, new_max_eth_txqs;
+ int old_txdata_index = 0, new_txdata_index = 0;
/* Copy the NAPI object as it has been already initialized */
from_fp->napi = to_fp->napi;
@@ -53,6 +60,30 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
/* Move bnx2x_fastpath contents */
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
+
+ /* move sp_objs contents as well, as their indices match fp ones */
+ memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
+
+ /* move fp_stats contents as well, as their indices match fp ones */
+ memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
+
+ /* Update txdata pointers in fp and move txdata content accordingly:
+ * Each fp consumes 'max_cos' txdata structures, so the index should be
+ * decremented by max_cos x delta.
+ */
+
+ old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
+ new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
+ (bp)->max_cos;
+ if (from == FCOE_IDX(bp)) {
+ old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
+ }
+
+ memcpy(&bp->bnx2x_txq[old_txdata_index],
+ &bp->bnx2x_txq[new_txdata_index],
+ sizeof(struct bnx2x_fp_txdata));
+ to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -190,7 +221,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
if ((netif_tx_queue_stopped(txq)) &&
(bp->state == BNX2X_STATE_OPEN) &&
- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
+ (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
netif_tx_wake_queue(txq);
__netif_tx_unlock(txq);
@@ -264,12 +295,20 @@ static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
* CQE (calculated by HW).
*/
static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
- const struct eth_fast_path_rx_cqe *cqe)
+ const struct eth_fast_path_rx_cqe *cqe,
+ bool *l4_rxhash)
{
/* Set Toeplitz hash from CQE */
if ((bp->dev->features & NETIF_F_RXHASH) &&
- (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
+ enum eth_rss_hash_type htype;
+
+ htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
+ *l4_rxhash = (htype == TCP_IPV4_HASH_TYPE) ||
+ (htype == TCP_IPV6_HASH_TYPE);
return le32_to_cpu(cqe->rss_hash_result);
+ }
+ *l4_rxhash = false;
return 0;
}
@@ -323,7 +362,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
- tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->l4_rxhash);
if (fp->mode == TPA_MODE_GRO) {
u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
tpa_info->full_page =
@@ -479,7 +518,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
where we are and drop the whole packet */
err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
if (unlikely(err)) {
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
return err;
}
@@ -558,6 +597,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
skb->rxhash = tpa_info->rxhash;
+ skb->l4_rxhash = tpa_info->l4_rxhash;
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -584,7 +624,7 @@ drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
"Failed to allocate or map a new skb - dropping packet!\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
}
static int bnx2x_alloc_rx_data(struct bnx2x *bp,
@@ -617,8 +657,10 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
return 0;
}
-static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
- struct bnx2x_fastpath *fp)
+static
+void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
+ struct bnx2x_fastpath *fp,
+ struct bnx2x_eth_q_stats *qstats)
{
/* Do nothing if no IP/L4 csum validation was done */
@@ -632,7 +674,7 @@ static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
if (cqe->fast_path_cqe.type_error_flags &
(ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
- fp->eth_q_stats.hw_csum_err++;
+ qstats->hw_csum_err++;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -679,6 +721,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad, queue;
u8 *data;
+ bool l4_rxhash;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -776,7 +819,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR flags %x rx packet %u\n",
cqe_fp_flags, sw_comp_cons);
- fp->eth_q_stats.rx_err_discard_pkt++;
+ bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
goto reuse_rx;
}
@@ -789,7 +832,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (skb == NULL) {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
goto reuse_rx;
}
memcpy(skb->data, data + pad, len);
@@ -803,14 +846,15 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
skb = build_skb(data, 0);
if (unlikely(!skb)) {
kfree(data);
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->
+ rx_skb_alloc_failed++;
goto next_rx;
}
skb_reserve(skb, pad);
} else {
DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
reuse_rx:
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
goto next_rx;
@@ -821,13 +865,14 @@ reuse_rx:
skb->protocol = eth_type_trans(skb, bp->dev);
/* Set Toeplitz hash for a none-LRO skb */
- skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
+ skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp, &l4_rxhash);
+ skb->l4_rxhash = l4_rxhash;
skb_checksum_none_assert(skb);
if (bp->dev->features & NETIF_F_RXCSUM)
- bnx2x_csum_validate(skb, cqe, fp);
-
+ bnx2x_csum_validate(skb, cqe, fp,
+ bnx2x_fp_qstats(bp, fp));
skb_record_rx_queue(skb, fp->rx_queue);
@@ -888,7 +933,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
- prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
@@ -1205,7 +1250,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
unsigned pkts_compl = 0, bytes_compl = 0;
u16 sw_prod = txdata->tx_pkt_prod;
@@ -1217,7 +1262,8 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
sw_cons++;
}
netdev_tx_reset_queue(
- netdev_get_tx_queue(bp->dev, txdata->txq_index));
+ netdev_get_tx_queue(bp->dev,
+ txdata->txq_index));
}
}
}
@@ -1325,7 +1371,7 @@ void bnx2x_free_irq(struct bnx2x *bp)
free_irq(bp->dev->irq, bp->dev);
}
-int __devinit bnx2x_enable_msix(struct bnx2x *bp)
+int bnx2x_enable_msix(struct bnx2x *bp)
{
int msix_vec = 0, i, rc, req_cnt;
@@ -1579,6 +1625,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
#endif
/* Add special queues */
bp->num_queues += NON_ETH_CONTEXT_USE;
+
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
}
/**
@@ -1607,8 +1655,8 @@ static int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
int rc, tx, rx;
- tx = MAX_TXQS_PER_COS * bp->max_cos;
- rx = BNX2X_NUM_ETH_QUEUES(bp);
+ tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
+ rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE;
/* account for fcoe queue */
#ifdef BCM_CNIC
@@ -1666,14 +1714,13 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
static int bnx2x_init_rss_pf(struct bnx2x *bp)
{
int i;
- u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
/* Prepare the initial contents fo the indirection table if RSS is
* enabled
*/
- for (i = 0; i < sizeof(ind_table); i++)
- ind_table[i] =
+ for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
+ bp->rss_conf_obj.ind_table[i] =
bp->fp->cl_id +
ethtool_rxfh_indir_default(i, num_eth_queues);
@@ -1685,12 +1732,11 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
* For 57712 and newer on the other hand it's a per-function
* configuration.
*/
- return bnx2x_config_rss_eth(bp, ind_table,
- bp->port.pmf || !CHIP_IS_E1x(bp));
+ return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- u8 *ind_table, bool config_hash)
+ bool config_hash)
{
struct bnx2x_config_rss_params params = {NULL};
int i;
@@ -1713,11 +1759,15 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
- memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+ memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
if (config_hash) {
/* RSS keys */
@@ -1754,7 +1804,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
struct bnx2x_mcast_ramrod_params rparam = {NULL};
- struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
/***************** Cleanup MACs' object first *************************/
@@ -1765,7 +1815,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
/* Clean ETH primary MAC */
__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
- rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+ rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc != 0)
BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
@@ -1851,11 +1901,16 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
static void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index];
+
+ int cos;
struct napi_struct orig_napi = fp->napi;
+ struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
/* bzero bnx2x_fastpath contents */
- if (bp->stats_init)
+ if (bp->stats_init) {
+ memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
memset(fp, 0, sizeof(*fp));
- else {
+ } else {
/* Keep Queue statistics */
struct bnx2x_eth_q_stats *tmp_eth_q_stats;
struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
@@ -1863,26 +1918,27 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
GFP_KERNEL);
if (tmp_eth_q_stats)
- memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+ memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats,
sizeof(struct bnx2x_eth_q_stats));
tmp_eth_q_stats_old =
kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
GFP_KERNEL);
if (tmp_eth_q_stats_old)
- memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+ memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old,
sizeof(struct bnx2x_eth_q_stats_old));
+ memset(fp->tpa_info, 0, sizeof(*fp->tpa_info));
memset(fp, 0, sizeof(*fp));
if (tmp_eth_q_stats) {
- memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
- sizeof(struct bnx2x_eth_q_stats));
+ memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats,
+ sizeof(struct bnx2x_eth_q_stats));
kfree(tmp_eth_q_stats);
}
if (tmp_eth_q_stats_old) {
- memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+ memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old,
sizeof(struct bnx2x_eth_q_stats_old));
kfree(tmp_eth_q_stats_old);
}
@@ -1891,7 +1947,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Restore the NAPI object as it has been already initialized */
fp->napi = orig_napi;
-
+ fp->tpa_info = orig_tpa_info;
fp->bp = bp;
fp->index = index;
if (IS_ETH_FP(fp))
@@ -1900,6 +1956,16 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* Special queues support only one CoS */
fp->max_cos = 1;
+ /* Init txdata pointers */
+#ifdef BCM_CNIC
+ if (IS_FCOE_FP(fp))
+ fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
+#endif
+ if (IS_ETH_FP(fp))
+ for_each_cos_in_tx_queue(fp, cos)
+ fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
+ BNX2X_NUM_ETH_QUEUES(bp) + index];
+
/*
* set the tpa flag for each queue. The tpa flag determines the queue
* minimal size so it must be set prior to queue memory allocation
@@ -1949,11 +2015,13 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/*
* Zero fastpath structures preserving invariants like napi, which are
* allocated only once, fp index, max_cos, bp pointer.
- * Also set fp->disable_tpa.
+ * Also set fp->disable_tpa and txdata_ptr.
*/
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
+ memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size *
+ sizeof(struct bnx2x_fp_txdata));
/* Set the receive queues buffer size */
@@ -2176,6 +2244,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
case LOAD_DIAG:
+ case LOAD_LOOPBACK_EXT:
bp->state = BNX2X_STATE_DIAG;
break;
@@ -2195,6 +2264,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* re-read iscsi info */
bnx2x_get_iscsi_info(bp);
bnx2x_setup_cnic_irq_info(bp);
+ bnx2x_setup_cnic_info(bp);
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
@@ -2215,7 +2285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
return -EBUSY;
}
- bnx2x_dcbx_init(bp);
+ /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
+ if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
+ bnx2x_dcbx_init(bp, false);
+
return 0;
#ifndef BNX2X_STOP_ON_ERROR
@@ -2298,6 +2371,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Stop Tx */
bnx2x_tx_disable(bp);
+ netdev_reset_tc(bp->dev);
#ifdef BCM_CNIC
bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
@@ -2456,8 +2530,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
#endif
for_each_cos_in_tx_queue(fp, cos)
- if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
- bnx2x_tx_int(bp, &fp->txdata[cos]);
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
+ bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
if (bnx2x_has_rx_work(fp)) {
@@ -2834,7 +2908,6 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- struct bnx2x_fastpath *fp;
struct netdev_queue *txq;
struct bnx2x_fp_txdata *txdata;
struct sw_tx_bd *tx_buf;
@@ -2844,7 +2917,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
u32 pbd_e2_parsing_data = 0;
u16 pkt_prod, bd_prod;
- int nbd, txq_index, fp_index, txdata_index;
+ int nbd, txq_index;
dma_addr_t mapping;
u32 xmit_type = bnx2x_xmit_type(bp, skb);
int i;
@@ -2863,39 +2936,22 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
- /* decode the fastpath index and the cos index from the txq */
- fp_index = TXQ_TO_FP(txq_index);
- txdata_index = TXQ_TO_COS(txq_index);
-
-#ifdef BCM_CNIC
- /*
- * Override the above for the FCoE queue:
- * - FCoE fp entry is right after the ETH entries.
- * - FCoE L2 queue uses bp->txdata[0] only.
- */
- if (unlikely(!NO_FCOE(bp) && (txq_index ==
- bnx2x_fcoe_tx(bp, txq_index)))) {
- fp_index = FCOE_IDX;
- txdata_index = 0;
- }
-#endif
+ txdata = &bp->bnx2x_txq[txq_index];
/* enable this debug print to view the transmission queue being used
DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
txq_index, fp_index, txdata_index); */
- /* locate the fastpath and the txdata */
- fp = &bp->fp[fp_index];
- txdata = &fp->txdata[txdata_index];
-
/* enable this debug print to view the tranmission details
DP(NETIF_MSG_TX_QUEUED,
"transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
txdata->cid, fp_index, txdata_index, txdata, fp); */
if (unlikely(bnx2x_tx_avail(bp, txdata) <
- (skb_shinfo(skb)->nr_frags + 3))) {
- fp->eth_q_stats.driver_xoff++;
+ skb_shinfo(skb)->nr_frags +
+ BDS_PER_TX_PKT +
+ NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
netif_tx_stop_queue(txq);
BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
@@ -3169,7 +3225,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txdata->tx_bd_prod += nbd;
- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
+ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
netif_tx_stop_queue(txq);
/* paired memory barrier is in bnx2x_tx_int(), we have to keep
@@ -3177,8 +3233,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
* fp->bd_tx_cons */
smp_mb();
- fp->eth_q_stats.driver_xoff++;
- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
+ bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
+ if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
netif_tx_wake_queue(txq);
}
txdata->tx_pkt++;
@@ -3243,7 +3299,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* configure traffic class to transmission queue mapping */
for (cos = 0; cos < bp->max_cos; cos++) {
count = BNX2X_NUM_ETH_QUEUES(bp);
- offset = cos * MAX_TXQS_PER_COS;
+ offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
netdev_set_tc_queue(dev, cos, count, offset);
DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
"mapping tc %d to offset %d count %d\n",
@@ -3342,7 +3398,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
if (!skip_tx_queue(bp, fp_index)) {
/* fastpath tx rings: tx_buf tx_desc */
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
DP(NETIF_MSG_IFDOWN,
"freeing tx memory of fp %d cos %d cid %d\n",
@@ -3414,7 +3470,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
- fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+ bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
return i - failure_cnt;
}
@@ -3499,7 +3555,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
if (!skip_tx_queue(bp, index)) {
/* fastpath tx rings: tx_buf tx_desc */
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
DP(NETIF_MSG_IFUP,
"allocating tx memory of fp %d cos %d\n",
@@ -3582,7 +3638,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!NO_FCOE(bp))
/* FCoE */
- if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
/* we will fail load process instead of mark
* NO_FCOE_FLAG
*/
@@ -3607,7 +3663,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
*/
/* move FCoE fp even NO_FCOE_FLAG is on */
- bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+ bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
#endif
bp->num_queues -= delta;
BNX2X_ERR("Adjusted num of queues from %d to %d\n",
@@ -3619,7 +3675,11 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp)
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
+ kfree(bp->fp->tpa_info);
kfree(bp->fp);
+ kfree(bp->sp_objs);
+ kfree(bp->fp_stats);
+ kfree(bp->bnx2x_txq);
kfree(bp->msix_table);
kfree(bp->ilt);
}
@@ -3630,6 +3690,8 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
struct msix_entry *tbl;
struct bnx2x_ilt *ilt;
int msix_table_size = 0;
+ int fp_array_size;
+ int i;
/*
* The biggest MSI-X table we might need is as a maximum number of fast
@@ -3638,12 +3700,44 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
- sizeof(*fp), GFP_KERNEL);
+ fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE;
+ BNX2X_DEV_INFO("fp_array_size %d", fp_array_size);
+
+ fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
+ for (i = 0; i < fp_array_size; i++) {
+ fp[i].tpa_info =
+ kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
+ sizeof(struct bnx2x_agg_info), GFP_KERNEL);
+ if (!(fp[i].tpa_info))
+ goto alloc_err;
+ }
+
bp->fp = fp;
+ /* allocate sp objs */
+ bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs),
+ GFP_KERNEL);
+ if (!bp->sp_objs)
+ goto alloc_err;
+
+ /* allocate fp_stats */
+ bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats),
+ GFP_KERNEL);
+ if (!bp->fp_stats)
+ goto alloc_err;
+
+ /* Allocate memory for the transmission queues array */
+ bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS;
+#ifdef BCM_CNIC
+ bp->bnx2x_txq_size++;
+#endif
+ bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size,
+ sizeof(struct bnx2x_fp_txdata), GFP_KERNEL);
+ if (!bp->bnx2x_txq)
+ goto alloc_err;
+
/* msix table */
tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 7cd99b75347a..dfa757e74296 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -29,6 +29,7 @@
extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
extern int num_queues;
+extern int int_mode;
/************************ Macros ********************************/
#define BNX2X_PCI_FREE(x, y, size) \
@@ -89,12 +90,12 @@ void bnx2x_send_unload_done(struct bnx2x *bp);
* bnx2x_config_rss_pf - configure RSS parameters in a PF.
*
* @bp: driver handle
- * @rss_obj RSS object to use
+ * @rss_obj: RSS object to use
* @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
*/
int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- u8 *ind_table, bool config_hash);
+ bool config_hash);
/**
* bnx2x__init_func_obj - init function object
@@ -244,6 +245,14 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
* @bp: driver handle
*/
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_cnic_info - provides cnic with updated info
+ *
+ * @bp: driver handle
+ */
+void bnx2x_setup_cnic_info(struct bnx2x *bp);
+
#endif
/**
@@ -409,7 +418,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp);
*
* @bp: driver handle
*/
-void bnx2x_dcbx_init(struct bnx2x *bp);
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);
/**
* bnx2x_set_power_state - set power state to the requested value.
@@ -487,7 +496,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
* fills msix_table, requests vectors, updates num_queues
* according to number of available vectors.
*/
-int __devinit bnx2x_enable_msix(struct bnx2x *bp);
+int bnx2x_enable_msix(struct bnx2x *bp);
/**
* bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -728,7 +737,7 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
u8 cos;
for_each_cos_in_tx_queue(fp, cos)
- if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
return true;
return false;
}
@@ -780,8 +789,10 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
{
int i;
+ bp->num_napi_queues = bp->num_queues;
+
/* Add NAPI objects */
- for_each_rx_queue(bp, i)
+ for_each_napi_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@@ -790,10 +801,12 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_rx_queue(bp, i)
+ for_each_napi_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
+void bnx2x_set_int_mode(struct bnx2x *bp);
+
static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
if (bp->flags & USING_MSIX_FLAG) {
@@ -809,7 +822,8 @@ static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
{
return num_queues ?
min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
+ min_t(int, netif_get_num_default_rss_queues(),
+ BNX2X_MAX_QUEUES(bp));
}
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
@@ -865,11 +879,9 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
return 2 * vn + BP_PORT(bp);
}
-static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
- bool config_hash)
+static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
- config_hash);
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
}
/**
@@ -975,8 +987,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
struct bnx2x *bp = fp->bp;
/* Configure classification DBs */
- bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
- BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+ bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
+ fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
bnx2x_sp_mapping(bp, mac_rdata),
BNX2X_FILTER_MAC_PENDING,
&bp->sp_state, obj_type,
@@ -1068,12 +1080,14 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
}
static inline void bnx2x_init_txdata(struct bnx2x *bp,
- struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
- __le16 *tx_cons_sb)
+ struct bnx2x_fp_txdata *txdata, u32 cid,
+ int txq_index, __le16 *tx_cons_sb,
+ struct bnx2x_fastpath *fp)
{
txdata->cid = cid;
txdata->txq_index = txq_index;
txdata->tx_cons_sb = tx_cons_sb;
+ txdata->parent_fp = fp;
DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
txdata->cid, txdata->txq_index);
@@ -1107,18 +1121,13 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
BNX2X_FCOE_ETH_CL_ID_IDX);
- /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than
- * 16 ETH clients per function when CNIC is enabled!
- *
- * Fix it ASAP!!!
- */
- bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
+ bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
-
- bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
- fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
+ bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
+ fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
+ fp);
DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
@@ -1135,8 +1144,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
/* No multi-CoS for FCoE L2 client */
BUG_ON(fp->max_cos != 1);
- bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
- BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
+ &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
DP(NETIF_MSG_IFUP,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 4f9244bd7530..8a73374e52a7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -972,23 +972,26 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
bp->dcbx_config_params.admin_default_priority = 0;
}
-void bnx2x_dcbx_init(struct bnx2x *bp)
+void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem)
{
u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+ /* only PMF can send ADMIN msg to MFW in old MFW versions */
+ if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF)))
+ return;
+
if (bp->dcbx_enabled <= 0)
return;
/* validate:
* chip of good for dcbx version,
* dcb is wanted
- * the function is pmf
* shmem2 contains DCBX support fields
*/
DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
bp->dcb_state, bp->port.pmf);
- if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+ if (bp->dcb_state == BNX2X_DCB_STATE_ON &&
SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
dcbx_lldp_params_offset =
SHMEM2_RD(bp, dcbx_lldp_params_offset);
@@ -999,12 +1002,23 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
- bnx2x_dcbx_admin_mib_updated_params(bp,
- dcbx_lldp_params_offset);
+ /* need HW lock to avoid scenario of two drivers
+ * writing in parallel to shmem
+ */
+ bnx2x_acquire_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
+ if (update_shmem)
+ bnx2x_dcbx_admin_mib_updated_params(bp,
+ dcbx_lldp_params_offset);
/* Let HW start negotiation */
bnx2x_fw_command(bp,
DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+ /* release HW lock only after MFW acks that it finished
+ * reading values from shmem
+ */
+ bnx2x_release_hw_lock(bp,
+ HW_LOCK_RESOURCE_DCBX_ADMIN_MIB);
}
}
}
@@ -2063,10 +2077,8 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
"Handling parity error recovery. Try again later\n");
return 1;
}
- if (netif_running(bp->dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
- }
+ if (netif_running(bp->dev))
+ bnx2x_dcbx_init(bp, true);
DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
if (rc)
return 1;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index ddc18ee5c5ae..fc4e0e3885b0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -177,6 +177,8 @@ static const struct {
4, STATS_FLAGS_FUNC, "recoverable_errors" },
{ STATS_OFFSET32(unrecoverable_error),
4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
+ { STATS_OFFSET32(eee_tx_lpi),
+ 4, STATS_FLAGS_PORT, "Tx LPI entry count"}
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -185,7 +187,8 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
int port_type;
u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
switch (bp->link_params.phy[phy_idx].media_type) {
- case ETH_PHY_SFP_FIBER:
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
case ETH_PHY_XFP_FIBER:
case ETH_PHY_KR:
case ETH_PHY_CX4:
@@ -218,6 +221,11 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(bp->port.supported[cfg_idx ^ 1] &
(SUPPORTED_TP | SUPPORTED_FIBRE));
cmd->advertising = bp->port.advertising[cfg_idx];
+ if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type ==
+ ETH_PHY_SFP_1G_FIBER) {
+ cmd->supported &= ~(SUPPORTED_10000baseT_Full);
+ cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+ }
if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
if (!(bp->flags & MF_FUNC_DIS)) {
@@ -293,7 +301,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
- u32 speed;
+ u32 speed, phy_idx;
if (IS_MF_SD(bp))
return 0;
@@ -548,9 +556,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
"10G half not supported\n");
return -EINVAL;
}
-
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
if (!(bp->port.supported[cfg_idx]
- & SUPPORTED_10000baseT_Full)) {
+ & SUPPORTED_10000baseT_Full) ||
+ (bp->link_params.phy[phy_idx].media_type ==
+ ETH_PHY_SFP_1G_FIBER)) {
DP(BNX2X_MSG_ETHTOOL,
"10G full not supported\n");
return -EINVAL;
@@ -824,7 +834,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
- info->testinfo_len = BNX2X_NUM_TESTS;
+ info->testinfo_len = BNX2X_NUM_TESTS(bp);
info->eedump_len = bp->common.flash_size;
info->regdump_len = bnx2x_get_regs_len(dev);
}
@@ -1150,6 +1160,65 @@ static int bnx2x_get_eeprom(struct net_device *dev,
return rc;
}
+static int bnx2x_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int rc = 0, phy_idx;
+ u8 *user_data = data;
+ int remaining_len = ee->len, xfer_size;
+ unsigned int page_off = ee->offset;
+
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ bnx2x_acquire_phy_lock(bp);
+ while (!rc && remaining_len > 0) {
+ xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
+ SFP_EEPROM_PAGE_SIZE : remaining_len;
+ rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+ &bp->link_params,
+ page_off,
+ xfer_size,
+ user_data);
+ remaining_len -= xfer_size;
+ user_data += xfer_size;
+ page_off += xfer_size;
+ }
+
+ bnx2x_release_phy_lock(bp);
+ return rc;
+}
+
+static int bnx2x_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ int phy_idx;
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
+ return -EAGAIN;
+ }
+
+ phy_idx = bnx2x_get_cur_phy_idx(bp);
+ switch (bp->link_params.phy[phy_idx].media_type) {
+ case ETH_PHY_SFPP_10G_FIBER:
+ case ETH_PHY_SFP_1G_FIBER:
+ case ETH_PHY_DA_TWINAX:
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
u32 cmd_flags)
{
@@ -1531,18 +1600,146 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
}
-static const struct {
- char string[ETH_GSTRING_LEN];
-} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
- { "register_test (offline)" },
- { "memory_test (offline)" },
- { "loopback_test (offline)" },
- { "nvram_test (online)" },
- { "interrupt_test (online)" },
- { "link_test (online)" },
- { "idle check (online)" }
+static char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = {
+ "register_test (offline) ",
+ "memory_test (offline) ",
+ "int_loopback_test (offline)",
+ "ext_loopback_test (offline)",
+ "nvram_test (online) ",
+ "interrupt_test (online) ",
+ "link_test (online) "
};
+static u32 bnx2x_eee_to_adv(u32 eee_adv)
+{
+ u32 modes = 0;
+
+ if (eee_adv & SHMEM_EEE_100M_ADV)
+ modes |= ADVERTISED_100baseT_Full;
+ if (eee_adv & SHMEM_EEE_1G_ADV)
+ modes |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & SHMEM_EEE_10G_ADV)
+ modes |= ADVERTISED_10000baseT_Full;
+
+ return modes;
+}
+
+static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
+{
+ u32 eee_adv = 0;
+ if (modes & ADVERTISED_100baseT_Full)
+ eee_adv |= SHMEM_EEE_100M_ADV;
+ if (modes & ADVERTISED_1000baseT_Full)
+ eee_adv |= SHMEM_EEE_1G_ADV;
+ if (modes & ADVERTISED_10000baseT_Full)
+ eee_adv |= SHMEM_EEE_10G_ADV;
+
+ return eee_adv << shift;
+}
+
+static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+
+ edata->supported =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
+ SHMEM_EEE_SUPPORTED_SHIFT);
+
+ edata->advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ edata->lp_advertised =
+ bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+
+ /* SHMEM value is in 16u units --> Convert to 1u units. */
+ edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
+
+ edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
+ edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
+ edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
+
+ return 0;
+}
+
+static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 eee_cfg;
+ u32 advertised;
+
+ if (IS_MF(bp))
+ return 0;
+
+ if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
+ DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
+ return -EOPNOTSUPP;
+ }
+
+ eee_cfg = SHMEM2_RD(bp, eee_status[BP_PORT(bp)]);
+
+ if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
+ DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
+ return -EOPNOTSUPP;
+ }
+
+ advertised = bnx2x_adv_to_eee(edata->advertised,
+ SHMEM_EEE_ADV_STATUS_SHIFT);
+ if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Direct manipulation of EEE advertisment is not supported\n");
+ return -EINVAL;
+ }
+
+ if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Maximal Tx Lpi timer supported is %x(u)\n",
+ EEE_MODE_TIMER_MASK);
+ return -EINVAL;
+ }
+ if (edata->tx_lpi_enabled &&
+ (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Minimal Tx Lpi timer supported is %d(u)\n",
+ EEE_MODE_NVRAM_AGGRESSIVE_TIME);
+ return -EINVAL;
+ }
+
+ /* All is well; Apply changes*/
+ if (edata->eee_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;
+
+ if (edata->tx_lpi_enabled)
+ bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
+ else
+ bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;
+
+ bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
+ bp->link_params.eee_mode |= (edata->tx_lpi_timer &
+ EEE_MODE_TIMER_MASK) |
+ EEE_MODE_OVERRIDE_NVRAM |
+ EEE_MODE_OUTPUT_TIME;
+
+ /* Restart link to propogate changes */
+ if (netif_running(dev)) {
+ bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_link_set(bp);
+ }
+
+ return 0;
+}
+
+
enum {
BNX2X_CHIP_E1_OFST = 0,
BNX2X_CHIP_E1H_OFST,
@@ -1811,6 +2008,14 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
+
+ cnt = 1400;
+ while (!bp->link_vars.link_up && cnt--)
+ msleep(20);
+
+ if (cnt <= 0 && !bp->link_vars.link_up)
+ DP(BNX2X_MSG_ETHTOOL,
+ "Timeout waiting for link init\n");
}
}
@@ -1821,7 +2026,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
unsigned char *packet;
struct bnx2x_fastpath *fp_rx = &bp->fp[0];
struct bnx2x_fastpath *fp_tx = &bp->fp[0];
- struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
+ struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
u16 tx_start_idx, tx_idx;
u16 rx_start_idx, rx_idx;
u16 pkt_prod, bd_prod;
@@ -1836,13 +2041,16 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
u16 len;
int rc = -ENODEV;
u8 *data;
- struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
+ txdata->txq_index);
/* check the loopback mode */
switch (loopback_mode) {
case BNX2X_PHY_LOOPBACK:
- if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
+ if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
+ DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
return -EINVAL;
+ }
break;
case BNX2X_MAC_LOOPBACK:
if (CHIP_IS_E3(bp)) {
@@ -1859,6 +2067,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
+ case BNX2X_EXT_LOOPBACK:
+ if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't configure external loopback\n");
+ return -EINVAL;
+ }
+ break;
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
@@ -2030,6 +2245,38 @@ static int bnx2x_test_loopback(struct bnx2x *bp)
return rc;
}
+static int bnx2x_test_ext_loopback(struct bnx2x *bp)
+{
+ int rc;
+ u8 is_serdes =
+ (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+ if (BP_NOMCP(bp))
+ return -ENODEV;
+
+ if (!netif_running(bp->dev))
+ return BNX2X_EXT_LOOPBACK_FAILED;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
+ if (rc) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for external lb) failed\n");
+ return -ENODEV;
+ }
+ bnx2x_wait_for_link(bp, 1, is_serdes);
+
+ bnx2x_netif_stop(bp, 1);
+
+ rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
+ if (rc)
+ DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
+
+ bnx2x_netif_start(bp);
+
+ return rc;
+}
+
#define CRC32_RESIDUAL 0xdebb20e3
static int bnx2x_test_nvram(struct bnx2x *bp)
@@ -2112,7 +2359,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
return -ENODEV;
}
- params.q_obj = &bp->fp->q_obj;
+ params.q_obj = &bp->sp_objs->q_obj;
params.cmd = BNX2X_Q_CMD_EMPTY;
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
@@ -2125,24 +2372,31 @@ static void bnx2x_self_test(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
u8 is_serdes;
+ int rc;
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
"Handling parity error recovery. Try again later\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
+ DP(BNX2X_MSG_ETHTOOL,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);
- memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
+ memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test when interface is down\n");
return;
+ }
- /* offline tests are not supported in MF mode */
- if (IS_MF(bp))
- etest->flags &= ~ETH_TEST_FL_OFFLINE;
is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
- if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ /* offline tests are not supported in MF mode */
+ if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
int port = BP_PORT(bp);
u32 val;
u8 link_up;
@@ -2155,7 +2409,14 @@ static void bnx2x_self_test(struct net_device *dev,
link_up = bp->link_vars.link_up;
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- bnx2x_nic_load(bp, LOAD_DIAG);
+ rc = bnx2x_nic_load(bp, LOAD_DIAG);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for offline) failed\n");
+ return;
+ }
+
/* wait until link state is restored */
bnx2x_wait_for_link(bp, 1, is_serdes);
@@ -2168,30 +2429,51 @@ static void bnx2x_self_test(struct net_device *dev,
etest->flags |= ETH_TEST_FL_FAILED;
}
- buf[2] = bnx2x_test_loopback(bp);
+ buf[2] = bnx2x_test_loopback(bp); /* internal LB */
if (buf[2] != 0)
etest->flags |= ETH_TEST_FL_FAILED;
+ if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
+ if (buf[3] != 0)
+ etest->flags |= ETH_TEST_FL_FAILED;
+ etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
/* restore input for TX port IF */
REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
-
- bnx2x_nic_load(bp, LOAD_NORMAL);
+ rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ if (rc) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ DP(BNX2X_MSG_ETHTOOL,
+ "Can't perform self-test, nic_load (for online) failed\n");
+ return;
+ }
/* wait until link state is restored */
bnx2x_wait_for_link(bp, link_up, is_serdes);
}
if (bnx2x_test_nvram(bp) != 0) {
- buf[3] = 1;
+ if (!IS_MF(bp))
+ buf[4] = 1;
+ else
+ buf[0] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bnx2x_test_intr(bp) != 0) {
- buf[4] = 1;
+ if (!IS_MF(bp))
+ buf[5] = 1;
+ else
+ buf[1] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
if (bnx2x_link_test(bp, is_serdes) != 0) {
- buf[5] = 1;
+ if (!IS_MF(bp))
+ buf[6] = 1;
+ else
+ buf[2] = 1;
etest->flags |= ETH_TEST_FL_FAILED;
}
@@ -2236,7 +2518,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
return num_stats;
case ETH_SS_TEST:
- return BNX2X_NUM_TESTS;
+ return BNX2X_NUM_TESTS(bp);
default:
return -EINVAL;
@@ -2246,7 +2528,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
struct bnx2x *bp = netdev_priv(dev);
- int i, j, k;
+ int i, j, k, offset, start;
char queue_name[MAX_QUEUE_NAME_LEN+1];
switch (stringset) {
@@ -2277,7 +2559,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
break;
case ETH_SS_TEST:
- memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+ /* First 4 tests cannot be done in MF mode */
+ if (!IS_MF(bp))
+ start = 0;
+ else
+ start = 4;
+ for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp));
+ i++, j++) {
+ offset = sprintf(buf+32*i, "%s",
+ bnx2x_tests_str_arr[j]);
+ *(buf+offset) = '\0';
+ }
break;
}
}
@@ -2291,7 +2583,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
if (is_multi(bp)) {
for_each_eth_queue(bp, i) {
- hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+ hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
if (bnx2x_q_stats_arr[j].size == 0) {
/* skip this counter */
@@ -2375,6 +2667,41 @@ static int bnx2x_set_phys_id(struct net_device *dev,
return 0;
}
+static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v4)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case UDP_V6_FLOW:
+ if (bp->rss_conf_obj.udp_rss_v6)
+ info->data = RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ else
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules __always_unused)
{
@@ -2384,7 +2711,102 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
case ETHTOOL_GRXRINGS:
info->data = BNX2X_NUM_ETH_QUEUES(bp);
return 0;
+ case ETHTOOL_GRXFH:
+ return bnx2x_get_rss_flags(bp, info);
+ default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
+{
+ int udp_rss_requested;
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* For TCP only 4-tupple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ /* For UDP either 2-tupple hash or 4-tupple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ udp_rss_requested = 1;
+ else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
+ udp_rss_requested = 0;
+ else
+ return -EINVAL;
+ if ((info->flow_type == UDP_V4_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
+ bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
+ DP(BNX2X_MSG_ETHTOOL,
+ "rss re-configured, UDP 4-tupple %s\n",
+ udp_rss_requested ? "enabled" : "disabled");
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ } else if ((info->flow_type == UDP_V6_FLOW) &&
+ (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
+ bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
+ return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ DP(BNX2X_MSG_ETHTOOL,
+ "rss re-configured, UDP 4-tupple %s\n",
+ udp_rss_requested ? "enabled" : "disabled");
+ } else {
+ return 0;
+ }
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ /* For IP only 2-tupple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP(BNX2X_MSG_ETHTOOL,
+ "Command parameters not supported\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return bnx2x_set_rss_flags(bp, info);
default:
DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EOPNOTSUPP;
@@ -2424,7 +2846,6 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
- u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
@@ -2436,10 +2857,88 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
* align the received table to the Client ID of the leading RSS
* queue
*/
- ind_table[i] = indir[i] + bp->fp->cl_id;
+ bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
- return bnx2x_config_rss_eth(bp, ind_table, false);
+ return bnx2x_config_rss_eth(bp, false);
+}
+
+/**
+ * bnx2x_get_channels - gets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: returns the number of max / current queues
+ */
+static void bnx2x_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
+ channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+/**
+ * bnx2x_change_num_queues - change the number of RSS queues.
+ *
+ * @bp: bnx2x private structure
+ *
+ * Re-configure interrupt mode to get the new number of MSI-X
+ * vectors and re-add NAPI objects.
+ */
+static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
+{
+ bnx2x_del_all_napi(bp);
+ bnx2x_disable_msi(bp);
+ BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
+ bnx2x_set_int_mode(bp);
+ bnx2x_add_all_napi(bp);
+}
+
+/**
+ * bnx2x_set_channels - sets the number of RSS queues.
+ *
+ * @dev: net device
+ * @channels: includes the number of queues requested
+ */
+static int bnx2x_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+
+ DP(BNX2X_MSG_ETHTOOL,
+ "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
+ channels->rx_count, channels->tx_count, channels->other_count,
+ channels->combined_count);
+
+ /* We don't support separate rx / tx channels.
+ * We don't allow setting 'other' channels.
+ */
+ if (channels->rx_count || channels->tx_count || channels->other_count
+ || (channels->combined_count == 0) ||
+ (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
+ DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
+ return -EINVAL;
+ }
+
+ /* Check if there was a change in the active parameters */
+ if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
+ DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
+ return 0;
+ }
+
+ /* Set the requested number of queues in bp context.
+ * Note that the actual number of queues created during load may be
+ * less than requested if memory is low.
+ */
+ if (unlikely(!netif_running(dev))) {
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return 0;
+ }
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ bnx2x_change_num_queues(bp, channels->combined_count);
+ return bnx2x_nic_load(bp, LOAD_NORMAL);
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
@@ -2469,9 +2968,17 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
+ .set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
.set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_channels = bnx2x_get_channels,
+ .set_channels = bnx2x_set_channels,
+ .get_module_info = bnx2x_get_module_info,
+ .get_module_eeprom = bnx2x_get_module_eeprom,
+ .get_eee = bnx2x_get_eee,
+ .set_eee = bnx2x_set_eee,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void bnx2x_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 426f77aa721a..bbc66ced9c25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -321,9 +321,7 @@
#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
-/**
- * This file defines HSI constants common to all microcode flows
- */
+/* This file defines HSI constants common to all microcode flows */
#define PROTOCOL_STATE_BIT_OFFSET 6
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index a440a8ba85f2..76b6e65790f8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -10,6 +10,7 @@
#define BNX2X_HSI_H
#include "bnx2x_fw_defs.h"
+#include "bnx2x_mfw_req.h"
#define FW_ENCODE_32BIT_PATTERN 0x1e1e1e1e
@@ -33,12 +34,6 @@ struct license_key {
u32 reserved_b[4];
};
-
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_MAX 2
-#define NVM_PATH_MAX 2
-
/****************************************************************************
* Shared HW configuration *
****************************************************************************/
@@ -1067,8 +1062,18 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
uses the same defines as link_config */
u32 mfw_wol_link_cfg2; /* 0x480 */
- u32 Reserved2[17]; /* 0x484 */
+ /* EEE power saving mode */
+ u32 eee_power_mode; /* 0x484 */
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_MASK 0x000000FF
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT 0
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED 0x00000000
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED 0x00000001
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE 0x00000002
+ #define PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY 0x00000003
+
+
+ u32 Reserved2[16]; /* 0x488 */
};
@@ -1140,6 +1145,7 @@ struct drv_port_mb {
u32 link_status;
/* Driver should update this field on any link change event */
+ #define LINK_STATUS_NONE (0<<0)
#define LINK_STATUS_LINK_FLAG_MASK 0x00000001
#define LINK_STATUS_LINK_UP 0x00000001
#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E
@@ -1197,6 +1203,7 @@ struct drv_port_mb {
#define LINK_STATUS_PFC_ENABLED 0x20000000
#define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000
+ #define LINK_STATUS_SFP_TX_FAULT 0x80000000
u32 port_stx;
@@ -1240,9 +1247,11 @@ struct drv_func_mb {
#define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
#define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
+ #define REQ_BC_VER_4_FCOE_FEATURES 0x00070209
#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
+ #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
@@ -1255,6 +1264,8 @@ struct drv_func_mb {
#define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
#define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
+ #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1320,6 +1331,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
#define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
+ #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1383,6 +1396,8 @@ struct drv_func_mb {
#define DRV_STATUS_DRV_INFO_REQ 0x04000000
+ #define DRV_STATUS_EEE_NEGOTIATION_RESULTS 0x08000000
+
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
#define VIRT_MAC_SIGNATURE 0x564d0000
@@ -1613,6 +1628,11 @@ struct fw_flr_mb {
struct fw_flr_ack ack;
};
+struct eee_remote_vals {
+ u32 tx_tw;
+ u32 rx_tw;
+};
+
/**** SUPPORT FOR SHMEM ARRRAYS ***
* The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
* define arrays with storage types smaller then unsigned dwords.
@@ -2053,6 +2073,41 @@ struct shmem2_region {
#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
u32 ibft_host_addr; /* initialized by option ROM */
+ struct eee_remote_vals eee_remote_vals[PORT_MAX];
+ u32 reserved[E2_FUNC_MAX];
+
+
+ /* the status of EEE auto-negotiation
+ * bits 15:0 the configured tx-lpi entry timer value. Depends on bit 31.
+ * bits 19:16 the supported modes for EEE.
+ * bits 23:20 the speeds advertised for EEE.
+ * bits 27:24 the speeds the Link partner advertised for EEE.
+ * The supported/adv. modes in bits 27:19 originate from the
+ * SHMEM_EEE_XXX_ADV definitions (where XXX is replaced by speed).
+ * bit 28 when 1'b1 EEE was requested.
+ * bit 29 when 1'b1 tx lpi was requested.
+ * bit 30 when 1'b1 EEE was negotiated. Tx lpi will be asserted iff
+ * 30:29 are 2'b11.
+ * bit 31 when 1'b0 bits 15:0 contain a PORT_FEAT_CFG_EEE_ define as
+ * value. When 1'b1 those bits contains a value times 16 microseconds.
+ */
+ u32 eee_status[PORT_MAX];
+ #define SHMEM_EEE_TIMER_MASK 0x0000ffff
+ #define SHMEM_EEE_SUPPORTED_MASK 0x000f0000
+ #define SHMEM_EEE_SUPPORTED_SHIFT 16
+ #define SHMEM_EEE_ADV_STATUS_MASK 0x00f00000
+ #define SHMEM_EEE_100M_ADV (1<<0)
+ #define SHMEM_EEE_1G_ADV (1<<1)
+ #define SHMEM_EEE_10G_ADV (1<<2)
+ #define SHMEM_EEE_ADV_STATUS_SHIFT 20
+ #define SHMEM_EEE_LP_ADV_STATUS_MASK 0x0f000000
+ #define SHMEM_EEE_LP_ADV_STATUS_SHIFT 24
+ #define SHMEM_EEE_REQUESTED_BIT 0x10000000
+ #define SHMEM_EEE_LPI_REQUESTED_BIT 0x20000000
+ #define SHMEM_EEE_ACTIVE_BIT 0x40000000
+ #define SHMEM_EEE_TIME_OUTPUT_BIT 0x80000000
+
+ u32 sizeof_port_stats;
};
@@ -2599,6 +2654,9 @@ struct host_port_stats {
u32 pfc_frames_tx_lo;
u32 pfc_frames_rx_hi;
u32 pfc_frames_rx_lo;
+
+ u32 eee_lpi_count_hi;
+ u32 eee_lpi_count_lo;
};
@@ -2638,118 +2696,6 @@ struct host_func_stats {
/* VIC definitions */
#define VICSTATST_UIF_INDEX 2
-/* current drv_info version */
-#define DRV_INFO_CUR_VER 1
-
-/* drv_info op codes supported */
-enum drv_info_opcode {
- ETH_STATS_OPCODE,
- FCOE_STATS_OPCODE,
- ISCSI_STATS_OPCODE
-};
-
-#define ETH_STAT_INFO_VERSION_LEN 12
-/* Per PCI Function Ethernet Statistics required from the driver */
-struct eth_stats_info {
- /* Function's Driver Version. padded to 12 */
- u8 version[ETH_STAT_INFO_VERSION_LEN];
- /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
- u8 mac_local[8];
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
- u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
- u32 feature_flags; /* Feature_Flags. */
-#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
-#define FEATURE_ETH_LSO_MASK 0x02
-#define FEATURE_ETH_BOOTMODE_MASK 0x1C
-#define FEATURE_ETH_BOOTMODE_SHIFT 2
-#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
-#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
-#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
-#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
-#define FEATURE_ETH_TOE_MASK 0x20
- u32 lso_max_size; /* LSO MaxOffloadSize. */
- u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
- /* Num Offloaded Connections TCP_IPv4. */
- u32 ipv4_ofld_cnt;
- /* Num Offloaded Connections TCP_IPv6. */
- u32 ipv6_ofld_cnt;
- u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
- u32 txq_size; /* TX Descriptors Queue Size */
- u32 rxq_size; /* RX Descriptors Queue Size */
- /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
- u32 txq_avg_depth;
- /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
- u32 rxq_avg_depth;
- /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
- u32 iov_offload;
- /* Number of NetQueue/VMQ Config'd. */
- u32 netq_cnt;
- u32 vf_cnt; /* Num VF assigned to this PF. */
-};
-
-/* Per PCI Function FCOE Statistics required from the driver */
-struct fcoe_stats_info {
- u8 version[12]; /* Function's Driver Version. */
- u8 mac_local[8]; /* Locally Admin Addr. */
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
- /* QoS Priority (per 802.1p). 0-7255 */
- u32 qos_priority;
- u32 txq_size; /* FCoE TX Descriptors Queue Size. */
- u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
- /* FCoE TX Descriptor Queue Avg Depth. */
- u32 txq_avg_depth;
- /* FCoE RX Descriptors Queue Avg Depth. */
- u32 rxq_avg_depth;
- u32 rx_frames_lo; /* FCoE RX Frames received. */
- u32 rx_frames_hi; /* FCoE RX Frames received. */
- u32 rx_bytes_lo; /* FCoE RX Bytes received. */
- u32 rx_bytes_hi; /* FCoE RX Bytes received. */
- u32 tx_frames_lo; /* FCoE TX Frames sent. */
- u32 tx_frames_hi; /* FCoE TX Frames sent. */
- u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
- u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
-};
-
-/* Per PCI Function iSCSI Statistics required from the driver*/
-struct iscsi_stats_info {
- u8 version[12]; /* Function's Driver Version. */
- u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
- u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
- /* QoS Priority (per 802.1p). 0-7255 */
- u32 qos_priority;
- u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
- u8 ww_port_name[64]; /* iSCSI World wide port name */
- u8 boot_target_name[64];/* iSCSI Boot Target Name. */
- u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
- u32 boot_target_portal; /* iSCSI Boot Target Portal. */
- u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
- u32 max_frame_size; /* Max Frame Size. bytes */
- u32 txq_size; /* PDU TX Descriptors Queue Size. */
- u32 rxq_size; /* PDU RX Descriptors Queue Size. */
- u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
- u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
- u32 rx_pdus_lo; /* iSCSI PDUs received. */
- u32 rx_pdus_hi; /* iSCSI PDUs received. */
- u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
- u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
- u32 tx_pdus_lo; /* iSCSI PDUs sent. */
- u32 tx_pdus_hi; /* iSCSI PDUs sent. */
- u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
- u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
- u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
- * 9 nibbles, the position of each nibble
- * represents the C-PCP value, the value
- * of the nibble = S-PCP value.
- */
-};
-
-union drv_info_to_mcp {
- struct eth_stats_info ether_stat;
- struct fcoe_stats_info fcoe_stat;
- struct iscsi_stats_info iscsi_stat;
-};
/* stats collected for afex.
* NOTE: structure is exactly as expected to be received by the switch.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 6e7d5c0843b4..f4beb46c4709 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -285,7 +285,6 @@
#define ETS_E3B0_PBF_MIN_W_VAL (10000)
#define MAX_PACKET_SIZE (9700)
-#define WC_UC_TIMEOUT 100
#define MAX_KR_LINK_RETRY 4
/**********************************************************/
@@ -1306,6 +1305,94 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
return 0;
}
+
+/******************************************************************/
+/* EEE section */
+/******************************************************************/
+static u8 bnx2x_eee_has_cap(struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (REG_RD(bp, params->shmem2_base) <=
+ offsetof(struct shmem2_region, eee_status[params->port]))
+ return 0;
+
+ return 1;
+}
+
+static int bnx2x_eee_nvram_to_time(u32 nvram_mode, u32 *idle_timer)
+{
+ switch (nvram_mode) {
+ case PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED:
+ *idle_timer = EEE_MODE_NVRAM_BALANCED_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE:
+ *idle_timer = EEE_MODE_NVRAM_AGGRESSIVE_TIME;
+ break;
+ case PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY:
+ *idle_timer = EEE_MODE_NVRAM_LATENCY_TIME;
+ break;
+ default:
+ *idle_timer = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int bnx2x_eee_time_to_nvram(u32 idle_timer, u32 *nvram_mode)
+{
+ switch (idle_timer) {
+ case EEE_MODE_NVRAM_BALANCED_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_BALANCED;
+ break;
+ case EEE_MODE_NVRAM_AGGRESSIVE_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_AGGRESSIVE;
+ break;
+ case EEE_MODE_NVRAM_LATENCY_TIME:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_LOW_LATENCY;
+ break;
+ default:
+ *nvram_mode = PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED;
+ break;
+ }
+
+ return 0;
+}
+
+static u32 bnx2x_eee_calc_timer(struct link_params *params)
+{
+ u32 eee_mode, eee_idle;
+ struct bnx2x *bp = params->bp;
+
+ if (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) {
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* time value in eee_mode --> used directly*/
+ eee_idle = params->eee_mode & EEE_MODE_TIMER_MASK;
+ } else {
+ /* hsi value in eee_mode --> time */
+ if (bnx2x_eee_nvram_to_time(params->eee_mode &
+ EEE_MODE_NVRAM_MASK,
+ &eee_idle))
+ return 0;
+ }
+ } else {
+ /* hsi values in nvram --> time*/
+ eee_mode = ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_feature_config[params->port].
+ eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+
+ if (bnx2x_eee_nvram_to_time(eee_mode, &eee_idle))
+ return 0;
+ }
+
+ return eee_idle;
+}
+
+
/******************************************************************/
/* PFC section */
/******************************************************************/
@@ -1540,7 +1627,7 @@ static void bnx2x_umac_enable(struct link_params *params,
/* Reset UMAC */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
@@ -1631,7 +1718,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
* ports of the path
*/
- if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
+ if ((CHIP_NUM(bp) == CHIP_NUM_57840_4_10) &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
MISC_REGISTERS_RESET_REG_2_XMAC)) {
DP(NETIF_MSG_LINK,
@@ -1642,7 +1729,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
/* Hard reset */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC);
@@ -1672,7 +1759,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
/* Soft reset */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
@@ -1730,6 +1817,14 @@ static int bnx2x_xmac_enable(struct link_params *params,
/* update PFC */
bnx2x_update_pfc_xmac(params, vars, 0);
+ if (vars->eee_status & SHMEM_EEE_ADV_STATUS_MASK) {
+ DP(NETIF_MSG_LINK, "Setting XMAC for EEE\n");
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_TIMERS_HI, 0x1380008);
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x1);
+ } else {
+ REG_WR(bp, xmac_base + XMAC_REG_EEE_CTRL, 0x0);
+ }
+
/* Enable TX and RX */
val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
@@ -1785,11 +1880,6 @@ static int bnx2x_emac_enable(struct link_params *params,
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_RESET);
- if (CHIP_REV_IS_SLOW(bp)) {
- /* config GMII mode */
- val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
- EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
- } else { /* ASIC */
/* pause enable/disable */
bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
EMAC_RX_MODE_FLOW_EN);
@@ -1812,7 +1902,6 @@ static int bnx2x_emac_enable(struct link_params *params,
} else
bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
EMAC_TX_MODE_FLOW_EN);
- }
/* KEEP_VLAN_TAG, promiscuous */
val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
@@ -1851,23 +1940,23 @@ static int bnx2x_emac_enable(struct link_params *params,
val &= ~0x810;
EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
- /* enable emac */
+ /* Enable emac */
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
- /* enable emac for jumbo packets */
+ /* Enable emac for jumbo packets */
EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
(EMAC_RX_MTU_SIZE_JUMBO_ENA |
(ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
- /* strip CRC */
+ /* Strip CRC */
REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
- /* disable the NIG in/out to the bmac */
+ /* Disable the NIG in/out to the bmac */
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
- /* enable the NIG in/out to the emac */
+ /* Enable the NIG in/out to the emac */
REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
val = 0;
if ((params->feature_config_flags &
@@ -1902,7 +1991,7 @@ static void bnx2x_update_pfc_bmac1(struct link_params *params,
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
- /* tx control */
+ /* TX control */
val = 0xc0;
if (!(params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED) &&
@@ -1962,7 +2051,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
wb_data[0] &= ~(1<<2);
} else {
DP(NETIF_MSG_LINK, "PFC is disabled\n");
- /* disable PFC RX & TX & STATS and set 8 COS */
+ /* Disable PFC RX & TX & STATS and set 8 COS */
wb_data[0] = 0x8;
wb_data[1] = 0;
}
@@ -2056,7 +2145,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2084,7 +2173,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2114,7 +2203,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2132,7 +2221,7 @@ static int bnx2x_pfc_brb_get_config_params(
PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
+ /* Non pause able*/
config_val->non_pauseable_th.pause_xoff =
PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
@@ -2189,7 +2278,7 @@ static void bnx2x_pfc_brb_get_e3b0_config_params(
if (pfc_params->cos0_pauseable !=
pfc_params->cos1_pauseable) {
- /* nonpauseable= Lossy + pauseable = Lossless*/
+ /* Nonpauseable= Lossy + pauseable = Lossless*/
e3b0_val->lb_guarantied =
PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
e3b0_val->mac_0_class_t_guarantied =
@@ -2388,9 +2477,9 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
* This function is needed because NIG ARB_CREDIT_WEIGHT_X are
* not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable.
******************************************************************************/
-int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
- u8 cos_entry,
- u32 priority_mask, u8 port)
+static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+ u8 cos_entry,
+ u32 priority_mask, u8 port)
{
u32 nig_reg_rx_priority_mask_add = 0;
@@ -2440,6 +2529,16 @@ static void bnx2x_update_mng(struct link_params *params, u32 link_status)
port_mb[params->port].link_status), link_status);
}
+static void bnx2x_update_mng_eee(struct link_params *params, u32 eee_status)
+{
+ struct bnx2x *bp = params->bp;
+
+ if (bnx2x_eee_has_cap(params))
+ REG_WR(bp, params->shmem2_base +
+ offsetof(struct shmem2_region,
+ eee_status[params->port]), eee_status);
+}
+
static void bnx2x_update_pfc_nig(struct link_params *params,
struct link_vars *vars,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
@@ -2507,7 +2606,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
- /* output enable for RX_XCM # IF */
+ /* Output enable for RX_XCM # IF */
REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
NIG_REG_XCM0_OUT_EN, xcm_out_en);
@@ -2556,10 +2655,10 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_update_mng(params, vars->link_status);
- /* update NIG params */
+ /* Update NIG params */
bnx2x_update_pfc_nig(params, vars, pfc_params);
- /* update BRB params */
+ /* Update BRB params */
bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
if (bnx2x_status)
return bnx2x_status;
@@ -2614,7 +2713,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
wb_data, 2);
- /* tx MAC SA */
+ /* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
@@ -2623,7 +2722,7 @@ static int bnx2x_bmac1_enable(struct link_params *params,
params->mac_addr[1]);
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
- /* mac control */
+ /* MAC control */
val = 0x3;
if (is_lb) {
val |= 0x4;
@@ -2633,24 +2732,24 @@ static int bnx2x_bmac1_enable(struct link_params *params,
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
- /* set rx mtu */
+ /* Set rx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
bnx2x_update_pfc_bmac1(params, vars);
- /* set tx mtu */
+ /* Set tx mtu */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
- /* set cnt max size */
+ /* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
- /* configure safc */
+ /* Configure SAFC */
wb_data[0] = 0x1000200;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
@@ -2684,7 +2783,7 @@ static int bnx2x_bmac2_enable(struct link_params *params,
udelay(30);
- /* tx MAC SA */
+ /* TX MAC SA */
wb_data[0] = ((params->mac_addr[2] << 24) |
(params->mac_addr[3] << 16) |
(params->mac_addr[4] << 8) |
@@ -2703,18 +2802,18 @@ static int bnx2x_bmac2_enable(struct link_params *params,
wb_data, 2);
udelay(30);
- /* set rx mtu */
+ /* Set RX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
udelay(30);
- /* set tx mtu */
+ /* Set TX MTU */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
udelay(30);
- /* set cnt max size */
+ /* Set cnt max size */
wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
wb_data[1] = 0;
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
@@ -2732,15 +2831,15 @@ static int bnx2x_bmac_enable(struct link_params *params,
u8 port = params->port;
struct bnx2x *bp = params->bp;
u32 val;
- /* reset and unreset the BigMac */
+ /* Reset and unreset the BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- msleep(1);
+ usleep_range(1000, 2000);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
- /* enable access for bmac registers */
+ /* Enable access for bmac registers */
REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
/* Enable BMAC according to BMAC type*/
@@ -2798,7 +2897,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
BIGMAC_REGISTER_BMAC_CONTROL,
wb_data, 2);
}
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -2810,17 +2909,16 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
u32 init_crd, crd;
u32 count = 1000;
- /* disable port */
+ /* Disable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
- /* wait for init credit */
+ /* Wait for init credit */
init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
while ((init_crd != crd) && count) {
- msleep(5);
-
+ usleep_range(5000, 10000);
crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
count--;
}
@@ -2837,18 +2935,18 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
line_speed == SPEED_1000 ||
line_speed == SPEED_2500) {
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
- /* update threshold */
+ /* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
- /* update init credit */
+ /* Update init credit */
init_crd = 778; /* (800-18-4) */
} else {
u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
ETH_OVREHEAD)/16;
REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
- /* update threshold */
+ /* Update threshold */
REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
- /* update init credit */
+ /* Update init credit */
switch (line_speed) {
case SPEED_10000:
init_crd = thresh + 553 - 22;
@@ -2863,12 +2961,12 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
line_speed, init_crd);
- /* probe the credit changes */
+ /* Probe the credit changes */
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
- msleep(5);
+ usleep_range(5000, 10000);
REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
- /* enable port */
+ /* Enable port */
REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
return 0;
}
@@ -2935,7 +3033,7 @@ static int bnx2x_cl22_write(struct bnx2x *bp,
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */
+ /* Address */
tmp = ((phy->addr << 21) | (reg << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_22 |
EMAC_MDIO_COMM_START_BUSY);
@@ -2971,7 +3069,7 @@ static int bnx2x_cl22_read(struct bnx2x *bp,
REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
mode & ~EMAC_MDIO_MODE_CLAUSE_45);
- /* address */
+ /* Address */
val = ((phy->addr << 21) | (reg << 16) |
EMAC_MDIO_COMM_COMMAND_READ_22 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3009,7 +3107,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
- /* address */
+ /* Address */
val = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
@@ -3030,7 +3128,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
*ret_val = 0;
rc = -EFAULT;
} else {
- /* data */
+ /* Data */
val = ((phy->addr << 21) | (devad << 16) |
EMAC_MDIO_COMM_COMMAND_READ_45 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3078,7 +3176,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
EMAC_MDIO_STATUS_10MB);
- /* address */
+ /* Address */
tmp = ((phy->addr << 21) | (devad << 16) | reg |
EMAC_MDIO_COMM_COMMAND_ADDRESS |
EMAC_MDIO_COMM_START_BUSY);
@@ -3098,7 +3196,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
} else {
- /* data */
+ /* Data */
tmp = ((phy->addr << 21) | (devad << 16) | val |
EMAC_MDIO_COMM_COMMAND_WRITE_45 |
EMAC_MDIO_COMM_START_BUSY);
@@ -3188,23 +3286,23 @@ static int bnx2x_bsc_read(struct link_params *params,
xfer_cnt = 16 - lc_addr;
- /* enable the engine */
+ /* Enable the engine */
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
val |= MCPR_IMC_COMMAND_ENABLE;
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* program slave device ID */
+ /* Program slave device ID */
val = (sl_devid << 16) | sl_addr;
REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
- /* start xfer with 0 byte to update the address pointer ???*/
+ /* Start xfer with 0 byte to update the address pointer ???*/
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_WRITE_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */
+ /* Poll for completion */
i = 0;
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3220,7 +3318,7 @@ static int bnx2x_bsc_read(struct link_params *params,
if (rc == -EFAULT)
return rc;
- /* start xfer with read op */
+ /* Start xfer with read op */
val = (MCPR_IMC_COMMAND_ENABLE) |
(MCPR_IMC_COMMAND_READ_OP <<
MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
@@ -3228,7 +3326,7 @@ static int bnx2x_bsc_read(struct link_params *params,
(xfer_cnt);
REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
- /* poll for completion */
+ /* Poll for completion */
i = 0;
val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
@@ -3331,7 +3429,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
port = port ^ 1;
lane = (port<<1) + path;
- } else { /* two port mode - no port swap */
+ } else { /* Two port mode - no port swap */
/* Figure out path swap value */
path_swap_ovr =
@@ -3409,7 +3507,7 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
val = SERDES_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */
+ /* Reset and unreset the SerDes/XGXS */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3430,7 +3528,7 @@ static void bnx2x_xgxs_deassert(struct link_params *params)
val = XGXS_RESET_BITS << (port*16);
- /* reset and unreset the SerDes/XGXS */
+ /* Reset and unreset the SerDes/XGXS */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
udelay(500);
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
@@ -3522,7 +3620,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params,
{
u16 val;
struct bnx2x *bp = params->bp;
- /* read modify write pause advertizing */
+ /* Read modify write pause advertizing */
bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
@@ -3657,44 +3755,35 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 val16 = 0, lane, bam37 = 0;
- struct bnx2x *bp = params->bp;
+ u16 val16 = 0, lane, i;
+ struct bnx2x *bp = params->bp;
+ static struct bnx2x_reg_set reg_set[] = {
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555},
+ {MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190},
+ /* Disable Autoneg: re-enable it after adv is done. */
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0}
+ };
DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
/* Set to default registers that may be overriden by 10G force */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
- MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, 0x7415);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190);
- /* Disable Autoneg: re-enable it after adv is done. */
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0);
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
/* Check adding advertisement for 1G KX */
if (((vars->line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
(vars->line_speed == SPEED_1000)) {
- u16 sd_digital;
+ u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
val16 |= (1<<5);
/* Enable CL37 1G Parallel Detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
- (sd_digital | 0x1));
-
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1);
DP(NETIF_MSG_LINK, "Advertize 1G\n");
}
if (((vars->line_speed == SPEED_AUTO_NEG) &&
@@ -3704,7 +3793,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
val16 |= (1<<7);
/* Enable 10G Parallel Detect */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+ MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
DP(NETIF_MSG_LINK, "Advertize 10G\n");
}
@@ -3738,10 +3827,9 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
offsetof(struct shmem_region, dev_info.
port_hw_config[params->port].default_cfg)) &
PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL,
+ 1);
DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
}
@@ -3755,11 +3843,8 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
}
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, &val16);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC7, 0x100);
/* Over 1G - AN local device user page 1 */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3776,50 +3861,35 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 val;
-
- /* Disable Autoneg */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
-
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL3_UP1, 0x1);
-
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
-
- /* Disable CL36 PCS Tx */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
-
- /* Double Wide Single Data Rate @ pll rate */
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
-
- /* Leave cl72 training enable, needed for KR */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+ u16 i;
+ static struct bnx2x_reg_set reg_set[] = {
+ /* Disable Autoneg */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3f00},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0},
+ {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa},
+ /* Disable CL36 PCS Tx */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0},
+ /* Double Wide Single Data Rate @ pll rate */
+ {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF},
+ /* Leave cl72 training enable, needed for KR */
+ {MDIO_PMA_DEVAD,
MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
- 0x2);
+ 0x2}
+ };
+
+ for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg,
+ reg_set[i].val);
/* Leave CL72 enabled */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
- val | 0x3800);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+ 0x3800);
/* Set speed via PMA/PMD register */
bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
@@ -3840,7 +3910,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, 0xF9);
- /* set and clear loopback to cause a reset to 64/66 decoder */
+ /* Set and clear loopback to cause a reset to 64/66 decoder */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -3855,16 +3925,12 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 misc1_val, tap_val, tx_driver_val, lane, val;
/* Hold rxSeqStart */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
/* Hold tx_fifo_reset */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1);
/* Disable CL73 AN */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
@@ -3876,10 +3942,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
/* Disable 100FX Idle detect */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_FX100_CTRL3, 0x0080);
/* Set Block address to Remote PHY & Clear forced_speed[5] */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3940,16 +4004,20 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
tx_driver_val);
/* Enable fiber mode, enable and invert sig_det */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd);
/* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, &val);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC3, 0x8080);
+
+ /* Enable LPI pass through */
+ DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n");
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
+ MDIO_WC_REG_EEE_COMBO_CONTROL0,
+ 0x7c);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL4_MISC5, 0xc000);
/* 10G XFI Full Duplex */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
@@ -4139,40 +4207,35 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
u16 lane)
{
struct bnx2x *bp = params->bp;
- u16 val16;
-
+ u16 i;
+ static struct bnx2x_reg_set wc_regs[] = {
+ {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+ 0x0195},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+ 0x0007},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3,
+ 0x0002},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040},
+ {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140}
+ };
/* Set XFI clock comp as default. */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX66_CONTROL, (3<<13));
+
+ for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++)
+ bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg,
+ wc_regs[i].val);
- bnx2x_warpcore_reset_lane(bp, phy, 1);
- bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL1, 0x014a);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_FX100_CTRL3, 0x0800);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_TX_FIR_TAP, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
- bnx2x_warpcore_reset_lane(bp, phy, 0);
+
}
static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
@@ -4260,7 +4323,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
if (!vars->turn_to_run_wc_rt)
return;
- /* return if there is no link partner */
+ /* Return if there is no link partner */
if (!(bnx2x_warpcore_get_sigdet(phy, params))) {
DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n");
return;
@@ -4294,7 +4357,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
bnx2x_warpcore_reset_lane(bp, phy, 1);
bnx2x_warpcore_reset_lane(bp, phy, 0);
- /* restart Autoneg */
+ /* Restart Autoneg */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
@@ -4311,6 +4374,23 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
} /*params->rx_tx_asic_rst*/
}
+static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ u16 lane = bnx2x_get_warpcore_lane(phy, params);
+ struct bnx2x *bp = params->bp;
+ bnx2x_warpcore_clear_regs(phy, params, lane);
+ if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] ==
+ SPEED_10000) &&
+ (phy->media_type != ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+ bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+ } else {
+ DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0);
+ }
+}
+
static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4371,19 +4451,11 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
break;
case PORT_HW_CFG_NET_SERDES_IF_SFI:
-
- bnx2x_warpcore_clear_regs(phy, params, lane);
- if (vars->line_speed == SPEED_10000) {
- DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
- bnx2x_warpcore_set_10G_XFI(phy, params, 0);
- } else if (vars->line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
- bnx2x_warpcore_set_sgmii_speed(
- phy, params, 1, 0);
- }
/* Issue Module detection */
if (bnx2x_is_sfp_module_plugged(phy, params))
bnx2x_sfp_module_detection(phy, params);
+
+ bnx2x_warpcore_config_sfi(phy, params);
break;
case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
@@ -4500,12 +4572,9 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, 0);
/* Enable 1G MDIO (1-copy) */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
- val16 | 0x10);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+ 0x10);
/* Set 1G loopback based on lane (1-copy) */
lane = bnx2x_get_warpcore_lane(phy, params);
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4518,22 +4587,19 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
bnx2x_set_aer_mmd(params, phy);
} else {
/* 10G & 20G */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
- 0x4000);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+ 0x4000);
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
+ bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1);
}
}
-void bnx2x_sync_link(struct link_params *params,
- struct link_vars *vars)
+
+static void bnx2x_sync_link(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g_plus;
@@ -4606,7 +4672,7 @@ void bnx2x_sync_link(struct link_params *params,
USES_WARPCORE(bp) &&
(vars->line_speed == SPEED_1000))
vars->phy_flags |= PHY_SGMII_FLAG;
- /* anything 10 and over uses the bmac */
+ /* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= SPEED_10000);
if (link_10g_plus) {
@@ -4620,7 +4686,7 @@ void bnx2x_sync_link(struct link_params *params,
else
vars->mac_type = MAC_TYPE_EMAC;
}
- } else { /* link down */
+ } else { /* Link down */
DP(NETIF_MSG_LINK, "phy link down\n");
vars->phy_link_up = 0;
@@ -4629,10 +4695,12 @@ void bnx2x_sync_link(struct link_params *params,
vars->duplex = DUPLEX_FULL;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* indicate no mac active */
+ /* Indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ if (vars->link_status & LINK_STATUS_SFP_TX_FAULT)
+ vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG;
}
}
@@ -4698,7 +4766,7 @@ static void bnx2x_set_master_ln(struct link_params *params,
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
- /* set the master_ln for AN */
+ /* Set the master_ln for AN */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_XGXS_BLOCK2,
MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
@@ -4721,7 +4789,7 @@ static int bnx2x_reset_unicore(struct link_params *params,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
- /* reset the unicore */
+ /* Reset the unicore */
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4730,11 +4798,11 @@ static int bnx2x_reset_unicore(struct link_params *params,
if (set_serdes)
bnx2x_set_serdes_access(bp, params->port);
- /* wait for the reset to self clear */
+ /* Wait for the reset to self clear */
for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
udelay(5);
- /* the reset erased the previous bank value */
+ /* The reset erased the previous bank value */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL,
@@ -4952,7 +5020,7 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
}
-/* program SerDes, forced speed */
+/* Program SerDes, forced speed */
static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -4960,7 +5028,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 reg_val;
- /* program duplex, disable autoneg and sgmii*/
+ /* Program duplex, disable autoneg and sgmii*/
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
@@ -4979,7 +5047,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_MISC1, &reg_val);
- /* clearing the speed value before setting the right speed */
+ /* Clearing the speed value before setting the right speed */
DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
@@ -5008,7 +5076,7 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val = 0;
- /* set extended capabilities */
+ /* Set extended capabilities */
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
val |= MDIO_OVER_1G_UP1_2_5G;
if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
@@ -5028,7 +5096,7 @@ static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 val;
- /* for AN, we are always publishing full duplex */
+ /* For AN, we are always publishing full duplex */
CL22_WR_OVER_CL45(bp, phy,
MDIO_REG_BANK_COMBO_IEEE0,
@@ -5090,14 +5158,14 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 control1;
- /* in SGMII mode, the unicore is always slave */
+ /* In SGMII mode, the unicore is always slave */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_SERDES_DIGITAL,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
&control1);
control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
- /* set sgmii mode (and not fiber) */
+ /* Set sgmii mode (and not fiber) */
control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
@@ -5106,9 +5174,9 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
control1);
- /* if forced speed */
+ /* If forced speed */
if (!(vars->line_speed == SPEED_AUTO_NEG)) {
- /* set speed, disable autoneg */
+ /* Set speed, disable autoneg */
u16 mii_control;
CL22_RD_OVER_CL45(bp, phy,
@@ -5129,16 +5197,16 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
break;
case SPEED_10:
- /* there is nothing to set for 10M */
+ /* There is nothing to set for 10M */
break;
default:
- /* invalid speed for SGMII */
+ /* Invalid speed for SGMII */
DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
vars->line_speed);
break;
}
- /* setting the full duplex */
+ /* Setting the full duplex */
if (phy->req_duplex == DUPLEX_FULL)
mii_control |=
MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
@@ -5148,7 +5216,7 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
mii_control);
} else { /* AN mode */
- /* enable and restart AN */
+ /* Enable and restart AN */
bnx2x_restart_autoneg(phy, params, 0);
}
}
@@ -5244,7 +5312,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
- /* resolve from gp_status in case of AN complete and not sgmii */
+ /* Resolve from gp_status in case of AN complete and not sgmii */
if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
/* Update the advertised flow-controled of LD/LP in AN */
if (phy->req_line_speed == SPEED_AUTO_NEG)
@@ -5468,7 +5536,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
bnx2x_xgxs_an_resolve(phy, params, vars,
gp_status);
}
- } else { /* link_down */
+ } else { /* Link_down */
if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
SINGLE_MEDIA_DIRECT(params)) {
/* Check signal is detected */
@@ -5617,12 +5685,12 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
u16 tx_driver;
u16 bank;
- /* read precomp */
+ /* Read precomp */
CL22_RD_OVER_CL45(bp, phy,
MDIO_REG_BANK_OVER_1G,
MDIO_OVER_1G_LP_UP2, &lp_up2);
- /* bits [10:7] at lp_up2, positioned at [15:12] */
+ /* Bits [10:7] at lp_up2, positioned at [15:12] */
lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
@@ -5636,7 +5704,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params)
bank,
MDIO_TX0_TX_DRIVER, &tx_driver);
- /* replace tx_driver bits [15:12] */
+ /* Replace tx_driver bits [15:12] */
if (lp_up2 !=
(tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
@@ -5732,16 +5800,16 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
bnx2x_set_preemphasis(phy, params);
- /* forced speed requested? */
+ /* Forced speed requested? */
if (vars->line_speed != SPEED_AUTO_NEG ||
(SINGLE_MEDIA_DIRECT(params) &&
params->loopback_mode == LOOPBACK_EXT)) {
DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
- /* disable autoneg */
+ /* Disable autoneg */
bnx2x_set_autoneg(phy, params, vars, 0);
- /* program speed and duplex */
+ /* Program speed and duplex */
bnx2x_program_serdes(phy, params, vars);
} else { /* AN_mode */
@@ -5750,14 +5818,14 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
/* AN enabled */
bnx2x_set_brcm_cl37_advertisement(phy, params);
- /* program duplex & pause advertisement (for aneg) */
+ /* Program duplex & pause advertisement (for aneg) */
bnx2x_set_ieee_aneg_advertisement(phy, params,
vars->ieee_fc);
- /* enable autoneg */
+ /* Enable autoneg */
bnx2x_set_autoneg(phy, params, vars, enable_cl73);
- /* enable and restart AN */
+ /* Enable and restart AN */
bnx2x_restart_autoneg(phy, params, enable_cl73);
}
@@ -5793,12 +5861,12 @@ static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
bnx2x_set_master_ln(params, phy);
rc = bnx2x_reset_unicore(params, phy, 0);
- /* reset the SerDes and wait for reset bit return low */
- if (rc != 0)
+ /* Reset the SerDes and wait for reset bit return low */
+ if (rc)
return rc;
bnx2x_set_aer_mmd(params, phy);
- /* setting the masterLn_def again after the reset */
+ /* Setting the masterLn_def again after the reset */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
bnx2x_set_master_ln(params, phy);
bnx2x_set_swap_lanes(params, phy);
@@ -5823,7 +5891,7 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &ctrl);
if (!(ctrl & (1<<15)))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (cnt == 1000)
@@ -6054,7 +6122,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
if (!CHIP_IS_E3(bp)) {
- /* change the uni_phy_addr in the nig */
+ /* Change the uni_phy_addr in the nig */
md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
port*0x18));
@@ -6074,11 +6142,11 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
(MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
0x6041);
msleep(200);
- /* set aer mmd back */
+ /* Set aer mmd back */
bnx2x_set_aer_mmd(params, phy);
if (!CHIP_IS_E3(bp)) {
- /* and md_devad */
+ /* And md_devad */
REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
md_devad);
}
@@ -6275,7 +6343,7 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
MDIO_REG_BANK_GP_STATUS,
MDIO_GP_STATUS_TOP_AN_STATUS1,
&gp_status);
- /* link is up only if both local phy and external phy are up */
+ /* Link is up only if both local phy and external phy are up */
if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
return -ESRCH;
}
@@ -6296,7 +6364,9 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
for (phy_index = EXT_PHY1; phy_index < params->num_phys;
phy_index++) {
serdes_phy_type = ((params->phy[phy_index].media_type ==
- ETH_PHY_SFP_FIBER) ||
+ ETH_PHY_SFPP_10G_FIBER) ||
+ (params->phy[phy_index].media_type ==
+ ETH_PHY_SFP_1G_FIBER) ||
(params->phy[phy_index].media_type ==
ETH_PHY_XFP_FIBER) ||
(params->phy[phy_index].media_type ==
@@ -6397,7 +6467,7 @@ static int bnx2x_link_initialize(struct link_params *params,
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
- /* reset the SerDes/XGXS */
+ /* Reset the SerDes/XGXS */
REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
(0x1ff << (params->port*16)));
}
@@ -6430,10 +6500,10 @@ static int bnx2x_update_link_down(struct link_params *params,
DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
- /* indicate no mac active */
+ /* Indicate no mac active */
vars->mac_type = MAC_TYPE_NONE;
- /* update shared memory */
+ /* Update shared memory */
vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
LINK_STATUS_LINK_UP |
LINK_STATUS_PHYSICAL_LINK_FLAG |
@@ -6446,15 +6516,15 @@ static int bnx2x_update_link_down(struct link_params *params,
vars->line_speed = 0;
bnx2x_update_mng(params, vars->link_status);
- /* activate nig drain */
+ /* Activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10);
- /* reset BigMac/Xmac */
+ usleep_range(10000, 20000);
+ /* Reset BigMac/Xmac */
if (CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp)) {
bnx2x_bmac_rx_disable(bp, params->port);
@@ -6463,6 +6533,16 @@ static int bnx2x_update_link_down(struct link_params *params,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
}
if (CHIP_IS_E3(bp)) {
+ /* Prevent LPI Generation by chip */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2),
+ 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 + (params->port << 2),
+ 0);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_xmac_disable(params);
bnx2x_umac_disable(params);
}
@@ -6502,6 +6582,16 @@ static int bnx2x_update_link_up(struct link_params *params,
bnx2x_umac_enable(params, vars, 0);
bnx2x_set_led(params, vars,
LED_MODE_OPER, vars->line_speed);
+
+ if ((vars->eee_status & SHMEM_EEE_ACTIVE_BIT) &&
+ (vars->eee_status & SHMEM_EEE_LPI_REQUESTED_BIT)) {
+ DP(NETIF_MSG_LINK, "Enabling LPI assertion\n");
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 +
+ (params->port << 2), 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 1);
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_ENT_P0 +
+ (params->port << 2), 0xfc20);
+ }
}
if ((CHIP_IS_E1x(bp) ||
CHIP_IS_E2(bp))) {
@@ -6534,12 +6624,12 @@ static int bnx2x_update_link_up(struct link_params *params,
rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
vars->line_speed);
- /* disable drain */
+ /* Disable drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
- /* update shared memory */
+ /* Update shared memory */
bnx2x_update_mng(params, vars->link_status);
-
+ bnx2x_update_mng_eee(params, vars->eee_status);
/* Check remote fault */
for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
@@ -6583,6 +6673,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
phy_vars[phy_index].phy_link_up = 0;
phy_vars[phy_index].link_up = 0;
phy_vars[phy_index].fault_detected = 0;
+ /* different consideration, since vars holds inner state */
+ phy_vars[phy_index].eee_status = vars->eee_status;
}
if (USES_WARPCORE(bp))
@@ -6603,7 +6695,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
@@ -6712,6 +6804,9 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
vars->link_status |= LINK_STATUS_SERDES_LINK;
else
vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+
+ vars->eee_status = phy_vars[active_external_phy].eee_status;
+
DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
active_external_phy);
}
@@ -6745,11 +6840,11 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
} else if (prev_line_speed != vars->line_speed) {
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
0);
- msleep(1);
+ usleep_range(1000, 2000);
}
}
- /* anything 10 and over uses the bmac */
+ /* Anything 10 and over uses the bmac */
link_10g_plus = (vars->line_speed >= SPEED_10000);
bnx2x_link_int_ack(params, vars, link_10g_plus);
@@ -6815,7 +6910,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
{
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
- msleep(1);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
}
@@ -6912,7 +7007,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_REG_GEN_CTRL,
0x0001);
- /* ucode reboot and rst */
+ /* Ucode reboot and rst */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_GEN_CTRL,
@@ -6956,7 +7051,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
- msleep(1);
+ usleep_range(1000, 2000);
} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
((fw_msgout & 0xff) != 0x03 && (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
@@ -7050,11 +7145,11 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
"XAUI workaround has completed\n");
return 0;
}
- msleep(3);
+ usleep_range(3000, 6000);
}
break;
}
- msleep(3);
+ usleep_range(3000, 6000);
}
DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
return -EINVAL;
@@ -7128,7 +7223,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
- /* enable LASI */
+ /* Enable LASI */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
bnx2x_cl45_write(bp, phy,
@@ -7276,7 +7371,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
- /* clear the interrupt LASI status register */
+ /* Clear the interrupt LASI status register */
bnx2x_cl45_read(bp, phy,
MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
bnx2x_cl45_read(bp, phy,
@@ -7601,7 +7696,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val = 0;
u16 i;
- if (byte_cnt > 16) {
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 0xf\n");
return -EINVAL;
@@ -7655,7 +7750,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EINVAL;
}
@@ -7692,7 +7787,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
u32 data_array[4];
u16 addr32;
struct bnx2x *bp = params->bp;
- if (byte_cnt > 16) {
+
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 16 bytes\n");
return -EINVAL;
@@ -7728,7 +7824,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u16 val, i;
- if (byte_cnt > 16) {
+ if (byte_cnt > SFP_EEPROM_PAGE_SIZE) {
DP(NETIF_MSG_LINK,
"Reading from eeprom is limited to 0xf\n");
return -EINVAL;
@@ -7765,7 +7861,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
/* Wait appropriate time for two-wire command to finish before
* polling the status register
*/
- msleep(1);
+ usleep_range(1000, 2000);
/* Wait up to 500us for command complete status */
for (i = 0; i < 100; i++) {
@@ -7801,7 +7897,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
return 0;
- msleep(1);
+ usleep_range(1000, 2000);
}
return -EINVAL;
@@ -7811,7 +7907,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
struct link_params *params, u16 addr,
u8 byte_cnt, u8 *o_buf)
{
- int rc = -EINVAL;
+ int rc = -EOPNOTSUPP;
switch (phy->type) {
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
@@ -7836,7 +7932,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u32 sync_offset = 0, phy_idx, media_types;
- u8 val, check_limiting_mode = 0;
+ u8 val[2], check_limiting_mode = 0;
*edc_mode = EDC_MODE_LIMITING;
phy->media_type = ETH_PHY_UNSPECIFIED;
@@ -7844,13 +7940,13 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_CON_TYPE_ADDR,
- 1,
- &val) != 0) {
+ 2,
+ (u8 *)val) != 0) {
DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
return -EINVAL;
}
- switch (val) {
+ switch (val[0]) {
case SFP_EEPROM_CON_TYPE_VAL_COPPER:
{
u8 copper_module_type;
@@ -7888,13 +7984,29 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
break;
}
case SFP_EEPROM_CON_TYPE_VAL_LC:
- phy->media_type = ETH_PHY_SFP_FIBER;
- DP(NETIF_MSG_LINK, "Optic module detected\n");
check_limiting_mode = 1;
+ if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
+ SFP_EEPROM_COMP_CODE_LR_MASK |
+ SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
+ DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+ phy->media_type = ETH_PHY_SFP_1G_FIBER;
+ phy->req_line_speed = SPEED_1000;
+ } else {
+ int idx, cfg_idx = 0;
+ DP(NETIF_MSG_LINK, "10G Optic module detected\n");
+ for (idx = INT_PHY; idx < MAX_PHYS; idx++) {
+ if (params->phy[idx].type == phy->type) {
+ cfg_idx = LINK_CONFIG_IDX(idx);
+ break;
+ }
+ }
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
+ phy->req_line_speed = params->req_line_speed[cfg_idx];
+ }
break;
default:
DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
- val);
+ val[0]);
return -EINVAL;
}
sync_offset = params->shmem_base +
@@ -7980,7 +8092,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
return 0;
}
- /* format the warning message */
+ /* Format the warning message */
if (bnx2x_read_sfp_module_eeprom(phy,
params,
SFP_EEPROM_VENDOR_NAME_ADDR,
@@ -8026,7 +8138,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
timeout * 5);
return 0;
}
- msleep(5);
+ usleep_range(5000, 10000);
}
return -EINVAL;
}
@@ -8338,7 +8450,7 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
return -EINVAL;
} else if (bnx2x_verify_sfp_module(phy, params) != 0) {
- /* check SFP+ module compatibility */
+ /* Check SFP+ module compatibility */
DP(NETIF_MSG_LINK, "Module verification failed!!\n");
rc = -EINVAL;
/* Turn on fault module-detected led */
@@ -8401,14 +8513,34 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
/* Call the handling function in case module is detected */
if (gpio_val == 0) {
+ bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+ bnx2x_set_aer_mmd(params, phy);
+
bnx2x_power_sfp_module(params, phy, 1);
bnx2x_set_gpio_int(bp, gpio_num,
MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
gpio_port);
- if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+ if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) {
bnx2x_sfp_module_detection(phy, params);
- else
+ if (CHIP_IS_E3(bp)) {
+ u16 rx_tx_in_reset;
+ /* In case WC is out of reset, reconfigure the
+ * link speed while taking into account 1G
+ * module limitation.
+ */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL5_MISC6,
+ &rx_tx_in_reset);
+ if (!rx_tx_in_reset) {
+ bnx2x_warpcore_reset_lane(bp, phy, 1);
+ bnx2x_warpcore_config_sfi(phy, params);
+ bnx2x_warpcore_reset_lane(bp, phy, 0);
+ }
+ }
+ } else {
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+ }
} else {
u32 val = REG_RD(bp, params->shmem_base +
offsetof(struct shmem_region, dev_info.
@@ -8469,7 +8601,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
MDIO_PMA_LASI_TXCTRL);
- /* clear LASI indication*/
+ /* Clear LASI indication*/
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
bnx2x_cl45_read(bp, phy,
@@ -8537,7 +8669,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
if (val)
break;
- msleep(10);
+ usleep_range(10000, 20000);
}
DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
if ((params->feature_config_flags &
@@ -8666,7 +8798,7 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
MDIO_PMA_REG_GEN_CTRL,
MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
- /* wait for 150ms for microcode load */
+ /* Wait for 150ms for microcode load */
msleep(150);
/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
@@ -8860,6 +8992,63 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
}
+static void bnx2x_8727_config_speed(struct bnx2x_phy *phy,
+ struct link_params *params)
+{
+ struct bnx2x *bp = params->bp;
+ u16 tmp1, val;
+ /* Set option 1G speed */
+ if ((phy->req_line_speed == SPEED_1000) ||
+ (phy->media_type == ETH_PHY_SFP_1G_FIBER)) {
+ DP(NETIF_MSG_LINK, "Setting 1G force\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+ DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+ /* Power down the XAUI until link is up in case of dual-media
+ * and 1G
+ */
+ if (DUAL_MEDIA(params)) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, &val);
+ val |= (3<<10);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8727_PCS_GP, val);
+ }
+ } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+ ((phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+ DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+ } else {
+ /* Since the 8727 has only single reset pin, need to set the 10G
+ * registers although it is default
+ */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+ 0x0020);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+ 0x0008);
+ }
+}
+
static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
@@ -8877,7 +9066,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
lasi_ctrl_val = 0x0006;
DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
- /* enable LASI */
+ /* Enable LASI */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
rx_alarm_ctrl_val);
@@ -8929,56 +9118,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
- /* Set option 1G speed */
- if (phy->req_line_speed == SPEED_1000) {
- DP(NETIF_MSG_LINK, "Setting 1G force\n");
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
- DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
- /* Power down the XAUI until link is up in case of dual-media
- * and 1G
- */
- if (DUAL_MEDIA(params)) {
- bnx2x_cl45_read(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_GP, &val);
- val |= (3<<10);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_8727_PCS_GP, val);
- }
- } else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
- ((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
- ((phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
-
- DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
- } else {
- /* Since the 8727 has only single reset pin, need to set the 10G
- * registers although it is default
- */
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
- 0x0020);
- bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
- bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
- 0x0008);
- }
-
+ bnx2x_8727_config_speed(phy, params);
/* Set 2-wire transfer rate of SFP+ module EEPROM
* to 100Khz since some DACs(direct attached cables) do
* not work at 400Khz.
@@ -9105,6 +9245,9 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
bnx2x_sfp_module_detection(phy, params);
else
DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+
+ /* Reconfigure link speed based on module type limitations */
+ bnx2x_8727_config_speed(phy, params);
}
DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
@@ -9585,9 +9728,9 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params,
u16 fw_cmd,
- u16 cmd_args[])
+ u16 cmd_args[], int argc)
{
- u32 idx;
+ int idx;
u16 val;
struct bnx2x *bp = params->bp;
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
@@ -9599,7 +9742,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
MDIO_84833_CMD_HDLR_STATUS, &val);
if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (idx >= PHY84833_CMDHDLR_WAIT) {
DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
@@ -9607,7 +9750,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
}
/* Prepare argument(s) and issue command */
- for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
MDIO_84833_CMD_HDLR_DATA1 + idx,
cmd_args[idx]);
@@ -9620,7 +9763,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if ((idx >= PHY84833_CMDHDLR_WAIT) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
@@ -9628,7 +9771,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
return -EINVAL;
}
/* Gather returning data */
- for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ for (idx = 0; idx < argc; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_84833_CMD_HDLR_DATA1 + idx,
&cmd_args[idx]);
@@ -9662,7 +9805,7 @@ static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
data[1] = (u16)pair_swap;
status = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_PAIR_SWAP, data);
+ PHY84833_CMD_SET_PAIR_SWAP, data, PHY84833_CMDHDLR_MAX_ARGS);
if (status == 0)
DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
@@ -9740,6 +9883,95 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
+static int bnx2x_8483x_eee_timers(struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 eee_idle = 0, eee_mode;
+ struct bnx2x *bp = params->bp;
+
+ eee_idle = bnx2x_eee_calc_timer(params);
+
+ if (eee_idle) {
+ REG_WR(bp, MISC_REG_CPMU_LP_IDLE_THR_P0 + (params->port << 2),
+ eee_idle);
+ } else if ((params->eee_mode & EEE_MODE_ENABLE_LPI) &&
+ (params->eee_mode & EEE_MODE_OVERRIDE_NVRAM) &&
+ (params->eee_mode & EEE_MODE_OUTPUT_TIME)) {
+ DP(NETIF_MSG_LINK, "Error: Tx LPI is enabled with timer 0\n");
+ return -EINVAL;
+ }
+
+ vars->eee_status &= ~(SHMEM_EEE_TIMER_MASK | SHMEM_EEE_TIME_OUTPUT_BIT);
+ if (params->eee_mode & EEE_MODE_OUTPUT_TIME) {
+ /* eee_idle in 1u --> eee_status in 16u */
+ eee_idle >>= 4;
+ vars->eee_status |= (eee_idle & SHMEM_EEE_TIMER_MASK) |
+ SHMEM_EEE_TIME_OUTPUT_BIT;
+ } else {
+ if (bnx2x_eee_time_to_nvram(eee_idle, &eee_mode))
+ return -EINVAL;
+ vars->eee_status |= eee_mode;
+ }
+
+ return 0;
+}
+
+static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 0;
+
+ DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
+
+ /* Make Certain LPI is disabled */
+ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0);
+ REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0);
+
+ /* Prevent Phy from working in EEE and advertising it */
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE disable failed.\n");
+ return rc;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0);
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+
+ return 0;
+}
+
+static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ int rc;
+ struct bnx2x *bp = params->bp;
+ u16 cmd_args = 1;
+
+ DP(NETIF_MSG_LINK, "Advertise 10GBase-T EEE\n");
+
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "EEE enable failed.\n");
+ return rc;
+ }
+
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_EEE_ADV, 0x8);
+
+ /* Mask events preventing LPI generation */
+ REG_WR(bp, MISC_REG_CPMU_LP_MASK_EXT_P0 + (params->port << 2), 0xfc20);
+
+ vars->eee_status &= ~SHMEM_EEE_ADV_STATUS_MASK;
+ vars->eee_status |= (SHMEM_EEE_10G_ADV << SHMEM_EEE_ADV_STATUS_SHIFT);
+
+ return 0;
+}
+
#define PHY84833_CONSTANT_LATENCY 1193
static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_params *params,
@@ -9752,7 +9984,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
int rc = 0;
- msleep(1);
+ usleep_range(1000, 2000);
if (!(CHIP_IS_E1x(bp)))
port = BP_PATH(bp);
@@ -9839,8 +10071,9 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
cmd_args[3] = PHY84833_CONSTANT_LATENCY;
rc = bnx2x_84833_cmd_hdlr(phy, params,
- PHY84833_CMD_SET_EEE_MODE, cmd_args);
- if (rc != 0)
+ PHY84833_CMD_SET_EEE_MODE, cmd_args,
+ PHY84833_CMDHDLR_MAX_ARGS);
+ if (rc)
DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
if (initialize)
@@ -9864,6 +10097,48 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_CTL_REG_84823_USER_CTRL_REG, val);
}
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_FW_REV, &val);
+
+ /* Configure EEE support */
+ if ((val >= MDIO_84833_TOP_CFG_FW_EEE) && bnx2x_eee_has_cap(params)) {
+ phy->flags |= FLAGS_EEE_10GBT;
+ vars->eee_status |= SHMEM_EEE_10G_ADV <<
+ SHMEM_EEE_SUPPORTED_SHIFT;
+ /* Propogate params' bits --> vars (for migration exposure) */
+ if (params->eee_mode & EEE_MODE_ENABLE_LPI)
+ vars->eee_status |= SHMEM_EEE_LPI_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_LPI_REQUESTED_BIT;
+
+ if (params->eee_mode & EEE_MODE_ADV_LPI)
+ vars->eee_status |= SHMEM_EEE_REQUESTED_BIT;
+ else
+ vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT;
+
+ rc = bnx2x_8483x_eee_timers(params, vars);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n");
+ bnx2x_8483x_disable_eee(phy, params, vars);
+ return rc;
+ }
+
+ if ((params->req_duplex[actual_phy_selection] == DUPLEX_FULL) &&
+ (params->eee_mode & EEE_MODE_ADV_LPI) &&
+ (bnx2x_eee_calc_timer(params) ||
+ !(params->eee_mode & EEE_MODE_ENABLE_LPI)))
+ rc = bnx2x_8483x_enable_eee(phy, params, vars);
+ else
+ rc = bnx2x_8483x_disable_eee(phy, params, vars);
+ if (rc) {
+ DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n");
+ return rc;
+ }
+ } else {
+ phy->flags &= ~FLAGS_EEE_10GBT;
+ vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
+ }
+
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
/* Bring PHY out of super isolate mode as the final step. */
bnx2x_cl45_read(bp, phy,
@@ -9918,17 +10193,19 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n",
legacy_status);
link_up = ((legacy_status & (1<<11)) == (1<<11));
- if (link_up) {
- legacy_speed = (legacy_status & (3<<9));
- if (legacy_speed == (0<<9))
- vars->line_speed = SPEED_10;
- else if (legacy_speed == (1<<9))
- vars->line_speed = SPEED_100;
- else if (legacy_speed == (2<<9))
- vars->line_speed = SPEED_1000;
- else /* Should not happen */
- vars->line_speed = 0;
+ legacy_speed = (legacy_status & (3<<9));
+ if (legacy_speed == (0<<9))
+ vars->line_speed = SPEED_10;
+ else if (legacy_speed == (1<<9))
+ vars->line_speed = SPEED_100;
+ else if (legacy_speed == (2<<9))
+ vars->line_speed = SPEED_1000;
+ else { /* Should not happen: Treat as link down */
+ vars->line_speed = 0;
+ link_up = 0;
+ }
+ if (link_up) {
if (legacy_status & (1<<8))
vars->duplex = DUPLEX_FULL;
else
@@ -9956,7 +10233,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
}
}
if (link_up) {
- DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+ DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n",
vars->line_speed);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
@@ -9995,6 +10272,31 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
if (val & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ /* Determine if EEE was negotiated */
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ u32 eee_shmem = 0;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_EEE_ADV, &val1);
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_EEE_ADV, &val2);
+ if ((val1 & val2) & 0x8) {
+ DP(NETIF_MSG_LINK, "EEE negotiated\n");
+ vars->eee_status |= SHMEM_EEE_ACTIVE_BIT;
+ }
+
+ if (val2 & 0x12)
+ eee_shmem |= SHMEM_EEE_100M_ADV;
+ if (val2 & 0x4)
+ eee_shmem |= SHMEM_EEE_1G_ADV;
+ if (val2 & 0x68)
+ eee_shmem |= SHMEM_EEE_10G_ADV;
+
+ vars->eee_status &= ~SHMEM_EEE_LP_ADV_STATUS_MASK;
+ vars->eee_status |= (eee_shmem <<
+ SHMEM_EEE_LP_ADV_STATUS_SHIFT);
+ }
}
return link_up;
@@ -10273,7 +10575,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
u32 cfg_pin;
DP(NETIF_MSG_LINK, "54618SE cfg init\n");
- usleep_range(1000, 1000);
+ usleep_range(1000, 2000);
/* This works with E3 only, no need to check the chip
* before determining the port.
@@ -10342,7 +10644,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
- /* read all advertisement */
+ /* Read all advertisement */
bnx2x_cl22_read(bp, phy,
0x09,
&an_1000_val);
@@ -10379,7 +10681,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* set 100 speed advertisement */
+ /* Set 100 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
@@ -10393,7 +10695,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Advertising 100M\n");
}
- /* set 10 speed advertisement */
+ /* Set 10 speed advertisement */
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
@@ -10532,7 +10834,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
/* Get speed operation status */
bnx2x_cl22_read(bp, phy,
- 0x19,
+ MDIO_REG_GPHY_AUX_STATUS,
&legacy_status);
DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
@@ -10759,7 +11061,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
val2, val1);
link_up = ((val1 & 4) == 4);
- /* if link is up print the AN outcome of the SFX7101 PHY */
+ /* If link is up print the AN outcome of the SFX7101 PHY */
if (link_up) {
bnx2x_cl45_read(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
@@ -10771,7 +11073,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
- /* read LP advertised speeds */
+ /* Read LP advertised speeds */
if (val2 & (1<<11))
vars->link_status |=
LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
@@ -11090,7 +11392,7 @@ static struct bnx2x_phy phy_8706 = {
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause),
- .media_type = ETH_PHY_SFP_FIBER,
+ .media_type = ETH_PHY_SFPP_10G_FIBER,
.ver_addr = 0,
.req_flow_ctrl = 0,
.req_line_speed = 0,
@@ -11249,7 +11551,8 @@ static struct bnx2x_phy phy_84833 = {
.def_md_devad = 0,
.flags = (FLAGS_FAN_FAILURE_DET_REQ |
FLAGS_REARM_LATCH_SIGNAL |
- FLAGS_TX_ERROR_CHECK),
+ FLAGS_TX_ERROR_CHECK |
+ FLAGS_EEE_10GBT),
.rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
.mdio_ctrl = 0,
@@ -11428,7 +11731,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
SUPPORTED_FIBRE |
SUPPORTED_Pause |
SUPPORTED_Asym_Pause);
- phy->media_type = ETH_PHY_SFP_FIBER;
+ phy->media_type = ETH_PHY_SFPP_10G_FIBER;
break;
case PORT_HW_CFG_NET_SERDES_IF_KR:
phy->media_type = ETH_PHY_KR;
@@ -11968,7 +12271,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
vars->mac_type = MAC_TYPE_NONE;
vars->phy_flags = 0;
- /* disable attentions */
+ /* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
@@ -12017,6 +12320,8 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
break;
}
bnx2x_update_mng(params, vars->link_status);
+
+ bnx2x_update_mng_eee(params, vars->eee_status);
return 0;
}
@@ -12026,19 +12331,22 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
struct bnx2x *bp = params->bp;
u8 phy_index, port = params->port, clear_latch_ind = 0;
DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
- /* disable attentions */
+ /* Disable attentions */
vars->link_status = 0;
bnx2x_update_mng(params, vars->link_status);
+ vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK |
+ SHMEM_EEE_ACTIVE_BIT);
+ bnx2x_update_mng_eee(params, vars->eee_status);
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
(NIG_MASK_XGXS0_LINK_STATUS |
NIG_MASK_XGXS0_LINK10G |
NIG_MASK_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- /* activate nig drain */
+ /* Activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
- /* disable nig egress interface */
+ /* Disable nig egress interface */
if (!CHIP_IS_E3(bp)) {
REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
@@ -12051,15 +12359,15 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
bnx2x_xmac_disable(params);
bnx2x_umac_disable(params);
}
- /* disable emac */
+ /* Disable emac */
if (!CHIP_IS_E3(bp))
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
- msleep(10);
+ usleep_range(10000, 20000);
/* The PHY reset is controlled by GPIO 1
* Hold it as vars low
*/
- /* clear link led */
+ /* Clear link led */
bnx2x_set_mdio_clk(bp, params->chip_id, port);
bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
@@ -12089,9 +12397,9 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
params->phy[INT_PHY].link_reset(
&params->phy[INT_PHY], params);
- /* disable nig ingress interface */
+ /* Disable nig ingress interface */
if (!CHIP_IS_E3(bp)) {
- /* reset BigMac */
+ /* Reset BigMac */
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
@@ -12148,7 +12456,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
DP(NETIF_MSG_LINK, "populate_phy failed\n");
return -EINVAL;
}
- /* disable attentions */
+ /* Disable attentions */
bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
port_of_path*4,
(NIG_MASK_XGXS0_LINK_STATUS |
@@ -12222,7 +12530,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
bnx2x_cl45_write(bp, phy_blk[port],
MDIO_PMA_DEVAD,
MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
- msleep(15);
+ usleep_range(15000, 30000);
/* Read modify write the SPI-ROM version select register */
bnx2x_cl45_read(bp, phy_blk[port],
@@ -12254,7 +12562,7 @@ static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
bnx2x_ext_phy_hw_reset(bp, 0);
- msleep(5);
+ usleep_range(5000, 10000);
for (port = 0; port < PORT_MAX; port++) {
u32 shmem_base, shmem2_base;
@@ -12361,11 +12669,11 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
/* Initiate PHY reset*/
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
port);
- msleep(1);
+ usleep_range(1000, 2000);
bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
port);
- msleep(5);
+ usleep_range(5000, 10000);
/* PART1 - Reset both phys */
for (port = PORT_MAX - 1; port >= PORT_0; port--) {
@@ -12459,7 +12767,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
MDIO_PMA_REG_CTRL, &val);
if (!(val & (1<<15)))
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (cnt >= 1500) {
DP(NETIF_MSG_LINK, "84833 reset timeout\n");
@@ -12549,7 +12857,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
break;
}
- if (rc != 0)
+ if (rc)
netdev_err(bp->dev, "Warning: PHY was not initialized,"
" Port %d\n",
0);
@@ -12630,30 +12938,41 @@ static void bnx2x_check_over_curr(struct link_params *params,
vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
}
-static void bnx2x_analyze_link_error(struct link_params *params,
- struct link_vars *vars, u32 lss_status,
- u8 notify)
+/* Returns 0 if no change occured since last check; 1 otherwise. */
+static u8 bnx2x_analyze_link_error(struct link_params *params,
+ struct link_vars *vars, u32 status,
+ u32 phy_flag, u32 link_flag, u8 notify)
{
struct bnx2x *bp = params->bp;
/* Compare new value with previous value */
u8 led_mode;
- u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
+ u32 old_status = (vars->phy_flags & phy_flag) ? 1 : 0;
- if ((lss_status ^ half_open_conn) == 0)
- return;
+ if ((status ^ old_status) == 0)
+ return 0;
/* If values differ */
- DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
- half_open_conn, lss_status);
+ switch (phy_flag) {
+ case PHY_HALF_OPEN_CONN_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze Remote Fault\n");
+ break;
+ case PHY_SFP_TX_FAULT_FLAG:
+ DP(NETIF_MSG_LINK, "Analyze TX Fault\n");
+ break;
+ default:
+ DP(NETIF_MSG_LINK, "Analyze UNKOWN\n");
+ }
+ DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
+ old_status, status);
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
- if (lss_status) {
- DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
+ if (status) {
vars->link_status &= ~LINK_STATUS_LINK_UP;
+ vars->link_status |= link_flag;
vars->link_up = 0;
- vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+ vars->phy_flags |= phy_flag;
/* activate nig drain */
REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
@@ -12662,10 +12981,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
*/
led_mode = LED_MODE_OFF;
} else {
- DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
vars->link_status |= LINK_STATUS_LINK_UP;
+ vars->link_status &= ~link_flag;
vars->link_up = 1;
- vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+ vars->phy_flags &= ~phy_flag;
led_mode = LED_MODE_OPER;
/* Clear nig drain */
@@ -12682,6 +13001,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
if (notify)
bnx2x_notify_link_changed(bp);
+
+ return 1;
}
/******************************************************************************
@@ -12723,7 +13044,9 @@ int bnx2x_check_half_open_conn(struct link_params *params,
if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
lss_status = 1;
- bnx2x_analyze_link_error(params, vars, lss_status, notify);
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
/* Check E1X / E2 BMAC */
@@ -12740,11 +13063,55 @@ int bnx2x_check_half_open_conn(struct link_params *params,
REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
lss_status = (wb_data[0] > 0);
- bnx2x_analyze_link_error(params, vars, lss_status, notify);
+ bnx2x_analyze_link_error(params, vars, lss_status,
+ PHY_HALF_OPEN_CONN_FLAG,
+ LINK_STATUS_NONE, notify);
}
return 0;
}
+static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u32 cfg_pin, value = 0;
+ u8 led_change, port = params->port;
+ /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */
+ cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region,
+ dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+ PORT_HW_CFG_E3_TX_FAULT_MASK) >>
+ PORT_HW_CFG_E3_TX_FAULT_SHIFT;
+
+ if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) {
+ DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin);
+ return;
+ }
+
+ led_change = bnx2x_analyze_link_error(params, vars, value,
+ PHY_SFP_TX_FAULT_FLAG,
+ LINK_STATUS_SFP_TX_FAULT, 1);
+
+ if (led_change) {
+ /* Change TX_Fault led, set link status for further syncs */
+ u8 led_mode;
+
+ if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) {
+ led_mode = MISC_REGISTERS_GPIO_HIGH;
+ vars->link_status |= LINK_STATUS_SFP_TX_FAULT;
+ } else {
+ led_mode = MISC_REGISTERS_GPIO_LOW;
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ }
+
+ /* If module is unapproved, led should be on regardless */
+ if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) {
+ DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n",
+ led_mode);
+ bnx2x_set_e3_module_fault_led(params, led_mode);
+ }
+ }
+}
void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
{
u16 phy_idx;
@@ -12763,7 +13130,26 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
struct bnx2x_phy *phy = &params->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
bnx2x_check_over_curr(params, vars);
- bnx2x_warpcore_config_runtime(phy, params, vars);
+ if (vars->rx_tx_asic_rst)
+ bnx2x_warpcore_config_runtime(phy, params, vars);
+
+ if ((REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ port_hw_config[params->port].default_cfg))
+ & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SFI) {
+ if (bnx2x_is_sfp_module_plugged(phy, params)) {
+ bnx2x_sfp_tx_fault_detection(phy, params, vars);
+ } else if (vars->link_status &
+ LINK_STATUS_SFP_TX_FAULT) {
+ /* Clean trail, interrupt corrects the leds */
+ vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT;
+ vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG;
+ /* Update link status in the shared memory */
+ bnx2x_update_mng(params, vars->link_status);
+ }
+ }
+
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index ea4371f4335f..51cac8130051 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,7 @@
#define SPEED_AUTO_NEG 0
#define SPEED_20000 20000
+#define SFP_EEPROM_PAGE_SIZE 16
#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
#define SFP_EEPROM_VENDOR_NAME_SIZE 16
#define SFP_EEPROM_VENDOR_OUI_ADDR 0x25
@@ -125,6 +126,11 @@ typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
struct link_params *params, u8 mode);
typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
struct link_params *params, u32 action);
+struct bnx2x_reg_set {
+ u8 devad;
+ u16 reg;
+ u16 val;
+};
struct bnx2x_phy {
u32 type;
@@ -149,6 +155,7 @@ struct bnx2x_phy {
#define FLAGS_DUMMY_READ (1<<9)
#define FLAGS_MDC_MDIO_WA_B0 (1<<10)
#define FLAGS_TX_ERROR_CHECK (1<<12)
+#define FLAGS_EEE_10GBT (1<<13)
/* preemphasis values for the rx side */
u16 rx_preemphasis[4];
@@ -162,14 +169,15 @@ struct bnx2x_phy {
u32 supported;
u32 media_type;
-#define ETH_PHY_UNSPECIFIED 0x0
-#define ETH_PHY_SFP_FIBER 0x1
-#define ETH_PHY_XFP_FIBER 0x2
-#define ETH_PHY_DA_TWINAX 0x3
-#define ETH_PHY_BASE_T 0x4
-#define ETH_PHY_KR 0xf0
-#define ETH_PHY_CX4 0xf1
-#define ETH_PHY_NOT_PRESENT 0xff
+#define ETH_PHY_UNSPECIFIED 0x0
+#define ETH_PHY_SFPP_10G_FIBER 0x1
+#define ETH_PHY_XFP_FIBER 0x2
+#define ETH_PHY_DA_TWINAX 0x3
+#define ETH_PHY_BASE_T 0x4
+#define ETH_PHY_SFP_1G_FIBER 0x5
+#define ETH_PHY_KR 0xf0
+#define ETH_PHY_CX4 0xf1
+#define ETH_PHY_NOT_PRESENT 0xff
/* The address in which version is located*/
u32 ver_addr;
@@ -265,6 +273,30 @@ struct link_params {
u8 num_phys;
u8 rsrv;
+
+ /* Used to configure the EEE Tx LPI timer, has several modes of
+ * operation, according to bits 29:28 -
+ * 2'b00: Timer will be configured by nvram, output will be the value
+ * from nvram.
+ * 2'b01: Timer will be configured by nvram, output will be in
+ * microseconds.
+ * 2'b10: bits 1:0 contain an nvram value which will be used instead
+ * of the one located in the nvram. Output will be that value.
+ * 2'b11: bits 19:0 contain the idle timer in microseconds; output
+ * will be in microseconds.
+ * Bits 31:30 should be 2'b11 in order for EEE to be enabled.
+ */
+ u32 eee_mode;
+#define EEE_MODE_NVRAM_BALANCED_TIME (0xa00)
+#define EEE_MODE_NVRAM_AGGRESSIVE_TIME (0x100)
+#define EEE_MODE_NVRAM_LATENCY_TIME (0x6000)
+#define EEE_MODE_NVRAM_MASK (0x3)
+#define EEE_MODE_TIMER_MASK (0xfffff)
+#define EEE_MODE_OUTPUT_TIME (1<<28)
+#define EEE_MODE_OVERRIDE_NVRAM (1<<29)
+#define EEE_MODE_ENABLE_LPI (1<<30)
+#define EEE_MODE_ADV_LPI (1<<31)
+
u16 hw_led_mode; /* part of the hw_config read from the shmem */
u32 multi_phy_config;
@@ -282,6 +314,7 @@ struct link_vars {
#define PHY_PHYSICAL_LINK_FLAG (1<<2)
#define PHY_HALF_OPEN_CONN_FLAG (1<<3)
#define PHY_OVER_CURRENT_FLAG (1<<4)
+#define PHY_SFP_TX_FAULT_FLAG (1<<5)
u8 mac_type;
#define MAC_TYPE_NONE 0
@@ -301,6 +334,7 @@ struct link_vars {
/* The same definitions as the shmem parameter */
u32 link_status;
+ u32 eee_status;
u8 fault_detected;
u8 rsrv1;
u16 periodic_flags;
@@ -459,8 +493,7 @@ struct bnx2x_ets_params {
struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
};
-/**
- * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+/* Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
* when link is already up
*/
int bnx2x_update_pfc(struct link_params *params,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f755a665dab3..9aaf863b4237 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -74,6 +74,8 @@
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -104,7 +106,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
#define INT_MODE_INTx 1
#define INT_MODE_MSI 2
-static int int_mode;
+int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
@@ -135,7 +137,10 @@ enum bnx2x_board_type {
BCM57800_MF,
BCM57810,
BCM57810_MF,
- BCM57840,
+ BCM57840_O,
+ BCM57840_4_10,
+ BCM57840_2_20,
+ BCM57840_MFO,
BCM57840_MF,
BCM57811,
BCM57811_MF
@@ -155,6 +160,9 @@ static struct {
{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
+ { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
@@ -187,8 +195,17 @@ static struct {
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
#endif
-#ifndef PCI_DEVICE_ID_NX2_57840
-#define PCI_DEVICE_ID_NX2_57840 CHIP_NUM_57840
+#ifndef PCI_DEVICE_ID_NX2_57840_O
+#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_4_10
+#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_2_20
+#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MFO
+#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
@@ -209,7 +226,10 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
- { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
@@ -758,7 +778,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* Tx */
for_each_cos_in_tx_queue(fp, cos)
{
- txdata = fp->txdata[cos];
+ txdata = *fp->txdata_ptr[cos];
BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
i, txdata.tx_pkt_prod,
txdata.tx_pkt_cons, txdata.tx_bd_prod,
@@ -876,7 +896,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
for_each_tx_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
- struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
@@ -1583,7 +1603,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
- struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+ struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
DP(BNX2X_MSG_SP,
"fp %d cid %d got ramrod #%d state is %x type is %d\n",
@@ -1710,7 +1730,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
/* Handle Rx or Tx according to SB id */
prefetch(fp->rx_cons_sb);
for_each_cos_in_tx_queue(fp, cos)
- prefetch(fp->txdata[cos].tx_cons_sb);
+ prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
prefetch(&fp->sb_running_index[SM_RX_ID]);
napi_schedule(&bnx2x_fp(bp, fp->index, napi));
status &= ~mask;
@@ -2124,6 +2144,11 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
}
}
+ if (load_mode == LOAD_LOOPBACK_EXT) {
+ struct link_params *lp = &bp->link_params;
+ lp->loopback_mode = LOOPBACK_EXT;
+ }
+
rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
@@ -2916,7 +2941,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
u8 cos)
{
- txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+ txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
txq_init->fw_sb_id = fp->fw_sb_id;
@@ -3030,9 +3055,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
memcpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN - 1);
- bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
- DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
- ether_stat->mac_local);
+ bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local);
ether_stat->mtu_size = bp->dev->mtu;
@@ -3055,7 +3080,8 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
struct fcoe_stats_info *fcoe_stat =
&bp->slowpath->drv_info_to_mcp.fcoe_stat;
- memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+ memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
+ bp->fip_mac, ETH_ALEN);
fcoe_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3063,11 +3089,11 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
/* insert FCoE stats from ramrod response */
if (!NO_FCOE(bp)) {
struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
tstorm_queue_statistics;
struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
xstorm_queue_statistics;
struct fcoe_statistics_params *fw_fcoe_stat =
@@ -3146,7 +3172,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
struct iscsi_stats_info *iscsi_stat =
&bp->slowpath->drv_info_to_mcp.iscsi_stat;
- memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+ memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
+ bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
iscsi_stat->qos_priority =
app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -3176,6 +3203,12 @@ static void bnx2x_set_mf_bw(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
+static void bnx2x_handle_eee_event(struct bnx2x *bp)
+{
+ DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
+}
+
static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
{
enum drv_info_opcode op_code;
@@ -3742,6 +3775,8 @@ static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_AFEX_EVENT_MASK)
bnx2x_handle_afex_cmd(bp,
val & DRV_STATUS_AFEX_EVENT_MASK);
+ if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
+ bnx2x_handle_eee_event(bp);
if (bp->link_vars.periodic_flags &
PERIODIC_FLAGS_LINK_EVENT) {
/* sync with link */
@@ -4615,11 +4650,11 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
case BNX2X_FILTER_MAC_PENDING:
DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
#ifdef BCM_CNIC
- if (cid == BNX2X_ISCSI_ETH_CID)
+ if (cid == BNX2X_ISCSI_ETH_CID(bp))
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
else
#endif
- vlan_mac_obj = &bp->fp[cid].mac_obj;
+ vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
break;
case BNX2X_FILTER_MCAST_PENDING:
@@ -4717,7 +4752,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
for_each_eth_queue(bp, q) {
/* Set the appropriate Queue object */
fp = &bp->fp[q];
- queue_params.q_obj = &fp->q_obj;
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* send the ramrod */
rc = bnx2x_queue_state_change(bp, &queue_params);
@@ -4728,8 +4763,8 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
#ifdef BCM_CNIC
if (!NO_FCOE(bp)) {
- fp = &bp->fp[FCOE_IDX];
- queue_params.q_obj = &fp->q_obj;
+ fp = &bp->fp[FCOE_IDX(bp)];
+ queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* clear pending completion bit */
__clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
@@ -4761,11 +4796,11 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
{
DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
#ifdef BCM_CNIC
- if (cid == BNX2X_FCOE_ETH_CID)
- return &bnx2x_fcoe(bp, q_obj);
+ if (cid == BNX2X_FCOE_ETH_CID(bp))
+ return &bnx2x_fcoe_sp_obj(bp, q_obj);
else
#endif
- return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+ return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
}
static void bnx2x_eq_int(struct bnx2x *bp)
@@ -5647,15 +5682,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
/* init tx data */
for_each_cos_in_tx_queue(fp, cos) {
- bnx2x_init_txdata(bp, &fp->txdata[cos],
- CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
- FP_COS_TO_TXQ(fp, cos),
- BNX2X_TX_SB_INDEX_BASE + cos);
- cids[cos] = fp->txdata[cos].cid;
+ bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
+ CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
+ FP_COS_TO_TXQ(fp, cos, bp),
+ BNX2X_TX_SB_INDEX_BASE + cos, fp);
+ cids[cos] = fp->txdata_ptr[cos]->cid;
}
- bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
- BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+ bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
+ fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
/**
@@ -5706,7 +5741,7 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp)
for_each_tx_queue(bp, i)
for_each_cos_in_tx_queue(&bp->fp[i], cos)
- bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
+ bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
}
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
@@ -7055,12 +7090,10 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
for (i = 0; i < L2_ILT_LINES(bp); i++) {
- ilt->lines[cdu_ilt_start + i].page =
- bp->context.vcxt + (ILT_PAGE_CIDS * i);
+ ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
ilt->lines[cdu_ilt_start + i].page_mapping =
- bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
- /* cdu ilt pages are allocated manually so there's no need to
- set the size */
+ bp->context[i].cxt_mapping;
+ ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
}
bnx2x_ilt_init_op(bp, INITOP_SET);
@@ -7327,6 +7360,8 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
void bnx2x_free_mem(struct bnx2x *bp)
{
+ int i;
+
/* fastpath */
bnx2x_free_fp_mem(bp);
/* end of fastpath */
@@ -7340,9 +7375,9 @@ void bnx2x_free_mem(struct bnx2x *bp)
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
- BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
- bp->context.size);
-
+ for (i = 0; i < L2_ILT_LINES(bp); i++)
+ BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
+ bp->context[i].size);
bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
BNX2X_FREE(bp->ilt->lines);
@@ -7428,6 +7463,8 @@ alloc_mem_err:
int bnx2x_alloc_mem(struct bnx2x *bp)
{
+ int i, allocated, context_size;
+
#ifdef BCM_CNIC
if (!CHIP_IS_E1x(bp))
/* size = the status block + ramrod buffers */
@@ -7457,11 +7494,29 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
if (bnx2x_alloc_fw_stats_mem(bp))
goto alloc_mem_err;
- bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
-
- BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
- bp->context.size);
+ /* Allocate memory for CDU context:
+ * This memory is allocated separately and not in the generic ILT
+ * functions because CDU differs in few aspects:
+ * 1. There are multiple entities allocating memory for context -
+ * 'regular' driver, CNIC and SRIOV driver. Each separately controls
+ * its own ILT lines.
+ * 2. Since CDU page-size is not a single 4KB page (which is the case
+ * for the other ILT clients), to be efficient we want to support
+ * allocation of sub-page-size in the last entry.
+ * 3. Context pointers are used by the driver to pass to FW / update
+ * the context (for the other ILT clients the pointers are used just to
+ * free the memory during unload).
+ */
+ context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+ for (i = 0, allocated = 0; allocated < context_size; i++) {
+ bp->context[i].size = min(CDU_ILT_PAGE_SZ,
+ (context_size - allocated));
+ BNX2X_PCI_ALLOC(bp->context[i].vcxt,
+ &bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ allocated += bp->context[i].size;
+ }
BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
@@ -7563,8 +7618,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
/* Eth MAC is set on RSS leading client (fp[0]) */
- return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
- BNX2X_ETH_MAC, &ramrod_flags);
+ return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
+ set, BNX2X_ETH_MAC, &ramrod_flags);
}
int bnx2x_setup_leading(struct bnx2x *bp)
@@ -7579,7 +7634,7 @@ int bnx2x_setup_leading(struct bnx2x *bp)
*
* In case of MSI-X it will also try to enable MSI-X.
*/
-static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+void bnx2x_set_int_mode(struct bnx2x *bp)
{
switch (int_mode) {
case INT_MODE_MSI:
@@ -7590,11 +7645,6 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
- /* Set number of queues for MSI-X mode */
- bnx2x_set_num_queues(bp);
-
- BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
-
/* if we can't use MSI-X we only need one fp,
* so try to enable MSI-X with the requested number of fp's
* and fallback to MSI or legacy INTx with one fp
@@ -7735,6 +7785,8 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
{
u8 cos;
+ int cxt_index, cxt_offset;
+
/* FCoE Queue uses Default SB, thus has no HC capabilities */
if (!IS_FCOE_FP(fp)) {
__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
@@ -7771,9 +7823,13 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
fp->index, init_params->max_cos);
/* set the context pointers queue object */
- for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+ for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
+ cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
+ cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
+ ILT_PAGE_CIDS);
init_params->cxts[cos] =
- &bp->context.vcxt[fp->txdata[cos].cid].eth;
+ &bp->context[cxt_index].vcxt[cxt_offset].eth;
+ }
}
int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
@@ -7838,7 +7894,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
IGU_INT_ENABLE, 0);
- q_params.q_obj = &fp->q_obj;
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
@@ -7911,7 +7967,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
- q_params.q_obj = &fp->q_obj;
+ q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
/* We want to wait for completion in this context */
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
@@ -7922,7 +7978,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
tx_index++){
/* ascertain this is a normal queue*/
- txdata = &fp->txdata[tx_index];
+ txdata = fp->txdata_ptr[tx_index];
DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
txdata->txq_index);
@@ -8289,7 +8345,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos)
- rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+ rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
#ifdef BNX2X_STOP_ON_ERROR
if (rc)
return;
@@ -8300,12 +8356,13 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
usleep_range(1000, 1000);
/* Clean all ETH MACs */
- rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
+ false);
if (rc < 0)
BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
/* Clean up UC list */
- rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+ rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
true);
if (rc < 0)
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
@@ -9697,6 +9754,11 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
BC_SUPPORTS_PFC_STATS : 0;
+ bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
+ BC_SUPPORTS_FCOE_FEATURES : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
+ BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -10082,7 +10144,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
int port = BP_PORT(bp);
u32 config;
- u32 ext_phy_type, ext_phy_config;
+ u32 ext_phy_type, ext_phy_config, eee_mode;
bp->link_params.bp = bp;
bp->link_params.port = port;
@@ -10149,6 +10211,19 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
bp->common.shmem_base,
bp->common.shmem2_base);
+
+ /* Configure link feature according to nvram value */
+ eee_mode = (((SHMEM_RD(bp, dev_info.
+ port_feature_config[port].eee_power_mode)) &
+ PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
+ PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
+ if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
+ bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
+ EEE_MODE_ENABLE_LPI |
+ EEE_MODE_OUTPUT_TIME;
+ } else {
+ bp->link_params.eee_mode = 0;
+ }
}
void bnx2x_get_iscsi_info(struct bnx2x *bp)
@@ -10997,7 +11072,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp)
int rc;
struct net_device *dev = bp->dev;
struct netdev_hw_addr *ha;
- struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+ struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
unsigned long ramrod_flags = 0;
/* First schedule a cleanup up of old configuration */
@@ -11503,8 +11578,7 @@ static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
}
}
-/**
- * IRO array is stored in the following format:
+/* IRO array is stored in the following format:
* {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
*/
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
@@ -11672,7 +11746,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
- int cid_count = BNX2X_L2_CID_COUNT(bp);
+ int cid_count = BNX2X_L2_MAX_CID(bp);
#ifdef BCM_CNIC
cid_count += CNIC_CID_MAX;
@@ -11717,7 +11791,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
struct bnx2x *bp;
int pcie_width, pcie_speed;
int rc, max_non_def_sbs;
- int rx_count, tx_count, rss_count;
+ int rx_count, tx_count, rss_count, doorbell_size;
/*
* An estimated maximum supported CoS number according to the chip
* version.
@@ -11745,7 +11819,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
case BCM57800_MF:
case BCM57810:
case BCM57810_MF:
- case BCM57840:
+ case BCM57840_O:
+ case BCM57840_4_10:
+ case BCM57840_2_20:
+ case BCM57840_MFO:
case BCM57840_MF:
case BCM57811:
case BCM57811_MF:
@@ -11760,13 +11837,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
- /* !!! FIXME !!!
- * Do not allow the maximum SB count to grow above 16
- * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48.
- * We will use the FP_SB_MAX_E1x macro for this matter.
- */
- max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
-
WARN_ON(!max_non_def_sbs);
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
@@ -11777,9 +11847,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/*
* Maximum number of netdev Tx queues:
- * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
+ * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
*/
- tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+ tx_count = rss_count * max_cos_est + FCOE_PRESENT;
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
@@ -11788,9 +11858,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp = netdev_priv(dev);
- BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
- tx_count, rx_count);
-
bp->igu_sb_cnt = max_non_def_sbs;
bp->msg_enable = debug;
pci_set_drvdata(pdev, dev);
@@ -11803,6 +11870,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
+ BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
+ tx_count, rx_count);
+
rc = bnx2x_init_bp(bp);
if (rc)
goto init_one_exit;
@@ -11811,9 +11881,15 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
* Map doorbels here as we need the real value of bp->max_cos which
* is initialized in bnx2x_init_bp().
*/
+ doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
+ if (doorbell_size > pci_resource_len(pdev, 2)) {
+ dev_err(&bp->pdev->dev,
+ "Cannot map doorbells, bar size too small, aborting\n");
+ rc = -ENOMEM;
+ goto init_one_exit;
+ }
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
- min_t(u64, BNX2X_DB_SIZE(bp),
- pci_resource_len(pdev, 2)));
+ doorbell_size);
if (!bp->doorbells) {
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
@@ -11831,8 +11907,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
#endif
+
+ /* Set bp->num_queues for MSI-X mode*/
+ bnx2x_set_num_queues(bp);
+
/* Configure interrupt mode: try to enable MSI-X/MSI if
- * needed, set bp->num_queues appropriately.
+ * needed.
*/
bnx2x_set_int_mode(bp);
@@ -12176,6 +12256,7 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
struct eth_spe *spe;
+ int cxt_index, cxt_offset;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -12198,10 +12279,16 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
* ramrod
*/
if (type == ETH_CONNECTION_TYPE) {
- if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
- bnx2x_set_ctx_validation(bp, &bp->context.
- vcxt[BNX2X_ISCSI_ETH_CID].eth,
- BNX2X_ISCSI_ETH_CID);
+ if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
+ cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
+ ILT_PAGE_CIDS;
+ cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
+ (cxt_index * ILT_PAGE_CIDS);
+ bnx2x_set_ctx_validation(bp,
+ &bp->context[cxt_index].
+ vcxt[cxt_offset].eth,
+ BNX2X_ISCSI_ETH_CID(bp));
+ }
}
/*
@@ -12488,21 +12575,45 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
break;
}
case DRV_CTL_ULP_REGISTER_CMD: {
- int ulp_type = ctl->data.ulp_type;
+ int ulp_type = ctl->data.register_data.ulp_type;
if (CHIP_IS_E3(bp)) {
int idx = BP_FW_MB_IDX(bp);
- u32 cap;
+ u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ int path = BP_PATH(bp);
+ int port = BP_PORT(bp);
+ int i;
+ u32 scratch_offset;
+ u32 *host_addr;
- cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ /* first write capability to shmem2 */
if (ulp_type == CNIC_ULP_ISCSI)
cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
else if (ulp_type == CNIC_ULP_FCOE)
cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+
+ if ((ulp_type != CNIC_ULP_FCOE) ||
+ (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
+ (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
+ break;
+
+ /* if reached here - should write fcoe capabilities */
+ scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
+ if (!scratch_offset)
+ break;
+ scratch_offset += offsetof(struct glob_ncsi_oem_data,
+ fcoe_features[path][port]);
+ host_addr = (u32 *) &(ctl->data.register_data.
+ fcoe_features);
+ for (i = 0; i < sizeof(struct fcoe_capabilities);
+ i += 4)
+ REG_WR(bp, scratch_offset + i,
+ *(host_addr + i/4));
}
break;
}
+
case DRV_CTL_ULP_UNREGISTER_CMD: {
int ulp_type = ctl->data.ulp_type;
@@ -12554,6 +12665,21 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
cp->num_irq = 2;
}
+void bnx2x_setup_cnic_info(struct bnx2x *bp)
+{
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+
+ cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+ bnx2x_cid_ilt_lines(bp);
+ cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
+
+ if (NO_ISCSI_OOO(bp))
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+}
+
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
void *data)
{
@@ -12632,10 +12758,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
cp->drv_ctl = bnx2x_drv_ctl;
cp->drv_register_cnic = bnx2x_register_cnic;
cp->drv_unregister_cnic = bnx2x_unregister_cnic;
- cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+ cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
cp->iscsi_l2_client_id =
bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
- cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+ cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
if (NO_ISCSI_OOO(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
new file mode 100644
index 000000000000..ddd5106ad2f9
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h
@@ -0,0 +1,168 @@
+/* bnx2x_mfw_req.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_MFW_REQ_H
+#define BNX2X_MFW_REQ_H
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
+
+/* FCoE capabilities required from the driver */
+struct fcoe_capabilities {
+ u32 capability1;
+ /* Maximum number of I/Os per connection */
+ #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff
+ #define FCOE_IOS_PER_CONNECTION_SHIFT 0
+ /* Maximum number of Logins per port */
+ #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000
+ #define FCOE_LOGINS_PER_PORT_SHIFT 16
+
+ u32 capability2;
+ /* Maximum number of exchanges */
+ #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff
+ #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0
+ /* Maximum NPIV WWN per port */
+ #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000
+ #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16
+
+ u32 capability3;
+ /* Maximum number of targets supported */
+ #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff
+ #define FCOE_TARGETS_SUPPORTED_SHIFT 0
+ /* Maximum number of outstanding commands across all connections */
+ #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000
+ #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16
+
+ u32 capability4;
+ #define FCOE_CAPABILITY4_STATEFUL 0x00000001
+ #define FCOE_CAPABILITY4_STATELESS 0x00000002
+ #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004
+};
+
+struct glob_ncsi_oem_data {
+ u32 driver_version;
+ u32 unused[3];
+ struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX];
+};
+
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 2
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. padded to 12 */
+ u8 version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */
+ u8 mac_local[8];
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
+ u32 feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ u32 lso_max_size; /* LSO MaxOffloadSize. */
+ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ u32 ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ u32 ipv6_ofld_cnt;
+ u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ u32 txq_size; /* TX Descriptors Queue Size */
+ u32 rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/
+ u32 iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ u32 netq_cnt;
+ u32 vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u32 txq_size; /* FCoE TX Descriptors Queue Size. */
+ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ u32 txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ u32 rxq_avg_depth;
+ u32 rx_frames_lo; /* FCoE RX Frames received. */
+ u32 rx_frames_hi; /* FCoE RX Frames received. */
+ u32 rx_bytes_lo; /* FCoE RX Bytes received. */
+ u32 rx_bytes_hi; /* FCoE RX Bytes received. */
+ u32 tx_frames_lo; /* FCoE TX Frames sent. */
+ u32 tx_frames_hi; /* FCoE TX Frames sent. */
+ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
+ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+ u8 ww_port_name[64]; /* iSCSI World wide port name */
+ u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ u32 max_frame_size; /* Max Frame Size. bytes */
+ u32 txq_size; /* PDU TX Descriptors Queue Size. */
+ u32 rxq_size; /* PDU RX Descriptors Queue Size. */
+ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ u32 rx_pdus_lo; /* iSCSI PDUs received. */
+ u32 rx_pdus_hi; /* iSCSI PDUs received. */
+ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
+ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
+ u32 tx_pdus_lo; /* iSCSI PDUs sent. */
+ u32 tx_pdus_hi; /* iSCSI PDUs sent. */
+ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
+ * 9 nibbles, the position of each nibble
+ * represents the C-PCP value, the value
+ * of the nibble = S-PCP value.
+ */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
+#endif /* BNX2X_MFW_REQ_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index bbd387492a80..ec62a5c8bd37 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1488,6 +1488,121 @@
* 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
#define MISC_REG_CHIP_TYPE 0xac60
#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
+#define MISC_REG_CPMU_LP_DR_ENABLE 0xa858
+/* [RW 1] FW EEE LPI Enable. When 1 indicates that EEE LPI mode is enabled
+ * by FW. When 0 indicates that the EEE LPI mode is disabled by FW. Clk
+ * 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_FW_ENABLE_P0 0xa84c
+/* [RW 32] EEE LPI Idle Threshold. The threshold value for the idle EEE LPI
+ * counter. Timer tick is 1 us. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_IDLE_THR_P0 0xa8a0
+/* [RW 18] LPI entry events mask. [0] - Vmain SM Mask. When 1 indicates that
+ * the Vmain SM end state is disabled. When 0 indicates that the Vmain SM
+ * end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates that
+ * the FW command that all Queues are empty is disabled. When 0 indicates
+ * that the FW command that all Queues are empty is enabled. [2] - FW Early
+ * Exit Mask / Reserved (Entry mask). When 1 indicates that the FW Early
+ * Exit command is disabled. When 0 indicates that the FW Early Exit command
+ * is enabled. This bit applicable only in the EXIT Events Mask registers.
+ * [3] - PBF Request Mask. When 1 indicates that the PBF Request indication
+ * is disabled. When 0 indicates that the PBF Request indication is enabled.
+ * [4] - Tx Request Mask. When =1 indicates that the Tx other Than PBF
+ * Request indication is disabled. When 0 indicates that the Tx Other Than
+ * PBF Request indication is enabled. [5] - Rx EEE LPI Status Mask. When 1
+ * indicates that the RX EEE LPI Status indication is disabled. When 0
+ * indicates that the RX EEE LPI Status indication is enabled. In the EXIT
+ * Events Masks registers; this bit masks the falling edge detect of the LPI
+ * Status (Rx LPI is on - off). [6] - Tx Pause Mask. When 1 indicates that
+ * the Tx Pause indication is disabled. When 0 indicates that the Tx Pause
+ * indication is enabled. [7] - BRB1 Empty Mask. When 1 indicates that the
+ * BRB1 EMPTY indication is disabled. When 0 indicates that the BRB1 EMPTY
+ * indication is enabled. [8] - QM Idle Mask. When 1 indicates that the QM
+ * IDLE indication is disabled. When 0 indicates that the QM IDLE indication
+ * is enabled. (One bit for both VOQ0 and VOQ1). [9] - QM LB Idle Mask. When
+ * 1 indicates that the QM IDLE indication for LOOPBACK is disabled. When 0
+ * indicates that the QM IDLE indication for LOOPBACK is enabled. [10] - L1
+ * Status Mask. When 1 indicates that the L1 Status indication from the PCIE
+ * CORE is disabled. When 0 indicates that the RX EEE LPI Status indication
+ * from the PCIE CORE is enabled. In the EXIT Events Masks registers; this
+ * bit masks the falling edge detect of the L1 status (L1 is on - off). [11]
+ * - P0 E0 EEE EEE LPI REQ Mask. When =1 indicates that the P0 E0 EEE EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 E0 EEE LPI
+ * REQ indication is enabled. [12] - P1 E0 EEE LPI REQ Mask. When =1
+ * indicates that the P0 EEE LPI REQ indication is disabled. When =0
+ * indicates that the P0 EEE LPI REQ indication is enabled. [13] - P0 E1 EEE
+ * LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication is
+ * disabled. When =0 indicates that the P0 EEE LPI REQ indication is
+ * enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
+ * indication is enabled. [15] - L1 REQ Mask. When =1 indicates that the L1
+ * REQ indication is disabled. When =0 indicates that the L1 indication is
+ * enabled. [16] - Rx EEE LPI Status Edge Detect Mask. When =1 indicates
+ * that the RX EEE LPI Status Falling Edge Detect indication is disabled (Rx
+ * EEE LPI is on - off). When =0 indicates that the RX EEE LPI Status
+ * Falling Edge Detec indication is enabled (Rx EEE LPI is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers. [17] - L1
+ * Status Edge Detect Mask. When =1 indicates that the L1 Status Falling
+ * Edge Detect indication from the PCIE CORE is disabled (L1 is on - off).
+ * When =0 indicates that the L1 Status Falling Edge Detect indication from
+ * the PCIE CORE is enabled (L1 is on - off). This bit is applicable only in
+ * the EXIT Events Masks registers. Clock 25MHz. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_ENT_P0 0xa880
+/* [RW 18] EEE LPI exit events mask. [0] - Vmain SM Mask. When 1 indicates
+ * that the Vmain SM end state is disabled. When 0 indicates that the Vmain
+ * SM end state is enabled. [1] - FW Queues Empty Mask. When 1 indicates
+ * that the FW command that all Queues are empty is disabled. When 0
+ * indicates that the FW command that all Queues are empty is enabled. [2] -
+ * FW Early Exit Mask / Reserved (Entry mask). When 1 indicates that the FW
+ * Early Exit command is disabled. When 0 indicates that the FW Early Exit
+ * command is enabled. This bit applicable only in the EXIT Events Mask
+ * registers. [3] - PBF Request Mask. When 1 indicates that the PBF Request
+ * indication is disabled. When 0 indicates that the PBF Request indication
+ * is enabled. [4] - Tx Request Mask. When =1 indicates that the Tx other
+ * Than PBF Request indication is disabled. When 0 indicates that the Tx
+ * Other Than PBF Request indication is enabled. [5] - Rx EEE LPI Status
+ * Mask. When 1 indicates that the RX EEE LPI Status indication is disabled.
+ * When 0 indicates that the RX LPI Status indication is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the EEE LPI Status (Rx EEE LPI is on - off). [6] - Tx Pause Mask. When 1
+ * indicates that the Tx Pause indication is disabled. When 0 indicates that
+ * the Tx Pause indication is enabled. [7] - BRB1 Empty Mask. When 1
+ * indicates that the BRB1 EMPTY indication is disabled. When 0 indicates
+ * that the BRB1 EMPTY indication is enabled. [8] - QM Idle Mask. When 1
+ * indicates that the QM IDLE indication is disabled. When 0 indicates that
+ * the QM IDLE indication is enabled. (One bit for both VOQ0 and VOQ1). [9]
+ * - QM LB Idle Mask. When 1 indicates that the QM IDLE indication for
+ * LOOPBACK is disabled. When 0 indicates that the QM IDLE indication for
+ * LOOPBACK is enabled. [10] - L1 Status Mask. When 1 indicates that the L1
+ * Status indication from the PCIE CORE is disabled. When 0 indicates that
+ * the RX EEE LPI Status indication from the PCIE CORE is enabled. In the
+ * EXIT Events Masks registers; this bit masks the falling edge detect of
+ * the L1 status (L1 is on - off). [11] - P0 E0 EEE EEE LPI REQ Mask. When
+ * =1 indicates that the P0 E0 EEE EEE LPI REQ indication is disabled. When
+ * =0 indicates that the P0 E0 EEE LPI REQ indication is enabled. [12] - P1
+ * E0 EEE LPI REQ Mask. When =1 indicates that the P0 EEE LPI REQ indication
+ * is disabled. When =0 indicates that the P0 EEE LPI REQ indication is
+ * enabled. [13] - P0 E1 EEE LPI REQ Mask. When =1 indicates that the P0 EEE
+ * LPI REQ indication is disabled. When =0 indicates that the P0 EEE LPI REQ
+ * indication is enabled. [14] - P1 E1 EEE LPI REQ Mask. When =1 indicates
+ * that the P0 EEE LPI REQ indication is disabled. When =0 indicates that
+ * the P0 EEE LPI REQ indication is enabled. [15] - L1 REQ Mask. When =1
+ * indicates that the L1 REQ indication is disabled. When =0 indicates that
+ * the L1 indication is enabled. [16] - Rx EEE LPI Status Edge Detect Mask.
+ * When =1 indicates that the RX EEE LPI Status Falling Edge Detect
+ * indication is disabled (Rx EEE LPI is on - off). When =0 indicates that
+ * the RX EEE LPI Status Falling Edge Detec indication is enabled (Rx EEE
+ * LPI is on - off). This bit is applicable only in the EXIT Events Masks
+ * registers. [17] - L1 Status Edge Detect Mask. When =1 indicates that the
+ * L1 Status Falling Edge Detect indication from the PCIE CORE is disabled
+ * (L1 is on - off). When =0 indicates that the L1 Status Falling Edge
+ * Detect indication from the PCIE CORE is enabled (L1 is on - off). This
+ * bit is applicable only in the EXIT Events Masks registers.Clock 25MHz.
+ * Reset on hard reset. */
+#define MISC_REG_CPMU_LP_MASK_EXT_P0 0xa888
+/* [RW 16] EEE LPI Entry Events Counter. A statistic counter with the number
+ * of counts that the SM entered the EEE LPI state. Clock 25MHz. Read only
+ * register. Reset on hard reset. */
+#define MISC_REG_CPMU_LP_SM_ENT_CNT_P0 0xa8b8
/* [RW 32] The following driver registers(1...16) represent 16 drivers and
32 clients. Each client can be controlled by one driver only. One in each
bit represent that this driver control the appropriate client (Ex: bit 5
@@ -5372,6 +5487,8 @@
/* [RW 32] Lower 48 bits of ctrl_sa register. Used as the SA in PAUSE/PFC
* packets transmitted by the MAC */
#define XMAC_REG_CTRL_SA_LO 0x28
+#define XMAC_REG_EEE_CTRL 0xd8
+#define XMAC_REG_EEE_TIMERS_HI 0xe4
#define XMAC_REG_PAUSE_CTRL 0x68
#define XMAC_REG_PFC_CTRL 0x70
#define XMAC_REG_PFC_CTRL_HI 0x74
@@ -5796,6 +5913,7 @@
#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
#define MISC_REGISTERS_SPIO_SET_POS 8
#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13
#define HW_LOCK_RESOURCE_DRV_FLAGS 10
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
@@ -6813,6 +6931,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
#define MDIO_AN_REG_MASTER_STATUS 0x0021
+#define MDIO_AN_REG_EEE_ADV 0x003c
+#define MDIO_AN_REG_LP_EEE_ADV 0x003d
/*bcm*/
#define MDIO_AN_REG_LINK_STATUS 0x8304
#define MDIO_AN_REG_CL37_CL73 0x8370
@@ -6866,6 +6986,8 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_FW_REV 0x400f
+#define MDIO_84833_TOP_CFG_FW_EEE 0x10b1
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
#define MDIO_84833_SUPER_ISOLATE 0x8000
/* These are mailbox register set used by 84833. */
@@ -6993,11 +7115,13 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
+#define MDIO_WC_REG_DIGITAL4_MISC5 0x833e
#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED 0x834e
#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL 0x8350
#define MDIO_WC_REG_CL49_USERB0_CTRL 0x8368
+#define MDIO_WC_REG_EEE_COMBO_CONTROL0 0x8390
#define MDIO_WC_REG_TX66_CONTROL 0x83b0
#define MDIO_WC_REG_RX66_CONTROL 0x83c0
#define MDIO_WC_REG_RX66_SCW0 0x83c2
@@ -7036,6 +7160,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_REG_GPHY_EEE_1G (0x1 << 2)
#define MDIO_REG_GPHY_EEE_100 (0x1 << 1)
#define MDIO_REG_GPHY_EEE_RESOLVED 0x803e
+#define MDIO_REG_GPHY_AUX_STATUS 0x19
#define MDIO_REG_INTR_STATUS 0x1a
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
@@ -7150,8 +7275,7 @@ Theotherbitsarereservedandshouldbezero*/
#define CDU_REGION_NUMBER_UCM_AG 4
-/**
- * String-to-compress [31:8] = CID (all 24 bits)
+/* String-to-compress [31:8] = CID (all 24 bits)
* String-to-compress [7:4] = Region
* String-to-compress [3:0] = Type
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 6c14b4a4e82c..734fd87cd990 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4107,6 +4107,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY;
+
if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
@@ -4115,6 +4119,10 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |=
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+ if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags))
+ data->capabilities |=
+ ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY;
+
/* Hashing mask */
data->rss_result_mask = p->rss_result_mask;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index efd80bdd0dfe..f83e033da6da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -167,9 +167,8 @@ typedef int (*exe_q_remove)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
struct bnx2x_exeq_elem *elem);
-/**
- * @return positive is entry was optimized, 0 - if not, negative
- * in case of an error.
+/* Return positive if entry was optimized, 0 - if not, negative
+ * in case of an error.
*/
typedef int (*exe_q_optimize)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
@@ -694,8 +693,10 @@ enum {
BNX2X_RSS_IPV4,
BNX2X_RSS_IPV4_TCP,
+ BNX2X_RSS_IPV4_UDP,
BNX2X_RSS_IPV6,
BNX2X_RSS_IPV6_TCP,
+ BNX2X_RSS_IPV6_UDP,
};
struct bnx2x_config_rss_params {
@@ -729,6 +730,10 @@ struct bnx2x_rss_config_obj {
/* Last configured indirection table */
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ /* flags for enabling 4-tupple hash on UDP */
+ u8 udp_rss_v4;
+ u8 udp_rss_v6;
+
int (*config_rss)(struct bnx2x *bp,
struct bnx2x_config_rss_params *p);
};
@@ -1280,12 +1285,11 @@ void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
struct bnx2x_rx_mode_obj *o);
/**
- * Send and RX_MODE ramrod according to the provided parameters.
+ * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
*
- * @param bp
- * @param p Command parameters
+ * @p: Command parameters
*
- * @return 0 - if operation was successfull and there is no pending completions,
+ * Return: 0 - if operation was successfull and there is no pending completions,
* positive number - if there are pending completions,
* negative - if there were errors
*/
@@ -1302,7 +1306,11 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
bnx2x_obj_type type);
/**
- * Configure multicast MACs list. May configure a new list
+ * bnx2x_config_mcast - Configure multicast MACs list.
+ *
+ * @cmd: command to execute: BNX2X_MCAST_CMD_X
+ *
+ * May configure a new list
* provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
* (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
* configuration, continue to execute the pending commands
@@ -1313,11 +1321,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp,
* the current command will be enqueued to the tail of the
* pending commands list.
*
- * @param bp
- * @param p
- * @param command to execute: BNX2X_MCAST_CMD_X
- *
- * @return 0 is operation was sucessfull and there are no pending completions,
+ * Return: 0 is operation was sucessfull and there are no pending completions,
* negative if there were errors, positive if there are pending
* completions.
*/
@@ -1342,21 +1346,17 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
bnx2x_obj_type type);
/**
- * Updates RSS configuration according to provided parameters.
- *
- * @param bp
- * @param p
+ * bnx2x_config_rss - Updates RSS configuration according to provided parameters
*
- * @return 0 in case of success
+ * Return: 0 in case of success
*/
int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *p);
/**
- * Return the current ind_table configuration.
+ * bnx2x_get_rss_ind_table - Return the current ind_table configuration.
*
- * @param bp
- * @param ind_table buffer to fill with the current indirection
+ * @ind_table: buffer to fill with the current indirection
* table content. Should be at least
* T_ETH_INDIRECTION_TABLE_SIZE bytes long.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 1e2785cd11d0..667d89042d35 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -785,6 +785,10 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
pstats->host_port_stats_counter++;
+ if (CHIP_IS_E3(bp))
+ estats->eee_tx_lpi += REG_RD(bp,
+ MISC_REG_CPMU_LP_SM_ENT_CNT_P0);
+
if (!BP_NOMCP(bp)) {
u32 nig_timer_max =
SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
@@ -855,17 +859,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
struct tstorm_per_queue_stats *tclient =
&bp->fw_stats_data->queue_stats[i].
tstorm_queue_statistics;
- struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+ struct tstorm_per_queue_stats *old_tclient =
+ &bnx2x_fp_stats(bp, fp)->old_tclient;
struct ustorm_per_queue_stats *uclient =
&bp->fw_stats_data->queue_stats[i].
ustorm_queue_statistics;
- struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+ struct ustorm_per_queue_stats *old_uclient =
+ &bnx2x_fp_stats(bp, fp)->old_uclient;
struct xstorm_per_queue_stats *xclient =
&bp->fw_stats_data->queue_stats[i].
xstorm_queue_statistics;
- struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+ struct xstorm_per_queue_stats *old_xclient =
+ &bnx2x_fp_stats(bp, fp)->old_xclient;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
u32 diff;
@@ -1048,8 +1057,11 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
tmp = estats->mac_discard;
- for_each_rx_queue(bp, i)
- tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+ for_each_rx_queue(bp, i) {
+ struct tstorm_per_queue_stats *old_tclient =
+ &bp->fp_stats[i].old_tclient;
+ tmp += le32_to_cpu(old_tclient->checksum_discard);
+ }
nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
nstats->tx_dropped = 0;
@@ -1099,9 +1111,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
int i;
for_each_queue(bp, i) {
- struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
struct bnx2x_eth_q_stats_old *qstats_old =
- &bp->fp[i].eth_q_stats_old;
+ &bp->fp_stats[i].eth_q_stats_old;
UPDATE_ESTAT_QSTAT(driver_xoff);
UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
@@ -1309,12 +1321,9 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
bnx2x_stats_comp(bp);
}
-/**
- * This function will prepare the statistics ramrod data the way
+/* This function will prepare the statistics ramrod data the way
* we will only have to increment the statistics counter and
* send the ramrod each time we have to.
- *
- * @param bp
*/
static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
@@ -1428,7 +1437,7 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
query[first_queue_query_index + i];
cur_query_entry->kind = STATS_TYPE_QUEUE;
- cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]);
cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
cur_query_entry->address.hi =
cpu_to_le32(U64_HI(cur_data_offset));
@@ -1479,15 +1488,19 @@ void bnx2x_stats_init(struct bnx2x *bp)
/* function stats */
for_each_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
-
- memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
- memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
- memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+ struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
+
+ memset(&fp_stats->old_tclient, 0,
+ sizeof(fp_stats->old_tclient));
+ memset(&fp_stats->old_uclient, 0,
+ sizeof(fp_stats->old_uclient));
+ memset(&fp_stats->old_xclient, 0,
+ sizeof(fp_stats->old_xclient));
if (bp->stats_init) {
- memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
- memset(&fp->eth_q_stats_old, 0,
- sizeof(fp->eth_q_stats_old));
+ memset(&fp_stats->eth_q_stats, 0,
+ sizeof(fp_stats->eth_q_stats));
+ memset(&fp_stats->eth_q_stats_old, 0,
+ sizeof(fp_stats->eth_q_stats_old));
}
}
@@ -1529,8 +1542,10 @@ void bnx2x_save_statistics(struct bnx2x *bp)
/* save queue statistics */
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+ struct bnx2x_eth_q_stats *qstats =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bnx2x_fp_stats(bp, fp)->eth_q_stats_old;
UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
@@ -1569,7 +1584,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
struct per_queue_stats *fcoe_q_stats =
- &bp->fw_stats_data->queue_stats[FCOE_IDX];
+ &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)];
struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
&fcoe_q_stats->tstorm_queue_statistics;
@@ -1586,8 +1601,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
memset(afex_stats, 0, sizeof(struct afex_stats));
for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats;
ADD_64(afex_stats->rx_unicast_bytes_hi,
qstats->total_unicast_bytes_received_hi,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 93e689fdfeda..24b8e505b60c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -203,6 +203,8 @@ struct bnx2x_eth_stats {
/* Recovery */
u32 recoverable_error;
u32 unrecoverable_error;
+ /* src: Clear-on-Read register; Will not survive PMF Migration */
+ u32 eee_tx_lpi;
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index c95e7b5e2b85..3b4fc61f24cf 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -256,11 +256,16 @@ static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
struct drv_ctl_info info;
+ struct fcoe_capabilities *fcoe_cap =
+ &info.data.register_data.fcoe_features;
- if (reg)
+ if (reg) {
info.cmd = DRV_CTL_ULP_REGISTER_CMD;
- else
+ if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
+ memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
+ } else {
info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+ }
info.data.ulp_type = ulp_type;
ethdev->drv_ctl(dev->netdev, &info);
@@ -286,6 +291,9 @@ static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
u32 i;
+ if (!cp->ctx_tbl)
+ return -EINVAL;
+
for (i = 0; i < cp->max_cid_space; i++) {
if (cp->ctx_tbl[i].cid == cid) {
*l5_cid = i;
@@ -534,7 +542,8 @@ int cnic_unregister_driver(int ulp_type)
}
if (atomic_read(&ulp_ops->ref_count) != 0)
- netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+ pr_warn("%s: Failed waiting for ref count to go to zero\n",
+ __func__);
return 0;
out_unlock:
@@ -611,6 +620,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (ulp_type == CNIC_ULP_ISCSI)
cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+ else if (ulp_type == CNIC_ULP_FCOE)
+ dev->fcoe_cap = NULL;
synchronize_rcu();
@@ -1053,12 +1064,13 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo = &udev->cnic_uinfo;
- uinfo->mem[0].addr = dev->netdev->base_addr;
+ uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
uinfo->mem[0].internal_addr = dev->regview;
- uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
+ TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
@@ -1068,6 +1080,8 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+ uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
+
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
@@ -2585,7 +2599,7 @@ static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
return;
}
- cqes[0] = (struct kcqe *) &kcqe;
+ cqes[0] = &kcqe;
cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
}
@@ -3213,6 +3227,9 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
u32 l5_cid;
struct cnic_local *cp = dev->cnic_priv;
+ if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+ break;
+
if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
@@ -3943,6 +3960,15 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
cnic_cm_upcall(cp, csk, opcode);
break;
+ case L5CM_RAMROD_CMD_ID_CLOSE:
+ if (l4kcqe->status != 0) {
+ netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
+ "status 0x%x\n", l4kcqe->status);
+ opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+ /* Fall through */
+ } else {
+ break;
+ }
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4246,8 +4272,6 @@ static int cnic_cm_shutdown(struct cnic_dev *dev)
struct cnic_local *cp = dev->cnic_priv;
int i;
- cp->stop_cm(dev);
-
if (!cp->csk_tbl)
return 0;
@@ -4665,9 +4689,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
cp->kcq1.sw_prod_idx = 0;
cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &sblk->status_completion_producer_index;
+ &sblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
+ cp->kcq1.status_idx_ptr = &sblk->status_idx;
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
@@ -4693,9 +4717,9 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
cp->kcq1.hw_prod_idx_ptr =
- (u16 *) &msblk->status_completion_producer_index;
- cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
- cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
+ &msblk->status_completion_producer_index;
+ cp->kcq1.status_idx_ptr = &msblk->status_idx;
+ cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
@@ -4977,8 +5001,14 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
cp->port_mode = CHIP_PORT_MODE_NONE;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
- u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
+ u32 val;
+ pci_read_config_dword(dev->pcidev, PCICFG_ME_REGISTER, &val);
+ cp->func = (u8) ((val & ME_REG_ABS_PF_NUM) >>
+ ME_REG_ABS_PF_NUM_SHIFT);
+ func = CNIC_FUNC(cp);
+
+ val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
if (!(val & 1))
val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
else
@@ -5283,6 +5313,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
i++;
}
cnic_shutdown_rings(dev);
+ cp->stop_cm(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
@@ -5512,9 +5543,7 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
rcu_read_unlock();
}
-/**
- * netdev event handler
- */
+/* netdev event handler */
static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 289274e546be..5cb88881bba1 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -12,8 +12,10 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.5.10"
-#define CNIC_MODULE_RELDATE "March 21, 2012"
+#include "bnx2x/bnx2x_mfw_req.h"
+
+#define CNIC_MODULE_VERSION "2.5.12"
+#define CNIC_MODULE_RELDATE "June 29, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -131,6 +133,11 @@ struct drv_ctl_l2_ring {
u32 cid;
};
+struct drv_ctl_register_data {
+ int ulp_type;
+ struct fcoe_capabilities fcoe_features;
+};
+
struct drv_ctl_info {
int cmd;
union {
@@ -138,6 +145,7 @@ struct drv_ctl_info {
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
int ulp_type;
+ struct drv_ctl_register_data register_data;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
@@ -305,6 +313,7 @@ struct cnic_dev {
int max_rdma_conn;
union drv_info_to_mcp *stats_addr;
+ struct fcoe_capabilities *fcoe_cap;
void *cnic_priv;
};
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e47ff8be1d7b..9a009fd6ea1b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -44,6 +44,10 @@
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#if IS_ENABLED(CONFIG_HWMON)
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
#include <net/checksum.h>
#include <net/ip.h>
@@ -298,6 +302,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -730,44 +735,131 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
-static void tg3_ape_send_event(struct tg3 *tp, u32 event)
+static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
- int i;
u32 apedata;
- /* NCSI does not support APE events */
- if (tg3_flag(tp, APE_HAS_NCSI))
- return;
+ while (timeout_us) {
+ if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+ return -EBUSY;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+ udelay(10);
+ timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
+ }
+
+ return timeout_us ? 0 : -EBUSY;
+}
+
+static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
+{
+ u32 i, apedata;
+
+ for (i = 0; i < timeout_us / 10; i++) {
+ apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+ if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+ break;
+
+ udelay(10);
+ }
+
+ return i == timeout_us / 10;
+}
+
+int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
+{
+ int err;
+ u32 i, bufoff, msgoff, maxlen, apedata;
+
+ if (!tg3_flag(tp, APE_HAS_NCSI))
+ return 0;
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
if (apedata != APE_SEG_SIG_MAGIC)
- return;
+ return -ENODEV;
apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
if (!(apedata & APE_FW_STATUS_READY))
- return;
+ return -EAGAIN;
- /* Wait for up to 1 millisecond for APE to service previous event. */
- for (i = 0; i < 10; i++) {
- if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
- return;
+ bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
+ TG3_APE_SHMEM_BASE;
+ msgoff = bufoff + 2 * sizeof(u32);
+ maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
- apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+ while (len) {
+ u32 length;
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
- event | APE_EVENT_STATUS_EVENT_PENDING);
+ /* Cap xfer sizes to scratchpad limits. */
+ length = (len > maxlen) ? maxlen : len;
+ len -= length;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return -EAGAIN;
+
+ /* Wait for up to 1 msec for APE to service previous event. */
+ err = tg3_ape_event_lock(tp, 1000);
+ if (err)
+ return err;
+
+ apedata = APE_EVENT_STATUS_DRIVER_EVNT |
+ APE_EVENT_STATUS_SCRTCHPD_READ |
+ APE_EVENT_STATUS_EVENT_PENDING;
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
+
+ tg3_ape_write32(tp, bufoff, base_off);
+ tg3_ape_write32(tp, bufoff + sizeof(u32), length);
tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- break;
+ base_off += length;
- udelay(100);
+ if (tg3_ape_wait_for_event(tp, 30000))
+ return -EAGAIN;
+
+ for (i = 0; length; i += 4, length -= 4) {
+ u32 val = tg3_ape_read32(tp, msgoff + i);
+ memcpy(data, &val, sizeof(u32));
+ data++;
+ }
}
- if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
- tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+ return 0;
+}
+
+static int tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+ int err;
+ u32 apedata;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+ if (apedata != APE_SEG_SIG_MAGIC)
+ return -EAGAIN;
+
+ apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+ if (!(apedata & APE_FW_STATUS_READY))
+ return -EAGAIN;
+
+ /* Wait for up to 1 millisecond for APE to service previous event. */
+ err = tg3_ape_event_lock(tp, 1000);
+ if (err)
+ return err;
+
+ tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+ event | APE_EVENT_STATUS_EVENT_PENDING);
+
+ tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+ tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+
+ return 0;
}
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
@@ -9393,6 +9485,110 @@ static int tg3_init_hw(struct tg3 *tp, int reset_phy)
return tg3_reset_hw(tp, reset_phy);
}
+#if IS_ENABLED(CONFIG_HWMON)
+static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
+{
+ int i;
+
+ for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
+ u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
+
+ tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
+ off += len;
+
+ if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
+ !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
+ memset(ocir, 0, TG3_OCIR_LEN);
+ }
+}
+
+/* sysfs attributes for hwmon */
+static ssize_t tg3_show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ u32 temperature;
+
+ spin_lock_bh(&tp->lock);
+ tg3_ape_scratchpad_read(tp, &temperature, attr->index,
+ sizeof(temperature));
+ spin_unlock_bh(&tp->lock);
+ return sprintf(buf, "%u\n", temperature);
+}
+
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_SENSOR_OFFSET);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_CAUTION_OFFSET);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
+ TG3_TEMP_MAX_OFFSET);
+
+static struct attribute *tg3_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tg3_group = {
+ .attrs = tg3_attributes,
+};
+
+#endif
+
+static void tg3_hwmon_close(struct tg3 *tp)
+{
+#if IS_ENABLED(CONFIG_HWMON)
+ if (tp->hwmon_dev) {
+ hwmon_device_unregister(tp->hwmon_dev);
+ tp->hwmon_dev = NULL;
+ sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
+ }
+#endif
+}
+
+static void tg3_hwmon_open(struct tg3 *tp)
+{
+#if IS_ENABLED(CONFIG_HWMON)
+ int i, err;
+ u32 size = 0;
+ struct pci_dev *pdev = tp->pdev;
+ struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
+
+ tg3_sd_scan_scratchpad(tp, ocirs);
+
+ for (i = 0; i < TG3_SD_NUM_RECS; i++) {
+ if (!ocirs[i].src_data_length)
+ continue;
+
+ size += ocirs[i].src_hdr_length;
+ size += ocirs[i].src_data_length;
+ }
+
+ if (!size)
+ return;
+
+ /* Register hwmon sysfs hooks */
+ err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
+ return;
+ }
+
+ tp->hwmon_dev = hwmon_device_register(&pdev->dev);
+ if (IS_ERR(tp->hwmon_dev)) {
+ tp->hwmon_dev = NULL;
+ dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
+ sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
+ }
+#endif
+}
+
+
#define TG3_STAT_ADD32(PSTAT, REG) \
do { u32 __val = tr32(REG); \
(PSTAT)->low += __val; \
@@ -9908,7 +10104,7 @@ static bool tg3_enable_msix(struct tg3 *tp)
int i, rc;
struct msix_entry msix_ent[tp->irq_max];
- tp->irq_cnt = num_online_cpus();
+ tp->irq_cnt = netif_get_num_default_rss_queues();
if (tp->irq_cnt > 1) {
/* We want as many rx rings enabled as there are cpus.
* In multiqueue MSI-X mode, the first MSI-X vector
@@ -10101,6 +10297,8 @@ static int tg3_open(struct net_device *dev)
tg3_phy_start(tp);
+ tg3_hwmon_open(tp);
+
tg3_full_lock(tp, 0);
tg3_timer_start(tp);
@@ -10150,6 +10348,8 @@ static int tg3_close(struct net_device *dev)
tg3_timer_stop(tp);
+ tg3_hwmon_close(tp);
+
tg3_phy_stop(tp);
tg3_full_lock(tp, 1);
@@ -13857,14 +14057,9 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
}
}
-static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+static void __devinit tg3_probe_ncsi(struct tg3 *tp)
{
- int vlen;
u32 apedata;
- char *fwtype;
-
- if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
- return;
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
if (apedata != APE_SEG_SIG_MAGIC)
@@ -13874,14 +14069,22 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
if (!(apedata & APE_FW_STATUS_READY))
return;
+ if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
+ tg3_flag_set(tp, APE_HAS_NCSI);
+}
+
+static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+{
+ int vlen;
+ u32 apedata;
+ char *fwtype;
+
apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
- if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
- tg3_flag_set(tp, APE_HAS_NCSI);
+ if (tg3_flag(tp, APE_HAS_NCSI))
fwtype = "NCSI";
- } else {
+ else
fwtype = "DASH";
- }
vlen = strlen(tp->fw_ver);
@@ -13915,20 +14118,17 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
tg3_read_sb_ver(tp, val);
else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
tg3_read_hwsb_ver(tp);
- else
- return;
-
- if (vpd_vers)
- goto done;
- if (tg3_flag(tp, ENABLE_APE)) {
- if (tg3_flag(tp, ENABLE_ASF))
- tg3_read_dash_ver(tp);
- } else if (tg3_flag(tp, ENABLE_ASF)) {
- tg3_read_mgmtfw_ver(tp);
+ if (tg3_flag(tp, ENABLE_ASF)) {
+ if (tg3_flag(tp, ENABLE_APE)) {
+ tg3_probe_ncsi(tp);
+ if (!vpd_vers)
+ tg3_read_dash_ver(tp);
+ } else if (!vpd_vers) {
+ tg3_read_mgmtfw_ver(tp);
+ }
}
-done:
tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}
@@ -14168,7 +14368,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (bridge->subordinate &&
(bridge->subordinate->number <=
tp->pdev->bus->number) &&
- (bridge->subordinate->subordinate >=
+ (bridge->subordinate->busn_res.end >=
tp->pdev->bus->number)) {
tg3_flag_set(tp, 5701_DMA_BUG);
pci_dev_put(bridge);
@@ -14196,7 +14396,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (bridge && bridge->subordinate &&
(bridge->subordinate->number <=
tp->pdev->bus->number) &&
- (bridge->subordinate->subordinate >=
+ (bridge->subordinate->busn_res.end >=
tp->pdev->bus->number)) {
tg3_flag_set(tp, 40BIT_DMA_BUG);
pci_dev_put(bridge);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 93865f899a4f..a1b75cd67b9d 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2311,10 +2311,11 @@
#define APE_LOCK_REQ_DRIVER 0x00001000
#define TG3_APE_LOCK_GRANT 0x004c
#define APE_LOCK_GRANT_DRIVER 0x00001000
-#define TG3_APE_SEG_SIG 0x4000
-#define APE_SEG_SIG_MAGIC 0x41504521
/* APE shared memory. Accessible through BAR1 */
+#define TG3_APE_SHMEM_BASE 0x4000
+#define TG3_APE_SEG_SIG 0x4000
+#define APE_SEG_SIG_MAGIC 0x41504521
#define TG3_APE_FW_STATUS 0x400c
#define APE_FW_STATUS_READY 0x00000100
#define TG3_APE_FW_FEATURES 0x4010
@@ -2327,6 +2328,8 @@
#define APE_FW_VERSION_REVMSK 0x0000ff00
#define APE_FW_VERSION_REVSFT 8
#define APE_FW_VERSION_BLDMSK 0x000000ff
+#define TG3_APE_SEG_MSG_BUF_OFF 0x401c
+#define TG3_APE_SEG_MSG_BUF_LEN 0x4020
#define TG3_APE_HOST_SEG_SIG 0x4200
#define APE_HOST_SEG_SIG_MAGIC 0x484f5354
#define TG3_APE_HOST_SEG_LEN 0x4204
@@ -2353,6 +2356,8 @@
#define APE_EVENT_STATUS_DRIVER_EVNT 0x00000010
#define APE_EVENT_STATUS_STATE_CHNGE 0x00000500
+#define APE_EVENT_STATUS_SCRTCHPD_READ 0x00001600
+#define APE_EVENT_STATUS_SCRTCHPD_WRITE 0x00001700
#define APE_EVENT_STATUS_STATE_START 0x00010000
#define APE_EVENT_STATUS_STATE_UNLOAD 0x00020000
#define APE_EVENT_STATUS_STATE_WOL 0x00030000
@@ -2671,6 +2676,40 @@ struct tg3_hw_stats {
u8 __reserved4[0xb00-0x9c8];
};
+#define TG3_SD_NUM_RECS 3
+#define TG3_OCIR_LEN (sizeof(struct tg3_ocir))
+#define TG3_OCIR_SIG_MAGIC 0x5253434f
+#define TG3_OCIR_FLAG_ACTIVE 0x00000001
+
+#define TG3_TEMP_CAUTION_OFFSET 0xc8
+#define TG3_TEMP_MAX_OFFSET 0xcc
+#define TG3_TEMP_SENSOR_OFFSET 0xd4
+
+
+struct tg3_ocir {
+ u32 signature;
+ u16 version_flags;
+ u16 refresh_int;
+ u32 refresh_tmr;
+ u32 update_tmr;
+ u32 dst_base_addr;
+ u16 src_hdr_offset;
+ u16 src_hdr_length;
+ u16 src_data_offset;
+ u16 src_data_length;
+ u16 dst_hdr_offset;
+ u16 dst_data_offset;
+ u16 dst_reg_upd_offset;
+ u16 dst_sem_offset;
+ u32 reserved1[2];
+ u32 port0_flags;
+ u32 port1_flags;
+ u32 port2_flags;
+ u32 port3_flags;
+ u32 reserved2[1];
+};
+
+
/* 'mapping' is superfluous as the chip does not write into
* the tx/rx post rings so we could just fetch it from there.
* But the cache behavior is better how we are doing it now.
@@ -3206,6 +3245,10 @@ struct tg3 {
const char *fw_needed;
const struct firmware *fw;
u32 fw_len; /* includes BSS */
+
+#if IS_ENABLED(CONFIG_HWMON)
+ struct device *hwmon_dev;
+#endif
};
#endif /* !(_T3_H) */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 689e5e19cc0b..550d2521ba76 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -52,13 +52,7 @@ bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg)
}
/**
- * bfa_cee_attr_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE attributes
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes
*/
static u32
bfa_cee_attr_meminfo(void)
@@ -66,13 +60,7 @@ bfa_cee_attr_meminfo(void)
return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ);
}
/**
- * bfa_cee_stats_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE stats
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats
*/
static u32
bfa_cee_stats_meminfo(void)
@@ -81,14 +69,10 @@ bfa_cee_stats_meminfo(void)
}
/**
- * bfa_cee_get_attr_isr()
- *
- * @brief CEE ISR for get-attributes responses from f/w
- *
- * @param[in] cee - Pointer to the CEE module
- * status - Return status from the f/w
+ * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w
*
- * @return void
+ * @cee: Pointer to the CEE module
+ * @status: Return status from the f/w
*/
static void
bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -105,14 +89,10 @@ bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status)
}
/**
- * bfa_cee_get_attr_isr()
- *
- * @brief CEE ISR for get-stats responses from f/w
+ * bfa_cee_get_attr_isr - CEE ISR for get-stats responses from f/w
*
- * @param[in] cee - Pointer to the CEE module
- * status - Return status from the f/w
- *
- * @return void
+ * @cee: Pointer to the CEE module
+ * @status: Return status from the f/w
*/
static void
bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -147,13 +127,7 @@ bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
}
/**
- * bfa_nw_cee_meminfo()
- *
- * @brief Returns the size of the DMA memory needed by CEE module
- *
- * @param[in] void
- *
- * @return Size of DMA region
+ * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module
*/
u32
bfa_nw_cee_meminfo(void)
@@ -162,15 +136,11 @@ bfa_nw_cee_meminfo(void)
}
/**
- * bfa_nw_cee_mem_claim()
- *
- * @brief Initialized CEE DMA Memory
- *
- * @param[in] cee CEE module pointer
- * dma_kva Kernel Virtual Address of CEE DMA Memory
- * dma_pa Physical Address of CEE DMA Memory
+ * bfa_nw_cee_mem_claim - Initialized CEE DMA Memory
*
- * @return void
+ * @cee: CEE module pointer
+ * @dma_kva: Kernel Virtual Address of CEE DMA Memory
+ * @dma_pa: Physical Address of CEE DMA Memory
*/
void
bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
@@ -185,13 +155,11 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
}
/**
- * bfa_cee_get_attr()
- *
- * @brief Send the request to the f/w to fetch CEE attributes.
+ * bfa_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
*
- * @param[in] Pointer to the CEE module data structure.
+ * @cee: Pointer to the CEE module data structure.
*
- * @return Status
+ * Return: status
*/
enum bfa_status
bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
@@ -220,13 +188,7 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
}
/**
- * bfa_cee_isrs()
- *
- * @brief Handles Mail-box interrupts for CEE module.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return void
+ * bfa_cee_isrs - Handles Mail-box interrupts for CEE module.
*/
static void
@@ -253,14 +215,9 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
}
/**
- * bfa_cee_notify()
- *
- * @brief CEE module heart-beat failure handler.
- * @brief CEE module IOC event handler.
- *
- * @param[in] IOC event type
+ * bfa_cee_notify - CEE module heart-beat failure handler.
*
- * @return void
+ * @event: IOC event type
*/
static void
@@ -307,17 +264,13 @@ bfa_cee_notify(void *arg, enum bfa_ioc_event event)
}
/**
- * bfa_nw_cee_attach()
- *
- * @brief CEE module-attach API
+ * bfa_nw_cee_attach - CEE module-attach API
*
- * @param[in] cee - Pointer to the CEE module data structure
- * ioc - Pointer to the ioc module data structure
- * dev - Pointer to the device driver module data structure
- * The device driver specific mbox ISR functions have
- * this pointer as one of the parameters.
- *
- * @return void
+ * @cee: Pointer to the CEE module data structure
+ * @ioc: Pointer to the ioc module data structure
+ * @dev: Pointer to the device driver module data structure.
+ * The device driver specific mbox ISR functions have
+ * this pointer as one of the parameters.
*/
void
bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h
index 3da1a946ccdd..ad004a4c3897 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h
@@ -16,23 +16,18 @@
* www.brocade.com
*/
-/**
- * @file bfa_cs.h BFA common services
- */
+/* BFA common services */
#ifndef __BFA_CS_H__
#define __BFA_CS_H__
#include "cna.h"
-/**
- * @ BFA state machine interfaces
- */
+/* BFA state machine interfaces */
typedef void (*bfa_sm_t)(void *sm, int event);
-/**
- * oc - object class eg. bfa_ioc
+/* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc
* etype - object type, eg. enum ioc_event
@@ -45,9 +40,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
#define bfa_sm_get_state(_sm) ((_sm)->sm)
#define bfa_sm_cmp_state(_sm, _state) ((_sm)->sm == (bfa_sm_t)(_state))
-/**
- * For converting from state machine function to state encoding.
- */
+/* For converting from state machine function to state encoding. */
struct bfa_sm_table {
bfa_sm_t sm; /*!< state machine function */
int state; /*!< state machine encoding */
@@ -55,13 +48,10 @@ struct bfa_sm_table {
};
#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
-/**
- * State machine with entry actions.
- */
+/* State machine with entry actions. */
typedef void (*bfa_fsm_t)(void *fsm, int event);
-/**
- * oc - object class eg. bfa_ioc
+/* oc - object class eg. bfa_ioc
* st - state, eg. reset
* otype - object type, eg. struct bfa_ioc
* etype - object type, eg. enum ioc_event
@@ -90,9 +80,7 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
return smt[i].state;
}
-/**
- * @ Generic wait counter.
- */
+/* Generic wait counter. */
typedef void (*bfa_wc_resume_t) (void *cbarg);
@@ -116,9 +104,7 @@ bfa_wc_down(struct bfa_wc *wc)
wc->wc_resume(wc->wc_cbarg);
}
-/**
- * Initialize a waiting counter.
- */
+/* Initialize a waiting counter. */
static inline void
bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
{
@@ -128,9 +114,7 @@ bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
bfa_wc_up(wc);
}
-/**
- * Wait for counter to reach zero
- */
+/* Wait for counter to reach zero */
static inline void
bfa_wc_wait(struct bfa_wc *wc)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 48f877337390..e423f82da490 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -26,13 +26,9 @@
#define BFA_STRING_32 32
#define BFA_VERSION_LEN 64
-/**
- * ---------------------- adapter definitions ------------
- */
+/* ---------------------- adapter definitions ------------ */
-/**
- * BFA adapter level attributes.
- */
+/* BFA adapter level attributes. */
enum {
BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
/*
@@ -74,18 +70,14 @@ struct bfa_adapter_attr {
u8 trunk_capable;
};
-/**
- * ---------------------- IOC definitions ------------
- */
+/* ---------------------- IOC definitions ------------ */
enum {
BFA_IOC_DRIVER_LEN = 16,
BFA_IOC_CHIP_REV_LEN = 8,
};
-/**
- * Driver and firmware versions.
- */
+/* Driver and firmware versions. */
struct bfa_ioc_driver_attr {
char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */
char driver_ver[BFA_VERSION_LEN]; /*!< driver version */
@@ -95,9 +87,7 @@ struct bfa_ioc_driver_attr {
char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */
};
-/**
- * IOC PCI device attributes
- */
+/* IOC PCI device attributes */
struct bfa_ioc_pci_attr {
u16 vendor_id; /*!< PCI vendor ID */
u16 device_id; /*!< PCI device ID */
@@ -108,9 +98,7 @@ struct bfa_ioc_pci_attr {
char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */
};
-/**
- * IOC states
- */
+/* IOC states */
enum bfa_ioc_state {
BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
BFA_IOC_RESET = 2, /*!< IOC is in reset state */
@@ -127,9 +115,7 @@ enum bfa_ioc_state {
BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */
};
-/**
- * IOC firmware stats
- */
+/* IOC firmware stats */
struct bfa_fw_ioc_stats {
u32 enable_reqs;
u32 disable_reqs;
@@ -139,9 +125,7 @@ struct bfa_fw_ioc_stats {
u32 unknown_reqs;
};
-/**
- * IOC driver stats
- */
+/* IOC driver stats */
struct bfa_ioc_drv_stats {
u32 ioc_isrs;
u32 ioc_enables;
@@ -157,9 +141,7 @@ struct bfa_ioc_drv_stats {
u32 rsvd;
};
-/**
- * IOC statistics
- */
+/* IOC statistics */
struct bfa_ioc_stats {
struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */
struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */
@@ -171,9 +153,7 @@ enum bfa_ioc_type {
BFA_IOC_TYPE_LL = 3,
};
-/**
- * IOC attributes returned in queries
- */
+/* IOC attributes returned in queries */
struct bfa_ioc_attr {
enum bfa_ioc_type ioc_type;
enum bfa_ioc_state state; /*!< IOC state */
@@ -187,22 +167,16 @@ struct bfa_ioc_attr {
u8 rsvd[4]; /*!< 64bit align */
};
-/**
- * Adapter capability mask definition
- */
+/* Adapter capability mask definition */
enum {
BFA_CM_HBA = 0x01,
BFA_CM_CNA = 0x02,
BFA_CM_NIC = 0x04,
};
-/**
- * ---------------------- mfg definitions ------------
- */
+/* ---------------------- mfg definitions ------------ */
-/**
- * Checksum size
- */
+/* Checksum size */
#define BFA_MFG_CHKSUM_SIZE 16
#define BFA_MFG_PARTNUM_SIZE 14
@@ -213,8 +187,7 @@ enum {
#pragma pack(1)
-/**
- * @brief BFA adapter manufacturing block definition.
+/* BFA adapter manufacturing block definition.
*
* All numerical fields are in big-endian format.
*/
@@ -256,9 +229,7 @@ struct bfa_mfg_block {
#pragma pack()
-/**
- * ---------------------- pci definitions ------------
- */
+/* ---------------------- pci definitions ------------ */
/*
* PCI device ID information
@@ -275,9 +246,7 @@ enum {
#define bfa_asic_id_ctc(device) \
(bfa_asic_id_ct(device) || bfa_asic_id_ct2(device))
-/**
- * PCI sub-system device and vendor ID information
- */
+/* PCI sub-system device and vendor ID information */
enum {
BFA_PCI_FCOE_SSDEVICE_ID = 0x14,
BFA_PCI_CT2_SSID_FCoE = 0x22,
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
index 8ab33ee2c2bc..b39c5f23974b 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h
@@ -20,10 +20,7 @@
#include "bfa_defs.h"
-/**
- * @brief
- * FC physical port statistics.
- */
+/* FC physical port statistics. */
struct bfa_port_fc_stats {
u64 secs_reset; /*!< Seconds since stats is reset */
u64 tx_frames; /*!< Tx frames */
@@ -59,10 +56,7 @@ struct bfa_port_fc_stats {
u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */
};
-/**
- * @brief
- * Eth Physical Port statistics.
- */
+/* Eth Physical Port statistics. */
struct bfa_port_eth_stats {
u64 secs_reset; /*!< Seconds since stats is reset */
u64 frame_64; /*!< Frames 64 bytes */
@@ -108,10 +102,7 @@ struct bfa_port_eth_stats {
u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */
};
-/**
- * @brief
- * Port statistics.
- */
+/* Port statistics. */
union bfa_port_stats_u {
struct bfa_port_fc_stats fc;
struct bfa_port_eth_stats eth;
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
index 6681fe87c1e1..7fb396fe679d 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h
@@ -20,33 +20,23 @@
#include "bfa_defs.h"
-/**
- * Manufacturing block version
- */
+/* Manufacturing block version */
#define BFA_MFG_VERSION 3
#define BFA_MFG_VERSION_UNINIT 0xFF
-/**
- * Manufacturing block encrypted version
- */
+/* Manufacturing block encrypted version */
#define BFA_MFG_ENC_VER 2
-/**
- * Manufacturing block version 1 length
- */
+/* Manufacturing block version 1 length */
#define BFA_MFG_VER1_LEN 128
-/**
- * Manufacturing block header length
- */
+/* Manufacturing block header length */
#define BFA_MFG_HDR_LEN 4
#define BFA_MFG_SERIALNUM_SIZE 11
#define STRSZ(_n) (((_n) + 4) & ~3)
-/**
- * Manufacturing card type
- */
+/* Manufacturing card type */
enum {
BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */
BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */
@@ -70,9 +60,7 @@ enum {
#pragma pack(1)
-/**
- * Check if Mezz card
- */
+/* Check if Mezz card */
#define bfa_mfg_is_mezz(type) (( \
(type) == BFA_MFG_TYPE_JAYHAWK || \
(type) == BFA_MFG_TYPE_WANCHESE || \
@@ -127,9 +115,7 @@ do { \
} \
} while (0)
-/**
- * VPD data length
- */
+/* VPD data length */
#define BFA_MFG_VPD_LEN 512
#define BFA_MFG_VPD_LEN_INVALID 0
@@ -137,9 +123,7 @@ do { \
#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */
#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */
-/**
- * VPD vendor tag
- */
+/* VPD vendor tag */
enum {
BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
@@ -151,8 +135,7 @@ enum {
BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
};
-/**
- * @brief BFA adapter flash vpd data definition.
+/* BFA adapter flash vpd data definition.
*
* All numerical fields are in big-endian format.
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
index 7c5fe6c2e80e..ea9af9ae754d 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h
@@ -18,8 +18,7 @@
#ifndef __BFA_DEFS_STATUS_H__
#define __BFA_DEFS_STATUS_H__
-/**
- * API status return values
+/* API status return values
*
* NOTE: The error msgs are auto generated from the comments. Only singe line
* comments are supported
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 0b640fafbda3..959c58ef972a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -20,13 +20,9 @@
#include "bfi_reg.h"
#include "bfa_defs.h"
-/**
- * IOC local definitions
- */
+/* IOC local definitions */
-/**
- * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
- */
+/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */
#define bfa_ioc_firmware_lock(__ioc) \
((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
@@ -96,9 +92,7 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
-/**
- * IOC state machine definitions/declarations
- */
+/* IOC state machine definitions/declarations */
enum ioc_event {
IOC_E_RESET = 1, /*!< IOC reset request */
IOC_E_ENABLE = 2, /*!< IOC enable request */
@@ -148,9 +142,7 @@ static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);
-/**
- * IOCPF state machine events
- */
+/* IOCPF state machine events */
enum iocpf_event {
IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
@@ -166,9 +158,7 @@ enum iocpf_event {
IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
};
-/**
- * IOCPF states
- */
+/* IOCPF states */
enum bfa_iocpf_state {
BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
@@ -215,21 +205,15 @@ static struct bfa_sm_table iocpf_sm_table[] = {
{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};
-/**
- * IOC State Machine
- */
+/* IOC State Machine */
-/**
- * Beginning state. IOC uninit state.
- */
+/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC is in uninit state.
- */
+/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -243,18 +227,14 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * Reset entry actions -- initialize state machine
- */
+/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}
-/**
- * IOC is in reset state.
- */
+/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -282,8 +262,7 @@ bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
bfa_iocpf_enable(ioc);
}
-/**
- * Host IOC function is being enabled, awaiting response from firmware.
+/* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
@@ -325,9 +304,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * Semaphore should be acquired for version check.
- */
+/* Semaphore should be acquired for version check. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
@@ -336,9 +313,7 @@ bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
bfa_ioc_send_getattr(ioc);
}
-/**
- * IOC configuration in progress. Timer is active.
- */
+/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -419,9 +394,7 @@ bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
bfa_iocpf_disable(ioc);
}
-/**
- * IOC is being disabled
- */
+/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -449,9 +422,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * IOC disable completion entry.
- */
+/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
@@ -485,9 +456,7 @@ bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}
-/**
- * Hardware initialization retry.
- */
+/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -534,9 +503,7 @@ bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC failure.
- */
+/* IOC failure. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -568,9 +535,7 @@ bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}
-/**
- * IOC failure.
- */
+/* IOC failure. */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
@@ -593,13 +558,9 @@ bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
}
}
-/**
- * IOCPF State Machine
- */
+/* IOCPF State Machine */
-/**
- * Reset entry actions -- initialize state machine
- */
+/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
@@ -607,9 +568,7 @@ bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
iocpf->auto_recover = bfa_nw_auto_recover;
}
-/**
- * Beginning state. IOC is in reset state.
- */
+/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -626,9 +585,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Semaphore should be acquired for version check.
- */
+/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
@@ -636,9 +593,7 @@ bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Awaiting h/w semaphore to continue with version check.
- */
+/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -683,9 +638,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Notify enable completion callback
- */
+/* Notify enable completion callback */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
@@ -698,9 +651,7 @@ bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
msecs_to_jiffies(BFA_IOC_TOV));
}
-/**
- * Awaiting firmware version match.
- */
+/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -727,18 +678,14 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * Request for semaphore.
- */
+/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Awaiting semaphore for h/w initialzation.
- */
+/* Awaiting semaphore for h/w initialzation. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -778,8 +725,7 @@ bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
bfa_ioc_reset(iocpf->ioc, false);
}
-/**
- * Hardware is being initialized. Interrupts are enabled.
+/* Hardware is being initialized. Interrupts are enabled.
* Holding hardware semaphore lock.
*/
static void
@@ -822,8 +768,7 @@ bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
bfa_ioc_send_enable(iocpf->ioc);
}
-/**
- * Host IOC function is being enabled, awaiting response from firmware.
+/* Host IOC function is being enabled, awaiting response from firmware.
* Semaphore is acquired.
*/
static void
@@ -896,9 +841,7 @@ bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
bfa_ioc_send_disable(iocpf->ioc);
}
-/**
- * IOC is being disabled
- */
+/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -935,9 +878,7 @@ bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * IOC hb ack request is being removed.
- */
+/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -963,9 +904,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * IOC disable completion entry.
- */
+/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
@@ -1000,9 +939,7 @@ bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * Hardware initialization failed.
- */
+/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1046,9 +983,7 @@ bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}
-/**
- * Hardware initialization failed.
- */
+/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1084,9 +1019,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
bfa_ioc_hw_sem_get(iocpf->ioc);
}
-/**
- * IOC is in failed state.
- */
+/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1134,10 +1067,7 @@ bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}
-/**
- * @brief
- * IOC is in failed state.
- */
+/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
@@ -1151,13 +1081,9 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
}
}
-/**
- * BFA IOC private functions
- */
+/* BFA IOC private functions */
-/**
- * Notify common modules registered for notification.
- */
+/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
@@ -1298,10 +1224,7 @@ bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
del_timer(&ioc->sem_timer);
}
-/**
- * @brief
- * Initialize LPU local memory (aka secondary memory / SRAM)
- */
+/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
@@ -1366,9 +1289,7 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}
-/**
- * Get driver and firmware versions.
- */
+/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
@@ -1388,9 +1309,7 @@ bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
}
}
-/**
- * Returns TRUE if same.
- */
+/* Returns TRUE if same. */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
@@ -1408,8 +1327,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
return true;
}
-/**
- * Return true if current running version is valid. Firmware signature and
+/* Return true if current running version is valid. Firmware signature and
* execution context (driver/bios) must match.
*/
static bool
@@ -1430,9 +1348,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}
-/**
- * Conditionally flush any pending message from firmware at start.
- */
+/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
@@ -1443,9 +1359,6 @@ bfa_ioc_msgflush(struct bfa_ioc *ioc)
writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}
-/**
- * @img ioc_init_logic.jpg
- */
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
@@ -1603,10 +1516,7 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
del_timer(&ioc->hb_timer);
}
-/**
- * @brief
- * Initiate a full firmware download.
- */
+/* Initiate a full firmware download. */
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_env)
@@ -1672,9 +1582,7 @@ bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
bfa_ioc_hwinit(ioc, force);
}
-/**
- * BFA ioc enable reply by firmware
- */
+/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
u8 cap_bm)
@@ -1686,10 +1594,7 @@ bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}
-/**
- * @brief
- * Update BFA configuration from firmware configuration.
- */
+/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
@@ -1702,9 +1607,7 @@ bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}
-/**
- * Attach time initialization of mbox logic.
- */
+/* Attach time initialization of mbox logic. */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
@@ -1718,9 +1621,7 @@ bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
}
}
-/**
- * Mbox poll timer -- restarts any pending mailbox requests.
- */
+/* Mbox poll timer -- restarts any pending mailbox requests. */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
@@ -1760,9 +1661,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
}
}
-/**
- * Cleanup any pending requests.
- */
+/* Cleanup any pending requests. */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
@@ -1774,12 +1673,12 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
}
/**
- * Read data from SMEM to host through PCI memmap
+ * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
*
- * @param[in] ioc memory for IOC
- * @param[in] tbuf app memory to store data from smem
- * @param[in] soff smem offset
- * @param[in] sz size of smem in bytes
+ * @ioc: memory for IOC
+ * @tbuf: app memory to store data from smem
+ * @soff: smem offset
+ * @sz: size of smem in bytes
*/
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
@@ -1826,9 +1725,7 @@ bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
return 0;
}
-/**
- * Retrieve saved firmware trace from a prior IOC failure.
- */
+/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
@@ -1844,9 +1741,7 @@ bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
return status;
}
-/**
- * Save firmware trace if configured.
- */
+/* Save firmware trace if configured. */
static void
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
{
@@ -1861,9 +1756,7 @@ bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
}
}
-/**
- * Retrieve saved firmware trace from a prior IOC failure.
- */
+/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
@@ -1892,9 +1785,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
bfa_nw_ioc_debug_save_ftrc(ioc);
}
-/**
- * IOCPF to IOC interface
- */
+/* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
@@ -1928,9 +1819,7 @@ bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}
-/**
- * IOC public
- */
+/* IOC public */
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
@@ -1954,8 +1843,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
return BFA_STATUS_OK;
}
-/**
- * Interface used by diag module to do firmware boot with memory test
+/* Interface used by diag module to do firmware boot with memory test
* as the entry vector.
*/
static void
@@ -1983,9 +1871,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
bfa_ioc_lpu_start(ioc);
}
-/**
- * Enable/disable IOC failure auto recovery.
- */
+/* Enable/disable IOC failure auto recovery. */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
@@ -2056,10 +1942,10 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
}
/**
- * IOC attach time initialization and setup.
+ * bfa_nw_ioc_attach - IOC attach time initialization and setup.
*
- * @param[in] ioc memory for IOC
- * @param[in] bfa driver instance structure
+ * @ioc: memory for IOC
+ * @bfa: driver instance structure
*/
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
@@ -2078,9 +1964,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
bfa_fsm_send_event(ioc, IOC_E_RESET);
}
-/**
- * Driver detach time IOC cleanup.
- */
+/* Driver detach time IOC cleanup. */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
@@ -2091,9 +1975,9 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc)
}
/**
- * Setup IOC PCI properties.
+ * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
*
- * @param[in] pcidev PCI device information for this IOC
+ * @pcidev: PCI device information for this IOC
*/
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
@@ -2160,10 +2044,10 @@ bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
}
/**
- * Initialize IOC dma memory
+ * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
*
- * @param[in] dm_kva kernel virtual address of IOC dma memory
- * @param[in] dm_pa physical address of IOC dma memory
+ * @dm_kva: kernel virtual address of IOC dma memory
+ * @dm_pa: physical address of IOC dma memory
*/
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
@@ -2176,9 +2060,7 @@ bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}
-/**
- * Return size of dma memory required.
- */
+/* Return size of dma memory required. */
u32
bfa_nw_ioc_meminfo(void)
{
@@ -2201,9 +2083,7 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
-/**
- * Initialize memory for saving firmware trace.
- */
+/* Initialize memory for saving firmware trace. */
void
bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
{
@@ -2217,9 +2097,7 @@ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
-/**
- * Register mailbox message handler function, to be called by common modules
- */
+/* Register mailbox message handler function, to be called by common modules */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
@@ -2231,11 +2109,12 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
}
/**
- * Queue a mailbox command request to firmware. Waits if mailbox is busy.
- * Responsibility of caller to serialize
+ * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
*
- * @param[in] ioc IOC instance
- * @param[i] cmd Mailbox command
+ * @ioc: IOC instance
+ * @cmd: Mailbox command
+ *
+ * Waits if mailbox is busy. Responsibility of caller to serialize
*/
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
@@ -2272,9 +2151,7 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
return false;
}
-/**
- * Handle mailbox interrupts
- */
+/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
@@ -2314,9 +2191,7 @@ bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
-/**
- * return true if IOC is disabled
- */
+/* return true if IOC is disabled */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
@@ -2324,17 +2199,14 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
-/**
- * return true if IOC is operational
- */
+/* return true if IOC is operational */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
-/**
- * Add to IOC heartbeat failure notification queue. To be used by common
+/* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
void
@@ -2518,9 +2390,7 @@ bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
-/**
- * WWN public
- */
+/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
@@ -2533,9 +2403,7 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
return ioc->attr->mac;
}
-/**
- * Firmware failure detected. Start recovery actions.
- */
+/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
@@ -2545,10 +2413,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
-/**
- * @dg hal_iocpf_pvt BFA IOC PF private functions
- * @{
- */
+/* BFA IOC PF private functions */
static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
@@ -2669,8 +2534,6 @@ bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
/*
* Send flash write request.
- *
- * @param[in] cbarg - callback argument
*/
static void
bfa_flash_write_send(struct bfa_flash *flash)
@@ -2699,10 +2562,10 @@ bfa_flash_write_send(struct bfa_flash *flash)
flash->offset += len;
}
-/*
- * Send flash read request.
+/**
+ * bfa_flash_read_send - Send flash read request.
*
- * @param[in] cbarg - callback argument
+ * @cbarg: callback argument
*/
static void
bfa_flash_read_send(void *cbarg)
@@ -2724,11 +2587,11 @@ bfa_flash_read_send(void *cbarg)
bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}
-/*
- * Process flash response messages upon receiving interrupts.
+/**
+ * bfa_flash_intr - Process flash response messages upon receiving interrupts.
*
- * @param[in] flasharg - flash structure
- * @param[in] msg - message structure
+ * @flasharg: flash structure
+ * @msg: message structure
*/
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
@@ -2821,12 +2684,12 @@ bfa_nw_flash_meminfo(void)
return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
-/*
- * Flash attach API.
+/**
+ * bfa_nw_flash_attach - Flash attach API.
*
- * @param[in] flash - flash structure
- * @param[in] ioc - ioc structure
- * @param[in] dev - device structure
+ * @flash: flash structure
+ * @ioc: ioc structure
+ * @dev: device structure
*/
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
@@ -2842,12 +2705,12 @@ bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
-/*
- * Claim memory for flash
+/**
+ * bfa_nw_flash_memclaim - Claim memory for flash
*
- * @param[in] flash - flash structure
- * @param[in] dm_kva - pointer to virtual memory address
- * @param[in] dm_pa - physical memory address
+ * @flash: flash structure
+ * @dm_kva: pointer to virtual memory address
+ * @dm_pa: physical memory address
*/
void
bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
@@ -2859,13 +2722,13 @@ bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
-/*
- * Get flash attribute.
+/**
+ * bfa_nw_flash_get_attr - Get flash attribute.
*
- * @param[in] flash - flash structure
- * @param[in] attr - flash attribute structure
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @attr: flash attribute structure
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
@@ -2895,17 +2758,17 @@ bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
return BFA_STATUS_OK;
}
-/*
- * Update flash partition.
+/**
+ * bfa_nw_flash_update_part - Update flash partition.
*
- * @param[in] flash - flash structure
- * @param[in] type - flash partition type
- * @param[in] instance - flash partition instance
- * @param[in] buf - update data buffer
- * @param[in] len - data buffer length
- * @param[in] offset - offset relative to the partition starting address
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @type: flash partition type
+ * @instance: flash partition instance
+ * @buf: update data buffer
+ * @len: data buffer length
+ * @offset: offset relative to the partition starting address
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
@@ -2944,17 +2807,17 @@ bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
return BFA_STATUS_OK;
}
-/*
- * Read flash partition.
+/**
+ * bfa_nw_flash_read_part - Read flash partition.
*
- * @param[in] flash - flash structure
- * @param[in] type - flash partition type
- * @param[in] instance - flash partition instance
- * @param[in] buf - read data buffer
- * @param[in] len - data buffer length
- * @param[in] offset - offset relative to the partition starting address
- * @param[in] cbfn - callback function
- * @param[in] cbarg - callback argument
+ * @flash: flash structure
+ * @type: flash partition type
+ * @instance: flash partition instance
+ * @buf: read data buffer
+ * @len: data buffer length
+ * @offset: offset relative to the partition starting address
+ * @cbfn: callback function
+ * @cbarg: callback argument
*
* Return status.
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index 3b4460fdc148..63a85e555df8 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -30,9 +30,7 @@
#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
BFI_IOC_TRC_HDR_SZ)
-/**
- * PCI device information required by IOC
- */
+/* PCI device information required by IOC */
struct bfa_pcidev {
int pci_slot;
u8 pci_func;
@@ -41,8 +39,7 @@ struct bfa_pcidev {
void __iomem *pci_bar_kva;
};
-/**
- * Structure used to remember the DMA-able memory block's KVA and Physical
+/* Structure used to remember the DMA-able memory block's KVA and Physical
* Address
*/
struct bfa_dma {
@@ -52,15 +49,11 @@ struct bfa_dma {
#define BFA_DMA_ALIGN_SZ 256
-/**
- * smem size for Crossbow and Catapult
- */
+/* smem size for Crossbow and Catapult */
#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */
#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
-/**
- * @brief BFA dma address assignment macro. (big endian format)
- */
+/* BFA dma address assignment macro. (big endian format) */
#define bfa_dma_be_addr_set(dma_addr, pa) \
__bfa_dma_be_addr_set(&dma_addr, (u64)pa)
static inline void
@@ -108,9 +101,7 @@ struct bfa_ioc_regs {
u32 smem_pg0;
};
-/**
- * IOC Mailbox structures
- */
+/* IOC Mailbox structures */
typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
struct bfa_mbox_cmd {
struct list_head qe;
@@ -119,9 +110,7 @@ struct bfa_mbox_cmd {
u32 msg[BFI_IOC_MSGSZ];
};
-/**
- * IOC mailbox module
- */
+/* IOC mailbox module */
typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
struct bfa_ioc_mbox_mod {
struct list_head cmd_q; /*!< pending mbox queue */
@@ -132,9 +121,7 @@ struct bfa_ioc_mbox_mod {
} mbhdlr[BFI_MC_MAX];
};
-/**
- * IOC callback function interfaces
- */
+/* IOC callback function interfaces */
typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
@@ -146,9 +133,7 @@ struct bfa_ioc_cbfn {
bfa_ioc_reset_cbfn_t reset_cbfn;
};
-/**
- * IOC event notification mechanism.
- */
+/* IOC event notification mechanism. */
enum bfa_ioc_event {
BFA_IOC_E_ENABLED = 1,
BFA_IOC_E_DISABLED = 2,
@@ -163,9 +148,7 @@ struct bfa_ioc_notify {
void *cbarg;
};
-/**
- * Initialize a IOC event notification structure
- */
+/* Initialize a IOC event notification structure */
#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
(__notify)->cbfn = (__cbfn); \
(__notify)->cbarg = (__cbarg); \
@@ -261,9 +244,7 @@ struct bfa_ioc_hwif {
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
-/**
- * IOC mailbox interface
- */
+/* IOC mailbox interface */
bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc,
struct bfa_mbox_cmd *cmd,
bfa_mbox_cmd_cbfn_t cbfn, void *cbarg);
@@ -271,9 +252,7 @@ void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
-/**
- * IOC interfaces
- */
+/* IOC interfaces */
#define bfa_ioc_pll_init_asic(__ioc) \
((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index b6b036a143ae..5df0b0c68c5a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -87,9 +87,7 @@ static const struct bfa_ioc_hwif nw_hwif_ct2 = {
.ioc_sync_complete = bfa_ioc_ct_sync_complete,
};
-/**
- * Called from bfa_ioc_attach() to map asic specific calls.
- */
+/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
@@ -102,9 +100,7 @@ bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
ioc->ioc_hwif = &nw_hwif_ct2;
}
-/**
- * Return true if firmware of current driver matches the running firmware.
- */
+/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
@@ -182,9 +178,7 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}
-/**
- * Notify other functions on HB failure.
- */
+/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
@@ -195,9 +189,7 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
readl(ioc->ioc_regs.alt_ll_halt);
}
-/**
- * Host to LPU mailbox message addresses
- */
+/* Host to LPU mailbox message addresses */
static const struct {
u32 hfn_mbox;
u32 lpu_mbox;
@@ -209,9 +201,7 @@ static const struct {
{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};
-/**
- * Host <-> LPU mailbox command/status registers - port 0
- */
+/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
u32 hfn;
u32 lpu;
@@ -222,9 +212,7 @@ static const struct {
{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};
-/**
- * Host <-> LPU mailbox command/status registers - port 1
- */
+/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
u32 hfn;
u32 lpu;
@@ -368,9 +356,7 @@ bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}
-/**
- * Initialize IOC to port mapping.
- */
+/* Initialize IOC to port mapping. */
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
@@ -398,9 +384,7 @@ bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}
-/**
- * Set interrupt mode for a function: INTX or MSIX
- */
+/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
@@ -443,9 +427,7 @@ bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
return false;
}
-/**
- * MSI-X resource allocation for 1860 with no asic block
- */
+/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT 64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c
@@ -473,9 +455,7 @@ bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}
-/**
- * Cleanup hw semaphore and usecnt registers
- */
+/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
@@ -492,9 +472,7 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
bfa_nw_ioc_hw_sem_release(ioc);
}
-/**
- * Synchronized IOC failure processing routines
- */
+/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
@@ -518,9 +496,7 @@ bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
return bfa_ioc_ct_sync_complete(ioc);
}
-/**
- * Synchronized IOC failure processing routines
- */
+/* Synchronized IOC failure processing routines */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
index dd36427f4752..55067d0d25cf 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_msgq.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c
@@ -16,9 +16,7 @@
* www.brocade.com
*/
-/**
- * @file bfa_msgq.c MSGQ module source file.
- */
+/* MSGQ module source file. */
#include "bfi.h"
#include "bfa_msgq.h"
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 0d9df695397a..1f24c23dc786 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -22,15 +22,11 @@
#pragma pack(1)
-/**
- * BFI FW image type
- */
+/* BFI FW image type */
#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */
#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32))
-/**
- * Msg header common to all msgs
- */
+/* Msg header common to all msgs */
struct bfi_mhdr {
u8 msg_class; /*!< @ref enum bfi_mclass */
u8 msg_id; /*!< msg opcode with in the class */
@@ -65,17 +61,14 @@ struct bfi_mhdr {
#define BFI_I2H_OPCODE_BASE 128
#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
-/**
- ****************************************************************************
+/****************************************************************************
*
* Scatter Gather Element and Page definition
*
****************************************************************************
*/
-/**
- * DMA addresses
- */
+/* DMA addresses */
union bfi_addr_u {
struct {
u32 addr_lo;
@@ -83,9 +76,7 @@ union bfi_addr_u {
} a32;
};
-/**
- * Generic DMA addr-len pair.
- */
+/* Generic DMA addr-len pair. */
struct bfi_alen {
union bfi_addr_u al_addr; /* DMA addr of buffer */
u32 al_len; /* length of buffer */
@@ -98,26 +89,20 @@ struct bfi_alen {
#define BFI_LMSG_PL_WSZ \
((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4)
-/**
- * Mailbox message structure
- */
+/* Mailbox message structure */
#define BFI_MBMSG_SZ 7
struct bfi_mbmsg {
struct bfi_mhdr mh;
u32 pl[BFI_MBMSG_SZ];
};
-/**
- * Supported PCI function class codes (personality)
- */
+/* Supported PCI function class codes (personality) */
enum bfi_pcifn_class {
BFI_PCIFN_CLASS_FC = 0x0c04,
BFI_PCIFN_CLASS_ETH = 0x0200,
};
-/**
- * Message Classes
- */
+/* Message Classes */
enum bfi_mclass {
BFI_MC_IOC = 1, /*!< IO Controller (IOC) */
BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */
@@ -159,15 +144,12 @@ enum bfi_mclass {
#define BFI_FWBOOT_ENV_OS 0
-/**
- *----------------------------------------------------------------------
+/*----------------------------------------------------------------------
* IOC
*----------------------------------------------------------------------
*/
-/**
- * Different asic generations
- */
+/* Different asic generations */
enum bfi_asic_gen {
BFI_ASIC_GEN_CB = 1,
BFI_ASIC_GEN_CT = 2,
@@ -196,9 +178,7 @@ enum bfi_ioc_i2h_msgs {
BFI_IOC_I2H_HBEAT = BFA_I2HM(4),
};
-/**
- * BFI_IOC_H2I_GETATTR_REQ message
- */
+/* BFI_IOC_H2I_GETATTR_REQ message */
struct bfi_ioc_getattr_req {
struct bfi_mhdr mh;
union bfi_addr_u attr_addr;
@@ -231,30 +211,22 @@ struct bfi_ioc_attr {
u32 card_type; /*!< card type */
};
-/**
- * BFI_IOC_I2H_GETATTR_REPLY message
- */
+/* BFI_IOC_I2H_GETATTR_REPLY message */
struct bfi_ioc_getattr_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< cfg reply status */
u8 rsvd[3];
};
-/**
- * Firmware memory page offsets
- */
+/* Firmware memory page offsets */
#define BFI_IOC_SMEM_PG0_CB (0x40)
#define BFI_IOC_SMEM_PG0_CT (0x180)
-/**
- * Firmware statistic offset
- */
+/* Firmware statistic offset */
#define BFI_IOC_FWSTATS_OFF (0x6B40)
#define BFI_IOC_FWSTATS_SZ (4096)
-/**
- * Firmware trace offset
- */
+/* Firmware trace offset */
#define BFI_IOC_TRC_OFF (0x4b00)
#define BFI_IOC_TRC_ENTS 256
#define BFI_IOC_TRC_ENT_SZ 16
@@ -299,9 +271,7 @@ struct bfi_ioc_hbeat {
u32 hb_count; /*!< current heart beat count */
};
-/**
- * IOC hardware/firmware state
- */
+/* IOC hardware/firmware state */
enum bfi_ioc_state {
BFI_IOC_UNINIT = 0, /*!< not initialized */
BFI_IOC_INITING = 1, /*!< h/w is being initialized */
@@ -345,9 +315,7 @@ enum {
((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \
BFI_ADAPTER_UNSUPP))
-/**
- * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
- */
+/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */
struct bfi_ioc_ctrl_req {
struct bfi_mhdr mh;
u16 clscode;
@@ -355,9 +323,7 @@ struct bfi_ioc_ctrl_req {
u32 tv_sec;
};
-/**
- * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
- */
+/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */
struct bfi_ioc_ctrl_reply {
struct bfi_mhdr mh; /*!< Common msg header */
u8 status; /*!< enable/disable status */
@@ -367,9 +333,7 @@ struct bfi_ioc_ctrl_reply {
};
#define BFI_IOC_MSGSZ 8
-/**
- * H2I Messages
- */
+/* H2I Messages */
union bfi_ioc_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_req enable_req;
@@ -378,17 +342,14 @@ union bfi_ioc_h2i_msg_u {
u32 mboxmsg[BFI_IOC_MSGSZ];
};
-/**
- * I2H Messages
- */
+/* I2H Messages */
union bfi_ioc_i2h_msg_u {
struct bfi_mhdr mh;
struct bfi_ioc_ctrl_reply fw_event;
u32 mboxmsg[BFI_IOC_MSGSZ];
};
-/**
- *----------------------------------------------------------------------
+/*----------------------------------------------------------------------
* MSGQ
*----------------------------------------------------------------------
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h
index 4eecabea397b..6704a4392973 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_cna.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h
@@ -37,18 +37,14 @@ enum bfi_port_i2h {
BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4),
};
-/**
- * Generic REQ type
- */
+/* Generic REQ type */
struct bfi_port_generic_req {
struct bfi_mhdr mh; /*!< msg header */
u32 msgtag; /*!< msgtag for reply */
u32 rsvd;
};
-/**
- * Generic RSP type
- */
+/* Generic RSP type */
struct bfi_port_generic_rsp {
struct bfi_mhdr mh; /*!< common msg header */
u8 status; /*!< port enable status */
@@ -56,44 +52,12 @@ struct bfi_port_generic_rsp {
u32 msgtag; /*!< msgtag for reply */
};
-/**
- * @todo
- * BFI_PORT_H2I_ENABLE_REQ
- */
-
-/**
- * @todo
- * BFI_PORT_I2H_ENABLE_RSP
- */
-
-/**
- * BFI_PORT_H2I_DISABLE_REQ
- */
-
-/**
- * BFI_PORT_I2H_DISABLE_RSP
- */
-
-/**
- * BFI_PORT_H2I_GET_STATS_REQ
- */
+/* BFI_PORT_H2I_GET_STATS_REQ */
struct bfi_port_get_stats_req {
struct bfi_mhdr mh; /*!< common msg header */
union bfi_addr_u dma_addr;
};
-/**
- * BFI_PORT_I2H_GET_STATS_RSP
- */
-
-/**
- * BFI_PORT_H2I_CLEAR_STATS_REQ
- */
-
-/**
- * BFI_PORT_I2H_CLEAR_STATS_RSP
- */
-
union bfi_port_h2i_msg_u {
struct bfi_mhdr mh;
struct bfi_port_generic_req enable_req;
diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h
index a90f1cf46b41..eef6e1f8aecc 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_enet.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h
@@ -16,12 +16,9 @@
* www.brocade.com
*/
-/**
- * @file bfi_enet.h BNA Hardware and Firmware Interface
- */
+/* BNA Hardware and Firmware Interface */
-/**
- * Skipping statistics collection to avoid clutter.
+/* Skipping statistics collection to avoid clutter.
* Command is no longer needed:
* MTU
* TxQ Stop
@@ -64,9 +61,7 @@ union bfi_addr_be_u {
} a32;
};
-/**
- * T X Q U E U E D E F I N E S
- */
+/* T X Q U E U E D E F I N E S */
/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */
/* TxQ Entry Opcodes */
#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -106,10 +101,7 @@ struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */
union bfi_addr_be_u addr;
};
-/**
- * TxQ Entry Structure
- *
- */
+/* TxQ Entry Structure */
struct bfi_enet_txq_entry {
union {
struct bfi_enet_txq_wi_base base;
@@ -124,16 +116,12 @@ struct bfi_enet_txq_entry {
#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
(((_hdr_size) << 10) | ((_offset) & 0x3FF))
-/**
- * R X Q U E U E D E F I N E S
- */
+/* R X Q U E U E D E F I N E S */
struct bfi_enet_rxq_entry {
union bfi_addr_be_u rx_buffer;
};
-/**
- * R X C O M P L E T I O N Q U E U E D E F I N E S
- */
+/* R X C O M P L E T I O N Q U E U E D E F I N E S */
/* CQ Entry Flags */
#define BFI_ENET_CQ_EF_MAC_ERROR (1 << 0)
#define BFI_ENET_CQ_EF_FCS_ERROR (1 << 1)
@@ -174,9 +162,7 @@ struct bfi_enet_cq_entry {
u8 rxq_id;
};
-/**
- * E N E T C O N T R O L P A T H C O M M A N D S
- */
+/* E N E T C O N T R O L P A T H C O M M A N D S */
struct bfi_enet_q {
union bfi_addr_u pg_tbl;
union bfi_addr_u first_entry;
@@ -222,9 +208,7 @@ struct bfi_enet_ib {
u16 rsvd;
};
-/**
- * ENET command messages
- */
+/* ENET command messages */
enum bfi_enet_h2i_msgs {
/* Rx Commands */
BFI_ENET_H2I_RX_CFG_SET_REQ = 1,
@@ -350,9 +334,7 @@ enum bfi_enet_i2h_msgs {
BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4),
};
-/**
- * The following error codes can be returned by the enet commands
- */
+/* The following error codes can be returned by the enet commands */
enum bfi_enet_err {
BFI_ENET_CMD_OK = 0,
BFI_ENET_CMD_FAIL = 1,
@@ -364,8 +346,7 @@ enum bfi_enet_err {
BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */
};
-/**
- * Generic Request
+/* Generic Request
*
* bfi_enet_req is used by:
* BFI_ENET_H2I_RX_CFG_CLR_REQ
@@ -375,8 +356,7 @@ struct bfi_enet_req {
struct bfi_msgq_mhdr mh;
};
-/**
- * Enable/Disable Request
+/* Enable/Disable Request
*
* bfi_enet_enable_req is used by:
* BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero)
@@ -391,9 +371,7 @@ struct bfi_enet_enable_req {
u8 rsvd[3];
};
-/**
- * Generic Response
- */
+/* Generic Response */
struct bfi_enet_rsp {
struct bfi_msgq_mhdr mh;
u8 error; /*!< if error see cmd_offset */
@@ -401,20 +379,16 @@ struct bfi_enet_rsp {
u16 cmd_offset; /*!< offset to invalid parameter */
};
-/**
- * GLOBAL CONFIGURATION
- */
+/* GLOBAL CONFIGURATION */
-/**
- * bfi_enet_attr_req is used by:
+/* bfi_enet_attr_req is used by:
* BFI_ENET_H2I_GET_ATTR_REQ
*/
struct bfi_enet_attr_req {
struct bfi_msgq_mhdr mh;
};
-/**
- * bfi_enet_attr_rsp is used by:
+/* bfi_enet_attr_rsp is used by:
* BFI_ENET_I2H_GET_ATTR_RSP
*/
struct bfi_enet_attr_rsp {
@@ -427,8 +401,7 @@ struct bfi_enet_attr_rsp {
u32 rit_size;
};
-/**
- * Tx Configuration
+/* Tx Configuration
*
* bfi_enet_tx_cfg is used by:
* BFI_ENET_H2I_TX_CFG_SET_REQ
@@ -477,8 +450,7 @@ struct bfi_enet_tx_cfg_rsp {
} q_handles[BFI_ENET_TXQ_PRIO_MAX];
};
-/**
- * Rx Configuration
+/* Rx Configuration
*
* bfi_enet_rx_cfg is used by:
* BFI_ENET_H2I_RX_CFG_SET_REQ
@@ -553,8 +525,7 @@ struct bfi_enet_rx_cfg_rsp {
} q_handles[BFI_ENET_RX_QSET_MAX];
};
-/**
- * RIT
+/* RIT
*
* bfi_enet_rit_req is used by:
* BFI_ENET_H2I_RIT_CFG_REQ
@@ -566,8 +537,7 @@ struct bfi_enet_rit_req {
u8 table[BFI_ENET_RSS_RIT_MAX];
};
-/**
- * RSS
+/* RSS
*
* bfi_enet_rss_cfg_req is used by:
* BFI_ENET_H2I_RSS_CFG_REQ
@@ -591,8 +561,7 @@ struct bfi_enet_rss_cfg_req {
struct bfi_enet_rss_cfg cfg;
};
-/**
- * MAC Unicast
+/* MAC Unicast
*
* bfi_enet_rx_vlan_req is used by:
* BFI_ENET_H2I_MAC_UCAST_SET_REQ
@@ -606,17 +575,14 @@ struct bfi_enet_ucast_req {
u8 rsvd[2];
};
-/**
- * MAC Unicast + VLAN
- */
+/* MAC Unicast + VLAN */
struct bfi_enet_mac_n_vlan_req {
struct bfi_msgq_mhdr mh;
u16 vlan_id;
mac_t mac_addr;
};
-/**
- * MAC Multicast
+/* MAC Multicast
*
* bfi_enet_mac_mfilter_add_req is used by:
* BFI_ENET_H2I_MAC_MCAST_ADD_REQ
@@ -627,8 +593,7 @@ struct bfi_enet_mcast_add_req {
u8 rsvd[2];
};
-/**
- * bfi_enet_mac_mfilter_add_rsp is used by:
+/* bfi_enet_mac_mfilter_add_rsp is used by:
* BFI_ENET_I2H_MAC_MCAST_ADD_RSP
*/
struct bfi_enet_mcast_add_rsp {
@@ -640,8 +605,7 @@ struct bfi_enet_mcast_add_rsp {
u8 rsvd1[2];
};
-/**
- * bfi_enet_mac_mfilter_del_req is used by:
+/* bfi_enet_mac_mfilter_del_req is used by:
* BFI_ENET_H2I_MAC_MCAST_DEL_REQ
*/
struct bfi_enet_mcast_del_req {
@@ -650,8 +614,7 @@ struct bfi_enet_mcast_del_req {
u8 rsvd[2];
};
-/**
- * VLAN
+/* VLAN
*
* bfi_enet_rx_vlan_req is used by:
* BFI_ENET_H2I_RX_VLAN_SET_REQ
@@ -663,8 +626,7 @@ struct bfi_enet_rx_vlan_req {
u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX];
};
-/**
- * PAUSE
+/* PAUSE
*
* bfi_enet_set_pause_req is used by:
* BFI_ENET_H2I_SET_PAUSE_REQ
@@ -676,8 +638,7 @@ struct bfi_enet_set_pause_req {
u8 rx_pause; /* 1 = enable; 0 = disable */
};
-/**
- * DIAGNOSTICS
+/* DIAGNOSTICS
*
* bfi_enet_diag_lb_req is used by:
* BFI_ENET_H2I_DIAG_LOOPBACK
@@ -689,16 +650,13 @@ struct bfi_enet_diag_lb_req {
u8 enable; /* 1 = enable; 0 = disable */
};
-/**
- * enum for Loopback opmodes
- */
+/* enum for Loopback opmodes */
enum {
BFI_ENET_DIAG_LB_OPMODE_EXT = 0,
BFI_ENET_DIAG_LB_OPMODE_CBL = 1,
};
-/**
- * STATISTICS
+/* STATISTICS
*
* bfi_enet_stats_req is used by:
* BFI_ENET_H2I_STATS_GET_REQ
@@ -713,9 +671,7 @@ struct bfi_enet_stats_req {
union bfi_addr_u host_buffer;
};
-/**
- * defines for "stats_mask" above.
- */
+/* defines for "stats_mask" above. */
#define BFI_ENET_STATS_MAC (1 << 0) /* !< MAC Statistics */
#define BFI_ENET_STATS_BPC (1 << 1) /* !< Pause Stats from BPC */
#define BFI_ENET_STATS_RAD (1 << 2) /* !< Rx Admission Statistics */
@@ -881,8 +837,7 @@ struct bfi_enet_stats_mac {
u64 tx_fragments;
};
-/**
- * Complete statistics, DMAed from fw to host followed by
+/* Complete statistics, DMAed from fw to host followed by
* BFI_ENET_I2H_STATS_GET_RSP
*/
struct bfi_enet_stats {
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index 0e094fe46dfd..c49fa312ddbd 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -221,9 +221,7 @@ enum {
#define __PMM_1T_RESET_P 0x00000001
#define PMM_1T_RESET_REG_P1 0x00023c1c
-/**
- * Brocade 1860 Adapter specific defines
- */
+/* Brocade 1860 Adapter specific defines */
#define CT2_PCI_CPQ_BASE 0x00030000
#define CT2_PCI_APP_BASE 0x00030100
#define CT2_PCI_ETH_BASE 0x00030400
diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h
index 4d7a5de08e12..ede532b4e9db 100644
--- a/drivers/net/ethernet/brocade/bna/bna.h
+++ b/drivers/net/ethernet/brocade/bna/bna.h
@@ -25,11 +25,7 @@
extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
-/**
- *
- * Macros and constants
- *
- */
+/* Macros and constants */
#define BNA_IOC_TIMER_FREQ 200
@@ -356,11 +352,7 @@ do { \
} \
} while (0)
-/**
- *
- * Inline functions
- *
- */
+/* Inline functions */
static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
{
@@ -377,15 +369,9 @@ static inline struct bna_mac *bna_mac_find(struct list_head *q, u8 *addr)
#define bna_attr(_bna) (&(_bna)->ioceth.attr)
-/**
- *
- * Function prototypes
- *
- */
+/* Function prototypes */
-/**
- * BNA
- */
+/* BNA */
/* FW response handlers */
void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr);
@@ -413,24 +399,19 @@ struct bna_mcam_handle *bna_mcam_mod_handle_get(struct bna_mcam_mod *mod);
void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
struct bna_mcam_handle *handle);
-/**
- * MBOX
- */
+/* MBOX */
/* API for BNAD */
void bna_mbox_handler(struct bna *bna, u32 intr_status);
-/**
- * ETHPORT
- */
+/* ETHPORT */
/* Callbacks for RX */
void bna_ethport_cb_rx_started(struct bna_ethport *ethport);
void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport);
-/**
- * TX MODULE AND TX
- */
+/* TX MODULE AND TX */
+
/* FW response handelrs */
void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx,
struct bfi_msgq_mhdr *msghdr);
@@ -462,9 +443,7 @@ void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
void bna_tx_cleanup_complete(struct bna_tx *tx);
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);
-/**
- * RX MODULE, RX, RXF
- */
+/* RX MODULE, RX, RXF */
/* FW response handlers */
void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx,
@@ -522,9 +501,7 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
-/**
- * ENET
- */
+/* ENET */
/* API for RX */
int bna_enet_mtu_get(struct bna_enet *enet);
@@ -544,18 +521,14 @@ void bna_enet_mtu_set(struct bna_enet *enet, int mtu,
void (*cbfn)(struct bnad *));
void bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac);
-/**
- * IOCETH
- */
+/* IOCETH */
/* APIs for BNAD */
void bna_ioceth_enable(struct bna_ioceth *ioceth);
void bna_ioceth_disable(struct bna_ioceth *ioceth,
enum bna_cleanup_type type);
-/**
- * BNAD
- */
+/* BNAD */
/* Callbacks for ENET */
void bnad_cb_ethport_link_status(struct bnad *bnad,
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 9ccc586e3767..db14f69d63bc 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -378,9 +378,8 @@ bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
}
}
-/**
- * ETHPORT
- */
+/* ETHPORT */
+
#define call_ethport_stop_cbfn(_ethport) \
do { \
if ((_ethport)->stop_cbfn) { \
@@ -804,9 +803,8 @@ bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
}
}
-/**
- * ENET
- */
+/* ENET */
+
#define bna_enet_chld_start(enet) \
do { \
enum bna_tx_type tx_type = \
@@ -1328,9 +1326,8 @@ bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
-/**
- * IOCETH
- */
+/* IOCETH */
+
#define enable_mbox_intr(_ioceth) \
do { \
u32 intr_status; \
diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
index 4c6aab2a9534..b8c4e21fbf4c 100644
--- a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h
@@ -16,20 +16,15 @@
* www.brocade.com
*/
-/**
- * File for interrupt macros and functions
- */
+/* File for interrupt macros and functions */
#ifndef __BNA_HW_DEFS_H__
#define __BNA_HW_DEFS_H__
#include "bfi_reg.h"
-/**
- *
- * SW imposed limits
- *
- */
+/* SW imposed limits */
+
#define BFI_ENET_DEF_TXQ 1
#define BFI_ENET_DEF_RXP 1
#define BFI_ENET_DEF_UCAM 1
@@ -141,11 +136,8 @@
}
#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id)
-/**
- *
- * Interrupt related bits, flags and macros
- *
- */
+
+/* Interrupt related bits, flags and macros */
#define IB_STATUS_BITS 0x0000ffff
@@ -280,11 +272,7 @@ do { \
(writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \
(_rcb)->q_dbell));
-/**
- *
- * TxQ, RxQ, CQ related bits, offsets, macros
- *
- */
+/* TxQ, RxQ, CQ related bits, offsets, macros */
/* TxQ Entry Opcodes */
#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
@@ -334,11 +322,7 @@ do { \
#define BNA_CQ_EF_LOCAL (1 << 20)
-/**
- *
- * Data structures
- *
- */
+/* Data structures */
struct bna_reg_offset {
u32 fn_int_status;
@@ -371,8 +355,7 @@ struct bna_txq_wi_vector {
struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
};
-/**
- * TxQ Entry Structure
+/* TxQ Entry Structure
*
* BEWARE: Load values into this structure with correct endianess.
*/
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 276fcb589f4b..71144b396e02 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -18,9 +18,7 @@
#include "bna.h"
#include "bfi.h"
-/**
- * IB
- */
+/* IB */
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
@@ -29,9 +27,7 @@ bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
(u32)ib->coalescing_timeo, 0);
}
-/**
- * RXF
- */
+/* RXF */
#define bna_rxf_vlan_cfg_soft_reset(rxf) \
do { \
@@ -1312,9 +1308,7 @@ bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
return 0;
}
-/**
- * RX
- */
+/* RX */
#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
(qcfg)->num_paths : ((qcfg)->num_paths * 2))
@@ -2791,9 +2785,8 @@ const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
{1, 2},
};
-/**
- * TX
- */
+/* TX */
+
#define call_tx_stop_cbfn(tx) \
do { \
if ((tx)->stop_cbfn) { \
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e8d3ab7ea6cb..d3eb8bddfb2a 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -23,11 +23,7 @@
#include "bfa_cee.h"
#include "bfa_msgq.h"
-/**
- *
- * Forward declarations
- *
- */
+/* Forward declarations */
struct bna_mcam_handle;
struct bna_txq;
@@ -40,11 +36,7 @@ struct bna_enet;
struct bna;
struct bnad;
-/**
- *
- * Enums, primitive data types
- *
- */
+/* Enums, primitive data types */
enum bna_status {
BNA_STATUS_T_DISABLED = 0,
@@ -331,11 +323,7 @@ struct bna_attr {
int max_rit_size;
};
-/**
- *
- * IOCEth
- *
- */
+/* IOCEth */
struct bna_ioceth {
bfa_fsm_t fsm;
@@ -351,11 +339,7 @@ struct bna_ioceth {
struct bna *bna;
};
-/**
- *
- * Enet
- *
- */
+/* Enet */
/* Pause configuration */
struct bna_pause_config {
@@ -390,11 +374,7 @@ struct bna_enet {
struct bna *bna;
};
-/**
- *
- * Ethport
- *
- */
+/* Ethport */
struct bna_ethport {
bfa_fsm_t fsm;
@@ -419,11 +399,7 @@ struct bna_ethport {
struct bna *bna;
};
-/**
- *
- * Interrupt Block
- *
- */
+/* Interrupt Block */
/* Doorbell structure */
struct bna_ib_dbell {
@@ -447,11 +423,7 @@ struct bna_ib {
int interpkt_timeo;
};
-/**
- *
- * Tx object
- *
- */
+/* Tx object */
/* Tx datapath control structure */
#define BNA_Q_NAME_SIZE 16
@@ -585,11 +557,7 @@ struct bna_tx_mod {
struct bna *bna;
};
-/**
- *
- * Rx object
- *
- */
+/* Rx object */
/* Rx datapath control structure */
struct bna_rcb {
@@ -898,11 +866,7 @@ struct bna_rx_mod {
u32 rid_mask;
};
-/**
- *
- * CAM
- *
- */
+/* CAM */
struct bna_ucam_mod {
struct bna_mac *ucmac; /* BFI_MAX_UCMAC entries */
@@ -927,11 +891,7 @@ struct bna_mcam_mod {
struct bna *bna;
};
-/**
- *
- * Statistics
- *
- */
+/* Statistics */
struct bna_stats {
struct bna_dma_addr hw_stats_dma;
@@ -949,11 +909,7 @@ struct bna_stats_mod {
struct bfi_enet_stats_req stats_clr;
};
-/**
- *
- * BNA
- *
- */
+/* BNA */
struct bna {
struct bna_ident ident;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 67cd2ed0306a..b441f33258e7 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1302,8 +1302,7 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
return 0;
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Unregisters Tx MSIX vector(s) from the kernel
*/
static void
@@ -1322,8 +1321,7 @@ bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
}
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
*/
static int
@@ -1354,8 +1352,7 @@ err_return:
return -1;
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Unregisters Rx MSIX vector(s) from the kernel
*/
static void
@@ -1375,8 +1372,7 @@ bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
}
}
-/**
- * NOTE: Should be called for MSIX only
+/* NOTE: Should be called for MSIX only
* Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
*/
static int
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 72742be11277..d78339224751 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -389,9 +389,7 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
void bnad_debugfs_init(struct bnad *bnad);
void bnad_debugfs_uninit(struct bnad *bnad);
-/**
- * MACROS
- */
+/* MACROS */
/* To set & get the stats counters */
#define BNAD_UPDATE_CTR(_bnad, _ctr) \
(((_bnad)->stats.drv_stats._ctr)++)
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index cfc22a64157e..6a68e8d93309 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -67,10 +67,10 @@ bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
{
switch (asic_gen) {
case BFI_ASIC_GEN_CT:
- return (u32 *)(bfi_image_ct_cna + off);
+ return (bfi_image_ct_cna + off);
break;
case BFI_ASIC_GEN_CT2:
- return (u32 *)(bfi_image_ct2_cna + off);
+ return (bfi_image_ct2_cna + off);
break;
default:
return NULL;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 1466bc4e3dda..033064b7b576 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -179,13 +179,16 @@ static void macb_handle_link_change(struct net_device *dev)
spin_unlock_irqrestore(&bp->lock, flags);
if (status_change) {
- if (phydev->link)
+ if (phydev->link) {
+ netif_carrier_on(dev);
netdev_info(dev, "link up (%d/%s)\n",
phydev->speed,
phydev->duplex == DUPLEX_FULL ?
"Full" : "Half");
- else
+ } else {
+ netif_carrier_off(dev);
netdev_info(dev, "link down\n");
+ }
}
}
@@ -1033,6 +1036,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
+ /* carrier starts down */
+ netif_carrier_off(dev);
+
/* if the phy is not yet register, retry later*/
if (!bp->phy_dev)
return -EAGAIN;
@@ -1406,6 +1412,8 @@ static int __init macb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
+ netif_carrier_off(dev);
+
netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
dev->irq, dev->dev_addr);
@@ -1469,6 +1477,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
struct net_device *netdev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(netdev);
+ netif_carrier_off(netdev);
netif_device_detach(netdev);
clk_disable(bp->hclk);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 11f667f6131a..2b4b4f529ab4 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -264,7 +264,7 @@
#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
-#define XGMAC_OMR_RTC 0x00000010 /* RX Threshhold Ctrl */
+#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshhold Ctrl */
#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshhold Ctrl MASK */
/* XGMAC HW Features Register */
@@ -671,26 +671,23 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
p = priv->dma_rx + entry;
- if (priv->rx_skbuff[entry] != NULL)
- continue;
-
- skb = __skb_dequeue(&priv->rx_recycle);
- if (skb == NULL)
- skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
- if (unlikely(skb == NULL))
- break;
-
- priv->rx_skbuff[entry] = skb;
- paddr = dma_map_single(priv->device, skb->data,
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+ if (priv->rx_skbuff[entry] == NULL) {
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ paddr = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+ }
netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
priv->rx_head, priv->rx_tail);
priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
- /* Ensure descriptor is in memory before handing to h/w */
- wmb();
desc_set_rx_owner(p);
}
}
@@ -933,6 +930,7 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
priv->tx_tail = 0;
priv->tx_head = 0;
+ writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
@@ -972,7 +970,7 @@ static int xgmac_hw_init(struct net_device *dev)
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
/* XGMAC requires AXI bus init. This is a 'magic number' for now */
- writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);
+ writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
XGMAC_CONTROL_CAR;
@@ -984,7 +982,8 @@ static int xgmac_hw_init(struct net_device *dev)
writel(value, ioaddr + XGMAC_DMA_CONTROL);
/* Set the HW DMA mode and the COE */
- writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
+ writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
+ XGMAC_OMR_RTC_256,
ioaddr + XGMAC_OMR);
/* Reset the MMC counters */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index abb6ce7c1b7e..6505070abcfa 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3050,7 +3050,7 @@ static struct pci_error_handlers t3_err_handler = {
static void set_nqsets(struct adapter *adap)
{
int i, j = 0;
- int num_cpus = num_online_cpus();
+ int num_cpus = netif_get_num_default_rss_queues();
int hwports = adap->params.nports;
int nqsets = adap->msix_nvectors - 1;
@@ -3173,6 +3173,9 @@ static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
pi->iscsic.mac_addr[3] |= 0x80;
}
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
static int __devinit init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -3293,6 +3296,7 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;
netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_TX;
+ netdev->vlan_features |= netdev->features & VLAN_FEAT;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 65e4b280619a..2dbbcbb450d3 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -62,7 +62,9 @@ static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;
static void cxgb_neigh_update(struct neighbour *neigh);
-static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
+static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
+ struct dst_entry *new, struct neighbour *new_neigh,
+ const void *daddr);
static inline int offload_activated(struct t3cdev *tdev)
{
@@ -575,7 +577,7 @@ static void t3_process_tid_release_list(struct work_struct *work)
if (!skb) {
spin_lock_bh(&td->tid_release_lock);
p->ctx = (void *)td->tid_release_list;
- td->tid_release_list = (struct t3c_tid_entry *)p;
+ td->tid_release_list = p;
break;
}
mk_tid_release(skb, p - td->tid_maps.tid_tab);
@@ -968,8 +970,10 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
}
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
- cxgb_redirect(nr->old, nr->new);
- cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
+ cxgb_redirect(nr->old, nr->old_neigh,
+ nr->new, nr->new_neigh,
+ nr->daddr);
+ cxgb_neigh_update(nr->new_neigh);
break;
}
default:
@@ -1107,10 +1111,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
tdev->send(tdev, skb);
}
-static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
+static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh,
+ struct dst_entry *new, struct neighbour *new_neigh,
+ const void *daddr)
{
struct net_device *olddev, *newdev;
- struct neighbour *n;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1118,15 +1123,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
struct l2t_entry *e;
struct t3c_tid_entry *te;
- n = dst_get_neighbour_noref(old);
- if (!n)
- return;
- olddev = n->dev;
-
- n = dst_get_neighbour_noref(new);
- if (!n)
- return;
- newdev = n->dev;
+ olddev = old_neigh->dev;
+ newdev = new_neigh->dev;
if (!is_offloading(olddev))
return;
@@ -1144,7 +1142,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
}
/* Add new L2T entry */
- e = t3_l2t_get(tdev, new, newdev);
+ e = t3_l2t_get(tdev, new, newdev, daddr);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__func__);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 3fa3c8833ed7..8d53438638b2 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -299,7 +299,7 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
}
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
- struct net_device *dev)
+ struct net_device *dev, const void *daddr)
{
struct l2t_entry *e = NULL;
struct neighbour *neigh;
@@ -311,7 +311,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
int smt_idx;
rcu_read_lock();
- neigh = dst_get_neighbour_noref(dst);
+ neigh = dst_neigh_lookup(dst, daddr);
if (!neigh)
goto done_rcu;
@@ -360,6 +360,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
done_unlock:
write_unlock_bh(&d->lock);
done_rcu:
+ if (neigh)
+ neigh_release(neigh);
rcu_read_unlock();
return e;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c4e864369751..8cffcdfd5678 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -110,7 +110,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
- struct net_device *dev);
+ struct net_device *dev, const void *daddr);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index cfb60e1f51da..dd901c5061b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -2877,7 +2877,7 @@ static void sge_timer_tx(unsigned long data)
mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
}
-/*
+/**
* sge_timer_rx - perform periodic maintenance of an SGE qset
* @data: the SGE queue set to maintain
*
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 44ac2f40b644..bff8a3cdd3df 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -1076,7 +1076,7 @@ static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
return 0;
}
-/*
+/**
* t3_load_fw - download firmware
* @adapter: the adapter
* @fw_data: the firmware image to write
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e1f96fbb48c1..5ed49af23d6a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3493,8 +3493,8 @@ static void __devinit cfg_queues(struct adapter *adap)
*/
if (n10g)
q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
- if (q10g > num_online_cpus())
- q10g = num_online_cpus();
+ if (q10g > netif_get_num_default_rss_queues())
+ q10g = netif_get_num_default_rss_queues();
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e111d974afd8..8596acaa402b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -753,7 +753,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
end = (void *)q->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
+ *end = 0;
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 32e1dd566a14..fa947dfa4c30 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2010,7 +2010,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
-/*
+/**
* t4_mem_win_read_len - read memory through PCIE memory window
* @adap: the adapter
* @addr: address of first byte requested aligned on 32b.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 25e3308fc9d8..9dad56101e23 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -418,7 +418,7 @@ static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
* restart a TX Ethernet Queue which was stopped for lack of
* free TX Queue Descriptors ...
*/
- const struct cpl_sge_egr_update *p = (void *)cpl;
+ const struct cpl_sge_egr_update *p = cpl;
unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
struct sge *s = &adapter->sge;
struct sge_txq *tq;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 0bd585bba39d..f2d1ecdcaf98 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -934,7 +934,7 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
end = (void *)tq->desc + part1;
}
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
- *(u64 *)end = 0;
+ *end = 0;
}
/**
@@ -1323,8 +1323,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely((void *)sgl == (void *)tq->stat)) {
sgl = (void *)tq->desc;
- end = (void *)((void *)tq->desc +
- ((void *)end - (void *)tq->stat));
+ end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
}
write_sgl(skb, tq, sgl, end, 0, addr);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 8132c785cea8..ad1468b3ab91 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1300,8 +1300,6 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
}
- skb->dev = netdev;
-
if (vlan_stripped)
__vlan_hwaccel_put_tag(skb, vlan_tci);
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index d3cd489d11a2..f879e9224846 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -3973,7 +3973,7 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
tmp = srom_rd(aprom_addr, i);
*p++ = cpu_to_le16(tmp);
}
- de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
+ de4x5_dbg_srom(&lp->srom);
}
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c5c4c0e83bd1..d266c86a53f7 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.2.220u"
+#define DRV_VER "4.4.31.0u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -389,6 +389,7 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
+ struct delayed_work func_recovery_work;
u32 flags;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
@@ -396,9 +397,10 @@ struct be_adapter {
u32 *pmac_id; /* MAC addr handle used by BE card */
u32 beacon_state; /* for set_phys_id */
- bool eeh_err;
- bool ue_detected;
+ bool eeh_error;
bool fw_timeout;
+ bool hw_error;
+
u32 port_num;
bool promiscuous;
u32 function_mode;
@@ -435,6 +437,7 @@ struct be_adapter {
u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
u32 uc_macs; /* Count of secondary UC MAC programmed */
u32 msg_enable;
+ int be_get_temp_freq;
};
#define be_physfn(adapter) (!adapter->virtfn)
@@ -454,6 +457,9 @@ struct be_adapter {
#define lancer_chip(adapter) ((adapter->pdev->device == OC_DEVICE_ID3) || \
(adapter->pdev->device == OC_DEVICE_ID4))
+#define skyhawk_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID5)
+
+
#define be_roce_supported(adapter) ((adapter->if_type == SLI_INTF_TYPE_3 || \
adapter->sli_family == SKYHAWK_SLI_FAMILY) && \
(adapter->function_mode & RDMA_ENABLED))
@@ -573,6 +579,11 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
return val;
}
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+ return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
u32 addr;
@@ -593,7 +604,19 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
static inline bool be_error(struct be_adapter *adapter)
{
- return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+ return adapter->eeh_error || adapter->hw_error || adapter->fw_timeout;
+}
+
+static inline bool be_crit_error(struct be_adapter *adapter)
+{
+ return adapter->eeh_error || adapter->hw_error;
+}
+
+static inline void be_clear_all_error(struct be_adapter *adapter)
+{
+ adapter->eeh_error = false;
+ adapter->hw_error = false;
+ adapter->fw_timeout = false;
}
static inline bool be_is_wol_excluded(struct be_adapter *adapter)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 921c2082af4c..7fac97b4bb59 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,9 +19,6 @@
#include "be.h"
#include "be_cmds.h"
-/* Must be a power of 2 or else MODULO will BUG_ON */
-static int be_get_temp_freq = 64;
-
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
return wrb->payload.embedded_payload;
@@ -115,7 +112,7 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
}
} else {
if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
- be_get_temp_freq = 0;
+ adapter->be_get_temp_freq = 0;
if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
compl_status == MCC_STATUS_ILLEGAL_REQUEST)
@@ -144,6 +141,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
/* When link status changes, link speed must be re-queried from FW */
adapter->phy.link_speed = -1;
+ /* Ignore physical link event */
+ if (lancer_chip(adapter) &&
+ !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
+ return;
+
/* For the initial link status do not rely on the ASYNC event as
* it may not be received in some cases.
*/
@@ -352,7 +354,7 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
adapter->fw_timeout = true;
- be_detect_dump_ue(adapter);
+ be_detect_error(adapter);
return -1;
}
@@ -429,12 +431,65 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
return 0;
}
-int be_cmd_POST(struct be_adapter *adapter)
+int lancer_wait_ready(struct be_adapter *adapter)
+{
+#define SLIPORT_READY_TIMEOUT 30
+ u32 sliport_status;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ if (sliport_status & SLIPORT_STATUS_RDY_MASK)
+ break;
+
+ msleep(1000);
+ }
+
+ if (i == SLIPORT_READY_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status, err, reset_needed;
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ err = sliport_status & SLIPORT_STATUS_ERR_MASK;
+ reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
+ if (err && reset_needed) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+
+ /* check adapter has corrected the error */
+ status = lancer_wait_ready(adapter);
+ sliport_status = ioread32(adapter->db +
+ SLIPORT_STATUS_OFFSET);
+ sliport_status &= (SLIPORT_STATUS_ERR_MASK |
+ SLIPORT_STATUS_RN_MASK);
+ if (status || sliport_status)
+ status = -1;
+ } else if (err || reset_needed) {
+ status = -1;
+ }
+ }
+ return status;
+}
+
+int be_fw_wait_ready(struct be_adapter *adapter)
{
u16 stage;
int status, timeout = 0;
struct device *dev = &adapter->pdev->dev;
+ if (lancer_chip(adapter)) {
+ status = lancer_wait_ready(adapter);
+ return status;
+ }
+
do {
status = be_POST_stage_get(adapter, &stage);
if (status) {
@@ -565,6 +620,9 @@ int be_cmd_fw_init(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (lancer_chip(adapter))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -592,6 +650,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (lancer_chip(adapter))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -610,6 +671,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
mutex_unlock(&adapter->mbox_lock);
return status;
}
+
int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay)
{
@@ -1132,7 +1194,7 @@ err:
* Uses MCCQ
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
- u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
+ u32 *if_handle, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_create *req;
@@ -1152,17 +1214,13 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
- if (mac)
- memcpy(req->mac_addr, mac, ETH_ALEN);
- else
- req->pmac_invalid = true;
+
+ req->pmac_invalid = true;
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
*if_handle = le32_to_cpu(resp->interface_id);
- if (mac)
- *pmac_id = le32_to_cpu(resp->pmac_id);
}
err:
@@ -1210,9 +1268,6 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
- be_cmd_get_die_temperature(adapter);
-
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1581,7 +1636,8 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
/* Reset mcast promisc mode if already set by setting mask
* and not setting flags field
*/
- req->if_flags_mask |=
+ if (!lancer_chip(adapter) || be_physfn(adapter))
+ req->if_flags_mask |=
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
@@ -1692,6 +1748,20 @@ int be_cmd_reset_function(struct be_adapter *adapter)
struct be_cmd_req_hdr *req;
int status;
+ if (lancer_chip(adapter)) {
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+ status = lancer_test_and_set_rdy_state(adapter);
+ }
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter in non recoverable error\n");
+ }
+ return status;
+ }
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -1728,6 +1798,13 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
req->if_id = cpu_to_le32(adapter->if_handle);
req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
+
+ if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
+ req->hdr.version = 1;
+ req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6);
+ }
+
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
memcpy(req->cpu_table, rsstable, table_size);
memcpy(req->hash, myhash, sizeof(myhash));
@@ -1805,8 +1882,9 @@ err:
}
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_written, u8 *addn_status)
+ u32 data_size, u32 data_offset,
+ const char *obj_name, u32 *data_written,
+ u8 *change_status, u8 *addn_status)
{
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_write_object *req;
@@ -1862,10 +1940,12 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
status = adapter->flash_status;
resp = embedded_payload(wrb);
- if (!status)
+ if (!status) {
*data_written = le32_to_cpu(resp->actual_write_len);
- else
+ *change_status = resp->change_status;
+ } else {
*addn_status = resp->additional_status;
+ }
return status;
@@ -2330,8 +2410,8 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
- bool *pmac_id_active, u32 *pmac_id, u8 *mac)
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
@@ -2376,8 +2456,9 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
get_mac_list_cmd.va;
mac_count = resp->true_mac_count + resp->pseudo_mac_count;
/* Mac list returned could contain one or more active mac_ids
- * or one or more pseudo permanant mac addresses. If an active
- * mac_id is present, return first active mac_id found
+ * or one or more true or pseudo permanant mac addresses.
+ * If an active mac_id is present, return first active mac_id
+ * found.
*/
for (i = 0; i < mac_count; i++) {
struct get_list_macaddr *mac_entry;
@@ -2396,7 +2477,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
goto out;
}
}
- /* If no active mac_id found, return first pseudo mac addr */
+ /* If no active mac_id found, return first mac addr */
*pmac_id_active = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
ETH_ALEN);
@@ -2648,6 +2729,44 @@ err:
return status;
}
+int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_port_name *req;
+ int status;
+
+ if (!lancer_chip(adapter)) {
+ *port_name = adapter->hba_port_num + '0';
+ return 0;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
+ NULL);
+ req->hdr.version = 1;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
+ *port_name = resp->port_name[adapter->hba_port_num];
+ } else {
+ *port_name = adapter->hba_port_num + '0';
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b3f3fc3d1323..250f19b5f7b6 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -93,6 +93,7 @@ enum {
LINK_UP = 0x1
};
#define LINK_STATUS_MASK 0x1
+#define LOGICAL_LINK_STATUS_MASK 0x2
/* When the event code of an async trailer is link-state, the mcc_compl
* must be interpreted as follows
@@ -186,6 +187,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
+#define OPCODE_COMMON_GET_PORT_NAME 77
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -1081,13 +1083,25 @@ struct be_cmd_resp_query_fw_cfg {
u32 function_caps;
};
-/******************** RSS Config *******************/
-/* RSS types */
+/******************** RSS Config ****************************************/
+/* RSS type Input parameters used to compute RX hash
+ * RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
+ * RSS_ENABLE_TCP_IPV4 SRC IPv4, DST IPv4, TCP SRC PORT, TCP DST PORT
+ * RSS_ENABLE_IPV6 SRC IPv6, DST IPv6
+ * RSS_ENABLE_TCP_IPV6 SRC IPv6, DST IPv6, TCP SRC PORT, TCP DST PORT
+ * RSS_ENABLE_UDP_IPV4 SRC IPv4, DST IPv4, UDP SRC PORT, UDP DST PORT
+ * RSS_ENABLE_UDP_IPV6 SRC IPv6, DST IPv6, UDP SRC PORT, UDP DST PORT
+ *
+ * When multiple RSS types are enabled, HW picks the best hash policy
+ * based on the type of the received packet.
+ */
#define RSS_ENABLE_NONE 0x0
#define RSS_ENABLE_IPV4 0x1
#define RSS_ENABLE_TCP_IPV4 0x2
#define RSS_ENABLE_IPV6 0x4
#define RSS_ENABLE_TCP_IPV6 0x8
+#define RSS_ENABLE_UDP_IPV4 0x10
+#define RSS_ENABLE_UDP_IPV6 0x20
struct be_cmd_req_rss_config {
struct be_cmd_req_hdr hdr;
@@ -1163,6 +1177,8 @@ struct lancer_cmd_req_write_object {
u32 addr_high;
};
+#define LANCER_NO_RESET_NEEDED 0x00
+#define LANCER_FW_RESET_NEEDED 0x02
struct lancer_cmd_resp_write_object {
u8 opcode;
u8 subsystem;
@@ -1173,6 +1189,8 @@ struct lancer_cmd_resp_write_object {
u32 resp_len;
u32 actual_resp_len;
u32 actual_write_len;
+ u8 change_status;
+ u8 rsvd3[3];
};
/************************ Lancer Read FW info **************/
@@ -1502,6 +1520,17 @@ struct be_cmd_resp_get_hsw_config {
u32 rsvd;
};
+/******************* get port names ***************/
+struct be_cmd_req_get_port_name {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd0;
+};
+
+struct be_cmd_resp_get_port_name {
+ struct be_cmd_req_hdr hdr;
+ u8 port_name[4];
+};
+
/*************** HW Stats Get v1 **********************************/
#define BE_TXP_SW_SZ 48
struct be_port_rxf_stats_v1 {
@@ -1656,7 +1685,7 @@ struct be_cmd_req_set_ext_fat_caps {
};
extern int be_pci_fnum_get(struct be_adapter *adapter);
-extern int be_cmd_POST(struct be_adapter *adapter);
+extern int be_fw_wait_ready(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
u8 type, bool permanent, u32 if_handle, u32 pmac_id);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
@@ -1664,8 +1693,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
int pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
- u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
- u32 domain);
+ u32 en_flags, u32 *if_handle, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
@@ -1719,10 +1747,11 @@ extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
extern int lancer_cmd_write_object(struct be_adapter *adapter,
- struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset,
- const char *obj_name,
- u32 *data_written, u8 *addn_status);
+ struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset,
+ const char *obj_name,
+ u32 *data_written, u8 *change_status,
+ u8 *addn_status);
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 data_size, u32 data_offset, const char *obj_name,
u32 *data_read, u32 *eof, u8 *addn_status);
@@ -1745,14 +1774,15 @@ extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
extern int be_cmd_get_phy_info(struct be_adapter *adapter);
extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
-extern void be_detect_dump_ue(struct be_adapter *adapter);
+extern void be_detect_error(struct be_adapter *adapter);
extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
-extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
- bool *pmac_id_active, u32 *pmac_id, u8 *mac);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
+ bool *pmac_id_active, u32 *pmac_id,
+ u8 domain);
extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain);
extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
@@ -1765,4 +1795,7 @@ extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
extern int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd,
struct be_fat_conf_params *cfgs);
+extern int lancer_wait_ready(struct be_adapter *adapter);
+extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
+extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 63e51d476900..e34be1c7ae8a 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -648,7 +648,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
struct be_adapter *adapter = netdev_priv(netdev);
int status;
- if (ecmd->autoneg != 0)
+ if (ecmd->autoneg != adapter->phy.fc_autoneg)
return -EINVAL;
adapter->tx_fc = ecmd->tx_pause;
adapter->rx_fc = ecmd->rx_pause;
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index d9fb0c501fa1..b755f7061dce 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -45,20 +45,19 @@
#define POST_STAGE_ARMFW_RDY 0xc000 /* FW is done with POST */
-/* Lancer SLIPORT_CONTROL SLIPORT_STATUS registers */
+/* Lancer SLIPORT registers */
#define SLIPORT_STATUS_OFFSET 0x404
#define SLIPORT_CONTROL_OFFSET 0x408
#define SLIPORT_ERROR1_OFFSET 0x40C
#define SLIPORT_ERROR2_OFFSET 0x410
+#define PHYSDEV_CONTROL_OFFSET 0x414
#define SLIPORT_STATUS_ERR_MASK 0x80000000
#define SLIPORT_STATUS_RN_MASK 0x01000000
#define SLIPORT_STATUS_RDY_MASK 0x00800000
-
-
#define SLI_PORT_CONTROL_IP_MASK 0x08000000
-
-#define PCICFG_CUST_SCRATCHPAD_CSR 0x1EC
+#define PHYSDEV_CONTROL_FW_RESET_MASK 0x00000002
+#define PHYSDEV_CONTROL_INP_MASK 0x40000000
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 501dfa9c88ec..4d9677174490 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -155,7 +155,7 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
{
u32 reg, enabled;
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
@@ -201,7 +201,7 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
DB_EQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
if (arm)
@@ -220,7 +220,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
DB_CQ_RING_ID_EXT_MASK_SHIFT);
- if (adapter->eeh_err)
+ if (adapter->eeh_error)
return;
if (arm)
@@ -558,6 +558,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
wrb->frag_pa_hi = upper_32_bits(addr);
wrb->frag_pa_lo = addr & 0xFFFFFFFF;
wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
+ wrb->rsvd0 = 0;
}
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
@@ -576,6 +577,11 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
return vlan_tag;
}
+static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
+{
+ return vlan_tx_tag_present(skb) || adapter->pvid;
+}
+
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
@@ -703,33 +709,56 @@ dma_err:
return 0;
}
+static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u16 vlan_tag = 0;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ return skb;
+
+ if (vlan_tx_tag_present(skb)) {
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ __vlan_put_tag(skb, vlan_tag);
+ skb->vlan_tci = 0;
+ }
+
+ return skb;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
struct be_queue_info *txq = &txo->q;
+ struct iphdr *ip = NULL;
u32 wrb_cnt = 0, copied = 0;
- u32 start = txq->head;
+ u32 start = txq->head, eth_hdr_len;
bool dummy_wrb, stopped = false;
- /* For vlan tagged pkts, BE
- * 1) calculates checksum even when CSO is not requested
- * 2) calculates checksum wrongly for padded pkt less than
- * 60 bytes long.
- * As a workaround disable TX vlan offloading in such cases.
+ eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
+ VLAN_ETH_HLEN : ETH_HLEN;
+
+ /* HW has a bug which considers padding bytes as legal
+ * and modifies the IPv4 hdr's 'tot_len' field
*/
- if (unlikely(vlan_tx_tag_present(skb) &&
- (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (unlikely(!skb))
- goto tx_drop;
+ if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
+ is_ipv4_pkt(skb)) {
+ ip = (struct iphdr *)ip_hdr(skb);
+ pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
+ }
- skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+ /* HW has a bug wherein it will calculate CSUM for VLAN
+ * pkts even though it is disabled.
+ * Manually insert VLAN in pkt.
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL &&
+ be_vlan_tag_chk(adapter, skb)) {
+ skb = be_insert_vlan_in_pkt(adapter, skb);
if (unlikely(!skb))
goto tx_drop;
-
- skb->vlan_tci = 0;
}
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
@@ -786,19 +815,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
* A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
* If the user configures more, place BE in vlan promiscuous mode.
*/
-static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
+static int be_vid_config(struct be_adapter *adapter)
{
- struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
- u16 vtag[BE_NUM_VLANS_SUPPORTED];
- u16 ntags = 0, i;
+ u16 vids[BE_NUM_VLANS_SUPPORTED];
+ u16 num = 0, i;
int status = 0;
- if (vf) {
- vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
- status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
- 1, 1, 0);
- }
-
/* No need to further configure vids if in promiscuous mode */
if (adapter->promiscuous)
return 0;
@@ -809,10 +831,10 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
/* Construct VLAN Table to give to HW */
for (i = 0; i < VLAN_N_VID; i++)
if (adapter->vlan_tag[i])
- vtag[ntags++] = cpu_to_le16(i);
+ vids[num++] = cpu_to_le16(i);
status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vtag, ntags, 1, 0);
+ vids, num, 1, 0);
/* Set to VLAN promisc mode as setting VLAN filter failed */
if (status) {
@@ -841,7 +863,7 @@ static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
adapter->vlan_tag[vid] = 1;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
- status = be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added++;
@@ -863,7 +885,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
adapter->vlan_tag[vid] = 0;
if (adapter->vlans_added <= adapter->max_vlans)
- status = be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
@@ -890,7 +912,7 @@ static void be_set_rx_mode(struct net_device *netdev)
be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
if (adapter->vlans_added)
- be_vid_config(adapter, false, 0);
+ be_vid_config(adapter);
}
/* Enable multicast promisc if num configured exceeds what we support */
@@ -1057,13 +1079,16 @@ static int be_find_vfs(struct be_adapter *adapter, int vf_state)
u16 offset, stride;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
while (dev) {
vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
- if (dev->is_virtfn && dev->devfn == vf_fn) {
+ if (dev->is_virtfn && dev->devfn == vf_fn &&
+ dev->bus->number == pdev->bus->number) {
vfs++;
if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
assigned_vfs++;
@@ -1203,16 +1228,16 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
/* Copy data in the first descriptor of this completion */
curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
- /* Copy the header portion into skb_data */
- hdr_len = min(BE_HDR_LEN, curr_frag_len);
- memcpy(skb->data, start, hdr_len);
skb->len = curr_frag_len;
if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
+ memcpy(skb->data, start, curr_frag_len);
/* Complete packet has now been moved to data */
put_page(page_info->page);
skb->data_len = 0;
skb->tail += curr_frag_len;
} else {
+ hdr_len = ETH_HLEN;
+ memcpy(skb->data, start, hdr_len);
skb_shinfo(skb)->nr_frags = 1;
skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
@@ -1709,9 +1734,10 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
int i;
for_all_evt_queues(adapter, eqo, i) {
- be_eq_clean(eqo);
- if (eqo->q.created)
+ if (eqo->q.created) {
+ be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ }
be_queue_free(adapter, &eqo->q);
}
}
@@ -1898,6 +1924,12 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
*/
adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
num_irqs(adapter) + 1 : 1;
+ if (adapter->num_rx_qs != MAX_RX_QS) {
+ rtnl_lock();
+ netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_qs);
+ rtnl_unlock();
+ }
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
@@ -2067,13 +2099,13 @@ int be_poll(struct napi_struct *napi, int budget)
return max_work;
}
-void be_detect_dump_ue(struct be_adapter *adapter)
+void be_detect_error(struct be_adapter *adapter)
{
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
- if (adapter->eeh_err || adapter->ue_detected)
+ if (be_crit_error(adapter))
return;
if (lancer_chip(adapter)) {
@@ -2094,16 +2126,24 @@ void be_detect_dump_ue(struct be_adapter *adapter)
pci_read_config_dword(adapter->pdev,
PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
- ue_lo = (ue_lo & (~ue_lo_mask));
- ue_hi = (ue_hi & (~ue_hi_mask));
+ ue_lo = (ue_lo & ~ue_lo_mask);
+ ue_hi = (ue_hi & ~ue_hi_mask);
}
if (ue_lo || ue_hi ||
sliport_status & SLIPORT_STATUS_ERR_MASK) {
- adapter->ue_detected = true;
- adapter->eeh_err = true;
+ adapter->hw_error = true;
+ dev_err(&adapter->pdev->dev,
+ "Error detected in the card\n");
+ }
+
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ dev_err(&adapter->pdev->dev,
+ "ERR: sliport status 0x%x\n", sliport_status);
+ dev_err(&adapter->pdev->dev,
+ "ERR: sliport error1 0x%x\n", sliport_err1);
dev_err(&adapter->pdev->dev,
- "Unrecoverable error in the card\n");
+ "ERR: sliport error2 0x%x\n", sliport_err2);
}
if (ue_lo) {
@@ -2113,6 +2153,7 @@ void be_detect_dump_ue(struct be_adapter *adapter)
"UE: %s bit set\n", ue_status_low_desc[i]);
}
}
+
if (ue_hi) {
for (i = 0; ue_hi; ue_hi >>= 1, i++) {
if (ue_hi & 1)
@@ -2121,14 +2162,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
}
}
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "sliport status 0x%x\n", sliport_status);
- dev_err(&adapter->pdev->dev,
- "sliport error1 0x%x\n", sliport_err1);
- dev_err(&adapter->pdev->dev,
- "sliport error2 0x%x\n", sliport_err2);
- }
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -2141,12 +2174,14 @@ static void be_msix_disable(struct be_adapter *adapter)
static uint be_num_rss_want(struct be_adapter *adapter)
{
+ u32 num = 0;
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
!sriov_want(adapter) && be_physfn(adapter) &&
- !be_is_mc(adapter))
- return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
- else
- return 0;
+ !be_is_mc(adapter)) {
+ num = (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
+ }
+ return num;
}
static void be_msix_enable(struct be_adapter *adapter)
@@ -2540,11 +2575,7 @@ static int be_clear(struct be_adapter *adapter)
be_tx_queues_destroy(adapter);
be_evt_queues_destroy(adapter);
- /* tell fw we're done with firing cmds */
- be_cmd_fw_clean(adapter);
-
be_msix_disable(adapter);
- pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
return 0;
}
@@ -2602,8 +2633,8 @@ static int be_vf_setup(struct be_adapter *adapter)
cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST;
for_all_vfs(adapter, vf_cfg, vf) {
- status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
- &vf_cfg->if_handle, NULL, vf + 1);
+ status = be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
goto err;
}
@@ -2643,29 +2674,43 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->phy.forced_port_speed = -1;
}
-static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
+static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
+ bool *active_mac, u32 *pmac_id)
{
- u32 pmac_id;
- int status;
- bool pmac_id_active;
+ int status = 0;
- status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
- &pmac_id, mac);
- if (status != 0)
- goto do_none;
+ if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
+ memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
+ if (!lancer_chip(adapter) && !be_physfn(adapter))
+ *active_mac = true;
+ else
+ *active_mac = false;
- if (pmac_id_active) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK,
- false, adapter->if_handle, pmac_id);
+ return status;
+ }
- if (!status)
- adapter->pmac_id[0] = pmac_id;
+ if (lancer_chip(adapter)) {
+ status = be_cmd_get_mac_from_list(adapter, mac,
+ active_mac, pmac_id, 0);
+ if (*active_mac) {
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK,
+ false, if_handle,
+ *pmac_id);
+ }
+ } else if (be_physfn(adapter)) {
+ /* For BE3, for PF get permanent MAC */
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, true,
+ 0, 0);
+ *active_mac = false;
} else {
- status = be_cmd_pmac_add(adapter, mac,
- adapter->if_handle, &adapter->pmac_id[0], 0);
+ /* For BE3, for VF get soft MAC assigned by PF*/
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ if_handle, 0);
+ *active_mac = true;
}
-do_none:
return status;
}
@@ -2686,12 +2731,12 @@ static int be_get_config(struct be_adapter *adapter)
static int be_setup(struct be_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
struct device *dev = &adapter->pdev->dev;
u32 cap_flags, en_flags;
u32 tx_fc, rx_fc;
int status;
u8 mac[ETH_ALEN];
+ bool active_mac;
be_setup_init(adapter);
@@ -2717,14 +2762,6 @@ static int be_setup(struct be_adapter *adapter)
if (status)
goto err;
- memset(mac, 0, ETH_ALEN);
- status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
- true /*permanent */, 0, 0);
- if (status)
- return status;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
-
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
@@ -2734,27 +2771,36 @@ static int be_setup(struct be_adapter *adapter)
cap_flags |= BE_IF_FLAGS_RSS;
en_flags |= BE_IF_FLAGS_RSS;
}
+
+ if (lancer_chip(adapter) && !be_physfn(adapter)) {
+ en_flags = BE_IF_FLAGS_UNTAGGED |
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+ cap_flags = en_flags;
+ }
+
status = be_cmd_if_create(adapter, cap_flags, en_flags,
- netdev->dev_addr, &adapter->if_handle,
- &adapter->pmac_id[0], 0);
+ &adapter->if_handle, 0);
if (status != 0)
goto err;
- /* The VF's permanent mac queried from card is incorrect.
- * For BEx: Query the mac configued by the PF using if_handle
- * For Lancer: Get and use mac_list to obtain mac address.
- */
- if (!be_physfn(adapter)) {
- if (lancer_chip(adapter))
- status = be_add_mac_from_list(adapter, mac);
- else
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false,
- adapter->if_handle, 0);
- if (!status) {
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
+ memset(mac, 0, ETH_ALEN);
+ active_mac = false;
+ status = be_get_mac_addr(adapter, mac, adapter->if_handle,
+ &active_mac, &adapter->pmac_id[0]);
+ if (status != 0)
+ goto err;
+
+ if (!active_mac) {
+ status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ if (status != 0)
+ goto err;
+ }
+
+ if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
}
status = be_tx_qs_create(adapter);
@@ -2763,7 +2809,8 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
- be_vid_config(adapter, false, 0);
+ if (adapter->vlans_added)
+ be_vid_config(adapter);
be_set_rx_mode(adapter->netdev);
@@ -2773,8 +2820,6 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- pcie_set_readrq(adapter->pdev, 4096);
-
if (be_physfn(adapter) && num_vfs) {
if (adapter->dev_num_vfs)
be_vf_setup(adapter);
@@ -2788,8 +2833,6 @@ static int be_setup(struct be_adapter *adapter)
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
-
- pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
return 0;
err:
be_clear(adapter);
@@ -3033,6 +3076,40 @@ static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
return 0;
}
+static int lancer_wait_idle(struct be_adapter *adapter)
+{
+#define SLIPORT_IDLE_TIMEOUT 30
+ u32 reg_val;
+ int status = 0, i;
+
+ for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
+ reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
+ if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
+ break;
+
+ ssleep(1);
+ }
+
+ if (i == SLIPORT_IDLE_TIMEOUT)
+ status = -1;
+
+ return status;
+}
+
+static int lancer_fw_reset(struct be_adapter *adapter)
+{
+ int status = 0;
+
+ status = lancer_wait_idle(adapter);
+ if (status)
+ return status;
+
+ iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
+ PHYSDEV_CONTROL_OFFSET);
+
+ return status;
+}
+
static int lancer_fw_download(struct be_adapter *adapter,
const struct firmware *fw)
{
@@ -3047,6 +3124,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
u32 offset = 0;
int status = 0;
u8 add_status = 0;
+ u8 change_status;
if (!IS_ALIGNED(fw->size, sizeof(u32))) {
dev_err(&adapter->pdev->dev,
@@ -3079,9 +3157,10 @@ static int lancer_fw_download(struct be_adapter *adapter,
memcpy(dest_image_ptr, data_ptr, chunk_size);
status = lancer_cmd_write_object(adapter, &flash_cmd,
- chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &add_status);
-
+ chunk_size, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
if (status)
break;
@@ -3093,8 +3172,10 @@ static int lancer_fw_download(struct be_adapter *adapter,
if (!status) {
/* Commit the FW written */
status = lancer_cmd_write_object(adapter, &flash_cmd,
- 0, offset, LANCER_FW_DOWNLOAD_LOCATION,
- &data_written, &add_status);
+ 0, offset,
+ LANCER_FW_DOWNLOAD_LOCATION,
+ &data_written, &change_status,
+ &add_status);
}
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
@@ -3107,6 +3188,20 @@ static int lancer_fw_download(struct be_adapter *adapter,
goto lancer_fw_exit;
}
+ if (change_status == LANCER_FW_RESET_NEEDED) {
+ status = lancer_fw_reset(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter busy for FW reset.\n"
+ "New FW will not be active.\n");
+ goto lancer_fw_exit;
+ }
+ } else if (change_status != LANCER_NO_RESET_NEEDED) {
+ dev_err(&adapter->pdev->dev,
+ "System reboot required for new FW"
+ " to be active\n");
+ }
+
dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
return status;
@@ -3435,10 +3530,15 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_roce_dev_remove(adapter);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
unregister_netdev(adapter->netdev);
be_clear(adapter);
+ /* tell fw we're done with firing cmds */
+ be_cmd_fw_clean(adapter);
+
be_stats_cleanup(adapter);
be_ctrl_cleanup(adapter);
@@ -3530,6 +3630,9 @@ static int be_get_initial_config(struct be_adapter *adapter)
if (be_is_wol_supported(adapter))
adapter->wol = true;
+ /* Must be a power of 2 or else MODULO will BUG_ON */
+ adapter->be_get_temp_freq = 64;
+
level = be_get_fw_log_level(adapter);
adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
@@ -3585,101 +3688,68 @@ static int be_dev_type_check(struct be_adapter *adapter)
return 0;
}
-static int lancer_wait_ready(struct be_adapter *adapter)
+static int lancer_recover_func(struct be_adapter *adapter)
{
-#define SLIPORT_READY_TIMEOUT 30
- u32 sliport_status;
- int status = 0, i;
+ int status;
- for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- if (sliport_status & SLIPORT_STATUS_RDY_MASK)
- break;
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status)
+ goto err;
- msleep(1000);
- }
+ if (netif_running(adapter->netdev))
+ be_close(adapter->netdev);
- if (i == SLIPORT_READY_TIMEOUT)
- status = -1;
+ be_clear(adapter);
- return status;
-}
+ adapter->hw_error = false;
+ adapter->fw_timeout = false;
-static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
-{
- int status;
- u32 sliport_status, err, reset_needed;
- status = lancer_wait_ready(adapter);
- if (!status) {
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
- err = sliport_status & SLIPORT_STATUS_ERR_MASK;
- reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
- if (err && reset_needed) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
-
- /* check adapter has corrected the error */
- status = lancer_wait_ready(adapter);
- sliport_status = ioread32(adapter->db +
- SLIPORT_STATUS_OFFSET);
- sliport_status &= (SLIPORT_STATUS_ERR_MASK |
- SLIPORT_STATUS_RN_MASK);
- if (status || sliport_status)
- status = -1;
- } else if (err || reset_needed) {
- status = -1;
- }
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(adapter->netdev)) {
+ status = be_open(adapter->netdev);
+ if (status)
+ goto err;
}
+
+ dev_err(&adapter->pdev->dev,
+ "Adapter SLIPORT recovery succeeded\n");
+ return 0;
+err:
+ dev_err(&adapter->pdev->dev,
+ "Adapter SLIPORT recovery failed\n");
+
return status;
}
-static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+static void be_func_recovery_task(struct work_struct *work)
{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, func_recovery_work.work);
int status;
- u32 sliport_status;
-
- if (adapter->eeh_err || adapter->ue_detected)
- return;
- sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+ be_detect_error(adapter);
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "Adapter in error state."
- "Trying to recover.\n");
+ if (adapter->hw_error && lancer_chip(adapter)) {
- status = lancer_test_and_set_rdy_state(adapter);
- if (status)
- goto err;
+ if (adapter->eeh_error)
+ goto out;
+ rtnl_lock();
netif_device_detach(adapter->netdev);
+ rtnl_unlock();
- if (netif_running(adapter->netdev))
- be_close(adapter->netdev);
-
- be_clear(adapter);
-
- adapter->fw_timeout = false;
-
- status = be_setup(adapter);
- if (status)
- goto err;
-
- if (netif_running(adapter->netdev)) {
- status = be_open(adapter->netdev);
- if (status)
- goto err;
- }
-
- netif_device_attach(adapter->netdev);
+ status = lancer_recover_func(adapter);
- dev_err(&adapter->pdev->dev,
- "Adapter error recovery succeeded\n");
+ if (!status)
+ netif_device_attach(adapter->netdev);
}
- return;
-err:
- dev_err(&adapter->pdev->dev,
- "Adapter error recovery failed\n");
+
+out:
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
}
static void be_worker(struct work_struct *work)
@@ -3690,11 +3760,6 @@ static void be_worker(struct work_struct *work)
struct be_eq_obj *eqo;
int i;
- if (lancer_chip(adapter))
- lancer_test_and_recover_fn_err(adapter);
-
- be_detect_dump_ue(adapter);
-
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
@@ -3710,6 +3775,9 @@ static void be_worker(struct work_struct *work)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
+ if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
for_all_rx_queues(adapter, rxo, i) {
if (rxo->rx_post_starved) {
rxo->rx_post_starved = false;
@@ -3727,10 +3795,7 @@ reschedule:
static bool be_reset_required(struct be_adapter *adapter)
{
- u32 reg;
-
- pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
- return reg;
+ return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
}
static int __devinit be_probe(struct pci_dev *pdev,
@@ -3739,6 +3804,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
int status = 0;
struct be_adapter *adapter;
struct net_device *netdev;
+ char port_name;
status = pci_enable_device(pdev);
if (status)
@@ -3749,7 +3815,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto disable_dev;
pci_set_master(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
+ netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
if (netdev == NULL) {
status = -ENOMEM;
goto rel_reg;
@@ -3780,22 +3846,9 @@ static int __devinit be_probe(struct pci_dev *pdev,
if (status)
goto free_netdev;
- if (lancer_chip(adapter)) {
- status = lancer_wait_ready(adapter);
- if (!status) {
- iowrite32(SLI_PORT_CONTROL_IP_MASK,
- adapter->db + SLIPORT_CONTROL_OFFSET);
- status = lancer_test_and_set_rdy_state(adapter);
- }
- if (status) {
- dev_err(&pdev->dev, "Adapter in non recoverable error\n");
- goto ctrl_clean;
- }
- }
-
/* sync up with fw's ready state */
if (be_physfn(adapter)) {
- status = be_cmd_POST(adapter);
+ status = be_fw_wait_ready(adapter);
if (status)
goto ctrl_clean;
}
@@ -3826,6 +3879,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto stats_clean;
INIT_DELAYED_WORK(&adapter->work, be_worker);
+ INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
adapter->rx_fc = adapter->tx_fc = true;
status = be_setup(adapter);
@@ -3839,8 +3893,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
be_roce_dev_add(adapter);
- dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
- adapter->port_num);
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
+
+ be_cmd_query_port_name(adapter, &port_name);
+
+ dev_info(&pdev->dev, "%s: %s port %c\n", netdev->name, nic_name(pdev),
+ port_name);
return 0;
@@ -3872,6 +3931,8 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
if (adapter->wol)
be_setup_wol(adapter, true);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
netif_device_detach(netdev);
if (netif_running(netdev)) {
rtnl_lock();
@@ -3912,6 +3973,9 @@ static int be_resume(struct pci_dev *pdev)
be_open(netdev);
rtnl_unlock();
}
+
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
netif_device_attach(netdev);
if (adapter->wol)
@@ -3931,6 +3995,7 @@ static void be_shutdown(struct pci_dev *pdev)
return;
cancel_delayed_work_sync(&adapter->work);
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
netif_device_detach(adapter->netdev);
@@ -3950,9 +4015,13 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
dev_err(&adapter->pdev->dev, "EEH error detected\n");
- adapter->eeh_err = true;
+ adapter->eeh_error = true;
+ cancel_delayed_work_sync(&adapter->func_recovery_work);
+
+ rtnl_lock();
netif_device_detach(netdev);
+ rtnl_unlock();
if (netif_running(netdev)) {
rtnl_lock();
@@ -3980,9 +4049,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
int status;
dev_info(&adapter->pdev->dev, "EEH reset\n");
- adapter->eeh_err = false;
- adapter->ue_detected = false;
- adapter->fw_timeout = false;
+ be_clear_all_error(adapter);
status = pci_enable_device(pdev);
if (status)
@@ -3993,7 +4060,7 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
/* Check if card is ok and fw is ready */
- status = be_cmd_POST(adapter);
+ status = be_fw_wait_ready(adapter);
if (status)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4015,6 +4082,10 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
+ status = be_cmd_reset_function(adapter);
+ if (status)
+ goto err;
+
status = be_setup(adapter);
if (status)
goto err;
@@ -4024,6 +4095,9 @@ static void be_eeh_resume(struct pci_dev *pdev)
if (status)
goto err;
}
+
+ schedule_delayed_work(&adapter->func_recovery_work,
+ msecs_to_jiffies(1000));
netif_device_attach(netdev);
return;
err:
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index a38167810546..94b7bfcdb24e 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -902,7 +902,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
};
/**
- * ethoc_probe() - initialize OpenCores ethernet MAC
+ * ethoc_probe - initialize OpenCores ethernet MAC
* pdev: platform device
*/
static int __devinit ethoc_probe(struct platform_device *pdev)
@@ -1057,7 +1057,7 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
/* Check the MAC again for validity, if it still isn't choose and
* program a random one. */
if (!is_valid_ether_addr(netdev->dev_addr)) {
- random_ether_addr(netdev->dev_addr);
+ eth_random_addr(netdev->dev_addr);
random_mac = true;
}
@@ -1140,7 +1140,7 @@ out:
}
/**
- * ethoc_remove() - shutdown OpenCores ethernet MAC
+ * ethoc_remove - shutdown OpenCores ethernet MAC
* @pdev: platform device
*/
static int __devexit ethoc_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 16b07048274c..74d749e29aab 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -479,9 +479,14 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
rxdes = ftgmac100_current_rxdes(priv);
} while (!done);
- if (skb->len <= 64)
+ /* Small frames are copied into linear part of skb to free one page */
+ if (skb->len <= 128) {
skb->truesize -= PAGE_SIZE;
- __pskb_pull_tail(skb, min(skb->len, 64U));
+ __pskb_pull_tail(skb, skb->len);
+ } else {
+ /* We pull the minimum amount into linear part */
+ __pskb_pull_tail(skb, ETH_HLEN);
+ }
skb->protocol = eth_type_trans(skb, netdev);
netdev->stats.rx_packets++;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 829b1092fd78..b901a01e3fa5 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -441,11 +441,14 @@ static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed)
skb->len += length;
skb->data_len += length;
- /* page might be freed in __pskb_pull_tail() */
- if (length > 64)
+ if (length > 128) {
skb->truesize += PAGE_SIZE;
- __pskb_pull_tail(skb, min(length, 64));
-
+ /* We pull the minimum amount into linear part */
+ __pskb_pull_tail(skb, ETH_HLEN);
+ } else {
+ /* Small frames are copied into linear part to free one page */
+ __pskb_pull_tail(skb, length);
+ }
ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC);
ftmac100_rx_pointer_advance(priv);
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index ff7f4c5115a1..fffd20528b5d 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -49,6 +49,7 @@
#include <linux/of_gpio.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
#include <asm/cacheflush.h>
@@ -1388,8 +1389,8 @@ fec_set_mac_address(struct net_device *ndev, void *p)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * fec_poll_controller: FEC Poll controller function
+/**
+ * fec_poll_controller - FEC Poll controller function
* @dev: The FEC network adapter
*
* Polled functionality used by netconsole and others in non interrupt mode
@@ -1506,18 +1507,25 @@ static int __devinit fec_get_phy_mode_dt(struct platform_device *pdev)
static void __devinit fec_reset_phy(struct platform_device *pdev)
{
int err, phy_reset;
+ int msec = 1;
struct device_node *np = pdev->dev.of_node;
if (!np)
return;
+ of_property_read_u32(np, "phy-reset-duration", &msec);
+ /* A sane reset duration should not be longer than 1s */
+ if (msec > 1000)
+ msec = 1;
+
phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- err = gpio_request_one(phy_reset, GPIOF_OUT_INIT_LOW, "phy-reset");
+ err = devm_gpio_request_one(&pdev->dev, phy_reset,
+ GPIOF_OUT_INIT_LOW, "phy-reset");
if (err) {
pr_debug("FEC: failed to get gpio phy-reset: %d\n", err);
return;
}
- msleep(1);
+ msleep(msec);
gpio_set_value(phy_reset, 1);
}
#else /* CONFIG_OF */
@@ -1546,6 +1554,7 @@ fec_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
static int dev_id;
struct pinctrl *pinctrl;
+ struct regulator *reg_phy;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
@@ -1593,8 +1602,6 @@ fec_probe(struct platform_device *pdev)
fep->phy_interface = ret;
}
- fec_reset_phy(pdev);
-
for (i = 0; i < FEC_IRQ_NUM; i++) {
irq = platform_get_irq(pdev, i);
if (irq < 0) {
@@ -1634,6 +1641,18 @@ fec_probe(struct platform_device *pdev)
clk_prepare_enable(fep->clk_ahb);
clk_prepare_enable(fep->clk_ipg);
+ reg_phy = devm_regulator_get(&pdev->dev, "phy");
+ if (!IS_ERR(reg_phy)) {
+ ret = regulator_enable(reg_phy);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable phy regulator: %d\n", ret);
+ goto failed_regulator;
+ }
+ }
+
+ fec_reset_phy(pdev);
+
ret = fec_enet_init(ndev);
if (ret)
goto failed_init;
@@ -1655,6 +1674,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
+failed_regulator:
clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
failed_pin:
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index f7f0bf5d037b..9527b28d70d1 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -47,6 +47,9 @@
#include "gianfar.h"
#include "fsl_pq_mdio.h"
+/* Number of microseconds to wait for an MII register to respond */
+#define MII_TIMEOUT 1000
+
struct fsl_pq_mdio_priv {
void __iomem *map;
struct fsl_pq_mdio __iomem *regs;
@@ -64,6 +67,8 @@ struct fsl_pq_mdio_priv {
int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
int regnum, u16 value)
{
+ u32 status;
+
/* Set the PHY address and the register address we want to write */
out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -71,10 +76,10 @@ int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
out_be32(&regs->miimcon, value);
/* Wait for the transaction to finish */
- while (in_be32(&regs->miimind) & MIIMIND_BUSY)
- cpu_relax();
+ status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
+ MII_TIMEOUT, 0);
- return 0;
+ return status ? 0 : -ETIMEDOUT;
}
/*
@@ -91,6 +96,7 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
int mii_id, int regnum)
{
u16 value;
+ u32 status;
/* Set the PHY address and the register address we want to read */
out_be32(&regs->miimadd, (mii_id << 8) | regnum);
@@ -99,9 +105,12 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
out_be32(&regs->miimcom, 0);
out_be32(&regs->miimcom, MII_READ_COMMAND);
- /* Wait for the transaction to finish */
- while (in_be32(&regs->miimind) & (MIIMIND_NOTVALID | MIIMIND_BUSY))
- cpu_relax();
+ /* Wait for the transaction to finish, normally less than 100us */
+ status = spin_event_timeout(!(in_be32(&regs->miimind) &
+ (MIIMIND_NOTVALID | MIIMIND_BUSY)),
+ MII_TIMEOUT, 0);
+ if (!status)
+ return -ETIMEDOUT;
/* Grab the value of the register from miimstat */
value = in_be32(&regs->miimstat);
@@ -144,7 +153,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
static int fsl_pq_mdio_reset(struct mii_bus *bus)
{
struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
- int timeout = PHY_INIT_TIMEOUT;
+ u32 status;
mutex_lock(&bus->mdio_lock);
@@ -155,12 +164,12 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
out_be32(&regs->miimcfg, MIIMCFG_INIT_VALUE);
/* Wait until the bus is free */
- while ((in_be32(&regs->miimind) & MIIMIND_BUSY) && timeout--)
- cpu_relax();
+ status = spin_event_timeout(!(in_be32(&regs->miimind) & MIIMIND_BUSY),
+ MII_TIMEOUT, 0);
mutex_unlock(&bus->mdio_lock);
- if (timeout < 0) {
+ if (!status) {
printk(KERN_ERR "%s: The MII Bus is stuck!\n",
bus->name);
return -EBUSY;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 0741aded9eb0..4605f7246687 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1,5 +1,4 @@
-/*
- * drivers/net/ethernet/freescale/gianfar.c
+/* drivers/net/ethernet/freescale/gianfar.c
*
* Gianfar Ethernet Driver
* This driver is designed for the non-CPM ethernet controllers
@@ -114,7 +113,7 @@ static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb);
+ struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -266,8 +265,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
tx_queue->tx_bd_dma_base = addr;
tx_queue->dev = ndev;
/* enet DMA only understands physical addresses */
- addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
- vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
+ addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
+ vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
}
/* Start the rx descriptor ring where the tx ring leaves off */
@@ -276,15 +275,16 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
rx_queue->rx_bd_base = vaddr;
rx_queue->rx_bd_dma_base = addr;
rx_queue->dev = ndev;
- addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
- vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
+ addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
+ vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
}
/* Setup the skbuff rings */
for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i];
tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
- tx_queue->tx_ring_size, GFP_KERNEL);
+ tx_queue->tx_ring_size,
+ GFP_KERNEL);
if (!tx_queue->tx_skbuff) {
netif_err(priv, ifup, ndev,
"Could not allocate tx_skbuff\n");
@@ -298,7 +298,8 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
- rx_queue->rx_ring_size, GFP_KERNEL);
+ rx_queue->rx_ring_size,
+ GFP_KERNEL);
if (!rx_queue->rx_skbuff) {
netif_err(priv, ifup, ndev,
@@ -327,15 +328,15 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
int i;
baddr = &regs->tbase0;
- for(i = 0; i < priv->num_tx_queues; i++) {
+ for (i = 0; i < priv->num_tx_queues; i++) {
gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
- baddr += 2;
+ baddr += 2;
}
baddr = &regs->rbase0;
- for(i = 0; i < priv->num_rx_queues; i++) {
+ for (i = 0; i < priv->num_rx_queues; i++) {
gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
- baddr += 2;
+ baddr += 2;
}
}
@@ -405,7 +406,8 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_write(&regs->attreli, attrs);
/* Start with defaults, and add stashing or locking
- * depending on the approprate variables */
+ * depending on the approprate variables
+ */
attrs = ATTR_INIT_SETTINGS;
if (priv->bd_stash_en)
@@ -426,16 +428,16 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
unsigned long tx_packets = 0, tx_bytes = 0;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++) {
rx_packets += priv->rx_queue[i]->stats.rx_packets;
- rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
}
dev->stats.rx_packets = rx_packets;
- dev->stats.rx_bytes = rx_bytes;
+ dev->stats.rx_bytes = rx_bytes;
dev->stats.rx_dropped = rx_dropped;
for (i = 0; i < priv->num_tx_queues; i++) {
@@ -443,7 +445,7 @@ static struct net_device_stats *gfar_get_stats(struct net_device *dev)
tx_packets += priv->tx_queue[i]->stats.tx_packets;
}
- dev->stats.tx_bytes = tx_bytes;
+ dev->stats.tx_bytes = tx_bytes;
dev->stats.tx_packets = tx_packets;
return &dev->stats;
@@ -468,7 +470,7 @@ static const struct net_device_ops gfar_netdev_ops = {
void lock_rx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
spin_lock(&priv->rx_queue[i]->rxlock);
@@ -476,7 +478,7 @@ void lock_rx_qs(struct gfar_private *priv)
void lock_tx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
spin_lock(&priv->tx_queue[i]->txlock);
@@ -484,7 +486,7 @@ void lock_tx_qs(struct gfar_private *priv)
void unlock_rx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
spin_unlock(&priv->rx_queue[i]->rxlock);
@@ -492,7 +494,7 @@ void unlock_rx_qs(struct gfar_private *priv)
void unlock_tx_qs(struct gfar_private *priv)
{
- int i = 0x0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
spin_unlock(&priv->tx_queue[i]->txlock);
@@ -508,13 +510,13 @@ static bool gfar_is_vlan_on(struct gfar_private *priv)
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
return gfar_is_vlan_on(priv) ||
- (priv->ndev->features & NETIF_F_RXCSUM) ||
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
+ (priv->ndev->features & NETIF_F_RXCSUM) ||
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
static void free_tx_pointers(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_tx_queues; i++)
kfree(priv->tx_queue[i]);
@@ -522,7 +524,7 @@ static void free_tx_pointers(struct gfar_private *priv)
static void free_rx_pointers(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_rx_queues; i++)
kfree(priv->rx_queue[i]);
@@ -530,7 +532,7 @@ static void free_rx_pointers(struct gfar_private *priv)
static void unmap_group_regs(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < MAXGROUPS; i++)
if (priv->gfargrp[i].regs)
@@ -539,7 +541,7 @@ static void unmap_group_regs(struct gfar_private *priv)
static void disable_napi(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++)
napi_disable(&priv->gfargrp[i].napi);
@@ -547,14 +549,14 @@ static void disable_napi(struct gfar_private *priv)
static void enable_napi(struct gfar_private *priv)
{
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++)
napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
- struct gfar_private *priv, const char *model)
+ struct gfar_private *priv, const char *model)
{
u32 *queue_mask;
@@ -580,15 +582,13 @@ static int gfar_parse_group(struct device_node *np,
priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
priv->gfargrp[priv->num_grps].priv = priv;
spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
- if(priv->mode == MQ_MG_MODE) {
- queue_mask = (u32 *)of_get_property(np,
- "fsl,rx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].rx_bit_map =
- queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
- queue_mask = (u32 *)of_get_property(np,
- "fsl,tx-bit-map", NULL);
- priv->gfargrp[priv->num_grps].tx_bit_map =
- queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ if (priv->mode == MQ_MG_MODE) {
+ queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].rx_bit_map = queue_mask ?
+ *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
+ queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
+ priv->gfargrp[priv->num_grps].tx_bit_map = queue_mask ?
+ *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
} else {
priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
@@ -652,7 +652,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->num_rx_queues = num_rx_qs;
priv->num_grps = 0x0;
- /* Init Rx queue filer rule set linked list*/
+ /* Init Rx queue filer rule set linked list */
INIT_LIST_HEAD(&priv->rx_list.list);
priv->rx_list.count = 0;
mutex_init(&priv->rx_queue_access);
@@ -673,7 +673,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
} else {
priv->mode = SQ_SG_MODE;
err = gfar_parse_group(np, priv, model);
- if(err)
+ if (err)
goto err_grp_init;
}
@@ -730,27 +730,27 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
mac_addr = of_get_mac_address(np);
+
if (mac_addr)
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
- priv->device_flags =
- FSL_GIANFAR_DEV_HAS_GIGABIT |
- FSL_GIANFAR_DEV_HAS_COALESCE |
- FSL_GIANFAR_DEV_HAS_RMON |
- FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+ priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+
if (model && !strcasecmp(model, "eTSEC"))
- priv->device_flags =
- FSL_GIANFAR_DEV_HAS_GIGABIT |
- FSL_GIANFAR_DEV_HAS_COALESCE |
- FSL_GIANFAR_DEV_HAS_RMON |
- FSL_GIANFAR_DEV_HAS_MULTI_INTR |
- FSL_GIANFAR_DEV_HAS_PADDING |
- FSL_GIANFAR_DEV_HAS_CSUM |
- FSL_GIANFAR_DEV_HAS_VLAN |
- FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
- FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
- FSL_GIANFAR_DEV_HAS_TIMER;
+ priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ FSL_GIANFAR_DEV_HAS_COALESCE |
+ FSL_GIANFAR_DEV_HAS_RMON |
+ FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+ FSL_GIANFAR_DEV_HAS_PADDING |
+ FSL_GIANFAR_DEV_HAS_CSUM |
+ FSL_GIANFAR_DEV_HAS_VLAN |
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
+ FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
+ FSL_GIANFAR_DEV_HAS_TIMER;
ctype = of_get_property(np, "phy-connection-type", NULL);
@@ -781,7 +781,7 @@ err_grp_init:
}
static int gfar_hwtstamp_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+ struct ifreq *ifr, int cmd)
{
struct hwtstamp_config config;
struct gfar_private *priv = netdev_priv(netdev);
@@ -851,6 +851,7 @@ static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
{
unsigned int new_bit_map = 0x0;
int mask = 0x1 << (max_qs - 1), i;
+
for (i = 0; i < max_qs; i++) {
if (bit_map & mask)
new_bit_map = new_bit_map + (1 << i);
@@ -936,22 +937,22 @@ static void gfar_detect_errata(struct gfar_private *priv)
/* MPC8313 Rev 2.0 and higher; All MPC837x */
if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_74;
/* MPC8313 and MPC837x all rev */
if ((pvr == 0x80850010 && mod == 0x80b0) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_76;
/* MPC8313 and MPC837x all rev */
if ((pvr == 0x80850010 && mod == 0x80b0) ||
- (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
+ (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
priv->errata |= GFAR_ERRATA_A002;
/* MPC8313 Rev < 2.0, MPC8548 rev 2.0 */
if ((pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020) ||
- (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
+ (pvr == 0x80210020 && mod == 0x8030 && rev == 0x0020))
priv->errata |= GFAR_ERRATA_12;
if (priv->errata)
@@ -960,7 +961,8 @@ static void gfar_detect_errata(struct gfar_private *priv)
}
/* Set up the ethernet device structure, private data,
- * and anything else we need before we start */
+ * and anything else we need before we start
+ */
static int gfar_probe(struct platform_device *ofdev)
{
u32 tempval;
@@ -991,8 +993,9 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_detect_errata(priv);
- /* Stop the DMA engine now, in case it was running before */
- /* (The firmware could have used it, and left it running). */
+ /* Stop the DMA engine now, in case it was running before
+ * (The firmware could have used it, and left it running).
+ */
gfar_halt(dev);
/* Reset MAC layer */
@@ -1026,13 +1029,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++)
- netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
+ netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+ GFAR_DEV_WEIGHT);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
@@ -1081,7 +1085,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv->padding = 0;
if (dev->features & NETIF_F_IP_CSUM ||
- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
dev->needed_headroom = GMAC_FCB_LEN;
/* Program the isrg regs only if number of grps > 1 */
@@ -1098,28 +1102,32 @@ static int gfar_probe(struct platform_device *ofdev)
/* Need to reverse the bit maps as bit_map's MSB is q0
* but, for_each_set_bit parses from right to left, which
- * basically reverses the queue numbers */
+ * basically reverses the queue numbers
+ */
for (i = 0; i< priv->num_grps; i++) {
- priv->gfargrp[i].tx_bit_map = reverse_bitmap(
- priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
- priv->gfargrp[i].rx_bit_map = reverse_bitmap(
- priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+ priv->gfargrp[i].tx_bit_map =
+ reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
+ priv->gfargrp[i].rx_bit_map =
+ reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
}
/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
- * also assign queues to groups */
+ * also assign queues to groups
+ */
for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
priv->gfargrp[grp_idx].num_rx_queues = 0x0;
+
for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
- priv->num_rx_queues) {
+ priv->num_rx_queues) {
priv->gfargrp[grp_idx].num_rx_queues++;
priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
}
priv->gfargrp[grp_idx].num_tx_queues = 0x0;
+
for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
- priv->num_tx_queues) {
+ priv->num_tx_queues) {
priv->gfargrp[grp_idx].num_tx_queues++;
priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
tstat = tstat | (TSTAT_CLEAR_THALT >> i);
@@ -1149,7 +1157,7 @@ static int gfar_probe(struct platform_device *ofdev)
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
- /* always enable rx filer*/
+ /* always enable rx filer */
priv->rx_filer_enable = 1;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -1165,7 +1173,8 @@ static int gfar_probe(struct platform_device *ofdev)
}
device_init_wakeup(&dev->dev,
- priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
/* fill out IRQ number and name fields */
for (i = 0; i < priv->num_grps; i++) {
@@ -1189,13 +1198,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* Print out the device info */
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
- /* Even more device info helps when determining which kernel */
- /* provided which set of benchmarks. */
+ /* Even more device info helps when determining which kernel
+ * provided which set of benchmarks.
+ */
netdev_info(dev, "Running with NAPI enabled\n");
for (i = 0; i < priv->num_rx_queues; i++)
netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
i, priv->rx_queue[i]->rx_ring_size);
- for(i = 0; i < priv->num_tx_queues; i++)
+ for (i = 0; i < priv->num_tx_queues; i++)
netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
i, priv->tx_queue[i]->tx_ring_size);
@@ -1242,7 +1252,8 @@ static int gfar_suspend(struct device *dev)
u32 tempval;
int magic_packet = priv->wol_en &&
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ (priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
netif_device_detach(ndev);
@@ -1294,7 +1305,8 @@ static int gfar_resume(struct device *dev)
unsigned long flags;
u32 tempval;
int magic_packet = priv->wol_en &&
- (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
+ (priv->device_flags &
+ FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
if (!netif_running(ndev)) {
netif_device_attach(ndev);
@@ -1393,13 +1405,13 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
}
if (ecntrl & ECNTRL_REDUCED_MODE) {
- if (ecntrl & ECNTRL_REDUCED_MII_MODE)
+ if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
return PHY_INTERFACE_MODE_RMII;
+ }
else {
phy_interface_t interface = priv->interface;
- /*
- * This isn't autodetected right now, so it must
+ /* This isn't autodetected right now, so it must
* be set by the device tree or platform code.
*/
if (interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -1453,8 +1465,7 @@ static int init_phy(struct net_device *dev)
return 0;
}
-/*
- * Initialize TBI PHY interface for communicating with the
+/* Initialize TBI PHY interface for communicating with the
* SERDES lynx PHY on the chip. We communicate with this PHY
* through the MDIO bus on each controller, treating it as a
* "normal" PHY at the address found in the TBIPA register. We assume
@@ -1479,8 +1490,7 @@ static void gfar_configure_serdes(struct net_device *dev)
return;
}
- /*
- * If the link is already up, we must already be ok, and don't need to
+ /* If the link is already up, we must already be ok, and don't need to
* configure and reset the TBI<->SerDes link. Maybe U-Boot configured
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
@@ -1492,18 +1502,19 @@ static void gfar_configure_serdes(struct net_device *dev)
phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
phy_write(tbiphy, MII_ADVERTISE,
- ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
- ADVERTISE_1000XPSE_ASYM);
+ ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM);
- phy_write(tbiphy, MII_BMCR, BMCR_ANENABLE |
- BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
+ phy_write(tbiphy, MII_BMCR,
+ BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
+ BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
@@ -1554,15 +1565,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
{
u32 res;
- /*
- * Normaly TSEC should not hang on GRS commands, so we should
+ /* Normaly TSEC should not hang on GRS commands, so we should
* actually wait for IEVENT_GRSC flag.
*/
if (likely(!gfar_has_errata(priv, GFAR_ERRATA_A002)))
return 0;
- /*
- * Read the eTSEC register at offset 0xD1C. If bits 7-14 are
+ /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
* the same as bits 23-30, the eTSEC Rx is assumed to be idle
* and the Rx can be safely reset.
*/
@@ -1580,7 +1589,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
u32 tempval;
- int i = 0;
+ int i;
for (i = 0; i < priv->num_grps; i++) {
regs = priv->gfargrp[i].regs;
@@ -1594,8 +1603,8 @@ static void gfar_halt_nodisable(struct net_device *dev)
regs = priv->gfargrp[0].regs;
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
- if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
- != (DMACTRL_GRS | DMACTRL_GTS)) {
+ if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
+ (DMACTRL_GRS | DMACTRL_GTS)) {
int ret;
tempval |= (DMACTRL_GRS | DMACTRL_GTS);
@@ -1660,7 +1669,7 @@ void stop_gfar(struct net_device *dev)
} else {
for (i = 0; i < priv->num_grps; i++)
free_irq(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
}
free_skb_resources(priv);
@@ -1679,13 +1688,13 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
continue;
dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
- txbdp->length, DMA_TO_DEVICE);
+ txbdp->length, DMA_TO_DEVICE);
txbdp->lstatus = 0;
for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
- j++) {
+ j++) {
txbdp++;
dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
- txbdp->length, DMA_TO_DEVICE);
+ txbdp->length, DMA_TO_DEVICE);
}
txbdp++;
dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
@@ -1705,8 +1714,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
for (i = 0; i < rx_queue->rx_ring_size; i++) {
if (rx_queue->rx_skbuff[i]) {
dma_unmap_single(&priv->ofdev->dev,
- rxbdp->bufPtr, priv->rx_buffer_size,
- DMA_FROM_DEVICE);
+ rxbdp->bufPtr, priv->rx_buffer_size,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
rx_queue->rx_skbuff[i] = NULL;
}
@@ -1718,7 +1727,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
}
/* If there are any tx skbs or rx skbs still around, free them.
- * Then free tx_skbuff and rx_skbuff */
+ * Then free tx_skbuff and rx_skbuff
+ */
static void free_skb_resources(struct gfar_private *priv)
{
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -1728,24 +1738,25 @@ static void free_skb_resources(struct gfar_private *priv)
/* Go through all the buffer descriptors and free their data buffers */
for (i = 0; i < priv->num_tx_queues; i++) {
struct netdev_queue *txq;
+
tx_queue = priv->tx_queue[i];
txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
- if(tx_queue->tx_skbuff)
+ if (tx_queue->tx_skbuff)
free_skb_tx_queue(tx_queue);
netdev_tx_reset_queue(txq);
}
for (i = 0; i < priv->num_rx_queues; i++) {
rx_queue = priv->rx_queue[i];
- if(rx_queue->rx_skbuff)
+ if (rx_queue->rx_skbuff)
free_skb_rx_queue(rx_queue);
}
dma_free_coherent(&priv->ofdev->dev,
- sizeof(struct txbd8) * priv->total_tx_ring_size +
- sizeof(struct rxbd8) * priv->total_rx_ring_size,
- priv->tx_queue[0]->tx_bd_base,
- priv->tx_queue[0]->tx_bd_dma_base);
+ sizeof(struct txbd8) * priv->total_tx_ring_size +
+ sizeof(struct rxbd8) * priv->total_rx_ring_size,
+ priv->tx_queue[0]->tx_bd_base,
+ priv->tx_queue[0]->tx_bd_dma_base);
skb_queue_purge(&priv->rx_recycle);
}
@@ -1784,7 +1795,7 @@ void gfar_start(struct net_device *dev)
}
void gfar_configure_coalescing(struct gfar_private *priv,
- unsigned long tx_mask, unsigned long rx_mask)
+ unsigned long tx_mask, unsigned long rx_mask)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 __iomem *baddr;
@@ -1794,28 +1805,26 @@ void gfar_configure_coalescing(struct gfar_private *priv,
* multiple queues, there's only single reg to program
*/
gfar_write(&regs->txic, 0);
- if(likely(priv->tx_queue[0]->txcoalescing))
+ if (likely(priv->tx_queue[0]->txcoalescing))
gfar_write(&regs->txic, priv->tx_queue[0]->txic);
gfar_write(&regs->rxic, 0);
- if(unlikely(priv->rx_queue[0]->rxcoalescing))
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
if (priv->mode == MQ_MG_MODE) {
baddr = &regs->txic0;
for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
- if (likely(priv->tx_queue[i]->txcoalescing)) {
- gfar_write(baddr + i, 0);
+ gfar_write(baddr + i, 0);
+ if (likely(priv->tx_queue[i]->txcoalescing))
gfar_write(baddr + i, priv->tx_queue[i]->txic);
- }
}
baddr = &regs->rxic0;
for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
- if (likely(priv->rx_queue[i]->rxcoalescing)) {
- gfar_write(baddr + i, 0);
+ gfar_write(baddr + i, 0);
+ if (likely(priv->rx_queue[i]->rxcoalescing))
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
- }
}
}
}
@@ -1827,12 +1836,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
int err;
/* If the device has multiple interrupts, register for
- * them. Otherwise, only register for the one */
+ * them. Otherwise, only register for the one
+ */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
/* Install our interrupt handlers for Error,
- * Transmit, and Receive */
- if ((err = request_irq(grp->interruptError, gfar_error, 0,
- grp->int_name_er,grp)) < 0) {
+ * Transmit, and Receive
+ */
+ if ((err = request_irq(grp->interruptError, gfar_error,
+ 0, grp->int_name_er, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptError);
@@ -1840,21 +1851,21 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
}
if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
- 0, grp->int_name_tx, grp)) < 0) {
+ 0, grp->int_name_tx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptTransmit);
goto tx_irq_fail;
}
- if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
- grp->int_name_rx, grp)) < 0) {
+ if ((err = request_irq(grp->interruptReceive, gfar_receive,
+ 0, grp->int_name_rx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptReceive);
goto rx_irq_fail;
}
} else {
- if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
- grp->int_name_tx, grp)) < 0) {
+ if ((err = request_irq(grp->interruptTransmit, gfar_interrupt,
+ 0, grp->int_name_tx, grp)) < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
grp->interruptTransmit);
goto err_irq_fail;
@@ -1914,8 +1925,9 @@ irq_fail:
return err;
}
-/* Called when something needs to use the ethernet device */
-/* Returns 0 for success. */
+/* Called when something needs to use the ethernet device
+ * Returns 0 for success.
+ */
static int gfar_enet_open(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -1960,18 +1972,17 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
- int fcb_length)
+ int fcb_length)
{
- u8 flags = 0;
-
/* If we're here, it's a IP packet with a TCP or UDP
* payload. We set it to checksum, using a pseudo-header
* we provide
*/
- flags = TXFCB_DEFAULT;
+ u8 flags = TXFCB_DEFAULT;
- /* Tell the controller what the protocol is */
- /* And provide the already calculated phcs */
+ /* Tell the controller what the protocol is
+ * And provide the already calculated phcs
+ */
if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
flags |= TXFCB_UDP;
fcb->phcs = udp_hdr(skb)->check;
@@ -1981,7 +1992,8 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
/* l3os is the distance between the start of the
* frame (skb->data) and the start of the IP hdr.
* l4os is the distance between the start of the
- * l3 hdr and the l4 hdr */
+ * l3 hdr and the l4 hdr
+ */
fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
fcb->l4os = skb_network_header_len(skb);
@@ -1995,7 +2007,7 @@ void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
}
static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
- struct txbd8 *base, int ring_size)
+ struct txbd8 *base, int ring_size)
{
struct txbd8 *new_bd = bdp + stride;
@@ -2003,13 +2015,14 @@ static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
}
static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
- int ring_size)
+ int ring_size)
{
return skip_txbd(bdp, 1, base, ring_size);
}
-/* This is called by the kernel when a frame is ready for transmission. */
-/* It is pointed to by the dev->hard_start_xmit function pointer */
+/* This is called by the kernel when a frame is ready for transmission.
+ * It is pointed to by the dev->hard_start_xmit function pointer
+ */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -2024,13 +2037,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
- /*
- * TOE=1 frames larger than 2500 bytes may see excess delays
+ /* TOE=1 frames larger than 2500 bytes may see excess delays
* before start of transmission.
*/
if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
- skb->ip_summed == CHECKSUM_PARTIAL &&
- skb->len > 2500)) {
+ skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb->len > 2500)) {
int ret;
ret = skb_checksum_help(skb);
@@ -2046,16 +2058,16 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* check if time stamp should be generated */
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->hwts_tx_en)) {
+ priv->hwts_tx_en)) {
do_tstamp = 1;
fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
}
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- vlan_tx_tag_present(skb) ||
- unlikely(do_tstamp)) &&
- (skb_headroom(skb) < fcb_length)) {
+ vlan_tx_tag_present(skb) ||
+ unlikely(do_tstamp)) &&
+ (skb_headroom(skb) < fcb_length)) {
struct sk_buff *skb_new;
skb_new = skb_realloc_headroom(skb, fcb_length);
@@ -2065,10 +2077,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- /* Steal sock reference for processing TX time stamps */
- swap(skb_new->sk, skb->sk);
- swap(skb_new->destructor, skb->destructor);
- kfree_skb(skb);
+ if (skb->sk)
+ skb_set_owner_w(skb_new, skb->sk);
+ consume_skb(skb);
skb = skb_new;
}
@@ -2099,12 +2110,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Time stamp insertion requires one additional TxBD */
if (unlikely(do_tstamp))
txbdp_tstamp = txbdp = next_txbd(txbdp, base,
- tx_queue->tx_ring_size);
+ tx_queue->tx_ring_size);
if (nr_frags == 0) {
if (unlikely(do_tstamp))
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
- TXBD_INTERRUPT);
+ TXBD_INTERRUPT);
else
lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
@@ -2116,7 +2127,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
length = skb_shinfo(skb)->frags[i].size;
lstatus = txbdp->lstatus | length |
- BD_LFLAG(TXBD_READY);
+ BD_LFLAG(TXBD_READY);
/* Handle the last BD specially */
if (i == nr_frags - 1)
@@ -2146,8 +2157,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (CHECKSUM_PARTIAL == skb->ip_summed) {
fcb = gfar_add_fcb(skb);
/* as specified by errata */
- if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12)
- && ((unsigned long)fcb % 0x20) > 0x18)) {
+ if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
+ ((unsigned long)fcb % 0x20) > 0x18)) {
__skb_pull(skb, GMAC_FCB_LEN);
skb_checksum_help(skb);
} else {
@@ -2175,10 +2186,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+ skb_headlen(skb), DMA_TO_DEVICE);
- /*
- * If time stamping is requested one additional TxBD must be set up. The
+ /* If time stamping is requested one additional TxBD must be set up. The
* first TxBD points to the FCB and must have a data length of
* GMAC_FCB_LEN. The second TxBD points to the actual frame data with
* the full frame length.
@@ -2186,7 +2196,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(do_tstamp)) {
txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - fcb_length);
+ (skb_headlen(skb) - fcb_length);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2194,8 +2204,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(txq, skb->len);
- /*
- * We can work in parallel with gfar_clean_tx_ring(), except
+ /* We can work in parallel with gfar_clean_tx_ring(), except
* when modifying num_txbdfree. Note that we didn't grab the lock
* when we were reading the num_txbdfree and checking for available
* space, that's because outside of this function it can only grow,
@@ -2208,8 +2217,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
spin_lock_irqsave(&tx_queue->txlock, flags);
- /*
- * The powerpc-specific eieio() is used, as wmb() has too strong
+ /* The powerpc-specific eieio() is used, as wmb() has too strong
* semantics (it requires synchronization between cacheable and
* uncacheable mappings, which eieio doesn't provide and which we
* don't need), thus requiring a more expensive sync instruction. At
@@ -2225,9 +2233,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
/* Update the current skb pointer to the next entry we will use
- * (wrapping if necessary) */
+ * (wrapping if necessary)
+ */
tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
- TX_RING_MOD_MASK(tx_queue->tx_ring_size);
+ TX_RING_MOD_MASK(tx_queue->tx_ring_size);
tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
@@ -2235,7 +2244,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->num_txbdfree -= (nr_txbds);
/* If the next BD still needs to be cleaned up, then the bds
- are full. We need to tell the kernel to stop sending us stuff. */
+ * are full. We need to tell the kernel to stop sending us stuff.
+ */
if (!tx_queue->num_txbdfree) {
netif_tx_stop_queue(txq);
@@ -2360,12 +2370,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
frame_size += priv->padding;
- tempsize =
- (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
- INCREMENTAL_BUFFER_SIZE;
+ tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
/* Only stop and start the controller if it isn't already
- * stopped, and we changed something */
+ * stopped, and we changed something
+ */
if ((oldsize != tempsize) && (dev->flags & IFF_UP))
stop_gfar(dev);
@@ -2378,11 +2388,12 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
/* If the mtu is larger than the max size for standard
* ethernet frames (ie, a jumbo frame), then set maccfg2
- * to allow huge frames, and to check the length */
+ * to allow huge frames, and to check the length
+ */
tempval = gfar_read(&regs->maccfg2);
if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
- gfar_has_errata(priv, GFAR_ERRATA_74))
+ gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
else
tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
@@ -2403,7 +2414,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
static void gfar_reset_task(struct work_struct *work)
{
struct gfar_private *priv = container_of(work, struct gfar_private,
- reset_task);
+ reset_task);
struct net_device *dev = priv->ndev;
if (dev->flags & IFF_UP) {
@@ -2430,7 +2441,7 @@ static void gfar_align_skb(struct sk_buff *skb)
* as many bytes as needed to align the data properly
*/
skb_reserve(skb, RXBUF_ALIGNMENT -
- (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
+ (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
/* Interrupt Handler for Transmit complete */
@@ -2464,8 +2475,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
frags = skb_shinfo(skb)->nr_frags;
- /*
- * When time stamping, one additional TxBD must be freed.
+ /* When time stamping, one additional TxBD must be freed.
* Also, we need to dma_unmap_single() the TxPAL.
*/
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
@@ -2479,7 +2489,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
/* Only clean completed frames */
if ((lstatus & BD_LFLAG(TXBD_READY)) &&
- (lstatus & BD_LENGTH_MASK))
+ (lstatus & BD_LENGTH_MASK))
break;
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
@@ -2489,11 +2499,12 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
buflen = bdp->length;
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
- buflen, DMA_TO_DEVICE);
+ buflen, DMA_TO_DEVICE);
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
struct skb_shared_hwtstamps shhwtstamps;
u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
+
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
@@ -2506,23 +2517,20 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
for (i = 0; i < frags; i++) {
- dma_unmap_page(&priv->ofdev->dev,
- bdp->bufPtr,
- bdp->length,
- DMA_TO_DEVICE);
+ dma_unmap_page(&priv->ofdev->dev, bdp->bufPtr,
+ bdp->length, DMA_TO_DEVICE);
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next_txbd(bdp, base, tx_ring_size);
}
bytes_sent += skb->len;
- /*
- * If there's room in the queue (limit it to rx_buffer_size)
+ /* If there's room in the queue (limit it to rx_buffer_size)
* we add this skb back into the pool, if it's the right size
*/
if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
- skb_recycle_check(skb, priv->rx_buffer_size +
- RXBUF_ALIGNMENT)) {
+ skb_recycle_check(skb, priv->rx_buffer_size +
+ RXBUF_ALIGNMENT)) {
gfar_align_skb(skb);
skb_queue_head(&priv->rx_recycle, skb);
} else
@@ -2531,7 +2539,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
tx_queue->tx_skbuff[skb_dirtytx] = NULL;
skb_dirtytx = (skb_dirtytx + 1) &
- TX_RING_MOD_MASK(tx_ring_size);
+ TX_RING_MOD_MASK(tx_ring_size);
howmany++;
spin_lock_irqsave(&tx_queue->txlock, flags);
@@ -2561,8 +2569,7 @@ static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
__napi_schedule(&gfargrp->napi);
} else {
- /*
- * Clear IEVENT, so interrupts aren't called again
+ /* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived.
*/
gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
@@ -2579,7 +2586,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct net_device *dev = rx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
@@ -2590,7 +2597,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
gfar_init_rxbdp(rx_queue, bdp, buf);
}
-static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
+static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
@@ -2604,7 +2611,7 @@ static struct sk_buff * gfar_alloc_skb(struct net_device *dev)
return skb;
}
-struct sk_buff * gfar_new_skb(struct net_device *dev)
+struct sk_buff *gfar_new_skb(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct sk_buff *skb = NULL;
@@ -2622,8 +2629,7 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
struct gfar_extra_stats *estats = &priv->extra_stats;
- /* If the packet was truncated, none of the other errors
- * matter */
+ /* If the packet was truncated, none of the other errors matter */
if (status & RXBD_TRUNCATED) {
stats->rx_length_errors++;
@@ -2664,7 +2670,8 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
/* If valid headers were found, and valid sums
* were verified, then we tell the kernel that no
- * checksumming is necessary. Otherwise, it is */
+ * checksumming is necessary. Otherwise, it is [FIXME]
+ */
if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
@@ -2672,8 +2679,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
}
-/* gfar_process_frame() -- handle one incoming packet if skb
- * isn't NULL. */
+/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi)
{
@@ -2685,8 +2691,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* fcb is at the beginning if exists */
fcb = (struct rxfcb *)skb->data;
- /* Remove the FCB from the skb */
- /* Remove the padded bytes, if there are any */
+ /* Remove the FCB from the skb
+ * Remove the padded bytes, if there are any
+ */
if (amount_pull) {
skb_record_rx_queue(skb, fcb->rq);
skb_pull(skb, amount_pull);
@@ -2696,6 +2703,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
if (priv->hwts_rx_en) {
struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
u64 *ns = (u64 *) skb->data;
+
memset(shhwtstamps, 0, sizeof(*shhwtstamps));
shhwtstamps->hwtstamp = ns_to_ktime(*ns);
}
@@ -2709,8 +2717,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
/* Tell the skb what kind of packet this is */
skb->protocol = eth_type_trans(skb, dev);
- /*
- * There's need to check for NETIF_F_HW_VLAN_RX here.
+ /* There's need to check for NETIF_F_HW_VLAN_RX here.
* Even if vlan rx accel is disabled, on some chips
* RXFCB_VLN is pseudo randomly set.
*/
@@ -2728,8 +2735,8 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
- * until the budget/quota has been reached. Returns the number
- * of frames handled
+ * until the budget/quota has been reached. Returns the number
+ * of frames handled
*/
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
@@ -2749,6 +2756,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
struct sk_buff *newskb;
+
rmb();
/* Add another skb for the future */
@@ -2757,15 +2765,15 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
- priv->rx_buffer_size, DMA_FROM_DEVICE);
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
if (unlikely(!(bdp->status & RXBD_ERR) &&
- bdp->length > priv->rx_buffer_size))
+ bdp->length > priv->rx_buffer_size))
bdp->status = RXBD_LARGE;
/* We drop the frame if we failed to allocate a new buffer */
if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
- bdp->status & RXBD_ERR)) {
+ bdp->status & RXBD_ERR)) {
count_errors(bdp->status, dev);
if (unlikely(!newskb))
@@ -2784,7 +2792,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
rx_queue->stats.rx_bytes += pkt_len;
skb_record_rx_queue(skb, rx_queue->qindex);
gfar_process_frame(dev, skb, amount_pull,
- &rx_queue->grp->napi);
+ &rx_queue->grp->napi);
} else {
netif_warn(priv, rx_err, dev, "Missing skb!\n");
@@ -2803,9 +2811,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
/* update to point at the next skb */
- rx_queue->skb_currx =
- (rx_queue->skb_currx + 1) &
- RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+ rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
+ RX_RING_MOD_MASK(rx_queue->rx_ring_size);
}
/* Update the current rxbd pointer to be the next one */
@@ -2816,8 +2823,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
static int gfar_poll(struct napi_struct *napi, int budget)
{
- struct gfar_priv_grp *gfargrp = container_of(napi,
- struct gfar_priv_grp, napi);
+ struct gfar_priv_grp *gfargrp =
+ container_of(napi, struct gfar_priv_grp, napi);
struct gfar_private *priv = gfargrp->priv;
struct gfar __iomem *regs = gfargrp->regs;
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -2831,11 +2838,11 @@ static int gfar_poll(struct napi_struct *napi, int budget)
budget_per_queue = budget/num_queues;
/* Clear IEVENT, so interrupts aren't called again
- * because of the packets that have already arrived */
+ * because of the packets that have already arrived
+ */
gfar_write(&regs->ievent, IEVENT_RTX_MASK);
while (num_queues && left_over_budget) {
-
budget_per_queue = left_over_budget/num_queues;
left_over_budget = 0;
@@ -2846,12 +2853,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
tx_queue = priv->tx_queue[rx_queue->qindex];
tx_cleaned += gfar_clean_tx_ring(tx_queue);
- rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
- budget_per_queue);
+ rx_cleaned_per_queue =
+ gfar_clean_rx_ring(rx_queue, budget_per_queue);
rx_cleaned += rx_cleaned_per_queue;
- if(rx_cleaned_per_queue < budget_per_queue) {
+ if (rx_cleaned_per_queue < budget_per_queue) {
left_over_budget = left_over_budget +
- (budget_per_queue - rx_cleaned_per_queue);
+ (budget_per_queue -
+ rx_cleaned_per_queue);
set_bit(i, &serviced_queues);
num_queues--;
}
@@ -2869,25 +2877,25 @@ static int gfar_poll(struct napi_struct *napi, int budget)
gfar_write(&regs->imask, IMASK_DEFAULT);
- /* If we are coalescing interrupts, update the timer */
- /* Otherwise, clear it */
- gfar_configure_coalescing(priv,
- gfargrp->rx_bit_map, gfargrp->tx_bit_map);
+ /* If we are coalescing interrupts, update the timer
+ * Otherwise, clear it
+ */
+ gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
+ gfargrp->tx_bit_map);
}
return rx_cleaned;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
+/* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
*/
static void gfar_netpoll(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- int i = 0;
+ int i;
/* If the device has multiple interrupts, run tx/rx */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
@@ -2896,7 +2904,7 @@ static void gfar_netpoll(struct net_device *dev)
disable_irq(priv->gfargrp[i].interruptReceive);
disable_irq(priv->gfargrp[i].interruptError);
gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
enable_irq(priv->gfargrp[i].interruptError);
enable_irq(priv->gfargrp[i].interruptReceive);
enable_irq(priv->gfargrp[i].interruptTransmit);
@@ -2905,7 +2913,7 @@ static void gfar_netpoll(struct net_device *dev)
for (i = 0; i < priv->num_grps; i++) {
disable_irq(priv->gfargrp[i].interruptTransmit);
gfar_interrupt(priv->gfargrp[i].interruptTransmit,
- &priv->gfargrp[i]);
+ &priv->gfargrp[i]);
enable_irq(priv->gfargrp[i].interruptTransmit);
}
}
@@ -2957,7 +2965,8 @@ static void adjust_link(struct net_device *dev)
u32 ecntrl = gfar_read(&regs->ecntrl);
/* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
+ * If not, we operate in half-duplex mode.
+ */
if (phydev->duplex != priv->oldduplex) {
new_state = 1;
if (!(phydev->duplex))
@@ -2983,7 +2992,8 @@ static void adjust_link(struct net_device *dev)
((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
/* Reduced mode distinguishes
- * between 10 and 100 */
+ * between 10 and 100
+ */
if (phydev->speed == SPEED_100)
ecntrl |= ECNTRL_R100;
else
@@ -3022,7 +3032,8 @@ static void adjust_link(struct net_device *dev)
/* Update the hash table based on the current list of multicast
* addresses we subscribe to. Also, change the promiscuity of
* the device based on the flags (this function is called
- * whenever dev->flags is changed */
+ * whenever dev->flags is changed
+ */
static void gfar_set_multi(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -3084,7 +3095,8 @@ static void gfar_set_multi(struct net_device *dev)
/* If we have extended hash tables, we need to
* clear the exact match registers to prepare for
- * setting them */
+ * setting them
+ */
if (priv->extended_hash) {
em_num = GFAR_EM_NUM + 1;
gfar_clear_exact_match(dev);
@@ -3110,13 +3122,14 @@ static void gfar_set_multi(struct net_device *dev)
/* Clears each of the exact match registers to zero, so they
- * don't interfere with normal reception */
+ * don't interfere with normal reception
+ */
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
- for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
+ for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
gfar_set_mac_for_addr(dev, idx, zero_arr);
}
@@ -3132,7 +3145,8 @@ static void gfar_clear_exact_match(struct net_device *dev)
* hash index which gaddr register to use, and the 5 other bits
* indicate which bit (assuming an IBM numbering scheme, which
* for PowerPC (tm) is usually the case) in the register holds
- * the entry. */
+ * the entry.
+ */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
@@ -3164,8 +3178,9 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
macptr += num*2;
- /* Now copy it into the mac registers backwards, cuz */
- /* little endian is silly */
+ /* Now copy it into the mac registers backwards, cuz
+ * little endian is silly
+ */
for (idx = 0; idx < ETH_ALEN; idx++)
tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
@@ -3197,7 +3212,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
/* Hmm... */
if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
- netdev_dbg(dev, "error interrupt (ievent=0x%08x imask=0x%08x)\n",
+ netdev_dbg(dev,
+ "error interrupt (ievent=0x%08x imask=0x%08x)\n",
events, gfar_read(&regs->imask));
/* Update the error counters */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8a025570d97e..8971921cc1c8 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -46,18 +46,24 @@
#include "gianfar.h"
extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
+extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
+ int rx_work_limit);
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
- u64 * buf);
+ u64 *buf);
static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf);
-static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
-static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals);
-static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
-static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals);
-static void gfar_gdrvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo);
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals);
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals);
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals);
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals);
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo);
static const char stat_gstrings[][ETH_GSTRING_LEN] = {
"rx-dropped-by-kernel",
@@ -130,14 +136,15 @@ static void gfar_gstrings(struct net_device *dev, u32 stringset, u8 * buf)
memcpy(buf, stat_gstrings, GFAR_STATS_LEN * ETH_GSTRING_LEN);
else
memcpy(buf, stat_gstrings,
- GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
+ GFAR_EXTRA_STATS_LEN * ETH_GSTRING_LEN);
}
/* Fill in an array of 64-bit statistics from various sources.
* This array will be appended to the end of the ethtool_stats
* structure, and returned to user space
*/
-static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 * buf)
+static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
+ u64 *buf)
{
int i;
struct gfar_private *priv = netdev_priv(dev);
@@ -174,8 +181,8 @@ static int gfar_sset_count(struct net_device *dev, int sset)
}
/* Fills in the drvinfo structure with some basic info */
-static void gfar_gdrvinfo(struct net_device *dev, struct
- ethtool_drvinfo *drvinfo)
+static void gfar_gdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
{
strncpy(drvinfo->driver, DRV_NAME, GFAR_INFOSTR_LEN);
strncpy(drvinfo->version, gfar_driver_version, GFAR_INFOSTR_LEN);
@@ -226,7 +233,8 @@ static int gfar_reglen(struct net_device *dev)
}
/* Return a dump of the GFAR register space */
-static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf)
+static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *regbuf)
{
int i;
struct gfar_private *priv = netdev_priv(dev);
@@ -239,7 +247,8 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
/* Convert microseconds to ethernet clock ticks, which changes
* depending on what speed the controller is running at */
-static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int usecs)
+static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
+ unsigned int usecs)
{
unsigned int count;
@@ -263,7 +272,8 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv, unsigned int use
}
/* Convert ethernet clock ticks to microseconds */
-static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int ticks)
+static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
+ unsigned int ticks)
{
unsigned int count;
@@ -288,7 +298,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
/* Get the coalescing parameters, and put them in the cvals
* structure. */
-static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+static int gfar_gcoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_rx_q *rx_queue = NULL;
@@ -353,7 +364,8 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
* Both cvals->*_usecs and cvals->*_frames have to be > 0
* in order for coalescing to be active
*/
-static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
+static int gfar_scoalesce(struct net_device *dev,
+ struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
int i = 0;
@@ -364,7 +376,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
/* Set up rx coalescing */
/* As of now, we will enable/disable coalescing for all
* queues together in case of eTSEC2, this will be modified
- * along with the ethtool interface */
+ * along with the ethtool interface
+ */
if ((cvals->rx_coalesce_usecs == 0) ||
(cvals->rx_max_coalesced_frames == 0)) {
for (i = 0; i < priv->num_rx_queues; i++)
@@ -433,7 +446,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
/* Fills in rvals with the current ring parameters. Currently,
* rx, rx_mini, and rx_jumbo rings are the same size, as mini and
* jumbo are ignored by the driver */
-static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+static void gfar_gringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar_priv_tx_q *tx_queue = NULL;
@@ -459,8 +473,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
/* Change the current ring parameters, stopping the controller if
* necessary so that we don't mess things up while we're in
* motion. We wait for the ring to be clean before reallocating
- * the rings. */
-static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
+ * the rings.
+ */
+static int gfar_sringparam(struct net_device *dev,
+ struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
int err = 0, i = 0;
@@ -486,7 +502,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
unsigned long flags;
/* Halt TX and RX, and process the frames which
- * have already been received */
+ * have already been received
+ */
local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -499,7 +516,7 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
for (i = 0; i < priv->num_rx_queues; i++)
gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ priv->rx_queue[i]->rx_ring_size);
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
@@ -509,7 +526,8 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
for (i = 0; i < priv->num_rx_queues; i++) {
priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
- priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
+ priv->tx_queue[i]->num_txbdfree =
+ priv->tx_queue[i]->tx_ring_size;
}
/* Rebuild the rings with the new size */
@@ -535,7 +553,8 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
if (dev->flags & IFF_UP) {
/* Halt TX and RX, and process the frames which
- * have already been received */
+ * have already been received
+ */
local_irq_save(flags);
lock_tx_qs(priv);
lock_rx_qs(priv);
@@ -548,7 +567,7 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
for (i = 0; i < priv->num_rx_queues; i++)
gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ priv->rx_queue[i]->rx_ring_size);
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
@@ -564,12 +583,14 @@ int gfar_set_features(struct net_device *dev, netdev_features_t features)
static uint32_t gfar_get_msglevel(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
+
return priv->msg_enable;
}
static void gfar_set_msglevel(struct net_device *dev, uint32_t data)
{
struct gfar_private *priv = netdev_priv(dev);
+
priv->msg_enable = data;
}
@@ -614,14 +635,14 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L2DA) {
fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
- RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->cur_filer_idx = priv->cur_filer_idx - 1;
fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
- RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -630,7 +651,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_VLAN) {
fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
@@ -639,7 +660,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_IP_SRC) {
fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -648,7 +669,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & (RXH_IP_DST)) {
fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -657,7 +678,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L3_PROTO) {
fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -666,7 +687,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_0_1) {
fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -675,7 +696,7 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
if (ethflow & RXH_L4_B_2_3) {
fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
- RQFCR_AND | RQFCR_HASHTBL_0;
+ RQFCR_AND | RQFCR_HASHTBL_0;
priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
@@ -683,7 +704,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
}
}
-static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
+static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow,
+ u64 class)
{
unsigned int last_rule_idx = priv->cur_filer_idx;
unsigned int cmp_rqfpr;
@@ -694,9 +716,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
int ret = 1;
local_rqfpr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
local_rqfcr = kmalloc(sizeof(unsigned int) * (MAX_FILER_IDX + 1),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!local_rqfpr || !local_rqfcr) {
pr_err("Out of memory\n");
ret = 0;
@@ -726,9 +748,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
local_rqfpr[j] = priv->ftp_rqfpr[i];
local_rqfcr[j] = priv->ftp_rqfcr[i];
j--;
- if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
- RQFCR_CLE |RQFCR_AND)) &&
- (priv->ftp_rqfpr[i] == cmp_rqfpr))
+ if ((priv->ftp_rqfcr[i] ==
+ (RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND)) &&
+ (priv->ftp_rqfpr[i] == cmp_rqfpr))
break;
}
@@ -743,12 +765,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
*/
for (l = i+1; l < MAX_FILER_IDX; l++) {
if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
- !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
+ !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
- RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
+ RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
priv->ftp_rqfpr[l] = FPR_FILER_MASK;
gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
- priv->ftp_rqfpr[l]);
+ priv->ftp_rqfpr[l]);
break;
}
@@ -773,7 +795,7 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
gfar_write_filer(priv, priv->cur_filer_idx,
- local_rqfcr[k], local_rqfpr[k]);
+ local_rqfcr[k], local_rqfpr[k]);
if (!priv->cur_filer_idx)
break;
priv->cur_filer_idx = priv->cur_filer_idx - 1;
@@ -785,7 +807,8 @@ err:
return ret;
}
-static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
+static int gfar_set_hash_opts(struct gfar_private *priv,
+ struct ethtool_rxnfc *cmd)
{
/* write the filer rules here */
if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
@@ -810,10 +833,10 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
i &= RCTRL_PRSDEP_MASK | RCTRL_PRSFM;
if (i == (RCTRL_PRSDEP_MASK | RCTRL_PRSFM)) {
netdev_info(priv->ndev,
- "Receive Queue Filtering enabled\n");
+ "Receive Queue Filtering enabled\n");
} else {
netdev_warn(priv->ndev,
- "Receive Queue Filtering disabled\n");
+ "Receive Queue Filtering disabled\n");
return -EOPNOTSUPP;
}
}
@@ -823,16 +846,17 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
i &= RCTRL_PRSDEP_MASK;
if (i == RCTRL_PRSDEP_MASK) {
netdev_info(priv->ndev,
- "Receive Queue Filtering enabled\n");
+ "Receive Queue Filtering enabled\n");
} else {
netdev_warn(priv->ndev,
- "Receive Queue Filtering disabled\n");
+ "Receive Queue Filtering disabled\n");
return -EOPNOTSUPP;
}
}
/* Sets the properties for arbitrary filer rule
- * to the first 4 Layer 4 Bytes */
+ * to the first 4 Layer 4 Bytes
+ */
regs->rbifx = 0xC0C1C2C3;
return 0;
}
@@ -870,14 +894,14 @@ static void gfar_set_mask(u32 mask, struct filer_table *tab)
static void gfar_set_parse_bits(u32 value, u32 mask, struct filer_table *tab)
{
gfar_set_mask(mask, tab);
- tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE
- | RQFCR_AND;
+ tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_PID_PARSE |
+ RQFCR_AND;
tab->fe[tab->index].prop = value;
tab->index++;
}
static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
- struct filer_table *tab)
+ struct filer_table *tab)
{
gfar_set_mask(mask, tab);
tab->fe[tab->index].ctrl = RQFCR_CMP_EXACT | RQFCR_AND | flag;
@@ -885,8 +909,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
tab->index++;
}
-/*
- * For setting a tuple of value and mask of type flag
+/* For setting a tuple of value and mask of type flag
* Example:
* IP-Src = 10.0.0.0/255.0.0.0
* value: 0x0A000000 mask: FF000000 flag: RQFPR_IPV4
@@ -901,7 +924,7 @@ static void gfar_set_general_attribute(u32 value, u32 mask, u32 flag,
* Further the all masks are one-padded for better hardware efficiency.
*/
static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
- struct filer_table *tab)
+ struct filer_table *tab)
{
switch (flag) {
/* 3bit */
@@ -959,7 +982,8 @@ static void gfar_set_attribute(u32 value, u32 mask, u32 flag,
/* Translates value and mask for UDP, TCP or SCTP */
static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
- struct ethtool_tcpip4_spec *mask, struct filer_table *tab)
+ struct ethtool_tcpip4_spec *mask,
+ struct filer_table *tab)
{
gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
@@ -970,97 +994,92 @@ static void gfar_set_basic_ip(struct ethtool_tcpip4_spec *value,
/* Translates value and mask for RAW-IP4 */
static void gfar_set_user_ip(struct ethtool_usrip4_spec *value,
- struct ethtool_usrip4_spec *mask, struct filer_table *tab)
+ struct ethtool_usrip4_spec *mask,
+ struct filer_table *tab)
{
gfar_set_attribute(value->ip4src, mask->ip4src, RQFCR_PID_SIA, tab);
gfar_set_attribute(value->ip4dst, mask->ip4dst, RQFCR_PID_DIA, tab);
gfar_set_attribute(value->tos, mask->tos, RQFCR_PID_TOS, tab);
gfar_set_attribute(value->proto, mask->proto, RQFCR_PID_L4P, tab);
gfar_set_attribute(value->l4_4_bytes, mask->l4_4_bytes, RQFCR_PID_ARB,
- tab);
+ tab);
}
/* Translates value and mask for ETHER spec */
static void gfar_set_ether(struct ethhdr *value, struct ethhdr *mask,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 upper_temp_mask = 0;
u32 lower_temp_mask = 0;
+
/* Source address */
if (!is_broadcast_ether_addr(mask->h_source)) {
-
if (is_zero_ether_addr(mask->h_source)) {
upper_temp_mask = 0xFFFFFFFF;
lower_temp_mask = 0xFFFFFFFF;
} else {
- upper_temp_mask = mask->h_source[0] << 16
- | mask->h_source[1] << 8
- | mask->h_source[2];
- lower_temp_mask = mask->h_source[3] << 16
- | mask->h_source[4] << 8
- | mask->h_source[5];
+ upper_temp_mask = mask->h_source[0] << 16 |
+ mask->h_source[1] << 8 |
+ mask->h_source[2];
+ lower_temp_mask = mask->h_source[3] << 16 |
+ mask->h_source[4] << 8 |
+ mask->h_source[5];
}
/* Upper 24bit */
- gfar_set_attribute(
- value->h_source[0] << 16 | value->h_source[1]
- << 8 | value->h_source[2],
- upper_temp_mask, RQFCR_PID_SAH, tab);
+ gfar_set_attribute(value->h_source[0] << 16 |
+ value->h_source[1] << 8 |
+ value->h_source[2],
+ upper_temp_mask, RQFCR_PID_SAH, tab);
/* And the same for the lower part */
- gfar_set_attribute(
- value->h_source[3] << 16 | value->h_source[4]
- << 8 | value->h_source[5],
- lower_temp_mask, RQFCR_PID_SAL, tab);
+ gfar_set_attribute(value->h_source[3] << 16 |
+ value->h_source[4] << 8 |
+ value->h_source[5],
+ lower_temp_mask, RQFCR_PID_SAL, tab);
}
/* Destination address */
if (!is_broadcast_ether_addr(mask->h_dest)) {
-
/* Special for destination is limited broadcast */
- if ((is_broadcast_ether_addr(value->h_dest)
- && is_zero_ether_addr(mask->h_dest))) {
+ if ((is_broadcast_ether_addr(value->h_dest) &&
+ is_zero_ether_addr(mask->h_dest))) {
gfar_set_parse_bits(RQFPR_EBC, RQFPR_EBC, tab);
} else {
-
if (is_zero_ether_addr(mask->h_dest)) {
upper_temp_mask = 0xFFFFFFFF;
lower_temp_mask = 0xFFFFFFFF;
} else {
- upper_temp_mask = mask->h_dest[0] << 16
- | mask->h_dest[1] << 8
- | mask->h_dest[2];
- lower_temp_mask = mask->h_dest[3] << 16
- | mask->h_dest[4] << 8
- | mask->h_dest[5];
+ upper_temp_mask = mask->h_dest[0] << 16 |
+ mask->h_dest[1] << 8 |
+ mask->h_dest[2];
+ lower_temp_mask = mask->h_dest[3] << 16 |
+ mask->h_dest[4] << 8 |
+ mask->h_dest[5];
}
/* Upper 24bit */
- gfar_set_attribute(
- value->h_dest[0] << 16
- | value->h_dest[1] << 8
- | value->h_dest[2],
- upper_temp_mask, RQFCR_PID_DAH, tab);
+ gfar_set_attribute(value->h_dest[0] << 16 |
+ value->h_dest[1] << 8 |
+ value->h_dest[2],
+ upper_temp_mask, RQFCR_PID_DAH, tab);
/* And the same for the lower part */
- gfar_set_attribute(
- value->h_dest[3] << 16
- | value->h_dest[4] << 8
- | value->h_dest[5],
- lower_temp_mask, RQFCR_PID_DAL, tab);
+ gfar_set_attribute(value->h_dest[3] << 16 |
+ value->h_dest[4] << 8 |
+ value->h_dest[5],
+ lower_temp_mask, RQFCR_PID_DAL, tab);
}
}
gfar_set_attribute(value->h_proto, mask->h_proto, RQFCR_PID_ETY, tab);
-
}
/* Convert a rule to binary filter format of gianfar */
static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 vlan = 0, vlan_mask = 0;
u32 id = 0, id_mask = 0;
u32 cfi = 0, cfi_mask = 0;
u32 prio = 0, prio_mask = 0;
-
u32 old_index = tab->index;
/* Check if vlan is wanted */
@@ -1076,13 +1095,16 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
id_mask = rule->m_ext.vlan_tci & VLAN_VID_MASK;
cfi = rule->h_ext.vlan_tci & VLAN_CFI_MASK;
cfi_mask = rule->m_ext.vlan_tci & VLAN_CFI_MASK;
- prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ prio = (rule->h_ext.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
+ prio_mask = (rule->m_ext.vlan_tci & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT;
if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
vlan |= RQFPR_CFI;
vlan_mask |= RQFPR_CFI;
- } else if (cfi != VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
+ } else if (cfi != VLAN_TAG_PRESENT &&
+ cfi_mask == VLAN_TAG_PRESENT) {
vlan_mask |= RQFPR_CFI;
}
}
@@ -1090,34 +1112,36 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
switch (rule->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_TCP | vlan,
- RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
+ RQFPR_IPV4 | RQFPR_TCP | vlan_mask, tab);
gfar_set_basic_ip(&rule->h_u.tcp_ip4_spec,
- &rule->m_u.tcp_ip4_spec, tab);
+ &rule->m_u.tcp_ip4_spec, tab);
break;
case UDP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | RQFPR_UDP | vlan,
- RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
+ RQFPR_IPV4 | RQFPR_UDP | vlan_mask, tab);
gfar_set_basic_ip(&rule->h_u.udp_ip4_spec,
- &rule->m_u.udp_ip4_spec, tab);
+ &rule->m_u.udp_ip4_spec, tab);
break;
case SCTP_V4_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
- tab);
+ tab);
gfar_set_attribute(132, 0, RQFCR_PID_L4P, tab);
- gfar_set_basic_ip((struct ethtool_tcpip4_spec *) &rule->h_u,
- (struct ethtool_tcpip4_spec *) &rule->m_u, tab);
+ gfar_set_basic_ip((struct ethtool_tcpip4_spec *)&rule->h_u,
+ (struct ethtool_tcpip4_spec *)&rule->m_u,
+ tab);
break;
case IP_USER_FLOW:
gfar_set_parse_bits(RQFPR_IPV4 | vlan, RQFPR_IPV4 | vlan_mask,
- tab);
+ tab);
gfar_set_user_ip((struct ethtool_usrip4_spec *) &rule->h_u,
- (struct ethtool_usrip4_spec *) &rule->m_u, tab);
+ (struct ethtool_usrip4_spec *) &rule->m_u,
+ tab);
break;
case ETHER_FLOW:
if (vlan)
gfar_set_parse_bits(vlan, vlan_mask, tab);
gfar_set_ether((struct ethhdr *) &rule->h_u,
- (struct ethhdr *) &rule->m_u, tab);
+ (struct ethhdr *) &rule->m_u, tab);
break;
default:
return -1;
@@ -1152,7 +1176,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
tab->fe[tab->index - 1].ctrl |= RQFCR_CLE;
}
- /* In rare cases the cache can be full while there is free space in hw */
+ /* In rare cases the cache can be full while there is
+ * free space in hw
+ */
if (tab->index > MAX_FILER_CACHE_IDX - 1)
return -EBUSY;
@@ -1161,7 +1187,7 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
/* Copy size filer entries */
static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
- struct gfar_filer_entry src[0], s32 size)
+ struct gfar_filer_entry src[0], s32 size)
{
while (size > 0) {
size--;
@@ -1171,10 +1197,12 @@ static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
}
/* Delete the contents of the filer-table between start and end
- * and collapse them */
+ * and collapse them
+ */
static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
{
int length;
+
if (end > MAX_FILER_CACHE_IDX || end < begin)
return -EINVAL;
@@ -1200,14 +1228,14 @@ static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
/* Make space on the wanted location */
static int gfar_expand_filer_entries(u32 begin, u32 length,
- struct filer_table *tab)
+ struct filer_table *tab)
{
- if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX || begin
- > MAX_FILER_CACHE_IDX)
+ if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
+ begin > MAX_FILER_CACHE_IDX)
return -EINVAL;
gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
- tab->index - length + 1);
+ tab->index - length + 1);
tab->index += length;
return 0;
@@ -1215,9 +1243,10 @@ static int gfar_expand_filer_entries(u32 begin, u32 length,
static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
- == (RQFCR_AND | RQFCR_CLE))
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+ start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
+ (RQFCR_AND | RQFCR_CLE))
return start;
}
return -1;
@@ -1225,16 +1254,16 @@ static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
{
- for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1); start++) {
- if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE))
- == (RQFCR_CLE))
+ for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
+ start++) {
+ if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
+ (RQFCR_CLE))
return start;
}
return -1;
}
-/*
- * Uses hardwares clustering option to reduce
+/* Uses hardwares clustering option to reduce
* the number of filer table entries
*/
static void gfar_cluster_filer(struct filer_table *tab)
@@ -1244,8 +1273,7 @@ static void gfar_cluster_filer(struct filer_table *tab)
while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
j = i;
while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
- /*
- * The cluster entries self and the previous one
+ /* The cluster entries self and the previous one
* (a mask) must be identical!
*/
if (tab->fe[i].ctrl != tab->fe[j].ctrl)
@@ -1260,21 +1288,21 @@ static void gfar_cluster_filer(struct filer_table *tab)
jend = gfar_get_next_cluster_end(j, tab);
if (jend == -1 || iend == -1)
break;
- /*
- * First we make some free space, where our cluster
+
+ /* First we make some free space, where our cluster
* element should be. Then we copy it there and finally
* delete in from its old location.
*/
-
- if (gfar_expand_filer_entries(iend, (jend - j), tab)
- == -EINVAL)
+ if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
+ -EINVAL)
break;
gfar_copy_filer_entries(&(tab->fe[iend + 1]),
- &(tab->fe[jend + 1]), jend - j);
+ &(tab->fe[jend + 1]), jend - j);
if (gfar_trim_filer_entries(jend - 1,
- jend + (jend - j), tab) == -EINVAL)
+ jend + (jend - j),
+ tab) == -EINVAL)
return;
/* Mask out cluster bit */
@@ -1285,8 +1313,9 @@ static void gfar_cluster_filer(struct filer_table *tab)
/* Swaps the masked bits of a1<>a2 and b1<>b2 */
static void gfar_swap_bits(struct gfar_filer_entry *a1,
- struct gfar_filer_entry *a2, struct gfar_filer_entry *b1,
- struct gfar_filer_entry *b2, u32 mask)
+ struct gfar_filer_entry *a2,
+ struct gfar_filer_entry *b1,
+ struct gfar_filer_entry *b2, u32 mask)
{
u32 temp[4];
temp[0] = a1->ctrl & mask;
@@ -1305,13 +1334,12 @@ static void gfar_swap_bits(struct gfar_filer_entry *a1,
b2->ctrl |= temp[2];
}
-/*
- * Generate a list consisting of masks values with their start and
+/* Generate a list consisting of masks values with their start and
* end of validity and block as indicator for parts belonging
* together (glued by ANDs) in mask_table
*/
static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 i, and_index = 0, block_index = 1;
@@ -1327,13 +1355,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
and_index++;
}
/* cluster starts and ends will be separated because they should
- * hold their position */
+ * hold their position
+ */
if (tab->fe[i].ctrl & RQFCR_CLE)
block_index++;
/* A not set AND indicates the end of a depended block */
if (!(tab->fe[i].ctrl & RQFCR_AND))
block_index++;
-
}
mask_table[and_index - 1].end = i - 1;
@@ -1341,14 +1369,13 @@ static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
return and_index;
}
-/*
- * Sorts the entries of mask_table by the values of the masks.
+/* Sorts the entries of mask_table by the values of the masks.
* Important: The 0xFF80 flags of the first and last entry of a
* block must hold their position (which queue, CLusterEnable, ReJEct,
* AND)
*/
static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
- struct filer_table *temp_table, u32 and_index)
+ struct filer_table *temp_table, u32 and_index)
{
/* Pointer to compare function (_asc or _desc) */
int (*gfar_comp)(const void *, const void *);
@@ -1359,16 +1386,16 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
gfar_comp = &gfar_comp_desc;
for (i = 0; i < and_index; i++) {
-
if (prev != mask_table[i].block) {
old_first = mask_table[start].start + 1;
old_last = mask_table[i - 1].end;
sort(mask_table + start, size,
- sizeof(struct gfar_mask_entry),
- gfar_comp, &gfar_swap);
+ sizeof(struct gfar_mask_entry),
+ gfar_comp, &gfar_swap);
/* Toggle order for every block. This makes the
- * thing more efficient! */
+ * thing more efficient!
+ */
if (gfar_comp == gfar_comp_desc)
gfar_comp = &gfar_comp_asc;
else
@@ -1378,12 +1405,11 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
new_last = mask_table[i - 1].end;
gfar_swap_bits(&temp_table->fe[new_first],
- &temp_table->fe[old_first],
- &temp_table->fe[new_last],
- &temp_table->fe[old_last],
- RQFCR_QUEUE | RQFCR_CLE |
- RQFCR_RJE | RQFCR_AND
- );
+ &temp_table->fe[old_first],
+ &temp_table->fe[new_last],
+ &temp_table->fe[old_last],
+ RQFCR_QUEUE | RQFCR_CLE |
+ RQFCR_RJE | RQFCR_AND);
start = i;
size = 0;
@@ -1391,11 +1417,9 @@ static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
size++;
prev = mask_table[i].block;
}
-
}
-/*
- * Reduces the number of masks needed in the filer table to save entries
+/* Reduces the number of masks needed in the filer table to save entries
* This is done by sorting the masks of a depended block. A depended block is
* identified by gluing ANDs or CLE. The sorting order toggles after every
* block. Of course entries in scope of a mask must change their location with
@@ -1410,13 +1434,14 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
s32 ret = 0;
/* We need a copy of the filer table because
- * we want to change its order */
+ * we want to change its order
+ */
temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
if (temp_table == NULL)
return -ENOMEM;
mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
- sizeof(struct gfar_mask_entry), GFP_KERNEL);
+ sizeof(struct gfar_mask_entry), GFP_KERNEL);
if (mask_table == NULL) {
ret = -ENOMEM;
@@ -1428,7 +1453,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
gfar_sort_mask_table(mask_table, temp_table, and_index);
/* Now we can copy the data from our duplicated filer table to
- * the real one in the order the mask table says */
+ * the real one in the order the mask table says
+ */
for (i = 0; i < and_index; i++) {
size = mask_table[i].end - mask_table[i].start + 1;
gfar_copy_filer_entries(&(tab->fe[j]),
@@ -1437,7 +1463,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
}
/* And finally we just have to check for duplicated masks and drop the
- * second ones */
+ * second ones
+ */
for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
if (tab->fe[i].ctrl == 0x80) {
previous_mask = i++;
@@ -1448,7 +1475,8 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
if (tab->fe[i].ctrl == 0x80) {
if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
/* Two identical ones found!
- * So drop the second one! */
+ * So drop the second one!
+ */
gfar_trim_filer_entries(i, i, tab);
} else
/* Not identical! */
@@ -1463,7 +1491,7 @@ end: kfree(temp_table);
/* Write the bit-pattern from software's buffer to hardware registers */
static int gfar_write_filer_table(struct gfar_private *priv,
- struct filer_table *tab)
+ struct filer_table *tab)
{
u32 i = 0;
if (tab->index > MAX_FILER_IDX - 1)
@@ -1473,13 +1501,15 @@ static int gfar_write_filer_table(struct gfar_private *priv,
lock_rx_qs(priv);
/* Fill regular entries */
- for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl); i++)
+ for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
+ i++)
gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
/* Fill the rest with fall-troughs */
for (; i < MAX_FILER_IDX - 1; i++)
gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
/* Last entry must be default accept
- * because that's what people expect */
+ * because that's what people expect
+ */
gfar_write_filer(priv, i, 0x20, 0x0);
unlock_rx_qs(priv);
@@ -1488,21 +1518,21 @@ static int gfar_write_filer_table(struct gfar_private *priv,
}
static int gfar_check_capability(struct ethtool_rx_flow_spec *flow,
- struct gfar_private *priv)
+ struct gfar_private *priv)
{
if (flow->flow_type & FLOW_EXT) {
if (~flow->m_ext.data[0] || ~flow->m_ext.data[1])
netdev_warn(priv->ndev,
- "User-specific data not supported!\n");
+ "User-specific data not supported!\n");
if (~flow->m_ext.vlan_etype)
netdev_warn(priv->ndev,
- "VLAN-etype not supported!\n");
+ "VLAN-etype not supported!\n");
}
if (flow->flow_type == IP_USER_FLOW)
if (flow->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
netdev_warn(priv->ndev,
- "IP-Version differing from IPv4 not supported!\n");
+ "IP-Version differing from IPv4 not supported!\n");
return 0;
}
@@ -1520,15 +1550,18 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
return -ENOMEM;
/* Now convert the existing filer data from flow_spec into
- * filer tables binary format */
+ * filer tables binary format
+ */
list_for_each_entry(j, &priv->rx_list.list, list) {
ret = gfar_convert_to_filer(&j->fs, tab);
if (ret == -EBUSY) {
- netdev_err(priv->ndev, "Rule not added: No free space!\n");
+ netdev_err(priv->ndev,
+ "Rule not added: No free space!\n");
goto end;
}
if (ret == -1) {
- netdev_err(priv->ndev, "Rule not added: Unsupported Flow-type!\n");
+ netdev_err(priv->ndev,
+ "Rule not added: Unsupported Flow-type!\n");
goto end;
}
}
@@ -1540,9 +1573,9 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
gfar_optimize_filer_masks(tab);
pr_debug("\n\tSummary:\n"
- "\tData on hardware: %d\n"
- "\tCompression rate: %d%%\n",
- tab->index, 100 - (100 * tab->index) / i);
+ "\tData on hardware: %d\n"
+ "\tCompression rate: %d%%\n",
+ tab->index, 100 - (100 * tab->index) / i);
/* Write everything to hardware */
ret = gfar_write_filer_table(priv, tab);
@@ -1551,7 +1584,8 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
goto end;
}
-end: kfree(tab);
+end:
+ kfree(tab);
return ret;
}
@@ -1569,7 +1603,7 @@ static void gfar_invert_masks(struct ethtool_rx_flow_spec *flow)
}
static int gfar_add_cls(struct gfar_private *priv,
- struct ethtool_rx_flow_spec *flow)
+ struct ethtool_rx_flow_spec *flow)
{
struct ethtool_flow_spec_container *temp, *comp;
int ret = 0;
@@ -1591,7 +1625,6 @@ static int gfar_add_cls(struct gfar_private *priv,
list_add(&temp->list, &priv->rx_list.list);
goto process;
} else {
-
list_for_each_entry(comp, &priv->rx_list.list, list) {
if (comp->fs.location > flow->location) {
list_add_tail(&temp->list, &comp->list);
@@ -1599,8 +1632,8 @@ static int gfar_add_cls(struct gfar_private *priv,
}
if (comp->fs.location == flow->location) {
netdev_err(priv->ndev,
- "Rule not added: ID %d not free!\n",
- flow->location);
+ "Rule not added: ID %d not free!\n",
+ flow->location);
ret = -EBUSY;
goto clean_mem;
}
@@ -1642,7 +1675,6 @@ static int gfar_del_cls(struct gfar_private *priv, u32 loc)
}
return ret;
-
}
static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
@@ -1663,7 +1695,7 @@ static int gfar_get_cls(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
}
static int gfar_get_cls_all(struct gfar_private *priv,
- struct ethtool_rxnfc *cmd, u32 *rule_locs)
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
{
struct ethtool_flow_spec_container *comp;
u32 i = 0;
@@ -1714,7 +1746,7 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
}
static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 *rule_locs)
{
struct gfar_private *priv = netdev_priv(dev);
int ret = 0;
@@ -1748,23 +1780,19 @@ static int gfar_get_ts_info(struct net_device *dev,
struct gfar_private *priv = netdev_priv(dev);
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
- info->so_timestamping =
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
info->phc_index = -1;
return 0;
}
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = gfar_phc_index;
- info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 9ac14f804851..21c6574c5f15 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -185,7 +185,7 @@ static void mem_disp(u8 *addr, int size)
for (; (u32) i < (u32) addr + size4Aling; i += 4)
printk("%08x ", *((u32 *) (i)));
for (; (u32) i < (u32) addr + size; i++)
- printk("%02x", *((u8 *) (i)));
+ printk("%02x", *((i)));
if (notAlign == 1)
printk("\r\n");
}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index d496673f0908..3f4391bede81 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1217,7 +1217,7 @@ static int hp100_init_rxpdl(struct net_device *dev,
ringptr->pdl = pdlptr + 1;
ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr + 1);
- ringptr->skb = (void *) NULL;
+ ringptr->skb = NULL;
/*
* Write address and length of first PDL Fragment (which is used for
@@ -1243,7 +1243,7 @@ static int hp100_init_txpdl(struct net_device *dev,
ringptr->pdl = pdlptr; /* +1; */
ringptr->pdl_paddr = virt_to_whatever(dev, pdlptr); /* +1 */
- ringptr->skb = (void *) NULL;
+ ringptr->skb = NULL;
return roundup(MAX_TX_FRAG * 2 + 2, 4);
}
@@ -1628,7 +1628,7 @@ static void hp100_clean_txring(struct net_device *dev)
/* Conversion to new PCI API : NOP */
pci_unmap_single(lp->pci_dev, (dma_addr_t) lp->txrhead->pdl[1], lp->txrhead->pdl[2], PCI_DMA_TODEVICE);
dev_kfree_skb_any(lp->txrhead->skb);
- lp->txrhead->skb = (void *) NULL;
+ lp->txrhead->skb = NULL;
lp->txrhead = lp->txrhead->next;
lp->txrcommit--;
}
diff --git a/drivers/net/ethernet/i825xx/lp486e.c b/drivers/net/ethernet/i825xx/lp486e.c
index 6c2952c8ea15..3735bfa53600 100644
--- a/drivers/net/ethernet/i825xx/lp486e.c
+++ b/drivers/net/ethernet/i825xx/lp486e.c
@@ -629,10 +629,10 @@ init_i596(struct net_device *dev) {
memcpy ((void *)lp->eth_addr, dev->dev_addr, 6);
lp->set_add.command = CmdIASetup;
- i596_add_cmd(dev, (struct i596_cmd *)&lp->set_add);
+ i596_add_cmd(dev, &lp->set_add);
lp->tdr.command = CmdTDR;
- i596_add_cmd(dev, (struct i596_cmd *)&lp->tdr);
+ i596_add_cmd(dev, &lp->tdr);
if (lp->scb.command && i596_timeout(dev, "i82596 init", 200))
return 1;
@@ -737,7 +737,7 @@ i596_cleanup_cmd(struct net_device *dev) {
lp = netdev_priv(dev);
while (lp->cmd_head) {
- cmd = (struct i596_cmd *)lp->cmd_head;
+ cmd = lp->cmd_head;
lp->cmd_head = pa_to_va(lp->cmd_head->pa_next);
lp->cmd_backlog--;
@@ -1281,7 +1281,7 @@ static void set_multicast_list(struct net_device *dev) {
lp->i596_config[8] |= 0x01;
}
- i596_add_cmd(dev, (struct i596_cmd *) &lp->set_conf);
+ i596_add_cmd(dev, &lp->set_conf);
}
}
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index cae17f4bc93e..353f57f675d0 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -571,7 +571,7 @@ static int init586(struct net_device *dev)
}
#endif
- ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */
+ ptr = alloc_rfa(dev,ptr); /* init receive-frame-area */
/*
* alloc xmit-buffs / init xmit_cmds
@@ -584,7 +584,7 @@ static int init586(struct net_device *dev)
ptr = (char *) ptr + XMIT_BUFF_SIZE;
p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */
ptr = (char *) ptr + sizeof(struct tbd_struct);
- if((void *)ptr > (void *)dev->mem_end)
+ if(ptr > (void *)dev->mem_end)
{
printk("%s: not enough shared-mem for your configuration!\n",dev->name);
return 1;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 4fb47f14dbfe..cb66f574dc97 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -376,9 +376,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
return 0;
}
-/**
- * allocates memory for a queue and registers pages in phyp
- */
+/* allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
int nr_pages, int wqe_size, int act_nr_sges,
struct ehea_adapter *adapter, int h_call_q_selector)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index ada720b42ff6..535f94fac4a1 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1249,20 +1249,35 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
const struct firmware *fw = nic->fw;
u8 timer, bundle, min_size;
int err = 0;
+ bool required = false;
/* do not load u-code for ICH devices */
if (nic->flags & ich)
return NULL;
- /* Search for ucode match against h/w revision */
- if (nic->mac == mac_82559_D101M)
+ /* Search for ucode match against h/w revision
+ *
+ * Based on comments in the source code for the FreeBSD fxp
+ * driver, the FIRMWARE_D102E ucode includes both CPUSaver and
+ *
+ * "fixes for bugs in the B-step hardware (specifically, bugs
+ * with Inline Receive)."
+ *
+ * So we must fail if it cannot be loaded.
+ *
+ * The other microcode files are only required for the optional
+ * CPUSaver feature. Nice to have, but no reason to fail.
+ */
+ if (nic->mac == mac_82559_D101M) {
fw_name = FIRMWARE_D101M;
- else if (nic->mac == mac_82559_D101S)
+ } else if (nic->mac == mac_82559_D101S) {
fw_name = FIRMWARE_D101S;
- else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10)
+ } else if (nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
fw_name = FIRMWARE_D102E;
- else /* No ucode on other devices */
+ required = true;
+ } else { /* No ucode on other devices */
return NULL;
+ }
/* If the firmware has not previously been loaded, request a pointer
* to it. If it was previously loaded, we are reinitializing the
@@ -1273,10 +1288,17 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
err = request_firmware(&fw, fw_name, &nic->pdev->dev);
if (err) {
- netif_err(nic, probe, nic->netdev,
- "Failed to load firmware \"%s\": %d\n",
- fw_name, err);
- return ERR_PTR(err);
+ if (required) {
+ netif_err(nic, probe, nic->netdev,
+ "Failed to load firmware \"%s\": %d\n",
+ fw_name, err);
+ return ERR_PTR(err);
+ } else {
+ netif_info(nic, probe, nic->netdev,
+ "CPUSaver disabled. Needs \"%s\": %d\n",
+ fw_name, err);
+ return NULL;
+ }
}
/* Firmware should be precisely UCODE_SIZE (words) plus three bytes
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 3103f0b6bf5e..736a7d987db5 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1851,6 +1851,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_sset_count = e1000_get_sset_count,
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void e1000_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c526279e4927..3d6839528761 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -399,7 +399,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
}
/**
- * e1000_reset_hw: reset the hardware completely
+ * e1000_reset_hw - reset the hardware completely
* @hw: Struct containing variables accessed by shared code
*
* Reset the transmit and receive units; mask and clear all interrupts.
@@ -546,7 +546,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
}
/**
- * e1000_init_hw: Performs basic configuration of the adapter.
+ * e1000_init_hw - Performs basic configuration of the adapter.
* @hw: Struct containing variables accessed by shared code
*
* Assumes that the controller has previously been reset and is in a
@@ -2591,7 +2591,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
* @hw: Struct containing variables accessed by shared code
* @speed: Speed of the connection
* @duplex: Duplex setting of the connection
-
+ *
* Detects the current speed and duplex settings of the hardware.
*/
s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
@@ -2959,7 +2959,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
* @hw: Struct containing variables accessed by shared code
* @reg_addr: address of the PHY register to write
* @data: data to write to the PHY
-
+ *
* Writes a value to a PHY register
*/
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 7483ca0a6282..3bfbb8df8989 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -721,9 +721,7 @@ void e1000_reset(struct e1000_adapter *adapter)
e1000_release_manageability(adapter);
}
-/**
- * Dump the eeprom for users having checksum issues
- **/
+/* Dump the eeprom for users having checksum issues */
static void e1000_dump_eeprom(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1078,18 +1076,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->features |= netdev->hw_features;
- netdev->hw_features |= NETIF_F_RXCSUM;
- netdev->hw_features |= NETIF_F_RXALL;
- netdev->hw_features |= NETIF_F_RXFCS;
+ netdev->hw_features |= (NETIF_F_RXCSUM |
+ NETIF_F_RXALL |
+ NETIF_F_RXFCS);
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_HW_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= (NETIF_F_TSO |
+ NETIF_F_HW_CSUM |
+ NETIF_F_SG);
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -3056,14 +3054,13 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
mmiowb();
}
-/**
- * 82547 workaround to avoid controller hang in half-duplex environment.
+/* 82547 workaround to avoid controller hang in half-duplex environment.
* The workaround is to avoid queuing a large packet that would span
* the internal Tx FIFO ring boundary by notifying the stack to resend
* the packet at a later time. This gives the Tx FIFO an opportunity to
* flush all packets. When that occurs, we reset the Tx FIFO pointers
* to the beginning of the Tx FIFO.
- **/
+ */
#define E1000_FIFO_HDR 0x10
#define E1000_82547_PAD_LEN 0x3E0
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 36db4df09aed..0b3bade957fd 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1572,6 +1572,9 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
ctrl = er32(CTRL);
status = er32(STATUS);
rxcw = er32(RXCW);
+ /* SYNCH bit and IV bit are sticky */
+ udelay(10);
+ rxcw = er32(RXCW);
if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
@@ -1677,16 +1680,18 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e_dbg("ANYSTATE -> DOWN\n");
} else {
/*
- * Check several times, if Sync and Config
- * both are consistently 1 then simply ignore
- * the Invalid bit and restart Autoneg
+ * Check several times, if SYNCH bit and CONFIG
+ * bit both are consistently 1 then simply ignore
+ * the IV bit and restart Autoneg
*/
for (i = 0; i < AN_RETRY_COUNT; i++) {
udelay(10);
rxcw = er32(RXCW);
- if ((rxcw & E1000_RXCW_IV) &&
- !((rxcw & E1000_RXCW_SYNCH) &&
- (rxcw & E1000_RXCW_C))) {
+ if ((rxcw & E1000_RXCW_SYNCH) &&
+ (rxcw & E1000_RXCW_C))
+ continue;
+
+ if (rxcw & E1000_RXCW_IV) {
mac->serdes_has_link = false;
mac->serdes_link_state =
e1000_serdes_link_down;
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 351a4097b2ba..76edbc1be33b 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -103,6 +103,7 @@
#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 6e6fffb34581..cd153326c3cf 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -514,6 +514,7 @@ extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr);
extern unsigned int copybreak;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 905e2147d918..0349e2478df8 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1897,7 +1897,6 @@ static int e1000_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 4) &&
@@ -1916,9 +1915,9 @@ static int e1000_set_coalesce(struct net_device *netdev,
}
if (adapter->itr_setting != 0)
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
else
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, 0);
return 0;
}
@@ -2062,6 +2061,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_rxnfc = e1000_get_rxnfc,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 238ab2f8a5e7..e3a7b07df629 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -325,24 +325,46 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
**/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
- u16 phy_reg;
- u32 phy_id;
+ u16 phy_reg = 0;
+ u32 phy_id = 0;
+ s32 ret_val;
+ u16 retry_count;
+
+ for (retry_count = 0; retry_count < 2; retry_count++) {
+ ret_val = e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF))
+ continue;
+ phy_id = (u32)(phy_reg << 16);
- e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
- phy_id = (u32)(phy_reg << 16);
- e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
- phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ ret_val = e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
+ if (ret_val || (phy_reg == 0xFFFF)) {
+ phy_id = 0;
+ continue;
+ }
+ phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
+ break;
+ }
if (hw->phy.id) {
if (hw->phy.id == phy_id)
return true;
- } else {
- if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
- hw->phy.id = phy_id;
+ } else if (phy_id) {
+ hw->phy.id = phy_id;
+ hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
return true;
}
- return false;
+ /*
+ * In case the PHY needs to be in mdio slow mode,
+ * set slow mode and try to get the PHY id again.
+ */
+ hw->phy.ops.release(hw);
+ ret_val = e1000_set_mdio_slow_mode_hv(hw);
+ if (!ret_val)
+ ret_val = e1000e_get_phy_id(hw);
+ hw->phy.ops.acquire(hw);
+
+ return !ret_val;
}
/**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 31d37a2b5ba8..95b245310f17 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
* @sk_buff: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
- __le16 csum, struct sk_buff *skb)
+ struct sk_buff *skb)
{
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
@@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
if (status & E1000_RXD_STAT_IXSM)
return;
- /* TCP/UDP checksum error bit is set */
- if (errors & E1000_RXD_ERR_TCPE) {
+ /* TCP/UDP checksum error bit or IP checksum error bit is set */
+ if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
/* let the stack verify checksum errors */
adapter->hw_csum_err++;
return;
@@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
return;
/* It must be a TCP or UDP packet with a valid checksum */
- if (status & E1000_RXD_STAT_TCPCS) {
- /* TCP checksum is good */
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- /*
- * IP fragment with UDP payload
- * Hardware complements the payload checksum, so we undo it
- * and then put the value in host order for further stack use.
- */
- __sum16 sum = (__force __sum16)swab16((__force u16)csum);
- skb->csum = csum_unfold(~sum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_good++;
}
@@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
/* Receive Checksum Offload */
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -1341,8 +1328,7 @@ copydone:
total_rx_bytes += skb->len;
total_rx_packets++;
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
}
}
- /* Receive Checksum Offload XXX recompute due to CRC strip? */
- e1000_rx_checksum(adapter, staterr,
- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
+ /* Receive Checksum Offload */
+ e1000_rx_checksum(adapter, staterr, skb);
e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
@@ -2174,7 +2159,7 @@ void e1000e_release_hw_control(struct e1000_adapter *adapter)
}
/**
- * @e1000_alloc_ring - allocate memory for a ring structure
+ * e1000_alloc_ring_dma - allocate memory for a ring structure
**/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
struct e1000_ring *ring)
@@ -2489,6 +2474,30 @@ set_itr_now:
}
/**
+ * e1000e_write_itr - write the ITR value to the appropriate registers
+ * @adapter: address of board private structure
+ * @itr: new ITR value to program
+ *
+ * e1000e_write_itr determines if the adapter is in MSI-X mode
+ * and, if so, writes the EITR registers with the ITR value.
+ * Otherwise, it writes the ITR value into the ITR register.
+ **/
+void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
+
+ if (adapter->msix_entries) {
+ int vector;
+
+ for (vector = 0; vector < adapter->num_vectors; vector++)
+ writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
+ } else {
+ ew32(ITR, new_itr);
+ }
+}
+
+/**
* e1000_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
**/
@@ -3074,7 +3083,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* irq moderation */
ew32(RADV, adapter->rx_abs_int_delay);
if ((adapter->itr_setting != 0) && (adapter->itr != 0))
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
ctrl_ext = er32(CTRL_EXT);
/* Auto-Mask interrupts upon ICR access */
@@ -3098,19 +3107,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
/* Enable Receive Checksum Offload for TCP and UDP */
rxcsum = er32(RXCSUM);
- if (adapter->netdev->features & NETIF_F_RXCSUM) {
+ if (adapter->netdev->features & NETIF_F_RXCSUM)
rxcsum |= E1000_RXCSUM_TUOFL;
-
- /*
- * IPv4 payload checksum for UDP fragments must be
- * used in conjunction with packet-split.
- */
- if (adapter->rx_ps_pages)
- rxcsum |= E1000_RXCSUM_IPPCSE;
- } else {
+ else
rxcsum &= ~E1000_RXCSUM_TUOFL;
- /* no need to clear IPPCSE as it defaults to 0 */
- }
ew32(RXCSUM, rxcsum);
if (adapter->hw.mac.type == e1000_pch2lan) {
@@ -3510,14 +3510,14 @@ void e1000e_reset(struct e1000_adapter *adapter)
dev_info(&adapter->pdev->dev,
"Interrupt Throttle Rate turned off\n");
adapter->flags2 |= FLAG2_DISABLE_AIM;
- ew32(ITR, 0);
+ e1000e_write_itr(adapter, 0);
}
} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
dev_info(&adapter->pdev->dev,
"Interrupt Throttle Rate turned on\n");
adapter->flags2 &= ~FLAG2_DISABLE_AIM;
adapter->itr = 20000;
- ew32(ITR, 1000000000 / (adapter->itr * 256));
+ e1000e_write_itr(adapter, adapter->itr);
}
}
@@ -4600,7 +4600,7 @@ link_up:
adapter->gorc - adapter->gotc) / 10000;
u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
- ew32(ITR, 1000000000 / (itr * 256));
+ e1000e_write_itr(adapter, itr);
}
/* Cause software interrupt to ensure Rx ring is cleaned */
@@ -5241,22 +5241,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* Jumbo frame support */
- if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
- if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
- e_err("Jumbo Frames not supported.\n");
- return -EINVAL;
- }
-
- /*
- * IP payload checksum (enabled with jumbos/packet-split when
- * Rx checksum is enabled) and generation of RSS hash is
- * mutually exclusive in the hardware.
- */
- if ((netdev->features & NETIF_F_RXCSUM) &&
- (netdev->features & NETIF_F_RXHASH)) {
- e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
- return -EINVAL;
- }
+ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+ !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+ e_err("Jumbo Frames not supported.\n");
+ return -EINVAL;
}
/* Supported frame sizes */
@@ -6030,17 +6018,6 @@ static int e1000_set_features(struct net_device *netdev,
NETIF_F_RXALL)))
return 0;
- /*
- * IP payload checksum (enabled with jumbos/packet-split when Rx
- * checksum is enabled) and generation of RSS hash is mutually
- * exclusive in the hardware.
- */
- if (adapter->rx_ps_pages &&
- (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
- e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
- return -EINVAL;
- }
-
if (changed & NETIF_F_RXFCS) {
if (features & NETIF_F_RXFCS) {
adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
@@ -6238,7 +6215,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
- e_info("PHY reset is blocked due to SOL/IDER session.\n");
+ dev_info(&pdev->dev,
+ "PHY reset is blocked due to SOL/IDER session.\n");
/* Set initial default active device features */
netdev->features = (NETIF_F_SG |
@@ -6288,7 +6266,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
break;
if (i == 2) {
- e_err("The NVM Checksum Is Not Valid\n");
+ dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -6298,13 +6276,15 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* copy the MAC address */
if (e1000e_read_mac_addr(&adapter->hw))
- e_err("NVM Read Error while reading MAC address\n");
+ dev_err(&pdev->dev,
+ "NVM Read Error while reading MAC address\n");
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- e_err("Invalid MAC Address: %pM\n", netdev->perm_addr);
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->perm_addr);
err = -EIO;
goto err_eeprom;
}
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 55cc1565bc2f..dfbfa7fd98c3 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -199,16 +199,19 @@ static int __devinit e1000_validate_option(unsigned int *value,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- e_info("%s Enabled\n", opt->name);
+ dev_info(&adapter->pdev->dev, "%s Enabled\n",
+ opt->name);
return 0;
case OPTION_DISABLED:
- e_info("%s Disabled\n", opt->name);
+ dev_info(&adapter->pdev->dev, "%s Disabled\n",
+ opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- e_info("%s set to %i\n", opt->name, *value);
+ dev_info(&adapter->pdev->dev, "%s set to %i\n",
+ opt->name, *value);
return 0;
}
break;
@@ -220,7 +223,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- e_info("%s\n", ent->str);
+ dev_info(&adapter->pdev->dev, "%s\n",
+ ent->str);
return 0;
}
}
@@ -230,8 +234,8 @@ static int __devinit e1000_validate_option(unsigned int *value,
BUG();
}
- e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
- opt->err);
+ dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n",
+ opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -251,8 +255,10 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
int bd = adapter->bd_number;
if (bd >= E1000_MAX_NIC) {
- e_notice("Warning: no configuration for board #%i\n", bd);
- e_notice("Using defaults for all values\n");
+ dev_notice(&adapter->pdev->dev,
+ "Warning: no configuration for board #%i\n", bd);
+ dev_notice(&adapter->pdev->dev,
+ "Using defaults for all values\n");
}
{ /* Transmit Interrupt Delay */
@@ -366,27 +372,32 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
* default values
*/
if (adapter->itr > 4)
- e_info("%s set to default %d\n", opt.name,
- adapter->itr);
+ dev_info(&adapter->pdev->dev,
+ "%s set to default %d\n", opt.name,
+ adapter->itr);
}
adapter->itr_setting = adapter->itr;
switch (adapter->itr) {
case 0:
- e_info("%s turned off\n", opt.name);
+ dev_info(&adapter->pdev->dev, "%s turned off\n",
+ opt.name);
break;
case 1:
- e_info("%s set to dynamic mode\n", opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to dynamic mode\n", opt.name);
adapter->itr = 20000;
break;
case 3:
- e_info("%s set to dynamic conservative mode\n",
- opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to dynamic conservative mode\n",
+ opt.name);
adapter->itr = 20000;
break;
case 4:
- e_info("%s set to simplified (2000-8000 ints) mode\n",
- opt.name);
+ dev_info(&adapter->pdev->dev,
+ "%s set to simplified (2000-8000 ints) mode\n",
+ opt.name);
break;
default:
/*
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 35d1e4f2c92c..10efcd88dca0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -117,6 +117,7 @@
/* TX Rate Limit Registers */
#define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */
#define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
/* Split and Replication RX Control - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ae6d3f393a54..9e572dd29ab2 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -65,19 +65,30 @@ struct igb_adapter;
#define MAX_Q_VECTORS 8
/* Transmit and receive queues */
-#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \
- (hw->mac.type > e1000_82575 ? 8 : 4)))
-#define IGB_MAX_RX_QUEUES_I210 4
+#define IGB_MAX_RX_QUEUES 8
+#define IGB_MAX_RX_QUEUES_82575 4
#define IGB_MAX_RX_QUEUES_I211 2
-#define IGB_MAX_TX_QUEUES 16
-#define IGB_MAX_TX_QUEUES_I210 4
-#define IGB_MAX_TX_QUEUES_I211 2
+#define IGB_MAX_TX_QUEUES 8
#define IGB_MAX_VF_MC_ENTRIES 30
#define IGB_MAX_VF_FUNCTIONS 8
#define IGB_MAX_VFTA_ENTRIES 128
#define IGB_82576_VF_DEV_ID 0x10CA
#define IGB_I350_VF_DEV_ID 0x1520
+/* NVM version defines */
+#define IGB_MAJOR_MASK 0xF000
+#define IGB_MINOR_MASK 0x0FF0
+#define IGB_BUILD_MASK 0x000F
+#define IGB_COMB_VER_MASK 0x00FF
+#define IGB_MAJOR_SHIFT 12
+#define IGB_MINOR_SHIFT 4
+#define IGB_COMB_VER_SHFT 8
+#define IGB_NVM_VER_INVALID 0xFFFF
+#define IGB_ETRACK_SHIFT 16
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -371,6 +382,7 @@ struct igb_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ char fw_version[32];
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -420,6 +432,7 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_power_up_link(struct igb_adapter *);
+extern void igb_set_fw_version(struct igb_adapter *);
#ifdef CONFIG_IGB_PTP
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_remove(struct igb_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 812d4f963bd1..a19c84cad0e9 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -710,6 +710,7 @@ static int igb_set_eeprom(struct net_device *netdev,
if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG)))
hw->nvm.ops.update(hw);
+ igb_set_fw_version(adapter);
kfree(eeprom_buff);
return ret_val;
}
@@ -718,20 +719,16 @@ static void igb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- u16 eeprom_data;
strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
- /* EEPROM image version # is reported as firmware version # for
- * 82575 controllers */
- adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
- "%d.%d-%d",
- (eeprom_data & 0xF000) >> 12,
- (eeprom_data & 0x0FF0) >> 4,
- eeprom_data & 0x000F);
-
+ /*
+ * EEPROM image version # is reported as firmware version # for
+ * 82575 controllers
+ */
+ strlcpy(drvinfo->fw_version, adapter->fw_version,
+ sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = IGB_STATS_LEN;
@@ -2271,6 +2268,38 @@ static void igb_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
+#ifdef CONFIG_IGB_PTP
+static int igb_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (adapter->ptp_clock)
+ info->phc_index = ptp_clock_index(adapter->ptp_clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL) |
+ (1 << HWTSTAMP_FILTER_SOME) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+#endif
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2299,6 +2328,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_coalesce = igb_set_coalesce,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
+#ifdef CONFIG_IGB_PTP
+ .get_ts_info = igb_ethtool_get_ts_info,
+#endif
};
void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index dd3bfe8cd36c..1050411e7ca3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -59,9 +59,9 @@
#endif
#include "igb.h"
-#define MAJ 3
-#define MIN 4
-#define BUILD 7
+#define MAJ 4
+#define MIN 0
+#define BUILD 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -1048,11 +1048,6 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
numvecs += adapter->num_tx_queues;
- /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */
- if ((adapter->hw.mac.type == e1000_i210)
- || (adapter->hw.mac.type == e1000_i211))
- numvecs = 4;
-
/* store the number of vectors reserved for queues */
adapter->num_q_vectors = numvecs;
@@ -1505,11 +1500,12 @@ static void igb_configure(struct igb_adapter *adapter)
**/
void igb_power_up_link(struct igb_adapter *adapter)
{
+ igb_reset_phy(&adapter->hw);
+
if (adapter->hw.phy.media_type == e1000_media_type_copper)
igb_power_up_phy_copper(&adapter->hw);
else
igb_power_up_serdes_link_82575(&adapter->hw);
- igb_reset_phy(&adapter->hw);
}
/**
@@ -1821,6 +1817,69 @@ static const struct net_device_ops igb_netdev_ops = {
};
/**
+ * igb_set_fw_version - Configure version string for ethtool
+ * @adapter: adapter struct
+ *
+ **/
+void igb_set_fw_version(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
+ u16 major, build, patch, fw_version;
+ u32 etrack_id;
+
+ hw->nvm.ops.read(hw, 5, 1, &fw_version);
+ if (adapter->hw.mac.type != e1000_i211) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
+ etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
+
+ /* combo image version needs to be found */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) &&
+ (comb_offset != IGB_NVM_VER_INVALID)) {
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* Only display Option Rom if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != IGB_NVM_VER_INVALID) &&
+ (comb_verl != IGB_NVM_VER_INVALID))) {
+ major = comb_verl >> IGB_COMB_VER_SHFT;
+ build = (comb_verl << IGB_COMB_VER_SHFT) |
+ (comb_verh >> IGB_COMB_VER_SHFT);
+ patch = comb_verh & IGB_COMB_VER_MASK;
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d%d, 0x%08x, %d.%d.%d",
+ (fw_version & IGB_MAJOR_MASK) >>
+ IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >>
+ IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK),
+ etrack_id, major, build, patch);
+ goto out;
+ }
+ }
+ snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+ "%d.%d%d, 0x%08x",
+ (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK), etrack_id);
+ } else {
+ snprintf(adapter->fw_version, sizeof(adapter->fw_version),
+ "%d.%d%d",
+ (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
+ (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
+ (fw_version & IGB_BUILD_MASK));
+ }
+out:
+ return;
+}
+
+/**
* igb_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in igb_pci_tbl
@@ -2030,6 +2089,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
goto err_eeprom;
}
+ /* get firmware version for ethtool -i */
+ igb_set_fw_version(adapter);
+
setup_timer(&adapter->watchdog_timer, igb_watchdog,
(unsigned long) adapter);
setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2338,6 +2400,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u32 max_rss_queues;
pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
@@ -2370,40 +2433,69 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
} else
adapter->vfs_allocated_count = max_vfs;
break;
- case e1000_i210:
- case e1000_i211:
- adapter->vfs_allocated_count = 0;
- break;
default:
break;
}
#endif /* CONFIG_PCI_IOV */
+
+ /* Determine the maximum number of RSS queues supported. */
switch (hw->mac.type) {
+ case e1000_i211:
+ max_rss_queues = IGB_MAX_RX_QUEUES_I211;
+ break;
+ case e1000_82575:
case e1000_i210:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210,
- num_online_cpus());
+ max_rss_queues = IGB_MAX_RX_QUEUES_82575;
+ break;
+ case e1000_i350:
+ /* I350 cannot do RSS and SR-IOV at the same time */
+ if (!!adapter->vfs_allocated_count) {
+ max_rss_queues = 1;
+ break;
+ }
+ /* fall through */
+ case e1000_82576:
+ if (!!adapter->vfs_allocated_count) {
+ max_rss_queues = 2;
+ break;
+ }
+ /* fall through */
+ case e1000_82580:
+ default:
+ max_rss_queues = IGB_MAX_RX_QUEUES;
break;
+ }
+
+ adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());
+
+ /* Determine if we need to pair queues. */
+ switch (hw->mac.type) {
+ case e1000_82575:
case e1000_i211:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211,
- num_online_cpus());
+ /* Device supports enough interrupts without queue pairing. */
break;
+ case e1000_82576:
+ /*
+ * If VFs are going to be allocated with RSS queues then we
+ * should pair the queues in order to conserve interrupts due
+ * to limited supply.
+ */
+ if ((adapter->rss_queues > 1) &&
+ (adapter->vfs_allocated_count > 6))
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+ /* fall through */
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
default:
- adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES,
- num_online_cpus());
+ /*
+ * If rss_queues > half of max_rss_queues, pair the queues in
+ * order to conserve interrupts due to limited supply.
+ */
+ if (adapter->rss_queues > (max_rss_queues / 2))
+ adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
break;
}
- /* i350 cannot do RSS and SR-IOV at the same time */
- if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
- adapter->rss_queues = 1;
-
- /*
- * if rss_queues > 4 or vfs are going to be allocated with rss_queues
- * then we should combine the queues into a queue pair in order to
- * conserve interrupts due to limited supply
- */
- if ((adapter->rss_queues > 4) ||
- ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
- adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
/* Setup and initialize a copy of the hw vlan table array */
adapter->shadow_vfta = kzalloc(sizeof(u32) *
@@ -4917,7 +5009,7 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf)
unsigned int device_id;
u16 thisvf_devfn;
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
igb_set_vf_mac(adapter, vf, mac_addr);
switch (adapter->hw.mac.type) {
@@ -5326,7 +5418,7 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
/* generate a new mac address as we were hotplug removed/added */
if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
- random_ether_addr(vf_mac);
+ eth_random_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -5686,6 +5778,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
+ *
* returns true if ring is completely cleaned
**/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
@@ -6997,6 +7090,11 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
}
wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
+ */
+ wr32(E1000_RTTBCNRM, 0x14);
wr32(E1000_RTTBCNRC, bcnrc_val);
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index d5ee7fa50723..c846ea9131a3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -330,7 +330,17 @@ void igb_ptp_init(struct igb_adapter *adapter)
void igb_ptp_remove(struct igb_adapter *adapter)
{
- cancel_delayed_work_sync(&adapter->overflow_work);
+ switch (adapter->hw.mac.type) {
+ case e1000_i211:
+ case e1000_i210:
+ case e1000_i350:
+ case e1000_82580:
+ case e1000_82576:
+ cancel_delayed_work_sync(&adapter->overflow_work);
+ break;
+ default:
+ return;
+ }
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 8ce67064b9c5..90eef07943f4 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev,
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) ||
- ((ec->rx_coalesce_usecs > 3) &&
- (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) ||
- (ec->rx_coalesce_usecs == 2))
- return -EINVAL;
-
- /* convert to rate of irq's per second */
- if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
+ if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) &&
+ (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) {
+ adapter->current_itr = ec->rx_coalesce_usecs << 2;
+ adapter->requested_itr = 1000000000 /
+ (adapter->current_itr * 256);
+ } else if ((ec->rx_coalesce_usecs == 3) ||
+ (ec->rx_coalesce_usecs == 2)) {
adapter->current_itr = IGBVF_START_ITR;
adapter->requested_itr = ec->rx_coalesce_usecs;
- } else {
- adapter->current_itr = ec->rx_coalesce_usecs << 2;
+ } else if (ec->rx_coalesce_usecs == 0) {
+ /*
+ * The user's desire is to turn off interrupt throttling
+ * altogether, but due to HW limitations, we can't do that.
+ * Instead we set a very small value in EITR, which would
+ * allow ~967k interrupts per second, but allow the adapter's
+ * internal clocking to still function properly.
+ */
+ adapter->current_itr = 4;
adapter->requested_itr = 1000000000 /
(adapter->current_itr * 256);
- }
+ } else
+ return -EINVAL;
writel(adapter->current_itr,
hw->hw_addr + adapter->rx_ring->itr_register);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 8ec74b07f940..0696abfe9944 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -766,6 +766,7 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
/**
* igbvf_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
+ *
* returns true if ring is completely cleaned
**/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index 30a6cc426037..eea0e10ce12f 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -283,7 +283,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
return err;
}
-/** e1000_rlpml_set_vf - Set the maximum receive packet length
+/**
+ * e1000_rlpml_set_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
@@ -302,7 +303,7 @@ void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
* e1000_rar_set_vf - set device MAC address
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
- * @index receive address array register
+ * @index: receive address array register
**/
static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
{
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index 99b69adb4a0f..bf9a220f71fb 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -32,6 +32,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/pci_ids.h>
#include "ixgb_hw.h"
#include "ixgb_ids.h"
@@ -96,7 +97,7 @@ static u32 ixgb_mac_reset(struct ixgb_hw *hw)
ASSERT(!(ctrl_reg & IXGB_CTRL0_RST));
#endif
- if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID) {
+ if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN) {
ctrl_reg = /* Enable interrupt from XFP and SerDes */
IXGB_CTRL1_GPI0_EN |
IXGB_CTRL1_SDP6_DIR |
@@ -271,7 +272,7 @@ ixgb_identify_phy(struct ixgb_hw *hw)
}
/* update phy type for sun specific board */
- if (hw->subsystem_vendor_id == SUN_SUBVENDOR_ID)
+ if (hw->subsystem_vendor_id == PCI_VENDOR_ID_SUN)
phy_type = ixgb_phy_type_bcm;
return phy_type;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
index 2a58847f46e8..32c1b302d791 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ids.h
@@ -33,11 +33,6 @@
** The Device and Vendor IDs for 10 Gigabit MACs
**********************************************************************/
-#define INTEL_VENDOR_ID 0x8086
-#define INTEL_SUBVENDOR_ID 0x8086
-#define SUN_VENDOR_ID 0x108E
-#define SUN_SUBVENDOR_ID 0x108E
-
#define IXGB_DEVICE_ID_82597EX 0x1048
#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 5fce363d810a..d05fc95befc5 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -54,13 +54,13 @@ MODULE_PARM_DESC(copybreak,
* Class, Class Mask, private data (not used) }
*/
static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_CX4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_SR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,
+ {PCI_VENDOR_ID_INTEL, IXGB_DEVICE_ID_82597EX_LR,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
/* required last entry */
@@ -195,7 +195,7 @@ ixgb_irq_enable(struct ixgb_adapter *adapter)
{
u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
IXGB_INT_TXDW | IXGB_INT_LSC;
- if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
+ if (adapter->hw.subsystem_vendor_id == PCI_VENDOR_ID_SUN)
val |= IXGB_INT_GPI0;
IXGB_WRITE_REG(&adapter->hw, IMS, val);
IXGB_WRITE_FLUSH(&adapter->hw);
@@ -2276,9 +2276,9 @@ static void ixgb_netpoll(struct net_device *dev)
#endif
/**
- * ixgb_io_error_detected() - called when PCI error is detected
- * @pdev pointer to pci device with error
- * @state pci channel state after error
+ * ixgb_io_error_detected - called when PCI error is detected
+ * @pdev: pointer to pci device with error
+ * @state: pci channel state after error
*
* This callback is called by the PCI subsystem whenever
* a PCI bus error is detected.
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 0bdf06bc5c49..5fd5d04c26c9 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,11 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
-
+ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7af291e236bf..b9623e9ea895 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -77,17 +77,18 @@
#define IXGBE_MAX_FCPAUSE 0xFFFF
/* Supported Rx Buffer Sizes */
-#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
+#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
/*
- * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN mans we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
+ * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
+ * this adds up to 448 bytes of extra data.
+ *
+ * Since netdev_alloc_skb now allocates a page fragment we can use a value
+ * of 256 and the resultant skb will have a truesize of 960 or less.
*/
-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512
+#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256
#define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
@@ -113,7 +114,7 @@
#define IXGBE_MAX_VFTA_ENTRIES 128
#define MAX_EMULATION_MAC_ADDRS 16
#define IXGBE_MAX_PF_MACVLANS 15
-#define VMDQ_P(p) ((p) + adapter->num_vfs)
+#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515
@@ -130,7 +131,6 @@ struct vf_data_storage {
u16 tx_rate;
u16 vlan_count;
u8 spoofchk_enabled;
- struct pci_dev *vfdev;
};
struct vf_macvlans {
@@ -278,10 +278,16 @@ enum ixgbe_ring_f_enum {
#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature {
- int indices;
- int mask;
+ u16 limit; /* upper limit on feature indices */
+ u16 indices; /* current value of indices */
+ u16 mask; /* Mask used for feature to ring mapping */
+ u16 offset; /* offset to start of feature */
} ____cacheline_internodealigned_in_smp;
+#define IXGBE_82599_VMDQ_8Q_MASK 0x78
+#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
+#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
+
/*
* FCoE requires that all Rx buffers be over 2200 bytes in length. Since
* this is twice the size of a half page we need to double the page order
@@ -315,7 +321,7 @@ struct ixgbe_ring_container {
? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
-/* MAX_MSIX_Q_VECTORS of these are allocated,
+/* MAX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbe_q_vector {
@@ -401,11 +407,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_VECTORS_82599 64
-#define MAX_MSIX_Q_VECTORS_82599 64
+#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
-#define MAX_MSIX_Q_VECTORS_82598 16
+#define MAX_Q_VECTORS_82598 16
-#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
+#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
#define MIN_MSIX_Q_VECTORS 1
@@ -427,35 +433,33 @@ struct ixgbe_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
-#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
-#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
-#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
-#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27)
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29)
+#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
+#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
+#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7)
+#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
+#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11)
+#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12)
+#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13)
+#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19)
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
+#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
@@ -496,7 +500,7 @@ struct ixgbe_adapter {
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
- struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
/* DCB parameters */
struct ieee_pfc *ixgbe_ieee_pfc;
@@ -507,8 +511,8 @@ struct ixgbe_adapter {
u8 dcbx_cap;
enum ixgbe_fc_mode last_lfc_mode;
- int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
+ int num_q_vectors; /* current number of q_vectors for device */
+ int max_q_vectors; /* true count of q_vectors for device */
struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
@@ -561,6 +565,7 @@ struct ixgbe_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ int rx_hwtstamp_filter;
u32 base_incval;
u32 cycle_speed;
#endif /* CONFIG_IXGBE_PTP */
@@ -686,7 +691,6 @@ extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
u8 *hdr_len);
-extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb);
@@ -695,6 +699,8 @@ extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
+extern int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
+extern void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_enable(struct net_device *netdev);
extern int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
@@ -704,6 +710,7 @@ extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
struct netdev_fcoe_hbainfo *info);
+extern u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
@@ -718,6 +725,7 @@ extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb);
extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb);
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index dee64d2703f0..50fc137501da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -241,7 +241,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
/* Determine 1G link capabilities off of SFP+ type */
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*negotiation = true;
goto out;
@@ -1023,6 +1025,9 @@ mac_reset_top:
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
@@ -2104,6 +2109,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
.set_vmdq = &ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
.clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 77ac41feb0fe..90e41db3cb69 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2848,6 +2848,31 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/**
+ * This function should only be involved in the IOV mode.
+ * In IOV mode, Default pool is next pool after the number of
+ * VFs advertized and not 0.
+ * MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
+ *
+ * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ **/
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
+{
+ u32 rar = hw->mac.san_mac_rar_index;
+
+ if (vmdq < 32) {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ }
+
+ return 0;
+}
+
+/**
* ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
* @hw: pointer to hardware structure
**/
@@ -3132,7 +3157,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
}
/**
- * ixgbe_get_wwn_prefix_generic Get alternative WWNN/WWPN prefix from
+ * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
* the EEPROM
* @hw: pointer to hardware structure
* @wwnn_prefix: the alternative WWNN prefix
@@ -3200,20 +3225,22 @@ void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
* PFVFSPOOF register array is size 8 with 8 bits assigned to
* MAC anti-spoof enables in each register array element.
*/
- for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ for (j = 0; j < pf_target_reg; j++)
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
- /* If not enabling anti-spoofing then done */
- if (!enable)
- return;
-
/*
* The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Reset the bit assigned to the PF
+ * emulation mode NICs. Do not set the bits assigned to the PF
*/
- pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
- pfvfspoof ^= (1 << pf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+ pfvfspoof &= (1 << pf_target_shift) - 1;
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+ /*
+ * Remaining pools belong to the PF so they do not need to have
+ * anti-spoofing enabled.
+ */
+ for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
}
/**
@@ -3325,6 +3352,7 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
* ixgbe_calculate_checksum - Calculate checksum for buffer
* @buffer: pointer to EEPROM
* @length: size of EEPROM to calculate a checksum for
+ *
* Calculates the checksum for some buffer on a specified length. The
* checksum calculated is returned.
**/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 6222fdb3d3f1..d813d1188c36 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -85,6 +85,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 8bfaaee5ac5b..9bc17c0cb972 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -180,67 +180,83 @@ out:
void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
{
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- *pfc_en = 0;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- *pfc_en |= !!(cfg->tc_config[i].dcb_pfc & 0xF) << i;
+ for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
+ if (tc_config[tc].dcb_pfc != pfc_disabled)
+ *pfc_en |= 1 << tc;
+ }
}
void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction,
u16 *refill)
{
- struct tc_bw_alloc *p;
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &cfg->tc_config[i].path[direction];
- refill[i] = p->data_credits_refill;
- }
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ refill[tc] = tc_config[tc].path[direction].data_credits_refill;
}
void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max)
{
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- max[i] = cfg->tc_config[i].desc_credits_max;
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ max[tc] = tc_config[tc].desc_credits_max;
}
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction,
u8 *bwgid)
{
- struct tc_bw_alloc *p;
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &cfg->tc_config[i].path[direction];
- bwgid[i] = p->bwg_id;
- }
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ bwgid[tc] = tc_config[tc].path[direction].bwg_id;
}
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
u8 *ptype)
{
- struct tc_bw_alloc *p;
- int i;
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ int tc;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- p = &cfg->tc_config[i].path[direction];
- ptype[i] = p->prio_type;
+ for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++)
+ ptype[tc] = tc_config[tc].path[direction].prio_type;
+}
+
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
+{
+ struct tc_configuration *tc_config = &cfg->tc_config[0];
+ u8 prio_mask = 1 << up;
+ u8 tc = cfg->num_tcs.pg_tcs;
+
+ /* If tc is 0 then DCB is likely not enabled or supported */
+ if (!tc)
+ goto out;
+
+ /*
+ * Test from maximum TC to 1 and report the first match we find. If
+ * we find no match we can assume that the TC is 0 since the TC must
+ * be set for all user priorities
+ */
+ for (tc--; tc; tc--) {
+ if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
+ break;
}
+out:
+ return tc;
}
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
{
- int i, up;
- unsigned long bitmap;
+ u8 up;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- bitmap = cfg->tc_config[i].path[direction].up_to_tc_bitmap;
- for_each_set_bit(up, &bitmap, MAX_USER_PRIORITY)
- map[up] = i;
- }
+ for (up = 0; up < MAX_USER_PRIORITY; up++)
+ map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index 24333b718166..1f4108ee154b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -146,6 +146,7 @@ void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *);
void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *);
void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *);
+u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8);
/* DCB credits calculation */
s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 5164a21b13ca..f1e002d5fa8f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -151,34 +151,21 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
- int err = 0;
- u8 prio_tc[MAX_USER_PRIORITY] = {0};
- int i;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
/* Fail command if not in CEE mode */
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
/* verify there is something to do, if not then exit */
- if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- goto out;
-
- if (state > 0) {
- err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
- ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
- } else {
- err = ixgbe_setup_tc(netdev, 0);
- }
-
- if (err)
+ if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
goto out;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
-
+ err = ixgbe_setup_tc(netdev,
+ state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
out:
- return err ? 1 : 0;
+ return !!err;
}
static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
@@ -584,9 +571,6 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
if (err)
goto err_out;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- netdev_set_prio_tc_map(dev, i, ets->prio_tc[i]);
-
err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
err_out:
return err;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3178f1ec3711..4104ea25d818 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -154,100 +154,60 @@ static int ixgbe_get_settings(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ ixgbe_link_speed supported_link;
u32 link_speed = 0;
+ bool autoneg;
bool link_up;
- ecmd->supported = SUPPORTED_10000baseT_Full;
- ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->transceiver = XCVR_EXTERNAL;
- if ((hw->phy.media_type == ixgbe_media_type_copper) ||
- (hw->phy.multispeed_fiber)) {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg);
-
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- ecmd->supported |= SUPPORTED_100baseT_Full;
- break;
- default:
- break;
- }
-
- ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.autoneg_advertised) {
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_100_FULL)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_10GB_FULL)
- ecmd->advertising |= ADVERTISED_10000baseT_Full;
- if (hw->phy.autoneg_advertised &
- IXGBE_LINK_SPEED_1GB_FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
- } else {
- /*
- * Default advertised modes in case
- * phy.autoneg_advertised isn't set.
- */
- ecmd->advertising |= (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full);
- if (hw->mac.type == ixgbe_mac_X540)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- }
-
- if (hw->phy.media_type == ixgbe_media_type_copper) {
- ecmd->supported |= SUPPORTED_TP;
- ecmd->advertising |= ADVERTISED_TP;
- ecmd->port = PORT_TP;
- } else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising |= ADVERTISED_FIBRE;
- ecmd->port = PORT_FIBRE;
- }
- } else if (hw->phy.media_type == ixgbe_media_type_backplane) {
- /* Set as FIBRE until SERDES defined in kernel */
- if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
- ecmd->supported = (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
- } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
- (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- } else {
- ecmd->supported |= (SUPPORTED_1000baseT_Full |
- SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_1000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- }
+ hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
+
+ /* set the supported link speeds */
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->supported |= SUPPORTED_10000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->supported |= SUPPORTED_1000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->supported |= SUPPORTED_100baseT_Full;
+
+ /* set the advertised speeds */
+ if (hw->phy.autoneg_advertised) {
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
} else {
- ecmd->supported |= SUPPORTED_FIBRE;
- ecmd->advertising = (ADVERTISED_10000baseT_Full |
- ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->autoneg = AUTONEG_DISABLE;
+ /* default modes in case phy.autoneg_advertised isn't set */
+ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
+ ecmd->advertising |= ADVERTISED_10000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
+ ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (supported_link & IXGBE_LINK_SPEED_100_FULL)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
}
- /* Get PHY type */
+ if (autoneg) {
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->autoneg = AUTONEG_ENABLE;
+ } else
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ /* Determine the remaining settings based on the PHY type. */
switch (adapter->hw.phy.type) {
case ixgbe_phy_tn:
case ixgbe_phy_aq:
case ixgbe_phy_cu_unknown:
- /* Copper 10G-BASET */
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
break;
case ixgbe_phy_qt:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
break;
case ixgbe_phy_nl:
@@ -257,42 +217,59 @@ static int ixgbe_get_settings(struct net_device *netdev,
case ixgbe_phy_sfp_avago:
case ixgbe_phy_sfp_intel:
case ixgbe_phy_sfp_unknown:
- switch (adapter->hw.phy.sfp_type) {
/* SFP+ devices, further checking needed */
+ switch (adapter->hw.phy.sfp_type) {
case ixgbe_sfp_type_da_cu:
case ixgbe_sfp_type_da_cu_core0:
case ixgbe_sfp_type_da_cu_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_DA;
break;
case ixgbe_sfp_type_sr:
case ixgbe_sfp_type_lr:
case ixgbe_sfp_type_srlr_core0:
case ixgbe_sfp_type_srlr_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_FIBRE;
break;
case ixgbe_sfp_type_not_present:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_NONE;
break;
case ixgbe_sfp_type_1g_cu_core0:
case ixgbe_sfp_type_1g_cu_core1:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
ecmd->port = PORT_TP;
- ecmd->supported = SUPPORTED_TP;
- ecmd->advertising = (ADVERTISED_1000baseT_Full |
- ADVERTISED_TP);
+ break;
+ case ixgbe_sfp_type_1g_sx_core0:
+ case ixgbe_sfp_type_1g_sx_core1:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
break;
case ixgbe_sfp_type_unknown:
default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_OTHER;
break;
}
break;
case ixgbe_phy_xaui:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_NONE;
break;
case ixgbe_phy_unknown:
case ixgbe_phy_generic:
case ixgbe_phy_sfp_unsupported:
default:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
ecmd->port = PORT_OTHER;
break;
}
@@ -2113,7 +2090,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
int i;
- int num_vectors;
u16 tx_itr_param, rx_itr_param;
bool need_reset = false;
@@ -2149,12 +2125,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
/* check the old value and enable RSC if necessary */
need_reset = ixgbe_update_rsc(adapter);
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_vectors = 1;
-
- for (i = 0; i < num_vectors; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
q_vector = adapter->q_vector[i];
if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */
@@ -2274,10 +2245,6 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
{
cmd->data = 0;
- /* if RSS is disabled then report no hashing */
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- return 0;
-
/* Report default options for RSS on ixgbe */
switch (cmd->flow_type) {
case TCP_V4_FLOW:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index bc07933d67da..ae73ef14fdf3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -38,7 +38,7 @@
/**
* ixgbe_fcoe_clear_ddp - clear the given ddp context
- * @ddp - ptr to the ixgbe_fcoe_ddp
+ * @ddp: ptr to the ixgbe_fcoe_ddp
*
* Returns : none
*
@@ -104,10 +104,10 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
udelay(100);
}
if (ddp->sgl)
- pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
+ dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
DMA_FROM_DEVICE);
if (ddp->pool) {
- pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+ dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
ddp->pool = NULL;
}
@@ -134,6 +134,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
struct ixgbe_hw *hw;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
struct scatterlist *sg;
unsigned int i, j, dmacount;
unsigned int len;
@@ -144,8 +145,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
unsigned int thislen = 0;
u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
dma_addr_t addr = 0;
- struct pci_pool *pool;
- unsigned int cpu;
if (!netdev || !sgl)
return 0;
@@ -162,11 +161,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
return 0;
fcoe = &adapter->fcoe;
- if (!fcoe->pool) {
- e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
- return 0;
- }
-
ddp = &fcoe->ddp[xid];
if (ddp->sgl) {
e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
@@ -175,22 +169,32 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
}
ixgbe_fcoe_clear_ddp(ddp);
+
+ if (!fcoe->ddp_pool) {
+ e_warn(drv, "No ddp_pool resources allocated\n");
+ return 0;
+ }
+
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
+ if (!ddp_pool->pool) {
+ e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
+ goto out_noddp;
+ }
+
/* setup dma from scsi command sgl */
- dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
if (dmacount == 0) {
e_err(drv, "xid 0x%x DMA map error\n", xid);
- return 0;
+ goto out_noddp;
}
/* alloc the udl from per cpu ddp pool */
- cpu = get_cpu();
- pool = *per_cpu_ptr(fcoe->pool, cpu);
- ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
+ ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
}
- ddp->pool = pool;
+ ddp->pool = ddp_pool->pool;
ddp->sgl = sgl;
ddp->sgc = sgc;
@@ -201,7 +205,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
while (len) {
/* max number of buffers allowed in one DDP context */
if (j >= IXGBE_BUFFCNT_MAX) {
- *per_cpu_ptr(fcoe->pcpu_noddp, cpu) += 1;
+ ddp_pool->noddp++;
goto out_noddp_free;
}
@@ -241,7 +245,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
*/
if (lastsize == bufflen) {
if (j >= IXGBE_BUFFCNT_MAX) {
- *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) += 1;
+ ddp_pool->noddp_ext_buff++;
goto out_noddp_free;
}
@@ -293,11 +297,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
return 1;
out_noddp_free:
- pci_pool_free(pool, ddp->udl, ddp->udp);
+ dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
ixgbe_fcoe_clear_ddp(ddp);
out_noddp_unmap:
- pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+out_noddp:
put_cpu();
return 0;
}
@@ -409,7 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
break;
/* unmap the sg list when FCPRSP is received */
case __constant_cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
- pci_unmap_sg(adapter->pdev, ddp->sgl,
+ dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
ddp->err = ddp_err;
ddp->sgl = NULL;
@@ -563,44 +568,37 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
return 0;
}
-static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
+static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
- unsigned int cpu;
- struct pci_pool **pool;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
- for_each_possible_cpu(cpu) {
- pool = per_cpu_ptr(fcoe->pool, cpu);
- if (*pool)
- pci_pool_destroy(*pool);
- }
- free_percpu(fcoe->pool);
- fcoe->pool = NULL;
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+ if (ddp_pool->pool)
+ dma_pool_destroy(ddp_pool->pool);
+ ddp_pool->pool = NULL;
}
-static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
+static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
+ struct device *dev,
+ unsigned int cpu)
{
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- unsigned int cpu;
- struct pci_pool **pool;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
+ struct dma_pool *pool;
char pool_name[32];
- fcoe->pool = alloc_percpu(struct pci_pool *);
- if (!fcoe->pool)
- return;
+ snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
- /* allocate pci pool for each cpu */
- for_each_possible_cpu(cpu) {
- snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
- pool = per_cpu_ptr(fcoe->pool, cpu);
- *pool = pci_pool_create(pool_name,
- adapter->pdev, IXGBE_FCPTR_MAX,
- IXGBE_FCPTR_ALIGN, PAGE_SIZE);
- if (!*pool) {
- e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
- ixgbe_fcoe_ddp_pools_free(fcoe);
- return;
- }
- }
+ pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
+ IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+ if (!pool)
+ return -ENOMEM;
+
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+ ddp_pool->pool = pool;
+ ddp_pool->noddp = 0;
+ ddp_pool->noddp_ext_buff = 0;
+
+ return 0;
}
/**
@@ -613,132 +611,171 @@ static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
*/
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
- int i, fcoe_q, fcoe_i;
+ struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
- unsigned int cpu;
-
- if (!fcoe->pool) {
- spin_lock_init(&fcoe->lock);
-
- ixgbe_fcoe_ddp_pools_alloc(adapter);
- if (!fcoe->pool) {
- e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
- return;
- }
-
- /* Extra buffer to be shared by all DDPs for HW work around */
- fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
- if (fcoe->extra_ddp_buffer == NULL) {
- e_err(drv, "failed to allocated extra DDP buffer\n");
- goto out_ddp_pools;
- }
+ int i, fcoe_q, fcoe_i;
+ u32 etqf;
- fcoe->extra_ddp_buffer_dma =
- dma_map_single(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer,
- IXGBE_FCBUFF_MIN,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer_dma)) {
- e_err(drv, "failed to map extra DDP buffer\n");
- goto out_extra_ddp_buffer;
- }
+ /* Minimal functionality for FCoE requires at least CRC offloads */
+ if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
+ return;
- /* Alloc per cpu mem to count the ddp alloc failure number */
- fcoe->pcpu_noddp = alloc_percpu(u64);
- if (!fcoe->pcpu_noddp) {
- e_err(drv, "failed to alloc noddp counter\n");
- goto out_pcpu_noddp_alloc_fail;
- }
+ /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
+ etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ etqf |= IXGBE_ETQF_POOL_ENABLE;
+ etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
- fcoe->pcpu_noddp_ext_buff = alloc_percpu(u64);
- if (!fcoe->pcpu_noddp_ext_buff) {
- e_err(drv, "failed to alloc noddp extra buff cnt\n");
- goto out_pcpu_noddp_extra_buff_alloc_fail;
- }
+ /* leave registers un-configured if FCoE is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return;
- for_each_possible_cpu(cpu) {
- *per_cpu_ptr(fcoe->pcpu_noddp, cpu) = 0;
- *per_cpu_ptr(fcoe->pcpu_noddp_ext_buff, cpu) = 0;
- }
+ /* Use one or more Rx queues for FCoE by redirection table */
+ for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
+ fcoe_i = fcoe->offset + (i % fcoe->indices);
+ fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
+ IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
- /* Enable L2 eth type filter for FCoE */
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
- (ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
- /* Enable L2 eth type filter for FIP */
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
- (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
- if (adapter->ring_feature[RING_F_FCOE].indices) {
- /* Use multiple rx queues for FCoE by redirection table */
- for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
- fcoe_i = f->mask + i % f->indices;
- fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
- }
- IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
- } else {
- /* Use single rx queue for FCoE */
- fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
- IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
- IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
- IXGBE_ETQS_QUEUE_EN |
- (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
+ /* Enable L2 EtherType filter for FIP */
+ etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ etqf |= IXGBE_ETQF_POOL_ENABLE;
+ etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
}
- /* send FIP frames to the first FCoE queue */
- fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);
+
+ /* Send FIP frames to the first FCoE queue */
+ fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
IXGBE_ETQS_QUEUE_EN |
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
- IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, IXGBE_FCRXCTRL_FCCRCBO |
+ /* Configure FCoE Rx control */
+ IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
+ IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
- return;
-out_pcpu_noddp_extra_buff_alloc_fail:
- free_percpu(fcoe->pcpu_noddp);
-out_pcpu_noddp_alloc_fail:
- dma_unmap_single(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer_dma,
- IXGBE_FCBUFF_MIN,
- DMA_FROM_DEVICE);
-out_extra_ddp_buffer:
- kfree(fcoe->extra_ddp_buffer);
-out_ddp_pools:
- ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
- * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
+ * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
* @adapter : ixgbe adapter
*
* Cleans up outstanding ddp context resources
*
* Returns : none
*/
-void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
+void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
- int i;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ int cpu, i;
- if (!fcoe->pool)
+ /* do nothing if no DDP pools were allocated */
+ if (!fcoe->ddp_pool)
return;
for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
ixgbe_fcoe_ddp_put(adapter->netdev, i);
+
+ for_each_possible_cpu(cpu)
+ ixgbe_fcoe_dma_pool_free(fcoe, cpu);
+
dma_unmap_single(&adapter->pdev->dev,
fcoe->extra_ddp_buffer_dma,
IXGBE_FCBUFF_MIN,
DMA_FROM_DEVICE);
- free_percpu(fcoe->pcpu_noddp);
- free_percpu(fcoe->pcpu_noddp_ext_buff);
kfree(fcoe->extra_ddp_buffer);
- ixgbe_fcoe_ddp_pools_free(fcoe);
+
+ fcoe->extra_ddp_buffer = NULL;
+ fcoe->extra_ddp_buffer_dma = 0;
+}
+
+/**
+ * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
+ * @adapter: ixgbe adapter
+ *
+ * Sets up ddp context resouces
+ *
+ * Returns : 0 indicates success or -EINVAL on failure
+ */
+int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ struct device *dev = &adapter->pdev->dev;
+ void *buffer;
+ dma_addr_t dma;
+ unsigned int cpu;
+
+ /* do nothing if no DDP pools were allocated */
+ if (!fcoe->ddp_pool)
+ return 0;
+
+ /* Extra buffer to be shared by all DDPs for HW work around */
+ buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
+ if (!buffer) {
+ e_err(drv, "failed to allocate extra DDP buffer\n");
+ return -ENOMEM;
+ }
+
+ dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ e_err(drv, "failed to map extra DDP buffer\n");
+ kfree(buffer);
+ return -ENOMEM;
+ }
+
+ fcoe->extra_ddp_buffer = buffer;
+ fcoe->extra_ddp_buffer_dma = dma;
+
+ /* allocate pci pool for each cpu */
+ for_each_possible_cpu(cpu) {
+ int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);
+ if (!err)
+ continue;
+
+ e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+ ixgbe_free_fcoe_ddp_resources(adapter);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
+ return -EINVAL;
+
+ fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
+
+ if (!fcoe->ddp_pool) {
+ e_err(drv, "failed to allocate percpu DDP resources\n");
+ return -ENOMEM;
+ }
+
+ adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+
+ return 0;
+}
+
+static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+
+ adapter->netdev->fcoe_ddp_xid = 0;
+
+ if (!fcoe->ddp_pool)
+ return;
+
+ free_percpu(fcoe->ddp_pool);
+ fcoe->ddp_pool = NULL;
}
/**
@@ -751,40 +788,37 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
*/
int ixgbe_fcoe_enable(struct net_device *netdev)
{
- int rc = -EINVAL;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ atomic_inc(&fcoe->refcnt);
if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
- goto out_enable;
+ return -EINVAL;
- atomic_inc(&fcoe->refcnt);
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- goto out_enable;
+ return -EINVAL;
e_info(drv, "Enabling FCoE offload features.\n");
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
+ /* Allocate per CPU memory to track DDP pools */
+ ixgbe_fcoe_ddp_enable(adapter);
+ /* enable FCoE and notify stack */
adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
- netdev->features |= NETIF_F_FCOE_CRC;
- netdev->features |= NETIF_F_FSO;
netdev->features |= NETIF_F_FCOE_MTU;
- netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
+ netdev_features_change(netdev);
+ /* release existing queues and reallocate them */
+ ixgbe_clear_interrupt_scheme(adapter);
ixgbe_init_interrupt_scheme(adapter);
- netdev_features_change(netdev);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
- rc = 0;
-out_enable:
- return rc;
+ return 0;
}
/**
@@ -797,41 +831,35 @@ out_enable:
*/
int ixgbe_fcoe_disable(struct net_device *netdev)
{
- int rc = -EINVAL;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
- goto out_disable;
+ if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
+ return -EINVAL;
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
- goto out_disable;
-
- if (!atomic_dec_and_test(&fcoe->refcnt))
- goto out_disable;
+ return -EINVAL;
e_info(drv, "Disabling FCoE offload features.\n");
- netdev->features &= ~NETIF_F_FCOE_CRC;
- netdev->features &= ~NETIF_F_FSO;
- netdev->features &= ~NETIF_F_FCOE_MTU;
- netdev->fcoe_ddp_xid = 0;
- netdev_features_change(netdev);
-
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
+ /* Free per CPU memory to track DDP pools */
+ ixgbe_fcoe_ddp_disable(adapter);
+
+ /* disable FCoE and notify stack */
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = 0;
- ixgbe_cleanup_fcoe(adapter);
+ netdev->features &= ~NETIF_F_FCOE_MTU;
+
+ netdev_features_change(netdev);
+
+ /* release existing queues and reallocate them */
+ ixgbe_clear_interrupt_scheme(adapter);
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
- rc = 0;
-out_disable:
- return rc;
+ return 0;
}
/**
@@ -960,3 +988,18 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
return 0;
}
+
+/**
+ * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
+ * @adapter - pointer to the device adapter structure
+ *
+ * Return : TC that FCoE is mapped to
+ */
+u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_DCB
+ return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
+#else
+ return 0;
+#endif
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 1dbed17c8107..bf724da99375 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -62,19 +62,24 @@ struct ixgbe_fcoe_ddp {
struct scatterlist *sgl;
dma_addr_t udp;
u64 *udl;
- struct pci_pool *pool;
+ struct dma_pool *pool;
+};
+
+/* per cpu variables */
+struct ixgbe_fcoe_ddp_pool {
+ struct dma_pool *pool;
+ u64 noddp;
+ u64 noddp_ext_buff;
};
struct ixgbe_fcoe {
- struct pci_pool **pool;
+ struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool;
atomic_t refcnt;
spinlock_t lock;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
- unsigned char *extra_ddp_buffer;
+ void *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
- u64 __percpu *pcpu_noddp;
- u64 __percpu *pcpu_noddp_ext_buff;
#ifdef CONFIG_IXGBE_DCB
u8 up;
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index c377706e81a8..17ecbcedd548 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -28,28 +28,83 @@
#include "ixgbe.h"
#include "ixgbe_sriov.h"
+#ifdef CONFIG_IXGBE_DCB
/**
- * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
* @adapter: board private structure to initialize
*
- * Cache the descriptor ring offsets for RSS to the assigned rings.
+ * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It
+ * will also try to cache the proper offsets if RSS/FCoE are enabled along
+ * with VMDq.
*
**/
-static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
+static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
+#ifdef IXGBE_FCOE
+ struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
+#endif /* IXGBE_FCOE */
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
int i;
+ u16 reg_idx;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ /* verify we have DCB queueing enabled before proceeding */
+ if (tcs <= 1)
return false;
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
+ /* verify we have VMDq enabled before proceeding */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return false;
+
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= tcs)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->rx_ring[i]->reg_idx = reg_idx;
+ }
+
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= tcs)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ }
+
+#ifdef IXGBE_FCOE
+ /* nothing to do if FCoE is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ return true;
+
+ /* The work is already done if the FCoE ring is shared */
+ if (fcoe->offset < tcs)
+ return true;
+
+ /* The FCoE rings exist separately, we need to move their reg_idx */
+ if (fcoe->indices) {
+ u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
+ u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);
+
+ reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
+ for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
+ adapter->rx_ring[i]->reg_idx = reg_idx;
+ reg_idx++;
+ }
+
+ reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
+ for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ reg_idx++;
+ }
+ }
+#endif /* IXGBE_FCOE */
return true;
}
-#ifdef CONFIG_IXGBE_DCB
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
@@ -64,42 +119,37 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- *tx = tc << 2;
- *rx = tc << 3;
+ /* TxQs/TC: 4 RxQs/TC: 8 */
+ *tx = tc << 2; /* 0, 4, 8, 12, 16, 20, 24, 28 */
+ *rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
if (num_tcs > 4) {
- if (tc < 3) {
- *tx = tc << 5;
- *rx = tc << 4;
- } else if (tc < 5) {
- *tx = ((tc + 2) << 4);
- *rx = tc << 4;
- } else if (tc < num_tcs) {
- *tx = ((tc + 8) << 3);
- *rx = tc << 4;
- }
+ /*
+ * TCs : TC0/1 TC2/3 TC4-7
+ * TxQs/TC: 32 16 8
+ * RxQs/TC: 16 16 16
+ */
+ *rx = tc << 4;
+ if (tc < 3)
+ *tx = tc << 5; /* 0, 32, 64 */
+ else if (tc < 5)
+ *tx = (tc + 2) << 4; /* 80, 96 */
+ else
+ *tx = (tc + 8) << 3; /* 104, 112, 120 */
} else {
- *rx = tc << 5;
- switch (tc) {
- case 0:
- *tx = 0;
- break;
- case 1:
- *tx = 64;
- break;
- case 2:
- *tx = 96;
- break;
- case 3:
- *tx = 112;
- break;
- default:
- break;
- }
+ /*
+ * TCs : TC0 TC1 TC2/3
+ * TxQs/TC: 64 32 16
+ * RxQs/TC: 32 32 32
+ */
+ *rx = tc << 5;
+ if (tc < 2)
+ *tx = tc << 6; /* 0, 64 */
+ else
+ *tx = (tc + 4) << 4; /* 96, 112 */
}
- break;
default:
break;
}
@@ -112,106 +162,115 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
* Cache the descriptor ring offsets for DCB to the assigned rings.
*
**/
-static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
+static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
- int i, j, k;
+ unsigned int tx_idx, rx_idx;
+ int tc, offset, rss_i, i;
u8 num_tcs = netdev_get_num_tc(dev);
- if (!num_tcs)
+ /* verify we have DCB queueing enabled before proceeding */
+ if (num_tcs <= 1)
return false;
- for (i = 0, k = 0; i < num_tcs; i++) {
- unsigned int tx_s, rx_s;
- u16 count = dev->tc_to_txq[i].count;
+ rss_i = adapter->ring_feature[RING_F_RSS].indices;
- ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
- for (j = 0; j < count; j++, k++) {
- adapter->tx_ring[k]->reg_idx = tx_s + j;
- adapter->rx_ring[k]->reg_idx = rx_s + j;
- adapter->tx_ring[k]->dcb_tc = i;
- adapter->rx_ring[k]->dcb_tc = i;
+ for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
+ ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
+ for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
+ adapter->tx_ring[offset + i]->reg_idx = tx_idx;
+ adapter->rx_ring[offset + i]->reg_idx = rx_idx;
+ adapter->tx_ring[offset + i]->dcb_tc = tc;
+ adapter->rx_ring[offset + i]->dcb_tc = tc;
}
}
return true;
}
-#endif
+#endif
/**
- * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
* @adapter: board private structure to initialize
*
- * Cache the descriptor ring offsets for Flow Director to the assigned rings.
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
*
- **/
-static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
+ */
+static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
+#ifdef IXGBE_FCOE
+ struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
+#endif /* IXGBE_FCOE */
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+ struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
int i;
- bool ret = false;
-
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i]->reg_idx = i;
- for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i]->reg_idx = i;
- ret = true;
+ u16 reg_idx;
+
+ /* only proceed if VMDq is enabled */
+ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
+ return false;
+
+ /* start at VMDq register offset for SR-IOV enabled setups */
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
+#ifdef IXGBE_FCOE
+ /* Allow first FCoE queue to be mapped as RSS */
+ if (fcoe->offset && (i > fcoe->offset))
+ break;
+#endif
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & ~vmdq->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->rx_ring[i]->reg_idx = reg_idx;
}
- return ret;
-}
+#ifdef IXGBE_FCOE
+ /* FCoE uses a linear block of queues so just assigning 1:1 */
+ for (; i < adapter->num_rx_queues; i++, reg_idx++)
+ adapter->rx_ring[i]->reg_idx = reg_idx;
+#endif
+ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
+ for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
-/**
- * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for FCoE mode to the assigned rings.
- *
- */
-static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
- int i;
- u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+ /* Allow first FCoE queue to be mapped as RSS */
+ if (fcoe->offset && (i > fcoe->offset))
+ break;
+#endif
+ /* If we are greater than indices move to next pool */
+ if ((reg_idx & rss->mask) >= rss->indices)
+ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
+ adapter->tx_ring[i]->reg_idx = reg_idx;
+ }
- if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
- return false;
+#ifdef IXGBE_FCOE
+ /* FCoE uses a linear block of queues so just assigning 1:1 */
+ for (; i < adapter->num_tx_queues; i++, reg_idx++)
+ adapter->tx_ring[i]->reg_idx = reg_idx;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- ixgbe_cache_ring_fdir(adapter);
- else
- ixgbe_cache_ring_rss(adapter);
+#endif
- fcoe_rx_i = f->mask;
- fcoe_tx_i = f->mask;
- }
- for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
- }
return true;
}
-#endif /* IXGBE_FCOE */
/**
- * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
* @adapter: board private structure to initialize
*
- * SR-IOV doesn't use any descriptor rings but changes the default if
- * no other mapping is used.
+ * Cache the descriptor ring offsets for RSS to the assigned rings.
*
- */
-static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+ **/
+static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
- adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
- adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
- if (adapter->num_vfs)
- return true;
- else
- return false;
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ adapter->rx_ring[i]->reg_idx = i;
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ adapter->tx_ring[i]->reg_idx = i;
+
+ return true;
}
/**
@@ -231,186 +290,384 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
adapter->rx_ring[0]->reg_idx = 0;
adapter->tx_ring[0]->reg_idx = 0;
- if (ixgbe_cache_ring_sriov(adapter))
- return;
-
#ifdef CONFIG_IXGBE_DCB
- if (ixgbe_cache_ring_dcb(adapter))
+ if (ixgbe_cache_ring_dcb_sriov(adapter))
return;
-#endif
-#ifdef IXGBE_FCOE
- if (ixgbe_cache_ring_fcoe(adapter))
+ if (ixgbe_cache_ring_dcb(adapter))
return;
-#endif /* IXGBE_FCOE */
- if (ixgbe_cache_ring_fdir(adapter))
+#endif
+ if (ixgbe_cache_ring_sriov(adapter))
return;
- if (ixgbe_cache_ring_rss(adapter))
- return;
+ ixgbe_cache_ring_rss(adapter);
}
-/**
- * ixgbe_set_sriov_queues: Allocate queues for IOV use
- * @adapter: board private structure to initialize
- *
- * IOV doesn't actually use anything, so just NAK the
- * request for now and let the other queue routines
- * figure out what to do.
- */
-static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
-{
- return false;
-}
+#define IXGBE_RSS_16Q_MASK 0xF
+#define IXGBE_RSS_8Q_MASK 0x7
+#define IXGBE_RSS_4Q_MASK 0x3
+#define IXGBE_RSS_2Q_MASK 0x1
+#define IXGBE_RSS_DISABLED_MASK 0x0
+#ifdef CONFIG_IXGBE_DCB
/**
- * ixgbe_set_rss_queues: Allocate queues for RSS
+ * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
* @adapter: board private structure to initialize
*
- * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
- * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
+ * and VM pools where appropriate. Also assign queues based on DCB
+ * priorities and map accordingly..
*
**/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
- bool ret = false;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
-
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- f->mask = 0xF;
- adapter->num_rx_queues = f->indices;
- adapter->num_tx_queues = f->indices;
- ret = true;
+ int i;
+ u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+ u16 vmdq_m = 0;
+#ifdef IXGBE_FCOE
+ u16 fcoe_i = 0;
+#endif
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ /* verify we have DCB queueing enabled before proceeding */
+ if (tcs <= 1)
+ return false;
+
+ /* verify we have VMDq enabled before proceeding */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return false;
+
+ /* Add starting offset to total pool count */
+ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
+
+ /* 16 pools w/ 8 TC per pool */
+ if (tcs > 4) {
+ vmdq_i = min_t(u16, vmdq_i, 16);
+ vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
+ /* 32 pools w/ 4 TC per pool */
+ } else {
+ vmdq_i = min_t(u16, vmdq_i, 32);
+ vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
}
- return ret;
-}
+#ifdef IXGBE_FCOE
+ /* queues in the remaining pools are available for FCoE */
+ fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;
-/**
- * ixgbe_set_fdir_queues: Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session. This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
- bool ret = false;
- struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
+#endif
+ /* remove the starting offset from the pool count */
+ vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
- f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
- f_fdir->mask = 0;
+ /* save features for later use */
+ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
/*
- * Use RSS in addition to Flow Director to ensure the best
- * distribution of flows across cores, even when an FDIR flow
- * isn't matched.
+ * We do not support DCB, VMDq, and RSS all simultaneously
+ * so we will disable RSS since it is the lowest priority
*/
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
- adapter->num_tx_queues = f_fdir->indices;
- adapter->num_rx_queues = f_fdir->indices;
- ret = true;
- } else {
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->ring_feature[RING_F_RSS].indices = 1;
+ adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;
+
+ /* disable ATR as it is not supported when VMDq is enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ adapter->num_rx_pools = vmdq_i;
+ adapter->num_rx_queues_per_pool = tcs;
+
+ adapter->num_tx_queues = vmdq_i * tcs;
+ adapter->num_rx_queues = vmdq_i * tcs;
+
+#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ struct ixgbe_ring_feature *fcoe;
+
+ fcoe = &adapter->ring_feature[RING_F_FCOE];
+
+ /* limit ourselves based on feature limits */
+ fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
+ fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
+
+ if (fcoe_i) {
+ /* alloc queues for FCoE separately */
+ fcoe->indices = fcoe_i;
+ fcoe->offset = vmdq_i * tcs;
+
+ /* add queues to adapter */
+ adapter->num_tx_queues += fcoe_i;
+ adapter->num_rx_queues += fcoe_i;
+ } else if (tcs > 1) {
+ /* use queue belonging to FcoE TC */
+ fcoe->indices = 1;
+ fcoe->offset = ixgbe_fcoe_get_tc(adapter);
+ } else {
+ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+
+ fcoe->indices = 0;
+ fcoe->offset = 0;
+ }
}
- return ret;
+
+#endif /* IXGBE_FCOE */
+ /* configure TC to queue mapping */
+ for (i = 0; i < tcs; i++)
+ netdev_set_tc_queue(adapter->netdev, i, 1, i);
+
+ return true;
}
+static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_ring_feature *f;
+ int rss_i, rss_m, i;
+ int tcs;
+
+ /* Map queue offset and counts onto allocated tx queues */
+ tcs = netdev_get_num_tc(dev);
+
+ /* verify we have DCB queueing enabled before proceeding */
+ if (tcs <= 1)
+ return false;
+
+ /* determine the upper limit for our current DCB mode */
+ rss_i = dev->num_tx_queues / tcs;
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ /* 8 TC w/ 4 queues per TC */
+ rss_i = min_t(u16, rss_i, 4);
+ rss_m = IXGBE_RSS_4Q_MASK;
+ } else if (tcs > 4) {
+ /* 8 TC w/ 8 queues per TC */
+ rss_i = min_t(u16, rss_i, 8);
+ rss_m = IXGBE_RSS_8Q_MASK;
+ } else {
+ /* 4 TC w/ 16 queues per TC */
+ rss_i = min_t(u16, rss_i, 16);
+ rss_m = IXGBE_RSS_16Q_MASK;
+ }
+
+ /* set RSS mask and indices */
+ f = &adapter->ring_feature[RING_F_RSS];
+ rss_i = min_t(int, rss_i, f->limit);
+ f->indices = rss_i;
+ f->mask = rss_m;
+
+ /* disable ATR as it is not supported when multiple TCs are enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
#ifdef IXGBE_FCOE
+ /* FCoE enabled queues require special configuration indexed
+ * by feature specific indices and offset. Here we map FCoE
+ * indices onto the DCB queue pairs allowing FCoE to own
+ * configuration later.
+ */
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ u8 tc = ixgbe_fcoe_get_tc(adapter);
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+ f->indices = min_t(u16, rss_i, f->limit);
+ f->offset = rss_i * tc;
+ }
+
+#endif /* IXGBE_FCOE */
+ for (i = 0; i < tcs; i++)
+ netdev_set_tc_queue(dev, i, rss_i, rss_i * i);
+
+ adapter->num_tx_queues = rss_i * tcs;
+ adapter->num_rx_queues = rss_i * tcs;
+
+ return true;
+}
+
+#endif
/**
- * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
+ * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
* @adapter: board private structure to initialize
*
- * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
+ * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues
+ * and VM pools where appropriate. If RSS is available, then also try and
+ * enable RSS and map accordingly.
*
**/
-static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+ u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
+ u16 vmdq_m = 0;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
+ u16 rss_m = IXGBE_RSS_DISABLED_MASK;
+#ifdef IXGBE_FCOE
+ u16 fcoe_i = 0;
+#endif
- if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+ /* only proceed if SR-IOV is enabled */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return false;
- f->indices = min_t(int, num_online_cpus(), f->indices);
+ /* Add starting offset to total pool count */
+ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
+ /* double check we are limited to maximum pools */
+ vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- e_info(probe, "FCoE enabled with RSS\n");
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
- ixgbe_set_fdir_queues(adapter);
- else
- ixgbe_set_rss_queues(adapter);
+ /* 64 pool mode with 2 queues per pool */
+ if ((vmdq_i > 32) || (rss_i < 4)) {
+ vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
+ rss_m = IXGBE_RSS_2Q_MASK;
+ rss_i = min_t(u16, rss_i, 2);
+ /* 32 pool mode with 4 queues per pool */
+ } else {
+ vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
+ rss_m = IXGBE_RSS_4Q_MASK;
+ rss_i = 4;
}
- /* adding FCoE rx rings to the end */
- f->mask = adapter->num_rx_queues;
- adapter->num_rx_queues += f->indices;
- adapter->num_tx_queues += f->indices;
+#ifdef IXGBE_FCOE
+ /* queues in the remaining pools are available for FCoE */
+ fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));
+
+#endif
+ /* remove the starting offset from the pool count */
+ vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;
+
+ /* save features for later use */
+ adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
+ adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;
+
+ /* limit RSS based on user input and save for later use */
+ adapter->ring_feature[RING_F_RSS].indices = rss_i;
+ adapter->ring_feature[RING_F_RSS].mask = rss_m;
+ adapter->num_rx_pools = vmdq_i;
+ adapter->num_rx_queues_per_pool = rss_i;
+
+ adapter->num_rx_queues = vmdq_i * rss_i;
+ adapter->num_tx_queues = vmdq_i * rss_i;
+
+ /* disable ATR as it is not supported when VMDq is enabled */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+#ifdef IXGBE_FCOE
+ /*
+ * FCoE can use rings from adjacent buffers to allow RSS
+ * like behavior. To account for this we need to add the
+ * FCoE indices to the total ring count.
+ */
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ struct ixgbe_ring_feature *fcoe;
+
+ fcoe = &adapter->ring_feature[RING_F_FCOE];
+
+ /* limit ourselves based on feature limits */
+ fcoe_i = min_t(u16, fcoe_i, fcoe->limit);
+
+ if (vmdq_i > 1 && fcoe_i) {
+ /* reserve no more than number of CPUs */
+ fcoe_i = min_t(u16, fcoe_i, num_online_cpus());
+
+ /* alloc queues for FCoE separately */
+ fcoe->indices = fcoe_i;
+ fcoe->offset = vmdq_i * rss_i;
+ } else {
+ /* merge FCoE queues with RSS queues */
+ fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());
+
+ /* limit indices to rss_i if MSI-X is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ fcoe_i = rss_i;
+
+ /* attempt to reserve some queues for just FCoE */
+ fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
+ fcoe->offset = fcoe_i - fcoe->indices;
+
+ fcoe_i -= rss_i;
+ }
+
+ /* add queues to adapter */
+ adapter->num_tx_queues += fcoe_i;
+ adapter->num_rx_queues += fcoe_i;
+ }
+
+#endif
return true;
}
-#endif /* IXGBE_FCOE */
-/* Artificial max queue cap per traffic class in DCB mode */
-#define DCB_QUEUE_CAP 8
-
-#ifdef CONFIG_IXGBE_DCB
-static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
+/**
+ * ixgbe_set_rss_queues - Allocate queues for RSS
+ * @adapter: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
- int per_tc_q, q, i, offset = 0;
- struct net_device *dev = adapter->netdev;
- int tcs = netdev_get_num_tc(dev);
+ struct ixgbe_ring_feature *f;
+ u16 rss_i;
- if (!tcs)
- return false;
+ /* set mask for 16 queue limit of RSS */
+ f = &adapter->ring_feature[RING_F_RSS];
+ rss_i = f->limit;
- /* Map queue offset and counts onto allocated tx queues */
- per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
- q = min_t(int, num_online_cpus(), per_tc_q);
+ f->indices = rss_i;
+ f->mask = IXGBE_RSS_16Q_MASK;
- for (i = 0; i < tcs; i++) {
- netdev_set_tc_queue(dev, i, q, offset);
- offset += q;
- }
+ /* disable ATR by default, it will be configured below */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+
+ /*
+ * Use Flow Director in addition to RSS to ensure the best
+ * distribution of flows across cores, even when an FDIR flow
+ * isn't matched.
+ */
+ if (rss_i > 1 && adapter->atr_sample_rate) {
+ f = &adapter->ring_feature[RING_F_FDIR];
- adapter->num_tx_queues = q * tcs;
- adapter->num_rx_queues = q * tcs;
+ f->indices = min_t(u16, num_online_cpus(), f->limit);
+ rss_i = max_t(u16, rss_i, f->indices);
+
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ }
#ifdef IXGBE_FCOE
- /* FCoE enabled queues require special configuration indexed
- * by feature specific indices and mask. Here we map FCoE
- * indices onto the DCB queue pairs allowing FCoE to own
- * configuration later.
+ /*
+ * FCoE can exist on the same rings as standard network traffic
+ * however it is preferred to avoid that if possible. In order
+ * to get the best performance we allocate as many FCoE queues
+ * as we can and we place them at the end of the ring array to
+ * avoid sharing queues with standard RSS on systems with 24 or
+ * more CPUs.
*/
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- u8 prio_tc[MAX_USER_PRIORITY] = {0};
- int tc;
- struct ixgbe_ring_feature *f =
- &adapter->ring_feature[RING_F_FCOE];
-
- ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
- tc = prio_tc[adapter->fcoe.up];
- f->indices = dev->tc_to_txq[tc].count;
- f->mask = dev->tc_to_txq[tc].offset;
+ struct net_device *dev = adapter->netdev;
+ u16 fcoe_i;
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+
+ /* merge FCoE queues with RSS queues */
+ fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
+ fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);
+
+ /* limit indices to rss_i if MSI-X is disabled */
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
+ fcoe_i = rss_i;
+
+ /* attempt to reserve some queues for just FCoE */
+ f->indices = min_t(u16, fcoe_i, f->limit);
+ f->offset = fcoe_i - f->indices;
+ rss_i = max_t(u16, fcoe_i, rss_i);
}
-#endif
+
+#endif /* IXGBE_FCOE */
+ adapter->num_rx_queues = rss_i;
+ adapter->num_tx_queues = rss_i;
return true;
}
-#endif
/**
- * ixgbe_set_num_queues: Allocate queues for device, feature dependent
+ * ixgbe_set_num_queues - Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -420,7 +677,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
* fallthrough conditions.
*
**/
-static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
+static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
/* Start with base case */
adapter->num_rx_queues = 1;
@@ -428,38 +685,18 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_rx_pools = adapter->num_rx_queues;
adapter->num_rx_queues_per_pool = 1;
- if (ixgbe_set_sriov_queues(adapter))
- goto done;
-
#ifdef CONFIG_IXGBE_DCB
+ if (ixgbe_set_dcb_sriov_queues(adapter))
+ return;
+
if (ixgbe_set_dcb_queues(adapter))
- goto done;
+ return;
#endif
-#ifdef IXGBE_FCOE
- if (ixgbe_set_fcoe_queues(adapter))
- goto done;
-
-#endif /* IXGBE_FCOE */
- if (ixgbe_set_fdir_queues(adapter))
- goto done;
-
- if (ixgbe_set_rss_queues(adapter))
- goto done;
-
- /* fallback to base case */
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
-
-done:
- if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
- (adapter->netdev->reg_state == NETREG_UNREGISTERING))
- return 0;
+ if (ixgbe_set_sriov_queues(adapter))
+ return;
- /* Notify the stack of the (possibly) reduced queue counts. */
- netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
- return netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_queues);
+ ixgbe_set_rss_queues(adapter);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -507,8 +744,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* of max_msix_q_vectors + NON_Q_VECTORS, or the number of
* vectors we were allocated.
*/
- adapter->num_msix_vectors = min(vectors,
- adapter->max_msix_q_vectors + NON_Q_VECTORS);
+ vectors -= NON_Q_VECTORS;
+ adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
}
}
@@ -632,8 +869,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
- if ((rxr_idx >= f->mask) &&
- (rxr_idx < f->mask + f->indices))
+ if ((rxr_idx >= f->offset) &&
+ (rxr_idx < f->offset + f->indices))
set_bit(__IXGBE_RX_FCOE, &ring->state);
}
@@ -695,7 +932,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
**/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int q_vectors = adapter->num_q_vectors;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int rxr_idx = 0, txr_idx = 0, v_idx = 0;
@@ -739,10 +976,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
return 0;
err_out:
- while (v_idx) {
- v_idx--;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
ixgbe_free_q_vector(adapter, v_idx);
- }
return -ENOMEM;
}
@@ -757,14 +996,13 @@ err_out:
**/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
- int v_idx, q_vectors;
+ int v_idx = adapter->num_q_vectors;
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- q_vectors = 1;
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
- for (v_idx = 0; v_idx < q_vectors; v_idx++)
+ while (v_idx--)
ixgbe_free_q_vector(adapter, v_idx);
}
@@ -788,11 +1026,10 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
* Attempt to configure the interrupts using the best available
* capabilities of the hardware and the kernel.
**/
-static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
+static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int err = 0;
- int vector, v_budget;
+ int vector, v_budget, err;
/*
* It's easy to be greedy for MSI-X vectors, but it really
@@ -825,38 +1062,41 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
ixgbe_acquire_msix_vectors(adapter, v_budget);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- goto out;
+ return;
}
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
- e_err(probe,
- "ATR is not supported while multiple "
- "queues are disabled. Disabling Flow Director\n");
+ /* disable DCB if number of TCs exceeds 1 */
+ if (netdev_get_num_tc(adapter->netdev) > 1) {
+ e_err(probe, "num TCs exceeds number of queues - disabling DCB\n");
+ netdev_reset_tc(adapter->netdev);
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
}
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->atr_sample_rate = 0;
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- ixgbe_disable_sriov(adapter);
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+
+ /* disable SR-IOV */
+ ixgbe_disable_sriov(adapter);
- err = ixgbe_set_num_queues(adapter);
- if (err)
- return err;
+ /* disable RSS */
+ adapter->ring_feature[RING_F_RSS].limit = 1;
+
+ ixgbe_set_num_queues(adapter);
+ adapter->num_q_vectors = 1;
err = pci_enable_msi(adapter->pdev);
- if (!err) {
- adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
- } else {
+ if (err) {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI interrupt, "
"falling back to legacy. Error: %d\n", err);
- /* reset err */
- err = 0;
+ return;
}
-
-out:
- return err;
+ adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}
/**
@@ -874,15 +1114,10 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
int err;
/* Number of supported queues */
- err = ixgbe_set_num_queues(adapter);
- if (err)
- return err;
+ ixgbe_set_num_queues(adapter);
- err = ixgbe_set_interrupt_capability(adapter);
- if (err) {
- e_dev_err("Unable to setup interrupt capabilities\n");
- goto err_set_interrupt;
- }
+ /* Set interrupt mode */
+ ixgbe_set_interrupt_capability(adapter);
err = ixgbe_alloc_q_vectors(adapter);
if (err) {
@@ -902,7 +1137,6 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
err_alloc_q_vectors:
ixgbe_reset_interrupt_capability(adapter);
-err_set_interrupt:
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 18ca3bcadf0c..3b6784cf134a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -516,7 +516,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
}
-/*
+/**
* ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -790,12 +790,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
#ifdef CONFIG_IXGBE_PTP
- if (unlikely(tx_buffer->tx_flags &
- IXGBE_TX_FLAGS_TSTAMP))
- ixgbe_ptp_tx_hwtstamp(q_vector,
- tx_buffer->skb);
-
+ if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
+ ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
#endif
+
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -995,7 +993,6 @@ out_no_update:
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
- int num_q_vectors;
int i;
if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -1004,12 +1001,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
/* always use CB2 mode, difference is masked in the CB driver */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- else
- num_q_vectors = 1;
-
- for (i = 0; i < num_q_vectors; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
adapter->q_vector[i]->cpu = -1;
ixgbe_update_dca(adapter->q_vector[i]);
}
@@ -1399,8 +1391,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
#ifdef CONFIG_IXGBE_PTP
- if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))
- ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
+ ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
#endif
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
@@ -1526,8 +1517,8 @@ static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
* 60 bytes if the skb->len is less than 60 for skb_pad.
*/
pull_len = skb_frag_size(frag);
- if (pull_len > 256)
- pull_len = ixgbe_get_headlen(va, pull_len);
+ if (pull_len > IXGBE_RX_HDR_SIZE)
+ pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
@@ -1834,11 +1825,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector;
- int q_vectors, v_idx;
+ int v_idx;
u32 mask;
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
/* Populate MSIX to EITR Select */
if (adapter->num_vfs > 32) {
u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
@@ -1849,7 +1838,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
- for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
struct ixgbe_ring *ring;
q_vector = adapter->q_vector[v_idx];
@@ -2413,11 +2402,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
int vector, err;
int ri = 0, ti = 0;
- for (vector = 0; vector < q_vectors; vector++) {
+ for (vector = 0; vector < adapter->num_q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
struct msix_entry *entry = &adapter->msix_entries[vector];
@@ -2572,30 +2560,28 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int i, q_vectors;
+ int vector;
- q_vectors = adapter->num_msix_vectors;
- i = q_vectors - 1;
- free_irq(adapter->msix_entries[i].vector, adapter);
- i--;
+ if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+ free_irq(adapter->pdev->irq, adapter);
+ return;
+ }
- for (; i >= 0; i--) {
- /* free only the irqs that were actually requested */
- if (!adapter->q_vector[i]->rx.ring &&
- !adapter->q_vector[i]->tx.ring)
- continue;
+ for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+ struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+ struct msix_entry *entry = &adapter->msix_entries[vector];
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(adapter->msix_entries[i].vector,
- NULL);
+ /* free only the irqs that were actually requested */
+ if (!q_vector->rx.ring && !q_vector->tx.ring)
+ continue;
- free_irq(adapter->msix_entries[i].vector,
- adapter->q_vector[i]);
- }
- } else {
- free_irq(adapter->pdev->irq, adapter);
+ /* clear the affinity_mask in the IRQ descriptor */
+ irq_set_affinity_hint(entry->vector, NULL);
+
+ free_irq(entry->vector, q_vector);
}
+
+ free_irq(adapter->msix_entries[vector++].vector, adapter);
}
/**
@@ -2619,9 +2605,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int i;
- for (i = 0; i < adapter->num_msix_vectors; i++)
- synchronize_irq(adapter->msix_entries[i].vector);
+ int vector;
+
+ for (vector = 0; vector < adapter->num_q_vectors; vector++)
+ synchronize_irq(adapter->msix_entries[vector].vector);
+
+ synchronize_irq(adapter->msix_entries[vector++].vector);
} else {
synchronize_irq(adapter->pdev->irq);
}
@@ -2699,8 +2688,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
32; /* PTHRESH = 32 */
/* reinitialize flowdirector state */
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
- adapter->atr_sample_rate) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
ring->atr_sample_rate = adapter->atr_sample_rate;
ring->atr_count = 0;
set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
@@ -2730,8 +2718,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 rttdcs;
- u32 reg;
+ u32 rttdcs, mtqc;
u8 tcs = netdev_get_num_tc(adapter->netdev);
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -2743,28 +2730,32 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/* set transmit pool layout */
- switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- case (IXGBE_FLAG_SRIOV_ENABLED):
- IXGBE_WRITE_REG(hw, IXGBE_MTQC,
- (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
- break;
- default:
- if (!tcs)
- reg = IXGBE_MTQC_64Q_1PB;
- else if (tcs <= 4)
- reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ mtqc = IXGBE_MTQC_VT_ENA;
+ if (tcs > 4)
+ mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ else if (tcs > 1)
+ mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ mtqc |= IXGBE_MTQC_32VF;
else
- reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ mtqc |= IXGBE_MTQC_64VF;
+ } else {
+ if (tcs > 4)
+ mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
+ else if (tcs > 1)
+ mtqc = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else
+ mtqc = IXGBE_MTQC_64Q_1PB;
+ }
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
- /* Enable Security TX Buffer IFG for multiple pb */
- if (tcs) {
- reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
- reg |= IXGBE_SECTX_DCB;
- IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
- }
- break;
+ /* Enable Security TX Buffer IFG for multiple pb */
+ if (tcs) {
+ u32 sectx = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ sectx |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, sectx);
}
/* re-enable the arbiter */
@@ -2858,40 +2849,34 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
u8 reg_idx = rx_ring->reg_idx;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82598EB: {
- struct ixgbe_ring_feature *feature = adapter->ring_feature;
- const int mask = feature[RING_F_RSS].mask;
- reg_idx = reg_idx & mask;
- }
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- default:
- break;
- }
-
- srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ u16 mask = adapter->ring_feature[RING_F_RSS].mask;
- srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
- srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
- if (adapter->num_vfs)
- srrctl |= IXGBE_SRRCTL_DROP_EN;
+ /*
+ * if VMDq is not active we must program one srrctl register
+ * per RSS queue since we have enabled RDRXCTL.MVMEN
+ */
+ reg_idx &= mask;
+ }
- srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK;
+ /* configure header buffer length, needed for RSC */
+ srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ /* configure the packet buffer length */
#if PAGE_SIZE > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
+
+ /* configure descriptor type */
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
}
static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2903,11 +2888,15 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
u32 mrqc = 0, reta = 0;
u32 rxcsum;
int i, j;
- u8 tcs = netdev_get_num_tc(adapter->netdev);
- int maxq = adapter->ring_feature[RING_F_RSS].indices;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;
- if (tcs)
- maxq = min(maxq, adapter->num_tx_queues / tcs);
+ /*
+ * Program table for at least 2 queues w/ SR-IOV so that VFs can
+ * make full use of any rings they may have. We will use the
+ * PSRTYPE register to control how many rings we use within the PF.
+ */
+ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2))
+ rss_i = 2;
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
@@ -2915,7 +2904,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
/* Fill out redirection table */
for (i = 0, j = 0; i < 128; i++, j++) {
- if (j == maxq)
+ if (j == rss_i)
j = 0;
/* reta = 4-byte sliding window of
* 0x00..(indices-1)(indices-1)00..etc. */
@@ -2929,35 +2918,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
rxcsum |= IXGBE_RXCSUM_PCSD;
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
- (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
- mrqc = IXGBE_MRQC_RSSEN;
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ if (adapter->ring_feature[RING_F_RSS].mask)
+ mrqc = IXGBE_MRQC_RSSEN;
} else {
- int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
- | IXGBE_FLAG_SRIOV_ENABLED);
-
- switch (mask) {
- case (IXGBE_FLAG_RSS_ENABLED):
- if (!tcs)
- mrqc = IXGBE_MRQC_RSSEN;
- else if (tcs <= 4)
- mrqc = IXGBE_MRQC_RTRSS4TCEN;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ if (tcs > 4)
+ mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */
+ else if (tcs > 1)
+ mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */
+ else if (adapter->ring_feature[RING_F_RSS].indices == 4)
+ mrqc = IXGBE_MRQC_VMDQRSS32EN;
else
+ mrqc = IXGBE_MRQC_VMDQRSS64EN;
+ } else {
+ if (tcs > 4)
mrqc = IXGBE_MRQC_RTRSS8TCEN;
- break;
- case (IXGBE_FLAG_SRIOV_ENABLED):
- mrqc = IXGBE_MRQC_VMDQEN;
- break;
- default:
- break;
+ else if (tcs > 1)
+ mrqc = IXGBE_MRQC_RTRSS4TCEN;
+ else
+ mrqc = IXGBE_MRQC_RSSEN;
}
}
/* Perform hash on these packet types */
- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
- | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
- | IXGBE_MRQC_RSS_FIELD_IPV6
- | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+ mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 |
+ IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
+ IXGBE_MRQC_RSS_FIELD_IPV6 |
+ IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
@@ -3108,6 +3098,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int rss_i = adapter->ring_feature[RING_F_RSS].indices;
int p;
/* PSRTYPE must be initialized in non 82598 adapters */
@@ -3120,58 +3111,69 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82598EB)
return;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
- psrtype |= (adapter->num_rx_queues_per_pool << 29);
+ if (rss_i > 3)
+ psrtype |= 2 << 29;
+ else if (rss_i > 1)
+ psrtype |= 1 << 29;
for (p = 0; p < adapter->num_rx_pools; p++)
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
+ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)),
psrtype);
}
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 gcr_ext;
- u32 vt_reg_bits;
u32 reg_offset, vf_shift;
- u32 vmdctl;
+ u32 gcr_ext, vmdctl;
int i;
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return;
vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
- vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
- vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+ vmdctl |= IXGBE_VMD_CTL_VMDQ_EN;
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT;
+ vmdctl |= IXGBE_VT_CTL_REPLEN;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
- vf_shift = adapter->num_vfs % 32;
- reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
+ vf_shift = VMDQ_P(0) % 32;
+ reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
- hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+ hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
/*
* Set up VF register offsets for selected VT Mode,
* i.e. 32 or 64 VFs for SR-IOV
*/
- gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
- gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
+ switch (adapter->ring_feature[RING_F_VMDQ].mask) {
+ case IXGBE_82599_VMDQ_8Q_MASK:
+ gcr_ext = IXGBE_GCR_EXT_VT_MODE_16;
+ break;
+ case IXGBE_82599_VMDQ_4Q_MASK:
+ gcr_ext = IXGBE_GCR_EXT_VT_MODE_32;
+ break;
+ default:
+ gcr_ext = IXGBE_GCR_EXT_VT_MODE_64;
+ break;
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
/* enable Tx loopback for VF/PF communication */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+
/* Enable MAC Anti-Spoofing */
- hw->mac.ops.set_mac_anti_spoofing(hw,
- (adapter->num_vfs != 0),
+ hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
adapter->num_vfs);
/* For VFs that have spoof checking turned off */
for (i = 0; i < adapter->num_vfs; i++) {
@@ -3307,10 +3309,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- int pool_ndx = adapter->num_vfs;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
set_bit(vid, adapter->active_vlans);
return 0;
@@ -3320,10 +3321,9 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- int pool_ndx = adapter->num_vfs;
/* remove VID from filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
clear_bit(vid, adapter->active_vlans);
return 0;
@@ -3441,15 +3441,18 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- unsigned int vfn = adapter->num_vfs;
- unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
+ unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
+ /* In SR-IOV mode significantly less RAR entries are available */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
+
/* return ENOMEM indicating insufficient memory for addresses */
if (netdev_uc_count(netdev) > rar_entries)
return -ENOMEM;
- if (!netdev_uc_empty(netdev) && rar_entries) {
+ if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
/* return error if we do not support writing to RAR table */
if (!hw->mac.ops.set_rar)
@@ -3459,7 +3462,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
if (!rar_entries)
break;
hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
- vfn, IXGBE_RAH_AV);
+ VMDQ_P(0), IXGBE_RAH_AV);
count++;
}
}
@@ -3533,12 +3536,14 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
vmolr |= IXGBE_VMOLR_ROPE;
}
- if (adapter->num_vfs) {
+ if (adapter->num_vfs)
ixgbe_restore_vf_multicasts(adapter);
- vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(adapter->num_vfs)) &
+
+ if (hw->mac.type != ixgbe_mac_82598EB) {
+ vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
IXGBE_VMOLR_ROPE);
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(adapter->num_vfs), vmolr);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr);
}
/* This is useful for sniffing bad packets. */
@@ -3564,37 +3569,21 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- struct ixgbe_q_vector *q_vector;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- q_vector = adapter->q_vector[q_idx];
- napi_enable(&q_vector->napi);
- }
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+ napi_enable(&adapter->q_vector[q_idx]->napi);
}
static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
{
int q_idx;
- struct ixgbe_q_vector *q_vector;
- int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- /* legacy and MSI only use one vector */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
- q_vectors = 1;
-
- for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- q_vector = adapter->q_vector[q_idx];
- napi_disable(&q_vector->napi);
- }
+ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+ napi_disable(&adapter->q_vector[q_idx]->napi);
}
#ifdef CONFIG_IXGBE_DCB
-/*
+/**
* ixgbe_configure_dcb - Configure DCB hardware
* @adapter: ixgbe adapter struct
*
@@ -3641,19 +3630,16 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
/* Enable RSS Hash per TC */
if (hw->mac.type != ixgbe_mac_82598EB) {
- int i;
- u32 reg = 0;
-
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- u8 msb = 0;
- u8 cnt = adapter->netdev->tc_to_txq[i].count;
+ u32 msb = 0;
+ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices - 1;
- while (cnt >>= 1)
- msb++;
-
- reg |= msb << IXGBE_RQTC_SHIFT_TC(i);
+ while (rss_i) {
+ msb++;
+ rss_i >>= 1;
}
- IXGBE_WRITE_REG(hw, IXGBE_RQTC, reg);
+
+ /* write msb to all 8 TCs in one write */
+ IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111);
}
}
#endif
@@ -3661,11 +3647,11 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
/* Additional bittime to account for IXGBE framing */
#define IXGBE_ETH_FRAMING 20
-/*
+/**
* ixgbe_hpbthresh - calculate high water mark for flow control
*
* @adapter: board private structure to calculate for
- * @pb - packet buffer to calculate
+ * @pb: packet buffer to calculate
*/
static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
{
@@ -3679,18 +3665,12 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
#ifdef IXGBE_FCOE
/* FCoE traffic class uses FCOE jumbo frames */
- if (dev->features & NETIF_F_FCOE_MTU) {
- int fcoe_pb = 0;
+ if ((dev->features & NETIF_F_FCOE_MTU) &&
+ (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ (pb == ixgbe_fcoe_get_tc(adapter)))
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-#ifdef CONFIG_IXGBE_DCB
- fcoe_pb = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
-
-#endif
- if (fcoe_pb == pb && tc < IXGBE_FCOE_JUMBO_FRAME_SIZE)
- tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
- }
#endif
-
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -3725,11 +3705,11 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
return marker;
}
-/*
+/**
* ixgbe_lpbthresh - calculate low water mark for for flow control
*
* @adapter: board private structure to calculate for
- * @pb - packet buffer to calculate
+ * @pb: packet buffer to calculate
*/
static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
{
@@ -3830,12 +3810,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_set_rx_mode(adapter->netdev);
ixgbe_restore_vlan(adapter);
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- ixgbe_configure_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
@@ -3865,6 +3839,11 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_virtualization(adapter);
+#ifdef IXGBE_FCOE
+ /* configure FCoE L2 filters, redirection table, and Rx control */
+ ixgbe_configure_fcoe(adapter);
+
+#endif /* IXGBE_FCOE */
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
}
@@ -3973,7 +3952,18 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
- gpie |= IXGBE_GPIE_VTMODE_64;
+
+ switch (adapter->ring_feature[RING_F_VMDQ].mask) {
+ case IXGBE_82599_VMDQ_8Q_MASK:
+ gpie |= IXGBE_GPIE_VTMODE_16;
+ break;
+ case IXGBE_82599_VMDQ_4Q_MASK:
+ gpie |= IXGBE_GPIE_VTMODE_32;
+ break;
+ default:
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ break;
+ }
}
/* Enable Thermal over heat sensor interrupt */
@@ -4131,8 +4121,11 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
/* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
- IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+
+ /* update SAN MAC vmdq pool selection */
+ if (hw->mac.san_mac_rar_index)
+ hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
}
/**
@@ -4413,32 +4406,29 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* Set capability flags */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
- adapter->ring_feature[RING_F_RSS].indices = rss;
- adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+ adapter->ring_feature[RING_F_RSS].limit = rss;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
- adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82598;
break;
case ixgbe_mac_X540:
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
case ixgbe_mac_82599EB:
- adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+ adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
/* Flow Director hash filters enabled */
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
- adapter->ring_feature[RING_F_FDIR].indices =
+ adapter->ring_feature[RING_F_FDIR].limit =
IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
- adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
@@ -4449,6 +4439,11 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
break;
}
+#ifdef IXGBE_FCOE
+ /* FCoE support exists, always init the FCoE lock */
+ spin_lock_init(&adapter->fcoe.lock);
+
+#endif
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
@@ -4497,6 +4492,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
+#ifdef CONFIG_PCI_IOV
+ /* assign number of SR-IOV VFs */
+ if (hw->mac.type != ixgbe_mac_82598EB)
+ adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
+
+#endif
/* enable itr by default in dynamic mode */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
@@ -4588,10 +4589,16 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
if (!err)
continue;
+
e_err(probe, "Allocation for Tx Queue %u failed\n", i);
- break;
+ goto err_setup_tx;
}
+ return 0;
+err_setup_tx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
return err;
}
@@ -4666,10 +4673,20 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
if (!err)
continue;
+
e_err(probe, "Allocation for Rx Queue %u failed\n", i);
- break;
+ goto err_setup_rx;
}
+#ifdef IXGBE_FCOE
+ err = ixgbe_setup_fcoe_ddp_resources(adapter);
+ if (!err)
+#endif
+ return 0;
+err_setup_rx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
return err;
}
@@ -4744,6 +4761,10 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
{
int i;
+#ifdef IXGBE_FCOE
+ ixgbe_free_fcoe_ddp_resources(adapter);
+
+#endif
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->rx_ring[i]->desc)
ixgbe_free_rx_resources(adapter->rx_ring[i]);
@@ -4825,15 +4846,31 @@ static int ixgbe_open(struct net_device *netdev)
if (err)
goto err_req_irq;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev,
+ adapter->num_rx_pools > 1 ? 1 :
+ adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+
+ err = netif_set_real_num_rx_queues(netdev,
+ adapter->num_rx_pools > 1 ? 1 :
+ adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
ixgbe_up_complete(adapter);
return 0;
+err_set_queues:
+ ixgbe_free_irq(adapter);
err_req_irq:
-err_setup_rx:
ixgbe_free_all_rx_resources(adapter);
-err_setup_tx:
+err_setup_rx:
ixgbe_free_all_tx_resources(adapter);
+err_setup_tx:
ixgbe_reset(adapter);
return err;
@@ -4891,23 +4928,19 @@ static int ixgbe_resume(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
- rtnl_lock();
- err = ixgbe_init_interrupt_scheme(adapter);
- rtnl_unlock();
- if (err) {
- e_dev_err("Cannot initialize interrupts for device\n");
- return err;
- }
-
ixgbe_reset(adapter);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
- if (netif_running(netdev)) {
+ rtnl_lock();
+ err = ixgbe_init_interrupt_scheme(adapter);
+ if (!err && netif_running(netdev))
err = ixgbe_open(netdev);
- if (err)
- return err;
- }
+
+ rtnl_unlock();
+
+ if (err)
+ return err;
netif_device_attach(netdev);
@@ -5043,11 +5076,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
-#ifdef IXGBE_FCOE
- struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- unsigned int cpu;
- u64 fcoe_noddp_counts_sum = 0, fcoe_noddp_ext_buff_counts_sum = 0;
-#endif /* IXGBE_FCOE */
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5178,17 +5206,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
/* Add up per cpu counters for total ddp aloc fail */
- if (fcoe->pcpu_noddp && fcoe->pcpu_noddp_ext_buff) {
+ if (adapter->fcoe.ddp_pool) {
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ struct ixgbe_fcoe_ddp_pool *ddp_pool;
+ unsigned int cpu;
+ u64 noddp = 0, noddp_ext_buff = 0;
for_each_possible_cpu(cpu) {
- fcoe_noddp_counts_sum +=
- *per_cpu_ptr(fcoe->pcpu_noddp, cpu);
- fcoe_noddp_ext_buff_counts_sum +=
- *per_cpu_ptr(fcoe->
- pcpu_noddp_ext_buff, cpu);
+ ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+ noddp += ddp_pool->noddp;
+ noddp_ext_buff += ddp_pool->noddp_ext_buff;
}
+ hwstats->fcoe_noddp = noddp;
+ hwstats->fcoe_noddp_ext_buff = noddp_ext_buff;
}
- hwstats->fcoe_noddp = fcoe_noddp_counts_sum;
- hwstats->fcoe_noddp_ext_buff = fcoe_noddp_ext_buff_counts_sum;
#endif /* IXGBE_FCOE */
break;
default:
@@ -5246,7 +5276,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/**
* ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
{
@@ -5282,7 +5312,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
*
* This function serves two purposes. First it strobes the interrupt lines
* in order to make certain interrupts are occurring. Secondly it sets the
@@ -5316,7 +5346,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
} else {
/* get one bit for every active tx/rx interrupt vector */
- for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ for (i = 0; i < adapter->num_q_vectors; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
eics |= ((u64)1 << i);
@@ -5330,8 +5360,8 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_update_link - update the link status
- * @adapter - pointer to the device adapter structure
- * @link_speed - pointer to a u32 to store the link_speed
+ * @adapter: pointer to the device adapter structure
+ * @link_speed: pointer to a u32 to store the link_speed
**/
static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
@@ -5374,7 +5404,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
@@ -5429,12 +5459,15 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+
+ /* ping all the active vfs to let them know link has changed */
+ ixgbe_ping_all_vfs(adapter);
}
/**
* ixgbe_watchdog_link_is_down - update netif_carrier status and
* print link down message
- * @adapter - pointer to the adapter structure
+ * @adapter: pointer to the adapter structure
**/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
@@ -5458,11 +5491,14 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
e_info(drv, "NIC Link is Down\n");
netif_carrier_off(netdev);
+
+ /* ping all the active vfs to let them know link has changed */
+ ixgbe_ping_all_vfs(adapter);
}
/**
* ixgbe_watchdog_flush_tx - flush queues on link down
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
{
@@ -5511,7 +5547,7 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
/**
* ixgbe_watchdog_subtask - check and bring link up
- * @adapter - pointer to the device adapter structure
+ * @adapter: pointer to the device adapter structure
**/
static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
{
@@ -5535,7 +5571,7 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
/**
* ixgbe_sfp_detection_subtask - poll for SFP+ cable
- * @adapter - the ixgbe adapter structure
+ * @adapter: the ixgbe adapter structure
**/
static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
@@ -5602,7 +5638,7 @@ sfp_out:
/**
* ixgbe_sfp_link_config_subtask - set up link SFP after module install
- * @adapter - the ixgbe adapter structure
+ * @adapter: the ixgbe adapter structure
**/
static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
{
@@ -6233,8 +6269,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
if (((protocol == htons(ETH_P_FCOE)) ||
(protocol == htons(ETH_P_FIP))) &&
(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
- txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
- txq += adapter->ring_feature[RING_F_FCOE].mask;
+ struct ixgbe_ring_feature *f;
+
+ f = &adapter->ring_feature[RING_F_FCOE];
+
+ while (txq >= f->indices)
+ txq -= f->indices;
+ txq += adapter->ring_feature[RING_F_FCOE].offset;
+
return txq;
}
#endif
@@ -6348,7 +6390,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
#ifdef IXGBE_FCOE
/* setup tx offload for FCoE */
if ((protocol == __constant_htons(ETH_P_FCOE)) &&
- (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
+ (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) {
tso = ixgbe_fso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
@@ -6389,17 +6431,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring;
- if (skb->len <= 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/*
* The minimum packet size for olinfo paylen is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (skb->len < 17) {
- if (skb_padto(skb, 17))
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
}
@@ -6427,8 +6464,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
- IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
return 0;
}
@@ -6485,12 +6521,15 @@ static int ixgbe_add_sanmac_netdev(struct net_device *dev)
{
int err = 0;
struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ixgbe_mac_info *mac = &adapter->hw.mac;
+ struct ixgbe_hw *hw = &adapter->hw;
- if (is_valid_ether_addr(mac->san_addr)) {
+ if (is_valid_ether_addr(hw->mac.san_addr)) {
rtnl_lock();
- err = dev_addr_add(dev, mac->san_addr, NETDEV_HW_ADDR_T_SAN);
+ err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN);
rtnl_unlock();
+
+ /* update SAN MAC vmdq pool selection */
+ hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
}
return err;
}
@@ -6533,11 +6572,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- for (i = 0; i < num_q_vectors; i++) {
- struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- ixgbe_msix_clean_rings(0, q_vector);
- }
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
} else {
ixgbe_intr(adapter->pdev->irq, netdev);
}
@@ -6594,8 +6630,9 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
}
#ifdef CONFIG_IXGBE_DCB
-/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
- * #adapter: pointer to ixgbe_adapter
+/**
+ * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
* @tc: number of traffic classes currently enabled
*
* Configure a valid 802.1Qp to Rx packet buffer mapping ie confirm
@@ -6630,8 +6667,33 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
return;
}
-/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
- * classes.
+/**
+ * ixgbe_set_prio_tc_map - Configure netdev prio tc map
+ * @adapter: Pointer to adapter struct
+ *
+ * Populate the netdev user priority to tc map
+ */
+static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg;
+ struct ieee_ets *ets = adapter->ixgbe_ieee_ets;
+ u8 prio;
+
+ for (prio = 0; prio < MAX_USER_PRIORITY; prio++) {
+ u8 tc = 0;
+
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)
+ tc = ixgbe_dcb_get_tc_from_up(dcb_cfg, 0, prio);
+ else if (ets)
+ tc = ets->prio_tc[prio];
+
+ netdev_set_prio_tc_map(dev, prio, tc);
+ }
+}
+
+/**
+ * ixgbe_setup_tc - configure net_device for multiple traffic classes
*
* @netdev: net device to configure
* @tc: number of traffic classes to enable
@@ -6641,12 +6703,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
- /* Multiple traffic classes requires multiple queues */
- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
- e_err(drv, "Enable failed, needs MSI-X\n");
- return -EINVAL;
- }
-
/* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
(hw->mac.type == ixgbe_mac_82598EB &&
@@ -6663,8 +6719,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (tc) {
netdev_set_num_tc(dev, tc);
+ ixgbe_set_prio_tc_map(adapter);
+
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
adapter->last_lfc_mode = adapter->hw.fc.requested_mode;
@@ -6672,11 +6729,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
}
} else {
netdev_reset_tc(dev);
+
if (adapter->hw.mac.type == ixgbe_mac_82598EB)
adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->temp_dcb_cfg.pfc_mode_enable = false;
adapter->dcb_cfg.pfc_mode_enable = false;
@@ -6706,10 +6763,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* return error if RXHASH is being enabled when RSS is not supported */
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- features &= ~NETIF_F_RXHASH;
-
/* If Rx checksum is disabled, then RSC/LRO should also be disabled */
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
@@ -6749,20 +6802,40 @@ static int ixgbe_set_features(struct net_device *netdev,
* Check if Flow Director n-tuple support was enabled or disabled. If
* the state changed, we need to reset.
*/
- if (!(features & NETIF_F_NTUPLE)) {
- if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
- /* turn off Flow Director, set ATR and reset */
- if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
- !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- need_reset = true;
- }
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ switch (features & NETIF_F_NTUPLE) {
+ case NETIF_F_NTUPLE:
/* turn off ATR, enable perfect filters and reset */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ need_reset = true;
+
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- need_reset = true;
+ break;
+ default:
+ /* turn off perfect filters, enable ATR and reset */
+ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ need_reset = true;
+
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+
+ /* We cannot enable ATR if SR-IOV is enabled */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ break;
+
+ /* We cannot enable ATR if we have 2 or more traffic classes */
+ if (netdev_get_num_tc(netdev) > 1)
+ break;
+
+ /* We cannot enable ATR if RSS is disabled */
+ if (adapter->ring_feature[RING_F_RSS].limit <= 1)
+ break;
+
+ /* A sample rate of 0 indicates ATR disabled */
+ if (!adapter->atr_sample_rate)
+ break;
+
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ break;
}
if (features & NETIF_F_HW_VLAN_RX)
@@ -6786,7 +6859,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
u16 flags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- int err = -EOPNOTSUPP;
+ int err;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
if (ndm->ndm_state & NUD_PERMANENT) {
pr_info("%s: FDB only supports static addresses\n",
@@ -6794,13 +6870,17 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
return -EINVAL;
}
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (is_unicast_ether_addr(addr))
+ if (is_unicast_ether_addr(addr)) {
+ u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
+
+ if (netdev_uc_count(dev) < rar_uc_entries)
err = dev_uc_add_excl(dev, addr);
- else if (is_multicast_ether_addr(addr))
- err = dev_mc_add_excl(dev, addr);
else
- err = -EINVAL;
+ err = -ENOMEM;
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_add_excl(dev, addr);
+ } else {
+ err = -EINVAL;
}
/* Only return duplicate errors if NLM_F_EXCL is set */
@@ -6889,26 +6969,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fdb_dump = ixgbe_ndo_fdb_dump,
};
-static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
- const struct ixgbe_info *ii)
-{
-#ifdef CONFIG_PCI_IOV
- struct ixgbe_hw *hw = &adapter->hw;
-
- if (hw->mac.type == ixgbe_mac_82598EB)
- return;
-
- /* The 82599 supports up to 64 VFs per physical function
- * but this implementation limits allocation to 63 so that
- * basic networking resources are still available to the
- * physical function. If the user requests greater thn
- * 63 VFs then it is an error - reset to default of zero.
- */
- adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
- ixgbe_enable_sriov(adapter, ii);
-#endif /* CONFIG_PCI_IOV */
-}
-
/**
* ixgbe_wol_supported - Check whether device supports WoL
* @hw: hw specific details
@@ -6935,6 +6995,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
if (hw->bus.func != 0)
break;
case IXGBE_SUBDEV_ID_82599_SFP:
+ case IXGBE_SUBDEV_ID_82599_RNDC:
is_wol_supported = 1;
break;
}
@@ -6982,6 +7043,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
int i, err, pci_using_dac;
u8 part_str[IXGBE_PBANUM_LENGTH];
unsigned int indices = num_possible_cpus();
+ unsigned int dcb_max = 0;
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -7031,7 +7093,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
pci_save_state(pdev);
#ifdef CONFIG_IXGBE_DCB
- indices *= MAX_TRAFFIC_CLASS;
+ if (ii->mac == ixgbe_mac_82598EB)
+ dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
+ IXGBE_MAX_RSS_INDICES);
+ else
+ dcb_max = min_t(unsigned int, indices * MAX_TRAFFIC_CLASS,
+ IXGBE_MAX_FDIR_INDICES);
#endif
if (ii->mac == ixgbe_mac_82598EB)
@@ -7043,6 +7110,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
indices += min_t(unsigned int, num_possible_cpus(),
IXGBE_MAX_FCOE_INDICES);
#endif
+ indices = max_t(unsigned int, dcb_max, indices);
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
@@ -7149,8 +7217,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
}
- ixgbe_probe_vf(adapter, ii);
+#ifdef CONFIG_PCI_IOV
+ ixgbe_enable_sriov(adapter, ii);
+#endif
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -7186,10 +7256,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
- IXGBE_FLAG_DCB_ENABLED);
-
#ifdef CONFIG_IXGBE_DCB
netdev->dcbnl_ops = &dcbnl_ops;
#endif
@@ -7201,11 +7267,15 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS)
adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
}
- }
- if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) {
- netdev->vlan_features |= NETIF_F_FCOE_CRC;
- netdev->vlan_features |= NETIF_F_FSO;
- netdev->vlan_features |= NETIF_F_FCOE_MTU;
+
+ adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
+
+ netdev->features |= NETIF_F_FSO |
+ NETIF_F_FCOE_CRC;
+
+ netdev->vlan_features |= NETIF_F_FSO |
+ NETIF_F_FCOE_CRC |
+ NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
if (pci_using_dac) {
@@ -7244,11 +7314,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
- netdev->hw_features &= ~NETIF_F_RXHASH;
- netdev->features &= ~NETIF_F_RXHASH;
- }
-
/* WOL not supported for all devices */
adapter->wol = 0;
hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
@@ -7359,8 +7424,7 @@ err_register:
ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- ixgbe_disable_sriov(adapter);
+ ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(hw->hw_addr);
err_ioremap:
@@ -7407,25 +7471,13 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
ixgbe_sysfs_exit(adapter);
#endif /* CONFIG_IXGBE_HWMON */
-#ifdef IXGBE_FCOE
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- ixgbe_cleanup_fcoe(adapter);
-
-#endif /* IXGBE_FCOE */
-
/* remove the added san mac */
ixgbe_del_sanmac_netdev(netdev);
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
- if (!(ixgbe_check_vf_assignment(adapter)))
- ixgbe_disable_sriov(adapter);
- else
- e_dev_warn("Unloading driver while VFs are assigned "
- "- VFs will not be deallocated\n");
- }
+ ixgbe_disable_sriov(adapter);
ixgbe_clear_interrupt_scheme(adapter);
@@ -7516,11 +7568,11 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
}
/* Find the pci device of the offending VF */
- vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, device_id, NULL);
while (vfdev) {
if (vfdev->devfn == (req_id & 0xFF))
break;
- vfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL,
device_id, vfdev);
}
/*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 24117709d6a2..71659edf81aa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -907,6 +907,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
* 8 SFP_act_lmt_DA_CORE1 - 82599-specific
* 9 SFP_1g_cu_CORE0 - 82599-specific
* 10 SFP_1g_cu_CORE1 - 82599-specific
+ * 11 SFP_1g_sx_CORE0 - 82599-specific
+ * 12 SFP_1g_sx_CORE1 - 82599-specific
*/
if (hw->mac.type == ixgbe_mac_82598EB) {
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
@@ -957,6 +959,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_cu_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_sx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -1049,7 +1058,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* Verify supported 1G SFP modules */
if (comp_codes_10g == 0 &&
!(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -1064,7 +1075,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->mac.ops.get_device_caps(hw, &enforce_sfp);
if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
!((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
- (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) ||
+ (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = 0;
@@ -1128,10 +1141,12 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
* SR modules
*/
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core0)
+ sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
- sfp_type == ixgbe_sfp_type_1g_cu_core1)
+ sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_sx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index dcebd128becf..3456d5617143 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,6 +26,7 @@
*******************************************************************************/
#include "ixgbe.h"
#include <linux/export.h>
+#include <linux/ptp_classify.h>
/*
* The 82599 and the X540 do not have true 64bit nanosecond scale
@@ -100,9 +101,13 @@
#define NSECS_PER_SEC 1000000000ULL
#endif
+static struct sock_filter ptp_filter[] = {
+ PTP_FILTER
+};
+
/**
* ixgbe_ptp_read - read raw cycle counter (to be used by time counter)
- * @cc - the cyclecounter structure
+ * @cc: the cyclecounter structure
*
* this function reads the cyclecounter registers and is called by the
* cyclecounter structure used to construct a ns counter from the
@@ -123,8 +128,8 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc)
/**
* ixgbe_ptp_adjfreq
- * @ptp - the ptp clock structure
- * @ppb - parts per billion adjustment from base
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
*
* adjust the frequency of the ptp cycle counter by the
* indicated ppb from the base frequency.
@@ -170,8 +175,8 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
/**
* ixgbe_ptp_adjtime
- * @ptp - the ptp clock structure
- * @delta - offset to adjust the cycle counter by
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
*
* adjust the timer by resetting the timecounter structure.
*/
@@ -198,8 +203,8 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
/**
* ixgbe_ptp_gettime
- * @ptp - the ptp clock structure
- * @ts - timespec structure to hold the current time value
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
*
* read the timecounter and return the correct value on ns,
* after converting it into a struct timespec.
@@ -224,8 +229,8 @@ static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
/**
* ixgbe_ptp_settime
- * @ptp - the ptp clock structure
- * @ts - the timespec containing the new time for the cycle counter
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
*
* reset the timecounter to use a new base value instead of the kernel
* wall timer value.
@@ -251,9 +256,9 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
/**
* ixgbe_ptp_enable
- * @ptp - the ptp clock structure
- * @rq - the requested feature to change
- * @on - whether to enable or disable the feature
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
*
* enable (or disable) ancillary features of the phc subsystem.
* our driver only supports the PPS feature on the X540
@@ -289,8 +294,8 @@ static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
/**
* ixgbe_ptp_check_pps_event
- * @adapter - the private adapter structure
- * @eicr - the interrupt cause register value
+ * @adapter: the private adapter structure
+ * @eicr: the interrupt cause register value
*
* This function is called by the interrupt routine when checking for
* interrupts. It will check and handle a pps event.
@@ -307,20 +312,21 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED))
return;
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_TIMESYNC)
+ if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) {
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
ptp_clock_event(adapter->ptp_clock, &event);
- break;
- default:
- break;
+ break;
+ default:
+ break;
+ }
}
}
/**
* ixgbe_ptp_enable_sdp
- * @hw - the hardware private structure
- * @shift - the clock shift for calculating nanoseconds
+ * @hw: the hardware private structure
+ * @shift: the clock shift for calculating nanoseconds
*
* this function enables the clock out feature on the sdp0 for the
* X540 device. It will create a 1second periodic output that can be
@@ -393,7 +399,7 @@ static void ixgbe_ptp_enable_sdp(struct ixgbe_hw *hw, int shift)
/**
* ixgbe_ptp_disable_sdp
- * @hw - the private hardware structure
+ * @hw: the private hardware structure
*
* this function disables the auxiliary SDP clock out feature
*/
@@ -425,6 +431,68 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_ptp_match - determine if this skb matches a ptp packet
+ * @skb: pointer to the skb
+ * @hwtstamp: pointer to the hwtstamp_config to check
+ *
+ * Determine whether the skb should have been timestamped, assuming the
+ * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
+ * should have a timestamp waiting in the registers, and 0 otherwise.
+ *
+ * V1 packets have to check the version type to determine whether they are
+ * correct. However, we can't directly access the data because it might be
+ * fragmented in the SKB, in paged memory. In order to work around this, we
+ * use skb_copy_bits which will properly copy the data whether it is in the
+ * paged memory fragments or not. We have to copy the IP header as well as the
+ * message type.
+ */
+static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
+{
+ struct iphdr iph;
+ u8 msgtype;
+ unsigned int type, offset;
+
+ if (rx_filter == HWTSTAMP_FILTER_NONE)
+ return 0;
+
+ type = sk_run_filter(skb, ptp_filter);
+
+ if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
+ return type & PTP_CLASS_V2;
+
+ /* For the remaining cases actually check message type */
+ switch (type) {
+ case PTP_CLASS_V1_IPV4:
+ skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
+ offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
+ break;
+ case PTP_CLASS_V1_IPV6:
+ offset = OFF_PTP6 + OFF_PTP_CONTROL;
+ break;
+ default:
+ /* other cases invalid or handled above */
+ return 0;
+ }
+
+ /* Make sure our buffer is long enough */
+ if (skb->len < offset)
+ return 0;
+
+ skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
+
+ switch (rx_filter) {
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
+ break;
+ default:
+ return 0;
+ }
+}
+
+/**
* ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
* @q_vector: structure containing interrupt and ring information
* @skb: particular skb to send timestamp with
@@ -473,6 +541,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
/**
* ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
* @q_vector: structure containing interrupt and ring information
+ * @rx_desc: the rx descriptor
* @skb: particular skb to send timestamp with
*
* if the timestamp is valid, we convert it into the timecounter ns
@@ -480,6 +549,7 @@ void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
* is passed up the network stack
*/
void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
+ union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct ixgbe_adapter *adapter;
@@ -497,21 +567,33 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
hw = &adapter->hw;
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+
+ /* Check if we have a valid timestamp and make sure the skb should
+ * have been timestamped */
+ if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
+ !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+ return;
+
+ /*
+ * Always read the registers, in order to clear a possible fault
+ * because of stagnant RX timestamp values for a packet that never
+ * reached the queue.
+ */
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;
/*
- * If this bit is set, then the RX registers contain the time stamp. No
- * other packet will be time stamped until we read these registers, so
- * read the registers to make them available again. Because only one
- * packet can be time stamped at a time, we know that the register
- * values must belong to this one here and therefore we don't need to
- * compare any of the additional attributes stored for it.
+ * If the timestamp bit is set in the packet's descriptor, we know the
+ * timestamp belongs to this packet. No other packet can be
+ * timestamped until the registers for timestamping have been read.
+ * Therefor only one packet with this bit can be in the queue at a
+ * time, and the rx timestamp values that were in the registers belong
+ * to this packet.
*
* If nothing went wrong, then it should have a skb_shared_tx that we
* can turn into a skb_shared_hwtstamps.
*/
- if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
+ if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
return;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -539,6 +621,11 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
+ *
+ * Since hardware always timestamps Path delay packets when timestamping V2
+ * packets, regardless of the type specified in the register, only use V2
+ * Event mode. This more accurately tells the user what the hardware is going
+ * to do anyways.
*/
int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd)
@@ -582,41 +669,30 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_mtrl = IXGBE_RXMTRL_V2_SYNC_MSG;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_mtrl = IXGBE_RXMTRL_V2_DELAY_REQ_MSG;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
default:
/*
- * register RXMTRL must be set, therefore it is not
- * possible to time stamp both V1 Sync and Delay_Req messages
- * and hardware does not support timestamping all packets
- * => return error
+ * register RXMTRL must be set in order to do V1 packets,
+ * therefore it is not possible to time stamp both V1 Sync and
+ * Delay_Req messages and hardware does not support
+ * timestamping all packets => return error
*/
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -626,6 +702,9 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
return 0;
}
+ /* Store filter value for later use */
+ adapter->rx_hwtstamp_filter = config.rx_filter;
+
/* define ethertype filter for timestamped packets */
if (is_l2)
IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
@@ -690,7 +769,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
/**
* ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
- * @adapter - pointer to the adapter structure
+ * @adapter: pointer to the adapter structure
*
* this function initializes the timecounter and cyclecounter
* structures for use in generated a ns counter from the arbitrary
@@ -826,7 +905,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
/**
* ixgbe_ptp_init
- * @adapter - the ixgbe private adapter structure
+ * @adapter: the ixgbe private adapter structure
*
* This function performs the required steps for enabling ptp
* support. If ptp support has already been loaded it simply calls the
@@ -870,6 +949,10 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
return;
}
+ /* initialize the ptp filter */
+ if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
+ e_dev_warn("ptp_filter_init failed\n");
+
spin_lock_init(&adapter->tmreg_lock);
ixgbe_ptp_start_cyclecounter(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 2d971d18696e..4fea8716ab64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -44,50 +44,15 @@
#include "ixgbe_sriov.h"
#ifdef CONFIG_PCI_IOV
-static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *pvfdev;
- u16 vf_devfn = 0;
- int device_id;
- int vfs_found = 0;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- device_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- device_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- device_id = 0;
- break;
- }
-
- vf_devfn = pdev->devfn + 0x80;
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == vf_devfn &&
- (pvfdev->bus->number >= pdev->bus->number))
- vfs_found++;
- vf_devfn += 2;
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
- device_id, pvfdev);
- }
-
- return vfs_found;
-}
-
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii)
{
struct ixgbe_hw *hw = &adapter->hw;
- int err = 0;
int num_vf_macvlans, i;
struct vf_macvlans *mv_list;
int pre_existing_vfs = 0;
- pre_existing_vfs = ixgbe_find_enabled_vfs(adapter);
+ pre_existing_vfs = pci_num_vf(adapter->pdev);
if (!pre_existing_vfs && !adapter->num_vfs)
return;
@@ -106,16 +71,33 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
"enabled for this device - Please reload all "
"VF drivers to avoid spoofed packet errors\n");
} else {
+ int err;
+ /*
+ * The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function. If the user requests greater thn
+ * 63 VFs then it is an error - reset to default of zero.
+ */
+ adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);
+
err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ e_err(probe, "Failed to enable PCI sriov: %d\n", err);
+ adapter->num_vfs = 0;
+ return;
+ }
}
- if (err) {
- e_err(probe, "Failed to enable PCI sriov: %d\n", err);
- goto err_novfs;
- }
- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
+ /* Enable VMDq flag so device will be set in VM mode */
+ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED;
+ if (!adapter->ring_feature[RING_F_VMDQ].limit)
+ adapter->ring_feature[RING_F_VMDQ].limit = 1;
+ adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs;
+
num_vf_macvlans = hw->mac.num_rar_entries -
(IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
@@ -146,12 +128,39 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
* and memory allocated set up the mailbox parameters
*/
ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops,
- sizeof(hw->mbx.ops));
+ memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+
+ /* limit trafffic classes based on VFs enabled */
+ if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
+ (adapter->num_vfs < 16)) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ } else if (adapter->num_vfs < 32) {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 4;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 4;
+ } else {
+ adapter->dcb_cfg.num_tcs.pg_tcs = 1;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = 1;
+ }
+
+ /* We do not support RSS w/ SR-IOV */
+ adapter->ring_feature[RING_F_RSS].limit = 1;
/* Disable RSC when in SR-IOV mode */
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
+
+#ifdef IXGBE_FCOE
+ /*
+ * When SR-IOV is enabled 82599 cannot support jumbo frames
+ * so we must disable FCoE because we cannot support FCoE MTU.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
+ IXGBE_FLAG_FCOE_CAPABLE);
+#endif
+
+ /* enable spoof checking for all VFs */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
return;
@@ -160,31 +169,80 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
/* Oh oh */
e_err(probe, "Unable to allocate memory for VF Data Storage - "
"SRIOV disabled\n");
- pci_disable_sriov(adapter->pdev);
+ ixgbe_disable_sriov(adapter);
+}
-err_novfs:
- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
- adapter->num_vfs = 0;
+static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *vfdev;
+ int dev_id;
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ dev_id = IXGBE_DEV_ID_82599_VF;
+ break;
+ case ixgbe_mac_X540:
+ dev_id = IXGBE_DEV_ID_X540_VF;
+ break;
+ default:
+ return false;
+ }
+
+ /* loop through all the VFs to see if we own any that are assigned */
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+ while (vfdev) {
+ /* if we don't own it we don't care */
+ if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+ /* if it is assigned we cannot release it */
+ if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ return true;
+ }
+
+ vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
+ }
+
+ return false;
}
-#endif /* #ifdef CONFIG_PCI_IOV */
+#endif /* #ifdef CONFIG_PCI_IOV */
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 gcr;
u32 gpie;
u32 vmdctl;
- int i;
+
+ /* set num VFs to 0 to prevent access to vfinfo */
+ adapter->num_vfs = 0;
+
+ /* free VF control structures */
+ kfree(adapter->vfinfo);
+ adapter->vfinfo = NULL;
+
+ /* free macvlan list */
+ kfree(adapter->mv_list);
+ adapter->mv_list = NULL;
+
+ /* if SR-IOV is already disabled then there is nothing to do */
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return;
#ifdef CONFIG_PCI_IOV
+ /*
+ * If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (ixgbe_vfs_are_assigned(adapter)) {
+ e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+ return;
+ }
/* disable iov and allow time for transactions to clear */
pci_disable_sriov(adapter->pdev);
#endif
/* turn off device IOV mode */
- gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- gcr &= ~(IXGBE_GCR_EXT_SRIOV);
- IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0);
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
gpie &= ~IXGBE_GPIE_VTMODE_MASK;
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -195,19 +253,14 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
IXGBE_WRITE_FLUSH(hw);
+ /* Disable VMDq flag so device will be set in VM mode */
+ if (adapter->ring_feature[RING_F_VMDQ].limit == 1)
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+ adapter->ring_feature[RING_F_VMDQ].offset = 0;
+
/* take a breather then clean up driver data */
msleep(100);
- /* Release reference to VF devices */
- for (i = 0; i < adapter->num_vfs; i++) {
- if (adapter->vfinfo[i].vfdev)
- pci_dev_put(adapter->vfinfo[i].vfdev);
- }
- kfree(adapter->vfinfo);
- kfree(adapter->mv_list);
- adapter->vfinfo = NULL;
-
- adapter->num_vfs = 0;
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
@@ -441,33 +494,16 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
return 0;
}
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_PCI_IOV
- int i;
- for (i = 0; i < adapter->num_vfs; i++) {
- if (adapter->vfinfo[i].vfdev->dev_flags &
- PCI_DEV_FLAGS_ASSIGNED)
- return true;
- }
-#endif
- return false;
-}
-
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
unsigned char vf_mac_addr[6];
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
unsigned int vfn = (event_mask & 0x3f);
- struct pci_dev *pvfdev;
- unsigned int device_id;
- u16 thisvf_devfn = (pdev->devfn + 0x80 + (vfn << 1)) |
- (pdev->devfn & 1);
bool enable = ((event_mask & 0x10000000U) != 0);
if (enable) {
- random_ether_addr(vf_mac_addr);
+ eth_random_addr(vf_mac_addr);
e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
vfn, vf_mac_addr);
/*
@@ -475,31 +511,6 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
* for it later.
*/
memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- device_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- device_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- device_id = 0;
- break;
- }
-
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
- while (pvfdev) {
- if (pvfdev->devfn == thisvf_devfn)
- break;
- pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
- device_id, pvfdev);
- }
- if (pvfdev)
- adapter->vfinfo[vfn].vfdev = pvfdev;
- else
- e_err(drv, "Couldn't find pci dev ptr for VF %4.4x\n",
- thisvf_devfn);
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 2ab38d5fda92..1be1d30e4e78 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -42,7 +42,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
-int ixgbe_check_vf_assignment(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
index 1d80b1cefa6a..16ddf14e8ba4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -37,7 +37,6 @@
#include <linux/netdevice.h>
#include <linux/hwmon.h>
-#ifdef CONFIG_IXGBE_HWMON
/* hwmon callback functions */
static ssize_t ixgbe_hwmon_show_location(struct device *dev,
struct device_attribute *attr,
@@ -96,11 +95,11 @@ static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,
return sprintf(buf, "%u\n", value);
}
-/*
+/**
* ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
- * @ adapter: pointer to the adapter structure
- * @ offset: offset in the eeprom sensor data table
- * @ type: type of sensor data to display
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
*
* For each file we want in hwmon's sysfs interface we need a device_attribute
* This is included in our hwmon_attr struct that contains the references to
@@ -241,5 +240,4 @@ err:
exit:
return rc;
}
-#endif /* CONFIG_IXGBE_HWMON */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 204848d2448c..400f86a31174 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -32,9 +32,6 @@
#include <linux/mdio.h>
#include <linux/netdevice.h>
-/* Vendor ID */
-#define IXGBE_INTEL_VENDOR_ID 0x8086
-
/* Device IDs */
#define IXGBE_DEV_ID_82598 0x10B6
#define IXGBE_DEV_ID_82598_BX 0x1508
@@ -57,6 +54,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
@@ -1452,6 +1450,7 @@ enum {
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_SHIFT 20
#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
#define IXGBE_ETQS_RX_QUEUE_SHIFT 16
@@ -2419,7 +2418,7 @@ typedef u32 ixgbe_physical_layer;
*/
/* BitTimes (BT) conversion */
-#define IXGBE_BT2KB(BT) ((BT + 1023) / (8 * 1024))
+#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024))
#define IXGBE_B2BT(BT) (BT * 8)
/* Calculate Delay to respond to PFC */
@@ -2450,24 +2449,31 @@ typedef u32 ixgbe_physical_layer;
#define IXGBE_PCI_DELAY 10000
/* Calculate X540 delay value in bit times */
-#define IXGBE_FILL_RATE (36 / 25)
-
-#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
- (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + \
- (2 * IXGBE_ID_X540) + \
- IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV_X540(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID_X540) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate 82599, 82598 delay value in bit times */
-#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
- (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
- (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
- IXGBE_HD + IXGBE_B2BT(TC)))
+#define IXGBE_DV(_max_frame_link, _max_frame_tc) \
+ ((36 * \
+ (IXGBE_B2BT(_max_frame_link) + \
+ IXGBE_PFC_D + \
+ (2 * IXGBE_CABLE_DC) + \
+ (2 * IXGBE_ID) + \
+ IXGBE_HD) / 25 + 1) + \
+ 2 * IXGBE_B2BT(_max_frame_tc))
/* Calculate low threshold delay values */
-#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
- (IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
-#define IXGBE_LOW_DV(TC) (2 * IXGBE_LOW_DV_X540(TC))
+#define IXGBE_LOW_DV_X540(_max_frame_tc) \
+ (2 * IXGBE_B2BT(_max_frame_tc) + \
+ (36 * IXGBE_PCI_DELAY / 25) + 1)
+#define IXGBE_LOW_DV(_max_frame_tc) \
+ (2 * IXGBE_LOW_DV_X540(_max_frame_tc))
/* Software ATR hash keys */
#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
@@ -2597,6 +2603,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_da_act_lmt_core1 = 8,
ixgbe_sfp_type_1g_cu_core0 = 9,
ixgbe_sfp_type_1g_cu_core1 = 10,
+ ixgbe_sfp_type_1g_sx_core0 = 11,
+ ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -2837,6 +2845,7 @@ struct ixgbe_mac_operations {
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
s32 (*clear_rar)(struct ixgbe_hw *, u32);
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+ s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
@@ -2912,6 +2921,7 @@ struct ixgbe_mac_info {
bool orig_link_settings_stored;
bool autotry_restart;
u8 flags;
+ u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data;
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index f90ec078ece2..de4da5219b71 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -156,6 +156,9 @@ mac_reset_top:
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ /* Save the SAN MAC RAR index */
+ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
+
/* Reserve the last RAR for the SAN MAC address */
hw->mac.num_rar_entries--;
}
@@ -832,6 +835,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
.set_vmdq = &ixgbe_set_vmdq_generic,
+ .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic,
.clear_vmdq = &ixgbe_clear_vmdq_generic,
.init_rx_addrs = &ixgbe_init_rx_addrs_generic,
.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index e09a6cc633bb..418af827b230 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -251,6 +251,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
IXGBE_ADVTXD_POPTS_SHIFT)
@@ -264,32 +265,9 @@ struct ixgbe_adv_tx_context_desc {
/* Interrupt register bitmasks */
-/* Extended Interrupt Cause Read */
-#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
-#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
-#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
-
-/* Extended Interrupt Cause Set */
-#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Set */
-#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-/* Extended Interrupt Mask Clear */
-#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
-#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
-#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
-
-#define IXGBE_EIMS_ENABLE_MASK ( \
- IXGBE_EIMS_RTX_QUEUE | \
- IXGBE_EIMS_MAILBOX | \
- IXGBE_EIMS_OTHER)
-
#define IXGBE_EITR_CNT_WDIS 0x80000000
+#define IXGBE_MAX_EITR 0x00000FF8
+#define IXGBE_MIN_EITR 8
/* Error Codes */
#define IXGBE_ERR_INVALID_MAC_ADDR -1
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index e8dddf572d38..8f2070439b59 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -43,7 +43,6 @@
#define IXGBE_ALL_RAR_ENTRIES 16
-#ifdef ETHTOOL_GSTATS
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
@@ -75,21 +74,17 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
zero_base)},
{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
zero_base)},
- {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
};
#define IXGBE_QUEUE_STATS_LEN 0
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
-#endif /* ETHTOOL_GSTATS */
-#ifdef ETHTOOL_TEST
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
-#endif /* ETHTOOL_TEST */
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
@@ -289,13 +284,11 @@ static void ixgbevf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- struct ixgbevf_ring *tx_ring = adapter->tx_ring;
- struct ixgbevf_ring *rx_ring = adapter->rx_ring;
ring->rx_max_pending = IXGBEVF_MAX_RXD;
ring->tx_max_pending = IXGBEVF_MAX_TXD;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
+ ring->rx_pending = adapter->rx_ring_count;
+ ring->tx_pending = adapter->tx_ring_count;
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
@@ -303,33 +296,28 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
- int i, err = 0;
u32 new_rx_count, new_tx_count;
+ int i, err = 0;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
- new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
- new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
- new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
+ new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
- /* nothing to do */
+ new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
+ new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ /* if nothing to do return success */
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count))
return 0;
- }
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
- /*
- * If the adapter isn't up and running then just set the
- * new parameters and scurry for the exits.
- */
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i].count = new_tx_count;
@@ -340,82 +328,98 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
goto clear_reset;
}
- tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!tx_ring) {
- err = -ENOMEM;
- goto clear_reset;
- }
-
- rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbevf_ring), GFP_KERNEL);
- if (!rx_ring) {
- err = -ENOMEM;
- goto err_rx_setup;
- }
-
- ixgbevf_down(adapter);
+ if (new_tx_count != adapter->tx_ring_count) {
+ tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
- memcpy(tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring[i].count = new_tx_count;
- err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
- if (err) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ /* clone ring and setup updated count */
+ tx_ring[i] = adapter->tx_ring[i];
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
+ if (!err)
+ continue;
while (i) {
i--;
- ixgbevf_free_tx_resources(adapter,
- &tx_ring[i]);
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
}
- goto err_tx_ring_setup;
+
+ vfree(tx_ring);
+ tx_ring = NULL;
+
+ goto clear_reset;
}
- tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
}
- memcpy(rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring[i].count = new_rx_count;
- err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
- if (err) {
+ if (new_rx_count != adapter->rx_ring_count) {
+ rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
+ if (!rx_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ /* clone ring and setup updated count */
+ rx_ring[i] = adapter->rx_ring[i];
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+ if (!err)
+ continue;
while (i) {
i--;
- ixgbevf_free_rx_resources(adapter,
- &rx_ring[i]);
+ ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
}
- goto err_rx_ring_setup;
+
+ vfree(rx_ring);
+ rx_ring = NULL;
+
+ goto clear_reset;
}
- rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
}
- /*
- * Only switch to new rings if all the prior allocations
- * and ring setups have succeeded.
- */
- kfree(adapter->tx_ring);
- adapter->tx_ring = tx_ring;
- adapter->tx_ring_count = new_tx_count;
-
- kfree(adapter->rx_ring);
- adapter->rx_ring = rx_ring;
- adapter->rx_ring_count = new_rx_count;
+ /* bring interface down to prepare for update */
+ ixgbevf_down(adapter);
- /* success! */
- ixgbevf_up(adapter);
+ /* Tx */
+ if (tx_ring) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbevf_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+ adapter->tx_ring[i] = tx_ring[i];
+ }
+ adapter->tx_ring_count = new_tx_count;
- goto clear_reset;
+ vfree(tx_ring);
+ tx_ring = NULL;
+ }
-err_rx_ring_setup:
- for(i = 0; i < adapter->num_tx_queues; i++)
- ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ /* Rx */
+ if (rx_ring) {
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbevf_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+ adapter->rx_ring[i] = rx_ring[i];
+ }
+ adapter->rx_ring_count = new_rx_count;
-err_tx_ring_setup:
- kfree(rx_ring);
+ vfree(rx_ring);
+ rx_ring = NULL;
+ }
-err_rx_setup:
- kfree(tx_ring);
+ /* restore interface using new values */
+ ixgbevf_up(adapter);
clear_reset:
+ /* free Tx resources if Rx error is encountered */
+ if (tx_ring) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+ vfree(tx_ring);
+ }
+
clear_bit(__IXGBEVF_RESETTING, &adapter->state);
return err;
}
@@ -674,10 +678,8 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- if (netif_running(netdev)) {
- if (!adapter->dev_closed)
- ixgbevf_reinit_locked(adapter);
- }
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 0a1b99240d43..98cadb0c4dab 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -52,12 +52,12 @@ struct ixgbevf_tx_buffer {
struct ixgbevf_rx_buffer {
struct sk_buff *skb;
dma_addr_t dma;
- struct page *page;
- dma_addr_t page_dma;
- unsigned int page_offset;
};
struct ixgbevf_ring {
+ struct ixgbevf_ring *next;
+ struct net_device *netdev;
+ struct device *dev;
struct ixgbevf_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
@@ -83,29 +83,9 @@ struct ixgbevf_ring {
* offset associated with this ring, which is different
* for DCB and RSS modes */
-#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
- /* cpu for tx queue */
- int cpu;
-#endif
-
- u64 v_idx; /* maps directly to the index for this ring in the hardware
- * vector array, can also be used for finding the bit in EICR
- * and friends that represents the vector for this ring */
-
- u16 work_limit; /* max work per interrupt */
u16 rx_buf_len;
};
-enum ixgbevf_ring_f_enum {
- RING_F_NONE = 0,
- RING_F_ARRAY_SIZE /* must be last in enum set */
-};
-
-struct ixgbevf_ring_feature {
- int indices;
- int mask;
-};
-
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -120,8 +100,6 @@ struct ixgbevf_ring_feature {
#define IXGBEVF_MIN_RXD 64
/* Supported Rx Buffer Sizes */
-#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048 2048
#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
@@ -140,22 +118,42 @@ struct ixgbevf_ring_feature {
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+struct ixgbevf_ring_container {
+ struct ixgbevf_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+/* iterator for handling rings in ring container */
+#define ixgbevf_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos != NULL; pos = pos->next)
+
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbevf_q_vector {
struct ixgbevf_adapter *adapter;
+ u16 v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring */
+ u16 itr; /* Interrupt throttle rate written to EITR */
struct napi_struct napi;
- DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
- DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
- u8 rxr_count; /* Rx ring count assigned to this vector */
- u8 txr_count; /* Tx ring count assigned to this vector */
- u8 tx_itr;
- u8 rx_itr;
- u32 eitr;
- int v_idx; /* vector index in list */
+ struct ixgbevf_ring_container rx, tx;
+ char name[IFNAMSIZ + 9];
};
+/*
+ * microsecond values for various ITR rates shifted by 2 to fit itr register
+ * with the first 3 bits reserved 0
+ */
+#define IXGBE_MIN_RSC_ITR 24
+#define IXGBE_100K_ITR 40
+#define IXGBE_20K_ITR 200
+#define IXGBE_10K_ITR 400
+#define IXGBE_8K_ITR 500
+
/* Helper macros to switch between ints/sec and what the register uses.
* And yes, it's the same math going both ways. The lowest value
* supported by all of the ixgbe hardware is 8.
@@ -168,12 +166,12 @@ struct ixgbevf_q_vector {
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
-#define IXGBE_RX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
-#define IXGBE_TX_DESC_ADV(R, i) \
- (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
-#define IXGBE_TX_CTXTDESC_ADV(R, i) \
- (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+#define IXGBEVF_RX_DESC(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_DESC(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
+#define IXGBEVF_TX_CTXTDESC(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
@@ -181,9 +179,8 @@ struct ixgbevf_q_vector {
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_Q_VECTORS 2
-#define MAX_MSIX_COUNT 2
-#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
/* board specific private data structure */
@@ -193,12 +190,14 @@ struct ixgbevf_adapter {
u16 bd_number;
struct work_struct reset_task;
struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
/* Interrupt Throttle Rate */
- u32 itr_setting;
- u16 eitr_low;
- u16 eitr_high;
+ u16 rx_itr_setting;
+ u16 tx_itr_setting;
+
+ /* interrupt masks */
+ u32 eims_enable_mask;
+ u32 eims_other;
/* TX */
struct ixgbevf_ring *tx_ring; /* One per active queue */
@@ -213,18 +212,13 @@ struct ixgbevf_adapter {
/* RX */
struct ixgbevf_ring *rx_ring; /* One per active queue */
int num_rx_queues;
- int num_rx_pools; /* == num_rx_queues in 82598 */
- int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 hw_csum_rx_good;
u64 non_eop_descs;
int num_msix_vectors;
- int max_msix_q_vectors; /* true count of q_vectors for device */
- struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
@@ -232,15 +226,8 @@ struct ixgbevf_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
-#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
@@ -254,18 +241,16 @@ struct ixgbevf_adapter {
u32 eitr_param;
unsigned long state;
- u32 *config_space;
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
u32 link_speed;
bool link_up;
- unsigned long link_check_timeout;
struct work_struct watchdog_task;
- bool netdev_registered;
- bool dev_closed;
+
+ spinlock_t mbx_lock;
};
enum ixbgevf_state_t {
@@ -301,11 +286,8 @@ extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
struct ixgbevf_ring *);
extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
-
-#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *ifr);
-#endif
extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index f69ec4288b10..3f9841d619ad 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -42,6 +42,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
@@ -97,9 +98,7 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
-static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
- u32 itr_reg);
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
struct ixgbevf_ring *rx_ring,
@@ -115,7 +114,7 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
}
-/*
+/**
* ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
* @adapter: pointer to adapter struct
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
@@ -146,18 +145,18 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
}
}
-static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer
*tx_buffer_info)
{
if (tx_buffer_info->dma) {
if (tx_buffer_info->mapped_as_page)
- dma_unmap_page(&adapter->pdev->dev,
+ dma_unmap_page(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
else
- dma_unmap_single(&adapter->pdev->dev,
+ dma_unmap_single(tx_ring->dev,
tx_buffer_info->dma,
tx_buffer_info->length,
DMA_TO_DEVICE);
@@ -175,38 +174,34 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
- (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#ifdef MAX_SKB_FRAGS
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-#else
-#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
-#endif
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
static void ixgbevf_tx_timeout(struct net_device *netdev);
/**
* ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: board private structure
* @tx_ring: tx ring to clean
**/
-static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *tx_ring)
{
- struct net_device *netdev = adapter->netdev;
- struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int i, eop, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ return true;
+
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
- (count < tx_ring->work_limit)) {
+ (count < tx_ring->count)) {
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
/* eop could change between read and DD-check */
@@ -214,7 +209,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
goto cont_loop;
for ( ; !cleaned; count++) {
struct sk_buff *skb;
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
cleaned = (i == eop);
skb = tx_buffer_info->skb;
@@ -231,7 +226,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
total_bytes += bytecount;
}
- ixgbevf_unmap_and_free_tx_resource(adapter,
+ ixgbevf_unmap_and_free_tx_resource(tx_ring,
tx_buffer_info);
tx_desc->wb.status = 0;
@@ -243,37 +238,25 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
cont_loop:
eop = tx_ring->tx_buffer_info[i].next_to_watch;
- eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
}
tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
- if (unlikely(count && netif_carrier_ok(netdev) &&
+ if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
(IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
smp_mb();
-#ifdef HAVE_TX_MQ
- if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
- !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- netif_wake_subqueue(netdev, tx_ring->queue_index);
- ++adapter->restart_queue;
- }
-#else
- if (netif_queue_stopped(netdev) &&
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- netif_wake_queue(netdev);
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
++adapter->restart_queue;
}
-#endif
- }
-
- /* re-arm the interrupt */
- if ((count >= tx_ring->work_limit) &&
- (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
- IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
}
u64_stats_update_begin(&tx_ring->syncp);
@@ -281,7 +264,7 @@ cont_loop:
tx_ring->total_packets += total_packets;
u64_stats_update_end(&tx_ring->syncp);
- return count < tx_ring->work_limit;
+ return count < tx_ring->count;
}
/**
@@ -301,13 +284,10 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
bool is_vlan = (status & IXGBE_RXD_STAT_VP);
u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
- if (is_vlan && test_bit(tag, adapter->active_vlans))
+ if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, tag);
- if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
- napi_gro_receive(&q_vector->napi, skb);
- else
- netif_rx(skb);
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
@@ -317,12 +297,13 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
* @skb: skb currently being received and modified
**/
static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring,
u32 status_err, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
/* Rx csum disabled */
- if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ if (!(ring->netdev->features & NETIF_F_RXCSUM))
return;
/* if IP and error */
@@ -357,52 +338,21 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
struct sk_buff *skb;
- unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+ unsigned int i = rx_ring->next_to_use;
- i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
while (cleaned_count--) {
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
-
- if (!bi->page_dma &&
- (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
- if (!bi->page) {
- adapter->alloc_rx_page_failed++;
- goto no_buffers;
- }
- bi->page_offset = 0;
- } else {
- /* use a half page if we're re-using */
- bi->page_offset ^= (PAGE_SIZE / 2);
- }
-
- bi->page_dma = dma_map_page(&pdev->dev, bi->page,
- bi->page_offset,
- (PAGE_SIZE / 2),
- DMA_FROM_DEVICE);
- }
-
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
skb = bi->skb;
if (!skb) {
- skb = netdev_alloc_skb(adapter->netdev,
- bufsz);
-
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ rx_ring->rx_buf_len);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
- /*
- * Make buffer alignment 2 beyond a 16 byte boundary
- * this will result in a 16 byte aligned IP header after
- * the 14 byte MAC header is removed
- */
- skb_reserve(skb, NET_IP_ALIGN);
-
bi->skb = skb;
}
if (!bi->dma) {
@@ -410,14 +360,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
}
- /* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- } else {
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
- }
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
i++;
if (i == rx_ring->count)
@@ -428,36 +371,22 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
}
}
static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
- u64 qmask)
+ u32 qmask)
{
- u32 mask;
struct ixgbe_hw *hw = &adapter->hw;
- mask = (qmask & 0xFFFFFFFF);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-}
-
-static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
- int *work_done, int work_to_do)
+ int budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct pci_dev *pdev = adapter->pdev;
@@ -466,36 +395,21 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct sk_buff *skb;
unsigned int i;
u32 len, staterr;
- u16 hdr_info;
- bool cleaned = false;
int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
- rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
rx_buffer_info = &rx_ring->rx_buffer_info[i];
while (staterr & IXGBE_RXD_STAT_DD) {
- u32 upper_len = 0;
- if (*work_done >= work_to_do)
+ if (!budget)
break;
- (*work_done)++;
+ budget--;
rmb(); /* read descriptor and rx_buffer_info after status DD */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
- len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- if (hdr_info & IXGBE_RXDADV_SPH)
- adapter->rx_hdr_split++;
- if (len > IXGBEVF_RX_HDR_SIZE)
- len = IXGBEVF_RX_HDR_SIZE;
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
- } else {
- len = le16_to_cpu(rx_desc->wb.upper.length);
- }
- cleaned = true;
+ len = le16_to_cpu(rx_desc->wb.upper.length);
skb = rx_buffer_info->skb;
prefetch(skb->data - NET_IP_ALIGN);
rx_buffer_info->skb = NULL;
@@ -508,46 +422,19 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
skb_put(skb, len);
}
- if (upper_len) {
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- rx_buffer_info->page_dma = 0;
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- rx_buffer_info->page,
- rx_buffer_info->page_offset,
- upper_len);
-
- if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
- (page_count(rx_buffer_info->page) != 1))
- rx_buffer_info->page = NULL;
- else
- get_page(rx_buffer_info->page);
-
- skb->len += upper_len;
- skb->data_len += upper_len;
- skb->truesize += upper_len;
- }
-
i++;
if (i == rx_ring->count)
i = 0;
- next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ next_rxd = IXGBEVF_RX_DESC(rx_ring, i);
prefetch(next_rxd);
cleaned_count++;
next_buffer = &rx_ring->rx_buffer_info[i];
if (!(staterr & IXGBE_RXD_STAT_EOP)) {
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- rx_buffer_info->skb = next_buffer->skb;
- rx_buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- } else {
- skb->next = next_buffer->skb;
- skb->next->prev = skb;
- }
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
adapter->non_eop_descs++;
goto next_desc;
}
@@ -558,7 +445,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
goto next_desc;
}
- ixgbevf_rx_checksum(adapter, staterr, skb);
+ ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -573,7 +460,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
if (header_fixup_len < 14)
skb_push(skb, header_fixup_len);
}
- skb->protocol = eth_type_trans(skb, adapter->netdev);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
@@ -605,95 +492,74 @@ next_desc:
rx_ring->total_bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
- return cleaned;
+ return !!budget;
}
/**
- * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * ixgbevf_poll - NAPI polling calback
* @napi: napi struct with our devices info in it
* @budget: amount of work driver is allowed to do this pass, in packets
*
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
+ * This function will clean more than one or more rings associated with a
+ * q_vector.
**/
-static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+static int ixgbevf_poll(struct napi_struct *napi, int budget)
{
struct ixgbevf_q_vector *q_vector =
container_of(napi, struct ixgbevf_q_vector, napi);
struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *rx_ring = NULL;
- int work_done = 0;
- long r_idx;
+ struct ixgbevf_ring *ring;
+ int per_ring_budget;
+ bool clean_complete = true;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ ixgbevf_for_each_ring(ring, q_vector->tx)
+ clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
- ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-
- /* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
- }
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget/q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
+
+ ixgbevf_for_each_ring(ring, q_vector->rx)
+ clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
+ per_ring_budget);
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+ /* all work done, exit the polling mode */
+ napi_complete(napi);
+ if (adapter->rx_itr_setting & 1)
+ ixgbevf_set_itr(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter,
+ 1 << q_vector->v_idx);
- return work_done;
+ return 0;
}
/**
- * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function will clean more than one rx queue associated with a
- * q_vector.
- **/
-static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{
- struct ixgbevf_q_vector *q_vector =
- container_of(napi, struct ixgbevf_q_vector, napi);
struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *rx_ring = NULL;
- int work_done = 0, i;
- long r_idx;
- u64 enable_mask = 0;
-
- /* attempt to distribute budget to each queue fairly, but don't allow
- * the budget to go below 1 because we'll exit polling */
- budget /= (q_vector->rxr_count ?: 1);
- budget = max(budget, 1);
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
- enable_mask |= rx_ring->v_idx;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
-#ifndef HAVE_NETDEV_NAPI_LIST
- if (!netif_running(adapter->netdev))
- work_done = 0;
-
-#endif
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
- /* If all Rx work done, exit the polling mode */
- if (work_done < budget) {
- napi_complete(napi);
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
- if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
- ixgbevf_irq_enable_queues(adapter, enable_mask);
- }
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
- return work_done;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
}
-
/**
* ixgbevf_configure_msix - Configure MSI-X hardware
* @adapter: board private structure
@@ -704,56 +570,49 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
{
struct ixgbevf_q_vector *q_vector;
- struct ixgbe_hw *hw = &adapter->hw;
- int i, j, q_vectors, v_idx, r_idx;
- u32 mask;
+ int q_vectors, v_idx;
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ adapter->eims_enable_mask = 0;
/*
* Populate the IVAR table and set the ITR values to the
* corresponding register.
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ struct ixgbevf_ring *ring;
q_vector = adapter->q_vector[v_idx];
- /* XXX for_each_set_bit(...) */
- r_idx = find_first_bit(q_vector->rxr_idx,
- adapter->num_rx_queues);
-
- for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx].reg_idx;
- ixgbevf_set_ivar(adapter, 0, j, v_idx);
- r_idx = find_next_bit(q_vector->rxr_idx,
- adapter->num_rx_queues,
- r_idx + 1);
- }
- r_idx = find_first_bit(q_vector->txr_idx,
- adapter->num_tx_queues);
-
- for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx].reg_idx;
- ixgbevf_set_ivar(adapter, 1, j, v_idx);
- r_idx = find_next_bit(q_vector->txr_idx,
- adapter->num_tx_queues,
- r_idx + 1);
+
+ ixgbevf_for_each_ring(ring, q_vector->rx)
+ ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+ ixgbevf_for_each_ring(ring, q_vector->tx)
+ ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
+
+ if (q_vector->tx.ring && !q_vector->rx.ring) {
+ /* tx only vector */
+ if (adapter->tx_itr_setting == 1)
+ q_vector->itr = IXGBE_10K_ITR;
+ else
+ q_vector->itr = adapter->tx_itr_setting;
+ } else {
+ /* rx or rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
}
- /* if this is a tx only vector halve the interrupt rate */
- if (q_vector->txr_count && !q_vector->rxr_count)
- q_vector->eitr = (adapter->eitr_param >> 1);
- else if (q_vector->rxr_count)
- /* rx only */
- q_vector->eitr = adapter->eitr_param;
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= 1 << v_idx;
- ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+ ixgbevf_write_eitr(q_vector);
}
ixgbevf_set_ivar(adapter, -1, 1, v_idx);
-
- /* set up to autoclear timer, and the vectors */
- mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~IXGBE_EIMS_OTHER;
- IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+ /* setup eims_other and add value to global eims_enable_mask */
+ adapter->eims_other = 1 << v_idx;
+ adapter->eims_enable_mask |= adapter->eims_other;
}
enum latency_range {
@@ -765,11 +624,8 @@ enum latency_range {
/**
* ixgbevf_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @eitr: eitr setting (ints per sec) to give last timeslice
- * @itr_setting: current throttle rate in ints/second
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
*
* Stores a new ITR value based on packets and byte
* counts during the last interrupt. The advantage of per interrupt
@@ -779,17 +635,17 @@ enum latency_range {
* on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
- u32 eitr, u8 itr_setting,
- int packets, int bytes)
+static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring_container *ring_container)
{
- unsigned int retval = itr_setting;
+ int bytes = ring_container->total_bytes;
+ int packets = ring_container->total_packets;
u32 timepassed_us;
u64 bytes_perint;
+ u8 itr_setting = ring_container->itr;
if (packets == 0)
- goto update_itr_done;
-
+ return;
/* simple throttlerate management
* 0-20MB/s lowest (100000 ints/s)
@@ -797,134 +653,77 @@ static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
* 100-1249MB/s bulk (8000 ints/s)
*/
/* what was last interrupt timeslice? */
- timepassed_us = 1000000/eitr;
+ timepassed_us = q_vector->itr >> 2;
bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) {
case lowest_latency:
- if (bytes_perint > adapter->eitr_low)
- retval = low_latency;
+ if (bytes_perint > 10)
+ itr_setting = low_latency;
break;
case low_latency:
- if (bytes_perint > adapter->eitr_high)
- retval = bulk_latency;
- else if (bytes_perint <= adapter->eitr_low)
- retval = lowest_latency;
+ if (bytes_perint > 20)
+ itr_setting = bulk_latency;
+ else if (bytes_perint <= 10)
+ itr_setting = lowest_latency;
break;
case bulk_latency:
- if (bytes_perint <= adapter->eitr_high)
- retval = low_latency;
+ if (bytes_perint <= 20)
+ itr_setting = low_latency;
break;
}
-update_itr_done:
- return retval;
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itr_setting;
}
-/**
- * ixgbevf_write_eitr - write VTEITR register in hardware specific way
- * @adapter: pointer to adapter struct
- * @v_idx: vector index into q_vector array
- * @itr_reg: new value to be written in *register* format, not ints/s
- *
- * This function is made to be called by ethtool and by the driver
- * when it needs to update VTEITR registers at runtime. Hardware
- * specific quirks/differences are taken care of here.
- */
-static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
- u32 itr_reg)
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
{
- struct ixgbe_hw *hw = &adapter->hw;
-
- itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
-
- /*
- * set the WDIS bit to not clear the timer bits and cause an
- * immediate assertion of the interrupt
- */
- itr_reg |= IXGBE_EITR_CNT_WDIS;
+ u32 new_itr = q_vector->itr;
+ u8 current_itr;
- IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
-}
+ ixgbevf_update_itr(q_vector, &q_vector->tx);
+ ixgbevf_update_itr(q_vector, &q_vector->rx);
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
-{
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- u32 new_itr;
- u8 current_itr, ret_itr;
- int i, r_idx, v_idx = q_vector->v_idx;
- struct ixgbevf_ring *rx_ring, *tx_ring;
-
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
- ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
- q_vector->tx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
- }
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
- q_vector->rx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
- current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
case lowest_latency:
- new_itr = 100000;
+ new_itr = IXGBE_100K_ITR;
break;
case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
+ new_itr = IXGBE_20K_ITR;
break;
case bulk_latency:
default:
- new_itr = 8000;
+ new_itr = IXGBE_8K_ITR;
break;
}
- if (new_itr != q_vector->eitr) {
- u32 itr_reg;
-
- /* save the algorithm value here, not the smoothed one */
- q_vector->eitr = new_itr;
+ if (new_itr != q_vector->itr) {
/* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
- itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
- ixgbevf_write_eitr(adapter, v_idx, itr_reg);
+ new_itr = (10 * new_itr * q_vector->itr) /
+ ((9 * new_itr) + q_vector->itr);
+
+ /* save the algorithm value here */
+ q_vector->itr = new_itr;
+
+ ixgbevf_write_eitr(q_vector);
}
}
static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
{
- struct net_device *netdev = data;
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
- u32 eicr;
u32 msg;
bool got_ack = false;
- eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
- IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
-
if (!hw->mbx.ops.check_for_ack(hw))
got_ack = true;
@@ -953,75 +752,24 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
if (got_ack)
hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
-{
- struct ixgbevf_q_vector *q_vector = data;
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbevf_ring *tx_ring;
- int i, r_idx;
-
- if (!q_vector->txr_count)
- return IRQ_HANDLED;
-
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
- tx_ring->total_bytes = 0;
- tx_ring->total_packets = 0;
- ixgbevf_clean_tx_irq(adapter, tx_ring);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
- }
-
- if (adapter->itr_setting & 1)
- ixgbevf_set_itr_msix(q_vector);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other);
return IRQ_HANDLED;
}
+
/**
- * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
* @irq: unused
* @data: pointer to our q_vector struct for this interrupt vector
**/
-static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
{
struct ixgbevf_q_vector *q_vector = data;
- struct ixgbevf_adapter *adapter = q_vector->adapter;
- struct ixgbe_hw *hw = &adapter->hw;
- struct ixgbevf_ring *rx_ring;
- int r_idx;
- int i;
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
- rx_ring->total_bytes = 0;
- rx_ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
-
- if (!q_vector->rxr_count)
- return IRQ_HANDLED;
-
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
- /* disable interrupts on this vector only */
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
- napi_schedule(&q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
-{
- ixgbevf_msix_clean_rx(irq, data);
- ixgbevf_msix_clean_tx(irq, data);
+ /* EIAM disabled interrupts (on this vector) for us */
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule(&q_vector->napi);
return IRQ_HANDLED;
}
@@ -1031,9 +779,9 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- set_bit(r_idx, q_vector->rxr_idx);
- q_vector->rxr_count++;
- a->rx_ring[r_idx].v_idx = 1 << v_idx;
+ a->rx_ring[r_idx].next = q_vector->rx.ring;
+ q_vector->rx.ring = &a->rx_ring[r_idx];
+ q_vector->rx.count++;
}
static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
@@ -1041,9 +789,9 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
{
struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
- set_bit(t_idx, q_vector->txr_idx);
- q_vector->txr_count++;
- a->tx_ring[t_idx].v_idx = 1 << v_idx;
+ a->tx_ring[t_idx].next = q_vector->tx.ring;
+ q_vector->tx.ring = &a->tx_ring[t_idx];
+ q_vector->tx.count++;
}
/**
@@ -1119,37 +867,30 @@ out:
static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- irqreturn_t (*handler)(int, void *);
- int i, vector, q_vectors, err;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int vector, err;
int ri = 0, ti = 0;
- /* Decrement for Other and TCP Timer vectors */
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
- ? &ixgbevf_msix_clean_many : \
- (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
- (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
- NULL)
for (vector = 0; vector < q_vectors; vector++) {
- handler = SET_HANDLER(adapter->q_vector[vector]);
-
- if (handler == &ixgbevf_msix_clean_rx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "rx", ri++);
- } else if (handler == &ixgbevf_msix_clean_tx) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "tx", ti++);
- } else if (handler == &ixgbevf_msix_clean_many) {
- sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "TxRx", vector);
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
+ struct msix_entry *entry = &adapter->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "TxRx", ri++);
+ ti++;
+ } else if (q_vector->rx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "rx", ri++);
+ } else if (q_vector->tx.ring) {
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-%s-%d", netdev->name, "tx", ti++);
} else {
/* skip this unused q_vector */
continue;
}
- err = request_irq(adapter->msix_entries[vector].vector,
- handler, 0, adapter->name[vector],
- adapter->q_vector[vector]);
+ err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
+ q_vector->name, q_vector);
if (err) {
hw_dbg(&adapter->hw,
"request_irq failed for MSIX interrupt "
@@ -1158,9 +899,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
}
}
- sprintf(adapter->name[vector], "%s:mbx", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+ &ixgbevf_msix_mbx, 0, netdev->name, adapter);
if (err) {
hw_dbg(&adapter->hw,
"request_irq for msix_mbx failed: %d\n", err);
@@ -1170,9 +910,11 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
return 0;
free_queue_irqs:
- for (i = vector - 1; i >= 0; i--)
- free_irq(adapter->msix_entries[--vector].vector,
- &(adapter->q_vector[i]));
+ while (vector) {
+ vector--;
+ free_irq(adapter->msix_entries[vector].vector,
+ adapter->q_vector[vector]);
+ }
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -1185,11 +927,10 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
for (i = 0; i < q_vectors; i++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
- bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
- bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
- q_vector->rxr_count = 0;
- q_vector->txr_count = 0;
- q_vector->eitr = adapter->eitr_param;
+ q_vector->rx.ring = NULL;
+ q_vector->tx.ring = NULL;
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
}
}
@@ -1215,17 +956,20 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
int i, q_vectors;
q_vectors = adapter->num_msix_vectors;
-
i = q_vectors - 1;
- free_irq(adapter->msix_entries[i].vector, netdev);
+ free_irq(adapter->msix_entries[i].vector, adapter);
i--;
for (; i >= 0; i--) {
+ /* free only the irqs that were actually requested */
+ if (!adapter->q_vector[i]->rx.ring &&
+ !adapter->q_vector[i]->tx.ring)
+ continue;
+
free_irq(adapter->msix_entries[i].vector,
adapter->q_vector[i]);
}
@@ -1239,10 +983,12 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
**/
static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
{
- int i;
struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, 0);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, 0);
IXGBE_WRITE_FLUSH(hw);
@@ -1254,23 +1000,13 @@ static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
* ixgbevf_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
**/
-static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
- bool queues, bool flush)
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 mask;
- u64 qmask;
- mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
- qmask = ~0;
-
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-
- if (queues)
- ixgbevf_irq_enable_queues(adapter, qmask);
-
- if (flush)
- IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask);
}
/**
@@ -1320,29 +1056,14 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
srrctl = IXGBE_SRRCTL_DROP_EN;
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- u16 bufsz = IXGBEVF_RXBUFFER_2048;
- /* grow the amount we can receive on large page machines */
- if (bufsz < (PAGE_SIZE / 2))
- bufsz = (PAGE_SIZE / 2);
- /* cap the bufsz at our largest descriptor size */
- bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
-
- srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
- srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
- IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
- IXGBE_SRRCTL_BSIZEHDR_MASK);
- } else {
- srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
- if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
- srrctl |= IXGBEVF_RXBUFFER_2048 >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
- srrctl |= rx_ring->rx_buf_len >>
- IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- }
+ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+ srrctl |= IXGBEVF_RXBUFFER_2048 >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= rx_ring->rx_buf_len >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
}
@@ -1362,36 +1083,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
u32 rdlen;
int rx_buf_len;
- /* Decide whether to use packet split mode or not */
- if (netdev->mtu > ETH_DATA_LEN) {
- if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- } else {
- if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- else
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
- }
-
- /* Set the RX buffer length according to the mode */
- if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
- /* PSRTYPE must be initialized in 82599 */
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
- IXGBE_PSRTYPE_UDPHDR |
- IXGBE_PSRTYPE_IPV4HDR |
- IXGBE_PSRTYPE_IPV6HDR |
- IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
- rx_buf_len = IXGBEVF_RX_HDR_SIZE;
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
- if (netdev->mtu <= ETH_DATA_LEN)
- rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else
- rx_buf_len = ALIGN(max_frame, 1024);
- }
+ /* PSRTYPE must be initialized in 82599 */
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
@@ -1418,9 +1115,14 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* add VID to filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, true);
+
+ spin_unlock(&adapter->mbx_lock);
+
set_bit(vid, adapter->active_vlans);
return 0;
@@ -1431,9 +1133,14 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* remove VID from filter table */
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, false);
+
+ spin_unlock(&adapter->mbx_lock);
+
clear_bit(vid, adapter->active_vlans);
return 0;
@@ -1488,11 +1195,15 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ spin_lock(&adapter->mbx_lock);
+
/* reprogram multicast list */
if (hw->mac.ops.update_mc_addr_list)
hw->mac.ops.update_mc_addr_list(hw, netdev);
ixgbevf_write_uc_addr_list(netdev);
+
+ spin_unlock(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1502,15 +1213,8 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
- struct napi_struct *napi;
q_vector = adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
- napi = &q_vector->napi;
- if (q_vector->rxr_count > 1)
- napi->poll = &ixgbevf_clean_rxonly_many;
-
- napi_enable(napi);
+ napi_enable(&q_vector->napi);
}
}
@@ -1522,8 +1226,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx];
- if (!q_vector->rxr_count)
- continue;
napi_disable(&q_vector->napi);
}
}
@@ -1541,9 +1243,8 @@ static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
ixgbevf_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbevf_ring *ring = &adapter->rx_ring[i];
- ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
- ring->next_to_use = ring->count - 1;
- writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+ ixgbevf_alloc_rx_buffers(adapter, ring,
+ IXGBE_DESC_UNUSED(ring));
}
}
@@ -1647,6 +1348,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_configure_msix(adapter);
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.set_rar) {
if (is_valid_ether_addr(hw->mac.addr))
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
@@ -1658,6 +1361,8 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
hw->mbx.ops.write_posted(hw, msg, 2);
+ spin_unlock(&adapter->mbx_lock);
+
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1667,10 +1372,6 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_save_reset_stats(adapter);
ixgbevf_init_last_counter_stats(adapter);
- /* bring the link up in the watchdog, this could race with our first
- * link up interrupt but shouldn't be a problem */
- adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
- adapter->link_check_timeout = jiffies;
mod_timer(&adapter->watchdog_timer, jiffies);
}
@@ -1685,7 +1386,7 @@ void ixgbevf_up(struct ixgbevf_adapter *adapter)
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_VTEICR);
- ixgbevf_irq_enable(adapter, true, true);
+ ixgbevf_irq_enable(adapter);
}
/**
@@ -1723,14 +1424,6 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
dev_kfree_skb(this);
} while (skb);
}
- if (!rx_buffer_info->page)
- continue;
- dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- rx_buffer_info->page_dma = 0;
- put_page(rx_buffer_info->page);
- rx_buffer_info->page = NULL;
- rx_buffer_info->page_offset = 0;
}
size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
@@ -1767,7 +1460,7 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
@@ -1882,11 +1575,15 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.reset_hw(hw))
hw_dbg(hw, "PF still resetting\n");
else
hw->mac.ops.init_hw(hw);
+ spin_unlock(&adapter->mbx_lock);
+
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
netdev->addr_len);
@@ -1900,10 +1597,9 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
{
int err, vector_threshold;
- /* We'll want at least 3 (vector_threshold):
- * 1) TxQ[0] Cleanup
- * 2) RxQ[0] Cleanup
- * 3) Other (Link Status Change, etc.)
+ /* We'll want at least 2 (vector_threshold):
+ * 1) TxQ[0] + RxQ[0] handler
+ * 2) Other (Link Status Change, etc.)
*/
vector_threshold = MIN_MSIX_COUNT;
@@ -1942,8 +1638,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
}
}
-/*
- * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -1958,8 +1654,6 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
- adapter->num_rx_pools = adapter->num_rx_queues;
- adapter->num_rx_queues_per_pool = 1;
}
/**
@@ -1988,12 +1682,16 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
adapter->tx_ring[i].count = adapter->tx_ring_count;
adapter->tx_ring[i].queue_index = i;
adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i].dev = &adapter->pdev->dev;
+ adapter->tx_ring[i].netdev = adapter->netdev;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->rx_ring[i].count = adapter->rx_ring_count;
adapter->rx_ring[i].queue_index = i;
adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i].dev = &adapter->pdev->dev;
+ adapter->rx_ring[i].netdev = adapter->netdev;
}
return 0;
@@ -2020,10 +1718,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
- * (roughly) twice the number of vectors as there are CPU's.
+ * (roughly) the same number of vectors as there are CPU's.
+ * The default is to use pairs of vectors.
*/
- v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
- (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+ v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ v_budget = min_t(int, v_budget, num_online_cpus());
+ v_budget += NON_Q_VECTORS;
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
@@ -2054,12 +1754,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
int q_idx, num_q_vectors;
struct ixgbevf_q_vector *q_vector;
- int napi_vectors;
- int (*poll)(struct napi_struct *, int);
num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- napi_vectors = adapter->num_rx_queues;
- poll = &ixgbevf_clean_rxonly;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
@@ -2067,10 +1763,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
goto err_out;
q_vector->adapter = adapter;
q_vector->v_idx = q_idx;
- q_vector->eitr = adapter->eitr_param;
- if (q_idx < napi_vectors)
- netif_napi_add(adapter->netdev, &q_vector->napi,
- (*poll), 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ ixgbevf_poll, 64);
adapter->q_vector[q_idx] = q_vector;
}
@@ -2216,21 +1910,17 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
adapter->netdev->addr_len);
}
- /* Enable dynamic interrupt throttling rates */
- adapter->eitr_param = 20000;
- adapter->itr_setting = 1;
+ /* lock to protect mailbox accesses */
+ spin_lock_init(&adapter->mbx_lock);
- /* set defaults for eitr in MegaBytes */
- adapter->eitr_low = 10;
- adapter->eitr_high = 20;
+ /* Enable dynamic interrupt throttling rates */
+ adapter->rx_itr_setting = 1;
+ adapter->tx_itr_setting = 1;
/* set default ring sizes */
adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
- /* enable rx csum by default */
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
-
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
@@ -2290,7 +1980,7 @@ static void ixgbevf_watchdog(unsigned long data)
{
struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
struct ixgbe_hw *hw = &adapter->hw;
- u64 eics = 0;
+ u32 eics = 0;
int i;
/*
@@ -2304,11 +1994,11 @@ static void ixgbevf_watchdog(unsigned long data)
/* get one bit for every active tx/rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbevf_q_vector *qv = adapter->q_vector[i];
- if (qv->rxr_count || qv->txr_count)
- eics |= (1 << i);
+ if (qv->rx.ring || qv->tx.ring)
+ eics |= 1 << i;
}
- IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, eics);
watchdog_short_circuit:
schedule_work(&adapter->watchdog_task);
@@ -2362,8 +2052,16 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
* no LSC interrupt
*/
if (hw->mac.ops.check_link) {
- if ((hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false)) != 0) {
+ s32 need_reset;
+
+ spin_lock(&adapter->mbx_lock);
+
+ need_reset = hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false);
+
+ spin_unlock(&adapter->mbx_lock);
+
+ if (need_reset) {
adapter->link_up = link_up;
adapter->link_speed = link_speed;
netif_carrier_off(netdev);
@@ -2478,7 +2176,6 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->work_limit = tx_ring->count;
return 0;
err:
@@ -2682,7 +2379,7 @@ static int ixgbevf_open(struct net_device *netdev)
if (err)
goto err_req_irq;
- ixgbevf_irq_enable(adapter, true, true);
+ ixgbevf_irq_enable(adapter);
return 0;
@@ -2724,172 +2421,153 @@ static int ixgbevf_close(struct net_device *netdev)
return 0;
}
-static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+static void ixgbevf_tx_ctxtdesc(struct ixgbevf_ring *tx_ring,
+ u32 vlan_macip_lens, u32 type_tucmd,
+ u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- int err;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl;
- u32 mss_l4len_idx, l4len;
+ u16 i = tx_ring->next_to_use;
- if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (skb->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- adapter->hw_tso_ctxt++;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- adapter->hw_tso6_ctxt++;
- }
+ context_desc = IXGBEVF_TX_CTXTDESC(tx_ring, i);
- i = tx_ring->next_to_use;
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
-
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |=
- (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= ((skb_network_offset(skb)) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- vlan_macip_lens |=
- (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len +=
- (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
-
- if (skb->protocol == htons(ETH_P_IP))
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
- /* MSS L4LEN IDX */
- mss_l4len_idx =
- (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
- /* use index 1 for TSO */
- mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
- return true;
+static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ if (skb_header_cloned(skb)) {
+ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
}
- return false;
+ /* compute header lengths */
+ l4len = tcp_hdrlen(skb);
+ *hdr_len += l4len;
+ *hdr_len = skb_transport_offset(skb) + l4len;
+
+ /* mss_l4len_id: use 1 as index for TSO */
+ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return 1;
}
-static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
+static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
- struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- struct ixgbevf_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
- i = tx_ring->next_to_use;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
-
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |= (tx_flags &
- IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= (skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vlan_macip_lens |= (skb_transport_header(skb) -
- skb_network_header(skb));
-
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
-
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- if (ip_hdr(skb)->protocol == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
- case __constant_htons(ETH_P_IPV6):
- /* XXX what about other V6 headers?? */
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- type_tucmd_mlhl |=
- IXGBE_ADVTXD_TUCMD_L4T_TCP;
- break;
- default:
- if (unlikely(net_ratelimit())) {
- pr_warn("partial checksum but "
- "proto=%x!\n", skb->protocol);
- }
- break;
- }
- }
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
- /* use index zero for tx checksum offload */
- context_desc->mss_l4len_idx = 0;
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
- adapter->hw_csum_tx_good++;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4_hdr = 0;
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
- return true;
+ switch (l4_hdr) {
+ case IPPROTO_TCP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_SCTP:
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
+ }
+ break;
+ }
}
- return false;
+ /* vlan_macip_lens: MACLEN, VLAN tag */
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
+ type_tucmd, mss_l4len_idx);
+
+ return (skb->ip_summed == CHECKSUM_PARTIAL);
}
-static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring,
+static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
unsigned int first)
{
- struct pci_dev *pdev = adapter->pdev;
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
@@ -2908,12 +2586,11 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->mapped_as_page = false;
- tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
+ tx_buffer_info->dma = dma_map_single(tx_ring->dev,
skb->data + offset,
size, DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2938,12 +2615,12 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
tx_buffer_info->length = size;
tx_buffer_info->dma =
- skb_frag_dma_map(&adapter->pdev->dev, frag,
+ skb_frag_dma_map(tx_ring->dev, frag,
offset, size, DMA_TO_DEVICE);
tx_buffer_info->mapped_as_page = true;
- if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+ if (dma_mapping_error(tx_ring->dev,
+ tx_buffer_info->dma))
goto dma_error;
- tx_buffer_info->time_stamp = jiffies;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2964,15 +2641,15 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
i = i - 1;
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;
+ tx_ring->tx_buffer_info[first].time_stamp = jiffies;
return count;
dma_error:
- dev_err(&pdev->dev, "TX DMA map failed\n");
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
/* clear timestamp and dma mappings for failed tx_buffer_info map */
tx_buffer_info->dma = 0;
- tx_buffer_info->time_stamp = 0;
tx_buffer_info->next_to_watch = 0;
count--;
@@ -2983,14 +2660,13 @@ dma_error:
if (i < 0)
i += tx_ring->count;
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
}
return count;
}
-static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *tx_ring, int tx_flags,
+static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -3007,28 +2683,31 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
if (tx_flags & IXGBE_TX_FLAGS_VLAN)
cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+ if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_ADVTXD_POPTS_TXSM;
+
if (tx_flags & IXGBE_TX_FLAGS_TSO) {
cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
- olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
-
/* use index 1 context for tso */
olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
- olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
+
+ }
- } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
- olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
- IXGBE_ADVTXD_POPTS_SHIFT;
+ /*
+ * Check Context must be set if Tx switch is enabled, which it
+ * always is for case where virtual functions are running
+ */
+ olinfo_status |= IXGBE_ADVTXD_CC;
olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
i = tx_ring->next_to_use;
while (count--) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
tx_desc->read.cmd_type_len =
cpu_to_le32(cmd_type_len | tx_buffer_info->length);
@@ -3040,24 +2719,14 @@ static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
- /*
- * Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
-
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
}
-static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
- struct ixgbevf_ring *tx_ring, int size)
+static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
- netif_stop_subqueue(netdev, tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it. */
@@ -3069,17 +2738,16 @@ static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(netdev, tx_ring->queue_index);
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
++adapter->restart_queue;
return 0;
}
-static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
- struct ixgbevf_ring *tx_ring, int size)
+static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
return 0;
- return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+ return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -3090,54 +2758,66 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned int tx_flags = 0;
u8 hdr_len = 0;
int r_idx = 0, tso;
- int count = 0;
-
- unsigned int f;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
tx_ring = &adapter->tx_ring[r_idx];
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
if (vlan_tx_tag_present(skb)) {
tx_flags |= vlan_tx_tag_get(skb);
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- /* four things can cause us to need a context descriptor */
- if (skb_is_gso(skb) ||
- (skb->ip_summed == CHECKSUM_PARTIAL) ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN))
- count++;
-
- count += TXD_USE_COUNT(skb_headlen(skb));
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
-
- if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
- adapter->tx_busy++;
- return NETDEV_TX_BUSY;
- }
-
first = tx_ring->next_to_use;
if (skb->protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ tso = ixgbevf_tso(tx_ring, skb, tx_flags, &hdr_len);
if (tso < 0) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
if (tso)
- tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM;
+ else if (ixgbevf_tx_csum(tx_ring, skb, tx_flags))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
- ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
- ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+ ixgbevf_tx_queue(tx_ring, tx_flags,
+ ixgbevf_tx_map(tx_ring, skb, tx_flags, first),
skb->len, hdr_len);
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
- ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+ ixgbevf_maybe_stop_tx(tx_ring, DESC_NEEDED);
return NETDEV_TX_OK;
}
@@ -3161,9 +2841,13 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+ spin_lock(&adapter->mbx_lock);
+
if (hw->mac.ops.set_rar)
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ spin_unlock(&adapter->mbx_lock);
+
return 0;
}
@@ -3220,9 +2904,7 @@ static void ixgbevf_shutdown(struct pci_dev *pdev)
ixgbevf_free_all_rx_resources(adapter);
}
-#ifdef CONFIG_PM
pci_save_state(pdev);
-#endif
pci_disable_device(pdev);
}
@@ -3265,19 +2947,6 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
-static int ixgbevf_set_features(struct net_device *netdev,
- netdev_features_t features)
-{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
-
- if (features & NETIF_F_RXCSUM)
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
-
- return 0;
-}
-
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
@@ -3290,7 +2959,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_tx_timeout = ixgbevf_tx_timeout,
.ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
- .ndo_set_features = ixgbevf_set_features,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -3350,12 +3018,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
pci_set_master(pdev);
-#ifdef HAVE_TX_MQ
netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
MAX_TX_QUEUES);
-#else
- netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
-#endif
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -3396,10 +3060,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
sizeof(struct ixgbe_mbx_operations));
- adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
- adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
-
/* setup the private structure */
err = ixgbevf_sw_init(adapter);
if (err)
@@ -3458,8 +3118,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
goto err_register;
- adapter->netdev_registered = true;
-
netif_carrier_off(netdev);
ixgbevf_init_last_counter_stats(adapter);
@@ -3469,8 +3127,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
hw_dbg(hw, "MAC: %d\n", hw->mac.type);
- hw_dbg(hw, "LRO is disabled\n");
-
hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
cards_found++;
return 0;
@@ -3510,10 +3166,8 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->reset_task);
cancel_work_sync(&adapter->watchdog_task);
- if (adapter->netdev_registered) {
+ if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- adapter->netdev_registered = false;
- }
ixgbevf_reset_interrupt_capability(adapter);
@@ -3530,12 +3184,92 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+/**
+ * ixgbevf_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+
+ pci_disable_device(pdev);
+
+ /* Request a slot slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ixgbevf_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the ixgbevf_resume routine.
+ */
+static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+
+ ixgbevf_reset(adapter);
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * ixgbevf_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation. Implementation resembles the
+ * second-half of the ixgbevf_resume routine.
+ */
+static void ixgbevf_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbevf_up(adapter);
+
+ netif_device_attach(netdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers ixgbevf_err_handler = {
+ .error_detected = ixgbevf_io_error_detected,
+ .slot_reset = ixgbevf_io_slot_reset,
+ .resume = ixgbevf_io_resume,
+};
+
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
.remove = __devexit_p(ixgbevf_remove),
.shutdown = ixgbevf_shutdown,
+ .err_handler = &ixgbevf_err_handler
};
/**
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 4ea6580d3ae8..c911d883c27e 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2743,6 +2743,17 @@ jme_set_features(struct net_device *netdev, netdev_features_t features)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void jme_netpoll(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ jme_intr(dev->irq, dev);
+ local_irq_restore(flags);
+}
+#endif
+
static int
jme_nway_reset(struct net_device *netdev)
{
@@ -2944,6 +2955,9 @@ static const struct net_device_ops jme_netdev_ops = {
.ndo_tx_timeout = jme_tx_timeout,
.ndo_fix_features = jme_fix_features,
.ndo_set_features = jme_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = jme_netpoll,
+#endif
};
static int __devinit
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 5dc9cbd51514..003c5bc7189f 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -149,7 +149,6 @@ ltq_etop_hw_receive(struct ltq_etop_chan *ch)
spin_unlock_irqrestore(&priv->lock, flags);
skb_put(skb, len);
- skb->dev = ch->netdev;
skb->protocol = eth_type_trans(skb, ch->netdev);
netif_receive_skb(skb);
}
@@ -646,7 +645,7 @@ ltq_etop_init(struct net_device *dev)
memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
if (!is_valid_ether_addr(mac.sa_data)) {
pr_warn("etop: invalid MAC, using random\n");
- random_ether_addr(mac.sa_data);
+ eth_random_addr(mac.sa_data);
random_mac = true;
}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f0f06b2bc28b..770ee557924c 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1896,7 +1896,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
goto out_free;
}
- rx_desc = (struct rx_desc *)rxq->rx_desc_area;
+ rx_desc = rxq->rx_desc_area;
for (i = 0; i < rxq->rx_ring_size; i++) {
int nexti;
@@ -2001,7 +2001,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
txq->tx_desc_area_size = size;
- tx_desc = (struct tx_desc *)txq->tx_desc_area;
+ tx_desc = txq->tx_desc_area;
for (i = 0; i < txq->tx_ring_size; i++) {
struct tx_desc *txd = tx_desc + i;
int nexti;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 1db023b075a1..59489722e898 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1032,7 +1032,7 @@ static int rxq_init(struct net_device *dev)
}
memset((void *)pep->p_rx_desc_area, 0, size);
/* initialize the next_desc_ptr links in the Rx descriptors ring */
- p_rx_desc = (struct rx_desc *)pep->p_rx_desc_area;
+ p_rx_desc = pep->p_rx_desc_area;
for (i = 0; i < rx_desc_num; i++) {
p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
((i + 1) % rx_desc_num) * sizeof(struct rx_desc);
@@ -1095,7 +1095,7 @@ static int txq_init(struct net_device *dev)
}
memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
- p_tx_desc = (struct tx_desc *)pep->p_tx_desc_area;
+ p_tx_desc = pep->p_tx_desc_area;
for (i = 0; i < tx_desc_num; i++) {
p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
((i + 1) % tx_desc_num) * sizeof(struct tx_desc);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 28a54451a3e5..2b0748dba8b8 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -141,6 +141,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
{ 0 }
};
@@ -3079,8 +3080,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id)
/* Reading this mask interrupts as side effect */
status = sky2_read32(hw, B0_Y2_SP_ISRC2);
- if (status == 0 || status == ~0)
+ if (status == 0 || status == ~0) {
+ sky2_write32(hw, B0_Y2_SP_ICR, 2);
return IRQ_NONE;
+ }
prefetch(&hw->st_le[hw->st_idx]);
@@ -3349,6 +3352,17 @@ static void sky2_reset(struct sky2_hw *hw)
sky2_pci_write16(hw, pdev->pcie_cap + PCI_EXP_LNKCTL,
reg);
+ if (hw->chip_id == CHIP_ID_YUKON_PRM &&
+ hw->chip_rev == CHIP_REV_YU_PRM_A0) {
+ /* change PHY Interrupt polarity to low active */
+ reg = sky2_read16(hw, GPHY_CTRL);
+ sky2_write16(hw, GPHY_CTRL, reg | GPC_INTPOL);
+
+ /* adapt HW for low active PHY Interrupt */
+ reg = sky2_read16(hw, Y2_CFG_SPC + PCI_LDO_CTRL);
+ sky2_write16(hw, Y2_CFG_SPC + PCI_LDO_CTRL, reg | PHY_M_UNDOC1);
+ }
+
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
/* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
@@ -4871,7 +4885,7 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
"UL 2", /* 0xba */
"Unknown", /* 0xbb */
"Optima", /* 0xbc */
- "Optima Prime", /* 0xbd */
+ "OptimaEEE", /* 0xbd */
"Optima 2", /* 0xbe */
};
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index 3c896ce80b71..615ac63ea860 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -23,6 +23,7 @@ enum {
PSM_CONFIG_REG3 = 0x164,
PSM_CONFIG_REG4 = 0x168,
+ PCI_LDO_CTRL = 0xbc,
};
/* Yukon-2 */
@@ -586,6 +587,10 @@ enum yukon_supr_rev {
CHIP_REV_YU_SU_B1 = 3,
};
+enum yukon_prm_rev {
+ CHIP_REV_YU_PRM_Z1 = 1,
+ CHIP_REV_YU_PRM_A0 = 2,
+};
/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
enum {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 842c8ce9494e..7e94987d030c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1080,6 +1080,25 @@ static struct mlx4_cmd_info cmd_info[] = {
.verify = NULL,
.wrapper = NULL
},
+ /* flow steering commands */
+ {
+ .opcode = MLX4_QP_FLOW_STEERING_ATTACH,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
+ },
+ {
+ .opcode = MLX4_QP_FLOW_STEERING_DETACH,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
+ },
};
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 908a460d8db6..aa9c2f6cf3c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -77,6 +77,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
char name[25];
+ struct cpu_rmap *rmap =
+#ifdef CONFIG_RFS_ACCEL
+ priv->dev->rx_cpu_rmap;
+#else
+ NULL;
+#endif
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -91,7 +97,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
sprintf(name, "%s-%d", priv->dev->name,
cq->ring);
/* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(mdev->dev, name, &cq->vector)) {
+ if (mlx4_assign_eq(mdev->dev, name, rmap,
+ &cq->vector)) {
cq->vector = (cq->ring + 1 + priv->port)
% mdev->dev->caps.num_comp_vectors;
mlx4_warn(mdev, "Failed Assigning an EQ to "
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 72901ce2b088..9d0b88eea02b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -34,10 +34,14 @@
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
+#include <linux/mlx4/driver.h>
#include "mlx4_en.h"
#include "en_port.h"
+#define EN_ETHTOOL_QP_ATTACH (1ull << 63)
+#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
+#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
@@ -599,16 +603,369 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev,
return err;
}
+#define all_zeros_or_all_ones(field) \
+ ((field) == 0 || (field) == (__force typeof(field))-1)
+
+static int mlx4_en_validate_flow(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_usrip4_spec *l3_mask;
+ struct ethtool_tcpip4_spec *l4_mask;
+ struct ethhdr *eth_mask;
+ u64 full_mac = ~0ull;
+ u64 zero_mac = 0;
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ switch (cmd->fs.flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ if (cmd->fs.m_u.tcp_ip4_spec.tos)
+ return -EINVAL;
+ l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
+ /* don't allow mask which isn't all 0 or 1 */
+ if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
+ !all_zeros_or_all_ones(l4_mask->ip4dst) ||
+ !all_zeros_or_all_ones(l4_mask->psrc) ||
+ !all_zeros_or_all_ones(l4_mask->pdst))
+ return -EINVAL;
+ break;
+ case IP_USER_FLOW:
+ l3_mask = &cmd->fs.m_u.usr_ip4_spec;
+ if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
+ cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
+ (!l3_mask->ip4src && !l3_mask->ip4dst) ||
+ !all_zeros_or_all_ones(l3_mask->ip4src) ||
+ !all_zeros_or_all_ones(l3_mask->ip4dst))
+ return -EINVAL;
+ break;
+ case ETHER_FLOW:
+ eth_mask = &cmd->fs.m_u.ether_spec;
+ /* source mac mask must not be set */
+ if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN))
+ return -EINVAL;
+
+ /* dest mac mask must be ff:ff:ff:ff:ff:ff */
+ if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN))
+ return -EINVAL;
+
+ if (!all_zeros_or_all_ones(eth_mask->h_proto))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((cmd->fs.flow_type & FLOW_EXT)) {
+ if (cmd->fs.m_ext.vlan_etype ||
+ !(cmd->fs.m_ext.vlan_tci == 0 ||
+ cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int add_ip_rule(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h)
+{
+ struct mlx4_spec_list *spec_l3;
+ struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;
+
+ spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
+ if (!spec_l3) {
+ en_err(priv, "Fail to alloc ethtool rule.\n");
+ return -ENOMEM;
+ }
+
+ spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
+ if (l3_mask->ip4src)
+ spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
+ if (l3_mask->ip4dst)
+ spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
+ list_add_tail(&spec_l3->list, list_h);
+
+ return 0;
+}
+
+static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *list_h, int proto)
+{
+ struct mlx4_spec_list *spec_l3;
+ struct mlx4_spec_list *spec_l4;
+ struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
+
+ spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL);
+ spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL);
+ if (!spec_l4 || !spec_l3) {
+ en_err(priv, "Fail to alloc ethtool rule.\n");
+ kfree(spec_l3);
+ kfree(spec_l4);
+ return -ENOMEM;
+ }
+
+ spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
+
+ if (proto == TCP_V4_FLOW) {
+ spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
+ spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
+ spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
+ } else {
+ spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
+ spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
+ spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
+ spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
+ spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
+ }
+
+ if (l4_mask->ip4src)
+ spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
+ if (l4_mask->ip4dst)
+ spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
+
+ if (l4_mask->psrc)
+ spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
+ if (l4_mask->pdst)
+ spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;
+
+ list_add_tail(&spec_l3->list, list_h);
+ list_add_tail(&spec_l4->list, list_h);
+
+ return 0;
+}
+
+static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ struct list_head *rule_list_h)
+{
+ int err;
+ u64 mac;
+ __be64 be_mac;
+ struct ethhdr *eth_spec;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_spec_list *spec_l2;
+ __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ err = mlx4_en_validate_flow(dev, cmd);
+ if (err)
+ return err;
+
+ spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL);
+ if (!spec_l2)
+ return -ENOMEM;
+
+ mac = priv->mac & MLX4_MAC_MASK;
+ be_mac = cpu_to_be64(mac << 16);
+
+ spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
+ if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN);
+
+ if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) {
+ spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
+ spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff);
+ }
+
+ list_add_tail(&spec_l2->list, rule_list_h);
+
+ switch (cmd->fs.flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ eth_spec = &cmd->fs.h_u.ether_spec;
+ memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN);
+ spec_l2->eth.ether_type = eth_spec->h_proto;
+ if (eth_spec->h_proto)
+ spec_l2->eth.ether_type_enable = 1;
+ break;
+ case IP_USER_FLOW:
+ err = add_ip_rule(priv, cmd, rule_list_h);
+ break;
+ case TCP_V4_FLOW:
+ err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
+ break;
+ case UDP_V4_FLOW:
+ err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
+ break;
+ }
+
+ return err;
+}
+
+static int mlx4_en_flow_replace(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct ethtool_flow_id *loc_rule;
+ struct mlx4_spec_list *spec, *tmp_spec;
+ u32 qpn;
+ u64 reg_id;
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ };
+
+ rule.port = priv->port;
+ rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
+ INIT_LIST_HEAD(&rule.list);
+
+ /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
+ if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
+ qpn = priv->drop_qp.qpn;
+ else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
+ qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
+ } else {
+ if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
+ en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+ qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
+ if (!qpn) {
+ en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+ cmd->fs.ring_cookie);
+ return -EINVAL;
+ }
+ }
+ rule.qpn = qpn;
+ err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
+ if (err)
+ goto out_free_list;
+
+ loc_rule = &priv->ethtool_rules[cmd->fs.location];
+ if (loc_rule->id) {
+ err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
+ if (err) {
+ en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
+ cmd->fs.location, loc_rule->id);
+ goto out_free_list;
+ }
+ loc_rule->id = 0;
+ memset(&loc_rule->flow_spec, 0,
+ sizeof(struct ethtool_rx_flow_spec));
+ }
+ err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
+ if (err) {
+ en_err(priv, "Fail to attach network rule at location %d.\n",
+ cmd->fs.location);
+ goto out_free_list;
+ }
+ loc_rule->id = reg_id;
+ memcpy(&loc_rule->flow_spec, &cmd->fs,
+ sizeof(struct ethtool_rx_flow_spec));
+
+out_free_list:
+ list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
+ list_del(&spec->list);
+ kfree(spec);
+ }
+ return err;
+}
+
+static int mlx4_en_flow_detach(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct ethtool_flow_id *rule;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->ethtool_rules[cmd->fs.location];
+ if (!rule->id) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ err = mlx4_flow_detach(priv->mdev->dev, rule->id);
+ if (err) {
+ en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
+ cmd->fs.location, rule->id);
+ goto out;
+ }
+ rule->id = 0;
+ memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
+out:
+ return err;
+
+}
+
+static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ int loc)
+{
+ int err = 0;
+ struct ethtool_flow_id *rule;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
+ return -EINVAL;
+
+ rule = &priv->ethtool_rules[loc];
+ if (rule->id)
+ memcpy(&cmd->fs, &rule->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ else
+ err = -ENOENT;
+
+ return err;
+}
+
+static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
+{
+
+ int i, res = 0;
+ for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
+ if (priv->ethtool_rules[i].id)
+ res++;
+ }
+ return res;
+
+}
+
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
int err = 0;
+ int i = 0, priority = 0;
+
+ if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
+ cmd->cmd == ETHTOOL_GRXCLSRULE ||
+ cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
+ mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EINVAL;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = priv->rx_ring_num;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = mlx4_en_get_num_flows(priv);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
+ err = mlx4_en_get_flow(dev, cmd, i);
+ if (!err)
+ rule_locs[priority++] = i;
+ i++;
+ }
+ err = 0;
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -617,6 +974,30 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
return err;
}
+static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EINVAL;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mlx4_en_flow_replace(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = mlx4_en_flow_detach(dev, cmd);
+ break;
+ default:
+ en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@@ -637,6 +1018,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
.get_rxnfc = mlx4_en_get_rxnfc,
+ .set_rxnfc = mlx4_en_set_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
.get_rxfh_indir = mlx4_en_get_rxfh_indir,
.set_rxfh_indir = mlx4_en_set_rxfh_indir,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 69ba57270481..a52922ed85c1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -131,7 +131,7 @@ static void *mlx4_en_get_netdev(struct mlx4_dev *dev, void *ctx, u8 port)
}
static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
- enum mlx4_dev_event event, int port)
+ enum mlx4_dev_event event, unsigned long port)
{
struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
struct mlx4_en_priv *priv;
@@ -156,7 +156,8 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
if (port < 1 || port > dev->caps.num_ports ||
!mdev->pndev[port])
return;
- mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
+ mlx4_warn(mdev, "Unhandled event %d for port %d\n", event,
+ (int) port);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 073b85b45fc5..8864d8b53737 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -36,6 +36,8 @@
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/hash.h>
+#include <net/ip.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -66,6 +68,299 @@ static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
return 0;
}
+#ifdef CONFIG_RFS_ACCEL
+
+struct mlx4_en_filter {
+ struct list_head next;
+ struct work_struct work;
+
+ __be32 src_ip;
+ __be32 dst_ip;
+ __be16 src_port;
+ __be16 dst_port;
+
+ int rxq_index;
+ struct mlx4_en_priv *priv;
+ u32 flow_id; /* RFS infrastructure id */
+ int id; /* mlx4_en driver id */
+ u64 reg_id; /* Flow steering API id */
+ u8 activated; /* Used to prevent expiry before filter
+ * is attached
+ */
+ struct hlist_node filter_chain;
+};
+
+static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
+
+static void mlx4_en_filter_work(struct work_struct *work)
+{
+ struct mlx4_en_filter *filter = container_of(work,
+ struct mlx4_en_filter,
+ work);
+ struct mlx4_en_priv *priv = filter->priv;
+ struct mlx4_spec_list spec_tcp = {
+ .id = MLX4_NET_TRANS_RULE_ID_TCP,
+ {
+ .tcp_udp = {
+ .dst_port = filter->dst_port,
+ .dst_port_msk = (__force __be16)-1,
+ .src_port = filter->src_port,
+ .src_port_msk = (__force __be16)-1,
+ },
+ },
+ };
+ struct mlx4_spec_list spec_ip = {
+ .id = MLX4_NET_TRANS_RULE_ID_IPV4,
+ {
+ .ipv4 = {
+ .dst_ip = filter->dst_ip,
+ .dst_ip_msk = (__force __be32)-1,
+ .src_ip = filter->src_ip,
+ .src_ip_msk = (__force __be32)-1,
+ },
+ },
+ };
+ struct mlx4_spec_list spec_eth = {
+ .id = MLX4_NET_TRANS_RULE_ID_ETH,
+ };
+ struct mlx4_net_trans_rule rule = {
+ .list = LIST_HEAD_INIT(rule.list),
+ .queue_mode = MLX4_NET_TRANS_Q_LIFO,
+ .exclusive = 1,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .port = priv->port,
+ .priority = MLX4_DOMAIN_RFS,
+ };
+ int rc;
+ __be64 mac;
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ list_add_tail(&spec_eth.list, &rule.list);
+ list_add_tail(&spec_ip.list, &rule.list);
+ list_add_tail(&spec_tcp.list, &rule.list);
+
+ mac = cpu_to_be64((priv->mac & MLX4_MAC_MASK) << 16);
+
+ rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
+ memcpy(spec_eth.eth.dst_mac, &mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+ filter->activated = 0;
+
+ if (filter->reg_id) {
+ rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
+ if (rc && rc != -ENOENT)
+ en_err(priv, "Error detaching flow. rc = %d\n", rc);
+ }
+
+ rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
+ if (rc)
+ en_err(priv, "Error attaching flow. err = %d\n", rc);
+
+ mlx4_en_filter_rfs_expire(priv);
+
+ filter->activated = 1;
+}
+
+static inline struct hlist_head *
+filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
+ __be16 src_port, __be16 dst_port)
+{
+ unsigned long l;
+ int bucket_idx;
+
+ l = (__force unsigned long)src_port |
+ ((__force unsigned long)dst_port << 2);
+ l ^= (__force unsigned long)(src_ip ^ dst_ip);
+
+ bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);
+
+ return &priv->filter_hash[bucket_idx];
+}
+
+static struct mlx4_en_filter *
+mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
+ __be32 dst_ip, __be16 src_port, __be16 dst_port,
+ u32 flow_id)
+{
+ struct mlx4_en_filter *filter = NULL;
+
+ filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
+ if (!filter)
+ return NULL;
+
+ filter->priv = priv;
+ filter->rxq_index = rxq_index;
+ INIT_WORK(&filter->work, mlx4_en_filter_work);
+
+ filter->src_ip = src_ip;
+ filter->dst_ip = dst_ip;
+ filter->src_port = src_port;
+ filter->dst_port = dst_port;
+
+ filter->flow_id = flow_id;
+
+ filter->id = priv->last_filter_id++;
+
+ list_add_tail(&filter->next, &priv->filters);
+ hlist_add_head(&filter->filter_chain,
+ filter_hash_bucket(priv, src_ip, dst_ip, src_port,
+ dst_port));
+
+ return filter;
+}
+
+static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
+{
+ struct mlx4_en_priv *priv = filter->priv;
+ int rc;
+
+ list_del(&filter->next);
+
+ rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
+ if (rc && rc != -ENOENT)
+ en_err(priv, "Error detaching flow. rc = %d\n", rc);
+
+ kfree(filter);
+}
+
+static inline struct mlx4_en_filter *
+mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
+ __be16 src_port, __be16 dst_port)
+{
+ struct hlist_node *elem;
+ struct mlx4_en_filter *filter;
+ struct mlx4_en_filter *ret = NULL;
+
+ hlist_for_each_entry(filter, elem,
+ filter_hash_bucket(priv, src_ip, dst_ip,
+ src_port, dst_port),
+ filter_chain) {
+ if (filter->src_ip == src_ip &&
+ filter->dst_ip == dst_ip &&
+ filter->src_port == src_port &&
+ filter->dst_port == dst_port) {
+ ret = filter;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct mlx4_en_priv *priv = netdev_priv(net_dev);
+ struct mlx4_en_filter *filter;
+ const struct iphdr *ip;
+ const __be16 *ports;
+ __be32 src_ip;
+ __be32 dst_ip;
+ __be16 src_port;
+ __be16 dst_port;
+ int nhoff = skb_network_offset(skb);
+ int ret = 0;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ ip = (const struct iphdr *)(skb->data + nhoff);
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+ src_ip = ip->saddr;
+ dst_ip = ip->daddr;
+ src_port = ports[0];
+ dst_port = ports[1];
+
+ if (ip->protocol != IPPROTO_TCP)
+ return -EPROTONOSUPPORT;
+
+ spin_lock_bh(&priv->filters_lock);
+ filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
+ if (filter) {
+ if (filter->rxq_index == rxq_index)
+ goto out;
+
+ filter->rxq_index = rxq_index;
+ } else {
+ filter = mlx4_en_filter_alloc(priv, rxq_index,
+ src_ip, dst_ip,
+ src_port, dst_port, flow_id);
+ if (!filter) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ queue_work(priv->mdev->workqueue, &filter->work);
+
+out:
+ ret = filter->id;
+err:
+ spin_unlock_bh(&priv->filters_lock);
+
+ return ret;
+}
+
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *rx_ring)
+{
+ struct mlx4_en_filter *filter, *tmp;
+ LIST_HEAD(del_list);
+
+ spin_lock_bh(&priv->filters_lock);
+ list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
+ list_move(&filter->next, &del_list);
+ hlist_del(&filter->filter_chain);
+ }
+ spin_unlock_bh(&priv->filters_lock);
+
+ list_for_each_entry_safe(filter, tmp, &del_list, next) {
+ cancel_work_sync(&filter->work);
+ mlx4_en_filter_free(filter);
+ }
+}
+
+static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
+ LIST_HEAD(del_list);
+ int i = 0;
+
+ spin_lock_bh(&priv->filters_lock);
+ list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
+ if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
+ break;
+
+ if (filter->activated &&
+ !work_pending(&filter->work) &&
+ rps_may_expire_flow(priv->dev,
+ filter->rxq_index, filter->flow_id,
+ filter->id)) {
+ list_move(&filter->next, &del_list);
+ hlist_del(&filter->filter_chain);
+ } else
+ last_filter = filter;
+
+ i++;
+ }
+
+ if (last_filter && (&last_filter->next != priv->filters.next))
+ list_move(&priv->filters, &last_filter->next);
+
+ spin_unlock_bh(&priv->filters_lock);
+
+ list_for_each_entry_safe(filter, tmp, &del_list, next)
+ mlx4_en_filter_free(filter);
+}
+#endif
+
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -170,33 +465,81 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
static void mlx4_en_clear_list(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_mc_list *tmp, *mc_to_del;
- kfree(priv->mc_addrs);
- priv->mc_addrs = NULL;
- priv->mc_addrs_cnt = 0;
+ list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
+ list_del(&mc_to_del->list);
+ kfree(mc_to_del);
+ }
}
static void mlx4_en_cache_mclist(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
- char *mc_addrs;
- int mc_addrs_cnt = netdev_mc_count(dev);
- int i;
+ struct mlx4_en_mc_list *tmp;
- mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
- if (!mc_addrs) {
- en_err(priv, "failed to allocate multicast list\n");
- return;
- }
- i = 0;
- netdev_for_each_mc_addr(ha, dev)
- memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
mlx4_en_clear_list(dev);
- priv->mc_addrs = mc_addrs;
- priv->mc_addrs_cnt = mc_addrs_cnt;
+ netdev_for_each_mc_addr(ha, dev) {
+ tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
+ if (!tmp) {
+ en_err(priv, "failed to allocate multicast list\n");
+ mlx4_en_clear_list(dev);
+ return;
+ }
+ memcpy(tmp->addr, ha->addr, ETH_ALEN);
+ list_add_tail(&tmp->list, &priv->mc_list);
+ }
}
+static void update_mclist_flags(struct mlx4_en_priv *priv,
+ struct list_head *dst,
+ struct list_head *src)
+{
+ struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
+ bool found;
+
+ /* Find all the entries that should be removed from dst,
+ * These are the entries that are not found in src
+ */
+ list_for_each_entry(dst_tmp, dst, list) {
+ found = false;
+ list_for_each_entry(src_tmp, src, list) {
+ if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found)
+ dst_tmp->action = MCLIST_REM;
+ }
+
+ /* Add entries that exist in src but not in dst
+ * mark them as need to add
+ */
+ list_for_each_entry(src_tmp, src, list) {
+ found = false;
+ list_for_each_entry(dst_tmp, dst, list) {
+ if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
+ dst_tmp->action = MCLIST_NONE;
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
+ GFP_KERNEL);
+ if (!new_mc) {
+ en_err(priv, "Failed to allocate current multicast list\n");
+ return;
+ }
+ memcpy(new_mc, src_tmp,
+ sizeof(struct mlx4_en_mc_list));
+ new_mc->action = MCLIST_ADD;
+ list_add_tail(&new_mc->list, dst);
+ }
+ }
+}
static void mlx4_en_set_multicast(struct net_device *dev)
{
@@ -214,9 +557,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
mcast_task);
struct mlx4_en_dev *mdev = priv->mdev;
struct net_device *dev = priv->dev;
+ struct mlx4_en_mc_list *mclist, *tmp;
u64 mcast_addr = 0;
u8 mc_list[16] = {0};
- int err;
+ int err = 0;
mutex_lock(&mdev->state_lock);
if (!mdev->device_up) {
@@ -251,16 +595,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags |= MLX4_EN_FLAG_PROMISC;
/* Enable promiscouos mode */
- if (!(mdev->dev->caps.flags &
- MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
- priv->base_qpn, 1);
- else
- err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn,
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_add(mdev->dev,
+ priv->base_qpn,
priv->port);
- if (err)
- en_err(priv, "Failed enabling "
- "promiscuous mode\n");
+ if (err)
+ en_err(priv, "Failed enabling unicast promiscuous mode\n");
+
+ /* Add the default qp number as multicast
+ * promisc
+ */
+ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed enabling multicast promiscuous mode\n");
+ priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ 1);
+ if (err)
+ en_err(priv, "Failed enabling promiscuous mode\n");
+ break;
+ }
/* Disable port multicast filter (unconditionally) */
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
@@ -269,15 +643,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
en_err(priv, "Failed disabling "
"multicast filter\n");
- /* Add the default qp number as multicast promisc */
- if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
- priv->port);
- if (err)
- en_err(priv, "Failed entering multicast promisc mode\n");
- priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
- }
-
/* Disable port VLAN filter */
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
if (err)
@@ -296,22 +661,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscouos mode */
- if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
- priv->base_qpn, 0);
- else
- err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ if (err)
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_unicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
priv->port);
- if (err)
- en_err(priv, "Failed disabling promiscuous mode\n");
+ if (err)
+ en_err(priv, "Failed disabling unicast promiscuous mode\n");
+ /* Disable Multicast promisc */
+ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ if (err)
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
+ priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ }
+ break;
- /* Disable Multicast promisc */
- if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
- priv->port);
+ case MLX4_STEERING_MODE_A0:
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev,
+ priv->port,
+ priv->base_qpn, 0);
if (err)
- en_err(priv, "Failed disabling multicast promiscuous mode\n");
- priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
+ en_err(priv, "Failed disabling promiscuous mode\n");
+ break;
}
/* Enable port VLAN filter */
@@ -329,18 +712,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
/* Add the default qp number as multicast promisc */
if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
- err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn,
- priv->port);
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_add(mdev->dev,
+ priv->port,
+ priv->base_qpn,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_multicast_promisc_add(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ break;
+ }
if (err)
en_err(priv, "Failed entering multicast promisc mode\n");
priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
}
} else {
- int i;
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
- err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
- priv->port);
+ switch (mdev->dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ err = mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ break;
+
+ case MLX4_STEERING_MODE_B0:
+ err = mlx4_multicast_promisc_remove(mdev->dev,
+ priv->base_qpn,
+ priv->port);
+ break;
+
+ case MLX4_STEERING_MODE_A0:
+ break;
+ }
if (err)
en_err(priv, "Failed disabling multicast promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@ -351,13 +762,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
if (err)
en_err(priv, "Failed disabling multicast filter\n");
- /* Detach our qp from all the multicast addresses */
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
- mc_list[5] = priv->port;
- mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, MLX4_PROT_ETH);
- }
/* Flush mcast filter and init it with broadcast address */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
1, MLX4_MCAST_CONFIG);
@@ -367,13 +771,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
netif_tx_lock_bh(dev);
mlx4_en_cache_mclist(dev);
netif_tx_unlock_bh(dev);
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- mcast_addr =
- mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
- mc_list[5] = priv->port;
- mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, 0, MLX4_PROT_ETH);
+ list_for_each_entry(mclist, &priv->mc_list, list) {
+ mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -381,6 +780,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
0, MLX4_MCAST_ENABLE);
if (err)
en_err(priv, "Failed enabling multicast filter\n");
+
+ update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
+ list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
+ if (mclist->action == MCLIST_REM) {
+ /* detach this address and delete from list */
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
+ mc_list[5] = priv->port;
+ err = mlx4_multicast_detach(mdev->dev,
+ &priv->rss_map.indir_qp,
+ mc_list,
+ MLX4_PROT_ETH,
+ mclist->reg_id);
+ if (err)
+ en_err(priv, "Fail to detach multicast address\n");
+
+ /* remove from list */
+ list_del(&mclist->list);
+ kfree(mclist);
+ } else if (mclist->action == MCLIST_ADD) {
+ /* attach the address */
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
+ /* needed for B0 steering support */
+ mc_list[5] = priv->port;
+ err = mlx4_multicast_attach(mdev->dev,
+ &priv->rss_map.indir_qp,
+ mc_list,
+ priv->port, 0,
+ MLX4_PROT_ETH,
+ &mclist->reg_id);
+ if (err)
+ en_err(priv, "Fail to attach multicast address\n");
+
+ }
+ }
}
out:
mutex_unlock(&mdev->state_lock);
@@ -605,6 +1038,9 @@ int mlx4_en_start_port(struct net_device *dev)
return 0;
}
+ INIT_LIST_HEAD(&priv->mc_list);
+ INIT_LIST_HEAD(&priv->curr_list);
+
/* Calculate Rx buf size */
dev->mtu = min(dev->mtu, priv->max_mtu);
mlx4_en_calc_rx_buf(dev);
@@ -653,6 +1089,10 @@ int mlx4_en_start_port(struct net_device *dev)
goto mac_err;
}
+ err = mlx4_en_create_drop_qp(priv);
+ if (err)
+ goto rss_err;
+
/* Configure tx cq's and rings */
for (i = 0; i < priv->tx_ring_num; i++) {
/* Configure cq */
@@ -720,13 +1160,23 @@ int mlx4_en_start_port(struct net_device *dev)
/* Attach rx QP to bradcast address */
memset(&mc_list[10], 0xff, ETH_ALEN);
- mc_list[5] = priv->port;
+ mc_list[5] = priv->port; /* needed for B0 steering support */
if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
- 0, MLX4_PROT_ETH))
+ priv->port, 0, MLX4_PROT_ETH,
+ &priv->broadcast_id))
mlx4_warn(mdev, "Failed Attaching Broadcast\n");
/* Must redo promiscuous mode setup. */
priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_UPLINK);
+ mlx4_flow_steer_promisc_remove(mdev->dev,
+ priv->port,
+ MLX4_FS_PROMISC_ALL_MULTI);
+ }
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->mcast_task);
@@ -742,7 +1192,8 @@ tx_err:
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
}
-
+ mlx4_en_destroy_drop_qp(priv);
+rss_err:
mlx4_en_release_rss_steer(priv);
mac_err:
mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
@@ -760,6 +1211,7 @@ void mlx4_en_stop_port(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_mc_list *mclist, *tmp;
int i;
u8 mc_list[16] = {0};
@@ -778,19 +1230,26 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Detach All multicasts */
memset(&mc_list[10], 0xff, ETH_ALEN);
- mc_list[5] = priv->port;
+ mc_list[5] = priv->port; /* needed for B0 steering support */
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
- MLX4_PROT_ETH);
- for (i = 0; i < priv->mc_addrs_cnt; i++) {
- memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN);
+ MLX4_PROT_ETH, priv->broadcast_id);
+ list_for_each_entry(mclist, &priv->curr_list, list) {
+ memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
- mc_list, MLX4_PROT_ETH);
+ mc_list, MLX4_PROT_ETH, mclist->reg_id);
}
mlx4_en_clear_list(dev);
+ list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
+ list_del(&mclist->list);
+ kfree(mclist);
+ }
+
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
+ mlx4_en_destroy_drop_qp(priv);
+
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -915,6 +1374,11 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
+ priv->dev->rx_cpu_rmap = NULL;
+#endif
+
for (i = 0; i < priv->tx_ring_num; i++) {
if (priv->tx_ring[i].tx_info)
mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
@@ -970,6 +1434,15 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
goto err;
}
+#ifdef CONFIG_RFS_ACCEL
+ priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
+ if (!priv->dev->rx_cpu_rmap)
+ goto err;
+
+ INIT_LIST_HEAD(&priv->filters);
+ spin_lock_init(&priv->filters_lock);
+#endif
+
return 0;
err:
@@ -1077,6 +1550,9 @@ static const struct net_device_ops mlx4_netdev_ops = {
#endif
.ndo_set_features = mlx4_en_set_features,
.ndo_setup_tc = mlx4_en_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx4_en_filter_rfs,
+#endif
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1194,6 +1670,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
NETIF_F_HW_VLAN_FILTER;
dev->hw_features |= NETIF_F_LOOPBACK;
+ if (mdev->dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ dev->hw_features |= NETIF_F_NTUPLE;
+
mdev->pndev[port] = dev;
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d49a7ac3187d..f32e70300770 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -41,41 +41,75 @@
#include "mlx4_en.h"
-
-static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_desc *rx_desc,
- struct page_frag *skb_frags,
- struct mlx4_en_rx_alloc *ring_alloc,
- int i)
+static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct mlx4_en_rx_alloc *frags,
+ struct mlx4_en_rx_alloc *ring_alloc)
{
- struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
- struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
+ struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
+ struct mlx4_en_frag_info *frag_info;
struct page *page;
dma_addr_t dma;
+ int i;
- if (page_alloc->offset == frag_info->last_offset) {
- /* Allocate new page */
- page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
- if (!page)
- return -ENOMEM;
-
- skb_frags[i].page = page_alloc->page;
- skb_frags[i].offset = page_alloc->offset;
- page_alloc->page = page;
- page_alloc->offset = frag_info->frag_align;
- } else {
- page = page_alloc->page;
- get_page(page);
+ for (i = 0; i < priv->num_frags; i++) {
+ frag_info = &priv->frag_info[i];
+ if (ring_alloc[i].offset == frag_info->last_offset) {
+ page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
+ MLX4_EN_ALLOC_ORDER);
+ if (!page)
+ goto out;
+ dma = dma_map_page(priv->ddev, page, 0,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(priv->ddev, dma)) {
+ put_page(page);
+ goto out;
+ }
+ page_alloc[i].page = page;
+ page_alloc[i].dma = dma;
+ page_alloc[i].offset = frag_info->frag_align;
+ } else {
+ page_alloc[i].page = ring_alloc[i].page;
+ get_page(ring_alloc[i].page);
+ page_alloc[i].dma = ring_alloc[i].dma;
+ page_alloc[i].offset = ring_alloc[i].offset +
+ frag_info->frag_stride;
+ }
+ }
- skb_frags[i].page = page;
- skb_frags[i].offset = page_alloc->offset;
- page_alloc->offset += frag_info->frag_stride;
+ for (i = 0; i < priv->num_frags; i++) {
+ frags[i] = ring_alloc[i];
+ dma = ring_alloc[i].dma + ring_alloc[i].offset;
+ ring_alloc[i] = page_alloc[i];
+ rx_desc->data[i].addr = cpu_to_be64(dma);
}
- dma = dma_map_single(priv->ddev, page_address(skb_frags[i].page) +
- skb_frags[i].offset, frag_info->frag_size,
- PCI_DMA_FROMDEVICE);
- rx_desc->data[i].addr = cpu_to_be64(dma);
+
return 0;
+
+
+out:
+ while (i--) {
+ frag_info = &priv->frag_info[i];
+ if (ring_alloc[i].offset == frag_info->last_offset)
+ dma_unmap_page(priv->ddev, page_alloc[i].dma,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ put_page(page_alloc[i].page);
+ }
+ return -ENOMEM;
+}
+
+static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_alloc *frags,
+ int i)
+{
+ struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+
+ if (frags[i].offset == frag_info->last_offset) {
+ dma_unmap_page(priv->ddev, frags[i].dma, MLX4_EN_ALLOC_SIZE,
+ PCI_DMA_FROMDEVICE);
+ }
+ if (frags[i].page)
+ put_page(frags[i].page);
}
static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
@@ -91,6 +125,13 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
if (!page_alloc->page)
goto out;
+ page_alloc->dma = dma_map_page(priv->ddev, page_alloc->page, 0,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(priv->ddev, page_alloc->dma)) {
+ put_page(page_alloc->page);
+ page_alloc->page = NULL;
+ goto out;
+ }
page_alloc->offset = priv->frag_info[i].frag_align;
en_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
i, page_alloc->page);
@@ -100,6 +141,8 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
out:
while (i--) {
page_alloc = &ring->page_alloc[i];
+ dma_unmap_page(priv->ddev, page_alloc->dma,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
put_page(page_alloc->page);
page_alloc->page = NULL;
}
@@ -117,24 +160,22 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
en_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
i, page_count(page_alloc->page));
+ dma_unmap_page(priv->ddev, page_alloc->dma,
+ MLX4_EN_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
put_page(page_alloc->page);
page_alloc->page = NULL;
}
}
-
static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
- struct skb_frag_struct *skb_frags = ring->rx_info +
- (index << priv->log_rx_info);
int possible_frags;
int i;
/* Set size and memtype fields */
for (i = 0; i < priv->num_frags; i++) {
- skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size);
rx_desc->data[i].byte_count =
cpu_to_be32(priv->frag_info[i].frag_size);
rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
@@ -151,29 +192,14 @@ static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
}
}
-
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, int index)
{
struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
- struct page_frag *skb_frags = ring->rx_info +
- (index << priv->log_rx_info);
- int i;
+ struct mlx4_en_rx_alloc *frags = ring->rx_info +
+ (index << priv->log_rx_info);
- for (i = 0; i < priv->num_frags; i++)
- if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
- goto err;
-
- return 0;
-
-err:
- while (i--) {
- dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
- pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
- PCI_DMA_FROMDEVICE);
- put_page(skb_frags[i].page);
- }
- return -ENOMEM;
+ return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc);
}
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
@@ -185,20 +211,13 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
int index)
{
- struct page_frag *skb_frags;
- struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
- dma_addr_t dma;
+ struct mlx4_en_rx_alloc *frags;
int nr;
- skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ frags = ring->rx_info + (index << priv->log_rx_info);
for (nr = 0; nr < priv->num_frags; nr++) {
en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
- dma = be64_to_cpu(rx_desc->data[nr].addr);
-
- en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
- dma_unmap_single(priv->ddev, dma, skb_frags[nr].size,
- PCI_DMA_FROMDEVICE);
- put_page(skb_frags[nr].page);
+ mlx4_en_free_frag(priv, frags, nr);
}
}
@@ -268,10 +287,9 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
- int err;
+ int err = -ENOMEM;
int tmp;
-
ring->prod = 0;
ring->cons = 0;
ring->size = size;
@@ -281,7 +299,7 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
- sizeof(struct skb_frag_struct));
+ sizeof(struct mlx4_en_rx_alloc));
ring->rx_info = vmalloc(tmp);
if (!ring->rx_info)
return -ENOMEM;
@@ -338,7 +356,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
memset(ring->buf, 0, ring->buf_size);
mlx4_en_update_rx_prod_db(ring);
- /* Initailize all descriptors */
+ /* Initialize all descriptors */
for (i = 0; i < ring->size; i++)
mlx4_en_init_rx_desc(priv, ring, i);
@@ -389,6 +407,9 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
+#ifdef CONFIG_RFS_ACCEL
+ mlx4_en_cleanup_filters(priv, ring);
+#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
@@ -401,12 +422,10 @@ void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
}
-/* Unmap a completed descriptor and free unused pages */
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct page_frag *skb_frags,
+ struct mlx4_en_rx_alloc *frags,
struct sk_buff *skb,
- struct mlx4_en_rx_alloc *page_alloc,
int length)
{
struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
@@ -414,26 +433,24 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
int nr;
dma_addr_t dma;
- /* Collect used fragments while replacing them in the HW descirptors */
+ /* Collect used fragments while replacing them in the HW descriptors */
for (nr = 0; nr < priv->num_frags; nr++) {
frag_info = &priv->frag_info[nr];
if (length <= frag_info->frag_prefix_size)
break;
+ if (!frags[nr].page)
+ goto fail;
- /* Save page reference in skb */
- __skb_frag_set_page(&skb_frags_rx[nr], skb_frags[nr].page);
- skb_frag_size_set(&skb_frags_rx[nr], skb_frags[nr].size);
- skb_frags_rx[nr].page_offset = skb_frags[nr].offset;
- skb->truesize += frag_info->frag_stride;
dma = be64_to_cpu(rx_desc->data[nr].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma, frag_info->frag_size,
+ DMA_FROM_DEVICE);
- /* Allocate a replacement page */
- if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
- goto fail;
-
- /* Unmap buffer */
- dma_unmap_single(priv->ddev, dma, skb_frag_size(&skb_frags_rx[nr]),
- PCI_DMA_FROMDEVICE);
+ /* Save page reference in skb */
+ get_page(frags[nr].page);
+ __skb_frag_set_page(&skb_frags_rx[nr], frags[nr].page);
+ skb_frag_size_set(&skb_frags_rx[nr], frag_info->frag_size);
+ skb_frags_rx[nr].page_offset = frags[nr].offset;
+ skb->truesize += frag_info->frag_stride;
}
/* Adjust size of last fragment to match actual length */
if (nr > 0)
@@ -442,8 +459,6 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
return nr;
fail:
- /* Drop all accumulated fragments (which have already been replaced in
- * the descriptor) of this packet; remaining fragments are reused... */
while (nr > 0) {
nr--;
__skb_frag_unref(&skb_frags_rx[nr]);
@@ -454,8 +469,7 @@ fail:
static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
struct mlx4_en_rx_desc *rx_desc,
- struct page_frag *skb_frags,
- struct mlx4_en_rx_alloc *page_alloc,
+ struct mlx4_en_rx_alloc *frags,
unsigned int length)
{
struct sk_buff *skb;
@@ -473,23 +487,20 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* Get pointer to first fragment so we could copy the headers into the
* (linear part of the) skb */
- va = page_address(skb_frags[0].page) + skb_frags[0].offset;
+ va = page_address(frags[0].page) + frags[0].offset;
if (length <= SMALL_PACKET_SIZE) {
/* We are copying all relevant data to the skb - temporarily
- * synch buffers for the copy */
+ * sync buffers for the copy */
dma = be64_to_cpu(rx_desc->data[0].addr);
dma_sync_single_for_cpu(priv->ddev, dma, length,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, va, length);
- dma_sync_single_for_device(priv->ddev, dma, length,
- DMA_FROM_DEVICE);
skb->tail += length;
} else {
-
/* Move relevant fragments to skb */
- used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
- skb, page_alloc, length);
+ used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, frags,
+ skb, length);
if (unlikely(!used_frags)) {
kfree_skb(skb);
return NULL;
@@ -526,12 +537,25 @@ out_loopback:
dev_kfree_skb_any(skb);
}
+static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ int index = ring->prod & ring->size_mask;
+
+ while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
+ if (mlx4_en_prepare_rx_desc(priv, ring, index))
+ break;
+ ring->prod++;
+ index = ring->prod & ring->size_mask;
+ }
+}
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cqe *cqe;
struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
- struct page_frag *skb_frags;
+ struct mlx4_en_rx_alloc *frags;
struct mlx4_en_rx_desc *rx_desc;
struct sk_buff *skb;
int index;
@@ -540,6 +564,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int polled = 0;
int ip_summed;
struct ethhdr *ethh;
+ dma_addr_t dma;
u64 s_mac;
if (!priv->port_up)
@@ -555,7 +580,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cq->mcq.cons_index & cq->size)) {
- skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ frags = ring->rx_info + (index << priv->log_rx_info);
rx_desc = ring->buf + (index << ring->log_stride);
/*
@@ -579,8 +604,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Get pointer to first fragment since we haven't skb yet and
* cast it to ethhdr struct */
- ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
- skb_frags[0].offset);
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
+ DMA_FROM_DEVICE);
+ ethh = (struct ethhdr *)(page_address(frags[0].page) +
+ frags[0].offset);
s_mac = mlx4_en_mac_to_u64(ethh->h_source);
/* If source MAC is equal to our own MAC and not performing
@@ -612,10 +640,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (!gro_skb)
goto next;
- nr = mlx4_en_complete_rx_desc(
- priv, rx_desc,
- skb_frags, gro_skb,
- ring->page_alloc, length);
+ nr = mlx4_en_complete_rx_desc(priv,
+ rx_desc, frags, gro_skb,
+ length);
if (!nr)
goto next;
@@ -651,8 +678,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ring->csum_none++;
}
- skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
- ring->page_alloc, length);
+ skb = mlx4_en_rx_skb(priv, rx_desc, frags, length);
if (!skb) {
priv->stats.rx_dropped++;
goto next;
@@ -678,6 +704,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
netif_receive_skb(skb);
next:
+ for (nr = 0; nr < priv->num_frags; nr++)
+ mlx4_en_free_frag(priv, frags, nr);
+
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = &cq->buf[index];
@@ -693,7 +722,7 @@ out:
mlx4_cq_set_ci(&cq->mcq);
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
ring->cons = cq->mcq.cons_index;
- ring->prod += polled; /* Polled descriptors were realocated in place */
+ mlx4_en_refill_rx_buffers(priv, ring);
mlx4_en_update_rx_prod_db(ring);
return polled;
}
@@ -782,7 +811,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
priv->num_frags = i;
priv->rx_skb_size = eff_mtu;
- priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
+ priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
"num_frags:%d):\n", eff_mtu, priv->num_frags);
@@ -844,6 +873,36 @@ out:
return err;
}
+int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
+{
+ int err;
+ u32 qpn;
+
+ err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn);
+ if (err) {
+ en_err(priv, "Failed reserving drop qpn\n");
+ return err;
+ }
+ err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
+ if (err) {
+ en_err(priv, "Failed allocating drop qp\n");
+ mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
+ return err;
+ }
+
+ return 0;
+}
+
+void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
+{
+ u32 qpn;
+
+ qpn = priv->drop_qp.qpn;
+ mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
+ mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
+ mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
+}
+
/* Allocate rx qp's and configure them according to rss map */
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
{
@@ -954,8 +1013,3 @@ void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
}
mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
}
-
-
-
-
-
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index bce98d9c0039..99a04648fab0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -39,6 +39,7 @@
#include <linux/dma-mapping.h>
#include <linux/mlx4/cmd.h>
+#include <linux/cpu_rmap.h>
#include "mlx4.h"
#include "fw.h"
@@ -82,6 +83,15 @@ enum {
(1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
(1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
+static u64 get_async_ev_mask(struct mlx4_dev *dev)
+{
+ u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
+ async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
+
+ return async_ev_mask;
+}
+
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
@@ -473,6 +483,11 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
+ case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
+ mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
+ (unsigned long) eqe);
+ break;
+
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
@@ -956,7 +971,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
priv->eq_table.have_irq = 1;
}
- err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
@@ -996,7 +1011,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
- mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
+ mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
mlx4_free_irqs(dev);
@@ -1040,7 +1055,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
mlx4_cmd_use_polling(dev);
/* Map the new eq to handle all asyncronous events */
- err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[i].eqn);
if (err) {
mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
@@ -1054,13 +1069,14 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
}
/* Return to default */
- mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+ mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
-int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
+int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
+ int *vector)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1074,6 +1090,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
snprintf(priv->eq_table.irq_names +
vec * MLX4_IRQNAME_SIZE,
MLX4_IRQNAME_SIZE, "%s", name);
+#ifdef CONFIG_RFS_ACCEL
+ if (rmap) {
+ err = irq_cpu_rmap_add(rmap,
+ priv->eq_table.eq[vec].irq);
+ if (err)
+ mlx4_warn(dev, "Failed adding irq rmap\n");
+ }
+#endif
err = request_irq(priv->eq_table.eq[vec].irq,
mlx4_msi_x_interrupt, 0,
&priv->eq_table.irq_names[vec<<5],
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 9c83bb8151ea..c69648487321 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -109,6 +109,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
[41] = "Unicast VEP steering support",
[42] = "Multicast VEP steering support",
[48] = "Counters support",
+ [59] = "Port management change event support",
};
int i;
@@ -123,7 +124,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
static const char * const fname[] = {
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
- [2] = "RSS XOR Hash Function support"
+ [2] = "RSS XOR Hash Function support",
+ [3] = "Device manage flow steering support"
};
int i;
@@ -173,6 +175,7 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
+#define QUERY_FUNC_CAP_FMR_OFFSET 0x8
#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
@@ -182,25 +185,44 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30
+#define QUERY_FUNC_CAP_FMR_FLAG 0x80
+#define QUERY_FUNC_CAP_FLAG_RDMA 0x40
+#define QUERY_FUNC_CAP_FLAG_ETH 0x80
+
+/* when opcode modifier = 1 */
#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
+#define QUERY_FUNC_CAP_RDMA_PROPS_OFFSET 0x8
#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC 0x40
+#define QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN 0x80
+
+#define QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID 0x80
+
if (vhcr->op_modifier == 1) {
field = vhcr->in_modifier;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
- field = 0; /* ensure fvl bit is not set */
+ field = 0;
+ /* ensure force vlan and force mac bits are not set */
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ /* ensure that phy_wqe_gid bit is not set */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+
} else if (vhcr->op_modifier == 0) {
- field = 1 << 7; /* enable only ethernet interface */
+ /* enable rdma and ethernet interfaces */
+ field = (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA);
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
field = dev->caps.num_ports;
MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
- size = 0; /* no PF behavious is set for now */
+ size = 0; /* no PF behaviour is set for now */
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+ field = 0; /* protected FMR support not available as yet */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FMR_OFFSET);
+
size = dev->caps.num_qps;
MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
@@ -253,11 +275,12 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
outbox = mailbox->buf;
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
- if (!(field & (1 << 7))) {
- mlx4_err(dev, "The host doesn't support eth interface\n");
+ if (!(field & (QUERY_FUNC_CAP_FLAG_ETH | QUERY_FUNC_CAP_FLAG_RDMA))) {
+ mlx4_err(dev, "The host supports neither eth nor rdma interfaces\n");
err = -EPROTONOSUPPORT;
goto out;
}
+ func_cap->flags = field;
MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
func_cap->num_ports = field;
@@ -296,17 +319,27 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
if (err)
goto out;
- MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
- if (field & (1 << 7)) {
- mlx4_err(dev, "VLAN is enforced on this port\n");
- err = -EPROTONOSUPPORT;
- goto out;
- }
+ if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_VLAN) {
+ mlx4_err(dev, "VLAN is enforced on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
- if (field & (1 << 6)) {
- mlx4_err(dev, "Force mac is enabled on this port\n");
- err = -EPROTONOSUPPORT;
- goto out;
+ if (field & QUERY_FUNC_CAP_ETH_PROPS_FORCE_MAC) {
+ mlx4_err(dev, "Force mac is enabled on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+ } else if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB) {
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_RDMA_PROPS_OFFSET);
+ if (field & QUERY_FUNC_CAP_RDMA_PROPS_FORCE_PHY_WQE_GID) {
+ mlx4_err(dev, "phy_wqe_gid is "
+ "enforced on this ib port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
}
MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
@@ -391,6 +424,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66
#define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67
#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
+#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76
+#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -474,6 +509,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->num_ports = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET);
dev_cap->max_msg_sz = 1 << (field & 0x1f);
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
+ if (field & 0x80)
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
+ dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+ MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
+ dev_cap->fs_max_num_qp_per_entry = field;
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
@@ -698,14 +739,12 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
{
u64 def_mac;
u8 port_type;
+ u16 short_field;
int err;
-#define MLX4_PORT_SUPPORT_IB (1 << 0)
-#define MLX4_PORT_SUGGEST_TYPE (1 << 3)
-#define MLX4_PORT_DEFAULT_SENSE (1 << 4)
-#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \
- ~MLX4_PORT_SUGGEST_TYPE & \
- ~MLX4_PORT_DEFAULT_SENSE)
+#define MLX4_VF_PORT_NO_LINK_SENSE_MASK 0xE0
+#define QUERY_PORT_CUR_MAX_PKEY_OFFSET 0x0c
+#define QUERY_PORT_CUR_MAX_GID_OFFSET 0x0e
err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
@@ -721,20 +760,58 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_GET(port_type, outbox->buf,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
- /* Allow only Eth port, no link sensing allowed */
- port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
-
- /* check eth is enabled for this port */
- if (!(port_type & 2))
- mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
+ /* No link sensing allowed */
+ port_type &= MLX4_VF_PORT_NO_LINK_SENSE_MASK;
+ /* set port type to currently operating port type */
+ port_type |= (dev->caps.port_type[vhcr->in_modifier] & 0x3);
MLX4_PUT(outbox->buf, port_type,
QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+ short_field = 1; /* slave max gids */
+ MLX4_PUT(outbox->buf, short_field,
+ QUERY_PORT_CUR_MAX_GID_OFFSET);
+
+ short_field = dev->caps.pkey_table_len[vhcr->in_modifier];
+ MLX4_PUT(outbox->buf, short_field,
+ QUERY_PORT_CUR_MAX_PKEY_OFFSET);
}
return err;
}
+int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
+ int *gid_tbl_len, int *pkey_tbl_len)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u16 field;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, port, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ outbox = mailbox->buf;
+
+ MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_GID_OFFSET);
+ *gid_tbl_len = field;
+
+ MLX4_GET(field, outbox, QUERY_PORT_CUR_MAX_PKEY_OFFSET);
+ *pkey_tbl_len = field;
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_get_slave_pkey_gid_tbl_len);
+
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -881,11 +958,12 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
+ MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+ dev->caps.function = lg;
+
if (mlx4_is_slave(dev))
goto out;
- MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
- dev->caps.function = lg;
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
@@ -966,9 +1044,12 @@ int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
if (err)
return err;
- /* for slaves, zero out everything except FW version */
+ /* for slaves, set pci PPF ID to invalid and zero out everything
+ * else except FW version */
outbuf[0] = outbuf[1] = 0;
memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
+ outbuf[QUERY_FW_PPF_ID] = MLX4_INVALID_SLAVE_ID;
+
return 0;
}
@@ -1061,6 +1142,15 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16)
#define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18)
#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b)
+#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6
+#define INIT_HCA_FS_PARAM_OFFSET 0x1d0
+#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00)
+#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12)
+#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b)
+#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21)
+#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x22)
+#define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25)
+#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26)
#define INIT_HCA_TPT_OFFSET 0x0f0
#define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00)
#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b)
@@ -1119,14 +1209,44 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET);
- /* multicast attributes */
-
- MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
- MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
- MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ /* steering attributes */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |=
+ cpu_to_be32(1 <<
+ INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN);
+
+ MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_entry_sz,
+ INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_table_sz,
+ INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ /* Enable Ethernet flow steering
+ * with udp unicast and tcp unicast
+ */
+ MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ INIT_HCA_FS_ETH_BITS_OFFSET);
+ MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
+ INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET);
+ /* Enable IPoIB flow steering
+ * with udp unicast and tcp unicast
+ */
+ MLX4_PUT(inbox, param->fs_hash_enable_bits,
+ INIT_HCA_FS_IB_BITS_OFFSET);
+ MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR,
+ INIT_HCA_FS_IB_NUM_ADDRS_OFFSET);
+ } else {
+ MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_entry_sz,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_hash_sz,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_PUT(inbox, param->log_mc_table_sz,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0)
+ MLX4_PUT(inbox, (u8) (1 << 3),
+ INIT_HCA_UC_STEERING_OFFSET);
+ }
/* TPT attributes */
@@ -1188,15 +1308,24 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
- /* multicast attributes */
+ /* steering attributes */
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
- MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_hash_sz, outbox,
- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ } else {
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_hash_sz, outbox,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ }
/* TPT attributes */
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 64c0399e4b78..83fcbbf1b169 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -78,6 +78,8 @@ struct mlx4_dev_cap {
u16 wavelength[MLX4_MAX_PORTS + 1];
u64 trans_code[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
+ int fs_log_max_ucast_qp_range_size;
+ int fs_max_num_qp_per_entry;
u64 flags;
u64 flags2;
int reserved_uars;
@@ -165,6 +167,7 @@ struct mlx4_init_hca_param {
u8 log_mpt_sz;
u8 log_uar_sz;
u8 uar_page_sz; /* log pg sz in 4k chunks */
+ u8 fs_hash_enable_bits;
};
struct mlx4_init_ib_param {
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index a9ade1c3cad5..88b7b3e75ab1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -413,6 +413,8 @@ err:
mlx4_free_icm(dev, table->icm[i], use_coherent);
}
+ kfree(table->icm);
+
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index b10c07a1dc1a..19e4efc0b342 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -81,13 +81,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 virt, int obj_size, int nobj, int reserved,
int use_lowmem, int use_coherent);
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
-void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj);
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
-int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
- int start, int end);
-void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
- int start, int end);
static inline void mlx4_icm_first(struct mlx4_icm *icm,
struct mlx4_icm_iter *iter)
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index b4e9f6f5cc04..116895ac8b35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -115,7 +115,8 @@ void mlx4_unregister_interface(struct mlx4_interface *intf)
}
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port)
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
+ unsigned long param)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_device_context *dev_ctx;
@@ -125,7 +126,7 @@ void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int por
list_for_each_entry(dev_ctx, &priv->ctx_list, list)
if (dev_ctx->intf->event)
- dev_ctx->intf->event(dev, dev_ctx->context, type, port);
+ dev_ctx->intf->event(dev, dev_ctx->context, type, param);
spin_unlock_irqrestore(&priv->ctx_lock, flags);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index a0313de122de..e8f8ebb4ae65 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
+#include <linux/netdevice.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -90,7 +91,9 @@ module_param_named(log_num_mgm_entry_size,
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
" of qp per mcg, for example:"
" 10 gives 248.range: 9<="
- " log_num_mgm_entry_size <= 12");
+ " log_num_mgm_entry_size <= 12."
+ " Not in use with device managed"
+ " flow steering");
#define MLX4_VF (1 << 0)
@@ -215,6 +218,10 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.vl_cap[i] = dev_cap->max_vl[i];
dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
+ dev->phys_caps.gid_phys_table_len[i] = dev_cap->max_gids[i];
+ dev->phys_caps.pkey_phys_table_len[i] = dev_cap->max_pkeys[i];
+ /* set gid and pkey table operating lengths by default
+ * to non-sriov values */
dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
@@ -243,7 +250,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
- dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
@@ -274,6 +280,28 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
+ dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
+ dev->caps.fs_log_max_ucast_qp_range_size =
+ dev_cap->fs_log_max_ucast_qp_range_size;
+ } else {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
+ } else {
+ dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
+ mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags "
+ "set to use B0 steering. Falling back to A0 steering mode.\n");
+ }
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
+ }
+ mlx4_dbg(dev, "Steering mode is: %s\n",
+ mlx4_steering_mode_str(dev->caps.steering_mode));
+
/* Sense port always allowed on supported devices for ConnectX1 and 2 */
if (dev->pdev->device != 0x1003)
dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
@@ -288,29 +316,19 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
/* if only ETH is supported - assign ETH */
if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
- /* if only IB is supported,
- * assign IB only if SRIOV is off*/
+ /* if only IB is supported, assign IB */
else if (dev->caps.supported_type[i] ==
- MLX4_PORT_TYPE_IB) {
- if (dev->flags & MLX4_FLAG_SRIOV)
- dev->caps.port_type[i] =
- MLX4_PORT_TYPE_NONE;
- else
- dev->caps.port_type[i] =
- MLX4_PORT_TYPE_IB;
- /* if IB and ETH are supported,
- * first of all check if SRIOV is on */
- } else if (dev->flags & MLX4_FLAG_SRIOV)
- dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ MLX4_PORT_TYPE_IB)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
else {
- /* In non-SRIOV mode, we set the port type
- * according to user selection of port type,
- * if usere selected none, take the FW hint */
- if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
+ /* if IB and ETH are supported, we set the port
+ * type according to user selection of port type;
+ * if user selected none, take the FW hint */
+ if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
else
- dev->caps.port_type[i] = port_type_array[i-1];
+ dev->caps.port_type[i] = port_type_array[i - 1];
}
}
/*
@@ -391,6 +409,23 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
return ret;
}
+int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
+{
+ u32 qk = MLX4_RESERVED_QKEY_BASE;
+ if (qpn >= dev->caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
+ qpn < dev->caps.sqp_start)
+ return -EINVAL;
+
+ if (qpn >= dev->caps.base_tunnel_sqpn)
+ /* tunnel qp */
+ qk += qpn - dev->caps.base_tunnel_sqpn;
+ else
+ qk += qpn - dev->caps.sqp_start;
+ *qkey = qk;
+ return 0;
+}
+EXPORT_SYMBOL(mlx4_get_parav_qkey);
+
int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -491,8 +526,13 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
return -ENODEV;
}
- for (i = 1; i <= dev->caps.num_ports; ++i)
+ for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.port_mask[i] = dev->caps.port_type[i];
+ if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
+ &dev->caps.gid_table_len[i],
+ &dev->caps.pkey_table_len[i]))
+ return -ENODEV;
+ }
if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) >
@@ -529,7 +569,7 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
for (port = 1; port <= dev->caps.num_ports; port++) {
mlx4_CLOSE_PORT(dev, port);
dev->caps.port_type[port] = port_types[port - 1];
- err = mlx4_SET_PORT(dev, port);
+ err = mlx4_SET_PORT(dev, port, -1);
if (err) {
mlx4_err(dev, "Failed to set port %d, "
"aborting\n", port);
@@ -715,7 +755,7 @@ static ssize_t set_port_ib_mtu(struct device *dev,
mlx4_unregister_device(mdev);
for (port = 1; port <= mdev->caps.num_ports; port++) {
mlx4_CLOSE_PORT(mdev, port);
- err = mlx4_SET_PORT(mdev, port);
+ err = mlx4_SET_PORT(mdev, port, -1);
if (err) {
mlx4_err(mdev, "Failed to set port %d, "
"aborting\n", port);
@@ -967,9 +1007,11 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
}
/*
- * It's not strictly required, but for simplicity just map the
- * whole multicast group table now. The table isn't very big
- * and it's a lot easier than trying to track ref counts.
+ * For flow steering device managed mode it is required to use
+ * mlx4_init_icm_table. For B0 steering mode it's not strictly
+ * required, but for simplicity just map the whole multicast
+ * group table now. The table isn't very big and it's a lot
+ * easier than trying to track ref counts.
*/
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
init_hca->mc_base,
@@ -1166,6 +1208,17 @@ err:
return -EIO;
}
+static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
+{
+ int i;
+
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ dev->caps.gid_table_len[i] = 1;
+ dev->caps.pkey_table_len[i] =
+ dev->phys_caps.pkey_phys_table_len[i] - 1;
+ }
+}
+
static int mlx4_init_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1205,7 +1258,29 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
goto err_stop_fw;
}
+ if (mlx4_is_master(dev))
+ mlx4_parav_master_pf_caps(dev);
+
+ priv->fs_hash_mode = MLX4_FS_L2_HASH;
+
+ switch (priv->fs_hash_mode) {
+ case MLX4_FS_L2_HASH:
+ init_hca.fs_hash_enable_bits = 0;
+ break;
+
+ case MLX4_FS_L2_L3_L4_HASH:
+ /* Enable flow steering with
+ * udp unicast and tcp unicast
+ */
+ init_hca.fs_hash_enable_bits =
+ MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN;
+ break;
+ }
+
profile = default_profile;
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ profile.num_mcg = MLX4_FS_NUM_MCG;
icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
&init_hca);
@@ -1477,12 +1552,24 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
"with caps = 0\n", port, err);
dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+ /* initialize per-slave default ib port capabilities */
+ if (mlx4_is_master(dev)) {
+ int i;
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i == mlx4_master_func_num(dev))
+ continue;
+ priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
+ ib_port_default_caps;
+ }
+ }
+
if (mlx4_is_mfunc(dev))
dev->caps.port_ib_mtu[port] = IB_MTU_2048;
else
dev->caps.port_ib_mtu[port] = IB_MTU_4096;
- err = mlx4_SET_PORT(dev, port);
+ err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
+ dev->caps.pkey_table_len[port] : -1);
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
port);
@@ -1539,8 +1626,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int nreq = min_t(int, dev->caps.num_ports *
- min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
- + MSIX_LEGACY_SZ, MAX_MSIX);
+ min_t(int, netif_get_num_default_rss_queues() + 1,
+ MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
int err;
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f4a8f98e402a..4ec3835e1bc2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -54,7 +54,12 @@ struct mlx4_mgm {
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
- return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
+ else
+ return min((1 << mlx4_log_num_mgm_entry_size),
+ MLX4_MAX_MGM_ENTRY_SIZE);
}
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
@@ -62,6 +67,35 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
+static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox,
+ u32 size,
+ u64 *reg_id)
+{
+ u64 imm;
+ int err = 0;
+
+ err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ *reg_id = imm;
+
+ return err;
+}
+
+static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
+{
+ int err = 0;
+
+ err = mlx4_cmd(dev, regid, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ return err;
+}
+
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
@@ -614,6 +648,311 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
return err;
}
+struct mlx4_net_trans_rule_hw_ctrl {
+ __be32 ctrl;
+ __be32 vf_vep_port;
+ __be32 qpn;
+ __be32 reserved;
+};
+
+static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
+ struct mlx4_net_trans_rule_hw_ctrl *hw)
+{
+ static const u8 __promisc_mode[] = {
+ [MLX4_FS_PROMISC_NONE] = 0x0,
+ [MLX4_FS_PROMISC_UPLINK] = 0x1,
+ [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
+ [MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
+ };
+
+ u32 dw = 0;
+
+ dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+ dw |= ctrl->exclusive ? (1 << 2) : 0;
+ dw |= ctrl->allow_loopback ? (1 << 3) : 0;
+ dw |= __promisc_mode[ctrl->promisc_mode] << 8;
+ dw |= ctrl->priority << 16;
+
+ hw->ctrl = cpu_to_be32(dw);
+ hw->vf_vep_port = cpu_to_be32(ctrl->port);
+ hw->qpn = cpu_to_be32(ctrl->qpn);
+}
+
+struct mlx4_net_trans_rule_hw_ib {
+ u8 size;
+ u8 rsvd1;
+ __be16 id;
+ u32 rsvd2;
+ __be32 qpn;
+ __be32 qpn_mask;
+ u8 dst_gid[16];
+ u8 dst_gid_msk[16];
+} __packed;
+
+struct mlx4_net_trans_rule_hw_eth {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ u8 rsvd1[6];
+ u8 dst_mac[6];
+ u16 rsvd2;
+ u8 dst_mac_msk[6];
+ u16 rsvd3;
+ u8 src_mac[6];
+ u16 rsvd4;
+ u8 src_mac_msk[6];
+ u8 rsvd5;
+ u8 ether_type_enable;
+ __be16 ether_type;
+ __be16 vlan_id_msk;
+ __be16 vlan_id;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_tcp_udp {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be16 rsvd1[3];
+ __be16 dst_port;
+ __be16 rsvd2;
+ __be16 dst_port_msk;
+ __be16 rsvd3;
+ __be16 src_port;
+ __be16 rsvd4;
+ __be16 src_port_msk;
+} __packed;
+
+struct mlx4_net_trans_rule_hw_ipv4 {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ __be32 rsvd1;
+ __be32 dst_ip;
+ __be32 dst_ip_msk;
+ __be32 src_ip;
+ __be32 src_ip_msk;
+} __packed;
+
+struct _rule_hw {
+ union {
+ struct {
+ u8 size;
+ u8 rsvd;
+ __be16 id;
+ };
+ struct mlx4_net_trans_rule_hw_eth eth;
+ struct mlx4_net_trans_rule_hw_ib ib;
+ struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+ struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+ };
+};
+
+static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
+ struct _rule_hw *rule_hw)
+{
+ static const u16 __sw_id_hw[] = {
+ [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001,
+ [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005,
+ [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
+ [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
+ [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004,
+ [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006
+ };
+
+ static const size_t __rule_hw_sz[] = {
+ [MLX4_NET_TRANS_RULE_ID_ETH] =
+ sizeof(struct mlx4_net_trans_rule_hw_eth),
+ [MLX4_NET_TRANS_RULE_ID_IB] =
+ sizeof(struct mlx4_net_trans_rule_hw_ib),
+ [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+ [MLX4_NET_TRANS_RULE_ID_IPV4] =
+ sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+ [MLX4_NET_TRANS_RULE_ID_TCP] =
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+ [MLX4_NET_TRANS_RULE_ID_UDP] =
+ sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+ };
+ if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
+ mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+ return -EINVAL;
+ }
+ memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+ rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
+ rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+
+ switch (spec->id) {
+ case MLX4_NET_TRANS_RULE_ID_ETH:
+ memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
+ memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
+ ETH_ALEN);
+ memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
+ memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
+ ETH_ALEN);
+ if (spec->eth.ether_type_enable) {
+ rule_hw->eth.ether_type_enable = 1;
+ rule_hw->eth.ether_type = spec->eth.ether_type;
+ }
+ rule_hw->eth.vlan_id = spec->eth.vlan_id;
+ rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IB:
+ rule_hw->ib.qpn = spec->ib.r_qpn;
+ rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
+ memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
+ memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV6:
+ return -EOPNOTSUPP;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV4:
+ rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
+ rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
+ rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
+ rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_TCP:
+ case MLX4_NET_TRANS_RULE_ID_UDP:
+ rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
+ rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
+ rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
+ rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return __rule_hw_sz[spec->id];
+}
+
+static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
+ struct mlx4_net_trans_rule *rule)
+{
+#define BUF_SIZE 256
+ struct mlx4_spec_list *cur;
+ char buf[BUF_SIZE];
+ int len = 0;
+
+ mlx4_err(dev, "%s", str);
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "port = %d prio = 0x%x qp = 0x%x ",
+ rule->port, rule->priority, rule->qpn);
+
+ list_for_each_entry(cur, &rule->list, list) {
+ switch (cur->id) {
+ case MLX4_NET_TRANS_RULE_ID_ETH:
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dmac = %pM ", &cur->eth.dst_mac);
+ if (cur->eth.ether_type)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "ethertype = 0x%x ",
+ be16_to_cpu(cur->eth.ether_type));
+ if (cur->eth.vlan_id)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "vlan-id = %d ",
+ be16_to_cpu(cur->eth.vlan_id));
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV4:
+ if (cur->ipv4.src_ip)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "src-ip = %pI4 ",
+ &cur->ipv4.src_ip);
+ if (cur->ipv4.dst_ip)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-ip = %pI4 ",
+ &cur->ipv4.dst_ip);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_TCP:
+ case MLX4_NET_TRANS_RULE_ID_UDP:
+ if (cur->tcp_udp.src_port)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "src-port = %d ",
+ be16_to_cpu(cur->tcp_udp.src_port));
+ if (cur->tcp_udp.dst_port)
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-port = %d ",
+ be16_to_cpu(cur->tcp_udp.dst_port));
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IB:
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-gid = %pI6\n", cur->ib.dst_gid);
+ len += snprintf(buf + len, BUF_SIZE - len,
+ "dst-gid-mask = %pI6\n",
+ cur->ib.dst_gid_msk);
+ break;
+
+ case MLX4_NET_TRANS_RULE_ID_IPV6:
+ break;
+
+ default:
+ break;
+ }
+ }
+ len += snprintf(buf + len, BUF_SIZE - len, "\n");
+ mlx4_err(dev, "%s", buf);
+
+ if (len >= BUF_SIZE)
+ mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+}
+
+int mlx4_flow_attach(struct mlx4_dev *dev,
+ struct mlx4_net_trans_rule *rule, u64 *reg_id)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_spec_list *cur;
+ u32 size = 0;
+ int ret;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
+ trans_rule_ctrl_to_hw(rule, mailbox->buf);
+
+ size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+
+ list_for_each_entry(cur, &rule->list, list) {
+ ret = parse_trans_rule(dev, cur, mailbox->buf + size);
+ if (ret < 0) {
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return -EINVAL;
+ }
+ size += ret;
+ }
+
+ ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
+ if (ret == -ENOMEM)
+ mlx4_err_rule(dev,
+ "mcg table is full. Fail to register network rule.\n",
+ rule);
+ else if (ret)
+ mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_attach);
+
+int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
+{
+ int err;
+
+ err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
+ if (err)
+ mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n",
+ reg_id);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_detach);
+
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer)
@@ -866,49 +1205,159 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_mcast_loopback, enum mlx4_protocol prot)
+ u8 port, int block_mcast_loopback,
+ enum mlx4_protocol prot, u64 *reg_id)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
- if (prot == MLX4_PROT_ETH)
- gid[7] |= (MLX4_MC_STEER << 1);
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ if (prot == MLX4_PROT_ETH)
+ return 0;
+
+ case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_MC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+ return mlx4_qp_attach_common(dev, qp, gid,
+ block_mcast_loopback, prot,
+ MLX4_MC_STEER);
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.allow_loopback = ~block_mcast_loopback;
+ rule.port = port;
+ rule.qpn = qp->qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ switch (prot) {
+ case MLX4_PROT_ETH:
+ spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
+ memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ break;
- if (mlx4_is_mfunc(dev))
- return mlx4_QP_ATTACH(dev, qp, gid, 1,
- block_mcast_loopback, prot);
+ case MLX4_PROT_IB_IPV6:
+ spec.id = MLX4_NET_TRANS_RULE_ID_IB;
+ memcpy(spec.ib.dst_gid, gid, 16);
+ memset(&spec.ib.dst_gid_msk, 0xff, 16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ list_add_tail(&spec.list, &rule.list);
- return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
- prot, MLX4_MC_STEER);
+ return mlx4_flow_attach(dev, &rule, reg_id);
+ }
+
+ default:
+ return -EINVAL;
+ }
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- enum mlx4_protocol prot)
+ enum mlx4_protocol prot, u64 reg_id)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ if (prot == MLX4_PROT_ETH)
+ return 0;
- if (prot == MLX4_PROT_ETH)
- gid[7] |= (MLX4_MC_STEER << 1);
+ case MLX4_STEERING_MODE_B0:
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_MC_STEER << 1);
- if (mlx4_is_mfunc(dev))
- return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot,
+ MLX4_MC_STEER);
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ return mlx4_flow_detach(dev, reg_id);
- return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER);
+ default:
+ return -EINVAL;
+ }
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
+ u32 qpn, enum mlx4_net_trans_promisc_mode mode)
+{
+ struct mlx4_net_trans_rule rule;
+ u64 *regid_p;
+
+ switch (mode) {
+ case MLX4_FS_PROMISC_UPLINK:
+ case MLX4_FS_PROMISC_FUNCTION_PORT:
+ regid_p = &dev->regid_promisc_array[port];
+ break;
+ case MLX4_FS_PROMISC_ALL_MULTI:
+ regid_p = &dev->regid_allmulti_array[port];
+ break;
+ default:
+ return -1;
+ }
+
+ if (*regid_p != 0)
+ return -1;
+
+ rule.promisc_mode = mode;
+ rule.port = port;
+ rule.qpn = qpn;
+ INIT_LIST_HEAD(&rule.list);
+ mlx4_err(dev, "going promisc on %x\n", port);
+
+ return mlx4_flow_attach(dev, &rule, regid_p);
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
+
+int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
+ enum mlx4_net_trans_promisc_mode mode)
+{
+ int ret;
+ u64 *regid_p;
+
+ switch (mode) {
+ case MLX4_FS_PROMISC_UPLINK:
+ case MLX4_FS_PROMISC_FUNCTION_PORT:
+ regid_p = &dev->regid_promisc_array[port];
+ break;
+ case MLX4_FS_PROMISC_ALL_MULTI:
+ regid_p = &dev->regid_allmulti_array[port];
+ break;
+ default:
+ return -1;
+ }
+
+ if (*regid_p == 0)
+ return -1;
+
+ ret = mlx4_flow_detach(dev, *regid_p);
+ if (ret == 0)
+ *regid_p = 0;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
+
int mlx4_unicast_attach(struct mlx4_dev *dev,
struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_UC_STEER << 1);
@@ -924,10 +1373,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
u8 gid[16], enum mlx4_protocol prot)
{
- if (prot == MLX4_PROT_ETH &&
- !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (prot == MLX4_PROT_ETH)
gid[7] |= (MLX4_UC_STEER << 1);
@@ -968,9 +1413,6 @@ static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
@@ -980,9 +1422,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
@@ -992,9 +1431,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
@@ -1004,9 +1440,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- return 0;
-
if (mlx4_is_mfunc(dev))
return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
@@ -1019,6 +1452,10 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ /* No need for mcg_table when fw managed the mcg table*/
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return 0;
err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
dev->caps.num_amgms - 1, 0, 0);
if (err)
@@ -1031,5 +1468,7 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
- mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e5d20220762c..59ebc0339638 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -39,6 +39,7 @@
#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
#include <linux/timer.h>
#include <linux/semaphore.h>
#include <linux/workqueue.h>
@@ -53,6 +54,17 @@
#define DRV_VERSION "1.1"
#define DRV_RELDATE "Dec, 2011"
+#define MLX4_FS_UDP_UC_EN (1 << 1)
+#define MLX4_FS_TCP_UC_EN (1 << 2)
+#define MLX4_FS_NUM_OF_L2_ADDR 8
+#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7
+#define MLX4_FS_NUM_MCG (1 << 17)
+
+enum {
+ MLX4_FS_L2_HASH = 0,
+ MLX4_FS_L2_L3_L4_HASH,
+};
+
#define MLX4_NUM_UP 8
#define MLX4_NUM_TC 8
#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
@@ -137,6 +149,7 @@ enum mlx4_resource {
RES_VLAN,
RES_EQ,
RES_COUNTER,
+ RES_FS_RULE,
MLX4_NUM_OF_RESOURCE_TYPE
};
@@ -338,66 +351,6 @@ struct mlx4_srq_context {
__be64 db_rec_addr;
};
-struct mlx4_eqe {
- u8 reserved1;
- u8 type;
- u8 reserved2;
- u8 subtype;
- union {
- u32 raw[6];
- struct {
- __be32 cqn;
- } __packed comp;
- struct {
- u16 reserved1;
- __be16 token;
- u32 reserved2;
- u8 reserved3[3];
- u8 status;
- __be64 out_param;
- } __packed cmd;
- struct {
- __be32 qpn;
- } __packed qp;
- struct {
- __be32 srqn;
- } __packed srq;
- struct {
- __be32 cqn;
- u32 reserved1;
- u8 reserved2[3];
- u8 syndrome;
- } __packed cq_err;
- struct {
- u32 reserved1[2];
- __be32 port;
- } __packed port_change;
- struct {
- #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
- u32 reserved;
- u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
- } __packed comm_channel_arm;
- struct {
- u8 port;
- u8 reserved[3];
- __be64 mac;
- } __packed mac_update;
- struct {
- u8 port;
- } __packed sw_event;
- struct {
- __be32 slave_id;
- } __packed flr_event;
- struct {
- __be16 current_temperature;
- __be16 warning_threshold;
- } __packed warming;
- } event;
- u8 slave_id;
- u8 reserved3[2];
- u8 owner;
-} __packed;
-
struct mlx4_eq {
struct mlx4_dev *dev;
void __iomem *doorbell;
@@ -509,7 +462,7 @@ struct slave_list {
struct mlx4_resource_tracker {
spinlock_t lock;
/* tree for each resources */
- struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+ struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
/* num_of_slave's lists, one per slave */
struct slave_list *slave_list;
};
@@ -703,6 +656,7 @@ struct mlx4_set_port_rqp_calc_context {
struct mlx4_mac_entry {
u64 mac;
+ u64 reg_id;
};
struct mlx4_port_info {
@@ -776,6 +730,7 @@ struct mlx4_priv {
struct mutex bf_mutex;
struct io_mapping *bf_mapping;
int reserved_mtts;
+ int fs_hash_mode;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -887,7 +842,8 @@ void mlx4_catas_init(void);
int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
-void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, int port);
+void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
+ unsigned long param);
struct mlx4_dev_cap;
struct mlx4_init_hca_param;
@@ -1028,11 +984,11 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
-int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
/* resource tracker functions*/
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource resource_type,
- int resource_id, int *slave);
+ u64 resource_id, int *slave);
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
int mlx4_init_resource_tracker(struct mlx4_dev *dev);
@@ -1071,6 +1027,8 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
+int mlx4_get_slave_pkey_gid_tbl_len(struct mlx4_dev *dev, u8 port,
+ int *gid_tbl_len, int *pkey_tbl_len);
int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
@@ -1117,6 +1075,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
struct mlx4_cmd_info *cmd);
+int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 225c20d47900..5f1ab105debc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -43,6 +43,7 @@
#ifdef CONFIG_MLX4_EN_DCB
#include <linux/dcbnl.h>
#endif
+#include <linux/cpu_rmap.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>
@@ -75,6 +76,10 @@
#define STAMP_SHIFT 31
#define STAMP_VAL 0x7fffffff
#define STATS_DELAY (HZ / 4)
+#define MAX_NUM_OF_FS_RULES 256
+
+#define MLX4_EN_FILTER_HASH_SHIFT 4
+#define MLX4_EN_FILTER_EXPIRY_QUOTA 60
/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
#define MAX_DESC_SIZE 512
@@ -106,7 +111,7 @@ enum {
#define MLX4_EN_MAX_TX_SIZE 8192
#define MLX4_EN_MAX_RX_SIZE 8192
-/* Minimum ring size for our page-allocation sceme to work */
+/* Minimum ring size for our page-allocation scheme to work */
#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
@@ -227,6 +232,7 @@ struct mlx4_en_tx_desc {
struct mlx4_en_rx_alloc {
struct page *page;
+ dma_addr_t dma;
u16 offset;
};
@@ -404,6 +410,19 @@ struct mlx4_en_perf_stats {
#define NUM_PERF_COUNTERS 6
};
+enum mlx4_en_mclist_act {
+ MCLIST_NONE,
+ MCLIST_REM,
+ MCLIST_ADD,
+};
+
+struct mlx4_en_mc_list {
+ struct list_head list;
+ enum mlx4_en_mclist_act action;
+ u8 addr[ETH_ALEN];
+ u64 reg_id;
+};
+
struct mlx4_en_frag_info {
u16 frag_size;
u16 frag_prefix_size;
@@ -422,6 +441,11 @@ struct mlx4_en_frag_info {
#endif
+struct ethtool_flow_id {
+ struct ethtool_rx_flow_spec flow_spec;
+ u64 id;
+};
+
struct mlx4_en_priv {
struct mlx4_en_dev *mdev;
struct mlx4_en_port_profile *prof;
@@ -431,6 +455,7 @@ struct mlx4_en_priv {
struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
+ struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
unsigned long last_moder_packets[MAX_RX_RINGS];
unsigned long last_moder_tx_packets;
@@ -480,6 +505,7 @@ struct mlx4_en_priv {
struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
struct mlx4_en_cq *tx_cq;
struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct mlx4_qp drop_qp;
struct work_struct mcast_task;
struct work_struct mac_task;
struct work_struct watchdog_task;
@@ -489,8 +515,9 @@ struct mlx4_en_priv {
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
u64 stats_bitmap;
- char *mc_addrs;
- int mc_addrs_cnt;
+ struct list_head mc_list;
+ struct list_head curr_list;
+ u64 broadcast_id;
struct mlx4_en_stat_out_mbox hw_stats;
int vids[128];
bool wol;
@@ -501,6 +528,13 @@ struct mlx4_en_priv {
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
#endif
+#ifdef CONFIG_RFS_ACCEL
+ spinlock_t filters_lock;
+ int last_filter_id;
+ struct list_head filters;
+ struct hlist_head filter_hash[1 << MLX4_EN_FILTER_HASH_SHIFT];
+#endif
+
};
enum mlx4_en_wol {
@@ -565,6 +599,8 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
void mlx4_en_calc_rx_buf(struct net_device *dev);
int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
+int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv);
+void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv);
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
void mlx4_en_rx_irq(struct mlx4_cq *mcq);
@@ -578,6 +614,11 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
#endif
+#ifdef CONFIG_RFS_ACCEL
+void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *rx_ring);
+#endif
+
#define MLX4_EN_NUM_SELF_TEST 5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
u64 mlx4_en_mac_to_u64(u8 *addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index a8fb52992c64..e36dd0f2fa73 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -39,7 +39,6 @@
#include "mlx4.h"
#define MLX4_MAC_VALID (1ull << 63)
-#define MLX4_MAC_MASK 0xffffffffffffULL
#define MLX4_VLAN_VALID (1u << 31)
#define MLX4_VLAN_MASK 0xfff
@@ -75,21 +74,54 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
+ u64 mac, int *qpn, u64 *reg_id)
{
- struct mlx4_qp qp;
- u8 gid[16] = {0};
__be64 be_mac;
int err;
- qp.qpn = *qpn;
-
- mac &= 0xffffffffffffULL;
+ mac &= MLX4_MAC_MASK;
be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
- err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+
+ qp.qpn = *qpn;
+ memcpy(&gid[10], &be_mac, ETH_ALEN);
+ gid[5] = port;
+
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ struct mlx4_spec_list spec_eth = { {NULL} };
+ __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+ struct mlx4_net_trans_rule rule = {
+ .queue_mode = MLX4_NET_TRANS_Q_FIFO,
+ .exclusive = 0,
+ .allow_loopback = 1,
+ .promisc_mode = MLX4_FS_PROMISC_NONE,
+ .priority = MLX4_DOMAIN_NIC,
+ };
+
+ rule.port = port;
+ rule.qpn = *qpn;
+ INIT_LIST_HEAD(&rule.list);
+
+ spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN);
+ memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+ list_add_tail(&spec_eth.list, &rule.list);
+
+ err = mlx4_flow_attach(dev, &rule, reg_id);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
if (err)
mlx4_warn(dev, "Failed Attaching Unicast\n");
@@ -97,19 +129,30 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn)
+ u64 mac, int qpn, u64 reg_id)
{
- struct mlx4_qp qp;
- u8 gid[16] = {0};
- __be64 be_mac;
+ switch (dev->caps.steering_mode) {
+ case MLX4_STEERING_MODE_B0: {
+ struct mlx4_qp qp;
+ u8 gid[16] = {0};
+ __be64 be_mac;
- qp.qpn = qpn;
- mac &= 0xffffffffffffULL;
- be_mac = cpu_to_be64(mac << 16);
- memcpy(&gid[10], &be_mac, ETH_ALEN);
- gid[5] = port;
+ qp.qpn = qpn;
+ mac &= MLX4_MAC_MASK;
+ be_mac = cpu_to_be64(mac << 16);
+ memcpy(&gid[10], &be_mac, ETH_ALEN);
+ gid[5] = port;
- mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+ break;
+ }
+ case MLX4_STEERING_MODE_DEVICE_MANAGED: {
+ mlx4_flow_detach(dev, reg_id);
+ break;
+ }
+ default:
+ mlx4_err(dev, "Invalid steering mode.\n");
+ }
}
static int validate_index(struct mlx4_dev *dev,
@@ -144,6 +187,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
struct mlx4_mac_entry *entry;
int index = 0;
int err = 0;
+ u64 reg_id;
mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
(unsigned long long) mac);
@@ -155,7 +199,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
return err;
}
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
*qpn = info->base_qpn + index;
return 0;
}
@@ -167,7 +211,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
goto qp_err;
}
- err = mlx4_uc_steer_add(dev, port, mac, qpn);
+ err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id);
if (err)
goto steer_err;
@@ -177,6 +221,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
goto alloc_err;
}
entry->mac = mac;
+ entry->reg_id = reg_id;
err = radix_tree_insert(&info->mac_tree, *qpn, entry);
if (err)
goto insert_err;
@@ -186,7 +231,7 @@ insert_err:
kfree(entry);
alloc_err:
- mlx4_uc_steer_release(dev, port, mac, *qpn);
+ mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id);
steer_err:
mlx4_qp_release_range(dev, *qpn, 1);
@@ -206,13 +251,14 @@ void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
(unsigned long long) mac);
mlx4_unregister_mac(dev, port, mac);
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (entry) {
mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
" qpn %d\n", port,
(unsigned long long) mac, qpn);
- mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac,
+ qpn, entry->reg_id);
mlx4_qp_release_range(dev, qpn, 1);
radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
@@ -359,15 +405,18 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
int index = qpn - info->base_qpn;
int err = 0;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
- mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac,
+ qpn, entry->reg_id);
mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
+ entry->reg_id = 0;
mlx4_register_mac(dev, port, new_mac);
- err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+ err = mlx4_uc_steer_add(dev, port, entry->mac,
+ &qpn, &entry->reg_id);
return err;
}
@@ -726,14 +775,15 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
enum {
MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */
MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */
+ MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
MLX4_CHANGE_PORT_VL_CAP = 21,
MLX4_CHANGE_PORT_MTU_CAP = 22,
};
-int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
struct mlx4_cmd_mailbox *mailbox;
- int err, vl_cap;
+ int err, vl_cap, pkey_tbl_flag = 0;
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
return 0;
@@ -746,11 +796,17 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
+ if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
+ pkey_tbl_flag = 1;
+ ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
+ }
+
/* IB VL CAP enum isn't used by the firmware, just numerical values */
for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
((__be32 *) mailbox->buf)[0] = cpu_to_be32(
(1 << MLX4_CHANGE_PORT_MTU_CAP) |
(1 << MLX4_CHANGE_PORT_VL_CAP) |
+ (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
(vl_cap << MLX4_SET_PORT_VL_CAP));
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
@@ -803,8 +859,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
MCAST_DIRECT : MCAST_DEFAULT;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+ if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b83bc928d52a..9ee4725363d5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -237,13 +237,19 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->mtt_base = profile[i].start;
break;
case MLX4_RES_MCG:
- dev->caps.num_mgms = profile[i].num >> 1;
- dev->caps.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
init_hca->log_mc_entry_sz =
ilog2(mlx4_get_mgm_entry_size(dev));
init_hca->log_mc_table_sz = profile[i].log_num;
- init_hca->log_mc_hash_sz = profile[i].log_num - 1;
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ dev->caps.num_mgms = profile[i].num;
+ } else {
+ init_hca->log_mc_hash_sz =
+ profile[i].log_num - 1;
+ dev->caps.num_mgms = profile[i].num >> 1;
+ dev->caps.num_amgms = profile[i].num >> 1;
+ }
break;
default:
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b45d0e7f6ab0..94ceddd17ab2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -41,13 +41,12 @@
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
+#include <linux/if_ether.h>
#include "mlx4.h"
#include "fw.h"
#define MLX4_MAC_VALID (1ull << 63)
-#define MLX4_MAC_MASK 0x7fffffffffffffffULL
-#define ETH_ALEN 6
struct mac_res {
struct list_head list;
@@ -57,7 +56,8 @@ struct mac_res {
struct res_common {
struct list_head list;
- u32 res_id;
+ struct rb_node node;
+ u64 res_id;
int owner;
int state;
int from_state;
@@ -189,6 +189,58 @@ struct res_xrcdn {
int port;
};
+enum res_fs_rule_states {
+ RES_FS_RULE_BUSY = RES_ANY_BUSY,
+ RES_FS_RULE_ALLOCATED,
+};
+
+struct res_fs_rule {
+ struct res_common com;
+};
+
+static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
+{
+ struct rb_node *node = root->rb_node;
+
+ while (node) {
+ struct res_common *res = container_of(node, struct res_common,
+ node);
+
+ if (res_id < res->res_id)
+ node = node->rb_left;
+ else if (res_id > res->res_id)
+ node = node->rb_right;
+ else
+ return res;
+ }
+ return NULL;
+}
+
+static int res_tracker_insert(struct rb_root *root, struct res_common *res)
+{
+ struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ struct res_common *this = container_of(*new, struct res_common,
+ node);
+
+ parent = *new;
+ if (res->res_id < this->res_id)
+ new = &((*new)->rb_left);
+ else if (res->res_id > this->res_id)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&res->node, parent, new);
+ rb_insert_color(&res->node, root);
+
+ return 0;
+}
+
/* For Debug uses */
static const char *ResourceType(enum mlx4_resource rt)
{
@@ -201,6 +253,7 @@ static const char *ResourceType(enum mlx4_resource rt)
case RES_MAC: return "RES_MAC";
case RES_EQ: return "RES_EQ";
case RES_COUNTER: return "RES_COUNTER";
+ case RES_FS_RULE: return "RES_FS_RULE";
case RES_XRCD: return "RES_XRCD";
default: return "Unknown resource type !!!";
};
@@ -228,8 +281,7 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
dev->num_slaves);
for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
- INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
- GFP_ATOMIC|__GFP_NOWARN);
+ priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
spin_lock_init(&priv->mfunc.master.res_tracker.lock);
return 0 ;
@@ -277,11 +329,11 @@ static void *find_res(struct mlx4_dev *dev, int res_id,
{
struct mlx4_priv *priv = mlx4_priv(dev);
- return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
- res_id);
+ return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+ res_id);
}
-static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type,
void *res)
{
@@ -307,7 +359,7 @@ static int get_res(struct mlx4_dev *dev, int slave, int res_id,
r->from_state = r->state;
r->state = RES_ANY_BUSY;
- mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+ mlx4_dbg(dev, "res %s id 0x%llx to busy\n",
ResourceType(type), r->res_id);
if (res)
@@ -320,7 +372,7 @@ exit:
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource type,
- int res_id, int *slave)
+ u64 res_id, int *slave)
{
struct res_common *r;
@@ -341,7 +393,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
return err;
}
-static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
enum mlx4_resource type)
{
struct res_common *r;
@@ -473,7 +525,21 @@ static struct res_common *alloc_xrcdn_tr(int id)
return &ret->com;
}
-static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+static struct res_common *alloc_fs_rule_tr(u64 id)
+{
+ struct res_fs_rule *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_FS_RULE_ALLOCATED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
int extra)
{
struct res_common *ret;
@@ -506,6 +572,9 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
case RES_XRCD:
ret = alloc_xrcdn_tr(id);
break;
+ case RES_FS_RULE:
+ ret = alloc_fs_rule_tr(id);
+ break;
default:
return NULL;
}
@@ -515,7 +584,7 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
return ret;
}
-static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
int i;
@@ -523,7 +592,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
struct mlx4_priv *priv = mlx4_priv(dev);
struct res_common **res_arr;
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
- struct radix_tree_root *root = &tracker->res_tree[type];
+ struct rb_root *root = &tracker->res_tree[type];
res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
if (!res_arr)
@@ -546,7 +615,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
err = -EEXIST;
goto undo;
}
- err = radix_tree_insert(root, base + i, res_arr[i]);
+ err = res_tracker_insert(root, res_arr[i]);
if (err)
goto undo;
list_add_tail(&res_arr[i]->list,
@@ -559,7 +628,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
undo:
for (--i; i >= base; --i)
- radix_tree_delete(&tracker->res_tree[type], i);
+ rb_erase(&res_arr[i]->node, root);
spin_unlock_irq(mlx4_tlock(dev));
@@ -638,6 +707,16 @@ static int remove_xrcdn_ok(struct res_xrcdn *res)
return 0;
}
+static int remove_fs_rule_ok(struct res_fs_rule *res)
+{
+ if (res->com.state == RES_FS_RULE_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_FS_RULE_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
static int remove_cq_ok(struct res_cq *res)
{
if (res->com.state == RES_CQ_BUSY)
@@ -679,15 +758,17 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
return remove_counter_ok((struct res_counter *)res);
case RES_XRCD:
return remove_xrcdn_ok((struct res_xrcdn *)res);
+ case RES_FS_RULE:
+ return remove_fs_rule_ok((struct res_fs_rule *)res);
default:
return -EINVAL;
}
}
-static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
enum mlx4_resource type, int extra)
{
- int i;
+ u64 i;
int err;
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
@@ -695,7 +776,7 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
spin_lock_irq(mlx4_tlock(dev));
for (i = base; i < base + count; ++i) {
- r = radix_tree_lookup(&tracker->res_tree[type], i);
+ r = res_tracker_lookup(&tracker->res_tree[type], i);
if (!r) {
err = -ENOENT;
goto out;
@@ -710,8 +791,8 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
}
for (i = base; i < base + count; ++i) {
- r = radix_tree_lookup(&tracker->res_tree[type], i);
- radix_tree_delete(&tracker->res_tree[type], i);
+ r = res_tracker_lookup(&tracker->res_tree[type], i);
+ rb_erase(&r->node, &tracker->res_tree[type]);
list_del(&r->list);
kfree(r);
}
@@ -733,7 +814,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+ r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -741,7 +822,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
else {
switch (state) {
case RES_QP_BUSY:
- mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+ mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
__func__, r->com.res_id);
err = -EBUSY;
break;
@@ -750,7 +831,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
if (r->com.state == RES_QP_MAPPED && !alloc)
break;
- mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+ mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
err = -EINVAL;
break;
@@ -759,7 +840,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
r->com.state == RES_QP_HW)
break;
else {
- mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+ mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
r->com.res_id);
err = -EINVAL;
}
@@ -779,7 +860,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
r->com.to_state = state;
r->com.state = RES_QP_BUSY;
if (qp)
- *qp = (struct res_qp *)r;
+ *qp = r;
}
}
@@ -797,7 +878,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -832,7 +913,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
r->com.to_state = state;
r->com.state = RES_MPT_BUSY;
if (mpt)
- *mpt = (struct res_mpt *)r;
+ *mpt = r;
}
}
@@ -850,7 +931,7 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -898,7 +979,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
int err;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+ r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -952,7 +1033,7 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
int err = 0;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+ r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
if (!r)
err = -ENOENT;
else if (r->com.owner != slave)
@@ -1001,7 +1082,7 @@ static void res_abort_move(struct mlx4_dev *dev, int slave,
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[type], id);
+ r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->from_state;
spin_unlock_irq(mlx4_tlock(dev));
@@ -1015,7 +1096,7 @@ static void res_end_move(struct mlx4_dev *dev, int slave,
struct res_common *r;
spin_lock_irq(mlx4_tlock(dev));
- r = radix_tree_lookup(&tracker->res_tree[type], id);
+ r = res_tracker_lookup(&tracker->res_tree[type], id);
if (r && (r->owner == slave))
r->state = r->to_state;
spin_unlock_irq(mlx4_tlock(dev));
@@ -2695,6 +2776,60 @@ ex_put:
return err;
}
+int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EOPNOTSUPP;
+
+ err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
+ vhcr->in_modifier, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+
+ err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0);
+ if (err) {
+ mlx4_err(dev, "Fail to add flow steering resources.\n ");
+ /* detach rule*/
+ mlx4_cmd(dev, vhcr->out_param, 0, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ }
+ return err;
+}
+
+int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+
+ if (dev->caps.steering_mode !=
+ MLX4_STEERING_MODE_DEVICE_MANAGED)
+ return -EOPNOTSUPP;
+
+ err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
+ if (err) {
+ mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+ return err;
+ }
+
+ err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ return err;
+}
+
enum {
BUSY_MAX_RETRIES = 10
};
@@ -2751,7 +2886,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
if (r->state == RES_ANY_BUSY) {
if (print)
mlx4_dbg(dev,
- "%s id 0x%x is busy\n",
+ "%s id 0x%llx is busy\n",
ResourceType(type),
r->res_id);
++busy;
@@ -2817,8 +2952,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
switch (state) {
case RES_QP_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_QP],
- qp->com.res_id);
+ rb_erase(&qp->com.node,
+ &tracker->res_tree[RES_QP]);
list_del(&qp->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(qp);
@@ -2888,8 +3023,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
case RES_SRQ_ALLOCATED:
__mlx4_srq_free_icm(dev, srqn);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_SRQ],
- srqn);
+ rb_erase(&srq->com.node,
+ &tracker->res_tree[RES_SRQ]);
list_del(&srq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(srq);
@@ -2954,8 +3089,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
case RES_CQ_ALLOCATED:
__mlx4_cq_free_icm(dev, cqn);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_CQ],
- cqn);
+ rb_erase(&cq->com.node,
+ &tracker->res_tree[RES_CQ]);
list_del(&cq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(cq);
@@ -3017,8 +3152,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
case RES_MPT_RESERVED:
__mlx4_mr_release(dev, mpt->key);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_MPT],
- mptn);
+ rb_erase(&mpt->com.node,
+ &tracker->res_tree[RES_MPT]);
list_del(&mpt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(mpt);
@@ -3086,8 +3221,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
__mlx4_free_mtt_range(dev, base,
mtt->order);
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_MTT],
- base);
+ rb_erase(&mtt->com.node,
+ &tracker->res_tree[RES_MTT]);
list_del(&mtt->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(mtt);
@@ -3104,6 +3239,58 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
spin_unlock_irq(mlx4_tlock(dev));
}
+static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *fs_rule_list =
+ &tracker->slave_list[slave].res_list[RES_FS_RULE];
+ struct res_fs_rule *fs_rule;
+ struct res_fs_rule *tmp;
+ int state;
+ u64 base;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_FS_RULE);
+ if (err)
+ mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
+ slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (fs_rule->com.owner == slave) {
+ base = fs_rule->com.res_id;
+ state = fs_rule->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_FS_RULE_ALLOCATED:
+ /* detach rule */
+ err = mlx4_cmd(dev, base, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ rb_erase(&fs_rule->com.node,
+ &tracker->res_tree[RES_FS_RULE]);
+ list_del(&fs_rule->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(fs_rule);
+ state = 0;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -3133,8 +3320,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
switch (state) {
case RES_EQ_RESERVED:
spin_lock_irq(mlx4_tlock(dev));
- radix_tree_delete(&tracker->res_tree[RES_EQ],
- eqn);
+ rb_erase(&eq->com.node,
+ &tracker->res_tree[RES_EQ]);
list_del(&eq->com.list);
spin_unlock_irq(mlx4_tlock(dev));
kfree(eq);
@@ -3191,7 +3378,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
if (counter->com.owner == slave) {
index = counter->com.res_id;
- radix_tree_delete(&tracker->res_tree[RES_COUNTER], index);
+ rb_erase(&counter->com.node,
+ &tracker->res_tree[RES_COUNTER]);
list_del(&counter->com.list);
kfree(counter);
__mlx4_counter_free(dev, index);
@@ -3220,7 +3408,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
if (xrcd->com.owner == slave) {
xrcdn = xrcd->com.res_id;
- radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn);
+ rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
list_del(&xrcd->com.list);
kfree(xrcd);
__mlx4_xrcd_free(dev, xrcdn);
@@ -3244,5 +3432,6 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
rem_slave_mtts(dev, slave);
rem_slave_counters(dev, slave);
rem_slave_xrcdns(dev, slave);
+ rem_slave_fs_rule(dev, slave);
mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 5e313e9a252f..1540ebeb8669 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -422,7 +422,7 @@ static void ks8851_read_mac_addr(struct net_device *dev)
*
* Get or create the initial mac address for the device and then set that
* into the station address register. If there is an EEPROM present, then
- * we try that. If no valid mac address is found we use random_ether_addr()
+ * we try that. If no valid mac address is found we use eth_random_addr()
* to create a new one.
*/
static void ks8851_init_mac(struct ks8851_net *ks)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 5ffde23ac8fb..38529edfe350 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -16,8 +16,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/**
- * Supports:
+/* Supports:
* KS8851 16bit MLL chip from Micrel Inc.
*/
@@ -35,7 +34,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <asm/io.h>
+#include <linux/ks8851_mll.h>
#define DRV_NAME "ks8851_mll"
@@ -465,8 +464,7 @@ static int msg_enable;
#define BE1 0x2000 /* Byte Enable 1 */
#define BE0 0x1000 /* Byte Enable 0 */
-/**
- * register read/write calls.
+/* register read/write calls.
*
* All these calls issue transactions to access the chip's registers. They
* all require that the necessary lock is held to prevent accesses when the
@@ -1103,7 +1101,7 @@ static void ks_set_grpaddr(struct ks_net *ks)
}
} /* ks_set_grpaddr */
-/*
+/**
* ks_clear_mcast - clear multicast information
*
* @ks : The chip information
@@ -1515,6 +1513,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
struct net_device *netdev;
struct ks_net *ks;
u16 id, data;
+ struct ks8851_mll_platform_data *pdata;
io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -1596,17 +1595,27 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
ks_disable_qmu(ks);
ks_setup(ks);
ks_setup_int(ks);
- memcpy(netdev->dev_addr, ks->mac_addr, 6);
data = ks_rdreg16(ks, KS_OBCR);
ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
- /**
- * If you want to use the default MAC addr,
- * comment out the 2 functions below.
- */
+ /* overwriting the default MAC address */
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ netdev_err(netdev, "No platform data\n");
+ err = -ENODEV;
+ goto err_pdata;
+ }
+ memcpy(ks->mac_addr, pdata->mac_addr, 6);
+ if (!is_valid_ether_addr(ks->mac_addr)) {
+ /* Use random MAC address if none passed */
+ eth_random_addr(ks->mac_addr);
+ netdev_info(netdev, "Using random mac address\n");
+ }
+ netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
+
+ memcpy(netdev->dev_addr, ks->mac_addr, 6);
- random_ether_addr(netdev->dev_addr);
ks_set_mac(ks, netdev->dev_addr);
id = ks_rdreg16(ks, KS_CIDER);
@@ -1615,6 +1624,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
return 0;
+err_pdata:
+ unregister_netdev(netdev);
err_register:
err_get_irq:
iounmap(ks->hw_addr_cmd);
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index eaf9ff0262a9..318fee91c79d 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -3913,7 +3913,7 @@ static void hw_start_rx(struct ksz_hw *hw)
hw->rx_stop = 2;
}
-/*
+/**
* hw_stop_rx - stop receiving
* @hw: The hardware instance.
*
@@ -4480,14 +4480,12 @@ static void ksz_init_rx_buffers(struct dev_info *adapter)
dma_buf->len = adapter->mtu;
if (!dma_buf->skb)
dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
- if (dma_buf->skb && !dma_buf->dma) {
- dma_buf->skb->dev = adapter->dev;
+ if (dma_buf->skb && !dma_buf->dma)
dma_buf->dma = pci_map_single(
adapter->pdev,
skb_tail_pointer(dma_buf->skb),
dma_buf->len,
PCI_DMA_FROMDEVICE);
- }
/* Set descriptor. */
set_rx_buf(desc, dma_buf->dma);
@@ -4881,8 +4879,8 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
left = hw_alloc_pkt(hw, skb->len, num);
if (left) {
if (left < num ||
- ((CHECKSUM_PARTIAL == skb->ip_summed) &&
- (ETH_P_IPV6 == htons(skb->protocol)))) {
+ (CHECKSUM_PARTIAL == skb->ip_summed &&
+ skb->protocol == htons(ETH_P_IPV6))) {
struct sk_buff *org_skb = skb;
skb = netdev_alloc_skb(dev, org_skb->len);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 90153fc983cb..fa85cf1353fd 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3775,7 +3775,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->num_slices = 1;
msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- ncpus = num_online_cpus();
+ ncpus = netif_get_num_default_rss_queues();
if (myri10ge_max_slices == 1 || msix_cap == 0 ||
(myri10ge_max_slices == -1 && ncpus < 2))
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bb367582c1e8..d958c2299372 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3377,7 +3377,7 @@ static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
} while (cnt < 20);
return ret;
}
-/*
+/**
* check_pci_device_id - Checks if the device id is supported
* @id : device id
* Description: Function to check if the pci device id is supported by driver.
@@ -5238,7 +5238,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
}
/**
- * s2io_set_mac_addr driver entry point
+ * s2io_set_mac_addr - driver entry point
*/
static int s2io_set_mac_addr(struct net_device *dev, void *p)
@@ -6088,7 +6088,7 @@ static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
}
/**
- * s2io-link_test - verifies the link state of the nic
+ * s2io_link_test - verifies the link state of the nic
* @sp ; private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data: variable that returns the result of each of the test conducted by
@@ -6116,9 +6116,9 @@ static int s2io_link_test(struct s2io_nic *sp, uint64_t *data)
/**
* s2io_rldram_test - offline test for access to the RldRam chip on the NIC
- * @sp - private member of the device structure, which is a pointer to the
+ * @sp: private member of the device structure, which is a pointer to the
* s2io_nic structure.
- * @data - variable that returns the result of each of the test
+ * @data: variable that returns the result of each of the test
* conducted by the driver.
* Description:
* This is one of the offline test that tests the read and write
@@ -6946,9 +6946,9 @@ static int rxd_owner_bit_reset(struct s2io_nic *sp)
if (sp->rxd_mode == RXD_MODE_3B)
ba = &ring->ba[j][k];
if (set_rxd_buffer_pointer(sp, rxdp, ba, &skb,
- (u64 *)&temp0_64,
- (u64 *)&temp1_64,
- (u64 *)&temp2_64,
+ &temp0_64,
+ &temp1_64,
+ &temp2_64,
size) == -ENOMEM) {
return 0;
}
@@ -7149,7 +7149,7 @@ static int s2io_card_up(struct s2io_nic *sp)
int i, ret = 0;
struct config_param *config;
struct mac_info *mac_control;
- struct net_device *dev = (struct net_device *)sp->dev;
+ struct net_device *dev = sp->dev;
u16 interruptible;
/* Initialize the H/W I/O registers */
@@ -7325,7 +7325,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
struct s2io_nic *sp = ring_data->nic;
- struct net_device *dev = (struct net_device *)ring_data->dev;
+ struct net_device *dev = ring_data->dev;
struct sk_buff *skb = (struct sk_buff *)
((unsigned long)rxdp->Host_Control);
int ring_no = ring_data->ring_no;
@@ -7508,7 +7508,7 @@ aggregate:
static void s2io_link(struct s2io_nic *sp, int link)
{
- struct net_device *dev = (struct net_device *)sp->dev;
+ struct net_device *dev = sp->dev;
struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
if (link != sp->last_link_state) {
@@ -8280,7 +8280,7 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
return -1;
}
- *ip = (struct iphdr *)((u8 *)buffer + ip_off);
+ *ip = (struct iphdr *)(buffer + ip_off);
ip_len = (u8)((*ip)->ihl);
ip_len <<= 2;
*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 98e2c10ae08b..32d06824fe3e 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2346,7 +2346,7 @@ void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
for (i = 0; i < nreq; i++)
vxge_os_dma_malloc_async(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
blockpool->hldev, VXGE_HW_BLOCK_SIZE);
}
@@ -2428,13 +2428,13 @@ __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
break;
pci_unmap_single(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
((struct __vxge_hw_blockpool_entry *)p)->length,
PCI_DMA_BIDIRECTIONAL);
vxge_os_dma_free(
- ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+ (blockpool->hldev)->pdev,
((struct __vxge_hw_blockpool_entry *)p)->memblock,
&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
@@ -4059,7 +4059,7 @@ __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
- vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
+ vpath = &hldev->virtual_paths[vp_id];
if (vpath->ringh) {
status = __vxge_hw_ring_reset(vpath->ringh);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 5046a64f0fe8..9e0c1eed5dc5 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -1922,7 +1922,7 @@ realloc:
/* misaligned, free current one and try allocating
* size + VXGE_CACHE_LINE_SIZE memory
*/
- kfree((void *) vaddr);
+ kfree(vaddr);
size += VXGE_CACHE_LINE_SIZE;
realloc_flag = 1;
goto realloc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 51387c31914b..de2190443510 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1134,7 +1134,7 @@ static void vxge_set_multicast(struct net_device *dev)
"%s:%d", __func__, __LINE__);
vdev = netdev_priv(dev);
- hldev = (struct __vxge_hw_device *)vdev->devh;
+ hldev = vdev->devh;
if (unlikely(!is_vxge_card_up(vdev)))
return;
@@ -3131,12 +3131,12 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
u64 packets, bytes, multicast;
do {
- start = u64_stats_fetch_begin(&rxstats->syncp);
+ start = u64_stats_fetch_begin_bh(&rxstats->syncp);
packets = rxstats->rx_frms;
multicast = rxstats->rx_mcast;
bytes = rxstats->rx_bytes;
- } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&rxstats->syncp, start));
net_stats->rx_packets += packets;
net_stats->rx_bytes += bytes;
@@ -3146,11 +3146,11 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
net_stats->rx_dropped += rxstats->rx_dropped;
do {
- start = u64_stats_fetch_begin(&txstats->syncp);
+ start = u64_stats_fetch_begin_bh(&txstats->syncp);
packets = txstats->tx_frms;
bytes = txstats->tx_bytes;
- } while (u64_stats_fetch_retry(&txstats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&txstats->syncp, start));
net_stats->tx_packets += packets;
net_stats->tx_bytes += bytes;
@@ -3687,7 +3687,8 @@ static int __devinit vxge_config_vpaths(
return 0;
if (!driver_config->g_no_cpus)
- driver_config->g_no_cpus = num_online_cpus();
+ driver_config->g_no_cpus =
+ netif_get_num_default_rss_queues();
driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1;
if (!driver_config->vpath_per_dev)
@@ -3989,16 +3990,16 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
continue;
vxge_debug_ll_config(VXGE_TRACE,
"%s: MTU size - %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].mtu);
vxge_debug_init(VXGE_TRACE,
"%s: VLAN tag stripping %s", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].rpa_strip_vlan_tag
? "Enabled" : "Disabled");
vxge_debug_ll_config(VXGE_TRACE,
"%s: Max frags : %d", vdev->ndev->name,
- ((struct __vxge_hw_device *)(vdev->devh))->
+ ((vdev->devh))->
config.vp_config[i].fifo.max_frags);
break;
}
@@ -4260,9 +4261,7 @@ static int vxge_probe_fw_update(struct vxgedev *vdev)
if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
VXGE_FW_VER(maj, min, 0)) {
vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
- " be used with this driver.\n"
- "Please get the latest version from "
- "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+ " be used with this driver.",
VXGE_DRIVER_NAME, maj, min, bld);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index 35f3e7552ec2..36ca40f8f249 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -430,8 +430,7 @@ void vxge_initialize_ethtool_ops(struct net_device *ndev);
enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
-/**
- * #define VXGE_DEBUG_INIT: debug for initialization functions
+/* #define VXGE_DEBUG_INIT: debug for initialization functions
* #define VXGE_DEBUG_TX : debug transmit related functions
* #define VXGE_DEBUG_RX : debug recevice related functions
* #define VXGE_DEBUG_MEM : debug memory module
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 5954fa264da1..99749bd07d72 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -533,8 +533,7 @@ __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
/* notify driver */
if (hldev->uld_callbacks->crit_err)
- hldev->uld_callbacks->crit_err(
- (struct __vxge_hw_device *)hldev,
+ hldev->uld_callbacks->crit_err(hldev,
type, vp_id);
out:
@@ -1322,7 +1321,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
/* check whether it is not the end */
if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
- vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
+ vxge_assert((rxdp)->host_control !=
0);
++ring->cmpl_cnt;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 928913c4f3ff..f45def01a98e 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3218,7 +3218,7 @@ static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
}
/**
- * nv_update_linkspeed: Setup the MAC according to the link partner
+ * nv_update_linkspeed - Setup the MAC according to the link partner
* @dev: Network device to be configured
*
* The function queries the PHY and checks if there is a link partner.
@@ -3552,8 +3552,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
return IRQ_HANDLED;
}
-/**
- * All _optimized functions are used to help increase performance
+/* All _optimized functions are used to help increase performance
* (reduce CPU and increase throughput). They use descripter version 3,
* compiler directives, and reduce memory accesses.
*/
@@ -3776,7 +3775,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (events & NVREG_IRQ_RECOVER_ERROR) {
- spin_lock_irq(&np->lock);
+ spin_lock_irqsave(&np->lock, flags);
/* disable interrupts on the nic */
writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
pci_push(base);
@@ -3786,7 +3785,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
np->recover_error = 1;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
- spin_unlock_irq(&np->lock);
+ spin_unlock_irqrestore(&np->lock, flags);
break;
}
if (unlikely(i > max_interrupt_work)) {
@@ -5183,6 +5182,7 @@ static const struct ethtool_ops ops = {
.get_ethtool_stats = nv_get_ethtool_stats,
.get_sset_count = nv_get_sset_count,
.self_test = nv_self_test,
+ .get_ts_info = ethtool_op_get_ts_info,
};
/* The mgmt unit and driver use a semaphore to access the phy during init */
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 083d6715335c..4069edab229e 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -44,7 +44,6 @@
#include <linux/of_net.h>
#include <linux/types.h>
-#include <linux/delay.h>
#include <linux/io.h>
#include <mach/board.h>
#include <mach/platform.h>
@@ -52,7 +51,6 @@
#define MODNAME "lpc-eth"
#define DRV_VERSION "1.00"
-#define PHYDEF_ADDR 0x00
#define ENET_MAXF_SIZE 1536
#define ENET_RX_DESC 48
@@ -416,9 +414,6 @@ static bool use_iram_for_net(struct device *dev)
#define TXDESC_CONTROL_LAST (1 << 30)
#define TXDESC_CONTROL_INT (1 << 31)
-static int lpc_eth_hard_start_xmit(struct sk_buff *skb,
- struct net_device *ndev);
-
/*
* Structure of a TX/RX descriptors and RX status
*/
@@ -440,7 +435,7 @@ struct netdata_local {
spinlock_t lock;
void __iomem *net_base;
u32 msg_enable;
- struct sk_buff *skb[ENET_TX_DESC];
+ unsigned int skblen[ENET_TX_DESC];
unsigned int last_tx_idx;
unsigned int num_used_tx_buffs;
struct mii_bus *mii_bus;
@@ -903,12 +898,11 @@ err_out:
static void __lpc_handle_xmit(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
- struct sk_buff *skb;
u32 txcidx, *ptxstat, txstat;
txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
while (pldat->last_tx_idx != txcidx) {
- skb = pldat->skb[pldat->last_tx_idx];
+ unsigned int skblen = pldat->skblen[pldat->last_tx_idx];
/* A buffer is available, get buffer status */
ptxstat = &pldat->tx_stat_v[pldat->last_tx_idx];
@@ -945,9 +939,8 @@ static void __lpc_handle_xmit(struct net_device *ndev)
} else {
/* Update stats */
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_bytes += skblen;
}
- dev_kfree_skb_irq(skb);
txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
}
@@ -1132,7 +1125,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
/* Save the buffer and increment the buffer counter */
- pldat->skb[txidx] = skb;
+ pldat->skblen[txidx] = len;
pldat->num_used_tx_buffs++;
/* Start transmit */
@@ -1147,6 +1140,7 @@ static int lpc_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irq(&pldat->lock);
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1442,7 +1436,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
res->start);
netdev_dbg(ndev, "IO address size :%d\n",
res->end - res->start + 1);
- netdev_err(ndev, "IO address (mapped) :0x%p\n",
+ netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
pldat->net_base);
netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
index e48f084ad226..5ae03e815ee9 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_api.c
@@ -60,7 +60,7 @@ static void pch_gbe_plat_get_bus_info(struct pch_gbe_hw *hw)
/**
* pch_gbe_plat_init_hw - Initialize hardware
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed-EBUSY
*/
@@ -108,7 +108,7 @@ static void pch_gbe_plat_init_function_pointers(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_setup_init_funcs - Initializes function pointers
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
@@ -137,7 +137,7 @@ inline void pch_gbe_hal_get_bus_info(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_init_hw - Initialize hardware
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
@@ -155,7 +155,7 @@ inline s32 pch_gbe_hal_init_hw(struct pch_gbe_hw *hw)
* @hw: Pointer to the HW structure
* @offset: The register to read
* @data: The buffer to store the 16-bit read.
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -172,7 +172,7 @@ inline s32 pch_gbe_hal_read_phy_reg(struct pch_gbe_hw *hw, u32 offset,
* @hw: Pointer to the HW structure
* @offset: The register to read
* @data: The value to write.
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -211,7 +211,7 @@ inline void pch_gbe_hal_phy_sw_reset(struct pch_gbe_hw *hw)
/**
* pch_gbe_hal_read_mac_addr - Reads MAC address
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successfully
* ENOSYS: Function is not registered
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index ac4e72d529e5..9dbf38c10a68 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -77,7 +77,7 @@ static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
* pch_gbe_get_settings - Get device-specific settings
* @netdev: Network interface device structure
* @ecmd: Ethtool command
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -100,7 +100,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
* pch_gbe_set_settings - Set device-specific settings
* @netdev: Network interface device structure
* @ecmd: Ethtool command
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -220,7 +220,7 @@ static void pch_gbe_get_wol(struct net_device *netdev,
* pch_gbe_set_wol - Turn Wake-on-Lan on or off
* @netdev: Network interface device structure
* @wol: Pointer of wake-on-Lan information straucture
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -248,7 +248,7 @@ static int pch_gbe_set_wol(struct net_device *netdev,
/**
* pch_gbe_nway_reset - Restart autonegotiation
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -398,7 +398,7 @@ static void pch_gbe_get_pauseparam(struct net_device *netdev,
* pch_gbe_set_pauseparam - Set pause paramters
* @netdev: Network interface device structure
* @pause: Pause parameters structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3787c64ee71c..b1006563f736 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -301,7 +301,7 @@ inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
/**
* pch_gbe_mac_read_mac_addr - Read MAC address
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
*/
s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
@@ -483,7 +483,7 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
/**
* pch_gbe_mac_force_mac_fc - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -639,7 +639,7 @@ static void pch_gbe_mac_set_pause_packet(struct pch_gbe_hw *hw)
/**
* pch_gbe_alloc_queues - Allocate memory for all rings
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -670,7 +670,7 @@ static void pch_gbe_init_stats(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_init_phy - Initialize PHY
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -720,7 +720,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
* @netdev: Network interface device structure
* @addr: Phy ID
* @reg: Access location
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1364,7 +1364,7 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
* pch_gbe_intr - Interrupt Handler
* @irq: Interrupt number
* @data: Pointer to a network interface device structure
- * Returns
+ * Returns:
* - IRQ_HANDLED: Our interrupt
* - IRQ_NONE: Not our interrupt
*/
@@ -1566,7 +1566,7 @@ static void pch_gbe_alloc_tx_buffers(struct pch_gbe_adapter *adapter,
* pch_gbe_clean_tx - Reclaim resources after transmit completes
* @adapter: Board private structure
* @tx_ring: Tx descriptor ring
- * Returns
+ * Returns:
* true: Cleaned the descriptor
* false: Not cleaned the descriptor
*/
@@ -1660,7 +1660,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
* @rx_ring: Rx descriptor ring
* @work_done: Completed count
* @work_to_do: Request count
- * Returns
+ * Returns:
* true: Cleaned the descriptor
* false: Not cleaned the descriptor
*/
@@ -1775,7 +1775,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
* pch_gbe_setup_tx_resources - Allocate Tx resources (Descriptors)
* @adapter: Board private structure
* @tx_ring: Tx descriptor ring (for a specific queue) to setup
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1822,7 +1822,7 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
* pch_gbe_setup_rx_resources - Allocate Rx resources (Descriptors)
* @adapter: Board private structure
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1899,7 +1899,7 @@ void pch_gbe_free_rx_resources(struct pch_gbe_adapter *adapter,
/**
* pch_gbe_request_irq - Allocate an interrupt line
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -1932,7 +1932,7 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_up - Up GbE network device
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2018,7 +2018,7 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_sw_init - Initialize general software structures (struct pch_gbe_adapter)
* @adapter: Board private structure to initialize
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2057,7 +2057,7 @@ static int pch_gbe_sw_init(struct pch_gbe_adapter *adapter)
/**
* pch_gbe_open - Called when a network interface is made active
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2097,7 +2097,7 @@ err_setup_tx:
/**
* pch_gbe_stop - Disables a network interface
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* 0: Successfully
*/
static int pch_gbe_stop(struct net_device *netdev)
@@ -2117,7 +2117,7 @@ static int pch_gbe_stop(struct net_device *netdev)
* pch_gbe_xmit_frame - Packet transmitting start
* @skb: Socket buffer structure
* @netdev: Network interface device structure
- * Returns
+ * Returns:
* - NETDEV_TX_OK: Normal end
* - NETDEV_TX_BUSY: Error end
*/
@@ -2225,7 +2225,7 @@ static void pch_gbe_set_multi(struct net_device *netdev)
* pch_gbe_set_mac - Change the Ethernet Address of the NIC
* @netdev: Network interface device structure
* @addr: Pointer to an address structure
- * Returns
+ * Returns:
* 0: Successfully
* -EADDRNOTAVAIL: Failed
*/
@@ -2256,7 +2256,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
* pch_gbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: Network interface device structure
* @new_mtu: New value for maximum frame size
- * Returns
+ * Returns:
* 0: Successfully
* -EINVAL: Failed
*/
@@ -2309,7 +2309,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
* pch_gbe_set_features - Reset device after features changed
* @netdev: Network interface device structure
* @features: New features
- * Returns
+ * Returns:
* 0: HW state updated successfully
*/
static int pch_gbe_set_features(struct net_device *netdev,
@@ -2334,7 +2334,7 @@ static int pch_gbe_set_features(struct net_device *netdev,
* @netdev: Network interface device structure
* @ifr: Pointer to ifr structure
* @cmd: Control command
- * Returns
+ * Returns:
* 0: Successfully
* Negative value: Failed
*/
@@ -2369,7 +2369,7 @@ static void pch_gbe_tx_timeout(struct net_device *netdev)
* pch_gbe_napi_poll - NAPI receive and transfer polling callback
* @napi: Pointer of polling device struct
* @budget: The maximum number of a packet
- * Returns
+ * Returns:
* false: Exit the polling mode
* true: Continue the polling mode
*/
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 29e23bec809c..8653c3b81f84 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -139,7 +139,7 @@ MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
/**
* pch_gbe_option - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
@@ -220,7 +220,7 @@ static const struct pch_gbe_opt_list fc_list[] = {
* @value: value
* @opt: option
* @adapter: Board private structure
- * Returns
+ * Returns:
* 0: Successful.
* Negative value: Failed.
*/
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 37ccbe54e62d..eb3dfdbb642b 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 79
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.79"
+#define _NETXEN_NIC_LINUX_SUBVERSION 80
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.80"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index 39730403782f..10468e7932dd 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -489,7 +489,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
int port = adapter->physical_port;
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -511,7 +511,7 @@ netxen_nic_get_pauseparam(struct net_device *dev,
break;
}
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
@@ -534,7 +534,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
int port = adapter->physical_port;
/* read mode */
if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
@@ -577,7 +577,7 @@ netxen_nic_set_pauseparam(struct net_device *dev,
}
NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
} else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
- if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS))
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
return -EIO;
val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
if (port == 0) {
@@ -826,7 +826,12 @@ netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
dump->len = mdump->md_dump_size;
else
dump->len = 0;
- dump->flag = mdump->md_capture_mask;
+
+ if (!mdump->md_enabled)
+ dump->flag = ETH_FW_DUMP_DISABLE;
+ else
+ dump->flag = mdump->md_capture_mask;
+
dump->version = adapter->fw_version;
return 0;
}
@@ -840,8 +845,10 @@ netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
switch (val->flag) {
case NX_FORCE_FW_DUMP_KEY:
- if (!mdump->md_enabled)
- mdump->md_enabled = 1;
+ if (!mdump->md_enabled) {
+ netdev_info(netdev, "FW dump not enabled\n");
+ return 0;
+ }
if (adapter->fw_mdump_rdy) {
netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
return 0;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index de96a948bb7f..946160fa5843 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -365,7 +365,7 @@ static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
return 0;
- if (port > NETXEN_NIU_MAX_XG_PORTS)
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
mac_cfg = 0;
@@ -392,7 +392,7 @@ static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
u32 port = adapter->physical_port;
u16 board_type = adapter->ahw.board_type;
- if (port > NETXEN_NIU_MAX_XG_PORTS)
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
return -EINVAL;
mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 8694124ef77d..bc165f4d0f65 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1437,8 +1437,6 @@ netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
netdev->name, cable_len);
}
- netxen_advert_link_change(adapter, link_status);
-
/* update link parameters */
if (duplex == LINKEVENT_FULL_DUPLEX)
adapter->link_duplex = DUPLEX_FULL;
@@ -1447,6 +1445,8 @@ netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
adapter->module_type = module;
adapter->link_autoneg = autoneg;
adapter->link_speed = link_speed;
+
+ netxen_advert_link_change(adapter, link_status);
}
static void
@@ -1532,8 +1532,6 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
} else
skb->ip_summed = CHECKSUM_NONE;
- skb->dev = adapter->netdev;
-
buffer->skb = NULL;
no_skb:
buffer->state = NETXEN_BUFFER_FREE;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 8680a5dae4a2..eaa1db9fec32 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 28
-#define QLCNIC_LINUX_VERSIONID "5.0.28"
+#define _QLCNIC_LINUX_SUBVERSION 29
+#define QLCNIC_LINUX_VERSIONID "5.0.29"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -258,6 +258,8 @@ struct rcv_desc {
(((sts_data) >> 52) & 0x1)
#define qlcnic_get_lro_sts_seq_number(sts_data) \
((sts_data) & 0x0FFFFFFFF)
+#define qlcnic_get_lro_sts_mss(sts_data1) \
+ ((sts_data1 >> 32) & 0x0FFFF)
struct status_desc {
@@ -610,7 +612,11 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_INVALID_ARGS 6
#define QLCNIC_RCODE_NOT_SUPPORTED 9
+#define QLCNIC_RCODE_NOT_PERMITTED 10
+#define QLCNIC_RCODE_NOT_IMPL 15
+#define QLCNIC_RCODE_INVALID 16
#define QLCNIC_RCODE_TIMEOUT 17
#define QLCNIC_DESTROY_CTX_RESET 0
@@ -623,6 +629,7 @@ struct qlcnic_recv_context {
#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
#define QLCNIC_CAP0_VALIDOFF (1 << 11)
+#define QLCNIC_CAP0_LRO_MSS (1 << 21)
/*
* Context state
@@ -829,6 +836,9 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
+#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
+
+#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -918,6 +928,7 @@ struct qlcnic_ipaddr {
#define QLCNIC_NEED_FLR 0x1000
#define QLCNIC_FW_RESET_OWNER 0x2000
#define QLCNIC_FW_HANG 0x4000
+#define QLCNIC_FW_LRO_MSS_CAP 0x8000
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 8db85244e8ad..b8ead696141e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -53,12 +53,39 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter, struct qlcnic_cmd_args *cmd)
rsp = qlcnic_poll_rsp(adapter);
if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
- dev_err(&pdev->dev, "card response timeout.\n");
+ dev_err(&pdev->dev, "CDRP response timeout.\n");
cmd->rsp.cmd = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
cmd->rsp.cmd = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
- dev_err(&pdev->dev, "failed card response code:0x%x\n",
+ switch (cmd->rsp.cmd) {
+ case QLCNIC_RCODE_INVALID_ARGS:
+ dev_err(&pdev->dev, "CDRP invalid args: 0x%x.\n",
cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_NOT_SUPPORTED:
+ case QLCNIC_RCODE_NOT_IMPL:
+ dev_err(&pdev->dev,
+ "CDRP command not supported: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_NOT_PERMITTED:
+ dev_err(&pdev->dev,
+ "CDRP requested action not permitted: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_INVALID:
+ dev_err(&pdev->dev,
+ "CDRP invalid or unknown cmd received: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ case QLCNIC_RCODE_TIMEOUT:
+ dev_err(&pdev->dev, "CDRP command timeout: 0x%x.\n",
+ cmd->rsp.cmd);
+ break;
+ default:
+ dev_err(&pdev->dev, "CDRP command failed: 0x%x.\n",
+ cmd->rsp.cmd);
+ }
} else if (rsp == QLCNIC_CDRP_RSP_OK) {
cmd->rsp.cmd = QLCNIC_RCODE_SUCCESS;
if (cmd->rsp.arg2)
@@ -237,6 +264,9 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ cap |= QLCNIC_CAP0_LRO_MSS;
+
prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
msix_handler);
prq->txrx_sds_binding = nsds_rings - 1;
@@ -954,9 +984,6 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
- } else {
- dev_info(&adapter->pdev->dev,
- "%s: Get mac stats failed =%d.\n", __func__, err);
}
dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 6ced3195aad3..28a6b28192e3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -588,6 +588,7 @@ enum {
#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
+#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index 799fd40ed03a..0bcda9c51e9b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -1488,8 +1488,6 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb_checksum_none_assert(skb);
}
- skb->dev = adapter->netdev;
-
buffer->skb = NULL;
return skb;
@@ -1653,6 +1651,9 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
length = skb->len;
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+
if (vid != 0xffff)
__vlan_hwaccel_put_tag(skb, vid);
netif_receive_skb(skb);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ad98f4d7919d..212c12193275 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1136,6 +1136,8 @@ static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int ring;
+ u32 capab2;
+
struct qlcnic_host_rds_ring *rds_ring;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1146,6 +1148,12 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ capab2 = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ if (capab2 & QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ }
+
if (qlcnic_fw_create_ctx(adapter))
return -EIO;
@@ -1215,6 +1223,7 @@ __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_napi_disable(adapter);
qlcnic_fw_destroy_ctx(adapter);
+ adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
qlcnic_reset_rx_buffers_list(adapter);
qlcnic_release_tx_buffers(adapter);
@@ -2024,6 +2033,7 @@ qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
vh = (struct vlan_ethhdr *)skb->data;
flags = FLAGS_VLAN_TAGGED;
vlan_tci = vh->h_vlan_TCI;
+ protocol = ntohs(vh->h_vlan_encapsulated_proto);
} else if (vlan_tx_tag_present(skb)) {
flags = FLAGS_VLAN_OOB;
vlan_tci = vlan_tx_tag_get(skb);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 5a639df33f18..a131d7b5d2fe 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,13 +18,15 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.30.00.00-01"
+#define DRV_VERSION "v1.00.00.31"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
#define QLGE_VENDOR_ID 0x1077
#define QLGE_DEVICE_ID_8012 0x8012
#define QLGE_DEVICE_ID_8000 0x8000
+#define QLGE_MEZZ_SSYS_ID_068 0x0068
+#define QLGE_MEZZ_SSYS_ID_180 0x0180
#define MAX_CPUS 8
#define MAX_TX_RINGS MAX_CPUS
#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
@@ -1397,7 +1399,6 @@ struct tx_ring {
struct tx_ring_desc *q; /* descriptor list for the queue */
spinlock_t lock;
atomic_t tx_count; /* counts down for every outstanding IO */
- atomic_t queue_stopped; /* Turns queue off when full. */
struct delayed_work tx_work;
struct ql_adapter *qdev;
u64 tx_packets;
@@ -1535,6 +1536,14 @@ struct nic_stats {
u64 rx_1024_to_1518_pkts;
u64 rx_1519_to_max_pkts;
u64 rx_len_err_pkts;
+ /* Receive Mac Err stats */
+ u64 rx_code_err;
+ u64 rx_oversize_err;
+ u64 rx_undersize_err;
+ u64 rx_preamble_err;
+ u64 rx_frame_len_err;
+ u64 rx_crc_err;
+ u64 rx_err_count;
/*
* These stats come from offset 500h to 5C8h
* in the XGMAC register.
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 8e2c2a74f3a5..6f316ab23257 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -35,10 +35,152 @@
#include "qlge.h"
+struct ql_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
+#define QL_OFF(m) offsetof(struct ql_adapter, m)
+
+static const struct ql_stats ql_gstrings_stats[] = {
+ {"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
+ {"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
+ {"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
+ QL_OFF(nic_stats.tx_mcast_pkts)},
+ {"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
+ QL_OFF(nic_stats.tx_bcast_pkts)},
+ {"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
+ QL_OFF(nic_stats.tx_ucast_pkts)},
+ {"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
+ QL_OFF(nic_stats.tx_ctl_pkts)},
+ {"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
+ QL_OFF(nic_stats.tx_pause_pkts)},
+ {"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
+ QL_OFF(nic_stats.tx_64_pkt)},
+ {"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
+ QL_OFF(nic_stats.tx_65_to_127_pkt)},
+ {"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
+ QL_OFF(nic_stats.tx_128_to_255_pkt)},
+ {"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
+ QL_OFF(nic_stats.tx_256_511_pkt)},
+ {"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
+ QL_OFF(nic_stats.tx_512_to_1023_pkt)},
+ {"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
+ QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
+ {"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
+ QL_OFF(nic_stats.tx_1519_to_max_pkt)},
+ {"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
+ QL_OFF(nic_stats.tx_undersize_pkt)},
+ {"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
+ QL_OFF(nic_stats.tx_oversize_pkt)},
+ {"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
+ {"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
+ QL_OFF(nic_stats.rx_bytes_ok)},
+ {"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
+ {"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
+ QL_OFF(nic_stats.rx_pkts_ok)},
+ {"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
+ QL_OFF(nic_stats.rx_bcast_pkts)},
+ {"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
+ QL_OFF(nic_stats.rx_mcast_pkts)},
+ {"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
+ QL_OFF(nic_stats.rx_ucast_pkts)},
+ {"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
+ QL_OFF(nic_stats.rx_undersize_pkts)},
+ {"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
+ QL_OFF(nic_stats.rx_oversize_pkts)},
+ {"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
+ QL_OFF(nic_stats.rx_jabber_pkts)},
+ {"rx_undersize_fcerr_pkts",
+ QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
+ QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
+ {"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
+ QL_OFF(nic_stats.rx_drop_events)},
+ {"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
+ QL_OFF(nic_stats.rx_fcerr_pkts)},
+ {"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
+ QL_OFF(nic_stats.rx_align_err)},
+ {"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
+ QL_OFF(nic_stats.rx_symbol_err)},
+ {"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
+ QL_OFF(nic_stats.rx_mac_err)},
+ {"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
+ QL_OFF(nic_stats.rx_ctl_pkts)},
+ {"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
+ QL_OFF(nic_stats.rx_pause_pkts)},
+ {"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
+ QL_OFF(nic_stats.rx_64_pkts)},
+ {"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
+ QL_OFF(nic_stats.rx_65_to_127_pkts)},
+ {"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
+ QL_OFF(nic_stats.rx_128_255_pkts)},
+ {"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
+ QL_OFF(nic_stats.rx_256_511_pkts)},
+ {"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
+ QL_OFF(nic_stats.rx_512_to_1023_pkts)},
+ {"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
+ QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
+ {"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
+ QL_OFF(nic_stats.rx_1519_to_max_pkts)},
+ {"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
+ QL_OFF(nic_stats.rx_len_err_pkts)},
+ {"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
+ QL_OFF(nic_stats.rx_code_err)},
+ {"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
+ QL_OFF(nic_stats.rx_oversize_err)},
+ {"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
+ QL_OFF(nic_stats.rx_undersize_err)},
+ {"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
+ QL_OFF(nic_stats.rx_preamble_err)},
+ {"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
+ QL_OFF(nic_stats.rx_frame_len_err)},
+ {"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
+ QL_OFF(nic_stats.rx_crc_err)},
+ {"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
+ QL_OFF(nic_stats.rx_err_count)},
+ {"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
+ {"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
+ {"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
+ {"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
+ {"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
+ {"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
+ {"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
+ {"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
+ QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
+ {"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
+ {"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
+ {"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
+ {"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
+ {"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
+ {"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
+ {"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
+ {"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
+ QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
+ {"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
+ QL_OFF(nic_stats.rx_nic_fifo_drop)},
+};
+
static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
"Loopback test (offline)"
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
+#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
@@ -183,73 +325,19 @@ quit:
QL_DUMP_STAT(qdev);
}
-static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
- {"tx_pkts"},
- {"tx_bytes"},
- {"tx_mcast_pkts"},
- {"tx_bcast_pkts"},
- {"tx_ucast_pkts"},
- {"tx_ctl_pkts"},
- {"tx_pause_pkts"},
- {"tx_64_pkts"},
- {"tx_65_to_127_pkts"},
- {"tx_128_to_255_pkts"},
- {"tx_256_511_pkts"},
- {"tx_512_to_1023_pkts"},
- {"tx_1024_to_1518_pkts"},
- {"tx_1519_to_max_pkts"},
- {"tx_undersize_pkts"},
- {"tx_oversize_pkts"},
- {"rx_bytes"},
- {"rx_bytes_ok"},
- {"rx_pkts"},
- {"rx_pkts_ok"},
- {"rx_bcast_pkts"},
- {"rx_mcast_pkts"},
- {"rx_ucast_pkts"},
- {"rx_undersize_pkts"},
- {"rx_oversize_pkts"},
- {"rx_jabber_pkts"},
- {"rx_undersize_fcerr_pkts"},
- {"rx_drop_events"},
- {"rx_fcerr_pkts"},
- {"rx_align_err"},
- {"rx_symbol_err"},
- {"rx_mac_err"},
- {"rx_ctl_pkts"},
- {"rx_pause_pkts"},
- {"rx_64_pkts"},
- {"rx_65_to_127_pkts"},
- {"rx_128_255_pkts"},
- {"rx_256_511_pkts"},
- {"rx_512_to_1023_pkts"},
- {"rx_1024_to_1518_pkts"},
- {"rx_1519_to_max_pkts"},
- {"rx_len_err_pkts"},
- {"tx_cbfc_pause_frames0"},
- {"tx_cbfc_pause_frames1"},
- {"tx_cbfc_pause_frames2"},
- {"tx_cbfc_pause_frames3"},
- {"tx_cbfc_pause_frames4"},
- {"tx_cbfc_pause_frames5"},
- {"tx_cbfc_pause_frames6"},
- {"tx_cbfc_pause_frames7"},
- {"rx_cbfc_pause_frames0"},
- {"rx_cbfc_pause_frames1"},
- {"rx_cbfc_pause_frames2"},
- {"rx_cbfc_pause_frames3"},
- {"rx_cbfc_pause_frames4"},
- {"rx_cbfc_pause_frames5"},
- {"rx_cbfc_pause_frames6"},
- {"rx_cbfc_pause_frames7"},
- {"rx_nic_fifo_drop"},
-};
-
static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
+ int index;
switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
case ETH_SS_STATS:
- memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
+ for (index = 0; index < QLGE_STATS_LEN; index++) {
+ memcpy(buf + index * ETH_GSTRING_LEN,
+ ql_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
break;
}
}
@@ -260,7 +348,7 @@ static int ql_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_TEST:
return QLGE_TEST_LEN;
case ETH_SS_STATS:
- return ARRAY_SIZE(ql_stats_str_arr);
+ return QLGE_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -271,69 +359,17 @@ ql_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *stats, u64 *data)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- struct nic_stats *s = &qdev->nic_stats;
+ int index, length;
+ length = QLGE_STATS_LEN;
ql_update_stats(qdev);
- *data++ = s->tx_pkts;
- *data++ = s->tx_bytes;
- *data++ = s->tx_mcast_pkts;
- *data++ = s->tx_bcast_pkts;
- *data++ = s->tx_ucast_pkts;
- *data++ = s->tx_ctl_pkts;
- *data++ = s->tx_pause_pkts;
- *data++ = s->tx_64_pkt;
- *data++ = s->tx_65_to_127_pkt;
- *data++ = s->tx_128_to_255_pkt;
- *data++ = s->tx_256_511_pkt;
- *data++ = s->tx_512_to_1023_pkt;
- *data++ = s->tx_1024_to_1518_pkt;
- *data++ = s->tx_1519_to_max_pkt;
- *data++ = s->tx_undersize_pkt;
- *data++ = s->tx_oversize_pkt;
- *data++ = s->rx_bytes;
- *data++ = s->rx_bytes_ok;
- *data++ = s->rx_pkts;
- *data++ = s->rx_pkts_ok;
- *data++ = s->rx_bcast_pkts;
- *data++ = s->rx_mcast_pkts;
- *data++ = s->rx_ucast_pkts;
- *data++ = s->rx_undersize_pkts;
- *data++ = s->rx_oversize_pkts;
- *data++ = s->rx_jabber_pkts;
- *data++ = s->rx_undersize_fcerr_pkts;
- *data++ = s->rx_drop_events;
- *data++ = s->rx_fcerr_pkts;
- *data++ = s->rx_align_err;
- *data++ = s->rx_symbol_err;
- *data++ = s->rx_mac_err;
- *data++ = s->rx_ctl_pkts;
- *data++ = s->rx_pause_pkts;
- *data++ = s->rx_64_pkts;
- *data++ = s->rx_65_to_127_pkts;
- *data++ = s->rx_128_255_pkts;
- *data++ = s->rx_256_511_pkts;
- *data++ = s->rx_512_to_1023_pkts;
- *data++ = s->rx_1024_to_1518_pkts;
- *data++ = s->rx_1519_to_max_pkts;
- *data++ = s->rx_len_err_pkts;
- *data++ = s->tx_cbfc_pause_frames0;
- *data++ = s->tx_cbfc_pause_frames1;
- *data++ = s->tx_cbfc_pause_frames2;
- *data++ = s->tx_cbfc_pause_frames3;
- *data++ = s->tx_cbfc_pause_frames4;
- *data++ = s->tx_cbfc_pause_frames5;
- *data++ = s->tx_cbfc_pause_frames6;
- *data++ = s->tx_cbfc_pause_frames7;
- *data++ = s->rx_cbfc_pause_frames0;
- *data++ = s->rx_cbfc_pause_frames1;
- *data++ = s->rx_cbfc_pause_frames2;
- *data++ = s->rx_cbfc_pause_frames3;
- *data++ = s->rx_cbfc_pause_frames4;
- *data++ = s->rx_cbfc_pause_frames5;
- *data++ = s->rx_cbfc_pause_frames6;
- *data++ = s->rx_cbfc_pause_frames7;
- *data++ = s->rx_nic_fifo_drop;
+ for (index = 0; index < length; index++) {
+ char *p = (char *)qdev +
+ ql_gstrings_stats[index].stat_offset;
+ *data++ = (ql_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
+ }
}
static int ql_get_settings(struct net_device *ndev,
@@ -388,30 +424,33 @@ static void ql_get_drvinfo(struct net_device *ndev,
static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- /* What we support. */
- wol->supported = WAKE_MAGIC;
- /* What we've currently got set. */
- wol->wolopts = qdev->wol;
+ unsigned short ssys_dev = qdev->pdev->subsystem_device;
+
+ /* WOL is only supported for mezz card. */
+ if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
+ ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = qdev->wol;
+ }
}
static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- int status;
+ unsigned short ssys_dev = qdev->pdev->subsystem_device;
+ /* WOL is only supported for mezz card. */
+ if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
+ ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
+ netif_info(qdev, drv, qdev->ndev,
+ "WOL is only supported for mezz card\n");
+ return -EOPNOTSUPP;
+ }
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
qdev->wol = wol->wolopts;
netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
- if (!qdev->wol) {
- u32 wol = 0;
- status = ql_mb_wol_mode(qdev, wol);
- netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
- status == 0 ? "cleared successfully" : "clear failed",
- wol);
- }
-
return 0;
}
@@ -528,6 +567,8 @@ static void ql_self_test(struct net_device *ndev,
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);
+
if (netif_running(ndev)) {
set_bit(QL_SELFTEST, &qdev->flags);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 09d8d33171df..3769f5711cc3 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1433,6 +1433,36 @@ map_error:
return NETDEV_TX_BUSY;
}
+/* Categorizing receive firmware frame errors */
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
+{
+ struct nic_stats *stats = &qdev->nic_stats;
+
+ stats->rx_err_count++;
+
+ switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
+ case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
+ stats->rx_code_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
+ stats->rx_oversize_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
+ stats->rx_undersize_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
+ stats->rx_preamble_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
+ stats->rx_frame_len_err++;
+ break;
+ case IB_MAC_IOCB_RSP_ERR_CRC:
+ stats->rx_crc_err++;
+ default:
+ break;
+ }
+}
+
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
@@ -1499,15 +1529,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
addr = lbq_desc->p.pg_chunk.va;
prefetch(addr);
-
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- rx_ring->rx_errors++;
- goto err_out;
- }
-
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1546,7 +1567,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
struct iphdr *iph =
(struct iphdr *) ((u8 *)addr + ETH_HLEN);
if (!(iph->frag_off &
- cpu_to_be16(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1593,15 +1614,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
memcpy(skb_put(new_skb, length), skb->data, length);
skb = new_skb;
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- dev_kfree_skb_any(skb);
- rx_ring->rx_errors++;
- return;
- }
-
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
ql_check_lb_frame(qdev, skb);
@@ -1619,7 +1631,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
}
prefetch(skb->data);
- skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%s Multicast.\n",
@@ -1654,7 +1665,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
if (!(iph->frag_off &
- ntohs(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG,
qdev->ndev,
@@ -1908,15 +1919,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
return;
}
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- netif_info(qdev, drv, qdev->ndev,
- "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
- dev_kfree_skb_any(skb);
- rx_ring->rx_errors++;
- return;
- }
-
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1934,7 +1936,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
}
prefetch(skb->data);
- skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
(ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
@@ -1968,7 +1969,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph = (struct iphdr *) skb->data;
if (!(iph->frag_off &
- ntohs(IP_MF|IP_OFFSET))) {
+ htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"TCP checksum done!\n");
@@ -1999,6 +2000,12 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
+ return (unsigned long)length;
+ }
+
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
* separate buffers.
@@ -2173,8 +2180,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_write_cq_idx(rx_ring);
tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
- if (atomic_read(&tx_ring->queue_stopped) &&
- (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
/*
* The queue got stopped because the tx_ring was full.
* Wake it up, because it's now at least 25% empty.
@@ -2558,10 +2564,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
netif_info(qdev, tx_queued, qdev->ndev,
- "%s: shutting down tx queue %d du to lack of resources.\n",
+ "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
__func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id);
- atomic_inc(&tx_ring->queue_stopped);
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -2612,6 +2617,16 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
tx_ring->prod_idx, skb->len);
atomic_dec(&tx_ring->tx_count);
+
+ if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
+ netif_stop_subqueue(ndev, tx_ring->wq_id);
+ if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
+ /*
+ * The queue got stopped because the tx_ring was full.
+ * Wake it up, because it's now at least 25% empty.
+ */
+ netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
+ }
return NETDEV_TX_OK;
}
@@ -2680,7 +2695,6 @@ static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
tx_ring_desc++;
}
atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
- atomic_set(&tx_ring->queue_stopped, 0);
}
static void ql_free_tx_resources(struct ql_adapter *qdev,
@@ -2703,10 +2717,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
&tx_ring->wq_base_dma);
if ((tx_ring->wq_base == NULL) ||
- tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
- netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
- return -ENOMEM;
- }
+ tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
+ goto pci_alloc_err;
+
tx_ring->q =
kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
if (tx_ring->q == NULL)
@@ -2716,6 +2729,9 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
err:
pci_free_consistent(qdev->pdev, tx_ring->wq_size,
tx_ring->wq_base, tx_ring->wq_base_dma);
+ tx_ring->wq_base = NULL;
+pci_alloc_err:
+ netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
return -ENOMEM;
}
@@ -4649,7 +4665,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
int err = 0;
ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
- min(MAX_CPUS, (int)num_online_cpus()));
+ min(MAX_CPUS, netif_get_num_default_rss_queues()));
if (!ndev)
return -ENOMEM;
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index d1827e887f4e..557a26545d75 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1256,7 +1256,6 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
netif_napi_del(&lp->napi);
- pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, lp->base);
pci_release_regions(pdev);
free_netdev(dev);
@@ -1278,17 +1277,4 @@ static struct pci_driver r6040_driver = {
.remove = __devexit_p(r6040_remove_one),
};
-
-static int __init r6040_init(void)
-{
- return pci_register_driver(&r6040_driver);
-}
-
-
-static void __exit r6040_cleanup(void)
-{
- pci_unregister_driver(&r6040_driver);
-}
-
-module_init(r6040_init);
-module_exit(r6040_cleanup);
+module_pci_driver(r6040_driver);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index d7a04e091101..b47d5b35024e 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -46,6 +46,8 @@
#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
+#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
+#define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
#ifdef RTL8169_DEBUG
#define assert(expr) \
@@ -141,6 +143,9 @@ enum mac_version {
RTL_GIGA_MAC_VER_36,
RTL_GIGA_MAC_VER_37,
RTL_GIGA_MAC_VER_38,
+ RTL_GIGA_MAC_VER_39,
+ RTL_GIGA_MAC_VER_40,
+ RTL_GIGA_MAC_VER_41,
RTL_GIGA_MAC_NONE = 0xff,
};
@@ -259,6 +264,14 @@ static const struct {
[RTL_GIGA_MAC_VER_38] =
_R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_39] =
+ _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
+ JUMBO_1K, true),
+ [RTL_GIGA_MAC_VER_40] =
+ _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
+ JUMBO_9K, false),
+ [RTL_GIGA_MAC_VER_41] =
+ _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
};
#undef _R
@@ -389,8 +402,12 @@ enum rtl8168_8101_registers {
TWSI = 0xd2,
MCU = 0xd3,
#define NOW_IS_OOB (1 << 7)
+#define TX_EMPTY (1 << 5)
+#define RX_EMPTY (1 << 4)
+#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
#define EN_NDP (1 << 3)
#define EN_OOB_RESET (1 << 2)
+#define LINK_LIST_RDY (1 << 1)
EFUSEAR = 0xdc,
#define EFUSEAR_FLAG 0x80000000
#define EFUSEAR_WRITE_CMD 0x80000000
@@ -416,6 +433,7 @@ enum rtl8168_registers {
#define ERIAR_MASK_SHIFT 12
#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
+#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
EPHY_RXER_NUM = 0x7c,
OCPDR = 0xb0, /* OCP GPHY access */
@@ -428,10 +446,14 @@ enum rtl8168_registers {
#define OCPAR_FLAG 0x80000000
#define OCPAR_GPHY_WRITE_CMD 0x8000f060
#define OCPAR_GPHY_READ_CMD 0x0000f060
+ GPHY_OCP = 0xb8,
RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
MISC = 0xf0, /* 8168e only. */
#define TXPLA_RST (1 << 29)
+#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
#define PWM_EN (1 << 22)
+#define RXDV_GATED_EN (1 << 19)
+#define EARLY_TALLY_EN (1 << 16)
};
enum rtl_register_content {
@@ -721,8 +743,8 @@ struct rtl8169_private {
u16 event_slow;
struct mdio_ops {
- void (*write)(void __iomem *, int, int);
- int (*read)(void __iomem *, int);
+ void (*write)(struct rtl8169_private *, int, int);
+ int (*read)(struct rtl8169_private *, int);
} mdio_ops;
struct pll_power_ops {
@@ -736,8 +758,8 @@ struct rtl8169_private {
} jumbo_ops;
struct csi_ops {
- void (*write)(void __iomem *, int, int);
- u32 (*read)(void __iomem *, int);
+ void (*write)(struct rtl8169_private *, int, int);
+ u32 (*read)(struct rtl8169_private *, int);
} csi_ops;
int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
@@ -774,6 +796,8 @@ struct rtl8169_private {
} phy_action;
} *rtl_fw;
#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
+
+ u32 ocp_base;
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -794,6 +818,8 @@ MODULE_FIRMWARE(FIRMWARE_8168F_1);
MODULE_FIRMWARE(FIRMWARE_8168F_2);
MODULE_FIRMWARE(FIRMWARE_8402_1);
MODULE_FIRMWARE(FIRMWARE_8411_1);
+MODULE_FIRMWARE(FIRMWARE_8106E_1);
+MODULE_FIRMWARE(FIRMWARE_8168G_1);
static void rtl_lock_work(struct rtl8169_private *tp)
{
@@ -818,47 +844,114 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
}
}
+struct rtl_cond {
+ bool (*check)(struct rtl8169_private *);
+ const char *msg;
+};
+
+static void rtl_udelay(unsigned int d)
+{
+ udelay(d);
+}
+
+static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
+ void (*delay)(unsigned int), unsigned int d, int n,
+ bool high)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ delay(d);
+ if (c->check(tp) == high)
+ return true;
+ }
+ netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
+ c->msg, !high, n, d);
+ return false;
+}
+
+static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
+}
+
+static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
+}
+
+static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, msleep, d, n, true);
+}
+
+static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
+ const struct rtl_cond *c,
+ unsigned int d, int n)
+{
+ return rtl_loop_wait(tp, c, msleep, d, n, false);
+}
+
+#define DECLARE_RTL_COND(name) \
+static bool name ## _check(struct rtl8169_private *); \
+ \
+static const struct rtl_cond name = { \
+ .check = name ## _check, \
+ .msg = #name \
+}; \
+ \
+static bool name ## _check(struct rtl8169_private *tp)
+
+DECLARE_RTL_COND(rtl_ocpar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(OCPAR) & OCPAR_FLAG;
+}
+
static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- for (i = 0; i < 20; i++) {
- udelay(100);
- if (RTL_R32(OCPAR) & OCPAR_FLAG)
- break;
- }
- return RTL_R32(OCPDR);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
+ RTL_R32(OCPDR) : ~0;
}
static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W32(OCPDR, data);
RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
- for (i = 0; i < 20; i++) {
- udelay(100);
- if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
- break;
- }
+
+ rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
+}
+
+DECLARE_RTL_COND(rtl_eriar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(ERIAR) & ERIAR_FLAG;
}
static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
RTL_W8(ERIDR, cmd);
RTL_W32(ERIAR, 0x800010e8);
msleep(2);
- for (i = 0; i < 5; i++) {
- udelay(100);
- if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
- break;
- }
+
+ if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
+ return;
ocp_write(tp, 0x1, 0x30, 0x00000001);
}
@@ -872,36 +965,27 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
}
-static void rtl8168_driver_start(struct rtl8169_private *tp)
+DECLARE_RTL_COND(rtl_ocp_read_cond)
{
u16 reg;
- int i;
-
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
reg = rtl8168_get_ocp_reg(tp);
- for (i = 0; i < 10; i++) {
- msleep(10);
- if (ocp_read(tp, 0x0f, reg) & 0x00000800)
- break;
- }
+ return ocp_read(tp, 0x0f, reg) & 0x00000800;
}
-static void rtl8168_driver_stop(struct rtl8169_private *tp)
+static void rtl8168_driver_start(struct rtl8169_private *tp)
{
- u16 reg;
- int i;
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
- rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+ rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
+}
- reg = rtl8168_get_ocp_reg(tp);
+static void rtl8168_driver_stop(struct rtl8169_private *tp)
+{
+ rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
- for (i = 0; i < 10; i++) {
- msleep(10);
- if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
- break;
- }
+ rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
}
static int r8168dp_check_dash(struct rtl8169_private *tp)
@@ -911,21 +995,114 @@ static int r8168dp_check_dash(struct rtl8169_private *tp)
return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
}
-static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
{
- int i;
+ if (reg & 0xffff0001) {
+ netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
+ return true;
+ }
+ return false;
+}
- RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0x1f) << 16 | (value & 0xffff));
+DECLARE_RTL_COND(rtl_ocp_gphy_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
- for (i = 20; i > 0; i--) {
- /*
- * Check if the RTL8169 has completed writing to the specified
- * MII register.
- */
- if (!(RTL_R32(PHYAR) & 0x80000000))
- break;
- udelay(25);
+ return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
+}
+
+static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return;
+
+ RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
+
+ rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
+}
+
+static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return 0;
+
+ RTL_W32(GPHY_OCP, reg << 15);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
+ (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
+}
+
+static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
+{
+ int val;
+
+ val = r8168_phy_ocp_read(tp, reg);
+ r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
+}
+
+static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return;
+
+ RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
+}
+
+static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ if (rtl_ocp_reg_failure(tp, reg))
+ return 0;
+
+ RTL_W32(OCPDR, reg << 15);
+
+ return RTL_R32(OCPDR);
+}
+
+#define OCP_STD_PHY_BASE 0xa400
+
+static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
+{
+ if (reg == 0x1f) {
+ tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
+ return;
}
+
+ if (tp->ocp_base != OCP_STD_PHY_BASE)
+ reg -= 0x10;
+
+ r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
+}
+
+static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
+{
+ if (tp->ocp_base != OCP_STD_PHY_BASE)
+ reg -= 0x10;
+
+ return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
+}
+
+DECLARE_RTL_COND(rtl_phyar_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(PHYAR) & 0x80000000;
+}
+
+static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
+
+ rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
/*
* According to hardware specs a 20us delay is required after write
* complete indication, but before sending next command.
@@ -933,23 +1110,16 @@ static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
udelay(20);
}
-static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
- int i, value = -1;
+ void __iomem *ioaddr = tp->mmio_addr;
+ int value;
- RTL_W32(PHYAR, 0x0 | (reg_addr & 0x1f) << 16);
+ RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
+
+ value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
+ RTL_R32(PHYAR) & 0xffff : ~0;
- for (i = 20; i > 0; i--) {
- /*
- * Check if the RTL8169 has completed retrieving data from
- * the specified MII register.
- */
- if (RTL_R32(PHYAR) & 0x80000000) {
- value = RTL_R32(PHYAR) & 0xffff;
- break;
- }
- udelay(25);
- }
/*
* According to hardware specs a 20us delay is required after read
* complete indication, but before sending next command.
@@ -959,45 +1129,35 @@ static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
return value;
}
-static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
+static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
- int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W32(OCPDR, data |
- ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
+ RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
RTL_W32(EPHY_RXER_NUM, 0);
- for (i = 0; i < 100; i++) {
- mdelay(1);
- if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
- break;
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
-static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
- r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
- (value & OCPDR_DATA_MASK));
+ r8168dp_1_mdio_access(tp, reg,
+ OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
}
-static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
- int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
+ r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
mdelay(1);
RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
RTL_W32(EPHY_RXER_NUM, 0);
- for (i = 0; i < 100; i++) {
- mdelay(1);
- if (RTL_R32(OCPAR) & OCPAR_FLAG)
- break;
- }
-
- return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
+ return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
+ RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
}
#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
@@ -1012,22 +1172,25 @@ static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
-static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
+static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
r8168dp_2_mdio_start(ioaddr);
- r8169_mdio_write(ioaddr, reg_addr, value);
+ r8169_mdio_write(tp, reg, value);
r8168dp_2_mdio_stop(ioaddr);
}
-static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
+static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
+ void __iomem *ioaddr = tp->mmio_addr;
int value;
r8168dp_2_mdio_start(ioaddr);
- value = r8169_mdio_read(ioaddr, reg_addr);
+ value = r8169_mdio_read(tp, reg);
r8168dp_2_mdio_stop(ioaddr);
@@ -1036,12 +1199,12 @@ static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
- tp->mdio_ops.write(tp->mmio_addr, location, val);
+ tp->mdio_ops.write(tp, location, val);
}
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
- return tp->mdio_ops.read(tp->mmio_addr, location);
+ return tp->mdio_ops.read(tp, location);
}
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
@@ -1072,79 +1235,64 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
return rtl_readphy(tp, location);
}
-static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
+DECLARE_RTL_COND(rtl_ephyar_cond)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(EPHYAR) & EPHYAR_FLAG;
+}
+
+static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(EPHYAR) & EPHYAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
+
+ udelay(10);
}
-static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
+static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
- u16 value = 0xffff;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(EPHYAR) & EPHYAR_FLAG) {
- value = RTL_R32(EPHYAR) & EPHYAR_DATA_MASK;
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
+ RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
-static
-void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
+static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
+ u32 val, int type)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
BUG_ON((addr & 3) || (mask == 0));
RTL_W32(ERIDR, val);
RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(ERIAR) & ERIAR_FLAG))
- break;
- udelay(100);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
-static u32 rtl_eri_read(void __iomem *ioaddr, int addr, int type)
+static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(ERIAR) & ERIAR_FLAG) {
- value = RTL_R32(ERIDR);
- break;
- }
- udelay(100);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
+ RTL_R32(ERIDR) : ~0;
}
-static void
-rtl_w1w0_eri(void __iomem *ioaddr, int addr, u32 mask, u32 p, u32 m, int type)
+static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
+ u32 m, int type)
{
u32 val;
- val = rtl_eri_read(ioaddr, addr, type);
- rtl_eri_write(ioaddr, addr, mask, (val & ~m) | p, type);
+ val = rtl_eri_read(tp, addr, type);
+ rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
}
struct exgmac_reg {
@@ -1153,31 +1301,30 @@ struct exgmac_reg {
u32 val;
};
-static void rtl_write_exgmac_batch(void __iomem *ioaddr,
+static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
const struct exgmac_reg *r, int len)
{
while (len-- > 0) {
- rtl_eri_write(ioaddr, r->addr, r->mask, r->val, ERIAR_EXGMAC);
+ rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
r++;
}
}
-static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
+DECLARE_RTL_COND(rtl_efusear_cond)
{
- u8 value = 0xff;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+ return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
+}
- for (i = 0; i < 300; i++) {
- if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) {
- value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK;
- break;
- }
- udelay(100);
- }
+static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
- return value;
+ RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+
+ return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
+ RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
static u16 rtl_get_events(struct rtl8169_private *tp)
@@ -1276,48 +1423,48 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
if (RTL_R8(PHYstatus) & _1000bpsF) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x00000011, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else if (RTL_R8(PHYstatus) & _100bps) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x0000003f, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
+ ERIAR_EXGMAC);
}
/* Reset packet filter */
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
ERIAR_EXGMAC);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
if (RTL_R8(PHYstatus) & _1000bpsF) {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x00000011, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x00000005, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
- 0x0000001f, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
- 0x0000003f, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
+ ERIAR_EXGMAC);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
if (RTL_R8(PHYstatus) & _10bps) {
- rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
- 0x4d02, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
- 0x0060, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
+ ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
+ ERIAR_EXGMAC);
} else {
- rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
- 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
+ ERIAR_EXGMAC);
}
}
}
@@ -1784,6 +1931,13 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
}
}
+DECLARE_RTL_COND(rtl_counters_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(CounterAddrLow) & CounterDump;
+}
+
static void rtl8169_update_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -1792,7 +1946,6 @@ static void rtl8169_update_counters(struct net_device *dev)
struct rtl8169_counters *counters;
dma_addr_t paddr;
u32 cmd;
- int wait = 1000;
/*
* Some chips are unable to dump tally counters when the receiver
@@ -1810,13 +1963,8 @@ static void rtl8169_update_counters(struct net_device *dev)
RTL_W32(CounterAddrLow, cmd);
RTL_W32(CounterAddrLow, cmd | CounterDump);
- while (wait--) {
- if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
- memcpy(&tp->counters, counters, sizeof(*counters));
- break;
- }
- udelay(10);
- }
+ if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
+ memcpy(&tp->counters, counters, sizeof(*counters));
RTL_W32(CounterAddrLow, 0);
RTL_W32(CounterAddrHigh, 0);
@@ -1894,6 +2042,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
u32 val;
int mac_version;
} mac_info[] = {
+ /* 8168G family. */
+ { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
+ { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
+
/* 8168F family. */
{ 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
{ 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
@@ -1933,6 +2085,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
{ 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
+ { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
+ { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
{ 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
{ 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
{ 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
@@ -2186,7 +2340,7 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
index -= regno;
break;
case PHY_READ_EFUSE:
- predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
+ predata = rtl8168d_efuse_read(tp, regno);
index++;
break;
case PHY_CLEAR_READCOUNT:
@@ -2626,7 +2780,6 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 }
};
- void __iomem *ioaddr = tp->mmio_addr;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
@@ -2638,7 +2791,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
- if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x669a },
@@ -2738,11 +2891,10 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
{ 0x1f, 0x0000 },
{ 0x0d, 0xf880 }
};
- void __iomem *ioaddr = tp->mmio_addr;
rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
- if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
+ if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
static const struct phy_reg phy_reg_init[] = {
{ 0x1f, 0x0002 },
{ 0x05, 0x669a },
@@ -3010,8 +3162,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* EEE setting */
- rtl_w1w0_eri(tp->mmio_addr, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3115,7 +3266,6 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct phy_reg phy_reg_init[] = {
/* Channel estimation fine tune */
{ 0x1f, 0x0003 },
@@ -3189,7 +3339,7 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
/* eee setting */
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x8b85);
rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
@@ -3211,6 +3361,55 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const u16 mac_ocp_patch[] = {
+ 0xe008, 0xe01b, 0xe01d, 0xe01f,
+ 0xe021, 0xe023, 0xe025, 0xe027,
+ 0x49d2, 0xf10d, 0x766c, 0x49e2,
+ 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
+
+ 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
+ 0xc707, 0x8ee1, 0x9d6c, 0xc603,
+ 0xbe00, 0xb416, 0x0076, 0xe86c,
+ 0xc602, 0xbe00, 0x0000, 0xc602,
+
+ 0xbe00, 0x0000, 0xc602, 0xbe00,
+ 0x0000, 0xc602, 0xbe00, 0x0000,
+ 0xc602, 0xbe00, 0x0000, 0xc602,
+ 0xbe00, 0x0000, 0xc602, 0xbe00,
+
+ 0x0000, 0x0000, 0x0000, 0x0000
+ };
+ u32 i;
+
+ /* Patch code for GPHY reset */
+ for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
+ r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
+ r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
+ r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
+
+ rtl_apply_firmware(tp);
+
+ if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
+ else
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
+
+ if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
+ rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
+ else
+ rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
+
+ rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
+ rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
+
+ r8168_phy_ocp_write(tp, 0xa436, 0x8012);
+ rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
+
+ rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
+}
+
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -3256,8 +3455,6 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
/* Disable ALDPS before setting firmware */
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, 0x18, 0x0310);
@@ -3266,13 +3463,35 @@ static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
rtl_apply_firmware(tp);
/* EEE setting */
- rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_writephy(tp, 0x1f, 0x0004);
rtl_writephy(tp, 0x10, 0x401f);
rtl_writephy(tp, 0x19, 0x7030);
rtl_writephy(tp, 0x1f, 0x0000);
}
+static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ { 0x1f, 0x0004 },
+ { 0x10, 0xc07f },
+ { 0x19, 0x7030 },
+ { 0x1f, 0x0000 }
+ };
+
+ /* Disable ALDPS before ram code */
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x18, 0x0310);
+ msleep(100);
+
+ rtl_apply_firmware(tp);
+
+ rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+}
+
static void rtl_hw_phy_config(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -3369,6 +3588,15 @@ static void rtl_hw_phy_config(struct net_device *dev)
rtl8411_hw_phy_config(tp);
break;
+ case RTL_GIGA_MAC_VER_39:
+ rtl8106e_hw_phy_config(tp);
+ break;
+
+ case RTL_GIGA_MAC_VER_40:
+ rtl8168g_1_hw_phy_config(tp);
+ break;
+
+ case RTL_GIGA_MAC_VER_41:
default:
break;
}
@@ -3426,18 +3654,16 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
free_netdev(dev);
}
+DECLARE_RTL_COND(rtl_phy_reset_cond)
+{
+ return tp->phy_reset_pending(tp);
+}
+
static void rtl8169_phy_reset(struct net_device *dev,
struct rtl8169_private *tp)
{
- unsigned int i;
-
tp->phy_reset_enable(tp);
- for (i = 0; i < 100; i++) {
- if (!tp->phy_reset_pending(tp))
- return;
- msleep(1);
- }
- netif_err(tp, link, dev, "PHY reset failed\n");
+ rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
}
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
@@ -3512,7 +3738,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
low >> 16 },
};
- rtl_write_exgmac_batch(ioaddr, e, ARRAY_SIZE(e));
+ rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
}
RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -3589,6 +3815,11 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
ops->write = r8168dp_2_mdio_write;
ops->read = r8168dp_2_mdio_read;
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ ops->write = r8168g_mdio_write;
+ ops->read = r8168g_mdio_read;
+ break;
default:
ops->write = r8169_mdio_write;
ops->read = r8169_mdio_read;
@@ -3608,6 +3839,9 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_39:
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
RTL_W32(RxConfig, RTL_R32(RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
@@ -3761,7 +3995,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
tp->mac_version == RTL_GIGA_MAC_VER_33)
- rtl_ephy_write(ioaddr, 0x19, 0xff64);
+ rtl_ephy_write(tp, 0x19, 0xff64);
if (rtl_wol_pll_power_down(tp))
return;
@@ -3830,6 +4064,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_29:
case RTL_GIGA_MAC_VER_30:
case RTL_GIGA_MAC_VER_37:
+ case RTL_GIGA_MAC_VER_39:
ops->down = r810x_pll_power_down;
ops->up = r810x_pll_power_up;
break;
@@ -3855,6 +4090,8 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_35:
case RTL_GIGA_MAC_VER_36:
case RTL_GIGA_MAC_VER_38:
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
@@ -4051,6 +4288,8 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
* No action needed for jumbo frames with 8169.
* No jumbo for 810x at all.
*/
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
default:
ops->disable = NULL;
ops->enable = NULL;
@@ -4058,20 +4297,20 @@ static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
}
}
+DECLARE_RTL_COND(rtl_chipcmd_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(ChipCmd) & CmdReset;
+}
+
static void rtl_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- int i;
- /* Soft reset the chip. */
RTL_W8(ChipCmd, CmdReset);
- /* Check that the chip has finished the reset. */
- for (i = 0; i < 100; i++) {
- if ((RTL_R8(ChipCmd) & CmdReset) == 0)
- break;
- udelay(100);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
@@ -4125,6 +4364,20 @@ static void rtl_rx_close(struct rtl8169_private *tp)
RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
+DECLARE_RTL_COND(rtl_npq_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(TxPoll) & NPQ;
+}
+
+DECLARE_RTL_COND(rtl_txcfg_empty_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(TxConfig) & TXCFG_EMPTY;
+}
+
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -4137,16 +4390,16 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
tp->mac_version == RTL_GIGA_MAC_VER_28 ||
tp->mac_version == RTL_GIGA_MAC_VER_31) {
- while (RTL_R8(TxPoll) & NPQ)
- udelay(20);
+ rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36 ||
tp->mac_version == RTL_GIGA_MAC_VER_37 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_40 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_41 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
- while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
- udelay(100);
+ rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
} else {
RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
udelay(100);
@@ -4352,15 +4605,12 @@ static void rtl_hw_start_8169(struct net_device *dev)
static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
{
if (tp->csi_ops.write)
- tp->csi_ops.write(tp->mmio_addr, addr, value);
+ tp->csi_ops.write(tp, addr, value);
}
static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
{
- if (tp->csi_ops.read)
- return tp->csi_ops.read(tp->mmio_addr, addr);
- else
- return ~0;
+ return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
}
static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
@@ -4381,73 +4631,56 @@ static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
rtl_csi_access_enable(tp, 0x27000000);
}
-static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
+DECLARE_RTL_COND(rtl_csiar_cond)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R32(CSIAR) & CSIAR_FLAG;
+}
+
+static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIDR, value);
RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
-static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
+static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(CSIAR) & CSIAR_FLAG) {
- value = RTL_R32(CSIDR);
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ RTL_R32(CSIDR) : ~0;
}
-static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
+static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIDR, value);
RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
CSIAR_FUNC_NIC);
- for (i = 0; i < 100; i++) {
- if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
- break;
- udelay(10);
- }
+ rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
}
-static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
+static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
- u32 value = ~0x00;
- unsigned int i;
+ void __iomem *ioaddr = tp->mmio_addr;
RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
- for (i = 0; i < 100; i++) {
- if (RTL_R32(CSIAR) & CSIAR_FLAG) {
- value = RTL_R32(CSIDR);
- break;
- }
- udelay(10);
- }
-
- return value;
+ return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
+ RTL_R32(CSIDR) : ~0;
}
static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
@@ -4492,13 +4725,14 @@ struct ephy_info {
u16 bits;
};
-static void rtl_ephy_init(void __iomem *ioaddr, const struct ephy_info *e, int len)
+static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
+ int len)
{
u16 w;
while (len-- > 0) {
- w = (rtl_ephy_read(ioaddr, e->offset) & ~e->mask) | e->bits;
- rtl_ephy_write(ioaddr, e->offset, w);
+ w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
+ rtl_ephy_write(tp, e->offset, w);
e++;
}
}
@@ -4582,7 +4816,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168cp[] = {
{ 0x01, 0, 0x0001 },
{ 0x02, 0x0800, 0x1000 },
@@ -4593,7 +4826,7 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
+ rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
__rtl_hw_start_8168cp(tp);
}
@@ -4644,14 +4877,13 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
- rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
+ rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
__rtl_hw_start_8168cp(tp);
}
static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168c_2[] = {
{ 0x01, 0, 0x0001 },
{ 0x03, 0x0400, 0x0220 }
@@ -4659,7 +4891,7 @@ static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
+ rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
__rtl_hw_start_8168cp(tp);
}
@@ -4727,8 +4959,8 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
const struct ephy_info *e = e_info_8168d_4 + i;
u16 w;
- w = rtl_ephy_read(ioaddr, e->offset);
- rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
+ w = rtl_ephy_read(tp, e->offset);
+ rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits);
}
rtl_enable_clock_request(pdev);
@@ -4756,7 +4988,7 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
+ rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
@@ -4782,19 +5014,18 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_csi_access_enable_1(tp);
- rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
+ rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -4820,16 +5051,16 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
RTL_W8(MaxTxPacketSize, EarlySize);
@@ -4854,10 +5085,9 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
- rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */
RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
@@ -4865,7 +5095,6 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
static void rtl_hw_start_8411(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168f_1[] = {
{ 0x06, 0x00c0, 0x0020 },
{ 0x0f, 0xffff, 0x5200 },
@@ -4875,10 +5104,39 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
rtl_hw_start_8168f(tp);
- rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
+ rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000,
- ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
+}
+
+static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
+
+ rtl_csi_access_enable_1(tp);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+
+ RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
+ RTL_W8(MaxTxPacketSize, EarlySize);
+
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+
+ /* Adjust EEE LED frequency */
+ RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+
+ rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
}
static void rtl_hw_start_8168(struct net_device *dev)
@@ -4982,6 +5240,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
rtl_hw_start_8411(tp);
break;
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ rtl_hw_start_8168g_1(tp);
+ break;
+
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
dev->name, tp->mac_version);
@@ -5036,7 +5299,7 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
RTL_W8(Config1, cfg1 & ~LEDS0);
- rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
+ rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
@@ -5056,7 +5319,7 @@ static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
{
rtl_hw_start_8102e_2(tp);
- rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
+ rtl_ephy_write(tp, 0x03, 0xc2f9);
}
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
@@ -5082,15 +5345,13 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+ rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
}
static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
rtl_hw_start_8105e_1(tp);
- rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
+ rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
}
static void rtl_hw_start_8402(struct rtl8169_private *tp)
@@ -5109,18 +5370,29 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
- rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
+ rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
- rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
- rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
- ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
+ rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+}
+
+static void rtl_hw_start_8106(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ /* Force LAN exit from ASPM if Rx/Tx are not idle */
+ RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+
+ RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
+ RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
+ RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
}
static void rtl_hw_start_8101(struct net_device *dev)
@@ -5167,6 +5439,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
case RTL_GIGA_MAC_VER_37:
rtl_hw_start_8402(tp);
break;
+
+ case RTL_GIGA_MAC_VER_39:
+ rtl_hw_start_8106(tp);
+ break;
}
RTL_W8(Cfg9346, Cfg9346_Lock);
@@ -5380,7 +5656,6 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
{
rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
tp->cur_tx = tp->dirty_tx = 0;
- netdev_reset_queue(tp->dev);
}
static void rtl_reset_work(struct rtl8169_private *tp)
@@ -5535,8 +5810,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd->opts2 = cpu_to_le32(opts[1]);
- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);
wmb();
@@ -5633,16 +5906,9 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
-struct rtl_txc {
- int packets;
- int bytes;
-};
-
static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
{
- struct rtl8169_stats *tx_stats = &tp->tx_stats;
unsigned int dirty_tx, tx_left;
- struct rtl_txc txc = { 0, 0 };
dirty_tx = tp->dirty_tx;
smp_rmb();
@@ -5661,24 +5927,17 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
tp->TxDescArray + entry);
if (status & LastFrag) {
- struct sk_buff *skb = tx_skb->skb;
-
- txc.packets++;
- txc.bytes += skb->len;
- dev_kfree_skb(skb);
+ u64_stats_update_begin(&tp->tx_stats.syncp);
+ tp->tx_stats.packets++;
+ tp->tx_stats.bytes += tx_skb->skb->len;
+ u64_stats_update_end(&tp->tx_stats.syncp);
+ dev_kfree_skb(tx_skb->skb);
tx_skb->skb = NULL;
}
dirty_tx++;
tx_left--;
}
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets += txc.packets;
- tx_stats->bytes += txc.bytes;
- u64_stats_update_end(&tx_stats->syncp);
-
- netdev_completed_queue(dev, txc.packets, txc.bytes);
-
if (tp->dirty_tx != dirty_tx) {
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
@@ -6435,6 +6694,67 @@ static unsigned rtl_try_msi(struct rtl8169_private *tp,
return msi;
}
+DECLARE_RTL_COND(rtl_link_list_ready_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return RTL_R8(MCU) & LINK_LIST_RDY;
+}
+
+DECLARE_RTL_COND(rtl_rxtx_empty_cond)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+}
+
+static void __devinit rtl_hw_init_8168g(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ u32 data;
+
+ tp->ocp_base = OCP_STD_PHY_BASE;
+
+ RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
+ return;
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
+ return;
+
+ RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
+ msleep(1);
+ RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+
+ data = r8168_mac_ocp_read(tp, 0xe8de);
+ data &= ~(1 << 14);
+ r8168_mac_ocp_write(tp, 0xe8de, data);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
+ return;
+
+ data = r8168_mac_ocp_read(tp, 0xe8de);
+ data |= (1 << 15);
+ r8168_mac_ocp_write(tp, 0xe8de, data);
+
+ if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
+ return;
+}
+
+static void __devinit rtl_hw_initialize(struct rtl8169_private *tp)
+{
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_40:
+ case RTL_GIGA_MAC_VER_41:
+ rtl_hw_init_8168g(tp);
+ break;
+
+ default:
+ break;
+ }
+}
+
static int __devinit
rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -6544,6 +6864,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_irq_disable(tp);
+ rtl_hw_initialize(tp);
+
rtl_hw_reset(tp);
rtl_ack_events(tp, 0xffff);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 79bf09b41971..af0b867a6cf6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -49,6 +49,34 @@
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
+#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
+ defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+ defined(CONFIG_ARCH_R8A7740)
+static void sh_eth_select_mii(struct net_device *ndev)
+{
+ u32 value = 0x0;
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ switch (mdp->phy_interface) {
+ case PHY_INTERFACE_MODE_GMII:
+ value = 0x2;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ value = 0x1;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ value = 0x0;
+ break;
+ default:
+ pr_warn("PHY interface mode was not setup. Set to MII.\n");
+ value = 0x1;
+ break;
+ }
+
+ sh_eth_write(ndev, value, RMII_MII);
+}
+#endif
+
/* There is CPU dependent code */
#if defined(CONFIG_CPU_SUBTYPE_SH7724)
#define SH_ETH_RESET_DEFAULT 1
@@ -102,6 +130,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
#elif defined(CONFIG_CPU_SUBTYPE_SH7757)
#define SH_ETH_HAS_BOTH_MODULES 1
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
+
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -176,23 +206,19 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
}
static int sh_eth_is_gether(struct sh_eth_private *mdp);
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- int cnt = 100;
+ int ret = 0;
if (sh_eth_is_gether(mdp)) {
sh_eth_write(ndev, 0x03, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt < 0)
- printk(KERN_ERR "Device reset fail\n");
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
@@ -210,6 +236,9 @@ static void sh_eth_reset(struct net_device *ndev)
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
EDMR);
}
+
+out:
+ return ret;
}
static void sh_eth_set_duplex_giga(struct net_device *ndev)
@@ -282,7 +311,9 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
#elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
static void sh_eth_reset_hw_crc(struct net_device *ndev);
+
static void sh_eth_chip_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -292,35 +323,6 @@ static void sh_eth_chip_reset(struct net_device *ndev)
mdelay(1);
}
-static void sh_eth_reset(struct net_device *ndev)
-{
- int cnt = 100;
-
- sh_eth_write(ndev, EDSR_ENALL, EDSR);
- sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt == 0)
- printk(KERN_ERR "Device reset fail\n");
-
- /* Table Init */
- sh_eth_write(ndev, 0x0, TDLAR);
- sh_eth_write(ndev, 0x0, TDFAR);
- sh_eth_write(ndev, 0x0, TDFXR);
- sh_eth_write(ndev, 0x0, TDFFR);
- sh_eth_write(ndev, 0x0, RDLAR);
- sh_eth_write(ndev, 0x0, RDFAR);
- sh_eth_write(ndev, 0x0, RDFXR);
- sh_eth_write(ndev, 0x0, RDFFR);
-
- /* Reset HW CRC register */
- sh_eth_reset_hw_crc(ndev);
-}
-
static void sh_eth_set_duplex(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -377,9 +379,41 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.tsu = 1,
#if defined(CONFIG_CPU_SUBTYPE_SH7734)
.hw_crc = 1,
+ .select_mii = 1,
#endif
};
+static int sh_eth_reset(struct net_device *ndev)
+{
+ int ret = 0;
+
+ sh_eth_write(ndev, EDSR_ENALL, EDSR);
+ sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
+
+ /* Table Init */
+ sh_eth_write(ndev, 0x0, TDLAR);
+ sh_eth_write(ndev, 0x0, TDFAR);
+ sh_eth_write(ndev, 0x0, TDFXR);
+ sh_eth_write(ndev, 0x0, TDFFR);
+ sh_eth_write(ndev, 0x0, RDLAR);
+ sh_eth_write(ndev, 0x0, RDFAR);
+ sh_eth_write(ndev, 0x0, RDFXR);
+ sh_eth_write(ndev, 0x0, RDFFR);
+
+ /* Reset HW CRC register */
+ sh_eth_reset_hw_crc(ndev);
+
+ /* Select MII mode */
+ if (sh_eth_my_cpu_data.select_mii)
+ sh_eth_select_mii(ndev);
+out:
+ return ret;
+}
+
static void sh_eth_reset_hw_crc(struct net_device *ndev)
{
if (sh_eth_my_cpu_data.hw_crc)
@@ -388,44 +422,29 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
#elif defined(CONFIG_ARCH_R8A7740)
#define SH_ETH_HAS_TSU 1
+static int sh_eth_check_reset(struct net_device *ndev);
+
static void sh_eth_chip_reset(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned long mii;
/* reset device */
sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
mdelay(1);
- switch (mdp->phy_interface) {
- case PHY_INTERFACE_MODE_GMII:
- mii = 2;
- break;
- case PHY_INTERFACE_MODE_MII:
- mii = 1;
- break;
- case PHY_INTERFACE_MODE_RMII:
- default:
- mii = 0;
- break;
- }
- sh_eth_write(ndev, mii, RMII_MII);
+ sh_eth_select_mii(ndev);
}
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
- int cnt = 100;
+ int ret = 0;
sh_eth_write(ndev, EDSR_ENALL, EDSR);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
- while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
- break;
- mdelay(1);
- cnt--;
- }
- if (cnt == 0)
- printk(KERN_ERR "Device reset fail\n");
+
+ ret = sh_eth_check_reset(ndev);
+ if (ret)
+ goto out;
/* Table Init */
sh_eth_write(ndev, 0x0, TDLAR);
@@ -436,6 +455,9 @@ static void sh_eth_reset(struct net_device *ndev)
sh_eth_write(ndev, 0x0, RDFAR);
sh_eth_write(ndev, 0x0, RDFXR);
sh_eth_write(ndev, 0x0, RDFFR);
+
+out:
+ return ret;
}
static void sh_eth_set_duplex(struct net_device *ndev)
@@ -492,6 +514,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
+ .select_mii = 1,
};
#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -543,11 +566,31 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
#if defined(SH_ETH_RESET_DEFAULT)
/* Chip Reset */
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
{
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
mdelay(3);
sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
+
+ return 0;
+}
+#else
+static int sh_eth_check_reset(struct net_device *ndev)
+{
+ int ret = 0;
+ int cnt = 100;
+
+ while (cnt > 0) {
+ if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ break;
+ mdelay(1);
+ cnt--;
+ }
+ if (cnt < 0) {
+ printk(KERN_ERR "Device reset fail\n");
+ ret = -ETIMEDOUT;
+ }
+ return ret;
}
#endif
@@ -739,21 +782,23 @@ static void sh_eth_ring_free(struct net_device *ndev)
/* Free Rx skb ringbuffer */
if (mdp->rx_skbuff) {
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
if (mdp->rx_skbuff[i])
dev_kfree_skb(mdp->rx_skbuff[i]);
}
}
kfree(mdp->rx_skbuff);
+ mdp->rx_skbuff = NULL;
/* Free Tx skb ringbuffer */
if (mdp->tx_skbuff) {
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
if (mdp->tx_skbuff[i])
dev_kfree_skb(mdp->tx_skbuff[i]);
}
}
kfree(mdp->tx_skbuff);
+ mdp->tx_skbuff = NULL;
}
/* format skb and descriptor buffer */
@@ -764,8 +809,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
struct sk_buff *skb;
struct sh_eth_rxdesc *rxdesc = NULL;
struct sh_eth_txdesc *txdesc = NULL;
- int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
- int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
+ int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
+ int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
mdp->cur_rx = mdp->cur_tx = 0;
mdp->dirty_rx = mdp->dirty_tx = 0;
@@ -773,7 +818,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
memset(mdp->rx_ring, 0, rx_ringsize);
/* build Rx ring buffer */
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
/* skb */
mdp->rx_skbuff[i] = NULL;
skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
@@ -799,7 +844,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
}
}
- mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
+ mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
/* Mark the last entry as wrapping the ring. */
rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
@@ -807,7 +852,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
memset(mdp->tx_ring, 0, tx_ringsize);
/* build Tx ring buffer */
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
@@ -841,7 +886,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->rx_buf_sz += NET_IP_ALIGN;
/* Allocate RX and TX skb rings */
- mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
+ mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
GFP_KERNEL);
if (!mdp->rx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
@@ -849,7 +894,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
return ret;
}
- mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
+ mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
GFP_KERNEL);
if (!mdp->tx_skbuff) {
dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
@@ -858,7 +903,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
}
/* Allocate all Rx descriptors. */
- rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+ rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
GFP_KERNEL);
@@ -872,7 +917,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->dirty_rx = 0;
/* Allocate all Tx descriptors. */
- tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+ tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
GFP_KERNEL);
if (!mdp->tx_ring) {
@@ -890,19 +935,41 @@ desc_ring_free:
skb_ring_free:
/* Free Rx and Tx skb ring buffer */
sh_eth_ring_free(ndev);
+ mdp->tx_ring = NULL;
+ mdp->rx_ring = NULL;
return ret;
}
-static int sh_eth_dev_init(struct net_device *ndev)
+static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
+{
+ int ringsize;
+
+ if (mdp->rx_ring) {
+ ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+ mdp->rx_desc_dma);
+ mdp->rx_ring = NULL;
+ }
+
+ if (mdp->tx_ring) {
+ ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
+ dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+ mdp->tx_desc_dma);
+ mdp->tx_ring = NULL;
+ }
+}
+
+static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
int ret = 0;
struct sh_eth_private *mdp = netdev_priv(ndev);
- u_int32_t rx_int_var, tx_int_var;
u32 val;
/* Soft Reset */
- sh_eth_reset(ndev);
+ ret = sh_eth_reset(ndev);
+ if (ret)
+ goto out;
/* Descriptor format */
sh_eth_ring_format(ndev);
@@ -926,9 +993,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
/* Frame recv control */
sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
- rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
- tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
- sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
+ sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
if (mdp->cd->bculr)
sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */
@@ -943,7 +1008,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ if (start)
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* PAUSE Prohibition */
val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -958,7 +1024,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
/* E-MAC Interrupt Enable register */
- sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+ if (start)
+ sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
@@ -971,11 +1038,14 @@ static int sh_eth_dev_init(struct net_device *ndev)
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
+ if (start) {
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
- netif_start_queue(ndev);
+ netif_start_queue(ndev);
+ }
+out:
return ret;
}
@@ -988,7 +1058,7 @@ static int sh_eth_txfree(struct net_device *ndev)
int entry = 0;
for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
- entry = mdp->dirty_tx % TX_RING_SIZE;
+ entry = mdp->dirty_tx % mdp->num_tx_ring;
txdesc = &mdp->tx_ring[entry];
if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
break;
@@ -1001,7 +1071,7 @@ static int sh_eth_txfree(struct net_device *ndev)
freeNum++;
}
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
- if (entry >= TX_RING_SIZE - 1)
+ if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
ndev->stats.tx_packets++;
@@ -1016,8 +1086,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_rxdesc *rxdesc;
- int entry = mdp->cur_rx % RX_RING_SIZE;
- int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
+ int entry = mdp->cur_rx % mdp->num_rx_ring;
+ int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
@@ -1068,13 +1138,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
ndev->stats.rx_bytes += pkt_len;
}
rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
- entry = (++mdp->cur_rx) % RX_RING_SIZE;
+ entry = (++mdp->cur_rx) % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
}
/* Refill the Rx ring buffers. */
for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
- entry = mdp->dirty_rx % RX_RING_SIZE;
+ entry = mdp->dirty_rx % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
/* The size of the buffer is 16 byte boundary. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
@@ -1091,7 +1161,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
skb_checksum_none_assert(skb);
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
}
- if (entry >= RX_RING_SIZE - 1)
+ if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
else
@@ -1293,14 +1363,6 @@ other_irq:
return ret;
}
-static void sh_eth_timer(unsigned long data)
-{
- struct net_device *ndev = (struct net_device *)data;
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- mod_timer(&mdp->timer, jiffies + (10 * HZ));
-}
-
/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
@@ -1499,6 +1561,71 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
}
+static void sh_eth_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ ring->rx_max_pending = RX_RING_MAX;
+ ring->tx_max_pending = TX_RING_MAX;
+ ring->rx_pending = mdp->num_rx_ring;
+ ring->tx_pending = mdp->num_tx_ring;
+}
+
+static int sh_eth_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ring)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int ret;
+
+ if (ring->tx_pending > TX_RING_MAX ||
+ ring->rx_pending > RX_RING_MAX ||
+ ring->tx_pending < TX_RING_MIN ||
+ ring->rx_pending < RX_RING_MIN)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(ndev)) {
+ netif_tx_disable(ndev);
+ /* Disable interrupts by clearing the interrupt mask. */
+ sh_eth_write(ndev, 0x0000, EESIPR);
+ /* Stop the chip's Tx and Rx processes. */
+ sh_eth_write(ndev, 0, EDTRR);
+ sh_eth_write(ndev, 0, EDRRR);
+ synchronize_irq(ndev->irq);
+ }
+
+ /* Free all the skbuffs in the Rx queue. */
+ sh_eth_ring_free(ndev);
+ /* Free DMA buffer */
+ sh_eth_free_dma_buffer(mdp);
+
+ /* Set new parameters */
+ mdp->num_rx_ring = ring->rx_pending;
+ mdp->num_tx_ring = ring->tx_pending;
+
+ ret = sh_eth_ring_init(ndev);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
+ return ret;
+ }
+ ret = sh_eth_dev_init(ndev, false);
+ if (ret < 0) {
+ dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
+ return ret;
+ }
+
+ if (netif_running(ndev)) {
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
+ netif_wake_queue(ndev);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_settings = sh_eth_get_settings,
.set_settings = sh_eth_set_settings,
@@ -1509,6 +1636,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_strings = sh_eth_get_strings,
.get_ethtool_stats = sh_eth_get_ethtool_stats,
.get_sset_count = sh_eth_get_sset_count,
+ .get_ringparam = sh_eth_get_ringparam,
+ .set_ringparam = sh_eth_set_ringparam,
};
/* network device open function */
@@ -1539,7 +1668,7 @@ static int sh_eth_open(struct net_device *ndev)
goto out_free_irq;
/* device init */
- ret = sh_eth_dev_init(ndev);
+ ret = sh_eth_dev_init(ndev, true);
if (ret)
goto out_free_irq;
@@ -1548,11 +1677,6 @@ static int sh_eth_open(struct net_device *ndev)
if (ret)
goto out_free_irq;
- /* Set the timer to check for link beat. */
- init_timer(&mdp->timer);
- mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
- setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
-
return ret;
out_free_irq:
@@ -1577,11 +1701,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
/* tx_errors count up */
ndev->stats.tx_errors++;
- /* timer off */
- del_timer_sync(&mdp->timer);
-
/* Free all the skbuffs in the Rx queue. */
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_rx_ring; i++) {
rxdesc = &mdp->rx_ring[i];
rxdesc->status = 0;
rxdesc->addr = 0xBADF00D0;
@@ -1589,18 +1710,14 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
dev_kfree_skb(mdp->rx_skbuff[i]);
mdp->rx_skbuff[i] = NULL;
}
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < mdp->num_tx_ring; i++) {
if (mdp->tx_skbuff[i])
dev_kfree_skb(mdp->tx_skbuff[i]);
mdp->tx_skbuff[i] = NULL;
}
/* device init */
- sh_eth_dev_init(ndev);
-
- /* timer on */
- mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
- add_timer(&mdp->timer);
+ sh_eth_dev_init(ndev, true);
}
/* Packet transmit function */
@@ -1612,7 +1729,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
- if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
+ if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
if (!sh_eth_txfree(ndev)) {
if (netif_msg_tx_queued(mdp))
dev_warn(&ndev->dev, "TxFD exhausted.\n");
@@ -1623,7 +1740,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
- entry = mdp->cur_tx % TX_RING_SIZE;
+ entry = mdp->cur_tx % mdp->num_tx_ring;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
/* soft swap. */
@@ -1637,7 +1754,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
else
txdesc->buffer_length = skb->len;
- if (entry >= TX_RING_SIZE - 1)
+ if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
else
txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
@@ -1654,7 +1771,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static int sh_eth_close(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- int ringsize;
netif_stop_queue(ndev);
@@ -1673,18 +1789,11 @@ static int sh_eth_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
- del_timer_sync(&mdp->timer);
-
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
/* free DMA buffer */
- ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
- dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
-
- /* free DMA buffer */
- ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
- dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
+ sh_eth_free_dma_buffer(mdp);
pm_runtime_put_sync(&mdp->pdev->dev);
@@ -2275,6 +2384,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ether_setup(ndev);
mdp = netdev_priv(ndev);
+ mdp->num_tx_ring = TX_RING_SIZE;
+ mdp->num_rx_ring = RX_RING_SIZE;
mdp->addr = ioremap(res->start, resource_size(res));
if (mdp->addr == NULL) {
ret = -ENOMEM;
@@ -2312,8 +2423,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* debug message level */
mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
- mdp->post_rx = POST_RX >> (devno << 1);
- mdp->post_fw = POST_FW >> (devno << 1);
/* read and set MAC address */
read_mac_address(ndev, pd->mac_addr);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 57b8e1fc5d15..bae84fd2e73a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,6 +27,10 @@
#define TX_TIMEOUT (5*HZ)
#define TX_RING_SIZE 64 /* Tx ring size */
#define RX_RING_SIZE 64 /* Rx ring size */
+#define TX_RING_MIN 64
+#define RX_RING_MIN 64
+#define TX_RING_MAX 1024
+#define RX_RING_MAX 1024
#define ETHERSMALL 60
#define PKT_BUF_SZ 1538
#define SH_ETH_TSU_TIMEOUT_MS 500
@@ -585,71 +589,6 @@ enum RPADIR_BIT {
/* FDR */
#define DEFAULT_FDR_INIT 0x00000707
-enum phy_offsets {
- PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
- PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
- PHY_16 = 16,
-};
-
-/* PHY_CTRL */
-enum PHY_CTRL_BIT {
- PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
- PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
- PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
-};
-#define DM9161_PHY_C_ANEGEN 0 /* auto nego special */
-
-/* PHY_STAT */
-enum PHY_STAT_BIT {
- PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
- PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
- PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
- PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
-};
-
-/* PHY_ANA */
-enum PHY_ANA_BIT {
- PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
- PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
- PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
- PHY_A_SEL = 0x001e,
-};
-/* PHY_ANL */
-enum PHY_ANL_BIT {
- PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
- PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
- PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
- PHY_L_SEL = 0x001f,
-};
-
-/* PHY_ANE */
-enum PHY_ANE_BIT {
- PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
- PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
-};
-
-/* DM9161 */
-enum PHY_16_BIT {
- PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
- PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
- PHY_16_TXselect = 0x0400,
- PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
- PHY_16_Force100LNK = 0x0080,
- PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
- PHY_16_RPDCTR_EN = 0x0010,
- PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
- PHY_16_Sleepmode = 0x0002,
- PHY_16_RemoteLoopOut = 0x0001,
-};
-
-#define POST_RX 0x08
-#define POST_FW 0x04
-#define POST0_RX (POST_RX)
-#define POST0_FW (POST_FW)
-#define POST1_RX (POST_RX >> 2)
-#define POST1_FW (POST_FW >> 2)
-#define POST_ALL (POST0_RX | POST0_FW | POST1_RX | POST1_FW)
-
/* ARSTR */
enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
@@ -757,6 +696,7 @@ struct sh_eth_cpu_data {
unsigned no_trimd:1; /* E-DMAC DO NOT have TRIMD */
unsigned no_ade:1; /* E-DMAC DO NOT have ADE bit in EESR */
unsigned hw_crc:1; /* E-DMAC have CSMR */
+ unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
};
struct sh_eth_private {
@@ -765,13 +705,14 @@ struct sh_eth_private {
const u16 *reg_offset;
void __iomem *addr;
void __iomem *tsu_addr;
+ u32 num_rx_ring;
+ u32 num_tx_ring;
dma_addr_t rx_desc_dma;
dma_addr_t tx_desc_dma;
struct sh_eth_rxdesc *rx_ring;
struct sh_eth_txdesc *tx_ring;
struct sk_buff **rx_skbuff;
struct sk_buff **tx_skbuff;
- struct timer_list timer;
spinlock_t lock;
u32 cur_rx, dirty_rx; /* Producer/consumer ring indices */
u32 cur_tx, dirty_tx;
@@ -786,10 +727,6 @@ struct sh_eth_private {
int msg_enable;
int speed;
int duplex;
- u32 rx_int_var, tx_int_var; /* interrupt control variables */
- char post_rx; /* POST receive */
- char post_fw; /* POST forward */
- struct net_device_stats tsu_stats; /* TSU forward status */
int port; /* for TSU */
int vlan_num_ids; /* for VLAN tag filter */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index b95f2e1b33f0..70554a1b2b02 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1103,8 +1103,8 @@ static int efx_init_io(struct efx_nic *efx)
* masks event though they reject 46 bit masks.
*/
while (dma_mask > 0x7fffffffUL) {
- if (pci_dma_supported(pci_dev, dma_mask)) {
- rc = pci_set_dma_mask(pci_dev, dma_mask);
+ if (dma_supported(&pci_dev->dev, dma_mask)) {
+ rc = dma_set_mask(&pci_dev->dev, dma_mask);
if (rc == 0)
break;
}
@@ -1117,10 +1117,10 @@ static int efx_init_io(struct efx_nic *efx)
}
netif_dbg(efx, probe, efx->net_dev,
"using DMA mask %llx\n", (unsigned long long) dma_mask);
- rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
+ rc = dma_set_coherent_mask(&pci_dev->dev, dma_mask);
if (rc) {
- /* pci_set_consistent_dma_mask() is not *allowed* to
- * fail with a mask that pci_set_dma_mask() accepted,
+ /* dma_set_coherent_mask() is not *allowed* to
+ * fail with a mask that dma_set_mask() accepted,
* but just in case...
*/
netif_err(efx, probe, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index d725a8fbe1a6..182dbe2cc6e4 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -136,10 +136,10 @@ enum efx_loopback_mode {
*
* Reset methods are numbered in order of increasing scope.
*
- * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
- * @RESET_TYPE_ALL: reset everything but PCI core blocks
- * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
- * @RESET_TYPE_DISABLE: disable NIC
+ * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
+ * @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 03ded364c8da..10536f93b561 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -453,7 +453,7 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
switch (string_set) {
case ETH_SS_STATS:
for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
- strncpy(ethtool_strings[i].name,
+ strlcpy(ethtool_strings[i].name,
efx_ethtool_stats[i].name,
sizeof(ethtool_strings[i].name));
break;
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 3a1ca2bd1548..12b573a8e82b 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -25,9 +25,12 @@
#include "io.h"
#include "phy.h"
#include "workarounds.h"
+#include "selftest.h"
/* Hardware control for SFC4000 (aka Falcon). */
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
* 8 KB, 16-bit address, 32 B write block */
@@ -1034,10 +1037,34 @@ static const struct efx_nic_register_test falcon_b0_register_tests[] = {
EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
};
-static int falcon_b0_test_registers(struct efx_nic *efx)
+static int
+falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
- return efx_nic_test_registers(efx, falcon_b0_register_tests,
- ARRAY_SIZE(falcon_b0_register_tests));
+ enum reset_type reset_method = RESET_TYPE_INVISIBLE;
+ int rc, rc2;
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->loopback_modes) {
+ /* We need the 312 clock from the PHY to test the XMAC
+ * registers, so move into XGMII loopback if available */
+ if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
+ efx->loopback_mode = LOOPBACK_XGMII;
+ else
+ efx->loopback_mode = __ffs(efx->loopback_modes);
+ }
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
+
+ efx_reset_down(efx, reset_method);
+
+ tests->registers =
+ efx_nic_test_registers(efx, falcon_b0_register_tests,
+ ARRAY_SIZE(falcon_b0_register_tests))
+ ? -1 : 1;
+
+ rc = falcon_reset_hw(efx, reset_method);
+ rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ return rc ? rc : rc2;
}
/**************************************************************************
@@ -1818,7 +1845,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
.resume_wol = efx_port_dummy_op_void,
- .test_registers = falcon_b0_test_registers,
+ .test_chip = falcon_b0_test_chip,
.test_nvram = falcon_test_nvram,
.revision = EFX_REV_FALCON_B0,
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
index 6106ef15dee3..8333865d4c95 100644
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ b/drivers/net/ethernet/sfc/falcon_xmac.c
@@ -341,12 +341,12 @@ void falcon_update_stats_xmac(struct efx_nic *efx)
FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
/* Update derived statistics */
- mac_stats->tx_good_bytes =
- (mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
- mac_stats->tx_control * 64);
- mac_stats->rx_bad_bytes =
- (mac_stats->rx_bytes - mac_stats->rx_good_bytes -
- mac_stats->rx_control * 64);
+ efx_update_diff_stat(&mac_stats->tx_good_bytes,
+ mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
+ mac_stats->tx_control * 64);
+ efx_update_diff_stat(&mac_stats->rx_bad_bytes,
+ mac_stats->rx_bytes - mac_stats->rx_good_bytes -
+ mac_stats->rx_control * 64);
}
void falcon_poll_xmac(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index fea7f7300675..c3fd61f0a95c 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -662,7 +662,7 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
- unsigned int filter_idx, depth;
+ unsigned int filter_idx, depth = 0;
u32 key;
int rc;
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 17b6463e459c..fc5e7bbcbc9e 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1001,12 +1001,17 @@ static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
u8 inbuf[MC_CMD_REBOOT_IN_LEN];
- /* Atomically reboot the mcfw out of the assertion handler */
+ /* If the MC is running debug firmware, it might now be
+ * waiting for a debugger to attach, but we just want it to
+ * reboot. We set a flag that makes the command a no-op if it
+ * has already done so. We don't know what return code to
+ * expect (0 or -EIO), so ignore it.
+ */
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
+ (void) efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
}
int efx_mcdi_handle_assertion(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index fb7f65b59eb8..1d552f0664d7 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -222,6 +222,7 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
attr->index = index;
attr->type = type;
attr->limit_value = limit_value;
+ sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attr->name;
attr->dev_attr.attr.mode = S_IRUGO;
attr->dev_attr.show = reader;
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 0310b9f08c9b..db4beed97669 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -48,8 +48,7 @@
/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
-/**
- * MCDI version 1
+/* MCDI version 1
*
* Each MCDI request starts with an MCDI_HEADER, which is a 32byte
* structure, filled in by the client.
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0e575359af17..cd9c0a989692 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -68,6 +68,8 @@
#define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
+struct efx_self_tests;
+
/**
* struct efx_special_buffer - An Efx special buffer
* @addr: CPU base address of the buffer
@@ -100,7 +102,7 @@ struct efx_special_buffer {
* @len: Length of this fragment.
* This field is zero when the queue slot is empty.
* @continuation: True if this fragment is not the end of a packet.
- * @unmap_single: True if pci_unmap_single should be used.
+ * @unmap_single: True if dma_unmap_single should be used.
* @unmap_len: Length of this fragment to unmap
*/
struct efx_tx_buffer {
@@ -527,7 +529,7 @@ struct efx_phy_operations {
};
/**
- * @enum efx_phy_mode - PHY operating mode flags
+ * enum efx_phy_mode - PHY operating mode flags
* @PHY_MODE_NORMAL: on and should pass traffic
* @PHY_MODE_TX_DISABLED: on with TX disabled
* @PHY_MODE_LOW_POWER: set to low power through MDIO
@@ -901,7 +903,8 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @get_wol: Get WoL configuration from driver state
* @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
- * @test_registers: Test read/write functionality of control registers
+ * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
+ * expected to reset the NIC.
* @test_nvram: Test validity of NVRAM contents
* @revision: Hardware architecture revision
* @mem_map_size: Memory BAR mapped size
@@ -946,7 +949,7 @@ struct efx_nic_type {
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
int (*set_wol)(struct efx_nic *efx, u32 type);
void (*resume_wol)(struct efx_nic *efx);
- int (*test_registers)(struct efx_nic *efx);
+ int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
int revision;
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 4a9a5beec8fc..326d799762d6 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -124,9 +124,6 @@ int efx_nic_test_registers(struct efx_nic *efx,
unsigned address = 0, i, j;
efx_oword_t mask, imask, original, reg, buf;
- /* Falcon should be in loopback to isolate the XMAC from the PHY */
- WARN_ON(!LOOPBACK_INTERNAL(efx));
-
for (i = 0; i < n_regs; ++i) {
address = regs[i].address;
mask = imask = regs[i].mask;
@@ -308,8 +305,8 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len)
{
- buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
- &buffer->dma_addr);
+ buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, GFP_ATOMIC);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
@@ -320,8 +317,8 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
{
if (buffer->addr) {
- pci_free_consistent(efx->pci_dev, buffer->len,
- buffer->addr, buffer->dma_addr);
+ dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+ buffer->addr, buffer->dma_addr);
buffer->addr = NULL;
}
}
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index f48ccf6bb3b9..bab5cd9f5740 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -294,6 +294,24 @@ extern bool falcon_xmac_check_fault(struct efx_nic *efx);
extern int falcon_reconfigure_xmac(struct efx_nic *efx);
extern void falcon_update_stats_xmac(struct efx_nic *efx);
+/* Some statistics are computed as A - B where A and B each increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously. If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value. Since the computed value will never be greater than the
+ * true value, we can achieve this by only storing the computed value
+ * when it increases.
+ */
+static inline void efx_update_diff_stat(u64 *stat, u64 diff)
+{
+ if ((s64)(diff - *stat) > 0)
+ *stat = diff;
+}
+
/* Interrupts and test events */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 243e91f3dff9..719319b89d7a 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -155,11 +155,11 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
rx_buf->len = skb_len - NET_IP_ALIGN;
rx_buf->flags = 0;
- rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+ rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
skb->data, rx_buf->len,
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(efx->pci_dev,
- rx_buf->dma_addr))) {
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+ rx_buf->dma_addr))) {
dev_kfree_skb_any(skb);
rx_buf->u.skb = NULL;
return -EIO;
@@ -200,10 +200,10 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
efx->rx_buffer_order);
if (unlikely(page == NULL))
return -ENOMEM;
- dma_addr = pci_map_page(efx->pci_dev, page, 0,
+ dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
efx_rx_buf_size(efx),
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
__free_pages(page, efx->rx_buffer_order);
return -EIO;
}
@@ -247,14 +247,14 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
state = page_address(rx_buf->u.page);
if (--state->refcnt == 0) {
- pci_unmap_page(efx->pci_dev,
+ dma_unmap_page(&efx->pci_dev->dev,
state->dma_addr,
efx_rx_buf_size(efx),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
- pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
- rx_buf->len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
+ rx_buf->len, DMA_FROM_DEVICE);
}
}
@@ -336,6 +336,7 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
/**
* efx_fast_push_rx_descriptors - push new RX descriptors quickly
* @rx_queue: RX descriptor queue
+ *
* This will aim to fill the RX descriptor queue up to
* @rx_queue->@max_fill. If there is insufficient atomic
* memory to do so, a slow fill will be scheduled.
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index de4c0069f5b2..96068d15b601 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -120,19 +120,6 @@ static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
return rc;
}
-static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
-{
- int rc = 0;
-
- /* Test register access */
- if (efx->type->test_registers) {
- rc = efx->type->test_registers(efx);
- tests->registers = rc ? -1 : 1;
- }
-
- return rc;
-}
-
/**************************************************************************
*
* Interrupt and event queue testing
@@ -488,7 +475,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
skb = state->skbs[i];
if (skb && !skb_shared(skb))
++tx_done;
- dev_kfree_skb_any(skb);
+ dev_kfree_skb(skb);
}
netif_tx_unlock_bh(efx->net_dev);
@@ -699,8 +686,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
{
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
int phy_mode = efx->phy_mode;
- enum reset_type reset_method = RESET_TYPE_INVISIBLE;
- int rc_test = 0, rc_reset = 0, rc;
+ int rc_test = 0, rc_reset, rc;
efx_selftest_async_cancel(efx);
@@ -737,44 +723,26 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
*/
netif_device_detach(efx->net_dev);
- mutex_lock(&efx->mac_lock);
- if (efx->loopback_modes) {
- /* We need the 312 clock from the PHY to test the XMAC
- * registers, so move into XGMII loopback if available */
- if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
- efx->loopback_mode = LOOPBACK_XGMII;
- else
- efx->loopback_mode = __ffs(efx->loopback_modes);
- }
-
- __efx_reconfigure_port(efx);
- mutex_unlock(&efx->mac_lock);
-
- /* free up all consumers of SRAM (including all the queues) */
- efx_reset_down(efx, reset_method);
-
- rc = efx_test_chip(efx, tests);
- if (rc && !rc_test)
- rc_test = rc;
+ if (efx->type->test_chip) {
+ rc_reset = efx->type->test_chip(efx, tests);
+ if (rc_reset) {
+ netif_err(efx, hw, efx->net_dev,
+ "Unable to recover from chip test\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ return rc_reset;
+ }
- /* reset the chip to recover from the register test */
- rc_reset = efx->type->reset(efx, reset_method);
+ if ((tests->registers < 0) && !rc_test)
+ rc_test = -EIO;
+ }
/* Ensure that the phy is powered and out of loopback
* for the bist and loopback tests */
+ mutex_lock(&efx->mac_lock);
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
efx->loopback_mode = LOOPBACK_NONE;
-
- rc = efx_reset_up(efx, reset_method, rc_reset == 0);
- if (rc && !rc_reset)
- rc_reset = rc;
-
- if (rc_reset) {
- netif_err(efx, drv, efx->net_dev,
- "Unable to recover from chip test\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- return rc_reset;
- }
+ __efx_reconfigure_port(efx);
+ mutex_unlock(&efx->mac_lock);
rc = efx_test_phy(efx, tests, flags);
if (rc && !rc_test)
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 9f8d7cea3967..6bafd216e55e 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -25,10 +25,12 @@
#include "workarounds.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
+#include "selftest.h"
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
static void siena_init_wol(struct efx_nic *efx);
+static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -154,10 +156,29 @@ static const struct efx_nic_register_test siena_register_tests[] = {
EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
};
-static int siena_test_registers(struct efx_nic *efx)
+static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
- return efx_nic_test_registers(efx, siena_register_tests,
- ARRAY_SIZE(siena_register_tests));
+ enum reset_type reset_method = reset_method;
+ int rc, rc2;
+
+ efx_reset_down(efx, reset_method);
+
+ /* Reset the chip immediately so that it is completely
+ * quiescent regardless of what any VF driver does.
+ */
+ rc = siena_reset_hw(efx, reset_method);
+ if (rc)
+ goto out;
+
+ tests->registers =
+ efx_nic_test_registers(efx, siena_register_tests,
+ ARRAY_SIZE(siena_register_tests))
+ ? -1 : 1;
+
+ rc = siena_reset_hw(efx, reset_method);
+out:
+ rc2 = efx_reset_up(efx, reset_method, rc == 0);
+ return rc ? rc : rc2;
}
/**************************************************************************
@@ -437,8 +458,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
MAC_STAT(tx_bytes, TX_BYTES);
MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
- mac_stats->tx_good_bytes = (mac_stats->tx_bytes -
- mac_stats->tx_bad_bytes);
+ efx_update_diff_stat(&mac_stats->tx_good_bytes,
+ mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
MAC_STAT(tx_packets, TX_PKTS);
MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
MAC_STAT(tx_pause, TX_PAUSE_PKTS);
@@ -471,8 +492,8 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
MAC_STAT(rx_bytes, RX_BYTES);
MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
- mac_stats->rx_good_bytes = (mac_stats->rx_bytes -
- mac_stats->rx_bad_bytes);
+ efx_update_diff_stat(&mac_stats->rx_good_bytes,
+ mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
MAC_STAT(rx_packets, RX_PKTS);
MAC_STAT(rx_good, RX_GOOD_PKTS);
MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
@@ -649,7 +670,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
- .test_registers = siena_test_registers,
+ .test_chip = siena_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
.revision = EFX_REV_SIENA_A0,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 94d0365b31cd..9b225a7769f7 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -36,15 +36,15 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
- struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
if (buffer->unmap_single)
- pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+ DMA_TO_DEVICE);
buffer->unmap_len = 0;
buffer->unmap_single = false;
}
@@ -138,7 +138,7 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
struct efx_nic *efx = tx_queue->efx;
- struct pci_dev *pci_dev = efx->pci_dev;
+ struct device *dma_dev = &efx->pci_dev->dev;
struct efx_tx_buffer *buffer;
skb_frag_t *fragment;
unsigned int len, unmap_len = 0, fill_level, insert_ptr;
@@ -167,17 +167,17 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
fill_level = tx_queue->insert_count - tx_queue->old_read_count;
q_space = efx->txq_entries - 1 - fill_level;
- /* Map for DMA. Use pci_map_single rather than pci_map_page
+ /* Map for DMA. Use dma_map_single rather than dma_map_page
* since this is more efficient on machines with sparse
* memory.
*/
unmap_single = true;
- dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
/* Process all fragments */
while (1) {
- if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
- goto pci_err;
+ if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+ goto dma_err;
/* Store fields for marking in the per-fragment final
* descriptor */
@@ -246,7 +246,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
i++;
/* Map for DMA */
unmap_single = false;
- dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
+ dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
DMA_TO_DEVICE);
}
@@ -261,7 +261,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return NETDEV_TX_OK;
- pci_err:
+ dma_err:
netif_err(efx, tx_err, efx->net_dev,
" TX queue %d could not map skb with %d bytes %d "
"fragments for DMA\n", tx_queue->queue, skb->len,
@@ -284,11 +284,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Free the fragment we were mid-way through pushing */
if (unmap_len) {
if (unmap_single)
- pci_unmap_single(pci_dev, unmap_addr, unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(dma_dev, unmap_addr, unmap_len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(pci_dev, unmap_addr, unmap_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(dma_dev, unmap_addr, unmap_len,
+ DMA_TO_DEVICE);
}
return rc;
@@ -651,17 +651,8 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto !=
protocol);
if (protocol == htons(ETH_P_8021Q)) {
- /* Find the encapsulated protocol; reset network header
- * and transport header based on that. */
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
- skb_set_network_header(skb, sizeof(*veh));
- if (protocol == htons(ETH_P_IP))
- skb_set_transport_header(skb, sizeof(*veh) +
- 4 * ip_hdr(skb)->ihl);
- else if (protocol == htons(ETH_P_IPV6))
- skb_set_transport_header(skb, sizeof(*veh) +
- sizeof(struct ipv6hdr));
}
if (protocol == htons(ETH_P_IP)) {
@@ -684,20 +675,19 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
*/
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{
-
- struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+ struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
struct efx_tso_header *tsoh;
dma_addr_t dma_addr;
u8 *base_kva, *kva;
- base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr);
+ base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
if (base_kva == NULL) {
netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
"Unable to allocate page for TSO headers\n");
return -ENOMEM;
}
- /* pci_alloc_consistent() allocates pages. */
+ /* dma_alloc_coherent() allocates pages. */
EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
@@ -714,7 +704,7 @@ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
/* Free up a TSO header, and all others in the same page. */
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
struct efx_tso_header *tsoh,
- struct pci_dev *pci_dev)
+ struct device *dma_dev)
{
struct efx_tso_header **p;
unsigned long base_kva;
@@ -731,7 +721,7 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
p = &(*p)->next;
}
- pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
+ dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}
static struct efx_tso_header *
@@ -743,11 +733,11 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
if (unlikely(!tsoh))
return NULL;
- tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
+ tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
TSOH_BUFFER(tsoh), header_len,
- PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
- tsoh->dma_addr))) {
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
+ tsoh->dma_addr))) {
kfree(tsoh);
return NULL;
}
@@ -759,9 +749,9 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
static void
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
- pci_unmap_single(tx_queue->efx->pci_dev,
+ dma_unmap_single(&tx_queue->efx->pci_dev->dev,
tsoh->dma_addr, tsoh->unmap_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
kfree(tsoh);
}
@@ -892,13 +882,13 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
if (buffer->unmap_single)
- pci_unmap_single(tx_queue->efx->pci_dev,
+ dma_unmap_single(&tx_queue->efx->pci_dev->dev,
unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
else
- pci_unmap_page(tx_queue->efx->pci_dev,
+ dma_unmap_page(&tx_queue->efx->pci_dev->dev,
unmap_addr, buffer->unmap_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
buffer->unmap_len = 0;
}
buffer->len = 0;
@@ -927,7 +917,6 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst);
- st->packet_space = st->full_packet_size;
st->out_len = skb->len - st->header_len;
st->unmap_len = 0;
st->unmap_single = false;
@@ -954,9 +943,9 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
int hl = st->header_len;
int len = skb_headlen(skb) - hl;
- st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl,
- len, PCI_DMA_TODEVICE);
- if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+ st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
+ len, DMA_TO_DEVICE);
+ if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
st->unmap_single = true;
st->unmap_len = len;
st->in_len = len;
@@ -1008,7 +997,7 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
buffer->continuation = !end_of_packet;
if (st->in_len == 0) {
- /* Transfer ownership of the pci mapping */
+ /* Transfer ownership of the DMA mapping */
buffer->unmap_len = st->unmap_len;
buffer->unmap_single = st->unmap_single;
st->unmap_len = 0;
@@ -1181,18 +1170,18 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
mem_err:
netif_err(efx, tx_err, efx->net_dev,
- "Out of memory for TSO headers, or PCI mapping error\n");
+ "Out of memory for TSO headers, or DMA mapping error\n");
dev_kfree_skb_any(skb);
unwind:
/* Free the DMA mapping we were in the process of writing out */
if (state.unmap_len) {
if (state.unmap_single)
- pci_unmap_single(efx->pci_dev, state.unmap_addr,
- state.unmap_len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
+ state.unmap_len, DMA_TO_DEVICE);
else
- pci_unmap_page(efx->pci_dev, state.unmap_addr,
- state.unmap_len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&efx->pci_dev->dev, state.unmap_addr,
+ state.unmap_len, DMA_TO_DEVICE);
}
efx_enqueue_unwind(tx_queue);
@@ -1216,5 +1205,5 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
while (tx_queue->tso_headers_free != NULL)
efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
- tx_queue->efx->pci_dev);
+ &tx_queue->efx->pci_dev->dev);
}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index ac149d99f78f..b5ba3084c7fc 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -583,7 +583,7 @@ static inline void ioc3_rx(struct net_device *dev)
unsigned long *rxr;
u32 w0, err;
- rxr = (unsigned long *) ip->rxr; /* Ring base */
+ rxr = ip->rxr; /* Ring base */
rx_entry = ip->rx_ci; /* RX consume index */
n_entry = ip->rx_pi;
@@ -903,7 +903,7 @@ static void ioc3_alloc_rings(struct net_device *dev)
if (ip->rxr == NULL) {
/* Allocate and initialize rx ring. 4kb = 512 entries */
ip->rxr = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
- rxr = (unsigned long *) ip->rxr;
+ rxr = ip->rxr;
if (!rxr)
printk("ioc3_alloc_rings(): get_zeroed_page() failed!\n");
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8814b2f5d46f..8d15f7a74b45 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -773,7 +773,7 @@ static int smc911x_phy_fixed(struct net_device *dev)
return 1;
}
-/*
+/**
* smc911x_phy_reset - reset the phy
* @dev: net device
* @phy: phy address
@@ -819,7 +819,7 @@ static int smc911x_phy_reset(struct net_device *dev, int phy)
return reg & PMT_CTRL_PHY_RST_;
}
-/*
+/**
* smc911x_phy_powerdown - powerdown phy
* @dev: net device
* @phy: phy address
@@ -837,7 +837,7 @@ static void smc911x_phy_powerdown(struct net_device *dev, int phy)
SMC_SET_PHY_BMCR(lp, phy, bmcr);
}
-/*
+/**
* smc911x_phy_check_media - check the media status and adjust BMCR
* @dev: net device
* @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index fee449355014..318adc935a53 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -942,7 +942,7 @@ static int smc_phy_fixed(struct net_device *dev)
return 1;
}
-/*
+/**
* smc_phy_reset - reset the phy
* @dev: net device
* @phy: phy address
@@ -976,7 +976,7 @@ static int smc_phy_reset(struct net_device *dev, int phy)
return bmcr & BMCR_RESET;
}
-/*
+/**
* smc_phy_powerdown - powerdown phy
* @dev: net device
*
@@ -1000,7 +1000,7 @@ static void smc_phy_powerdown(struct net_device *dev)
smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
}
-/*
+/**
* smc_phy_check_media - check the media status and adjust TCR
* @dev: net device
* @init: set true for initialisation
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 1466e5d2af44..62d1baf111ea 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1442,6 +1442,14 @@ smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
smsc911x_mac_write(pdata, ADDRL, mac_low32);
}
+static void smsc911x_disable_irq_chip(struct net_device *dev)
+{
+ struct smsc911x_data *pdata = netdev_priv(dev);
+
+ smsc911x_reg_write(pdata, INT_EN, 0);
+ smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+}
+
static int smsc911x_open(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
@@ -1494,8 +1502,7 @@ static int smsc911x_open(struct net_device *dev)
spin_unlock_irq(&pdata->mac_lock);
/* Initialise irqs, but leave all sources disabled */
- smsc911x_reg_write(pdata, INT_EN, 0);
- smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+ smsc911x_disable_irq_chip(dev);
/* Set interrupt deassertion to 100uS */
intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
@@ -2215,9 +2222,6 @@ static int __devinit smsc911x_init(struct net_device *dev)
if (smsc911x_soft_reset(pdata))
return -ENODEV;
- /* Disable all interrupt sources until we bring the device up */
- smsc911x_reg_write(pdata, INT_EN, 0);
-
ether_setup(dev);
dev->flags |= IFF_MULTICAST;
netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT);
@@ -2434,8 +2438,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
smsc911x_reg_write(pdata, INT_CFG, intcfg);
/* Ensure interrupts are globally disabled before connecting ISR */
- smsc911x_reg_write(pdata, INT_EN, 0);
- smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
+ smsc911x_disable_irq_chip(dev);
retval = request_irq(dev->irq, smsc911x_irqhandler,
irq_flags | IRQF_SHARED, dev->name, dev);
@@ -2485,7 +2488,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
eth_hw_addr_random(dev);
smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
SMSC_TRACE(pdata, probe,
- "MAC Address is set to random_ether_addr");
+ "MAC Address is set to eth_random_addr");
}
}
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index fd33b21f6c96..1fcd914ec39b 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -1640,8 +1640,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_free_io_4;
/* descriptors are aligned due to the nature of pci_alloc_consistent */
- pd->tx_ring = (struct smsc9420_dma_desc *)
- (pd->rx_ring + RX_RING_SIZE);
+ pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
pd->tx_dma_addr = pd->rx_dma_addr +
sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index bcd54d6e94fd..e2d083228f3a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -95,6 +95,16 @@ struct stmmac_extra_stats {
unsigned long poll_n;
unsigned long sched_timer_n;
unsigned long normal_irq_n;
+ unsigned long mmc_tx_irq_n;
+ unsigned long mmc_rx_irq_n;
+ unsigned long mmc_rx_csum_offload_irq_n;
+ /* EEE */
+ unsigned long irq_receive_pmt_irq_n;
+ unsigned long irq_tx_path_in_lpi_mode_n;
+ unsigned long irq_tx_path_exit_lpi_mode_n;
+ unsigned long irq_rx_path_in_lpi_mode_n;
+ unsigned long irq_rx_path_exit_lpi_mode_n;
+ unsigned long phy_eee_wakeup_error_n;
};
/* CSR Frequency Access Defines*/
@@ -162,6 +172,17 @@ enum tx_dma_irq_status {
handle_tx_rx = 3,
};
+enum core_specific_irq_mask {
+ core_mmc_tx_irq = 1,
+ core_mmc_rx_irq = 2,
+ core_mmc_rx_csum_offload_irq = 4,
+ core_irq_receive_pmt_irq = 8,
+ core_irq_tx_path_in_lpi_mode = 16,
+ core_irq_tx_path_exit_lpi_mode = 32,
+ core_irq_rx_path_in_lpi_mode = 64,
+ core_irq_rx_path_exit_lpi_mode = 128,
+};
+
/* DMA HW capabilities */
struct dma_features {
unsigned int mbps_10_100;
@@ -208,6 +229,10 @@ struct dma_features {
#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
+/* Default LPI timers */
+#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
+#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
+
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
@@ -278,7 +303,7 @@ struct stmmac_ops {
/* Dump MAC registers */
void (*dump_regs) (void __iomem *ioaddr);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (void __iomem *ioaddr);
+ int (*host_irq_status) (void __iomem *ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev, int id);
/* Flow control setting */
@@ -291,6 +316,10 @@ struct stmmac_ops {
unsigned int reg_n);
void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
+ void (*set_eee_mode) (void __iomem *ioaddr);
+ void (*reset_eee_mode) (void __iomem *ioaddr);
+ void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
+ void (*set_eee_pls) (void __iomem *ioaddr, int link);
};
struct mac_link {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 23478bf4ed7a..f90fcb5f9573 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -36,6 +36,7 @@
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
enum dwmac1000_irq_status {
+ lpiis_irq = 0x400,
time_stamp_irq = 0x0200,
mmc_rx_csum_offload_irq = 0x0080,
mmc_tx_irq = 0x0040,
@@ -60,6 +61,25 @@ enum power_event {
power_down = 0x00000001,
};
+/* Energy Efficient Ethernet (EEE)
+ *
+ * LPI status, timer and control register offset
+ */
+#define LPI_CTRL_STATUS 0x0030
+#define LPI_TIMER_CTRL 0x0034
+
+/* LPI control and status defines */
+#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
+#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
+#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
+
/* GMAC HW ADDR regs */
#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
(reg * 8))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b5e4d02f15c9..bfe022605498 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -194,26 +194,107 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
}
-static void dwmac1000_irq_status(void __iomem *ioaddr)
+static int dwmac1000_irq_status(void __iomem *ioaddr)
{
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ int status = 0;
/* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ if ((intr_status & mmc_tx_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_TX_INTR));
- if (unlikely(intr_status & mmc_rx_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ status |= core_mmc_tx_irq;
+ }
+ if (unlikely(intr_status & mmc_rx_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_INTR));
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ status |= core_mmc_rx_irq;
+ }
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
+ CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ status |= core_mmc_rx_csum_offload_irq;
+ }
if (unlikely(intr_status & pmt_irq)) {
- CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
/* clear the PMT bits 5 and 6 by reading the PMT
* status register. */
readl(ioaddr + GMAC_PMT);
+ status |= core_irq_receive_pmt_irq;
}
+ /* MAC trx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & lpiis_irq) {
+ /* Clean LPI interrupt by reading the Reg 12 */
+ u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS);
+
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEN) {
+ CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
+ status |= core_irq_tx_path_in_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_TLPIEX) {
+ CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
+ status |= core_irq_tx_path_exit_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEN) {
+ CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
+ status |= core_irq_rx_path_in_lpi_mode;
+ }
+ if (lpi_status & LPI_CTRL_STATUS_RLPIEX) {
+ CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
+ status |= core_irq_rx_path_exit_lpi_mode;
+ }
+ }
+
+ return status;
+}
+
+static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
+{
+ u32 value;
+
+ /* Enable the link status receive on RGMII, SGMII ore SMII
+ * receive path and instruct the transmit to enter in LPI
+ * state. */
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
+{
+ u32 value;
+
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
+{
+ u32 value;
+
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+
+ if (link)
+ value |= LPI_CTRL_STATUS_PLS;
+ else
+ value &= ~LPI_CTRL_STATUS_PLS;
+
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
+{
+ int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
+
+ /* Program the timers in the LPI timer control register:
+ * LS: minimum time (ms) for which the link
+ * status from PHY should be ok before transmitting
+ * the LPI pattern.
+ * TW: minimum time (us) for which the core waits
+ * after it has stopped transmitting the LPI pattern.
+ */
+ writel(value, ioaddr + LPI_TIMER_CTRL);
}
static const struct stmmac_ops dwmac1000_ops = {
@@ -226,6 +307,10 @@ static const struct stmmac_ops dwmac1000_ops = {
.pmt = dwmac1000_pmt,
.set_umac_addr = dwmac1000_set_umac_addr,
.get_umac_addr = dwmac1000_get_umac_addr,
+ .set_eee_mode = dwmac1000_set_eee_mode,
+ .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_eee_timer = dwmac1000_set_eee_timer,
+ .set_eee_pls = dwmac1000_set_eee_pls,
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 19e0f4eed2bc..f83210e7c221 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -72,9 +72,9 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
return 0;
}
-static void dwmac100_irq_status(void __iomem *ioaddr)
+static int dwmac100_irq_status(void __iomem *ioaddr)
{
- return;
+ return 0;
}
static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 6e0360f9cfde..e678ce39d014 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -70,6 +70,7 @@
#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
/* DMA Status register defines */
+#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index fb8377da1687..4b785e10f2ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 1, bmax,
csum);
-
+ wmb();
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -59,6 +59,7 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
len, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum);
+ wmb();
priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dc20c56efc9d..ab4c376cb276 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -87,6 +87,12 @@ struct stmmac_priv {
#endif
int clk_csr;
int synopsys_id;
+ struct timer_list eee_ctrl_timer;
+ bool tx_path_in_lpi_mode;
+ int lpi_irq;
+ int eee_enabled;
+ int eee_active;
+ int tx_lpi_timer;
};
extern int phyaddr;
@@ -104,6 +110,8 @@ int stmmac_dvr_remove(struct net_device *ndev);
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
void __iomem *addr);
+void stmmac_disable_eee_mode(struct stmmac_priv *priv);
+bool stmmac_eee_init(struct stmmac_priv *priv);
#ifdef CONFIG_HAVE_CLK
static inline int stmmac_clk_enable(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index ce431846fc6f..76fd61aa005f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -93,6 +93,16 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(poll_n),
STMMAC_STAT(sched_timer_n),
STMMAC_STAT(normal_irq_n),
+ STMMAC_STAT(normal_irq_n),
+ STMMAC_STAT(mmc_tx_irq_n),
+ STMMAC_STAT(mmc_rx_irq_n),
+ STMMAC_STAT(mmc_rx_csum_offload_irq_n),
+ STMMAC_STAT(irq_receive_pmt_irq_n),
+ STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
+ STMMAC_STAT(phy_eee_wakeup_error_n),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
@@ -366,6 +376,11 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
(*(u32 *)p);
}
}
+ if (priv->eee_enabled) {
+ int val = phy_get_eee_err(priv->phydev);
+ if (val)
+ priv->xstats.phy_eee_wakeup_error_n = val;
+ }
}
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
@@ -464,6 +479,46 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
+static int stmmac_ethtool_op_get_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->dma_cap.eee)
+ return -EOPNOTSUPP;
+
+ edata->eee_enabled = priv->eee_enabled;
+ edata->eee_active = priv->eee_active;
+ edata->tx_lpi_timer = priv->tx_lpi_timer;
+
+ return phy_ethtool_get_eee(priv->phydev, edata);
+}
+
+static int stmmac_ethtool_op_set_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ priv->eee_enabled = edata->eee_enabled;
+
+ if (!priv->eee_enabled)
+ stmmac_disable_eee_mode(priv);
+ else {
+ /* We are asking for enabling the EEE but it is safe
+ * to verify all by invoking the eee_init function.
+ * In case of failure it will return an error.
+ */
+ priv->eee_enabled = stmmac_eee_init(priv);
+ if (!priv->eee_enabled)
+ return -EOPNOTSUPP;
+
+ /* Do not change tx_lpi_timer in case of failure */
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ }
+
+ return phy_ethtool_set_eee(priv->phydev, edata);
+}
+
static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -480,6 +535,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_strings = stmmac_get_strings,
.get_wol = stmmac_get_wol,
.set_wol = stmmac_set_wol,
+ .get_eee = stmmac_ethtool_op_get_eee,
+ .set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
.get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 51b3b68528ee..f6b04c1a3672 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -133,6 +133,12 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+#define STMMAC_DEFAULT_LPI_TIMER 1000
+static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
+#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_STMMAC_DEBUG_FS
@@ -161,6 +167,8 @@ static void stmmac_verify_args(void)
flow_ctrl = FLOW_OFF;
if (unlikely((pause < 0) || (pause > 0xffff)))
pause = PAUSE_TIME;
+ if (eee_timer < 0)
+ eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
@@ -229,6 +237,85 @@ static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
phydev->speed);
}
+static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+{
+ /* Check and enter in LPI mode */
+ if ((priv->dirty_tx == priv->cur_tx) &&
+ (priv->tx_path_in_lpi_mode == false))
+ priv->hw->mac->set_eee_mode(priv->ioaddr);
+}
+
+void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+{
+ /* Exit and disable EEE in case of we are are in LPI state. */
+ priv->hw->mac->reset_eee_mode(priv->ioaddr);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * stmmac_eee_ctrl_timer
+ * @arg : data hook
+ * Description:
+ * If there is no data transfer and if we are not in LPI state,
+ * then MAC Transmitter can be moved to LPI state.
+ */
+static void stmmac_eee_ctrl_timer(unsigned long arg)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+}
+
+/**
+ * stmmac_eee_init
+ * @priv: private device pointer
+ * Description:
+ * If the EEE support has been enabled while configuring the driver,
+ * if the GMAC actually supports the EEE (from the HW cap reg) and the
+ * phy can also manage EEE, so enable the LPI state and start the timer
+ * to verify if the tx path can enter in LPI state.
+ */
+bool stmmac_eee_init(struct stmmac_priv *priv)
+{
+ bool ret = false;
+
+ /* MAC core supports the EEE feature. */
+ if (priv->dma_cap.eee) {
+ /* Check if the PHY supports EEE */
+ if (phy_init_eee(priv->phydev, 1))
+ goto out;
+
+ priv->eee_active = 1;
+ init_timer(&priv->eee_ctrl_timer);
+ priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
+ priv->eee_ctrl_timer.data = (unsigned long)priv;
+ priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
+ add_timer(&priv->eee_ctrl_timer);
+
+ priv->hw->mac->set_eee_timer(priv->ioaddr,
+ STMMAC_DEFAULT_LIT_LS_TIMER,
+ priv->tx_lpi_timer);
+
+ pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
+
+ ret = true;
+ }
+out:
+ return ret;
+}
+
+static void stmmac_eee_adjust(struct stmmac_priv *priv)
+{
+ /* When the EEE has been already initialised we have to
+ * modify the PLS bit in the LPI ctrl & status reg according
+ * to the PHY link status. For this reason.
+ */
+ if (priv->eee_enabled)
+ priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+}
+
/**
* stmmac_adjust_link
* @dev: net device structure
@@ -249,6 +336,7 @@ static void stmmac_adjust_link(struct net_device *dev)
phydev->addr, phydev->link);
spin_lock_irqsave(&priv->lock, flags);
+
if (phydev->link) {
u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
@@ -315,6 +403,8 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
+ stmmac_eee_adjust(priv);
+
spin_unlock_irqrestore(&priv->lock, flags);
DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
@@ -332,7 +422,7 @@ static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev;
- char phy_id[MII_BUS_ID_SIZE + 3];
+ char phy_id_fmt[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
int interface = priv->plat->interface;
priv->oldlink = 0;
@@ -346,11 +436,12 @@ static int stmmac_init_phy(struct net_device *dev)
snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
priv->plat->bus_id);
- snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
- pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
- phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, 0,
+ interface);
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -677,7 +768,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->hw->desc->release_tx_desc(p);
- entry = (++priv->dirty_tx) % txsize;
+ priv->dirty_tx++;
}
if (unlikely(netif_queue_stopped(priv->dev) &&
stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
@@ -689,6 +780,11 @@ static void stmmac_tx(struct stmmac_priv *priv)
}
netif_tx_unlock(priv->dev);
}
+
+ if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+ }
spin_unlock(&priv->tx_lock);
}
@@ -1027,6 +1123,17 @@ static int stmmac_open(struct net_device *dev)
}
}
+ /* Request the IRQ lines */
+ if (priv->lpi_irq != -ENXIO) {
+ ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
+ dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ goto open_error_lpiirq;
+ }
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_set_mac(priv->ioaddr, true);
@@ -1062,12 +1169,19 @@ static int stmmac_open(struct net_device *dev)
if (priv->phydev)
phy_start(priv->phydev);
+ priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
+ priv->eee_enabled = stmmac_eee_init(priv);
+
napi_enable(&priv->napi);
skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
return 0;
+open_error_lpiirq:
+ if (priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
+
open_error_wolirq:
free_irq(dev->irq, dev);
@@ -1093,6 +1207,9 @@ static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ if (priv->eee_enabled)
+ del_timer_sync(&priv->eee_ctrl_timer);
+
/* Stop and disconnect the PHY */
if (priv->phydev) {
phy_stop(priv->phydev);
@@ -1115,6 +1232,8 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
if (priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
+ if (priv->lpi_irq != -ENXIO)
+ free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
priv->hw->dma->stop_tx(priv->ioaddr);
@@ -1164,6 +1283,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock(&priv->tx_lock);
+ if (priv->tx_path_in_lpi_mode)
+ stmmac_disable_eee_mode(priv);
+
entry = priv->cur_tx % txsize;
#ifdef STMMAC_XMIT_DEBUG
@@ -1212,6 +1334,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
wmb();
priv->hw->desc->set_tx_owner(desc);
+ wmb();
}
/* Interrupt on completition only for the latest segment */
@@ -1227,6 +1350,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* To avoid raise condition */
priv->hw->desc->set_tx_owner(first);
+ wmb();
priv->cur_tx++;
@@ -1290,6 +1414,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
wmb();
priv->hw->desc->set_rx_owner(p + entry);
+ wmb();
}
}
@@ -1308,7 +1433,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
display_ring(priv->dma_rx, rxsize);
}
#endif
- count = 0;
while (!priv->hw->desc->get_rx_owner(p)) {
int status;
@@ -1541,10 +1665,37 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
return IRQ_NONE;
}
- if (priv->plat->has_gmac)
- /* To handle GMAC own interrupts */
- priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
+ /* To handle GMAC own interrupts */
+ if (priv->plat->has_gmac) {
+ int status = priv->hw->mac->host_irq_status((void __iomem *)
+ dev->base_addr);
+ if (unlikely(status)) {
+ if (status & core_mmc_tx_irq)
+ priv->xstats.mmc_tx_irq_n++;
+ if (status & core_mmc_rx_irq)
+ priv->xstats.mmc_rx_irq_n++;
+ if (status & core_mmc_rx_csum_offload_irq)
+ priv->xstats.mmc_rx_csum_offload_irq_n++;
+ if (status & core_irq_receive_pmt_irq)
+ priv->xstats.irq_receive_pmt_irq_n++;
+
+ /* For LPI we need to save the tx status */
+ if (status & core_irq_tx_path_in_lpi_mode) {
+ priv->xstats.irq_tx_path_in_lpi_mode_n++;
+ priv->tx_path_in_lpi_mode = true;
+ }
+ if (status & core_irq_tx_path_exit_lpi_mode) {
+ priv->xstats.irq_tx_path_exit_lpi_mode_n++;
+ priv->tx_path_in_lpi_mode = false;
+ }
+ if (status & core_irq_rx_path_in_lpi_mode)
+ priv->xstats.irq_rx_path_in_lpi_mode_n++;
+ if (status & core_irq_rx_path_exit_lpi_mode)
+ priv->xstats.irq_rx_path_exit_lpi_mode_n++;
+ }
+ }
+ /* To handle DMA interrupts */
stmmac_dma_interrupt(priv);
return IRQ_HANDLED;
@@ -2130,42 +2281,38 @@ static int __init stmmac_cmdline_opt(char *str)
return -EINVAL;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "debug:", 6)) {
- if (strict_strtoul(opt + 6, 0, (unsigned long *)&debug))
+ if (kstrtoint(opt + 6, 0, &debug))
goto err;
} else if (!strncmp(opt, "phyaddr:", 8)) {
- if (strict_strtoul(opt + 8, 0,
- (unsigned long *)&phyaddr))
+ if (kstrtoint(opt + 8, 0, &phyaddr))
goto err;
} else if (!strncmp(opt, "dma_txsize:", 11)) {
- if (strict_strtoul(opt + 11, 0,
- (unsigned long *)&dma_txsize))
+ if (kstrtoint(opt + 11, 0, &dma_txsize))
goto err;
} else if (!strncmp(opt, "dma_rxsize:", 11)) {
- if (strict_strtoul(opt + 11, 0,
- (unsigned long *)&dma_rxsize))
+ if (kstrtoint(opt + 11, 0, &dma_rxsize))
goto err;
} else if (!strncmp(opt, "buf_sz:", 7)) {
- if (strict_strtoul(opt + 7, 0,
- (unsigned long *)&buf_sz))
+ if (kstrtoint(opt + 7, 0, &buf_sz))
goto err;
} else if (!strncmp(opt, "tc:", 3)) {
- if (strict_strtoul(opt + 3, 0, (unsigned long *)&tc))
+ if (kstrtoint(opt + 3, 0, &tc))
goto err;
} else if (!strncmp(opt, "watchdog:", 9)) {
- if (strict_strtoul(opt + 9, 0,
- (unsigned long *)&watchdog))
+ if (kstrtoint(opt + 9, 0, &watchdog))
goto err;
} else if (!strncmp(opt, "flow_ctrl:", 10)) {
- if (strict_strtoul(opt + 10, 0,
- (unsigned long *)&flow_ctrl))
+ if (kstrtoint(opt + 10, 0, &flow_ctrl))
goto err;
} else if (!strncmp(opt, "pause:", 6)) {
- if (strict_strtoul(opt + 6, 0, (unsigned long *)&pause))
+ if (kstrtoint(opt + 6, 0, &pause))
+ goto err;
+ } else if (!strncmp(opt, "eee_timer:", 6)) {
+ if (kstrtoint(opt + 10, 0, &eee_timer))
goto err;
#ifdef CONFIG_STMMAC_TIMER
} else if (!strncmp(opt, "tmrate:", 7)) {
- if (strict_strtoul(opt + 7, 0,
- (unsigned long *)&tmrate))
+ if (kstrtoint(opt + 7, 0, &tmrate))
goto err;
#endif
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index cf826e6b6aa1..13afb8edfadc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -125,7 +125,7 @@ err_out_req_reg_failed:
}
/**
- * stmmac_dvr_remove
+ * stmmac_pci_remove
*
* @pdev: platform device pointer
* Description: this function calls the main to free the net resources
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 680d2b8dfe27..cd01ee7ecef1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -49,7 +49,9 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
* are provided. All other properties should be added
* once needed on other platforms.
*/
- if (of_device_is_compatible(np, "st,spear600-gmac")) {
+ if (of_device_is_compatible(np, "st,spear600-gmac") ||
+ of_device_is_compatible(np, "snps,dwmac-3.70a") ||
+ of_device_is_compatible(np, "snps,dwmac")) {
plat->has_gmac = 1;
plat->pmt = 1;
}
@@ -156,6 +158,8 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
if (priv->wol_irq == -ENXIO)
priv->wol_irq = priv->dev->irq;
+ priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+
platform_set_drvdata(pdev, priv->dev);
pr_debug("STMMAC platform driver registration completed");
@@ -190,7 +194,7 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- iounmap((void *)priv->ioaddr);
+ iounmap((void __force __iomem *)priv->ioaddr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -250,7 +254,9 @@ static const struct dev_pm_ops stmmac_pltfr_pm_ops;
#endif /* CONFIG_PM */
static const struct of_device_id stmmac_dt_ids[] = {
- { .compatible = "st,spear600-gmac", },
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac"},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8c726b7004d3..c2a0fe393267 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3335,6 +3335,10 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp,
addr = np->ops->map_page(np->device, page, 0,
PAGE_SIZE, DMA_FROM_DEVICE);
+ if (!addr) {
+ __free_page(page);
+ return -ENOMEM;
+ }
niu_hash_page(rp, page, addr);
if (rp->rbr_blocks_per_page > 1)
@@ -3513,7 +3517,7 @@ static int niu_rbr_fill(struct niu *np, struct rx_ring_info *rp, gfp_t mask)
err = 0;
while (index < (rp->rbr_table_size - blocks_per_page)) {
err = niu_rbr_add_page(np, rp, mask, index);
- if (err)
+ if (unlikely(err))
break;
index += blocks_per_page;
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 2a83fc57edba..967fe8cb476e 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -233,7 +233,6 @@ static void bigmac_init_rings(struct bigmac *bp, int from_irq)
continue;
bp->rx_skbs[i] = skb;
- skb->dev = dev;
/* Because we reserve afterwards. */
skb_put(skb, ETH_FRAME_LEN);
@@ -838,7 +837,6 @@ static void bigmac_rx(struct bigmac *bp)
RX_BUF_ALLOC_SIZE - 34,
DMA_FROM_DEVICE);
bp->rx_skbs[elem] = new_skb;
- new_skb->dev = bp->dev;
skb_put(new_skb, ETH_FRAME_LEN);
skb_reserve(new_skb, 34);
this->rx_addr =
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 3cf4ab755838..9ae12d0c9632 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -752,7 +752,6 @@ static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size
if (likely(skb)) {
unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data);
skb_reserve(skb, offset);
- skb->dev = dev;
}
return skb;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index dfc00c4683e5..73f341b8befb 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1249,7 +1249,6 @@ static void happy_meal_clean_rings(struct happy_meal *hp)
static void happy_meal_init_rings(struct happy_meal *hp)
{
struct hmeal_init_block *hb = hp->happy_block;
- struct net_device *dev = hp->dev;
int i;
HMD(("happy_meal_init_rings: counters to zero, "));
@@ -1270,7 +1269,6 @@ static void happy_meal_init_rings(struct happy_meal *hp)
continue;
}
hp->rx_skbs[i] = skb;
- skb->dev = dev;
/* Because we reserve afterwards. */
skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
@@ -2031,7 +2029,6 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
}
dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
hp->rx_skbs[elem] = new_skb;
- new_skb->dev = dev;
skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
hme_write_rxd(hp, this,
(RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 7d4a040d84a2..aeded7ff1c8f 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -441,7 +441,7 @@ static void qe_rx(struct sunqe *qep)
} else {
skb_reserve(skb, 2);
skb_put(skb, len);
- skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf,
+ skb_copy_to_linear_data(skb, this_qbuf,
len);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 447a6932cab3..6ce9edd95c04 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -137,14 +137,15 @@ static void print_eth_id(struct net_device *ndev)
#define bdx_disable_interrupts(priv) \
do { WRITE_REG(priv, regIMR, 0); } while (0)
-/* bdx_fifo_init
- * create TX/RX descriptor fifo for host-NIC communication.
+/**
+ * bdx_fifo_init - create TX/RX descriptor fifo for host-NIC communication.
+ * @priv: NIC private structure
+ * @f: fifo to initialize
+ * @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
+ * @reg_XXX: offsets of registers relative to base address
+ *
* 1K extra space is allocated at the end of the fifo to simplify
* processing of descriptors that wraps around fifo's end
- * @priv - NIC private structure
- * @f - fifo to initialize
- * @fsz_type - fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
- * @reg_XXX - offsets of registers relative to base address
*
* Returns 0 on success, negative value on failure
*
@@ -177,9 +178,10 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
RET(0);
}
-/* bdx_fifo_free - free all resources used by fifo
- * @priv - NIC private structure
- * @f - fifo to release
+/**
+ * bdx_fifo_free - free all resources used by fifo
+ * @priv: NIC private structure
+ * @f: fifo to release
*/
static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
@@ -192,9 +194,9 @@ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
RET();
}
-/*
+/**
* bdx_link_changed - notifies OS about hw link state.
- * @bdx_priv - hw adapter structure
+ * @priv: hw adapter structure
*/
static void bdx_link_changed(struct bdx_priv *priv)
{
@@ -233,10 +235,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
}
-/* bdx_isr - Interrupt Service Routine for Bordeaux NIC
- * @irq - interrupt number
- * @ndev - network device
- * @regs - CPU registers
+/**
+ * bdx_isr_napi - Interrupt Service Routine for Bordeaux NIC
+ * @irq: interrupt number
+ * @dev: network device
*
* Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise
*
@@ -307,8 +309,10 @@ static int bdx_poll(struct napi_struct *napi, int budget)
return work_done;
}
-/* bdx_fw_load - loads firmware to NIC
- * @priv - NIC private structure
+/**
+ * bdx_fw_load - loads firmware to NIC
+ * @priv: NIC private structure
+ *
* Firmware is loaded via TXD fifo, so it must be initialized first.
* Firware must be loaded once per NIC not per PCI device provided by NIC (NIC
* can have few of them). So all drivers use semaphore register to choose one
@@ -380,8 +384,9 @@ static void bdx_restore_mac(struct net_device *ndev, struct bdx_priv *priv)
RET();
}
-/* bdx_hw_start - inits registers and starts HW's Rx and Tx engines
- * @priv - NIC private structure
+/**
+ * bdx_hw_start - inits registers and starts HW's Rx and Tx engines
+ * @priv: NIC private structure
*/
static int bdx_hw_start(struct bdx_priv *priv)
{
@@ -691,12 +696,13 @@ static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
RET(-EOPNOTSUPP);
}
-/*
+/**
* __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
- * by passing VLAN filter table to hardware
- * @ndev network device
- * @vid VLAN vid
- * @op add or kill operation
+ * @ndev: network device
+ * @vid: VLAN vid
+ * @op: add or kill operation
+ *
+ * Passes VLAN filter table to hardware
*/
static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
{
@@ -722,10 +728,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
RET();
}
-/*
+/**
* bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
- * @ndev network device
- * @vid VLAN vid to add
+ * @ndev: network device
+ * @vid: VLAN vid to add
*/
static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
{
@@ -733,10 +739,10 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
return 0;
}
-/*
+/**
* bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
- * @ndev network device
- * @vid VLAN vid to kill
+ * @ndev: network device
+ * @vid: VLAN vid to kill
*/
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
{
@@ -974,8 +980,9 @@ static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
* Rx Init *
*************************************************************************/
-/* bdx_rx_init - initialize RX all related HW and SW resources
- * @priv - NIC private structure
+/**
+ * bdx_rx_init - initialize RX all related HW and SW resources
+ * @priv: NIC private structure
*
* Returns 0 on success, negative value on failure
*
@@ -1016,9 +1023,10 @@ err_mem:
return -ENOMEM;
}
-/* bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
- * @priv - NIC private structure
- * @f - RXF fifo
+/**
+ * bdx_rx_free_skbs - frees and unmaps all skbs allocated for the fifo
+ * @priv: NIC private structure
+ * @f: RXF fifo
*/
static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
{
@@ -1045,8 +1053,10 @@ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
}
}
-/* bdx_rx_free - release all Rx resources
- * @priv - NIC private structure
+/**
+ * bdx_rx_free - release all Rx resources
+ * @priv: NIC private structure
+ *
* It assumes that Rx is desabled in HW
*/
static void bdx_rx_free(struct bdx_priv *priv)
@@ -1067,9 +1077,11 @@ static void bdx_rx_free(struct bdx_priv *priv)
* Rx Engine *
*************************************************************************/
-/* bdx_rx_alloc_skbs - fill rxf fifo with new skbs
- * @priv - nic's private structure
- * @f - RXF fifo that needs skbs
+/**
+ * bdx_rx_alloc_skbs - fill rxf fifo with new skbs
+ * @priv: nic's private structure
+ * @f: RXF fifo that needs skbs
+ *
* It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo.
* skb's virtual and physical addresses are stored in skb db.
* To calculate free space, func uses cached values of RPTR and WPTR
@@ -1179,13 +1191,15 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
RET();
}
-/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
+/**
+ * bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
* NOTE: a special treatment is given to non-continuous descriptors
* that start near the end, wraps around and continue at the beginning. a second
* part is copied right after the first, and then descriptor is interpreted as
* normal. fifo has an extra space to allow such operations
- * @priv - nic's private structure
- * @f - RXF fifo that needs skbs
+ * @priv: nic's private structure
+ * @f: RXF fifo that needs skbs
+ * @budget: maximum number of packets to receive
*/
/* TBD: replace memcpy func call by explicite inline asm */
@@ -1375,9 +1389,10 @@ static inline int bdx_tx_db_size(struct txdb *db)
return db->size - taken;
}
-/* __bdx_tx_ptr_next - helper function, increment read/write pointer + wrap
- * @d - tx data base
- * @ptr - read or write pointer
+/**
+ * __bdx_tx_db_ptr_next - helper function, increment read/write pointer + wrap
+ * @db: tx data base
+ * @pptr: read or write pointer
*/
static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
{
@@ -1394,8 +1409,9 @@ static inline void __bdx_tx_db_ptr_next(struct txdb *db, struct tx_map **pptr)
*pptr = db->start;
}
-/* bdx_tx_db_inc_rptr - increment read pointer
- * @d - tx data base
+/**
+ * bdx_tx_db_inc_rptr - increment read pointer
+ * @db: tx data base
*/
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
@@ -1403,8 +1419,9 @@ static inline void bdx_tx_db_inc_rptr(struct txdb *db)
__bdx_tx_db_ptr_next(db, &db->rptr);
}
-/* bdx_tx_db_inc_rptr - increment write pointer
- * @d - tx data base
+/**
+ * bdx_tx_db_inc_wptr - increment write pointer
+ * @db: tx data base
*/
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
@@ -1413,9 +1430,11 @@ static inline void bdx_tx_db_inc_wptr(struct txdb *db)
a result of write */
}
-/* bdx_tx_db_init - creates and initializes tx db
- * @d - tx data base
- * @sz_type - size of tx fifo
+/**
+ * bdx_tx_db_init - creates and initializes tx db
+ * @d: tx data base
+ * @sz_type: size of tx fifo
+ *
* Returns 0 on success, error code otherwise
*/
static int bdx_tx_db_init(struct txdb *d, int sz_type)
@@ -1441,8 +1460,9 @@ static int bdx_tx_db_init(struct txdb *d, int sz_type)
return 0;
}
-/* bdx_tx_db_close - closes tx db and frees all memory
- * @d - tx data base
+/**
+ * bdx_tx_db_close - closes tx db and frees all memory
+ * @d: tx data base
*/
static void bdx_tx_db_close(struct txdb *d)
{
@@ -1463,9 +1483,11 @@ static struct {
u16 qwords; /* qword = 64 bit */
} txd_sizes[MAX_SKB_FRAGS + 1];
-/* txdb_map_skb - creates and stores dma mappings for skb's data blocks
- * @priv - NIC private structure
- * @skb - socket buffer to map
+/**
+ * bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks
+ * @priv: NIC private structure
+ * @skb: socket buffer to map
+ * @txdd: TX descriptor to use
*
* It makes dma mappings for skb's data blocks and writes them to PBL of
* new tx descriptor. It also stores them in the tx db, so they could be
@@ -1562,9 +1584,10 @@ err_mem:
return -ENOMEM;
}
-/*
+/**
* bdx_tx_space - calculates available space in TX fifo
- * @priv - NIC private structure
+ * @priv: NIC private structure
+ *
* Returns available space in TX fifo in bytes
*/
static inline int bdx_tx_space(struct bdx_priv *priv)
@@ -1579,9 +1602,10 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
return fsize;
}
-/* bdx_tx_transmit - send packet to NIC
- * @skb - packet to send
- * ndev - network device assigned to NIC
+/**
+ * bdx_tx_transmit - send packet to NIC
+ * @skb: packet to send
+ * @ndev: network device assigned to NIC
* Return codes:
* o NETDEV_TX_OK everything ok.
* o NETDEV_TX_BUSY Cannot transmit packet, try later
@@ -1699,8 +1723,10 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-/* bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
- * @priv - bdx adapter
+/**
+ * bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ.
+ * @priv: bdx adapter
+ *
* It scans TXF fifo for descriptors, frees DMA mappings and reports to OS
* that those packets were sent
*/
@@ -1761,7 +1787,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
spin_unlock(&priv->tx_lock);
}
-/* bdx_tx_free_skbs - frees all skbs from TXD fifo.
+/**
+ * bdx_tx_free_skbs - frees all skbs from TXD fifo.
* It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod
*/
static void bdx_tx_free_skbs(struct bdx_priv *priv)
@@ -1790,10 +1817,11 @@ static void bdx_tx_free(struct bdx_priv *priv)
bdx_tx_db_close(&priv->txdb);
}
-/* bdx_tx_push_desc - push descriptor to TxD fifo
- * @priv - NIC private structure
- * @data - desc's data
- * @size - desc's size
+/**
+ * bdx_tx_push_desc - push descriptor to TxD fifo
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
*
* Pushes desc to TxD fifo and overlaps it if needed.
* NOTE: this func does not check for available space. this is responsibility
@@ -1819,10 +1847,11 @@ static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
}
-/* bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
- * @priv - NIC private structure
- * @data - desc's data
- * @size - desc's size
+/**
+ * bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
*
* NOTE: this func does check for available space and, if necessary, waits for
* NIC to read existing data before writing new one.
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 6685bbb5705a..1e5d85b06e71 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -27,6 +27,7 @@
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_data/cpsw.h>
@@ -494,11 +495,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_intr_disable(priv);
netif_carrier_off(ndev);
- ret = clk_enable(priv->clk);
- if (ret < 0) {
- dev_err(priv->dev, "unable to turn on device clock\n");
- return ret;
- }
+ pm_runtime_get_sync(&priv->pdev->dev);
reg = __raw_readl(&priv->regs->id_ver);
@@ -569,7 +566,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_carrier_off(priv->ndev);
cpsw_ale_stop(priv->ale);
for_each_slave(priv, cpsw_slave_stop, priv);
- clk_disable(priv->clk);
+ pm_runtime_put_sync(&priv->pdev->dev);
return 0;
}
@@ -748,7 +745,7 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
pr_info("Detected MACID = %pM", priv->mac_addr);
} else {
- random_ether_addr(priv->mac_addr);
+ eth_random_addr(priv->mac_addr);
pr_info("Random MACID = %pM", priv->mac_addr);
}
@@ -763,10 +760,12 @@ static int __devinit cpsw_probe(struct platform_device *pdev)
for (i = 0; i < data->slaves; i++)
priv->slaves[i].slave_num = i;
- priv->clk = clk_get(&pdev->dev, NULL);
+ pm_runtime_enable(&pdev->dev);
+ priv->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(priv->clk)) {
- dev_err(priv->dev, "failed to get device clock)\n");
- ret = -EBUSY;
+ dev_err(&pdev->dev, "fck is not found\n");
+ ret = -ENODEV;
+ goto clean_slave_ret;
}
priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -935,6 +934,8 @@ clean_cpsw_iores_ret:
resource_size(priv->cpsw_res));
clean_clk_ret:
clk_put(priv->clk);
+clean_slave_ret:
+ pm_runtime_disable(&pdev->dev);
kfree(priv->slaves);
clean_ndev_ret:
free_netdev(ndev);
@@ -959,6 +960,7 @@ static int __devexit cpsw_remove(struct platform_device *pdev)
resource_size(priv->cpsw_res));
release_mem_region(priv->cpsw_ss_res->start,
resource_size(priv->cpsw_ss_res));
+ pm_runtime_disable(&pdev->dev);
clk_put(priv->clk);
kfree(priv->slaves);
free_netdev(ndev);
@@ -973,6 +975,8 @@ static int cpsw_suspend(struct device *dev)
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
+ pm_runtime_put_sync(&pdev->dev);
+
return 0;
}
@@ -981,6 +985,7 @@ static int cpsw_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(&pdev->dev);
if (netif_running(ndev))
cpsw_ndo_open(ndev);
return 0;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index d614c374ed9d..3b5c4571b55e 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 4da93a5d7ec6..fce89a0ab06e 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -57,7 +57,12 @@
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
#include <asm/irq.h>
#include <asm/page.h>
@@ -339,6 +344,9 @@ struct emac_priv {
u32 rx_addr_type;
atomic_t cur_tx;
const char *phy_id;
+#ifdef CONFIG_OF
+ struct device_node *phy_node;
+#endif
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
@@ -346,10 +354,6 @@ struct emac_priv {
void (*int_disable) (void);
};
-/* clock frequency for EMAC */
-static struct clk *emac_clk;
-static unsigned long emac_bus_frequency;
-
/* EMAC TX Host Error description strings */
static char *emac_txhost_errcodes[16] = {
"No error", "SOP error", "Ownership bit not set in SOP buffer",
@@ -375,7 +379,7 @@ static char *emac_rxhost_errcodes[16] = {
#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
/**
- * emac_dump_regs: Dump important EMAC registers to debug terminal
+ * emac_dump_regs - Dump important EMAC registers to debug terminal
* @priv: The DaVinci EMAC private adapter structure
*
* Executes ethtool set cmd & sets phy mode
@@ -466,7 +470,7 @@ static void emac_dump_regs(struct emac_priv *priv)
}
/**
- * emac_get_drvinfo: Get EMAC driver information
+ * emac_get_drvinfo - Get EMAC driver information
* @ndev: The DaVinci EMAC network adapter
* @info: ethtool info structure containing name and version
*
@@ -481,7 +485,7 @@ static void emac_get_drvinfo(struct net_device *ndev,
}
/**
- * emac_get_settings: Get EMAC settings
+ * emac_get_settings - Get EMAC settings
* @ndev: The DaVinci EMAC network adapter
* @ecmd: ethtool command
*
@@ -500,7 +504,7 @@ static int emac_get_settings(struct net_device *ndev,
}
/**
- * emac_set_settings: Set EMAC settings
+ * emac_set_settings - Set EMAC settings
* @ndev: The DaVinci EMAC network adapter
* @ecmd: ethtool command
*
@@ -518,7 +522,7 @@ static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
}
/**
- * emac_get_coalesce : Get interrupt coalesce settings for this device
+ * emac_get_coalesce - Get interrupt coalesce settings for this device
* @ndev : The DaVinci EMAC network adapter
* @coal : ethtool coalesce settings structure
*
@@ -536,7 +540,7 @@ static int emac_get_coalesce(struct net_device *ndev,
}
/**
- * emac_set_coalesce : Set interrupt coalesce settings for this device
+ * emac_set_coalesce - Set interrupt coalesce settings for this device
* @ndev : The DaVinci EMAC network adapter
* @coal : ethtool coalesce settings structure
*
@@ -614,11 +618,9 @@ static int emac_set_coalesce(struct net_device *ndev,
}
-/**
- * ethtool_ops: DaVinci EMAC Ethtool structure
+/* ethtool_ops: DaVinci EMAC Ethtool structure
*
* Ethtool support for EMAC adapter
- *
*/
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
@@ -631,7 +633,7 @@ static const struct ethtool_ops ethtool_ops = {
};
/**
- * emac_update_phystatus: Update Phy status
+ * emac_update_phystatus - Update Phy status
* @priv: The DaVinci EMAC private adapter structure
*
* Updates phy status and takes action for network queue if required
@@ -697,7 +699,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
}
/**
- * hash_get: Calculate hash value from mac address
+ * hash_get - Calculate hash value from mac address
* @addr: mac address to delete from hash table
*
* Calculates hash value from mac address
@@ -723,9 +725,9 @@ static u32 hash_get(u8 *addr)
}
/**
- * hash_add: Hash function to add mac addr from hash table
+ * hash_add - Hash function to add mac addr from hash table
* @priv: The DaVinci EMAC private adapter structure
- * mac_addr: mac address to delete from hash table
+ * @mac_addr: mac address to delete from hash table
*
* Adds mac address to the internal hash table
*
@@ -765,9 +767,9 @@ static int hash_add(struct emac_priv *priv, u8 *mac_addr)
}
/**
- * hash_del: Hash function to delete mac addr from hash table
+ * hash_del - Hash function to delete mac addr from hash table
* @priv: The DaVinci EMAC private adapter structure
- * mac_addr: mac address to delete from hash table
+ * @mac_addr: mac address to delete from hash table
*
* Removes mac address from the internal hash table
*
@@ -807,7 +809,7 @@ static int hash_del(struct emac_priv *priv, u8 *mac_addr)
#define EMAC_ALL_MULTI_CLR 3
/**
- * emac_add_mcast: Set multicast address in the EMAC adapter (Internal)
+ * emac_add_mcast - Set multicast address in the EMAC adapter (Internal)
* @priv: The DaVinci EMAC private adapter structure
* @action: multicast operation to perform
* mac_addr: mac address to set
@@ -855,7 +857,7 @@ static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
}
/**
- * emac_dev_mcast_set: Set multicast address in the EMAC adapter
+ * emac_dev_mcast_set - Set multicast address in the EMAC adapter
* @ndev: The DaVinci EMAC network adapter
*
* Set multicast addresses in EMAC adapter
@@ -901,7 +903,7 @@ static void emac_dev_mcast_set(struct net_device *ndev)
*************************************************************************/
/**
- * emac_int_disable: Disable EMAC module interrupt (from adapter)
+ * emac_int_disable - Disable EMAC module interrupt (from adapter)
* @priv: The DaVinci EMAC private adapter structure
*
* Disable EMAC interrupt on the adapter
@@ -931,7 +933,7 @@ static void emac_int_disable(struct emac_priv *priv)
}
/**
- * emac_int_enable: Enable EMAC module interrupt (from adapter)
+ * emac_int_enable - Enable EMAC module interrupt (from adapter)
* @priv: The DaVinci EMAC private adapter structure
*
* Enable EMAC interrupt on the adapter
@@ -967,7 +969,7 @@ static void emac_int_enable(struct emac_priv *priv)
}
/**
- * emac_irq: EMAC interrupt handler
+ * emac_irq - EMAC interrupt handler
* @irq: interrupt number
* @dev_id: EMAC network adapter data structure ptr
*
@@ -1060,7 +1062,7 @@ static void emac_tx_handler(void *token, int len, int status)
}
/**
- * emac_dev_xmit: EMAC Transmit function
+ * emac_dev_xmit - EMAC Transmit function
* @skb: SKB pointer
* @ndev: The DaVinci EMAC network adapter
*
@@ -1111,7 +1113,7 @@ fail_tx:
}
/**
- * emac_dev_tx_timeout: EMAC Transmit timeout function
+ * emac_dev_tx_timeout - EMAC Transmit timeout function
* @ndev: The DaVinci EMAC network adapter
*
* Called when system detects that a skb timeout period has expired
@@ -1138,7 +1140,7 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
}
/**
- * emac_set_type0addr: Set EMAC Type0 mac address
+ * emac_set_type0addr - Set EMAC Type0 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1165,7 +1167,7 @@ static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_set_type1addr: Set EMAC Type1 mac address
+ * emac_set_type1addr - Set EMAC Type1 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1187,7 +1189,7 @@ static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_set_type2addr: Set EMAC Type2 mac address
+ * emac_set_type2addr - Set EMAC Type2 mac address
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1213,7 +1215,7 @@ static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
}
/**
- * emac_setmac: Set mac address in the adapter (internal function)
+ * emac_setmac - Set mac address in the adapter (internal function)
* @priv: The DaVinci EMAC private adapter structure
* @ch: RX channel number
* @mac_addr: MAC address to set in device
@@ -1242,7 +1244,7 @@ static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
}
/**
- * emac_dev_setmac_addr: Set mac address in the adapter
+ * emac_dev_setmac_addr - Set mac address in the adapter
* @ndev: The DaVinci EMAC network adapter
* @addr: MAC address to set in device
*
@@ -1277,7 +1279,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
}
/**
- * emac_hw_enable: Enable EMAC hardware for packet transmission/reception
+ * emac_hw_enable - Enable EMAC hardware for packet transmission/reception
* @priv: The DaVinci EMAC private adapter structure
*
* Enables EMAC hardware for packet processing - enables PHY, enables RX
@@ -1347,7 +1349,7 @@ static int emac_hw_enable(struct emac_priv *priv)
}
/**
- * emac_poll: EMAC NAPI Poll function
+ * emac_poll - EMAC NAPI Poll function
* @ndev: The DaVinci EMAC network adapter
* @budget: Number of receive packets to process (as told by NAPI layer)
*
@@ -1430,7 +1432,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
- * emac_poll_controller: EMAC Poll controller function
+ * emac_poll_controller - EMAC Poll controller function
* @ndev: The DaVinci EMAC network adapter
*
* Polled functionality used by netconsole and others in non interrupt mode
@@ -1489,7 +1491,7 @@ static void emac_adjust_link(struct net_device *ndev)
*************************************************************************/
/**
- * emac_devioctl: EMAC adapter ioctl
+ * emac_devioctl - EMAC adapter ioctl
* @ndev: The DaVinci EMAC network adapter
* @ifrq: request parameter
* @cmd: command parameter
@@ -1516,7 +1518,7 @@ static int match_first_device(struct device *dev, void *data)
}
/**
- * emac_dev_open: EMAC device open
+ * emac_dev_open - EMAC device open
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to start the interface. We init TX/RX channels
@@ -1535,6 +1537,8 @@ static int emac_dev_open(struct net_device *ndev)
int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
+ pm_runtime_get(&priv->pdev->dev);
+
netif_carrier_off(ndev);
for (cnt = 0; cnt < ETH_ALEN; cnt++)
ndev->dev_addr[cnt] = priv->mac_addr[cnt];
@@ -1604,7 +1608,7 @@ static int emac_dev_open(struct net_device *ndev)
priv->phy_id);
ret = PTR_ERR(priv->phydev);
priv->phydev = NULL;
- return ret;
+ goto err;
}
priv->link = 0;
@@ -1645,11 +1649,15 @@ rollback:
res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k-1);
m = res->end;
}
- return -EBUSY;
+
+ ret = -EBUSY;
+err:
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
}
/**
- * emac_dev_stop: EMAC device stop
+ * emac_dev_stop - EMAC device stop
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to stop or down the interface. We stop the network
@@ -1687,11 +1695,12 @@ static int emac_dev_stop(struct net_device *ndev)
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
+ pm_runtime_put(&priv->pdev->dev);
return 0;
}
/**
- * emac_dev_getnetstats: EMAC get statistics function
+ * emac_dev_getnetstats - EMAC get statistics function
* @ndev: The DaVinci EMAC network adapter
*
* Called when system wants to get statistics from the device.
@@ -1762,8 +1771,79 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
+#ifdef CONFIG_OF
+static struct emac_platform_data
+ *davinci_emac_of_get_pdata(struct platform_device *pdev,
+ struct emac_priv *priv)
+{
+ struct device_node *np;
+ struct emac_platform_data *pdata = NULL;
+ const u8 *mac_addr;
+ u32 data;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto nodata;
+ }
+
+ np = pdev->dev.of_node;
+ if (!np)
+ goto nodata;
+ else
+ pdata->version = EMAC_VERSION_2;
+
+ if (!is_valid_ether_addr(pdata->mac_addr)) {
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+ }
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-reg-offset", &data);
+ if (!ret)
+ pdata->ctrl_reg_offset = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset",
+ &data);
+ if (!ret)
+ pdata->ctrl_mod_reg_offset = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-offset", &data);
+ if (!ret)
+ pdata->ctrl_ram_offset = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-ctrl-ram-size", &data);
+ if (!ret)
+ pdata->ctrl_ram_size = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-rmii-en", &data);
+ if (!ret)
+ pdata->rmii_en = data;
+
+ ret = of_property_read_u32(np, "ti,davinci-no-bd-ram", &data);
+ if (!ret)
+ pdata->no_bd_ram = data;
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!priv->phy_node)
+ pdata->phy_id = "";
+
+ pdev->dev.platform_data = pdata;
+nodata:
+ return pdata;
+}
+#else
+static struct emac_platform_data
+ *davinci_emac_of_get_pdata(struct platform_device *pdev,
+ struct emac_priv *priv)
+{
+ return pdev->dev.platform_data;
+}
+#endif
/**
- * davinci_emac_probe: EMAC device probe
+ * davinci_emac_probe - EMAC device probe
* @pdev: The DaVinci EMAC device that we are removing
*
* Called when probing for emac devicesr. We get details of instances and
@@ -1780,6 +1860,9 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
struct emac_platform_data *pdata;
struct device *emac_dev;
struct cpdma_params dma_params;
+ struct clk *emac_clk;
+ unsigned long emac_bus_frequency;
+
/* obtain emac clock from kernel */
emac_clk = clk_get(&pdev->dev, NULL);
@@ -1788,12 +1871,14 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
+ clk_put(emac_clk);
+
/* TODO: Probe PHY here if possible */
ndev = alloc_etherdev(sizeof(struct emac_priv));
if (!ndev) {
rc = -ENOMEM;
- goto free_clk;
+ goto no_ndev;
}
platform_set_drvdata(pdev, ndev);
@@ -1804,7 +1889,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
- pdata = pdev->dev.platform_data;
+ pdata = davinci_emac_of_get_pdata(pdev, priv);
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
rc = -ENODEV;
@@ -1909,15 +1994,13 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
SET_ETHTOOL_OPS(ndev, &ethtool_ops);
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
- clk_enable(emac_clk);
-
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
if (rc) {
dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
- goto netdev_reg_err;
+ goto no_irq_res;
}
@@ -1926,10 +2009,12 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
"(regs: %p, irq: %d)\n",
(void *)priv->emac_base_phys, ndev->irq);
}
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_resume(&pdev->dev);
+
return 0;
-netdev_reg_err:
- clk_disable(emac_clk);
no_irq_res:
if (priv->txchan)
cpdma_chan_destroy(priv->txchan);
@@ -1943,13 +2028,12 @@ no_dma:
probe_quit:
free_netdev(ndev);
-free_clk:
- clk_put(emac_clk);
+no_ndev:
return rc;
}
/**
- * davinci_emac_remove: EMAC device remove
+ * davinci_emac_remove - EMAC device remove
* @pdev: The DaVinci EMAC device that we are removing
*
* Called when removing the device driver. We disable clock usage and release
@@ -1978,9 +2062,6 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
iounmap(priv->remap_addr);
free_netdev(ndev);
- clk_disable(emac_clk);
- clk_put(emac_clk);
-
return 0;
}
@@ -1992,8 +2073,6 @@ static int davinci_emac_suspend(struct device *dev)
if (netif_running(ndev))
emac_dev_stop(ndev);
- clk_disable(emac_clk);
-
return 0;
}
@@ -2002,8 +2081,6 @@ static int davinci_emac_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
- clk_enable(emac_clk);
-
if (netif_running(ndev))
emac_dev_open(ndev);
@@ -2015,21 +2092,26 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
.resume = davinci_emac_resume,
};
-/**
- * davinci_emac_driver: EMAC platform driver structure
- */
+static const struct of_device_id davinci_emac_of_match[] = {
+ {.compatible = "ti,davinci-dm6467-emac", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
+
+/* davinci_emac_driver: EMAC platform driver structure */
static struct platform_driver davinci_emac_driver = {
.driver = {
.name = "davinci_emac",
.owner = THIS_MODULE,
.pm = &davinci_emac_pm_ops,
+ .of_match_table = of_match_ptr(davinci_emac_of_match),
},
.probe = davinci_emac_probe,
.remove = __devexit_p(davinci_emac_remove),
};
/**
- * davinci_emac_init: EMAC driver module init
+ * davinci_emac_init - EMAC driver module init
*
* Called when initializing the driver. We register the driver with
* the platform.
@@ -2041,7 +2123,7 @@ static int __init davinci_emac_init(void)
late_initcall(davinci_emac_init);
/**
- * davinci_emac_exit: EMAC driver module exit
+ * davinci_emac_exit - EMAC driver module exit
*
* Called when exiting the driver completely. We unregister the driver with
* the platform and exit
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index e4e47088e26b..cd7ee204e94a 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -34,6 +34,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/pm_runtime.h>
#include <linux/davinci_emac.h>
/*
@@ -321,7 +322,9 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, pdev->id);
- data->clk = clk_get(dev, NULL);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+ data->clk = clk_get(&pdev->dev, "fck");
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
ret = PTR_ERR(data->clk);
@@ -329,8 +332,6 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
goto bail_out;
}
- clk_enable(data->clk);
-
dev_set_drvdata(dev, data);
data->dev = dev;
spin_lock_init(&data->lock);
@@ -378,10 +379,10 @@ bail_out:
if (data->bus)
mdiobus_free(data->bus);
- if (data->clk) {
- clk_disable(data->clk);
+ if (data->clk)
clk_put(data->clk);
- }
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
kfree(data);
@@ -396,10 +397,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
if (data->bus)
mdiobus_free(data->bus);
- if (data->clk) {
- clk_disable(data->clk);
+ if (data->clk)
clk_put(data->clk);
- }
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
dev_set_drvdata(dev, NULL);
@@ -421,8 +422,7 @@ static int davinci_mdio_suspend(struct device *dev)
__raw_writel(ctrl, &data->regs->control);
wait_for_idle(data);
- if (data->clk)
- clk_disable(data->clk);
+ pm_runtime_put_sync(data->dev);
data->suspended = true;
spin_unlock(&data->lock);
@@ -436,8 +436,7 @@ static int davinci_mdio_resume(struct device *dev)
u32 ctrl;
spin_lock(&data->lock);
- if (data->clk)
- clk_enable(data->clk);
+ pm_runtime_put_sync(data->dev);
/* restart the scan state machine */
ctrl = __raw_readl(&data->regs->control);
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 83b4b388ad49..4e2a1628484d 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -123,6 +123,7 @@ struct tile_net_comps {
/* The transmit wake timer for a given cpu and echannel. */
struct tile_net_tx_wake {
+ int tx_queue_idx;
struct hrtimer timer;
struct net_device *dev;
};
@@ -573,12 +574,14 @@ static void add_comp(gxio_mpipe_equeue_t *equeue,
comps->comp_next++;
}
-static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
+static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
+ int tx_queue_idx)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
struct tile_net_priv *priv = netdev_priv(dev);
+ struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel];
- hrtimer_start(&info->tx_wake[priv->echannel].timer,
+ hrtimer_start(&tx_wake->timer,
ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
HRTIMER_MODE_REL_PINNED);
}
@@ -587,7 +590,7 @@ static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
{
struct tile_net_tx_wake *tx_wake =
container_of(t, struct tile_net_tx_wake, timer);
- netif_wake_subqueue(tx_wake->dev, smp_processor_id());
+ netif_wake_subqueue(tx_wake->dev, tx_wake->tx_queue_idx);
return HRTIMER_NORESTART;
}
@@ -1218,6 +1221,7 @@ static int tile_net_open(struct net_device *dev)
hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
+ tx_wake->tx_queue_idx = cpu;
tx_wake->timer.function = tile_net_handle_tx_wake_timer;
tx_wake->dev = dev;
}
@@ -1291,6 +1295,7 @@ static inline void *tile_net_frag_buf(skb_frag_t *f)
* stop the queue and schedule the tx_wake timer.
*/
static s64 tile_net_equeue_try_reserve(struct net_device *dev,
+ int tx_queue_idx,
struct tile_net_comps *comps,
gxio_mpipe_equeue_t *equeue,
int num_edescs)
@@ -1313,8 +1318,8 @@ static s64 tile_net_equeue_try_reserve(struct net_device *dev,
}
/* Still nothing; give up and stop the queue for a short while. */
- netif_stop_subqueue(dev, smp_processor_id());
- tile_net_schedule_tx_wake_timer(dev);
+ netif_stop_subqueue(dev, tx_queue_idx);
+ tile_net_schedule_tx_wake_timer(dev, tx_queue_idx);
return -1;
}
@@ -1328,11 +1333,12 @@ static s64 tile_net_equeue_try_reserve(struct net_device *dev,
static int tso_count_edescs(struct sk_buff *skb)
{
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int data_len = skb->data_len;
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
unsigned int p_len = sh->gso_size;
long f_id = -1; /* id of the current fragment */
- long f_size = -1; /* size of the current fragment */
- long f_used = -1; /* bytes used from the current fragment */
+ long f_size = skb->hdr_len; /* size of the current fragment */
+ long f_used = sh_len; /* bytes used from the current fragment */
long n; /* size of the current piece of payload */
int num_edescs = 0;
int segment;
@@ -1377,13 +1383,14 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
struct skb_shared_info *sh = skb_shinfo(skb);
struct iphdr *ih;
struct tcphdr *th;
- unsigned int data_len = skb->data_len;
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
unsigned char *data = skb->data;
- unsigned int ih_off, th_off, sh_len, p_len;
+ unsigned int ih_off, th_off, p_len;
unsigned int isum_seed, tsum_seed, id, seq;
long f_id = -1; /* id of the current fragment */
- long f_size = -1; /* size of the current fragment */
- long f_used = -1; /* bytes used from the current fragment */
+ long f_size = skb->hdr_len; /* size of the current fragment */
+ long f_used = sh_len; /* bytes used from the current fragment */
long n; /* size of the current piece of payload */
int segment;
@@ -1392,14 +1399,13 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
th = tcp_hdr(skb);
ih_off = skb_network_offset(skb);
th_off = skb_transport_offset(skb);
- sh_len = th_off + tcp_hdrlen(skb);
p_len = sh->gso_size;
/* Set up seed values for IP and TCP csum and initialize id and seq. */
isum_seed = ((0xFFFF - ih->check) +
(0xFFFF - ih->tot_len) +
(0xFFFF - ih->id));
- tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
+ tsum_seed = th->check + (0xFFFF ^ htons(sh_len + data_len));
id = ntohs(ih->id);
seq = ntohl(th->seq);
@@ -1471,21 +1477,22 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
{
struct tile_net_priv *priv = netdev_priv(dev);
struct skb_shared_info *sh = skb_shinfo(skb);
- unsigned int data_len = skb->data_len;
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->data_len + skb->hdr_len - sh_len;
unsigned int p_len = sh->gso_size;
gxio_mpipe_edesc_t edesc_head = { { 0 } };
gxio_mpipe_edesc_t edesc_body = { { 0 } };
long f_id = -1; /* id of the current fragment */
- long f_size = -1; /* size of the current fragment */
- long f_used = -1; /* bytes used from the current fragment */
+ long f_size = skb->hdr_len; /* size of the current fragment */
+ long f_used = sh_len; /* bytes used from the current fragment */
+ void *f_data = skb->data;
long n; /* size of the current piece of payload */
unsigned long tx_packets = 0, tx_bytes = 0;
- unsigned int csum_start, sh_len;
+ unsigned int csum_start;
int segment;
/* Prepare to egress the headers: set up header edesc. */
csum_start = skb_checksum_start_offset(skb);
- sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
edesc_head.csum = 1;
edesc_head.csum_start = csum_start;
edesc_head.csum_dest = csum_start + skb->csum_offset;
@@ -1497,7 +1504,6 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
/* Egress all the edescs. */
for (segment = 0; segment < sh->gso_segs; segment++) {
- void *va;
unsigned char *buf;
unsigned int p_used = 0;
@@ -1516,10 +1522,9 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
f_id++;
f_size = sh->frags[f_id].size;
f_used = 0;
+ f_data = tile_net_frag_buf(&sh->frags[f_id]);
}
- va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
-
/* Use bytes from the current fragment. */
n = p_len - p_used;
if (n > f_size - f_used)
@@ -1528,7 +1533,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
p_used += n;
/* Egress a piece of the payload. */
- edesc_body.va = va_to_tile_io_addr(va);
+ edesc_body.va = va_to_tile_io_addr(f_data) + f_used;
edesc_body.xfer_size = n;
edesc_body.bound = !(p_used < p_len);
gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
@@ -1580,7 +1585,8 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
local_irq_save(irqflags);
/* Try to acquire a completion entry and an egress slot. */
- slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+ slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
+ equeue, num_edescs);
if (slot < 0) {
local_irq_restore(irqflags);
return NETDEV_TX_BUSY;
@@ -1674,7 +1680,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
local_irq_save(irqflags);
/* Try to acquire a completion entry and an egress slot. */
- slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
+ slot = tile_net_equeue_try_reserve(dev, skb->queue_mapping, comps,
+ equeue, num_edescs);
if (slot < 0) {
local_irq_restore(irqflags);
return NETDEV_TX_BUSY;
@@ -1844,7 +1851,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
memcpy(dev->dev_addr, mac, 6);
dev->addr_len = 6;
} else {
- random_ether_addr(dev->dev_addr);
+ eth_hw_addr_random(dev);
}
/* Register the network device. */
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 6199f6b387b6..c1ebfe9efcb3 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -114,7 +114,8 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
out_be32(card->regs + reg, value);
}
-/** spider_net_write_phy - write to phy register
+/**
+ * spider_net_write_phy - write to phy register
* @netdev: adapter to be written to
* @mii_id: id of MII
* @reg: PHY register
@@ -137,7 +138,8 @@ spider_net_write_phy(struct net_device *netdev, int mii_id,
spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
}
-/** spider_net_read_phy - read from phy register
+/**
+ * spider_net_read_phy - read from phy register
* @netdev: network device to be read from
* @mii_id: id of MII
* @reg: PHY register
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ea3e0a21ba74..a46c19859683 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -486,7 +486,7 @@ static void __devinit velocity_get_options(struct velocity_opt *opts, int index,
velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
- velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
+ velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
opts->numrx = (opts->numrx & ~3);
}
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index a75e9ef5a4ce..a5826a3111a6 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -637,7 +637,7 @@ static int __devinit w5100_hw_probe(struct platform_device *pdev)
if (data && is_valid_ether_addr(data->mac_addr)) {
memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
} else {
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
ndev->addr_assign_type |= NET_ADDR_RANDOM;
}
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 3306a20ec211..bdd8891c215a 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -557,7 +557,7 @@ static int __devinit w5300_hw_probe(struct platform_device *pdev)
if (data && is_valid_ether_addr(data->mac_addr)) {
memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
} else {
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
ndev->addr_assign_type |= NET_ADDR_RANDOM;
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1eaf7128afee..f8e351880119 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -197,7 +197,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
#endif
/**
- * * temac_dma_bd_release - Release buffer descriptor rings
+ * temac_dma_bd_release - Release buffer descriptor rings
*/
static void temac_dma_bd_release(struct net_device *ndev)
{
@@ -768,7 +768,6 @@ static void ll_temac_recv(struct net_device *ndev)
DMA_FROM_DEVICE);
skb_put(skb, length);
- skb->dev = ndev;
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9c365e192a31..0793299bd39e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -312,7 +312,7 @@ static void axienet_set_mac_address(struct net_device *ndev, void *address)
if (address)
memcpy(ndev->dev_addr, address, ETH_ALEN);
if (!is_valid_ether_addr(ndev->dev_addr))
- random_ether_addr(ndev->dev_addr);
+ eth_random_addr(ndev->dev_addr);
/* Set up unicast MAC address filter set its mac address */
axienet_iow(lp, XAE_UAW0_OFFSET,
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 4ad80f771099..6695a1dadf4e 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -2962,7 +2962,7 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
bp->descr_block_virt->rcv_data[i+j].long_1 = (u32) (bp->rcv_block_phys + (i * PI_RCV_DATA_K_SIZE_MAX));
- bp->p_rcv_buff_va[i+j] = (char *) (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
+ bp->p_rcv_buff_va[i+j] = (bp->rcv_block_virt + (i * PI_RCV_DATA_K_SIZE_MAX));
}
#endif
}
@@ -3030,7 +3030,7 @@ static void dfx_rcv_queue_process(
#ifdef DYNAMIC_BUFFERS
p_buff = (char *) (((struct sk_buff *)bp->p_rcv_buff_va[entry])->data);
#else
- p_buff = (char *) bp->p_rcv_buff_va[entry];
+ p_buff = bp->p_rcv_buff_va[entry];
#endif
memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 9ac4665d7411..24d8566cfd8b 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -1242,7 +1242,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 8)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from+2,6) ;
+ memcpy(to,from+2,6) ;
to += 8 ;
from += 8 ;
len -= 8 ;
@@ -1251,7 +1251,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 4)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from,4) ;
+ memcpy(to,from,4) ;
to += 4 ;
from += 4 ;
len -= 4 ;
@@ -1260,7 +1260,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 8)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from,8) ;
+ memcpy(to,from,8) ;
to += 8 ;
from += 8 ;
len -= 8 ;
@@ -1269,7 +1269,7 @@ static int smt_set_para(struct s_smc *smc, struct smt_para *pa, int index,
if (len < 32)
goto len_error ;
if (set)
- memcpy((char *) to,(char *) from,32) ;
+ memcpy(to,from,32) ;
to += 32 ;
from += 32 ;
len -= 32 ;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index aed1a6105b24..2c0894a92abd 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -485,7 +485,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
return;
default:
- count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+ count = kiss_esc(p, ax->xbuff, len);
}
} else {
unsigned short crc;
@@ -497,7 +497,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
case CRC_MODE_SMACK:
*p |= 0x80;
crc = swab16(crc16(0, p, len));
- count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+ count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
break;
case CRC_MODE_FLEX_TEST:
ax->crcmode = CRC_MODE_NONE;
@@ -506,11 +506,11 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
case CRC_MODE_FLEX:
*p |= 0x20;
crc = calc_crc_flex(p, len);
- count = kiss_esc_crc(p, (unsigned char *)ax->xbuff, crc, len+2);
+ count = kiss_esc_crc(p, ax->xbuff, crc, len+2);
break;
default:
- count = kiss_esc(p, (unsigned char *)ax->xbuff, len);
+ count = kiss_esc(p, ax->xbuff, len);
}
}
spin_unlock_bh(&ax->buflock);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 2857ab078aac..95ceb3593043 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -131,6 +131,7 @@ int rndis_filter_send(struct hv_device *dev,
struct hv_netvsc_packet *pkt);
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
+int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0c569831db5a..6cee2917eb02 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -614,7 +614,7 @@ retry_send_cmplt:
static void netvsc_receive_completion(void *context)
{
struct hv_netvsc_packet *packet = context;
- struct hv_device *device = (struct hv_device *)packet->device;
+ struct hv_device *device = packet->device;
struct netvsc_device *net_device;
u64 transaction_id = 0;
bool fsend_receive_comp = false;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 8f8ed3320425..8e23c084c4a7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -341,6 +341,34 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
return 0;
}
+
+static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct hv_device *hdev = ndevctx->device_ctx;
+ struct sockaddr *addr = p;
+ char save_adr[14];
+ unsigned char save_aatype;
+ int err;
+
+ memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
+ save_aatype = ndev->addr_assign_type;
+
+ err = eth_mac_addr(ndev, p);
+ if (err != 0)
+ return err;
+
+ err = rndis_filter_set_device_mac(hdev, addr->sa_data);
+ if (err != 0) {
+ /* roll back to saved MAC */
+ memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
+ ndev->addr_assign_type = save_aatype;
+ }
+
+ return err;
+}
+
+
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -353,7 +381,7 @@ static const struct net_device_ops device_ops = {
.ndo_set_rx_mode = netvsc_set_multicast_list,
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = netvsc_set_mac_addr,
};
/*
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 981ebb115637..fbf539468205 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -27,6 +27,7 @@
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
+#include <linux/nls.h>
#include "hyperv_net.h"
@@ -47,6 +48,7 @@ struct rndis_request {
struct hv_page_buffer buf;
/* FIXME: We assumed a fixed size request here. */
struct rndis_message request_msg;
+ u8 ext[100];
};
static void rndis_filter_send_completion(void *ctx);
@@ -511,6 +513,83 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
dev->hw_mac_adr, &size);
}
+#define NWADR_STR "NetworkAddress"
+#define NWADR_STRLEN 14
+
+int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
+{
+ struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct rndis_device *rdev = nvdev->extension;
+ struct net_device *ndev = nvdev->ndev;
+ struct rndis_request *request;
+ struct rndis_set_request *set;
+ struct rndis_config_parameter_info *cpi;
+ wchar_t *cfg_nwadr, *cfg_mac;
+ struct rndis_set_complete *set_complete;
+ char macstr[2*ETH_ALEN+1];
+ u32 extlen = sizeof(struct rndis_config_parameter_info) +
+ 2*NWADR_STRLEN + 4*ETH_ALEN;
+ int ret, t;
+
+ request = get_rndis_request(rdev, RNDIS_MSG_SET,
+ RNDIS_MESSAGE_SIZE(struct rndis_set_request) + extlen);
+ if (!request)
+ return -ENOMEM;
+
+ set = &request->request_msg.msg.set_req;
+ set->oid = RNDIS_OID_GEN_RNDIS_CONFIG_PARAMETER;
+ set->info_buflen = extlen;
+ set->info_buf_offset = sizeof(struct rndis_set_request);
+ set->dev_vc_handle = 0;
+
+ cpi = (struct rndis_config_parameter_info *)((ulong)set +
+ set->info_buf_offset);
+ cpi->parameter_name_offset =
+ sizeof(struct rndis_config_parameter_info);
+ /* Multiply by 2 because host needs 2 bytes (utf16) for each char */
+ cpi->parameter_name_length = 2*NWADR_STRLEN;
+ cpi->parameter_type = RNDIS_CONFIG_PARAM_TYPE_STRING;
+ cpi->parameter_value_offset =
+ cpi->parameter_name_offset + cpi->parameter_name_length;
+ /* Multiply by 4 because each MAC byte displayed as 2 utf16 chars */
+ cpi->parameter_value_length = 4*ETH_ALEN;
+
+ cfg_nwadr = (wchar_t *)((ulong)cpi + cpi->parameter_name_offset);
+ cfg_mac = (wchar_t *)((ulong)cpi + cpi->parameter_value_offset);
+ ret = utf8s_to_utf16s(NWADR_STR, NWADR_STRLEN, UTF16_HOST_ENDIAN,
+ cfg_nwadr, NWADR_STRLEN);
+ if (ret < 0)
+ goto cleanup;
+ snprintf(macstr, 2*ETH_ALEN+1, "%pm", mac);
+ ret = utf8s_to_utf16s(macstr, 2*ETH_ALEN, UTF16_HOST_ENDIAN,
+ cfg_mac, 2*ETH_ALEN);
+ if (ret < 0)
+ goto cleanup;
+
+ ret = rndis_filter_send_request(rdev, request);
+ if (ret != 0)
+ goto cleanup;
+
+ t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ if (t == 0) {
+ netdev_err(ndev, "timeout before we got a set response...\n");
+ /*
+ * can't put_rndis_request, since we may still receive a
+ * send-completion.
+ */
+ return -EBUSY;
+ } else {
+ set_complete = &request->response_msg.msg.set_complete;
+ if (set_complete->status != RNDIS_STATUS_SUCCESS)
+ ret = -EINVAL;
+ }
+
+cleanup:
+ put_rndis_request(rdev, request);
+ return ret;
+}
+
+
static int rndis_filter_query_device_link_status(struct rndis_device *dev)
{
u32 size = sizeof(u32);
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index dcc80d652b78..84872043b5c6 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1017,7 +1017,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
{
int iobase;
- struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ struct ali_ircc_cb *self = priv;
struct net_device *dev;
IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
@@ -1052,7 +1052,7 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
*/
static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
{
- struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ struct ali_ircc_cb *self = priv;
unsigned long flags;
int iobase;
int fcr; /* FIFO control reg */
@@ -1121,7 +1121,7 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
{
- struct ali_ircc_cb *self = (struct ali_ircc_cb *) priv;
+ struct ali_ircc_cb *self = priv;
int iobase,dongle_id;
int tmp = 0;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index fc503aa5288e..e09417df8f39 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -794,7 +794,7 @@ static int __devinit au1k_irda_net_init(struct net_device *dev)
/* allocate the data buffers */
aup->db[0].vaddr =
- (void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
+ dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
if (!aup->db[0].vaddr)
goto out3;
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 32eb94ece6c1..e2a06fd996d5 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -107,10 +107,10 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
lb_stats = per_cpu_ptr(dev->lstats, i);
do {
- start = u64_stats_fetch_begin(&lb_stats->syncp);
+ start = u64_stats_fetch_begin_bh(&lb_stats->syncp);
tbytes = lb_stats->bytes;
tpackets = lb_stats->packets;
- } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&lb_stats->syncp, start));
bytes += tbytes;
packets += tpackets;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 2ee56de7b0ca..0737bd4d1669 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -847,13 +847,12 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
const struct iovec *iv, unsigned long len,
int noblock)
{
- DECLARE_WAITQUEUE(wait, current);
+ DEFINE_WAIT(wait);
struct sk_buff *skb;
ssize_t ret = 0;
- add_wait_queue(sk_sleep(&q->sk), &wait);
while (len) {
- current->state = TASK_INTERRUPTIBLE;
+ prepare_to_wait(sk_sleep(&q->sk), &wait, TASK_INTERRUPTIBLE);
/* Read frames from the queue */
skb = skb_dequeue(&q->sk.sk_receive_queue);
@@ -875,8 +874,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
break;
}
- current->state = TASK_RUNNING;
- remove_wait_queue(sk_sleep(&q->sk), &wait);
+ finish_wait(sk_sleep(&q->sk), &wait);
return ret;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 944cdfb80fe4..3090dc65a6f1 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -67,6 +67,11 @@ config BCM63XX_PHY
---help---
Currently supports the 6348 and 6358 PHYs.
+config BCM87XX_PHY
+ tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
+ help
+ Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs.
+
config ICPLUS_PHY
tristate "Drivers for ICPlus PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index f51af688ef8b..6d2dc6c94f2e 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index cfabd5fe5372..a3fb5ceb6487 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -77,13 +77,7 @@ static struct phy_driver am79c_driver = {
static int __init am79c_init(void)
{
- int ret;
-
- ret = phy_driver_register(&am79c_driver);
- if (ret)
- return ret;
-
- return 0;
+ return phy_driver_register(&am79c_driver);
}
static void __exit am79c_exit(void)
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index cd802eb25fd2..84c7a39b1c65 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -71,7 +71,8 @@ static int bcm63xx_config_intr(struct phy_device *phydev)
return err;
}
-static struct phy_driver bcm63xx_1_driver = {
+static struct phy_driver bcm63xx_driver[] = {
+{
.phy_id = 0x00406000,
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (1)",
@@ -84,10 +85,8 @@ static struct phy_driver bcm63xx_1_driver = {
.ack_interrupt = bcm63xx_ack_interrupt,
.config_intr = bcm63xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-/* same phy as above, with just a different OUI */
-static struct phy_driver bcm63xx_2_driver = {
+}, {
+ /* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (2)",
@@ -99,30 +98,18 @@ static struct phy_driver bcm63xx_2_driver = {
.ack_interrupt = bcm63xx_ack_interrupt,
.config_intr = bcm63xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
+} };
static int __init bcm63xx_phy_init(void)
{
- int ret;
-
- ret = phy_driver_register(&bcm63xx_1_driver);
- if (ret)
- goto out_63xx_1;
- ret = phy_driver_register(&bcm63xx_2_driver);
- if (ret)
- goto out_63xx_2;
- return ret;
-
-out_63xx_2:
- phy_driver_unregister(&bcm63xx_1_driver);
-out_63xx_1:
- return ret;
+ return phy_drivers_register(bcm63xx_driver,
+ ARRAY_SIZE(bcm63xx_driver));
}
static void __exit bcm63xx_phy_exit(void)
{
- phy_driver_unregister(&bcm63xx_1_driver);
- phy_driver_unregister(&bcm63xx_2_driver);
+ phy_drivers_unregister(bcm63xx_driver,
+ ARRAY_SIZE(bcm63xx_driver));
}
module_init(bcm63xx_phy_init);
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
new file mode 100644
index 000000000000..2346b38b9837
--- /dev/null
+++ b/drivers/net/phy/bcm87xx.c
@@ -0,0 +1,231 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 - 2012 Cavium, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#define PHY_ID_BCM8706 0x0143bdc1
+#define PHY_ID_BCM8727 0x0143bff0
+
+#define BCM87XX_PMD_RX_SIGNAL_DETECT (MII_ADDR_C45 | 0x1000a)
+#define BCM87XX_10GBASER_PCS_STATUS (MII_ADDR_C45 | 0x30020)
+#define BCM87XX_XGXS_LANE_STATUS (MII_ADDR_C45 | 0x40018)
+
+#define BCM87XX_LASI_CONTROL (MII_ADDR_C45 | 0x39002)
+#define BCM87XX_LASI_STATUS (MII_ADDR_C45 | 0x39005)
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+/* Set and/or override some configuration registers based on the
+ * broadcom,c45-reg-init property stored in the of_node for the phydev.
+ *
+ * broadcom,c45-reg-init = <devid reg mask value>,...;
+ *
+ * There may be one or more sets of <devid reg mask value>:
+ *
+ * devid: which sub-device to use.
+ * reg: the register.
+ * mask: if non-zero, ANDed with existing register value.
+ * value: ORed with the masked value and written to the regiser.
+ *
+ */
+static int bcm87xx_of_reg_init(struct phy_device *phydev)
+{
+ const __be32 *paddr;
+ const __be32 *paddr_end;
+ int len, ret;
+
+ if (!phydev->dev.of_node)
+ return 0;
+
+ paddr = of_get_property(phydev->dev.of_node,
+ "broadcom,c45-reg-init", &len);
+ if (!paddr)
+ return 0;
+
+ paddr_end = paddr + (len /= sizeof(*paddr));
+
+ ret = 0;
+
+ while (paddr + 3 < paddr_end) {
+ u16 devid = be32_to_cpup(paddr++);
+ u16 reg = be32_to_cpup(paddr++);
+ u16 mask = be32_to_cpup(paddr++);
+ u16 val_bits = be32_to_cpup(paddr++);
+ int val;
+ u32 regnum = MII_ADDR_C45 | (devid << 16) | reg;
+ val = 0;
+ if (mask) {
+ val = phy_read(phydev, regnum);
+ if (val < 0) {
+ ret = val;
+ goto err;
+ }
+ val &= mask;
+ }
+ val |= val_bits;
+
+ ret = phy_write(phydev, regnum, val);
+ if (ret < 0)
+ goto err;
+ }
+err:
+ return ret;
+}
+#else
+static int bcm87xx_of_reg_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int bcm87xx_config_init(struct phy_device *phydev)
+{
+ phydev->supported = SUPPORTED_10000baseR_FEC;
+ phydev->advertising = ADVERTISED_10000baseR_FEC;
+ phydev->state = PHY_NOLINK;
+ phydev->autoneg = AUTONEG_DISABLE;
+
+ bcm87xx_of_reg_init(phydev);
+
+ return 0;
+}
+
+static int bcm87xx_config_aneg(struct phy_device *phydev)
+{
+ return -EINVAL;
+}
+
+static int bcm87xx_read_status(struct phy_device *phydev)
+{
+ int rx_signal_detect;
+ int pcs_status;
+ int xgxs_lane_status;
+
+ rx_signal_detect = phy_read(phydev, BCM87XX_PMD_RX_SIGNAL_DETECT);
+ if (rx_signal_detect < 0)
+ return rx_signal_detect;
+
+ if ((rx_signal_detect & 1) == 0)
+ goto no_link;
+
+ pcs_status = phy_read(phydev, BCM87XX_10GBASER_PCS_STATUS);
+ if (pcs_status < 0)
+ return pcs_status;
+
+ if ((pcs_status & 1) == 0)
+ goto no_link;
+
+ xgxs_lane_status = phy_read(phydev, BCM87XX_XGXS_LANE_STATUS);
+ if (xgxs_lane_status < 0)
+ return xgxs_lane_status;
+
+ if ((xgxs_lane_status & 0x1000) == 0)
+ goto no_link;
+
+ phydev->speed = 10000;
+ phydev->link = 1;
+ phydev->duplex = 1;
+ return 0;
+
+no_link:
+ phydev->link = 0;
+ return 0;
+}
+
+static int bcm87xx_config_intr(struct phy_device *phydev)
+{
+ int reg, err;
+
+ reg = phy_read(phydev, BCM87XX_LASI_CONTROL);
+
+ if (reg < 0)
+ return reg;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ reg |= 1;
+ else
+ reg &= ~1;
+
+ err = phy_write(phydev, BCM87XX_LASI_CONTROL, reg);
+ return err;
+}
+
+static int bcm87xx_did_interrupt(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read(phydev, BCM87XX_LASI_STATUS);
+
+ if (reg < 0) {
+ dev_err(&phydev->dev,
+ "Error: Read of BCM87XX_LASI_STATUS failed: %d\n", reg);
+ return 0;
+ }
+ return (reg & 1) != 0;
+}
+
+static int bcm87xx_ack_interrupt(struct phy_device *phydev)
+{
+ /* Reading the LASI status clears it. */
+ bcm87xx_did_interrupt(phydev);
+ return 0;
+}
+
+static int bcm8706_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8706;
+}
+
+static int bcm8727_match_phy_device(struct phy_device *phydev)
+{
+ return phydev->c45_ids.device_ids[4] == PHY_ID_BCM8727;
+}
+
+static struct phy_driver bcm87xx_driver[] = {
+{
+ .phy_id = PHY_ID_BCM8706,
+ .phy_id_mask = 0xffffffff,
+ .name = "Broadcom BCM8706",
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = bcm87xx_config_init,
+ .config_aneg = bcm87xx_config_aneg,
+ .read_status = bcm87xx_read_status,
+ .ack_interrupt = bcm87xx_ack_interrupt,
+ .config_intr = bcm87xx_config_intr,
+ .did_interrupt = bcm87xx_did_interrupt,
+ .match_phy_device = bcm8706_match_phy_device,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM8727,
+ .phy_id_mask = 0xffffffff,
+ .name = "Broadcom BCM8727",
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = bcm87xx_config_init,
+ .config_aneg = bcm87xx_config_aneg,
+ .read_status = bcm87xx_read_status,
+ .ack_interrupt = bcm87xx_ack_interrupt,
+ .config_intr = bcm87xx_config_intr,
+ .did_interrupt = bcm87xx_did_interrupt,
+ .match_phy_device = bcm8727_match_phy_device,
+ .driver = { .owner = THIS_MODULE },
+} };
+
+static int __init bcm87xx_init(void)
+{
+ return phy_drivers_register(bcm87xx_driver,
+ ARRAY_SIZE(bcm87xx_driver));
+}
+module_init(bcm87xx_init);
+
+static void __exit bcm87xx_exit(void)
+{
+ phy_drivers_unregister(bcm87xx_driver,
+ ARRAY_SIZE(bcm87xx_driver));
+}
+module_exit(bcm87xx_exit);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 60338ff63092..f8c90ea75108 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -682,7 +682,8 @@ static int brcm_fet_config_intr(struct phy_device *phydev)
return err;
}
-static struct phy_driver bcm5411_driver = {
+static struct phy_driver broadcom_drivers[] = {
+{
.phy_id = PHY_ID_BCM5411,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
@@ -695,9 +696,7 @@ static struct phy_driver bcm5411_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5421_driver = {
+}, {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
@@ -710,9 +709,7 @@ static struct phy_driver bcm5421_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5461_driver = {
+}, {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
@@ -725,9 +722,7 @@ static struct phy_driver bcm5461_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5464_driver = {
+}, {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
@@ -740,9 +735,7 @@ static struct phy_driver bcm5464_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5481_driver = {
+}, {
.phy_id = PHY_ID_BCM5481,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
@@ -755,9 +748,7 @@ static struct phy_driver bcm5481_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5482_driver = {
+}, {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
@@ -770,9 +761,7 @@ static struct phy_driver bcm5482_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm50610_driver = {
+}, {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
@@ -785,9 +774,7 @@ static struct phy_driver bcm50610_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm50610m_driver = {
+}, {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
@@ -800,9 +787,7 @@ static struct phy_driver bcm50610m_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm57780_driver = {
+}, {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
@@ -815,9 +800,7 @@ static struct phy_driver bcm57780_driver = {
.ack_interrupt = bcm54xx_ack_interrupt,
.config_intr = bcm54xx_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcmac131_driver = {
+}, {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
@@ -830,9 +813,7 @@ static struct phy_driver bcmac131_driver = {
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
.driver = { .owner = THIS_MODULE },
-};
-
-static struct phy_driver bcm5241_driver = {
+}, {
.phy_id = PHY_ID_BCM5241,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
@@ -845,84 +826,18 @@ static struct phy_driver bcm5241_driver = {
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
.driver = { .owner = THIS_MODULE },
-};
+} };
static int __init broadcom_init(void)
{
- int ret;
-
- ret = phy_driver_register(&bcm5411_driver);
- if (ret)
- goto out_5411;
- ret = phy_driver_register(&bcm5421_driver);
- if (ret)
- goto out_5421;
- ret = phy_driver_register(&bcm5461_driver);
- if (ret)
- goto out_5461;
- ret = phy_driver_register(&bcm5464_driver);
- if (ret)
- goto out_5464;
- ret = phy_driver_register(&bcm5481_driver);
- if (ret)
- goto out_5481;
- ret = phy_driver_register(&bcm5482_driver);
- if (ret)
- goto out_5482;
- ret = phy_driver_register(&bcm50610_driver);
- if (ret)
- goto out_50610;
- ret = phy_driver_register(&bcm50610m_driver);
- if (ret)
- goto out_50610m;
- ret = phy_driver_register(&bcm57780_driver);
- if (ret)
- goto out_57780;
- ret = phy_driver_register(&bcmac131_driver);
- if (ret)
- goto out_ac131;
- ret = phy_driver_register(&bcm5241_driver);
- if (ret)
- goto out_5241;
- return ret;
-
-out_5241:
- phy_driver_unregister(&bcmac131_driver);
-out_ac131:
- phy_driver_unregister(&bcm57780_driver);
-out_57780:
- phy_driver_unregister(&bcm50610m_driver);
-out_50610m:
- phy_driver_unregister(&bcm50610_driver);
-out_50610:
- phy_driver_unregister(&bcm5482_driver);
-out_5482:
- phy_driver_unregister(&bcm5481_driver);
-out_5481:
- phy_driver_unregister(&bcm5464_driver);
-out_5464:
- phy_driver_unregister(&bcm5461_driver);
-out_5461:
- phy_driver_unregister(&bcm5421_driver);
-out_5421:
- phy_driver_unregister(&bcm5411_driver);
-out_5411:
- return ret;
+ return phy_drivers_register(broadcom_drivers,
+ ARRAY_SIZE(broadcom_drivers));
}
static void __exit broadcom_exit(void)
{
- phy_driver_unregister(&bcm5241_driver);
- phy_driver_unregister(&bcmac131_driver);
- phy_driver_unregister(&bcm57780_driver);
- phy_driver_unregister(&bcm50610m_driver);
- phy_driver_unregister(&bcm50610_driver);
- phy_driver_unregister(&bcm5482_driver);
- phy_driver_unregister(&bcm5481_driver);
- phy_driver_unregister(&bcm5464_driver);
- phy_driver_unregister(&bcm5461_driver);
- phy_driver_unregister(&bcm5421_driver);
- phy_driver_unregister(&bcm5411_driver);
+ phy_drivers_unregister(broadcom_drivers,
+ ARRAY_SIZE(broadcom_drivers));
}
module_init(broadcom_init);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index d28173161c21..db472ffb6e89 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -102,7 +102,8 @@ static int cis820x_config_intr(struct phy_device *phydev)
}
/* Cicada 8201, a.k.a Vitesse VSC8201 */
-static struct phy_driver cis8201_driver = {
+static struct phy_driver cis820x_driver[] = {
+{
.phy_id = 0x000fc410,
.name = "Cicada Cis8201",
.phy_id_mask = 0x000ffff0,
@@ -113,11 +114,8 @@ static struct phy_driver cis8201_driver = {
.read_status = &genphy_read_status,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-/* Cicada 8204 */
-static struct phy_driver cis8204_driver = {
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x000fc440,
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
@@ -128,32 +126,19 @@ static struct phy_driver cis8204_driver = {
.read_status = &genphy_read_status,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
+ .driver = { .owner = THIS_MODULE,},
+} };
static int __init cicada_init(void)
{
- int ret;
-
- ret = phy_driver_register(&cis8204_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&cis8201_driver);
- if (ret)
- goto err2;
- return 0;
-
-err2:
- phy_driver_unregister(&cis8204_driver);
-err1:
- return ret;
+ return phy_drivers_register(cis820x_driver,
+ ARRAY_SIZE(cis820x_driver));
}
static void __exit cicada_exit(void)
{
- phy_driver_unregister(&cis8204_driver);
- phy_driver_unregister(&cis8201_driver);
+ phy_drivers_unregister(cis820x_driver,
+ ARRAY_SIZE(cis820x_driver));
}
module_init(cicada_init);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 5f59cc064778..81c7bc010dd8 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -144,7 +144,8 @@ static int dm9161_ack_interrupt(struct phy_device *phydev)
return (err < 0) ? err : 0;
}
-static struct phy_driver dm9161e_driver = {
+static struct phy_driver dm91xx_driver[] = {
+{
.phy_id = 0x0181b880,
.name = "Davicom DM9161E",
.phy_id_mask = 0x0ffffff0,
@@ -153,9 +154,7 @@ static struct phy_driver dm9161e_driver = {
.config_aneg = dm9161_config_aneg,
.read_status = genphy_read_status,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver dm9161a_driver = {
+}, {
.phy_id = 0x0181b8a0,
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
@@ -164,9 +163,7 @@ static struct phy_driver dm9161a_driver = {
.config_aneg = dm9161_config_aneg,
.read_status = genphy_read_status,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver dm9131_driver = {
+}, {
.phy_id = 0x00181b80,
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
@@ -177,38 +174,18 @@ static struct phy_driver dm9131_driver = {
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
+} };
static int __init davicom_init(void)
{
- int ret;
-
- ret = phy_driver_register(&dm9161e_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&dm9161a_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register(&dm9131_driver);
- if (ret)
- goto err3;
- return 0;
-
- err3:
- phy_driver_unregister(&dm9161a_driver);
- err2:
- phy_driver_unregister(&dm9161e_driver);
- err1:
- return ret;
+ return phy_drivers_register(dm91xx_driver,
+ ARRAY_SIZE(dm91xx_driver));
}
static void __exit davicom_exit(void)
{
- phy_driver_unregister(&dm9161e_driver);
- phy_driver_unregister(&dm9161a_driver);
- phy_driver_unregister(&dm9131_driver);
+ phy_drivers_unregister(dm91xx_driver,
+ ARRAY_SIZE(dm91xx_driver));
}
module_init(davicom_init);
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 940b29022d0c..b0da0226661f 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -17,6 +17,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -453,16 +456,16 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
if (!phydev->attached_dev) {
- pr_warning("dp83640: expected to find an attached netdevice\n");
+ pr_warn("expected to find an attached netdevice\n");
return;
}
if (on) {
if (dev_mc_add(phydev->attached_dev, status_frame_dst))
- pr_warning("dp83640: failed to add mc address\n");
+ pr_warn("failed to add mc address\n");
} else {
if (dev_mc_del(phydev->attached_dev, status_frame_dst))
- pr_warning("dp83640: failed to delete mc address\n");
+ pr_warn("failed to delete mc address\n");
}
}
@@ -582,9 +585,9 @@ static void recalibrate(struct dp83640_clock *clock)
* read out and correct offsets
*/
val = ext_read(master, PAGE4, PTP_STS);
- pr_info("master PTP_STS 0x%04hx", val);
+ pr_info("master PTP_STS 0x%04hx\n", val);
val = ext_read(master, PAGE4, PTP_ESTS);
- pr_info("master PTP_ESTS 0x%04hx", val);
+ pr_info("master PTP_ESTS 0x%04hx\n", val);
event_ts.ns_lo = ext_read(master, PAGE4, PTP_EDATA);
event_ts.ns_hi = ext_read(master, PAGE4, PTP_EDATA);
event_ts.sec_lo = ext_read(master, PAGE4, PTP_EDATA);
@@ -594,9 +597,9 @@ static void recalibrate(struct dp83640_clock *clock)
list_for_each(this, &clock->phylist) {
tmp = list_entry(this, struct dp83640_private, list);
val = ext_read(tmp->phydev, PAGE4, PTP_STS);
- pr_info("slave PTP_STS 0x%04hx", val);
+ pr_info("slave PTP_STS 0x%04hx\n", val);
val = ext_read(tmp->phydev, PAGE4, PTP_ESTS);
- pr_info("slave PTP_ESTS 0x%04hx", val);
+ pr_info("slave PTP_ESTS 0x%04hx\n", val);
event_ts.ns_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.ns_hi = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
event_ts.sec_lo = ext_read(tmp->phydev, PAGE4, PTP_EDATA);
@@ -686,7 +689,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
prune_rx_ts(dp83640);
if (list_empty(&dp83640->rxpool)) {
- pr_debug("dp83640: rx timestamp pool is empty\n");
+ pr_debug("rx timestamp pool is empty\n");
goto out;
}
rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
@@ -709,7 +712,7 @@ static void decode_txts(struct dp83640_private *dp83640,
skb = skb_dequeue(&dp83640->tx_queue);
if (!skb) {
- pr_debug("dp83640: have timestamp but tx_queue empty\n");
+ pr_debug("have timestamp but tx_queue empty\n");
return;
}
ns = phy2txts(phy_txts);
@@ -847,7 +850,7 @@ static void dp83640_free_clocks(void)
list_for_each_safe(this, next, &phyter_clocks) {
clock = list_entry(this, struct dp83640_clock, list);
if (!list_empty(&clock->phylist)) {
- pr_warning("phy list non-empty while unloading");
+ pr_warn("phy list non-empty while unloading\n");
BUG();
}
list_del(&clock->list);
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 633680d0828e..ba55adfc7aae 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -70,7 +70,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
lpa |= LPA_10FULL;
break;
default:
- printk(KERN_WARNING "fixed phy: unknown speed\n");
+ pr_warn("fixed phy: unknown speed\n");
return -EINVAL;
}
} else {
@@ -90,7 +90,7 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
lpa |= LPA_10HALF;
break;
default:
- printk(KERN_WARNING "fixed phy: unknown speed\n");
+ pr_warn("fixed phy: unknown speed\n");
return -EINVAL;
}
}
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 47f8e8939266..d5199cb4caec 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -202,7 +202,8 @@ static int ip101a_g_ack_interrupt(struct phy_device *phydev)
return 0;
}
-static struct phy_driver ip175c_driver = {
+static struct phy_driver icplus_driver[] = {
+{
.phy_id = 0x02430d80,
.name = "ICPlus IP175C",
.phy_id_mask = 0x0ffffff0,
@@ -213,9 +214,7 @@ static struct phy_driver ip175c_driver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ip1001_driver = {
+}, {
.phy_id = 0x02430d90,
.name = "ICPlus IP1001",
.phy_id_mask = 0x0ffffff0,
@@ -227,9 +226,7 @@ static struct phy_driver ip1001_driver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ip101a_g_driver = {
+}, {
.phy_id = 0x02430c54,
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
@@ -243,28 +240,18 @@ static struct phy_driver ip101a_g_driver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
-};
+} };
static int __init icplus_init(void)
{
- int ret = 0;
-
- ret = phy_driver_register(&ip1001_driver);
- if (ret < 0)
- return -ENODEV;
-
- ret = phy_driver_register(&ip101a_g_driver);
- if (ret < 0)
- return -ENODEV;
-
- return phy_driver_register(&ip175c_driver);
+ return phy_drivers_register(icplus_driver,
+ ARRAY_SIZE(icplus_driver));
}
static void __exit icplus_exit(void)
{
- phy_driver_unregister(&ip1001_driver);
- phy_driver_unregister(&ip101a_g_driver);
- phy_driver_unregister(&ip175c_driver);
+ phy_drivers_unregister(icplus_driver,
+ ARRAY_SIZE(icplus_driver));
}
module_init(icplus_init);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 6f6e8b616a62..6d1e3fcc43e2 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -149,7 +149,8 @@ static int lxt973_config_aneg(struct phy_device *phydev)
return phydev->priv ? 0 : genphy_config_aneg(phydev);
}
-static struct phy_driver lxt970_driver = {
+static struct phy_driver lxt97x_driver[] = {
+{
.phy_id = 0x78100000,
.name = "LXT970",
.phy_id_mask = 0xfffffff0,
@@ -160,10 +161,8 @@ static struct phy_driver lxt970_driver = {
.read_status = genphy_read_status,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver lxt971_driver = {
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x001378e0,
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
@@ -173,10 +172,8 @@ static struct phy_driver lxt971_driver = {
.read_status = genphy_read_status,
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver lxt973_driver = {
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = 0x00137a10,
.name = "LXT973",
.phy_id_mask = 0xfffffff0,
@@ -185,39 +182,19 @@ static struct phy_driver lxt973_driver = {
.probe = lxt973_probe,
.config_aneg = lxt973_config_aneg,
.read_status = genphy_read_status,
- .driver = { .owner = THIS_MODULE,},
-};
+ .driver = { .owner = THIS_MODULE,},
+} };
static int __init lxt_init(void)
{
- int ret;
-
- ret = phy_driver_register(&lxt970_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&lxt971_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register(&lxt973_driver);
- if (ret)
- goto err3;
- return 0;
-
- err3:
- phy_driver_unregister(&lxt971_driver);
- err2:
- phy_driver_unregister(&lxt970_driver);
- err1:
- return ret;
+ return phy_drivers_register(lxt97x_driver,
+ ARRAY_SIZE(lxt97x_driver));
}
static void __exit lxt_exit(void)
{
- phy_driver_unregister(&lxt970_driver);
- phy_driver_unregister(&lxt971_driver);
- phy_driver_unregister(&lxt973_driver);
+ phy_drivers_unregister(lxt97x_driver,
+ ARRAY_SIZE(lxt97x_driver));
}
module_init(lxt_init);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 418928d644bf..5d2a3f215887 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -826,28 +826,14 @@ static struct phy_driver marvell_drivers[] = {
static int __init marvell_init(void)
{
- int ret;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(marvell_drivers); i++) {
- ret = phy_driver_register(&marvell_drivers[i]);
-
- if (ret) {
- while (i-- > 0)
- phy_driver_unregister(&marvell_drivers[i]);
- return ret;
- }
- }
-
- return 0;
+ return phy_drivers_register(marvell_drivers,
+ ARRAY_SIZE(marvell_drivers));
}
static void __exit marvell_exit(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(marvell_drivers); i++)
- phy_driver_unregister(&marvell_drivers[i]);
+ phy_drivers_unregister(marvell_drivers,
+ ARRAY_SIZE(marvell_drivers));
}
module_init(marvell_init);
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 39ea0674dcde..5c120189ec86 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -46,7 +46,13 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
struct mdio_mux_parent_bus *pb = cb->parent;
int r;
- mutex_lock(&pb->mii_bus->mdio_lock);
+ /* In theory multiple mdio_mux could be stacked, thus creating
+ * more than a single level of nesting. But in practice,
+ * SINGLE_DEPTH_NESTING will cover the vast majority of use
+ * cases. We use it, instead of trying to handle the general
+ * case.
+ */
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
@@ -71,7 +77,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
int r;
- mutex_lock(&pb->mii_bus->mdio_lock);
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 5061608f408c..170eb411ab5d 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -13,6 +13,9 @@
* option) any later version.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -22,6 +25,7 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/of_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -148,7 +152,7 @@ int mdiobus_register(struct mii_bus *bus)
err = device_register(&bus->dev);
if (err) {
- printk(KERN_ERR "mii_bus %s failed to register\n", bus->id);
+ pr_err("mii_bus %s failed to register\n", bus->id);
return -EINVAL;
}
@@ -229,7 +233,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
struct phy_device *phydev;
int err;
- phydev = get_phy_device(bus, addr);
+ phydev = get_phy_device(bus, addr, false);
if (IS_ERR(phydev) || phydev == NULL)
return phydev;
@@ -305,6 +309,12 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
struct phy_device *phydev = to_phy_device(dev);
struct phy_driver *phydrv = to_phy_driver(drv);
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (phydrv->match_phy_device)
+ return phydrv->match_phy_device(phydev);
+
return ((phydrv->phy_id & phydrv->phy_id_mask) ==
(phydev->phy_id & phydrv->phy_id_mask));
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9d6c80c8a0cf..cf287e0eb408 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -114,7 +114,8 @@ static int ks8051_config_init(struct phy_device *phydev)
return 0;
}
-static struct phy_driver ks8737_driver = {
+static struct phy_driver ksphy_driver[] = {
+{
.phy_id = PHY_ID_KS8737,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KS8737",
@@ -126,9 +127,7 @@ static struct phy_driver ks8737_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ks8737_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ks8041_driver = {
+}, {
.phy_id = PHY_ID_KS8041,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KS8041",
@@ -141,9 +140,7 @@ static struct phy_driver ks8041_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ks8051_driver = {
+}, {
.phy_id = PHY_ID_KS8051,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KS8051",
@@ -156,9 +153,7 @@ static struct phy_driver ks8051_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ks8001_driver = {
+}, {
.phy_id = PHY_ID_KS8001,
.name = "Micrel KS8001 or KS8721",
.phy_id_mask = 0x00ffffff,
@@ -170,9 +165,7 @@ static struct phy_driver ks8001_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.driver = { .owner = THIS_MODULE,},
-};
-
-static struct phy_driver ksz9021_driver = {
+}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
@@ -185,51 +178,18 @@ static struct phy_driver ksz9021_driver = {
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = ksz9021_config_intr,
.driver = { .owner = THIS_MODULE, },
-};
+} };
static int __init ksphy_init(void)
{
- int ret;
-
- ret = phy_driver_register(&ks8001_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register(&ksz9021_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register(&ks8737_driver);
- if (ret)
- goto err3;
- ret = phy_driver_register(&ks8041_driver);
- if (ret)
- goto err4;
- ret = phy_driver_register(&ks8051_driver);
- if (ret)
- goto err5;
-
- return 0;
-
-err5:
- phy_driver_unregister(&ks8041_driver);
-err4:
- phy_driver_unregister(&ks8737_driver);
-err3:
- phy_driver_unregister(&ksz9021_driver);
-err2:
- phy_driver_unregister(&ks8001_driver);
-err1:
- return ret;
+ return phy_drivers_register(ksphy_driver,
+ ARRAY_SIZE(ksphy_driver));
}
static void __exit ksphy_exit(void)
{
- phy_driver_unregister(&ks8001_driver);
- phy_driver_unregister(&ks8737_driver);
- phy_driver_unregister(&ksz9021_driver);
- phy_driver_unregister(&ks8041_driver);
- phy_driver_unregister(&ks8051_driver);
+ phy_drivers_unregister(ksphy_driver,
+ ARRAY_SIZE(ksphy_driver));
}
module_init(ksphy_init);
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 04bb8fcc0cb5..9a5f234d95b0 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -15,6 +15,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
@@ -22,6 +24,8 @@
#include <linux/phy.h>
#include <linux/netdevice.h>
+#define DEBUG
+
/* DP83865 phy identifier values */
#define DP83865_PHY_ID 0x20005c7a
@@ -112,8 +116,8 @@ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
ns_exp_write(phydev, 0x1c0,
ns_exp_read(phydev, 0x1c0) & 0xfffe);
- printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n",
- (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
+ pr_debug("10BASE-T HDX loopback %s\n",
+ (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
}
static int ns_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3cbda0851f83..7ca2ff97c368 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -15,6 +15,9 @@
* option) any later version.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -32,6 +35,7 @@
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/mdio.h>
#include <linux/atomic.h>
#include <asm/io.h>
@@ -44,18 +48,16 @@
*/
void phy_print_status(struct phy_device *phydev)
{
- pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
- phydev->link ? "Up" : "Down");
if (phydev->link)
- printk(KERN_CONT " - %d/%s", phydev->speed,
- DUPLEX_FULL == phydev->duplex ?
- "Full" : "Half");
-
- printk(KERN_CONT "\n");
+ pr_info("%s - Link is Up - %d/%s\n",
+ dev_name(&phydev->dev),
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
}
EXPORT_SYMBOL(phy_print_status);
-
/**
* phy_clear_interrupt - Ack the phy device's interrupt
* @phydev: the phy_device struct
@@ -482,9 +484,8 @@ static void phy_force_reduction(struct phy_device *phydev)
phydev->speed = settings[idx].speed;
phydev->duplex = settings[idx].duplex;
- pr_info("Trying %d/%s\n", phydev->speed,
- DUPLEX_FULL == phydev->duplex ?
- "FULL" : "HALF");
+ pr_info("Trying %d/%s\n",
+ phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
}
@@ -598,9 +599,8 @@ int phy_start_interrupts(struct phy_device *phydev)
IRQF_SHARED,
"phy_interrupt",
phydev) < 0) {
- printk(KERN_WARNING "%s: Can't get IRQ %d (PHY)\n",
- phydev->bus->name,
- phydev->irq);
+ pr_warn("%s: Can't get IRQ %d (PHY)\n",
+ phydev->bus->name, phydev->irq);
phydev->irq = PHY_POLL;
return 0;
}
@@ -838,10 +838,10 @@ void phy_state_machine(struct work_struct *work)
phydev->autoneg = AUTONEG_DISABLE;
- pr_info("Trying %d/%s\n", phydev->speed,
- DUPLEX_FULL ==
- phydev->duplex ?
- "FULL" : "HALF");
+ pr_info("Trying %d/%s\n",
+ phydev->speed,
+ DUPLEX_FULL == phydev->duplex ?
+ "FULL" : "HALF");
}
break;
case PHY_NOLINK:
@@ -968,3 +968,283 @@ void phy_state_machine(struct work_struct *work)
schedule_delayed_work(&phydev->state_queue, PHY_STATE_TIME * HZ);
}
+
+static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
+ int addr)
+{
+ /* Write the desired MMD Devad */
+ bus->write(bus, addr, MII_MMD_CTRL, devad);
+
+ /* Write the desired MMD register address */
+ bus->write(bus, addr, MII_MMD_DATA, prtad);
+
+ /* Select the Function : DATA with no post increment */
+ bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+}
+
+/**
+ * phy_read_mmd_indirect - reads data from the MMD registers
+ * @bus: the target MII bus
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ *
+ * Description: it reads data from the MMD registers (clause 22 to access to
+ * clause 45) of the specified phy address.
+ * To read these register we have:
+ * 1) Write reg 13 // DEVAD
+ * 2) Write reg 14 // MMD Address
+ * 3) Write reg 13 // MMD Data Command for MMD DEVAD
+ * 3) Read reg 14 // Read MMD data
+ */
+static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
+ int addr)
+{
+ u32 ret;
+
+ mmd_phy_indirect(bus, prtad, devad, addr);
+
+ /* Read the content of the MMD's selected register */
+ ret = bus->read(bus, addr, MII_MMD_DATA);
+
+ return ret;
+}
+
+/**
+ * phy_write_mmd_indirect - writes data to the MMD registers
+ * @bus: the target MII bus
+ * @prtad: MMD Address
+ * @devad: MMD DEVAD
+ * @addr: PHY address on the MII bus
+ * @data: data to write in the MMD register
+ *
+ * Description: Write data from the MMD registers of the specified
+ * phy address.
+ * To write these register we have:
+ * 1) Write reg 13 // DEVAD
+ * 2) Write reg 14 // MMD Address
+ * 3) Write reg 13 // MMD Data Command for MMD DEVAD
+ * 3) Write reg 14 // Write MMD data
+ */
+static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
+ int addr, u32 data)
+{
+ mmd_phy_indirect(bus, prtad, devad, addr);
+
+ /* Write the data into MMD's selected register */
+ bus->write(bus, addr, MII_MMD_DATA, data);
+}
+
+static u32 phy_eee_to_adv(u16 eee_adv)
+{
+ u32 adv = 0;
+
+ if (eee_adv & MDIO_EEE_100TX)
+ adv |= ADVERTISED_100baseT_Full;
+ if (eee_adv & MDIO_EEE_1000T)
+ adv |= ADVERTISED_1000baseT_Full;
+ if (eee_adv & MDIO_EEE_10GT)
+ adv |= ADVERTISED_10000baseT_Full;
+ if (eee_adv & MDIO_EEE_1000KX)
+ adv |= ADVERTISED_1000baseKX_Full;
+ if (eee_adv & MDIO_EEE_10GKX4)
+ adv |= ADVERTISED_10000baseKX4_Full;
+ if (eee_adv & MDIO_EEE_10GKR)
+ adv |= ADVERTISED_10000baseKR_Full;
+
+ return adv;
+}
+
+static u32 phy_eee_to_supported(u16 eee_caported)
+{
+ u32 supported = 0;
+
+ if (eee_caported & MDIO_EEE_100TX)
+ supported |= SUPPORTED_100baseT_Full;
+ if (eee_caported & MDIO_EEE_1000T)
+ supported |= SUPPORTED_1000baseT_Full;
+ if (eee_caported & MDIO_EEE_10GT)
+ supported |= SUPPORTED_10000baseT_Full;
+ if (eee_caported & MDIO_EEE_1000KX)
+ supported |= SUPPORTED_1000baseKX_Full;
+ if (eee_caported & MDIO_EEE_10GKX4)
+ supported |= SUPPORTED_10000baseKX4_Full;
+ if (eee_caported & MDIO_EEE_10GKR)
+ supported |= SUPPORTED_10000baseKR_Full;
+
+ return supported;
+}
+
+static u16 phy_adv_to_eee(u32 adv)
+{
+ u16 reg = 0;
+
+ if (adv & ADVERTISED_100baseT_Full)
+ reg |= MDIO_EEE_100TX;
+ if (adv & ADVERTISED_1000baseT_Full)
+ reg |= MDIO_EEE_1000T;
+ if (adv & ADVERTISED_10000baseT_Full)
+ reg |= MDIO_EEE_10GT;
+ if (adv & ADVERTISED_1000baseKX_Full)
+ reg |= MDIO_EEE_1000KX;
+ if (adv & ADVERTISED_10000baseKX4_Full)
+ reg |= MDIO_EEE_10GKX4;
+ if (adv & ADVERTISED_10000baseKR_Full)
+ reg |= MDIO_EEE_10GKR;
+
+ return reg;
+}
+
+/**
+ * phy_init_eee - init and check the EEE feature
+ * @phydev: target phy_device struct
+ * @clk_stop_enable: PHY may stop the clock during LPI
+ *
+ * Description: it checks if the Energy-Efficient Ethernet (EEE)
+ * is supported by looking at the MMD registers 3.20 and 7.60/61
+ * and it programs the MMD register 3.0 setting the "Clock stop enable"
+ * bit if required.
+ */
+int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
+{
+ int ret = -EPROTONOSUPPORT;
+
+ /* According to 802.3az,the EEE is supported only in full duplex-mode.
+ * Also EEE feature is active when core is operating with MII, GMII
+ * or RGMII.
+ */
+ if ((phydev->duplex == DUPLEX_FULL) &&
+ ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
+ int eee_lp, eee_cap, eee_adv;
+ u32 lp, cap, adv;
+ int idx, status;
+
+ /* Read phy status to properly get the right settings */
+ status = phy_read_status(phydev);
+ if (status)
+ return status;
+
+ /* First check if the EEE ability is supported */
+ eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
+ MDIO_MMD_PCS, phydev->addr);
+ if (eee_cap < 0)
+ return eee_cap;
+
+ cap = phy_eee_to_supported(eee_cap);
+ if (!cap)
+ goto eee_exit;
+
+ /* Check which link settings negotiated and verify it in
+ * the EEE advertising registers.
+ */
+ eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
+ MDIO_MMD_AN, phydev->addr);
+ if (eee_lp < 0)
+ return eee_lp;
+
+ eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
+ MDIO_MMD_AN, phydev->addr);
+ if (eee_adv < 0)
+ return eee_adv;
+
+ adv = phy_eee_to_adv(eee_adv);
+ lp = phy_eee_to_adv(eee_lp);
+ idx = phy_find_setting(phydev->speed, phydev->duplex);
+ if ((lp & adv & settings[idx].setting))
+ goto eee_exit;
+
+ if (clk_stop_enable) {
+ /* Configure the PHY to stop receiving xMII
+ * clock while it is signaling LPI.
+ */
+ int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
+ MDIO_MMD_PCS,
+ phydev->addr);
+ if (val < 0)
+ return val;
+
+ val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
+ phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
+ MDIO_MMD_PCS, phydev->addr, val);
+ }
+
+ ret = 0; /* EEE supported */
+ }
+
+eee_exit:
+ return ret;
+}
+EXPORT_SYMBOL(phy_init_eee);
+
+/**
+ * phy_get_eee_err - report the EEE wake error count
+ * @phydev: target phy_device struct
+ *
+ * Description: it is to report the number of time where the PHY
+ * failed to complete its normal wake sequence.
+ */
+int phy_get_eee_err(struct phy_device *phydev)
+{
+ return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
+ MDIO_MMD_PCS, phydev->addr);
+
+}
+EXPORT_SYMBOL(phy_get_eee_err);
+
+/**
+ * phy_ethtool_get_eee - get EEE supported and status
+ * @phydev: target phy_device struct
+ * @data: ethtool_eee data
+ *
+ * Description: it reportes the Supported/Advertisement/LP Advertisement
+ * capabilities.
+ */
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
+{
+ int val;
+
+ /* Get Supported EEE */
+ val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
+ MDIO_MMD_PCS, phydev->addr);
+ if (val < 0)
+ return val;
+ data->supported = phy_eee_to_supported(val);
+
+ /* Get advertisement EEE */
+ val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
+ MDIO_MMD_AN, phydev->addr);
+ if (val < 0)
+ return val;
+ data->advertised = phy_eee_to_adv(val);
+
+ /* Get LP advertisement EEE */
+ val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
+ MDIO_MMD_AN, phydev->addr);
+ if (val < 0)
+ return val;
+ data->lp_advertised = phy_eee_to_adv(val);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_get_eee);
+
+/**
+ * phy_ethtool_set_eee - set EEE supported and status
+ * @phydev: target phy_device struct
+ * @data: ethtool_eee data
+ *
+ * Description: it is to program the Advertisement EEE register.
+ */
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
+{
+ int val;
+
+ val = phy_adv_to_eee(data->advertised);
+ phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
+ phydev->addr, val);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_set_eee);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index de86a5582224..8af46e88a181 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -14,6 +14,9 @@
* option) any later version.
*
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
@@ -149,8 +152,8 @@ int phy_scan_fixups(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_scan_fixups);
-static struct phy_device* phy_device_create(struct mii_bus *bus,
- int addr, int phy_id)
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+ bool is_c45, struct phy_c45_device_ids *c45_ids)
{
struct phy_device *dev;
@@ -171,8 +174,11 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
dev->autoneg = AUTONEG_ENABLE;
+ dev->is_c45 = is_c45;
dev->addr = addr;
dev->phy_id = phy_id;
+ if (c45_ids)
+ dev->c45_ids = *c45_ids;
dev->bus = bus;
dev->dev.parent = bus->parent;
dev->dev.bus = &mdio_bus_type;
@@ -197,20 +203,99 @@ static struct phy_device* phy_device_create(struct mii_bus *bus,
return dev;
}
+EXPORT_SYMBOL(phy_device_create);
+
+/**
+ * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs.
+ * @bus: the target MII bus
+ * @addr: PHY address on the MII bus
+ * @phy_id: where to store the ID retrieved.
+ * @c45_ids: where to store the c45 ID information.
+ *
+ * If the PHY devices-in-package appears to be valid, it and the
+ * corresponding identifiers are stored in @c45_ids, zero is stored
+ * in @phy_id. Otherwise 0xffffffff is stored in @phy_id. Returns
+ * zero on success.
+ *
+ */
+static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id,
+ struct phy_c45_device_ids *c45_ids) {
+ int phy_reg;
+ int i, reg_addr;
+ const int num_ids = ARRAY_SIZE(c45_ids->device_ids);
+
+ /* Find first non-zero Devices In package. Device
+ * zero is reserved, so don't probe it.
+ */
+ for (i = 1;
+ i < num_ids && c45_ids->devices_in_package == 0;
+ i++) {
+ reg_addr = MII_ADDR_C45 | i << 16 | 6;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->devices_in_package = (phy_reg & 0xffff) << 16;
+
+ reg_addr = MII_ADDR_C45 | i << 16 | 5;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->devices_in_package |= (phy_reg & 0xffff);
+
+ /* If mostly Fs, there is no device there,
+ * let's get out of here.
+ */
+ if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) {
+ *phy_id = 0xffffffff;
+ return 0;
+ }
+ }
+
+ /* Now probe Device Identifiers for each device present. */
+ for (i = 1; i < num_ids; i++) {
+ if (!(c45_ids->devices_in_package & (1 << i)))
+ continue;
+
+ reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID1;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->device_ids[i] = (phy_reg & 0xffff) << 16;
+
+ reg_addr = MII_ADDR_C45 | i << 16 | MII_PHYSID2;
+ phy_reg = mdiobus_read(bus, addr, reg_addr);
+ if (phy_reg < 0)
+ return -EIO;
+ c45_ids->device_ids[i] |= (phy_reg & 0xffff);
+ }
+ *phy_id = 0;
+ return 0;
+}
/**
* get_phy_id - reads the specified addr for its ID.
* @bus: the target MII bus
* @addr: PHY address on the MII bus
* @phy_id: where to store the ID retrieved.
+ * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
+ * @c45_ids: where to store the c45 ID information.
+ *
+ * Description: In the case of a 802.3-c22 PHY, reads the ID registers
+ * of the PHY at @addr on the @bus, stores it in @phy_id and returns
+ * zero on success.
+ *
+ * In the case of a 802.3-c45 PHY, get_phy_c45_ids() is invoked, and
+ * its return value is in turn returned.
*
- * Description: Reads the ID registers of the PHY at @addr on the
- * @bus, stores it in @phy_id and returns zero on success.
*/
-static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
+static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id,
+ bool is_c45, struct phy_c45_device_ids *c45_ids)
{
int phy_reg;
+ if (is_c45)
+ return get_phy_c45_ids(bus, addr, phy_id, c45_ids);
+
/* Grab the bits from PHYIR1, and put them
* in the upper half */
phy_reg = mdiobus_read(bus, addr, MII_PHYSID1);
@@ -235,17 +320,19 @@ static int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
* get_phy_device - reads the specified PHY device and returns its @phy_device struct
* @bus: the target MII bus
* @addr: PHY address on the MII bus
+ * @is_c45: If true the PHY uses the 802.3 clause 45 protocol
*
* Description: Reads the ID registers of the PHY at @addr on the
* @bus, then allocates and returns the phy_device to represent it.
*/
-struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
+struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
{
+ struct phy_c45_device_ids c45_ids = {0};
struct phy_device *dev = NULL;
- u32 phy_id;
+ u32 phy_id = 0;
int r;
- r = get_phy_id(bus, addr, &phy_id);
+ r = get_phy_id(bus, addr, &phy_id, is_c45, &c45_ids);
if (r)
return ERR_PTR(r);
@@ -253,7 +340,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
if ((phy_id & 0x1fffffff) == 0x1fffffff)
return NULL;
- dev = phy_device_create(bus, addr, phy_id);
+ dev = phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
return dev;
}
@@ -446,6 +533,11 @@ static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Assume that if there is no driver, that it doesn't
* exist, and we should use the genphy driver. */
if (NULL == d->driver) {
+ if (phydev->is_c45) {
+ pr_err("No driver for phy %x\n", phydev->phy_id);
+ return -ENODEV;
+ }
+
d->driver = &genphy_driver.driver;
err = d->driver->probe(d);
@@ -975,8 +1067,8 @@ int phy_driver_register(struct phy_driver *new_driver)
retval = driver_register(&new_driver->driver);
if (retval) {
- printk(KERN_ERR "%s: Error %d in registering driver\n",
- new_driver->name, retval);
+ pr_err("%s: Error %d in registering driver\n",
+ new_driver->name, retval);
return retval;
}
@@ -987,12 +1079,37 @@ int phy_driver_register(struct phy_driver *new_driver)
}
EXPORT_SYMBOL(phy_driver_register);
+int phy_drivers_register(struct phy_driver *new_driver, int n)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n; i++) {
+ ret = phy_driver_register(new_driver + i);
+ if (ret) {
+ while (i-- > 0)
+ phy_driver_unregister(new_driver + i);
+ break;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL(phy_drivers_register);
+
void phy_driver_unregister(struct phy_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(phy_driver_unregister);
+void phy_drivers_unregister(struct phy_driver *drv, int n)
+{
+ int i;
+ for (i = 0; i < n; i++) {
+ phy_driver_unregister(drv + i);
+ }
+}
+EXPORT_SYMBOL(phy_drivers_unregister);
+
static struct phy_driver genphy_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index f414ffb5b728..72f93470ea35 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -65,11 +65,7 @@ static struct phy_driver rtl821x_driver = {
static int __init realtek_init(void)
{
- int ret;
-
- ret = phy_driver_register(&rtl821x_driver);
-
- return ret;
+ return phy_driver_register(&rtl821x_driver);
}
static void __exit realtek_exit(void)
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index fc3e7e96c88c..6d6192316b30 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -12,7 +12,7 @@
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
- * Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@smsc.com
+ * Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@shawell.net
*
*/
@@ -61,7 +61,8 @@ static int lan911x_config_init(struct phy_device *phydev)
return smsc_phy_ack_interrupt(phydev);
}
-static struct phy_driver lan83c185_driver = {
+static struct phy_driver smsc_phy_driver[] = {
+{
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN83C185",
@@ -83,9 +84,7 @@ static struct phy_driver lan83c185_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan8187_driver = {
+}, {
.phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8187",
@@ -107,9 +106,7 @@ static struct phy_driver lan8187_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan8700_driver = {
+}, {
.phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8700",
@@ -131,9 +128,7 @@ static struct phy_driver lan8700_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan911x_int_driver = {
+}, {
.phy_id = 0x0007c0d0, /* OUI=0x00800f, Model#=0x0d */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN911x Internal PHY",
@@ -155,9 +150,7 @@ static struct phy_driver lan911x_int_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
-
-static struct phy_driver lan8710_driver = {
+}, {
.phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8710/LAN8720",
@@ -179,53 +172,18 @@ static struct phy_driver lan8710_driver = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE, }
-};
+} };
static int __init smsc_init(void)
{
- int ret;
-
- ret = phy_driver_register (&lan83c185_driver);
- if (ret)
- goto err1;
-
- ret = phy_driver_register (&lan8187_driver);
- if (ret)
- goto err2;
-
- ret = phy_driver_register (&lan8700_driver);
- if (ret)
- goto err3;
-
- ret = phy_driver_register (&lan911x_int_driver);
- if (ret)
- goto err4;
-
- ret = phy_driver_register (&lan8710_driver);
- if (ret)
- goto err5;
-
- return 0;
-
-err5:
- phy_driver_unregister (&lan911x_int_driver);
-err4:
- phy_driver_unregister (&lan8700_driver);
-err3:
- phy_driver_unregister (&lan8187_driver);
-err2:
- phy_driver_unregister (&lan83c185_driver);
-err1:
- return ret;
+ return phy_drivers_register(smsc_phy_driver,
+ ARRAY_SIZE(smsc_phy_driver));
}
static void __exit smsc_exit(void)
{
- phy_driver_unregister (&lan8710_driver);
- phy_driver_unregister (&lan911x_int_driver);
- phy_driver_unregister (&lan8700_driver);
- phy_driver_unregister (&lan8187_driver);
- phy_driver_unregister (&lan83c185_driver);
+ return phy_drivers_unregister(smsc_phy_driver,
+ ARRAY_SIZE(smsc_phy_driver));
}
MODULE_DESCRIPTION("SMSC PHY driver");
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 4eb98bc52a0a..1c3abce78b6a 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -11,6 +11,8 @@
* by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -356,7 +358,7 @@ static struct spi_driver ks8995_driver = {
static int __init ks8995_init(void)
{
- printk(KERN_INFO DRV_DESC " version " DRV_VERSION"\n");
+ pr_info(DRV_DESC " version " DRV_VERSION "\n");
return spi_register_driver(&ks8995_driver);
}
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 187a2fa814f2..5e1eb138916f 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -81,7 +81,8 @@ static int ste10Xp_ack_interrupt(struct phy_device *phydev)
return 0;
}
-static struct phy_driver ste101p_pdriver = {
+static struct phy_driver ste10xp_pdriver[] = {
+{
.phy_id = STE101P_PHY_ID,
.phy_id_mask = 0xfffffff0,
.name = "STe101p",
@@ -95,9 +96,7 @@ static struct phy_driver ste101p_pdriver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = {.owner = THIS_MODULE,}
-};
-
-static struct phy_driver ste100p_pdriver = {
+}, {
.phy_id = STE100P_PHY_ID,
.phy_id_mask = 0xffffffff,
.name = "STe100p",
@@ -111,22 +110,18 @@ static struct phy_driver ste100p_pdriver = {
.suspend = genphy_suspend,
.resume = genphy_resume,
.driver = {.owner = THIS_MODULE,}
-};
+} };
static int __init ste10Xp_init(void)
{
- int retval;
-
- retval = phy_driver_register(&ste100p_pdriver);
- if (retval < 0)
- return retval;
- return phy_driver_register(&ste101p_pdriver);
+ return phy_drivers_register(ste10xp_pdriver,
+ ARRAY_SIZE(ste10xp_pdriver));
}
static void __exit ste10Xp_exit(void)
{
- phy_driver_unregister(&ste100p_pdriver);
- phy_driver_unregister(&ste101p_pdriver);
+ phy_drivers_unregister(ste10xp_pdriver,
+ ARRAY_SIZE(ste10xp_pdriver));
}
module_init(ste10Xp_init);
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 0ec8e09cc2ac..2585c383e623 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -138,21 +138,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev)
return err;
}
-/* Vitesse 824x */
-static struct phy_driver vsc8244_driver = {
- .phy_id = PHY_ID_VSC8244,
- .name = "Vitesse VSC8244",
- .phy_id_mask = 0x000fffc0,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_init = &vsc824x_config_init,
- .config_aneg = &genphy_config_aneg,
- .read_status = &genphy_read_status,
- .ack_interrupt = &vsc824x_ack_interrupt,
- .config_intr = &vsc82xx_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
-
static int vsc8221_config_init(struct phy_device *phydev)
{
int err;
@@ -165,8 +150,22 @@ static int vsc8221_config_init(struct phy_device *phydev)
Options are 802.3Z SerDes or SGMII */
}
-/* Vitesse 8221 */
-static struct phy_driver vsc8221_driver = {
+/* Vitesse 824x */
+static struct phy_driver vsc82xx_driver[] = {
+{
+ .phy_id = PHY_ID_VSC8244,
+ .name = "Vitesse VSC8244",
+ .phy_id_mask = 0x000fffc0,
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &vsc824x_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc82xx_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+}, {
+ /* Vitesse 8221 */
.phy_id = PHY_ID_VSC8221,
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8221",
@@ -177,26 +176,19 @@ static struct phy_driver vsc8221_driver = {
.read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
- .driver = { .owner = THIS_MODULE,},
-};
+ .driver = { .owner = THIS_MODULE,},
+} };
static int __init vsc82xx_init(void)
{
- int err;
-
- err = phy_driver_register(&vsc8244_driver);
- if (err < 0)
- return err;
- err = phy_driver_register(&vsc8221_driver);
- if (err < 0)
- phy_driver_unregister(&vsc8244_driver);
- return err;
+ return phy_drivers_register(vsc82xx_driver,
+ ARRAY_SIZE(vsc82xx_driver));
}
static void __exit vsc82xx_exit(void)
{
- phy_driver_unregister(&vsc8244_driver);
- phy_driver_unregister(&vsc8221_driver);
+ return phy_drivers_unregister(vsc82xx_driver,
+ ARRAY_SIZE(vsc82xx_driver));
}
module_init(vsc82xx_init);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index d4c9db3da22a..a34d6bf5e43b 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -390,10 +390,10 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
if (sl->mode & SL_MODE_SLIP6)
- count = slip_esc6(p, (unsigned char *) sl->xbuff, len);
+ count = slip_esc6(p, sl->xbuff, len);
else
#endif
- count = slip_esc(p, (unsigned char *) sl->xbuff, len);
+ count = slip_esc(p, sl->xbuff, len);
/* Order of next two lines is *very* important.
* When we are sending a little amount of data,
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 89024d5fc33a..6a7260b03a1e 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -15,6 +15,17 @@ menuconfig NET_TEAM
if NET_TEAM
+config NET_TEAM_MODE_BROADCAST
+ tristate "Broadcast mode support"
+ depends on NET_TEAM
+ ---help---
+ Basic mode where packets are transmitted always by all suitable ports.
+
+ All added ports are setup to have team's mac address.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_broadcast.
+
config NET_TEAM_MODE_ROUNDROBIN
tristate "Round-robin mode support"
depends on NET_TEAM
@@ -22,7 +33,7 @@ config NET_TEAM_MODE_ROUNDROBIN
Basic mode where port used for transmitting packets is selected in
round-robin fashion using packet counter.
- All added ports are setup to have bond's mac address.
+ All added ports are setup to have team's mac address.
To compile this team mode as a module, choose M here: the module
will be called team_mode_roundrobin.
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index fb9f4c1c51ff..975763014e5a 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -3,6 +3,7 @@
#
obj-$(CONFIG_NET_TEAM) += team.o
+obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o
obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c61ae35a53ce..b104c05225f7 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1,5 +1,5 @@
/*
- * net/drivers/team/team.c - Network team device driver
+ * drivers/net/team/team.c - Network team device driver
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -18,6 +18,7 @@
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
+#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
@@ -26,6 +27,7 @@
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
+#include <net/sch_generic.h>
#include <linux/if_team.h>
#define DRV_NAME "team"
@@ -82,14 +84,16 @@ static void team_refresh_port_linkup(struct team_port *port)
port->state.linkup;
}
+
/*******************
* Options handling
*******************/
struct team_option_inst { /* One for each option instance */
struct list_head list;
+ struct list_head tmp_list;
struct team_option *option;
- struct team_port *port; /* != NULL if per-port */
+ struct team_option_inst_info info;
bool changed;
bool removed;
};
@@ -106,22 +110,6 @@ static struct team_option *__team_find_option(struct team *team,
return NULL;
}
-static int __team_option_inst_add(struct team *team, struct team_option *option,
- struct team_port *port)
-{
- struct team_option_inst *opt_inst;
-
- opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
- if (!opt_inst)
- return -ENOMEM;
- opt_inst->option = option;
- opt_inst->port = port;
- opt_inst->changed = true;
- opt_inst->removed = false;
- list_add_tail(&opt_inst->list, &team->option_inst_list);
- return 0;
-}
-
static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
list_del(&opt_inst->list);
@@ -139,14 +127,49 @@ static void __team_option_inst_del_option(struct team *team,
}
}
+static int __team_option_inst_add(struct team *team, struct team_option *option,
+ struct team_port *port)
+{
+ struct team_option_inst *opt_inst;
+ unsigned int array_size;
+ unsigned int i;
+ int err;
+
+ array_size = option->array_size;
+ if (!array_size)
+ array_size = 1; /* No array but still need one instance */
+
+ for (i = 0; i < array_size; i++) {
+ opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
+ if (!opt_inst)
+ return -ENOMEM;
+ opt_inst->option = option;
+ opt_inst->info.port = port;
+ opt_inst->info.array_index = i;
+ opt_inst->changed = true;
+ opt_inst->removed = false;
+ list_add_tail(&opt_inst->list, &team->option_inst_list);
+ if (option->init) {
+ err = option->init(team, &opt_inst->info);
+ if (err)
+ return err;
+ }
+
+ }
+ return 0;
+}
+
static int __team_option_inst_add_option(struct team *team,
struct team_option *option)
{
struct team_port *port;
int err;
- if (!option->per_port)
- return __team_option_inst_add(team, option, 0);
+ if (!option->per_port) {
+ err = __team_option_inst_add(team, option, NULL);
+ if (err)
+ goto inst_del_option;
+ }
list_for_each_entry(port, &team->port_list, list) {
err = __team_option_inst_add(team, option, port);
@@ -180,7 +203,7 @@ static void __team_option_inst_del_port(struct team *team,
list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
if (opt_inst->option->per_port &&
- opt_inst->port == port)
+ opt_inst->info.port == port)
__team_option_inst_del(opt_inst);
}
}
@@ -211,7 +234,7 @@ static void __team_option_inst_mark_removed_port(struct team *team,
struct team_option_inst *opt_inst;
list_for_each_entry(opt_inst, &team->option_inst_list, list) {
- if (opt_inst->port == port) {
+ if (opt_inst->info.port == port) {
opt_inst->changed = true;
opt_inst->removed = true;
}
@@ -324,28 +347,12 @@ void team_options_unregister(struct team *team,
}
EXPORT_SYMBOL(team_options_unregister);
-static int team_option_port_add(struct team *team, struct team_port *port)
-{
- int err;
-
- err = __team_option_inst_add_port(team, port);
- if (err)
- return err;
- __team_options_change_check(team);
- return 0;
-}
-
-static void team_option_port_del(struct team *team, struct team_port *port)
-{
- __team_option_inst_mark_removed_port(team, port);
- __team_options_change_check(team);
- __team_option_inst_del_port(team, port);
-}
-
static int team_option_get(struct team *team,
struct team_option_inst *opt_inst,
struct team_gsetter_ctx *ctx)
{
+ if (!opt_inst->option->getter)
+ return -EOPNOTSUPP;
return opt_inst->option->getter(team, ctx);
}
@@ -353,16 +360,26 @@ static int team_option_set(struct team *team,
struct team_option_inst *opt_inst,
struct team_gsetter_ctx *ctx)
{
- int err;
+ if (!opt_inst->option->setter)
+ return -EOPNOTSUPP;
+ return opt_inst->option->setter(team, ctx);
+}
- err = opt_inst->option->setter(team, ctx);
- if (err)
- return err;
+void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
+{
+ struct team_option_inst *opt_inst;
+ opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
opt_inst->changed = true;
+}
+EXPORT_SYMBOL(team_option_inst_set_change);
+
+void team_options_change_check(struct team *team)
+{
__team_options_change_check(team);
- return err;
}
+EXPORT_SYMBOL(team_options_change_check);
+
/****************
* Mode handling
@@ -371,13 +388,18 @@ static int team_option_set(struct team *team,
static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);
-static struct team_mode *__find_mode(const char *kind)
+struct team_mode_item {
+ struct list_head list;
+ const struct team_mode *mode;
+};
+
+static struct team_mode_item *__find_mode(const char *kind)
{
- struct team_mode *mode;
+ struct team_mode_item *mitem;
- list_for_each_entry(mode, &mode_list, list) {
- if (strcmp(mode->kind, kind) == 0)
- return mode;
+ list_for_each_entry(mitem, &mode_list, list) {
+ if (strcmp(mitem->mode->kind, kind) == 0)
+ return mitem;
}
return NULL;
}
@@ -392,49 +414,65 @@ static bool is_good_mode_name(const char *name)
return true;
}
-int team_mode_register(struct team_mode *mode)
+int team_mode_register(const struct team_mode *mode)
{
int err = 0;
+ struct team_mode_item *mitem;
if (!is_good_mode_name(mode->kind) ||
mode->priv_size > TEAM_MODE_PRIV_SIZE)
return -EINVAL;
+
+ mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
+ if (!mitem)
+ return -ENOMEM;
+
spin_lock(&mode_list_lock);
if (__find_mode(mode->kind)) {
err = -EEXIST;
+ kfree(mitem);
goto unlock;
}
- list_add_tail(&mode->list, &mode_list);
+ mitem->mode = mode;
+ list_add_tail(&mitem->list, &mode_list);
unlock:
spin_unlock(&mode_list_lock);
return err;
}
EXPORT_SYMBOL(team_mode_register);
-int team_mode_unregister(struct team_mode *mode)
+void team_mode_unregister(const struct team_mode *mode)
{
+ struct team_mode_item *mitem;
+
spin_lock(&mode_list_lock);
- list_del_init(&mode->list);
+ mitem = __find_mode(mode->kind);
+ if (mitem) {
+ list_del_init(&mitem->list);
+ kfree(mitem);
+ }
spin_unlock(&mode_list_lock);
- return 0;
}
EXPORT_SYMBOL(team_mode_unregister);
-static struct team_mode *team_mode_get(const char *kind)
+static const struct team_mode *team_mode_get(const char *kind)
{
- struct team_mode *mode;
+ struct team_mode_item *mitem;
+ const struct team_mode *mode = NULL;
spin_lock(&mode_list_lock);
- mode = __find_mode(kind);
- if (!mode) {
+ mitem = __find_mode(kind);
+ if (!mitem) {
spin_unlock(&mode_list_lock);
request_module("team-mode-%s", kind);
spin_lock(&mode_list_lock);
- mode = __find_mode(kind);
+ mitem = __find_mode(kind);
}
- if (mode)
+ if (mitem) {
+ mode = mitem->mode;
if (!try_module_get(mode->owner))
mode = NULL;
+ }
spin_unlock(&mode_list_lock);
return mode;
@@ -458,26 +496,45 @@ rx_handler_result_t team_dummy_receive(struct team *team,
return RX_HANDLER_ANOTHER;
}
-static void team_adjust_ops(struct team *team)
+static const struct team_mode __team_no_mode = {
+ .kind = "*NOMODE*",
+};
+
+static bool team_is_mode_set(struct team *team)
+{
+ return team->mode != &__team_no_mode;
+}
+
+static void team_set_no_mode(struct team *team)
+{
+ team->mode = &__team_no_mode;
+}
+
+static void __team_adjust_ops(struct team *team, int en_port_count)
{
/*
* To avoid checks in rx/tx skb paths, ensure here that non-null and
* correct ops are always set.
*/
- if (list_empty(&team->port_list) ||
- !team->mode || !team->mode->ops->transmit)
+ if (!en_port_count || !team_is_mode_set(team) ||
+ !team->mode->ops->transmit)
team->ops.transmit = team_dummy_transmit;
else
team->ops.transmit = team->mode->ops->transmit;
- if (list_empty(&team->port_list) ||
- !team->mode || !team->mode->ops->receive)
+ if (!en_port_count || !team_is_mode_set(team) ||
+ !team->mode->ops->receive)
team->ops.receive = team_dummy_receive;
else
team->ops.receive = team->mode->ops->receive;
}
+static void team_adjust_ops(struct team *team)
+{
+ __team_adjust_ops(team, team->en_port_count);
+}
+
/*
* We can benefit from the fact that it's ensured no port is present
* at the time of mode change. Therefore no packets are in fly so there's no
@@ -487,7 +544,7 @@ static int __team_change_mode(struct team *team,
const struct team_mode *new_mode)
{
/* Check if mode was previously set and do cleanup if so */
- if (team->mode) {
+ if (team_is_mode_set(team)) {
void (*exit_op)(struct team *team) = team->ops.exit;
/* Clear ops area so no callback is called any longer */
@@ -497,7 +554,7 @@ static int __team_change_mode(struct team *team,
if (exit_op)
exit_op(team);
team_mode_put(team->mode);
- team->mode = NULL;
+ team_set_no_mode(team);
/* zero private data area */
memset(&team->mode_priv, 0,
sizeof(struct team) - offsetof(struct team, mode_priv));
@@ -523,7 +580,7 @@ static int __team_change_mode(struct team *team,
static int team_change_mode(struct team *team, const char *kind)
{
- struct team_mode *new_mode;
+ const struct team_mode *new_mode;
struct net_device *dev = team->dev;
int err;
@@ -532,7 +589,7 @@ static int team_change_mode(struct team *team, const char *kind)
return -EBUSY;
}
- if (team->mode && strcmp(team->mode->kind, kind) == 0) {
+ if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
netdev_err(dev, "Unable to change to the same mode the team is in\n");
return -EINVAL;
}
@@ -559,8 +616,6 @@ static int team_change_mode(struct team *team, const char *kind)
* Rx path frame handler
************************/
-static bool team_port_enabled(struct team_port *port);
-
/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
@@ -618,11 +673,6 @@ static bool team_port_find(const struct team *team,
return false;
}
-static bool team_port_enabled(struct team_port *port)
-{
- return port->index != -1;
-}
-
/*
* Enable/disable port by adding to enabled port hashlist and setting
* port->index (Might be racy so reader could see incorrect ifindex when
@@ -637,6 +687,9 @@ static void team_port_enable(struct team *team,
port->index = team->en_port_count++;
hlist_add_head_rcu(&port->hlist,
team_port_index_hash(team, port->index));
+ team_adjust_ops(team);
+ if (team->ops.port_enabled)
+ team->ops.port_enabled(team, port);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -656,14 +709,20 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
static void team_port_disable(struct team *team,
struct team_port *port)
{
- int rm_index = port->index;
-
if (!team_port_enabled(port))
return;
+ if (team->ops.port_disabled)
+ team->ops.port_disabled(team, port);
hlist_del_rcu(&port->hlist);
- __reconstruct_port_hlist(team, rm_index);
- team->en_port_count--;
+ __reconstruct_port_hlist(team, port->index);
port->index = -1;
+ __team_adjust_ops(team, team->en_port_count - 1);
+ /*
+ * Wait until readers see adjusted ops. This ensures that
+ * readers never see team->en_port_count == 0
+ */
+ synchronize_rcu();
+ team->en_port_count--;
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -675,12 +734,14 @@ static void __team_compute_features(struct team *team)
struct team_port *port;
u32 vlan_features = TEAM_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
+ unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
list_for_each_entry(port, &team->port_list, list) {
vlan_features = netdev_increment_features(vlan_features,
port->dev->vlan_features,
TEAM_VLAN_FEATURES);
+ dst_release_flag &= port->dev->priv_flags;
if (port->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = port->dev->hard_header_len;
}
@@ -688,6 +749,9 @@ static void __team_compute_features(struct team *team)
team->dev->vlan_features = vlan_features;
team->dev->hard_header_len = max_hard_header_len;
+ flags = team->dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
+ team->dev->priv_flags = flags | dst_release_flag;
+
netdev_change_features(team->dev);
}
@@ -730,6 +794,58 @@ static void team_port_leave(struct team *team, struct team_port *port)
dev_put(team->dev);
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+{
+ struct netpoll *np;
+ int err;
+
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ return -ENOMEM;
+
+ err = __netpoll_setup(np, port->dev);
+ if (err) {
+ kfree(np);
+ return err;
+ }
+ port->np = np;
+ return err;
+}
+
+static void team_port_disable_netpoll(struct team_port *port)
+{
+ struct netpoll *np = port->np;
+
+ if (!np)
+ return;
+ port->np = NULL;
+
+ /* Wait for transmitting packets to finish before freeing. */
+ synchronize_rcu_bh();
+ __netpoll_cleanup(np);
+ kfree(np);
+}
+
+static struct netpoll_info *team_netpoll_info(struct team *team)
+{
+ return team->dev->npinfo;
+}
+
+#else
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+{
+ return 0;
+}
+static void team_port_disable_netpoll(struct team_port *port)
+{
+}
+static struct netpoll_info *team_netpoll_info(struct team *team)
+{
+ return NULL;
+}
+#endif
+
static void __team_port_change_check(struct team_port *port, bool linkup);
static int team_port_add(struct team *team, struct net_device *port_dev)
@@ -758,7 +874,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EBUSY;
}
- port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
+ port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
+ GFP_KERNEL);
if (!port)
return -ENOMEM;
@@ -795,6 +912,15 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
+ if (team_netpoll_info(team)) {
+ err = team_port_enable_netpoll(team, port);
+ if (err) {
+ netdev_err(dev, "Failed to enable netpoll on device %s\n",
+ portname);
+ goto err_enable_netpoll;
+ }
+ }
+
err = netdev_set_master(port_dev, dev);
if (err) {
netdev_err(dev, "Device %s failed to set master\n", portname);
@@ -809,7 +935,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_handler_register;
}
- err = team_option_port_add(team, port);
+ err = __team_option_inst_add_port(team, port);
if (err) {
netdev_err(dev, "Device %s failed to add per-port options\n",
portname);
@@ -819,9 +945,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
port->index = -1;
team_port_enable(team, port);
list_add_tail_rcu(&port->list, &team->port_list);
- team_adjust_ops(team);
__team_compute_features(team);
__team_port_change_check(port, !!netif_carrier_ok(port_dev));
+ __team_options_change_check(team);
netdev_info(dev, "Port device %s added\n", portname);
@@ -834,6 +960,9 @@ err_handler_register:
netdev_set_master(port_dev, NULL);
err_set_master:
+ team_port_disable_netpoll(port);
+
+err_enable_netpoll:
vlan_vids_del_by_dev(port_dev, dev);
err_vids_add:
@@ -865,14 +994,16 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
return -ENOENT;
}
+ __team_option_inst_mark_removed_port(team, port);
+ __team_options_change_check(team);
+ __team_option_inst_del_port(team, port);
port->removed = true;
__team_port_change_check(port, false);
team_port_disable(team, port);
list_del_rcu(&port->list);
- team_adjust_ops(team);
- team_option_port_del(team, port);
netdev_rx_handler_unregister(port_dev);
netdev_set_master(port_dev, NULL);
+ team_port_disable_netpoll(port);
vlan_vids_del_by_dev(port_dev, dev);
dev_close(port_dev);
team_port_leave(team, port);
@@ -891,11 +1022,9 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
* Net device ops
*****************/
-static const char team_no_mode_kind[] = "*NOMODE*";
-
static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
- ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind;
+ ctx->data.str_val = team->mode->kind;
return 0;
}
@@ -907,39 +1036,47 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
static int team_port_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
- ctx->data.bool_val = team_port_enabled(ctx->port);
+ struct team_port *port = ctx->info->port;
+
+ ctx->data.bool_val = team_port_enabled(port);
return 0;
}
static int team_port_en_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
+ struct team_port *port = ctx->info->port;
+
if (ctx->data.bool_val)
- team_port_enable(team, ctx->port);
+ team_port_enable(team, port);
else
- team_port_disable(team, ctx->port);
+ team_port_disable(team, port);
return 0;
}
static int team_user_linkup_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
- ctx->data.bool_val = ctx->port->user.linkup;
+ struct team_port *port = ctx->info->port;
+
+ ctx->data.bool_val = port->user.linkup;
return 0;
}
static int team_user_linkup_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
- ctx->port->user.linkup = ctx->data.bool_val;
- team_refresh_port_linkup(ctx->port);
+ struct team_port *port = ctx->info->port;
+
+ port->user.linkup = ctx->data.bool_val;
+ team_refresh_port_linkup(port);
return 0;
}
static int team_user_linkup_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
- struct team_port *port = ctx->port;
+ struct team_port *port = ctx->info->port;
ctx->data.bool_val = port->user.linkup_enabled;
return 0;
@@ -948,10 +1085,10 @@ static int team_user_linkup_en_option_get(struct team *team,
static int team_user_linkup_en_option_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
- struct team_port *port = ctx->port;
+ struct team_port *port = ctx->info->port;
port->user.linkup_enabled = ctx->data.bool_val;
- team_refresh_port_linkup(ctx->port);
+ team_refresh_port_linkup(port);
return 0;
}
@@ -985,6 +1122,22 @@ static const struct team_option team_options[] = {
},
};
+static struct lock_class_key team_netdev_xmit_lock_key;
+static struct lock_class_key team_netdev_addr_lock_key;
+
+static void team_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *unused)
+{
+ lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
+}
+
+static void team_set_lockdep_class(struct net_device *dev)
+{
+ lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
+ netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
+}
+
static int team_init(struct net_device *dev)
{
struct team *team = netdev_priv(dev);
@@ -993,6 +1146,7 @@ static int team_init(struct net_device *dev)
team->dev = dev;
mutex_init(&team->lock);
+ team_set_no_mode(team);
team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
if (!team->pcpu_stats)
@@ -1011,6 +1165,8 @@ static int team_init(struct net_device *dev)
goto err_options_register;
netif_carrier_off(dev);
+ team_set_lockdep_class(dev);
+
return 0;
err_options_register:
@@ -1079,6 +1235,29 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ /*
+ * This helper function exists to help dev_pick_tx get the correct
+ * destination queue. Using a helper function skips a call to
+ * skb_tx_hash and will put the skbs in the queue we expect on their
+ * way down to the team driver.
+ */
+ u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+
+ /*
+ * Save the original txq to restore before passing to the driver
+ */
+ qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+
+ if (unlikely(txq >= dev->real_num_tx_queues)) {
+ do {
+ txq -= dev->real_num_tx_queues;
+ } while (txq >= dev->real_num_tx_queues);
+ }
+ return txq;
+}
+
static void team_change_rx_flags(struct net_device *dev, int change)
{
struct team *team = netdev_priv(dev);
@@ -1116,10 +1295,11 @@ static int team_set_mac_address(struct net_device *dev, void *p)
{
struct team *team = netdev_priv(dev);
struct team_port *port;
- struct sockaddr *addr = p;
+ int err;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ err = eth_mac_addr(dev, p);
+ if (err)
+ return err;
rcu_read_lock();
list_for_each_entry_rcu(port, &team->port_list, list)
if (team->ops.port_change_mac)
@@ -1240,6 +1420,48 @@ static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void team_poll_controller(struct net_device *dev)
+{
+}
+
+static void __team_netpoll_cleanup(struct team *team)
+{
+ struct team_port *port;
+
+ list_for_each_entry(port, &team->port_list, list)
+ team_port_disable_netpoll(port);
+}
+
+static void team_netpoll_cleanup(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+
+ mutex_lock(&team->lock);
+ __team_netpoll_cleanup(team);
+ mutex_unlock(&team->lock);
+}
+
+static int team_netpoll_setup(struct net_device *dev,
+ struct netpoll_info *npifo)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int err;
+
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = team_port_enable_netpoll(team, port);
+ if (err) {
+ __team_netpoll_cleanup(team);
+ break;
+ }
+ }
+ mutex_unlock(&team->lock);
+ return err;
+}
+#endif
+
static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
struct team *team = netdev_priv(dev);
@@ -1289,6 +1511,7 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_open = team_open,
.ndo_stop = team_close,
.ndo_start_xmit = team_xmit,
+ .ndo_select_queue = team_select_queue,
.ndo_change_rx_flags = team_change_rx_flags,
.ndo_set_rx_mode = team_set_rx_mode,
.ndo_set_mac_address = team_set_mac_address,
@@ -1296,6 +1519,11 @@ static const struct net_device_ops team_netdev_ops = {
.ndo_get_stats64 = team_get_stats64,
.ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = team_poll_controller,
+ .ndo_netpoll_setup = team_netpoll_setup,
+ .ndo_netpoll_cleanup = team_netpoll_cleanup,
+#endif
.ndo_add_slave = team_add_slave,
.ndo_del_slave = team_del_slave,
.ndo_fix_features = team_fix_features,
@@ -1321,7 +1549,7 @@ static void team_setup(struct net_device *dev)
* bring us to promisc mode in case a unicast addr is added.
* Let this up to underlay drivers.
*/
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GRO;
@@ -1358,12 +1586,24 @@ static int team_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
+static unsigned int team_get_num_tx_queues(void)
+{
+ return TEAM_DEFAULT_NUM_TX_QUEUES;
+}
+
+static unsigned int team_get_num_rx_queues(void)
+{
+ return TEAM_DEFAULT_NUM_RX_QUEUES;
+}
+
static struct rtnl_link_ops team_link_ops __read_mostly = {
- .kind = DRV_NAME,
- .priv_size = sizeof(struct team),
- .setup = team_setup,
- .newlink = team_newlink,
- .validate = team_validate,
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct team),
+ .setup = team_setup,
+ .newlink = team_newlink,
+ .validate = team_validate,
+ .get_num_tx_queues = team_get_num_tx_queues,
+ .get_num_rx_queues = team_get_num_rx_queues,
};
@@ -1404,7 +1644,7 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
void *hdr;
int err;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -1466,7 +1706,7 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team,
struct sk_buff *skb;
int err;
- skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -1482,16 +1722,128 @@ err_fill:
return err;
}
-static int team_nl_fill_options_get(struct sk_buff *skb,
- u32 pid, u32 seq, int flags,
- struct team *team, bool fillall)
+typedef int team_nl_send_func_t(struct sk_buff *skb,
+ struct team *team, u32 pid);
+
+static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid)
+{
+ return genlmsg_unicast(dev_net(team->dev), skb, pid);
+}
+
+static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
+ struct team_option_inst *opt_inst)
+{
+ struct nlattr *option_item;
+ struct team_option *option = opt_inst->option;
+ struct team_option_inst_info *opt_inst_info = &opt_inst->info;
+ struct team_gsetter_ctx ctx;
+ int err;
+
+ ctx.info = opt_inst_info;
+ err = team_option_get(team, opt_inst, &ctx);
+ if (err)
+ return err;
+
+ option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+ if (!option_item)
+ return -EMSGSIZE;
+
+ if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
+ goto nest_cancel;
+ if (opt_inst_info->port &&
+ nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
+ opt_inst_info->port->dev->ifindex))
+ goto nest_cancel;
+ if (opt_inst->option->array_size &&
+ nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
+ opt_inst_info->array_index))
+ goto nest_cancel;
+
+ switch (option->type) {
+ case TEAM_OPTION_TYPE_U32:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
+ goto nest_cancel;
+ if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
+ goto nest_cancel;
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
+ goto nest_cancel;
+ if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
+ ctx.data.str_val))
+ goto nest_cancel;
+ break;
+ case TEAM_OPTION_TYPE_BINARY:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
+ goto nest_cancel;
+ if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
+ ctx.data.bin_val.ptr))
+ goto nest_cancel;
+ break;
+ case TEAM_OPTION_TYPE_BOOL:
+ if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
+ goto nest_cancel;
+ if (ctx.data.bool_val &&
+ nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
+ goto nest_cancel;
+ break;
+ default:
+ BUG();
+ }
+ if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
+ goto nest_cancel;
+ if (opt_inst->changed) {
+ if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
+ goto nest_cancel;
+ opt_inst->changed = false;
+ }
+ nla_nest_end(skb, option_item);
+ return 0;
+
+nest_cancel:
+ nla_nest_cancel(skb, option_item);
+ return -EMSGSIZE;
+}
+
+static int __send_and_alloc_skb(struct sk_buff **pskb,
+ struct team *team, u32 pid,
+ team_nl_send_func_t *send_func)
+{
+ int err;
+
+ if (*pskb) {
+ err = send_func(*pskb, team, pid);
+ if (err)
+ return err;
+ }
+ *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!*pskb)
+ return -ENOMEM;
+ return 0;
+}
+
+static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq,
+ int flags, team_nl_send_func_t *send_func,
+ struct list_head *sel_opt_inst_list)
{
struct nlattr *option_list;
+ struct nlmsghdr *nlh;
void *hdr;
struct team_option_inst *opt_inst;
int err;
+ struct sk_buff *skb = NULL;
+ bool incomplete;
+ int i;
- hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ opt_inst = list_first_entry(sel_opt_inst_list,
+ struct team_option_inst, tmp_list);
+
+start_again:
+ err = __send_and_alloc_skb(&skb, team, pid, send_func);
+ if (err)
+ return err;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
TEAM_CMD_OPTIONS_GET);
if (IS_ERR(hdr))
return PTR_ERR(hdr);
@@ -1500,122 +1852,80 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
goto nla_put_failure;
option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
if (!option_list)
- return -EMSGSIZE;
-
- list_for_each_entry(opt_inst, &team->option_inst_list, list) {
- struct nlattr *option_item;
- struct team_option *option = opt_inst->option;
- struct team_gsetter_ctx ctx;
+ goto nla_put_failure;
- /* Include only changed options if fill all mode is not on */
- if (!fillall && !opt_inst->changed)
- continue;
- option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
- if (!option_item)
- goto nla_put_failure;
- if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
- goto nla_put_failure;
- if (opt_inst->changed) {
- if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
- goto nla_put_failure;
- opt_inst->changed = false;
- }
- if (opt_inst->removed &&
- nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
- goto nla_put_failure;
- if (opt_inst->port &&
- nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
- opt_inst->port->dev->ifindex))
- goto nla_put_failure;
- ctx.port = opt_inst->port;
- switch (option->type) {
- case TEAM_OPTION_TYPE_U32:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
- ctx.data.u32_val))
- goto nla_put_failure;
- break;
- case TEAM_OPTION_TYPE_STRING:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
- ctx.data.str_val))
- goto nla_put_failure;
- break;
- case TEAM_OPTION_TYPE_BINARY:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
- ctx.data.bin_val.len, ctx.data.bin_val.ptr))
- goto nla_put_failure;
- break;
- case TEAM_OPTION_TYPE_BOOL:
- if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
- goto nla_put_failure;
- err = team_option_get(team, opt_inst, &ctx);
- if (err)
- goto errout;
- if (ctx.data.bool_val &&
- nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
- goto nla_put_failure;
- break;
- default:
- BUG();
+ i = 0;
+ incomplete = false;
+ list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
+ err = team_nl_fill_one_option_get(skb, team, opt_inst);
+ if (err) {
+ if (err == -EMSGSIZE) {
+ if (!i)
+ goto errout;
+ incomplete = true;
+ break;
+ }
+ goto errout;
}
- nla_nest_end(skb, option_item);
+ i++;
}
nla_nest_end(skb, option_list);
- return genlmsg_end(skb, hdr);
+ genlmsg_end(skb, hdr);
+ if (incomplete)
+ goto start_again;
+
+send_done:
+ nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
+ if (!nlh) {
+ err = __send_and_alloc_skb(&skb, team, pid, send_func);
+ if (err)
+ goto errout;
+ goto send_done;
+ }
+
+ return send_func(skb, team, pid);
nla_put_failure:
err = -EMSGSIZE;
errout:
genlmsg_cancel(skb, hdr);
+ nlmsg_free(skb);
return err;
}
-static int team_nl_fill_options_get_all(struct sk_buff *skb,
- struct genl_info *info, int flags,
- struct team *team)
-{
- return team_nl_fill_options_get(skb, info->snd_pid,
- info->snd_seq, NLM_F_ACK,
- team, true);
-}
-
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
struct team *team;
+ struct team_option_inst *opt_inst;
int err;
+ LIST_HEAD(sel_opt_inst_list);
team = team_nl_team_get(info);
if (!team)
return -EINVAL;
- err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);
+ list_for_each_entry(opt_inst, &team->option_inst_list, list)
+ list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
+ err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq,
+ NLM_F_ACK, team_nl_send_unicast,
+ &sel_opt_inst_list);
team_nl_team_put(team);
return err;
}
+static int team_nl_send_event_options_get(struct team *team,
+ struct list_head *sel_opt_inst_list);
+
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
struct team *team;
int err = 0;
int i;
struct nlattr *nl_option;
+ LIST_HEAD(opt_inst_list);
team = team_nl_team_get(info);
if (!team)
@@ -1629,10 +1939,12 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
- struct nlattr *attr_port_ifindex;
+ struct nlattr *attr;
struct nlattr *attr_data;
enum team_option_type opt_type;
int opt_port_ifindex = 0; /* != 0 for per-port options */
+ u32 opt_array_index = 0;
+ bool opt_is_array = false;
struct team_option_inst *opt_inst;
char *opt_name;
bool opt_found = false;
@@ -1674,23 +1986,33 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
}
opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
- attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
- if (attr_port_ifindex)
- opt_port_ifindex = nla_get_u32(attr_port_ifindex);
+ attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
+ if (attr)
+ opt_port_ifindex = nla_get_u32(attr);
+
+ attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
+ if (attr) {
+ opt_is_array = true;
+ opt_array_index = nla_get_u32(attr);
+ }
list_for_each_entry(opt_inst, &team->option_inst_list, list) {
struct team_option *option = opt_inst->option;
struct team_gsetter_ctx ctx;
+ struct team_option_inst_info *opt_inst_info;
int tmp_ifindex;
- tmp_ifindex = opt_inst->port ?
- opt_inst->port->dev->ifindex : 0;
+ opt_inst_info = &opt_inst->info;
+ tmp_ifindex = opt_inst_info->port ?
+ opt_inst_info->port->dev->ifindex : 0;
if (option->type != opt_type ||
strcmp(option->name, opt_name) ||
- tmp_ifindex != opt_port_ifindex)
+ tmp_ifindex != opt_port_ifindex ||
+ (option->array_size && !opt_is_array) ||
+ opt_inst_info->array_index != opt_array_index)
continue;
opt_found = true;
- ctx.port = opt_inst->port;
+ ctx.info = opt_inst_info;
switch (opt_type) {
case TEAM_OPTION_TYPE_U32:
ctx.data.u32_val = nla_get_u32(attr_data);
@@ -1715,6 +2037,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
err = team_option_set(team, opt_inst, &ctx);
if (err)
goto team_put;
+ opt_inst->changed = true;
+ list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
err = -ENOENT;
@@ -1722,6 +2046,8 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
}
}
+ err = team_nl_send_event_options_get(team, &opt_inst_list);
+
team_put:
team_nl_team_put(team);
@@ -1746,7 +2072,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
goto nla_put_failure;
port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
if (!port_list)
- return -EMSGSIZE;
+ goto nla_put_failure;
list_for_each_entry(port, &team->port_list, list) {
struct nlattr *port_item;
@@ -1838,27 +2164,18 @@ static struct genl_multicast_group team_change_event_mcgrp = {
.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};
-static int team_nl_send_event_options_get(struct team *team)
+static int team_nl_send_multicast(struct sk_buff *skb,
+ struct team *team, u32 pid)
{
- struct sk_buff *skb;
- int err;
- struct net *net = dev_net(team->dev);
-
- skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
- if (err < 0)
- goto err_fill;
-
- err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
- GFP_KERNEL);
- return err;
+ return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
+ team_change_event_mcgrp.id, GFP_KERNEL);
+}
-err_fill:
- nlmsg_free(skb);
- return err;
+static int team_nl_send_event_options_get(struct team *team,
+ struct list_head *sel_opt_inst_list)
+{
+ return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
+ sel_opt_inst_list);
}
static int team_nl_send_event_port_list_get(struct team *team)
@@ -1867,7 +2184,7 @@ static int team_nl_send_event_port_list_get(struct team *team)
int err;
struct net *net = dev_net(team->dev);
- skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -1918,10 +2235,17 @@ static void team_nl_fini(void)
static void __team_options_change_check(struct team *team)
{
int err;
+ struct team_option_inst *opt_inst;
+ LIST_HEAD(sel_opt_inst_list);
- err = team_nl_send_event_options_get(team);
+ list_for_each_entry(opt_inst, &team->option_inst_list, list) {
+ if (opt_inst->changed)
+ list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
+ }
+ err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
if (err)
- netdev_warn(team->dev, "Failed to send options change via netlink\n");
+ netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
+ err);
}
/* rtnl lock is held */
@@ -1965,6 +2289,7 @@ static void team_port_change_check(struct team_port *port, bool linkup)
mutex_unlock(&team->lock);
}
+
/************************************
* Net device notifier event handler
************************************/
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index fd6bd03aaa89..6262b4defd93 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -1,5 +1,5 @@
/*
- * net/drivers/team/team_mode_activebackup.c - Active-backup mode for team
+ * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -40,11 +40,10 @@ static bool ab_transmit(struct team *team, struct sk_buff *skb)
{
struct team_port *active_port;
- active_port = rcu_dereference(ab_priv(team)->active_port);
+ active_port = rcu_dereference_bh(ab_priv(team)->active_port);
if (unlikely(!active_port))
goto drop;
- skb->dev = active_port->dev;
- if (dev_queue_xmit(skb))
+ if (team_dev_queue_xmit(team, active_port, skb))
return false;
return true;
@@ -61,8 +60,12 @@ static void ab_port_leave(struct team *team, struct team_port *port)
static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
{
- if (ab_priv(team)->active_port)
- ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex;
+ struct team_port *active_port;
+
+ active_port = rcu_dereference_protected(ab_priv(team)->active_port,
+ lockdep_is_held(&team->lock));
+ if (active_port)
+ ctx->data.u32_val = active_port->dev->ifindex;
else
ctx->data.u32_val = 0;
return 0;
@@ -108,7 +111,7 @@ static const struct team_mode_ops ab_mode_ops = {
.port_leave = ab_port_leave,
};
-static struct team_mode ab_mode = {
+static const struct team_mode ab_mode = {
.kind = "activebackup",
.owner = THIS_MODULE,
.priv_size = sizeof(struct ab_priv),
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
new file mode 100644
index 000000000000..c96e4d2967f0
--- /dev/null
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -0,0 +1,87 @@
+/*
+ * drivers/net/team/team_mode_broadcast.c - Broadcast mode for team
+ * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+
+static bool bc_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *cur;
+ struct team_port *last = NULL;
+ struct sk_buff *skb2;
+ bool ret;
+ bool sum_ret = false;
+
+ list_for_each_entry_rcu(cur, &team->port_list, list) {
+ if (team_port_txable(cur)) {
+ if (last) {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
+ if (skb2) {
+ ret = team_dev_queue_xmit(team, last,
+ skb2);
+ if (!sum_ret)
+ sum_ret = ret;
+ }
+ }
+ last = cur;
+ }
+ }
+ if (last) {
+ ret = team_dev_queue_xmit(team, last, skb);
+ if (!sum_ret)
+ sum_ret = ret;
+ }
+ return sum_ret;
+}
+
+static int bc_port_enter(struct team *team, struct team_port *port)
+{
+ return team_port_set_team_mac(port);
+}
+
+static void bc_port_change_mac(struct team *team, struct team_port *port)
+{
+ team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops bc_mode_ops = {
+ .transmit = bc_transmit,
+ .port_enter = bc_port_enter,
+ .port_change_mac = bc_port_change_mac,
+};
+
+static const struct team_mode bc_mode = {
+ .kind = "broadcast",
+ .owner = THIS_MODULE,
+ .ops = &bc_mode_ops,
+};
+
+static int __init bc_init_module(void)
+{
+ return team_mode_register(&bc_mode);
+}
+
+static void __exit bc_cleanup_module(void)
+{
+ team_mode_unregister(&bc_mode);
+}
+
+module_init(bc_init_module);
+module_exit(bc_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Broadcast mode for team");
+MODULE_ALIAS("team-mode-broadcast");
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index 86e8183c8e3d..cdc31b5ea15e 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -17,34 +17,209 @@
#include <linux/filter.h>
#include <linux/if_team.h>
+struct lb_priv;
+
+typedef struct team_port *lb_select_tx_port_func_t(struct team *,
+ struct lb_priv *,
+ struct sk_buff *,
+ unsigned char);
+
+#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */
+
+struct lb_stats {
+ u64 tx_bytes;
+};
+
+struct lb_pcpu_stats {
+ struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE];
+ struct u64_stats_sync syncp;
+};
+
+struct lb_stats_info {
+ struct lb_stats stats;
+ struct lb_stats last_stats;
+ struct team_option_inst_info *opt_inst_info;
+};
+
+struct lb_port_mapping {
+ struct team_port __rcu *port;
+ struct team_option_inst_info *opt_inst_info;
+};
+
+struct lb_priv_ex {
+ struct team *team;
+ struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
+ struct sock_fprog *orig_fprog;
+ struct {
+ unsigned int refresh_interval; /* in tenths of second */
+ struct delayed_work refresh_dw;
+ struct lb_stats_info info[LB_TX_HASHTABLE_SIZE];
+ } stats;
+};
+
struct lb_priv {
struct sk_filter __rcu *fp;
- struct sock_fprog *orig_fprog;
+ lb_select_tx_port_func_t __rcu *select_tx_port_func;
+ struct lb_pcpu_stats __percpu *pcpu_stats;
+ struct lb_priv_ex *ex; /* priv extension */
};
-static struct lb_priv *lb_priv(struct team *team)
+static struct lb_priv *get_lb_priv(struct team *team)
{
return (struct lb_priv *) &team->mode_priv;
}
-static bool lb_transmit(struct team *team, struct sk_buff *skb)
+struct lb_port_priv {
+ struct lb_stats __percpu *pcpu_stats;
+ struct lb_stats_info stats_info;
+};
+
+static struct lb_port_priv *get_lb_port_priv(struct team_port *port)
+{
+ return (struct lb_port_priv *) &port->mode_priv;
+}
+
+#define LB_HTPM_PORT_BY_HASH(lp_priv, hash) \
+ (lb_priv)->ex->tx_hash_to_port_mapping[hash].port
+
+#define LB_HTPM_OPT_INST_INFO_BY_HASH(lp_priv, hash) \
+ (lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info
+
+static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
+ struct team_port *port)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ bool changed = false;
+ int i;
+
+ for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) {
+ struct lb_port_mapping *pm;
+
+ pm = &lb_priv->ex->tx_hash_to_port_mapping[i];
+ if (rcu_access_pointer(pm->port) == port) {
+ RCU_INIT_POINTER(pm->port, NULL);
+ team_option_inst_set_change(pm->opt_inst_info);
+ changed = true;
+ }
+ }
+ if (changed)
+ team_options_change_check(team);
+}
+
+/* Basic tx selection based solely by hash */
+static struct team_port *lb_hash_select_tx_port(struct team *team,
+ struct lb_priv *lb_priv,
+ struct sk_buff *skb,
+ unsigned char hash)
{
- struct sk_filter *fp;
- struct team_port *port;
- unsigned int hash;
int port_index;
- fp = rcu_dereference(lb_priv(team)->fp);
- if (unlikely(!fp))
- goto drop;
- hash = SK_RUN_FILTER(fp, skb);
port_index = hash % team->en_port_count;
- port = team_get_port_by_index_rcu(team, port_index);
+ return team_get_port_by_index_rcu(team, port_index);
+}
+
+/* Hash to port mapping select tx port */
+static struct team_port *lb_htpm_select_tx_port(struct team *team,
+ struct lb_priv *lb_priv,
+ struct sk_buff *skb,
+ unsigned char hash)
+{
+ return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
+}
+
+struct lb_select_tx_port {
+ char *name;
+ lb_select_tx_port_func_t *func;
+};
+
+static const struct lb_select_tx_port lb_select_tx_port_list[] = {
+ {
+ .name = "hash",
+ .func = lb_hash_select_tx_port,
+ },
+ {
+ .name = "hash_to_port_mapping",
+ .func = lb_htpm_select_tx_port,
+ },
+};
+#define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list)
+
+static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func)
+{
+ int i;
+
+ for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
+ const struct lb_select_tx_port *item;
+
+ item = &lb_select_tx_port_list[i];
+ if (item->func == func)
+ return item->name;
+ }
+ return NULL;
+}
+
+static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
+{
+ int i;
+
+ for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
+ const struct lb_select_tx_port *item;
+
+ item = &lb_select_tx_port_list[i];
+ if (!strcmp(item->name, name))
+ return item->func;
+ }
+ return NULL;
+}
+
+static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
+ struct sk_buff *skb)
+{
+ struct sk_filter *fp;
+ uint32_t lhash;
+ unsigned char *c;
+
+ fp = rcu_dereference_bh(lb_priv->fp);
+ if (unlikely(!fp))
+ return 0;
+ lhash = SK_RUN_FILTER(fp, skb);
+ c = (char *) &lhash;
+ return c[0] ^ c[1] ^ c[2] ^ c[3];
+}
+
+static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv,
+ struct lb_port_priv *lb_port_priv,
+ unsigned char hash)
+{
+ struct lb_pcpu_stats *pcpu_stats;
+ struct lb_stats *port_stats;
+ struct lb_stats *hash_stats;
+
+ pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats);
+ port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats);
+ hash_stats = &pcpu_stats->hash_stats[hash];
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ port_stats->tx_bytes += tx_bytes;
+ hash_stats->tx_bytes += tx_bytes;
+ u64_stats_update_end(&pcpu_stats->syncp);
+}
+
+static bool lb_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *select_tx_port_func;
+ struct team_port *port;
+ unsigned char hash;
+ unsigned int tx_bytes = skb->len;
+
+ hash = lb_get_skb_hash(lb_priv, skb);
+ select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func);
+ port = select_tx_port_func(team, lb_priv, skb, hash);
if (unlikely(!port))
goto drop;
- skb->dev = port->dev;
- if (dev_queue_xmit(skb))
+ if (team_dev_queue_xmit(team, port, skb))
return false;
+ lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash);
return true;
drop:
@@ -54,14 +229,16 @@ drop:
static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
{
- if (!lb_priv(team)->orig_fprog) {
+ struct lb_priv *lb_priv = get_lb_priv(team);
+
+ if (!lb_priv->ex->orig_fprog) {
ctx->data.bin_val.len = 0;
ctx->data.bin_val.ptr = NULL;
return 0;
}
- ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len *
+ ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len *
sizeof(struct sock_filter);
- ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter;
+ ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter;
return 0;
}
@@ -94,7 +271,9 @@ static void __fprog_destroy(struct sock_fprog *fprog)
static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
{
+ struct lb_priv *lb_priv = get_lb_priv(team);
struct sk_filter *fp = NULL;
+ struct sk_filter *orig_fp;
struct sock_fprog *fprog = NULL;
int err;
@@ -110,14 +289,238 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
}
}
- if (lb_priv(team)->orig_fprog) {
+ if (lb_priv->ex->orig_fprog) {
/* Clear old filter data */
- __fprog_destroy(lb_priv(team)->orig_fprog);
- sk_unattached_filter_destroy(lb_priv(team)->fp);
+ __fprog_destroy(lb_priv->ex->orig_fprog);
+ orig_fp = rcu_dereference_protected(lb_priv->fp,
+ lockdep_is_held(&team->lock));
+ sk_unattached_filter_destroy(orig_fp);
}
- rcu_assign_pointer(lb_priv(team)->fp, fp);
- lb_priv(team)->orig_fprog = fprog;
+ rcu_assign_pointer(lb_priv->fp, fp);
+ lb_priv->ex->orig_fprog = fprog;
+ return 0;
+}
+
+static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *func;
+ char *name;
+
+ func = rcu_dereference_protected(lb_priv->select_tx_port_func,
+ lockdep_is_held(&team->lock));
+ name = lb_select_tx_port_get_name(func);
+ BUG_ON(!name);
+ ctx->data.str_val = name;
+ return 0;
+}
+
+static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *func;
+
+ func = lb_select_tx_port_get_func(ctx->data.str_val);
+ if (!func)
+ return -EINVAL;
+ rcu_assign_pointer(lb_priv->select_tx_port_func, func);
+ return 0;
+}
+
+static int lb_tx_hash_to_port_mapping_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned char hash = info->array_index;
+
+ LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info;
+ return 0;
+}
+
+static int lb_tx_hash_to_port_mapping_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ struct team_port *port;
+ unsigned char hash = ctx->info->array_index;
+
+ port = LB_HTPM_PORT_BY_HASH(lb_priv, hash);
+ ctx->data.u32_val = port ? port->dev->ifindex : 0;
+ return 0;
+}
+
+static int lb_tx_hash_to_port_mapping_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ struct team_port *port;
+ unsigned char hash = ctx->info->array_index;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ if (ctx->data.u32_val == port->dev->ifindex &&
+ team_port_enabled(port)) {
+ rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash),
+ port);
+ return 0;
+ }
+ }
+ return -ENODEV;
+}
+
+static int lb_hash_stats_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned char hash = info->array_index;
+
+ lb_priv->ex->stats.info[hash].opt_inst_info = info;
+ return 0;
+}
+
+static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned char hash = ctx->info->array_index;
+
+ ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats;
+ ctx->data.bin_val.len = sizeof(struct lb_stats);
+ return 0;
+}
+
+static int lb_port_stats_init(struct team *team,
+ struct team_option_inst_info *info)
+{
+ struct team_port *port = info->port;
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ lb_port_priv->stats_info.opt_inst_info = info;
+ return 0;
+}
+
+static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
+{
+ struct team_port *port = ctx->info->port;
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats;
+ ctx->data.bin_val.len = sizeof(struct lb_stats);
+ return 0;
+}
+
+static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info)
+{
+ memcpy(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats));
+ memset(&s_info->stats, 0, sizeof(struct lb_stats));
+}
+
+static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info,
+ struct team *team)
+{
+ if (memcmp(&s_info->last_stats, &s_info->stats,
+ sizeof(struct lb_stats))) {
+ team_option_inst_set_change(s_info->opt_inst_info);
+ return true;
+ }
+ return false;
+}
+
+static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
+ struct lb_stats *cpu_stats,
+ struct u64_stats_sync *syncp)
+{
+ unsigned int start;
+ struct lb_stats tmp;
+
+ do {
+ start = u64_stats_fetch_begin_bh(syncp);
+ tmp.tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(syncp, start));
+ acc_stats->tx_bytes += tmp.tx_bytes;
+}
+
+static void lb_stats_refresh(struct work_struct *work)
+{
+ struct team *team;
+ struct lb_priv *lb_priv;
+ struct lb_priv_ex *lb_priv_ex;
+ struct lb_pcpu_stats *pcpu_stats;
+ struct lb_stats *stats;
+ struct lb_stats_info *s_info;
+ struct team_port *port;
+ bool changed = false;
+ int i;
+ int j;
+
+ lb_priv_ex = container_of(work, struct lb_priv_ex,
+ stats.refresh_dw.work);
+
+ team = lb_priv_ex->team;
+ lb_priv = get_lb_priv(team);
+
+ if (!mutex_trylock(&team->lock)) {
+ schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
+ return;
+ }
+
+ for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) {
+ s_info = &lb_priv->ex->stats.info[j];
+ __lb_stats_info_refresh_prepare(s_info);
+ for_each_possible_cpu(i) {
+ pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
+ stats = &pcpu_stats->hash_stats[j];
+ __lb_one_cpu_stats_add(&s_info->stats, stats,
+ &pcpu_stats->syncp);
+ }
+ changed |= __lb_stats_info_refresh_check(s_info, team);
+ }
+
+ list_for_each_entry(port, &team->port_list, list) {
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ s_info = &lb_port_priv->stats_info;
+ __lb_stats_info_refresh_prepare(s_info);
+ for_each_possible_cpu(i) {
+ pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
+ stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i);
+ __lb_one_cpu_stats_add(&s_info->stats, stats,
+ &pcpu_stats->syncp);
+ }
+ changed |= __lb_stats_info_refresh_check(s_info, team);
+ }
+
+ if (changed)
+ team_options_change_check(team);
+
+ schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
+ (lb_priv_ex->stats.refresh_interval * HZ) / 10);
+
+ mutex_unlock(&team->lock);
+}
+
+static int lb_stats_refresh_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+
+ ctx->data.u32_val = lb_priv->ex->stats.refresh_interval;
+ return 0;
+}
+
+static int lb_stats_refresh_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ unsigned int interval;
+
+ interval = ctx->data.u32_val;
+ if (lb_priv->ex->stats.refresh_interval == interval)
+ return 0;
+ lb_priv->ex->stats.refresh_interval = interval;
+ if (interval)
+ schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0);
+ else
+ cancel_delayed_work(&lb_priv->ex->stats.refresh_dw);
return 0;
}
@@ -128,30 +531,125 @@ static const struct team_option lb_options[] = {
.getter = lb_bpf_func_get,
.setter = lb_bpf_func_set,
},
+ {
+ .name = "lb_tx_method",
+ .type = TEAM_OPTION_TYPE_STRING,
+ .getter = lb_tx_method_get,
+ .setter = lb_tx_method_set,
+ },
+ {
+ .name = "lb_tx_hash_to_port_mapping",
+ .array_size = LB_TX_HASHTABLE_SIZE,
+ .type = TEAM_OPTION_TYPE_U32,
+ .init = lb_tx_hash_to_port_mapping_init,
+ .getter = lb_tx_hash_to_port_mapping_get,
+ .setter = lb_tx_hash_to_port_mapping_set,
+ },
+ {
+ .name = "lb_hash_stats",
+ .array_size = LB_TX_HASHTABLE_SIZE,
+ .type = TEAM_OPTION_TYPE_BINARY,
+ .init = lb_hash_stats_init,
+ .getter = lb_hash_stats_get,
+ },
+ {
+ .name = "lb_port_stats",
+ .per_port = true,
+ .type = TEAM_OPTION_TYPE_BINARY,
+ .init = lb_port_stats_init,
+ .getter = lb_port_stats_get,
+ },
+ {
+ .name = "lb_stats_refresh_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = lb_stats_refresh_interval_get,
+ .setter = lb_stats_refresh_interval_set,
+ },
};
static int lb_init(struct team *team)
{
- return team_options_register(team, lb_options,
- ARRAY_SIZE(lb_options));
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ lb_select_tx_port_func_t *func;
+ int err;
+
+ /* set default tx port selector */
+ func = lb_select_tx_port_get_func("hash");
+ BUG_ON(!func);
+ rcu_assign_pointer(lb_priv->select_tx_port_func, func);
+
+ lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL);
+ if (!lb_priv->ex)
+ return -ENOMEM;
+ lb_priv->ex->team = team;
+
+ lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats);
+ if (!lb_priv->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_alloc_pcpu_stats;
+ }
+
+ INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);
+
+ err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
+ if (err)
+ goto err_options_register;
+ return 0;
+
+err_options_register:
+ free_percpu(lb_priv->pcpu_stats);
+err_alloc_pcpu_stats:
+ kfree(lb_priv->ex);
+ return err;
}
static void lb_exit(struct team *team)
{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+
team_options_unregister(team, lb_options,
ARRAY_SIZE(lb_options));
+ cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
+ free_percpu(lb_priv->pcpu_stats);
+ kfree(lb_priv->ex);
+}
+
+static int lb_port_enter(struct team *team, struct team_port *port)
+{
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats);
+ if (!lb_port_priv->pcpu_stats)
+ return -ENOMEM;
+ return 0;
+}
+
+static void lb_port_leave(struct team *team, struct team_port *port)
+{
+ struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);
+
+ free_percpu(lb_port_priv->pcpu_stats);
+}
+
+static void lb_port_disabled(struct team *team, struct team_port *port)
+{
+ lb_tx_hash_to_port_mapping_null_port(team, port);
}
static const struct team_mode_ops lb_mode_ops = {
.init = lb_init,
.exit = lb_exit,
+ .port_enter = lb_port_enter,
+ .port_leave = lb_port_leave,
+ .port_disabled = lb_port_disabled,
.transmit = lb_transmit,
};
-static struct team_mode lb_mode = {
+static const struct team_mode lb_mode = {
.kind = "loadbalance",
.owner = THIS_MODULE,
.priv_size = sizeof(struct lb_priv),
+ .port_priv_size = sizeof(struct lb_port_priv),
.ops = &lb_mode_ops,
};
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 6abfbdc96be5..ad7ed0ec544c 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -1,5 +1,5 @@
/*
- * net/drivers/team/team_mode_roundrobin.c - Round-robin mode for team
+ * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -30,16 +30,16 @@ static struct team_port *__get_first_port_up(struct team *team,
{
struct team_port *cur;
- if (port->linkup)
+ if (team_port_txable(port))
return port;
cur = port;
list_for_each_entry_continue_rcu(cur, &team->port_list, list)
- if (cur->linkup)
+ if (team_port_txable(port))
return cur;
list_for_each_entry_rcu(cur, &team->port_list, list) {
if (cur == port)
break;
- if (cur->linkup)
+ if (team_port_txable(port))
return cur;
}
return NULL;
@@ -55,8 +55,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
port = __get_first_port_up(team, port);
if (unlikely(!port))
goto drop;
- skb->dev = port->dev;
- if (dev_queue_xmit(skb))
+ if (team_dev_queue_xmit(team, port, skb))
return false;
return true;
@@ -81,7 +80,7 @@ static const struct team_mode_ops rr_mode_ops = {
.port_change_mac = rr_port_change_mac,
};
-static struct team_mode rr_mode = {
+static const struct team_mode rr_mode = {
.kind = "roundrobin",
.owner = THIS_MODULE,
.priv_size = sizeof(struct rr_priv),
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 987aeefbc774..c62163e272cd 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -22,7 +22,7 @@
* Add TUNSETLINK ioctl to set the link encapsulation
*
* Mark Smith <markzzzsmith@yahoo.com.au>
- * Use random_ether_addr() for tap MAC address.
+ * Use eth_random_addr() for tap MAC address.
*
* Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
* Fixes in packet dropping, queue length setting and queue wakeup.
@@ -100,6 +100,8 @@ do { \
} while (0)
#endif
+#define GOODCOPY_LEN 128
+
#define FLT_EXACT_COUNT 8
struct tap_filter {
unsigned int count; /* Number of addrs. Zero means disabled */
@@ -358,6 +360,8 @@ static void tun_free_netdev(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
+ BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags));
+
sk_release_kernel(tun->socket.sk);
}
@@ -414,6 +418,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Orphan the skb - required as we might hang on to it
* for indefinite time. */
+ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+ goto drop;
skb_orphan(skb);
/* Enqueue packet */
@@ -600,19 +606,100 @@ static struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
return skb;
}
+/* set skb frags from iovec, this can move to core network code for reuse */
+static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ int offset, size_t count)
+{
+ int len = iov_length(from, count) - offset;
+ int copy = skb_headlen(skb);
+ int size, offset1 = 0;
+ int i = 0;
+
+ /* Skip over from offset */
+ while (count && (offset >= from->iov_len)) {
+ offset -= from->iov_len;
+ ++from;
+ --count;
+ }
+
+ /* copy up to skb headlen */
+ while (count && (copy > 0)) {
+ size = min_t(unsigned int, copy, from->iov_len - offset);
+ if (copy_from_user(skb->data + offset1, from->iov_base + offset,
+ size))
+ return -EFAULT;
+ if (copy > size) {
+ ++from;
+ --count;
+ offset = 0;
+ } else
+ offset += size;
+ copy -= size;
+ offset1 += size;
+ }
+
+ if (len == offset1)
+ return 0;
+
+ while (count--) {
+ struct page *page[MAX_SKB_FRAGS];
+ int num_pages;
+ unsigned long base;
+ unsigned long truesize;
+
+ len = from->iov_len - offset;
+ if (!len) {
+ offset = 0;
+ ++from;
+ continue;
+ }
+ base = (unsigned long)from->iov_base + offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ if (i + size > MAX_SKB_FRAGS)
+ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+ if (num_pages != size) {
+ for (i = 0; i < num_pages; i++)
+ put_page(page[i]);
+ return -EFAULT;
+ }
+ truesize = size * PAGE_SIZE;
+ skb->data_len += len;
+ skb->len += len;
+ skb->truesize += truesize;
+ atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+ while (len) {
+ int off = base & ~PAGE_MASK;
+ int size = min_t(int, len, PAGE_SIZE - off);
+ __skb_fill_page_desc(skb, i, page[i], off, size);
+ skb_shinfo(skb)->nr_frags++;
+ /* increase sk_wmem_alloc */
+ base += size;
+ len -= size;
+ i++;
+ }
+ offset = 0;
+ ++from;
+ }
+ return 0;
+}
+
/* Get packet from user space buffer */
-static ssize_t tun_get_user(struct tun_struct *tun,
- const struct iovec *iv, size_t count,
- int noblock)
+static ssize_t tun_get_user(struct tun_struct *tun, void *msg_control,
+ const struct iovec *iv, size_t total_len,
+ size_t count, int noblock)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
- size_t len = count, align = NET_SKB_PAD;
+ size_t len = total_len, align = NET_SKB_PAD;
struct virtio_net_hdr gso = { 0 };
int offset = 0;
+ int copylen;
+ bool zerocopy = false;
+ int err;
if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) > count)
+ if ((len -= sizeof(pi)) > total_len)
return -EINVAL;
if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
@@ -621,7 +708,7 @@ static ssize_t tun_get_user(struct tun_struct *tun,
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= tun->vnet_hdr_sz) > count)
+ if ((len -= tun->vnet_hdr_sz) > total_len)
return -EINVAL;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
@@ -643,14 +730,46 @@ static ssize_t tun_get_user(struct tun_struct *tun,
return -EINVAL;
}
- skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock);
+ if (msg_control)
+ zerocopy = true;
+
+ if (zerocopy) {
+ /* Userspace may produce vectors with count greater than
+ * MAX_SKB_FRAGS, so we need to linearize parts of the skb
+ * to let the rest of data to be fit in the frags.
+ */
+ if (count > MAX_SKB_FRAGS) {
+ copylen = iov_length(iv, count - MAX_SKB_FRAGS);
+ if (copylen < offset)
+ copylen = 0;
+ else
+ copylen -= offset;
+ } else
+ copylen = 0;
+ /* There are 256 bytes to be copied in skb, so there is enough
+ * room for skb expand head in case it is used.
+ * The rest of the buffer is mapped from userspace.
+ */
+ if (copylen < gso.hdr_len)
+ copylen = gso.hdr_len;
+ if (!copylen)
+ copylen = GOODCOPY_LEN;
+ } else
+ copylen = len;
+
+ skb = tun_alloc_skb(tun, align, copylen, gso.hdr_len, noblock);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
tun->dev->stats.rx_dropped++;
return PTR_ERR(skb);
}
- if (skb_copy_datagram_from_iovec(skb, 0, iv, offset, len)) {
+ if (zerocopy)
+ err = zerocopy_sg_from_iovec(skb, iv, offset, count);
+ else
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
+
+ if (err) {
tun->dev->stats.rx_dropped++;
kfree_skb(skb);
return -EFAULT;
@@ -724,12 +843,18 @@ static ssize_t tun_get_user(struct tun_struct *tun,
skb_shinfo(skb)->gso_segs = 0;
}
+ /* copy skb_ubuf_info for callback when skb has no error */
+ if (zerocopy) {
+ skb_shinfo(skb)->destructor_arg = msg_control;
+ skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
+ }
+
netif_rx_ni(skb);
tun->dev->stats.rx_packets++;
tun->dev->stats.rx_bytes += len;
- return count;
+ return total_len;
}
static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
@@ -744,7 +869,7 @@ static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
- result = tun_get_user(tun, iv, iov_length(iv, count),
+ result = tun_get_user(tun, NULL, iv, iov_length(iv, count), count,
file->f_flags & O_NONBLOCK);
tun_put(tun);
@@ -958,8 +1083,8 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len)
{
struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
- return tun_get_user(tun, m->msg_iov, total_len,
- m->msg_flags & MSG_DONTWAIT);
+ return tun_get_user(tun, m->msg_control, m->msg_iov, total_len,
+ m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
}
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
@@ -1115,6 +1240,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->flags = flags;
tun->txflt.count = 0;
tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
+ set_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags);
err = -ENOMEM;
sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
@@ -1128,6 +1254,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
sk->sk_sndbuf = INT_MAX;
+ sock_set_flag(sk, SOCK_ZEROCOPY);
tun_sk(sk)->tun = tun;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 833e32f8d63b..c1ae76968f47 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -134,6 +134,7 @@ config USB_NET_AX8817X
tristate "ASIX AX88xxx Based USB 2.0 Ethernet Adapters"
depends on USB_USBNET
select CRC32
+ select PHYLIB
default y
help
This option adds support for ASIX AX88xxx based USB 2.0
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index a2e2d72c52a0..bf063008c1af 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_USB_PEGASUS) += pegasus.o
obj-$(CONFIG_USB_RTL8150) += rtl8150.o
obj-$(CONFIG_USB_HSO) += hso.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
+asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
new file mode 100644
index 000000000000..e889631161b8
--- /dev/null
+++ b/drivers/net/usb/asix.h
@@ -0,0 +1,218 @@
+/*
+ * ASIX AX8817X based USB 2.0 Ethernet Devices
+ * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
+ * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
+ * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
+ * Copyright (c) 2002-2003 TiVo Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _ASIX_H
+#define _ASIX_H
+
+// #define DEBUG // error path messages, extra info
+// #define VERBOSE // more; success messages
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+
+#define DRIVER_VERSION "22-Dec-2011"
+#define DRIVER_NAME "asix"
+
+/* ASIX AX8817X based USB 2.0 Ethernet Devices */
+
+#define AX_CMD_SET_SW_MII 0x06
+#define AX_CMD_READ_MII_REG 0x07
+#define AX_CMD_WRITE_MII_REG 0x08
+#define AX_CMD_SET_HW_MII 0x0a
+#define AX_CMD_READ_EEPROM 0x0b
+#define AX_CMD_WRITE_EEPROM 0x0c
+#define AX_CMD_WRITE_ENABLE 0x0d
+#define AX_CMD_WRITE_DISABLE 0x0e
+#define AX_CMD_READ_RX_CTL 0x0f
+#define AX_CMD_WRITE_RX_CTL 0x10
+#define AX_CMD_READ_IPG012 0x11
+#define AX_CMD_WRITE_IPG0 0x12
+#define AX_CMD_WRITE_IPG1 0x13
+#define AX_CMD_READ_NODE_ID 0x13
+#define AX_CMD_WRITE_NODE_ID 0x14
+#define AX_CMD_WRITE_IPG2 0x14
+#define AX_CMD_WRITE_MULTI_FILTER 0x16
+#define AX88172_CMD_READ_NODE_ID 0x17
+#define AX_CMD_READ_PHY_ID 0x19
+#define AX_CMD_READ_MEDIUM_STATUS 0x1a
+#define AX_CMD_WRITE_MEDIUM_MODE 0x1b
+#define AX_CMD_READ_MONITOR_MODE 0x1c
+#define AX_CMD_WRITE_MONITOR_MODE 0x1d
+#define AX_CMD_READ_GPIOS 0x1e
+#define AX_CMD_WRITE_GPIOS 0x1f
+#define AX_CMD_SW_RESET 0x20
+#define AX_CMD_SW_PHY_STATUS 0x21
+#define AX_CMD_SW_PHY_SELECT 0x22
+
+#define AX_PHY_SELECT_MASK (BIT(3) | BIT(2))
+#define AX_PHY_SELECT_INTERNAL 0
+#define AX_PHY_SELECT_EXTERNAL BIT(2)
+
+#define AX_MONITOR_MODE 0x01
+#define AX_MONITOR_LINK 0x02
+#define AX_MONITOR_MAGIC 0x04
+#define AX_MONITOR_HSFS 0x10
+
+/* AX88172 Medium Status Register values */
+#define AX88172_MEDIUM_FD 0x02
+#define AX88172_MEDIUM_TX 0x04
+#define AX88172_MEDIUM_FC 0x10
+#define AX88172_MEDIUM_DEFAULT \
+ ( AX88172_MEDIUM_FD | AX88172_MEDIUM_TX | AX88172_MEDIUM_FC )
+
+#define AX_MCAST_FILTER_SIZE 8
+#define AX_MAX_MCAST 64
+
+#define AX_SWRESET_CLEAR 0x00
+#define AX_SWRESET_RR 0x01
+#define AX_SWRESET_RT 0x02
+#define AX_SWRESET_PRTE 0x04
+#define AX_SWRESET_PRL 0x08
+#define AX_SWRESET_BZ 0x10
+#define AX_SWRESET_IPRL 0x20
+#define AX_SWRESET_IPPD 0x40
+
+#define AX88772_IPG0_DEFAULT 0x15
+#define AX88772_IPG1_DEFAULT 0x0c
+#define AX88772_IPG2_DEFAULT 0x12
+
+/* AX88772 & AX88178 Medium Mode Register */
+#define AX_MEDIUM_PF 0x0080
+#define AX_MEDIUM_JFE 0x0040
+#define AX_MEDIUM_TFC 0x0020
+#define AX_MEDIUM_RFC 0x0010
+#define AX_MEDIUM_ENCK 0x0008
+#define AX_MEDIUM_AC 0x0004
+#define AX_MEDIUM_FD 0x0002
+#define AX_MEDIUM_GM 0x0001
+#define AX_MEDIUM_SM 0x1000
+#define AX_MEDIUM_SBP 0x0800
+#define AX_MEDIUM_PS 0x0200
+#define AX_MEDIUM_RE 0x0100
+
+#define AX88178_MEDIUM_DEFAULT \
+ (AX_MEDIUM_PS | AX_MEDIUM_FD | AX_MEDIUM_AC | \
+ AX_MEDIUM_RFC | AX_MEDIUM_TFC | AX_MEDIUM_JFE | \
+ AX_MEDIUM_RE)
+
+#define AX88772_MEDIUM_DEFAULT \
+ (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
+ AX_MEDIUM_TFC | AX_MEDIUM_PS | \
+ AX_MEDIUM_AC | AX_MEDIUM_RE)
+
+/* AX88772 & AX88178 RX_CTL values */
+#define AX_RX_CTL_SO 0x0080
+#define AX_RX_CTL_AP 0x0020
+#define AX_RX_CTL_AM 0x0010
+#define AX_RX_CTL_AB 0x0008
+#define AX_RX_CTL_SEP 0x0004
+#define AX_RX_CTL_AMALL 0x0002
+#define AX_RX_CTL_PRO 0x0001
+#define AX_RX_CTL_MFB_2048 0x0000
+#define AX_RX_CTL_MFB_4096 0x0100
+#define AX_RX_CTL_MFB_8192 0x0200
+#define AX_RX_CTL_MFB_16384 0x0300
+
+#define AX_DEFAULT_RX_CTL (AX_RX_CTL_SO | AX_RX_CTL_AB)
+
+/* GPIO 0 .. 2 toggles */
+#define AX_GPIO_GPO0EN 0x01 /* GPIO0 Output enable */
+#define AX_GPIO_GPO_0 0x02 /* GPIO0 Output value */
+#define AX_GPIO_GPO1EN 0x04 /* GPIO1 Output enable */
+#define AX_GPIO_GPO_1 0x08 /* GPIO1 Output value */
+#define AX_GPIO_GPO2EN 0x10 /* GPIO2 Output enable */
+#define AX_GPIO_GPO_2 0x20 /* GPIO2 Output value */
+#define AX_GPIO_RESERVED 0x40 /* Reserved */
+#define AX_GPIO_RSE 0x80 /* Reload serial EEPROM */
+
+#define AX_EEPROM_MAGIC 0xdeadbeef
+#define AX_EEPROM_LEN 0x200
+
+/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
+struct asix_data {
+ u8 multi_filter[AX_MCAST_FILTER_SIZE];
+ u8 mac_addr[ETH_ALEN];
+ u8 phymode;
+ u8 ledmode;
+ u8 res;
+};
+
+int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data);
+
+int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data);
+
+void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value,
+ u16 index, u16 size, void *data);
+
+int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb);
+
+struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags);
+
+int asix_set_sw_mii(struct usbnet *dev);
+int asix_set_hw_mii(struct usbnet *dev);
+
+int asix_read_phy_addr(struct usbnet *dev, int internal);
+int asix_get_phy_addr(struct usbnet *dev);
+
+int asix_sw_reset(struct usbnet *dev, u8 flags);
+
+u16 asix_read_rx_ctl(struct usbnet *dev);
+int asix_write_rx_ctl(struct usbnet *dev, u16 mode);
+
+u16 asix_read_medium_status(struct usbnet *dev);
+int asix_write_medium_mode(struct usbnet *dev, u16 mode);
+
+int asix_write_gpio(struct usbnet *dev, u16 value, int sleep);
+
+void asix_set_multicast(struct net_device *net);
+
+int asix_mdio_read(struct net_device *netdev, int phy_id, int loc);
+void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val);
+
+void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo);
+int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo);
+
+int asix_get_eeprom_len(struct net_device *net);
+int asix_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data);
+int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data);
+
+void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info);
+
+int asix_set_mac_address(struct net_device *net, void *p);
+
+#endif /* _ASIX_H */
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
new file mode 100644
index 000000000000..774d9ce2dafc
--- /dev/null
+++ b/drivers/net/usb/asix_common.c
@@ -0,0 +1,631 @@
+/*
+ * ASIX AX8817X based USB 2.0 Ethernet Devices
+ * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
+ * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
+ * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
+ * Copyright (c) 2002-2003 TiVo Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "asix.h"
+
+int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ void *buf;
+ int err = -ENOMEM;
+
+ netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ err = usb_control_msg(
+ dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ cmd,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value,
+ index,
+ buf,
+ size,
+ USB_CTRL_GET_TIMEOUT);
+ if (err == size)
+ memcpy(data, buf, size);
+ else if (err >= 0)
+ err = -EINVAL;
+ kfree(buf);
+
+out:
+ return err;
+}
+
+int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ void *buf = NULL;
+ int err = -ENOMEM;
+
+ netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
+
+ if (data) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ goto out;
+ }
+
+ err = usb_control_msg(
+ dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ cmd,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value,
+ index,
+ buf,
+ size,
+ USB_CTRL_SET_TIMEOUT);
+ kfree(buf);
+
+out:
+ return err;
+}
+
+static void asix_async_cmd_callback(struct urb *urb)
+{
+ struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
+ int status = urb->status;
+
+ if (status < 0)
+ printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
+ status);
+
+ kfree(req);
+ usb_free_urb(urb);
+}
+
+void asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ struct usb_ctrlrequest *req;
+ int status;
+ struct urb *urb;
+
+ netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
+ return;
+ }
+
+ req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
+ if (!req) {
+ netdev_err(dev->net, "Failed to allocate memory for control request\n");
+ usb_free_urb(urb);
+ return;
+ }
+
+ req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
+ req->bRequest = cmd;
+ req->wValue = cpu_to_le16(value);
+ req->wIndex = cpu_to_le16(index);
+ req->wLength = cpu_to_le16(size);
+
+ usb_fill_control_urb(urb, dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ (void *)req, data, size,
+ asix_async_cmd_callback, req);
+
+ status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (status < 0) {
+ netdev_err(dev->net, "Error submitting the control message: status=%d\n",
+ status);
+ kfree(req);
+ usb_free_urb(urb);
+ }
+}
+
+int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int offset = 0;
+
+ while (offset + sizeof(u32) < skb->len) {
+ struct sk_buff *ax_skb;
+ u16 size;
+ u32 header = get_unaligned_le32(skb->data + offset);
+
+ offset += sizeof(u32);
+
+ /* get the packet length */
+ size = (u16) (header & 0x7ff);
+ if (size != ((~header >> 16) & 0x07ff)) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
+ return 0;
+ }
+
+ if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
+ (size + offset > skb->len)) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+ size);
+ return 0;
+ }
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (!ax_skb)
+ return 0;
+
+ skb_put(ax_skb, size);
+ memcpy(ax_skb->data, skb->data + offset, size);
+ usbnet_skb_return(dev, ax_skb);
+
+ offset += (size + 1) & 0xfffe;
+ }
+
+ if (skb->len != offset) {
+ netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
+ skb->len);
+ return 0;
+ }
+ return 1;
+}
+
+struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int padlen;
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ u32 packet_len;
+ u32 padbytes = 0xffff0000;
+
+ padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
+
+ /* We need to push 4 bytes in front of frame (packet_len)
+ * and maybe add 4 bytes after the end (if padlen is 4)
+ *
+ * Avoid skb_copy_expand() expensive call, using following rules :
+ * - We are allowed to push 4 bytes in headroom if skb_header_cloned()
+ * is false (and if we have 4 bytes of headroom)
+ * - We are allowed to put 4 bytes at tail if skb_cloned()
+ * is false (and if we have 4 bytes of tailroom)
+ *
+ * TCP packets for example are cloned, but skb_header_release()
+ * was called in tcp stack, allowing us to use headroom for our needs.
+ */
+ if (!skb_header_cloned(skb) &&
+ !(padlen && skb_cloned(skb)) &&
+ headroom + tailroom >= 4 + padlen) {
+ /* following should not happen, but better be safe */
+ if (headroom < 4 ||
+ tailroom < padlen) {
+ skb->data = memmove(skb->head + 4, skb->data, skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ }
+ } else {
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb, 4, padlen, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len;
+ skb_push(skb, 4);
+ cpu_to_le32s(&packet_len);
+ skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+
+ if (padlen) {
+ cpu_to_le32s(&padbytes);
+ memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ skb_put(skb, sizeof(padbytes));
+ }
+ return skb;
+}
+
+int asix_set_sw_mii(struct usbnet *dev)
+{
+ int ret;
+ ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable software MII access\n");
+ return ret;
+}
+
+int asix_set_hw_mii(struct usbnet *dev)
+{
+ int ret;
+ ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
+ return ret;
+}
+
+int asix_read_phy_addr(struct usbnet *dev, int internal)
+{
+ int offset = (internal ? 1 : 0);
+ u8 buf[2];
+ int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
+
+ netdev_dbg(dev->net, "asix_get_phy_addr()\n");
+
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
+ goto out;
+ }
+ netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
+ *((__le16 *)buf));
+ ret = buf[offset];
+
+out:
+ return ret;
+}
+
+int asix_get_phy_addr(struct usbnet *dev)
+{
+ /* return the address of the internal phy */
+ return asix_read_phy_addr(dev, 1);
+}
+
+
+int asix_sw_reset(struct usbnet *dev, u8 flags)
+{
+ int ret;
+
+ ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
+
+ return ret;
+}
+
+u16 asix_read_rx_ctl(struct usbnet *dev)
+{
+ __le16 v;
+ int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
+
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
+ goto out;
+ }
+ ret = le16_to_cpu(v);
+out:
+ return ret;
+}
+
+int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
+ mode, ret);
+
+ return ret;
+}
+
+u16 asix_read_medium_status(struct usbnet *dev)
+{
+ __le16 v;
+ int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
+
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
+ ret);
+ return ret; /* TODO: callers not checking for error ret */
+ }
+
+ return le16_to_cpu(v);
+
+}
+
+int asix_write_medium_mode(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
+ mode, ret);
+
+ return ret;
+}
+
+int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
+ value, ret);
+
+ if (sleep)
+ msleep(sleep);
+
+ return ret;
+}
+
+/*
+ * AX88772 & AX88178 have a 16-bit RX_CTL value
+ */
+void asix_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ u16 rx_ctl = AX_DEFAULT_RX_CTL;
+
+ if (net->flags & IFF_PROMISC) {
+ rx_ctl |= AX_RX_CTL_PRO;
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > AX_MAX_MCAST) {
+ rx_ctl |= AX_RX_CTL_AMALL;
+ } else if (netdev_mc_empty(net)) {
+ /* just broadcast and directed */
+ } else {
+ /* We use the 20 byte dev->data
+ * for our 8 byte filter buffer
+ * to avoid allocating memory that
+ * is tricky to free later */
+ struct netdev_hw_addr *ha;
+ u32 crc_bits;
+
+ memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
+
+ /* Build the multicast hash filter. */
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ data->multi_filter[crc_bits >> 3] |=
+ 1 << (crc_bits & 7);
+ }
+
+ asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
+ AX_MCAST_FILTER_SIZE, data->multi_filter);
+
+ rx_ctl |= AX_RX_CTL_AM;
+ }
+
+ asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
+}
+
+int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res;
+
+ mutex_lock(&dev->phy_mutex);
+ asix_set_sw_mii(dev);
+ asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
+ (__u16)loc, 2, &res);
+ asix_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+
+ netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+ phy_id, loc, le16_to_cpu(res));
+
+ return le16_to_cpu(res);
+}
+
+void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res = cpu_to_le16(val);
+
+ netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
+ phy_id, loc, val);
+ mutex_lock(&dev->phy_mutex);
+ asix_set_sw_mii(dev);
+ asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
+ asix_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+}
+
+void asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt;
+
+ if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+ return;
+ }
+ wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+ wolinfo->wolopts = 0;
+ if (opt & AX_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & AX_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt = 0;
+
+ if (wolinfo->wolopts & WAKE_PHY)
+ opt |= AX_MONITOR_LINK;
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ opt |= AX_MONITOR_MAGIC;
+
+ if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
+ opt, 0, 0, NULL) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+int asix_get_eeprom_len(struct net_device *net)
+{
+ return AX_EEPROM_LEN;
+}
+
+int asix_get_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u16 *eeprom_buff;
+ int first_word, last_word;
+ int i;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = AX_EEPROM_MAGIC;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ /* ax8817x returns 2 bytes from eeprom on read */
+ for (i = first_word; i <= last_word; i++) {
+ if (asix_read_cmd(dev, AX_CMD_READ_EEPROM, i, 0, 2,
+ &(eeprom_buff[i - first_word])) < 0) {
+ kfree(eeprom_buff);
+ return -EIO;
+ }
+ }
+
+ memcpy(data, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
+ kfree(eeprom_buff);
+ return 0;
+}
+
+int asix_set_eeprom(struct net_device *net, struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u16 *eeprom_buff;
+ int first_word, last_word;
+ int i;
+ int ret;
+
+ netdev_dbg(net, "write EEPROM len %d, offset %d, magic 0x%x\n",
+ eeprom->len, eeprom->offset, eeprom->magic);
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ if (eeprom->magic != AX_EEPROM_MAGIC)
+ return -EINVAL;
+
+ first_word = eeprom->offset >> 1;
+ last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+
+ eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
+ GFP_KERNEL);
+ if (!eeprom_buff)
+ return -ENOMEM;
+
+ /* align data to 16 bit boundaries, read the missing data from
+ the EEPROM */
+ if (eeprom->offset & 1) {
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, first_word, 0, 2,
+ &(eeprom_buff[0]));
+ if (ret < 0) {
+ netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", first_word);
+ goto free;
+ }
+ }
+
+ if ((eeprom->offset + eeprom->len) & 1) {
+ ret = asix_read_cmd(dev, AX_CMD_READ_EEPROM, last_word, 0, 2,
+ &(eeprom_buff[last_word - first_word]));
+ if (ret < 0) {
+ netdev_err(net, "Failed to read EEPROM at offset 0x%02x.\n", last_word);
+ goto free;
+ }
+ }
+
+ memcpy((u8 *)eeprom_buff + (eeprom->offset & 1), data, eeprom->len);
+
+ /* write data to EEPROM */
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_ENABLE, 0x0000, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_err(net, "Failed to enable EEPROM write\n");
+ goto free;
+ }
+ msleep(20);
+
+ for (i = first_word; i <= last_word; i++) {
+ netdev_dbg(net, "write to EEPROM at offset 0x%02x, data 0x%04x\n",
+ i, eeprom_buff[i - first_word]);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_EEPROM, i,
+ eeprom_buff[i - first_word], 0, NULL);
+ if (ret < 0) {
+ netdev_err(net, "Failed to write EEPROM at offset 0x%02x.\n",
+ i);
+ goto free;
+ }
+ msleep(20);
+ }
+
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_DISABLE, 0x0000, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_err(net, "Failed to disable EEPROM write\n");
+ goto free;
+ }
+
+ ret = 0;
+free:
+ kfree(eeprom_buff);
+ return ret;
+}
+
+void asix_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strncpy (info->driver, DRIVER_NAME, sizeof info->driver);
+ strncpy (info->version, DRIVER_VERSION, sizeof info->version);
+ info->eedump_len = AX_EEPROM_LEN;
+}
+
+int asix_set_mac_address(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ struct sockaddr *addr = p;
+
+ if (netif_running(net))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* We use the 20 byte dev->data
+ * for our 6 byte mac buffer
+ * to avoid allocating memory that
+ * is tricky to free later */
+ memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
+ asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+
+ return 0;
+}
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix_devices.c
index 3ae80eccd0ef..4fd48df6b989 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix_devices.c
@@ -20,137 +20,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-// #define DEBUG // error path messages, extra info
-// #define VERBOSE // more; success messages
-
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/workqueue.h>
-#include <linux/mii.h>
-#include <linux/usb.h>
-#include <linux/crc32.h>
-#include <linux/usb/usbnet.h>
-#include <linux/slab.h>
-#include <linux/if_vlan.h>
-
-#define DRIVER_VERSION "22-Dec-2011"
-#define DRIVER_NAME "asix"
-
-/* ASIX AX8817X based USB 2.0 Ethernet Devices */
-
-#define AX_CMD_SET_SW_MII 0x06
-#define AX_CMD_READ_MII_REG 0x07
-#define AX_CMD_WRITE_MII_REG 0x08
-#define AX_CMD_SET_HW_MII 0x0a
-#define AX_CMD_READ_EEPROM 0x0b
-#define AX_CMD_WRITE_EEPROM 0x0c
-#define AX_CMD_WRITE_ENABLE 0x0d
-#define AX_CMD_WRITE_DISABLE 0x0e
-#define AX_CMD_READ_RX_CTL 0x0f
-#define AX_CMD_WRITE_RX_CTL 0x10
-#define AX_CMD_READ_IPG012 0x11
-#define AX_CMD_WRITE_IPG0 0x12
-#define AX_CMD_WRITE_IPG1 0x13
-#define AX_CMD_READ_NODE_ID 0x13
-#define AX_CMD_WRITE_NODE_ID 0x14
-#define AX_CMD_WRITE_IPG2 0x14
-#define AX_CMD_WRITE_MULTI_FILTER 0x16
-#define AX88172_CMD_READ_NODE_ID 0x17
-#define AX_CMD_READ_PHY_ID 0x19
-#define AX_CMD_READ_MEDIUM_STATUS 0x1a
-#define AX_CMD_WRITE_MEDIUM_MODE 0x1b
-#define AX_CMD_READ_MONITOR_MODE 0x1c
-#define AX_CMD_WRITE_MONITOR_MODE 0x1d
-#define AX_CMD_READ_GPIOS 0x1e
-#define AX_CMD_WRITE_GPIOS 0x1f
-#define AX_CMD_SW_RESET 0x20
-#define AX_CMD_SW_PHY_STATUS 0x21
-#define AX_CMD_SW_PHY_SELECT 0x22
-
-#define AX_MONITOR_MODE 0x01
-#define AX_MONITOR_LINK 0x02
-#define AX_MONITOR_MAGIC 0x04
-#define AX_MONITOR_HSFS 0x10
-
-/* AX88172 Medium Status Register values */
-#define AX88172_MEDIUM_FD 0x02
-#define AX88172_MEDIUM_TX 0x04
-#define AX88172_MEDIUM_FC 0x10
-#define AX88172_MEDIUM_DEFAULT \
- ( AX88172_MEDIUM_FD | AX88172_MEDIUM_TX | AX88172_MEDIUM_FC )
-
-#define AX_MCAST_FILTER_SIZE 8
-#define AX_MAX_MCAST 64
-
-#define AX_SWRESET_CLEAR 0x00
-#define AX_SWRESET_RR 0x01
-#define AX_SWRESET_RT 0x02
-#define AX_SWRESET_PRTE 0x04
-#define AX_SWRESET_PRL 0x08
-#define AX_SWRESET_BZ 0x10
-#define AX_SWRESET_IPRL 0x20
-#define AX_SWRESET_IPPD 0x40
-
-#define AX88772_IPG0_DEFAULT 0x15
-#define AX88772_IPG1_DEFAULT 0x0c
-#define AX88772_IPG2_DEFAULT 0x12
-
-/* AX88772 & AX88178 Medium Mode Register */
-#define AX_MEDIUM_PF 0x0080
-#define AX_MEDIUM_JFE 0x0040
-#define AX_MEDIUM_TFC 0x0020
-#define AX_MEDIUM_RFC 0x0010
-#define AX_MEDIUM_ENCK 0x0008
-#define AX_MEDIUM_AC 0x0004
-#define AX_MEDIUM_FD 0x0002
-#define AX_MEDIUM_GM 0x0001
-#define AX_MEDIUM_SM 0x1000
-#define AX_MEDIUM_SBP 0x0800
-#define AX_MEDIUM_PS 0x0200
-#define AX_MEDIUM_RE 0x0100
-
-#define AX88178_MEDIUM_DEFAULT \
- (AX_MEDIUM_PS | AX_MEDIUM_FD | AX_MEDIUM_AC | \
- AX_MEDIUM_RFC | AX_MEDIUM_TFC | AX_MEDIUM_JFE | \
- AX_MEDIUM_RE)
-
-#define AX88772_MEDIUM_DEFAULT \
- (AX_MEDIUM_FD | AX_MEDIUM_RFC | \
- AX_MEDIUM_TFC | AX_MEDIUM_PS | \
- AX_MEDIUM_AC | AX_MEDIUM_RE)
-
-/* AX88772 & AX88178 RX_CTL values */
-#define AX_RX_CTL_SO 0x0080
-#define AX_RX_CTL_AP 0x0020
-#define AX_RX_CTL_AM 0x0010
-#define AX_RX_CTL_AB 0x0008
-#define AX_RX_CTL_SEP 0x0004
-#define AX_RX_CTL_AMALL 0x0002
-#define AX_RX_CTL_PRO 0x0001
-#define AX_RX_CTL_MFB_2048 0x0000
-#define AX_RX_CTL_MFB_4096 0x0100
-#define AX_RX_CTL_MFB_8192 0x0200
-#define AX_RX_CTL_MFB_16384 0x0300
-
-#define AX_DEFAULT_RX_CTL (AX_RX_CTL_SO | AX_RX_CTL_AB)
-
-/* GPIO 0 .. 2 toggles */
-#define AX_GPIO_GPO0EN 0x01 /* GPIO0 Output enable */
-#define AX_GPIO_GPO_0 0x02 /* GPIO0 Output value */
-#define AX_GPIO_GPO1EN 0x04 /* GPIO1 Output enable */
-#define AX_GPIO_GPO_1 0x08 /* GPIO1 Output value */
-#define AX_GPIO_GPO2EN 0x10 /* GPIO2 Output enable */
-#define AX_GPIO_GPO_2 0x20 /* GPIO2 Output value */
-#define AX_GPIO_RESERVED 0x40 /* Reserved */
-#define AX_GPIO_RSE 0x80 /* Reload serial EEPROM */
-
-#define AX_EEPROM_MAGIC 0xdeadbeef
-#define AX88172_EEPROM_LEN 0x40
-#define AX88772_EEPROM_LEN 0xff
+#include "asix.h"
#define PHY_MODE_MARVELL 0x0000
#define MII_MARVELL_LED_CTRL 0x0018
@@ -166,15 +36,6 @@
#define PHY_MODE_RTL8211CL 0x000C
-/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
-struct asix_data {
- u8 multi_filter[AX_MCAST_FILTER_SIZE];
- u8 mac_addr[ETH_ALEN];
- u8 phymode;
- u8 ledmode;
- u8 eeprom_len;
-};
-
struct ax88172_int_data {
__le16 res1;
u8 link;
@@ -183,209 +44,6 @@ struct ax88172_int_data {
__le16 res3;
} __packed;
-static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
-{
- void *buf;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- buf = kmalloc(size, GFP_KERNEL);
- if (!buf)
- goto out;
-
- err = usb_control_msg(
- dev->udev,
- usb_rcvctrlpipe(dev->udev, 0),
- cmd,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- buf,
- size,
- USB_CTRL_GET_TIMEOUT);
- if (err == size)
- memcpy(data, buf, size);
- else if (err >= 0)
- err = -EINVAL;
- kfree(buf);
-
-out:
- return err;
-}
-
-static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
-{
- void *buf = NULL;
- int err = -ENOMEM;
-
- netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- if (data) {
- buf = kmemdup(data, size, GFP_KERNEL);
- if (!buf)
- goto out;
- }
-
- err = usb_control_msg(
- dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- cmd,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value,
- index,
- buf,
- size,
- USB_CTRL_SET_TIMEOUT);
- kfree(buf);
-
-out:
- return err;
-}
-
-static void asix_async_cmd_callback(struct urb *urb)
-{
- struct usb_ctrlrequest *req = (struct usb_ctrlrequest *)urb->context;
- int status = urb->status;
-
- if (status < 0)
- printk(KERN_DEBUG "asix_async_cmd_callback() failed with %d",
- status);
-
- kfree(req);
- usb_free_urb(urb);
-}
-
-static void
-asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
-{
- struct usb_ctrlrequest *req;
- int status;
- struct urb *urb;
-
- netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
- cmd, value, index, size);
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb) {
- netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
- return;
- }
-
- req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!req) {
- netdev_err(dev->net, "Failed to allocate memory for control request\n");
- usb_free_urb(urb);
- return;
- }
-
- req->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE;
- req->bRequest = cmd;
- req->wValue = cpu_to_le16(value);
- req->wIndex = cpu_to_le16(index);
- req->wLength = cpu_to_le16(size);
-
- usb_fill_control_urb(urb, dev->udev,
- usb_sndctrlpipe(dev->udev, 0),
- (void *)req, data, size,
- asix_async_cmd_callback, req);
-
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status < 0) {
- netdev_err(dev->net, "Error submitting the control message: status=%d\n",
- status);
- kfree(req);
- usb_free_urb(urb);
- }
-}
-
-static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
-{
- int offset = 0;
-
- while (offset + sizeof(u32) < skb->len) {
- struct sk_buff *ax_skb;
- u16 size;
- u32 header = get_unaligned_le32(skb->data + offset);
-
- offset += sizeof(u32);
-
- /* get the packet length */
- size = (u16) (header & 0x7ff);
- if (size != ((~header >> 16) & 0x07ff)) {
- netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
- return 0;
- }
-
- if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
- (size + offset > skb->len)) {
- netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
- size);
- return 0;
- }
- ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
- if (!ax_skb)
- return 0;
-
- skb_put(ax_skb, size);
- memcpy(ax_skb->data, skb->data + offset, size);
- usbnet_skb_return(dev, ax_skb);
-
- offset += (size + 1) & 0xfffe;
- }
-
- if (skb->len != offset) {
- netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
- skb->len);
- return 0;
- }
- return 1;
-}
-
-static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
- gfp_t flags)
-{
- int padlen;
- int headroom = skb_headroom(skb);
- int tailroom = skb_tailroom(skb);
- u32 packet_len;
- u32 padbytes = 0xffff0000;
-
- padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
-
- if ((!skb_cloned(skb)) &&
- ((headroom + tailroom) >= (4 + padlen))) {
- if ((headroom < 4) || (tailroom < padlen)) {
- skb->data = memmove(skb->head + 4, skb->data, skb->len);
- skb_set_tail_pointer(skb, skb->len);
- }
- } else {
- struct sk_buff *skb2;
- skb2 = skb_copy_expand(skb, 4, padlen, flags);
- dev_kfree_skb_any(skb);
- skb = skb2;
- if (!skb)
- return NULL;
- }
-
- skb_push(skb, 4);
- packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
- cpu_to_le32s(&packet_len);
- skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
-
- if (padlen) {
- cpu_to_le32s(&padbytes);
- memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
- skb_put(skb, sizeof(padbytes));
- }
- return skb;
-}
-
static void asix_status(struct usbnet *dev, struct urb *urb)
{
struct ax88172_int_data *event;
@@ -406,200 +64,6 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
}
}
-static inline int asix_set_sw_mii(struct usbnet *dev)
-{
- int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to enable software MII access\n");
- return ret;
-}
-
-static inline int asix_set_hw_mii(struct usbnet *dev)
-{
- int ret;
- ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to enable hardware MII access\n");
- return ret;
-}
-
-static inline int asix_get_phy_addr(struct usbnet *dev)
-{
- u8 buf[2];
- int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
-
- netdev_dbg(dev->net, "asix_get_phy_addr()\n");
-
- if (ret < 0) {
- netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
- goto out;
- }
- netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
- *((__le16 *)buf));
- ret = buf[1];
-
-out:
- return ret;
-}
-
-static int asix_sw_reset(struct usbnet *dev, u8 flags)
-{
- int ret;
-
- ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
-
- return ret;
-}
-
-static u16 asix_read_rx_ctl(struct usbnet *dev)
-{
- __le16 v;
- int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
-
- if (ret < 0) {
- netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
- goto out;
- }
- ret = le16_to_cpu(v);
-out:
- return ret;
-}
-
-static int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
-{
- int ret;
-
- netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
- ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
- mode, ret);
-
- return ret;
-}
-
-static u16 asix_read_medium_status(struct usbnet *dev)
-{
- __le16 v;
- int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
-
- if (ret < 0) {
- netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
- ret);
- return ret; /* TODO: callers not checking for error ret */
- }
-
- return le16_to_cpu(v);
-
-}
-
-static int asix_write_medium_mode(struct usbnet *dev, u16 mode)
-{
- int ret;
-
- netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
- ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
- mode, ret);
-
- return ret;
-}
-
-static int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
-{
- int ret;
-
- netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
- ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
- if (ret < 0)
- netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
- value, ret);
-
- if (sleep)
- msleep(sleep);
-
- return ret;
-}
-
-/*
- * AX88772 & AX88178 have a 16-bit RX_CTL value
- */
-static void asix_set_multicast(struct net_device *net)
-{
- struct usbnet *dev = netdev_priv(net);
- struct asix_data *data = (struct asix_data *)&dev->data;
- u16 rx_ctl = AX_DEFAULT_RX_CTL;
-
- if (net->flags & IFF_PROMISC) {
- rx_ctl |= AX_RX_CTL_PRO;
- } else if (net->flags & IFF_ALLMULTI ||
- netdev_mc_count(net) > AX_MAX_MCAST) {
- rx_ctl |= AX_RX_CTL_AMALL;
- } else if (netdev_mc_empty(net)) {
- /* just broadcast and directed */
- } else {
- /* We use the 20 byte dev->data
- * for our 8 byte filter buffer
- * to avoid allocating memory that
- * is tricky to free later */
- struct netdev_hw_addr *ha;
- u32 crc_bits;
-
- memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
-
- /* Build the multicast hash filter. */
- netdev_for_each_mc_addr(ha, net) {
- crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
- data->multi_filter[crc_bits >> 3] |=
- 1 << (crc_bits & 7);
- }
-
- asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
- AX_MCAST_FILTER_SIZE, data->multi_filter);
-
- rx_ctl |= AX_RX_CTL_AM;
- }
-
- asix_write_cmd_async(dev, AX_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
-}
-
-static int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
-{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res;
-
- mutex_lock(&dev->phy_mutex);
- asix_set_sw_mii(dev);
- asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
- (__u16)loc, 2, &res);
- asix_set_hw_mii(dev);
- mutex_unlock(&dev->phy_mutex);
-
- netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
- phy_id, loc, le16_to_cpu(res));
-
- return le16_to_cpu(res);
-}
-
-static void
-asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
-{
- struct usbnet *dev = netdev_priv(netdev);
- __le16 res = cpu_to_le16(val);
-
- netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
- phy_id, loc, val);
- mutex_lock(&dev->phy_mutex);
- asix_set_sw_mii(dev);
- asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
- asix_set_hw_mii(dev);
- mutex_unlock(&dev->phy_mutex);
-}
-
/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
static u32 asix_get_phyid(struct usbnet *dev)
{
@@ -629,88 +93,6 @@ static u32 asix_get_phyid(struct usbnet *dev)
return phy_id;
}
-static void
-asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
-{
- struct usbnet *dev = netdev_priv(net);
- u8 opt;
-
- if (asix_read_cmd(dev, AX_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
- wolinfo->supported = 0;
- wolinfo->wolopts = 0;
- return;
- }
- wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
- wolinfo->wolopts = 0;
- if (opt & AX_MONITOR_LINK)
- wolinfo->wolopts |= WAKE_PHY;
- if (opt & AX_MONITOR_MAGIC)
- wolinfo->wolopts |= WAKE_MAGIC;
-}
-
-static int
-asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
-{
- struct usbnet *dev = netdev_priv(net);
- u8 opt = 0;
-
- if (wolinfo->wolopts & WAKE_PHY)
- opt |= AX_MONITOR_LINK;
- if (wolinfo->wolopts & WAKE_MAGIC)
- opt |= AX_MONITOR_MAGIC;
-
- if (asix_write_cmd(dev, AX_CMD_WRITE_MONITOR_MODE,
- opt, 0, 0, NULL) < 0)
- return -EINVAL;
-
- return 0;
-}
-
-static int asix_get_eeprom_len(struct net_device *net)
-{
- struct usbnet *dev = netdev_priv(net);
- struct asix_data *data = (struct asix_data *)&dev->data;
-
- return data->eeprom_len;
-}
-
-static int asix_get_eeprom(struct net_device *net,
- struct ethtool_eeprom *eeprom, u8 *data)
-{
- struct usbnet *dev = netdev_priv(net);
- __le16 *ebuf = (__le16 *)data;
- int i;
-
- /* Crude hack to ensure that we don't overwrite memory
- * if an odd length is supplied
- */
- if (eeprom->len % 2)
- return -EINVAL;
-
- eeprom->magic = AX_EEPROM_MAGIC;
-
- /* ax8817x returns 2 bytes from eeprom on read */
- for (i=0; i < eeprom->len / 2; i++) {
- if (asix_read_cmd(dev, AX_CMD_READ_EEPROM,
- eeprom->offset + i, 0, 2, &ebuf[i]) < 0)
- return -EINVAL;
- }
- return 0;
-}
-
-static void asix_get_drvinfo (struct net_device *net,
- struct ethtool_drvinfo *info)
-{
- struct usbnet *dev = netdev_priv(net);
- struct asix_data *data = (struct asix_data *)&dev->data;
-
- /* Inherit standard device info */
- usbnet_get_drvinfo(net, info);
- strncpy (info->driver, DRIVER_NAME, sizeof info->driver);
- strncpy (info->version, DRIVER_VERSION, sizeof info->version);
- info->eedump_len = data->eeprom_len;
-}
-
static u32 asix_get_link(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
@@ -725,30 +107,6 @@ static int asix_ioctl (struct net_device *net, struct ifreq *rq, int cmd)
return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}
-static int asix_set_mac_address(struct net_device *net, void *p)
-{
- struct usbnet *dev = netdev_priv(net);
- struct asix_data *data = (struct asix_data *)&dev->data;
- struct sockaddr *addr = p;
-
- if (netif_running(net))
- return -EBUSY;
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
-
- /* We use the 20 byte dev->data
- * for our 6 byte mac buffer
- * to avoid allocating memory that
- * is tricky to free later */
- memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
- asix_write_cmd_async(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
- data->mac_addr);
-
- return 0;
-}
-
/* We need to override some ethtool_ops so we require our
own structure so we don't interfere with other usbnet
devices that may be connected at the same time. */
@@ -761,6 +119,7 @@ static const struct ethtool_ops ax88172_ethtool_ops = {
.set_wol = asix_set_wol,
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
+ .set_eeprom = asix_set_eeprom,
.get_settings = usbnet_get_settings,
.set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
@@ -843,9 +202,6 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
u8 buf[ETH_ALEN];
int i;
unsigned long gpio_bits = dev->driver_info->data;
- struct asix_data *data = (struct asix_data *)&dev->data;
-
- data->eeprom_len = AX88172_EEPROM_LEN;
usbnet_get_endpoints(dev,intf);
@@ -880,6 +236,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &ax88172_netdev_ops;
dev->net->ethtool_ops = &ax88172_ethtool_ops;
+ dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
+ dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
@@ -901,6 +259,7 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
.set_wol = asix_set_wol,
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
+ .set_eeprom = asix_set_eeprom,
.get_settings = usbnet_get_settings,
.set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
@@ -1049,12 +408,9 @@ static const struct net_device_ops ax88772_netdev_ops = {
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret, embd_phy;
- struct asix_data *data = (struct asix_data *)&dev->data;
u8 buf[ETH_ALEN];
u32 phyid;
- data->eeprom_len = AX88772_EEPROM_LEN;
-
usbnet_get_endpoints(dev,intf);
/* Get the MAC address */
@@ -1075,6 +431,8 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &ax88772_netdev_ops;
dev->net->ethtool_ops = &ax88772_ethtool_ops;
+ dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
+ dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */
embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
@@ -1122,6 +480,7 @@ static const struct ethtool_ops ax88178_ethtool_ops = {
.set_wol = asix_set_wol,
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
+ .set_eeprom = asix_set_eeprom,
.get_settings = usbnet_get_settings,
.set_settings = usbnet_set_settings,
.nway_reset = usbnet_nway_reset,
@@ -1405,9 +764,6 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
u8 buf[ETH_ALEN];
- struct asix_data *data = (struct asix_data *)&dev->data;
-
- data->eeprom_len = AX88772_EEPROM_LEN;
usbnet_get_endpoints(dev,intf);
@@ -1510,6 +866,8 @@ static const struct driver_info ax88178_info = {
.tx_fixup = asix_tx_fixup,
};
+extern const struct driver_info ax88172a_info;
+
static const struct usb_device_id products [] = {
{
// Linksys USB200M
@@ -1635,6 +993,10 @@ static const struct usb_device_id products [] = {
// Asus USB Ethernet Adapter
USB_DEVICE (0x0b95, 0x7e2b),
.driver_info = (unsigned long) &ax88772_info,
+}, {
+ /* ASIX 88172a demo board */
+ USB_DEVICE(0x0b95, 0x172a),
+ .driver_info = (unsigned long) &ax88172a_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
new file mode 100644
index 000000000000..c8e0aa85fb8e
--- /dev/null
+++ b/drivers/net/usb/ax88172a.c
@@ -0,0 +1,414 @@
+/*
+ * ASIX AX88172A based USB 2.0 Ethernet Devices
+ * Copyright (C) 2012 OMICRON electronics GmbH
+ *
+ * Supports external PHYs via phylib. Based on the driver for the
+ * AX88772. Original copyrights follow:
+ *
+ * Copyright (C) 2003-2006 David Hollis <dhollis@davehollis.com>
+ * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
+ * Copyright (C) 2006 James Painter <jamie.painter@iname.com>
+ * Copyright (c) 2002-2003 TiVo Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "asix.h"
+#include <linux/phy.h>
+
+struct ax88172a_private {
+ struct mii_bus *mdio;
+ struct phy_device *phydev;
+ char phy_name[20];
+ u16 phy_addr;
+ u16 oldmode;
+ int use_embdphy;
+};
+
+/* MDIO read and write wrappers for phylib */
+static int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ return asix_mdio_read(((struct usbnet *)bus->priv)->net, phy_id,
+ regnum);
+}
+
+static int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum,
+ u16 val)
+{
+ asix_mdio_write(((struct usbnet *)bus->priv)->net, phy_id, regnum, val);
+ return 0;
+}
+
+static int ax88172a_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+ if (!netif_running(net))
+ return -EINVAL;
+
+ if (!net->phydev)
+ return -ENODEV;
+
+ return phy_mii_ioctl(net->phydev, rq, cmd);
+}
+
+/* set MAC link settings according to information from phylib */
+static void ax88172a_adjust_link(struct net_device *netdev)
+{
+ struct phy_device *phydev = netdev->phydev;
+ struct usbnet *dev = netdev_priv(netdev);
+ struct ax88172a_private *priv = dev->driver_priv;
+ u16 mode = 0;
+
+ if (phydev->link) {
+ mode = AX88772_MEDIUM_DEFAULT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ mode &= ~AX_MEDIUM_FD;
+
+ if (phydev->speed != SPEED_100)
+ mode &= ~AX_MEDIUM_PS;
+ }
+
+ if (mode != priv->oldmode) {
+ asix_write_medium_mode(dev, mode);
+ priv->oldmode = mode;
+ netdev_dbg(netdev, "speed %u duplex %d, setting mode to 0x%04x\n",
+ phydev->speed, phydev->duplex, mode);
+ phy_print_status(phydev);
+ }
+}
+
+static void ax88172a_status(struct usbnet *dev, struct urb *urb)
+{
+ /* link changes are detected by polling the phy */
+}
+
+/* use phylib infrastructure */
+static int ax88172a_init_mdio(struct usbnet *dev)
+{
+ struct ax88172a_private *priv = dev->driver_priv;
+ int ret, i;
+
+ priv->mdio = mdiobus_alloc();
+ if (!priv->mdio) {
+ netdev_err(dev->net, "Could not allocate MDIO bus\n");
+ return -ENOMEM;
+ }
+
+ priv->mdio->priv = (void *)dev;
+ priv->mdio->read = &asix_mdio_bus_read;
+ priv->mdio->write = &asix_mdio_bus_write;
+ priv->mdio->name = "Asix MDIO Bus";
+ /* mii bus name is usb-<usb bus number>-<usb device number> */
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ dev->udev->bus->busnum, dev->udev->devnum);
+
+ priv->mdio->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!priv->mdio->irq) {
+ netdev_err(dev->net, "Could not allocate mdio->irq\n");
+ ret = -ENOMEM;
+ goto mfree;
+ }
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ priv->mdio->irq[i] = PHY_POLL;
+
+ ret = mdiobus_register(priv->mdio);
+ if (ret) {
+ netdev_err(dev->net, "Could not register MDIO bus\n");
+ goto ifree;
+ }
+
+ netdev_info(dev->net, "registered mdio bus %s\n", priv->mdio->id);
+ return 0;
+
+ifree:
+ kfree(priv->mdio->irq);
+mfree:
+ mdiobus_free(priv->mdio);
+ return ret;
+}
+
+static void ax88172a_remove_mdio(struct usbnet *dev)
+{
+ struct ax88172a_private *priv = dev->driver_priv;
+
+ netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id);
+ mdiobus_unregister(priv->mdio);
+ kfree(priv->mdio->irq);
+ mdiobus_free(priv->mdio);
+}
+
+static const struct net_device_ops ax88172a_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = asix_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = ax88172a_ioctl,
+ .ndo_set_rx_mode = asix_set_multicast,
+};
+
+int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+ if (!net->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(net->phydev, cmd);
+}
+
+int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+{
+ if (!net->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(net->phydev, cmd);
+}
+
+int ax88172a_nway_reset(struct net_device *net)
+{
+ if (!net->phydev)
+ return -ENODEV;
+
+ return phy_start_aneg(net->phydev);
+}
+
+static const struct ethtool_ops ax88172a_ethtool_ops = {
+ .get_drvinfo = asix_get_drvinfo,
+ .get_link = usbnet_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_wol = asix_get_wol,
+ .set_wol = asix_set_wol,
+ .get_eeprom_len = asix_get_eeprom_len,
+ .get_eeprom = asix_get_eeprom,
+ .set_eeprom = asix_set_eeprom,
+ .get_settings = ax88172a_get_settings,
+ .set_settings = ax88172a_set_settings,
+ .nway_reset = ax88172a_nway_reset,
+};
+
+static int ax88172a_reset_phy(struct usbnet *dev, int embd_phy)
+{
+ int ret;
+
+ ret = asix_sw_reset(dev, AX_SWRESET_IPPD);
+ if (ret < 0)
+ goto err;
+
+ msleep(150);
+ ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
+ if (ret < 0)
+ goto err;
+
+ msleep(150);
+
+ ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_IPPD);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ return ret;
+}
+
+
+static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int ret;
+ u8 buf[ETH_ALEN];
+ struct ax88172a_private *priv;
+
+ usbnet_get_endpoints(dev, intf);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ netdev_err(dev->net, "Could not allocate memory for private data\n");
+ return -ENOMEM;
+ }
+ dev->driver_priv = priv;
+
+ /* Get the MAC address */
+ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
+ goto free;
+ }
+ memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+
+ dev->net->netdev_ops = &ax88172a_netdev_ops;
+ dev->net->ethtool_ops = &ax88172a_ethtool_ops;
+
+ /* are we using the internal or the external phy? */
+ ret = asix_read_cmd(dev, AX_CMD_SW_PHY_STATUS, 0, 0, 1, buf);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to read software interface selection register: %d\n",
+ ret);
+ goto free;
+ }
+
+ netdev_dbg(dev->net, "AX_CMD_SW_PHY_STATUS = 0x%02x\n", buf[0]);
+ switch (buf[0] & AX_PHY_SELECT_MASK) {
+ case AX_PHY_SELECT_INTERNAL:
+ netdev_dbg(dev->net, "use internal phy\n");
+ priv->use_embdphy = 1;
+ break;
+ case AX_PHY_SELECT_EXTERNAL:
+ netdev_dbg(dev->net, "use external phy\n");
+ priv->use_embdphy = 0;
+ break;
+ default:
+ netdev_err(dev->net, "Interface mode not supported by driver\n");
+ ret = -ENOTSUPP;
+ goto free;
+ }
+
+ priv->phy_addr = asix_read_phy_addr(dev, priv->use_embdphy);
+ ax88172a_reset_phy(dev, priv->use_embdphy);
+
+ /* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
+ if (dev->driver_info->flags & FLAG_FRAMING_AX) {
+ /* hard_mtu is still the default - the device does not support
+ jumbo eth frames */
+ dev->rx_urb_size = 2048;
+ }
+
+ /* init MDIO bus */
+ ret = ax88172a_init_mdio(dev);
+ if (ret)
+ goto free;
+
+ return 0;
+
+free:
+ kfree(priv);
+ return ret;
+}
+
+static int ax88172a_stop(struct usbnet *dev)
+{
+ struct ax88172a_private *priv = dev->driver_priv;
+
+ netdev_dbg(dev->net, "Stopping interface\n");
+
+ if (priv->phydev) {
+ netdev_info(dev->net, "Disconnecting from phy %s\n",
+ priv->phy_name);
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ }
+
+ return 0;
+}
+
+static void ax88172a_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct ax88172a_private *priv = dev->driver_priv;
+
+ ax88172a_remove_mdio(dev);
+ kfree(priv);
+}
+
+static int ax88172a_reset(struct usbnet *dev)
+{
+ struct asix_data *data = (struct asix_data *)&dev->data;
+ struct ax88172a_private *priv = dev->driver_priv;
+ int ret;
+ u16 rx_ctl;
+
+ ax88172a_reset_phy(dev, priv->use_embdphy);
+
+ msleep(150);
+ rx_ctl = asix_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = asix_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = asix_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ msleep(150);
+
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_IPG0,
+ AX88772_IPG0_DEFAULT | AX88772_IPG1_DEFAULT,
+ AX88772_IPG2_DEFAULT, 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
+ goto out;
+ }
+
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ goto out;
+
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = asix_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+ rx_ctl);
+
+ rx_ctl = asix_read_medium_status(dev);
+ netdev_dbg(dev->net, "Medium Status is 0x%04x after all initializations\n",
+ rx_ctl);
+
+ /* Connect to PHY */
+ snprintf(priv->phy_name, 20, PHY_ID_FMT,
+ priv->mdio->id, priv->phy_addr);
+
+ priv->phydev = phy_connect(dev->net, priv->phy_name,
+ &ax88172a_adjust_link,
+ 0, PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(priv->phydev)) {
+ netdev_err(dev->net, "Could not connect to PHY device %s\n",
+ priv->phy_name);
+ ret = PTR_ERR(priv->phydev);
+ goto out;
+ }
+
+ netdev_info(dev->net, "Connected to phy %s\n", priv->phy_name);
+
+ /* During power-up, the AX88172A set the power down (BMCR_PDOWN)
+ * bit of the PHY. Bring the PHY up again.
+ */
+ genphy_resume(priv->phydev);
+ phy_start(priv->phydev);
+
+ return 0;
+
+out:
+ return ret;
+
+}
+
+const struct driver_info ax88172a_info = {
+ .description = "ASIX AX88172A USB 2.0 Ethernet",
+ .bind = ax88172a_bind,
+ .reset = ax88172a_reset,
+ .stop = ax88172a_stop,
+ .unbind = ax88172a_unbind,
+ .status = ax88172a_status,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
+ .rx_fixup = asix_rx_fixup,
+ .tx_fixup = asix_tx_fixup,
+};
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index d848d4dd5754..187c144c5e5b 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -394,7 +394,7 @@ int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *id)
SET_NETDEV_DEV(dev, &intf->dev);
pnd->dev = dev;
- pnd->usb = usb_get_dev(usbdev);
+ pnd->usb = usbdev;
pnd->intf = intf;
pnd->data_intf = data_intf;
spin_lock_init(&pnd->tx_lock);
@@ -440,7 +440,6 @@ out:
static void usbpn_disconnect(struct usb_interface *intf)
{
struct usbpn_dev *pnd = usb_get_intfdata(intf);
- struct usb_device *usb = pnd->usb;
if (pnd->disconnected)
return;
@@ -449,7 +448,6 @@ static void usbpn_disconnect(struct usb_interface *intf)
usb_driver_release_interface(&usbpn_driver,
(pnd->intf == intf) ? pnd->data_intf : pnd->intf);
unregister_netdev(pnd->dev);
- usb_put_dev(usb);
}
static struct usb_driver usbpn_driver = {
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 7023220456c5..a0b5807b30d4 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -1329,8 +1329,6 @@ static int pegasus_probe(struct usb_interface *intf,
}
pegasus_count++;
- usb_get_dev(dev);
-
net = alloc_etherdev(sizeof(struct pegasus));
if (!net)
goto out;
@@ -1407,7 +1405,6 @@ out2:
out1:
free_netdev(net);
out:
- usb_put_dev(dev);
pegasus_dec_workqueue();
return res;
}
@@ -1425,7 +1422,6 @@ static void pegasus_disconnect(struct usb_interface *intf)
pegasus->flags |= PEGASUS_UNPLUG;
cancel_delayed_work(&pegasus->carrier_check);
unregister_netdev(pegasus->net);
- usb_put_dev(interface_to_usbdev(intf));
unlink_all_urbs(pegasus);
free_all_urbs(pegasus);
free_skb_pool(pegasus);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 3767a1225860..2ea126a16d79 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1,6 +1,10 @@
/*
* Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
*
+ * The probing code is heavily inspired by cdc_ether, which is:
+ * Copyright (C) 2003-2005 by David Brownell
+ * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@@ -15,11 +19,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc-wdm.h>
-/* The name of the CDC Device Management driver */
-#define DM_DRIVER "cdc_wdm"
-
-/*
- * This driver supports wwan (3G/LTE/?) devices using a vendor
+/* This driver supports wwan (3G/LTE/?) devices using a vendor
* specific management protocol called Qualcomm MSM Interface (QMI) -
* in addition to the more common AT commands over serial interface
* management
@@ -31,59 +31,117 @@
* management protocol is used in place of the standard CDC
* notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
*
+ * Alternatively, control and data functions can be combined in a
+ * single USB interface.
+ *
* Handling a protocol like QMI is out of the scope for any driver.
- * It can be exported as a character device using the cdc-wdm driver,
- * which will enable userspace applications ("modem managers") to
- * handle it. This may be required to use the network interface
- * provided by the driver.
+ * It is exported as a character device using the cdc-wdm driver as
+ * a subdriver, enabling userspace applications ("modem managers") to
+ * handle it.
*
* These devices may alternatively/additionally be configured using AT
- * commands on any of the serial interfaces driven by the option driver
- *
- * This driver binds only to the data ("slave") interface to enable
- * the cdc-wdm driver to bind to the control interface. It still
- * parses the CDC functional descriptors on the control interface to
- * a) verify that this is indeed a handled interface (CDC Union
- * header lists it as slave)
- * b) get MAC address and other ethernet config from the CDC Ethernet
- * header
- * c) enable user bind requests against the control interface, which
- * is the common way to bind to CDC Ethernet Control Model type
- * interfaces
- * d) provide a hint to the user about which interface is the
- * corresponding management interface
+ * commands on a serial interface
*/
+/* driver specific data */
+struct qmi_wwan_state {
+ struct usb_driver *subdriver;
+ atomic_t pmcount;
+ unsigned long unused;
+ struct usb_interface *control;
+ struct usb_interface *data;
+};
+
+/* using a counter to merge subdriver requests with our own into a combined state */
+static int qmi_wwan_manage_power(struct usbnet *dev, int on)
+{
+ struct qmi_wwan_state *info = (void *)&dev->data;
+ int rv = 0;
+
+ dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on);
+
+ if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) {
+ /* need autopm_get/put here to ensure the usbcore sees the new value */
+ rv = usb_autopm_get_interface(dev->intf);
+ if (rv < 0)
+ goto err;
+ dev->intf->needs_remote_wakeup = on;
+ usb_autopm_put_interface(dev->intf);
+ }
+err:
+ return rv;
+}
+
+static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
+{
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ /* can be called while disconnecting */
+ if (!dev)
+ return 0;
+ return qmi_wwan_manage_power(dev, on);
+}
+
+/* collect all three endpoints and register subdriver */
+static int qmi_wwan_register_subdriver(struct usbnet *dev)
+{
+ int rv;
+ struct usb_driver *subdriver = NULL;
+ struct qmi_wwan_state *info = (void *)&dev->data;
+
+ /* collect bulk endpoints */
+ rv = usbnet_get_endpoints(dev, info->data);
+ if (rv < 0)
+ goto err;
+
+ /* update status endpoint if separate control interface */
+ if (info->control != info->data)
+ dev->status = &info->control->cur_altsetting->endpoint[0];
+
+ /* require interrupt endpoint for subdriver */
+ if (!dev->status) {
+ rv = -EINVAL;
+ goto err;
+ }
+
+ /* for subdriver power management */
+ atomic_set(&info->pmcount, 0);
+
+ /* register subdriver */
+ subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
+ if (IS_ERR(subdriver)) {
+ dev_err(&info->control->dev, "subdriver registration failed\n");
+ rv = PTR_ERR(subdriver);
+ goto err;
+ }
+
+ /* prevent usbnet from using status endpoint */
+ dev->status = NULL;
+
+ /* save subdriver struct for suspend/resume wrappers */
+ info->subdriver = subdriver;
+
+err:
+ return rv;
+}
+
static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
{
int status = -1;
- struct usb_interface *control = NULL;
u8 *buf = intf->cur_altsetting->extra;
int len = intf->cur_altsetting->extralen;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
struct usb_cdc_union_desc *cdc_union = NULL;
struct usb_cdc_ether_desc *cdc_ether = NULL;
- u32 required = 1 << USB_CDC_HEADER_TYPE | 1 << USB_CDC_UNION_TYPE;
u32 found = 0;
- atomic_t *pmcount = (void *)&dev->data[1];
+ struct usb_driver *driver = driver_of(intf);
+ struct qmi_wwan_state *info = (void *)&dev->data;
- atomic_set(pmcount, 0);
+ BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state)));
- /*
- * assume a data interface has no additional descriptors and
- * that the control and data interface are numbered
- * consecutively - this holds for the Huawei device at least
- */
- if (len == 0 && desc->bInterfaceNumber > 0) {
- control = usb_ifnum_to_if(dev->udev, desc->bInterfaceNumber - 1);
- if (!control)
- goto err;
-
- buf = control->cur_altsetting->extra;
- len = control->cur_altsetting->extralen;
- dev_dbg(&intf->dev, "guessing \"control\" => %s, \"data\" => this\n",
- dev_name(&control->dev));
- }
+ /* require a single interrupt status endpoint for subdriver */
+ if (intf->cur_altsetting->desc.bNumEndpoints != 1)
+ goto err;
while (len > 3) {
struct usb_descriptor_header *h = (void *)buf;
@@ -142,15 +200,23 @@ next_desc:
}
/* did we find all the required ones? */
- if ((found & required) != required) {
+ if (!(found & (1 << USB_CDC_HEADER_TYPE)) ||
+ !(found & (1 << USB_CDC_UNION_TYPE))) {
dev_err(&intf->dev, "CDC functional descriptors missing\n");
goto err;
}
- /* give the user a helpful hint if trying to bind to the wrong interface */
- if (cdc_union && desc->bInterfaceNumber == cdc_union->bMasterInterface0) {
- dev_err(&intf->dev, "leaving \"control\" interface for " DM_DRIVER " - try binding to %s instead!\n",
- dev_name(&usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0)->dev));
+ /* verify CDC Union */
+ if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) {
+ dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0);
+ goto err;
+ }
+
+ /* need to save these for unbind */
+ info->control = intf;
+ info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0);
+ if (!info->data) {
+ dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0);
goto err;
}
@@ -160,59 +226,29 @@ next_desc:
usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress);
}
- /* success! point the user to the management interface */
- if (control)
- dev_info(&intf->dev, "Use \"" DM_DRIVER "\" for QMI interface %s\n",
- dev_name(&control->dev));
-
- /* XXX: add a sysfs symlink somewhere to help management applications find it? */
+ /* claim data interface and set it up */
+ status = usb_driver_claim_interface(driver, info->data, dev);
+ if (status < 0)
+ goto err;
- /* collect bulk endpoints now that we know intf == "data" interface */
- status = usbnet_get_endpoints(dev, intf);
+ status = qmi_wwan_register_subdriver(dev);
+ if (status < 0) {
+ usb_set_intfdata(info->data, NULL);
+ usb_driver_release_interface(driver, info->data);
+ }
err:
return status;
}
-/* using a counter to merge subdriver requests with our own into a combined state */
-static int qmi_wwan_manage_power(struct usbnet *dev, int on)
-{
- atomic_t *pmcount = (void *)&dev->data[1];
- int rv = 0;
-
- dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(pmcount), on);
-
- if ((on && atomic_add_return(1, pmcount) == 1) || (!on && atomic_dec_and_test(pmcount))) {
- /* need autopm_get/put here to ensure the usbcore sees the new value */
- rv = usb_autopm_get_interface(dev->intf);
- if (rv < 0)
- goto err;
- dev->intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(dev->intf);
- }
-err:
- return rv;
-}
-
-static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on)
-{
- struct usbnet *dev = usb_get_intfdata(intf);
- return qmi_wwan_manage_power(dev, on);
-}
-
/* Some devices combine the "control" and "data" functions into a
* single interface with all three endpoints: interrupt + bulk in and
* out
- *
- * Setting up cdc-wdm as a subdriver owning the interrupt endpoint
- * will let it provide userspace access to the encapsulated QMI
- * protocol without interfering with the usbnet operations.
- */
+ */
static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
{
int rv;
- struct usb_driver *subdriver = NULL;
- atomic_t *pmcount = (void *)&dev->data[1];
+ struct qmi_wwan_state *info = (void *)&dev->data;
/* ZTE makes devices where the interface descriptors and endpoint
* configurations of two or more interfaces are identical, even
@@ -228,43 +264,39 @@ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf)
goto err;
}
- atomic_set(pmcount, 0);
-
- /* collect all three endpoints */
- rv = usbnet_get_endpoints(dev, intf);
- if (rv < 0)
- goto err;
-
- /* require interrupt endpoint for subdriver */
- if (!dev->status) {
- rv = -EINVAL;
- goto err;
- }
-
- subdriver = usb_cdc_wdm_register(intf, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power);
- if (IS_ERR(subdriver)) {
- rv = PTR_ERR(subdriver);
- goto err;
- }
-
- /* can't let usbnet use the interrupt endpoint */
- dev->status = NULL;
-
- /* save subdriver struct for suspend/resume wrappers */
- dev->data[0] = (unsigned long)subdriver;
+ /* control and data is shared */
+ info->control = intf;
+ info->data = intf;
+ rv = qmi_wwan_register_subdriver(dev);
err:
return rv;
}
-static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf)
+static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf)
{
- struct usb_driver *subdriver = (void *)dev->data[0];
-
- if (subdriver && subdriver->disconnect)
- subdriver->disconnect(intf);
+ struct qmi_wwan_state *info = (void *)&dev->data;
+ struct usb_driver *driver = driver_of(intf);
+ struct usb_interface *other;
+
+ if (info->subdriver && info->subdriver->disconnect)
+ info->subdriver->disconnect(info->control);
+
+ /* allow user to unbind using either control or data */
+ if (intf == info->control)
+ other = info->data;
+ else
+ other = info->control;
+
+ /* only if not shared */
+ if (other && intf != other) {
+ usb_set_intfdata(other, NULL);
+ usb_driver_release_interface(driver, other);
+ }
- dev->data[0] = (unsigned long)NULL;
+ info->subdriver = NULL;
+ info->data = NULL;
+ info->control = NULL;
}
/* suspend/resume wrappers calling both usbnet and the cdc-wdm
@@ -276,15 +308,15 @@ static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *int
static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct usb_driver *subdriver = (void *)dev->data[0];
+ struct qmi_wwan_state *info = (void *)&dev->data;
int ret;
ret = usbnet_suspend(intf, message);
if (ret < 0)
goto err;
- if (subdriver && subdriver->suspend)
- ret = subdriver->suspend(intf, message);
+ if (info->subdriver && info->subdriver->suspend)
+ ret = info->subdriver->suspend(intf, message);
if (ret < 0)
usbnet_resume(intf);
err:
@@ -294,33 +326,33 @@ err:
static int qmi_wwan_resume(struct usb_interface *intf)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct usb_driver *subdriver = (void *)dev->data[0];
+ struct qmi_wwan_state *info = (void *)&dev->data;
int ret = 0;
- if (subdriver && subdriver->resume)
- ret = subdriver->resume(intf);
+ if (info->subdriver && info->subdriver->resume)
+ ret = info->subdriver->resume(intf);
if (ret < 0)
goto err;
ret = usbnet_resume(intf);
- if (ret < 0 && subdriver && subdriver->resume && subdriver->suspend)
- subdriver->suspend(intf, PMSG_SUSPEND);
+ if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend)
+ info->subdriver->suspend(intf, PMSG_SUSPEND);
err:
return ret;
}
-
static const struct driver_info qmi_wwan_info = {
- .description = "QMI speaking wwan device",
+ .description = "WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
};
static const struct driver_info qmi_wwan_shared = {
- .description = "QMI speaking wwan device with combined interface",
+ .description = "WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
};
@@ -328,7 +360,7 @@ static const struct driver_info qmi_wwan_force_int0 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(0), /* interface whitelist bitmap */
};
@@ -337,16 +369,25 @@ static const struct driver_info qmi_wwan_force_int1 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(1), /* interface whitelist bitmap */
};
+static const struct driver_info qmi_wwan_force_int2 = {
+ .description = "Qualcomm WWAN/QMI device",
+ .flags = FLAG_WWAN,
+ .bind = qmi_wwan_bind_shared,
+ .unbind = qmi_wwan_unbind,
+ .manage_power = qmi_wwan_manage_power,
+ .data = BIT(2), /* interface whitelist bitmap */
+};
+
static const struct driver_info qmi_wwan_force_int3 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(3), /* interface whitelist bitmap */
};
@@ -355,7 +396,7 @@ static const struct driver_info qmi_wwan_force_int4 = {
.description = "Qualcomm WWAN/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(4), /* interface whitelist bitmap */
};
@@ -377,7 +418,7 @@ static const struct driver_info qmi_wwan_sierra = {
.description = "Sierra Wireless wwan/QMI device",
.flags = FLAG_WWAN,
.bind = qmi_wwan_bind_shared,
- .unbind = qmi_wwan_unbind_shared,
+ .unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
.data = BIT(8) | BIT(19), /* interface whitelist bitmap */
};
@@ -400,7 +441,7 @@ static const struct usb_device_id products[] = {
.idVendor = HUAWEI_VENDOR_ID,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 1,
- .bInterfaceProtocol = 8, /* NOTE: This is the *slave* interface of the CDC Union! */
+ .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
@@ -408,7 +449,7 @@ static const struct usb_device_id products[] = {
.idVendor = HUAWEI_VENDOR_ID,
.bInterfaceClass = USB_CLASS_VENDOR_SPEC,
.bInterfaceSubClass = 1,
- .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */
+ .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Huawei E392, E398 and possibly others in "Windows mode"
@@ -440,6 +481,15 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
+ { /* ZTE MF821D */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x19d2,
+ .idProduct = 0x0326,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_force_int4,
+ },
{ /* ZTE (Vodafone) K3520-Z */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x19d2,
@@ -494,6 +544,15 @@ static const struct usb_device_id products[] = {
.bInterfaceProtocol = 0xff,
.driver_info = (unsigned long)&qmi_wwan_force_int4,
},
+ { /* ZTE MF60 */
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x19d2,
+ .idProduct = 0x1402,
+ .bInterfaceClass = 0xff,
+ .bInterfaceSubClass = 0xff,
+ .bInterfaceProtocol = 0xff,
+ .driver_info = (unsigned long)&qmi_wwan_force_int2,
+ },
{ /* Sierra Wireless MC77xx in QMI mode */
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x1199,
@@ -550,10 +609,27 @@ static const struct usb_device_id products[] = {
};
MODULE_DEVICE_TABLE(usb, products);
+static int qmi_wwan_probe(struct usb_interface *intf, const struct usb_device_id *prod)
+{
+ struct usb_device_id *id = (struct usb_device_id *)prod;
+
+ /* Workaround to enable dynamic IDs. This disables usbnet
+ * blacklisting functionality. Which, if required, can be
+ * reimplemented here by using a magic "blacklist" value
+ * instead of 0 in the static device id table
+ */
+ if (!id->driver_info) {
+ dev_dbg(&intf->dev, "setting defaults for dynamic device id\n");
+ id->driver_info = (unsigned long)&qmi_wwan_shared;
+ }
+
+ return usbnet_probe(intf, id);
+}
+
static struct usb_driver qmi_wwan_driver = {
.name = "qmi_wwan",
.id_table = products,
- .probe = usbnet_probe,
+ .probe = qmi_wwan_probe,
.disconnect = usbnet_disconnect,
.suspend = qmi_wwan_suspend,
.resume = qmi_wwan_resume,
@@ -562,17 +638,7 @@ static struct usb_driver qmi_wwan_driver = {
.disable_hub_initiated_lpm = 1,
};
-static int __init qmi_wwan_init(void)
-{
- return usb_register(&qmi_wwan_driver);
-}
-module_init(qmi_wwan_init);
-
-static void __exit qmi_wwan_exit(void)
-{
- usb_deregister(&qmi_wwan_driver);
-}
-module_exit(qmi_wwan_exit);
+module_usb_driver(qmi_wwan_driver);
MODULE_AUTHOR("Bjørn Mork <bjorn@mork.no>");
MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver");
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 1c6e51588da7..f5ab6e613ec8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -616,7 +616,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
/* no eeprom, or eeprom values are invalid. generate random MAC */
eth_hw_addr_random(dev->net);
- netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr");
+ netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr");
}
static int smsc75xx_set_mac_address(struct usbnet *dev)
@@ -1260,6 +1260,6 @@ static struct usb_driver smsc75xx_driver = {
module_usb_driver(smsc75xx_driver);
MODULE_AUTHOR("Nancy Lin");
-MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
+MODULE_AUTHOR("Steve Glendinning <steve.glendinning@shawell.net>");
MODULE_DESCRIPTION("SMSC75XX USB 2.0 Gigabit Ethernet Devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index b1112e753859..d45e539a84b7 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -578,6 +578,36 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data);
}
+static int smsc95xx_ethtool_getregslen(struct net_device *netdev)
+{
+ /* all smsc95xx registers */
+ return COE_CR - ID_REV + 1;
+}
+
+static void
+smsc95xx_ethtool_getregs(struct net_device *netdev, struct ethtool_regs *regs,
+ void *buf)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ unsigned int i, j;
+ int retval;
+ u32 *data = buf;
+
+ retval = smsc95xx_read_reg(dev, ID_REV, &regs->version);
+ if (retval < 0) {
+ netdev_warn(netdev, "REGS: cannot read ID_REV\n");
+ return;
+ }
+
+ for (i = ID_REV, j = 0; i <= COE_CR; i += (sizeof(u32)), j++) {
+ retval = smsc95xx_read_reg(dev, i, &data[j]);
+ if (retval < 0) {
+ netdev_warn(netdev, "REGS: cannot read reg[%x]\n", i);
+ return;
+ }
+ }
+}
+
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
@@ -589,6 +619,8 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
.get_eeprom = smsc95xx_ethtool_get_eeprom,
.set_eeprom = smsc95xx_ethtool_set_eeprom,
+ .get_regs_len = smsc95xx_ethtool_getregslen,
+ .get_regs = smsc95xx_ethtool_getregs,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -615,7 +647,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
/* no eeprom, or eeprom values are invalid. generate random MAC */
eth_hw_addr_random(dev->net);
- netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr\n");
+ netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
}
static int smsc95xx_set_mac_address(struct usbnet *dev)
@@ -1303,6 +1335,6 @@ static struct usb_driver smsc95xx_driver = {
module_usb_driver(smsc95xx_driver);
MODULE_AUTHOR("Nancy Lin");
-MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
+MODULE_AUTHOR("Steve Glendinning <steve.glendinning@shawell.net>");
MODULE_DESCRIPTION("SMSC95XX USB 2.0 Ethernet Devices");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aba769d77459..8531c1caac28 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -180,7 +180,40 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
-static void intr_complete (struct urb *urb);
+static void intr_complete (struct urb *urb)
+{
+ struct usbnet *dev = urb->context;
+ int status = urb->status;
+
+ switch (status) {
+ /* success */
+ case 0:
+ dev->driver_info->status(dev, urb);
+ break;
+
+ /* software-driven interface shutdown */
+ case -ENOENT: /* urb killed */
+ case -ESHUTDOWN: /* hardware gone */
+ netif_dbg(dev, ifdown, dev->net,
+ "intr shutdown, code %d\n", status);
+ return;
+
+ /* NOTE: not throttling like RX/TX, since this endpoint
+ * already polls infrequently
+ */
+ default:
+ netdev_dbg(dev->net, "intr status %d\n", status);
+ break;
+ }
+
+ if (!netif_running (dev->net))
+ return;
+
+ status = usb_submit_urb (urb, GFP_ATOMIC);
+ if (status != 0)
+ netif_err(dev, timer, dev->net,
+ "intr resubmit --> %d\n", status);
+}
static int init_status (struct usbnet *dev, struct usb_interface *intf)
{
@@ -519,42 +552,6 @@ block:
netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
-static void intr_complete (struct urb *urb)
-{
- struct usbnet *dev = urb->context;
- int status = urb->status;
-
- switch (status) {
- /* success */
- case 0:
- dev->driver_info->status(dev, urb);
- break;
-
- /* software-driven interface shutdown */
- case -ENOENT: /* urb killed */
- case -ESHUTDOWN: /* hardware gone */
- netif_dbg(dev, ifdown, dev->net,
- "intr shutdown, code %d\n", status);
- return;
-
- /* NOTE: not throttling like RX/TX, since this endpoint
- * already polls infrequently
- */
- default:
- netdev_dbg(dev->net, "intr status %d\n", status);
- break;
- }
-
- if (!netif_running (dev->net))
- return;
-
- memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
- status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status != 0)
- netif_err(dev, timer, dev->net,
- "intr resubmit --> %d\n", status);
-}
-
/*-------------------------------------------------------------------------*/
void usbnet_pause_rx(struct usbnet *dev)
{
@@ -1312,7 +1309,6 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_free_urb(dev->interrupt);
free_netdev(net);
- usb_put_dev (xdev);
}
EXPORT_SYMBOL_GPL(usbnet_disconnect);
@@ -1368,8 +1364,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
xdev = interface_to_usbdev (udev);
interface = udev->cur_altsetting;
- usb_get_dev (xdev);
-
status = -ENOMEM;
// set up our own records
@@ -1498,7 +1492,6 @@ out3:
out1:
free_netdev(net);
out:
- usb_put_dev(xdev);
return status;
}
EXPORT_SYMBOL_GPL(usbnet_probe);
@@ -1600,7 +1593,7 @@ static int __init usbnet_init(void)
BUILD_BUG_ON(
FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
- random_ether_addr(node_id);
+ eth_random_addr(node_id);
return 0;
}
module_init(usbnet_init);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f18149ae2588..83d2b0c34c5e 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -704,16 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
u64 tpackets, tbytes, rpackets, rbytes;
do {
- start = u64_stats_fetch_begin(&stats->tx_syncp);
+ start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
tpackets = stats->tx_packets;
tbytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry(&stats->tx_syncp, start));
+ } while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));
do {
- start = u64_stats_fetch_begin(&stats->rx_syncp);
+ start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
rpackets = stats->rx_packets;
rbytes = stats->rx_bytes;
- } while (u64_stats_fetch_retry(&stats->rx_syncp, start));
+ } while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
@@ -1062,7 +1062,7 @@ static int virtnet_probe(struct virtio_device *vdev)
return -ENOMEM;
/* Set up network device as normal. */
- dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3f04ba0a5454..93e0cfb739b8 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1037,7 +1037,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
#endif
dev_dbg(&adapter->netdev->dev,
"txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
- (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
+ (u32)(ctx.sop_txd -
tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d7a65e141d1a..44db8b75a531 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -231,7 +231,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
}
p = icp;
- count = x25_asy_esc(p, (unsigned char *) sl->xbuff, len);
+ count = x25_asy_esc(p, sl->xbuff, len);
/* Order of next two lines is *very* important.
* When we are sending a little amount of data,
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index 672de18a776c..71453db14258 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -7,9 +7,6 @@ config WIMAX_I2400M
comment "Enable USB support to see WiMAX USB drivers"
depends on USB = n
-comment "Enable MMC support to see WiMAX SDIO drivers"
- depends on MMC = n
-
config WIMAX_I2400M_USB
tristate "Intel Wireless WiMAX Connection 2400 over USB (including 5x50)"
depends on WIMAX && USB
@@ -21,25 +18,6 @@ config WIMAX_I2400M_USB
If unsure, it is safe to select M (module).
-config WIMAX_I2400M_SDIO
- tristate "Intel Wireless WiMAX Connection 2400 over SDIO"
- depends on WIMAX && MMC
- select WIMAX_I2400M
- help
- Select if you have a device based on the Intel WiMAX
- Connection 2400 over SDIO.
-
- If unsure, it is safe to select M (module).
-
-config WIMAX_IWMC3200_SDIO
- bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO (EXPERIMENTAL)"
- depends on WIMAX_I2400M_SDIO
- depends on EXPERIMENTAL
- select IWMC3200TOP
- help
- Select if you have a device based on the Intel Multicom WiMAX
- Connection 3200 over SDIO.
-
config WIMAX_I2400M_DEBUG_LEVEL
int "WiMAX i2400m debug level"
depends on WIMAX_I2400M
diff --git a/drivers/net/wimax/i2400m/Makefile b/drivers/net/wimax/i2400m/Makefile
index 5d9e018d31af..f6d19c348082 100644
--- a/drivers/net/wimax/i2400m/Makefile
+++ b/drivers/net/wimax/i2400m/Makefile
@@ -1,7 +1,6 @@
obj-$(CONFIG_WIMAX_I2400M) += i2400m.o
obj-$(CONFIG_WIMAX_I2400M_USB) += i2400m-usb.o
-obj-$(CONFIG_WIMAX_I2400M_SDIO) += i2400m-sdio.o
i2400m-y := \
control.o \
@@ -21,10 +20,3 @@ i2400m-usb-y := \
usb-tx.o \
usb-rx.o \
usb.o
-
-
-i2400m-sdio-y := \
- sdio.o \
- sdio-tx.o \
- sdio-fw.o \
- sdio-rx.o
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 2fea02b35b2d..4a01e5c7fe09 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -130,7 +130,7 @@ ssize_t i2400m_tlv_match(const struct i2400m_tlv_hdr *tlv,
&& le16_to_cpu(tlv->length) + sizeof(*tlv) != tlv_size) {
size_t size = le16_to_cpu(tlv->length) + sizeof(*tlv);
printk(KERN_WARNING "W: tlv type 0x%x mismatched because of "
- "size (got %zu vs %zu expected)\n",
+ "size (got %zu vs %zd expected)\n",
tlv_type, size, tlv_size);
return size;
}
@@ -235,7 +235,7 @@ const struct i2400m_tlv_hdr *i2400m_tlv_find(
break;
if (match > 0)
dev_warn(dev, "TLV type 0x%04x found with size "
- "mismatch (%zu vs %zu needed)\n",
+ "mismatch (%zu vs %zd needed)\n",
tlv_type, match, tlv_size);
}
return tlv;
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 47cae7150bc1..025426132754 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -754,8 +754,7 @@ EXPORT_SYMBOL_GPL(i2400m_error_recovery);
/*
* Alloc the command and ack buffers for boot mode
*
- * Get the buffers needed to deal with boot mode messages. These
- * buffers need to be allocated before the sdio receive irq is setup.
+ * Get the buffers needed to deal with boot mode messages.
*/
static
int i2400m_bm_buf_alloc(struct i2400m *i2400m)
@@ -897,7 +896,7 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
result = i2400m_read_mac_addr(i2400m);
if (result < 0)
goto error_read_mac_addr;
- random_ether_addr(i2400m->src_mac_addr);
+ eth_random_addr(i2400m->src_mac_addr);
i2400m->pm_notifier.notifier_call = i2400m_pm_notifier;
register_pm_notifier(&i2400m->pm_notifier);
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 7cbd7d231e11..283237f6f074 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -51,8 +51,7 @@
* firmware. Normal hardware takes only signed firmware.
*
* On boot mode, in USB, we write to the device using the bulk out
- * endpoint and read from it in the notification endpoint. In SDIO we
- * talk to it via the write address and read from the read address.
+ * endpoint and read from it in the notification endpoint.
*
* Upon entrance to boot mode, the device sends (preceded with a few
* zero length packets (ZLPs) on the notification endpoint in USB) a
@@ -1268,7 +1267,7 @@ int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
size_t leftover, offset, header_len, size;
leftover = top - itr;
- offset = itr - (const void *) bcf;
+ offset = itr - bcf;
if (leftover <= sizeof(*bcf_hdr)) {
dev_err(dev, "firmware %s: %zu B left at @%zx, "
"not enough for BCF header\n",
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
deleted file mode 100644
index 1d63ffdedfde..000000000000
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * SDIO-specific i2400m driver definitions
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Brian Bian <brian.bian@intel.com>
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * Yanir Lubetkin <yanirx.lubetkin@intel.com>
- * - Initial implementation
- *
- *
- * This driver implements the bus-specific part of the i2400m for
- * SDIO. Check i2400m.h for a generic driver description.
- *
- * ARCHITECTURE
- *
- * This driver sits under the bus-generic i2400m driver, providing the
- * connection to the device.
- *
- * When probed, all the function pointers are setup and then the
- * bus-generic code called. The generic driver will then use the
- * provided pointers for uploading firmware (i2400ms_bus_bm*() in
- * sdio-fw.c) and then setting up the device (i2400ms_dev_*() in
- * sdio.c).
- *
- * Once firmware is uploaded, TX functions (sdio-tx.c) are called when
- * data is ready for transmission in the TX fifo; then the SDIO IRQ is
- * fired and data is available (sdio-rx.c), it is sent to the generic
- * driver for processing with i2400m_rx.
- */
-
-#ifndef __I2400M_SDIO_H__
-#define __I2400M_SDIO_H__
-
-#include "i2400m.h"
-
-/* Host-Device interface for SDIO */
-enum {
- I2400M_SDIO_BOOT_RETRIES = 3,
- I2400MS_BLK_SIZE = 256,
- I2400MS_PL_SIZE_MAX = 0x3E00,
-
- I2400MS_DATA_ADDR = 0x0,
- I2400MS_INTR_STATUS_ADDR = 0x13,
- I2400MS_INTR_CLEAR_ADDR = 0x13,
- I2400MS_INTR_ENABLE_ADDR = 0x14,
- I2400MS_INTR_GET_SIZE_ADDR = 0x2C,
- /* The number of ticks to wait for the device to signal that
- * it is ready */
- I2400MS_INIT_SLEEP_INTERVAL = 100,
- /* How long to wait for the device to settle after reset */
- I2400MS_SETTLE_TIME = 40,
- /* The number of msec to wait for IOR after sending IOE */
- IWMC3200_IOR_TIMEOUT = 10,
-};
-
-
-/**
- * struct i2400ms - descriptor for a SDIO connected i2400m
- *
- * @i2400m: bus-generic i2400m implementation; has to be first (see
- * it's documentation in i2400m.h).
- *
- * @func: pointer to our SDIO function
- *
- * @tx_worker: workqueue struct used to TX data when the bus-generic
- * code signals packets are pending for transmission to the device.
- *
- * @tx_workqueue: workqeueue used for data TX; we don't use the
- * system's workqueue as that might cause deadlocks with code in
- * the bus-generic driver. The read/write operation to the queue
- * is protected with spinlock (tx_lock in struct i2400m) to avoid
- * the queue being destroyed in the middle of a the queue read/write
- * operation.
- *
- * @debugfs_dentry: dentry for the SDIO specific debugfs files
- *
- * Note this value is set to NULL upon destruction; this is
- * because some routinges use it to determine if we are inside the
- * probe() path or some other path. When debugfs is disabled,
- * creation sets the dentry to '(void*) -ENODEV', which is valid
- * for the test.
- */
-struct i2400ms {
- struct i2400m i2400m; /* FIRST! See doc */
- struct sdio_func *func;
-
- struct work_struct tx_worker;
- struct workqueue_struct *tx_workqueue;
- char tx_wq_name[32];
-
- struct dentry *debugfs_dentry;
-
- wait_queue_head_t bm_wfa_wq;
- int bm_wait_result;
- size_t bm_ack_size;
-
- /* Device is any of the iwmc3200 SKUs */
- unsigned iwmc3200:1;
-};
-
-
-static inline
-void i2400ms_init(struct i2400ms *i2400ms)
-{
- i2400m_init(&i2400ms->i2400m);
-}
-
-
-extern int i2400ms_rx_setup(struct i2400ms *);
-extern void i2400ms_rx_release(struct i2400ms *);
-
-extern int i2400ms_tx_setup(struct i2400ms *);
-extern void i2400ms_tx_release(struct i2400ms *);
-extern void i2400ms_bus_tx_kick(struct i2400m *);
-
-extern ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *,
- const struct i2400m_bootrom_header *,
- size_t, int);
-extern ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *,
- struct i2400m_bootrom_header *,
- size_t);
-extern void i2400ms_bus_bm_release(struct i2400m *);
-extern int i2400ms_bus_bm_setup(struct i2400m *);
-
-#endif /* #ifndef __I2400M_SDIO_H__ */
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index c806d4550212..79c6505b5c20 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -46,7 +46,7 @@
* - bus generic driver (this part)
*
* The bus specific driver sets up stuff specific to the bus the
- * device is connected to (USB, SDIO, PCI, tam-tam...non-authoritative
+ * device is connected to (USB, PCI, tam-tam...non-authoritative
* nor binding list) which is basically the device-model management
* (probe/disconnect, etc), moving data from device to kernel and
* back, doing the power saving details and reseting the device.
@@ -238,14 +238,13 @@ struct i2400m_barker_db;
* amount needed for loading firmware, where us dev_start/stop setup
* the rest needed to do full data/control traffic.
*
- * @bus_tx_block_size: [fill] SDIO imposes a 256 block size, USB 16,
- * so we have a tx_blk_size variable that the bus layer sets to
- * tell the engine how much of that we need.
+ * @bus_tx_block_size: [fill] USB imposes a 16 block size, but other
+ * busses will differ. So we have a tx_blk_size variable that the
+ * bus layer sets to tell the engine how much of that we need.
*
* @bus_tx_room_min: [fill] Minimum room required while allocating
- * TX queue's buffer space for message header. SDIO requires
- * 224 bytes and USB 16 bytes. Refer bus specific driver code
- * for details.
+ * TX queue's buffer space for message header. USB requires
+ * 16 bytes. Refer to bus specific driver code for details.
*
* @bus_pl_size_max: [fill] Maximum payload size.
*
diff --git a/drivers/net/wimax/i2400m/sdio-debug-levels.h b/drivers/net/wimax/i2400m/sdio-debug-levels.h
deleted file mode 100644
index c51998741301..000000000000
--- a/drivers/net/wimax/i2400m/sdio-debug-levels.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * debug levels control file for the i2400m module's
- */
-#ifndef __debug_levels__h__
-#define __debug_levels__h__
-
-/* Maximum compile and run time debug level for all submodules */
-#define D_MODULENAME i2400m_sdio
-#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
-
-#include <linux/wimax/debug.h>
-
-/* List of all the enabled modules */
-enum d_module {
- D_SUBMODULE_DECLARE(main),
- D_SUBMODULE_DECLARE(tx),
- D_SUBMODULE_DECLARE(rx),
- D_SUBMODULE_DECLARE(fw)
-};
-
-
-#endif /* #ifndef __debug_levels__h__ */
diff --git a/drivers/net/wimax/i2400m/sdio-fw.c b/drivers/net/wimax/i2400m/sdio-fw.c
deleted file mode 100644
index 8e025418f5be..000000000000
--- a/drivers/net/wimax/i2400m/sdio-fw.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * Firmware uploader's SDIO specifics
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Yanir Lubetkin <yanirx.lubetkin@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * - Initial implementation
- *
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * - Bus generic/specific split for USB
- *
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * - Initial implementation for SDIO
- *
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * - SDIO rehash for changes in the bus-driver model
- *
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * - Make it IRQ based, not polling
- *
- * THE PROCEDURE
- *
- * See fw.c for the generic description of this procedure.
- *
- * This file implements only the SDIO specifics. It boils down to how
- * to send a command and waiting for an acknowledgement from the
- * device.
- *
- * All this code is sequential -- all i2400ms_bus_bm_*() functions are
- * executed in the same thread, except i2400ms_bm_irq() [on its own by
- * the SDIO driver]. This makes it possible to avoid locking.
- *
- * COMMAND EXECUTION
- *
- * The generic firmware upload code will call i2400m_bus_bm_cmd_send()
- * to send commands.
- *
- * The SDIO devices expects things in 256 byte blocks, so it will pad
- * it, compute the checksum (if needed) and pass it to SDIO.
- *
- * ACK RECEPTION
- *
- * This works in IRQ mode -- the fw loader says when to wait for data
- * and for that it calls i2400ms_bus_bm_wait_for_ack().
- *
- * This checks if there is any data available (RX size > 0); if not,
- * waits for the IRQ handler to notify about it. Once there is data,
- * it is read and passed to the caller. Doing it this way we don't
- * need much coordination/locking, and it makes it much more difficult
- * for an interrupt to be lost and the wait_for_ack() function getting
- * stuck even when data is pending.
- */
-#include <linux/mmc/sdio_func.h>
-#include "i2400m-sdio.h"
-
-
-#define D_SUBMODULE fw
-#include "sdio-debug-levels.h"
-
-
-/*
- * Send a boot-mode command to the SDIO function
- *
- * We use a bounce buffer (i2400m->bm_cmd_buf) because we need to
- * touch the header if the RAW flag is not set.
- *
- * @flags: pass thru from i2400m_bm_cmd()
- * @return: cmd_size if ok, < 0 errno code on error.
- *
- * Note the command is padded to the SDIO block size for the device.
- */
-ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
- const struct i2400m_bootrom_header *_cmd,
- size_t cmd_size, int flags)
-{
- ssize_t result;
- struct device *dev = i2400m_dev(i2400m);
- struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
- struct i2400m_bootrom_header *cmd;
- /* SDIO restriction */
- size_t cmd_size_a = ALIGN(cmd_size, I2400MS_BLK_SIZE);
-
- d_fnstart(5, dev, "(i2400m %p cmd %p size %zu)\n",
- i2400m, _cmd, cmd_size);
- result = -E2BIG;
- if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
- goto error_too_big;
-
- if (_cmd != i2400m->bm_cmd_buf)
- memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
- cmd = i2400m->bm_cmd_buf;
- if (cmd_size_a > cmd_size) /* Zero pad space */
- memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
- if ((flags & I2400M_BM_CMD_RAW) == 0) {
- if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
- dev_warn(dev, "SW BUG: response_required == 0\n");
- i2400m_bm_cmd_prepare(cmd);
- }
- d_printf(4, dev, "BM cmd %d: %zu bytes (%zu padded)\n",
- opcode, cmd_size, cmd_size_a);
- d_dump(5, dev, cmd, cmd_size);
-
- sdio_claim_host(i2400ms->func); /* Send & check */
- result = sdio_memcpy_toio(i2400ms->func, I2400MS_DATA_ADDR,
- i2400m->bm_cmd_buf, cmd_size_a);
- sdio_release_host(i2400ms->func);
- if (result < 0) {
- dev_err(dev, "BM cmd %d: cannot send: %ld\n",
- opcode, (long) result);
- goto error_cmd_send;
- }
- result = cmd_size;
-error_cmd_send:
-error_too_big:
- d_fnend(5, dev, "(i2400m %p cmd %p size %zu) = %d\n",
- i2400m, _cmd, cmd_size, (int) result);
- return result;
-}
-
-
-/*
- * Read an ack from the device's boot-mode
- *
- * @i2400m:
- * @_ack: pointer to where to store the read data
- * @ack_size: how many bytes we should read
- *
- * Returns: < 0 errno code on error; otherwise, amount of received bytes.
- *
- * The ACK for a BM command is always at least sizeof(*ack) bytes, so
- * check for that. We don't need to check for device reboots
- *
- */
-ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
- struct i2400m_bootrom_header *ack,
- size_t ack_size)
-{
- ssize_t result;
- struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- int size;
-
- BUG_ON(sizeof(*ack) > ack_size);
-
- d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
- i2400m, ack, ack_size);
-
- result = wait_event_timeout(i2400ms->bm_wfa_wq,
- i2400ms->bm_ack_size != -EINPROGRESS,
- 2 * HZ);
- if (result == 0) {
- result = -ETIMEDOUT;
- dev_err(dev, "BM: error waiting for an ack\n");
- goto error_timeout;
- }
-
- spin_lock(&i2400m->rx_lock);
- result = i2400ms->bm_ack_size;
- BUG_ON(result == -EINPROGRESS);
- if (result < 0) /* so we exit when rx_release() is called */
- dev_err(dev, "BM: %s failed: %zd\n", __func__, result);
- else {
- size = min(ack_size, i2400ms->bm_ack_size);
- memcpy(ack, i2400m->bm_ack_buf, size);
- }
- /*
- * Remember always to clear the bm_ack_size to -EINPROGRESS
- * after the RX data is processed
- */
- i2400ms->bm_ack_size = -EINPROGRESS;
- spin_unlock(&i2400m->rx_lock);
-
-error_timeout:
- d_fnend(5, dev, "(i2400m %p ack %p size %zu) = %zd\n",
- i2400m, ack, ack_size, result);
- return result;
-}
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
deleted file mode 100644
index fb6396dd115f..000000000000
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * SDIO RX handling
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * - Initial implementation
- *
- *
- * This handles the RX path on SDIO.
- *
- * The SDIO bus driver calls the "irq" routine when data is available.
- * This is not a traditional interrupt routine since the SDIO bus
- * driver calls us from its irq thread context. Because of this
- * sleeping in the SDIO RX IRQ routine is okay.
- *
- * From there on, we obtain the size of the data that is available,
- * allocate an skb, copy it and then pass it to the generic driver's
- * RX routine [i2400m_rx()].
- *
- * ROADMAP
- *
- * i2400ms_irq()
- * i2400ms_rx()
- * __i2400ms_rx_get_size()
- * i2400m_is_boot_barker()
- * i2400m_rx()
- *
- * i2400ms_rx_setup()
- *
- * i2400ms_rx_release()
- */
-#include <linux/workqueue.h>
-#include <linux/wait.h>
-#include <linux/skbuff.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/slab.h>
-#include "i2400m-sdio.h"
-
-#define D_SUBMODULE rx
-#include "sdio-debug-levels.h"
-
-static const __le32 i2400m_ACK_BARKER[4] = {
- __constant_cpu_to_le32(I2400M_ACK_BARKER),
- __constant_cpu_to_le32(I2400M_ACK_BARKER),
- __constant_cpu_to_le32(I2400M_ACK_BARKER),
- __constant_cpu_to_le32(I2400M_ACK_BARKER)
-};
-
-
-/*
- * Read and return the amount of bytes available for RX
- *
- * The RX size has to be read like this: byte reads of three
- * sequential locations; then glue'em together.
- *
- * sdio_readl() doesn't work.
- */
-static ssize_t __i2400ms_rx_get_size(struct i2400ms *i2400ms)
-{
- int ret, cnt, val;
- ssize_t rx_size;
- unsigned xfer_size_addr;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &i2400ms->func->dev;
-
- d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
- xfer_size_addr = I2400MS_INTR_GET_SIZE_ADDR;
- rx_size = 0;
- for (cnt = 0; cnt < 3; cnt++) {
- val = sdio_readb(func, xfer_size_addr + cnt, &ret);
- if (ret < 0) {
- dev_err(dev, "RX: Can't read byte %d of RX size from "
- "0x%08x: %d\n", cnt, xfer_size_addr + cnt, ret);
- rx_size = ret;
- goto error_read;
- }
- rx_size = rx_size << 8 | (val & 0xff);
- }
- d_printf(6, dev, "RX: rx_size is %ld\n", (long) rx_size);
-error_read:
- d_fnend(7, dev, "(i2400ms %p) = %ld\n", i2400ms, (long) rx_size);
- return rx_size;
-}
-
-
-/*
- * Read data from the device (when in normal)
- *
- * Allocate an SKB of the right size, read the data in and then
- * deliver it to the generic layer.
- *
- * We also check for a reboot barker. That means the device died and
- * we have to reboot it.
- */
-static
-void i2400ms_rx(struct i2400ms *i2400ms)
-{
- int ret;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- struct i2400m *i2400m = &i2400ms->i2400m;
- struct sk_buff *skb;
- ssize_t rx_size;
-
- d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
- rx_size = __i2400ms_rx_get_size(i2400ms);
- if (rx_size < 0) {
- ret = rx_size;
- goto error_get_size;
- }
- /*
- * Hardware quirk: make sure to clear the INTR status register
- * AFTER getting the data transfer size.
- */
- sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
-
- ret = -ENOMEM;
- skb = alloc_skb(rx_size, GFP_ATOMIC);
- if (NULL == skb) {
- dev_err(dev, "RX: unable to alloc skb\n");
- goto error_alloc_skb;
- }
- ret = sdio_memcpy_fromio(func, skb->data,
- I2400MS_DATA_ADDR, rx_size);
- if (ret < 0) {
- dev_err(dev, "RX: SDIO data read failed: %d\n", ret);
- goto error_memcpy_fromio;
- }
-
- rmb(); /* make sure we get boot_mode from dev_reset_handle */
- if (unlikely(i2400m->boot_mode == 1)) {
- spin_lock(&i2400m->rx_lock);
- i2400ms->bm_ack_size = rx_size;
- spin_unlock(&i2400m->rx_lock);
- memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
- wake_up(&i2400ms->bm_wfa_wq);
- d_printf(5, dev, "RX: SDIO boot mode message\n");
- kfree_skb(skb);
- goto out;
- }
- ret = -EIO;
- if (unlikely(rx_size < sizeof(__le32))) {
- dev_err(dev, "HW BUG? only %zu bytes received\n", rx_size);
- goto error_bad_size;
- }
- if (likely(i2400m_is_d2h_barker(skb->data))) {
- skb_put(skb, rx_size);
- i2400m_rx(i2400m, skb);
- } else if (unlikely(i2400m_is_boot_barker(i2400m,
- skb->data, rx_size))) {
- ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
- dev_err(dev, "RX: SDIO reboot barker\n");
- kfree_skb(skb);
- } else {
- i2400m_unknown_barker(i2400m, skb->data, rx_size);
- kfree_skb(skb);
- }
-out:
- d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
- return;
-
-error_memcpy_fromio:
- kfree_skb(skb);
-error_alloc_skb:
-error_get_size:
-error_bad_size:
- d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
-}
-
-
-/*
- * Process an interrupt from the SDIO card
- *
- * FIXME: need to process other events that are not just ready-to-read
- *
- * Checks there is data ready and then proceeds to read it.
- */
-static
-void i2400ms_irq(struct sdio_func *func)
-{
- int ret;
- struct i2400ms *i2400ms = sdio_get_drvdata(func);
- struct device *dev = &func->dev;
- int val;
-
- d_fnstart(6, dev, "(i2400ms %p)\n", i2400ms);
- val = sdio_readb(func, I2400MS_INTR_STATUS_ADDR, &ret);
- if (ret < 0) {
- dev_err(dev, "RX: Can't read interrupt status: %d\n", ret);
- goto error_no_irq;
- }
- if (!val) {
- dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n");
- goto error_no_irq;
- }
- i2400ms_rx(i2400ms);
-error_no_irq:
- d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
-}
-
-
-/*
- * Setup SDIO RX
- *
- * Hooks up the IRQ handler and then enables IRQs.
- */
-int i2400ms_rx_setup(struct i2400ms *i2400ms)
-{
- int result;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- struct i2400m *i2400m = &i2400ms->i2400m;
-
- d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
-
- init_waitqueue_head(&i2400ms->bm_wfa_wq);
- spin_lock(&i2400m->rx_lock);
- i2400ms->bm_wait_result = -EINPROGRESS;
- /*
- * Before we are about to enable the RX interrupt, make sure
- * bm_ack_size is cleared to -EINPROGRESS which indicates
- * no RX interrupt happened yet or the previous interrupt
- * has been handled, we are ready to take the new interrupt
- */
- i2400ms->bm_ack_size = -EINPROGRESS;
- spin_unlock(&i2400m->rx_lock);
-
- sdio_claim_host(func);
- result = sdio_claim_irq(func, i2400ms_irq);
- if (result < 0) {
- dev_err(dev, "Cannot claim IRQ: %d\n", result);
- goto error_irq_claim;
- }
- result = 0;
- sdio_writeb(func, 1, I2400MS_INTR_ENABLE_ADDR, &result);
- if (result < 0) {
- sdio_release_irq(func);
- dev_err(dev, "Failed to enable interrupts %d\n", result);
- }
-error_irq_claim:
- sdio_release_host(func);
- d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
- return result;
-}
-
-
-/*
- * Tear down SDIO RX
- *
- * Disables IRQs in the device and removes the IRQ handler.
- */
-void i2400ms_rx_release(struct i2400ms *i2400ms)
-{
- int result;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- struct i2400m *i2400m = &i2400ms->i2400m;
-
- d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
- spin_lock(&i2400m->rx_lock);
- i2400ms->bm_ack_size = -EINTR;
- spin_unlock(&i2400m->rx_lock);
- wake_up_all(&i2400ms->bm_wfa_wq);
- sdio_claim_host(func);
- sdio_writeb(func, 0, I2400MS_INTR_ENABLE_ADDR, &result);
- sdio_release_irq(func);
- sdio_release_host(func);
- d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
-}
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
deleted file mode 100644
index b53cd1c80e3e..000000000000
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * SDIO TX transaction backends
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <linux-wimax@intel.com>
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * - Initial implementation
- *
- *
- * Takes the TX messages in the i2400m's driver TX FIFO and sends them
- * to the device until there are no more.
- *
- * If we fail sending the message, we just drop it. There isn't much
- * we can do at this point. Most of the traffic is network, which has
- * recovery methods for dropped packets.
- *
- * The SDIO functions are not atomic, so we can't run from the context
- * where i2400m->bus_tx_kick() [i2400ms_bus_tx_kick()] is being called
- * (some times atomic). Thus, the actual TX work is deferred to a
- * workqueue.
- *
- * ROADMAP
- *
- * i2400ms_bus_tx_kick()
- * i2400ms_tx_submit() [through workqueue]
- *
- * i2400m_tx_setup()
- *
- * i2400m_tx_release()
- */
-#include <linux/mmc/sdio_func.h>
-#include "i2400m-sdio.h"
-
-#define D_SUBMODULE tx
-#include "sdio-debug-levels.h"
-
-
-/*
- * Pull TX transations from the TX FIFO and send them to the device
- * until there are no more.
- */
-static
-void i2400ms_tx_submit(struct work_struct *ws)
-{
- int result;
- struct i2400ms *i2400ms = container_of(ws, struct i2400ms, tx_worker);
- struct i2400m *i2400m = &i2400ms->i2400m;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- struct i2400m_msg_hdr *tx_msg;
- size_t tx_msg_size;
-
- d_fnstart(4, dev, "(i2400ms %p, i2400m %p)\n", i2400ms, i2400ms);
-
- while (NULL != (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
- d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
- d_dump(5, dev, tx_msg, tx_msg_size);
-
- sdio_claim_host(func);
- result = sdio_memcpy_toio(func, 0, tx_msg, tx_msg_size);
- sdio_release_host(func);
-
- i2400m_tx_msg_sent(i2400m);
-
- if (result < 0) {
- dev_err(dev, "TX: cannot submit TX; tx_msg @%zu %zu B:"
- " %d\n", (void *) tx_msg - i2400m->tx_buf,
- tx_msg_size, result);
- }
-
- if (result == -ETIMEDOUT) {
- i2400m_error_recovery(i2400m);
- break;
- }
- d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
- }
-
- d_fnend(4, dev, "(i2400ms %p) = void\n", i2400ms);
-}
-
-
-/*
- * The generic driver notifies us that there is data ready for TX
- *
- * Schedule a run of i2400ms_tx_submit() to handle it.
- */
-void i2400ms_bus_tx_kick(struct i2400m *i2400m)
-{
- struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- struct device *dev = &i2400ms->func->dev;
- unsigned long flags;
-
- d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
-
- /* schedule tx work, this is because tx may block, therefore
- * it has to run in a thread context.
- */
- spin_lock_irqsave(&i2400m->tx_lock, flags);
- if (i2400ms->tx_workqueue != NULL)
- queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
- spin_unlock_irqrestore(&i2400m->tx_lock, flags);
-
- d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
-}
-
-int i2400ms_tx_setup(struct i2400ms *i2400ms)
-{
- int result;
- struct device *dev = &i2400ms->func->dev;
- struct i2400m *i2400m = &i2400ms->i2400m;
- struct workqueue_struct *tx_workqueue;
- unsigned long flags;
-
- d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
-
- INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
- snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
- "%s-tx", i2400m->wimax_dev.name);
- tx_workqueue =
- create_singlethread_workqueue(i2400ms->tx_wq_name);
- if (tx_workqueue == NULL) {
- dev_err(dev, "TX: failed to create workqueue\n");
- result = -ENOMEM;
- } else
- result = 0;
- spin_lock_irqsave(&i2400m->tx_lock, flags);
- i2400ms->tx_workqueue = tx_workqueue;
- spin_unlock_irqrestore(&i2400m->tx_lock, flags);
- d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
- return result;
-}
-
-void i2400ms_tx_release(struct i2400ms *i2400ms)
-{
- struct i2400m *i2400m = &i2400ms->i2400m;
- struct workqueue_struct *tx_workqueue;
- unsigned long flags;
-
- tx_workqueue = i2400ms->tx_workqueue;
-
- spin_lock_irqsave(&i2400m->tx_lock, flags);
- i2400ms->tx_workqueue = NULL;
- spin_unlock_irqrestore(&i2400m->tx_lock, flags);
-
- if (tx_workqueue)
- destroy_workqueue(tx_workqueue);
-}
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
deleted file mode 100644
index 21a9edd6e75d..000000000000
--- a/drivers/net/wimax/i2400m/sdio.c
+++ /dev/null
@@ -1,602 +0,0 @@
-/*
- * Intel Wireless WiMAX Connection 2400m
- * Linux driver model glue for the SDIO device, reset & fw upload
- *
- *
- * Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
- * Dirk Brandewie <dirk.j.brandewie@intel.com>
- * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
- * Yanir Lubetkin <yanirx.lubetkin@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- *
- * See i2400m-sdio.h for a general description of this driver.
- *
- * This file implements driver model glue, and hook ups for the
- * generic driver to implement the bus-specific functions (device
- * communication setup/tear down, firmware upload and resetting).
- *
- * ROADMAP
- *
- * i2400m_probe()
- * alloc_netdev()
- * i2400ms_netdev_setup()
- * i2400ms_init()
- * i2400m_netdev_setup()
- * i2400ms_enable_function()
- * i2400m_setup()
- *
- * i2400m_remove()
- * i2400m_release()
- * free_netdev(net_dev)
- *
- * i2400ms_bus_reset() Called by i2400m_reset
- * __i2400ms_reset()
- * __i2400ms_send_barker()
- */
-
-#include <linux/slab.h>
-#include <linux/debugfs.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/sdio_func.h>
-#include "i2400m-sdio.h"
-#include <linux/wimax/i2400m.h>
-#include <linux/module.h>
-
-#define D_SUBMODULE main
-#include "sdio-debug-levels.h"
-
-/* IOE WiMAX function timeout in seconds */
-static int ioe_timeout = 2;
-module_param(ioe_timeout, int, 0);
-
-static char i2400ms_debug_params[128];
-module_param_string(debug, i2400ms_debug_params, sizeof(i2400ms_debug_params),
- 0644);
-MODULE_PARM_DESC(debug,
- "String of space-separated NAME:VALUE pairs, where NAMEs "
- "are the different debug submodules and VALUE are the "
- "initial debug value to set.");
-
-/* Our firmware file name list */
-static const char *i2400ms_bus_fw_names[] = {
-#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-1.3.sbcf"
- I2400MS_FW_FILE_NAME,
- NULL
-};
-
-
-static const struct i2400m_poke_table i2400ms_pokes[] = {
- I2400M_FW_POKE(0x6BE260, 0x00000088),
- I2400M_FW_POKE(0x080550, 0x00000005),
- I2400M_FW_POKE(0xAE0000, 0x00000000),
- I2400M_FW_POKE(0x000000, 0x00000000), /* MUST be 0 terminated or bad
- * things will happen */
-};
-
-/*
- * Enable the SDIO function
- *
- * Tries to enable the SDIO function; might fail if it is still not
- * ready (in some hardware, the SDIO WiMAX function is only enabled
- * when we ask it to explicitly doing). Tries until a timeout is
- * reached.
- *
- * The @maxtries argument indicates how many times (at most) it should
- * be tried to enable the function. 0 means forever. This acts along
- * with the timeout (ie: it'll stop trying as soon as the maximum
- * number of tries is reached _or_ as soon as the timeout is reached).
- *
- * The reverse of this is...sdio_disable_function()
- *
- * Returns: 0 if the SDIO function was enabled, < 0 errno code on
- * error (-ENODEV when it was unable to enable the function).
- */
-static
-int i2400ms_enable_function(struct i2400ms *i2400ms, unsigned maxtries)
-{
- struct sdio_func *func = i2400ms->func;
- u64 timeout;
- int err;
- struct device *dev = &func->dev;
- unsigned tries = 0;
-
- d_fnstart(3, dev, "(func %p)\n", func);
- /* Setup timeout (FIXME: This needs to read the CIS table to
- * get a real timeout) and then wait for the device to signal
- * it is ready */
- timeout = get_jiffies_64() + ioe_timeout * HZ;
- err = -ENODEV;
- while (err != 0 && time_before64(get_jiffies_64(), timeout)) {
- sdio_claim_host(func);
- /*
- * There is a sillicon bug on the IWMC3200, where the
- * IOE timeout will cause problems on Moorestown
- * platforms (system hang). We explicitly overwrite
- * func->enable_timeout here to work around the issue.
- */
- if (i2400ms->iwmc3200)
- func->enable_timeout = IWMC3200_IOR_TIMEOUT;
- err = sdio_enable_func(func);
- if (0 == err) {
- sdio_release_host(func);
- d_printf(2, dev, "SDIO function enabled\n");
- goto function_enabled;
- }
- d_printf(2, dev, "SDIO function failed to enable: %d\n", err);
- sdio_release_host(func);
- if (maxtries > 0 && ++tries >= maxtries) {
- err = -ETIME;
- break;
- }
- msleep(I2400MS_INIT_SLEEP_INTERVAL);
- }
- /* If timed out, device is not there yet -- get -ENODEV so
- * the device driver core will retry later on. */
- if (err == -ETIME) {
- dev_err(dev, "Can't enable WiMAX function; "
- " has the function been enabled?\n");
- err = -ENODEV;
- }
-function_enabled:
- d_fnend(3, dev, "(func %p) = %d\n", func, err);
- return err;
-}
-
-
-/*
- * Setup minimal device communication infrastructure needed to at
- * least be able to update the firmware.
- *
- * Note the ugly trick: if we are in the probe path
- * (i2400ms->debugfs_dentry == NULL), we only retry function
- * enablement one, to avoid racing with the iwmc3200 top controller.
- */
-static
-int i2400ms_bus_setup(struct i2400m *i2400m)
-{
- int result;
- struct i2400ms *i2400ms =
- container_of(i2400m, struct i2400ms, i2400m);
- struct device *dev = i2400m_dev(i2400m);
- struct sdio_func *func = i2400ms->func;
- int retries;
-
- sdio_claim_host(func);
- result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
- sdio_release_host(func);
- if (result < 0) {
- dev_err(dev, "Failed to set block size: %d\n", result);
- goto error_set_blk_size;
- }
-
- if (i2400ms->iwmc3200 && i2400ms->debugfs_dentry == NULL)
- retries = 1;
- else
- retries = 0;
- result = i2400ms_enable_function(i2400ms, retries);
- if (result < 0) {
- dev_err(dev, "Cannot enable SDIO function: %d\n", result);
- goto error_func_enable;
- }
-
- result = i2400ms_tx_setup(i2400ms);
- if (result < 0)
- goto error_tx_setup;
- result = i2400ms_rx_setup(i2400ms);
- if (result < 0)
- goto error_rx_setup;
- return 0;
-
-error_rx_setup:
- i2400ms_tx_release(i2400ms);
-error_tx_setup:
- sdio_claim_host(func);
- sdio_disable_func(func);
- sdio_release_host(func);
-error_func_enable:
-error_set_blk_size:
- return result;
-}
-
-
-/*
- * Tear down minimal device communication infrastructure needed to at
- * least be able to update the firmware.
- */
-static
-void i2400ms_bus_release(struct i2400m *i2400m)
-{
- struct i2400ms *i2400ms =
- container_of(i2400m, struct i2400ms, i2400m);
- struct sdio_func *func = i2400ms->func;
-
- i2400ms_rx_release(i2400ms);
- i2400ms_tx_release(i2400ms);
- sdio_claim_host(func);
- sdio_disable_func(func);
- sdio_release_host(func);
-}
-
-
-/*
- * Setup driver resources needed to communicate with the device
- *
- * The fw needs some time to settle, and it was just uploaded,
- * so give it a break first. I'd prefer to just wait for the device to
- * send something, but seems the poking we do to enable SDIO stuff
- * interferes with it, so just give it a break before starting...
- */
-static
-int i2400ms_bus_dev_start(struct i2400m *i2400m)
-{
- struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
-
- d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
- msleep(200);
- d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, 0);
- return 0;
-}
-
-
-/*
- * Sends a barker buffer to the device
- *
- * This helper will allocate a kmalloced buffer and use it to transmit
- * (then free it). Reason for this is that the SDIO host controller
- * expects alignment (unknown exactly which) which the stack won't
- * really provide and certain arches/host-controller combinations
- * cannot use stack/vmalloc/text areas for DMA transfers.
- */
-static
-int __i2400ms_send_barker(struct i2400ms *i2400ms,
- const __le32 *barker, size_t barker_size)
-{
- int ret;
- struct sdio_func *func = i2400ms->func;
- struct device *dev = &func->dev;
- void *buffer;
-
- ret = -ENOMEM;
- buffer = kmalloc(I2400MS_BLK_SIZE, GFP_KERNEL);
- if (buffer == NULL)
- goto error_kzalloc;
-
- memcpy(buffer, barker, barker_size);
- sdio_claim_host(func);
- ret = sdio_memcpy_toio(func, 0, buffer, I2400MS_BLK_SIZE);
- sdio_release_host(func);
-
- if (ret < 0)
- d_printf(0, dev, "E: barker error: %d\n", ret);
-
- kfree(buffer);
-error_kzalloc:
- return ret;
-}
-
-
-/*
- * Reset a device at different levels (warm, cold or bus)
- *
- * @i2400ms: device descriptor
- * @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS)
- *
- * FIXME: not tested -- need to confirm expected effects
- *
- * Warm and cold resets get an SDIO reset if they fail (unimplemented)
- *
- * Warm reset:
- *
- * The device will be fully reset internally, but won't be
- * disconnected from the bus (so no reenumeration will
- * happen). Firmware upload will be necessary.
- *
- * The device will send a reboot barker that will trigger the driver
- * to reinitialize the state via __i2400m_dev_reset_handle.
- *
- *
- * Cold and bus reset:
- *
- * The device will be fully reset internally, disconnected from the
- * bus an a reenumeration will happen. Firmware upload will be
- * necessary. Thus, we don't do any locking or struct
- * reinitialization, as we are going to be fully disconnected and
- * reenumerated.
- *
- * Note we need to return -ENODEV if a warm reset was requested and we
- * had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
- * and wimax_dev->op_reset.
- *
- * WARNING: no driver state saved/fixed
- */
-static
-int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
-{
- int result = 0;
- struct i2400ms *i2400ms =
- container_of(i2400m, struct i2400ms, i2400m);
- struct device *dev = i2400m_dev(i2400m);
- static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
- cpu_to_le32(I2400M_WARM_RESET_BARKER),
- cpu_to_le32(I2400M_WARM_RESET_BARKER),
- cpu_to_le32(I2400M_WARM_RESET_BARKER),
- cpu_to_le32(I2400M_WARM_RESET_BARKER),
- };
- static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
- cpu_to_le32(I2400M_COLD_RESET_BARKER),
- cpu_to_le32(I2400M_COLD_RESET_BARKER),
- cpu_to_le32(I2400M_COLD_RESET_BARKER),
- cpu_to_le32(I2400M_COLD_RESET_BARKER),
- };
-
- if (rt == I2400M_RT_WARM)
- result = __i2400ms_send_barker(i2400ms, i2400m_WARM_BOOT_BARKER,
- sizeof(i2400m_WARM_BOOT_BARKER));
- else if (rt == I2400M_RT_COLD)
- result = __i2400ms_send_barker(i2400ms, i2400m_COLD_BOOT_BARKER,
- sizeof(i2400m_COLD_BOOT_BARKER));
- else if (rt == I2400M_RT_BUS) {
-do_bus_reset:
-
- i2400ms_bus_release(i2400m);
-
- /* Wait for the device to settle */
- msleep(40);
-
- result = i2400ms_bus_setup(i2400m);
- } else
- BUG();
- if (result < 0 && rt != I2400M_RT_BUS) {
- dev_err(dev, "%s reset failed (%d); trying SDIO reset\n",
- rt == I2400M_RT_WARM ? "warm" : "cold", result);
- rt = I2400M_RT_BUS;
- goto do_bus_reset;
- }
- return result;
-}
-
-
-static
-void i2400ms_netdev_setup(struct net_device *net_dev)
-{
- struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
- struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- i2400ms_init(i2400ms);
- i2400m_netdev_setup(net_dev);
-}
-
-
-/*
- * Debug levels control; see debug.h
- */
-struct d_level D_LEVEL[] = {
- D_SUBMODULE_DEFINE(main),
- D_SUBMODULE_DEFINE(tx),
- D_SUBMODULE_DEFINE(rx),
- D_SUBMODULE_DEFINE(fw),
-};
-size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
-
-
-#define __debugfs_register(prefix, name, parent) \
-do { \
- result = d_level_register_debugfs(prefix, name, parent); \
- if (result < 0) \
- goto error; \
-} while (0)
-
-
-static
-int i2400ms_debugfs_add(struct i2400ms *i2400ms)
-{
- int result;
- struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry;
-
- dentry = debugfs_create_dir("i2400m-sdio", dentry);
- result = PTR_ERR(dentry);
- if (IS_ERR(dentry)) {
- if (result == -ENODEV)
- result = 0; /* No debugfs support */
- goto error;
- }
- i2400ms->debugfs_dentry = dentry;
- __debugfs_register("dl_", main, dentry);
- __debugfs_register("dl_", tx, dentry);
- __debugfs_register("dl_", rx, dentry);
- __debugfs_register("dl_", fw, dentry);
-
- return 0;
-
-error:
- debugfs_remove_recursive(i2400ms->debugfs_dentry);
- i2400ms->debugfs_dentry = NULL;
- return result;
-}
-
-
-static struct device_type i2400ms_type = {
- .name = "wimax",
-};
-
-/*
- * Probe a i2400m interface and register it
- *
- * @func: SDIO function
- * @id: SDIO device ID
- * @returns: 0 if ok, < 0 errno code on error.
- *
- * Alloc a net device, initialize the bus-specific details and then
- * calls the bus-generic initialization routine. That will register
- * the wimax and netdev devices, upload the firmware [using
- * _bus_bm_*()], call _bus_dev_start() to finalize the setup of the
- * communication with the device and then will start to talk to it to
- * finnish setting it up.
- *
- * Initialization is tricky; some instances of the hw are packed with
- * others in a way that requires a third driver that enables the WiMAX
- * function. In those cases, we can't enable the SDIO function and
- * we'll return with -ENODEV. When the driver that enables the WiMAX
- * function does its thing, it has to do a bus_rescan_devices() on the
- * SDIO bus so this driver is called again to enumerate the WiMAX
- * function.
- */
-static
-int i2400ms_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- int result;
- struct net_device *net_dev;
- struct device *dev = &func->dev;
- struct i2400m *i2400m;
- struct i2400ms *i2400ms;
-
- /* Allocate instance [calls i2400m_netdev_setup() on it]. */
- result = -ENOMEM;
- net_dev = alloc_netdev(sizeof(*i2400ms), "wmx%d",
- i2400ms_netdev_setup);
- if (net_dev == NULL) {
- dev_err(dev, "no memory for network device instance\n");
- goto error_alloc_netdev;
- }
- SET_NETDEV_DEV(net_dev, dev);
- SET_NETDEV_DEVTYPE(net_dev, &i2400ms_type);
- i2400m = net_dev_to_i2400m(net_dev);
- i2400ms = container_of(i2400m, struct i2400ms, i2400m);
- i2400m->wimax_dev.net_dev = net_dev;
- i2400ms->func = func;
- sdio_set_drvdata(func, i2400ms);
-
- i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
- /*
- * Room required in the TX queue for SDIO message to accommodate
- * a smallest payload while allocating header space is 224 bytes,
- * which is the smallest message size(the block size 256 bytes)
- * minus the smallest message header size(32 bytes).
- */
- i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
- i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
- i2400m->bus_setup = i2400ms_bus_setup;
- i2400m->bus_dev_start = i2400ms_bus_dev_start;
- i2400m->bus_dev_stop = NULL;
- i2400m->bus_release = i2400ms_bus_release;
- i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
- i2400m->bus_reset = i2400ms_bus_reset;
- /* The iwmc3200-wimax sometimes requires the driver to try
- * hard when we paint it into a corner. */
- i2400m->bus_bm_retries = I2400M_SDIO_BOOT_RETRIES;
- i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
- i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
- i2400m->bus_fw_names = i2400ms_bus_fw_names;
- i2400m->bus_bm_mac_addr_impaired = 1;
- i2400m->bus_bm_pokes_table = &i2400ms_pokes[0];
-
- switch (func->device) {
- case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX:
- case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5:
- i2400ms->iwmc3200 = 1;
- break;
- default:
- i2400ms->iwmc3200 = 0;
- }
-
- result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
- if (result < 0) {
- dev_err(dev, "cannot setup device: %d\n", result);
- goto error_setup;
- }
-
- result = i2400ms_debugfs_add(i2400ms);
- if (result < 0) {
- dev_err(dev, "cannot create SDIO debugfs: %d\n",
- result);
- goto error_debugfs_add;
- }
- return 0;
-
-error_debugfs_add:
- i2400m_release(i2400m);
-error_setup:
- sdio_set_drvdata(func, NULL);
- free_netdev(net_dev);
-error_alloc_netdev:
- return result;
-}
-
-
-static
-void i2400ms_remove(struct sdio_func *func)
-{
- struct device *dev = &func->dev;
- struct i2400ms *i2400ms = sdio_get_drvdata(func);
- struct i2400m *i2400m = &i2400ms->i2400m;
- struct net_device *net_dev = i2400m->wimax_dev.net_dev;
-
- d_fnstart(3, dev, "SDIO func %p\n", func);
- debugfs_remove_recursive(i2400ms->debugfs_dentry);
- i2400ms->debugfs_dentry = NULL;
- i2400m_release(i2400m);
- sdio_set_drvdata(func, NULL);
- free_netdev(net_dev);
- d_fnend(3, dev, "SDIO func %p\n", func);
-}
-
-static
-const struct sdio_device_id i2400ms_sdio_ids[] = {
- /* Intel: i2400m WiMAX (iwmc3200) over SDIO */
- { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
- SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX) },
- { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
- SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5) },
- { /* end: all zeroes */ },
-};
-MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids);
-
-
-static
-struct sdio_driver i2400m_sdio_driver = {
- .name = KBUILD_MODNAME,
- .probe = i2400ms_probe,
- .remove = i2400ms_remove,
- .id_table = i2400ms_sdio_ids,
-};
-
-
-static
-int __init i2400ms_driver_init(void)
-{
- d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400ms_debug_params,
- "i2400m_sdio.debug");
- return sdio_register_driver(&i2400m_sdio_driver);
-}
-module_init(i2400ms_driver_init);
-
-
-static
-void __exit i2400ms_driver_exit(void)
-{
- sdio_unregister_driver(&i2400m_sdio_driver);
-}
-module_exit(i2400ms_driver_exit);
-
-
-MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
-MODULE_DESCRIPTION("Intel 2400M WiMAX networking for SDIO");
-MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(I2400MS_FW_FILE_NAME);
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index 1fda46c55eb3..e74664b84925 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -212,7 +212,7 @@ ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m,
}
if (result != cmd_size) { /* all was transferred? */
dev_err(dev, "boot-mode cmd %d: incomplete transfer "
- "(%zu vs %zu submitted)\n", opcode, result, cmd_size);
+ "(%zd vs %zu submitted)\n", opcode, result, cmd_size);
result = -EIO;
goto error_cmd_size;
}
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 5f58fa53238c..6deaae18db57 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -276,7 +276,6 @@ source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
source "drivers/net/wireless/iwlegacy/Kconfig"
-source "drivers/net/wireless/iwmc3200wifi/Kconfig"
source "drivers/net/wireless/libertas/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
source "drivers/net/wireless/p54/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0ce218b931d4..062dfdff6364 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -53,8 +53,6 @@ obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
obj-$(CONFIG_WL_TI) += ti/
-obj-$(CONFIG_IWM) += iwmc3200wifi/
-
obj-$(CONFIG_MWIFIEX) += mwifiex/
obj-$(CONFIG_BRCMFMAC) += brcm80211/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 0ac09a2bd144..689a71c1af71 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1738,8 +1738,7 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
return -ENOMEM;
}
- priv->tx_ring = (struct adm8211_desc *)(priv->rx_ring +
- priv->rx_ring_size);
+ priv->tx_ring = priv->rx_ring + priv->rx_ring_size;
priv->tx_ring_dma = priv->rx_ring_dma +
sizeof(struct adm8211_desc) * priv->rx_ring_size;
@@ -1855,7 +1854,7 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
if (!is_valid_ether_addr(perm_addr)) {
printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n",
pci_name(pdev));
- random_ether_addr(perm_addr);
+ eth_random_addr(perm_addr);
}
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a747c632597a..f9f15bb3f03a 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1997,7 +1997,7 @@ static int mpi_send_packet (struct net_device *dev)
* ------------------------------------------------
*/
- memcpy((char *)ai->txfids[0].virtual_host_addr,
+ memcpy(ai->txfids[0].virtual_host_addr,
(char *)&wifictlhdr8023, sizeof(wifictlhdr8023));
payloadLen = (__le16 *)(ai->txfids[0].virtual_host_addr +
@@ -4212,7 +4212,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
airo_print_err(ai->dev->name, "%s: len=%d", __func__, len);
rc = -1;
} else {
- memcpy((char *)ai->config_desc.virtual_host_addr,
+ memcpy(ai->config_desc.virtual_host_addr,
pBuf, len);
rc = issuecommand(ai, &cmd, &rsp);
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index c54b7d37bff1..6169fbd23ed1 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -143,6 +143,7 @@ struct ath_common {
u32 keymax;
DECLARE_BITMAP(keymap, ATH_KEYMAX);
DECLARE_BITMAP(tkip_keymap, ATH_KEYMAX);
+ DECLARE_BITMAP(ccmp_keymap, ATH_KEYMAX);
enum ath_crypt_caps crypt_caps;
unsigned int clockrate;
@@ -215,6 +216,7 @@ void ath_printk(const char *level, const struct ath_common *common,
* used exclusively for WLAN-BT coexistence starting from
* AR9462.
* @ATH_DBG_DFS: radar datection
+ * @ATH_DBG_WOW: Wake on Wireless
* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
@@ -242,6 +244,7 @@ enum ATH_DEBUG {
ATH_DBG_BSTUCK = 0x00008000,
ATH_DBG_MCI = 0x00010000,
ATH_DBG_DFS = 0x00020000,
+ ATH_DBG_WOW = 0x00040000,
ATH_DBG_ANY = 0xffffffff
};
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index e18a9aa7b6ca..338c5c42357d 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -64,3 +64,11 @@ config ATH5K_PCI
---help---
This adds support for PCI type chipsets of the 5xxx Atheros
family.
+
+config ATH5K_TEST_CHANNELS
+ bool "Enables testing channels on ath5k"
+ depends on ATH5K && CFG80211_CERTIFICATION_ONUS
+ ---help---
+ This enables non-standard IEEE 802.11 channels on ath5k, which
+ can be used for research purposes. This option should be disabled
+ unless doing research.
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 44ad6fe0278f..8c4c040a47b8 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -74,10 +74,6 @@ bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-static bool modparam_all_channels;
-module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
-MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
-
static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
@@ -258,8 +254,15 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
\********************/
/*
- * Returns true for the channel numbers used without all_channels modparam.
+ * Returns true for the channel numbers used.
*/
+#ifdef CONFIG_ATH5K_TEST_CHANNELS
+static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+{
+ return true;
+}
+
+#else
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
if (band == IEEE80211_BAND_2GHZ && chan <= 14)
@@ -276,6 +279,7 @@ static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
/* 802.11j 4.9GHz (20MHz) */
(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
+#endif
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
@@ -316,8 +320,7 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
if (!ath5k_channel_ok(ah, &channels[count]))
continue;
- if (!modparam_all_channels &&
- !ath5k_is_standard_channel(ch, band))
+ if (!ath5k_is_standard_channel(ch, band))
continue;
count++;
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 22b80af0f47c..260e7dc7f751 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -594,7 +594,7 @@ ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
qi.tqi_aifs = params->aifs;
qi.tqi_cw_min = params->cw_min;
qi.tqi_cw_max = params->cw_max;
- qi.tqi_burst_time = params->txop;
+ qi.tqi_burst_time = params->txop * 32;
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
"Configure tx [queue %d], "
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index b869a358ce43..86aeef4b9d7e 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -53,6 +53,11 @@
#define DEFAULT_BG_SCAN_PERIOD 60
+struct ath6kl_cfg80211_match_probe_ssid {
+ struct cfg80211_ssid ssid;
+ u8 flag;
+};
+
static struct ieee80211_rate ath6kl_rates[] = {
RATETAB_ENT(10, 0x1, 0),
RATETAB_ENT(20, 0x2, 0),
@@ -576,6 +581,9 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
vif->nw_type = vif->next_mode;
+ /* enable enhanced bmiss detection if applicable */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, true);
+
if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
nw_subtype = SUBTYPE_P2PCLIENT;
@@ -852,20 +860,6 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
}
}
- /*
- * Send a disconnect command to target when a disconnect event is
- * received with reason code other than 3 (DISCONNECT_CMD - disconnect
- * request from host) to make the firmware stop trying to connect even
- * after giving disconnect event. There will be one more disconnect
- * event for this disconnect command with reason code DISCONNECT_CMD
- * which will be notified to cfg80211.
- */
-
- if (reason != DISCONNECT_CMD) {
- ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
- return;
- }
-
clear_bit(CONNECT_PEND, &vif->flags);
if (vif->sme_state == SME_CONNECTING) {
@@ -875,32 +869,96 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
} else if (vif->sme_state == SME_CONNECTED) {
- cfg80211_disconnected(vif->ndev, reason,
+ cfg80211_disconnected(vif->ndev, proto_reason,
NULL, 0, GFP_KERNEL);
}
vif->sme_state = SME_DISCONNECTED;
+
+ /*
+ * Send a disconnect command to target when a disconnect event is
+ * received with reason code other than 3 (DISCONNECT_CMD - disconnect
+ * request from host) to make the firmware stop trying to connect even
+ * after giving disconnect event. There will be one more disconnect
+ * event for this disconnect command with reason code DISCONNECT_CMD
+ * which won't be notified to cfg80211.
+ */
+ if (reason != DISCONNECT_CMD)
+ ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
}
static int ath6kl_set_probed_ssids(struct ath6kl *ar,
struct ath6kl_vif *vif,
- struct cfg80211_ssid *ssids, int n_ssids)
+ struct cfg80211_ssid *ssids, int n_ssids,
+ struct cfg80211_match_set *match_set,
+ int n_match_ssid)
{
- u8 i;
+ u8 i, j, index_to_add, ssid_found = false;
+ struct ath6kl_cfg80211_match_probe_ssid ssid_list[MAX_PROBED_SSIDS];
+
+ memset(ssid_list, 0, sizeof(ssid_list));
- if (n_ssids > MAX_PROBED_SSID_INDEX)
+ if (n_ssids > MAX_PROBED_SSIDS ||
+ n_match_ssid > MAX_PROBED_SSIDS)
return -EINVAL;
for (i = 0; i < n_ssids; i++) {
+ memcpy(ssid_list[i].ssid.ssid,
+ ssids[i].ssid,
+ ssids[i].ssid_len);
+ ssid_list[i].ssid.ssid_len = ssids[i].ssid_len;
+
+ if (ssids[i].ssid_len)
+ ssid_list[i].flag = SPECIFIC_SSID_FLAG;
+ else
+ ssid_list[i].flag = ANY_SSID_FLAG;
+
+ if (n_match_ssid == 0)
+ ssid_list[i].flag |= MATCH_SSID_FLAG;
+ }
+
+ index_to_add = i;
+
+ for (i = 0; i < n_match_ssid; i++) {
+ ssid_found = false;
+
+ for (j = 0; j < n_ssids; j++) {
+ if ((match_set[i].ssid.ssid_len ==
+ ssid_list[j].ssid.ssid_len) &&
+ (!memcmp(ssid_list[j].ssid.ssid,
+ match_set[i].ssid.ssid,
+ match_set[i].ssid.ssid_len))) {
+ ssid_list[j].flag |= MATCH_SSID_FLAG;
+ ssid_found = true;
+ break;
+ }
+ }
+
+ if (ssid_found)
+ continue;
+
+ if (index_to_add >= MAX_PROBED_SSIDS)
+ continue;
+
+ ssid_list[index_to_add].ssid.ssid_len =
+ match_set[i].ssid.ssid_len;
+ memcpy(ssid_list[index_to_add].ssid.ssid,
+ match_set[i].ssid.ssid,
+ match_set[i].ssid.ssid_len);
+ ssid_list[index_to_add].flag |= MATCH_SSID_FLAG;
+ index_to_add++;
+ }
+
+ for (i = 0; i < index_to_add; i++) {
ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
- ssids[i].ssid_len ?
- SPECIFIC_SSID_FLAG : ANY_SSID_FLAG,
- ssids[i].ssid_len,
- ssids[i].ssid);
+ ssid_list[i].flag,
+ ssid_list[i].ssid.ssid_len,
+ ssid_list[i].ssid.ssid);
+
}
/* Make sure no old entries are left behind */
- for (i = n_ssids; i < MAX_PROBED_SSID_INDEX; i++) {
+ for (i = index_to_add; i < MAX_PROBED_SSIDS; i++) {
ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, i,
DISABLE_SSID_FLAG, 0, NULL);
}
@@ -908,11 +966,11 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar,
return 0;
}
-static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+static int ath6kl_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
- struct ath6kl *ar = ath6kl_priv(ndev);
- struct ath6kl_vif *vif = netdev_priv(ndev);
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(request->wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
s8 n_channels = 0;
u16 *channels = NULL;
int ret = 0;
@@ -934,7 +992,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
}
ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
- request->n_ssids);
+ request->n_ssids, NULL, 0);
if (ret < 0)
return ret;
@@ -943,7 +1001,7 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
- ath6kl_err("failed to set Probe Request appie for scan");
+ ath6kl_err("failed to set Probe Request appie for scan\n");
return ret;
}
@@ -1429,14 +1487,14 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return 0;
}
-static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
- char *name,
- enum nl80211_iftype type,
- u32 *flags,
- struct vif_params *params)
+static struct wireless_dev *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
{
struct ath6kl *ar = wiphy_priv(wiphy);
- struct net_device *ndev;
+ struct wireless_dev *wdev;
u8 if_idx, nw_type;
if (ar->num_vif == ar->vif_max) {
@@ -1449,20 +1507,20 @@ static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
return ERR_PTR(-EINVAL);
}
- ndev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
- if (!ndev)
+ wdev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
+ if (!wdev)
return ERR_PTR(-ENOMEM);
ar->num_vif++;
- return ndev;
+ return wdev;
}
static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
- struct net_device *ndev)
+ struct wireless_dev *wdev)
{
struct ath6kl *ar = wiphy_priv(wiphy);
- struct ath6kl_vif *vif = netdev_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(wdev->netdev);
spin_lock_bh(&ar->list_lock);
list_del(&vif->list);
@@ -1512,6 +1570,9 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
}
}
+ /* need to clean up enhanced bmiss detection fw state */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
+
set_iface_type:
switch (type) {
case NL80211_IFTYPE_STATION:
@@ -2074,7 +2135,9 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (wow && (wow->n_patterns > WOW_MAX_FILTERS_PER_LIST))
return -EINVAL;
- if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags)) {
+ if (!test_bit(NETDEV_MCAST_ALL_ON, &vif->flags) &&
+ test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+ ar->fw_capabilities)) {
ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
vif->fw_vif_idx, false);
if (ret)
@@ -2209,7 +2272,9 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
ar->state = ATH6KL_STATE_ON;
- if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags)) {
+ if (!test_bit(NETDEV_MCAST_ALL_OFF, &vif->flags) &&
+ test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+ ar->fw_capabilities)) {
ret = ath6kl_wmi_mcast_filter_cmd(vif->ar->wmi,
vif->fw_vif_idx, true);
if (ret)
@@ -2475,7 +2540,7 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
bool ht_enable)
{
- struct ath6kl_htcap *htcap = &vif->htcap;
+ struct ath6kl_htcap *htcap = &vif->htcap[band];
if (htcap->ht_enable == ht_enable)
return 0;
@@ -2585,33 +2650,28 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
return 0;
}
-static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable)
{
- struct ath6kl_vif *vif;
+ int err;
- /*
- * 'dev' could be NULL if a channel change is required for the hardware
- * device itself, instead of a particular VIF.
- *
- * FIXME: To be handled properly when monitor mode is supported.
- */
- if (!dev)
- return -EBUSY;
+ if (WARN_ON(!test_bit(WMI_READY, &vif->ar->flag)))
+ return;
- vif = netdev_priv(dev);
+ if (vif->nw_type != INFRA_NETWORK)
+ return;
- if (!ath6kl_cfg80211_ready(vif))
- return -EIO;
+ if (!test_bit(ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
+ vif->ar->fw_capabilities))
+ return;
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
- __func__, chan->center_freq, chan->hw_value);
- vif->next_chan = chan->center_freq;
- vif->next_ch_type = channel_type;
- vif->next_ch_band = chan->band;
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s fw bmiss enhance\n",
+ enable ? "enable" : "disable");
- return 0;
+ err = ath6kl_wmi_sta_bmiss_enhance_cmd(vif->ar->wmi,
+ vif->fw_vif_idx, enable);
+ if (err)
+ ath6kl_err("failed to %s enhanced bmiss detection: %d\n",
+ enable ? "enable" : "disable", err);
}
static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
@@ -2694,9 +2754,15 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
/* TODO:
* info->interval
- * info->dtim_period
*/
+ ret = ath6kl_wmi_ap_set_dtim_cmd(ar->wmi, vif->fw_vif_idx,
+ info->dtim_period);
+
+ /* ignore error, just print a warning and continue normally */
+ if (ret)
+ ath6kl_warn("Failed to set dtim_period in beacon: %d\n", ret);
+
if (info->beacon.head == NULL)
return -EINVAL;
mgmt = (struct ieee80211_mgmt *) info->beacon.head;
@@ -2791,7 +2857,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
p.ssid_len = vif->ssid_len;
memcpy(p.ssid, vif->ssid, vif->ssid_len);
p.dot11_auth_mode = vif->dot11_auth_mode;
- p.ch = cpu_to_le16(vif->next_chan);
+ p.ch = cpu_to_le16(info->channel->center_freq);
/* Enable uAPSD support by default */
res = ath6kl_wmi_ap_set_apsd(ar->wmi, vif->fw_vif_idx, true);
@@ -2815,8 +2881,8 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
return res;
}
- if (ath6kl_set_htcap(vif, vif->next_ch_band,
- vif->next_ch_type != NL80211_CHAN_NO_HT))
+ if (ath6kl_set_htcap(vif, info->channel->band,
+ info->channel_type != NL80211_CHAN_NO_HT))
return -EIO;
/*
@@ -2909,14 +2975,14 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
}
static int ath6kl_remain_on_channel(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration,
u64 *cookie)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
u32 id;
/* TODO: if already pending or ongoing remain-on-channel,
@@ -2933,11 +2999,11 @@ static int ath6kl_remain_on_channel(struct wiphy *wiphy,
}
static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u64 cookie)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
if (cookie != vif->last_roc_id)
return -ENOENT;
@@ -3068,15 +3134,15 @@ static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
return false;
}
-static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
bool dont_wait_for_ack, u64 *cookie)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
+ struct ath6kl *ar = ath6kl_priv(vif->ndev);
u32 id;
const struct ieee80211_mgmt *mgmt;
bool more_data, queued;
@@ -3121,10 +3187,10 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
}
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u16 frame_type, bool reg)
{
- struct ath6kl_vif *vif = netdev_priv(dev);
+ struct ath6kl_vif *vif = ath6kl_vif_from_wdev(wdev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
__func__, frame_type, reg);
@@ -3160,10 +3226,24 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
ath6kl_cfg80211_scan_complete_event(vif, true);
ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
- request->n_ssids);
+ request->n_ssids,
+ request->match_sets,
+ request->n_match_sets);
if (ret < 0)
return ret;
+ if (!request->n_match_sets) {
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ ALL_BSS_FILTER, 0);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ MATCHED_SSID_FILTER, 0);
+ if (ret < 0)
+ return ret;
+ }
+
/* fw uses seconds, also make sure that it's >0 */
interval = max_t(u16, 1, request->interval / 1000);
@@ -3185,7 +3265,7 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
- ath6kl_warn("Failed to set probe request IE for scheduled scan: %d",
+ ath6kl_warn("Failed to set probe request IE for scheduled scan: %d\n",
ret);
return ret;
}
@@ -3217,6 +3297,18 @@ static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
return 0;
}
+static int ath6kl_cfg80211_set_bitrate(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ return ath6kl_wmi_set_bitrate_mask(ar->wmi, vif->fw_vif_idx,
+ mask);
+}
+
static const struct ieee80211_txrx_stypes
ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_STATION] = {
@@ -3271,7 +3363,6 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.suspend = __ath6kl_cfg80211_suspend,
.resume = __ath6kl_cfg80211_resume,
#endif
- .set_channel = ath6kl_set_channel,
.start_ap = ath6kl_start_ap,
.change_beacon = ath6kl_change_beacon,
.stop_ap = ath6kl_stop_ap,
@@ -3283,6 +3374,7 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.mgmt_frame_register = ath6kl_mgmt_frame_register,
.sched_scan_start = ath6kl_cfg80211_sscan_start,
.sched_scan_stop = ath6kl_cfg80211_sscan_stop,
+ .set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
};
void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
@@ -3385,9 +3477,9 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
ar->num_vif--;
}
-struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
- enum nl80211_iftype type, u8 fw_vif_idx,
- u8 nw_type)
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name,
+ enum nl80211_iftype type,
+ u8 fw_vif_idx, u8 nw_type)
{
struct net_device *ndev;
struct ath6kl_vif *vif;
@@ -3410,7 +3502,8 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
vif->bg_scan_period = 0;
- vif->htcap.ht_enable = true;
+ vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true;
+ vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
if (fw_vif_idx != 0)
@@ -3440,7 +3533,7 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
list_add_tail(&vif->list, &ar->vif_list);
spin_unlock_bh(&ar->list_lock);
- return ndev;
+ return &vif->wdev;
err:
aggr_module_destroy(vif->aggr_cntxt);
@@ -3470,7 +3563,13 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
}
/* max num of ssids that can be probed during scanning */
- wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wiphy->max_scan_ssids = MAX_PROBED_SSIDS;
+
+ /* max num of ssids that can be matched after scan */
+ if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
+ ar->fw_capabilities))
+ wiphy->max_match_sets = MAX_PROBED_SSIDS;
+
wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
switch (ar->hw.cap) {
case WMI_11AN_CAP:
@@ -3507,6 +3606,17 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
ath6kl_band_5ghz.ht_cap.cap = 0;
ath6kl_band_5ghz.ht_cap.ht_supported = false;
}
+
+ if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES) {
+ ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
+ ath6kl_band_5ghz.ht_cap.mcs.rx_mask[1] = 0xff;
+ } else {
+ ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
+ }
+
if (band_2gig)
wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
if (band_5gig)
@@ -3517,6 +3627,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->cipher_suites = cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+#ifdef CONFIG_PM
wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@@ -3526,8 +3637,9 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
wiphy->wowlan.pattern_min_len = 1;
wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
+#endif
- wiphy->max_sched_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
WIPHY_FLAG_HAVE_AP_SME |
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index 5ea8cbb79f43..56b1ebe79812 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -25,9 +25,9 @@ enum ath6kl_cfg_suspend_mode {
ATH6KL_CFG_SUSPEND_SCHED_SCAN,
};
-struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
- enum nl80211_iftype type,
- u8 fw_vif_idx, u8 nw_type);
+struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, char *name,
+ enum nl80211_iftype type,
+ u8 fw_vif_idx, u8 nw_type);
void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
enum wmi_phy_mode mode);
void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
@@ -62,5 +62,7 @@ void ath6kl_cfg80211_cleanup(struct ath6kl *ar);
struct ath6kl *ath6kl_cfg80211_create(void);
void ath6kl_cfg80211_destroy(struct ath6kl *ar);
+/* TODO: remove this once ath6kl_vif_cleanup() is moved to cfg80211.c */
+void ath6kl_cfg80211_sta_bmiss_enhance(struct ath6kl_vif *vif, bool enable);
#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index fdb3b1decc76..82c4dd2a960e 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(ath6kl_core_rx_complete);
int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
{
struct ath6kl_bmi_target_info targ_info;
- struct net_device *ndev;
+ struct wireless_dev *wdev;
int ret = 0, i;
switch (htc_type) {
@@ -187,12 +187,12 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
rtnl_lock();
/* Add an initial station interface */
- ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
+ wdev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
INFRA_NETWORK);
rtnl_unlock();
- if (!ndev) {
+ if (!wdev) {
ath6kl_err("Failed to instantiate a network device\n");
ret = -ENOMEM;
wiphy_unregister(ar->wiphy);
@@ -200,7 +200,7 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
}
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
- __func__, ndev->name, ndev, ar);
+ __func__, wdev->netdev->name, wdev->netdev, ar);
return ret;
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 4d9c6f142698..cec49a31029a 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -100,6 +100,21 @@ enum ath6kl_fw_capability {
/* Firmware has support to override rsn cap of rsn ie */
ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
+ /*
+ * Multicast support in WOW and host awake mode.
+ * Allow all multicast in host awake mode.
+ * Apply multicast filter in WOW mode.
+ */
+ ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+
+ /* Firmware supports enhanced bmiss detection */
+ ATH6KL_FW_CAPABILITY_BMISS_ENHANCE,
+
+ /*
+ * FW supports matching of ssid in schedule scan
+ */
+ ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST,
+
/* this needs to be last */
ATH6KL_FW_CAPABILITY_MAX,
};
@@ -112,6 +127,10 @@ struct ath6kl_fw_ie {
u8 data[0];
};
+enum ath6kl_hw_flags {
+ ATH6KL_HW_FLAG_64BIT_RATES = BIT(0),
+};
+
#define ATH6KL_FW_API2_FILE "fw-2.bin"
#define ATH6KL_FW_API3_FILE "fw-3.bin"
@@ -196,7 +215,7 @@ struct ath6kl_fw_ie {
#define AGGR_NUM_OF_FREE_NETBUFS 16
-#define AGGR_RX_TIMEOUT 400 /* in ms */
+#define AGGR_RX_TIMEOUT 100 /* in ms */
#define WMI_TIMEOUT (2 * HZ)
@@ -245,7 +264,6 @@ struct skb_hold_q {
struct rxtid {
bool aggr;
- bool progress;
bool timer_mon;
u16 win_sz;
u16 seq_next;
@@ -254,9 +272,15 @@ struct rxtid {
struct sk_buff_head q;
/*
- * FIXME: No clue what this should protect. Apparently it should
- * protect some of the fields above but they are also accessed
- * without taking the lock.
+ * lock mainly protects seq_next and hold_q. Movement of seq_next
+ * needs to be protected between aggr_timeout() and
+ * aggr_process_recv_frm(). hold_q will be holding the pending
+ * reorder frames and it's access should also be protected.
+ * Some of the other fields like hold_q_sz, win_sz and aggr are
+ * initialized/reset when receiving addba/delba req, also while
+ * deleting aggr state all the pending buffers are flushed before
+ * resetting these fields, so there should not be any race in accessing
+ * these fields.
*/
spinlock_t lock;
};
@@ -541,7 +565,7 @@ struct ath6kl_vif {
struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
struct aggr_info *aggr_cntxt;
- struct ath6kl_htcap htcap;
+ struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS];
struct timer_list disconnect_timer;
struct timer_list sched_scan_timer;
@@ -553,9 +577,6 @@ struct ath6kl_vif {
u32 last_cancel_roc_id;
u32 send_action_id;
bool probe_req_report;
- u16 next_chan;
- enum nl80211_channel_type next_ch_type;
- enum ieee80211_band next_ch_band;
u16 assoc_bss_beacon_int;
u16 listen_intvl_t;
u16 bmiss_time_t;
@@ -568,6 +589,11 @@ struct ath6kl_vif {
struct list_head mc_filter;
};
+static inline struct ath6kl_vif *ath6kl_vif_from_wdev(struct wireless_dev *wdev)
+{
+ return container_of(wdev, struct ath6kl_vif, wdev);
+}
+
#define WOW_LIST_ID 0
#define WOW_HOST_REQ_DELAY 500 /* ms */
@@ -687,6 +713,8 @@ struct ath6kl {
u32 testscript_addr;
enum wmi_phy_cap cap;
+ u32 flags;
+
struct ath6kl_hw_fw {
const char *dir;
const char *otp;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 2798624d3a9d..cd0e1ba410d6 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -1309,7 +1309,7 @@ static int ath6kl_htc_rx_packet(struct htc_target *target,
}
ath6kl_dbg(ATH6KL_DBG_HTC,
- "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
+ "htc rx 0x%p hdr 0x%x len %d mbox 0x%x\n",
packet, packet->info.rx.exp_hdr,
padded_len, dev->ar->mbox_info.htc_addr);
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 7eb0515f458a..f90b5db741cf 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -42,6 +42,7 @@ static const struct ath6kl_hw hw_list[] = {
.reserved_ram_size = 6912,
.refclk_hz = 26000000,
.uarttx_pin = 8,
+ .flags = 0,
/* hw2.0 needs override address hardcoded */
.app_start_override_addr = 0x944C00,
@@ -67,6 +68,7 @@ static const struct ath6kl_hw hw_list[] = {
.refclk_hz = 26000000,
.uarttx_pin = 8,
.testscript_addr = 0x57ef74,
+ .flags = 0,
.fw = {
.dir = AR6003_HW_2_1_1_FW_DIR,
@@ -91,6 +93,7 @@ static const struct ath6kl_hw hw_list[] = {
.board_addr = 0x433900,
.refclk_hz = 26000000,
.uarttx_pin = 11,
+ .flags = ATH6KL_HW_FLAG_64BIT_RATES,
.fw = {
.dir = AR6004_HW_1_0_FW_DIR,
@@ -110,6 +113,7 @@ static const struct ath6kl_hw hw_list[] = {
.board_addr = 0x43d400,
.refclk_hz = 40000000,
.uarttx_pin = 11,
+ .flags = ATH6KL_HW_FLAG_64BIT_RATES,
.fw = {
.dir = AR6004_HW_1_1_FW_DIR,
@@ -129,6 +133,7 @@ static const struct ath6kl_hw hw_list[] = {
.board_addr = 0x435c00,
.refclk_hz = 40000000,
.uarttx_pin = 11,
+ .flags = ATH6KL_HW_FLAG_64BIT_RATES,
.fw = {
.dir = AR6004_HW_1_2_FW_DIR,
@@ -938,6 +943,14 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
}
switch (ie_id) {
+ case ATH6KL_FW_IE_FW_VERSION:
+ strlcpy(ar->wiphy->fw_version, data,
+ sizeof(ar->wiphy->fw_version));
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found fw version %s\n",
+ ar->wiphy->fw_version);
+ break;
case ATH6KL_FW_IE_OTP_IMAGE:
ath6kl_dbg(ATH6KL_DBG_BOOT, "found otp image ie (%zd B)\n",
ie_len);
@@ -991,9 +1004,6 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
ar->hw.reserved_ram_size);
break;
case ATH6KL_FW_IE_CAPABILITIES:
- if (ie_len < DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8))
- break;
-
ath6kl_dbg(ATH6KL_DBG_BOOT,
"found firmware capabilities ie (%zd B)\n",
ie_len);
@@ -1002,6 +1012,9 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
index = i / 8;
bit = i % 8;
+ if (index == ie_len)
+ break;
+
if (data[index] & (1 << bit))
__set_bit(i, ar->fw_capabilities);
}
@@ -1392,6 +1405,12 @@ static int ath6kl_init_upload(struct ath6kl *ar)
ar->version.target_ver == AR6003_HW_2_1_1_VERSION) {
ath6kl_err("temporary war to avoid sdio crc error\n");
+ param = 0x28;
+ address = GPIO_BASE_ADDRESS + GPIO_PIN9_ADDRESS;
+ status = ath6kl_bmi_reg_write(ar, address, param);
+ if (status)
+ return status;
+
param = 0x20;
address = GPIO_BASE_ADDRESS + GPIO_PIN10_ADDRESS;
@@ -1659,6 +1678,9 @@ void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
cfg80211_scan_done(vif->scan_req, true);
vif->scan_req = NULL;
}
+
+ /* need to clean up enhanced bmiss detection fw state */
+ ath6kl_cfg80211_sta_bmiss_enhance(vif, false);
}
void ath6kl_stop_txrx(struct ath6kl *ar)
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index e5524470529c..c189e28e86a9 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -554,20 +554,24 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver,
struct ath6kl *ar = devt;
memcpy(ar->mac_addr, datap, ETH_ALEN);
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
- __func__, ar->mac_addr);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "ready event mac addr %pM sw_ver 0x%x abi_ver 0x%x cap 0x%x\n",
+ ar->mac_addr, sw_ver, abi_ver, cap);
ar->version.wlan_ver = sw_ver;
ar->version.abi_ver = abi_ver;
ar->hw.cap = cap;
- snprintf(ar->wiphy->fw_version,
- sizeof(ar->wiphy->fw_version),
- "%u.%u.%u.%u",
- (ar->version.wlan_ver & 0xf0000000) >> 28,
- (ar->version.wlan_ver & 0x0f000000) >> 24,
- (ar->version.wlan_ver & 0x00ff0000) >> 16,
- (ar->version.wlan_ver & 0x0000ffff));
+ if (strlen(ar->wiphy->fw_version) == 0) {
+ snprintf(ar->wiphy->fw_version,
+ sizeof(ar->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ (ar->version.wlan_ver & 0xf0000000) >> 28,
+ (ar->version.wlan_ver & 0x0f000000) >> 24,
+ (ar->version.wlan_ver & 0x00ff0000) >> 16,
+ (ar->version.wlan_ver & 0x0000ffff));
+ }
/* indicate to the waiting thread that the ready event was received */
set_bit(WMI_READY, &ar->flag);
@@ -598,7 +602,6 @@ static int ath6kl_commit_ch_switch(struct ath6kl_vif *vif, u16 channel)
struct ath6kl *ar = vif->ar;
- vif->next_chan = channel;
vif->profile.ch = cpu_to_le16(channel);
switch (vif->nw_type) {
@@ -1167,7 +1170,10 @@ static void ath6kl_set_multicast_list(struct net_device *ndev)
else
clear_bit(NETDEV_MCAST_ALL_ON, &vif->flags);
- mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON);
+ if (test_bit(ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER,
+ vif->ar->fw_capabilities)) {
+ mc_all_on = mc_all_on || (vif->ar->state == ATH6KL_STATE_ON);
+ }
if (!(ndev->flags & IFF_MULTICAST)) {
mc_all_on = false;
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index 78e0ef4567a5..a98c12ba70c1 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -45,6 +45,7 @@
#define LPO_CAL_ENABLE_S 20
#define LPO_CAL_ENABLE 0x00100000
+#define GPIO_PIN9_ADDRESS 0x0000004c
#define GPIO_PIN10_ADDRESS 0x00000050
#define GPIO_PIN11_ADDRESS 0x00000054
#define GPIO_PIN12_ADDRESS 0x00000058
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 67206aedea6c..7dfa0fd86d7b 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -1036,6 +1036,7 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
rxtid = &agg_conn->rx_tid[tid];
stats = &agg_conn->stat[tid];
+ spin_lock_bh(&rxtid->lock);
idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
/*
@@ -1054,8 +1055,6 @@ static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
seq_end = seq_no ? seq_no : rxtid->seq_next;
idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
- spin_lock_bh(&rxtid->lock);
-
do {
node = &rxtid->hold_q[idx];
if ((order == 1) && (!node->skb))
@@ -1127,11 +1126,13 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
((end > extended_end) && (cur > extended_end) &&
(cur < end))) {
aggr_deque_frms(agg_conn, tid, 0, 0);
+ spin_lock_bh(&rxtid->lock);
if (cur >= rxtid->hold_q_sz - 1)
rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
else
rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
(rxtid->hold_q_sz - 2 - cur);
+ spin_unlock_bh(&rxtid->lock);
} else {
/*
* Dequeue only those frames that are outside the
@@ -1185,25 +1186,25 @@ static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
aggr_deque_frms(agg_conn, tid, 0, 1);
if (agg_conn->timer_scheduled)
- rxtid->progress = true;
- else
- for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
- if (rxtid->hold_q[idx].skb) {
- /*
- * There is a frame in the queue and no
- * timer so start a timer to ensure that
- * the frame doesn't remain stuck
- * forever.
- */
- agg_conn->timer_scheduled = true;
- mod_timer(&agg_conn->timer,
- (jiffies +
- HZ * (AGGR_RX_TIMEOUT) / 1000));
- rxtid->progress = false;
- rxtid->timer_mon = true;
- break;
- }
+ return is_queued;
+
+ spin_lock_bh(&rxtid->lock);
+ for (idx = 0 ; idx < rxtid->hold_q_sz; idx++) {
+ if (rxtid->hold_q[idx].skb) {
+ /*
+ * There is a frame in the queue and no
+ * timer so start a timer to ensure that
+ * the frame doesn't remain stuck
+ * forever.
+ */
+ agg_conn->timer_scheduled = true;
+ mod_timer(&agg_conn->timer,
+ (jiffies + (HZ * AGGR_RX_TIMEOUT) / 1000));
+ rxtid->timer_mon = true;
+ break;
}
+ }
+ spin_unlock_bh(&rxtid->lock);
return is_queued;
}
@@ -1608,7 +1609,7 @@ static void aggr_timeout(unsigned long arg)
rxtid = &aggr_conn->rx_tid[i];
stats = &aggr_conn->stat[i];
- if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
+ if (!rxtid->aggr || !rxtid->timer_mon)
continue;
stats->num_timeouts++;
@@ -1626,14 +1627,15 @@ static void aggr_timeout(unsigned long arg)
rxtid = &aggr_conn->rx_tid[i];
if (rxtid->aggr && rxtid->hold_q) {
+ spin_lock_bh(&rxtid->lock);
for (j = 0; j < rxtid->hold_q_sz; j++) {
if (rxtid->hold_q[j].skb) {
aggr_conn->timer_scheduled = true;
rxtid->timer_mon = true;
- rxtid->progress = false;
break;
}
}
+ spin_unlock_bh(&rxtid->lock);
if (j >= rxtid->hold_q_sz)
rxtid->timer_mon = false;
@@ -1660,7 +1662,6 @@ static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
aggr_deque_frms(aggr_conn, tid, 0, 0);
rxtid->aggr = false;
- rxtid->progress = false;
rxtid->timer_mon = false;
rxtid->win_sz = 0;
rxtid->seq_next = 0;
@@ -1739,7 +1740,6 @@ void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
for (i = 0; i < NUM_OF_TIDS; i++) {
rxtid = &aggr_conn->rx_tid[i];
rxtid->aggr = false;
- rxtid->progress = false;
rxtid->timer_mon = false;
skb_queue_head_init(&rxtid->q);
spin_lock_init(&rxtid->lock);
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index ee8ec2394c2c..c30ab4b11d61 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -474,7 +474,7 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
return -EINVAL;
}
id = vif->last_roc_id;
- cfg80211_ready_on_channel(vif->ndev, id, chan, NL80211_CHAN_NO_HT,
+ cfg80211_ready_on_channel(&vif->wdev, id, chan, NL80211_CHAN_NO_HT,
dur, GFP_ATOMIC);
return 0;
@@ -513,7 +513,7 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
else
id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
vif->last_cancel_roc_id = 0;
- cfg80211_remain_on_channel_expired(vif->ndev, id, chan,
+ cfg80211_remain_on_channel_expired(&vif->wdev, id, chan,
NL80211_CHAN_NO_HT, GFP_ATOMIC);
return 0;
@@ -533,7 +533,7 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
id, ev->ack_status);
if (wmi->last_mgmt_tx_frame) {
- cfg80211_mgmt_tx_status(vif->ndev, id,
+ cfg80211_mgmt_tx_status(&vif->wdev, id,
wmi->last_mgmt_tx_frame,
wmi->last_mgmt_tx_frame_len,
!!ev->ack_status, GFP_ATOMIC);
@@ -568,7 +568,7 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
dlen, freq, vif->probe_req_report);
if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
- cfg80211_rx_mgmt(vif->ndev, freq, 0,
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0,
ev->data, dlen, GFP_ATOMIC);
return 0;
@@ -608,7 +608,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
- cfg80211_rx_mgmt(vif->ndev, freq, 0,
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0,
ev->data, dlen, GFP_ATOMIC);
return 0;
@@ -743,7 +743,6 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
return -ENOMEM;
cmd = (struct roam_ctrl_cmd *) skb->data;
- memset(cmd, 0, sizeof(*cmd));
memcpy(cmd->info.bssid, bssid, ETH_ALEN);
cmd->roam_ctrl = WMI_FORCE_ROAM;
@@ -753,6 +752,22 @@ int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
NO_SYNC_WMIFLAG);
}
+int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period)
+{
+ struct sk_buff *skb;
+ struct set_dtim_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct set_dtim_cmd *) skb->data;
+
+ cmd->dtim_period = cpu_to_le32(dtim_period);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_AP_SET_DTIM_CMDID, NO_SYNC_WMIFLAG);
+}
+
int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
{
struct sk_buff *skb;
@@ -763,7 +778,6 @@ int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
return -ENOMEM;
cmd = (struct roam_ctrl_cmd *) skb->data;
- memset(cmd, 0, sizeof(*cmd));
cmd->info.roam_mode = mode;
cmd->roam_ctrl = WMI_SET_ROAM_MODE;
@@ -1995,7 +2009,7 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
struct wmi_probed_ssid_cmd *cmd;
int ret;
- if (index > MAX_PROBED_SSID_INDEX)
+ if (index >= MAX_PROBED_SSIDS)
return -EINVAL;
if (ssid_len > sizeof(cmd->ssid))
@@ -2599,6 +2613,115 @@ static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
spin_unlock_bh(&wmi->lock);
}
+static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct sk_buff *skb;
+ int ret, mode, band;
+ u64 mcsrate, ratemask[IEEE80211_NUM_BANDS];
+ struct wmi_set_tx_select_rates64_cmd *cmd;
+
+ memset(&ratemask, 0, sizeof(ratemask));
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ /* copy legacy rate mask */
+ ratemask[band] = mask->control[band].legacy;
+ if (band == IEEE80211_BAND_5GHZ)
+ ratemask[band] =
+ mask->control[band].legacy << 4;
+
+ /* copy mcs rate mask */
+ mcsrate = mask->control[band].mcs[1];
+ mcsrate <<= 8;
+ mcsrate |= mask->control[band].mcs[0];
+ ratemask[band] |= mcsrate << 12;
+ ratemask[band] |= mcsrate << 28;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "Ratemask 64 bit: 2.4:%llx 5:%llx\n",
+ ratemask[0], ratemask[1]);
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_tx_select_rates64_cmd *) skb->data;
+ for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
+ /* A mode operate in 5GHZ band */
+ if (mode == WMI_RATES_MODE_11A ||
+ mode == WMI_RATES_MODE_11A_HT20 ||
+ mode == WMI_RATES_MODE_11A_HT40)
+ band = IEEE80211_BAND_5GHZ;
+ else
+ band = IEEE80211_BAND_2GHZ;
+ cmd->ratemask[mode] = cpu_to_le64(ratemask[band]);
+ }
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_TX_SELECT_RATES_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct sk_buff *skb;
+ int ret, mode, band;
+ u32 mcsrate, ratemask[IEEE80211_NUM_BANDS];
+ struct wmi_set_tx_select_rates32_cmd *cmd;
+
+ memset(&ratemask, 0, sizeof(ratemask));
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ /* copy legacy rate mask */
+ ratemask[band] = mask->control[band].legacy;
+ if (band == IEEE80211_BAND_5GHZ)
+ ratemask[band] =
+ mask->control[band].legacy << 4;
+
+ /* copy mcs rate mask */
+ mcsrate = mask->control[band].mcs[0];
+ ratemask[band] |= mcsrate << 12;
+ ratemask[band] |= mcsrate << 20;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+ "Ratemask 32 bit: 2.4:%x 5:%x\n",
+ ratemask[0], ratemask[1]);
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd) * WMI_RATES_MODE_MAX);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_tx_select_rates32_cmd *) skb->data;
+ for (mode = 0; mode < WMI_RATES_MODE_MAX; mode++) {
+ /* A mode operate in 5GHZ band */
+ if (mode == WMI_RATES_MODE_11A ||
+ mode == WMI_RATES_MODE_11A_HT20 ||
+ mode == WMI_RATES_MODE_11A_HT40)
+ band = IEEE80211_BAND_5GHZ;
+ else
+ band = IEEE80211_BAND_2GHZ;
+ cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
+ }
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_TX_SELECT_RATES_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath6kl *ar = wmi->parent_dev;
+
+ if (ar->hw.flags & ATH6KL_HW_FLAG_64BIT_RATES)
+ return ath6kl_set_bitrate_mask64(wmi, if_idx, mask);
+ else
+ return ath6kl_set_bitrate_mask32(wmi, if_idx, mask);
+}
+
int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
enum ath6kl_host_mode host_mode)
{
@@ -2997,6 +3120,25 @@ int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
return ret;
}
+int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enhance)
+{
+ struct sk_buff *skb;
+ struct wmi_sta_bmiss_enhance_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_bmiss_enhance_cmd *) skb->data;
+ cmd->enable = enhance ? 1 : 0;
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_STA_BMISS_ENHANCE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
s32 ath6kl_wmi_get_rate(s8 rate_index)
{
if (rate_index == RATE_AUTO)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 9076bec3a2ba..43339aca585d 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -624,6 +624,10 @@ enum wmi_cmd_id {
WMI_SEND_MGMT_CMDID,
WMI_BEGIN_SCAN_CMDID,
+ WMI_SET_BLACK_LIST,
+ WMI_SET_MCASTRATE,
+
+ WMI_STA_BMISS_ENHANCE_CMDID,
};
enum wmi_mgmt_frame_type {
@@ -960,6 +964,9 @@ enum wmi_bss_filter {
/* beacons matching probed ssid */
PROBED_SSID_FILTER,
+ /* beacons matching matched ssid */
+ MATCHED_SSID_FILTER,
+
/* marker only */
LAST_BSS_FILTER,
};
@@ -978,7 +985,7 @@ struct wmi_bss_filter_cmd {
} __packed;
/* WMI_SET_PROBED_SSID_CMDID */
-#define MAX_PROBED_SSID_INDEX 9
+#define MAX_PROBED_SSIDS 16
enum wmi_ssid_flag {
/* disables entry */
@@ -989,10 +996,13 @@ enum wmi_ssid_flag {
/* probes for any ssid */
ANY_SSID_FLAG = 0x02,
+
+ /* match for ssid */
+ MATCH_SSID_FLAG = 0x08,
};
struct wmi_probed_ssid_cmd {
- /* 0 to MAX_PROBED_SSID_INDEX */
+ /* 0 to MAX_PROBED_SSIDS - 1 */
u8 entry_index;
/* see, enum wmi_ssid_flg */
@@ -1017,6 +1027,11 @@ struct wmi_bmiss_time_cmd {
__le16 num_beacons;
};
+/* WMI_STA_ENHANCE_BMISS_CMDID */
+struct wmi_sta_bmiss_enhance_cmd {
+ u8 enable;
+} __packed;
+
/* WMI_SET_POWER_MODE_CMDID */
enum wmi_power_mode {
REC_POWER = 0x01,
@@ -1048,6 +1063,36 @@ struct wmi_power_params_cmd {
__le16 ps_fail_event_policy;
} __packed;
+/*
+ * Ratemask for below modes should be passed
+ * to WMI_SET_TX_SELECT_RATES_CMDID.
+ * AR6003 has 32 bit mask for each modes.
+ * First 12 bits for legacy rates, 13 to 20
+ * bits for HT 20 rates and 21 to 28 bits for
+ * HT 40 rates
+ */
+enum wmi_mode_phy {
+ WMI_RATES_MODE_11A = 0,
+ WMI_RATES_MODE_11G,
+ WMI_RATES_MODE_11B,
+ WMI_RATES_MODE_11GONLY,
+ WMI_RATES_MODE_11A_HT20,
+ WMI_RATES_MODE_11G_HT20,
+ WMI_RATES_MODE_11A_HT40,
+ WMI_RATES_MODE_11G_HT40,
+ WMI_RATES_MODE_MAX
+};
+
+/* WMI_SET_TX_SELECT_RATES_CMDID */
+struct wmi_set_tx_select_rates32_cmd {
+ __le32 ratemask[WMI_RATES_MODE_MAX];
+} __packed;
+
+/* WMI_SET_TX_SELECT_RATES_CMDID */
+struct wmi_set_tx_select_rates64_cmd {
+ __le64 ratemask[WMI_RATES_MODE_MAX];
+} __packed;
+
/* WMI_SET_DISC_TIMEOUT_CMDID */
struct wmi_disc_timeout_cmd {
/* seconds */
@@ -1572,6 +1617,10 @@ struct roam_ctrl_cmd {
u8 roam_ctrl;
} __packed;
+struct set_dtim_cmd {
+ __le32 dtim_period;
+} __packed;
+
/* BSS INFO HDR version 2.0 */
struct wmi_bss_info_hdr2 {
__le16 ch; /* frequency in MHz */
@@ -2532,6 +2581,8 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, u8 if_idx,
__be32 ips0, __be32 ips1);
int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
enum ath6kl_host_mode host_mode);
+int ath6kl_wmi_set_bitrate_mask(struct wmi *wmi, u8 if_idx,
+ const struct cfg80211_bitrate_mask *mask);
int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
enum ath6kl_wow_mode wow_mode,
u32 filter, u16 host_req_delay);
@@ -2542,11 +2593,14 @@ int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
u16 list_id, u16 filter_id);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
+int ath6kl_wmi_ap_set_dtim_cmd(struct wmi *wmi, u8 if_idx, u32 dtim_period);
int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
int ath6kl_wmi_mcast_filter_cmd(struct wmi *wmi, u8 if_idx, bool mc_all_on);
int ath6kl_wmi_add_del_mcast_filter_cmd(struct wmi *wmi, u8 if_idx,
u8 *filter, bool add_filter);
+int ath6kl_wmi_sta_bmiss_enhance_cmd(struct wmi *wmi, u8 if_idx, bool enable);
+
/* AP mode uAPSD */
int ath6kl_wmi_ap_set_apsd(struct wmi *wmi, u8 if_idx, u8 enable);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index e507e78398f3..c7aa6646123e 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -64,7 +64,7 @@ config ATH9K_DEBUGFS
config ATH9K_DFS_CERTIFIED
bool "Atheros DFS support for certified platforms"
- depends on ATH9K && EXPERT
+ depends on ATH9K && CFG80211_CERTIFICATION_ONUS
default n
---help---
This option enables DFS support for initiating radiation on
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 3f0b84723789..2ad8f9474ba1 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -3,7 +3,9 @@ ath9k-y += beacon.o \
init.o \
main.o \
recv.o \
- xmit.o
+ xmit.o \
+ link.o \
+ antenna.o
ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
@@ -15,6 +17,7 @@ ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
dfs.o \
dfs_pattern_detector.o \
dfs_pri_detector.o
+ath9k-$(CONFIG_PM_SLEEP) += wow.o
obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 5e47ca6d16a8..3a69804f4c16 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -35,6 +35,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
.name = "ar934x_wmac",
.driver_data = AR9300_DEVID_AR9340,
},
+ {
+ .name = "qca955x_wmac",
+ .driver_data = AR9300_DEVID_QCA955X,
+ },
{},
};
@@ -126,7 +130,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->irq = irq;
/* Will be cleared in ath9k_start() */
- sc->sc_flags |= SC_OP_INVALID;
+ set_bit(SC_OP_INVALID, &sc->sc_flags);
ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index b4c77f9d7470..ff007f500feb 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -104,11 +104,6 @@ static const struct ani_cck_level_entry cck_level_table[] = {
#define ATH9K_ANI_CCK_DEF_LEVEL \
2 /* default level - matches the INI settings */
-static bool use_new_ani(struct ath_hw *ah)
-{
- return AR_SREV_9300_20_OR_LATER(ah) || modparam_force_new_ani;
-}
-
static void ath9k_hw_update_mibstats(struct ath_hw *ah,
struct ath9k_mib_stats *stats)
{
@@ -122,8 +117,6 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
static void ath9k_ani_restart(struct ath_hw *ah)
{
struct ar5416AniState *aniState;
- struct ath_common *common = ath9k_hw_common(ah);
- u32 ofdm_base = 0, cck_base = 0;
if (!DO_ANI(ah))
return;
@@ -131,18 +124,10 @@ static void ath9k_ani_restart(struct ath_hw *ah)
aniState = &ah->curchan->ani;
aniState->listenTime = 0;
- if (!use_new_ani(ah)) {
- ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high;
- cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
- }
-
- ath_dbg(common, ANI, "Writing ofdmbase=%u cckbase=%u\n",
- ofdm_base, cck_base);
-
ENABLE_REGWRITE_BUFFER(ah);
- REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
- REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
+ REG_WRITE(ah, AR_PHY_ERR_1, 0);
+ REG_WRITE(ah, AR_PHY_ERR_2, 0);
REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
@@ -154,129 +139,23 @@ static void ath9k_ani_restart(struct ath_hw *ah)
aniState->cckPhyErrCount = 0;
}
-static void ath9k_hw_ani_ofdm_err_trigger_old(struct ath_hw *ah)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
- struct ar5416AniState *aniState;
- int32_t rssi;
-
- aniState = &ah->curchan->ani;
-
- if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
- if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
- aniState->noiseImmunityLevel + 1)) {
- return;
- }
- }
-
- if (aniState->spurImmunityLevel < HAL_SPUR_IMMUNE_MAX) {
- if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
- aniState->spurImmunityLevel + 1)) {
- return;
- }
- }
-
- if (ah->opmode == NL80211_IFTYPE_AP) {
- if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- }
- return;
- }
- rssi = BEACON_RSSI(ah);
- if (rssi > aniState->rssiThrHigh) {
- if (!aniState->ofdmWeakSigDetectOff) {
- if (ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- false)) {
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
- return;
- }
- }
- if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- return;
- }
- } else if (rssi > aniState->rssiThrLow) {
- if (aniState->ofdmWeakSigDetectOff)
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- true);
- if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- return;
- } else {
- if ((conf->channel->band == IEEE80211_BAND_2GHZ) &&
- !conf_is_ht(conf)) {
- if (!aniState->ofdmWeakSigDetectOff)
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- false);
- if (aniState->firstepLevel > 0)
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_FIRSTEP_LEVEL, 0);
- return;
- }
- }
-}
-
-static void ath9k_hw_ani_cck_err_trigger_old(struct ath_hw *ah)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
- struct ar5416AniState *aniState;
- int32_t rssi;
-
- aniState = &ah->curchan->ani;
- if (aniState->noiseImmunityLevel < HAL_NOISE_IMMUNE_MAX) {
- if (ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
- aniState->noiseImmunityLevel + 1)) {
- return;
- }
- }
- if (ah->opmode == NL80211_IFTYPE_AP) {
- if (aniState->firstepLevel < HAL_FIRST_STEP_MAX) {
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- }
- return;
- }
- rssi = BEACON_RSSI(ah);
- if (rssi > aniState->rssiThrLow) {
- if (aniState->firstepLevel < HAL_FIRST_STEP_MAX)
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel + 1);
- } else {
- if ((conf->channel->band == IEEE80211_BAND_2GHZ) &&
- !conf_is_ht(conf)) {
- if (aniState->firstepLevel > 0)
- ath9k_hw_ani_control(ah,
- ATH9K_ANI_FIRSTEP_LEVEL, 0);
- }
- }
-}
-
/* Adjust the OFDM Noise Immunity Level */
-static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
+static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
+ bool scan)
{
struct ar5416AniState *aniState = &ah->curchan->ani;
struct ath_common *common = ath9k_hw_common(ah);
const struct ani_ofdm_level_entry *entry_ofdm;
const struct ani_cck_level_entry *entry_cck;
-
- aniState->noiseFloor = BEACON_RSSI(ah);
+ bool weak_sig;
ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->ofdmNoiseImmunityLevel,
- immunityLevel, aniState->noiseFloor,
+ immunityLevel, BEACON_RSSI(ah),
aniState->rssiThrLow, aniState->rssiThrHigh);
- if (aniState->update_ani)
- aniState->ofdmNoiseImmunityLevel =
- (immunityLevel > ATH9K_ANI_OFDM_DEF_LEVEL) ?
- immunityLevel : ATH9K_ANI_OFDM_DEF_LEVEL;
+ if (!scan)
+ aniState->ofdmNoiseImmunityLevel = immunityLevel;
entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -292,12 +171,22 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
ATH9K_ANI_FIRSTEP_LEVEL,
entry_ofdm->fir_step_level);
- if ((aniState->noiseFloor >= aniState->rssiThrHigh) &&
- (!aniState->ofdmWeakSigDetectOff !=
- entry_ofdm->ofdm_weak_signal_on)) {
+ weak_sig = entry_ofdm->ofdm_weak_signal_on;
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ BEACON_RSSI(ah) <= aniState->rssiThrHigh)
+ weak_sig = true;
+
+ if (aniState->ofdmWeakSigDetect != weak_sig)
ath9k_hw_ani_control(ah,
ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
entry_ofdm->ofdm_weak_signal_on);
+
+ if (aniState->ofdmNoiseImmunityLevel >= ATH9K_ANI_OFDM_DEF_LEVEL) {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI;
+ } else {
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
}
}
@@ -308,43 +197,35 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
if (!DO_ANI(ah))
return;
- if (!use_new_ani(ah)) {
- ath9k_hw_ani_ofdm_err_trigger_old(ah);
- return;
- }
-
aniState = &ah->curchan->ani;
if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL)
- ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1);
+ ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false);
}
/*
* Set the ANI settings to match an CCK level.
*/
-static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
+static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
+ bool scan)
{
struct ar5416AniState *aniState = &ah->curchan->ani;
struct ath_common *common = ath9k_hw_common(ah);
const struct ani_ofdm_level_entry *entry_ofdm;
const struct ani_cck_level_entry *entry_cck;
- aniState->noiseFloor = BEACON_RSSI(ah);
ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->cckNoiseImmunityLevel, immunityLevel,
- aniState->noiseFloor, aniState->rssiThrLow,
+ BEACON_RSSI(ah), aniState->rssiThrLow,
aniState->rssiThrHigh);
- if ((ah->opmode == NL80211_IFTYPE_STATION ||
- ah->opmode == NL80211_IFTYPE_ADHOC) &&
- aniState->noiseFloor <= aniState->rssiThrLow &&
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ BEACON_RSSI(ah) <= aniState->rssiThrLow &&
immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)
immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
- if (aniState->update_ani)
- aniState->cckNoiseImmunityLevel =
- (immunityLevel > ATH9K_ANI_CCK_DEF_LEVEL) ?
- immunityLevel : ATH9K_ANI_CCK_DEF_LEVEL;
+ if (!scan)
+ aniState->cckNoiseImmunityLevel = immunityLevel;
entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -359,7 +240,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah))
return;
- if (aniState->mrcCCKOff == entry_cck->mrc_cck_on)
+ if (aniState->mrcCCK != entry_cck->mrc_cck_on)
ath9k_hw_ani_control(ah,
ATH9K_ANI_MRC_CCK,
entry_cck->mrc_cck_on);
@@ -372,68 +253,11 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
if (!DO_ANI(ah))
return;
- if (!use_new_ani(ah)) {
- ath9k_hw_ani_cck_err_trigger_old(ah);
- return;
- }
-
aniState = &ah->curchan->ani;
if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL)
- ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1);
-}
-
-static void ath9k_hw_ani_lower_immunity_old(struct ath_hw *ah)
-{
- struct ar5416AniState *aniState;
- int32_t rssi;
-
- aniState = &ah->curchan->ani;
-
- if (ah->opmode == NL80211_IFTYPE_AP) {
- if (aniState->firstepLevel > 0) {
- if (ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel - 1))
- return;
- }
- } else {
- rssi = BEACON_RSSI(ah);
- if (rssi > aniState->rssiThrHigh) {
- /* XXX: Handle me */
- } else if (rssi > aniState->rssiThrLow) {
- if (aniState->ofdmWeakSigDetectOff) {
- if (ath9k_hw_ani_control(ah,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- true))
- return;
- }
- if (aniState->firstepLevel > 0) {
- if (ath9k_hw_ani_control(ah,
- ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel - 1))
- return;
- }
- } else {
- if (aniState->firstepLevel > 0) {
- if (ath9k_hw_ani_control(ah,
- ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel - 1))
- return;
- }
- }
- }
-
- if (aniState->spurImmunityLevel > 0) {
- if (ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
- aniState->spurImmunityLevel - 1))
- return;
- }
-
- if (aniState->noiseImmunityLevel > 0) {
- ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
- aniState->noiseImmunityLevel - 1);
- return;
- }
+ ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1,
+ false);
}
/*
@@ -446,87 +270,18 @@ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah)
aniState = &ah->curchan->ani;
- if (!use_new_ani(ah)) {
- ath9k_hw_ani_lower_immunity_old(ah);
- return;
- }
-
/* lower OFDM noise immunity */
if (aniState->ofdmNoiseImmunityLevel > 0 &&
(aniState->ofdmsTurn || aniState->cckNoiseImmunityLevel == 0)) {
- ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1);
+ ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel - 1,
+ false);
return;
}
/* lower CCK noise immunity */
if (aniState->cckNoiseImmunityLevel > 0)
- ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1);
-}
-
-static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
-{
- struct ar5416AniState *aniState;
- struct ath9k_channel *chan = ah->curchan;
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (!DO_ANI(ah))
- return;
-
- aniState = &ah->curchan->ani;
-
- if (ah->opmode != NL80211_IFTYPE_STATION
- && ah->opmode != NL80211_IFTYPE_ADHOC) {
- ath_dbg(common, ANI, "Reset ANI state opmode %u\n", ah->opmode);
- ah->stats.ast_ani_reset++;
-
- if (ah->opmode == NL80211_IFTYPE_AP) {
- /*
- * ath9k_hw_ani_control() will only process items set on
- * ah->ani_function
- */
- if (IS_CHAN_2GHZ(chan))
- ah->ani_function = (ATH9K_ANI_SPUR_IMMUNITY_LEVEL |
- ATH9K_ANI_FIRSTEP_LEVEL);
- else
- ah->ani_function = 0;
- }
-
- ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL, 0);
- ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL, 0);
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL, 0);
- ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- !ATH9K_ANI_USE_OFDM_WEAK_SIG);
- ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
- ATH9K_ANI_CCK_WEAK_SIG_THR);
-
- ath9k_ani_restart(ah);
- return;
- }
-
- if (aniState->noiseImmunityLevel != 0)
- ath9k_hw_ani_control(ah, ATH9K_ANI_NOISE_IMMUNITY_LEVEL,
- aniState->noiseImmunityLevel);
- if (aniState->spurImmunityLevel != 0)
- ath9k_hw_ani_control(ah, ATH9K_ANI_SPUR_IMMUNITY_LEVEL,
- aniState->spurImmunityLevel);
- if (aniState->ofdmWeakSigDetectOff)
- ath9k_hw_ani_control(ah, ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
- !aniState->ofdmWeakSigDetectOff);
- if (aniState->cckWeakSigThreshold)
- ath9k_hw_ani_control(ah, ATH9K_ANI_CCK_WEAK_SIGNAL_THR,
- aniState->cckWeakSigThreshold);
- if (aniState->firstepLevel != 0)
- ath9k_hw_ani_control(ah, ATH9K_ANI_FIRSTEP_LEVEL,
- aniState->firstepLevel);
-
- ath9k_ani_restart(ah);
-
- ENABLE_REGWRITE_BUFFER(ah);
-
- REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
- REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
-
- REGWRITE_BUFFER_FLUSH(ah);
+ ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel - 1,
+ false);
}
/*
@@ -539,13 +294,11 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
struct ar5416AniState *aniState = &ah->curchan->ani;
struct ath9k_channel *chan = ah->curchan;
struct ath_common *common = ath9k_hw_common(ah);
+ int ofdm_nil, cck_nil;
if (!DO_ANI(ah))
return;
- if (!use_new_ani(ah))
- return ath9k_ani_reset_old(ah, is_scanning);
-
BUG_ON(aniState == NULL);
ah->stats.ast_ani_reset++;
@@ -563,6 +316,11 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
/* always allow mode (on/off) to be controlled */
ah->ani_function |= ATH9K_ANI_MODE;
+ ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
+ aniState->ofdmNoiseImmunityLevel);
+ cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
+ aniState->cckNoiseImmunityLevel);
+
if (is_scanning ||
(ah->opmode != NL80211_IFTYPE_STATION &&
ah->opmode != NL80211_IFTYPE_ADHOC)) {
@@ -585,9 +343,8 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
- aniState->update_ani = false;
- ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL);
- ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
+ ofdm_nil = ATH9K_ANI_OFDM_DEF_LEVEL;
+ cck_nil = ATH9K_ANI_CCK_DEF_LEVEL;
}
} else {
/*
@@ -601,13 +358,9 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
is_scanning,
aniState->ofdmNoiseImmunityLevel,
aniState->cckNoiseImmunityLevel);
-
- aniState->update_ani = true;
- ath9k_hw_set_ofdm_nil(ah,
- aniState->ofdmNoiseImmunityLevel);
- ath9k_hw_set_cck_nil(ah,
- aniState->cckNoiseImmunityLevel);
}
+ ath9k_hw_set_ofdm_nil(ah, ofdm_nil, is_scanning);
+ ath9k_hw_set_cck_nil(ah, cck_nil, is_scanning);
/*
* enable phy counters if hw supports or if not, enable phy
@@ -627,9 +380,6 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ar5416AniState *aniState = &ah->curchan->ani;
- u32 ofdm_base = 0;
- u32 cck_base = 0;
- u32 ofdmPhyErrCnt, cckPhyErrCnt;
u32 phyCnt1, phyCnt2;
int32_t listenTime;
@@ -642,11 +392,6 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
return false;
}
- if (!use_new_ani(ah)) {
- ofdm_base = AR_PHY_COUNTMAX - ah->config.ofdm_trig_high;
- cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
- }
-
aniState->listenTime += listenTime;
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -654,35 +399,12 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
- if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) {
- if (phyCnt1 < ofdm_base) {
- ath_dbg(common, ANI,
- "phyCnt1 0x%x, resetting counter value to 0x%x\n",
- phyCnt1, ofdm_base);
- REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
- REG_WRITE(ah, AR_PHY_ERR_MASK_1,
- AR_PHY_ERR_OFDM_TIMING);
- }
- if (phyCnt2 < cck_base) {
- ath_dbg(common, ANI,
- "phyCnt2 0x%x, resetting counter value to 0x%x\n",
- phyCnt2, cck_base);
- REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
- REG_WRITE(ah, AR_PHY_ERR_MASK_2,
- AR_PHY_ERR_CCK_TIMING);
- }
- return false;
- }
+ ah->stats.ast_ani_ofdmerrs += phyCnt1 - aniState->ofdmPhyErrCount;
+ aniState->ofdmPhyErrCount = phyCnt1;
- ofdmPhyErrCnt = phyCnt1 - ofdm_base;
- ah->stats.ast_ani_ofdmerrs +=
- ofdmPhyErrCnt - aniState->ofdmPhyErrCount;
- aniState->ofdmPhyErrCount = ofdmPhyErrCnt;
+ ah->stats.ast_ani_cckerrs += phyCnt2 - aniState->cckPhyErrCount;
+ aniState->cckPhyErrCount = phyCnt2;
- cckPhyErrCnt = phyCnt2 - cck_base;
- ah->stats.ast_ani_cckerrs +=
- cckPhyErrCnt - aniState->cckPhyErrCount;
- aniState->cckPhyErrCount = cckPhyErrCnt;
return true;
}
@@ -716,21 +438,10 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
if (aniState->listenTime > ah->aniperiod) {
if (cckPhyErrRate < ah->config.cck_trig_low &&
- ((ofdmPhyErrRate < ah->config.ofdm_trig_low &&
- aniState->ofdmNoiseImmunityLevel <
- ATH9K_ANI_OFDM_DEF_LEVEL) ||
- (ofdmPhyErrRate < ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI &&
- aniState->ofdmNoiseImmunityLevel >=
- ATH9K_ANI_OFDM_DEF_LEVEL))) {
+ ofdmPhyErrRate < ah->config.ofdm_trig_low) {
ath9k_hw_ani_lower_immunity(ah);
aniState->ofdmsTurn = !aniState->ofdmsTurn;
- } else if ((ofdmPhyErrRate > ah->config.ofdm_trig_high &&
- aniState->ofdmNoiseImmunityLevel >=
- ATH9K_ANI_OFDM_DEF_LEVEL) ||
- (ofdmPhyErrRate >
- ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI &&
- aniState->ofdmNoiseImmunityLevel <
- ATH9K_ANI_OFDM_DEF_LEVEL)) {
+ } else if (ofdmPhyErrRate > ah->config.ofdm_trig_high) {
ath9k_hw_ani_ofdm_err_trigger(ah);
aniState->ofdmsTurn = false;
} else if (cckPhyErrRate > ah->config.cck_trig_high) {
@@ -778,49 +489,6 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_disable_mib_counters);
-/*
- * Process a MIB interrupt. We may potentially be invoked because
- * any of the MIB counters overflow/trigger so don't assume we're
- * here because a PHY error counter triggered.
- */
-void ath9k_hw_proc_mib_event(struct ath_hw *ah)
-{
- u32 phyCnt1, phyCnt2;
-
- /* Reset these counters regardless */
- REG_WRITE(ah, AR_FILT_OFDM, 0);
- REG_WRITE(ah, AR_FILT_CCK, 0);
- if (!(REG_READ(ah, AR_SLP_MIB_CTRL) & AR_SLP_MIB_PENDING))
- REG_WRITE(ah, AR_SLP_MIB_CTRL, AR_SLP_MIB_CLEAR);
-
- /* Clear the mib counters and save them in the stats */
- ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
-
- if (!DO_ANI(ah)) {
- /*
- * We must always clear the interrupt cause by
- * resetting the phy error regs.
- */
- REG_WRITE(ah, AR_PHY_ERR_1, 0);
- REG_WRITE(ah, AR_PHY_ERR_2, 0);
- return;
- }
-
- /* NB: these are not reset-on-read */
- phyCnt1 = REG_READ(ah, AR_PHY_ERR_1);
- phyCnt2 = REG_READ(ah, AR_PHY_ERR_2);
- if (((phyCnt1 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK) ||
- ((phyCnt2 & AR_MIBCNT_INTRMASK) == AR_MIBCNT_INTRMASK)) {
-
- if (!use_new_ani(ah))
- ath9k_hw_ani_read_counters(ah);
-
- /* NB: always restart to insure the h/w counters are reset */
- ath9k_ani_restart(ah);
- }
-}
-EXPORT_SYMBOL(ath9k_hw_proc_mib_event);
-
void ath9k_hw_ani_setup(struct ath_hw *ah)
{
int i;
@@ -845,66 +513,37 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
ath_dbg(common, ANI, "Initialize ANI\n");
- if (use_new_ani(ah)) {
- ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
- ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_NEW;
+ ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH;
+ ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW;
- ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_NEW;
- ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_NEW;
- } else {
- ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_OLD;
- ah->config.ofdm_trig_low = ATH9K_ANI_OFDM_TRIG_LOW_OLD;
-
- ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH_OLD;
- ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW_OLD;
- }
+ ah->config.cck_trig_high = ATH9K_ANI_CCK_TRIG_HIGH;
+ ah->config.cck_trig_low = ATH9K_ANI_CCK_TRIG_LOW;
for (i = 0; i < ARRAY_SIZE(ah->channels); i++) {
struct ath9k_channel *chan = &ah->channels[i];
struct ar5416AniState *ani = &chan->ani;
- if (use_new_ani(ah)) {
- ani->spurImmunityLevel =
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
+ ani->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
- ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
+ ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
- if (AR_SREV_9300_20_OR_LATER(ah))
- ani->mrcCCKOff =
- !ATH9K_ANI_ENABLE_MRC_CCK;
- else
- ani->mrcCCKOff = true;
-
- ani->ofdmsTurn = true;
- } else {
- ani->spurImmunityLevel =
- ATH9K_ANI_SPUR_IMMUNE_LVL_OLD;
- ani->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_OLD;
-
- ani->cckWeakSigThreshold =
- ATH9K_ANI_CCK_WEAK_SIG_THR;
- }
+ ani->mrcCCK = AR_SREV_9300_20_OR_LATER(ah) ? true : false;
+
+ ani->ofdmsTurn = true;
ani->rssiThrHigh = ATH9K_ANI_RSSI_THR_HIGH;
ani->rssiThrLow = ATH9K_ANI_RSSI_THR_LOW;
- ani->ofdmWeakSigDetectOff =
- !ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ ani->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
ani->cckNoiseImmunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
ani->ofdmNoiseImmunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
- ani->update_ani = false;
}
/*
* since we expect some ongoing maintenance on the tables, let's sanity
* check here default level should not modify INI setting.
*/
- if (use_new_ani(ah)) {
- ah->aniperiod = ATH9K_ANI_PERIOD_NEW;
- ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_NEW;
- } else {
- ah->aniperiod = ATH9K_ANI_PERIOD_OLD;
- ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL_OLD;
- }
+ ah->aniperiod = ATH9K_ANI_PERIOD;
+ ah->config.ani_poll_interval = ATH9K_ANI_POLLINTERVAL;
if (ah->config.enable_ani)
ah->proc_phyerr |= HAL_PROCESS_ANI;
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 72e2b874e179..1485bf5e3518 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -24,42 +24,34 @@
#define BEACON_RSSI(ahp) (ahp->stats.avgbrssi)
/* units are errors per second */
-#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
-#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 3500
+#define ATH9K_ANI_OFDM_TRIG_HIGH 3500
#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
/* units are errors per second */
-#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
-#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400
+#define ATH9K_ANI_OFDM_TRIG_LOW 400
#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
/* units are errors per second */
-#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
-#define ATH9K_ANI_CCK_TRIG_HIGH_NEW 600
+#define ATH9K_ANI_CCK_TRIG_HIGH 600
/* units are errors per second */
-#define ATH9K_ANI_CCK_TRIG_LOW_OLD 100
-#define ATH9K_ANI_CCK_TRIG_LOW_NEW 300
+#define ATH9K_ANI_CCK_TRIG_LOW 300
#define ATH9K_ANI_NOISE_IMMUNE_LVL 4
#define ATH9K_ANI_USE_OFDM_WEAK_SIG true
#define ATH9K_ANI_CCK_WEAK_SIG_THR false
-#define ATH9K_ANI_SPUR_IMMUNE_LVL_OLD 7
-#define ATH9K_ANI_SPUR_IMMUNE_LVL_NEW 3
+#define ATH9K_ANI_SPUR_IMMUNE_LVL 3
-#define ATH9K_ANI_FIRSTEP_LVL_OLD 0
-#define ATH9K_ANI_FIRSTEP_LVL_NEW 2
+#define ATH9K_ANI_FIRSTEP_LVL 2
#define ATH9K_ANI_RSSI_THR_HIGH 40
#define ATH9K_ANI_RSSI_THR_LOW 7
-#define ATH9K_ANI_PERIOD_OLD 100
-#define ATH9K_ANI_PERIOD_NEW 300
+#define ATH9K_ANI_PERIOD 300
/* in ms */
-#define ATH9K_ANI_POLLINTERVAL_OLD 100
-#define ATH9K_ANI_POLLINTERVAL_NEW 1000
+#define ATH9K_ANI_POLLINTERVAL 1000
#define HAL_NOISE_IMMUNE_MAX 4
#define HAL_SPUR_IMMUNE_MAX 7
@@ -70,8 +62,6 @@
#define ATH9K_SIG_SPUR_IMM_SETTING_MIN 0
#define ATH9K_SIG_SPUR_IMM_SETTING_MAX 22
-#define ATH9K_ANI_ENABLE_MRC_CCK true
-
/* values here are relative to the INI */
enum ath9k_ani_cmd {
@@ -119,16 +109,14 @@ struct ar5416AniState {
u8 ofdmNoiseImmunityLevel;
u8 cckNoiseImmunityLevel;
bool ofdmsTurn;
- u8 mrcCCKOff;
+ u8 mrcCCK;
u8 spurImmunityLevel;
u8 firstepLevel;
- u8 ofdmWeakSigDetectOff;
+ u8 ofdmWeakSigDetect;
u8 cckWeakSigThreshold;
- bool update_ani;
u32 listenTime;
int32_t rssiThrLow;
int32_t rssiThrHigh;
- u32 noiseFloor;
u32 ofdmPhyErrCount;
u32 cckPhyErrCount;
int16_t pktRssi[2];
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
new file mode 100644
index 000000000000..bbcfeb3b2a60
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
+ int mindelta, int main_rssi_avg,
+ int alt_rssi_avg, int pkt_count)
+{
+ return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
+ (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
+ (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
+}
+
+static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
+ int curr_main_set, int curr_alt_set,
+ int alt_rssi_avg, int main_rssi_avg)
+{
+ bool result = false;
+ switch (div_group) {
+ case 0:
+ if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
+ result = true;
+ break;
+ case 1:
+ case 2:
+ if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
+ (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
+ (alt_rssi_avg >= (main_rssi_avg - 5))) ||
+ ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
+ (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
+ (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
+ (alt_rssi_avg >= 4))
+ result = true;
+ else
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf ant_conf,
+ int main_rssi_avg)
+{
+ antcomb->quick_scan_cnt = 0;
+
+ if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ antcomb->rssi_lna2 = main_rssi_avg;
+ else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
+ antcomb->rssi_lna1 = main_rssi_avg;
+
+ switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
+ case 0x10: /* LNA2 A-B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
+ break;
+ case 0x20: /* LNA1 A-B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case 0x13: /* LNA2 A+B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
+ break;
+ case 0x23: /* LNA1 A+B */
+ antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ antcomb->first_quick_scan_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg,
+ int alt_ratio)
+{
+ /* alt_good */
+ switch (antcomb->quick_scan_cnt) {
+ case 0:
+ /* set alt to main, and alt to first conf */
+ div_ant_conf->main_lna_conf = antcomb->main_conf;
+ div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
+ break;
+ case 1:
+ /* set alt to main, and alt to first conf */
+ div_ant_conf->main_lna_conf = antcomb->main_conf;
+ div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+ antcomb->rssi_first = main_rssi_avg;
+ antcomb->rssi_second = alt_rssi_avg;
+
+ if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
+ /* main is LNA1 */
+ if (ath_is_alt_ant_ratio_better(alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->first_ratio = true;
+ else
+ antcomb->first_ratio = false;
+ } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
+ if (ath_is_alt_ant_ratio_better(alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->first_ratio = true;
+ else
+ antcomb->first_ratio = false;
+ } else {
+ if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
+ (alt_rssi_avg > main_rssi_avg +
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
+ (alt_rssi_avg > main_rssi_avg)) &&
+ (antcomb->total_pkt_count > 50))
+ antcomb->first_ratio = true;
+ else
+ antcomb->first_ratio = false;
+ }
+ break;
+ case 2:
+ antcomb->alt_good = false;
+ antcomb->scan_not_start = false;
+ antcomb->scan = false;
+ antcomb->rssi_first = main_rssi_avg;
+ antcomb->rssi_third = alt_rssi_avg;
+
+ if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
+ antcomb->rssi_lna1 = alt_rssi_avg;
+ else if (antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ antcomb->rssi_lna2 = alt_rssi_avg;
+ else if (antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
+ if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
+ antcomb->rssi_lna2 = main_rssi_avg;
+ else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
+ antcomb->rssi_lna1 = main_rssi_avg;
+ }
+
+ if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
+ ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+
+ if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
+ if (ath_is_alt_ant_ratio_better(alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->second_ratio = true;
+ else
+ antcomb->second_ratio = false;
+ } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
+ if (ath_is_alt_ant_ratio_better(alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
+ antcomb->second_ratio = true;
+ else
+ antcomb->second_ratio = false;
+ } else {
+ if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
+ (alt_rssi_avg > main_rssi_avg +
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
+ (alt_rssi_avg > main_rssi_avg)) &&
+ (antcomb->total_pkt_count > 50))
+ antcomb->second_ratio = true;
+ else
+ antcomb->second_ratio = false;
+ }
+
+ /* set alt to the conf with maximun ratio */
+ if (antcomb->first_ratio && antcomb->second_ratio) {
+ if (antcomb->rssi_second > antcomb->rssi_third) {
+ /* first alt*/
+ if ((antcomb->first_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->first_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2*/
+ if (div_ant_conf->main_lna_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ div_ant_conf->alt_lna_conf =
+ antcomb->first_quick_scan_conf;
+ } else if ((antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA2)) {
+ /* Set alt LNA1 or LNA2 */
+ if (div_ant_conf->main_lna_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ } else {
+ /* Set alt to A+B or A-B */
+ div_ant_conf->alt_lna_conf =
+ antcomb->second_quick_scan_conf;
+ }
+ } else if (antcomb->first_ratio) {
+ /* first alt */
+ if ((antcomb->first_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->first_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (div_ant_conf->main_lna_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ div_ant_conf->alt_lna_conf =
+ antcomb->first_quick_scan_conf;
+ } else if (antcomb->second_ratio) {
+ /* second alt */
+ if ((antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->second_quick_scan_conf ==
+ ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (div_ant_conf->main_lna_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ div_ant_conf->alt_lna_conf =
+ antcomb->second_quick_scan_conf;
+ } else {
+ /* main is largest */
+ if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (div_ant_conf->main_lna_conf ==
+ ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else
+ div_ant_conf->alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ div_ant_conf->alt_lna_conf = antcomb->main_conf;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
+ struct ath_ant_comb *antcomb,
+ int alt_ratio)
+{
+ if (ant_conf->div_group == 0) {
+ /* Adjust the fast_div_bias based on main and alt lna conf */
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x3b;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x3d;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x10: /* LNA2 A-B */
+ ant_conf->fast_div_bias = 0x7;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x2;
+ break;
+ case 0x13: /* LNA2 A+B */
+ ant_conf->fast_div_bias = 0x7;
+ break;
+ case 0x20: /* LNA1 A-B */
+ ant_conf->fast_div_bias = 0x6;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x0;
+ break;
+ case 0x23: /* LNA1 A+B */
+ ant_conf->fast_div_bias = 0x6;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x3b;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x3d;
+ break;
+ default:
+ break;
+ }
+ } else if (ant_conf->div_group == 1) {
+ /* Adjust the fast_div_bias based on main and alt_lna_conf */
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x10: /* LNA2 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x13: /* LNA2 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x20: /* LNA1 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x23: /* LNA1 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x3f;
+ else
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ default:
+ break;
+ }
+ } else if (ant_conf->div_group == 2) {
+ /* Adjust the fast_div_bias based on main and alt_lna_conf */
+ switch ((ant_conf->main_lna_conf << 4) |
+ ant_conf->alt_lna_conf) {
+ case 0x01: /* A-B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x02: /* A-B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x03: /* A-B A+B */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x10: /* LNA2 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x12: /* LNA2 LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x13: /* LNA2 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x20: /* LNA1 A-B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x21: /* LNA1 LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x23: /* LNA1 A+B */
+ if (!(antcomb->scan) &&
+ (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ ant_conf->fast_div_bias = 0x1;
+ else
+ ant_conf->fast_div_bias = 0x2;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x30: /* A+B A-B */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x31: /* A+B LNA2 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ case 0x32: /* A+B LNA1 */
+ ant_conf->fast_div_bias = 0x1;
+ ant_conf->main_gaintb = 0;
+ ant_conf->alt_gaintb = 0;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
+{
+ struct ath_hw_antcomb_conf div_ant_conf;
+ struct ath_ant_comb *antcomb = &sc->ant_comb;
+ int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
+ int curr_main_set;
+ int main_rssi = rs->rs_rssi_ctl0;
+ int alt_rssi = rs->rs_rssi_ctl1;
+ int rx_ant_conf, main_ant_conf;
+ bool short_scan = false;
+
+ rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
+ ATH_ANT_RX_MASK;
+ main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
+ ATH_ANT_RX_MASK;
+
+ /* Record packet only when both main_rssi and alt_rssi is positive */
+ if (main_rssi > 0 && alt_rssi > 0) {
+ antcomb->total_pkt_count++;
+ antcomb->main_total_rssi += main_rssi;
+ antcomb->alt_total_rssi += alt_rssi;
+ if (main_ant_conf == rx_ant_conf)
+ antcomb->main_recv_cnt++;
+ else
+ antcomb->alt_recv_cnt++;
+ }
+
+ /* Short scan check */
+ if (antcomb->scan && antcomb->alt_good) {
+ if (time_after(jiffies, antcomb->scan_start_time +
+ msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
+ short_scan = true;
+ else
+ if (antcomb->total_pkt_count ==
+ ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
+ alt_ratio = ((antcomb->alt_recv_cnt * 100) /
+ antcomb->total_pkt_count);
+ if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
+ short_scan = true;
+ }
+ }
+
+ if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
+ rs->rs_moreaggr) && !short_scan)
+ return;
+
+ if (antcomb->total_pkt_count) {
+ alt_ratio = ((antcomb->alt_recv_cnt * 100) /
+ antcomb->total_pkt_count);
+ main_rssi_avg = (antcomb->main_total_rssi /
+ antcomb->total_pkt_count);
+ alt_rssi_avg = (antcomb->alt_total_rssi /
+ antcomb->total_pkt_count);
+ }
+
+
+ ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
+ curr_alt_set = div_ant_conf.alt_lna_conf;
+ curr_main_set = div_ant_conf.main_lna_conf;
+
+ antcomb->count++;
+
+ if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
+ if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
+ ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
+ main_rssi_avg);
+ antcomb->alt_good = true;
+ } else {
+ antcomb->alt_good = false;
+ }
+
+ antcomb->count = 0;
+ antcomb->scan = true;
+ antcomb->scan_not_start = true;
+ }
+
+ if (!antcomb->scan) {
+ if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
+ alt_ratio, curr_main_set, curr_alt_set,
+ alt_rssi_avg, main_rssi_avg)) {
+ if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
+ /* Switch main and alt LNA */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ }
+
+ goto div_comb_done;
+ } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
+ (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
+ /* Set alt to another LNA */
+ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+
+ goto div_comb_done;
+ }
+
+ if ((alt_rssi_avg < (main_rssi_avg +
+ div_ant_conf.lna1_lna2_delta)))
+ goto div_comb_done;
+ }
+
+ if (!antcomb->scan_not_start) {
+ switch (curr_alt_set) {
+ case ATH_ANT_DIV_COMB_LNA2:
+ antcomb->rssi_lna2 = alt_rssi_avg;
+ antcomb->rssi_lna1 = main_rssi_avg;
+ antcomb->scan = true;
+ /* set to A+B */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1:
+ antcomb->rssi_lna1 = alt_rssi_avg;
+ antcomb->rssi_lna2 = main_rssi_avg;
+ antcomb->scan = true;
+ /* set to A+B */
+ div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
+ antcomb->rssi_add = alt_rssi_avg;
+ antcomb->scan = true;
+ /* set to A-B */
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
+ antcomb->rssi_sub = alt_rssi_avg;
+ antcomb->scan = false;
+ if (antcomb->rssi_lna2 >
+ (antcomb->rssi_lna1 +
+ ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+ /* use LNA2 as main LNA */
+ if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
+ (antcomb->rssi_add > antcomb->rssi_sub)) {
+ /* set to A+B */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+ antcomb->rssi_lna1) {
+ /* set to A-B */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ } else {
+ /* set to LNA1 */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ }
+ } else {
+ /* use LNA1 as main LNA */
+ if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
+ (antcomb->rssi_add > antcomb->rssi_sub)) {
+ /* set to A+B */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+ antcomb->rssi_lna1) {
+ /* set to A-B */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ } else {
+ /* set to LNA2 */
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ } else {
+ if (!antcomb->alt_good) {
+ antcomb->scan_not_start = false;
+ /* Set alt to another LNA */
+ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
+ div_ant_conf.main_lna_conf =
+ ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf.alt_lna_conf =
+ ATH_ANT_DIV_COMB_LNA2;
+ }
+ goto div_comb_done;
+ }
+ }
+
+ ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
+ main_rssi_avg, alt_rssi_avg,
+ alt_ratio);
+
+ antcomb->quick_scan_cnt++;
+
+div_comb_done:
+ ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
+ ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
+
+ antcomb->scan_start_time = jiffies;
+ antcomb->total_pkt_count = 0;
+ antcomb->main_total_rssi = 0;
+ antcomb->alt_total_rssi = 0;
+ antcomb->main_recv_cnt = 0;
+ antcomb->alt_recv_cnt = 0;
+}
+
+void ath_ant_comb_update(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_hw_antcomb_conf div_ant_conf;
+ u8 lna_conf;
+
+ ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
+
+ if (sc->ant_rx == 1)
+ lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ lna_conf = ATH_ANT_DIV_COMB_LNA2;
+
+ div_ant_conf.main_lna_conf = lna_conf;
+ div_ant_conf.alt_lna_conf = lna_conf;
+
+ ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
+}
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index c7492c6a2519..874186bfda41 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -995,141 +995,6 @@ static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
return pll;
}
-static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
- enum ath9k_ani_cmd cmd,
- int param)
-{
- struct ar5416AniState *aniState = &ah->curchan->ani;
- struct ath_common *common = ath9k_hw_common(ah);
-
- switch (cmd & ah->ani_function) {
- case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
- u32 level = param;
-
- if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
- level, ARRAY_SIZE(ah->totalSizeDesired));
- return false;
- }
-
- REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
- AR_PHY_DESIRED_SZ_TOT_DES,
- ah->totalSizeDesired[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_LOW,
- ah->coarse_low[level]);
- REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
- AR_PHY_AGC_CTL1_COARSE_HIGH,
- ah->coarse_high[level]);
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRPWR,
- ah->firpwr[level]);
-
- if (level > aniState->noiseImmunityLevel)
- ah->stats.ast_ani_niup++;
- else if (level < aniState->noiseImmunityLevel)
- ah->stats.ast_ani_nidown++;
- aniState->noiseImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
- u32 on = param ? 1 : 0;
-
- if (on)
- REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
- else
- REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
- AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
-
- if (!on != aniState->ofdmWeakSigDetectOff) {
- if (on)
- ah->stats.ast_ani_ofdmon++;
- else
- ah->stats.ast_ani_ofdmoff++;
- aniState->ofdmWeakSigDetectOff = !on;
- }
- break;
- }
- case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
- static const int weakSigThrCck[] = { 8, 6 };
- u32 high = param ? 1 : 0;
-
- REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
- AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
- weakSigThrCck[high]);
- if (high != aniState->cckWeakSigThreshold) {
- if (high)
- ah->stats.ast_ani_cckhigh++;
- else
- ah->stats.ast_ani_ccklow++;
- aniState->cckWeakSigThreshold = high;
- }
- break;
- }
- case ATH9K_ANI_FIRSTEP_LEVEL:{
- static const int firstep[] = { 0, 4, 8 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(firstep)) {
- ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
- level, ARRAY_SIZE(firstep));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
- AR_PHY_FIND_SIG_FIRSTEP,
- firstep[level]);
- if (level > aniState->firstepLevel)
- ah->stats.ast_ani_stepup++;
- else if (level < aniState->firstepLevel)
- ah->stats.ast_ani_stepdown++;
- aniState->firstepLevel = level;
- break;
- }
- case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
- static const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
- u32 level = param;
-
- if (level >= ARRAY_SIZE(cycpwrThr1)) {
- ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
- level, ARRAY_SIZE(cycpwrThr1));
- return false;
- }
- REG_RMW_FIELD(ah, AR_PHY_TIMING5,
- AR_PHY_TIMING5_CYCPWR_THR1,
- cycpwrThr1[level]);
- if (level > aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurup++;
- else if (level < aniState->spurImmunityLevel)
- ah->stats.ast_ani_spurdown++;
- aniState->spurImmunityLevel = level;
- break;
- }
- case ATH9K_ANI_PRESENT:
- break;
- default:
- ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
- return false;
- }
-
- ath_dbg(common, ANI, "ANI parameters:\n");
- ath_dbg(common, ANI,
- "noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
- aniState->noiseImmunityLevel,
- aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff);
- ath_dbg(common, ANI,
- "cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
- aniState->cckWeakSigThreshold,
- aniState->firstepLevel,
- aniState->listenTime);
- ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
- aniState->ofdmPhyErrCount,
- aniState->cckPhyErrCount);
-
- return true;
-}
-
static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
enum ath9k_ani_cmd cmd,
int param)
@@ -1206,18 +1071,18 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
- if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on != aniState->ofdmWeakSigDetect) {
ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
- !aniState->ofdmWeakSigDetectOff ?
+ aniState->ofdmWeakSigDetect ?
"on" : "off",
on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
ah->stats.ast_ani_ofdmoff++;
- aniState->ofdmWeakSigDetectOff = !on;
+ aniState->ofdmWeakSigDetect = on;
}
break;
}
@@ -1236,7 +1101,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
* from INI file & cap value
*/
value = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
aniState->iniDef.firstep;
if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -1251,7 +1116,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
* from INI file & cap value
*/
value2 = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
aniState->iniDef.firstepLow;
if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -1267,7 +1132,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
chan->channel,
aniState->firstepLevel,
level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
+ ATH9K_ANI_FIRSTEP_LVL,
value,
aniState->iniDef.firstep);
ath_dbg(common, ANI,
@@ -1275,7 +1140,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
chan->channel,
aniState->firstepLevel,
level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
+ ATH9K_ANI_FIRSTEP_LVL,
value2,
aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
@@ -1300,7 +1165,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
* from INI file & cap value
*/
value = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
aniState->iniDef.cycpwrThr1;
if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -1316,7 +1181,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
* from INI file & cap value
*/
value2 = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
aniState->iniDef.cycpwrThr1Ext;
if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -1331,7 +1196,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
chan->channel,
aniState->spurImmunityLevel,
level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
value,
aniState->iniDef.cycpwrThr1);
ath_dbg(common, ANI,
@@ -1339,7 +1204,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
chan->channel,
aniState->spurImmunityLevel,
level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
value2,
aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
@@ -1367,9 +1232,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff ? "on" : "off",
+ aniState->ofdmWeakSigDetect ? "on" : "off",
aniState->firstepLevel,
- !aniState->mrcCCKOff ? "on" : "off",
+ aniState->mrcCCK ? "on" : "off",
aniState->listenTime,
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
@@ -1454,10 +1319,10 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
AR_PHY_EXT_TIMING5_CYCPWR_THR1);
/* these levels just got reset to defaults by the INI */
- aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
- aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
- aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
- aniState->mrcCCKOff = true; /* not available on pre AR9003 */
+ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
+ aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
+ aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ aniState->mrcCCK = false; /* not available on pre AR9003 */
}
static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
@@ -1545,11 +1410,8 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
priv_ops->do_getnf = ar5008_hw_do_getnf;
priv_ops->set_radar_params = ar5008_hw_set_radar_params;
- if (modparam_force_new_ani) {
- priv_ops->ani_control = ar5008_hw_ani_control_new;
- priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
- } else
- priv_ops->ani_control = ar5008_hw_ani_control_old;
+ priv_ops->ani_control = ar5008_hw_ani_control_new;
+ priv_ops->ani_cache_ini_regs = ar5008_hw_ani_cache_ini_regs;
if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index d9a69fc470cd..648da3e885e9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -21,110 +21,79 @@
#include "ar9002_initvals.h"
#include "ar9002_phy.h"
-int modparam_force_new_ani;
-module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
-MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
-
/* General hardware code for the A5008/AR9001/AR9002 hadware families */
static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
{
if (AR_SREV_9271(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
- ARRAY_SIZE(ar9271Modes_9271), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
- ARRAY_SIZE(ar9271Common_9271), 2);
- INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
- ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 5);
+ INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271);
+ INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg);
return;
}
if (ah->config.pcie_clock_req)
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_off_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
+ ar9280PciePhy_clkreq_off_L1_9280);
else
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9280PciePhy_clkreq_always_on_L1_9280,
- ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
+ ar9280PciePhy_clkreq_always_on_L1_9280);
+#ifdef CONFIG_PM_SLEEP
+ INIT_INI_ARRAY(&ah->iniPcieSerdesWow,
+ ar9280PciePhy_awow);
+#endif
if (AR_SREV_9287_11_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
- ARRAY_SIZE(ar9287Modes_9287_1_1), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
- ARRAY_SIZE(ar9287Common_9287_1_1), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1);
} else if (AR_SREV_9285_12_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
- ARRAY_SIZE(ar9285Modes_9285_1_2), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
- ARRAY_SIZE(ar9285Common_9285_1_2), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2);
} else if (AR_SREV_9280_20_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
- ARRAY_SIZE(ar9280Modes_9280_2), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
- ARRAY_SIZE(ar9280Common_9280_2), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2);
+ INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2);
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9280Modes_fast_clock_9280_2,
- ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
+ ar9280Modes_fast_clock_9280_2);
} else if (AR_SREV_9160_10_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
- ARRAY_SIZE(ar5416Modes_9160), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
- ARRAY_SIZE(ar5416Common_9160), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160);
if (AR_SREV_9160_11(ah)) {
INIT_INI_ARRAY(&ah->iniAddac,
- ar5416Addac_9160_1_1,
- ARRAY_SIZE(ar5416Addac_9160_1_1), 2);
+ ar5416Addac_9160_1_1);
} else {
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
- ARRAY_SIZE(ar5416Addac_9160), 2);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160);
}
} else if (AR_SREV_9100_OR_LATER(ah)) {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
- ARRAY_SIZE(ar5416Modes_9100), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
- ARRAY_SIZE(ar5416Common_9100), 2);
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
- ARRAY_SIZE(ar5416Bank6_9100), 3);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
- ARRAY_SIZE(ar5416Addac_9100), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100);
} else {
- INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
- ARRAY_SIZE(ar5416Modes), 5);
- INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
- ARRAY_SIZE(ar5416Common), 2);
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
- ARRAY_SIZE(ar5416Bank6TPC), 3);
- INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
- ARRAY_SIZE(ar5416Addac), 2);
+ INIT_INI_ARRAY(&ah->iniModes, ar5416Modes);
+ INIT_INI_ARRAY(&ah->iniCommon, ar5416Common);
+ INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC);
+ INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac);
}
if (!AR_SREV_9280_20_OR_LATER(ah)) {
/* Common for AR5416, AR913x, AR9160 */
- INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
- ARRAY_SIZE(ar5416BB_RfGain), 3);
-
- INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
- ARRAY_SIZE(ar5416Bank0), 2);
- INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
- ARRAY_SIZE(ar5416Bank1), 2);
- INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
- ARRAY_SIZE(ar5416Bank2), 2);
- INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
- ARRAY_SIZE(ar5416Bank3), 3);
- INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
- ARRAY_SIZE(ar5416Bank7), 2);
+ INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain);
+
+ INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0);
+ INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1);
+ INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2);
+ INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3);
+ INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7);
/* Common for AR5416, AR9160 */
if (!AR_SREV_9100(ah))
- INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
- ARRAY_SIZE(ar5416Bank6), 3);
+ INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6);
/* Common for AR913x, AR9160 */
if (!AR_SREV_5416(ah))
- INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
- ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
+ INIT_INI_ARRAY(&ah->iniBank6TPC,
+ ar5416Bank6TPC_9100);
}
/* iniAddac needs to be modified for these chips */
@@ -147,13 +116,9 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
}
if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniCckfirNormal,
- ar9287Common_normal_cck_fir_coeff_9287_1_1,
- ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_9287_1_1),
- 2);
+ ar9287Common_normal_cck_fir_coeff_9287_1_1);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
- ar9287Common_japan_2484_cck_fir_coeff_9287_1_1,
- ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_9287_1_1),
- 2);
+ ar9287Common_japan_2484_cck_fir_coeff_9287_1_1);
}
}
@@ -167,20 +132,16 @@ static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_13db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 5);
+ ar9280Modes_backoff_13db_rxgain_9280_2);
else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_backoff_23db_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 5);
+ ar9280Modes_backoff_23db_rxgain_9280_2);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 5);
+ ar9280Modes_original_rxgain_9280_2);
} else {
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9280Modes_original_rxgain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 5);
+ ar9280Modes_original_rxgain_9280_2);
}
}
@@ -190,16 +151,13 @@ static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
AR5416_EEP_MINOR_VER_19) {
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_high_power_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 5);
+ ar9280Modes_high_power_tx_gain_9280_2);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 5);
+ ar9280Modes_original_tx_gain_9280_2);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9280Modes_original_tx_gain_9280_2,
- ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 5);
+ ar9280Modes_original_tx_gain_9280_2);
}
}
@@ -207,12 +165,10 @@ static void ar9271_hw_init_txgain_ini(struct ath_hw *ah, u32 txgain_type)
{
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9271Modes_high_power_tx_gain_9271,
- ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 5);
+ ar9271Modes_high_power_tx_gain_9271);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9271Modes_normal_power_tx_gain_9271,
- ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 5);
+ ar9271Modes_normal_power_tx_gain_9271);
}
static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
@@ -221,8 +177,7 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
if (AR_SREV_9287_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9287Modes_rx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 5);
+ ar9287Modes_rx_gain_9287_1_1);
else if (AR_SREV_9280_20(ah))
ar9280_20_hw_init_rxgain_ini(ah);
@@ -230,8 +185,7 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
ar9271_hw_init_txgain_ini(ah, txgain_type);
} else if (AR_SREV_9287_11_OR_LATER(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9287Modes_tx_gain_9287_1_1,
- ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 5);
+ ar9287Modes_tx_gain_9287_1_1);
} else if (AR_SREV_9280_20(ah)) {
ar9280_20_hw_init_txgain_ini(ah, txgain_type);
} else if (AR_SREV_9285_12_OR_LATER(ah)) {
@@ -239,26 +193,18 @@ static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
if (AR_SREV_9285E_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_XE2_0_high_power,
- ARRAY_SIZE(
- ar9285Modes_XE2_0_high_power), 5);
+ ar9285Modes_XE2_0_high_power);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_high_power_tx_gain_9285_1_2,
- ARRAY_SIZE(
- ar9285Modes_high_power_tx_gain_9285_1_2), 5);
+ ar9285Modes_high_power_tx_gain_9285_1_2);
}
} else {
if (AR_SREV_9285E_20(ah)) {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_XE2_0_normal_power,
- ARRAY_SIZE(
- ar9285Modes_XE2_0_normal_power), 5);
+ ar9285Modes_XE2_0_normal_power);
} else {
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9285Modes_original_tx_gain_9285_1_2,
- ARRAY_SIZE(
- ar9285Modes_original_tx_gain_9285_1_2), 5);
+ ar9285Modes_original_tx_gain_9285_1_2);
}
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 4d18c66a6790..beb6162cf97c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -925,6 +925,20 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
{0x00004044, 0x00000000},
};
+static const u32 ar9280PciePhy_awow[][2] = {
+ /* Addr allmodes */
+ {0x00004040, 0x9248fd00},
+ {0x00004040, 0x24924924},
+ {0x00004040, 0xa8000019},
+ {0x00004040, 0x13160820},
+ {0x00004040, 0xe5980560},
+ {0x00004040, 0xc01dcffd},
+ {0x00004040, 0x1aaabe41},
+ {0x00004040, 0xbe105554},
+ {0x00004040, 0x00043007},
+ {0x00004044, 0x00000000},
+};
+
static const u32 ar9285Modes_9285_1_2[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 952cb2b4656b..89bf94d4d8a1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 9fdd70fcaf5b..84b558d126ca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -159,14 +159,11 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah,
}
}
- /* Do NF cal only at longer intervals */
- if (longcal) {
- /*
- * Get the value from the previous NF cal and update
- * history buffer.
- */
- ath9k_hw_getnf(ah, chan);
-
+ /*
+ * Do NF cal only at longer intervals. Get the value from
+ * the previous NF cal and update history buffer.
+ */
+ if (longcal && ath9k_hw_getnf(ah, chan)) {
/*
* Load the NF from history buffer of the current channel.
* NF is slow time-variant, so it is OK to use a historical
@@ -653,7 +650,6 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
}
static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
- u8 num_chains,
struct coeff *coeff,
bool is_reusable)
{
@@ -677,7 +673,9 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
}
/* Load the average of 2 passes */
- for (i = 0; i < num_chains; i++) {
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
nmeasurement = REG_READ_FIELD(ah,
AR_PHY_TX_IQCAL_STATUS_B0,
AR_PHY_CALIBRATED_GAINS_0);
@@ -767,16 +765,13 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
};
struct coeff coeff;
s32 iq_res[6];
- u8 num_chains = 0;
int i, im, j;
int nmeasurement;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (ah->txchainmask & (1 << i))
- num_chains++;
- }
+ if (!(ah->txchainmask & (1 << i)))
+ continue;
- for (i = 0; i < num_chains; i++) {
nmeasurement = REG_READ_FIELD(ah,
AR_PHY_TX_IQCAL_STATUS_B0,
AR_PHY_CALIBRATED_GAINS_0);
@@ -839,8 +834,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
coeff.phs_coeff[i][im] -= 128;
}
}
- ar9003_hw_tx_iqcal_load_avg_2_passes(ah, num_chains,
- &coeff, is_reusable);
+ ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable);
return;
@@ -901,7 +895,6 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
- bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -970,7 +963,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
} else if (caldata && !caldata->done_txiqcal_once)
run_agc_cal = true;
- if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal)
+ if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
ar9003_mci_init_cal_req(ah, &is_reusable);
if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) {
@@ -993,7 +986,7 @@ skip_tx_iqcal:
0, AH_WAIT_TIMEOUT);
}
- if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal)
+ if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
ar9003_mci_init_cal_done(ah);
if (rtt && !run_rtt_cal) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index dfb0441f406c..2588848f4a82 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -131,8 +131,9 @@ static const struct ar9300_eeprom ar9300_default = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -331,8 +332,9 @@ static const struct ar9300_eeprom ar9300_default = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -704,8 +706,9 @@ static const struct ar9300_eeprom ar9300_x113 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -904,8 +907,9 @@ static const struct ar9300_eeprom ar9300_x113 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -1278,8 +1282,9 @@ static const struct ar9300_eeprom ar9300_h112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -1478,8 +1483,9 @@ static const struct ar9300_eeprom ar9300_h112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -1852,8 +1858,9 @@ static const struct ar9300_eeprom ar9300_x112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80c080),
.papdRateMaskHt40 = LE32(0x0080c080),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -2052,8 +2059,9 @@ static const struct ar9300_eeprom ar9300_x112 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -2425,8 +2433,9 @@ static const struct ar9300_eeprom ar9300_h116 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0c80C080),
.papdRateMaskHt40 = LE32(0x0080C080),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
@@ -2625,8 +2634,9 @@ static const struct ar9300_eeprom ar9300_h116 = {
.thresh62 = 28,
.papdRateMaskHt20 = LE32(0x0cf0e0e0),
.papdRateMaskHt40 = LE32(0x6cf0e0e0),
+ .xlna_bias_strength = 0,
.futureModal = {
- 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext2 = {
@@ -2971,14 +2981,6 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return (pBase->txrxMask >> 4) & 0xf;
case EEP_RX_MASK:
return pBase->txrxMask & 0xf;
- case EEP_DRIVE_STRENGTH:
-#define AR9300_EEP_BASE_DRIV_STRENGTH 0x1
- return pBase->miscConfiguration & AR9300_EEP_BASE_DRIV_STRENGTH;
- case EEP_INTERNAL_REGULATOR:
- /* Bit 4 is internal regulator flag */
- return (pBase->featureEnable & 0x10) >> 4;
- case EEP_SWREG:
- return le32_to_cpu(pBase->swreg);
case EEP_PAPRD:
return !!(pBase->featureEnable & BIT(5));
case EEP_CHAIN_MASK_REDUCE:
@@ -2989,8 +2991,6 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
return eep->modalHeader2G.antennaGain;
- case EEP_QUICK_DROP:
- return pBase->miscConfiguration & BIT(1);
default:
return 0;
}
@@ -3178,7 +3178,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
mdata_size, length);
return -1;
}
- memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
+ memcpy(mptr, word + COMP_HDR_LEN, length);
ath_dbg(common, EEPROM,
"restored eeprom %d: uncompressed, length %d\n",
it, length);
@@ -3199,7 +3199,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
"restore eeprom %d: block, reference %d, length %d\n",
it, reference, length);
ar9300_uncompress_block(ah, mptr, mdata_size,
- (u8 *) (word + COMP_HDR_LEN), length);
+ (word + COMP_HDR_LEN), length);
break;
default:
ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
@@ -3260,10 +3260,20 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
int it;
u16 checksum, mchecksum;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ar9300_eeprom *eep;
eeprom_read_op read;
- if (ath9k_hw_use_flash(ah))
- return ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
+ if (ath9k_hw_use_flash(ah)) {
+ u8 txrx;
+
+ ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
+
+ /* check if eeprom contains valid data */
+ eep = (struct ar9300_eeprom *) mptr;
+ txrx = eep->baseEepHeader.txrxMask;
+ if (txrx != 0 && txrx != 0xff)
+ return 0;
+ }
word = kzalloc(2048, GFP_KERNEL);
if (!word)
@@ -3412,11 +3422,11 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
- len += ar9003_dump_modal_eeprom(buf, len, size,
+ len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader2G);
len += snprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
- len += ar9003_dump_modal_eeprom(buf, len, size,
+ len = ar9003_dump_modal_eeprom(buf, len, size,
&eep->modalHeader5G);
goto out;
}
@@ -3493,23 +3503,24 @@ static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
return 0;
}
-static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
+static struct ar9300_modal_eep_header *ar9003_modal_header(struct ath_hw *ah,
+ bool is2ghz)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
if (is2ghz)
- return eep->modalHeader2G.xpaBiasLvl;
+ return &eep->modalHeader2G;
else
- return eep->modalHeader5G.xpaBiasLvl;
+ return &eep->modalHeader5G;
}
static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
{
- int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
+ int bias = ar9003_modal_header(ah, is2ghz)->xpaBiasLvl;
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
- else if (AR_SREV_9462(ah))
+ else if (AR_SREV_9462(ah) || AR_SREV_9550(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
else {
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@@ -3521,57 +3532,26 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
}
}
-static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz)
+static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le16 val;
-
- if (is_2ghz)
- val = eep->modalHeader2G.switchcomspdt;
- else
- val = eep->modalHeader5G.switchcomspdt;
- return le16_to_cpu(val);
+ return le16_to_cpu(ar9003_modal_header(ah, is2ghz)->switchcomspdt);
}
static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le32 val;
-
- if (is2ghz)
- val = eep->modalHeader2G.antCtrlCommon;
- else
- val = eep->modalHeader5G.antCtrlCommon;
- return le32_to_cpu(val);
+ return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon);
}
static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le32 val;
-
- if (is2ghz)
- val = eep->modalHeader2G.antCtrlCommon2;
- else
- val = eep->modalHeader5G.antCtrlCommon2;
- return le32_to_cpu(val);
+ return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon2);
}
-static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
- int chain,
+static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le16 val = 0;
-
- if (chain >= 0 && chain < AR9300_MAX_CHAINS) {
- if (is2ghz)
- val = eep->modalHeader2G.antCtrlChain[chain];
- else
- val = eep->modalHeader5G.antCtrlChain[chain];
- }
-
+ __le16 val = ar9003_modal_header(ah, is2ghz)->antCtrlChain[chain];
return le16_to_cpu(val);
}
@@ -3591,6 +3571,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9462(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9462_ALL, value);
+ } else if (AR_SREV_9550(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
+ AR_SWITCH_TABLE_COM_AR9550_ALL, value);
} else
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_ALL, value);
@@ -3613,6 +3596,7 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
value = ar9003_switch_com_spdt_get(ah, is2ghz);
REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
AR_SWITCH_TABLE_COM_SPDT_ALL, value);
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_SPDT_ENABLE);
}
value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
@@ -3677,11 +3661,12 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
int drive_strength;
unsigned long reg;
- drive_strength = ath9k_hw_ar9300_get_eeprom(ah, EEP_DRIVE_STRENGTH);
-
+ drive_strength = pBase->miscConfiguration & BIT(0);
if (!drive_strength)
return;
@@ -3811,11 +3796,11 @@ static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
{
- int internal_regulator =
- ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
u32 reg_val;
- if (internal_regulator) {
+ if (pBase->featureEnable & BIT(4)) {
if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
int reg_pmu_set;
@@ -3859,11 +3844,11 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
return;
} else if (AR_SREV_9462(ah)) {
- reg_val = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ reg_val = le32_to_cpu(pBase->swreg);
REG_WRITE(ah, AR_PHY_PMU1, reg_val);
} else {
/* Internal regulator is ON. Write swreg register. */
- reg_val = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
+ reg_val = le32_to_cpu(pBase->swreg);
REG_WRITE(ah, AR_RTC_REG_CONTROL1,
REG_READ(ah, AR_RTC_REG_CONTROL1) &
(~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
@@ -3905,6 +3890,9 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
u8 tuning_caps_param = eep->baseEepHeader.params_for_tuning_caps[0];
+ if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
+ return;
+
if (eep->baseEepHeader.featureEnable & 0x40) {
tuning_caps_param &= 0x7f;
REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPINDAC,
@@ -3917,10 +3905,11 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP);
+ struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
+ int quick_drop;
s32 t[3], f[3] = {5180, 5500, 5785};
- if (!quick_drop)
+ if (!(pBase->miscConfiguration & BIT(1)))
return;
if (freq < 4000)
@@ -3934,13 +3923,11 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
}
-static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
+static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
u32 value;
- value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff :
- eep->modalHeader5G.txEndToXpaOff;
+ value = ar9003_modal_header(ah, is2ghz)->txEndToXpaOff;
REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value);
@@ -3948,19 +3935,63 @@ static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value);
}
+static void ar9003_hw_xpa_timing_control_apply(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u8 xpa_ctl;
+
+ if (!(eep->baseEepHeader.featureEnable & 0x80))
+ return;
+
+ if (!AR_SREV_9300(ah) && !AR_SREV_9340(ah) && !AR_SREV_9580(ah))
+ return;
+
+ xpa_ctl = ar9003_modal_header(ah, is2ghz)->txFrameToXpaOn;
+ if (is2ghz)
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON, xpa_ctl);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON, xpa_ctl);
+}
+
+static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u8 bias;
+
+ if (!(eep->baseEepHeader.featureEnable & 0x40))
+ return;
+
+ if (!AR_SREV_9300(ah))
+ return;
+
+ bias = ar9003_modal_header(ah, is2ghz)->xlna_bias_strength;
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4, AR_PHY_65NM_RXTX4_XLNA_BIAS,
+ bias & 0x3);
+ bias >>= 2;
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4, AR_PHY_65NM_RXTX4_XLNA_BIAS,
+ bias & 0x3);
+ bias >>= 2;
+ REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4, AR_PHY_65NM_RXTX4_XLNA_BIAS,
+ bias & 0x3);
+}
+
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
- ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
- ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
+ bool is2ghz = IS_CHAN_2GHZ(chan);
+ ar9003_hw_xpa_timing_control_apply(ah, is2ghz);
+ ar9003_hw_xpa_bias_level_apply(ah, is2ghz);
+ ar9003_hw_ant_ctrl_apply(ah, is2ghz);
ar9003_hw_drive_strength_apply(ah);
+ ar9003_hw_xlna_bias_strength_apply(ah, is2ghz);
ar9003_hw_atten_apply(ah, chan);
ar9003_hw_quick_drop_apply(ah, chan->channel);
- if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
+ if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9550(ah))
ar9003_hw_internal_regulator_apply(ah);
- if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
- ar9003_hw_apply_tuning_caps(ah);
- ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel);
+ ar9003_hw_apply_tuning_caps(ah);
+ ar9003_hw_txend_to_xpa_off_apply(ah, is2ghz);
}
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -5096,14 +5127,9 @@ s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
}
-u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz)
+u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is2ghz)
{
- struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
-
- if (is_2ghz)
- return eep->modalHeader2G.spurChans;
- else
- return eep->modalHeader5G.spurChans;
+ return ar9003_modal_header(ah, is2ghz)->spurChans;
}
unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 8396d150ce01..3a1ff55bceb9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -231,7 +231,8 @@ struct ar9300_modal_eep_header {
__le32 papdRateMaskHt20;
__le32 papdRateMaskHt40;
__le16 switchcomspdt;
- u8 futureModal[8];
+ u8 xlna_bias_strength;
+ u8 futureModal[7];
} __packed;
struct ar9300_cal_data_per_freq_op_loop {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index a0e3394b10dc..1e8a4da5952f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -21,6 +21,7 @@
#include "ar9340_initvals.h"
#include "ar9330_1p1_initvals.h"
#include "ar9330_1p2_initvals.h"
+#include "ar955x_1p0_initvals.h"
#include "ar9580_1p0_initvals.h"
#include "ar9462_2p0_initvals.h"
@@ -43,408 +44,310 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
ar9462_2p0_baseband_core_txfir_coeff_japan_2484
if (AR_SREV_9330_11(ah)) {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9331_1p1_mac_core,
- ARRAY_SIZE(ar9331_1p1_mac_core), 2);
+ ar9331_1p1_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9331_1p1_mac_postamble,
- ARRAY_SIZE(ar9331_1p1_mac_postamble), 5);
+ ar9331_1p1_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9331_1p1_baseband_core,
- ARRAY_SIZE(ar9331_1p1_baseband_core), 2);
+ ar9331_1p1_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9331_1p1_baseband_postamble,
- ARRAY_SIZE(ar9331_1p1_baseband_postamble), 5);
+ ar9331_1p1_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9331_1p1_radio_core,
- ARRAY_SIZE(ar9331_1p1_radio_core), 2);
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], NULL, 0, 0);
+ ar9331_1p1_radio_core);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9331_1p1_soc_preamble,
- ARRAY_SIZE(ar9331_1p1_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9331_1p1_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9331_1p1_soc_postamble,
- ARRAY_SIZE(ar9331_1p1_soc_postamble), 2);
+ ar9331_1p1_soc_postamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_rx_gain_1p1,
- ARRAY_SIZE(ar9331_common_rx_gain_1p1), 2);
+ ar9331_common_rx_gain_1p1);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_lowest_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p1),
- 5);
+ ar9331_modes_lowest_ob_db_tx_gain_1p1);
/* additional clock settings */
if (ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
- ar9331_1p1_xtal_25M,
- ARRAY_SIZE(ar9331_1p1_xtal_25M), 2);
+ ar9331_1p1_xtal_25M);
else
INIT_INI_ARRAY(&ah->iniAdditional,
- ar9331_1p1_xtal_40M,
- ARRAY_SIZE(ar9331_1p1_xtal_40M), 2);
+ ar9331_1p1_xtal_40M);
} else if (AR_SREV_9330_12(ah)) {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9331_1p2_mac_core,
- ARRAY_SIZE(ar9331_1p2_mac_core), 2);
+ ar9331_1p2_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9331_1p2_mac_postamble,
- ARRAY_SIZE(ar9331_1p2_mac_postamble), 5);
+ ar9331_1p2_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9331_1p2_baseband_core,
- ARRAY_SIZE(ar9331_1p2_baseband_core), 2);
+ ar9331_1p2_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9331_1p2_baseband_postamble,
- ARRAY_SIZE(ar9331_1p2_baseband_postamble), 5);
+ ar9331_1p2_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9331_1p2_radio_core,
- ARRAY_SIZE(ar9331_1p2_radio_core), 2);
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], NULL, 0, 0);
+ ar9331_1p2_radio_core);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9331_1p2_soc_preamble,
- ARRAY_SIZE(ar9331_1p2_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9331_1p2_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9331_1p2_soc_postamble,
- ARRAY_SIZE(ar9331_1p2_soc_postamble), 2);
+ ar9331_1p2_soc_postamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_rx_gain_1p2,
- ARRAY_SIZE(ar9331_common_rx_gain_1p2), 2);
+ ar9331_common_rx_gain_1p2);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_lowest_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p2),
- 5);
+ ar9331_modes_lowest_ob_db_tx_gain_1p2);
/* additional clock settings */
if (ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
- ar9331_1p2_xtal_25M,
- ARRAY_SIZE(ar9331_1p2_xtal_25M), 2);
+ ar9331_1p2_xtal_25M);
else
INIT_INI_ARRAY(&ah->iniAdditional,
- ar9331_1p2_xtal_40M,
- ARRAY_SIZE(ar9331_1p2_xtal_40M), 2);
+ ar9331_1p2_xtal_40M);
} else if (AR_SREV_9340(ah)) {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9340_1p0_mac_core,
- ARRAY_SIZE(ar9340_1p0_mac_core), 2);
+ ar9340_1p0_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9340_1p0_mac_postamble,
- ARRAY_SIZE(ar9340_1p0_mac_postamble), 5);
+ ar9340_1p0_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9340_1p0_baseband_core,
- ARRAY_SIZE(ar9340_1p0_baseband_core), 2);
+ ar9340_1p0_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9340_1p0_baseband_postamble,
- ARRAY_SIZE(ar9340_1p0_baseband_postamble), 5);
+ ar9340_1p0_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9340_1p0_radio_core,
- ARRAY_SIZE(ar9340_1p0_radio_core), 2);
+ ar9340_1p0_radio_core);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9340_1p0_radio_postamble,
- ARRAY_SIZE(ar9340_1p0_radio_postamble), 5);
+ ar9340_1p0_radio_postamble);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9340_1p0_soc_preamble,
- ARRAY_SIZE(ar9340_1p0_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9340_1p0_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9340_1p0_soc_postamble,
- ARRAY_SIZE(ar9340_1p0_soc_postamble), 5);
+ ar9340_1p0_soc_postamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9340Common_wo_xlna_rx_gain_table_1p0,
- ARRAY_SIZE(ar9340Common_wo_xlna_rx_gain_table_1p0),
- 5);
+ ar9340Common_wo_xlna_rx_gain_table_1p0);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_high_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_high_ob_db_tx_gain_table_1p0),
- 5);
+ ar9340Modes_high_ob_db_tx_gain_table_1p0);
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9340Modes_fast_clock_1p0,
- ARRAY_SIZE(ar9340Modes_fast_clock_1p0),
- 3);
+ ar9340Modes_fast_clock_1p0);
if (!ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
- ar9340_1p0_radio_core_40M,
- ARRAY_SIZE(ar9340_1p0_radio_core_40M),
- 2);
+ ar9340_1p0_radio_core_40M);
} else if (AR_SREV_9485_11(ah)) {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9485_1_1_mac_core,
- ARRAY_SIZE(ar9485_1_1_mac_core), 2);
+ ar9485_1_1_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9485_1_1_mac_postamble,
- ARRAY_SIZE(ar9485_1_1_mac_postamble), 5);
+ ar9485_1_1_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1,
- ARRAY_SIZE(ar9485_1_1), 2);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_1);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9485_1_1_baseband_core,
- ARRAY_SIZE(ar9485_1_1_baseband_core), 2);
+ ar9485_1_1_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9485_1_1_baseband_postamble,
- ARRAY_SIZE(ar9485_1_1_baseband_postamble), 5);
+ ar9485_1_1_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9485_1_1_radio_core,
- ARRAY_SIZE(ar9485_1_1_radio_core), 2);
+ ar9485_1_1_radio_core);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9485_1_1_radio_postamble,
- ARRAY_SIZE(ar9485_1_1_radio_postamble), 2);
+ ar9485_1_1_radio_postamble);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9485_1_1_soc_preamble,
- ARRAY_SIZE(ar9485_1_1_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
+ ar9485_1_1_soc_preamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9485Common_wo_xlna_rx_gain_1_1,
- ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1), 2);
+ ar9485Common_wo_xlna_rx_gain_1_1);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485_modes_lowest_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
- 5);
+ ar9485_modes_lowest_ob_db_tx_gain_1_1);
/* Load PCIE SERDES settings from INI */
/* Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9485_1_1_pcie_phy_clkreq_disable_L1,
- ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
- 2);
+ ar9485_1_1_pcie_phy_clkreq_disable_L1);
/* Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9485_1_1_pcie_phy_clkreq_disable_L1,
- ARRAY_SIZE(ar9485_1_1_pcie_phy_clkreq_disable_L1),
- 2);
+ ar9485_1_1_pcie_phy_clkreq_disable_L1);
} else if (AR_SREV_9462_20(ah)) {
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core,
- ARRAY_SIZE(ar9462_2p0_mac_core), 2);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9462_2p0_mac_postamble,
- ARRAY_SIZE(ar9462_2p0_mac_postamble), 5);
+ ar9462_2p0_mac_postamble);
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9462_2p0_baseband_core,
- ARRAY_SIZE(ar9462_2p0_baseband_core), 2);
+ ar9462_2p0_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9462_2p0_baseband_postamble,
- ARRAY_SIZE(ar9462_2p0_baseband_postamble), 5);
+ ar9462_2p0_baseband_postamble);
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9462_2p0_radio_core,
- ARRAY_SIZE(ar9462_2p0_radio_core), 2);
+ ar9462_2p0_radio_core);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9462_2p0_radio_postamble,
- ARRAY_SIZE(ar9462_2p0_radio_postamble), 5);
+ ar9462_2p0_radio_postamble);
INIT_INI_ARRAY(&ah->ini_radio_post_sys2ant,
- ar9462_2p0_radio_postamble_sys2ant,
- ARRAY_SIZE(ar9462_2p0_radio_postamble_sys2ant),
- 5);
+ ar9462_2p0_radio_postamble_sys2ant);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9462_2p0_soc_preamble,
- ARRAY_SIZE(ar9462_2p0_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9462_2p0_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9462_2p0_soc_postamble,
- ARRAY_SIZE(ar9462_2p0_soc_postamble), 5);
+ ar9462_2p0_soc_postamble);
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_rx_gain_table_2p0,
- ARRAY_SIZE(ar9462_common_rx_gain_table_2p0), 2);
+ ar9462_common_rx_gain_table_2p0);
/* Awake -> Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- PCIE_PLL_ON_CREQ_DIS_L1_2P0,
- ARRAY_SIZE(PCIE_PLL_ON_CREQ_DIS_L1_2P0),
- 2);
+ PCIE_PLL_ON_CREQ_DIS_L1_2P0);
/* Sleep -> Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- PCIE_PLL_ON_CREQ_DIS_L1_2P0,
- ARRAY_SIZE(PCIE_PLL_ON_CREQ_DIS_L1_2P0),
- 2);
+ PCIE_PLL_ON_CREQ_DIS_L1_2P0);
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9462_modes_fast_clock_2p0,
- ARRAY_SIZE(ar9462_modes_fast_clock_2p0), 3);
+ ar9462_modes_fast_clock_2p0);
INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
- AR9462_BB_CTX_COEFJ(2p0),
- ARRAY_SIZE(AR9462_BB_CTX_COEFJ(2p0)), 2);
+ AR9462_BB_CTX_COEFJ(2p0));
- INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ,
- ARRAY_SIZE(AR9462_BBC_TXIFR_COEFFJ), 2);
+ INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ);
+ } else if (AR_SREV_9550(ah)) {
+ /* mac */
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
+ ar955x_1p0_mac_core);
+ INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
+ ar955x_1p0_mac_postamble);
+
+ /* bb */
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
+ ar955x_1p0_baseband_core);
+ INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
+ ar955x_1p0_baseband_postamble);
+
+ /* radio */
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
+ ar955x_1p0_radio_core);
+ INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
+ ar955x_1p0_radio_postamble);
+
+ /* soc */
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
+ ar955x_1p0_soc_preamble);
+ INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
+ ar955x_1p0_soc_postamble);
+
+ /* rx/tx gain */
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar955x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ ar955x_1p0_common_wo_xlna_rx_gain_bounds);
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar955x_1p0_modes_xpa_tx_gain_table);
+ /* Fast clock modal settings */
+ INIT_INI_ARRAY(&ah->iniModesFastClock,
+ ar955x_1p0_modes_fast_clock);
} else if (AR_SREV_9580(ah)) {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9580_1p0_mac_core,
- ARRAY_SIZE(ar9580_1p0_mac_core), 2);
+ ar9580_1p0_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9580_1p0_mac_postamble,
- ARRAY_SIZE(ar9580_1p0_mac_postamble), 5);
+ ar9580_1p0_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9580_1p0_baseband_core,
- ARRAY_SIZE(ar9580_1p0_baseband_core), 2);
+ ar9580_1p0_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9580_1p0_baseband_postamble,
- ARRAY_SIZE(ar9580_1p0_baseband_postamble), 5);
+ ar9580_1p0_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9580_1p0_radio_core,
- ARRAY_SIZE(ar9580_1p0_radio_core), 2);
+ ar9580_1p0_radio_core);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9580_1p0_radio_postamble,
- ARRAY_SIZE(ar9580_1p0_radio_postamble), 5);
+ ar9580_1p0_radio_postamble);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9580_1p0_soc_preamble,
- ARRAY_SIZE(ar9580_1p0_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9580_1p0_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9580_1p0_soc_postamble,
- ARRAY_SIZE(ar9580_1p0_soc_postamble), 5);
+ ar9580_1p0_soc_postamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9580_1p0_rx_gain_table,
- ARRAY_SIZE(ar9580_1p0_rx_gain_table), 2);
+ ar9580_1p0_rx_gain_table);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9580_1p0_low_ob_db_tx_gain_table,
- ARRAY_SIZE(ar9580_1p0_low_ob_db_tx_gain_table),
- 5);
+ ar9580_1p0_low_ob_db_tx_gain_table);
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9580_1p0_modes_fast_clock,
- ARRAY_SIZE(ar9580_1p0_modes_fast_clock),
- 3);
+ ar9580_1p0_modes_fast_clock);
} else {
/* mac */
- INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
- ar9300_2p2_mac_core,
- ARRAY_SIZE(ar9300_2p2_mac_core), 2);
+ ar9300_2p2_mac_core);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
- ar9300_2p2_mac_postamble,
- ARRAY_SIZE(ar9300_2p2_mac_postamble), 5);
+ ar9300_2p2_mac_postamble);
/* bb */
- INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
- ar9300_2p2_baseband_core,
- ARRAY_SIZE(ar9300_2p2_baseband_core), 2);
+ ar9300_2p2_baseband_core);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
- ar9300_2p2_baseband_postamble,
- ARRAY_SIZE(ar9300_2p2_baseband_postamble), 5);
+ ar9300_2p2_baseband_postamble);
/* radio */
- INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
- ar9300_2p2_radio_core,
- ARRAY_SIZE(ar9300_2p2_radio_core), 2);
+ ar9300_2p2_radio_core);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
- ar9300_2p2_radio_postamble,
- ARRAY_SIZE(ar9300_2p2_radio_postamble), 5);
+ ar9300_2p2_radio_postamble);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
- ar9300_2p2_soc_preamble,
- ARRAY_SIZE(ar9300_2p2_soc_preamble), 2);
- INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
+ ar9300_2p2_soc_preamble);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
- ar9300_2p2_soc_postamble,
- ARRAY_SIZE(ar9300_2p2_soc_postamble), 5);
+ ar9300_2p2_soc_postamble);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 2);
+ ar9300Common_rx_gain_table_2p2);
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
- 5);
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
/* Load PCIE SERDES settings from INI */
/* Awake Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdes,
- ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
- ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
- 2);
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p2);
/* Sleep Setting */
INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
- ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
- ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
- 2);
+ ar9300PciePhy_pll_on_clkreq_disable_L1_2p2);
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
- ar9300Modes_fast_clock_2p2,
- ARRAY_SIZE(ar9300Modes_fast_clock_2p2),
- 3);
+ ar9300Modes_fast_clock_2p2);
}
}
@@ -452,146 +355,110 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_lowest_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p2),
- 5);
+ ar9331_modes_lowest_ob_db_tx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_lowest_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_lowest_ob_db_tx_gain_1p1),
- 5);
+ ar9331_modes_lowest_ob_db_tx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
+ ar9340Modes_lowest_ob_db_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485_modes_lowest_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
- 5);
+ ar9485_modes_lowest_ob_db_tx_gain_1_1);
+ else if (AR_SREV_9550(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar955x_1p0_modes_xpa_tx_gain_table);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9580_1p0_lowest_ob_db_tx_gain_table,
- ARRAY_SIZE(ar9580_1p0_lowest_ob_db_tx_gain_table),
- 5);
+ ar9580_1p0_lowest_ob_db_tx_gain_table);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_low_ob_db_tx_gain_table_2p0,
- ARRAY_SIZE(ar9462_modes_low_ob_db_tx_gain_table_2p0),
- 5);
+ ar9462_modes_low_ob_db_tx_gain_table_2p0);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
- 5);
+ ar9300Modes_lowest_ob_db_tx_gain_table_2p2);
}
static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_high_ob_db_tx_gain_1p2),
- 5);
+ ar9331_modes_high_ob_db_tx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_high_ob_db_tx_gain_1p1),
- 5);
+ ar9331_modes_high_ob_db_tx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
+ ar9340Modes_high_ob_db_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485Modes_high_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485Modes_high_ob_db_tx_gain_1_1),
- 5);
+ ar9485Modes_high_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9580_1p0_high_ob_db_tx_gain_table,
- ARRAY_SIZE(ar9580_1p0_high_ob_db_tx_gain_table),
- 5);
+ ar9580_1p0_high_ob_db_tx_gain_table);
+ else if (AR_SREV_9550(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar955x_1p0_modes_no_xpa_tx_gain_table);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9462_modes_high_ob_db_tx_gain_table_2p0,
- ARRAY_SIZE(ar9462_modes_high_ob_db_tx_gain_table_2p0),
- 5);
+ ar9462_modes_high_ob_db_tx_gain_table_2p0);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_high_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
- 5);
+ ar9300Modes_high_ob_db_tx_gain_table_2p2);
}
static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_low_ob_db_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_low_ob_db_tx_gain_1p2),
- 5);
+ ar9331_modes_low_ob_db_tx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_low_ob_db_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_low_ob_db_tx_gain_1p1),
- 5);
+ ar9331_modes_low_ob_db_tx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
+ ar9340Modes_low_ob_db_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485Modes_low_ob_db_tx_gain_1_1,
- ARRAY_SIZE(ar9485Modes_low_ob_db_tx_gain_1_1),
- 5);
+ ar9485Modes_low_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9580_1p0_low_ob_db_tx_gain_table,
- ARRAY_SIZE(ar9580_1p0_low_ob_db_tx_gain_table),
- 5);
+ ar9580_1p0_low_ob_db_tx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_low_ob_db_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
- 5);
+ ar9300Modes_low_ob_db_tx_gain_table_2p2);
}
static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_power_tx_gain_1p2,
- ARRAY_SIZE(ar9331_modes_high_power_tx_gain_1p2),
- 5);
+ ar9331_modes_high_power_tx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9331_modes_high_power_tx_gain_1p1,
- ARRAY_SIZE(ar9331_modes_high_power_tx_gain_1p1),
- 5);
+ ar9331_modes_high_power_tx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9340Modes_lowest_ob_db_tx_gain_table_1p0,
- ARRAY_SIZE(ar9340Modes_lowest_ob_db_tx_gain_table_1p0),
- 5);
+ ar9340Modes_high_power_tx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9485Modes_high_power_tx_gain_1_1,
- ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_1),
- 5);
+ ar9485Modes_high_power_tx_gain_1_1);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9580_1p0_high_power_tx_gain_table,
- ARRAY_SIZE(ar9580_1p0_high_power_tx_gain_table),
- 5);
+ ar9580_1p0_high_power_tx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesTxGain,
- ar9300Modes_high_power_tx_gain_table_2p2,
- ARRAY_SIZE(ar9300Modes_high_power_tx_gain_table_2p2),
- 5);
+ ar9300Modes_high_power_tx_gain_table_2p2);
+}
+
+static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
+{
+ if (AR_SREV_9340(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9340Modes_mixed_ob_db_tx_gain_table_1p0);
+ else if (AR_SREV_9580(ah))
+ INIT_INI_ARRAY(&ah->iniModesTxGain,
+ ar9580_1p0_mixed_ob_db_tx_gain_table);
}
static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
@@ -610,6 +477,9 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
case 3:
ar9003_tx_gain_table_mode3(ah);
break;
+ case 4:
+ ar9003_tx_gain_table_mode4(ah);
+ break;
}
}
@@ -617,86 +487,67 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_rx_gain_1p2,
- ARRAY_SIZE(ar9331_common_rx_gain_1p2),
- 2);
+ ar9331_common_rx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_rx_gain_1p1,
- ARRAY_SIZE(ar9331_common_rx_gain_1p1),
- 2);
+ ar9331_common_rx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9340Common_rx_gain_table_1p0,
- ARRAY_SIZE(ar9340Common_rx_gain_table_1p0),
- 2);
+ ar9340Common_rx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9485Common_wo_xlna_rx_gain_1_1,
- ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
- 2);
- else if (AR_SREV_9580(ah))
+ ar9485Common_wo_xlna_rx_gain_1_1);
+ else if (AR_SREV_9550(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar955x_1p0_common_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ ar955x_1p0_common_rx_gain_bounds);
+ } else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9580_1p0_rx_gain_table,
- ARRAY_SIZE(ar9580_1p0_rx_gain_table),
- 2);
+ ar9580_1p0_rx_gain_table);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_rx_gain_table_2p0,
- ARRAY_SIZE(ar9462_common_rx_gain_table_2p0),
- 2);
+ ar9462_common_rx_gain_table_2p0);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
- 2);
+ ar9300Common_rx_gain_table_2p2);
}
static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
{
if (AR_SREV_9330_12(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_wo_xlna_rx_gain_1p2,
- ARRAY_SIZE(ar9331_common_wo_xlna_rx_gain_1p2),
- 2);
+ ar9331_common_wo_xlna_rx_gain_1p2);
else if (AR_SREV_9330_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9331_common_wo_xlna_rx_gain_1p1,
- ARRAY_SIZE(ar9331_common_wo_xlna_rx_gain_1p1),
- 2);
+ ar9331_common_wo_xlna_rx_gain_1p1);
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9340Common_wo_xlna_rx_gain_table_1p0,
- ARRAY_SIZE(ar9340Common_wo_xlna_rx_gain_table_1p0),
- 2);
+ ar9340Common_wo_xlna_rx_gain_table_1p0);
else if (AR_SREV_9485_11(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9485Common_wo_xlna_rx_gain_1_1,
- ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
- 2);
+ ar9485Common_wo_xlna_rx_gain_1_1);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_wo_xlna_rx_gain_table_2p0,
- ARRAY_SIZE(ar9462_common_wo_xlna_rx_gain_table_2p0),
- 2);
- else if (AR_SREV_9580(ah))
+ ar9462_common_wo_xlna_rx_gain_table_2p0);
+ else if (AR_SREV_9550(ah)) {
+ INIT_INI_ARRAY(&ah->iniModesRxGain,
+ ar955x_1p0_common_wo_xlna_rx_gain_table);
+ INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
+ ar955x_1p0_common_wo_xlna_rx_gain_bounds);
+ } else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9580_1p0_wo_xlna_rx_gain_table,
- ARRAY_SIZE(ar9580_1p0_wo_xlna_rx_gain_table),
- 2);
+ ar9580_1p0_wo_xlna_rx_gain_table);
else
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9300Common_wo_xlna_rx_gain_table_2p2,
- ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
- 2);
+ ar9300Common_wo_xlna_rx_gain_table_2p2);
}
static void ar9003_rx_gain_table_mode2(struct ath_hw *ah)
{
if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
- ar9462_common_mixed_rx_gain_table_2p0,
- ARRAY_SIZE(ar9462_common_mixed_rx_gain_table_2p0), 2);
+ ar9462_common_mixed_rx_gain_table_2p0);
}
static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index d9e0824af093..78816b8b2173 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -181,11 +181,14 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
u32 mask2 = 0;
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
- u32 sync_cause = 0, async_cause;
+ u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ;
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ async_mask |= AR_INTR_ASYNC_MASK_MCI;
async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
- if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
+ if (async_cause & async_mask) {
if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
== AR_RTC_STATUS_ON)
isr = REG_READ(ah, AR_ISR);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index ffbb180f91e1..9a34fcaae3ff 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -35,31 +35,30 @@ static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
struct ath_common *common = ath9k_hw_common(ah);
while (time_out) {
- if (REG_READ(ah, address) & bit_position) {
- REG_WRITE(ah, address, bit_position);
-
- if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
- if (bit_position &
- AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
- ar9003_mci_reset_req_wakeup(ah);
-
- if (bit_position &
- (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
- AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
- REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
- AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
-
- REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
- AR_MCI_INTERRUPT_RX_MSG);
- }
- break;
- }
+ if (!(REG_READ(ah, address) & bit_position)) {
+ udelay(10);
+ time_out -= 10;
- udelay(10);
- time_out -= 10;
+ if (time_out < 0)
+ break;
+ else
+ continue;
+ }
+ REG_WRITE(ah, address, bit_position);
- if (time_out < 0)
+ if (address != AR_MCI_INTERRUPT_RX_MSG_RAW)
break;
+
+ if (bit_position & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+ ar9003_mci_reset_req_wakeup(ah);
+
+ if (bit_position & (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_RX_MSG);
+ break;
}
if (time_out <= 0) {
@@ -127,14 +126,13 @@ static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
- if (!mci->bt_version_known &&
- (mci->bt_state != MCI_BT_SLEEP)) {
- MCI_GPM_SET_TYPE_OPCODE(payload,
- MCI_GPM_COEX_AGENT,
- MCI_GPM_COEX_VERSION_QUERY);
- ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
- wait_done, true);
- }
+ if (mci->bt_version_known ||
+ (mci->bt_state == MCI_BT_SLEEP))
+ return;
+
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_VERSION_QUERY);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
}
static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
@@ -158,15 +156,14 @@ static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 *payload = &mci->wlan_channels[0];
- if ((mci->wlan_channels_update == true) &&
- (mci->bt_state != MCI_BT_SLEEP)) {
- MCI_GPM_SET_TYPE_OPCODE(payload,
- MCI_GPM_COEX_AGENT,
- MCI_GPM_COEX_WLAN_CHANNELS);
- ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
- wait_done, true);
- MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
- }
+ if (!mci->wlan_channels_update ||
+ (mci->bt_state == MCI_BT_SLEEP))
+ return;
+
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_WLAN_CHANNELS);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+ MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
}
static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
@@ -174,29 +171,30 @@ static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
- bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
- MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+ bool query_btinfo;
- if (mci->bt_state != MCI_BT_SLEEP) {
+ if (mci->bt_state == MCI_BT_SLEEP)
+ return;
- MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
- MCI_GPM_COEX_STATUS_QUERY);
+ query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_STATUS_QUERY);
- *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
-
- /*
- * If bt_status_query message is not sent successfully,
- * then need_flush_btinfo should be set again.
- */
- if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
- wait_done, true)) {
- if (query_btinfo)
- mci->need_flush_btinfo = true;
- }
+ *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+ /*
+ * If bt_status_query message is not sent successfully,
+ * then need_flush_btinfo should be set again.
+ */
+ if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true)) {
if (query_btinfo)
- mci->query_bt = false;
+ mci->need_flush_btinfo = true;
}
+
+ if (query_btinfo)
+ mci->query_bt = false;
}
static void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
@@ -241,73 +239,73 @@ static void ar9003_mci_prep_interface(struct ath_hw *ah)
ar9003_mci_remote_reset(ah, true);
ar9003_mci_send_req_wake(ah, true);
- if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
+ if (!ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500))
+ goto clear_redunt;
- mci->bt_state = MCI_BT_AWAKE;
+ mci->bt_state = MCI_BT_AWAKE;
- /*
- * we don't need to send more remote_reset at this moment.
- * If BT receive first remote_reset, then BT HW will
- * be cleaned up and will be able to receive req_wake
- * and BT HW will respond sys_waking.
- * In this case, WLAN will receive BT's HW sys_waking.
- * Otherwise, if BT SW missed initial remote_reset,
- * that remote_reset will still clean up BT MCI RX,
- * and the req_wake will wake BT up,
- * and BT SW will respond this req_wake with a remote_reset and
- * sys_waking. In this case, WLAN will receive BT's SW
- * sys_waking. In either case, BT's RX is cleaned up. So we
- * don't need to reply BT's remote_reset now, if any.
- * Similarly, if in any case, WLAN can receive BT's sys_waking,
- * that means WLAN's RX is also fine.
- */
- ar9003_mci_send_sys_waking(ah, true);
- udelay(10);
+ /*
+ * we don't need to send more remote_reset at this moment.
+ * If BT receive first remote_reset, then BT HW will
+ * be cleaned up and will be able to receive req_wake
+ * and BT HW will respond sys_waking.
+ * In this case, WLAN will receive BT's HW sys_waking.
+ * Otherwise, if BT SW missed initial remote_reset,
+ * that remote_reset will still clean up BT MCI RX,
+ * and the req_wake will wake BT up,
+ * and BT SW will respond this req_wake with a remote_reset and
+ * sys_waking. In this case, WLAN will receive BT's SW
+ * sys_waking. In either case, BT's RX is cleaned up. So we
+ * don't need to reply BT's remote_reset now, if any.
+ * Similarly, if in any case, WLAN can receive BT's sys_waking,
+ * that means WLAN's RX is also fine.
+ */
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(10);
- /*
- * Set BT priority interrupt value to be 0xff to
- * avoid having too many BT PRIORITY interrupts.
- */
- REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
- REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
- REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
- REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
- REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
+ /*
+ * Set BT priority interrupt value to be 0xff to
+ * avoid having too many BT PRIORITY interrupts.
+ */
+ REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
- /*
- * A contention reset will be received after send out
- * sys_waking. Also BT priority interrupt bits will be set.
- * Clear those bits before the next step.
- */
+ /*
+ * A contention reset will be received after send out
+ * sys_waking. Also BT priority interrupt bits will be set.
+ * Clear those bits before the next step.
+ */
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
- REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
- AR_MCI_INTERRUPT_BT_PRI);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, AR_MCI_INTERRUPT_BT_PRI);
- if (mci->is_2g) {
- ar9003_mci_send_lna_transfer(ah, true);
- udelay(5);
- }
+ if (mci->is_2g) {
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+ }
- if ((mci->is_2g && !mci->update_2g5g)) {
- if (ar9003_mci_wait_for_interrupt(ah,
- AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
- mci_timeout))
- ath_dbg(common, MCI,
- "MCI WLAN has control over the LNA & BT obeys it\n");
- else
- ath_dbg(common, MCI,
- "MCI BT didn't respond to LNA_TRANS\n");
- }
+ if ((mci->is_2g && !mci->update_2g5g)) {
+ if (ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+ mci_timeout))
+ ath_dbg(common, MCI,
+ "MCI WLAN has control over the LNA & BT obeys it\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT didn't respond to LNA_TRANS\n");
}
+clear_redunt:
/* Clear the extra redundant SYS_WAKING from BT */
if ((mci->bt_state == MCI_BT_AWAKE) &&
- (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
(REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
@@ -323,14 +321,13 @@ void ar9003_mci_set_full_sleep(struct ath_hw *ah)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE) &&
(mci->bt_state != MCI_BT_SLEEP) &&
!mci->halted_bt_gpm) {
ar9003_mci_send_coex_halt_bt_gpm(ah, true, true);
}
mci->ready = false;
- REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
}
static void ar9003_mci_disable_interrupt(struct ath_hw *ah)
@@ -487,7 +484,7 @@ static void ar9003_mci_sync_bt_state(struct ath_hw *ah)
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 cur_bt_state;
- cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+ cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP);
if (mci->bt_state != cur_bt_state)
mci->bt_state = cur_bt_state;
@@ -596,8 +593,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
if (!time_out)
break;
- offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
- &more_data);
+ offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
if (offset == MCI_GPM_INVALID)
continue;
@@ -615,9 +611,9 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
}
break;
}
- } else if ((recv_type == gpm_type) && (recv_opcode == gpm_opcode)) {
+ } else if ((recv_type == gpm_type) &&
+ (recv_opcode == gpm_opcode))
break;
- }
/*
* check if it's cal_grant
@@ -661,8 +657,7 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
time_out = 0;
while (more_data == MCI_GPM_MORE) {
- offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
- &more_data);
+ offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data);
if (offset == MCI_GPM_INVALID)
break;
@@ -731,38 +726,38 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!IS_CHAN_2GHZ(chan) || (mci_hw->bt_state != MCI_BT_SLEEP))
goto exit;
- if (ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
- ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
+ if (!ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) &&
+ !ar9003_mci_check_int(ah, AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE))
+ goto exit;
- /*
- * BT is sleeping. Check if BT wakes up during
- * WLAN calibration. If BT wakes up during
- * WLAN calibration, need to go through all
- * message exchanges again and recal.
- */
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
- AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
+ /*
+ * BT is sleeping. Check if BT wakes up during
+ * WLAN calibration. If BT wakes up during
+ * WLAN calibration, need to go through all
+ * message exchanges again and recal.
+ */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ (AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE));
- ar9003_mci_remote_reset(ah, true);
- ar9003_mci_send_sys_waking(ah, true);
- udelay(1);
+ ar9003_mci_remote_reset(ah, true);
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(1);
- if (IS_CHAN_2GHZ(chan))
- ar9003_mci_send_lna_transfer(ah, true);
+ if (IS_CHAN_2GHZ(chan))
+ ar9003_mci_send_lna_transfer(ah, true);
- mci_hw->bt_state = MCI_BT_AWAKE;
+ mci_hw->bt_state = MCI_BT_AWAKE;
- if (caldata) {
- caldata->done_txiqcal_once = false;
- caldata->done_txclcal_once = false;
- caldata->rtt_done = false;
- }
+ if (caldata) {
+ caldata->done_txiqcal_once = false;
+ caldata->done_txclcal_once = false;
+ caldata->rtt_done = false;
+ }
- if (!ath9k_hw_init_cal(ah, chan))
- return -EIO;
+ if (!ath9k_hw_init_cal(ah, chan))
+ return -EIO;
- }
exit:
ar9003_mci_enable_interrupt(ah);
return 0;
@@ -772,10 +767,6 @@ static void ar9003_mci_mute_bt(struct ath_hw *ah)
{
/* disable all MCI messages */
REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
- REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
/* wait pending HW messages to flush out */
@@ -798,29 +789,27 @@ static void ar9003_mci_osla_setup(struct ath_hw *ah, bool enable)
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 thresh;
- if (enable) {
- REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
- AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
- REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
- AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
-
- if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
- thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_AGGR_THRESH, thresh);
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
- } else {
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
- }
-
- REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
- } else {
+ if (!enable) {
REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ return;
}
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2, AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+ if (!(mci->config & ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+ thresh = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_AGGR_THRESH, thresh);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 1);
+ } else
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN, 0);
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN, 1);
}
void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
@@ -898,13 +887,16 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
udelay(100);
}
+ /* Check pending GPM msg before MCI Reset Rx */
+ ar9003_mci_check_gpm_offset(ah);
+
regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
REG_WRITE(ah, AR_MCI_COMMAND2, regval);
udelay(1);
regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
REG_WRITE(ah, AR_MCI_COMMAND2, regval);
- ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ ar9003_mci_get_next_gpm_offset(ah, true, NULL);
REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
(SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
@@ -943,26 +935,27 @@ static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
u32 new_flags, to_set, to_clear;
- if (mci->update_2g5g && (mci->bt_state != MCI_BT_SLEEP)) {
- if (mci->is_2g) {
- new_flags = MCI_2G_FLAGS;
- to_clear = MCI_2G_FLAGS_CLEAR_MASK;
- to_set = MCI_2G_FLAGS_SET_MASK;
- } else {
- new_flags = MCI_5G_FLAGS;
- to_clear = MCI_5G_FLAGS_CLEAR_MASK;
- to_set = MCI_5G_FLAGS_SET_MASK;
- }
+ if (!mci->update_2g5g || (mci->bt_state == MCI_BT_SLEEP))
+ return;
+
+ if (mci->is_2g) {
+ new_flags = MCI_2G_FLAGS;
+ to_clear = MCI_2G_FLAGS_CLEAR_MASK;
+ to_set = MCI_2G_FLAGS_SET_MASK;
+ } else {
+ new_flags = MCI_5G_FLAGS;
+ to_clear = MCI_5G_FLAGS_CLEAR_MASK;
+ to_set = MCI_5G_FLAGS_SET_MASK;
+ }
- if (to_clear)
- ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ if (to_clear)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
MCI_GPM_COEX_BT_FLAGS_CLEAR,
to_clear);
- if (to_set)
- ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ if (to_set)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
MCI_GPM_COEX_BT_FLAGS_SET,
to_set);
- }
}
static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
@@ -1014,38 +1007,36 @@ static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
}
}
-void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
{
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- if (mci->update_2g5g) {
- if (mci->is_2g) {
- ar9003_mci_send_2g5g_status(ah, true);
- ar9003_mci_send_lna_transfer(ah, true);
- udelay(5);
+ if (!mci->update_2g5g && !force)
+ return;
- REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
- AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
- REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
- AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ if (mci->is_2g) {
+ ar9003_mci_send_2g5g_status(ah, true);
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
- if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
- REG_SET_BIT(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
- }
- } else {
- ar9003_mci_send_lna_take(ah, true);
- udelay(5);
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+
+ if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
+ ar9003_mci_osla_setup(ah, true);
+ } else {
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(5);
- REG_SET_BIT(ah, AR_MCI_TX_CTRL,
- AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
- REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
- AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
- REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
- AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
- ar9003_mci_send_2g5g_status(ah, true);
- }
+ ar9003_mci_osla_setup(ah, false);
+ ar9003_mci_send_2g5g_status(ah, true);
}
}
@@ -1132,7 +1123,7 @@ void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable)
if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000)) {
ath_dbg(common, MCI, "MCI BT_CAL_GRANT received\n");
} else {
- is_reusable = false;
+ *is_reusable = false;
ath_dbg(common, MCI, "MCI BT_CAL_GRANT not received\n");
}
}
@@ -1173,11 +1164,10 @@ void ar9003_mci_cleanup(struct ath_hw *ah)
}
EXPORT_SYMBOL(ar9003_mci_cleanup);
-u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type)
{
- struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
- u32 value = 0, more_gpm = 0, gpm_ptr;
+ u32 value = 0;
u8 query_type;
switch (state_type) {
@@ -1190,81 +1180,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
}
value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
break;
- case MCI_STATE_INIT_GPM_OFFSET:
- value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
- mci->gpm_idx = value;
- break;
- case MCI_STATE_NEXT_GPM_OFFSET:
- case MCI_STATE_LAST_GPM_OFFSET:
- /*
- * This could be useful to avoid new GPM message interrupt which
- * may lead to spurious interrupt after power sleep, or multiple
- * entry of ath_mci_intr().
- * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
- * alleviate this effect, but clearing GPM RX interrupt bit is
- * safe, because whether this is called from hw or driver code
- * there must be an interrupt bit set/triggered initially
- */
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
- AR_MCI_INTERRUPT_RX_MSG_GPM);
-
- gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
- value = gpm_ptr;
-
- if (value == 0)
- value = mci->gpm_len - 1;
- else if (value >= mci->gpm_len) {
- if (value != 0xFFFF)
- value = 0;
- } else {
- value--;
- }
-
- if (value == 0xFFFF) {
- value = MCI_GPM_INVALID;
- more_gpm = MCI_GPM_NOMORE;
- } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
- if (gpm_ptr == mci->gpm_idx) {
- value = MCI_GPM_INVALID;
- more_gpm = MCI_GPM_NOMORE;
- } else {
- for (;;) {
- u32 temp_index;
-
- /* skip reserved GPM if any */
-
- if (value != mci->gpm_idx)
- more_gpm = MCI_GPM_MORE;
- else
- more_gpm = MCI_GPM_NOMORE;
-
- temp_index = mci->gpm_idx;
- mci->gpm_idx++;
-
- if (mci->gpm_idx >=
- mci->gpm_len)
- mci->gpm_idx = 0;
-
- if (ar9003_mci_is_gpm_valid(ah,
- temp_index)) {
- value = temp_index;
- break;
- }
-
- if (more_gpm == MCI_GPM_NOMORE) {
- value = MCI_GPM_INVALID;
- break;
- }
- }
- }
- if (p_data)
- *p_data = more_gpm;
- }
-
- if (value != MCI_GPM_INVALID)
- value <<= 4;
-
- break;
case MCI_STATE_LAST_SCHD_MSG_OFFSET:
value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
AR_MCI_RX_LAST_SCHD_MSG_INDEX);
@@ -1276,21 +1191,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
AR_MCI_RX_REMOTE_SLEEP) ?
MCI_BT_SLEEP : MCI_BT_AWAKE;
break;
- case MCI_STATE_CONT_RSSI_POWER:
- value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
- break;
- case MCI_STATE_CONT_PRIORITY:
- value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
- break;
- case MCI_STATE_CONT_TXRX:
- value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
- break;
- case MCI_STATE_BT:
- value = mci->bt_state;
- break;
- case MCI_STATE_SET_BT_SLEEP:
- mci->bt_state = MCI_BT_SLEEP;
- break;
case MCI_STATE_SET_BT_AWAKE:
mci->bt_state = MCI_BT_AWAKE;
ar9003_mci_send_coex_version_query(ah, true);
@@ -1299,7 +1199,7 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
if (mci->unhalt_bt_gpm)
ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
- ar9003_mci_2g5g_switch(ah, true);
+ ar9003_mci_2g5g_switch(ah, false);
break;
case MCI_STATE_SET_BT_CAL_START:
mci->bt_state = MCI_BT_CAL_START;
@@ -1323,34 +1223,6 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
case MCI_STATE_SEND_WLAN_COEX_VERSION:
ar9003_mci_send_coex_version_response(ah, true);
break;
- case MCI_STATE_SET_BT_COEX_VERSION:
- if (!p_data)
- ath_dbg(common, MCI,
- "MCI Set BT Coex version with NULL data!!\n");
- else {
- mci->bt_ver_major = (*p_data >> 8) & 0xff;
- mci->bt_ver_minor = (*p_data) & 0xff;
- mci->bt_version_known = true;
- ath_dbg(common, MCI, "MCI BT version set: %d.%d\n",
- mci->bt_ver_major, mci->bt_ver_minor);
- }
- break;
- case MCI_STATE_SEND_WLAN_CHANNELS:
- if (p_data) {
- if (((mci->wlan_channels[1] & 0xffff0000) ==
- (*(p_data + 1) & 0xffff0000)) &&
- (mci->wlan_channels[2] == *(p_data + 2)) &&
- (mci->wlan_channels[3] == *(p_data + 3)))
- break;
-
- mci->wlan_channels[0] = *p_data++;
- mci->wlan_channels[1] = *p_data++;
- mci->wlan_channels[2] = *p_data++;
- mci->wlan_channels[3] = *p_data++;
- }
- mci->wlan_channels_update = true;
- ar9003_mci_send_coex_wlan_channels(ah, true);
- break;
case MCI_STATE_SEND_VERSION_QUERY:
ar9003_mci_send_coex_version_query(ah, true);
break;
@@ -1358,38 +1230,16 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
break;
- case MCI_STATE_NEED_FLUSH_BT_INFO:
- /*
- * btcoex_hw.mci.unhalt_bt_gpm means whether it's
- * needed to send UNHALT message. It's set whenever
- * there's a request to send HALT message.
- * mci_halted_bt_gpm means whether HALT message is sent
- * out successfully.
- *
- * Checking (mci_unhalt_bt_gpm == false) instead of
- * checking (ah->mci_halted_bt_gpm == false) will make
- * sure currently is in UNHALT-ed mode and BT can
- * respond to status query.
- */
- value = (!mci->unhalt_bt_gpm &&
- mci->need_flush_btinfo) ? 1 : 0;
- if (p_data)
- mci->need_flush_btinfo =
- (*p_data != 0) ? true : false;
- break;
case MCI_STATE_RECOVER_RX:
ar9003_mci_prep_interface(ah);
mci->query_bt = true;
mci->need_flush_btinfo = true;
ar9003_mci_send_coex_wlan_channels(ah, true);
- ar9003_mci_2g5g_switch(ah, true);
+ ar9003_mci_2g5g_switch(ah, false);
break;
case MCI_STATE_NEED_FTP_STOMP:
value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
break;
- case MCI_STATE_NEED_TUNING:
- value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
- break;
default:
break;
}
@@ -1397,3 +1247,173 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
return value;
}
EXPORT_SYMBOL(ar9003_mci_state);
+
+void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ ath_dbg(common, MCI, "Give LNA and SPDT control to BT\n");
+
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(50);
+
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ mci->is_2g = false;
+ mci->update_2g5g = true;
+ ar9003_mci_send_2g5g_status(ah, true);
+
+ /* Force another 2g5g update at next scanning */
+ mci->update_2g5g = true;
+}
+
+void ar9003_mci_set_power_awake(struct ath_hw *ah)
+{
+ u32 btcoex_ctrl2, diag_sw;
+ int i;
+ u8 lna_ctrl, bt_sleep;
+
+ for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
+ btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2);
+ if (btcoex_ctrl2 != 0xdeadbeef)
+ break;
+ udelay(AH_TIME_QUANTUM);
+ }
+ REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23)));
+
+ for (i = 0; i < AH_WAIT_TIMEOUT; i++) {
+ diag_sw = REG_READ(ah, AR_DIAG_SW);
+ if (diag_sw != 0xdeadbeef)
+ break;
+ udelay(AH_TIME_QUANTUM);
+ }
+ REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18)));
+ lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3;
+ bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP;
+
+ REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2);
+ REG_WRITE(ah, AR_DIAG_SW, diag_sw);
+
+ if (bt_sleep && (lna_ctrl == 2)) {
+ REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1);
+ REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1);
+ udelay(50);
+ }
+}
+
+void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 offset;
+
+ /*
+ * This should only be called before "MAC Warm Reset" or "MCI Reset Rx".
+ */
+ offset = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ if (mci->gpm_idx == offset)
+ return;
+ ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n",
+ mci->gpm_idx, offset);
+ mci->query_bt = true;
+ mci->need_flush_btinfo = true;
+ mci->gpm_idx = 0;
+}
+
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 offset, more_gpm = 0, gpm_ptr;
+
+ if (first) {
+ gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ mci->gpm_idx = gpm_ptr;
+ return gpm_ptr;
+ }
+
+ /*
+ * This could be useful to avoid new GPM message interrupt which
+ * may lead to spurious interrupt after power sleep, or multiple
+ * entry of ath_mci_intr().
+ * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can
+ * alleviate this effect, but clearing GPM RX interrupt bit is
+ * safe, because whether this is called from hw or driver code
+ * there must be an interrupt bit set/triggered initially
+ */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM);
+
+ gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ offset = gpm_ptr;
+
+ if (!offset)
+ offset = mci->gpm_len - 1;
+ else if (offset >= mci->gpm_len) {
+ if (offset != 0xFFFF)
+ offset = 0;
+ } else {
+ offset--;
+ }
+
+ if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) {
+ offset = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+ goto out;
+ }
+ for (;;) {
+ u32 temp_index;
+
+ /* skip reserved GPM if any */
+
+ if (offset != mci->gpm_idx)
+ more_gpm = MCI_GPM_MORE;
+ else
+ more_gpm = MCI_GPM_NOMORE;
+
+ temp_index = mci->gpm_idx;
+ mci->gpm_idx++;
+
+ if (mci->gpm_idx >= mci->gpm_len)
+ mci->gpm_idx = 0;
+
+ if (ar9003_mci_is_gpm_valid(ah, temp_index)) {
+ offset = temp_index;
+ break;
+ }
+
+ if (more_gpm == MCI_GPM_NOMORE) {
+ offset = MCI_GPM_INVALID;
+ break;
+ }
+ }
+
+ if (offset != MCI_GPM_INVALID)
+ offset <<= 4;
+out:
+ if (more)
+ *more = more_gpm;
+
+ return offset;
+}
+EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset);
+
+void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ mci->bt_ver_major = major;
+ mci->bt_ver_minor = minor;
+ mci->bt_version_known = true;
+ ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+}
+EXPORT_SYMBOL(ar9003_mci_set_bt_version);
+
+void ar9003_mci_send_wlan_channels(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+}
+EXPORT_SYMBOL(ar9003_mci_send_wlan_channels);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
index 4842f6c06b8c..d33b8e128855 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -189,30 +189,18 @@ enum mci_bt_state {
/* Type of state query */
enum mci_state_type {
MCI_STATE_ENABLE,
- MCI_STATE_INIT_GPM_OFFSET,
- MCI_STATE_NEXT_GPM_OFFSET,
- MCI_STATE_LAST_GPM_OFFSET,
- MCI_STATE_BT,
- MCI_STATE_SET_BT_SLEEP,
MCI_STATE_SET_BT_AWAKE,
MCI_STATE_SET_BT_CAL_START,
MCI_STATE_SET_BT_CAL,
MCI_STATE_LAST_SCHD_MSG_OFFSET,
MCI_STATE_REMOTE_SLEEP,
- MCI_STATE_CONT_RSSI_POWER,
- MCI_STATE_CONT_PRIORITY,
- MCI_STATE_CONT_TXRX,
MCI_STATE_RESET_REQ_WAKE,
MCI_STATE_SEND_WLAN_COEX_VERSION,
- MCI_STATE_SET_BT_COEX_VERSION,
- MCI_STATE_SEND_WLAN_CHANNELS,
MCI_STATE_SEND_VERSION_QUERY,
MCI_STATE_SEND_STATUS_QUERY,
- MCI_STATE_NEED_FLUSH_BT_INFO,
MCI_STATE_SET_CONCUR_TX_PRI,
MCI_STATE_RECOVER_RX,
MCI_STATE_NEED_FTP_STOMP,
- MCI_STATE_NEED_TUNING,
MCI_STATE_DEBUG,
MCI_STATE_MAX
};
@@ -260,28 +248,26 @@ enum mci_gpm_coex_opcode {
bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
u32 *payload, u8 len, bool wait_done,
bool check_bt);
-u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type);
void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
u16 len, u32 sched_addr);
void ar9003_mci_cleanup(struct ath_hw *ah);
void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
u32 *rx_msg_intr);
-
+u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more);
+void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor);
+void ar9003_mci_send_wlan_channels(struct ath_hw *ah);
/*
* These functions are used by ath9k_hw.
*/
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
-static inline bool ar9003_mci_is_ready(struct ath_hw *ah)
-{
- return ah->btcoex_hw.mci.ready;
-}
void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep);
void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable);
void ar9003_mci_init_cal_done(struct ath_hw *ah);
void ar9003_mci_set_full_sleep(struct ath_hw *ah);
-void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force);
void ar9003_mci_check_bt(struct ath_hw *ah);
bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan);
int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
@@ -289,13 +275,12 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan,
void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
bool is_full_sleep);
void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked);
+void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah);
+void ar9003_mci_set_power_awake(struct ath_hw *ah);
+void ar9003_mci_check_gpm_offset(struct ath_hw *ah);
#else
-static inline bool ar9003_mci_is_ready(struct ath_hw *ah)
-{
- return false;
-}
static inline void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep)
{
}
@@ -330,6 +315,15 @@ static inline void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
}
+static inline void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah)
+{
+}
+static inline void ar9003_mci_set_power_awake(struct ath_hw *ah)
+{
+}
+static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah)
+{
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 3d400e8d6535..2c9f7d7ed4cc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -211,7 +211,7 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
- if (AR_SREV_9485(ah) || AR_SREV_9462(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9550(ah))
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
-3);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 11abb972be1f..e476f9f92ce3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -99,7 +99,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
channelSel = (freq * 4) / 120;
chan_frac = (((freq * 4) % 120) * 0x20000) / 120;
channelSel = (channelSel << 17) | chan_frac;
- } else if (AR_SREV_9340(ah)) {
+ } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
if (ah->is_clk_25mhz) {
u32 chan_frac;
@@ -113,11 +113,12 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
/* Set to 2G mode */
bMode = 1;
} else {
- if (AR_SREV_9340(ah) && ah->is_clk_25mhz) {
+ if ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) &&
+ ah->is_clk_25mhz) {
u32 chan_frac;
- channelSel = (freq * 2) / 75;
- chan_frac = (((freq * 2) % 75) * 0x20000) / 75;
+ channelSel = freq / 75;
+ chan_frac = ((freq % 75) * 0x20000) / 75;
channelSel = (channelSel << 17) | chan_frac;
} else {
channelSel = CHANSEL_5G(freq);
@@ -173,16 +174,15 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
int cur_bb_spur, negative = 0, cck_spur_freq;
int i;
int range, max_spur_cnts, synth_freq;
- u8 *spur_fbin_ptr = NULL;
+ u8 *spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah, IS_CHAN_2GHZ(chan));
/*
* Need to verify range +/- 10 MHz in control channel, otherwise spur
* is out-of-band and can be ignored.
*/
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) {
- spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah,
- IS_CHAN_2GHZ(chan));
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
+ AR_SREV_9550(ah)) {
if (spur_fbin_ptr[0] == 0) /* No spur */
return;
max_spur_cnts = 5;
@@ -207,7 +207,8 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
if (AR_SREV_9462(ah) && (i == 0 || i == 3))
continue;
negative = 0;
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
+ AR_SREV_9550(ah))
cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
IS_CHAN_2GHZ(chan));
else
@@ -620,6 +621,50 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
}
}
+static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
+ struct ath9k_channel *chan)
+{
+ int ret;
+
+ switch (chan->chanmode) {
+ case CHANNEL_A:
+ case CHANNEL_A_HT20:
+ if (chan->channel <= 5350)
+ ret = 1;
+ else if ((chan->channel > 5350) && (chan->channel <= 5600))
+ ret = 3;
+ else
+ ret = 5;
+ break;
+
+ case CHANNEL_A_HT40PLUS:
+ case CHANNEL_A_HT40MINUS:
+ if (chan->channel <= 5350)
+ ret = 2;
+ else if ((chan->channel > 5350) && (chan->channel <= 5600))
+ ret = 4;
+ else
+ ret = 6;
+ break;
+
+ case CHANNEL_G:
+ case CHANNEL_G_HT20:
+ case CHANNEL_B:
+ ret = 8;
+ break;
+
+ case CHANNEL_G_HT40PLUS:
+ case CHANNEL_G_HT40MINUS:
+ ret = 7;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static int ar9003_hw_process_ini(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -661,7 +706,22 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
}
REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
- REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+ if (AR_SREV_9550(ah))
+ REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
+ regWrites);
+
+ if (AR_SREV_9550(ah)) {
+ int modes_txgain_index;
+
+ modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
+ if (modes_txgain_index < 0)
+ return -EINVAL;
+
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modes_txgain_index,
+ regWrites);
+ } else {
+ REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+ }
/*
* For 5GHz channels requiring Fast Clock, apply
@@ -676,6 +736,10 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
if (chan->channel == 2484)
ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
+ if (AR_SREV_9462(ah))
+ REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
+ AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
+
ah->modes_index = modesIndex;
ar9003_hw_override_ini(ah);
ar9003_hw_set_channel_regs(ah, chan);
@@ -821,18 +885,18 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
- if (!on != aniState->ofdmWeakSigDetectOff) {
+ if (on != aniState->ofdmWeakSigDetect) {
ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
- !aniState->ofdmWeakSigDetectOff ?
+ aniState->ofdmWeakSigDetect ?
"on" : "off",
on ? "on" : "off");
if (on)
ah->stats.ast_ani_ofdmon++;
else
ah->stats.ast_ani_ofdmoff++;
- aniState->ofdmWeakSigDetectOff = !on;
+ aniState->ofdmWeakSigDetect = on;
}
break;
}
@@ -851,7 +915,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
* from INI file & cap value
*/
value = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
aniState->iniDef.firstep;
if (value < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -866,7 +930,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
* from INI file & cap value
*/
value2 = firstep_table[level] -
- firstep_table[ATH9K_ANI_FIRSTEP_LVL_NEW] +
+ firstep_table[ATH9K_ANI_FIRSTEP_LVL] +
aniState->iniDef.firstepLow;
if (value2 < ATH9K_SIG_FIRSTEP_SETTING_MIN)
value2 = ATH9K_SIG_FIRSTEP_SETTING_MIN;
@@ -882,7 +946,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
chan->channel,
aniState->firstepLevel,
level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
+ ATH9K_ANI_FIRSTEP_LVL,
value,
aniState->iniDef.firstep);
ath_dbg(common, ANI,
@@ -890,7 +954,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
chan->channel,
aniState->firstepLevel,
level,
- ATH9K_ANI_FIRSTEP_LVL_NEW,
+ ATH9K_ANI_FIRSTEP_LVL,
value2,
aniState->iniDef.firstepLow);
if (level > aniState->firstepLevel)
@@ -915,7 +979,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
* from INI file & cap value
*/
value = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
aniState->iniDef.cycpwrThr1;
if (value < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -931,7 +995,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
* from INI file & cap value
*/
value2 = cycpwrThr1_table[level] -
- cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL_NEW] +
+ cycpwrThr1_table[ATH9K_ANI_SPUR_IMMUNE_LVL] +
aniState->iniDef.cycpwrThr1Ext;
if (value2 < ATH9K_SIG_SPUR_IMM_SETTING_MIN)
value2 = ATH9K_SIG_SPUR_IMM_SETTING_MIN;
@@ -946,7 +1010,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
chan->channel,
aniState->spurImmunityLevel,
level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
value,
aniState->iniDef.cycpwrThr1);
ath_dbg(common, ANI,
@@ -954,7 +1018,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
chan->channel,
aniState->spurImmunityLevel,
level,
- ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
+ ATH9K_ANI_SPUR_IMMUNE_LVL,
value2,
aniState->iniDef.cycpwrThr1Ext);
if (level > aniState->spurImmunityLevel)
@@ -975,16 +1039,16 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_MRC_CCK_ENABLE, is_on);
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_MUX_REG, is_on);
- if (!is_on != aniState->mrcCCKOff) {
+ if (is_on != aniState->mrcCCK) {
ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
chan->channel,
- !aniState->mrcCCKOff ? "on" : "off",
+ aniState->mrcCCK ? "on" : "off",
is_on ? "on" : "off");
if (is_on)
ah->stats.ast_ani_ccklow++;
else
ah->stats.ast_ani_cckhigh++;
- aniState->mrcCCKOff = !is_on;
+ aniState->mrcCCK = is_on;
}
break;
}
@@ -998,9 +1062,9 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
- !aniState->ofdmWeakSigDetectOff ? "on" : "off",
+ aniState->ofdmWeakSigDetect ? "on" : "off",
aniState->firstepLevel,
- !aniState->mrcCCKOff ? "on" : "off",
+ aniState->mrcCCK ? "on" : "off",
aniState->listenTime,
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
@@ -1107,10 +1171,10 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
AR_PHY_EXT_CYCPWR_THR1);
/* these levels just got reset to defaults by the INI */
- aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL_NEW;
- aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL_NEW;
- aniState->ofdmWeakSigDetectOff = !ATH9K_ANI_USE_OFDM_WEAK_SIG;
- aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
+ aniState->spurImmunityLevel = ATH9K_ANI_SPUR_IMMUNE_LVL;
+ aniState->firstepLevel = ATH9K_ANI_FIRSTEP_LVL;
+ aniState->ofdmWeakSigDetect = ATH9K_ANI_USE_OFDM_WEAK_SIG;
+ aniState->mrcCCK = true;
}
static void ar9003_hw_set_radar_params(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 7268a48a92a1..7bfbaf065a43 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -633,11 +633,13 @@
#define AR_PHY_65NM_CH0_BIAS2 0x160c4
#define AR_PHY_65NM_CH0_BIAS4 0x160cc
#define AR_PHY_65NM_CH0_RXTX4 0x1610c
+#define AR_PHY_65NM_CH1_RXTX4 0x1650c
+#define AR_PHY_65NM_CH2_RXTX4 0x1690c
#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
((AR_SREV_9462(ah) ? 0x1628c : 0x16280)))
-#define AR_CH0_TOP_XPABIASLVL (0x300)
-#define AR_CH0_TOP_XPABIASLVL_S (8)
+#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
+#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : \
((AR_SREV_9485(ah) ? 0x1628c : 0x16294)))
@@ -650,6 +652,8 @@
#define AR_SWITCH_TABLE_COM_ALL_S (0)
#define AR_SWITCH_TABLE_COM_AR9462_ALL (0xffffff)
#define AR_SWITCH_TABLE_COM_AR9462_ALL_S (0)
+#define AR_SWITCH_TABLE_COM_AR9550_ALL (0xffffff)
+#define AR_SWITCH_TABLE_COM_AR9550_ALL_S (0)
#define AR_SWITCH_TABLE_COM_SPDT (0x00f00000)
#define AR_SWITCH_TABLE_COM_SPDT_ALL (0x0000fff0)
#define AR_SWITCH_TABLE_COM_SPDT_ALL_S (4)
@@ -820,18 +824,26 @@
#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
-#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
-#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
-#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
-#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
-#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
-#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
-#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
-#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
-#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000
-#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000
-#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24
+
+#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
+#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
+#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
+#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
+#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
+#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
+#define AR_PHY_SPECTRAL_SCAN_COUNT 0x0FFF0000
+#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x10000000
+#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 28
+#define AR_PHY_SPECTRAL_SCAN_PRIORITY 0x20000000
+#define AR_PHY_SPECTRAL_SCAN_PRIORITY_S 29
+#define AR_PHY_SPECTRAL_SCAN_USE_ERR5 0x40000000
+#define AR_PHY_SPECTRAL_SCAN_USE_ERR5_S 30
+#define AR_PHY_SPECTRAL_SCAN_COMPRESSED_RPT 0x80000000
+#define AR_PHY_SPECTRAL_SCAN_COMPRESSED_RPT_S 31
+
#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION 0x00000001
#define AR_PHY_RTT_CTRL_ENA_RADIO_RETENTION_S 0
@@ -866,6 +878,9 @@
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
+#define AR_PHY_65NM_RXTX4_XLNA_BIAS 0xC0000000
+#define AR_PHY_65NM_RXTX4_XLNA_BIAS_S 30
+
/*
* Channel 1 Register Map
*/
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index 1bd3a3d22101..6e1756bc3833 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -337,12 +337,7 @@ static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
{0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
};
-static const u32 ar9331_1p1_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
+#define ar9331_1p1_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
static const u32 ar9331_1p1_xtal_25M[][2] = {
/* Addr allmodes */
@@ -783,17 +778,7 @@ static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
{0x00016284, 0x14d3f000, 0x14d3f000, 0x14d3f000, 0x14d3f000},
};
-static const u32 ar9331_1p1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9331_1p1_mac_postamble ar9300_2p2_mac_postamble
static const u32 ar9331_1p1_soc_preamble[][2] = {
/* Addr allmodes */
@@ -1112,38 +1097,4 @@ static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
{0x00000000},
};
-static const u32 ar9331_1p1_chansel_xtal_25M[] = {
- 0x0101479e,
- 0x0101d027,
- 0x010258af,
- 0x0102e138,
- 0x010369c0,
- 0x0103f249,
- 0x01047ad1,
- 0x0105035a,
- 0x01058be2,
- 0x0106146b,
- 0x01069cf3,
- 0x0107257c,
- 0x0107ae04,
- 0x0108f5b2,
-};
-
-static const u32 ar9331_1p1_chansel_xtal_40M[] = {
- 0x00a0ccbe,
- 0x00a12213,
- 0x00a17769,
- 0x00a1ccbe,
- 0x00a22213,
- 0x00a27769,
- 0x00a2ccbe,
- 0x00a32213,
- 0x00a37769,
- 0x00a3ccbe,
- 0x00a42213,
- 0x00a47769,
- 0x00a4ccbe,
- 0x00a5998b,
-};
-
#endif /* INITVALS_9330_1P1_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 0e6ca0834b34..57ed8a112173 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -17,8 +18,8 @@
#ifndef INITVALS_9330_1P2_H
#define INITVALS_9330_1P2_H
-static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p2[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
{0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
{0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
@@ -102,8 +103,14 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p2[][5] = {
{0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
};
+#define ar9331_modes_high_power_tx_gain_1p2 ar9331_modes_high_ob_db_tx_gain_1p2
+
+#define ar9331_modes_low_ob_db_tx_gain_1p2 ar9331_modes_high_power_tx_gain_1p2
+
+#define ar9331_modes_lowest_ob_db_tx_gain_1p2 ar9331_modes_low_ob_db_tx_gain_1p2
+
static const u32 ar9331_1p2_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
{0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -147,191 +154,6 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
};
-static const u32 ar9331_modes_high_ob_db_tx_gain_1p2[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
- {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
- {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
- {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
- {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
- {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
- {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
- {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
- {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
- {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
- {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
- {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
- {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
- {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
- {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
- {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
- {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
- {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
- {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
- {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
- {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
- {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
- {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
- {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
- {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
- {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
-};
-
-static const u32 ar9331_modes_low_ob_db_tx_gain_1p2[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
- {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
- {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
- {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
- {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
- {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
- {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
- {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
- {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
- {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
- {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
- {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
- {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
- {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
- {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
- {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
- {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
- {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
- {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
- {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
- {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
- {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
- {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
- {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
- {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
- {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
-};
-
-static const u32 ar9331_1p2_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
-static const u32 ar9331_1p2_xtal_25M[][2] = {
- /* Addr allmodes */
- {0x00007038, 0x000002f8},
- {0x00008244, 0x0010f3d7},
- {0x0000824c, 0x0001e7ae},
- {0x0001609c, 0x0f508f29},
-};
-
static const u32 ar9331_1p2_radio_core[][2] = {
/* Addr allmodes */
{0x00016000, 0x36db6db6},
@@ -397,684 +219,24 @@ static const u32 ar9331_1p2_radio_core[][2] = {
{0x000163d4, 0x00000000},
};
-static const u32 ar9331_1p2_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
-};
+#define ar9331_1p2_baseband_core_txfir_coeff_japan_2484 ar9331_1p1_baseband_core_txfir_coeff_japan_2484
-static const u32 ar9331_common_wo_xlna_rx_gain_1p2[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00060005},
- {0x0000a004, 0x00810080},
- {0x0000a008, 0x00830082},
- {0x0000a00c, 0x00850084},
- {0x0000a010, 0x01820181},
- {0x0000a014, 0x01840183},
- {0x0000a018, 0x01880185},
- {0x0000a01c, 0x018a0189},
- {0x0000a020, 0x02850284},
- {0x0000a024, 0x02890288},
- {0x0000a028, 0x028b028a},
- {0x0000a02c, 0x03850384},
- {0x0000a030, 0x03890388},
- {0x0000a034, 0x038b038a},
- {0x0000a038, 0x038d038c},
- {0x0000a03c, 0x03910390},
- {0x0000a040, 0x03930392},
- {0x0000a044, 0x03950394},
- {0x0000a048, 0x00000396},
- {0x0000a04c, 0x00000000},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x28282828},
- {0x0000a084, 0x28282828},
- {0x0000a088, 0x28282828},
- {0x0000a08c, 0x28282828},
- {0x0000a090, 0x28282828},
- {0x0000a094, 0x24242428},
- {0x0000a098, 0x171e1e1e},
- {0x0000a09c, 0x02020b0b},
- {0x0000a0a0, 0x02020202},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x22072208},
- {0x0000a0c4, 0x22052206},
- {0x0000a0c8, 0x22032204},
- {0x0000a0cc, 0x22012202},
- {0x0000a0d0, 0x221f2200},
- {0x0000a0d4, 0x221d221e},
- {0x0000a0d8, 0x33023303},
- {0x0000a0dc, 0x33003301},
- {0x0000a0e0, 0x331e331f},
- {0x0000a0e4, 0x4402331d},
- {0x0000a0e8, 0x44004401},
- {0x0000a0ec, 0x441e441f},
- {0x0000a0f0, 0x55025503},
- {0x0000a0f4, 0x55005501},
- {0x0000a0f8, 0x551e551f},
- {0x0000a0fc, 0x6602551d},
- {0x0000a100, 0x66006601},
- {0x0000a104, 0x661e661f},
- {0x0000a108, 0x7703661d},
- {0x0000a10c, 0x77017702},
- {0x0000a110, 0x00007700},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x111f1100},
- {0x0000a148, 0x111d111e},
- {0x0000a14c, 0x111b111c},
- {0x0000a150, 0x22032204},
- {0x0000a154, 0x22012202},
- {0x0000a158, 0x221f2200},
- {0x0000a15c, 0x221d221e},
- {0x0000a160, 0x33013302},
- {0x0000a164, 0x331f3300},
- {0x0000a168, 0x4402331e},
- {0x0000a16c, 0x44004401},
- {0x0000a170, 0x441e441f},
- {0x0000a174, 0x55015502},
- {0x0000a178, 0x551f5500},
- {0x0000a17c, 0x6602551e},
- {0x0000a180, 0x66006601},
- {0x0000a184, 0x661e661f},
- {0x0000a188, 0x7703661d},
- {0x0000a18c, 0x77017702},
- {0x0000a190, 0x00007700},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000296},
-};
+#define ar9331_1p2_xtal_25M ar9331_1p1_xtal_25M
-static const u32 ar9331_1p2_baseband_core[][2] = {
- /* Addr allmodes */
- {0x00009800, 0xafe68e30},
- {0x00009804, 0xfd14e000},
- {0x00009808, 0x9c0a8f6b},
- {0x0000980c, 0x04800000},
- {0x00009814, 0x9280c00a},
- {0x00009818, 0x00000000},
- {0x0000981c, 0x00020028},
- {0x00009834, 0x5f3ca3de},
- {0x00009838, 0x0108ecff},
- {0x0000983c, 0x14750600},
- {0x00009880, 0x201fff00},
- {0x00009884, 0x00001042},
- {0x000098a4, 0x00200400},
- {0x000098b0, 0x32840bbe},
- {0x000098d0, 0x004b6a8e},
- {0x000098d4, 0x00000820},
- {0x000098dc, 0x00000000},
- {0x000098f0, 0x00000000},
- {0x000098f4, 0x00000000},
- {0x00009c04, 0x00000000},
- {0x00009c08, 0x03200000},
- {0x00009c0c, 0x00000000},
- {0x00009c10, 0x00000000},
- {0x00009c14, 0x00046384},
- {0x00009c18, 0x05b6b440},
- {0x00009c1c, 0x00b6b440},
- {0x00009d00, 0xc080a333},
- {0x00009d04, 0x40206c10},
- {0x00009d08, 0x009c4060},
- {0x00009d0c, 0x1883800a},
- {0x00009d10, 0x01834061},
- {0x00009d14, 0x00c00400},
- {0x00009d18, 0x00000000},
- {0x00009e08, 0x0038233c},
- {0x00009e24, 0x9927b515},
- {0x00009e28, 0x12ef0200},
- {0x00009e30, 0x06336f77},
- {0x00009e34, 0x6af6532f},
- {0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
- {0x00009e4c, 0x00001004},
- {0x00009e50, 0x00ff03f1},
- {0x00009fc0, 0x803e4788},
- {0x00009fc4, 0x0001efb5},
- {0x00009fcc, 0x40000014},
- {0x0000a20c, 0x00000000},
- {0x0000a220, 0x00000000},
- {0x0000a224, 0x00000000},
- {0x0000a228, 0x10002310},
- {0x0000a23c, 0x00000000},
- {0x0000a244, 0x0c000000},
- {0x0000a2a0, 0x00000001},
- {0x0000a2c0, 0x00000001},
- {0x0000a2c8, 0x00000000},
- {0x0000a2cc, 0x18c43433},
- {0x0000a2d4, 0x00000000},
- {0x0000a2dc, 0x00000000},
- {0x0000a2e0, 0x00000000},
- {0x0000a2e4, 0x00000000},
- {0x0000a2e8, 0x00000000},
- {0x0000a2ec, 0x00000000},
- {0x0000a2f0, 0x00000000},
- {0x0000a2f4, 0x00000000},
- {0x0000a2f8, 0x00000000},
- {0x0000a344, 0x00000000},
- {0x0000a34c, 0x00000000},
- {0x0000a350, 0x0000a000},
- {0x0000a364, 0x00000000},
- {0x0000a370, 0x00000000},
- {0x0000a390, 0x00000001},
- {0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
- {0x0000a3c0, 0x20202020},
- {0x0000a3c4, 0x22222220},
- {0x0000a3c8, 0x20200020},
- {0x0000a3cc, 0x20202020},
- {0x0000a3d0, 0x20202020},
- {0x0000a3d4, 0x20202020},
- {0x0000a3d8, 0x20202020},
- {0x0000a3dc, 0x20202020},
- {0x0000a3e0, 0x20202020},
- {0x0000a3e4, 0x20202020},
- {0x0000a3e8, 0x20202020},
- {0x0000a3ec, 0x20202020},
- {0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000006},
- {0x0000a3f8, 0x0cdbd380},
- {0x0000a3fc, 0x000f0f01},
- {0x0000a400, 0x8fa91f01},
- {0x0000a404, 0x00000000},
- {0x0000a408, 0x0e79e5c6},
- {0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739ce},
- {0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
- {0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
- {0x0000a428, 0x000001ce},
- {0x0000a42c, 0x1ce739ce},
- {0x0000a430, 0x1ce739ce},
- {0x0000a434, 0x00000000},
- {0x0000a438, 0x00001801},
- {0x0000a43c, 0x00000000},
- {0x0000a440, 0x00000000},
- {0x0000a444, 0x00000000},
- {0x0000a448, 0x04000000},
- {0x0000a44c, 0x00000001},
- {0x0000a450, 0x00010000},
- {0x0000a458, 0x00000000},
- {0x0000a640, 0x00000000},
- {0x0000a644, 0x3fad9d74},
- {0x0000a648, 0x0048060a},
- {0x0000a64c, 0x00003c37},
- {0x0000a670, 0x03020100},
- {0x0000a674, 0x09080504},
- {0x0000a678, 0x0d0c0b0a},
- {0x0000a67c, 0x13121110},
- {0x0000a680, 0x31301514},
- {0x0000a684, 0x35343332},
- {0x0000a688, 0x00000036},
- {0x0000a690, 0x00000838},
- {0x0000a7c0, 0x00000000},
- {0x0000a7c4, 0xfffffffc},
- {0x0000a7c8, 0x00000000},
- {0x0000a7cc, 0x00000000},
- {0x0000a7d0, 0x00000000},
- {0x0000a7d4, 0x00000004},
- {0x0000a7dc, 0x00000001},
-};
+#define ar9331_1p2_xtal_40M ar9331_1p1_xtal_40M
-static const u32 ar9331_modes_high_power_tx_gain_1p2[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a410, 0x000050d7, 0x000050d7, 0x000050d7, 0x000050d7},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x23000a00, 0x23000a00},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x27000a02, 0x27000a02},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2b000a04, 0x2b000a04},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x3f001620, 0x3f001620},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x41001621, 0x41001621},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x44001640, 0x44001640},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x46001641, 0x46001641},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x48001642, 0x48001642},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x4b001644, 0x4b001644},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4e001a81, 0x4e001a81},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x51001a83, 0x51001a83},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x54001c84, 0x54001c84},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x57001ce3, 0x57001ce3},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x5b001ce5, 0x5b001ce5},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5f001ce9, 0x5f001ce9},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x66001eec, 0x66001eec},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x66001eec, 0x66001eec},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x66001eec, 0x66001eec},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x66001eec, 0x66001eec},
- {0x0000a580, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a584, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a588, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a58c, 0x11062202, 0x11062202, 0x0b000200, 0x0b000200},
- {0x0000a590, 0x17022e00, 0x17022e00, 0x0f000202, 0x0f000202},
- {0x0000a594, 0x1d000ec2, 0x1d000ec2, 0x11000400, 0x11000400},
- {0x0000a598, 0x25020ec0, 0x25020ec0, 0x15000402, 0x15000402},
- {0x0000a59c, 0x2b020ec3, 0x2b020ec3, 0x19000404, 0x19000404},
- {0x0000a5a0, 0x2f001f04, 0x2f001f04, 0x1b000603, 0x1b000603},
- {0x0000a5a4, 0x35001fc4, 0x35001fc4, 0x1f000a02, 0x1f000a02},
- {0x0000a5a8, 0x3c022f04, 0x3c022f04, 0x23000a04, 0x23000a04},
- {0x0000a5ac, 0x41023e85, 0x41023e85, 0x26000a20, 0x26000a20},
- {0x0000a5b0, 0x48023ec6, 0x48023ec6, 0x2a000e20, 0x2a000e20},
- {0x0000a5b4, 0x4d023f01, 0x4d023f01, 0x2e000e22, 0x2e000e22},
- {0x0000a5b8, 0x53023f4b, 0x53023f4b, 0x31000e24, 0x31000e24},
- {0x0000a5bc, 0x5a027f09, 0x5a027f09, 0x34001640, 0x34001640},
- {0x0000a5c0, 0x5f027fc9, 0x5f027fc9, 0x38001660, 0x38001660},
- {0x0000a5c4, 0x6502feca, 0x6502feca, 0x3b001861, 0x3b001861},
- {0x0000a5c8, 0x6b02ff4a, 0x6b02ff4a, 0x3e001a81, 0x3e001a81},
- {0x0000a5cc, 0x7203feca, 0x7203feca, 0x42001a83, 0x42001a83},
- {0x0000a5d0, 0x7703ff0b, 0x7703ff0b, 0x44001c84, 0x44001c84},
- {0x0000a5d4, 0x7d06ffcb, 0x7d06ffcb, 0x48001ce3, 0x48001ce3},
- {0x0000a5d8, 0x8407ff0b, 0x8407ff0b, 0x4c001ce5, 0x4c001ce5},
- {0x0000a5dc, 0x8907ffcb, 0x8907ffcb, 0x50001ce9, 0x50001ce9},
- {0x0000a5e0, 0x900fff0b, 0x900fff0b, 0x54001ceb, 0x54001ceb},
- {0x0000a5e4, 0x960fffcb, 0x960fffcb, 0x56001eec, 0x56001eec},
- {0x0000a5e8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5ec, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f0, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f4, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5f8, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a5fc, 0x9c1fff0b, 0x9c1fff0b, 0x56001eec, 0x56001eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x02008501, 0x02008501, 0x02008501, 0x02008501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008802, 0x02008802},
- {0x0000a620, 0x0300c802, 0x0300c802, 0x0300c802, 0x0300c802},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x0300cc03, 0x0300cc03},
- {0x0000a628, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a62c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a630, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a634, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a638, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
- {0x0000a63c, 0x04011004, 0x04011004, 0x04011004, 0x04011004},
-};
+#define ar9331_1p2_baseband_core ar9331_1p1_baseband_core
-static const u32 ar9331_1p2_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9331_1p2_soc_postamble ar9331_1p1_soc_postamble
-static const u32 ar9331_1p2_soc_preamble[][2] = {
- /* Addr allmodes */
- {0x00007020, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000002f8},
-};
+#define ar9331_1p2_mac_postamble ar9331_1p1_mac_postamble
-static const u32 ar9331_1p2_xtal_40M[][2] = {
- /* Addr allmodes */
- {0x00007038, 0x000004c2},
- {0x00008244, 0x0010f400},
- {0x0000824c, 0x0001e800},
- {0x0001609c, 0x0b283f31},
-};
+#define ar9331_1p2_soc_preamble ar9331_1p1_soc_preamble
-static const u32 ar9331_1p2_mac_core[][2] = {
- /* Addr allmodes */
- {0x00000008, 0x00000000},
- {0x00000030, 0x00020085},
- {0x00000034, 0x00000005},
- {0x00000040, 0x00000000},
- {0x00000044, 0x00000000},
- {0x00000048, 0x00000008},
- {0x0000004c, 0x00000010},
- {0x00000050, 0x00000000},
- {0x00001040, 0x002ffc0f},
- {0x00001044, 0x002ffc0f},
- {0x00001048, 0x002ffc0f},
- {0x0000104c, 0x002ffc0f},
- {0x00001050, 0x002ffc0f},
- {0x00001054, 0x002ffc0f},
- {0x00001058, 0x002ffc0f},
- {0x0000105c, 0x002ffc0f},
- {0x00001060, 0x002ffc0f},
- {0x00001064, 0x002ffc0f},
- {0x000010f0, 0x00000100},
- {0x00001270, 0x00000000},
- {0x000012b0, 0x00000000},
- {0x000012f0, 0x00000000},
- {0x0000143c, 0x00000000},
- {0x0000147c, 0x00000000},
- {0x00008000, 0x00000000},
- {0x00008004, 0x00000000},
- {0x00008008, 0x00000000},
- {0x0000800c, 0x00000000},
- {0x00008018, 0x00000000},
- {0x00008020, 0x00000000},
- {0x00008038, 0x00000000},
- {0x0000803c, 0x00000000},
- {0x00008040, 0x00000000},
- {0x00008044, 0x00000000},
- {0x00008048, 0x00000000},
- {0x0000804c, 0xffffffff},
- {0x00008054, 0x00000000},
- {0x00008058, 0x00000000},
- {0x0000805c, 0x000fc78f},
- {0x00008060, 0x0000000f},
- {0x00008064, 0x00000000},
- {0x00008070, 0x00000310},
- {0x00008074, 0x00000020},
- {0x00008078, 0x00000000},
- {0x0000809c, 0x0000000f},
- {0x000080a0, 0x00000000},
- {0x000080a4, 0x02ff0000},
- {0x000080a8, 0x0e070605},
- {0x000080ac, 0x0000000d},
- {0x000080b0, 0x00000000},
- {0x000080b4, 0x00000000},
- {0x000080b8, 0x00000000},
- {0x000080bc, 0x00000000},
- {0x000080c0, 0x2a800000},
- {0x000080c4, 0x06900168},
- {0x000080c8, 0x13881c20},
- {0x000080cc, 0x01f40000},
- {0x000080d0, 0x00252500},
- {0x000080d4, 0x00a00000},
- {0x000080d8, 0x00400000},
- {0x000080dc, 0x00000000},
- {0x000080e0, 0xffffffff},
- {0x000080e4, 0x0000ffff},
- {0x000080e8, 0x3f3f3f3f},
- {0x000080ec, 0x00000000},
- {0x000080f0, 0x00000000},
- {0x000080f4, 0x00000000},
- {0x000080fc, 0x00020000},
- {0x00008100, 0x00000000},
- {0x00008108, 0x00000052},
- {0x0000810c, 0x00000000},
- {0x00008110, 0x00000000},
- {0x00008114, 0x000007ff},
- {0x00008118, 0x000000aa},
- {0x0000811c, 0x00003210},
- {0x00008124, 0x00000000},
- {0x00008128, 0x00000000},
- {0x0000812c, 0x00000000},
- {0x00008130, 0x00000000},
- {0x00008134, 0x00000000},
- {0x00008138, 0x00000000},
- {0x0000813c, 0x0000ffff},
- {0x00008144, 0xffffffff},
- {0x00008168, 0x00000000},
- {0x0000816c, 0x00000000},
- {0x00008170, 0x18486200},
- {0x00008174, 0x33332210},
- {0x00008178, 0x00000000},
- {0x0000817c, 0x00020000},
- {0x000081c0, 0x00000000},
- {0x000081c4, 0x33332210},
- {0x000081c8, 0x00000000},
- {0x000081cc, 0x00000000},
- {0x000081d4, 0x00000000},
- {0x000081ec, 0x00000000},
- {0x000081f0, 0x00000000},
- {0x000081f4, 0x00000000},
- {0x000081f8, 0x00000000},
- {0x000081fc, 0x00000000},
- {0x00008240, 0x00100000},
- {0x00008248, 0x00000800},
- {0x00008250, 0x00000000},
- {0x00008254, 0x00000000},
- {0x00008258, 0x00000000},
- {0x0000825c, 0x40000000},
- {0x00008260, 0x00080922},
- {0x00008264, 0x9d400010},
- {0x00008268, 0xffffffff},
- {0x0000826c, 0x0000ffff},
- {0x00008270, 0x00000000},
- {0x00008274, 0x40000000},
- {0x00008278, 0x003e4180},
- {0x0000827c, 0x00000004},
- {0x00008284, 0x0000002c},
- {0x00008288, 0x0000002c},
- {0x0000828c, 0x000000ff},
- {0x00008294, 0x00000000},
- {0x00008298, 0x00000000},
- {0x0000829c, 0x00000000},
- {0x00008300, 0x00000140},
- {0x00008314, 0x00000000},
- {0x0000831c, 0x0000010d},
- {0x00008328, 0x00000000},
- {0x0000832c, 0x00000007},
- {0x00008330, 0x00000302},
- {0x00008334, 0x00000700},
- {0x00008338, 0x00ff0000},
- {0x0000833c, 0x02400000},
- {0x00008340, 0x000107ff},
- {0x00008344, 0xaa48105b},
- {0x00008348, 0x008f0000},
- {0x0000835c, 0x00000000},
- {0x00008360, 0xffffffff},
- {0x00008364, 0xffffffff},
- {0x00008368, 0x00000000},
- {0x00008370, 0x00000000},
- {0x00008374, 0x000000ff},
- {0x00008378, 0x00000000},
- {0x0000837c, 0x00000000},
- {0x00008380, 0xffffffff},
- {0x00008384, 0xffffffff},
- {0x00008390, 0xffffffff},
- {0x00008394, 0xffffffff},
- {0x00008398, 0x00000000},
- {0x0000839c, 0x00000000},
- {0x000083a0, 0x00000000},
- {0x000083a4, 0x0000fa14},
- {0x000083a8, 0x000f0c00},
- {0x000083ac, 0x33332210},
- {0x000083b0, 0x33332210},
- {0x000083b4, 0x33332210},
- {0x000083b8, 0x33332210},
- {0x000083bc, 0x00000000},
- {0x000083c0, 0x00000000},
- {0x000083c4, 0x00000000},
- {0x000083c8, 0x00000000},
- {0x000083cc, 0x00000200},
- {0x000083d0, 0x000301ff},
-};
+#define ar9331_1p2_mac_core ar9331_1p1_mac_core
-static const u32 ar9331_common_rx_gain_1p2[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x01800082},
- {0x0000a014, 0x01820181},
- {0x0000a018, 0x01840183},
- {0x0000a01c, 0x01880185},
- {0x0000a020, 0x018a0189},
- {0x0000a024, 0x02850284},
- {0x0000a028, 0x02890288},
- {0x0000a02c, 0x03850384},
- {0x0000a030, 0x03890388},
- {0x0000a034, 0x038b038a},
- {0x0000a038, 0x038d038c},
- {0x0000a03c, 0x03910390},
- {0x0000a040, 0x03930392},
- {0x0000a044, 0x03950394},
- {0x0000a048, 0x00000396},
- {0x0000a04c, 0x00000000},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x28282828},
- {0x0000a084, 0x28282828},
- {0x0000a088, 0x28282828},
- {0x0000a08c, 0x28282828},
- {0x0000a090, 0x28282828},
- {0x0000a094, 0x21212128},
- {0x0000a098, 0x171c1c1c},
- {0x0000a09c, 0x02020212},
- {0x0000a0a0, 0x00000202},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x111f1100},
- {0x0000a0c8, 0x111d111e},
- {0x0000a0cc, 0x111b111c},
- {0x0000a0d0, 0x22032204},
- {0x0000a0d4, 0x22012202},
- {0x0000a0d8, 0x221f2200},
- {0x0000a0dc, 0x221d221e},
- {0x0000a0e0, 0x33013302},
- {0x0000a0e4, 0x331f3300},
- {0x0000a0e8, 0x4402331e},
- {0x0000a0ec, 0x44004401},
- {0x0000a0f0, 0x441e441f},
- {0x0000a0f4, 0x55015502},
- {0x0000a0f8, 0x551f5500},
- {0x0000a0fc, 0x6602551e},
- {0x0000a100, 0x66006601},
- {0x0000a104, 0x661e661f},
- {0x0000a108, 0x7703661d},
- {0x0000a10c, 0x77017702},
- {0x0000a110, 0x00007700},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x111f1100},
- {0x0000a148, 0x111d111e},
- {0x0000a14c, 0x111b111c},
- {0x0000a150, 0x22032204},
- {0x0000a154, 0x22012202},
- {0x0000a158, 0x221f2200},
- {0x0000a15c, 0x221d221e},
- {0x0000a160, 0x33013302},
- {0x0000a164, 0x331f3300},
- {0x0000a168, 0x4402331e},
- {0x0000a16c, 0x44004401},
- {0x0000a170, 0x441e441f},
- {0x0000a174, 0x55015502},
- {0x0000a178, 0x551f5500},
- {0x0000a17c, 0x6602551e},
- {0x0000a180, 0x66006601},
- {0x0000a184, 0x661e661f},
- {0x0000a188, 0x7703661d},
- {0x0000a18c, 0x77017702},
- {0x0000a190, 0x00007700},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000296},
-};
+#define ar9331_common_wo_xlna_rx_gain_1p2 ar9331_common_wo_xlna_rx_gain_1p1
+
+#define ar9331_common_rx_gain_1p2 ar9485_common_rx_gain_1_1
#endif /* INITVALS_9330_1P2_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index 815a8af1beef..1d8235e19f0f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2011 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,16 +19,16 @@
#define INITVALS_9340_H
static const u32 ar9340_1p0_radio_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800},
- {0x0001610c, 0x08000000, 0x08000000, 0x00000000, 0x00000000},
+ {0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016140, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
- {0x0001650c, 0x08000000, 0x08000000, 0x00000000, 0x00000000},
+ {0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
{0x00016540, 0x10804000, 0x10804000, 0x50804000, 0x50804000},
};
static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -99,21 +100,10 @@ static const u32 ar9340Modes_lowest_ob_db_tx_gain_table_1p0[][5] = {
{0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
};
-static const u32 ar9340Modes_fast_clock_1p0[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x00001030, 0x00000268, 0x000004d0},
- {0x00001070, 0x0000018c, 0x00000318},
- {0x000010b0, 0x00000fd0, 0x00001fa0},
- {0x00008014, 0x044c044c, 0x08980898},
- {0x0000801c, 0x148ec02b, 0x148ec057},
- {0x00008318, 0x000044c0, 0x00008980},
- {0x00009e00, 0x03721821, 0x03721821},
- {0x0000a230, 0x0000000b, 0x00000016},
- {0x0000a254, 0x00000898, 0x00001130},
-};
+#define ar9340Modes_fast_clock_1p0 ar9300Modes_fast_clock_2p2
static const u32 ar9340_1p0_radio_core[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00016000, 0x36db6db6},
{0x00016004, 0x6db6db40},
{0x00016008, 0x73f00000},
@@ -146,15 +136,13 @@ static const u32 ar9340_1p0_radio_core[][2] = {
{0x00016100, 0x04cb0001},
{0x00016104, 0xfff80000},
{0x00016108, 0x00080010},
- {0x0001610c, 0x00000000},
{0x00016140, 0x50804008},
{0x00016144, 0x01884080},
{0x00016148, 0x000080c0},
{0x00016280, 0x01000015},
- {0x00016284, 0x05530000},
+ {0x00016284, 0x15530000},
{0x00016288, 0x00318000},
{0x0001628c, 0x50000000},
- {0x00016290, 0x4080294f},
{0x00016380, 0x00000000},
{0x00016384, 0x00000000},
{0x00016388, 0x00800700},
@@ -219,52 +207,43 @@ static const u32 ar9340_1p0_radio_core[][2] = {
};
static const u32 ar9340_1p0_radio_core_40M[][2] = {
+ /* Addr allmodes */
{0x0001609c, 0x02566f3a},
{0x000160ac, 0xa4647c00},
{0x000160b0, 0x01885f5a},
};
-static const u32 ar9340_1p0_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9340_1p0_mac_postamble ar9300_2p2_mac_postamble
-static const u32 ar9340_1p0_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
-};
+#define ar9340_1p0_soc_postamble ar9300_2p2_soc_postamble
static const u32 ar9340_1p0_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
{0x00009820, 0x206a022e, 0x206a022e, 0x206a022e, 0x206a022e},
{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
- {0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
- {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e, 0x7ec88d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
- {0x0000a204, 0x00003fc0, 0x00003fc4, 0x00003fc4, 0x00003fc0},
+ {0x0000a204, 0x00003ec0, 0x00003ec4, 0x00003ec4, 0x00003ec0},
{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
{0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
{0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
@@ -277,11 +256,11 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
{0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
{0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae04, 0x00180000, 0x00180000, 0x00180000, 0x00180000},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
@@ -289,21 +268,21 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
};
static const u32 ar9340_1p0_baseband_core[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00009800, 0xafe68e30},
{0x00009804, 0xfd14e000},
{0x00009808, 0x9c0a9f6b},
{0x0000980c, 0x04900000},
- {0x00009814, 0xb280c00a},
+ {0x00009814, 0x3280c00a},
{0x00009818, 0x00000000},
{0x0000981c, 0x00020028},
- {0x00009834, 0x5f3ca3de},
+ {0x00009834, 0x6400a190},
{0x00009838, 0x0108ecff},
- {0x0000983c, 0x14750600},
+ {0x0000983c, 0x14000600},
{0x00009880, 0x201fff00},
{0x00009884, 0x00001042},
{0x000098a4, 0x00200400},
- {0x000098b0, 0x52440bbe},
+ {0x000098b0, 0x32840bbe},
{0x000098d0, 0x004b6a8e},
{0x000098d4, 0x00000820},
{0x000098dc, 0x00000000},
@@ -329,7 +308,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e3c, 0xcf946222},
{0x00009e40, 0x0d261820},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
@@ -342,8 +320,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a220, 0x00000000},
{0x0000a224, 0x00000000},
{0x0000a228, 0x10002310},
- {0x0000a22c, 0x01036a1e},
- {0x0000a234, 0x10000fff},
{0x0000a23c, 0x00000000},
{0x0000a244, 0x0c000000},
{0x0000a2a0, 0x00000001},
@@ -351,10 +327,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a2c8, 0x00000000},
{0x0000a2cc, 0x18c43433},
{0x0000a2d4, 0x00000000},
- {0x0000a2dc, 0x00000000},
- {0x0000a2e0, 0x00000000},
- {0x0000a2e4, 0x00000000},
- {0x0000a2e8, 0x00000000},
{0x0000a2ec, 0x00000000},
{0x0000a2f0, 0x00000000},
{0x0000a2f4, 0x00000000},
@@ -385,7 +357,7 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a3e8, 0x20202020},
{0x0000a3ec, 0x20202020},
{0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000246},
+ {0x0000a3f4, 0x00000000},
{0x0000a3f8, 0x0cdbd380},
{0x0000a3fc, 0x000f0f01},
{0x0000a400, 0x8fa91f01},
@@ -402,33 +374,17 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a430, 0x1ce739ce},
{0x0000a434, 0x00000000},
{0x0000a438, 0x00001801},
- {0x0000a43c, 0x00000000},
+ {0x0000a43c, 0x00100000},
{0x0000a440, 0x00000000},
{0x0000a444, 0x00000000},
- {0x0000a448, 0x04000080},
+ {0x0000a448, 0x05000080},
{0x0000a44c, 0x00000001},
{0x0000a450, 0x00010000},
{0x0000a458, 0x00000000},
- {0x0000a600, 0x00000000},
- {0x0000a604, 0x00000000},
- {0x0000a608, 0x00000000},
- {0x0000a60c, 0x00000000},
- {0x0000a610, 0x00000000},
- {0x0000a614, 0x00000000},
- {0x0000a618, 0x00000000},
- {0x0000a61c, 0x00000000},
- {0x0000a620, 0x00000000},
- {0x0000a624, 0x00000000},
- {0x0000a628, 0x00000000},
- {0x0000a62c, 0x00000000},
- {0x0000a630, 0x00000000},
- {0x0000a634, 0x00000000},
- {0x0000a638, 0x00000000},
- {0x0000a63c, 0x00000000},
{0x0000a640, 0x00000000},
{0x0000a644, 0x3fad9d74},
{0x0000a648, 0x0048060a},
- {0x0000a64c, 0x00000637},
+ {0x0000a64c, 0x00003c37},
{0x0000a670, 0x03020100},
{0x0000a674, 0x09080504},
{0x0000a678, 0x0d0c0b0a},
@@ -451,10 +407,6 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
{0x0000a8f4, 0x00000000},
{0x0000b2d0, 0x00000080},
{0x0000b2d4, 0x00000000},
- {0x0000b2dc, 0x00000000},
- {0x0000b2e0, 0x00000000},
- {0x0000b2e4, 0x00000000},
- {0x0000b2e8, 0x00000000},
{0x0000b2ec, 0x00000000},
{0x0000b2f0, 0x00000000},
{0x0000b2f4, 0x00000000},
@@ -465,80 +417,108 @@ static const u32 ar9340_1p0_baseband_core[][2] = {
};
static const u32 ar9340Modes_high_power_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
- {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
- {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
- {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
- {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
- {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
+ {0x0000a504, 0x04002222, 0x04002222, 0x02000001, 0x02000001},
+ {0x0000a508, 0x09002421, 0x09002421, 0x05000003, 0x05000003},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0a000005, 0x0a000005},
+ {0x0000a510, 0x13004620, 0x13004620, 0x0e000201, 0x0e000201},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x11000203, 0x11000203},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x14000401, 0x14000401},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x18000403, 0x18000403},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000602, 0x1b000602},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000802, 0x1f000802},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x21000620, 0x21000620},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x25000820, 0x25000820},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x29000822, 0x29000822},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2d000824, 0x2d000824},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x30000828, 0x30000828},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x3400082a, 0x3400082a},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x38000849, 0x38000849},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b000a2c, 0x3b000a2c},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x3e000e2b, 0x3e000e2b},
+ {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42000e2d, 0x42000e2d},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x4500124a, 0x4500124a},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x4900124c, 0x4900124c},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c00126c, 0x4c00126c},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x4f00128c, 0x4f00128c},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x52001290, 0x52001290},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001292, 0x56001292},
{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
- {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
- {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
- {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
+ {0x0000a584, 0x04802222, 0x04802222, 0x02800001, 0x02800001},
+ {0x0000a588, 0x09802421, 0x09802421, 0x05800003, 0x05800003},
+ {0x0000a58c, 0x0d802621, 0x0d802621, 0x0a800005, 0x0a800005},
+ {0x0000a590, 0x13804620, 0x13804620, 0x0e800201, 0x0e800201},
+ {0x0000a594, 0x19804a20, 0x19804a20, 0x11800203, 0x11800203},
+ {0x0000a598, 0x1d804e20, 0x1d804e20, 0x14800401, 0x14800401},
+ {0x0000a59c, 0x21805420, 0x21805420, 0x18800403, 0x18800403},
+ {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800602, 0x1b800602},
+ {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800802, 0x1f800802},
+ {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x21800620, 0x21800620},
+ {0x0000a5ac, 0x33805e44, 0x33805e44, 0x25800820, 0x25800820},
+ {0x0000a5b0, 0x38805e65, 0x38805e65, 0x29800822, 0x29800822},
+ {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2d800824, 0x2d800824},
+ {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x30800828, 0x30800828},
+ {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x3480082a, 0x3480082a},
+ {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38800849, 0x38800849},
+ {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b800a2c, 0x3b800a2c},
+ {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e800e2b, 0x3e800e2b},
+ {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42800e2d, 0x42800e2d},
+ {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x4580124a, 0x4580124a},
+ {0x0000a5d4, 0x61827f12, 0x61827f12, 0x4980124c, 0x4980124c},
+ {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c80126c, 0x4c80126c},
+ {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x4f80128c, 0x4f80128c},
+ {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x52801290, 0x52801290},
+ {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801292, 0x56801292},
+ {0x00016044, 0x056db2db, 0x056db2db, 0x022492db, 0x022492db},
{0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
- {0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
+ {0x00016444, 0x056db2db, 0x056db2db, 0x022492db, 0x022492db},
{0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
};
static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -559,7 +539,7 @@ static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
{0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
{0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
{0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
@@ -604,13 +584,43 @@ static const u32 ar9340Modes_high_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
- {0x00016048, 0x8e481266, 0x8e481266, 0x8e481266, 0x8e481266},
+ {0x00016048, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
{0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
- {0x00016448, 0x8e481266, 0x8e481266, 0x8e481266, 0x8e481266},
+ {0x00016448, 0x8e481666, 0x8e481666, 0x8e481266, 0x8e481266},
};
+
static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a00ae, 0x206a00ae},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec82d2e, 0x7ec82d2e},
+ {0x0000a2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
+ {0x0000a2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
+ {0x0000a2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
+ {0x0000a2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
{0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
{0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
@@ -676,15 +686,34 @@ static const u32 ar9340Modes_ub124_tx_gain_table_1p0[][5] = {
{0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x00016044, 0x036db2db, 0x036db2db, 0x036db2db, 0x036db2db},
- {0x00016048, 0x69b65266, 0x69b65266, 0x69b65266, 0x69b65266},
- {0x00016444, 0x036db2db, 0x036db2db, 0x036db2db, 0x036db2db},
- {0x00016448, 0x69b65266, 0x69b65266, 0x69b65266, 0x69b65266},
+ {0x00016044, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016048, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
+ {0x00016444, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4, 0x03b6d2e4},
+ {0x00016448, 0x8e480086, 0x8e480086, 0x8e480086, 0x8e480086},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0xfef5d402, 0xfef5d402, 0xfdab5b52, 0xfdab5b52},
+ {0x0000b2e0, 0xfe896600, 0xfe896600, 0xfd339c84, 0xfd339c84},
+ {0x0000b2e4, 0xff01f800, 0xff01f800, 0xfec3e000, 0xfec3e000},
+ {0x0000b2e8, 0xfffe0000, 0xfffe0000, 0xfffc0000, 0xfffc0000},
};
-
static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
@@ -845,14 +874,14 @@ static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
{0x0000b074, 0x00000000},
{0x0000b078, 0x00000000},
{0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
{0x0000b0a0, 0x00000000},
{0x0000b0a4, 0x00000000},
{0x0000b0a8, 0x00000000},
@@ -944,7 +973,11 @@ static const u32 ar9340Common_rx_gain_table_1p0[][2] = {
};
static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -952,8 +985,8 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
- {0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
- {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
@@ -965,19 +998,19 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -1010,14 +1043,40 @@ static const u32 ar9340Modes_low_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
{0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
{0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
- {0x00016048, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
+ {0x00016048, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0xf0318000, 0xf0318000, 0xf0318000, 0xf0318000},
{0x00016444, 0x056db2db, 0x056db2db, 0x056db2db, 0x056db2db},
- {0x00016448, 0x24925266, 0x24925266, 0x24925266, 0x24925266},
+ {0x00016448, 0x24925666, 0x24925666, 0x24925266, 0x24925266},
};
static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
@@ -1025,8 +1084,8 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
{0x0000a514, 0x1c000223, 0x1c000223, 0x11000400, 0x11000400},
- {0x0000a518, 0x21020220, 0x21020220, 0x15000402, 0x15000402},
- {0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
+ {0x0000a518, 0x21002220, 0x21002220, 0x15000402, 0x15000402},
+ {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
{0x0000a520, 0x2b022220, 0x2b022220, 0x1b000603, 0x1b000603},
{0x0000a524, 0x2f022222, 0x2f022222, 0x1f000a02, 0x1f000a02},
{0x0000a528, 0x34022225, 0x34022225, 0x23000a04, 0x23000a04},
@@ -1038,19 +1097,19 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a540, 0x4e02246c, 0x4e02246c, 0x38001660, 0x38001660},
{0x0000a544, 0x5302266c, 0x5302266c, 0x3b001861, 0x3b001861},
{0x0000a548, 0x5702286c, 0x5702286c, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x5c04286b, 0x5c04286b, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x61042a6c, 0x61042a6c, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x66062a6c, 0x66062a6c, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x7006308c, 0x7006308c, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x730a308a, 0x730a308a, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x770a308c, 0x770a308c, 0x56001eec, 0x56001eec},
+ {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x61024a6c, 0x61024a6c, 0x44001c84, 0x44001c84},
+ {0x0000a554, 0x66026a6c, 0x66026a6c, 0x48001ce3, 0x48001ce3},
+ {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x4c001ce5, 0x4c001ce5},
+ {0x0000a55c, 0x7002708c, 0x7002708c, 0x50001ce9, 0x50001ce9},
+ {0x0000a560, 0x7302b08a, 0x7302b08a, 0x54001ceb, 0x54001ceb},
+ {0x0000a564, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a568, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a570, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a574, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a578, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
+ {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x56001eec, 0x56001eec},
{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -1083,14 +1142,36 @@ static const u32 ar9340Modes_mixed_ob_db_tx_gain_table_1p0[][5] = {
{0x0000a5f4, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
{0x0000a5f8, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
{0x0000a5fc, 0x778a308c, 0x778a308c, 0x56801eec, 0x56801eec},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
+ {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
+ {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
+ {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
+ {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
+ {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
+ {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
+ {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
+ {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
+ {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
+ {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
{0x00016044, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
- {0x00016048, 0x24927266, 0x24927266, 0x8e483266, 0x8e483266},
+ {0x00016048, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
+ {0x00016280, 0x01000015, 0x01000015, 0x01001015, 0x01001015},
+ {0x00016288, 0x30318000, 0x30318000, 0x00318000, 0x00318000},
{0x00016444, 0x056db2db, 0x056db2db, 0x03b6d2e4, 0x03b6d2e4},
- {0x00016448, 0x24927266, 0x24927266, 0x8e482266, 0x8e482266},
+ {0x00016448, 0x24925666, 0x24925666, 0x8e481266, 0x8e481266},
};
static const u32 ar9340_1p0_mac_core[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00000008, 0x00000000},
{0x00000030, 0x00020085},
{0x00000034, 0x00000005},
@@ -1119,6 +1200,7 @@ static const u32 ar9340_1p0_mac_core[][2] = {
{0x00008004, 0x00000000},
{0x00008008, 0x00000000},
{0x0000800c, 0x00000000},
+ {0x00008010, 0x00080800},
{0x00008018, 0x00000000},
{0x00008020, 0x00000000},
{0x00008038, 0x00000000},
@@ -1146,7 +1228,7 @@ static const u32 ar9340_1p0_mac_core[][2] = {
{0x000080bc, 0x00000000},
{0x000080c0, 0x2a800000},
{0x000080c4, 0x06900168},
- {0x000080c8, 0x13881c20},
+ {0x000080c8, 0x13881c22},
{0x000080cc, 0x01f40000},
{0x000080d0, 0x00252500},
{0x000080d4, 0x00a00000},
@@ -1250,276 +1332,17 @@ static const u32 ar9340_1p0_mac_core[][2] = {
{0x000083c4, 0x00000000},
{0x000083c8, 0x00000000},
{0x000083cc, 0x00000200},
- {0x000083d0, 0x000301ff},
+ {0x000083d0, 0x000101ff},
};
-static const u32 ar9340Common_wo_xlna_rx_gain_table_1p0[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
+#define ar9340Common_wo_xlna_rx_gain_table_1p0 ar9300Common_wo_xlna_rx_gain_table_2p2
static const u32 ar9340_1p0_soc_preamble[][2] = {
- /* Addr allmodes */
- {0x000040a4, 0x00a0c1c9},
+ /* Addr allmodes */
{0x00007008, 0x00000000},
{0x00007020, 0x00000000},
{0x00007034, 0x00000002},
{0x00007038, 0x000004c2},
};
-#endif
+#endif /* INITVALS_9340_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1d6658e139b5..4ef7dcccaa2f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2010 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -52,7 +53,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
{0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
- {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -61,7 +62,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
- {0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
+ {0x0000a204, 0x01318fc0, 0x01318fc4, 0x01318fc4, 0x01318fc0},
{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
{0x0000a22c, 0x01026a2f, 0x01026a27, 0x01026a2f, 0x01026a2f},
{0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
@@ -958,7 +959,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
{0x0001604c, 0x2699e04f},
{0x00016050, 0x6db6db6c},
{0x00016058, 0x6c200000},
- {0x00016080, 0x00040000},
+ {0x00016080, 0x000c0000},
{0x00016084, 0x9a68048c},
{0x00016088, 0x54214514},
{0x0001608c, 0x1203040b},
@@ -981,7 +982,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
{0x00016144, 0x02084080},
{0x00016148, 0x000080c0},
{0x00016280, 0x050a0001},
- {0x00016284, 0x3d841400},
+ {0x00016284, 0x3d841418},
{0x00016288, 0x00000000},
{0x0001628c, 0xe3000000},
{0x00016290, 0xa1005080},
@@ -1007,6 +1008,7 @@ static const u32 ar9462_2p0_radio_core[][2] = {
static const u32 ar9462_2p0_soc_preamble[][2] = {
/* Addr allmodes */
+ {0x000040a4, 0x00a0c1c9},
{0x00007020, 0x00000000},
{0x00007034, 0x00000002},
{0x00007038, 0x000004c2},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index d16d029f81a9..fb4497fc7a3d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -17,360 +18,151 @@
#ifndef INITVALS_9485_H
#define INITVALS_9485_H
-static const u32 ar9485_1_1_mac_core[][2] = {
- /* Addr allmodes */
- {0x00000008, 0x00000000},
- {0x00000030, 0x00020085},
- {0x00000034, 0x00000005},
- {0x00000040, 0x00000000},
- {0x00000044, 0x00000000},
- {0x00000048, 0x00000008},
- {0x0000004c, 0x00000010},
- {0x00000050, 0x00000000},
- {0x00001040, 0x002ffc0f},
- {0x00001044, 0x002ffc0f},
- {0x00001048, 0x002ffc0f},
- {0x0000104c, 0x002ffc0f},
- {0x00001050, 0x002ffc0f},
- {0x00001054, 0x002ffc0f},
- {0x00001058, 0x002ffc0f},
- {0x0000105c, 0x002ffc0f},
- {0x00001060, 0x002ffc0f},
- {0x00001064, 0x002ffc0f},
- {0x000010f0, 0x00000100},
- {0x00001270, 0x00000000},
- {0x000012b0, 0x00000000},
- {0x000012f0, 0x00000000},
- {0x0000143c, 0x00000000},
- {0x0000147c, 0x00000000},
- {0x00008000, 0x00000000},
- {0x00008004, 0x00000000},
- {0x00008008, 0x00000000},
- {0x0000800c, 0x00000000},
- {0x00008018, 0x00000000},
- {0x00008020, 0x00000000},
- {0x00008038, 0x00000000},
- {0x0000803c, 0x00000000},
- {0x00008040, 0x00000000},
- {0x00008044, 0x00000000},
- {0x00008048, 0x00000000},
- {0x0000804c, 0xffffffff},
- {0x00008054, 0x00000000},
- {0x00008058, 0x00000000},
- {0x0000805c, 0x000fc78f},
- {0x00008060, 0x0000000f},
- {0x00008064, 0x00000000},
- {0x00008070, 0x00000310},
- {0x00008074, 0x00000020},
- {0x00008078, 0x00000000},
- {0x0000809c, 0x0000000f},
- {0x000080a0, 0x00000000},
- {0x000080a4, 0x02ff0000},
- {0x000080a8, 0x0e070605},
- {0x000080ac, 0x0000000d},
- {0x000080b0, 0x00000000},
- {0x000080b4, 0x00000000},
- {0x000080b8, 0x00000000},
- {0x000080bc, 0x00000000},
- {0x000080c0, 0x2a800000},
- {0x000080c4, 0x06900168},
- {0x000080c8, 0x13881c22},
- {0x000080cc, 0x01f40000},
- {0x000080d0, 0x00252500},
- {0x000080d4, 0x00a00000},
- {0x000080d8, 0x00400000},
- {0x000080dc, 0x00000000},
- {0x000080e0, 0xffffffff},
- {0x000080e4, 0x0000ffff},
- {0x000080e8, 0x3f3f3f3f},
- {0x000080ec, 0x00000000},
- {0x000080f0, 0x00000000},
- {0x000080f4, 0x00000000},
- {0x000080fc, 0x00020000},
- {0x00008100, 0x00000000},
- {0x00008108, 0x00000052},
- {0x0000810c, 0x00000000},
- {0x00008110, 0x00000000},
- {0x00008114, 0x000007ff},
- {0x00008118, 0x000000aa},
- {0x0000811c, 0x00003210},
- {0x00008124, 0x00000000},
- {0x00008128, 0x00000000},
- {0x0000812c, 0x00000000},
- {0x00008130, 0x00000000},
- {0x00008134, 0x00000000},
- {0x00008138, 0x00000000},
- {0x0000813c, 0x0000ffff},
- {0x00008144, 0xffffffff},
- {0x00008168, 0x00000000},
- {0x0000816c, 0x00000000},
- {0x00008170, 0x18486200},
- {0x00008174, 0x33332210},
- {0x00008178, 0x00000000},
- {0x0000817c, 0x00020000},
- {0x000081c0, 0x00000000},
- {0x000081c4, 0x33332210},
- {0x000081d4, 0x00000000},
- {0x000081ec, 0x00000000},
- {0x000081f0, 0x00000000},
- {0x000081f4, 0x00000000},
- {0x000081f8, 0x00000000},
- {0x000081fc, 0x00000000},
- {0x00008240, 0x00100000},
- {0x00008244, 0x0010f400},
- {0x00008248, 0x00000800},
- {0x0000824c, 0x0001e800},
- {0x00008250, 0x00000000},
- {0x00008254, 0x00000000},
- {0x00008258, 0x00000000},
- {0x0000825c, 0x40000000},
- {0x00008260, 0x00080922},
- {0x00008264, 0x9ca00010},
- {0x00008268, 0xffffffff},
- {0x0000826c, 0x0000ffff},
- {0x00008270, 0x00000000},
- {0x00008274, 0x40000000},
- {0x00008278, 0x003e4180},
- {0x0000827c, 0x00000004},
- {0x00008284, 0x0000002c},
- {0x00008288, 0x0000002c},
- {0x0000828c, 0x000000ff},
- {0x00008294, 0x00000000},
- {0x00008298, 0x00000000},
- {0x0000829c, 0x00000000},
- {0x00008300, 0x00000140},
- {0x00008314, 0x00000000},
- {0x0000831c, 0x0000010d},
- {0x00008328, 0x00000000},
- {0x0000832c, 0x00000007},
- {0x00008330, 0x00000302},
- {0x00008334, 0x00000700},
- {0x00008338, 0x00ff0000},
- {0x0000833c, 0x02400000},
- {0x00008340, 0x000107ff},
- {0x00008344, 0xa248105b},
- {0x00008348, 0x008f0000},
- {0x0000835c, 0x00000000},
- {0x00008360, 0xffffffff},
- {0x00008364, 0xffffffff},
- {0x00008368, 0x00000000},
- {0x00008370, 0x00000000},
- {0x00008374, 0x000000ff},
- {0x00008378, 0x00000000},
- {0x0000837c, 0x00000000},
- {0x00008380, 0xffffffff},
- {0x00008384, 0xffffffff},
- {0x00008390, 0xffffffff},
- {0x00008394, 0xffffffff},
- {0x00008398, 0x00000000},
- {0x0000839c, 0x00000000},
- {0x000083a0, 0x00000000},
- {0x000083a4, 0x0000fa14},
- {0x000083a8, 0x000f0c00},
- {0x000083ac, 0x33332210},
- {0x000083b0, 0x33332210},
- {0x000083b4, 0x33332210},
- {0x000083b8, 0x33332210},
- {0x000083bc, 0x00000000},
- {0x000083c0, 0x00000000},
- {0x000083c4, 0x00000000},
- {0x000083c8, 0x00000000},
- {0x000083cc, 0x00000200},
- {0x000083d0, 0x000301ff},
-};
+/* AR9485 1.0 */
-static const u32 ar9485_1_1_baseband_core[][2] = {
- /* Addr allmodes */
- {0x00009800, 0xafe68e30},
- {0x00009804, 0xfd14e000},
- {0x00009808, 0x9c0a8f6b},
- {0x0000980c, 0x04800000},
- {0x00009814, 0x9280c00a},
- {0x00009818, 0x00000000},
- {0x0000981c, 0x00020028},
- {0x00009834, 0x5f3ca3de},
- {0x00009838, 0x0108ecff},
- {0x0000983c, 0x14750600},
- {0x00009880, 0x201fff00},
- {0x00009884, 0x00001042},
- {0x000098a4, 0x00200400},
- {0x000098b0, 0x52440bbe},
- {0x000098d0, 0x004b6a8e},
- {0x000098d4, 0x00000820},
- {0x000098dc, 0x00000000},
- {0x000098f0, 0x00000000},
- {0x000098f4, 0x00000000},
- {0x00009c04, 0x00000000},
- {0x00009c08, 0x03200000},
- {0x00009c0c, 0x00000000},
- {0x00009c10, 0x00000000},
- {0x00009c14, 0x00046384},
- {0x00009c18, 0x05b6b440},
- {0x00009c1c, 0x00b6b440},
- {0x00009d00, 0xc080a333},
- {0x00009d04, 0x40206c10},
- {0x00009d08, 0x009c4060},
- {0x00009d0c, 0x1883800a},
- {0x00009d10, 0x01834061},
- {0x00009d14, 0x00c00400},
- {0x00009d18, 0x00000000},
- {0x00009d1c, 0x00000000},
- {0x00009e08, 0x0038233c},
- {0x00009e24, 0x9927b515},
- {0x00009e28, 0x12ef0200},
- {0x00009e30, 0x06336f77},
- {0x00009e34, 0x6af6532f},
- {0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
- {0x00009e4c, 0x00001004},
- {0x00009e50, 0x00ff03f1},
- {0x00009fc0, 0x80be4788},
- {0x00009fc4, 0x0001efb5},
- {0x00009fcc, 0x40000014},
- {0x0000a20c, 0x00000000},
- {0x0000a210, 0x00000000},
- {0x0000a220, 0x00000000},
- {0x0000a224, 0x00000000},
- {0x0000a228, 0x10002310},
- {0x0000a23c, 0x00000000},
- {0x0000a244, 0x0c000000},
- {0x0000a2a0, 0x00000001},
- {0x0000a2c0, 0x00000001},
- {0x0000a2c8, 0x00000000},
- {0x0000a2cc, 0x18c43433},
- {0x0000a2d4, 0x00000000},
- {0x0000a2dc, 0x00000000},
- {0x0000a2e0, 0x00000000},
- {0x0000a2e4, 0x00000000},
- {0x0000a2e8, 0x00000000},
- {0x0000a2ec, 0x00000000},
- {0x0000a2f0, 0x00000000},
- {0x0000a2f4, 0x00000000},
- {0x0000a2f8, 0x00000000},
- {0x0000a344, 0x00000000},
- {0x0000a34c, 0x00000000},
- {0x0000a350, 0x0000a000},
- {0x0000a364, 0x00000000},
- {0x0000a370, 0x00000000},
- {0x0000a390, 0x00000001},
- {0x0000a394, 0x00000444},
- {0x0000a398, 0x001f0e0f},
- {0x0000a39c, 0x0075393f},
- {0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x000000ff},
- {0x0000a3a8, 0x3b3b3b3b},
- {0x0000a3ac, 0x2f2f2f2f},
- {0x0000a3c0, 0x20202020},
- {0x0000a3c4, 0x22222220},
- {0x0000a3c8, 0x20200020},
- {0x0000a3cc, 0x20202020},
- {0x0000a3d0, 0x20202020},
- {0x0000a3d4, 0x20202020},
- {0x0000a3d8, 0x20202020},
- {0x0000a3dc, 0x20202020},
- {0x0000a3e0, 0x20202020},
- {0x0000a3e4, 0x20202020},
- {0x0000a3e8, 0x20202020},
- {0x0000a3ec, 0x20202020},
- {0x0000a3f0, 0x00000000},
- {0x0000a3f4, 0x00000006},
- {0x0000a3f8, 0x0cdbd380},
- {0x0000a3fc, 0x000f0f01},
- {0x0000a400, 0x8fa91f01},
- {0x0000a404, 0x00000000},
- {0x0000a408, 0x0e79e5c6},
- {0x0000a40c, 0x00820820},
- {0x0000a414, 0x1ce739cf},
- {0x0000a418, 0x2d0019ce},
- {0x0000a41c, 0x1ce739ce},
- {0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
- {0x0000a428, 0x000001ce},
- {0x0000a42c, 0x1ce739ce},
- {0x0000a430, 0x1ce739ce},
- {0x0000a434, 0x00000000},
- {0x0000a438, 0x00001801},
- {0x0000a43c, 0x00000000},
- {0x0000a440, 0x00000000},
- {0x0000a444, 0x00000000},
- {0x0000a448, 0x04000000},
- {0x0000a44c, 0x00000001},
- {0x0000a450, 0x00010000},
- {0x0000a5c4, 0xbfad9d74},
- {0x0000a5c8, 0x0048060a},
- {0x0000a5cc, 0x00000637},
- {0x0000a760, 0x03020100},
- {0x0000a764, 0x09080504},
- {0x0000a768, 0x0d0c0b0a},
- {0x0000a76c, 0x13121110},
- {0x0000a770, 0x31301514},
- {0x0000a774, 0x35343332},
- {0x0000a778, 0x00000036},
- {0x0000a780, 0x00000838},
- {0x0000a7c0, 0x00000000},
- {0x0000a7c4, 0xfffffffc},
- {0x0000a7c8, 0x00000000},
- {0x0000a7cc, 0x00000000},
- {0x0000a7d0, 0x00000000},
- {0x0000a7d4, 0x00000004},
- {0x0000a7dc, 0x00000000},
-};
+#define ar9485_1_1_mac_postamble ar9300_2p2_mac_postamble
-static const u32 ar9485Common_1_1[][2] = {
- /* Addr allmodes */
- {0x00007010, 0x00000022},
- {0x00007020, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000004c2},
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18012e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
};
-static const u32 ar9485_1_1_baseband_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
- {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
- {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
- {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
- {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
- {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
- {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
- {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
- {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
- {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
- {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
- {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
- {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
- {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
- {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
- {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
- {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
- {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
- {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
- {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
- {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
- {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
- {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
- {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
- {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
- {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
- {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
- {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
- {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
- {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
- {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
- {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
- {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00060005},
+ {0x0000a004, 0x00810080},
+ {0x0000a008, 0x00830082},
+ {0x0000a00c, 0x00850084},
+ {0x0000a010, 0x01820181},
+ {0x0000a014, 0x01840183},
+ {0x0000a018, 0x01880185},
+ {0x0000a01c, 0x018a0189},
+ {0x0000a020, 0x02850284},
+ {0x0000a024, 0x02890288},
+ {0x0000a028, 0x028b028a},
+ {0x0000a02c, 0x03850384},
+ {0x0000a030, 0x03890388},
+ {0x0000a034, 0x038b038a},
+ {0x0000a038, 0x038d038c},
+ {0x0000a03c, 0x03910390},
+ {0x0000a040, 0x03930392},
+ {0x0000a044, 0x03950394},
+ {0x0000a048, 0x00000396},
+ {0x0000a04c, 0x00000000},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x28282828},
+ {0x0000a084, 0x28282828},
+ {0x0000a088, 0x28282828},
+ {0x0000a08c, 0x28282828},
+ {0x0000a090, 0x28282828},
+ {0x0000a094, 0x24242428},
+ {0x0000a098, 0x171e1e1e},
+ {0x0000a09c, 0x02020b0b},
+ {0x0000a0a0, 0x02020202},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x22072208},
+ {0x0000a0c4, 0x22052206},
+ {0x0000a0c8, 0x22032204},
+ {0x0000a0cc, 0x22012202},
+ {0x0000a0d0, 0x221f2200},
+ {0x0000a0d4, 0x221d221e},
+ {0x0000a0d8, 0x33023303},
+ {0x0000a0dc, 0x33003301},
+ {0x0000a0e0, 0x331e331f},
+ {0x0000a0e4, 0x4402331d},
+ {0x0000a0e8, 0x44004401},
+ {0x0000a0ec, 0x441e441f},
+ {0x0000a0f0, 0x55025503},
+ {0x0000a0f4, 0x55005501},
+ {0x0000a0f8, 0x551e551f},
+ {0x0000a0fc, 0x6602551d},
+ {0x0000a100, 0x66006601},
+ {0x0000a104, 0x661e661f},
+ {0x0000a108, 0x7703661d},
+ {0x0000a10c, 0x77017702},
+ {0x0000a110, 0x00007700},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x111f1100},
+ {0x0000a148, 0x111d111e},
+ {0x0000a14c, 0x111b111c},
+ {0x0000a150, 0x22032204},
+ {0x0000a154, 0x22012202},
+ {0x0000a158, 0x221f2200},
+ {0x0000a15c, 0x221d221e},
+ {0x0000a160, 0x33013302},
+ {0x0000a164, 0x331f3300},
+ {0x0000a168, 0x4402331e},
+ {0x0000a16c, 0x44004401},
+ {0x0000a170, 0x441e441f},
+ {0x0000a174, 0x55015502},
+ {0x0000a178, 0x551f5500},
+ {0x0000a17c, 0x6602551e},
+ {0x0000a180, 0x66006601},
+ {0x0000a184, 0x661e661f},
+ {0x0000a188, 0x7703661d},
+ {0x0000a18c, 0x77017702},
+ {0x0000a190, 0x00007700},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000296},
};
-static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
{0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -442,102 +234,34 @@ static const u32 ar9485Modes_high_ob_db_tx_gain_1_1[][5] = {
{0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
};
-static const u32 ar9485_modes_lowest_ob_db_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
- {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
+#define ar9485Modes_high_ob_db_tx_gain_1_1 ar9485Modes_high_power_tx_gain_1_1
-static const u32 ar9485_1_1_radio_postamble[][2] = {
- /* Addr allmodes */
- {0x0001609c, 0x0b283f31},
- {0x000160ac, 0x24611800},
- {0x000160b0, 0x03284f3e},
- {0x0001610c, 0x00170000},
- {0x00016140, 0x50804008},
-};
+#define ar9485Modes_low_ob_db_tx_gain_1_1 ar9485Modes_high_ob_db_tx_gain_1_1
-static const u32 ar9485_1_1_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+#define ar9485_modes_lowest_ob_db_tx_gain_1_1 ar9485Modes_low_ob_db_tx_gain_1_1
+
+static const u32 ar9485_1_1[][2] = {
+ /* Addr allmodes */
+ {0x0000a580, 0x00000000},
+ {0x0000a584, 0x00000000},
+ {0x0000a588, 0x00000000},
+ {0x0000a58c, 0x00000000},
+ {0x0000a590, 0x00000000},
+ {0x0000a594, 0x00000000},
+ {0x0000a598, 0x00000000},
+ {0x0000a59c, 0x00000000},
+ {0x0000a5a0, 0x00000000},
+ {0x0000a5a4, 0x00000000},
+ {0x0000a5a8, 0x00000000},
+ {0x0000a5ac, 0x00000000},
+ {0x0000a5b0, 0x00000000},
+ {0x0000a5b4, 0x00000000},
+ {0x0000a5b8, 0x00000000},
+ {0x0000a5bc, 0x00000000},
};
static const u32 ar9485_1_1_radio_core[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00016000, 0x36db6db6},
{0x00016004, 0x6db6db40},
{0x00016008, 0x73800000},
@@ -601,294 +325,145 @@ static const u32 ar9485_1_1_radio_core[][2] = {
{0x00016c44, 0x12000000},
};
-static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18052e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000080c},
-};
-
-static const u32 ar9485Modes_high_power_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
- {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
-
-static const u32 ar9485_1_1[][2] = {
- /* Addr allmodes */
- {0x0000a580, 0x00000000},
- {0x0000a584, 0x00000000},
- {0x0000a588, 0x00000000},
- {0x0000a58c, 0x00000000},
- {0x0000a590, 0x00000000},
- {0x0000a594, 0x00000000},
- {0x0000a598, 0x00000000},
- {0x0000a59c, 0x00000000},
- {0x0000a5a0, 0x00000000},
- {0x0000a5a4, 0x00000000},
- {0x0000a5a8, 0x00000000},
- {0x0000a5ac, 0x00000000},
- {0x0000a5b0, 0x00000000},
- {0x0000a5b4, 0x00000000},
- {0x0000a5b8, 0x00000000},
- {0x0000a5bc, 0x00000000},
-};
-
-static const u32 ar9485_modes_green_ob_db_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000003, 0x00000003, 0x00000003, 0x00000003},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
- {0x0000a458, 0x80000000, 0x80000000, 0x80000000, 0x80000000},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000006, 0x00000006},
- {0x0000a504, 0x05062002, 0x05062002, 0x03000201, 0x03000201},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x06000203, 0x06000203},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0a000401, 0x0a000401},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x0e000403, 0x0e000403},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x12000405, 0x12000405},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x15000604, 0x15000604},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x18000605, 0x18000605},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x1c000a04, 0x1c000a04},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x21000a06, 0x21000a06},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x29000a24, 0x29000a24},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2f000e21, 0x2f000e21},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x31000e20, 0x31000e20},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x33000e20, 0x33000e20},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000b500, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b504, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b508, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b50c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b510, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b514, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b518, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b51c, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b520, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b524, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b528, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a},
- {0x0000b52c, 0x0000002a, 0x0000002a, 0x0000002a, 0x0000002a},
- {0x0000b530, 0x0000003a, 0x0000003a, 0x0000003a, 0x0000003a},
- {0x0000b534, 0x0000004a, 0x0000004a, 0x0000004a, 0x0000004a},
- {0x0000b538, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b53c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b540, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b544, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b548, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b54c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b550, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b554, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b558, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b55c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b560, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b564, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b568, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b56c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b570, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b574, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b578, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x0000b57c, 0x0000005b, 0x0000005b, 0x0000005b, 0x0000005b},
- {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
- {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
-
-static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18013e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000080c},
-};
-
-static const u32 ar9485_1_1_soc_preamble[][2] = {
- /* Addr allmodes */
- {0x00004014, 0xba280400},
- {0x00004090, 0x00aa10aa},
- {0x000040a4, 0x00a0c9c9},
- {0x00007010, 0x00000022},
- {0x00007020, 0x00000000},
- {0x00007034, 0x00000002},
- {0x00007038, 0x000004c2},
- {0x00007048, 0x00000002},
-};
-
-static const u32 ar9485_1_1_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
-
-static const u32 ar9485Modes_low_ob_db_tx_gain_1_1[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x000098bc, 0x00000002, 0x00000002, 0x00000002, 0x00000002},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
- {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
- {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
- {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
- {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
- {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
- {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
- {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
- {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
- {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
- {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
- {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
- {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
- {0x0000a530, 0x48023ec6, 0x48023ec6, 0x34000e20, 0x34000e20},
- {0x0000a534, 0x4d023f01, 0x4d023f01, 0x35000e21, 0x35000e21},
- {0x0000a538, 0x53023f4b, 0x53023f4b, 0x43000e62, 0x43000e62},
- {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x45000e63, 0x45000e63},
- {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x49000e65, 0x49000e65},
- {0x0000a544, 0x6502feca, 0x6502feca, 0x4b000e66, 0x4b000e66},
- {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x4d001645, 0x4d001645},
- {0x0000a54c, 0x7203feca, 0x7203feca, 0x51001865, 0x51001865},
- {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x55001a86, 0x55001a86},
- {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x57001ce9, 0x57001ce9},
- {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x5a001ceb, 0x5a001ceb},
- {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a564, 0x960fffcb, 0x960fffcb, 0x5e001eeb, 0x5e001eeb},
- {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x5e001eeb, 0x5e001eeb},
- {0x0000b500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b504, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b508, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b50c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b510, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b514, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b518, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b51c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b520, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b524, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b528, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b52c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b530, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b534, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b538, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b53c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b540, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b544, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b548, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b54c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b550, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b554, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b558, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b55c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b560, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b564, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b568, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b56c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b570, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b574, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b578, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000b57c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x00016044, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db, 0x05d6b2db},
- {0x00016048, 0x6c924260, 0x6c924260, 0x6c924260, 0x6c924260},
-};
-
-static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
- /* Addr 5G_HT2 5G_HT40 */
- {0x00009e00, 0x03721821, 0x03721821},
- {0x0000a230, 0x0000400b, 0x00004016},
- {0x0000a254, 0x00000898, 0x00001130},
-};
-
-static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
- /* Addr allmodes */
- {0x00018c00, 0x18012e5e},
- {0x00018c04, 0x000801d8},
- {0x00018c08, 0x0000080c},
+static const u32 ar9485_1_1_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a8f6b},
+ {0x0000980c, 0x04800000},
+ {0x00009814, 0x9280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x5f3ca3de},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14750600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x52440bbe},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0x00000000},
+ {0x00009c08, 0x03200000},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x1883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c00400},
+ {0x00009d18, 0x00000000},
+ {0x00009d1c, 0x00000000},
+ {0x00009e08, 0x0038233c},
+ {0x00009e24, 0x9927b515},
+ {0x00009e28, 0x12ef0200},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x80be4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x0000a20c, 0x00000000},
+ {0x0000a210, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a2a0, 0x00000001},
+ {0x0000a2c0, 0x00000001},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2cc, 0x18c43433},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2dc, 0x00000000},
+ {0x0000a2e0, 0x00000000},
+ {0x0000a2e4, 0x00000000},
+ {0x0000a2e8, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x001f0e0f},
+ {0x0000a39c, 0x0075393f},
+ {0x0000a3a0, 0xb79f6427},
+ {0x0000a3a4, 0x000000ff},
+ {0x0000a3a8, 0x3b3b3b3b},
+ {0x0000a3ac, 0x2f2f2f2f},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000006},
+ {0x0000a3f8, 0x0cdbd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739cf},
+ {0x0000a418, 0x2d0019ce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00000000},
+ {0x0000a440, 0x00000000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x04000000},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a5c4, 0xbfad9d74},
+ {0x0000a5c8, 0x0048060a},
+ {0x0000a5cc, 0x00000637},
+ {0x0000a760, 0x03020100},
+ {0x0000a764, 0x09080504},
+ {0x0000a768, 0x0d0c0b0a},
+ {0x0000a76c, 0x13121110},
+ {0x0000a770, 0x31301514},
+ {0x0000a774, 0x35343332},
+ {0x0000a778, 0x00000036},
+ {0x0000a780, 0x00000838},
+ {0x0000a7c0, 0x00000000},
+ {0x0000a7c4, 0xfffffffc},
+ {0x0000a7c8, 0x00000000},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
};
static const u32 ar9485_common_rx_gain_1_1[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x0000a000, 0x00010000},
{0x0000a004, 0x00030002},
{0x0000a008, 0x00050004},
@@ -1019,143 +594,260 @@ static const u32 ar9485_common_rx_gain_1_1[][2] = {
{0x0000a1fc, 0x00000296},
};
+static const u32 ar9485_1_1_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18052e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
static const u32 ar9485_1_1_pcie_phy_clkreq_enable_L1[][2] = {
- /* Addr allmodes */
+ /* Addr allmodes */
{0x00018c00, 0x18053e5e},
{0x00018c04, 0x000801d8},
{0x00018c08, 0x0000080c},
};
-static const u32 ar9485Common_wo_xlna_rx_gain_1_1[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00060005},
- {0x0000a004, 0x00810080},
- {0x0000a008, 0x00830082},
- {0x0000a00c, 0x00850084},
- {0x0000a010, 0x01820181},
- {0x0000a014, 0x01840183},
- {0x0000a018, 0x01880185},
- {0x0000a01c, 0x018a0189},
- {0x0000a020, 0x02850284},
- {0x0000a024, 0x02890288},
- {0x0000a028, 0x028b028a},
- {0x0000a02c, 0x03850384},
- {0x0000a030, 0x03890388},
- {0x0000a034, 0x038b038a},
- {0x0000a038, 0x038d038c},
- {0x0000a03c, 0x03910390},
- {0x0000a040, 0x03930392},
- {0x0000a044, 0x03950394},
- {0x0000a048, 0x00000396},
- {0x0000a04c, 0x00000000},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x28282828},
- {0x0000a084, 0x28282828},
- {0x0000a088, 0x28282828},
- {0x0000a08c, 0x28282828},
- {0x0000a090, 0x28282828},
- {0x0000a094, 0x24242428},
- {0x0000a098, 0x171e1e1e},
- {0x0000a09c, 0x02020b0b},
- {0x0000a0a0, 0x02020202},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x22072208},
- {0x0000a0c4, 0x22052206},
- {0x0000a0c8, 0x22032204},
- {0x0000a0cc, 0x22012202},
- {0x0000a0d0, 0x221f2200},
- {0x0000a0d4, 0x221d221e},
- {0x0000a0d8, 0x33023303},
- {0x0000a0dc, 0x33003301},
- {0x0000a0e0, 0x331e331f},
- {0x0000a0e4, 0x4402331d},
- {0x0000a0e8, 0x44004401},
- {0x0000a0ec, 0x441e441f},
- {0x0000a0f0, 0x55025503},
- {0x0000a0f4, 0x55005501},
- {0x0000a0f8, 0x551e551f},
- {0x0000a0fc, 0x6602551d},
- {0x0000a100, 0x66006601},
- {0x0000a104, 0x661e661f},
- {0x0000a108, 0x7703661d},
- {0x0000a10c, 0x77017702},
- {0x0000a110, 0x00007700},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x111f1100},
- {0x0000a148, 0x111d111e},
- {0x0000a14c, 0x111b111c},
- {0x0000a150, 0x22032204},
- {0x0000a154, 0x22012202},
- {0x0000a158, 0x221f2200},
- {0x0000a15c, 0x221d221e},
- {0x0000a160, 0x33013302},
- {0x0000a164, 0x331f3300},
- {0x0000a168, 0x4402331e},
- {0x0000a16c, 0x44004401},
- {0x0000a170, 0x441e441f},
- {0x0000a174, 0x55015502},
- {0x0000a178, 0x551f5500},
- {0x0000a17c, 0x6602551e},
- {0x0000a180, 0x66006601},
- {0x0000a184, 0x661e661f},
- {0x0000a188, 0x7703661d},
- {0x0000a18c, 0x77017702},
- {0x0000a190, 0x00007700},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000296},
+static const u32 ar9485_1_1_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00004014, 0xba280400},
+ {0x00004090, 0x00aa10aa},
+ {0x000040a4, 0x00a0c9c9},
+ {0x00007010, 0x00000022},
+ {0x00007020, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000002},
+};
+
+static const u32 ar9485_fast_clock_1_1_baseband_postamble[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00009e00, 0x03721821, 0x03721821},
+ {0x0000a230, 0x0000400b, 0x00004016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+static const u32 ar9485_1_1_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
+ {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
+ {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
+ {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
+ {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
+ {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
+ {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
+ {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
+ {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+};
+
+static const u32 ar9485_1_1_pcie_phy_clkreq_disable_L1[][2] = {
+ /* Addr allmodes */
+ {0x00018c00, 0x18013e5e},
+ {0x00018c04, 0x000801d8},
+ {0x00018c08, 0x0000080c},
+};
+
+static const u32 ar9485_1_1_radio_postamble[][2] = {
+ /* Addr allmodes */
+ {0x0001609c, 0x0b283f31},
+ {0x000160ac, 0x24611800},
+ {0x000160b0, 0x03284f3e},
+ {0x0001610c, 0x00170000},
+ {0x00016140, 0x50804008},
+};
+
+static const u32 ar9485_1_1_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x00008170, 0x18486200},
+ {0x00008174, 0x33332210},
+ {0x00008178, 0x00000000},
+ {0x0000817c, 0x00020000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081d4, 0x00000000},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9ca00010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00000140},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x00000007},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0x00ff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xa248105b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x000301ff},
};
-#endif
+#endif /* INITVALS_9485_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
new file mode 100644
index 000000000000..df97f21c52dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -0,0 +1,1284 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef INITVALS_955X_1P0_H
+#define INITVALS_955X_1P0_H
+
+/* AR955X 1.0 */
+
+static const u32 ar955x_1p0_radio_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00016098, 0xd2dd5554, 0xd2dd5554, 0xd28b3330, 0xd28b3330},
+ {0x0001609c, 0x0a566f3a, 0x0a566f3a, 0x06345f2a, 0x06345f2a},
+ {0x000160ac, 0xa4647c00, 0xa4647c00, 0xa4646800, 0xa4646800},
+ {0x000160b0, 0x01885f52, 0x01885f52, 0x04accf3a, 0x04accf3a},
+ {0x00016104, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x0001610c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016140, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
+ {0x00016504, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x0001650c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016540, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
+ {0x00016904, 0xb7a00001, 0xb7a00001, 0xb7a00001, 0xb7a00001},
+ {0x0001690c, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000},
+ {0x00016940, 0x10804008, 0x10804008, 0x10804008, 0x10804008},
+};
+
+static const u32 ar955x_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
+ /* Addr allmodes */
+ {0x0000a398, 0x00000000},
+ {0x0000a39c, 0x6f7f0301},
+ {0x0000a3a0, 0xca9228ee},
+};
+
+static const u32 ar955x_1p0_baseband_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
+ {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
+ {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
+ {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
+ {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
+ {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
+ {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
+ {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
+ {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
+ {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822},
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+ {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
+ {0x0000a204, 0x005c0ec0, 0x005c0ec4, 0x005c0ec4, 0x005c0ec0},
+ {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
+ {0x0000a22c, 0x07e26a2f, 0x07e26a2f, 0x01026a2f, 0x01026a2f},
+ {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
+ {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
+ {0x0000a238, 0xffb01018, 0xffb01018, 0xffb01018, 0xffb01018},
+ {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
+ {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
+ {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
+ {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
+ {0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
+ {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
+ {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
+ {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
+ {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
+ {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
+ {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
+ {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000b284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+ {0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be04, 0x001c0000, 0x001c0000, 0x001c0000, 0x001c0000},
+ {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
+ {0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000c284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
+};
+
+static const u32 ar955x_1p0_radio_core[][2] = {
+ /* Addr allmodes */
+ {0x00016000, 0x36db6db6},
+ {0x00016004, 0x6db6db40},
+ {0x00016008, 0x73f00000},
+ {0x0001600c, 0x00000000},
+ {0x00016040, 0x7f80fff8},
+ {0x0001604c, 0x76d005b5},
+ {0x00016050, 0x557cf031},
+ {0x00016054, 0x13449440},
+ {0x00016058, 0x0c51c92c},
+ {0x0001605c, 0x3db7fffc},
+ {0x00016060, 0xfffffffc},
+ {0x00016064, 0x000f0278},
+ {0x00016068, 0x6db6db6c},
+ {0x0001606c, 0x6db60000},
+ {0x00016080, 0x00080000},
+ {0x00016084, 0x0e48048c},
+ {0x00016088, 0x14214514},
+ {0x0001608c, 0x119f101e},
+ {0x00016090, 0x24926490},
+ {0x00016094, 0x00000000},
+ {0x000160a0, 0x0a108ffe},
+ {0x000160a4, 0x812fc370},
+ {0x000160a8, 0x423c8000},
+ {0x000160b4, 0x92480080},
+ {0x000160c0, 0x006db6d0},
+ {0x000160c4, 0x6db6db60},
+ {0x000160c8, 0x6db6db6c},
+ {0x000160cc, 0x01e6c000},
+ {0x00016100, 0x11999601},
+ {0x00016108, 0x00080010},
+ {0x00016144, 0x02084080},
+ {0x00016148, 0x000080c0},
+ {0x00016280, 0x01800804},
+ {0x00016284, 0x00038dc5},
+ {0x00016288, 0x00000000},
+ {0x0001628c, 0x00000040},
+ {0x00016380, 0x00000000},
+ {0x00016384, 0x00000000},
+ {0x00016388, 0x00400705},
+ {0x0001638c, 0x00800700},
+ {0x00016390, 0x00800700},
+ {0x00016394, 0x00000000},
+ {0x00016398, 0x00000000},
+ {0x0001639c, 0x00000000},
+ {0x000163a0, 0x00000001},
+ {0x000163a4, 0x00000001},
+ {0x000163a8, 0x00000000},
+ {0x000163ac, 0x00000000},
+ {0x000163b0, 0x00000000},
+ {0x000163b4, 0x00000000},
+ {0x000163b8, 0x00000000},
+ {0x000163bc, 0x00000000},
+ {0x000163c0, 0x000000a0},
+ {0x000163c4, 0x000c0000},
+ {0x000163c8, 0x14021402},
+ {0x000163cc, 0x00001402},
+ {0x000163d0, 0x00000000},
+ {0x000163d4, 0x00000000},
+ {0x00016400, 0x36db6db6},
+ {0x00016404, 0x6db6db40},
+ {0x00016408, 0x73f00000},
+ {0x0001640c, 0x00000000},
+ {0x00016440, 0x7f80fff8},
+ {0x0001644c, 0x76d005b5},
+ {0x00016450, 0x557cf031},
+ {0x00016454, 0x13449440},
+ {0x00016458, 0x0c51c92c},
+ {0x0001645c, 0x3db7fffc},
+ {0x00016460, 0xfffffffc},
+ {0x00016464, 0x000f0278},
+ {0x00016468, 0x6db6db6c},
+ {0x0001646c, 0x6db60000},
+ {0x00016500, 0x11999601},
+ {0x00016508, 0x00080010},
+ {0x00016544, 0x02084080},
+ {0x00016548, 0x000080c0},
+ {0x00016780, 0x00000000},
+ {0x00016784, 0x00000000},
+ {0x00016788, 0x00400705},
+ {0x0001678c, 0x00800700},
+ {0x00016790, 0x00800700},
+ {0x00016794, 0x00000000},
+ {0x00016798, 0x00000000},
+ {0x0001679c, 0x00000000},
+ {0x000167a0, 0x00000001},
+ {0x000167a4, 0x00000001},
+ {0x000167a8, 0x00000000},
+ {0x000167ac, 0x00000000},
+ {0x000167b0, 0x00000000},
+ {0x000167b4, 0x00000000},
+ {0x000167b8, 0x00000000},
+ {0x000167bc, 0x00000000},
+ {0x000167c0, 0x000000a0},
+ {0x000167c4, 0x000c0000},
+ {0x000167c8, 0x14021402},
+ {0x000167cc, 0x00001402},
+ {0x000167d0, 0x00000000},
+ {0x000167d4, 0x00000000},
+ {0x00016800, 0x36db6db6},
+ {0x00016804, 0x6db6db40},
+ {0x00016808, 0x73f00000},
+ {0x0001680c, 0x00000000},
+ {0x00016840, 0x7f80fff8},
+ {0x0001684c, 0x76d005b5},
+ {0x00016850, 0x557cf031},
+ {0x00016854, 0x13449440},
+ {0x00016858, 0x0c51c92c},
+ {0x0001685c, 0x3db7fffc},
+ {0x00016860, 0xfffffffc},
+ {0x00016864, 0x000f0278},
+ {0x00016868, 0x6db6db6c},
+ {0x0001686c, 0x6db60000},
+ {0x00016900, 0x11999601},
+ {0x00016908, 0x00080010},
+ {0x00016944, 0x02084080},
+ {0x00016948, 0x000080c0},
+ {0x00016b80, 0x00000000},
+ {0x00016b84, 0x00000000},
+ {0x00016b88, 0x00400705},
+ {0x00016b8c, 0x00800700},
+ {0x00016b90, 0x00800700},
+ {0x00016b94, 0x00000000},
+ {0x00016b98, 0x00000000},
+ {0x00016b9c, 0x00000000},
+ {0x00016ba0, 0x00000001},
+ {0x00016ba4, 0x00000001},
+ {0x00016ba8, 0x00000000},
+ {0x00016bac, 0x00000000},
+ {0x00016bb0, 0x00000000},
+ {0x00016bb4, 0x00000000},
+ {0x00016bb8, 0x00000000},
+ {0x00016bbc, 0x00000000},
+ {0x00016bc0, 0x000000a0},
+ {0x00016bc4, 0x000c0000},
+ {0x00016bc8, 0x14021402},
+ {0x00016bcc, 0x00001402},
+ {0x00016bd0, 0x00000000},
+ {0x00016bd4, 0x00000000},
+};
+
+static const u32 ar955x_1p0_modes_xpa_tx_gain_table[][9] = {
+ /* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000a2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000a2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000a2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
+ {0x0000a410, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050de, 0x000050da, 0x000050da},
+ {0x0000a500, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000005, 0x04000002, 0x04000002},
+ {0x0000a508, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000009, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c00000b, 0x0c000006, 0x0c000006},
+ {0x0000a510, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x1000000d, 0x0f00000a, 0x0f00000a},
+ {0x0000a514, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x14000011, 0x1300000c, 0x1300000c},
+ {0x0000a518, 0x19004008, 0x19004008, 0x19004008, 0x19004008, 0x18004008, 0x18004008, 0x1700000e, 0x1700000e},
+ {0x0000a51c, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1d00400a, 0x1c00400a, 0x1c00400a, 0x1b000064, 0x1b000064},
+ {0x0000a520, 0x230020a2, 0x230020a2, 0x210020a2, 0x210020a2, 0x200020a2, 0x200020a2, 0x1f000242, 0x1f000242},
+ {0x0000a524, 0x2500006e, 0x2500006e, 0x2500006e, 0x2500006e, 0x2400006e, 0x2400006e, 0x23000229, 0x23000229},
+ {0x0000a528, 0x29022221, 0x29022221, 0x28022221, 0x28022221, 0x27022221, 0x27022221, 0x270002a2, 0x270002a2},
+ {0x0000a52c, 0x2d00062a, 0x2d00062a, 0x2c00062a, 0x2c00062a, 0x2a00062a, 0x2a00062a, 0x2c001203, 0x2c001203},
+ {0x0000a530, 0x340220a5, 0x340220a5, 0x320220a5, 0x320220a5, 0x2f0220a5, 0x2f0220a5, 0x30001803, 0x30001803},
+ {0x0000a534, 0x380022c5, 0x380022c5, 0x350022c5, 0x350022c5, 0x320022c5, 0x320022c5, 0x33000881, 0x33000881},
+ {0x0000a538, 0x3b002486, 0x3b002486, 0x39002486, 0x39002486, 0x36002486, 0x36002486, 0x38001809, 0x38001809},
+ {0x0000a53c, 0x3f00248a, 0x3f00248a, 0x3d00248a, 0x3d00248a, 0x3a00248a, 0x3a00248a, 0x3a000814, 0x3a000814},
+ {0x0000a540, 0x4202242c, 0x4202242c, 0x4102242c, 0x4102242c, 0x3f02242c, 0x3f02242c, 0x3f001a0c, 0x3f001a0c},
+ {0x0000a544, 0x490044c6, 0x490044c6, 0x460044c6, 0x460044c6, 0x420044c6, 0x420044c6, 0x43001a0e, 0x43001a0e},
+ {0x0000a548, 0x4d024485, 0x4d024485, 0x4a024485, 0x4a024485, 0x46024485, 0x46024485, 0x46001812, 0x46001812},
+ {0x0000a54c, 0x51044483, 0x51044483, 0x4e044483, 0x4e044483, 0x4a044483, 0x4a044483, 0x49001884, 0x49001884},
+ {0x0000a550, 0x5404a40c, 0x5404a40c, 0x5204a40c, 0x5204a40c, 0x4d04a40c, 0x4d04a40c, 0x4d001e84, 0x4d001e84},
+ {0x0000a554, 0x57024632, 0x57024632, 0x55024632, 0x55024632, 0x52024632, 0x52024632, 0x50001e69, 0x50001e69},
+ {0x0000a558, 0x5c00a634, 0x5c00a634, 0x5900a634, 0x5900a634, 0x5600a634, 0x5600a634, 0x550006f4, 0x550006f4},
+ {0x0000a55c, 0x5f026832, 0x5f026832, 0x5d026832, 0x5d026832, 0x5a026832, 0x5a026832, 0x59000ad3, 0x59000ad3},
+ {0x0000a560, 0x6602b012, 0x6602b012, 0x6202b012, 0x6202b012, 0x5d02b012, 0x5d02b012, 0x5e000ad5, 0x5e000ad5},
+ {0x0000a564, 0x6e02d0e1, 0x6e02d0e1, 0x6802d0e1, 0x6802d0e1, 0x6002d0e1, 0x6002d0e1, 0x61001ced, 0x61001ced},
+ {0x0000a568, 0x7202b4c4, 0x7202b4c4, 0x6c02b4c4, 0x6c02b4c4, 0x6502b4c4, 0x6502b4c4, 0x660018d4, 0x660018d4},
+ {0x0000a56c, 0x75007894, 0x75007894, 0x70007894, 0x70007894, 0x6b007894, 0x6b007894, 0x660018d4, 0x660018d4},
+ {0x0000a570, 0x7b025c74, 0x7b025c74, 0x75025c74, 0x75025c74, 0x70025c74, 0x70025c74, 0x660018d4, 0x660018d4},
+ {0x0000a574, 0x8300bcb5, 0x8300bcb5, 0x7a00bcb5, 0x7a00bcb5, 0x7600bcb5, 0x7600bcb5, 0x660018d4, 0x660018d4},
+ {0x0000a578, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4},
+ {0x0000a57c, 0x8a04dc74, 0x8a04dc74, 0x7f04dc74, 0x7f04dc74, 0x7c04dc74, 0x7c04dc74, 0x660018d4, 0x660018d4},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x03804000, 0x03804000},
+ {0x0000a610, 0x04c08c01, 0x04c08c01, 0x04808b01, 0x04808b01, 0x04808a01, 0x04808a01, 0x0300ca02, 0x0300ca02},
+ {0x0000a614, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00000e04, 0x00000e04},
+ {0x0000a618, 0x04010c01, 0x04010c01, 0x03c10b01, 0x03c10b01, 0x03810a01, 0x03810a01, 0x03014000, 0x03014000},
+ {0x0000a61c, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x00000000, 0x00000000},
+ {0x0000a620, 0x04010303, 0x04010303, 0x03c10303, 0x03c10303, 0x03810303, 0x03810303, 0x00000000, 0x00000000},
+ {0x0000a624, 0x03814e05, 0x03814e05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03414d05, 0x03014000, 0x03014000},
+ {0x0000a628, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x00c0c000, 0x03804c05, 0x03804c05},
+ {0x0000a62c, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x00c0c303, 0x0701de06, 0x0701de06},
+ {0x0000a630, 0x03418000, 0x03418000, 0x03018000, 0x03018000, 0x02c18000, 0x02c18000, 0x07819c07, 0x07819c07},
+ {0x0000a634, 0x03815004, 0x03815004, 0x03414f04, 0x03414f04, 0x03414e04, 0x03414e04, 0x0701dc07, 0x0701dc07},
+ {0x0000a638, 0x03005302, 0x03005302, 0x02c05202, 0x02c05202, 0x02805202, 0x02805202, 0x0701dc07, 0x0701dc07},
+ {0x0000a63c, 0x04c09302, 0x04c09302, 0x04809202, 0x04809202, 0x04809202, 0x04809202, 0x0701dc07, 0x0701dc07},
+ {0x0000b2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000b2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000b2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000b2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
+ {0x0000c2dc, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xffffaaaa, 0xfffd5aaa, 0xfffd5aaa},
+ {0x0000c2e0, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xffffcccc, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000c2e4, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xfffff0f0, 0xffffe0f0, 0xffffe0f0},
+ {0x0000c2e8, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xffffff00, 0xfffcff00, 0xfffcff00},
+ {0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
+ {0x00016048, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+ {0x00016280, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01801e84, 0x01808e84, 0x01808e84},
+ {0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
+ {0x00016448, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+ {0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x010002d4, 0x010002d4},
+ {0x00016848, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x62482401, 0x66482401, 0x66482401},
+};
+
+static const u32 ar955x_1p0_mac_core[][2] = {
+ /* Addr allmodes */
+ {0x00000008, 0x00000000},
+ {0x00000030, 0x00020085},
+ {0x00000034, 0x00000005},
+ {0x00000040, 0x00000000},
+ {0x00000044, 0x00000000},
+ {0x00000048, 0x00000008},
+ {0x0000004c, 0x00000010},
+ {0x00000050, 0x00000000},
+ {0x00001040, 0x002ffc0f},
+ {0x00001044, 0x002ffc0f},
+ {0x00001048, 0x002ffc0f},
+ {0x0000104c, 0x002ffc0f},
+ {0x00001050, 0x002ffc0f},
+ {0x00001054, 0x002ffc0f},
+ {0x00001058, 0x002ffc0f},
+ {0x0000105c, 0x002ffc0f},
+ {0x00001060, 0x002ffc0f},
+ {0x00001064, 0x002ffc0f},
+ {0x000010f0, 0x00000100},
+ {0x00001270, 0x00000000},
+ {0x000012b0, 0x00000000},
+ {0x000012f0, 0x00000000},
+ {0x0000143c, 0x00000000},
+ {0x0000147c, 0x00000000},
+ {0x00008000, 0x00000000},
+ {0x00008004, 0x00000000},
+ {0x00008008, 0x00000000},
+ {0x0000800c, 0x00000000},
+ {0x00008018, 0x00000000},
+ {0x00008020, 0x00000000},
+ {0x00008038, 0x00000000},
+ {0x0000803c, 0x00000000},
+ {0x00008040, 0x00000000},
+ {0x00008044, 0x00000000},
+ {0x00008048, 0x00000000},
+ {0x0000804c, 0xffffffff},
+ {0x00008054, 0x00000000},
+ {0x00008058, 0x00000000},
+ {0x0000805c, 0x000fc78f},
+ {0x00008060, 0x0000000f},
+ {0x00008064, 0x00000000},
+ {0x00008070, 0x00000310},
+ {0x00008074, 0x00000020},
+ {0x00008078, 0x00000000},
+ {0x0000809c, 0x0000000f},
+ {0x000080a0, 0x00000000},
+ {0x000080a4, 0x02ff0000},
+ {0x000080a8, 0x0e070605},
+ {0x000080ac, 0x0000000d},
+ {0x000080b0, 0x00000000},
+ {0x000080b4, 0x00000000},
+ {0x000080b8, 0x00000000},
+ {0x000080bc, 0x00000000},
+ {0x000080c0, 0x2a800000},
+ {0x000080c4, 0x06900168},
+ {0x000080c8, 0x13881c22},
+ {0x000080cc, 0x01f40000},
+ {0x000080d0, 0x00252500},
+ {0x000080d4, 0x00a00000},
+ {0x000080d8, 0x00400000},
+ {0x000080dc, 0x00000000},
+ {0x000080e0, 0xffffffff},
+ {0x000080e4, 0x0000ffff},
+ {0x000080e8, 0x3f3f3f3f},
+ {0x000080ec, 0x00000000},
+ {0x000080f0, 0x00000000},
+ {0x000080f4, 0x00000000},
+ {0x000080fc, 0x00020000},
+ {0x00008100, 0x00000000},
+ {0x00008108, 0x00000052},
+ {0x0000810c, 0x00000000},
+ {0x00008110, 0x00000000},
+ {0x00008114, 0x000007ff},
+ {0x00008118, 0x000000aa},
+ {0x0000811c, 0x00003210},
+ {0x00008124, 0x00000000},
+ {0x00008128, 0x00000000},
+ {0x0000812c, 0x00000000},
+ {0x00008130, 0x00000000},
+ {0x00008134, 0x00000000},
+ {0x00008138, 0x00000000},
+ {0x0000813c, 0x0000ffff},
+ {0x00008140, 0x000000fe},
+ {0x00008144, 0xffffffff},
+ {0x00008168, 0x00000000},
+ {0x0000816c, 0x00000000},
+ {0x000081c0, 0x00000000},
+ {0x000081c4, 0x33332210},
+ {0x000081ec, 0x00000000},
+ {0x000081f0, 0x00000000},
+ {0x000081f4, 0x00000000},
+ {0x000081f8, 0x00000000},
+ {0x000081fc, 0x00000000},
+ {0x00008240, 0x00100000},
+ {0x00008244, 0x0010f400},
+ {0x00008248, 0x00000800},
+ {0x0000824c, 0x0001e800},
+ {0x00008250, 0x00000000},
+ {0x00008254, 0x00000000},
+ {0x00008258, 0x00000000},
+ {0x0000825c, 0x40000000},
+ {0x00008260, 0x00080922},
+ {0x00008264, 0x9d400010},
+ {0x00008268, 0xffffffff},
+ {0x0000826c, 0x0000ffff},
+ {0x00008270, 0x00000000},
+ {0x00008274, 0x40000000},
+ {0x00008278, 0x003e4180},
+ {0x0000827c, 0x00000004},
+ {0x00008284, 0x0000002c},
+ {0x00008288, 0x0000002c},
+ {0x0000828c, 0x000000ff},
+ {0x00008294, 0x00000000},
+ {0x00008298, 0x00000000},
+ {0x0000829c, 0x00000000},
+ {0x00008300, 0x00001d40},
+ {0x00008314, 0x00000000},
+ {0x0000831c, 0x0000010d},
+ {0x00008328, 0x00000000},
+ {0x0000832c, 0x0000001f},
+ {0x00008330, 0x00000302},
+ {0x00008334, 0x00000700},
+ {0x00008338, 0xffff0000},
+ {0x0000833c, 0x02400000},
+ {0x00008340, 0x000107ff},
+ {0x00008344, 0xaa48107b},
+ {0x00008348, 0x008f0000},
+ {0x0000835c, 0x00000000},
+ {0x00008360, 0xffffffff},
+ {0x00008364, 0xffffffff},
+ {0x00008368, 0x00000000},
+ {0x00008370, 0x00000000},
+ {0x00008374, 0x000000ff},
+ {0x00008378, 0x00000000},
+ {0x0000837c, 0x00000000},
+ {0x00008380, 0xffffffff},
+ {0x00008384, 0xffffffff},
+ {0x00008390, 0xffffffff},
+ {0x00008394, 0xffffffff},
+ {0x00008398, 0x00000000},
+ {0x0000839c, 0x00000000},
+ {0x000083a0, 0x00000000},
+ {0x000083a4, 0x0000fa14},
+ {0x000083a8, 0x000f0c00},
+ {0x000083ac, 0x33332210},
+ {0x000083b0, 0x33332210},
+ {0x000083b4, 0x33332210},
+ {0x000083b8, 0x33332210},
+ {0x000083bc, 0x00000000},
+ {0x000083c0, 0x00000000},
+ {0x000083c4, 0x00000000},
+ {0x000083c8, 0x00000000},
+ {0x000083cc, 0x00000200},
+ {0x000083d0, 0x8c7901ff},
+};
+
+static const u32 ar955x_1p0_common_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x01910190},
+ {0x0000a030, 0x01930192},
+ {0x0000a034, 0x01950194},
+ {0x0000a038, 0x038a0196},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x22222229},
+ {0x0000a084, 0x1d1d1d1d},
+ {0x0000a088, 0x1d1d1d1d},
+ {0x0000a08c, 0x1d1d1d1d},
+ {0x0000a090, 0x171d1d1d},
+ {0x0000a094, 0x11111717},
+ {0x0000a098, 0x00030311},
+ {0x0000a09c, 0x00000000},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x23232323},
+ {0x0000b084, 0x21232323},
+ {0x0000b088, 0x19191c1e},
+ {0x0000b08c, 0x12141417},
+ {0x0000b090, 0x07070e0e},
+ {0x0000b094, 0x03030305},
+ {0x0000b098, 0x00000003},
+ {0x0000b09c, 0x00000000},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar955x_1p0_baseband_core[][2] = {
+ /* Addr allmodes */
+ {0x00009800, 0xafe68e30},
+ {0x00009804, 0xfd14e000},
+ {0x00009808, 0x9c0a9f6b},
+ {0x0000980c, 0x04900000},
+ {0x00009814, 0x0280c00a},
+ {0x00009818, 0x00000000},
+ {0x0000981c, 0x00020028},
+ {0x00009834, 0x6400a190},
+ {0x00009838, 0x0108ecff},
+ {0x0000983c, 0x14000600},
+ {0x00009880, 0x201fff00},
+ {0x00009884, 0x00001042},
+ {0x000098a4, 0x00200400},
+ {0x000098b0, 0x32840bbe},
+ {0x000098bc, 0x00000002},
+ {0x000098d0, 0x004b6a8e},
+ {0x000098d4, 0x00000820},
+ {0x000098dc, 0x00000000},
+ {0x000098f0, 0x00000000},
+ {0x000098f4, 0x00000000},
+ {0x00009c04, 0xff55ff55},
+ {0x00009c08, 0x0320ff55},
+ {0x00009c0c, 0x00000000},
+ {0x00009c10, 0x00000000},
+ {0x00009c14, 0x00046384},
+ {0x00009c18, 0x05b6b440},
+ {0x00009c1c, 0x00b6b440},
+ {0x00009d00, 0xc080a333},
+ {0x00009d04, 0x40206c10},
+ {0x00009d08, 0x009c4060},
+ {0x00009d0c, 0x9883800a},
+ {0x00009d10, 0x01834061},
+ {0x00009d14, 0x00c0040b},
+ {0x00009d18, 0x00000000},
+ {0x00009e08, 0x0038230c},
+ {0x00009e24, 0x990bb515},
+ {0x00009e28, 0x0c6f0000},
+ {0x00009e30, 0x06336f77},
+ {0x00009e34, 0x6af6532f},
+ {0x00009e38, 0x0cc80c00},
+ {0x00009e40, 0x0d261820},
+ {0x00009e4c, 0x00001004},
+ {0x00009e50, 0x00ff03f1},
+ {0x00009fc0, 0x813e4788},
+ {0x00009fc4, 0x0001efb5},
+ {0x00009fcc, 0x40000014},
+ {0x00009fd0, 0x01193b93},
+ {0x0000a20c, 0x00000000},
+ {0x0000a220, 0x00000000},
+ {0x0000a224, 0x00000000},
+ {0x0000a228, 0x10002310},
+ {0x0000a23c, 0x00000000},
+ {0x0000a244, 0x0c000000},
+ {0x0000a248, 0x00000140},
+ {0x0000a2a0, 0x00000007},
+ {0x0000a2c0, 0x00000007},
+ {0x0000a2c8, 0x00000000},
+ {0x0000a2d4, 0x00000000},
+ {0x0000a2ec, 0x00000000},
+ {0x0000a2f0, 0x00000000},
+ {0x0000a2f4, 0x00000000},
+ {0x0000a2f8, 0x00000000},
+ {0x0000a344, 0x00000000},
+ {0x0000a34c, 0x00000000},
+ {0x0000a350, 0x0000a000},
+ {0x0000a364, 0x00000000},
+ {0x0000a370, 0x00000000},
+ {0x0000a390, 0x00000001},
+ {0x0000a394, 0x00000444},
+ {0x0000a398, 0x1f020503},
+ {0x0000a39c, 0x29180c03},
+ {0x0000a3a0, 0x9a8b6844},
+ {0x0000a3a4, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa},
+ {0x0000a3ac, 0x3c466478},
+ {0x0000a3c0, 0x20202020},
+ {0x0000a3c4, 0x22222220},
+ {0x0000a3c8, 0x20200020},
+ {0x0000a3cc, 0x20202020},
+ {0x0000a3d0, 0x20202020},
+ {0x0000a3d4, 0x20202020},
+ {0x0000a3d8, 0x20202020},
+ {0x0000a3dc, 0x20202020},
+ {0x0000a3e0, 0x20202020},
+ {0x0000a3e4, 0x20202020},
+ {0x0000a3e8, 0x20202020},
+ {0x0000a3ec, 0x20202020},
+ {0x0000a3f0, 0x00000000},
+ {0x0000a3f4, 0x00000000},
+ {0x0000a3f8, 0x0c9bd380},
+ {0x0000a3fc, 0x000f0f01},
+ {0x0000a400, 0x8fa91f01},
+ {0x0000a404, 0x00000000},
+ {0x0000a408, 0x0e79e5c6},
+ {0x0000a40c, 0x00820820},
+ {0x0000a414, 0x1ce739ce},
+ {0x0000a418, 0x2d001dce},
+ {0x0000a41c, 0x1ce739ce},
+ {0x0000a420, 0x000001ce},
+ {0x0000a424, 0x1ce739ce},
+ {0x0000a428, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce},
+ {0x0000a434, 0x00000000},
+ {0x0000a438, 0x00001801},
+ {0x0000a43c, 0x00100000},
+ {0x0000a444, 0x00000000},
+ {0x0000a448, 0x05000080},
+ {0x0000a44c, 0x00000001},
+ {0x0000a450, 0x00010000},
+ {0x0000a458, 0x00000000},
+ {0x0000a644, 0x3fad9d74},
+ {0x0000a648, 0x0048060a},
+ {0x0000a64c, 0x00003c37},
+ {0x0000a670, 0x03020100},
+ {0x0000a674, 0x09080504},
+ {0x0000a678, 0x0d0c0b0a},
+ {0x0000a67c, 0x13121110},
+ {0x0000a680, 0x31301514},
+ {0x0000a684, 0x35343332},
+ {0x0000a688, 0x00000036},
+ {0x0000a690, 0x00000838},
+ {0x0000a7cc, 0x00000000},
+ {0x0000a7d0, 0x00000000},
+ {0x0000a7d4, 0x00000004},
+ {0x0000a7dc, 0x00000000},
+ {0x0000a8d0, 0x004b6a8e},
+ {0x0000a8d4, 0x00000820},
+ {0x0000a8dc, 0x00000000},
+ {0x0000a8f0, 0x00000000},
+ {0x0000a8f4, 0x00000000},
+ {0x0000b2d0, 0x00000080},
+ {0x0000b2d4, 0x00000000},
+ {0x0000b2ec, 0x00000000},
+ {0x0000b2f0, 0x00000000},
+ {0x0000b2f4, 0x00000000},
+ {0x0000b2f8, 0x00000000},
+ {0x0000b408, 0x0e79e5c0},
+ {0x0000b40c, 0x00820820},
+ {0x0000b420, 0x00000000},
+ {0x0000b8d0, 0x004b6a8e},
+ {0x0000b8d4, 0x00000820},
+ {0x0000b8dc, 0x00000000},
+ {0x0000b8f0, 0x00000000},
+ {0x0000b8f4, 0x00000000},
+ {0x0000c2d0, 0x00000080},
+ {0x0000c2d4, 0x00000000},
+ {0x0000c2ec, 0x00000000},
+ {0x0000c2f0, 0x00000000},
+ {0x0000c2f4, 0x00000000},
+ {0x0000c2f8, 0x00000000},
+ {0x0000c408, 0x0e79e5c0},
+ {0x0000c40c, 0x00820820},
+ {0x0000c420, 0x00000000},
+};
+
+static const u32 ar955x_1p0_common_wo_xlna_rx_gain_table[][2] = {
+ /* Addr allmodes */
+ {0x0000a000, 0x00010000},
+ {0x0000a004, 0x00030002},
+ {0x0000a008, 0x00050004},
+ {0x0000a00c, 0x00810080},
+ {0x0000a010, 0x00830082},
+ {0x0000a014, 0x01810180},
+ {0x0000a018, 0x01830182},
+ {0x0000a01c, 0x01850184},
+ {0x0000a020, 0x01890188},
+ {0x0000a024, 0x018b018a},
+ {0x0000a028, 0x018d018c},
+ {0x0000a02c, 0x03820190},
+ {0x0000a030, 0x03840383},
+ {0x0000a034, 0x03880385},
+ {0x0000a038, 0x038a0389},
+ {0x0000a03c, 0x038c038b},
+ {0x0000a040, 0x0390038d},
+ {0x0000a044, 0x03920391},
+ {0x0000a048, 0x03940393},
+ {0x0000a04c, 0x03960395},
+ {0x0000a050, 0x00000000},
+ {0x0000a054, 0x00000000},
+ {0x0000a058, 0x00000000},
+ {0x0000a05c, 0x00000000},
+ {0x0000a060, 0x00000000},
+ {0x0000a064, 0x00000000},
+ {0x0000a068, 0x00000000},
+ {0x0000a06c, 0x00000000},
+ {0x0000a070, 0x00000000},
+ {0x0000a074, 0x00000000},
+ {0x0000a078, 0x00000000},
+ {0x0000a07c, 0x00000000},
+ {0x0000a080, 0x29292929},
+ {0x0000a084, 0x29292929},
+ {0x0000a088, 0x29292929},
+ {0x0000a08c, 0x29292929},
+ {0x0000a090, 0x22292929},
+ {0x0000a094, 0x1d1d2222},
+ {0x0000a098, 0x0c111117},
+ {0x0000a09c, 0x00030303},
+ {0x0000a0a0, 0x00000000},
+ {0x0000a0a4, 0x00000000},
+ {0x0000a0a8, 0x00000000},
+ {0x0000a0ac, 0x00000000},
+ {0x0000a0b0, 0x00000000},
+ {0x0000a0b4, 0x00000000},
+ {0x0000a0b8, 0x00000000},
+ {0x0000a0bc, 0x00000000},
+ {0x0000a0c0, 0x001f0000},
+ {0x0000a0c4, 0x01000101},
+ {0x0000a0c8, 0x011e011f},
+ {0x0000a0cc, 0x011c011d},
+ {0x0000a0d0, 0x02030204},
+ {0x0000a0d4, 0x02010202},
+ {0x0000a0d8, 0x021f0200},
+ {0x0000a0dc, 0x0302021e},
+ {0x0000a0e0, 0x03000301},
+ {0x0000a0e4, 0x031e031f},
+ {0x0000a0e8, 0x0402031d},
+ {0x0000a0ec, 0x04000401},
+ {0x0000a0f0, 0x041e041f},
+ {0x0000a0f4, 0x0502041d},
+ {0x0000a0f8, 0x05000501},
+ {0x0000a0fc, 0x051e051f},
+ {0x0000a100, 0x06010602},
+ {0x0000a104, 0x061f0600},
+ {0x0000a108, 0x061d061e},
+ {0x0000a10c, 0x07020703},
+ {0x0000a110, 0x07000701},
+ {0x0000a114, 0x00000000},
+ {0x0000a118, 0x00000000},
+ {0x0000a11c, 0x00000000},
+ {0x0000a120, 0x00000000},
+ {0x0000a124, 0x00000000},
+ {0x0000a128, 0x00000000},
+ {0x0000a12c, 0x00000000},
+ {0x0000a130, 0x00000000},
+ {0x0000a134, 0x00000000},
+ {0x0000a138, 0x00000000},
+ {0x0000a13c, 0x00000000},
+ {0x0000a140, 0x001f0000},
+ {0x0000a144, 0x01000101},
+ {0x0000a148, 0x011e011f},
+ {0x0000a14c, 0x011c011d},
+ {0x0000a150, 0x02030204},
+ {0x0000a154, 0x02010202},
+ {0x0000a158, 0x021f0200},
+ {0x0000a15c, 0x0302021e},
+ {0x0000a160, 0x03000301},
+ {0x0000a164, 0x031e031f},
+ {0x0000a168, 0x0402031d},
+ {0x0000a16c, 0x04000401},
+ {0x0000a170, 0x041e041f},
+ {0x0000a174, 0x0502041d},
+ {0x0000a178, 0x05000501},
+ {0x0000a17c, 0x051e051f},
+ {0x0000a180, 0x06010602},
+ {0x0000a184, 0x061f0600},
+ {0x0000a188, 0x061d061e},
+ {0x0000a18c, 0x07020703},
+ {0x0000a190, 0x07000701},
+ {0x0000a194, 0x00000000},
+ {0x0000a198, 0x00000000},
+ {0x0000a19c, 0x00000000},
+ {0x0000a1a0, 0x00000000},
+ {0x0000a1a4, 0x00000000},
+ {0x0000a1a8, 0x00000000},
+ {0x0000a1ac, 0x00000000},
+ {0x0000a1b0, 0x00000000},
+ {0x0000a1b4, 0x00000000},
+ {0x0000a1b8, 0x00000000},
+ {0x0000a1bc, 0x00000000},
+ {0x0000a1c0, 0x00000000},
+ {0x0000a1c4, 0x00000000},
+ {0x0000a1c8, 0x00000000},
+ {0x0000a1cc, 0x00000000},
+ {0x0000a1d0, 0x00000000},
+ {0x0000a1d4, 0x00000000},
+ {0x0000a1d8, 0x00000000},
+ {0x0000a1dc, 0x00000000},
+ {0x0000a1e0, 0x00000000},
+ {0x0000a1e4, 0x00000000},
+ {0x0000a1e8, 0x00000000},
+ {0x0000a1ec, 0x00000000},
+ {0x0000a1f0, 0x00000396},
+ {0x0000a1f4, 0x00000396},
+ {0x0000a1f8, 0x00000396},
+ {0x0000a1fc, 0x00000196},
+ {0x0000b000, 0x00010000},
+ {0x0000b004, 0x00030002},
+ {0x0000b008, 0x00050004},
+ {0x0000b00c, 0x00810080},
+ {0x0000b010, 0x00830082},
+ {0x0000b014, 0x01810180},
+ {0x0000b018, 0x01830182},
+ {0x0000b01c, 0x01850184},
+ {0x0000b020, 0x02810280},
+ {0x0000b024, 0x02830282},
+ {0x0000b028, 0x02850284},
+ {0x0000b02c, 0x02890288},
+ {0x0000b030, 0x028b028a},
+ {0x0000b034, 0x0388028c},
+ {0x0000b038, 0x038a0389},
+ {0x0000b03c, 0x038c038b},
+ {0x0000b040, 0x0390038d},
+ {0x0000b044, 0x03920391},
+ {0x0000b048, 0x03940393},
+ {0x0000b04c, 0x03960395},
+ {0x0000b050, 0x00000000},
+ {0x0000b054, 0x00000000},
+ {0x0000b058, 0x00000000},
+ {0x0000b05c, 0x00000000},
+ {0x0000b060, 0x00000000},
+ {0x0000b064, 0x00000000},
+ {0x0000b068, 0x00000000},
+ {0x0000b06c, 0x00000000},
+ {0x0000b070, 0x00000000},
+ {0x0000b074, 0x00000000},
+ {0x0000b078, 0x00000000},
+ {0x0000b07c, 0x00000000},
+ {0x0000b080, 0x32323232},
+ {0x0000b084, 0x2f2f3232},
+ {0x0000b088, 0x23282a2d},
+ {0x0000b08c, 0x1c1e2123},
+ {0x0000b090, 0x14171919},
+ {0x0000b094, 0x0e0e1214},
+ {0x0000b098, 0x03050707},
+ {0x0000b09c, 0x00030303},
+ {0x0000b0a0, 0x00000000},
+ {0x0000b0a4, 0x00000000},
+ {0x0000b0a8, 0x00000000},
+ {0x0000b0ac, 0x00000000},
+ {0x0000b0b0, 0x00000000},
+ {0x0000b0b4, 0x00000000},
+ {0x0000b0b8, 0x00000000},
+ {0x0000b0bc, 0x00000000},
+ {0x0000b0c0, 0x003f0020},
+ {0x0000b0c4, 0x00400041},
+ {0x0000b0c8, 0x0140005f},
+ {0x0000b0cc, 0x0160015f},
+ {0x0000b0d0, 0x017e017f},
+ {0x0000b0d4, 0x02410242},
+ {0x0000b0d8, 0x025f0240},
+ {0x0000b0dc, 0x027f0260},
+ {0x0000b0e0, 0x0341027e},
+ {0x0000b0e4, 0x035f0340},
+ {0x0000b0e8, 0x037f0360},
+ {0x0000b0ec, 0x04400441},
+ {0x0000b0f0, 0x0460045f},
+ {0x0000b0f4, 0x0541047f},
+ {0x0000b0f8, 0x055f0540},
+ {0x0000b0fc, 0x057f0560},
+ {0x0000b100, 0x06400641},
+ {0x0000b104, 0x0660065f},
+ {0x0000b108, 0x067e067f},
+ {0x0000b10c, 0x07410742},
+ {0x0000b110, 0x075f0740},
+ {0x0000b114, 0x077f0760},
+ {0x0000b118, 0x07800781},
+ {0x0000b11c, 0x07a0079f},
+ {0x0000b120, 0x07c107bf},
+ {0x0000b124, 0x000007c0},
+ {0x0000b128, 0x00000000},
+ {0x0000b12c, 0x00000000},
+ {0x0000b130, 0x00000000},
+ {0x0000b134, 0x00000000},
+ {0x0000b138, 0x00000000},
+ {0x0000b13c, 0x00000000},
+ {0x0000b140, 0x003f0020},
+ {0x0000b144, 0x00400041},
+ {0x0000b148, 0x0140005f},
+ {0x0000b14c, 0x0160015f},
+ {0x0000b150, 0x017e017f},
+ {0x0000b154, 0x02410242},
+ {0x0000b158, 0x025f0240},
+ {0x0000b15c, 0x027f0260},
+ {0x0000b160, 0x0341027e},
+ {0x0000b164, 0x035f0340},
+ {0x0000b168, 0x037f0360},
+ {0x0000b16c, 0x04400441},
+ {0x0000b170, 0x0460045f},
+ {0x0000b174, 0x0541047f},
+ {0x0000b178, 0x055f0540},
+ {0x0000b17c, 0x057f0560},
+ {0x0000b180, 0x06400641},
+ {0x0000b184, 0x0660065f},
+ {0x0000b188, 0x067e067f},
+ {0x0000b18c, 0x07410742},
+ {0x0000b190, 0x075f0740},
+ {0x0000b194, 0x077f0760},
+ {0x0000b198, 0x07800781},
+ {0x0000b19c, 0x07a0079f},
+ {0x0000b1a0, 0x07c107bf},
+ {0x0000b1a4, 0x000007c0},
+ {0x0000b1a8, 0x00000000},
+ {0x0000b1ac, 0x00000000},
+ {0x0000b1b0, 0x00000000},
+ {0x0000b1b4, 0x00000000},
+ {0x0000b1b8, 0x00000000},
+ {0x0000b1bc, 0x00000000},
+ {0x0000b1c0, 0x00000000},
+ {0x0000b1c4, 0x00000000},
+ {0x0000b1c8, 0x00000000},
+ {0x0000b1cc, 0x00000000},
+ {0x0000b1d0, 0x00000000},
+ {0x0000b1d4, 0x00000000},
+ {0x0000b1d8, 0x00000000},
+ {0x0000b1dc, 0x00000000},
+ {0x0000b1e0, 0x00000000},
+ {0x0000b1e4, 0x00000000},
+ {0x0000b1e8, 0x00000000},
+ {0x0000b1ec, 0x00000000},
+ {0x0000b1f0, 0x00000396},
+ {0x0000b1f4, 0x00000396},
+ {0x0000b1f8, 0x00000396},
+ {0x0000b1fc, 0x00000196},
+};
+
+static const u32 ar955x_1p0_soc_preamble[][2] = {
+ /* Addr allmodes */
+ {0x00007000, 0x00000000},
+ {0x00007004, 0x00000000},
+ {0x00007008, 0x00000000},
+ {0x0000700c, 0x00000000},
+ {0x0000701c, 0x00000000},
+ {0x00007020, 0x00000000},
+ {0x00007024, 0x00000000},
+ {0x00007028, 0x00000000},
+ {0x0000702c, 0x00000000},
+ {0x00007030, 0x00000000},
+ {0x00007034, 0x00000002},
+ {0x00007038, 0x000004c2},
+ {0x00007048, 0x00000000},
+};
+
+static const u32 ar955x_1p0_common_wo_xlna_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
+};
+
+static const u32 ar955x_1p0_mac_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
+ {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
+ {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
+ {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
+ {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
+ {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
+ {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
+ {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
+};
+
+static const u32 ar955x_1p0_common_rx_gain_bounds[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e48, 0x5030201a, 0x5030201a, 0x50302018, 0x50302018},
+};
+
+static const u32 ar955x_1p0_modes_no_xpa_tx_gain_table[][9] = {
+ /* Addr 5G_HT20_L 5G_HT40_L 5G_HT20_M 5G_HT40_M 5G_HT20_H 5G_HT40_H 2G_HT40 2G_HT20 */
+ {0x0000a2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
+ {0x0000a2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
+ {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
+ {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d8, 0x000050d7, 0x000050d7},
+ {0x0000a500, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
+ {0x0000a504, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
+ {0x0000a508, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
+ {0x0000a50c, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0d002621, 0x0b000006, 0x0b000006},
+ {0x0000a510, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x13004620, 0x0f00000a, 0x0f00000a},
+ {0x0000a514, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x19004a20, 0x1300000c, 0x1300000c},
+ {0x0000a518, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1d004e20, 0x1700000e, 0x1700000e},
+ {0x0000a51c, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x21005420, 0x1b000012, 0x1b000012},
+ {0x0000a520, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x26005e20, 0x1f00004a, 0x1f00004a},
+ {0x0000a524, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x2b005e40, 0x23000244, 0x23000244},
+ {0x0000a528, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2f005e42, 0x2700022b, 0x2700022b},
+ {0x0000a52c, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x33005e44, 0x2b000625, 0x2b000625},
+ {0x0000a530, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x38005e65, 0x2f001006, 0x2f001006},
+ {0x0000a534, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x3c005e69, 0x330008a0, 0x330008a0},
+ {0x0000a538, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x40005e6b, 0x37000a2a, 0x37000a2a},
+ {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x44005e6d, 0x3b001c23, 0x3b001c23},
+ {0x0000a540, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x49005e72, 0x3f0014a0, 0x3f0014a0},
+ {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x4e005eb2, 0x43001882, 0x43001882},
+ {0x0000a548, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x53005f12, 0x47001ca2, 0x47001ca2},
+ {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x59025eb2, 0x4b001ec3, 0x4b001ec3},
+ {0x0000a550, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x5e025f12, 0x4f00148c, 0x4f00148c},
+ {0x0000a554, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x61027f12, 0x53001c6e, 0x53001c6e},
+ {0x0000a558, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x6702bf12, 0x57001c92, 0x57001c92},
+ {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x6b02bf14, 0x5c001af6, 0x5c001af6},
+ {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x6f02bf16, 0x5c001af6, 0x5c001af6},
+ {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a610, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x00804000, 0x04005001, 0x04005001},
+ {0x0000a614, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x00804201, 0x03808e02, 0x03808e02},
+ {0x0000a618, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0280c802, 0x0300c000, 0x0300c000},
+ {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x0280ca03, 0x03808e02, 0x03808e02},
+ {0x0000a620, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x04c15104, 0x03410c03, 0x03410c03},
+ {0x0000a624, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04014c03, 0x04014c03},
+ {0x0000a628, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x05818d04, 0x05818d04},
+ {0x0000a62c, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801cd04, 0x0801cd04},
+ {0x0000a630, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000a634, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000a638, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000a63c, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x04c15305, 0x0801e007, 0x0801e007},
+ {0x0000b2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
+ {0x0000b2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
+ {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
+ {0x0000c2dc, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0x01feee00, 0xfffe5aaa, 0xfffe5aaa},
+ {0x0000c2e0, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0x0000f000, 0xfffe9ccc, 0xfffe9ccc},
+ {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0x01ff0000, 0xffffe0f0, 0xffffe0f0},
+ {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0xffffef00, 0xffffef00},
+ {0x00016044, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
+ {0x00016048, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+ {0x00016444, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
+ {0x00016448, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+ {0x00016844, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x056db2d4, 0x054922d4, 0x054922d4},
+ {0x00016848, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401, 0x66482401},
+};
+
+static const u32 ar955x_1p0_soc_postamble[][5] = {
+ /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
+ {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
+};
+
+static const u32 ar955x_1p0_modes_fast_clock[][3] = {
+ /* Addr 5G_HT20 5G_HT40 */
+ {0x00001030, 0x00000268, 0x000004d0},
+ {0x00001070, 0x0000018c, 0x00000318},
+ {0x000010b0, 0x00000fd0, 0x00001fa0},
+ {0x00008014, 0x044c044c, 0x08980898},
+ {0x0000801c, 0x148ec02b, 0x148ec057},
+ {0x00008318, 0x000044c0, 0x00008980},
+ {0x00009e00, 0x0372131c, 0x0372131c},
+ {0x0000a230, 0x0000000b, 0x00000016},
+ {0x0000a254, 0x00000898, 0x00001130},
+};
+
+#endif /* INITVALS_955X_1P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 06b3f0df9fad..6e1915aee712 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2010 Atheros Communications Inc.
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -19,18 +20,7 @@
/* AR9580 1.0 */
-static const u32 ar9580_1p0_modes_fast_clock[][3] = {
- /* Addr 5G_HT20 5G_HT40 */
- {0x00001030, 0x00000268, 0x000004d0},
- {0x00001070, 0x0000018c, 0x00000318},
- {0x000010b0, 0x00000fd0, 0x00001fa0},
- {0x00008014, 0x044c044c, 0x08980898},
- {0x0000801c, 0x148ec02b, 0x148ec057},
- {0x00008318, 0x000044c0, 0x00008980},
- {0x00009e00, 0x0372131c, 0x0372131c},
- {0x0000a230, 0x0000000b, 0x00000016},
- {0x0000a254, 0x00000898, 0x00001130},
-};
+#define ar9580_1p0_modes_fast_clock ar9300Modes_fast_clock_2p2
static const u32 ar9580_1p0_radio_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -208,17 +198,7 @@ static const u32 ar9580_1p0_baseband_core[][2] = {
{0x0000c420, 0x00000000},
};
-static const u32 ar9580_1p0_mac_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
- {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
- {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
- {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
- {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
- {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
- {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
- {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
-};
+#define ar9580_1p0_mac_postamble ar9300_2p2_mac_postamble
static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -326,111 +306,7 @@ static const u32 ar9580_1p0_low_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-static const u32 ar9580_1p0_high_power_tx_gain_table[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
- {0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
- {0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
- {0x0000a518, 0x21002220, 0x21002220, 0x16000402, 0x16000402},
- {0x0000a51c, 0x27002223, 0x27002223, 0x19000404, 0x19000404},
- {0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
- {0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
- {0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
- {0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
- {0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
- {0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
- {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
- {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
- {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
- {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
- {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
- {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83},
- {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84},
- {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3},
- {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5},
- {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9},
- {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb},
- {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec},
- {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
- {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
- {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
- {0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
- {0x0000a598, 0x21802220, 0x21802220, 0x16800402, 0x16800402},
- {0x0000a59c, 0x27802223, 0x27802223, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
- {0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
- {0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
- {0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
- {0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
- {0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
- {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
- {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
- {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
- {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
- {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
- {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83},
- {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84},
- {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3},
- {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5},
- {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9},
- {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb},
- {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000},
- {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501},
- {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501},
- {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005},
- {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x03aaa352, 0x03aaa352},
- {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03ccc584, 0x03ccc584},
- {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016048, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
- {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
- {0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016448, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
- {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
- {0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
- {0x00016848, 0x66480001, 0x66480001, 0x66480001, 0x66480001},
- {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
-};
+#define ar9580_1p0_high_power_tx_gain_table ar9580_1p0_low_ob_db_tx_gain_table
static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
@@ -538,12 +414,7 @@ static const u32 ar9580_1p0_lowest_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-static const u32 ar9580_1p0_baseband_core_txfir_coeff_japan_2484[][2] = {
- /* Addr allmodes */
- {0x0000a398, 0x00000000},
- {0x0000a39c, 0x6f7f0301},
- {0x0000a3a0, 0xca9228ee},
-};
+#define ar9580_1p0_baseband_core_txfir_coeff_japan_2484 ar9462_2p0_baseband_core_txfir_coeff_japan_2484
static const u32 ar9580_1p0_mac_core[][2] = {
/* Addr allmodes */
@@ -808,376 +679,11 @@ static const u32 ar9580_1p0_mixed_ob_db_tx_gain_table[][5] = {
{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
-static const u32 ar9580_1p0_wo_xlna_rx_gain_table[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x03820190},
- {0x0000a030, 0x03840383},
- {0x0000a034, 0x03880385},
- {0x0000a038, 0x038a0389},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x29292929},
- {0x0000a084, 0x29292929},
- {0x0000a088, 0x29292929},
- {0x0000a08c, 0x29292929},
- {0x0000a090, 0x22292929},
- {0x0000a094, 0x1d1d2222},
- {0x0000a098, 0x0c111117},
- {0x0000a09c, 0x00030303},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x32323232},
- {0x0000b084, 0x2f2f3232},
- {0x0000b088, 0x23282a2d},
- {0x0000b08c, 0x1c1e2123},
- {0x0000b090, 0x14171919},
- {0x0000b094, 0x0e0e1214},
- {0x0000b098, 0x03050707},
- {0x0000b09c, 0x00030303},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
+#define ar9580_1p0_wo_xlna_rx_gain_table ar9300Common_wo_xlna_rx_gain_table_2p2
-static const u32 ar9580_1p0_soc_postamble[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
-};
+#define ar9580_1p0_soc_postamble ar9300_2p2_soc_postamble
-static const u32 ar9580_1p0_high_ob_db_tx_gain_table[][5] = {
- /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x0000a2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000a410, 0x000050d8, 0x000050d8, 0x000050d9, 0x000050d9},
- {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
- {0x0000a504, 0x04002222, 0x04002222, 0x04000002, 0x04000002},
- {0x0000a508, 0x09002421, 0x09002421, 0x08000004, 0x08000004},
- {0x0000a50c, 0x0d002621, 0x0d002621, 0x0b000200, 0x0b000200},
- {0x0000a510, 0x13004620, 0x13004620, 0x0f000202, 0x0f000202},
- {0x0000a514, 0x19004a20, 0x19004a20, 0x11000400, 0x11000400},
- {0x0000a518, 0x1d004e20, 0x1d004e20, 0x15000402, 0x15000402},
- {0x0000a51c, 0x21005420, 0x21005420, 0x19000404, 0x19000404},
- {0x0000a520, 0x26005e20, 0x26005e20, 0x1b000603, 0x1b000603},
- {0x0000a524, 0x2b005e40, 0x2b005e40, 0x1f000a02, 0x1f000a02},
- {0x0000a528, 0x2f005e42, 0x2f005e42, 0x23000a04, 0x23000a04},
- {0x0000a52c, 0x33005e44, 0x33005e44, 0x26000a20, 0x26000a20},
- {0x0000a530, 0x38005e65, 0x38005e65, 0x2a000e20, 0x2a000e20},
- {0x0000a534, 0x3c005e69, 0x3c005e69, 0x2e000e22, 0x2e000e22},
- {0x0000a538, 0x40005e6b, 0x40005e6b, 0x31000e24, 0x31000e24},
- {0x0000a53c, 0x44005e6d, 0x44005e6d, 0x34001640, 0x34001640},
- {0x0000a540, 0x49005e72, 0x49005e72, 0x38001660, 0x38001660},
- {0x0000a544, 0x4e005eb2, 0x4e005eb2, 0x3b001861, 0x3b001861},
- {0x0000a548, 0x53005f12, 0x53005f12, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5e025f12, 0x5e025f12, 0x44001c84, 0x44001c84},
- {0x0000a554, 0x61027f12, 0x61027f12, 0x48001ce3, 0x48001ce3},
- {0x0000a558, 0x6702bf12, 0x6702bf12, 0x4c001ce5, 0x4c001ce5},
- {0x0000a55c, 0x6b02bf14, 0x6b02bf14, 0x50001ce9, 0x50001ce9},
- {0x0000a560, 0x6f02bf16, 0x6f02bf16, 0x54001ceb, 0x54001ceb},
- {0x0000a564, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a568, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a56c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a570, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a574, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a578, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a57c, 0x6f02bf16, 0x6f02bf16, 0x56001eec, 0x56001eec},
- {0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
- {0x0000a584, 0x04802222, 0x04802222, 0x04800002, 0x04800002},
- {0x0000a588, 0x09802421, 0x09802421, 0x08800004, 0x08800004},
- {0x0000a58c, 0x0d802621, 0x0d802621, 0x0b800200, 0x0b800200},
- {0x0000a590, 0x13804620, 0x13804620, 0x0f800202, 0x0f800202},
- {0x0000a594, 0x19804a20, 0x19804a20, 0x11800400, 0x11800400},
- {0x0000a598, 0x1d804e20, 0x1d804e20, 0x15800402, 0x15800402},
- {0x0000a59c, 0x21805420, 0x21805420, 0x19800404, 0x19800404},
- {0x0000a5a0, 0x26805e20, 0x26805e20, 0x1b800603, 0x1b800603},
- {0x0000a5a4, 0x2b805e40, 0x2b805e40, 0x1f800a02, 0x1f800a02},
- {0x0000a5a8, 0x2f805e42, 0x2f805e42, 0x23800a04, 0x23800a04},
- {0x0000a5ac, 0x33805e44, 0x33805e44, 0x26800a20, 0x26800a20},
- {0x0000a5b0, 0x38805e65, 0x38805e65, 0x2a800e20, 0x2a800e20},
- {0x0000a5b4, 0x3c805e69, 0x3c805e69, 0x2e800e22, 0x2e800e22},
- {0x0000a5b8, 0x40805e6b, 0x40805e6b, 0x31800e24, 0x31800e24},
- {0x0000a5bc, 0x44805e6d, 0x44805e6d, 0x34801640, 0x34801640},
- {0x0000a5c0, 0x49805e72, 0x49805e72, 0x38801660, 0x38801660},
- {0x0000a5c4, 0x4e805eb2, 0x4e805eb2, 0x3b801861, 0x3b801861},
- {0x0000a5c8, 0x53805f12, 0x53805f12, 0x3e801a81, 0x3e801a81},
- {0x0000a5cc, 0x59825eb2, 0x59825eb2, 0x42801a83, 0x42801a83},
- {0x0000a5d0, 0x5e825f12, 0x5e825f12, 0x44801c84, 0x44801c84},
- {0x0000a5d4, 0x61827f12, 0x61827f12, 0x48801ce3, 0x48801ce3},
- {0x0000a5d8, 0x6782bf12, 0x6782bf12, 0x4c801ce5, 0x4c801ce5},
- {0x0000a5dc, 0x6b82bf14, 0x6b82bf14, 0x50801ce9, 0x50801ce9},
- {0x0000a5e0, 0x6f82bf16, 0x6f82bf16, 0x54801ceb, 0x54801ceb},
- {0x0000a5e4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5e8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5ec, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f0, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f4, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5f8, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a5fc, 0x6f82bf16, 0x6f82bf16, 0x56801eec, 0x56801eec},
- {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a610, 0x00804000, 0x00804000, 0x00000000, 0x00000000},
- {0x0000a614, 0x00804201, 0x00804201, 0x01404000, 0x01404000},
- {0x0000a618, 0x0280c802, 0x0280c802, 0x01404501, 0x01404501},
- {0x0000a61c, 0x0280ca03, 0x0280ca03, 0x02008501, 0x02008501},
- {0x0000a620, 0x04c15104, 0x04c15104, 0x0280ca03, 0x0280ca03},
- {0x0000a624, 0x04c15305, 0x04c15305, 0x03010c04, 0x03010c04},
- {0x0000a628, 0x04c15305, 0x04c15305, 0x04014c04, 0x04014c04},
- {0x0000a62c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a630, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a634, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a638, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000a63c, 0x04c15305, 0x04c15305, 0x04015005, 0x04015005},
- {0x0000b2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000b2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000b2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000b2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x0000c2dc, 0x01feee00, 0x01feee00, 0x03aaa352, 0x03aaa352},
- {0x0000c2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
- {0x0000c2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
- {0x0000c2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
- {0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
- {0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
- {0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
- {0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
- {0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
- {0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
- {0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
- {0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
- {0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
-};
+#define ar9580_1p0_high_ob_db_tx_gain_table ar9300Modes_high_ob_db_tx_gain_table_2p2
static const u32 ar9580_1p0_soc_preamble[][2] = {
/* Addr allmodes */
@@ -1189,265 +695,7 @@ static const u32 ar9580_1p0_soc_preamble[][2] = {
{0x00007048, 0x00000008},
};
-static const u32 ar9580_1p0_rx_gain_table[][2] = {
- /* Addr allmodes */
- {0x0000a000, 0x00010000},
- {0x0000a004, 0x00030002},
- {0x0000a008, 0x00050004},
- {0x0000a00c, 0x00810080},
- {0x0000a010, 0x00830082},
- {0x0000a014, 0x01810180},
- {0x0000a018, 0x01830182},
- {0x0000a01c, 0x01850184},
- {0x0000a020, 0x01890188},
- {0x0000a024, 0x018b018a},
- {0x0000a028, 0x018d018c},
- {0x0000a02c, 0x01910190},
- {0x0000a030, 0x01930192},
- {0x0000a034, 0x01950194},
- {0x0000a038, 0x038a0196},
- {0x0000a03c, 0x038c038b},
- {0x0000a040, 0x0390038d},
- {0x0000a044, 0x03920391},
- {0x0000a048, 0x03940393},
- {0x0000a04c, 0x03960395},
- {0x0000a050, 0x00000000},
- {0x0000a054, 0x00000000},
- {0x0000a058, 0x00000000},
- {0x0000a05c, 0x00000000},
- {0x0000a060, 0x00000000},
- {0x0000a064, 0x00000000},
- {0x0000a068, 0x00000000},
- {0x0000a06c, 0x00000000},
- {0x0000a070, 0x00000000},
- {0x0000a074, 0x00000000},
- {0x0000a078, 0x00000000},
- {0x0000a07c, 0x00000000},
- {0x0000a080, 0x22222229},
- {0x0000a084, 0x1d1d1d1d},
- {0x0000a088, 0x1d1d1d1d},
- {0x0000a08c, 0x1d1d1d1d},
- {0x0000a090, 0x171d1d1d},
- {0x0000a094, 0x11111717},
- {0x0000a098, 0x00030311},
- {0x0000a09c, 0x00000000},
- {0x0000a0a0, 0x00000000},
- {0x0000a0a4, 0x00000000},
- {0x0000a0a8, 0x00000000},
- {0x0000a0ac, 0x00000000},
- {0x0000a0b0, 0x00000000},
- {0x0000a0b4, 0x00000000},
- {0x0000a0b8, 0x00000000},
- {0x0000a0bc, 0x00000000},
- {0x0000a0c0, 0x001f0000},
- {0x0000a0c4, 0x01000101},
- {0x0000a0c8, 0x011e011f},
- {0x0000a0cc, 0x011c011d},
- {0x0000a0d0, 0x02030204},
- {0x0000a0d4, 0x02010202},
- {0x0000a0d8, 0x021f0200},
- {0x0000a0dc, 0x0302021e},
- {0x0000a0e0, 0x03000301},
- {0x0000a0e4, 0x031e031f},
- {0x0000a0e8, 0x0402031d},
- {0x0000a0ec, 0x04000401},
- {0x0000a0f0, 0x041e041f},
- {0x0000a0f4, 0x0502041d},
- {0x0000a0f8, 0x05000501},
- {0x0000a0fc, 0x051e051f},
- {0x0000a100, 0x06010602},
- {0x0000a104, 0x061f0600},
- {0x0000a108, 0x061d061e},
- {0x0000a10c, 0x07020703},
- {0x0000a110, 0x07000701},
- {0x0000a114, 0x00000000},
- {0x0000a118, 0x00000000},
- {0x0000a11c, 0x00000000},
- {0x0000a120, 0x00000000},
- {0x0000a124, 0x00000000},
- {0x0000a128, 0x00000000},
- {0x0000a12c, 0x00000000},
- {0x0000a130, 0x00000000},
- {0x0000a134, 0x00000000},
- {0x0000a138, 0x00000000},
- {0x0000a13c, 0x00000000},
- {0x0000a140, 0x001f0000},
- {0x0000a144, 0x01000101},
- {0x0000a148, 0x011e011f},
- {0x0000a14c, 0x011c011d},
- {0x0000a150, 0x02030204},
- {0x0000a154, 0x02010202},
- {0x0000a158, 0x021f0200},
- {0x0000a15c, 0x0302021e},
- {0x0000a160, 0x03000301},
- {0x0000a164, 0x031e031f},
- {0x0000a168, 0x0402031d},
- {0x0000a16c, 0x04000401},
- {0x0000a170, 0x041e041f},
- {0x0000a174, 0x0502041d},
- {0x0000a178, 0x05000501},
- {0x0000a17c, 0x051e051f},
- {0x0000a180, 0x06010602},
- {0x0000a184, 0x061f0600},
- {0x0000a188, 0x061d061e},
- {0x0000a18c, 0x07020703},
- {0x0000a190, 0x07000701},
- {0x0000a194, 0x00000000},
- {0x0000a198, 0x00000000},
- {0x0000a19c, 0x00000000},
- {0x0000a1a0, 0x00000000},
- {0x0000a1a4, 0x00000000},
- {0x0000a1a8, 0x00000000},
- {0x0000a1ac, 0x00000000},
- {0x0000a1b0, 0x00000000},
- {0x0000a1b4, 0x00000000},
- {0x0000a1b8, 0x00000000},
- {0x0000a1bc, 0x00000000},
- {0x0000a1c0, 0x00000000},
- {0x0000a1c4, 0x00000000},
- {0x0000a1c8, 0x00000000},
- {0x0000a1cc, 0x00000000},
- {0x0000a1d0, 0x00000000},
- {0x0000a1d4, 0x00000000},
- {0x0000a1d8, 0x00000000},
- {0x0000a1dc, 0x00000000},
- {0x0000a1e0, 0x00000000},
- {0x0000a1e4, 0x00000000},
- {0x0000a1e8, 0x00000000},
- {0x0000a1ec, 0x00000000},
- {0x0000a1f0, 0x00000396},
- {0x0000a1f4, 0x00000396},
- {0x0000a1f8, 0x00000396},
- {0x0000a1fc, 0x00000196},
- {0x0000b000, 0x00010000},
- {0x0000b004, 0x00030002},
- {0x0000b008, 0x00050004},
- {0x0000b00c, 0x00810080},
- {0x0000b010, 0x00830082},
- {0x0000b014, 0x01810180},
- {0x0000b018, 0x01830182},
- {0x0000b01c, 0x01850184},
- {0x0000b020, 0x02810280},
- {0x0000b024, 0x02830282},
- {0x0000b028, 0x02850284},
- {0x0000b02c, 0x02890288},
- {0x0000b030, 0x028b028a},
- {0x0000b034, 0x0388028c},
- {0x0000b038, 0x038a0389},
- {0x0000b03c, 0x038c038b},
- {0x0000b040, 0x0390038d},
- {0x0000b044, 0x03920391},
- {0x0000b048, 0x03940393},
- {0x0000b04c, 0x03960395},
- {0x0000b050, 0x00000000},
- {0x0000b054, 0x00000000},
- {0x0000b058, 0x00000000},
- {0x0000b05c, 0x00000000},
- {0x0000b060, 0x00000000},
- {0x0000b064, 0x00000000},
- {0x0000b068, 0x00000000},
- {0x0000b06c, 0x00000000},
- {0x0000b070, 0x00000000},
- {0x0000b074, 0x00000000},
- {0x0000b078, 0x00000000},
- {0x0000b07c, 0x00000000},
- {0x0000b080, 0x2a2d2f32},
- {0x0000b084, 0x21232328},
- {0x0000b088, 0x19191c1e},
- {0x0000b08c, 0x12141417},
- {0x0000b090, 0x07070e0e},
- {0x0000b094, 0x03030305},
- {0x0000b098, 0x00000003},
- {0x0000b09c, 0x00000000},
- {0x0000b0a0, 0x00000000},
- {0x0000b0a4, 0x00000000},
- {0x0000b0a8, 0x00000000},
- {0x0000b0ac, 0x00000000},
- {0x0000b0b0, 0x00000000},
- {0x0000b0b4, 0x00000000},
- {0x0000b0b8, 0x00000000},
- {0x0000b0bc, 0x00000000},
- {0x0000b0c0, 0x003f0020},
- {0x0000b0c4, 0x00400041},
- {0x0000b0c8, 0x0140005f},
- {0x0000b0cc, 0x0160015f},
- {0x0000b0d0, 0x017e017f},
- {0x0000b0d4, 0x02410242},
- {0x0000b0d8, 0x025f0240},
- {0x0000b0dc, 0x027f0260},
- {0x0000b0e0, 0x0341027e},
- {0x0000b0e4, 0x035f0340},
- {0x0000b0e8, 0x037f0360},
- {0x0000b0ec, 0x04400441},
- {0x0000b0f0, 0x0460045f},
- {0x0000b0f4, 0x0541047f},
- {0x0000b0f8, 0x055f0540},
- {0x0000b0fc, 0x057f0560},
- {0x0000b100, 0x06400641},
- {0x0000b104, 0x0660065f},
- {0x0000b108, 0x067e067f},
- {0x0000b10c, 0x07410742},
- {0x0000b110, 0x075f0740},
- {0x0000b114, 0x077f0760},
- {0x0000b118, 0x07800781},
- {0x0000b11c, 0x07a0079f},
- {0x0000b120, 0x07c107bf},
- {0x0000b124, 0x000007c0},
- {0x0000b128, 0x00000000},
- {0x0000b12c, 0x00000000},
- {0x0000b130, 0x00000000},
- {0x0000b134, 0x00000000},
- {0x0000b138, 0x00000000},
- {0x0000b13c, 0x00000000},
- {0x0000b140, 0x003f0020},
- {0x0000b144, 0x00400041},
- {0x0000b148, 0x0140005f},
- {0x0000b14c, 0x0160015f},
- {0x0000b150, 0x017e017f},
- {0x0000b154, 0x02410242},
- {0x0000b158, 0x025f0240},
- {0x0000b15c, 0x027f0260},
- {0x0000b160, 0x0341027e},
- {0x0000b164, 0x035f0340},
- {0x0000b168, 0x037f0360},
- {0x0000b16c, 0x04400441},
- {0x0000b170, 0x0460045f},
- {0x0000b174, 0x0541047f},
- {0x0000b178, 0x055f0540},
- {0x0000b17c, 0x057f0560},
- {0x0000b180, 0x06400641},
- {0x0000b184, 0x0660065f},
- {0x0000b188, 0x067e067f},
- {0x0000b18c, 0x07410742},
- {0x0000b190, 0x075f0740},
- {0x0000b194, 0x077f0760},
- {0x0000b198, 0x07800781},
- {0x0000b19c, 0x07a0079f},
- {0x0000b1a0, 0x07c107bf},
- {0x0000b1a4, 0x000007c0},
- {0x0000b1a8, 0x00000000},
- {0x0000b1ac, 0x00000000},
- {0x0000b1b0, 0x00000000},
- {0x0000b1b4, 0x00000000},
- {0x0000b1b8, 0x00000000},
- {0x0000b1bc, 0x00000000},
- {0x0000b1c0, 0x00000000},
- {0x0000b1c4, 0x00000000},
- {0x0000b1c8, 0x00000000},
- {0x0000b1cc, 0x00000000},
- {0x0000b1d0, 0x00000000},
- {0x0000b1d4, 0x00000000},
- {0x0000b1d8, 0x00000000},
- {0x0000b1dc, 0x00000000},
- {0x0000b1e0, 0x00000000},
- {0x0000b1e4, 0x00000000},
- {0x0000b1e8, 0x00000000},
- {0x0000b1ec, 0x00000000},
- {0x0000b1f0, 0x00000396},
- {0x0000b1f4, 0x00000396},
- {0x0000b1f8, 0x00000396},
- {0x0000b1fc, 0x00000196},
-};
+#define ar9580_1p0_rx_gain_table ar9462_common_rx_gain_table_2p0
static const u32 ar9580_1p0_radio_core[][2] = {
/* Addr allmodes */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 4866550ddd96..b09285c36c4a 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -297,6 +297,8 @@ struct ath_tx {
struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
struct ath_descdma txdma;
struct ath_txq *txq_map[WME_NUM_AC];
+ u32 txq_max_pending[WME_NUM_AC];
+ u16 max_aggr_framelen[WME_NUM_AC][4][32];
};
struct ath_rx_edma {
@@ -308,6 +310,7 @@ struct ath_rx {
u8 defant;
u8 rxotherant;
u32 *rxlink;
+ u32 num_pkts;
unsigned int rxfilter;
spinlock_t rxbuflock;
struct list_head rxbuf;
@@ -326,6 +329,9 @@ int ath_rx_init(struct ath_softc *sc, int nbufs);
void ath_rx_cleanup(struct ath_softc *sc);
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
+void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq);
+void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq);
+void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq);
void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
void ath_draintxq(struct ath_softc *sc,
@@ -337,6 +343,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs);
void ath_tx_cleanup(struct ath_softc *sc);
int ath_txq_update(struct ath_softc *sc, int qnum,
struct ath9k_tx_queue_info *q);
+void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_tx_control *txctl);
void ath_tx_tasklet(struct ath_softc *sc);
@@ -356,7 +363,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
struct ath_vif {
int av_bslot;
- bool is_bslot_active, primary_sta_vif;
+ bool primary_sta_vif;
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
struct ath_buf *av_bcbuf;
};
@@ -382,6 +389,7 @@ struct ath_beacon_config {
u16 dtim_period;
u16 bmiss_timeout;
u8 dtim_count;
+ bool enable_beacon;
};
struct ath_beacon {
@@ -393,7 +401,6 @@ struct ath_beacon {
u32 beaconq;
u32 bmisscnt;
- u32 ast_be_xmit;
u32 bc_tstamp;
struct ieee80211_vif *bslot[ATH_BCBUF];
int slottime;
@@ -407,17 +414,19 @@ struct ath_beacon {
bool tx_last;
};
-void ath_beacon_tasklet(unsigned long data);
-void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
-int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif);
-void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
-int ath_beaconq_config(struct ath_softc *sc);
-void ath_set_beacon(struct ath_softc *sc);
+void ath9k_beacon_tasklet(unsigned long data);
+bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
+ u32 changed);
+void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_set_beacon(struct ath_softc *sc);
void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
-/*******/
-/* ANI */
-/*******/
+/*******************/
+/* Link Monitoring */
+/*******************/
#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
#define ATH_AP_SHORT_CALINTERVAL 100 /* 100 ms */
@@ -428,7 +437,9 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
#define ATH_PAPRD_TIMEOUT 100 /* msecs */
+#define ATH_PLL_WORK_INTERVAL 100
+void ath_tx_complete_poll_work(struct work_struct *work);
void ath_reset_work(struct work_struct *work);
void ath_hw_check(struct work_struct *work);
void ath_hw_pll_work(struct work_struct *work);
@@ -436,23 +447,35 @@ void ath_rx_poll(unsigned long data);
void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
void ath_paprd_calibrate(struct work_struct *work);
void ath_ani_calibrate(unsigned long data);
-void ath_start_ani(struct ath_common *common);
+void ath_start_ani(struct ath_softc *sc);
+void ath_stop_ani(struct ath_softc *sc);
+void ath_check_ani(struct ath_softc *sc);
+int ath_update_survey_stats(struct ath_softc *sc);
+void ath_update_survey_nf(struct ath_softc *sc, int channel);
+void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type);
/**********/
/* BTCOEX */
/**********/
+enum bt_op_flags {
+ BT_OP_PRIORITY_DETECTED,
+ BT_OP_SCAN,
+};
+
struct ath_btcoex {
bool hw_timer_enabled;
spinlock_t btcoex_lock;
struct timer_list period_timer; /* Timer for BT period */
u32 bt_priority_cnt;
unsigned long bt_priority_time;
+ unsigned long op_flags;
int bt_stomp_type; /* Types of BT stomping */
u32 btcoex_no_stomp; /* in usec */
u32 btcoex_period; /* in usec */
u32 btscan_no_stomp; /* in usec */
u32 duty_cycle;
+ u32 bt_wait_time;
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
struct ath_mci_profile mci;
};
@@ -466,6 +489,7 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc);
void ath9k_btcoex_timer_pause(struct ath_softc *sc);
void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status);
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen);
+void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc);
#else
static inline int ath9k_init_btcoex(struct ath_softc *sc)
{
@@ -489,8 +513,17 @@ static inline u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc,
{
return 0;
}
+static inline void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
+{
+}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+struct ath9k_wow_pattern {
+ u8 pattern_bytes[MAX_PATTERN_SIZE];
+ u8 mask_bytes[MAX_PATTERN_SIZE];
+ u32 pattern_len;
+};
+
/********************/
/* LED Control */
/********************/
@@ -514,8 +547,10 @@ static inline void ath_deinit_leds(struct ath_softc *sc)
}
#endif
-
+/*******************************/
/* Antenna diversity/combining */
+/*******************************/
+
#define ATH_ANT_RX_CURRENT_SHIFT 4
#define ATH_ANT_RX_MAIN_SHIFT 2
#define ATH_ANT_RX_MASK 0x3
@@ -568,6 +603,9 @@ struct ath_ant_comb {
unsigned long scan_start_time;
};
+void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
+void ath_ant_comb_update(struct ath_softc *sc);
+
/********************/
/* Main driver core */
/********************/
@@ -585,15 +623,14 @@ struct ath_ant_comb {
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define ATH_RATE_DUMMY_MARKER 0
-#define SC_OP_INVALID BIT(0)
-#define SC_OP_BEACONS BIT(1)
-#define SC_OP_OFFCHANNEL BIT(2)
-#define SC_OP_RXFLUSH BIT(3)
-#define SC_OP_TSF_RESET BIT(4)
-#define SC_OP_BT_PRIORITY_DETECTED BIT(5)
-#define SC_OP_BT_SCAN BIT(6)
-#define SC_OP_ANI_RUN BIT(7)
-#define SC_OP_PRIM_STA_VIF BIT(8)
+enum sc_op_flags {
+ SC_OP_INVALID,
+ SC_OP_BEACONS,
+ SC_OP_RXFLUSH,
+ SC_OP_ANI_RUN,
+ SC_OP_PRIM_STA_VIF,
+ SC_OP_HW_RESET,
+};
/* Powersave flags */
#define PS_WAIT_FOR_BEACON BIT(0)
@@ -639,9 +676,9 @@ struct ath_softc {
struct completion paprd_complete;
unsigned int hw_busy_count;
+ unsigned long sc_flags;
u32 intrstatus;
- u32 sc_flags; /* SC_OP_* */
u16 ps_flags; /* PS_* */
u16 curtxpow;
bool ps_enabled;
@@ -679,6 +716,7 @@ struct ath_softc {
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
struct ath_btcoex btcoex;
struct ath_mci_coex mci_coex;
+ struct work_struct mci_work;
#endif
struct ath_descdma txsdma;
@@ -686,6 +724,13 @@ struct ath_softc {
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;
struct dfs_pattern_detector *dfs_detector;
+ u32 wow_enabled;
+
+#ifdef CONFIG_PM_SLEEP
+ atomic_t wow_got_bmiss_intr;
+ atomic_t wow_sleep_proc_intr; /* in the middle of WoW sleep ? */
+ u32 wow_intr_before_sleep;
+#endif
};
void ath9k_tasklet(unsigned long data);
@@ -701,6 +746,7 @@ extern int ath9k_modparam_nohwcrypt;
extern int led_blink;
extern bool is_ath9k_unloaded;
+u8 ath9k_parse_mpdudensity(u8 mpdudensity);
irqreturn_t ath_isr(int irq, void *dev);
int ath9k_init_device(u16 devid, struct ath_softc *sc,
const struct ath_bus_ops *bus_ops);
@@ -737,5 +783,4 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ath9k_vif_iter_data *iter_data);
-
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 11bc55e3d697..76f07d8c272d 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -30,7 +30,7 @@ static void ath9k_reset_beacon_status(struct ath_softc *sc)
* the operating mode of the station (AP or AdHoc). Parameters are AIFS
* settings and channel width min/max
*/
-int ath_beaconq_config(struct ath_softc *sc)
+static void ath9k_beaconq_config(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -38,6 +38,7 @@ int ath_beaconq_config(struct ath_softc *sc)
struct ath_txq *txq;
ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
+
if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
/* Always burst out beacon and CAB traffic. */
qi.tqi_aifs = 1;
@@ -48,17 +49,17 @@ int ath_beaconq_config(struct ath_softc *sc)
txq = sc->tx.txq_map[WME_AC_BE];
ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
qi.tqi_aifs = qi_be.tqi_aifs;
- qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
+ if (ah->slottime == ATH9K_SLOT_TIME_20)
+ qi.tqi_cwmin = 2*qi_be.tqi_cwmin;
+ else
+ qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
qi.tqi_cwmax = qi_be.tqi_cwmax;
}
if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
- ath_err(common,
- "Unable to update h/w beacon queue parameters\n");
- return 0;
+ ath_err(common, "Unable to update h/w beacon queue parameters\n");
} else {
ath9k_hw_resettxqueue(ah, sc->beacon.beaconq);
- return 1;
}
}
@@ -67,7 +68,7 @@ int ath_beaconq_config(struct ath_softc *sc)
* up rate codes, and channel flags. Beacons are always sent out at the
* lowest rate, and are not retried.
*/
-static void ath_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
+static void ath9k_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
struct ath_buf *bf, int rateidx)
{
struct sk_buff *skb = bf->bf_mpdu;
@@ -78,8 +79,6 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
u8 chainmask = ah->txchainmask;
u8 rate = 0;
- ath9k_reset_beacon_status(sc);
-
sband = &sc->sbands[common->hw->conf.channel->band];
rate = sband->bitrates[rateidx].hw_value;
if (vif->bss_conf.use_short_preamble)
@@ -108,7 +107,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
ath9k_hw_set_txdesc(ah, bf->bf_desc, &info);
}
-static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void ath9k_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -125,28 +124,22 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
}
}
-static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static struct ath_buf *ath9k_beacon_generate(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_buf *bf;
- struct ath_vif *avp;
+ struct ath_vif *avp = (void *)vif->drv_priv;
struct sk_buff *skb;
- struct ath_txq *cabq;
+ struct ath_txq *cabq = sc->beacon.cabq;
struct ieee80211_tx_info *info;
+ struct ieee80211_mgmt *mgmt_hdr;
int cabq_depth;
- ath9k_reset_beacon_status(sc);
-
- avp = (void *)vif->drv_priv;
- cabq = sc->beacon.cabq;
-
- if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
+ if (avp->av_bcbuf == NULL)
return NULL;
- /* Release the old beacon first */
-
bf = avp->av_bcbuf;
skb = bf->bf_mpdu;
if (skb) {
@@ -156,14 +149,14 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
bf->bf_buf_addr = 0;
}
- /* Get a new beacon from mac80211 */
-
skb = ieee80211_beacon_get(hw, vif);
- bf->bf_mpdu = skb;
if (skb == NULL)
return NULL;
- ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
- avp->tsf_adjust;
+
+ bf->bf_mpdu = skb;
+
+ mgmt_hdr = (struct ieee80211_mgmt *)skb->data;
+ mgmt_hdr->u.beacon.timestamp = avp->tsf_adjust;
info = IEEE80211_SKB_CB(skb);
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
@@ -209,61 +202,52 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
}
}
- ath_beacon_setup(sc, vif, bf, info->control.rates[0].idx);
+ ath9k_beacon_setup(sc, vif, bf, info->control.rates[0].idx);
while (skb) {
- ath_tx_cabq(hw, skb);
+ ath9k_tx_cabq(hw, skb);
skb = ieee80211_get_buffered_bc(hw, vif);
}
return bf;
}
-int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
+void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp;
- struct ath_buf *bf;
- struct sk_buff *skb;
- struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
- __le64 tstamp;
-
- avp = (void *)vif->drv_priv;
-
- /* Allocate a beacon descriptor if we haven't done so. */
- if (!avp->av_bcbuf) {
- /* Allocate beacon state for hostap/ibss. We know
- * a buffer is available. */
- avp->av_bcbuf = list_first_entry(&sc->beacon.bbuf,
- struct ath_buf, list);
- list_del(&avp->av_bcbuf->list);
-
- if (ath9k_uses_beacons(vif->type)) {
- int slot;
- /*
- * Assign the vif to a beacon xmit slot. As
- * above, this cannot fail to find one.
- */
- avp->av_bslot = 0;
- for (slot = 0; slot < ATH_BCBUF; slot++)
- if (sc->beacon.bslot[slot] == NULL) {
- avp->av_bslot = slot;
- avp->is_bslot_active = false;
-
- /* NB: keep looking for a double slot */
- if (slot == 0 || !sc->beacon.bslot[slot-1])
- break;
- }
- BUG_ON(sc->beacon.bslot[avp->av_bslot] != NULL);
- sc->beacon.bslot[avp->av_bslot] = vif;
- sc->nbcnvifs++;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ int slot;
+
+ avp->av_bcbuf = list_first_entry(&sc->beacon.bbuf, struct ath_buf, list);
+ list_del(&avp->av_bcbuf->list);
+
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
+ if (sc->beacon.bslot[slot] == NULL) {
+ avp->av_bslot = slot;
+ break;
}
}
- /* release the previous beacon frame, if it already exists. */
- bf = avp->av_bcbuf;
- if (bf->bf_mpdu != NULL) {
- skb = bf->bf_mpdu;
+ sc->beacon.bslot[avp->av_bslot] = vif;
+ sc->nbcnvifs++;
+
+ ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
+ avp->av_bslot);
+}
+
+void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ath_buf *bf = avp->av_bcbuf;
+
+ ath_dbg(common, CONFIG, "Removing interface at beacon slot: %d\n",
+ avp->av_bslot);
+
+ tasklet_disable(&sc->bcon_tasklet);
+
+ if (bf && bf->bf_mpdu) {
+ struct sk_buff *skb = bf->bf_mpdu;
dma_unmap_single(sc->dev, bf->bf_buf_addr,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
@@ -271,99 +255,74 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
bf->bf_buf_addr = 0;
}
- /* NB: the beacon data buffer must be 32-bit aligned. */
- skb = ieee80211_beacon_get(sc->hw, vif);
- if (skb == NULL)
- return -ENOMEM;
+ avp->av_bcbuf = NULL;
+ sc->beacon.bslot[avp->av_bslot] = NULL;
+ sc->nbcnvifs--;
+ list_add_tail(&bf->list, &sc->beacon.bbuf);
- tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
- sc->beacon.bc_tstamp = (u32) le64_to_cpu(tstamp);
- /* Calculate a TSF adjustment factor required for staggered beacons. */
- if (avp->av_bslot > 0) {
- u64 tsfadjust;
- int intval;
-
- intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+ tasklet_enable(&sc->bcon_tasklet);
+}
- /*
- * Calculate the TSF offset for this beacon slot, i.e., the
- * number of usecs that need to be added to the timestamp field
- * in Beacon and Probe Response frames. Beacon slot 0 is
- * processed at the correct offset, so it does not require TSF
- * adjustment. Other slots are adjusted to get the timestamp
- * close to the TBTT for the BSS.
- */
- tsfadjust = TU_TO_USEC(intval * avp->av_bslot) / ATH_BCBUF;
- avp->tsf_adjust = cpu_to_le64(tsfadjust);
+static int ath9k_beacon_choose_slot(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ u16 intval;
+ u32 tsftu;
+ u64 tsf;
+ int slot;
- ath_dbg(common, BEACON,
- "stagger beacons, bslot %d intval %u tsfadjust %llu\n",
- avp->av_bslot, intval, (unsigned long long)tsfadjust);
+ if (sc->sc_ah->opmode != NL80211_IFTYPE_AP) {
+ ath_dbg(common, BEACON, "slot 0, tsf: %llu\n",
+ ath9k_hw_gettsf64(sc->sc_ah));
+ return 0;
+ }
- ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
- avp->tsf_adjust;
- } else
- avp->tsf_adjust = cpu_to_le64(0);
+ intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
+ tsf = ath9k_hw_gettsf64(sc->sc_ah);
+ tsf += TU_TO_USEC(sc->sc_ah->config.sw_beacon_response_time);
+ tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >>32, tsf * ATH_BCBUF);
+ slot = (tsftu % (intval * ATH_BCBUF)) / intval;
- bf->bf_mpdu = skb;
- bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
- dev_kfree_skb_any(skb);
- bf->bf_mpdu = NULL;
- bf->bf_buf_addr = 0;
- ath_err(common, "dma_mapping_error on beacon alloc\n");
- return -ENOMEM;
- }
- avp->is_bslot_active = true;
+ ath_dbg(common, BEACON, "slot: %d tsf: %llu tsftu: %u\n",
+ slot, tsf, tsftu / ATH_BCBUF);
- return 0;
+ return slot;
}
-void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp)
+void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- if (avp->av_bcbuf != NULL) {
- struct ath_buf *bf;
-
- avp->is_bslot_active = false;
- if (avp->av_bslot != -1) {
- sc->beacon.bslot[avp->av_bslot] = NULL;
- sc->nbcnvifs--;
- avp->av_bslot = -1;
- }
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ u64 tsfadjust;
- bf = avp->av_bcbuf;
- if (bf->bf_mpdu != NULL) {
- struct sk_buff *skb = bf->bf_mpdu;
- dma_unmap_single(sc->dev, bf->bf_buf_addr,
- skb->len, DMA_TO_DEVICE);
- dev_kfree_skb_any(skb);
- bf->bf_mpdu = NULL;
- bf->bf_buf_addr = 0;
- }
- list_add_tail(&bf->list, &sc->beacon.bbuf);
+ if (avp->av_bslot == 0)
+ return;
- avp->av_bcbuf = NULL;
- }
+ tsfadjust = cur_conf->beacon_interval * avp->av_bslot / ATH_BCBUF;
+ avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
+
+ ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
+ (unsigned long long)tsfadjust, avp->av_bslot);
}
-void ath_beacon_tasklet(unsigned long data)
+void ath9k_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
- struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int slot;
- u32 bfaddr, bc = 0;
- if (work_pending(&sc->hw_reset_work)) {
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
ath_dbg(common, RESET,
"reset work is pending, skip beaconing now\n");
return;
}
+
/*
* Check if the previous beacon has gone out. If
* not don't try to post another, skip this period
@@ -387,55 +346,25 @@ void ath_beacon_tasklet(unsigned long data)
} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
sc->beacon.bmisscnt = 0;
- sc->sc_flags |= SC_OP_TSF_RESET;
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ ath9k_queue_reset(sc, RESET_TYPE_BEACON_STUCK);
}
return;
}
- /*
- * Generate beacon frames. we are sending frames
- * staggered so calculate the slot for this frame based
- * on the tsf to safeguard against missing an swba.
- */
-
-
- if (ah->opmode == NL80211_IFTYPE_AP) {
- u16 intval;
- u32 tsftu;
- u64 tsf;
-
- intval = cur_conf->beacon_interval ? : ATH_DEFAULT_BINTVAL;
- tsf = ath9k_hw_gettsf64(ah);
- tsf += TU_TO_USEC(ah->config.sw_beacon_response_time);
- tsftu = TSF_TO_TU((tsf * ATH_BCBUF) >>32, tsf * ATH_BCBUF);
- slot = (tsftu % (intval * ATH_BCBUF)) / intval;
- vif = sc->beacon.bslot[slot];
-
- ath_dbg(common, BEACON,
- "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
- slot, tsf, tsftu / ATH_BCBUF, intval, vif);
- } else {
- slot = 0;
- vif = sc->beacon.bslot[slot];
- }
+ slot = ath9k_beacon_choose_slot(sc);
+ vif = sc->beacon.bslot[slot];
+ if (!vif || !vif->bss_conf.enable_beacon)
+ return;
- bfaddr = 0;
- if (vif) {
- bf = ath_beacon_generate(sc->hw, vif);
- if (bf != NULL) {
- bfaddr = bf->bf_daddr;
- bc = 1;
- }
+ bf = ath9k_beacon_generate(sc->hw, vif);
+ WARN_ON(!bf);
- if (sc->beacon.bmisscnt != 0) {
- ath_dbg(common, BSTUCK,
- "resume beacon xmit after %u misses\n",
- sc->beacon.bmisscnt);
- sc->beacon.bmisscnt = 0;
- }
+ if (sc->beacon.bmisscnt != 0) {
+ ath_dbg(common, BSTUCK, "resume beacon xmit after %u misses\n",
+ sc->beacon.bmisscnt);
+ sc->beacon.bmisscnt = 0;
}
/*
@@ -455,39 +384,40 @@ void ath_beacon_tasklet(unsigned long data)
* set to ATH_BCBUF so this check is a noop.
*/
if (sc->beacon.updateslot == UPDATE) {
- sc->beacon.updateslot = COMMIT; /* commit next beacon */
+ sc->beacon.updateslot = COMMIT;
sc->beacon.slotupdate = slot;
- } else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
+ } else if (sc->beacon.updateslot == COMMIT &&
+ sc->beacon.slotupdate == slot) {
ah->slottime = sc->beacon.slottime;
ath9k_hw_init_global_settings(ah);
sc->beacon.updateslot = OK;
}
- if (bfaddr != 0) {
+
+ if (bf) {
+ ath9k_reset_beacon_status(sc);
+
+ ath_dbg(common, BEACON,
+ "Transmitting beacon for slot: %d\n", slot);
+
/* NB: cabq traffic should already be queued and primed */
- ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
+ ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
if (!edma)
ath9k_hw_txstart(ah, sc->beacon.beaconq);
-
- sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */
}
}
-static void ath9k_beacon_init(struct ath_softc *sc,
- u32 next_beacon,
- u32 beacon_period)
+static void ath9k_beacon_init(struct ath_softc *sc, u32 nexttbtt, u32 intval)
{
- if (sc->sc_flags & SC_OP_TSF_RESET) {
- ath9k_ps_wakeup(sc);
- ath9k_hw_reset_tsf(sc->sc_ah);
- }
-
- ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period);
+ struct ath_hw *ah = sc->sc_ah;
- if (sc->sc_flags & SC_OP_TSF_RESET) {
- ath9k_ps_restore(sc);
- sc->sc_flags &= ~SC_OP_TSF_RESET;
- }
+ ath9k_hw_disable_interrupts(ah);
+ ath9k_hw_reset_tsf(ah);
+ ath9k_beaconq_config(sc);
+ ath9k_hw_beaconinit(ah, nexttbtt, intval);
+ sc->beacon.bmisscnt = 0;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
}
/*
@@ -495,32 +425,27 @@ static void ath9k_beacon_init(struct ath_softc *sc,
* burst together. For the former arrange for the SWBA to be delivered for each
* slot. Slots that are not occupied will generate nothing.
*/
-static void ath_beacon_config_ap(struct ath_softc *sc,
- struct ath_beacon_config *conf)
+static void ath9k_beacon_config_ap(struct ath_softc *sc,
+ struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
u32 nexttbtt, intval;
/* NB: the beacon interval is kept internally in TU's */
intval = TU_TO_USEC(conf->beacon_interval);
- intval /= ATH_BCBUF; /* for staggered beacons */
+ intval /= ATH_BCBUF;
nexttbtt = intval;
- /*
- * In AP mode we enable the beacon timers and SWBA interrupts to
- * prepare beacon frames.
- */
- ah->imask |= ATH9K_INT_SWBA;
- ath_beaconq_config(sc);
+ if (conf->enable_beacon)
+ ah->imask |= ATH9K_INT_SWBA;
+ else
+ ah->imask &= ~ATH9K_INT_SWBA;
- /* Set the computed AP beacon timers */
+ ath_dbg(common, BEACON, "AP nexttbtt: %u intval: %u conf_intval: %u\n",
+ nexttbtt, intval, conf->beacon_interval);
- ath9k_hw_disable_interrupts(ah);
- sc->sc_flags |= SC_OP_TSF_RESET;
ath9k_beacon_init(sc, nexttbtt, intval);
- sc->beacon.bmisscnt = 0;
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
}
/*
@@ -531,8 +456,8 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
* we'll receive a BMISS interrupt when we stop seeing beacons from the AP
* we've associated with.
*/
-static void ath_beacon_config_sta(struct ath_softc *sc,
- struct ath_beacon_config *conf)
+static void ath9k_beacon_config_sta(struct ath_softc *sc,
+ struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -544,7 +469,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
int num_beacons, offset, dtim_dec_count, cfp_dec_count;
/* No need to configure beacon if we are not associated */
- if (!common->curaid) {
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
ath_dbg(common, BEACON,
"STA is not yet associated..skipping beacon config\n");
return;
@@ -651,97 +576,65 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
ath9k_hw_enable_interrupts(ah);
}
-static void ath_beacon_config_adhoc(struct ath_softc *sc,
- struct ath_beacon_config *conf)
+static void ath9k_beacon_config_adhoc(struct ath_softc *sc,
+ struct ath_beacon_config *conf)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- u32 tsf, intval, nexttbtt;
+ u32 intval, nexttbtt;
ath9k_reset_beacon_status(sc);
- if (!(sc->sc_flags & SC_OP_BEACONS))
- ath9k_hw_settsf64(ah, sc->beacon.bc_tstamp);
intval = TU_TO_USEC(conf->beacon_interval);
- tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
- nexttbtt = tsf + intval;
-
- ath_dbg(common, BEACON, "IBSS nexttbtt %u intval %u (%u)\n",
- nexttbtt, intval, conf->beacon_interval);
-
- /*
- * In IBSS mode enable the beacon timers but only enable SWBA interrupts
- * if we need to manually prepare beacon frames. Otherwise we use a
- * self-linked tx descriptor and let the hardware deal with things.
- */
- ah->imask |= ATH9K_INT_SWBA;
+ nexttbtt = intval;
- ath_beaconq_config(sc);
+ if (conf->enable_beacon)
+ ah->imask |= ATH9K_INT_SWBA;
+ else
+ ah->imask &= ~ATH9K_INT_SWBA;
- /* Set the computed ADHOC beacon timers */
+ ath_dbg(common, BEACON, "IBSS nexttbtt: %u intval: %u conf_intval: %u\n",
+ nexttbtt, intval, conf->beacon_interval);
- ath9k_hw_disable_interrupts(ah);
ath9k_beacon_init(sc, nexttbtt, intval);
- sc->beacon.bmisscnt = 0;
-
- ath9k_hw_set_interrupts(ah);
- ath9k_hw_enable_interrupts(ah);
}
-static bool ath9k_allow_beacon_config(struct ath_softc *sc,
- struct ieee80211_vif *vif)
+bool ath9k_allow_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_vif *avp = (void *)vif->drv_priv;
- /*
- * Can not have different beacon interval on multiple
- * AP interface case
- */
- if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
- (sc->nbcnvifs > 1) &&
- (vif->type == NL80211_IFTYPE_AP) &&
- (cur_conf->beacon_interval != bss_conf->beacon_int)) {
- ath_dbg(common, CONFIG,
- "Changing beacon interval of multiple AP interfaces !\n");
- return false;
- }
- /*
- * Can not configure station vif's beacon config
- * while on AP opmode
- */
- if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
- (vif->type != NL80211_IFTYPE_AP)) {
- ath_dbg(common, CONFIG,
- "STA vif's beacon not allowed on AP mode\n");
- return false;
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
+ if ((vif->type != NL80211_IFTYPE_AP) ||
+ (sc->nbcnvifs > 1)) {
+ ath_dbg(common, CONFIG,
+ "An AP interface is already present !\n");
+ return false;
+ }
}
- /*
- * Do not allow beacon config if HW was already configured
- * with another STA vif
- */
- if ((sc->sc_ah->opmode == NL80211_IFTYPE_STATION) &&
- (vif->type == NL80211_IFTYPE_STATION) &&
- (sc->sc_flags & SC_OP_BEACONS) &&
- !avp->primary_sta_vif) {
- ath_dbg(common, CONFIG,
- "Beacon already configured for a station interface\n");
- return false;
+
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
+ if ((vif->type == NL80211_IFTYPE_STATION) &&
+ test_bit(SC_OP_BEACONS, &sc->sc_flags) &&
+ !avp->primary_sta_vif) {
+ ath_dbg(common, CONFIG,
+ "Beacon already configured for a station interface\n");
+ return false;
+ }
}
+
return true;
}
-void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
+static void ath9k_cache_beacon_config(struct ath_softc *sc,
+ struct ieee80211_bss_conf *bss_conf)
{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- if (!ath9k_allow_beacon_config(sc, vif))
- return;
+ ath_dbg(common, BEACON,
+ "Caching beacon data for BSS: %pM\n", bss_conf->bssid);
- /* Setup the beacon configuration parameters */
cur_conf->beacon_interval = bss_conf->beacon_int;
cur_conf->dtim_period = bss_conf->dtim_period;
cur_conf->listen_interval = 1;
@@ -766,73 +659,62 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
if (cur_conf->dtim_period == 0)
cur_conf->dtim_period = 1;
- ath_set_beacon(sc);
}
-static bool ath_has_valid_bslot(struct ath_softc *sc)
+void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
+ u32 changed)
{
- struct ath_vif *avp;
- int slot;
- bool found = false;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
- for (slot = 0; slot < ATH_BCBUF; slot++) {
- if (sc->beacon.bslot[slot]) {
- avp = (void *)sc->beacon.bslot[slot]->drv_priv;
- if (avp->is_bslot_active) {
- found = true;
- break;
+ if (sc->sc_ah->opmode == NL80211_IFTYPE_STATION) {
+ ath9k_cache_beacon_config(sc, bss_conf);
+ ath9k_set_beacon(sc);
+ set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ } else {
+ /*
+ * Take care of multiple interfaces when
+ * enabling/disabling SWBA.
+ */
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (!bss_conf->enable_beacon &&
+ (sc->nbcnvifs <= 1)) {
+ cur_conf->enable_beacon = false;
+ } else if (bss_conf->enable_beacon) {
+ cur_conf->enable_beacon = true;
+ ath9k_cache_beacon_config(sc, bss_conf);
}
}
+
+ if (cur_conf->beacon_interval) {
+ ath9k_set_beacon(sc);
+
+ if (cur_conf->enable_beacon)
+ set_bit(SC_OP_BEACONS, &sc->sc_flags);
+ else
+ clear_bit(SC_OP_BEACONS, &sc->sc_flags);
+ }
}
- return found;
}
-
-void ath_set_beacon(struct ath_softc *sc)
+void ath9k_set_beacon(struct ath_softc *sc)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
switch (sc->sc_ah->opmode) {
case NL80211_IFTYPE_AP:
- if (ath_has_valid_bslot(sc))
- ath_beacon_config_ap(sc, cur_conf);
+ ath9k_beacon_config_ap(sc, cur_conf);
break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
- ath_beacon_config_adhoc(sc, cur_conf);
+ ath9k_beacon_config_adhoc(sc, cur_conf);
break;
case NL80211_IFTYPE_STATION:
- ath_beacon_config_sta(sc, cur_conf);
+ ath9k_beacon_config_sta(sc, cur_conf);
break;
default:
ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
-
- sc->sc_flags |= SC_OP_BEACONS;
-}
-
-void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (!ath_has_valid_bslot(sc)) {
- sc->sc_flags &= ~SC_OP_BEACONS;
- return;
- }
-
- ath9k_ps_wakeup(sc);
- if (status) {
- /* Re-enable beaconing */
- ah->imask |= ATH9K_INT_SWBA;
- ath9k_hw_set_interrupts(ah);
- } else {
- /* Disable SWBA interrupt */
- ah->imask &= ~ATH9K_INT_SWBA;
- ath9k_hw_set_interrupts(ah);
- tasklet_kill(&sc->bcon_tasklet);
- ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
- }
- ath9k_ps_restore(sc);
}
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 1ca6da80d4ad..acd437384fe4 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -336,10 +336,16 @@ static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
- const u32 *weight = AR_SREV_9462(ah) ? ar9003_wlan_weights[stomp_type] :
- ar9462_wlan_weights[stomp_type];
+ const u32 *weight = ar9003_wlan_weights[stomp_type];
int i;
+ if (AR_SREV_9462(ah)) {
+ if ((stomp_type == ATH_BTCOEX_STOMP_LOW) &&
+ btcoex->mci.stomp_ftp)
+ stomp_type = ATH_BTCOEX_STOMP_LOW_FTP;
+ weight = ar9462_wlan_weights[stomp_type];
+ }
+
for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
btcoex->bt_weight[i] = AR9300_BT_WGHT;
btcoex->wlan_weight[i] = weight[i];
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 3a1e1cfabd5e..20092f98658f 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -36,6 +36,9 @@
#define ATH_BT_CNT_THRESHOLD 3
#define ATH_BT_CNT_SCAN_THRESHOLD 15
+#define ATH_BTCOEX_RX_WAIT_TIME 100
+#define ATH_BTCOEX_STOMP_FTP_THRESH 5
+
#define AR9300_NUM_BT_WEIGHTS 4
#define AR9300_NUM_WLAN_WEIGHTS 4
/* Defines the BT AR_BT_COEX_WGHT used */
@@ -80,6 +83,7 @@ struct ath9k_hw_mci {
u8 bt_ver_major;
u8 bt_ver_minor;
u8 bt_state;
+ u8 stomp_ftp;
};
struct ath_btcoex_hw {
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 3b33996d97df..1060c19a5012 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -30,10 +30,10 @@ struct ar5416IniArray {
u32 ia_columns;
};
-#define INIT_INI_ARRAY(iniarray, array, rows, columns) do { \
+#define INIT_INI_ARRAY(iniarray, array) do { \
(iniarray)->ia_array = (u32 *)(array); \
- (iniarray)->ia_rows = (rows); \
- (iniarray)->ia_columns = (columns); \
+ (iniarray)->ia_rows = ARRAY_SIZE(array); \
+ (iniarray)->ia_columns = ARRAY_SIZE(array[0]); \
} while (0)
#define INI_RA(iniarray, row, column) \
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index fde700c4e490..68b643c8943c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -205,11 +205,10 @@ static ssize_t write_file_disable_ani(struct file *file,
common->disable_ani = !!disable_ani;
if (disable_ani) {
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
+ clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ ath_stop_ani(sc);
} else {
- sc->sc_flags |= SC_OP_ANI_RUN;
- ath_start_ani(common);
+ ath_check_ani(sc);
}
return count;
@@ -348,8 +347,6 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
sc->debug.stats.istats.txok++;
if (status & ATH9K_INT_TXURN)
sc->debug.stats.istats.txurn++;
- if (status & ATH9K_INT_MIB)
- sc->debug.stats.istats.mib++;
if (status & ATH9K_INT_RXPHY)
sc->debug.stats.istats.rxphyerr++;
if (status & ATH9K_INT_RXKCM)
@@ -374,6 +371,8 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
sc->debug.stats.istats.dtim++;
if (status & ATH9K_INT_TSFOOR)
sc->debug.stats.istats.tsfoor++;
+ if (status & ATH9K_INT_MCI)
+ sc->debug.stats.istats.mci++;
}
static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
@@ -418,6 +417,7 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
PR_IS("DTIMSYNC", dtimsync);
PR_IS("DTIM", dtim);
PR_IS("TSFOOR", tsfoor);
+ PR_IS("MCI", mci);
PR_IS("TOTAL", total);
len += snprintf(buf + len, mxlen - len,
@@ -1318,7 +1318,7 @@ static int open_file_bb_mac_samps(struct inode *inode, struct file *file)
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
u8 nread;
- if (sc->sc_flags & SC_OP_INVALID)
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EAGAIN;
buf = vmalloc(size);
@@ -1555,6 +1555,14 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_interrupt);
debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_xmit);
+ debugfs_create_u32("qlen_bk", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[WME_AC_BK]);
+ debugfs_create_u32("qlen_be", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[WME_AC_BE]);
+ debugfs_create_u32("qlen_vi", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[WME_AC_VI]);
+ debugfs_create_u32("qlen_vo", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
+ &sc->tx.txq_max_pending[WME_AC_VO]);
debugfs_create_file("stations", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_stations);
debugfs_create_file("misc", S_IRUSR, sc->debug.debugfs_phy, sc,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index c34da09d9103..8b9d080d89da 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -32,6 +32,19 @@ struct ath_buf;
#define RESET_STAT_INC(sc, type) do { } while (0)
#endif
+enum ath_reset_type {
+ RESET_TYPE_BB_HANG,
+ RESET_TYPE_BB_WATCHDOG,
+ RESET_TYPE_FATAL_INT,
+ RESET_TYPE_TX_ERROR,
+ RESET_TYPE_TX_HANG,
+ RESET_TYPE_PLL_HANG,
+ RESET_TYPE_MAC_HANG,
+ RESET_TYPE_BEACON_STUCK,
+ RESET_TYPE_MCI,
+ __RESET_TYPE_MAX
+};
+
#ifdef CONFIG_ATH9K_DEBUGFS
/**
@@ -86,6 +99,7 @@ struct ath_interrupt_stats {
u32 dtim;
u32 bb_watchdog;
u32 tsfoor;
+ u32 mci;
/* Sync-cause stats */
u32 sync_cause_all;
@@ -208,17 +222,6 @@ struct ath_rx_stats {
u32 rx_frags;
};
-enum ath_reset_type {
- RESET_TYPE_BB_HANG,
- RESET_TYPE_BB_WATCHDOG,
- RESET_TYPE_FATAL_INT,
- RESET_TYPE_TX_ERROR,
- RESET_TYPE_TX_HANG,
- RESET_TYPE_PLL_HANG,
- RESET_TYPE_MAC_HANG,
- __RESET_TYPE_MAX
-};
-
struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 33acb920ed3f..484b31305906 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -241,16 +241,12 @@ enum eeprom_param {
EEP_TEMPSENSE_SLOPE,
EEP_TEMPSENSE_SLOPE_PAL_ON,
EEP_PWR_TABLE_OFFSET,
- EEP_DRIVE_STRENGTH,
- EEP_INTERNAL_REGULATOR,
- EEP_SWREG,
EEP_PAPRD,
EEP_MODAL_VER,
EEP_ANT_DIV_CTL1,
EEP_CHAIN_MASK_REDUCE,
EEP_ANTENNA_GAIN_2G,
EEP_ANTENNA_GAIN_5G,
- EEP_QUICK_DROP
};
enum ar5416_rates {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 4322ac80c203..7d075105a85d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -135,7 +135,7 @@ static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
- len += ath9k_dump_4k_modal_eeprom(buf, len, size,
+ len = ath9k_dump_4k_modal_eeprom(buf, len, size,
&eep->modalHeader);
goto out;
}
@@ -188,8 +188,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
{
#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
struct ath_common *common = ath9k_hw_common(ah);
- struct ar5416_eeprom_4k *eep =
- (struct ar5416_eeprom_4k *) &ah->eeprom.map4k;
+ struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
u16 *eepdata, temp, magic, magic2;
u32 sum = 0, el;
bool need_swap = false;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index aa614767adff..cd742fb944c2 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -132,7 +132,7 @@ static u32 ath9k_hw_ar9287_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
- len += ar9287_dump_modal_eeprom(buf, len, size,
+ len = ar9287_dump_modal_eeprom(buf, len, size,
&eep->modalHeader);
goto out;
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index b5fba8b18b8b..a8ac30a00720 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -211,11 +211,11 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
if (!dump_base_hdr) {
len += snprintf(buf + len, size - len,
"%20s :\n", "2GHz modal Header");
- len += ath9k_def_dump_modal_eeprom(buf, len, size,
+ len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[0]);
len += snprintf(buf + len, size - len,
"%20s :\n", "5GHz modal Header");
- len += ath9k_def_dump_modal_eeprom(buf, len, size,
+ len = ath9k_def_dump_modal_eeprom(buf, len, size,
&eep->modalHeader[1]);
goto out;
}
@@ -264,8 +264,7 @@ static u32 ath9k_hw_def_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
{
- struct ar5416_eeprom_def *eep =
- (struct ar5416_eeprom_def *) &ah->eeprom.def;
+ struct ar5416_eeprom_def *eep = &ah->eeprom.def;
struct ath_common *common = ath9k_hw_common(ah);
u16 *eepdata, temp, magic, magic2;
u32 sum = 0, el;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 281a9af0f1b6..bacdb8fb4ef4 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -132,17 +132,18 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
if (time_after(jiffies, btcoex->bt_priority_time +
msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
- sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+ clear_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
+ clear_bit(BT_OP_SCAN, &btcoex->op_flags);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
"BT scan detected\n");
- sc->sc_flags |= (SC_OP_BT_SCAN |
- SC_OP_BT_PRIORITY_DETECTED);
+ set_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
+ set_bit(BT_OP_SCAN, &btcoex->op_flags);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
"BT priority traffic detected\n");
- sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
+ set_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
}
btcoex->bt_priority_cnt = 0;
@@ -190,13 +191,34 @@ static void ath_btcoex_period_timer(unsigned long data)
struct ath_softc *sc = (struct ath_softc *) data;
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
u32 timer_period;
bool is_btscan;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) {
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ goto skip_hw_wakeup;
+ }
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath9k_ps_wakeup(sc);
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
ath_detect_bt_priority(sc);
- is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
+ is_btscan = test_bit(BT_OP_SCAN, &btcoex->op_flags);
+
+ btcoex->bt_wait_time += btcoex->btcoex_period;
+ if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) {
+ if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) &&
+ (mci->num_pan || mci->num_other_acl))
+ ah->btcoex_hw.mci.stomp_ftp =
+ (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH);
+ else
+ ah->btcoex_hw.mci.stomp_ftp = false;
+ btcoex->bt_wait_time = 0;
+ sc->rx.num_pkts = 0;
+ }
spin_lock_bh(&btcoex->btcoex_lock);
@@ -218,9 +240,9 @@ static void ath_btcoex_period_timer(unsigned long data)
}
ath9k_ps_restore(sc);
- timer_period = btcoex->btcoex_period / 1000;
- mod_timer(&btcoex->period_timer, jiffies +
- msecs_to_jiffies(timer_period));
+skip_hw_wakeup:
+ timer_period = btcoex->btcoex_period;
+ mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period));
}
/*
@@ -233,14 +255,14 @@ static void ath_btcoex_no_stomp_timer(void *arg)
struct ath_hw *ah = sc->sc_ah;
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_common *common = ath9k_hw_common(ah);
- bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
ath_dbg(common, BTCOEX, "no stomp timer running\n");
ath9k_ps_wakeup(sc);
spin_lock_bh(&btcoex->btcoex_lock);
- if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
+ test_bit(BT_OP_SCAN, &btcoex->op_flags))
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
@@ -254,10 +276,10 @@ static int ath_init_btcoex_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
- btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
- btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+ btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 *
btcoex->btcoex_period / 100;
- btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 *
btcoex->btcoex_period / 100;
setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
@@ -292,7 +314,8 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
- sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+ clear_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
+ clear_bit(BT_OP_SCAN, &btcoex->op_flags);
mod_timer(&btcoex->period_timer, jiffies);
}
@@ -314,14 +337,22 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
btcoex->hw_timer_enabled = false;
}
+void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+}
+
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
{
+ struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_mci_profile *mci = &sc->btcoex.mci;
u16 aggr_limit = 0;
if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
- else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
+ else if (test_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags))
aggr_limit = min((max_4ms_framelen * 3) / 8,
(u32)ATH_AMPDU_LIMIT_MAX);
@@ -362,9 +393,9 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
if (ah->btcoex_hw.enabled &&
ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
- ath9k_hw_btcoex_disable(ah);
if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_pause(sc);
+ ath9k_hw_btcoex_disable(ah);
if (AR_SREV_9462(ah))
ath_mci_flush_profile(&sc->btcoex.mci);
}
@@ -372,11 +403,13 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
void ath9k_deinit_btcoex(struct ath_softc *sc)
{
+ struct ath_hw *ah = sc->sc_ah;
+
if ((sc->btcoex.no_stomp_timer) &&
ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
- if (AR_SREV_9462(sc->sc_ah))
+ if (ath9k_hw_mci_is_enabled(ah))
ath_mci_cleanup(sc);
}
@@ -402,7 +435,7 @@ int ath9k_init_btcoex(struct ath_softc *sc)
txq = sc->tx.txq_map[WME_AC_BE];
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- if (AR_SREV_9462(ah)) {
+ if (ath9k_hw_mci_is_enabled(ah)) {
sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
INIT_LIST_HEAD(&sc->btcoex.mci.info);
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 135795257d95..936e920fb88e 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -453,7 +453,6 @@ struct ath9k_htc_priv {
u8 num_sta_assoc_vif;
u8 num_ap_vif;
- u16 op_flags;
u16 curtxpow;
u16 txpowlimit;
u16 nvifs;
@@ -461,6 +460,7 @@ struct ath9k_htc_priv {
bool rearm_ani;
bool reconfig_beacon;
unsigned int rxfilter;
+ unsigned long op_flags;
struct ath9k_hw_cal_data caldata;
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
@@ -572,8 +572,6 @@ bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
-void ath9k_htc_radio_enable(struct ieee80211_hw *hw);
-void ath9k_htc_radio_disable(struct ieee80211_hw *hw);
#ifdef CONFIG_MAC80211_LEDS
void ath9k_init_leds(struct ath9k_htc_priv *priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 2eadffb7971c..77d541feb910 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -207,9 +207,9 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
else
priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
- if (priv->op_flags & OP_TSF_RESET) {
+ if (test_bit(OP_TSF_RESET, &priv->op_flags)) {
ath9k_hw_reset_tsf(priv->ah);
- priv->op_flags &= ~OP_TSF_RESET;
+ clear_bit(OP_TSF_RESET, &priv->op_flags);
} else {
/*
* Pull nexttbtt forward to reflect the current TSF.
@@ -221,7 +221,7 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
} while (nexttbtt < tsftu);
}
- if (priv->op_flags & OP_ENABLE_BEACON)
+ if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
imask |= ATH9K_INT_SWBA;
ath_dbg(common, CONFIG,
@@ -269,7 +269,7 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
else
priv->ah->config.sw_beacon_response_time = MIN_SWBA_RESPONSE;
- if (priv->op_flags & OP_ENABLE_BEACON)
+ if (test_bit(OP_ENABLE_BEACON, &priv->op_flags))
imask |= ATH9K_INT_SWBA;
ath_dbg(common, CONFIG,
@@ -365,7 +365,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
vif = priv->cur_beacon_conf.bslot[slot];
avp = (struct ath9k_htc_vif *)vif->drv_priv;
- if (unlikely(priv->op_flags & OP_SCANNING)) {
+ if (unlikely(test_bit(OP_SCANNING, &priv->op_flags))) {
spin_unlock_bh(&priv->beacon_lock);
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 1c10e2e5c237..07df279c8d46 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -37,17 +37,18 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
if (time_after(jiffies, btcoex->bt_priority_time +
msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
- priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
+ clear_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
+ clear_bit(OP_BT_SCAN, &priv->op_flags);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
ath_dbg(ath9k_hw_common(ah), BTCOEX,
"BT scan detected\n");
- priv->op_flags |= (OP_BT_SCAN |
- OP_BT_PRIORITY_DETECTED);
+ set_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
+ set_bit(OP_BT_SCAN, &priv->op_flags);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
ath_dbg(ath9k_hw_common(ah), BTCOEX,
"BT priority traffic detected\n");
- priv->op_flags |= OP_BT_PRIORITY_DETECTED;
+ set_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
}
btcoex->bt_priority_cnt = 0;
@@ -67,26 +68,23 @@ static void ath_btcoex_period_work(struct work_struct *work)
struct ath_btcoex *btcoex = &priv->btcoex;
struct ath_common *common = ath9k_hw_common(priv->ah);
u32 timer_period;
- bool is_btscan;
int ret;
ath_detect_bt_priority(priv);
- is_btscan = !!(priv->op_flags & OP_BT_SCAN);
-
ret = ath9k_htc_update_cap_target(priv,
- !!(priv->op_flags & OP_BT_PRIORITY_DETECTED));
+ test_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags));
if (ret) {
ath_err(common, "Unable to set BTCOEX parameters\n");
return;
}
- ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
- btcoex->bt_stomp_type);
+ ath9k_hw_btcoex_bt_stomp(priv->ah, test_bit(OP_BT_SCAN, &priv->op_flags) ?
+ ATH_BTCOEX_STOMP_ALL : btcoex->bt_stomp_type);
ath9k_hw_btcoex_enable(priv->ah);
- timer_period = is_btscan ? btcoex->btscan_no_stomp :
- btcoex->btcoex_no_stomp;
+ timer_period = test_bit(OP_BT_SCAN, &priv->op_flags) ?
+ btcoex->btscan_no_stomp : btcoex->btcoex_no_stomp;
ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
msecs_to_jiffies(timer_period));
ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work,
@@ -104,14 +102,15 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
struct ath_hw *ah = priv->ah;
struct ath_btcoex *btcoex = &priv->btcoex;
struct ath_common *common = ath9k_hw_common(ah);
- bool is_btscan = priv->op_flags & OP_BT_SCAN;
ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n");
- if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW ||
+ test_bit(OP_BT_SCAN, &priv->op_flags))
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+
ath9k_hw_btcoex_enable(priv->ah);
}
@@ -141,7 +140,8 @@ static void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
- priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
+ clear_bit(OP_BT_PRIORITY_DETECTED, &priv->op_flags);
+ clear_bit(OP_BT_SCAN, &priv->op_flags);
ieee80211_queue_delayed_work(priv->hw, &priv->coex_period_work, 0);
}
@@ -310,95 +310,3 @@ void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
wiphy_rfkill_start_polling(priv->hw->wiphy);
}
-
-void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
-{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int ret;
- u8 cmd_rsp;
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
- /* Reset the HW */
- ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (ret) {
- ath_err(common,
- "Unable to reset hardware; reset status %d (freq %u MHz)\n",
- ret, ah->curchan->channel);
- }
-
- ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
- &priv->curtxpow);
-
- /* Start RX */
- WMI_CMD(WMI_START_RECV_CMDID);
- ath9k_host_rx_init(priv);
-
- /* Start TX */
- htc_start(priv->htc);
- spin_lock_bh(&priv->tx.tx_lock);
- priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
- spin_unlock_bh(&priv->tx.tx_lock);
- ieee80211_wake_queues(hw);
-
- WMI_CMD(WMI_ENABLE_INTR_CMDID);
-
- /* Enable LED */
- ath9k_hw_cfg_output(ah, ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-}
-
-void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
-{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_hw *ah = priv->ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int ret;
- u8 cmd_rsp;
-
- ath9k_htc_ps_wakeup(priv);
-
- /* Disable LED */
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
- ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
-
- WMI_CMD(WMI_DISABLE_INTR_CMDID);
-
- /* Stop TX */
- ieee80211_stop_queues(hw);
- ath9k_htc_tx_drain(priv);
- WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
-
- /* Stop RX */
- WMI_CMD(WMI_STOP_RECV_CMDID);
-
- /* Clear the WMI event queue */
- ath9k_wmi_event_drain(priv);
-
- /*
- * The MIB counters have to be disabled here,
- * since the target doesn't do it.
- */
- ath9k_hw_disable_mib_counters(ah);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
- /* Reset the HW */
- ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (ret) {
- ath_err(common,
- "Unable to reset hardware; reset status %d (freq %u MHz)\n",
- ret, ah->curchan->channel);
- }
-
- /* Disable the PHY */
- ath9k_hw_phy_disable(ah);
-
- ath9k_htc_ps_restore(priv);
- ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
-}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 25213d521bc2..a035a380d669 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -611,7 +611,7 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
struct ath_common *common;
int i, ret = 0, csz = 0;
- priv->op_flags |= OP_INVALID;
+ set_bit(OP_INVALID, &priv->op_flags);
ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
if (!ah)
@@ -718,7 +718,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
hw->queues = 4;
hw->channel_change_time = 5000;
- hw->max_listen_interval = 10;
+ hw->max_listen_interval = 1;
hw->vif_data_size = sizeof(struct ath9k_htc_vif);
hw->sta_data_size = sizeof(struct ath9k_htc_sta);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index abbd6effd60d..c785129692ff 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -75,14 +75,19 @@ unlock:
void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
{
+ bool reset;
+
mutex_lock(&priv->htc_pm_lock);
if (--priv->ps_usecount != 0)
goto unlock;
- if (priv->ps_idle)
+ if (priv->ps_idle) {
+ ath9k_hw_setrxabort(priv->ah, true);
+ ath9k_hw_stopdmarecv(priv->ah, &reset);
ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
- else if (priv->ps_enabled)
+ } else if (priv->ps_enabled) {
ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
+ }
unlock:
mutex_unlock(&priv->htc_pm_lock);
@@ -250,7 +255,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
u8 cmd_rsp;
int ret;
- if (priv->op_flags & OP_INVALID)
+ if (test_bit(OP_INVALID, &priv->op_flags))
return -EIO;
fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
@@ -304,7 +309,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
htc_start(priv->htc);
- if (!(priv->op_flags & OP_SCANNING) &&
+ if (!test_bit(OP_SCANNING, &priv->op_flags) &&
!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
ath9k_htc_vif_reconfig(priv);
@@ -750,7 +755,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
common->ani.shortcal_timer = timestamp;
common->ani.checkani_timer = timestamp;
- priv->op_flags |= OP_ANI_RUNNING;
+ set_bit(OP_ANI_RUNNING, &priv->op_flags);
ieee80211_queue_delayed_work(common->hw, &priv->ani_work,
msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
@@ -759,7 +764,7 @@ void ath9k_htc_start_ani(struct ath9k_htc_priv *priv)
void ath9k_htc_stop_ani(struct ath9k_htc_priv *priv)
{
cancel_delayed_work_sync(&priv->ani_work);
- priv->op_flags &= ~OP_ANI_RUNNING;
+ clear_bit(OP_ANI_RUNNING, &priv->op_flags);
}
void ath9k_htc_ani_work(struct work_struct *work)
@@ -944,7 +949,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
ath_dbg(common, CONFIG,
"Failed to update capability in target\n");
- priv->op_flags &= ~OP_INVALID;
+ clear_bit(OP_INVALID, &priv->op_flags);
htc_start(priv->htc);
spin_lock_bh(&priv->tx.tx_lock);
@@ -973,7 +978,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- if (priv->op_flags & OP_INVALID) {
+ if (test_bit(OP_INVALID, &priv->op_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&priv->mutex);
return;
@@ -1015,7 +1020,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
ath9k_htc_ps_restore(priv);
ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
- priv->op_flags |= OP_INVALID;
+ set_bit(OP_INVALID, &priv->op_flags);
ath_dbg(common, CONFIG, "Driver halt\n");
mutex_unlock(&priv->mutex);
@@ -1105,8 +1110,8 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
ath9k_htc_set_opmode(priv);
if ((priv->ah->opmode == NL80211_IFTYPE_AP) &&
- !(priv->op_flags & OP_ANI_RUNNING)) {
- ath9k_hw_set_tsfadjust(priv->ah, 1);
+ !test_bit(OP_ANI_RUNNING, &priv->op_flags)) {
+ ath9k_hw_set_tsfadjust(priv->ah, true);
ath9k_htc_start_ani(priv);
}
@@ -1178,24 +1183,20 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
struct ath9k_htc_priv *priv = hw->priv;
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ieee80211_conf *conf = &hw->conf;
+ bool chip_reset = false;
+ int ret = 0;
mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- bool enable_radio = false;
- bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-
mutex_lock(&priv->htc_pm_lock);
- if (!idle && priv->ps_idle)
- enable_radio = true;
- priv->ps_idle = idle;
- mutex_unlock(&priv->htc_pm_lock);
- if (enable_radio) {
- ath_dbg(common, CONFIG, "not-idle: enabling radio\n");
- ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
- ath9k_htc_radio_enable(hw);
- }
+ priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
+ if (priv->ps_idle)
+ chip_reset = true;
+
+ mutex_unlock(&priv->htc_pm_lock);
}
/*
@@ -1210,7 +1211,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
ath9k_htc_remove_monitor_interface(priv);
}
- if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
@@ -1223,8 +1224,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
- mutex_unlock(&priv->mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
}
@@ -1246,21 +1247,10 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
priv->txpowlimit, &priv->curtxpow);
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE) {
- mutex_lock(&priv->htc_pm_lock);
- if (!priv->ps_idle) {
- mutex_unlock(&priv->htc_pm_lock);
- goto out;
- }
- mutex_unlock(&priv->htc_pm_lock);
-
- ath_dbg(common, CONFIG, "idle: disabling radio\n");
- ath9k_htc_radio_disable(hw);
- }
-
out:
+ ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
- return 0;
+ return ret;
}
#define SUPPORTED_FILTERS \
@@ -1285,7 +1275,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
changed_flags &= SUPPORTED_FILTERS;
*total_flags &= SUPPORTED_FILTERS;
- if (priv->op_flags & OP_INVALID) {
+ if (test_bit(OP_INVALID, &priv->op_flags)) {
ath_dbg(ath9k_hw_common(priv->ah), ANY,
"Unable to configure filter on invalid state\n");
mutex_unlock(&priv->mutex);
@@ -1361,7 +1351,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
qi.tqi_aifs = params->aifs;
qi.tqi_cwmin = params->cw_min;
qi.tqi_cwmax = params->cw_max;
- qi.tqi_burstTime = params->txop;
+ qi.tqi_burstTime = params->txop * 32;
qnum = get_hw_qnum(queue, priv->hwq_map);
@@ -1516,7 +1506,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
bss_conf->bssid);
ath9k_htc_set_tsfadjust(priv, vif);
- priv->op_flags |= OP_ENABLE_BEACON;
+ set_bit(OP_ENABLE_BEACON, &priv->op_flags);
ath9k_htc_beacon_config(priv, vif);
}
@@ -1529,7 +1519,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath_dbg(common, CONFIG,
"Beacon disabled for BSS: %pM\n",
bss_conf->bssid);
- priv->op_flags &= ~OP_ENABLE_BEACON;
+ clear_bit(OP_ENABLE_BEACON, &priv->op_flags);
ath9k_htc_beacon_config(priv, vif);
}
}
@@ -1542,7 +1532,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
(priv->nvifs == 1) &&
(priv->num_ap_vif == 1) &&
(vif->type == NL80211_IFTYPE_AP)) {
- priv->op_flags |= OP_TSF_RESET;
+ set_bit(OP_TSF_RESET, &priv->op_flags);
}
ath_dbg(common, CONFIG,
"Beacon interval changed for BSS: %pM\n",
@@ -1654,7 +1644,7 @@ static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
- priv->op_flags |= OP_SCANNING;
+ set_bit(OP_SCANNING, &priv->op_flags);
spin_unlock_bh(&priv->beacon_lock);
cancel_work_sync(&priv->ps_work);
ath9k_htc_stop_ani(priv);
@@ -1667,7 +1657,7 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
spin_lock_bh(&priv->beacon_lock);
- priv->op_flags &= ~OP_SCANNING;
+ clear_bit(OP_SCANNING, &priv->op_flags);
spin_unlock_bh(&priv->beacon_lock);
ath9k_htc_ps_wakeup(priv);
ath9k_htc_vif_reconfig(priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 3e40a6461512..47e61d0da33b 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -916,7 +916,7 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
{
ath9k_hw_rxena(priv->ah);
ath9k_htc_opmode_init(priv);
- ath9k_hw_startpcureceive(priv->ah, (priv->op_flags & OP_SCANNING));
+ ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 1c68e564f503..cfa91ab7acf8 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -342,6 +342,9 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
val = REG_READ(ah, AR_SREV);
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
return;
+ case AR9300_DEVID_QCA955X:
+ ah->hw_version.macVersion = AR_SREV_VERSION_9550;
+ return;
}
val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
@@ -390,14 +393,6 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
}
-static void ath9k_hw_aspm_init(struct ath_hw *ah)
-{
- struct ath_common *common = ath9k_hw_common(ah);
-
- if (common->bus_ops->aspm_init)
- common->bus_ops->aspm_init(common);
-}
-
/* This should work for all families including legacy */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
@@ -622,7 +617,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
- ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
+ ((AR_SREV_9160(ah) || AR_SREV_9280(ah) || AR_SREV_9287(ah)) &&
!ah->is_pciexpress)) {
ah->config.serialize_regmode =
SER_REG_MODE_ON;
@@ -654,6 +649,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
case AR_SREV_VERSION_9485:
case AR_SREV_VERSION_9340:
case AR_SREV_VERSION_9462:
+ case AR_SREV_VERSION_9550:
break;
default:
ath_err(common,
@@ -663,7 +659,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
}
if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
- AR_SREV_9330(ah))
+ AR_SREV_9330(ah) || AR_SREV_9550(ah))
ah->is_pciexpress = false;
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
@@ -675,10 +671,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
- /* disable ANI for 9340 */
- if (AR_SREV_9340(ah))
- ah->config.enable_ani = false;
-
ath9k_hw_init_mode_regs(ah);
if (!ah->is_pciexpress)
@@ -693,9 +685,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (r)
return r;
- if (ah->is_pciexpress)
- ath9k_hw_aspm_init(ah);
-
r = ath9k_hw_init_macaddr(ah);
if (r) {
ath_err(common, "Failed to initialize MAC address\n");
@@ -738,6 +727,7 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9300_DEVID_AR9485_PCIE:
case AR9300_DEVID_AR9330:
case AR9300_DEVID_AR9340:
+ case AR9300_DEVID_QCA955X:
case AR9300_DEVID_AR9580:
case AR9300_DEVID_AR9462:
break;
@@ -876,7 +866,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
/* program BB PLL phase_shift */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
- } else if (AR_SREV_9340(ah)) {
+ } else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
u32 regval, pll2_divint, pll2_divfrac, refdiv;
REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
@@ -890,9 +880,15 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
pll2_divfrac = 0x1eb85;
refdiv = 3;
} else {
- pll2_divint = 88;
- pll2_divfrac = 0;
- refdiv = 5;
+ if (AR_SREV_9340(ah)) {
+ pll2_divint = 88;
+ pll2_divfrac = 0;
+ refdiv = 5;
+ } else {
+ pll2_divint = 0x11;
+ pll2_divfrac = 0x26666;
+ refdiv = 1;
+ }
}
regval = REG_READ(ah, AR_PHY_PLL_MODE);
@@ -905,8 +901,12 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
udelay(100);
regval = REG_READ(ah, AR_PHY_PLL_MODE);
- regval = (regval & 0x80071fff) | (0x1 << 30) | (0x1 << 13) |
- (0x4 << 26) | (0x18 << 19);
+ if (AR_SREV_9340(ah))
+ regval = (regval & 0x80071fff) | (0x1 << 30) |
+ (0x1 << 13) | (0x4 << 26) | (0x18 << 19);
+ else
+ regval = (regval & 0x80071fff) | (0x3 << 30) |
+ (0x1 << 13) | (0x4 << 26) | (0x60 << 19);
REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
REG_WRITE(ah, AR_PHY_PLL_MODE,
REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
@@ -917,7 +917,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
- if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
+ if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
+ AR_SREV_9550(ah))
udelay(1000);
/* Switch the core clock for ar9271 to 117Mhz */
@@ -930,7 +931,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
- if (AR_SREV_9340(ah)) {
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
if (ah->is_clk_25mhz) {
REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
@@ -954,7 +955,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
if (AR_SREV_9300_20_OR_LATER(ah)) {
@@ -1371,6 +1372,9 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
}
}
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_check_gpm_offset(ah);
+
REG_WRITE(ah, AR_RTC_RC, rst_flags);
REGWRITE_BUFFER_FLUSH(ah);
@@ -1455,9 +1459,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
break;
}
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
- REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
-
return ret;
}
@@ -1733,8 +1734,8 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_hw_start_nfcal(ah, true);
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && ar9003_mci_is_ready(ah))
- ar9003_mci_2g5g_switch(ah, true);
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_2g5g_switch(ah, false);
if (AR_SREV_9271(ah))
ar9002_hw_load_ani_reg(ah, chan);
@@ -1754,10 +1755,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u64 tsf = 0;
int i, r;
bool start_mci_reset = false;
- bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
bool save_fullsleep = ah->chip_fullsleep;
- if (mci) {
+ if (ath9k_hw_mci_is_enabled(ah)) {
start_mci_reset = ar9003_mci_start_reset(ah, chan);
if (start_mci_reset)
return 0;
@@ -1786,7 +1786,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
return r;
}
- if (mci)
+ if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_stop_bt(ah, save_fullsleep);
saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
@@ -1844,7 +1844,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
- if (mci)
+ if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
/*
@@ -1939,7 +1939,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_set_dma(ah);
- REG_WRITE(ah, AR_OBS, 8);
+ if (!ath9k_hw_mci_is_enabled(ah))
+ REG_WRITE(ah, AR_OBS, 8);
if (ah->config.rx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
@@ -1960,10 +1961,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
- ath9k_hw_loadnf(ah, chan);
- ath9k_hw_start_nfcal(ah, true);
-
- if (mci && ar9003_mci_end_reset(ah, chan, caldata))
+ if (ath9k_hw_mci_is_enabled(ah) && ar9003_mci_end_reset(ah, chan, caldata))
return -EIO;
ENABLE_REGWRITE_BUFFER(ah);
@@ -1998,7 +1996,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
}
#ifdef __BIG_ENDIAN
- else if (AR_SREV_9330(ah) || AR_SREV_9340(ah))
+ else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
+ AR_SREV_9550(ah))
REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
else
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@@ -2008,9 +2007,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ath9k_hw_btcoex_is_enabled(ah))
ath9k_hw_btcoex_enable(ah);
- if (mci)
+ if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_check_bt(ah);
+ ath9k_hw_loadnf(ah, chan);
+ ath9k_hw_start_nfcal(ah, true);
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
@@ -2031,39 +2033,35 @@ EXPORT_SYMBOL(ath9k_hw_reset);
* Notify Power Mgt is disabled in self-generated frames.
* If requested, force chip to sleep.
*/
-static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
+static void ath9k_set_power_sleep(struct ath_hw *ah)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
- if (setChip) {
- if (AR_SREV_9462(ah)) {
- REG_WRITE(ah, AR_TIMER_MODE,
- REG_READ(ah, AR_TIMER_MODE) & 0xFFFFFF00);
- REG_WRITE(ah, AR_NDP2_TIMER_MODE, REG_READ(ah,
- AR_NDP2_TIMER_MODE) & 0xFFFFFF00);
- REG_WRITE(ah, AR_SLP32_INC,
- REG_READ(ah, AR_SLP32_INC) & 0xFFF00000);
- /* xxx Required for WLAN only case ? */
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
- udelay(100);
- }
- /*
- * Clear the RTC force wake bit to allow the
- * mac to go to sleep.
- */
- REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+ if (AR_SREV_9462(ah)) {
+ REG_CLR_BIT(ah, AR_TIMER_MODE, 0xff);
+ REG_CLR_BIT(ah, AR_NDP2_TIMER_MODE, 0xff);
+ REG_CLR_BIT(ah, AR_SLP32_INC, 0xfffff);
+ /* xxx Required for WLAN only case ? */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+ udelay(100);
+ }
- if (AR_SREV_9462(ah))
- udelay(100);
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
- if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
- REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
+ if (ath9k_hw_mci_is_enabled(ah))
+ udelay(100);
- /* Shutdown chip. Active low */
- if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
- REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
- udelay(2);
- }
+ if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
+
+ /* Shutdown chip. Active low */
+ if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
+ REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
+ udelay(2);
}
/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
@@ -2076,44 +2074,38 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
* frames. If request, set power mode of chip to
* auto/normal. Duration in units of 128us (1/8 TU).
*/
-static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
+static void ath9k_set_power_network_sleep(struct ath_hw *ah)
{
- u32 val;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
- if (setChip) {
- struct ath9k_hw_capabilities *pCap = &ah->caps;
- if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
- /* Set WakeOnInterrupt bit; clear ForceWake bit */
- REG_WRITE(ah, AR_RTC_FORCE_WAKE,
- AR_RTC_FORCE_WAKE_ON_INT);
- } else {
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ /* Set WakeOnInterrupt bit; clear ForceWake bit */
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_ON_INT);
+ } else {
- /* When chip goes into network sleep, it could be waken
- * up by MCI_INT interrupt caused by BT's HW messages
- * (LNA_xxx, CONT_xxx) which chould be in a very fast
- * rate (~100us). This will cause chip to leave and
- * re-enter network sleep mode frequently, which in
- * consequence will have WLAN MCI HW to generate lots of
- * SYS_WAKING and SYS_SLEEPING messages which will make
- * BT CPU to busy to process.
- */
- if (AR_SREV_9462(ah)) {
- val = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_EN) &
- ~AR_MCI_INTERRUPT_RX_HW_MSG_MASK;
- REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, val);
- }
- /*
- * Clear the RTC force wake bit to allow the
- * mac to go to sleep.
- */
- REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
- AR_RTC_FORCE_WAKE_EN);
-
- if (AR_SREV_9462(ah))
- udelay(30);
- }
+ /* When chip goes into network sleep, it could be waken
+ * up by MCI_INT interrupt caused by BT's HW messages
+ * (LNA_xxx, CONT_xxx) which chould be in a very fast
+ * rate (~100us). This will cause chip to leave and
+ * re-enter network sleep mode frequently, which in
+ * consequence will have WLAN MCI HW to generate lots of
+ * SYS_WAKING and SYS_SLEEPING messages which will make
+ * BT CPU to busy to process.
+ */
+ if (ath9k_hw_mci_is_enabled(ah))
+ REG_CLR_BIT(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+ AR_MCI_INTERRUPT_RX_HW_MSG_MASK);
+ /*
+ * Clear the RTC force wake bit to allow the
+ * mac to go to sleep.
+ */
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ udelay(30);
}
/* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
@@ -2121,7 +2113,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
-static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
+static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
{
u32 val;
int i;
@@ -2132,37 +2124,38 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
udelay(10);
}
- if (setChip) {
- if ((REG_READ(ah, AR_RTC_STATUS) &
- AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
- if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
- return false;
- }
- if (!AR_SREV_9300_20_OR_LATER(ah))
- ath9k_hw_init_pll(ah, NULL);
+ if ((REG_READ(ah, AR_RTC_STATUS) &
+ AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
+ if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
+ return false;
}
- if (AR_SREV_9100(ah))
- REG_SET_BIT(ah, AR_RTC_RESET,
- AR_RTC_RESET_EN);
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ ath9k_hw_init_pll(ah, NULL);
+ }
+ if (AR_SREV_9100(ah))
+ REG_SET_BIT(ah, AR_RTC_RESET,
+ AR_RTC_RESET_EN);
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
+ AR_RTC_FORCE_WAKE_EN);
+ udelay(50);
+
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_set_power_awake(ah);
+
+ for (i = POWER_UP_TIME / 50; i > 0; i--) {
+ val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
+ if (val == AR_RTC_STATUS_ON)
+ break;
+ udelay(50);
REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
- udelay(50);
-
- for (i = POWER_UP_TIME / 50; i > 0; i--) {
- val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
- if (val == AR_RTC_STATUS_ON)
- break;
- udelay(50);
- REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
- AR_RTC_FORCE_WAKE_EN);
- }
- if (i == 0) {
- ath_err(ath9k_hw_common(ah),
- "Failed to wakeup in %uus\n",
- POWER_UP_TIME / 20);
- return false;
- }
+ }
+ if (i == 0) {
+ ath_err(ath9k_hw_common(ah),
+ "Failed to wakeup in %uus\n",
+ POWER_UP_TIME / 20);
+ return false;
}
REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
@@ -2173,7 +2166,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
struct ath_common *common = ath9k_hw_common(ah);
- int status = true, setChip = true;
+ int status = true;
static const char *modes[] = {
"AWAKE",
"FULL-SLEEP",
@@ -2189,25 +2182,17 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
switch (mode) {
case ATH9K_PM_AWAKE:
- status = ath9k_hw_set_power_awake(ah, setChip);
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
- REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
-
+ status = ath9k_hw_set_power_awake(ah);
break;
case ATH9K_PM_FULL_SLEEP:
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_set_full_sleep(ah);
- ath9k_set_power_sleep(ah, setChip);
+ ath9k_set_power_sleep(ah);
ah->chip_fullsleep = true;
break;
case ATH9K_PM_NETWORK_SLEEP:
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
- REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
-
- ath9k_set_power_network_sleep(ah, setChip);
+ ath9k_set_power_network_sleep(ah);
break;
default:
ath_err(common, "Unknown power mode %u\n", mode);
@@ -2600,6 +2585,14 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
}
+ if (AR_SREV_9280_20_OR_LATER(ah)) {
+ pCap->hw_caps |= ATH9K_HW_WOW_DEVICE_CAPABLE |
+ ATH9K_HW_WOW_PATTERN_MATCH_EXACT;
+
+ if (AR_SREV_9280(ah))
+ pCap->hw_caps |= ATH9K_HW_WOW_PATTERN_MATCH_DWORD;
+ }
+
return 0;
}
@@ -2777,6 +2770,9 @@ EXPORT_SYMBOL(ath9k_hw_setrxfilter);
bool ath9k_hw_phy_disable(struct ath_hw *ah)
{
+ if (ath9k_hw_mci_is_enabled(ah))
+ ar9003_mci_bt_gain_ctrl(ah);
+
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
return false;
@@ -2916,9 +2912,9 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah)
}
EXPORT_SYMBOL(ath9k_hw_reset_tsf);
-void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
+void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set)
{
- if (setting)
+ if (set)
ah->misc_mode |= AR_PCU_TX_ADD_TSF;
else
ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
@@ -3162,6 +3158,7 @@ static struct {
{ AR_SREV_VERSION_9340, "9340" },
{ AR_SREV_VERSION_9485, "9485" },
{ AR_SREV_VERSION_9462, "9462" },
+ { AR_SREV_VERSION_9550, "9550" },
};
/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index b620c557c2a6..dd0c146d81dc 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -48,6 +48,7 @@
#define AR9300_DEVID_AR9580 0x0033
#define AR9300_DEVID_AR9462 0x0034
#define AR9300_DEVID_AR9330 0x0035
+#define AR9300_DEVID_QCA955X 0x0038
#define AR5416_AR9100_DEVID 0x000b
@@ -179,6 +180,37 @@
#define PAPRD_TABLE_SZ 24
#define PAPRD_IDEAL_AGC2_PWR_RANGE 0xe0
+/*
+ * Wake on Wireless
+ */
+
+/* Keep Alive Frame */
+#define KAL_FRAME_LEN 28
+#define KAL_FRAME_TYPE 0x2 /* data frame */
+#define KAL_FRAME_SUB_TYPE 0x4 /* null data frame */
+#define KAL_DURATION_ID 0x3d
+#define KAL_NUM_DATA_WORDS 6
+#define KAL_NUM_DESC_WORDS 12
+#define KAL_ANTENNA_MODE 1
+#define KAL_TO_DS 1
+#define KAL_DELAY 4 /*delay of 4ms between 2 KAL frames */
+#define KAL_TIMEOUT 900
+
+#define MAX_PATTERN_SIZE 256
+#define MAX_PATTERN_MASK_SIZE 32
+#define MAX_NUM_PATTERN 8
+#define MAX_NUM_USER_PATTERN 6 /* deducting the disassociate and
+ deauthenticate packets */
+
+/*
+ * WoW trigger mapping to hardware code
+ */
+
+#define AH_WOW_USER_PATTERN_EN BIT(0)
+#define AH_WOW_MAGIC_PATTERN_EN BIT(1)
+#define AH_WOW_LINK_CHANGE BIT(2)
+#define AH_WOW_BEACON_MISS BIT(3)
+
enum ath_hw_txq_subtype {
ATH_TXQ_AC_BE = 0,
ATH_TXQ_AC_BK = 1,
@@ -211,8 +243,22 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_RTT = BIT(14),
ATH9K_HW_CAP_MCI = BIT(15),
ATH9K_HW_CAP_DFS = BIT(16),
+ ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
+ ATH9K_HW_WOW_PATTERN_MATCH_EXACT = BIT(18),
+ ATH9K_HW_WOW_PATTERN_MATCH_DWORD = BIT(19),
};
+/*
+ * WoW device capabilities
+ * @ATH9K_HW_WOW_DEVICE_CAPABLE: device revision is capable of WoW.
+ * @ATH9K_HW_WOW_PATTERN_MATCH_EXACT: device is capable of matching
+ * an exact user defined pattern or de-authentication/disassoc pattern.
+ * @ATH9K_HW_WOW_PATTERN_MATCH_DWORD: device requires the first four
+ * bytes of the pattern for user defined pattern, de-authentication and
+ * disassociation patterns for all types of possible frames recieved
+ * of those types.
+ */
+
struct ath9k_hw_capabilities {
u32 hw_caps; /* ATH9K_HW_CAP_* from ath9k_hw_caps */
u16 rts_aggr_limit;
@@ -814,17 +860,20 @@ struct ath_hw {
struct ar5416IniArray iniBank7;
struct ar5416IniArray iniAddac;
struct ar5416IniArray iniPcieSerdes;
+#ifdef CONFIG_PM_SLEEP
+ struct ar5416IniArray iniPcieSerdesWow;
+#endif
struct ar5416IniArray iniPcieSerdesLowPower;
struct ar5416IniArray iniModesFastClock;
struct ar5416IniArray iniAdditional;
struct ar5416IniArray iniModesRxGain;
+ struct ar5416IniArray ini_modes_rx_gain_bounds;
struct ar5416IniArray iniModesTxGain;
struct ar5416IniArray iniCckfirNormal;
struct ar5416IniArray iniCckfirJapan2484;
struct ar5416IniArray ini_japan2484;
struct ar5416IniArray iniModes_9271_ANI_reg;
struct ar5416IniArray ini_radio_post_sys2ant;
- struct ar5416IniArray ini_BTCOEX_MAX_TXPWR;
struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
@@ -862,6 +911,9 @@ struct ath_hw {
/* Enterprise mode cap */
u32 ent_mode;
+#ifdef CONFIG_PM_SLEEP
+ u32 wow_event_mask;
+#endif
bool is_clk_25mhz;
int (*get_mac_revision)(void);
int (*external_reset)(void);
@@ -942,7 +994,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
-void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
+void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
void ath9k_hw_init_global_settings(struct ath_hw *ah);
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
@@ -1020,16 +1072,8 @@ void ar9002_hw_attach_ops(struct ath_hw *ah);
void ar9003_hw_attach_ops(struct ath_hw *ah);
void ar9002_hw_load_ani_reg(struct ath_hw *ah, struct ath9k_channel *chan);
-/*
- * ANI work can be shared between all families but a next
- * generation implementation of ANI will be used only for AR9003 only
- * for now as the other families still need to be tested with the same
- * next generation ANI. Feel free to start testing it though for the
- * older families (AR5008, AR9001, AR9002) by using modparam_force_new_ani.
- */
-extern int modparam_force_new_ani;
+
void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
-void ath9k_hw_proc_mib_event(struct ath_hw *ah);
void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
@@ -1037,6 +1081,12 @@ static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
{
return ah->btcoex_hw.enabled;
}
+static inline bool ath9k_hw_mci_is_enabled(struct ath_hw *ah)
+{
+ return ah->common.btcoex_enabled &&
+ (ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+
+}
void ath9k_hw_btcoex_enable(struct ath_hw *ah);
static inline enum ath_btcoex_scheme
ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
@@ -1048,6 +1098,10 @@ static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah)
{
return false;
}
+static inline bool ath9k_hw_mci_is_enabled(struct ath_hw *ah)
+{
+ return false;
+}
static inline void ath9k_hw_btcoex_enable(struct ath_hw *ah)
{
}
@@ -1058,6 +1112,37 @@ ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
+#ifdef CONFIG_PM_SLEEP
+const char *ath9k_hw_wow_event_to_string(u32 wow_event);
+void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+ u8 *user_mask, int pattern_count,
+ int pattern_len);
+u32 ath9k_hw_wow_wakeup(struct ath_hw *ah);
+void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable);
+#else
+static inline const char *ath9k_hw_wow_event_to_string(u32 wow_event)
+{
+ return NULL;
+}
+static inline void ath9k_hw_wow_apply_pattern(struct ath_hw *ah,
+ u8 *user_pattern,
+ u8 *user_mask,
+ int pattern_count,
+ int pattern_len)
+{
+}
+static inline u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
+{
+ return 0;
+}
+static inline void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+{
+}
+#endif
+
+
+
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index dee9e092449a..f33712140fa5 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -434,6 +434,7 @@ static int ath9k_init_queues(struct ath_softc *sc)
for (i = 0; i < WME_NUM_AC; i++) {
sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
sc->tx.txq_map[i]->mac80211_qnum = i;
+ sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
}
return 0;
}
@@ -489,6 +490,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->config.txpowlimit = ATH_TXPOWER_MAX;
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@@ -557,9 +559,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
spin_lock_init(&sc->debug.samp_lock);
#endif
tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
- tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
+ tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
(unsigned long)sc);
+ INIT_WORK(&sc->hw_reset_work, ath_reset_work);
+ INIT_WORK(&sc->hw_check_work, ath_hw_check);
+ INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
+ INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
+ setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
+
/*
* Cache line size is used to size and align various
* structures used to communicate with the hardware.
@@ -590,6 +598,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_init_misc(sc);
+ if (common->bus_ops->aspm_init)
+ common->bus_ops->aspm_init(common);
+
return 0;
err_btcoex:
@@ -703,6 +714,24 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+#ifdef CONFIG_PM_SLEEP
+
+ if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
+ device_can_wakeup(sc->dev)) {
+
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT;
+ hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN;
+ hw->wiphy->wowlan.pattern_min_len = 1;
+ hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE;
+
+ }
+
+ atomic_set(&sc->wow_sleep_proc_intr, -1);
+ atomic_set(&sc->wow_got_bmiss_intr, -1);
+
+#endif
+
hw->queues = 4;
hw->max_rates = 4;
hw->channel_change_time = 5000;
@@ -782,11 +811,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
ARRAY_SIZE(ath9k_tpt_blink));
#endif
- INIT_WORK(&sc->hw_reset_work, ath_reset_work);
- INIT_WORK(&sc->hw_check_work, ath_hw_check);
- INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
- INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
-
/* Register with mac80211 */
error = ieee80211_register_hw(hw);
if (error)
@@ -805,9 +829,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
goto error_world;
}
- setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
-
ath_init_leds(sc);
ath_start_rfkill_poll(sc);
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
new file mode 100644
index 000000000000..d4549e9aac5c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/*
+ * TX polling - checks if the TX engine is stuck somewhere
+ * and issues a chip reset if so.
+ */
+void ath_tx_complete_poll_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ tx_complete_work.work);
+ struct ath_txq *txq;
+ int i;
+ bool needreset = false;
+#ifdef CONFIG_ATH9K_DEBUGFS
+ sc->tx_complete_poll_work_seen++;
+#endif
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i)) {
+ txq = &sc->tx.txq[i];
+ ath_txq_lock(sc, txq);
+ if (txq->axq_depth) {
+ if (txq->axq_tx_inprogress) {
+ needreset = true;
+ ath_txq_unlock(sc, txq);
+ break;
+ } else {
+ txq->axq_tx_inprogress = true;
+ }
+ }
+ ath_txq_unlock_complete(sc, txq);
+ }
+
+ if (needreset) {
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
+ "tx hung, resetting the chip\n");
+ ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
+ return;
+ }
+
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
+ msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
+}
+
+/*
+ * Checks if the BB/MAC is hung.
+ */
+void ath_hw_check(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ unsigned long flags;
+ int busy;
+ u8 is_alive, nbeacon = 1;
+ enum ath_reset_type type;
+
+ ath9k_ps_wakeup(sc);
+ is_alive = ath9k_hw_check_alive(sc->sc_ah);
+
+ if (is_alive && !AR_SREV_9300(sc->sc_ah))
+ goto out;
+ else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
+ ath_dbg(common, RESET,
+ "DCU stuck is detected. Schedule chip reset\n");
+ type = RESET_TYPE_MAC_HANG;
+ goto sched_reset;
+ }
+
+ spin_lock_irqsave(&common->cc_lock, flags);
+ busy = ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+
+ ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
+ busy, sc->hw_busy_count + 1);
+ if (busy >= 99) {
+ if (++sc->hw_busy_count >= 3) {
+ type = RESET_TYPE_BB_HANG;
+ goto sched_reset;
+ }
+ } else if (busy >= 0) {
+ sc->hw_busy_count = 0;
+ nbeacon = 3;
+ }
+
+ ath_start_rx_poll(sc, nbeacon);
+ goto out;
+
+sched_reset:
+ ath9k_queue_reset(sc, type);
+out:
+ ath9k_ps_restore(sc);
+}
+
+/*
+ * PLL-WAR for AR9485/AR9340
+ */
+static bool ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
+{
+ static int count;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (pll_sqsum >= 0x40000) {
+ count++;
+ if (count == 3) {
+ ath_dbg(common, RESET, "PLL WAR, resetting the chip\n");
+ ath9k_queue_reset(sc, RESET_TYPE_PLL_HANG);
+ count = 0;
+ return true;
+ }
+ } else {
+ count = 0;
+ }
+
+ return false;
+}
+
+void ath_hw_pll_work(struct work_struct *work)
+{
+ u32 pll_sqsum;
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ hw_pll_work.work);
+ /*
+ * ensure that the PLL WAR is executed only
+ * after the STA is associated (or) if the
+ * beaconing had started in interfaces that
+ * uses beacons.
+ */
+ if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
+ return;
+
+ ath9k_ps_wakeup(sc);
+ pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
+ ath9k_ps_restore(sc);
+ if (ath_hw_pll_rx_hang_check(sc, pll_sqsum))
+ return;
+
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
+ msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
+}
+
+/*
+ * RX Polling - monitors baseband hangs.
+ */
+void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
+{
+ if (!AR_SREV_9300(sc->sc_ah))
+ return;
+
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ return;
+
+ mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
+ (nbeacon * sc->cur_beacon_conf.beacon_interval));
+}
+
+void ath_rx_poll(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *)data;
+
+ ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+}
+
+/*
+ * PA Pre-distortion.
+ */
+static void ath_paprd_activate(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ int chain;
+
+ if (!caldata || !caldata->paprd_done)
+ return;
+
+ ath9k_ps_wakeup(sc);
+ ar9003_paprd_enable(ah, false);
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->txchainmask & BIT(chain)))
+ continue;
+
+ ar9003_paprd_populate_single_table(ah, caldata, chain);
+ }
+
+ ar9003_paprd_enable(ah, true);
+ ath9k_ps_restore(sc);
+}
+
+static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_tx_control txctl;
+ int time_left;
+
+ memset(&txctl, 0, sizeof(txctl));
+ txctl.txq = sc->tx.txq_map[WME_AC_BE];
+
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->band = hw->conf.channel->band;
+ tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ tx_info->control.rates[0].idx = 0;
+ tx_info->control.rates[0].count = 1;
+ tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ tx_info->control.rates[1].idx = -1;
+
+ init_completion(&sc->paprd_complete);
+ txctl.paprd = BIT(chain);
+
+ if (ath_tx_start(hw, skb, &txctl) != 0) {
+ ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
+ dev_kfree_skb_any(skb);
+ return false;
+ }
+
+ time_left = wait_for_completion_timeout(&sc->paprd_complete,
+ msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
+
+ if (!time_left)
+ ath_dbg(common, CALIBRATE,
+ "Timeout waiting for paprd training on TX chain %d\n",
+ chain);
+
+ return !!time_left;
+}
+
+void ath_paprd_calibrate(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb = NULL;
+ struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int ftype;
+ int chain_ok = 0;
+ int chain;
+ int len = 1800;
+
+ if (!caldata)
+ return;
+
+ ath9k_ps_wakeup(sc);
+
+ if (ar9003_paprd_init_table(ah) < 0)
+ goto fail_paprd;
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ goto fail_paprd;
+
+ skb_put(skb, len);
+ memset(skb->data, 0, len);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
+ hdr->frame_control = cpu_to_le16(ftype);
+ hdr->duration_id = cpu_to_le16(10);
+ memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
+ memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
+
+ for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+ if (!(ah->txchainmask & BIT(chain)))
+ continue;
+
+ chain_ok = 0;
+
+ ath_dbg(common, CALIBRATE,
+ "Sending PAPRD frame for thermal measurement on chain %d\n",
+ chain);
+ if (!ath_paprd_send_frame(sc, skb, chain))
+ goto fail_paprd;
+
+ ar9003_paprd_setup_gain_table(ah, chain);
+
+ ath_dbg(common, CALIBRATE,
+ "Sending PAPRD training frame on chain %d\n", chain);
+ if (!ath_paprd_send_frame(sc, skb, chain))
+ goto fail_paprd;
+
+ if (!ar9003_paprd_is_done(ah)) {
+ ath_dbg(common, CALIBRATE,
+ "PAPRD not yet done on chain %d\n", chain);
+ break;
+ }
+
+ if (ar9003_paprd_create_curve(ah, caldata, chain)) {
+ ath_dbg(common, CALIBRATE,
+ "PAPRD create curve failed on chain %d\n",
+ chain);
+ break;
+ }
+
+ chain_ok = 1;
+ }
+ kfree_skb(skb);
+
+ if (chain_ok) {
+ caldata->paprd_done = true;
+ ath_paprd_activate(sc);
+ }
+
+fail_paprd:
+ ath9k_ps_restore(sc);
+}
+
+/*
+ * ANI performs periodic noise floor calibration
+ * that is used to adjust and optimize the chip performance. This
+ * takes environmental changes (location, temperature) into account.
+ * When the task is complete, it reschedules itself depending on the
+ * appropriate interval that was calculated.
+ */
+void ath_ani_calibrate(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool longcal = false;
+ bool shortcal = false;
+ bool aniflag = false;
+ unsigned int timestamp = jiffies_to_msecs(jiffies);
+ u32 cal_interval, short_cal_interval, long_cal_interval;
+ unsigned long flags;
+
+ if (ah->caldata && ah->caldata->nfcal_interference)
+ long_cal_interval = ATH_LONG_CALINTERVAL_INT;
+ else
+ long_cal_interval = ATH_LONG_CALINTERVAL;
+
+ short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
+ ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
+
+ /* Only calibrate if awake */
+ if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
+ goto set_timer;
+
+ ath9k_ps_wakeup(sc);
+
+ /* Long calibration runs independently of short calibration. */
+ if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
+ longcal = true;
+ common->ani.longcal_timer = timestamp;
+ }
+
+ /* Short calibration applies only while caldone is false */
+ if (!common->ani.caldone) {
+ if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
+ shortcal = true;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.resetcal_timer = timestamp;
+ }
+ } else {
+ if ((timestamp - common->ani.resetcal_timer) >=
+ ATH_RESTART_CALINTERVAL) {
+ common->ani.caldone = ath9k_hw_reset_calvalid(ah);
+ if (common->ani.caldone)
+ common->ani.resetcal_timer = timestamp;
+ }
+ }
+
+ /* Verify whether we must check ANI */
+ if (sc->sc_ah->config.enable_ani
+ && (timestamp - common->ani.checkani_timer) >=
+ ah->config.ani_poll_interval) {
+ aniflag = true;
+ common->ani.checkani_timer = timestamp;
+ }
+
+ /* Call ANI routine if necessary */
+ if (aniflag) {
+ spin_lock_irqsave(&common->cc_lock, flags);
+ ath9k_hw_ani_monitor(ah, ah->curchan);
+ ath_update_survey_stats(sc);
+ spin_unlock_irqrestore(&common->cc_lock, flags);
+ }
+
+ /* Perform calibration if necessary */
+ if (longcal || shortcal) {
+ common->ani.caldone =
+ ath9k_hw_calibrate(ah, ah->curchan,
+ ah->rxchainmask, longcal);
+ }
+
+ ath_dbg(common, ANI,
+ "Calibration @%lu finished: %s %s %s, caldone: %s\n",
+ jiffies,
+ longcal ? "long" : "", shortcal ? "short" : "",
+ aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
+
+ ath9k_debug_samp_bb_mac(sc);
+ ath9k_ps_restore(sc);
+
+set_timer:
+ /*
+ * Set timer interval based on previous results.
+ * The interval must be the shortest necessary to satisfy ANI,
+ * short calibration and long calibration.
+ */
+ cal_interval = ATH_LONG_CALINTERVAL;
+ if (sc->sc_ah->config.enable_ani)
+ cal_interval = min(cal_interval,
+ (u32)ah->config.ani_poll_interval);
+ if (!common->ani.caldone)
+ cal_interval = min(cal_interval, (u32)short_cal_interval);
+
+ mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
+ if (!ah->caldata->paprd_done)
+ ieee80211_queue_work(sc->hw, &sc->paprd_work);
+ else if (!ah->paprd_table_write_done)
+ ath_paprd_activate(sc);
+ }
+}
+
+void ath_start_ani(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ unsigned long timestamp = jiffies_to_msecs(jiffies);
+
+ if (common->disable_ani ||
+ !test_bit(SC_OP_ANI_RUN, &sc->sc_flags) ||
+ (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ return;
+
+ common->ani.longcal_timer = timestamp;
+ common->ani.shortcal_timer = timestamp;
+ common->ani.checkani_timer = timestamp;
+
+ ath_dbg(common, ANI, "Starting ANI\n");
+ mod_timer(&common->ani.timer,
+ jiffies + msecs_to_jiffies((u32)ah->config.ani_poll_interval));
+}
+
+void ath_stop_ani(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ ath_dbg(common, ANI, "Stopping ANI\n");
+ del_timer_sync(&common->ani.timer);
+}
+
+void ath_check_ani(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
+
+ /*
+ * Check for the various conditions in which ANI has to
+ * be stopped.
+ */
+ if (ah->opmode == NL80211_IFTYPE_ADHOC) {
+ if (!cur_conf->enable_beacon)
+ goto stop_ani;
+ } else if (ah->opmode == NL80211_IFTYPE_AP) {
+ if (!cur_conf->enable_beacon) {
+ /*
+ * Disable ANI only when there are no
+ * associated stations.
+ */
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ goto stop_ani;
+ }
+ } else if (ah->opmode == NL80211_IFTYPE_STATION) {
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
+ goto stop_ani;
+ }
+
+ if (!test_bit(SC_OP_ANI_RUN, &sc->sc_flags)) {
+ set_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ ath_start_ani(sc);
+ }
+
+ return;
+
+stop_ani:
+ clear_bit(SC_OP_ANI_RUN, &sc->sc_flags);
+ ath_stop_ani(sc);
+}
+
+void ath_update_survey_nf(struct ath_softc *sc, int channel)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_channel *chan = &ah->channels[channel];
+ struct survey_info *survey = &sc->survey[channel];
+
+ if (chan->noisefloor) {
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+ survey->noise = ath9k_hw_getchan_noise(ah, chan);
+ }
+}
+
+/*
+ * Updates the survey statistics and returns the busy time since last
+ * update in %, if the measurement duration was long enough for the
+ * result to be useful, -1 otherwise.
+ */
+int ath_update_survey_stats(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ int pos = ah->curchan - &ah->channels[0];
+ struct survey_info *survey = &sc->survey[pos];
+ struct ath_cycle_counters *cc = &common->cc_survey;
+ unsigned int div = common->clockrate * 1000;
+ int ret = 0;
+
+ if (!ah->curchan)
+ return -1;
+
+ if (ah->power_mode == ATH9K_PM_AWAKE)
+ ath_hw_cycle_counters_update(common);
+
+ if (cc->cycles > 0) {
+ survey->filled |= SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_BUSY |
+ SURVEY_INFO_CHANNEL_TIME_RX |
+ SURVEY_INFO_CHANNEL_TIME_TX;
+ survey->channel_time += cc->cycles / div;
+ survey->channel_time_busy += cc->rx_busy / div;
+ survey->channel_time_rx += cc->rx_frame / div;
+ survey->channel_time_tx += cc->tx_frame / div;
+ }
+
+ if (cc->cycles < div)
+ return -1;
+
+ if (cc->cycles > 0)
+ ret = cc->rx_busy * 100 / cc->cycles;
+
+ memset(cc, 0, sizeof(*cc));
+
+ ath_update_survey_nf(sc, pos);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 04ef775ccee1..7990cd55599c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -810,7 +810,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
return;
}
- if (AR_SREV_9340(ah))
+ if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
async_mask = AR_INTR_MAC_IRQ;
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 21c955609e6c..0eba36dca6f8 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -646,6 +646,7 @@ enum ath9k_rx_filter {
ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
ATH9K_RX_FILTER_CONTROL_WRAPPER = 0x00080000,
+ ATH9K_RX_FILTER_4ADDRESS = 0x00100000,
};
#define ATH9K_RATESERIES_RTS_CTS 0x0001
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dac1a2709e3c..6049d8b82855 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -19,7 +19,10 @@
#include "ath9k.h"
#include "btcoex.h"
-static u8 parse_mpdudensity(u8 mpdudensity)
+static void ath9k_set_assoc_state(struct ath_softc *sc,
+ struct ieee80211_vif *vif);
+
+u8 ath9k_parse_mpdudensity(u8 mpdudensity)
{
/*
* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
@@ -101,6 +104,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
spin_lock(&common->cc_lock);
ath_hw_cycle_counters_update(common);
memset(&common->cc_survey, 0, sizeof(common->cc_survey));
+ memset(&common->cc_ani, 0, sizeof(common->cc_ani));
spin_unlock(&common->cc_lock);
}
@@ -129,6 +133,8 @@ void ath9k_ps_restore(struct ath_softc *sc)
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK))) {
mode = ATH9K_PM_NETWORK_SLEEP;
+ if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
+ ath9k_btcoex_stop_gen_timer(sc);
} else {
goto unlock;
}
@@ -143,90 +149,17 @@ void ath9k_ps_restore(struct ath_softc *sc)
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
}
-void ath_start_ani(struct ath_common *common)
-{
- struct ath_hw *ah = common->ah;
- unsigned long timestamp = jiffies_to_msecs(jiffies);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- if (!(sc->sc_flags & SC_OP_ANI_RUN))
- return;
-
- if (sc->sc_flags & SC_OP_OFFCHANNEL)
- return;
-
- common->ani.longcal_timer = timestamp;
- common->ani.shortcal_timer = timestamp;
- common->ani.checkani_timer = timestamp;
-
- mod_timer(&common->ani.timer,
- jiffies +
- msecs_to_jiffies((u32)ah->config.ani_poll_interval));
-}
-
-static void ath_update_survey_nf(struct ath_softc *sc, int channel)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath9k_channel *chan = &ah->channels[channel];
- struct survey_info *survey = &sc->survey[channel];
-
- if (chan->noisefloor) {
- survey->filled |= SURVEY_INFO_NOISE_DBM;
- survey->noise = ath9k_hw_getchan_noise(ah, chan);
- }
-}
-
-/*
- * Updates the survey statistics and returns the busy time since last
- * update in %, if the measurement duration was long enough for the
- * result to be useful, -1 otherwise.
- */
-static int ath_update_survey_stats(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- int pos = ah->curchan - &ah->channels[0];
- struct survey_info *survey = &sc->survey[pos];
- struct ath_cycle_counters *cc = &common->cc_survey;
- unsigned int div = common->clockrate * 1000;
- int ret = 0;
-
- if (!ah->curchan)
- return -1;
-
- if (ah->power_mode == ATH9K_PM_AWAKE)
- ath_hw_cycle_counters_update(common);
-
- if (cc->cycles > 0) {
- survey->filled |= SURVEY_INFO_CHANNEL_TIME |
- SURVEY_INFO_CHANNEL_TIME_BUSY |
- SURVEY_INFO_CHANNEL_TIME_RX |
- SURVEY_INFO_CHANNEL_TIME_TX;
- survey->channel_time += cc->cycles / div;
- survey->channel_time_busy += cc->rx_busy / div;
- survey->channel_time_rx += cc->rx_frame / div;
- survey->channel_time_tx += cc->tx_frame / div;
- }
-
- if (cc->cycles < div)
- return -1;
-
- if (cc->cycles > 0)
- ret = cc->rx_busy * 100 / cc->cycles;
-
- memset(cc, 0, sizeof(*cc));
-
- ath_update_survey_nf(sc, pos);
-
- return ret;
-}
-
static void __ath_cancel_work(struct ath_softc *sc)
{
cancel_work_sync(&sc->paprd_work);
cancel_work_sync(&sc->hw_check_work);
cancel_delayed_work_sync(&sc->tx_complete_work);
cancel_delayed_work_sync(&sc->hw_pll_work);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ if (ath9k_hw_mci_is_enabled(sc->sc_ah))
+ cancel_work_sync(&sc->mci_work);
+#endif
}
static void ath_cancel_work(struct ath_softc *sc)
@@ -235,16 +168,28 @@ static void ath_cancel_work(struct ath_softc *sc)
cancel_work_sync(&sc->hw_reset_work);
}
+static void ath_restart_work(struct ath_softc *sc)
+{
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
+
+ if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) ||
+ AR_SREV_9550(sc->sc_ah))
+ ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
+ msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
+
+ ath_start_rx_poll(sc, 3);
+ ath_start_ani(sc);
+}
+
static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
{
struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
bool ret = true;
ieee80211_stop_queues(sc->hw);
sc->hw_busy_count = 0;
- del_timer_sync(&common->ani.timer);
+ ath_stop_ani(sc);
del_timer_sync(&sc->rx_poll_timer);
ath9k_debug_samp_bb_mac(sc);
@@ -271,6 +216,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ unsigned long flags;
if (ath_startrecv(sc) != 0) {
ath_err(common, "Unable to restart recv logic\n");
@@ -279,36 +225,30 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath9k_cmn_update_txpow(ah, sc->curtxpow,
sc->config.txpowlimit, &sc->curtxpow);
+
+ clear_bit(SC_OP_HW_RESET, &sc->sc_flags);
ath9k_hw_set_interrupts(ah);
ath9k_hw_enable_interrupts(ah);
- if (!(sc->sc_flags & (SC_OP_OFFCHANNEL)) && start) {
- if (sc->sc_flags & SC_OP_BEACONS)
- ath_set_beacon(sc);
-
- ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
- ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
- ath_start_rx_poll(sc, 3);
- if (!common->disable_ani)
- ath_start_ani(common);
- }
-
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
- struct ath_hw_antcomb_conf div_ant_conf;
- u8 lna_conf;
-
- ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
+ if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
+ if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
+ goto work;
- if (sc->ant_rx == 1)
- lna_conf = ATH_ANT_DIV_COMB_LNA1;
- else
- lna_conf = ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.main_lna_conf = lna_conf;
- div_ant_conf.alt_lna_conf = lna_conf;
+ ath9k_set_beacon(sc);
- ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
+ }
+ work:
+ ath_restart_work(sc);
}
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
+ ath_ant_comb_update(sc);
+
ieee80211_wake_queues(sc->hw);
return true;
@@ -328,7 +268,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
spin_lock_bh(&sc->sc_pcu_lock);
- if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) {
+ if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
fastcc = false;
caldata = &sc->caldata;
}
@@ -371,7 +311,7 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
{
int r;
- if (sc->sc_flags & SC_OP_INVALID)
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return -EIO;
r = ath_reset_internal(sc, hchan, false);
@@ -379,262 +319,11 @@ static int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
return r;
}
-static void ath_paprd_activate(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath9k_hw_cal_data *caldata = ah->caldata;
- int chain;
-
- if (!caldata || !caldata->paprd_done)
- return;
-
- ath9k_ps_wakeup(sc);
- ar9003_paprd_enable(ah, false);
- for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(ah->txchainmask & BIT(chain)))
- continue;
-
- ar9003_paprd_populate_single_table(ah, caldata, chain);
- }
-
- ar9003_paprd_enable(ah, true);
- ath9k_ps_restore(sc);
-}
-
-static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_tx_control txctl;
- int time_left;
-
- memset(&txctl, 0, sizeof(txctl));
- txctl.txq = sc->tx.txq_map[WME_AC_BE];
-
- memset(tx_info, 0, sizeof(*tx_info));
- tx_info->band = hw->conf.channel->band;
- tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
- tx_info->control.rates[0].idx = 0;
- tx_info->control.rates[0].count = 1;
- tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
- tx_info->control.rates[1].idx = -1;
-
- init_completion(&sc->paprd_complete);
- txctl.paprd = BIT(chain);
-
- if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
- dev_kfree_skb_any(skb);
- return false;
- }
-
- time_left = wait_for_completion_timeout(&sc->paprd_complete,
- msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
-
- if (!time_left)
- ath_dbg(common, CALIBRATE,
- "Timeout waiting for paprd training on TX chain %d\n",
- chain);
-
- return !!time_left;
-}
-
-void ath_paprd_calibrate(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- struct ieee80211_hdr *hdr;
- struct sk_buff *skb = NULL;
- struct ath9k_hw_cal_data *caldata = ah->caldata;
- struct ath_common *common = ath9k_hw_common(ah);
- int ftype;
- int chain_ok = 0;
- int chain;
- int len = 1800;
-
- if (!caldata)
- return;
-
- ath9k_ps_wakeup(sc);
-
- if (ar9003_paprd_init_table(ah) < 0)
- goto fail_paprd;
-
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- goto fail_paprd;
-
- skb_put(skb, len);
- memset(skb->data, 0, len);
- hdr = (struct ieee80211_hdr *)skb->data;
- ftype = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC;
- hdr->frame_control = cpu_to_le16(ftype);
- hdr->duration_id = cpu_to_le16(10);
- memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
- memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
-
- for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
- if (!(ah->txchainmask & BIT(chain)))
- continue;
-
- chain_ok = 0;
-
- ath_dbg(common, CALIBRATE,
- "Sending PAPRD frame for thermal measurement on chain %d\n",
- chain);
- if (!ath_paprd_send_frame(sc, skb, chain))
- goto fail_paprd;
-
- ar9003_paprd_setup_gain_table(ah, chain);
-
- ath_dbg(common, CALIBRATE,
- "Sending PAPRD training frame on chain %d\n", chain);
- if (!ath_paprd_send_frame(sc, skb, chain))
- goto fail_paprd;
-
- if (!ar9003_paprd_is_done(ah)) {
- ath_dbg(common, CALIBRATE,
- "PAPRD not yet done on chain %d\n", chain);
- break;
- }
-
- if (ar9003_paprd_create_curve(ah, caldata, chain)) {
- ath_dbg(common, CALIBRATE,
- "PAPRD create curve failed on chain %d\n",
- chain);
- break;
- }
-
- chain_ok = 1;
- }
- kfree_skb(skb);
-
- if (chain_ok) {
- caldata->paprd_done = true;
- ath_paprd_activate(sc);
- }
-
-fail_paprd:
- ath9k_ps_restore(sc);
-}
-
-/*
- * This routine performs the periodic noise floor calibration function
- * that is used to adjust and optimize the chip performance. This
- * takes environmental changes (location, temperature) into account.
- * When the task is complete, it reschedules itself depending on the
- * appropriate interval that was calculated.
- */
-void ath_ani_calibrate(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *)data;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- bool longcal = false;
- bool shortcal = false;
- bool aniflag = false;
- unsigned int timestamp = jiffies_to_msecs(jiffies);
- u32 cal_interval, short_cal_interval, long_cal_interval;
- unsigned long flags;
-
- if (ah->caldata && ah->caldata->nfcal_interference)
- long_cal_interval = ATH_LONG_CALINTERVAL_INT;
- else
- long_cal_interval = ATH_LONG_CALINTERVAL;
-
- short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
- ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
-
- /* Only calibrate if awake */
- if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
- goto set_timer;
-
- ath9k_ps_wakeup(sc);
-
- /* Long calibration runs independently of short calibration. */
- if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
- longcal = true;
- common->ani.longcal_timer = timestamp;
- }
-
- /* Short calibration applies only while caldone is false */
- if (!common->ani.caldone) {
- if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
- shortcal = true;
- common->ani.shortcal_timer = timestamp;
- common->ani.resetcal_timer = timestamp;
- }
- } else {
- if ((timestamp - common->ani.resetcal_timer) >=
- ATH_RESTART_CALINTERVAL) {
- common->ani.caldone = ath9k_hw_reset_calvalid(ah);
- if (common->ani.caldone)
- common->ani.resetcal_timer = timestamp;
- }
- }
-
- /* Verify whether we must check ANI */
- if (sc->sc_ah->config.enable_ani
- && (timestamp - common->ani.checkani_timer) >=
- ah->config.ani_poll_interval) {
- aniflag = true;
- common->ani.checkani_timer = timestamp;
- }
-
- /* Call ANI routine if necessary */
- if (aniflag) {
- spin_lock_irqsave(&common->cc_lock, flags);
- ath9k_hw_ani_monitor(ah, ah->curchan);
- ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
- }
-
- /* Perform calibration if necessary */
- if (longcal || shortcal) {
- common->ani.caldone =
- ath9k_hw_calibrate(ah, ah->curchan,
- ah->rxchainmask, longcal);
- }
-
- ath_dbg(common, ANI,
- "Calibration @%lu finished: %s %s %s, caldone: %s\n",
- jiffies,
- longcal ? "long" : "", shortcal ? "short" : "",
- aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
-
- ath9k_ps_restore(sc);
-
-set_timer:
- /*
- * Set timer interval based on previous results.
- * The interval must be the shortest necessary to satisfy ANI,
- * short calibration and long calibration.
- */
- ath9k_debug_samp_bb_mac(sc);
- cal_interval = ATH_LONG_CALINTERVAL;
- if (sc->sc_ah->config.enable_ani)
- cal_interval = min(cal_interval,
- (u32)ah->config.ani_poll_interval);
- if (!common->ani.caldone)
- cal_interval = min(cal_interval, (u32)short_cal_interval);
-
- mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
- if (!ah->caldata->paprd_done)
- ieee80211_queue_work(sc->hw, &sc->paprd_work);
- else if (!ah->paprd_table_write_done)
- ath_paprd_activate(sc);
- }
-}
-
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
struct ieee80211_vif *vif)
{
struct ath_node *an;
+ u8 density;
an = (struct ath_node *)sta->drv_priv;
#ifdef CONFIG_ATH9K_DEBUGFS
@@ -649,7 +338,8 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
sta->ht_cap.ampdu_factor);
- an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
+ density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
+ an->mpdudensity = density;
}
}
@@ -668,13 +358,13 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
ath_tx_node_cleanup(sc, an);
}
-
void ath9k_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
-
+ enum ath_reset_type type;
+ unsigned long flags;
u32 status = sc->intrstatus;
u32 rxmask;
@@ -683,20 +373,17 @@ void ath9k_tasklet(unsigned long data)
if ((status & ATH9K_INT_FATAL) ||
(status & ATH9K_INT_BB_WATCHDOG)) {
-#ifdef CONFIG_ATH9K_DEBUGFS
- enum ath_reset_type type;
if (status & ATH9K_INT_FATAL)
type = RESET_TYPE_FATAL_INT;
else
type = RESET_TYPE_BB_WATCHDOG;
- RESET_STAT_INC(sc, type);
-#endif
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ ath9k_queue_reset(sc, type);
goto out;
}
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
/*
* TSF sync does not look correct; remain awake to sync with
@@ -705,6 +392,7 @@ void ath9k_tasklet(unsigned long data)
ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
@@ -766,15 +454,17 @@ irqreturn_t ath_isr(int irq, void *dev)
* touch anything. Note this can happen early
* on if the IRQ is shared.
*/
- if (sc->sc_flags & SC_OP_INVALID)
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return IRQ_NONE;
-
/* shared irq, not for us */
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
+ if(test_bit(SC_OP_HW_RESET, &sc->sc_flags))
+ return IRQ_HANDLED;
+
/*
* Figure out the reason(s) for the interrupt. Note
* that the hal returns a pseudo-ISR that may include
@@ -797,6 +487,17 @@ irqreturn_t ath_isr(int irq, void *dev)
if (status & SCHED_INTR)
sched = true;
+#ifdef CONFIG_PM_SLEEP
+ if (status & ATH9K_INT_BMISS) {
+ if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
+ ath_dbg(common, ANY, "during WoW we got a BMISS\n");
+ atomic_inc(&sc->wow_got_bmiss_intr);
+ atomic_dec(&sc->wow_sleep_proc_intr);
+ }
+ ath_dbg(common, INTERRUPT, "beacon miss interrupt\n");
+ }
+#endif
+
/*
* If a FATAL or RXORN interrupt is received, we have to reset the
* chip immediately.
@@ -827,24 +528,6 @@ irqreturn_t ath_isr(int irq, void *dev)
ath9k_hw_set_interrupts(ah);
}
- if (status & ATH9K_INT_MIB) {
- /*
- * Disable interrupts until we service the MIB
- * interrupt; otherwise it will continue to
- * fire.
- */
- ath9k_hw_disable_interrupts(ah);
- /*
- * Let the hal handle the event. We assume
- * it will clear whatever condition caused
- * the interrupt.
- */
- spin_lock(&common->cc_lock);
- ath9k_hw_proc_mib_event(ah);
- spin_unlock(&common->cc_lock);
- ath9k_hw_enable_interrupts(ah);
- }
-
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
if (status & ATH9K_INT_TIM_TIMER) {
if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
@@ -852,8 +535,10 @@ irqreturn_t ath_isr(int irq, void *dev)
/* Clear RxAbort bit so that we can
* receive frames */
ath9k_setpower(sc, ATH9K_PM_AWAKE);
+ spin_lock(&sc->sc_pm_lock);
ath9k_hw_setrxabort(sc->sc_ah, 0);
sc->ps_flags |= PS_WAIT_FOR_BEACON;
+ spin_unlock(&sc->sc_pm_lock);
}
chip_reset:
@@ -895,101 +580,20 @@ static int ath_reset(struct ath_softc *sc, bool retry_tx)
return r;
}
-void ath_reset_work(struct work_struct *work)
+void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
{
- struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
-
- ath_reset(sc, true);
-}
-
-void ath_hw_check(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc, hw_check_work);
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- unsigned long flags;
- int busy;
- u8 is_alive, nbeacon = 1;
-
- ath9k_ps_wakeup(sc);
- is_alive = ath9k_hw_check_alive(sc->sc_ah);
-
- if (is_alive && !AR_SREV_9300(sc->sc_ah))
- goto out;
- else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
- ath_dbg(common, RESET,
- "DCU stuck is detected. Schedule chip reset\n");
- RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
- goto sched_reset;
- }
-
- spin_lock_irqsave(&common->cc_lock, flags);
- busy = ath_update_survey_stats(sc);
- spin_unlock_irqrestore(&common->cc_lock, flags);
-
- ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
- busy, sc->hw_busy_count + 1);
- if (busy >= 99) {
- if (++sc->hw_busy_count >= 3) {
- RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
- goto sched_reset;
- }
- } else if (busy >= 0) {
- sc->hw_busy_count = 0;
- nbeacon = 3;
- }
-
- ath_start_rx_poll(sc, nbeacon);
- goto out;
-
-sched_reset:
+#ifdef CONFIG_ATH9K_DEBUGFS
+ RESET_STAT_INC(sc, type);
+#endif
+ set_bit(SC_OP_HW_RESET, &sc->sc_flags);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
-out:
- ath9k_ps_restore(sc);
-}
-
-static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
-{
- static int count;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-
- if (pll_sqsum >= 0x40000) {
- count++;
- if (count == 3) {
- /* Rx is hung for more than 500ms. Reset it */
- ath_dbg(common, RESET, "Possible RX hang, resetting\n");
- RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
- count = 0;
- }
- } else
- count = 0;
}
-void ath_hw_pll_work(struct work_struct *work)
+void ath_reset_work(struct work_struct *work)
{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- hw_pll_work.work);
- u32 pll_sqsum;
-
- /*
- * ensure that the PLL WAR is executed only
- * after the STA is associated (or) if the
- * beaconing had started in interfaces that
- * uses beacons.
- */
- if (!(sc->sc_flags & SC_OP_BEACONS))
- return;
-
- if (AR_SREV_9485(sc->sc_ah)) {
-
- ath9k_ps_wakeup(sc);
- pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah);
- ath9k_ps_restore(sc);
-
- ath_hw_pll_rx_hang_check(sc, pll_sqsum);
+ struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);
- ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/5);
- }
+ ath_reset(sc, true);
}
/**********************/
@@ -1054,10 +658,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ah->imask |= ATH9K_INT_CST;
- if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
- ah->imask |= ATH9K_INT_MCI;
+ ath_mci_enable(sc);
- sc->sc_flags &= ~SC_OP_INVALID;
+ clear_bit(SC_OP_INVALID, &sc->sc_flags);
sc->sc_ah->is_monitoring = false;
if (!ath_complete_reset(sc, false)) {
@@ -1080,8 +683,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
- ath9k_start_btcoex(sc);
-
if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
common->bus_ops->extn_synch_en(common);
@@ -1099,6 +700,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_tx_control txctl;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ unsigned long flags;
if (sc->ps_enabled) {
/*
@@ -1121,6 +723,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* completed and if needed, also for RX of buffered frames.
*/
ath9k_ps_wakeup(sc);
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
ath9k_hw_setrxabort(sc->sc_ah, 0);
if (ieee80211_is_pspoll(hdr->frame_control)) {
@@ -1136,6 +739,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* the ps_flags bit is cleared. We are just dropping
* the ps_usecount here.
*/
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath9k_ps_restore(sc);
}
@@ -1176,7 +780,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath_cancel_work(sc);
del_timer_sync(&sc->rx_poll_timer);
- if (sc->sc_flags & SC_OP_INVALID) {
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
@@ -1185,8 +789,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
- ath9k_stop_btcoex(sc);
-
spin_lock_bh(&sc->sc_pcu_lock);
/* prevent tasklets to enable interrupts once we disable them */
@@ -1233,7 +835,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
ath9k_ps_restore(sc);
- sc->sc_flags |= SC_OP_INVALID;
+ set_bit(SC_OP_INVALID, &sc->sc_flags);
sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
@@ -1253,16 +855,6 @@ bool ath9k_uses_beacons(int type)
}
}
-static void ath9k_reclaim_beacon(struct ath_softc *sc,
- struct ieee80211_vif *vif)
-{
- struct ath_vif *avp = (void *)vif->drv_priv;
-
- ath9k_set_beaconing_status(sc, false);
- ath_beacon_return(sc, avp);
- ath9k_set_beaconing_status(sc, true);
-}
-
static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath9k_vif_iter_data *iter_data = data;
@@ -1294,6 +886,18 @@ static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
}
}
+static void ath9k_sta_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = data;
+ struct ath_vif *avp = (void *)vif->drv_priv;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (avp->primary_sta_vif)
+ ath9k_set_assoc_state(sc, vif);
+}
+
/* Called with sc->mutex held. */
void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1327,21 +931,18 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_vif_iter_data iter_data;
+ enum nl80211_iftype old_opmode = ah->opmode;
ath9k_calculate_iter_data(hw, vif, &iter_data);
- /* Set BSSID mask. */
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
ath_hw_setbssidmask(common);
- /* Set op-mode & TSF */
if (iter_data.naps > 0) {
- ath9k_hw_set_tsfadjust(ah, 1);
- sc->sc_flags |= SC_OP_TSF_RESET;
+ ath9k_hw_set_tsfadjust(ah, true);
ah->opmode = NL80211_IFTYPE_AP;
} else {
- ath9k_hw_set_tsfadjust(ah, 0);
- sc->sc_flags &= ~SC_OP_TSF_RESET;
+ ath9k_hw_set_tsfadjust(ah, false);
if (iter_data.nmeshes)
ah->opmode = NL80211_IFTYPE_MESH_POINT;
@@ -1353,70 +954,27 @@ static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
ah->opmode = NL80211_IFTYPE_STATION;
}
- /*
- * Enable MIB interrupts when there are hardware phy counters.
- */
- if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0) {
- if (ah->config.enable_ani)
- ah->imask |= ATH9K_INT_MIB;
+ ath9k_hw_setopmode(ah);
+
+ if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0)
ah->imask |= ATH9K_INT_TSFOOR;
- } else {
- ah->imask &= ~ATH9K_INT_MIB;
+ else
ah->imask &= ~ATH9K_INT_TSFOOR;
- }
ath9k_hw_set_interrupts(ah);
- /* Set up ANI */
- if (iter_data.naps > 0) {
- sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
-
- if (!common->disable_ani) {
- sc->sc_flags |= SC_OP_ANI_RUN;
- ath_start_ani(common);
- }
-
- } else {
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
- }
-}
-
-/* Called with sc->mutex held, vif counts set up properly. */
-static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct ath_softc *sc = hw->priv;
-
- ath9k_calculate_summary_state(hw, vif);
-
- if (ath9k_uses_beacons(vif->type)) {
- /* Reserve a beacon slot for the vif */
- ath9k_set_beaconing_status(sc, false);
- ath_beacon_alloc(sc, vif);
- ath9k_set_beaconing_status(sc, true);
+ /*
+ * If we are changing the opmode to STATION,
+ * a beacon sync needs to be done.
+ */
+ if (ah->opmode == NL80211_IFTYPE_STATION &&
+ old_opmode == NL80211_IFTYPE_AP &&
+ test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ ieee80211_iterate_active_interfaces_atomic(sc->hw,
+ ath9k_sta_vif_iter, sc);
}
}
-void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
-{
- if (!AR_SREV_9300(sc->sc_ah))
- return;
-
- if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF))
- return;
-
- mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
- (nbeacon * sc->cur_beacon_conf.beacon_interval));
-}
-
-void ath_rx_poll(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *)data;
-
- ieee80211_queue_work(sc->hw, &sc->hw_check_work);
-}
-
static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1456,7 +1014,10 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
sc->nvifs++;
- ath9k_do_vif_add_setup(hw, vif);
+ ath9k_calculate_summary_state(hw, vif);
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_beacon_assign_slot(sc, vif);
+
out:
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
@@ -1473,6 +1034,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
int ret = 0;
ath_dbg(common, CONFIG, "Change Interface\n");
+
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
@@ -1485,15 +1047,16 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
}
}
- /* Clean up old vif stuff */
if (ath9k_uses_beacons(vif->type))
- ath9k_reclaim_beacon(sc, vif);
+ ath9k_beacon_remove_slot(sc, vif);
- /* Add new settings */
vif->type = new_type;
vif->p2p = p2p;
- ath9k_do_vif_add_setup(hw, vif);
+ ath9k_calculate_summary_state(hw, vif);
+ if (ath9k_uses_beacons(vif->type))
+ ath9k_beacon_assign_slot(sc, vif);
+
out:
ath9k_ps_restore(sc);
mutex_unlock(&sc->mutex);
@@ -1513,9 +1076,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
sc->nvifs--;
- /* Reclaim beacon resources */
if (ath9k_uses_beacons(vif->type))
- ath9k_reclaim_beacon(sc, vif);
+ ath9k_beacon_remove_slot(sc, vif);
ath9k_calculate_summary_state(hw, NULL);
@@ -1573,14 +1135,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
- if (sc->ps_idle)
+ if (sc->ps_idle) {
ath_cancel_work(sc);
- else
+ ath9k_stop_btcoex(sc);
+ } else {
+ ath9k_start_btcoex(sc);
/*
* The chip needs a reset to properly wake up from
* full sleep
*/
reset_channel = ah->chip_fullsleep;
+ }
}
/*
@@ -1618,11 +1183,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ah->curchan)
old_pos = ah->curchan - &ah->channels[0];
- if (hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- sc->sc_flags |= SC_OP_OFFCHANNEL;
- else
- sc->sc_flags &= ~SC_OP_OFFCHANNEL;
-
ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
curchan->center_freq, conf->channel_type);
@@ -1664,6 +1224,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return -EINVAL;
}
@@ -1813,21 +1374,18 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw,
qi.tqi_aifs = params->aifs;
qi.tqi_cwmin = params->cw_min;
qi.tqi_cwmax = params->cw_max;
- qi.tqi_burstTime = params->txop;
+ qi.tqi_burstTime = params->txop * 32;
ath_dbg(common, CONFIG,
"Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, txq->axq_qnum, params->aifs, params->cw_min,
params->cw_max, params->txop);
+ ath_update_max_aggr_framelen(sc, queue, qi.tqi_burstTime);
ret = ath_txq_update(sc, txq->axq_qnum, &qi);
if (ret)
ath_err(common, "TXQ Update failed\n");
- if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
- if (queue == WME_AC_BE && !ret)
- ath_beaconq_config(sc);
-
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
@@ -1896,83 +1454,53 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
return ret;
}
-static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+
+static void ath9k_set_assoc_state(struct ath_softc *sc,
+ struct ieee80211_vif *vif)
{
- struct ath_softc *sc = data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ unsigned long flags;
+
+ set_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
+ avp->primary_sta_vif = true;
/*
- * Skip iteration if primary station vif's bss info
- * was not changed
+ * Set the AID, BSSID and do beacon-sync only when
+ * the HW opmode is STATION.
+ *
+ * But the primary bit is set above in any case.
*/
- if (sc->sc_flags & SC_OP_PRIM_STA_VIF)
+ if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
return;
- if (bss_conf->assoc) {
- sc->sc_flags |= SC_OP_PRIM_STA_VIF;
- avp->primary_sta_vif = true;
- memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
- common->curaid = bss_conf->aid;
- ath9k_hw_write_associd(sc->sc_ah);
- ath_dbg(common, CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
- bss_conf->aid, common->curbssid);
- ath_beacon_config(sc, vif);
- /*
- * Request a re-configuration of Beacon related timers
- * on the receipt of the first Beacon frame (i.e.,
- * after time sync with the AP).
- */
- sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
- /* Reset rssi stats */
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
- sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
+ memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
+ common->curaid = bss_conf->aid;
+ ath9k_hw_write_associd(sc->sc_ah);
- ath_start_rx_poll(sc, 3);
+ sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
- if (!common->disable_ani) {
- sc->sc_flags |= SC_OP_ANI_RUN;
- ath_start_ani(common);
- }
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
+ sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- }
+ ath_dbg(common, CONFIG,
+ "Primary Station interface: %pM, BSSID: %pM\n",
+ vif->addr, common->curbssid);
}
-static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
+static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_softc *sc = data;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct ath_vif *avp = (void *)vif->drv_priv;
- if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
+ if (test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
return;
- /* Reconfigure bss info */
- if (avp->primary_sta_vif && !bss_conf->assoc) {
- ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n",
- common->curaid, common->curbssid);
- sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS);
- avp->primary_sta_vif = false;
- memset(common->curbssid, 0, ETH_ALEN);
- common->curaid = 0;
- }
-
- ieee80211_iterate_active_interfaces_atomic(
- sc->hw, ath9k_bss_iter, sc);
-
- /*
- * None of station vifs are associated.
- * Clear bssid & aid
- */
- if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF)) {
- ath9k_hw_write_associd(sc->sc_ah);
- /* Stop ANI */
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
- del_timer_sync(&sc->rx_poll_timer);
- memset(&sc->caldata, 0, sizeof(sc->caldata));
- }
+ if (bss_conf->assoc)
+ ath9k_set_assoc_state(sc, vif);
}
static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
@@ -1980,6 +1508,11 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+#define CHECK_ANI \
+ (BSS_CHANGED_ASSOC | \
+ BSS_CHANGED_IBSS | \
+ BSS_CHANGED_BEACON_ENABLED)
+
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
@@ -1990,53 +1523,41 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
if (changed & BSS_CHANGED_ASSOC) {
- ath9k_config_bss(sc, vif);
+ ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
+ bss_conf->bssid, bss_conf->assoc);
+
+ if (avp->primary_sta_vif && !bss_conf->assoc) {
+ clear_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
+ avp->primary_sta_vif = false;
- ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
- common->curbssid, common->curaid);
+ if (ah->opmode == NL80211_IFTYPE_STATION)
+ clear_bit(SC_OP_BEACONS, &sc->sc_flags);
+ }
+
+ ieee80211_iterate_active_interfaces_atomic(sc->hw,
+ ath9k_bss_assoc_iter, sc);
+
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) &&
+ ah->opmode == NL80211_IFTYPE_STATION) {
+ memset(common->curbssid, 0, ETH_ALEN);
+ common->curaid = 0;
+ ath9k_hw_write_associd(sc->sc_ah);
+ }
}
if (changed & BSS_CHANGED_IBSS) {
- /* There can be only one vif available */
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
-
- if (bss_conf->ibss_joined) {
- sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
-
- if (!common->disable_ani) {
- sc->sc_flags |= SC_OP_ANI_RUN;
- ath_start_ani(common);
- }
-
- } else {
- sc->sc_flags &= ~SC_OP_ANI_RUN;
- del_timer_sync(&common->ani.timer);
- del_timer_sync(&sc->rx_poll_timer);
- }
}
- /*
- * In case of AP mode, the HW TSF has to be reset
- * when the beacon interval changes.
- */
- if ((changed & BSS_CHANGED_BEACON_INT) &&
- (vif->type == NL80211_IFTYPE_AP))
- sc->sc_flags |= SC_OP_TSF_RESET;
-
- /* Configure beaconing (AP, IBSS, MESH) */
- if (ath9k_uses_beacons(vif->type) &&
- ((changed & BSS_CHANGED_BEACON) ||
- (changed & BSS_CHANGED_BEACON_ENABLED) ||
- (changed & BSS_CHANGED_BEACON_INT))) {
- ath9k_set_beaconing_status(sc, false);
- if (bss_conf->enable_beacon)
- ath_beacon_alloc(sc, vif);
- else
- avp->is_bslot_active = false;
- ath_beacon_config(sc, vif);
- ath9k_set_beaconing_status(sc, true);
+ if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
+ (changed & BSS_CHANGED_BEACON_INT)) {
+ if (ah->opmode == NL80211_IFTYPE_AP &&
+ bss_conf->enable_beacon)
+ ath9k_set_tsfadjust(sc, vif);
+ if (ath9k_allow_beacon_config(sc, vif))
+ ath9k_beacon_config(sc, vif, changed);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -2058,8 +1579,13 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & CHECK_ANI)
+ ath_check_ani(sc);
+
mutex_unlock(&sc->mutex);
ath9k_ps_restore(sc);
+
+#undef CHECK_ANI
}
static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
@@ -2215,7 +1741,7 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
return;
}
- if (sc->sc_flags & SC_OP_INVALID) {
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
@@ -2288,10 +1814,11 @@ static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
if (!vif)
return 0;
- avp = (void *)vif->drv_priv;
- if (!avp->is_bslot_active)
+ if (!vif->bss_conf.enable_beacon)
return 0;
+ avp = (void *)vif->drv_priv;
+
if (!sc->beacon.tx_processed && !edma) {
tasklet_disable(&sc->bcon_tasklet);
@@ -2345,12 +1872,29 @@ static u32 fill_chainmask(u32 cap, u32 new)
return filled;
}
+static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
+{
+ switch (val & 0x7) {
+ case 0x1:
+ case 0x3:
+ case 0x7:
+ return true;
+ case 0x2:
+ return (ah->caps.rx_chainmask == 1);
+ default:
+ return false;
+ }
+}
+
static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
- if (!rx_ant || !tx_ant)
+ if (ah->caps.rx_chainmask != 1)
+ rx_ant |= tx_ant;
+
+ if (!validate_antenna_mask(ah, rx_ant) || !tx_ant)
return -EINVAL;
sc->ant_rx = rx_ant;
@@ -2380,6 +1924,490 @@ static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
return 0;
}
+#ifdef CONFIG_ATH9K_DEBUGFS
+
+/* Ethtool support for get-stats */
+
+#define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
+static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ AMKSTR(d_tx_pkts),
+ AMKSTR(d_tx_bytes),
+ AMKSTR(d_tx_mpdus_queued),
+ AMKSTR(d_tx_mpdus_completed),
+ AMKSTR(d_tx_mpdu_xretries),
+ AMKSTR(d_tx_aggregates),
+ AMKSTR(d_tx_ampdus_queued_hw),
+ AMKSTR(d_tx_ampdus_queued_sw),
+ AMKSTR(d_tx_ampdus_completed),
+ AMKSTR(d_tx_ampdu_retries),
+ AMKSTR(d_tx_ampdu_xretries),
+ AMKSTR(d_tx_fifo_underrun),
+ AMKSTR(d_tx_op_exceeded),
+ AMKSTR(d_tx_timer_expiry),
+ AMKSTR(d_tx_desc_cfg_err),
+ AMKSTR(d_tx_data_underrun),
+ AMKSTR(d_tx_delim_underrun),
+
+ "d_rx_decrypt_crc_err",
+ "d_rx_phy_err",
+ "d_rx_mic_err",
+ "d_rx_pre_delim_crc_err",
+ "d_rx_post_delim_crc_err",
+ "d_rx_decrypt_busy_err",
+
+ "d_rx_phyerr_radar",
+ "d_rx_phyerr_ofdm_timing",
+ "d_rx_phyerr_cck_timing",
+
+};
+#define ATH9K_SSTATS_LEN ARRAY_SIZE(ath9k_gstrings_stats)
+
+static void ath9k_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *ath9k_gstrings_stats,
+ sizeof(ath9k_gstrings_stats));
+}
+
+static int ath9k_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH9K_SSTATS_LEN;
+ return 0;
+}
+
+#define PR_QNUM(_n) (sc->tx.txq_map[_n]->axq_qnum)
+#define AWDATA(elem) \
+ do { \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].elem; \
+ data[i++] = sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].elem; \
+ } while (0)
+
+#define AWDATA_RX(elem) \
+ do { \
+ data[i++] = sc->debug.stats.rxstats.elem; \
+ } while (0)
+
+static void ath9k_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath_softc *sc = hw->priv;
+ int i = 0;
+
+ data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_pkts_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_pkts_all);
+ data[i++] = (sc->debug.stats.txstats[PR_QNUM(WME_AC_BE)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_BK)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VI)].tx_bytes_all +
+ sc->debug.stats.txstats[PR_QNUM(WME_AC_VO)].tx_bytes_all);
+ AWDATA_RX(rx_pkts_all);
+ AWDATA_RX(rx_bytes_all);
+
+ AWDATA(tx_pkts_all);
+ AWDATA(tx_bytes_all);
+ AWDATA(queued);
+ AWDATA(completed);
+ AWDATA(xretries);
+ AWDATA(a_aggr);
+ AWDATA(a_queued_hw);
+ AWDATA(a_queued_sw);
+ AWDATA(a_completed);
+ AWDATA(a_retries);
+ AWDATA(a_xretries);
+ AWDATA(fifo_underrun);
+ AWDATA(xtxop);
+ AWDATA(timer_exp);
+ AWDATA(desc_cfg_err);
+ AWDATA(data_underrun);
+ AWDATA(delim_underrun);
+
+ AWDATA_RX(decrypt_crc_err);
+ AWDATA_RX(phy_err);
+ AWDATA_RX(mic_err);
+ AWDATA_RX(pre_delim_crc_err);
+ AWDATA_RX(post_delim_crc_err);
+ AWDATA_RX(decrypt_busy_err);
+
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_RADAR]);
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_OFDM_TIMING]);
+ AWDATA_RX(phy_err_stats[ATH9K_PHYERR_CCK_TIMING]);
+
+ WARN_ON(i != ATH9K_SSTATS_LEN);
+}
+
+/* End of ethtool get-stats functions */
+
+#endif
+
+
+#ifdef CONFIG_PM_SLEEP
+
+static void ath9k_wow_map_triggers(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan,
+ u32 *wow_triggers)
+{
+ if (wowlan->disconnect)
+ *wow_triggers |= AH_WOW_LINK_CHANGE |
+ AH_WOW_BEACON_MISS;
+ if (wowlan->magic_pkt)
+ *wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;
+
+ if (wowlan->n_patterns)
+ *wow_triggers |= AH_WOW_USER_PATTERN_EN;
+
+ sc->wow_enabled = *wow_triggers;
+
+}
+
+static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_capabilities *pcaps = &ah->caps;
+ int pattern_count = 0;
+ int i, byte_cnt;
+ u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
+ u8 dis_deauth_mask[MAX_PATTERN_SIZE];
+
+ memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
+ memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);
+
+ /*
+ * Create Dissassociate / Deauthenticate packet filter
+ *
+ * 2 bytes 2 byte 6 bytes 6 bytes 6 bytes
+ * +--------------+----------+---------+--------+--------+----
+ * + Frame Control+ Duration + DA + SA + BSSID +
+ * +--------------+----------+---------+--------+--------+----
+ *
+ * The above is the management frame format for disassociate/
+ * deauthenticate pattern, from this we need to match the first byte
+ * of 'Frame Control' and DA, SA, and BSSID fields
+ * (skipping 2nd byte of FC and Duration feild.
+ *
+ * Disassociate pattern
+ * --------------------
+ * Frame control = 00 00 1010
+ * DA, SA, BSSID = x:x:x:x:x:x
+ * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+ * | x:x:x:x:x:x -- 22 bytes
+ *
+ * Deauthenticate pattern
+ * ----------------------
+ * Frame control = 00 00 1100
+ * DA, SA, BSSID = x:x:x:x:x:x
+ * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
+ * | x:x:x:x:x:x -- 22 bytes
+ */
+
+ /* Create Disassociate Pattern first */
+
+ byte_cnt = 0;
+
+ /* Fill out the mask with all FF's */
+
+ for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
+ dis_deauth_mask[i] = 0xff;
+
+ /* copy the first byte of frame control field */
+ dis_deauth_pattern[byte_cnt] = 0xa0;
+ byte_cnt++;
+
+ /* skip 2nd byte of frame control and Duration field */
+ byte_cnt += 3;
+
+ /*
+ * need not match the destination mac address, it can be a broadcast
+ * mac address or an unicast to this station
+ */
+ byte_cnt += 6;
+
+ /* copy the source mac address */
+ memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
+
+ byte_cnt += 6;
+
+ /* copy the bssid, its same as the source mac address */
+
+ memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
+
+ /* Create Disassociate pattern mask */
+
+ if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_EXACT) {
+
+ if (pcaps->hw_caps & ATH9K_HW_WOW_PATTERN_MATCH_DWORD) {
+ /*
+ * for AR9280, because of hardware limitation, the
+ * first 4 bytes have to be matched for all patterns.
+ * the mask for disassociation and de-auth pattern
+ * matching need to enable the first 4 bytes.
+ * also the duration field needs to be filled.
+ */
+ dis_deauth_mask[0] = 0xf0;
+
+ /*
+ * fill in duration field
+ FIXME: what is the exact value ?
+ */
+ dis_deauth_pattern[2] = 0xff;
+ dis_deauth_pattern[3] = 0xff;
+ } else {
+ dis_deauth_mask[0] = 0xfe;
+ }
+
+ dis_deauth_mask[1] = 0x03;
+ dis_deauth_mask[2] = 0xc0;
+ } else {
+ dis_deauth_mask[0] = 0xef;
+ dis_deauth_mask[1] = 0x3f;
+ dis_deauth_mask[2] = 0x00;
+ dis_deauth_mask[3] = 0xfc;
+ }
+
+ ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");
+
+ ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
+
+ pattern_count++;
+ /*
+ * for de-authenticate pattern, only the first byte of the frame
+ * control field gets changed from 0xA0 to 0xC0
+ */
+ dis_deauth_pattern[0] = 0xC0;
+
+ ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
+ pattern_count, byte_cnt);
+
+}
+
+static void ath9k_wow_add_pattern(struct ath_softc *sc,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_wow_pattern *wow_pattern = NULL;
+ struct cfg80211_wowlan_trig_pkt_pattern *patterns = wowlan->patterns;
+ int mask_len;
+ s8 i = 0;
+
+ if (!wowlan->n_patterns)
+ return;
+
+ /*
+ * Add the new user configured patterns
+ */
+ for (i = 0; i < wowlan->n_patterns; i++) {
+
+ wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
+
+ if (!wow_pattern)
+ return;
+
+ /*
+ * TODO: convert the generic user space pattern to
+ * appropriate chip specific/802.11 pattern.
+ */
+
+ mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+ memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
+ memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
+ memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
+ patterns[i].pattern_len);
+ memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
+ wow_pattern->pattern_len = patterns[i].pattern_len;
+
+ /*
+ * just need to take care of deauth and disssoc pattern,
+ * make sure we don't overwrite them.
+ */
+
+ ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
+ wow_pattern->mask_bytes,
+ i + 2,
+ wow_pattern->pattern_len);
+ kfree(wow_pattern);
+
+ }
+
+}
+
+static int ath9k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 wow_triggers_enabled = 0;
+ int ret = 0;
+
+ mutex_lock(&sc->mutex);
+
+ ath_cancel_work(sc);
+ del_timer_sync(&common->ani.timer);
+ del_timer_sync(&sc->rx_poll_timer);
+
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
+ ath_dbg(common, ANY, "Device not present\n");
+ ret = -EINVAL;
+ goto fail_wow;
+ }
+
+ if (WARN_ON(!wowlan)) {
+ ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
+ ret = -EINVAL;
+ goto fail_wow;
+ }
+
+ if (!device_can_wakeup(sc->dev)) {
+ ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ /*
+ * none of the sta vifs are associated
+ * and we are not currently handling multivif
+ * cases, for instance we have to seperately
+ * configure 'keep alive frame' for each
+ * STA.
+ */
+
+ if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
+ ath_dbg(common, WOW, "None of the STA vifs are associated\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ if (sc->nvifs > 1) {
+ ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
+ ret = 1;
+ goto fail_wow;
+ }
+
+ ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);
+
+ ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
+ wow_triggers_enabled);
+
+ ath9k_ps_wakeup(sc);
+
+ ath9k_stop_btcoex(sc);
+
+ /*
+ * Enable wake up on recieving disassoc/deauth
+ * frame by default.
+ */
+ ath9k_wow_add_disassoc_deauth_pattern(sc);
+
+ if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
+ ath9k_wow_add_pattern(sc, wowlan);
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+ /*
+ * To avoid false wake, we enable beacon miss interrupt only
+ * when we go to sleep. We save the current interrupt mask
+ * so we can restore it after the system wakes up
+ */
+ sc->wow_intr_before_sleep = ah->imask;
+ ah->imask &= ~ATH9K_INT_GLOBAL;
+ ath9k_hw_disable_interrupts(ah);
+ ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ /*
+ * we can now sync irq and kill any running tasklets, since we already
+ * disabled interrupts and not holding a spin lock
+ */
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
+
+ ath9k_hw_wow_enable(ah, wow_triggers_enabled);
+
+ ath9k_ps_restore(sc);
+ ath_dbg(common, ANY, "WoW enabled in ath9k\n");
+ atomic_inc(&sc->wow_sleep_proc_intr);
+
+fail_wow:
+ mutex_unlock(&sc->mutex);
+ return ret;
+}
+
+static int ath9k_resume(struct ieee80211_hw *hw)
+{
+ struct ath_softc *sc = hw->priv;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 wow_status;
+
+ mutex_lock(&sc->mutex);
+
+ ath9k_ps_wakeup(sc);
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ ath9k_hw_disable_interrupts(ah);
+ ah->imask = sc->wow_intr_before_sleep;
+ ath9k_hw_set_interrupts(ah);
+ ath9k_hw_enable_interrupts(ah);
+
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+ wow_status = ath9k_hw_wow_wakeup(ah);
+
+ if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
+ /*
+ * some devices may not pick beacon miss
+ * as the reason they woke up so we add
+ * that here for that shortcoming.
+ */
+ wow_status |= AH_WOW_BEACON_MISS;
+ atomic_dec(&sc->wow_got_bmiss_intr);
+ ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
+ }
+
+ atomic_dec(&sc->wow_sleep_proc_intr);
+
+ if (wow_status) {
+ ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
+ ath9k_hw_wow_event_to_string(wow_status), wow_status);
+ }
+
+ ath_restart_work(sc);
+ ath9k_start_btcoex(sc);
+
+ ath9k_ps_restore(sc);
+ mutex_unlock(&sc->mutex);
+
+ return 0;
+}
+
+static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath_softc *sc = hw->priv;
+
+ mutex_lock(&sc->mutex);
+ device_init_wakeup(sc->dev, 1);
+ device_set_wakeup_enable(sc->dev, enabled);
+ mutex_unlock(&sc->mutex);
+}
+
+#endif
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2408,4 +2436,16 @@ struct ieee80211_ops ath9k_ops = {
.get_stats = ath9k_get_stats,
.set_antenna = ath9k_set_antenna,
.get_antenna = ath9k_get_antenna,
+
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ath9k_suspend,
+ .resume = ath9k_resume,
+ .set_wakeup = ath9k_set_wakeup,
+#endif
+
+#ifdef CONFIG_ATH9K_DEBUGFS
+ .get_et_sset_count = ath9k_get_et_sset_count,
+ .get_et_stats = ath9k_get_et_stats,
+ .get_et_strings = ath9k_get_et_strings,
+#endif
};
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
index 29fe52d69973..fb536e7e661b 100644
--- a/drivers/net/wireless/ath/ath9k/mci.c
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -20,7 +20,7 @@
#include "ath9k.h"
#include "mci.h"
-static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 };
+static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };
static struct ath_mci_profile_info*
ath_mci_find_profile(struct ath_mci_profile *mci,
@@ -28,11 +28,14 @@ ath_mci_find_profile(struct ath_mci_profile *mci,
{
struct ath_mci_profile_info *entry;
+ if (list_empty(&mci->info))
+ return NULL;
+
list_for_each_entry(entry, &mci->info, list) {
if (entry->conn_handle == info->conn_handle)
- break;
+ return entry;
}
- return entry;
+ return NULL;
}
static bool ath_mci_add_profile(struct ath_common *common,
@@ -49,31 +52,21 @@ static bool ath_mci_add_profile(struct ath_common *common,
(info->type != MCI_GPM_COEX_PROFILE_VOICE))
return false;
- entry = ath_mci_find_profile(mci, info);
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return false;
- if (entry) {
- memcpy(entry, info, 10);
- } else {
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return false;
-
- memcpy(entry, info, 10);
- INC_PROF(mci, info);
- list_add_tail(&info->list, &mci->info);
- }
+ memcpy(entry, info, 10);
+ INC_PROF(mci, info);
+ list_add_tail(&entry->list, &mci->info);
return true;
}
static void ath_mci_del_profile(struct ath_common *common,
struct ath_mci_profile *mci,
- struct ath_mci_profile_info *info)
+ struct ath_mci_profile_info *entry)
{
- struct ath_mci_profile_info *entry;
-
- entry = ath_mci_find_profile(mci, info);
-
if (!entry)
return;
@@ -86,12 +79,16 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci)
{
struct ath_mci_profile_info *info, *tinfo;
+ mci->aggr_limit = 0;
+
+ if (list_empty(&mci->info))
+ return;
+
list_for_each_entry_safe(info, tinfo, &mci->info, list) {
list_del(&info->list);
DEC_PROF(mci, info);
kfree(info);
}
- mci->aggr_limit = 0;
}
static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
@@ -116,42 +113,60 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
struct ath_mci_profile_info *info;
u32 num_profile = NUM_PROF(mci);
+ if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
+ goto skip_tuning;
+
+ btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
+
if (num_profile == 1) {
info = list_first_entry(&mci->info,
struct ath_mci_profile_info,
list);
- if (mci->num_sco && info->T == 12) {
- mci->aggr_limit = 8;
+ if (mci->num_sco) {
+ if (info->T == 12)
+ mci->aggr_limit = 8;
+ else if (info->T == 6) {
+ mci->aggr_limit = 6;
+ btcoex->duty_cycle = 30;
+ }
ath_dbg(common, MCI,
- "Single SCO, aggregation limit 2 ms\n");
- } else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) &&
- !info->master) {
- btcoex->btcoex_period = 60;
+ "Single SCO, aggregation limit %d 1/4 ms\n",
+ mci->aggr_limit);
+ } else if (mci->num_pan || mci->num_other_acl) {
+ /*
+ * For single PAN/FTP profile, allocate 35% for BT
+ * to improve WLAN throughput.
+ */
+ btcoex->duty_cycle = 35;
+ btcoex->btcoex_period = 53;
ath_dbg(common, MCI,
- "Single slave PAN/FTP, bt period 60 ms\n");
- } else if ((info->type == MCI_GPM_COEX_PROFILE_HID) &&
- (info->T > 0 && info->T < 50) &&
- (info->A > 1 || info->W > 1)) {
+ "Single PAN/FTP bt period %d ms dutycycle %d\n",
+ btcoex->duty_cycle, btcoex->btcoex_period);
+ } else if (mci->num_hid) {
btcoex->duty_cycle = 30;
- mci->aggr_limit = 8;
+ mci->aggr_limit = 6;
ath_dbg(common, MCI,
"Multiple attempt/timeout single HID "
- "aggregation limit 2 ms dutycycle 30%%\n");
+ "aggregation limit 1.5 ms dutycycle 30%%\n");
}
- } else if ((num_profile == 2) && (mci->num_hid == 2)) {
- btcoex->duty_cycle = 30;
- mci->aggr_limit = 8;
- ath_dbg(common, MCI,
- "Two HIDs aggregation limit 2 ms dutycycle 30%%\n");
- } else if (num_profile > 3) {
+ } else if (num_profile == 2) {
+ if (mci->num_hid == 2)
+ btcoex->duty_cycle = 30;
mci->aggr_limit = 6;
ath_dbg(common, MCI,
- "Three or more profiles aggregation limit 1.5 ms\n");
+ "Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n",
+ btcoex->duty_cycle);
+ } else if (num_profile >= 3) {
+ mci->aggr_limit = 4;
+ ath_dbg(common, MCI,
+ "Three or more profiles aggregation limit 1 ms\n");
}
+skip_tuning:
if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
if (IS_CHAN_HT(sc->sc_ah->curchan))
ath_mci_adjust_aggr_limit(btcoex);
@@ -159,18 +174,17 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
btcoex->btcoex_period >>= 1;
}
- ath9k_hw_btcoex_disable(sc->sc_ah);
ath9k_btcoex_timer_pause(sc);
+ ath9k_hw_btcoex_disable(sc->sc_ah);
if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
return;
- btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0);
+ btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
- btcoex->btcoex_period *= 1000;
- btcoex->btcoex_no_stomp = btcoex->btcoex_period *
+ btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
(100 - btcoex->duty_cycle) / 100;
ath9k_hw_btcoex_enable(sc->sc_ah);
@@ -181,20 +195,16 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 payload[4] = {0, 0, 0, 0};
switch (opcode) {
case MCI_GPM_BT_CAL_REQ:
- if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
- ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
- } else {
- ath_dbg(common, MCI, "MCI State mismatch: %d\n",
- ar9003_mci_state(ah, MCI_STATE_BT, NULL));
+ if (mci_hw->bt_state == MCI_BT_AWAKE) {
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START);
+ ath9k_queue_reset(sc, RESET_TYPE_MCI);
}
- break;
- case MCI_GPM_BT_CAL_DONE:
- ar9003_mci_state(ah, MCI_STATE_BT, NULL);
+ ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
break;
case MCI_GPM_BT_CAL_GRANT:
MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
@@ -207,32 +217,55 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
}
}
+static void ath9k_mci_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);
+
+ ath_mci_update_scheme(sc);
+}
+
static void ath_mci_process_profile(struct ath_softc *sc,
struct ath_mci_profile_info *info)
{
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_mci_profile_info *entry = NULL;
+
+ entry = ath_mci_find_profile(mci, info);
+ if (entry) {
+ /*
+ * Two MCI interrupts are generated while connecting to
+ * headset and A2DP profile, but only one MCI interrupt
+ * is generated with last added profile type while disconnecting
+ * both profiles.
+ * So while adding second profile type decrement
+ * the first one.
+ */
+ if (entry->type != info->type) {
+ DEC_PROF(mci, entry);
+ INC_PROF(mci, info);
+ }
+ memcpy(entry, info, 10);
+ }
if (info->start) {
- if (!ath_mci_add_profile(common, mci, info))
+ if (!entry && !ath_mci_add_profile(common, mci, info))
return;
} else
- ath_mci_del_profile(common, mci, info);
+ ath_mci_del_profile(common, mci, entry);
btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
mci->aggr_limit = mci->num_sco ? 6 : 0;
- if (NUM_PROF(mci)) {
+ btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
+ if (NUM_PROF(mci))
btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
- } else {
+ else
btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
ATH_BTCOEX_STOMP_LOW;
- btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
- }
- ath_mci_update_scheme(sc);
+ ieee80211_queue_work(sc->hw, &sc->mci_work);
}
static void ath_mci_process_status(struct ath_softc *sc,
@@ -247,8 +280,6 @@ static void ath_mci_process_status(struct ath_softc *sc,
if (status->is_link)
return;
- memset(&info, 0, sizeof(struct ath_mci_profile_info));
-
info.conn_handle = status->conn_handle;
if (ath_mci_find_profile(mci, &info))
return;
@@ -268,7 +299,7 @@ static void ath_mci_process_status(struct ath_softc *sc,
} while (++i < ATH_MCI_MAX_PROFILE);
if (old_num_mgmt != mci->num_mgmt)
- ath_mci_update_scheme(sc);
+ ieee80211_queue_work(sc->hw, &sc->mci_work);
}
static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
@@ -277,25 +308,20 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
struct ath_mci_profile_info profile_info;
struct ath_mci_profile_status profile_status;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u32 version;
- u8 major;
- u8 minor;
+ u8 major, minor;
u32 seq_num;
switch (opcode) {
case MCI_GPM_COEX_VERSION_QUERY:
- version = ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION,
- NULL);
+ ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
break;
case MCI_GPM_COEX_VERSION_RESPONSE:
major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
- version = (major << 8) + minor;
- version = ar9003_mci_state(ah, MCI_STATE_SET_BT_COEX_VERSION,
- &version);
+ ar9003_mci_set_bt_version(ah, major, minor);
break;
case MCI_GPM_COEX_STATUS_QUERY:
- ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL);
+ ar9003_mci_send_wlan_channels(ah);
break;
case MCI_GPM_COEX_BT_PROFILE_INFO:
memcpy(&profile_info,
@@ -322,7 +348,7 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
seq_num = *((u32 *)(rx_payload + 12));
ath_dbg(common, MCI,
- "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
+ "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n",
profile_status.is_link, profile_status.conn_handle,
profile_status.is_critical, seq_num);
@@ -362,6 +388,7 @@ int ath_mci_setup(struct ath_softc *sc)
mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
mci->sched_buf.bf_paddr);
+ INIT_WORK(&sc->mci_work, ath9k_mci_work);
ath_dbg(common, MCI, "MCI Initialized\n");
return 0;
@@ -389,6 +416,7 @@ void ath_mci_intr(struct ath_softc *sc)
struct ath_mci_coex *mci = &sc->mci_coex;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 mci_int, mci_int_rxmsg;
u32 offset, subtype, opcode;
u32 *pgpm;
@@ -397,8 +425,8 @@ void ath_mci_intr(struct ath_softc *sc)
ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
- if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
- ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
+ ar9003_mci_get_next_gpm_offset(ah, true, NULL);
return;
}
@@ -417,46 +445,41 @@ void ath_mci_intr(struct ath_softc *sc)
NULL, 0, true, false);
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
- ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
+ ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);
/*
* always do this for recovery and 2G/5G toggling and LNA_TRANS
*/
- ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
- if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
- if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) !=
- MCI_BT_SLEEP)
- ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE,
- NULL);
- }
+ if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
+ (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
+ MCI_BT_SLEEP))
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
- if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
- if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) !=
- MCI_BT_AWAKE)
- ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
- NULL);
- }
+ if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+ (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
+ MCI_BT_AWAKE))
+ mci_hw->bt_state = MCI_BT_SLEEP;
}
if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
(mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
- ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
+ ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
skip_gpm = true;
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
- offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
- NULL);
+ offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
@@ -465,8 +488,8 @@ void ath_mci_intr(struct ath_softc *sc)
while (more_data == MCI_GPM_MORE) {
pgpm = mci->gpm_buf.bf_addr;
- offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
- &more_data);
+ offset = ar9003_mci_get_next_gpm_offset(ah, false,
+ &more_data);
if (offset == MCI_GPM_INVALID)
break;
@@ -507,23 +530,17 @@ void ath_mci_intr(struct ath_softc *sc)
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
- int value_dbm = ar9003_mci_state(ah,
- MCI_STATE_CONT_RSSI_POWER, NULL);
+ int value_dbm = MS(mci_hw->cont_status,
+ AR_MCI_CONT_RSSI_POWER);
mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
- if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
- ath_dbg(common, MCI,
- "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n",
- ar9003_mci_state(ah,
- MCI_STATE_CONT_PRIORITY, NULL),
- value_dbm);
- else
- ath_dbg(common, MCI,
- "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n",
- ar9003_mci_state(ah,
- MCI_STATE_CONT_PRIORITY, NULL),
- value_dbm);
+ ath_dbg(common, MCI,
+ "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
+ MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
+ "tx" : "rx",
+ MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
+ value_dbm);
}
if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
@@ -538,3 +555,14 @@ void ath_mci_intr(struct ath_softc *sc)
mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
}
+
+void ath_mci_enable(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ if (!common->btcoex_enabled)
+ return;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ sc->sc_ah->imask |= ATH9K_INT_MCI;
+}
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
index c841444f53c2..fc14eea034eb 100644
--- a/drivers/net/wireless/ath/ath9k/mci.h
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -130,4 +130,13 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci);
int ath_mci_setup(struct ath_softc *sc);
void ath_mci_cleanup(struct ath_softc *sc);
void ath_mci_intr(struct ath_softc *sc);
-#endif
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+void ath_mci_enable(struct ath_softc *sc);
+#else
+static inline void ath_mci_enable(struct ath_softc *sc)
+{
+}
+#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
+
+#endif /* MCI_H*/
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index a856b51255f4..87b89d55e637 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -115,6 +115,9 @@ static void ath_pci_aspm_init(struct ath_common *common)
int pos;
u8 aspm;
+ if (!ah->is_pciexpress)
+ return;
+
pos = pci_pcie_cap(pdev);
if (!pos)
return;
@@ -138,6 +141,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
pci_write_config_byte(parent, pos + PCI_EXP_LNKCTL, aspm);
+ ath_info(common, "Disabling ASPM since BTCOEX is enabled\n");
return;
}
@@ -147,6 +151,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
ah->aspm_enabled = true;
/* Initialize PCIe PM and SERDES registers. */
ath9k_hw_configpcipowersave(ah, false);
+ ath_info(common, "ASPM enabled: 0x%x\n", aspm);
}
}
@@ -246,7 +251,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->mem = mem;
/* Will be cleared in ath9k_start() */
- sc->sc_flags |= SC_OP_INVALID;
+ set_bit(SC_OP_INVALID, &sc->sc_flags);
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
@@ -308,6 +313,9 @@ static int ath_pci_suspend(struct device *device)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
+ if (sc->wow_enabled)
+ return 0;
+
/* The device has to be moved to FULLSLEEP forcibly.
* Otherwise the chip never moved to full sleep,
* when no interface is up.
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 92a6c0a87f89..e034add9cd5a 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -770,7 +770,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct ieee80211_tx_rate *rates = tx_info->control.rates;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
- u8 try_per_rate, i = 0, rix, high_rix;
+ u8 try_per_rate, i = 0, rix;
int is_probe = 0;
if (rate_control_send_low(sta, priv_sta, txrc))
@@ -791,7 +791,6 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
rate_table = ath_rc_priv->rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
&is_probe, false);
- high_rix = rix;
/*
* If we're in HT mode and both us and our peer supports LDPC.
@@ -839,16 +838,16 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
try_per_rate = 8;
/*
- * Use a legacy rate as last retry to ensure that the frame
- * is tried in both MCS and legacy rates.
+ * If the last rate in the rate series is MCS and has
+ * more than 80% of per thresh, then use a legacy rate
+ * as last retry to ensure that the frame is tried in both
+ * MCS and legacy rate.
*/
- if ((rates[2].flags & IEEE80211_TX_RC_MCS) &&
- (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) ||
- (ath_rc_priv->per[high_rix] > 45)))
+ ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
+ if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) &&
+ (ath_rc_priv->per[rix] > 45))
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table,
&is_probe, true);
- else
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
/* All other rates in the series have RTS enabled */
ath_rc_rate_set_series(rate_table, &rates[i], txrc,
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index e1fcc68124dc..12aca02228c2 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -20,43 +20,6 @@
#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
-static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
- int mindelta, int main_rssi_avg,
- int alt_rssi_avg, int pkt_count)
-{
- return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
- (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
-}
-
-static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
- int curr_main_set, int curr_alt_set,
- int alt_rssi_avg, int main_rssi_avg)
-{
- bool result = false;
- switch (div_group) {
- case 0:
- if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
- result = true;
- break;
- case 1:
- case 2:
- if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
- (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
- (alt_rssi_avg >= (main_rssi_avg - 5))) ||
- ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
- (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
- (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
- (alt_rssi_avg >= 4))
- result = true;
- else
- result = false;
- break;
- }
-
- return result;
-}
-
static inline bool ath9k_check_auto_sleep(struct ath_softc *sc)
{
return sc->ps_enabled &&
@@ -303,7 +266,7 @@ static void ath_edma_start_recv(struct ath_softc *sc)
ath_opmode_init(sc);
- ath9k_hw_startpcureceive(sc->sc_ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
+ ath9k_hw_startpcureceive(sc->sc_ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
spin_unlock_bh(&sc->rx.rxbuflock);
}
@@ -322,8 +285,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
int error = 0;
spin_lock_init(&sc->sc_pcu_lock);
- sc->sc_flags &= ~SC_OP_RXFLUSH;
spin_lock_init(&sc->rx.rxbuflock);
+ clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
common->rx_bufsize = IEEE80211_MAX_MPDU_LEN / 2 +
sc->sc_ah->caps.rx_status_len;
@@ -467,6 +430,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
}
+ if (AR_SREV_9550(sc->sc_ah))
+ rfilt |= ATH9K_RX_FILTER_4ADDRESS;
+
return rfilt;
}
@@ -500,7 +466,7 @@ int ath_startrecv(struct ath_softc *sc)
start_recv:
ath_opmode_init(sc);
- ath9k_hw_startpcureceive(ah, (sc->sc_flags & SC_OP_OFFCHANNEL));
+ ath9k_hw_startpcureceive(ah, !!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL));
spin_unlock_bh(&sc->rx.rxbuflock);
@@ -535,11 +501,11 @@ bool ath_stoprecv(struct ath_softc *sc)
void ath_flushrecv(struct ath_softc *sc)
{
- sc->sc_flags |= SC_OP_RXFLUSH;
+ set_bit(SC_OP_RXFLUSH, &sc->sc_flags);
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
ath_rx_tasklet(sc, 1, true);
ath_rx_tasklet(sc, 1, false);
- sc->sc_flags &= ~SC_OP_RXFLUSH;
+ clear_bit(SC_OP_RXFLUSH, &sc->sc_flags);
}
static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
@@ -587,7 +553,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
sc->ps_flags &= ~PS_BEACON_SYNC;
ath_dbg(common, PS,
"Reconfigure Beacon timers based on timestamp from the AP\n");
- ath_set_beacon(sc);
+ ath9k_set_beacon(sc);
}
if (ath_beacon_dtim_pending_cab(skb)) {
@@ -624,13 +590,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
/* Process Beacon and CAB receive in PS state */
if (((sc->ps_flags & PS_WAIT_FOR_BEACON) || ath9k_check_auto_sleep(sc))
- && mybeacon)
+ && mybeacon) {
ath_rx_ps_beacon(sc, skb);
- else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
- (ieee80211_is_data(hdr->frame_control) ||
- ieee80211_is_action(hdr->frame_control)) &&
- is_multicast_ether_addr(hdr->addr1) &&
- !ieee80211_has_moredata(hdr->frame_control)) {
+ } else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
+ (ieee80211_is_data(hdr->frame_control) ||
+ ieee80211_is_action(hdr->frame_control)) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ !ieee80211_has_moredata(hdr->frame_control)) {
/*
* No more broadcast/multicast frames to be received at this
* point.
@@ -695,9 +661,9 @@ static bool ath_edma_get_buffers(struct ath_softc *sc,
__skb_unlink(skb, &rx_edma->rx_fifo);
list_add_tail(&bf->list, &sc->rx.rxbuf);
ath_rx_edma_buf_link(sc, qtype);
- } else {
- bf = NULL;
}
+
+ bf = NULL;
}
*dest = bf;
@@ -822,7 +788,8 @@ static bool ath9k_rx_accept(struct ath_common *common,
* descriptor does contain a valid key index. This has been observed
* mostly with CCMP encryption.
*/
- if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+ !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
if (!rx_stats->rs_datalen) {
@@ -1067,709 +1034,6 @@ static void ath9k_rx_skb_postprocess(struct ath_common *common,
rxs->flag &= ~RX_FLAG_DECRYPTED;
}
-static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
- struct ath_hw_antcomb_conf ant_conf,
- int main_rssi_avg)
-{
- antcomb->quick_scan_cnt = 0;
-
- if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
- antcomb->rssi_lna2 = main_rssi_avg;
- else if (ant_conf.main_lna_conf == ATH_ANT_DIV_COMB_LNA1)
- antcomb->rssi_lna1 = main_rssi_avg;
-
- switch ((ant_conf.main_lna_conf << 4) | ant_conf.alt_lna_conf) {
- case 0x10: /* LNA2 A-B */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
- break;
- case 0x20: /* LNA1 A-B */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
- break;
- case 0x21: /* LNA1 LNA2 */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA2;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->second_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case 0x12: /* LNA2 LNA1 */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->second_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case 0x13: /* LNA2 A+B */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA1;
- break;
- case 0x23: /* LNA1 A+B */
- antcomb->main_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- antcomb->first_quick_scan_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- antcomb->second_quick_scan_conf = ATH_ANT_DIV_COMB_LNA2;
- break;
- default:
- break;
- }
-}
-
-static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
- struct ath_hw_antcomb_conf *div_ant_conf,
- int main_rssi_avg, int alt_rssi_avg,
- int alt_ratio)
-{
- /* alt_good */
- switch (antcomb->quick_scan_cnt) {
- case 0:
- /* set alt to main, and alt to first conf */
- div_ant_conf->main_lna_conf = antcomb->main_conf;
- div_ant_conf->alt_lna_conf = antcomb->first_quick_scan_conf;
- break;
- case 1:
- /* set alt to main, and alt to first conf */
- div_ant_conf->main_lna_conf = antcomb->main_conf;
- div_ant_conf->alt_lna_conf = antcomb->second_quick_scan_conf;
- antcomb->rssi_first = main_rssi_avg;
- antcomb->rssi_second = alt_rssi_avg;
-
- if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
- /* main is LNA1 */
- if (ath_is_alt_ant_ratio_better(alt_ratio,
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
- ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
- main_rssi_avg, alt_rssi_avg,
- antcomb->total_pkt_count))
- antcomb->first_ratio = true;
- else
- antcomb->first_ratio = false;
- } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
- ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
- ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
- main_rssi_avg, alt_rssi_avg,
- antcomb->total_pkt_count))
- antcomb->first_ratio = true;
- else
- antcomb->first_ratio = false;
- } else {
- if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg +
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
- (alt_rssi_avg > main_rssi_avg)) &&
- (antcomb->total_pkt_count > 50))
- antcomb->first_ratio = true;
- else
- antcomb->first_ratio = false;
- }
- break;
- case 2:
- antcomb->alt_good = false;
- antcomb->scan_not_start = false;
- antcomb->scan = false;
- antcomb->rssi_first = main_rssi_avg;
- antcomb->rssi_third = alt_rssi_avg;
-
- if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
- antcomb->rssi_lna1 = alt_rssi_avg;
- else if (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- antcomb->rssi_lna2 = alt_rssi_avg;
- else if (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
- if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
- antcomb->rssi_lna2 = main_rssi_avg;
- else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
- antcomb->rssi_lna1 = main_rssi_avg;
- }
-
- if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
- ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)
- div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
- else
- div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
-
- if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
- ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
- main_rssi_avg, alt_rssi_avg,
- antcomb->total_pkt_count))
- antcomb->second_ratio = true;
- else
- antcomb->second_ratio = false;
- } else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
- ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
- ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
- main_rssi_avg, alt_rssi_avg,
- antcomb->total_pkt_count))
- antcomb->second_ratio = true;
- else
- antcomb->second_ratio = false;
- } else {
- if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg +
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
- (alt_rssi_avg > main_rssi_avg)) &&
- (antcomb->total_pkt_count > 50))
- antcomb->second_ratio = true;
- else
- antcomb->second_ratio = false;
- }
-
- /* set alt to the conf with maximun ratio */
- if (antcomb->first_ratio && antcomb->second_ratio) {
- if (antcomb->rssi_second > antcomb->rssi_third) {
- /* first alt*/
- if ((antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2*/
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->first_quick_scan_conf;
- } else if ((antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2)) {
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- } else {
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->second_quick_scan_conf;
- }
- } else if (antcomb->first_ratio) {
- /* first alt */
- if ((antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->first_quick_scan_conf;
- } else if (antcomb->second_ratio) {
- /* second alt */
- if ((antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->second_quick_scan_conf;
- } else {
- /* main is largest */
- if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf = antcomb->main_conf;
- }
- break;
- default:
- break;
- }
-}
-
-static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
- struct ath_ant_comb *antcomb, int alt_ratio)
-{
- if (ant_conf->div_group == 0) {
- /* Adjust the fast_div_bias based on main and alt lna conf */
- switch ((ant_conf->main_lna_conf << 4) |
- ant_conf->alt_lna_conf) {
- case 0x01: /* A-B LNA2 */
- ant_conf->fast_div_bias = 0x3b;
- break;
- case 0x02: /* A-B LNA1 */
- ant_conf->fast_div_bias = 0x3d;
- break;
- case 0x03: /* A-B A+B */
- ant_conf->fast_div_bias = 0x1;
- break;
- case 0x10: /* LNA2 A-B */
- ant_conf->fast_div_bias = 0x7;
- break;
- case 0x12: /* LNA2 LNA1 */
- ant_conf->fast_div_bias = 0x2;
- break;
- case 0x13: /* LNA2 A+B */
- ant_conf->fast_div_bias = 0x7;
- break;
- case 0x20: /* LNA1 A-B */
- ant_conf->fast_div_bias = 0x6;
- break;
- case 0x21: /* LNA1 LNA2 */
- ant_conf->fast_div_bias = 0x0;
- break;
- case 0x23: /* LNA1 A+B */
- ant_conf->fast_div_bias = 0x6;
- break;
- case 0x30: /* A+B A-B */
- ant_conf->fast_div_bias = 0x1;
- break;
- case 0x31: /* A+B LNA2 */
- ant_conf->fast_div_bias = 0x3b;
- break;
- case 0x32: /* A+B LNA1 */
- ant_conf->fast_div_bias = 0x3d;
- break;
- default:
- break;
- }
- } else if (ant_conf->div_group == 1) {
- /* Adjust the fast_div_bias based on main and alt_lna_conf */
- switch ((ant_conf->main_lna_conf << 4) |
- ant_conf->alt_lna_conf) {
- case 0x01: /* A-B LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x02: /* A-B LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x03: /* A-B A+B */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x10: /* LNA2 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x3f;
- else
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x12: /* LNA2 LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x13: /* LNA2 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x3f;
- else
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x20: /* LNA1 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x3f;
- else
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x21: /* LNA1 LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x23: /* LNA1 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x3f;
- else
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x30: /* A+B A-B */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x31: /* A+B LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x32: /* A+B LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- default:
- break;
- }
- } else if (ant_conf->div_group == 2) {
- /* Adjust the fast_div_bias based on main and alt_lna_conf */
- switch ((ant_conf->main_lna_conf << 4) |
- ant_conf->alt_lna_conf) {
- case 0x01: /* A-B LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x02: /* A-B LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x03: /* A-B A+B */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x10: /* LNA2 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x1;
- else
- ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x12: /* LNA2 LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x13: /* LNA2 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x1;
- else
- ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x20: /* LNA1 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x1;
- else
- ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x21: /* LNA1 LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x23: /* LNA1 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
- ant_conf->fast_div_bias = 0x1;
- else
- ant_conf->fast_div_bias = 0x2;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x30: /* A+B A-B */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x31: /* A+B LNA2 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- case 0x32: /* A+B LNA1 */
- ant_conf->fast_div_bias = 0x1;
- ant_conf->main_gaintb = 0;
- ant_conf->alt_gaintb = 0;
- break;
- default:
- break;
- }
- }
-}
-
-/* Antenna diversity and combining */
-static void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
-{
- struct ath_hw_antcomb_conf div_ant_conf;
- struct ath_ant_comb *antcomb = &sc->ant_comb;
- int alt_ratio = 0, alt_rssi_avg = 0, main_rssi_avg = 0, curr_alt_set;
- int curr_main_set;
- int main_rssi = rs->rs_rssi_ctl0;
- int alt_rssi = rs->rs_rssi_ctl1;
- int rx_ant_conf, main_ant_conf;
- bool short_scan = false;
-
- rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
- ATH_ANT_RX_MASK;
- main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
- ATH_ANT_RX_MASK;
-
- /* Record packet only when both main_rssi and alt_rssi is positive */
- if (main_rssi > 0 && alt_rssi > 0) {
- antcomb->total_pkt_count++;
- antcomb->main_total_rssi += main_rssi;
- antcomb->alt_total_rssi += alt_rssi;
- if (main_ant_conf == rx_ant_conf)
- antcomb->main_recv_cnt++;
- else
- antcomb->alt_recv_cnt++;
- }
-
- /* Short scan check */
- if (antcomb->scan && antcomb->alt_good) {
- if (time_after(jiffies, antcomb->scan_start_time +
- msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
- short_scan = true;
- else
- if (antcomb->total_pkt_count ==
- ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
- alt_ratio = ((antcomb->alt_recv_cnt * 100) /
- antcomb->total_pkt_count);
- if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
- short_scan = true;
- }
- }
-
- if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
- rs->rs_moreaggr) && !short_scan)
- return;
-
- if (antcomb->total_pkt_count) {
- alt_ratio = ((antcomb->alt_recv_cnt * 100) /
- antcomb->total_pkt_count);
- main_rssi_avg = (antcomb->main_total_rssi /
- antcomb->total_pkt_count);
- alt_rssi_avg = (antcomb->alt_total_rssi /
- antcomb->total_pkt_count);
- }
-
-
- ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
- curr_alt_set = div_ant_conf.alt_lna_conf;
- curr_main_set = div_ant_conf.main_lna_conf;
-
- antcomb->count++;
-
- if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
- if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
- ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
- main_rssi_avg);
- antcomb->alt_good = true;
- } else {
- antcomb->alt_good = false;
- }
-
- antcomb->count = 0;
- antcomb->scan = true;
- antcomb->scan_not_start = true;
- }
-
- if (!antcomb->scan) {
- if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
- alt_ratio, curr_main_set, curr_alt_set,
- alt_rssi_avg, main_rssi_avg)) {
- if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
- /* Switch main and alt LNA */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- }
-
- goto div_comb_done;
- } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
- (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
- /* Set alt to another LNA */
- if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
-
- goto div_comb_done;
- }
-
- if ((alt_rssi_avg < (main_rssi_avg +
- div_ant_conf.lna1_lna2_delta)))
- goto div_comb_done;
- }
-
- if (!antcomb->scan_not_start) {
- switch (curr_alt_set) {
- case ATH_ANT_DIV_COMB_LNA2:
- antcomb->rssi_lna2 = alt_rssi_avg;
- antcomb->rssi_lna1 = main_rssi_avg;
- antcomb->scan = true;
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1:
- antcomb->rssi_lna1 = alt_rssi_avg;
- antcomb->rssi_lna2 = main_rssi_avg;
- antcomb->scan = true;
- /* set to A+B */
- div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
- antcomb->rssi_add = alt_rssi_avg;
- antcomb->scan = true;
- /* set to A-B */
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
- antcomb->rssi_sub = alt_rssi_avg;
- antcomb->scan = false;
- if (antcomb->rssi_lna2 >
- (antcomb->rssi_lna1 +
- ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
- /* use LNA2 as main LNA */
- if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
- (antcomb->rssi_add > antcomb->rssi_sub)) {
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- } else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
- /* set to A-B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- } else {
- /* set to LNA1 */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- }
- } else {
- /* use LNA1 as main LNA */
- if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
- (antcomb->rssi_add > antcomb->rssi_sub)) {
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- } else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
- /* set to A-B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- } else {
- /* set to LNA2 */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- }
- }
- break;
- default:
- break;
- }
- } else {
- if (!antcomb->alt_good) {
- antcomb->scan_not_start = false;
- /* Set alt to another LNA */
- if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- } else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- }
- goto div_comb_done;
- }
- }
-
- ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
- main_rssi_avg, alt_rssi_avg,
- alt_ratio);
-
- antcomb->quick_scan_cnt++;
-
-div_comb_done:
- ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
- ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
-
- antcomb->scan_start_time = jiffies;
- antcomb->total_pkt_count = 0;
- antcomb->main_total_rssi = 0;
- antcomb->alt_total_rssi = 0;
- antcomb->main_recv_cnt = 0;
- antcomb->alt_recv_cnt = 0;
-}
-
int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
{
struct ath_buf *bf;
@@ -1803,7 +1067,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
do {
/* If handling rx interrupt and flush is in progress => exit */
- if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
+ if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0))
break;
memset(&rs, 0, sizeof(rs));
@@ -1841,13 +1105,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
else
rs.is_mybeacon = false;
+ sc->rx.num_pkts++;
ath_debug_stat_rx(sc, &rs);
/*
* If we're asked to flush receive queue, directly
* chain it back at the queue without processing it.
*/
- if (sc->sc_flags & SC_OP_RXFLUSH) {
+ if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags)) {
RX_STAT_INC(rx_drop_rxflush);
goto requeue_drop_frag;
}
@@ -1968,7 +1233,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb_trim(skb, skb->len - 8);
spin_lock_irqsave(&sc->sc_pm_lock, flags);
-
if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
PS_WAIT_FOR_PSPOLL_DATA)) ||
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 458f81b4a7cb..87cac8eb7834 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -696,9 +696,12 @@
#define AR_WA_BIT7 (1 << 7)
#define AR_WA_BIT23 (1 << 23)
#define AR_WA_D3_L1_DISABLE (1 << 14)
+#define AR_WA_UNTIE_RESET_EN (1 << 15) /* Enable PCI Reset
+ to POR (power-on-reset) */
#define AR_WA_D3_TO_L1_DISABLE_REAL (1 << 16)
#define AR_WA_ASPM_TIMER_BASED_DISABLE (1 << 17)
-#define AR_WA_RESET_EN (1 << 18) /* Sw Control to enable PCI-Reset to POR (bit 15) */
+#define AR_WA_RESET_EN (1 << 18) /* Enable PCI-Reset to
+ POR (bit 15) */
#define AR_WA_ANALOG_SHIFT (1 << 20)
#define AR_WA_POR_SHORT (1 << 21) /* PCI-E Phy reset control */
#define AR_WA_BIT22 (1 << 22)
@@ -798,6 +801,7 @@
#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
#define AR_SREV_VERSION_9462 0x280
#define AR_SREV_REVISION_9462_20 2
+#define AR_SREV_VERSION_9550 0x400
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -905,6 +909,9 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
+#define AR_SREV_9550(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
+
#define AR_SREV_9580(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10))
@@ -1028,6 +1035,8 @@ enum {
#define AR_PCIE_PM_CTRL (AR_SREV_9340(ah) ? 0x4004 : 0x4014)
#define AR_PCIE_PM_CTRL_ENA 0x00080000
+#define AR_PCIE_PHY_REG3 0x18c08
+
#define AR_NUM_GPIO 14
#define AR928X_NUM_GPIO 10
#define AR9285_NUM_GPIO 12
@@ -1231,6 +1240,8 @@ enum {
#define AR_RTC_PLL_CLKSEL 0x00000300
#define AR_RTC_PLL_CLKSEL_S 8
#define AR_RTC_PLL_BYPASS 0x00010000
+#define AR_RTC_PLL_NOPWD 0x00040000
+#define AR_RTC_PLL_NOPWD_S 18
#define PLL3 0x16188
#define PLL3_DO_MEAS_MASK 0x40000000
@@ -1643,11 +1654,11 @@ enum {
#define AR_TPC 0x80e8
#define AR_TPC_ACK 0x0000003f
-#define AR_TPC_ACK_S 0x00
+#define AR_TPC_ACK_S 0
#define AR_TPC_CTS 0x00003f00
-#define AR_TPC_CTS_S 0x08
+#define AR_TPC_CTS_S 8
#define AR_TPC_CHIRP 0x003f0000
-#define AR_TPC_CHIRP_S 0x16
+#define AR_TPC_CHIRP_S 16
#define AR_QUIET1 0x80fc
#define AR_QUIET1_NEXT_QUIET_S 0
@@ -1883,6 +1894,8 @@ enum {
#define AR_PCU_MISC_MODE2_HWWAR2 0x02000000
#define AR_PCU_MISC_MODE2_RESERVED2 0xFFFE0000
+#define AR_PCU_MISC_MODE3 0x83d0
+
#define AR_MAC_PCU_ASYNC_FIFO_REG3 0x8358
#define AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL 0x00000400
#define AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET 0x80000000
@@ -1905,6 +1918,140 @@ enum {
#define AR_RATE_DURATION_32 0x8780
#define AR_RATE_DURATION(_n) (AR_RATE_DURATION_0 + ((_n)<<2))
+/* WoW - Wake On Wireless */
+
+#define AR_PMCTRL_AUX_PWR_DET 0x10000000 /* Puts Chip in L2 state */
+#define AR_PMCTRL_D3COLD_VAUX 0x00800000
+#define AR_PMCTRL_HOST_PME_EN 0x00400000 /* Send OOB WAKE_L on WoW
+ event */
+#define AR_PMCTRL_WOW_PME_CLR 0x00200000 /* Clear WoW event */
+#define AR_PMCTRL_PWR_STATE_MASK 0x0f000000 /* Power State Mask */
+#define AR_PMCTRL_PWR_STATE_D1D3 0x0f000000 /* Activate D1 and D3 */
+#define AR_PMCTRL_PWR_STATE_D1D3_REAL 0x0f000000 /* Activate D1 and D3 */
+#define AR_PMCTRL_PWR_STATE_D0 0x08000000 /* Activate D0 */
+#define AR_PMCTRL_PWR_PM_CTRL_ENA 0x00008000 /* Enable power mgmt */
+
+#define AR_WOW_BEACON_TIMO_MAX 0xffffffff
+
+/*
+ * MAC WoW Registers
+ */
+
+#define AR_WOW_PATTERN 0x825C
+#define AR_WOW_COUNT 0x8260
+#define AR_WOW_BCN_EN 0x8270
+#define AR_WOW_BCN_TIMO 0x8274
+#define AR_WOW_KEEP_ALIVE_TIMO 0x8278
+#define AR_WOW_KEEP_ALIVE 0x827c
+#define AR_WOW_US_SCALAR 0x8284
+#define AR_WOW_KEEP_ALIVE_DELAY 0x8288
+#define AR_WOW_PATTERN_MATCH 0x828c
+#define AR_WOW_PATTERN_OFF1 0x8290 /* pattern bytes 0 -> 3 */
+#define AR_WOW_PATTERN_OFF2 0x8294 /* pattern bytes 4 -> 7 */
+
+/* for AR9285 or later version of chips */
+#define AR_WOW_EXACT 0x829c
+#define AR_WOW_LENGTH1 0x8360
+#define AR_WOW_LENGTH2 0X8364
+/* register to enable match for less than 256 bytes packets */
+#define AR_WOW_PATTERN_MATCH_LT_256B 0x8368
+
+#define AR_SW_WOW_CONTROL 0x20018
+#define AR_SW_WOW_ENABLE 0x1
+#define AR_SWITCH_TO_REFCLK 0x2
+#define AR_RESET_CONTROL 0x4
+#define AR_RESET_VALUE_MASK 0x8
+#define AR_HW_WOW_DISABLE 0x10
+#define AR_CLR_MAC_INTERRUPT 0x20
+#define AR_CLR_KA_INTERRUPT 0x40
+
+/* AR_WOW_PATTERN register values */
+#define AR_WOW_BACK_OFF_SHIFT(x) ((x & 0xf) << 28) /* in usecs */
+#define AR_WOW_MAC_INTR_EN 0x00040000
+#define AR_WOW_MAGIC_EN 0x00010000
+#define AR_WOW_PATTERN_EN(x) (x & 0xff)
+#define AR_WOW_PAT_FOUND_SHIFT 8
+#define AR_WOW_PATTERN_FOUND(x) (x & (0xff << AR_WOW_PAT_FOUND_SHIFT))
+#define AR_WOW_PATTERN_FOUND_MASK ((0xff) << AR_WOW_PAT_FOUND_SHIFT)
+#define AR_WOW_MAGIC_PAT_FOUND 0x00020000
+#define AR_WOW_MAC_INTR 0x00080000
+#define AR_WOW_KEEP_ALIVE_FAIL 0x00100000
+#define AR_WOW_BEACON_FAIL 0x00200000
+
+#define AR_WOW_STATUS(x) (x & (AR_WOW_PATTERN_FOUND_MASK | \
+ AR_WOW_MAGIC_PAT_FOUND | \
+ AR_WOW_KEEP_ALIVE_FAIL | \
+ AR_WOW_BEACON_FAIL))
+#define AR_WOW_CLEAR_EVENTS(x) (x & ~(AR_WOW_PATTERN_EN(0xff) | \
+ AR_WOW_MAGIC_EN | \
+ AR_WOW_MAC_INTR_EN | \
+ AR_WOW_BEACON_FAIL | \
+ AR_WOW_KEEP_ALIVE_FAIL))
+
+/* AR_WOW_COUNT register values */
+#define AR_WOW_AIFS_CNT(x) (x & 0xff)
+#define AR_WOW_SLOT_CNT(x) ((x & 0xff) << 8)
+#define AR_WOW_KEEP_ALIVE_CNT(x) ((x & 0xff) << 16)
+
+/* AR_WOW_BCN_EN register */
+#define AR_WOW_BEACON_FAIL_EN 0x00000001
+
+/* AR_WOW_BCN_TIMO rgister */
+#define AR_WOW_BEACON_TIMO 0x40000000 /* valid if BCN_EN is set */
+
+/* AR_WOW_KEEP_ALIVE_TIMO register */
+#define AR_WOW_KEEP_ALIVE_TIMO_VALUE
+#define AR_WOW_KEEP_ALIVE_NEVER 0xffffffff
+
+/* AR_WOW_KEEP_ALIVE register */
+#define AR_WOW_KEEP_ALIVE_AUTO_DIS 0x00000001
+#define AR_WOW_KEEP_ALIVE_FAIL_DIS 0x00000002
+
+/* AR_WOW_KEEP_ALIVE_DELAY register */
+#define AR_WOW_KEEP_ALIVE_DELAY_VALUE 0x000003e8 /* 1 msec */
+
+
+/*
+ * keep it long for beacon workaround - ensure no false alarm
+ */
+#define AR_WOW_BMISSTHRESHOLD 0x20
+
+/* AR_WOW_PATTERN_MATCH register */
+#define AR_WOW_PAT_END_OF_PKT(x) (x & 0xf)
+#define AR_WOW_PAT_OFF_MATCH(x) ((x & 0xf) << 8)
+
+/*
+ * default values for Wow Configuration for backoff, aifs, slot, keep-alive
+ * to be programmed into various registers.
+ */
+#define AR_WOW_PAT_BACKOFF 0x00000004 /* AR_WOW_PATTERN_REG */
+#define AR_WOW_CNT_AIFS_CNT 0x00000022 /* AR_WOW_COUNT_REG */
+#define AR_WOW_CNT_SLOT_CNT 0x00000009 /* AR_WOW_COUNT_REG */
+/*
+ * Keepalive count applicable for AR9280 2.0 and above.
+ */
+#define AR_WOW_CNT_KA_CNT 0x00000008 /* AR_WOW_COUNT register */
+
+/* WoW - Transmit buffer for keep alive frames */
+#define AR_WOW_TRANSMIT_BUFFER 0xe000 /* E000 - EFFC */
+
+#define AR_WOW_TXBUF(i) (AR_WOW_TRANSMIT_BUFFER + ((i) << 2))
+
+#define AR_WOW_KA_DESC_WORD2 0xe000
+
+#define AR_WOW_KA_DATA_WORD0 0xe030
+
+/* WoW Transmit Buffer for patterns */
+#define AR_WOW_TB_PATTERN(i) (0xe100 + (i << 8))
+#define AR_WOW_TB_MASK(i) (0xec00 + (i << 5))
+
+/* Currently Pattern 0-7 are supported - so bit 0-7 are set */
+#define AR_WOW_PATTERN_SUPPORTED 0xff
+#define AR_WOW_LENGTH_MAX 0xff
+#define AR_WOW_LEN1_SHIFT(_i) ((0x3 - ((_i) & 0x3)) << 0x3)
+#define AR_WOW_LENGTH1_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN1_SHIFT(_i))
+#define AR_WOW_LEN2_SHIFT(_i) ((0x7 - ((_i) & 0x7)) << 0x3)
+#define AR_WOW_LENGTH2_MASK(_i) (AR_WOW_LENGTH_MAX << AR_WOW_LEN2_SHIFT(_i))
#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
@@ -2077,12 +2224,6 @@ enum {
AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
- AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
- AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
- AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
- AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
- AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
- AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \
AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
#define AR_MCI_CPU_INT 0x1840
@@ -2098,8 +2239,8 @@ enum {
#define AR_MCI_CONT_STATUS 0x1848
#define AR_MCI_CONT_RSSI_POWER 0x000000FF
#define AR_MCI_CONT_RSSI_POWER_S 0
-#define AR_MCI_CONT_RRIORITY 0x0000FF00
-#define AR_MCI_CONT_RRIORITY_S 8
+#define AR_MCI_CONT_PRIORITY 0x0000FF00
+#define AR_MCI_CONT_PRIORITY_S 8
#define AR_MCI_CONT_TXRX 0x00010000
#define AR_MCI_CONT_TXRX_S 16
@@ -2162,10 +2303,6 @@ enum {
#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
-#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
-#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
-#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
-#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
#define AR_BTCOEX_WL_LNA 0x1940
#define AR_BTCOEX_RFGAIN_CTRL 0x1944
@@ -2211,5 +2348,7 @@ enum {
#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
+#define AR_GLB_SWREG_DISCONT_MODE 0x2002c
+#define AR_GLB_SWREG_DISCONT_EN_BT_WLAN 0x3
#endif
diff --git a/drivers/net/wireless/ath/ath9k/wow.c b/drivers/net/wireless/ath/ath9k/wow.c
new file mode 100644
index 000000000000..44a08eb53c62
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wow.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "ath9k.h"
+#include "reg.h"
+#include "hw-ops.h"
+
+const char *ath9k_hw_wow_event_to_string(u32 wow_event)
+{
+ if (wow_event & AH_WOW_MAGIC_PATTERN_EN)
+ return "Magic pattern";
+ if (wow_event & AH_WOW_USER_PATTERN_EN)
+ return "User pattern";
+ if (wow_event & AH_WOW_LINK_CHANGE)
+ return "Link change";
+ if (wow_event & AH_WOW_BEACON_MISS)
+ return "Beacon miss";
+
+ return "unknown reason";
+}
+EXPORT_SYMBOL(ath9k_hw_wow_event_to_string);
+
+static void ath9k_hw_config_serdes_wow_sleep(struct ath_hw *ah)
+{
+ int i;
+
+ for (i = 0; i < ah->iniPcieSerdesWow.ia_rows; i++)
+ REG_WRITE(ah, INI_RA(&ah->iniPcieSerdesWow, i, 0),
+ INI_RA(&ah->iniPcieSerdesWow, i, 1));
+
+ usleep_range(1000, 1500);
+}
+
+static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
+
+ /* set rx disable bit */
+ REG_WRITE(ah, AR_CR, AR_CR_RXD);
+
+ if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
+ ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
+ REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
+ return;
+ } else {
+ if (!AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, AR_RXDP, 0x0);
+ }
+
+ /* AR9280 WoW has sleep issue, do not set it to sleep */
+ if (AR_SREV_9280_20(ah))
+ return;
+
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
+}
+
+static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u8 sta_mac_addr[ETH_ALEN], ap_mac_addr[ETH_ALEN];
+ u32 ctl[13] = {0};
+ u32 data_word[KAL_NUM_DATA_WORDS];
+ u8 i;
+ u32 wow_ka_data_word0;
+
+ memcpy(sta_mac_addr, common->macaddr, ETH_ALEN);
+ memcpy(ap_mac_addr, common->curbssid, ETH_ALEN);
+
+ /* set the transmit buffer */
+ ctl[0] = (KAL_FRAME_LEN | (MAX_RATE_POWER << 16));
+
+ if (!(AR_SREV_9300_20_OR_LATER(ah)))
+ ctl[0] += (KAL_ANTENNA_MODE << 25);
+
+ ctl[1] = 0;
+ ctl[3] = 0xb; /* OFDM_6M hardware value for this rate */
+ ctl[4] = 0;
+ ctl[7] = (ah->txchainmask) << 2;
+
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ ctl[2] = 0xf << 16; /* tx_tries 0 */
+ else
+ ctl[2] = 0x7 << 16; /* tx_tries 0 */
+
+
+ for (i = 0; i < KAL_NUM_DESC_WORDS; i++)
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
+ /* for AR9300 family 13 descriptor words */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + i * 4), ctl[i]);
+
+ data_word[0] = (KAL_FRAME_TYPE << 2) | (KAL_FRAME_SUB_TYPE << 4) |
+ (KAL_TO_DS << 8) | (KAL_DURATION_ID << 16);
+ data_word[1] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+ (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+ data_word[2] = (sta_mac_addr[1] << 24) | (sta_mac_addr[0] << 16) |
+ (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+ data_word[3] = (sta_mac_addr[5] << 24) | (sta_mac_addr[4] << 16) |
+ (sta_mac_addr[3] << 8) | (sta_mac_addr[2]);
+ data_word[4] = (ap_mac_addr[3] << 24) | (ap_mac_addr[2] << 16) |
+ (ap_mac_addr[1] << 8) | (ap_mac_addr[0]);
+ data_word[5] = (ap_mac_addr[5] << 8) | (ap_mac_addr[4]);
+
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ /* AR9462 2.0 has an extra descriptor word (time based
+ * discard) compared to other chips */
+ REG_WRITE(ah, (AR_WOW_KA_DESC_WORD2 + (12 * 4)), 0);
+ wow_ka_data_word0 = AR_WOW_TXBUF(13);
+ } else {
+ wow_ka_data_word0 = AR_WOW_TXBUF(12);
+ }
+
+ for (i = 0; i < KAL_NUM_DATA_WORDS; i++)
+ REG_WRITE(ah, (wow_ka_data_word0 + i*4), data_word[i]);
+
+}
+
+void ath9k_hw_wow_apply_pattern(struct ath_hw *ah, u8 *user_pattern,
+ u8 *user_mask, int pattern_count,
+ int pattern_len)
+{
+ int i;
+ u32 pattern_val, mask_val;
+ u32 set, clr;
+
+ /* FIXME: should check count by querying the hardware capability */
+ if (pattern_count >= MAX_NUM_PATTERN)
+ return;
+
+ REG_SET_BIT(ah, AR_WOW_PATTERN, BIT(pattern_count));
+
+ /* set the registers for pattern */
+ for (i = 0; i < MAX_PATTERN_SIZE; i += 4) {
+ memcpy(&pattern_val, user_pattern, 4);
+ REG_WRITE(ah, (AR_WOW_TB_PATTERN(pattern_count) + i),
+ pattern_val);
+ user_pattern += 4;
+ }
+
+ /* set the registers for mask */
+ for (i = 0; i < MAX_PATTERN_MASK_SIZE; i += 4) {
+ memcpy(&mask_val, user_mask, 4);
+ REG_WRITE(ah, (AR_WOW_TB_MASK(pattern_count) + i), mask_val);
+ user_mask += 4;
+ }
+
+ /* set the pattern length to be matched
+ *
+ * AR_WOW_LENGTH1_REG1
+ * bit 31:24 pattern 0 length
+ * bit 23:16 pattern 1 length
+ * bit 15:8 pattern 2 length
+ * bit 7:0 pattern 3 length
+ *
+ * AR_WOW_LENGTH1_REG2
+ * bit 31:24 pattern 4 length
+ * bit 23:16 pattern 5 length
+ * bit 15:8 pattern 6 length
+ * bit 7:0 pattern 7 length
+ *
+ * the below logic writes out the new
+ * pattern length for the corresponding
+ * pattern_count, while masking out the
+ * other fields
+ */
+
+ ah->wow_event_mask |= BIT(pattern_count + AR_WOW_PAT_FOUND_SHIFT);
+
+ if (!AR_SREV_9285_12_OR_LATER(ah))
+ return;
+
+ if (pattern_count < 4) {
+ /* Pattern 0-3 uses AR_WOW_LENGTH1 register */
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN1_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH1_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH1, set, clr);
+ } else {
+ /* Pattern 4-7 uses AR_WOW_LENGTH2 register */
+ set = (pattern_len & AR_WOW_LENGTH_MAX) <<
+ AR_WOW_LEN2_SHIFT(pattern_count);
+ clr = AR_WOW_LENGTH2_MASK(pattern_count);
+ REG_RMW(ah, AR_WOW_LENGTH2, set, clr);
+ }
+
+}
+EXPORT_SYMBOL(ath9k_hw_wow_apply_pattern);
+
+u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
+{
+ u32 wow_status = 0;
+ u32 val = 0, rval;
+ /*
+ * read the WoW status register to know
+ * the wakeup reason
+ */
+ rval = REG_READ(ah, AR_WOW_PATTERN);
+ val = AR_WOW_STATUS(rval);
+
+ /*
+ * mask only the WoW events that we have enabled. Sometimes
+ * we have spurious WoW events from the AR_WOW_PATTERN
+ * register. This mask will clean it up.
+ */
+
+ val &= ah->wow_event_mask;
+
+ if (val) {
+
+ if (val & AR_WOW_MAGIC_PAT_FOUND)
+ wow_status |= AH_WOW_MAGIC_PATTERN_EN;
+
+ if (AR_WOW_PATTERN_FOUND(val))
+ wow_status |= AH_WOW_USER_PATTERN_EN;
+
+ if (val & AR_WOW_KEEP_ALIVE_FAIL)
+ wow_status |= AH_WOW_LINK_CHANGE;
+
+ if (val & AR_WOW_BEACON_FAIL)
+ wow_status |= AH_WOW_BEACON_MISS;
+
+ }
+
+ /*
+ * set and clear WOW_PME_CLEAR registers for the chip to
+ * generate next wow signal.
+ * disable D3 before accessing other registers ?
+ */
+
+ /* do we need to check the bit value 0x01000000 (7-10) ?? */
+ REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
+ AR_PMCTRL_PWR_STATE_D1D3);
+
+ /*
+ * clear all events
+ */
+ REG_WRITE(ah, AR_WOW_PATTERN,
+ AR_WOW_CLEAR_EVENTS(REG_READ(ah, AR_WOW_PATTERN)));
+
+ /*
+ * tie reset register for AR9002 family of chipsets
+ * NB: not tieing it back might have some repurcussions.
+ */
+
+ if (!AR_SREV_9300_20_OR_LATER(ah)) {
+ REG_SET_BIT(ah, AR_WA, AR_WA_UNTIE_RESET_EN |
+ AR_WA_POR_SHORT | AR_WA_RESET_EN);
+ }
+
+
+ /*
+ * restore the beacon threshold to init value
+ */
+ REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
+
+ /*
+ * Restore the way the PCI-E reset, Power-On-Reset, external
+ * PCIE_POR_SHORT pins are tied to its original value.
+ * Previously just before WoW sleep, we untie the PCI-E
+ * reset to our Chip's Power On Reset so that any PCI-E
+ * reset from the bus will not reset our chip
+ */
+
+ if (AR_SREV_9280_20_OR_LATER(ah) && ah->is_pciexpress)
+ ath9k_hw_configpcipowersave(ah, false);
+
+ ah->wow_event_mask = 0;
+
+ return wow_status;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_wakeup);
+
+void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
+{
+ u32 wow_event_mask;
+ u32 set, clr;
+
+ /*
+ * wow_event_mask is a mask to the AR_WOW_PATTERN register to
+ * indicate which WoW events we have enabled. The WoW events
+ * are from the 'pattern_enable' in this function and
+ * 'pattern_count' of ath9k_hw_wow_apply_pattern()
+ */
+
+ wow_event_mask = ah->wow_event_mask;
+
+ /*
+ * Untie Power-on-Reset from the PCI-E-Reset. When we are in
+ * WOW sleep, we do want the Reset from the PCI-E to disturb
+ * our hw state
+ */
+
+ if (ah->is_pciexpress) {
+
+ /*
+ * we need to untie the internal POR (power-on-reset)
+ * to the external PCI-E reset. We also need to tie
+ * the PCI-E Phy reset to the PCI-E reset.
+ */
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ set = AR_WA_RESET_EN | AR_WA_POR_SHORT;
+ clr = AR_WA_UNTIE_RESET_EN | AR_WA_D3_L1_DISABLE;
+ REG_RMW(ah, AR_WA, set, clr);
+ } else {
+ if (AR_SREV_9285(ah) || AR_SREV_9287(ah))
+ set = AR9285_WA_DEFAULT;
+ else
+ set = AR9280_WA_DEFAULT;
+
+ /*
+ * In AR9280 and AR9285, bit 14 in WA register
+ * (disable L1) should only be set when device
+ * enters D3 state and be cleared when device
+ * comes back to D0
+ */
+
+ if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
+ set |= AR_WA_D3_L1_DISABLE;
+
+ clr = AR_WA_UNTIE_RESET_EN;
+ set |= AR_WA_RESET_EN | AR_WA_POR_SHORT;
+ REG_RMW(ah, AR_WA, set, clr);
+
+ /*
+ * for WoW sleep, we reprogram the SerDes so that the
+ * PLL and CLK REQ are both enabled. This uses more
+ * power but otherwise WoW sleep is unstable and the
+ * chip may disappear.
+ */
+
+ if (AR_SREV_9285_12_OR_LATER(ah))
+ ath9k_hw_config_serdes_wow_sleep(ah);
+
+ }
+ }
+
+ /*
+ * set the power states appropriately and enable PME
+ */
+ set = AR_PMCTRL_HOST_PME_EN | AR_PMCTRL_PWR_PM_CTRL_ENA |
+ AR_PMCTRL_AUX_PWR_DET | AR_PMCTRL_WOW_PME_CLR;
+
+ /*
+ * set and clear WOW_PME_CLEAR registers for the chip
+ * to generate next wow signal.
+ */
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+ clr = AR_PMCTRL_WOW_PME_CLR;
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+
+ /*
+ * Setup for:
+ * - beacon misses
+ * - magic pattern
+ * - keep alive timeout
+ * - pattern matching
+ */
+
+ /*
+ * Program default values for pattern backoff, aifs/slot/KAL count,
+ * beacon miss timeout, KAL timeout, etc.
+ */
+
+ set = AR_WOW_BACK_OFF_SHIFT(AR_WOW_PAT_BACKOFF);
+ REG_SET_BIT(ah, AR_WOW_PATTERN, set);
+
+ set = AR_WOW_AIFS_CNT(AR_WOW_CNT_AIFS_CNT) |
+ AR_WOW_SLOT_CNT(AR_WOW_CNT_SLOT_CNT) |
+ AR_WOW_KEEP_ALIVE_CNT(AR_WOW_CNT_KA_CNT);
+ REG_SET_BIT(ah, AR_WOW_COUNT, set);
+
+ if (pattern_enable & AH_WOW_BEACON_MISS)
+ set = AR_WOW_BEACON_TIMO;
+ /* We are not using beacon miss, program a large value */
+ else
+ set = AR_WOW_BEACON_TIMO_MAX;
+
+ REG_WRITE(ah, AR_WOW_BCN_TIMO, set);
+
+ /*
+ * Keep alive timo in ms except AR9280
+ */
+ if (!pattern_enable || AR_SREV_9280(ah))
+ set = AR_WOW_KEEP_ALIVE_NEVER;
+ else
+ set = KAL_TIMEOUT * 32;
+
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_TIMO, set);
+
+ /*
+ * Keep alive delay in us. based on 'power on clock',
+ * therefore in usec
+ */
+ set = KAL_DELAY * 1000;
+ REG_WRITE(ah, AR_WOW_KEEP_ALIVE_DELAY, set);
+
+ /*
+ * Create keep alive pattern to respond to beacons
+ */
+ ath9k_wow_create_keep_alive_pattern(ah);
+
+ /*
+ * Configure MAC WoW Registers
+ */
+
+ set = 0;
+ /* Send keep alive timeouts anyway */
+ clr = AR_WOW_KEEP_ALIVE_AUTO_DIS;
+
+ if (pattern_enable & AH_WOW_LINK_CHANGE)
+ wow_event_mask |= AR_WOW_KEEP_ALIVE_FAIL;
+ else
+ set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+
+ /*
+ * FIXME: For now disable keep alive frame
+ * failure. This seems to sometimes trigger
+ * unnecessary wake up with AR9485 chipsets.
+ */
+ set = AR_WOW_KEEP_ALIVE_FAIL_DIS;
+
+ REG_RMW(ah, AR_WOW_KEEP_ALIVE, set, clr);
+
+
+ /*
+ * we are relying on a bmiss failure. ensure we have
+ * enough threshold to prevent false positives
+ */
+ REG_RMW_FIELD(ah, AR_RSSI_THR, AR_RSSI_THR_BM_THR,
+ AR_WOW_BMISSTHRESHOLD);
+
+ set = 0;
+ clr = 0;
+
+ if (pattern_enable & AH_WOW_BEACON_MISS) {
+ set = AR_WOW_BEACON_FAIL_EN;
+ wow_event_mask |= AR_WOW_BEACON_FAIL;
+ } else {
+ clr = AR_WOW_BEACON_FAIL_EN;
+ }
+
+ REG_RMW(ah, AR_WOW_BCN_EN, set, clr);
+
+ set = 0;
+ clr = 0;
+ /*
+ * Enable the magic packet registers
+ */
+ if (pattern_enable & AH_WOW_MAGIC_PATTERN_EN) {
+ set = AR_WOW_MAGIC_EN;
+ wow_event_mask |= AR_WOW_MAGIC_PAT_FOUND;
+ } else {
+ clr = AR_WOW_MAGIC_EN;
+ }
+ set |= AR_WOW_MAC_INTR_EN;
+ REG_RMW(ah, AR_WOW_PATTERN, set, clr);
+
+ /*
+ * For AR9285 and later version of chipsets
+ * enable WoW pattern match for packets less
+ * than 256 bytes for all patterns
+ */
+ if (AR_SREV_9285_12_OR_LATER(ah))
+ REG_WRITE(ah, AR_WOW_PATTERN_MATCH_LT_256B,
+ AR_WOW_PATTERN_SUPPORTED);
+
+ /*
+ * Set the power states appropriately and enable PME
+ */
+ clr = 0;
+ set = AR_PMCTRL_PWR_STATE_D1D3 | AR_PMCTRL_HOST_PME_EN |
+ AR_PMCTRL_PWR_PM_CTRL_ENA;
+ /*
+ * This is needed for AR9300 chipsets to wake-up
+ * the host.
+ */
+ if (AR_SREV_9300_20_OR_LATER(ah))
+ clr = AR_PCIE_PM_CTRL_ENA;
+
+ REG_RMW(ah, AR_PCIE_PM_CTRL, set, clr);
+
+ if (AR_SREV_9462(ah)) {
+ /*
+ * this is needed to prevent the chip waking up
+ * the host within 3-4 seconds with certain
+ * platform/BIOS. The fix is to enable
+ * D1 & D3 to match original definition and
+ * also match the OTP value. Anyway this
+ * is more related to SW WOW.
+ */
+ clr = AR_PMCTRL_PWR_STATE_D1D3;
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, clr);
+
+ set = AR_PMCTRL_PWR_STATE_D1D3_REAL;
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL, set);
+ }
+
+
+
+ REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
+
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ /* to bring down WOW power low margin */
+ set = BIT(13);
+ REG_SET_BIT(ah, AR_PCIE_PHY_REG3, set);
+ /* HW WoW */
+ clr = BIT(5);
+ REG_CLR_BIT(ah, AR_PCU_MISC_MODE3, clr);
+ }
+
+ ath9k_hw_set_powermode_wow_sleep(ah);
+ ah->wow_event_mask = wow_event_mask;
+}
+EXPORT_SYMBOL(ath9k_hw_wow_enable);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 4d571394c7a8..2c9da6b2ecb1 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -29,6 +29,8 @@
#define HT_LTF(_ns) (4 * (_ns))
#define SYMBOL_TIME(_ns) ((_ns) << 2) /* ns * 4 us */
#define SYMBOL_TIME_HALFGI(_ns) (((_ns) * 18 + 4) / 5) /* ns * 3.6 us */
+#define TIME_SYMBOLS(t) ((t) >> 2)
+#define TIME_SYMBOLS_HALFGI(t) (((t) * 5 - 4) / 18)
#define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
#define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
@@ -74,50 +76,23 @@ enum {
MCS_HT40_SGI,
};
-static int ath_max_4ms_framelen[4][32] = {
- [MCS_HT20] = {
- 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
- 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
- 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
- 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
- },
- [MCS_HT20_SGI] = {
- 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
- 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
- 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
- 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
- },
- [MCS_HT40] = {
- 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
- 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
- 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
- 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
- },
- [MCS_HT40_SGI] = {
- 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
- 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
- 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
- 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
- }
-};
-
/*********************/
/* Aggregation logic */
/*********************/
-static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
+void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
__acquires(&txq->axq_lock)
{
spin_lock_bh(&txq->axq_lock);
}
-static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
+void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
__releases(&txq->axq_lock)
{
spin_unlock_bh(&txq->axq_lock);
}
-static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
+void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
__releases(&txq->axq_lock)
{
struct sk_buff_head q;
@@ -614,10 +589,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
rcu_read_unlock();
- if (needreset) {
- RESET_STAT_INC(sc, RESET_TYPE_TX_ERROR);
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
- }
+ if (needreset)
+ ath9k_queue_reset(sc, RESET_TYPE_TX_ERROR);
}
static bool ath_lookup_legacy(struct ath_buf *bf)
@@ -650,6 +623,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ieee80211_tx_rate *rates;
u32 max_4ms_framelen, frmlen;
u16 aggr_limit, bt_aggr_limit, legacy = 0;
+ int q = tid->ac->txq->mac80211_qnum;
int i;
skb = bf->bf_mpdu;
@@ -658,8 +632,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
/*
* Find the lowest frame length among the rate series that will have a
- * 4ms transmit duration.
- * TODO - TXOP limit needs to be considered.
+ * 4ms (or TXOP limited) transmit duration.
*/
max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
@@ -682,7 +655,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
modeidx++;
- frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
+ frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx];
max_4ms_framelen = min(max_4ms_framelen, frmlen);
}
@@ -929,6 +902,44 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
return duration;
}
+static int ath_max_framelen(int usec, int mcs, bool ht40, bool sgi)
+{
+ int streams = HT_RC_2_STREAMS(mcs);
+ int symbols, bits;
+ int bytes = 0;
+
+ symbols = sgi ? TIME_SYMBOLS_HALFGI(usec) : TIME_SYMBOLS(usec);
+ bits = symbols * bits_per_symbol[mcs % 8][ht40] * streams;
+ bits -= OFDM_PLCP_BITS;
+ bytes = bits / 8;
+ bytes -= L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
+ if (bytes > 65532)
+ bytes = 65532;
+
+ return bytes;
+}
+
+void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
+{
+ u16 *cur_ht20, *cur_ht20_sgi, *cur_ht40, *cur_ht40_sgi;
+ int mcs;
+
+ /* 4ms is the default (and maximum) duration */
+ if (!txop || txop > 4096)
+ txop = 4096;
+
+ cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20];
+ cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI];
+ cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40];
+ cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI];
+ for (mcs = 0; mcs < 32; mcs++) {
+ cur_ht20[mcs] = ath_max_framelen(txop, mcs, false, false);
+ cur_ht20_sgi[mcs] = ath_max_framelen(txop, mcs, false, true);
+ cur_ht40[mcs] = ath_max_framelen(txop, mcs, true, false);
+ cur_ht40_sgi[mcs] = ath_max_framelen(txop, mcs, true, true);
+ }
+}
+
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_info *info, int len)
{
@@ -1165,6 +1176,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
{
struct ath_atx_tid *txtid;
struct ath_node *an;
+ u8 density;
an = (struct ath_node *)sta->drv_priv;
txtid = ATH_AN_2_TID(an, tid);
@@ -1172,6 +1184,17 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
return -EAGAIN;
+ /* update ampdu factor/density, they may have changed. This may happen
+ * in HT IBSS when a beacon with HT-info is received after the station
+ * has already been added.
+ */
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ sta->ht_cap.ampdu_factor);
+ density = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
+ an->mpdudensity = density;
+ }
+
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
@@ -1391,16 +1414,6 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
int error = 0;
struct ath9k_tx_queue_info qi;
- if (qnum == sc->beacon.beaconq) {
- /*
- * XXX: for beacon queue, we just save the parameter.
- * It will be picked up by ath_beaconq_config when
- * it's necessary.
- */
- sc->beacon.beacon_qi = *qinfo;
- return 0;
- }
-
BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
ath9k_hw_get_txq_props(ah, qnum, &qi);
@@ -1526,7 +1539,7 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
int i;
u32 npend = 0;
- if (sc->sc_flags & SC_OP_INVALID)
+ if (test_bit(SC_OP_INVALID, &sc->sc_flags))
return true;
ath9k_hw_abort_tx_dma(ah);
@@ -1574,7 +1587,8 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
struct ath_atx_ac *ac, *ac_tmp, *last_ac;
struct ath_atx_tid *tid, *last_tid;
- if (work_pending(&sc->hw_reset_work) || list_empty(&txq->axq_acq) ||
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
+ list_empty(&txq->axq_acq) ||
txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
return;
@@ -1976,7 +1990,8 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
ath_txq_lock(sc, txq);
if (txq == sc->tx.txq_map[q] &&
- ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
+ ++txq->pending_frames > sc->tx.txq_max_pending[q] &&
+ !txq->stopped) {
ieee80211_stop_queue(sc->hw, q);
txq->stopped = true;
}
@@ -1999,6 +2014,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
int q, padpos, padsize;
+ unsigned long flags;
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
@@ -2017,6 +2033,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
+ spin_lock_irqsave(&sc->sc_pm_lock, flags);
if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
ath_dbg(common, PS,
@@ -2026,13 +2043,15 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK));
}
+ spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
q = skb_get_queue_mapping(skb);
if (txq == sc->tx.txq_map[q]) {
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
- if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
+ if (txq->stopped &&
+ txq->pending_frames < sc->tx.txq_max_pending[q]) {
ieee80211_wake_queue(sc->hw, q);
txq->stopped = false;
}
@@ -2176,7 +2195,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_txq_lock(sc, txq);
for (;;) {
- if (work_pending(&sc->hw_reset_work))
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
break;
if (list_empty(&txq->axq_q)) {
@@ -2236,46 +2255,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_txq_unlock_complete(sc, txq);
}
-static void ath_tx_complete_poll_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- tx_complete_work.work);
- struct ath_txq *txq;
- int i;
- bool needreset = false;
-#ifdef CONFIG_ATH9K_DEBUGFS
- sc->tx_complete_poll_work_seen++;
-#endif
-
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i)) {
- txq = &sc->tx.txq[i];
- ath_txq_lock(sc, txq);
- if (txq->axq_depth) {
- if (txq->axq_tx_inprogress) {
- needreset = true;
- ath_txq_unlock(sc, txq);
- break;
- } else {
- txq->axq_tx_inprogress = true;
- }
- }
- ath_txq_unlock_complete(sc, txq);
- }
-
- if (needreset) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
- "tx hung, resetting the chip\n");
- RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
- ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
- }
-
- ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
- msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
-}
-
-
-
void ath_tx_tasklet(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
@@ -2299,7 +2278,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
int status;
for (;;) {
- if (work_pending(&sc->hw_reset_work))
+ if (test_bit(SC_OP_HW_RESET, &sc->sc_flags))
break;
status = ath9k_hw_txprocdesc(ah, NULL, (void *)&ts);
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 0cea20e3e250..376be11161c0 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -289,6 +289,7 @@ struct ar9170 {
unsigned int mem_block_size;
unsigned int rx_size;
unsigned int tx_seq_table;
+ bool ba_filter;
} fw;
/* interface configuration combinations */
@@ -425,6 +426,10 @@ struct ar9170 {
struct sk_buff *rx_failover;
int rx_failover_missing;
+ /* FIFO for collecting outstanding BlockAckRequest */
+ struct list_head bar_list[__AR9170_NUM_TXQ];
+ spinlock_t bar_list_lock[__AR9170_NUM_TXQ];
+
#ifdef CONFIG_CARL9170_WPC
struct {
bool pbc_state;
@@ -468,6 +473,12 @@ enum carl9170_ps_off_override_reasons {
PS_OFF_BCN = BIT(1),
};
+struct carl9170_bar_list_entry {
+ struct list_head list;
+ struct rcu_head head;
+ struct sk_buff *skb;
+};
+
struct carl9170_ba_stats {
u8 ampdu_len;
u8 ampdu_ack_len;
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index 195dc6538110..39a63874b275 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -138,7 +138,7 @@ int carl9170_reboot(struct ar9170 *ar)
if (!cmd)
return -ENOMEM;
- err = __carl9170_exec_cmd(ar, (struct carl9170_cmd *)cmd, true);
+ err = __carl9170_exec_cmd(ar, cmd, true);
return err;
}
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index 5c73c03872f3..c5ca6f1f5836 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -307,6 +307,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_WOL))
device_set_wakeup_enable(&ar->udev->dev, true);
+ if (SUPP(CARL9170FW_RX_BA_FILTER))
+ ar->fw.ba_filter = true;
+
if_comb_types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT);
diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h
index 6d9c0891ce7f..66848d47c88e 100644
--- a/drivers/net/wireless/ath/carl9170/fwdesc.h
+++ b/drivers/net/wireless/ath/carl9170/fwdesc.h
@@ -78,6 +78,9 @@ enum carl9170fw_feature_list {
/* HW (ANI, CCA, MIB) tally counters */
CARL9170FW_HW_COUNTERS,
+ /* Firmware will pass BA when BARs are queued */
+ CARL9170FW_RX_BA_FILTER,
+
/* KEEP LAST */
__CARL9170FW_FEATURE_NUM
};
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 8d2523b3f722..858e58dfc4dc 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -949,6 +949,9 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
u32 rx_filter = 0;
+ if (!ar->fw.ba_filter)
+ rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
+
if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
rx_filter |= CARL9170_RX_FILTER_BAD;
@@ -1753,6 +1756,9 @@ void *carl9170_alloc(size_t priv_size)
for (i = 0; i < ar->hw->queues; i++) {
skb_queue_head_init(&ar->tx_status[i]);
skb_queue_head_init(&ar->tx_pending[i]);
+
+ INIT_LIST_HEAD(&ar->bar_list[i]);
+ spin_lock_init(&ar->bar_list_lock[i]);
}
INIT_WORK(&ar->ps_work, carl9170_ps_work);
INIT_WORK(&ar->ping_work, carl9170_ping_work);
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 84b22eec7abd..6f6a34155667 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -161,7 +161,7 @@ static void carl9170_cmd_callback(struct ar9170 *ar, u32 len, void *buffer)
void carl9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
{
- struct carl9170_rsp *cmd = (void *) buf;
+ struct carl9170_rsp *cmd = buf;
struct ieee80211_vif *vif;
if (carl9170_check_sequence(ar, cmd->hdr.seq))
@@ -520,7 +520,7 @@ static u8 *carl9170_find_ie(u8 *data, unsigned int len, u8 ie)
*/
static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
{
- struct ieee80211_hdr *hdr = (void *) data;
+ struct ieee80211_hdr *hdr = data;
struct ieee80211_tim_ie *tim_ie;
u8 *tim;
u8 tim_len;
@@ -576,6 +576,53 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
}
}
+static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
+{
+ struct ieee80211_bar *bar = (void *) data;
+ struct carl9170_bar_list_entry *entry;
+ unsigned int queue;
+
+ if (likely(!ieee80211_is_back(bar->frame_control)))
+ return;
+
+ if (len <= sizeof(*bar) + FCS_LEN)
+ return;
+
+ queue = TID_TO_WME_AC(((le16_to_cpu(bar->control) &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT) & 7);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
+ struct sk_buff *entry_skb = entry->skb;
+ struct _carl9170_tx_superframe *super = (void *)entry_skb->data;
+ struct ieee80211_bar *entry_bar = (void *)super->frame_data;
+
+#define TID_CHECK(a, b) ( \
+ ((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \
+ ((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK))) \
+
+ if (bar->start_seq_num == entry_bar->start_seq_num &&
+ TID_CHECK(bar->control, entry_bar->control) &&
+ compare_ether_addr(bar->ra, entry_bar->ta) == 0 &&
+ compare_ether_addr(bar->ta, entry_bar->ra) == 0) {
+ struct ieee80211_tx_info *tx_info;
+
+ tx_info = IEEE80211_SKB_CB(entry_skb);
+ tx_info->flags |= IEEE80211_TX_STAT_ACK;
+
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_del_rcu(&entry->list);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ kfree_rcu(entry, head);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+#undef TID_CHECK
+}
+
static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
{
__le16 fc;
@@ -738,6 +785,8 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
carl9170_ps_beacon(ar, buf, mpdu_len);
+ carl9170_ba_check(ar, buf, mpdu_len);
+
skb = carl9170_rx_copy_data(buf, mpdu_len);
if (!skb)
goto drop;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index aed305177af6..6a8681407a1d 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -277,11 +277,11 @@ static void carl9170_tx_release(struct kref *ref)
return;
BUILD_BUG_ON(
- offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);
+ offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
- memset(&txinfo->status.ampdu_ack_len, 0,
+ memset(&txinfo->status.ack_signal, 0,
sizeof(struct ieee80211_tx_info) -
- offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
+ offsetof(struct ieee80211_tx_info, status.ack_signal));
if (atomic_read(&ar->tx_total_queued))
ar->tx_schedule = true;
@@ -436,6 +436,45 @@ out_rcu:
rcu_read_unlock();
}
+static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
+ struct ieee80211_tx_info *tx_info)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct ieee80211_bar *bar = (void *) super->frame_data;
+
+ /*
+ * Unlike all other frames, the status report for BARs does
+ * not directly come from the hardware as it is incapable of
+ * matching a BA to a previously send BAR.
+ * Instead the RX-path will scan for incoming BAs and set the
+ * IEEE80211_TX_STAT_ACK if it sees one that was likely
+ * caused by a BAR from us.
+ */
+
+ if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
+ !(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
+ struct carl9170_bar_list_entry *entry;
+ int queue = skb_get_queue_mapping(skb);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
+ if (entry->skb == skb) {
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_del_rcu(&entry->list);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ kfree_rcu(entry, head);
+ goto out;
+ }
+ }
+
+ WARN(1, "bar not found in %d - ra:%pM ta:%pM c:%x ssn:%x\n",
+ queue, bar->ra, bar->ta, bar->control,
+ bar->start_seq_num);
+out:
+ rcu_read_unlock();
+ }
+}
+
void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
const bool success)
{
@@ -445,6 +484,8 @@ void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
txinfo = IEEE80211_SKB_CB(skb);
+ carl9170_tx_bar_status(ar, skb, txinfo);
+
if (success)
txinfo->flags |= IEEE80211_TX_STAT_ACK;
else
@@ -1265,6 +1306,26 @@ out_rcu:
return false;
}
+static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
+{
+ struct _carl9170_tx_superframe *super = (void *) skb->data;
+ struct ieee80211_bar *bar = (void *) super->frame_data;
+
+ if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
+ skb->len >= sizeof(struct ieee80211_bar)) {
+ struct carl9170_bar_list_entry *entry;
+ unsigned int queue = skb_get_queue_mapping(skb);
+
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!WARN_ON_ONCE(!entry)) {
+ entry->skb = skb;
+ spin_lock_bh(&ar->bar_list_lock[queue]);
+ list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
+ spin_unlock_bh(&ar->bar_list_lock[queue]);
+ }
+ }
+}
+
static void carl9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
@@ -1287,6 +1348,8 @@ static void carl9170_tx(struct ar9170 *ar)
if (unlikely(carl9170_tx_ps_drop(ar, skb)))
continue;
+ carl9170_bar_check(ar, skb);
+
atomic_inc(&ar->tx_total_pending);
q = __carl9170_get_queue(ar, i);
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index e651db856344..2ec3e9191e4d 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
-#define CARL9170FW_VERSION_YEAR 11
-#define CARL9170FW_VERSION_MONTH 8
-#define CARL9170FW_VERSION_DAY 15
-#define CARL9170FW_VERSION_GIT "1.9.4"
+#define CARL9170FW_VERSION_YEAR 12
+#define CARL9170FW_VERSION_MONTH 7
+#define CARL9170FW_VERSION_DAY 7
+#define CARL9170FW_VERSION_GIT "1.9.6"
#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 0e81904956cf..5c54aa43ca2d 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -556,6 +556,9 @@ int ath_key_config(struct ath_common *common,
return -EIO;
set_bit(idx, common->keymap);
+ if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
+ set_bit(idx, common->ccmp_keymap);
+
if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
set_bit(idx + 64, common->keymap);
set_bit(idx, common->tkip_keymap);
@@ -582,6 +585,7 @@ void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
return;
clear_bit(key->hw_key_idx, common->keymap);
+ clear_bit(key->hw_key_idx, common->ccmp_keymap);
if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
return;
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index d07c0301da6a..4a4e98f71807 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -2952,10 +2952,10 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
/* current AP address - only in reassoc frame */
if (is_reassoc) {
memcpy(body.ap, priv->CurrentBSSID, 6);
- ssid_el_p = (u8 *)&body.ssid_el_id;
+ ssid_el_p = &body.ssid_el_id;
bodysize = 18 + priv->SSID_size;
} else {
- ssid_el_p = (u8 *)&body.ap[0];
+ ssid_el_p = &body.ap[0];
bodysize = 12 + priv->SSID_size;
}
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index c06b6cb5c91e..7c899fc7ddd0 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -870,13 +870,6 @@ struct b43_wl {
* handler, only. This basically is just the IRQ mask register. */
spinlock_t hardirq_lock;
- /* The number of queues that were registered with the mac80211 subsystem
- * initially. This is a backup copy of hw->queues in case hw->queues has
- * to be dynamically lowered at runtime (Firmware does not support QoS).
- * hw->queues has to be restored to the original value before unregistering
- * from the mac80211 subsystem. */
- u16 mac80211_initially_registered_queues;
-
/* Set this if we call ieee80211_register_hw() and check if we call
* ieee80211_unregister_hw(). */
bool hw_registred;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 1b988f26bdf1..b80352b308d5 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2359,6 +2359,8 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
if (err)
goto err_load;
+ fw->opensource = (ctx->req_type == B43_FWTYPE_OPENSOURCE);
+
return 0;
err_no_ucode:
@@ -2434,6 +2436,10 @@ static void b43_request_firmware(struct work_struct *work)
goto out;
start_ieee80211:
+ wl->hw->queues = B43_QOS_QUEUE_NUM;
+ if (!modparam_qos || dev->fw.opensource)
+ wl->hw->queues = 1;
+
err = ieee80211_register_hw(wl->hw);
if (err)
goto err_one_core_detach;
@@ -2537,11 +2543,9 @@ static int b43_upload_microcode(struct b43_wldev *dev)
dev->fw.hdr_format = B43_FW_HDR_410;
else
dev->fw.hdr_format = B43_FW_HDR_351;
- dev->fw.opensource = (fwdate == 0xFFFF);
+ WARN_ON(dev->fw.opensource != (fwdate == 0xFFFF));
- /* Default to use-all-queues. */
- dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
- dev->qos_enabled = !!modparam_qos;
+ dev->qos_enabled = dev->wl->hw->queues > 1;
/* Default to firmware/hardware crypto acceleration. */
dev->hwcrypto_enabled = true;
@@ -2559,14 +2563,8 @@ static int b43_upload_microcode(struct b43_wldev *dev)
/* Disable hardware crypto and fall back to software crypto. */
dev->hwcrypto_enabled = false;
}
- if (!(fwcapa & B43_FWCAPA_QOS)) {
- b43info(dev->wl, "QoS not supported by firmware\n");
- /* Disable QoS. Tweak hw->queues to 1. It will be restored before
- * ieee80211_unregister to make sure the networking core can
- * properly free possible resources. */
- dev->wl->hw->queues = 1;
- dev->qos_enabled = false;
- }
+ /* adding QoS support should use an offline discovery mechanism */
+ WARN(fwcapa & B43_FWCAPA_QOS, "QoS in OpenFW not supported\n");
} else {
b43info(dev->wl, "Loading firmware version %u.%u "
"(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
@@ -5298,8 +5296,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
- hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
- wl->mac80211_initially_registered_queues = hw->queues;
wl->hw_registred = false;
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
@@ -5374,10 +5370,6 @@ static void b43_bcma_remove(struct bcma_device *core)
B43_WARN_ON(!wl);
if (wl->current_dev == wldev && wl->hw_registred) {
- /* Restore the queues count before unregistering, because firmware detect
- * might have modified it. Restoring is important, so the networking
- * stack can properly free resources. */
- wl->hw->queues = wl->mac80211_initially_registered_queues;
b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);
}
@@ -5452,10 +5444,6 @@ static void b43_ssb_remove(struct ssb_device *sdev)
B43_WARN_ON(!wl);
if (wl->current_dev == wldev && wl->hw_registred) {
- /* Restore the queues count before unregistering, because firmware detect
- * might have modified it. Restoring is important, so the networking
- * stack can properly free resources. */
- wl->hw->queues = wl->mac80211_initially_registered_queues;
b43_leds_stop(wldev);
ieee80211_unregister_hw(wl->hw);
}
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 108118820b36..b92bb9c92ad1 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -1369,7 +1369,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
i << 2);
b43_nphy_poll_rssi(dev, 2, results[i], 8);
}
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < 4; i += 2) {
s32 curr;
s32 mind = 40;
s32 minpoll = 249;
@@ -1415,14 +1415,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, i);
b43_nphy_poll_rssi(dev, i, poll_results, 8);
for (j = 0; j < 4; j++) {
- if (j / 2 == core)
+ if (j / 2 == core) {
offset[j] = 232 - poll_results[j];
- if (offset[j] < 0)
- offset[j] = -(abs(offset[j] + 4) / 8);
- else
- offset[j] = (offset[j] + 4) / 8;
- b43_nphy_scale_offset_rssi(dev, 0,
- offset[2 * core], core + 1, j % 2, i);
+ if (offset[j] < 0)
+ offset[j] = -(abs(offset[j] + 4) / 8);
+ else
+ offset[j] = (offset[j] + 4) / 8;
+ b43_nphy_scale_offset_rssi(dev, 0,
+ offset[2 * core], core + 1, j % 2, i);
+ }
}
}
}
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index b31ccc02fa21..136510edf3cf 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -663,7 +663,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
u32 uninitialized_var(macstat);
u16 chanid;
u16 phytype;
- int padding;
+ int padding, rate_idx;
memset(&status, 0, sizeof(status));
@@ -766,16 +766,17 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
}
if (phystat0 & B43_RX_PHYST0_OFDM)
- status.rate_idx = b43_plcp_get_bitrate_idx_ofdm(plcp,
+ rate_idx = b43_plcp_get_bitrate_idx_ofdm(plcp,
phytype == B43_PHYTYPE_A);
else
- status.rate_idx = b43_plcp_get_bitrate_idx_cck(plcp);
- if (unlikely(status.rate_idx == -1)) {
+ rate_idx = b43_plcp_get_bitrate_idx_cck(plcp);
+ if (unlikely(rate_idx == -1)) {
/* PLCP seems to be corrupted.
* Drop the frame, if we are not interested in corrupted frames. */
if (!(dev->wl->filter_flags & FIF_PLCPFAIL))
goto drop;
}
+ status.rate_idx = rate_idx;
status.antenna = !!(phystat0 & B43_RX_PHYST0_ANT);
/*
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index f1f8bd09bd87..2d3c6644f82d 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -52,7 +52,7 @@ struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
desc = ring->descbase;
desc = &(desc[slot]);
- return (struct b43legacy_dmadesc32 *)desc;
+ return desc;
}
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
@@ -1072,7 +1072,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
- bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
+ bounce_skb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!bounce_skb) {
ring->current_slot = old_top_slot;
ring->used_slots = old_used_slots;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index eae691e2f7dd..8156135a0590 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1508,7 +1508,7 @@ static void b43legacy_release_firmware(struct b43legacy_wldev *dev)
static void b43legacy_print_fw_helptext(struct b43legacy_wl *wl)
{
- b43legacyerr(wl, "You must go to http://linuxwireless.org/en/users/"
+ b43legacyerr(wl, "You must go to http://wireless.kernel.org/en/users/"
"Drivers/b43#devicefirmware "
"and download the correct firmware (version 3).\n");
}
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index a8012f2749ee..b8ffea6f5c64 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -269,8 +269,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->plcp), plcp_fragment_len,
rate);
- b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
- (&txhdr->plcp_fb), plcp_fragment_len,
+ b43legacy_generate_plcp_hdr(&txhdr->plcp_fb, plcp_fragment_len,
rate_fb->hw_value);
/* PHY TX Control word */
@@ -340,8 +339,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
(&txhdr->rts_plcp),
len, rts_rate);
- b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
- (&txhdr->rts_plcp_fb),
+ b43legacy_generate_plcp_hdr(&txhdr->rts_plcp_fb,
len, rts_rate_fb);
hdr = (struct ieee80211_hdr *)(&txhdr->rts_frame);
txhdr->rts_dur_fb = hdr->duration_id;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index abb48032753b..9d5170b6df50 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -34,3 +34,5 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
sdio_chip.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
+brcmfmac-$(CONFIG_BRCMDBG) += \
+ dhd_dbg.o \ No newline at end of file
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 82f51dbd0d66..49765d34b4e0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -44,6 +44,7 @@
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
+#define SDIO_DEVICE_ID_BROADCOM_4334 0x4334
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@@ -52,6 +53,7 @@
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4334)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 9f637014486e..a11fe54f5950 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -613,6 +613,9 @@ struct brcmf_pub {
struct work_struct multicast_work;
u8 macvalue[ETH_ALEN];
atomic_t pend_8021x_cnt;
+#ifdef DEBUG
+ struct dentry *dbgfs_dir;
+#endif
};
struct brcmf_if_event {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 366916494be4..537f499cc5d2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -36,6 +36,13 @@ struct dngl_stats {
unsigned long multicast; /* multicast packets received */
};
+struct brcmf_bus_dcmd {
+ char *name;
+ char *param;
+ int param_len;
+ struct list_head list;
+};
+
/* interface structure between common and bus layer */
struct brcmf_bus {
u8 type; /* bus type */
@@ -50,6 +57,7 @@ struct brcmf_bus {
unsigned long tx_realloc; /* Tx packets realloced for headroom */
struct dngl_stats dstats; /* Stats for dongle-based data */
u8 align; /* bus alignment requirement */
+ struct list_head dcmd_list;
/* interface functions pointers */
/* Stop bus module: clear pending frames, disable data flow */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 236cb9fa460c..2621dd3d7dcd 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -800,13 +800,13 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
"event_msgs" + '\0' + bitvec */
char buf[128], *ptr;
- u32 dongle_align = drvr->bus_if->align;
- u32 glom = 0;
u32 roaming = 1;
uint bcn_timeout = 3;
int scan_assoc_time = 40;
int scan_unassoc_time = 40;
int i;
+ struct brcmf_bus_dcmd *cmdlst;
+ struct list_head *cur, *q;
mutex_lock(&drvr->proto_block);
@@ -827,17 +827,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
/* Print fw version info */
brcmf_dbg(ERROR, "Firmware version = %s\n", buf);
- /* Match Host and Dongle rx alignment */
- brcmf_c_mkiovar("bus:txglomalign", (char *)&dongle_align, 4, iovbuf,
- sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
- /* disable glom option per default */
- brcmf_c_mkiovar("bus:txglom", (char *)&glom, 4, iovbuf, sizeof(iovbuf));
- brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
- sizeof(iovbuf));
-
/* Setup timeout if Beacons are lost and roam is off to report
link down */
brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf,
@@ -874,6 +863,20 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
0, true);
}
+ /* set bus specific command if there is any */
+ list_for_each_safe(cur, q, &drvr->bus_if->dcmd_list) {
+ cmdlst = list_entry(cur, struct brcmf_bus_dcmd, list);
+ if (cmdlst->name && cmdlst->param && cmdlst->param_len) {
+ brcmf_c_mkiovar(cmdlst->name, cmdlst->param,
+ cmdlst->param_len, iovbuf,
+ sizeof(iovbuf));
+ brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR,
+ iovbuf, sizeof(iovbuf));
+ }
+ list_del(cur);
+ kfree(cmdlst);
+ }
+
mutex_unlock(&drvr->proto_block);
return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
new file mode 100644
index 000000000000..7f89540b56da
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2012 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/debugfs.h>
+#include <linux/if_ether.h>
+#include <linux/if.h>
+#include <linux/ieee80211.h>
+#include <linux/module.h>
+
+#include <defs.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include "dhd.h"
+#include "dhd_bus.h"
+#include "dhd_dbg.h"
+
+static struct dentry *root_folder;
+
+void brcmf_debugfs_init(void)
+{
+ root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (IS_ERR(root_folder))
+ root_folder = NULL;
+}
+
+void brcmf_debugfs_exit(void)
+{
+ if (!root_folder)
+ return;
+
+ debugfs_remove_recursive(root_folder);
+ root_folder = NULL;
+}
+
+int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+{
+ if (!root_folder)
+ return -ENODEV;
+
+ drvr->dbgfs_dir = debugfs_create_dir(dev_name(drvr->dev), root_folder);
+ return PTR_RET(drvr->dbgfs_dir);
+}
+
+void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+{
+ if (!IS_ERR_OR_NULL(drvr->dbgfs_dir))
+ debugfs_remove_recursive(drvr->dbgfs_dir);
+}
+
+struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr)
+{
+ return drvr->dbgfs_dir;
+}
+
+static
+ssize_t brcmf_debugfs_sdio_counter_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct brcmf_sdio_count *sdcnt = f->private_data;
+ char buf[750];
+ int res;
+
+ /* only allow read from start */
+ if (*ppos > 0)
+ return 0;
+
+ res = scnprintf(buf, sizeof(buf),
+ "intrcount: %u\nlastintrs: %u\n"
+ "pollcnt: %u\nregfails: %u\n"
+ "tx_sderrs: %u\nfcqueued: %u\n"
+ "rxrtx: %u\nrx_toolong: %u\n"
+ "rxc_errors: %u\nrx_hdrfail: %u\n"
+ "rx_badhdr: %u\nrx_badseq: %u\n"
+ "fc_rcvd: %u\nfc_xoff: %u\n"
+ "fc_xon: %u\nrxglomfail: %u\n"
+ "rxglomframes: %u\nrxglompkts: %u\n"
+ "f2rxhdrs: %u\nf2rxdata: %u\n"
+ "f2txdata: %u\nf1regdata: %u\n"
+ "tickcnt: %u\ntx_ctlerrs: %lu\n"
+ "tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n"
+ "rx_ctlpkts: %lu\nrx_readahead: %lu\n",
+ sdcnt->intrcount, sdcnt->lastintrs,
+ sdcnt->pollcnt, sdcnt->regfails,
+ sdcnt->tx_sderrs, sdcnt->fcqueued,
+ sdcnt->rxrtx, sdcnt->rx_toolong,
+ sdcnt->rxc_errors, sdcnt->rx_hdrfail,
+ sdcnt->rx_badhdr, sdcnt->rx_badseq,
+ sdcnt->fc_rcvd, sdcnt->fc_xoff,
+ sdcnt->fc_xon, sdcnt->rxglomfail,
+ sdcnt->rxglomframes, sdcnt->rxglompkts,
+ sdcnt->f2rxhdrs, sdcnt->f2rxdata,
+ sdcnt->f2txdata, sdcnt->f1regdata,
+ sdcnt->tickcnt, sdcnt->tx_ctlerrs,
+ sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs,
+ sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt);
+
+ return simple_read_from_buffer(data, count, ppos, buf, res);
+}
+
+static const struct file_operations brcmf_debugfs_sdio_counter_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = brcmf_debugfs_sdio_counter_read
+};
+
+void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
+ struct brcmf_sdio_count *sdcnt)
+{
+ struct dentry *dentry = drvr->dbgfs_dir;
+
+ if (!IS_ERR_OR_NULL(dentry))
+ debugfs_create_file("counters", S_IRUGO, dentry,
+ sdcnt, &brcmf_debugfs_sdio_counter_ops);
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index a2c4576cf9ff..b784920532d3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -76,4 +76,63 @@ do { \
extern int brcmf_msg_level;
+/*
+ * hold counter variables used in brcmfmac sdio driver.
+ */
+struct brcmf_sdio_count {
+ uint intrcount; /* Count of device interrupt callbacks */
+ uint lastintrs; /* Count as of last watchdog timer */
+ uint pollcnt; /* Count of active polls */
+ uint regfails; /* Count of R_REG failures */
+ uint tx_sderrs; /* Count of tx attempts with sd errors */
+ uint fcqueued; /* Tx packets that got queued */
+ uint rxrtx; /* Count of rtx requests (NAK to dongle) */
+ uint rx_toolong; /* Receive frames too long to receive */
+ uint rxc_errors; /* SDIO errors when reading control frames */
+ uint rx_hdrfail; /* SDIO errors on header reads */
+ uint rx_badhdr; /* Bad received headers (roosync?) */
+ uint rx_badseq; /* Mismatched rx sequence number */
+ uint fc_rcvd; /* Number of flow-control events received */
+ uint fc_xoff; /* Number which turned on flow-control */
+ uint fc_xon; /* Number which turned off flow-control */
+ uint rxglomfail; /* Failed deglom attempts */
+ uint rxglomframes; /* Number of glom frames (superframes) */
+ uint rxglompkts; /* Number of packets from glom frames */
+ uint f2rxhdrs; /* Number of header reads */
+ uint f2rxdata; /* Number of frame data reads */
+ uint f2txdata; /* Number of f2 frame writes */
+ uint f1regdata; /* Number of f1 register accesses */
+ uint tickcnt; /* Number of watchdog been schedule */
+ ulong tx_ctlerrs; /* Err of sending ctrl frames */
+ ulong tx_ctlpkts; /* Ctrl frames sent to dongle */
+ ulong rx_ctlerrs; /* Err of processing rx ctrl frames */
+ ulong rx_ctlpkts; /* Ctrl frames processed from dongle */
+ ulong rx_readahead_cnt; /* packets where header read-ahead was used */
+};
+
+struct brcmf_pub;
+#ifdef DEBUG
+void brcmf_debugfs_init(void);
+void brcmf_debugfs_exit(void);
+int brcmf_debugfs_attach(struct brcmf_pub *drvr);
+void brcmf_debugfs_detach(struct brcmf_pub *drvr);
+struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
+void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
+ struct brcmf_sdio_count *sdcnt);
+#else
+static inline void brcmf_debugfs_init(void)
+{
+}
+static inline void brcmf_debugfs_exit(void)
+{
+}
+static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
+{
+ return 0;
+}
+static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
+{
+}
+#endif
+
#endif /* _BRCMF_DBG_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 8933f9b31a9a..57bf1d7ee80f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -1007,6 +1007,9 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
drvr->bus_if->drvr = drvr;
drvr->dev = dev;
+ /* create device debugfs folder */
+ brcmf_debugfs_attach(drvr);
+
/* Attach and link in the protocol */
ret = brcmf_proto_attach(drvr);
if (ret != 0) {
@@ -1017,6 +1020,8 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev)
INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address);
INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list);
+ INIT_LIST_HEAD(&drvr->bus_if->dcmd_list);
+
return ret;
fail:
@@ -1123,6 +1128,7 @@ void brcmf_detach(struct device *dev)
brcmf_proto_detach(drvr);
}
+ brcmf_debugfs_detach(drvr);
bus_if->drvr = NULL;
kfree(drvr);
}
@@ -1192,6 +1198,8 @@ exit:
static void brcmf_driver_init(struct work_struct *work)
{
+ brcmf_debugfs_init();
+
#ifdef CONFIG_BRCMFMAC_SDIO
brcmf_sdio_init();
#endif
@@ -1219,6 +1227,7 @@ static void __exit brcmfmac_module_exit(void)
#ifdef CONFIG_BRCMFMAC_USB
brcmf_usb_exit();
#endif
+ brcmf_debugfs_exit();
}
module_init(brcmfmac_module_init);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 1dbf2be478c8..472f2ef5c652 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -31,6 +31,8 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/bcma/bcma.h>
+#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
@@ -48,6 +50,9 @@
#define CBUF_LEN (128)
+/* Device console log buffer state */
+#define CONSOLE_BUFFER_MAX 2024
+
struct rte_log_le {
__le32 buf; /* Can't be pointer on (64-bit) hosts */
__le32 buf_size;
@@ -281,7 +286,7 @@ struct rte_console {
* Shared structure between dongle and the host.
* The structure contains pointers to trap or assert information.
*/
-#define SDPCM_SHARED_VERSION 0x0002
+#define SDPCM_SHARED_VERSION 0x0003
#define SDPCM_SHARED_VERSION_MASK 0x00FF
#define SDPCM_SHARED_ASSERT_BUILT 0x0100
#define SDPCM_SHARED_ASSERT 0x0200
@@ -428,6 +433,29 @@ struct brcmf_console {
u8 *buf; /* Log buffer (host copy) */
uint last; /* Last buffer read index */
};
+
+struct brcmf_trap_info {
+ __le32 type;
+ __le32 epc;
+ __le32 cpsr;
+ __le32 spsr;
+ __le32 r0; /* a1 */
+ __le32 r1; /* a2 */
+ __le32 r2; /* a3 */
+ __le32 r3; /* a4 */
+ __le32 r4; /* v1 */
+ __le32 r5; /* v2 */
+ __le32 r6; /* v3 */
+ __le32 r7; /* v4 */
+ __le32 r8; /* v5 */
+ __le32 r9; /* sb/v6 */
+ __le32 r10; /* sl/v7 */
+ __le32 r11; /* fp/v8 */
+ __le32 r12; /* ip */
+ __le32 r13; /* sp */
+ __le32 r14; /* lr */
+ __le32 pc; /* r15 */
+};
#endif /* DEBUG */
struct sdpcm_shared {
@@ -439,6 +467,7 @@ struct sdpcm_shared {
u32 console_addr; /* Address of struct rte_console */
u32 msgtrace_addr;
u8 tag[32];
+ u32 brpt_addr;
};
struct sdpcm_shared_le {
@@ -450,6 +479,7 @@ struct sdpcm_shared_le {
__le32 console_addr; /* Address of struct rte_console */
__le32 msgtrace_addr;
u8 tag[32];
+ __le32 brpt_addr;
};
@@ -502,12 +532,9 @@ struct brcmf_sdio {
bool intr; /* Use interrupts */
bool poll; /* Use polling */
bool ipend; /* Device interrupt is pending */
- uint intrcount; /* Count of device interrupt callbacks */
- uint lastintrs; /* Count as of last watchdog timer */
uint spurious; /* Count of spurious interrupts */
uint pollrate; /* Ticks between device polls */
uint polltick; /* Tick counter */
- uint pollcnt; /* Count of active polls */
#ifdef DEBUG
uint console_interval;
@@ -515,8 +542,6 @@ struct brcmf_sdio {
uint console_addr; /* Console address from shared struct */
#endif /* DEBUG */
- uint regfails; /* Count of R_REG failures */
-
uint clkstate; /* State of sd and backplane clock(s) */
bool activity; /* Activity flag for clock down */
s32 idletime; /* Control for activity timeout */
@@ -531,33 +556,6 @@ struct brcmf_sdio {
/* Field to decide if rx of control frames happen in rxbuf or lb-pool */
bool usebufpool;
- /* Some additional counters */
- uint tx_sderrs; /* Count of tx attempts with sd errors */
- uint fcqueued; /* Tx packets that got queued */
- uint rxrtx; /* Count of rtx requests (NAK to dongle) */
- uint rx_toolong; /* Receive frames too long to receive */
- uint rxc_errors; /* SDIO errors when reading control frames */
- uint rx_hdrfail; /* SDIO errors on header reads */
- uint rx_badhdr; /* Bad received headers (roosync?) */
- uint rx_badseq; /* Mismatched rx sequence number */
- uint fc_rcvd; /* Number of flow-control events received */
- uint fc_xoff; /* Number which turned on flow-control */
- uint fc_xon; /* Number which turned off flow-control */
- uint rxglomfail; /* Failed deglom attempts */
- uint rxglomframes; /* Number of glom frames (superframes) */
- uint rxglompkts; /* Number of packets from glom frames */
- uint f2rxhdrs; /* Number of header reads */
- uint f2rxdata; /* Number of frame data reads */
- uint f2txdata; /* Number of f2 frame writes */
- uint f1regdata; /* Number of f1 register accesses */
- uint tickcnt; /* Number of watchdog been schedule */
- unsigned long tx_ctlerrs; /* Err of sending ctrl frames */
- unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */
- unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */
- unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */
- unsigned long rx_readahead_cnt; /* Number of packets where header
- * read-ahead was used. */
-
u8 *ctrl_frame_buf;
u32 ctrl_frame_len;
bool ctrl_frame_stat;
@@ -583,6 +581,7 @@ struct brcmf_sdio {
u32 fw_ptr;
bool txoff; /* Transmit flow-controlled */
+ struct brcmf_sdio_count sdcnt;
};
/* clkstate */
@@ -945,7 +944,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
if (ret == 0)
w_sdreg32(bus, SMB_INT_ACK,
offsetof(struct sdpcmd_regs, tosbmailbox));
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
/* Dongle recomposed rx frames, accept them again */
if (hmb_data & HMB_DATA_NAKHANDLED) {
@@ -984,12 +983,12 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
HMB_DATA_FCDATA_SHIFT;
if (fcbits & ~bus->flowcontrol)
- bus->fc_xoff++;
+ bus->sdcnt.fc_xoff++;
if (bus->flowcontrol & ~fcbits)
- bus->fc_xon++;
+ bus->sdcnt.fc_xon++;
- bus->fc_rcvd++;
+ bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fcbits;
}
@@ -1021,7 +1020,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_RF_TERM, &err);
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
/* Wait until the packet has been flushed (device/FIFO stable) */
for (lastrbc = retries = 0xffff; retries > 0; retries--) {
@@ -1029,7 +1028,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
SBSDIO_FUNC1_RFRAMEBCHI, &err);
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_RFRAMEBCLO, &err);
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
@@ -1047,11 +1046,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries);
if (rtx) {
- bus->rxrtx++;
+ bus->sdcnt.rxrtx++;
err = w_sdreg32(bus, SMB_NAK,
offsetof(struct sdpcmd_regs, tosbmailbox));
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
if (err == 0)
bus->rxskip = true;
}
@@ -1243,7 +1242,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
dlen);
errcode = -1;
}
- bus->f2rxdata++;
+ bus->sdcnt.f2rxdata++;
/* On failure, kill the superframe, allow a couple retries */
if (errcode < 0) {
@@ -1256,7 +1255,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
- bus->rxglomfail++;
+ bus->sdcnt.rxglomfail++;
brcmf_sdbrcm_free_glom(bus);
}
return 0;
@@ -1312,7 +1311,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (rxseq != seq) {
brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n",
seq, rxseq);
- bus->rx_badseq++;
+ bus->sdcnt.rx_badseq++;
rxseq = seq;
}
@@ -1376,7 +1375,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
- bus->rxglomfail++;
+ bus->sdcnt.rxglomfail++;
brcmf_sdbrcm_free_glom(bus);
}
bus->nextlen = 0;
@@ -1402,7 +1401,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (rxseq != seq) {
brcmf_dbg(GLOM, "rx_seq %d, expected %d\n",
seq, rxseq);
- bus->rx_badseq++;
+ bus->sdcnt.rx_badseq++;
rxseq = seq;
}
rxseq++;
@@ -1441,8 +1440,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
down(&bus->sdsem);
}
- bus->rxglomframes++;
- bus->rxglompkts += bus->glom.qlen;
+ bus->sdcnt.rxglomframes++;
+ bus->sdcnt.rxglompkts += bus->glom.qlen;
}
return num;
}
@@ -1526,7 +1525,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
len, len - doff, bus->sdiodev->bus_if->maxctl);
bus->sdiodev->bus_if->dstats.rx_errors++;
- bus->rx_toolong++;
+ bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
@@ -1536,13 +1535,13 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
bus->sdiodev->sbwad,
SDIO_FUNC_2,
F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
- bus->f2rxdata++;
+ bus->sdcnt.f2rxdata++;
/* Control frame failures need retransmission */
if (sdret < 0) {
brcmf_dbg(ERROR, "read %d control bytes failed: %d\n",
rdlen, sdret);
- bus->rxc_errors++;
+ bus->sdcnt.rxc_errors++;
brcmf_sdbrcm_rxfail(bus, true, true);
goto done;
}
@@ -1589,7 +1588,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
/* Read the entire frame */
sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, *pkt);
- bus->f2rxdata++;
+ bus->sdcnt.f2rxdata++;
if (sdret < 0) {
brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
@@ -1630,7 +1629,7 @@ brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
if ((u16)~(*len ^ check)) {
brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n",
nextlen, *len, check);
- bus->rx_badhdr++;
+ bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto fail;
}
@@ -1746,7 +1745,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
bus->nextlen = 0;
}
- bus->rx_readahead_cnt++;
+ bus->sdcnt.rx_readahead_cnt++;
/* Handle Flow Control */
fcbits = SDPCM_FCMASK_VALUE(
@@ -1754,12 +1753,12 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if (bus->flowcontrol != fcbits) {
if (~bus->flowcontrol & fcbits)
- bus->fc_xoff++;
+ bus->sdcnt.fc_xoff++;
if (bus->flowcontrol & ~fcbits)
- bus->fc_xon++;
+ bus->sdcnt.fc_xon++;
- bus->fc_rcvd++;
+ bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fcbits;
}
@@ -1767,7 +1766,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if (rxseq != seq) {
brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n",
seq, rxseq);
- bus->rx_badseq++;
+ bus->sdcnt.rx_badseq++;
rxseq = seq;
}
@@ -1814,11 +1813,11 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, bus->rxhdr,
BRCMF_FIRSTREAD);
- bus->f2rxhdrs++;
+ bus->sdcnt.f2rxhdrs++;
if (sdret < 0) {
brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret);
- bus->rx_hdrfail++;
+ bus->sdcnt.rx_hdrfail++;
brcmf_sdbrcm_rxfail(bus, true, true);
continue;
}
@@ -1840,7 +1839,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if ((u16) ~(len ^ check)) {
brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n",
len, check);
- bus->rx_badhdr++;
+ bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
@@ -1861,7 +1860,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if ((doff < SDPCM_HDRLEN) || (doff > len)) {
brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n",
doff, len, SDPCM_HDRLEN, seq);
- bus->rx_badhdr++;
+ bus->sdcnt.rx_badhdr++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
@@ -1880,19 +1879,19 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
if (bus->flowcontrol != fcbits) {
if (~bus->flowcontrol & fcbits)
- bus->fc_xoff++;
+ bus->sdcnt.fc_xoff++;
if (bus->flowcontrol & ~fcbits)
- bus->fc_xon++;
+ bus->sdcnt.fc_xon++;
- bus->fc_rcvd++;
+ bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fcbits;
}
/* Check and update sequence number */
if (rxseq != seq) {
brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq);
- bus->rx_badseq++;
+ bus->sdcnt.rx_badseq++;
rxseq = seq;
}
@@ -1937,7 +1936,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
len, rdlen);
bus->sdiodev->bus_if->dstats.rx_errors++;
- bus->rx_toolong++;
+ bus->sdcnt.rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
}
@@ -1960,7 +1959,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
/* Read the remaining frame data */
sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
- bus->f2rxdata++;
+ bus->sdcnt.f2rxdata++;
if (sdret < 0) {
brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen,
@@ -2147,18 +2146,18 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, pkt);
- bus->f2txdata++;
+ bus->sdcnt.f2txdata++;
if (ret < 0) {
/* On failure, abort the command and terminate the frame */
brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
ret);
- bus->tx_sderrs++;
+ bus->sdcnt.tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, NULL);
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
@@ -2166,7 +2165,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
SBSDIO_FUNC1_WFRAMEBCHI, NULL);
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
@@ -2224,7 +2223,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
ret = r_sdreg32(bus, &intstatus,
offsetof(struct sdpcmd_regs,
intstatus));
- bus->f2txdata++;
+ bus->sdcnt.f2txdata++;
if (ret != 0)
break;
if (intstatus & bus->hostintmask)
@@ -2417,7 +2416,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
bus->ipend = false;
err = r_sdreg32(bus, &newstatus,
offsetof(struct sdpcmd_regs, intstatus));
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
if (err != 0)
newstatus = 0;
newstatus &= bus->hostintmask;
@@ -2426,7 +2425,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
err = w_sdreg32(bus, newstatus,
offsetof(struct sdpcmd_regs,
intstatus));
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
}
}
@@ -2445,7 +2444,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
err = r_sdreg32(bus, &newstatus,
offsetof(struct sdpcmd_regs, intstatus));
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
bus->fcstate =
!!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE));
intstatus |= (newstatus & bus->hostintmask);
@@ -2502,7 +2501,7 @@ clkwait:
int ret, i;
ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
+ SDIO_FUNC_2, F2SYNC, bus->ctrl_frame_buf,
(u32) bus->ctrl_frame_len);
if (ret < 0) {
@@ -2510,13 +2509,13 @@ clkwait:
terminate the frame */
brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
ret);
- bus->tx_sderrs++;
+ bus->sdcnt.tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, &err);
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
@@ -2526,7 +2525,7 @@ clkwait:
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_WFRAMEBCLO,
&err);
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
if ((hi == 0) && (lo == 0))
break;
}
@@ -2657,7 +2656,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
/* Check for existing queue, current flow-control,
pending event, or pending clock */
brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq));
- bus->fcqueued++;
+ bus->sdcnt.fcqueued++;
/* Priority based enq */
spin_lock_bh(&bus->txqlock);
@@ -2845,13 +2844,13 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
/* On failure, abort the command and terminate the frame */
brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n",
ret);
- bus->tx_sderrs++;
+ bus->sdcnt.tx_sderrs++;
brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2);
brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL,
SFC_WF_TERM, NULL);
- bus->f1regdata++;
+ bus->sdcnt.f1regdata++;
for (i = 0; i < 3; i++) {
u8 hi, lo;
@@ -2859,7 +2858,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
SBSDIO_FUNC1_WFRAMEBCHI, NULL);
lo = brcmf_sdio_regrb(bus->sdiodev,
SBSDIO_FUNC1_WFRAMEBCLO, NULL);
- bus->f1regdata += 2;
+ bus->sdcnt.f1regdata += 2;
if (hi == 0 && lo == 0)
break;
}
@@ -2976,13 +2975,324 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
up(&bus->sdsem);
if (ret)
- bus->tx_ctlerrs++;
+ bus->sdcnt.tx_ctlerrs++;
else
- bus->tx_ctlpkts++;
+ bus->sdcnt.tx_ctlpkts++;
return ret ? -EIO : 0;
}
+#ifdef DEBUG
+static inline bool brcmf_sdio_valid_shared_address(u32 addr)
+{
+ return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
+}
+
+static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh)
+{
+ u32 addr;
+ int rv;
+ u32 shaddr = 0;
+ struct sdpcm_shared_le sh_le;
+ __le32 addr_le;
+
+ shaddr = bus->ramsize - 4;
+
+ /*
+ * Read last word in socram to determine
+ * address of sdpcm_shared structure
+ */
+ rv = brcmf_sdbrcm_membytes(bus, false, shaddr,
+ (u8 *)&addr_le, 4);
+ if (rv < 0)
+ return rv;
+
+ addr = le32_to_cpu(addr_le);
+
+ brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr);
+
+ /*
+ * Check if addr is valid.
+ * NVRAM length at the end of memory should have been overwritten.
+ */
+ if (!brcmf_sdio_valid_shared_address(addr)) {
+ brcmf_dbg(ERROR, "invalid sdpcm_shared address 0x%08X\n",
+ addr);
+ return -EINVAL;
+ }
+
+ /* Read hndrte_shared structure */
+ rv = brcmf_sdbrcm_membytes(bus, false, addr, (u8 *)&sh_le,
+ sizeof(struct sdpcm_shared_le));
+ if (rv < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = le32_to_cpu(sh_le.flags);
+ sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
+ sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
+ sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
+ sh->assert_line = le32_to_cpu(sh_le.assert_line);
+ sh->console_addr = le32_to_cpu(sh_le.console_addr);
+ sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) != SDPCM_SHARED_VERSION) {
+ brcmf_dbg(ERROR,
+ "sdpcm_shared version mismatch: dhd %d dongle %d\n",
+ SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh, char __user *data,
+ size_t count)
+{
+ u32 addr, console_ptr, console_size, console_index;
+ char *conbuf = NULL;
+ __le32 sh_val;
+ int rv;
+ loff_t pos = 0;
+ int nbytes = 0;
+
+ /* obtain console information from device memory */
+ addr = sh->console_addr + offsetof(struct rte_console, log_le);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
+ if (rv < 0)
+ return rv;
+ console_ptr = le32_to_cpu(sh_val);
+
+ addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
+ if (rv < 0)
+ return rv;
+ console_size = le32_to_cpu(sh_val);
+
+ addr = sh->console_addr + offsetof(struct rte_console, log_le.idx);
+ rv = brcmf_sdbrcm_membytes(bus, false, addr,
+ (u8 *)&sh_val, sizeof(u32));
+ if (rv < 0)
+ return rv;
+ console_index = le32_to_cpu(sh_val);
+
+ /* allocate buffer for console data */
+ if (console_size <= CONSOLE_BUFFER_MAX)
+ conbuf = vzalloc(console_size+1);
+
+ if (!conbuf)
+ return -ENOMEM;
+
+ /* obtain the console data from device */
+ conbuf[console_size] = '\0';
+ rv = brcmf_sdbrcm_membytes(bus, false, console_ptr, (u8 *)conbuf,
+ console_size);
+ if (rv < 0)
+ goto done;
+
+ rv = simple_read_from_buffer(data, count, &pos,
+ conbuf + console_index,
+ console_size - console_index);
+ if (rv < 0)
+ goto done;
+
+ nbytes = rv;
+ if (console_index > 0) {
+ pos = 0;
+ rv = simple_read_from_buffer(data+nbytes, count, &pos,
+ conbuf, console_index - 1);
+ if (rv < 0)
+ goto done;
+ rv += nbytes;
+ }
+done:
+ vfree(conbuf);
+ return rv;
+}
+
+static int brcmf_sdio_trap_info(struct brcmf_sdio *bus, struct sdpcm_shared *sh,
+ char __user *data, size_t count)
+{
+ int error, res;
+ char buf[350];
+ struct brcmf_trap_info tr;
+ int nbytes;
+ loff_t pos = 0;
+
+ if ((sh->flags & SDPCM_SHARED_TRAP) == 0)
+ return 0;
+
+ error = brcmf_sdbrcm_membytes(bus, false, sh->trap_addr, (u8 *)&tr,
+ sizeof(struct brcmf_trap_info));
+ if (error < 0)
+ return error;
+
+ nbytes = brcmf_sdio_dump_console(bus, sh, data, count);
+ if (nbytes < 0)
+ return nbytes;
+
+ res = scnprintf(buf, sizeof(buf),
+ "dongle trap info: type 0x%x @ epc 0x%08x\n"
+ " cpsr 0x%08x spsr 0x%08x sp 0x%08x\n"
+ " lr 0x%08x pc 0x%08x offset 0x%x\n"
+ " r0 0x%08x r1 0x%08x r2 0x%08x r3 0x%08x\n"
+ " r4 0x%08x r5 0x%08x r6 0x%08x r7 0x%08x\n",
+ le32_to_cpu(tr.type), le32_to_cpu(tr.epc),
+ le32_to_cpu(tr.cpsr), le32_to_cpu(tr.spsr),
+ le32_to_cpu(tr.r13), le32_to_cpu(tr.r14),
+ le32_to_cpu(tr.pc), sh->trap_addr,
+ le32_to_cpu(tr.r0), le32_to_cpu(tr.r1),
+ le32_to_cpu(tr.r2), le32_to_cpu(tr.r3),
+ le32_to_cpu(tr.r4), le32_to_cpu(tr.r5),
+ le32_to_cpu(tr.r6), le32_to_cpu(tr.r7));
+
+ error = simple_read_from_buffer(data+nbytes, count, &pos, buf, res);
+ if (error < 0)
+ return error;
+
+ nbytes += error;
+ return nbytes;
+}
+
+static int brcmf_sdio_assert_info(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh, char __user *data,
+ size_t count)
+{
+ int error = 0;
+ char buf[200];
+ char file[80] = "?";
+ char expr[80] = "<???>";
+ int res;
+ loff_t pos = 0;
+
+ if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) {
+ brcmf_dbg(INFO, "firmware not built with -assert\n");
+ return 0;
+ } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) {
+ brcmf_dbg(INFO, "no assert in dongle\n");
+ return 0;
+ }
+
+ if (sh->assert_file_addr != 0) {
+ error = brcmf_sdbrcm_membytes(bus, false, sh->assert_file_addr,
+ (u8 *)file, 80);
+ if (error < 0)
+ return error;
+ }
+ if (sh->assert_exp_addr != 0) {
+ error = brcmf_sdbrcm_membytes(bus, false, sh->assert_exp_addr,
+ (u8 *)expr, 80);
+ if (error < 0)
+ return error;
+ }
+
+ res = scnprintf(buf, sizeof(buf),
+ "dongle assert: %s:%d: assert(%s)\n",
+ file, sh->assert_line, expr);
+ return simple_read_from_buffer(data, count, &pos, buf, res);
+}
+
+static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+{
+ int error;
+ struct sdpcm_shared sh;
+
+ down(&bus->sdsem);
+ error = brcmf_sdio_readshared(bus, &sh);
+ up(&bus->sdsem);
+
+ if (error < 0)
+ return error;
+
+ if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0)
+ brcmf_dbg(INFO, "firmware not built with -assert\n");
+ else if (sh.flags & SDPCM_SHARED_ASSERT)
+ brcmf_dbg(ERROR, "assertion in dongle\n");
+
+ if (sh.flags & SDPCM_SHARED_TRAP)
+ brcmf_dbg(ERROR, "firmware trap in dongle\n");
+
+ return 0;
+}
+
+static int brcmf_sdbrcm_died_dump(struct brcmf_sdio *bus, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ int error = 0;
+ struct sdpcm_shared sh;
+ int nbytes = 0;
+ loff_t pos = *ppos;
+
+ if (pos != 0)
+ return 0;
+
+ down(&bus->sdsem);
+ error = brcmf_sdio_readshared(bus, &sh);
+ if (error < 0)
+ goto done;
+
+ error = brcmf_sdio_assert_info(bus, &sh, data, count);
+ if (error < 0)
+ goto done;
+
+ nbytes = error;
+ error = brcmf_sdio_trap_info(bus, &sh, data, count);
+ if (error < 0)
+ goto done;
+
+ error += nbytes;
+ *ppos += error;
+done:
+ up(&bus->sdsem);
+ return error;
+}
+
+static ssize_t brcmf_sdio_forensic_read(struct file *f, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct brcmf_sdio *bus = f->private_data;
+ int res;
+
+ res = brcmf_sdbrcm_died_dump(bus, data, count, ppos);
+ if (res > 0)
+ *ppos += res;
+ return (ssize_t)res;
+}
+
+static const struct file_operations brcmf_sdio_forensic_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = brcmf_sdio_forensic_read
+};
+
+static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
+{
+ struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
+ struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
+
+ if (IS_ERR_OR_NULL(dentry))
+ return;
+
+ debugfs_create_file("forensics", S_IRUGO, dentry, bus,
+ &brcmf_sdio_forensic_ops);
+ brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
+}
+#else
+static int brcmf_sdbrcm_checkdied(struct brcmf_sdio *bus)
+{
+ return 0;
+}
+
+static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
+{
+}
+#endif /* DEBUG */
+
static int
brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
@@ -3009,60 +3319,27 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
rxlen, msglen);
} else if (timeleft == 0) {
brcmf_dbg(ERROR, "resumed on timeout\n");
+ brcmf_sdbrcm_checkdied(bus);
} else if (pending) {
brcmf_dbg(CTL, "cancelled\n");
return -ERESTARTSYS;
} else {
brcmf_dbg(CTL, "resumed for unknown reason?\n");
+ brcmf_sdbrcm_checkdied(bus);
}
if (rxlen)
- bus->rx_ctlpkts++;
+ bus->sdcnt.rx_ctlpkts++;
else
- bus->rx_ctlerrs++;
+ bus->sdcnt.rx_ctlerrs++;
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
-static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
-{
- int bcmerror = 0;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* Basic sanity checks */
- if (bus->sdiodev->bus_if->drvr_up) {
- bcmerror = -EISCONN;
- goto err;
- }
- if (!len) {
- bcmerror = -EOVERFLOW;
- goto err;
- }
-
- /* Free the old ones and replace with passed variables */
- kfree(bus->vars);
-
- bus->vars = kmalloc(len, GFP_ATOMIC);
- bus->varsz = bus->vars ? len : 0;
- if (bus->vars == NULL) {
- bcmerror = -ENOMEM;
- goto err;
- }
-
- /* Copy the passed variables, which should include the
- terminating double-null */
- memcpy(bus->vars, arg, bus->varsz);
-err:
- return bcmerror;
-}
-
static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
{
int bcmerror = 0;
- u32 varsize;
u32 varaddr;
- u8 *vbuffer;
u32 varsizew;
__le32 varsizew_le;
#ifdef DEBUG
@@ -3071,56 +3348,44 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
/* Even if there are no vars are to be written, we still
need to set the ramsize. */
- varsize = bus->varsz ? roundup(bus->varsz, 4) : 0;
- varaddr = (bus->ramsize - 4) - varsize;
+ varaddr = (bus->ramsize - 4) - bus->varsz;
if (bus->vars) {
- vbuffer = kzalloc(varsize, GFP_ATOMIC);
- if (!vbuffer)
- return -ENOMEM;
-
- memcpy(vbuffer, bus->vars, bus->varsz);
-
/* Write the vars list */
- bcmerror =
- brcmf_sdbrcm_membytes(bus, true, varaddr, vbuffer, varsize);
+ bcmerror = brcmf_sdbrcm_membytes(bus, true, varaddr,
+ bus->vars, bus->varsz);
#ifdef DEBUG
/* Verify NVRAM bytes */
- brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize);
- nvram_ularray = kmalloc(varsize, GFP_ATOMIC);
- if (!nvram_ularray) {
- kfree(vbuffer);
+ brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n",
+ bus->varsz);
+ nvram_ularray = kmalloc(bus->varsz, GFP_ATOMIC);
+ if (!nvram_ularray)
return -ENOMEM;
- }
/* Upload image to verify downloaded contents. */
- memset(nvram_ularray, 0xaa, varsize);
+ memset(nvram_ularray, 0xaa, bus->varsz);
/* Read the vars list to temp buffer for comparison */
- bcmerror =
- brcmf_sdbrcm_membytes(bus, false, varaddr, nvram_ularray,
- varsize);
+ bcmerror = brcmf_sdbrcm_membytes(bus, false, varaddr,
+ nvram_ularray, bus->varsz);
if (bcmerror) {
brcmf_dbg(ERROR, "error %d on reading %d nvram bytes at 0x%08x\n",
- bcmerror, varsize, varaddr);
+ bcmerror, bus->varsz, varaddr);
}
/* Compare the org NVRAM with the one read from RAM */
- if (memcmp(vbuffer, nvram_ularray, varsize))
+ if (memcmp(bus->vars, nvram_ularray, bus->varsz))
brcmf_dbg(ERROR, "Downloaded NVRAM image is corrupted\n");
else
brcmf_dbg(ERROR, "Download/Upload/Compare of NVRAM ok\n");
kfree(nvram_ularray);
#endif /* DEBUG */
-
- kfree(vbuffer);
}
/* adjust to the user specified RAM */
brcmf_dbg(INFO, "Physical memory size: %d\n", bus->ramsize);
brcmf_dbg(INFO, "Vars are at %d, orig varsize is %d\n",
- varaddr, varsize);
- varsize = ((bus->ramsize - 4) - varaddr);
+ varaddr, bus->varsz);
/*
* Determine the length token:
@@ -3131,13 +3396,13 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
varsizew = 0;
varsizew_le = cpu_to_le32(0);
} else {
- varsizew = varsize / 4;
+ varsizew = bus->varsz / 4;
varsizew = (~varsizew << 16) | (varsizew & 0x0000FFFF);
varsizew_le = cpu_to_le32(varsizew);
}
brcmf_dbg(INFO, "New varsize is %d, length token=0x%08x\n",
- varsize, varsizew);
+ bus->varsz, varsizew);
/* Write the length token to the last word */
bcmerror = brcmf_sdbrcm_membytes(bus, true, (bus->ramsize - 4),
@@ -3261,13 +3526,21 @@ err:
* by two NULs.
*/
-static uint brcmf_process_nvram_vars(char *varbuf, uint len)
+static int brcmf_process_nvram_vars(struct brcmf_sdio *bus)
{
+ char *varbuf;
char *dp;
bool findNewline;
int column;
- uint buf_len, n;
+ int ret = 0;
+ uint buf_len, n, len;
+ len = bus->firmware->size;
+ varbuf = vmalloc(len);
+ if (!varbuf)
+ return -ENOMEM;
+
+ memcpy(varbuf, bus->firmware->data, len);
dp = varbuf;
findNewline = false;
@@ -3296,56 +3569,44 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
column++;
}
buf_len = dp - varbuf;
-
while (dp < varbuf + n)
*dp++ = 0;
- return buf_len;
+ kfree(bus->vars);
+ /* roundup needed for download to device */
+ bus->varsz = roundup(buf_len + 1, 4);
+ bus->vars = kmalloc(bus->varsz, GFP_KERNEL);
+ if (bus->vars == NULL) {
+ bus->varsz = 0;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* copy the processed variables and add null termination */
+ memcpy(bus->vars, varbuf, buf_len);
+ bus->vars[buf_len] = 0;
+err:
+ vfree(varbuf);
+ return ret;
}
static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
- uint len;
- char *memblock = NULL;
- char *bufp;
int ret;
+ if (bus->sdiodev->bus_if->drvr_up)
+ return -EISCONN;
+
ret = request_firmware(&bus->firmware, BRCMF_SDIO_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
return ret;
}
- bus->fw_ptr = 0;
-
- memblock = kmalloc(MEMBLOCK, GFP_ATOMIC);
- if (memblock == NULL) {
- ret = -ENOMEM;
- goto err;
- }
-
- len = brcmf_sdbrcm_get_image(memblock, MEMBLOCK, bus);
-
- if (len > 0 && len < MEMBLOCK) {
- bufp = (char *)memblock;
- bufp[len] = 0;
- len = brcmf_process_nvram_vars(bufp, len);
- bufp += len;
- *bufp++ = 0;
- if (len)
- ret = brcmf_sdbrcm_downloadvars(bus, memblock, len + 1);
- if (ret)
- brcmf_dbg(ERROR, "error downloading vars: %d\n", ret);
- } else {
- brcmf_dbg(ERROR, "error reading nvram file: %d\n", len);
- ret = -EIO;
- }
-err:
- kfree(memblock);
+ ret = brcmf_process_nvram_vars(bus);
release_firmware(bus->firmware);
- bus->fw_ptr = 0;
return ret;
}
@@ -3419,7 +3680,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev)
return 0;
/* Start the watchdog timer */
- bus->tickcnt = 0;
+ bus->sdcnt.tickcnt = 0;
brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
down(&bus->sdsem);
@@ -3512,7 +3773,7 @@ void brcmf_sdbrcm_isr(void *arg)
return;
}
/* Count the interrupt call */
- bus->intrcount++;
+ bus->sdcnt.intrcount++;
bus->ipend = true;
/* Shouldn't get this interrupt if we're sleeping? */
@@ -3554,7 +3815,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
bus->polltick = 0;
/* Check device if no interrupts */
- if (!bus->intr || (bus->intrcount == bus->lastintrs)) {
+ if (!bus->intr ||
+ (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) {
if (!bus->dpc_sched) {
u8 devpend;
@@ -3569,7 +3831,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
/* If there is something, make like the ISR and
schedule the DPC */
if (intstatus) {
- bus->pollcnt++;
+ bus->sdcnt.pollcnt++;
bus->ipend = true;
bus->dpc_sched = true;
@@ -3581,7 +3843,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
}
/* Update interrupt tracking */
- bus->lastintrs = bus->intrcount;
+ bus->sdcnt.lastintrs = bus->sdcnt.intrcount;
}
#ifdef DEBUG
/* Poll for console output periodically */
@@ -3623,6 +3885,8 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
return true;
if (chipid == BCM4330_CHIP_ID)
return true;
+ if (chipid == BCM4334_CHIP_ID)
+ return true;
return false;
}
@@ -3793,7 +4057,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
brcmf_sdbrcm_bus_watchdog(bus);
/* Count the tick for reference */
- bus->tickcnt++;
+ bus->sdcnt.tickcnt++;
} else
break;
}
@@ -3856,6 +4120,10 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
{
int ret;
struct brcmf_sdio *bus;
+ struct brcmf_bus_dcmd *dlst;
+ u32 dngl_txglom;
+ u32 dngl_txglomalign;
+ u8 idx;
brcmf_dbg(TRACE, "Enter\n");
@@ -3938,8 +4206,29 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
goto fail;
}
+ brcmf_sdio_debugfs_create(bus);
brcmf_dbg(INFO, "completed!!\n");
+ /* sdio bus core specific dcmd */
+ idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ dlst = kzalloc(sizeof(struct brcmf_bus_dcmd), GFP_KERNEL);
+ if (dlst) {
+ if (bus->ci->c_inf[idx].rev < 12) {
+ /* for sdio core rev < 12, disable txgloming */
+ dngl_txglom = 0;
+ dlst->name = "bus:txglom";
+ dlst->param = (char *)&dngl_txglom;
+ dlst->param_len = sizeof(u32);
+ } else {
+ /* otherwise, set txglomalign */
+ dngl_txglomalign = bus->sdiodev->bus_if->align;
+ dlst->name = "bus:txglomalign";
+ dlst->param = (char *)&dngl_txglomalign;
+ dlst->param_len = sizeof(u32);
+ }
+ list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
+ }
+
/* if firmware path present try to download and bring up bus */
ret = brcmf_bus_start(bus->sdiodev->dev);
if (ret != 0) {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
index f8e1f1c84d08..58155e23d220 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -403,6 +403,23 @@ static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
ci->c_inf[3].cib = 0x03004211;
ci->ramsize = 0x48000;
break;
+ case BCM4334_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x29004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x0d004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x13080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x07004211;
+ ci->ramsize = 0x80000;
+ break;
default:
brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
return -ENODEV;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index d13ae9c299f2..28c5fbb4af26 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -691,9 +691,10 @@ scan_out:
}
static s32
-brcmf_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
+brcmf_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
+ struct net_device *ndev = request->wdev->netdev;
s32 err = 0;
WL_TRACE("Enter\n");
@@ -919,9 +920,7 @@ brcmf_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *ndev,
set_bit(WL_STATUS_CONNECTING, &cfg_priv->status);
if (params->bssid)
- WL_CONN("BSSID: %02X %02X %02X %02X %02X %02X\n",
- params->bssid[0], params->bssid[1], params->bssid[2],
- params->bssid[3], params->bssid[4], params->bssid[5]);
+ WL_CONN("BSSID: %pM\n", params->bssid);
else
WL_CONN("No BSSID specified\n");
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 6d8b7213643a..8c9345dd37d2 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -318,10 +318,6 @@
#define IS_SIM(chippkg) \
((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
-#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
-
-#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
-
#ifdef DEBUG
#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
@@ -473,9 +469,6 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
}
- /* figure out buscore */
- sii->buscore = ai_findcore(&sii->pub, PCIE_CORE_ID, 0);
-
return true;
}
@@ -483,11 +476,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
struct bcma_bus *pbus)
{
struct si_pub *sih = &sii->pub;
- u32 w, savewin;
struct bcma_device *cc;
- struct ssb_sprom *sprom = &pbus->sprom;
-
- savewin = 0;
sii->icbus = pbus;
sii->pcibus = pbus->host_pci;
@@ -510,47 +499,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
/* PMU specific initializations */
if (ai_get_cccaps(sih) & CC_CAP_PMU) {
- si_pmu_init(sih);
(void)si_pmu_measure_alpclk(sih);
- si_pmu_res_init(sih);
- }
-
- /* setup the GPIO based LED powersave register */
- w = (sprom->leddc_on_time << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
- (sprom->leddc_off_time << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT);
- if (w == 0)
- w = DEFAULT_GPIOTIMERVAL;
- ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
- ~0, w);
-
- if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
- /*
- * enable 12 mA drive strenth for 43224 and
- * set chipControl register bit 15
- */
- if (ai_get_chiprev(sih) == 0) {
- SI_MSG("Applying 43224A0 WARs\n");
- ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
- CCTRL43224_GPIO_TOGGLE,
- CCTRL43224_GPIO_TOGGLE);
- si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
- CCTRL_43224A0_12MA_LED_DRIVE);
- }
- if (ai_get_chiprev(sih) >= 1) {
- SI_MSG("Applying 43224B0+ WARs\n");
- si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
- CCTRL_43224B0_12MA_LED_DRIVE);
- }
- }
-
- if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
- /*
- * enable 12 mA drive strenth for 4313 and
- * set chipControl register bit 1
- */
- SI_MSG("Applying 4313 WARs\n");
- si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
- CCTRL_4313_12MA_LED_DRIVE);
}
return sii;
@@ -589,7 +538,7 @@ void ai_detach(struct si_pub *sih)
struct si_pub *si_local = NULL;
memcpy(&si_local, &sih, sizeof(struct si_pub **));
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
if (sii == NULL)
return;
@@ -597,27 +546,6 @@ void ai_detach(struct si_pub *sih)
kfree(sii);
}
-/* return index of coreid or BADIDX if not found */
-struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
-{
- struct bcma_device *core;
- struct si_info *sii;
- uint found;
-
- sii = (struct si_info *)sih;
-
- found = 0;
-
- list_for_each_entry(core, &sii->icbus->cores, list)
- if (core->id.id == coreid) {
- if (found == coreunit)
- return core;
- found++;
- }
-
- return NULL;
-}
-
/*
* read/modify chipcommon core register.
*/
@@ -627,13 +555,12 @@ uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
u32 w;
struct si_info *sii;
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
cc = sii->icbus->drv_cc.core;
/* mask and set */
- if (mask || val) {
+ if (mask || val)
bcma_maskset32(cc, regoff, ~mask, val);
- }
/* readback */
w = bcma_read32(cc, regoff);
@@ -694,12 +621,13 @@ ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
/* initialize power control delay registers */
void ai_clkctl_init(struct si_pub *sih)
{
+ struct si_info *sii = container_of(sih, struct si_info, pub);
struct bcma_device *cc;
if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return;
- cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ cc = sii->icbus->drv_cc.core;
if (cc == NULL)
return;
@@ -721,7 +649,7 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
uint slowminfreq;
u16 fpdelay;
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
if (ai_get_cccaps(sih) & CC_CAP_PMU) {
fpdelay = si_pmu_fast_pwrup_delay(sih);
return fpdelay;
@@ -731,7 +659,7 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
return 0;
fpdelay = 0;
- cc = ai_findcore(sih, CC_CORE_ID, 0);
+ cc = sii->icbus->drv_cc.core;
if (cc) {
slowminfreq = ai_slowclk_freq(sih, false, cc);
fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
@@ -753,12 +681,9 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
struct si_info *sii;
struct bcma_device *cc;
- sii = (struct si_info *)sih;
-
- if (PCI_FORCEHT(sih))
- return mode == BCMA_CLKMODE_FAST;
+ sii = container_of(sih, struct si_info, pub);
- cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
+ cc = sii->icbus->drv_cc.core;
bcma_core_set_clockmode(cc, mode);
return mode == BCMA_CLKMODE_FAST;
}
@@ -766,16 +691,10 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
void ai_pci_up(struct si_pub *sih)
{
struct si_info *sii;
- struct bcma_device *cc;
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
- if (PCI_FORCEHT(sih)) {
- cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
- bcma_core_set_clockmode(cc, BCMA_CLKMODE_FAST);
- }
-
- if (PCIE(sih))
+ if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true);
}
@@ -783,26 +702,20 @@ void ai_pci_up(struct si_pub *sih)
void ai_pci_down(struct si_pub *sih)
{
struct si_info *sii;
- struct bcma_device *cc;
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
- /* release FORCEHT since chip is going to "down" state */
- if (PCI_FORCEHT(sih)) {
- cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
- bcma_core_set_clockmode(cc, BCMA_CLKMODE_DYNAMIC);
- }
-
- if (PCIE(sih))
+ if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false);
}
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{
+ struct si_info *sii = container_of(sih, struct si_info, pub);
struct bcma_device *cc;
- cc = ai_findcore(sih, CC_CORE_ID, 0);
+ cc = sii->icbus->drv_cc.core;
/* EPA Fix */
bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
@@ -814,7 +727,7 @@ bool ai_deviceremoved(struct si_pub *sih)
u32 w;
struct si_info *sii;
- sii = (struct si_info *)sih;
+ sii = container_of(sih, struct si_info, pub);
if (sii->icbus->hosttype != BCMA_HOSTTYPE_PCI)
return false;
@@ -825,15 +738,3 @@ bool ai_deviceremoved(struct si_pub *sih)
return false;
}
-
-uint ai_get_buscoretype(struct si_pub *sih)
-{
- struct si_info *sii = (struct si_info *)sih;
- return sii->buscore->id.id;
-}
-
-uint ai_get_buscorerev(struct si_pub *sih)
-{
- struct si_info *sii = (struct si_info *)sih;
- return sii->buscore->id.rev;
-}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index d9f04a683bdb..89562c1fbf49 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -88,16 +88,6 @@
#define CLKD_OTP 0x000f0000
#define CLKD_OTP_SHIFT 16
-/* Package IDs */
-#define BCM4717_PKG_ID 9 /* 4717 package id */
-#define BCM4718_PKG_ID 10 /* 4718 package id */
-#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */
-
-/* these are router chips */
-#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
-#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */
-#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
-
/* dynamic clock control defines */
#define LPOMINFREQ 25000 /* low power oscillator min */
#define LPOMAXFREQ 43000 /* low power oscillator max */
@@ -168,7 +158,6 @@ struct si_info {
struct si_pub pub; /* back plane public state (must be first) */
struct bcma_bus *icbus; /* handle to soc interconnect bus */
struct pci_dev *pcibus; /* handle to pci bus */
- struct bcma_device *buscore;
u32 chipst; /* chip status */
};
@@ -183,8 +172,6 @@ struct si_info {
/* AMBA Interconnect exported externs */
-extern struct bcma_device *ai_findcore(struct si_pub *sih,
- u16 coreid, u16 coreunit);
extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
/* === exported functions === */
@@ -193,7 +180,7 @@ extern void ai_detach(struct si_pub *sih);
extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
extern void ai_clkctl_init(struct si_pub *sih);
extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
-extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
+extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
extern bool ai_deviceremoved(struct si_pub *sih);
extern void ai_pci_down(struct si_pub *sih);
@@ -202,9 +189,6 @@ extern void ai_pci_up(struct si_pub *sih);
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
-extern uint ai_get_buscoretype(struct si_pub *sih);
-extern uint ai_get_buscorerev(struct si_pub *sih);
-
static inline u32 ai_get_cccaps(struct si_pub *sih)
{
return sih->cccaps;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 95b5902bc4b3..be5bcfb9153b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -663,9 +663,6 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
/* patch the first MPDU */
if (count == 1) {
u8 plcp0, plcp3, is40, sgi;
- struct ieee80211_sta *sta;
-
- sta = tx_info->control.sta;
if (rr) {
plcp0 = plcp[0];
@@ -735,10 +732,8 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
* a candidate for aggregation
*/
p = pktq_ppeek(&qi->q, prec);
- /* tx_info must be checked with current p */
- tx_info = IEEE80211_SKB_CB(p);
-
if (p) {
+ tx_info = IEEE80211_SKB_CB(p);
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
((u8) (p->priority) == tid)) {
plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
@@ -759,6 +754,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
p = NULL;
continue;
}
+ /* next packet fit for aggregation so dequeue */
p = brcmu_pktq_pdeq(&qi->q, prec);
} else {
p = NULL;
@@ -1196,8 +1192,8 @@ static bool cb_del_ampdu_pkt(struct sk_buff *mpdu, void *arg_a)
bool rc;
rc = tx_info->flags & IEEE80211_TX_CTL_AMPDU ? true : false;
- rc = rc && (tx_info->control.sta == NULL || ampdu_pars->sta == NULL ||
- tx_info->control.sta == ampdu_pars->sta);
+ rc = rc && (tx_info->rate_driver_data[0] == NULL || ampdu_pars->sta == NULL ||
+ tx_info->rate_driver_data[0] == ampdu_pars->sta);
rc = rc && ((u8)(mpdu->priority) == ampdu_pars->tid);
return rc;
}
@@ -1211,8 +1207,8 @@ static void dma_cb_fn_ampdu(void *txi, void *arg_a)
struct ieee80211_tx_info *tx_info = (struct ieee80211_tx_info *)txi;
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
- (tx_info->control.sta == sta || sta == NULL))
- tx_info->control.sta = NULL;
+ (tx_info->rate_driver_data[0] == sta || sta == NULL))
+ tx_info->rate_driver_data[0] = NULL;
}
/*
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index eb77ac3cfb6b..9a4c63f927cb 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -15,7 +15,9 @@
*/
#include <linux/types.h>
+#include <net/cfg80211.h>
#include <net/mac80211.h>
+#include <net/regulatory.h>
#include <defs.h>
#include "pub.h"
@@ -23,73 +25,17 @@
#include "main.h"
#include "stf.h"
#include "channel.h"
+#include "mac80211_if.h"
/* QDB() macro takes a dB value and converts to a quarter dB value */
#define QDB(n) ((n) * BRCMS_TXPWR_DB_FACTOR)
-#define LOCALE_CHAN_01_11 (1<<0)
-#define LOCALE_CHAN_12_13 (1<<1)
-#define LOCALE_CHAN_14 (1<<2)
-#define LOCALE_SET_5G_LOW_JP1 (1<<3) /* 34-48, step 2 */
-#define LOCALE_SET_5G_LOW_JP2 (1<<4) /* 34-46, step 4 */
-#define LOCALE_SET_5G_LOW1 (1<<5) /* 36-48, step 4 */
-#define LOCALE_SET_5G_LOW2 (1<<6) /* 52 */
-#define LOCALE_SET_5G_LOW3 (1<<7) /* 56-64, step 4 */
-#define LOCALE_SET_5G_MID1 (1<<8) /* 100-116, step 4 */
-#define LOCALE_SET_5G_MID2 (1<<9) /* 120-124, step 4 */
-#define LOCALE_SET_5G_MID3 (1<<10) /* 128 */
-#define LOCALE_SET_5G_HIGH1 (1<<11) /* 132-140, step 4 */
-#define LOCALE_SET_5G_HIGH2 (1<<12) /* 149-161, step 4 */
-#define LOCALE_SET_5G_HIGH3 (1<<13) /* 165 */
-#define LOCALE_CHAN_52_140_ALL (1<<14)
-#define LOCALE_SET_5G_HIGH4 (1<<15) /* 184-216 */
-
-#define LOCALE_CHAN_36_64 (LOCALE_SET_5G_LOW1 | \
- LOCALE_SET_5G_LOW2 | \
- LOCALE_SET_5G_LOW3)
-#define LOCALE_CHAN_52_64 (LOCALE_SET_5G_LOW2 | LOCALE_SET_5G_LOW3)
-#define LOCALE_CHAN_100_124 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2)
-#define LOCALE_CHAN_100_140 (LOCALE_SET_5G_MID1 | LOCALE_SET_5G_MID2 | \
- LOCALE_SET_5G_MID3 | LOCALE_SET_5G_HIGH1)
-#define LOCALE_CHAN_149_165 (LOCALE_SET_5G_HIGH2 | LOCALE_SET_5G_HIGH3)
-#define LOCALE_CHAN_184_216 LOCALE_SET_5G_HIGH4
-
-#define LOCALE_CHAN_01_14 (LOCALE_CHAN_01_11 | \
- LOCALE_CHAN_12_13 | \
- LOCALE_CHAN_14)
-
-#define LOCALE_RADAR_SET_NONE 0
-#define LOCALE_RADAR_SET_1 1
-
-#define LOCALE_RESTRICTED_NONE 0
-#define LOCALE_RESTRICTED_SET_2G_SHORT 1
-#define LOCALE_RESTRICTED_CHAN_165 2
-#define LOCALE_CHAN_ALL_5G 3
-#define LOCALE_RESTRICTED_JAPAN_LEGACY 4
-#define LOCALE_RESTRICTED_11D_2G 5
-#define LOCALE_RESTRICTED_11D_5G 6
-#define LOCALE_RESTRICTED_LOW_HI 7
-#define LOCALE_RESTRICTED_12_13_14 8
-
-#define LOCALE_2G_IDX_i 0
-#define LOCALE_5G_IDX_11 0
#define LOCALE_MIMO_IDX_bn 0
#define LOCALE_MIMO_IDX_11n 0
-/* max of BAND_5G_PWR_LVLS and 6 for 2.4 GHz */
-#define BRCMS_MAXPWR_TBL_SIZE 6
/* max of BAND_5G_PWR_LVLS and 14 for 2.4 GHz */
#define BRCMS_MAXPWR_MIMO_TBL_SIZE 14
-/* power level in group of 2.4GHz band channels:
- * maxpwr[0] - CCK channels [1]
- * maxpwr[1] - CCK channels [2-10]
- * maxpwr[2] - CCK channels [11-14]
- * maxpwr[3] - OFDM channels [1]
- * maxpwr[4] - OFDM channels [2-10]
- * maxpwr[5] - OFDM channels [11-14]
- */
-
/* maxpwr mapping to 5GHz band channels:
* maxpwr[0] - channels [34-48]
* maxpwr[1] - channels [52-60]
@@ -101,16 +47,8 @@
#define LC(id) LOCALE_MIMO_IDX_ ## id
-#define LC_2G(id) LOCALE_2G_IDX_ ## id
-
-#define LC_5G(id) LOCALE_5G_IDX_ ## id
-
-#define LOCALES(band2, band5, mimo2, mimo5) \
- {LC_2G(band2), LC_5G(band5), LC(mimo2), LC(mimo5)}
-
-/* macro to get 2.4 GHz channel group index for tx power */
-#define CHANNEL_POWER_IDX_2G_CCK(c) (((c) < 2) ? 0 : (((c) < 11) ? 1 : 2))
-#define CHANNEL_POWER_IDX_2G_OFDM(c) (((c) < 2) ? 3 : (((c) < 11) ? 4 : 5))
+#define LOCALES(mimo2, mimo5) \
+ {LC(mimo2), LC(mimo5)}
/* macro to get 5 GHz channel group index for tx power */
#define CHANNEL_POWER_IDX_5G(c) (((c) < 52) ? 0 : \
@@ -118,18 +56,37 @@
(((c) < 100) ? 2 : \
(((c) < 149) ? 3 : 4))))
-#define ISDFS_EU(fl) (((fl) & BRCMS_DFS_EU) == BRCMS_DFS_EU)
-
-struct brcms_cm_band {
- /* struct locale_info flags */
- u8 locale_flags;
- /* List of valid channels in the country */
- struct brcms_chanvec valid_channels;
- /* List of restricted use channels */
- const struct brcms_chanvec *restricted_channels;
- /* List of radar sensitive channels */
- const struct brcms_chanvec *radar_channels;
- u8 PAD[8];
+#define BRCM_2GHZ_2412_2462 REG_RULE(2412-10, 2462+10, 40, 0, 19, 0)
+#define BRCM_2GHZ_2467_2472 REG_RULE(2467-10, 2472+10, 20, 0, 19, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+
+#define BRCM_5GHZ_5180_5240 REG_RULE(5180-10, 5240+10, 40, 0, 21, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+#define BRCM_5GHZ_5260_5320 REG_RULE(5260-10, 5320+10, 40, 0, 21, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_DFS | \
+ NL80211_RRF_NO_IBSS)
+#define BRCM_5GHZ_5500_5700 REG_RULE(5500-10, 5700+10, 40, 0, 21, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_DFS | \
+ NL80211_RRF_NO_IBSS)
+#define BRCM_5GHZ_5745_5825 REG_RULE(5745-10, 5825+10, 40, 0, 21, \
+ NL80211_RRF_PASSIVE_SCAN | \
+ NL80211_RRF_NO_IBSS)
+
+static const struct ieee80211_regdomain brcms_regdom_x2 = {
+ .n_reg_rules = 7,
+ .alpha2 = "X2",
+ .reg_rules = {
+ BRCM_2GHZ_2412_2462,
+ BRCM_2GHZ_2467_2472,
+ BRCM_5GHZ_5180_5240,
+ BRCM_5GHZ_5260_5320,
+ BRCM_5GHZ_5500_5700,
+ BRCM_5GHZ_5745_5825,
+ }
};
/* locale per-channel tx power limits for MIMO frames
@@ -141,337 +98,23 @@ struct locale_mimo_info {
s8 maxpwr20[BRCMS_MAXPWR_MIMO_TBL_SIZE];
/* tx 40 MHz power limits, qdBm units */
s8 maxpwr40[BRCMS_MAXPWR_MIMO_TBL_SIZE];
- u8 flags;
};
/* Country names and abbreviations with locale defined from ISO 3166 */
struct country_info {
- const u8 locale_2G; /* 2.4G band locale */
- const u8 locale_5G; /* 5G band locale */
const u8 locale_mimo_2G; /* 2.4G mimo info */
const u8 locale_mimo_5G; /* 5G mimo info */
};
+struct brcms_regd {
+ struct country_info country;
+ const struct ieee80211_regdomain *regdomain;
+};
+
struct brcms_cm_info {
struct brcms_pub *pub;
struct brcms_c_info *wlc;
- char srom_ccode[BRCM_CNTRY_BUF_SZ]; /* Country Code in SROM */
- uint srom_regrev; /* Regulatory Rev for the SROM ccode */
- const struct country_info *country; /* current country def */
- char ccode[BRCM_CNTRY_BUF_SZ]; /* current internal Country Code */
- uint regrev; /* current Regulatory Revision */
- char country_abbrev[BRCM_CNTRY_BUF_SZ]; /* current advertised ccode */
- /* per-band state (one per phy/radio) */
- struct brcms_cm_band bandstate[MAXBANDS];
- /* quiet channels currently for radar sensitivity or 11h support */
- /* channels on which we cannot transmit */
- struct brcms_chanvec quiet_channels;
-};
-
-/* locale channel and power info. */
-struct locale_info {
- u32 valid_channels;
- /* List of radar sensitive channels */
- u8 radar_channels;
- /* List of channels used only if APs are detected */
- u8 restricted_channels;
- /* Max tx pwr in qdBm for each sub-band */
- s8 maxpwr[BRCMS_MAXPWR_TBL_SIZE];
- /* Country IE advertised max tx pwr in dBm per sub-band */
- s8 pub_maxpwr[BAND_5G_PWR_LVLS];
- u8 flags;
-};
-
-/* Regulatory Matrix Spreadsheet (CLM) MIMO v3.7.9 */
-
-/*
- * Some common channel sets
- */
-
-/* No channels */
-static const struct brcms_chanvec chanvec_none = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* All 2.4 GHz HW channels */
-static const struct brcms_chanvec chanvec_all_2G = {
- {0xfe, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* All 5 GHz HW channels */
-static const struct brcms_chanvec chanvec_all_5G = {
- {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x11, 0x11,
- 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x20, 0x22, 0x22, 0x00, 0x00, 0x11,
- 0x11, 0x11, 0x11, 0x01}
-};
-
-/*
- * Radar channel sets
- */
-
-/* Channels 52 - 64, 100 - 140 */
-static const struct brcms_chanvec radar_set1 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, /* 52 - 60 */
- 0x01, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x11, /* 64, 100 - 124 */
- 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 128 - 140 */
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/*
- * Restricted channel sets
- */
-
-/* Channels 34, 38, 42, 46 */
-static const struct brcms_chanvec restricted_set_japan_legacy = {
- {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channels 12, 13 */
-static const struct brcms_chanvec restricted_set_2g_short = {
- {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channel 165 */
-static const struct brcms_chanvec restricted_chan_165 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channels 36 - 48 & 149 - 165 */
-static const struct brcms_chanvec restricted_low_hi = {
- {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x20, 0x22, 0x22, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* Channels 12 - 14 */
-static const struct brcms_chanvec restricted_set_12_13_14 = {
- {0x00, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-/* global memory to provide working buffer for expanded locale */
-
-static const struct brcms_chanvec *g_table_radar_set[] = {
- &chanvec_none,
- &radar_set1
-};
-
-static const struct brcms_chanvec *g_table_restricted_chan[] = {
- &chanvec_none, /* restricted_set_none */
- &restricted_set_2g_short,
- &restricted_chan_165,
- &chanvec_all_5G,
- &restricted_set_japan_legacy,
- &chanvec_all_2G, /* restricted_set_11d_2G */
- &chanvec_all_5G, /* restricted_set_11d_5G */
- &restricted_low_hi,
- &restricted_set_12_13_14
-};
-
-static const struct brcms_chanvec locale_2g_01_11 = {
- {0xfe, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_2g_12_13 = {
- {0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_2g_14 = {
- {0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_LOW_JP1 = {
- {0x00, 0x00, 0x00, 0x00, 0x54, 0x55, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_LOW_JP2 = {
- {0x00, 0x00, 0x00, 0x00, 0x44, 0x44, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_LOW1 = {
- {0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x01, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_LOW2 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_LOW3 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_MID1 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x10, 0x11, 0x11, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_MID2 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_MID3 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_HIGH1 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x10, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_HIGH2 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x20, 0x22, 0x02, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_HIGH3 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_52_140_ALL = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x11,
- 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
- 0x11, 0x11, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00}
-};
-
-static const struct brcms_chanvec locale_5g_HIGH4 = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
- 0x11, 0x11, 0x11, 0x11}
-};
-
-static const struct brcms_chanvec *g_table_locale_base[] = {
- &locale_2g_01_11,
- &locale_2g_12_13,
- &locale_2g_14,
- &locale_5g_LOW_JP1,
- &locale_5g_LOW_JP2,
- &locale_5g_LOW1,
- &locale_5g_LOW2,
- &locale_5g_LOW3,
- &locale_5g_MID1,
- &locale_5g_MID2,
- &locale_5g_MID3,
- &locale_5g_HIGH1,
- &locale_5g_HIGH2,
- &locale_5g_HIGH3,
- &locale_5g_52_140_ALL,
- &locale_5g_HIGH4
-};
-
-static void brcms_c_locale_add_channels(struct brcms_chanvec *target,
- const struct brcms_chanvec *channels)
-{
- u8 i;
- for (i = 0; i < sizeof(struct brcms_chanvec); i++)
- target->vec[i] |= channels->vec[i];
-}
-
-static void brcms_c_locale_get_channels(const struct locale_info *locale,
- struct brcms_chanvec *channels)
-{
- u8 i;
-
- memset(channels, 0, sizeof(struct brcms_chanvec));
-
- for (i = 0; i < ARRAY_SIZE(g_table_locale_base); i++) {
- if (locale->valid_channels & (1 << i))
- brcms_c_locale_add_channels(channels,
- g_table_locale_base[i]);
- }
-}
-
-/*
- * Locale Definitions - 2.4 GHz
- */
-static const struct locale_info locale_i = { /* locale i. channel 1 - 13 */
- LOCALE_CHAN_01_11 | LOCALE_CHAN_12_13,
- LOCALE_RADAR_SET_NONE,
- LOCALE_RESTRICTED_SET_2G_SHORT,
- {QDB(19), QDB(19), QDB(19),
- QDB(19), QDB(19), QDB(19)},
- {20, 20, 20, 0},
- BRCMS_EIRP
-};
-
-/*
- * Locale Definitions - 5 GHz
- */
-static const struct locale_info locale_11 = {
- /* locale 11. channel 36 - 48, 52 - 64, 100 - 140, 149 - 165 */
- LOCALE_CHAN_36_64 | LOCALE_CHAN_100_140 | LOCALE_CHAN_149_165,
- LOCALE_RADAR_SET_1,
- LOCALE_RESTRICTED_NONE,
- {QDB(21), QDB(21), QDB(21), QDB(21), QDB(21)},
- {23, 23, 23, 30, 30},
- BRCMS_EIRP | BRCMS_DFS_EU
-};
-
-static const struct locale_info *g_locale_2g_table[] = {
- &locale_i
-};
-
-static const struct locale_info *g_locale_5g_table[] = {
- &locale_11
+ const struct brcms_regd *world_regd;
};
/*
@@ -484,7 +127,6 @@ static const struct locale_mimo_info locale_bn = {
{0, 0, QDB(13), QDB(13), QDB(13),
QDB(13), QDB(13), QDB(13), QDB(13), QDB(13),
QDB(13), 0, 0},
- 0
};
static const struct locale_mimo_info *g_mimo_2g_table[] = {
@@ -497,114 +139,20 @@ static const struct locale_mimo_info *g_mimo_2g_table[] = {
static const struct locale_mimo_info locale_11n = {
{ /* 12.5 dBm */ 50, 50, 50, QDB(15), QDB(15)},
{QDB(14), QDB(15), QDB(15), QDB(15), QDB(15)},
- 0
};
static const struct locale_mimo_info *g_mimo_5g_table[] = {
&locale_11n
};
-static const struct {
- char abbrev[BRCM_CNTRY_BUF_SZ]; /* country abbreviation */
- struct country_info country;
-} cntry_locales[] = {
+static const struct brcms_regd cntry_locales[] = {
+ /* Worldwide RoW 2, must always be at index 0 */
{
- "X2", LOCALES(i, 11, bn, 11n)}, /* Worldwide RoW 2 */
-};
-
-#ifdef SUPPORT_40MHZ
-/* 20MHz channel info for 40MHz pairing support */
-struct chan20_info {
- u8 sb;
- u8 adj_sbs;
+ .country = LOCALES(bn, 11n),
+ .regdomain = &brcms_regdom_x2,
+ },
};
-/* indicates adjacent channels that are allowed for a 40 Mhz channel and
- * those that permitted by the HT
- */
-struct chan20_info chan20_info[] = {
- /* 11b/11g */
-/* 0 */ {1, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 1 */ {2, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 2 */ {3, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 3 */ {4, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 4 */ {5, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 5 */ {6, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 6 */ {7, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 7 */ {8, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 8 */ {9, (CH_UPPER_SB | CH_LOWER_SB | CH_EWA_VALID)},
-/* 9 */ {10, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 10 */ {11, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 11 */ {12, (CH_LOWER_SB)},
-/* 12 */ {13, (CH_LOWER_SB)},
-/* 13 */ {14, (CH_LOWER_SB)},
-
-/* 11a japan high */
-/* 14 */ {34, (CH_UPPER_SB)},
-/* 15 */ {38, (CH_LOWER_SB)},
-/* 16 */ {42, (CH_LOWER_SB)},
-/* 17 */ {46, (CH_LOWER_SB)},
-
-/* 11a usa low */
-/* 18 */ {36, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 19 */ {40, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 20 */ {44, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 21 */ {48, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 22 */ {52, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 23 */ {56, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 24 */ {60, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 25 */ {64, (CH_LOWER_SB | CH_EWA_VALID)},
-
-/* 11a Europe */
-/* 26 */ {100, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 27 */ {104, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 28 */ {108, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 29 */ {112, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 30 */ {116, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 31 */ {120, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 32 */ {124, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 33 */ {128, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 34 */ {132, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 35 */ {136, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 36 */ {140, (CH_LOWER_SB)},
-
-/* 11a usa high, ref5 only */
-/* The 0x80 bit in pdiv means these are REF5, other entries are REF20 */
-/* 37 */ {149, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 38 */ {153, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 39 */ {157, (CH_UPPER_SB | CH_EWA_VALID)},
-/* 40 */ {161, (CH_LOWER_SB | CH_EWA_VALID)},
-/* 41 */ {165, (CH_LOWER_SB)},
-
-/* 11a japan */
-/* 42 */ {184, (CH_UPPER_SB)},
-/* 43 */ {188, (CH_LOWER_SB)},
-/* 44 */ {192, (CH_UPPER_SB)},
-/* 45 */ {196, (CH_LOWER_SB)},
-/* 46 */ {200, (CH_UPPER_SB)},
-/* 47 */ {204, (CH_LOWER_SB)},
-/* 48 */ {208, (CH_UPPER_SB)},
-/* 49 */ {212, (CH_LOWER_SB)},
-/* 50 */ {216, (CH_LOWER_SB)}
-};
-#endif /* SUPPORT_40MHZ */
-
-static const struct locale_info *brcms_c_get_locale_2g(u8 locale_idx)
-{
- if (locale_idx >= ARRAY_SIZE(g_locale_2g_table))
- return NULL; /* error condition */
-
- return g_locale_2g_table[locale_idx];
-}
-
-static const struct locale_info *brcms_c_get_locale_5g(u8 locale_idx)
-{
- if (locale_idx >= ARRAY_SIZE(g_locale_5g_table))
- return NULL; /* error condition */
-
- return g_locale_5g_table[locale_idx];
-}
-
static const struct locale_mimo_info *brcms_c_get_mimo_2g(u8 locale_idx)
{
if (locale_idx >= ARRAY_SIZE(g_mimo_2g_table))
@@ -621,13 +169,6 @@ static const struct locale_mimo_info *brcms_c_get_mimo_5g(u8 locale_idx)
return g_mimo_5g_table[locale_idx];
}
-static int
-brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode,
- char *mapped_ccode, uint *mapped_regrev)
-{
- return false;
-}
-
/*
* Indicates whether the country provided is valid to pass
* to cfg80211 or not.
@@ -662,155 +203,24 @@ static bool brcms_c_country_valid(const char *ccode)
return true;
}
-/* Lookup a country info structure from a null terminated country
- * abbreviation and regrev directly with no translation.
- */
-static const struct country_info *
-brcms_c_country_lookup_direct(const char *ccode, uint regrev)
+static const struct brcms_regd *brcms_world_regd(const char *regdom, int len)
{
- uint size, i;
-
- /* Should just return 0 for single locale driver. */
- /* Keep it this way in case we add more locales. (for now anyway) */
-
- /*
- * all other country def arrays are for regrev == 0, so if
- * regrev is non-zero, fail
- */
- if (regrev > 0)
- return NULL;
-
- /* find matched table entry from country code */
- size = ARRAY_SIZE(cntry_locales);
- for (i = 0; i < size; i++) {
- if (strcmp(ccode, cntry_locales[i].abbrev) == 0)
- return &cntry_locales[i].country;
- }
- return NULL;
-}
-
-static const struct country_info *
-brcms_c_countrycode_map(struct brcms_cm_info *wlc_cm, const char *ccode,
- char *mapped_ccode, uint *mapped_regrev)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- const struct country_info *country;
- uint srom_regrev = wlc_cm->srom_regrev;
- const char *srom_ccode = wlc_cm->srom_ccode;
- int mapped;
-
- /* check for currently supported ccode size */
- if (strlen(ccode) > (BRCM_CNTRY_BUF_SZ - 1)) {
- wiphy_err(wlc->wiphy, "wl%d: %s: ccode \"%s\" too long for "
- "match\n", wlc->pub->unit, __func__, ccode);
- return NULL;
- }
-
- /* default mapping is the given ccode and regrev 0 */
- strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
- *mapped_regrev = 0;
-
- /* If the desired country code matches the srom country code,
- * then the mapped country is the srom regulatory rev.
- * Otherwise look for an aggregate mapping.
- */
- if (!strcmp(srom_ccode, ccode)) {
- *mapped_regrev = srom_regrev;
- mapped = 0;
- wiphy_err(wlc->wiphy, "srom_code == ccode %s\n", __func__);
- } else {
- mapped =
- brcms_c_country_aggregate_map(wlc_cm, ccode, mapped_ccode,
- mapped_regrev);
- }
-
- /* find the matching built-in country definition */
- country = brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
-
- /* if there is not an exact rev match, default to rev zero */
- if (country == NULL && *mapped_regrev != 0) {
- *mapped_regrev = 0;
- country =
- brcms_c_country_lookup_direct(mapped_ccode, *mapped_regrev);
- }
-
- return country;
-}
-
-/* Lookup a country info structure from a null terminated country code
- * The lookup is case sensitive.
- */
-static const struct country_info *
-brcms_c_country_lookup(struct brcms_c_info *wlc, const char *ccode)
-{
- const struct country_info *country;
- char mapped_ccode[BRCM_CNTRY_BUF_SZ];
- uint mapped_regrev;
-
- /*
- * map the country code to a built-in country code, regrev, and
- * country_info struct
- */
- country = brcms_c_countrycode_map(wlc->cmi, ccode, mapped_ccode,
- &mapped_regrev);
-
- return country;
-}
-
-/*
- * reset the quiet channels vector to the union
- * of the restricted and radar channel sets
- */
-static void brcms_c_quiet_channels_reset(struct brcms_cm_info *wlc_cm)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- uint i, j;
- struct brcms_band *band;
- const struct brcms_chanvec *chanvec;
-
- memset(&wlc_cm->quiet_channels, 0, sizeof(struct brcms_chanvec));
-
- band = wlc->band;
- for (i = 0; i < wlc->pub->_nbands;
- i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
-
- /* initialize quiet channels for restricted channels */
- chanvec = wlc_cm->bandstate[band->bandunit].restricted_channels;
- for (j = 0; j < sizeof(struct brcms_chanvec); j++)
- wlc_cm->quiet_channels.vec[j] |= chanvec->vec[j];
+ const struct brcms_regd *regd = NULL;
+ int i;
+ for (i = 0; i < ARRAY_SIZE(cntry_locales); i++) {
+ if (!strncmp(regdom, cntry_locales[i].regdomain->alpha2, len)) {
+ regd = &cntry_locales[i];
+ break;
+ }
}
-}
-
-/* Is the channel valid for the current locale and current band? */
-static bool brcms_c_valid_channel20(struct brcms_cm_info *wlc_cm, uint val)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- return ((val < MAXCHANNEL) &&
- isset(wlc_cm->bandstate[wlc->band->bandunit].valid_channels.vec,
- val));
+ return regd;
}
-/* Is the channel valid for the current locale and specified band? */
-static bool brcms_c_valid_channel20_in_band(struct brcms_cm_info *wlc_cm,
- uint bandunit, uint val)
-{
- return ((val < MAXCHANNEL)
- && isset(wlc_cm->bandstate[bandunit].valid_channels.vec, val));
-}
-
-/* Is the channel valid for the current locale? (but don't consider channels not
- * available due to bandlocking)
- */
-static bool brcms_c_valid_channel20_db(struct brcms_cm_info *wlc_cm, uint val)
+static const struct brcms_regd *brcms_default_world_regd(void)
{
- struct brcms_c_info *wlc = wlc_cm->wlc;
-
- return brcms_c_valid_channel20(wlc->cmi, val) ||
- (!wlc->bandlocked
- && brcms_c_valid_channel20_in_band(wlc->cmi,
- OTHERBANDUNIT(wlc), val));
+ return &cntry_locales[0];
}
/* JP, J1 - J10 are Japan ccodes */
@@ -820,12 +230,6 @@ static bool brcms_c_japan_ccode(const char *ccode)
(ccode[1] == 'P' || (ccode[1] >= '1' && ccode[1] <= '9')));
}
-/* Returns true if currently set country is Japan or variant */
-static bool brcms_c_japan(struct brcms_c_info *wlc)
-{
- return brcms_c_japan_ccode(wlc->cmi->country_abbrev);
-}
-
static void
brcms_c_channel_min_txpower_limits_with_local_constraint(
struct brcms_cm_info *wlc_cm, struct txpwr_limits *txpwr,
@@ -901,140 +305,16 @@ brcms_c_channel_min_txpower_limits_with_local_constraint(
}
-/* Update the radio state (enable/disable) and tx power targets
- * based on a new set of channel/regulatory information
- */
-static void brcms_c_channels_commit(struct brcms_cm_info *wlc_cm)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- uint chan;
- struct txpwr_limits txpwr;
-
- /* search for the existence of any valid channel */
- for (chan = 0; chan < MAXCHANNEL; chan++) {
- if (brcms_c_valid_channel20_db(wlc->cmi, chan))
- break;
- }
- if (chan == MAXCHANNEL)
- chan = INVCHANNEL;
-
- /*
- * based on the channel search above, set or
- * clear WL_RADIO_COUNTRY_DISABLE.
- */
- if (chan == INVCHANNEL) {
- /*
- * country/locale with no valid channels, set
- * the radio disable bit
- */
- mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
- wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\" "
- "nbands %d bandlocked %d\n", wlc->pub->unit,
- __func__, wlc_cm->country_abbrev, wlc->pub->_nbands,
- wlc->bandlocked);
- } else if (mboolisset(wlc->pub->radio_disabled,
- WL_RADIO_COUNTRY_DISABLE)) {
- /*
- * country/locale with valid channel, clear
- * the radio disable bit
- */
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
- }
-
- /*
- * Now that the country abbreviation is set, if the radio supports 2G,
- * then set channel 14 restrictions based on the new locale.
- */
- if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
- wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
- brcms_c_japan(wlc) ? true :
- false);
-
- if (wlc->pub->up && chan != INVCHANNEL) {
- brcms_c_channel_reg_limits(wlc_cm, wlc->chanspec, &txpwr);
- brcms_c_channel_min_txpower_limits_with_local_constraint(wlc_cm,
- &txpwr, BRCMS_TXPWR_MAX);
- wlc_phy_txpower_limit_set(wlc->band->pi, &txpwr, wlc->chanspec);
- }
-}
-
-static int
-brcms_c_channels_init(struct brcms_cm_info *wlc_cm,
- const struct country_info *country)
-{
- struct brcms_c_info *wlc = wlc_cm->wlc;
- uint i, j;
- struct brcms_band *band;
- const struct locale_info *li;
- struct brcms_chanvec sup_chan;
- const struct locale_mimo_info *li_mimo;
-
- band = wlc->band;
- for (i = 0; i < wlc->pub->_nbands;
- i++, band = wlc->bandstate[OTHERBANDUNIT(wlc)]) {
-
- li = (band->bandtype == BRCM_BAND_5G) ?
- brcms_c_get_locale_5g(country->locale_5G) :
- brcms_c_get_locale_2g(country->locale_2G);
- wlc_cm->bandstate[band->bandunit].locale_flags = li->flags;
- li_mimo = (band->bandtype == BRCM_BAND_5G) ?
- brcms_c_get_mimo_5g(country->locale_mimo_5G) :
- brcms_c_get_mimo_2g(country->locale_mimo_2G);
-
- /* merge the mimo non-mimo locale flags */
- wlc_cm->bandstate[band->bandunit].locale_flags |=
- li_mimo->flags;
-
- wlc_cm->bandstate[band->bandunit].restricted_channels =
- g_table_restricted_chan[li->restricted_channels];
- wlc_cm->bandstate[band->bandunit].radar_channels =
- g_table_radar_set[li->radar_channels];
-
- /*
- * set the channel availability, masking out the channels
- * that may not be supported on this phy.
- */
- wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
- &sup_chan);
- brcms_c_locale_get_channels(li,
- &wlc_cm->bandstate[band->bandunit].
- valid_channels);
- for (j = 0; j < sizeof(struct brcms_chanvec); j++)
- wlc_cm->bandstate[band->bandunit].valid_channels.
- vec[j] &= sup_chan.vec[j];
- }
-
- brcms_c_quiet_channels_reset(wlc_cm);
- brcms_c_channels_commit(wlc_cm);
-
- return 0;
-}
-
/*
* set the driver's current country and regulatory information
* using a country code as the source. Look up built in country
* information found with the country code.
*/
static void
-brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
- const char *country_abbrev,
- const char *ccode, uint regrev,
- const struct country_info *country)
+brcms_c_set_country(struct brcms_cm_info *wlc_cm,
+ const struct brcms_regd *regd)
{
- const struct locale_info *locale;
struct brcms_c_info *wlc = wlc_cm->wlc;
- char prev_country_abbrev[BRCM_CNTRY_BUF_SZ];
-
- /* save current country state */
- wlc_cm->country = country;
-
- memset(&prev_country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
- strncpy(prev_country_abbrev, wlc_cm->country_abbrev,
- BRCM_CNTRY_BUF_SZ - 1);
-
- strncpy(wlc_cm->country_abbrev, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
- strncpy(wlc_cm->ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
- wlc_cm->regrev = regrev;
if ((wlc->pub->_n_enab & SUPPORT_11N) !=
wlc->protection->nmode_user)
@@ -1042,75 +322,19 @@ brcms_c_set_country_common(struct brcms_cm_info *wlc_cm,
brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_2G_INDEX]);
brcms_c_stf_ss_update(wlc, wlc->bandstate[BAND_5G_INDEX]);
- /* set or restore gmode as required by regulatory */
- locale = brcms_c_get_locale_2g(country->locale_2G);
- if (locale && (locale->flags & BRCMS_NO_OFDM))
- brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
- else
- brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
- brcms_c_channels_init(wlc_cm, country);
+ brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
return;
}
-static int
-brcms_c_set_countrycode_rev(struct brcms_cm_info *wlc_cm,
- const char *country_abbrev,
- const char *ccode, int regrev)
-{
- const struct country_info *country;
- char mapped_ccode[BRCM_CNTRY_BUF_SZ];
- uint mapped_regrev;
-
- /* if regrev is -1, lookup the mapped country code,
- * otherwise use the ccode and regrev directly
- */
- if (regrev == -1) {
- /*
- * map the country code to a built-in country
- * code, regrev, and country_info
- */
- country =
- brcms_c_countrycode_map(wlc_cm, ccode, mapped_ccode,
- &mapped_regrev);
- } else {
- /* find the matching built-in country definition */
- country = brcms_c_country_lookup_direct(ccode, regrev);
- strncpy(mapped_ccode, ccode, BRCM_CNTRY_BUF_SZ);
- mapped_regrev = regrev;
- }
-
- if (country == NULL)
- return -EINVAL;
-
- /* set the driver state for the country */
- brcms_c_set_country_common(wlc_cm, country_abbrev, mapped_ccode,
- mapped_regrev, country);
-
- return 0;
-}
-
-/*
- * set the driver's current country and regulatory information using
- * a country code as the source. Lookup built in country information
- * found with the country code.
- */
-static int
-brcms_c_set_countrycode(struct brcms_cm_info *wlc_cm, const char *ccode)
-{
- char country_abbrev[BRCM_CNTRY_BUF_SZ];
- strncpy(country_abbrev, ccode, BRCM_CNTRY_BUF_SZ);
- return brcms_c_set_countrycode_rev(wlc_cm, country_abbrev, ccode, -1);
-}
-
struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
{
struct brcms_cm_info *wlc_cm;
- char country_abbrev[BRCM_CNTRY_BUF_SZ];
- const struct country_info *country;
struct brcms_pub *pub = wlc->pub;
struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
+ const char *ccode = sprom->alpha2;
+ int ccode_len = sizeof(sprom->alpha2);
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
@@ -1122,24 +346,27 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
wlc->cmi = wlc_cm;
/* store the country code for passing up as a regulatory hint */
- if (sprom->alpha2 && brcms_c_country_valid(sprom->alpha2))
- strncpy(wlc->pub->srom_ccode, sprom->alpha2, sizeof(sprom->alpha2));
+ wlc_cm->world_regd = brcms_world_regd(ccode, ccode_len);
+ if (brcms_c_country_valid(ccode))
+ strncpy(wlc->pub->srom_ccode, ccode, ccode_len);
/*
- * internal country information which must match
- * regulatory constraints in firmware
+ * If no custom world domain is found in the SROM, use the
+ * default "X2" domain.
*/
- memset(country_abbrev, 0, BRCM_CNTRY_BUF_SZ);
- strncpy(country_abbrev, "X2", sizeof(country_abbrev) - 1);
- country = brcms_c_country_lookup(wlc, country_abbrev);
+ if (!wlc_cm->world_regd) {
+ wlc_cm->world_regd = brcms_default_world_regd();
+ ccode = wlc_cm->world_regd->regdomain->alpha2;
+ ccode_len = BRCM_CNTRY_BUF_SZ - 1;
+ }
/* save default country for exiting 11d regulatory mode */
- strncpy(wlc->country_default, country_abbrev, BRCM_CNTRY_BUF_SZ - 1);
+ strncpy(wlc->country_default, ccode, ccode_len);
/* initialize autocountry_default to driver default */
- strncpy(wlc->autocountry_default, "X2", BRCM_CNTRY_BUF_SZ - 1);
+ strncpy(wlc->autocountry_default, ccode, ccode_len);
- brcms_c_set_countrycode(wlc_cm, country_abbrev);
+ brcms_c_set_country(wlc_cm, wlc_cm->world_regd);
return wlc_cm;
}
@@ -1149,31 +376,15 @@ void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm)
kfree(wlc_cm);
}
-u8
-brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
- uint bandunit)
-{
- return wlc_cm->bandstate[bandunit].locale_flags;
-}
-
-static bool
-brcms_c_quiet_chanspec(struct brcms_cm_info *wlc_cm, u16 chspec)
-{
- return (wlc_cm->wlc->pub->_n_enab & SUPPORT_11N) &&
- CHSPEC_IS40(chspec) ?
- (isset(wlc_cm->quiet_channels.vec,
- lower_20_sb(CHSPEC_CHANNEL(chspec))) ||
- isset(wlc_cm->quiet_channels.vec,
- upper_20_sb(CHSPEC_CHANNEL(chspec)))) :
- isset(wlc_cm->quiet_channels.vec, CHSPEC_CHANNEL(chspec));
-}
-
void
brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
u8 local_constraint_qdbm)
{
struct brcms_c_info *wlc = wlc_cm->wlc;
+ struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
+ const struct ieee80211_reg_rule *reg_rule;
struct txpwr_limits txpwr;
+ int ret;
brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr);
@@ -1181,8 +392,15 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
wlc_cm, &txpwr, local_constraint_qdbm
);
+ /* set or restore gmode as required by regulatory */
+ ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule);
+ if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM))
+ brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false);
+ else
+ brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false);
+
brcms_b_set_chanspec(wlc->hw, chanspec,
- (brcms_c_quiet_chanspec(wlc_cm, chanspec) != 0),
+ !!(ch->flags & IEEE80211_CHAN_PASSIVE_SCAN),
&txpwr);
}
@@ -1191,15 +409,14 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
struct txpwr_limits *txpwr)
{
struct brcms_c_info *wlc = wlc_cm->wlc;
+ struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
uint i;
uint chan;
int maxpwr;
int delta;
const struct country_info *country;
struct brcms_band *band;
- const struct locale_info *li;
int conducted_max = BRCMS_TXPWR_MAX;
- int conducted_ofdm_max = BRCMS_TXPWR_MAX;
const struct locale_mimo_info *li_mimo;
int maxpwr20, maxpwr40;
int maxpwr_idx;
@@ -1207,67 +424,35 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
memset(txpwr, 0, sizeof(struct txpwr_limits));
- if (!brcms_c_valid_chanspec_db(wlc_cm, chanspec)) {
- country = brcms_c_country_lookup(wlc, wlc->autocountry_default);
- if (country == NULL)
- return;
- } else {
- country = wlc_cm->country;
- }
+ if (WARN_ON(!ch))
+ return;
+
+ country = &wlc_cm->world_regd->country;
chan = CHSPEC_CHANNEL(chanspec);
band = wlc->bandstate[chspec_bandunit(chanspec)];
- li = (band->bandtype == BRCM_BAND_5G) ?
- brcms_c_get_locale_5g(country->locale_5G) :
- brcms_c_get_locale_2g(country->locale_2G);
-
li_mimo = (band->bandtype == BRCM_BAND_5G) ?
brcms_c_get_mimo_5g(country->locale_mimo_5G) :
brcms_c_get_mimo_2g(country->locale_mimo_2G);
- if (li->flags & BRCMS_EIRP) {
- delta = band->antgain;
- } else {
- delta = 0;
- if (band->antgain > QDB(6))
- delta = band->antgain - QDB(6); /* Excess over 6 dB */
- }
+ delta = band->antgain;
- if (li == &locale_i) {
+ if (band->bandtype == BRCM_BAND_2G)
conducted_max = QDB(22);
- conducted_ofdm_max = QDB(22);
- }
+
+ maxpwr = QDB(ch->max_power) - delta;
+ maxpwr = max(maxpwr, 0);
+ maxpwr = min(maxpwr, conducted_max);
/* CCK txpwr limits for 2.4G band */
if (band->bandtype == BRCM_BAND_2G) {
- maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_CCK(chan)];
-
- maxpwr = maxpwr - delta;
- maxpwr = max(maxpwr, 0);
- maxpwr = min(maxpwr, conducted_max);
-
for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
txpwr->cck[i] = (u8) maxpwr;
}
- /* OFDM txpwr limits for 2.4G or 5G bands */
- if (band->bandtype == BRCM_BAND_2G)
- maxpwr = li->maxpwr[CHANNEL_POWER_IDX_2G_OFDM(chan)];
- else
- maxpwr = li->maxpwr[CHANNEL_POWER_IDX_5G(chan)];
-
- maxpwr = maxpwr - delta;
- maxpwr = max(maxpwr, 0);
- maxpwr = min(maxpwr, conducted_ofdm_max);
-
- /* Keep OFDM lmit below CCK limit */
- if (band->bandtype == BRCM_BAND_2G)
- maxpwr = min_t(int, maxpwr, txpwr->cck[0]);
-
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
+ for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
txpwr->ofdm[i] = (u8) maxpwr;
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++) {
/*
* OFDM 40 MHz SISO has the same power as the corresponding
* MCS0-7 rate unless overriden by the locale specific code.
@@ -1282,14 +467,9 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
txpwr->ofdm_40_cdd[i] = 0;
}
- /* MIMO/HT specific limits */
- if (li_mimo->flags & BRCMS_EIRP) {
- delta = band->antgain;
- } else {
- delta = 0;
- if (band->antgain > QDB(6))
- delta = band->antgain - QDB(6); /* Excess over 6 dB */
- }
+ delta = 0;
+ if (band->antgain > QDB(6))
+ delta = band->antgain - QDB(6); /* Excess over 6 dB */
if (band->bandtype == BRCM_BAND_2G)
maxpwr_idx = (chan - 1);
@@ -1431,8 +611,7 @@ static bool brcms_c_chspec_malformed(u16 chanspec)
* and they are also a legal HT combination
*/
static bool
-brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec,
- bool dualband)
+brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec)
{
struct brcms_c_info *wlc = wlc_cm->wlc;
u8 channel = CHSPEC_CHANNEL(chspec);
@@ -1448,59 +627,163 @@ brcms_c_valid_chanspec_ext(struct brcms_cm_info *wlc_cm, u16 chspec,
chspec_bandunit(chspec))
return false;
- /* Check a 20Mhz channel */
- if (CHSPEC_IS20(chspec)) {
- if (dualband)
- return brcms_c_valid_channel20_db(wlc_cm->wlc->cmi,
- channel);
- else
- return brcms_c_valid_channel20(wlc_cm->wlc->cmi,
- channel);
+ return true;
+}
+
+bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec)
+{
+ return brcms_c_valid_chanspec_ext(wlc_cm, chspec);
+}
+
+static bool brcms_is_radar_freq(u16 center_freq)
+{
+ return center_freq >= 5260 && center_freq <= 5700;
+}
+
+static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ int i;
+
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ if (!brcms_is_radar_freq(ch->center_freq))
+ continue;
+
+ /*
+ * All channels in this range should be passive and have
+ * DFS enabled.
+ */
+ if (!(ch->flags & IEEE80211_CHAN_DISABLED))
+ ch->flags |= IEEE80211_CHAN_RADAR |
+ IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN;
}
-#ifdef SUPPORT_40MHZ
- /*
- * We know we are now checking a 40MHZ channel, so we should
- * only be here for NPHYS
- */
- if (BRCMS_ISNPHY(wlc->band) || BRCMS_ISSSLPNPHY(wlc->band)) {
- u8 upper_sideband = 0, idx;
- u8 num_ch20_entries =
- sizeof(chan20_info) / sizeof(struct chan20_info);
-
- if (!VALID_40CHANSPEC_IN_BAND(wlc, chspec_bandunit(chspec)))
- return false;
-
- if (dualband) {
- if (!brcms_c_valid_channel20_db(wlc->cmi,
- lower_20_sb(channel)) ||
- !brcms_c_valid_channel20_db(wlc->cmi,
- upper_20_sb(channel)))
- return false;
- } else {
- if (!brcms_c_valid_channel20(wlc->cmi,
- lower_20_sb(channel)) ||
- !brcms_c_valid_channel20(wlc->cmi,
- upper_20_sb(channel)))
- return false;
+}
+
+static void
+brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator)
+{
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ const struct ieee80211_reg_rule *rule;
+ int band, i, ret;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ sband = wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ if (ch->flags &
+ (IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR))
+ continue;
+
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
+ ret = freq_reg_info(wiphy, ch->center_freq,
+ 0, &rule);
+ if (ret)
+ continue;
+
+ if (!(rule->flags & NL80211_RRF_NO_IBSS))
+ ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
+ if (!(rule->flags & NL80211_RRF_PASSIVE_SCAN))
+ ch->flags &=
+ ~IEEE80211_CHAN_PASSIVE_SCAN;
+ } else if (ch->beacon_found) {
+ ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN);
+ }
}
+ }
+}
- /* find the lower sideband info in the sideband array */
- for (idx = 0; idx < num_ch20_entries; idx++) {
- if (chan20_info[idx].sb == lower_20_sb(channel))
- upper_sideband = chan20_info[idx].adj_sbs;
+static int brcms_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct brcms_info *wl = hw->priv;
+ struct brcms_c_info *wlc = wl->wlc;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ int band, i;
+ bool ch_found = false;
+
+ brcms_reg_apply_radar_flags(wiphy);
+
+ if (request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
+ brcms_reg_apply_beaconing_flags(wiphy, request->initiator);
+
+ /* Disable radio if all channels disallowed by regulatory */
+ for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) {
+ sband = wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (i = 0; !ch_found && i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+
+ if (!(ch->flags & IEEE80211_CHAN_DISABLED))
+ ch_found = true;
}
- /* check that the lower sideband allows an upper sideband */
- if ((upper_sideband & (CH_UPPER_SB | CH_EWA_VALID)) ==
- (CH_UPPER_SB | CH_EWA_VALID))
- return true;
- return false;
}
-#endif /* 40 MHZ */
- return false;
+ if (ch_found) {
+ mboolclr(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
+ } else {
+ mboolset(wlc->pub->radio_disabled, WL_RADIO_COUNTRY_DISABLE);
+ wiphy_err(wlc->wiphy, "wl%d: %s: no valid channel for \"%s\"\n",
+ wlc->pub->unit, __func__, request->alpha2);
+ }
+
+ if (wlc->pub->_nbands > 1 || wlc->band->bandtype == BRCM_BAND_2G)
+ wlc_phy_chanspec_ch14_widefilter_set(wlc->band->pi,
+ brcms_c_japan_ccode(request->alpha2));
+
+ return 0;
}
-bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm, u16 chspec)
+void brcms_c_regd_init(struct brcms_c_info *wlc)
{
- return brcms_c_valid_chanspec_ext(wlc_cm, chspec, true);
+ struct wiphy *wiphy = wlc->wiphy;
+ const struct brcms_regd *regd = wlc->cmi->world_regd;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *ch;
+ struct brcms_chanvec sup_chan;
+ struct brcms_band *band;
+ int band_idx, i;
+
+ /* Disable any channels not supported by the phy */
+ for (band_idx = 0; band_idx < wlc->pub->_nbands; band_idx++) {
+ band = wlc->bandstate[band_idx];
+
+ wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
+ &sup_chan);
+
+ if (band_idx == BAND_2G_INDEX)
+ sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ else
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ for (i = 0; i < sband->n_channels; i++) {
+ ch = &sband->channels[i];
+ if (!isset(sup_chan.vec, ch->hw_value))
+ ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+ }
+
+ wlc->wiphy->reg_notifier = brcms_reg_notifier;
+ wlc->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_STRICT_REGULATORY;
+ wiphy_apply_custom_regulatory(wlc->wiphy, regd->regdomain);
+ brcms_reg_apply_beaconing_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.h b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
index 808cb4fbfbe7..006483a0abe6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.h
@@ -37,9 +37,6 @@ brcms_c_channel_mgr_attach(struct brcms_c_info *wlc);
extern void brcms_c_channel_mgr_detach(struct brcms_cm_info *wlc_cm);
-extern u8 brcms_c_channel_locale_flags_in_band(struct brcms_cm_info *wlc_cm,
- uint bandunit);
-
extern bool brcms_c_valid_chanspec_db(struct brcms_cm_info *wlc_cm,
u16 chspec);
@@ -49,5 +46,6 @@ extern void brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm,
extern void brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm,
u16 chanspec,
u8 local_constraint_qdbm);
+extern void brcms_c_regd_init(struct brcms_c_info *wlc);
#endif /* _WLC_CHANNEL_H */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 11054ae9d4f6..5e53305bd9a9 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -573,6 +573,7 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
struct dma_info *di;
u8 rev = core->id.rev;
uint size;
+ struct si_info *sii = container_of(sih, struct si_info, pub);
/* allocate private info structure */
di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
@@ -633,16 +634,20 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/
di->ddoffsetlow = 0;
di->dataoffsetlow = 0;
- /* add offset for pcie with DMA64 bus */
- di->ddoffsetlow = 0;
- di->ddoffsethigh = SI_PCIE_DMA_H32;
+ /* for pci bus, add offset */
+ if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) {
+ /* add offset for pcie with DMA64 bus */
+ di->ddoffsetlow = 0;
+ di->ddoffsethigh = SI_PCIE_DMA_H32;
+ }
di->dataoffsetlow = di->ddoffsetlow;
di->dataoffsethigh = di->ddoffsethigh;
+
/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
- if ((core->id.id == SDIOD_CORE_ID)
+ if ((core->id.id == BCMA_CORE_SDIO_DEV)
&& ((rev > 0) && (rev <= 2)))
di->addrext = false;
- else if ((core->id.id == I2S_CORE_ID) &&
+ else if ((core->id.id == BCMA_CORE_I2S) &&
((rev == 0) || (rev == 1)))
di->addrext = false;
else
@@ -1433,7 +1438,7 @@ void dma_walk_packets(struct dma_pub *dmah, void (*callback_fnc)
struct ieee80211_tx_info *tx_info;
while (i != end) {
- skb = (struct sk_buff *)di->txp[i];
+ skb = di->txp[i];
if (skb != NULL) {
tx_info = (struct ieee80211_tx_info *)skb->cb;
(callback_fnc)(tx_info, arg_a);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 50f92a0b7c41..9e79d47e077f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -267,6 +267,7 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct brcms_info *wl = hw->priv;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
spin_lock_bh(&wl->lock);
if (!wl->pub->up) {
@@ -275,6 +276,7 @@ static void brcms_ops_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
goto done;
}
brcms_c_sendpkt_mac80211(wl->wlc, skb, hw);
+ tx_info->rate_driver_data[0] = tx_info->control.sta;
done:
spin_unlock_bh(&wl->lock);
}
@@ -319,8 +321,7 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
return;
spin_lock_bh(&wl->lock);
- status = brcms_c_chipmatch(wl->wlc->hw->vendorid,
- wl->wlc->hw->deviceid);
+ status = brcms_c_chipmatch(wl->wlc->hw->d11core);
spin_unlock_bh(&wl->lock);
if (!status) {
wiphy_err(wl->wiphy,
@@ -721,14 +722,6 @@ static const struct ieee80211_ops brcms_ops = {
.flush = brcms_ops_flush,
};
-/*
- * is called in brcms_bcma_probe() context, therefore no locking required.
- */
-static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
-{
- return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev);
-}
-
void brcms_dpc(unsigned long data)
{
struct brcms_info *wl;
@@ -1058,6 +1051,8 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
goto fail;
}
+ brcms_c_regd_init(wl->wlc);
+
memcpy(perm, &wl->pub->cur_etheraddr, ETH_ALEN);
if (WARN_ON(!is_valid_ether_addr(perm)))
goto fail;
@@ -1068,9 +1063,9 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
"%d\n", __func__, err);
- if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode))
- wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n",
- __func__, err);
+ if (wl->pub->srom_ccode[0] &&
+ regulatory_hint(wl->wiphy, wl->pub->srom_ccode))
+ wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__);
n_adapters_found++;
return wl;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 19db4052c44c..03ca65324845 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -18,6 +18,7 @@
#include <linux/pci_ids.h>
#include <linux/if_ether.h>
+#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <brcm_hw_ids.h>
#include <aiutils.h>
@@ -268,7 +269,7 @@ struct brcms_c_bit_desc {
*/
/* Starting corerev for the fifo size table */
-#define XMTFIFOTBL_STARTREV 20
+#define XMTFIFOTBL_STARTREV 17
struct d11init {
__le16 addr;
@@ -332,6 +333,12 @@ const u8 wlc_prio2prec_map[] = {
};
static const u16 xmtfifo_sz[][NFIFO] = {
+ /* corerev 17: 5120, 49152, 49152, 5376, 4352, 1280 */
+ {20, 192, 192, 21, 17, 5},
+ /* corerev 18: */
+ {0, 0, 0, 0, 0, 0},
+ /* corerev 19: */
+ {0, 0, 0, 0, 0, 0},
/* corerev 20: 5120, 49152, 49152, 5376, 4352, 1280 */
{20, 192, 192, 21, 17, 5},
/* corerev 21: 2304, 14848, 5632, 3584, 3584, 1280 */
@@ -342,6 +349,14 @@ static const u16 xmtfifo_sz[][NFIFO] = {
{20, 192, 192, 21, 17, 5},
/* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */
{9, 58, 22, 14, 14, 5},
+ /* corerev 25: */
+ {0, 0, 0, 0, 0, 0},
+ /* corerev 26: */
+ {0, 0, 0, 0, 0, 0},
+ /* corerev 27: */
+ {0, 0, 0, 0, 0, 0},
+ /* corerev 28: 2304, 14848, 5632, 3584, 3584, 1280 */
+ {9, 58, 22, 14, 14, 5},
};
#ifdef DEBUG
@@ -878,7 +893,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
tx_info = IEEE80211_SKB_CB(p);
h = (struct ieee80211_hdr *)((u8 *) (txh + 1) + D11_PHY_HDR_LEN);
- if (tx_info->control.sta)
+ if (tx_info->rate_driver_data[0])
scb = &wlc->pri_scb;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
@@ -1941,7 +1956,8 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
* accesses phyreg throughput mac. This can be skipped since
* only mac reg is accessed below
*/
- flags |= SICF_PCLKE;
+ if (D11REV_GE(wlc_hw->corerev, 18))
+ flags |= SICF_PCLKE;
/*
* TODO: test suspend/resume
@@ -2022,7 +2038,8 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
* phyreg throughput mac, AND phy_reset is skipped at early stage when
* band->pi is invalid. need to enable PHY CLK
*/
- flags |= SICF_PCLKE;
+ if (D11REV_GE(wlc_hw->corerev, 18))
+ flags |= SICF_PCLKE;
/*
* reset the core
@@ -2125,8 +2142,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
{
struct bcma_device *core = wlc_hw->d11core;
- if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
- (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
+ if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43224) ||
+ (ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43225)) {
if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
@@ -2790,7 +2807,7 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
tmp = 0;
if (on) {
- if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
+ if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM4313)) {
bcma_set32(core, D11REGOFFS(clk_ctl_st),
CCS_ERSRC_REQ_HT |
CCS_ERSRC_REQ_D11PLL |
@@ -3139,20 +3156,6 @@ void brcms_c_reset(struct brcms_c_info *wlc)
brcms_b_reset(wlc->hw);
}
-/* Return the channel the driver should initialize during brcms_c_init.
- * the channel may have to be changed from the currently configured channel
- * if other configurations are in conflict (bandlocked, 11n mode disabled,
- * invalid channel for current country, etc.)
- */
-static u16 brcms_c_init_chanspec(struct brcms_c_info *wlc)
-{
- u16 chanspec =
- 1 | WL_CHANSPEC_BW_20 | WL_CHANSPEC_CTL_SB_NONE |
- WL_CHANSPEC_BAND_2G;
-
- return chanspec;
-}
-
void brcms_c_init_scb(struct scb *scb)
{
int i;
@@ -4231,9 +4234,8 @@ static void brcms_c_radio_timer(void *arg)
}
/* common low-level watchdog code */
-static void brcms_b_watchdog(void *arg)
+static void brcms_b_watchdog(struct brcms_c_info *wlc)
{
- struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
struct brcms_hardware *wlc_hw = wlc->hw;
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
@@ -4254,10 +4256,8 @@ static void brcms_b_watchdog(void *arg)
}
/* common watchdog code */
-static void brcms_c_watchdog(void *arg)
+static void brcms_c_watchdog(struct brcms_c_info *wlc)
{
- struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
-
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
if (!wlc->pub->up)
@@ -4297,7 +4297,9 @@ static void brcms_c_watchdog(void *arg)
static void brcms_c_watchdog_by_timer(void *arg)
{
- brcms_c_watchdog(arg);
+ struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
+
+ brcms_c_watchdog(wlc);
}
static bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit)
@@ -4467,11 +4469,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
}
/* verify again the device is supported */
- if (core->bus->hosttype == BCMA_HOSTTYPE_PCI &&
- !brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
- wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
- "vendor/device (0x%x/0x%x)\n",
- unit, pcidev->vendor, pcidev->device);
+ if (!brcms_c_chipmatch(core)) {
+ wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported device\n",
+ unit);
err = 12;
goto fail;
}
@@ -4541,7 +4541,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
else
wlc_hw->_nbands = 1;
- if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
+ if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43225))
wlc_hw->_nbands = 1;
/* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@@ -4608,8 +4608,12 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
wlc_hw->machwcap_backup = wlc_hw->machwcap;
/* init tx fifo size */
+ WARN_ON((wlc_hw->corerev - XMTFIFOTBL_STARTREV) < 0 ||
+ (wlc_hw->corerev - XMTFIFOTBL_STARTREV) >
+ ARRAY_SIZE(xmtfifo_sz));
wlc_hw->xmtfifo_sz =
xmtfifo_sz[(wlc_hw->corerev - XMTFIFOTBL_STARTREV)];
+ WARN_ON(!wlc_hw->xmtfifo_sz[0]);
/* Get a phy for this band */
wlc_hw->band->pi =
@@ -5049,7 +5053,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
wlc_hw->wlc->pub->hw_up = true;
if ((wlc_hw->boardflags & BFL_FEM)
- && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM4313)) {
if (!
(wlc_hw->boardrev >= 0x1250
&& (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -5129,6 +5133,8 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
/* make interface operational */
int brcms_c_up(struct brcms_c_info *wlc)
{
+ struct ieee80211_channel *ch;
+
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
/* HW is turned off so don't try to access it */
@@ -5141,7 +5147,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
}
if ((wlc->pub->boardflags & BFL_FEM)
- && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc->hw->sih) == BCMA_CHIP_ID_BCM4313)) {
if (wlc->pub->boardrev >= 0x1250
&& (wlc->pub->boardflags & BFL_FEM_BT))
brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@@ -5195,8 +5201,9 @@ int brcms_c_up(struct brcms_c_info *wlc)
wlc->pub->up = true;
if (wlc->bandinit_pending) {
+ ch = wlc->pub->ieee_hw->conf.channel;
brcms_c_suspend_mac_and_wait(wlc);
- brcms_c_set_chanspec(wlc, wlc->default_bss->chanspec);
+ brcms_c_set_chanspec(wlc, ch20mhz_chspec(ch->hw_value));
wlc->bandinit_pending = false;
brcms_c_enable_mac(wlc);
}
@@ -5397,11 +5404,6 @@ int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config)
else
return -EINVAL;
- /* Legacy or bust when no OFDM is supported by regulatory */
- if ((brcms_c_channel_locale_flags_in_band(wlc->cmi, band->bandunit) &
- BRCMS_NO_OFDM) && (gmode != GMODE_LEGACY_B))
- return -EINVAL;
-
/* update configuration value */
if (config)
brcms_c_protection_upd(wlc, BRCMS_PROT_G_USER, gmode);
@@ -5782,8 +5784,12 @@ void brcms_c_print_txstatus(struct tx_status *txs)
(txs->ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
}
-bool brcms_c_chipmatch(u16 vendor, u16 device)
+static bool brcms_c_chipmatch_pci(struct bcma_device *core)
{
+ struct pci_dev *pcidev = core->bus->host_pci;
+ u16 vendor = pcidev->vendor;
+ u16 device = pcidev->device;
+
if (vendor != PCI_VENDOR_ID_BROADCOM) {
pr_err("unknown vendor id %04x\n", vendor);
return false;
@@ -5802,6 +5808,30 @@ bool brcms_c_chipmatch(u16 vendor, u16 device)
return false;
}
+static bool brcms_c_chipmatch_soc(struct bcma_device *core)
+{
+ struct bcma_chipinfo *chipinfo = &core->bus->chipinfo;
+
+ if (chipinfo->id == BCMA_CHIP_ID_BCM4716)
+ return true;
+
+ pr_err("unknown chip id %04x\n", chipinfo->id);
+ return false;
+}
+
+bool brcms_c_chipmatch(struct bcma_device *core)
+{
+ switch (core->bus->hosttype) {
+ case BCMA_HOSTTYPE_PCI:
+ return brcms_c_chipmatch_pci(core);
+ case BCMA_HOSTTYPE_SOC:
+ return brcms_c_chipmatch_soc(core);
+ default:
+ pr_err("unknown host type: %i\n", core->bus->hosttype);
+ return false;
+ }
+}
+
#if defined(DEBUG)
void brcms_c_print_txdesc(struct d11txh *txh)
{
@@ -8201,19 +8231,12 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
{
struct bcma_device *core = wlc->hw->d11core;
+ struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel;
u16 chanspec;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
- /*
- * This will happen if a big-hammer was executed. In
- * that case, we want to go back to the channel that
- * we were on and not new channel
- */
- if (wlc->pub->associated)
- chanspec = wlc->home_chanspec;
- else
- chanspec = brcms_c_init_chanspec(wlc);
+ chanspec = ch20mhz_chspec(ch->hw_value);
brcms_b_init(wlc->hw, chanspec);
@@ -8318,7 +8341,7 @@ brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
struct brcms_pub *pub;
/* allocate struct brcms_c_info state and its substructures */
- wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0);
+ wlc = brcms_c_attach_malloc(unit, &err, 0);
if (wlc == NULL)
goto fail;
wlc->wiphy = wl->wiphy;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index 264f8c4c703d..91937c5025ce 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -198,6 +198,8 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
+ struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
+
if ((D11REV_GE(pi->sh->corerev, 24)) ||
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
@@ -209,7 +211,8 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
}
- if (++pi->phy_wreg >= pi->phy_wreg_limit) {
+ if ((sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) &&
+ (++pi->phy_wreg >= pi->phy_wreg_limit)) {
(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
pi->phy_wreg = 0;
}
@@ -292,10 +295,13 @@ void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
if (addr == 0x72)
- (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
#else
+ struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
+
bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
- if (++pi->phy_wreg >= pi->phy_wreg_limit) {
+ if ((sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) &&
+ (++pi->phy_wreg >= pi->phy_wreg_limit)) {
pi->phy_wreg = 0;
(void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
}
@@ -837,7 +843,7 @@ wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
pi->tbl_data_hi = tblDataHi;
pi->tbl_data_lo = tblDataLo;
- if (pi->sh->chip == BCM43224_CHIP_ID &&
+ if (pi->sh->chip == BCMA_CHIP_ID_BCM43224 &&
pi->sh->chiprev == 1) {
pi->tbl_addr = tblAddr;
pi->tbl_save_id = tbl_id;
@@ -847,7 +853,7 @@ wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val)
{
- if ((pi->sh->chip == BCM43224_CHIP_ID) &&
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
(pi->sh->chiprev == 1) &&
(pi->tbl_save_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
read_phy_reg(pi, pi->tbl_data_lo);
@@ -881,7 +887,7 @@ wlc_phy_write_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
- if ((pi->sh->chip == BCM43224_CHIP_ID) &&
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
(pi->sh->chiprev == 1) &&
(tbl_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
read_phy_reg(pi, tblDataLo);
@@ -918,7 +924,7 @@ wlc_phy_read_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
- if ((pi->sh->chip == BCM43224_CHIP_ID) &&
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
(pi->sh->chiprev == 1)) {
(void)read_phy_reg(pi, tblDataLo);
@@ -2894,7 +2900,7 @@ const u8 *wlc_phy_get_ofdm_rate_lookup(void)
void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
{
- if ((pi->sh->chip == BCM4313_CHIP_ID) &&
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4313) &&
(pi->sh->boardflags & BFL_FEM)) {
if (mode) {
u16 txant = 0;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 13b261517cce..65db9b7458dc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -14358,7 +14358,7 @@ void wlc_phy_nphy_tkip_rifs_war(struct brcms_phy *pi, u8 rifs)
wlc_phy_write_txmacreg_nphy(pi, holdoff, delay);
- if (pi && pi->sh && (pi->sh->_rifs_phy != rifs))
+ if (pi->sh && (pi->sh->_rifs_phy != rifs))
pi->sh->_rifs_phy = rifs;
}
@@ -17893,6 +17893,8 @@ static u32 *wlc_phy_get_ipa_gaintbl_nphy(struct brcms_phy *pi)
nphy_tpc_txgain_ipa_2g_2057rev7;
} else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev6;
+ if (pi->sh->chip == BCMA_CHIP_ID_BCM47162)
+ tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
} else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
} else {
@@ -19254,8 +19256,14 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
case 38:
case 102:
case 118:
- nphy_adj_tone_id_buf[0] = 0;
- nphy_adj_noise_var_buf[0] = 0x0;
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) &&
+ (pi->sh->chippkg == BCMA_PKG_ID_BCM4717)) {
+ nphy_adj_tone_id_buf[0] = 32;
+ nphy_adj_noise_var_buf[0] = 0x21f;
+ } else {
+ nphy_adj_tone_id_buf[0] = 0;
+ nphy_adj_noise_var_buf[0] = 0x0;
+ }
break;
case 134:
nphy_adj_tone_id_buf[0] = 32;
@@ -19309,8 +19317,8 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
if ((ISNPHY(pi)) && (NREV_GE(pi->pubpi.phy_rev, 5)) &&
- ((pi->sh->chippkg == BCM4717_PKG_ID) ||
- (pi->sh->chippkg == BCM4718_PKG_ID))) {
+ ((pi->sh->chippkg == BCMA_PKG_ID_BCM4717) ||
+ (pi->sh->chippkg == BCMA_PKG_ID_BCM4718))) {
if ((pi->sh->boardflags & BFL_EXTLNA) &&
(CHSPEC_IS2G(pi->radio_chanspec)))
ai_cc_reg(pi->sh->sih,
@@ -19318,6 +19326,10 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
0x40, 0x40);
}
+ if ((!PHY_IPA(pi)) && (pi->sh->chip == BCMA_CHIP_ID_BCM5357))
+ si_pmu_chipcontrol(pi->sh->sih, 1, CCTRL5357_EXTPA,
+ CCTRL5357_EXTPA);
+
if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
CHSPEC_IS40(pi->radio_chanspec)) {
@@ -20695,12 +20707,22 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
write_radio_reg(pi, RADIO_2056_SYN_PLL_LOOPFILTER2 |
RADIO_2056_SYN, 0x1f);
- write_radio_reg(pi,
- RADIO_2056_SYN_PLL_LOOPFILTER4 |
- RADIO_2056_SYN, 0xb);
- write_radio_reg(pi,
- RADIO_2056_SYN_PLL_CP2 |
- RADIO_2056_SYN, 0x14);
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
+ (pi->sh->chip == BCMA_CHIP_ID_BCM47162)) {
+ write_radio_reg(pi,
+ RADIO_2056_SYN_PLL_LOOPFILTER4 |
+ RADIO_2056_SYN, 0x14);
+ write_radio_reg(pi,
+ RADIO_2056_SYN_PLL_CP2 |
+ RADIO_2056_SYN, 0x00);
+ } else {
+ write_radio_reg(pi,
+ RADIO_2056_SYN_PLL_LOOPFILTER4 |
+ RADIO_2056_SYN, 0xb);
+ write_radio_reg(pi,
+ RADIO_2056_SYN_PLL_CP2 |
+ RADIO_2056_SYN, 0x14);
+ }
}
}
@@ -20747,24 +20769,30 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
PADG_IDAC, 0xcc);
- bias = 0x25;
- cascbias = 0x20;
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
+ (pi->sh->chip == BCMA_CHIP_ID_BCM47162)) {
+ bias = 0x40;
+ cascbias = 0x45;
+ pag_boost_tune = 0x5;
+ pgag_boost_tune = 0x33;
+ padg_boost_tune = 0x77;
+ mixg_boost_tune = 0x55;
+ } else {
+ bias = 0x25;
+ cascbias = 0x20;
- if ((pi->sh->chip ==
- BCM43224_CHIP_ID)
- || (pi->sh->chip ==
- BCM43225_CHIP_ID)) {
- if (pi->sh->chippkg ==
- BCM43224_FAB_SMIC) {
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224 ||
+ pi->sh->chip == BCMA_CHIP_ID_BCM43225) &&
+ pi->sh->chippkg == BCMA_PKG_ID_BCM43224_FAB_SMIC) {
bias = 0x2a;
cascbias = 0x38;
}
- }
- pag_boost_tune = 0x4;
- pgag_boost_tune = 0x03;
- padg_boost_tune = 0x77;
- mixg_boost_tune = 0x65;
+ pag_boost_tune = 0x4;
+ pgag_boost_tune = 0x03;
+ padg_boost_tune = 0x77;
+ mixg_boost_tune = 0x65;
+ }
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
INTPAG_IMAIN_STAT, bias);
@@ -20863,11 +20891,10 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
cascbias = 0x30;
- if ((pi->sh->chip == BCM43224_CHIP_ID) ||
- (pi->sh->chip == BCM43225_CHIP_ID)) {
- if (pi->sh->chippkg == BCM43224_FAB_SMIC)
- cascbias = 0x35;
- }
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224 ||
+ pi->sh->chip == BCMA_CHIP_ID_BCM43225) &&
+ pi->sh->chippkg == BCMA_PKG_ID_BCM43224_FAB_SMIC)
+ cascbias = 0x35;
pabias = (pi->phy_pabias == 0) ? 0x30 : pi->phy_pabias;
@@ -21106,6 +21133,7 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
const struct nphy_sfo_cfg *ci)
{
u16 val;
+ struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
if (CHSPEC_IS5G(chanspec) && !val) {
@@ -21178,22 +21206,32 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
} else if (NREV_GE(pi->pubpi.phy_rev, 7)) {
if (val == 54)
spuravoid = 1;
- } else {
- if (pi->nphy_aband_spurwar_en &&
- ((val == 38) || (val == 102)
- || (val == 118)))
+ } else if (pi->nphy_aband_spurwar_en &&
+ ((val == 38) || (val == 102) || (val == 118))) {
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716)
+ && (pi->sh->chippkg == BCMA_PKG_ID_BCM4717)) {
+ spuravoid = 0;
+ } else {
spuravoid = 1;
+ }
}
if (pi->phy_spuravoid == SPURAVOID_FORCEON)
spuravoid = 1;
- wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
- si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid);
- wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
+ (pi->sh->chip == BCMA_CHIP_ID_BCM43225)) {
+ bcma_pmu_spuravoid_pllupdate(&sii->icbus->drv_cc,
+ spuravoid);
+ } else {
+ wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
+ bcma_pmu_spuravoid_pllupdate(&sii->icbus->drv_cc,
+ spuravoid);
+ wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
+ }
- if ((pi->sh->chip == BCM43224_CHIP_ID) ||
- (pi->sh->chip == BCM43225_CHIP_ID)) {
+ if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) ||
+ (pi->sh->chip == BCMA_CHIP_ID_BCM43225)) {
if (spuravoid == 1) {
bcma_write16(pi->d11core,
D11REGOFFS(tsf_clk_frac_l),
@@ -21209,7 +21247,9 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
}
}
- wlapi_bmac_core_phypll_reset(pi->sh->physhim);
+ if (!((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
+ (pi->sh->chip == BCMA_CHIP_ID_BCM47162)))
+ wlapi_bmac_core_phypll_reset(pi->sh->physhim);
mod_phy_reg(pi, 0x01, (0x1 << 15),
((spuravoid > 0) ? (0x1 << 15) : 0));
@@ -22171,9 +22211,15 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x03, 16,
&auxADC_rssi_ctrlH_save);
- radio_temp[0] = (179 * (radio_temp[1] + radio_temp2[1])
- + 82 * (auxADC_Vl) - 28861 +
- 128) / 256;
+ if (pi->sh->chip == BCMA_CHIP_ID_BCM5357) {
+ radio_temp[0] = (193 * (radio_temp[1] + radio_temp2[1])
+ + 88 * (auxADC_Vl) - 27111 +
+ 128) / 256;
+ } else {
+ radio_temp[0] = (179 * (radio_temp[1] + radio_temp2[1])
+ + 82 * (auxADC_Vl) - 28861 +
+ 128) / 256;
+ }
offset = (s16) pi->phy_tempsense_offset;
@@ -24923,14 +24969,16 @@ wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
if (txgains->useindex) {
phy_a4 = 15 - ((txgains->index) >> 3);
if (CHSPEC_IS2G(pi->radio_chanspec)) {
- if (NREV_GE(pi->pubpi.phy_rev, 6))
+ if (NREV_GE(pi->pubpi.phy_rev, 6) &&
+ pi->sh->chip == BCMA_CHIP_ID_BCM47162) {
+ phy_a5 = 0x10f7 | (phy_a4 << 8);
+ } else if (NREV_GE(pi->pubpi.phy_rev, 6)) {
phy_a5 = 0x00f7 | (phy_a4 << 8);
-
- else
- if (NREV_IS(pi->pubpi.phy_rev, 5))
+ } else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
phy_a5 = 0x10f7 | (phy_a4 << 8);
- else
+ } else {
phy_a5 = 0x50f7 | (phy_a4 << 8);
+ }
} else {
phy_a5 = 0x70f7 | (phy_a4 << 8);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index 4931d29d077b..7e9df566c733 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -74,16 +74,6 @@
* PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary
* number to differentiate different PLLs controlled by the same PMU rev.
*/
-/* pllcontrol registers:
- * ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>,
- * p1div, p2div, _bypass_sdmod
- */
-#define PMU1_PLL0_PLLCTL0 0
-#define PMU1_PLL0_PLLCTL1 1
-#define PMU1_PLL0_PLLCTL2 2
-#define PMU1_PLL0_PLLCTL3 3
-#define PMU1_PLL0_PLLCTL4 4
-#define PMU1_PLL0_PLLCTL5 5
/* pmu XtalFreqRatio */
#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
@@ -108,118 +98,14 @@
#define RES4313_HT_AVAIL_RSRC 14
#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
-/* Determine min/max rsrc masks. Value 0 leaves hardware at default. */
-static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
-{
- u32 min_mask = 0, max_mask = 0;
- uint rsrcs;
-
- /* # resources */
- rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
-
- /* determine min/max rsrc masks */
- switch (ai_get_chip_id(sih)) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- /* ??? */
- break;
-
- case BCM4313_CHIP_ID:
- min_mask = PMURES_BIT(RES4313_BB_PU_RSRC) |
- PMURES_BIT(RES4313_XTAL_PU_RSRC) |
- PMURES_BIT(RES4313_ALP_AVAIL_RSRC) |
- PMURES_BIT(RES4313_BB_PLL_PWRSW_RSRC);
- max_mask = 0xffff;
- break;
- default:
- break;
- }
-
- *pmin = min_mask;
- *pmax = max_mask;
-}
-
-void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
-{
- u32 tmp = 0;
- struct bcma_device *core;
-
- /* switch to chipc */
- core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
-
- switch (ai_get_chip_id(sih)) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- if (spuravoid == 1) {
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL0);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x11500010);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL1);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x000C0C06);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL2);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x0F600a08);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL3);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x00000000);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL4);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x2001E920);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL5);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x88888815);
- } else {
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL0);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x11100010);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL1);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x000c0c06);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL2);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x03000a08);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL3);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x00000000);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL4);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x200005c0);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
- PMU1_PLL0_PLLCTL5);
- bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
- 0x88888815);
- }
- tmp = 1 << 10;
- break;
-
- default:
- /* bail out */
- return;
- }
-
- bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
-}
-
u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
{
uint delay = PMU_MAX_TRANSITION_DLY;
switch (ai_get_chip_id(sih)) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- case BCM4313_CHIP_ID:
+ case BCMA_CHIP_ID_BCM43224:
+ case BCMA_CHIP_ID_BCM43225:
+ case BCMA_CHIP_ID_BCM4313:
delay = 3700;
break;
default:
@@ -270,9 +156,9 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
return clock;
switch (ai_get_chip_id(sih)) {
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- case BCM4313_CHIP_ID:
+ case BCMA_CHIP_ID_BCM43224:
+ case BCMA_CHIP_ID_BCM43225:
+ case BCMA_CHIP_ID_BCM4313:
/* always 20Mhz */
clock = 20000 * 1000;
break;
@@ -283,51 +169,9 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
return clock;
}
-/* initialize PMU */
-void si_pmu_init(struct si_pub *sih)
-{
- struct bcma_device *core;
-
- /* select chipc */
- core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
-
- if (ai_get_pmurev(sih) == 1)
- bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
- ~PCTL_NOILP_ON_WAIT);
- else if (ai_get_pmurev(sih) >= 2)
- bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
-}
-
-/* initialize PMU resources */
-void si_pmu_res_init(struct si_pub *sih)
-{
- struct bcma_device *core;
- u32 min_mask = 0, max_mask = 0;
-
- /* select to chipc */
- core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
-
- /* Determine min/max rsrc masks */
- si_pmu_res_masks(sih, &min_mask, &max_mask);
-
- /* It is required to program max_mask first and then min_mask */
-
- /* Program max resource mask */
-
- if (max_mask)
- bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
-
- /* Program min resource mask */
-
- if (min_mask)
- bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
-
- /* Add some delay; allow resources to come up and settle. */
- mdelay(2);
-}
-
u32 si_pmu_measure_alpclk(struct si_pub *sih)
{
+ struct si_info *sii = container_of(sih, struct si_info, pub);
struct bcma_device *core;
u32 alp_khz;
@@ -335,7 +179,7 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih)
return 0;
/* Remember original core before switch to chipc */
- core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ core = sii->icbus->drv_cc.core;
if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
u32 ilp_ctr, alp_hz;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 3e39c5e0f9ff..f7cff873578b 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -26,10 +26,7 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_alp_clock(struct si_pub *sih);
extern void si_pmu_pllupd(struct si_pub *sih);
-extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
-extern void si_pmu_init(struct si_pub *sih);
-extern void si_pmu_res_init(struct si_pub *sih);
extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index aa5d67f8d874..5855f4fd16dc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -311,7 +311,7 @@ extern uint brcms_c_detach(struct brcms_c_info *wlc);
extern int brcms_c_up(struct brcms_c_info *wlc);
extern uint brcms_c_down(struct brcms_c_info *wlc);
-extern bool brcms_c_chipmatch(u16 vendor, u16 device);
+extern bool brcms_c_chipmatch(struct bcma_device *core);
extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
extern void brcms_c_reset(struct brcms_c_info *wlc);
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index b45ab34cdfdc..3e6405e06ac0 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -43,6 +43,8 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
/* Free the driver packet. Free the tag if present */
void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
{
+ if (!skb)
+ return;
WARN_ON(skb->next);
if (skb->destructor)
/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 333193f20e1c..bcc79b4e3267 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -37,5 +37,6 @@
#define BCM4329_CHIP_ID 0x4329
#define BCM4330_CHIP_ID 0x4330
#define BCM4331_CHIP_ID 0x4331
+#define BCM4334_CHIP_ID 0x4334
#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/soc.h b/drivers/net/wireless/brcm80211/include/soc.h
index 4e9b7e4827ea..123cfa854a0d 100644
--- a/drivers/net/wireless/brcm80211/include/soc.h
+++ b/drivers/net/wireless/brcm80211/include/soc.h
@@ -19,68 +19,6 @@
#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
-/* core codes */
-#define NODEV_CORE_ID 0x700 /* Invalid coreid */
-#define CC_CORE_ID 0x800 /* chipcommon core */
-#define ILINE20_CORE_ID 0x801 /* iline20 core */
-#define SRAM_CORE_ID 0x802 /* sram core */
-#define SDRAM_CORE_ID 0x803 /* sdram core */
-#define PCI_CORE_ID 0x804 /* pci core */
-#define MIPS_CORE_ID 0x805 /* mips core */
-#define ENET_CORE_ID 0x806 /* enet mac core */
-#define CODEC_CORE_ID 0x807 /* v90 codec core */
-#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
-#define ADSL_CORE_ID 0x809 /* ADSL core */
-#define ILINE100_CORE_ID 0x80a /* iline100 core */
-#define IPSEC_CORE_ID 0x80b /* ipsec core */
-#define UTOPIA_CORE_ID 0x80c /* utopia core */
-#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
-#define SOCRAM_CORE_ID 0x80e /* internal memory core */
-#define MEMC_CORE_ID 0x80f /* memc sdram core */
-#define OFDM_CORE_ID 0x810 /* OFDM phy core */
-#define EXTIF_CORE_ID 0x811 /* external interface core */
-#define D11_CORE_ID 0x812 /* 802.11 MAC core */
-#define APHY_CORE_ID 0x813 /* 802.11a phy core */
-#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
-#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
-#define MIPS33_CORE_ID 0x816 /* mips3302 core */
-#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
-#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
-#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
-#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
-#define SDIOH_CORE_ID 0x81b /* sdio host core */
-#define ROBO_CORE_ID 0x81c /* roboswitch core */
-#define ATA100_CORE_ID 0x81d /* parallel ATA core */
-#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
-#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
-#define PCIE_CORE_ID 0x820 /* pci express core */
-#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
-#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
-#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
-#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
-#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
-#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
-#define PMU_CORE_ID 0x827 /* PMU core */
-#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
-#define SDIOD_CORE_ID 0x829 /* SDIO device core */
-#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
-#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
-#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
-#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
-#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
-#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
-#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
-#define SC_CORE_ID 0x831 /* shared common core */
-#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
-#define SPIH_CORE_ID 0x833 /* SPI host core */
-#define I2S_CORE_ID 0x834 /* I2S core */
-#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
-#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
-#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
- * maps all unused address ranges
- */
-
/* Common core control flags */
#define SICF_BIST_EN 0x8000
#define SICF_PME_EN 0x4000
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index 75ef8f04aabe..dc447c1b5abe 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -58,8 +58,7 @@ static int prism2_stats_proc_read(char *page, char **start, off_t off,
{
char *p = page;
local_info_t *local = (local_info_t *) data;
- struct comm_tallies_sums *sums = (struct comm_tallies_sums *)
- &local->comm_tallies;
+ struct comm_tallies_sums *sums = &local->comm_tallies;
if (off != 0) {
*eof = 1;
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 0036737fe8e3..0df459147394 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2701,6 +2701,20 @@ static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
}
+static void ipw_read_eeprom(struct ipw_priv *priv)
+{
+ int i;
+ __le16 *eeprom = (__le16 *) priv->eeprom;
+
+ IPW_DEBUG_TRACE(">>\n");
+
+ /* read entire contents of eeprom into private buffer */
+ for (i = 0; i < 128; i++)
+ eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
+
+ IPW_DEBUG_TRACE("<<\n");
+}
+
/*
* Either the device driver (i.e. the host) or the firmware can
* load eeprom data into the designated region in SRAM. If neither
@@ -2712,14 +2726,9 @@ static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
static void ipw_eeprom_init_sram(struct ipw_priv *priv)
{
int i;
- __le16 *eeprom = (__le16 *) priv->eeprom;
IPW_DEBUG_TRACE(">>\n");
- /* read entire contents of eeprom into private buffer */
- for (i = 0; i < 128; i++)
- eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
-
/*
If the data looks correct, then copy it to our private
copy. Otherwise let the firmware know to perform the operation
@@ -3643,8 +3652,10 @@ static int ipw_load(struct ipw_priv *priv)
/* ack fw init done interrupt */
ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
- /* read eeprom data and initialize the eeprom region of sram */
+ /* read eeprom data */
priv->eeprom_delay = 1;
+ ipw_read_eeprom(priv);
+ /* initialize the eeprom region of sram */
ipw_eeprom_init_sram(priv);
/* enable interrupts */
@@ -7069,9 +7080,7 @@ static int ipw_qos_activate(struct ipw_priv *priv,
}
IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
- err = ipw_send_qos_params_command(priv,
- (struct libipw_qos_parameters *)
- &(qos_parameters[0]));
+ err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
if (err)
IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index 4b10157d8686..d4fd29ad90dc 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -946,7 +946,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
case IEEE80211_BAND_5GHZ:
rs_sta->expected_tpt = il3945_expected_tpt_a;
break;
- case IEEE80211_NUM_BANDS:
+ default:
BUG();
break;
}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 509301a5e7e2..34f61a0581a2 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -3405,7 +3405,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
return 0;
}
- if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
key_flags);
spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -3420,7 +3420,7 @@ il4965_remove_dynamic_key(struct il_priv *il,
memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
il->stations[sta_id].sta.key.key_flags =
STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
- il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -5724,7 +5724,8 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->flags |=
- WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
+ WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
/*
* For now, disable PS by default because it affects
@@ -5873,6 +5874,16 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS, the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ D_MAC80211("leave - ad-hoc group key\n");
+ return -EOPNOTSUPP;
+ }
+
sta_id = il_sta_id_or_broadcast(il, sta);
if (sta_id == IL_INVALID_STATION)
return -EINVAL;
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index cbf2dc18341f..0370403fd0bd 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4717,10 +4717,11 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
struct il_tx_queue *txq = &il->txq[cnt];
struct il_queue *q = &txq->q;
unsigned long timeout;
+ unsigned long now = jiffies;
int ret;
if (q->read_ptr == q->write_ptr) {
- txq->time_stamp = jiffies;
+ txq->time_stamp = now;
return 0;
}
@@ -4728,9 +4729,9 @@ il_check_stuck_queue(struct il_priv *il, int cnt)
txq->time_stamp +
msecs_to_jiffies(il->cfg->wd_timeout);
- if (time_after(jiffies, timeout)) {
+ if (time_after(now, timeout)) {
IL_ERR("Queue %d stuck for %u ms.\n", q->id,
- il->cfg->wd_timeout);
+ jiffies_to_msecs(now - txq->time_stamp));
ret = il_force_reset(il, false);
return (ret == -EAGAIN) ? 0 : 1;
}
@@ -4767,14 +4768,12 @@ il_bg_watchdog(unsigned long data)
return;
/* monitor and check for other stuck queues */
- if (il_is_any_associated(il)) {
- for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
- /* skip as we already checked the command queue */
- if (cnt == il->cmd_queue)
- continue;
- if (il_check_stuck_queue(il, cnt))
- return;
- }
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == il->cmd_queue)
+ continue;
+ if (il_check_stuck_queue(il, cnt))
+ return;
}
mod_timer(&il->watchdog,
@@ -5360,7 +5359,7 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changes & BSS_CHANGED_ASSOC) {
D_MAC80211("ASSOC %d\n", bss_conf->assoc);
if (bss_conf->assoc) {
- il->timestamp = bss_conf->last_tsf;
+ il->timestamp = bss_conf->sync_tsf;
if (!il_is_rfkill(il))
il->ops->post_associate(il);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 2463c0626438..727fbb5db9da 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -6,6 +6,7 @@ config IWLWIFI
select LEDS_CLASS
select LEDS_TRIGGERS
select MAC80211_LEDS
+ select IWLDVM
---help---
Select to build the driver supporting the:
@@ -41,6 +42,10 @@ config IWLWIFI
say M here and read <file:Documentation/kbuild/modules.txt>. The
module will be called iwlwifi.
+config IWLDVM
+ tristate "Intel Wireless WiFi"
+ depends on IWLWIFI
+
menu "Debugging Options"
depends on IWLWIFI
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index d615eacbf050..170ec330d2a9 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,27 +1,19 @@
-# WIFI
+# common
obj-$(CONFIG_IWLWIFI) += iwlwifi.o
-iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
-iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o iwl-debug.o
-iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
-iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
-
-iwlwifi-objs += iwl-eeprom.o iwl-power.o
-iwlwifi-objs += iwl-scan.o iwl-led.o
-iwlwifi-objs += iwl-agn-rxon.o iwl-agn-devices.o
-iwlwifi-objs += iwl-5000.o
-iwlwifi-objs += iwl-6000.o
-iwlwifi-objs += iwl-1000.o
-iwlwifi-objs += iwl-2000.o
-iwlwifi-objs += iwl-pci.o
+iwlwifi-objs += iwl-io.o
iwlwifi-objs += iwl-drv.o
+iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-notif-wait.o
-iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
-
+iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
+iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
+iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o
-iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
-CFLAGS_iwl-devtrace.o := -I$(src)
+ccflags-y += -D__CHECK_ENDIAN__ -I$(src)
-ccflags-y += -D__CHECK_ENDIAN__
+
+obj-$(CONFIG_IWLDVM) += dvm/
+
+CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/dvm/Makefile b/drivers/net/wireless/iwlwifi/dvm/Makefile
new file mode 100644
index 000000000000..5ff76b204141
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/dvm/Makefile
@@ -0,0 +1,13 @@
+# DVM
+obj-$(CONFIG_IWLDVM) += iwldvm.o
+iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
+iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
+
+iwldvm-objs += power.o
+iwldvm-objs += scan.o led.o
+iwldvm-objs += rxon.o devices.o
+
+iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+iwldvm-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += testmode.o
+
+ccflags-y += -D__CHECK_ENDIAN__ -I$(src)/../
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 79c0fe06f4db..9bb16bdf6d26 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -63,9 +63,10 @@
#ifndef __iwl_agn_h__
#define __iwl_agn_h__
-#include "iwl-dev.h"
#include "iwl-config.h"
+#include "dev.h"
+
/* The first 11 queues (0-10) are used otherwise */
#define IWLAGN_FIRST_AMPDU_QUEUE 11
@@ -91,7 +92,6 @@ extern struct iwl_lib_ops iwl6030_lib;
#define STATUS_CT_KILL 1
#define STATUS_ALIVE 2
#define STATUS_READY 3
-#define STATUS_GEO_CONFIGURED 4
#define STATUS_EXIT_PENDING 5
#define STATUS_STATISTICS 6
#define STATUS_SCANNING 7
@@ -101,6 +101,7 @@ extern struct iwl_lib_ops iwl6030_lib;
#define STATUS_CHANNEL_SWITCH_PENDING 11
#define STATUS_SCAN_COMPLETE 12
#define STATUS_POWER_PMI 13
+#define STATUS_SCAN_ROC_EXPIRED 14
struct iwl_ucode_capabilities;
@@ -255,6 +256,10 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
enum iwl_scan_type scan_type,
enum ieee80211_band band);
+void iwl_scan_roc_expired(struct iwl_priv *priv);
+void iwl_scan_offchannel_skb(struct iwl_priv *priv);
+void iwl_scan_offchannel_skb_status(struct iwl_priv *priv);
+
/* For faster active scanning, scan will move to the next channel if fewer than
* PLCP_QUIET_THRESH packets are heard on this channel within
* ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
@@ -264,7 +269,7 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
-#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
+#define IWL_SCAN_CHECK_WATCHDOG (HZ * 15)
/* bt coex */
@@ -390,8 +395,10 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
}
extern int iwl_alive_start(struct iwl_priv *priv);
-/* svtool */
+
+/* testmode support */
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+
extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
int len);
extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
@@ -399,13 +406,16 @@ extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
struct netlink_callback *cb,
void *data, int len);
extern void iwl_testmode_init(struct iwl_priv *priv);
-extern void iwl_testmode_cleanup(struct iwl_priv *priv);
+extern void iwl_testmode_free(struct iwl_priv *priv);
+
#else
+
static inline
int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
{
return -ENOSYS;
}
+
static inline
int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
@@ -413,12 +423,12 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
{
return -ENOSYS;
}
-static inline
-void iwl_testmode_init(struct iwl_priv *priv)
+
+static inline void iwl_testmode_init(struct iwl_priv *priv)
{
}
-static inline
-void iwl_testmode_cleanup(struct iwl_priv *priv)
+
+static inline void iwl_testmode_free(struct iwl_priv *priv)
{
}
#endif
@@ -437,10 +447,8 @@ static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
static inline int iwl_is_ready(struct iwl_priv *priv)
{
- /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
- * set but EXIT_PENDING is not */
+ /* The adapter is 'ready' if READY EXIT_PENDING is not set */
return test_bit(STATUS_READY, &priv->status) &&
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
!test_bit(STATUS_EXIT_PENDING, &priv->status);
}
@@ -518,85 +526,4 @@ static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
return s;
return "UNKNOWN";
}
-
-/* API method exported for mvm hybrid state */
-void iwl_setup_deferred_work(struct iwl_priv *priv);
-int iwl_send_wimax_coex(struct iwl_priv *priv);
-int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
-void iwl_option_config(struct iwl_priv *priv);
-void iwl_set_hw_params(struct iwl_priv *priv);
-void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags);
-int iwl_init_drv(struct iwl_priv *priv);
-void iwl_uninit_drv(struct iwl_priv *priv);
-void iwl_send_bt_config(struct iwl_priv *priv);
-void iwl_rf_kill_ct_config(struct iwl_priv *priv);
-int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-void iwl_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change);
-int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
-void iwlagn_check_needed_chains(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_bss_conf *bss_conf);
-void iwlagn_chain_noise_reset(struct iwl_priv *priv);
-int iwlagn_update_beacon(struct iwl_priv *priv,
- struct ieee80211_vif *vif);
-void iwl_tt_handler(struct iwl_priv *priv);
-void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode);
-void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue);
-void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
-void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
-void iwl_nic_error(struct iwl_op_mode *op_mode);
-void iwl_cmd_queue_full(struct iwl_op_mode *op_mode);
-void iwl_nic_config(struct iwl_op_mode *op_mode);
-int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, bool set);
-void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
- enum ieee80211_rssi_event rssi_event);
-int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw);
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
-void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
-void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue);
-void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch);
-int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state);
-int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size);
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req);
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta);
-void iwlagn_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast);
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params);
-void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data);
-void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key);
-int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key);
-void iwlagn_mac_stop(struct ieee80211_hw *hw);
-void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index 95f27f1a423b..f2dd671d7dc8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -63,10 +63,11 @@
#include <linux/slab.h>
#include <net/mac80211.h>
-#include "iwl-dev.h"
-#include "iwl-agn-calib.h"
#include "iwl-trans.h"
-#include "iwl-agn.h"
+
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
/*****************************************************************************
* INIT calibrations framework
@@ -832,14 +833,14 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
- active_chains &= priv->hw_params.valid_rx_ant;
+ active_chains &= priv->eeprom_data->valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
* priv->hw_setting.valid_tx_ant */
u8 ant_msk = (1 << i);
- if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ if (!(priv->eeprom_data->valid_tx_ant & ant_msk))
continue;
num_tx_chains++;
@@ -853,7 +854,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* connect the first valid tx chain
*/
first_chain =
- find_first_chain(priv->hw_params.valid_tx_ant);
+ find_first_chain(priv->eeprom_data->valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
IWL_DEBUG_CALIB(priv,
@@ -863,13 +864,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
}
}
- if (active_chains != priv->hw_params.valid_rx_ant &&
+ if (active_chains != priv->eeprom_data->valid_rx_ant &&
active_chains != priv->chain_noise_data.active_chains)
IWL_DEBUG_CALIB(priv,
"Detected that not all antennas are connected! "
"Connected: %#x, valid: %#x.\n",
active_chains,
- priv->hw_params.valid_rx_ant);
+ priv->eeprom_data->valid_rx_ant);
/* Save for use within RXON, TX, SCAN commands, etc. */
data->active_chains = active_chains;
@@ -1054,7 +1055,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
priv->cfg->bt_params->advanced_bt_coexist) {
/* Disable disconnected antenna algorithm for advanced
bt coex, assuming valid antennas are connected */
- data->active_chains = priv->hw_params.valid_rx_ant;
+ data->active_chains = priv->eeprom_data->valid_rx_ant;
for (i = 0; i < NUM_RX_CHAINS; i++)
if (!(data->active_chains & (1<<i)))
data->disconn_array[i] = 1;
@@ -1083,8 +1084,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
min_average_noise, min_average_noise_antenna_i);
- iwlagn_gain_computation(priv, average_noise,
- find_first_chain(priv->hw_params.valid_rx_ant));
+ iwlagn_gain_computation(
+ priv, average_noise,
+ find_first_chain(priv->eeprom_data->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index dbe13787f272..2349f393cc42 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -62,8 +62,8 @@
#ifndef __iwl_calib_h__
#define __iwl_calib_h__
-#include "iwl-dev.h"
-#include "iwl-commands.h"
+#include "dev.h"
+#include "commands.h"
void iwl_chain_noise_calibration(struct iwl_priv *priv);
void iwl_sensitivity_calibration(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 9af6a239b384..4a361c55c543 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -61,9 +61,9 @@
*
*****************************************************************************/
/*
- * Please use this file (iwl-commands.h) only for uCode API definitions.
+ * Please use this file (commands.h) only for uCode API definitions.
* Please use iwl-xxxx-hw.h for hardware-related definitions.
- * Please use iwl-dev.h for driver implementation definitions.
+ * Please use dev.h for driver implementation definitions.
*/
#ifndef __iwl_commands_h__
@@ -190,6 +190,44 @@ enum {
REPLY_MAX = 0xff
};
+/*
+ * Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate
+ * - 4 standard TX queues
+ * - the command queue
+ * - 4 PAN TX queues
+ * - the PAN multicast queue, and
+ * - the AUX (TX during scan dwell) queue.
+ */
+#define IWL_MIN_NUM_QUEUES 11
+
+/*
+ * Command queue depends on iPAN support.
+ */
+#define IWL_DEFAULT_CMD_QUEUE_NUM 4
+#define IWL_IPAN_CMD_QUEUE_NUM 9
+
+#define IWL_TX_FIFO_BK 0 /* shared */
+#define IWL_TX_FIFO_BE 1
+#define IWL_TX_FIFO_VI 2 /* shared */
+#define IWL_TX_FIFO_VO 3
+#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN 4
+#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN 5
+/* re-uses the VO FIFO, uCode will properly flush/schedule */
+#define IWL_TX_FIFO_AUX 5
+#define IWL_TX_FIFO_UNUSED 255
+
+#define IWLAGN_CMD_FIFO_NUM 7
+
+/*
+ * This queue number is required for proper operation
+ * because the ucode will stop/start the scheduler as
+ * required.
+ */
+#define IWL_IPAN_MCAST_QUEUE 8
+
/******************************************************************************
* (0)
* Commonly used structures and definitions:
@@ -197,9 +235,6 @@ enum {
*
*****************************************************************************/
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
-
/**
* iwlagn rate_n_flags bit fields
*
@@ -758,8 +793,6 @@ struct iwl_qosparam_cmd {
#define IWLAGN_BROADCAST_ID 15
#define IWLAGN_STATION_COUNT 16
-#define IWL_INVALID_STATION 255
-#define IWL_MAX_TID_COUNT 8
#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
@@ -1872,6 +1905,7 @@ struct iwl_bt_cmd {
#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF
#define IWLAGN_BT_PRIO_BOOST_MIN 0x00
#define IWLAGN_BT_PRIO_BOOST_DEFAULT 0xF0
+#define IWLAGN_BT_PRIO_BOOST_DEFAULT32 0xF0F0F0F0
#define IWLAGN_BT_MAX_KILL_DEFAULT 5
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 7f97dec8534d..46782f1102ac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -30,16 +30,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/debugfs.h>
-
#include <linux/ieee80211.h>
#include <net/mac80211.h>
-
-
-#include "iwl-dev.h"
#include "iwl-debug.h"
#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
@@ -87,7 +83,7 @@ static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
#define DEBUGFS_READ_FILE_OPS(name) \
DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
- .read = iwl_dbgfs_##name##_read, \
+ .read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
};
@@ -307,13 +303,13 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
const u8 *ptr;
char *buf;
u16 eeprom_ver;
- size_t eeprom_len = priv->cfg->base_params->eeprom_size;
+ size_t eeprom_len = priv->eeprom_blob_size;
buf_size = 4 * eeprom_len + 256;
if (eeprom_len % 16)
return -ENODATA;
- ptr = priv->eeprom;
+ ptr = priv->eeprom_blob;
if (!ptr)
return -ENOMEM;
@@ -322,11 +318,9 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
if (!buf)
return -ENOMEM;
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
- "version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
- ? "OTP" : "EEPROM", eeprom_ver);
+ eeprom_ver = priv->eeprom_data->eeprom_version;
+ pos += scnprintf(buf + pos, buf_size - pos,
+ "NVM version: 0x%x\n", eeprom_ver);
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
@@ -351,9 +345,6 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
char *buf;
ssize_t ret;
- if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
- return -EAGAIN;
-
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -426,8 +417,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
test_bit(STATUS_ALIVE, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
test_bit(STATUS_READY, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
- test_bit(STATUS_GEO_CONFIGURED, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
test_bit(STATUS_EXIT_PENDING, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
@@ -1341,17 +1330,17 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
pos += scnprintf(buf + pos, bufsz - pos,
"tx power: (1/2 dB step)\n");
- if ((priv->hw_params.valid_tx_ant & ANT_A) &&
+ if ((priv->eeprom_data->valid_tx_ant & ANT_A) &&
tx->tx_power.ant_a)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna A:",
tx->tx_power.ant_a);
- if ((priv->hw_params.valid_tx_ant & ANT_B) &&
+ if ((priv->eeprom_data->valid_tx_ant & ANT_B) &&
tx->tx_power.ant_b)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna B:",
tx->tx_power.ant_b);
- if ((priv->hw_params.valid_tx_ant & ANT_C) &&
+ if ((priv->eeprom_data->valid_tx_ant & ANT_C) &&
tx->tx_power.ant_c)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna C:",
@@ -2266,6 +2255,10 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
char buf[8];
int buf_size;
+ /* check that the interface is up */
+ if (!iwl_is_ready(priv))
+ return -EAGAIN;
+
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, buf_size))
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 70062379d0ec..054f728f6266 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -24,8 +24,8 @@
*
*****************************************************************************/
/*
- * Please use this file (iwl-dev.h) for driver implementation definitions.
- * Please use iwl-commands.h for uCode API definitions.
+ * Please use this file (dev.h) for driver implementation definitions.
+ * Please use commands.h for uCode API definitions.
*/
#ifndef __iwl_dev_h__
@@ -39,17 +39,20 @@
#include <linux/mutex.h>
#include "iwl-fw.h"
-#include "iwl-eeprom.h"
+#include "iwl-eeprom-parse.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
#include "iwl-agn-hw.h"
-#include "iwl-led.h"
-#include "iwl-power.h"
-#include "iwl-agn-rs.h"
-#include "iwl-agn-tt.h"
-#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-notif-wait.h"
+#include "iwl-trans.h"
+
+#include "led.h"
+#include "power.h"
+#include "rs.h"
+#include "tt.h"
+
+#include "iwl-test.h"
/* CT-KILL constants */
#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
@@ -87,49 +90,6 @@
#define IWL_NUM_SCAN_RATES (2)
-/*
- * One for each channel, holds all channel setup data
- * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
- * with one another!
- */
-struct iwl_channel_info {
- struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
- struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
- * HT40 channel */
-
- u8 channel; /* channel number */
- u8 flags; /* flags copied from EEPROM */
- s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
- s8 min_power; /* always 0 */
- s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
-
- u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
- u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
- enum ieee80211_band band;
-
- /* HT40 channel info */
- s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- u8 ht40_flags; /* flags copied from EEPROM */
- u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
-};
-
-/*
- * Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate
- * - 4 standard TX queues
- * - the command queue
- * - 4 PAN TX queues
- * - the PAN multicast queue, and
- * - the AUX (TX during scan dwell) queue.
- */
-#define IWL_MIN_NUM_QUEUES 11
-
-/*
- * Command queue depends on iPAN support.
- */
-#define IWL_DEFAULT_CMD_QUEUE_NUM 4
-#define IWL_IPAN_CMD_QUEUE_NUM 9
#define IEEE80211_DATA_LEN 2304
#define IEEE80211_4ADDR_LEN 30
@@ -153,29 +113,6 @@ union iwl_ht_rate_supp {
};
};
-#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
-#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
-#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
-#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
-#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
-
-/*
- * Maximal MPDU density for TX aggregation
- * 4 - 2us density
- * 5 - 4us density
- * 6 - 8us density
- * 7 - 16us density
- */
-#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
-#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
-#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
-#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
-#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
-#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
-#define CFG_HT_MPDU_DENSITY_MIN (0x1)
-
struct iwl_ht_config {
bool single_chain_sufficient;
enum ieee80211_smps_mode smps; /* current smps mode */
@@ -445,23 +382,6 @@ enum {
MEASUREMENT_ACTIVE = (1 << 1),
};
-enum iwl_nvm_type {
- NVM_DEVICE_TYPE_EEPROM = 0,
- NVM_DEVICE_TYPE_OTP,
-};
-
-/*
- * Two types of OTP memory access modes
- * IWL_OTP_ACCESS_ABSOLUTE - absolute address mode,
- * based on physical memory addressing
- * IWL_OTP_ACCESS_RELATIVE - relative address mode,
- * based on logical memory addressing
- */
-enum iwl_access_mode {
- IWL_OTP_ACCESS_ABSOLUTE,
- IWL_OTP_ACCESS_RELATIVE,
-};
-
/* reply_tx_statistics (for _agn devices) */
struct reply_tx_error_statistics {
u32 pp_delay;
@@ -632,10 +552,6 @@ enum iwl_scan_type {
*
* @tx_chains_num: Number of TX chains
* @rx_chains_num: Number of RX chains
- * @valid_tx_ant: usable antennas for TX
- * @valid_rx_ant: usable antennas for RX
- * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
- * @sku: sku read from EEPROM
* @ct_kill_threshold: temperature threshold - in hw dependent unit
* @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
* relevant for 1000, 6000 and up
@@ -645,11 +561,7 @@ enum iwl_scan_type {
struct iwl_hw_params {
u8 tx_chains_num;
u8 rx_chains_num;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u8 ht40_channel;
bool use_rts_for_aggregation;
- u16 sku;
u32 ct_kill_threshold;
u32 ct_kill_exit_threshold;
@@ -664,31 +576,10 @@ struct iwl_lib_ops {
/* device specific configuration */
void (*nic_config)(struct iwl_priv *priv);
- /* eeprom operations (as defined in iwl-eeprom.h) */
- struct iwl_eeprom_ops eeprom_ops;
-
/* temperature */
void (*temperature)(struct iwl_priv *priv);
};
-#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
-struct iwl_testmode_trace {
- u32 buff_size;
- u32 total_size;
- u32 num_chunks;
- u8 *cpu_addr;
- u8 *trace_addr;
- dma_addr_t dma_addr;
- bool trace_enabled;
-};
-struct iwl_testmode_mem {
- u32 buff_size;
- u32 num_chunks;
- u8 *buff_addr;
- bool read_in_progress;
-};
-#endif
-
struct iwl_wipan_noa_data {
struct rcu_head rcu_head;
u32 length;
@@ -735,8 +626,6 @@ struct iwl_priv {
/* ieee device used by generic ieee processing code */
struct ieee80211_hw *hw;
- struct ieee80211_channel *ieee_channels;
- struct ieee80211_rate *ieee_rates;
struct list_head calib_results;
@@ -747,16 +636,12 @@ struct iwl_priv {
enum ieee80211_band band;
u8 valid_contexts;
- void (*pre_rx_handler)(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb);
int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
struct iwl_notif_wait_data notif_wait;
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
/* spectrum measurement report caching */
struct iwl_spectrum_notification measure_report;
u8 measurement_status;
@@ -787,11 +672,6 @@ struct iwl_priv {
bool ucode_loaded;
bool init_ucode_run; /* Don't run init uCode again */
- /* we allocate array of iwl_channel_info for NIC's valid channels.
- * Access via channel # using indirect index array */
- struct iwl_channel_info *channel_info; /* channel info array */
- u8 channel_count; /* # of channels */
-
u8 plcp_delta_threshold;
/* thermal calibration */
@@ -846,6 +726,7 @@ struct iwl_priv {
struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
unsigned long ucode_key_table;
struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
+ atomic_t num_aux_in_flight;
u8 mac80211_registered;
@@ -950,10 +831,8 @@ struct iwl_priv {
struct delayed_work scan_check;
- /* TX Power */
+ /* TX Power settings */
s8 tx_power_user_lmt;
- s8 tx_power_device_lmt;
- s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
s8 tx_power_next;
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -964,9 +843,10 @@ struct iwl_priv {
void *wowlan_sram;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
- /* eeprom -- this is in the card's little endian byte order */
- u8 *eeprom;
- enum iwl_nvm_type nvm_device_type;
+ struct iwl_eeprom_data *eeprom_data;
+ /* eeprom blob for debugfs/testmode */
+ u8 *eeprom_blob;
+ size_t eeprom_blob_size;
struct work_struct txpower_work;
u32 calib_disabled;
@@ -979,9 +859,9 @@ struct iwl_priv {
struct led_classdev led;
unsigned long blink_on, blink_off;
bool led_registered;
+
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
- struct iwl_testmode_trace testmode_trace;
- struct iwl_testmode_mem testmode_mem;
+ struct iwl_test tst;
u32 tm_fixed_rate;
#endif
@@ -1001,8 +881,6 @@ struct iwl_priv {
enum iwl_ucode_type cur_ucode;
}; /*iwl_priv */
-extern struct kmem_cache *iwl_tx_cmd_pool;
-
static inline struct iwl_rxon_context *
iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
{
@@ -1036,36 +914,4 @@ static inline int iwl_is_any_associated(struct iwl_priv *priv)
return false;
}
-static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
-{
- if (ch_info == NULL)
- return 0;
- return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
-}
-
-static inline int is_channel_radar(const struct iwl_channel_info *ch_info)
-{
- return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
-}
-
-static inline u8 is_channel_a_band(const struct iwl_channel_info *ch_info)
-{
- return ch_info->band == IEEE80211_BAND_5GHZ;
-}
-
-static inline u8 is_channel_bg_band(const struct iwl_channel_info *ch_info)
-{
- return ch_info->band == IEEE80211_BAND_2GHZ;
-}
-
-static inline int is_channel_passive(const struct iwl_channel_info *ch)
-{
- return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
-}
-
-static inline int is_channel_ibss(const struct iwl_channel_info *ch)
-{
- return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
-}
-
#endif /* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c b/drivers/net/wireless/iwlwifi/dvm/devices.c
index 48533b3a0f9a..349c205d5f62 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c
+++ b/drivers/net/wireless/iwlwifi/dvm/devices.c
@@ -27,11 +27,14 @@
/*
* DVM device-specific data & functions
*/
-#include "iwl-agn.h"
-#include "iwl-dev.h"
-#include "iwl-commands.h"
#include "iwl-io.h"
#include "iwl-prph.h"
+#include "iwl-eeprom-parse.h"
+
+#include "agn.h"
+#include "dev.h"
+#include "commands.h"
+
/*
* 1000 series
@@ -58,11 +61,6 @@ static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
/* NIC configuration for 1000 series */
static void iwl1000_nic_config(struct iwl_priv *priv)
{
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
/* Setting digital SVR for 1000 card to 1.32V */
/* locking is acquired in iwl_set_bits_mask_prph() function */
iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG,
@@ -170,16 +168,6 @@ static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
-
- priv->hw_params.tx_chains_num =
- num_of_ant(priv->hw_params.valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
- else
- priv->hw_params.rx_chains_num =
- num_of_ant(priv->hw_params.valid_rx_ant);
-
iwl1000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
@@ -189,17 +177,6 @@ static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
struct iwl_lib_ops iwl1000_lib = {
.set_hw_params = iwl1000_hw_set_hw_params,
.nic_config = iwl1000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- },
.temperature = iwlagn_temperature,
};
@@ -219,8 +196,6 @@ static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
/* NIC configuration for 2000 series */
static void iwl2000_nic_config(struct iwl_priv *priv)
{
- iwl_rf_config(priv);
-
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
@@ -251,16 +226,6 @@ static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
-
- priv->hw_params.tx_chains_num =
- num_of_ant(priv->hw_params.valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
- else
- priv->hw_params.rx_chains_num =
- num_of_ant(priv->hw_params.valid_rx_ant);
-
iwl2000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
@@ -270,36 +235,12 @@ static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
struct iwl_lib_ops iwl2000_lib = {
.set_hw_params = iwl2000_hw_set_hw_params,
.nic_config = iwl2000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- .enhanced_txpower = true,
- },
.temperature = iwlagn_temperature,
};
struct iwl_lib_ops iwl2030_lib = {
.set_hw_params = iwl2000_hw_set_hw_params,
.nic_config = iwl2000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- .enhanced_txpower = true,
- },
.temperature = iwlagn_temperature,
};
@@ -309,19 +250,6 @@ struct iwl_lib_ops iwl2030_lib = {
*/
/* NIC configuration for 5000 series */
-static void iwl5000_nic_config(struct iwl_priv *priv)
-{
- iwl_rf_config(priv);
-
- /* W/A : NIC is stuck in a reset state after Early PCIe power off
- * (PCIe power is lost before PERST# is asserted),
- * causing ME FW to lose ownership and not being able to obtain it back.
- */
- iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
- ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
-}
-
static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.min_nrg_cck = 100,
.auto_corr_min_ofdm = 90,
@@ -376,11 +304,9 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
{
u16 temperature, voltage;
- __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
- EEPROM_KELVIN_TEMPERATURE);
- temperature = le16_to_cpu(temp_calib[0]);
- voltage = le16_to_cpu(temp_calib[1]);
+ temperature = le16_to_cpu(priv->eeprom_data->kelvin_temperature);
+ voltage = le16_to_cpu(priv->eeprom_data->kelvin_voltage);
/* offset = temp - volt / coeff */
return (s32)(temperature -
@@ -404,14 +330,6 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
-
- priv->hw_params.tx_chains_num =
- num_of_ant(priv->hw_params.valid_tx_ant);
- priv->hw_params.rx_chains_num =
- num_of_ant(priv->hw_params.valid_rx_ant);
-
iwl5000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
@@ -420,14 +338,6 @@ static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
-
- priv->hw_params.tx_chains_num =
- num_of_ant(priv->hw_params.valid_tx_ant);
- priv->hw_params.rx_chains_num =
- num_of_ant(priv->hw_params.valid_rx_ant);
-
iwl5150_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
@@ -455,7 +365,6 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl5000_channel_switch_cmd cmd;
- const struct iwl_channel_info *ch_info;
u32 switch_time_in_usec, ucode_switch_time;
u16 ch;
u32 tsf_low;
@@ -505,14 +414,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
}
IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
cmd.switch_time);
- ch_info = iwl_get_channel_info(priv, priv->band, ch);
- if (ch_info)
- cmd.expect_beacon = is_channel_radar(ch_info);
- else {
- IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
- return -EFAULT;
- }
+ cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
return iwl_dvm_send_cmd(priv, &hcmd);
}
@@ -520,36 +422,12 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
struct iwl_lib_ops iwl5000_lib = {
.set_hw_params = iwl5000_hw_set_hw_params,
.set_channel_switch = iwl5000_hw_channel_switch,
- .nic_config = iwl5000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
- },
.temperature = iwlagn_temperature,
};
struct iwl_lib_ops iwl5150_lib = {
.set_hw_params = iwl5150_hw_set_hw_params,
.set_channel_switch = iwl5000_hw_channel_switch,
- .nic_config = iwl5000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
- },
.temperature = iwl5150_temperature,
};
@@ -570,8 +448,6 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
/* NIC configuration for 6000 series */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
- iwl_rf_config(priv);
-
switch (priv->cfg->device_family) {
case IWL_DEVICE_FAMILY_6005:
case IWL_DEVICE_FAMILY_6030:
@@ -584,13 +460,13 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
break;
case IWL_DEVICE_FAMILY_6050:
/* Indicate calibration version to uCode. */
- if (iwl_eeprom_calib_version(priv) >= 6)
+ if (priv->eeprom_data->calib_version >= 6)
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
break;
case IWL_DEVICE_FAMILY_6150:
/* Indicate calibration version to uCode. */
- if (iwl_eeprom_calib_version(priv) >= 6)
+ if (priv->eeprom_data->calib_version >= 6)
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
@@ -627,17 +503,6 @@ static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
- BIT(IEEE80211_BAND_5GHZ);
-
- priv->hw_params.tx_chains_num =
- num_of_ant(priv->hw_params.valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
- priv->hw_params.rx_chains_num = 1;
- else
- priv->hw_params.rx_chains_num =
- num_of_ant(priv->hw_params.valid_rx_ant);
-
iwl6000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
@@ -654,7 +519,6 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
*/
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
struct iwl6000_channel_switch_cmd cmd;
- const struct iwl_channel_info *ch_info;
u32 switch_time_in_usec, ucode_switch_time;
u16 ch;
u32 tsf_low;
@@ -704,14 +568,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
}
IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
cmd.switch_time);
- ch_info = iwl_get_channel_info(priv, priv->band, ch);
- if (ch_info)
- cmd.expect_beacon = is_channel_radar(ch_info);
- else {
- IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
- return -EFAULT;
- }
+ cmd.expect_beacon = ch_switch->channel->flags & IEEE80211_CHAN_RADAR;
return iwl_dvm_send_cmd(priv, &hcmd);
}
@@ -720,18 +577,6 @@ struct iwl_lib_ops iwl6000_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
.set_channel_switch = iwl6000_hw_channel_switch,
.nic_config = iwl6000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
- .enhanced_txpower = true,
- },
.temperature = iwlagn_temperature,
};
@@ -739,17 +584,5 @@ struct iwl_lib_ops iwl6030_lib = {
.set_hw_params = iwl6000_hw_set_hw_params,
.set_channel_switch = iwl6000_hw_channel_switch,
.nic_config = iwl6000_nic_config,
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REG_BAND_1_CHANNELS,
- EEPROM_REG_BAND_2_CHANNELS,
- EEPROM_REG_BAND_3_CHANNELS,
- EEPROM_REG_BAND_4_CHANNELS,
- EEPROM_REG_BAND_5_CHANNELS,
- EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
- EEPROM_REG_BAND_52_HT40_CHANNELS
- },
- .enhanced_txpower = true,
- },
.temperature = iwlagn_temperature,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/dvm/led.c
index 47000419f916..bf479f709091 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/dvm/led.c
@@ -34,12 +34,11 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
-
-#include "iwl-dev.h"
-#include "iwl-agn.h"
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
/* Throughput OFF time(ms) ON time (ms)
* >300 25 25
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/dvm/led.h
index b02a853103d3..b02a853103d3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/dvm/led.h
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index e55ec6c8a920..bef88c1a2c9b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -33,13 +33,14 @@
#include <linux/sched.h>
#include <net/mac80211.h>
-#include "iwl-dev.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
#include "iwl-trans.h"
#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
+
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
@@ -58,8 +59,7 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
/* half dBm need to multiply */
tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
- if (priv->tx_power_lmt_in_half_dbm &&
- priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
+ if (tx_power_cmd.global_lmt > priv->eeprom_data->max_tx_pwr_half_dbm) {
/*
* For the newer devices which using enhanced/extend tx power
* table in EEPROM, the format is in half dBm. driver need to
@@ -71,7 +71,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
* "tx_power_user_lmt" is higher than EEPROM value (in
* half-dBm format), lower the tx power based on EEPROM
*/
- tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
+ tx_power_cmd.global_lmt =
+ priv->eeprom_data->max_tx_pwr_half_dbm;
}
tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
@@ -159,7 +160,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
IWL_PAN_SCD_MULTICAST_MSK;
- if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -264,6 +265,8 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
bt_cmd_v2.tx_prio_boost = 0;
bt_cmd_v2.rx_prio_boost = 0;
} else {
+ /* older version only has 8 bits */
+ WARN_ON(priv->cfg->bt_params->bt_prio_boost & ~0xFF);
bt_cmd_v1.prio_boost =
priv->cfg->bt_params->bt_prio_boost;
bt_cmd_v1.tx_prio_boost = 0;
@@ -617,6 +620,11 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
int ave_rssi;
+ if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) {
+ IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n");
+ return false;
+ }
+
ave_rssi = ieee80211_ave_rssi(ctx->vif);
if (!ave_rssi) {
/* no rssi data, no changes to reduce tx power */
@@ -818,7 +826,7 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (priv->chain_noise_data.active_chains)
active_chains = priv->chain_noise_data.active_chains;
else
- active_chains = priv->hw_params.valid_rx_ant;
+ active_chains = priv->eeprom_data->valid_rx_ant;
if (priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist &&
@@ -1259,7 +1267,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
* the mutex, this ensures we don't try to send two
* (or more) synchronous commands at a time.
*/
- if (cmd->flags & CMD_SYNC)
+ if (!(cmd->flags & CMD_ASYNC))
lockdep_assert_held(&priv->mutex);
if (priv->ucode_owner == IWL_OWNERSHIP_TM &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 3ee23134c02b..a5f7bce96325 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -38,19 +38,20 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
+#include <net/ieee80211_radiotap.h>
#include <net/mac80211.h>
#include <asm/div64.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
#include "iwl-io.h"
-#include "iwl-agn-calib.h"
-#include "iwl-agn.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-modparams.h"
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
+
/*****************************************************************************
*
* mac80211 entry point functions
@@ -154,6 +155,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
IEEE80211_HW_SCAN_WHILE_IDLE;
hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
+ hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
/*
* Including the following line will crash some AP's. This
@@ -162,7 +164,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
*/
- if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE)
hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_SUPPORTS_STATIC_SMPS;
@@ -237,12 +239,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+ if (priv->eeprom_data->bands[IEEE80211_BAND_2GHZ].n_channels)
priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+ &priv->eeprom_data->bands[IEEE80211_BAND_2GHZ];
+ if (priv->eeprom_data->bands[IEEE80211_BAND_5GHZ].n_channels)
priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
+ &priv->eeprom_data->bands[IEEE80211_BAND_5GHZ];
hw->wiphy->hw_version = priv->trans->hw_id;
@@ -341,7 +343,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
return 0;
}
-void iwlagn_mac_stop(struct ieee80211_hw *hw)
+static void iwlagn_mac_stop(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -369,9 +371,9 @@ void iwlagn_mac_stop(struct ieee80211_hw *hw)
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data)
+static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -397,7 +399,8 @@ void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
#ifdef CONFIG_PM_SLEEP
-int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
@@ -420,8 +423,6 @@ int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
if (ret)
goto error;
- device_set_wakeup_enable(priv->trans->dev, true);
-
iwl_trans_wowlan_suspend(priv->trans);
goto out;
@@ -475,7 +476,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
}
if (priv->wowlan_sram)
- _iwl_read_targ_mem_words(
+ _iwl_read_targ_mem_dwords(
priv->trans, 0x800000,
priv->wowlan_sram,
img->sec[IWL_UCODE_SECTION_DATA].len / 4);
@@ -488,8 +489,6 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
priv->wowlan = false;
- device_set_wakeup_enable(priv->trans->dev, false);
-
iwlagn_prepare_restart(priv);
memset((void *)&ctx->active, 0, sizeof(ctx->active));
@@ -504,9 +503,15 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
return 1;
}
+static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+ device_set_wakeup_enable(priv->trans->dev, enabled);
+}
#endif
-void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -517,21 +522,21 @@ void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb_any(skb);
}
-void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key)
+static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
}
-int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -631,11 +636,11 @@ int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
-int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size)
+static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret = -EINVAL;
@@ -644,7 +649,7 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
sta->addr, tid);
- if (!(priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE))
+ if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE))
return -EACCES;
IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -662,7 +667,7 @@ int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_sta_rx_agg_stop(priv, sta, tid);
break;
case IEEE80211_AMPDU_TX_START:
- if (!priv->trans->ops->tx_agg_setup)
+ if (!priv->trans->ops->txq_enable)
break;
if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
break;
@@ -757,11 +762,11 @@ static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
return ret;
}
-int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- enum ieee80211_sta_state old_state,
- enum ieee80211_sta_state new_state)
+static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -796,6 +801,18 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
switch (op) {
case ADD:
ret = iwlagn_mac_sta_add(hw, vif, sta);
+ if (ret)
+ break;
+ /*
+ * Clear the in-progress flag, the AP station entry was added
+ * but we'll initialize LQ only when we've associated (which
+ * would also clear the in-progress flag). This is necessary
+ * in case we never initialize LQ because association fails.
+ */
+ spin_lock_bh(&priv->sta_lock);
+ priv->stations[iwl_sta_id(sta)].used &=
+ ~IWL_STA_UCODE_INPROGRESS;
+ spin_unlock_bh(&priv->sta_lock);
break;
case REMOVE:
ret = iwlagn_mac_sta_remove(hw, vif, sta);
@@ -840,11 +857,10 @@ int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
return ret;
}
-void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch)
+static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- const struct iwl_channel_info *ch_info;
struct ieee80211_conf *conf = &hw->conf;
struct ieee80211_channel *channel = ch_switch->channel;
struct iwl_ht_config *ht_conf = &priv->current_ht_config;
@@ -881,12 +897,6 @@ void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
if (le16_to_cpu(ctx->active.channel) == ch)
goto out;
- ch_info = iwl_get_channel_info(priv, channel->band, ch);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
-
priv->current_ht_config.smps = conf->smps_mode;
/* Configure HT40 channels */
@@ -935,10 +945,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
ieee80211_chswitch_done(ctx->vif, is_success);
}
-void iwlagn_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
__le32 filter_or = 0, filter_nand = 0;
@@ -985,7 +995,7 @@ void iwlagn_configure_filter(struct ieee80211_hw *hw,
FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
}
-void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1038,8 +1048,18 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- err = -EBUSY;
- goto out;
+ /* mac80211 should not scan while ROC or ROC while scanning */
+ if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ iwl_scan_cancel_timeout(priv, 100);
+
+ if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+ err = -EBUSY;
+ goto out;
+ }
}
priv->hw_roc_channel = channel;
@@ -1112,7 +1132,7 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
return err;
}
-int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1129,8 +1149,8 @@ int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
return 0;
}
-void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
- enum ieee80211_rssi_event rssi_event)
+static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+ enum ieee80211_rssi_event rssi_event)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1154,8 +1174,8 @@ void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, bool set)
+static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1164,9 +1184,9 @@ int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
return 0;
}
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
+static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -1208,7 +1228,7 @@ int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
return 0;
}
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
+static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -1224,7 +1244,8 @@ static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
return iwlagn_commit_rxon(priv, ctx);
}
-int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+static int iwl_setup_interface(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
{
struct ieee80211_vif *vif = ctx->vif;
int err, ac;
@@ -1344,9 +1365,9 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
return err;
}
-void iwl_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change)
+static void iwl_teardown_interface(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ bool mode_change)
{
struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
@@ -1402,13 +1423,11 @@ static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
}
static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_rxon_context *tmp;
+ struct iwl_rxon_context *ctx, *tmp;
enum nl80211_iftype newviftype = newtype;
u32 interface_modes;
int err;
@@ -1419,6 +1438,18 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
+ ctx = iwl_rxon_ctx_from_vif(vif);
+
+ /*
+ * To simplify this code, only support changes on the
+ * BSS context. The PAN context is usually reassigned
+ * by creating/removing P2P interfaces anyway.
+ */
+ if (ctx->ctxid != IWL_RXON_CTX_BSS) {
+ err = -EBUSY;
+ goto out;
+ }
+
if (!ctx->vif || !iwl_is_ready_rf(priv)) {
/*
* Huh? But wait ... this can maybe happen when
@@ -1428,32 +1459,19 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
goto out;
}
+ /* Check if the switch is supported in the same context */
interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
if (!(interface_modes & BIT(newtype))) {
err = -EBUSY;
goto out;
}
- /*
- * Refuse a change that should be done by moving from the PAN
- * context to the BSS context instead, if the BSS context is
- * available and can support the new interface type.
- */
- if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
- (bss_ctx->interface_modes & BIT(newtype) ||
- bss_ctx->exclusive_interface_modes & BIT(newtype))) {
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- err = -EBUSY;
- goto out;
- }
-
if (ctx->exclusive_interface_modes & BIT(newtype)) {
for_each_context(priv, tmp) {
if (ctx == tmp)
continue;
- if (!tmp->vif)
+ if (!tmp->is_active)
continue;
/*
@@ -1487,9 +1505,9 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
return err;
}
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req)
+static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret;
@@ -1544,10 +1562,10 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
}
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
+static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
@@ -1584,6 +1602,7 @@ struct ieee80211_ops iwlagn_hw_ops = {
#ifdef CONFIG_PM_SLEEP
.suspend = iwlagn_mac_suspend,
.resume = iwlagn_mac_resume,
+ .set_wakeup = iwlagn_mac_set_wakeup,
#endif
.add_interface = iwlagn_mac_add_interface,
.remove_interface = iwlagn_mac_remove_interface,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index ec36e2b020b6..84d3db5aa506 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -44,15 +44,19 @@
#include <asm/div64.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
+#include "iwl-eeprom-read.h"
+#include "iwl-eeprom-parse.h"
#include "iwl-io.h"
-#include "iwl-agn-calib.h"
-#include "iwl-agn.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "iwl-modparams.h"
+#include "iwl-prph.h"
+
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
+
/******************************************************************************
*
@@ -78,7 +82,8 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwlagn");
+
+static const struct iwl_op_mode_ops iwl_dvm_ops;
void iwl_update_chain_flags(struct iwl_priv *priv)
{
@@ -180,7 +185,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
rate = info->control.rates[0].idx;
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
+ priv->eeprom_data->valid_tx_ant);
rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* In mac80211, rates for 5 GHz start at 0 */
@@ -403,7 +408,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
base = priv->device_pointers.log_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
- iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read));
+ iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
capacity = read.capacity;
mode = read.mode;
num_wraps = read.wrap_counter;
@@ -513,49 +518,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
* queue/FIFO/AC mapping definitions
*/
-#define IWL_TX_FIFO_BK 0 /* shared */
-#define IWL_TX_FIFO_BE 1
-#define IWL_TX_FIFO_VI 2 /* shared */
-#define IWL_TX_FIFO_VO 3
-#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
-#define IWL_TX_FIFO_BE_IPAN 4
-#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
-#define IWL_TX_FIFO_VO_IPAN 5
-/* re-uses the VO FIFO, uCode will properly flush/schedule */
-#define IWL_TX_FIFO_AUX 5
-#define IWL_TX_FIFO_UNUSED -1
-
-#define IWLAGN_CMD_FIFO_NUM 7
-
-/*
- * This queue number is required for proper operation
- * because the ucode will stop/start the scheduler as
- * required.
- */
-#define IWL_IPAN_MCAST_QUEUE 8
-
-static const u8 iwlagn_default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWLAGN_CMD_FIFO_NUM,
-};
-
-static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWL_TX_FIFO_BK_IPAN,
- IWL_TX_FIFO_BE_IPAN,
- IWL_TX_FIFO_VI_IPAN,
- IWL_TX_FIFO_VO_IPAN,
- IWL_TX_FIFO_BE_IPAN,
- IWLAGN_CMD_FIFO_NUM,
- IWL_TX_FIFO_AUX,
-};
-
static const u8 iwlagn_bss_ac_to_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
@@ -578,7 +540,7 @@ static const u8 iwlagn_pan_ac_to_queue[] = {
7, 6, 5, 4,
};
-void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
+static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
{
int i;
@@ -645,7 +607,7 @@ void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
}
-void iwl_rf_kill_ct_config(struct iwl_priv *priv)
+static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
{
struct iwl_ct_kill_config cmd;
struct iwl_ct_kill_throttling_config adv_cmd;
@@ -726,7 +688,7 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
}
}
-void iwl_send_bt_config(struct iwl_priv *priv)
+static void iwl_send_bt_config(struct iwl_priv *priv)
{
struct iwl_bt_cmd bt_cmd = {
.lead_time = BT_LEAD_TIME_DEF,
@@ -814,7 +776,7 @@ int iwl_alive_start(struct iwl_priv *priv)
ieee80211_wake_queues(priv->hw);
/* Configure Tx antenna selection based on H/W config */
- iwlagn_send_tx_ant_config(priv, priv->hw_params.valid_tx_ant);
+ iwlagn_send_tx_ant_config(priv, priv->eeprom_data->valid_tx_ant);
if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
struct iwl_rxon_cmd *active_rxon =
@@ -932,11 +894,12 @@ void iwl_down(struct iwl_priv *priv)
priv->ucode_loaded = false;
iwl_trans_stop_device(priv->trans);
+ /* Set num_aux_in_flight must be done after the transport is stopped */
+ atomic_set(&priv->num_aux_in_flight, 0);
+
/* Clear out all status bits but a few that are stable across reset */
priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
test_bit(STATUS_FW_ERROR, &priv->status) <<
STATUS_FW_ERROR |
test_bit(STATUS_EXIT_PENDING, &priv->status) <<
@@ -1078,7 +1041,7 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
*
*****************************************************************************/
-void iwl_setup_deferred_work(struct iwl_priv *priv)
+static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
priv->workqueue = create_singlethread_workqueue(DRV_NAME);
@@ -1123,224 +1086,14 @@ void iwl_cancel_deferred_work(struct iwl_priv *priv)
del_timer_sync(&priv->ucode_trace);
}
-static void iwl_init_hw_rates(struct ieee80211_rate *rates)
-{
- int i;
-
- for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
- rates[i].bitrate = iwl_rates[i].ieee * 5;
- rates[i].hw_value = i; /* Rate scaling will work on indexes */
- rates[i].hw_value_short = i;
- rates[i].flags = 0;
- if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
- /*
- * If CCK != 1M then set short preamble rate flag.
- */
- rates[i].flags |=
- (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
- 0 : IEEE80211_RATE_SHORT_PREAMBLE;
- }
- }
-}
-
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
- struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
-{
- u16 max_bit_rate = 0;
- u8 rx_chains_num = priv->hw_params.rx_chains_num;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
-
- ht_info->cap = 0;
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
- ht_info->ht_supported = true;
-
- if (priv->cfg->ht_params &&
- priv->cfg->ht_params->ht_greenfield_support)
- ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
- max_bit_rate = MAX_BIT_RATE_20_MHZ;
- if (priv->hw_params.ht40_channel & BIT(band)) {
- ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
- ht_info->mcs.rx_mask[4] = 0x01;
- max_bit_rate = MAX_BIT_RATE_40_MHZ;
- }
-
- if (iwlwifi_mod_params.amsdu_size_8K)
- ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
- ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
- ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
-
- ht_info->mcs.rx_mask[0] = 0xFF;
- if (rx_chains_num >= 2)
- ht_info->mcs.rx_mask[1] = 0xFF;
- if (rx_chains_num >= 3)
- ht_info->mcs.rx_mask[2] = 0xFF;
-
- /* Highest supported Rx data rate */
- max_bit_rate *= rx_chains_num;
- WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
- ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
- /* Tx MCS capabilities */
- ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (tx_chains_num != rx_chains_num) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-}
-
-/**
- * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
- */
-static int iwl_init_geos(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *channels;
- struct ieee80211_channel *geo_ch;
- struct ieee80211_rate *rates;
- int i = 0;
- s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
- priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
- IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
- return 0;
- }
-
- channels = kcalloc(priv->channel_count,
- sizeof(struct ieee80211_channel), GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
- GFP_KERNEL);
- if (!rates) {
- kfree(channels);
- return -ENOMEM;
- }
-
- /* 5.2GHz channels start after the 2.4GHz channels */
- sband = &priv->bands[IEEE80211_BAND_5GHZ];
- sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
- /* just OFDM */
- sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
-
- if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
- iwl_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_5GHZ);
-
- sband = &priv->bands[IEEE80211_BAND_2GHZ];
- sband->channels = channels;
- /* OFDM & CCK */
- sband->bitrates = rates;
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
-
- if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
- iwl_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_2GHZ);
-
- priv->ieee_channels = channels;
- priv->ieee_rates = rates;
-
- for (i = 0; i < priv->channel_count; i++) {
- ch = &priv->channel_info[i];
-
- /* FIXME: might be removed if scan is OK */
- if (!is_channel_valid(ch))
- continue;
-
- sband = &priv->bands[ch->band];
-
- geo_ch = &sband->channels[sband->n_channels++];
-
- geo_ch->center_freq =
- ieee80211_channel_to_frequency(ch->channel, ch->band);
- geo_ch->max_power = ch->max_power_avg;
- geo_ch->max_antenna_gain = 0xff;
- geo_ch->hw_value = ch->channel;
-
- if (is_channel_valid(ch)) {
- if (!(ch->flags & EEPROM_CHANNEL_IBSS))
- geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
-
- if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
- geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
-
- if (ch->flags & EEPROM_CHANNEL_RADAR)
- geo_ch->flags |= IEEE80211_CHAN_RADAR;
-
- geo_ch->flags |= ch->ht40_extension_channel;
-
- if (ch->max_power_avg > max_tx_power)
- max_tx_power = ch->max_power_avg;
- } else {
- geo_ch->flags |= IEEE80211_CHAN_DISABLED;
- }
-
- IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
- ch->channel, geo_ch->center_freq,
- is_channel_a_band(ch) ? "5.2" : "2.4",
- geo_ch->flags & IEEE80211_CHAN_DISABLED ?
- "restricted" : "valid",
- geo_ch->flags);
- }
-
- priv->tx_power_device_lmt = max_tx_power;
- priv->tx_power_user_lmt = max_tx_power;
- priv->tx_power_next = max_tx_power;
-
- if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- priv->hw_params.sku & EEPROM_SKU_CAP_BAND_52GHZ) {
- IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
- "Please send your %s to maintainer.\n",
- priv->trans->hw_id_str);
- priv->hw_params.sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
- }
-
- if (iwlwifi_mod_params.disable_5ghz)
- priv->bands[IEEE80211_BAND_5GHZ].n_channels = 0;
-
- IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
- priv->bands[IEEE80211_BAND_2GHZ].n_channels,
- priv->bands[IEEE80211_BAND_5GHZ].n_channels);
-
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-
- return 0;
-}
-
-/*
- * iwl_free_geos - undo allocations in iwl_init_geos
- */
-static void iwl_free_geos(struct iwl_priv *priv)
-{
- kfree(priv->ieee_channels);
- kfree(priv->ieee_rates);
- clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
-}
-
-int iwl_init_drv(struct iwl_priv *priv)
+static int iwl_init_drv(struct iwl_priv *priv)
{
- int ret;
-
spin_lock_init(&priv->sta_lock);
mutex_init(&priv->mutex);
INIT_LIST_HEAD(&priv->calib_results);
- priv->ieee_channels = NULL;
- priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
priv->plcp_delta_threshold =
@@ -1371,31 +1124,11 @@ int iwl_init_drv(struct iwl_priv *priv)
priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
}
- ret = iwl_init_channel_map(priv);
- if (ret) {
- IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
- goto err;
- }
-
- ret = iwl_init_geos(priv);
- if (ret) {
- IWL_ERR(priv, "initializing geos failed: %d\n", ret);
- goto err_free_channel_map;
- }
- iwl_init_hw_rates(priv->ieee_rates);
-
return 0;
-
-err_free_channel_map:
- iwl_free_channel_map(priv);
-err:
- return ret;
}
-void iwl_uninit_drv(struct iwl_priv *priv)
+static void iwl_uninit_drv(struct iwl_priv *priv)
{
- iwl_free_geos(priv);
- iwl_free_channel_map(priv);
kfree(priv->scan_cmd);
kfree(priv->beacon_cmd);
kfree(rcu_dereference_raw(priv->noa_data));
@@ -1405,15 +1138,12 @@ void iwl_uninit_drv(struct iwl_priv *priv)
#endif
}
-void iwl_set_hw_params(struct iwl_priv *priv)
+static void iwl_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->ht_params)
priv->hw_params.use_rts_for_aggregation =
priv->cfg->ht_params->use_rts_for_aggregation;
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
- priv->hw_params.sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
-
/* Device-specific setup */
priv->lib->set_hw_params(priv);
}
@@ -1421,7 +1151,7 @@ void iwl_set_hw_params(struct iwl_priv *priv)
/* show what optional capabilities we have */
-void iwl_option_config(struct iwl_priv *priv)
+static void iwl_option_config(struct iwl_priv *priv)
{
#ifdef CONFIG_IWLWIFI_DEBUG
IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n");
@@ -1454,6 +1184,42 @@ void iwl_option_config(struct iwl_priv *priv)
#endif
}
+static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
+{
+ u16 radio_cfg;
+
+ priv->eeprom_data->sku = priv->eeprom_data->sku;
+
+ if (priv->eeprom_data->sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ !priv->cfg->ht_params) {
+ IWL_ERR(priv, "Invalid 11n configuration\n");
+ return -EINVAL;
+ }
+
+ if (!priv->eeprom_data->sku) {
+ IWL_ERR(priv, "Invalid device sku\n");
+ return -EINVAL;
+ }
+
+ IWL_INFO(priv, "Device SKU: 0x%X\n", priv->eeprom_data->sku);
+
+ radio_cfg = priv->eeprom_data->radio_cfg;
+
+ priv->hw_params.tx_chains_num =
+ num_of_ant(priv->eeprom_data->valid_tx_ant);
+ if (priv->cfg->rx_with_siso_diversity)
+ priv->hw_params.rx_chains_num = 1;
+ else
+ priv->hw_params.rx_chains_num =
+ num_of_ant(priv->eeprom_data->valid_rx_ant);
+
+ IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+ priv->eeprom_data->valid_tx_ant,
+ priv->eeprom_data->valid_rx_ant);
+
+ return 0;
+}
+
static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
const struct iwl_cfg *cfg,
const struct iwl_fw *fw)
@@ -1466,7 +1232,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
struct iwl_trans_config trans_cfg;
static const u8 no_reclaim_cmds[] = {
REPLY_RX_PHY_CMD,
- REPLY_RX,
REPLY_RX_MPDU_CMD,
REPLY_COMPRESSED_BA,
STATISTICS_NOTIFICATION,
@@ -1539,8 +1304,12 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
trans_cfg.queue_watchdog_timeout =
priv->cfg->base_params->wd_timeout;
else
- trans_cfg.queue_watchdog_timeout = IWL_WATCHHDOG_DISABLED;
+ trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
trans_cfg.command_names = iwl_dvm_cmd_strings;
+ trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
+
+ WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
+ priv->cfg->base_params->num_of_queues);
ucode_flags = fw->ucode_capa.flags;
@@ -1551,15 +1320,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
- trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
- trans_cfg.n_queue_to_fifo =
- ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
} else {
priv->sta_key_max_num = STA_KEY_MAX_NUM;
trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
- trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
- trans_cfg.n_queue_to_fifo =
- ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
}
/* Configure transport layer */
@@ -1599,25 +1362,33 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
goto out_free_hw;
/* Read the EEPROM */
- if (iwl_eeprom_init(priv, priv->trans->hw_rev)) {
+ if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
+ &priv->eeprom_blob_size)) {
IWL_ERR(priv, "Unable to init EEPROM\n");
goto out_free_hw;
}
+
/* Reset chip to save power until we load uCode during "up". */
iwl_trans_stop_hw(priv->trans, false);
- if (iwl_eeprom_check_version(priv))
+ priv->eeprom_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
+ priv->eeprom_blob,
+ priv->eeprom_blob_size);
+ if (!priv->eeprom_data)
+ goto out_free_eeprom_blob;
+
+ if (iwl_eeprom_check_version(priv->eeprom_data, priv->trans))
goto out_free_eeprom;
if (iwl_eeprom_init_hw_params(priv))
goto out_free_eeprom;
/* extract MAC Address */
- iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
+ memcpy(priv->addresses[0].addr, priv->eeprom_data->hw_addr, ETH_ALEN);
IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
priv->hw->wiphy->addresses = priv->addresses;
priv->hw->wiphy->n_addresses = 1;
- num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
+ num_mac = priv->eeprom_data->n_hw_addrs;
if (num_mac > 1) {
memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
ETH_ALEN);
@@ -1630,7 +1401,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
************************/
iwl_set_hw_params(priv);
- if (!(priv->hw_params.sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
+ if (!(priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
/*
@@ -1640,9 +1411,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
priv->sta_key_max_num = STA_KEY_MAX_NUM;
trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
- trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
- trans_cfg.n_queue_to_fifo =
- ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
/* Configure transport layer again*/
iwl_trans_configure(priv->trans, &trans_cfg);
@@ -1660,9 +1428,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
atomic_set(&priv->queue_stop_count[i], 0);
}
- WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
- IWLAGN_CMD_FIFO_NUM);
-
if (iwl_init_drv(priv))
goto out_free_eeprom;
@@ -1711,8 +1476,10 @@ out_destroy_workqueue:
destroy_workqueue(priv->workqueue);
priv->workqueue = NULL;
iwl_uninit_drv(priv);
+out_free_eeprom_blob:
+ kfree(priv->eeprom_blob);
out_free_eeprom:
- iwl_eeprom_free(priv);
+ iwl_free_eeprom_data(priv->eeprom_data);
out_free_hw:
ieee80211_free_hw(priv->hw);
out:
@@ -1720,7 +1487,7 @@ out:
return op_mode;
}
-void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
+static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@@ -1728,7 +1495,7 @@ void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
iwl_dbgfs_unregister(priv);
- iwl_testmode_cleanup(priv);
+ iwl_testmode_free(priv);
iwlagn_mac_unregister(priv);
iwl_tt_exit(priv);
@@ -1737,7 +1504,8 @@ void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
priv->ucode_loaded = false;
iwl_trans_stop_device(priv->trans);
- iwl_eeprom_free(priv);
+ kfree(priv->eeprom_blob);
+ iwl_free_eeprom_data(priv->eeprom_data);
/*netif_stop_queue(dev); */
flush_workqueue(priv->workqueue);
@@ -1850,7 +1618,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
}
/*TODO: Update dbgfs with ISR error stats obtained below */
- iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
+ iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -2185,7 +1953,7 @@ static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
}
}
-void iwl_nic_error(struct iwl_op_mode *op_mode)
+static void iwl_nic_error(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@@ -2198,7 +1966,7 @@ void iwl_nic_error(struct iwl_op_mode *op_mode)
iwlagn_fw_error(priv, false);
}
-void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
+static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@@ -2208,11 +1976,60 @@ void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
}
}
-void iwl_nic_config(struct iwl_op_mode *op_mode)
+#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
+
+static void iwl_nic_config(struct iwl_op_mode *op_mode)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ u16 radio_cfg = priv->eeprom_data->radio_cfg;
+
+ /* SKU Control */
+ iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+ CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
+ (CSR_HW_REV_STEP(priv->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
+ (CSR_HW_REV_DASH(priv->trans->hw_rev) <<
+ CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
+
+ /* write radio config values to register */
+ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
+ u32 reg_val =
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <<
+ CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE |
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg) <<
+ CSR_HW_IF_CONFIG_REG_POS_PHY_STEP |
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg) <<
+ CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+ iwl_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+ CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH, reg_val);
+
+ IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg),
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+ } else {
+ WARN_ON(1);
+ }
- priv->lib->nic_config(priv);
+ /* set CSR_HW_CONFIG_REG for uCode use */
+ iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+ /* W/A : NIC is stuck in a reset state after Early PCIe power off
+ * (PCIe power is lost before PERST# is asserted),
+ * causing ME FW to lose ownership and not being able to obtain it back.
+ */
+ iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+
+ if (priv->lib->nic_config)
+ priv->lib->nic_config(priv);
}
static void iwl_wimax_active(struct iwl_op_mode *op_mode)
@@ -2223,7 +2040,7 @@ static void iwl_wimax_active(struct iwl_op_mode *op_mode)
IWL_ERR(priv, "RF is used by WiMAX\n");
}
-void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
+static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
int mq = priv->queue_to_mac80211[queue];
@@ -2242,7 +2059,7 @@ void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
ieee80211_stop_queue(priv->hw, mq);
}
-void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
+static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
int mq = priv->queue_to_mac80211[queue];
@@ -2282,16 +2099,17 @@ void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
priv->passive_no_rx = false;
}
-void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
struct ieee80211_tx_info *info;
info = IEEE80211_SKB_CB(skb);
- kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
+ iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
dev_kfree_skb_any(skb);
}
-void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
@@ -2303,7 +2121,7 @@ void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
}
-const struct iwl_op_mode_ops iwl_dvm_ops = {
+static const struct iwl_op_mode_ops iwl_dvm_ops = {
.start = iwl_op_mode_dvm_start,
.stop = iwl_op_mode_dvm_stop,
.rx = iwl_rx_dispatch,
@@ -2322,9 +2140,6 @@ const struct iwl_op_mode_ops iwl_dvm_ops = {
* driver and module entry point
*
*****************************************************************************/
-
-struct kmem_cache *iwl_tx_cmd_pool;
-
static int __init iwl_init(void)
{
@@ -2332,36 +2147,25 @@ static int __init iwl_init(void)
pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
pr_info(DRV_COPYRIGHT "\n");
- iwl_tx_cmd_pool = kmem_cache_create("iwl_dev_cmd",
- sizeof(struct iwl_device_cmd),
- sizeof(void *), 0, NULL);
- if (!iwl_tx_cmd_pool)
- return -ENOMEM;
-
ret = iwlagn_rate_control_register();
if (ret) {
pr_err("Unable to register rate control algorithm: %d\n", ret);
- goto error_rc_register;
+ return ret;
}
- ret = iwl_pci_register_driver();
- if (ret)
- goto error_pci_register;
- return ret;
+ ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops);
+ if (ret) {
+ pr_err("Unable to register op_mode: %d\n", ret);
+ iwlagn_rate_control_unregister();
+ }
-error_pci_register:
- iwlagn_rate_control_unregister();
-error_rc_register:
- kmem_cache_destroy(iwl_tx_cmd_pool);
return ret;
}
+module_init(iwl_init);
static void __exit iwl_exit(void)
{
- iwl_pci_unregister_driver();
+ iwl_opmode_deregister("iwldvm");
iwlagn_rate_control_unregister();
- kmem_cache_destroy(iwl_tx_cmd_pool);
}
-
module_exit(iwl_exit);
-module_init(iwl_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/dvm/power.c
index 544ddf17f5bd..518cf3715809 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/dvm/power.c
@@ -31,18 +31,15 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
-
#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-agn.h"
#include "iwl-io.h"
-#include "iwl-commands.h"
#include "iwl-debug.h"
-#include "iwl-power.h"
#include "iwl-trans.h"
#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
+#include "commands.h"
+#include "power.h"
/*
* Setting power level allows the card to go to sleep when not busy.
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/dvm/power.h
index 21afc92efacb..a2cee7f04848 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/dvm/power.h
@@ -28,7 +28,7 @@
#ifndef __iwl_power_setting_h__
#define __iwl_power_setting_h__
-#include "iwl-commands.h"
+#include "commands.h"
struct iwl_power_mgr {
struct iwl_powertable_cmd sleep_cmd;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 8cebd7c363fc..6fddd2785e6e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -35,10 +35,8 @@
#include <linux/workqueue.h>
-#include "iwl-dev.h"
-#include "iwl-agn.h"
-#include "iwl-op-mode.h"
-#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
#define RS_NAME "iwl-agn-rs"
@@ -819,7 +817,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
if (num_of_ant(tbl->ant_type) > 1)
tbl->ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
tbl->is_ht40 = 0;
tbl->is_SGI = 0;
@@ -1447,7 +1445,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
int ret = 0;
u8 update_search_tbl_counter = 0;
@@ -1465,7 +1463,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
valid_tx_ant =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
tbl->action != IWL_LEGACY_SWITCH_SISO)
tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1489,7 +1487,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_LEGACY_SWITCH_SISO;
valid_tx_ant =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
}
start_action = tbl->action;
@@ -1623,7 +1621,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
@@ -1641,7 +1639,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
/* avoid antenna B and MIMO */
valid_tx_ant =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
break;
@@ -1659,7 +1657,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
/* configure as 1x1 if bt full concurrency */
if (priv->bt_full_concurrent) {
valid_tx_ant =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
}
@@ -1795,7 +1793,7 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
u8 update_search_tbl_counter = 0;
int ret;
@@ -1965,7 +1963,7 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
u32 sz = (sizeof(struct iwl_scale_tbl_info) -
(sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
+ u8 valid_tx_ant = priv->eeprom_data->valid_tx_ant;
u8 tx_chains_num = priv->hw_params.tx_chains_num;
int ret;
u8 update_search_tbl_counter = 0;
@@ -2699,7 +2697,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
i = lq_sta->last_txrate_idx;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = priv->eeprom_data->valid_tx_ant;
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
@@ -2893,15 +2891,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
/* These values will be overridden later */
lq_sta->lq.general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~first_antenna(priv->hw_params.valid_tx_ant);
+ priv->eeprom_data->valid_tx_ant &
+ ~first_antenna(priv->eeprom_data->valid_tx_ant);
if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
+ priv->eeprom_data->valid_tx_ant;
}
/* as default allow aggregation for all tids */
@@ -2947,7 +2945,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
}
/* How many times should we repeat the initial rate? */
@@ -2979,7 +2977,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv->bt_full_concurrent)
valid_tx_ant = ANT_A;
else
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = priv->eeprom_data->valid_tx_ant;
}
/* Fill rest of rate table */
@@ -3013,7 +3011,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
if (priv && priv->bt_full_concurrent) {
/* 1x1 only */
tbl_type.ant_type =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
}
/* Indicate to uCode which entries might be MIMO.
@@ -3100,7 +3098,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
u8 ant_sel_tx;
priv = lq_sta->drv;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
+ valid_tx_ant = priv->eeprom_data->valid_tx_ant;
if (lq_sta->dbg_fixed_rate) {
ant_sel_tx =
((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3171,9 +3169,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += sprintf(buff+desc, "fixed rate 0x%X\n",
lq_sta->dbg_fixed_rate);
desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ (priv->eeprom_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (priv->eeprom_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (priv->eeprom_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
desc += sprintf(buff+desc, "lq type %s\n",
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index 82d02e1ae89f..ad3aea8f626a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -29,9 +29,10 @@
#include <net/mac80211.h>
-#include "iwl-commands.h"
#include "iwl-config.h"
+#include "commands.h"
+
struct iwl_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/dvm/rx.c
index 403de96f9747..fee5cffa1669 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rx.c
@@ -32,12 +32,10 @@
#include <linux/sched.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
#include "iwl-io.h"
-#include "iwl-agn-calib.h"
-#include "iwl-agn.h"
-#include "iwl-modparams.h"
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
#define IWL_CMD_ENTRY(x) [x] = #x
@@ -90,7 +88,6 @@ const char *iwl_dvm_cmd_strings[REPLY_MAX] = {
IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD),
IWL_CMD_ENTRY(REPLY_RX_PHY_CMD),
IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD),
- IWL_CMD_ENTRY(REPLY_RX),
IWL_CMD_ENTRY(REPLY_COMPRESSED_BA),
IWL_CMD_ENTRY(CALIBRATION_CFG_CMD),
IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION),
@@ -897,8 +894,7 @@ static int iwlagn_calc_rssi(struct iwl_priv *priv,
return max_rssi - agc - IWLAGN_RSSI_OFFSET;
}
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
+/* Called for REPLY_RX_MPDU_CMD */
static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd)
@@ -913,37 +909,17 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
u32 ampdu_status;
u32 rate_n_flags;
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->data;
- header = (struct ieee80211_hdr *)(pkt->data + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->last_phy_res_valid) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return 0;
- }
- phy_res = &priv->last_phy_res;
- amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
- header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len);
- ampdu_status = iwlagn_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
+ if (!priv->last_phy_res_valid) {
+ IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+ return 0;
}
+ phy_res = &priv->last_phy_res;
+ amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
+ header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len);
+ ampdu_status = iwlagn_translate_rx_status(priv,
+ le32_to_cpu(rx_pkt_status));
if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
@@ -1012,6 +988,8 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
rx_status.flag |= RX_FLAG_40MHZ;
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status.flag |= RX_FLAG_SHORT_GI;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ rx_status.flag |= RX_FLAG_HT_GF;
iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
rxb, &rx_status);
@@ -1124,8 +1102,6 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
- void (*pre_rx_handler)(struct iwl_priv *,
- struct iwl_rx_cmd_buffer *);
int err = 0;
/*
@@ -1135,19 +1111,19 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
*/
iwl_notification_wait_notify(&priv->notif_wait, pkt);
- /* RX data may be forwarded to userspace (using pre_rx_handler) in one
- * of two cases: the first, that the user owns the uCode through
- * testmode - in such case the pre_rx_handler is set and no further
- * processing takes place. The other case is when the user want to
- * monitor the rx w/o affecting the regular flow - the pre_rx_handler
- * will be set but the ownership flag != IWL_OWNERSHIP_TM and the flow
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+ /*
+ * RX data may be forwarded to userspace in one
+ * of two cases: the user owns the fw through testmode or when
+ * the user requested to monitor the rx w/o affecting the regular flow.
+ * In these cases the iwl_test object will handle forwarding the rx
+ * data to user space.
+ * Note that if the ownership flag != IWL_OWNERSHIP_TM the flow
* continues.
- * We need to use ACCESS_ONCE to prevent a case where the handler
- * changes between the check and the call.
*/
- pre_rx_handler = ACCESS_ONCE(priv->pre_rx_handler);
- if (pre_rx_handler)
- pre_rx_handler(priv, rxb);
+ iwl_test_rx(&priv->tst, rxb);
+#endif
+
if (priv->ucode_owner != IWL_OWNERSHIP_TM) {
/* Based on type of command response or notification,
* handle those that need handling via function in
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 0a3aa7c83003..10896393e5a0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -25,11 +25,11 @@
*****************************************************************************/
#include <linux/etherdevice.h>
-#include "iwl-dev.h"
-#include "iwl-agn.h"
-#include "iwl-agn-calib.h"
#include "iwl-trans.h"
#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
+#include "calib.h"
/*
* initialize rxon structure with default values from eeprom
@@ -37,8 +37,6 @@
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx)
{
- const struct iwl_channel_info *ch_info;
-
memset(&ctx->staging, 0, sizeof(ctx->staging));
if (!ctx->vif) {
@@ -80,14 +78,8 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv,
ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif
- ch_info = iwl_get_channel_info(priv, priv->band,
- le16_to_cpu(ctx->active.channel));
-
- if (!ch_info)
- ch_info = &priv->channel_info[0];
-
- ctx->staging.channel = cpu_to_le16(ch_info->channel);
- priv->band = ch_info->band;
+ ctx->staging.channel = cpu_to_le16(priv->hw->conf.channel->hw_value);
+ priv->band = priv->hw->conf.channel->band;
iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
@@ -175,7 +167,8 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
return ret;
}
-void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+static void iwlagn_update_qos(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
{
int ret;
@@ -202,8 +195,8 @@ void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
}
-int iwlagn_update_beacon(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
+static int iwlagn_update_beacon(struct iwl_priv *priv,
+ struct ieee80211_vif *vif)
{
lockdep_assert_held(&priv->mutex);
@@ -215,7 +208,7 @@ int iwlagn_update_beacon(struct iwl_priv *priv,
}
static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
+ struct iwl_rxon_context *ctx)
{
int ret = 0;
struct iwl_rxon_assoc_cmd rxon_assoc;
@@ -427,10 +420,10 @@ static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
return -EINVAL;
}
- if (tx_power > priv->tx_power_device_lmt) {
+ if (tx_power > DIV_ROUND_UP(priv->eeprom_data->max_tx_pwr_half_dbm, 2)) {
IWL_WARN(priv,
"Requested user TXPOWER %d above upper limit %d.\n",
- tx_power, priv->tx_power_device_lmt);
+ tx_power, priv->eeprom_data->max_tx_pwr_half_dbm);
return -EINVAL;
}
@@ -863,8 +856,8 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
* or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
* a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
*/
-int iwl_full_rxon_required(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
+static int iwl_full_rxon_required(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
{
const struct iwl_rxon_cmd *staging = &ctx->staging;
const struct iwl_rxon_cmd *active = &ctx->active;
@@ -1189,7 +1182,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
struct iwl_rxon_context *ctx;
struct ieee80211_conf *conf = &hw->conf;
struct ieee80211_channel *channel = conf->channel;
- const struct iwl_channel_info *ch_info;
int ret = 0;
IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
@@ -1223,14 +1215,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ch_info = iwl_get_channel_info(priv, channel->band,
- channel->hw_value);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- ret = -EINVAL;
- goto out;
- }
-
for_each_context(priv, ctx) {
/* Configure HT40 channels */
if (ctx->ht.enabled != conf_is_ht(conf))
@@ -1294,9 +1278,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
return ret;
}
-void iwlagn_check_needed_chains(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_bss_conf *bss_conf)
+static void iwlagn_check_needed_chains(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx,
+ struct ieee80211_bss_conf *bss_conf)
{
struct ieee80211_vif *vif = ctx->vif;
struct iwl_rxon_context *tmp;
@@ -1388,7 +1372,7 @@ void iwlagn_check_needed_chains(struct iwl_priv *priv,
ht_conf->single_chain_sufficient = !need_multiple;
}
-void iwlagn_chain_noise_reset(struct iwl_priv *priv)
+static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
{
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
int ret;
@@ -1463,7 +1447,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (changes & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
- priv->timestamp = bss_conf->last_tsf;
+ priv->timestamp = bss_conf->sync_tsf;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
} else {
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 031d8e21f82f..e3467fa86899 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -30,11 +30,8 @@
#include <linux/etherdevice.h>
#include <net/mac80211.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-trans.h"
+#include "dev.h"
+#include "agn.h"
/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
* sending probe req. This should be set long enough to hear probe responses
@@ -54,6 +51,9 @@
#define IWL_CHANNEL_TUNE_TIME 5
#define MAX_SCAN_CHANNEL 50
+/* For reset radio, need minimal dwell time only */
+#define IWL_RADIO_RESET_DWELL_TIME 5
+
static int iwl_send_scan_abort(struct iwl_priv *priv)
{
int ret;
@@ -67,7 +67,6 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
* to receive scan abort command or it does not perform
* hardware scan currently */
if (!test_bit(STATUS_READY, &priv->status) ||
- !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
!test_bit(STATUS_SCAN_HW, &priv->status) ||
test_bit(STATUS_FW_ERROR, &priv->status))
return -EIO;
@@ -101,11 +100,8 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
ieee80211_scan_completed(priv->hw, aborted);
}
- if (priv->scan_type == IWL_SCAN_ROC) {
- ieee80211_remain_on_channel_expired(priv->hw);
- priv->hw_roc_channel = NULL;
- schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
- }
+ if (priv->scan_type == IWL_SCAN_ROC)
+ iwl_scan_roc_expired(priv);
priv->scan_type = IWL_SCAN_NORMAL;
priv->scan_vif = NULL;
@@ -134,11 +130,8 @@ static void iwl_process_scan_complete(struct iwl_priv *priv)
goto out_settings;
}
- if (priv->scan_type == IWL_SCAN_ROC) {
- ieee80211_remain_on_channel_expired(priv->hw);
- priv->hw_roc_channel = NULL;
- schedule_delayed_work(&priv->hw_roc_disable_work, 10 * HZ);
- }
+ if (priv->scan_type == IWL_SCAN_ROC)
+ iwl_scan_roc_expired(priv);
if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
int err;
@@ -403,15 +396,21 @@ static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
{
struct iwl_rxon_context *ctx;
+ int limits[NUM_IWL_RXON_CTX] = {};
+ int n_active = 0;
+ u16 limit;
+
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
/*
* If we're associated, we clamp the dwell time 98%
- * of the smallest beacon interval (minus 2 * channel
- * tune time)
+ * of the beacon interval (minus 2 * channel tune time)
+ * If both contexts are active, we have to restrict to
+ * 1/2 of the minimum of them, because they might be in
+ * lock-step with the time inbetween only half of what
+ * time we'd have in each of them.
*/
for_each_context(priv, ctx) {
- u16 value;
-
switch (ctx->staging.dev_type) {
case RXON_DEV_TYPE_P2P:
/* no timing constraints */
@@ -431,14 +430,25 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
break;
}
- value = ctx->beacon_int;
- if (!value)
- value = IWL_PASSIVE_DWELL_BASE;
- value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
- dwell_time = min(value, dwell_time);
+ limits[n_active++] = ctx->beacon_int ?: IWL_PASSIVE_DWELL_BASE;
}
- return dwell_time;
+ switch (n_active) {
+ case 0:
+ return dwell_time;
+ case 2:
+ limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+ limit /= 2;
+ dwell_time = min(limit, dwell_time);
+ /* fall through to limit further */
+ case 1:
+ limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+ limit /= n_active;
+ return min(limit, dwell_time);
+ default:
+ WARN_ON_ONCE(1);
+ return dwell_time;
+ }
}
static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
@@ -453,27 +463,17 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
/* Return valid, unused, channel for a passive scan to reset the RF */
static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band)
+ enum ieee80211_band band)
{
- const struct iwl_channel_info *ch_info;
- int i;
- u8 channel = 0;
- u8 min, max;
+ struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
struct iwl_rxon_context *ctx;
+ int i;
- if (band == IEEE80211_BAND_5GHZ) {
- min = 14;
- max = priv->channel_count;
- } else {
- min = 0;
- max = 14;
- }
-
- for (i = min; i < max; i++) {
+ for (i = 0; i < sband->n_channels; i++) {
bool busy = false;
for_each_context(priv, ctx) {
- busy = priv->channel_info[i].channel ==
+ busy = sband->channels[i].hw_value ==
le16_to_cpu(ctx->staging.channel);
if (busy)
break;
@@ -482,54 +482,46 @@ static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
if (busy)
continue;
- channel = priv->channel_info[i].channel;
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (is_channel_valid(ch_info))
- break;
+ if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED))
+ return sband->channels[i].hw_value;
}
- return channel;
+ return 0;
}
-static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- struct iwl_scan_channel *scan_ch)
+static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
{
const struct ieee80211_supported_band *sband;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added = 0;
- u16 channel = 0;
+ u16 channel;
sband = iwl_get_hw_mode(priv, band);
if (!sband) {
IWL_ERR(priv, "invalid band\n");
- return added;
+ return 0;
}
- active_dwell = iwl_get_active_dwell_time(priv, band, 0);
- passive_dwell = iwl_get_passive_dwell_time(priv, band);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
channel = iwl_get_single_channel_number(priv, band);
if (channel) {
scan_ch->channel = cpu_to_le16(channel);
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ scan_ch->active_dwell =
+ cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
+ scan_ch->passive_dwell =
+ cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
/* Set txpower levels to defaults */
scan_ch->dsp_atten = 110;
if (band == IEEE80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
- added++;
- } else
- IWL_ERR(priv, "no valid channel found\n");
- return added;
+ return 1;
+ }
+
+ IWL_ERR(priv, "no valid channel found\n");
+ return 0;
}
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
@@ -540,7 +532,6 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
{
struct ieee80211_channel *chan;
const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
u16 passive_dwell = 0;
u16 active_dwell = 0;
int added, i;
@@ -565,16 +556,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
channel = chan->hw_value;
scan_ch->channel = cpu_to_le16(channel);
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ if (!is_active || (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
else
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
@@ -678,12 +660,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u16 rx_chain = 0;
enum ieee80211_band band;
u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
+ u8 rx_ant = priv->eeprom_data->valid_rx_ant;
u8 rate;
bool is_active = false;
int chan_mod;
u8 active_chains;
- u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
+ u8 scan_tx_antennas = priv->eeprom_data->valid_tx_ant;
int ret;
int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
@@ -755,6 +737,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
switch (priv->scan_type) {
case IWL_SCAN_RADIO_RESET:
IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ /*
+ * Override quiet time as firmware checks that active
+ * dwell is >= quiet; since we use passive scan it'll
+ * not actually be used.
+ */
+ scan->quiet_time = cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
break;
case IWL_SCAN_NORMAL:
if (priv->scan_request->n_ssids) {
@@ -893,7 +881,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
/* MIMO is not used here, but value is required */
rx_chain |=
- priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ priv->eeprom_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
@@ -928,7 +916,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
switch (priv->scan_type) {
case IWL_SCAN_RADIO_RESET:
scan->channel_count =
- iwl_get_single_channel_for_scan(priv, vif, band,
+ iwl_get_channel_for_reset_scan(priv, vif, band,
(void *)&scan->data[cmd_len]);
break;
case IWL_SCAN_NORMAL:
@@ -994,8 +982,10 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
set_bit(STATUS_SCAN_HW, &priv->status);
ret = iwlagn_set_pan_params(priv);
- if (ret)
+ if (ret) {
+ clear_bit(STATUS_SCAN_HW, &priv->status);
return ret;
+ }
ret = iwl_dvm_send_cmd(priv, &cmd);
if (ret) {
@@ -1008,7 +998,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
void iwl_init_scan_params(struct iwl_priv *priv)
{
- u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
+ u8 ant_idx = fls(priv->eeprom_data->valid_tx_ant) - 1;
if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
@@ -1158,3 +1148,40 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
mutex_unlock(&priv->mutex);
}
}
+
+void iwl_scan_roc_expired(struct iwl_priv *priv)
+{
+ /*
+ * The status bit should be set here, to prevent a race
+ * where the atomic_read returns 1, but before the execution continues
+ * iwl_scan_offchannel_skb_status() checks if the status bit is set
+ */
+ set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
+
+ if (atomic_read(&priv->num_aux_in_flight) == 0) {
+ ieee80211_remain_on_channel_expired(priv->hw);
+ priv->hw_roc_channel = NULL;
+ schedule_delayed_work(&priv->hw_roc_disable_work,
+ 10 * HZ);
+
+ clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
+ } else {
+ IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n",
+ atomic_read(&priv->num_aux_in_flight));
+ }
+}
+
+void iwl_scan_offchannel_skb(struct iwl_priv *priv)
+{
+ WARN_ON(!priv->hw_roc_start_notified);
+ atomic_inc(&priv->num_aux_in_flight);
+}
+
+void iwl_scan_offchannel_skb_status(struct iwl_priv *priv)
+{
+ if (atomic_dec_return(&priv->num_aux_in_flight) == 0 &&
+ test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n");
+ iwl_scan_roc_expired(priv);
+ }
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index eb6a8eaf42fc..b29b798f7550 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -28,10 +28,9 @@
*****************************************************************************/
#include <linux/etherdevice.h>
#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-agn.h"
#include "iwl-trans.h"
+#include "dev.h"
+#include "agn.h"
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
@@ -171,26 +170,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
return cmd.handler_status;
}
-static bool iwl_is_channel_extension(struct iwl_priv *priv,
- enum ieee80211_band band,
- u16 channel, u8 extension_chan_offset)
-{
- const struct iwl_channel_info *ch_info;
-
- ch_info = iwl_get_channel_info(priv, band, channel);
- if (!is_channel_valid(ch_info))
- return false;
-
- if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40PLUS);
- else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40MINUS);
-
- return false;
-}
-
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
struct ieee80211_sta_ht_cap *ht_cap)
@@ -198,21 +177,25 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
return false;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (priv->disable_ht40)
+ return false;
+#endif
+
/*
- * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
- * the bit will not set if it is pure 40MHz case
+ * Remainder of this function checks ht_cap, but if it's
+ * NULL then we can do HT40 (special case for RXON)
*/
- if (ht_cap && !ht_cap->ht_supported)
+ if (!ht_cap)
+ return true;
+
+ if (!ht_cap->ht_supported)
return false;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (priv->disable_ht40)
+ if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40))
return false;
-#endif
- return iwl_is_channel_extension(priv, priv->band,
- le16_to_cpu(ctx->staging.channel),
- ctx->ht.extension_chan_offset);
+ return true;
}
static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
@@ -236,6 +219,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
+ sta->addr,
(mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
"static" :
(mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
@@ -649,23 +633,23 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_CCK_MSK;
- rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
+ rate_flags |= first_antenna(priv->eeprom_data->valid_tx_ant) <<
RATE_MCS_ANT_POS;
rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
link_cmd->general_params.single_stream_ant_msk =
- first_antenna(priv->hw_params.valid_tx_ant);
+ first_antenna(priv->eeprom_data->valid_tx_ant);
link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~first_antenna(priv->hw_params.valid_tx_ant);
+ priv->eeprom_data->valid_tx_ant &
+ ~first_antenna(priv->eeprom_data->valid_tx_ant);
if (!link_cmd->general_params.dual_stream_ant_msk) {
link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
- } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
+ } else if (num_of_ant(priv->eeprom_data->valid_tx_ant) == 2) {
link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
+ priv->eeprom_data->valid_tx_ant;
}
link_cmd->agg_params.agg_dis_start_th =
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
new file mode 100644
index 000000000000..57b918ce3b5f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -0,0 +1,471 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <net/net_namespace.h>
+#include <linux/netdevice.h>
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include <net/netlink.h>
+
+#include "iwl-debug.h"
+#include "iwl-trans.h"
+#include "dev.h"
+#include "agn.h"
+#include "iwl-test.h"
+#include "iwl-testmode.h"
+
+static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ return iwl_dvm_send_cmd(priv, cmd);
+}
+
+static bool iwl_testmode_valid_hw_addr(u32 addr)
+{
+ if (iwlagn_hw_valid_rtc_data_addr(addr))
+ return true;
+
+ if (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
+ addr < IWLAGN_RTC_INST_UPPER_BOUND)
+ return true;
+
+ return false;
+}
+
+static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ return priv->fw->ucode_ver;
+}
+
+static struct sk_buff*
+iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len);
+}
+
+static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+ return cfg80211_testmode_reply(skb);
+}
+
+static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode,
+ int len)
+{
+ struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+ return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len,
+ GFP_ATOMIC);
+}
+
+static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+ return cfg80211_testmode_event(skb, GFP_ATOMIC);
+}
+
+static struct iwl_test_ops tst_ops = {
+ .send_cmd = iwl_testmode_send_cmd,
+ .valid_hw_addr = iwl_testmode_valid_hw_addr,
+ .get_fw_ver = iwl_testmode_get_fw_ver,
+ .alloc_reply = iwl_testmode_alloc_reply,
+ .reply = iwl_testmode_reply,
+ .alloc_event = iwl_testmode_alloc_event,
+ .event = iwl_testmode_event,
+};
+
+void iwl_testmode_init(struct iwl_priv *priv)
+{
+ iwl_test_init(&priv->tst, priv->trans, &tst_ops);
+}
+
+void iwl_testmode_free(struct iwl_priv *priv)
+{
+ iwl_test_free(&priv->tst);
+}
+
+static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
+{
+ struct iwl_notification_wait calib_wait;
+ static const u8 calib_complete[] = {
+ CALIBRATION_COMPLETE_NOTIFICATION
+ };
+ int ret;
+
+ iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
+ calib_complete, ARRAY_SIZE(calib_complete),
+ NULL, NULL);
+ ret = iwl_init_alive_start(priv);
+ if (ret) {
+ IWL_ERR(priv, "Fail init calibration: %d\n", ret);
+ goto cfg_init_calib_error;
+ }
+
+ ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
+ if (ret)
+ IWL_ERR(priv, "Error detecting"
+ " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
+ return ret;
+
+cfg_init_calib_error:
+ iwl_remove_notification(&priv->notif_wait, &calib_wait);
+ return ret;
+}
+
+/*
+ * This function handles the user application commands for driver.
+ *
+ * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
+ * handlers respectively.
+ *
+ * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
+ * value of the actual command execution is replied to the user application.
+ *
+ * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
+ * is used for carry the message while IWL_TM_ATTR_COMMAND must set to
+ * IWL_TM_CMD_DEV2APP_SYNC_RSP.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ struct iwl_trans *trans = priv->trans;
+ struct sk_buff *skb;
+ unsigned char *rsp_data_ptr = NULL;
+ int status = 0, rsp_data_len = 0;
+ u32 inst_size = 0, data_size = 0;
+ const struct fw_img *img;
+
+ switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+ case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
+ rsp_data_ptr = (unsigned char *)priv->cfg->name;
+ rsp_data_len = strlen(priv->cfg->name);
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
+ rsp_data_len + 20);
+ if (!skb) {
+ IWL_ERR(priv, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+ if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+ IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
+ nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
+ rsp_data_len, rsp_data_ptr))
+ goto nla_put_failure;
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_ERR(priv, "Error sending msg : %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
+ if (status)
+ IWL_ERR(priv, "Error loading init ucode: %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
+ iwl_testmode_cfg_init_calib(priv);
+ priv->ucode_loaded = false;
+ iwl_trans_stop_device(trans);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
+ if (status) {
+ IWL_ERR(priv,
+ "Error loading runtime ucode: %d\n", status);
+ break;
+ }
+ status = iwl_alive_start(priv);
+ if (status)
+ IWL_ERR(priv,
+ "Error starting the device: %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ iwl_scan_cancel_timeout(priv, 200);
+ priv->ucode_loaded = false;
+ iwl_trans_stop_device(trans);
+ status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
+ if (status) {
+ IWL_ERR(priv,
+ "Error loading WOWLAN ucode: %d\n", status);
+ break;
+ }
+ status = iwl_alive_start(priv);
+ if (status)
+ IWL_ERR(priv,
+ "Error starting the device: %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_EEPROM:
+ if (priv->eeprom_blob) {
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
+ priv->eeprom_blob_size + 20);
+ if (!skb) {
+ IWL_ERR(priv, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+ if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+ IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
+ nla_put(skb, IWL_TM_ATTR_EEPROM,
+ priv->eeprom_blob_size,
+ priv->eeprom_blob))
+ goto nla_put_failure;
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_ERR(priv, "Error sending msg : %d\n",
+ status);
+ } else
+ return -ENODATA;
+ break;
+
+ case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+ if (!tb[IWL_TM_ATTR_FIXRATE]) {
+ IWL_ERR(priv, "Missing fixrate setting\n");
+ return -ENOMSG;
+ }
+ priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
+ if (!skb) {
+ IWL_ERR(priv, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+ if (!priv->ucode_loaded) {
+ IWL_ERR(priv, "No uCode has not been loaded\n");
+ return -EINVAL;
+ } else {
+ img = &priv->fw->img[priv->cur_ucode];
+ inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
+ data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
+ }
+ if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
+ nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
+ nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
+ goto nla_put_failure;
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_ERR(priv, "Error sending msg : %d\n", status);
+ break;
+
+ default:
+ IWL_ERR(priv, "Unknown testmode driver command ID\n");
+ return -ENOSYS;
+ }
+ return status;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+/*
+ * This function handles the user application switch ucode ownership.
+ *
+ * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_OWNER and
+ * decide who the current owner of the uCode
+ *
+ * If the current owner is OWNERSHIP_TM, then the only host command
+ * can deliver to uCode is from testmode, all the other host commands
+ * will dropped.
+ *
+ * default driver is the owner of uCode in normal operational mode
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ u8 owner;
+
+ if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
+ IWL_ERR(priv, "Missing ucode owner\n");
+ return -ENOMSG;
+ }
+
+ owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
+ if (owner == IWL_OWNERSHIP_DRIVER) {
+ priv->ucode_owner = owner;
+ iwl_test_enable_notifications(&priv->tst, false);
+ } else if (owner == IWL_OWNERSHIP_TM) {
+ priv->ucode_owner = owner;
+ iwl_test_enable_notifications(&priv->tst, true);
+ } else {
+ IWL_ERR(priv, "Invalid owner\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* The testmode gnl message handler that takes the gnl message from the
+ * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
+ * invoke the corresponding handlers.
+ *
+ * This function is invoked when there is user space application sending
+ * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
+ * by nl80211.
+ *
+ * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
+ * dispatching it to the corresponding handler.
+ *
+ * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
+ * -ENOSYS is replied to the user application if the command is unknown;
+ * Otherwise, the command is dispatched to the respective handler.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @data: pointer to user space message
+ * @len: length in byte of @data
+ */
+int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
+{
+ struct nlattr *tb[IWL_TM_ATTR_MAX];
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ int result;
+
+ result = iwl_test_parse(&priv->tst, tb, data, len);
+ if (result)
+ return result;
+
+ /* in case multiple accesses to the device happens */
+ mutex_lock(&priv->mutex);
+ switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+ case IWL_TM_CMD_APP2DEV_UCODE:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
+ case IWL_TM_CMD_APP2DEV_END_TRACE:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
+ case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
+ result = iwl_test_handle_cmd(&priv->tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
+ case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
+ case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
+ case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
+ case IWL_TM_CMD_APP2DEV_GET_EEPROM:
+ case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
+ IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
+ result = iwl_testmode_driver(hw, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_OWNERSHIP:
+ IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
+ result = iwl_testmode_ownership(hw, tb);
+ break;
+
+ default:
+ IWL_ERR(priv, "Unknown testmode command\n");
+ result = -ENOSYS;
+ break;
+ }
+ mutex_unlock(&priv->mutex);
+
+ if (result)
+ IWL_ERR(priv, "Test cmd failed result=%d\n", result);
+ return result;
+}
+
+int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ void *data, int len)
+{
+ struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ int result;
+ u32 cmd;
+
+ if (cb->args[3]) {
+ /* offset by 1 since commands start at 0 */
+ cmd = cb->args[3] - 1;
+ } else {
+ struct nlattr *tb[IWL_TM_ATTR_MAX];
+
+ result = iwl_test_parse(&priv->tst, tb, data, len);
+ if (result)
+ return result;
+
+ cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
+ cb->args[3] = cmd + 1;
+ }
+
+ /* in case multiple accesses to the device happens */
+ mutex_lock(&priv->mutex);
+ result = iwl_test_dump(&priv->tst, cmd, skb, cb);
+ mutex_unlock(&priv->mutex);
+ return result;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/dvm/tt.c
index a5cfe0aceedb..eb864433e59d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.c
@@ -31,17 +31,14 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
-
#include <net/mac80211.h>
-
-#include "iwl-agn.h"
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
#include "iwl-io.h"
-#include "iwl-commands.h"
-#include "iwl-debug.h"
-#include "iwl-agn-tt.h"
#include "iwl-modparams.h"
+#include "iwl-debug.h"
+#include "agn.h"
+#include "dev.h"
+#include "commands.h"
+#include "tt.h"
/* default Thermal Throttling transaction table
* Current state | Throttling Down | Throttling Up
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h b/drivers/net/wireless/iwlwifi/dvm/tt.h
index 86bbf47501c1..44c7c8f30a2d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.h
+++ b/drivers/net/wireless/iwlwifi/dvm/tt.h
@@ -28,7 +28,7 @@
#ifndef __iwl_tt_setting_h__
#define __iwl_tt_setting_h__
-#include "iwl-commands.h"
+#include "commands.h"
#define IWL_ABSOLUTE_ZERO 0
#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 3366e2e2f00f..5971a23aa47d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -32,12 +32,11 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>
-
-#include "iwl-dev.h"
#include "iwl-io.h"
-#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
#include "iwl-trans.h"
+#include "iwl-agn-hw.h"
+#include "dev.h"
+#include "agn.h"
static const u8 tid_to_ac[] = {
IEEE80211_AC_BE,
@@ -187,7 +186,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
rate_idx = info->control.rates[0].idx;
if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
- rate_idx = rate_lowest_index(&priv->bands[info->band],
+ rate_idx = rate_lowest_index(
+ &priv->eeprom_data->bands[info->band],
info->control.sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
if (info->band == IEEE80211_BAND_5GHZ)
@@ -207,10 +207,11 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- first_antenna(priv->hw_params.valid_tx_ant));
+ first_antenna(priv->eeprom_data->valid_tx_ant));
} else
- priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
+ priv->mgmt_tx_ant = iwl_toggle_tx_ant(
+ priv, priv->mgmt_tx_ant,
+ priv->eeprom_data->valid_tx_ant);
rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* Set the rate in the TX cmd */
@@ -296,7 +297,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct iwl_station_priv *sta_priv = NULL;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_device_cmd *dev_cmd = NULL;
+ struct iwl_device_cmd *dev_cmd;
struct iwl_tx_cmd *tx_cmd;
__le16 fc;
u8 hdr_len;
@@ -378,7 +379,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_CTL_AMPDU)
is_agg = true;
- dev_cmd = kmem_cache_alloc(iwl_tx_cmd_pool, GFP_ATOMIC);
+ dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
if (unlikely(!dev_cmd))
goto drop_unlock_priv;
@@ -402,6 +403,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
info->driver_data[0] = ctx;
info->driver_data[1] = dev_cmd;
+ /* From now on, we cannot access info->control */
spin_lock(&priv->sta_lock);
@@ -486,11 +488,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (sta_priv && sta_priv->client && !is_agg)
atomic_inc(&sta_priv->pending_frames);
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
+ iwl_scan_offchannel_skb(priv);
+
return 0;
drop_unlock_sta:
if (dev_cmd)
- kmem_cache_free(iwl_tx_cmd_pool, dev_cmd);
+ iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
spin_unlock(&priv->sta_lock);
drop_unlock_priv:
return -1;
@@ -597,7 +602,7 @@ turn_off:
* time, or we hadn't time to drain the AC queues.
*/
if (agg_state == IWL_AGG_ON)
- iwl_trans_tx_agg_disable(priv->trans, txq_id);
+ iwl_trans_txq_disable(priv->trans, txq_id);
else
IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
agg_state);
@@ -686,9 +691,8 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
- iwl_trans_tx_agg_setup(priv->trans, q, fifo,
- sta_priv->sta_id, tid,
- buf_size, ssn);
+ iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
+ buf_size, ssn);
/*
* If the limit is 0, then it wasn't initialised yet,
@@ -753,8 +757,8 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
IWL_DEBUG_TX_QUEUES(priv,
"Can continue DELBA flow ssn = next_recl ="
" %d", tid_data->next_reclaimed);
- iwl_trans_tx_agg_disable(priv->trans,
- tid_data->agg.txq_id);
+ iwl_trans_txq_disable(priv->trans,
+ tid_data->agg.txq_id);
iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
tid_data->agg.state = IWL_AGG_OFF;
ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
@@ -1136,6 +1140,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct sk_buff *skb;
struct iwl_rxon_context *ctx;
bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
+ bool is_offchannel_skb;
tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
IWLAGN_TX_RES_TID_POS;
@@ -1149,6 +1154,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
__skb_queue_head_init(&skbs);
+ is_offchannel_skb = false;
+
if (tx_resp->frame_count == 1) {
u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
@@ -1176,7 +1183,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
}
/*we can free until ssn % q.n_bd not inclusive */
- WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs));
+ WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid,
+ txq_id, ssn, &skbs));
iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
@@ -1189,8 +1197,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
info = IEEE80211_SKB_CB(skb);
ctx = info->driver_data[0];
- kmem_cache_free(iwl_tx_cmd_pool,
- (info->driver_data[1]));
+ iwl_trans_free_tx_cmd(priv->trans,
+ info->driver_data[1]);
memset(&info->status, 0, sizeof(info->status));
@@ -1225,10 +1233,19 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
+ is_offchannel_skb =
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
freed++;
}
WARN_ON(!is_agg && freed != 1);
+
+ /*
+ * An offchannel frame can be send only on the AUX queue, where
+ * there is no aggregation (and reordering) so it only is single
+ * skb is expected to be processed.
+ */
+ WARN_ON(is_offchannel_skb && freed != 1);
}
iwl_check_abort_status(priv, tx_resp->frame_count, status);
@@ -1239,6 +1256,9 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
ieee80211_tx_status(priv->hw, skb);
}
+ if (is_offchannel_skb)
+ iwl_scan_offchannel_skb_status(priv);
+
return 0;
}
@@ -1341,7 +1361,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
WARN_ON_ONCE(1);
info = IEEE80211_SKB_CB(skb);
- kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
+ iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
if (freed == 1) {
/* this is the first skb we deliver in this batch */
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index bc40dc68b0f4..6d8d6dd7943f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -30,15 +30,16 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include "iwl-dev.h"
#include "iwl-io.h"
#include "iwl-agn-hw.h"
-#include "iwl-agn.h"
-#include "iwl-agn-calib.h"
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-op-mode.h"
+#include "dev.h"
+#include "agn.h"
+#include "calib.h"
+
/******************************************************************************
*
* uCode download functions
@@ -60,8 +61,7 @@ iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
static int iwl_set_Xtal_calib(struct iwl_priv *priv)
{
struct iwl_calib_xtal_freq_cmd cmd;
- __le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
+ __le16 *xtal_calib = priv->eeprom_data->xtal_calib;
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
@@ -72,12 +72,10 @@ static int iwl_set_Xtal_calib(struct iwl_priv *priv)
static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_cmd cmd;
- __le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- memcpy(&cmd.radio_sensor_offset, offset_calib, sizeof(*offset_calib));
+ cmd.radio_sensor_offset = priv->eeprom_data->raw_temperature;
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
@@ -89,27 +87,17 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
{
struct iwl_calib_temperature_offset_v2_cmd cmd;
- __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
- EEPROM_KELVIN_TEMPERATURE);
- __le16 *offset_calib_low =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
- struct iwl_eeprom_calib_hdr *hdr;
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
- EEPROM_CALIB_ALL);
- memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
- sizeof(*offset_calib_high));
- memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
- sizeof(*offset_calib_low));
- if (!(cmd.radio_sensor_offset_low)) {
+ cmd.radio_sensor_offset_high = priv->eeprom_data->kelvin_temperature;
+ cmd.radio_sensor_offset_low = priv->eeprom_data->raw_temperature;
+ if (!cmd.radio_sensor_offset_low) {
IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
}
- memcpy(&cmd.burntVoltageRef, &hdr->voltage,
- sizeof(hdr->voltage));
+ cmd.burntVoltageRef = priv->eeprom_data->calib_voltage;
IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_high));
@@ -177,7 +165,7 @@ int iwl_init_alive_start(struct iwl_priv *priv)
return 0;
}
-int iwl_send_wimax_coex(struct iwl_priv *priv)
+static int iwl_send_wimax_coex(struct iwl_priv *priv)
{
struct iwl_wimax_coex_cmd coex_cmd;
@@ -238,13 +226,50 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
return ret;
}
+static const u8 iwlagn_default_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+};
+
+static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
+ IWL_TX_FIFO_VO,
+ IWL_TX_FIFO_VI,
+ IWL_TX_FIFO_BE,
+ IWL_TX_FIFO_BK,
+ IWL_TX_FIFO_BK_IPAN,
+ IWL_TX_FIFO_BE_IPAN,
+ IWL_TX_FIFO_VI_IPAN,
+ IWL_TX_FIFO_VO_IPAN,
+ IWL_TX_FIFO_BE_IPAN,
+ IWL_TX_FIFO_UNUSED,
+ IWL_TX_FIFO_AUX,
+};
static int iwl_alive_notify(struct iwl_priv *priv)
{
+ const u8 *queue_to_txf;
+ u8 n_queues;
int ret;
+ int i;
iwl_trans_fw_alive(priv->trans);
+ if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
+ priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) {
+ n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
+ queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
+ } else {
+ n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
+ queue_to_txf = iwlagn_default_queue_to_tx_fifo;
+ }
+
+ for (i = 0; i < n_queues; i++)
+ if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
+ iwl_trans_ac_txq_enable(priv->trans, i,
+ queue_to_txf[i]);
+
priv->passive_no_rx = false;
priv->transport_queue_stop = 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 67b28aa7f9be..87f465a49df1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -113,7 +113,7 @@ enum iwl_led_mode {
#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0
/* TX queue watchdog timeouts in mSecs */
-#define IWL_WATCHHDOG_DISABLED 0
+#define IWL_WATCHDOG_DISABLED 0
#define IWL_DEF_WD_TIMEOUT 2000
#define IWL_LONG_WD_TIMEOUT 10000
#define IWL_MAX_WD_TIMEOUT 120000
@@ -143,7 +143,7 @@ enum iwl_led_mode {
* @chain_noise_scale: default chain noise scale used for gain computation
* @wd_timeout: TX queues watchdog timeout
* @max_event_log_size: size of event log buffer size for ucode event logging
- * @shadow_reg_enable: HW shadhow register bit
+ * @shadow_reg_enable: HW shadow register support
* @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
* @no_idle_support: do not support idle mode
*/
@@ -177,18 +177,39 @@ struct iwl_base_params {
struct iwl_bt_params {
bool advanced_bt_coexist;
u8 bt_init_traffic_load;
- u8 bt_prio_boost;
+ u32 bt_prio_boost;
u16 agg_time_limit;
bool bt_sco_disable;
bool bt_session_2;
};
+
/*
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
+ * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
*/
struct iwl_ht_params {
+ enum ieee80211_smps_mode smps_mode;
const bool ht_greenfield_support; /* if used set to true */
bool use_rts_for_aggregation;
- enum ieee80211_smps_mode smps_mode;
+ u8 ht40_bands;
+};
+
+/*
+ * information on how to parse the EEPROM
+ */
+#define EEPROM_REG_BAND_1_CHANNELS 0x08
+#define EEPROM_REG_BAND_2_CHANNELS 0x26
+#define EEPROM_REG_BAND_3_CHANNELS 0x42
+#define EEPROM_REG_BAND_4_CHANNELS 0x5C
+#define EEPROM_REG_BAND_5_CHANNELS 0x74
+#define EEPROM_REG_BAND_24_HT40_CHANNELS 0x82
+#define EEPROM_REG_BAND_52_HT40_CHANNELS 0x92
+#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS 0x80
+#define EEPROM_REGULATORY_BAND_NO_HT40 0
+
+struct iwl_eeprom_params {
+ const u8 regulatory_bands[7];
+ bool enhanced_txpower;
};
/**
@@ -243,6 +264,7 @@ struct iwl_cfg {
/* params likely to change within a device family */
const struct iwl_ht_params *ht_params;
const struct iwl_bt_params *bt_params;
+ const struct iwl_eeprom_params *eeprom_params;
const bool need_temp_offset_calib; /* if used set to true */
const bool no_xtal_calib;
enum iwl_led_mode led_mode;
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 59750543fce7..34a5287dfc2f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -97,13 +97,10 @@
/*
* Hardware revision info
* Bit fields:
- * 31-8: Reserved
- * 7-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
+ * 31-16: Reserved
+ * 15-4: Type of device: see CSR_HW_REV_TYPE_xxx definitions
* 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
* 1-0: "Dash" (-) value, as in A-1, etc.
- *
- * NOTE: Revision step affects calculation of CCK txpower for 4965.
- * NOTE: See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
*/
#define CSR_HW_REV (CSR_BASE+0x028)
@@ -155,9 +152,21 @@
#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
/* Bits for CSR_HW_IF_CONFIG_REG */
-#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
-#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
+#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
+#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
+#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
+#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
+#define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
+#define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
+#define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
+
+#define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0)
+#define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2)
+#define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6)
+#define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10)
+#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
+#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
@@ -270,7 +279,10 @@
/* HW REV */
-#define CSR_HW_REV_TYPE_MSK (0x00001F0)
+#define CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
+#define CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
+
+#define CSR_HW_REV_TYPE_MSK (0x000FFF0)
#define CSR_HW_REV_TYPE_5300 (0x0000020)
#define CSR_HW_REV_TYPE_5350 (0x0000030)
#define CSR_HW_REV_TYPE_5100 (0x0000050)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 2d1b42847b9b..87535a67de76 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -61,7 +61,11 @@
*
*****************************************************************************/
+#define DEBUG
+
+#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/export.h>
#include "iwl-debug.h"
#include "iwl-devtrace.h"
@@ -81,8 +85,11 @@ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
}
__iwl_fn(warn)
+EXPORT_SYMBOL_GPL(__iwl_warn);
__iwl_fn(info)
+EXPORT_SYMBOL_GPL(__iwl_info);
__iwl_fn(crit)
+EXPORT_SYMBOL_GPL(__iwl_crit);
void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
const char *fmt, ...)
@@ -103,6 +110,7 @@ void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
trace_iwlwifi_err(&vaf);
va_end(args);
}
+EXPORT_SYMBOL_GPL(__iwl_err);
#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
void __iwl_dbg(struct device *dev,
@@ -119,10 +127,11 @@ void __iwl_dbg(struct device *dev,
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(level) &&
(!limit || net_ratelimit()))
- dev_err(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
+ dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
function, &vaf);
#endif
trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
va_end(args);
}
+EXPORT_SYMBOL_GPL(__iwl_dbg);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 8376b842bdba..42b20b0e83bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -38,13 +38,14 @@ static inline bool iwl_have_debug_level(u32 level)
}
void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
- const char *fmt, ...);
-void __iwl_warn(struct device *dev, const char *fmt, ...);
-void __iwl_info(struct device *dev, const char *fmt, ...);
-void __iwl_crit(struct device *dev, const char *fmt, ...);
+ const char *fmt, ...) __printf(4, 5);
+void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
+void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
+void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
/* No matter what is m (priv, bus, trans), this will work */
#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
+#define IWL_ERR_DEV(d, f, a...) __iwl_err((d), false, false, f, ## a)
#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
@@ -52,9 +53,9 @@ void __iwl_crit(struct device *dev, const char *fmt, ...);
#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
void __iwl_dbg(struct device *dev,
u32 level, bool limit, const char *function,
- const char *fmt, ...);
+ const char *fmt, ...) __printf(5, 6);
#else
-static inline void
+__printf(5, 6) static inline void
__iwl_dbg(struct device *dev,
u32 level, bool limit, const char *function,
const char *fmt, ...)
@@ -69,6 +70,8 @@ do { \
#define IWL_DEBUG(m, level, fmt, args...) \
__iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
+#define IWL_DEBUG_DEV(dev, level, fmt, args...) \
+ __iwl_dbg((dev), level, false, __func__, fmt, ##args)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
__iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
@@ -153,7 +156,7 @@ do { \
#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
+#define IWL_DEBUG_EEPROM(d, f, a...) IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 91f45e71e0a2..70191ddbd8f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -42,4 +42,9 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_info);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_warn);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_crit);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_err);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dbg);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 06203d6a1d86..06ca505bb2cc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -28,6 +28,7 @@
#define __IWLWIFI_DEVICE_TRACE
#include <linux/tracepoint.h>
+#include <linux/device.h>
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
@@ -175,7 +176,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_msg
-#define MAX_MSG_LEN 100
+#define MAX_MSG_LEN 110
DECLARE_EVENT_CLASS(iwlwifi_msg_event,
TP_PROTO(struct va_format *vaf),
@@ -188,7 +189,7 @@ DECLARE_EVENT_CLASS(iwlwifi_msg_event,
MAX_MSG_LEN, vaf->fmt,
*vaf->va) >= MAX_MSG_LEN);
),
- TP_printk("%s", (char *)__get_dynamic_array(msg))
+ TP_printk("%s", __get_str(msg))
);
DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index fac67a526a30..cc41cfaedfbd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -77,8 +77,33 @@
/* private includes */
#include "iwl-fw-file.h"
+/******************************************************************************
+ *
+ * module boiler plate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION IWLWIFI_VERSION VD
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
/**
* struct iwl_drv - drv common data
+ * @list: list of drv structures using this opmode
* @fw: the iwl_fw structure
* @op_mode: the running op_mode
* @trans: transport layer
@@ -89,6 +114,7 @@
* @request_firmware_complete: the firmware has been obtained from user space
*/
struct iwl_drv {
+ struct list_head list;
struct iwl_fw fw;
struct iwl_op_mode *op_mode;
@@ -102,7 +128,19 @@ struct iwl_drv {
struct completion request_firmware_complete;
};
-
+#define DVM_OP_MODE 0
+#define MVM_OP_MODE 1
+
+/* Protects the table contents, i.e. the ops pointer & drv list */
+static struct mutex iwlwifi_opmode_table_mtx;
+static struct iwlwifi_opmode_table {
+ const char *name; /* name: iwldvm, iwlmvm, etc */
+ const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */
+ struct list_head drv; /* list of devices using this op_mode */
+} iwlwifi_opmode_table[] = { /* ops set when driver is initialized */
+ { .name = "iwldvm", .ops = NULL },
+ { .name = "iwlmvm", .ops = NULL },
+};
/*
* struct fw_sec: Just for the image parsing proccess.
@@ -721,7 +759,6 @@ static int validate_sec_sizes(struct iwl_drv *drv,
return 0;
}
-
/**
* iwl_ucode_callback - callback when firmware was loaded
*
@@ -733,6 +770,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
struct iwl_drv *drv = context;
struct iwl_fw *fw = &drv->fw;
struct iwl_ucode_header *ucode;
+ struct iwlwifi_opmode_table *op;
int err;
struct iwl_firmware_pieces pieces;
const unsigned int api_max = drv->cfg->ucode_api_max;
@@ -740,6 +778,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
const unsigned int api_min = drv->cfg->ucode_api_min;
u32 api_ver;
int i;
+ bool load_module = false;
fw->ucode_capa.max_probe_length = 200;
fw->ucode_capa.standard_phy_calibration_size =
@@ -862,10 +901,24 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* We have our copies now, allow OS release its copies */
release_firmware(ucode_raw);
- drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
+ mutex_lock(&iwlwifi_opmode_table_mtx);
+ op = &iwlwifi_opmode_table[DVM_OP_MODE];
- if (!drv->op_mode)
- goto out_unbind;
+ /* add this device to the list of devices using this op_mode */
+ list_add_tail(&drv->list, &op->drv);
+
+ if (op->ops) {
+ const struct iwl_op_mode_ops *ops = op->ops;
+ drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw);
+
+ if (!drv->op_mode) {
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ goto out_unbind;
+ }
+ } else {
+ load_module = true;
+ }
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
/*
* Complete the firmware request last so that
@@ -873,6 +926,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
* are doing the start() above.
*/
complete(&drv->request_firmware_complete);
+
+ /*
+ * Load the module last so we don't block anything
+ * else from proceeding if the module fails to load
+ * or hangs loading.
+ */
+ if (load_module)
+ request_module("%s", op->name);
return;
try_again:
@@ -906,6 +967,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
drv->cfg = cfg;
init_completion(&drv->request_firmware_complete);
+ INIT_LIST_HEAD(&drv->list);
ret = iwl_request_firmware(drv, true);
@@ -928,6 +990,16 @@ void iwl_drv_stop(struct iwl_drv *drv)
iwl_dealloc_ucode(drv);
+ mutex_lock(&iwlwifi_opmode_table_mtx);
+ /*
+ * List is empty (this item wasn't added)
+ * when firmware loading failed -- in that
+ * case we can't remove it from any list.
+ */
+ if (!list_empty(&drv->list))
+ list_del(&drv->list);
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+
kfree(drv);
}
@@ -941,8 +1013,78 @@ struct iwl_mod_params iwlwifi_mod_params = {
.power_level = IWL_POWER_INDEX_1,
.bt_ch_announce = true,
.auto_agg = true,
+ .wd_disable = true,
/* the rest are 0 by default */
};
+EXPORT_SYMBOL_GPL(iwlwifi_mod_params);
+
+int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
+{
+ int i;
+ struct iwl_drv *drv;
+
+ mutex_lock(&iwlwifi_opmode_table_mtx);
+ for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
+ if (strcmp(iwlwifi_opmode_table[i].name, name))
+ continue;
+ iwlwifi_opmode_table[i].ops = ops;
+ list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
+ drv->op_mode = ops->start(drv->trans, drv->cfg,
+ &drv->fw);
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ return 0;
+ }
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ return -EIO;
+}
+EXPORT_SYMBOL_GPL(iwl_opmode_register);
+
+void iwl_opmode_deregister(const char *name)
+{
+ int i;
+ struct iwl_drv *drv;
+
+ mutex_lock(&iwlwifi_opmode_table_mtx);
+ for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
+ if (strcmp(iwlwifi_opmode_table[i].name, name))
+ continue;
+ iwlwifi_opmode_table[i].ops = NULL;
+
+ /* call the stop routine for all devices */
+ list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) {
+ if (drv->op_mode) {
+ iwl_op_mode_stop(drv->op_mode);
+ drv->op_mode = NULL;
+ }
+ }
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+ return;
+ }
+ mutex_unlock(&iwlwifi_opmode_table_mtx);
+}
+EXPORT_SYMBOL_GPL(iwl_opmode_deregister);
+
+static int __init iwl_drv_init(void)
+{
+ int i;
+
+ mutex_init(&iwlwifi_opmode_table_mtx);
+
+ for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
+ INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
+
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ return iwl_pci_register_driver();
+}
+module_init(iwl_drv_init);
+
+static void __exit iwl_drv_exit(void)
+{
+ iwl_pci_unregister_driver();
+}
+module_exit(iwl_drv_exit);
#ifdef CONFIG_IWLWIFI_DEBUG
module_param_named(debug, iwlwifi_mod_params.debug_level, uint,
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
new file mode 100644
index 000000000000..f10170fe8799
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -0,0 +1,903 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "iwl-modparams.h"
+#include "iwl-eeprom-parse.h"
+
+/* EEPROM offset definitions */
+
+/* indirect access definitions */
+#define ADDRESS_MSK 0x0000FFFF
+#define INDIRECT_TYPE_MSK 0x000F0000
+#define INDIRECT_HOST 0x00010000
+#define INDIRECT_GENERAL 0x00020000
+#define INDIRECT_REGULATORY 0x00030000
+#define INDIRECT_CALIBRATION 0x00040000
+#define INDIRECT_PROCESS_ADJST 0x00050000
+#define INDIRECT_OTHERS 0x00060000
+#define INDIRECT_TXP_LIMIT 0x00070000
+#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
+#define INDIRECT_ADDRESS 0x00100000
+
+/* corresponding link offsets in EEPROM */
+#define EEPROM_LINK_HOST (2*0x64)
+#define EEPROM_LINK_GENERAL (2*0x65)
+#define EEPROM_LINK_REGULATORY (2*0x66)
+#define EEPROM_LINK_CALIBRATION (2*0x67)
+#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
+#define EEPROM_LINK_OTHERS (2*0x69)
+#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
+#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
+
+/* General */
+#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
+#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
+#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
+#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
+#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
+#define EEPROM_VERSION (2*0x44) /* 2 bytes */
+#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
+#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
+#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
+#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
+
+/* calibration */
+struct iwl_eeprom_calib_hdr {
+ u8 version;
+ u8 pa_type;
+ __le16 voltage;
+} __packed;
+
+#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
+#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
+
+/* temperature */
+#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
+#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
+
+/*
+ * EEPROM bands
+ * These are the channel numbers from each band in the order
+ * that they are stored in the EEPROM band information. Note
+ * that EEPROM bands aren't the same as mac80211 bands, and
+ * there are even special "ht40 bands" in the EEPROM.
+ */
+static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
+ 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
+ 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
+ 145, 149, 153, 157, 161, 165
+};
+
+static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
+ 1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
+ 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+#define IWL_NUM_CHANNELS (ARRAY_SIZE(iwl_eeprom_band_1) + \
+ ARRAY_SIZE(iwl_eeprom_band_2) + \
+ ARRAY_SIZE(iwl_eeprom_band_3) + \
+ ARRAY_SIZE(iwl_eeprom_band_4) + \
+ ARRAY_SIZE(iwl_eeprom_band_5))
+
+/* rate data (static) */
+static struct ieee80211_rate iwl_cfg80211_rates[] = {
+ { .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
+ { .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+ { .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
+ { .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
+ { .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
+ { .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
+ { .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
+ { .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
+ { .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
+ { .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
+};
+#define RATES_24_OFFS 0
+#define N_RATES_24 ARRAY_SIZE(iwl_cfg80211_rates)
+#define RATES_52_OFFS 4
+#define N_RATES_52 (N_RATES_24 - RATES_52_OFFS)
+
+/* EEPROM reading functions */
+
+static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
+{
+ if (WARN_ON(offset + sizeof(u16) > eeprom_size))
+ return 0;
+ return le16_to_cpup((__le16 *)(eeprom + offset));
+}
+
+static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
+ u32 address)
+{
+ u16 offset = 0;
+
+ if ((address & INDIRECT_ADDRESS) == 0)
+ return address;
+
+ switch (address & INDIRECT_TYPE_MSK) {
+ case INDIRECT_HOST:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_HOST);
+ break;
+ case INDIRECT_GENERAL:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_GENERAL);
+ break;
+ case INDIRECT_REGULATORY:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_REGULATORY);
+ break;
+ case INDIRECT_TXP_LIMIT:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_TXP_LIMIT);
+ break;
+ case INDIRECT_TXP_LIMIT_SIZE:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_TXP_LIMIT_SIZE);
+ break;
+ case INDIRECT_CALIBRATION:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_CALIBRATION);
+ break;
+ case INDIRECT_PROCESS_ADJST:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_PROCESS_ADJST);
+ break;
+ case INDIRECT_OTHERS:
+ offset = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_LINK_OTHERS);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ /* translate the offset from words to byte */
+ return (address & ADDRESS_MSK) + (offset << 1);
+}
+
+static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
+ u32 offset)
+{
+ u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
+
+ if (WARN_ON(address >= eeprom_size))
+ return NULL;
+
+ return &eeprom[address];
+}
+
+static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
+ struct iwl_eeprom_data *data)
+{
+ struct iwl_eeprom_calib_hdr *hdr;
+
+ hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
+ EEPROM_CALIB_ALL);
+ if (!hdr)
+ return -ENODATA;
+ data->calib_version = hdr->version;
+ data->calib_voltage = hdr->voltage;
+
+ return 0;
+}
+
+/**
+ * enum iwl_eeprom_channel_flags - channel flags in EEPROM
+ * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel
+ * @EEPROM_CHANNEL_ACTIVE: active scanning allowed
+ * @EEPROM_CHANNEL_RADAR: radar detection required
+ * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?)
+ * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate
+ */
+enum iwl_eeprom_channel_flags {
+ EEPROM_CHANNEL_VALID = BIT(0),
+ EEPROM_CHANNEL_IBSS = BIT(1),
+ EEPROM_CHANNEL_ACTIVE = BIT(3),
+ EEPROM_CHANNEL_RADAR = BIT(4),
+ EEPROM_CHANNEL_WIDE = BIT(5),
+ EEPROM_CHANNEL_DFS = BIT(7),
+};
+
+/**
+ * struct iwl_eeprom_channel - EEPROM channel data
+ * @flags: %EEPROM_CHANNEL_* flags
+ * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm
+ */
+struct iwl_eeprom_channel {
+ u8 flags;
+ s8 max_power_avg;
+} __packed;
+
+
+enum iwl_eeprom_enhanced_txpwr_flags {
+ IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
+ IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
+ IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
+ IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
+ IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
+ IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
+ IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
+ IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
+};
+
+/**
+ * iwl_eeprom_enhanced_txpwr structure
+ * @flags: entry flags
+ * @channel: channel number
+ * @chain_a_max_pwr: chain a max power in 1/2 dBm
+ * @chain_b_max_pwr: chain b max power in 1/2 dBm
+ * @chain_c_max_pwr: chain c max power in 1/2 dBm
+ * @delta_20_in_40: 20-in-40 deltas (hi/lo)
+ * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
+ * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
+ *
+ * This structure presents the enhanced regulatory tx power limit layout
+ * in an EEPROM image.
+ */
+struct iwl_eeprom_enhanced_txpwr {
+ u8 flags;
+ u8 channel;
+ s8 chain_a_max;
+ s8 chain_b_max;
+ s8 chain_c_max;
+ u8 delta_20_in_40;
+ s8 mimo2_max;
+ s8 mimo3_max;
+} __packed;
+
+static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_eeprom_data *data,
+ struct iwl_eeprom_enhanced_txpwr *txp)
+{
+ s8 result = 0; /* (.5 dBm) */
+
+ /* Take the highest tx power from any valid chains */
+ if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result)
+ result = txp->chain_a_max;
+
+ if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result)
+ result = txp->chain_b_max;
+
+ if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result)
+ result = txp->chain_c_max;
+
+ if ((data->valid_tx_ant == ANT_AB ||
+ data->valid_tx_ant == ANT_BC ||
+ data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result)
+ result = txp->mimo2_max;
+
+ if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result)
+ result = txp->mimo3_max;
+
+ return result;
+}
+
+#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
+#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
+#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
+
+#define TXP_CHECK_AND_PRINT(x) \
+ ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
+
+static void
+iwl_eeprom_enh_txp_read_element(struct iwl_eeprom_data *data,
+ struct iwl_eeprom_enhanced_txpwr *txp,
+ int n_channels, s8 max_txpower_avg)
+{
+ int ch_idx;
+ enum ieee80211_band band;
+
+ band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
+ IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+
+ for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
+ struct ieee80211_channel *chan = &data->channels[ch_idx];
+
+ /* update matching channel or from common data only */
+ if (txp->channel != 0 && chan->hw_value != txp->channel)
+ continue;
+
+ /* update matching band only */
+ if (band != chan->band)
+ continue;
+
+ if (chan->max_power < max_txpower_avg &&
+ !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ))
+ chan->max_power = max_txpower_avg;
+ }
+}
+
+static void iwl_eeprom_enhanced_txpower(struct device *dev,
+ struct iwl_eeprom_data *data,
+ const u8 *eeprom, size_t eeprom_size,
+ int n_channels)
+{
+ struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+ int idx, entries;
+ __le16 *txp_len;
+ s8 max_txp_avg_halfdbm;
+
+ BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
+
+ /* the length is in 16-bit words, but we want entries */
+ txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
+ EEPROM_TXP_SZ_OFFS);
+ entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
+
+ txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
+ EEPROM_TXP_OFFS);
+
+ for (idx = 0; idx < entries; idx++) {
+ txp = &txp_array[idx];
+ /* skip invalid entries */
+ if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
+ continue;
+
+ IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
+ (txp->channel && (txp->flags &
+ IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
+ "Common " : (txp->channel) ?
+ "Channel" : "Common",
+ (txp->channel),
+ TXP_CHECK_AND_PRINT(VALID),
+ TXP_CHECK_AND_PRINT(BAND_52G),
+ TXP_CHECK_AND_PRINT(OFDM),
+ TXP_CHECK_AND_PRINT(40MHZ),
+ TXP_CHECK_AND_PRINT(HT_AP),
+ TXP_CHECK_AND_PRINT(RES1),
+ TXP_CHECK_AND_PRINT(RES2),
+ TXP_CHECK_AND_PRINT(COMMON_TYPE),
+ txp->flags);
+ IWL_DEBUG_EEPROM(dev,
+ "\t\t chain_A: 0x%02x chain_B: 0X%02x chain_C: 0X%02x\n",
+ txp->chain_a_max, txp->chain_b_max,
+ txp->chain_c_max);
+ IWL_DEBUG_EEPROM(dev,
+ "\t\t MIMO2: 0x%02x MIMO3: 0x%02x High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
+ txp->mimo2_max, txp->mimo3_max,
+ ((txp->delta_20_in_40 & 0xf0) >> 4),
+ (txp->delta_20_in_40 & 0x0f));
+
+ max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);
+
+ iwl_eeprom_enh_txp_read_element(data, txp, n_channels,
+ DIV_ROUND_UP(max_txp_avg_halfdbm, 2));
+
+ if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm)
+ data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
+ }
+}
+
+static void iwl_init_band_reference(const struct iwl_cfg *cfg,
+ const u8 *eeprom, size_t eeprom_size,
+ int eeprom_band, int *eeprom_ch_count,
+ const struct iwl_eeprom_channel **ch_info,
+ const u8 **eeprom_ch_array)
+{
+ u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
+
+ offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
+
+ *ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
+
+ switch (eeprom_band) {
+ case 1: /* 2.4GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
+ *eeprom_ch_array = iwl_eeprom_band_1;
+ break;
+ case 2: /* 4.9GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
+ *eeprom_ch_array = iwl_eeprom_band_2;
+ break;
+ case 3: /* 5.2GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
+ *eeprom_ch_array = iwl_eeprom_band_3;
+ break;
+ case 4: /* 5.5GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
+ *eeprom_ch_array = iwl_eeprom_band_4;
+ break;
+ case 5: /* 5.7GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
+ *eeprom_ch_array = iwl_eeprom_band_5;
+ break;
+ case 6: /* 2.4GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
+ *eeprom_ch_array = iwl_eeprom_band_6;
+ break;
+ case 7: /* 5 GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
+ *eeprom_ch_array = iwl_eeprom_band_7;
+ break;
+ default:
+ *eeprom_ch_count = 0;
+ *eeprom_ch_array = NULL;
+ WARN_ON(1);
+ }
+}
+
+#define CHECK_AND_PRINT(x) \
+ ((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")
+
+static void iwl_mod_ht40_chan_info(struct device *dev,
+ struct iwl_eeprom_data *data, int n_channels,
+ enum ieee80211_band band, u16 channel,
+ const struct iwl_eeprom_channel *eeprom_ch,
+ u8 clear_ht40_extension_channel)
+{
+ struct ieee80211_channel *chan = NULL;
+ int i;
+
+ for (i = 0; i < n_channels; i++) {
+ if (data->channels[i].band != band)
+ continue;
+ if (data->channels[i].hw_value != channel)
+ continue;
+ chan = &data->channels[i];
+ break;
+ }
+
+ if (!chan)
+ return;
+
+ IWL_DEBUG_EEPROM(dev,
+ "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+ channel,
+ band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+ CHECK_AND_PRINT(IBSS),
+ CHECK_AND_PRINT(ACTIVE),
+ CHECK_AND_PRINT(RADAR),
+ CHECK_AND_PRINT(WIDE),
+ CHECK_AND_PRINT(DFS),
+ eeprom_ch->flags,
+ eeprom_ch->max_power_avg,
+ ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? ""
+ : "not ");
+
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ chan->flags &= ~clear_ht40_extension_channel;
+}
+
+#define CHECK_AND_PRINT_I(x) \
+ ((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
+
+static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_eeprom_data *data,
+ const u8 *eeprom, size_t eeprom_size)
+{
+ int band, ch_idx;
+ const struct iwl_eeprom_channel *eeprom_ch_info;
+ const u8 *eeprom_ch_array;
+ int eeprom_ch_count;
+ int n_channels = 0;
+
+ /*
+ * Loop through the 5 EEPROM bands and add them to the parse list
+ */
+ for (band = 1; band <= 5; band++) {
+ struct ieee80211_channel *channel;
+
+ iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
+ &eeprom_ch_count, &eeprom_ch_info,
+ &eeprom_ch_array);
+
+ /* Loop through each band adding each of the channels */
+ for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
+ const struct iwl_eeprom_channel *eeprom_ch;
+
+ eeprom_ch = &eeprom_ch_info[ch_idx];
+
+ if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d Flags %x [%sGHz] - No traffic\n",
+ eeprom_ch_array[ch_idx],
+ eeprom_ch_info[ch_idx].flags,
+ (band != 1) ? "5.2" : "2.4");
+ continue;
+ }
+
+ channel = &data->channels[n_channels];
+ n_channels++;
+
+ channel->hw_value = eeprom_ch_array[ch_idx];
+ channel->band = (band == 1) ? IEEE80211_BAND_2GHZ
+ : IEEE80211_BAND_5GHZ;
+ channel->center_freq =
+ ieee80211_channel_to_frequency(
+ channel->hw_value, channel->band);
+
+ /* set no-HT40, will enable as appropriate later */
+ channel->flags = IEEE80211_CHAN_NO_HT40;
+
+ if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
+ channel->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
+ channel->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
+ channel->flags |= IEEE80211_CHAN_RADAR;
+
+ /* Initialize regulatory-based run-time data */
+ channel->max_power =
+ eeprom_ch_info[ch_idx].max_power_avg;
+ IWL_DEBUG_EEPROM(dev,
+ "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+ channel->hw_value,
+ (band != 1) ? "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ eeprom_ch_info[ch_idx].flags,
+ eeprom_ch_info[ch_idx].max_power_avg,
+ ((eeprom_ch_info[ch_idx].flags &
+ EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch_info[ch_idx].flags &
+ EEPROM_CHANNEL_RADAR))
+ ? "" : "not ");
+ }
+ }
+
+ if (cfg->eeprom_params->enhanced_txpower) {
+ /*
+ * for newer device (6000 series and up)
+ * EEPROM contain enhanced tx power information
+ * driver need to process addition information
+ * to determine the max channel tx power limits
+ */
+ iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size,
+ n_channels);
+ } else {
+ /* All others use data from channel map */
+ int i;
+
+ data->max_tx_pwr_half_dbm = -128;
+
+ for (i = 0; i < n_channels; i++)
+ data->max_tx_pwr_half_dbm =
+ max_t(s8, data->max_tx_pwr_half_dbm,
+ data->channels[i].max_power * 2);
+ }
+
+ /* Check if we do have HT40 channels */
+ if (cfg->eeprom_params->regulatory_bands[5] ==
+ EEPROM_REGULATORY_BAND_NO_HT40 &&
+ cfg->eeprom_params->regulatory_bands[6] ==
+ EEPROM_REGULATORY_BAND_NO_HT40)
+ return n_channels;
+
+ /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+ for (band = 6; band <= 7; band++) {
+ enum ieee80211_band ieeeband;
+
+ iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
+ &eeprom_ch_count, &eeprom_ch_info,
+ &eeprom_ch_array);
+
+ /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+ ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ
+ : IEEE80211_BAND_5GHZ;
+
+ /* Loop through each band adding each of the channels */
+ for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
+ /* Set up driver's info for lower half */
+ iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
+ eeprom_ch_array[ch_idx],
+ &eeprom_ch_info[ch_idx],
+ IEEE80211_CHAN_NO_HT40PLUS);
+
+ /* Set up driver's info for upper half */
+ iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
+ eeprom_ch_array[ch_idx] + 4,
+ &eeprom_ch_info[ch_idx],
+ IEEE80211_CHAN_NO_HT40MINUS);
+ }
+ }
+
+ return n_channels;
+}
+
+static int iwl_init_sband_channels(struct iwl_eeprom_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum ieee80211_band band)
+{
+ struct ieee80211_channel *chan = &data->channels[0];
+ int n = 0, idx = 0;
+
+ while (chan->band != band && idx < n_channels)
+ chan = &data->channels[++idx];
+
+ sband->channels = &data->channels[idx];
+
+ while (chan->band == band && idx < n_channels) {
+ chan = &data->channels[++idx];
+ n++;
+ }
+
+ sband->n_channels = n;
+
+ return n;
+}
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+
+static void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+ struct iwl_eeprom_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
+{
+ int max_bit_rate = 0;
+ u8 rx_chains;
+ u8 tx_chains;
+
+ tx_chains = hweight8(data->valid_tx_ant);
+ if (cfg->rx_with_siso_diversity)
+ rx_chains = 1;
+ else
+ rx_chains = hweight8(data->valid_rx_ant);
+
+ if (!(data->sku & EEPROM_SKU_CAP_11N_ENABLE) || !cfg->ht_params) {
+ ht_info->ht_supported = false;
+ return;
+ }
+
+ ht_info->ht_supported = true;
+ ht_info->cap = 0;
+
+ if (iwlwifi_mod_params.amsdu_size_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ if (rx_chains >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ if (cfg->ht_params->ht_greenfield_support)
+ ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+
+ if (cfg->ht_params->ht40_bands & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ ht_info->mcs.rx_mask[4] = 0x01;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ /* Highest supported Rx data rate */
+ max_bit_rate *= rx_chains;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains != rx_chains) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_chains - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
+
+static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+ struct iwl_eeprom_data *data,
+ const u8 *eeprom, size_t eeprom_size)
+{
+ int n_channels = iwl_init_channel_map(dev, cfg, data,
+ eeprom, eeprom_size);
+ int n_used = 0;
+ struct ieee80211_supported_band *sband;
+
+ sband = &data->bands[IEEE80211_BAND_2GHZ];
+ sband->band = IEEE80211_BAND_2GHZ;
+ sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+ sband->n_bitrates = N_RATES_24;
+ n_used += iwl_init_sband_channels(data, sband, n_channels,
+ IEEE80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+ sband = &data->bands[IEEE80211_BAND_5GHZ];
+ sband->band = IEEE80211_BAND_5GHZ;
+ sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
+ sband->n_bitrates = N_RATES_52;
+ n_used += iwl_init_sband_channels(data, sband, n_channels,
+ IEEE80211_BAND_5GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+ if (n_channels != n_used)
+ IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
+ n_used, n_channels);
+}
+
+/* EEPROM data functions */
+
+struct iwl_eeprom_data *
+iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
+ const u8 *eeprom, size_t eeprom_size)
+{
+ struct iwl_eeprom_data *data;
+ const void *tmp;
+
+ if (WARN_ON(!cfg || !cfg->eeprom_params))
+ return NULL;
+
+ data = kzalloc(sizeof(*data) +
+ sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
+ GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ /* get MAC address(es) */
+ tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
+ if (!tmp)
+ goto err_free;
+ memcpy(data->hw_addr, tmp, ETH_ALEN);
+ data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_NUM_MAC_ADDRESS);
+
+ if (iwl_eeprom_read_calib(eeprom, eeprom_size, data))
+ goto err_free;
+
+ tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
+ if (!tmp)
+ goto err_free;
+ memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));
+
+ tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
+ EEPROM_RAW_TEMPERATURE);
+ if (!tmp)
+ goto err_free;
+ data->raw_temperature = *(__le16 *)tmp;
+
+ tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
+ EEPROM_KELVIN_TEMPERATURE);
+ if (!tmp)
+ goto err_free;
+ data->kelvin_temperature = *(__le16 *)tmp;
+ data->kelvin_voltage = *((__le16 *)tmp + 1);
+
+ data->radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_RADIO_CONFIG);
+ data->sku = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_SKU_CAP);
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+ data->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
+
+ data->eeprom_version = iwl_eeprom_query16(eeprom, eeprom_size,
+ EEPROM_VERSION);
+
+ data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(data->radio_cfg);
+ data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(data->radio_cfg);
+
+ /* check overrides (some devices have wrong EEPROM) */
+ if (cfg->valid_tx_ant)
+ data->valid_tx_ant = cfg->valid_tx_ant;
+ if (cfg->valid_rx_ant)
+ data->valid_rx_ant = cfg->valid_rx_ant;
+
+ if (!data->valid_tx_ant || !data->valid_rx_ant) {
+ IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
+ data->valid_tx_ant, data->valid_rx_ant);
+ goto err_free;
+ }
+
+ iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);
+
+ return data;
+ err_free:
+ kfree(data);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data);
+
+/* helper functions */
+int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
+ struct iwl_trans *trans)
+{
+ if (data->eeprom_version >= trans->cfg->eeprom_ver ||
+ data->calib_version >= trans->cfg->eeprom_calib_ver) {
+ IWL_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+ data->eeprom_version, data->calib_version);
+ return 0;
+ }
+
+ IWL_ERR(trans,
+ "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
+ data->eeprom_version, trans->cfg->eeprom_ver,
+ data->calib_version, trans->cfg->eeprom_calib_ver);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(iwl_eeprom_check_version);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
new file mode 100644
index 000000000000..9c07c670a1ce
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -0,0 +1,138 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_eeprom_parse_h__
+#define __iwl_eeprom_parse_h__
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include "iwl-trans.h"
+
+/* SKU Capabilities (actual values from EEPROM definition) */
+#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
+#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
+#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
+#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
+#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
+
+/* radio config bits (actual values from EEPROM definition) */
+#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
+#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+struct iwl_eeprom_data {
+ int n_hw_addrs;
+ u8 hw_addr[ETH_ALEN];
+
+ u16 radio_config;
+
+ u8 calib_version;
+ __le16 calib_voltage;
+
+ __le16 raw_temperature;
+ __le16 kelvin_temperature;
+ __le16 kelvin_voltage;
+ __le16 xtal_calib[2];
+
+ u16 sku;
+ u16 radio_cfg;
+ u16 eeprom_version;
+ s8 max_tx_pwr_half_dbm;
+
+ u8 valid_tx_ant, valid_rx_ant;
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_channel channels[];
+};
+
+/**
+ * iwl_parse_eeprom_data - parse EEPROM data and return values
+ *
+ * @dev: device pointer we're parsing for, for debug only
+ * @cfg: device configuration for parsing and overrides
+ * @eeprom: the EEPROM data
+ * @eeprom_size: length of the EEPROM data
+ *
+ * This function parses all EEPROM values we need and then
+ * returns a (newly allocated) struct containing all the
+ * relevant values for driver use. The struct must be freed
+ * later with iwl_free_eeprom_data().
+ */
+struct iwl_eeprom_data *
+iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
+ const u8 *eeprom, size_t eeprom_size);
+
+/**
+ * iwl_free_eeprom_data - free EEPROM data
+ * @data: the data to free
+ */
+static inline void iwl_free_eeprom_data(struct iwl_eeprom_data *data)
+{
+ kfree(data);
+}
+
+int iwl_eeprom_check_version(struct iwl_eeprom_data *data,
+ struct iwl_trans *trans);
+
+#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
new file mode 100644
index 000000000000..27c7da3c6ed1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -0,0 +1,463 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include "iwl-debug.h"
+#include "iwl-eeprom-read.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
+
+#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+
+#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
+{
+ u16 count;
+ int ret;
+
+ for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ /* Request semaphore */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ EEPROM_SEM_TIMEOUT);
+ if (ret >= 0) {
+ IWL_DEBUG_EEPROM(trans->dev,
+ "Acquired semaphore after %d tries.\n",
+ count+1);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
+{
+ iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+}
+
+static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
+{
+ u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+
+ IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
+
+ switch (gp) {
+ case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
+ if (!nvm_is_otp) {
+ IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
+ gp);
+ return -ENOENT;
+ }
+ return 0;
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+ if (nvm_is_otp) {
+ IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
+ return -ENOENT;
+ }
+ return 0;
+ case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
+ default:
+ IWL_ERR(trans,
+ "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
+ nvm_is_otp ? "OTP" : "EEPROM", gp);
+ return -ENOENT;
+ }
+}
+
+/******************************************************************************
+ *
+ * OTP related functions
+ *
+******************************************************************************/
+
+static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
+{
+ iwl_read32(trans, CSR_OTP_GP_REG);
+
+ iwl_clear_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_OTP_ACCESS_MODE);
+}
+
+static int iwl_nvm_is_otp(struct iwl_trans *trans)
+{
+ u32 otpgp;
+
+ /* OTP only valid for CP/PP and after */
+ switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ case CSR_HW_REV_TYPE_NONE:
+ IWL_ERR(trans, "Unknown hardware type\n");
+ return -EIO;
+ case CSR_HW_REV_TYPE_5300:
+ case CSR_HW_REV_TYPE_5350:
+ case CSR_HW_REV_TYPE_5100:
+ case CSR_HW_REV_TYPE_5150:
+ return 0;
+ default:
+ otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+ if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
+ return 1;
+ return 0;
+ }
+}
+
+static int iwl_init_otp_access(struct iwl_trans *trans)
+{
+ int ret;
+
+ /* Enable 40MHz radio clock */
+ iwl_write32(trans, CSR_GP_CNTRL,
+ iwl_read32(trans, CSR_GP_CNTRL) |
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /* wait for clock to be ready */
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out access OTP\n");
+ } else {
+ iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
+ udelay(5);
+ iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
+
+ /*
+ * CSR auto clock gate disable bit -
+ * this is only applicable for HW with OTP shadow RAM
+ */
+ if (trans->cfg->base_params->shadow_ram_support)
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ }
+ return ret;
+}
+
+static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
+ __le16 *eeprom_data)
+{
+ int ret = 0;
+ u32 r;
+ u32 otpgp;
+
+ iwl_write32(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+ ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
+ return ret;
+ }
+ r = iwl_read32(trans, CSR_EEPROM_REG);
+ /* check for ECC errors: */
+ otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+ if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
+ /* stop in this case */
+ /* set the uncorrectable OTP ECC bit for acknowledgement */
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+ IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
+ return -EINVAL;
+ }
+ if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
+ /* continue in this case */
+ /* set the correctable OTP ECC bit for acknowledgement */
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
+ IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
+ }
+ *eeprom_data = cpu_to_le16(r >> 16);
+ return 0;
+}
+
+/*
+ * iwl_is_otp_empty: check for empty OTP
+ */
+static bool iwl_is_otp_empty(struct iwl_trans *trans)
+{
+ u16 next_link_addr = 0;
+ __le16 link_value;
+ bool is_empty = false;
+
+ /* locate the beginning of OTP link list */
+ if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
+ if (!link_value) {
+ IWL_ERR(trans, "OTP is empty\n");
+ is_empty = true;
+ }
+ } else {
+ IWL_ERR(trans, "Unable to read first block of OTP list.\n");
+ is_empty = true;
+ }
+
+ return is_empty;
+}
+
+
+/*
+ * iwl_find_otp_image: find EEPROM image in OTP
+ * finding the OTP block that contains the EEPROM image.
+ * the last valid block on the link list (the block _before_ the last block)
+ * is the block we should read and used to configure the device.
+ * If all the available OTP blocks are full, the last block will be the block
+ * we should read and used to configure the device.
+ * only perform this operation if shadow RAM is disabled
+ */
+static int iwl_find_otp_image(struct iwl_trans *trans,
+ u16 *validblockaddr)
+{
+ u16 next_link_addr = 0, valid_addr;
+ __le16 link_value = 0;
+ int usedblocks = 0;
+
+ /* set addressing mode to absolute to traverse the link list */
+ iwl_set_otp_access_absolute(trans);
+
+ /* checking for empty OTP or error */
+ if (iwl_is_otp_empty(trans))
+ return -EINVAL;
+
+ /*
+ * start traverse link list
+ * until reach the max number of OTP blocks
+ * different devices have different number of OTP blocks
+ */
+ do {
+ /* save current valid block address
+ * check for more block on the link list
+ */
+ valid_addr = next_link_addr;
+ next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
+ IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
+ usedblocks, next_link_addr);
+ if (iwl_read_otp_word(trans, next_link_addr, &link_value))
+ return -EINVAL;
+ if (!link_value) {
+ /*
+ * reach the end of link list, return success and
+ * set address point to the starting address
+ * of the image
+ */
+ *validblockaddr = valid_addr;
+ /* skip first 2 bytes (link list pointer) */
+ *validblockaddr += 2;
+ return 0;
+ }
+ /* more in the link list, continue */
+ usedblocks++;
+ } while (usedblocks <= trans->cfg->base_params->max_ll_items);
+
+ /* OTP has no valid blocks */
+ IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
+ return -EINVAL;
+}
+
+/**
+ * iwl_read_eeprom - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter and return it
+ * and its size.
+ *
+ * NOTE: This routine uses the non-debug IO access functions.
+ */
+int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
+{
+ __le16 *e;
+ u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+ u16 addr;
+ u16 validblockaddr = 0;
+ u16 cache_addr = 0;
+ int nvm_is_otp;
+
+ if (!eeprom || !eeprom_size)
+ return -EINVAL;
+
+ nvm_is_otp = iwl_nvm_is_otp(trans);
+ if (nvm_is_otp < 0)
+ return nvm_is_otp;
+
+ sz = trans->cfg->base_params->eeprom_size;
+ IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
+
+ e = kmalloc(sz, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
+ if (ret < 0) {
+ IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ goto err_free;
+ }
+
+ /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+ ret = iwl_eeprom_acquire_semaphore(trans);
+ if (ret < 0) {
+ IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
+ goto err_free;
+ }
+
+ if (nvm_is_otp) {
+ ret = iwl_init_otp_access(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to initialize OTP access.\n");
+ goto err_unlock;
+ }
+
+ iwl_write32(trans, CSR_EEPROM_GP,
+ iwl_read32(trans, CSR_EEPROM_GP) &
+ ~CSR_EEPROM_GP_IF_OWNER_MSK);
+
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
+ CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+ /* traversing the linked list if no shadow ram supported */
+ if (!trans->cfg->base_params->shadow_ram_support) {
+ ret = iwl_find_otp_image(trans, &validblockaddr);
+ if (ret)
+ goto err_unlock;
+ }
+ for (addr = validblockaddr; addr < validblockaddr + sz;
+ addr += sizeof(u16)) {
+ __le16 eeprom_data;
+
+ ret = iwl_read_otp_word(trans, addr, &eeprom_data);
+ if (ret)
+ goto err_unlock;
+ e[cache_addr / 2] = eeprom_data;
+ cache_addr += sizeof(u16);
+ }
+ } else {
+ /* eeprom is an array of 16bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ iwl_write32(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Time out reading EEPROM[%d]\n", addr);
+ goto err_unlock;
+ }
+ r = iwl_read32(trans, CSR_EEPROM_REG);
+ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+ }
+
+ IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
+ nvm_is_otp ? "OTP" : "EEPROM");
+
+ iwl_eeprom_release_semaphore(trans);
+
+ *eeprom_size = sz;
+ *eeprom = (u8 *)e;
+ return 0;
+
+ err_unlock:
+ iwl_eeprom_release_semaphore(trans);
+ err_free:
+ kfree(e);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
new file mode 100644
index 000000000000..1337c9d36fee
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -0,0 +1,70 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_eeprom_h__
+#define __iwl_eeprom_h__
+
+#include "iwl-trans.h"
+
+int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
+
+#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
deleted file mode 100644
index b8e2b223ac36..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ /dev/null
@@ -1,1148 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-agn.h"
-#include "iwl-eeprom.h"
-#include "iwl-io.h"
-#include "iwl-prph.h"
-
-/************************** EEPROM BANDS ****************************
- *
- * The iwl_eeprom_band definitions below provide the mapping from the
- * EEPROM contents to the specific channel number supported for each
- * band.
- *
- * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
- * definition below maps to physical channel 42 in the 5.2GHz spectrum.
- * The specific geography and calibration information for that channel
- * is contained in the eeprom map itself.
- *
- * During init, we copy the eeprom information and channel map
- * information into priv->channel_info_24/52 and priv->channel_map_24/52
- *
- * channel_map_24/52 provides the index in the channel_info array for a
- * given channel. We have to have two separate maps as there is channel
- * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
- * band_2
- *
- * A value of 0xff stored in the channel_map indicates that the channel
- * is not supported by the hardware at all.
- *
- * A value of 0xfe in the channel_map indicates that the channel is not
- * valid for Tx with the current hardware. This means that
- * while the system can tune and receive on a given channel, it may not
- * be able to associate or transmit any frames on that
- * channel. There is no corresponding channel information for that
- * entry.
- *
- *********************************************************************/
-
-/* 2.4 GHz */
-const u8 iwl_eeprom_band_1[14] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
-};
-
-/* 5.2 GHz bands */
-static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
- 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
-};
-
-static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
- 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
-};
-
-static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
- 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
-};
-
-static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
- 145, 149, 153, 157, 161, 165
-};
-
-static const u8 iwl_eeprom_band_6[] = { /* 2.4 ht40 channel */
- 1, 2, 3, 4, 5, 6, 7
-};
-
-static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
- 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
-};
-
-/******************************************************************************
- *
- * generic NVM functions
- *
-******************************************************************************/
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-
-#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
-{
- u16 count;
- int ret;
-
- for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
- /* Request semaphore */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
- /* See if we got it */
- ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- EEPROM_SEM_TIMEOUT);
- if (ret >= 0) {
- IWL_DEBUG_EEPROM(trans,
- "Acquired semaphore after %d tries.\n",
- count+1);
- return ret;
- }
- }
-
- return ret;
-}
-
-static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
-{
- iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
-}
-
-static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
-{
- u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP) &
- CSR_EEPROM_GP_VALID_MSK;
- int ret = 0;
-
- IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
- switch (gp) {
- case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
- if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
- IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
- gp);
- ret = -ENOENT;
- }
- break;
- case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
- case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
- IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
- ret = -ENOENT;
- }
- break;
- case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
- default:
- IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
- "EEPROM_GP=0x%08x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
- ? "OTP" : "EEPROM", gp);
- ret = -ENOENT;
- break;
- }
- return ret;
-}
-
-u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset)
-{
- if (!priv->eeprom)
- return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
-}
-
-int iwl_eeprom_check_version(struct iwl_priv *priv)
-{
- u16 eeprom_ver;
- u16 calib_ver;
-
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwl_eeprom_calib_version(priv);
-
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
- goto err;
-
- IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- eeprom_ver, calib_ver);
-
- return 0;
-err:
- IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
- "CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
- return -EINVAL;
-
-}
-
-int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
-{
- u16 radio_cfg;
-
- priv->hw_params.sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
- if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE &&
- !priv->cfg->ht_params) {
- IWL_ERR(priv, "Invalid 11n configuration\n");
- return -EINVAL;
- }
-
- if (!priv->hw_params.sku) {
- IWL_ERR(priv, "Invalid device sku\n");
- return -EINVAL;
- }
-
- IWL_INFO(priv, "Device SKU: 0x%X\n", priv->hw_params.sku);
-
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- priv->hw_params.valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
- priv->hw_params.valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
-
- /* check overrides (some devices have wrong EEPROM) */
- if (priv->cfg->valid_tx_ant)
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- if (priv->cfg->valid_rx_ant)
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
- if (!priv->hw_params.valid_tx_ant || !priv->hw_params.valid_rx_ant) {
- IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
- priv->hw_params.valid_tx_ant,
- priv->hw_params.valid_rx_ant);
- return -EINVAL;
- }
-
- IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
- priv->hw_params.valid_tx_ant, priv->hw_params.valid_rx_ant);
-
- return 0;
-}
-
-u16 iwl_eeprom_calib_version(struct iwl_priv *priv)
-{
- struct iwl_eeprom_calib_hdr *hdr;
-
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
- EEPROM_CALIB_ALL);
- return hdr->version;
-}
-
-static u32 eeprom_indirect_address(struct iwl_priv *priv, u32 address)
-{
- u16 offset = 0;
-
- if ((address & INDIRECT_ADDRESS) == 0)
- return address;
-
- switch (address & INDIRECT_TYPE_MSK) {
- case INDIRECT_HOST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
- break;
- case INDIRECT_GENERAL:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
- break;
- case INDIRECT_REGULATORY:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
- break;
- case INDIRECT_TXP_LIMIT:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
- break;
- case INDIRECT_TXP_LIMIT_SIZE:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
- break;
- case INDIRECT_CALIBRATION:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
- break;
- case INDIRECT_PROCESS_ADJST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
- break;
- case INDIRECT_OTHERS:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
- break;
- default:
- IWL_ERR(priv, "illegal indirect type: 0x%X\n",
- address & INDIRECT_TYPE_MSK);
- break;
- }
-
- /* translate the offset from words to byte */
- return (address & ADDRESS_MSK) + (offset << 1);
-}
-
-const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset)
-{
- u32 address = eeprom_indirect_address(priv, offset);
- BUG_ON(address >= priv->cfg->base_params->eeprom_size);
- return &priv->eeprom[address];
-}
-
-void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac)
-{
- const u8 *addr = iwl_eeprom_query_addr(priv,
- EEPROM_MAC_ADDRESS);
- memcpy(mac, addr, ETH_ALEN);
-}
-
-/******************************************************************************
- *
- * OTP related functions
- *
-******************************************************************************/
-
-static void iwl_set_otp_access(struct iwl_trans *trans,
- enum iwl_access_mode mode)
-{
- iwl_read32(trans, CSR_OTP_GP_REG);
-
- if (mode == IWL_OTP_ACCESS_ABSOLUTE)
- iwl_clear_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_OTP_ACCESS_MODE);
- else
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_OTP_ACCESS_MODE);
-}
-
-static int iwl_get_nvm_type(struct iwl_trans *trans, u32 hw_rev)
-{
- u32 otpgp;
- int nvm_type;
-
- /* OTP only valid for CP/PP and after */
- switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_NONE:
- IWL_ERR(trans, "Unknown hardware type\n");
- return -ENOENT;
- case CSR_HW_REV_TYPE_5300:
- case CSR_HW_REV_TYPE_5350:
- case CSR_HW_REV_TYPE_5100:
- case CSR_HW_REV_TYPE_5150:
- nvm_type = NVM_DEVICE_TYPE_EEPROM;
- break;
- default:
- otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
- if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
- nvm_type = NVM_DEVICE_TYPE_OTP;
- else
- nvm_type = NVM_DEVICE_TYPE_EEPROM;
- break;
- }
- return nvm_type;
-}
-
-static int iwl_init_otp_access(struct iwl_trans *trans)
-{
- int ret;
-
- /* Enable 40MHz radio clock */
- iwl_write32(trans, CSR_GP_CNTRL,
- iwl_read32(trans, CSR_GP_CNTRL) |
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
- /* wait for clock to be ready */
- ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- 25000);
- if (ret < 0)
- IWL_ERR(trans, "Time out access OTP\n");
- else {
- iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
-
- /*
- * CSR auto clock gate disable bit -
- * this is only applicable for HW with OTP shadow RAM
- */
- if (trans->cfg->base_params->shadow_ram_support)
- iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
- }
- return ret;
-}
-
-static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
- __le16 *eeprom_data)
-{
- int ret = 0;
- u32 r;
- u32 otpgp;
-
- iwl_write32(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
- return ret;
- }
- r = iwl_read32(trans, CSR_EEPROM_REG);
- /* check for ECC errors: */
- otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
- if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
- /* stop in this case */
- /* set the uncorrectable OTP ECC bit for acknowledgement */
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
- return -EINVAL;
- }
- if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
- /* continue in this case */
- /* set the correctable OTP ECC bit for acknowledgement */
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
- IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
- }
- *eeprom_data = cpu_to_le16(r >> 16);
- return 0;
-}
-
-/*
- * iwl_is_otp_empty: check for empty OTP
- */
-static bool iwl_is_otp_empty(struct iwl_trans *trans)
-{
- u16 next_link_addr = 0;
- __le16 link_value;
- bool is_empty = false;
-
- /* locate the beginning of OTP link list */
- if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
- if (!link_value) {
- IWL_ERR(trans, "OTP is empty\n");
- is_empty = true;
- }
- } else {
- IWL_ERR(trans, "Unable to read first block of OTP list.\n");
- is_empty = true;
- }
-
- return is_empty;
-}
-
-
-/*
- * iwl_find_otp_image: find EEPROM image in OTP
- * finding the OTP block that contains the EEPROM image.
- * the last valid block on the link list (the block _before_ the last block)
- * is the block we should read and used to configure the device.
- * If all the available OTP blocks are full, the last block will be the block
- * we should read and used to configure the device.
- * only perform this operation if shadow RAM is disabled
- */
-static int iwl_find_otp_image(struct iwl_trans *trans,
- u16 *validblockaddr)
-{
- u16 next_link_addr = 0, valid_addr;
- __le16 link_value = 0;
- int usedblocks = 0;
-
- /* set addressing mode to absolute to traverse the link list */
- iwl_set_otp_access(trans, IWL_OTP_ACCESS_ABSOLUTE);
-
- /* checking for empty OTP or error */
- if (iwl_is_otp_empty(trans))
- return -EINVAL;
-
- /*
- * start traverse link list
- * until reach the max number of OTP blocks
- * different devices have different number of OTP blocks
- */
- do {
- /* save current valid block address
- * check for more block on the link list
- */
- valid_addr = next_link_addr;
- next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
- IWL_DEBUG_EEPROM(trans, "OTP blocks %d addr 0x%x\n",
- usedblocks, next_link_addr);
- if (iwl_read_otp_word(trans, next_link_addr, &link_value))
- return -EINVAL;
- if (!link_value) {
- /*
- * reach the end of link list, return success and
- * set address point to the starting address
- * of the image
- */
- *validblockaddr = valid_addr;
- /* skip first 2 bytes (link list pointer) */
- *validblockaddr += 2;
- return 0;
- }
- /* more in the link list, continue */
- usedblocks++;
- } while (usedblocks <= trans->cfg->base_params->max_ll_items);
-
- /* OTP has no valid blocks */
- IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n");
- return -EINVAL;
-}
-
-/******************************************************************************
- *
- * Tx Power related functions
- *
-******************************************************************************/
-/**
- * iwl_get_max_txpower_avg - get the highest tx power from all chains.
- * find the highest tx power from all chains for the channel
- */
-static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
- struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
- int element, s8 *max_txpower_in_half_dbm)
-{
- s8 max_txpower_avg = 0; /* (dBm) */
-
- /* Take the highest tx power from any valid chains */
- if ((priv->hw_params.valid_tx_ant & ANT_A) &&
- (enhanced_txpower[element].chain_a_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_a_max;
- if ((priv->hw_params.valid_tx_ant & ANT_B) &&
- (enhanced_txpower[element].chain_b_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_b_max;
- if ((priv->hw_params.valid_tx_ant & ANT_C) &&
- (enhanced_txpower[element].chain_c_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].chain_c_max;
- if (((priv->hw_params.valid_tx_ant == ANT_AB) |
- (priv->hw_params.valid_tx_ant == ANT_BC) |
- (priv->hw_params.valid_tx_ant == ANT_AC)) &&
- (enhanced_txpower[element].mimo2_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].mimo2_max;
- if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
- (enhanced_txpower[element].mimo3_max > max_txpower_avg))
- max_txpower_avg = enhanced_txpower[element].mimo3_max;
-
- /*
- * max. tx power in EEPROM is in 1/2 dBm format
- * convert from 1/2 dBm to dBm (round-up convert)
- * but we also do not want to loss 1/2 dBm resolution which
- * will impact performance
- */
- *max_txpower_in_half_dbm = max_txpower_avg;
- return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
-}
-
-static void
-iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
- struct iwl_eeprom_enhanced_txpwr *txp,
- s8 max_txpower_avg)
-{
- int ch_idx;
- bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
- enum ieee80211_band band;
-
- band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
- IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
-
- for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
- struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
-
- /* update matching channel or from common data only */
- if (txp->channel != 0 && ch_info->channel != txp->channel)
- continue;
-
- /* update matching band only */
- if (band != ch_info->band)
- continue;
-
- if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
- ch_info->max_power_avg = max_txpower_avg;
- ch_info->curr_txpow = max_txpower_avg;
- ch_info->scan_power = max_txpower_avg;
- }
-
- if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
- ch_info->ht40_max_power_avg = max_txpower_avg;
- }
-}
-
-#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
-#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
-#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
-
-#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
- ? # x " " : "")
-
-static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
-{
- struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
- int idx, entries;
- __le16 *txp_len;
- s8 max_txp_avg, max_txp_avg_halfdbm;
-
- BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
-
- /* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
- entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
-
- txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
-
- for (idx = 0; idx < entries; idx++) {
- txp = &txp_array[idx];
- /* skip invalid entries */
- if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
- continue;
-
- IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
- (txp->channel && (txp->flags &
- IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
- "Common " : (txp->channel) ?
- "Channel" : "Common",
- (txp->channel),
- TXP_CHECK_AND_PRINT(VALID),
- TXP_CHECK_AND_PRINT(BAND_52G),
- TXP_CHECK_AND_PRINT(OFDM),
- TXP_CHECK_AND_PRINT(40MHZ),
- TXP_CHECK_AND_PRINT(HT_AP),
- TXP_CHECK_AND_PRINT(RES1),
- TXP_CHECK_AND_PRINT(RES2),
- TXP_CHECK_AND_PRINT(COMMON_TYPE),
- txp->flags);
- IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
- "chain_B: 0X%02x chain_C: 0X%02x\n",
- txp->chain_a_max, txp->chain_b_max,
- txp->chain_c_max);
- IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
- "MIMO3: 0x%02x High 20_on_40: 0x%02x "
- "Low 20_on_40: 0x%02x\n",
- txp->mimo2_max, txp->mimo3_max,
- ((txp->delta_20_in_40 & 0xf0) >> 4),
- (txp->delta_20_in_40 & 0x0f));
-
- max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
- &max_txp_avg_halfdbm);
-
- /*
- * Update the user limit values values to the highest
- * power supported by any channel
- */
- if (max_txp_avg > priv->tx_power_user_lmt)
- priv->tx_power_user_lmt = max_txp_avg;
- if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
- priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
-
- iwl_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
- }
-}
-
-/**
- * iwl_eeprom_init - read EEPROM contents
- *
- * Load the EEPROM contents from adapter into priv->eeprom
- *
- * NOTE: This routine uses the non-debug IO access functions.
- */
-int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
-{
- __le16 *e;
- u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP);
- int sz;
- int ret;
- u16 addr;
- u16 validblockaddr = 0;
- u16 cache_addr = 0;
-
- priv->nvm_device_type = iwl_get_nvm_type(priv->trans, hw_rev);
- if (priv->nvm_device_type == -ENOENT)
- return -ENOENT;
- /* allocate eeprom */
- sz = priv->cfg->base_params->eeprom_size;
- IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- priv->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!priv->eeprom) {
- ret = -ENOMEM;
- goto alloc_err;
- }
- e = (__le16 *)priv->eeprom;
-
- ret = iwl_eeprom_verify_signature(priv);
- if (ret < 0) {
- IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
- ret = -ENOENT;
- goto err;
- }
-
- /* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = iwl_eeprom_acquire_semaphore(priv->trans);
- if (ret < 0) {
- IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
- ret = -ENOENT;
- goto err;
- }
-
- if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
-
- ret = iwl_init_otp_access(priv->trans);
- if (ret) {
- IWL_ERR(priv, "Failed to initialize OTP access.\n");
- ret = -ENOENT;
- goto done;
- }
- iwl_write32(priv->trans, CSR_EEPROM_GP,
- iwl_read32(priv->trans, CSR_EEPROM_GP) &
- ~CSR_EEPROM_GP_IF_OWNER_MSK);
-
- iwl_set_bit(priv->trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
- CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- /* traversing the linked list if no shadow ram supported */
- if (!priv->cfg->base_params->shadow_ram_support) {
- if (iwl_find_otp_image(priv->trans, &validblockaddr)) {
- ret = -ENOENT;
- goto done;
- }
- }
- for (addr = validblockaddr; addr < validblockaddr + sz;
- addr += sizeof(u16)) {
- __le16 eeprom_data;
-
- ret = iwl_read_otp_word(priv->trans, addr,
- &eeprom_data);
- if (ret)
- goto done;
- e[cache_addr / 2] = eeprom_data;
- cache_addr += sizeof(u16);
- }
- } else {
- /* eeprom is an array of 16bit values */
- for (addr = 0; addr < sz; addr += sizeof(u16)) {
- u32 r;
-
- iwl_write32(priv->trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
- ret = iwl_poll_bit(priv->trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(priv,
- "Time out reading EEPROM[%d]\n", addr);
- goto done;
- }
- r = iwl_read32(priv->trans, CSR_EEPROM_REG);
- e[addr / 2] = cpu_to_le16(r >> 16);
- }
- }
-
- IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
- ? "OTP" : "EEPROM",
- iwl_eeprom_query16(priv, EEPROM_VERSION));
-
- ret = 0;
-done:
- iwl_eeprom_release_semaphore(priv->trans);
-
-err:
- if (ret)
- iwl_eeprom_free(priv);
-alloc_err:
- return ret;
-}
-
-void iwl_eeprom_free(struct iwl_priv *priv)
-{
- kfree(priv->eeprom);
- priv->eeprom = NULL;
-}
-
-static void iwl_init_band_reference(struct iwl_priv *priv,
- int eep_band, int *eeprom_ch_count,
- const struct iwl_eeprom_channel **eeprom_ch_info,
- const u8 **eeprom_ch_index)
-{
- u32 offset = priv->lib->
- eeprom_ops.regulatory_bands[eep_band - 1];
- switch (eep_band) {
- case 1: /* 2.4GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_1;
- break;
- case 2: /* 4.9GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_2;
- break;
- case 3: /* 5.2GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_3;
- break;
- case 4: /* 5.5GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_4;
- break;
- case 5: /* 5.7GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_5;
- break;
- case 6: /* 2.4GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_6;
- break;
- case 7: /* 5 GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwl_eeprom_band_7;
- break;
- default:
- BUG();
- return;
- }
-}
-
-#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-/**
- * iwl_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
- *
- * Does not set up a command, or touch hardware.
- */
-static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel,
- const struct iwl_eeprom_channel *eeprom_ch,
- u8 clear_ht40_extension_channel)
-{
- struct iwl_channel_info *ch_info;
-
- ch_info = (struct iwl_channel_info *)
- iwl_get_channel_info(priv, band, channel);
-
- if (!is_channel_valid(ch_info))
- return -1;
-
- IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT(IBSS),
- CHECK_AND_PRINT(ACTIVE),
- CHECK_AND_PRINT(RADAR),
- CHECK_AND_PRINT(WIDE),
- CHECK_AND_PRINT(DFS),
- eeprom_ch->flags,
- eeprom_ch->max_power_avg,
- ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
- "" : "not ");
-
- ch_info->ht40_eeprom = *eeprom_ch;
- ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
- ch_info->ht40_flags = eeprom_ch->flags;
- if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
- ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
-
- return 0;
-}
-
-#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-
-/**
- * iwl_init_channel_map - Set up driver's info for all possible channels
- */
-int iwl_init_channel_map(struct iwl_priv *priv)
-{
- int eeprom_ch_count = 0;
- const u8 *eeprom_ch_index = NULL;
- const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
- int band, ch;
- struct iwl_channel_info *ch_info;
-
- if (priv->channel_count) {
- IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
- return 0;
- }
-
- IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
-
- priv->channel_count =
- ARRAY_SIZE(iwl_eeprom_band_1) +
- ARRAY_SIZE(iwl_eeprom_band_2) +
- ARRAY_SIZE(iwl_eeprom_band_3) +
- ARRAY_SIZE(iwl_eeprom_band_4) +
- ARRAY_SIZE(iwl_eeprom_band_5);
-
- IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
- priv->channel_count);
-
- priv->channel_info = kcalloc(priv->channel_count,
- sizeof(struct iwl_channel_info),
- GFP_KERNEL);
- if (!priv->channel_info) {
- IWL_ERR(priv, "Could not allocate channel_info\n");
- priv->channel_count = 0;
- return -ENOMEM;
- }
-
- ch_info = priv->channel_info;
-
- /* Loop through the 5 EEPROM bands adding them in order to the
- * channel map we maintain (that contains additional information than
- * what just in the EEPROM) */
- for (band = 1; band <= 5; band++) {
-
- iwl_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- ch_info->channel = eeprom_ch_index[ch];
- ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
-
- /* permanently store EEPROM's channel regulatory flags
- * and max power in channel info database. */
- ch_info->eeprom = eeprom_ch_info[ch];
-
- /* Copy the run-time flags so they are there even on
- * invalid channels */
- ch_info->flags = eeprom_ch_info[ch].flags;
- /* First write that ht40 is not enabled, and then enable
- * one by one */
- ch_info->ht40_extension_channel =
- IEEE80211_CHAN_NO_HT40;
-
- if (!(is_channel_valid(ch_info))) {
- IWL_DEBUG_EEPROM(priv,
- "Ch. %d Flags %x [%sGHz] - "
- "No traffic\n",
- ch_info->channel,
- ch_info->flags,
- is_channel_a_band(ch_info) ?
- "5.2" : "2.4");
- ch_info++;
- continue;
- }
-
- /* Initialize regulatory-based run-time data */
- ch_info->max_power_avg = ch_info->curr_txpow =
- eeprom_ch_info[ch].max_power_avg;
- ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
- ch_info->min_power = 0;
-
- IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
- "%s%s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(WIDE),
- CHECK_AND_PRINT_I(DFS),
- eeprom_ch_info[ch].flags,
- eeprom_ch_info[ch].max_power_avg,
- ((eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_RADAR))
- ? "" : "not ");
-
- ch_info++;
- }
- }
-
- /* Check if we do have HT40 channels */
- if (priv->lib->eeprom_ops.regulatory_bands[5] ==
- EEPROM_REGULATORY_BAND_NO_HT40 &&
- priv->lib->eeprom_ops.regulatory_bands[6] ==
- EEPROM_REGULATORY_BAND_NO_HT40)
- return 0;
-
- /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
- for (band = 6; band <= 7; band++) {
- enum ieee80211_band ieeeband;
-
- iwl_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
- ieeeband =
- (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- /* Set up driver's info for lower half */
- iwl_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch],
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40PLUS);
-
- /* Set up driver's info for upper half */
- iwl_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch] + 4,
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40MINUS);
- }
- }
-
- /* for newer device (6000 series and up)
- * EEPROM contain enhanced tx power information
- * driver need to process addition information
- * to determine the max channel tx power limits
- */
- if (priv->lib->eeprom_ops.enhanced_txpower)
- iwl_eeprom_enhanced_txpower(priv);
-
- return 0;
-}
-
-/*
- * iwl_free_channel_map - undo allocations in iwl_init_channel_map
- */
-void iwl_free_channel_map(struct iwl_priv *priv)
-{
- kfree(priv->channel_info);
- priv->channel_count = 0;
-}
-
-/**
- * iwl_get_channel_info - Find driver's private channel info
- *
- * Based on band and channel number.
- */
-const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel)
-{
- int i;
-
- switch (band) {
- case IEEE80211_BAND_5GHZ:
- for (i = 14; i < priv->channel_count; i++) {
- if (priv->channel_info[i].channel == channel)
- return &priv->channel_info[i];
- }
- break;
- case IEEE80211_BAND_2GHZ:
- if (channel >= 1 && channel <= 14)
- return &priv->channel_info[channel - 1];
- break;
- default:
- BUG();
- }
-
- return NULL;
-}
-
-void iwl_rf_config(struct iwl_priv *priv)
-{
- u16 radio_cfg;
-
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- /* write radio config values to register */
- if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
- iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
- EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
- IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg),
- EEPROM_RF_CFG_STEP_MSK(radio_cfg),
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
- } else
- WARN_ON(1);
-
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
deleted file mode 100644
index 64bfd947caeb..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_eeprom_h__
-#define __iwl_eeprom_h__
-
-#include <net/mac80211.h>
-
-struct iwl_priv;
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
-/*
- * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
- *
- * IBSS and/or AP operation is allowed *only* on those channels with
- * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
- * RADAR detection is not supported by the 4965 driver, but is a
- * requirement for establishing a new network for legal operation on channels
- * requiring RADAR detection or restricting ACTIVE scanning.
- *
- * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
- * It only indicates that 20 MHz channel use is supported; HT40 channel
- * usage is indicated by a separate set of regulatory flags for each
- * HT40 channel pair.
- *
- * NOTE: Using a channel inappropriately will result in a uCode error!
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-enum {
- EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
- EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
- /* Bit 2 Reserved */
- EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
- EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
- EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
- /* Bit 6 Reserved (was Narrow Channel) */
- EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
-};
-
-/* SKU Capabilities */
-#define EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
-#define EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
-#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
-#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
-#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
-
-/* *regulatory* channel data format in eeprom, one for each channel.
- * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
-struct iwl_eeprom_channel {
- u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
- s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
-} __packed;
-
-enum iwl_eeprom_enhanced_txpwr_flags {
- IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
- IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
- IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
- IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
- IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
- IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
- IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
- IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
-};
-
-/**
- * iwl_eeprom_enhanced_txpwr structure
- * This structure presents the enhanced regulatory tx power limit layout
- * in eeprom image
- * Enhanced regulatory tx power portion of eeprom image can be broken down
- * into individual structures; each one is 8 bytes in size and contain the
- * following information
- * @flags: entry flags
- * @channel: channel number
- * @chain_a_max_pwr: chain a max power in 1/2 dBm
- * @chain_b_max_pwr: chain b max power in 1/2 dBm
- * @chain_c_max_pwr: chain c max power in 1/2 dBm
- * @delta_20_in_40: 20-in-40 deltas (hi/lo)
- * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
- * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
- *
- */
-struct iwl_eeprom_enhanced_txpwr {
- u8 flags;
- u8 channel;
- s8 chain_a_max;
- s8 chain_b_max;
- s8 chain_c_max;
- u8 delta_20_in_40;
- s8 mimo2_max;
- s8 mimo3_max;
-} __packed;
-
-/* calibration */
-struct iwl_eeprom_calib_hdr {
- u8 version;
- u8 pa_type;
- __le16 voltage;
-} __packed;
-
-#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
-#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
-
-/* temperature */
-#define EEPROM_KELVIN_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
-#define EEPROM_RAW_TEMPERATURE ((2*0x12B) | EEPROM_CALIB_ALL)
-
-
-/* agn links */
-#define EEPROM_LINK_HOST (2*0x64)
-#define EEPROM_LINK_GENERAL (2*0x65)
-#define EEPROM_LINK_REGULATORY (2*0x66)
-#define EEPROM_LINK_CALIBRATION (2*0x67)
-#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
-#define EEPROM_LINK_OTHERS (2*0x69)
-#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
-#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
-
-/* agn regulatory - indirect access */
-#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
-#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
-#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
-#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
-#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
-#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
-#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
-
-/* 6000 regulatory - indirect access */
-#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
- | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
-/* 2.4 GHz */
-extern const u8 iwl_eeprom_band_1[14];
-
-#define ADDRESS_MSK 0x0000FFFF
-#define INDIRECT_TYPE_MSK 0x000F0000
-#define INDIRECT_HOST 0x00010000
-#define INDIRECT_GENERAL 0x00020000
-#define INDIRECT_REGULATORY 0x00030000
-#define INDIRECT_CALIBRATION 0x00040000
-#define INDIRECT_PROCESS_ADJST 0x00050000
-#define INDIRECT_OTHERS 0x00060000
-#define INDIRECT_TXP_LIMIT 0x00070000
-#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
-#define INDIRECT_ADDRESS 0x00100000
-
-/* General */
-#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
-#define EEPROM_SUBSYSTEM_ID (2*0x0A) /* 2 bytes */
-#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
-#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
-#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
-#define EEPROM_VERSION (2*0x44) /* 2 bytes */
-#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
-#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
-#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
-#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
-
-/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
-#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
-#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
-#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
-#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
-#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
-#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
-#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
-
-#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
-
-struct iwl_eeprom_ops {
- const u32 regulatory_bands[7];
- bool enhanced_txpower;
-};
-
-
-int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
-void iwl_eeprom_free(struct iwl_priv *priv);
-int iwl_eeprom_check_version(struct iwl_priv *priv);
-int iwl_eeprom_init_hw_params(struct iwl_priv *priv);
-u16 iwl_eeprom_calib_version(struct iwl_priv *priv);
-const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset);
-u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset);
-void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac);
-int iwl_init_channel_map(struct iwl_priv *priv);
-void iwl_free_channel_map(struct iwl_priv *priv);
-const struct iwl_channel_info *iwl_get_channel_info(
- const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel);
-void iwl_rf_config(struct iwl_priv *priv);
-
-#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 74bce97a8600..806046641747 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -421,6 +421,8 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
(FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
+#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)
+
/* Instruct FH to increment the retry count of a packet when
* it is brought from the memory to TX-FIFO
*/
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 081dd34d2387..3dfebfb8434f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -27,9 +27,10 @@
*****************************************************************************/
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/export.h>
#include "iwl-io.h"
-#include"iwl-csr.h"
+#include "iwl-csr.h"
#include "iwl-debug.h"
#define IWL_POLL_INTERVAL 10 /* microseconds */
@@ -52,6 +53,7 @@ void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
__iwl_set_bit(trans, reg, mask);
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_set_bit);
void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
{
@@ -61,6 +63,25 @@ void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
__iwl_clear_bit(trans, reg, mask);
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_clear_bit);
+
+void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
+{
+ unsigned long flags;
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
+}
+EXPORT_SYMBOL_GPL(iwl_set_bits_mask);
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout)
@@ -76,6 +97,7 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
return -ETIMEDOUT;
}
+EXPORT_SYMBOL_GPL(iwl_poll_bit);
int iwl_grab_nic_access_silent(struct iwl_trans *trans)
{
@@ -117,6 +139,7 @@ int iwl_grab_nic_access_silent(struct iwl_trans *trans)
return 0;
}
+EXPORT_SYMBOL_GPL(iwl_grab_nic_access_silent);
bool iwl_grab_nic_access(struct iwl_trans *trans)
{
@@ -130,6 +153,7 @@ bool iwl_grab_nic_access(struct iwl_trans *trans)
return true;
}
+EXPORT_SYMBOL_GPL(iwl_grab_nic_access);
void iwl_release_nic_access(struct iwl_trans *trans)
{
@@ -144,6 +168,7 @@ void iwl_release_nic_access(struct iwl_trans *trans)
*/
mmiowb();
}
+EXPORT_SYMBOL_GPL(iwl_release_nic_access);
u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
{
@@ -158,6 +183,7 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
return value;
}
+EXPORT_SYMBOL_GPL(iwl_read_direct32);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
{
@@ -170,6 +196,7 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_write_direct32);
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
int timeout)
@@ -185,6 +212,7 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
return -ETIMEDOUT;
}
+EXPORT_SYMBOL_GPL(iwl_poll_direct_bit);
static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 reg)
{
@@ -211,6 +239,7 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 reg)
spin_unlock_irqrestore(&trans->reg_lock, flags);
return val;
}
+EXPORT_SYMBOL_GPL(iwl_read_prph);
void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
@@ -223,6 +252,7 @@ void iwl_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_write_prph);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
{
@@ -236,6 +266,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_set_bits_prph);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
u32 bits, u32 mask)
@@ -250,6 +281,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
{
@@ -264,9 +296,10 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask)
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
-void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
- void *buf, int words)
+void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
{
unsigned long flags;
int offs;
@@ -275,24 +308,26 @@ void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
- for (offs = 0; offs < words; offs++)
+ for (offs = 0; offs < dwords; offs++)
vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
iwl_release_nic_access(trans);
}
spin_unlock_irqrestore(&trans->reg_lock, flags);
}
+EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
{
u32 value;
- _iwl_read_targ_mem_words(trans, addr, &value, 1);
+ _iwl_read_targ_mem_dwords(trans, addr, &value, 1);
return value;
}
+EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
-int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
- void *buf, int words)
+int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
{
unsigned long flags;
int offs, result = 0;
@@ -301,7 +336,7 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
spin_lock_irqsave(&trans->reg_lock, flags);
if (likely(iwl_grab_nic_access(trans))) {
iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
- for (offs = 0; offs < words; offs++)
+ for (offs = 0; offs < dwords; offs++)
iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
iwl_release_nic_access(trans);
} else
@@ -310,8 +345,10 @@ int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
return result;
}
+EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
{
- return _iwl_write_targ_mem_words(trans, addr, &val, 1);
+ return _iwl_write_targ_mem_dwords(trans, addr, &val, 1);
}
+EXPORT_SYMBOL_GPL(iwl_write_targ_mem);
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index abb3250164ba..50d3819739d1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -54,6 +54,8 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
+
int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
@@ -74,18 +76,18 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask);
-void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr,
- void *buf, int words);
+void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
-#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \
+#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \
do { \
BUILD_BUG_ON((bufsize) % sizeof(u32)); \
- _iwl_read_targ_mem_words(trans, addr, buf, \
- (bufsize) / sizeof(u32));\
+ _iwl_read_targ_mem_dwords(trans, addr, buf, \
+ (bufsize) / sizeof(u32));\
} while (0)
-int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr,
- void *buf, int words);
+int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index 0066b899fe5c..c61f2070f15a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -61,6 +61,7 @@
*
*****************************************************************************/
#include <linux/sched.h>
+#include <linux/export.h>
#include "iwl-notif-wait.h"
@@ -71,6 +72,7 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
INIT_LIST_HEAD(&notif_wait->notif_waits);
init_waitqueue_head(&notif_wait->notif_waitq);
}
+EXPORT_SYMBOL_GPL(iwl_notification_wait_init);
void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
struct iwl_rx_packet *pkt)
@@ -115,20 +117,20 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
if (triggered)
wake_up_all(&notif_wait->notif_waitq);
}
+EXPORT_SYMBOL_GPL(iwl_notification_wait_notify);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
{
- unsigned long flags;
struct iwl_notification_wait *wait_entry;
- spin_lock_irqsave(&notif_wait->notif_wait_lock, flags);
+ spin_lock(&notif_wait->notif_wait_lock);
list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
wait_entry->aborted = true;
- spin_unlock_irqrestore(&notif_wait->notif_wait_lock, flags);
+ spin_unlock(&notif_wait->notif_wait_lock);
wake_up_all(&notif_wait->notif_waitq);
}
-
+EXPORT_SYMBOL_GPL(iwl_abort_notification_waits);
void
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
@@ -152,6 +154,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
list_add(&wait_entry->list, &notif_wait->notif_waits);
spin_unlock_bh(&notif_wait->notif_wait_lock);
}
+EXPORT_SYMBOL_GPL(iwl_init_notification_wait);
int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
struct iwl_notification_wait *wait_entry,
@@ -175,6 +178,7 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
return -ETIMEDOUT;
return 0;
}
+EXPORT_SYMBOL_GPL(iwl_wait_notification);
void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
struct iwl_notification_wait *wait_entry)
@@ -183,3 +187,4 @@ void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
list_del(&wait_entry->list);
spin_unlock_bh(&notif_wait->notif_wait_lock);
}
+EXPORT_SYMBOL_GPL(iwl_remove_notification);
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 4ef742b28e08..64886f95664f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -111,22 +111,25 @@ struct iwl_cfg;
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
* HCMD the this Rx responds to.
- * Must be atomic.
+ * Must be atomic and called with BH disabled.
* @queue_full: notifies that a HW queue is full.
- * Must be atomic
+ * Must be atomic and called with BH disabled.
* @queue_not_full: notifies that a HW queue is not full any more.
- * Must be atomic
+ * Must be atomic and called with BH disabled.
* @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
* the radio is killed. Must be atomic.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
* Must be atomic
- * @nic_error: error notification. Must be atomic
- * @cmd_queue_full: Called when the command queue gets full. Must be atomic.
+ * @nic_error: error notification. Must be atomic and must be called with BH
+ * disabled.
+ * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
+ * called with BH disabled.
* @nic_config: configure NIC, called before firmware is started.
* May sleep
- * @wimax_active: invoked when WiMax becomes active. Must be atomic.
+ * @wimax_active: invoked when WiMax becomes active. Must be atomic and called
+ * with BH disabled.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -145,6 +148,9 @@ struct iwl_op_mode_ops {
void (*wimax_active)(struct iwl_op_mode *op_mode);
};
+int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
+void iwl_opmode_deregister(const char *name);
+
/**
* struct iwl_op_mode - operational mode
*
@@ -162,7 +168,6 @@ struct iwl_op_mode {
static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
{
might_sleep();
-
op_mode->ops->stop(op_mode);
}
@@ -218,9 +223,4 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
op_mode->ops->wimax_active(op_mode);
}
-/*****************************************************
-* Op mode layers implementations
-******************************************************/
-extern const struct iwl_op_mode_ops iwl_dvm_ops;
-
#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index dfd54662e3e6..9253ef1dba72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -187,7 +187,7 @@
#define SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
#define SCD_QUEUE_STTS_REG_POS_WSL (4)
#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
-#define SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
+#define SCD_QUEUE_STTS_REG_MSK (0x017F0000)
#define SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
#define SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
new file mode 100644
index 000000000000..81e8c7126d72
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -0,0 +1,856 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/export.h>
+#include <net/netlink.h>
+
+#include "iwl-io.h"
+#include "iwl-fh.h"
+#include "iwl-prph.h"
+#include "iwl-trans.h"
+#include "iwl-test.h"
+#include "iwl-csr.h"
+#include "iwl-testmode.h"
+
+/*
+ * Periphery registers absolute lower bound. This is used in order to
+ * differentiate registery access through HBUS_TARG_PRPH_* and
+ * HBUS_TARG_MEM_* accesses.
+ */
+#define IWL_ABS_PRPH_START (0xA00000)
+
+/*
+ * The TLVs used in the gnl message policy between the kernel module and
+ * user space application. iwl_testmode_gnl_msg_policy is to be carried
+ * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
+ * See iwl-testmode.h
+ */
+static
+struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
+ [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
+ [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
+ [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
+ [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
+ [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
+ [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
+
+ [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
+
+ [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
+};
+
+static inline void iwl_test_trace_clear(struct iwl_test *tst)
+{
+ memset(&tst->trace, 0, sizeof(struct iwl_test_trace));
+}
+
+static void iwl_test_trace_stop(struct iwl_test *tst)
+{
+ if (!tst->trace.enabled)
+ return;
+
+ if (tst->trace.cpu_addr && tst->trace.dma_addr)
+ dma_free_coherent(tst->trans->dev,
+ tst->trace.tsize,
+ tst->trace.cpu_addr,
+ tst->trace.dma_addr);
+
+ iwl_test_trace_clear(tst);
+}
+
+static inline void iwl_test_mem_clear(struct iwl_test *tst)
+{
+ memset(&tst->mem, 0, sizeof(struct iwl_test_mem));
+}
+
+static inline void iwl_test_mem_stop(struct iwl_test *tst)
+{
+ if (!tst->mem.in_read)
+ return;
+
+ iwl_test_mem_clear(tst);
+}
+
+/*
+ * Initializes the test object
+ * During the lifetime of the test object it is assumed that the transport is
+ * started. The test object should be stopped before the transport is stopped.
+ */
+void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
+ struct iwl_test_ops *ops)
+{
+ tst->trans = trans;
+ tst->ops = ops;
+
+ iwl_test_trace_clear(tst);
+ iwl_test_mem_clear(tst);
+}
+EXPORT_SYMBOL_GPL(iwl_test_init);
+
+/*
+ * Stop the test object
+ */
+void iwl_test_free(struct iwl_test *tst)
+{
+ iwl_test_mem_stop(tst);
+ iwl_test_trace_stop(tst);
+}
+EXPORT_SYMBOL_GPL(iwl_test_free);
+
+static inline int iwl_test_send_cmd(struct iwl_test *tst,
+ struct iwl_host_cmd *cmd)
+{
+ return tst->ops->send_cmd(tst->trans->op_mode, cmd);
+}
+
+static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr)
+{
+ return tst->ops->valid_hw_addr(addr);
+}
+
+static inline u32 iwl_test_fw_ver(struct iwl_test *tst)
+{
+ return tst->ops->get_fw_ver(tst->trans->op_mode);
+}
+
+static inline struct sk_buff*
+iwl_test_alloc_reply(struct iwl_test *tst, int len)
+{
+ return tst->ops->alloc_reply(tst->trans->op_mode, len);
+}
+
+static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb)
+{
+ return tst->ops->reply(tst->trans->op_mode, skb);
+}
+
+static inline struct sk_buff*
+iwl_test_alloc_event(struct iwl_test *tst, int len)
+{
+ return tst->ops->alloc_event(tst->trans->op_mode, len);
+}
+
+static inline void
+iwl_test_event(struct iwl_test *tst, struct sk_buff *skb)
+{
+ return tst->ops->event(tst->trans->op_mode, skb);
+}
+
+/*
+ * This function handles the user application commands to the fw. The fw
+ * commands are sent in a synchronuous manner. In case that the user requested
+ * to get commands response, it is send to the user.
+ */
+static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb)
+{
+ struct iwl_host_cmd cmd;
+ struct iwl_rx_packet *pkt;
+ struct sk_buff *skb;
+ void *reply_buf;
+ u32 reply_len;
+ int ret;
+ bool cmd_want_skb;
+
+ memset(&cmd, 0, sizeof(struct iwl_host_cmd));
+
+ if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
+ !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
+ IWL_ERR(tst->trans, "Missing fw command mandatory fields\n");
+ return -ENOMSG;
+ }
+
+ cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
+ cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
+ if (cmd_want_skb)
+ cmd.flags |= CMD_WANT_SKB;
+
+ cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
+ cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
+ cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
+ cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+ IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n",
+ cmd.id, cmd.flags, cmd.len[0]);
+
+ ret = iwl_test_send_cmd(tst, &cmd);
+ if (ret) {
+ IWL_ERR(tst->trans, "Failed to send hcmd\n");
+ return ret;
+ }
+ if (!cmd_want_skb)
+ return ret;
+
+ /* Handling return of SKB to the user */
+ pkt = cmd.resp_pkt;
+ if (!pkt) {
+ IWL_ERR(tst->trans, "HCMD received a null response packet\n");
+ return ret;
+ }
+
+ reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ skb = iwl_test_alloc_reply(tst, reply_len + 20);
+ reply_buf = kmalloc(reply_len, GFP_KERNEL);
+ if (!skb || !reply_buf) {
+ kfree_skb(skb);
+ kfree(reply_buf);
+ return -ENOMEM;
+ }
+
+ /* The reply is in a page, that we cannot send to user space. */
+ memcpy(reply_buf, &(pkt->hdr), reply_len);
+ iwl_free_resp(&cmd);
+
+ if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+ IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
+ nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
+ goto nla_put_failure;
+ return iwl_test_reply(tst, skb);
+
+nla_put_failure:
+ IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n");
+ kfree(reply_buf);
+ kfree_skb(skb);
+ return -ENOMSG;
+}
+
+/*
+ * Handles the user application commands for register access.
+ */
+static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb)
+{
+ u32 ofs, val32, cmd;
+ u8 val8;
+ struct sk_buff *skb;
+ int status = 0;
+ struct iwl_trans *trans = tst->trans;
+
+ if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
+ IWL_ERR(trans, "Missing reg offset\n");
+ return -ENOMSG;
+ }
+
+ ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
+ IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs);
+
+ cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
+
+ /*
+ * Allow access only to FH/CSR/HBUS in direct mode.
+ * Since we don't have the upper bounds for the CSR and HBUS segments,
+ * we will use only the upper bound of FH for sanity check.
+ */
+ if (ofs >= FH_MEM_UPPER_BOUND) {
+ IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n",
+ FH_MEM_UPPER_BOUND);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ val32 = iwl_read_direct32(tst->trans, ofs);
+ IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32);
+
+ skb = iwl_test_alloc_reply(tst, 20);
+ if (!skb) {
+ IWL_ERR(trans, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+ if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
+ goto nla_put_failure;
+ status = iwl_test_reply(tst, skb);
+ if (status < 0)
+ IWL_ERR(trans, "Error sending msg : %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
+ IWL_ERR(trans, "Missing value to write\n");
+ return -ENOMSG;
+ } else {
+ val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
+ IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32);
+ iwl_write_direct32(tst->trans, ofs, val32);
+ }
+ break;
+
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+ if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
+ IWL_ERR(trans, "Missing value to write\n");
+ return -ENOMSG;
+ } else {
+ val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
+ IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8);
+ iwl_write8(tst->trans, ofs, val8);
+ }
+ break;
+
+ default:
+ IWL_ERR(trans, "Unknown test register cmd ID\n");
+ return -ENOMSG;
+ }
+
+ return status;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+/*
+ * Handles the request to start FW tracing. Allocates of the trace buffer
+ * and sends a reply to user space with the address of the allocated buffer.
+ */
+static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb)
+{
+ struct sk_buff *skb;
+ int status = 0;
+
+ if (tst->trace.enabled)
+ return -EBUSY;
+
+ if (!tb[IWL_TM_ATTR_TRACE_SIZE])
+ tst->trace.size = TRACE_BUFF_SIZE_DEF;
+ else
+ tst->trace.size =
+ nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
+
+ if (!tst->trace.size)
+ return -EINVAL;
+
+ if (tst->trace.size < TRACE_BUFF_SIZE_MIN ||
+ tst->trace.size > TRACE_BUFF_SIZE_MAX)
+ return -EINVAL;
+
+ tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD;
+ tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev,
+ tst->trace.tsize,
+ &tst->trace.dma_addr,
+ GFP_KERNEL);
+ if (!tst->trace.cpu_addr)
+ return -ENOMEM;
+
+ tst->trace.enabled = true;
+ tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100);
+
+ memset(tst->trace.trace_addr, 0x03B, tst->trace.size);
+
+ skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20);
+ if (!skb) {
+ IWL_ERR(tst->trans, "Memory allocation fail\n");
+ iwl_test_trace_stop(tst);
+ return -ENOMEM;
+ }
+
+ if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
+ sizeof(tst->trace.dma_addr),
+ (u64 *)&tst->trace.dma_addr))
+ goto nla_put_failure;
+
+ status = iwl_test_reply(tst, skb);
+ if (status < 0)
+ IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
+
+ tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size,
+ DUMP_CHUNK_SIZE);
+
+ return status;
+
+nla_put_failure:
+ kfree_skb(skb);
+ if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
+ IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
+ iwl_test_trace_stop(tst);
+ return -EMSGSIZE;
+}
+
+/*
+ * Handles indirect read from the periphery or the SRAM. The read is performed
+ * to a temporary buffer. The user space application should later issue a dump
+ */
+static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
+{
+ struct iwl_trans *trans = tst->trans;
+ unsigned long flags;
+ int i;
+
+ if (size & 0x3)
+ return -EINVAL;
+
+ tst->mem.size = size;
+ tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL);
+ if (tst->mem.addr == NULL)
+ return -ENOMEM;
+
+ /* Hard-coded periphery absolute address */
+ if (IWL_ABS_PRPH_START <= addr &&
+ addr < IWL_ABS_PRPH_START + PRPH_END) {
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ iwl_grab_nic_access(trans);
+ iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
+ addr | (3 << 24));
+ for (i = 0; i < size; i += 4)
+ *(u32 *)(tst->mem.addr + i) =
+ iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
+ iwl_release_nic_access(trans);
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
+ } else { /* target memory (SRAM) */
+ _iwl_read_targ_mem_dwords(trans, addr,
+ tst->mem.addr,
+ tst->mem.size / 4);
+ }
+
+ tst->mem.nchunks =
+ DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE);
+ tst->mem.in_read = true;
+ return 0;
+
+}
+
+/*
+ * Handles indirect write to the periphery or SRAM. The is performed to a
+ * temporary buffer.
+ */
+static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
+ u32 size, unsigned char *buf)
+{
+ struct iwl_trans *trans = tst->trans;
+ u32 val, i;
+ unsigned long flags;
+
+ if (IWL_ABS_PRPH_START <= addr &&
+ addr < IWL_ABS_PRPH_START + PRPH_END) {
+ /* Periphery writes can be 1-3 bytes long, or DWORDs */
+ if (size < 4) {
+ memcpy(&val, buf, size);
+ spin_lock_irqsave(&trans->reg_lock, flags);
+ iwl_grab_nic_access(trans);
+ iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
+ (addr & 0x0000FFFF) |
+ ((size - 1) << 24));
+ iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+ iwl_release_nic_access(trans);
+ /* needed after consecutive writes w/o read */
+ mmiowb();
+ spin_unlock_irqrestore(&trans->reg_lock, flags);
+ } else {
+ if (size % 4)
+ return -EINVAL;
+ for (i = 0; i < size; i += 4)
+ iwl_write_prph(trans, addr+i,
+ *(u32 *)(buf+i));
+ }
+ } else if (iwl_test_valid_hw_addr(tst, addr)) {
+ _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Handles the user application commands for indirect read/write
+ * to/from the periphery or the SRAM.
+ */
+static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb)
+{
+ u32 addr, size, cmd;
+ unsigned char *buf;
+
+ /* Both read and write should be blocked, for atomicity */
+ if (tst->mem.in_read)
+ return -EBUSY;
+
+ cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
+ if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
+ IWL_ERR(tst->trans, "Error finding memory offset address\n");
+ return -ENOMSG;
+ }
+ addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
+ if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
+ IWL_ERR(tst->trans, "Error finding size for memory reading\n");
+ return -ENOMSG;
+ }
+ size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
+
+ if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) {
+ return iwl_test_indirect_read(tst, addr, size);
+ } else {
+ if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
+ return -EINVAL;
+ buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
+ return iwl_test_indirect_write(tst, addr, size, buf);
+ }
+}
+
+/*
+ * Enable notifications to user space
+ */
+static int iwl_test_notifications(struct iwl_test *tst,
+ struct nlattr **tb)
+{
+ tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
+ return 0;
+}
+
+/*
+ * Handles the request to get the device id
+ */
+static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb)
+{
+ u32 devid = tst->trans->hw_id;
+ struct sk_buff *skb;
+ int status;
+
+ IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid);
+
+ skb = iwl_test_alloc_reply(tst, 20);
+ if (!skb) {
+ IWL_ERR(tst->trans, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+
+ if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
+ goto nla_put_failure;
+ status = iwl_test_reply(tst, skb);
+ if (status < 0)
+ IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
+
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+/*
+ * Handles the request to get the FW version
+ */
+static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb)
+{
+ struct sk_buff *skb;
+ int status;
+ u32 ver = iwl_test_fw_ver(tst);
+
+ IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver);
+
+ skb = iwl_test_alloc_reply(tst, 20);
+ if (!skb) {
+ IWL_ERR(tst->trans, "Memory allocation fail\n");
+ return -ENOMEM;
+ }
+
+ if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver))
+ goto nla_put_failure;
+
+ status = iwl_test_reply(tst, skb);
+ if (status < 0)
+ IWL_ERR(tst->trans, "Error sending msg : %d\n", status);
+
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+/*
+ * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists
+ */
+int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
+ void *data, int len)
+{
+ int result;
+
+ result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
+ iwl_testmode_gnl_msg_policy);
+ if (result) {
+ IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result);
+ return result;
+ }
+
+ /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
+ if (!tb[IWL_TM_ATTR_COMMAND]) {
+ IWL_ERR(tst->trans, "Missing testmode command type\n");
+ return -ENOMSG;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iwl_test_parse);
+
+/*
+ * Handle test commands.
+ * Returns 1 for unknown commands (not handled by the test object); negative
+ * value in case of error.
+ */
+int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
+{
+ int result;
+
+ switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
+ case IWL_TM_CMD_APP2DEV_UCODE:
+ IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n");
+ result = iwl_test_fw_cmd(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+ IWL_DEBUG_INFO(tst->trans, "test cmd to register\n");
+ result = iwl_test_reg(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
+ IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n");
+ result = iwl_test_trace_begin(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_END_TRACE:
+ iwl_test_trace_stop(tst);
+ result = 0;
+ break;
+
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
+ IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n");
+ result = iwl_test_indirect_mem(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
+ IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n");
+ result = iwl_test_notifications(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n");
+ result = iwl_test_get_fw_ver(tst, tb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+ IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n");
+ result = iwl_test_get_dev_id(tst, tb);
+ break;
+
+ default:
+ IWL_DEBUG_INFO(tst->trans, "Unknown test command\n");
+ result = 1;
+ break;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(iwl_test_handle_cmd);
+
+static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int idx, length;
+
+ if (!tst->trace.enabled || !tst->trace.trace_addr)
+ return -EFAULT;
+
+ idx = cb->args[4];
+ if (idx >= tst->trace.nchunks)
+ return -ENOENT;
+
+ length = DUMP_CHUNK_SIZE;
+ if (((idx + 1) == tst->trace.nchunks) &&
+ (tst->trace.size % DUMP_CHUNK_SIZE))
+ length = tst->trace.size %
+ DUMP_CHUNK_SIZE;
+
+ if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
+ tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx)))
+ goto nla_put_failure;
+
+ cb->args[4] = ++idx;
+ return 0;
+
+ nla_put_failure:
+ return -ENOBUFS;
+}
+
+static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int idx, length;
+
+ if (!tst->mem.in_read)
+ return -EFAULT;
+
+ idx = cb->args[4];
+ if (idx >= tst->mem.nchunks) {
+ iwl_test_mem_stop(tst);
+ return -ENOENT;
+ }
+
+ length = DUMP_CHUNK_SIZE;
+ if (((idx + 1) == tst->mem.nchunks) &&
+ (tst->mem.size % DUMP_CHUNK_SIZE))
+ length = tst->mem.size % DUMP_CHUNK_SIZE;
+
+ if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
+ tst->mem.addr + (DUMP_CHUNK_SIZE * idx)))
+ goto nla_put_failure;
+
+ cb->args[4] = ++idx;
+ return 0;
+
+ nla_put_failure:
+ return -ENOBUFS;
+}
+
+/*
+ * Handle dump commands.
+ * Returns 1 for unknown commands (not handled by the test object); negative
+ * value in case of error.
+ */
+int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ int result;
+
+ switch (cmd) {
+ case IWL_TM_CMD_APP2DEV_READ_TRACE:
+ IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n");
+ result = iwl_test_trace_dump(tst, skb, cb);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
+ IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n");
+ result = iwl_test_buffer_dump(tst, skb, cb);
+ break;
+
+ default:
+ result = 1;
+ break;
+ }
+ return result;
+}
+EXPORT_SYMBOL_GPL(iwl_test_dump);
+
+/*
+ * Multicast a spontaneous messages from the device to the user space.
+ */
+static void iwl_test_send_rx(struct iwl_test *tst,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct sk_buff *skb;
+ struct iwl_rx_packet *data;
+ int length;
+
+ data = rxb_addr(rxb);
+ length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+
+ /* the length doesn't include len_n_flags field, so add it manually */
+ length += sizeof(__le32);
+
+ skb = iwl_test_alloc_event(tst, length + 20);
+ if (skb == NULL) {
+ IWL_ERR(tst->trans, "Out of memory for message to user\n");
+ return;
+ }
+
+ if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
+ IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
+ nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data))
+ goto nla_put_failure;
+
+ iwl_test_event(tst, skb);
+ return;
+
+nla_put_failure:
+ kfree_skb(skb);
+ IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n");
+}
+
+/*
+ * Called whenever a Rx frames is recevied from the device. If notifications to
+ * the user space are requested, sends the frames to the user.
+ */
+void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
+{
+ if (tst->notify)
+ iwl_test_send_rx(tst, rxb);
+}
+EXPORT_SYMBOL_GPL(iwl_test_rx);
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
new file mode 100644
index 000000000000..e13ffa8acc02
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -0,0 +1,161 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_TEST_H__
+#define __IWL_TEST_H__
+
+#include <linux/types.h>
+#include "iwl-trans.h"
+
+struct iwl_test_trace {
+ u32 size;
+ u32 tsize;
+ u32 nchunks;
+ u8 *cpu_addr;
+ u8 *trace_addr;
+ dma_addr_t dma_addr;
+ bool enabled;
+};
+
+struct iwl_test_mem {
+ u32 size;
+ u32 nchunks;
+ u8 *addr;
+ bool in_read;
+};
+
+/*
+ * struct iwl_test_ops: callback to the op mode
+ *
+ * The structure defines the callbacks that the op_mode should handle,
+ * inorder to handle logic that is out of the scope of iwl_test. The
+ * op_mode must set all the callbacks.
+
+ * @send_cmd: handler that is used by the test object to request the
+ * op_mode to send a command to the fw.
+ *
+ * @valid_hw_addr: handler that is used by the test object to request the
+ * op_mode to check if the given address is a valid address.
+ *
+ * @get_fw_ver: handler used to get the FW version.
+ *
+ * @alloc_reply: handler used by the test object to request the op_mode
+ * to allocate an skb for sending a reply to the user, and initialize
+ * the skb. It is assumed that the test object only fills the required
+ * attributes.
+ *
+ * @reply: handler used by the test object to request the op_mode to reply
+ * to a request. The skb is an skb previously allocated by the the
+ * alloc_reply callback.
+ I
+ * @alloc_event: handler used by the test object to request the op_mode
+ * to allocate an skb for sending an event, and initialize
+ * the skb. It is assumed that the test object only fills the required
+ * attributes.
+ *
+ * @reply: handler used by the test object to request the op_mode to send
+ * an event. The skb is an skb previously allocated by the the
+ * alloc_event callback.
+ */
+struct iwl_test_ops {
+ int (*send_cmd)(struct iwl_op_mode *op_modes,
+ struct iwl_host_cmd *cmd);
+ bool (*valid_hw_addr)(u32 addr);
+ u32 (*get_fw_ver)(struct iwl_op_mode *op_mode);
+
+ struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len);
+ int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
+ struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len);
+ void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
+};
+
+struct iwl_test {
+ struct iwl_trans *trans;
+ struct iwl_test_ops *ops;
+ struct iwl_test_trace trace;
+ struct iwl_test_mem mem;
+ bool notify;
+};
+
+void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans,
+ struct iwl_test_ops *ops);
+
+void iwl_test_free(struct iwl_test *tst);
+
+int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
+ void *data, int len);
+
+int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb);
+
+int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
+ struct netlink_callback *cb);
+
+void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb);
+
+static inline void iwl_test_enable_notifications(struct iwl_test *tst,
+ bool enable)
+{
+ tst->notify = enable;
+}
+
+#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
deleted file mode 100644
index 060aac3e22f1..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ /dev/null
@@ -1,1114 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/dma-mapping.h>
-#include <net/net_namespace.h>
-#include <linux/netdevice.h>
-#include <net/cfg80211.h>
-#include <net/mac80211.h>
-#include <net/netlink.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-io.h"
-#include "iwl-agn.h"
-#include "iwl-testmode.h"
-#include "iwl-trans.h"
-#include "iwl-fh.h"
-#include "iwl-prph.h"
-
-
-/* Periphery registers absolute lower bound. This is used in order to
- * differentiate registery access through HBUS_TARG_PRPH_* and
- * HBUS_TARG_MEM_* accesses.
- */
-#define IWL_TM_ABS_PRPH_START (0xA00000)
-
-/* The TLVs used in the gnl message policy between the kernel module and
- * user space application. iwl_testmode_gnl_msg_policy is to be carried
- * through the NL80211_CMD_TESTMODE channel regulated by nl80211.
- * See iwl-testmode.h
- */
-static
-struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
- [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, },
- [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, },
- [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, },
- [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, },
- [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, },
- [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, },
- [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
-
- [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, },
- [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, },
- [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, },
-
- [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
- [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
- [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, },
- [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, },
- [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, },
-
- [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, },
-};
-
-/*
- * See the struct iwl_rx_packet in iwl-commands.h for the format of the
- * received events from the device
- */
-static inline int get_event_length(struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- if (pkt)
- return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- else
- return 0;
-}
-
-
-/*
- * This function multicasts the spontaneous messages from the device to the
- * user space. It is invoked whenever there is a received messages
- * from the device. This function is called within the ISR of the rx handlers
- * in iwlagn driver.
- *
- * The parsing of the message content is left to the user space application,
- * The message content is treated as unattacked raw data and is encapsulated
- * with IWL_TM_ATTR_UCODE_RX_PKT multicasting to the user space.
- *
- * @priv: the instance of iwlwifi device
- * @rxb: pointer to rx data content received by the ISR
- *
- * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[].
- * For the messages multicasting to the user application, the mandatory
- * TLV fields are :
- * IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT
- * IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content
- */
-
-static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct sk_buff *skb;
- void *data;
- int length;
-
- data = (void *)rxb_addr(rxb);
- length = get_event_length(rxb);
-
- if (!data || length == 0)
- return;
-
- skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length,
- GFP_ATOMIC);
- if (skb == NULL) {
- IWL_ERR(priv,
- "Run out of memory for messages to user space ?\n");
- return;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
- /* the length doesn't include len_n_flags field, so add it manually */
- nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
- goto nla_put_failure;
- cfg80211_testmode_event(skb, GFP_ATOMIC);
- return;
-
-nla_put_failure:
- kfree_skb(skb);
- IWL_ERR(priv, "Ouch, overran buffer, check allocation!\n");
-}
-
-void iwl_testmode_init(struct iwl_priv *priv)
-{
- priv->pre_rx_handler = NULL;
- priv->testmode_trace.trace_enabled = false;
- priv->testmode_mem.read_in_progress = false;
-}
-
-static void iwl_mem_cleanup(struct iwl_priv *priv)
-{
- if (priv->testmode_mem.read_in_progress) {
- kfree(priv->testmode_mem.buff_addr);
- priv->testmode_mem.buff_addr = NULL;
- priv->testmode_mem.buff_size = 0;
- priv->testmode_mem.num_chunks = 0;
- priv->testmode_mem.read_in_progress = false;
- }
-}
-
-static void iwl_trace_cleanup(struct iwl_priv *priv)
-{
- if (priv->testmode_trace.trace_enabled) {
- if (priv->testmode_trace.cpu_addr &&
- priv->testmode_trace.dma_addr)
- dma_free_coherent(priv->trans->dev,
- priv->testmode_trace.total_size,
- priv->testmode_trace.cpu_addr,
- priv->testmode_trace.dma_addr);
- priv->testmode_trace.trace_enabled = false;
- priv->testmode_trace.cpu_addr = NULL;
- priv->testmode_trace.trace_addr = NULL;
- priv->testmode_trace.dma_addr = 0;
- priv->testmode_trace.buff_size = 0;
- priv->testmode_trace.total_size = 0;
- }
-}
-
-
-void iwl_testmode_cleanup(struct iwl_priv *priv)
-{
- iwl_trace_cleanup(priv);
- iwl_mem_cleanup(priv);
-}
-
-
-/*
- * This function handles the user application commands to the ucode.
- *
- * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and
- * IWL_TM_ATTR_UCODE_CMD_DATA and calls to the handler to send the
- * host command to the ucode.
- *
- * If any mandatory field is missing, -ENOMSG is replied to the user space
- * application; otherwise, waits for the host command to be sent and checks
- * the return code. In case or error, it is returned, otherwise a reply is
- * allocated and the reply RX packet
- * is returned.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_host_cmd cmd;
- struct iwl_rx_packet *pkt;
- struct sk_buff *skb;
- void *reply_buf;
- u32 reply_len;
- int ret;
- bool cmd_want_skb;
-
- memset(&cmd, 0, sizeof(struct iwl_host_cmd));
-
- if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] ||
- !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) {
- IWL_ERR(priv, "Missing ucode command mandatory fields\n");
- return -ENOMSG;
- }
-
- cmd.flags = CMD_ON_DEMAND | CMD_SYNC;
- cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]);
- if (cmd_want_skb)
- cmd.flags |= CMD_WANT_SKB;
-
- cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]);
- cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
- cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]);
- cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x,"
- " len %d\n", cmd.id, cmd.flags, cmd.len[0]);
-
- ret = iwl_dvm_send_cmd(priv, &cmd);
- if (ret) {
- IWL_ERR(priv, "Failed to send hcmd\n");
- return ret;
- }
- if (!cmd_want_skb)
- return ret;
-
- /* Handling return of SKB to the user */
- pkt = cmd.resp_pkt;
- if (!pkt) {
- IWL_ERR(priv, "HCMD received a null response packet\n");
- return ret;
- }
-
- reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20);
- reply_buf = kmalloc(reply_len, GFP_KERNEL);
- if (!skb || !reply_buf) {
- kfree_skb(skb);
- kfree(reply_buf);
- return -ENOMEM;
- }
-
- /* The reply is in a page, that we cannot send to user space. */
- memcpy(reply_buf, &(pkt->hdr), reply_len);
- iwl_free_resp(&cmd);
-
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
- nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
- goto nla_put_failure;
- return cfg80211_testmode_reply(skb);
-
-nla_put_failure:
- IWL_DEBUG_INFO(priv, "Failed creating NL attributes\n");
- return -ENOMSG;
-}
-
-
-/*
- * This function handles the user application commands for register access.
- *
- * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
- * handlers respectively.
- *
- * If it's an unknown commdn ID, -ENOSYS is returned; or -ENOMSG if the
- * mandatory fields(IWL_TM_ATTR_REG_OFFSET,IWL_TM_ATTR_REG_VALUE32,
- * IWL_TM_ATTR_REG_VALUE8) are missing; Otherwise 0 is replied indicating
- * the success of the command execution.
- *
- * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read
- * value is returned with IWL_TM_ATTR_REG_VALUE32.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- u32 ofs, val32, cmd;
- u8 val8;
- struct sk_buff *skb;
- int status = 0;
-
- if (!tb[IWL_TM_ATTR_REG_OFFSET]) {
- IWL_ERR(priv, "Missing register offset\n");
- return -ENOMSG;
- }
- ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]);
- IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
-
- /* Allow access only to FH/CSR/HBUS in direct mode.
- Since we don't have the upper bounds for the CSR and HBUS segments,
- we will use only the upper bound of FH for sanity check. */
- cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
- if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 ||
- cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 ||
- cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) &&
- (ofs >= FH_MEM_UPPER_BOUND)) {
- IWL_ERR(priv, "offset out of segment (0x0 - 0x%x)\n",
- FH_MEM_UPPER_BOUND);
- return -EINVAL;
- }
-
- switch (cmd) {
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
- val32 = iwl_read_direct32(priv->trans, ofs);
- IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
-
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
- if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
- IWL_ERR(priv, "Missing value to write\n");
- return -ENOMSG;
- } else {
- val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
- IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
- iwl_write_direct32(priv->trans, ofs, val32);
- }
- break;
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
- if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
- IWL_ERR(priv, "Missing value to write\n");
- return -ENOMSG;
- } else {
- val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
- IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
- iwl_write8(priv->trans, ofs, val8);
- }
- break;
- default:
- IWL_ERR(priv, "Unknown testmode register command ID\n");
- return -ENOSYS;
- }
-
- return status;
-
-nla_put_failure:
- kfree_skb(skb);
- return -EMSGSIZE;
-}
-
-
-static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
-{
- struct iwl_notification_wait calib_wait;
- static const u8 calib_complete[] = {
- CALIBRATION_COMPLETE_NOTIFICATION
- };
- int ret;
-
- iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
- calib_complete, ARRAY_SIZE(calib_complete),
- NULL, NULL);
- ret = iwl_init_alive_start(priv);
- if (ret) {
- IWL_ERR(priv, "Fail init calibration: %d\n", ret);
- goto cfg_init_calib_error;
- }
-
- ret = iwl_wait_notification(&priv->notif_wait, &calib_wait, 2 * HZ);
- if (ret)
- IWL_ERR(priv, "Error detecting"
- " CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
- return ret;
-
-cfg_init_calib_error:
- iwl_remove_notification(&priv->notif_wait, &calib_wait);
- return ret;
-}
-
-/*
- * This function handles the user application commands for driver.
- *
- * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
- * handlers respectively.
- *
- * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
- * value of the actual command execution is replied to the user application.
- *
- * If there's any message responding to the user space, IWL_TM_ATTR_SYNC_RSP
- * is used for carry the message while IWL_TM_ATTR_COMMAND must set to
- * IWL_TM_CMD_DEV2APP_SYNC_RSP.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_trans *trans = priv->trans;
- struct sk_buff *skb;
- unsigned char *rsp_data_ptr = NULL;
- int status = 0, rsp_data_len = 0;
- u32 devid, inst_size = 0, data_size = 0;
- const struct fw_img *img;
-
- switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
- rsp_data_ptr = (unsigned char *)priv->cfg->name;
- rsp_data_len = strlen(priv->cfg->name);
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- rsp_data_len + 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
- IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
- nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
- rsp_data_len, rsp_data_ptr))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
- if (status)
- IWL_ERR(priv, "Error loading init ucode: %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
- iwl_testmode_cfg_init_calib(priv);
- priv->ucode_loaded = false;
- iwl_trans_stop_device(trans);
- break;
-
- case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
- if (status) {
- IWL_ERR(priv,
- "Error loading runtime ucode: %d\n", status);
- break;
- }
- status = iwl_alive_start(priv);
- if (status)
- IWL_ERR(priv,
- "Error starting the device: %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
- iwl_scan_cancel_timeout(priv, 200);
- priv->ucode_loaded = false;
- iwl_trans_stop_device(trans);
- status = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
- if (status) {
- IWL_ERR(priv,
- "Error loading WOWLAN ucode: %d\n", status);
- break;
- }
- status = iwl_alive_start(priv);
- if (status)
- IWL_ERR(priv,
- "Error starting the device: %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_EEPROM:
- if (priv->eeprom) {
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- priv->cfg->base_params->eeprom_size + 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
- IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
- nla_put(skb, IWL_TM_ATTR_EEPROM,
- priv->cfg->base_params->eeprom_size,
- priv->eeprom))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n",
- status);
- } else
- return -EFAULT;
- break;
-
- case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
- if (!tb[IWL_TM_ATTR_FIXRATE]) {
- IWL_ERR(priv, "Missing fixrate setting\n");
- return -ENOMSG;
- }
- priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
- IWL_INFO(priv, "uCode version raw: 0x%x\n",
- priv->fw->ucode_ver);
-
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
- priv->fw->ucode_ver))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
- devid = priv->trans->hw_id;
- IWL_INFO(priv, "hw version: 0x%x\n", devid);
-
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
-
- case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- return -ENOMEM;
- }
- if (!priv->ucode_loaded) {
- IWL_ERR(priv, "No uCode has not been loaded\n");
- return -EINVAL;
- } else {
- img = &priv->fw->img[priv->cur_ucode];
- inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
- data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
- }
- if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
- nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
- nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0)
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- break;
-
- default:
- IWL_ERR(priv, "Unknown testmode driver command ID\n");
- return -ENOSYS;
- }
- return status;
-
-nla_put_failure:
- kfree_skb(skb);
- return -EMSGSIZE;
-}
-
-
-/*
- * This function handles the user application commands for uCode trace
- *
- * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the
- * handlers respectively.
- *
- * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned
- * value of the actual command execution is replied to the user application.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct sk_buff *skb;
- int status = 0;
- struct device *dev = priv->trans->dev;
-
- switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
- if (priv->testmode_trace.trace_enabled)
- return -EBUSY;
-
- if (!tb[IWL_TM_ATTR_TRACE_SIZE])
- priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF;
- else
- priv->testmode_trace.buff_size =
- nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]);
- if (!priv->testmode_trace.buff_size)
- return -EINVAL;
- if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN ||
- priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX)
- return -EINVAL;
-
- priv->testmode_trace.total_size =
- priv->testmode_trace.buff_size + TRACE_BUFF_PADD;
- priv->testmode_trace.cpu_addr =
- dma_alloc_coherent(dev,
- priv->testmode_trace.total_size,
- &priv->testmode_trace.dma_addr,
- GFP_KERNEL);
- if (!priv->testmode_trace.cpu_addr)
- return -ENOMEM;
- priv->testmode_trace.trace_enabled = true;
- priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN(
- priv->testmode_trace.cpu_addr, 0x100);
- memset(priv->testmode_trace.trace_addr, 0x03B,
- priv->testmode_trace.buff_size);
- skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- sizeof(priv->testmode_trace.dma_addr) + 20);
- if (!skb) {
- IWL_ERR(priv, "Memory allocation fail\n");
- iwl_trace_cleanup(priv);
- return -ENOMEM;
- }
- if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
- sizeof(priv->testmode_trace.dma_addr),
- (u64 *)&priv->testmode_trace.dma_addr))
- goto nla_put_failure;
- status = cfg80211_testmode_reply(skb);
- if (status < 0) {
- IWL_ERR(priv, "Error sending msg : %d\n", status);
- }
- priv->testmode_trace.num_chunks =
- DIV_ROUND_UP(priv->testmode_trace.buff_size,
- DUMP_CHUNK_SIZE);
- break;
-
- case IWL_TM_CMD_APP2DEV_END_TRACE:
- iwl_trace_cleanup(priv);
- break;
- default:
- IWL_ERR(priv, "Unknown testmode mem command ID\n");
- return -ENOSYS;
- }
- return status;
-
-nla_put_failure:
- kfree_skb(skb);
- if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) ==
- IWL_TM_CMD_APP2DEV_BEGIN_TRACE)
- iwl_trace_cleanup(priv);
- return -EMSGSIZE;
-}
-
-static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- int idx, length;
-
- if (priv->testmode_trace.trace_enabled &&
- priv->testmode_trace.trace_addr) {
- idx = cb->args[4];
- if (idx >= priv->testmode_trace.num_chunks)
- return -ENOENT;
- length = DUMP_CHUNK_SIZE;
- if (((idx + 1) == priv->testmode_trace.num_chunks) &&
- (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
- length = priv->testmode_trace.buff_size %
- DUMP_CHUNK_SIZE;
-
- if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
- priv->testmode_trace.trace_addr +
- (DUMP_CHUNK_SIZE * idx)))
- goto nla_put_failure;
- idx++;
- cb->args[4] = idx;
- return 0;
- } else
- return -EFAULT;
-
- nla_put_failure:
- return -ENOBUFS;
-}
-
-/*
- * This function handles the user application switch ucode ownership.
- *
- * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_OWNER and
- * decide who the current owner of the uCode
- *
- * If the current owner is OWNERSHIP_TM, then the only host command
- * can deliver to uCode is from testmode, all the other host commands
- * will dropped.
- *
- * default driver is the owner of uCode in normal operational mode
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- u8 owner;
-
- if (!tb[IWL_TM_ATTR_UCODE_OWNER]) {
- IWL_ERR(priv, "Missing ucode owner\n");
- return -ENOMSG;
- }
-
- owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]);
- if (owner == IWL_OWNERSHIP_DRIVER) {
- priv->ucode_owner = owner;
- priv->pre_rx_handler = NULL;
- } else if (owner == IWL_OWNERSHIP_TM) {
- priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
- priv->ucode_owner = owner;
- } else {
- IWL_ERR(priv, "Invalid owner\n");
- return -EINVAL;
- }
- return 0;
-}
-
-static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
-{
- struct iwl_trans *trans = priv->trans;
- unsigned long flags;
- int i;
-
- if (size & 0x3)
- return -EINVAL;
- priv->testmode_mem.buff_size = size;
- priv->testmode_mem.buff_addr =
- kmalloc(priv->testmode_mem.buff_size, GFP_KERNEL);
- if (priv->testmode_mem.buff_addr == NULL)
- return -ENOMEM;
-
- /* Hard-coded periphery absolute address */
- if (IWL_TM_ABS_PRPH_START <= addr &&
- addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
- addr | (3 << 24));
- for (i = 0; i < size; i += 4)
- *(u32 *)(priv->testmode_mem.buff_addr + i) =
- iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
- iwl_release_nic_access(trans);
- spin_unlock_irqrestore(&trans->reg_lock, flags);
- } else { /* target memory (SRAM) */
- _iwl_read_targ_mem_words(trans, addr,
- priv->testmode_mem.buff_addr,
- priv->testmode_mem.buff_size / 4);
- }
-
- priv->testmode_mem.num_chunks =
- DIV_ROUND_UP(priv->testmode_mem.buff_size, DUMP_CHUNK_SIZE);
- priv->testmode_mem.read_in_progress = true;
- return 0;
-
-}
-
-static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
- u32 size, unsigned char *buf)
-{
- struct iwl_trans *trans = priv->trans;
- u32 val, i;
- unsigned long flags;
-
- if (IWL_TM_ABS_PRPH_START <= addr &&
- addr < IWL_TM_ABS_PRPH_START + PRPH_END) {
- /* Periphery writes can be 1-3 bytes long, or DWORDs */
- if (size < 4) {
- memcpy(&val, buf, size);
- spin_lock_irqsave(&trans->reg_lock, flags);
- iwl_grab_nic_access(trans);
- iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
- (addr & 0x0000FFFF) |
- ((size - 1) << 24));
- iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
- iwl_release_nic_access(trans);
- /* needed after consecutive writes w/o read */
- mmiowb();
- spin_unlock_irqrestore(&trans->reg_lock, flags);
- } else {
- if (size % 4)
- return -EINVAL;
- for (i = 0; i < size; i += 4)
- iwl_write_prph(trans, addr+i,
- *(u32 *)(buf+i));
- }
- } else if (iwlagn_hw_valid_rtc_data_addr(addr) ||
- (IWLAGN_RTC_INST_LOWER_BOUND <= addr &&
- addr < IWLAGN_RTC_INST_UPPER_BOUND)) {
- _iwl_write_targ_mem_words(trans, addr, buf, size/4);
- } else
- return -EINVAL;
- return 0;
-}
-
-/*
- * This function handles the user application commands for SRAM data dump
- *
- * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
- * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading
- *
- * Several error will be retured, -EBUSY if the SRAM data retrieved by
- * previous command has not been delivered to userspace, or -ENOMSG if
- * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR,IWL_TM_ATTR_SRAM_SIZE)
- * are missing, or -ENOMEM if the buffer allocation fails.
- *
- * Otherwise 0 is replied indicating the success of the SRAM reading.
- *
- * @hw: ieee80211_hw object that represents the device
- * @tb: gnl message fields from the user space
- */
-static int iwl_testmode_indirect_mem(struct ieee80211_hw *hw,
- struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- u32 addr, size, cmd;
- unsigned char *buf;
-
- /* Both read and write should be blocked, for atomicity */
- if (priv->testmode_mem.read_in_progress)
- return -EBUSY;
-
- cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
- if (!tb[IWL_TM_ATTR_MEM_ADDR]) {
- IWL_ERR(priv, "Error finding memory offset address\n");
- return -ENOMSG;
- }
- addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]);
- if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) {
- IWL_ERR(priv, "Error finding size for memory reading\n");
- return -ENOMSG;
- }
- size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]);
-
- if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ)
- return iwl_testmode_indirect_read(priv, addr, size);
- else {
- if (!tb[IWL_TM_ATTR_BUFFER_DUMP])
- return -EINVAL;
- buf = (unsigned char *) nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]);
- return iwl_testmode_indirect_write(priv, addr, size, buf);
- }
-}
-
-static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct netlink_callback *cb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- int idx, length;
-
- if (priv->testmode_mem.read_in_progress) {
- idx = cb->args[4];
- if (idx >= priv->testmode_mem.num_chunks) {
- iwl_mem_cleanup(priv);
- return -ENOENT;
- }
- length = DUMP_CHUNK_SIZE;
- if (((idx + 1) == priv->testmode_mem.num_chunks) &&
- (priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE))
- length = priv->testmode_mem.buff_size %
- DUMP_CHUNK_SIZE;
-
- if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
- priv->testmode_mem.buff_addr +
- (DUMP_CHUNK_SIZE * idx)))
- goto nla_put_failure;
- idx++;
- cb->args[4] = idx;
- return 0;
- } else
- return -EFAULT;
-
- nla_put_failure:
- return -ENOBUFS;
-}
-
-static int iwl_testmode_notifications(struct ieee80211_hw *hw,
- struct nlattr **tb)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- bool enable;
-
- enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]);
- if (enable)
- priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
- else
- priv->pre_rx_handler = NULL;
- return 0;
-}
-
-
-/* The testmode gnl message handler that takes the gnl message from the
- * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
- * invoke the corresponding handlers.
- *
- * This function is invoked when there is user space application sending
- * gnl message through the testmode tunnel NL80211_CMD_TESTMODE regulated
- * by nl80211.
- *
- * It retrieves the mandatory field, IWL_TM_ATTR_COMMAND, before
- * dispatching it to the corresponding handler.
- *
- * If IWL_TM_ATTR_COMMAND is missing, -ENOMSG is replied to user application;
- * -ENOSYS is replied to the user application if the command is unknown;
- * Otherwise, the command is dispatched to the respective handler.
- *
- * @hw: ieee80211_hw object that represents the device
- * @data: pointer to user space message
- * @len: length in byte of @data
- */
-int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
-{
- struct nlattr *tb[IWL_TM_ATTR_MAX];
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- int result;
-
- result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
- iwl_testmode_gnl_msg_policy);
- if (result != 0) {
- IWL_ERR(priv, "Error parsing the gnl message : %d\n", result);
- return result;
- }
-
- /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
- if (!tb[IWL_TM_ATTR_COMMAND]) {
- IWL_ERR(priv, "Missing testmode command type\n");
- return -ENOMSG;
- }
- /* in case multiple accesses to the device happens */
- mutex_lock(&priv->mutex);
-
- switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_UCODE:
- IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
- result = iwl_testmode_ucode(hw, tb);
- break;
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
- case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
- IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
- result = iwl_testmode_reg(hw, tb);
- break;
- case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
- case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
- case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- case IWL_TM_CMD_APP2DEV_GET_EEPROM:
- case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
- case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
- case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
- case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
- case IWL_TM_CMD_APP2DEV_GET_FW_INFO:
- IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
- result = iwl_testmode_driver(hw, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
- case IWL_TM_CMD_APP2DEV_END_TRACE:
- case IWL_TM_CMD_APP2DEV_READ_TRACE:
- IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n");
- result = iwl_testmode_trace(hw, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_OWNERSHIP:
- IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n");
- result = iwl_testmode_ownership(hw, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ:
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE:
- IWL_DEBUG_INFO(priv, "testmode indirect memory cmd "
- "to driver\n");
- result = iwl_testmode_indirect_mem(hw, tb);
- break;
-
- case IWL_TM_CMD_APP2DEV_NOTIFICATIONS:
- IWL_DEBUG_INFO(priv, "testmode notifications cmd "
- "to driver\n");
- result = iwl_testmode_notifications(hw, tb);
- break;
-
- default:
- IWL_ERR(priv, "Unknown testmode command\n");
- result = -ENOSYS;
- break;
- }
-
- mutex_unlock(&priv->mutex);
- return result;
-}
-
-int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct netlink_callback *cb,
- void *data, int len)
-{
- struct nlattr *tb[IWL_TM_ATTR_MAX];
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- int result;
- u32 cmd;
-
- if (cb->args[3]) {
- /* offset by 1 since commands start at 0 */
- cmd = cb->args[3] - 1;
- } else {
- result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len,
- iwl_testmode_gnl_msg_policy);
- if (result) {
- IWL_ERR(priv,
- "Error parsing the gnl message : %d\n", result);
- return result;
- }
-
- /* IWL_TM_ATTR_COMMAND is absolutely mandatory */
- if (!tb[IWL_TM_ATTR_COMMAND]) {
- IWL_ERR(priv, "Missing testmode command type\n");
- return -ENOMSG;
- }
- cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]);
- cb->args[3] = cmd + 1;
- }
-
- /* in case multiple accesses to the device happens */
- mutex_lock(&priv->mutex);
- switch (cmd) {
- case IWL_TM_CMD_APP2DEV_READ_TRACE:
- IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
- result = iwl_testmode_trace_dump(hw, skb, cb);
- break;
- case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP:
- IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
- result = iwl_testmode_buffer_dump(hw, skb, cb);
- break;
- default:
- result = -EINVAL;
- break;
- }
-
- mutex_unlock(&priv->mutex);
- return result;
-}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 79a1e7ae4995..92576a3e84ef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -154,6 +154,9 @@ struct iwl_cmd_header {
__le16 sequence;
} __packed;
+/* iwl_cmd_header flags value */
+#define IWL_CMD_FAILED_MSK 0x40
+
#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID 0x55550000
@@ -280,21 +283,24 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
#define MAX_NO_RECLAIM_CMDS 6
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
/*
* Maximum number of HW queues the transport layer
* currently supports
*/
#define IWL_MAX_HW_QUEUES 32
+#define IWL_INVALID_STATION 255
+#define IWL_MAX_TID_COUNT 8
+#define IWL_FRAME_LIMIT 64
/**
* struct iwl_trans_config - transport configuration
*
* @op_mode: pointer to the upper layer.
- * @queue_to_fifo: queue to FIFO mapping to set up by
- * default
- * @n_queue_to_fifo: number of queues to set up
* @cmd_queue: the index of the command queue.
* Must be set before start_fw.
+ * @cmd_fifo: the fifo for host commands
* @no_reclaim_cmds: Some devices erroneously don't set the
* SEQ_RX_FRAME bit on some notifications, this is the
* list of such notifications to filter. Max length is
@@ -309,10 +315,9 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
- const u8 *queue_to_fifo;
- u8 n_queue_to_fifo;
u8 cmd_queue;
+ u8 cmd_fifo;
const u8 *no_reclaim_cmds;
int n_no_reclaim_cmds;
@@ -350,10 +355,10 @@ struct iwl_trans;
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
* Must be atomic
- * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
- * ready and a successful ADDBA response has been received.
- * May sleep
- * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
+ * @txq_enable: setup a queue. To setup an AC queue, use the
+ * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
+ * this one. The op_mode must not configure the HCMD queue. May sleep.
+ * @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @wait_tx_queue_empty: wait until all tx queues are empty
* May sleep
@@ -386,9 +391,9 @@ struct iwl_trans_ops {
void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
struct sk_buff_head *skbs);
- void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo,
- int sta_id, int tid, int frame_limit, u16 ssn);
- void (*tx_agg_disable)(struct iwl_trans *trans, int queue);
+ void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn);
+ void (*txq_disable)(struct iwl_trans *trans, int queue);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
@@ -428,6 +433,11 @@ enum iwl_trans_state {
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
* @pm_support: set to true in start_hw if link pm is supported
* @wait_command_queue: the wait_queue for SYNC host commands
+ * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
+ * The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @dev_cmd_headroom: room needed for the transport's private use before the
+ * device_cmd for Tx - for internal use only
+ * The user should use iwl_trans_{alloc,free}_tx_cmd.
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -445,6 +455,11 @@ struct iwl_trans {
wait_queue_head_t wait_command_queue;
+ /* The following fields are internal only */
+ struct kmem_cache *dev_cmd_pool;
+ size_t dev_cmd_headroom;
+ char dev_cmd_pool_name[50];
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -483,9 +498,9 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
{
might_sleep();
- trans->ops->fw_alive(trans);
-
trans->state = IWL_TRANS_FW_ALIVE;
+
+ trans->ops->fw_alive(trans);
}
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
@@ -520,6 +535,26 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
return trans->ops->send_cmd(trans, cmd);
}
+static inline struct iwl_device_cmd *
+iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
+{
+ u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+
+ if (unlikely(dev_cmd_ptr == NULL))
+ return NULL;
+
+ return (struct iwl_device_cmd *)
+ (dev_cmd_ptr + trans->dev_cmd_headroom);
+}
+
+static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
+ struct iwl_device_cmd *dev_cmd)
+{
+ u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
+
+ kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
+}
+
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int queue)
{
@@ -538,27 +573,34 @@ static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
trans->ops->reclaim(trans, queue, ssn, skbs);
}
-static inline void iwl_trans_tx_agg_disable(struct iwl_trans *trans, int queue)
+static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
{
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"%s bad state = %d", __func__, trans->state);
- trans->ops->tx_agg_disable(trans, queue);
+ trans->ops->txq_disable(trans, queue);
}
-static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue,
- int fifo, int sta_id, int tid,
- int frame_limit, u16 ssn)
+static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
+ int fifo, int sta_id, int tid,
+ int frame_limit, u16 ssn)
{
might_sleep();
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
"%s bad state = %d", __func__, trans->state);
- trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid,
+ trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
frame_limit, ssn);
}
+static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
+ int fifo)
+{
+ iwl_trans_txq_enable(trans, queue, fifo, IWL_INVALID_STATION,
+ IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
+}
+
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/pcie/1000.c
index 2629a6602dfa..81b83f484f08 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/1000.c
@@ -27,9 +27,9 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-cfg.h"
#include "iwl-csr.h"
#include "iwl-agn-hw.h"
+#include "cfg.h"
/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 5
@@ -64,13 +64,26 @@ static const struct iwl_base_params iwl1000_base_params = {
.support_ct_kill_exit = true,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .wd_timeout = IWL_WATCHHDOG_DISABLED,
+ .wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 128,
};
static const struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+};
+
+static const struct iwl_eeprom_params iwl1000_eeprom_params = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ }
};
#define IWL_DEVICE_1000 \
@@ -84,6 +97,7 @@ static const struct iwl_ht_params iwl1000_ht_params = {
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.base_params = &iwl1000_base_params, \
+ .eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl1000_bgn_cfg = {
@@ -108,6 +122,7 @@ const struct iwl_cfg iwl1000_bg_cfg = {
.eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
.base_params = &iwl1000_base_params, \
+ .eeprom_params = &iwl1000_eeprom_params, \
.led_mode = IWL_LED_RF_STATE, \
.rx_with_siso_diversity = true
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/pcie/2000.c
index 8133105ac645..9fbde32f7559 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/2000.c
@@ -27,9 +27,9 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-cfg.h"
#include "iwl-agn-hw.h"
-#include "iwl-commands.h" /* needed for BT for now */
+#include "cfg.h"
+#include "dvm/commands.h" /* needed for BT for now */
/* Highest firmware API version supported */
#define IWL2030_UCODE_API_MAX 6
@@ -104,6 +104,7 @@ static const struct iwl_base_params iwl2030_base_params = {
static const struct iwl_ht_params iwl2000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
};
static const struct iwl_bt_params iwl2030_bt_params = {
@@ -111,11 +112,24 @@ static const struct iwl_bt_params iwl2030_bt_params = {
.advanced_bt_coexist = true,
.agg_time_limit = BT_AGG_THRESHOLD_DEF,
.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
- .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+ .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
.bt_sco_disable = true,
.bt_session_2 = true,
};
+static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ },
+ .enhanced_txpower = true,
+};
+
#define IWL_DEVICE_2000 \
.fw_name_pre = IWL2000_FW_PRE, \
.ucode_api_max = IWL2000_UCODE_API_MAX, \
@@ -127,6 +141,7 @@ static const struct iwl_bt_params iwl2030_bt_params = {
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2000_base_params, \
+ .eeprom_params = &iwl20x0_eeprom_params, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE
@@ -155,6 +170,7 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
+ .eeprom_params = &iwl20x0_eeprom_params, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -177,6 +193,7 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
.eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2000_base_params, \
+ .eeprom_params = &iwl20x0_eeprom_params, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -207,6 +224,7 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
+ .eeprom_params = &iwl20x0_eeprom_params, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/pcie/5000.c
index 8e26bc825f23..d1665fa6d15a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/5000.c
@@ -27,9 +27,9 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-cfg.h"
#include "iwl-agn-hw.h"
#include "iwl-csr.h"
+#include "cfg.h"
/* Highest firmware API version supported */
#define IWL5000_UCODE_API_MAX 5
@@ -62,13 +62,26 @@ static const struct iwl_base_params iwl5000_base_params = {
.led_compensation = 51,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
.chain_noise_scale = 1000,
- .wd_timeout = IWL_WATCHHDOG_DISABLED,
+ .wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,
.no_idle_support = true,
};
static const struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+static const struct iwl_eeprom_params iwl5000_eeprom_params = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
+ },
};
#define IWL_DEVICE_5000 \
@@ -82,6 +95,7 @@ static const struct iwl_ht_params iwl5000_ht_params = {
.eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
.base_params = &iwl5000_base_params, \
+ .eeprom_params = &iwl5000_eeprom_params, \
.led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl5300_agn_cfg = {
@@ -128,6 +142,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_ver = EEPROM_5050_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
.base_params = &iwl5000_base_params,
+ .eeprom_params = &iwl5000_eeprom_params,
.ht_params = &iwl5000_ht_params,
.led_mode = IWL_LED_BLINK,
.internal_wimax_coex = true,
@@ -144,6 +159,7 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
.base_params = &iwl5000_base_params, \
+ .eeprom_params = &iwl5000_eeprom_params, \
.no_xtal_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/pcie/6000.c
index e5e8ada4aaf6..4a57624afc40 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/pcie/6000.c
@@ -27,9 +27,9 @@
#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
-#include "iwl-cfg.h"
#include "iwl-agn-hw.h"
-#include "iwl-commands.h" /* needed for BT for now */
+#include "cfg.h"
+#include "dvm/commands.h" /* needed for BT for now */
/* Highest firmware API version supported */
#define IWL6000_UCODE_API_MAX 6
@@ -127,6 +127,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
static const struct iwl_ht_params iwl6000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
static const struct iwl_bt_params iwl6000_bt_params = {
@@ -138,6 +139,19 @@ static const struct iwl_bt_params iwl6000_bt_params = {
.bt_sco_disable = true,
};
+static const struct iwl_eeprom_params iwl6000_eeprom_params = {
+ .regulatory_bands = {
+ EEPROM_REG_BAND_1_CHANNELS,
+ EEPROM_REG_BAND_2_CHANNELS,
+ EEPROM_REG_BAND_3_CHANNELS,
+ EEPROM_REG_BAND_4_CHANNELS,
+ EEPROM_REG_BAND_5_CHANNELS,
+ EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_REG_BAND_52_HT40_CHANNELS
+ },
+ .enhanced_txpower = true,
+};
+
#define IWL_DEVICE_6005 \
.fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
@@ -149,6 +163,7 @@ static const struct iwl_bt_params iwl6000_bt_params = {
.eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE
@@ -204,6 +219,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true \
@@ -242,6 +258,7 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
.eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true
@@ -292,6 +309,7 @@ const struct iwl_cfg iwl130_bg_cfg = {
.eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
.base_params = &iwl6000_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK
const struct iwl_cfg iwl6000i_2agn_cfg = {
@@ -322,6 +340,7 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -346,6 +365,7 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
+ .eeprom_params = &iwl6000_eeprom_params, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -372,6 +392,7 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.base_params = &iwl6000_base_params,
+ .eeprom_params = &iwl6000_eeprom_params,
.ht_params = &iwl6000_ht_params,
.led_mode = IWL_LED_BLINK,
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
index 82152311d73b..82152311d73b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/pcie/cfg.h
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 0c8a1c2d8847..f4c3500b68c6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -68,10 +68,11 @@
#include <linux/pci-aspm.h>
#include "iwl-trans.h"
-#include "iwl-cfg.h"
#include "iwl-drv.h"
#include "iwl-trans.h"
-#include "iwl-trans-pcie-int.h"
+
+#include "cfg.h"
+#include "internal.h"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
.vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index e959207c630a..d9694c58208c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -269,10 +269,9 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
unsigned long status;
u8 cmd_queue;
+ u8 cmd_fifo;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
- u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
- u8 n_q_to_fifo;
bool rx_buf_size_8k;
u32 rx_page_order;
@@ -313,7 +312,7 @@ void iwl_bg_rx_replenish(struct work_struct *data);
void iwl_irq_tasklet(struct iwl_trans *trans);
void iwlagn_rx_replenish(struct iwl_trans *trans);
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
- struct iwl_rx_queue *q);
+ struct iwl_rx_queue *q);
/*****************************************************
* ICT
@@ -328,7 +327,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data);
* TX / HCMD
******************************************************/
void iwl_txq_update_write_ptr(struct iwl_trans *trans,
- struct iwl_tx_queue *txq);
+ struct iwl_tx_queue *txq);
int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
dma_addr_t addr, u16 len, u8 reset);
@@ -337,17 +336,13 @@ int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_tx_cmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
-void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue);
-void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
-void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, bool active);
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
- int sta_id, int tid, int frame_limit, u16 ssn);
-void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- enum dma_data_direction dma_dir);
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt);
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn);
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue);
+void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
struct sk_buff_head *skbs);
int iwl_queue_space(const struct iwl_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 08517d3c80bb..39a6ca1f009c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -32,7 +32,7 @@
#include "iwl-prph.h"
#include "iwl-io.h"
-#include "iwl-trans-pcie-int.h"
+#include "internal.h"
#include "iwl-op-mode.h"
#ifdef CONFIG_IWLWIFI_IDI
@@ -130,7 +130,7 @@ static int iwl_rx_queue_space(const struct iwl_rx_queue *q)
* iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
*/
void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
- struct iwl_rx_queue *q)
+ struct iwl_rx_queue *q)
{
unsigned long flags;
u32 reg;
@@ -201,9 +201,7 @@ static inline __le32 iwlagn_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
@@ -253,9 +251,7 @@ static void iwlagn_rx_queue_restock(struct iwl_trans *trans)
*/
static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
@@ -278,8 +274,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
gfp_mask |= __GFP_COMP;
/* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask,
- trans_pcie->rx_page_order);
+ page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
if (!page) {
if (net_ratelimit())
IWL_DEBUG_INFO(trans, "alloc_pages failed, "
@@ -315,9 +310,10 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
BUG_ON(rxb->page);
rxb->page = page;
/* Get physical address of the RB */
- rxb->page_dma = dma_map_page(trans->dev, page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ rxb->page_dma =
+ dma_map_page(trans->dev, page, 0,
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
/* dma address must be no more than 36 bits */
BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
/* and also 256 byte aligned! */
@@ -465,8 +461,8 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
} else
@@ -497,7 +493,7 @@ static void iwl_rx_handle(struct iwl_trans *trans)
/* Rx interrupt, but nothing sent from uCode */
if (i == r)
- IWL_DEBUG_RX(trans, "r = %d, i = %d\n", r, i);
+ IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
/* calculate total frames need to be restock after handling RX */
total_empty = r - rxq->write_actual;
@@ -513,8 +509,8 @@ static void iwl_rx_handle(struct iwl_trans *trans)
rxb = rxq->queue[i];
rxq->queue[i] = NULL;
- IWL_DEBUG_RX(trans, "rxbuf: r = %d, i = %d (%p)\n", rxb);
-
+ IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d (%p)\n",
+ r, i, rxb);
iwl_rx_handle_rxbuf(trans, rxb);
i = (i + 1) & RX_QUEUE_MASK;
@@ -546,12 +542,12 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
if (trans->cfg->internal_wimax_coex &&
(!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
- APMS_CLK_VAL_MRB_FUNC_MODE) ||
+ APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(trans, APMG_PS_CTRL_REG) &
- APMG_PS_CTRL_VAL_RESET_REQ))) {
- struct iwl_trans_pcie *trans_pcie;
+ APMG_PS_CTRL_VAL_RESET_REQ))) {
+ struct iwl_trans_pcie *trans_pcie =
+ IWL_TRANS_GET_PCIE_TRANS(trans);
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
iwl_op_mode_wimax_active(trans->op_mode);
wake_up(&trans->wait_command_queue);
@@ -567,6 +563,8 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
/* tasklet for iwlagn interrupt */
void iwl_irq_tasklet(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
unsigned long flags;
@@ -575,10 +573,6 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
u32 inta_mask;
#endif
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
-
-
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Ack/clear/reset pending uCode interrupts.
@@ -593,7 +587,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
* interrupt coalescing can still be achieved.
*/
iwl_write32(trans, CSR_INT,
- trans_pcie->inta | ~trans_pcie->inta_mask);
+ trans_pcie->inta | ~trans_pcie->inta_mask);
inta = trans_pcie->inta;
@@ -602,7 +596,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
/* just for debug */
inta_mask = iwl_read32(trans, CSR_INT_MASK);
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
- inta, inta_mask);
+ inta, inta_mask);
}
#endif
@@ -651,7 +645,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
hw_rfkill = iwl_is_rfkill_set(trans);
IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
- hw_rfkill ? "disable radio" : "enable radio");
+ hw_rfkill ? "disable radio" : "enable radio");
isr_stats->rfkill++;
@@ -693,7 +687,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
* Rx "responses" (frame-received notification), and other
* notifications from uCode come through here*/
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
- CSR_INT_BIT_RX_PERIODIC)) {
+ CSR_INT_BIT_RX_PERIODIC)) {
IWL_DEBUG_ISR(trans, "Rx interrupt\n");
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
@@ -733,7 +727,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
*/
if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
iwl_write8(trans, CSR_INT_PERIODIC_REG,
- CSR_INT_PERIODIC_ENA);
+ CSR_INT_PERIODIC_ENA);
isr_stats->rx++;
}
@@ -782,8 +776,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
/* Free dram table */
void iwl_free_isr_ict(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (trans_pcie->ict_tbl) {
dma_free_coherent(trans->dev, ICT_SIZE,
@@ -802,8 +795,7 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
*/
int iwl_alloc_isr_ict(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
dma_alloc_coherent(trans->dev, ICT_SIZE,
@@ -837,10 +829,9 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
*/
void iwl_reset_ict(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
unsigned long flags;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans_pcie->ict_tbl)
return;
@@ -868,9 +859,7 @@ void iwl_reset_ict(struct iwl_trans *trans)
/* Device is going down disable ict interrupt usage */
void iwl_disable_ict(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
unsigned long flags;
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -878,23 +867,19 @@ void iwl_disable_ict(struct iwl_trans *trans)
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
}
+/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
static irqreturn_t iwl_isr(int irq, void *data)
{
struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 inta, inta_mask;
- unsigned long flags;
#ifdef CONFIG_IWLWIFI_DEBUG
u32 inta_fh;
#endif
- if (!trans)
- return IRQ_NONE;
- trace_iwlwifi_dev_irq(trans->dev);
+ lockdep_assert_held(&trans_pcie->irq_lock);
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ trace_iwlwifi_dev_irq(trans->dev);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
@@ -918,7 +903,7 @@ static irqreturn_t iwl_isr(int irq, void *data)
/* Hardware disappeared. It might have already raised
* an interrupt */
IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
+ return IRQ_HANDLED;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -934,21 +919,16 @@ static irqreturn_t iwl_isr(int irq, void *data)
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
+ !trans_pcie->inta)
iwl_enable_interrupts(trans);
- unplugged:
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_HANDLED;
-
- none:
+none:
/* re-enable interrupts here since we don't have anything to service. */
/* only Re-enable if disabled by irq and no schedules tasklet. */
if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
+ !trans_pcie->inta)
iwl_enable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return IRQ_NONE;
}
@@ -974,15 +954,19 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
/* dram interrupt table not set yet,
* use legacy interrupt.
*/
- if (!trans_pcie->use_ict)
- return iwl_isr(irq, data);
+ if (unlikely(!trans_pcie->use_ict)) {
+ irqreturn_t ret = iwl_isr(irq, data);
+ spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ return ret;
+ }
trace_iwlwifi_dev_irq(trans->dev);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
@@ -1036,7 +1020,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
inta = (0xff & val) | ((0xff00 & val) << 16);
IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
- inta, inta_mask, val);
+ inta, inta_mask, val);
inta &= trans_pcie->inta_mask;
trans_pcie->inta |= inta;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 79c6b91417f9..939c2f78df58 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -70,15 +70,12 @@
#include "iwl-drv.h"
#include "iwl-trans.h"
-#include "iwl-trans-pcie-int.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
-#include "iwl-eeprom.h"
#include "iwl-agn-hw.h"
+#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
-#include "iwl-commands.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+#include "dvm/commands.h"
#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
(((1<<trans->cfg->base_params->num_of_queues) - 1) &\
@@ -86,8 +83,7 @@
static int iwl_trans_rx_alloc(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
struct device *dev = trans->dev;
@@ -114,7 +110,7 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans)
err_rb_stts:
dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- rxq->bd, rxq->bd_dma);
+ rxq->bd, rxq->bd_dma);
memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
rxq->bd = NULL;
err_bd:
@@ -123,8 +119,7 @@ err_bd:
static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
int i;
@@ -134,8 +129,8 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].page != NULL) {
dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
- PAGE_SIZE << trans_pcie->rx_page_order,
- DMA_FROM_DEVICE);
+ PAGE_SIZE << trans_pcie->rx_page_order,
+ DMA_FROM_DEVICE);
__free_pages(rxq->pool[i].page,
trans_pcie->rx_page_order);
rxq->pool[i].page = NULL;
@@ -193,8 +188,7 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
static int iwl_rx_init(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
int i, err;
@@ -236,10 +230,8 @@ static int iwl_rx_init(struct iwl_trans *trans)
static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
-
unsigned long flags;
/*if rxq->bd is NULL, it means that nothing has been allocated,
@@ -274,11 +266,11 @@ static int iwl_trans_rx_stop(struct iwl_trans *trans)
/* stop Rx DMA */
iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+ FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
-static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr, size_t size)
+static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr, size_t size)
{
if (WARN_ON(ptr->addr))
return -EINVAL;
@@ -291,8 +283,8 @@ static inline int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
return 0;
}
-static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
- struct iwl_dma_ptr *ptr)
+static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
+ struct iwl_dma_ptr *ptr)
{
if (unlikely(!ptr->addr))
return;
@@ -304,8 +296,13 @@ static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
{
struct iwl_tx_queue *txq = (void *)data;
+ struct iwl_queue *q = &txq->q;
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+ u32 scd_sram_addr = trans_pcie->scd_base_addr +
+ SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
+ u8 buf[16];
+ int i;
spin_lock(&txq->lock);
/* check if triggered erroneously */
@@ -315,26 +312,59 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
}
spin_unlock(&txq->lock);
-
IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
jiffies_to_msecs(trans_pcie->wd_timeout));
IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
txq->q.read_ptr, txq->q.write_ptr);
- IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
- & (TFD_QUEUE_SIZE_MAX - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));
+
+ iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+
+ iwl_print_hex_error(trans, buf, sizeof(buf));
+
+ for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
+ IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
+ iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
+
+ for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+ u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
+ u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+ u32 tbl_dw =
+ iwl_read_targ_mem(trans,
+ trans_pcie->scd_base_addr +
+ SCD_TRANS_TBL_OFFSET_QUEUE(i));
+
+ if (i & 0x1)
+ tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
+ else
+ tbl_dw = tbl_dw & 0x0000FFFF;
+
+ IWL_ERR(trans,
+ "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
+ i, active ? "" : "in", fifo, tbl_dw,
+ iwl_read_prph(trans,
+ SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
+ }
+
+ for (i = q->read_ptr; i != q->write_ptr;
+ i = iwl_queue_inc_wrap(i, q->n_bd)) {
+ struct iwl_tx_cmd *tx_cmd =
+ (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
+ IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
+ get_unaligned_le32(&tx_cmd->scratch));
+ }
iwl_op_mode_nic_error(trans->op_mode);
}
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
- struct iwl_tx_queue *txq, int slots_num,
- u32 txq_id)
+ struct iwl_tx_queue *txq, int slots_num,
+ u32 txq_id)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
int i;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
@@ -435,7 +465,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
spin_lock_bh(&txq->lock);
while (q->write_ptr != q->read_ptr) {
- iwlagn_txq_free_tfd(trans, txq, dma_dir);
+ iwl_txq_free_tfd(trans, txq, dma_dir);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
}
spin_unlock_bh(&txq->lock);
@@ -455,6 +485,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
struct device *dev = trans->dev;
int i;
+
if (WARN_ON(!txq))
return;
@@ -574,11 +605,11 @@ error:
}
static int iwl_tx_init(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
int txq_id, slots_num;
unsigned long flags;
bool alloc = false;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans_pcie->txq) {
ret = iwl_trans_tx_alloc(trans);
@@ -643,10 +674,9 @@ static void iwl_set_pwr_vmain(struct iwl_trans *trans)
static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int pos;
u16 pci_lnk_ctl;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
struct pci_dev *pci_dev = trans_pcie->pci_dev;
@@ -700,14 +730,14 @@ static int iwl_apm_init(struct iwl_trans *trans)
/* Disable L0S exit timer (platform NMI Work/Around) */
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
* don't wait for ICH L0s (ICH bug W/A)
*/
iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
/* Set FH wait threshold to maximum (HW error during stress W/A) */
iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
@@ -717,7 +747,7 @@ static int iwl_apm_init(struct iwl_trans *trans)
* wake device's PCI Express link L1a -> L0s
*/
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
iwl_apm_config(trans);
@@ -738,8 +768,8 @@ static int iwl_apm_init(struct iwl_trans *trans)
* and accesses to uCode SRAM.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
if (ret < 0) {
IWL_DEBUG_INFO(trans, "Failed to init the card\n");
goto out;
@@ -773,8 +803,8 @@ static int iwl_apm_stop_master(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
ret = iwl_poll_bit(trans, CSR_RESET,
- CSR_RESET_REG_FLAG_MASTER_DISABLED,
- CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
if (ret)
IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
@@ -816,8 +846,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
iwl_apm_init(trans);
/* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(trans, CSR_INT_COALESCING,
- IWL_HOST_INT_CALIB_TIMEOUT_DEF);
+ iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
@@ -836,8 +865,8 @@ static int iwl_nic_init(struct iwl_trans *trans)
if (trans->cfg->base_params->shadow_reg_enable) {
/* enable shadow regs in HW */
- iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
- 0x800FFFFF);
+ iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+ IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
}
return 0;
@@ -851,13 +880,13 @@ static int iwl_set_hw_ready(struct iwl_trans *trans)
int ret;
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
/* See if we got it */
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- HW_READY_TIMEOUT);
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ HW_READY_TIMEOUT);
IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
return ret;
@@ -877,11 +906,11 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
/* If HW is not ready, prepare the conditions to check again */
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
+ CSR_HW_IF_CONFIG_REG_PREPARE);
ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
if (ret < 0)
return ret;
@@ -908,32 +937,33 @@ static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
trans_pcie->ucode_write_complete = false;
iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
iwl_write_direct32(trans,
- FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
+ FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+ dst_addr);
iwl_write_direct32(trans,
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
iwl_write_direct32(trans,
- FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
- (iwl_get_dma_hi_addr(phy_addr)
- << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+ FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+ (iwl_get_dma_hi_addr(phy_addr)
+ << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
- 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
- FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+ FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
+ 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
+ FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
iwl_write_direct32(trans,
- FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+ FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
section_num);
@@ -1016,15 +1046,12 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/*
* Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
- * must be called under the irq lock and with MAC access
*/
static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
{
struct iwl_trans_pcie __maybe_unused *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- lockdep_assert_held(&trans_pcie->irq_lock);
-
iwl_write_prph(trans, SCD_TXFACT, mask);
}
@@ -1032,11 +1059,12 @@ static void iwl_tx_start(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 a;
- unsigned long flags;
- int i, chan;
+ int chan;
u32 reg_val;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ /* make sure all queue are not stopped/used */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -1063,64 +1091,26 @@ static void iwl_tx_start(struct iwl_trans *trans)
*/
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+ iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
+ trans_pcie->cmd_fifo);
+
+ /* Activate all Tx DMA/FIFO channels */
+ iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
+
/* Enable DMA channel */
for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
/* Update FH chicken bits */
reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
- iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
- SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
- iwl_write_prph(trans, SCD_AGGR_SEL, 0);
-
- /* initiate the queues */
- for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
- iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
- iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(i), 0);
- iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
- SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- ((SCD_WIN_SIZE <<
- SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((SCD_FRAME_LIMIT <<
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- }
-
- iwl_write_prph(trans, SCD_INTERRUPT_MASK,
- IWL_MASK(0, trans->cfg->base_params->num_of_queues));
-
- /* Activate all Tx DMA/FIFO channels */
- iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
-
- iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
-
- /* make sure all queue are not stopped/used */
- memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
- memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
-
- for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
- int fifo = trans_pcie->setup_q_to_fifo[i];
-
- set_bit(i, trans_pcie->queue_used);
-
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
- fifo, true);
- }
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-
/* Enable L1-Active */
iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
@@ -1134,9 +1124,9 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
*/
static int iwl_trans_tx_stop(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ch, txq_id, ret;
unsigned long flags;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* Turn off all Tx DMA fifos */
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1148,18 +1138,19 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
iwl_write_direct32(trans,
FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000);
+ FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
if (ret < 0)
- IWL_ERR(trans, "Failing on timeout while stopping"
- " DMA channel %d [0x%08x]", ch,
- iwl_read_direct32(trans,
- FH_TSSR_TX_STATUS_REG));
+ IWL_ERR(trans,
+ "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
+ ch,
+ iwl_read_direct32(trans,
+ FH_TSSR_TX_STATUS_REG));
}
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
if (!trans_pcie->txq) {
- IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
+ IWL_WARN(trans,
+ "Stopping tx queues that aren't allocated...\n");
return 0;
}
@@ -1173,8 +1164,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
- unsigned long flags;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ unsigned long flags;
/* tell the device to stop sending interrupts */
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
@@ -1204,7 +1195,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Make sure (redundant) we've released our request to stay awake */
iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_apm_stop(trans);
@@ -1273,13 +1264,27 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
+ /* In AGG mode, the index in the ring must correspond to the WiFi
+ * sequence number. This is a HW requirements to help the SCD to parse
+ * the BA.
+ * Check here that the packets are in the right place on the ring.
+ */
+#ifdef CONFIG_IWLWIFI_DEBUG
+ wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
+ ((wifi_seq & 0xff) != q->write_ptr),
+ "Q: %d WiFi Seq %d tfdNum %d",
+ txq_id, wifi_seq, q->write_ptr);
+#endif
+
/* Set up driver data for this TFD */
txq->entries[q->write_ptr].skb = skb;
txq->entries[q->write_ptr].cmd = dev_cmd;
dev_cmd->hdr.cmd = REPLY_TX;
- dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(q->write_ptr)));
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_meta = &txq->entries[q->write_ptr].meta;
@@ -1344,7 +1349,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* take back ownership of DMA buffer to enable update */
dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
@@ -1356,16 +1361,17 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
trace_iwlwifi_dev_tx(trans->dev,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
+ &txq->tfds[txq->q.write_ptr],
sizeof(struct iwl_tfd),
&dev_cmd->hdr, firstlen,
skb->data + hdr_len, secondlen);
/* start timer if queue currently empty */
- if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
+ if (txq->need_update && q->read_ptr == q->write_ptr &&
+ trans_pcie->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
/* Tell device the write index *just past* this latest filled TFD */
@@ -1395,8 +1401,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int err;
bool hw_rfkill;
@@ -1409,7 +1414,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
iwl_alloc_isr_ict(trans);
err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
- DRV_NAME, trans);
+ DRV_NAME, trans);
if (err) {
IWL_ERR(trans, "Error allocating IRQ %d\n",
trans_pcie->irq);
@@ -1422,7 +1427,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
err = iwl_prepare_card_hw(trans);
if (err) {
- IWL_ERR(trans, "Error while preparing HW: %d", err);
+ IWL_ERR(trans, "Error while preparing HW: %d\n", err);
goto err_free_irq;
}
@@ -1447,9 +1452,9 @@ error:
static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
bool op_mode_leaving)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
unsigned long flags;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_apm_stop(trans);
@@ -1520,6 +1525,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+ trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
else
@@ -1528,17 +1534,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
trans_pcie->n_no_reclaim_cmds * sizeof(u8));
- trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
-
- if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
- trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
-
- /* at least the command queue must be mapped */
- WARN_ON(!trans_pcie->n_q_to_fifo);
-
- memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
- trans_pcie->n_q_to_fifo * sizeof(u8));
-
trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
if (trans_pcie->rx_buf_size_8k)
trans_pcie->rx_page_order = get_order(8 * 1024);
@@ -1553,8 +1548,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_trans_pcie_tx_free(trans);
#ifndef CONFIG_IWLWIFI_IDI
@@ -1569,6 +1563,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iounmap(trans_pcie->hw_base);
pci_release_regions(trans_pcie->pci_dev);
pci_disable_device(trans_pcie->pci_dev);
+ kmem_cache_destroy(trans->dev_cmd_pool);
kfree(trans);
}
@@ -1816,8 +1811,8 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1853,11 +1848,11 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rx_queue *rxq = &trans_pcie->rxq;
char buf[256];
int pos = 0;
@@ -1881,11 +1876,10 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
char __user *user_buf,
- size_t count, loff_t *ppos) {
-
+ size_t count, loff_t *ppos)
+{
struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
int pos = 0;
@@ -1943,8 +1937,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
char buf[8];
@@ -1964,8 +1957,8 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
}
static ssize_t iwl_dbgfs_csr_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
char buf[8];
@@ -1985,8 +1978,8 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
char *buf;
@@ -2012,7 +2005,9 @@ static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
if (!trans->op_mode)
return -EAGAIN;
+ local_bh_disable();
iwl_op_mode_nic_error(trans->op_mode);
+ local_bh_enable();
return count;
}
@@ -2029,7 +2024,7 @@ DEBUGFS_WRITE_FILE_OPS(fw_restart);
*
*/
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
- struct dentry *dir)
+ struct dentry *dir)
{
DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
@@ -2041,9 +2036,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
- struct dentry *dir)
-{ return 0; }
-
+ struct dentry *dir)
+{
+ return 0;
+}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
static const struct iwl_trans_ops trans_ops_pcie = {
@@ -2060,8 +2056,8 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.tx = iwl_trans_pcie_tx,
.reclaim = iwl_trans_pcie_reclaim,
- .tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
- .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
+ .txq_disable = iwl_trans_pcie_txq_disable,
+ .txq_enable = iwl_trans_pcie_txq_enable,
.dbgfs_register = iwl_trans_pcie_dbgfs_register,
@@ -2088,7 +2084,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
int err;
trans = kzalloc(sizeof(struct iwl_trans) +
- sizeof(struct iwl_trans_pcie), GFP_KERNEL);
+ sizeof(struct iwl_trans_pcie), GFP_KERNEL);
if (WARN_ON(!trans))
return NULL;
@@ -2104,7 +2100,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
/* W/A - seems to solve weird behavior. We need to remove this if we
* don't want to stay in L1 all the time. This wastes a lot of power */
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
+ PCIE_LINK_STATE_CLKPM);
if (pci_enable_device(pdev)) {
err = -ENODEV;
@@ -2120,7 +2116,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (!err)
err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
+ DMA_BIT_MASK(32));
/* both attempts failed: */
if (err) {
dev_printk(KERN_ERR, &pdev->dev,
@@ -2131,25 +2127,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
+ dev_printk(KERN_ERR, &pdev->dev,
+ "pci_request_regions failed\n");
goto out_pci_disable_device;
}
trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
if (!trans_pcie->hw_base) {
- dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
+ dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
err = -ENODEV;
goto out_pci_release_regions;
}
dev_printk(KERN_INFO, &pdev->dev,
- "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
+ "pci_resource_len = 0x%08llx\n",
+ (unsigned long long) pci_resource_len(pdev, 0));
dev_printk(KERN_INFO, &pdev->dev,
- "pci_resource_base = %p\n", trans_pcie->hw_base);
+ "pci_resource_base = %p\n", trans_pcie->hw_base);
dev_printk(KERN_INFO, &pdev->dev,
- "HW Revision ID = 0x%X\n", pdev->revision);
+ "HW Revision ID = 0x%X\n", pdev->revision);
/* We disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state */
@@ -2158,7 +2155,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
err = pci_enable_msi(pdev);
if (err)
dev_printk(KERN_ERR, &pdev->dev,
- "pci_enable_msi failed(0X%x)", err);
+ "pci_enable_msi failed(0X%x)\n", err);
trans->dev = &pdev->dev;
trans_pcie->irq = pdev->irq;
@@ -2180,8 +2177,25 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans->wait_command_queue);
spin_lock_init(&trans->reg_lock);
+ snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
+ "iwl_cmd_pool:%s", dev_name(trans->dev));
+
+ trans->dev_cmd_headroom = 0;
+ trans->dev_cmd_pool =
+ kmem_cache_create(trans->dev_cmd_pool_name,
+ sizeof(struct iwl_device_cmd)
+ + trans->dev_cmd_headroom,
+ sizeof(void *),
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+
+ if (!trans->dev_cmd_pool)
+ goto out_pci_disable_msi;
+
return trans;
+out_pci_disable_msi:
+ pci_disable_msi(pdev);
out_pci_release_regions:
pci_release_regions(pdev);
out_pci_disable_device:
@@ -2190,4 +2204,3 @@ out_no_pci:
kfree(trans);
return NULL;
}
-
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index a8750238ee09..6baf8deef519 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -34,11 +34,10 @@
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
-#include "iwl-agn-hw.h"
#include "iwl-op-mode.h"
-#include "iwl-trans-pcie-int.h"
+#include "internal.h"
/* FIXME: need to abstract out TX command (once we know what it looks like) */
-#include "iwl-commands.h"
+#include "dvm/commands.h"
#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4
@@ -47,12 +46,11 @@
* iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
+ struct iwl_tx_queue *txq,
+ u16 byte_cnt)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int write_ptr = txq->q.write_ptr;
int txq_id = txq->q.id;
u8 sec_ctl = 0;
@@ -178,8 +176,8 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
return tfd->num_tbs & 0x1f;
}
-static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
- struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
+static void iwl_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
+ struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
{
int i;
int num_tbs;
@@ -209,7 +207,7 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
}
/**
- * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
* @trans - transport private data
* @txq - tx queue
* @dma_dir - the direction of the DMA mapping
@@ -217,8 +215,8 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
* Does NOT advance any TFD circular buffer read/write indexes
* Does NOT free the TFD itself (which is within circular buffer)
*/
-void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
- enum dma_data_direction dma_dir)
+void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
+ enum dma_data_direction dma_dir)
{
struct iwl_tfd *tfd_tmp = txq->tfds;
@@ -229,8 +227,8 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
lockdep_assert_held(&txq->lock);
/* We have only q->n_window txq->entries, but we use q->n_bd tfds */
- iwlagn_unmap_tfd(trans, &txq->entries[idx].meta,
- &tfd_tmp[rd_ptr], dma_dir);
+ iwl_unmap_tfd(trans, &txq->entries[idx].meta, &tfd_tmp[rd_ptr],
+ dma_dir);
/* free SKB */
if (txq->entries) {
@@ -270,7 +268,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
/* Each TFD can point to a maximum 20 Tx buffers */
if (num_tbs >= IWL_NUM_OF_TBS) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
+ IWL_NUM_OF_TBS);
return -EINVAL;
}
@@ -279,7 +277,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
if (unlikely(addr & ~IWL_TX_DMA_MASK))
IWL_ERR(trans, "Unaligned address = %llx\n",
- (unsigned long long)addr);
+ (unsigned long long)addr);
iwl_tfd_set_tb(tfd, num_tbs, addr, len);
@@ -382,16 +380,14 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
-static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
- u16 txq_id)
+static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
+ u16 txq_id)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 tbl_dw_addr;
u32 tbl_dw;
u16 scd_q2ratid;
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
-
scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
tbl_dw_addr = trans_pcie->scd_base_addr +
@@ -409,7 +405,7 @@ static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
return 0;
}
-static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
+static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id)
{
/* Simply stop the queue, but don't change any configuration;
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
@@ -419,102 +415,87 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
-void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
- int txq_id, u32 index)
-{
- IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
- iwl_write_direct32(trans, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, bool active)
-{
- int txq_id = txq->q.id;
-
- iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
- (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
- (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
- SCD_QUEUE_STTS_REG_MSK);
-
- if (active)
- IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
- txq_id, tx_fifo_id);
- else
- IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
-}
-
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
- int sta_id, int tid, int frame_limit, u16 ssn)
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+ int sta_id, int tid, int frame_limit, u16 ssn)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- u16 ra_tid = BUILD_RAxTID(sta_id, tid);
if (test_and_set_bit(txq_id, trans_pcie->queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
/* Stop this Tx queue before configuring it */
- iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+ iwl_txq_set_inactive(trans, txq_id);
- /* Map receiver-address / traffic-ID to this queue */
- iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
+ /* Set this queue as a chain-building queue unless it is CMD queue */
+ if (txq_id != trans_pcie->cmd_queue)
+ iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
- /* Set this queue as a chain-building queue */
- iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
+ /* If this queue is mapped to a certain station: it is an AGG queue */
+ if (sta_id != IWL_INVALID_STATION) {
+ u16 ra_tid = BUILD_RAxTID(sta_id, tid);
- /* enable aggregations for the queue */
- iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ /* Map receiver-address / traffic-ID to this queue */
+ iwl_txq_set_ratid_map(trans, ra_tid, txq_id);
+
+ /* enable aggregations for the queue */
+ iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ } else {
+ /*
+ * disable aggregations for the queue, this will also make the
+ * ra_tid mapping configuration irrelevant since it is now a
+ * non-AGG queue.
+ */
+ iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ }
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
- iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
+
+ iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+ (ssn & 0xff) | (txq_id << 8));
+ iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
/* Set up Tx window size and frame limit for this queue */
iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+ SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
+ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
- iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
-
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
- fifo, true);
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+ (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+ SCD_QUEUE_STTS_REG_MSK);
+ IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n",
+ txq_id, fifo, ssn & 0xff);
}
-void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u16 rd_ptr, wr_ptr;
+ int n_bd = trans_pcie->txq[txq_id].q.n_bd;
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
WARN_ONCE(1, "queue %d not used", txq_id);
return;
}
- iwlagn_tx_queue_stop_scheduler(trans, txq_id);
-
- iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+ rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
+ wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
- trans_pcie->txq[txq_id].q.read_ptr = 0;
- trans_pcie->txq[txq_id].q.write_ptr = 0;
- iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+ WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
+ txq_id, rd_ptr, wr_ptr);
- iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));
-
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
- 0, false);
+ iwl_txq_set_inactive(trans, txq_id);
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -615,13 +596,13 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
IWL_DEBUG_HC(trans,
- "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
- trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
- out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
- q->write_ptr, idx, trans_pcie->cmd_queue);
+ "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+ trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
+ out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+ cmd_size, q->write_ptr, idx, trans_pcie->cmd_queue);
phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
idx = -ENOMEM;
goto out;
@@ -630,8 +611,7 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
dma_unmap_addr_set(out_meta, mapping, phys_addr);
dma_unmap_len_set(out_meta, len, copy_size);
- iwlagn_txq_attach_buf_to_tfd(trans, txq,
- phys_addr, copy_size, 1);
+ iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr, copy_size, 1);
#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
trace_bufs[0] = &out_cmd->hdr;
trace_lens[0] = copy_size;
@@ -643,13 +623,12 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
continue;
if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
continue;
- phys_addr = dma_map_single(trans->dev,
- (void *)cmd->data[i],
+ phys_addr = dma_map_single(trans->dev, (void *)cmd->data[i],
cmd->len[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwlagn_unmap_tfd(trans, out_meta,
- &txq->tfds[q->write_ptr],
- DMA_BIDIRECTIONAL);
+ iwl_unmap_tfd(trans, out_meta,
+ &txq->tfds[q->write_ptr],
+ DMA_BIDIRECTIONAL);
idx = -ENOMEM;
goto out;
}
@@ -723,9 +702,10 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
lockdep_assert_held(&txq->lock);
if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
- IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
- "index %d is out of range [0-%d] %d %d.\n", __func__,
- txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
+ IWL_ERR(trans,
+ "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
return;
}
@@ -733,8 +713,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
if (nfreed++ > 0) {
- IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
- q->write_ptr, q->read_ptr);
+ IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
+ idx, q->write_ptr, q->read_ptr);
iwl_op_mode_nic_error(trans->op_mode);
}
@@ -771,9 +751,9 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
* in the queue management code. */
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans_pcie->cmd_queue, sequence,
- trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
- trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
+ txq_id, trans_pcie->cmd_queue, sequence,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
+ trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
@@ -784,8 +764,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
- iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
- DMA_BIDIRECTIONAL);
+ iwl_unmap_tfd(trans, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -870,8 +849,9 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
ret = wait_event_timeout(trans->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status),
- HOST_COMPLETE_TIMEOUT);
+ !test_bit(STATUS_HCMD_ACTIVE,
+ &trans_pcie->status),
+ HOST_COMPLETE_TIMEOUT);
if (!ret) {
if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
struct iwl_tx_queue *txq =
@@ -956,10 +936,10 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
if ((index >= q->n_bd) ||
(iwl_queue_used(q, last_to_free) == 0)) {
- IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
- "last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free, q->n_bd,
- q->write_ptr, q->read_ptr);
+ IWL_ERR(trans,
+ "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free, q->n_bd,
+ q->write_ptr, q->read_ptr);
return 0;
}
@@ -979,7 +959,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
- iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
+ iwl_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
freed++;
}
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
deleted file mode 100644
index 7107ce53d4d4..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
-config IWM
- tristate "Intel Wireless Multicomm 3200 WiFi driver (EXPERIMENTAL)"
- depends on MMC && EXPERIMENTAL
- depends on CFG80211
- select FW_LOADER
- select IWMC3200TOP
- help
- The Intel Wireless Multicomm 3200 hardware is a combo
- card with GPS, Bluetooth, WiMax and 802.11 radios. It
- runs over SDIO and is typically found on Moorestown
- based platform. This driver takes care of the 802.11
- part, which is a fullmac one.
-
- If you choose to build it as a module, it'll be called
- iwmc3200wifi.ko.
-
-config IWM_DEBUG
- bool "Enable full debugging output in iwmc3200wifi"
- depends on IWM && DEBUG_FS
- help
- This option will enable debug tracing and setting for iwm
-
- You can set the debug level and module through debugfs. By
- default all modules are set to the IWL_DL_ERR level.
- To see the list of debug modules and levels, see iwm/debug.h
-
- For example, if you want the full MLME debug output:
- echo 0xff > /sys/kernel/debug/iwm/phyN/debug/mlme
-
- Or, if you want the full debug, for all modules:
- echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level
- echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules
-
-config IWM_TRACING
- bool "Enable event tracing for iwmc3200wifi"
- depends on IWM && EVENT_TRACING
- help
- Say Y here to trace all the commands and responses between
- the driver and firmware (including TX/RX frames) with ftrace.
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
deleted file mode 100644
index cdc7e07ba113..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-obj-$(CONFIG_IWM) := iwmc3200wifi.o
-iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
-iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o
-
-iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
-iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o
-
-CFLAGS_trace.o := -I$(src)
-
-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwmc3200wifi/bus.h b/drivers/net/wireless/iwmc3200wifi/bus.h
deleted file mode 100644
index 62edd5888a7b..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/bus.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IWM_BUS_H__
-#define __IWM_BUS_H__
-
-#include "iwm.h"
-
-struct iwm_if_ops {
- int (*enable)(struct iwm_priv *iwm);
- int (*disable)(struct iwm_priv *iwm);
- int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count);
-
- void (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
- void (*debugfs_exit)(struct iwm_priv *iwm);
-
- const char *umac_name;
- const char *calib_lmac_name;
- const char *lmac_name;
-};
-
-static inline int iwm_bus_send_chunk(struct iwm_priv *iwm, u8 *buf, int count)
-{
- return iwm->bus_ops->send_chunk(iwm, buf, count);
-}
-
-static inline int iwm_bus_enable(struct iwm_priv *iwm)
-{
- return iwm->bus_ops->enable(iwm);
-}
-
-static inline int iwm_bus_disable(struct iwm_priv *iwm)
-{
- return iwm->bus_ops->disable(iwm);
-}
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
deleted file mode 100644
index 48e8218fd23b..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ /dev/null
@@ -1,882 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/slab.h>
-#include <net/cfg80211.h>
-
-#include "iwm.h"
-#include "commands.h"
-#include "cfg80211.h"
-#include "debug.h"
-
-#define RATETAB_ENT(_rate, _rateid, _flags) \
- { \
- .bitrate = (_rate), \
- .hw_value = (_rateid), \
- .flags = (_flags), \
- }
-
-#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_channel), \
- .flags = (_flags), \
- .max_antenna_gain = 0, \
- .max_power = 30, \
-}
-
-#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = 5000 + (5 * (_channel)), \
- .hw_value = (_channel), \
- .flags = (_flags), \
- .max_antenna_gain = 0, \
- .max_power = 30, \
-}
-
-static struct ieee80211_rate iwm_rates[] = {
- RATETAB_ENT(10, 0x1, 0),
- RATETAB_ENT(20, 0x2, 0),
- RATETAB_ENT(55, 0x4, 0),
- RATETAB_ENT(110, 0x8, 0),
- RATETAB_ENT(60, 0x10, 0),
- RATETAB_ENT(90, 0x20, 0),
- RATETAB_ENT(120, 0x40, 0),
- RATETAB_ENT(180, 0x80, 0),
- RATETAB_ENT(240, 0x100, 0),
- RATETAB_ENT(360, 0x200, 0),
- RATETAB_ENT(480, 0x400, 0),
- RATETAB_ENT(540, 0x800, 0),
-};
-
-#define iwm_a_rates (iwm_rates + 4)
-#define iwm_a_rates_size 8
-#define iwm_g_rates (iwm_rates + 0)
-#define iwm_g_rates_size 12
-
-static struct ieee80211_channel iwm_2ghz_channels[] = {
- CHAN2G(1, 2412, 0),
- CHAN2G(2, 2417, 0),
- CHAN2G(3, 2422, 0),
- CHAN2G(4, 2427, 0),
- CHAN2G(5, 2432, 0),
- CHAN2G(6, 2437, 0),
- CHAN2G(7, 2442, 0),
- CHAN2G(8, 2447, 0),
- CHAN2G(9, 2452, 0),
- CHAN2G(10, 2457, 0),
- CHAN2G(11, 2462, 0),
- CHAN2G(12, 2467, 0),
- CHAN2G(13, 2472, 0),
- CHAN2G(14, 2484, 0),
-};
-
-static struct ieee80211_channel iwm_5ghz_a_channels[] = {
- CHAN5G(34, 0), CHAN5G(36, 0),
- CHAN5G(38, 0), CHAN5G(40, 0),
- CHAN5G(42, 0), CHAN5G(44, 0),
- CHAN5G(46, 0), CHAN5G(48, 0),
- CHAN5G(52, 0), CHAN5G(56, 0),
- CHAN5G(60, 0), CHAN5G(64, 0),
- CHAN5G(100, 0), CHAN5G(104, 0),
- CHAN5G(108, 0), CHAN5G(112, 0),
- CHAN5G(116, 0), CHAN5G(120, 0),
- CHAN5G(124, 0), CHAN5G(128, 0),
- CHAN5G(132, 0), CHAN5G(136, 0),
- CHAN5G(140, 0), CHAN5G(149, 0),
- CHAN5G(153, 0), CHAN5G(157, 0),
- CHAN5G(161, 0), CHAN5G(165, 0),
- CHAN5G(184, 0), CHAN5G(188, 0),
- CHAN5G(192, 0), CHAN5G(196, 0),
- CHAN5G(200, 0), CHAN5G(204, 0),
- CHAN5G(208, 0), CHAN5G(212, 0),
- CHAN5G(216, 0),
-};
-
-static struct ieee80211_supported_band iwm_band_2ghz = {
- .channels = iwm_2ghz_channels,
- .n_channels = ARRAY_SIZE(iwm_2ghz_channels),
- .bitrates = iwm_g_rates,
- .n_bitrates = iwm_g_rates_size,
-};
-
-static struct ieee80211_supported_band iwm_band_5ghz = {
- .channels = iwm_5ghz_a_channels,
- .n_channels = ARRAY_SIZE(iwm_5ghz_a_channels),
- .bitrates = iwm_a_rates,
- .n_bitrates = iwm_a_rates_size,
-};
-
-static int iwm_key_init(struct iwm_key *key, u8 key_index,
- const u8 *mac_addr, struct key_params *params)
-{
- key->hdr.key_idx = key_index;
- if (!mac_addr || is_broadcast_ether_addr(mac_addr)) {
- key->hdr.multicast = 1;
- memset(key->hdr.mac, 0xff, ETH_ALEN);
- } else {
- key->hdr.multicast = 0;
- memcpy(key->hdr.mac, mac_addr, ETH_ALEN);
- }
-
- if (params) {
- if (params->key_len > WLAN_MAX_KEY_LEN ||
- params->seq_len > IW_ENCODE_SEQ_MAX_SIZE)
- return -EINVAL;
-
- key->cipher = params->cipher;
- key->key_len = params->key_len;
- key->seq_len = params->seq_len;
- memcpy(key->key, params->key, key->key_len);
- memcpy(key->seq, params->seq, key->seq_len);
- }
-
- return 0;
-}
-
-static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- struct key_params *params)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key;
- int ret;
-
- IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr);
-
- if (key_index >= IWM_NUM_KEYS)
- return -ENOENT;
-
- key = &iwm->keys[key_index];
- memset(key, 0, sizeof(struct iwm_key));
- ret = iwm_key_init(key, key_index, mac_addr, params);
- if (ret < 0) {
- IWM_ERR(iwm, "Invalid key_params\n");
- return ret;
- }
-
- return iwm_set_key(iwm, 0, key);
-}
-
-static int iwm_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr,
- void *cookie,
- void (*callback)(void *cookie,
- struct key_params*))
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key;
- struct key_params params;
-
- IWM_DBG_WEXT(iwm, DBG, "Getting key %d\n", key_index);
-
- if (key_index >= IWM_NUM_KEYS)
- return -ENOENT;
-
- memset(&params, 0, sizeof(params));
-
- key = &iwm->keys[key_index];
- params.cipher = key->cipher;
- params.key_len = key->key_len;
- params.seq_len = key->seq_len;
- params.seq = key->seq;
- params.key = key->key;
-
- callback(cookie, &params);
-
- return key->key_len ? 0 : -ENOENT;
-}
-
-
-static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
- u8 key_index, bool pairwise, const u8 *mac_addr)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key;
-
- if (key_index >= IWM_NUM_KEYS)
- return -ENOENT;
-
- key = &iwm->keys[key_index];
- if (!iwm->keys[key_index].key_len) {
- IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index);
- return 0;
- }
-
- if (key_index == iwm->default_key)
- iwm->default_key = -1;
-
- return iwm_set_key(iwm, 1, key);
-}
-
-static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
- struct net_device *ndev,
- u8 key_index, bool unicast,
- bool multicast)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
-
- IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index);
-
- if (key_index >= IWM_NUM_KEYS)
- return -ENOENT;
-
- if (!iwm->keys[key_index].key_len) {
- IWM_ERR(iwm, "Key %d not used\n", key_index);
- return -EINVAL;
- }
-
- iwm->default_key = key_index;
-
- return iwm_set_tx_key(iwm, key_index);
-}
-
-static int iwm_cfg80211_get_station(struct wiphy *wiphy,
- struct net_device *ndev,
- u8 *mac, struct station_info *sinfo)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
-
- if (memcmp(mac, iwm->bssid, ETH_ALEN))
- return -ENOENT;
-
- sinfo->filled |= STATION_INFO_TX_BITRATE;
- sinfo->txrate.legacy = iwm->rate * 10;
-
- if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
- sinfo->filled |= STATION_INFO_SIGNAL;
- sinfo->signal = iwm->wstats.qual.level;
- }
-
- return 0;
-}
-
-
-int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct iwm_bss_info *bss;
- struct iwm_umac_notif_bss_info *umac_bss;
- struct ieee80211_mgmt *mgmt;
- struct ieee80211_channel *channel;
- struct ieee80211_supported_band *band;
- s32 signal;
- int freq;
-
- list_for_each_entry(bss, &iwm->bss_list, node) {
- umac_bss = bss->bss;
- mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
-
- if (umac_bss->band == UMAC_BAND_2GHZ)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- else if (umac_bss->band == UMAC_BAND_5GHZ)
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- else {
- IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
- return -EINVAL;
- }
-
- freq = ieee80211_channel_to_frequency(umac_bss->channel,
- band->band);
- channel = ieee80211_get_channel(wiphy, freq);
- signal = umac_bss->rssi * 100;
-
- if (!cfg80211_inform_bss_frame(wiphy, channel, mgmt,
- le16_to_cpu(umac_bss->frame_len),
- signal, GFP_KERNEL))
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int iwm_cfg80211_change_iface(struct wiphy *wiphy,
- struct net_device *ndev,
- enum nl80211_iftype type, u32 *flags,
- struct vif_params *params)
-{
- struct wireless_dev *wdev;
- struct iwm_priv *iwm;
- u32 old_mode;
-
- wdev = ndev->ieee80211_ptr;
- iwm = ndev_to_iwm(ndev);
- old_mode = iwm->conf.mode;
-
- switch (type) {
- case NL80211_IFTYPE_STATION:
- iwm->conf.mode = UMAC_MODE_BSS;
- break;
- case NL80211_IFTYPE_ADHOC:
- iwm->conf.mode = UMAC_MODE_IBSS;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- wdev->iftype = type;
-
- if ((old_mode == iwm->conf.mode) || !iwm->umac_profile)
- return 0;
-
- iwm->umac_profile->mode = cpu_to_le32(iwm->conf.mode);
-
- if (iwm->umac_profile_active)
- iwm_invalidate_mlme_profile(iwm);
-
- return 0;
-}
-
-static int iwm_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
- struct cfg80211_scan_request *request)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
- int ret;
-
- if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
- IWM_ERR(iwm, "Scan while device is not ready\n");
- return -EIO;
- }
-
- if (test_bit(IWM_STATUS_SCANNING, &iwm->status)) {
- IWM_ERR(iwm, "Scanning already\n");
- return -EAGAIN;
- }
-
- if (test_bit(IWM_STATUS_SCAN_ABORTING, &iwm->status)) {
- IWM_ERR(iwm, "Scanning being aborted\n");
- return -EAGAIN;
- }
-
- set_bit(IWM_STATUS_SCANNING, &iwm->status);
-
- ret = iwm_scan_ssids(iwm, request->ssids, request->n_ssids);
- if (ret) {
- clear_bit(IWM_STATUS_SCANNING, &iwm->status);
- return ret;
- }
-
- iwm->scan_request = request;
- return 0;
-}
-
-static int iwm_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
- (iwm->conf.rts_threshold != wiphy->rts_threshold)) {
- int ret;
-
- iwm->conf.rts_threshold = wiphy->rts_threshold;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_RTS_THRESHOLD,
- iwm->conf.rts_threshold);
- if (ret < 0)
- return ret;
- }
-
- if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
- (iwm->conf.frag_threshold != wiphy->frag_threshold)) {
- int ret;
-
- iwm->conf.frag_threshold = wiphy->frag_threshold;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
- CFG_FRAG_THRESHOLD,
- iwm->conf.frag_threshold);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_ibss_params *params)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
- struct ieee80211_channel *chan = params->channel;
-
- if (!test_bit(IWM_STATUS_READY, &iwm->status))
- return -EIO;
-
- /* UMAC doesn't support creating or joining an IBSS network
- * with specified bssid. */
- if (params->bssid)
- return -EOPNOTSUPP;
-
- iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
- iwm->umac_profile->ibss.band = chan->band;
- iwm->umac_profile->ibss.channel = iwm->channel;
- iwm->umac_profile->ssid.ssid_len = params->ssid_len;
- memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
-
- return iwm_send_mlme_profile(iwm);
-}
-
-static int iwm_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- if (iwm->umac_profile_active)
- return iwm_invalidate_mlme_profile(iwm);
-
- return 0;
-}
-
-static int iwm_set_auth_type(struct iwm_priv *iwm,
- enum nl80211_auth_type sme_auth_type)
-{
- u8 *auth_type = &iwm->umac_profile->sec.auth_type;
-
- switch (sme_auth_type) {
- case NL80211_AUTHTYPE_AUTOMATIC:
- case NL80211_AUTHTYPE_OPEN_SYSTEM:
- IWM_DBG_WEXT(iwm, DBG, "OPEN auth\n");
- *auth_type = UMAC_AUTH_TYPE_OPEN;
- break;
- case NL80211_AUTHTYPE_SHARED_KEY:
- if (iwm->umac_profile->sec.flags &
- (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) {
- IWM_DBG_WEXT(iwm, DBG, "WPA auth alg\n");
- *auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
- } else {
- IWM_DBG_WEXT(iwm, DBG, "WEP shared key auth alg\n");
- *auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
- }
-
- break;
- default:
- IWM_ERR(iwm, "Unsupported auth alg: 0x%x\n", sme_auth_type);
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static int iwm_set_wpa_version(struct iwm_priv *iwm, u32 wpa_version)
-{
- IWM_DBG_WEXT(iwm, DBG, "wpa_version: %d\n", wpa_version);
-
- if (!wpa_version) {
- iwm->umac_profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
- return 0;
- }
-
- if (wpa_version & NL80211_WPA_VERSION_1)
- iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
-
- if (wpa_version & NL80211_WPA_VERSION_2)
- iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
-
- return 0;
-}
-
-static int iwm_set_cipher(struct iwm_priv *iwm, u32 cipher, bool ucast)
-{
- u8 *profile_cipher = ucast ? &iwm->umac_profile->sec.ucast_cipher :
- &iwm->umac_profile->sec.mcast_cipher;
-
- if (!cipher) {
- *profile_cipher = UMAC_CIPHER_TYPE_NONE;
- return 0;
- }
-
- IWM_DBG_WEXT(iwm, DBG, "%ccast cipher is 0x%x\n", ucast ? 'u' : 'm',
- cipher);
-
- switch (cipher) {
- case IW_AUTH_CIPHER_NONE:
- *profile_cipher = UMAC_CIPHER_TYPE_NONE;
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- *profile_cipher = UMAC_CIPHER_TYPE_WEP_40;
- break;
- case WLAN_CIPHER_SUITE_WEP104:
- *profile_cipher = UMAC_CIPHER_TYPE_WEP_104;
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- *profile_cipher = UMAC_CIPHER_TYPE_TKIP;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- *profile_cipher = UMAC_CIPHER_TYPE_CCMP;
- break;
- default:
- IWM_ERR(iwm, "Unsupported cipher: 0x%x\n", cipher);
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static int iwm_set_key_mgt(struct iwm_priv *iwm, u32 key_mgt)
-{
- u8 *auth_type = &iwm->umac_profile->sec.auth_type;
-
- IWM_DBG_WEXT(iwm, DBG, "key_mgt: 0x%x\n", key_mgt);
-
- if (key_mgt == WLAN_AKM_SUITE_8021X)
- *auth_type = UMAC_AUTH_TYPE_8021X;
- else if (key_mgt == WLAN_AKM_SUITE_PSK) {
- if (iwm->umac_profile->sec.flags &
- (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK))
- *auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
- else
- *auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
- } else {
- IWM_ERR(iwm, "Invalid key mgt: 0x%x\n", key_mgt);
- return -EINVAL;
- }
-
- return 0;
-}
-
-
-static int iwm_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_connect_params *sme)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
- struct ieee80211_channel *chan = sme->channel;
- struct key_params key_param;
- int ret;
-
- if (!test_bit(IWM_STATUS_READY, &iwm->status))
- return -EIO;
-
- if (!sme->ssid)
- return -EINVAL;
-
- if (iwm->umac_profile_active) {
- ret = iwm_invalidate_mlme_profile(iwm);
- if (ret) {
- IWM_ERR(iwm, "Couldn't invalidate profile\n");
- return ret;
- }
- }
-
- if (chan)
- iwm->channel =
- ieee80211_frequency_to_channel(chan->center_freq);
-
- iwm->umac_profile->ssid.ssid_len = sme->ssid_len;
- memcpy(iwm->umac_profile->ssid.ssid, sme->ssid, sme->ssid_len);
-
- if (sme->bssid) {
- IWM_DBG_WEXT(iwm, DBG, "BSSID: %pM\n", sme->bssid);
- memcpy(&iwm->umac_profile->bssid[0], sme->bssid, ETH_ALEN);
- iwm->umac_profile->bss_num = 1;
- } else {
- memset(&iwm->umac_profile->bssid[0], 0, ETH_ALEN);
- iwm->umac_profile->bss_num = 0;
- }
-
- ret = iwm_set_wpa_version(iwm, sme->crypto.wpa_versions);
- if (ret < 0)
- return ret;
-
- ret = iwm_set_auth_type(iwm, sme->auth_type);
- if (ret < 0)
- return ret;
-
- if (sme->crypto.n_ciphers_pairwise) {
- ret = iwm_set_cipher(iwm, sme->crypto.ciphers_pairwise[0],
- true);
- if (ret < 0)
- return ret;
- }
-
- ret = iwm_set_cipher(iwm, sme->crypto.cipher_group, false);
- if (ret < 0)
- return ret;
-
- if (sme->crypto.n_akm_suites) {
- ret = iwm_set_key_mgt(iwm, sme->crypto.akm_suites[0]);
- if (ret < 0)
- return ret;
- }
-
- /*
- * We save the WEP key in case we want to do shared authentication.
- * We have to do it so because UMAC will assert whenever it gets a
- * key before a profile.
- */
- if (sme->key) {
- key_param.key = kmemdup(sme->key, sme->key_len, GFP_KERNEL);
- if (key_param.key == NULL)
- return -ENOMEM;
- key_param.key_len = sme->key_len;
- key_param.seq_len = 0;
- key_param.cipher = sme->crypto.ciphers_pairwise[0];
-
- ret = iwm_key_init(&iwm->keys[sme->key_idx], sme->key_idx,
- NULL, &key_param);
- kfree(key_param.key);
- if (ret < 0) {
- IWM_ERR(iwm, "Invalid key_params\n");
- return ret;
- }
-
- iwm->default_key = sme->key_idx;
- }
-
- /* WPA and open AUTH type from wpa_s means WPS (a.k.a. WSC) */
- if ((iwm->umac_profile->sec.flags &
- (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) &&
- iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) {
- iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK;
- }
-
- ret = iwm_send_mlme_profile(iwm);
-
- if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK ||
- sme->key == NULL)
- return ret;
-
- /*
- * We want to do shared auth.
- * We need to actually set the key we previously cached,
- * and then tell the UMAC it's the default one.
- * That will trigger the auth+assoc UMAC machinery, and again,
- * this must be done after setting the profile.
- */
- ret = iwm_set_key(iwm, 0, &iwm->keys[sme->key_idx]);
- if (ret < 0)
- return ret;
-
- return iwm_set_tx_key(iwm, iwm->default_key);
-}
-
-static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
- u16 reason_code)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- IWM_DBG_WEXT(iwm, DBG, "Active: %d\n", iwm->umac_profile_active);
-
- if (iwm->umac_profile_active)
- iwm_invalidate_mlme_profile(iwm);
-
- return 0;
-}
-
-static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
- enum nl80211_tx_power_setting type, int mbm)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
- int ret;
-
- switch (type) {
- case NL80211_TX_POWER_AUTOMATIC:
- return 0;
- case NL80211_TX_POWER_FIXED:
- if (mbm < 0 || (mbm % 100))
- return -EOPNOTSUPP;
-
- if (!test_bit(IWM_STATUS_READY, &iwm->status))
- return 0;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_TX_PWR_LIMIT_USR,
- MBM_TO_DBM(mbm) * 2);
- if (ret < 0)
- return ret;
-
- return iwm_tx_power_trigger(iwm);
- default:
- IWM_ERR(iwm, "Unsupported power type: %d\n", type);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int iwm_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- *dbm = iwm->txpower >> 1;
-
- return 0;
-}
-
-static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
- struct net_device *dev,
- bool enabled, int timeout)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
- u32 power_index;
-
- if (enabled)
- power_index = IWM_POWER_INDEX_DEFAULT;
- else
- power_index = IWM_POWER_INDEX_MIN;
-
- if (power_index == iwm->conf.power_index)
- return 0;
-
- iwm->conf.power_index = power_index;
-
- return iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_POWER_INDEX, iwm->conf.power_index);
-}
-
-static int iwm_cfg80211_set_pmksa(struct wiphy *wiphy,
- struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
-}
-
-static int iwm_cfg80211_del_pmksa(struct wiphy *wiphy,
- struct net_device *netdev,
- struct cfg80211_pmksa *pmksa)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
-
- return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
-}
-
-static int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy,
- struct net_device *netdev)
-{
- struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
- struct cfg80211_pmksa pmksa;
-
- memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
-
- return iwm_send_pmkid_update(iwm, &pmksa, IWM_CMD_PMKID_FLUSH);
-}
-
-
-static struct cfg80211_ops iwm_cfg80211_ops = {
- .change_virtual_intf = iwm_cfg80211_change_iface,
- .add_key = iwm_cfg80211_add_key,
- .get_key = iwm_cfg80211_get_key,
- .del_key = iwm_cfg80211_del_key,
- .set_default_key = iwm_cfg80211_set_default_key,
- .get_station = iwm_cfg80211_get_station,
- .scan = iwm_cfg80211_scan,
- .set_wiphy_params = iwm_cfg80211_set_wiphy_params,
- .connect = iwm_cfg80211_connect,
- .disconnect = iwm_cfg80211_disconnect,
- .join_ibss = iwm_cfg80211_join_ibss,
- .leave_ibss = iwm_cfg80211_leave_ibss,
- .set_tx_power = iwm_cfg80211_set_txpower,
- .get_tx_power = iwm_cfg80211_get_txpower,
- .set_power_mgmt = iwm_cfg80211_set_power_mgmt,
- .set_pmksa = iwm_cfg80211_set_pmksa,
- .del_pmksa = iwm_cfg80211_del_pmksa,
- .flush_pmksa = iwm_cfg80211_flush_pmksa,
-};
-
-static const u32 cipher_suites[] = {
- WLAN_CIPHER_SUITE_WEP40,
- WLAN_CIPHER_SUITE_WEP104,
- WLAN_CIPHER_SUITE_TKIP,
- WLAN_CIPHER_SUITE_CCMP,
-};
-
-struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev)
-{
- int ret = 0;
- struct wireless_dev *wdev;
-
- /*
- * We're trying to have the following memory
- * layout:
- *
- * +-------------------------+
- * | struct wiphy |
- * +-------------------------+
- * | struct iwm_priv |
- * +-------------------------+
- * | bus private data |
- * | (e.g. iwm_priv_sdio) |
- * +-------------------------+
- *
- */
-
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev) {
- dev_err(dev, "Couldn't allocate wireless device\n");
- return ERR_PTR(-ENOMEM);
- }
-
- wdev->wiphy = wiphy_new(&iwm_cfg80211_ops,
- sizeof(struct iwm_priv) + sizeof_bus);
- if (!wdev->wiphy) {
- dev_err(dev, "Couldn't allocate wiphy device\n");
- ret = -ENOMEM;
- goto out_err_new;
- }
-
- set_wiphy_dev(wdev->wiphy, dev);
- wdev->wiphy->max_scan_ssids = UMAC_WIFI_IF_PROBE_OPTION_MAX;
- wdev->wiphy->max_num_pmkids = UMAC_MAX_NUM_PMKIDS;
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &iwm_band_2ghz;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &iwm_band_5ghz;
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
- wdev->wiphy->cipher_suites = cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
-
- ret = wiphy_register(wdev->wiphy);
- if (ret < 0) {
- dev_err(dev, "Couldn't register wiphy device\n");
- goto out_err_register;
- }
-
- return wdev;
-
- out_err_register:
- wiphy_free(wdev->wiphy);
-
- out_err_new:
- kfree(wdev);
-
- return ERR_PTR(ret);
-}
-
-void iwm_wdev_free(struct iwm_priv *iwm)
-{
- struct wireless_dev *wdev = iwm_to_wdev(iwm);
-
- if (!wdev)
- return;
-
- wiphy_unregister(wdev->wiphy);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.h b/drivers/net/wireless/iwmc3200wifi/cfg80211.h
deleted file mode 100644
index 56a34145acbf..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IWM_CFG80211_H__
-#define __IWM_CFG80211_H__
-
-int iwm_cfg80211_inform_bss(struct iwm_priv *iwm);
-struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev);
-void iwm_wdev_free(struct iwm_priv *iwm);
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
deleted file mode 100644
index bd75078c454b..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ /dev/null
@@ -1,1002 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/wireless.h>
-#include <linux/etherdevice.h>
-#include <linux/ieee80211.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/moduleparam.h>
-
-#include "iwm.h"
-#include "bus.h"
-#include "hal.h"
-#include "umac.h"
-#include "commands.h"
-#include "debug.h"
-
-static int iwm_send_lmac_ptrough_cmd(struct iwm_priv *iwm,
- u8 lmac_cmd_id,
- const void *lmac_payload,
- u16 lmac_payload_size,
- u8 resp)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_LMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_lmac_cmd lmac_cmd;
-
- lmac_cmd.id = lmac_cmd_id;
-
- umac_cmd.id = UMAC_CMD_OPCODE_WIFI_PASS_THROUGH;
- umac_cmd.resp = resp;
-
- return iwm_hal_send_host_cmd(iwm, &udma_cmd, &umac_cmd, &lmac_cmd,
- lmac_payload, lmac_payload_size);
-}
-
-int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
- bool resp)
-{
- struct iwm_umac_wifi_if *hdr = (struct iwm_umac_wifi_if *)payload;
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- int ret;
- u8 oid = hdr->oid;
-
- if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
- IWM_ERR(iwm, "Interface is not ready yet");
- return -EAGAIN;
- }
-
- umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER;
- umac_cmd.resp = resp;
-
- ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd,
- payload, payload_size);
-
- if (resp) {
- ret = wait_event_interruptible_timeout(iwm->wifi_ntfy_queue,
- test_and_clear_bit(oid, &iwm->wifi_ntfy[0]),
- 3 * HZ);
-
- return ret ? 0 : -EBUSY;
- }
-
- return ret;
-}
-
-static int modparam_wiwi = COEX_MODE_CM;
-module_param_named(wiwi, modparam_wiwi, int, 0644);
-MODULE_PARM_DESC(wiwi, "Wifi-WiMAX coexistence: 1=SA, 2=XOR, 3=CM (default)");
-
-static struct coex_event iwm_sta_xor_prio_tbl[COEX_EVENTS_NUM] =
-{
- {4, 3, 0, COEX_UNASSOC_IDLE_FLAGS},
- {4, 3, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
- {4, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
- {4, 3, 0, COEX_CALIBRATION_FLAGS},
- {4, 3, 0, COEX_PERIODIC_CALIBRATION_FLAGS},
- {4, 3, 0, COEX_CONNECTION_ESTAB_FLAGS},
- {4, 3, 0, COEX_ASSOCIATED_IDLE_FLAGS},
- {4, 3, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
- {4, 3, 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
- {4, 3, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
- {6, 3, 0, COEX_XOR_RF_ON_FLAGS},
- {4, 3, 0, COEX_RF_OFF_FLAGS},
- {6, 6, 0, COEX_STAND_ALONE_DEBUG_FLAGS},
- {4, 3, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
- {4, 3, 0, COEX_RSRVD1_FLAGS},
- {4, 3, 0, COEX_RSRVD2_FLAGS}
-};
-
-static struct coex_event iwm_sta_cm_prio_tbl[COEX_EVENTS_NUM] =
-{
- {1, 1, 0, COEX_UNASSOC_IDLE_FLAGS},
- {4, 4, 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
- {3, 3, 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
- {6, 6, 0, COEX_CALIBRATION_FLAGS},
- {3, 3, 0, COEX_PERIODIC_CALIBRATION_FLAGS},
- {6, 5, 0, COEX_CONNECTION_ESTAB_FLAGS},
- {4, 4, 0, COEX_ASSOCIATED_IDLE_FLAGS},
- {4, 4, 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
- {4, 4, 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
- {4, 4, 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
- {1, 1, 0, COEX_RF_ON_FLAGS},
- {1, 1, 0, COEX_RF_OFF_FLAGS},
- {7, 7, 0, COEX_STAND_ALONE_DEBUG_FLAGS},
- {5, 4, 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
- {1, 1, 0, COEX_RSRVD1_FLAGS},
- {1, 1, 0, COEX_RSRVD2_FLAGS}
-};
-
-int iwm_send_prio_table(struct iwm_priv *iwm)
-{
- struct iwm_coex_prio_table_cmd coex_table_cmd;
- u32 coex_enabled, mode_enabled;
-
- memset(&coex_table_cmd, 0, sizeof(struct iwm_coex_prio_table_cmd));
-
- coex_table_cmd.flags = COEX_FLAGS_STA_TABLE_VALID_MSK;
-
- switch (modparam_wiwi) {
- case COEX_MODE_XOR:
- case COEX_MODE_CM:
- coex_enabled = 1;
- break;
- default:
- coex_enabled = 0;
- break;
- }
-
- switch (iwm->conf.mode) {
- case UMAC_MODE_BSS:
- case UMAC_MODE_IBSS:
- mode_enabled = 1;
- break;
- default:
- mode_enabled = 0;
- break;
- }
-
- if (coex_enabled && mode_enabled) {
- coex_table_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK |
- COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK |
- COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK;
-
- switch (modparam_wiwi) {
- case COEX_MODE_XOR:
- memcpy(coex_table_cmd.sta_prio, iwm_sta_xor_prio_tbl,
- sizeof(iwm_sta_xor_prio_tbl));
- break;
- case COEX_MODE_CM:
- memcpy(coex_table_cmd.sta_prio, iwm_sta_cm_prio_tbl,
- sizeof(iwm_sta_cm_prio_tbl));
- break;
- default:
- IWM_ERR(iwm, "Invalid coex_mode 0x%x\n",
- modparam_wiwi);
- break;
- }
- } else
- IWM_WARN(iwm, "coexistense disabled\n");
-
- return iwm_send_lmac_ptrough_cmd(iwm, COEX_PRIORITY_TABLE_CMD,
- &coex_table_cmd,
- sizeof(struct iwm_coex_prio_table_cmd), 0);
-}
-
-int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested)
-{
- struct iwm_lmac_cal_cfg_cmd cal_cfg_cmd;
-
- memset(&cal_cfg_cmd, 0, sizeof(struct iwm_lmac_cal_cfg_cmd));
-
- cal_cfg_cmd.ucode_cfg.init.enable = cpu_to_le32(calib_requested);
- cal_cfg_cmd.ucode_cfg.init.start = cpu_to_le32(calib_requested);
- cal_cfg_cmd.ucode_cfg.init.send_res = cpu_to_le32(calib_requested);
- cal_cfg_cmd.ucode_cfg.flags =
- cpu_to_le32(CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK);
-
- return iwm_send_lmac_ptrough_cmd(iwm, CALIBRATION_CFG_CMD, &cal_cfg_cmd,
- sizeof(struct iwm_lmac_cal_cfg_cmd), 1);
-}
-
-int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested)
-{
- struct iwm_lmac_cal_cfg_cmd cal_cfg_cmd;
-
- memset(&cal_cfg_cmd, 0, sizeof(struct iwm_lmac_cal_cfg_cmd));
-
- cal_cfg_cmd.ucode_cfg.periodic.enable = cpu_to_le32(calib_requested);
- cal_cfg_cmd.ucode_cfg.periodic.start = cpu_to_le32(calib_requested);
-
- return iwm_send_lmac_ptrough_cmd(iwm, CALIBRATION_CFG_CMD, &cal_cfg_cmd,
- sizeof(struct iwm_lmac_cal_cfg_cmd), 0);
-}
-
-int iwm_store_rxiq_calib_result(struct iwm_priv *iwm)
-{
- struct iwm_calib_rxiq *rxiq;
- u8 *eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ);
- int grplen = sizeof(struct iwm_calib_rxiq_group);
-
- rxiq = kzalloc(sizeof(struct iwm_calib_rxiq), GFP_KERNEL);
- if (!rxiq) {
- IWM_ERR(iwm, "Couldn't alloc memory for RX IQ\n");
- return -ENOMEM;
- }
-
- eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ);
- if (IS_ERR(eeprom_rxiq)) {
- IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n");
- kfree(rxiq);
- return PTR_ERR(eeprom_rxiq);
- }
-
- iwm->calib_res[SHILOH_PHY_CALIBRATE_RX_IQ_CMD].buf = (u8 *)rxiq;
- iwm->calib_res[SHILOH_PHY_CALIBRATE_RX_IQ_CMD].size = sizeof(*rxiq);
-
- rxiq->hdr.opcode = SHILOH_PHY_CALIBRATE_RX_IQ_CMD;
- rxiq->hdr.first_grp = 0;
- rxiq->hdr.grp_num = 1;
- rxiq->hdr.all_data_valid = 1;
-
- memcpy(&rxiq->group[0], eeprom_rxiq, 4 * grplen);
- memcpy(&rxiq->group[4], eeprom_rxiq + 6 * grplen, grplen);
-
- return 0;
-}
-
-int iwm_send_calib_results(struct iwm_priv *iwm)
-{
- int i, ret = 0;
-
- for (i = PHY_CALIBRATE_OPCODES_NUM; i < CALIBRATION_CMD_NUM; i++) {
- if (test_bit(i - PHY_CALIBRATE_OPCODES_NUM,
- &iwm->calib_done_map)) {
- IWM_DBG_CMD(iwm, DBG,
- "Send calibration %d result\n", i);
- ret |= iwm_send_lmac_ptrough_cmd(iwm,
- REPLY_PHY_CALIBRATION_CMD,
- iwm->calib_res[i].buf,
- iwm->calib_res[i].size, 0);
-
- kfree(iwm->calib_res[i].buf);
- iwm->calib_res[i].buf = NULL;
- iwm->calib_res[i].size = 0;
- }
- }
-
- return ret;
-}
-
-int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit)
-{
- struct iwm_ct_kill_cfg_cmd cmd;
-
- cmd.entry_threshold = entry;
- cmd.exit_threshold = exit;
-
- return iwm_send_lmac_ptrough_cmd(iwm, REPLY_CT_KILL_CONFIG_CMD, &cmd,
- sizeof(struct iwm_ct_kill_cfg_cmd), 0);
-}
-
-int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_reset reset;
-
- reset.flags = reset_flags;
-
- umac_cmd.id = UMAC_CMD_OPCODE_RESET;
- umac_cmd.resp = resp;
-
- return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &reset,
- sizeof(struct iwm_umac_cmd_reset));
-}
-
-int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_set_param_fix param;
-
- if ((tbl != UMAC_PARAM_TBL_CFG_FIX) &&
- (tbl != UMAC_PARAM_TBL_FA_CFG_FIX))
- return -EINVAL;
-
- umac_cmd.id = UMAC_CMD_OPCODE_SET_PARAM_FIX;
- umac_cmd.resp = 0;
-
- param.tbl = cpu_to_le16(tbl);
- param.key = cpu_to_le16(key);
- param.value = cpu_to_le32(value);
-
- return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &param,
- sizeof(struct iwm_umac_cmd_set_param_fix));
-}
-
-int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
- void *payload, u16 payload_size)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_set_param_var *param_hdr;
- u8 *param;
- int ret;
-
- param = kzalloc(payload_size +
- sizeof(struct iwm_umac_cmd_set_param_var), GFP_KERNEL);
- if (!param) {
- IWM_ERR(iwm, "Couldn't allocate param\n");
- return -ENOMEM;
- }
-
- param_hdr = (struct iwm_umac_cmd_set_param_var *)param;
-
- umac_cmd.id = UMAC_CMD_OPCODE_SET_PARAM_VAR;
- umac_cmd.resp = 0;
-
- param_hdr->tbl = cpu_to_le16(UMAC_PARAM_TBL_CFG_VAR);
- param_hdr->key = cpu_to_le16(key);
- param_hdr->len = cpu_to_le16(payload_size);
- memcpy(param + sizeof(struct iwm_umac_cmd_set_param_var),
- payload, payload_size);
-
- ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, param,
- sizeof(struct iwm_umac_cmd_set_param_var) +
- payload_size);
- kfree(param);
-
- return ret;
-}
-
-int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags)
-{
- int ret;
-
- /* Use UMAC default values */
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_POWER_INDEX, iwm->conf.power_index);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
- CFG_FRAG_THRESHOLD,
- iwm->conf.frag_threshold);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_RTS_THRESHOLD,
- iwm->conf.rts_threshold);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_CTS_TO_SELF, iwm->conf.cts_to_self);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_WIRELESS_MODE,
- iwm->conf.wireless_mode);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_COEX_MODE, modparam_wiwi);
- if (ret < 0)
- return ret;
-
- /*
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_ASSOCIATION_TIMEOUT,
- iwm->conf.assoc_timeout);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_ROAM_TIMEOUT,
- iwm->conf.roam_timeout);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_WIRELESS_MODE,
- WIRELESS_MODE_11A | WIRELESS_MODE_11G);
- if (ret < 0)
- return ret;
- */
-
- ret = iwm_umac_set_config_var(iwm, CFG_NET_ADDR,
- iwm_to_ndev(iwm)->dev_addr, ETH_ALEN);
- if (ret < 0)
- return ret;
-
- /* UMAC PM static configurations */
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_PM_LEGACY_RX_TIMEOUT, 0x12C);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_PM_LEGACY_TX_TIMEOUT, 0x15E);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_PM_CTRL_FLAGS, 0x1);
- if (ret < 0)
- return ret;
-
- ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
- CFG_PM_KEEP_ALIVE_IN_BEACONS, 0x80);
- if (ret < 0)
- return ret;
-
- /* reset UMAC */
- ret = iwm_send_umac_reset(iwm, reset_flags, 1);
- if (ret < 0)
- return ret;
-
- ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
- WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Wait for UMAC RESET timeout\n");
- return ret;
- }
-
- return ret;
-}
-
-int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id)
-{
- struct iwm_udma_wifi_cmd udma_cmd;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_tx_info *tx_info = skb_to_tx_info(skb);
-
- udma_cmd.eop = 1; /* always set eop for non-concatenated Tx */
- udma_cmd.credit_group = pool_id;
- udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
- udma_cmd.lmac_offset = 0;
-
- umac_cmd.id = REPLY_TX;
- umac_cmd.color = tx_info->color;
- umac_cmd.resp = 0;
-
- return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd,
- skb->data, skb->len);
-}
-
-static int iwm_target_read(struct iwm_priv *iwm, __le32 address,
- u8 *response, u32 resp_size)
-{
- struct iwm_udma_nonwifi_cmd target_cmd;
- struct iwm_nonwifi_cmd *cmd;
- u16 seq_num;
- int ret = 0;
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ;
- target_cmd.addr = address;
- target_cmd.op1_sz = cpu_to_le32(resp_size);
- target_cmd.op2 = 0;
- target_cmd.handle_by_hw = 0;
- target_cmd.resp = 1;
- target_cmd.eop = 1;
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't send READ command\n");
- return ret;
- }
-
- /* When succeeding, the send_target routine returns the seq number */
- seq_num = ret;
-
- ret = wait_event_interruptible_timeout(iwm->nonwifi_queue,
- (cmd = iwm_get_pending_nonwifi_cmd(iwm, seq_num,
- UMAC_HDI_OUT_OPCODE_READ)) != NULL,
- 2 * HZ);
-
- if (!ret) {
- IWM_ERR(iwm, "Didn't receive a target READ answer\n");
- return ret;
- }
-
- memcpy(response, cmd->buf.hdr + sizeof(struct iwm_udma_in_hdr),
- resp_size);
-
- kfree(cmd);
-
- return 0;
-}
-
-int iwm_read_mac(struct iwm_priv *iwm, u8 *mac)
-{
- int ret;
- u8 mac_align[ALIGN(ETH_ALEN, 8)];
-
- ret = iwm_target_read(iwm, cpu_to_le32(WICO_MAC_ADDRESS_ADDR),
- mac_align, sizeof(mac_align));
- if (ret)
- return ret;
-
- if (is_valid_ether_addr(mac_align))
- memcpy(mac, mac_align, ETH_ALEN);
- else {
- IWM_ERR(iwm, "Invalid EEPROM MAC\n");
- memcpy(mac, iwm->conf.mac_addr, ETH_ALEN);
- get_random_bytes(&mac[3], 3);
- }
-
- return 0;
-}
-
-static int iwm_check_profile(struct iwm_priv *iwm)
-{
- if (!iwm->umac_profile_active)
- return -EAGAIN;
-
- if (iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
- iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_104 &&
- iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_TKIP &&
- iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_CCMP) {
- IWM_ERR(iwm, "Wrong unicast cipher: 0x%x\n",
- iwm->umac_profile->sec.ucast_cipher);
- return -EAGAIN;
- }
-
- if (iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
- iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_WEP_104 &&
- iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_TKIP &&
- iwm->umac_profile->sec.mcast_cipher != UMAC_CIPHER_TYPE_CCMP) {
- IWM_ERR(iwm, "Wrong multicast cipher: 0x%x\n",
- iwm->umac_profile->sec.mcast_cipher);
- return -EAGAIN;
- }
-
- if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
- iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
- (iwm->umac_profile->sec.ucast_cipher !=
- iwm->umac_profile->sec.mcast_cipher)) {
- IWM_ERR(iwm, "Unicast and multicast ciphers differ for WEP\n");
- }
-
- return 0;
-}
-
-int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx)
-{
- struct iwm_umac_tx_key_id tx_key_id;
- int ret;
-
- ret = iwm_check_profile(iwm);
- if (ret < 0)
- return ret;
-
- /* UMAC only allows to set default key for WEP and auth type is
- * NOT 802.1X or RSNA. */
- if ((iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_40 &&
- iwm->umac_profile->sec.ucast_cipher != UMAC_CIPHER_TYPE_WEP_104) ||
- iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_8021X ||
- iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_RSNA_PSK)
- return 0;
-
- tx_key_id.hdr.oid = UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID;
- tx_key_id.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_tx_key_id) -
- sizeof(struct iwm_umac_wifi_if));
-
- tx_key_id.key_idx = key_idx;
-
- return iwm_send_wifi_if_cmd(iwm, &tx_key_id, sizeof(tx_key_id), 1);
-}
-
-int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key)
-{
- int ret = 0;
- u8 cmd[64], *sta_addr, *key_data, key_len;
- s8 key_idx;
- u16 cmd_size = 0;
- struct iwm_umac_key_hdr *key_hdr = &key->hdr;
- struct iwm_umac_key_wep40 *wep40 = (struct iwm_umac_key_wep40 *)cmd;
- struct iwm_umac_key_wep104 *wep104 = (struct iwm_umac_key_wep104 *)cmd;
- struct iwm_umac_key_tkip *tkip = (struct iwm_umac_key_tkip *)cmd;
- struct iwm_umac_key_ccmp *ccmp = (struct iwm_umac_key_ccmp *)cmd;
-
- if (!remove) {
- ret = iwm_check_profile(iwm);
- if (ret < 0)
- return ret;
- }
-
- sta_addr = key->hdr.mac;
- key_data = key->key;
- key_len = key->key_len;
- key_idx = key->hdr.key_idx;
-
- if (!remove) {
- u8 auth_type = iwm->umac_profile->sec.auth_type;
-
- IWM_DBG_WEXT(iwm, DBG, "key_idx:%d\n", key_idx);
- IWM_DBG_WEXT(iwm, DBG, "key_len:%d\n", key_len);
- IWM_DBG_WEXT(iwm, DBG, "MAC:%pM, idx:%d, multicast:%d\n",
- key_hdr->mac, key_hdr->key_idx, key_hdr->multicast);
-
- IWM_DBG_WEXT(iwm, DBG, "profile: mcast:0x%x, ucast:0x%x\n",
- iwm->umac_profile->sec.mcast_cipher,
- iwm->umac_profile->sec.ucast_cipher);
- IWM_DBG_WEXT(iwm, DBG, "profile: auth_type:0x%x, flags:0x%x\n",
- iwm->umac_profile->sec.auth_type,
- iwm->umac_profile->sec.flags);
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- wep40->hdr.oid = UMAC_WIFI_IF_CMD_ADD_WEP40_KEY;
- wep40->hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_key_wep40) -
- sizeof(struct iwm_umac_wifi_if));
-
- memcpy(&wep40->key_hdr, key_hdr,
- sizeof(struct iwm_umac_key_hdr));
- memcpy(wep40->key, key_data, key_len);
- wep40->static_key =
- !!((auth_type != UMAC_AUTH_TYPE_8021X) &&
- (auth_type != UMAC_AUTH_TYPE_RSNA_PSK));
-
- cmd_size = sizeof(struct iwm_umac_key_wep40);
- break;
-
- case WLAN_CIPHER_SUITE_WEP104:
- wep104->hdr.oid = UMAC_WIFI_IF_CMD_ADD_WEP104_KEY;
- wep104->hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_key_wep104) -
- sizeof(struct iwm_umac_wifi_if));
-
- memcpy(&wep104->key_hdr, key_hdr,
- sizeof(struct iwm_umac_key_hdr));
- memcpy(wep104->key, key_data, key_len);
- wep104->static_key =
- !!((auth_type != UMAC_AUTH_TYPE_8021X) &&
- (auth_type != UMAC_AUTH_TYPE_RSNA_PSK));
-
- cmd_size = sizeof(struct iwm_umac_key_wep104);
- break;
-
- case WLAN_CIPHER_SUITE_CCMP:
- key_hdr->key_idx++;
- ccmp->hdr.oid = UMAC_WIFI_IF_CMD_ADD_CCMP_KEY;
- ccmp->hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_key_ccmp) -
- sizeof(struct iwm_umac_wifi_if));
-
- memcpy(&ccmp->key_hdr, key_hdr,
- sizeof(struct iwm_umac_key_hdr));
-
- memcpy(ccmp->key, key_data, key_len);
-
- if (key->seq_len)
- memcpy(ccmp->iv_count, key->seq, key->seq_len);
-
- cmd_size = sizeof(struct iwm_umac_key_ccmp);
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- key_hdr->key_idx++;
- tkip->hdr.oid = UMAC_WIFI_IF_CMD_ADD_TKIP_KEY;
- tkip->hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_key_tkip) -
- sizeof(struct iwm_umac_wifi_if));
-
- memcpy(&tkip->key_hdr, key_hdr,
- sizeof(struct iwm_umac_key_hdr));
-
- memcpy(tkip->tkip_key, key_data, IWM_TKIP_KEY_SIZE);
- memcpy(tkip->mic_tx_key, key_data + IWM_TKIP_KEY_SIZE,
- IWM_TKIP_MIC_SIZE);
- memcpy(tkip->mic_rx_key,
- key_data + IWM_TKIP_KEY_SIZE + IWM_TKIP_MIC_SIZE,
- IWM_TKIP_MIC_SIZE);
-
- if (key->seq_len)
- memcpy(ccmp->iv_count, key->seq, key->seq_len);
-
- cmd_size = sizeof(struct iwm_umac_key_tkip);
- break;
-
- default:
- return -ENOTSUPP;
- }
-
- if ((key->cipher == WLAN_CIPHER_SUITE_TKIP) ||
- (key->cipher == WLAN_CIPHER_SUITE_CCMP))
- /*
- * UGLY_UGLY_UGLY
- * Copied HACK from the MWG driver.
- * Without it, the key is set before the second
- * EAPOL frame is sent, and the latter is thus
- * encrypted.
- */
- schedule_timeout_interruptible(usecs_to_jiffies(300));
-
- ret = iwm_send_wifi_if_cmd(iwm, cmd, cmd_size, 1);
- } else {
- struct iwm_umac_key_remove key_remove;
-
- IWM_DBG_WEXT(iwm, ERR, "Removing key_idx:%d\n", key_idx);
-
- key_remove.hdr.oid = UMAC_WIFI_IF_CMD_REMOVE_KEY;
- key_remove.hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_key_remove) -
- sizeof(struct iwm_umac_wifi_if));
- memcpy(&key_remove.key_hdr, key_hdr,
- sizeof(struct iwm_umac_key_hdr));
-
- ret = iwm_send_wifi_if_cmd(iwm, &key_remove,
- sizeof(struct iwm_umac_key_remove),
- 1);
- if (ret)
- return ret;
-
- iwm->keys[key_idx].key_len = 0;
- }
-
- return ret;
-}
-
-
-int iwm_send_mlme_profile(struct iwm_priv *iwm)
-{
- int ret;
- struct iwm_umac_profile profile;
-
- memcpy(&profile, iwm->umac_profile, sizeof(profile));
-
- profile.hdr.oid = UMAC_WIFI_IF_CMD_SET_PROFILE;
- profile.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_profile) -
- sizeof(struct iwm_umac_wifi_if));
-
- ret = iwm_send_wifi_if_cmd(iwm, &profile, sizeof(profile), 1);
- if (ret) {
- IWM_ERR(iwm, "Send profile command failed\n");
- return ret;
- }
-
- set_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
- return 0;
-}
-
-int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
-{
- struct iwm_umac_invalidate_profile invalid;
-
- invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE;
- invalid.hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_invalidate_profile) -
- sizeof(struct iwm_umac_wifi_if));
-
- invalid.reason = WLAN_REASON_UNSPECIFIED;
-
- return iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
-}
-
-int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
-{
- int ret;
-
- ret = __iwm_invalidate_mlme_profile(iwm);
- if (ret)
- return ret;
-
- ret = wait_event_interruptible_timeout(iwm->mlme_queue,
- (iwm->umac_profile_active == 0), 5 * HZ);
-
- return ret ? 0 : -EBUSY;
-}
-
-int iwm_tx_power_trigger(struct iwm_priv *iwm)
-{
- struct iwm_umac_pwr_trigger pwr_trigger;
-
- pwr_trigger.hdr.oid = UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER;
- pwr_trigger.hdr.buf_size =
- cpu_to_le16(sizeof(struct iwm_umac_pwr_trigger) -
- sizeof(struct iwm_umac_wifi_if));
-
-
- return iwm_send_wifi_if_cmd(iwm, &pwr_trigger, sizeof(pwr_trigger), 1);
-}
-
-int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_stats_req stats_req;
-
- stats_req.flags = cpu_to_le32(flags);
-
- umac_cmd.id = UMAC_CMD_OPCODE_STATISTIC_REQUEST;
- umac_cmd.resp = 0;
-
- return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &stats_req,
- sizeof(struct iwm_umac_cmd_stats_req));
-}
-
-int iwm_send_umac_channel_list(struct iwm_priv *iwm)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_get_channel_list *ch_list;
- int size = sizeof(struct iwm_umac_cmd_get_channel_list) +
- sizeof(struct iwm_umac_channel_info) * 4;
- int ret;
-
- ch_list = kzalloc(size, GFP_KERNEL);
- if (!ch_list) {
- IWM_ERR(iwm, "Couldn't allocate channel list cmd\n");
- return -ENOMEM;
- }
-
- ch_list->ch[0].band = UMAC_BAND_2GHZ;
- ch_list->ch[0].type = UMAC_CHANNEL_WIDTH_20MHZ;
- ch_list->ch[0].flags = UMAC_CHANNEL_FLAG_VALID;
-
- ch_list->ch[1].band = UMAC_BAND_5GHZ;
- ch_list->ch[1].type = UMAC_CHANNEL_WIDTH_20MHZ;
- ch_list->ch[1].flags = UMAC_CHANNEL_FLAG_VALID;
-
- ch_list->ch[2].band = UMAC_BAND_2GHZ;
- ch_list->ch[2].type = UMAC_CHANNEL_WIDTH_20MHZ;
- ch_list->ch[2].flags = UMAC_CHANNEL_FLAG_VALID | UMAC_CHANNEL_FLAG_IBSS;
-
- ch_list->ch[3].band = UMAC_BAND_5GHZ;
- ch_list->ch[3].type = UMAC_CHANNEL_WIDTH_20MHZ;
- ch_list->ch[3].flags = UMAC_CHANNEL_FLAG_VALID | UMAC_CHANNEL_FLAG_IBSS;
-
- ch_list->count = cpu_to_le16(4);
-
- umac_cmd.id = UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST;
- umac_cmd.resp = 1;
-
- ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, ch_list, size);
-
- kfree(ch_list);
-
- return ret;
-}
-
-int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
- int ssid_num)
-{
- struct iwm_umac_cmd_scan_request req;
- int i, ret;
-
- memset(&req, 0, sizeof(struct iwm_umac_cmd_scan_request));
-
- req.hdr.oid = UMAC_WIFI_IF_CMD_SCAN_REQUEST;
- req.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_cmd_scan_request)
- - sizeof(struct iwm_umac_wifi_if));
- req.type = UMAC_WIFI_IF_SCAN_TYPE_USER;
- req.timeout = 2;
- req.seq_num = iwm->scan_id;
- req.ssid_num = min(ssid_num, UMAC_WIFI_IF_PROBE_OPTION_MAX);
-
- for (i = 0; i < req.ssid_num; i++) {
- memcpy(req.ssids[i].ssid, ssids[i].ssid, ssids[i].ssid_len);
- req.ssids[i].ssid_len = ssids[i].ssid_len;
- }
-
- ret = iwm_send_wifi_if_cmd(iwm, &req, sizeof(req), 0);
- if (ret) {
- IWM_ERR(iwm, "Couldn't send scan request\n");
- return ret;
- }
-
- iwm->scan_id = (iwm->scan_id + 1) % IWM_SCAN_ID_MAX;
-
- return 0;
-}
-
-int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len)
-{
- struct cfg80211_ssid one_ssid;
-
- if (test_and_set_bit(IWM_STATUS_SCANNING, &iwm->status))
- return 0;
-
- one_ssid.ssid_len = min(ssid_len, IEEE80211_MAX_SSID_LEN);
- memcpy(&one_ssid.ssid, ssid, one_ssid.ssid_len);
-
- return iwm_scan_ssids(iwm, &one_ssid, 1);
-}
-
-int iwm_target_reset(struct iwm_priv *iwm)
-{
- struct iwm_udma_nonwifi_cmd target_cmd;
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_REBOOT;
- target_cmd.addr = 0;
- target_cmd.op1_sz = 0;
- target_cmd.op2 = 0;
- target_cmd.handle_by_hw = 0;
- target_cmd.resp = 0;
- target_cmd.eop = 1;
-
- return iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
-}
-
-int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm,
- struct iwm_umac_notif_stop_resume_tx *ntf)
-{
- struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_stop_resume_tx stp_res_cmd;
- struct iwm_sta_info *sta_info;
- u8 sta_id = STA_ID_N_COLOR_ID(ntf->sta_id);
- int i;
-
- sta_info = &iwm->sta_table[sta_id];
- if (!sta_info->valid) {
- IWM_ERR(iwm, "Invalid STA: %d\n", sta_id);
- return -EINVAL;
- }
-
- umac_cmd.id = UMAC_CMD_OPCODE_STOP_RESUME_STA_TX;
- umac_cmd.resp = 0;
-
- stp_res_cmd.flags = ntf->flags;
- stp_res_cmd.sta_id = ntf->sta_id;
- stp_res_cmd.stop_resume_tid_msk = ntf->stop_resume_tid_msk;
- for (i = 0; i < IWM_UMAC_TID_NR; i++)
- stp_res_cmd.last_seq_num[i] =
- sta_info->tid_info[i].last_seq_num;
-
- return iwm_hal_send_umac_cmd(iwm, &udma_cmd, &umac_cmd, &stp_res_cmd,
- sizeof(struct iwm_umac_cmd_stop_resume_tx));
-
-}
-
-int iwm_send_pmkid_update(struct iwm_priv *iwm,
- struct cfg80211_pmksa *pmksa, u32 command)
-{
- struct iwm_umac_pmkid_update update;
- int ret;
-
- memset(&update, 0, sizeof(struct iwm_umac_pmkid_update));
-
- update.hdr.oid = UMAC_WIFI_IF_CMD_PMKID_UPDATE;
- update.hdr.buf_size = cpu_to_le16(sizeof(struct iwm_umac_pmkid_update) -
- sizeof(struct iwm_umac_wifi_if));
-
- update.command = cpu_to_le32(command);
- if (pmksa->bssid)
- memcpy(&update.bssid, pmksa->bssid, ETH_ALEN);
- if (pmksa->pmkid)
- memcpy(&update.pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
-
- ret = iwm_send_wifi_if_cmd(iwm, &update,
- sizeof(struct iwm_umac_pmkid_update), 0);
- if (ret) {
- IWM_ERR(iwm, "PMKID update command failed\n");
- return ret;
- }
-
- return 0;
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
deleted file mode 100644
index 6421689f5e8e..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_COMMANDS_H__
-#define __IWM_COMMANDS_H__
-
-#include <linux/ieee80211.h>
-
-#define IWM_BARKER_REBOOT_NOTIFICATION 0xF
-#define IWM_ACK_BARKER_NOTIFICATION 0x10
-
-/* UMAC commands */
-#define UMAC_RST_CTRL_FLG_LARC_CLK_EN 0x0001
-#define UMAC_RST_CTRL_FLG_LARC_RESET 0x0002
-#define UMAC_RST_CTRL_FLG_FUNC_RESET 0x0004
-#define UMAC_RST_CTRL_FLG_DEV_RESET 0x0008
-#define UMAC_RST_CTRL_FLG_WIFI_CORE_EN 0x0010
-#define UMAC_RST_CTRL_FLG_WIFI_LINK_EN 0x0040
-#define UMAC_RST_CTRL_FLG_WIFI_MLME_EN 0x0080
-#define UMAC_RST_CTRL_FLG_NVM_RELOAD 0x0100
-
-struct iwm_umac_cmd_reset {
- __le32 flags;
-} __packed;
-
-#define UMAC_PARAM_TBL_ORD_FIX 0x0
-#define UMAC_PARAM_TBL_ORD_VAR 0x1
-#define UMAC_PARAM_TBL_CFG_FIX 0x2
-#define UMAC_PARAM_TBL_CFG_VAR 0x3
-#define UMAC_PARAM_TBL_BSS_TRK 0x4
-#define UMAC_PARAM_TBL_FA_CFG_FIX 0x5
-#define UMAC_PARAM_TBL_STA 0x6
-#define UMAC_PARAM_TBL_CHN 0x7
-#define UMAC_PARAM_TBL_STATISTICS 0x8
-
-/* fast access table */
-enum {
- CFG_FRAG_THRESHOLD = 0,
- CFG_FRAME_RETRY_LIMIT,
- CFG_OS_QUEUE_UTIL_TH,
- CFG_RX_FILTER,
- /* <-- LAST --> */
- FAST_ACCESS_CFG_TBL_FIX_LAST
-};
-
-/* fixed size table */
-enum {
- CFG_POWER_INDEX = 0,
- CFG_PM_LEGACY_RX_TIMEOUT,
- CFG_PM_LEGACY_TX_TIMEOUT,
- CFG_PM_CTRL_FLAGS,
- CFG_PM_KEEP_ALIVE_IN_BEACONS,
- CFG_BT_ON_THRESHOLD,
- CFG_RTS_THRESHOLD,
- CFG_CTS_TO_SELF,
- CFG_COEX_MODE,
- CFG_WIRELESS_MODE,
- CFG_ASSOCIATION_TIMEOUT,
- CFG_ROAM_TIMEOUT,
- CFG_CAPABILITY_SUPPORTED_RATES,
- CFG_SCAN_ALLOWED_UNASSOC_FLAGS,
- CFG_SCAN_ALLOWED_MAIN_ASSOC_FLAGS,
- CFG_SCAN_ALLOWED_PAN_ASSOC_FLAGS,
- CFG_SCAN_INTERNAL_PERIODIC_ENABLED,
- CFG_SCAN_IMM_INTERNAL_PERIODIC_SCAN_ON_INIT,
- CFG_SCAN_DEFAULT_PERIODIC_FREQ_SEC,
- CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN,
- CFG_TLC_SUPPORTED_TX_HT_RATES,
- CFG_TLC_SUPPORTED_TX_RATES,
- CFG_TLC_SPATIAL_STREAM_SUPPORTED,
- CFG_TLC_RETRY_PER_RATE,
- CFG_TLC_RETRY_PER_HT_RATE,
- CFG_TLC_FIXED_MCS,
- CFG_TLC_CONTROL_FLAGS,
- CFG_TLC_SR_MIN_FAIL,
- CFG_TLC_SR_MIN_PASS,
- CFG_TLC_HT_STAY_IN_COL_PASS_THRESH,
- CFG_TLC_HT_STAY_IN_COL_FAIL_THRESH,
- CFG_TLC_LEGACY_STAY_IN_COL_PASS_THRESH,
- CFG_TLC_LEGACY_STAY_IN_COL_FAIL_THRESH,
- CFG_TLC_HT_FLUSH_STATS_PACKETS,
- CFG_TLC_LEGACY_FLUSH_STATS_PACKETS,
- CFG_TLC_LEGACY_FLUSH_STATS_MS,
- CFG_TLC_HT_FLUSH_STATS_MS,
- CFG_TLC_STAY_IN_COL_TIME_OUT,
- CFG_TLC_AGG_SHORT_LIM,
- CFG_TLC_AGG_LONG_LIM,
- CFG_TLC_HT_SR_NO_DECREASE,
- CFG_TLC_LEGACY_SR_NO_DECREASE,
- CFG_TLC_SR_FORCE_DECREASE,
- CFG_TLC_SR_ALLOW_INCREASE,
- CFG_TLC_AGG_SET_LONG,
- CFG_TLC_AUTO_AGGREGATION,
- CFG_TLC_AGG_THRESHOLD,
- CFG_TLC_TID_LOAD_THRESHOLD,
- CFG_TLC_BLOCK_ACK_TIMEOUT,
- CFG_TLC_NO_BA_COUNTED_AS_ONE,
- CFG_TLC_NUM_BA_STREAMS_ALLOWED,
- CFG_TLC_NUM_BA_STREAMS_PRESENT,
- CFG_TLC_RENEW_ADDBA_DELAY,
- CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD,
- CFG_TLC_IS_STABLE_IN_HT,
- CFG_TLC_SR_SIC_1ST_FAIL,
- CFG_TLC_SR_SIC_1ST_PASS,
- CFG_TLC_SR_SIC_TOTAL_FAIL,
- CFG_TLC_SR_SIC_TOTAL_PASS,
- CFG_RLC_CHAIN_CTRL,
- CFG_TRK_TABLE_OP_MODE,
- CFG_TRK_TABLE_RSSI_THRESHOLD,
- CFG_TX_PWR_TARGET, /* Used By xVT */
- CFG_TX_PWR_LIMIT_USR,
- CFG_TX_PWR_LIMIT_BSS, /* 11d limit */
- CFG_TX_PWR_LIMIT_BSS_CONSTRAINT, /* 11h constraint */
- CFG_TX_PWR_MODE,
- CFG_MLME_DBG_NOTIF_BLOCK,
- CFG_BT_OFF_BECONS_INTERVALS,
- CFG_BT_FRAG_DURATION,
- CFG_ACTIVE_CHAINS,
- CFG_CALIB_CTRL,
- CFG_CAPABILITY_SUPPORTED_HT_RATES,
- CFG_HT_MAC_PARAM_INFO,
- CFG_MIMO_PS_MODE,
- CFG_HT_DEFAULT_CAPABILIES_INFO,
- CFG_LED_SC_RESOLUTION_FACTOR,
- CFG_PTAM_ENERGY_CCK_DET_DEFAULT,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_DEFAULT,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_DEFAULT,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_DEFAULT,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_DEFAULT,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_DEFAULT,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_DEFAULT,
- CFG_PTAM_ENERGY_CCK_DET_MIN_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MIN_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MIN_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MIN_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MIN_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MIN_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MIN_VAL,
- CFG_PTAM_ENERGY_CCK_DET_MAX_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MAX_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MAX_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MAX_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MAX_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MAX_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MAX_VAL,
- CFG_PTAM_ENERGY_CCK_DET_STEP_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_STEP_VAL,
- CFG_PTAM_CORR40_4_TH_ADD_MIN_STEP_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_STEP_VAL,
- CFG_PTAM_CORR32_4_TH_ADD_MIN_STEP_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_STEP_VAL,
- CFG_PTAM_CORR32_1_TH_ADD_MIN_STEP_VAL,
- CFG_PTAM_LINK_SENS_FA_OFDM_MAX,
- CFG_PTAM_LINK_SENS_FA_OFDM_MIN,
- CFG_PTAM_LINK_SENS_FA_CCK_MAX,
- CFG_PTAM_LINK_SENS_FA_CCK_MIN,
- CFG_PTAM_LINK_SENS_NRG_DIFF,
- CFG_PTAM_LINK_SENS_NRG_MARGIN,
- CFG_PTAM_LINK_SENS_MAX_NUMBER_OF_TIMES_IN_CCK_NO_FA,
- CFG_PTAM_LINK_SENS_AUTO_CORR_MAX_TH_CCK,
- CFG_AGG_MGG_TID_LOAD_ADDBA_THRESHOLD,
- CFG_AGG_MGG_TID_LOAD_DELBA_THRESHOLD,
- CFG_AGG_MGG_ADDBA_BUF_SIZE,
- CFG_AGG_MGG_ADDBA_INACTIVE_TIMEOUT,
- CFG_AGG_MGG_ADDBA_DEBUG_FLAGS,
- CFG_SCAN_PERIODIC_RSSI_HIGH_THRESHOLD,
- CFG_SCAN_PERIODIC_COEF_RSSI_HIGH,
- CFG_11D_ENABLED,
- CFG_11H_FEATURE_FLAGS,
-
- /* <-- LAST --> */
- CFG_TBL_FIX_LAST
-};
-
-/* variable size table */
-enum {
- CFG_NET_ADDR = 0,
- CFG_LED_PATTERN_TABLE,
-
- /* <-- LAST --> */
- CFG_TBL_VAR_LAST
-};
-
-struct iwm_umac_cmd_set_param_fix {
- __le16 tbl;
- __le16 key;
- __le32 value;
-} __packed;
-
-struct iwm_umac_cmd_set_param_var {
- __le16 tbl;
- __le16 key;
- __le16 len;
- __le16 reserved;
-} __packed;
-
-struct iwm_umac_cmd_get_param {
- __le16 tbl;
- __le16 key;
-} __packed;
-
-struct iwm_umac_cmd_get_param_resp {
- __le16 tbl;
- __le16 key;
- __le16 len;
- __le16 reserved;
-} __packed;
-
-struct iwm_umac_cmd_eeprom_proxy_hdr {
- __le32 type;
- __le32 offset;
- __le32 len;
-} __packed;
-
-struct iwm_umac_cmd_eeprom_proxy {
- struct iwm_umac_cmd_eeprom_proxy_hdr hdr;
- u8 buf[0];
-} __packed;
-
-#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1
-#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2
-
-#define UMAC_CHANNEL_FLAG_VALID BIT(0)
-#define UMAC_CHANNEL_FLAG_IBSS BIT(1)
-#define UMAC_CHANNEL_FLAG_ACTIVE BIT(3)
-#define UMAC_CHANNEL_FLAG_RADAR BIT(4)
-#define UMAC_CHANNEL_FLAG_DFS BIT(7)
-
-struct iwm_umac_channel_info {
- u8 band;
- u8 type;
- u8 reserved;
- u8 flags;
- __le32 channels_mask;
-} __packed;
-
-struct iwm_umac_cmd_get_channel_list {
- __le16 count;
- __le16 reserved;
- struct iwm_umac_channel_info ch[0];
-} __packed;
-
-
-/* UMAC WiFi interface commands */
-
-/* Coexistence mode */
-#define COEX_MODE_SA 0x1
-#define COEX_MODE_XOR 0x2
-#define COEX_MODE_CM 0x3
-#define COEX_MODE_MAX 0x4
-
-/* Wireless mode */
-#define WIRELESS_MODE_11A 0x1
-#define WIRELESS_MODE_11G 0x2
-#define WIRELESS_MODE_11N 0x4
-
-#define UMAC_PROFILE_EX_IE_REQUIRED 0x1
-#define UMAC_PROFILE_QOS_ALLOWED 0x2
-
-/* Scanning */
-#define UMAC_WIFI_IF_PROBE_OPTION_MAX 10
-
-#define UMAC_WIFI_IF_SCAN_TYPE_USER 0x0
-#define UMAC_WIFI_IF_SCAN_TYPE_UMAC_RESERVED 0x1
-#define UMAC_WIFI_IF_SCAN_TYPE_HOST_PERIODIC 0x2
-#define UMAC_WIFI_IF_SCAN_TYPE_MAX 0x3
-
-struct iwm_umac_ssid {
- u8 ssid_len;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 reserved[3];
-} __packed;
-
-struct iwm_umac_cmd_scan_request {
- struct iwm_umac_wifi_if hdr;
- __le32 type; /* UMAC_WIFI_IF_SCAN_TYPE_* */
- u8 ssid_num;
- u8 seq_num;
- u8 timeout; /* In seconds */
- u8 reserved;
- struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX];
-} __packed;
-
-#define UMAC_CIPHER_TYPE_NONE 0xFF
-#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00
-#define UMAC_CIPHER_TYPE_WEP_40 0x01
-#define UMAC_CIPHER_TYPE_WEP_104 0x02
-#define UMAC_CIPHER_TYPE_TKIP 0x04
-#define UMAC_CIPHER_TYPE_CCMP 0x08
-
-/* Supported authentication types - bitmap */
-#define UMAC_AUTH_TYPE_OPEN 0x00
-#define UMAC_AUTH_TYPE_LEGACY_PSK 0x01
-#define UMAC_AUTH_TYPE_8021X 0x02
-#define UMAC_AUTH_TYPE_RSNA_PSK 0x04
-
-/* iwm_umac_security.flag is WPA supported -- bits[0:0] */
-#define UMAC_SEC_FLG_WPA_ON_POS 0
-#define UMAC_SEC_FLG_WPA_ON_SEED 1
-#define UMAC_SEC_FLG_WPA_ON_MSK (UMAC_SEC_FLG_WPA_ON_SEED << \
- UMAC_SEC_FLG_WPA_ON_POS)
-
-/* iwm_umac_security.flag is WPA2 supported -- bits [1:1] */
-#define UMAC_SEC_FLG_RSNA_ON_POS 1
-#define UMAC_SEC_FLG_RSNA_ON_SEED 1
-#define UMAC_SEC_FLG_RSNA_ON_MSK (UMAC_SEC_FLG_RSNA_ON_SEED << \
- UMAC_SEC_FLG_RSNA_ON_POS)
-
-/* iwm_umac_security.flag is WSC mode on -- bits [2:2] */
-#define UMAC_SEC_FLG_WSC_ON_POS 2
-#define UMAC_SEC_FLG_WSC_ON_SEED 1
-#define UMAC_SEC_FLG_WSC_ON_MSK (UMAC_SEC_FLG_WSC_ON_SEED << \
- UMAC_SEC_FLG_WSC_ON_POS)
-
-
-/* Legacy profile can use only WEP40 and WEP104 for encryption and
- * OPEN or PSK for authentication */
-#define UMAC_SEC_FLG_LEGACY_PROFILE 0
-
-struct iwm_umac_security {
- u8 auth_type;
- u8 ucast_cipher;
- u8 mcast_cipher;
- u8 flags;
-} __packed;
-
-struct iwm_umac_ibss {
- u8 beacon_interval; /* in millisecond */
- u8 atim; /* in millisecond */
- s8 join_only;
- u8 band;
- u8 channel;
- u8 reserved[3];
-} __packed;
-
-#define UMAC_MODE_BSS 0
-#define UMAC_MODE_IBSS 1
-
-#define UMAC_BSSID_MAX 4
-
-struct iwm_umac_profile {
- struct iwm_umac_wifi_if hdr;
- __le32 mode;
- struct iwm_umac_ssid ssid;
- u8 bssid[UMAC_BSSID_MAX][ETH_ALEN];
- struct iwm_umac_security sec;
- struct iwm_umac_ibss ibss;
- __le32 channel_2ghz;
- __le32 channel_5ghz;
- __le16 flags;
- u8 wireless_mode;
- u8 bss_num;
-} __packed;
-
-struct iwm_umac_invalidate_profile {
- struct iwm_umac_wifi_if hdr;
- u8 reason;
- u8 reserved[3];
-} __packed;
-
-/* Encryption key commands */
-struct iwm_umac_key_wep40 {
- struct iwm_umac_wifi_if hdr;
- struct iwm_umac_key_hdr key_hdr;
- u8 key[WLAN_KEY_LEN_WEP40];
- u8 static_key;
- u8 reserved[2];
-} __packed;
-
-struct iwm_umac_key_wep104 {
- struct iwm_umac_wifi_if hdr;
- struct iwm_umac_key_hdr key_hdr;
- u8 key[WLAN_KEY_LEN_WEP104];
- u8 static_key;
- u8 reserved[2];
-} __packed;
-
-#define IWM_TKIP_KEY_SIZE 16
-#define IWM_TKIP_MIC_SIZE 8
-struct iwm_umac_key_tkip {
- struct iwm_umac_wifi_if hdr;
- struct iwm_umac_key_hdr key_hdr;
- u8 iv_count[6];
- u8 reserved[2];
- u8 tkip_key[IWM_TKIP_KEY_SIZE];
- u8 mic_rx_key[IWM_TKIP_MIC_SIZE];
- u8 mic_tx_key[IWM_TKIP_MIC_SIZE];
-} __packed;
-
-struct iwm_umac_key_ccmp {
- struct iwm_umac_wifi_if hdr;
- struct iwm_umac_key_hdr key_hdr;
- u8 iv_count[6];
- u8 reserved[2];
- u8 key[WLAN_KEY_LEN_CCMP];
-} __packed;
-
-struct iwm_umac_key_remove {
- struct iwm_umac_wifi_if hdr;
- struct iwm_umac_key_hdr key_hdr;
-} __packed;
-
-struct iwm_umac_tx_key_id {
- struct iwm_umac_wifi_if hdr;
- u8 key_idx;
- u8 reserved[3];
-} __packed;
-
-struct iwm_umac_pwr_trigger {
- struct iwm_umac_wifi_if hdr;
- __le32 reseved;
-} __packed;
-
-struct iwm_umac_cmd_stats_req {
- __le32 flags;
-} __packed;
-
-struct iwm_umac_cmd_stop_resume_tx {
- u8 flags;
- u8 sta_id;
- __le16 stop_resume_tid_msk;
- __le16 last_seq_num[IWM_UMAC_TID_NR];
- u16 reserved;
-} __packed;
-
-#define IWM_CMD_PMKID_ADD 1
-#define IWM_CMD_PMKID_DEL 2
-#define IWM_CMD_PMKID_FLUSH 3
-
-struct iwm_umac_pmkid_update {
- struct iwm_umac_wifi_if hdr;
- __le32 command;
- u8 bssid[ETH_ALEN];
- __le16 reserved;
- u8 pmkid[WLAN_PMKID_LEN];
-} __packed;
-
-/* LMAC commands */
-int iwm_read_mac(struct iwm_priv *iwm, u8 *mac);
-int iwm_send_prio_table(struct iwm_priv *iwm);
-int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
-int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
-int iwm_send_calib_results(struct iwm_priv *iwm);
-int iwm_store_rxiq_calib_result(struct iwm_priv *iwm);
-int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit);
-
-/* UMAC commands */
-int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
- bool resp);
-int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp);
-int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value);
-int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
- void *payload, u16 payload_size);
-int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags);
-int iwm_send_mlme_profile(struct iwm_priv *iwm);
-int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
-int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
-int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
-int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
-int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key);
-int iwm_tx_power_trigger(struct iwm_priv *iwm);
-int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags);
-int iwm_send_umac_channel_list(struct iwm_priv *iwm);
-int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
- int ssid_num);
-int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len);
-int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm,
- struct iwm_umac_notif_stop_resume_tx *ntf);
-int iwm_send_pmkid_update(struct iwm_priv *iwm,
- struct cfg80211_pmksa *pmksa, u32 command);
-
-/* UDMA commands */
-int iwm_target_reset(struct iwm_priv *iwm);
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debug.h b/drivers/net/wireless/iwmc3200wifi/debug.h
deleted file mode 100644
index a0c13a49ab3c..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/debug.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#ifndef __IWM_DEBUG_H__
-#define __IWM_DEBUG_H__
-
-#define IWM_ERR(p, f, a...) dev_err(iwm_to_dev(p), f, ## a)
-#define IWM_WARN(p, f, a...) dev_warn(iwm_to_dev(p), f, ## a)
-#define IWM_INFO(p, f, a...) dev_info(iwm_to_dev(p), f, ## a)
-#define IWM_CRIT(p, f, a...) dev_crit(iwm_to_dev(p), f, ## a)
-
-#ifdef CONFIG_IWM_DEBUG
-
-#define IWM_DEBUG_MODULE(i, level, module, f, a...) \
-do { \
- if (unlikely(i->dbg.dbg_module[IWM_DM_##module] >= (IWM_DL_##level)))\
- dev_printk(KERN_INFO, (iwm_to_dev(i)), \
- "%s " f, __func__ , ## a); \
-} while (0)
-
-#define IWM_HEXDUMP(i, level, module, pref, buf, len) \
-do { \
- if (unlikely(i->dbg.dbg_module[IWM_DM_##module] >= (IWM_DL_##level)))\
- print_hex_dump(KERN_INFO, pref, DUMP_PREFIX_OFFSET, \
- 16, 1, buf, len, 1); \
-} while (0)
-
-#else
-
-#define IWM_DEBUG_MODULE(i, level, module, f, a...)
-#define IWM_HEXDUMP(i, level, module, pref, buf, len)
-
-#endif /* CONFIG_IWM_DEBUG */
-
-/* Debug modules */
-enum iwm_debug_module_id {
- IWM_DM_BOOT = 0,
- IWM_DM_FW,
- IWM_DM_SDIO,
- IWM_DM_NTF,
- IWM_DM_RX,
- IWM_DM_TX,
- IWM_DM_MLME,
- IWM_DM_CMD,
- IWM_DM_WEXT,
- __IWM_DM_NR,
-};
-#define IWM_DM_DEFAULT 0
-
-#define IWM_DBG_BOOT(i, l, f, a...) IWM_DEBUG_MODULE(i, l, BOOT, f, ## a)
-#define IWM_DBG_FW(i, l, f, a...) IWM_DEBUG_MODULE(i, l, FW, f, ## a)
-#define IWM_DBG_SDIO(i, l, f, a...) IWM_DEBUG_MODULE(i, l, SDIO, f, ## a)
-#define IWM_DBG_NTF(i, l, f, a...) IWM_DEBUG_MODULE(i, l, NTF, f, ## a)
-#define IWM_DBG_RX(i, l, f, a...) IWM_DEBUG_MODULE(i, l, RX, f, ## a)
-#define IWM_DBG_TX(i, l, f, a...) IWM_DEBUG_MODULE(i, l, TX, f, ## a)
-#define IWM_DBG_MLME(i, l, f, a...) IWM_DEBUG_MODULE(i, l, MLME, f, ## a)
-#define IWM_DBG_CMD(i, l, f, a...) IWM_DEBUG_MODULE(i, l, CMD, f, ## a)
-#define IWM_DBG_WEXT(i, l, f, a...) IWM_DEBUG_MODULE(i, l, WEXT, f, ## a)
-
-/* Debug levels */
-enum iwm_debug_level {
- IWM_DL_NONE = 0,
- IWM_DL_ERR,
- IWM_DL_WARN,
- IWM_DL_INFO,
- IWM_DL_DBG,
-};
-#define IWM_DL_DEFAULT IWM_DL_ERR
-
-struct iwm_debugfs {
- struct iwm_priv *iwm;
- struct dentry *rootdir;
- struct dentry *devdir;
- struct dentry *dbgdir;
- struct dentry *txdir;
- struct dentry *rxdir;
- struct dentry *busdir;
-
- u32 dbg_level;
- struct dentry *dbg_level_dentry;
-
- unsigned long dbg_modules;
- struct dentry *dbg_modules_dentry;
-
- u8 dbg_module[__IWM_DM_NR];
- struct dentry *dbg_module_dentries[__IWM_DM_NR];
-
- struct dentry *txq_dentry;
- struct dentry *tx_credit_dentry;
- struct dentry *rx_ticket_dentry;
-
- struct dentry *fw_err_dentry;
-};
-
-#ifdef CONFIG_IWM_DEBUG
-void iwm_debugfs_init(struct iwm_priv *iwm);
-void iwm_debugfs_exit(struct iwm_priv *iwm);
-#else
-static inline void iwm_debugfs_init(struct iwm_priv *iwm) {}
-static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {}
-#endif
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
deleted file mode 100644
index b6199d124bb9..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ /dev/null
@@ -1,488 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/debugfs.h>
-#include <linux/export.h>
-
-#include "iwm.h"
-#include "bus.h"
-#include "rx.h"
-#include "debug.h"
-
-static struct {
- u8 id;
- char *name;
-} iwm_debug_module[__IWM_DM_NR] = {
- {IWM_DM_BOOT, "boot"},
- {IWM_DM_FW, "fw"},
- {IWM_DM_SDIO, "sdio"},
- {IWM_DM_NTF, "ntf"},
- {IWM_DM_RX, "rx"},
- {IWM_DM_TX, "tx"},
- {IWM_DM_MLME, "mlme"},
- {IWM_DM_CMD, "cmd"},
- {IWM_DM_WEXT, "wext"},
-};
-
-#define add_dbg_module(dbg, name, id, initlevel) \
-do { \
- dbg.dbg_module[id] = (initlevel); \
- dbg.dbg_module_dentries[id] = \
- debugfs_create_x8(name, 0600, \
- dbg.dbgdir, \
- &(dbg.dbg_module[id])); \
-} while (0)
-
-static int iwm_debugfs_u32_read(void *data, u64 *val)
-{
- struct iwm_priv *iwm = data;
-
- *val = iwm->dbg.dbg_level;
- return 0;
-}
-
-static int iwm_debugfs_dbg_level_write(void *data, u64 val)
-{
- struct iwm_priv *iwm = data;
- int i;
-
- iwm->dbg.dbg_level = val;
-
- for (i = 0; i < __IWM_DM_NR; i++)
- iwm->dbg.dbg_module[i] = val;
-
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_iwm_dbg_level,
- iwm_debugfs_u32_read, iwm_debugfs_dbg_level_write,
- "%llu\n");
-
-static int iwm_debugfs_dbg_modules_write(void *data, u64 val)
-{
- struct iwm_priv *iwm = data;
- int i, bit;
-
- iwm->dbg.dbg_modules = val;
-
- for (i = 0; i < __IWM_DM_NR; i++)
- iwm->dbg.dbg_module[i] = 0;
-
- for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
- iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level;
-
- return 0;
-}
-DEFINE_SIMPLE_ATTRIBUTE(fops_iwm_dbg_modules,
- iwm_debugfs_u32_read, iwm_debugfs_dbg_modules_write,
- "%llu\n");
-
-
-static ssize_t iwm_debugfs_txq_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct iwm_priv *iwm = filp->private_data;
- char *buf;
- int i, buf_len = 4096;
- size_t len = 0;
- ssize_t ret;
-
- if (*ppos != 0)
- return 0;
- if (count < sizeof(buf))
- return -ENOSPC;
-
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (i = 0; i < IWM_TX_QUEUES; i++) {
- struct iwm_tx_queue *txq = &iwm->txq[i];
- struct sk_buff *skb;
- int j;
- unsigned long flags;
-
- spin_lock_irqsave(&txq->queue.lock, flags);
-
- skb = (struct sk_buff *)&txq->queue;
-
- len += snprintf(buf + len, buf_len - len, "TXQ #%d\n", i);
- len += snprintf(buf + len, buf_len - len, "\tStopped: %d\n",
- __netif_subqueue_stopped(iwm_to_ndev(iwm),
- txq->id));
- len += snprintf(buf + len, buf_len - len, "\tConcat count:%d\n",
- txq->concat_count);
- len += snprintf(buf + len, buf_len - len, "\tQueue len: %d\n",
- skb_queue_len(&txq->queue));
- for (j = 0; j < skb_queue_len(&txq->queue); j++) {
- struct iwm_tx_info *tx_info;
-
- skb = skb->next;
- tx_info = skb_to_tx_info(skb);
-
- len += snprintf(buf + len, buf_len - len,
- "\tSKB #%d\n", j);
- len += snprintf(buf + len, buf_len - len,
- "\t\tsta: %d\n", tx_info->sta);
- len += snprintf(buf + len, buf_len - len,
- "\t\tcolor: %d\n", tx_info->color);
- len += snprintf(buf + len, buf_len - len,
- "\t\ttid: %d\n", tx_info->tid);
- }
-
- spin_unlock_irqrestore(&txq->queue.lock, flags);
-
- spin_lock_irqsave(&txq->stopped_queue.lock, flags);
-
- len += snprintf(buf + len, buf_len - len,
- "\tStopped Queue len: %d\n",
- skb_queue_len(&txq->stopped_queue));
- for (j = 0; j < skb_queue_len(&txq->stopped_queue); j++) {
- struct iwm_tx_info *tx_info;
-
- skb = skb->next;
- tx_info = skb_to_tx_info(skb);
-
- len += snprintf(buf + len, buf_len - len,
- "\tSKB #%d\n", j);
- len += snprintf(buf + len, buf_len - len,
- "\t\tsta: %d\n", tx_info->sta);
- len += snprintf(buf + len, buf_len - len,
- "\t\tcolor: %d\n", tx_info->color);
- len += snprintf(buf + len, buf_len - len,
- "\t\ttid: %d\n", tx_info->tid);
- }
-
- spin_unlock_irqrestore(&txq->stopped_queue.lock, flags);
- }
-
- ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
- kfree(buf);
-
- return ret;
-}
-
-static ssize_t iwm_debugfs_tx_credit_read(struct file *filp,
- char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct iwm_priv *iwm = filp->private_data;
- struct iwm_tx_credit *credit = &iwm->tx_credit;
- char *buf;
- int i, buf_len = 4096;
- size_t len = 0;
- ssize_t ret;
-
- if (*ppos != 0)
- return 0;
- if (count < sizeof(buf))
- return -ENOSPC;
-
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- len += snprintf(buf + len, buf_len - len,
- "NR pools: %d\n", credit->pool_nr);
- len += snprintf(buf + len, buf_len - len,
- "pools map: 0x%lx\n", credit->full_pools_map);
-
- len += snprintf(buf + len, buf_len - len, "\n### POOLS ###\n");
- for (i = 0; i < IWM_MACS_OUT_GROUPS; i++) {
- len += snprintf(buf + len, buf_len - len,
- "pools entry #%d\n", i);
- len += snprintf(buf + len, buf_len - len,
- "\tid: %d\n",
- credit->pools[i].id);
- len += snprintf(buf + len, buf_len - len,
- "\tsid: %d\n",
- credit->pools[i].sid);
- len += snprintf(buf + len, buf_len - len,
- "\tmin_pages: %d\n",
- credit->pools[i].min_pages);
- len += snprintf(buf + len, buf_len - len,
- "\tmax_pages: %d\n",
- credit->pools[i].max_pages);
- len += snprintf(buf + len, buf_len - len,
- "\talloc_pages: %d\n",
- credit->pools[i].alloc_pages);
- len += snprintf(buf + len, buf_len - len,
- "\tfreed_pages: %d\n",
- credit->pools[i].total_freed_pages);
- }
-
- len += snprintf(buf + len, buf_len - len, "\n### SPOOLS ###\n");
- for (i = 0; i < IWM_MACS_OUT_SGROUPS; i++) {
- len += snprintf(buf + len, buf_len - len,
- "spools entry #%d\n", i);
- len += snprintf(buf + len, buf_len - len,
- "\tid: %d\n",
- credit->spools[i].id);
- len += snprintf(buf + len, buf_len - len,
- "\tmax_pages: %d\n",
- credit->spools[i].max_pages);
- len += snprintf(buf + len, buf_len - len,
- "\talloc_pages: %d\n",
- credit->spools[i].alloc_pages);
-
- }
-
- ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
- kfree(buf);
-
- return ret;
-}
-
-static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
- char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct iwm_priv *iwm = filp->private_data;
- struct iwm_rx_ticket_node *ticket;
- char *buf;
- int buf_len = 4096, i;
- size_t len = 0;
- ssize_t ret;
-
- if (*ppos != 0)
- return 0;
- if (count < sizeof(buf))
- return -ENOSPC;
-
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- spin_lock(&iwm->ticket_lock);
- list_for_each_entry(ticket, &iwm->rx_tickets, node) {
- len += snprintf(buf + len, buf_len - len, "Ticket #%d\n",
- ticket->ticket->id);
- len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n",
- ticket->ticket->action);
- len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n",
- ticket->ticket->flags);
- }
- spin_unlock(&iwm->ticket_lock);
-
- for (i = 0; i < IWM_RX_ID_HASH; i++) {
- struct iwm_rx_packet *packet;
- struct list_head *pkt_list = &iwm->rx_packets[i];
-
- if (!list_empty(pkt_list)) {
- len += snprintf(buf + len, buf_len - len,
- "Packet hash #%d\n", i);
- spin_lock(&iwm->packet_lock[i]);
- list_for_each_entry(packet, pkt_list, node) {
- len += snprintf(buf + len, buf_len - len,
- "\tPacket id: %d\n",
- packet->id);
- len += snprintf(buf + len, buf_len - len,
- "\tPacket length: %lu\n",
- packet->pkt_size);
- }
- spin_unlock(&iwm->packet_lock[i]);
- }
- }
-
- ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
- kfree(buf);
-
- return ret;
-}
-
-static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
- char __user *buffer,
- size_t count, loff_t *ppos)
-{
-
- struct iwm_priv *iwm = filp->private_data;
- char buf[512];
- int buf_len = 512;
- size_t len = 0;
-
- if (*ppos != 0)
- return 0;
- if (count < sizeof(buf))
- return -ENOSPC;
-
- if (!iwm->last_fw_err)
- return -ENOMEM;
-
- if (iwm->last_fw_err->line_num == 0)
- goto out;
-
- len += snprintf(buf + len, buf_len - len, "%cMAC FW ERROR:\n",
- (le32_to_cpu(iwm->last_fw_err->category) == UMAC_SYS_ERR_CAT_LMAC)
- ? 'L' : 'U');
- len += snprintf(buf + len, buf_len - len,
- "\tCategory: %d\n",
- le32_to_cpu(iwm->last_fw_err->category));
-
- len += snprintf(buf + len, buf_len - len,
- "\tStatus: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->status));
-
- len += snprintf(buf + len, buf_len - len,
- "\tPC: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->pc));
-
- len += snprintf(buf + len, buf_len - len,
- "\tblink1: %d\n",
- le32_to_cpu(iwm->last_fw_err->blink1));
-
- len += snprintf(buf + len, buf_len - len,
- "\tblink2: %d\n",
- le32_to_cpu(iwm->last_fw_err->blink2));
-
- len += snprintf(buf + len, buf_len - len,
- "\tilink1: %d\n",
- le32_to_cpu(iwm->last_fw_err->ilink1));
-
- len += snprintf(buf + len, buf_len - len,
- "\tilink2: %d\n",
- le32_to_cpu(iwm->last_fw_err->ilink2));
-
- len += snprintf(buf + len, buf_len - len,
- "\tData1: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->data1));
-
- len += snprintf(buf + len, buf_len - len,
- "\tData2: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->data2));
-
- len += snprintf(buf + len, buf_len - len,
- "\tLine number: %d\n",
- le32_to_cpu(iwm->last_fw_err->line_num));
-
- len += snprintf(buf + len, buf_len - len,
- "\tUMAC status: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->umac_status));
-
- len += snprintf(buf + len, buf_len - len,
- "\tLMAC status: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->lmac_status));
-
- len += snprintf(buf + len, buf_len - len,
- "\tSDIO status: 0x%x\n",
- le32_to_cpu(iwm->last_fw_err->sdio_status));
-
-out:
-
- return simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
-}
-
-static const struct file_operations iwm_debugfs_txq_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = iwm_debugfs_txq_read,
- .llseek = default_llseek,
-};
-
-static const struct file_operations iwm_debugfs_tx_credit_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = iwm_debugfs_tx_credit_read,
- .llseek = default_llseek,
-};
-
-static const struct file_operations iwm_debugfs_rx_ticket_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = iwm_debugfs_rx_ticket_read,
- .llseek = default_llseek,
-};
-
-static const struct file_operations iwm_debugfs_fw_err_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = iwm_debugfs_fw_err_read,
- .llseek = default_llseek,
-};
-
-void iwm_debugfs_init(struct iwm_priv *iwm)
-{
- int i;
-
- iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- iwm->dbg.devdir = debugfs_create_dir(wiphy_name(iwm_to_wiphy(iwm)),
- iwm->dbg.rootdir);
- iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir);
- iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir);
- iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir);
- iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir);
- if (iwm->bus_ops->debugfs_init)
- iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
-
- iwm->dbg.dbg_level = IWM_DL_NONE;
- iwm->dbg.dbg_level_dentry =
- debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm,
- &fops_iwm_dbg_level);
-
- iwm->dbg.dbg_modules = IWM_DM_DEFAULT;
- iwm->dbg.dbg_modules_dentry =
- debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm,
- &fops_iwm_dbg_modules);
-
- for (i = 0; i < __IWM_DM_NR; i++)
- add_dbg_module(iwm->dbg, iwm_debug_module[i].name,
- iwm_debug_module[i].id, IWM_DL_DEFAULT);
-
- iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200,
- iwm->dbg.txdir, iwm,
- &iwm_debugfs_txq_fops);
- iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200,
- iwm->dbg.txdir, iwm,
- &iwm_debugfs_tx_credit_fops);
- iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200,
- iwm->dbg.rxdir, iwm,
- &iwm_debugfs_rx_ticket_fops);
- iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200,
- iwm->dbg.dbgdir, iwm,
- &iwm_debugfs_fw_err_fops);
-}
-
-void iwm_debugfs_exit(struct iwm_priv *iwm)
-{
- int i;
-
- for (i = 0; i < __IWM_DM_NR; i++)
- debugfs_remove(iwm->dbg.dbg_module_dentries[i]);
-
- debugfs_remove(iwm->dbg.dbg_modules_dentry);
- debugfs_remove(iwm->dbg.dbg_level_dentry);
- debugfs_remove(iwm->dbg.txq_dentry);
- debugfs_remove(iwm->dbg.tx_credit_dentry);
- debugfs_remove(iwm->dbg.rx_ticket_dentry);
- debugfs_remove(iwm->dbg.fw_err_dentry);
- if (iwm->bus_ops->debugfs_exit)
- iwm->bus_ops->debugfs_exit(iwm);
-
- debugfs_remove(iwm->dbg.busdir);
- debugfs_remove(iwm->dbg.dbgdir);
- debugfs_remove(iwm->dbg.txdir);
- debugfs_remove(iwm->dbg.rxdir);
- debugfs_remove(iwm->dbg.devdir);
- debugfs_remove(iwm->dbg.rootdir);
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.c b/drivers/net/wireless/iwmc3200wifi/eeprom.c
deleted file mode 100644
index e80e776b74f7..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/eeprom.c
+++ /dev/null
@@ -1,234 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include "iwm.h"
-#include "umac.h"
-#include "commands.h"
-#include "eeprom.h"
-
-static struct iwm_eeprom_entry eeprom_map[] = {
- [IWM_EEPROM_SIG] =
- {"Signature", IWM_EEPROM_SIG_OFF, IWM_EEPROM_SIG_LEN},
-
- [IWM_EEPROM_VERSION] =
- {"Version", IWM_EEPROM_VERSION_OFF, IWM_EEPROM_VERSION_LEN},
-
- [IWM_EEPROM_OEM_HW_VERSION] =
- {"OEM HW version", IWM_EEPROM_OEM_HW_VERSION_OFF,
- IWM_EEPROM_OEM_HW_VERSION_LEN},
-
- [IWM_EEPROM_MAC_VERSION] =
- {"MAC version", IWM_EEPROM_MAC_VERSION_OFF, IWM_EEPROM_MAC_VERSION_LEN},
-
- [IWM_EEPROM_CARD_ID] =
- {"Card ID", IWM_EEPROM_CARD_ID_OFF, IWM_EEPROM_CARD_ID_LEN},
-
- [IWM_EEPROM_RADIO_CONF] =
- {"Radio config", IWM_EEPROM_RADIO_CONF_OFF, IWM_EEPROM_RADIO_CONF_LEN},
-
- [IWM_EEPROM_SKU_CAP] =
- {"SKU capabilities", IWM_EEPROM_SKU_CAP_OFF, IWM_EEPROM_SKU_CAP_LEN},
-
- [IWM_EEPROM_FAT_CHANNELS_CAP] =
- {"HT channels capabilities", IWM_EEPROM_FAT_CHANNELS_CAP_OFF,
- IWM_EEPROM_FAT_CHANNELS_CAP_LEN},
-
- [IWM_EEPROM_CALIB_RXIQ_OFFSET] =
- {"RX IQ offset", IWM_EEPROM_CALIB_RXIQ_OFF, IWM_EEPROM_INDIRECT_LEN},
-
- [IWM_EEPROM_CALIB_RXIQ] =
- {"Calib RX IQ", 0, IWM_EEPROM_CALIB_RXIQ_LEN},
-};
-
-
-static int iwm_eeprom_read(struct iwm_priv *iwm, u8 eeprom_id)
-{
- int ret;
- u32 entry_size, chunk_size, data_offset = 0, addr_offset = 0;
- u32 addr;
- struct iwm_udma_wifi_cmd udma_cmd;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_umac_cmd_eeprom_proxy eeprom_cmd;
-
- if (eeprom_id > (IWM_EEPROM_LAST - 1))
- return -EINVAL;
-
- entry_size = eeprom_map[eeprom_id].length;
-
- if (eeprom_id >= IWM_EEPROM_INDIRECT_DATA) {
- /* indirect data */
- u32 off_id = eeprom_id - IWM_EEPROM_INDIRECT_DATA +
- IWM_EEPROM_INDIRECT_OFFSET;
-
- eeprom_map[eeprom_id].offset =
- *(u16 *)(iwm->eeprom + eeprom_map[off_id].offset) << 1;
- }
-
- addr = eeprom_map[eeprom_id].offset;
-
- udma_cmd.eop = 1;
- udma_cmd.credit_group = 0x4;
- udma_cmd.ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD;
- udma_cmd.lmac_offset = 0;
-
- umac_cmd.id = UMAC_CMD_OPCODE_EEPROM_PROXY;
- umac_cmd.resp = 1;
-
- while (entry_size > 0) {
- chunk_size = min_t(u32, entry_size, IWM_MAX_EEPROM_DATA_LEN);
-
- eeprom_cmd.hdr.type =
- cpu_to_le32(IWM_UMAC_CMD_EEPROM_TYPE_READ);
- eeprom_cmd.hdr.offset = cpu_to_le32(addr + addr_offset);
- eeprom_cmd.hdr.len = cpu_to_le32(chunk_size);
-
- ret = iwm_hal_send_umac_cmd(iwm, &udma_cmd,
- &umac_cmd, &eeprom_cmd,
- sizeof(struct iwm_umac_cmd_eeprom_proxy));
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't read eeprom\n");
- return ret;
- }
-
- ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_EEPROM_PROXY,
- IWM_SRC_UMAC, 2*HZ);
- if (ret < 0) {
- IWM_ERR(iwm, "Did not get any eeprom answer\n");
- return ret;
- }
-
- data_offset += chunk_size;
- addr_offset += chunk_size;
- entry_size -= chunk_size;
- }
-
- return 0;
-}
-
-u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id)
-{
- if (!iwm->eeprom)
- return ERR_PTR(-ENODEV);
-
- return iwm->eeprom + eeprom_map[eeprom_id].offset;
-}
-
-int iwm_eeprom_fat_channels(struct iwm_priv *iwm)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct ieee80211_supported_band *band;
- u16 *channels, i;
-
- channels = (u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_FAT_CHANNELS_CAP);
- if (IS_ERR(channels))
- return PTR_ERR(channels);
-
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- band->ht_cap.ht_supported = true;
-
- for (i = 0; i < IWM_EEPROM_FAT_CHANNELS_24; i++)
- if (!(channels[i] & IWM_EEPROM_FAT_CHANNEL_ENABLED))
- band->ht_cap.ht_supported = false;
-
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- band->ht_cap.ht_supported = true;
- for (i = IWM_EEPROM_FAT_CHANNELS_24; i < IWM_EEPROM_FAT_CHANNELS; i++)
- if (!(channels[i] & IWM_EEPROM_FAT_CHANNEL_ENABLED))
- band->ht_cap.ht_supported = false;
-
- return 0;
-}
-
-u32 iwm_eeprom_wireless_mode(struct iwm_priv *iwm)
-{
- u16 sku_cap;
- u32 wireless_mode = 0;
-
- sku_cap = *((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP));
-
- if (sku_cap & IWM_EEPROM_SKU_CAP_BAND_24GHZ)
- wireless_mode |= WIRELESS_MODE_11G;
-
- if (sku_cap & IWM_EEPROM_SKU_CAP_BAND_52GHZ)
- wireless_mode |= WIRELESS_MODE_11A;
-
- if (sku_cap & IWM_EEPROM_SKU_CAP_11N_ENABLE)
- wireless_mode |= WIRELESS_MODE_11N;
-
- return wireless_mode;
-}
-
-
-int iwm_eeprom_init(struct iwm_priv *iwm)
-{
- int i, ret = 0;
- char name[32];
-
- iwm->eeprom = kzalloc(IWM_EEPROM_LEN, GFP_KERNEL);
- if (!iwm->eeprom)
- return -ENOMEM;
-
- for (i = IWM_EEPROM_FIRST; i < IWM_EEPROM_LAST; i++) {
- ret = iwm_eeprom_read(iwm, i);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't read eeprom entry #%d: %s\n",
- i, eeprom_map[i].name);
- break;
- }
- }
-
- IWM_DBG_BOOT(iwm, DBG, "EEPROM dump:\n");
- for (i = IWM_EEPROM_FIRST; i < IWM_EEPROM_LAST; i++) {
- memset(name, 0, 32);
- sprintf(name, "%s: ", eeprom_map[i].name);
-
- IWM_HEXDUMP(iwm, DBG, BOOT, name,
- iwm->eeprom + eeprom_map[i].offset,
- eeprom_map[i].length);
- }
-
- return ret;
-}
-
-void iwm_eeprom_exit(struct iwm_priv *iwm)
-{
- kfree(iwm->eeprom);
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/eeprom.h b/drivers/net/wireless/iwmc3200wifi/eeprom.h
deleted file mode 100644
index 4e3a3fdab0d3..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/eeprom.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_EEPROM_H__
-#define __IWM_EEPROM_H__
-
-enum {
- IWM_EEPROM_SIG = 0,
- IWM_EEPROM_FIRST = IWM_EEPROM_SIG,
- IWM_EEPROM_VERSION,
- IWM_EEPROM_OEM_HW_VERSION,
- IWM_EEPROM_MAC_VERSION,
- IWM_EEPROM_CARD_ID,
- IWM_EEPROM_RADIO_CONF,
- IWM_EEPROM_SKU_CAP,
- IWM_EEPROM_FAT_CHANNELS_CAP,
-
- IWM_EEPROM_INDIRECT_OFFSET,
- IWM_EEPROM_CALIB_RXIQ_OFFSET = IWM_EEPROM_INDIRECT_OFFSET,
-
- IWM_EEPROM_INDIRECT_DATA,
- IWM_EEPROM_CALIB_RXIQ = IWM_EEPROM_INDIRECT_DATA,
-
- IWM_EEPROM_LAST,
-};
-
-#define IWM_EEPROM_SIG_OFF 0x00
-#define IWM_EEPROM_VERSION_OFF (0x54 << 1)
-#define IWM_EEPROM_OEM_HW_VERSION_OFF (0x56 << 1)
-#define IWM_EEPROM_MAC_VERSION_OFF (0x30 << 1)
-#define IWM_EEPROM_CARD_ID_OFF (0x5d << 1)
-#define IWM_EEPROM_RADIO_CONF_OFF (0x58 << 1)
-#define IWM_EEPROM_SKU_CAP_OFF (0x55 << 1)
-#define IWM_EEPROM_CALIB_CONFIG_OFF (0x7c << 1)
-#define IWM_EEPROM_FAT_CHANNELS_CAP_OFF (0xde << 1)
-
-#define IWM_EEPROM_SIG_LEN 4
-#define IWM_EEPROM_VERSION_LEN 2
-#define IWM_EEPROM_OEM_HW_VERSION_LEN 2
-#define IWM_EEPROM_MAC_VERSION_LEN 1
-#define IWM_EEPROM_CARD_ID_LEN 2
-#define IWM_EEPROM_RADIO_CONF_LEN 2
-#define IWM_EEPROM_SKU_CAP_LEN 2
-#define IWM_EEPROM_FAT_CHANNELS_CAP_LEN 40
-#define IWM_EEPROM_INDIRECT_LEN 2
-
-#define IWM_MAX_EEPROM_DATA_LEN 240
-#define IWM_EEPROM_LEN 0x800
-
-#define IWM_EEPROM_MIN_ALLOWED_VERSION 0x0610
-#define IWM_EEPROM_MAX_ALLOWED_VERSION 0x0700
-#define IWM_EEPROM_CURRENT_VERSION 0x0612
-
-#define IWM_EEPROM_SKU_CAP_BAND_24GHZ (1 << 4)
-#define IWM_EEPROM_SKU_CAP_BAND_52GHZ (1 << 5)
-#define IWM_EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
-
-#define IWM_EEPROM_FAT_CHANNELS 20
-/* 2.4 gHz FAT primary channels: 1, 2, 3, 4, 5, 6, 7, 8, 9 */
-#define IWM_EEPROM_FAT_CHANNELS_24 9
-/* 5.2 gHz FAT primary channels: 36,44,52,60,100,108,116,124,132,149,157 */
-#define IWM_EEPROM_FAT_CHANNELS_52 11
-
-#define IWM_EEPROM_FAT_CHANNEL_ENABLED (1 << 0)
-
-enum {
- IWM_EEPROM_CALIB_CAL_HDR,
- IWM_EEPROM_CALIB_TX_POWER,
- IWM_EEPROM_CALIB_XTAL,
- IWM_EEPROM_CALIB_TEMPERATURE,
- IWM_EEPROM_CALIB_RX_BB_FILTER,
- IWM_EEPROM_CALIB_RX_IQ,
- IWM_EEPROM_CALIB_MAX,
-};
-
-#define IWM_EEPROM_CALIB_RXIQ_OFF (IWM_EEPROM_CALIB_CONFIG_OFF + \
- (IWM_EEPROM_CALIB_RX_IQ << 1))
-#define IWM_EEPROM_CALIB_RXIQ_LEN sizeof(struct iwm_lmac_calib_rxiq)
-
-struct iwm_eeprom_entry {
- char *name;
- u32 offset;
- u32 length;
-};
-
-int iwm_eeprom_init(struct iwm_priv *iwm);
-void iwm_eeprom_exit(struct iwm_priv *iwm);
-u8 *iwm_eeprom_access(struct iwm_priv *iwm, u8 eeprom_id);
-int iwm_eeprom_fat_channels(struct iwm_priv *iwm);
-u32 iwm_eeprom_wireless_mode(struct iwm_priv *iwm);
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
deleted file mode 100644
index 6f1afe6bbc8c..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/firmware.h>
-
-#include "iwm.h"
-#include "bus.h"
-#include "hal.h"
-#include "umac.h"
-#include "debug.h"
-#include "fw.h"
-#include "commands.h"
-
-static const char fw_barker[] = "*WESTOPFORNOONE*";
-
-/*
- * @op_code: Op code we're looking for.
- * @index: There can be several instances of the same opcode within
- * the firmware. Index specifies which one we're looking for.
- */
-static int iwm_fw_op_offset(struct iwm_priv *iwm, const struct firmware *fw,
- u16 op_code, u32 index)
-{
- int offset = -EINVAL, fw_offset;
- u32 op_index = 0;
- const u8 *fw_ptr;
- struct iwm_fw_hdr_rec *rec;
-
- fw_offset = 0;
- fw_ptr = fw->data;
-
- /* We first need to look for the firmware barker */
- if (memcmp(fw_ptr, fw_barker, IWM_HDR_BARKER_LEN)) {
- IWM_ERR(iwm, "No barker string in this FW\n");
- return -EINVAL;
- }
-
- if (fw->size < IWM_HDR_LEN) {
- IWM_ERR(iwm, "FW is too small (%zu)\n", fw->size);
- return -EINVAL;
- }
-
- fw_offset += IWM_HDR_BARKER_LEN;
-
- while (fw_offset < fw->size) {
- rec = (struct iwm_fw_hdr_rec *)(fw_ptr + fw_offset);
-
- IWM_DBG_FW(iwm, DBG, "FW: op_code: 0x%x, len: %d @ 0x%x\n",
- rec->op_code, rec->len, fw_offset);
-
- if (rec->op_code == IWM_HDR_REC_OP_INVALID) {
- IWM_DBG_FW(iwm, DBG, "Reached INVALID op code\n");
- break;
- }
-
- if (rec->op_code == op_code) {
- if (op_index == index) {
- fw_offset += sizeof(struct iwm_fw_hdr_rec);
- offset = fw_offset;
- goto out;
- }
- op_index++;
- }
-
- fw_offset += sizeof(struct iwm_fw_hdr_rec) + rec->len;
- }
-
- out:
- return offset;
-}
-
-static int iwm_load_firmware_chunk(struct iwm_priv *iwm,
- const struct firmware *fw,
- struct iwm_fw_img_desc *img_desc)
-{
- struct iwm_udma_nonwifi_cmd target_cmd;
- u32 chunk_size;
- const u8 *chunk_ptr;
- int ret = 0;
-
- IWM_DBG_FW(iwm, INFO, "Loading FW chunk: %d bytes @ 0x%x\n",
- img_desc->length, img_desc->address);
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
- target_cmd.handle_by_hw = 1;
- target_cmd.op2 = 0;
- target_cmd.resp = 0;
- target_cmd.eop = 1;
-
- chunk_size = img_desc->length;
- chunk_ptr = fw->data + img_desc->offset;
-
- while (chunk_size > 0) {
- u32 tmp_chunk_size;
-
- tmp_chunk_size = min_t(u32, chunk_size,
- IWM_MAX_NONWIFI_CMD_BUFF_SIZE);
-
- target_cmd.addr = cpu_to_le32(img_desc->address +
- (chunk_ptr - fw->data - img_desc->offset));
- target_cmd.op1_sz = cpu_to_le32(tmp_chunk_size);
-
- IWM_DBG_FW(iwm, DBG, "\t%d bytes @ 0x%x\n",
- tmp_chunk_size, target_cmd.addr);
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, chunk_ptr);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't load FW chunk\n");
- break;
- }
-
- chunk_size -= tmp_chunk_size;
- chunk_ptr += tmp_chunk_size;
- }
-
- return ret;
-}
-/*
- * To load a fw image to the target, we basically go through the
- * fw, looking for OP_MEM_DESC records. Once we found one, we
- * pass it to iwm_load_firmware_chunk().
- * The OP_MEM_DESC records contain the actuall memory chunk to be
- * sent, but also the destination address.
- */
-static int iwm_load_img(struct iwm_priv *iwm, const char *img_name)
-{
- const struct firmware *fw;
- struct iwm_fw_img_desc *img_desc;
- struct iwm_fw_img_ver *ver;
- int ret = 0, fw_offset;
- u32 opcode_idx = 0, build_date;
- char *build_tag;
-
- ret = request_firmware(&fw, img_name, iwm_to_dev(iwm));
- if (ret) {
- IWM_ERR(iwm, "Request firmware failed");
- return ret;
- }
-
- IWM_DBG_FW(iwm, INFO, "Start to load FW %s\n", img_name);
-
- while (1) {
- fw_offset = iwm_fw_op_offset(iwm, fw,
- IWM_HDR_REC_OP_MEM_DESC,
- opcode_idx);
- if (fw_offset < 0)
- break;
-
- img_desc = (struct iwm_fw_img_desc *)(fw->data + fw_offset);
- ret = iwm_load_firmware_chunk(iwm, fw, img_desc);
- if (ret < 0)
- goto err_release_fw;
- opcode_idx++;
- }
-
- /* Read firmware version */
- fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_SW_VER, 0);
- if (fw_offset < 0)
- goto err_release_fw;
-
- ver = (struct iwm_fw_img_ver *)(fw->data + fw_offset);
-
- /* Read build tag */
- fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_TAG, 0);
- if (fw_offset < 0)
- goto err_release_fw;
-
- build_tag = (char *)(fw->data + fw_offset);
-
- /* Read build date */
- fw_offset = iwm_fw_op_offset(iwm, fw, IWM_HDR_REC_OP_BUILD_DATE, 0);
- if (fw_offset < 0)
- goto err_release_fw;
-
- build_date = *(u32 *)(fw->data + fw_offset);
-
- IWM_INFO(iwm, "%s:\n", img_name);
- IWM_INFO(iwm, "\tVersion: %02X.%02X\n", ver->major, ver->minor);
- IWM_INFO(iwm, "\tBuild tag: %s\n", build_tag);
- IWM_INFO(iwm, "\tBuild date: %x-%x-%x\n",
- IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date),
- IWM_BUILD_DAY(build_date));
-
- if (!strcmp(img_name, iwm->bus_ops->umac_name))
- sprintf(iwm->umac_version, "%02X.%02X",
- ver->major, ver->minor);
-
- if (!strcmp(img_name, iwm->bus_ops->lmac_name))
- sprintf(iwm->lmac_version, "%02X.%02X",
- ver->major, ver->minor);
-
- err_release_fw:
- release_firmware(fw);
-
- return ret;
-}
-
-static int iwm_load_umac(struct iwm_priv *iwm)
-{
- struct iwm_udma_nonwifi_cmd target_cmd;
- int ret;
-
- ret = iwm_load_img(iwm, iwm->bus_ops->umac_name);
- if (ret < 0)
- return ret;
-
- /* We've loaded the UMAC, we can tell the target to jump there */
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_JUMP;
- target_cmd.addr = cpu_to_le32(UMAC_MU_FW_INST_DATA_12_ADDR);
- target_cmd.op1_sz = 0;
- target_cmd.op2 = 0;
- target_cmd.handle_by_hw = 0;
- target_cmd.resp = 1 ;
- target_cmd.eop = 1;
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, NULL);
- if (ret < 0)
- IWM_ERR(iwm, "Couldn't send JMP command\n");
-
- return ret;
-}
-
-static int iwm_load_lmac(struct iwm_priv *iwm, const char *img_name)
-{
- int ret;
-
- ret = iwm_load_img(iwm, img_name);
- if (ret < 0)
- return ret;
-
- return iwm_send_umac_reset(iwm,
- cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_CLK_EN), 0);
-}
-
-static int iwm_init_calib(struct iwm_priv *iwm, unsigned long cfg_bitmap,
- unsigned long expected_bitmap, u8 rx_iq_cmd)
-{
- /* Read RX IQ calibration result from EEPROM */
- if (test_bit(rx_iq_cmd, &cfg_bitmap)) {
- iwm_store_rxiq_calib_result(iwm);
- set_bit(PHY_CALIBRATE_RX_IQ_CMD, &iwm->calib_done_map);
- }
-
- iwm_send_prio_table(iwm);
- iwm_send_init_calib_cfg(iwm, cfg_bitmap);
-
- while (iwm->calib_done_map != expected_bitmap) {
- if (iwm_notif_handle(iwm, CALIBRATION_RES_NOTIFICATION,
- IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT)) {
- IWM_DBG_FW(iwm, DBG, "Initial calibration timeout\n");
- return -ETIMEDOUT;
- }
-
- IWM_DBG_FW(iwm, DBG, "Got calibration result. calib_done_map: "
- "0x%lx, expected calibrations: 0x%lx\n",
- iwm->calib_done_map, expected_bitmap);
- }
-
- return 0;
-}
-
-/*
- * We currently have to load 3 FWs:
- * 1) The UMAC (Upper MAC).
- * 2) The calibration LMAC (Lower MAC).
- * We then send the calibration init command, so that the device can
- * run a first calibration round.
- * 3) The operational LMAC, which replaces the calibration one when it's
- * done with the first calibration round.
- *
- * Once those 3 FWs have been loaded, we send the periodic calibration
- * command, and then the device is available for regular 802.11 operations.
- */
-int iwm_load_fw(struct iwm_priv *iwm)
-{
- unsigned long init_calib_map, periodic_calib_map;
- unsigned long expected_calib_map;
- int ret;
-
- /* We first start downloading the UMAC */
- ret = iwm_load_umac(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "UMAC loading failed\n");
- return ret;
- }
-
- /* Handle UMAC_ALIVE notification */
- ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_ALIVE, IWM_SRC_UMAC,
- WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Handle UMAC_ALIVE failed: %d\n", ret);
- return ret;
- }
-
- /* UMAC is alive, we can download the calibration LMAC */
- ret = iwm_load_lmac(iwm, iwm->bus_ops->calib_lmac_name);
- if (ret) {
- IWM_ERR(iwm, "Calibration LMAC loading failed\n");
- return ret;
- }
-
- /* Handle UMAC_INIT_COMPLETE notification */
- ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE,
- IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Handle INIT_COMPLETE failed for calibration "
- "LMAC: %d\n", ret);
- return ret;
- }
-
- /* Read EEPROM data */
- ret = iwm_eeprom_init(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't init eeprom array\n");
- return ret;
- }
-
- init_calib_map = iwm->conf.calib_map & IWM_CALIB_MAP_INIT_MSK;
- expected_calib_map = iwm->conf.expected_calib_map &
- IWM_CALIB_MAP_INIT_MSK;
- periodic_calib_map = IWM_CALIB_MAP_PER_LMAC(iwm->conf.calib_map);
-
- ret = iwm_init_calib(iwm, init_calib_map, expected_calib_map,
- CALIB_CFG_RX_IQ_IDX);
- if (ret < 0) {
- /* Let's try the old way */
- ret = iwm_init_calib(iwm, expected_calib_map,
- expected_calib_map,
- PHY_CALIBRATE_RX_IQ_CMD);
- if (ret < 0) {
- IWM_ERR(iwm, "Calibration result timeout\n");
- goto out;
- }
- }
-
- /* Handle LMAC CALIBRATION_COMPLETE notification */
- ret = iwm_notif_handle(iwm, CALIBRATION_COMPLETE_NOTIFICATION,
- IWM_SRC_LMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Wait for CALIBRATION_COMPLETE timeout\n");
- goto out;
- }
-
- IWM_INFO(iwm, "LMAC calibration done: 0x%lx\n", iwm->calib_done_map);
-
- iwm_send_umac_reset(iwm, cpu_to_le32(UMAC_RST_CTRL_FLG_LARC_RESET), 1);
-
- ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_RESET, IWM_SRC_UMAC,
- WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Wait for UMAC RESET timeout\n");
- goto out;
- }
-
- /* Download the operational LMAC */
- ret = iwm_load_lmac(iwm, iwm->bus_ops->lmac_name);
- if (ret) {
- IWM_ERR(iwm, "LMAC loading failed\n");
- goto out;
- }
-
- ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_INIT_COMPLETE,
- IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Handle INIT_COMPLETE failed for LMAC: %d\n", ret);
- goto out;
- }
-
- iwm_send_prio_table(iwm);
- iwm_send_calib_results(iwm);
- iwm_send_periodic_calib_cfg(iwm, periodic_calib_map);
- iwm_send_ct_kill_cfg(iwm, iwm->conf.ct_kill_entry,
- iwm->conf.ct_kill_exit);
-
- return 0;
-
- out:
- iwm_eeprom_exit(iwm);
- return ret;
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.h b/drivers/net/wireless/iwmc3200wifi/fw.h
deleted file mode 100644
index c70a3b40dad3..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/fw.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_FW_H__
-#define __IWM_FW_H__
-
-/**
- * struct iwm_fw_hdr_rec - An iwm firmware image is a
- * concatenation of various records. Each of them is
- * defined by an ID (aka op code), a length, and the
- * actual data.
- * @op_code: The record ID, see IWM_HDR_REC_OP_*
- *
- * @len: The record payload length
- *
- * @buf: The record payload
- */
-struct iwm_fw_hdr_rec {
- u16 op_code;
- u16 len;
- u8 buf[0];
-};
-
-/* Header's definitions */
-#define IWM_HDR_LEN (512)
-#define IWM_HDR_BARKER_LEN (16)
-
-/* Header's opcodes */
-#define IWM_HDR_REC_OP_INVALID (0x00)
-#define IWM_HDR_REC_OP_BUILD_DATE (0x01)
-#define IWM_HDR_REC_OP_BUILD_TAG (0x02)
-#define IWM_HDR_REC_OP_SW_VER (0x03)
-#define IWM_HDR_REC_OP_HW_SKU (0x04)
-#define IWM_HDR_REC_OP_BUILD_OPT (0x05)
-#define IWM_HDR_REC_OP_MEM_DESC (0x06)
-#define IWM_HDR_REC_USERDEFS (0x07)
-
-/* Header's records length (in bytes) */
-#define IWM_HDR_REC_LEN_BUILD_DATE (4)
-#define IWM_HDR_REC_LEN_BUILD_TAG (64)
-#define IWM_HDR_REC_LEN_SW_VER (4)
-#define IWM_HDR_REC_LEN_HW_SKU (4)
-#define IWM_HDR_REC_LEN_BUILD_OPT (4)
-#define IWM_HDR_REC_LEN_MEM_DESC (12)
-#define IWM_HDR_REC_LEN_USERDEF (64)
-
-#define IWM_BUILD_YEAR(date) ((date >> 16) & 0xffff)
-#define IWM_BUILD_MONTH(date) ((date >> 8) & 0xff)
-#define IWM_BUILD_DAY(date) (date & 0xff)
-
-struct iwm_fw_img_desc {
- u32 offset;
- u32 address;
- u32 length;
-};
-
-struct iwm_fw_img_ver {
- u8 minor;
- u8 major;
- u16 reserved;
-};
-
-int iwm_load_fw(struct iwm_priv *iwm);
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
deleted file mode 100644
index 1cabcb39643f..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-/*
- * Hardware Abstraction Layer for iwm.
- *
- * This file mostly defines an abstraction API for
- * sending various commands to the target.
- *
- * We have 2 types of commands: wifi and non-wifi ones.
- *
- * - wifi commands:
- * They are used for sending LMAC and UMAC commands,
- * and thus are the most commonly used ones.
- * There are 2 different wifi command types, the regular
- * one and the LMAC one. The former is used to send
- * UMAC commands (see UMAC_CMD_OPCODE_* from umac.h)
- * while the latter is used for sending commands to the
- * LMAC. If you look at LMAC commands you'll se that they
- * are actually regular iwlwifi target commands encapsulated
- * into a special UMAC command called UMAC passthrough.
- * This is due to the fact the host talks exclusively
- * to the UMAC and so there needs to be a special UMAC
- * command for talking to the LMAC.
- * This is how a wifi command is laid out:
- * ------------------------
- * | iwm_udma_out_wifi_hdr |
- * ------------------------
- * | SW meta_data (32 bits) |
- * ------------------------
- * | iwm_dev_cmd_hdr |
- * ------------------------
- * | payload |
- * | .... |
- *
- * - non-wifi, or general commands:
- * Those commands are handled by the device's bootrom,
- * and are typically sent when the UMAC and the LMAC
- * are not yet available.
- * * This is how a non-wifi command is laid out:
- * ---------------------------
- * | iwm_udma_out_nonwifi_hdr |
- * ---------------------------
- * | payload |
- * | .... |
-
- *
- * All the commands start with a UDMA header, which is
- * basically a 32 bits field. The 4 LSB there define
- * an opcode that allows the target to differentiate
- * between wifi (opcode is 0xf) and non-wifi commands
- * (opcode is [0..0xe]).
- *
- * When a command (wifi or non-wifi) is supposed to receive
- * an answer, we queue the command buffer. When we do receive
- * a command response from the UMAC, we go through the list
- * of pending command, and pass both the command and the answer
- * to the rx handler. Each command is sent with a unique
- * sequence id, and the answer is sent with the same one. This
- * is how we're supposed to match an answer with its command.
- * See rx.c:iwm_rx_handle_[non]wifi() and iwm_get_pending_[non]wifi()
- * for the implementation details.
- */
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#include "iwm.h"
-#include "bus.h"
-#include "hal.h"
-#include "umac.h"
-#include "debug.h"
-#include "trace.h"
-
-static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm,
- struct iwm_nonwifi_cmd *cmd,
- struct iwm_udma_nonwifi_cmd *udma_cmd)
-{
- INIT_LIST_HEAD(&cmd->pending);
-
- spin_lock(&iwm->cmd_lock);
-
- cmd->resp_received = 0;
-
- cmd->seq_num = iwm->nonwifi_seq_num;
- udma_cmd->seq_num = cpu_to_le16(cmd->seq_num);
-
- iwm->nonwifi_seq_num++;
- iwm->nonwifi_seq_num %= UMAC_NONWIFI_SEQ_NUM_MAX;
-
- if (udma_cmd->resp)
- list_add_tail(&cmd->pending, &iwm->nonwifi_pending_cmd);
-
- spin_unlock(&iwm->cmd_lock);
-
- cmd->buf.start = cmd->buf.payload;
- cmd->buf.len = 0;
-
- memcpy(&cmd->udma_cmd, udma_cmd, sizeof(*udma_cmd));
-
- return cmd->seq_num;
-}
-
-u16 iwm_alloc_wifi_cmd_seq(struct iwm_priv *iwm)
-{
- u16 seq_num = iwm->wifi_seq_num;
-
- iwm->wifi_seq_num++;
- iwm->wifi_seq_num %= UMAC_WIFI_SEQ_NUM_MAX;
-
- return seq_num;
-}
-
-static void iwm_wifi_cmd_init(struct iwm_priv *iwm,
- struct iwm_wifi_cmd *cmd,
- struct iwm_udma_wifi_cmd *udma_cmd,
- struct iwm_umac_cmd *umac_cmd,
- struct iwm_lmac_cmd *lmac_cmd,
- u16 payload_size)
-{
- INIT_LIST_HEAD(&cmd->pending);
-
- spin_lock(&iwm->cmd_lock);
-
- cmd->seq_num = iwm_alloc_wifi_cmd_seq(iwm);
- umac_cmd->seq_num = cpu_to_le16(cmd->seq_num);
-
- if (umac_cmd->resp)
- list_add_tail(&cmd->pending, &iwm->wifi_pending_cmd);
-
- spin_unlock(&iwm->cmd_lock);
-
- cmd->buf.start = cmd->buf.payload;
- cmd->buf.len = 0;
-
- if (lmac_cmd) {
- cmd->buf.start -= sizeof(struct iwm_lmac_hdr);
-
- lmac_cmd->seq_num = cpu_to_le16(cmd->seq_num);
- lmac_cmd->count = cpu_to_le16(payload_size);
-
- memcpy(&cmd->lmac_cmd, lmac_cmd, sizeof(*lmac_cmd));
-
- umac_cmd->count = cpu_to_le16(sizeof(struct iwm_lmac_hdr));
- } else
- umac_cmd->count = 0;
-
- umac_cmd->count = cpu_to_le16(payload_size +
- le16_to_cpu(umac_cmd->count));
- udma_cmd->count = cpu_to_le16(sizeof(struct iwm_umac_fw_cmd_hdr) +
- le16_to_cpu(umac_cmd->count));
-
- memcpy(&cmd->udma_cmd, udma_cmd, sizeof(*udma_cmd));
- memcpy(&cmd->umac_cmd, umac_cmd, sizeof(*umac_cmd));
-}
-
-void iwm_cmd_flush(struct iwm_priv *iwm)
-{
- struct iwm_wifi_cmd *wcmd, *wnext;
- struct iwm_nonwifi_cmd *nwcmd, *nwnext;
-
- list_for_each_entry_safe(wcmd, wnext, &iwm->wifi_pending_cmd, pending) {
- list_del(&wcmd->pending);
- kfree(wcmd);
- }
-
- list_for_each_entry_safe(nwcmd, nwnext, &iwm->nonwifi_pending_cmd,
- pending) {
- list_del(&nwcmd->pending);
- kfree(nwcmd);
- }
-}
-
-struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
-{
- struct iwm_wifi_cmd *cmd;
-
- list_for_each_entry(cmd, &iwm->wifi_pending_cmd, pending)
- if (cmd->seq_num == seq_num) {
- list_del(&cmd->pending);
- return cmd;
- }
-
- return NULL;
-}
-
-struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
- u8 seq_num, u8 cmd_opcode)
-{
- struct iwm_nonwifi_cmd *cmd;
-
- list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
- if ((cmd->seq_num == seq_num) &&
- (cmd->udma_cmd.opcode == cmd_opcode) &&
- (cmd->resp_received)) {
- list_del(&cmd->pending);
- return cmd;
- }
-
- return NULL;
-}
-
-static void iwm_build_udma_nonwifi_hdr(struct iwm_priv *iwm,
- struct iwm_udma_out_nonwifi_hdr *hdr,
- struct iwm_udma_nonwifi_cmd *cmd)
-{
- memset(hdr, 0, sizeof(*hdr));
-
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE, cmd->opcode);
- SET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP, cmd->resp);
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, 1);
- SET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW,
- cmd->handle_by_hw);
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_SIGNATURE, UMAC_HDI_OUT_SIGNATURE);
- SET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM,
- le16_to_cpu(cmd->seq_num));
-
- hdr->addr = cmd->addr;
- hdr->op1_sz = cmd->op1_sz;
- hdr->op2 = cmd->op2;
-}
-
-static int iwm_send_udma_nonwifi_cmd(struct iwm_priv *iwm,
- struct iwm_nonwifi_cmd *cmd)
-{
- struct iwm_udma_out_nonwifi_hdr *udma_hdr;
- struct iwm_nonwifi_cmd_buff *buf;
- struct iwm_udma_nonwifi_cmd *udma_cmd = &cmd->udma_cmd;
-
- buf = &cmd->buf;
-
- buf->start -= sizeof(struct iwm_umac_nonwifi_out_hdr);
- buf->len += sizeof(struct iwm_umac_nonwifi_out_hdr);
-
- udma_hdr = (struct iwm_udma_out_nonwifi_hdr *)(buf->start);
-
- iwm_build_udma_nonwifi_hdr(iwm, udma_hdr, udma_cmd);
-
- IWM_DBG_CMD(iwm, DBG,
- "Send UDMA nonwifi cmd: opcode = 0x%x, resp = 0x%x, "
- "hw = 0x%x, seqnum = %d, addr = 0x%x, op1_sz = 0x%x, "
- "op2 = 0x%x\n", udma_cmd->opcode, udma_cmd->resp,
- udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr,
- udma_cmd->op1_sz, udma_cmd->op2);
-
- trace_iwm_tx_nonwifi_cmd(iwm, udma_hdr);
- return iwm_bus_send_chunk(iwm, buf->start, buf->len);
-}
-
-void iwm_udma_wifi_hdr_set_eop(struct iwm_priv *iwm, u8 *buf, u8 eop)
-{
- struct iwm_udma_out_wifi_hdr *hdr = (struct iwm_udma_out_wifi_hdr *)buf;
-
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, eop);
-}
-
-void iwm_build_udma_wifi_hdr(struct iwm_priv *iwm,
- struct iwm_udma_out_wifi_hdr *hdr,
- struct iwm_udma_wifi_cmd *cmd)
-{
- memset(hdr, 0, sizeof(*hdr));
-
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE, UMAC_HDI_OUT_OPCODE_WIFI);
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT, cmd->eop);
- SET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_SIGNATURE, UMAC_HDI_OUT_SIGNATURE);
-
- SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_BYTE_COUNT,
- le16_to_cpu(cmd->count));
- SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_CREDIT_GRP, cmd->credit_group);
- SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_RATID, cmd->ra_tid);
- SET_VAL32(hdr->meta_data, UMAC_HDI_OUT_LMAC_OFFSET, cmd->lmac_offset);
-}
-
-void iwm_build_umac_hdr(struct iwm_priv *iwm,
- struct iwm_umac_fw_cmd_hdr *hdr,
- struct iwm_umac_cmd *cmd)
-{
- memset(hdr, 0, sizeof(*hdr));
-
- SET_VAL32(hdr->meta_data, UMAC_FW_CMD_BYTE_COUNT,
- le16_to_cpu(cmd->count));
- SET_VAL32(hdr->meta_data, UMAC_FW_CMD_TX_STA_COLOR, cmd->color);
- SET_VAL8(hdr->cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ, cmd->resp);
-
- hdr->cmd.cmd = cmd->id;
- hdr->cmd.seq_num = cmd->seq_num;
-}
-
-static int iwm_send_udma_wifi_cmd(struct iwm_priv *iwm,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_wifi_out_hdr *umac_hdr;
- struct iwm_wifi_cmd_buff *buf;
- struct iwm_udma_wifi_cmd *udma_cmd = &cmd->udma_cmd;
- struct iwm_umac_cmd *umac_cmd = &cmd->umac_cmd;
- int ret;
-
- buf = &cmd->buf;
-
- buf->start -= sizeof(struct iwm_umac_wifi_out_hdr);
- buf->len += sizeof(struct iwm_umac_wifi_out_hdr);
-
- umac_hdr = (struct iwm_umac_wifi_out_hdr *)(buf->start);
-
- iwm_build_udma_wifi_hdr(iwm, &umac_hdr->hw_hdr, udma_cmd);
- iwm_build_umac_hdr(iwm, &umac_hdr->sw_hdr, umac_cmd);
-
- IWM_DBG_CMD(iwm, DBG,
- "Send UDMA wifi cmd: opcode = 0x%x, UMAC opcode = 0x%x, "
- "eop = 0x%x, count = 0x%x, credit_group = 0x%x, "
- "ra_tid = 0x%x, lmac_offset = 0x%x, seqnum = %d\n",
- UMAC_HDI_OUT_OPCODE_WIFI, umac_cmd->id,
- udma_cmd->eop, udma_cmd->count, udma_cmd->credit_group,
- udma_cmd->ra_tid, udma_cmd->lmac_offset, cmd->seq_num);
-
- if (umac_cmd->id == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH)
- IWM_DBG_CMD(iwm, DBG, "\tLMAC opcode: 0x%x\n",
- cmd->lmac_cmd.id);
-
- ret = iwm_tx_credit_alloc(iwm, udma_cmd->credit_group, buf->len);
-
- /* We keep sending UMAC reset regardless of the command credits.
- * The UMAC is supposed to be reset anyway and the Tx credits are
- * reinitialized afterwards. If we are lucky, the reset could
- * still be done even though we have run out of credits for the
- * command pool at this moment.*/
- if (ret && (umac_cmd->id != UMAC_CMD_OPCODE_RESET)) {
- IWM_DBG_TX(iwm, DBG, "Failed to alloc tx credit for cmd %d\n",
- umac_cmd->id);
- return ret;
- }
-
- trace_iwm_tx_wifi_cmd(iwm, umac_hdr);
- return iwm_bus_send_chunk(iwm, buf->start, buf->len);
-}
-
-/* target_cmd a.k.a udma_nonwifi_cmd can be sent when UMAC is not available */
-int iwm_hal_send_target_cmd(struct iwm_priv *iwm,
- struct iwm_udma_nonwifi_cmd *udma_cmd,
- const void *payload)
-{
- struct iwm_nonwifi_cmd *cmd;
- int ret, seq_num;
-
- cmd = kzalloc(sizeof(struct iwm_nonwifi_cmd), GFP_KERNEL);
- if (!cmd) {
- IWM_ERR(iwm, "Couldn't alloc memory for hal cmd\n");
- return -ENOMEM;
- }
-
- seq_num = iwm_nonwifi_cmd_init(iwm, cmd, udma_cmd);
-
- if (cmd->udma_cmd.opcode == UMAC_HDI_OUT_OPCODE_WRITE ||
- cmd->udma_cmd.opcode == UMAC_HDI_OUT_OPCODE_WRITE_PERSISTENT) {
- cmd->buf.len = le32_to_cpu(cmd->udma_cmd.op1_sz);
- memcpy(&cmd->buf.payload, payload, cmd->buf.len);
- }
-
- ret = iwm_send_udma_nonwifi_cmd(iwm, cmd);
-
- if (!udma_cmd->resp)
- kfree(cmd);
-
- if (ret < 0)
- return ret;
-
- return seq_num;
-}
-
-static void iwm_build_lmac_hdr(struct iwm_priv *iwm, struct iwm_lmac_hdr *hdr,
- struct iwm_lmac_cmd *cmd)
-{
- memset(hdr, 0, sizeof(*hdr));
-
- hdr->id = cmd->id;
- hdr->flags = 0; /* Is this ever used? */
- hdr->seq_num = cmd->seq_num;
-}
-
-/*
- * iwm_hal_send_host_cmd(): sends commands to the UMAC or the LMAC.
- * Sending command to the LMAC is equivalent to sending a
- * regular UMAC command with the LMAC passthrough or the LMAC
- * wrapper UMAC command IDs.
- */
-int iwm_hal_send_host_cmd(struct iwm_priv *iwm,
- struct iwm_udma_wifi_cmd *udma_cmd,
- struct iwm_umac_cmd *umac_cmd,
- struct iwm_lmac_cmd *lmac_cmd,
- const void *payload, u16 payload_size)
-{
- struct iwm_wifi_cmd *cmd;
- struct iwm_lmac_hdr *hdr;
- int lmac_hdr_len = 0;
- int ret;
-
- cmd = kzalloc(sizeof(struct iwm_wifi_cmd), GFP_KERNEL);
- if (!cmd) {
- IWM_ERR(iwm, "Couldn't alloc memory for wifi hal cmd\n");
- return -ENOMEM;
- }
-
- iwm_wifi_cmd_init(iwm, cmd, udma_cmd, umac_cmd, lmac_cmd, payload_size);
-
- if (lmac_cmd) {
- hdr = (struct iwm_lmac_hdr *)(cmd->buf.start);
-
- iwm_build_lmac_hdr(iwm, hdr, &cmd->lmac_cmd);
- lmac_hdr_len = sizeof(struct iwm_lmac_hdr);
- }
-
- memcpy(cmd->buf.payload, payload, payload_size);
- cmd->buf.len = le16_to_cpu(umac_cmd->count);
-
- ret = iwm_send_udma_wifi_cmd(iwm, cmd);
-
- /* We free the cmd if we're not expecting any response */
- if (!umac_cmd->resp)
- kfree(cmd);
- return ret;
-}
-
-/*
- * iwm_hal_send_umac_cmd(): This is a special case for
- * iwm_hal_send_host_cmd() to send direct UMAC cmd (without
- * LMAC involved).
- */
-int iwm_hal_send_umac_cmd(struct iwm_priv *iwm,
- struct iwm_udma_wifi_cmd *udma_cmd,
- struct iwm_umac_cmd *umac_cmd,
- const void *payload, u16 payload_size)
-{
- return iwm_hal_send_host_cmd(iwm, udma_cmd, umac_cmd, NULL,
- payload, payload_size);
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.h b/drivers/net/wireless/iwmc3200wifi/hal.h
deleted file mode 100644
index c20936d9b6b7..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/hal.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef _IWM_HAL_H_
-#define _IWM_HAL_H_
-
-#include "umac.h"
-
-#define GET_VAL8(s, name) ((s >> name##_POS) & name##_SEED)
-#define GET_VAL16(s, name) ((le16_to_cpu(s) >> name##_POS) & name##_SEED)
-#define GET_VAL32(s, name) ((le32_to_cpu(s) >> name##_POS) & name##_SEED)
-
-#define SET_VAL8(s, name, val) \
-do { \
- s = (s & ~(name##_SEED << name##_POS)) | \
- ((val & name##_SEED) << name##_POS); \
-} while (0)
-
-#define SET_VAL16(s, name, val) \
-do { \
- s = cpu_to_le16((le16_to_cpu(s) & ~(name##_SEED << name##_POS)) | \
- ((val & name##_SEED) << name##_POS)); \
-} while (0)
-
-#define SET_VAL32(s, name, val) \
-do { \
- s = cpu_to_le32((le32_to_cpu(s) & ~(name##_SEED << name##_POS)) | \
- ((val & name##_SEED) << name##_POS)); \
-} while (0)
-
-
-#define UDMA_UMAC_INIT { .eop = 1, \
- .credit_group = 0x4, \
- .ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD, \
- .lmac_offset = 0 }
-#define UDMA_LMAC_INIT { .eop = 1, \
- .credit_group = 0x4, \
- .ra_tid = UMAC_HDI_ACT_TBL_IDX_HOST_CMD, \
- .lmac_offset = 4 }
-
-
-/* UDMA IN OP CODE -- cmd bits [3:0] */
-#define UDMA_HDI_IN_NW_CMD_OPCODE_POS 0
-#define UDMA_HDI_IN_NW_CMD_OPCODE_SEED 0xF
-
-#define UDMA_IN_OPCODE_GENERAL_RESP 0x0
-#define UDMA_IN_OPCODE_READ_RESP 0x1
-#define UDMA_IN_OPCODE_WRITE_RESP 0x2
-#define UDMA_IN_OPCODE_PERS_WRITE_RESP 0x5
-#define UDMA_IN_OPCODE_PERS_READ_RESP 0x6
-#define UDMA_IN_OPCODE_RD_MDFY_WR_RESP 0x7
-#define UDMA_IN_OPCODE_EP_MNGMT_MSG 0x8
-#define UDMA_IN_OPCODE_CRDT_CHNG_MSG 0x9
-#define UDMA_IN_OPCODE_CNTRL_DATABASE_MSG 0xA
-#define UDMA_IN_OPCODE_SW_MSG 0xB
-#define UDMA_IN_OPCODE_WIFI 0xF
-#define UDMA_IN_OPCODE_WIFI_LMAC 0x1F
-#define UDMA_IN_OPCODE_WIFI_UMAC 0x2F
-
-/* HW API: udma_hdi_nonwifi API (OUT and IN) */
-
-/* iwm_udma_nonwifi_cmd request response -- bits [9:9] */
-#define UDMA_HDI_OUT_NW_CMD_RESP_POS 9
-#define UDMA_HDI_OUT_NW_CMD_RESP_SEED 0x1
-
-/* iwm_udma_nonwifi_cmd handle by HW -- bits [11:11] */
-#define UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW_POS 11
-#define UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW_SEED 0x1
-
-/* iwm_udma_nonwifi_cmd sequence-number -- bits [12:15] */
-#define UDMA_HDI_OUT_NW_CMD_SEQ_NUM_POS 12
-#define UDMA_HDI_OUT_NW_CMD_SEQ_NUM_SEED 0xF
-
-/* UDMA IN Non-WIFI HW sequence number -- bits [12:15] */
-#define UDMA_IN_NW_HW_SEQ_NUM_POS 12
-#define UDMA_IN_NW_HW_SEQ_NUM_SEED 0xF
-
-/* UDMA IN Non-WIFI HW signature -- bits [16:31] */
-#define UDMA_IN_NW_HW_SIG_POS 16
-#define UDMA_IN_NW_HW_SIG_SEED 0xFFFF
-
-/* fixed signature */
-#define UDMA_IN_NW_HW_SIG 0xCBBC
-
-/* UDMA IN Non-WIFI HW block length -- bits [32:35] */
-#define UDMA_IN_NW_HW_LENGTH_SEED 0xF
-#define UDMA_IN_NW_HW_LENGTH_POS 32
-
-/* End of HW API: udma_hdi_nonwifi API (OUT and IN) */
-
-#define IWM_SDIO_FW_MAX_CHUNK_SIZE 2032
-#define IWM_MAX_WIFI_HEADERS_SIZE 32
-#define IWM_MAX_NONWIFI_HEADERS_SIZE 16
-#define IWM_MAX_NONWIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
- IWM_MAX_NONWIFI_HEADERS_SIZE)
-#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
- IWM_MAX_WIFI_HEADERS_SIZE)
-
-#define IWM_HAL_CONCATENATE_BUF_SIZE (32 * 1024)
-
-struct iwm_wifi_cmd_buff {
- u16 len;
- u8 *start;
- u8 hdr[IWM_MAX_WIFI_HEADERS_SIZE];
- u8 payload[IWM_MAX_WIFI_CMD_BUFF_SIZE];
-};
-
-struct iwm_nonwifi_cmd_buff {
- u16 len;
- u8 *start;
- u8 hdr[IWM_MAX_NONWIFI_HEADERS_SIZE];
- u8 payload[IWM_MAX_NONWIFI_CMD_BUFF_SIZE];
-};
-
-struct iwm_udma_nonwifi_cmd {
- u8 opcode;
- u8 eop;
- u8 resp;
- u8 handle_by_hw;
- __le32 addr;
- __le32 op1_sz;
- __le32 op2;
- __le16 seq_num;
-};
-
-struct iwm_udma_wifi_cmd {
- __le16 count;
- u8 eop;
- u8 credit_group;
- u8 ra_tid;
- u8 lmac_offset;
-};
-
-struct iwm_umac_cmd {
- u8 id;
- __le16 count;
- u8 resp;
- __le16 seq_num;
- u8 color;
-};
-
-struct iwm_lmac_cmd {
- u8 id;
- __le16 count;
- u8 resp;
- __le16 seq_num;
-};
-
-struct iwm_nonwifi_cmd {
- u16 seq_num;
- bool resp_received;
- struct list_head pending;
- struct iwm_udma_nonwifi_cmd udma_cmd;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_lmac_cmd lmac_cmd;
- struct iwm_nonwifi_cmd_buff buf;
- u32 flags;
-};
-
-struct iwm_wifi_cmd {
- u16 seq_num;
- struct list_head pending;
- struct iwm_udma_wifi_cmd udma_cmd;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_lmac_cmd lmac_cmd;
- struct iwm_wifi_cmd_buff buf;
- u32 flags;
-};
-
-void iwm_cmd_flush(struct iwm_priv *iwm);
-
-struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm,
- u16 seq_num);
-struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
- u8 seq_num, u8 cmd_opcode);
-
-
-int iwm_hal_send_target_cmd(struct iwm_priv *iwm,
- struct iwm_udma_nonwifi_cmd *ucmd,
- const void *payload);
-
-int iwm_hal_send_host_cmd(struct iwm_priv *iwm,
- struct iwm_udma_wifi_cmd *udma_cmd,
- struct iwm_umac_cmd *umac_cmd,
- struct iwm_lmac_cmd *lmac_cmd,
- const void *payload, u16 payload_size);
-
-int iwm_hal_send_umac_cmd(struct iwm_priv *iwm,
- struct iwm_udma_wifi_cmd *udma_cmd,
- struct iwm_umac_cmd *umac_cmd,
- const void *payload, u16 payload_size);
-
-u16 iwm_alloc_wifi_cmd_seq(struct iwm_priv *iwm);
-
-void iwm_udma_wifi_hdr_set_eop(struct iwm_priv *iwm, u8 *buf, u8 eop);
-void iwm_build_udma_wifi_hdr(struct iwm_priv *iwm,
- struct iwm_udma_out_wifi_hdr *hdr,
- struct iwm_udma_wifi_cmd *cmd);
-void iwm_build_umac_hdr(struct iwm_priv *iwm,
- struct iwm_umac_fw_cmd_hdr *hdr,
- struct iwm_umac_cmd *cmd);
-#endif /* _IWM_HAL_H_ */
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
deleted file mode 100644
index 51d7efa15ae6..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_H__
-#define __IWM_H__
-
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-#include <net/cfg80211.h>
-
-#include "debug.h"
-#include "hal.h"
-#include "umac.h"
-#include "lmac.h"
-#include "eeprom.h"
-#include "trace.h"
-
-#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation"
-#define IWM_AUTHOR "<ilw@linux.intel.com>"
-
-#define IWM_SRC_LMAC UMAC_HDI_IN_SOURCE_FHRX
-#define IWM_SRC_UDMA UMAC_HDI_IN_SOURCE_UDMA
-#define IWM_SRC_UMAC UMAC_HDI_IN_SOURCE_FW
-#define IWM_SRC_NUM 3
-
-#define IWM_POWER_INDEX_MIN 0
-#define IWM_POWER_INDEX_MAX 5
-#define IWM_POWER_INDEX_DEFAULT 3
-
-struct iwm_conf {
- u32 sdio_ior_timeout;
- unsigned long calib_map;
- unsigned long expected_calib_map;
- u8 ct_kill_entry;
- u8 ct_kill_exit;
- bool reset_on_fatal_err;
- bool auto_connect;
- bool wimax_not_present;
- bool enable_qos;
- u32 mode;
-
- u32 power_index;
- u32 frag_threshold;
- u32 rts_threshold;
- bool cts_to_self;
-
- u32 assoc_timeout;
- u32 roam_timeout;
- u32 wireless_mode;
-
- u8 ibss_band;
- u8 ibss_channel;
-
- u8 mac_addr[ETH_ALEN];
-};
-
-enum {
- COEX_MODE_SA = 1,
- COEX_MODE_XOR,
- COEX_MODE_CM,
- COEX_MODE_MAX,
-};
-
-struct iwm_if_ops;
-struct iwm_wifi_cmd;
-
-struct pool_entry {
- int id; /* group id */
- int sid; /* super group id */
- int min_pages; /* min capacity in pages */
- int max_pages; /* max capacity in pages */
- int alloc_pages; /* allocated # of pages. incresed by driver */
- int total_freed_pages; /* total freed # of pages. incresed by UMAC */
-};
-
-struct spool_entry {
- int id;
- int max_pages;
- int alloc_pages;
-};
-
-struct iwm_tx_credit {
- spinlock_t lock;
- int pool_nr;
- unsigned long full_pools_map; /* bitmap for # of filled tx pools */
- struct pool_entry pools[IWM_MACS_OUT_GROUPS];
- struct spool_entry spools[IWM_MACS_OUT_SGROUPS];
-};
-
-struct iwm_notif {
- struct list_head pending;
- u32 cmd_id;
- void *cmd;
- u8 src;
- void *buf;
- unsigned long buf_size;
-};
-
-struct iwm_tid_info {
- __le16 last_seq_num;
- bool stopped;
- struct mutex mutex;
-};
-
-struct iwm_sta_info {
- u8 addr[ETH_ALEN];
- bool valid;
- bool qos;
- u8 color;
- struct iwm_tid_info tid_info[IWM_UMAC_TID_NR];
-};
-
-struct iwm_tx_info {
- u8 sta;
- u8 color;
- u8 tid;
-};
-
-struct iwm_rx_info {
- unsigned long rx_size;
- unsigned long rx_buf_size;
-};
-
-#define IWM_NUM_KEYS 4
-
-struct iwm_umac_key_hdr {
- u8 mac[ETH_ALEN];
- u8 key_idx;
- u8 multicast; /* BCast encrypt & BCast decrypt of frames FROM mac */
-} __packed;
-
-struct iwm_key {
- struct iwm_umac_key_hdr hdr;
- u32 cipher;
- u8 key[WLAN_MAX_KEY_LEN];
- u8 seq[IW_ENCODE_SEQ_MAX_SIZE];
- int key_len;
- int seq_len;
-};
-
-#define IWM_RX_ID_HASH 0xff
-#define IWM_RX_ID_GET_HASH(id) ((id) % IWM_RX_ID_HASH)
-
-#define IWM_STA_TABLE_NUM 16
-#define IWM_TX_LIST_SIZE 64
-#define IWM_RX_LIST_SIZE 256
-
-#define IWM_SCAN_ID_MAX 0xff
-
-#define IWM_STATUS_READY 0
-#define IWM_STATUS_SCANNING 1
-#define IWM_STATUS_SCAN_ABORTING 2
-#define IWM_STATUS_SME_CONNECTING 3
-#define IWM_STATUS_ASSOCIATED 4
-#define IWM_STATUS_RESETTING 5
-
-struct iwm_tx_queue {
- int id;
- struct sk_buff_head queue;
- struct sk_buff_head stopped_queue;
- spinlock_t lock;
- struct workqueue_struct *wq;
- struct work_struct worker;
- u8 concat_buf[IWM_HAL_CONCATENATE_BUF_SIZE];
- int concat_count;
- u8 *concat_ptr;
-};
-
-/* Queues 0 ~ 3 for AC data, 5 for iPAN */
-#define IWM_TX_QUEUES 5
-#define IWM_TX_DATA_QUEUES 4
-#define IWM_TX_CMD_QUEUE 4
-
-struct iwm_bss_info {
- struct list_head node;
- struct cfg80211_bss *cfg_bss;
- struct iwm_umac_notif_bss_info *bss;
-};
-
-typedef int (*iwm_handler)(struct iwm_priv *priv, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd);
-
-#define IWM_WATCHDOG_PERIOD (6 * HZ)
-
-struct iwm_priv {
- struct wireless_dev *wdev;
- struct iwm_if_ops *bus_ops;
-
- struct iwm_conf conf;
-
- unsigned long status;
-
- struct list_head pending_notif;
- wait_queue_head_t notif_queue;
-
- wait_queue_head_t nonwifi_queue;
-
- unsigned long calib_done_map;
- struct {
- u8 *buf;
- u32 size;
- } calib_res[CALIBRATION_CMD_NUM];
-
- struct iwm_umac_profile *umac_profile;
- bool umac_profile_active;
-
- u8 bssid[ETH_ALEN];
- u8 channel;
- u16 rate;
- u32 txpower;
-
- struct iwm_sta_info sta_table[IWM_STA_TABLE_NUM];
- struct list_head bss_list;
-
- void (*nonwifi_rx_handlers[UMAC_HDI_IN_OPCODE_NONWIFI_MAX])
- (struct iwm_priv *priv, u8 *buf, unsigned long buf_size);
-
- const iwm_handler *umac_handlers;
- const iwm_handler *lmac_handlers;
- DECLARE_BITMAP(lmac_handler_map, LMAC_COMMAND_ID_NUM);
- DECLARE_BITMAP(umac_handler_map, LMAC_COMMAND_ID_NUM);
- DECLARE_BITMAP(udma_handler_map, LMAC_COMMAND_ID_NUM);
-
- struct list_head wifi_pending_cmd;
- struct list_head nonwifi_pending_cmd;
- u16 wifi_seq_num;
- u8 nonwifi_seq_num;
- spinlock_t cmd_lock;
-
- u32 core_enabled;
-
- u8 scan_id;
- struct cfg80211_scan_request *scan_request;
-
- struct sk_buff_head rx_list;
- struct list_head rx_tickets;
- spinlock_t ticket_lock;
- struct list_head rx_packets[IWM_RX_ID_HASH];
- spinlock_t packet_lock[IWM_RX_ID_HASH];
- struct workqueue_struct *rx_wq;
- struct work_struct rx_worker;
-
- struct iwm_tx_credit tx_credit;
- struct iwm_tx_queue txq[IWM_TX_QUEUES];
-
- struct iwm_key keys[IWM_NUM_KEYS];
- s8 default_key;
-
- DECLARE_BITMAP(wifi_ntfy, WIFI_IF_NTFY_MAX);
- wait_queue_head_t wifi_ntfy_queue;
-
- wait_queue_head_t mlme_queue;
-
- struct iw_statistics wstats;
- struct delayed_work stats_request;
- struct delayed_work disconnect;
- struct delayed_work ct_kill_delay;
-
- struct iwm_debugfs dbg;
-
- u8 *eeprom;
- struct timer_list watchdog;
- struct work_struct reset_worker;
- struct work_struct auth_retry_worker;
- struct mutex mutex;
-
- u8 *req_ie;
- int req_ie_len;
- u8 *resp_ie;
- int resp_ie_len;
-
- struct iwm_fw_error_hdr *last_fw_err;
- char umac_version[8];
- char lmac_version[8];
-
- char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
-};
-
-static inline void *iwm_private(struct iwm_priv *iwm)
-{
- BUG_ON(!iwm);
- return &iwm->private;
-}
-
-#define hw_to_iwm(h) (h->iwm)
-#define iwm_to_dev(i) (wiphy_dev(i->wdev->wiphy))
-#define iwm_to_wiphy(i) (i->wdev->wiphy)
-#define wiphy_to_iwm(w) (struct iwm_priv *)(wiphy_priv(w))
-#define iwm_to_wdev(i) (i->wdev)
-#define wdev_to_iwm(w) (struct iwm_priv *)(wdev_priv(w))
-#define iwm_to_ndev(i) (i->wdev->netdev)
-#define ndev_to_iwm(n) (wdev_to_iwm(n->ieee80211_ptr))
-#define skb_to_rx_info(s) ((struct iwm_rx_info *)(s->cb))
-#define skb_to_tx_info(s) ((struct iwm_tx_info *)s->cb)
-
-void *iwm_if_alloc(int sizeof_bus, struct device *dev,
- struct iwm_if_ops *if_ops);
-void iwm_if_free(struct iwm_priv *iwm);
-int iwm_if_add(struct iwm_priv *iwm);
-void iwm_if_remove(struct iwm_priv *iwm);
-int iwm_mode_to_nl80211_iftype(int mode);
-int iwm_priv_init(struct iwm_priv *iwm);
-void iwm_priv_deinit(struct iwm_priv *iwm);
-void iwm_reset(struct iwm_priv *iwm);
-void iwm_resetting(struct iwm_priv *iwm);
-void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
- struct iwm_umac_notif_alive *alive);
-int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb);
-int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
- u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size);
-int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout);
-void iwm_init_default_profile(struct iwm_priv *iwm,
- struct iwm_umac_profile *profile);
-void iwm_link_on(struct iwm_priv *iwm);
-void iwm_link_off(struct iwm_priv *iwm);
-int iwm_up(struct iwm_priv *iwm);
-int iwm_down(struct iwm_priv *iwm);
-
-/* TX API */
-int iwm_tid_to_queue(u16 tid);
-void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
-void iwm_tx_worker(struct work_struct *work);
-int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-
-/* RX API */
-void iwm_rx_setup_handlers(struct iwm_priv *iwm);
-int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size);
-int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
- struct iwm_wifi_cmd *cmd);
-void iwm_rx_free(struct iwm_priv *iwm);
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
deleted file mode 100644
index 5ddcdf8c70c0..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ /dev/null
@@ -1,484 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_LMAC_H__
-#define __IWM_LMAC_H__
-
-struct iwm_lmac_hdr {
- u8 id;
- u8 flags;
- __le16 seq_num;
-} __packed;
-
-/* LMAC commands */
-#define CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK 0x1
-
-struct iwm_lmac_cal_cfg_elt {
- __le32 enable; /* 1 means LMAC needs to do something */
- __le32 start; /* 1 to start calibration, 0 to stop */
- __le32 send_res; /* 1 for sending back results */
- __le32 apply_res; /* 1 for applying calibration results to HW */
- __le32 reserved;
-} __packed;
-
-struct iwm_lmac_cal_cfg_status {
- struct iwm_lmac_cal_cfg_elt init;
- struct iwm_lmac_cal_cfg_elt periodic;
- __le32 flags; /* CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_AFTER_MSK */
-} __packed;
-
-struct iwm_lmac_cal_cfg_cmd {
- struct iwm_lmac_cal_cfg_status ucode_cfg;
- struct iwm_lmac_cal_cfg_status driver_cfg;
- __le32 reserved;
-} __packed;
-
-struct iwm_lmac_cal_cfg_resp {
- __le32 status;
-} __packed;
-
-#define IWM_CARD_STATE_SW_HW_ENABLED 0x00
-#define IWM_CARD_STATE_HW_DISABLED 0x01
-#define IWM_CARD_STATE_SW_DISABLED 0x02
-#define IWM_CARD_STATE_CTKILL_DISABLED 0x04
-#define IWM_CARD_STATE_IS_RXON 0x10
-
-struct iwm_lmac_card_state {
- __le32 flags;
-} __packed;
-
-/**
- * COEX_PRIORITY_TABLE_CMD
- *
- * Priority entry for each state
- * Will keep two tables, for STA and WIPAN
- */
-enum {
- /* UN-ASSOCIATION PART */
- COEX_UNASSOC_IDLE = 0,
- COEX_UNASSOC_MANUAL_SCAN,
- COEX_UNASSOC_AUTO_SCAN,
-
- /* CALIBRATION */
- COEX_CALIBRATION,
- COEX_PERIODIC_CALIBRATION,
-
- /* CONNECTION */
- COEX_CONNECTION_ESTAB,
-
- /* ASSOCIATION PART */
- COEX_ASSOCIATED_IDLE,
- COEX_ASSOC_MANUAL_SCAN,
- COEX_ASSOC_AUTO_SCAN,
- COEX_ASSOC_ACTIVE_LEVEL,
-
- /* RF ON/OFF */
- COEX_RF_ON,
- COEX_RF_OFF,
- COEX_STAND_ALONE_DEBUG,
-
- /* IPNN */
- COEX_IPAN_ASSOC_LEVEL,
-
- /* RESERVED */
- COEX_RSRVD1,
- COEX_RSRVD2,
-
- COEX_EVENTS_NUM
-};
-
-#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK 0x1
-#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK 0x2
-#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK 0x4
-
-struct coex_event {
- u8 req_prio;
- u8 win_med_prio;
- u8 reserved;
- u8 flags;
-} __packed;
-
-#define COEX_FLAGS_STA_TABLE_VALID_MSK 0x1
-#define COEX_FLAGS_UNASSOC_WAKEUP_UMASK_MSK 0x4
-#define COEX_FLAGS_ASSOC_WAKEUP_UMASK_MSK 0x8
-#define COEX_FLAGS_COEX_ENABLE_MSK 0x80
-
-struct iwm_coex_prio_table_cmd {
- u8 flags;
- u8 reserved[3];
- struct coex_event sta_prio[COEX_EVENTS_NUM];
-} __packed;
-
-/* Coexistence definitions
- *
- * Constants to fill in the Priorities' Tables
- * RP - Requested Priority
- * WP - Win Medium Priority: priority assigned when the contention has been won
- * FLAGS - Combination of COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK and
- * COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK
- */
-
-#define COEX_UNASSOC_IDLE_FLAGS 0
-#define COEX_UNASSOC_MANUAL_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_UNASSOC_AUTO_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_CALIBRATION_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_PERIODIC_CALIBRATION_FLAGS 0
-/* COEX_CONNECTION_ESTAB: we need DELAY_MEDIUM_FREE_NTFY to let WiMAX
- * disconnect from network. */
-#define COEX_CONNECTION_ESTAB_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
- COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
-#define COEX_ASSOCIATED_IDLE_FLAGS 0
-#define COEX_ASSOC_MANUAL_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_ASSOC_AUTO_SCAN_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0
-#define COEX_RF_ON_FLAGS 0
-#define COEX_RF_OFF_FLAGS 0
-#define COEX_STAND_ALONE_DEBUG_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK)
-#define COEX_IPAN_ASSOC_LEVEL_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
- COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
-#define COEX_RSRVD1_FLAGS 0
-#define COEX_RSRVD2_FLAGS 0
-/* XOR_RF_ON is the event wrapping all radio ownership. We need
- * DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network. */
-#define COEX_XOR_RF_ON_FLAGS (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_MSK | \
- COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
- COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
-
-/* CT kill config command */
-struct iwm_ct_kill_cfg_cmd {
- u32 exit_threshold;
- u32 reserved;
- u32 entry_threshold;
-} __packed;
-
-
-/* LMAC OP CODES */
-#define REPLY_PAD 0x0
-#define REPLY_ALIVE 0x1
-#define REPLY_ERROR 0x2
-#define REPLY_ECHO 0x3
-#define REPLY_HALT 0x6
-
-/* RXON state commands */
-#define REPLY_RX_ON 0x10
-#define REPLY_RX_ON_ASSOC 0x11
-#define REPLY_RX_OFF 0x12
-#define REPLY_QOS_PARAM 0x13
-#define REPLY_RX_ON_TIMING 0x14
-#define REPLY_INTERNAL_QOS_PARAM 0x15
-#define REPLY_RX_INT_TIMEOUT_CNFG 0x16
-#define REPLY_NULL 0x17
-
-/* Multi-Station support */
-#define REPLY_ADD_STA 0x18
-#define REPLY_REMOVE_STA 0x19
-#define REPLY_RESET_ALL_STA 0x1a
-
-/* RX, TX */
-#define REPLY_ALM_RX 0x1b
-#define REPLY_TX 0x1c
-#define REPLY_TXFIFO_FLUSH 0x1e
-
-/* MISC commands */
-#define REPLY_MGMT_MCAST_KEY 0x1f
-#define REPLY_WEPKEY 0x20
-#define REPLY_INIT_IV 0x21
-#define REPLY_WRITE_MIB 0x22
-#define REPLY_READ_MIB 0x23
-#define REPLY_RADIO_FE 0x24
-#define REPLY_TXFIFO_CFG 0x25
-#define REPLY_WRITE_READ 0x26
-#define REPLY_INSTALL_SEC_KEY 0x27
-
-
-#define REPLY_RATE_SCALE 0x47
-#define REPLY_LEDS_CMD 0x48
-#define REPLY_TX_LINK_QUALITY_CMD 0x4e
-#define REPLY_ANA_MIB_OVERRIDE_CMD 0x4f
-#define REPLY_WRITE2REG_CMD 0x50
-
-/* winfi-wifi coexistence */
-#define COEX_PRIORITY_TABLE_CMD 0x5a
-#define COEX_MEDIUM_NOTIFICATION 0x5b
-#define COEX_EVENT_CMD 0x5c
-
-/* more Protocol and Protocol-test commands */
-#define REPLY_MAX_SLEEP_TIME_CMD 0x61
-#define CALIBRATION_CFG_CMD 0x65
-#define CALIBRATION_RES_NOTIFICATION 0x66
-#define CALIBRATION_COMPLETE_NOTIFICATION 0x67
-
-/* Measurements */
-#define REPLY_QUIET_CMD 0x71
-#define REPLY_CHANNEL_SWITCH 0x72
-#define CHANNEL_SWITCH_NOTIFICATION 0x73
-
-#define REPLY_SPECTRUM_MEASUREMENT_CMD 0x74
-#define SPECTRUM_MEASURE_NOTIFICATION 0x75
-#define REPLY_MEASUREMENT_ABORT_CMD 0x76
-
-/* Power Management */
-#define POWER_TABLE_CMD 0x77
-#define SAVE_RESTORE_ADDRESS_CMD 0x78
-#define REPLY_WATERMARK_CMD 0x79
-#define PM_DEBUG_STATISTIC_NOTIFIC 0x7B
-#define PD_FLUSH_N_NOTIFICATION 0x7C
-
-/* Scan commands and notifications */
-#define REPLY_SCAN_REQUEST_CMD 0x80
-#define REPLY_SCAN_ABORT_CMD 0x81
-#define SCAN_START_NOTIFICATION 0x82
-#define SCAN_RESULTS_NOTIFICATION 0x83
-#define SCAN_COMPLETE_NOTIFICATION 0x84
-
-/* Continuous TX commands */
-#define REPLY_CONT_TX_CMD 0x85
-#define END_OF_CONT_TX_NOTIFICATION 0x86
-
-/* Timer/Eeprom commands */
-#define TIMER_CMD 0x87
-#define EEPROM_WRITE_CMD 0x88
-
-/* PAPD commands */
-#define FEEDBACK_REQUEST_NOTIFICATION 0x8b
-#define REPLY_CW_CMD 0x8c
-
-/* IBSS/AP commands Continue */
-#define BEACON_NOTIFICATION 0x90
-#define REPLY_TX_BEACON 0x91
-#define REPLY_REQUEST_ATIM 0x93
-#define WHO_IS_AWAKE_NOTIFICATION 0x94
-#define TX_PWR_DBM_LIMIT_CMD 0x95
-#define QUIET_NOTIFICATION 0x96
-#define TX_PWR_TABLE_CMD 0x97
-#define TX_ANT_CONFIGURATION_CMD 0x98
-#define MEASURE_ABORT_NOTIFICATION 0x99
-#define REPLY_CALIBRATION_TUNE 0x9a
-
-/* bt config command */
-#define REPLY_BT_CONFIG 0x9b
-#define REPLY_STATISTICS_CMD 0x9c
-#define STATISTICS_NOTIFICATION 0x9d
-
-/* RF-KILL commands and notifications */
-#define REPLY_CARD_STATE_CMD 0xa0
-#define CARD_STATE_NOTIFICATION 0xa1
-
-/* Missed beacons notification */
-#define MISSED_BEACONS_NOTIFICATION 0xa2
-#define MISSED_BEACONS_NOTIFICATION_TH_CMD 0xa3
-
-#define REPLY_CT_KILL_CONFIG_CMD 0xa4
-
-/* HD commands and notifications */
-#define REPLY_HD_PARAMS_CMD 0xa6
-#define HD_PARAMS_NOTIFICATION 0xa7
-#define SENSITIVITY_CMD 0xa8
-#define U_APSD_PARAMS_CMD 0xa9
-#define NOISY_PLATFORM_CMD 0xaa
-#define ILLEGAL_CMD 0xac
-#define REPLY_PHY_CALIBRATION_CMD 0xb0
-#define REPLAY_RX_GAIN_CALIB_CMD 0xb1
-
-/* WiPAN commands */
-#define REPLY_WIPAN_PARAMS_CMD 0xb2
-#define REPLY_WIPAN_RX_ON_CMD 0xb3
-#define REPLY_WIPAN_RX_ON_TIMING 0xb4
-#define REPLY_WIPAN_TX_PWR_TABLE_CMD 0xb5
-#define REPLY_WIPAN_RXON_ASSOC_CMD 0xb6
-#define REPLY_WIPAN_QOS_PARAM 0xb7
-#define WIPAN_REPLY_WEPKEY 0xb8
-
-/* BeamForming commands */
-#define BEAMFORMER_CFG_CMD 0xba
-#define BEAMFORMEE_NOTIFICATION 0xbb
-
-/* TGn new Commands */
-#define REPLY_RX_PHY_CMD 0xc0
-#define REPLY_RX_MPDU_CMD 0xc1
-#define REPLY_MULTICAST_HASH 0xc2
-#define REPLY_KDR_RX 0xc3
-#define REPLY_RX_DSP_EXT_INFO 0xc4
-#define REPLY_COMPRESSED_BA 0xc5
-
-/* PNC commands */
-#define PNC_CONFIG_CMD 0xc8
-#define PNC_UPDATE_TABLE_CMD 0xc9
-#define XVT_GENERAL_CTRL_CMD 0xca
-#define REPLY_LEGACY_RADIO_FE 0xdd
-
-/* WoWLAN commands */
-#define WOWLAN_PATTERNS 0xe0
-#define WOWLAN_WAKEUP_FILTER 0xe1
-#define WOWLAN_TSC_RSC_PARAM 0xe2
-#define WOWLAN_TKIP_PARAM 0xe3
-#define WOWLAN_KEK_KCK_MATERIAL 0xe4
-#define WOWLAN_GET_STATUSES 0xe5
-#define WOWLAN_TX_POWER_PER_DB 0xe6
-#define REPLY_WOWLAN_GET_STATUSES WOWLAN_GET_STATUSES
-
-#define REPLY_DEBUG_CMD 0xf0
-#define REPLY_DSP_DEBUG_CMD 0xf1
-#define REPLY_DEBUG_MONITOR_CMD 0xf2
-#define REPLY_DEBUG_XVT_CMD 0xf3
-#define REPLY_DEBUG_DC_CALIB 0xf4
-#define REPLY_DYNAMIC_BP 0xf5
-
-/* General purpose Commands */
-#define REPLY_GP1_CMD 0xfa
-#define REPLY_GP2_CMD 0xfb
-#define REPLY_GP3_CMD 0xfc
-#define REPLY_GP4_CMD 0xfd
-#define REPLY_REPLAY_WRAPPER 0xfe
-#define REPLY_FRAME_DURATION_CALC_CMD 0xff
-
-#define LMAC_COMMAND_ID_MAX 0xff
-#define LMAC_COMMAND_ID_NUM (LMAC_COMMAND_ID_MAX + 1)
-
-
-/* Calibration */
-
-enum {
- PHY_CALIBRATE_DC_CMD = 0,
- PHY_CALIBRATE_LO_CMD = 1,
- PHY_CALIBRATE_RX_BB_CMD = 2,
- PHY_CALIBRATE_TX_IQ_CMD = 3,
- PHY_CALIBRATE_RX_IQ_CMD = 4,
- PHY_CALIBRATION_NOISE_CMD = 5,
- PHY_CALIBRATE_AGC_TABLE_CMD = 6,
- PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 7,
- PHY_CALIBRATE_OPCODES_NUM,
- SHILOH_PHY_CALIBRATE_DC_CMD = 8,
- SHILOH_PHY_CALIBRATE_LO_CMD = 9,
- SHILOH_PHY_CALIBRATE_RX_BB_CMD = 10,
- SHILOH_PHY_CALIBRATE_TX_IQ_CMD = 11,
- SHILOH_PHY_CALIBRATE_RX_IQ_CMD = 12,
- SHILOH_PHY_CALIBRATION_NOISE_CMD = 13,
- SHILOH_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
- SHILOH_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
- SHILOH_PHY_CALIBRATE_BASE_BAND_CMD = 16,
- SHILOH_PHY_CALIBRATE_TXIQ_PERIODIC_CMD = 17,
- CALIBRATION_CMD_NUM,
-};
-
-enum {
- CALIB_CFG_RX_BB_IDX = 0,
- CALIB_CFG_DC_IDX = 1,
- CALIB_CFG_LO_IDX = 2,
- CALIB_CFG_TX_IQ_IDX = 3,
- CALIB_CFG_RX_IQ_IDX = 4,
- CALIB_CFG_NOISE_IDX = 5,
- CALIB_CFG_CRYSTAL_IDX = 6,
- CALIB_CFG_TEMPERATURE_IDX = 7,
- CALIB_CFG_PAPD_IDX = 8,
- CALIB_CFG_LAST_IDX = CALIB_CFG_PAPD_IDX,
- CALIB_CFG_MODULE_NUM,
-};
-
-#define IWM_CALIB_MAP_INIT_MSK 0xFFFF
-#define IWM_CALIB_MAP_PER_LMAC(m) ((m & 0xFF0000) >> 16)
-#define IWM_CALIB_MAP_PER_UMAC(m) ((m & 0xFF000000) >> 24)
-#define IWM_CALIB_OPCODE_TO_INDEX(op) (op - PHY_CALIBRATE_OPCODES_NUM)
-
-struct iwm_lmac_calib_hdr {
- u8 opcode;
- u8 first_grp;
- u8 grp_num;
- u8 all_data_valid;
-} __packed;
-
-#define IWM_LMAC_CALIB_FREQ_GROUPS_NR 7
-#define IWM_CALIB_FREQ_GROUPS_NR 5
-#define IWM_CALIB_DC_MODES_NR 12
-
-struct iwm_calib_rxiq_entry {
- u16 ptam_postdist_ars;
- u16 ptam_postdist_arc;
-} __packed;
-
-struct iwm_calib_rxiq_group {
- struct iwm_calib_rxiq_entry mode[IWM_CALIB_DC_MODES_NR];
-} __packed;
-
-struct iwm_lmac_calib_rxiq {
- struct iwm_calib_rxiq_group group[IWM_LMAC_CALIB_FREQ_GROUPS_NR];
-} __packed;
-
-struct iwm_calib_rxiq {
- struct iwm_lmac_calib_hdr hdr;
- struct iwm_calib_rxiq_group group[IWM_CALIB_FREQ_GROUPS_NR];
-} __packed;
-
-#define LMAC_STA_ID_SEED 0x0f
-#define LMAC_STA_ID_POS 0
-
-#define LMAC_STA_COLOR_SEED 0x7
-#define LMAC_STA_COLOR_POS 4
-
-struct iwm_lmac_power_report {
- u8 pa_status;
- u8 pa_integ_res_A[3];
- u8 pa_integ_res_B[3];
- u8 pa_integ_res_C[3];
-} __packed;
-
-struct iwm_lmac_tx_resp {
- u8 frame_cnt; /* 1-no aggregation, greater then 1 - aggregation */
- u8 bt_kill_cnt;
- __le16 retry_cnt;
- __le32 initial_tx_rate;
- __le16 wireless_media_time;
- struct iwm_lmac_power_report power_report;
- __le32 tfd_info;
- __le16 seq_ctl;
- __le16 byte_cnt;
- u8 tlc_rate_info;
- u8 ra_tid;
- __le16 frame_ctl;
- __le32 status;
-} __packed;
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
deleted file mode 100644
index 1f868b166d10..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ /dev/null
@@ -1,847 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/ieee80211.h>
-#include <linux/wireless.h>
-#include <linux/slab.h>
-#include <linux/moduleparam.h>
-
-#include "iwm.h"
-#include "debug.h"
-#include "bus.h"
-#include "umac.h"
-#include "commands.h"
-#include "hal.h"
-#include "fw.h"
-#include "rx.h"
-
-static struct iwm_conf def_iwm_conf = {
-
- .sdio_ior_timeout = 5000,
- .calib_map = BIT(CALIB_CFG_DC_IDX) |
- BIT(CALIB_CFG_LO_IDX) |
- BIT(CALIB_CFG_TX_IQ_IDX) |
- BIT(CALIB_CFG_RX_IQ_IDX) |
- BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
- .expected_calib_map = BIT(PHY_CALIBRATE_DC_CMD) |
- BIT(PHY_CALIBRATE_LO_CMD) |
- BIT(PHY_CALIBRATE_TX_IQ_CMD) |
- BIT(PHY_CALIBRATE_RX_IQ_CMD) |
- BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
- .ct_kill_entry = 110,
- .ct_kill_exit = 110,
- .reset_on_fatal_err = 1,
- .auto_connect = 1,
- .enable_qos = 1,
- .mode = UMAC_MODE_BSS,
-
- /* UMAC configuration */
- .power_index = 0,
- .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
- .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
- .cts_to_self = 0,
-
- .assoc_timeout = 2,
- .roam_timeout = 10,
- .wireless_mode = WIRELESS_MODE_11A | WIRELESS_MODE_11G |
- WIRELESS_MODE_11N,
-
- /* IBSS */
- .ibss_band = UMAC_BAND_2GHZ,
- .ibss_channel = 1,
-
- .mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
-};
-
-static bool modparam_reset;
-module_param_named(reset, modparam_reset, bool, 0644);
-MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
-
-static bool modparam_wimax_enable = true;
-module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
-MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
-
-int iwm_mode_to_nl80211_iftype(int mode)
-{
- switch (mode) {
- case UMAC_MODE_BSS:
- return NL80211_IFTYPE_STATION;
- case UMAC_MODE_IBSS:
- return NL80211_IFTYPE_ADHOC;
- default:
- return NL80211_IFTYPE_UNSPECIFIED;
- }
-
- return 0;
-}
-
-static void iwm_statistics_request(struct work_struct *work)
-{
- struct iwm_priv *iwm =
- container_of(work, struct iwm_priv, stats_request.work);
-
- iwm_send_umac_stats_req(iwm, 0);
-}
-
-static void iwm_disconnect_work(struct work_struct *work)
-{
- struct iwm_priv *iwm =
- container_of(work, struct iwm_priv, disconnect.work);
-
- if (iwm->umac_profile_active)
- iwm_invalidate_mlme_profile(iwm);
-
- clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- iwm->umac_profile_active = false;
- memset(iwm->bssid, 0, ETH_ALEN);
- iwm->channel = 0;
-
- iwm_link_off(iwm);
-
- wake_up_interruptible(&iwm->mlme_queue);
-
- cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL);
-}
-
-static void iwm_ct_kill_work(struct work_struct *work)
-{
- struct iwm_priv *iwm =
- container_of(work, struct iwm_priv, ct_kill_delay.work);
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
-
- IWM_INFO(iwm, "CT kill delay timeout\n");
-
- wiphy_rfkill_set_hw_state(wiphy, false);
-}
-
-static int __iwm_up(struct iwm_priv *iwm);
-static int __iwm_down(struct iwm_priv *iwm);
-
-static void iwm_reset_worker(struct work_struct *work)
-{
- struct iwm_priv *iwm;
- struct iwm_umac_profile *profile = NULL;
- int uninitialized_var(ret), retry = 0;
-
- iwm = container_of(work, struct iwm_priv, reset_worker);
-
- /*
- * XXX: The iwm->mutex is introduced purely for this reset work,
- * because the other users for iwm_up and iwm_down are only netdev
- * ndo_open and ndo_stop which are already protected by rtnl.
- * Please remove iwm->mutex together if iwm_reset_worker() is not
- * required in the future.
- */
- if (!mutex_trylock(&iwm->mutex)) {
- IWM_WARN(iwm, "We are in the middle of interface bringing "
- "UP/DOWN. Skip driver resetting.\n");
- return;
- }
-
- if (iwm->umac_profile_active) {
- profile = kmalloc(sizeof(struct iwm_umac_profile), GFP_KERNEL);
- if (profile)
- memcpy(profile, iwm->umac_profile, sizeof(*profile));
- else
- IWM_ERR(iwm, "Couldn't alloc memory for profile\n");
- }
-
- __iwm_down(iwm);
-
- while (retry++ < 3) {
- ret = __iwm_up(iwm);
- if (!ret)
- break;
-
- schedule_timeout_uninterruptible(10 * HZ);
- }
-
- if (ret) {
- IWM_WARN(iwm, "iwm_up() failed: %d\n", ret);
-
- kfree(profile);
- goto out;
- }
-
- if (profile) {
- IWM_DBG_MLME(iwm, DBG, "Resend UMAC profile\n");
- memcpy(iwm->umac_profile, profile, sizeof(*profile));
- iwm_send_mlme_profile(iwm);
- kfree(profile);
- } else
- clear_bit(IWM_STATUS_RESETTING, &iwm->status);
-
- out:
- mutex_unlock(&iwm->mutex);
-}
-
-static void iwm_auth_retry_worker(struct work_struct *work)
-{
- struct iwm_priv *iwm;
- int i, ret;
-
- iwm = container_of(work, struct iwm_priv, auth_retry_worker);
- if (iwm->umac_profile_active) {
- ret = iwm_invalidate_mlme_profile(iwm);
- if (ret < 0)
- return;
- }
-
- iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
-
- ret = iwm_send_mlme_profile(iwm);
- if (ret < 0)
- return;
-
- for (i = 0; i < IWM_NUM_KEYS; i++)
- if (iwm->keys[i].key_len)
- iwm_set_key(iwm, 0, &iwm->keys[i]);
-
- iwm_set_tx_key(iwm, iwm->default_key);
-}
-
-
-
-static void iwm_watchdog(unsigned long data)
-{
- struct iwm_priv *iwm = (struct iwm_priv *)data;
-
- IWM_WARN(iwm, "Watchdog expired: UMAC stalls!\n");
-
- if (modparam_reset)
- iwm_resetting(iwm);
-}
-
-int iwm_priv_init(struct iwm_priv *iwm)
-{
- int i, j;
- char name[32];
-
- iwm->status = 0;
- INIT_LIST_HEAD(&iwm->pending_notif);
- init_waitqueue_head(&iwm->notif_queue);
- init_waitqueue_head(&iwm->nonwifi_queue);
- init_waitqueue_head(&iwm->wifi_ntfy_queue);
- init_waitqueue_head(&iwm->mlme_queue);
- memcpy(&iwm->conf, &def_iwm_conf, sizeof(struct iwm_conf));
- spin_lock_init(&iwm->tx_credit.lock);
- INIT_LIST_HEAD(&iwm->wifi_pending_cmd);
- INIT_LIST_HEAD(&iwm->nonwifi_pending_cmd);
- iwm->wifi_seq_num = UMAC_WIFI_SEQ_NUM_BASE;
- iwm->nonwifi_seq_num = UMAC_NONWIFI_SEQ_NUM_BASE;
- spin_lock_init(&iwm->cmd_lock);
- iwm->scan_id = 1;
- INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request);
- INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work);
- INIT_DELAYED_WORK(&iwm->ct_kill_delay, iwm_ct_kill_work);
- INIT_WORK(&iwm->reset_worker, iwm_reset_worker);
- INIT_WORK(&iwm->auth_retry_worker, iwm_auth_retry_worker);
- INIT_LIST_HEAD(&iwm->bss_list);
-
- skb_queue_head_init(&iwm->rx_list);
- INIT_LIST_HEAD(&iwm->rx_tickets);
- spin_lock_init(&iwm->ticket_lock);
- for (i = 0; i < IWM_RX_ID_HASH; i++) {
- INIT_LIST_HEAD(&iwm->rx_packets[i]);
- spin_lock_init(&iwm->packet_lock[i]);
- }
-
- INIT_WORK(&iwm->rx_worker, iwm_rx_worker);
-
- iwm->rx_wq = create_singlethread_workqueue(KBUILD_MODNAME "_rx");
- if (!iwm->rx_wq)
- return -EAGAIN;
-
- for (i = 0; i < IWM_TX_QUEUES; i++) {
- INIT_WORK(&iwm->txq[i].worker, iwm_tx_worker);
- snprintf(name, 32, KBUILD_MODNAME "_tx_%d", i);
- iwm->txq[i].id = i;
- iwm->txq[i].wq = create_singlethread_workqueue(name);
- if (!iwm->txq[i].wq)
- return -EAGAIN;
-
- skb_queue_head_init(&iwm->txq[i].queue);
- skb_queue_head_init(&iwm->txq[i].stopped_queue);
- spin_lock_init(&iwm->txq[i].lock);
- }
-
- for (i = 0; i < IWM_NUM_KEYS; i++)
- memset(&iwm->keys[i], 0, sizeof(struct iwm_key));
-
- iwm->default_key = -1;
-
- for (i = 0; i < IWM_STA_TABLE_NUM; i++)
- for (j = 0; j < IWM_UMAC_TID_NR; j++) {
- mutex_init(&iwm->sta_table[i].tid_info[j].mutex);
- iwm->sta_table[i].tid_info[j].stopped = false;
- }
-
- init_timer(&iwm->watchdog);
- iwm->watchdog.function = iwm_watchdog;
- iwm->watchdog.data = (unsigned long)iwm;
- mutex_init(&iwm->mutex);
-
- iwm->last_fw_err = kzalloc(sizeof(struct iwm_fw_error_hdr),
- GFP_KERNEL);
- if (iwm->last_fw_err == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-void iwm_priv_deinit(struct iwm_priv *iwm)
-{
- int i;
-
- for (i = 0; i < IWM_TX_QUEUES; i++)
- destroy_workqueue(iwm->txq[i].wq);
-
- destroy_workqueue(iwm->rx_wq);
- kfree(iwm->last_fw_err);
-}
-
-/*
- * We reset all the structures, and we reset the UMAC.
- * After calling this routine, you're expected to reload
- * the firmware.
- */
-void iwm_reset(struct iwm_priv *iwm)
-{
- struct iwm_notif *notif, *next;
-
- if (test_bit(IWM_STATUS_READY, &iwm->status))
- iwm_target_reset(iwm);
-
- if (test_bit(IWM_STATUS_RESETTING, &iwm->status)) {
- iwm->status = 0;
- set_bit(IWM_STATUS_RESETTING, &iwm->status);
- } else
- iwm->status = 0;
- iwm->scan_id = 1;
-
- list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) {
- list_del(&notif->pending);
- kfree(notif->buf);
- kfree(notif);
- }
-
- iwm_cmd_flush(iwm);
-
- flush_workqueue(iwm->rx_wq);
-
- iwm_link_off(iwm);
-}
-
-void iwm_resetting(struct iwm_priv *iwm)
-{
- set_bit(IWM_STATUS_RESETTING, &iwm->status);
-
- schedule_work(&iwm->reset_worker);
-}
-
-/*
- * Notification code:
- *
- * We're faced with the following issue: Any host command can
- * have an answer or not, and if there's an answer to expect,
- * it can be treated synchronously or asynchronously.
- * To work around the synchronous answer case, we implemented
- * our notification mechanism.
- * When a code path needs to wait for a command response
- * synchronously, it calls notif_handle(), which waits for the
- * right notification to show up, and then process it. Before
- * starting to wait, it registered as a waiter for this specific
- * answer (by toggling a bit in on of the handler_map), so that
- * the rx code knows that it needs to send a notification to the
- * waiting processes. It does so by calling iwm_notif_send(),
- * which adds the notification to the pending notifications list,
- * and then wakes the waiting processes up.
- */
-int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
- u8 cmd_id, u8 source, u8 *buf, unsigned long buf_size)
-{
- struct iwm_notif *notif;
-
- notif = kzalloc(sizeof(struct iwm_notif), GFP_KERNEL);
- if (!notif) {
- IWM_ERR(iwm, "Couldn't alloc memory for notification\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&notif->pending);
- notif->cmd = cmd;
- notif->cmd_id = cmd_id;
- notif->src = source;
- notif->buf = kzalloc(buf_size, GFP_KERNEL);
- if (!notif->buf) {
- IWM_ERR(iwm, "Couldn't alloc notification buffer\n");
- kfree(notif);
- return -ENOMEM;
- }
- notif->buf_size = buf_size;
- memcpy(notif->buf, buf, buf_size);
- list_add_tail(&notif->pending, &iwm->pending_notif);
-
- wake_up_interruptible(&iwm->notif_queue);
-
- return 0;
-}
-
-static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd,
- u8 source)
-{
- struct iwm_notif *notif;
-
- list_for_each_entry(notif, &iwm->pending_notif, pending) {
- if ((notif->cmd_id == cmd) && (notif->src == source)) {
- list_del(&notif->pending);
- return notif;
- }
- }
-
- return NULL;
-}
-
-static struct iwm_notif *iwm_notif_wait(struct iwm_priv *iwm, u32 cmd,
- u8 source, long timeout)
-{
- int ret;
- struct iwm_notif *notif;
- unsigned long *map = NULL;
-
- switch (source) {
- case IWM_SRC_LMAC:
- map = &iwm->lmac_handler_map[0];
- break;
- case IWM_SRC_UMAC:
- map = &iwm->umac_handler_map[0];
- break;
- case IWM_SRC_UDMA:
- map = &iwm->udma_handler_map[0];
- break;
- }
-
- set_bit(cmd, map);
-
- ret = wait_event_interruptible_timeout(iwm->notif_queue,
- ((notif = iwm_notif_find(iwm, cmd, source)) != NULL),
- timeout);
- clear_bit(cmd, map);
-
- if (!ret)
- return NULL;
-
- return notif;
-}
-
-int iwm_notif_handle(struct iwm_priv *iwm, u32 cmd, u8 source, long timeout)
-{
- int ret;
- struct iwm_notif *notif;
-
- notif = iwm_notif_wait(iwm, cmd, source, timeout);
- if (!notif)
- return -ETIME;
-
- ret = iwm_rx_handle_resp(iwm, notif->buf, notif->buf_size, notif->cmd);
- kfree(notif->buf);
- kfree(notif);
-
- return ret;
-}
-
-static int iwm_config_boot_params(struct iwm_priv *iwm)
-{
- struct iwm_udma_nonwifi_cmd target_cmd;
- int ret;
-
- /* check Wimax is off and config debug monitor */
- if (!modparam_wimax_enable) {
- u32 data1 = 0x1f;
- u32 addr1 = 0x606BE258;
-
- u32 data2_set = 0x0;
- u32 data2_clr = 0x1;
- u32 addr2 = 0x606BE100;
-
- u32 data3 = 0x1;
- u32 addr3 = 0x606BEC00;
-
- target_cmd.resp = 0;
- target_cmd.handle_by_hw = 0;
- target_cmd.eop = 1;
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
- target_cmd.addr = cpu_to_le32(addr1);
- target_cmd.op1_sz = cpu_to_le32(sizeof(u32));
- target_cmd.op2 = 0;
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1);
- if (ret < 0) {
- IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
- return ret;
- }
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE;
- target_cmd.addr = cpu_to_le32(addr2);
- target_cmd.op1_sz = cpu_to_le32(data2_set);
- target_cmd.op2 = cpu_to_le32(data2_clr);
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data1);
- if (ret < 0) {
- IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
- return ret;
- }
-
- target_cmd.opcode = UMAC_HDI_OUT_OPCODE_WRITE;
- target_cmd.addr = cpu_to_le32(addr3);
- target_cmd.op1_sz = cpu_to_le32(sizeof(u32));
- target_cmd.op2 = 0;
-
- ret = iwm_hal_send_target_cmd(iwm, &target_cmd, &data3);
- if (ret < 0) {
- IWM_ERR(iwm, "iwm_hal_send_target_cmd failed\n");
- return ret;
- }
- }
-
- return 0;
-}
-
-void iwm_init_default_profile(struct iwm_priv *iwm,
- struct iwm_umac_profile *profile)
-{
- memset(profile, 0, sizeof(struct iwm_umac_profile));
-
- profile->sec.auth_type = UMAC_AUTH_TYPE_OPEN;
- profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
- profile->sec.ucast_cipher = UMAC_CIPHER_TYPE_NONE;
- profile->sec.mcast_cipher = UMAC_CIPHER_TYPE_NONE;
-
- if (iwm->conf.enable_qos)
- profile->flags |= cpu_to_le16(UMAC_PROFILE_QOS_ALLOWED);
-
- profile->wireless_mode = iwm->conf.wireless_mode;
- profile->mode = cpu_to_le32(iwm->conf.mode);
-
- profile->ibss.atim = 0;
- profile->ibss.beacon_interval = 100;
- profile->ibss.join_only = 0;
- profile->ibss.band = iwm->conf.ibss_band;
- profile->ibss.channel = iwm->conf.ibss_channel;
-}
-
-void iwm_link_on(struct iwm_priv *iwm)
-{
- netif_carrier_on(iwm_to_ndev(iwm));
- netif_tx_wake_all_queues(iwm_to_ndev(iwm));
-
- iwm_send_umac_stats_req(iwm, 0);
-}
-
-void iwm_link_off(struct iwm_priv *iwm)
-{
- struct iw_statistics *wstats = &iwm->wstats;
- int i;
-
- netif_tx_stop_all_queues(iwm_to_ndev(iwm));
- netif_carrier_off(iwm_to_ndev(iwm));
-
- for (i = 0; i < IWM_TX_QUEUES; i++) {
- skb_queue_purge(&iwm->txq[i].queue);
- skb_queue_purge(&iwm->txq[i].stopped_queue);
-
- iwm->txq[i].concat_count = 0;
- iwm->txq[i].concat_ptr = iwm->txq[i].concat_buf;
-
- flush_workqueue(iwm->txq[i].wq);
- }
-
- iwm_rx_free(iwm);
-
- cancel_delayed_work_sync(&iwm->stats_request);
- memset(wstats, 0, sizeof(struct iw_statistics));
- wstats->qual.updated = IW_QUAL_ALL_INVALID;
-
- kfree(iwm->req_ie);
- iwm->req_ie = NULL;
- iwm->req_ie_len = 0;
- kfree(iwm->resp_ie);
- iwm->resp_ie = NULL;
- iwm->resp_ie_len = 0;
-
- del_timer_sync(&iwm->watchdog);
-}
-
-static void iwm_bss_list_clean(struct iwm_priv *iwm)
-{
- struct iwm_bss_info *bss, *next;
-
- list_for_each_entry_safe(bss, next, &iwm->bss_list, node) {
- list_del(&bss->node);
- kfree(bss->bss);
- kfree(bss);
- }
-}
-
-static int iwm_channels_init(struct iwm_priv *iwm)
-{
- int ret;
-
- ret = iwm_send_umac_channel_list(iwm);
- if (ret) {
- IWM_ERR(iwm, "Send channel list failed\n");
- return ret;
- }
-
- ret = iwm_notif_handle(iwm, UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST,
- IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Didn't get a channel list notification\n");
- return ret;
- }
-
- return 0;
-}
-
-static int __iwm_up(struct iwm_priv *iwm)
-{
- int ret;
- struct iwm_notif *notif_reboot, *notif_ack = NULL;
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- u32 wireless_mode;
-
- ret = iwm_bus_enable(iwm);
- if (ret) {
- IWM_ERR(iwm, "Couldn't enable function\n");
- return ret;
- }
-
- iwm_rx_setup_handlers(iwm);
-
- /* Wait for initial BARKER_REBOOT from hardware */
- notif_reboot = iwm_notif_wait(iwm, IWM_BARKER_REBOOT_NOTIFICATION,
- IWM_SRC_UDMA, 2 * HZ);
- if (!notif_reboot) {
- IWM_ERR(iwm, "Wait for REBOOT_BARKER timeout\n");
- goto err_disable;
- }
-
- /* We send the barker back */
- ret = iwm_bus_send_chunk(iwm, notif_reboot->buf, 16);
- if (ret) {
- IWM_ERR(iwm, "REBOOT barker response failed\n");
- kfree(notif_reboot);
- goto err_disable;
- }
-
- kfree(notif_reboot->buf);
- kfree(notif_reboot);
-
- /* Wait for ACK_BARKER from hardware */
- notif_ack = iwm_notif_wait(iwm, IWM_ACK_BARKER_NOTIFICATION,
- IWM_SRC_UDMA, 2 * HZ);
- if (!notif_ack) {
- IWM_ERR(iwm, "Wait for ACK_BARKER timeout\n");
- goto err_disable;
- }
-
- kfree(notif_ack->buf);
- kfree(notif_ack);
-
- /* We start to config static boot parameters */
- ret = iwm_config_boot_params(iwm);
- if (ret) {
- IWM_ERR(iwm, "Config boot parameters failed\n");
- goto err_disable;
- }
-
- ret = iwm_read_mac(iwm, iwm_to_ndev(iwm)->dev_addr);
- if (ret) {
- IWM_ERR(iwm, "MAC reading failed\n");
- goto err_disable;
- }
- memcpy(iwm_to_ndev(iwm)->perm_addr, iwm_to_ndev(iwm)->dev_addr,
- ETH_ALEN);
-
- /* We can load the FWs */
- ret = iwm_load_fw(iwm);
- if (ret) {
- IWM_ERR(iwm, "FW loading failed\n");
- goto err_disable;
- }
-
- ret = iwm_eeprom_fat_channels(iwm);
- if (ret) {
- IWM_ERR(iwm, "Couldnt read HT channels EEPROM entries\n");
- goto err_fw;
- }
-
- /*
- * Read our SKU capabilities.
- * If it's valid, we AND the configured wireless mode with the
- * device EEPROM value as the current profile wireless mode.
- */
- wireless_mode = iwm_eeprom_wireless_mode(iwm);
- if (wireless_mode) {
- iwm->conf.wireless_mode &= wireless_mode;
- if (iwm->umac_profile)
- iwm->umac_profile->wireless_mode =
- iwm->conf.wireless_mode;
- } else
- IWM_ERR(iwm, "Wrong SKU capabilities: 0x%x\n",
- *((u16 *)iwm_eeprom_access(iwm, IWM_EEPROM_SKU_CAP)));
-
- snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "L%s_U%s",
- iwm->lmac_version, iwm->umac_version);
-
- /* We configure the UMAC and enable the wifi module */
- ret = iwm_send_umac_config(iwm,
- cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) |
- cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_LINK_EN) |
- cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_MLME_EN));
- if (ret) {
- IWM_ERR(iwm, "UMAC config failed\n");
- goto err_fw;
- }
-
- ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS,
- IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Didn't get a wifi core status notification\n");
- goto err_fw;
- }
-
- if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN |
- UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) {
- IWM_DBG_BOOT(iwm, DBG, "Not all cores enabled:0x%x\n",
- iwm->core_enabled);
- ret = iwm_notif_handle(iwm, UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS,
- IWM_SRC_UMAC, WAIT_NOTIF_TIMEOUT);
- if (ret) {
- IWM_ERR(iwm, "Didn't get a core status notification\n");
- goto err_fw;
- }
-
- if (iwm->core_enabled != (UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN |
- UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN)) {
- IWM_ERR(iwm, "Not all cores enabled: 0x%x\n",
- iwm->core_enabled);
- goto err_fw;
- } else {
- IWM_INFO(iwm, "All cores enabled\n");
- }
- }
-
- ret = iwm_channels_init(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't init channels\n");
- goto err_fw;
- }
-
- /* Set the READY bit to indicate interface is brought up successfully */
- set_bit(IWM_STATUS_READY, &iwm->status);
-
- return 0;
-
- err_fw:
- iwm_eeprom_exit(iwm);
-
- err_disable:
- ret = iwm_bus_disable(iwm);
- if (ret < 0)
- IWM_ERR(iwm, "Couldn't disable function\n");
-
- return -EIO;
-}
-
-int iwm_up(struct iwm_priv *iwm)
-{
- int ret;
-
- mutex_lock(&iwm->mutex);
- ret = __iwm_up(iwm);
- mutex_unlock(&iwm->mutex);
-
- return ret;
-}
-
-static int __iwm_down(struct iwm_priv *iwm)
-{
- int ret;
-
- /* The interface is already down */
- if (!test_bit(IWM_STATUS_READY, &iwm->status))
- return 0;
-
- if (iwm->scan_request) {
- cfg80211_scan_done(iwm->scan_request, true);
- iwm->scan_request = NULL;
- }
-
- clear_bit(IWM_STATUS_READY, &iwm->status);
-
- iwm_eeprom_exit(iwm);
- iwm_bss_list_clean(iwm);
- iwm_init_default_profile(iwm, iwm->umac_profile);
- iwm->umac_profile_active = false;
- iwm->default_key = -1;
- iwm->core_enabled = 0;
-
- ret = iwm_bus_disable(iwm);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't disable function\n");
- return ret;
- }
-
- return 0;
-}
-
-int iwm_down(struct iwm_priv *iwm)
-{
- int ret;
-
- mutex_lock(&iwm->mutex);
- ret = __iwm_down(iwm);
- mutex_unlock(&iwm->mutex);
-
- return ret;
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
deleted file mode 100644
index 5091d77e02ce..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ /dev/null
@@ -1,191 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- *
- */
-
-/*
- * This is the netdev related hooks for iwm.
- *
- * Some interesting code paths:
- *
- * iwm_open() (Called at netdev interface bringup time)
- * -> iwm_up() (main.c)
- * -> iwm_bus_enable()
- * -> if_sdio_enable() (In case of an SDIO bus)
- * -> sdio_enable_func()
- * -> iwm_notif_wait(BARKER_REBOOT) (wait for reboot barker)
- * -> iwm_notif_wait(ACK_BARKER) (wait for ACK barker)
- * -> iwm_load_fw() (fw.c)
- * -> iwm_load_umac()
- * -> iwm_load_lmac() (Calibration LMAC)
- * -> iwm_load_lmac() (Operational LMAC)
- * -> iwm_send_umac_config()
- *
- * iwm_stop() (Called at netdev interface bringdown time)
- * -> iwm_down()
- * -> iwm_bus_disable()
- * -> if_sdio_disable() (In case of an SDIO bus)
- * -> sdio_disable_func()
- */
-#include <linux/netdevice.h>
-#include <linux/slab.h>
-
-#include "iwm.h"
-#include "commands.h"
-#include "cfg80211.h"
-#include "debug.h"
-
-static int iwm_open(struct net_device *ndev)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
-
- return iwm_up(iwm);
-}
-
-static int iwm_stop(struct net_device *ndev)
-{
- struct iwm_priv *iwm = ndev_to_iwm(ndev);
-
- return iwm_down(iwm);
-}
-
-/*
- * iwm AC to queue mapping
- *
- * AC_VO -> queue 3
- * AC_VI -> queue 2
- * AC_BE -> queue 1
- * AC_BK -> queue 0
- */
-static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
-
-int iwm_tid_to_queue(u16 tid)
-{
- if (tid > IWM_UMAC_TID_NR - 2)
- return -EINVAL;
-
- return iwm_1d_to_queue[tid];
-}
-
-static u16 iwm_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
- skb->priority = cfg80211_classify8021d(skb);
-
- return iwm_1d_to_queue[skb->priority];
-}
-
-static const struct net_device_ops iwm_netdev_ops = {
- .ndo_open = iwm_open,
- .ndo_stop = iwm_stop,
- .ndo_start_xmit = iwm_xmit_frame,
- .ndo_select_queue = iwm_select_queue,
-};
-
-void *iwm_if_alloc(int sizeof_bus, struct device *dev,
- struct iwm_if_ops *if_ops)
-{
- struct net_device *ndev;
- struct wireless_dev *wdev;
- struct iwm_priv *iwm;
- int ret = 0;
-
- wdev = iwm_wdev_alloc(sizeof_bus, dev);
- if (IS_ERR(wdev))
- return wdev;
-
- iwm = wdev_to_iwm(wdev);
- iwm->bus_ops = if_ops;
- iwm->wdev = wdev;
-
- ret = iwm_priv_init(iwm);
- if (ret) {
- dev_err(dev, "failed to init iwm_priv\n");
- goto out_wdev;
- }
-
- wdev->iftype = iwm_mode_to_nl80211_iftype(iwm->conf.mode);
-
- ndev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
- if (!ndev) {
- dev_err(dev, "no memory for network device instance\n");
- ret = -ENOMEM;
- goto out_priv;
- }
-
- ndev->netdev_ops = &iwm_netdev_ops;
- ndev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
- wdev->netdev = ndev;
-
- iwm->umac_profile = kmalloc(sizeof(struct iwm_umac_profile),
- GFP_KERNEL);
- if (!iwm->umac_profile) {
- dev_err(dev, "Couldn't alloc memory for profile\n");
- ret = -ENOMEM;
- goto out_profile;
- }
-
- iwm_init_default_profile(iwm, iwm->umac_profile);
-
- return iwm;
-
- out_profile:
- free_netdev(ndev);
-
- out_priv:
- iwm_priv_deinit(iwm);
-
- out_wdev:
- iwm_wdev_free(iwm);
- return ERR_PTR(ret);
-}
-
-void iwm_if_free(struct iwm_priv *iwm)
-{
- if (!iwm_to_ndev(iwm))
- return;
-
- cancel_delayed_work_sync(&iwm->ct_kill_delay);
- free_netdev(iwm_to_ndev(iwm));
- iwm_priv_deinit(iwm);
- kfree(iwm->umac_profile);
- iwm->umac_profile = NULL;
- iwm_wdev_free(iwm);
-}
-
-int iwm_if_add(struct iwm_priv *iwm)
-{
- struct net_device *ndev = iwm_to_ndev(iwm);
- int ret;
-
- ret = register_netdev(ndev);
- if (ret < 0) {
- dev_err(&ndev->dev, "Failed to register netdev: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-void iwm_if_remove(struct iwm_priv *iwm)
-{
- unregister_netdev(iwm_to_ndev(iwm));
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
deleted file mode 100644
index 7d708f4395f3..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ /dev/null
@@ -1,1701 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/sched.h>
-#include <linux/etherdevice.h>
-#include <linux/wireless.h>
-#include <linux/ieee80211.h>
-#include <linux/if_arp.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <net/iw_handler.h>
-
-#include "iwm.h"
-#include "debug.h"
-#include "hal.h"
-#include "umac.h"
-#include "lmac.h"
-#include "commands.h"
-#include "rx.h"
-#include "cfg80211.h"
-#include "eeprom.h"
-
-static int iwm_rx_check_udma_hdr(struct iwm_udma_in_hdr *hdr)
-{
- if ((le32_to_cpu(hdr->cmd) == UMAC_PAD_TERMINAL) ||
- (le32_to_cpu(hdr->size) == UMAC_PAD_TERMINAL))
- return -EINVAL;
-
- return 0;
-}
-
-static inline int iwm_rx_resp_size(struct iwm_udma_in_hdr *hdr)
-{
- return ALIGN(le32_to_cpu(hdr->size) + sizeof(struct iwm_udma_in_hdr),
- 16);
-}
-
-/*
- * Notification handlers:
- *
- * For every possible notification we can receive from the
- * target, we have a handler.
- * When we get a target notification, and there is no one
- * waiting for it, it's just processed through the rx code
- * path:
- *
- * iwm_rx_handle()
- * -> iwm_rx_handle_umac()
- * -> iwm_rx_handle_wifi()
- * -> iwm_rx_handle_resp()
- * -> iwm_ntf_*()
- *
- * OR
- *
- * -> iwm_rx_handle_non_wifi()
- *
- * If there are processes waiting for this notification, then
- * iwm_rx_handle_wifi() just wakes those processes up and they
- * grab the pending notification.
- */
-static int iwm_ntf_error(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_error *error;
- struct iwm_fw_error_hdr *fw_err;
-
- error = (struct iwm_umac_notif_error *)buf;
- fw_err = &error->err;
-
- memcpy(iwm->last_fw_err, fw_err, sizeof(struct iwm_fw_error_hdr));
-
- IWM_ERR(iwm, "%cMAC FW ERROR:\n",
- (le32_to_cpu(fw_err->category) == UMAC_SYS_ERR_CAT_LMAC) ? 'L' : 'U');
- IWM_ERR(iwm, "\tCategory: %d\n", le32_to_cpu(fw_err->category));
- IWM_ERR(iwm, "\tStatus: 0x%x\n", le32_to_cpu(fw_err->status));
- IWM_ERR(iwm, "\tPC: 0x%x\n", le32_to_cpu(fw_err->pc));
- IWM_ERR(iwm, "\tblink1: %d\n", le32_to_cpu(fw_err->blink1));
- IWM_ERR(iwm, "\tblink2: %d\n", le32_to_cpu(fw_err->blink2));
- IWM_ERR(iwm, "\tilink1: %d\n", le32_to_cpu(fw_err->ilink1));
- IWM_ERR(iwm, "\tilink2: %d\n", le32_to_cpu(fw_err->ilink2));
- IWM_ERR(iwm, "\tData1: 0x%x\n", le32_to_cpu(fw_err->data1));
- IWM_ERR(iwm, "\tData2: 0x%x\n", le32_to_cpu(fw_err->data2));
- IWM_ERR(iwm, "\tLine number: %d\n", le32_to_cpu(fw_err->line_num));
- IWM_ERR(iwm, "\tUMAC status: 0x%x\n", le32_to_cpu(fw_err->umac_status));
- IWM_ERR(iwm, "\tLMAC status: 0x%x\n", le32_to_cpu(fw_err->lmac_status));
- IWM_ERR(iwm, "\tSDIO status: 0x%x\n", le32_to_cpu(fw_err->sdio_status));
-
- iwm_resetting(iwm);
-
- return 0;
-}
-
-static int iwm_ntf_umac_alive(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_alive *alive_resp =
- (struct iwm_umac_notif_alive *)(buf);
- u16 status = le16_to_cpu(alive_resp->status);
-
- if (status == UMAC_NTFY_ALIVE_STATUS_ERR) {
- IWM_ERR(iwm, "Receive error UMAC_ALIVE\n");
- return -EIO;
- }
-
- iwm_tx_credit_init_pools(iwm, alive_resp);
-
- return 0;
-}
-
-static int iwm_ntf_init_complete(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct iwm_umac_notif_init_complete *init_complete =
- (struct iwm_umac_notif_init_complete *)(buf);
- u16 status = le16_to_cpu(init_complete->status);
- bool blocked = (status == UMAC_NTFY_INIT_COMPLETE_STATUS_ERR);
-
- if (blocked)
- IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is on (radio off)\n");
- else
- IWM_DBG_NTF(iwm, DBG, "Hardware rf kill is off (radio on)\n");
-
- wiphy_rfkill_set_hw_state(wiphy, blocked);
-
- return 0;
-}
-
-static int iwm_ntf_tx_credit_update(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- int pool_nr, total_freed_pages;
- unsigned long pool_map;
- int i, id;
- struct iwm_umac_notif_page_dealloc *dealloc =
- (struct iwm_umac_notif_page_dealloc *)buf;
-
- pool_nr = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_CNT);
- pool_map = GET_VAL32(dealloc->changes, UMAC_DEALLOC_NTFY_CHANGES_MSK);
-
- IWM_DBG_TX(iwm, DBG, "UMAC dealloc notification: pool nr %d, "
- "update map 0x%lx\n", pool_nr, pool_map);
-
- spin_lock(&iwm->tx_credit.lock);
-
- for (i = 0; i < pool_nr; i++) {
- id = GET_VAL32(dealloc->grp_info[i],
- UMAC_DEALLOC_NTFY_GROUP_NUM);
- if (test_bit(id, &pool_map)) {
- total_freed_pages = GET_VAL32(dealloc->grp_info[i],
- UMAC_DEALLOC_NTFY_PAGE_CNT);
- iwm_tx_credit_inc(iwm, id, total_freed_pages);
- }
- }
-
- spin_unlock(&iwm->tx_credit.lock);
-
- return 0;
-}
-
-static int iwm_ntf_umac_reset(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- IWM_DBG_NTF(iwm, DBG, "UMAC RESET done\n");
-
- return 0;
-}
-
-static int iwm_ntf_lmac_version(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- IWM_DBG_NTF(iwm, INFO, "LMAC Version: %x.%x\n", buf[9], buf[8]);
-
- return 0;
-}
-
-static int iwm_ntf_tx(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_lmac_tx_resp *tx_resp;
- struct iwm_umac_wifi_in_hdr *hdr;
-
- tx_resp = (struct iwm_lmac_tx_resp *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
- hdr = (struct iwm_umac_wifi_in_hdr *)buf;
-
- IWM_DBG_TX(iwm, DBG, "REPLY_TX, buf size: %lu\n", buf_size);
-
- IWM_DBG_TX(iwm, DBG, "Seqnum: %d\n",
- le16_to_cpu(hdr->sw_hdr.cmd.seq_num));
- IWM_DBG_TX(iwm, DBG, "\tFrame cnt: %d\n", tx_resp->frame_cnt);
- IWM_DBG_TX(iwm, DBG, "\tRetry cnt: %d\n",
- le16_to_cpu(tx_resp->retry_cnt));
- IWM_DBG_TX(iwm, DBG, "\tSeq ctl: %d\n", le16_to_cpu(tx_resp->seq_ctl));
- IWM_DBG_TX(iwm, DBG, "\tByte cnt: %d\n",
- le16_to_cpu(tx_resp->byte_cnt));
- IWM_DBG_TX(iwm, DBG, "\tStatus: 0x%x\n", le32_to_cpu(tx_resp->status));
-
- return 0;
-}
-
-
-static int iwm_ntf_calib_res(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- u8 opcode;
- u8 *calib_buf;
- struct iwm_lmac_calib_hdr *hdr = (struct iwm_lmac_calib_hdr *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
-
- opcode = hdr->opcode;
-
- BUG_ON(opcode >= CALIBRATION_CMD_NUM ||
- opcode < PHY_CALIBRATE_OPCODES_NUM);
-
- IWM_DBG_NTF(iwm, DBG, "Store calibration result for opcode: %d\n",
- opcode);
-
- buf_size -= sizeof(struct iwm_umac_wifi_in_hdr);
- calib_buf = iwm->calib_res[opcode].buf;
-
- if (!calib_buf || (iwm->calib_res[opcode].size < buf_size)) {
- kfree(calib_buf);
- calib_buf = kzalloc(buf_size, GFP_KERNEL);
- if (!calib_buf) {
- IWM_ERR(iwm, "Memory allocation failed: calib_res\n");
- return -ENOMEM;
- }
- iwm->calib_res[opcode].buf = calib_buf;
- iwm->calib_res[opcode].size = buf_size;
- }
-
- memcpy(calib_buf, hdr, buf_size);
- set_bit(opcode - PHY_CALIBRATE_OPCODES_NUM, &iwm->calib_done_map);
-
- return 0;
-}
-
-static int iwm_ntf_calib_complete(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- IWM_DBG_NTF(iwm, DBG, "Calibration completed\n");
-
- return 0;
-}
-
-static int iwm_ntf_calib_cfg(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_lmac_cal_cfg_resp *cal_resp;
-
- cal_resp = (struct iwm_lmac_cal_cfg_resp *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
-
- IWM_DBG_NTF(iwm, DBG, "Calibration CFG command status: %d\n",
- le32_to_cpu(cal_resp->status));
-
- return 0;
-}
-
-static int iwm_ntf_wifi_status(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_wifi_status *status =
- (struct iwm_umac_notif_wifi_status *)buf;
-
- iwm->core_enabled |= le16_to_cpu(status->status);
-
- return 0;
-}
-
-static struct iwm_rx_ticket_node *
-iwm_rx_ticket_node_alloc(struct iwm_priv *iwm, struct iwm_rx_ticket *ticket)
-{
- struct iwm_rx_ticket_node *ticket_node;
-
- ticket_node = kzalloc(sizeof(struct iwm_rx_ticket_node), GFP_KERNEL);
- if (!ticket_node) {
- IWM_ERR(iwm, "Couldn't allocate ticket node\n");
- return ERR_PTR(-ENOMEM);
- }
-
- ticket_node->ticket = kmemdup(ticket, sizeof(struct iwm_rx_ticket),
- GFP_KERNEL);
- if (!ticket_node->ticket) {
- IWM_ERR(iwm, "Couldn't allocate RX ticket\n");
- kfree(ticket_node);
- return ERR_PTR(-ENOMEM);
- }
-
- INIT_LIST_HEAD(&ticket_node->node);
-
- return ticket_node;
-}
-
-static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
-{
- kfree(ticket_node->ticket);
- kfree(ticket_node);
-}
-
-static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
-{
- u8 id_hash = IWM_RX_ID_GET_HASH(id);
- struct iwm_rx_packet *packet;
-
- spin_lock(&iwm->packet_lock[id_hash]);
- list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
- if (packet->id == id) {
- list_del(&packet->node);
- spin_unlock(&iwm->packet_lock[id_hash]);
- return packet;
- }
-
- spin_unlock(&iwm->packet_lock[id_hash]);
- return NULL;
-}
-
-static struct iwm_rx_packet *iwm_rx_packet_alloc(struct iwm_priv *iwm, u8 *buf,
- u32 size, u16 id)
-{
- struct iwm_rx_packet *packet;
-
- packet = kzalloc(sizeof(struct iwm_rx_packet), GFP_KERNEL);
- if (!packet) {
- IWM_ERR(iwm, "Couldn't allocate packet\n");
- return ERR_PTR(-ENOMEM);
- }
-
- packet->skb = dev_alloc_skb(size);
- if (!packet->skb) {
- IWM_ERR(iwm, "Couldn't allocate packet SKB\n");
- kfree(packet);
- return ERR_PTR(-ENOMEM);
- }
-
- packet->pkt_size = size;
-
- skb_put(packet->skb, size);
- memcpy(packet->skb->data, buf, size);
- INIT_LIST_HEAD(&packet->node);
- packet->id = id;
-
- return packet;
-}
-
-void iwm_rx_free(struct iwm_priv *iwm)
-{
- struct iwm_rx_ticket_node *ticket, *nt;
- struct iwm_rx_packet *packet, *np;
- int i;
-
- spin_lock(&iwm->ticket_lock);
- list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
- list_del(&ticket->node);
- iwm_rx_ticket_node_free(ticket);
- }
- spin_unlock(&iwm->ticket_lock);
-
- for (i = 0; i < IWM_RX_ID_HASH; i++) {
- spin_lock(&iwm->packet_lock[i]);
- list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
- node) {
- list_del(&packet->node);
- kfree_skb(packet->skb);
- kfree(packet);
- }
- spin_unlock(&iwm->packet_lock[i]);
- }
-}
-
-static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_rx_ticket *ntf_rx_ticket =
- (struct iwm_umac_notif_rx_ticket *)buf;
- struct iwm_rx_ticket *ticket =
- (struct iwm_rx_ticket *)ntf_rx_ticket->tickets;
- int i, schedule_rx = 0;
-
- for (i = 0; i < ntf_rx_ticket->num_tickets; i++) {
- struct iwm_rx_ticket_node *ticket_node;
-
- switch (le16_to_cpu(ticket->action)) {
- case IWM_RX_TICKET_RELEASE:
- case IWM_RX_TICKET_DROP:
- /* We can push the packet to the stack */
- ticket_node = iwm_rx_ticket_node_alloc(iwm, ticket);
- if (IS_ERR(ticket_node))
- return PTR_ERR(ticket_node);
-
- IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
- __le16_to_cpu(ticket->action) ==
- IWM_RX_TICKET_RELEASE ?
- "RELEASE" : "DROP",
- ticket->id);
- spin_lock(&iwm->ticket_lock);
- list_add_tail(&ticket_node->node, &iwm->rx_tickets);
- spin_unlock(&iwm->ticket_lock);
-
- /*
- * We received an Rx ticket, most likely there's
- * a packet pending for it, it's not worth going
- * through the packet hash list to double check.
- * Let's just fire the rx worker..
- */
- schedule_rx = 1;
-
- break;
-
- default:
- IWM_ERR(iwm, "Invalid RX ticket action: 0x%x\n",
- ticket->action);
- }
-
- ticket++;
- }
-
- if (schedule_rx)
- queue_work(iwm->rx_wq, &iwm->rx_worker);
-
- return 0;
-}
-
-static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_wifi_in_hdr *wifi_hdr;
- struct iwm_rx_packet *packet;
- u16 id, buf_offset;
- u32 packet_size;
- u8 id_hash;
-
- IWM_DBG_RX(iwm, DBG, "\n");
-
- wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
- id = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
- buf_offset = sizeof(struct iwm_umac_wifi_in_hdr);
- packet_size = buf_size - sizeof(struct iwm_umac_wifi_in_hdr);
-
- IWM_DBG_RX(iwm, DBG, "CMD:0x%x, seqnum: %d, packet size: %d\n",
- wifi_hdr->sw_hdr.cmd.cmd, id, packet_size);
- IWM_DBG_RX(iwm, DBG, "Packet id: %d\n", id);
- IWM_HEXDUMP(iwm, DBG, RX, "PACKET: ", buf + buf_offset, packet_size);
-
- packet = iwm_rx_packet_alloc(iwm, buf + buf_offset, packet_size, id);
- if (IS_ERR(packet))
- return PTR_ERR(packet);
-
- id_hash = IWM_RX_ID_GET_HASH(id);
- spin_lock(&iwm->packet_lock[id_hash]);
- list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
- spin_unlock(&iwm->packet_lock[id_hash]);
-
- /* We might (unlikely) have received the packet _after_ the ticket */
- queue_work(iwm->rx_wq, &iwm->rx_worker);
-
- return 0;
-}
-
-/* MLME handlers */
-static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_assoc_start *start;
-
- start = (struct iwm_umac_notif_assoc_start *)buf;
-
- IWM_DBG_MLME(iwm, INFO, "Association with %pM Started, reason: %d\n",
- start->bssid, le32_to_cpu(start->roam_reason));
-
- wake_up_interruptible(&iwm->mlme_queue);
-
- return 0;
-}
-
-static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm)
-{
- if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
- iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
- (iwm->umac_profile->sec.ucast_cipher ==
- iwm->umac_profile->sec.mcast_cipher) &&
- (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN))
- return 1;
-
- return 0;
-}
-
-static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct ieee80211_channel *chan;
- struct iwm_umac_notif_assoc_complete *complete =
- (struct iwm_umac_notif_assoc_complete *)buf;
-
- IWM_DBG_MLME(iwm, INFO, "Association with %pM completed, status: %d\n",
- complete->bssid, complete->status);
-
- switch (le32_to_cpu(complete->status)) {
- case UMAC_ASSOC_COMPLETE_SUCCESS:
- chan = ieee80211_get_channel(wiphy,
- ieee80211_channel_to_frequency(complete->channel,
- complete->band == UMAC_BAND_2GHZ ?
- IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ));
- if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
- /* Associated to a unallowed channel, disassociate. */
- __iwm_invalidate_mlme_profile(iwm);
- IWM_WARN(iwm, "Couldn't associate with %pM due to "
- "channel %d is disabled. Check your local "
- "regulatory setting.\n",
- complete->bssid, complete->channel);
- goto failure;
- }
-
- set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
- iwm->channel = complete->channel;
-
- /* Internal roaming state, avoid notifying SME. */
- if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
- && iwm->conf.mode == UMAC_MODE_BSS) {
- cancel_delayed_work(&iwm->disconnect);
- cfg80211_roamed(iwm_to_ndev(iwm), NULL,
- complete->bssid,
- iwm->req_ie, iwm->req_ie_len,
- iwm->resp_ie, iwm->resp_ie_len,
- GFP_KERNEL);
- break;
- }
-
- iwm_link_on(iwm);
-
- if (iwm->conf.mode == UMAC_MODE_IBSS)
- goto ibss;
-
- if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
- cfg80211_connect_result(iwm_to_ndev(iwm),
- complete->bssid,
- iwm->req_ie, iwm->req_ie_len,
- iwm->resp_ie, iwm->resp_ie_len,
- WLAN_STATUS_SUCCESS,
- GFP_KERNEL);
- else
- cfg80211_roamed(iwm_to_ndev(iwm), NULL,
- complete->bssid,
- iwm->req_ie, iwm->req_ie_len,
- iwm->resp_ie, iwm->resp_ie_len,
- GFP_KERNEL);
- break;
- case UMAC_ASSOC_COMPLETE_FAILURE:
- failure:
- clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- memset(iwm->bssid, 0, ETH_ALEN);
- iwm->channel = 0;
-
- /* Internal roaming state, avoid notifying SME. */
- if (!test_and_clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status)
- && iwm->conf.mode == UMAC_MODE_BSS) {
- cancel_delayed_work(&iwm->disconnect);
- break;
- }
-
- iwm_link_off(iwm);
-
- if (iwm->conf.mode == UMAC_MODE_IBSS)
- goto ibss;
-
- if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
- if (!iwm_is_open_wep_profile(iwm)) {
- cfg80211_connect_result(iwm_to_ndev(iwm),
- complete->bssid,
- NULL, 0, NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- } else {
- /* Let's try shared WEP auth */
- IWM_ERR(iwm, "Trying WEP shared auth\n");
- schedule_work(&iwm->auth_retry_worker);
- }
- else
- cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0,
- GFP_KERNEL);
- break;
- default:
- break;
- }
-
- clear_bit(IWM_STATUS_RESETTING, &iwm->status);
- return 0;
-
- ibss:
- cfg80211_ibss_joined(iwm_to_ndev(iwm), iwm->bssid, GFP_KERNEL);
- clear_bit(IWM_STATUS_RESETTING, &iwm->status);
- return 0;
-}
-
-static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_profile_invalidate *invalid;
- u32 reason;
-
- invalid = (struct iwm_umac_notif_profile_invalidate *)buf;
- reason = le32_to_cpu(invalid->reason);
-
- IWM_DBG_MLME(iwm, INFO, "Profile Invalidated. Reason: %d\n", reason);
-
- if (reason != UMAC_PROFILE_INVALID_REQUEST &&
- test_bit(IWM_STATUS_SME_CONNECTING, &iwm->status))
- cfg80211_connect_result(iwm_to_ndev(iwm), NULL, NULL, 0, NULL,
- 0, WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
-
- clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
- clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
-
- iwm->umac_profile_active = false;
- memset(iwm->bssid, 0, ETH_ALEN);
- iwm->channel = 0;
-
- iwm_link_off(iwm);
-
- wake_up_interruptible(&iwm->mlme_queue);
-
- return 0;
-}
-
-#define IWM_DISCONNECT_INTERVAL (5 * HZ)
-
-static int iwm_mlme_connection_terminated(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- IWM_DBG_MLME(iwm, DBG, "Connection terminated\n");
-
- schedule_delayed_work(&iwm->disconnect, IWM_DISCONNECT_INTERVAL);
-
- return 0;
-}
-
-static int iwm_mlme_scan_complete(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- int ret;
- struct iwm_umac_notif_scan_complete *scan_complete =
- (struct iwm_umac_notif_scan_complete *)buf;
- u32 result = le32_to_cpu(scan_complete->result);
-
- IWM_DBG_MLME(iwm, INFO, "type:0x%x result:0x%x seq:%d\n",
- le32_to_cpu(scan_complete->type),
- le32_to_cpu(scan_complete->result),
- scan_complete->seq_num);
-
- if (!test_and_clear_bit(IWM_STATUS_SCANNING, &iwm->status)) {
- IWM_ERR(iwm, "Scan complete while device not scanning\n");
- return -EIO;
- }
- if (!iwm->scan_request)
- return 0;
-
- ret = iwm_cfg80211_inform_bss(iwm);
-
- cfg80211_scan_done(iwm->scan_request,
- (result & UMAC_SCAN_RESULT_ABORTED) ? 1 : !!ret);
- iwm->scan_request = NULL;
-
- return ret;
-}
-
-static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_sta_info *umac_sta =
- (struct iwm_umac_notif_sta_info *)buf;
- struct iwm_sta_info *sta;
- int i;
-
- switch (le32_to_cpu(umac_sta->opcode)) {
- case UMAC_OPCODE_ADD_MODIFY:
- sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
-
- IWM_DBG_MLME(iwm, INFO, "%s STA: ID = %d, Color = %d, "
- "addr = %pM, qos = %d\n",
- sta->valid ? "Modify" : "Add",
- GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
- GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
- umac_sta->mac_addr,
- umac_sta->flags & UMAC_STA_FLAG_QOS);
-
- sta->valid = true;
- sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
- sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
- memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
- break;
- case UMAC_OPCODE_REMOVE:
- IWM_DBG_MLME(iwm, INFO, "Remove STA: ID = %d, Color = %d, "
- "addr = %pM\n",
- GET_VAL8(umac_sta->sta_id, LMAC_STA_ID),
- GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR),
- umac_sta->mac_addr);
-
- sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
-
- if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
- sta->valid = false;
-
- break;
- case UMAC_OPCODE_CLEAR_ALL:
- for (i = 0; i < IWM_STA_TABLE_NUM; i++)
- iwm->sta_table[i].valid = false;
-
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
-
- IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n");
-
- wiphy_rfkill_set_hw_state(wiphy, true);
-
- return 0;
-}
-
-static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct ieee80211_mgmt *mgmt;
- struct iwm_umac_notif_bss_info *umac_bss =
- (struct iwm_umac_notif_bss_info *)buf;
- struct ieee80211_channel *channel;
- struct ieee80211_supported_band *band;
- struct iwm_bss_info *bss;
- s32 signal;
- int freq;
- u16 frame_len = le16_to_cpu(umac_bss->frame_len);
- size_t bss_len = sizeof(struct iwm_umac_notif_bss_info) + frame_len;
-
- mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
-
- IWM_DBG_MLME(iwm, DBG, "New BSS info entry: %pM\n", mgmt->bssid);
- IWM_DBG_MLME(iwm, DBG, "\tType: 0x%x\n", le32_to_cpu(umac_bss->type));
- IWM_DBG_MLME(iwm, DBG, "\tTimestamp: %d\n",
- le32_to_cpu(umac_bss->timestamp));
- IWM_DBG_MLME(iwm, DBG, "\tTable Index: %d\n",
- le16_to_cpu(umac_bss->table_idx));
- IWM_DBG_MLME(iwm, DBG, "\tBand: %d\n", umac_bss->band);
- IWM_DBG_MLME(iwm, DBG, "\tChannel: %d\n", umac_bss->channel);
- IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
- IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);
-
- list_for_each_entry(bss, &iwm->bss_list, node)
- if (bss->bss->table_idx == umac_bss->table_idx)
- break;
-
- if (&bss->node != &iwm->bss_list) {
- /* Remove the old BSS entry, we will add it back later. */
- list_del(&bss->node);
- kfree(bss->bss);
- } else {
- /* New BSS entry */
-
- bss = kzalloc(sizeof(struct iwm_bss_info), GFP_KERNEL);
- if (!bss) {
- IWM_ERR(iwm, "Couldn't allocate bss_info\n");
- return -ENOMEM;
- }
- }
-
- bss->bss = kzalloc(bss_len, GFP_KERNEL);
- if (!bss->bss) {
- kfree(bss);
- IWM_ERR(iwm, "Couldn't allocate bss\n");
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&bss->node);
- memcpy(bss->bss, umac_bss, bss_len);
-
- if (umac_bss->band == UMAC_BAND_2GHZ)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
- else if (umac_bss->band == UMAC_BAND_5GHZ)
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
- else {
- IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
- goto err;
- }
-
- freq = ieee80211_channel_to_frequency(umac_bss->channel, band->band);
- channel = ieee80211_get_channel(wiphy, freq);
- signal = umac_bss->rssi * 100;
-
- bss->cfg_bss = cfg80211_inform_bss_frame(wiphy, channel,
- mgmt, frame_len,
- signal, GFP_KERNEL);
- if (!bss->cfg_bss)
- goto err;
-
- list_add_tail(&bss->node, &iwm->bss_list);
-
- return 0;
- err:
- kfree(bss->bss);
- kfree(bss);
-
- return -EINVAL;
-}
-
-static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_bss_removed *bss_rm =
- (struct iwm_umac_notif_bss_removed *)buf;
- struct iwm_bss_info *bss, *next;
- u16 table_idx;
- int i;
-
- for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
- table_idx = le16_to_cpu(bss_rm->entries[i]) &
- IWM_BSS_REMOVE_INDEX_MSK;
- list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
- if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
- struct ieee80211_mgmt *mgmt;
-
- mgmt = (struct ieee80211_mgmt *)
- (bss->bss->frame_buf);
- IWM_DBG_MLME(iwm, ERR, "BSS removed: %pM\n",
- mgmt->bssid);
- list_del(&bss->node);
- kfree(bss->bss);
- kfree(bss);
- }
- }
-
- return 0;
-}
-
-static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_mgt_frame *mgt_frame =
- (struct iwm_umac_notif_mgt_frame *)buf;
- struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
-
- IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
- le16_to_cpu(mgt_frame->len));
-
- if (ieee80211_is_assoc_req(mgt->frame_control)) {
- iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
- - offsetof(struct ieee80211_mgmt,
- u.assoc_req.variable);
- kfree(iwm->req_ie);
- iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
- iwm->req_ie_len, GFP_KERNEL);
- } else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
- iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
- - offsetof(struct ieee80211_mgmt,
- u.reassoc_req.variable);
- kfree(iwm->req_ie);
- iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
- iwm->req_ie_len, GFP_KERNEL);
- } else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
- iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
- - offsetof(struct ieee80211_mgmt,
- u.assoc_resp.variable);
- kfree(iwm->resp_ie);
- iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
- iwm->resp_ie_len, GFP_KERNEL);
- } else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
- iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
- - offsetof(struct ieee80211_mgmt,
- u.reassoc_resp.variable);
- kfree(iwm->resp_ie);
- iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
- iwm->resp_ie_len, GFP_KERNEL);
- } else {
- IWM_ERR(iwm, "Unsupported management frame: 0x%x",
- le16_to_cpu(mgt->frame_control));
- return 0;
- }
-
- return 0;
-}
-
-static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_wifi_if *notif =
- (struct iwm_umac_notif_wifi_if *)buf;
-
- switch (notif->status) {
- case WIFI_IF_NTFY_ASSOC_START:
- return iwm_mlme_assoc_start(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_ASSOC_COMPLETE:
- return iwm_mlme_assoc_complete(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE:
- return iwm_mlme_profile_invalidate(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_CONNECTION_TERMINATED:
- return iwm_mlme_connection_terminated(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_SCAN_COMPLETE:
- return iwm_mlme_scan_complete(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_STA_TABLE_CHANGE:
- return iwm_mlme_update_sta_table(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
- IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
- break;
- case WIFI_IF_NTFY_RADIO_PREEMPTION:
- return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
- return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
- case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
- return iwm_mlme_remove_bss(iwm, buf, buf_size, cmd);
- break;
- case WIFI_IF_NTFY_MGMT_FRAME:
- return iwm_mlme_mgt_frame(iwm, buf, buf_size, cmd);
- case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START:
- case WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE:
- case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START:
- case WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT:
- case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START:
- case WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE:
- case WIFI_DBG_IF_NTFY_CNCT_ATC_START:
- case WIFI_DBG_IF_NTFY_COEX_NOTIFICATION:
- case WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP:
- case WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP:
- IWM_DBG_MLME(iwm, DBG, "MLME debug notification: 0x%x\n",
- notif->status);
- break;
- default:
- IWM_ERR(iwm, "Unhandled notification: 0x%x\n", notif->status);
- break;
- }
-
- return 0;
-}
-
-#define IWM_STATS_UPDATE_INTERVAL (2 * HZ)
-
-static int iwm_ntf_statistics(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_stats *stats = (struct iwm_umac_notif_stats *)buf;
- struct iw_statistics *wstats = &iwm->wstats;
- u16 max_rate = 0;
- int i;
-
- IWM_DBG_MLME(iwm, DBG, "Statistics notification received\n");
-
- if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
- for (i = 0; i < UMAC_NTF_RATE_SAMPLE_NR; i++) {
- max_rate = max_t(u16, max_rate,
- max(le16_to_cpu(stats->tx_rate[i]),
- le16_to_cpu(stats->rx_rate[i])));
- }
- /* UMAC passes rate info multiplies by 2 */
- iwm->rate = max_rate >> 1;
- }
- iwm->txpower = le32_to_cpu(stats->tx_power);
-
- wstats->status = 0;
-
- wstats->discard.nwid = le32_to_cpu(stats->rx_drop_other_bssid);
- wstats->discard.code = le32_to_cpu(stats->rx_drop_decode);
- wstats->discard.fragment = le32_to_cpu(stats->rx_drop_reassembly);
- wstats->discard.retries = le32_to_cpu(stats->tx_drop_max_retry);
-
- wstats->miss.beacon = le32_to_cpu(stats->missed_beacons);
-
- /* according to cfg80211 */
- if (stats->rssi_dbm < -110)
- wstats->qual.qual = 0;
- else if (stats->rssi_dbm > -40)
- wstats->qual.qual = 70;
- else
- wstats->qual.qual = stats->rssi_dbm + 110;
-
- wstats->qual.level = stats->rssi_dbm;
- wstats->qual.noise = stats->noise_dbm;
- wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
-
- schedule_delayed_work(&iwm->stats_request, IWM_STATS_UPDATE_INTERVAL);
-
- mod_timer(&iwm->watchdog, round_jiffies(jiffies + IWM_WATCHDOG_PERIOD));
-
- return 0;
-}
-
-static int iwm_ntf_eeprom_proxy(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_cmd_eeprom_proxy *eeprom_proxy =
- (struct iwm_umac_cmd_eeprom_proxy *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
- struct iwm_umac_cmd_eeprom_proxy_hdr *hdr = &eeprom_proxy->hdr;
- u32 hdr_offset = le32_to_cpu(hdr->offset);
- u32 hdr_len = le32_to_cpu(hdr->len);
- u32 hdr_type = le32_to_cpu(hdr->type);
-
- IWM_DBG_NTF(iwm, DBG, "type: 0x%x, len: %d, offset: 0x%x\n",
- hdr_type, hdr_len, hdr_offset);
-
- if ((hdr_offset + hdr_len) > IWM_EEPROM_LEN)
- return -EINVAL;
-
- switch (hdr_type) {
- case IWM_UMAC_CMD_EEPROM_TYPE_READ:
- memcpy(iwm->eeprom + hdr_offset, eeprom_proxy->buf, hdr_len);
- break;
- case IWM_UMAC_CMD_EEPROM_TYPE_WRITE:
- default:
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
-static int iwm_ntf_channel_info_list(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_cmd_get_channel_list *ch_list =
- (struct iwm_umac_cmd_get_channel_list *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct ieee80211_supported_band *band;
- int i;
-
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
-
- for (i = 0; i < band->n_channels; i++) {
- unsigned long ch_mask_0 =
- le32_to_cpu(ch_list->ch[0].channels_mask);
- unsigned long ch_mask_2 =
- le32_to_cpu(ch_list->ch[2].channels_mask);
-
- if (!test_bit(i, &ch_mask_0))
- band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
-
- if (!test_bit(i, &ch_mask_2))
- band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
- }
-
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
-
- for (i = 0; i < min(band->n_channels, 32); i++) {
- unsigned long ch_mask_1 =
- le32_to_cpu(ch_list->ch[1].channels_mask);
- unsigned long ch_mask_3 =
- le32_to_cpu(ch_list->ch[3].channels_mask);
-
- if (!test_bit(i, &ch_mask_1))
- band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
-
- if (!test_bit(i, &ch_mask_3))
- band->channels[i].flags |= IEEE80211_CHAN_NO_IBSS;
- }
-
- return 0;
-}
-
-static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_notif_stop_resume_tx *stp_res_tx =
- (struct iwm_umac_notif_stop_resume_tx *)buf;
- struct iwm_sta_info *sta_info;
- struct iwm_tid_info *tid_info;
- u8 sta_id = STA_ID_N_COLOR_ID(stp_res_tx->sta_id);
- u16 tid_msk = le16_to_cpu(stp_res_tx->stop_resume_tid_msk);
- int bit, ret = 0;
- bool stop = false;
-
- IWM_DBG_NTF(iwm, DBG, "stop/resume notification:\n"
- "\tflags: 0x%x\n"
- "\tSTA id: %d\n"
- "\tTID bitmask: 0x%x\n",
- stp_res_tx->flags, stp_res_tx->sta_id,
- stp_res_tx->stop_resume_tid_msk);
-
- if (stp_res_tx->flags & UMAC_STOP_TX_FLAG)
- stop = true;
-
- sta_info = &iwm->sta_table[sta_id];
- if (!sta_info->valid) {
- IWM_ERR(iwm, "Stoping an invalid STA: %d %d\n",
- sta_id, stp_res_tx->sta_id);
- return -EINVAL;
- }
-
- for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
- tid_info = &sta_info->tid_info[bit];
-
- mutex_lock(&tid_info->mutex);
- tid_info->stopped = stop;
- mutex_unlock(&tid_info->mutex);
-
- if (!stop) {
- struct iwm_tx_queue *txq;
- int queue = iwm_tid_to_queue(bit);
-
- if (queue < 0)
- continue;
-
- txq = &iwm->txq[queue];
- /*
- * If we resume, we have to move our SKBs
- * back to the tx queue and queue some work.
- */
- spin_lock_bh(&txq->lock);
- skb_queue_splice_init(&txq->queue, &txq->stopped_queue);
- spin_unlock_bh(&txq->lock);
-
- queue_work(txq->wq, &txq->worker);
- }
-
- }
-
- /* We send an ACK only for the stop case */
- if (stop)
- ret = iwm_send_umac_stop_resume_tx(iwm, stp_res_tx);
-
- return ret;
-}
-
-static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- struct iwm_umac_wifi_if *hdr;
-
- if (cmd == NULL) {
- IWM_ERR(iwm, "Couldn't find expected wifi command\n");
- return -EINVAL;
- }
-
- hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload;
-
- IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
- "oid is 0x%x\n", hdr->oid);
-
- set_bit(hdr->oid, &iwm->wifi_ntfy[0]);
- wake_up_interruptible(&iwm->wifi_ntfy_queue);
-
- switch (hdr->oid) {
- case UMAC_WIFI_IF_CMD_SET_PROFILE:
- iwm->umac_profile_active = true;
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-#define CT_KILL_DELAY (30 * HZ)
-static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size, struct iwm_wifi_cmd *cmd)
-{
- struct wiphy *wiphy = iwm_to_wiphy(iwm);
- struct iwm_lmac_card_state *state = (struct iwm_lmac_card_state *)
- (buf + sizeof(struct iwm_umac_wifi_in_hdr));
- u32 flags = le32_to_cpu(state->flags);
-
- IWM_INFO(iwm, "HW RF Kill %s, CT Kill %s\n",
- flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
- flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");
-
- if (flags & IWM_CARD_STATE_CTKILL_DISABLED) {
- /*
- * We got a CTKILL event: We bring the interface down in
- * oder to cool the device down, and try to bring it up
- * 30 seconds later. If it's still too hot, we'll go through
- * this code path again.
- */
- cancel_delayed_work_sync(&iwm->ct_kill_delay);
- schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);
- }
-
- wiphy_rfkill_set_hw_state(wiphy, flags &
- (IWM_CARD_STATE_HW_DISABLED |
- IWM_CARD_STATE_CTKILL_DISABLED));
-
- return 0;
-}
-
-static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size)
-{
- struct iwm_umac_wifi_in_hdr *wifi_hdr;
- struct iwm_wifi_cmd *cmd;
- u8 source, cmd_id;
- u16 seq_num;
- u32 count;
-
- wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
- cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
- source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
- if (source >= IWM_SRC_NUM) {
- IWM_CRIT(iwm, "invalid source %d\n", source);
- return -EINVAL;
- }
-
- if (cmd_id == REPLY_RX_MPDU_CMD)
- trace_iwm_rx_packet(iwm, buf, buf_size);
- else if ((cmd_id == UMAC_NOTIFY_OPCODE_RX_TICKET) &&
- (source == UMAC_HDI_IN_SOURCE_FW))
- trace_iwm_rx_ticket(iwm, buf, buf_size);
- else
- trace_iwm_rx_wifi_cmd(iwm, wifi_hdr);
-
- count = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
- count += sizeof(struct iwm_umac_wifi_in_hdr) -
- sizeof(struct iwm_dev_cmd_hdr);
- if (count > buf_size) {
- IWM_CRIT(iwm, "count %d, buf size:%ld\n", count, buf_size);
- return -EINVAL;
- }
-
- seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
-
- IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
- cmd_id, source, seq_num);
-
- /*
- * If this is a response to a previously sent command, there must
- * be a pending command for this sequence number.
- */
- cmd = iwm_get_pending_wifi_cmd(iwm, seq_num);
-
- /* Notify the caller only for sync commands. */
- switch (source) {
- case UMAC_HDI_IN_SOURCE_FHRX:
- if (iwm->lmac_handlers[cmd_id] &&
- test_bit(cmd_id, &iwm->lmac_handler_map[0]))
- return iwm_notif_send(iwm, cmd, cmd_id, source,
- buf, count);
- break;
- case UMAC_HDI_IN_SOURCE_FW:
- if (iwm->umac_handlers[cmd_id] &&
- test_bit(cmd_id, &iwm->umac_handler_map[0]))
- return iwm_notif_send(iwm, cmd, cmd_id, source,
- buf, count);
- break;
- case UMAC_HDI_IN_SOURCE_UDMA:
- break;
- }
-
- return iwm_rx_handle_resp(iwm, buf, count, cmd);
-}
-
-int iwm_rx_handle_resp(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size,
- struct iwm_wifi_cmd *cmd)
-{
- u8 source, cmd_id;
- struct iwm_umac_wifi_in_hdr *wifi_hdr;
- int ret = 0;
-
- wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
- cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
-
- source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
-
- IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x\n", cmd_id, source);
-
- switch (source) {
- case UMAC_HDI_IN_SOURCE_FHRX:
- if (iwm->lmac_handlers[cmd_id])
- ret = iwm->lmac_handlers[cmd_id]
- (iwm, buf, buf_size, cmd);
- break;
- case UMAC_HDI_IN_SOURCE_FW:
- if (iwm->umac_handlers[cmd_id])
- ret = iwm->umac_handlers[cmd_id]
- (iwm, buf, buf_size, cmd);
- break;
- case UMAC_HDI_IN_SOURCE_UDMA:
- ret = -EINVAL;
- break;
- }
-
- kfree(cmd);
-
- return ret;
-}
-
-static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size)
-{
- u8 seq_num;
- struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
- struct iwm_nonwifi_cmd *cmd;
-
- trace_iwm_rx_nonwifi_cmd(iwm, buf, buf_size);
- seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
-
- /*
- * We received a non wifi answer.
- * Let's check if there's a pending command for it, and if so
- * replace the command payload with the buffer, and then wake the
- * callers up.
- * That means we only support synchronised non wifi command response
- * schemes.
- */
- list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
- if (cmd->seq_num == seq_num) {
- cmd->resp_received = true;
- cmd->buf.len = buf_size;
- memcpy(cmd->buf.hdr, buf, buf_size);
- wake_up_interruptible(&iwm->nonwifi_queue);
- }
-
- return 0;
-}
-
-static int iwm_rx_handle_umac(struct iwm_priv *iwm, u8 *buf,
- unsigned long buf_size)
-{
- int ret = 0;
- u8 op_code;
- unsigned long buf_offset = 0;
- struct iwm_udma_in_hdr *hdr;
-
- /*
- * To allow for a more efficient bus usage, UMAC
- * messages are encapsulated into UDMA ones. This
- * way we can have several UMAC messages in one bus
- * transfer.
- * A UDMA frame size is always aligned on 16 bytes,
- * and a UDMA frame must not start with a UMAC_PAD_TERMINAL
- * word. This is how we parse a bus frame into several
- * UDMA ones.
- */
- while (buf_offset < buf_size) {
-
- hdr = (struct iwm_udma_in_hdr *)(buf + buf_offset);
-
- if (iwm_rx_check_udma_hdr(hdr) < 0) {
- IWM_DBG_RX(iwm, DBG, "End of frame\n");
- break;
- }
-
- op_code = GET_VAL32(hdr->cmd, UMAC_HDI_IN_CMD_OPCODE);
-
- IWM_DBG_RX(iwm, DBG, "Op code: 0x%x\n", op_code);
-
- if (op_code == UMAC_HDI_IN_OPCODE_WIFI) {
- ret |= iwm_rx_handle_wifi(iwm, buf + buf_offset,
- buf_size - buf_offset);
- } else if (op_code < UMAC_HDI_IN_OPCODE_NONWIFI_MAX) {
- if (GET_VAL32(hdr->cmd,
- UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) !=
- UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG) {
- IWM_ERR(iwm, "Incorrect hw signature\n");
- return -EINVAL;
- }
- ret |= iwm_rx_handle_nonwifi(iwm, buf + buf_offset,
- buf_size - buf_offset);
- } else {
- IWM_ERR(iwm, "Invalid RX opcode: 0x%x\n", op_code);
- ret |= -EINVAL;
- }
-
- buf_offset += iwm_rx_resp_size(hdr);
- }
-
- return ret;
-}
-
-int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size)
-{
- struct iwm_udma_in_hdr *hdr;
-
- hdr = (struct iwm_udma_in_hdr *)buf;
-
- switch (le32_to_cpu(hdr->cmd)) {
- case UMAC_REBOOT_BARKER:
- if (test_bit(IWM_STATUS_READY, &iwm->status)) {
- IWM_ERR(iwm, "Unexpected BARKER\n");
-
- schedule_work(&iwm->reset_worker);
-
- return 0;
- }
-
- return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
- IWM_SRC_UDMA, buf, buf_size);
- case UMAC_ACK_BARKER:
- return iwm_notif_send(iwm, NULL, IWM_ACK_BARKER_NOTIFICATION,
- IWM_SRC_UDMA, NULL, 0);
- default:
- IWM_DBG_RX(iwm, DBG, "Received cmd: 0x%x\n", hdr->cmd);
- return iwm_rx_handle_umac(iwm, buf, buf_size);
- }
-
- return 0;
-}
-
-static const iwm_handler iwm_umac_handlers[] =
-{
- [UMAC_NOTIFY_OPCODE_ERROR] = iwm_ntf_error,
- [UMAC_NOTIFY_OPCODE_ALIVE] = iwm_ntf_umac_alive,
- [UMAC_NOTIFY_OPCODE_INIT_COMPLETE] = iwm_ntf_init_complete,
- [UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS] = iwm_ntf_wifi_status,
- [UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_mlme,
- [UMAC_NOTIFY_OPCODE_PAGE_DEALLOC] = iwm_ntf_tx_credit_update,
- [UMAC_NOTIFY_OPCODE_RX_TICKET] = iwm_ntf_rx_ticket,
- [UMAC_CMD_OPCODE_RESET] = iwm_ntf_umac_reset,
- [UMAC_NOTIFY_OPCODE_STATS] = iwm_ntf_statistics,
- [UMAC_CMD_OPCODE_EEPROM_PROXY] = iwm_ntf_eeprom_proxy,
- [UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST] = iwm_ntf_channel_info_list,
- [UMAC_CMD_OPCODE_STOP_RESUME_STA_TX] = iwm_ntf_stop_resume_tx,
- [REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet,
- [UMAC_CMD_OPCODE_WIFI_IF_WRAPPER] = iwm_ntf_wifi_if_wrapper,
-};
-
-static const iwm_handler iwm_lmac_handlers[] =
-{
- [REPLY_TX] = iwm_ntf_tx,
- [REPLY_ALIVE] = iwm_ntf_lmac_version,
- [CALIBRATION_RES_NOTIFICATION] = iwm_ntf_calib_res,
- [CALIBRATION_COMPLETE_NOTIFICATION] = iwm_ntf_calib_complete,
- [CALIBRATION_CFG_CMD] = iwm_ntf_calib_cfg,
- [REPLY_RX_MPDU_CMD] = iwm_ntf_rx_packet,
- [CARD_STATE_NOTIFICATION] = iwm_ntf_card_state,
-};
-
-void iwm_rx_setup_handlers(struct iwm_priv *iwm)
-{
- iwm->umac_handlers = (iwm_handler *) iwm_umac_handlers;
- iwm->lmac_handlers = (iwm_handler *) iwm_lmac_handlers;
-}
-
-static void iwm_remove_iv(struct sk_buff *skb, u32 hdr_total_len)
-{
- struct ieee80211_hdr *hdr;
- unsigned int hdr_len;
-
- hdr = (struct ieee80211_hdr *)skb->data;
-
- if (!ieee80211_has_protected(hdr->frame_control))
- return;
-
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- if (hdr_total_len <= hdr_len)
- return;
-
- memmove(skb->data + (hdr_total_len - hdr_len), skb->data, hdr_len);
- skb_pull(skb, (hdr_total_len - hdr_len));
-}
-
-static void iwm_rx_adjust_packet(struct iwm_priv *iwm,
- struct iwm_rx_packet *packet,
- struct iwm_rx_ticket_node *ticket_node)
-{
- u32 payload_offset = 0, payload_len;
- struct iwm_rx_ticket *ticket = ticket_node->ticket;
- struct iwm_rx_mpdu_hdr *mpdu_hdr;
- struct ieee80211_hdr *hdr;
-
- mpdu_hdr = (struct iwm_rx_mpdu_hdr *)packet->skb->data;
- payload_offset += sizeof(struct iwm_rx_mpdu_hdr);
- /* Padding is 0 or 2 bytes */
- payload_len = le16_to_cpu(mpdu_hdr->len) +
- (le16_to_cpu(ticket->flags) & IWM_RX_TICKET_PAD_SIZE_MSK);
- payload_len -= ticket->tail_len;
-
- IWM_DBG_RX(iwm, DBG, "Packet adjusted, len:%d, offset:%d, "
- "ticket offset:%d ticket tail len:%d\n",
- payload_len, payload_offset, ticket->payload_offset,
- ticket->tail_len);
-
- IWM_HEXDUMP(iwm, DBG, RX, "RAW: ", packet->skb->data, packet->skb->len);
-
- skb_pull(packet->skb, payload_offset);
- skb_trim(packet->skb, payload_len);
-
- iwm_remove_iv(packet->skb, ticket->payload_offset);
-
- hdr = (struct ieee80211_hdr *) packet->skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- /* UMAC handed QOS_DATA frame with 2 padding bytes appended
- * to the qos_ctl field in IEEE 802.11 headers. */
- memmove(packet->skb->data + IEEE80211_QOS_CTL_LEN + 2,
- packet->skb->data,
- ieee80211_hdrlen(hdr->frame_control) -
- IEEE80211_QOS_CTL_LEN);
- hdr = (struct ieee80211_hdr *) skb_pull(packet->skb,
- IEEE80211_QOS_CTL_LEN + 2);
- hdr->frame_control &= ~cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
- }
-
- IWM_HEXDUMP(iwm, DBG, RX, "ADJUSTED: ",
- packet->skb->data, packet->skb->len);
-}
-
-static void classify8023(struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- /* frame has qos control */
- skb->priority = *qc & IEEE80211_QOS_CTL_TID_MASK;
- } else {
- skb->priority = 0;
- }
-}
-
-static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
-{
- struct wireless_dev *wdev = iwm_to_wdev(iwm);
- struct net_device *ndev = iwm_to_ndev(iwm);
- struct sk_buff_head list;
- struct sk_buff *frame;
-
- IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
-
- __skb_queue_head_init(&list);
- ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0,
- true);
-
- while ((frame = __skb_dequeue(&list))) {
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += frame->len;
-
- frame->protocol = eth_type_trans(frame, ndev);
- frame->ip_summed = CHECKSUM_NONE;
- memset(frame->cb, 0, sizeof(frame->cb));
-
- if (netif_rx_ni(frame) == NET_RX_DROP) {
- IWM_ERR(iwm, "Packet dropped\n");
- ndev->stats.rx_dropped++;
- }
- }
-}
-
-static void iwm_rx_process_packet(struct iwm_priv *iwm,
- struct iwm_rx_packet *packet,
- struct iwm_rx_ticket_node *ticket_node)
-{
- int ret;
- struct sk_buff *skb = packet->skb;
- struct wireless_dev *wdev = iwm_to_wdev(iwm);
- struct net_device *ndev = iwm_to_ndev(iwm);
-
- IWM_DBG_RX(iwm, DBG, "Processing packet ID %d\n", packet->id);
-
- switch (le16_to_cpu(ticket_node->ticket->action)) {
- case IWM_RX_TICKET_RELEASE:
- IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");
-
- iwm_rx_adjust_packet(iwm, packet, ticket_node);
- skb->dev = iwm_to_ndev(iwm);
- classify8023(skb);
-
- if (le16_to_cpu(ticket_node->ticket->flags) &
- IWM_RX_TICKET_AMSDU_MSK) {
- iwm_rx_process_amsdu(iwm, skb);
- break;
- }
-
- ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
- if (ret < 0) {
- IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
- "%d\n", ret);
- kfree_skb(packet->skb);
- break;
- }
-
- IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);
-
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
-
- skb->protocol = eth_type_trans(skb, ndev);
- skb->ip_summed = CHECKSUM_NONE;
- memset(skb->cb, 0, sizeof(skb->cb));
-
- if (netif_rx_ni(skb) == NET_RX_DROP) {
- IWM_ERR(iwm, "Packet dropped\n");
- ndev->stats.rx_dropped++;
- }
- break;
- case IWM_RX_TICKET_DROP:
- IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n",
- le16_to_cpu(ticket_node->ticket->flags));
- kfree_skb(packet->skb);
- break;
- default:
- IWM_ERR(iwm, "Unknown ticket action: %d\n",
- le16_to_cpu(ticket_node->ticket->action));
- kfree_skb(packet->skb);
- }
-
- kfree(packet);
- iwm_rx_ticket_node_free(ticket_node);
-}
-
-/*
- * Rx data processing:
- *
- * We're receiving Rx packet from the LMAC, and Rx ticket from
- * the UMAC.
- * To forward a target data packet upstream (i.e. to the
- * kernel network stack), we must have received an Rx ticket
- * that tells us we're allowed to release this packet (ticket
- * action is IWM_RX_TICKET_RELEASE). The Rx ticket also indicates,
- * among other things, where valid data actually starts in the Rx
- * packet.
- */
-void iwm_rx_worker(struct work_struct *work)
-{
- struct iwm_priv *iwm;
- struct iwm_rx_ticket_node *ticket, *next;
-
- iwm = container_of(work, struct iwm_priv, rx_worker);
-
- /*
- * We go through the tickets list and if there is a pending
- * packet for it, we push it upstream.
- * We stop whenever a ticket is missing its packet, as we're
- * supposed to send the packets in order.
- */
- spin_lock(&iwm->ticket_lock);
- list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
- struct iwm_rx_packet *packet =
- iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));
-
- if (!packet) {
- IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
- "to be handled first\n",
- le16_to_cpu(ticket->ticket->id));
- break;
- }
-
- list_del(&ticket->node);
- iwm_rx_process_packet(iwm, packet, ticket);
- }
- spin_unlock(&iwm->ticket_lock);
-}
-
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.h b/drivers/net/wireless/iwmc3200wifi/rx.h
deleted file mode 100644
index da0db91cee59..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/rx.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_RX_H__
-#define __IWM_RX_H__
-
-#include <linux/skbuff.h>
-
-#include "umac.h"
-
-struct iwm_rx_ticket_node {
- struct list_head node;
- struct iwm_rx_ticket *ticket;
-};
-
-struct iwm_rx_packet {
- struct list_head node;
- u16 id;
- struct sk_buff *skb;
- unsigned long pkt_size;
-};
-
-void iwm_rx_worker(struct work_struct *work);
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
deleted file mode 100644
index 0042f204b07f..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-/*
- * This is the SDIO bus specific hooks for iwm.
- * It also is the module's entry point.
- *
- * Interesting code paths:
- * iwm_sdio_probe() (Called by an SDIO bus scan)
- * -> iwm_if_alloc() (netdev.c)
- * -> iwm_wdev_alloc() (cfg80211.c, allocates and register our wiphy)
- * -> wiphy_new()
- * -> wiphy_register()
- * -> alloc_netdev_mq()
- * -> register_netdev()
- *
- * iwm_sdio_remove()
- * -> iwm_if_free() (netdev.c)
- * -> unregister_netdev()
- * -> iwm_wdev_free() (cfg80211.c)
- * -> wiphy_unregister()
- * -> wiphy_free()
- *
- * iwm_sdio_isr() (called in process context from the SDIO core code)
- * -> queue_work(.., isr_worker)
- * -- [async] --> iwm_sdio_isr_worker()
- * -> iwm_rx_handle()
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/netdevice.h>
-#include <linux/debugfs.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/sdio.h>
-#include <linux/mmc/sdio_func.h>
-
-#include "iwm.h"
-#include "debug.h"
-#include "bus.h"
-#include "sdio.h"
-
-static void iwm_sdio_isr_worker(struct work_struct *work)
-{
- struct iwm_sdio_priv *hw;
- struct iwm_priv *iwm;
- struct iwm_rx_info *rx_info;
- struct sk_buff *skb;
- u8 *rx_buf;
- unsigned long rx_size;
-
- hw = container_of(work, struct iwm_sdio_priv, isr_worker);
- iwm = hw_to_iwm(hw);
-
- while (!skb_queue_empty(&iwm->rx_list)) {
- skb = skb_dequeue(&iwm->rx_list);
- rx_info = skb_to_rx_info(skb);
- rx_size = rx_info->rx_size;
- rx_buf = skb->data;
-
- IWM_HEXDUMP(iwm, DBG, SDIO, "RX: ", rx_buf, rx_size);
- if (iwm_rx_handle(iwm, rx_buf, rx_size) < 0)
- IWM_WARN(iwm, "RX error\n");
-
- kfree_skb(skb);
- }
-}
-
-static void iwm_sdio_isr(struct sdio_func *func)
-{
- struct iwm_priv *iwm;
- struct iwm_sdio_priv *hw;
- struct iwm_rx_info *rx_info;
- struct sk_buff *skb;
- unsigned long buf_size, read_size;
- int ret;
- u8 val;
-
- hw = sdio_get_drvdata(func);
- iwm = hw_to_iwm(hw);
-
- buf_size = hw->blk_size;
-
- /* We're checking the status */
- val = sdio_readb(func, IWM_SDIO_INTR_STATUS_ADDR, &ret);
- if (val == 0 || ret < 0) {
- IWM_ERR(iwm, "Wrong INTR_STATUS\n");
- return;
- }
-
- /* See if we have free buffers */
- if (skb_queue_len(&iwm->rx_list) > IWM_RX_LIST_SIZE) {
- IWM_ERR(iwm, "No buffer for more Rx frames\n");
- return;
- }
-
- /* We first read the transaction size */
- read_size = sdio_readb(func, IWM_SDIO_INTR_GET_SIZE_ADDR + 1, &ret);
- read_size = read_size << 8;
-
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't read the xfer size\n");
- return;
- }
-
- /* We need to clear the INT register */
- sdio_writeb(func, 1, IWM_SDIO_INTR_CLEAR_ADDR, &ret);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't clear the INT register\n");
- return;
- }
-
- while (buf_size < read_size)
- buf_size <<= 1;
-
- skb = dev_alloc_skb(buf_size);
- if (!skb) {
- IWM_ERR(iwm, "Couldn't alloc RX skb\n");
- return;
- }
- rx_info = skb_to_rx_info(skb);
- rx_info->rx_size = read_size;
- rx_info->rx_buf_size = buf_size;
-
- /* Now we can read the actual buffer */
- ret = sdio_memcpy_fromio(func, skb_put(skb, read_size),
- IWM_SDIO_DATA_ADDR, read_size);
-
- /* The skb is put on a driver's specific Rx SKB list */
- skb_queue_tail(&iwm->rx_list, skb);
-
- /* We can now schedule the actual worker */
- queue_work(hw->isr_wq, &hw->isr_worker);
-}
-
-static void iwm_sdio_rx_free(struct iwm_sdio_priv *hw)
-{
- struct iwm_priv *iwm = hw_to_iwm(hw);
-
- flush_workqueue(hw->isr_wq);
-
- skb_queue_purge(&iwm->rx_list);
-}
-
-/* Bus ops */
-static int if_sdio_enable(struct iwm_priv *iwm)
-{
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
- int ret;
-
- sdio_claim_host(hw->func);
-
- ret = sdio_enable_func(hw->func);
- if (ret) {
- IWM_ERR(iwm, "Couldn't enable the device: is TOP driver "
- "loaded and functional?\n");
- goto release_host;
- }
-
- iwm_reset(iwm);
-
- ret = sdio_claim_irq(hw->func, iwm_sdio_isr);
- if (ret) {
- IWM_ERR(iwm, "Failed to claim irq: %d\n", ret);
- goto release_host;
- }
-
- sdio_writeb(hw->func, 1, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
- if (ret < 0) {
- IWM_ERR(iwm, "Couldn't enable INTR: %d\n", ret);
- goto release_irq;
- }
-
- sdio_release_host(hw->func);
-
- IWM_DBG_SDIO(iwm, INFO, "IWM SDIO enable\n");
-
- return 0;
-
- release_irq:
- sdio_release_irq(hw->func);
- release_host:
- sdio_release_host(hw->func);
-
- return ret;
-}
-
-static int if_sdio_disable(struct iwm_priv *iwm)
-{
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
- int ret;
-
- sdio_claim_host(hw->func);
- sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
- if (ret < 0)
- IWM_WARN(iwm, "Couldn't disable INTR: %d\n", ret);
-
- sdio_release_irq(hw->func);
- sdio_disable_func(hw->func);
- sdio_release_host(hw->func);
-
- iwm_sdio_rx_free(hw);
-
- iwm_reset(iwm);
-
- IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");
-
- return 0;
-}
-
-static int if_sdio_send_chunk(struct iwm_priv *iwm, u8 *buf, int count)
-{
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
- int aligned_count = ALIGN(count, hw->blk_size);
- int ret;
-
- if ((unsigned long)buf & 0x3) {
- IWM_ERR(iwm, "buf <%p> is not dword aligned\n", buf);
- /* TODO: Is this a hardware limitation? use get_unligned */
- return -EINVAL;
- }
-
- sdio_claim_host(hw->func);
- ret = sdio_memcpy_toio(hw->func, IWM_SDIO_DATA_ADDR, buf,
- aligned_count);
- sdio_release_host(hw->func);
-
- return ret;
-}
-
-static ssize_t iwm_debugfs_sdio_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct iwm_priv *iwm = filp->private_data;
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
- char *buf;
- u8 cccr;
- int buf_len = 4096, ret;
- size_t len = 0;
-
- if (*ppos != 0)
- return 0;
- if (count < sizeof(buf))
- return -ENOSPC;
-
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- sdio_claim_host(hw->func);
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IOEx, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_IOEx\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_IOEx: 0x%x\n", cccr);
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IORx, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_IORx\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_IORx: 0x%x\n", cccr);
-
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IENx, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_IENx\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_IENx: 0x%x\n", cccr);
-
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_INTx, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_INTx\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_INTx: 0x%x\n", cccr);
-
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_ABORT, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_ABORTx\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_ABORT: 0x%x\n", cccr);
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_IF, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_IF\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_IF: 0x%x\n", cccr);
-
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CAPS, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_CAPS\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_CAPS: 0x%x\n", cccr);
-
- cccr = sdio_f0_readb(hw->func, SDIO_CCCR_CIS, &ret);
- if (ret) {
- IWM_ERR(iwm, "Could not read SDIO_CCCR_CIS\n");
- goto err;
- }
- len += snprintf(buf + len, buf_len - len, "CCCR_CIS: 0x%x\n", cccr);
-
- ret = simple_read_from_buffer(buffer, len, ppos, buf, buf_len);
-err:
- sdio_release_host(hw->func);
-
- kfree(buf);
-
- return ret;
-}
-
-static const struct file_operations iwm_debugfs_sdio_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = iwm_debugfs_sdio_read,
- .llseek = default_llseek,
-};
-
-static void if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
-{
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
-
- hw->cccr_dentry = debugfs_create_file("cccr", 0200,
- parent_dir, iwm,
- &iwm_debugfs_sdio_fops);
-}
-
-static void if_sdio_debugfs_exit(struct iwm_priv *iwm)
-{
- struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
-
- debugfs_remove(hw->cccr_dentry);
-}
-
-static struct iwm_if_ops if_sdio_ops = {
- .enable = if_sdio_enable,
- .disable = if_sdio_disable,
- .send_chunk = if_sdio_send_chunk,
- .debugfs_init = if_sdio_debugfs_init,
- .debugfs_exit = if_sdio_debugfs_exit,
- .umac_name = "iwmc3200wifi-umac-sdio.bin",
- .calib_lmac_name = "iwmc3200wifi-calib-sdio.bin",
- .lmac_name = "iwmc3200wifi-lmac-sdio.bin",
-};
-MODULE_FIRMWARE("iwmc3200wifi-umac-sdio.bin");
-MODULE_FIRMWARE("iwmc3200wifi-calib-sdio.bin");
-MODULE_FIRMWARE("iwmc3200wifi-lmac-sdio.bin");
-
-static int iwm_sdio_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- struct iwm_priv *iwm;
- struct iwm_sdio_priv *hw;
- struct device *dev = &func->dev;
- int ret;
-
- /* check if TOP has already initialized the card */
- sdio_claim_host(func);
- ret = sdio_enable_func(func);
- if (ret) {
- dev_err(dev, "wait for TOP to enable the device\n");
- sdio_release_host(func);
- return ret;
- }
-
- ret = sdio_set_block_size(func, IWM_SDIO_BLK_SIZE);
-
- sdio_disable_func(func);
- sdio_release_host(func);
-
- if (ret < 0) {
- dev_err(dev, "Failed to set block size: %d\n", ret);
- return ret;
- }
-
- iwm = iwm_if_alloc(sizeof(struct iwm_sdio_priv), dev, &if_sdio_ops);
- if (IS_ERR(iwm)) {
- dev_err(dev, "allocate SDIO interface failed\n");
- return PTR_ERR(iwm);
- }
-
- hw = iwm_private(iwm);
- hw->iwm = iwm;
-
- iwm_debugfs_init(iwm);
-
- sdio_set_drvdata(func, hw);
-
- hw->func = func;
- hw->blk_size = IWM_SDIO_BLK_SIZE;
-
- hw->isr_wq = create_singlethread_workqueue(KBUILD_MODNAME "_sdio");
- if (!hw->isr_wq) {
- ret = -ENOMEM;
- goto debugfs_exit;
- }
-
- INIT_WORK(&hw->isr_worker, iwm_sdio_isr_worker);
-
- ret = iwm_if_add(iwm);
- if (ret) {
- dev_err(dev, "add SDIO interface failed\n");
- goto destroy_wq;
- }
-
- dev_info(dev, "IWM SDIO probe\n");
-
- return 0;
-
- destroy_wq:
- destroy_workqueue(hw->isr_wq);
- debugfs_exit:
- iwm_debugfs_exit(iwm);
- iwm_if_free(iwm);
- return ret;
-}
-
-static void iwm_sdio_remove(struct sdio_func *func)
-{
- struct iwm_sdio_priv *hw = sdio_get_drvdata(func);
- struct iwm_priv *iwm = hw_to_iwm(hw);
- struct device *dev = &func->dev;
-
- iwm_if_remove(iwm);
- destroy_workqueue(hw->isr_wq);
- iwm_debugfs_exit(iwm);
- iwm_if_free(iwm);
-
- sdio_set_drvdata(func, NULL);
-
- dev_info(dev, "IWM SDIO remove\n");
-}
-
-static const struct sdio_device_id iwm_sdio_ids[] = {
- /* Global/AGN SKU */
- { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1403) },
- /* BGN SKU */
- { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1408) },
- { /* end: all zeroes */ },
-};
-MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids);
-
-static struct sdio_driver iwm_sdio_driver = {
- .name = "iwm_sdio",
- .id_table = iwm_sdio_ids,
- .probe = iwm_sdio_probe,
- .remove = iwm_sdio_remove,
-};
-
-static int __init iwm_sdio_init_module(void)
-{
- return sdio_register_driver(&iwm_sdio_driver);
-}
-
-static void __exit iwm_sdio_exit_module(void)
-{
- sdio_unregister_driver(&iwm_sdio_driver);
-}
-
-module_init(iwm_sdio_init_module);
-module_exit(iwm_sdio_exit_module);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR(IWM_COPYRIGHT " " IWM_AUTHOR);
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.h b/drivers/net/wireless/iwmc3200wifi/sdio.h
deleted file mode 100644
index aab6b6892e45..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/sdio.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_SDIO_H__
-#define __IWM_SDIO_H__
-
-#define IWM_SDIO_DATA_ADDR 0x0
-#define IWM_SDIO_INTR_ENABLE_ADDR 0x14
-#define IWM_SDIO_INTR_STATUS_ADDR 0x13
-#define IWM_SDIO_INTR_CLEAR_ADDR 0x13
-#define IWM_SDIO_INTR_GET_SIZE_ADDR 0x2C
-
-#define IWM_SDIO_BLK_SIZE 256
-
-#define iwm_to_if_sdio(i) (struct iwm_sdio_priv *)(iwm->private)
-
-struct iwm_sdio_priv {
- struct sdio_func *func;
- struct iwm_priv *iwm;
-
- struct workqueue_struct *isr_wq;
- struct work_struct isr_worker;
-
- struct dentry *cccr_dentry;
-
- unsigned int blk_size;
-};
-
-#endif
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.c b/drivers/net/wireless/iwmc3200wifi/trace.c
deleted file mode 100644
index 904d36f22311..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/trace.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "iwm.h"
-#define CREATE_TRACE_POINTS
-#include "trace.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.h b/drivers/net/wireless/iwmc3200wifi/trace.h
deleted file mode 100644
index f5f7070b7e22..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/trace.h
+++ /dev/null
@@ -1,283 +0,0 @@
-#if !defined(__IWM_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
-#define __IWM_TRACE_H__
-
-#include <linux/tracepoint.h>
-
-#if !defined(CONFIG_IWM_TRACING)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwm
-
-#define IWM_ENTRY __array(char, ndev_name, 16)
-#define IWM_ASSIGN strlcpy(__entry->ndev_name, iwm_to_ndev(iwm)->name, 16)
-#define IWM_PR_FMT "%s"
-#define IWM_PR_ARG __entry->ndev_name
-
-TRACE_EVENT(iwm_tx_nonwifi_cmd,
- TP_PROTO(struct iwm_priv *iwm, struct iwm_udma_out_nonwifi_hdr *hdr),
-
- TP_ARGS(iwm, hdr),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, opcode)
- __field(u8, resp)
- __field(u8, eot)
- __field(u8, hw)
- __field(u16, seq)
- __field(u32, addr)
- __field(u32, op1)
- __field(u32, op2)
- ),
-
- TP_fast_assign(
- IWM_ASSIGN;
- __entry->opcode = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE);
- __entry->resp = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP);
- __entry->eot = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT);
- __entry->hw = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW);
- __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM);
- __entry->addr = le32_to_cpu(hdr->addr);
- __entry->op1 = le32_to_cpu(hdr->op1_sz);
- __entry->op2 = le32_to_cpu(hdr->op2);
- ),
-
- TP_printk(
- IWM_PR_FMT " Tx TARGET CMD: opcode 0x%x, resp %d, eot %d, "
- "hw %d, seq 0x%x, addr 0x%x, op1 0x%x, op2 0x%x",
- IWM_PR_ARG, __entry->opcode, __entry->resp, __entry->eot,
- __entry->hw, __entry->seq, __entry->addr, __entry->op1,
- __entry->op2
- )
-);
-
-TRACE_EVENT(iwm_tx_wifi_cmd,
- TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_out_hdr *hdr),
-
- TP_ARGS(iwm, hdr),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, opcode)
- __field(u8, lmac)
- __field(u8, resp)
- __field(u8, eot)
- __field(u8, ra_tid)
- __field(u8, credit_group)
- __field(u8, color)
- __field(u16, seq)
- ),
-
- TP_fast_assign(
- IWM_ASSIGN;
- __entry->opcode = hdr->sw_hdr.cmd.cmd;
- __entry->lmac = 0;
- __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
- __entry->resp = GET_VAL8(hdr->sw_hdr.cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ);
- __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
- __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
- __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
- __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
- if (__entry->opcode == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH ||
- __entry->opcode == UMAC_CMD_OPCODE_WIFI_IF_WRAPPER) {
- __entry->lmac = 1;
- __entry->opcode = ((struct iwm_lmac_hdr *)(hdr + 1))->id;
- }
- ),
-
- TP_printk(
- IWM_PR_FMT " Tx %cMAC CMD: opcode 0x%x, resp %d, eot %d, "
- "seq 0x%x, sta_color 0x%x, ra_tid 0x%x, credit_group 0x%x",
- IWM_PR_ARG, __entry->lmac ? 'L' : 'U', __entry->opcode,
- __entry->resp, __entry->eot, __entry->seq, __entry->color,
- __entry->ra_tid, __entry->credit_group
- )
-);
-
-TRACE_EVENT(iwm_tx_packets,
- TP_PROTO(struct iwm_priv *iwm, u8 *buf, int len),
-
- TP_ARGS(iwm, buf, len),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, eot)
- __field(u8, ra_tid)
- __field(u8, credit_group)
- __field(u8, color)
- __field(u16, seq)
- __field(u8, npkt)
- __field(u32, bytes)
- ),
-
- TP_fast_assign(
- struct iwm_umac_wifi_out_hdr *hdr =
- (struct iwm_umac_wifi_out_hdr *)buf;
-
- IWM_ASSIGN;
- __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
- __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
- __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
- __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
- __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
- __entry->npkt = 1;
- __entry->bytes = len;
-
- if (!__entry->eot) {
- int count;
- u8 *ptr = buf;
-
- __entry->npkt = 0;
- while (ptr < buf + len) {
- count = GET_VAL32(hdr->sw_hdr.meta_data,
- UMAC_FW_CMD_BYTE_COUNT);
- ptr += ALIGN(sizeof(*hdr) + count, 16);
- hdr = (struct iwm_umac_wifi_out_hdr *)ptr;
- __entry->npkt++;
- }
- }
- ),
-
- TP_printk(
- IWM_PR_FMT " Tx %spacket: eot %d, seq 0x%x, sta_color 0x%x, "
- "ra_tid 0x%x, credit_group 0x%x, embedded_packets %d, %d bytes",
- IWM_PR_ARG, !__entry->eot ? "concatenated " : "",
- __entry->eot, __entry->seq, __entry->color, __entry->ra_tid,
- __entry->credit_group, __entry->npkt, __entry->bytes
- )
-);
-
-TRACE_EVENT(iwm_rx_nonwifi_cmd,
- TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
-
- TP_ARGS(iwm, buf, len),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, opcode)
- __field(u16, seq)
- __field(u32, len)
- ),
-
- TP_fast_assign(
- struct iwm_udma_in_hdr *hdr = buf;
-
- IWM_ASSIGN;
- __entry->opcode = GET_VAL32(hdr->cmd, UDMA_HDI_IN_NW_CMD_OPCODE);
- __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
- __entry->len = len;
- ),
-
- TP_printk(
- IWM_PR_FMT " Rx TARGET RESP: opcode 0x%x, seq 0x%x, len 0x%x",
- IWM_PR_ARG, __entry->opcode, __entry->seq, __entry->len
- )
-);
-
-TRACE_EVENT(iwm_rx_wifi_cmd,
- TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_in_hdr *hdr),
-
- TP_ARGS(iwm, hdr),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, cmd)
- __field(u8, source)
- __field(u16, seq)
- __field(u32, count)
- ),
-
- TP_fast_assign(
- IWM_ASSIGN;
- __entry->cmd = hdr->sw_hdr.cmd.cmd;
- __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
- __entry->count = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
- __entry->seq = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
- ),
-
- TP_printk(
- IWM_PR_FMT " Rx %s RESP: cmd 0x%x, seq 0x%x, count 0x%x",
- IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ? "LMAC" :
- __entry->source == UMAC_HDI_IN_SOURCE_FW ? "UMAC" : "UDMA",
- __entry->cmd, __entry->seq, __entry->count
- )
-);
-
-#define iwm_ticket_action_symbol \
- { IWM_RX_TICKET_DROP, "DROP" }, \
- { IWM_RX_TICKET_RELEASE, "RELEASE" }, \
- { IWM_RX_TICKET_SNIFFER, "SNIFFER" }, \
- { IWM_RX_TICKET_ENQUEUE, "ENQUEUE" }
-
-TRACE_EVENT(iwm_rx_ticket,
- TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
-
- TP_ARGS(iwm, buf, len),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, action)
- __field(u8, reason)
- __field(u16, id)
- __field(u16, flags)
- ),
-
- TP_fast_assign(
- struct iwm_rx_ticket *ticket =
- ((struct iwm_umac_notif_rx_ticket *)buf)->tickets;
-
- IWM_ASSIGN;
- __entry->id = le16_to_cpu(ticket->id);
- __entry->action = le16_to_cpu(ticket->action);
- __entry->flags = le16_to_cpu(ticket->flags);
- __entry->reason = (__entry->flags & IWM_RX_TICKET_DROP_REASON_MSK) >> IWM_RX_TICKET_DROP_REASON_POS;
- ),
-
- TP_printk(
- IWM_PR_FMT " Rx ticket: id 0x%x, action %s, %s 0x%x%s",
- IWM_PR_ARG, __entry->id,
- __print_symbolic(__entry->action, iwm_ticket_action_symbol),
- __entry->reason ? "reason" : "flags",
- __entry->reason ? __entry->reason : __entry->flags,
- __entry->flags & IWM_RX_TICKET_AMSDU_MSK ? ", AMSDU frame" : ""
- )
-);
-
-TRACE_EVENT(iwm_rx_packet,
- TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
-
- TP_ARGS(iwm, buf, len),
-
- TP_STRUCT__entry(
- IWM_ENTRY
- __field(u8, source)
- __field(u16, id)
- __field(u32, len)
- ),
-
- TP_fast_assign(
- struct iwm_umac_wifi_in_hdr *hdr = buf;
-
- IWM_ASSIGN;
- __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
- __entry->id = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
- __entry->len = len - sizeof(*hdr);
- ),
-
- TP_printk(
- IWM_PR_FMT " Rx %s packet: id 0x%x, %d bytes",
- IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ?
- "LMAC" : "UMAC", __entry->id, __entry->len
- )
-);
-#endif
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
-#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
deleted file mode 100644
index be98074c0608..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ /dev/null
@@ -1,529 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-/*
- * iwm Tx theory of operation:
- *
- * 1) We receive a 802.3 frame from the stack
- * 2) We convert it to a 802.11 frame [iwm_xmit_frame]
- * 3) We queue it to its corresponding tx queue [iwm_xmit_frame]
- * 4) We schedule the tx worker. There is one worker per tx
- * queue. [iwm_xmit_frame]
- * 5) The tx worker is scheduled
- * 6) We go through every queued skb on the tx queue, and for each
- * and every one of them: [iwm_tx_worker]
- * a) We check if we have enough Tx credits (see below for a Tx
- * credits description) for the frame length. [iwm_tx_worker]
- * b) If we do, we aggregate the Tx frame into a UDMA one, by
- * concatenating one REPLY_TX command per Tx frame. [iwm_tx_worker]
- * c) When we run out of credits, or when we reach the maximum
- * concatenation size, we actually send the concatenated UDMA
- * frame. [iwm_tx_worker]
- *
- * When we run out of Tx credits, the skbs are filling the tx queue,
- * and eventually we will stop the netdev queue. [iwm_tx_worker]
- * The tx queue is emptied as we're getting new tx credits, by
- * scheduling the tx_worker. [iwm_tx_credit_inc]
- * The netdev queue is started again when we have enough tx credits,
- * and when our tx queue has some reasonable amout of space available
- * (i.e. half of the max size). [iwm_tx_worker]
- */
-
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/ieee80211.h>
-
-#include "iwm.h"
-#include "debug.h"
-#include "commands.h"
-#include "hal.h"
-#include "umac.h"
-#include "bus.h"
-
-#define IWM_UMAC_PAGE_ALLOC_WRAP 0xffff
-
-#define BYTES_TO_PAGES(n) (1 + ((n) >> ilog2(IWM_UMAC_PAGE_SIZE)) - \
- (((n) & (IWM_UMAC_PAGE_SIZE - 1)) == 0))
-
-#define pool_id_to_queue(id) ((id < IWM_TX_CMD_QUEUE) ? id : id - 1)
-#define queue_to_pool_id(q) ((q < IWM_TX_CMD_QUEUE) ? q : q + 1)
-
-/* require to hold tx_credit lock */
-static int iwm_tx_credit_get(struct iwm_tx_credit *tx_credit, int id)
-{
- struct pool_entry *pool = &tx_credit->pools[id];
- struct spool_entry *spool = &tx_credit->spools[pool->sid];
- int spool_pages;
-
- /* number of pages can be taken from spool by this pool */
- spool_pages = spool->max_pages - spool->alloc_pages +
- max(pool->min_pages - pool->alloc_pages, 0);
-
- return min(pool->max_pages - pool->alloc_pages, spool_pages);
-}
-
-static bool iwm_tx_credit_ok(struct iwm_priv *iwm, int id, int nb)
-{
- u32 npages = BYTES_TO_PAGES(nb);
-
- if (npages <= iwm_tx_credit_get(&iwm->tx_credit, id))
- return 1;
-
- set_bit(id, &iwm->tx_credit.full_pools_map);
-
- IWM_DBG_TX(iwm, DBG, "LINK: stop txq[%d], available credit: %d\n",
- pool_id_to_queue(id),
- iwm_tx_credit_get(&iwm->tx_credit, id));
-
- return 0;
-}
-
-void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages)
-{
- struct pool_entry *pool;
- struct spool_entry *spool;
- int freed_pages;
- int queue;
-
- BUG_ON(id >= IWM_MACS_OUT_GROUPS);
-
- pool = &iwm->tx_credit.pools[id];
- spool = &iwm->tx_credit.spools[pool->sid];
-
- freed_pages = total_freed_pages - pool->total_freed_pages;
- IWM_DBG_TX(iwm, DBG, "Free %d pages for pool[%d]\n", freed_pages, id);
-
- if (!freed_pages) {
- IWM_DBG_TX(iwm, DBG, "No pages are freed by UMAC\n");
- return;
- } else if (freed_pages < 0)
- freed_pages += IWM_UMAC_PAGE_ALLOC_WRAP + 1;
-
- if (pool->alloc_pages > pool->min_pages) {
- int spool_pages = pool->alloc_pages - pool->min_pages;
- spool_pages = min(spool_pages, freed_pages);
- spool->alloc_pages -= spool_pages;
- }
-
- pool->alloc_pages -= freed_pages;
- pool->total_freed_pages = total_freed_pages;
-
- IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
- "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
- pool->total_freed_pages, pool->sid, spool->alloc_pages);
-
- if (test_bit(id, &iwm->tx_credit.full_pools_map) &&
- (pool->alloc_pages < pool->max_pages / 2)) {
- clear_bit(id, &iwm->tx_credit.full_pools_map);
-
- queue = pool_id_to_queue(id);
-
- IWM_DBG_TX(iwm, DBG, "LINK: start txq[%d], available "
- "credit: %d\n", queue,
- iwm_tx_credit_get(&iwm->tx_credit, id));
- queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
- }
-}
-
-static void iwm_tx_credit_dec(struct iwm_priv *iwm, int id, int alloc_pages)
-{
- struct pool_entry *pool;
- struct spool_entry *spool;
- int spool_pages;
-
- IWM_DBG_TX(iwm, DBG, "Allocate %d pages for pool[%d]\n",
- alloc_pages, id);
-
- BUG_ON(id >= IWM_MACS_OUT_GROUPS);
-
- pool = &iwm->tx_credit.pools[id];
- spool = &iwm->tx_credit.spools[pool->sid];
-
- spool_pages = pool->alloc_pages + alloc_pages - pool->min_pages;
-
- if (pool->alloc_pages >= pool->min_pages)
- spool->alloc_pages += alloc_pages;
- else if (spool_pages > 0)
- spool->alloc_pages += spool_pages;
-
- pool->alloc_pages += alloc_pages;
-
- IWM_DBG_TX(iwm, DBG, "Pool[%d] pages alloc: %d, total_freed: %d, "
- "Spool[%d] pages alloc: %d\n", id, pool->alloc_pages,
- pool->total_freed_pages, pool->sid, spool->alloc_pages);
-}
-
-int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb)
-{
- u32 npages = BYTES_TO_PAGES(nb);
- int ret = 0;
-
- spin_lock(&iwm->tx_credit.lock);
-
- if (!iwm_tx_credit_ok(iwm, id, nb)) {
- IWM_DBG_TX(iwm, DBG, "No credit available for pool[%d]\n", id);
- ret = -ENOSPC;
- goto out;
- }
-
- iwm_tx_credit_dec(iwm, id, npages);
-
- out:
- spin_unlock(&iwm->tx_credit.lock);
- return ret;
-}
-
-/*
- * Since we're on an SDIO or USB bus, we are not sharing memory
- * for storing to be transmitted frames. The host needs to push
- * them upstream. As a consequence there needs to be a way for
- * the target to let us know if it can actually take more TX frames
- * or not. This is what Tx credits are for.
- *
- * For each Tx HW queue, we have a Tx pool, and then we have one
- * unique super pool (spool), which is actually a global pool of
- * all the UMAC pages.
- * For each Tx pool we have a min_pages, a max_pages fields, and a
- * alloc_pages fields. The alloc_pages tracks the number of pages
- * currently allocated from the tx pool.
- * Here are the rules to check if given a tx frame we have enough
- * tx credits for it:
- * 1) We translate the frame length into a number of UMAC pages.
- * Let's call them n_pages.
- * 2) For the corresponding tx pool, we check if n_pages +
- * pool->alloc_pages is higher than pool->min_pages. min_pages
- * represent a set of pre-allocated pages on the tx pool. If
- * that's the case, then we need to allocate those pages from
- * the spool. We can do so until we reach spool->max_pages.
- * 3) Each tx pool is not allowed to allocate more than pool->max_pages
- * from the spool, so once we're over min_pages, we can allocate
- * pages from the spool, but not more than max_pages.
- *
- * When the tx code path needs to send a tx frame, it checks first
- * if it has enough tx credits, following those rules. [iwm_tx_credit_get]
- * If it does, it then updates the pool and spool counters and
- * then send the frame. [iwm_tx_credit_alloc and iwm_tx_credit_dec]
- * On the other side, when the UMAC is done transmitting frames, it
- * will send a credit update notification to the host. This is when
- * the pool and spool counters gets to be decreased. [iwm_tx_credit_inc,
- * called from rx.c:iwm_ntf_tx_credit_update]
- *
- */
-void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
- struct iwm_umac_notif_alive *alive)
-{
- int i, sid, pool_pages;
-
- spin_lock(&iwm->tx_credit.lock);
-
- iwm->tx_credit.pool_nr = le16_to_cpu(alive->page_grp_count);
- iwm->tx_credit.full_pools_map = 0;
- memset(&iwm->tx_credit.spools[0], 0, sizeof(struct spool_entry));
-
- IWM_DBG_TX(iwm, DBG, "Pools number is %d\n", iwm->tx_credit.pool_nr);
-
- for (i = 0; i < iwm->tx_credit.pool_nr; i++) {
- __le32 page_grp_state = alive->page_grp_state[i];
-
- iwm->tx_credit.pools[i].id = GET_VAL32(page_grp_state,
- UMAC_ALIVE_PAGE_STS_GRP_NUM);
- iwm->tx_credit.pools[i].sid = GET_VAL32(page_grp_state,
- UMAC_ALIVE_PAGE_STS_SGRP_NUM);
- iwm->tx_credit.pools[i].min_pages = GET_VAL32(page_grp_state,
- UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE);
- iwm->tx_credit.pools[i].max_pages = GET_VAL32(page_grp_state,
- UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE);
- iwm->tx_credit.pools[i].alloc_pages = 0;
- iwm->tx_credit.pools[i].total_freed_pages = 0;
-
- sid = iwm->tx_credit.pools[i].sid;
- pool_pages = iwm->tx_credit.pools[i].min_pages;
-
- if (iwm->tx_credit.spools[sid].max_pages == 0) {
- iwm->tx_credit.spools[sid].id = sid;
- iwm->tx_credit.spools[sid].max_pages =
- GET_VAL32(page_grp_state,
- UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE);
- iwm->tx_credit.spools[sid].alloc_pages = 0;
- }
-
- iwm->tx_credit.spools[sid].alloc_pages += pool_pages;
-
- IWM_DBG_TX(iwm, DBG, "Pool idx: %d, id: %d, sid: %d, capacity "
- "min: %d, max: %d, pool alloc: %d, total_free: %d, "
- "super poll alloc: %d\n",
- i, iwm->tx_credit.pools[i].id,
- iwm->tx_credit.pools[i].sid,
- iwm->tx_credit.pools[i].min_pages,
- iwm->tx_credit.pools[i].max_pages,
- iwm->tx_credit.pools[i].alloc_pages,
- iwm->tx_credit.pools[i].total_freed_pages,
- iwm->tx_credit.spools[sid].alloc_pages);
- }
-
- spin_unlock(&iwm->tx_credit.lock);
-}
-
-#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)
-
-static __le16 iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
- int pool_id, u8 *buf)
-{
- struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
- struct iwm_udma_wifi_cmd udma_cmd;
- struct iwm_umac_cmd umac_cmd;
- struct iwm_tx_info *tx_info = skb_to_tx_info(skb);
-
- udma_cmd.count = cpu_to_le16(skb->len +
- sizeof(struct iwm_umac_fw_cmd_hdr));
- /* set EOP to 0 here. iwm_udma_wifi_hdr_set_eop() will be
- * called later to set EOP for the last packet. */
- udma_cmd.eop = 0;
- udma_cmd.credit_group = pool_id;
- udma_cmd.ra_tid = tx_info->sta << 4 | tx_info->tid;
- udma_cmd.lmac_offset = 0;
-
- umac_cmd.id = REPLY_TX;
- umac_cmd.count = cpu_to_le16(skb->len);
- umac_cmd.color = tx_info->color;
- umac_cmd.resp = 0;
- umac_cmd.seq_num = cpu_to_le16(iwm_alloc_wifi_cmd_seq(iwm));
-
- iwm_build_udma_wifi_hdr(iwm, &hdr->hw_hdr, &udma_cmd);
- iwm_build_umac_hdr(iwm, &hdr->sw_hdr, &umac_cmd);
-
- memcpy(buf + sizeof(*hdr), skb->data, skb->len);
-
- return umac_cmd.seq_num;
-}
-
-static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
- struct iwm_tx_queue *txq)
-{
- int ret;
-
- if (!txq->concat_count)
- return 0;
-
- IWM_DBG_TX(iwm, DBG, "Send concatenated Tx: queue %d, %d bytes\n",
- txq->id, txq->concat_count);
-
- /* mark EOP for the last packet */
- iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);
-
- trace_iwm_tx_packets(iwm, txq->concat_buf, txq->concat_count);
- ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);
-
- txq->concat_count = 0;
- txq->concat_ptr = txq->concat_buf;
-
- return ret;
-}
-
-void iwm_tx_worker(struct work_struct *work)
-{
- struct iwm_priv *iwm;
- struct iwm_tx_info *tx_info = NULL;
- struct sk_buff *skb;
- struct iwm_tx_queue *txq;
- struct iwm_sta_info *sta_info;
- struct iwm_tid_info *tid_info;
- int cmdlen, ret, pool_id;
-
- txq = container_of(work, struct iwm_tx_queue, worker);
- iwm = container_of(txq, struct iwm_priv, txq[txq->id]);
-
- pool_id = queue_to_pool_id(txq->id);
-
- while (!test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
- !skb_queue_empty(&txq->queue)) {
-
- spin_lock_bh(&txq->lock);
- skb = skb_dequeue(&txq->queue);
- spin_unlock_bh(&txq->lock);
-
- tx_info = skb_to_tx_info(skb);
- sta_info = &iwm->sta_table[tx_info->sta];
- if (!sta_info->valid) {
- IWM_ERR(iwm, "Trying to send a frame to unknown STA\n");
- kfree_skb(skb);
- continue;
- }
-
- tid_info = &sta_info->tid_info[tx_info->tid];
-
- mutex_lock(&tid_info->mutex);
-
- /*
- * If the RAxTID is stopped, we queue the skb to the stopped
- * queue.
- * Whenever we'll get a UMAC notification to resume the tx flow
- * for this RAxTID, we'll merge back the stopped queue into the
- * regular queue. See iwm_ntf_stop_resume_tx() from rx.c.
- */
- if (tid_info->stopped) {
- IWM_DBG_TX(iwm, DBG, "%dx%d stopped\n",
- tx_info->sta, tx_info->tid);
- spin_lock_bh(&txq->lock);
- skb_queue_tail(&txq->stopped_queue, skb);
- spin_unlock_bh(&txq->lock);
-
- mutex_unlock(&tid_info->mutex);
- continue;
- }
-
- cmdlen = IWM_UDMA_HDR_LEN + skb->len;
-
- IWM_DBG_TX(iwm, DBG, "Tx frame on queue %d: skb: 0x%p, sta: "
- "%d, color: %d\n", txq->id, skb, tx_info->sta,
- tx_info->color);
-
- if (txq->concat_count + cmdlen > IWM_HAL_CONCATENATE_BUF_SIZE)
- iwm_tx_send_concat_packets(iwm, txq);
-
- ret = iwm_tx_credit_alloc(iwm, pool_id, cmdlen);
- if (ret) {
- IWM_DBG_TX(iwm, DBG, "not enough tx_credit for queue "
- "%d, Tx worker stopped\n", txq->id);
- spin_lock_bh(&txq->lock);
- skb_queue_head(&txq->queue, skb);
- spin_unlock_bh(&txq->lock);
-
- mutex_unlock(&tid_info->mutex);
- break;
- }
-
- txq->concat_ptr = txq->concat_buf + txq->concat_count;
- tid_info->last_seq_num =
- iwm_tx_build_packet(iwm, skb, pool_id, txq->concat_ptr);
- txq->concat_count += ALIGN(cmdlen, 16);
-
- mutex_unlock(&tid_info->mutex);
-
- kfree_skb(skb);
- }
-
- iwm_tx_send_concat_packets(iwm, txq);
-
- if (__netif_subqueue_stopped(iwm_to_ndev(iwm), txq->id) &&
- !test_bit(pool_id, &iwm->tx_credit.full_pools_map) &&
- (skb_queue_len(&txq->queue) < IWM_TX_LIST_SIZE / 2)) {
- IWM_DBG_TX(iwm, DBG, "LINK: start netif_subqueue[%d]", txq->id);
- netif_wake_subqueue(iwm_to_ndev(iwm), txq->id);
- }
-}
-
-int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
- struct iwm_priv *iwm = ndev_to_iwm(netdev);
- struct wireless_dev *wdev = iwm_to_wdev(iwm);
- struct iwm_tx_info *tx_info;
- struct iwm_tx_queue *txq;
- struct iwm_sta_info *sta_info;
- u8 *dst_addr, sta_id;
- u16 queue;
- int ret;
-
-
- if (!test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
- IWM_DBG_TX(iwm, DBG, "LINK: stop netif_all_queues: "
- "not associated\n");
- netif_tx_stop_all_queues(netdev);
- goto drop;
- }
-
- queue = skb_get_queue_mapping(skb);
- BUG_ON(queue >= IWM_TX_DATA_QUEUES); /* no iPAN yet */
-
- txq = &iwm->txq[queue];
-
- /* No free space for Tx, tx_worker is too slow */
- if ((skb_queue_len(&txq->queue) > IWM_TX_LIST_SIZE) ||
- (skb_queue_len(&txq->stopped_queue) > IWM_TX_LIST_SIZE)) {
- IWM_DBG_TX(iwm, DBG, "LINK: stop netif_subqueue[%d]\n", queue);
- netif_stop_subqueue(netdev, queue);
- return NETDEV_TX_BUSY;
- }
-
- ret = ieee80211_data_from_8023(skb, netdev->dev_addr, wdev->iftype,
- iwm->bssid, 0);
- if (ret) {
- IWM_ERR(iwm, "build wifi header failed\n");
- goto drop;
- }
-
- dst_addr = ((struct ieee80211_hdr *)(skb->data))->addr1;
-
- for (sta_id = 0; sta_id < IWM_STA_TABLE_NUM; sta_id++) {
- sta_info = &iwm->sta_table[sta_id];
- if (sta_info->valid &&
- !memcmp(dst_addr, sta_info->addr, ETH_ALEN))
- break;
- }
-
- if (sta_id == IWM_STA_TABLE_NUM) {
- IWM_ERR(iwm, "STA %pM not found in sta_table, Tx ignored\n",
- dst_addr);
- goto drop;
- }
-
- tx_info = skb_to_tx_info(skb);
- tx_info->sta = sta_id;
- tx_info->color = sta_info->color;
- /* UMAC uses TID 8 (vs. 0) for non QoS packets */
- if (sta_info->qos)
- tx_info->tid = skb->priority;
- else
- tx_info->tid = IWM_UMAC_MGMT_TID;
-
- spin_lock_bh(&iwm->txq[queue].lock);
- skb_queue_tail(&iwm->txq[queue].queue, skb);
- spin_unlock_bh(&iwm->txq[queue].lock);
-
- queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
-
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += skb->len;
- return NETDEV_TX_OK;
-
- drop:
- netdev->stats.tx_dropped++;
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
-}
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
deleted file mode 100644
index 4a137d334a42..000000000000
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ /dev/null
@@ -1,789 +0,0 @@
-/*
- * Intel Wireless Multicomm 3200 WiFi driver
- *
- * Copyright (C) 2009 Intel Corporation. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Intel Corporation <ilw@linux.intel.com>
- * Samuel Ortiz <samuel.ortiz@intel.com>
- * Zhu Yi <yi.zhu@intel.com>
- *
- */
-
-#ifndef __IWM_UMAC_H__
-#define __IWM_UMAC_H__
-
-struct iwm_udma_in_hdr {
- __le32 cmd;
- __le32 size;
-} __packed;
-
-struct iwm_udma_out_nonwifi_hdr {
- __le32 cmd;
- __le32 addr;
- __le32 op1_sz;
- __le32 op2;
-} __packed;
-
-struct iwm_udma_out_wifi_hdr {
- __le32 cmd;
- __le32 meta_data;
-} __packed;
-
-/* Sequence numbering */
-#define UMAC_WIFI_SEQ_NUM_BASE 1
-#define UMAC_WIFI_SEQ_NUM_MAX 0x4000
-#define UMAC_NONWIFI_SEQ_NUM_BASE 1
-#define UMAC_NONWIFI_SEQ_NUM_MAX 0x10
-
-/* MAC address address */
-#define WICO_MAC_ADDRESS_ADDR 0x604008F8
-
-/* RA / TID */
-#define UMAC_HDI_ACT_TBL_IDX_TID_POS 0
-#define UMAC_HDI_ACT_TBL_IDX_TID_SEED 0xF
-
-#define UMAC_HDI_ACT_TBL_IDX_RA_POS 4
-#define UMAC_HDI_ACT_TBL_IDX_RA_SEED 0xF
-
-#define UMAC_HDI_ACT_TBL_IDX_RA_UMAC 0xF
-#define UMAC_HDI_ACT_TBL_IDX_TID_UMAC 0x9
-#define UMAC_HDI_ACT_TBL_IDX_TID_LMAC 0xA
-
-#define UMAC_HDI_ACT_TBL_IDX_HOST_CMD \
- ((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\
- (UMAC_HDI_ACT_TBL_IDX_TID_UMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS))
-#define UMAC_HDI_ACT_TBL_IDX_UMAC_CMD \
- ((UMAC_HDI_ACT_TBL_IDX_RA_UMAC << UMAC_HDI_ACT_TBL_IDX_RA_POS) |\
- (UMAC_HDI_ACT_TBL_IDX_TID_LMAC << UMAC_HDI_ACT_TBL_IDX_TID_POS))
-
-/* STA ID and color */
-#define STA_ID_SEED (0x0f)
-#define STA_ID_POS (0)
-#define STA_ID_MSK (STA_ID_SEED << STA_ID_POS)
-
-#define STA_COLOR_SEED (0x7)
-#define STA_COLOR_POS (4)
-#define STA_COLOR_MSK (STA_COLOR_SEED << STA_COLOR_POS)
-
-#define STA_ID_N_COLOR_COLOR(id_n_color) \
- (((id_n_color) & STA_COLOR_MSK) >> STA_COLOR_POS)
-#define STA_ID_N_COLOR_ID(id_n_color) \
- (((id_n_color) & STA_ID_MSK) >> STA_ID_POS)
-
-/* iwm_umac_notif_alive.page_grp_state Group number -- bits [3:0] */
-#define UMAC_ALIVE_PAGE_STS_GRP_NUM_POS 0
-#define UMAC_ALIVE_PAGE_STS_GRP_NUM_SEED 0xF
-
-/* iwm_umac_notif_alive.page_grp_state Super group number -- bits [7:4] */
-#define UMAC_ALIVE_PAGE_STS_SGRP_NUM_POS 4
-#define UMAC_ALIVE_PAGE_STS_SGRP_NUM_SEED 0xF
-
-/* iwm_umac_notif_alive.page_grp_state Group min size -- bits [15:8] */
-#define UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE_POS 8
-#define UMAC_ALIVE_PAGE_STS_GRP_MIN_SIZE_SEED 0xFF
-
-/* iwm_umac_notif_alive.page_grp_state Group max size -- bits [23:16] */
-#define UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE_POS 16
-#define UMAC_ALIVE_PAGE_STS_GRP_MAX_SIZE_SEED 0xFF
-
-/* iwm_umac_notif_alive.page_grp_state Super group max size -- bits [31:24] */
-#define UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE_POS 24
-#define UMAC_ALIVE_PAGE_STS_SGRP_MAX_SIZE_SEED 0xFF
-
-/* Barkers */
-#define UMAC_REBOOT_BARKER 0xdeadbeef
-#define UMAC_ACK_BARKER 0xfeedbabe
-#define UMAC_PAD_TERMINAL 0xadadadad
-
-/* UMAC JMP address */
-#define UMAC_MU_FW_INST_DATA_12_ADDR 0xBF0000
-
-/* iwm_umac_hdi_out_hdr.cmd OP code -- bits [3:0] */
-#define UMAC_HDI_OUT_CMD_OPCODE_POS 0
-#define UMAC_HDI_OUT_CMD_OPCODE_SEED 0xF
-
-/* iwm_umac_hdi_out_hdr.cmd End-Of-Transfer -- bits [10:10] */
-#define UMAC_HDI_OUT_CMD_EOT_POS 10
-#define UMAC_HDI_OUT_CMD_EOT_SEED 0x1
-
-/* iwm_umac_hdi_out_hdr.cmd UTFD only usage -- bits [11:11] */
-#define UMAC_HDI_OUT_CMD_UTFD_ONLY_POS 11
-#define UMAC_HDI_OUT_CMD_UTFD_ONLY_SEED 0x1
-
-/* iwm_umac_hdi_out_hdr.cmd Non-WiFi HW sequence number -- bits [12:15] */
-#define UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM_POS 12
-#define UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM_SEED 0xF
-
-/* iwm_umac_hdi_out_hdr.cmd Signature -- bits [31:16] */
-#define UMAC_HDI_OUT_CMD_SIGNATURE_POS 16
-#define UMAC_HDI_OUT_CMD_SIGNATURE_SEED 0xFFFF
-
-/* iwm_umac_hdi_out_hdr.meta_data Byte count -- bits [11:0] */
-#define UMAC_HDI_OUT_BYTE_COUNT_POS 0
-#define UMAC_HDI_OUT_BYTE_COUNT_SEED 0xFFF
-
-/* iwm_umac_hdi_out_hdr.meta_data Credit group -- bits [15:12] */
-#define UMAC_HDI_OUT_CREDIT_GRP_POS 12
-#define UMAC_HDI_OUT_CREDIT_GRP_SEED 0xF
-
-/* iwm_umac_hdi_out_hdr.meta_data RA/TID -- bits [23:16] */
-#define UMAC_HDI_OUT_RATID_POS 16
-#define UMAC_HDI_OUT_RATID_SEED 0xFF
-
-/* iwm_umac_hdi_out_hdr.meta_data LMAC offset -- bits [31:24] */
-#define UMAC_HDI_OUT_LMAC_OFFSET_POS 24
-#define UMAC_HDI_OUT_LMAC_OFFSET_SEED 0xFF
-
-/* Signature */
-#define UMAC_HDI_OUT_SIGNATURE 0xCBBC
-
-/* buffer alignment */
-#define UMAC_HDI_BUF_ALIGN_MSK 0xF
-
-/* iwm_umac_hdi_in_hdr.cmd OP code -- bits [3:0] */
-#define UMAC_HDI_IN_CMD_OPCODE_POS 0
-#define UMAC_HDI_IN_CMD_OPCODE_SEED 0xF
-
-/* iwm_umac_hdi_in_hdr.cmd Non-WiFi API response -- bits [6:4] */
-#define UMAC_HDI_IN_CMD_NON_WIFI_RESP_POS 4
-#define UMAC_HDI_IN_CMD_NON_WIFI_RESP_SEED 0x7
-
-/* iwm_umac_hdi_in_hdr.cmd WiFi API source -- bits [5:4] */
-#define UMAC_HDI_IN_CMD_SOURCE_POS 4
-#define UMAC_HDI_IN_CMD_SOURCE_SEED 0x3
-
-/* iwm_umac_hdi_in_hdr.cmd WiFi API EOT -- bits [6:6] */
-#define UMAC_HDI_IN_CMD_EOT_POS 6
-#define UMAC_HDI_IN_CMD_EOT_SEED 0x1
-
-/* iwm_umac_hdi_in_hdr.cmd timestamp present -- bits [7:7] */
-#define UMAC_HDI_IN_CMD_TIME_STAMP_PRESENT_POS 7
-#define UMAC_HDI_IN_CMD_TIME_STAMP_PRESENT_SEED 0x1
-
-/* iwm_umac_hdi_in_hdr.cmd WiFi Non-last AMSDU -- bits [8:8] */
-#define UMAC_HDI_IN_CMD_NON_LAST_AMSDU_POS 8
-#define UMAC_HDI_IN_CMD_NON_LAST_AMSDU_SEED 0x1
-
-/* iwm_umac_hdi_in_hdr.cmd WiFi HW sequence number -- bits [31:9] */
-#define UMAC_HDI_IN_CMD_HW_SEQ_NUM_POS 9
-#define UMAC_HDI_IN_CMD_HW_SEQ_NUM_SEED 0x7FFFFF
-
-/* iwm_umac_hdi_in_hdr.cmd Non-WiFi HW sequence number -- bits [12:15] */
-#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM_POS 12
-#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM_SEED 0xF
-
-/* iwm_umac_hdi_in_hdr.cmd Non-WiFi HW signature -- bits [16:31] */
-#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG_POS 16
-#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG_SEED 0xFFFF
-
-/* Fixed Non-WiFi signature */
-#define UDMA_HDI_IN_CMD_NON_WIFI_HW_SIG 0xCBBC
-
-/* IN NTFY op-codes */
-#define UMAC_NOTIFY_OPCODE_ALIVE 0xA1
-#define UMAC_NOTIFY_OPCODE_INIT_COMPLETE 0xA2
-#define UMAC_NOTIFY_OPCODE_WIFI_CORE_STATUS 0xA3
-#define UMAC_NOTIFY_OPCODE_ERROR 0xA4
-#define UMAC_NOTIFY_OPCODE_DEBUG 0xA5
-#define UMAC_NOTIFY_OPCODE_WIFI_IF_WRAPPER 0xB0
-#define UMAC_NOTIFY_OPCODE_STATS 0xB1
-#define UMAC_NOTIFY_OPCODE_PAGE_DEALLOC 0xB3
-#define UMAC_NOTIFY_OPCODE_RX_TICKET 0xB4
-#define UMAC_NOTIFY_OPCODE_MAX (UMAC_NOTIFY_OPCODE_RX_TICKET -\
- UMAC_NOTIFY_OPCODE_ALIVE + 1)
-#define UMAC_NOTIFY_OPCODE_FIRST (UMAC_NOTIFY_OPCODE_ALIVE)
-
-/* HDI OUT OP CODE */
-#define UMAC_HDI_OUT_OPCODE_PING 0x0
-#define UMAC_HDI_OUT_OPCODE_READ 0x1
-#define UMAC_HDI_OUT_OPCODE_WRITE 0x2
-#define UMAC_HDI_OUT_OPCODE_JUMP 0x3
-#define UMAC_HDI_OUT_OPCODE_REBOOT 0x4
-#define UMAC_HDI_OUT_OPCODE_WRITE_PERSISTENT 0x5
-#define UMAC_HDI_OUT_OPCODE_READ_PERSISTENT 0x6
-#define UMAC_HDI_OUT_OPCODE_READ_MODIFY_WRITE 0x7
-/* #define UMAC_HDI_OUT_OPCODE_RESERVED 0x8..0xA */
-#define UMAC_HDI_OUT_OPCODE_WRITE_AUX_REG 0xB
-#define UMAC_HDI_OUT_OPCODE_WIFI 0xF
-
-/* HDI IN OP CODE -- Non WiFi*/
-#define UMAC_HDI_IN_OPCODE_PING 0x0
-#define UMAC_HDI_IN_OPCODE_READ 0x1
-#define UMAC_HDI_IN_OPCODE_WRITE 0x2
-#define UMAC_HDI_IN_OPCODE_WRITE_PERSISTENT 0x5
-#define UMAC_HDI_IN_OPCODE_READ_PERSISTENT 0x6
-#define UMAC_HDI_IN_OPCODE_READ_MODIFY_WRITE 0x7
-#define UMAC_HDI_IN_OPCODE_EP_MGMT 0x8
-#define UMAC_HDI_IN_OPCODE_CREDIT_CHANGE 0x9
-#define UMAC_HDI_IN_OPCODE_CTRL_DATABASE 0xA
-#define UMAC_HDI_IN_OPCODE_WRITE_AUX_REG 0xB
-#define UMAC_HDI_IN_OPCODE_NONWIFI_MAX \
- (UMAC_HDI_IN_OPCODE_WRITE_AUX_REG + 1)
-#define UMAC_HDI_IN_OPCODE_WIFI 0xF
-
-/* HDI IN SOURCE */
-#define UMAC_HDI_IN_SOURCE_FHRX 0x0
-#define UMAC_HDI_IN_SOURCE_UDMA 0x1
-#define UMAC_HDI_IN_SOURCE_FW 0x2
-#define UMAC_HDI_IN_SOURCE_RESERVED 0x3
-
-/* OUT CMD op-codes */
-#define UMAC_CMD_OPCODE_ECHO 0x01
-#define UMAC_CMD_OPCODE_HALT 0x02
-#define UMAC_CMD_OPCODE_RESET 0x03
-#define UMAC_CMD_OPCODE_BULK_EP_INACT_TIMEOUT 0x09
-#define UMAC_CMD_OPCODE_URB_CANCEL_ACK 0x0A
-#define UMAC_CMD_OPCODE_DCACHE_FLUSH 0x0B
-#define UMAC_CMD_OPCODE_EEPROM_PROXY 0x0C
-#define UMAC_CMD_OPCODE_TX_ECHO 0x0D
-#define UMAC_CMD_OPCODE_DBG_MON 0x0E
-#define UMAC_CMD_OPCODE_INTERNAL_TX 0x0F
-#define UMAC_CMD_OPCODE_SET_PARAM_FIX 0x10
-#define UMAC_CMD_OPCODE_SET_PARAM_VAR 0x11
-#define UMAC_CMD_OPCODE_GET_PARAM 0x12
-#define UMAC_CMD_OPCODE_DBG_EVENT_WRAPPER 0x13
-#define UMAC_CMD_OPCODE_TARGET 0x14
-#define UMAC_CMD_OPCODE_STATISTIC_REQUEST 0x15
-#define UMAC_CMD_OPCODE_GET_CHAN_INFO_LIST 0x16
-#define UMAC_CMD_OPCODE_SET_PARAM_LIST 0x17
-#define UMAC_CMD_OPCODE_GET_PARAM_LIST 0x18
-#define UMAC_CMD_OPCODE_STOP_RESUME_STA_TX 0x19
-#define UMAC_CMD_OPCODE_TEST_BLOCK_ACK 0x1A
-
-#define UMAC_CMD_OPCODE_BASE_WRAPPER 0xFA
-#define UMAC_CMD_OPCODE_LMAC_WRAPPER 0xFB
-#define UMAC_CMD_OPCODE_HW_TEST_WRAPPER 0xFC
-#define UMAC_CMD_OPCODE_WIFI_IF_WRAPPER 0xFD
-#define UMAC_CMD_OPCODE_WIFI_WRAPPER 0xFE
-#define UMAC_CMD_OPCODE_WIFI_PASS_THROUGH 0xFF
-
-/* UMAC WiFi interface op-codes */
-#define UMAC_WIFI_IF_CMD_SET_PROFILE 0x11
-#define UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE 0x12
-#define UMAC_WIFI_IF_CMD_SET_EXCLUDE_LIST 0x13
-#define UMAC_WIFI_IF_CMD_SCAN_REQUEST 0x14
-#define UMAC_WIFI_IF_CMD_SCAN_CONFIG 0x15
-#define UMAC_WIFI_IF_CMD_ADD_WEP40_KEY 0x16
-#define UMAC_WIFI_IF_CMD_ADD_WEP104_KEY 0x17
-#define UMAC_WIFI_IF_CMD_ADD_TKIP_KEY 0x18
-#define UMAC_WIFI_IF_CMD_ADD_CCMP_KEY 0x19
-#define UMAC_WIFI_IF_CMD_REMOVE_KEY 0x1A
-#define UMAC_WIFI_IF_CMD_GLOBAL_TX_KEY_ID 0x1B
-#define UMAC_WIFI_IF_CMD_SET_HOST_EXTENDED_IE 0x1C
-#define UMAC_WIFI_IF_CMD_GET_SUPPORTED_CHANNELS 0x1E
-#define UMAC_WIFI_IF_CMD_PMKID_UPDATE 0x1F
-#define UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER 0x20
-
-/* UMAC WiFi interface ports */
-#define UMAC_WIFI_IF_FLG_PORT_DEF 0x00
-#define UMAC_WIFI_IF_FLG_PORT_PAN 0x01
-#define UMAC_WIFI_IF_FLG_PORT_PAN_INVALID WIFI_IF_FLG_PORT_DEF
-
-/* UMAC WiFi interface actions */
-#define UMAC_WIFI_IF_FLG_ACT_GET 0x10
-#define UMAC_WIFI_IF_FLG_ACT_SET 0x20
-
-/* iwm_umac_fw_cmd_hdr.meta_data byte count -- bits [11:0] */
-#define UMAC_FW_CMD_BYTE_COUNT_POS 0
-#define UMAC_FW_CMD_BYTE_COUNT_SEED 0xFFF
-
-/* iwm_umac_fw_cmd_hdr.meta_data status -- bits [15:12] */
-#define UMAC_FW_CMD_STATUS_POS 12
-#define UMAC_FW_CMD_STATUS_SEED 0xF
-
-/* iwm_umac_fw_cmd_hdr.meta_data full TX command by Driver -- bits [16:16] */
-#define UMAC_FW_CMD_TX_DRV_FULL_CMD_POS 16
-#define UMAC_FW_CMD_TX_DRV_FULL_CMD_SEED 0x1
-
-/* iwm_umac_fw_cmd_hdr.meta_data TX command by FW -- bits [17:17] */
-#define UMAC_FW_CMD_TX_FW_CMD_POS 17
-#define UMAC_FW_CMD_TX_FW_CMD_SEED 0x1
-
-/* iwm_umac_fw_cmd_hdr.meta_data TX plaintext mode -- bits [18:18] */
-#define UMAC_FW_CMD_TX_PLAINTEXT_POS 18
-#define UMAC_FW_CMD_TX_PLAINTEXT_SEED 0x1
-
-/* iwm_umac_fw_cmd_hdr.meta_data STA color -- bits [22:20] */
-#define UMAC_FW_CMD_TX_STA_COLOR_POS 20
-#define UMAC_FW_CMD_TX_STA_COLOR_SEED 0x7
-
-/* iwm_umac_fw_cmd_hdr.meta_data TX life time (TU) -- bits [31:24] */
-#define UMAC_FW_CMD_TX_LIFETIME_TU_POS 24
-#define UMAC_FW_CMD_TX_LIFETIME_TU_SEED 0xFF
-
-/* iwm_dev_cmd_hdr.flags Response required -- bits [5:5] */
-#define UMAC_DEV_CMD_FLAGS_RESP_REQ_POS 5
-#define UMAC_DEV_CMD_FLAGS_RESP_REQ_SEED 0x1
-
-/* iwm_dev_cmd_hdr.flags Aborted command -- bits [6:6] */
-#define UMAC_DEV_CMD_FLAGS_ABORT_POS 6
-#define UMAC_DEV_CMD_FLAGS_ABORT_SEED 0x1
-
-/* iwm_dev_cmd_hdr.flags Internal command -- bits [7:7] */
-#define DEV_CMD_FLAGS_FLD_INTERNAL_POS 7
-#define DEV_CMD_FLAGS_FLD_INTERNAL_SEED 0x1
-
-/* Rx */
-/* Rx actions */
-#define IWM_RX_TICKET_DROP 0x0
-#define IWM_RX_TICKET_RELEASE 0x1
-#define IWM_RX_TICKET_SNIFFER 0x2
-#define IWM_RX_TICKET_ENQUEUE 0x3
-
-/* Rx flags */
-#define IWM_RX_TICKET_PAD_SIZE_MSK 0x2
-#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4
-#define IWM_RX_TICKET_AMSDU_MSK 0x8
-#define IWM_RX_TICKET_DROP_REASON_POS 4
-#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << IWM_RX_TICKET_DROP_REASON_POS)
-
-#define IWM_RX_DROP_NO_DROP 0x0
-#define IWM_RX_DROP_BAD_CRC 0x1
-/* L2P no address match */
-#define IWM_RX_DROP_LMAC_ADDR_FILTER 0x2
-/* Multicast address not in list */
-#define IWM_RX_DROP_MCAST_ADDR_FILTER 0x3
-/* Control frames are not sent to the driver */
-#define IWM_RX_DROP_CTL_FRAME 0x4
-/* Our frame is back */
-#define IWM_RX_DROP_OUR_TX 0x5
-/* Association class filtering */
-#define IWM_RX_DROP_CLASS_FILTER 0x6
-/* Duplicated frame */
-#define IWM_RX_DROP_DUPLICATE_FILTER 0x7
-/* Decryption error */
-#define IWM_RX_DROP_SEC_ERR 0x8
-/* Unencrypted frame while encryption is on */
-#define IWM_RX_DROP_SEC_NO_ENCRYPTION 0x9
-/* Replay check failure */
-#define IWM_RX_DROP_SEC_REPLAY_ERR 0xa
-/* uCode and FW key color mismatch, check before replay */
-#define IWM_RX_DROP_SEC_KEY_COLOR_MISMATCH 0xb
-#define IWM_RX_DROP_SEC_TKIP_COUNTER_MEASURE 0xc
-/* No fragmentations Db is found */
-#define IWM_RX_DROP_FRAG_NO_RESOURCE 0xd
-/* Fragmention Db has seqCtl mismatch Vs. non-1st frag */
-#define IWM_RX_DROP_FRAG_ERR 0xe
-#define IWM_RX_DROP_FRAG_LOST 0xf
-#define IWM_RX_DROP_FRAG_COMPLETE 0x10
-/* Should be handled by UMAC */
-#define IWM_RX_DROP_MANAGEMENT 0x11
-/* STA not found by UMAC */
-#define IWM_RX_DROP_NO_STATION 0x12
-/* NULL or QoS NULL */
-#define IWM_RX_DROP_NULL_DATA 0x13
-#define IWM_RX_DROP_BA_REORDER_OLD_SEQCTL 0x14
-#define IWM_RX_DROP_BA_REORDER_DUPLICATE 0x15
-
-struct iwm_rx_ticket {
- __le16 action;
- __le16 id;
- __le16 flags;
- u8 payload_offset; /* includes: MAC header, pad, IV */
- u8 tail_len; /* includes: MIC, ICV, CRC (w/o STATUS) */
-} __packed;
-
-struct iwm_rx_mpdu_hdr {
- __le16 len;
- __le16 reserved;
-} __packed;
-
-/* UMAC SW WIFI API */
-
-struct iwm_dev_cmd_hdr {
- u8 cmd;
- u8 flags;
- __le16 seq_num;
-} __packed;
-
-struct iwm_umac_fw_cmd_hdr {
- __le32 meta_data;
- struct iwm_dev_cmd_hdr cmd;
-} __packed;
-
-struct iwm_umac_wifi_out_hdr {
- struct iwm_udma_out_wifi_hdr hw_hdr;
- struct iwm_umac_fw_cmd_hdr sw_hdr;
-} __packed;
-
-struct iwm_umac_nonwifi_out_hdr {
- struct iwm_udma_out_nonwifi_hdr hw_hdr;
-} __packed;
-
-struct iwm_umac_wifi_in_hdr {
- struct iwm_udma_in_hdr hw_hdr;
- struct iwm_umac_fw_cmd_hdr sw_hdr;
-} __packed;
-
-struct iwm_umac_nonwifi_in_hdr {
- struct iwm_udma_in_hdr hw_hdr;
- __le32 time_stamp;
-} __packed;
-
-#define IWM_UMAC_PAGE_SIZE 0x200
-
-/* Notify structures */
-struct iwm_fw_version {
- u8 minor;
- u8 major;
- __le16 id;
-};
-
-struct iwm_fw_build {
- u8 type;
- u8 subtype;
- u8 platform;
- u8 opt;
-};
-
-struct iwm_fw_alive_hdr {
- struct iwm_fw_version ver;
- struct iwm_fw_build build;
- __le32 os_build;
- __le32 log_hdr_addr;
- __le32 log_buf_addr;
- __le32 sys_timer_addr;
-};
-
-#define WAIT_NOTIF_TIMEOUT (2 * HZ)
-#define SCAN_COMPLETE_TIMEOUT (3 * HZ)
-
-#define UMAC_NTFY_ALIVE_STATUS_ERR 0xDEAD
-#define UMAC_NTFY_ALIVE_STATUS_OK 0xCAFE
-
-#define UMAC_NTFY_INIT_COMPLETE_STATUS_ERR 0xDEAD
-#define UMAC_NTFY_INIT_COMPLETE_STATUS_OK 0xCAFE
-
-#define UMAC_NTFY_WIFI_CORE_STATUS_LINK_EN 0x40
-#define UMAC_NTFY_WIFI_CORE_STATUS_MLME_EN 0x80
-
-#define IWM_MACS_OUT_GROUPS 6
-#define IWM_MACS_OUT_SGROUPS 1
-
-
-#define WIFI_IF_NTFY_ASSOC_START 0x80
-#define WIFI_IF_NTFY_ASSOC_COMPLETE 0x81
-#define WIFI_IF_NTFY_PROFILE_INVALIDATE_COMPLETE 0x82
-#define WIFI_IF_NTFY_CONNECTION_TERMINATED 0x83
-#define WIFI_IF_NTFY_SCAN_COMPLETE 0x84
-#define WIFI_IF_NTFY_STA_TABLE_CHANGE 0x85
-#define WIFI_IF_NTFY_EXTENDED_IE_REQUIRED 0x86
-#define WIFI_IF_NTFY_RADIO_PREEMPTION 0x87
-#define WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED 0x88
-#define WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED 0x89
-#define WIFI_IF_NTFY_LINK_QUALITY_STATISTICS 0x8A
-#define WIFI_IF_NTFY_MGMT_FRAME 0x8B
-
-/* DEBUG INDICATIONS */
-#define WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_START 0xE0
-#define WIFI_DBG_IF_NTFY_SCAN_SUPER_JOB_COMPLETE 0xE1
-#define WIFI_DBG_IF_NTFY_SCAN_CHANNEL_START 0xE2
-#define WIFI_DBG_IF_NTFY_SCAN_CHANNEL_RESULT 0xE3
-#define WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_START 0xE4
-#define WIFI_DBG_IF_NTFY_SCAN_MINI_JOB_COMPLETE 0xE5
-#define WIFI_DBG_IF_NTFY_CNCT_ATC_START 0xE6
-#define WIFI_DBG_IF_NTFY_COEX_NOTIFICATION 0xE7
-#define WIFI_DBG_IF_NTFY_COEX_HANDLE_ENVELOP 0xE8
-#define WIFI_DBG_IF_NTFY_COEX_HANDLE_RELEASE_ENVELOP 0xE9
-
-#define WIFI_IF_NTFY_MAX 0xff
-
-/* Notification structures */
-struct iwm_umac_notif_wifi_if {
- struct iwm_umac_wifi_in_hdr hdr;
- u8 status;
- u8 flags;
- __le16 buf_size;
-} __packed;
-
-#define UMAC_ROAM_REASON_FIRST_SELECTION 0x1
-#define UMAC_ROAM_REASON_AP_DEAUTH 0x2
-#define UMAC_ROAM_REASON_AP_CONNECT_LOST 0x3
-#define UMAC_ROAM_REASON_RSSI 0x4
-#define UMAC_ROAM_REASON_AP_ASSISTED_ROAM 0x5
-#define UMAC_ROAM_REASON_IBSS_COALESCING 0x6
-
-struct iwm_umac_notif_assoc_start {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 roam_reason;
- u8 bssid[ETH_ALEN];
- u8 reserved[2];
-} __packed;
-
-#define UMAC_ASSOC_COMPLETE_SUCCESS 0x0
-#define UMAC_ASSOC_COMPLETE_FAILURE 0x1
-
-struct iwm_umac_notif_assoc_complete {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 status;
- u8 bssid[ETH_ALEN];
- u8 band;
- u8 channel;
-} __packed;
-
-#define UMAC_PROFILE_INVALID_ASSOC_TIMEOUT 0x0
-#define UMAC_PROFILE_INVALID_ROAM_TIMEOUT 0x1
-#define UMAC_PROFILE_INVALID_REQUEST 0x2
-#define UMAC_PROFILE_INVALID_RF_PREEMPTED 0x3
-
-struct iwm_umac_notif_profile_invalidate {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 reason;
-} __packed;
-
-#define UMAC_SCAN_RESULT_SUCCESS 0x0
-#define UMAC_SCAN_RESULT_ABORTED 0x1
-#define UMAC_SCAN_RESULT_REJECTED 0x2
-#define UMAC_SCAN_RESULT_FAILED 0x3
-
-struct iwm_umac_notif_scan_complete {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 type;
- __le32 result;
- u8 seq_num;
-} __packed;
-
-#define UMAC_OPCODE_ADD_MODIFY 0x0
-#define UMAC_OPCODE_REMOVE 0x1
-#define UMAC_OPCODE_CLEAR_ALL 0x2
-
-#define UMAC_STA_FLAG_QOS 0x1
-
-struct iwm_umac_notif_sta_info {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 opcode;
- u8 mac_addr[ETH_ALEN];
- u8 sta_id; /* bits 0-3: station ID, bits 4-7: station color */
- u8 flags;
-} __packed;
-
-#define UMAC_BAND_2GHZ 0
-#define UMAC_BAND_5GHZ 1
-
-#define UMAC_CHANNEL_WIDTH_20MHZ 0
-#define UMAC_CHANNEL_WIDTH_40MHZ 1
-
-struct iwm_umac_notif_bss_info {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 type;
- __le32 timestamp;
- __le16 table_idx;
- __le16 frame_len;
- u8 band;
- u8 channel;
- s8 rssi;
- u8 reserved;
- u8 frame_buf[1];
-} __packed;
-
-#define IWM_BSS_REMOVE_INDEX_MSK 0x0fff
-#define IWM_BSS_REMOVE_FLAGS_MSK 0xfc00
-
-#define IWM_BSS_REMOVE_FLG_AGE 0x1000
-#define IWM_BSS_REMOVE_FLG_TIMEOUT 0x2000
-#define IWM_BSS_REMOVE_FLG_TABLE_FULL 0x4000
-
-struct iwm_umac_notif_bss_removed {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le32 count;
- __le16 entries[0];
-} __packed;
-
-struct iwm_umac_notif_mgt_frame {
- struct iwm_umac_notif_wifi_if mlme_hdr;
- __le16 len;
- u8 frame[1];
-} __packed;
-
-struct iwm_umac_notif_alive {
- struct iwm_umac_wifi_in_hdr hdr;
- __le16 status;
- __le16 reserved1;
- struct iwm_fw_alive_hdr alive_data;
- __le16 reserved2;
- __le16 page_grp_count;
- __le32 page_grp_state[IWM_MACS_OUT_GROUPS];
-} __packed;
-
-struct iwm_umac_notif_init_complete {
- struct iwm_umac_wifi_in_hdr hdr;
- __le16 status;
- __le16 reserved;
-} __packed;
-
-/* error categories */
-enum {
- UMAC_SYS_ERR_CAT_NONE = 0,
- UMAC_SYS_ERR_CAT_BOOT,
- UMAC_SYS_ERR_CAT_UMAC,
- UMAC_SYS_ERR_CAT_UAXM,
- UMAC_SYS_ERR_CAT_LMAC,
- UMAC_SYS_ERR_CAT_MAX
-};
-
-struct iwm_fw_error_hdr {
- __le32 category;
- __le32 status;
- __le32 pc;
- __le32 blink1;
- __le32 blink2;
- __le32 ilink1;
- __le32 ilink2;
- __le32 data1;
- __le32 data2;
- __le32 line_num;
- __le32 umac_status;
- __le32 lmac_status;
- __le32 sdio_status;
- __le32 dbm_sample_ctrl;
- __le32 dbm_buf_base;
- __le32 dbm_buf_end;
- __le32 dbm_buf_write_ptr;
- __le32 dbm_buf_cycle_cnt;
-} __packed;
-
-struct iwm_umac_notif_error {
- struct iwm_umac_wifi_in_hdr hdr;
- struct iwm_fw_error_hdr err;
-} __packed;
-
-#define UMAC_DEALLOC_NTFY_CHANGES_CNT_POS 0
-#define UMAC_DEALLOC_NTFY_CHANGES_CNT_SEED 0xff
-#define UMAC_DEALLOC_NTFY_CHANGES_MSK_POS 8
-#define UMAC_DEALLOC_NTFY_CHANGES_MSK_SEED 0xffffff
-#define UMAC_DEALLOC_NTFY_PAGE_CNT_POS 0
-#define UMAC_DEALLOC_NTFY_PAGE_CNT_SEED 0xffffff
-#define UMAC_DEALLOC_NTFY_GROUP_NUM_POS 24
-#define UMAC_DEALLOC_NTFY_GROUP_NUM_SEED 0xf
-
-struct iwm_umac_notif_page_dealloc {
- struct iwm_umac_wifi_in_hdr hdr;
- __le32 changes;
- __le32 grp_info[IWM_MACS_OUT_GROUPS];
-} __packed;
-
-struct iwm_umac_notif_wifi_status {
- struct iwm_umac_wifi_in_hdr hdr;
- __le16 status;
- __le16 reserved;
-} __packed;
-
-struct iwm_umac_notif_rx_ticket {
- struct iwm_umac_wifi_in_hdr hdr;
- u8 num_tickets;
- u8 reserved[3];
- struct iwm_rx_ticket tickets[1];
-} __packed;
-
-/* Tx/Rx rates window (number of max of last update window per second) */
-#define UMAC_NTF_RATE_SAMPLE_NR 4
-
-/* Max numbers of bits required to go through all antennae in bitmasks */
-#define UMAC_PHY_NUM_CHAINS 3
-
-#define IWM_UMAC_MGMT_TID 8
-#define IWM_UMAC_TID_NR 9 /* 8 TIDs + MGMT */
-
-struct iwm_umac_notif_stats {
- struct iwm_umac_wifi_in_hdr hdr;
- __le32 flags;
- __le32 timestamp;
- __le16 tid_load[IWM_UMAC_TID_NR + 1]; /* 1 non-QoS + 1 dword align */
- __le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR];
- __le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR];
- __le32 chain_energy[UMAC_PHY_NUM_CHAINS];
- s32 rssi_dbm;
- s32 noise_dbm;
- __le32 supp_rates;
- __le32 supp_ht_rates;
- __le32 missed_beacons;
- __le32 rx_beacons;
- __le32 rx_dir_pkts;
- __le32 rx_nondir_pkts;
- __le32 rx_multicast;
- __le32 rx_errors;
- __le32 rx_drop_other_bssid;
- __le32 rx_drop_decode;
- __le32 rx_drop_reassembly;
- __le32 rx_drop_bad_len;
- __le32 rx_drop_overflow;
- __le32 rx_drop_crc;
- __le32 rx_drop_missed;
- __le32 tx_dir_pkts;
- __le32 tx_nondir_pkts;
- __le32 tx_failure;
- __le32 tx_errors;
- __le32 tx_drop_max_retry;
- __le32 tx_err_abort;
- __le32 tx_err_carrier;
- __le32 rx_bytes;
- __le32 tx_bytes;
- __le32 tx_power;
- __le32 tx_max_power;
- __le32 roam_threshold;
- __le32 ap_assoc_nr;
- __le32 scan_full;
- __le32 scan_abort;
- __le32 ap_nr;
- __le32 roam_nr;
- __le32 roam_missed_beacons;
- __le32 roam_rssi;
- __le32 roam_unassoc;
- __le32 roam_deauth;
- __le32 roam_ap_loadblance;
-} __packed;
-
-#define UMAC_STOP_TX_FLAG 0x1
-#define UMAC_RESUME_TX_FLAG 0x2
-
-#define LAST_SEQ_NUM_INVALID 0xFFFF
-
-struct iwm_umac_notif_stop_resume_tx {
- struct iwm_umac_wifi_in_hdr hdr;
- u8 flags; /* UMAC_*_TX_FLAG_* */
- u8 sta_id;
- __le16 stop_resume_tid_msk; /* tid bitmask */
-} __packed;
-
-#define UMAC_MAX_NUM_PMKIDS 4
-
-/* WiFi interface wrapper header */
-struct iwm_umac_wifi_if {
- u8 oid;
- u8 flags;
- __le16 buf_size;
-} __packed;
-
-#define IWM_SEQ_NUM_HOST_MSK 0x0000
-#define IWM_SEQ_NUM_UMAC_MSK 0x4000
-#define IWM_SEQ_NUM_LMAC_MSK 0x8000
-#define IWM_SEQ_NUM_MSK 0xC000
-
-#endif
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 2fa879b015b6..eb5de800ed90 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -435,24 +435,40 @@ static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
* Set Channel
*/
-static int lbs_cfg_set_channel(struct wiphy *wiphy,
- struct net_device *netdev,
- struct ieee80211_channel *channel,
- enum nl80211_channel_type channel_type)
+static int lbs_cfg_set_monitor_channel(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type)
{
struct lbs_private *priv = wiphy_priv(wiphy);
int ret = -ENOTSUPP;
- lbs_deb_enter_args(LBS_DEB_CFG80211, "iface %s freq %d, type %d",
- netdev_name(netdev), channel->center_freq, channel_type);
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d",
+ channel->center_freq, channel_type);
if (channel_type != NL80211_CHAN_NO_HT)
goto out;
- if (netdev == priv->mesh_dev)
- ret = lbs_mesh_set_channel(priv, channel->hw_value);
- else
- ret = lbs_set_channel(priv, channel->hw_value);
+ ret = lbs_set_channel(priv, channel->hw_value);
+
+ out:
+ lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
+ return ret;
+}
+
+static int lbs_cfg_set_mesh_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct ieee80211_channel *channel)
+{
+ struct lbs_private *priv = wiphy_priv(wiphy);
+ int ret = -ENOTSUPP;
+
+ lbs_deb_enter_args(LBS_DEB_CFG80211, "iface %s freq %d",
+ netdev_name(netdev), channel->center_freq);
+
+ if (netdev != priv->mesh_dev)
+ goto out;
+
+ ret = lbs_mesh_set_channel(priv, channel->hw_value);
out:
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
@@ -789,7 +805,6 @@ void lbs_scan_done(struct lbs_private *priv)
}
static int lbs_cfg_scan(struct wiphy *wiphy,
- struct net_device *dev,
struct cfg80211_scan_request *request)
{
struct lbs_private *priv = wiphy_priv(wiphy);
@@ -2029,7 +2044,8 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
*/
static struct cfg80211_ops lbs_cfg80211_ops = {
- .set_channel = lbs_cfg_set_channel,
+ .set_monitor_channel = lbs_cfg_set_monitor_channel,
+ .libertas_set_mesh_channel = lbs_cfg_set_mesh_channel,
.scan = lbs_cfg_scan,
.connect = lbs_cfg_connect,
.disconnect = lbs_cfg_disconnect,
@@ -2164,13 +2180,15 @@ int lbs_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct lbs_private *priv = wiphy_priv(wiphy);
- int ret;
+ int ret = 0;
lbs_deb_enter_args(LBS_DEB_CFG80211, "cfg80211 regulatory domain "
"callback for domain %c%c\n", request->alpha2[0],
request->alpha2[1]);
- ret = lbs_set_11d_domain_info(priv, request, wiphy->bands);
+ memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2));
+ if (lbs_iface_active(priv))
+ ret = lbs_set_11d_domain_info(priv);
lbs_deb_leave(LBS_DEB_CFG80211);
return ret;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index d798bcc0d83a..26e68326710b 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -733,15 +733,13 @@ int lbs_get_rssi(struct lbs_private *priv, s8 *rssi, s8 *nf)
* to the firmware
*
* @priv: pointer to &struct lbs_private
- * @request: cfg80211 regulatory request structure
- * @bands: the device's supported bands and channels
*
* returns: 0 on success, error code on failure
*/
-int lbs_set_11d_domain_info(struct lbs_private *priv,
- struct regulatory_request *request,
- struct ieee80211_supported_band **bands)
+int lbs_set_11d_domain_info(struct lbs_private *priv)
{
+ struct wiphy *wiphy = priv->wdev->wiphy;
+ struct ieee80211_supported_band **bands = wiphy->bands;
struct cmd_ds_802_11d_domain_info cmd;
struct mrvl_ie_domain_param_set *domain = &cmd.domain;
struct ieee80211_country_ie_triplet *t;
@@ -752,21 +750,23 @@ int lbs_set_11d_domain_info(struct lbs_private *priv,
u8 first_channel = 0, next_chan = 0, max_pwr = 0;
u8 i, flag = 0;
size_t triplet_size;
- int ret;
+ int ret = 0;
lbs_deb_enter(LBS_DEB_11D);
+ if (!priv->country_code[0])
+ goto out;
memset(&cmd, 0, sizeof(cmd));
cmd.action = cpu_to_le16(CMD_ACT_SET);
lbs_deb_11d("Setting country code '%c%c'\n",
- request->alpha2[0], request->alpha2[1]);
+ priv->country_code[0], priv->country_code[1]);
domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN);
/* Set country code */
- domain->country_code[0] = request->alpha2[0];
- domain->country_code[1] = request->alpha2[1];
+ domain->country_code[0] = priv->country_code[0];
+ domain->country_code[1] = priv->country_code[1];
domain->country_code[2] = ' ';
/* Now set up the channel triplets; firmware is somewhat picky here
@@ -848,6 +848,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv,
ret = lbs_cmd_with_response(priv, CMD_802_11D_DOMAIN_INFO, &cmd);
+out:
lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
return ret;
}
@@ -1019,9 +1020,9 @@ static void lbs_submit_command(struct lbs_private *priv,
if (ret) {
netdev_info(priv->dev, "DNLD_CMD: hw_host_to_card failed: %d\n",
ret);
- /* Let the timer kick in and retry, and potentially reset
- the whole thing if the condition persists */
- timeo = HZ/4;
+ /* Reset dnld state machine, report failure */
+ priv->dnld_sent = DNLD_RES_RECEIVED;
+ lbs_complete_command(priv, cmdnode, ret);
}
if (command == CMD_802_11_DEEP_SLEEP) {
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index b280ef7a0aea..ab07608e13d0 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -128,9 +128,7 @@ int lbs_set_monitor_mode(struct lbs_private *priv, int enable);
int lbs_get_rssi(struct lbs_private *priv, s8 *snr, s8 *nf);
-int lbs_set_11d_domain_info(struct lbs_private *priv,
- struct regulatory_request *request,
- struct ieee80211_supported_band **bands);
+int lbs_set_11d_domain_info(struct lbs_private *priv);
int lbs_get_reg(struct lbs_private *priv, u16 reg, u16 offset, u32 *value);
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index a06cc283e23d..668dd27616a0 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -483,7 +483,7 @@ static ssize_t lbs_rdmac_write(struct file *file,
res = -EFAULT;
goto out_unlock;
}
- priv->mac_offset = simple_strtoul((char *)buf, NULL, 16);
+ priv->mac_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
@@ -565,7 +565,7 @@ static ssize_t lbs_rdbbp_write(struct file *file,
res = -EFAULT;
goto out_unlock;
}
- priv->bbp_offset = simple_strtoul((char *)buf, NULL, 16);
+ priv->bbp_offset = simple_strtoul(buf, NULL, 16);
res = count;
out_unlock:
free_page(addr);
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 672005430aca..6bd1608992b0 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -49,6 +49,7 @@ struct lbs_private {
bool wiphy_registered;
struct cfg80211_scan_request *scan_req;
u8 assoc_bss[ETH_ALEN];
+ u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u8 disassoc_reason;
/* Mesh */
@@ -58,6 +59,7 @@ struct lbs_private {
uint16_t mesh_tlv;
u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 mesh_ssid_len;
+ u8 mesh_channel;
#endif
/* Debugfs */
diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c
index 601f2075355e..c0f9e7e862f6 100644
--- a/drivers/net/wireless/libertas/firmware.c
+++ b/drivers/net/wireless/libertas/firmware.c
@@ -4,9 +4,7 @@
#include <linux/sched.h>
#include <linux/firmware.h>
-#include <linux/firmware.h>
#include <linux/module.h>
-#include <linux/sched.h>
#include "dev.h"
#include "decl.h"
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 2e2dbfa2ee50..96726f79a1dd 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -68,7 +68,6 @@
#define CMD_802_11_BEACON_STOP 0x0049
#define CMD_802_11_MAC_ADDRESS 0x004d
#define CMD_802_11_LED_GPIO_CTRL 0x004e
-#define CMD_802_11_EEPROM_ACCESS 0x0059
#define CMD_802_11_BAND_CONFIG 0x0058
#define CMD_GSPI_BUS_CONFIG 0x005a
#define CMD_802_11D_DOMAIN_INFO 0x005b
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index cd3b0d400618..55a77e41170a 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -302,14 +302,13 @@ error:
static void if_usb_disconnect(struct usb_interface *intf)
{
struct if_usb_card *cardp = usb_get_intfdata(intf);
- struct lbs_private *priv = (struct lbs_private *) cardp->priv;
+ struct lbs_private *priv = cardp->priv;
lbs_deb_enter(LBS_DEB_MAIN);
cardp->surprise_removed = 1;
if (priv) {
- priv->surpriseremoved = 1;
lbs_stop_card(priv);
lbs_remove_card(priv);
}
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index e96ee0aa8439..58048189bd24 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -152,6 +152,12 @@ int lbs_start_iface(struct lbs_private *priv)
goto err;
}
+ ret = lbs_set_11d_domain_info(priv);
+ if (ret) {
+ lbs_deb_net("set 11d domain info failed\n");
+ goto err;
+ }
+
lbs_update_channel(priv);
priv->iface_running = true;
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index e87c031b298f..97807751ebcf 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -131,16 +131,13 @@ static int lbs_mesh_config(struct lbs_private *priv, uint16_t action,
int lbs_mesh_set_channel(struct lbs_private *priv, u8 channel)
{
+ priv->mesh_channel = channel;
return lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, channel);
}
static uint16_t lbs_mesh_get_channel(struct lbs_private *priv)
{
- struct wireless_dev *mesh_wdev = priv->mesh_dev->ieee80211_ptr;
- if (mesh_wdev->channel)
- return mesh_wdev->channel->hw_value;
- else
- return 1;
+ return priv->mesh_channel ?: 1;
}
/***************************************************************************
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 19a5a92dd779..d576dd6665d3 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -253,7 +253,7 @@ lbtf_deb_leave(LBTF_DEB_MAIN);
static void if_usb_disconnect(struct usb_interface *intf)
{
struct if_usb_card *cardp = usb_get_intfdata(intf);
- struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;
+ struct lbtf_private *priv = cardp->priv;
lbtf_deb_enter(LBTF_DEB_MAIN);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index a0b7cfd34685..643f968b05ee 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -292,7 +292,7 @@ struct mac80211_hwsim_data {
struct list_head list;
struct ieee80211_hw *hw;
struct device *dev;
- struct ieee80211_supported_band bands[2];
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
@@ -571,7 +571,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
skb_dequeue(&data->pending);
}
- skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (skb == NULL)
goto nla_put_failure;
@@ -678,8 +678,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
continue;
if (data2->idle || !data2->started ||
- !hwsim_ps_rx_ok(data2, skb) ||
- !data->channel || !data2->channel ||
+ !hwsim_ps_rx_ok(data2, skb) || !data2->channel ||
data->channel->center_freq != data2->channel->center_freq ||
!(data->group & data2->group))
continue;
@@ -1083,6 +1082,8 @@ enum hwsim_testmode_attr {
enum hwsim_testmode_cmd {
HWSIM_TM_CMD_SET_PS = 0,
HWSIM_TM_CMD_GET_PS = 1,
+ HWSIM_TM_CMD_STOP_QUEUES = 2,
+ HWSIM_TM_CMD_WAKE_QUEUES = 3,
};
static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
@@ -1122,6 +1123,12 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
goto nla_put_failure;
return cfg80211_testmode_reply(skb);
+ case HWSIM_TM_CMD_STOP_QUEUES:
+ ieee80211_stop_queues(hw);
+ return 0;
+ case HWSIM_TM_CMD_WAKE_QUEUES:
+ ieee80211_wake_queues(hw);
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -1486,7 +1493,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
struct mac80211_hwsim_data *data2;
struct ieee80211_tx_info *txi;
struct hwsim_tx_rate *tx_attempts;
- struct sk_buff __user *ret_skb;
+ unsigned long ret_skb_ptr;
struct sk_buff *skb, *tmp;
struct mac_address *src;
unsigned int hwsim_flags;
@@ -1504,8 +1511,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]);
hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]);
- ret_skb = (struct sk_buff __user *)
- (unsigned long) nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
+ ret_skb_ptr = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]);
data2 = get_hwsim_data_ref_from_addr(src);
@@ -1514,7 +1520,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
/* look for the skb matching the cookie passed back from user */
skb_queue_walk_safe(&data2->pending, skb, tmp) {
- if (skb == ret_skb) {
+ if ((unsigned long)skb == ret_skb_ptr) {
skb_unlink(skb, &data2->pending);
found = true;
break;
@@ -1534,11 +1540,6 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
/* now send back TX status */
txi = IEEE80211_SKB_CB(skb);
- if (txi->control.vif)
- hwsim_check_magic(txi->control.vif);
- if (txi->control.sta)
- hwsim_check_sta_magic(txi->control.sta);
-
ieee80211_tx_info_clear_status(txi);
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
@@ -1857,7 +1858,7 @@ static int __init init_mac80211_hwsim(void)
sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
break;
default:
- break;
+ continue;
}
sband->ht_cap.ht_supported = true;
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index fe8ebfebcc0e..e535c937628b 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -101,8 +101,7 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
{
int tid;
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
- struct host_cmd_ds_11n_delba *del_ba =
- (struct host_cmd_ds_11n_delba *) &resp->params.del_ba;
+ struct host_cmd_ds_11n_delba *del_ba = &resp->params.del_ba;
uint16_t del_ba_param_set = le16_to_cpu(del_ba->del_ba_param_set);
tid = del_ba_param_set >> DELBA_TID_POS;
@@ -147,8 +146,7 @@ int mwifiex_ret_11n_addba_req(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
int tid;
- struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
- (struct host_cmd_ds_11n_addba_rsp *) &resp->params.add_ba_rsp;
+ struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl;
add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn))
@@ -412,7 +410,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
memcpy((u8 *) bss_co_2040 +
sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_bss_co_2040 +
+ bss_desc->bcn_bss_co_2040 +
sizeof(struct ieee_types_header),
le16_to_cpu(bss_co_2040->header.len));
@@ -426,10 +424,8 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap));
- memcpy((u8 *) ext_cap +
- sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ext_cap +
- sizeof(struct ieee_types_header),
+ memcpy((u8 *)ext_cap + sizeof(struct mwifiex_ie_types_header),
+ bss_desc->bcn_ext_cap + sizeof(struct ieee_types_header),
le16_to_cpu(ext_cap->header.len));
*buffer += sizeof(struct mwifiex_ie_types_extcap);
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 77646d777dce..28366e9211fb 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -105,8 +105,7 @@ static inline u8 mwifiex_space_avail_for_new_ba_stream(
priv = adapter->priv[i];
if (priv)
ba_stream_num += mwifiex_wmm_list_len(
- (struct list_head *)
- &priv->tx_ba_stream_tbl_ptr);
+ &priv->tx_ba_stream_tbl_ptr);
}
return ((ba_stream_num <
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 9c44088054dd..591ccd33f83c 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -256,7 +256,8 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
else
last_seq = priv->rx_seq[tid];
- if (last_seq >= new_node->start_win)
+ if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
+ last_seq >= new_node->start_win)
new_node->start_win = last_seq + 1;
new_node->win_size = win_size;
@@ -296,9 +297,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
*/
int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
{
- struct host_cmd_ds_11n_addba_req *add_ba_req =
- (struct host_cmd_ds_11n_addba_req *)
- &cmd->params.add_ba_req;
+ struct host_cmd_ds_11n_addba_req *add_ba_req = &cmd->params.add_ba_req;
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
@@ -320,9 +319,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
struct host_cmd_ds_11n_addba_req
*cmd_addba_req)
{
- struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
- (struct host_cmd_ds_11n_addba_rsp *)
- &cmd->params.add_ba_rsp;
+ struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
u8 tid;
int win_size;
uint16_t block_ack_param_set;
@@ -367,8 +364,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
*/
int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
{
- struct host_cmd_ds_11n_delba *del_ba = (struct host_cmd_ds_11n_delba *)
- &cmd->params.del_ba;
+ struct host_cmd_ds_11n_delba *del_ba = &cmd->params.del_ba;
cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
@@ -398,8 +394,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
int start_win, end_win, win_size;
u16 pkt_index;
- tbl = mwifiex_11n_get_rx_reorder_tbl((struct mwifiex_private *) priv,
- tid, ta);
+ tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
if (!tbl) {
if (pkt_type != PKT_TYPE_BAR)
mwifiex_process_rx_packet(priv->adapter, payload);
@@ -520,9 +515,7 @@ mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
- struct host_cmd_ds_11n_addba_rsp *add_ba_rsp =
- (struct host_cmd_ds_11n_addba_rsp *)
- &resp->params.add_ba_rsp;
+ struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
int tid, win_size;
struct mwifiex_rx_reorder_tbl *tbl;
uint16_t block_ack_param_set;
@@ -596,5 +589,5 @@ void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
- memset(priv->rx_seq, 0, sizeof(priv->rx_seq));
+ mwifiex_reset_11n_rx_seq_num(priv);
}
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index f1bffebabc60..6c9815a0f5d8 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -37,6 +37,13 @@
#define ADDBA_RSP_STATUS_ACCEPT 0
+#define MWIFIEX_DEF_11N_RX_SEQ_NUM 0xffff
+
+static inline void mwifiex_reset_11n_rx_seq_num(struct mwifiex_private *priv)
+{
+ memset(priv->rx_seq, 0xff, sizeof(priv->rx_seq));
+}
+
int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *,
u16 seqNum,
u16 tid, u8 *ta,
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index ce61b6fae1c9..fe42137384da 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -48,10 +48,9 @@ static const struct ieee80211_iface_combination mwifiex_iface_comb_ap_sta = {
* Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE
*/
static u8
-mwifiex_cfg80211_channel_type_to_sec_chan_offset(enum nl80211_channel_type
- channel_type)
+mwifiex_chan_type_to_sec_chan_offset(enum nl80211_channel_type chan_type)
{
- switch (channel_type) {
+ switch (chan_type) {
case NL80211_CHAN_NO_HT:
case NL80211_CHAN_HT20:
return IEEE80211_HT_PARAM_CHA_SEC_NONE;
@@ -170,7 +169,9 @@ mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
if (!priv->sec_info.wep_enabled)
return 0;
- if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
+ if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
+ priv->wep_key_curr_index = key_index;
+ } else if (mwifiex_set_encode(priv, NULL, 0, key_index, NULL, 0)) {
wiphy_err(wiphy, "set default Tx key index\n");
return -EFAULT;
}
@@ -187,9 +188,25 @@ mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev,
struct key_params *params)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev);
+ struct mwifiex_wep_key *wep_key;
const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
const u8 *peer_mac = pairwise ? mac_addr : bc_mac;
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP &&
+ (params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ params->cipher == WLAN_CIPHER_SUITE_WEP104)) {
+ if (params->key && params->key_len) {
+ wep_key = &priv->wep_key[key_index];
+ memset(wep_key, 0, sizeof(struct mwifiex_wep_key));
+ memcpy(wep_key->key_material, params->key,
+ params->key_len);
+ wep_key->key_index = key_index;
+ wep_key->key_length = params->key_len;
+ priv->sec_info.wep_enabled = 1;
+ }
+ return 0;
+ }
+
if (mwifiex_set_encode(priv, params->key, params->key_len,
key_index, peer_mac, 0)) {
wiphy_err(wiphy, "crypto keys added\n");
@@ -242,13 +259,13 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
flag = 1;
first_chan = (u32) ch->hw_value;
next_chan = first_chan;
- max_pwr = ch->max_power;
+ max_pwr = ch->max_reg_power;
no_of_parsed_chan = 1;
continue;
}
if (ch->hw_value == next_chan + 1 &&
- ch->max_power == max_pwr) {
+ ch->max_reg_power == max_pwr) {
next_chan++;
no_of_parsed_chan++;
} else {
@@ -259,7 +276,7 @@ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
no_of_triplet++;
first_chan = (u32) ch->hw_value;
next_chan = first_chan;
- max_pwr = ch->max_power;
+ max_pwr = ch->max_reg_power;
no_of_parsed_chan = 1;
}
}
@@ -321,79 +338,6 @@ static int mwifiex_reg_notifier(struct wiphy *wiphy,
}
/*
- * This function sets the RF channel.
- *
- * This function creates multiple IOCTL requests, populates them accordingly
- * and issues them to set the band/channel and frequency.
- */
-static int
-mwifiex_set_rf_channel(struct mwifiex_private *priv,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
-{
- struct mwifiex_chan_freq_power cfp;
- u32 config_bands = 0;
- struct wiphy *wiphy = priv->wdev->wiphy;
- struct mwifiex_adapter *adapter = priv->adapter;
-
- if (chan) {
- /* Set appropriate bands */
- if (chan->band == IEEE80211_BAND_2GHZ) {
- if (channel_type == NL80211_CHAN_NO_HT)
- if (priv->adapter->config_bands == BAND_B ||
- priv->adapter->config_bands == BAND_G)
- config_bands =
- priv->adapter->config_bands;
- else
- config_bands = BAND_B | BAND_G;
- else
- config_bands = BAND_B | BAND_G | BAND_GN;
- } else {
- if (channel_type == NL80211_CHAN_NO_HT)
- config_bands = BAND_A;
- else
- config_bands = BAND_AN | BAND_A;
- }
-
- if (!((config_bands | adapter->fw_bands) &
- ~adapter->fw_bands)) {
- adapter->config_bands = config_bands;
- if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
- adapter->adhoc_start_band = config_bands;
- if ((config_bands & BAND_GN) ||
- (config_bands & BAND_AN))
- adapter->adhoc_11n_enabled = true;
- else
- adapter->adhoc_11n_enabled = false;
- }
- }
- adapter->sec_chan_offset =
- mwifiex_cfg80211_channel_type_to_sec_chan_offset
- (channel_type);
- adapter->channel_type = channel_type;
-
- mwifiex_send_domain_info_cmd_fw(wiphy);
- }
-
- wiphy_dbg(wiphy, "info: setting band %d, chan offset %d, mode %d\n",
- config_bands, adapter->sec_chan_offset, priv->bss_mode);
- if (!chan)
- return 0;
-
- memset(&cfp, 0, sizeof(cfp));
- cfp.freq = chan->center_freq;
- cfp.channel = ieee80211_frequency_to_channel(chan->center_freq);
-
- if (mwifiex_bss_set_channel(priv, &cfp))
- return -EFAULT;
-
- if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
- return mwifiex_drv_change_adhoc_chan(priv, cfp.channel);
- else
- return mwifiex_uap_set_channel(priv, cfp.channel);
-}
-
-/*
* This function sets the fragmentation threshold.
*
* The fragmentation threshold value must lie between MWIFIEX_FRAG_MIN_VALUE
@@ -608,7 +552,7 @@ static int
mwifiex_dump_station_info(struct mwifiex_private *priv,
struct station_info *sinfo)
{
- struct mwifiex_rate_cfg rate;
+ u32 rate;
sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES |
STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS |
@@ -634,9 +578,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
/*
* Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
- * MCS index values for us are 0 to 7.
+ * MCS index values for us are 0 to 15.
*/
- if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 8)) {
+ if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
sinfo->txrate.mcs = priv->tx_rate;
sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
/* 40MHz rate */
@@ -654,7 +598,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
sinfo->tx_packets = priv->stats.tx_packets;
sinfo->signal = priv->bcn_rssi_avg;
/* bit rate is in 500 kb/s units. Convert it to 100kb/s units */
- sinfo->txrate.legacy = rate.rate * 5;
+ sinfo->txrate.legacy = rate * 5;
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
sinfo->filled |= STATION_INFO_BSS_PARAM;
@@ -809,8 +753,8 @@ static const u32 mwifiex_cipher_suites[] = {
/*
* CFG802.11 operation handler for setting bit rates.
*
- * Function selects legacy bang B/G/BG from corresponding bitrates selection.
- * Currently only 2.4GHz band is supported.
+ * Function configures data rates to firmware using bitrate mask
+ * provided by cfg80211.
*/
static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
struct net_device *dev,
@@ -818,43 +762,36 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
const struct cfg80211_bitrate_mask *mask)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- int index = 0, mode = 0, i;
- struct mwifiex_adapter *adapter = priv->adapter;
+ u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
+ enum ieee80211_band band;
- /* Currently only 2.4GHz is supported */
- for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) {
- /*
- * Rates below 6 Mbps in the table are CCK rates; 802.11b
- * and from 6 they are OFDM; 802.11G
- */
- if (mwifiex_rates[i].bitrate == 60) {
- index = 1 << i;
- break;
- }
+ if (!priv->media_connected) {
+ dev_err(priv->adapter->dev,
+ "Can not set Tx data rate in disconnected state\n");
+ return -EINVAL;
}
- if (mask->control[IEEE80211_BAND_2GHZ].legacy < index) {
- mode = BAND_B;
- } else {
- mode = BAND_G;
- if (mask->control[IEEE80211_BAND_2GHZ].legacy % index)
- mode |= BAND_B;
- }
+ band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
- if (!((mode | adapter->fw_bands) & ~adapter->fw_bands)) {
- adapter->config_bands = mode;
- if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
- adapter->adhoc_start_band = mode;
- adapter->adhoc_11n_enabled = false;
- }
- }
- adapter->sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
- adapter->channel_type = NL80211_CHAN_NO_HT;
+ memset(bitmap_rates, 0, sizeof(bitmap_rates));
- wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n",
- (mode & BAND_B) ? "b" : "", (mode & BAND_G) ? "g" : "");
+ /* Fill HR/DSSS rates. */
+ if (band == IEEE80211_BAND_2GHZ)
+ bitmap_rates[0] = mask->control[band].legacy & 0x000f;
- return 0;
+ /* Fill OFDM rates */
+ if (band == IEEE80211_BAND_2GHZ)
+ bitmap_rates[1] = (mask->control[band].legacy & 0x0ff0) >> 4;
+ else
+ bitmap_rates[1] = mask->control[band].legacy;
+
+ /* Fill MCS rates */
+ bitmap_rates[2] = mask->control[band].mcs[0];
+ if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+ bitmap_rates[2] |= mask->control[band].mcs[1] << 8;
+
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
+ HostCmd_ACT_GEN_SET, 0, bitmap_rates);
}
/*
@@ -896,6 +833,69 @@ static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
return 0;
}
+/* cfg80211 operation handler for change_beacon.
+ * Function retrieves and sets modified management IEs to FW.
+ */
+static int mwifiex_cfg80211_change_beacon(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_beacon_data *data)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP) {
+ wiphy_err(wiphy, "%s: bss_type mismatched\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!priv->bss_started) {
+ wiphy_err(wiphy, "%s: bss not started\n", __func__);
+ return -EINVAL;
+ }
+
+ if (mwifiex_set_mgmt_ies(priv, data)) {
+ wiphy_err(wiphy, "%s: setting mgmt ies failed\n", __func__);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
+{
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ struct mwifiex_private *priv = mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY);
+ struct mwifiex_ds_ant_cfg ant_cfg;
+
+ if (!tx_ant || !rx_ant)
+ return -EOPNOTSUPP;
+
+ if (adapter->hw_dev_mcs_support != HT_STREAM_2X2) {
+ /* Not a MIMO chip. User should provide specific antenna number
+ * for Tx/Rx path or enable all antennas for diversity
+ */
+ if (tx_ant != rx_ant)
+ return -EOPNOTSUPP;
+
+ if ((tx_ant & (tx_ant - 1)) &&
+ (tx_ant != BIT(adapter->number_of_antenna) - 1))
+ return -EOPNOTSUPP;
+
+ if ((tx_ant == BIT(adapter->number_of_antenna) - 1) &&
+ (priv->adapter->number_of_antenna > 1)) {
+ tx_ant = RF_ANTENNA_AUTO;
+ rx_ant = RF_ANTENNA_AUTO;
+ }
+ }
+
+ ant_cfg.tx_ant = tx_ant;
+ ant_cfg.rx_ant = rx_ant;
+
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_RF_ANTENNA,
+ HostCmd_ACT_GEN_SET, 0, &ant_cfg);
+}
+
/* cfg80211 operation handler for stop ap.
* Function stops BSS running at uAP interface.
*/
@@ -926,10 +926,11 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
{
struct mwifiex_uap_bss_param *bss_cfg;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ u8 config_bands = 0;
if (priv->bss_type != MWIFIEX_BSS_TYPE_UAP)
return -1;
- if (mwifiex_set_mgmt_ies(priv, params))
+ if (mwifiex_set_mgmt_ies(priv, &params->beacon))
return -1;
bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
@@ -958,15 +959,41 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
/* firmware doesn't support this type of hidden SSID */
default:
+ kfree(bss_cfg);
return -EINVAL;
}
+ bss_cfg->channel =
+ (u8)ieee80211_frequency_to_channel(params->channel->center_freq);
+ bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
+
+ /* Set appropriate bands */
+ if (params->channel->band == IEEE80211_BAND_2GHZ) {
+ if (params->channel_type == NL80211_CHAN_NO_HT)
+ config_bands = BAND_B | BAND_G;
+ else
+ config_bands = BAND_B | BAND_G | BAND_GN;
+ } else {
+ if (params->channel_type == NL80211_CHAN_NO_HT)
+ config_bands = BAND_A;
+ else
+ config_bands = BAND_AN | BAND_A;
+ }
+
+ if (!((config_bands | priv->adapter->fw_bands) &
+ ~priv->adapter->fw_bands))
+ priv->adapter->config_bands = config_bands;
+
+ mwifiex_send_domain_info_cmd_fw(wiphy);
+
if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
kfree(bss_cfg);
wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
return -1;
}
+ mwifiex_set_ht_params(priv, bss_cfg, params);
+
if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_UAP_BSS_STOP,
HostCmd_ACT_GEN_SET, 0, NULL)) {
wiphy_err(wiphy, "Failed to stop the BSS\n");
@@ -990,6 +1017,16 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
return -1;
}
+ if (priv->sec_info.wep_enabled)
+ priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
+ else
+ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE;
+
+ if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->curr_pkt_filter))
+ return -1;
+
return 0;
}
@@ -1082,7 +1119,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
struct cfg80211_ssid req_ssid;
int ret, auth_type = 0;
struct cfg80211_bss *bss = NULL;
- u8 is_scanning_required = 0;
+ u8 is_scanning_required = 0, config_bands = 0;
memset(&req_ssid, 0, sizeof(struct cfg80211_ssid));
@@ -1101,9 +1138,19 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
/* disconnect before try to associate */
mwifiex_deauthenticate(priv, NULL);
- if (channel)
- ret = mwifiex_set_rf_channel(priv, channel,
- priv->adapter->channel_type);
+ if (channel) {
+ if (mode == NL80211_IFTYPE_STATION) {
+ if (channel->band == IEEE80211_BAND_2GHZ)
+ config_bands = BAND_B | BAND_G | BAND_GN;
+ else
+ config_bands = BAND_A | BAND_AN;
+
+ if (!((config_bands | priv->adapter->fw_bands) &
+ ~priv->adapter->fw_bands))
+ priv->adapter->config_bands = config_bands;
+ }
+ mwifiex_send_domain_info_cmd_fw(priv->wdev->wiphy);
+ }
/* As this is new association, clear locally stored
* keys and security related flags */
@@ -1268,6 +1315,76 @@ done:
}
/*
+ * This function sets following parameters for ibss network.
+ * - channel
+ * - start band
+ * - 11n flag
+ * - secondary channel offset
+ */
+static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
+ struct cfg80211_ibss_params *params)
+{
+ struct wiphy *wiphy = priv->wdev->wiphy;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ int index = 0, i;
+ u8 config_bands = 0;
+
+ if (params->channel->band == IEEE80211_BAND_2GHZ) {
+ if (!params->basic_rates) {
+ config_bands = BAND_B | BAND_G;
+ } else {
+ for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) {
+ /*
+ * Rates below 6 Mbps in the table are CCK
+ * rates; 802.11b and from 6 they are OFDM;
+ * 802.11G
+ */
+ if (mwifiex_rates[i].bitrate == 60) {
+ index = 1 << i;
+ break;
+ }
+ }
+
+ if (params->basic_rates < index) {
+ config_bands = BAND_B;
+ } else {
+ config_bands = BAND_G;
+ if (params->basic_rates % index)
+ config_bands |= BAND_B;
+ }
+ }
+
+ if (params->channel_type != NL80211_CHAN_NO_HT)
+ config_bands |= BAND_GN;
+ } else {
+ if (params->channel_type == NL80211_CHAN_NO_HT)
+ config_bands = BAND_A;
+ else
+ config_bands = BAND_AN | BAND_A;
+ }
+
+ if (!((config_bands | adapter->fw_bands) & ~adapter->fw_bands)) {
+ adapter->config_bands = config_bands;
+ adapter->adhoc_start_band = config_bands;
+
+ if ((config_bands & BAND_GN) || (config_bands & BAND_AN))
+ adapter->adhoc_11n_enabled = true;
+ else
+ adapter->adhoc_11n_enabled = false;
+ }
+
+ adapter->sec_chan_offset =
+ mwifiex_chan_type_to_sec_chan_offset(params->channel_type);
+ priv->adhoc_channel =
+ ieee80211_frequency_to_channel(params->channel->center_freq);
+
+ wiphy_dbg(wiphy, "info: set ibss band %d, chan %d, chan offset %d\n",
+ config_bands, priv->adhoc_channel, adapter->sec_chan_offset);
+
+ return 0;
+}
+
+/*
* CFG802.11 operation handler to join an IBSS.
*
* This function does not work in any mode other than Ad-Hoc, or if
@@ -1289,6 +1406,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
(char *) params->ssid, params->bssid);
+ mwifiex_set_ibss_params(priv, params);
+
ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
params->bssid, priv->bss_mode,
params->channel, NULL, params->privacy);
@@ -1335,9 +1454,10 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
* it also informs the results.
*/
static int
-mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
+mwifiex_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
+ struct net_device *dev = request->wdev->netdev;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int i;
struct ieee80211_channel *chan;
@@ -1381,7 +1501,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
priv->user_scan_cfg->chan_list[i].scan_time = 0;
}
- if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
+ if (mwifiex_scan_networks(priv, priv->user_scan_cfg))
return -EFAULT;
if (request->ie && request->ie_len) {
@@ -1471,11 +1591,11 @@ mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info,
/*
* create a new virtual interface with the given name
*/
-struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
- char *name,
- enum nl80211_iftype type,
- u32 *flags,
- struct vif_params *params)
+struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
{
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
struct mwifiex_private *priv;
@@ -1596,16 +1716,16 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_init(priv);
#endif
- return dev;
+ return wdev;
}
EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf);
/*
* del_virtual_intf: remove the virtual interface determined by dev
*/
-int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
+int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
{
- struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
#ifdef CONFIG_DEBUG_FS
mwifiex_dev_debugfs_remove(priv);
@@ -1617,11 +1737,11 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
- if (dev->reg_state == NETREG_REGISTERED)
- unregister_netdevice(dev);
+ if (wdev->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdevice(wdev->netdev);
- if (dev->reg_state == NETREG_UNREGISTERED)
- free_netdev(dev);
+ if (wdev->netdev->reg_state == NETREG_UNREGISTERED)
+ free_netdev(wdev->netdev);
/* Clear the priv in adapter */
priv->netdev = NULL;
@@ -1655,7 +1775,9 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask,
.start_ap = mwifiex_cfg80211_start_ap,
.stop_ap = mwifiex_cfg80211_stop_ap,
+ .change_beacon = mwifiex_cfg80211_change_beacon,
.set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
+ .set_antenna = mwifiex_cfg80211_set_antenna,
};
/*
@@ -1702,7 +1824,16 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
memcpy(wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_CUSTOM_REGULATORY;
+ wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+ wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2;
+
+ wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
+ wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
+
+ wiphy->features = NL80211_FEATURE_HT_IBSS;
/* Reserve space for mwifiex specific private data for BSS */
wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
@@ -1713,7 +1844,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wdev_priv = wiphy_priv(wiphy);
*(unsigned long *)wdev_priv = (unsigned long)adapter;
- set_wiphy_dev(wiphy, (struct device *)priv->adapter->dev);
+ set_wiphy_dev(wiphy, priv->adapter->dev);
ret = wiphy_register(wiphy);
if (ret < 0) {
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 560871b0e236..f69300f93f42 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -167,23 +167,6 @@ u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
}
/*
- * This function maps a data rate value into corresponding index in supported
- * rates table.
- */
-u8 mwifiex_data_rate_to_index(u32 rate)
-{
- u16 *ptr;
-
- if (rate) {
- ptr = memchr(mwifiex_data_rates, rate,
- sizeof(mwifiex_data_rates));
- if (ptr)
- return (u8) (ptr - mwifiex_data_rates);
- }
- return 0;
-}
-
-/*
* This function returns the current active data rates.
*
* The result may vary depending upon connection status.
@@ -277,20 +260,6 @@ mwifiex_is_rate_auto(struct mwifiex_private *priv)
}
/*
- * This function converts rate bitmap into rate index.
- */
-int mwifiex_get_rate_index(u16 *rate_bitmap, int size)
-{
- int i;
-
- for (i = 0; i < size * 8; i++)
- if (rate_bitmap[i / 16] & (1 << (i % 16)))
- return i;
-
- return 0;
-}
-
-/*
* This function gets the supported data rates.
*
* The function works in both Ad-Hoc and infra mode by printing the
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 51e023ec1de4..c68adec3cc8b 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -578,6 +578,7 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
} else {
adapter->cmd_queued = cmd_node;
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ queue_work(adapter->workqueue, &adapter->main_work);
}
return ret;
@@ -1102,7 +1103,8 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
&resp->params.opt_hs_cfg;
uint32_t conditions = le32_to_cpu(phs_cfg->params.hs_config.conditions);
- if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE)) {
+ if (phs_cfg->action == cpu_to_le16(HS_ACTIVATE) &&
+ adapter->iface_type == MWIFIEX_SDIO) {
mwifiex_hs_activated_event(priv, true);
return 0;
} else {
@@ -1114,6 +1116,9 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
}
if (conditions != HOST_SLEEP_CFG_CANCEL) {
adapter->is_hs_configured = true;
+ if (adapter->iface_type == MWIFIEX_USB ||
+ adapter->iface_type == MWIFIEX_PCIE)
+ mwifiex_hs_activated_event(priv, true);
} else {
adapter->is_hs_configured = false;
if (adapter->hs_activated)
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index f918f66e5e27..070ef25f5186 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -41,16 +41,7 @@
#define MWIFIEX_AMPDU_DEF_RXWINSIZE 16
#define MWIFIEX_DEFAULT_BLOCK_ACK_TIMEOUT 0xffff
-#define MWIFIEX_RATE_INDEX_HRDSSS0 0
-#define MWIFIEX_RATE_INDEX_HRDSSS3 3
-#define MWIFIEX_RATE_INDEX_OFDM0 4
-#define MWIFIEX_RATE_INDEX_OFDM7 11
-#define MWIFIEX_RATE_INDEX_MCS0 12
-
-#define MWIFIEX_RATE_BITMAP_OFDM0 16
-#define MWIFIEX_RATE_BITMAP_OFDM7 23
#define MWIFIEX_RATE_BITMAP_MCS0 32
-#define MWIFIEX_RATE_BITMAP_MCS127 159
#define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024)
#define MWIFIEX_RX_CMD_BUF_SIZE (2 * 1024)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 561452a5c818..e831b440a24a 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -124,6 +124,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
+#define TLV_TYPE_UAP_WEP_KEY (PROPRIETARY_TLV_BASE_ID + 59)
#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
#define TLV_TYPE_UAP_AKMP (PROPRIETARY_TLV_BASE_ID + 65)
@@ -162,6 +163,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
+#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
+ IEEE80211_HT_CAP_SM_PS)
+
+#define MWIFIEX_DEF_AMPDU IEEE80211_HT_AMPDU_PARM_FACTOR
+
/* dev_cap bitmap
* BIT
* 0-16 reserved
@@ -218,7 +225,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_BBP_REG_ACCESS 0x001a
#define HostCmd_CMD_RF_REG_ACCESS 0x001b
#define HostCmd_CMD_PMIC_REG_ACCESS 0x00ad
-#define HostCmd_CMD_802_11_RF_CHANNEL 0x001d
+#define HostCmd_CMD_RF_TX_PWR 0x001e
+#define HostCmd_CMD_RF_ANTENNA 0x0020
#define HostCmd_CMD_802_11_DEAUTHENTICATE 0x0024
#define HostCmd_CMD_MAC_CONTROL 0x0028
#define HostCmd_CMD_802_11_AD_HOC_START 0x002b
@@ -314,6 +322,12 @@ enum ENH_PS_MODES {
#define HostCmd_BSS_TYPE_MASK 0xf000
+#define HostCmd_ACT_SET_RX 0x0001
+#define HostCmd_ACT_SET_TX 0x0002
+#define HostCmd_ACT_SET_BOTH 0x0003
+
+#define RF_ANTENNA_AUTO 0xFFFF
+
#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) { \
(((seq) & 0x00ff) | \
(((num) & 0x000f) << 8)) | \
@@ -869,6 +883,25 @@ struct host_cmd_ds_txpwr_cfg {
__le32 mode;
} __packed;
+struct host_cmd_ds_rf_tx_pwr {
+ __le16 action;
+ __le16 cur_level;
+ u8 max_power;
+ u8 min_power;
+} __packed;
+
+struct host_cmd_ds_rf_ant_mimo {
+ __le16 action_tx;
+ __le16 tx_ant_mode;
+ __le16 action_rx;
+ __le16 rx_ant_mode;
+};
+
+struct host_cmd_ds_rf_ant_siso {
+ __le16 action;
+ __le16 ant_mode;
+};
+
struct mwifiex_bcn_param {
u8 bssid[ETH_ALEN];
u8 rssi;
@@ -1195,6 +1228,13 @@ struct host_cmd_tlv_passphrase {
u8 passphrase[0];
} __packed;
+struct host_cmd_tlv_wep_key {
+ struct host_cmd_tlv tlv;
+ u8 key_index;
+ u8 is_default;
+ u8 key[1];
+};
+
struct host_cmd_tlv_auth_type {
struct host_cmd_tlv tlv;
u8 auth_type;
@@ -1251,14 +1291,6 @@ struct host_cmd_tlv_channel_band {
u8 channel;
} __packed;
-struct host_cmd_ds_802_11_rf_channel {
- __le16 action;
- __le16 current_channel;
- __le16 rf_type;
- __le16 reserved;
- u8 reserved_1[32];
-} __packed;
-
struct host_cmd_ds_version_ext {
u8 version_str_sel;
char version_str[128];
@@ -1343,10 +1375,12 @@ struct host_cmd_ds_command {
struct host_cmd_ds_802_11_rssi_info rssi_info;
struct host_cmd_ds_802_11_rssi_info_rsp rssi_info_rsp;
struct host_cmd_ds_802_11_snmp_mib smib;
- struct host_cmd_ds_802_11_rf_channel rf_channel;
struct host_cmd_ds_tx_rate_query tx_rate;
struct host_cmd_ds_tx_rate_cfg tx_rate_cfg;
struct host_cmd_ds_txpwr_cfg txp_cfg;
+ struct host_cmd_ds_rf_tx_pwr txp;
+ struct host_cmd_ds_rf_ant_mimo ant_mimo;
+ struct host_cmd_ds_rf_ant_siso ant_siso;
struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
struct host_cmd_ds_802_11_scan scan;
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index ceb82cd749cc..1d8dd003e396 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -51,8 +51,7 @@ mwifiex_ie_get_autoidx(struct mwifiex_private *priv, u16 subtype_mask,
for (i = 0; i < priv->adapter->max_mgmt_ie_index; i++) {
mask = le16_to_cpu(priv->mgmt_ie[i].mgmt_subtype_mask);
- len = le16_to_cpu(priv->mgmt_ie[i].ie_length) +
- le16_to_cpu(ie->ie_length);
+ len = le16_to_cpu(ie->ie_length);
if (mask == MWIFIEX_AUTO_IDX_MASK)
continue;
@@ -108,10 +107,8 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
return -1;
tmp = (u8 *)&priv->mgmt_ie[index].ie_buffer;
- tmp += le16_to_cpu(priv->mgmt_ie[index].ie_length);
memcpy(tmp, &ie->ie_buffer, le16_to_cpu(ie->ie_length));
- le16_add_cpu(&priv->mgmt_ie[index].ie_length,
- le16_to_cpu(ie->ie_length));
+ priv->mgmt_ie[index].ie_length = ie->ie_length;
priv->mgmt_ie[index].ie_index = cpu_to_le16(index);
priv->mgmt_ie[index].mgmt_subtype_mask =
cpu_to_le16(mask);
@@ -213,95 +210,67 @@ mwifiex_update_uap_custom_ie(struct mwifiex_private *priv,
/* save assoc resp ie index after auto-indexing */
*assoc_idx = *((u16 *)pos);
+ kfree(ap_custom_ie);
return ret;
}
-/* This function parses different IEs- Tail IEs, beacon IEs, probe response IEs,
- * association response IEs from cfg80211_ap_settings function and sets these IE
- * to FW.
+/* This function checks if WPS IE is present in passed buffer and copies it to
+ * mwifiex_ie structure.
+ * Function takes pointer to struct mwifiex_ie pointer as argument.
+ * If WPS IE is present memory is allocated for mwifiex_ie pointer and filled
+ * in with WPS IE. Caller should take care of freeing this memory.
*/
-int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
- struct cfg80211_ap_settings *params)
+static int mwifiex_update_wps_ie(const u8 *ies, int ies_len,
+ struct mwifiex_ie **ie_ptr, u16 mask)
{
- struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL;
- struct mwifiex_ie *ar_ie = NULL, *rsn_ie = NULL;
- struct ieee_types_header *ie = NULL;
- u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
- u16 ar_idx = MWIFIEX_AUTO_IDX_MASK, rsn_idx = MWIFIEX_AUTO_IDX_MASK;
- u16 mask;
- int ret = 0;
-
- if (params->beacon.tail && params->beacon.tail_len) {
- ie = (void *)cfg80211_find_ie(WLAN_EID_RSN, params->beacon.tail,
- params->beacon.tail_len);
- if (ie) {
- rsn_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!rsn_ie)
- return -ENOMEM;
-
- rsn_ie->ie_index = cpu_to_le16(rsn_idx);
- mask = MGMT_MASK_BEACON | MGMT_MASK_PROBE_RESP |
- MGMT_MASK_ASSOC_RESP;
- rsn_ie->mgmt_subtype_mask = cpu_to_le16(mask);
- rsn_ie->ie_length = cpu_to_le16(ie->len + 2);
- memcpy(rsn_ie->ie_buffer, ie, ie->len + 2);
-
- if (mwifiex_update_uap_custom_ie(priv, rsn_ie, &rsn_idx,
- NULL, NULL,
- NULL, NULL)) {
- ret = -1;
- goto done;
- }
+ struct ieee_types_header *wps_ie;
+ struct mwifiex_ie *ie = NULL;
+ const u8 *vendor_ie;
+
+ vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPS,
+ ies, ies_len);
+ if (vendor_ie) {
+ ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!ie)
+ return -ENOMEM;
- priv->rsn_idx = rsn_idx;
- }
+ wps_ie = (struct ieee_types_header *)vendor_ie;
+ memcpy(ie->ie_buffer, wps_ie, wps_ie->len + 2);
+ ie->ie_length = cpu_to_le16(wps_ie->len + 2);
+ ie->mgmt_subtype_mask = cpu_to_le16(mask);
+ ie->ie_index = cpu_to_le16(MWIFIEX_AUTO_IDX_MASK);
}
- if (params->beacon.beacon_ies && params->beacon.beacon_ies_len) {
- beacon_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!beacon_ie) {
- ret = -ENOMEM;
- goto done;
- }
-
- beacon_ie->ie_index = cpu_to_le16(beacon_idx);
- beacon_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON);
- beacon_ie->ie_length =
- cpu_to_le16(params->beacon.beacon_ies_len);
- memcpy(beacon_ie->ie_buffer, params->beacon.beacon_ies,
- params->beacon.beacon_ies_len);
- }
+ *ie_ptr = ie;
+ return 0;
+}
- if (params->beacon.proberesp_ies && params->beacon.proberesp_ies_len) {
- pr_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!pr_ie) {
- ret = -ENOMEM;
- goto done;
- }
+/* This function parses beacon IEs, probe response IEs, association response IEs
+ * from cfg80211_ap_settings->beacon and sets these IE to FW.
+ */
+static int mwifiex_set_mgmt_beacon_data_ies(struct mwifiex_private *priv,
+ struct cfg80211_beacon_data *data)
+{
+ struct mwifiex_ie *beacon_ie = NULL, *pr_ie = NULL, *ar_ie = NULL;
+ u16 beacon_idx = MWIFIEX_AUTO_IDX_MASK, pr_idx = MWIFIEX_AUTO_IDX_MASK;
+ u16 ar_idx = MWIFIEX_AUTO_IDX_MASK;
+ int ret = 0;
- pr_ie->ie_index = cpu_to_le16(pr_idx);
- pr_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_PROBE_RESP);
- pr_ie->ie_length =
- cpu_to_le16(params->beacon.proberesp_ies_len);
- memcpy(pr_ie->ie_buffer, params->beacon.proberesp_ies,
- params->beacon.proberesp_ies_len);
- }
+ if (data->beacon_ies && data->beacon_ies_len)
+ mwifiex_update_wps_ie(data->beacon_ies, data->beacon_ies_len,
+ &beacon_ie, MGMT_MASK_BEACON);
- if (params->beacon.assocresp_ies && params->beacon.assocresp_ies_len) {
- ar_ie = kmalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
- if (!ar_ie) {
- ret = -ENOMEM;
- goto done;
- }
+ if (data->proberesp_ies && data->proberesp_ies_len)
+ mwifiex_update_wps_ie(data->proberesp_ies,
+ data->proberesp_ies_len, &pr_ie,
+ MGMT_MASK_PROBE_RESP);
- ar_ie->ie_index = cpu_to_le16(ar_idx);
- mask = MGMT_MASK_ASSOC_RESP | MGMT_MASK_REASSOC_RESP;
- ar_ie->mgmt_subtype_mask = cpu_to_le16(mask);
- ar_ie->ie_length =
- cpu_to_le16(params->beacon.assocresp_ies_len);
- memcpy(ar_ie->ie_buffer, params->beacon.assocresp_ies,
- params->beacon.assocresp_ies_len);
- }
+ if (data->assocresp_ies && data->assocresp_ies_len)
+ mwifiex_update_wps_ie(data->assocresp_ies,
+ data->assocresp_ies_len, &ar_ie,
+ MGMT_MASK_ASSOC_RESP |
+ MGMT_MASK_REASSOC_RESP);
if (beacon_ie || pr_ie || ar_ie) {
ret = mwifiex_update_uap_custom_ie(priv, beacon_ie,
@@ -319,11 +288,67 @@ done:
kfree(beacon_ie);
kfree(pr_ie);
kfree(ar_ie);
- kfree(rsn_ie);
return ret;
}
+/* This function parses different IEs-tail IEs, beacon IEs, probe response IEs,
+ * association response IEs from cfg80211_ap_settings function and sets these IE
+ * to FW.
+ */
+int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
+ struct cfg80211_beacon_data *info)
+{
+ struct mwifiex_ie *gen_ie;
+ struct ieee_types_header *rsn_ie, *wpa_ie = NULL;
+ u16 rsn_idx = MWIFIEX_AUTO_IDX_MASK, ie_len = 0;
+ const u8 *vendor_ie;
+
+ if (info->tail && info->tail_len) {
+ gen_ie = kzalloc(sizeof(struct mwifiex_ie), GFP_KERNEL);
+ if (!gen_ie)
+ return -ENOMEM;
+ gen_ie->ie_index = cpu_to_le16(rsn_idx);
+ gen_ie->mgmt_subtype_mask = cpu_to_le16(MGMT_MASK_BEACON |
+ MGMT_MASK_PROBE_RESP |
+ MGMT_MASK_ASSOC_RESP);
+
+ rsn_ie = (void *)cfg80211_find_ie(WLAN_EID_RSN,
+ info->tail, info->tail_len);
+ if (rsn_ie) {
+ memcpy(gen_ie->ie_buffer, rsn_ie, rsn_ie->len + 2);
+ ie_len = rsn_ie->len + 2;
+ gen_ie->ie_length = cpu_to_le16(ie_len);
+ }
+
+ vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ info->tail,
+ info->tail_len);
+ if (vendor_ie) {
+ wpa_ie = (struct ieee_types_header *)vendor_ie;
+ memcpy(gen_ie->ie_buffer + ie_len,
+ wpa_ie, wpa_ie->len + 2);
+ ie_len += wpa_ie->len + 2;
+ gen_ie->ie_length = cpu_to_le16(ie_len);
+ }
+
+ if (rsn_ie || wpa_ie) {
+ if (mwifiex_update_uap_custom_ie(priv, gen_ie, &rsn_idx,
+ NULL, NULL,
+ NULL, NULL)) {
+ kfree(gen_ie);
+ return -1;
+ }
+ priv->rsn_idx = rsn_idx;
+ }
+
+ kfree(gen_ie);
+ }
+
+ return mwifiex_set_mgmt_beacon_data_ies(priv, info);
+}
+
/* This function removes management IE set */
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv)
{
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index c1cb004db913..21fdc6c02775 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -57,6 +57,69 @@ static int mwifiex_add_bss_prio_tbl(struct mwifiex_private *priv)
return 0;
}
+static void scan_delay_timer_fn(unsigned long data)
+{
+ struct mwifiex_private *priv = (struct mwifiex_private *)data;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct cmd_ctrl_node *cmd_node, *tmp_node;
+ unsigned long flags;
+
+ if (!mwifiex_wmm_lists_empty(adapter)) {
+ if (adapter->scan_delay_cnt == MWIFIEX_MAX_SCAN_DELAY_CNT) {
+ /*
+ * Abort scan operation by cancelling all pending scan
+ * command
+ */
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ list_for_each_entry_safe(cmd_node, tmp_node,
+ &adapter->scan_pending_q,
+ list) {
+ list_del(&cmd_node->list);
+ cmd_node->wait_q_enabled = false;
+ mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ }
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ adapter->scan_processing = false;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock,
+ flags);
+
+ if (priv->user_scan_cfg) {
+ dev_dbg(priv->adapter->dev,
+ "info: %s: scan aborted\n", __func__);
+ cfg80211_scan_done(priv->scan_request, 1);
+ priv->scan_request = NULL;
+ kfree(priv->user_scan_cfg);
+ priv->user_scan_cfg = NULL;
+ }
+ } else {
+ /*
+ * Tx data queue is still not empty, delay scan
+ * operation further by 20msec.
+ */
+ mod_timer(&priv->scan_delay_timer, jiffies +
+ msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ adapter->scan_delay_cnt++;
+ }
+ queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
+ } else {
+ /*
+ * Tx data queue is empty. Get scan command from scan_pending_q
+ * and put to cmd_pending_q to resume scan operation
+ */
+ adapter->scan_delay_cnt = 0;
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ cmd_node = list_first_entry(&adapter->scan_pending_q,
+ struct cmd_ctrl_node, list);
+ list_del(&cmd_node->list);
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ }
+}
+
/*
* This function initializes the private structure and sets default
* values to the members.
@@ -136,6 +199,9 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
priv->scan_block = false;
+ setup_timer(&priv->scan_delay_timer, scan_delay_timer_fn,
+ (unsigned long)priv);
+
return mwifiex_add_bss_prio_tbl(priv);
}
@@ -278,7 +344,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->adhoc_awake_period = 0;
memset(&adapter->arp_filter, 0, sizeof(adapter->arp_filter));
adapter->arp_filter_size = 0;
- adapter->channel_type = NL80211_CHAN_HT20;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index e6be6ee75951..50191539bb32 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -21,6 +21,7 @@
#define _MWIFIEX_IOCTL_H_
#include <net/mac80211.h>
+#include <net/lib80211.h>
enum {
MWIFIEX_SCAN_TYPE_UNCHANGED = 0,
@@ -71,6 +72,13 @@ struct wpa_param {
u8 passphrase[MWIFIEX_WPA_PASSHPHRASE_LEN];
};
+struct wep_key {
+ u8 key_index;
+ u8 is_default;
+ u16 length;
+ u8 key[WLAN_KEY_LEN_WEP104];
+};
+
#define KEY_MGMT_ON_HOST 0x03
#define MWIFIEX_AUTH_MODE_AUTO 0xFF
#define BAND_CONFIG_MANUAL 0x00
@@ -90,6 +98,8 @@ struct mwifiex_uap_bss_param {
u16 key_mgmt;
u16 key_mgmt_operation;
struct wpa_param wpa_cfg;
+ struct wep_key wep_cfg[NUM_WEP_KEYS];
+ struct ieee80211_ht_cap ht_cap;
};
enum {
@@ -215,12 +225,6 @@ struct mwifiex_ds_encrypt_key {
u8 wapi_rxpn[WAPI_RXPN_LEN];
};
-struct mwifiex_rate_cfg {
- u32 action;
- u32 is_rate_auto;
- u32 rate;
-};
-
struct mwifiex_power_cfg {
u32 is_power_auto;
u32 power_level;
@@ -267,6 +271,11 @@ struct mwifiex_ds_11n_amsdu_aggr_ctrl {
u16 curr_buf_size;
};
+struct mwifiex_ds_ant_cfg {
+ u32 tx_ant;
+ u32 rx_ant;
+};
+
#define MWIFIEX_NUM_OF_CMD_BUFFER 20
#define MWIFIEX_SIZE_OF_CMD_BUFFER 2048
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index d6b4fb04011f..82e63cee1e97 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1349,22 +1349,16 @@ static int mwifiex_deauthenticate_infra(struct mwifiex_private *priv, u8 *mac)
{
u8 mac_address[ETH_ALEN];
int ret;
- u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
- if (mac) {
- if (!memcmp(mac, zero_mac, sizeof(zero_mac)))
- memcpy((u8 *) &mac_address,
- (u8 *) &priv->curr_bss_params.bss_descriptor.
- mac_address, ETH_ALEN);
- else
- memcpy((u8 *) &mac_address, (u8 *) mac, ETH_ALEN);
- } else {
- memcpy((u8 *) &mac_address, (u8 *) &priv->curr_bss_params.
- bss_descriptor.mac_address, ETH_ALEN);
- }
+ if (!mac || is_zero_ether_addr(mac))
+ memcpy(mac_address,
+ priv->curr_bss_params.bss_descriptor.mac_address,
+ ETH_ALEN);
+ else
+ memcpy(mac_address, mac, ETH_ALEN);
ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_DEAUTHENTICATE,
- HostCmd_ACT_GEN_SET, 0, &mac_address);
+ HostCmd_ACT_GEN_SET, 0, mac_address);
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 3192855c31c0..46803621d015 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -190,7 +190,8 @@ process_start:
adapter->tx_lock_flag)
break;
- if (adapter->scan_processing || adapter->data_sent ||
+ if ((adapter->scan_processing &&
+ !adapter->scan_delay_cnt) || adapter->data_sent ||
mwifiex_wmm_lists_empty(adapter)) {
if (adapter->cmd_sent || adapter->curr_cmd ||
(!is_command_pending(adapter)))
@@ -244,8 +245,8 @@ process_start:
}
}
- if (!adapter->scan_processing && !adapter->data_sent &&
- !mwifiex_wmm_lists_empty(adapter)) {
+ if ((!adapter->scan_processing || adapter->scan_delay_cnt) &&
+ !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) {
mwifiex_wmm_process_tx(adapter);
if (adapter->hs_activated) {
adapter->is_hs_configured = false;
@@ -376,7 +377,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
err_add_intf:
- mwifiex_del_virtual_intf(adapter->wiphy, priv->netdev);
+ mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
rtnl_unlock();
err_init_fw:
pr_debug("info: %s: unregister device\n", __func__);
@@ -843,7 +844,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
rtnl_lock();
if (priv->wdev && priv->netdev)
- mwifiex_del_virtual_intf(adapter->wiphy, priv->netdev);
+ mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
rtnl_unlock();
}
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index bd3b0bf94b9e..e7c2a82fd610 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -79,14 +79,17 @@ enum {
#define SCAN_BEACON_ENTRY_PAD 6
-#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 200
-#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 200
-#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 110
+#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 110
+#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 30
+#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 30
#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI)))
#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
+#define MWIFIEX_MAX_SCAN_DELAY_CNT 50
+#define MWIFIEX_SCAN_DELAY_MSEC 20
+
#define RSN_GTK_OUI_OFFSET 2
#define MWIFIEX_OUI_NOT_PRESENT 0
@@ -482,6 +485,7 @@ struct mwifiex_private {
u16 proberesp_idx;
u16 assocresp_idx;
u16 rsn_idx;
+ struct timer_list scan_delay_timer;
};
enum mwifiex_ba_status {
@@ -674,7 +678,6 @@ struct mwifiex_adapter {
u8 hw_dev_mcs_support;
u8 adhoc_11n_enabled;
u8 sec_chan_offset;
- enum nl80211_channel_type channel_type;
struct mwifiex_dbg dbg;
u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
u32 arp_filter_size;
@@ -686,6 +689,7 @@ struct mwifiex_adapter {
struct completion fw_load;
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
u16 max_mgmt_ie_index;
+ u8 scan_delay_cnt;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -819,9 +823,7 @@ int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv,
u8 *rates);
u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates);
-u8 mwifiex_data_rate_to_index(u32 rate);
u8 mwifiex_is_rate_auto(struct mwifiex_private *priv);
-int mwifiex_get_rate_index(u16 *rateBitmap, int size);
extern u16 region_code_index[MWIFIEX_MAX_REGION_CODE];
void mwifiex_save_curr_bcn(struct mwifiex_private *priv);
void mwifiex_free_curr_bcn(struct mwifiex_private *priv);
@@ -835,6 +837,9 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
int mwifiex_set_secure_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_config,
struct cfg80211_ap_settings *params);
+void mwifiex_set_ht_params(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_cfg,
+ struct cfg80211_ap_settings *params);
/*
* This function checks if the queuing is RA based or not.
@@ -937,16 +942,13 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
int mwifiex_disable_auto_ds(struct mwifiex_private *priv);
-int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
- struct mwifiex_rate_cfg *rate);
+int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, u32 *rate);
int mwifiex_request_scan(struct mwifiex_private *priv,
struct cfg80211_ssid *req_ssid);
-int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
- struct mwifiex_user_scan_cfg *scan_req);
+int mwifiex_scan_networks(struct mwifiex_private *priv,
+ const struct mwifiex_user_scan_cfg *user_scan_in);
int mwifiex_set_radio(struct mwifiex_private *priv, u8 option);
-int mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel);
-
int mwifiex_set_encode(struct mwifiex_private *priv, const u8 *key,
int key_len, u8 key_index, const u8 *mac_addr,
int disable);
@@ -985,9 +987,6 @@ int mwifiex_set_tx_power(struct mwifiex_private *priv,
int mwifiex_main_process(struct mwifiex_adapter *);
-int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel);
-int mwifiex_bss_set_channel(struct mwifiex_private *,
- struct mwifiex_chan_freq_power *cfp);
int mwifiex_get_bss_info(struct mwifiex_private *,
struct mwifiex_bss_info *);
int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
@@ -998,15 +997,17 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc);
-struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
- char *name, enum nl80211_iftype type,
- u32 *flags, struct vif_params *params);
-int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev);
+struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params);
+int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev);
void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config);
int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
- struct cfg80211_ap_settings *params);
+ struct cfg80211_beacon_data *data);
int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
u8 *mwifiex_11d_code_2_region(u8 code);
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 74f045715723..04dc7ca4ac22 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -28,7 +28,10 @@
/* The maximum number of channels the firmware can scan per command */
#define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14
-#define MWIFIEX_CHANNELS_PER_SCAN_CMD 4
+#define MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD 4
+#define MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD 15
+#define MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD 27
+#define MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD 35
/* Memory needed to store a max sized Channel List TLV for a firmware scan */
#define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \
@@ -471,7 +474,7 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
* This routine is used for any scan that is not provided with a
* specific channel list to scan.
*/
-static void
+static int
mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
const struct mwifiex_user_scan_cfg
*user_scan_in,
@@ -528,6 +531,7 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
}
}
+ return chan_idx;
}
/*
@@ -727,6 +731,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
u32 num_probes;
u32 ssid_len;
u32 chan_idx;
+ u32 chan_num;
u32 scan_type;
u16 scan_dur;
u8 channel;
@@ -850,7 +855,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
if (*filtered_scan)
*max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
else
- *max_chan_per_scan = MWIFIEX_CHANNELS_PER_SCAN_CMD;
+ *max_chan_per_scan = MWIFIEX_DEF_CHANNELS_PER_SCAN_CMD;
/* If the input config or adapter has the number of Probes set,
add tlv */
@@ -962,13 +967,28 @@ mwifiex_config_scan(struct mwifiex_private *priv,
dev_dbg(adapter->dev,
"info: Scan: Scanning current channel only\n");
}
-
+ chan_num = chan_idx;
} else {
dev_dbg(adapter->dev,
"info: Scan: Creating full region channel list\n");
- mwifiex_scan_create_channel_list(priv, user_scan_in,
- scan_chan_list,
- *filtered_scan);
+ chan_num = mwifiex_scan_create_channel_list(priv, user_scan_in,
+ scan_chan_list,
+ *filtered_scan);
+ }
+
+ /*
+ * In associated state we will reduce the number of channels scanned per
+ * scan command to avoid any traffic delay/loss. This number is decided
+ * based on total number of channels to be scanned due to constraints
+ * of command buffers.
+ */
+ if (priv->media_connected) {
+ if (chan_num < MWIFIEX_LIMIT_1_CHANNEL_PER_SCAN_CMD)
+ *max_chan_per_scan = 1;
+ else if (chan_num < MWIFIEX_LIMIT_2_CHANNELS_PER_SCAN_CMD)
+ *max_chan_per_scan = 2;
+ else if (chan_num < MWIFIEX_LIMIT_3_CHANNELS_PER_SCAN_CMD)
+ *max_chan_per_scan = 3;
}
}
@@ -1014,14 +1034,12 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
case TLV_TYPE_TSFTIMESTAMP:
dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
"timestamp TLV, len = %d\n", tlv_len);
- *tlv_data = (struct mwifiex_ie_types_data *)
- current_tlv;
+ *tlv_data = current_tlv;
break;
case TLV_TYPE_CHANNELBANDLIST:
dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
" band list TLV, len = %d\n", tlv_len);
- *tlv_data = (struct mwifiex_ie_types_data *)
- current_tlv;
+ *tlv_data = current_tlv;
break;
default:
dev_err(adapter->dev,
@@ -1226,15 +1244,15 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
bss_entry->beacon_buf);
break;
case WLAN_EID_BSS_COEX_2040:
- bss_entry->bcn_bss_co_2040 = (u8 *) (current_ptr +
- sizeof(struct ieee_types_header));
+ bss_entry->bcn_bss_co_2040 = current_ptr +
+ sizeof(struct ieee_types_header);
bss_entry->bss_co_2040_offset = (u16) (current_ptr +
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
break;
case WLAN_EID_EXT_CAPABILITY:
- bss_entry->bcn_ext_cap = (u8 *) (current_ptr +
- sizeof(struct ieee_types_header));
+ bss_entry->bcn_ext_cap = current_ptr +
+ sizeof(struct ieee_types_header);
bss_entry->ext_cap_offset = (u16) (current_ptr +
sizeof(struct ieee_types_header) -
bss_entry->beacon_buf);
@@ -1276,8 +1294,8 @@ mwifiex_radio_type_to_band(u8 radio_type)
* order to send the appropriate scan commands to firmware to populate or
* update the internal driver scan table.
*/
-static int mwifiex_scan_networks(struct mwifiex_private *priv,
- const struct mwifiex_user_scan_cfg *user_scan_in)
+int mwifiex_scan_networks(struct mwifiex_private *priv,
+ const struct mwifiex_user_scan_cfg *user_scan_in)
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
@@ -1342,6 +1360,7 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
adapter->cmd_queued = cmd_node;
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
true);
+ queue_work(adapter->workqueue, &adapter->main_work);
} else {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
@@ -1358,26 +1377,6 @@ static int mwifiex_scan_networks(struct mwifiex_private *priv,
}
/*
- * Sends IOCTL request to start a scan with user configurations.
- *
- * This function allocates the IOCTL request buffer, fills it
- * with requisite parameters and calls the IOCTL handler.
- *
- * Upon completion, it also generates a wireless event to notify
- * applications.
- */
-int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
- struct mwifiex_user_scan_cfg *scan_req)
-{
- int status;
-
- status = mwifiex_scan_networks(priv, scan_req);
- queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
-
- return status;
-}
-
-/*
* This function prepares a scan command to be sent to the firmware.
*
* This uses the scan command configuration sent to the command processing
@@ -1683,8 +1682,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
goto done;
}
if (element_id == WLAN_EID_DS_PARAMS) {
- channel = *(u8 *) (current_ptr +
- sizeof(struct ieee_types_header));
+ channel = *(current_ptr + sizeof(struct ieee_types_header));
break;
}
@@ -1772,14 +1770,23 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
priv->user_scan_cfg = NULL;
}
} else {
- /* Get scan command from scan_pending_q and put to
- cmd_pending_q */
- cmd_node = list_first_entry(&adapter->scan_pending_q,
- struct cmd_ctrl_node, list);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
-
- mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
+ if (!mwifiex_wmm_lists_empty(adapter)) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = 1;
+ mod_timer(&priv->scan_delay_timer, jiffies +
+ msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ } else {
+ /* Get scan command from scan_pending_q and put to
+ cmd_pending_q */
+ cmd_node = list_first_entry(&adapter->scan_pending_q,
+ struct cmd_ctrl_node, list);
+ list_del(&cmd_node->list);
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+ true);
+ }
}
done:
@@ -2010,12 +2017,11 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
if (curr_bss->bcn_bss_co_2040)
curr_bss->bcn_bss_co_2040 =
- (u8 *) (curr_bss->beacon_buf +
- curr_bss->bss_co_2040_offset);
+ (curr_bss->beacon_buf + curr_bss->bss_co_2040_offset);
if (curr_bss->bcn_ext_cap)
- curr_bss->bcn_ext_cap = (u8 *) (curr_bss->beacon_buf +
- curr_bss->ext_cap_offset);
+ curr_bss->bcn_ext_cap = curr_bss->beacon_buf +
+ curr_bss->ext_cap_offset;
}
/*
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index e0377473282f..fc8a9bfa1248 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -978,10 +978,10 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
adapter->event_cause = *(u32 *) skb->data;
- skb_pull(skb, MWIFIEX_EVENT_HEADER_LEN);
-
if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
- memcpy(adapter->event_body, skb->data, skb->len);
+ memcpy(adapter->event_body,
+ skb->data + MWIFIEX_EVENT_HEADER_LEN,
+ skb->len);
/* event cause has been saved to adapter->event_cause */
adapter->event_received = true;
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 40e025da6bc2..df3a33c530cf 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -260,6 +260,56 @@ static int mwifiex_cmd_tx_power_cfg(struct host_cmd_ds_command *cmd,
}
/*
+ * This function prepares command to get RF Tx power.
+ */
+static int mwifiex_cmd_rf_tx_power(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_ds_rf_tx_pwr *txp = &cmd->params.txp;
+
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_tx_pwr)
+ + S_DS_GEN);
+ cmd->command = cpu_to_le16(HostCmd_CMD_RF_TX_PWR);
+ txp->action = cpu_to_le16(cmd_action);
+
+ return 0;
+}
+
+/*
+ * This function prepares command to set rf antenna.
+ */
+static int mwifiex_cmd_rf_antenna(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action,
+ struct mwifiex_ds_ant_cfg *ant_cfg)
+{
+ struct host_cmd_ds_rf_ant_mimo *ant_mimo = &cmd->params.ant_mimo;
+ struct host_cmd_ds_rf_ant_siso *ant_siso = &cmd->params.ant_siso;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_RF_ANTENNA);
+
+ if (cmd_action != HostCmd_ACT_GEN_SET)
+ return 0;
+
+ if (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) {
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_ant_mimo) +
+ S_DS_GEN);
+ ant_mimo->action_tx = cpu_to_le16(HostCmd_ACT_SET_TX);
+ ant_mimo->tx_ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant);
+ ant_mimo->action_rx = cpu_to_le16(HostCmd_ACT_SET_RX);
+ ant_mimo->rx_ant_mode = cpu_to_le16((u16)ant_cfg->rx_ant);
+ } else {
+ cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_rf_ant_siso) +
+ S_DS_GEN);
+ ant_siso->action = cpu_to_le16(HostCmd_ACT_SET_BOTH);
+ ant_siso->ant_mode = cpu_to_le16((u16)ant_cfg->tx_ant);
+ }
+
+ return 0;
+}
+
+/*
* This function prepares command to set Host Sleep configuration.
*
* Preparation includes -
@@ -695,40 +745,6 @@ static int mwifiex_cmd_802_11d_domain_info(struct mwifiex_private *priv,
}
/*
- * This function prepares command to set/get RF channel.
- *
- * Preparation includes -
- * - Setting command ID, action and proper size
- * - Setting RF type and current RF channel (for SET only)
- * - Ensuring correct endian-ness
- */
-static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd,
- u16 cmd_action, u16 *channel)
-{
- struct host_cmd_ds_802_11_rf_channel *rf_chan =
- &cmd->params.rf_channel;
- uint16_t rf_type = le16_to_cpu(rf_chan->rf_type);
-
- cmd->command = cpu_to_le16(HostCmd_CMD_802_11_RF_CHANNEL);
- cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_rf_channel)
- + S_DS_GEN);
-
- if (cmd_action == HostCmd_ACT_GEN_SET) {
- if ((priv->adapter->adhoc_start_band & BAND_A) ||
- (priv->adapter->adhoc_start_band & BAND_AN))
- rf_chan->rf_type =
- cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A);
-
- rf_type = le16_to_cpu(rf_chan->rf_type);
- SET_SECONDARYCHAN(rf_type, priv->adapter->sec_chan_offset);
- rf_chan->current_channel = cpu_to_le16(*channel);
- }
- rf_chan->action = cpu_to_le16(cmd_action);
- return 0;
-}
-
-/*
* This function prepares command to set/get IBSS coalescing status.
*
* Preparation includes -
@@ -793,8 +809,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_mac_reg_access *mac_reg;
cmd->size = cpu_to_le16(sizeof(*mac_reg) + S_DS_GEN);
- mac_reg = (struct host_cmd_ds_mac_reg_access *) &cmd->
- params.mac_reg;
+ mac_reg = &cmd->params.mac_reg;
mac_reg->action = cpu_to_le16(cmd_action);
mac_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -806,8 +821,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_bbp_reg_access *bbp_reg;
cmd->size = cpu_to_le16(sizeof(*bbp_reg) + S_DS_GEN);
- bbp_reg = (struct host_cmd_ds_bbp_reg_access *)
- &cmd->params.bbp_reg;
+ bbp_reg = &cmd->params.bbp_reg;
bbp_reg->action = cpu_to_le16(cmd_action);
bbp_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -819,8 +833,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_rf_reg_access *rf_reg;
cmd->size = cpu_to_le16(sizeof(*rf_reg) + S_DS_GEN);
- rf_reg = (struct host_cmd_ds_rf_reg_access *)
- &cmd->params.rf_reg;
+ rf_reg = &cmd->params.rf_reg;
rf_reg->action = cpu_to_le16(cmd_action);
rf_reg->offset = cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
rf_reg->value = (u8) le32_to_cpu(reg_rw->value);
@@ -831,8 +844,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_pmic_reg_access *pmic_reg;
cmd->size = cpu_to_le16(sizeof(*pmic_reg) + S_DS_GEN);
- pmic_reg = (struct host_cmd_ds_pmic_reg_access *) &cmd->
- params.pmic_reg;
+ pmic_reg = &cmd->params.pmic_reg;
pmic_reg->action = cpu_to_le16(cmd_action);
pmic_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -844,8 +856,7 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
struct host_cmd_ds_rf_reg_access *cau_reg;
cmd->size = cpu_to_le16(sizeof(*cau_reg) + S_DS_GEN);
- cau_reg = (struct host_cmd_ds_rf_reg_access *)
- &cmd->params.rf_reg;
+ cau_reg = &cmd->params.rf_reg;
cau_reg->action = cpu_to_le16(cmd_action);
cau_reg->offset =
cpu_to_le16((u16) le32_to_cpu(reg_rw->offset));
@@ -856,7 +867,6 @@ static int mwifiex_cmd_reg_access(struct host_cmd_ds_command *cmd,
{
struct mwifiex_ds_read_eeprom *rd_eeprom = data_buf;
struct host_cmd_ds_802_11_eeprom_access *cmd_eeprom =
- (struct host_cmd_ds_802_11_eeprom_access *)
&cmd->params.eeprom;
cmd->size = cpu_to_le16(sizeof(*cmd_eeprom) + S_DS_GEN);
@@ -1055,6 +1065,14 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_tx_power_cfg(cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_RF_TX_PWR:
+ ret = mwifiex_cmd_rf_tx_power(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
+ case HostCmd_CMD_RF_ANTENNA:
+ ret = mwifiex_cmd_rf_antenna(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
case HostCmd_CMD_802_11_PS_MODE_ENH:
ret = mwifiex_cmd_enh_power_mode(priv, cmd_ptr, cmd_action,
(uint16_t)cmd_oid, data_buf);
@@ -1117,10 +1135,6 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
S_DS_GEN);
ret = 0;
break;
- case HostCmd_CMD_802_11_RF_CHANNEL:
- ret = mwifiex_cmd_802_11_rf_channel(priv, cmd_ptr, cmd_action,
- data_buf);
- break;
case HostCmd_CMD_FUNC_INIT:
if (priv->adapter->hw_status == MWIFIEX_HW_STATUS_RESET)
priv->adapter->hw_status = MWIFIEX_HW_STATUS_READY;
@@ -1283,7 +1297,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
priv->data_rate = 0;
/* get tx power */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_TXPWR_CFG,
+ ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_RF_TX_PWR,
HostCmd_ACT_GEN_GET, 0, NULL);
if (ret)
return -1;
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index a79ed9bd9695..0b09004ebb25 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -227,7 +227,7 @@ static int mwifiex_ret_get_log(struct mwifiex_private *priv,
struct mwifiex_ds_get_stats *stats)
{
struct host_cmd_ds_802_11_get_log *get_log =
- (struct host_cmd_ds_802_11_get_log *) &resp->params.get_log;
+ &resp->params.get_log;
if (stats) {
stats->mcast_tx_frame = le32_to_cpu(get_log->mcast_tx_frame);
@@ -267,12 +267,10 @@ static int mwifiex_ret_get_log(struct mwifiex_private *priv,
*
* Based on the new rate bitmaps, the function re-evaluates if
* auto data rate has been activated. If not, it sends another
- * query to the firmware to get the current Tx data rate and updates
- * the driver value.
+ * query to the firmware to get the current Tx data rate.
*/
static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp,
- struct mwifiex_rate_cfg *ds_rate)
+ struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_tx_rate_cfg *rate_cfg = &resp->params.tx_rate_cfg;
struct mwifiex_rate_scope *rate_scope;
@@ -280,9 +278,8 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
u16 tlv, tlv_buf_len;
u8 *tlv_buf;
u32 i;
- int ret = 0;
- tlv_buf = (u8 *) ((u8 *) rate_cfg) +
+ tlv_buf = ((u8 *)rate_cfg) +
sizeof(struct host_cmd_ds_tx_rate_cfg);
tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16));
@@ -318,33 +315,11 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
if (priv->is_data_rate_auto)
priv->data_rate = 0;
else
- ret = mwifiex_send_cmd_async(priv,
- HostCmd_CMD_802_11_TX_RATE_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
-
- if (!ds_rate)
- return ret;
-
- if (le16_to_cpu(rate_cfg->action) == HostCmd_ACT_GEN_GET) {
- if (priv->is_data_rate_auto) {
- ds_rate->is_rate_auto = 1;
- return ret;
- }
- ds_rate->rate = mwifiex_get_rate_index(priv->bitmap_rates,
- sizeof(priv->bitmap_rates));
-
- if (ds_rate->rate >= MWIFIEX_RATE_BITMAP_OFDM0 &&
- ds_rate->rate <= MWIFIEX_RATE_BITMAP_OFDM7)
- ds_rate->rate -= (MWIFIEX_RATE_BITMAP_OFDM0 -
- MWIFIEX_RATE_INDEX_OFDM0);
-
- if (ds_rate->rate >= MWIFIEX_RATE_BITMAP_MCS0 &&
- ds_rate->rate <= MWIFIEX_RATE_BITMAP_MCS127)
- ds_rate->rate -= (MWIFIEX_RATE_BITMAP_MCS0 -
- MWIFIEX_RATE_INDEX_MCS0);
- }
+ return mwifiex_send_cmd_async(priv,
+ HostCmd_CMD_802_11_TX_RATE_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL);
- return ret;
+ return 0;
}
/*
@@ -451,6 +426,57 @@ static int mwifiex_ret_tx_power_cfg(struct mwifiex_private *priv,
}
/*
+ * This function handles the command response of get RF Tx power.
+ */
+static int mwifiex_ret_rf_tx_power(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_rf_tx_pwr *txp = &resp->params.txp;
+ u16 action = le16_to_cpu(txp->action);
+
+ priv->tx_power_level = le16_to_cpu(txp->cur_level);
+
+ if (action == HostCmd_ACT_GEN_GET) {
+ priv->max_tx_power_level = txp->max_power;
+ priv->min_tx_power_level = txp->min_power;
+ }
+
+ dev_dbg(priv->adapter->dev,
+ "Current TxPower Level=%d, Max Power=%d, Min Power=%d\n",
+ priv->tx_power_level, priv->max_tx_power_level,
+ priv->min_tx_power_level);
+
+ return 0;
+}
+
+/*
+ * This function handles the command response of set rf antenna
+ */
+static int mwifiex_ret_rf_antenna(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_rf_ant_mimo *ant_mimo = &resp->params.ant_mimo;
+ struct host_cmd_ds_rf_ant_siso *ant_siso = &resp->params.ant_siso;
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ if (adapter->hw_dev_mcs_support == HT_STREAM_2X2)
+ dev_dbg(adapter->dev,
+ "RF_ANT_RESP: Tx action = 0x%x, Tx Mode = 0x%04x"
+ " Rx action = 0x%x, Rx Mode = 0x%04x\n",
+ le16_to_cpu(ant_mimo->action_tx),
+ le16_to_cpu(ant_mimo->tx_ant_mode),
+ le16_to_cpu(ant_mimo->action_rx),
+ le16_to_cpu(ant_mimo->rx_ant_mode));
+ else
+ dev_dbg(adapter->dev,
+ "RF_ANT_RESP: action = 0x%x, Mode = 0x%04x\n",
+ le16_to_cpu(ant_siso->action),
+ le16_to_cpu(ant_siso->ant_mode));
+
+ return 0;
+}
+
+/*
* This function handles the command response of set/get MAC address.
*
* Handling includes saving the MAC address in driver.
@@ -605,34 +631,6 @@ static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
}
/*
- * This function handles the command response of get RF channel.
- *
- * Handling includes changing the header fields into CPU format
- * and saving the new channel in driver.
- */
-static int mwifiex_ret_802_11_rf_channel(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp,
- u16 *data_buf)
-{
- struct host_cmd_ds_802_11_rf_channel *rf_channel =
- &resp->params.rf_channel;
- u16 new_channel = le16_to_cpu(rf_channel->current_channel);
-
- if (priv->curr_bss_params.bss_descriptor.channel != new_channel) {
- dev_dbg(priv->adapter->dev, "cmd: Channel Switch: %d to %d\n",
- priv->curr_bss_params.bss_descriptor.channel,
- new_channel);
- /* Update the channel again */
- priv->curr_bss_params.bss_descriptor.channel = new_channel;
- }
-
- if (data_buf)
- *data_buf = new_channel;
-
- return 0;
-}
-
-/*
* This function handles the command response of get extended version.
*
* Handling includes forming the extended version string and sending it
@@ -679,39 +677,33 @@ static int mwifiex_ret_reg_access(u16 type, struct host_cmd_ds_command *resp,
eeprom = data_buf;
switch (type) {
case HostCmd_CMD_MAC_REG_ACCESS:
- r.mac = (struct host_cmd_ds_mac_reg_access *)
- &resp->params.mac_reg;
+ r.mac = &resp->params.mac_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.mac->offset));
reg_rw->value = r.mac->value;
break;
case HostCmd_CMD_BBP_REG_ACCESS:
- r.bbp = (struct host_cmd_ds_bbp_reg_access *)
- &resp->params.bbp_reg;
+ r.bbp = &resp->params.bbp_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.bbp->offset));
reg_rw->value = cpu_to_le32((u32) r.bbp->value);
break;
case HostCmd_CMD_RF_REG_ACCESS:
- r.rf = (struct host_cmd_ds_rf_reg_access *)
- &resp->params.rf_reg;
+ r.rf = &resp->params.rf_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
reg_rw->value = cpu_to_le32((u32) r.bbp->value);
break;
case HostCmd_CMD_PMIC_REG_ACCESS:
- r.pmic = (struct host_cmd_ds_pmic_reg_access *)
- &resp->params.pmic_reg;
+ r.pmic = &resp->params.pmic_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.pmic->offset));
reg_rw->value = cpu_to_le32((u32) r.pmic->value);
break;
case HostCmd_CMD_CAU_REG_ACCESS:
- r.rf = (struct host_cmd_ds_rf_reg_access *)
- &resp->params.rf_reg;
+ r.rf = &resp->params.rf_reg;
reg_rw->offset = cpu_to_le32((u32) le16_to_cpu(r.rf->offset));
reg_rw->value = cpu_to_le32((u32) r.rf->value);
break;
case HostCmd_CMD_802_11_EEPROM_ACCESS:
- r.eeprom = (struct host_cmd_ds_802_11_eeprom_access *)
- &resp->params.eeprom;
+ r.eeprom = &resp->params.eeprom;
pr_debug("info: EEPROM read len=%x\n", r.eeprom->byte_count);
if (le16_to_cpu(eeprom->byte_count) <
le16_to_cpu(r.eeprom->byte_count)) {
@@ -787,7 +779,7 @@ static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
struct mwifiex_ds_misc_subsc_evt *sub_event)
{
struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
- (struct host_cmd_ds_802_11_subsc_evt *)&resp->params.subsc_evt;
+ &resp->params.subsc_evt;
/* For every subscribe event command (Get/Set/Clear), FW reports the
* current set of subscribed events*/
@@ -833,7 +825,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
ret = mwifiex_ret_mac_multicast_adr(priv, resp);
break;
case HostCmd_CMD_TX_RATE_CFG:
- ret = mwifiex_ret_tx_rate_cfg(priv, resp, data_buf);
+ ret = mwifiex_ret_tx_rate_cfg(priv, resp);
break;
case HostCmd_CMD_802_11_SCAN:
ret = mwifiex_ret_802_11_scan(priv, resp);
@@ -847,6 +839,12 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_TXPWR_CFG:
ret = mwifiex_ret_tx_power_cfg(priv, resp);
break;
+ case HostCmd_CMD_RF_TX_PWR:
+ ret = mwifiex_ret_rf_tx_power(priv, resp);
+ break;
+ case HostCmd_CMD_RF_ANTENNA:
+ ret = mwifiex_ret_rf_antenna(priv, resp);
+ break;
case HostCmd_CMD_802_11_PS_MODE_ENH:
ret = mwifiex_ret_enh_power_mode(priv, resp, data_buf);
break;
@@ -878,9 +876,6 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_802_11_TX_RATE_QUERY:
ret = mwifiex_ret_802_11_tx_rate_query(priv, resp);
break;
- case HostCmd_CMD_802_11_RF_CHANNEL:
- ret = mwifiex_ret_802_11_rf_channel(priv, resp, data_buf);
- break;
case HostCmd_CMD_VERSION_EXT:
ret = mwifiex_ret_ver_ext(priv, resp, data_buf);
break;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 4ace5a3dcd23..b8614a825460 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -406,9 +406,9 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
break;
case EVENT_UAP_STA_ASSOC:
- skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
memset(&sinfo, 0, sizeof(sinfo));
- event = (struct mwifiex_assoc_event *)adapter->event_skb->data;
+ event = (struct mwifiex_assoc_event *)
+ (adapter->event_body + MWIFIEX_UAP_EVENT_EXTRA_HEADER);
if (le16_to_cpu(event->type) == TLV_TYPE_UAP_MGMT_FRAME) {
len = -1;
@@ -422,7 +422,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (len != -1) {
sinfo.filled = STATION_INFO_ASSOC_REQ_IES;
- sinfo.assoc_req_ies = (u8 *)&event->data[len];
+ sinfo.assoc_req_ies = &event->data[len];
len = (u8 *)sinfo.assoc_req_ies -
(u8 *)&event->frame_control;
sinfo.assoc_req_ies_len =
@@ -433,9 +433,8 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
GFP_KERNEL);
break;
case EVENT_UAP_STA_DEAUTH:
- skb_pull(adapter->event_skb, MWIFIEX_UAP_EVENT_EXTRA_HEADER);
- cfg80211_del_sta(priv->netdev, adapter->event_skb->data,
- GFP_KERNEL);
+ cfg80211_del_sta(priv->netdev, adapter->event_body +
+ MWIFIEX_UAP_EVENT_EXTRA_HEADER, GFP_KERNEL);
break;
case EVENT_UAP_BSS_IDLE:
priv->media_connected = false;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 106c449477b2..fb2136089a22 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -66,9 +66,6 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev, "cmd pending\n");
atomic_inc(&adapter->cmd_pending);
- /* Status pending, wake up main process */
- queue_work(adapter->workqueue, &adapter->main_work);
-
/* Wait for completion */
wait_event_interruptible(adapter->cmd_wait_q.wait,
*(cmd_queued->condition));
@@ -500,297 +497,24 @@ int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
EXPORT_SYMBOL_GPL(mwifiex_disable_auto_ds);
/*
- * IOCTL request handler to set/get active channel.
- *
- * This function performs validity checking on channel/frequency
- * compatibility and returns failure if not valid.
- */
-int mwifiex_bss_set_channel(struct mwifiex_private *priv,
- struct mwifiex_chan_freq_power *chan)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- struct mwifiex_chan_freq_power *cfp = NULL;
-
- if (!chan)
- return -1;
-
- if (!chan->channel && !chan->freq)
- return -1;
- if (adapter->adhoc_start_band & BAND_AN)
- adapter->adhoc_start_band = BAND_G | BAND_B | BAND_GN;
- else if (adapter->adhoc_start_band & BAND_A)
- adapter->adhoc_start_band = BAND_G | BAND_B;
- if (chan->channel) {
- if (chan->channel <= MAX_CHANNEL_BAND_BG)
- cfp = mwifiex_get_cfp(priv, 0, (u16) chan->channel, 0);
- if (!cfp) {
- cfp = mwifiex_get_cfp(priv, BAND_A,
- (u16) chan->channel, 0);
- if (cfp) {
- if (adapter->adhoc_11n_enabled)
- adapter->adhoc_start_band = BAND_A
- | BAND_AN;
- else
- adapter->adhoc_start_band = BAND_A;
- }
- }
- } else {
- if (chan->freq <= MAX_FREQUENCY_BAND_BG)
- cfp = mwifiex_get_cfp(priv, 0, 0, chan->freq);
- if (!cfp) {
- cfp = mwifiex_get_cfp(priv, BAND_A, 0, chan->freq);
- if (cfp) {
- if (adapter->adhoc_11n_enabled)
- adapter->adhoc_start_band = BAND_A
- | BAND_AN;
- else
- adapter->adhoc_start_band = BAND_A;
- }
- }
- }
- if (!cfp || !cfp->channel) {
- dev_err(adapter->dev, "invalid channel/freq\n");
- return -1;
- }
- priv->adhoc_channel = (u8) cfp->channel;
- chan->channel = cfp->channel;
- chan->freq = cfp->freq;
-
- return 0;
-}
-
-/*
- * IOCTL request handler to set/get Ad-Hoc channel.
- *
- * This function prepares the correct firmware command and
- * issues it to set or get the ad-hoc channel.
- */
-static int mwifiex_bss_ioctl_ibss_channel(struct mwifiex_private *priv,
- u16 action, u16 *channel)
-{
- if (action == HostCmd_ACT_GEN_GET) {
- if (!priv->media_connected) {
- *channel = priv->adhoc_channel;
- return 0;
- }
- } else {
- priv->adhoc_channel = (u8) *channel;
- }
-
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_RF_CHANNEL,
- action, 0, channel);
-}
-
-/*
- * IOCTL request handler to change Ad-Hoc channel.
- *
- * This function allocates the IOCTL request buffer, fills it
- * with requisite parameters and calls the IOCTL handler.
- *
- * The function follows the following steps to perform the change -
- * - Get current IBSS information
- * - Get current channel
- * - If no change is required, return
- * - If not connected, change channel and return
- * - If connected,
- * - Disconnect
- * - Change channel
- * - Perform specific SSID scan with same SSID
- * - Start/Join the IBSS
- */
-int
-mwifiex_drv_change_adhoc_chan(struct mwifiex_private *priv, u16 channel)
-{
- int ret;
- struct mwifiex_bss_info bss_info;
- struct mwifiex_ssid_bssid ssid_bssid;
- u16 curr_chan = 0;
- struct cfg80211_bss *bss = NULL;
- struct ieee80211_channel *chan;
- enum ieee80211_band band;
-
- memset(&bss_info, 0, sizeof(bss_info));
-
- /* Get BSS information */
- if (mwifiex_get_bss_info(priv, &bss_info))
- return -1;
-
- /* Get current channel */
- ret = mwifiex_bss_ioctl_ibss_channel(priv, HostCmd_ACT_GEN_GET,
- &curr_chan);
-
- if (curr_chan == channel) {
- ret = 0;
- goto done;
- }
- dev_dbg(priv->adapter->dev, "cmd: updating channel from %d to %d\n",
- curr_chan, channel);
-
- if (!bss_info.media_connected) {
- ret = 0;
- goto done;
- }
-
- /* Do disonnect */
- memset(&ssid_bssid, 0, ETH_ALEN);
- ret = mwifiex_deauthenticate(priv, ssid_bssid.bssid);
-
- ret = mwifiex_bss_ioctl_ibss_channel(priv, HostCmd_ACT_GEN_SET,
- &channel);
-
- /* Do specific SSID scanning */
- if (mwifiex_request_scan(priv, &bss_info.ssid)) {
- ret = -1;
- goto done;
- }
-
- band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
- chan = __ieee80211_get_channel(priv->wdev->wiphy,
- ieee80211_channel_to_frequency(channel,
- band));
-
- /* Find the BSS we want using available scan results */
- bss = cfg80211_get_bss(priv->wdev->wiphy, chan, bss_info.bssid,
- bss_info.ssid.ssid, bss_info.ssid.ssid_len,
- WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
- if (!bss)
- wiphy_warn(priv->wdev->wiphy, "assoc: bss %pM not in scan results\n",
- bss_info.bssid);
-
- ret = mwifiex_bss_start(priv, bss, &bss_info.ssid);
-done:
- return ret;
-}
-
-/*
- * IOCTL request handler to get rate.
- *
- * This function prepares the correct firmware command and
- * issues it to get the current rate if it is connected,
- * otherwise, the function returns the lowest supported rate
- * for the band.
- */
-static int mwifiex_rate_ioctl_get_rate_value(struct mwifiex_private *priv,
- struct mwifiex_rate_cfg *rate_cfg)
-{
- rate_cfg->is_rate_auto = priv->is_data_rate_auto;
- return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
- HostCmd_ACT_GEN_GET, 0, NULL);
-}
-
-/*
- * IOCTL request handler to set rate.
- *
- * This function prepares the correct firmware command and
- * issues it to set the current rate.
- *
- * The function also performs validation checking on the supplied value.
- */
-static int mwifiex_rate_ioctl_set_rate_value(struct mwifiex_private *priv,
- struct mwifiex_rate_cfg *rate_cfg)
-{
- u8 rates[MWIFIEX_SUPPORTED_RATES];
- u8 *rate;
- int rate_index, ret;
- u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
- u32 i;
- struct mwifiex_adapter *adapter = priv->adapter;
-
- if (rate_cfg->is_rate_auto) {
- memset(bitmap_rates, 0, sizeof(bitmap_rates));
- /* Support all HR/DSSS rates */
- bitmap_rates[0] = 0x000F;
- /* Support all OFDM rates */
- bitmap_rates[1] = 0x00FF;
- /* Support all HT-MCSs rate */
- for (i = 0; i < ARRAY_SIZE(priv->bitmap_rates) - 3; i++)
- bitmap_rates[i + 2] = 0xFFFF;
- bitmap_rates[9] = 0x3FFF;
- } else {
- memset(rates, 0, sizeof(rates));
- mwifiex_get_active_data_rates(priv, rates);
- rate = rates;
- for (i = 0; (rate[i] && i < MWIFIEX_SUPPORTED_RATES); i++) {
- dev_dbg(adapter->dev, "info: rate=%#x wanted=%#x\n",
- rate[i], rate_cfg->rate);
- if ((rate[i] & 0x7f) == (rate_cfg->rate & 0x7f))
- break;
- }
- if ((i == MWIFIEX_SUPPORTED_RATES) || !rate[i]) {
- dev_err(adapter->dev, "fixed data rate %#x is out "
- "of range\n", rate_cfg->rate);
- return -1;
- }
- memset(bitmap_rates, 0, sizeof(bitmap_rates));
-
- rate_index = mwifiex_data_rate_to_index(rate_cfg->rate);
-
- /* Only allow b/g rates to be set */
- if (rate_index >= MWIFIEX_RATE_INDEX_HRDSSS0 &&
- rate_index <= MWIFIEX_RATE_INDEX_HRDSSS3) {
- bitmap_rates[0] = 1 << rate_index;
- } else {
- rate_index -= 1; /* There is a 0x00 in the table */
- if (rate_index >= MWIFIEX_RATE_INDEX_OFDM0 &&
- rate_index <= MWIFIEX_RATE_INDEX_OFDM7)
- bitmap_rates[1] = 1 << (rate_index -
- MWIFIEX_RATE_INDEX_OFDM0);
- }
- }
-
- ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TX_RATE_CFG,
- HostCmd_ACT_GEN_SET, 0, bitmap_rates);
-
- return ret;
-}
-
-/*
- * IOCTL request handler to set/get rate.
- *
- * This function can be used to set/get either the rate value or the
- * rate index.
- */
-static int mwifiex_rate_ioctl_cfg(struct mwifiex_private *priv,
- struct mwifiex_rate_cfg *rate_cfg)
-{
- int status;
-
- if (!rate_cfg)
- return -1;
-
- if (rate_cfg->action == HostCmd_ACT_GEN_GET)
- status = mwifiex_rate_ioctl_get_rate_value(priv, rate_cfg);
- else
- status = mwifiex_rate_ioctl_set_rate_value(priv, rate_cfg);
-
- return status;
-}
-
-/*
* Sends IOCTL request to get the data rate.
*
* This function allocates the IOCTL request buffer, fills it
* with requisite parameters and calls the IOCTL handler.
*/
-int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
- struct mwifiex_rate_cfg *rate)
+int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, u32 *rate)
{
int ret;
- memset(rate, 0, sizeof(struct mwifiex_rate_cfg));
- rate->action = HostCmd_ACT_GEN_GET;
- ret = mwifiex_rate_ioctl_cfg(priv, rate);
+ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY,
+ HostCmd_ACT_GEN_GET, 0, NULL);
if (!ret) {
- if (rate->is_rate_auto)
- rate->rate = mwifiex_index_to_data_rate(priv,
- priv->tx_rate,
- priv->tx_htinfo
- );
+ if (priv->is_data_rate_auto)
+ *rate = mwifiex_index_to_data_rate(priv, priv->tx_rate,
+ priv->tx_htinfo);
else
- rate->rate = priv->data_rate;
- } else {
- ret = -1;
+ *rate = priv->data_rate;
}
return ret;
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 89f9a2a45de3..f40e93fe894a 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -26,6 +26,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
struct mwifiex_uap_bss_param *bss_config,
struct cfg80211_ap_settings *params) {
int i;
+ struct mwifiex_wep_key wep_key;
if (!params->privacy) {
bss_config->protocol = PROTOCOL_NO_SECURITY;
@@ -65,7 +66,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
- bss_config->protocol = PROTOCOL_WPA2;
+ bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_EAP;
}
break;
@@ -77,7 +78,7 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
}
if (params->crypto.wpa_versions &
NL80211_WPA_VERSION_2) {
- bss_config->protocol = PROTOCOL_WPA2;
+ bss_config->protocol |= PROTOCOL_WPA2;
bss_config->key_mgmt = KEY_MGMT_PSK;
}
break;
@@ -91,10 +92,19 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
case WLAN_CIPHER_SUITE_WEP104:
break;
case WLAN_CIPHER_SUITE_TKIP:
- bss_config->wpa_cfg.pairwise_cipher_wpa = CIPHER_TKIP;
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ bss_config->wpa_cfg.pairwise_cipher_wpa |=
+ CIPHER_TKIP;
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
+ CIPHER_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
- bss_config->wpa_cfg.pairwise_cipher_wpa2 =
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_1)
+ bss_config->wpa_cfg.pairwise_cipher_wpa |=
+ CIPHER_AES_CCMP;
+ if (params->crypto.wpa_versions & NL80211_WPA_VERSION_2)
+ bss_config->wpa_cfg.pairwise_cipher_wpa2 |=
CIPHER_AES_CCMP;
default:
break;
@@ -104,6 +114,27 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
switch (params->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
+ if (priv->sec_info.wep_enabled) {
+ bss_config->protocol = PROTOCOL_STATIC_WEP;
+ bss_config->key_mgmt = KEY_MGMT_NONE;
+ bss_config->wpa_cfg.length = 0;
+
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ wep_key = priv->wep_key[i];
+ bss_config->wep_cfg[i].key_index = i;
+
+ if (priv->wep_key_curr_index == i)
+ bss_config->wep_cfg[i].is_default = 1;
+ else
+ bss_config->wep_cfg[i].is_default = 0;
+
+ bss_config->wep_cfg[i].length =
+ wep_key.key_length;
+ memcpy(&bss_config->wep_cfg[i].key,
+ &wep_key.key_material,
+ wep_key.key_length);
+ }
+ }
break;
case WLAN_CIPHER_SUITE_TKIP:
bss_config->wpa_cfg.group_cipher = CIPHER_TKIP;
@@ -118,6 +149,33 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv,
return 0;
}
+/* This function updates 11n related parameters from IE and sets them into
+ * bss_config structure.
+ */
+void
+mwifiex_set_ht_params(struct mwifiex_private *priv,
+ struct mwifiex_uap_bss_param *bss_cfg,
+ struct cfg80211_ap_settings *params)
+{
+ const u8 *ht_ie;
+
+ if (!ISSUPP_11NENABLED(priv->adapter->fw_cap_info))
+ return;
+
+ ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, params->beacon.tail,
+ params->beacon.tail_len);
+ if (ht_ie) {
+ memcpy(&bss_cfg->ht_cap, ht_ie + 2,
+ sizeof(struct ieee80211_ht_cap));
+ } else {
+ memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap));
+ bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP);
+ bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU;
+ }
+
+ return;
+}
+
/* This function initializes some of mwifiex_uap_bss_param variables.
* This helps FW in ignoring invalid values. These values may or may not
* be get updated to valid ones at later stage.
@@ -135,6 +193,120 @@ void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config)
}
/* This function parses BSS related parameters from structure
+ * and prepares TLVs specific to WPA/WPA2 security.
+ * These TLVs are appended to command buffer.
+ */
+static void
+mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
+{
+ struct host_cmd_tlv_pwk_cipher *pwk_cipher;
+ struct host_cmd_tlv_gwk_cipher *gwk_cipher;
+ struct host_cmd_tlv_passphrase *passphrase;
+ struct host_cmd_tlv_akmp *tlv_akmp;
+ struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
+ u16 cmd_size = *param_size;
+ u8 *tlv = *tlv_buf;
+
+ tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
+ tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
+ tlv_akmp->tlv.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
+ sizeof(struct host_cmd_tlv));
+ tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
+ tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
+ cmd_size += sizeof(struct host_cmd_tlv_akmp);
+ tlv += sizeof(struct host_cmd_tlv_akmp);
+
+ if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
+ pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
+ pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
+ pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
+ cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
+ }
+
+ if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
+ pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
+ pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
+ pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
+ cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
+ }
+
+ if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
+ gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
+ gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
+ gwk_cipher->tlv.len =
+ cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
+ sizeof(struct host_cmd_tlv));
+ gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
+ cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
+ tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
+ }
+
+ if (bss_cfg->wpa_cfg.length) {
+ passphrase = (struct host_cmd_tlv_passphrase *)tlv;
+ passphrase->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
+ passphrase->tlv.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
+ memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
+ bss_cfg->wpa_cfg.length);
+ cmd_size += sizeof(struct host_cmd_tlv) +
+ bss_cfg->wpa_cfg.length;
+ tlv += sizeof(struct host_cmd_tlv) + bss_cfg->wpa_cfg.length;
+ }
+
+ *param_size = cmd_size;
+ *tlv_buf = tlv;
+
+ return;
+}
+
+/* This function parses BSS related parameters from structure
+ * and prepares TLVs specific to WEP encryption.
+ * These TLVs are appended to command buffer.
+ */
+static void
+mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
+{
+ struct host_cmd_tlv_wep_key *wep_key;
+ u16 cmd_size = *param_size;
+ int i;
+ u8 *tlv = *tlv_buf;
+ struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
+
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ if (bss_cfg->wep_cfg[i].length &&
+ (bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
+ bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
+ wep_key = (struct host_cmd_tlv_wep_key *)tlv;
+ wep_key->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
+ wep_key->tlv.len =
+ cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
+ wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
+ wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
+ memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
+ bss_cfg->wep_cfg[i].length);
+ cmd_size += sizeof(struct host_cmd_tlv) + 2 +
+ bss_cfg->wep_cfg[i].length;
+ tlv += sizeof(struct host_cmd_tlv) + 2 +
+ bss_cfg->wep_cfg[i].length;
+ }
+ }
+
+ *param_size = cmd_size;
+ *tlv_buf = tlv;
+
+ return;
+}
+
+/* This function parses BSS related parameters from structure
* and prepares TLVs. These TLVs are appended to command buffer.
*/
static int
@@ -148,12 +320,9 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
struct host_cmd_tlv_frag_threshold *frag_threshold;
struct host_cmd_tlv_rts_threshold *rts_threshold;
struct host_cmd_tlv_retry_limit *retry_limit;
- struct host_cmd_tlv_pwk_cipher *pwk_cipher;
- struct host_cmd_tlv_gwk_cipher *gwk_cipher;
struct host_cmd_tlv_encrypt_protocol *encrypt_protocol;
struct host_cmd_tlv_auth_type *auth_type;
- struct host_cmd_tlv_passphrase *passphrase;
- struct host_cmd_tlv_akmp *tlv_akmp;
+ struct mwifiex_ie_types_htcap *htcap;
struct mwifiex_uap_bss_param *bss_cfg = cmd_buf;
u16 cmd_size = *param_size;
@@ -243,70 +412,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
}
if ((bss_cfg->protocol & PROTOCOL_WPA) ||
(bss_cfg->protocol & PROTOCOL_WPA2) ||
- (bss_cfg->protocol & PROTOCOL_EAP)) {
- tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
- tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
- tlv_akmp->tlv.len =
- cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
- sizeof(struct host_cmd_tlv));
- tlv_akmp->key_mgmt_operation =
- cpu_to_le16(bss_cfg->key_mgmt_operation);
- tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
- cmd_size += sizeof(struct host_cmd_tlv_akmp);
- tlv += sizeof(struct host_cmd_tlv_akmp);
-
- if (bss_cfg->wpa_cfg.pairwise_cipher_wpa &
- VALID_CIPHER_BITMAP) {
- pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
- pwk_cipher->tlv.type =
- cpu_to_le16(TLV_TYPE_PWK_CIPHER);
- pwk_cipher->tlv.len = cpu_to_le16(
- sizeof(struct host_cmd_tlv_pwk_cipher) -
- sizeof(struct host_cmd_tlv));
- pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
- pwk_cipher->cipher =
- bss_cfg->wpa_cfg.pairwise_cipher_wpa;
- cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
- tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
- }
- if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 &
- VALID_CIPHER_BITMAP) {
- pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
- pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
- pwk_cipher->tlv.len = cpu_to_le16(
- sizeof(struct host_cmd_tlv_pwk_cipher) -
- sizeof(struct host_cmd_tlv));
- pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
- pwk_cipher->cipher =
- bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
- cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
- tlv += sizeof(struct host_cmd_tlv_pwk_cipher);
- }
- if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
- gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
- gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
- gwk_cipher->tlv.len = cpu_to_le16(
- sizeof(struct host_cmd_tlv_gwk_cipher) -
- sizeof(struct host_cmd_tlv));
- gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
- cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
- tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
- }
- if (bss_cfg->wpa_cfg.length) {
- passphrase = (struct host_cmd_tlv_passphrase *)tlv;
- passphrase->tlv.type =
- cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
- passphrase->tlv.len =
- cpu_to_le16(bss_cfg->wpa_cfg.length);
- memcpy(passphrase->passphrase,
- bss_cfg->wpa_cfg.passphrase,
- bss_cfg->wpa_cfg.length);
- cmd_size += sizeof(struct host_cmd_tlv) +
- bss_cfg->wpa_cfg.length;
- tlv += sizeof(struct host_cmd_tlv) +
- bss_cfg->wpa_cfg.length;
- }
- }
+ (bss_cfg->protocol & PROTOCOL_EAP))
+ mwifiex_uap_bss_wpa(&tlv, cmd_buf, &cmd_size);
+ else
+ mwifiex_uap_bss_wep(&tlv, cmd_buf, &cmd_size);
+
if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
(bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
auth_type = (struct host_cmd_tlv_auth_type *)tlv;
@@ -330,6 +440,25 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
}
+ if (bss_cfg->ht_cap.cap_info) {
+ htcap = (struct mwifiex_ie_types_htcap *)tlv;
+ htcap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ htcap->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ htcap->ht_cap.cap_info = bss_cfg->ht_cap.cap_info;
+ htcap->ht_cap.ampdu_params_info =
+ bss_cfg->ht_cap.ampdu_params_info;
+ memcpy(&htcap->ht_cap.mcs, &bss_cfg->ht_cap.mcs,
+ sizeof(struct ieee80211_mcs_info));
+ htcap->ht_cap.extended_ht_cap_info =
+ bss_cfg->ht_cap.extended_ht_cap_info;
+ htcap->ht_cap.tx_BF_cap_info = bss_cfg->ht_cap.tx_BF_cap_info;
+ htcap->ht_cap.antenna_selection_info =
+ bss_cfg->ht_cap.antenna_selection_info;
+ cmd_size += sizeof(struct mwifiex_ie_types_htcap);
+ tlv += sizeof(struct mwifiex_ie_types_htcap);
+ }
+
*param_size = cmd_size;
return 0;
@@ -421,33 +550,3 @@ int mwifiex_uap_prepare_cmd(struct mwifiex_private *priv, u16 cmd_no,
return 0;
}
-
-/* This function sets the RF channel for AP.
- *
- * This function populates channel information in AP config structure
- * and sends command to configure channel information in AP.
- */
-int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
-{
- struct mwifiex_uap_bss_param *bss_cfg;
- struct wiphy *wiphy = priv->wdev->wiphy;
-
- bss_cfg = kzalloc(sizeof(struct mwifiex_uap_bss_param), GFP_KERNEL);
- if (!bss_cfg)
- return -ENOMEM;
-
- mwifiex_set_sys_config_invalid_data(bss_cfg);
- bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
- bss_cfg->channel = channel;
-
- if (mwifiex_send_cmd_async(priv, HostCmd_CMD_UAP_SYS_CONFIG,
- HostCmd_ACT_GEN_SET,
- UAP_BSS_PARAMS_I, bss_cfg)) {
- wiphy_err(wiphy, "Failed to set the uAP channel\n");
- kfree(bss_cfg);
- return -1;
- }
-
- kfree(bss_cfg);
- return 0;
-}
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index 49ebf20c56eb..22a5916564b8 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -49,6 +49,7 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
struct device *dev = adapter->dev;
u32 recv_type;
__le32 tmp;
+ int ret;
if (adapter->hs_activated)
mwifiex_process_hs_config(adapter);
@@ -69,16 +70,19 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
case MWIFIEX_USB_TYPE_CMD:
if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
dev_err(dev, "CMD: skb->len too large\n");
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
} else if (!adapter->curr_cmd) {
dev_dbg(dev, "CMD: no curr_cmd\n");
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(
adapter, skb->data,
skb->len);
- return 0;
+ ret = 0;
+ goto exit_restore_skb;
}
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
}
adapter->curr_cmd->resp_skb = skb;
@@ -87,20 +91,22 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
case MWIFIEX_USB_TYPE_EVENT:
if (skb->len < sizeof(u32)) {
dev_err(dev, "EVENT: skb->len too small\n");
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
}
skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
adapter->event_cause = le32_to_cpu(tmp);
- skb_pull(skb, sizeof(u32));
dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
if (skb->len > MAX_EVENT_SIZE) {
dev_err(dev, "EVENT: event body too large\n");
- return -1;
+ ret = -1;
+ goto exit_restore_skb;
}
- skb_copy_from_linear_data(skb, adapter->event_body,
- skb->len);
+ memcpy(adapter->event_body, skb->data +
+ MWIFIEX_EVENT_HEADER_LEN, skb->len);
+
adapter->event_received = true;
adapter->event_skb = skb;
break;
@@ -124,6 +130,12 @@ static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
}
return -EINPROGRESS;
+
+exit_restore_skb:
+ /* The buffer will be reused for further cmds/events */
+ skb_push(skb, INTF_HEADER_LEN);
+
+ return ret;
}
static void mwifiex_usb_rx_complete(struct urb *urb)
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index f3fc65515857..3fa4d4176993 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -404,6 +404,8 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
priv->add_ba_param.tx_win_size = MWIFIEX_AMPDU_DEF_TXWINSIZE;
priv->add_ba_param.rx_win_size = MWIFIEX_AMPDU_DEF_RXWINSIZE;
+ mwifiex_reset_11n_rx_seq_num(priv);
+
atomic_set(&priv->wmm.tx_pkts_queued, 0);
atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}
@@ -1221,6 +1223,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
if (!ptr->is_11n_enabled ||
mwifiex_is_ba_stream_setup(priv, ptr, tid) ||
+ priv->wps.session_enable ||
((priv->sec_info.wpa_enabled ||
priv->sec_info.wpa2_enabled) &&
!priv->wpa_is_gtk_set)) {
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index cf7bdc66f822..224e03ade145 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1665,7 +1665,9 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data(wh->frame_control)) {
- sta = info->control.sta;
+ rcu_read_lock();
+ sta = ieee80211_find_sta_by_ifaddr(hw, wh->addr1,
+ wh->addr2);
if (sta) {
sta_info = MWL8K_STA(sta);
BUG_ON(sta_info == NULL);
@@ -1682,6 +1684,7 @@ mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
sta_info->is_ampdu_allowed = true;
}
}
+ rcu_read_unlock();
}
ieee80211_tx_info_clear_status(info);
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index f7b15b8934fa..7b751fba7e1f 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -138,7 +138,7 @@ static int orinoco_change_vif(struct wiphy *wiphy, struct net_device *dev,
return err;
}
-static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
+static int orinoco_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
struct orinoco_private *priv = wiphy_priv(wiphy);
@@ -160,10 +160,9 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
return err;
}
-static int orinoco_set_channel(struct wiphy *wiphy,
- struct net_device *netdev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+static int orinoco_set_monitor_channel(struct wiphy *wiphy,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
{
struct orinoco_private *priv = wiphy_priv(wiphy);
int err = 0;
@@ -286,7 +285,7 @@ static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed)
const struct cfg80211_ops orinoco_cfg_ops = {
.change_virtual_intf = orinoco_change_vif,
- .set_channel = orinoco_set_channel,
+ .set_monitor_channel = orinoco_set_monitor_channel,
.scan = orinoco_scan,
.set_wiphy_params = orinoco_set_wiphy_params,
};
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index fa8ce5104781..14037092ba89 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -857,7 +857,7 @@ good_eeprom:
wiphy_warn(dev->wiphy,
"Invalid hwaddr! Using randomly generated MAC addr\n");
- random_ether_addr(perm_addr);
+ eth_random_addr(perm_addr);
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
}
@@ -905,7 +905,7 @@ int p54_read_eeprom(struct ieee80211_hw *dev)
while (eeprom_size) {
blocksize = min(eeprom_size, maxblocksize);
- ret = p54_download_eeprom(priv, (void *) (eeprom + offset),
+ ret = p54_download_eeprom(priv, eeprom + offset,
offset, blocksize);
if (unlikely(ret))
goto free;
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 18e82b31afa6..9ba85106eec0 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -478,7 +478,7 @@ int p54_scan(struct p54_common *priv, u16 mode, u16 dwell)
if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) {
memcpy(&body->longbow.curve_data,
- (void *) entry + sizeof(__le16),
+ entry + sizeof(__le16),
priv->curve_data->entry_size);
} else {
struct p54_scan_body *chan = &body->normal;
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 82a1cac920bd..f38786e02623 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -422,11 +422,11 @@ static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb)
* Clear manually, ieee80211_tx_info_clear_status would
* clear the counts too and we need them.
*/
- memset(&info->status.ampdu_ack_len, 0,
+ memset(&info->status.ack_signal, 0,
sizeof(struct ieee80211_tx_info) -
- offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
+ offsetof(struct ieee80211_tx_info, status.ack_signal));
BUILD_BUG_ON(offsetof(struct ieee80211_tx_info,
- status.ampdu_ack_len) != 23);
+ status.ack_signal) != 20);
if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN))
pad = entry_data->align[0];
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index 266d45bf86f5..799e148d0370 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -455,7 +455,7 @@ islpci_eth_receive(islpci_private *priv)
"Error mapping DMA address\n");
/* free the skbuf structure before aborting */
- dev_kfree_skb_irq((struct sk_buff *) skb);
+ dev_kfree_skb_irq(skb);
skb = NULL;
break;
}
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 86a738bf591c..598ca1cafb95 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1849,7 +1849,7 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
pr_debug("ray_cs: interrupt for *dev=%p\n", dev);
local = netdev_priv(dev);
- link = (struct pcmcia_device *)local->finder;
+ link = local->finder;
if (!pcmcia_dev_present(link)) {
pr_debug(
"ray_cs interrupt from device not present or suspended.\n");
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index dfcd02ab6cae..241162e8111d 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -484,7 +484,7 @@ static int rndis_change_virtual_intf(struct wiphy *wiphy,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params);
-static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
+static int rndis_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request);
static int rndis_set_wiphy_params(struct wiphy *wiphy, u32 changed);
@@ -1941,9 +1941,10 @@ static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm)
}
#define SCAN_DELAY_JIFFIES (6 * HZ)
-static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
+static int rndis_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
+ struct net_device *dev = request->wdev->netdev;
struct usbnet *usbdev = netdev_priv(dev);
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
int ret;
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 299c3879582d..c7548da6573d 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -99,6 +99,14 @@ config RT2800PCI_RT53XX
rt2800pci driver.
Supported chips: RT5390
+config RT2800PCI_RT3290
+ bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ default y
+ ---help---
+ This adds support for rt3290 wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT3290
endif
config RT2500USB
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 5e6b50143165..8b9dbd76a252 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1455,7 +1455,7 @@ static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 136b849f11b5..d2cf8a4bc8b5 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1585,7 +1585,7 @@ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 669aecdb411d..3aae36bb0a9e 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1352,7 +1352,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 9348521e0832..e252e9bafd0e 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
* RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
* RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF5360 2.4G 1T1R
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
*/
@@ -67,9 +68,12 @@
#define RF3320 0x000b
#define RF3322 0x000c
#define RF3053 0x000d
+#define RF3290 0x3290
+#define RF5360 0x5360
#define RF5370 0x5370
#define RF5372 0x5372
#define RF5390 0x5390
+#define RF5392 0x5392
/*
* Chipset revisions.
@@ -114,6 +118,12 @@
* Registers.
*/
+
+/*
+ * MAC_CSR0_3290: MAC_CSR0 for RT3290 to identity MAC version number.
+ */
+#define MAC_CSR0_3290 0x0000
+
/*
* E2PROM_CSR: PCI EEPROM control register.
* RELOAD: Write 1 to reload eeprom content.
@@ -130,6 +140,150 @@
#define E2PROM_CSR_RELOAD FIELD32(0x00000080)
/*
+ * CMB_CTRL_CFG
+ */
+#define CMB_CTRL 0x0020
+#define AUX_OPT_BIT0 FIELD32(0x00000001)
+#define AUX_OPT_BIT1 FIELD32(0x00000002)
+#define AUX_OPT_BIT2 FIELD32(0x00000004)
+#define AUX_OPT_BIT3 FIELD32(0x00000008)
+#define AUX_OPT_BIT4 FIELD32(0x00000010)
+#define AUX_OPT_BIT5 FIELD32(0x00000020)
+#define AUX_OPT_BIT6 FIELD32(0x00000040)
+#define AUX_OPT_BIT7 FIELD32(0x00000080)
+#define AUX_OPT_BIT8 FIELD32(0x00000100)
+#define AUX_OPT_BIT9 FIELD32(0x00000200)
+#define AUX_OPT_BIT10 FIELD32(0x00000400)
+#define AUX_OPT_BIT11 FIELD32(0x00000800)
+#define AUX_OPT_BIT12 FIELD32(0x00001000)
+#define AUX_OPT_BIT13 FIELD32(0x00002000)
+#define AUX_OPT_BIT14 FIELD32(0x00004000)
+#define AUX_OPT_BIT15 FIELD32(0x00008000)
+#define LDO25_LEVEL FIELD32(0x00030000)
+#define LDO25_LARGEA FIELD32(0x00040000)
+#define LDO25_FRC_ON FIELD32(0x00080000)
+#define CMB_RSV FIELD32(0x00300000)
+#define XTAL_RDY FIELD32(0x00400000)
+#define PLL_LD FIELD32(0x00800000)
+#define LDO_CORE_LEVEL FIELD32(0x0F000000)
+#define LDO_BGSEL FIELD32(0x30000000)
+#define LDO3_EN FIELD32(0x40000000)
+#define LDO0_EN FIELD32(0x80000000)
+
+/*
+ * EFUSE_CSR_3290: RT3290 EEPROM
+ */
+#define EFUSE_CTRL_3290 0x0024
+
+/*
+ * EFUSE_DATA3 of 3290
+ */
+#define EFUSE_DATA3_3290 0x0028
+
+/*
+ * EFUSE_DATA2 of 3290
+ */
+#define EFUSE_DATA2_3290 0x002c
+
+/*
+ * EFUSE_DATA1 of 3290
+ */
+#define EFUSE_DATA1_3290 0x0030
+
+/*
+ * EFUSE_DATA0 of 3290
+ */
+#define EFUSE_DATA0_3290 0x0034
+
+/*
+ * OSC_CTRL_CFG
+ * Ring oscillator configuration
+ */
+#define OSC_CTRL 0x0038
+#define OSC_REF_CYCLE FIELD32(0x00001fff)
+#define OSC_RSV FIELD32(0x0000e000)
+#define OSC_CAL_CNT FIELD32(0x0fff0000)
+#define OSC_CAL_ACK FIELD32(0x10000000)
+#define OSC_CLK_32K_VLD FIELD32(0x20000000)
+#define OSC_CAL_REQ FIELD32(0x40000000)
+#define OSC_ROSC_EN FIELD32(0x80000000)
+
+/*
+ * COEX_CFG_0
+ */
+#define COEX_CFG0 0x0040
+#define COEX_CFG_ANT FIELD32(0xff000000)
+/*
+ * COEX_CFG_1
+ */
+#define COEX_CFG1 0x0044
+
+/*
+ * COEX_CFG_2
+ */
+#define COEX_CFG2 0x0048
+#define BT_COEX_CFG1 FIELD32(0xff000000)
+#define BT_COEX_CFG0 FIELD32(0x00ff0000)
+#define WL_COEX_CFG1 FIELD32(0x0000ff00)
+#define WL_COEX_CFG0 FIELD32(0x000000ff)
+/*
+ * PLL_CTRL_CFG
+ * PLL configuration register
+ */
+#define PLL_CTRL 0x0050
+#define PLL_RESERVED_INPUT1 FIELD32(0x000000ff)
+#define PLL_RESERVED_INPUT2 FIELD32(0x0000ff00)
+#define PLL_CONTROL FIELD32(0x00070000)
+#define PLL_LPF_R1 FIELD32(0x00080000)
+#define PLL_LPF_C1_CTRL FIELD32(0x00300000)
+#define PLL_LPF_C2_CTRL FIELD32(0x00c00000)
+#define PLL_CP_CURRENT_CTRL FIELD32(0x03000000)
+#define PLL_PFD_DELAY_CTRL FIELD32(0x0c000000)
+#define PLL_LOCK_CTRL FIELD32(0x70000000)
+#define PLL_VBGBK_EN FIELD32(0x80000000)
+
+
+/*
+ * WLAN_CTRL_CFG
+ * RT3290 wlan configuration
+ */
+#define WLAN_FUN_CTRL 0x0080
+#define WLAN_EN FIELD32(0x00000001)
+#define WLAN_CLK_EN FIELD32(0x00000002)
+#define WLAN_RSV1 FIELD32(0x00000004)
+#define WLAN_RESET FIELD32(0x00000008)
+#define PCIE_APP0_CLK_REQ FIELD32(0x00000010)
+#define FRC_WL_ANT_SET FIELD32(0x00000020)
+#define INV_TR_SW0 FIELD32(0x00000040)
+#define WLAN_GPIO_IN_BIT0 FIELD32(0x00000100)
+#define WLAN_GPIO_IN_BIT1 FIELD32(0x00000200)
+#define WLAN_GPIO_IN_BIT2 FIELD32(0x00000400)
+#define WLAN_GPIO_IN_BIT3 FIELD32(0x00000800)
+#define WLAN_GPIO_IN_BIT4 FIELD32(0x00001000)
+#define WLAN_GPIO_IN_BIT5 FIELD32(0x00002000)
+#define WLAN_GPIO_IN_BIT6 FIELD32(0x00004000)
+#define WLAN_GPIO_IN_BIT7 FIELD32(0x00008000)
+#define WLAN_GPIO_IN_BIT_ALL FIELD32(0x0000ff00)
+#define WLAN_GPIO_OUT_BIT0 FIELD32(0x00010000)
+#define WLAN_GPIO_OUT_BIT1 FIELD32(0x00020000)
+#define WLAN_GPIO_OUT_BIT2 FIELD32(0x00040000)
+#define WLAN_GPIO_OUT_BIT3 FIELD32(0x00050000)
+#define WLAN_GPIO_OUT_BIT4 FIELD32(0x00100000)
+#define WLAN_GPIO_OUT_BIT5 FIELD32(0x00200000)
+#define WLAN_GPIO_OUT_BIT6 FIELD32(0x00400000)
+#define WLAN_GPIO_OUT_BIT7 FIELD32(0x00800000)
+#define WLAN_GPIO_OUT_BIT_ALL FIELD32(0x00ff0000)
+#define WLAN_GPIO_OUT_OE_BIT0 FIELD32(0x01000000)
+#define WLAN_GPIO_OUT_OE_BIT1 FIELD32(0x02000000)
+#define WLAN_GPIO_OUT_OE_BIT2 FIELD32(0x04000000)
+#define WLAN_GPIO_OUT_OE_BIT3 FIELD32(0x08000000)
+#define WLAN_GPIO_OUT_OE_BIT4 FIELD32(0x10000000)
+#define WLAN_GPIO_OUT_OE_BIT5 FIELD32(0x20000000)
+#define WLAN_GPIO_OUT_OE_BIT6 FIELD32(0x40000000)
+#define WLAN_GPIO_OUT_OE_BIT7 FIELD32(0x80000000)
+#define WLAN_GPIO_OUT_OE_BIT_ALL FIELD32(0xff000000)
+
+/*
* AUX_CTRL: Aux/PCI-E related configuration
*/
#define AUX_CTRL 0x10c
@@ -1760,9 +1914,11 @@ struct mac_iveiv_entry {
/*
* BBP 3: RX Antenna
*/
-#define BBP3_RX_ADC FIELD8(0x03)
+#define BBP3_RX_ADC FIELD8(0x03)
#define BBP3_RX_ANTENNA FIELD8(0x18)
#define BBP3_HT40_MINUS FIELD8(0x20)
+#define BBP3_ADC_MODE_SWITCH FIELD8(0x40)
+#define BBP3_ADC_INIT_MODE FIELD8(0x80)
/*
* BBP 4: Bandwidth
@@ -1772,6 +1928,14 @@ struct mac_iveiv_entry {
#define BBP4_MAC_IF_CTRL FIELD8(0x40)
/*
+ * BBP 47: Bandwidth
+ */
+#define BBP47_TSSI_REPORT_SEL FIELD8(0x03)
+#define BBP47_TSSI_UPDATE_REQ FIELD8(0x04)
+#define BBP47_TSSI_TSSI_MODE FIELD8(0x18)
+#define BBP47_TSSI_ADC6 FIELD8(0x80)
+
+/*
* BBP 109
*/
#define BBP109_TX0_POWER FIELD8(0x0f)
@@ -1914,6 +2078,16 @@ struct mac_iveiv_entry {
#define RFCSR27_R4 FIELD8(0x40)
/*
+ * RFCSR 29:
+ */
+#define RFCSR29_ADC6_TEST FIELD8(0x01)
+#define RFCSR29_ADC6_INT_TEST FIELD8(0x02)
+#define RFCSR29_RSSI_RESET FIELD8(0x04)
+#define RFCSR29_RSSI_ON FIELD8(0x08)
+#define RFCSR29_RSSI_RIP_CTRL FIELD8(0x30)
+#define RFCSR29_RSSI_GAIN FIELD8(0xc0)
+
+/*
* RFCSR 30:
*/
#define RFCSR30_TX_H20M FIELD8(0x02)
@@ -1944,6 +2118,11 @@ struct mac_iveiv_entry {
#define RFCSR49_TX FIELD8(0x3f)
/*
+ * RFCSR 50:
+ */
+#define RFCSR50_TX FIELD8(0x3f)
+
+/*
* RF registers
*/
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index dfc90d34be6d..88455b1b9fe0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -354,16 +354,15 @@ int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev,
* of 4kb. Certain USB chipsets however require different firmware,
* which Ralink only provides attached to the original firmware
* file. Thus for USB devices, firmware files have a length
- * which is a multiple of 4kb.
+ * which is a multiple of 4kb. The firmware for rt3290 chip also
+ * have a length which is a multiple of 4kb.
*/
- if (rt2x00_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev) || rt2x00_rt(rt2x00dev, RT3290))
fw_len = 4096;
- multiple = true;
- } else {
+ else
fw_len = 8192;
- multiple = true;
- }
+ multiple = true;
/*
* Validate the firmware length
*/
@@ -415,7 +414,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
return -EBUSY;
if (rt2x00_is_pci(rt2x00dev)) {
- if (rt2x00_rt(rt2x00dev, RT3572) ||
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_register_read(rt2x00dev, AUX_CTRL, &reg);
@@ -851,8 +851,13 @@ int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
- rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
- return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0);
+ } else {
+ rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
+ return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
+ }
}
EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
@@ -1935,8 +1940,50 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
}
-#define RT5390_POWER_BOUND 0x27
-#define RT5390_FREQ_OFFSET_BOUND 0x5f
+#define POWER_BOUND 0x27
+#define FREQ_OFFSET_BOUND 0x5f
+
+static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
+{
+ u8 rfcsr;
+
+ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3);
+ rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2);
+ rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ if (info->default_power1 > POWER_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
+ rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ if (rf->channel <= 14) {
+ if (rf->channel == 6)
+ rt2800_bbp_write(rt2x00dev, 68, 0x0c);
+ else
+ rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
+ if (rf->channel >= 1 && rf->channel <= 6)
+ rt2800_bbp_write(rt2x00dev, 59, 0x0f);
+ else if (rf->channel >= 7 && rf->channel <= 11)
+ rt2800_bbp_write(rt2x00dev, 59, 0x0e);
+ else if (rf->channel >= 12 && rf->channel <= 14)
+ rt2800_bbp_write(rt2x00dev, 59, 0x0d);
+ }
+}
static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
@@ -1952,13 +1999,27 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
- if (info->default_power1 > RT5390_POWER_BOUND)
- rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND);
+ if (info->default_power1 > POWER_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+ if (rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ if (info->default_power1 > POWER_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX,
+ info->default_power2);
+ rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+ }
+
rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ if (rt2x00_rt(rt2x00dev, RT5392)) {
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ }
rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
@@ -1966,9 +2027,8 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
- if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND)
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE,
- RT5390_FREQ_OFFSET_BOUND);
+ if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
else
rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
@@ -2021,15 +2081,6 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
}
}
}
-
- rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
- rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
- rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
-
- rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
- rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
- rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
}
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
@@ -2039,7 +2090,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
{
u32 reg;
unsigned int tx_pin;
- u8 bbp;
+ u8 bbp, rfcsr;
if (rf->channel <= 14) {
info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1);
@@ -2060,15 +2111,36 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3052:
rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
break;
+ case RF3290:
+ rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
+ break;
+ case RF5360:
case RF5370:
case RF5372:
case RF5390:
+ case RF5392:
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
break;
default:
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
}
+ if (rt2x00_rf(rt2x00dev, RF3290) ||
+ rt2x00_rf(rt2x00dev, RF5360) ||
+ rt2x00_rf(rt2x00dev, RF5370) ||
+ rt2x00_rf(rt2x00dev, RF5372) ||
+ rt2x00_rf(rt2x00dev, RF5390) ||
+ rt2x00_rf(rt2x00dev, RF5392)) {
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+ }
+
/*
* Change BBP settings
*/
@@ -2549,9 +2621,12 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
break;
+ case RF3290:
+ case RF5360:
case RF5370:
case RF5372:
case RF5390:
+ case RF5392:
rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
@@ -2682,6 +2757,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3390) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
@@ -2778,10 +2854,54 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ if (rt2x00_get_field32(reg, WLAN_EN) == 1) {
+ rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ }
+
+ rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+ if (!(rt2x00_get_field32(reg, LDO0_EN) == 1)) {
+ rt2x00_set_field32(&reg, LDO0_EN, 1);
+ rt2x00_set_field32(&reg, LDO_BGSEL, 3);
+ rt2800_register_write(rt2x00dev, CMB_CTRL, reg);
+ }
+
+ rt2800_register_read(rt2x00dev, OSC_CTRL, &reg);
+ rt2x00_set_field32(&reg, OSC_ROSC_EN, 1);
+ rt2x00_set_field32(&reg, OSC_CAL_REQ, 1);
+ rt2x00_set_field32(&reg, OSC_REF_CYCLE, 0x27);
+ rt2800_register_write(rt2x00dev, OSC_CTRL, reg);
+
+ rt2800_register_read(rt2x00dev, COEX_CFG0, &reg);
+ rt2x00_set_field32(&reg, COEX_CFG_ANT, 0x5e);
+ rt2800_register_write(rt2x00dev, COEX_CFG0, reg);
+
+ rt2800_register_read(rt2x00dev, COEX_CFG2, &reg);
+ rt2x00_set_field32(&reg, BT_COEX_CFG1, 0x00);
+ rt2x00_set_field32(&reg, BT_COEX_CFG0, 0x17);
+ rt2x00_set_field32(&reg, WL_COEX_CFG1, 0x93);
+ rt2x00_set_field32(&reg, WL_COEX_CFG0, 0x7f);
+ rt2800_register_write(rt2x00dev, COEX_CFG2, reg);
+
+ rt2800_register_read(rt2x00dev, PLL_CTRL, &reg);
+ rt2x00_set_field32(&reg, PLL_CONTROL, 1);
+ rt2800_register_write(rt2x00dev, PLL_CTRL, reg);
+ }
+
if (rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3390)) {
- rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
+
+ if (rt2x00_rt(rt2x00dev, RT3290))
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0,
+ 0x00000404);
+ else
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0,
+ 0x00000400);
+
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
@@ -3190,14 +3310,16 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_wait_bbp_ready(rt2x00dev)))
return -EACCES;
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_read(rt2x00dev, 4, &value);
rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
rt2800_bbp_write(rt2x00dev, 4, value);
}
if (rt2800_is_305x_soc(rt2x00dev) ||
+ rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
@@ -3206,20 +3328,26 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 65, 0x2c);
rt2800_bbp_write(rt2x00dev, 66, 0x38);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
- } else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ } else if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
rt2800_bbp_write(rt2x00dev, 76, 0x28);
- rt2800_bbp_write(rt2x00dev, 77, 0x59);
+
+ if (rt2x00_rt(rt2x00dev, RT3290))
+ rt2800_bbp_write(rt2x00dev, 77, 0x58);
+ else
+ rt2800_bbp_write(rt2x00dev, 77, 0x59);
} else {
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x10);
@@ -3244,23 +3372,33 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 81, 0x37);
}
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_bbp_write(rt2x00dev, 74, 0x0b);
+ rt2800_bbp_write(rt2x00dev, 79, 0x18);
+ rt2800_bbp_write(rt2x00dev, 80, 0x09);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
+ }
+
rt2800_bbp_write(rt2x00dev, 82, 0x62);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 83, 0x7a);
else
rt2800_bbp_write(rt2x00dev, 83, 0x6a);
if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D))
rt2800_bbp_write(rt2x00dev, 84, 0x19);
- else if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ else if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 84, 0x9a);
else
rt2800_bbp_write(rt2x00dev, 84, 0x99);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 86, 0x38);
else
rt2800_bbp_write(rt2x00dev, 86, 0x00);
@@ -3270,8 +3408,9 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 91, 0x04);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 92, 0x02);
else
rt2800_bbp_write(rt2x00dev, 92, 0x00);
@@ -3285,6 +3424,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
+ rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT3572) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
@@ -3293,27 +3433,32 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
else
rt2800_bbp_write(rt2x00dev, 103, 0x00);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 104, 0x92);
if (rt2800_is_305x_soc(rt2x00dev))
rt2800_bbp_write(rt2x00dev, 105, 0x01);
+ else if (rt2x00_rt(rt2x00dev, RT3290))
+ rt2800_bbp_write(rt2x00dev, 105, 0x1c);
else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 105, 0x3c);
else
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- if (rt2x00_rt(rt2x00dev, RT5390))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390))
rt2800_bbp_write(rt2x00dev, 106, 0x03);
else if (rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 106, 0x12);
else
rt2800_bbp_write(rt2x00dev, 106, 0x35);
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392))
rt2800_bbp_write(rt2x00dev, 128, 0x12);
if (rt2x00_rt(rt2x00dev, RT5392)) {
@@ -3338,6 +3483,29 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 138, value);
}
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_bbp_write(rt2x00dev, 67, 0x24);
+ rt2800_bbp_write(rt2x00dev, 143, 0x04);
+ rt2800_bbp_write(rt2x00dev, 142, 0x99);
+ rt2800_bbp_write(rt2x00dev, 150, 0x30);
+ rt2800_bbp_write(rt2x00dev, 151, 0x2e);
+ rt2800_bbp_write(rt2x00dev, 152, 0x20);
+ rt2800_bbp_write(rt2x00dev, 153, 0x34);
+ rt2800_bbp_write(rt2x00dev, 154, 0x40);
+ rt2800_bbp_write(rt2x00dev, 155, 0x3b);
+ rt2800_bbp_write(rt2x00dev, 253, 0x04);
+
+ rt2800_bbp_read(rt2x00dev, 47, &value);
+ rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1);
+ rt2800_bbp_write(rt2x00dev, 47, value);
+
+ /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */
+ rt2800_bbp_read(rt2x00dev, 3, &value);
+ rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1);
+ rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1);
+ rt2800_bbp_write(rt2x00dev, 3, value);
+ }
+
if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
int ant, div_mode;
@@ -3470,6 +3638,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
if (!rt2x00_rt(rt2x00dev, RT3070) &&
!rt2x00_rt(rt2x00dev, RT3071) &&
!rt2x00_rt(rt2x00dev, RT3090) &&
+ !rt2x00_rt(rt2x00dev, RT3290) &&
!rt2x00_rt(rt2x00dev, RT3390) &&
!rt2x00_rt(rt2x00dev, RT3572) &&
!rt2x00_rt(rt2x00dev, RT5390) &&
@@ -3480,8 +3649,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
/*
* Init RF calibration.
*/
- if (rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392)) {
+ if (rt2x00_rt(rt2x00dev, RT3290) ||
+ rt2x00_rt(rt2x00dev, RT5390) ||
+ rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
@@ -3519,6 +3689,53 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
+ } else if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
+ rt2800_rfcsr_write(rt2x00dev, 4, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0xa0);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0xf3);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0x53);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x4a);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x46);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x9f);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 25, 0x83);
+ rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
+ rt2800_rfcsr_write(rt2x00dev, 27, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x05);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x1b);
+ rt2800_rfcsr_write(rt2x00dev, 40, 0x0b);
+ rt2800_rfcsr_write(rt2x00dev, 41, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 42, 0xd5);
+ rt2800_rfcsr_write(rt2x00dev, 43, 0x7b);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x0e);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xa2);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x73);
+ rt2800_rfcsr_write(rt2x00dev, 47, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x98);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x38);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x78);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 58, 0x7f);
+ rt2800_rfcsr_write(rt2x00dev, 59, 0x09);
+ rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 61, 0xc1);
} else if (rt2x00_rt(rt2x00dev, RT3390)) {
rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
@@ -3927,6 +4144,12 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
}
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ rt2800_rfcsr_read(rt2x00dev, 29, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR29_RSSI_GAIN, 3);
+ rt2800_rfcsr_write(rt2x00dev, 29, rfcsr);
+ }
+
if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
@@ -4033,9 +4256,14 @@ EXPORT_SYMBOL_GPL(rt2800_disable_radio);
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
+ u16 efuse_ctrl_reg;
- rt2800_register_read(rt2x00dev, EFUSE_CTRL, &reg);
+ if (rt2x00_rt(rt2x00dev, RT3290))
+ efuse_ctrl_reg = EFUSE_CTRL_3290;
+ else
+ efuse_ctrl_reg = EFUSE_CTRL;
+ rt2800_register_read(rt2x00dev, efuse_ctrl_reg, &reg);
return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT);
}
EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
@@ -4043,27 +4271,44 @@ EXPORT_SYMBOL_GPL(rt2800_efuse_detect);
static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
{
u32 reg;
-
+ u16 efuse_ctrl_reg;
+ u16 efuse_data0_reg;
+ u16 efuse_data1_reg;
+ u16 efuse_data2_reg;
+ u16 efuse_data3_reg;
+
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ efuse_ctrl_reg = EFUSE_CTRL_3290;
+ efuse_data0_reg = EFUSE_DATA0_3290;
+ efuse_data1_reg = EFUSE_DATA1_3290;
+ efuse_data2_reg = EFUSE_DATA2_3290;
+ efuse_data3_reg = EFUSE_DATA3_3290;
+ } else {
+ efuse_ctrl_reg = EFUSE_CTRL;
+ efuse_data0_reg = EFUSE_DATA0;
+ efuse_data1_reg = EFUSE_DATA1;
+ efuse_data2_reg = EFUSE_DATA2;
+ efuse_data3_reg = EFUSE_DATA3;
+ }
mutex_lock(&rt2x00dev->csr_mutex);
- rt2800_register_read_lock(rt2x00dev, EFUSE_CTRL, &reg);
+ rt2800_register_read_lock(rt2x00dev, efuse_ctrl_reg, &reg);
rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
- rt2800_register_write_lock(rt2x00dev, EFUSE_CTRL, reg);
+ rt2800_register_write_lock(rt2x00dev, efuse_ctrl_reg, reg);
/* Wait until the EEPROM has been loaded */
- rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);
-
+ rt2800_regbusy_read(rt2x00dev, efuse_ctrl_reg, EFUSE_CTRL_KICK, &reg);
/* Apparently the data is read from end to start */
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
+ rt2800_register_read_lock(rt2x00dev, efuse_data3_reg, &reg);
/* The returned value is in CPU order, but eeprom is le */
*(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
+ rt2800_register_read_lock(rt2x00dev, efuse_data2_reg, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
+ rt2800_register_read_lock(rt2x00dev, efuse_data1_reg, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg);
- rt2800_register_read_lock(rt2x00dev, EFUSE_DATA0, &reg);
+ rt2800_register_read_lock(rt2x00dev, efuse_data0_reg, &reg);
*(u32 *)&rt2x00dev->eeprom[i + 6] = cpu_to_le32(reg);
mutex_unlock(&rt2x00dev->csr_mutex);
@@ -4090,7 +4335,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
@@ -4225,9 +4470,14 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
* RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field
* RT53xx: defined in "EEPROM_CHIP_ID" field
*/
- rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
- if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390 ||
- rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5392)
+ if (rt2x00_rt(rt2x00dev, RT3290))
+ rt2800_register_read(rt2x00dev, MAC_CSR0_3290, &reg);
+ else
+ rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
+
+ if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT3290 ||
+ rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390 ||
+ rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5392)
rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value);
else
value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -4242,6 +4492,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RT3070:
case RT3071:
case RT3090:
+ case RT3290:
case RT3390:
case RT3572:
case RT5390:
@@ -4262,10 +4513,13 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3021:
case RF3022:
case RF3052:
+ case RF3290:
case RF3320:
+ case RF5360:
case RF5370:
case RF5372:
case RF5390:
+ case RF5392:
break;
default:
ERROR(rt2x00dev, "Invalid RF chipset 0x%04x detected.\n",
@@ -4576,10 +4830,13 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00_rf(rt2x00dev, RF2020) ||
rt2x00_rf(rt2x00dev, RF3021) ||
rt2x00_rf(rt2x00dev, RF3022) ||
+ rt2x00_rf(rt2x00dev, RF3290) ||
rt2x00_rf(rt2x00dev, RF3320) ||
+ rt2x00_rf(rt2x00dev, RF5360) ||
rt2x00_rf(rt2x00dev, RF5370) ||
rt2x00_rf(rt2x00dev, RF5372) ||
- rt2x00_rf(rt2x00dev, RF5390)) {
+ rt2x00_rf(rt2x00dev, RF5390) ||
+ rt2x00_rf(rt2x00dev, RF5392)) {
spec->num_channels = 14;
spec->channels = rf_vals_3x;
} else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -4662,9 +4919,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3022:
case RF3320:
case RF3052:
+ case RF3290:
+ case RF5360:
case RF5370:
case RF5372:
case RF5390:
+ case RF5392:
__set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags);
break;
}
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index cad25bfebd7a..235376e9cb04 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -280,7 +280,13 @@ static void rt2800pci_stop_queue(struct data_queue *queue)
*/
static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
{
- return FIRMWARE_RT2860;
+ /*
+ * Chip rt3290 use specific 4KB firmware named rt3290.bin.
+ */
+ if (rt2x00_rt(rt2x00dev, RT3290))
+ return FIRMWARE_RT3290;
+ else
+ return FIRMWARE_RT2860;
}
static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev,
@@ -974,6 +980,66 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
return rt2800_validate_eeprom(rt2x00dev);
}
+static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
+{
+ u32 reg;
+ int i, count;
+
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ if (rt2x00_get_field32(reg, WLAN_EN))
+ return 0;
+
+ rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff);
+ rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1);
+ rt2x00_set_field32(&reg, WLAN_CLK_EN, 0);
+ rt2x00_set_field32(&reg, WLAN_EN, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+
+ udelay(REGISTER_BUSY_DELAY);
+
+ count = 0;
+ do {
+ /*
+ * Check PLL_LD & XTAL_RDY.
+ */
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, CMB_CTRL, &reg);
+ if (rt2x00_get_field32(reg, PLL_LD) &&
+ rt2x00_get_field32(reg, XTAL_RDY))
+ break;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ if (i >= REGISTER_BUSY_COUNT) {
+
+ if (count >= 10)
+ return -EIO;
+
+ rt2800_register_write(rt2x00dev, 0x58, 0x018);
+ udelay(REGISTER_BUSY_DELAY);
+ rt2800_register_write(rt2x00dev, 0x58, 0x418);
+ udelay(REGISTER_BUSY_DELAY);
+ rt2800_register_write(rt2x00dev, 0x58, 0x618);
+ udelay(REGISTER_BUSY_DELAY);
+ count++;
+ } else {
+ count = 0;
+ }
+
+ rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg);
+ rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0);
+ rt2x00_set_field32(&reg, WLAN_CLK_EN, 1);
+ rt2x00_set_field32(&reg, WLAN_RESET, 1);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ udelay(10);
+ rt2x00_set_field32(&reg, WLAN_RESET, 0);
+ rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg);
+ udelay(10);
+ rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff);
+ } while (count != 0);
+
+ return 0;
+}
static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
int retval;
@@ -997,6 +1063,17 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
return retval;
/*
+ * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan
+ * clk for rt3290. That avoid the MCU fail in start phase.
+ */
+ if (rt2x00_rt(rt2x00dev, RT3290)) {
+ retval = rt2800_enable_wlan_rt3290(rt2x00dev);
+
+ if (retval)
+ return retval;
+ }
+
+ /*
* This device has multiple filters for control frames
* and has a separate filter for PS Poll frames.
*/
@@ -1175,6 +1252,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1432, 0x7768) },
{ PCI_DEVICE(0x1462, 0x891a) },
{ PCI_DEVICE(0x1a3b, 0x1059) },
+#ifdef CONFIG_RT2800PCI_RT3290
+ { PCI_DEVICE(0x1814, 0x3290) },
+#endif
#ifdef CONFIG_RT2800PCI_RT33XX
{ PCI_DEVICE(0x1814, 0x3390) },
#endif
@@ -1188,6 +1268,7 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x3593) },
#endif
#ifdef CONFIG_RT2800PCI_RT53XX
+ { PCI_DEVICE(0x1814, 0x5360) },
{ PCI_DEVICE(0x1814, 0x5362) },
{ PCI_DEVICE(0x1814, 0x5390) },
{ PCI_DEVICE(0x1814, 0x5392) },
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index 70e050d904c8..ab22a087c50d 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -47,6 +47,7 @@
* 8051 firmware image.
*/
#define FIRMWARE_RT2860 "rt2860.bin"
+#define FIRMWARE_RT3290 "rt3290.bin"
#define FIRMWARE_IMAGE_BASE 0x2000
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index bf78317a6adb..6cf336595e25 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -971,6 +971,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0411, 0x015d) },
{ USB_DEVICE(0x0411, 0x016f) },
{ USB_DEVICE(0x0411, 0x01a2) },
+ { USB_DEVICE(0x0411, 0x01ee) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x002f) },
{ USB_DEVICE(0x07aa, 0x003c) },
@@ -1137,6 +1138,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
#ifdef CONFIG_RT2800USB_RT33XX
/* Belkin */
{ USB_DEVICE(0x050d, 0x945b) },
+ /* D-Link */
+ { USB_DEVICE(0x2001, 0x3c17) },
/* Panasonic */
{ USB_DEVICE(0x083a, 0xb511) },
/* Philips */
@@ -1237,7 +1240,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c0b) },
{ USB_DEVICE(0x07d1, 0x3c17) },
- { USB_DEVICE(0x2001, 0x3c17) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1) },
/* Gemtek */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 8f754025b06e..8afb546c2b2d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -187,6 +187,7 @@ struct rt2x00_chip {
#define RT3070 0x3070
#define RT3071 0x3071
#define RT3090 0x3090 /* 2.4GHz PCIe */
+#define RT3290 0x3290
#define RT3390 0x3390
#define RT3572 0x3572
#define RT3593 0x3593
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index e7361d913e8e..49a63e973934 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -102,7 +102,7 @@ void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
/* Update the AID, this is needed for dynamic PS support */
rt2x00dev->aid = bss_conf->assoc ? bss_conf->aid : 0;
- rt2x00dev->last_beacon = bss_conf->last_tsf;
+ rt2x00dev->last_beacon = bss_conf->sync_tsf;
/* Update global beacon interval time, this is needed for PS support */
rt2x00dev->beacon_int = bss_conf->beacon_int;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index e5404e576251..a6b88bd4a1a5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1161,6 +1161,8 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
BIT(NL80211_IFTYPE_MESH_POINT) |
BIT(NL80211_IFTYPE_WDS);
+ rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
/*
* Initialize work.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index dd24b2663b5e..4ff26c2159bf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -506,9 +506,19 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
return 0;
- else if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+
+ if (!test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
+ return -EOPNOTSUPP;
+
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS, the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -EOPNOTSUPP;
- else if (key->keylen > 32)
+
+ if (key->keylen > 32)
return -ENOSPC;
memset(&crypto, 0, sizeof(crypto));
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0a4653a92cab..a0c8caef3b0a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -256,6 +256,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
struct ieee80211_hw *hw;
struct rt2x00_dev *rt2x00dev;
int retval;
+ u16 chip;
retval = pci_enable_device(pci_dev);
if (retval) {
@@ -305,6 +306,14 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops)
if (retval)
goto exit_free_device;
+ /*
+ * Because rt3290 chip use different efuse offset to read efuse data.
+ * So before read efuse it need to indicate it is the
+ * rt3290 or not.
+ */
+ pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
+ rt2x00dev->chip.rt = chip;
+
retval = rt2x00lib_probe_dev(rt2x00dev);
if (retval)
goto exit_free_reg;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 2fd830103415..f7e74a0a7759 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -774,9 +774,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
bool rt2x00queue_for_each_entry(struct data_queue *queue,
enum queue_index start,
enum queue_index end,
- void *data,
- bool (*fn)(struct queue_entry *entry,
- void *data))
+ bool (*fn)(struct queue_entry *entry))
{
unsigned long irqflags;
unsigned int index_start;
@@ -807,17 +805,17 @@ bool rt2x00queue_for_each_entry(struct data_queue *queue,
*/
if (index_start < index_end) {
for (i = index_start; i < index_end; i++) {
- if (fn(&queue->entries[i], data))
+ if (fn(&queue->entries[i]))
return true;
}
} else {
for (i = index_start; i < queue->limit; i++) {
- if (fn(&queue->entries[i], data))
+ if (fn(&queue->entries[i]))
return true;
}
for (i = 0; i < index_end; i++) {
- if (fn(&queue->entries[i], data))
+ if (fn(&queue->entries[i]))
return true;
}
}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 5f1392c72673..9b8c10a86dee 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -584,7 +584,6 @@ struct data_queue_desc {
* @queue: Pointer to @data_queue
* @start: &enum queue_index Pointer to start index
* @end: &enum queue_index Pointer to end index
- * @data: Data to pass to the callback function
* @fn: The function to call for each &struct queue_entry
*
* This will walk through all entries in the queue, in chronological
@@ -597,9 +596,7 @@ struct data_queue_desc {
bool rt2x00queue_for_each_entry(struct data_queue *queue,
enum queue_index start,
enum queue_index end,
- void *data,
- bool (*fn)(struct queue_entry *entry,
- void *data));
+ bool (*fn)(struct queue_entry *entry));
/**
* rt2x00queue_empty - Check if the queue is empty.
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index d357d1ed92f6..40ea80725a96 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -285,7 +285,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}
-static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void* data)
+static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -390,7 +390,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}
-static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void* data)
+static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -427,18 +427,12 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
case QID_AC_BE:
case QID_AC_BK:
if (!rt2x00queue_empty(queue))
- rt2x00queue_for_each_entry(queue,
- Q_INDEX_DONE,
- Q_INDEX,
- NULL,
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
rt2x00usb_kick_tx_entry);
break;
case QID_RX:
if (!rt2x00queue_full(queue))
- rt2x00queue_for_each_entry(queue,
- Q_INDEX_DONE,
- Q_INDEX,
- NULL,
+ rt2x00queue_for_each_entry(queue, Q_INDEX, Q_INDEX_DONE,
rt2x00usb_kick_rx_entry);
break;
default:
@@ -447,7 +441,7 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
-static bool rt2x00usb_flush_entry(struct queue_entry *entry, void* data)
+static bool rt2x00usb_flush_entry(struct queue_entry *entry)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct queue_entry_priv_usb *entry_priv = entry->priv_data;
@@ -474,7 +468,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
unsigned int i;
if (drop)
- rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
+ rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
rt2x00usb_flush_entry);
/*
@@ -565,7 +559,7 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
entry->flags = 0;
if (entry->queue->qid == QID_RX)
- rt2x00usb_kick_rx_entry(entry, NULL);
+ rt2x00usb_kick_rx_entry(entry);
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index ee22bd74579d..f32259686b45 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2415,7 +2415,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 77ccbbc7da41..ba6e434b859d 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1770,7 +1770,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
- random_ether_addr(mac);
+ eth_random_addr(mac);
EEPROM(rt2x00dev, "MAC: %pM\n", mac);
}
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 2bebcb71a1e9..aceaf689f737 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -47,6 +47,8 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
{ PCI_DEVICE(0x1799, 0x6001) },
{ PCI_DEVICE(0x1799, 0x6020) },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x3300) },
+ { PCI_DEVICE(0x1186, 0x3301) },
+ { PCI_DEVICE(0x1432, 0x7106) },
{ }
};
@@ -1076,7 +1078,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
if (!is_valid_ether_addr(mac_addr)) {
printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
" randomly generated MAC addr\n", pci_name(pdev));
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
}
SET_IEEE80211_PERM_ADDR(dev, mac_addr);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 4fb1ca1b86b9..71a30b026089 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -1486,7 +1486,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
if (!is_valid_ether_addr(mac_addr)) {
printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly "
"generated MAC address\n");
- random_ether_addr(mac_addr);
+ eth_random_addr(mac_addr);
}
SET_IEEE80211_PERM_ADDR(dev, mac_addr);
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index f4c852c6749b..942e56b77b60 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -167,7 +167,7 @@ static const u8 tid_to_ac[] = {
0, /* IEEE80211_AC_VO */
};
-u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid)
+u8 rtl_tid_to_ac(u8 tid)
{
return tid_to_ac[tid];
}
@@ -907,7 +907,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
struct rtl_priv *rtlpriv = rtl_priv(hw);
__le16 fc = hdr->frame_control;
- u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
+ u8 *act = (u8 *)skb->data + MAC80211_3ADDR_LEN;
u8 category;
if (!ieee80211_is_action(fc))
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 5a23a6d0f49d..f35af0fdaaf0 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -138,7 +138,7 @@ int rtl_send_smps_action(struct ieee80211_hw *hw,
enum ieee80211_smps_mode smps);
u8 *rtl_find_ie(u8 *data, unsigned int len, u8 ie);
void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
-u8 rtl_tid_to_ac(struct ieee80211_hw *hw, u8 tid);
+u8 rtl_tid_to_ac(u8 tid);
extern struct attribute_group rtl_attribute_group;
int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
bool isht, u8 desc_rate, bool first_ampdu);
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 3d8cc4a0c86d..5b4b4d4eaf9e 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -128,7 +128,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
u32 us_config;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
"EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
ul_entry_idx, ul_key_id, ul_enc_alg,
ul_default_key, mac_addr);
@@ -146,7 +146,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
}
rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
- (u8 *) key_content, us_config);
+ key_content, us_config);
RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "<===\n");
@@ -342,7 +342,8 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Remove from HW Security CAM */
memset(rtlpriv->sec.hwsec_cam_sta_addr[i], 0, ETH_ALEN);
rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
- pr_info("&&&&&&&&&del entry %d\n", i);
+ RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
+ "del CAM entry %d\n", i);
}
}
return;
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 278e9f957e0d..a18ad2a98938 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -680,7 +680,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->short_preamble = bss_conf->use_short_preamble;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
- (u8 *) (&mac->short_preamble));
+ &mac->short_preamble);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
@@ -693,7 +693,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
mac->slot_time = RTL_SLOT_TIME_20;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *) (&mac->slot_time));
+ &mac->slot_time);
}
if (changed & BSS_CHANGED_HT) {
@@ -713,7 +713,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_unlock();
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
- (u8 *) (&mac->max_mss_density));
+ &mac->max_mss_density);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
&mac->current_ampdu_factor);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
@@ -801,7 +801,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
u8 mstatus = RT_MEDIA_CONNECT;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_JOINBSSRPT,
- (u8 *) (&mstatus));
+ &mstatus);
ppsc->report_linked = true;
}
} else {
@@ -809,7 +809,7 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
u8 mstatus = RT_MEDIA_DISCONNECT;
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_JOINBSSRPT,
- (u8 *)(&mstatus));
+ &mstatus);
ppsc->report_linked = false;
}
}
@@ -836,7 +836,7 @@ static void rtl_op_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
mac->tsf = tsf;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, &bibss);
}
static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
@@ -845,7 +845,7 @@ static void rtl_op_reset_tsf(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmp = 0;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, &tmp);
}
static void rtl_op_sta_notify(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 1f143800a8d7..8e2f9afb125a 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -352,7 +352,7 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
(u8 *)&efuse_utilized);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
- (u8 *)&efuse_usage);
+ &efuse_usage);
done:
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++)
kfree(efuse_word[i]);
@@ -409,7 +409,7 @@ void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
else if (type == 2)
efuse_shadow_read_2byte(hw, offset, (u16 *) value);
else if (type == 4)
- efuse_shadow_read_4byte(hw, offset, (u32 *) value);
+ efuse_shadow_read_4byte(hw, offset, value);
}
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 2062ea1d7c80..80f75d3ba84a 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -480,7 +480,7 @@ static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
/* we juse use em for BE/BK/VI/VO */
for (tid = 7; tid >= 0; tid--) {
- u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(hw, tid)];
+ u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)];
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
while (!mac->act_scanning &&
rtlpriv->psc.rfpwr_state == ERFON) {
@@ -756,10 +756,10 @@ done:
if (index == rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
HW_DESC_RXERO,
- (u8 *)&tmp_one);
+ &tmp_one);
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
- (u8 *)&tmp_one);
+ &tmp_one);
index = (index + 1) % rtlpci->rxringcount;
}
@@ -934,7 +934,7 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
__skb_queue_tail(&ring->queue, pskb);
rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
- (u8 *)&temp_one);
+ &temp_one);
return;
}
@@ -1126,11 +1126,11 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
rxbuffersize);
rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
HW_DESC_RXOWN,
- (u8 *)&tmp_one);
+ &tmp_one);
}
rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
- HW_DESC_RXERO, (u8 *)&tmp_one);
+ HW_DESC_RXERO, &tmp_one);
}
return 0;
}
@@ -1263,7 +1263,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_desc((u8 *) entry,
false,
HW_DESC_RXOWN,
- (u8 *)&tmp_one);
+ &tmp_one);
}
rtlpci->rx_ring[rx_queue_idx].idx = 0;
}
@@ -1273,17 +1273,18 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
*after reset, release previous pending packet,
*and force the tx idx to the first one
*/
- spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
if (rtlpci->tx_ring[i].desc) {
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
while (skb_queue_len(&ring->queue)) {
- struct rtl_tx_desc *entry =
- &ring->desc[ring->idx];
- struct sk_buff *skb =
- __skb_dequeue(&ring->queue);
+ struct rtl_tx_desc *entry;
+ struct sk_buff *skb;
+ spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,
+ flags);
+ entry = &ring->desc[ring->idx];
+ skb = __skb_dequeue(&ring->queue);
pci_unmap_single(rtlpci->pdev,
rtlpriv->cfg->ops->
get_desc((u8 *)
@@ -1291,15 +1292,15 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
true,
HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
- kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
+ spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
+ flags);
+ kfree_skb(skb);
}
ring->idx = 0;
}
}
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
-
return 0;
}
@@ -1422,7 +1423,7 @@ static int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
__skb_queue_tail(&ring->queue, skb);
rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
- HW_DESC_OWN, (u8 *)&temp_one);
+ HW_DESC_OWN, &temp_one);
if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 5ae26647f340..13ad33e85577 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -333,10 +333,10 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
rpwm_val = 0x0C; /* RF on */
fw_pwrmode = FW_PS_ACTIVE_MODE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
- (u8 *) (&rpwm_val));
+ &rpwm_val);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *) (&fw_pwrmode));
+ &fw_pwrmode);
fw_current_inps = false;
rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -356,11 +356,11 @@ static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
(u8 *) (&fw_current_inps));
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_PWRMODE,
- (u8 *) (&ppsc->fwctrl_psmode));
+ &ppsc->fwctrl_psmode);
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_SET_RPWM,
- (u8 *) (&rpwm_val));
+ &rpwm_val);
} else {
/* Reset the power save related parameters. */
ppsc->dot11_psmode = EACTIVE;
@@ -446,7 +446,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct ieee80211_hdr *hdr = (void *) data;
+ struct ieee80211_hdr *hdr = data;
struct ieee80211_tim_ie *tim_ie;
u8 *tim;
u8 tim_len;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index f7f48c7ac854..a45afda8259c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -656,9 +656,8 @@ static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (u8 *) (&tmp));
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 692c8ef5ee89..44febfde9493 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -168,7 +168,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *bufferPtr = (u8 *) buffer;
+ u8 *bufferPtr = buffer;
RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size);
@@ -262,7 +262,7 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
return 1;
pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
- pfwdata = (u8 *) rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
if (IS_FW_HEADER_EXIST(pfwheader)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 5c4d9bc040f1..bd0da7ef290b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -214,13 +214,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *) (&e_aci));
+ &e_aci);
}
break;
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool)*val;
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
reg_tmp |= 0x80;
@@ -232,7 +232,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
@@ -257,7 +257,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -284,7 +284,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
else
p_regtoset = regtoset_normal;
- factor_toset = *((u8 *) val);
+ factor_toset = *(val);
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -316,17 +316,17 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *(val);
rtl92c_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
- (u8 *) (&e_aci));
+ (&e_aci));
break;
}
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *(val);
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -382,7 +382,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -396,13 +396,13 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:{
u8 rpwm_val;
@@ -411,31 +411,30 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
udelay(1);
if (rpwm_val & BIT(7)) {
- rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- (*(u8 *) val));
+ rtl_write_byte(rtlpriv, REG_PCIE_HRPWM, *val);
} else {
rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
- ((*(u8 *) val) | BIT(7)));
+ *val | BIT(7));
}
break;
}
case HW_VAR_H2C_FW_PWRMODE:{
- u8 psmode = (*(u8 *) val);
+ u8 psmode = *val;
if ((psmode != FW_PS_ACTIVE_MODE) &&
(!IS_92C_SERIAL(rtlhal->version))) {
rtl92c_dm_rf_saving(hw, true);
}
- rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_pwrmode_cmd(hw, *val);
break;
}
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT:{
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = *val;
u8 tmp_regcr, tmp_reg422;
bool recover = false;
@@ -472,7 +471,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_CR + 1,
(tmp_regcr & ~(BIT(0))));
}
- rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_joinbss_report_cmd(hw, *val);
break;
}
@@ -486,7 +485,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_CORRECT_TSF:{
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = val[0];
if (btype_ibss)
_rtl92ce_stop_tx_beacon(hw);
@@ -1589,10 +1588,10 @@ static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->autoload_failflag,
hwinfo);
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = *&hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
"EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
@@ -1939,7 +1938,7 @@ void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 3af874e69595..52166640f167 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -605,7 +605,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool defaultadapter = true;
struct ieee80211_sta *sta;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 fw_qsel = _rtl92ce_map_hwqueue_to_fwqueue(skb, hw_queue);
@@ -806,7 +806,7 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_OWN(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 0c74d4f2eeb4..4bbb711a36c5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -381,11 +381,11 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, " VID = 0x%02x PID = 0x%02x\n",
rtlefuse->eeprom_vid, rtlefuse->eeprom_did);
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->eeprom_version =
le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
rtlefuse->txpwr_fromeprom = true;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
rtlefuse->eeprom_oemid);
if (rtlhal->oem_id == RT_CID_DEFAULT) {
@@ -1660,7 +1660,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *)(&e_aci));
+ &e_aci);
} else {
u8 sifstime = 0;
u8 u1bAIFS;
@@ -1685,7 +1685,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool)*val;
reg_tmp = 0;
if (short_preamble)
reg_tmp |= 0x80;
@@ -1696,7 +1696,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
switch (rtlpriv->sec.pairwise_enc_algorithm) {
case NO_ENCRYPTION:
@@ -1729,7 +1729,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
density_to_set &= 0x1f;
mac->min_space_cfg &= 0x07;
mac->min_space_cfg |= (density_to_set << 3);
@@ -1747,7 +1747,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 index = 0;
p_regtoset = regtoset_normal;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -1774,7 +1774,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
u32 u4b_ac_param;
u16 cw_min = le16_to_cpu(mac->ac[e_aci].cw_min);
u16 cw_max = le16_to_cpu(mac->ac[e_aci].cw_max);
@@ -1814,11 +1814,11 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
if (rtlusb->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_ACM_CTRL, (u8 *)(&e_aci));
+ HW_VAR_ACM_CTRL, &e_aci);
break;
}
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)
(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -1874,7 +1874,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -1891,39 +1891,38 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:{
u8 rpwm_val = rtl_read_byte(rtlpriv, REG_USB_HRPWM);
if (rpwm_val & BIT(7))
- rtl_write_byte(rtlpriv, REG_USB_HRPWM,
- (*(u8 *)val));
+ rtl_write_byte(rtlpriv, REG_USB_HRPWM, *val);
else
rtl_write_byte(rtlpriv, REG_USB_HRPWM,
- ((*(u8 *)val) | BIT(7)));
+ *val | BIT(7));
break;
}
case HW_VAR_H2C_FW_PWRMODE:{
- u8 psmode = (*(u8 *) val);
+ u8 psmode = *val;
if ((psmode != FW_PS_ACTIVE_MODE) &&
(!IS_92C_SERIAL(rtlhal->version)))
rtl92c_dm_rf_saving(hw, true);
- rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_pwrmode_cmd(hw, (*val));
break;
}
case HW_VAR_FW_PSMODE_STATUS:
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT:{
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = *val;
u8 tmp_reg422;
bool recover = false;
@@ -1948,7 +1947,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
tmp_reg422 | BIT(6));
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
}
- rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl92c_set_fw_joinbss_report_cmd(hw, (*val));
break;
}
case HW_VAR_AID:{
@@ -1961,7 +1960,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_CORRECT_TSF:{
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = val[0];
if (btype_ibss)
_rtl92cu_stop_tx_beacon(hw);
@@ -2184,7 +2183,7 @@ void rtl92cu_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index d228358e6a40..9970c2b1b199 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -301,9 +301,11 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+ {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
+ {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
/* HP - Lite-On ,8188CUS Slim Combo */
{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
@@ -346,6 +348,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
{RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
{RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
{RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index 21bc827c5fa6..2e6eb356a93e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -668,7 +668,7 @@ void rtl92cu_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_RATE_ID(pdesc, 7);
SET_TX_DESC_MACID(pdesc, 0);
SET_TX_DESC_OWN(pdesc, 1);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16)skb->len);
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
SET_TX_DESC_OFFSET(pdesc, 0x20);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index a7d63a84551a..c0201ed69dd7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -696,7 +696,7 @@ static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- (u8 *) (&tmp));
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
index f548a8d0068d..895ae6c1f354 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c
@@ -120,7 +120,7 @@ static void _rtl92d_write_fw(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *bufferPtr = (u8 *) buffer;
+ u8 *bufferPtr = buffer;
u32 pagenums, remainSize;
u32 page, offset;
@@ -256,8 +256,8 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
return 1;
fwsize = rtlhal->fwsize;
- pfwheader = (u8 *) rtlhal->pfirmware;
- pfwdata = (u8 *) rtlhal->pfirmware;
+ pfwheader = rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader);
rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index b338d526c422..f4051f4f0390 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -235,12 +235,12 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *) (&e_aci));
+ (&e_aci));
break;
}
case HW_VAR_ACK_PREAMBLE: {
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool) (*val);
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
@@ -252,7 +252,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *) val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
sec_min_space = 0;
if (min_spacing_to_set < sec_min_space)
@@ -271,7 +271,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY: {
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
@@ -293,7 +293,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
regtoSet = 0x66626641;
else
regtoSet = 0xb972a841;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -316,15 +316,15 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM: {
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
rtl92d_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACM_CTRL,
- (u8 *) (&e_aci));
+ &e_aci);
break;
}
case HW_VAR_ACM_CTRL: {
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn =
(union aci_aifsn *)(&(mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -376,7 +376,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlpci->receive_config = ((u32 *) (val))[0];
break;
case HW_VAR_RETRY_LIMIT: {
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -390,16 +390,16 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtlefuse->efuse_usedbytes = *((u16 *) val);
break;
case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
case HW_VAR_IO_CMD:
rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val));
break;
case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
break;
case HW_VAR_SET_RPWM:
- rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (u8 *) (val));
+ rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (val));
break;
case HW_VAR_H2C_FW_PWRMODE:
break;
@@ -407,7 +407,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
ppsc->fw_current_inpsmode = *((bool *) val);
break;
case HW_VAR_H2C_FW_JOINBSSRPT: {
- u8 mstatus = (*(u8 *) val);
+ u8 mstatus = (*val);
u8 tmp_regcr, tmp_reg422;
bool recover = false;
@@ -435,7 +435,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_CR + 1,
(tmp_regcr & ~(BIT(0))));
}
- rtl92d_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
+ rtl92d_set_fw_joinbss_report_cmd(hw, (*val));
break;
}
case HW_VAR_AID: {
@@ -447,7 +447,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_CORRECT_TSF: {
- u8 btype_ibss = ((u8 *) (val))[0];
+ u8 btype_ibss = val[0];
if (btype_ibss)
_rtl92de_stop_tx_beacon(hw);
@@ -1794,7 +1794,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
"RTL819X Not boot from eeprom, check it !!\n");
return;
}
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
+ rtlefuse->eeprom_oemid = hwinfo[EEPROM_CUSTOMER_ID];
_rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
/* VID, DID SE 0xA-D */
@@ -2115,7 +2115,7 @@ void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
if (!mac->ht_enable)
sifs_timer = 0x0a0a;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 18380a7829f1..442031256bce 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3345,21 +3345,21 @@ void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
switch (rtlhal->macphymode) {
case DUALMAC_SINGLEPHY:
rtlphy->rf_type = RF_2T2R;
- rtlhal->version |= CHIP_92D_SINGLEPHY;
+ rtlhal->version |= RF_TYPE_2T2R;
rtlhal->bandset = BAND_ON_BOTH;
rtlhal->current_bandtype = BAND_ON_2_4G;
break;
case SINGLEMAC_SINGLEPHY:
rtlphy->rf_type = RF_2T2R;
- rtlhal->version |= CHIP_92D_SINGLEPHY;
+ rtlhal->version |= RF_TYPE_2T2R;
rtlhal->bandset = BAND_ON_BOTH;
rtlhal->current_bandtype = BAND_ON_2_4G;
break;
case DUALMAC_DUALPHY:
rtlphy->rf_type = RF_1T1R;
- rtlhal->version &= (~CHIP_92D_SINGLEPHY);
+ rtlhal->version &= RF_TYPE_1T1R;
/* Now we let MAC0 run on 5G band. */
if (rtlhal->interfaceindex == 0) {
rtlhal->bandset = BAND_ON_5G;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
index 1666ef7fd87b..f80690d82c11 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.c
@@ -560,7 +560,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct ieee80211_sta *sta = info->control.sta;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
unsigned int buf_len = 0;
@@ -761,11 +761,11 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
- SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
+ SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16)skb->len);
SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping);
SET_TX_DESC_RATE_ID(pdesc, 7);
SET_TX_DESC_MACID(pdesc, 0);
- SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
+ SET_TX_DESC_PKT_SIZE(pdesc, (u16) (skb->len));
SET_TX_DESC_FIRST_SEG(pdesc, 1);
SET_TX_DESC_LAST_SEG(pdesc, 1);
SET_TX_DESC_OFFSET(pdesc, 0x20);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index 2e1158026fb7..465f58157101 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -146,7 +146,7 @@ static void _rtl92s_dm_check_edca_turbo(struct ieee80211_hw *hw)
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- (u8 *)(&tmp));
+ &tmp);
rtlpriv->dm.current_turbo_edca = false;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index b141c35bf926..4542e6952b97 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -145,13 +145,13 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_AC_PARAM,
- (u8 *)(&e_aci));
+ (&e_aci));
}
break;
}
case HW_VAR_ACK_PREAMBLE:{
u8 reg_tmp;
- u8 short_preamble = (bool) (*(u8 *) val);
+ u8 short_preamble = (bool) (*val);
reg_tmp = (mac->cur_40_prime_sc) << 5;
if (short_preamble)
reg_tmp |= 0x80;
@@ -163,7 +163,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 min_spacing_to_set;
u8 sec_min_space;
- min_spacing_to_set = *((u8 *)val);
+ min_spacing_to_set = *val;
if (min_spacing_to_set <= 7) {
if (rtlpriv->sec.pairwise_enc_algorithm ==
NO_ENCRYPTION)
@@ -194,7 +194,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SHORTGI_DENSITY:{
u8 density_to_set;
- density_to_set = *((u8 *) val);
+ density_to_set = *val;
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
@@ -216,7 +216,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
15, 15, 15, 15, 0};
u8 index = 0;
- factor_toset = *((u8 *) val);
+ factor_toset = *val;
if (factor_toset <= 3) {
factor_toset = (1 << (factor_toset + 2));
if (factor_toset > 0xf)
@@ -248,17 +248,17 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_AC_PARAM:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
rtl92s_dm_init_edca_turbo(hw);
if (rtlpci->acm_method != eAcmWay2_SW)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_ACM_CTRL,
- (u8 *)(&e_aci));
+ &e_aci);
break;
}
case HW_VAR_ACM_CTRL:{
- u8 e_aci = *((u8 *) val);
+ u8 e_aci = *val;
union aci_aifsn *p_aci_aifsn = (union aci_aifsn *)(&(
mac->ac[0].aifs));
u8 acm = p_aci_aifsn->f.acm;
@@ -313,7 +313,7 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_RETRY_LIMIT:{
- u8 retry_limit = ((u8 *) (val))[0];
+ u8 retry_limit = val[0];
rtl_write_word(rtlpriv, RETRY_LIMIT,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
@@ -328,14 +328,14 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
case HW_VAR_EFUSE_USAGE: {
- rtlefuse->efuse_usedpercentage = *((u8 *) val);
+ rtlefuse->efuse_usedpercentage = *val;
break;
}
case HW_VAR_IO_CMD: {
break;
}
case HW_VAR_WPA_CONFIG: {
- rtl_write_byte(rtlpriv, REG_SECR, *((u8 *) val));
+ rtl_write_byte(rtlpriv, REG_SECR, *val);
break;
}
case HW_VAR_SET_RPWM:{
@@ -1813,8 +1813,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
else
index = 2;
- tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_HT20_DIFF +
- index]) & 0xff;
+ tempval = hwinfo[EEPROM_TX_PWR_HT20_DIFF + index] & 0xff;
rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
((tempval >> 4) & 0xF);
@@ -1830,14 +1829,13 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
else
index = 1;
- tempval = (*(u8 *)&hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index])
- & 0xff;
+ tempval = hwinfo[EEPROM_TX_PWR_OFDM_DIFF + index] & 0xff;
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] =
(tempval & 0xF);
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
((tempval >> 4) & 0xF);
- tempval = (*(u8 *)&hwinfo[TX_PWR_SAFETY_CHK]);
+ tempval = hwinfo[TX_PWR_SAFETY_CHK];
rtlefuse->txpwr_safetyflag = (tempval & 0x01);
}
@@ -1876,7 +1874,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
/* Read RF-indication and Tx Power gain
* index diff of legacy to HT OFDM rate. */
- tempval = (*(u8 *)&hwinfo[EEPROM_RFIND_POWERDIFF]) & 0xff;
+ tempval = hwinfo[EEPROM_RFIND_POWERDIFF] & 0xff;
rtlefuse->eeprom_txpowerdiff = tempval;
rtlefuse->legacy_httxpowerdiff =
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0];
@@ -1887,7 +1885,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
/* Get TSSI value for each path. */
usvalue = *(u16 *)&hwinfo[EEPROM_TSSI_A];
rtlefuse->eeprom_tssi[RF90_PATH_A] = (u8)((usvalue & 0xff00) >> 8);
- usvalue = *(u8 *)&hwinfo[EEPROM_TSSI_B];
+ usvalue = hwinfo[EEPROM_TSSI_B];
rtlefuse->eeprom_tssi[RF90_PATH_B] = (u8)(usvalue & 0xff);
RTPRINT(rtlpriv, FINIT, INIT_TxPower, "TSSI_A = 0x%x, TSSI_B = 0x%x\n",
@@ -1896,7 +1894,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
/* Read antenna tx power offset of B/C/D to A from EEPROM */
/* and read ThermalMeter from EEPROM */
- tempval = *(u8 *)&hwinfo[EEPROM_THERMALMETER];
+ tempval = hwinfo[EEPROM_THERMALMETER];
rtlefuse->eeprom_thermalmeter = tempval;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
"thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
@@ -1906,20 +1904,20 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->tssi_13dbm = rtlefuse->eeprom_thermalmeter * 100;
/* Read CrystalCap from EEPROM */
- tempval = (*(u8 *)&hwinfo[EEPROM_CRYSTALCAP]) >> 4;
+ tempval = hwinfo[EEPROM_CRYSTALCAP] >> 4;
rtlefuse->eeprom_crystalcap = tempval;
/* CrystalCap, BIT(12)~15 */
rtlefuse->crystalcap = rtlefuse->eeprom_crystalcap;
/* Read IC Version && Channel Plan */
/* Version ID, Channel plan */
- rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
+ rtlefuse->eeprom_channelplan = hwinfo[EEPROM_CHANNELPLAN];
rtlefuse->txpwr_fromeprom = true;
RTPRINT(rtlpriv, FINIT, INIT_TxPower,
"EEPROM ChannelPlan = 0x%4x\n", rtlefuse->eeprom_channelplan);
/* Read Customer ID or Board Type!!! */
- tempval = *(u8 *)&hwinfo[EEPROM_BOARDTYPE];
+ tempval = hwinfo[EEPROM_BOARDTYPE];
/* Change RF type definition */
if (tempval == 0)
rtlphy->rf_type = RF_2T2R;
@@ -1941,7 +1939,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
}
}
rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine;
- rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMID];
+ rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x",
rtlefuse->eeprom_oemid);
@@ -2251,7 +2249,7 @@ void rtl92se_update_channel_access_setting(struct ieee80211_hw *hw)
u16 sifs_timer;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- (u8 *)&mac->slot_time);
+ &mac->slot_time);
sifs_timer = 0x0e0e;
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 8d7099bc472c..b917a2a3caf7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -1247,6 +1247,9 @@ static void _rtl92s_phy_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
/* Read HT 40 OFDM TX power */
ofdmpowerLevel[0] = rtlefuse->txpwrlevel_ht40_2s[0][index];
ofdmpowerLevel[1] = rtlefuse->txpwrlevel_ht40_2s[1][index];
+ } else {
+ ofdmpowerLevel[0] = 0;
+ ofdmpowerLevel[1] = 0;
}
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 730bcc919529..ad4b4803482d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -29,7 +29,6 @@
#include "../wifi.h"
#include "../core.h"
-#include "../pci.h"
#include "../base.h"
#include "../pci.h"
#include "reg.h"
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 812b5858f14a..36d1cb3aef8a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -599,7 +599,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct ieee80211_sta *sta = info->control.sta;
- u8 *pdesc = (u8 *) pdesc_tx;
+ u8 *pdesc = pdesc_tx;
u16 seq_number;
__le16 fc = hdr->frame_control;
u8 reserved_macid = 0;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a6049d7d51b3..aa970fc18a21 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -131,15 +131,19 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
u8 request;
u16 wvalue;
u16 index;
- __le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+ __le32 *data;
+ unsigned long flags;
+ spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
+ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+ rtlpriv->usb_data_index = 0;
+ data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
+ spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
request = REALTEK_USB_VENQT_CMD_REQ;
index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)addr;
_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
- if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
- rtlpriv->usb_data_index = 0;
return le32_to_cpu(*data);
}
@@ -951,6 +955,10 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
GFP_KERNEL);
if (!rtlpriv->usb_data)
return -ENOMEM;
+
+ /* this spin lock must be initialized early */
+ spin_lock_init(&rtlpriv->locks.usb_lock);
+
rtlpriv->usb_data_index = 0;
init_completion(&rtlpriv->firmware_loading_complete);
SET_IEEE80211_DEV(hw, &intf->dev);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index bd816aef26dc..cdaa21f29710 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1555,6 +1555,7 @@ struct rtl_locks {
spinlock_t rf_ps_lock;
spinlock_t rf_lock;
spinlock_t waitq_lock;
+ spinlock_t usb_lock;
/*Dual mac*/
spinlock_t cck_and_rw_pagea_lock;
diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig
index 1a72932e2213..be800119d0a3 100644
--- a/drivers/net/wireless/ti/Kconfig
+++ b/drivers/net/wireless/ti/Kconfig
@@ -8,6 +8,7 @@ menuconfig WL_TI
if WL_TI
source "drivers/net/wireless/ti/wl1251/Kconfig"
source "drivers/net/wireless/ti/wl12xx/Kconfig"
+source "drivers/net/wireless/ti/wl18xx/Kconfig"
# keep last for automatic dependencies
source "drivers/net/wireless/ti/wlcore/Kconfig"
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
index 0a565622d4a4..4d6823983c04 100644
--- a/drivers/net/wireless/ti/Makefile
+++ b/drivers/net/wireless/ti/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_WLCORE) += wlcore/
obj-$(CONFIG_WL12XX) += wl12xx/
obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/
obj-$(CONFIG_WL1251) += wl1251/
+obj-$(CONFIG_WL18XX) += wl18xx/
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index d14d69d733a0..6822b845efc1 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -277,15 +277,6 @@ int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
join->rx_config_options = wl->rx_config;
join->rx_filter_options = wl->rx_filter;
- /*
- * FIXME: disable temporarily all filters because after commit
- * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
- * association. The filter logic needs to be implemented properly
- * and once that is done, this hack can be removed.
- */
- join->rx_config_options = 0;
- join->rx_filter_options = WL1251_DEFAULT_RX_FILTER;
-
join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index d1afb8e3b2ef..3118c425bcf1 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -334,6 +334,12 @@ static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
if (ret < 0)
goto out;
+ /*
+ * Join command applies filters, and if we are not associated,
+ * BSSID filter must be disabled for association to work.
+ */
+ if (is_zero_ether_addr(wl->bssid))
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval,
dtim_period);
@@ -348,33 +354,6 @@ out:
return ret;
}
-static void wl1251_filter_work(struct work_struct *work)
-{
- struct wl1251 *wl =
- container_of(work, struct wl1251, filter_work);
- int ret;
-
- mutex_lock(&wl->mutex);
-
- if (wl->state == WL1251_STATE_OFF)
- goto out;
-
- ret = wl1251_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1251_join(wl, wl->bss_type, wl->channel, wl->beacon_int,
- wl->dtim_period);
- if (ret < 0)
- goto out_sleep;
-
-out_sleep:
- wl1251_ps_elp_sleep(wl);
-
-out:
- mutex_unlock(&wl->mutex);
-}
-
static void wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1251 *wl = hw->priv;
@@ -478,7 +457,6 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&wl->irq_work);
cancel_work_sync(&wl->tx_work);
- cancel_work_sync(&wl->filter_work);
cancel_delayed_work_sync(&wl->elp_work);
mutex_lock(&wl->mutex);
@@ -681,13 +659,15 @@ out:
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_CONTROL | \
- FIF_OTHER_BSS)
+ FIF_OTHER_BSS | \
+ FIF_PROBE_REQ)
static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed,
unsigned int *total,u64 multicast)
{
struct wl1251 *wl = hw->priv;
+ int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter");
@@ -698,7 +678,7 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
/* no filters which we support changed */
return;
- /* FIXME: wl->rx_config and wl->rx_filter are not protected */
+ mutex_lock(&wl->mutex);
wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
@@ -721,15 +701,25 @@ static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
}
if (*total & FIF_CONTROL)
wl->rx_filter |= CFG_RX_CTL_EN;
- if (*total & FIF_OTHER_BSS)
- wl->rx_filter &= ~CFG_BSSID_FILTER_EN;
+ if (*total & FIF_OTHER_BSS || is_zero_ether_addr(wl->bssid))
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+ if (*total & FIF_PROBE_REQ)
+ wl->rx_filter |= CFG_RX_PREQ_EN;
- /*
- * FIXME: workqueues need to be properly cancelled on stop(), for
- * now let's just disable changing the filter settings. They will
- * be updated any on config().
- */
- /* schedule_work(&wl->filter_work); */
+ if (wl->state == WL1251_STATE_OFF)
+ goto out;
+
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* send filters to firmware */
+ wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
+
+ wl1251_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
}
/* HW encryption */
@@ -1390,7 +1380,6 @@ struct ieee80211_hw *wl1251_alloc_hw(void)
skb_queue_head_init(&wl->tx_queue);
- INIT_WORK(&wl->filter_work, wl1251_filter_work);
INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
wl->channel = WL1251_DEFAULT_CHANNEL;
wl->scanning = false;
diff --git a/drivers/net/wireless/ti/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 9d8f5816c6f9..fd02060038de 100644
--- a/drivers/net/wireless/ti/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
@@ -315,7 +315,6 @@ struct wl1251 {
bool tx_queue_stopped;
struct work_struct tx_work;
- struct work_struct filter_work;
/* Pending TX frames */
struct sk_buff *tx_frames[16];
diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile
index 87f64b14db35..da509aa7d009 100644
--- a/drivers/net/wireless/ti/wl12xx/Makefile
+++ b/drivers/net/wireless/ti/wl12xx/Makefile
@@ -1,3 +1,3 @@
-wl12xx-objs = main.o cmd.o acx.o
+wl12xx-objs = main.o cmd.o acx.o debugfs.o
obj-$(CONFIG_WL12XX) += wl12xx.o
diff --git a/drivers/net/wireless/ti/wl12xx/acx.h b/drivers/net/wireless/ti/wl12xx/acx.h
index d1f5aba0afce..2a26868b837d 100644
--- a/drivers/net/wireless/ti/wl12xx/acx.h
+++ b/drivers/net/wireless/ti/wl12xx/acx.h
@@ -24,6 +24,21 @@
#define __WL12XX_ACX_H__
#include "../wlcore/wlcore.h"
+#include "../wlcore/acx.h"
+
+#define WL12XX_ACX_ALL_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_INIT_COMPLETE | \
+ WL1271_ACX_INTR_EVENT_A | \
+ WL1271_ACX_INTR_EVENT_B | \
+ WL1271_ACX_INTR_CMD_COMPLETE | \
+ WL1271_ACX_INTR_HW_AVAILABLE | \
+ WL1271_ACX_INTR_DATA)
+
+#define WL12XX_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_EVENT_A | \
+ WL1271_ACX_INTR_EVENT_B | \
+ WL1271_ACX_INTR_HW_AVAILABLE | \
+ WL1271_ACX_INTR_DATA)
struct wl1271_acx_host_config_bitmap {
struct acx_header header;
@@ -31,6 +46,228 @@ struct wl1271_acx_host_config_bitmap {
__le32 host_cfg_bitmap;
} __packed;
+struct wl12xx_acx_tx_statistics {
+ __le32 internal_desc_overflow;
+} __packed;
+
+struct wl12xx_acx_rx_statistics {
+ __le32 out_of_mem;
+ __le32 hdr_overflow;
+ __le32 hw_stuck;
+ __le32 dropped;
+ __le32 fcs_err;
+ __le32 xfr_hint_trig;
+ __le32 path_reset;
+ __le32 reset_counter;
+} __packed;
+
+struct wl12xx_acx_dma_statistics {
+ __le32 rx_requested;
+ __le32 rx_errors;
+ __le32 tx_requested;
+ __le32 tx_errors;
+} __packed;
+
+struct wl12xx_acx_isr_statistics {
+ /* host command complete */
+ __le32 cmd_cmplt;
+
+ /* fiqisr() */
+ __le32 fiqs;
+
+ /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
+ __le32 rx_headers;
+
+ /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
+ __le32 rx_completes;
+
+ /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
+ __le32 rx_mem_overflow;
+
+ /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
+ __le32 rx_rdys;
+
+ /* irqisr() */
+ __le32 irqs;
+
+ /* (INT_STS_ND & INT_TRIG_TX_PROC) */
+ __le32 tx_procs;
+
+ /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
+ __le32 decrypt_done;
+
+ /* (INT_STS_ND & INT_TRIG_DMA0) */
+ __le32 dma0_done;
+
+ /* (INT_STS_ND & INT_TRIG_DMA1) */
+ __le32 dma1_done;
+
+ /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
+ __le32 tx_exch_complete;
+
+ /* (INT_STS_ND & INT_TRIG_COMMAND) */
+ __le32 commands;
+
+ /* (INT_STS_ND & INT_TRIG_RX_PROC) */
+ __le32 rx_procs;
+
+ /* (INT_STS_ND & INT_TRIG_PM_802) */
+ __le32 hw_pm_mode_changes;
+
+ /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
+ __le32 host_acknowledges;
+
+ /* (INT_STS_ND & INT_TRIG_PM_PCI) */
+ __le32 pci_pm;
+
+ /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
+ __le32 wakeups;
+
+ /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
+ __le32 low_rssi;
+} __packed;
+
+struct wl12xx_acx_wep_statistics {
+ /* WEP address keys configured */
+ __le32 addr_key_count;
+
+ /* default keys configured */
+ __le32 default_key_count;
+
+ __le32 reserved;
+
+ /* number of times that WEP key not found on lookup */
+ __le32 key_not_found;
+
+ /* number of times that WEP key decryption failed */
+ __le32 decrypt_fail;
+
+ /* WEP packets decrypted */
+ __le32 packets;
+
+ /* WEP decrypt interrupts */
+ __le32 interrupt;
+} __packed;
+
+#define ACX_MISSED_BEACONS_SPREAD 10
+
+struct wl12xx_acx_pwr_statistics {
+ /* the amount of enters into power save mode (both PD & ELP) */
+ __le32 ps_enter;
+
+ /* the amount of enters into ELP mode */
+ __le32 elp_enter;
+
+ /* the amount of missing beacon interrupts to the host */
+ __le32 missing_bcns;
+
+ /* the amount of wake on host-access times */
+ __le32 wake_on_host;
+
+ /* the amount of wake on timer-expire */
+ __le32 wake_on_timer_exp;
+
+ /* the number of packets that were transmitted with PS bit set */
+ __le32 tx_with_ps;
+
+ /* the number of packets that were transmitted with PS bit clear */
+ __le32 tx_without_ps;
+
+ /* the number of received beacons */
+ __le32 rcvd_beacons;
+
+ /* the number of entering into PowerOn (power save off) */
+ __le32 power_save_off;
+
+ /* the number of entries into power save mode */
+ __le16 enable_ps;
+
+ /*
+ * the number of exits from power save, not including failed PS
+ * transitions
+ */
+ __le16 disable_ps;
+
+ /*
+ * the number of times the TSF counter was adjusted because
+ * of drift
+ */
+ __le32 fix_tsf_ps;
+
+ /* Gives statistics about the spread continuous missed beacons.
+ * The 16 LSB are dedicated for the PS mode.
+ * The 16 MSB are dedicated for the PS mode.
+ * cont_miss_bcns_spread[0] - single missed beacon.
+ * cont_miss_bcns_spread[1] - two continuous missed beacons.
+ * cont_miss_bcns_spread[2] - three continuous missed beacons.
+ * ...
+ * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
+ */
+ __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
+
+ /* the number of beacons in awake mode */
+ __le32 rcvd_awake_beacons;
+} __packed;
+
+struct wl12xx_acx_mic_statistics {
+ __le32 rx_pkts;
+ __le32 calc_failure;
+} __packed;
+
+struct wl12xx_acx_aes_statistics {
+ __le32 encrypt_fail;
+ __le32 decrypt_fail;
+ __le32 encrypt_packets;
+ __le32 decrypt_packets;
+ __le32 encrypt_interrupt;
+ __le32 decrypt_interrupt;
+} __packed;
+
+struct wl12xx_acx_event_statistics {
+ __le32 heart_beat;
+ __le32 calibration;
+ __le32 rx_mismatch;
+ __le32 rx_mem_empty;
+ __le32 rx_pool;
+ __le32 oom_late;
+ __le32 phy_transmit_error;
+ __le32 tx_stuck;
+} __packed;
+
+struct wl12xx_acx_ps_statistics {
+ __le32 pspoll_timeouts;
+ __le32 upsd_timeouts;
+ __le32 upsd_max_sptime;
+ __le32 upsd_max_apturn;
+ __le32 pspoll_max_apturn;
+ __le32 pspoll_utilization;
+ __le32 upsd_utilization;
+} __packed;
+
+struct wl12xx_acx_rxpipe_statistics {
+ __le32 rx_prep_beacon_drop;
+ __le32 descr_host_int_trig_rx_data;
+ __le32 beacon_buffer_thres_host_int_trig_rx_data;
+ __le32 missed_beacon_host_int_trig_rx_data;
+ __le32 tx_xfr_host_int_trig_rx_data;
+} __packed;
+
+struct wl12xx_acx_statistics {
+ struct acx_header header;
+
+ struct wl12xx_acx_tx_statistics tx;
+ struct wl12xx_acx_rx_statistics rx;
+ struct wl12xx_acx_dma_statistics dma;
+ struct wl12xx_acx_isr_statistics isr;
+ struct wl12xx_acx_wep_statistics wep;
+ struct wl12xx_acx_pwr_statistics pwr;
+ struct wl12xx_acx_aes_statistics aes;
+ struct wl12xx_acx_mic_statistics mic;
+ struct wl12xx_acx_event_statistics event;
+ struct wl12xx_acx_ps_statistics ps;
+ struct wl12xx_acx_rxpipe_statistics rxpipe;
+} __packed;
+
int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
#endif /* __WL12XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c
index 8ffaeb5f2147..622206241e83 100644
--- a/drivers/net/wireless/ti/wl12xx/cmd.c
+++ b/drivers/net/wireless/ti/wl12xx/cmd.c
@@ -65,6 +65,7 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
struct wl1271_general_parms_cmd *gen_parms;
struct wl1271_ini_general_params *gp =
&((struct wl1271_nvs_file *)wl->nvs)->general_params;
+ struct wl12xx_priv *priv = wl->priv;
bool answer = false;
int ret;
@@ -84,11 +85,15 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
memcpy(&gen_parms->general_params, gp, sizeof(*gp));
- if (gp->tx_bip_fem_auto_detect)
+ /* If we started in PLT FEM_DETECT mode, force auto detect */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ gen_parms->general_params.tx_bip_fem_auto_detect = true;
+
+ if (gen_parms->general_params.tx_bip_fem_auto_detect)
answer = true;
/* Override the REF CLK from the NVS with the one from platform data */
- gen_parms->general_params.ref_clock = wl->ref_clock;
+ gen_parms->general_params.ref_clock = priv->ref_clock;
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
@@ -105,8 +110,17 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
goto out;
}
+ /* If we are in calibrator based fem auto detect - save fem nr */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ wl->fem_manuf = gp->tx_bip_fem_manufacturer;
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
- answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+ answer == false ?
+ "manual" :
+ wl->plt_mode == PLT_FEM_DETECT ?
+ "calibrator_fem_detect" :
+ "auto",
+ gp->tx_bip_fem_manufacturer);
out:
kfree(gen_parms);
@@ -118,6 +132,7 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
struct wl128x_general_parms_cmd *gen_parms;
struct wl128x_ini_general_params *gp =
&((struct wl128x_nvs_file *)wl->nvs)->general_params;
+ struct wl12xx_priv *priv = wl->priv;
bool answer = false;
int ret;
@@ -137,12 +152,16 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
memcpy(&gen_parms->general_params, gp, sizeof(*gp));
- if (gp->tx_bip_fem_auto_detect)
+ /* If we started in PLT FEM_DETECT mode, force auto detect */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ gen_parms->general_params.tx_bip_fem_auto_detect = true;
+
+ if (gen_parms->general_params.tx_bip_fem_auto_detect)
answer = true;
/* Replace REF and TCXO CLKs with the ones from platform data */
- gen_parms->general_params.ref_clock = wl->ref_clock;
- gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
+ gen_parms->general_params.ref_clock = priv->ref_clock;
+ gen_parms->general_params.tcxo_ref_clock = priv->tcxo_clock;
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
@@ -159,8 +178,17 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
goto out;
}
+ /* If we are in calibrator based fem auto detect - save fem nr */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ wl->fem_manuf = gp->tx_bip_fem_manufacturer;
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
- answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
+ answer == false ?
+ "manual" :
+ wl->plt_mode == PLT_FEM_DETECT ?
+ "calibrator_fem_detect" :
+ "auto",
+ gp->tx_bip_fem_manufacturer);
out:
kfree(gen_parms);
@@ -172,7 +200,7 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
struct wl1271_radio_parms_cmd *radio_parms;
struct wl1271_ini_general_params *gp = &nvs->general_params;
- int ret;
+ int ret, fem_idx;
if (!wl->nvs)
return -ENODEV;
@@ -183,11 +211,13 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
+ fem_idx = WL12XX_FEM_TO_NVS_ENTRY(gp->tx_bip_fem_manufacturer);
+
/* 2.4GHz parameters */
memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
sizeof(struct wl1271_ini_band_params_2));
memcpy(&radio_parms->dyn_params_2,
- &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
+ &nvs->dyn_radio_params_2[fem_idx].params,
sizeof(struct wl1271_ini_fem_params_2));
/* 5GHz parameters */
@@ -195,7 +225,7 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
&nvs->stat_radio_params_5,
sizeof(struct wl1271_ini_band_params_5));
memcpy(&radio_parms->dyn_params_5,
- &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
+ &nvs->dyn_radio_params_5[fem_idx].params,
sizeof(struct wl1271_ini_fem_params_5));
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
@@ -214,7 +244,7 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
struct wl128x_radio_parms_cmd *radio_parms;
struct wl128x_ini_general_params *gp = &nvs->general_params;
- int ret;
+ int ret, fem_idx;
if (!wl->nvs)
return -ENODEV;
@@ -225,11 +255,13 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
+ fem_idx = WL12XX_FEM_TO_NVS_ENTRY(gp->tx_bip_fem_manufacturer);
+
/* 2.4GHz parameters */
memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
sizeof(struct wl128x_ini_band_params_2));
memcpy(&radio_parms->dyn_params_2,
- &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
+ &nvs->dyn_radio_params_2[fem_idx].params,
sizeof(struct wl128x_ini_fem_params_2));
/* 5GHz parameters */
@@ -237,7 +269,7 @@ int wl128x_cmd_radio_parms(struct wl1271 *wl)
&nvs->stat_radio_params_5,
sizeof(struct wl128x_ini_band_params_5));
memcpy(&radio_parms->dyn_params_5,
- &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
+ &nvs->dyn_radio_params_5[fem_idx].params,
sizeof(struct wl128x_ini_fem_params_5));
radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.c b/drivers/net/wireless/ti/wl12xx/debugfs.c
new file mode 100644
index 000000000000..0521cbf858cf
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/debugfs.c
@@ -0,0 +1,243 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/debugfs.h"
+#include "../wlcore/wlcore.h"
+
+#include "wl12xx.h"
+#include "acx.h"
+#include "debugfs.h"
+
+#define WL12XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
+ DEBUGFS_FWSTATS_FILE(a, b, c, wl12xx_acx_statistics)
+
+WL12XX_DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
+/* skipping wep.reserved */
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
+/* skipping cont_miss_bcns_spread for now */
+WL12XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
+
+WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data,
+ "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
+WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
+
+int wl12xx_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
+{
+ int ret = 0;
+ struct dentry *entry, *stats, *moddir;
+
+ moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
+ if (!moddir || IS_ERR(moddir)) {
+ entry = moddir;
+ goto err;
+ }
+
+ stats = debugfs_create_dir("fw_stats", moddir);
+ if (!stats || IS_ERR(stats)) {
+ entry = stats;
+ goto err;
+ }
+
+ DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
+
+ DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
+ DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
+ DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
+ DEBUGFS_FWSTATS_ADD(rx, dropped);
+ DEBUGFS_FWSTATS_ADD(rx, fcs_err);
+ DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
+ DEBUGFS_FWSTATS_ADD(rx, path_reset);
+ DEBUGFS_FWSTATS_ADD(rx, reset_counter);
+
+ DEBUGFS_FWSTATS_ADD(dma, rx_requested);
+ DEBUGFS_FWSTATS_ADD(dma, rx_errors);
+ DEBUGFS_FWSTATS_ADD(dma, tx_requested);
+ DEBUGFS_FWSTATS_ADD(dma, tx_errors);
+
+ DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
+ DEBUGFS_FWSTATS_ADD(isr, fiqs);
+ DEBUGFS_FWSTATS_ADD(isr, rx_headers);
+ DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
+ DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
+ DEBUGFS_FWSTATS_ADD(isr, irqs);
+ DEBUGFS_FWSTATS_ADD(isr, tx_procs);
+ DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
+ DEBUGFS_FWSTATS_ADD(isr, dma0_done);
+ DEBUGFS_FWSTATS_ADD(isr, dma1_done);
+ DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
+ DEBUGFS_FWSTATS_ADD(isr, commands);
+ DEBUGFS_FWSTATS_ADD(isr, rx_procs);
+ DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
+ DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
+ DEBUGFS_FWSTATS_ADD(isr, pci_pm);
+ DEBUGFS_FWSTATS_ADD(isr, wakeups);
+ DEBUGFS_FWSTATS_ADD(isr, low_rssi);
+
+ DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
+ DEBUGFS_FWSTATS_ADD(wep, default_key_count);
+ /* skipping wep.reserved */
+ DEBUGFS_FWSTATS_ADD(wep, key_not_found);
+ DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
+ DEBUGFS_FWSTATS_ADD(wep, packets);
+ DEBUGFS_FWSTATS_ADD(wep, interrupt);
+
+ DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
+ DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
+ DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
+ DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
+ DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
+ DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
+ DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
+ DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
+ /* skipping cont_miss_bcns_spread for now */
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
+
+ DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
+ DEBUGFS_FWSTATS_ADD(mic, calc_failure);
+
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
+ DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
+ DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
+
+ DEBUGFS_FWSTATS_ADD(event, heart_beat);
+ DEBUGFS_FWSTATS_ADD(event, calibration);
+ DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
+ DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
+ DEBUGFS_FWSTATS_ADD(event, rx_pool);
+ DEBUGFS_FWSTATS_ADD(event, oom_late);
+ DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
+ DEBUGFS_FWSTATS_ADD(event, tx_stuck);
+
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
+ DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
+ DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
+
+ DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
+ DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
+ DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
+
+ return 0;
+
+err:
+ if (IS_ERR(entry))
+ ret = PTR_ERR(entry);
+ else
+ ret = -ENOMEM;
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl12xx/debugfs.h b/drivers/net/wireless/ti/wl12xx/debugfs.h
new file mode 100644
index 000000000000..96898e291b78
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/debugfs.h
@@ -0,0 +1,28 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL12XX_DEBUGFS_H__
+#define __WL12XX_DEBUGFS_H__
+
+int wl12xx_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir);
+
+#endif /* __WL12XX_DEBUGFS_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index d7dd3def07b5..f429fc110cb0 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -39,6 +39,10 @@
#include "reg.h"
#include "cmd.h"
#include "acx.h"
+#include "debugfs.h"
+
+static char *fref_param;
+static char *tcxo_param;
static struct wlcore_conf wl12xx_conf = {
.sg = {
@@ -212,7 +216,7 @@ static struct wlcore_conf wl12xx_conf = {
.suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
.suspend_listen_interval = 3,
.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
- .bcn_filt_ie_count = 2,
+ .bcn_filt_ie_count = 3,
.bcn_filt_ie = {
[0] = {
.ie = WLAN_EID_CHANNEL_SWITCH,
@@ -222,9 +226,13 @@ static struct wlcore_conf wl12xx_conf = {
.ie = WLAN_EID_HT_OPERATION,
.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
},
+ [2] = {
+ .ie = WLAN_EID_ERP_INFO,
+ .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
+ },
},
- .synch_fail_thold = 10,
- .bss_lose_timeout = 100,
+ .synch_fail_thold = 12,
+ .bss_lose_timeout = 400,
.beacon_rx_timeout = 10000,
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
@@ -234,10 +242,11 @@ static struct wlcore_conf wl12xx_conf = {
.psm_entry_retries = 8,
.psm_exit_retries = 16,
.psm_entry_nullfunc_retries = 3,
- .dynamic_ps_timeout = 40,
+ .dynamic_ps_timeout = 1500,
.forced_ps = false,
.keep_alive_interval = 55000,
.max_listen_interval = 20,
+ .sta_sleep_auth = WL1271_PSM_ILLEGAL,
},
.itrim = {
.enable = false,
@@ -245,7 +254,7 @@ static struct wlcore_conf wl12xx_conf = {
},
.pm_config = {
.host_clk_settling_time = 5000,
- .host_fast_wakeup_support = false
+ .host_fast_wakeup_support = CONF_FAST_WAKEUP_DISABLE,
},
.roam_trigger = {
.trigger_pacing = 1,
@@ -305,8 +314,8 @@ static struct wlcore_conf wl12xx_conf = {
.swallow_period = 5,
.n_divider_fref_set_1 = 0xff, /* default */
.n_divider_fref_set_2 = 12,
- .m_divider_fref_set_1 = 148,
- .m_divider_fref_set_2 = 0xffff, /* default */
+ .m_divider_fref_set_1 = 0xffff,
+ .m_divider_fref_set_2 = 148, /* default */
.coex_pll_stabilization_time = 0xffffffff, /* default */
.ldo_stabilization_time = 0xffff, /* default */
.fm_disturbed_band_margin = 0xff, /* default */
@@ -581,19 +590,21 @@ static const int wl12xx_rtable[REG_TABLE_LEN] = {
};
/* TODO: maybe move to a new header file? */
-#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-4-mr.bin"
-#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-4-sr.bin"
-#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-4-plt.bin"
+#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-5-mr.bin"
+#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-5-sr.bin"
+#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-5-plt.bin"
-#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-4-mr.bin"
-#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin"
-#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin"
+#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-5-mr.bin"
+#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-5-sr.bin"
+#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-5-plt.bin"
-static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
+static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
{
+ int ret;
+
if (wl->chip.id != CHIP_ID_1283_PG20) {
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
- struct wl1271_rx_mem_pool_addr rx_mem_addr;
+ struct wl127x_rx_mem_pool_addr rx_mem_addr;
/*
* Choose the block we want to read
@@ -607,9 +618,13 @@ static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
rx_mem_addr.addr_extra = rx_mem_addr.addr + 4;
- wl1271_write(wl, WL1271_SLV_REG_DATA,
- &rx_mem_addr, sizeof(rx_mem_addr), false);
+ ret = wlcore_write(wl, WL1271_SLV_REG_DATA, &rx_mem_addr,
+ sizeof(rx_mem_addr), false);
+ if (ret < 0)
+ return ret;
}
+
+ return 0;
}
static int wl12xx_identify_chip(struct wl1271 *wl)
@@ -621,10 +636,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
wl->chip.id);
- /* clear the alignment quirk, since we don't support it */
- wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
-
- wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
+ wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
+ WLCORE_QUIRK_DUAL_PROBE_TMPL |
+ WLCORE_QUIRK_TKIP_HEADER_SPACE;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
@@ -633,16 +647,18 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
/* read data preparation is only needed by wl127x */
wl->ops->prepare_read = wl127x_prepare_read;
+ wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
+ WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
+ WL127X_MINOR_VER);
break;
case CHIP_ID_1271_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
- /* clear the alignment quirk, since we don't support it */
- wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
-
- wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
+ wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
+ WLCORE_QUIRK_DUAL_PROBE_TMPL |
+ WLCORE_QUIRK_TKIP_HEADER_SPACE;
wl->plt_fw_name = WL127X_PLT_FW_NAME;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
@@ -652,6 +668,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
/* read data preparation is only needed by wl127x */
wl->ops->prepare_read = wl127x_prepare_read;
+ wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
+ WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
+ WL127X_MINOR_VER);
break;
case CHIP_ID_1283_PG20:
@@ -660,6 +679,15 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
wl->plt_fw_name = WL128X_PLT_FW_NAME;
wl->sr_fw_name = WL128X_FW_NAME_SINGLE;
wl->mr_fw_name = WL128X_FW_NAME_MULTI;
+
+ /* wl128x requires TX blocksize alignment */
+ wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
+ WLCORE_QUIRK_DUAL_PROBE_TMPL |
+ WLCORE_QUIRK_TKIP_HEADER_SPACE;
+
+ wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER, WL128X_IFTYPE_VER,
+ WL128X_MAJOR_VER, WL128X_SUBTYPE_VER,
+ WL128X_MINOR_VER);
break;
case CHIP_ID_1283_PG10:
default:
@@ -672,64 +700,95 @@ out:
return ret;
}
-static void wl12xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
+static int __must_check wl12xx_top_reg_write(struct wl1271 *wl, int addr,
+ u16 val)
{
+ int ret;
+
/* write address >> 1 + 0x30000 to OCP_POR_CTR */
addr = (addr >> 1) + 0x30000;
- wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr);
+ ret = wlcore_write32(wl, WL12XX_OCP_POR_CTR, addr);
+ if (ret < 0)
+ goto out;
/* write value to OCP_POR_WDATA */
- wl1271_write32(wl, WL12XX_OCP_DATA_WRITE, val);
+ ret = wlcore_write32(wl, WL12XX_OCP_DATA_WRITE, val);
+ if (ret < 0)
+ goto out;
/* write 1 to OCP_CMD */
- wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE);
+ ret = wlcore_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE);
+ if (ret < 0)
+ goto out;
+
+out:
+ return ret;
}
-static u16 wl12xx_top_reg_read(struct wl1271 *wl, int addr)
+static int __must_check wl12xx_top_reg_read(struct wl1271 *wl, int addr,
+ u16 *out)
{
u32 val;
int timeout = OCP_CMD_LOOP;
+ int ret;
/* write address >> 1 + 0x30000 to OCP_POR_CTR */
addr = (addr >> 1) + 0x30000;
- wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr);
+ ret = wlcore_write32(wl, WL12XX_OCP_POR_CTR, addr);
+ if (ret < 0)
+ return ret;
/* write 2 to OCP_CMD */
- wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ);
+ ret = wlcore_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ);
+ if (ret < 0)
+ return ret;
/* poll for data ready */
do {
- val = wl1271_read32(wl, WL12XX_OCP_DATA_READ);
+ ret = wlcore_read32(wl, WL12XX_OCP_DATA_READ, &val);
+ if (ret < 0)
+ return ret;
} while (!(val & OCP_READY_MASK) && --timeout);
if (!timeout) {
wl1271_warning("Top register access timed out.");
- return 0xffff;
+ return -ETIMEDOUT;
}
/* check data status and return if OK */
- if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
- return val & 0xffff;
- else {
+ if ((val & OCP_STATUS_MASK) != OCP_STATUS_OK) {
wl1271_warning("Top register access returned error.");
- return 0xffff;
+ return -EIO;
}
+
+ if (out)
+ *out = val & 0xffff;
+
+ return 0;
}
static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
{
u16 spare_reg;
+ int ret;
/* Mask bits [2] & [8:4] in the sys_clk_cfg register */
- spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG);
+ ret = wl12xx_top_reg_read(wl, WL_SPARE_REG, &spare_reg);
+ if (ret < 0)
+ return ret;
+
if (spare_reg == 0xFFFF)
return -EFAULT;
spare_reg |= (BIT(3) | BIT(5) | BIT(6));
- wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+ ret = wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+ if (ret < 0)
+ return ret;
/* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
- wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG,
- WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
+ ret = wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG,
+ WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
+ if (ret < 0)
+ return ret;
/* Delay execution for 15msec, to let the HW settle */
mdelay(15);
@@ -740,8 +799,12 @@ static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
{
u16 tcxo_detection;
+ int ret;
+
+ ret = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG, &tcxo_detection);
+ if (ret < 0)
+ return false;
- tcxo_detection = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG);
if (tcxo_detection & TCXO_DET_FAILED)
return false;
@@ -751,8 +814,12 @@ static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
static bool wl128x_is_fref_valid(struct wl1271 *wl)
{
u16 fref_detection;
+ int ret;
+
+ ret = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG, &fref_detection);
+ if (ret < 0)
+ return false;
- fref_detection = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG);
if (fref_detection & FREF_CLK_DETECT_FAIL)
return false;
@@ -761,11 +828,21 @@ static bool wl128x_is_fref_valid(struct wl1271 *wl)
static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
{
- wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
- wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
- wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
+ int ret;
- return 0;
+ ret = wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG,
+ MCS_PLL_CONFIG_REG_VAL);
+
+out:
+ return ret;
}
static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
@@ -773,30 +850,40 @@ static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
u16 spare_reg;
u16 pll_config;
u8 input_freq;
+ struct wl12xx_priv *priv = wl->priv;
+ int ret;
/* Mask bits [3:1] in the sys_clk_cfg register */
- spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG);
+ ret = wl12xx_top_reg_read(wl, WL_SPARE_REG, &spare_reg);
+ if (ret < 0)
+ return ret;
+
if (spare_reg == 0xFFFF)
return -EFAULT;
spare_reg |= BIT(2);
- wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+ ret = wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
+ if (ret < 0)
+ return ret;
/* Handle special cases of the TCXO clock */
- if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
- wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
+ if (priv->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
+ priv->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
return wl128x_manually_configure_mcs_pll(wl);
/* Set the input frequency according to the selected clock source */
input_freq = (clk & 1) + 1;
- pll_config = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG);
+ ret = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG, &pll_config);
+ if (ret < 0)
+ return ret;
+
if (pll_config == 0xFFFF)
return -EFAULT;
pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
pll_config |= MCS_PLL_ENABLE_HP;
- wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
+ ret = wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
- return 0;
+ return ret;
}
/*
@@ -808,26 +895,31 @@ static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
*/
static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
{
+ struct wl12xx_priv *priv = wl->priv;
u16 sys_clk_cfg;
+ int ret;
/* For XTAL-only modes, FREF will be used after switching from TCXO */
- if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
- wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
+ if (priv->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
+ priv->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
if (!wl128x_switch_tcxo_to_fref(wl))
return -EINVAL;
goto fref_clk;
}
/* Query the HW, to determine which clock source we should use */
- sys_clk_cfg = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG);
+ ret = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG, &sys_clk_cfg);
+ if (ret < 0)
+ return ret;
+
if (sys_clk_cfg == 0xFFFF)
return -EINVAL;
if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
goto fref_clk;
/* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
- if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
- wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
+ if (priv->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
+ priv->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
if (!wl128x_switch_tcxo_to_fref(wl))
return -EINVAL;
goto fref_clk;
@@ -836,14 +928,14 @@ static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
/* TCXO clock is selected */
if (!wl128x_is_tcxo_valid(wl))
return -EINVAL;
- *selected_clock = wl->tcxo_clock;
+ *selected_clock = priv->tcxo_clock;
goto config_mcs_pll;
fref_clk:
/* FREF clock is selected */
if (!wl128x_is_fref_valid(wl))
return -EINVAL;
- *selected_clock = wl->ref_clock;
+ *selected_clock = priv->ref_clock;
config_mcs_pll:
return wl128x_configure_mcs_pll(wl, *selected_clock);
@@ -851,69 +943,98 @@ config_mcs_pll:
static int wl127x_boot_clk(struct wl1271 *wl)
{
+ struct wl12xx_priv *priv = wl->priv;
u32 pause;
u32 clk;
+ int ret;
if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION;
- if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
- wl->ref_clock == CONF_REF_CLK_38_4_E ||
- wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
+ if (priv->ref_clock == CONF_REF_CLK_19_2_E ||
+ priv->ref_clock == CONF_REF_CLK_38_4_E ||
+ priv->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
/* ref clk: 19.2/38.4/38.4-XTAL */
clk = 0x3;
- else if (wl->ref_clock == CONF_REF_CLK_26_E ||
- wl->ref_clock == CONF_REF_CLK_52_E)
+ else if (priv->ref_clock == CONF_REF_CLK_26_E ||
+ priv->ref_clock == CONF_REF_CLK_26_M_XTAL ||
+ priv->ref_clock == CONF_REF_CLK_52_E)
/* ref clk: 26/52 */
clk = 0x5;
else
return -EINVAL;
- if (wl->ref_clock != CONF_REF_CLK_19_2_E) {
+ if (priv->ref_clock != CONF_REF_CLK_19_2_E) {
u16 val;
/* Set clock type (open drain) */
- val = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE);
+ ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE, &val);
+ if (ret < 0)
+ goto out;
+
val &= FREF_CLK_TYPE_BITS;
- wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
+ ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
+ if (ret < 0)
+ goto out;
/* Set clock pull mode (no pull) */
- val = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL);
+ ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL, &val);
+ if (ret < 0)
+ goto out;
+
val |= NO_PULL;
- wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val);
+ ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val);
+ if (ret < 0)
+ goto out;
} else {
u16 val;
/* Set clock polarity */
- val = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY);
+ ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY, &val);
+ if (ret < 0)
+ goto out;
+
val &= FREF_CLK_POLARITY_BITS;
val |= CLK_REQ_OUTN_SEL;
- wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
+ ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
+ if (ret < 0)
+ goto out;
}
- wl1271_write32(wl, WL12XX_PLL_PARAMETERS, clk);
+ ret = wlcore_write32(wl, WL12XX_PLL_PARAMETERS, clk);
+ if (ret < 0)
+ goto out;
- pause = wl1271_read32(wl, WL12XX_PLL_PARAMETERS);
+ ret = wlcore_read32(wl, WL12XX_PLL_PARAMETERS, &pause);
+ if (ret < 0)
+ goto out;
wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
pause &= ~(WU_COUNTER_PAUSE_VAL);
pause |= WU_COUNTER_PAUSE_VAL;
- wl1271_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause);
+ ret = wlcore_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause);
- return 0;
+out:
+ return ret;
}
static int wl1271_boot_soft_reset(struct wl1271 *wl)
{
unsigned long timeout;
u32 boot_data;
+ int ret = 0;
/* perform soft reset */
- wl1271_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
+ ret = wlcore_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
+ if (ret < 0)
+ goto out;
/* SOFT_RESET is self clearing */
timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
while (1) {
- boot_data = wl1271_read32(wl, WL12XX_SLV_SOFT_RESET);
+ ret = wlcore_read32(wl, WL12XX_SLV_SOFT_RESET, &boot_data);
+ if (ret < 0)
+ goto out;
+
wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
break;
@@ -929,16 +1050,20 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
}
/* disable Rx/Tx */
- wl1271_write32(wl, WL12XX_ENABLE, 0x0);
+ ret = wlcore_write32(wl, WL12XX_ENABLE, 0x0);
+ if (ret < 0)
+ goto out;
/* disable auto calibration on start*/
- wl1271_write32(wl, WL12XX_SPARE_A2, 0xffff);
+ ret = wlcore_write32(wl, WL12XX_SPARE_A2, 0xffff);
- return 0;
+out:
+ return ret;
}
static int wl12xx_pre_boot(struct wl1271 *wl)
{
+ struct wl12xx_priv *priv = wl->priv;
int ret = 0;
u32 clk;
int selected_clock = -1;
@@ -954,30 +1079,43 @@ static int wl12xx_pre_boot(struct wl1271 *wl)
}
/* Continue the ELP wake up sequence */
- wl1271_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ if (ret < 0)
+ goto out;
+
udelay(500);
- wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+ if (ret < 0)
+ goto out;
/* Read-modify-write DRPW_SCRATCH_START register (see next state)
to be used by DRPw FW. The RTRIM value will be added by the FW
before taking DRPw out of reset */
- clk = wl1271_read32(wl, WL12XX_DRPW_SCRATCH_START);
+ ret = wlcore_read32(wl, WL12XX_DRPW_SCRATCH_START, &clk);
+ if (ret < 0)
+ goto out;
wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
if (wl->chip.id == CHIP_ID_1283_PG20)
clk |= ((selected_clock & 0x3) << 1) << 4;
else
- clk |= (wl->ref_clock << 1) << 4;
+ clk |= (priv->ref_clock << 1) << 4;
- wl1271_write32(wl, WL12XX_DRPW_SCRATCH_START, clk);
+ ret = wlcore_write32(wl, WL12XX_DRPW_SCRATCH_START, clk);
+ if (ret < 0)
+ goto out;
- wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+ if (ret < 0)
+ goto out;
/* Disable interrupts */
- wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+ if (ret < 0)
+ goto out;
ret = wl1271_boot_soft_reset(wl);
if (ret < 0)
@@ -987,47 +1125,72 @@ out:
return ret;
}
-static void wl12xx_pre_upload(struct wl1271 *wl)
+static int wl12xx_pre_upload(struct wl1271 *wl)
{
u32 tmp;
+ u16 polarity;
+ int ret;
/* write firmware's last address (ie. it's length) to
* ACX_EEPROMLESS_IND_REG */
wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
- wl1271_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND);
+ ret = wlcore_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND);
+ if (ret < 0)
+ goto out;
- tmp = wlcore_read_reg(wl, REG_CHIP_ID_B);
+ ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &tmp);
+ if (ret < 0)
+ goto out;
wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
/* 6. read the EEPROM parameters */
- tmp = wl1271_read32(wl, WL12XX_SCR_PAD2);
+ ret = wlcore_read32(wl, WL12XX_SCR_PAD2, &tmp);
+ if (ret < 0)
+ goto out;
/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
* to upload_fw) */
- if (wl->chip.id == CHIP_ID_1283_PG20)
- wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
-}
-
-static void wl12xx_enable_interrupts(struct wl1271 *wl)
-{
- u32 polarity;
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ ret = wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
+ if (ret < 0)
+ goto out;
+ }
- polarity = wl12xx_top_reg_read(wl, OCP_REG_POLARITY);
+ /* polarity must be set before the firmware is loaded */
+ ret = wl12xx_top_reg_read(wl, OCP_REG_POLARITY, &polarity);
+ if (ret < 0)
+ goto out;
/* We use HIGH polarity, so unset the LOW bit */
polarity &= ~POLARITY_LOW;
- wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity);
+ ret = wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity);
+
+out:
+ return ret;
+}
- wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_ALL_EVENTS_VECTOR);
+static int wl12xx_enable_interrupts(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
+ WL12XX_ACX_ALL_EVENTS_VECTOR);
+ if (ret < 0)
+ goto out;
wlcore_enable_interrupts(wl);
- wlcore_write_reg(wl, REG_INTERRUPT_MASK,
- WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
+ WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK));
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
- wl1271_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
+out:
+ return ret;
}
static int wl12xx_boot(struct wl1271 *wl)
@@ -1042,7 +1205,9 @@ static int wl12xx_boot(struct wl1271 *wl)
if (ret < 0)
goto out;
- wl12xx_pre_upload(wl);
+ ret = wl12xx_pre_upload(wl);
+ if (ret < 0)
+ goto out;
ret = wlcore_boot_upload_firmware(wl);
if (ret < 0)
@@ -1052,22 +1217,30 @@ static int wl12xx_boot(struct wl1271 *wl)
if (ret < 0)
goto out;
- wl12xx_enable_interrupts(wl);
+ ret = wl12xx_enable_interrupts(wl);
out:
return ret;
}
-static void wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
+static int wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
void *buf, size_t len)
{
- wl1271_write(wl, cmd_box_addr, buf, len, false);
- wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD);
+ int ret;
+
+ ret = wlcore_write(wl, cmd_box_addr, buf, len, false);
+ if (ret < 0)
+ return ret;
+
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD);
+
+ return ret;
}
-static void wl12xx_ack_event(struct wl1271 *wl)
+static int wl12xx_ack_event(struct wl1271 *wl)
{
- wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_EVENT_ACK);
+ return wlcore_write_reg(wl, REG_INTERRUPT_TRIG,
+ WL12XX_INTR_TRIG_EVENT_ACK);
}
static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
@@ -1147,12 +1320,13 @@ static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
return data_len - sizeof(*desc) - desc->pad_len;
}
-static void wl12xx_tx_delayed_compl(struct wl1271 *wl)
+static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
{
- if (wl->fw_status->tx_results_counter == (wl->tx_results_count & 0xff))
- return;
+ if (wl->fw_status_1->tx_results_counter ==
+ (wl->tx_results_count & 0xff))
+ return 0;
- wl1271_tx_complete(wl);
+ return wlcore_tx_complete(wl);
}
static int wl12xx_hw_init(struct wl1271 *wl)
@@ -1165,6 +1339,14 @@ static int wl12xx_hw_init(struct wl1271 *wl)
ret = wl128x_cmd_general_parms(wl);
if (ret < 0)
goto out;
+
+ /*
+ * If we are in calibrator based auto detect then we got the FEM nr
+ * in wl->fem_manuf. No need to continue further
+ */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ goto out;
+
ret = wl128x_cmd_radio_parms(wl);
if (ret < 0)
goto out;
@@ -1181,6 +1363,14 @@ static int wl12xx_hw_init(struct wl1271 *wl)
ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
goto out;
+
+ /*
+ * If we are in calibrator based auto detect then we got the FEM nr
+ * in wl->fem_manuf. No need to continue further
+ */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ goto out;
+
ret = wl1271_cmd_radio_parms(wl);
if (ret < 0)
goto out;
@@ -1253,45 +1443,151 @@ static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
return supported;
}
-static void wl12xx_get_fuse_mac(struct wl1271 *wl)
+static int wl12xx_get_fuse_mac(struct wl1271 *wl)
{
u32 mac1, mac2;
+ int ret;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+ if (ret < 0)
+ goto out;
- wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
+ ret = wlcore_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1, &mac1);
+ if (ret < 0)
+ goto out;
- mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1);
- mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2);
+ ret = wlcore_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2, &mac2);
+ if (ret < 0)
+ goto out;
/* these are the two parts of the BD_ADDR */
wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
((mac1 & 0xff000000) >> 24);
wl->fuse_nic_addr = mac1 & 0xffffff;
- wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
+
+out:
+ return ret;
}
-static s8 wl12xx_get_pg_ver(struct wl1271 *wl)
+static int wl12xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
{
- u32 die_info;
+ u16 die_info;
+ int ret;
if (wl->chip.id == CHIP_ID_1283_PG20)
- die_info = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
+ ret = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1,
+ &die_info);
else
- die_info = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
+ ret = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1,
+ &die_info);
- return (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET;
+ if (ret >= 0 && ver)
+ *ver = (s8)((die_info & PG_VER_MASK) >> PG_VER_OFFSET);
+
+ return ret;
}
-static void wl12xx_get_mac(struct wl1271 *wl)
+static int wl12xx_get_mac(struct wl1271 *wl)
{
if (wl12xx_mac_in_fuse(wl))
- wl12xx_get_fuse_mac(wl);
+ return wl12xx_get_fuse_mac(wl);
+
+ return 0;
+}
+
+static void wl12xx_set_tx_desc_csum(struct wl1271 *wl,
+ struct wl1271_tx_hw_descr *desc,
+ struct sk_buff *skb)
+{
+ desc->wl12xx_reserved = 0;
+}
+
+static int wl12xx_plt_init(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl->ops->boot(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl->ops->hw_init(wl);
+ if (ret < 0)
+ goto out_irq_disable;
+
+ /*
+ * If we are in calibrator based auto detect then we got the FEM nr
+ * in wl->fem_manuf. No need to continue further
+ */
+ if (wl->plt_mode == PLT_FEM_DETECT)
+ goto out;
+
+ ret = wl1271_acx_init_mem_config(wl);
+ if (ret < 0)
+ goto out_irq_disable;
+
+ ret = wl12xx_acx_mem_cfg(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Enable data path */
+ ret = wl1271_cmd_data_path(wl, 1);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Configure for CAM power saving (ie. always active) */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* configure PM */
+ ret = wl1271_acx_pm_config(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ goto out;
+
+out_free_memmap:
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
+out_irq_disable:
+ mutex_unlock(&wl->mutex);
+ /* Unlocking the mutex in the middle of handling is
+ inherently unsafe. In this case we deem it safe to do,
+ because we need to let any possibly pending IRQ out of
+ the system (and while we are WL1271_STATE_OFF the IRQ
+ work function will not do anything.) Also, any other
+ possible concurrent operations will fail due to the
+ current state, hence the wl1271 struct should be safe. */
+ wlcore_disable_interrupts(wl);
+ mutex_lock(&wl->mutex);
+out:
+ return ret;
+}
+
+static int wl12xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
+{
+ if (is_gem)
+ return WL12XX_TX_HW_BLOCK_GEM_SPARE;
+
+ return WL12XX_TX_HW_BLOCK_SPARE_DEFAULT;
+}
+
+static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ return wlcore_set_key(wl, cmd, vif, sta, key_conf);
}
static struct wlcore_ops wl12xx_ops = {
.identify_chip = wl12xx_identify_chip,
.identify_fw = wl12xx_identify_fw,
.boot = wl12xx_boot,
+ .plt_init = wl12xx_plt_init,
.trigger_cmd = wl12xx_trigger_cmd,
.ack_event = wl12xx_ack_event,
.calc_tx_blocks = wl12xx_calc_tx_blocks,
@@ -1306,6 +1602,13 @@ static struct wlcore_ops wl12xx_ops = {
.sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
.get_pg_ver = wl12xx_get_pg_ver,
.get_mac = wl12xx_get_mac,
+ .set_tx_desc_csum = wl12xx_set_tx_desc_csum,
+ .set_rx_csum = NULL,
+ .ap_get_mimo_wide_rate_mask = NULL,
+ .debugfs_init = wl12xx_debugfs_add_files,
+ .get_spare_blocks = wl12xx_get_spare_blocks,
+ .set_key = wl12xx_set_key,
+ .pre_pkt_send = NULL,
};
static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
@@ -1323,6 +1626,7 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
static int __devinit wl12xx_probe(struct platform_device *pdev)
{
+ struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
struct wl1271 *wl;
struct ieee80211_hw *hw;
struct wl12xx_priv *priv;
@@ -1334,19 +1638,63 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
}
wl = hw->priv;
+ priv = wl->priv;
wl->ops = &wl12xx_ops;
wl->ptable = wl12xx_ptable;
wl->rtable = wl12xx_rtable;
wl->num_tx_desc = 16;
- wl->normal_tx_spare = WL12XX_TX_HW_BLOCK_SPARE_DEFAULT;
- wl->gem_tx_spare = WL12XX_TX_HW_BLOCK_GEM_SPARE;
+ wl->num_rx_desc = 8;
wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
wl->fw_status_priv_len = 0;
- memcpy(&wl->ht_cap, &wl12xx_ht_cap, sizeof(wl12xx_ht_cap));
+ wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
wl12xx_conf_init(wl);
+ if (!fref_param) {
+ priv->ref_clock = pdata->board_ref_clock;
+ } else {
+ if (!strcmp(fref_param, "19.2"))
+ priv->ref_clock = WL12XX_REFCLOCK_19;
+ else if (!strcmp(fref_param, "26"))
+ priv->ref_clock = WL12XX_REFCLOCK_26;
+ else if (!strcmp(fref_param, "26x"))
+ priv->ref_clock = WL12XX_REFCLOCK_26_XTAL;
+ else if (!strcmp(fref_param, "38.4"))
+ priv->ref_clock = WL12XX_REFCLOCK_38;
+ else if (!strcmp(fref_param, "38.4x"))
+ priv->ref_clock = WL12XX_REFCLOCK_38_XTAL;
+ else if (!strcmp(fref_param, "52"))
+ priv->ref_clock = WL12XX_REFCLOCK_52;
+ else
+ wl1271_error("Invalid fref parameter %s", fref_param);
+ }
+
+ if (!tcxo_param) {
+ priv->tcxo_clock = pdata->board_tcxo_clock;
+ } else {
+ if (!strcmp(tcxo_param, "19.2"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_19_2;
+ else if (!strcmp(tcxo_param, "26"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_26;
+ else if (!strcmp(tcxo_param, "38.4"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_38_4;
+ else if (!strcmp(tcxo_param, "52"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_52;
+ else if (!strcmp(tcxo_param, "16.368"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_16_368;
+ else if (!strcmp(tcxo_param, "32.736"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_32_736;
+ else if (!strcmp(tcxo_param, "16.8"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_16_8;
+ else if (!strcmp(tcxo_param, "33.6"))
+ priv->tcxo_clock = WL12XX_TCXOCLOCK_33_6;
+ else
+ wl1271_error("Invalid tcxo parameter %s", tcxo_param);
+ }
+
return wlcore_probe(wl, pdev);
}
@@ -1378,6 +1726,13 @@ static void __exit wl12xx_exit(void)
}
module_exit(wl12xx_exit);
+module_param_named(fref, fref_param, charp, 0);
+MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52");
+
+module_param_named(tcxo, tcxo_param, charp, 0);
+MODULE_PARM_DESC(tcxo,
+ "TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6");
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 74cd332e23ef..26990fb4edea 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -24,8 +24,30 @@
#include "conf.h"
+/* minimum FW required for driver for wl127x */
+#define WL127X_CHIP_VER 6
+#define WL127X_IFTYPE_VER 3
+#define WL127X_MAJOR_VER 10
+#define WL127X_SUBTYPE_VER 2
+#define WL127X_MINOR_VER 115
+
+/* minimum FW required for driver for wl128x */
+#define WL128X_CHIP_VER 7
+#define WL128X_IFTYPE_VER 3
+#define WL128X_MAJOR_VER 10
+#define WL128X_SUBTYPE_VER 2
+#define WL128X_MINOR_VER 115
+
+struct wl127x_rx_mem_pool_addr {
+ u32 addr;
+ u32 addr_extra;
+};
+
struct wl12xx_priv {
struct wl12xx_priv_conf conf;
+
+ int ref_clock;
+ int tcxo_clock;
};
#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/Kconfig b/drivers/net/wireless/ti/wl18xx/Kconfig
new file mode 100644
index 000000000000..1cfdb2548821
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/Kconfig
@@ -0,0 +1,7 @@
+config WL18XX
+ tristate "TI wl18xx support"
+ depends on MAC80211
+ select WLCORE
+ ---help---
+ This module adds support for wireless adapters based on TI
+ WiLink 8 chipsets.
diff --git a/drivers/net/wireless/ti/wl18xx/Makefile b/drivers/net/wireless/ti/wl18xx/Makefile
new file mode 100644
index 000000000000..67c098734c7f
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/Makefile
@@ -0,0 +1,3 @@
+wl18xx-objs = main.o acx.o tx.o io.o debugfs.o
+
+obj-$(CONFIG_WL18XX) += wl18xx.o
diff --git a/drivers/net/wireless/ti/wl18xx/acx.c b/drivers/net/wireless/ti/wl18xx/acx.c
new file mode 100644
index 000000000000..72840e23bf59
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/acx.c
@@ -0,0 +1,111 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/acx.h"
+
+#include "acx.h"
+
+int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
+ u32 sdio_blk_size, u32 extra_mem_blks,
+ u32 len_field_size)
+{
+ struct wl18xx_acx_host_config_bitmap *bitmap_conf;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx cfg bitmap %d blk %d spare %d field %d",
+ host_cfg_bitmap, sdio_blk_size, extra_mem_blks,
+ len_field_size);
+
+ bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
+ if (!bitmap_conf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
+ bitmap_conf->host_sdio_block_size = cpu_to_le32(sdio_blk_size);
+ bitmap_conf->extra_mem_blocks = cpu_to_le32(extra_mem_blks);
+ bitmap_conf->length_field_size = cpu_to_le32(len_field_size);
+
+ ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
+ bitmap_conf, sizeof(*bitmap_conf));
+ if (ret < 0) {
+ wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(bitmap_conf);
+
+ return ret;
+}
+
+int wl18xx_acx_set_checksum_state(struct wl1271 *wl)
+{
+ struct wl18xx_acx_checksum_state *acx;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx checksum state");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->checksum_state = CHECKSUM_OFFLOAD_ENABLED;
+
+ ret = wl1271_cmd_configure(wl, ACX_CHECKSUM_CONFIG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("failed to set Tx checksum state: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl18xx_acx_clear_statistics(struct wl1271 *wl)
+{
+ struct wl18xx_acx_clear_statistics *acx;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx clear statistics");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wl1271_cmd_configure(wl, ACX_CLEAR_STATISTICS, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("failed to clear firmware statistics: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/acx.h b/drivers/net/wireless/ti/wl18xx/acx.h
new file mode 100644
index 000000000000..e2609a6b7341
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/acx.h
@@ -0,0 +1,287 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_ACX_H__
+#define __WL18XX_ACX_H__
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/acx.h"
+
+enum {
+ ACX_CLEAR_STATISTICS = 0x0047,
+};
+
+/* numbers of bits the length field takes (add 1 for the actual number) */
+#define WL18XX_HOST_IF_LEN_SIZE_FIELD 15
+
+#define WL18XX_ACX_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_INIT_COMPLETE | \
+ WL1271_ACX_INTR_EVENT_A | \
+ WL1271_ACX_INTR_EVENT_B | \
+ WL1271_ACX_INTR_CMD_COMPLETE | \
+ WL1271_ACX_INTR_HW_AVAILABLE | \
+ WL1271_ACX_INTR_DATA | \
+ WL1271_ACX_SW_INTR_WATCHDOG)
+
+#define WL18XX_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_EVENT_A | \
+ WL1271_ACX_INTR_EVENT_B | \
+ WL1271_ACX_INTR_HW_AVAILABLE | \
+ WL1271_ACX_INTR_DATA | \
+ WL1271_ACX_SW_INTR_WATCHDOG)
+
+struct wl18xx_acx_host_config_bitmap {
+ struct acx_header header;
+
+ __le32 host_cfg_bitmap;
+
+ __le32 host_sdio_block_size;
+
+ /* extra mem blocks per frame in TX. */
+ __le32 extra_mem_blocks;
+
+ /*
+ * number of bits of the length field in the first TX word
+ * (up to 15 - for using the entire 16 bits).
+ */
+ __le32 length_field_size;
+
+} __packed;
+
+enum {
+ CHECKSUM_OFFLOAD_DISABLED = 0,
+ CHECKSUM_OFFLOAD_ENABLED = 1,
+ CHECKSUM_OFFLOAD_FAKE_RX = 2,
+ CHECKSUM_OFFLOAD_INVALID = 0xFF
+};
+
+struct wl18xx_acx_checksum_state {
+ struct acx_header header;
+
+ /* enum acx_checksum_state */
+ u8 checksum_state;
+ u8 pad[3];
+} __packed;
+
+
+struct wl18xx_acx_error_stats {
+ u32 error_frame;
+ u32 error_null_Frame_tx_start;
+ u32 error_numll_frame_cts_start;
+ u32 error_bar_retry;
+ u32 error_frame_cts_nul_flid;
+} __packed;
+
+struct wl18xx_acx_debug_stats {
+ u32 debug1;
+ u32 debug2;
+ u32 debug3;
+ u32 debug4;
+ u32 debug5;
+ u32 debug6;
+} __packed;
+
+struct wl18xx_acx_ring_stats {
+ u32 prepared_descs;
+ u32 tx_cmplt;
+} __packed;
+
+struct wl18xx_acx_tx_stats {
+ u32 tx_prepared_descs;
+ u32 tx_cmplt;
+ u32 tx_template_prepared;
+ u32 tx_data_prepared;
+ u32 tx_template_programmed;
+ u32 tx_data_programmed;
+ u32 tx_burst_programmed;
+ u32 tx_starts;
+ u32 tx_imm_resp;
+ u32 tx_start_templates;
+ u32 tx_start_int_templates;
+ u32 tx_start_fw_gen;
+ u32 tx_start_data;
+ u32 tx_start_null_frame;
+ u32 tx_exch;
+ u32 tx_retry_template;
+ u32 tx_retry_data;
+ u32 tx_exch_pending;
+ u32 tx_exch_expiry;
+ u32 tx_done_template;
+ u32 tx_done_data;
+ u32 tx_done_int_template;
+ u32 tx_frame_checksum;
+ u32 tx_checksum_result;
+ u32 frag_called;
+ u32 frag_mpdu_alloc_failed;
+ u32 frag_init_called;
+ u32 frag_in_process_called;
+ u32 frag_tkip_called;
+ u32 frag_key_not_found;
+ u32 frag_need_fragmentation;
+ u32 frag_bad_mblk_num;
+ u32 frag_failed;
+ u32 frag_cache_hit;
+ u32 frag_cache_miss;
+} __packed;
+
+struct wl18xx_acx_rx_stats {
+ u32 rx_beacon_early_term;
+ u32 rx_out_of_mpdu_nodes;
+ u32 rx_hdr_overflow;
+ u32 rx_dropped_frame;
+ u32 rx_done_stage;
+ u32 rx_done;
+ u32 rx_defrag;
+ u32 rx_defrag_end;
+ u32 rx_cmplt;
+ u32 rx_pre_complt;
+ u32 rx_cmplt_task;
+ u32 rx_phy_hdr;
+ u32 rx_timeout;
+ u32 rx_timeout_wa;
+ u32 rx_wa_density_dropped_frame;
+ u32 rx_wa_ba_not_expected;
+ u32 rx_frame_checksum;
+ u32 rx_checksum_result;
+ u32 defrag_called;
+ u32 defrag_init_called;
+ u32 defrag_in_process_called;
+ u32 defrag_tkip_called;
+ u32 defrag_need_defrag;
+ u32 defrag_decrypt_failed;
+ u32 decrypt_key_not_found;
+ u32 defrag_need_decrypt;
+ u32 rx_tkip_replays;
+} __packed;
+
+struct wl18xx_acx_isr_stats {
+ u32 irqs;
+} __packed;
+
+#define PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD 10
+
+struct wl18xx_acx_pwr_stats {
+ u32 missing_bcns_cnt;
+ u32 rcvd_bcns_cnt;
+ u32 connection_out_of_sync;
+ u32 cont_miss_bcns_spread[PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD];
+ u32 rcvd_awake_bcns_cnt;
+} __packed;
+
+struct wl18xx_acx_event_stats {
+ u32 calibration;
+ u32 rx_mismatch;
+ u32 rx_mem_empty;
+} __packed;
+
+struct wl18xx_acx_ps_poll_stats {
+ u32 ps_poll_timeouts;
+ u32 upsd_timeouts;
+ u32 upsd_max_ap_turn;
+ u32 ps_poll_max_ap_turn;
+ u32 ps_poll_utilization;
+ u32 upsd_utilization;
+} __packed;
+
+struct wl18xx_acx_rx_filter_stats {
+ u32 beacon_filter;
+ u32 arp_filter;
+ u32 mc_filter;
+ u32 dup_filter;
+ u32 data_filter;
+ u32 ibss_filter;
+ u32 protection_filter;
+ u32 accum_arp_pend_requests;
+ u32 max_arp_queue_dep;
+} __packed;
+
+struct wl18xx_acx_rx_rate_stats {
+ u32 rx_frames_per_rates[50];
+} __packed;
+
+#define AGGR_STATS_TX_AGG 16
+#define AGGR_STATS_TX_RATE 16
+#define AGGR_STATS_RX_SIZE_LEN 16
+
+struct wl18xx_acx_aggr_stats {
+ u32 tx_agg_vs_rate[AGGR_STATS_TX_AGG * AGGR_STATS_TX_RATE];
+ u32 rx_size[AGGR_STATS_RX_SIZE_LEN];
+} __packed;
+
+#define PIPE_STATS_HW_FIFO 11
+
+struct wl18xx_acx_pipeline_stats {
+ u32 hs_tx_stat_fifo_int;
+ u32 hs_rx_stat_fifo_int;
+ u32 tcp_tx_stat_fifo_int;
+ u32 tcp_rx_stat_fifo_int;
+ u32 enc_tx_stat_fifo_int;
+ u32 enc_rx_stat_fifo_int;
+ u32 rx_complete_stat_fifo_int;
+ u32 pre_proc_swi;
+ u32 post_proc_swi;
+ u32 sec_frag_swi;
+ u32 pre_to_defrag_swi;
+ u32 defrag_to_csum_swi;
+ u32 csum_to_rx_xfer_swi;
+ u32 dec_packet_in;
+ u32 dec_packet_in_fifo_full;
+ u32 dec_packet_out;
+ u32 cs_rx_packet_in;
+ u32 cs_rx_packet_out;
+ u16 pipeline_fifo_full[PIPE_STATS_HW_FIFO];
+} __packed;
+
+struct wl18xx_acx_mem_stats {
+ u32 rx_free_mem_blks;
+ u32 tx_free_mem_blks;
+ u32 fwlog_free_mem_blks;
+ u32 fw_gen_free_mem_blks;
+} __packed;
+
+struct wl18xx_acx_statistics {
+ struct acx_header header;
+
+ struct wl18xx_acx_error_stats error;
+ struct wl18xx_acx_debug_stats debug;
+ struct wl18xx_acx_tx_stats tx;
+ struct wl18xx_acx_rx_stats rx;
+ struct wl18xx_acx_isr_stats isr;
+ struct wl18xx_acx_pwr_stats pwr;
+ struct wl18xx_acx_ps_poll_stats ps_poll;
+ struct wl18xx_acx_rx_filter_stats rx_filter;
+ struct wl18xx_acx_rx_rate_stats rx_rate;
+ struct wl18xx_acx_aggr_stats aggr_size;
+ struct wl18xx_acx_pipeline_stats pipeline;
+ struct wl18xx_acx_mem_stats mem;
+} __packed;
+
+struct wl18xx_acx_clear_statistics {
+ struct acx_header header;
+};
+
+int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
+ u32 sdio_blk_size, u32 extra_mem_blks,
+ u32 len_field_size);
+int wl18xx_acx_set_checksum_state(struct wl1271 *wl);
+int wl18xx_acx_clear_statistics(struct wl1271 *wl);
+
+#endif /* __WL18XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/conf.h b/drivers/net/wireless/ti/wl18xx/conf.h
new file mode 100644
index 000000000000..4d426cc20274
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/conf.h
@@ -0,0 +1,111 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_CONF_H__
+#define __WL18XX_CONF_H__
+
+#define WL18XX_CONF_MAGIC 0x10e100ca
+#define WL18XX_CONF_VERSION (WLCORE_CONF_VERSION | 0x0003)
+#define WL18XX_CONF_MASK 0x0000ffff
+#define WL18XX_CONF_SIZE (WLCORE_CONF_SIZE + \
+ sizeof(struct wl18xx_priv_conf))
+
+#define NUM_OF_CHANNELS_11_ABG 150
+#define NUM_OF_CHANNELS_11_P 7
+#define WL18XX_NUM_OF_SUB_BANDS 9
+#define SRF_TABLE_LEN 16
+#define PIN_MUXING_SIZE 2
+
+struct wl18xx_mac_and_phy_params {
+ u8 phy_standalone;
+ u8 rdl;
+ u8 enable_clpc;
+ u8 enable_tx_low_pwr_on_siso_rdl;
+ u8 auto_detect;
+ u8 dedicated_fem;
+
+ u8 low_band_component;
+
+ /* Bit 0: One Hot, Bit 1: Control Enable, Bit 2: 1.8V, Bit 3: 3V */
+ u8 low_band_component_type;
+
+ u8 high_band_component;
+
+ /* Bit 0: One Hot, Bit 1: Control Enable, Bit 2: 1.8V, Bit 3: 3V */
+ u8 high_band_component_type;
+ u8 number_of_assembled_ant2_4;
+ u8 number_of_assembled_ant5;
+ u8 pin_muxing_platform_options[PIN_MUXING_SIZE];
+ u8 external_pa_dc2dc;
+ u8 tcxo_ldo_voltage;
+ u8 xtal_itrim_val;
+ u8 srf_state;
+ u8 srf1[SRF_TABLE_LEN];
+ u8 srf2[SRF_TABLE_LEN];
+ u8 srf3[SRF_TABLE_LEN];
+ u8 io_configuration;
+ u8 sdio_configuration;
+ u8 settings;
+ u8 rx_profile;
+ u8 per_chan_pwr_limit_arr_11abg[NUM_OF_CHANNELS_11_ABG];
+ u8 pwr_limit_reference_11_abg;
+ u8 per_chan_pwr_limit_arr_11p[NUM_OF_CHANNELS_11_P];
+ u8 pwr_limit_reference_11p;
+ u8 per_sub_band_tx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
+ u8 per_sub_band_rx_trace_loss[WL18XX_NUM_OF_SUB_BANDS];
+ u8 primary_clock_setting_time;
+ u8 clock_valid_on_wake_up;
+ u8 secondary_clock_setting_time;
+ u8 board_type;
+ /* enable point saturation */
+ u8 psat;
+ /* low/medium/high Tx power in dBm */
+ s8 low_power_val;
+ s8 med_power_val;
+ s8 high_power_val;
+ u8 padding[1];
+} __packed;
+
+enum wl18xx_ht_mode {
+ /* Default - use MIMO, fallback to SISO20 */
+ HT_MODE_DEFAULT = 0,
+
+ /* Wide - use SISO40 */
+ HT_MODE_WIDE = 1,
+
+ /* Use SISO20 */
+ HT_MODE_SISO20 = 2,
+};
+
+struct wl18xx_ht_settings {
+ /* DEFAULT / WIDE / SISO20 */
+ u8 mode;
+} __packed;
+
+struct wl18xx_priv_conf {
+ /* Module params structures */
+ struct wl18xx_ht_settings ht;
+
+ /* this structure is copied wholesale to FW */
+ struct wl18xx_mac_and_phy_params phy;
+} __packed;
+
+#endif /* __WL18XX_CONF_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
new file mode 100644
index 000000000000..3ce6f1039af3
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -0,0 +1,403 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Copyright (C) 2011-2012 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/debugfs.h"
+#include "../wlcore/wlcore.h"
+
+#include "wl18xx.h"
+#include "acx.h"
+#include "debugfs.h"
+
+#define WL18XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
+ DEBUGFS_FWSTATS_FILE(a, b, c, wl18xx_acx_statistics)
+#define WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c) \
+ DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
+
+
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug1, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug2, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug3, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug4, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug5, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(debug, debug6, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_null_Frame_tx_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_numll_frame_cts_start, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_bar_retry, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_cts_nul_flid, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_prepared, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_prepared, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_in_process_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_tkip_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_key_not_found, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_need_fragmentation, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_bad_mblk_num, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_failed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_hit, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_miss, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_beacon_early_term, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_out_of_mpdu_nodes, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_hdr_overflow, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_dropped_frame, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_done, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_defrag, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_defrag_end, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_pre_complt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_density_dropped_frame, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_wa_ba_not_expected, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_frame_checksum, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_checksum_result, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_tkip_called, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_defrag, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_decrypt_failed, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, missing_bcns_cnt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_bcns_cnt, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, connection_out_of_sync, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
+ PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
+WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
+
+
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_timeouts, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_timeouts, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_max_ap_turn, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_max_ap_turn, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, ps_poll_utilization, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(ps_poll, upsd_utilization, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, mc_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, dup_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, data_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, ibss_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
+ AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
+ AGGR_STATS_RX_SIZE_LEN);
+
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_tx_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, tcp_rx_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_proc_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_csum_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, csum_to_rx_xfer_swi, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_in, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, cs_rx_packet_out, "%u");
+
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
+ PIPE_STATS_HW_FIFO);
+
+WL18XX_DEBUGFS_FWSTATS_FILE(mem, rx_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(mem, tx_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(mem, fwlog_free_mem_blks, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE(mem, fw_gen_free_mem_blks, "%u");
+
+static ssize_t conf_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wl18xx_priv *priv = wl->priv;
+ struct wlcore_conf_header header;
+ char *buf, *pos;
+ size_t len;
+ int ret;
+
+ len = WL18XX_CONF_SIZE;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ header.magic = cpu_to_le32(WL18XX_CONF_MAGIC);
+ header.version = cpu_to_le32(WL18XX_CONF_VERSION);
+ header.checksum = 0;
+
+ mutex_lock(&wl->mutex);
+
+ pos = buf;
+ memcpy(pos, &header, sizeof(header));
+ pos += sizeof(header);
+ memcpy(pos, &wl->conf, sizeof(wl->conf));
+ pos += sizeof(wl->conf);
+ memcpy(pos, &priv->conf, sizeof(priv->conf));
+
+ mutex_unlock(&wl->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations conf_ops = {
+ .read = conf_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t clear_fw_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF)
+ goto out;
+
+ ret = wl18xx_acx_clear_statistics(wl);
+ if (ret < 0) {
+ count = ret;
+ goto out;
+ }
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations clear_fw_stats_ops = {
+ .write = clear_fw_stats_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+int wl18xx_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
+{
+ int ret = 0;
+ struct dentry *entry, *stats, *moddir;
+
+ moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
+ if (!moddir || IS_ERR(moddir)) {
+ entry = moddir;
+ goto err;
+ }
+
+ stats = debugfs_create_dir("fw_stats", moddir);
+ if (!stats || IS_ERR(stats)) {
+ entry = stats;
+ goto err;
+ }
+
+ DEBUGFS_ADD(clear_fw_stats, stats);
+
+ DEBUGFS_FWSTATS_ADD(debug, debug1);
+ DEBUGFS_FWSTATS_ADD(debug, debug2);
+ DEBUGFS_FWSTATS_ADD(debug, debug3);
+ DEBUGFS_FWSTATS_ADD(debug, debug4);
+ DEBUGFS_FWSTATS_ADD(debug, debug5);
+ DEBUGFS_FWSTATS_ADD(debug, debug6);
+
+ DEBUGFS_FWSTATS_ADD(error, error_frame);
+ DEBUGFS_FWSTATS_ADD(error, error_null_Frame_tx_start);
+ DEBUGFS_FWSTATS_ADD(error, error_numll_frame_cts_start);
+ DEBUGFS_FWSTATS_ADD(error, error_bar_retry);
+ DEBUGFS_FWSTATS_ADD(error, error_frame_cts_nul_flid);
+
+ DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
+ DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
+ DEBUGFS_FWSTATS_ADD(tx, tx_template_prepared);
+ DEBUGFS_FWSTATS_ADD(tx, tx_data_prepared);
+ DEBUGFS_FWSTATS_ADD(tx, tx_template_programmed);
+ DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
+ DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
+ DEBUGFS_FWSTATS_ADD(tx, tx_starts);
+ DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp);
+ DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
+ DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
+ DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
+ DEBUGFS_FWSTATS_ADD(tx, tx_start_data);
+ DEBUGFS_FWSTATS_ADD(tx, tx_start_null_frame);
+ DEBUGFS_FWSTATS_ADD(tx, tx_exch);
+ DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
+ DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
+ DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
+ DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
+ DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
+ DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
+ DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
+ DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum);
+ DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result);
+ DEBUGFS_FWSTATS_ADD(tx, frag_called);
+ DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
+ DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
+ DEBUGFS_FWSTATS_ADD(tx, frag_in_process_called);
+ DEBUGFS_FWSTATS_ADD(tx, frag_tkip_called);
+ DEBUGFS_FWSTATS_ADD(tx, frag_key_not_found);
+ DEBUGFS_FWSTATS_ADD(tx, frag_need_fragmentation);
+ DEBUGFS_FWSTATS_ADD(tx, frag_bad_mblk_num);
+ DEBUGFS_FWSTATS_ADD(tx, frag_failed);
+ DEBUGFS_FWSTATS_ADD(tx, frag_cache_hit);
+ DEBUGFS_FWSTATS_ADD(tx, frag_cache_miss);
+
+ DEBUGFS_FWSTATS_ADD(rx, rx_beacon_early_term);
+ DEBUGFS_FWSTATS_ADD(rx, rx_out_of_mpdu_nodes);
+ DEBUGFS_FWSTATS_ADD(rx, rx_hdr_overflow);
+ DEBUGFS_FWSTATS_ADD(rx, rx_dropped_frame);
+ DEBUGFS_FWSTATS_ADD(rx, rx_done);
+ DEBUGFS_FWSTATS_ADD(rx, rx_defrag);
+ DEBUGFS_FWSTATS_ADD(rx, rx_defrag_end);
+ DEBUGFS_FWSTATS_ADD(rx, rx_cmplt);
+ DEBUGFS_FWSTATS_ADD(rx, rx_pre_complt);
+ DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
+ DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
+ DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
+ DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
+ DEBUGFS_FWSTATS_ADD(rx, rx_wa_density_dropped_frame);
+ DEBUGFS_FWSTATS_ADD(rx, rx_wa_ba_not_expected);
+ DEBUGFS_FWSTATS_ADD(rx, rx_frame_checksum);
+ DEBUGFS_FWSTATS_ADD(rx, rx_checksum_result);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_called);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_tkip_called);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_need_defrag);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_decrypt_failed);
+ DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
+ DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
+ DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
+
+ DEBUGFS_FWSTATS_ADD(isr, irqs);
+
+ DEBUGFS_FWSTATS_ADD(pwr, missing_bcns_cnt);
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_bcns_cnt);
+ DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
+ DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
+ DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
+
+ DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps_poll, upsd_timeouts);
+ DEBUGFS_FWSTATS_ADD(ps_poll, upsd_max_ap_turn);
+ DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_max_ap_turn);
+ DEBUGFS_FWSTATS_ADD(ps_poll, ps_poll_utilization);
+ DEBUGFS_FWSTATS_ADD(ps_poll, upsd_utilization);
+
+ DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, mc_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, dup_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, data_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, ibss_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, protection_filter);
+ DEBUGFS_FWSTATS_ADD(rx_filter, accum_arp_pend_requests);
+ DEBUGFS_FWSTATS_ADD(rx_filter, max_arp_queue_dep);
+
+ DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
+
+ DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_vs_rate);
+ DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
+
+ DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, tcp_tx_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, tcp_rx_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
+ DEBUGFS_FWSTATS_ADD(pipeline, pre_proc_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_csum_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, csum_to_rx_xfer_swi);
+ DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
+ DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
+ DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
+ DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_in);
+ DEBUGFS_FWSTATS_ADD(pipeline, cs_rx_packet_out);
+ DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
+
+ DEBUGFS_FWSTATS_ADD(mem, rx_free_mem_blks);
+ DEBUGFS_FWSTATS_ADD(mem, tx_free_mem_blks);
+ DEBUGFS_FWSTATS_ADD(mem, fwlog_free_mem_blks);
+ DEBUGFS_FWSTATS_ADD(mem, fw_gen_free_mem_blks);
+
+ DEBUGFS_ADD(conf, moddir);
+
+ return 0;
+
+err:
+ if (IS_ERR(entry))
+ ret = PTR_ERR(entry);
+ else
+ ret = -ENOMEM;
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.h b/drivers/net/wireless/ti/wl18xx/debugfs.h
new file mode 100644
index 000000000000..ed679bebf620
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.h
@@ -0,0 +1,28 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2012 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_DEBUGFS_H__
+#define __WL18XX_DEBUGFS_H__
+
+int wl18xx_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir);
+
+#endif /* __WL18XX_DEBUGFS_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/io.c b/drivers/net/wireless/ti/wl18xx/io.c
new file mode 100644
index 000000000000..f0abf3ef2c95
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/io.c
@@ -0,0 +1,75 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/io.h"
+
+#include "io.h"
+
+int wl18xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
+{
+ u32 tmp;
+ int ret;
+
+ if (WARN_ON(addr % 2))
+ return -EINVAL;
+
+ if ((addr % 4) == 0) {
+ ret = wlcore_read32(wl, addr, &tmp);
+ if (ret < 0)
+ goto out;
+
+ tmp = (tmp & 0xffff0000) | val;
+ ret = wlcore_write32(wl, addr, tmp);
+ } else {
+ ret = wlcore_read32(wl, addr - 2, &tmp);
+ if (ret < 0)
+ goto out;
+
+ tmp = (tmp & 0xffff) | (val << 16);
+ ret = wlcore_write32(wl, addr - 2, tmp);
+ }
+
+out:
+ return ret;
+}
+
+int wl18xx_top_reg_read(struct wl1271 *wl, int addr, u16 *out)
+{
+ u32 val = 0;
+ int ret;
+
+ if (WARN_ON(addr % 2))
+ return -EINVAL;
+
+ if ((addr % 4) == 0) {
+ /* address is 4-bytes aligned */
+ ret = wlcore_read32(wl, addr, &val);
+ if (ret >= 0 && out)
+ *out = val & 0xffff;
+ } else {
+ ret = wlcore_read32(wl, addr - 2, &val);
+ if (ret >= 0 && out)
+ *out = (val & 0xffff0000) >> 16;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/io.h b/drivers/net/wireless/ti/wl18xx/io.h
new file mode 100644
index 000000000000..c32ae30277df
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/io.h
@@ -0,0 +1,28 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_IO_H__
+#define __WL18XX_IO_H__
+
+int __must_check wl18xx_top_reg_write(struct wl1271 *wl, int addr, u16 val);
+int __must_check wl18xx_top_reg_read(struct wl1271 *wl, int addr, u16 *out);
+
+#endif /* __WL18XX_IO_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
new file mode 100644
index 000000000000..69042bb9a097
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -0,0 +1,1610 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ip.h>
+#include <linux/firmware.h>
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/io.h"
+#include "../wlcore/acx.h"
+#include "../wlcore/tx.h"
+#include "../wlcore/rx.h"
+#include "../wlcore/io.h"
+#include "../wlcore/boot.h"
+
+#include "reg.h"
+#include "conf.h"
+#include "acx.h"
+#include "tx.h"
+#include "wl18xx.h"
+#include "io.h"
+#include "debugfs.h"
+
+#define WL18XX_RX_CHECKSUM_MASK 0x40
+
+static char *ht_mode_param = NULL;
+static char *board_type_param = NULL;
+static bool checksum_param = false;
+static bool enable_11a_param = true;
+static int num_rx_desc_param = -1;
+
+/* phy paramters */
+static int dc2dc_param = -1;
+static int n_antennas_2_param = -1;
+static int n_antennas_5_param = -1;
+static int low_band_component_param = -1;
+static int low_band_component_type_param = -1;
+static int high_band_component_param = -1;
+static int high_band_component_type_param = -1;
+static int pwr_limit_reference_11_abg_param = -1;
+
+static const u8 wl18xx_rate_to_idx_2ghz[] = {
+ /* MCS rates are used only with 11n */
+ 15, /* WL18XX_CONF_HW_RXTX_RATE_MCS15 */
+ 14, /* WL18XX_CONF_HW_RXTX_RATE_MCS14 */
+ 13, /* WL18XX_CONF_HW_RXTX_RATE_MCS13 */
+ 12, /* WL18XX_CONF_HW_RXTX_RATE_MCS12 */
+ 11, /* WL18XX_CONF_HW_RXTX_RATE_MCS11 */
+ 10, /* WL18XX_CONF_HW_RXTX_RATE_MCS10 */
+ 9, /* WL18XX_CONF_HW_RXTX_RATE_MCS9 */
+ 8, /* WL18XX_CONF_HW_RXTX_RATE_MCS8 */
+ 7, /* WL18XX_CONF_HW_RXTX_RATE_MCS7 */
+ 6, /* WL18XX_CONF_HW_RXTX_RATE_MCS6 */
+ 5, /* WL18XX_CONF_HW_RXTX_RATE_MCS5 */
+ 4, /* WL18XX_CONF_HW_RXTX_RATE_MCS4 */
+ 3, /* WL18XX_CONF_HW_RXTX_RATE_MCS3 */
+ 2, /* WL18XX_CONF_HW_RXTX_RATE_MCS2 */
+ 1, /* WL18XX_CONF_HW_RXTX_RATE_MCS1 */
+ 0, /* WL18XX_CONF_HW_RXTX_RATE_MCS0 */
+
+ 11, /* WL18XX_CONF_HW_RXTX_RATE_54 */
+ 10, /* WL18XX_CONF_HW_RXTX_RATE_48 */
+ 9, /* WL18XX_CONF_HW_RXTX_RATE_36 */
+ 8, /* WL18XX_CONF_HW_RXTX_RATE_24 */
+
+ /* TI-specific rate */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_22 */
+
+ 7, /* WL18XX_CONF_HW_RXTX_RATE_18 */
+ 6, /* WL18XX_CONF_HW_RXTX_RATE_12 */
+ 3, /* WL18XX_CONF_HW_RXTX_RATE_11 */
+ 5, /* WL18XX_CONF_HW_RXTX_RATE_9 */
+ 4, /* WL18XX_CONF_HW_RXTX_RATE_6 */
+ 2, /* WL18XX_CONF_HW_RXTX_RATE_5_5 */
+ 1, /* WL18XX_CONF_HW_RXTX_RATE_2 */
+ 0 /* WL18XX_CONF_HW_RXTX_RATE_1 */
+};
+
+static const u8 wl18xx_rate_to_idx_5ghz[] = {
+ /* MCS rates are used only with 11n */
+ 15, /* WL18XX_CONF_HW_RXTX_RATE_MCS15 */
+ 14, /* WL18XX_CONF_HW_RXTX_RATE_MCS14 */
+ 13, /* WL18XX_CONF_HW_RXTX_RATE_MCS13 */
+ 12, /* WL18XX_CONF_HW_RXTX_RATE_MCS12 */
+ 11, /* WL18XX_CONF_HW_RXTX_RATE_MCS11 */
+ 10, /* WL18XX_CONF_HW_RXTX_RATE_MCS10 */
+ 9, /* WL18XX_CONF_HW_RXTX_RATE_MCS9 */
+ 8, /* WL18XX_CONF_HW_RXTX_RATE_MCS8 */
+ 7, /* WL18XX_CONF_HW_RXTX_RATE_MCS7 */
+ 6, /* WL18XX_CONF_HW_RXTX_RATE_MCS6 */
+ 5, /* WL18XX_CONF_HW_RXTX_RATE_MCS5 */
+ 4, /* WL18XX_CONF_HW_RXTX_RATE_MCS4 */
+ 3, /* WL18XX_CONF_HW_RXTX_RATE_MCS3 */
+ 2, /* WL18XX_CONF_HW_RXTX_RATE_MCS2 */
+ 1, /* WL18XX_CONF_HW_RXTX_RATE_MCS1 */
+ 0, /* WL18XX_CONF_HW_RXTX_RATE_MCS0 */
+
+ 7, /* WL18XX_CONF_HW_RXTX_RATE_54 */
+ 6, /* WL18XX_CONF_HW_RXTX_RATE_48 */
+ 5, /* WL18XX_CONF_HW_RXTX_RATE_36 */
+ 4, /* WL18XX_CONF_HW_RXTX_RATE_24 */
+
+ /* TI-specific rate */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_22 */
+
+ 3, /* WL18XX_CONF_HW_RXTX_RATE_18 */
+ 2, /* WL18XX_CONF_HW_RXTX_RATE_12 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_11 */
+ 1, /* WL18XX_CONF_HW_RXTX_RATE_9 */
+ 0, /* WL18XX_CONF_HW_RXTX_RATE_6 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_5_5 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_2 */
+ CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_1 */
+};
+
+static const u8 *wl18xx_band_rate_to_idx[] = {
+ [IEEE80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
+ [IEEE80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
+};
+
+enum wl18xx_hw_rates {
+ WL18XX_CONF_HW_RXTX_RATE_MCS15 = 0,
+ WL18XX_CONF_HW_RXTX_RATE_MCS14,
+ WL18XX_CONF_HW_RXTX_RATE_MCS13,
+ WL18XX_CONF_HW_RXTX_RATE_MCS12,
+ WL18XX_CONF_HW_RXTX_RATE_MCS11,
+ WL18XX_CONF_HW_RXTX_RATE_MCS10,
+ WL18XX_CONF_HW_RXTX_RATE_MCS9,
+ WL18XX_CONF_HW_RXTX_RATE_MCS8,
+ WL18XX_CONF_HW_RXTX_RATE_MCS7,
+ WL18XX_CONF_HW_RXTX_RATE_MCS6,
+ WL18XX_CONF_HW_RXTX_RATE_MCS5,
+ WL18XX_CONF_HW_RXTX_RATE_MCS4,
+ WL18XX_CONF_HW_RXTX_RATE_MCS3,
+ WL18XX_CONF_HW_RXTX_RATE_MCS2,
+ WL18XX_CONF_HW_RXTX_RATE_MCS1,
+ WL18XX_CONF_HW_RXTX_RATE_MCS0,
+ WL18XX_CONF_HW_RXTX_RATE_54,
+ WL18XX_CONF_HW_RXTX_RATE_48,
+ WL18XX_CONF_HW_RXTX_RATE_36,
+ WL18XX_CONF_HW_RXTX_RATE_24,
+ WL18XX_CONF_HW_RXTX_RATE_22,
+ WL18XX_CONF_HW_RXTX_RATE_18,
+ WL18XX_CONF_HW_RXTX_RATE_12,
+ WL18XX_CONF_HW_RXTX_RATE_11,
+ WL18XX_CONF_HW_RXTX_RATE_9,
+ WL18XX_CONF_HW_RXTX_RATE_6,
+ WL18XX_CONF_HW_RXTX_RATE_5_5,
+ WL18XX_CONF_HW_RXTX_RATE_2,
+ WL18XX_CONF_HW_RXTX_RATE_1,
+ WL18XX_CONF_HW_RXTX_RATE_MAX,
+};
+
+static struct wlcore_conf wl18xx_conf = {
+ .sg = {
+ .params = {
+ [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
+ [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
+ [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
+ [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
+ [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
+ [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
+ [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
+ [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
+ [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
+ [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
+ [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
+ [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
+ [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
+ [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
+ [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
+ [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
+ [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
+ [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
+ [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
+ [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
+ /* active scan params */
+ [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
+ [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
+ /* passive scan params */
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
+ [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
+ /* passive scan in dual antenna params */
+ [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
+ [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
+ [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
+ /* general params */
+ [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
+ [CONF_SG_ANTENNA_CONFIGURATION] = 0,
+ [CONF_SG_BEACON_MISS_PERCENT] = 60,
+ [CONF_SG_DHCP_TIME] = 5000,
+ [CONF_SG_RXT] = 1200,
+ [CONF_SG_TXT] = 1000,
+ [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
+ [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
+ [CONF_SG_HV3_MAX_SERVED] = 6,
+ [CONF_SG_PS_POLL_TIMEOUT] = 10,
+ [CONF_SG_UPSD_TIMEOUT] = 10,
+ [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
+ [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
+ [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
+ /* AP params */
+ [CONF_AP_BEACON_MISS_TX] = 3,
+ [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
+ [CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
+ [CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
+ [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
+ [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
+ /* CTS Diluting params */
+ [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
+ [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
+ },
+ .state = CONF_SG_PROTECTIVE,
+ },
+ .rx = {
+ .rx_msdu_life_time = 512000,
+ .packet_detection_threshold = 0,
+ .ps_poll_timeout = 15,
+ .upsd_timeout = 15,
+ .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
+ .rx_cca_threshold = 0,
+ .irq_blk_threshold = 0xFFFF,
+ .irq_pkt_threshold = 0,
+ .irq_timeout = 600,
+ .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
+ },
+ .tx = {
+ .tx_energy_detection = 0,
+ .sta_rc_conf = {
+ .enabled_rates = 0,
+ .short_retry_limit = 10,
+ .long_retry_limit = 10,
+ .aflags = 0,
+ },
+ .ac_conf_count = 4,
+ .ac_conf = {
+ [CONF_TX_AC_BE] = {
+ .ac = CONF_TX_AC_BE,
+ .cw_min = 15,
+ .cw_max = 63,
+ .aifsn = 3,
+ .tx_op_limit = 0,
+ },
+ [CONF_TX_AC_BK] = {
+ .ac = CONF_TX_AC_BK,
+ .cw_min = 15,
+ .cw_max = 63,
+ .aifsn = 7,
+ .tx_op_limit = 0,
+ },
+ [CONF_TX_AC_VI] = {
+ .ac = CONF_TX_AC_VI,
+ .cw_min = 15,
+ .cw_max = 63,
+ .aifsn = CONF_TX_AIFS_PIFS,
+ .tx_op_limit = 3008,
+ },
+ [CONF_TX_AC_VO] = {
+ .ac = CONF_TX_AC_VO,
+ .cw_min = 15,
+ .cw_max = 63,
+ .aifsn = CONF_TX_AIFS_PIFS,
+ .tx_op_limit = 1504,
+ },
+ },
+ .max_tx_retries = 100,
+ .ap_aging_period = 300,
+ .tid_conf_count = 4,
+ .tid_conf = {
+ [CONF_TX_AC_BE] = {
+ .queue_id = CONF_TX_AC_BE,
+ .channel_type = CONF_CHANNEL_TYPE_EDCF,
+ .tsid = CONF_TX_AC_BE,
+ .ps_scheme = CONF_PS_SCHEME_LEGACY,
+ .ack_policy = CONF_ACK_POLICY_LEGACY,
+ .apsd_conf = {0, 0},
+ },
+ [CONF_TX_AC_BK] = {
+ .queue_id = CONF_TX_AC_BK,
+ .channel_type = CONF_CHANNEL_TYPE_EDCF,
+ .tsid = CONF_TX_AC_BK,
+ .ps_scheme = CONF_PS_SCHEME_LEGACY,
+ .ack_policy = CONF_ACK_POLICY_LEGACY,
+ .apsd_conf = {0, 0},
+ },
+ [CONF_TX_AC_VI] = {
+ .queue_id = CONF_TX_AC_VI,
+ .channel_type = CONF_CHANNEL_TYPE_EDCF,
+ .tsid = CONF_TX_AC_VI,
+ .ps_scheme = CONF_PS_SCHEME_LEGACY,
+ .ack_policy = CONF_ACK_POLICY_LEGACY,
+ .apsd_conf = {0, 0},
+ },
+ [CONF_TX_AC_VO] = {
+ .queue_id = CONF_TX_AC_VO,
+ .channel_type = CONF_CHANNEL_TYPE_EDCF,
+ .tsid = CONF_TX_AC_VO,
+ .ps_scheme = CONF_PS_SCHEME_LEGACY,
+ .ack_policy = CONF_ACK_POLICY_LEGACY,
+ .apsd_conf = {0, 0},
+ },
+ },
+ .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
+ .tx_compl_timeout = 350,
+ .tx_compl_threshold = 10,
+ .basic_rate = CONF_HW_BIT_RATE_1MBPS,
+ .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
+ .tmpl_short_retry_limit = 10,
+ .tmpl_long_retry_limit = 10,
+ .tx_watchdog_timeout = 5000,
+ },
+ .conn = {
+ .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
+ .listen_interval = 1,
+ .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
+ .suspend_listen_interval = 3,
+ .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
+ .bcn_filt_ie_count = 3,
+ .bcn_filt_ie = {
+ [0] = {
+ .ie = WLAN_EID_CHANNEL_SWITCH,
+ .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
+ },
+ [1] = {
+ .ie = WLAN_EID_HT_OPERATION,
+ .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
+ },
+ [2] = {
+ .ie = WLAN_EID_ERP_INFO,
+ .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
+ },
+ },
+ .synch_fail_thold = 12,
+ .bss_lose_timeout = 400,
+ .beacon_rx_timeout = 10000,
+ .broadcast_timeout = 20000,
+ .rx_broadcast_in_ps = 1,
+ .ps_poll_threshold = 10,
+ .bet_enable = CONF_BET_MODE_ENABLE,
+ .bet_max_consecutive = 50,
+ .psm_entry_retries = 8,
+ .psm_exit_retries = 16,
+ .psm_entry_nullfunc_retries = 3,
+ .dynamic_ps_timeout = 1500,
+ .forced_ps = false,
+ .keep_alive_interval = 55000,
+ .max_listen_interval = 20,
+ .sta_sleep_auth = WL1271_PSM_ILLEGAL,
+ },
+ .itrim = {
+ .enable = false,
+ .timeout = 50000,
+ },
+ .pm_config = {
+ .host_clk_settling_time = 5000,
+ .host_fast_wakeup_support = CONF_FAST_WAKEUP_DISABLE,
+ },
+ .roam_trigger = {
+ .trigger_pacing = 1,
+ .avg_weight_rssi_beacon = 20,
+ .avg_weight_rssi_data = 10,
+ .avg_weight_snr_beacon = 20,
+ .avg_weight_snr_data = 10,
+ },
+ .scan = {
+ .min_dwell_time_active = 7500,
+ .max_dwell_time_active = 30000,
+ .min_dwell_time_passive = 100000,
+ .max_dwell_time_passive = 100000,
+ .num_probe_reqs = 2,
+ .split_scan_timeout = 50000,
+ },
+ .sched_scan = {
+ /*
+ * Values are in TU/1000 but since sched scan FW command
+ * params are in TUs rounding up may occur.
+ */
+ .base_dwell_time = 7500,
+ .max_dwell_time_delta = 22500,
+ /* based on 250bits per probe @1Mbps */
+ .dwell_time_delta_per_probe = 2000,
+ /* based on 250bits per probe @6Mbps (plus a bit more) */
+ .dwell_time_delta_per_probe_5 = 350,
+ .dwell_time_passive = 100000,
+ .dwell_time_dfs = 150000,
+ .num_probe_reqs = 2,
+ .rssi_threshold = -90,
+ .snr_threshold = 0,
+ },
+ .ht = {
+ .rx_ba_win_size = 10,
+ .tx_ba_win_size = 64,
+ .inactivity_timeout = 10000,
+ .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
+ },
+ .mem = {
+ .num_stations = 1,
+ .ssid_profiles = 1,
+ .rx_block_num = 40,
+ .tx_min_block_num = 40,
+ .dynamic_memory = 1,
+ .min_req_tx_blocks = 45,
+ .min_req_rx_blocks = 22,
+ .tx_min = 27,
+ },
+ .fm_coex = {
+ .enable = true,
+ .swallow_period = 5,
+ .n_divider_fref_set_1 = 0xff, /* default */
+ .n_divider_fref_set_2 = 12,
+ .m_divider_fref_set_1 = 0xffff,
+ .m_divider_fref_set_2 = 148, /* default */
+ .coex_pll_stabilization_time = 0xffffffff, /* default */
+ .ldo_stabilization_time = 0xffff, /* default */
+ .fm_disturbed_band_margin = 0xff, /* default */
+ .swallow_clk_diff = 0xff, /* default */
+ },
+ .rx_streaming = {
+ .duration = 150,
+ .queues = 0x1,
+ .interval = 20,
+ .always = 0,
+ },
+ .fwlog = {
+ .mode = WL12XX_FWLOG_ON_DEMAND,
+ .mem_blocks = 2,
+ .severity = 0,
+ .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
+ .output = WL12XX_FWLOG_OUTPUT_HOST,
+ .threshold = 0,
+ },
+ .rate = {
+ .rate_retry_score = 32000,
+ .per_add = 8192,
+ .per_th1 = 2048,
+ .per_th2 = 4096,
+ .max_per = 8100,
+ .inverse_curiosity_factor = 5,
+ .tx_fail_low_th = 4,
+ .tx_fail_high_th = 10,
+ .per_alpha_shift = 4,
+ .per_add_shift = 13,
+ .per_beta1_shift = 10,
+ .per_beta2_shift = 8,
+ .rate_check_up = 2,
+ .rate_check_down = 12,
+ .rate_retry_policy = {
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00,
+ },
+ },
+ .hangover = {
+ .recover_time = 0,
+ .hangover_period = 20,
+ .dynamic_mode = 1,
+ .early_termination_mode = 1,
+ .max_period = 20,
+ .min_period = 1,
+ .increase_delta = 1,
+ .decrease_delta = 2,
+ .quiet_time = 4,
+ .increase_time = 1,
+ .window_size = 16,
+ },
+};
+
+static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
+ .ht = {
+ .mode = HT_MODE_DEFAULT,
+ },
+ .phy = {
+ .phy_standalone = 0x00,
+ .primary_clock_setting_time = 0x05,
+ .clock_valid_on_wake_up = 0x00,
+ .secondary_clock_setting_time = 0x05,
+ .board_type = BOARD_TYPE_HDK_18XX,
+ .rdl = 0x01,
+ .auto_detect = 0x00,
+ .dedicated_fem = FEM_NONE,
+ .low_band_component = COMPONENT_2_WAY_SWITCH,
+ .low_band_component_type = 0x06,
+ .high_band_component = COMPONENT_2_WAY_SWITCH,
+ .high_band_component_type = 0x09,
+ .tcxo_ldo_voltage = 0x00,
+ .xtal_itrim_val = 0x04,
+ .srf_state = 0x00,
+ .io_configuration = 0x01,
+ .sdio_configuration = 0x00,
+ .settings = 0x00,
+ .enable_clpc = 0x00,
+ .enable_tx_low_pwr_on_siso_rdl = 0x00,
+ .rx_profile = 0x00,
+ .pwr_limit_reference_11_abg = 0xc8,
+ .psat = 0,
+ .low_power_val = 0x00,
+ .med_power_val = 0x0a,
+ .high_power_val = 0x1e,
+ .external_pa_dc2dc = 0,
+ .number_of_assembled_ant2_4 = 1,
+ .number_of_assembled_ant5 = 1,
+ },
+};
+
+static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
+ [PART_TOP_PRCM_ELP_SOC] = {
+ .mem = { .start = 0x00A02000, .size = 0x00010000 },
+ .reg = { .start = 0x00807000, .size = 0x00005000 },
+ .mem2 = { .start = 0x00800000, .size = 0x0000B000 },
+ .mem3 = { .start = 0x00000000, .size = 0x00000000 },
+ },
+ [PART_DOWN] = {
+ .mem = { .start = 0x00000000, .size = 0x00014000 },
+ .reg = { .start = 0x00810000, .size = 0x0000BFFF },
+ .mem2 = { .start = 0x00000000, .size = 0x00000000 },
+ .mem3 = { .start = 0x00000000, .size = 0x00000000 },
+ },
+ [PART_BOOT] = {
+ .mem = { .start = 0x00700000, .size = 0x0000030c },
+ .reg = { .start = 0x00802000, .size = 0x00014578 },
+ .mem2 = { .start = 0x00B00404, .size = 0x00001000 },
+ .mem3 = { .start = 0x00C00000, .size = 0x00000400 },
+ },
+ [PART_WORK] = {
+ .mem = { .start = 0x00800000, .size = 0x000050FC },
+ .reg = { .start = 0x00B00404, .size = 0x00001000 },
+ .mem2 = { .start = 0x00C00000, .size = 0x00000400 },
+ .mem3 = { .start = 0x00000000, .size = 0x00000000 },
+ },
+ [PART_PHY_INIT] = {
+ .mem = { .start = 0x80926000,
+ .size = sizeof(struct wl18xx_mac_and_phy_params) },
+ .reg = { .start = 0x00000000, .size = 0x00000000 },
+ .mem2 = { .start = 0x00000000, .size = 0x00000000 },
+ .mem3 = { .start = 0x00000000, .size = 0x00000000 },
+ },
+};
+
+static const int wl18xx_rtable[REG_TABLE_LEN] = {
+ [REG_ECPU_CONTROL] = WL18XX_REG_ECPU_CONTROL,
+ [REG_INTERRUPT_NO_CLEAR] = WL18XX_REG_INTERRUPT_NO_CLEAR,
+ [REG_INTERRUPT_ACK] = WL18XX_REG_INTERRUPT_ACK,
+ [REG_COMMAND_MAILBOX_PTR] = WL18XX_REG_COMMAND_MAILBOX_PTR,
+ [REG_EVENT_MAILBOX_PTR] = WL18XX_REG_EVENT_MAILBOX_PTR,
+ [REG_INTERRUPT_TRIG] = WL18XX_REG_INTERRUPT_TRIG_H,
+ [REG_INTERRUPT_MASK] = WL18XX_REG_INTERRUPT_MASK,
+ [REG_PC_ON_RECOVERY] = WL18XX_SCR_PAD4,
+ [REG_CHIP_ID_B] = WL18XX_REG_CHIP_ID_B,
+ [REG_CMD_MBOX_ADDRESS] = WL18XX_CMD_MBOX_ADDRESS,
+
+ /* data access memory addresses, used with partition translation */
+ [REG_SLV_MEM_DATA] = WL18XX_SLV_MEM_DATA,
+ [REG_SLV_REG_DATA] = WL18XX_SLV_REG_DATA,
+
+ /* raw data access memory addresses */
+ [REG_RAW_FW_STATUS_ADDR] = WL18XX_FW_STATUS_ADDR,
+};
+
+static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
+ [CLOCK_CONFIG_16_2_M] = { 7, 104, 801, 4, true },
+ [CLOCK_CONFIG_16_368_M] = { 9, 132, 3751, 4, true },
+ [CLOCK_CONFIG_16_8_M] = { 7, 100, 0, 0, false },
+ [CLOCK_CONFIG_19_2_M] = { 8, 100, 0, 0, false },
+ [CLOCK_CONFIG_26_M] = { 13, 120, 0, 0, false },
+ [CLOCK_CONFIG_32_736_M] = { 9, 132, 3751, 4, true },
+ [CLOCK_CONFIG_33_6_M] = { 7, 100, 0, 0, false },
+ [CLOCK_CONFIG_38_468_M] = { 8, 100, 0, 0, false },
+ [CLOCK_CONFIG_52_M] = { 13, 120, 0, 0, false },
+};
+
+/* TODO: maybe move to a new header file? */
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw.bin"
+
+static int wl18xx_identify_chip(struct wl1271 *wl)
+{
+ int ret = 0;
+
+ switch (wl->chip.id) {
+ case CHIP_ID_185x_PG20:
+ wl1271_debug(DEBUG_BOOT, "chip id 0x%x (185x PG20)",
+ wl->chip.id);
+ wl->sr_fw_name = WL18XX_FW_NAME;
+ /* wl18xx uses the same firmware for PLT */
+ wl->plt_fw_name = WL18XX_FW_NAME;
+ wl->quirks |= WLCORE_QUIRK_NO_ELP |
+ WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
+ WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
+ WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN |
+ WLCORE_QUIRK_TX_PAD_LAST_FRAME;
+
+ wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER, WL18XX_IFTYPE_VER,
+ WL18XX_MAJOR_VER, WL18XX_SUBTYPE_VER,
+ WL18XX_MINOR_VER);
+ break;
+ case CHIP_ID_185x_PG10:
+ wl1271_warning("chip id 0x%x (185x PG10) is deprecated",
+ wl->chip.id);
+ ret = -ENODEV;
+ goto out;
+
+ default:
+ wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int wl18xx_set_clk(struct wl1271 *wl)
+{
+ u16 clk_freq;
+ int ret;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
+ if (ret < 0)
+ goto out;
+
+ /* TODO: PG2: apparently we need to read the clk type */
+
+ ret = wl18xx_top_reg_read(wl, PRIMARY_CLK_DETECT, &clk_freq);
+ if (ret < 0)
+ goto out;
+
+ wl1271_debug(DEBUG_BOOT, "clock freq %d (%d, %d, %d, %d, %s)", clk_freq,
+ wl18xx_clk_table[clk_freq].n, wl18xx_clk_table[clk_freq].m,
+ wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
+ wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
+
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
+ wl18xx_clk_table[clk_freq].n);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_M,
+ wl18xx_clk_table[clk_freq].m);
+ if (ret < 0)
+ goto out;
+
+ if (wl18xx_clk_table[clk_freq].swallow) {
+ /* first the 16 lower bits */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_Q_FACTOR_CFG_1,
+ wl18xx_clk_table[clk_freq].q &
+ PLLSH_WCS_PLL_Q_FACTOR_CFG_1_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* then the 16 higher bits, masked out */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_Q_FACTOR_CFG_2,
+ (wl18xx_clk_table[clk_freq].q >> 16) &
+ PLLSH_WCS_PLL_Q_FACTOR_CFG_2_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* first the 16 lower bits */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_1,
+ wl18xx_clk_table[clk_freq].p &
+ PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK);
+ if (ret < 0)
+ goto out;
+
+ /* then the 16 higher bits, masked out */
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_2,
+ (wl18xx_clk_table[clk_freq].p >> 16) &
+ PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK);
+ } else {
+ ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_SWALLOW_EN,
+ PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
+ }
+
+out:
+ return ret;
+}
+
+static int wl18xx_boot_soft_reset(struct wl1271 *wl)
+{
+ int ret;
+
+ /* disable Rx/Tx */
+ ret = wlcore_write32(wl, WL18XX_ENABLE, 0x0);
+ if (ret < 0)
+ goto out;
+
+ /* disable auto calibration on start*/
+ ret = wlcore_write32(wl, WL18XX_SPARE_A2, 0xffff);
+
+out:
+ return ret;
+}
+
+static int wl18xx_pre_boot(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl18xx_set_clk(wl);
+ if (ret < 0)
+ goto out;
+
+ /* Continue the ELP wake up sequence */
+ ret = wlcore_write32(wl, WL18XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ if (ret < 0)
+ goto out;
+
+ udelay(500);
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ if (ret < 0)
+ goto out;
+
+ /* Disable interrupts */
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_boot_soft_reset(wl);
+
+out:
+ return ret;
+}
+
+static int wl18xx_pre_upload(struct wl1271 *wl)
+{
+ u32 tmp;
+ int ret;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ if (ret < 0)
+ goto out;
+
+ /* TODO: check if this is all needed */
+ ret = wlcore_write32(wl, WL18XX_EEPROMLESS_IND, WL18XX_EEPROMLESS_IND);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &tmp);
+ if (ret < 0)
+ goto out;
+
+ wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
+
+ ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp);
+
+out:
+ return ret;
+}
+
+static int wl18xx_set_mac_and_phy(struct wl1271 *wl)
+{
+ struct wl18xx_priv *priv = wl->priv;
+ struct wl18xx_mac_and_phy_params *params;
+ int ret;
+
+ params = kmemdup(&priv->conf.phy, sizeof(*params), GFP_KERNEL);
+ if (!params) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_write(wl, WL18XX_PHY_INIT_MEM_ADDR, params,
+ sizeof(*params), false);
+
+out:
+ kfree(params);
+ return ret;
+}
+
+static int wl18xx_enable_interrupts(struct wl1271 *wl)
+{
+ u32 event_mask, intr_mask;
+ int ret;
+
+ event_mask = WL18XX_ACX_EVENTS_VECTOR;
+ intr_mask = WL18XX_INTR_MASK;
+
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, event_mask);
+ if (ret < 0)
+ goto out;
+
+ wlcore_enable_interrupts(wl);
+
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
+ WL1271_ACX_INTR_ALL & ~intr_mask);
+
+out:
+ return ret;
+}
+
+static int wl18xx_boot(struct wl1271 *wl)
+{
+ int ret;
+
+ ret = wl18xx_pre_boot(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_pre_upload(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_boot_upload_firmware(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_set_mac_and_phy(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_boot_run_firmware(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl18xx_enable_interrupts(wl);
+
+out:
+ return ret;
+}
+
+static int wl18xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
+ void *buf, size_t len)
+{
+ struct wl18xx_priv *priv = wl->priv;
+
+ memcpy(priv->cmd_buf, buf, len);
+ memset(priv->cmd_buf + len, 0, WL18XX_CMD_MAX_SIZE - len);
+
+ return wlcore_write(wl, cmd_box_addr, priv->cmd_buf,
+ WL18XX_CMD_MAX_SIZE, false);
+}
+
+static int wl18xx_ack_event(struct wl1271 *wl)
+{
+ return wlcore_write_reg(wl, REG_INTERRUPT_TRIG,
+ WL18XX_INTR_TRIG_EVENT_ACK);
+}
+
+static u32 wl18xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
+{
+ u32 blk_size = WL18XX_TX_HW_BLOCK_SIZE;
+ return (len + blk_size - 1) / blk_size + spare_blks;
+}
+
+static void
+wl18xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
+ u32 blks, u32 spare_blks)
+{
+ desc->wl18xx_mem.total_mem_blocks = blks;
+}
+
+static void
+wl18xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
+ struct sk_buff *skb)
+{
+ desc->length = cpu_to_le16(skb->len);
+
+ /* if only the last frame is to be padded, we unset this bit on Tx */
+ if (wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME)
+ desc->wl18xx_mem.ctrl = WL18XX_TX_CTRL_NOT_PADDED;
+ else
+ desc->wl18xx_mem.ctrl = 0;
+
+ wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
+ "len: %d life: %d mem: %d", desc->hlid,
+ le16_to_cpu(desc->length),
+ le16_to_cpu(desc->life_time),
+ desc->wl18xx_mem.total_mem_blocks);
+}
+
+static enum wl_rx_buf_align
+wl18xx_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
+{
+ if (rx_desc & RX_BUF_PADDED_PAYLOAD)
+ return WLCORE_RX_BUF_PADDED;
+
+ return WLCORE_RX_BUF_ALIGNED;
+}
+
+static u32 wl18xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
+ u32 data_len)
+{
+ struct wl1271_rx_descriptor *desc = rx_data;
+
+ /* invalid packet */
+ if (data_len < sizeof(*desc))
+ return 0;
+
+ return data_len - sizeof(*desc);
+}
+
+static void wl18xx_tx_immediate_completion(struct wl1271 *wl)
+{
+ wl18xx_tx_immediate_complete(wl);
+}
+
+static int wl18xx_set_host_cfg_bitmap(struct wl1271 *wl, u32 extra_mem_blk)
+{
+ int ret;
+ u32 sdio_align_size = 0;
+ u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE |
+ HOST_IF_CFG_ADD_RX_ALIGNMENT;
+
+ /* Enable Tx SDIO padding */
+ if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN) {
+ host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
+ sdio_align_size = WL12XX_BUS_BLOCK_SIZE;
+ }
+
+ /* Enable Rx SDIO padding */
+ if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN) {
+ host_cfg_bitmap |= HOST_IF_CFG_RX_PAD_TO_SDIO_BLK;
+ sdio_align_size = WL12XX_BUS_BLOCK_SIZE;
+ }
+
+ ret = wl18xx_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap,
+ sdio_align_size, extra_mem_blk,
+ WL18XX_HOST_IF_LEN_SIZE_FIELD);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int wl18xx_hw_init(struct wl1271 *wl)
+{
+ int ret;
+ struct wl18xx_priv *priv = wl->priv;
+
+ /* (re)init private structures. Relevant on recovery as well. */
+ priv->last_fw_rls_idx = 0;
+ priv->extra_spare_vif_count = 0;
+
+ /* set the default amount of spare blocks in the bitmap */
+ ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_BLOCK_SPARE);
+ if (ret < 0)
+ return ret;
+
+ if (checksum_param) {
+ ret = wl18xx_acx_set_checksum_state(wl);
+ if (ret != 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
+ struct wl1271_tx_hw_descr *desc,
+ struct sk_buff *skb)
+{
+ u32 ip_hdr_offset;
+ struct iphdr *ip_hdr;
+
+ if (!checksum_param) {
+ desc->wl18xx_checksum_data = 0;
+ return;
+ }
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ desc->wl18xx_checksum_data = 0;
+ return;
+ }
+
+ ip_hdr_offset = skb_network_header(skb) - skb_mac_header(skb);
+ if (WARN_ON(ip_hdr_offset >= (1<<7))) {
+ desc->wl18xx_checksum_data = 0;
+ return;
+ }
+
+ desc->wl18xx_checksum_data = ip_hdr_offset << 1;
+
+ /* FW is interested only in the LSB of the protocol TCP=0 UDP=1 */
+ ip_hdr = (void *)skb_network_header(skb);
+ desc->wl18xx_checksum_data |= (ip_hdr->protocol & 0x01);
+}
+
+static void wl18xx_set_rx_csum(struct wl1271 *wl,
+ struct wl1271_rx_descriptor *desc,
+ struct sk_buff *skb)
+{
+ if (desc->status & WL18XX_RX_CHECKSUM_MASK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static bool wl18xx_is_mimo_supported(struct wl1271 *wl)
+{
+ struct wl18xx_priv *priv = wl->priv;
+
+ return priv->conf.phy.number_of_assembled_ant2_4 >= 2;
+}
+
+/*
+ * TODO: instead of having these two functions to get the rate mask,
+ * we should modify the wlvif->rate_set instead
+ */
+static u32 wl18xx_sta_get_ap_rate_mask(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
+{
+ u32 hw_rate_set = wlvif->rate_set;
+
+ if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
+ wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
+ wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
+ hw_rate_set |= CONF_TX_RATE_USE_WIDE_CHAN;
+
+ /* we don't support MIMO in wide-channel mode */
+ hw_rate_set &= ~CONF_TX_MIMO_RATES;
+ } else if (wl18xx_is_mimo_supported(wl)) {
+ wl1271_debug(DEBUG_ACX, "using MIMO channel rate mask");
+ hw_rate_set |= CONF_TX_MIMO_RATES;
+ }
+
+ return hw_rate_set;
+}
+
+static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
+{
+ if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
+ wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
+ wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
+
+ /* sanity check - we don't support this */
+ if (WARN_ON(wlvif->band != IEEE80211_BAND_5GHZ))
+ return 0;
+
+ return CONF_TX_RATE_USE_WIDE_CHAN;
+ } else if (wl18xx_is_mimo_supported(wl) &&
+ wlvif->band == IEEE80211_BAND_2GHZ) {
+ wl1271_debug(DEBUG_ACX, "using MIMO rate mask");
+ /*
+ * we don't care about HT channel here - if a peer doesn't
+ * support MIMO, we won't enable it in its rates
+ */
+ return CONF_TX_MIMO_RATES;
+ } else {
+ return 0;
+ }
+}
+
+static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
+{
+ u32 fuse;
+ int ret;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
+ if (ret < 0)
+ goto out;
+
+ if (ver)
+ *ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+
+out:
+ return ret;
+}
+
+#define WL18XX_CONF_FILE_NAME "ti-connectivity/wl18xx-conf.bin"
+static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
+{
+ struct wl18xx_priv *priv = wl->priv;
+ struct wlcore_conf_file *conf_file;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, WL18XX_CONF_FILE_NAME, dev);
+ if (ret < 0) {
+ wl1271_error("could not get configuration binary %s: %d",
+ WL18XX_CONF_FILE_NAME, ret);
+ goto out_fallback;
+ }
+
+ if (fw->size != WL18XX_CONF_SIZE) {
+ wl1271_error("configuration binary file size is wrong, expected %zu got %zu",
+ WL18XX_CONF_SIZE, fw->size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ conf_file = (struct wlcore_conf_file *) fw->data;
+
+ if (conf_file->header.magic != cpu_to_le32(WL18XX_CONF_MAGIC)) {
+ wl1271_error("configuration binary file magic number mismatch, "
+ "expected 0x%0x got 0x%0x", WL18XX_CONF_MAGIC,
+ conf_file->header.magic);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (conf_file->header.version != cpu_to_le32(WL18XX_CONF_VERSION)) {
+ wl1271_error("configuration binary file version not supported, "
+ "expected 0x%08x got 0x%08x",
+ WL18XX_CONF_VERSION, conf_file->header.version);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&wl->conf, &conf_file->core, sizeof(wl18xx_conf));
+ memcpy(&priv->conf, &conf_file->priv, sizeof(priv->conf));
+
+ goto out;
+
+out_fallback:
+ wl1271_warning("falling back to default config");
+
+ /* apply driver default configuration */
+ memcpy(&wl->conf, &wl18xx_conf, sizeof(wl18xx_conf));
+ /* apply default private configuration */
+ memcpy(&priv->conf, &wl18xx_default_priv_conf, sizeof(priv->conf));
+
+ /* For now we just fallback */
+ return 0;
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static int wl18xx_plt_init(struct wl1271 *wl)
+{
+ int ret;
+
+ /* calibrator based auto/fem detect not supported for 18xx */
+ if (wl->plt_mode == PLT_FEM_DETECT) {
+ wl1271_error("wl18xx_plt_init: PLT FEM_DETECT not supported");
+ return -EINVAL;
+ }
+
+ ret = wlcore_write32(wl, WL18XX_SCR_PAD8, WL18XX_SCR_PAD8_PLT);
+ if (ret < 0)
+ return ret;
+
+ return wl->ops->boot(wl);
+}
+
+static int wl18xx_get_mac(struct wl1271 *wl)
+{
+ u32 mac1, mac2;
+ int ret;
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_read32(wl, WL18XX_REG_FUSE_BD_ADDR_1, &mac1);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_read32(wl, WL18XX_REG_FUSE_BD_ADDR_2, &mac2);
+ if (ret < 0)
+ goto out;
+
+ /* these are the two parts of the BD_ADDR */
+ wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
+ ((mac1 & 0xff000000) >> 24);
+ wl->fuse_nic_addr = (mac1 & 0xffffff);
+
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
+
+out:
+ return ret;
+}
+
+static int wl18xx_handle_static_data(struct wl1271 *wl,
+ struct wl1271_static_data *static_data)
+{
+ struct wl18xx_static_data_priv *static_data_priv =
+ (struct wl18xx_static_data_priv *) static_data->priv;
+
+ wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);
+
+ return 0;
+}
+
+static int wl18xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
+{
+ struct wl18xx_priv *priv = wl->priv;
+
+ /* If we have VIFs requiring extra spare, indulge them */
+ if (priv->extra_spare_vif_count)
+ return WL18XX_TX_HW_EXTRA_BLOCK_SPARE;
+
+ return WL18XX_TX_HW_BLOCK_SPARE;
+}
+
+static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ struct wl18xx_priv *priv = wl->priv;
+ bool change_spare = false;
+ int ret;
+
+ /*
+ * when adding the first or removing the last GEM/TKIP interface,
+ * we have to adjust the number of spare blocks.
+ */
+ change_spare = (key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
+ key_conf->cipher == WLAN_CIPHER_SUITE_TKIP) &&
+ ((priv->extra_spare_vif_count == 0 && cmd == SET_KEY) ||
+ (priv->extra_spare_vif_count == 1 && cmd == DISABLE_KEY));
+
+ /* no need to change spare - just regular set_key */
+ if (!change_spare)
+ return wlcore_set_key(wl, cmd, vif, sta, key_conf);
+
+ /*
+ * stop the queues and flush to ensure the next packets are
+ * in sync with FW spare block accounting
+ */
+ wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
+ wl1271_tx_flush(wl);
+
+ ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
+ if (ret < 0)
+ goto out;
+
+ /* key is now set, change the spare blocks */
+ if (cmd == SET_KEY) {
+ ret = wl18xx_set_host_cfg_bitmap(wl,
+ WL18XX_TX_HW_EXTRA_BLOCK_SPARE);
+ if (ret < 0)
+ goto out;
+
+ priv->extra_spare_vif_count++;
+ } else {
+ ret = wl18xx_set_host_cfg_bitmap(wl,
+ WL18XX_TX_HW_BLOCK_SPARE);
+ if (ret < 0)
+ goto out;
+
+ priv->extra_spare_vif_count--;
+ }
+
+out:
+ wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
+ return ret;
+}
+
+static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
+ u32 buf_offset, u32 last_len)
+{
+ if (wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) {
+ struct wl1271_tx_hw_descr *last_desc;
+
+ /* get the last TX HW descriptor written to the aggr buf */
+ last_desc = (struct wl1271_tx_hw_descr *)(wl->aggr_buf +
+ buf_offset - last_len);
+
+ /* the last frame is padded up to an SDIO block */
+ last_desc->wl18xx_mem.ctrl &= ~WL18XX_TX_CTRL_NOT_PADDED;
+ return ALIGN(buf_offset, WL12XX_BUS_BLOCK_SIZE);
+ }
+
+ /* no modifications */
+ return buf_offset;
+}
+
+static struct wlcore_ops wl18xx_ops = {
+ .identify_chip = wl18xx_identify_chip,
+ .boot = wl18xx_boot,
+ .plt_init = wl18xx_plt_init,
+ .trigger_cmd = wl18xx_trigger_cmd,
+ .ack_event = wl18xx_ack_event,
+ .calc_tx_blocks = wl18xx_calc_tx_blocks,
+ .set_tx_desc_blocks = wl18xx_set_tx_desc_blocks,
+ .set_tx_desc_data_len = wl18xx_set_tx_desc_data_len,
+ .get_rx_buf_align = wl18xx_get_rx_buf_align,
+ .get_rx_packet_len = wl18xx_get_rx_packet_len,
+ .tx_immediate_compl = wl18xx_tx_immediate_completion,
+ .tx_delayed_compl = NULL,
+ .hw_init = wl18xx_hw_init,
+ .set_tx_desc_csum = wl18xx_set_tx_desc_csum,
+ .get_pg_ver = wl18xx_get_pg_ver,
+ .set_rx_csum = wl18xx_set_rx_csum,
+ .sta_get_ap_rate_mask = wl18xx_sta_get_ap_rate_mask,
+ .ap_get_mimo_wide_rate_mask = wl18xx_ap_get_mimo_wide_rate_mask,
+ .get_mac = wl18xx_get_mac,
+ .debugfs_init = wl18xx_debugfs_add_files,
+ .handle_static_data = wl18xx_handle_static_data,
+ .get_spare_blocks = wl18xx_get_spare_blocks,
+ .set_key = wl18xx_set_key,
+ .pre_pkt_send = wl18xx_pre_pkt_send,
+};
+
+/* HT cap appropriate for wide channels in 2Ghz */
+static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
+ .cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(150),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+};
+
+/* HT cap appropriate for wide channels in 5Ghz */
+static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
+ .cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(150),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+};
+
+/* HT cap appropriate for SISO 20 */
+static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
+ .cap = IEEE80211_HT_CAP_SGI_20,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(72),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+};
+
+/* HT cap appropriate for MIMO rates in 20mhz channel */
+static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
+ .cap = IEEE80211_HT_CAP_SGI_20,
+ .ht_supported = true,
+ .ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .mcs = {
+ .rx_mask = { 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, },
+ .rx_highest = cpu_to_le16(144),
+ .tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+ },
+};
+
+static int __devinit wl18xx_probe(struct platform_device *pdev)
+{
+ struct wl1271 *wl;
+ struct ieee80211_hw *hw;
+ struct wl18xx_priv *priv;
+ int ret;
+
+ hw = wlcore_alloc_hw(sizeof(*priv));
+ if (IS_ERR(hw)) {
+ wl1271_error("can't allocate hw");
+ ret = PTR_ERR(hw);
+ goto out;
+ }
+
+ wl = hw->priv;
+ priv = wl->priv;
+ wl->ops = &wl18xx_ops;
+ wl->ptable = wl18xx_ptable;
+ wl->rtable = wl18xx_rtable;
+ wl->num_tx_desc = 32;
+ wl->num_rx_desc = 32;
+ wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
+ wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
+ wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
+ wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
+ wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
+ wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);
+
+ if (num_rx_desc_param != -1)
+ wl->num_rx_desc = num_rx_desc_param;
+
+ ret = wl18xx_conf_init(wl, &pdev->dev);
+ if (ret < 0)
+ goto out_free;
+
+ /* If the module param is set, update it in conf */
+ if (board_type_param) {
+ if (!strcmp(board_type_param, "fpga")) {
+ priv->conf.phy.board_type = BOARD_TYPE_FPGA_18XX;
+ } else if (!strcmp(board_type_param, "hdk")) {
+ priv->conf.phy.board_type = BOARD_TYPE_HDK_18XX;
+ } else if (!strcmp(board_type_param, "dvp")) {
+ priv->conf.phy.board_type = BOARD_TYPE_DVP_18XX;
+ } else if (!strcmp(board_type_param, "evb")) {
+ priv->conf.phy.board_type = BOARD_TYPE_EVB_18XX;
+ } else if (!strcmp(board_type_param, "com8")) {
+ priv->conf.phy.board_type = BOARD_TYPE_COM8_18XX;
+ } else {
+ wl1271_error("invalid board type '%s'",
+ board_type_param);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ /* HACK! Just for now we hardcode COM8 and HDK to 0x06 */
+ switch (priv->conf.phy.board_type) {
+ case BOARD_TYPE_HDK_18XX:
+ case BOARD_TYPE_COM8_18XX:
+ priv->conf.phy.low_band_component_type = 0x06;
+ break;
+ case BOARD_TYPE_FPGA_18XX:
+ case BOARD_TYPE_DVP_18XX:
+ case BOARD_TYPE_EVB_18XX:
+ priv->conf.phy.low_band_component_type = 0x05;
+ break;
+ default:
+ wl1271_error("invalid board type '%d'",
+ priv->conf.phy.board_type);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ if (low_band_component_param != -1)
+ priv->conf.phy.low_band_component = low_band_component_param;
+ if (low_band_component_type_param != -1)
+ priv->conf.phy.low_band_component_type =
+ low_band_component_type_param;
+ if (high_band_component_param != -1)
+ priv->conf.phy.high_band_component = high_band_component_param;
+ if (high_band_component_type_param != -1)
+ priv->conf.phy.high_band_component_type =
+ high_band_component_type_param;
+ if (pwr_limit_reference_11_abg_param != -1)
+ priv->conf.phy.pwr_limit_reference_11_abg =
+ pwr_limit_reference_11_abg_param;
+ if (n_antennas_2_param != -1)
+ priv->conf.phy.number_of_assembled_ant2_4 = n_antennas_2_param;
+ if (n_antennas_5_param != -1)
+ priv->conf.phy.number_of_assembled_ant5 = n_antennas_5_param;
+ if (dc2dc_param != -1)
+ priv->conf.phy.external_pa_dc2dc = dc2dc_param;
+
+ if (ht_mode_param) {
+ if (!strcmp(ht_mode_param, "default"))
+ priv->conf.ht.mode = HT_MODE_DEFAULT;
+ else if (!strcmp(ht_mode_param, "wide"))
+ priv->conf.ht.mode = HT_MODE_WIDE;
+ else if (!strcmp(ht_mode_param, "siso20"))
+ priv->conf.ht.mode = HT_MODE_SISO20;
+ else {
+ wl1271_error("invalid ht_mode '%s'", ht_mode_param);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+
+ if (priv->conf.ht.mode == HT_MODE_DEFAULT) {
+ /*
+ * Only support mimo with multiple antennas. Fall back to
+ * siso20.
+ */
+ if (wl18xx_is_mimo_supported(wl))
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ &wl18xx_mimo_ht_cap_2ghz);
+ else
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ &wl18xx_siso20_ht_cap);
+
+ /* 5Ghz is always wide */
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ &wl18xx_siso40_ht_cap_5ghz);
+ } else if (priv->conf.ht.mode == HT_MODE_WIDE) {
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ &wl18xx_siso40_ht_cap_2ghz);
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ &wl18xx_siso40_ht_cap_5ghz);
+ } else if (priv->conf.ht.mode == HT_MODE_SISO20) {
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ &wl18xx_siso20_ht_cap);
+ wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ &wl18xx_siso20_ht_cap);
+ }
+
+ if (!checksum_param) {
+ wl18xx_ops.set_rx_csum = NULL;
+ wl18xx_ops.init_vif = NULL;
+ }
+
+ wl->enable_11a = enable_11a_param;
+
+ return wlcore_probe(wl, pdev);
+
+out_free:
+ wlcore_free_hw(wl);
+out:
+ return ret;
+}
+
+static const struct platform_device_id wl18xx_id_table[] __devinitconst = {
+ { "wl18xx", 0 },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, wl18xx_id_table);
+
+static struct platform_driver wl18xx_driver = {
+ .probe = wl18xx_probe,
+ .remove = __devexit_p(wlcore_remove),
+ .id_table = wl18xx_id_table,
+ .driver = {
+ .name = "wl18xx_driver",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init wl18xx_init(void)
+{
+ return platform_driver_register(&wl18xx_driver);
+}
+module_init(wl18xx_init);
+
+static void __exit wl18xx_exit(void)
+{
+ platform_driver_unregister(&wl18xx_driver);
+}
+module_exit(wl18xx_exit);
+
+module_param_named(ht_mode, ht_mode_param, charp, S_IRUSR);
+MODULE_PARM_DESC(ht_mode, "Force HT mode: wide or siso20");
+
+module_param_named(board_type, board_type_param, charp, S_IRUSR);
+MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or "
+ "dvp");
+
+module_param_named(checksum, checksum_param, bool, S_IRUSR);
+MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)");
+
+module_param_named(enable_11a, enable_11a_param, bool, S_IRUSR);
+MODULE_PARM_DESC(enable_11a, "Enable 11a (5GHz): boolean (defaults to true)");
+
+module_param_named(dc2dc, dc2dc_param, int, S_IRUSR);
+MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)");
+
+module_param_named(n_antennas_2, n_antennas_2_param, int, S_IRUSR);
+MODULE_PARM_DESC(n_antennas_2,
+ "Number of installed 2.4GHz antennas: 1 (default) or 2");
+
+module_param_named(n_antennas_5, n_antennas_5_param, int, S_IRUSR);
+MODULE_PARM_DESC(n_antennas_5,
+ "Number of installed 5GHz antennas: 1 (default) or 2");
+
+module_param_named(low_band_component, low_band_component_param, int,
+ S_IRUSR);
+MODULE_PARM_DESC(low_band_component, "Low band component: u8 "
+ "(default is 0x01)");
+
+module_param_named(low_band_component_type, low_band_component_type_param,
+ int, S_IRUSR);
+MODULE_PARM_DESC(low_band_component_type, "Low band component type: u8 "
+ "(default is 0x05 or 0x06 depending on the board_type)");
+
+module_param_named(high_band_component, high_band_component_param, int,
+ S_IRUSR);
+MODULE_PARM_DESC(high_band_component, "High band component: u8, "
+ "(default is 0x01)");
+
+module_param_named(high_band_component_type, high_band_component_type_param,
+ int, S_IRUSR);
+MODULE_PARM_DESC(high_band_component_type, "High band component type: u8 "
+ "(default is 0x09)");
+
+module_param_named(pwr_limit_reference_11_abg,
+ pwr_limit_reference_11_abg_param, int, S_IRUSR);
+MODULE_PARM_DESC(pwr_limit_reference_11_abg, "Power limit reference: u8 "
+ "(default is 0xc8)");
+
+module_param_named(num_rx_desc,
+ num_rx_desc_param, int, S_IRUSR);
+MODULE_PARM_DESC(num_rx_desc_param,
+ "Number of Rx descriptors: u8 (default is 32)");
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
+MODULE_FIRMWARE(WL18XX_FW_NAME);
diff --git a/drivers/net/wireless/ti/wl18xx/reg.h b/drivers/net/wireless/ti/wl18xx/reg.h
new file mode 100644
index 000000000000..937b71d8783f
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/reg.h
@@ -0,0 +1,191 @@
+/*
+ * This file is part of wlcore
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __REG_H__
+#define __REG_H__
+
+#define WL18XX_REGISTERS_BASE 0x00800000
+#define WL18XX_CODE_BASE 0x00000000
+#define WL18XX_DATA_BASE 0x00400000
+#define WL18XX_DOUBLE_BUFFER_BASE 0x00600000
+#define WL18XX_MCU_KEY_SEARCH_BASE 0x00700000
+#define WL18XX_PHY_BASE 0x00900000
+#define WL18XX_TOP_OCP_BASE 0x00A00000
+#define WL18XX_PACKET_RAM_BASE 0x00B00000
+#define WL18XX_HOST_BASE 0x00C00000
+
+#define WL18XX_REGISTERS_DOWN_SIZE 0x0000B000
+
+#define WL18XX_REG_BOOT_PART_START 0x00802000
+#define WL18XX_REG_BOOT_PART_SIZE 0x00014578
+
+#define WL18XX_PHY_INIT_MEM_ADDR 0x80926000
+
+#define WL18XX_SDIO_WSPI_BASE (WL18XX_REGISTERS_BASE)
+#define WL18XX_REG_CONFIG_BASE (WL18XX_REGISTERS_BASE + 0x02000)
+#define WL18XX_WGCM_REGS_BASE (WL18XX_REGISTERS_BASE + 0x03000)
+#define WL18XX_ENC_BASE (WL18XX_REGISTERS_BASE + 0x04000)
+#define WL18XX_INTERRUPT_BASE (WL18XX_REGISTERS_BASE + 0x05000)
+#define WL18XX_UART_BASE (WL18XX_REGISTERS_BASE + 0x06000)
+#define WL18XX_WELP_BASE (WL18XX_REGISTERS_BASE + 0x07000)
+#define WL18XX_TCP_CKSM_BASE (WL18XX_REGISTERS_BASE + 0x08000)
+#define WL18XX_FIFO_BASE (WL18XX_REGISTERS_BASE + 0x09000)
+#define WL18XX_OCP_BRIDGE_BASE (WL18XX_REGISTERS_BASE + 0x0A000)
+#define WL18XX_PMAC_RX_BASE (WL18XX_REGISTERS_BASE + 0x14800)
+#define WL18XX_PMAC_ACM_BASE (WL18XX_REGISTERS_BASE + 0x14C00)
+#define WL18XX_PMAC_TX_BASE (WL18XX_REGISTERS_BASE + 0x15000)
+#define WL18XX_PMAC_CSR_BASE (WL18XX_REGISTERS_BASE + 0x15400)
+
+#define WL18XX_REG_ECPU_CONTROL (WL18XX_REGISTERS_BASE + 0x02004)
+#define WL18XX_REG_INTERRUPT_NO_CLEAR (WL18XX_REGISTERS_BASE + 0x050E8)
+#define WL18XX_REG_INTERRUPT_ACK (WL18XX_REGISTERS_BASE + 0x050F0)
+#define WL18XX_REG_INTERRUPT_TRIG (WL18XX_REGISTERS_BASE + 0x5074)
+#define WL18XX_REG_INTERRUPT_TRIG_H (WL18XX_REGISTERS_BASE + 0x5078)
+#define WL18XX_REG_INTERRUPT_MASK (WL18XX_REGISTERS_BASE + 0x0050DC)
+
+#define WL18XX_REG_CHIP_ID_B (WL18XX_REGISTERS_BASE + 0x01542C)
+
+#define WL18XX_SLV_MEM_DATA (WL18XX_HOST_BASE + 0x0018)
+#define WL18XX_SLV_REG_DATA (WL18XX_HOST_BASE + 0x0008)
+
+/* Scratch Pad registers*/
+#define WL18XX_SCR_PAD0 (WL18XX_REGISTERS_BASE + 0x0154EC)
+#define WL18XX_SCR_PAD1 (WL18XX_REGISTERS_BASE + 0x0154F0)
+#define WL18XX_SCR_PAD2 (WL18XX_REGISTERS_BASE + 0x0154F4)
+#define WL18XX_SCR_PAD3 (WL18XX_REGISTERS_BASE + 0x0154F8)
+#define WL18XX_SCR_PAD4 (WL18XX_REGISTERS_BASE + 0x0154FC)
+#define WL18XX_SCR_PAD4_SET (WL18XX_REGISTERS_BASE + 0x015504)
+#define WL18XX_SCR_PAD4_CLR (WL18XX_REGISTERS_BASE + 0x015500)
+#define WL18XX_SCR_PAD5 (WL18XX_REGISTERS_BASE + 0x015508)
+#define WL18XX_SCR_PAD5_SET (WL18XX_REGISTERS_BASE + 0x015510)
+#define WL18XX_SCR_PAD5_CLR (WL18XX_REGISTERS_BASE + 0x01550C)
+#define WL18XX_SCR_PAD6 (WL18XX_REGISTERS_BASE + 0x015514)
+#define WL18XX_SCR_PAD7 (WL18XX_REGISTERS_BASE + 0x015518)
+#define WL18XX_SCR_PAD8 (WL18XX_REGISTERS_BASE + 0x01551C)
+#define WL18XX_SCR_PAD9 (WL18XX_REGISTERS_BASE + 0x015520)
+
+/* Spare registers*/
+#define WL18XX_SPARE_A1 (WL18XX_REGISTERS_BASE + 0x002194)
+#define WL18XX_SPARE_A2 (WL18XX_REGISTERS_BASE + 0x002198)
+#define WL18XX_SPARE_A3 (WL18XX_REGISTERS_BASE + 0x00219C)
+#define WL18XX_SPARE_A4 (WL18XX_REGISTERS_BASE + 0x0021A0)
+#define WL18XX_SPARE_A5 (WL18XX_REGISTERS_BASE + 0x0021A4)
+#define WL18XX_SPARE_A6 (WL18XX_REGISTERS_BASE + 0x0021A8)
+#define WL18XX_SPARE_A7 (WL18XX_REGISTERS_BASE + 0x0021AC)
+#define WL18XX_SPARE_A8 (WL18XX_REGISTERS_BASE + 0x0021B0)
+#define WL18XX_SPARE_B1 (WL18XX_REGISTERS_BASE + 0x015524)
+#define WL18XX_SPARE_B2 (WL18XX_REGISTERS_BASE + 0x015528)
+#define WL18XX_SPARE_B3 (WL18XX_REGISTERS_BASE + 0x01552C)
+#define WL18XX_SPARE_B4 (WL18XX_REGISTERS_BASE + 0x015530)
+#define WL18XX_SPARE_B5 (WL18XX_REGISTERS_BASE + 0x015534)
+#define WL18XX_SPARE_B6 (WL18XX_REGISTERS_BASE + 0x015538)
+#define WL18XX_SPARE_B7 (WL18XX_REGISTERS_BASE + 0x01553C)
+#define WL18XX_SPARE_B8 (WL18XX_REGISTERS_BASE + 0x015540)
+
+#define WL18XX_REG_COMMAND_MAILBOX_PTR (WL18XX_SCR_PAD0)
+#define WL18XX_REG_EVENT_MAILBOX_PTR (WL18XX_SCR_PAD1)
+#define WL18XX_EEPROMLESS_IND (WL18XX_SCR_PAD4)
+
+#define WL18XX_WELP_ARM_COMMAND (WL18XX_REGISTERS_BASE + 0x7100)
+#define WL18XX_ENABLE (WL18XX_REGISTERS_BASE + 0x01543C)
+
+/* PRCM registers */
+#define PLATFORM_DETECTION 0xA0E3E0
+#define OCS_EN 0xA02080
+#define PRIMARY_CLK_DETECT 0xA020A6
+#define PLLSH_WCS_PLL_N 0xA02362
+#define PLLSH_WCS_PLL_M 0xA02360
+#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1 0xA02364
+#define PLLSH_WCS_PLL_Q_FACTOR_CFG_2 0xA02366
+#define PLLSH_WCS_PLL_P_FACTOR_CFG_1 0xA02368
+#define PLLSH_WCS_PLL_P_FACTOR_CFG_2 0xA0236A
+#define PLLSH_WCS_PLL_SWALLOW_EN 0xA0236C
+#define PLLSH_WL_PLL_EN 0xA02392
+
+#define PLLSH_WCS_PLL_Q_FACTOR_CFG_1_MASK 0xFFFF
+#define PLLSH_WCS_PLL_Q_FACTOR_CFG_2_MASK 0x007F
+#define PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK 0xFFFF
+#define PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK 0x000F
+
+#define PLLSH_WCS_PLL_SWALLOW_EN_VAL1 0x1
+#define PLLSH_WCS_PLL_SWALLOW_EN_VAL2 0x12
+
+#define WL18XX_REG_FUSE_DATA_1_3 0xA0260C
+#define WL18XX_PG_VER_MASK 0x70
+#define WL18XX_PG_VER_OFFSET 4
+
+#define WL18XX_REG_FUSE_BD_ADDR_1 0xA02602
+#define WL18XX_REG_FUSE_BD_ADDR_2 0xA02606
+
+#define WL18XX_CMD_MBOX_ADDRESS 0xB007B4
+
+#define WL18XX_FW_STATUS_ADDR 0x50F8
+
+#define CHIP_ID_185x_PG10 (0x06030101)
+#define CHIP_ID_185x_PG20 (0x06030111)
+
+/*
+ * Host Command Interrupt. Setting this bit masks
+ * the interrupt that the host issues to inform
+ * the FW that it has sent a command
+ * to the Wlan hardware Command Mailbox.
+ */
+#define WL18XX_INTR_TRIG_CMD BIT(28)
+
+/*
+ * Host Event Acknowlegde Interrupt. The host
+ * sets this bit to acknowledge that it received
+ * the unsolicited information from the event
+ * mailbox.
+ */
+#define WL18XX_INTR_TRIG_EVENT_ACK BIT(29)
+
+/*
+ * To boot the firmware in PLT mode we need to write this value in
+ * SCR_PAD8 before starting.
+ */
+#define WL18XX_SCR_PAD8_PLT 0xBABABEBE
+
+enum {
+ COMPONENT_NO_SWITCH = 0x0,
+ COMPONENT_2_WAY_SWITCH = 0x1,
+ COMPONENT_3_WAY_SWITCH = 0x2,
+ COMPONENT_MATCHING = 0x3,
+};
+
+enum {
+ FEM_NONE = 0x0,
+ FEM_VENDOR_1 = 0x1,
+ FEM_VENDOR_2 = 0x2,
+ FEM_VENDOR_3 = 0x3,
+};
+
+enum {
+ BOARD_TYPE_EVB_18XX = 0,
+ BOARD_TYPE_DVP_18XX = 1,
+ BOARD_TYPE_HDK_18XX = 2,
+ BOARD_TYPE_FPGA_18XX = 3,
+ BOARD_TYPE_COM8_18XX = 4,
+
+ NUM_BOARD_TYPES,
+};
+
+#endif /* __REG_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
new file mode 100644
index 000000000000..5b1fb10d9fd7
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -0,0 +1,127 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include "../wlcore/wlcore.h"
+#include "../wlcore/cmd.h"
+#include "../wlcore/debug.h"
+#include "../wlcore/acx.h"
+#include "../wlcore/tx.h"
+
+#include "wl18xx.h"
+#include "tx.h"
+
+static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
+{
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+ int id = tx_stat_byte & WL18XX_TX_STATUS_DESC_ID_MASK;
+ bool tx_success;
+
+ /* check for id legality */
+ if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
+ wl1271_warning("illegal id in tx completion: %d", id);
+ return;
+ }
+
+ /* a zero bit indicates Tx success */
+ tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX));
+
+
+ skb = wl->tx_frames[id];
+ info = IEEE80211_SKB_CB(skb);
+
+ if (wl12xx_is_dummy_packet(wl, skb)) {
+ wl1271_free_tx_id(wl, id);
+ return;
+ }
+
+ /* update the TX status info */
+ if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ /* no real data about Tx completion */
+ info->status.rates[0].idx = -1;
+ info->status.rates[0].count = 0;
+ info->status.rates[0].flags = 0;
+ info->status.ack_signal = -1;
+
+ if (!tx_success)
+ wl->stats.retry_count++;
+
+ /*
+ * TODO: update sequence number for encryption? seems to be
+ * unsupported for now. needed for recovery with encryption.
+ */
+
+ /* remove private header from packet */
+ skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
+
+ /* remove TKIP header space if present */
+ if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+ info->control.hw_key &&
+ info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, hdrlen);
+ skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
+ }
+
+ wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p success %d",
+ id, skb, tx_success);
+
+ /* return the packet to the stack */
+ skb_queue_tail(&wl->deferred_tx_queue, skb);
+ queue_work(wl->freezable_wq, &wl->netstack_work);
+ wl1271_free_tx_id(wl, id);
+}
+
+void wl18xx_tx_immediate_complete(struct wl1271 *wl)
+{
+ struct wl18xx_fw_status_priv *status_priv =
+ (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ struct wl18xx_priv *priv = wl->priv;
+ u8 i;
+
+ /* nothing to do here */
+ if (priv->last_fw_rls_idx == status_priv->fw_release_idx)
+ return;
+
+ /* freed Tx descriptors */
+ wl1271_debug(DEBUG_TX, "last released desc = %d, current idx = %d",
+ priv->last_fw_rls_idx, status_priv->fw_release_idx);
+
+ if (status_priv->fw_release_idx >= WL18XX_FW_MAX_TX_STATUS_DESC) {
+ wl1271_error("invalid desc release index %d",
+ status_priv->fw_release_idx);
+ WARN_ON(1);
+ return;
+ }
+
+ for (i = priv->last_fw_rls_idx;
+ i != status_priv->fw_release_idx;
+ i = (i + 1) % WL18XX_FW_MAX_TX_STATUS_DESC) {
+ wl18xx_tx_complete_packet(wl,
+ status_priv->released_tx_desc[i]);
+
+ wl->tx_results_count++;
+ }
+
+ priv->last_fw_rls_idx = status_priv->fw_release_idx;
+}
diff --git a/drivers/net/wireless/ti/wl18xx/tx.h b/drivers/net/wireless/ti/wl18xx/tx.h
new file mode 100644
index 000000000000..ccddc548e44a
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/tx.h
@@ -0,0 +1,46 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_TX_H__
+#define __WL18XX_TX_H__
+
+#include "../wlcore/wlcore.h"
+
+#define WL18XX_TX_HW_BLOCK_SPARE 1
+/* for special cases - namely, TKIP and GEM */
+#define WL18XX_TX_HW_EXTRA_BLOCK_SPARE 2
+#define WL18XX_TX_HW_BLOCK_SIZE 268
+
+#define WL18XX_TX_STATUS_DESC_ID_MASK 0x7F
+#define WL18XX_TX_STATUS_STAT_BIT_IDX 7
+
+/* Indicates this TX HW frame is not padded to SDIO block size */
+#define WL18XX_TX_CTRL_NOT_PADDED BIT(7)
+
+/*
+ * The FW uses a special bit to indicate a wide channel should be used in
+ * the rate policy.
+ */
+#define CONF_TX_RATE_USE_WIDE_CHAN BIT(31)
+
+void wl18xx_tx_immediate_complete(struct wl1271 *wl);
+
+#endif /* __WL12XX_TX_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
new file mode 100644
index 000000000000..6452396fa1d4
--- /dev/null
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -0,0 +1,95 @@
+/*
+ * This file is part of wl18xx
+ *
+ * Copyright (C) 2011 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL18XX_PRIV_H__
+#define __WL18XX_PRIV_H__
+
+#include "conf.h"
+
+/* minimum FW required for driver */
+#define WL18XX_CHIP_VER 8
+#define WL18XX_IFTYPE_VER 2
+#define WL18XX_MAJOR_VER 0
+#define WL18XX_SUBTYPE_VER 0
+#define WL18XX_MINOR_VER 100
+
+#define WL18XX_CMD_MAX_SIZE 740
+
+struct wl18xx_priv {
+ /* buffer for sending commands to FW */
+ u8 cmd_buf[WL18XX_CMD_MAX_SIZE];
+
+ struct wl18xx_priv_conf conf;
+
+ /* Index of last released Tx desc in FW */
+ u8 last_fw_rls_idx;
+
+ /* number of VIFs requiring extra spare mem-blocks */
+ int extra_spare_vif_count;
+};
+
+#define WL18XX_FW_MAX_TX_STATUS_DESC 33
+
+struct wl18xx_fw_status_priv {
+ /*
+ * Index in released_tx_desc for first byte that holds
+ * released tx host desc
+ */
+ u8 fw_release_idx;
+
+ /*
+ * Array of host Tx descriptors, where fw_release_idx
+ * indicated the first released idx.
+ */
+ u8 released_tx_desc[WL18XX_FW_MAX_TX_STATUS_DESC];
+
+ u8 padding[2];
+};
+
+#define WL18XX_PHY_VERSION_MAX_LEN 20
+
+struct wl18xx_static_data_priv {
+ char phy_version[WL18XX_PHY_VERSION_MAX_LEN];
+};
+
+struct wl18xx_clk_cfg {
+ u32 n;
+ u32 m;
+ u32 p;
+ u32 q;
+ bool swallow;
+};
+
+enum {
+ CLOCK_CONFIG_16_2_M = 1,
+ CLOCK_CONFIG_16_368_M,
+ CLOCK_CONFIG_16_8_M,
+ CLOCK_CONFIG_19_2_M,
+ CLOCK_CONFIG_26_M,
+ CLOCK_CONFIG_32_736_M,
+ CLOCK_CONFIG_33_6_M,
+ CLOCK_CONFIG_38_468_M,
+ CLOCK_CONFIG_52_M,
+
+ NUM_CLOCK_CONFIGS,
+};
+
+#endif /* __WL18XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
index 54156b0b5c2d..d7b907e67170 100644
--- a/drivers/net/wireless/ti/wlcore/Kconfig
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -1,7 +1,6 @@
config WLCORE
tristate "TI wlcore support"
depends on WL_TI && GENERIC_HARDIRQS && MAC80211
- depends on INET
select FW_LOADER
---help---
This module contains the main code for TI WLAN chips. It abstracts
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index f3d6fa508269..ce108a736bd0 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -70,7 +70,7 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
struct acx_sleep_auth *auth;
int ret;
- wl1271_debug(DEBUG_ACX, "acx sleep auth");
+ wl1271_debug(DEBUG_ACX, "acx sleep auth %d", sleep_auth);
auth = kzalloc(sizeof(*auth), GFP_KERNEL);
if (!auth) {
@@ -81,11 +81,18 @@ int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
auth->sleep_auth = sleep_auth;
ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
+ if (ret < 0) {
+ wl1271_error("could not configure sleep_auth to %d: %d",
+ sleep_auth, ret);
+ goto out;
+ }
+ wl->sleep_auth = sleep_auth;
out:
kfree(auth);
return ret;
}
+EXPORT_SYMBOL_GPL(wl1271_acx_sleep_auth);
int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int power)
@@ -708,14 +715,14 @@ out:
return ret;
}
-int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
+int wl1271_acx_statistics(struct wl1271 *wl, void *stats)
{
int ret;
wl1271_debug(DEBUG_ACX, "acx statistics");
ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats,
- sizeof(*stats));
+ wl->stats.fw_stats_len);
if (ret < 0) {
wl1271_warning("acx statistics failed: %d", ret);
return -ENOMEM;
@@ -997,6 +1004,7 @@ out:
kfree(mem_conf);
return ret;
}
+EXPORT_SYMBOL_GPL(wl12xx_acx_mem_cfg);
int wl1271_acx_init_mem_config(struct wl1271 *wl)
{
@@ -1027,6 +1035,7 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
return 0;
}
+EXPORT_SYMBOL_GPL(wl1271_acx_init_mem_config);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl)
{
@@ -1150,6 +1159,7 @@ out:
kfree(acx);
return ret;
}
+EXPORT_SYMBOL_GPL(wl1271_acx_pm_config);
int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable)
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index e6a74869a5ff..d03215d6b3bd 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -51,21 +51,18 @@
#define WL1271_ACX_INTR_TRACE_A BIT(7)
/* Trace message on MBOX #B */
#define WL1271_ACX_INTR_TRACE_B BIT(8)
+/* SW FW Initiated interrupt Watchdog timer expiration */
+#define WL1271_ACX_SW_INTR_WATCHDOG BIT(9)
-#define WL1271_ACX_INTR_ALL 0xFFFFFFFF
-#define WL1271_ACX_ALL_EVENTS_VECTOR (WL1271_ACX_INTR_WATCHDOG | \
- WL1271_ACX_INTR_INIT_COMPLETE | \
- WL1271_ACX_INTR_EVENT_A | \
- WL1271_ACX_INTR_EVENT_B | \
- WL1271_ACX_INTR_CMD_COMPLETE | \
- WL1271_ACX_INTR_HW_AVAILABLE | \
- WL1271_ACX_INTR_DATA)
-
-#define WL1271_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
- WL1271_ACX_INTR_EVENT_A | \
- WL1271_ACX_INTR_EVENT_B | \
- WL1271_ACX_INTR_HW_AVAILABLE | \
- WL1271_ACX_INTR_DATA)
+#define WL1271_ACX_INTR_ALL 0xFFFFFFFF
+
+/* all possible interrupts - only appropriate ones will be masked in */
+#define WLCORE_ALL_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
+ WL1271_ACX_INTR_EVENT_A | \
+ WL1271_ACX_INTR_EVENT_B | \
+ WL1271_ACX_INTR_HW_AVAILABLE | \
+ WL1271_ACX_INTR_DATA | \
+ WL1271_ACX_SW_INTR_WATCHDOG)
/* Target's information element */
struct acx_header {
@@ -121,6 +118,11 @@ enum wl1271_psm_mode {
/* Extreme low power */
WL1271_PSM_ELP = 2,
+
+ WL1271_PSM_MAX = WL1271_PSM_ELP,
+
+ /* illegal out of band value of PSM mode */
+ WL1271_PSM_ILLEGAL = 0xff
};
struct acx_sleep_auth {
@@ -417,228 +419,6 @@ struct acx_ctsprotect {
u8 padding[2];
} __packed;
-struct acx_tx_statistics {
- __le32 internal_desc_overflow;
-} __packed;
-
-struct acx_rx_statistics {
- __le32 out_of_mem;
- __le32 hdr_overflow;
- __le32 hw_stuck;
- __le32 dropped;
- __le32 fcs_err;
- __le32 xfr_hint_trig;
- __le32 path_reset;
- __le32 reset_counter;
-} __packed;
-
-struct acx_dma_statistics {
- __le32 rx_requested;
- __le32 rx_errors;
- __le32 tx_requested;
- __le32 tx_errors;
-} __packed;
-
-struct acx_isr_statistics {
- /* host command complete */
- __le32 cmd_cmplt;
-
- /* fiqisr() */
- __le32 fiqs;
-
- /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
- __le32 rx_headers;
-
- /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
- __le32 rx_completes;
-
- /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
- __le32 rx_mem_overflow;
-
- /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
- __le32 rx_rdys;
-
- /* irqisr() */
- __le32 irqs;
-
- /* (INT_STS_ND & INT_TRIG_TX_PROC) */
- __le32 tx_procs;
-
- /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
- __le32 decrypt_done;
-
- /* (INT_STS_ND & INT_TRIG_DMA0) */
- __le32 dma0_done;
-
- /* (INT_STS_ND & INT_TRIG_DMA1) */
- __le32 dma1_done;
-
- /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
- __le32 tx_exch_complete;
-
- /* (INT_STS_ND & INT_TRIG_COMMAND) */
- __le32 commands;
-
- /* (INT_STS_ND & INT_TRIG_RX_PROC) */
- __le32 rx_procs;
-
- /* (INT_STS_ND & INT_TRIG_PM_802) */
- __le32 hw_pm_mode_changes;
-
- /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
- __le32 host_acknowledges;
-
- /* (INT_STS_ND & INT_TRIG_PM_PCI) */
- __le32 pci_pm;
-
- /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
- __le32 wakeups;
-
- /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
- __le32 low_rssi;
-} __packed;
-
-struct acx_wep_statistics {
- /* WEP address keys configured */
- __le32 addr_key_count;
-
- /* default keys configured */
- __le32 default_key_count;
-
- __le32 reserved;
-
- /* number of times that WEP key not found on lookup */
- __le32 key_not_found;
-
- /* number of times that WEP key decryption failed */
- __le32 decrypt_fail;
-
- /* WEP packets decrypted */
- __le32 packets;
-
- /* WEP decrypt interrupts */
- __le32 interrupt;
-} __packed;
-
-#define ACX_MISSED_BEACONS_SPREAD 10
-
-struct acx_pwr_statistics {
- /* the amount of enters into power save mode (both PD & ELP) */
- __le32 ps_enter;
-
- /* the amount of enters into ELP mode */
- __le32 elp_enter;
-
- /* the amount of missing beacon interrupts to the host */
- __le32 missing_bcns;
-
- /* the amount of wake on host-access times */
- __le32 wake_on_host;
-
- /* the amount of wake on timer-expire */
- __le32 wake_on_timer_exp;
-
- /* the number of packets that were transmitted with PS bit set */
- __le32 tx_with_ps;
-
- /* the number of packets that were transmitted with PS bit clear */
- __le32 tx_without_ps;
-
- /* the number of received beacons */
- __le32 rcvd_beacons;
-
- /* the number of entering into PowerOn (power save off) */
- __le32 power_save_off;
-
- /* the number of entries into power save mode */
- __le16 enable_ps;
-
- /*
- * the number of exits from power save, not including failed PS
- * transitions
- */
- __le16 disable_ps;
-
- /*
- * the number of times the TSF counter was adjusted because
- * of drift
- */
- __le32 fix_tsf_ps;
-
- /* Gives statistics about the spread continuous missed beacons.
- * The 16 LSB are dedicated for the PS mode.
- * The 16 MSB are dedicated for the PS mode.
- * cont_miss_bcns_spread[0] - single missed beacon.
- * cont_miss_bcns_spread[1] - two continuous missed beacons.
- * cont_miss_bcns_spread[2] - three continuous missed beacons.
- * ...
- * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
- */
- __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
-
- /* the number of beacons in awake mode */
- __le32 rcvd_awake_beacons;
-} __packed;
-
-struct acx_mic_statistics {
- __le32 rx_pkts;
- __le32 calc_failure;
-} __packed;
-
-struct acx_aes_statistics {
- __le32 encrypt_fail;
- __le32 decrypt_fail;
- __le32 encrypt_packets;
- __le32 decrypt_packets;
- __le32 encrypt_interrupt;
- __le32 decrypt_interrupt;
-} __packed;
-
-struct acx_event_statistics {
- __le32 heart_beat;
- __le32 calibration;
- __le32 rx_mismatch;
- __le32 rx_mem_empty;
- __le32 rx_pool;
- __le32 oom_late;
- __le32 phy_transmit_error;
- __le32 tx_stuck;
-} __packed;
-
-struct acx_ps_statistics {
- __le32 pspoll_timeouts;
- __le32 upsd_timeouts;
- __le32 upsd_max_sptime;
- __le32 upsd_max_apturn;
- __le32 pspoll_max_apturn;
- __le32 pspoll_utilization;
- __le32 upsd_utilization;
-} __packed;
-
-struct acx_rxpipe_statistics {
- __le32 rx_prep_beacon_drop;
- __le32 descr_host_int_trig_rx_data;
- __le32 beacon_buffer_thres_host_int_trig_rx_data;
- __le32 missed_beacon_host_int_trig_rx_data;
- __le32 tx_xfr_host_int_trig_rx_data;
-} __packed;
-
-struct acx_statistics {
- struct acx_header header;
-
- struct acx_tx_statistics tx;
- struct acx_rx_statistics rx;
- struct acx_dma_statistics dma;
- struct acx_isr_statistics isr;
- struct acx_wep_statistics wep;
- struct acx_pwr_statistics pwr;
- struct acx_aes_statistics aes;
- struct acx_mic_statistics mic;
- struct acx_event_statistics event;
- struct acx_ps_statistics ps;
- struct acx_rxpipe_statistics rxpipe;
-} __packed;
-
struct acx_rate_class {
__le32 enabled_rates;
u8 short_retry_limit;
@@ -828,6 +608,8 @@ struct wl1271_acx_keep_alive_config {
#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1)
#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3)
+#define HOST_IF_CFG_RX_PAD_TO_SDIO_BLK BIT(4)
+#define HOST_IF_CFG_ADD_RX_ALIGNMENT BIT(6)
enum {
WL1271_ACX_TRIG_TYPE_LEVEL = 0,
@@ -946,7 +728,7 @@ struct wl1271_acx_ht_information {
u8 padding[2];
} __packed;
-#define RX_BA_MAX_SESSIONS 2
+#define RX_BA_MAX_SESSIONS 3
struct wl1271_acx_ba_initiator_policy {
struct acx_header header;
@@ -1243,6 +1025,7 @@ enum {
ACX_CONFIG_HANGOVER = 0x0042,
ACX_FEATURE_CFG = 0x0043,
ACX_PROTECTION_CFG = 0x0044,
+ ACX_CHECKSUM_CONFIG = 0x0045,
};
@@ -1281,7 +1064,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_preamble_type preamble);
int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect);
-int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
+int wl1271_acx_statistics(struct wl1271 *wl, void *stats);
int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
u8 idx);
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
index 9b98230f84ce..375ea574eafb 100644
--- a/drivers/net/wireless/ti/wlcore/boot.c
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -33,22 +33,35 @@
#include "rx.h"
#include "hw_ops.h"
-static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
+static int wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
{
u32 cpu_ctrl;
+ int ret;
/* 10.5.0 run the firmware (I) */
- cpu_ctrl = wlcore_read_reg(wl, REG_ECPU_CONTROL);
+ ret = wlcore_read_reg(wl, REG_ECPU_CONTROL, &cpu_ctrl);
+ if (ret < 0)
+ goto out;
/* 10.5.1 run the firmware (II) */
cpu_ctrl |= flag;
- wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl);
+ ret = wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl);
+
+out:
+ return ret;
}
-static int wlcore_parse_fw_ver(struct wl1271 *wl)
+static int wlcore_boot_parse_fw_ver(struct wl1271 *wl,
+ struct wl1271_static_data *static_data)
{
int ret;
+ strncpy(wl->chip.fw_ver_str, static_data->fw_version,
+ sizeof(wl->chip.fw_ver_str));
+
+ /* make sure the string is NULL-terminated */
+ wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+
ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
&wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
&wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
@@ -57,43 +70,96 @@ static int wlcore_parse_fw_ver(struct wl1271 *wl)
if (ret != 5) {
wl1271_warning("fw version incorrect value");
memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ret = wlcore_identify_fw(wl);
if (ret < 0)
- return ret;
+ goto out;
+out:
+ return ret;
+}
+
+static int wlcore_validate_fw_ver(struct wl1271 *wl)
+{
+ unsigned int *fw_ver = wl->chip.fw_ver;
+ unsigned int *min_ver = wl->min_fw_ver;
+ /* the chip must be exactly equal */
+ if (min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP])
+ goto fail;
+
+ /* always check the next digit if all previous ones are equal */
+
+ if (min_ver[FW_VER_IF_TYPE] < fw_ver[FW_VER_IF_TYPE])
+ goto out;
+ else if (min_ver[FW_VER_IF_TYPE] > fw_ver[FW_VER_IF_TYPE])
+ goto fail;
+
+ if (min_ver[FW_VER_MAJOR] < fw_ver[FW_VER_MAJOR])
+ goto out;
+ else if (min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR])
+ goto fail;
+
+ if (min_ver[FW_VER_SUBTYPE] < fw_ver[FW_VER_SUBTYPE])
+ goto out;
+ else if (min_ver[FW_VER_SUBTYPE] > fw_ver[FW_VER_SUBTYPE])
+ goto fail;
+
+ if (min_ver[FW_VER_MINOR] < fw_ver[FW_VER_MINOR])
+ goto out;
+ else if (min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR])
+ goto fail;
+
+out:
return 0;
+
+fail:
+ wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is outdated.\n"
+ "Please use at least FW %u.%u.%u.%u.%u.\n"
+ "You can get more information at:\n"
+ "http://wireless.kernel.org/en/users/Drivers/wl12xx",
+ fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE],
+ fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE],
+ fw_ver[FW_VER_MINOR], min_ver[FW_VER_CHIP],
+ min_ver[FW_VER_IF_TYPE], min_ver[FW_VER_MAJOR],
+ min_ver[FW_VER_SUBTYPE], min_ver[FW_VER_MINOR]);
+ return -EINVAL;
}
-static int wlcore_boot_fw_version(struct wl1271 *wl)
+static int wlcore_boot_static_data(struct wl1271 *wl)
{
struct wl1271_static_data *static_data;
+ size_t len = sizeof(*static_data) + wl->static_data_priv_len;
int ret;
- static_data = kmalloc(sizeof(*static_data), GFP_KERNEL | GFP_DMA);
+ static_data = kmalloc(len, GFP_KERNEL);
if (!static_data) {
- wl1271_error("Couldn't allocate memory for static data!");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
- wl1271_read(wl, wl->cmd_box_addr, static_data, sizeof(*static_data),
- false);
-
- strncpy(wl->chip.fw_ver_str, static_data->fw_version,
- sizeof(wl->chip.fw_ver_str));
+ ret = wlcore_read(wl, wl->cmd_box_addr, static_data, len, false);
+ if (ret < 0)
+ goto out_free;
- kfree(static_data);
+ ret = wlcore_boot_parse_fw_ver(wl, static_data);
+ if (ret < 0)
+ goto out_free;
- /* make sure the string is NULL-terminated */
- wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
+ ret = wlcore_validate_fw_ver(wl);
+ if (ret < 0)
+ goto out_free;
- ret = wlcore_parse_fw_ver(wl);
+ ret = wlcore_handle_static_data(wl, static_data);
if (ret < 0)
- return ret;
+ goto out_free;
- return 0;
+out_free:
+ kfree(static_data);
+out:
+ return ret;
}
static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
@@ -102,6 +168,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
struct wlcore_partition_set partition;
int addr, chunk_num, partition_limit;
u8 *p, *chunk;
+ int ret;
/* whal_FwCtrl_LoadFwImageSm() */
@@ -123,7 +190,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition));
partition.mem.start = dest;
- wlcore_set_partition(wl, &partition);
+ ret = wlcore_set_partition(wl, &partition);
+ if (ret < 0)
+ goto out;
/* 10.1 set partition limit and chunk num */
chunk_num = 0;
@@ -137,7 +206,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
partition_limit = chunk_num * CHUNK_SIZE +
wl->ptable[PART_DOWN].mem.size;
partition.mem.start = addr;
- wlcore_set_partition(wl, &partition);
+ ret = wlcore_set_partition(wl, &partition);
+ if (ret < 0)
+ goto out;
}
/* 10.3 upload the chunk */
@@ -146,7 +217,9 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
memcpy(chunk, p, CHUNK_SIZE);
wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
p, addr);
- wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
+ ret = wlcore_write(wl, addr, chunk, CHUNK_SIZE, false);
+ if (ret < 0)
+ goto out;
chunk_num++;
}
@@ -157,10 +230,11 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
fw_data_len % CHUNK_SIZE, p, addr);
- wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
+ ret = wlcore_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
+out:
kfree(chunk);
- return 0;
+ return ret;
}
int wlcore_boot_upload_firmware(struct wl1271 *wl)
@@ -203,9 +277,12 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
int i;
u32 dest_addr, val;
u8 *nvs_ptr, *nvs_aligned;
+ int ret;
- if (wl->nvs == NULL)
+ if (wl->nvs == NULL) {
+ wl1271_error("NVS file is needed during boot");
return -ENODEV;
+ }
if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
struct wl1271_nvs_file *nvs =
@@ -298,7 +375,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT,
"nvs burst write 0x%x: 0x%x",
dest_addr, val);
- wl1271_write32(wl, dest_addr, val);
+ ret = wlcore_write32(wl, dest_addr, val);
+ if (ret < 0)
+ return ret;
nvs_ptr += 4;
dest_addr += 4;
@@ -324,7 +403,9 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
nvs_len -= nvs_ptr - (u8 *)wl->nvs;
/* Now we must set the partition correctly */
- wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+ if (ret < 0)
+ return ret;
/* Copy the NVS tables to a new block to ensure alignment */
nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
@@ -332,11 +413,11 @@ int wlcore_boot_upload_nvs(struct wl1271 *wl)
return -ENOMEM;
/* And finally we upload the NVS tables */
- wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS,
- nvs_aligned, nvs_len, false);
+ ret = wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS, nvs_aligned, nvs_len,
+ false);
kfree(nvs_aligned);
- return 0;
+ return ret;
out_badnvs:
wl1271_error("nvs data is malformed");
@@ -350,11 +431,17 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
u32 chip_id, intr;
/* Make sure we have the boot partition */
- wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ if (ret < 0)
+ return ret;
- wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
+ ret = wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
+ if (ret < 0)
+ return ret;
- chip_id = wlcore_read_reg(wl, REG_CHIP_ID_B);
+ ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &chip_id);
+ if (ret < 0)
+ return ret;
wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
@@ -367,7 +454,9 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
loop = 0;
while (loop++ < INIT_LOOP) {
udelay(INIT_LOOP_DELAY);
- intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
+ ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
+ if (ret < 0)
+ return ret;
if (intr == 0xffffffff) {
wl1271_error("error reading hardware complete "
@@ -376,8 +465,10 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
}
/* check that ACX_INTR_INIT_COMPLETE is enabled */
else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
- wlcore_write_reg(wl, REG_INTERRUPT_ACK,
- WL1271_ACX_INTR_INIT_COMPLETE);
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
+ WL1271_ACX_INTR_INIT_COMPLETE);
+ if (ret < 0)
+ return ret;
break;
}
}
@@ -389,20 +480,25 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
}
/* get hardware config command mail box */
- wl->cmd_box_addr = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR);
+ ret = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR, &wl->cmd_box_addr);
+ if (ret < 0)
+ return ret;
wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr);
/* get hardware config event mail box */
- wl->mbox_ptr[0] = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR);
+ ret = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR, &wl->mbox_ptr[0]);
+ if (ret < 0)
+ return ret;
+
wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
wl->mbox_ptr[0], wl->mbox_ptr[1]);
- ret = wlcore_boot_fw_version(wl);
+ ret = wlcore_boot_static_data(wl);
if (ret < 0) {
- wl1271_error("couldn't boot firmware");
+ wl1271_error("error getting static data");
return ret;
}
@@ -436,9 +532,9 @@ int wlcore_boot_run_firmware(struct wl1271 *wl)
}
/* set the working partition to its "running" mode offset */
- wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/* firmware startup completed */
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware);
diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h
index 094981dd2227..a525225f990c 100644
--- a/drivers/net/wireless/ti/wlcore/boot.h
+++ b/drivers/net/wireless/ti/wlcore/boot.h
@@ -40,6 +40,7 @@ struct wl1271_static_data {
u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
u32 hw_version;
u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
+ u8 priv[0];
};
/* number of times we try to read the INIT interrupt */
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 5b128a971449..20e1bd923832 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -36,8 +36,10 @@
#include "cmd.h"
#include "event.h"
#include "tx.h"
+#include "hw_ops.h"
#define WL1271_CMD_FAST_POLL_COUNT 50
+#define WL1271_WAIT_EVENT_FAST_POLL_COUNT 20
/*
* send command to firmware
@@ -64,17 +66,24 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
WARN_ON(len % 4 != 0);
WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
- wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
+ ret = wlcore_write(wl, wl->cmd_box_addr, buf, len, false);
+ if (ret < 0)
+ goto fail;
/*
* TODO: we just need this because one bit is in a different
* place. Is there any better way?
*/
- wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
+ ret = wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
+ if (ret < 0)
+ goto fail;
timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
- intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
+ ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
+ if (ret < 0)
+ goto fail;
+
while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
if (time_after(jiffies, timeout)) {
wl1271_error("command complete timeout");
@@ -88,13 +97,18 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
else
msleep(1);
- intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
+ ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
+ if (ret < 0)
+ goto fail;
}
/* read back the status code of the command */
if (res_len == 0)
res_len = sizeof(struct wl1271_cmd_header);
- wl1271_read(wl, wl->cmd_box_addr, cmd, res_len, false);
+
+ ret = wlcore_read(wl, wl->cmd_box_addr, cmd, res_len, false);
+ if (ret < 0)
+ goto fail;
status = le16_to_cpu(cmd->status);
if (status != CMD_STATUS_SUCCESS) {
@@ -103,11 +117,14 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
goto fail;
}
- wlcore_write_reg(wl, REG_INTERRUPT_ACK, WL1271_ACX_INTR_CMD_COMPLETE);
+ ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
+ WL1271_ACX_INTR_CMD_COMPLETE);
+ if (ret < 0)
+ goto fail;
+
return 0;
fail:
- WARN_ON(1);
wl12xx_queue_recovery_work(wl);
return ret;
}
@@ -116,35 +133,50 @@ fail:
* Poll the mailbox event field until any of the bits in the mask is set or a
* timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
*/
-static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
+static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
+ u32 mask, bool *timeout)
{
u32 *events_vector;
u32 event;
- unsigned long timeout;
+ unsigned long timeout_time;
+ u16 poll_count = 0;
int ret = 0;
+ *timeout = false;
+
events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA);
if (!events_vector)
return -ENOMEM;
- timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
+ timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
do {
- if (time_after(jiffies, timeout)) {
+ if (time_after(jiffies, timeout_time)) {
wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
(int)mask);
- ret = -ETIMEDOUT;
+ *timeout = true;
goto out;
}
- msleep(1);
+ poll_count++;
+ if (poll_count < WL1271_WAIT_EVENT_FAST_POLL_COUNT)
+ usleep_range(50, 51);
+ else
+ usleep_range(1000, 5000);
/* read from both event fields */
- wl1271_read(wl, wl->mbox_ptr[0], events_vector,
- sizeof(*events_vector), false);
+ ret = wlcore_read(wl, wl->mbox_ptr[0], events_vector,
+ sizeof(*events_vector), false);
+ if (ret < 0)
+ goto out;
+
event = *events_vector & mask;
- wl1271_read(wl, wl->mbox_ptr[1], events_vector,
- sizeof(*events_vector), false);
+
+ ret = wlcore_read(wl, wl->mbox_ptr[1], events_vector,
+ sizeof(*events_vector), false);
+ if (ret < 0)
+ goto out;
+
event |= *events_vector & mask;
} while (!event);
@@ -156,9 +188,10 @@ out:
static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
{
int ret;
+ bool timeout = false;
- ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask);
- if (ret != 0) {
+ ret = wl1271_cmd_wait_for_event_or_timeout(wl, mask, &timeout);
+ if (ret != 0 || timeout) {
wl12xx_queue_recovery_work(wl);
return ret;
}
@@ -291,6 +324,23 @@ static int wl12xx_get_new_session_id(struct wl1271 *wl,
return wlvif->session_counter;
}
+static u8 wlcore_get_native_channel_type(u8 nl_channel_type)
+{
+ switch (nl_channel_type) {
+ case NL80211_CHAN_NO_HT:
+ return WLCORE_CHAN_NO_HT;
+ case NL80211_CHAN_HT20:
+ return WLCORE_CHAN_HT20;
+ case NL80211_CHAN_HT40MINUS:
+ return WLCORE_CHAN_HT40MINUS;
+ case NL80211_CHAN_HT40PLUS:
+ return WLCORE_CHAN_HT40PLUS;
+ default:
+ WARN_ON(1);
+ return WLCORE_CHAN_NO_HT;
+ }
+}
+
static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
@@ -407,6 +457,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
+ cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
@@ -446,6 +497,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
+ bool timeout = false;
if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
@@ -468,6 +520,17 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
goto out_free;
}
+ /*
+ * Sometimes the firmware doesn't send this event, so we just
+ * time out without failing. Queue recovery for other
+ * failures.
+ */
+ ret = wl1271_cmd_wait_for_event_or_timeout(wl,
+ ROLE_STOP_COMPLETE_EVENT_ID,
+ &timeout);
+ if (ret)
+ wl12xx_queue_recovery_work(wl);
+
wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
@@ -482,6 +545,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
struct wl12xx_cmd_role_start *cmd;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ u32 supported_rates;
int ret;
wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
@@ -519,6 +583,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* FIXME: Change when adding DFS */
cmd->ap.reset_tsf = 1; /* By default reset AP TSF */
cmd->channel = wlvif->channel;
+ cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
if (!bss_conf->hidden_ssid) {
/* take the SSID from the beacon for backward compatibility */
@@ -531,7 +596,13 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
memcpy(cmd->ap.ssid, bss_conf->ssid, bss_conf->ssid_len);
}
- cmd->ap.local_rates = cpu_to_le32(0xffffffff);
+ supported_rates = CONF_TX_AP_ENABLED_RATES | CONF_TX_MCS_RATES |
+ wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
+
+ wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x",
+ supported_rates);
+
+ cmd->ap.local_rates = cpu_to_le32(supported_rates);
switch (wlvif->band) {
case IEEE80211_BAND_2GHZ:
@@ -797,6 +868,7 @@ out:
kfree(cmd);
return ret;
}
+EXPORT_SYMBOL_GPL(wl1271_cmd_data_path);
int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 ps_mode, u16 auto_ps_timeout)
@@ -953,12 +1025,14 @@ out:
int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 role_id, u8 band,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len)
+ const u8 *ie, size_t ie_len, bool sched_scan)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret;
u32 rate;
+ u16 template_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
+ u16 template_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie, ie_len);
@@ -969,14 +1043,20 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
+ if (!sched_scan &&
+ (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
+ template_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4;
+ template_id_5 = CMD_TEMPL_APP_PROBE_REQ_5;
+ }
+
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
if (band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, role_id,
- CMD_TEMPL_CFG_PROBE_REQ_2_4,
+ template_id_2_4,
skb->data, skb->len, 0, rate);
else
ret = wl1271_cmd_template_set(wl, role_id,
- CMD_TEMPL_CFG_PROBE_REQ_5,
+ template_id_5,
skb->data, skb->len, 0, rate);
out:
@@ -1018,7 +1098,7 @@ out:
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- int ret, extra;
+ int ret, extra = 0;
u16 fc;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
@@ -1057,7 +1137,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* encryption space */
switch (wlvif->encryption_type) {
case KEY_TKIP:
- extra = WL1271_EXTRA_SPACE_TKIP;
+ if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
+ extra = WL1271_EXTRA_SPACE_TKIP;
break;
case KEY_AES:
extra = WL1271_EXTRA_SPACE_AES;
@@ -1346,13 +1427,18 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
if (sta->wme && (sta->uapsd_queues & BIT(i)))
- cmd->psd_type[i] = WL1271_PSD_UPSD_TRIGGER;
+ cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] =
+ WL1271_PSD_UPSD_TRIGGER;
else
- cmd->psd_type[i] = WL1271_PSD_LEGACY;
+ cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] =
+ WL1271_PSD_LEGACY;
+
sta_rates = sta->supp_rates[wlvif->band];
if (sta->ht_cap.ht_supported)
- sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
+ sta_rates |=
+ (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
+ (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
cmd->supported_rates =
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
@@ -1378,6 +1464,7 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
{
struct wl12xx_cmd_remove_peer *cmd;
int ret;
+ bool timeout = false;
wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid);
@@ -1398,12 +1485,16 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
goto out_free;
}
+ ret = wl1271_cmd_wait_for_event_or_timeout(wl,
+ PEER_REMOVE_COMPLETE_EVENT_ID,
+ &timeout);
/*
* We are ok with a timeout here. The event is sometimes not sent
- * due to a firmware bug.
+ * due to a firmware bug. In case of another error (like SDIO timeout)
+ * queue a recovery.
*/
- wl1271_cmd_wait_for_event_or_timeout(wl,
- PEER_REMOVE_COMPLETE_EVENT_ID);
+ if (ret)
+ wl12xx_queue_recovery_work(wl);
out_free:
kfree(cmd);
@@ -1573,19 +1664,25 @@ out:
int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
{
int ret = 0;
+ bool is_first_roc;
if (WARN_ON(test_bit(role_id, wl->roc_map)))
return 0;
+ is_first_roc = (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >=
+ WL12XX_MAX_ROLES);
+
ret = wl12xx_cmd_roc(wl, wlvif, role_id);
if (ret < 0)
goto out;
- ret = wl1271_cmd_wait_for_event(wl,
- REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
- if (ret < 0) {
- wl1271_error("cmd roc event completion error");
- goto out;
+ if (is_first_roc) {
+ ret = wl1271_cmd_wait_for_event(wl,
+ REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID);
+ if (ret < 0) {
+ wl1271_error("cmd roc event completion error");
+ goto out;
+ }
}
__set_bit(role_id, wl->roc_map);
@@ -1714,7 +1811,9 @@ int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
return -EINVAL;
/* flush all pending packets */
- wl1271_tx_work_locked(wl);
+ ret = wlcore_tx_work_locked(wl);
+ if (ret < 0)
+ goto out;
if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
ret = wl12xx_croc(wl, wlvif->dev_role_id);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index a46ae07cb77e..4ef0b095f0d6 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -58,7 +58,7 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 role_id, u8 band,
const u8 *ssid, size_t ssid_len,
- const u8 *ie, size_t ie_len);
+ const u8 *ie, size_t ie_len, bool sched_scan);
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct sk_buff *skb);
@@ -172,8 +172,8 @@ enum cmd_templ {
CMD_TEMPL_PS_POLL,
CMD_TEMPL_KLV,
CMD_TEMPL_DISCONNECT,
- CMD_TEMPL_PROBE_REQ_2_4, /* for firmware internal use only */
- CMD_TEMPL_PROBE_REQ_5, /* for firmware internal use only */
+ CMD_TEMPL_APP_PROBE_REQ_2_4,
+ CMD_TEMPL_APP_PROBE_REQ_5,
CMD_TEMPL_BAR, /* for firmware internal use only */
CMD_TEMPL_CTS, /*
* For CTS-to-self (FastCTS) mechanism
@@ -192,7 +192,7 @@ enum cmd_templ {
#define WL1271_COMMAND_TIMEOUT 2000
#define WL1271_CMD_TEMPL_DFLT_SIZE 252
#define WL1271_CMD_TEMPL_MAX_SIZE 512
-#define WL1271_EVENT_TIMEOUT 750
+#define WL1271_EVENT_TIMEOUT 1500
struct wl1271_cmd_header {
__le16 id;
@@ -266,13 +266,22 @@ enum wlcore_band {
WLCORE_BAND_MAX_RADIO = 0x7F,
};
+enum wlcore_channel_type {
+ WLCORE_CHAN_NO_HT,
+ WLCORE_CHAN_HT20,
+ WLCORE_CHAN_HT40MINUS,
+ WLCORE_CHAN_HT40PLUS
+};
+
struct wl12xx_cmd_role_start {
struct wl1271_cmd_header header;
u8 role_id;
u8 band;
u8 channel;
- u8 padding;
+
+ /* enum wlcore_channel_type */
+ u8 channel_type;
union {
struct {
@@ -643,4 +652,25 @@ struct wl12xx_cmd_stop_channel_switch {
struct wl1271_cmd_header header;
} __packed;
+/* Used to check radio status after calibration */
+#define MAX_TLV_LENGTH 500
+#define TEST_CMD_P2G_CAL 2 /* TX BiP */
+
+struct wl1271_cmd_cal_p2g {
+ struct wl1271_cmd_header header;
+
+ struct wl1271_cmd_test_header test;
+
+ __le32 ver;
+ __le16 len;
+ u8 buf[MAX_TLV_LENGTH];
+ u8 type;
+ u8 padding;
+
+ __le16 radio_status;
+
+ u8 sub_band_mask;
+ u8 padding2;
+} __packed;
+
#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index fef0db4213bc..d77224f2ac6b 100644
--- a/drivers/net/wireless/ti/wlcore/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -45,7 +45,15 @@ enum {
CONF_HW_BIT_RATE_MCS_4 = BIT(17),
CONF_HW_BIT_RATE_MCS_5 = BIT(18),
CONF_HW_BIT_RATE_MCS_6 = BIT(19),
- CONF_HW_BIT_RATE_MCS_7 = BIT(20)
+ CONF_HW_BIT_RATE_MCS_7 = BIT(20),
+ CONF_HW_BIT_RATE_MCS_8 = BIT(21),
+ CONF_HW_BIT_RATE_MCS_9 = BIT(22),
+ CONF_HW_BIT_RATE_MCS_10 = BIT(23),
+ CONF_HW_BIT_RATE_MCS_11 = BIT(24),
+ CONF_HW_BIT_RATE_MCS_12 = BIT(25),
+ CONF_HW_BIT_RATE_MCS_13 = BIT(26),
+ CONF_HW_BIT_RATE_MCS_14 = BIT(27),
+ CONF_HW_BIT_RATE_MCS_15 = BIT(28),
};
enum {
@@ -310,7 +318,7 @@ enum {
struct conf_sg_settings {
u32 params[CONF_SG_PARAMS_MAX];
u8 state;
-};
+} __packed;
enum conf_rx_queue_type {
CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */
@@ -394,7 +402,7 @@ struct conf_rx_settings {
* Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY,
*/
u8 queue_type;
-};
+} __packed;
#define CONF_TX_MAX_RATE_CLASSES 10
@@ -435,6 +443,12 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_MCS_5 | CONF_HW_BIT_RATE_MCS_6 | \
CONF_HW_BIT_RATE_MCS_7)
+#define CONF_TX_MIMO_RATES (CONF_HW_BIT_RATE_MCS_8 | \
+ CONF_HW_BIT_RATE_MCS_9 | CONF_HW_BIT_RATE_MCS_10 | \
+ CONF_HW_BIT_RATE_MCS_11 | CONF_HW_BIT_RATE_MCS_12 | \
+ CONF_HW_BIT_RATE_MCS_13 | CONF_HW_BIT_RATE_MCS_14 | \
+ CONF_HW_BIT_RATE_MCS_15)
+
/*
* Default rates for management traffic when operating in AP mode. This
* should be configured according to the basic rate set of the AP
@@ -487,7 +501,7 @@ struct conf_tx_rate_class {
* the policy (0 - long preamble, 1 - short preamble.
*/
u8 aflags;
-};
+} __packed;
#define CONF_TX_MAX_AC_COUNT 4
@@ -504,7 +518,7 @@ enum conf_tx_ac {
CONF_TX_AC_VI = 2, /* video */
CONF_TX_AC_VO = 3, /* voice */
CONF_TX_AC_CTS2SELF = 4, /* fictitious AC, follows AC_VO */
- CONF_TX_AC_ANY_TID = 0x1f
+ CONF_TX_AC_ANY_TID = 0xff
};
struct conf_tx_ac_category {
@@ -544,7 +558,7 @@ struct conf_tx_ac_category {
* Range: u16
*/
u16 tx_op_limit;
-};
+} __packed;
#define CONF_TX_MAX_TID_COUNT 8
@@ -578,7 +592,7 @@ struct conf_tx_tid {
u8 ps_scheme;
u8 ack_policy;
u32 apsd_conf[2];
-};
+} __packed;
struct conf_tx_settings {
/*
@@ -664,7 +678,7 @@ struct conf_tx_settings {
/* Time in ms for Tx watchdog timer to expire */
u32 tx_watchdog_timeout;
-};
+} __packed;
enum {
CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/
@@ -711,7 +725,7 @@ struct conf_bcn_filt_rule {
* Version for the vendor specifie IE (221)
*/
u8 version[CONF_BCN_IE_VER_LEN];
-};
+} __packed;
#define CONF_MAX_RSSI_SNR_TRIGGERS 8
@@ -762,7 +776,7 @@ struct conf_sig_weights {
* Range: u8
*/
u8 snr_pkt_avg_weight;
-};
+} __packed;
enum conf_bcn_filt_mode {
CONF_BCN_FILT_MODE_DISABLED = 0,
@@ -810,7 +824,7 @@ struct conf_conn_settings {
*
* Range: CONF_BCN_FILT_MODE_*
*/
- enum conf_bcn_filt_mode bcn_filt_mode;
+ u8 bcn_filt_mode;
/*
* Configure Beacon filter pass-thru rules.
@@ -937,7 +951,13 @@ struct conf_conn_settings {
* Range: u16
*/
u8 max_listen_interval;
-};
+
+ /*
+ * Default sleep authorization for a new STA interface. This determines
+ * whether we can go to ELP.
+ */
+ u8 sta_sleep_auth;
+} __packed;
enum {
CONF_REF_CLK_19_2_E,
@@ -965,6 +985,11 @@ struct conf_itrim_settings {
/* moderation timeout in microsecs from the last TX */
u32 timeout;
+} __packed;
+
+enum conf_fast_wakeup {
+ CONF_FAST_WAKEUP_ENABLE,
+ CONF_FAST_WAKEUP_DISABLE,
};
struct conf_pm_config_settings {
@@ -978,10 +1003,10 @@ struct conf_pm_config_settings {
/*
* Host fast wakeup support
*
- * Range: true, false
+ * Range: enum conf_fast_wakeup
*/
- bool host_fast_wakeup_support;
-};
+ u8 host_fast_wakeup_support;
+} __packed;
struct conf_roam_trigger_settings {
/*
@@ -1018,7 +1043,7 @@ struct conf_roam_trigger_settings {
* Range: 0 - 255
*/
u8 avg_weight_snr_data;
-};
+} __packed;
struct conf_scan_settings {
/*
@@ -1064,7 +1089,7 @@ struct conf_scan_settings {
* Range: u32 Microsecs
*/
u32 split_scan_timeout;
-};
+} __packed;
struct conf_sched_scan_settings {
/*
@@ -1102,7 +1127,7 @@ struct conf_sched_scan_settings {
/* SNR threshold to be used for filtering */
s8 snr_threshold;
-};
+} __packed;
struct conf_ht_setting {
u8 rx_ba_win_size;
@@ -1111,7 +1136,7 @@ struct conf_ht_setting {
/* bitmap of enabled TIDs for TX BA sessions */
u8 tx_ba_tid_bitmap;
-};
+} __packed;
struct conf_memory_settings {
/* Number of stations supported in IBSS mode */
@@ -1151,7 +1176,7 @@ struct conf_memory_settings {
* Range: 0-120
*/
u8 tx_min;
-};
+} __packed;
struct conf_fm_coex {
u8 enable;
@@ -1164,7 +1189,7 @@ struct conf_fm_coex {
u16 ldo_stabilization_time;
u8 fm_disturbed_band_margin;
u8 swallow_clk_diff;
-};
+} __packed;
struct conf_rx_streaming_settings {
/*
@@ -1193,7 +1218,7 @@ struct conf_rx_streaming_settings {
* enable rx streaming also when there is no coex activity
*/
u8 always;
-};
+} __packed;
struct conf_fwlog {
/* Continuous or on-demand */
@@ -1217,7 +1242,7 @@ struct conf_fwlog {
/* Regulates the frequency of log messages */
u8 threshold;
-};
+} __packed;
#define ACX_RATE_MGMT_NUM_OF_RATES 13
struct conf_rate_policy_settings {
@@ -1236,7 +1261,7 @@ struct conf_rate_policy_settings {
u8 rate_check_up;
u8 rate_check_down;
u8 rate_retry_policy[ACX_RATE_MGMT_NUM_OF_RATES];
-};
+} __packed;
struct conf_hangover_settings {
u32 recover_time;
@@ -1250,7 +1275,23 @@ struct conf_hangover_settings {
u8 quiet_time;
u8 increase_time;
u8 window_size;
-};
+} __packed;
+
+/*
+ * The conf version consists of 4 bytes. The two MSB are the wlcore
+ * version, the two LSB are the lower driver's private conf
+ * version.
+ */
+#define WLCORE_CONF_VERSION (0x0002 << 16)
+#define WLCORE_CONF_MASK 0xffff0000
+#define WLCORE_CONF_SIZE (sizeof(struct wlcore_conf_header) + \
+ sizeof(struct wlcore_conf))
+
+struct wlcore_conf_header {
+ __le32 magic;
+ __le32 version;
+ __le32 checksum;
+} __packed;
struct wlcore_conf {
struct conf_sg_settings sg;
@@ -1269,6 +1310,12 @@ struct wlcore_conf {
struct conf_fwlog fwlog;
struct conf_rate_policy_settings rate;
struct conf_hangover_settings hangover;
-};
+} __packed;
+
+struct wlcore_conf_file {
+ struct wlcore_conf_header header;
+ struct wlcore_conf core;
+ u8 priv[0];
+} __packed;
#endif
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index d5aea1ff5ad1..80dbc5304fac 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -25,6 +25,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "wlcore.h"
#include "debug.h"
@@ -32,14 +33,16 @@
#include "ps.h"
#include "io.h"
#include "tx.h"
+#include "hw_ops.h"
/* ms */
#define WL1271_DEBUGFS_STATS_LIFETIME 1000
+#define WLCORE_MAX_BLOCK_SIZE ((size_t)(4*PAGE_SIZE))
+
/* debugfs macros idea from mac80211 */
-#define DEBUGFS_FORMAT_BUFFER_SIZE 100
-static int wl1271_format_buffer(char __user *userbuf, size_t count,
- loff_t *ppos, char *fmt, ...)
+int wl1271_format_buffer(char __user *userbuf, size_t count,
+ loff_t *ppos, char *fmt, ...)
{
va_list args;
char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
@@ -51,59 +54,9 @@ static int wl1271_format_buffer(char __user *userbuf, size_t count,
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
+EXPORT_SYMBOL_GPL(wl1271_format_buffer);
-#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
-static ssize_t name## _read(struct file *file, char __user *userbuf, \
- size_t count, loff_t *ppos) \
-{ \
- struct wl1271 *wl = file->private_data; \
- return wl1271_format_buffer(userbuf, count, ppos, \
- fmt "\n", ##value); \
-} \
- \
-static const struct file_operations name## _ops = { \
- .read = name## _read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_ADD(name, parent) \
- entry = debugfs_create_file(#name, 0400, parent, \
- wl, &name## _ops); \
- if (!entry || IS_ERR(entry)) \
- goto err; \
-
-#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
- do { \
- entry = debugfs_create_file(#name, 0400, parent, \
- wl, &prefix## _## name## _ops); \
- if (!entry || IS_ERR(entry)) \
- goto err; \
- } while (0);
-
-#define DEBUGFS_FWSTATS_FILE(sub, name, fmt) \
-static ssize_t sub## _ ##name## _read(struct file *file, \
- char __user *userbuf, \
- size_t count, loff_t *ppos) \
-{ \
- struct wl1271 *wl = file->private_data; \
- \
- wl1271_debugfs_update_stats(wl); \
- \
- return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
- wl->stats.fw_stats->sub.name); \
-} \
- \
-static const struct file_operations sub## _ ##name## _ops = { \
- .read = sub## _ ##name## _read, \
- .open = simple_open, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_FWSTATS_ADD(sub, name) \
- DEBUGFS_ADD(sub## _ ##name, stats)
-
-static void wl1271_debugfs_update_stats(struct wl1271 *wl)
+void wl1271_debugfs_update_stats(struct wl1271 *wl)
{
int ret;
@@ -125,97 +78,7 @@ static void wl1271_debugfs_update_stats(struct wl1271 *wl)
out:
mutex_unlock(&wl->mutex);
}
-
-DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
-
-DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
-DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
-DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
-DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
-DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
-DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
-DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
-DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
-
-DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
-DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
-DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
-DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
-
-DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
-DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
-DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
-DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
-DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
-DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
-DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
-DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
-DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
-DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
-DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
-DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
-DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
-DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
-DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
-
-DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
-DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
-/* skipping wep.reserved */
-DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
-DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
-DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
-DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
-
-DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
-DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
-/* skipping cont_miss_bcns_spread for now */
-DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
-
-DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
-DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
-
-DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
-DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
-DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
-DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
-
-DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
-DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
-DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
-DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
-DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
-DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
-
-DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
-DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
-DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
-DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
-
-DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
-DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
+EXPORT_SYMBOL_GPL(wl1271_debugfs_update_stats);
DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count);
DEBUGFS_READONLY_FILE(excessive_retries, "%u",
@@ -241,6 +104,89 @@ static const struct file_operations tx_queue_len_ops = {
.llseek = default_llseek,
};
+static void chip_op_handler(struct wl1271 *wl, unsigned long value,
+ void *arg)
+{
+ int ret;
+ int (*chip_op) (struct wl1271 *wl);
+
+ if (!arg) {
+ wl1271_warning("debugfs chip_op_handler with no callback");
+ return;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ return;
+
+ chip_op = arg;
+ chip_op(wl);
+
+ wl1271_ps_elp_sleep(wl);
+}
+
+
+static inline void no_write_handler(struct wl1271 *wl,
+ unsigned long value,
+ unsigned long param)
+{
+}
+
+#define WL12XX_CONF_DEBUGFS(param, conf_sub_struct, \
+ min_val, max_val, write_handler_locked, \
+ write_handler_arg) \
+ static ssize_t param##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ struct wl1271 *wl = file->private_data; \
+ return wl1271_format_buffer(user_buf, count, \
+ ppos, "%d\n", \
+ wl->conf.conf_sub_struct.param); \
+ } \
+ \
+ static ssize_t param##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos) \
+ { \
+ struct wl1271 *wl = file->private_data; \
+ unsigned long value; \
+ int ret; \
+ \
+ ret = kstrtoul_from_user(user_buf, count, 10, &value); \
+ if (ret < 0) { \
+ wl1271_warning("illegal value for " #param); \
+ return -EINVAL; \
+ } \
+ \
+ if (value < min_val || value > max_val) { \
+ wl1271_warning(#param " is not in valid range"); \
+ return -ERANGE; \
+ } \
+ \
+ mutex_lock(&wl->mutex); \
+ wl->conf.conf_sub_struct.param = value; \
+ \
+ write_handler_locked(wl, value, write_handler_arg); \
+ \
+ mutex_unlock(&wl->mutex); \
+ return count; \
+ } \
+ \
+ static const struct file_operations param##_ops = { \
+ .read = param##_read, \
+ .write = param##_write, \
+ .open = simple_open, \
+ .llseek = default_llseek, \
+ };
+
+WL12XX_CONF_DEBUGFS(irq_pkt_threshold, rx, 0, 65535,
+ chip_op_handler, wl1271_acx_init_rx_interrupt)
+WL12XX_CONF_DEBUGFS(irq_blk_threshold, rx, 0, 65535,
+ chip_op_handler, wl1271_acx_init_rx_interrupt)
+WL12XX_CONF_DEBUGFS(irq_timeout, rx, 0, 100,
+ chip_op_handler, wl1271_acx_init_rx_interrupt)
+
static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -535,8 +481,7 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_LHEX(ap_ps_map);
DRIVER_STATE_PRINT_HEX(quirks);
DRIVER_STATE_PRINT_HEX(irq);
- DRIVER_STATE_PRINT_HEX(ref_clock);
- DRIVER_STATE_PRINT_HEX(tcxo_clock);
+ /* TODO: ref_clock and tcxo_clock were moved to wl12xx priv */
DRIVER_STATE_PRINT_HEX(hw_pg_ver);
DRIVER_STATE_PRINT_HEX(platform_quirks);
DRIVER_STATE_PRINT_HEX(chip.id);
@@ -647,7 +592,6 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
VIF_STATE_PRINT_INT(last_rssi_event);
VIF_STATE_PRINT_INT(ba_support);
VIF_STATE_PRINT_INT(ba_allowed);
- VIF_STATE_PRINT_INT(is_gem);
VIF_STATE_PRINT_LLHEX(tx_security_seq);
VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
}
@@ -1002,108 +946,281 @@ static const struct file_operations beacon_filtering_ops = {
.llseek = default_llseek,
};
-static int wl1271_debugfs_add_files(struct wl1271 *wl,
- struct dentry *rootdir)
+static ssize_t fw_stats_raw_read(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
{
- int ret = 0;
- struct dentry *entry, *stats, *streaming;
+ struct wl1271 *wl = file->private_data;
- stats = debugfs_create_dir("fw-statistics", rootdir);
- if (!stats || IS_ERR(stats)) {
- entry = stats;
- goto err;
+ wl1271_debugfs_update_stats(wl);
+
+ return simple_read_from_buffer(userbuf, count, ppos,
+ wl->stats.fw_stats,
+ wl->stats.fw_stats_len);
+}
+
+static const struct file_operations fw_stats_raw_ops = {
+ .read = fw_stats_raw_read,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t sleep_auth_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+
+ return wl1271_format_buffer(user_buf, count,
+ ppos, "%d\n",
+ wl->sleep_auth);
+}
+
+static ssize_t sleep_auth_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ unsigned long value;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in sleep_auth");
+ return -EINVAL;
+ }
+
+ if (value < 0 || value > WL1271_PSM_MAX) {
+ wl1271_warning("sleep_auth must be between 0 and %d",
+ WL1271_PSM_MAX);
+ return -ERANGE;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ wl->conf.conn.sta_sleep_auth = value;
+
+ if (wl->state == WL1271_STATE_OFF) {
+ /* this will show up on "read" in case we are off */
+ wl->sleep_auth = value;
+ goto out;
}
- DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
-
- DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
- DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
- DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
- DEBUGFS_FWSTATS_ADD(rx, dropped);
- DEBUGFS_FWSTATS_ADD(rx, fcs_err);
- DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
- DEBUGFS_FWSTATS_ADD(rx, path_reset);
- DEBUGFS_FWSTATS_ADD(rx, reset_counter);
-
- DEBUGFS_FWSTATS_ADD(dma, rx_requested);
- DEBUGFS_FWSTATS_ADD(dma, rx_errors);
- DEBUGFS_FWSTATS_ADD(dma, tx_requested);
- DEBUGFS_FWSTATS_ADD(dma, tx_errors);
-
- DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
- DEBUGFS_FWSTATS_ADD(isr, fiqs);
- DEBUGFS_FWSTATS_ADD(isr, rx_headers);
- DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
- DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
- DEBUGFS_FWSTATS_ADD(isr, irqs);
- DEBUGFS_FWSTATS_ADD(isr, tx_procs);
- DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
- DEBUGFS_FWSTATS_ADD(isr, dma0_done);
- DEBUGFS_FWSTATS_ADD(isr, dma1_done);
- DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
- DEBUGFS_FWSTATS_ADD(isr, commands);
- DEBUGFS_FWSTATS_ADD(isr, rx_procs);
- DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
- DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
- DEBUGFS_FWSTATS_ADD(isr, pci_pm);
- DEBUGFS_FWSTATS_ADD(isr, wakeups);
- DEBUGFS_FWSTATS_ADD(isr, low_rssi);
-
- DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
- DEBUGFS_FWSTATS_ADD(wep, default_key_count);
- /* skipping wep.reserved */
- DEBUGFS_FWSTATS_ADD(wep, key_not_found);
- DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
- DEBUGFS_FWSTATS_ADD(wep, packets);
- DEBUGFS_FWSTATS_ADD(wep, interrupt);
-
- DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
- DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
- DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
- DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
- DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
- DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
- DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
- DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
- DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
- DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
- DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
- DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
- /* skipping cont_miss_bcns_spread for now */
- DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
-
- DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
- DEBUGFS_FWSTATS_ADD(mic, calc_failure);
-
- DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
- DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
- DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
- DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
- DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
- DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
-
- DEBUGFS_FWSTATS_ADD(event, heart_beat);
- DEBUGFS_FWSTATS_ADD(event, calibration);
- DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
- DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
- DEBUGFS_FWSTATS_ADD(event, rx_pool);
- DEBUGFS_FWSTATS_ADD(event, oom_late);
- DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
- DEBUGFS_FWSTATS_ADD(event, tx_stuck);
-
- DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
- DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
- DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
- DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
- DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
- DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
- DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
-
- DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
- DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
- DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_acx_sleep_auth(wl, value);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations sleep_auth_ops = {
+ .read = sleep_auth_read,
+ .write = sleep_auth_write,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static ssize_t dev_mem_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wlcore_partition_set part, old_part;
+ size_t bytes = count;
+ int ret;
+ char *buf;
+
+ /* only requests of dword-aligned size and offset are supported */
+ if (bytes % 4)
+ return -EINVAL;
+
+ if (*ppos % 4)
+ return -EINVAL;
+
+ /* function should return in reasonable time */
+ bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE);
+
+ if (bytes == 0)
+ return -EINVAL;
+
+ memset(&part, 0, sizeof(part));
+ part.mem.start = file->f_pos;
+ part.mem.size = bytes;
+
+ buf = kmalloc(bytes, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EFAULT;
+ goto skip_read;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto skip_read;
+
+ /* store current partition and switch partition */
+ memcpy(&old_part, &wl->curr_part, sizeof(old_part));
+ ret = wlcore_set_partition(wl, &part);
+ if (ret < 0)
+ goto part_err;
+
+ ret = wlcore_raw_read(wl, 0, buf, bytes, false);
+ if (ret < 0)
+ goto read_err;
+
+read_err:
+ /* recover partition */
+ ret = wlcore_set_partition(wl, &old_part);
+ if (ret < 0)
+ goto part_err;
+
+part_err:
+ wl1271_ps_elp_sleep(wl);
+
+skip_read:
+ mutex_unlock(&wl->mutex);
+
+ if (ret == 0) {
+ ret = copy_to_user(user_buf, buf, bytes);
+ if (ret < bytes) {
+ bytes -= ret;
+ *ppos += bytes;
+ ret = 0;
+ } else {
+ ret = -EFAULT;
+ }
+ }
+
+ kfree(buf);
+
+ return ((ret == 0) ? bytes : ret);
+}
+
+static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wlcore_partition_set part, old_part;
+ size_t bytes = count;
+ int ret;
+ char *buf;
+
+ /* only requests of dword-aligned size and offset are supported */
+ if (bytes % 4)
+ return -EINVAL;
+
+ if (*ppos % 4)
+ return -EINVAL;
+
+ /* function should return in reasonable time */
+ bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE);
+
+ if (bytes == 0)
+ return -EINVAL;
+
+ memset(&part, 0, sizeof(part));
+ part.mem.start = file->f_pos;
+ part.mem.size = bytes;
+
+ buf = kmalloc(bytes, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = copy_from_user(buf, user_buf, bytes);
+ if (ret) {
+ ret = -EFAULT;
+ goto err_out;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EFAULT;
+ goto skip_write;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto skip_write;
+
+ /* store current partition and switch partition */
+ memcpy(&old_part, &wl->curr_part, sizeof(old_part));
+ ret = wlcore_set_partition(wl, &part);
+ if (ret < 0)
+ goto part_err;
+
+ ret = wlcore_raw_write(wl, 0, buf, bytes, false);
+ if (ret < 0)
+ goto write_err;
+
+write_err:
+ /* recover partition */
+ ret = wlcore_set_partition(wl, &old_part);
+ if (ret < 0)
+ goto part_err;
+
+part_err:
+ wl1271_ps_elp_sleep(wl);
+
+skip_write:
+ mutex_unlock(&wl->mutex);
+
+ if (ret == 0)
+ *ppos += bytes;
+
+err_out:
+ kfree(buf);
+
+ return ((ret == 0) ? bytes : ret);
+}
+
+static loff_t dev_mem_seek(struct file *file, loff_t offset, int orig)
+{
+ loff_t ret;
+
+ /* only requests of dword-aligned size and offset are supported */
+ if (offset % 4)
+ return -EINVAL;
+
+ switch (orig) {
+ case SEEK_SET:
+ file->f_pos = offset;
+ ret = file->f_pos;
+ break;
+ case SEEK_CUR:
+ file->f_pos += offset;
+ ret = file->f_pos;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct file_operations dev_mem_ops = {
+ .open = simple_open,
+ .read = dev_mem_read,
+ .write = dev_mem_write,
+ .llseek = dev_mem_seek,
+};
+
+static int wl1271_debugfs_add_files(struct wl1271 *wl,
+ struct dentry *rootdir)
+{
+ int ret = 0;
+ struct dentry *entry, *streaming;
DEBUGFS_ADD(tx_queue_len, rootdir);
DEBUGFS_ADD(retry_count, rootdir);
@@ -1120,6 +1237,11 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(dynamic_ps_timeout, rootdir);
DEBUGFS_ADD(forced_ps, rootdir);
DEBUGFS_ADD(split_scan_timeout, rootdir);
+ DEBUGFS_ADD(irq_pkt_threshold, rootdir);
+ DEBUGFS_ADD(irq_blk_threshold, rootdir);
+ DEBUGFS_ADD(irq_timeout, rootdir);
+ DEBUGFS_ADD(fw_stats_raw, rootdir);
+ DEBUGFS_ADD(sleep_auth, rootdir);
streaming = debugfs_create_dir("rx_streaming", rootdir);
if (!streaming || IS_ERR(streaming))
@@ -1128,6 +1250,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming);
DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming);
+ DEBUGFS_ADD_PREFIX(dev, mem, rootdir);
return 0;
@@ -1145,7 +1268,7 @@ void wl1271_debugfs_reset(struct wl1271 *wl)
if (!wl->stats.fw_stats)
return;
- memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
+ memset(wl->stats.fw_stats, 0, wl->stats.fw_stats_len);
wl->stats.retry_count = 0;
wl->stats.excessive_retries = 0;
}
@@ -1160,34 +1283,34 @@ int wl1271_debugfs_init(struct wl1271 *wl)
if (IS_ERR(rootdir)) {
ret = PTR_ERR(rootdir);
- goto err;
+ goto out;
}
- wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats),
- GFP_KERNEL);
-
+ wl->stats.fw_stats = kzalloc(wl->stats.fw_stats_len, GFP_KERNEL);
if (!wl->stats.fw_stats) {
ret = -ENOMEM;
- goto err_fw;
+ goto out_remove;
}
wl->stats.fw_stats_update = jiffies;
ret = wl1271_debugfs_add_files(wl, rootdir);
+ if (ret < 0)
+ goto out_exit;
+ ret = wlcore_debugfs_init(wl, rootdir);
if (ret < 0)
- goto err_file;
+ goto out_exit;
- return 0;
+ goto out;
-err_file:
- kfree(wl->stats.fw_stats);
- wl->stats.fw_stats = NULL;
+out_exit:
+ wl1271_debugfs_exit(wl);
-err_fw:
+out_remove:
debugfs_remove_recursive(rootdir);
-err:
+out:
return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index a8d3aef011ff..f7381dd69009 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -26,8 +26,95 @@
#include "wlcore.h"
+int wl1271_format_buffer(char __user *userbuf, size_t count,
+ loff_t *ppos, char *fmt, ...);
+
int wl1271_debugfs_init(struct wl1271 *wl);
void wl1271_debugfs_exit(struct wl1271 *wl);
void wl1271_debugfs_reset(struct wl1271 *wl);
+void wl1271_debugfs_update_stats(struct wl1271 *wl);
+
+#define DEBUGFS_FORMAT_BUFFER_SIZE 256
+
+#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
+static ssize_t name## _read(struct file *file, char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct wl1271 *wl = file->private_data; \
+ return wl1271_format_buffer(userbuf, count, ppos, \
+ fmt "\n", ##value); \
+} \
+ \
+static const struct file_operations name## _ops = { \
+ .read = name## _read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_ADD(name, parent) \
+ do { \
+ entry = debugfs_create_file(#name, 0400, parent, \
+ wl, &name## _ops); \
+ if (!entry || IS_ERR(entry)) \
+ goto err; \
+ } while (0);
+
+
+#define DEBUGFS_ADD_PREFIX(prefix, name, parent) \
+ do { \
+ entry = debugfs_create_file(#name, 0400, parent, \
+ wl, &prefix## _## name## _ops); \
+ if (!entry || IS_ERR(entry)) \
+ goto err; \
+ } while (0);
+
+#define DEBUGFS_FWSTATS_FILE(sub, name, fmt, struct_type) \
+static ssize_t sub## _ ##name## _read(struct file *file, \
+ char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct wl1271 *wl = file->private_data; \
+ struct struct_type *stats = wl->stats.fw_stats; \
+ \
+ wl1271_debugfs_update_stats(wl); \
+ \
+ return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
+ stats->sub.name); \
+} \
+ \
+static const struct file_operations sub## _ ##name## _ops = { \
+ .read = sub## _ ##name## _read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_FWSTATS_FILE_ARRAY(sub, name, len, struct_type) \
+static ssize_t sub## _ ##name## _read(struct file *file, \
+ char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ struct wl1271 *wl = file->private_data; \
+ struct struct_type *stats = wl->stats.fw_stats; \
+ char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \
+ int res, i; \
+ \
+ wl1271_debugfs_update_stats(wl); \
+ \
+ for (i = 0; i < len; i++) \
+ res = snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \
+ buf, i, stats->sub.name[i]); \
+ \
+ return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \
+} \
+ \
+static const struct file_operations sub## _ ##name## _ops = { \
+ .read = sub## _ ##name## _read, \
+ .open = simple_open, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_FWSTATS_ADD(sub, name) \
+ DEBUGFS_ADD(sub## _ ##name, stats)
+
#endif /* WL1271_DEBUGFS_H */
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 28e2a633c3be..48907054d493 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -105,6 +105,7 @@ static int wl1271_event_process(struct wl1271 *wl)
u32 vector;
bool disconnect_sta = false;
unsigned long sta_bitmap = 0;
+ int ret;
wl1271_event_mbox_dump(mbox);
@@ -148,15 +149,33 @@ static int wl1271_event_process(struct wl1271 *wl)
int delay = wl->conf.conn.synch_fail_thold *
wl->conf.conn.bss_lose_timeout;
wl1271_info("Beacon loss detected.");
- cancel_delayed_work_sync(&wl->connection_loss_work);
+
+ /*
+ * if the work is already queued, it should take place. We
+ * don't want to delay the connection loss indication
+ * any more.
+ */
ieee80211_queue_delayed_work(wl->hw, &wl->connection_loss_work,
- msecs_to_jiffies(delay));
+ msecs_to_jiffies(delay));
+
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
+ GFP_KERNEL);
+ }
}
if (vector & REGAINED_BSS_EVENT_ID) {
/* TODO: check for multi-role */
wl1271_info("Beacon regained.");
- cancel_delayed_work_sync(&wl->connection_loss_work);
+ cancel_delayed_work(&wl->connection_loss_work);
+
+ /* sanity check - we can't lose and gain the beacon together */
+ WARN(vector & BSS_LOSE_EVENT_ID,
+ "Concurrent beacon loss and gain from FW");
}
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
@@ -210,7 +229,9 @@ static int wl1271_event_process(struct wl1271 *wl)
if ((vector & DUMMY_PACKET_EVENT_ID)) {
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
- wl1271_tx_dummy_packet(wl);
+ ret = wl1271_tx_dummy_packet(wl);
+ if (ret < 0)
+ return ret;
}
/*
@@ -283,8 +304,10 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
return -EINVAL;
/* first we read the mbox descriptor */
- wl1271_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
- sizeof(*wl->mbox), false);
+ ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
+ sizeof(*wl->mbox), false);
+ if (ret < 0)
+ return ret;
/* process the descriptor */
ret = wl1271_event_process(wl);
@@ -295,7 +318,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
* TODO: we just need this because one bit is in a different
* place. Is there any better way?
*/
- wl->ops->ack_event(wl);
+ ret = wl->ops->ack_event(wl);
- return 0;
+ return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 9384b4d56c24..2673d783ec1e 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -65,11 +65,13 @@ wlcore_hw_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
return wl->ops->get_rx_buf_align(wl, rx_desc);
}
-static inline void
+static inline int
wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
{
if (wl->ops->prepare_read)
- wl->ops->prepare_read(wl, rx_desc, len);
+ return wl->ops->prepare_read(wl, rx_desc, len);
+
+ return 0;
}
static inline u32
@@ -81,10 +83,12 @@ wlcore_hw_get_rx_packet_len(struct wl1271 *wl, void *rx_data, u32 data_len)
return wl->ops->get_rx_packet_len(wl, rx_data, data_len);
}
-static inline void wlcore_hw_tx_delayed_compl(struct wl1271 *wl)
+static inline int wlcore_hw_tx_delayed_compl(struct wl1271 *wl)
{
if (wl->ops->tx_delayed_compl)
- wl->ops->tx_delayed_compl(wl);
+ return wl->ops->tx_delayed_compl(wl);
+
+ return 0;
}
static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl)
@@ -119,4 +123,82 @@ static inline int wlcore_identify_fw(struct wl1271 *wl)
return 0;
}
+static inline void
+wlcore_hw_set_tx_desc_csum(struct wl1271 *wl,
+ struct wl1271_tx_hw_descr *desc,
+ struct sk_buff *skb)
+{
+ if (!wl->ops->set_tx_desc_csum)
+ BUG_ON(1);
+
+ wl->ops->set_tx_desc_csum(wl, desc, skb);
+}
+
+static inline void
+wlcore_hw_set_rx_csum(struct wl1271 *wl,
+ struct wl1271_rx_descriptor *desc,
+ struct sk_buff *skb)
+{
+ if (wl->ops->set_rx_csum)
+ wl->ops->set_rx_csum(wl, desc, skb);
+}
+
+static inline u32
+wlcore_hw_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
+{
+ if (wl->ops->ap_get_mimo_wide_rate_mask)
+ return wl->ops->ap_get_mimo_wide_rate_mask(wl, wlvif);
+
+ return 0;
+}
+
+static inline int
+wlcore_debugfs_init(struct wl1271 *wl, struct dentry *rootdir)
+{
+ if (wl->ops->debugfs_init)
+ return wl->ops->debugfs_init(wl, rootdir);
+
+ return 0;
+}
+
+static inline int
+wlcore_handle_static_data(struct wl1271 *wl, void *static_data)
+{
+ if (wl->ops->handle_static_data)
+ return wl->ops->handle_static_data(wl, static_data);
+
+ return 0;
+}
+
+static inline int
+wlcore_hw_get_spare_blocks(struct wl1271 *wl, bool is_gem)
+{
+ if (!wl->ops->get_spare_blocks)
+ BUG_ON(1);
+
+ return wl->ops->get_spare_blocks(wl, is_gem);
+}
+
+static inline int
+wlcore_hw_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
+ if (!wl->ops->set_key)
+ BUG_ON(1);
+
+ return wl->ops->set_key(wl, cmd, vif, sta, key_conf);
+}
+
+static inline u32
+wlcore_hw_pre_pkt_send(struct wl1271 *wl, u32 buf_offset, u32 last_len)
+{
+ if (wl->ops->pre_pkt_send)
+ return wl->ops->pre_pkt_send(wl, buf_offset, last_len);
+
+ return buf_offset;
+}
+
#endif
diff --git a/drivers/net/wireless/ti/wlcore/ini.h b/drivers/net/wireless/ti/wlcore/ini.h
index 4cf9ecc56212..d24fe3bbc672 100644
--- a/drivers/net/wireless/ti/wlcore/ini.h
+++ b/drivers/net/wireless/ti/wlcore/ini.h
@@ -172,7 +172,19 @@ struct wl128x_ini_fem_params_5 {
/* NVS data structure */
#define WL1271_INI_NVS_SECTION_SIZE 468
-#define WL1271_INI_FEM_MODULE_COUNT 2
+
+/* We have four FEM module types: 0-RFMD, 1-TQS, 2-SKW, 3-TQS_HP */
+#define WL1271_INI_FEM_MODULE_COUNT 4
+
+/*
+ * In NVS we only store two FEM module entries -
+ * FEM modules 0,2,3 are stored in entry 0
+ * FEM module 1 is stored in entry 1
+ */
+#define WL12XX_NVS_FEM_MODULE_COUNT 2
+
+#define WL12XX_FEM_TO_NVS_ENTRY(ini_fem_module) \
+ ((ini_fem_module) == 1 ? 1 : 0)
#define WL1271_INI_LEGACY_NVS_FILE_SIZE 800
@@ -188,13 +200,13 @@ struct wl1271_nvs_file {
struct {
struct wl1271_ini_fem_params_2 params;
u8 padding;
- } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT];
+ } dyn_radio_params_2[WL12XX_NVS_FEM_MODULE_COUNT];
struct wl1271_ini_band_params_5 stat_radio_params_5;
u8 padding3;
struct {
struct wl1271_ini_fem_params_5 params;
u8 padding;
- } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
+ } dyn_radio_params_5[WL12XX_NVS_FEM_MODULE_COUNT];
} __packed;
struct wl128x_nvs_file {
@@ -209,12 +221,12 @@ struct wl128x_nvs_file {
struct {
struct wl128x_ini_fem_params_2 params;
u8 padding;
- } dyn_radio_params_2[WL1271_INI_FEM_MODULE_COUNT];
+ } dyn_radio_params_2[WL12XX_NVS_FEM_MODULE_COUNT];
struct wl128x_ini_band_params_5 stat_radio_params_5;
u8 padding3;
struct {
struct wl128x_ini_fem_params_5 params;
u8 padding;
- } dyn_radio_params_5[WL1271_INI_FEM_MODULE_COUNT];
+ } dyn_radio_params_5[WL12XX_NVS_FEM_MODULE_COUNT];
} __packed;
#endif
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 9f89255eb6e6..a3c867786df8 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -54,6 +54,22 @@ int wl1271_init_templates_config(struct wl1271 *wl)
if (ret < 0)
return ret;
+ if (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL) {
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_APP_PROBE_REQ_2_4, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
+ CMD_TEMPL_APP_PROBE_REQ_5, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+ }
+
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_NULL_DATA, NULL,
sizeof(struct wl12xx_null_data_template),
@@ -460,6 +476,9 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
/* unconditionally enable HT rates */
supported_rates |= CONF_TX_MCS_RATES;
+ /* get extra MIMO or wide-chan rates where the HW supports it */
+ supported_rates |= wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
+
/* configure unicast TX rate classes */
for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
rc.enabled_rates = supported_rates;
@@ -551,29 +570,28 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret, i;
- /*
- * consider all existing roles before configuring psm.
- * TODO: reconfigure on interface removal.
- */
- if (!wl->ap_count) {
- if (is_ap) {
- /* Configure for power always on */
+ /* consider all existing roles before configuring psm. */
+
+ if (wl->ap_count == 0 && is_ap) { /* first AP */
+ /* Configure for power always on */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ return ret;
+ /* first STA, no APs */
+ } else if (wl->sta_count == 0 && wl->ap_count == 0 && !is_ap) {
+ u8 sta_auth = wl->conf.conn.sta_sleep_auth;
+ /* Configure for power according to debugfs */
+ if (sta_auth != WL1271_PSM_ILLEGAL)
+ ret = wl1271_acx_sleep_auth(wl, sta_auth);
+ /* Configure for power always on */
+ else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- return ret;
- } else if (!wl->sta_count) {
- if (wl->quirks & WLCORE_QUIRK_NO_ELP) {
- /* Configure for power always on */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- return ret;
- } else {
- /* Configure for ELP power saving */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
- if (ret < 0)
- return ret;
- }
- }
+ /* Configure for ELP power saving */
+ else
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+
+ if (ret < 0)
+ return ret;
}
/* Mode specific init */
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 7cd0081aede5..68e74eefd296 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -48,12 +48,24 @@ void wlcore_disable_interrupts(struct wl1271 *wl)
}
EXPORT_SYMBOL_GPL(wlcore_disable_interrupts);
+void wlcore_disable_interrupts_nosync(struct wl1271 *wl)
+{
+ disable_irq_nosync(wl->irq);
+}
+EXPORT_SYMBOL_GPL(wlcore_disable_interrupts_nosync);
+
void wlcore_enable_interrupts(struct wl1271 *wl)
{
enable_irq(wl->irq);
}
EXPORT_SYMBOL_GPL(wlcore_enable_interrupts);
+void wlcore_synchronize_interrupts(struct wl1271 *wl)
+{
+ synchronize_irq(wl->irq);
+}
+EXPORT_SYMBOL_GPL(wlcore_synchronize_interrupts);
+
int wlcore_translate_addr(struct wl1271 *wl, int addr)
{
struct wlcore_partition_set *part = &wl->curr_part;
@@ -122,9 +134,11 @@ EXPORT_SYMBOL_GPL(wlcore_translate_addr);
* | |
*
*/
-void wlcore_set_partition(struct wl1271 *wl,
- const struct wlcore_partition_set *p)
+int wlcore_set_partition(struct wl1271 *wl,
+ const struct wlcore_partition_set *p)
{
+ int ret;
+
/* copy partition info */
memcpy(&wl->curr_part, p, sizeof(*p));
@@ -137,28 +151,41 @@ void wlcore_set_partition(struct wl1271 *wl,
wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X",
p->mem3.start, p->mem3.size);
- wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
- wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
- wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
- wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
- wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
- wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
+ ret = wlcore_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
+ if (ret < 0)
+ goto out;
+
/*
* We don't need the size of the last partition, as it is
* automatically calculated based on the total memory size and
* the sizes of the previous partitions.
*/
- wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
-}
-EXPORT_SYMBOL_GPL(wlcore_set_partition);
-
-void wlcore_select_partition(struct wl1271 *wl, u8 part)
-{
- wl1271_debug(DEBUG_IO, "setting partition %d", part);
+ ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
- wlcore_set_partition(wl, &wl->ptable[part]);
+out:
+ return ret;
}
-EXPORT_SYMBOL_GPL(wlcore_select_partition);
+EXPORT_SYMBOL_GPL(wlcore_set_partition);
void wl1271_io_reset(struct wl1271 *wl)
{
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 8942954b56a0..259149f36fae 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -45,86 +45,122 @@
struct wl1271;
void wlcore_disable_interrupts(struct wl1271 *wl);
+void wlcore_disable_interrupts_nosync(struct wl1271 *wl);
void wlcore_enable_interrupts(struct wl1271 *wl);
+void wlcore_synchronize_interrupts(struct wl1271 *wl);
void wl1271_io_reset(struct wl1271 *wl);
void wl1271_io_init(struct wl1271 *wl);
int wlcore_translate_addr(struct wl1271 *wl, int addr);
/* Raw target IO, address is not translated */
-static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
+ void *buf, size_t len,
+ bool fixed)
{
- wl->if_ops->write(wl->dev, addr, buf, len, fixed);
+ int ret;
+
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ return -EIO;
+
+ ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
+ if (ret && wl->state != WL1271_STATE_OFF)
+ set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
+
+ return ret;
}
-static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
+ void *buf, size_t len,
+ bool fixed)
{
- wl->if_ops->read(wl->dev, addr, buf, len, fixed);
+ int ret;
+
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ return -EIO;
+
+ ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
+ if (ret && wl->state != WL1271_STATE_OFF)
+ set_bit(WL1271_FLAG_IO_FAILED, &wl->flags);
+
+ return ret;
}
-static inline void wlcore_raw_read_data(struct wl1271 *wl, int reg, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_raw_read_data(struct wl1271 *wl, int reg,
+ void *buf, size_t len,
+ bool fixed)
{
- wl1271_raw_read(wl, wl->rtable[reg], buf, len, fixed);
+ return wlcore_raw_read(wl, wl->rtable[reg], buf, len, fixed);
}
-static inline void wlcore_raw_write_data(struct wl1271 *wl, int reg, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_raw_write_data(struct wl1271 *wl, int reg,
+ void *buf, size_t len,
+ bool fixed)
{
- wl1271_raw_write(wl, wl->rtable[reg], buf, len, fixed);
+ return wlcore_raw_write(wl, wl->rtable[reg], buf, len, fixed);
}
-static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
+static inline int __must_check wlcore_raw_read32(struct wl1271 *wl, int addr,
+ u32 *val)
{
- wl1271_raw_read(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
+ int ret;
+
+ ret = wlcore_raw_read(wl, addr, &wl->buffer_32,
+ sizeof(wl->buffer_32), false);
+ if (ret < 0)
+ return ret;
+
+ if (val)
+ *val = le32_to_cpu(wl->buffer_32);
- return le32_to_cpu(wl->buffer_32);
+ return 0;
}
-static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
+static inline int __must_check wlcore_raw_write32(struct wl1271 *wl, int addr,
+ u32 val)
{
wl->buffer_32 = cpu_to_le32(val);
- wl1271_raw_write(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
+ return wlcore_raw_write(wl, addr, &wl->buffer_32,
+ sizeof(wl->buffer_32), false);
}
-static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_read(struct wl1271 *wl, int addr,
+ void *buf, size_t len, bool fixed)
{
int physical;
physical = wlcore_translate_addr(wl, addr);
- wl1271_raw_read(wl, physical, buf, len, fixed);
+ return wlcore_raw_read(wl, physical, buf, len, fixed);
}
-static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_write(struct wl1271 *wl, int addr,
+ void *buf, size_t len, bool fixed)
{
int physical;
physical = wlcore_translate_addr(wl, addr);
- wl1271_raw_write(wl, physical, buf, len, fixed);
+ return wlcore_raw_write(wl, physical, buf, len, fixed);
}
-static inline void wlcore_write_data(struct wl1271 *wl, int reg, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_write_data(struct wl1271 *wl, int reg,
+ void *buf, size_t len,
+ bool fixed)
{
- wl1271_write(wl, wl->rtable[reg], buf, len, fixed);
+ return wlcore_write(wl, wl->rtable[reg], buf, len, fixed);
}
-static inline void wlcore_read_data(struct wl1271 *wl, int reg, void *buf,
- size_t len, bool fixed)
+static inline int __must_check wlcore_read_data(struct wl1271 *wl, int reg,
+ void *buf, size_t len,
+ bool fixed)
{
- wl1271_read(wl, wl->rtable[reg], buf, len, fixed);
+ return wlcore_read(wl, wl->rtable[reg], buf, len, fixed);
}
-static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
- void *buf, size_t len, bool fixed)
+static inline int __must_check wlcore_read_hwaddr(struct wl1271 *wl, int hwaddr,
+ void *buf, size_t len,
+ bool fixed)
{
int physical;
int addr;
@@ -134,34 +170,47 @@ static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
physical = wlcore_translate_addr(wl, addr);
- wl1271_raw_read(wl, physical, buf, len, fixed);
+ return wlcore_raw_read(wl, physical, buf, len, fixed);
}
-static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
+static inline int __must_check wlcore_read32(struct wl1271 *wl, int addr,
+ u32 *val)
{
- return wl1271_raw_read32(wl, wlcore_translate_addr(wl, addr));
+ return wlcore_raw_read32(wl, wlcore_translate_addr(wl, addr), val);
}
-static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
+static inline int __must_check wlcore_write32(struct wl1271 *wl, int addr,
+ u32 val)
{
- wl1271_raw_write32(wl, wlcore_translate_addr(wl, addr), val);
+ return wlcore_raw_write32(wl, wlcore_translate_addr(wl, addr), val);
}
-static inline u32 wlcore_read_reg(struct wl1271 *wl, int reg)
+static inline int __must_check wlcore_read_reg(struct wl1271 *wl, int reg,
+ u32 *val)
{
- return wl1271_raw_read32(wl,
- wlcore_translate_addr(wl, wl->rtable[reg]));
+ return wlcore_raw_read32(wl,
+ wlcore_translate_addr(wl, wl->rtable[reg]),
+ val);
}
-static inline void wlcore_write_reg(struct wl1271 *wl, int reg, u32 val)
+static inline int __must_check wlcore_write_reg(struct wl1271 *wl, int reg,
+ u32 val)
{
- wl1271_raw_write32(wl, wlcore_translate_addr(wl, wl->rtable[reg]), val);
+ return wlcore_raw_write32(wl,
+ wlcore_translate_addr(wl, wl->rtable[reg]),
+ val);
}
static inline void wl1271_power_off(struct wl1271 *wl)
{
- wl->if_ops->power(wl->dev, false);
- clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ int ret;
+
+ if (!test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags))
+ return;
+
+ ret = wl->if_ops->power(wl->dev, false);
+ if (!ret)
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static inline int wl1271_power_on(struct wl1271 *wl)
@@ -173,8 +222,8 @@ static inline int wl1271_power_on(struct wl1271 *wl)
return ret;
}
-void wlcore_set_partition(struct wl1271 *wl,
- const struct wlcore_partition_set *p);
+int wlcore_set_partition(struct wl1271 *wl,
+ const struct wlcore_partition_set *p);
bool wl1271_set_block_size(struct wl1271 *wl);
@@ -182,6 +231,4 @@ bool wl1271_set_block_size(struct wl1271 *wl);
int wl1271_tx_dummy_packet(struct wl1271 *wl);
-void wlcore_select_partition(struct wl1271 *wl, u8 part);
-
#endif
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index acef93390d3d..72548609f711 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -62,7 +62,7 @@ static bool no_recovery;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
struct ieee80211_vif *vif,
bool reset_tx_queues);
-static void wl1271_op_stop(struct ieee80211_hw *hw);
+static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
static int wl12xx_set_authorized(struct wl1271 *wl,
@@ -320,46 +320,6 @@ static void wlcore_adjust_conf(struct wl1271 *wl)
}
}
-static int wl1271_plt_init(struct wl1271 *wl)
-{
- int ret;
-
- ret = wl->ops->hw_init(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_init_mem_config(wl);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Enable data path */
- ret = wl1271_cmd_data_path(wl, 1);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Configure for CAM power saving (ie. always active) */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- goto out_free_memmap;
-
- /* configure PM */
- ret = wl1271_acx_pm_config(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- return 0;
-
- out_free_memmap:
- kfree(wl->target_mem_map);
- wl->target_mem_map = NULL;
-
- return ret;
-}
-
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 hlid, u8 tx_pkts)
@@ -387,7 +347,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
- struct wl_fw_status *status)
+ struct wl_fw_status_2 *status)
{
struct wl1271_link *lnk;
u32 cur_fw_ps_map;
@@ -418,8 +378,9 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
}
}
-static void wl12xx_fw_status(struct wl1271 *wl,
- struct wl_fw_status *status)
+static int wlcore_fw_status(struct wl1271 *wl,
+ struct wl_fw_status_1 *status_1,
+ struct wl_fw_status_2 *status_2)
{
struct wl12xx_vif *wlvif;
struct timespec ts;
@@ -427,38 +388,42 @@ static void wl12xx_fw_status(struct wl1271 *wl,
int avail, freed_blocks;
int i;
size_t status_len;
+ int ret;
- status_len = sizeof(*status) + wl->fw_status_priv_len;
+ status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
+ sizeof(*status_2) + wl->fw_status_priv_len;
- wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status,
- status_len, false);
+ ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
+ status_len, false);
+ if (ret < 0)
+ return ret;
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
- status->intr,
- status->fw_rx_counter,
- status->drv_rx_counter,
- status->tx_results_counter);
+ status_1->intr,
+ status_1->fw_rx_counter,
+ status_1->drv_rx_counter,
+ status_1->tx_results_counter);
for (i = 0; i < NUM_TX_QUEUES; i++) {
/* prevent wrap-around in freed-packets counter */
wl->tx_allocated_pkts[i] -=
- (status->counters.tx_released_pkts[i] -
+ (status_2->counters.tx_released_pkts[i] -
wl->tx_pkts_freed[i]) & 0xff;
- wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
+ wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
}
/* prevent wrap-around in total blocks counter */
if (likely(wl->tx_blocks_freed <=
- le32_to_cpu(status->total_released_blks)))
- freed_blocks = le32_to_cpu(status->total_released_blks) -
+ le32_to_cpu(status_2->total_released_blks)))
+ freed_blocks = le32_to_cpu(status_2->total_released_blks) -
wl->tx_blocks_freed;
else
freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
- le32_to_cpu(status->total_released_blks);
+ le32_to_cpu(status_2->total_released_blks);
- wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);
+ wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
wl->tx_allocated_blocks -= freed_blocks;
@@ -474,7 +439,7 @@ static void wl12xx_fw_status(struct wl1271 *wl,
cancel_delayed_work(&wl->tx_watchdog_work);
}
- avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;
+ avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
/*
* The FW might change the total number of TX memblocks before
@@ -493,13 +458,15 @@ static void wl12xx_fw_status(struct wl1271 *wl,
/* for AP update num of allocated TX blocks per link and ps status */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
- wl12xx_irq_update_links_status(wl, wlvif, status);
+ wl12xx_irq_update_links_status(wl, wlvif, status_2);
}
/* update the host-chipset time offset */
getnstimeofday(&ts);
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
- (s64)le32_to_cpu(status->fw_localtime);
+ (s64)le32_to_cpu(status_2->fw_localtime);
+
+ return 0;
}
static void wl1271_flush_deferred_work(struct wl1271 *wl)
@@ -527,20 +494,15 @@ static void wl1271_netstack_work(struct work_struct *work)
#define WL1271_IRQ_MAX_LOOPS 256
-static irqreturn_t wl1271_irq(int irq, void *cookie)
+static int wlcore_irq_locked(struct wl1271 *wl)
{
- int ret;
+ int ret = 0;
u32 intr;
int loopcount = WL1271_IRQ_MAX_LOOPS;
- struct wl1271 *wl = (struct wl1271 *)cookie;
bool done = false;
unsigned int defer_count;
unsigned long flags;
- /* TX might be handled here, avoid redundant work */
- set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
- cancel_work_sync(&wl->tx_work);
-
/*
* In case edge triggered interrupt must be used, we cannot iterate
* more than once without introducing race conditions with the hardirq.
@@ -548,8 +510,6 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
loopcount = 1;
- mutex_lock(&wl->mutex);
-
wl1271_debug(DEBUG_IRQ, "IRQ work");
if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -568,21 +528,33 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
- wl12xx_fw_status(wl, wl->fw_status);
+ ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ if (ret < 0)
+ goto out;
wlcore_hw_tx_immediate_compl(wl);
- intr = le32_to_cpu(wl->fw_status->intr);
- intr &= WL1271_INTR_MASK;
+ intr = le32_to_cpu(wl->fw_status_1->intr);
+ intr &= WLCORE_ALL_INTR_MASK;
if (!intr) {
done = true;
continue;
}
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
- wl1271_error("watchdog interrupt received! "
+ wl1271_error("HW watchdog interrupt received! starting recovery.");
+ wl->watchdog_recovery = true;
+ ret = -EIO;
+
+ /* restarting the chip. ignore any other interrupt. */
+ goto out;
+ }
+
+ if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
+ wl1271_error("SW watchdog interrupt received! "
"starting recovery.");
- wl12xx_queue_recovery_work(wl);
+ wl->watchdog_recovery = true;
+ ret = -EIO;
/* restarting the chip. ignore any other interrupt. */
goto out;
@@ -591,7 +563,9 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- wl12xx_rx(wl, wl->fw_status);
+ ret = wlcore_rx(wl, wl->fw_status_1);
+ if (ret < 0)
+ goto out;
/* Check if any tx blocks were freed */
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -602,13 +576,17 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
* In order to avoid starvation of the TX path,
* call the work function directly.
*/
- wl1271_tx_work_locked(wl);
+ ret = wlcore_tx_work_locked(wl);
+ if (ret < 0)
+ goto out;
} else {
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
/* check for tx results */
- wlcore_hw_tx_delayed_compl(wl);
+ ret = wlcore_hw_tx_delayed_compl(wl);
+ if (ret < 0)
+ goto out;
/* Make sure the deferred queues don't get too long */
defer_count = skb_queue_len(&wl->deferred_tx_queue) +
@@ -619,12 +597,16 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
if (intr & WL1271_ACX_INTR_EVENT_A) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
- wl1271_event_handle(wl, 0);
+ ret = wl1271_event_handle(wl, 0);
+ if (ret < 0)
+ goto out;
}
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
- wl1271_event_handle(wl, 1);
+ ret = wl1271_event_handle(wl, 1);
+ if (ret < 0)
+ goto out;
}
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -638,6 +620,25 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
wl1271_ps_elp_sleep(wl);
out:
+ return ret;
+}
+
+static irqreturn_t wlcore_irq(int irq, void *cookie)
+{
+ int ret;
+ unsigned long flags;
+ struct wl1271 *wl = cookie;
+
+ /* TX might be handled here, avoid redundant work */
+ set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
+ cancel_work_sync(&wl->tx_work);
+
+ mutex_lock(&wl->mutex);
+
+ ret = wlcore_irq_locked(wl);
+ if (ret)
+ wl12xx_queue_recovery_work(wl);
+
spin_lock_irqsave(&wl->wl_lock, flags);
/* In case TX was not handled here, queue TX work */
clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
@@ -743,7 +744,7 @@ out:
return ret;
}
-static int wl1271_fetch_nvs(struct wl1271 *wl)
+static void wl1271_fetch_nvs(struct wl1271 *wl)
{
const struct firmware *fw;
int ret;
@@ -751,16 +752,15 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
if (ret < 0) {
- wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
- ret);
- return ret;
+ wl1271_debug(DEBUG_BOOT, "could not get nvs file %s: %d",
+ WL12XX_NVS_NAME, ret);
+ return;
}
wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("could not allocate memory for the nvs file");
- ret = -ENOMEM;
goto out;
}
@@ -768,14 +768,17 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
out:
release_firmware(fw);
-
- return ret;
}
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
- if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
+ WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
+
+ /* Avoid a recursive recovery */
+ if (!test_and_set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
+ wlcore_disable_interrupts_nosync(wl);
ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ }
}
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
@@ -801,14 +804,17 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
return len;
}
+#define WLCORE_FW_LOG_END 0x2000000
+
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
u32 addr;
- u32 first_addr;
+ u32 offset;
+ u32 end_of_log;
u8 *block;
+ int ret;
if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
- (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
(wl->conf.fwlog.mem_blocks == 0))
return;
@@ -820,34 +826,49 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
/*
* Make sure the chip is awake and the logger isn't active.
- * This might fail if the firmware hanged.
+ * Do not send a stop fwlog command if the fw is hanged.
*/
- if (!wl1271_ps_elp_wakeup(wl))
+ if (wl1271_ps_elp_wakeup(wl))
+ goto out;
+ if (!wl->watchdog_recovery)
wl12xx_cmd_stop_fwlog(wl);
/* Read the first memory block address */
- wl12xx_fw_status(wl, wl->fw_status);
- first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
- if (!first_addr)
+ ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ if (ret < 0)
goto out;
+ addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
+ if (!addr)
+ goto out;
+
+ if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
+ offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
+ end_of_log = WLCORE_FW_LOG_END;
+ } else {
+ offset = sizeof(addr);
+ end_of_log = addr;
+ }
+
/* Traverse the memory blocks linked list */
- addr = first_addr;
do {
memset(block, 0, WL12XX_HW_BLOCK_SIZE);
- wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
- false);
+ ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
+ false);
+ if (ret < 0)
+ goto out;
/*
* Memory blocks are linked to one another. The first 4 bytes
* of each memory block hold the hardware address of the next
- * one. The last memory block points to the first one.
+ * one. The last memory block points to the first one in
+ * on demand mode and is equal to 0x2000000 in continuous mode.
*/
addr = le32_to_cpup((__le32 *)block);
- if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
- WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
+ if (!wl12xx_copy_fwlog(wl, block + offset,
+ WL12XX_HW_BLOCK_SIZE - offset))
break;
- } while (addr && (addr != first_addr));
+ } while (addr && (addr != end_of_log));
wake_up_interruptible(&wl->fwlog_waitq);
@@ -855,6 +876,34 @@ out:
kfree(block);
}
+static void wlcore_print_recovery(struct wl1271 *wl)
+{
+ u32 pc = 0;
+ u32 hint_sts = 0;
+ int ret;
+
+ wl1271_info("Hardware recovery in progress. FW ver: %s",
+ wl->chip.fw_ver_str);
+
+ /* change partitions momentarily so we can read the FW pc */
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ if (ret < 0)
+ return;
+
+ ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
+ if (ret < 0)
+ return;
+
+ ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
+ if (ret < 0)
+ return;
+
+ wl1271_info("pc: 0x%x, hint_sts: 0x%08x", pc, hint_sts);
+
+ wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
+}
+
+
static void wl1271_recovery_work(struct work_struct *work)
{
struct wl1271 *wl =
@@ -867,26 +916,19 @@ static void wl1271_recovery_work(struct work_struct *work)
if (wl->state != WL1271_STATE_ON || wl->plt)
goto out_unlock;
- /* Avoid a recursive recovery */
- set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
-
- wl12xx_read_fwlog_panic(wl);
-
- wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
- wl->chip.fw_ver_str,
- wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
+ if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
+ wl12xx_read_fwlog_panic(wl);
+ wlcore_print_recovery(wl);
+ }
BUG_ON(bug_on_recovery &&
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
if (no_recovery) {
wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
- clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
goto out_unlock;
}
- BUG_ON(bug_on_recovery);
-
/*
* Advance security sequence number to overcome potential progress
* in the firmware during recovery. This doens't hurt if the network is
@@ -900,7 +942,7 @@ static void wl1271_recovery_work(struct work_struct *work)
}
/* Prevent spurious TX during FW restart */
- ieee80211_stop_queues(wl->hw);
+ wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
if (wl->sched_scanning) {
ieee80211_sched_scan_stopped(wl->hw);
@@ -914,10 +956,8 @@ static void wl1271_recovery_work(struct work_struct *work)
vif = wl12xx_wlvif_to_vif(wlvif);
__wl1271_op_remove_interface(wl, vif, false);
}
- mutex_unlock(&wl->mutex);
- wl1271_op_stop(wl->hw);
- clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
+ wlcore_op_stop_locked(wl);
ieee80211_restart_hw(wl->hw);
@@ -925,26 +965,34 @@ static void wl1271_recovery_work(struct work_struct *work)
* Its safe to enable TX now - the queues are stopped after a request
* to restart the HW.
*/
- ieee80211_wake_queues(wl->hw);
- return;
+ wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
+
out_unlock:
+ wl->watchdog_recovery = false;
+ clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
mutex_unlock(&wl->mutex);
}
-static void wl1271_fw_wakeup(struct wl1271 *wl)
+static int wlcore_fw_wakeup(struct wl1271 *wl)
{
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
+ return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
static int wl1271_setup(struct wl1271 *wl)
{
- wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL);
- if (!wl->fw_status)
+ wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
+ sizeof(*wl->fw_status_2) +
+ wl->fw_status_priv_len, GFP_KERNEL);
+ if (!wl->fw_status_1)
return -ENOMEM;
+ wl->fw_status_2 = (struct wl_fw_status_2 *)
+ (((u8 *) wl->fw_status_1) +
+ WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
+
wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
if (!wl->tx_res_if) {
- kfree(wl->fw_status);
+ kfree(wl->fw_status_1);
return -ENOMEM;
}
@@ -963,13 +1011,21 @@ static int wl12xx_set_power_on(struct wl1271 *wl)
wl1271_io_reset(wl);
wl1271_io_init(wl);
- wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
+ if (ret < 0)
+ goto fail;
/* ELP module wake up */
- wl1271_fw_wakeup(wl);
+ ret = wlcore_fw_wakeup(wl);
+ if (ret < 0)
+ goto fail;
out:
return ret;
+
+fail:
+ wl1271_power_off(wl);
+ return ret;
}
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
@@ -987,13 +1043,12 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
* simplify the code and since the performance impact is
* negligible, we use the same block size for all different
* chip types.
+ *
+ * Check if the bus supports blocksize alignment and, if it
+ * doesn't, make sure we don't have the quirk.
*/
- if (wl1271_set_block_size(wl))
- wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
-
- ret = wl->ops->identify_chip(wl);
- if (ret < 0)
- goto out;
+ if (!wl1271_set_block_size(wl))
+ wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
/* TODO: make sure the lower driver has set things up correctly */
@@ -1005,21 +1060,21 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
if (ret < 0)
goto out;
- /* No NVS from netlink, try to get it from the filesystem */
- if (wl->nvs == NULL) {
- ret = wl1271_fetch_nvs(wl);
- if (ret < 0)
- goto out;
- }
-
out:
return ret;
}
-int wl1271_plt_start(struct wl1271 *wl)
+int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
{
int retries = WL1271_BOOT_RETRIES;
struct wiphy *wiphy = wl->hw->wiphy;
+
+ static const char* const PLT_MODE[] = {
+ "PLT_OFF",
+ "PLT_ON",
+ "PLT_FEM_DETECT"
+ };
+
int ret;
mutex_lock(&wl->mutex);
@@ -1033,23 +1088,23 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
+ /* Indicate to lower levels that we are now in PLT mode */
+ wl->plt = true;
+ wl->plt_mode = plt_mode;
+
while (retries) {
retries--;
ret = wl12xx_chip_wakeup(wl, true);
if (ret < 0)
goto power_off;
- ret = wl->ops->boot(wl);
+ ret = wl->ops->plt_init(wl);
if (ret < 0)
goto power_off;
- ret = wl1271_plt_init(wl);
- if (ret < 0)
- goto irq_disable;
-
- wl->plt = true;
wl->state = WL1271_STATE_ON;
- wl1271_notice("firmware booted in PLT mode (%s)",
+ wl1271_notice("firmware booted in PLT mode %s (%s)",
+ PLT_MODE[plt_mode],
wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
@@ -1059,23 +1114,13 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
-irq_disable:
- mutex_unlock(&wl->mutex);
- /* Unlocking the mutex in the middle of handling is
- inherently unsafe. In this case we deem it safe to do,
- because we need to let any possibly pending IRQ out of
- the system (and while we are WL1271_STATE_OFF the IRQ
- work function will not do anything.) Also, any other
- possible concurrent operations will fail due to the
- current state, hence the wl1271 struct should be safe. */
- wlcore_disable_interrupts(wl);
- wl1271_flush_deferred_work(wl);
- cancel_work_sync(&wl->netstack_work);
- mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
}
+ wl->plt = false;
+ wl->plt_mode = PLT_OFF;
+
wl1271_error("firmware boot in PLT mode failed despite %d retries",
WL1271_BOOT_RETRIES);
out:
@@ -1125,8 +1170,10 @@ int wl1271_plt_stop(struct wl1271 *wl)
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
wl->flags = 0;
+ wl->sleep_auth = WL1271_PSM_ILLEGAL;
wl->state = WL1271_STATE_OFF;
wl->plt = false;
+ wl->plt_mode = PLT_OFF;
wl->rx_counter = 0;
mutex_unlock(&wl->mutex);
@@ -1154,9 +1201,16 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
spin_lock_irqsave(&wl->wl_lock, flags);
- /* queue the packet */
+ /*
+ * drop the packet if the link is invalid or the queue is stopped
+ * for any reason but watermark. Watermark is a "soft"-stop so we
+ * allow these packets through.
+ */
if (hlid == WL12XX_INVALID_LINK_ID ||
- (wlvif && !test_bit(hlid, wlvif->links_map))) {
+ (wlvif && !test_bit(hlid, wlvif->links_map)) ||
+ (wlcore_is_queue_stopped(wl, q) &&
+ !wlcore_is_queue_stopped_by_reason(wl, q,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
ieee80211_free_txskb(hw, skb);
goto out;
@@ -1172,10 +1226,12 @@ static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* The workqueue is slow to process the tx_queue and we need stop
* the queue here, otherwise the queue will get too long.
*/
- if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
+ if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
+ !wlcore_is_queue_stopped_by_reason(wl, q,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
- ieee80211_stop_queue(wl->hw, mapping);
- set_bit(q, &wl->stopped_queues_map);
+ wlcore_stop_queue_locked(wl, q,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
/*
@@ -1209,7 +1265,7 @@ int wl1271_tx_dummy_packet(struct wl1271 *wl)
/* The FW is low on RX memory blocks, so send the dummy packet asap */
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
- wl1271_tx_work_locked(wl);
+ return wlcore_tx_work_locked(wl);
/*
* If the FW TX is busy, TX work will be scheduled by the threaded
@@ -1476,8 +1532,15 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
int i, ret;
if (!wow || wow->any || !wow->n_patterns) {
- wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
- wl1271_rx_filter_clear_all(wl);
+ ret = wl1271_acx_default_rx_filter_enable(wl, 0,
+ FILTER_SIGNAL);
+ if (ret)
+ goto out;
+
+ ret = wl1271_rx_filter_clear_all(wl);
+ if (ret)
+ goto out;
+
return 0;
}
@@ -1493,8 +1556,13 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
}
}
- wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
- wl1271_rx_filter_clear_all(wl);
+ ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
+ if (ret)
+ goto out;
+
+ ret = wl1271_rx_filter_clear_all(wl);
+ if (ret)
+ goto out;
/* Translate WoWLAN patterns into filters */
for (i = 0; i < wow->n_patterns; i++) {
@@ -1532,11 +1600,20 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
+ if ((wl->conf.conn.suspend_wake_up_event ==
+ wl->conf.conn.wake_up_event) &&
+ (wl->conf.conn.suspend_listen_interval ==
+ wl->conf.conn.listen_interval))
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- wl1271_configure_wowlan(wl, wow);
+ ret = wl1271_configure_wowlan(wl, wow);
+ if (ret < 0)
+ goto out_sleep;
+
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.suspend_wake_up_event,
wl->conf.conn.suspend_listen_interval);
@@ -1544,8 +1621,8 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
if (ret < 0)
wl1271_error("suspend: set wake up conditions failed: %d", ret);
+out_sleep:
wl1271_ps_elp_sleep(wl);
-
out:
return ret;
@@ -1592,6 +1669,13 @@ static void wl1271_configure_resume(struct wl1271 *wl,
if ((!is_ap) && (!is_sta))
return;
+ if (is_sta &&
+ ((wl->conf.conn.suspend_wake_up_event ==
+ wl->conf.conn.wake_up_event) &&
+ (wl->conf.conn.suspend_listen_interval ==
+ wl->conf.conn.listen_interval)))
+ return;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
return;
@@ -1624,6 +1708,12 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
WARN_ON(!wow);
+ /* we want to perform the recovery before suspending */
+ if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
+ wl1271_warning("postponing suspend to perform recovery");
+ return -EBUSY;
+ }
+
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
@@ -1664,7 +1754,8 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
unsigned long flags;
- bool run_irq_work = false;
+ bool run_irq_work = false, pending_recovery;
+ int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
wl->wow_enabled);
@@ -1680,17 +1771,37 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
run_irq_work = true;
spin_unlock_irqrestore(&wl->wl_lock, flags);
+ mutex_lock(&wl->mutex);
+
+ /* test the recovery flag before calling any SDIO functions */
+ pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags);
+
if (run_irq_work) {
wl1271_debug(DEBUG_MAC80211,
"run postponed irq_work directly");
- wl1271_irq(0, wl);
+
+ /* don't talk to the HW if recovery is pending */
+ if (!pending_recovery) {
+ ret = wlcore_irq_locked(wl);
+ if (ret)
+ wl12xx_queue_recovery_work(wl);
+ }
+
wlcore_enable_interrupts(wl);
}
- mutex_lock(&wl->mutex);
+ if (pending_recovery) {
+ wl1271_warning("queuing forgotten recovery on resume");
+ ieee80211_queue_work(wl->hw, &wl->recovery_work);
+ goto out;
+ }
+
wl12xx_for_each_wlvif(wl, wlvif) {
wl1271_configure_resume(wl, wlvif);
}
+
+out:
wl->wow_enabled = false;
mutex_unlock(&wl->mutex);
@@ -1716,29 +1827,15 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
return 0;
}
-static void wl1271_op_stop(struct ieee80211_hw *hw)
+static void wlcore_op_stop_locked(struct wl1271 *wl)
{
- struct wl1271 *wl = hw->priv;
int i;
- wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
-
- /*
- * Interrupts must be disabled before setting the state to OFF.
- * Otherwise, the interrupt handler might be called and exit without
- * reading the interrupt status.
- */
- wlcore_disable_interrupts(wl);
- mutex_lock(&wl->mutex);
if (wl->state == WL1271_STATE_OFF) {
- mutex_unlock(&wl->mutex);
+ if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags))
+ wlcore_enable_interrupts(wl);
- /*
- * This will not necessarily enable interrupts as interrupts
- * may have been disabled when op_stop was called. It will,
- * however, balance the above call to disable_interrupts().
- */
- wlcore_enable_interrupts(wl);
return;
}
@@ -1747,8 +1844,16 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
* functions don't perform further work.
*/
wl->state = WL1271_STATE_OFF;
+
+ /*
+ * Use the nosync variant to disable interrupts, so the mutex could be
+ * held while doing so without deadlocking.
+ */
+ wlcore_disable_interrupts_nosync(wl);
+
mutex_unlock(&wl->mutex);
+ wlcore_synchronize_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
@@ -1758,15 +1863,23 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&wl->connection_loss_work);
/* let's notify MAC80211 about the remaining pending TX frames */
- wl12xx_tx_reset(wl, true);
+ wl12xx_tx_reset(wl);
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
+ /*
+ * In case a recovery was scheduled, interrupts were disabled to avoid
+ * an interrupt storm. Now that the power is down, it is safe to
+ * re-enable interrupts to balance the disable depth
+ */
+ if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
+ wlcore_enable_interrupts(wl);
wl->band = IEEE80211_BAND_2GHZ;
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+ wl->channel_type = NL80211_CHAN_NO_HT;
wl->tx_blocks_available = 0;
wl->tx_allocated_blocks = 0;
wl->tx_results_count = 0;
@@ -1775,6 +1888,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->ap_fw_ps_map = 0;
wl->ap_ps_map = 0;
wl->sched_scanning = false;
+ wl->sleep_auth = WL1271_PSM_ILLEGAL;
memset(wl->roles_map, 0, sizeof(wl->roles_map));
memset(wl->links_map, 0, sizeof(wl->links_map));
memset(wl->roc_map, 0, sizeof(wl->roc_map));
@@ -1799,12 +1913,24 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl1271_debugfs_reset(wl);
- kfree(wl->fw_status);
- wl->fw_status = NULL;
+ kfree(wl->fw_status_1);
+ wl->fw_status_1 = NULL;
+ wl->fw_status_2 = NULL;
kfree(wl->tx_res_if);
wl->tx_res_if = NULL;
kfree(wl->target_mem_map);
wl->target_mem_map = NULL;
+}
+
+static void wlcore_op_stop(struct ieee80211_hw *hw)
+{
+ struct wl1271 *wl = hw->priv;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+
+ mutex_lock(&wl->mutex);
+
+ wlcore_op_stop_locked(wl);
mutex_unlock(&wl->mutex);
}
@@ -1894,6 +2020,9 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
+ wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
} else {
/* init ap data */
wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
@@ -1903,13 +2032,19 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
wl12xx_allocate_rate_policy(wl,
&wlvif->ap.ucast_rate_idx[i]);
+ wlvif->basic_rate_set = CONF_TX_AP_ENABLED_RATES;
+ /*
+ * TODO: check if basic_rate shouldn't be
+ * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ * instead (the same thing for STA above).
+ */
+ wlvif->basic_rate = CONF_TX_AP_ENABLED_RATES;
+ /* TODO: this seems to be used only for STA, check it */
+ wlvif->rate_set = CONF_TX_AP_ENABLED_RATES;
}
wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
- wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
- wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
- wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
/*
@@ -1919,6 +2054,7 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlvif->band = wl->band;
wlvif->channel = wl->channel;
wlvif->power_level = wl->power_level;
+ wlvif->channel_type = wl->channel_type;
INIT_WORK(&wlvif->rx_streaming_enable_work,
wl1271_rx_streaming_enable_work);
@@ -2170,6 +2306,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int i, ret;
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
@@ -2250,11 +2387,33 @@ deinit:
wlvif->role_id = WL12XX_INVALID_ROLE_ID;
wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
- if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ if (is_ap)
wl->ap_count--;
else
wl->sta_count--;
+ /*
+ * Last AP, have more stations. Configure sleep auth according to STA.
+ * Don't do thin on unintended recovery.
+ */
+ if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
+ !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
+ goto unlock;
+
+ if (wl->ap_count == 0 && is_ap && wl->sta_count) {
+ u8 sta_auth = wl->conf.conn.sta_sleep_auth;
+ /* Configure for power according to debugfs */
+ if (sta_auth != WL1271_PSM_ILLEGAL)
+ wl1271_acx_sleep_auth(wl, sta_auth);
+ /* Configure for power always on */
+ else if (wl->quirks & WLCORE_QUIRK_NO_ELP)
+ wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ /* Configure for ELP power saving */
+ else
+ wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+ }
+
+unlock:
mutex_unlock(&wl->mutex);
del_timer_sync(&wlvif->rx_streaming_timer);
@@ -2444,7 +2603,7 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
} else {
/* The current firmware only supports sched_scan in idle */
if (wl->sched_scanning) {
- wl1271_scan_sched_scan_stop(wl);
+ wl1271_scan_sched_scan_stop(wl, wlvif);
ieee80211_sched_scan_stopped(wl->hw);
}
@@ -2469,13 +2628,24 @@ static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
/* if the channel changes while joined, join again */
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
((wlvif->band != conf->channel->band) ||
- (wlvif->channel != channel))) {
+ (wlvif->channel != channel) ||
+ (wlvif->channel_type != conf->channel_type))) {
/* send all pending packets */
- wl1271_tx_work_locked(wl);
+ ret = wlcore_tx_work_locked(wl);
+ if (ret < 0)
+ return ret;
+
wlvif->band = conf->channel->band;
wlvif->channel = channel;
+ wlvif->channel_type = conf->channel_type;
- if (!is_ap) {
+ if (is_ap) {
+ wl1271_set_band_rate(wl, wlvif);
+ ret = wl1271_init_ap_rates(wl, wlvif);
+ if (ret < 0)
+ wl1271_error("AP rate policy change failed %d",
+ ret);
+ } else {
/*
* FIXME: the mac80211 should really provide a fixed
* rate to use here. for now, just use the smallest
@@ -2583,8 +2753,9 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* frames, such as the deauth. To make sure those frames reach the air,
* wait here until the TX queue is fully flushed.
*/
- if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (conf->flags & IEEE80211_CONF_IDLE))
+ if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) ||
+ ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (conf->flags & IEEE80211_CONF_IDLE)))
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
@@ -2593,6 +2764,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
wl->band = conf->channel->band;
wl->channel = channel;
+ wl->channel_type = conf->channel_type;
}
if (changed & IEEE80211_CONF_CHANGE_POWER)
@@ -2825,17 +2997,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int ret;
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
- /*
- * A role set to GEM cipher requires different Tx settings (namely
- * spare blocks). Note when we are in this mode so the HW can adjust.
- */
- if (key_type == KEY_GEM) {
- if (action == KEY_ADD_OR_REPLACE)
- wlvif->is_gem = true;
- else if (action == KEY_REMOVE)
- wlvif->is_gem = false;
- }
-
if (is_ap) {
struct wl1271_station *wl_sta;
u8 hlid;
@@ -2913,12 +3074,21 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return 0;
}
-static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
+
+ return wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
+}
+
+int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf)
+{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u32 tx_seq_32 = 0;
@@ -3029,6 +3199,7 @@ out_unlock:
return ret;
}
+EXPORT_SYMBOL_GPL(wlcore_set_key);
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -3167,6 +3338,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
@@ -3180,7 +3352,7 @@ static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- wl1271_scan_sched_scan_stop(wl);
+ wl1271_scan_sched_scan_stop(wl, wlvif);
wl1271_ps_elp_sleep(wl);
out:
@@ -3316,8 +3488,15 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
skb->data,
skb->len, 0,
rates);
-
dev_kfree_skb(skb);
+
+ if (ret < 0)
+ goto out;
+
+ wl1271_debug(DEBUG_AP, "probe response updated");
+ set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
+
+out:
return ret;
}
@@ -3422,6 +3601,87 @@ out:
return ret;
}
+static int wlcore_set_beacon_template(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ bool is_ap)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct ieee80211_hdr *hdr;
+ u32 min_rate;
+ int ret;
+ int ieoffset = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
+ u16 tmpl_id;
+
+ if (!beacon) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ wl1271_debug(DEBUG_MASTER, "beacon updated");
+
+ ret = wl1271_ssid_set(vif, beacon, ieoffset);
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
+ min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
+ CMD_TEMPL_BEACON;
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
+ beacon->data,
+ beacon->len, 0,
+ min_rate);
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out;
+ }
+
+ /*
+ * In case we already have a probe-resp beacon set explicitly
+ * by usermode, don't use the beacon data.
+ */
+ if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
+ goto end_bcn;
+
+ /* remove TIM ie from probe response */
+ wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
+
+ /*
+ * remove p2p ie from probe response.
+ * the fw reponds to probe requests that don't include
+ * the p2p ie. probe requests with p2p ie will be passed,
+ * and will be responded by the supplicant (the spec
+ * forbids including the p2p ie when responding to probe
+ * requests that didn't include it).
+ */
+ wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P, ieoffset);
+
+ hdr = (struct ieee80211_hdr *) beacon->data;
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+ if (is_ap)
+ ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
+ beacon->data,
+ beacon->len,
+ min_rate);
+ else
+ ret = wl1271_cmd_template_set(wl, wlvif->role_id,
+ CMD_TEMPL_PROBE_RESPONSE,
+ beacon->data,
+ beacon->len, 0,
+ min_rate);
+end_bcn:
+ dev_kfree_skb(beacon);
+ if (ret < 0)
+ goto out;
+
+out:
+ return ret;
+}
+
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -3440,81 +3700,12 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
- if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
- wl1271_debug(DEBUG_AP, "probe response updated");
- set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
- }
+
+ wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
}
if ((changed & BSS_CHANGED_BEACON)) {
- struct ieee80211_hdr *hdr;
- u32 min_rate;
- int ieoffset = offsetof(struct ieee80211_mgmt,
- u.beacon.variable);
- struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
- u16 tmpl_id;
-
- if (!beacon) {
- ret = -EINVAL;
- goto out;
- }
-
- wl1271_debug(DEBUG_MASTER, "beacon updated");
-
- ret = wl1271_ssid_set(vif, beacon, ieoffset);
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out;
- }
- min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
- tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
- CMD_TEMPL_BEACON;
- ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
- beacon->data,
- beacon->len, 0,
- min_rate);
- if (ret < 0) {
- dev_kfree_skb(beacon);
- goto out;
- }
-
- /*
- * In case we already have a probe-resp beacon set explicitly
- * by usermode, don't use the beacon data.
- */
- if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
- goto end_bcn;
-
- /* remove TIM ie from probe response */
- wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
-
- /*
- * remove p2p ie from probe response.
- * the fw reponds to probe requests that don't include
- * the p2p ie. probe requests with p2p ie will be passed,
- * and will be responded by the supplicant (the spec
- * forbids including the p2p ie when responding to probe
- * requests that didn't include it).
- */
- wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
- WLAN_OUI_TYPE_WFA_P2P, ieoffset);
-
- hdr = (struct ieee80211_hdr *) beacon->data;
- hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_PROBE_RESP);
- if (is_ap)
- ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
- beacon->data,
- beacon->len,
- min_rate);
- else
- ret = wl1271_cmd_template_set(wl, wlvif->role_id,
- CMD_TEMPL_PROBE_RESPONSE,
- beacon->data,
- beacon->len, 0,
- min_rate);
-end_bcn:
- dev_kfree_skb(beacon);
+ ret = wlcore_set_beacon_template(wl, vif, is_ap);
if (ret < 0)
goto out;
}
@@ -3551,6 +3742,14 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
ret = wl1271_ap_init_templates(wl, vif);
if (ret < 0)
goto out;
+
+ ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_set_beacon_template(wl, vif, true);
+ if (ret < 0)
+ goto out;
}
ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
@@ -3691,7 +3890,8 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
sta_rate_set = sta->supp_rates[wl->hw->conf.channel->band];
if (sta->ht_cap.ht_supported)
sta_rate_set |=
- (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
+ (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
+ (sta->ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
sta_ht_cap = sta->ht_cap;
sta_exists = true;
@@ -3704,13 +3904,11 @@ sta_not_found:
u32 rates;
int ieoffset;
wlvif->aid = bss_conf->aid;
+ wlvif->channel_type = bss_conf->channel_type;
wlvif->beacon_int = bss_conf->beacon_int;
do_join = true;
set_assoc = true;
- /* Cancel connection_loss_work */
- cancel_delayed_work_sync(&wl->connection_loss_work);
-
/*
* use basic rates from AP, and determine lowest rate
* to use with control frames.
@@ -3960,6 +4158,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
(int)changed);
+ /*
+ * make sure to cancel pending disconnections if our association
+ * state changed
+ */
+ if (!is_ap && (changed & BSS_CHANGED_ASSOC))
+ cancel_delayed_work_sync(&wl->connection_loss_work);
+
+ if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
+ !bss_conf->enable_beacon)
+ wl1271_tx_flush(wl);
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF))
@@ -4068,16 +4277,13 @@ out:
static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct wl1271 *wl = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
if (idx != 0)
return -ENOENT;
survey->channel = conf->channel;
- survey->filled = SURVEY_INFO_NOISE_DBM;
- survey->noise = wl->noise;
-
+ survey->filled = 0;
return 0;
}
@@ -4343,9 +4549,14 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_RX_STOP:
if (!(*ba_bitmap & BIT(tid))) {
- ret = -EINVAL;
- wl1271_error("no active RX BA session on tid: %d",
+ /*
+ * this happens on reconfig - so only output a debug
+ * message for now, and don't fail the function.
+ */
+ wl1271_debug(DEBUG_MAC80211,
+ "no active RX BA session on tid: %d",
tid);
+ ret = 0;
break;
}
@@ -4394,7 +4605,7 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
- for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ for (i = 0; i < WLCORE_NUM_BANDS; i++)
wlvif->bitrate_masks[i] =
wl1271_tx_enabled_rates_get(wl,
mask->control[i].legacy,
@@ -4462,6 +4673,13 @@ out:
mutex_unlock(&wl->mutex);
}
+static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct wl1271 *wl = hw->priv;
+
+ wl1271_tx_flush(wl);
+}
+
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
@@ -4624,7 +4842,7 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
static const struct ieee80211_ops wl1271_ops = {
.start = wl1271_op_start,
- .stop = wl1271_op_stop,
+ .stop = wlcore_op_stop,
.add_interface = wl1271_op_add_interface,
.remove_interface = wl1271_op_remove_interface,
.change_interface = wl12xx_op_change_interface,
@@ -4636,7 +4854,7 @@ static const struct ieee80211_ops wl1271_ops = {
.prepare_multicast = wl1271_op_prepare_multicast,
.configure_filter = wl1271_op_configure_filter,
.tx = wl1271_op_tx,
- .set_key = wl1271_op_set_key,
+ .set_key = wlcore_op_set_key,
.hw_scan = wl1271_op_hw_scan,
.cancel_hw_scan = wl1271_op_cancel_hw_scan,
.sched_scan_start = wl1271_op_sched_scan_start,
@@ -4652,6 +4870,7 @@ static const struct ieee80211_ops wl1271_ops = {
.tx_frames_pending = wl1271_tx_frames_pending,
.set_bitrate_mask = wl12xx_set_bitrate_mask,
.channel_switch = wl12xx_op_channel_switch,
+ .flush = wlcore_op_flush,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
@@ -4882,18 +5101,22 @@ static int wl12xx_get_hw_info(struct wl1271 *wl)
if (ret < 0)
goto out;
- wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B);
+ ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
+ if (ret < 0)
+ goto out;
wl->fuse_oui_addr = 0;
wl->fuse_nic_addr = 0;
- wl->hw_pg_ver = wl->ops->get_pg_ver(wl);
+ ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
+ if (ret < 0)
+ goto out;
if (wl->ops->get_mac)
- wl->ops->get_mac(wl);
+ ret = wl->ops->get_mac(wl);
- wl1271_power_off(wl);
out:
+ wl1271_power_off(wl);
return ret;
}
@@ -4905,14 +5128,8 @@ static int wl1271_register_hw(struct wl1271 *wl)
if (wl->mac80211_registered)
return 0;
- ret = wl12xx_get_hw_info(wl);
- if (ret < 0) {
- wl1271_error("couldn't get hw info");
- goto out;
- }
-
- ret = wl1271_fetch_nvs(wl);
- if (ret == 0) {
+ wl1271_fetch_nvs(wl);
+ if (wl->nvs != NULL) {
/* NOTE: The wl->nvs->nvs element must be first, in
* order to simplify the casting, we assume it is at
* the beginning of the wl->nvs structure.
@@ -4960,6 +5177,29 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
}
+static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_combination
+wlcore_iface_combinations[] = {
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .limits = wlcore_iface_limits,
+ .n_limits = ARRAY_SIZE(wlcore_iface_limits),
+ },
+};
+
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
static const u32 cipher_suites[] = {
@@ -4970,9 +5210,11 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
WL1271_CIPHER_SUITE_GEM,
};
- /* The tx descriptor buffer and the TKIP space. */
- wl->hw->extra_tx_headroom = WL1271_EXTRA_SPACE_TKIP +
- sizeof(struct wl1271_tx_hw_descr);
+ /* The tx descriptor buffer */
+ wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
+
+ if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
+ wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
/* unit us */
/* FIXME: find a proper value */
@@ -5025,12 +5267,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
*/
memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
sizeof(wl1271_band_2ghz));
- memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap,
- sizeof(wl->ht_cap));
+ memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
+ &wl->ht_cap[IEEE80211_BAND_2GHZ],
+ sizeof(*wl->ht_cap));
memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
sizeof(wl1271_band_5ghz));
- memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap,
- sizeof(wl->ht_cap));
+ memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
+ &wl->ht_cap[IEEE80211_BAND_5GHZ],
+ sizeof(*wl->ht_cap));
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
&wl->bands[IEEE80211_BAND_2GHZ];
@@ -5049,6 +5293,11 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+ /* allowed interface combinations */
+ wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
+ wl->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(wlcore_iface_combinations);
+
SET_IEEE80211_DEV(wl->hw, wl->dev);
wl->hw->sta_data_size = sizeof(struct wl1271_station);
@@ -5117,8 +5366,10 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->band = IEEE80211_BAND_2GHZ;
+ wl->channel_type = NL80211_CHAN_NO_HT;
wl->flags = 0;
wl->sg_enabled = true;
+ wl->sleep_auth = WL1271_PSM_ILLEGAL;
wl->hw_pg_ver = -1;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
@@ -5142,6 +5393,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
wl->state = WL1271_STATE_OFF;
wl->fw_type = WL12XX_FW_TYPE_NONE;
mutex_init(&wl->mutex);
+ mutex_init(&wl->flush_mutex);
order = get_order(WL1271_AGGR_BUFFER_SIZE);
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -5222,7 +5474,7 @@ int wlcore_free_hw(struct wl1271 *wl)
kfree(wl->nvs);
wl->nvs = NULL;
- kfree(wl->fw_status);
+ kfree(wl->fw_status_1);
kfree(wl->tx_res_if);
destroy_workqueue(wl->freezable_wq);
@@ -5279,8 +5531,6 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
wlcore_adjust_conf(wl);
wl->irq = platform_get_irq(pdev, 0);
- wl->ref_clock = pdata->board_ref_clock;
- wl->tcxo_clock = pdata->board_tcxo_clock;
wl->platform_quirks = pdata->platform_quirks;
wl->set_power = pdata->set_power;
wl->dev = &pdev->dev;
@@ -5293,7 +5543,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
else
irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
- ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+ ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
irqflags,
pdev->name, wl);
if (ret < 0) {
@@ -5301,6 +5551,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
goto out_free_hw;
}
+#ifdef CONFIG_PM
ret = enable_irq_wake(wl->irq);
if (!ret) {
wl->irq_wake_enabled = true;
@@ -5314,8 +5565,19 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
WL1271_RX_FILTER_MAX_PATTERN_SIZE;
}
}
+#endif
disable_irq(wl->irq);
+ ret = wl12xx_get_hw_info(wl);
+ if (ret < 0) {
+ wl1271_error("couldn't get hw info");
+ goto out_irq;
+ }
+
+ ret = wl->ops->identify_chip(wl);
+ if (ret < 0)
+ goto out_irq;
+
ret = wl1271_init_ieee80211(wl);
if (ret)
goto out_irq;
@@ -5328,7 +5590,7 @@ int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
if (ret < 0) {
wl1271_error("failed to create sysfs file bt_coex_state");
- goto out_irq;
+ goto out_unreg;
}
/* Create sysfs file to get HW PG version */
@@ -5353,6 +5615,9 @@ out_hw_pg_ver:
out_bt_coex_state:
device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+out_unreg:
+ wl1271_unregister_hw(wl);
+
out_irq:
free_irq(wl->irq, wl);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 756eee2257b4..46d36fd30eba 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -28,11 +28,14 @@
#define WL1271_WAKEUP_TIMEOUT 500
+#define ELP_ENTRY_DELAY 5
+
void wl1271_elp_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
struct wl12xx_vif *wlvif;
+ int ret;
dwork = container_of(work, struct delayed_work, work);
wl = container_of(dwork, struct wl1271, elp_work);
@@ -61,7 +64,12 @@ void wl1271_elp_work(struct work_struct *work)
}
wl1271_debug(DEBUG_PSM, "chip to elp");
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+ ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto out;
+ }
+
set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
out:
@@ -72,8 +80,9 @@ out:
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
struct wl12xx_vif *wlvif;
+ u32 timeout;
- if (wl->quirks & WLCORE_QUIRK_NO_ELP)
+ if (wl->sleep_auth != WL1271_PSM_ELP)
return;
/* we shouldn't get consecutive sleep requests */
@@ -89,8 +98,13 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
return;
}
+ if (wl->conf.conn.forced_ps)
+ timeout = ELP_ENTRY_DELAY;
+ else
+ timeout = wl->conf.conn.dynamic_ps_timeout;
+
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
- msecs_to_jiffies(wl->conf.conn.dynamic_ps_timeout));
+ msecs_to_jiffies(timeout));
}
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
@@ -127,7 +141,11 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
wl->elp_compl = &compl;
spin_unlock_irqrestore(&wl->wl_lock, flags);
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
+ ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto err;
+ }
if (!pending) {
ret = wait_for_completion_timeout(
@@ -185,8 +203,12 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
set_bit(WLVIF_FLAG_IN_PS, &wlvif->flags);
- /* enable beacon early termination. Not relevant for 5GHz */
- if (wlvif->band == IEEE80211_BAND_2GHZ) {
+ /*
+ * enable beacon early termination.
+ * Not relevant for 5GHz and for high rates.
+ */
+ if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+ (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
ret = wl1271_acx_bet_enable(wl, wlvif, true);
if (ret < 0)
return ret;
@@ -196,7 +218,8 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
- if (wlvif->band == IEEE80211_BAND_2GHZ) {
+ if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+ (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
ret = wl1271_acx_bet_enable(wl, wlvif, false);
if (ret < 0)
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index d6a3c6b07827..f55e2f9e7ac5 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -127,7 +127,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
}
if (rx_align == WLCORE_RX_BUF_UNALIGNED)
- reserved = NET_IP_ALIGN;
+ reserved = RX_BUF_ALIGN;
/* the data read starts with the descriptor */
desc = (struct wl1271_rx_descriptor *) data;
@@ -175,7 +175,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
*/
memcpy(buf, data + sizeof(*desc), pkt_data_len);
if (rx_align == WLCORE_RX_BUF_PADDED)
- skb_pull(skb, NET_IP_ALIGN);
+ skb_pull(skb, RX_BUF_ALIGN);
*hlid = desc->hlid;
@@ -186,6 +186,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
is_data = 1;
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
+ wlcore_hw_set_rx_csum(wl, desc, skb);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
@@ -199,17 +200,18 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
return is_data;
}
-void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
{
unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
u32 buf_size;
- u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
- u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
+ u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
+ u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
u32 rx_counter;
u32 pkt_len, align_pkt_len;
u32 pkt_offset, des;
u8 hlid;
enum wl_rx_buf_align rx_align;
+ int ret = 0;
while (drv_rx_counter != fw_rx_counter) {
buf_size = 0;
@@ -223,7 +225,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
break;
buf_size += align_pkt_len;
rx_counter++;
- rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
+ rx_counter %= wl->num_rx_desc;
}
if (buf_size == 0) {
@@ -233,9 +235,14 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
/* Read all available packets at once */
des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
- wlcore_hw_prepare_read(wl, des, buf_size);
- wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
- buf_size, true);
+ ret = wlcore_hw_prepare_read(wl, des, buf_size);
+ if (ret < 0)
+ goto out;
+
+ ret = wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
+ buf_size, true);
+ if (ret < 0)
+ goto out;
/* Split data into separate packets */
pkt_offset = 0;
@@ -263,7 +270,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
wl->rx_counter++;
drv_rx_counter++;
- drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
+ drv_rx_counter %= wl->num_rx_desc;
pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len);
}
}
@@ -272,11 +279,17 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
* Write the driver's packet counter to the FW. This is only required
* for older hardware revisions
*/
- if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
- wl1271_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
- wl->rx_counter);
+ if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
+ ret = wlcore_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
+ wl->rx_counter);
+ if (ret < 0)
+ goto out;
+ }
wl12xx_rearm_rx_streaming(wl, active_hlids);
+
+out:
+ return ret;
}
#ifdef CONFIG_PM
@@ -305,14 +318,19 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
return 0;
}
-void wl1271_rx_filter_clear_all(struct wl1271 *wl)
+int wl1271_rx_filter_clear_all(struct wl1271 *wl)
{
- int i;
+ int i, ret = 0;
for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
if (!wl->rx_filter_enabled[i])
continue;
- wl1271_rx_filter_enable(wl, i, 0, NULL);
+ ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
+ if (ret)
+ goto out;
}
+
+out:
+ return ret;
}
#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index e9a162a864ca..71eba1899915 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -38,8 +38,6 @@
#define RX_DESC_PACKETID_SHIFT 11
#define RX_MAX_PACKET_ID 3
-#define NUM_RX_PKT_DESC_MOD_MASK 7
-
#define RX_DESC_VALID_FCS 0x0001
#define RX_DESC_MATCH_RXADDR1 0x0002
#define RX_DESC_MCAST 0x0004
@@ -102,6 +100,15 @@
/* If set, the start of IP payload is not 4 bytes aligned */
#define RX_BUF_UNALIGNED_PAYLOAD BIT(20)
+/* If set, the buffer was padded by the FW to be 4 bytes aligned */
+#define RX_BUF_PADDED_PAYLOAD BIT(30)
+
+/*
+ * Account for the padding inserted by the FW in case of RX_ALIGNMENT
+ * or for fixing alignment in case the packet wasn't aligned.
+ */
+#define RX_BUF_ALIGN 2
+
/* Describes the alignment state of a Rx buffer */
enum wl_rx_buf_align {
WLCORE_RX_BUF_ALIGNED,
@@ -136,11 +143,11 @@ struct wl1271_rx_descriptor {
u8 reserved;
} __packed;
-void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status);
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
struct wl12xx_rx_filter *filter);
-void wl1271_rx_filter_clear_all(struct wl1271 *wl);
+int wl1271_rx_filter_clear_all(struct wl1271 *wl);
#endif
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index ade21a011c45..dbeca1bfbb2c 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -226,7 +226,7 @@ static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
cmd->params.role_id, band,
wl->scan.ssid, wl->scan.ssid_len,
wl->scan.req->ie,
- wl->scan.req->ie_len);
+ wl->scan.req->ie_len, false);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
@@ -411,7 +411,8 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
struct cfg80211_sched_scan_request *req,
struct conn_scan_ch_params *channels,
u32 band, bool radar, bool passive,
- int start, int max_channels)
+ int start, int max_channels,
+ u8 *n_pactive_ch)
{
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, j;
@@ -479,6 +480,23 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
channels[j].tx_power_att = req->channels[i]->max_power;
channels[j].channel = req->channels[i]->hw_value;
+ if ((band == IEEE80211_BAND_2GHZ) &&
+ (channels[j].channel >= 12) &&
+ (channels[j].channel <= 14) &&
+ (flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
+ !force_passive) {
+ /* pactive channels treated as DFS */
+ channels[j].flags = SCAN_CHANNEL_FLAGS_DFS;
+
+ /*
+ * n_pactive_ch is counted down from the end of
+ * the passive channel list
+ */
+ (*n_pactive_ch)++;
+ wl1271_debug(DEBUG_SCAN, "n_pactive_ch = %d",
+ *n_pactive_ch);
+ }
+
j++;
}
}
@@ -491,38 +509,47 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
struct cfg80211_sched_scan_request *req,
struct wl1271_cmd_sched_scan_config *cfg)
{
+ u8 n_pactive_ch = 0;
+
cfg->passive[0] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
IEEE80211_BAND_2GHZ,
false, true, 0,
- MAX_CHANNELS_2GHZ);
+ MAX_CHANNELS_2GHZ,
+ &n_pactive_ch);
cfg->active[0] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_2,
IEEE80211_BAND_2GHZ,
false, false,
cfg->passive[0],
- MAX_CHANNELS_2GHZ);
+ MAX_CHANNELS_2GHZ,
+ &n_pactive_ch);
cfg->passive[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
IEEE80211_BAND_5GHZ,
false, true, 0,
- MAX_CHANNELS_5GHZ);
+ MAX_CHANNELS_5GHZ,
+ &n_pactive_ch);
cfg->dfs =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
IEEE80211_BAND_5GHZ,
true, true,
cfg->passive[1],
- MAX_CHANNELS_5GHZ);
+ MAX_CHANNELS_5GHZ,
+ &n_pactive_ch);
cfg->active[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels_5,
IEEE80211_BAND_5GHZ,
false, false,
cfg->passive[1] + cfg->dfs,
- MAX_CHANNELS_5GHZ);
+ MAX_CHANNELS_5GHZ,
+ &n_pactive_ch);
/* 802.11j channels are not supported yet */
cfg->passive[2] = 0;
cfg->active[2] = 0;
+ cfg->n_pactive_ch = n_pactive_ch;
+
wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
cfg->active[0], cfg->passive[0]);
wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
@@ -537,6 +564,7 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
/* Returns the scan type to be used or a negative value on error */
static int
wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req)
{
struct wl1271_cmd_sched_scan_ssid_list *cmd = NULL;
@@ -565,6 +593,7 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
goto out;
}
+ cmd->role_id = wlvif->dev_role_id;
if (!n_match_ssids) {
/* No filter, with ssids */
type = SCAN_SSID_FILTER_DISABLED;
@@ -603,7 +632,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
continue;
for (j = 0; j < cmd->n_ssids; j++)
- if (!memcmp(req->ssids[i].ssid,
+ if ((req->ssids[i].ssid_len ==
+ cmd->ssids[j].len) &&
+ !memcmp(req->ssids[i].ssid,
cmd->ssids[j].ssid,
req->ssids[i].ssid_len)) {
cmd->ssids[j].type =
@@ -652,6 +683,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
if (!cfg)
return -ENOMEM;
+ cfg->role_id = wlvif->dev_role_id;
cfg->rssi_threshold = c->rssi_threshold;
cfg->snr_threshold = c->snr_threshold;
cfg->n_probe_reqs = c->num_probe_reqs;
@@ -669,7 +701,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
cfg->intervals[i] = cpu_to_le32(req->interval);
cfg->ssid_len = 0;
- ret = wl12xx_scan_sched_scan_ssid_list(wl, req);
+ ret = wl12xx_scan_sched_scan_ssid_list(wl, wlvif, req);
if (ret < 0)
goto out;
@@ -690,7 +722,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[band],
- ies->len[band]);
+ ies->len[band], true);
if (ret < 0) {
wl1271_error("2.4GHz PROBE request template failed");
goto out;
@@ -704,7 +736,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[band],
- ies->len[band]);
+ ies->len[band], true);
if (ret < 0) {
wl1271_error("5GHz PROBE request template failed");
goto out;
@@ -734,13 +766,15 @@ int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
if (wlvif->bss_type != BSS_TYPE_STA_BSS)
return -EOPNOTSUPP;
- if (test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
return -EBUSY;
start = kzalloc(sizeof(*start), GFP_KERNEL);
if (!start)
return -ENOMEM;
+ start->role_id = wlvif->dev_role_id;
start->tag = WL1271_SCAN_DEFAULT_TAG;
ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
@@ -762,7 +796,7 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl)
ieee80211_sched_scan_results(wl->hw);
}
-void wl1271_scan_sched_scan_stop(struct wl1271 *wl)
+void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_sched_scan_stop *stop;
int ret = 0;
@@ -776,6 +810,7 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl)
return;
}
+ stop->role_id = wlvif->dev_role_id;
stop->tag = WL1271_SCAN_DEFAULT_TAG;
ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
diff --git a/drivers/net/wireless/ti/wlcore/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 81ee36ac2078..29f3c8d6b046 100644
--- a/drivers/net/wireless/ti/wlcore/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -40,7 +40,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies);
int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
+void wl1271_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl1271_scan_sched_scan_results(struct wl1271 *wl);
#define WL1271_SCAN_MAX_CHANNELS 24
@@ -142,7 +142,8 @@ enum {
SCAN_BSS_TYPE_ANY,
};
-#define SCAN_CHANNEL_FLAGS_DFS BIT(0)
+#define SCAN_CHANNEL_FLAGS_DFS BIT(0) /* channel is passive until an
+ activity is detected on it */
#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1)
struct conn_scan_ch_params {
@@ -185,7 +186,10 @@ struct wl1271_cmd_sched_scan_config {
u8 dfs;
- u8 padding[3];
+ u8 n_pactive_ch; /* number of pactive (passive until fw detects energy)
+ channels in BG band */
+ u8 role_id;
+ u8 padding[1];
struct conn_scan_ch_params channels_2[MAX_CHANNELS_2GHZ];
struct conn_scan_ch_params channels_5[MAX_CHANNELS_5GHZ];
@@ -212,21 +216,24 @@ struct wl1271_cmd_sched_scan_ssid_list {
u8 n_ssids;
struct wl1271_ssid ssids[SCHED_SCAN_MAX_SSIDS];
- u8 padding[3];
+ u8 role_id;
+ u8 padding[2];
} __packed;
struct wl1271_cmd_sched_scan_start {
struct wl1271_cmd_header header;
u8 tag;
- u8 padding[3];
+ u8 role_id;
+ u8 padding[2];
} __packed;
struct wl1271_cmd_sched_scan_stop {
struct wl1271_cmd_header header;
u8 tag;
- u8 padding[3];
+ u8 role_id;
+ u8 padding[2];
} __packed;
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 0a72347cfc4c..73ace4b2604e 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
+#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
@@ -32,6 +33,7 @@
#include <linux/gpio.h>
#include <linux/wl12xx.h>
#include <linux/pm_runtime.h>
+#include <linux/printk.h>
#include "wlcore.h"
#include "wl12xx_80211.h"
@@ -45,6 +47,8 @@
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#endif
+static bool dump = false;
+
struct wl12xx_sdio_glue {
struct device *dev;
struct platform_device *core;
@@ -67,8 +71,8 @@ static void wl1271_sdio_set_block_size(struct device *child,
sdio_release_host(func);
}
-static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
- size_t len, bool fixed)
+static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
{
int ret;
struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
@@ -76,6 +80,13 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
sdio_claim_host(func);
+ if (unlikely(dump)) {
+ printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
+ print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buf, len, false);
+ }
+
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
@@ -92,12 +103,14 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
sdio_release_host(func);
- if (ret)
+ if (WARN_ON(ret))
dev_err(child->parent, "sdio read failed (%d)\n", ret);
+
+ return ret;
}
-static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
- size_t len, bool fixed)
+static int __must_check wl12xx_sdio_raw_write(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
{
int ret;
struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
@@ -105,6 +118,13 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
sdio_claim_host(func);
+ if (unlikely(dump)) {
+ printk(KERN_DEBUG "wlcore_sdio: WRITE to 0x%04x\n", addr);
+ print_hex_dump(KERN_DEBUG, "wlcore_sdio: WRITE ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buf, len, false);
+ }
+
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
@@ -121,25 +141,30 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
sdio_release_host(func);
- if (ret)
+ if (WARN_ON(ret))
dev_err(child->parent, "sdio write failed (%d)\n", ret);
+
+ return ret;
}
static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
{
int ret;
struct sdio_func *func = dev_to_sdio_func(glue->dev);
+ struct mmc_card *card = func->card;
- /* If enabled, tell runtime PM not to power off the card */
- if (pm_runtime_enabled(&func->dev)) {
- ret = pm_runtime_get_sync(&func->dev);
- if (ret < 0)
- goto out;
- } else {
- /* Runtime PM is disabled: power up the card manually */
- ret = mmc_power_restore_host(func->card->host);
- if (ret < 0)
+ ret = pm_runtime_get_sync(&card->dev);
+ if (ret) {
+ /*
+ * Runtime PM might be temporarily disabled, or the device
+ * might have a positive reference counter. Make sure it is
+ * really powered on.
+ */
+ ret = mmc_power_restore_host(card->host);
+ if (ret < 0) {
+ pm_runtime_put_sync(&card->dev);
goto out;
+ }
}
sdio_claim_host(func);
@@ -154,20 +179,21 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
int ret;
struct sdio_func *func = dev_to_sdio_func(glue->dev);
+ struct mmc_card *card = func->card;
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
- /* Power off the card manually, even if runtime PM is enabled. */
- ret = mmc_power_save_host(func->card->host);
+ /* Power off the card manually in case it wasn't powered off above */
+ ret = mmc_power_save_host(card->host);
if (ret < 0)
- return ret;
+ goto out;
- /* If enabled, let runtime PM know the card is powered off */
- if (pm_runtime_enabled(&func->dev))
- ret = pm_runtime_put_sync(&func->dev);
+ /* Let runtime PM know the card is powered off */
+ pm_runtime_put_sync(&card->dev);
+out:
return ret;
}
@@ -196,6 +222,7 @@ static int __devinit wl1271_probe(struct sdio_func *func,
struct resource res[1];
mmc_pm_flag_t mmcflags;
int ret = -ENOMEM;
+ const char *chip_family;
/* We are only able to handle the wlan function */
if (func->num != 0x02)
@@ -236,7 +263,18 @@ static int __devinit wl1271_probe(struct sdio_func *func,
/* Tell PM core that we don't need the card to be powered now */
pm_runtime_put_noidle(&func->dev);
- glue->core = platform_device_alloc("wl12xx", -1);
+ /*
+ * Due to a hardware bug, we can't differentiate wl18xx from
+ * wl12xx, because both report the same device ID. The only
+ * way to differentiate is by checking the SDIO revision,
+ * which is 3.00 on the wl18xx chips.
+ */
+ if (func->card->cccr.sdio_vsn == SDIO_SDIO_REV_3_00)
+ chip_family = "wl18xx";
+ else
+ chip_family = "wl12xx";
+
+ glue->core = platform_device_alloc(chip_family, -1);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device");
ret = -ENOMEM;
@@ -367,12 +405,9 @@ static void __exit wl1271_exit(void)
module_init(wl1271_init);
module_exit(wl1271_exit);
+module_param(dump, bool, S_IRUSR | S_IWUSR);
+MODULE_PARM_DESC(dump, "Enable sdio read/write dumps.");
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
-MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
-MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
-MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
-MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
-MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 553cd3cbb98c..8da4ed243ebc 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -193,8 +193,8 @@ static int wl12xx_spi_read_busy(struct device *child)
return -ETIMEDOUT;
}
-static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
- size_t len, bool fixed)
+static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct wl1271 *wl = dev_get_drvdata(child);
@@ -238,7 +238,7 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
wl12xx_spi_read_busy(child)) {
memset(buf, 0, chunk_len);
- return;
+ return 0;
}
spi_message_init(&m);
@@ -256,10 +256,12 @@ static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
buf += chunk_len;
len -= chunk_len;
}
+
+ return 0;
}
-static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
- size_t len, bool fixed)
+static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
+ void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
@@ -304,6 +306,8 @@ static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
}
spi_sync(to_spi_device(glue->dev), &m);
+
+ return 0;
}
static struct wl1271_if_operations spi_ops = {
@@ -431,10 +435,4 @@ module_exit(wl1271_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
-MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
-MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
-MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
-MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
-MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
-MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
MODULE_ALIAS("spi:wl1271");
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 0e59ea2cdd39..49e5ee1525c9 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -40,7 +40,7 @@ enum wl1271_tm_commands {
WL1271_TM_CMD_CONFIGURE,
WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_SET_PLT_MODE,
- WL1271_TM_CMD_RECOVER,
+ WL1271_TM_CMD_RECOVER, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_GET_MAC,
__WL1271_TM_CMD_AFTER_LAST
@@ -108,6 +108,20 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
}
if (answer) {
+ /* If we got bip calibration answer print radio status */
+ struct wl1271_cmd_cal_p2g *params =
+ (struct wl1271_cmd_cal_p2g *) buf;
+
+ s16 radio_status = (s16) le16_to_cpu(params->radio_status);
+
+ if (params->test.id == TEST_CMD_P2G_CAL &&
+ radio_status < 0)
+ wl1271_warning("testmode cmd: radio status=%d",
+ radio_status);
+ else
+ wl1271_info("testmode cmd: radio status=%d",
+ radio_status);
+
len = nla_total_size(buf_len);
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
if (!skb) {
@@ -115,8 +129,12 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
goto out_sleep;
}
- if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf))
- goto nla_put_failure;
+ if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf)) {
+ kfree_skb(skb);
+ ret = -EMSGSIZE;
+ goto out_sleep;
+ }
+
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
goto out_sleep;
@@ -128,11 +146,6 @@ out:
mutex_unlock(&wl->mutex);
return ret;
-
-nla_put_failure:
- kfree_skb(skb);
- ret = -EMSGSIZE;
- goto out_sleep;
}
static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
@@ -178,8 +191,12 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
goto out_free;
}
- if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd))
- goto nla_put_failure;
+ if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd)) {
+ kfree_skb(skb);
+ ret = -EMSGSIZE;
+ goto out_free;
+ }
+
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
goto out_free;
@@ -192,11 +209,6 @@ out:
mutex_unlock(&wl->mutex);
return ret;
-
-nla_put_failure:
- kfree_skb(skb);
- ret = -EMSGSIZE;
- goto out_free;
}
static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
@@ -231,6 +243,43 @@ static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
return 0;
}
+static int wl1271_tm_detect_fem(struct wl1271 *wl, struct nlattr *tb[])
+{
+ /* return FEM type */
+ int ret, len;
+ struct sk_buff *skb;
+
+ ret = wl1271_plt_start(wl, PLT_FEM_DETECT);
+ if (ret < 0)
+ goto out;
+
+ mutex_lock(&wl->mutex);
+
+ len = nla_total_size(sizeof(wl->fem_manuf));
+ skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_mutex;
+ }
+
+ if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(wl->fem_manuf),
+ &wl->fem_manuf)) {
+ kfree_skb(skb);
+ ret = -EMSGSIZE;
+ goto out_mutex;
+ }
+
+ ret = cfg80211_testmode_reply(skb);
+
+out_mutex:
+ mutex_unlock(&wl->mutex);
+
+ /* We always stop plt after DETECT mode */
+ wl1271_plt_stop(wl);
+out:
+ return ret;
+}
+
static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
{
u32 val;
@@ -244,11 +293,14 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]);
switch (val) {
- case 0:
+ case PLT_OFF:
ret = wl1271_plt_stop(wl);
break;
- case 1:
- ret = wl1271_plt_start(wl);
+ case PLT_ON:
+ ret = wl1271_plt_start(wl, PLT_ON);
+ break;
+ case PLT_FEM_DETECT:
+ ret = wl1271_tm_detect_fem(wl, tb);
break;
default:
ret = -EINVAL;
@@ -258,15 +310,6 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
return ret;
}
-static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[])
-{
- wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover");
-
- wl12xx_queue_recovery_work(wl);
-
- return 0;
-}
-
static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
{
struct sk_buff *skb;
@@ -298,8 +341,12 @@ static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
goto out;
}
- if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr))
- goto nla_put_failure;
+ if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr)) {
+ kfree_skb(skb);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
goto out;
@@ -307,11 +354,6 @@ static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
out:
mutex_unlock(&wl->mutex);
return ret;
-
-nla_put_failure:
- kfree_skb(skb);
- ret = -EMSGSIZE;
- goto out;
}
int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
@@ -336,8 +378,6 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
return wl1271_tm_cmd_configure(wl, tb);
case WL1271_TM_CMD_SET_PLT_MODE:
return wl1271_tm_cmd_set_plt_mode(wl, tb);
- case WL1271_TM_CMD_RECOVER:
- return wl1271_tm_cmd_recover(wl, tb);
case WL1271_TM_CMD_GET_MAC:
return wl12xx_tm_cmd_get_mac(wl, tb);
default:
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 6893bc207994..f0081f746482 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -72,7 +72,7 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
return id;
}
-static void wl1271_free_tx_id(struct wl1271 *wl, int id)
+void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
if (__test_and_clear_bit(id, wl->tx_frames_map)) {
if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
@@ -82,6 +82,7 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
wl->tx_frames_cnt--;
}
}
+EXPORT_SYMBOL(wl1271_free_tx_id);
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
struct sk_buff *skb)
@@ -127,6 +128,7 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
return wl->dummy_packet == skb;
}
+EXPORT_SYMBOL(wl12xx_is_dummy_packet);
u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb)
@@ -146,10 +148,10 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return wl->system_hlid;
hdr = (struct ieee80211_hdr *)skb->data;
- if (ieee80211_is_mgmt(hdr->frame_control))
- return wlvif->ap.global_hlid;
- else
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
return wlvif->ap.bcast_hlid;
+ else
+ return wlvif->ap.global_hlid;
}
}
@@ -176,37 +178,34 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length)
{
- if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
- return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
- else
+ if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
+ !(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+ else
+ return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, u32 extra, u32 buf_offset,
- u8 hlid)
+ u8 hlid, bool is_gem)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
u32 total_blocks;
int id, ret = -EBUSY, ac;
- u32 spare_blocks = wl->normal_tx_spare;
- bool is_dummy = false;
+ u32 spare_blocks;
if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
return -EAGAIN;
+ spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
+
/* allocate free identifier for the packet */
id = wl1271_alloc_tx_id(wl, skb);
if (id < 0)
return id;
- if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
- is_dummy = true;
- else if (wlvif->is_gem)
- spare_blocks = wl->gem_tx_spare;
-
total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
if (total_blocks <= wl->tx_blocks_available) {
@@ -228,7 +227,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
- if (!is_dummy && wlvif &&
+ if (!wl12xx_is_dummy_packet(wl, skb) && wlvif &&
wlvif->bss_type == BSS_TYPE_AP_BSS &&
test_bit(hlid, wlvif->ap.sta_hlid_map))
wl->links[hlid].allocated_pkts++;
@@ -268,6 +267,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (extra) {
int hdrlen = ieee80211_hdrlen(frame_control);
memmove(frame_start, hdr, hdrlen);
+ skb_set_network_header(skb, skb_network_offset(skb) + extra);
}
/* configure packet life time */
@@ -305,19 +305,25 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
if (is_dummy || !wlvif)
rate_idx = 0;
else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
- /* if the packets are destined for AP (have a STA entry)
- send them with AP rate policies, otherwise use default
- basic rates */
- if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ /*
+ * if the packets are data packets
+ * send them with AP rate policies (EAPOLs are an exception),
+ * otherwise use default basic rates
+ */
+ if (skb->protocol == cpu_to_be16(ETH_P_PAE))
+ rate_idx = wlvif->sta.basic_rate_idx;
+ else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
rate_idx = wlvif->sta.p2p_rate_idx;
- else if (control->control.sta)
+ else if (ieee80211_is_data(frame_control))
rate_idx = wlvif->sta.ap_rate_idx;
else
rate_idx = wlvif->sta.basic_rate_idx;
} else {
if (hlid == wlvif->ap.global_hlid)
rate_idx = wlvif->ap.mgmt_rate_idx;
- else if (hlid == wlvif->ap.bcast_hlid)
+ else if (hlid == wlvif->ap.bcast_hlid ||
+ skb->protocol == cpu_to_be16(ETH_P_PAE))
+ /* send AP bcast and EAPOLs using the min basic rate */
rate_idx = wlvif->ap.bcast_rate_idx;
else
rate_idx = wlvif->ap.ucast_rate_idx[ac];
@@ -330,9 +336,9 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ieee80211_has_protected(frame_control))
tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
- desc->reserved = 0;
desc->tx_attr = cpu_to_le16(tx_attr);
+ wlcore_hw_set_tx_desc_csum(wl, desc, skb);
wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}
@@ -346,16 +352,20 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u32 total_len;
u8 hlid;
bool is_dummy;
+ bool is_gem = false;
- if (!skb)
+ if (!skb) {
+ wl1271_error("discarding null skb");
return -EINVAL;
+ }
info = IEEE80211_SKB_CB(skb);
/* TODO: handle dummy packets on multi-vifs */
is_dummy = wl12xx_is_dummy_packet(wl, skb);
- if (info->control.hw_key &&
+ if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+ info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
extra = WL1271_EXTRA_SPACE_TKIP;
@@ -373,6 +383,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return ret;
wlvif->default_key = idx;
}
+
+ is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
}
hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
if (hlid == WL12XX_INVALID_LINK_ID) {
@@ -380,7 +392,8 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
return -EINVAL;
}
- ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
+ ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
+ is_gem);
if (ret < 0)
return ret;
@@ -425,10 +438,10 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
rate_set >>= 1;
}
- /* MCS rates indication are on bits 16 - 23 */
+ /* MCS rates indication are on bits 16 - 31 */
rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
- for (bit = 0; bit < 8; bit++) {
+ for (bit = 0; bit < 16; bit++) {
if (rate_set & 0x1)
enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
rate_set >>= 1;
@@ -439,18 +452,15 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
- unsigned long flags;
int i;
for (i = 0; i < NUM_TX_QUEUES; i++) {
- if (test_bit(i, &wl->stopped_queues_map) &&
+ if (wlcore_is_queue_stopped_by_reason(wl, i,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
/* firmware buffer has space, restart queues */
- spin_lock_irqsave(&wl->wl_lock, flags);
- ieee80211_wake_queue(wl->hw,
- wl1271_tx_get_mac80211_queue(i));
- clear_bit(i, &wl->stopped_queues_map);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
+ wlcore_wake_queue(wl, i,
+ WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
}
}
@@ -656,18 +666,29 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
}
}
-void wl1271_tx_work_locked(struct wl1271 *wl)
+/*
+ * Returns failure values only in case of failed bus ops within this function.
+ * wl1271_prepare_tx_frame retvals won't be returned in order to avoid
+ * triggering recovery by higher layers when not necessary.
+ * In case a FW command fails within wl1271_prepare_tx_frame fails a recovery
+ * will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
+ * can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING
+ * within prepare_tx_frame code but there's nothing we should do about those
+ * as well.
+ */
+int wlcore_tx_work_locked(struct wl1271 *wl)
{
struct wl12xx_vif *wlvif;
struct sk_buff *skb;
struct wl1271_tx_hw_descr *desc;
- u32 buf_offset = 0;
+ u32 buf_offset = 0, last_len = 0;
bool sent_packets = false;
unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
- int ret;
+ int ret = 0;
+ int bus_ret = 0;
if (unlikely(wl->state == WL1271_STATE_OFF))
- return;
+ return 0;
while ((skb = wl1271_skb_dequeue(wl))) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -685,8 +706,14 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
* Flush buffer and try again.
*/
wl1271_skb_queue_head(wl, wlvif, skb);
- wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
- buf_offset, true);
+
+ buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
+ last_len);
+ bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
+ wl->aggr_buf, buf_offset, true);
+ if (bus_ret < 0)
+ goto out;
+
sent_packets = true;
buf_offset = 0;
continue;
@@ -710,7 +737,8 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
ieee80211_free_txskb(wl->hw, skb);
goto out_ack;
}
- buf_offset += ret;
+ last_len = ret;
+ buf_offset += last_len;
wl->tx_packets_count++;
if (has_data) {
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -720,8 +748,12 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
out_ack:
if (buf_offset) {
- wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
- buf_offset, true);
+ buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
+ bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
+ buf_offset, true);
+ if (bus_ret < 0)
+ goto out;
+
sent_packets = true;
}
if (sent_packets) {
@@ -729,13 +761,19 @@ out_ack:
* Interrupt the firmware with the new packets. This is only
* required for older hardware revisions
*/
- if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
- wl1271_write32(wl, WL12XX_HOST_WR_ACCESS,
- wl->tx_packets_count);
+ if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
+ bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
+ wl->tx_packets_count);
+ if (bus_ret < 0)
+ goto out;
+ }
wl1271_handle_tx_low_watermark(wl);
}
wl12xx_rearm_rx_streaming(wl, active_hlids);
+
+out:
+ return bus_ret;
}
void wl1271_tx_work(struct work_struct *work)
@@ -748,7 +786,11 @@ void wl1271_tx_work(struct work_struct *work)
if (ret < 0)
goto out;
- wl1271_tx_work_locked(wl);
+ ret = wlcore_tx_work_locked(wl);
+ if (ret < 0) {
+ wl12xx_queue_recovery_work(wl);
+ goto out;
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -849,7 +891,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
/* remove TKIP header space if present */
- if (info->control.hw_key &&
+ if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+ info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
@@ -869,22 +912,27 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
}
/* Called upon reception of a TX complete interrupt */
-void wl1271_tx_complete(struct wl1271 *wl)
+int wlcore_tx_complete(struct wl1271 *wl)
{
- struct wl1271_acx_mem_map *memmap =
- (struct wl1271_acx_mem_map *)wl->target_mem_map;
+ struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
u32 count, fw_counter;
u32 i;
+ int ret;
/* read the tx results from the chipset */
- wl1271_read(wl, le32_to_cpu(memmap->tx_result),
- wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+ ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
+ wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+ if (ret < 0)
+ goto out;
+
fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
/* write host counter to chipset (to ack) */
- wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
- offsetof(struct wl1271_tx_hw_res_if,
- tx_result_host_counter), fw_counter);
+ ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
+ offsetof(struct wl1271_tx_hw_res_if,
+ tx_result_host_counter), fw_counter);
+ if (ret < 0)
+ goto out;
count = fw_counter - wl->tx_results_count;
wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
@@ -904,8 +952,11 @@ void wl1271_tx_complete(struct wl1271 *wl)
wl->tx_results_count++;
}
+
+out:
+ return ret;
}
-EXPORT_SYMBOL(wl1271_tx_complete);
+EXPORT_SYMBOL(wlcore_tx_complete);
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
@@ -958,7 +1009,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
}
/* caller must hold wl->mutex and TX must be stopped */
-void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset(struct wl1271 *wl)
{
int i;
struct sk_buff *skb;
@@ -973,15 +1024,12 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
wl->tx_queue_count[i] = 0;
}
- wl->stopped_queues_map = 0;
-
/*
* Make sure the driver is at a consistent state, in case this
* function is called from a context other than interface removal.
* This call will always wake the TX queues.
*/
- if (reset_tx_queues)
- wl1271_handle_tx_low_watermark(wl);
+ wl1271_handle_tx_low_watermark(wl);
for (i = 0; i < wl->num_tx_desc; i++) {
if (wl->tx_frames[i] == NULL)
@@ -998,7 +1046,8 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
*/
info = IEEE80211_SKB_CB(skb);
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
- if (info->control.hw_key &&
+ if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
+ info->control.hw_key &&
info->control.hw_key->cipher ==
WLAN_CIPHER_SUITE_TKIP) {
int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -1024,6 +1073,11 @@ void wl1271_tx_flush(struct wl1271 *wl)
int i;
timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
+ /* only one flush should be in progress, for consistent queue state */
+ mutex_lock(&wl->flush_mutex);
+
+ wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
+
while (!time_after(jiffies, timeout)) {
mutex_lock(&wl->mutex);
wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
@@ -1032,7 +1086,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
if ((wl->tx_frames_cnt == 0) &&
(wl1271_tx_total_queue_count(wl) == 0)) {
mutex_unlock(&wl->mutex);
- return;
+ goto out;
}
mutex_unlock(&wl->mutex);
msleep(1);
@@ -1045,7 +1099,12 @@ void wl1271_tx_flush(struct wl1271 *wl)
for (i = 0; i < WL12XX_MAX_LINKS; i++)
wl1271_tx_reset_link_queues(wl, i);
mutex_unlock(&wl->mutex);
+
+out:
+ wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
+ mutex_unlock(&wl->flush_mutex);
}
+EXPORT_SYMBOL_GPL(wl1271_tx_flush);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
@@ -1054,3 +1113,96 @@ u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
return BIT(__ffs(rate_set));
}
+
+void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason)
+{
+ bool stopped = !!wl->queue_stop_reasons[queue];
+
+ /* queue should not be stopped for this reason */
+ WARN_ON(test_and_set_bit(reason, &wl->queue_stop_reasons[queue]));
+
+ if (stopped)
+ return;
+
+ ieee80211_stop_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+}
+
+void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ wlcore_stop_queue_locked(wl, queue, reason);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+
+ /* queue should not be clear for this reason */
+ WARN_ON(!test_and_clear_bit(reason, &wl->queue_stop_reasons[queue]));
+
+ if (wl->queue_stop_reasons[queue])
+ goto out;
+
+ ieee80211_wake_queue(wl->hw, wl1271_tx_get_mac80211_queue(queue));
+
+out:
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+void wlcore_stop_queues(struct wl1271 *wl,
+ enum wlcore_queue_stop_reason reason)
+{
+ int i;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wlcore_stop_queue(wl, i, reason);
+}
+EXPORT_SYMBOL_GPL(wlcore_stop_queues);
+
+void wlcore_wake_queues(struct wl1271 *wl,
+ enum wlcore_queue_stop_reason reason)
+{
+ int i;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++)
+ wlcore_wake_queue(wl, i, reason);
+}
+EXPORT_SYMBOL_GPL(wlcore_wake_queues);
+
+void wlcore_reset_stopped_queues(struct wl1271 *wl)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wl->wl_lock, flags);
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ if (!wl->queue_stop_reasons[i])
+ continue;
+
+ wl->queue_stop_reasons[i] = 0;
+ ieee80211_wake_queue(wl->hw,
+ wl1271_tx_get_mac80211_queue(i));
+ }
+
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+}
+
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason)
+{
+ return test_bit(reason, &wl->queue_stop_reasons[queue]);
+}
+
+bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue)
+{
+ return !!wl->queue_stop_reasons[queue];
+}
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 2fd6e5dc6f75..1e939b016155 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -85,6 +85,19 @@ struct wl128x_tx_mem {
u8 extra_bytes;
} __packed;
+struct wl18xx_tx_mem {
+ /*
+ * Total number of memory blocks allocated by the host for
+ * this packet.
+ */
+ u8 total_mem_blocks;
+
+ /*
+ * control bits
+ */
+ u8 ctrl;
+} __packed;
+
/*
* On wl128x based devices, when TX packets are aggregated, each packet
* size must be aligned to the SDIO block size. The maximum block size
@@ -100,6 +113,7 @@ struct wl1271_tx_hw_descr {
union {
struct wl127x_tx_mem wl127x_mem;
struct wl128x_tx_mem wl128x_mem;
+ struct wl18xx_tx_mem wl18xx_mem;
} __packed;
/* Device time (in us) when the packet arrived to the driver */
__le32 start_time;
@@ -116,7 +130,16 @@ struct wl1271_tx_hw_descr {
u8 tid;
/* host link ID (HLID) */
u8 hlid;
- u8 reserved;
+
+ union {
+ u8 wl12xx_reserved;
+
+ /*
+ * bit 0 -> 0 = udp, 1 = tcp
+ * bit 1:7 -> IP header offset
+ */
+ u8 wl18xx_checksum_data;
+ } __packed;
} __packed;
enum wl1271_tx_hw_res_status {
@@ -161,6 +184,13 @@ struct wl1271_tx_hw_res_if {
struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
} __packed;
+enum wlcore_queue_stop_reason {
+ WLCORE_QUEUE_STOP_REASON_WATERMARK,
+ WLCORE_QUEUE_STOP_REASON_FW_RESTART,
+ WLCORE_QUEUE_STOP_REASON_FLUSH,
+ WLCORE_QUEUE_STOP_REASON_SPARE_BLK, /* 18xx specific */
+};
+
static inline int wl1271_tx_get_queue(int queue)
{
switch (queue) {
@@ -204,10 +234,10 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
}
void wl1271_tx_work(struct work_struct *work);
-void wl1271_tx_work_locked(struct wl1271 *wl);
-void wl1271_tx_complete(struct wl1271 *wl);
+int wlcore_tx_work_locked(struct wl1271 *wl);
+int wlcore_tx_complete(struct wl1271 *wl);
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
@@ -223,6 +253,21 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length);
+void wl1271_free_tx_id(struct wl1271 *wl, int id);
+void wlcore_stop_queue_locked(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason);
+void wlcore_stop_queue(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason);
+void wlcore_wake_queue(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason);
+void wlcore_stop_queues(struct wl1271 *wl,
+ enum wlcore_queue_stop_reason reason);
+void wlcore_wake_queues(struct wl1271 *wl,
+ enum wlcore_queue_stop_reason reason);
+void wlcore_reset_stopped_queues(struct wl1271 *wl);
+bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl, u8 queue,
+ enum wlcore_queue_stop_reason reason);
+bool wlcore_is_queue_stopped(struct wl1271 *wl, u8 queue);
/* from main.c */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 0b3f0b586f4b..0ce7a8ebbd46 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -24,8 +24,9 @@
#include <linux/platform_device.h>
-#include "wl12xx.h"
+#include "wlcore_i.h"
#include "event.h"
+#include "boot.h"
/* The maximum number of Tx descriptors in all chip families */
#define WLCORE_MAX_TX_DESCRIPTORS 32
@@ -33,14 +34,16 @@
/* forward declaration */
struct wl1271_tx_hw_descr;
enum wl_rx_buf_align;
+struct wl1271_rx_descriptor;
struct wlcore_ops {
int (*identify_chip)(struct wl1271 *wl);
int (*identify_fw)(struct wl1271 *wl);
int (*boot)(struct wl1271 *wl);
- void (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
- void *buf, size_t len);
- void (*ack_event)(struct wl1271 *wl);
+ int (*plt_init)(struct wl1271 *wl);
+ int (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
+ void *buf, size_t len);
+ int (*ack_event)(struct wl1271 *wl);
u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks);
void (*set_tx_desc_blocks)(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
@@ -50,17 +53,34 @@ struct wlcore_ops {
struct sk_buff *skb);
enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl,
u32 rx_desc);
- void (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len);
+ int (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len);
u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data,
u32 data_len);
- void (*tx_delayed_compl)(struct wl1271 *wl);
+ int (*tx_delayed_compl)(struct wl1271 *wl);
void (*tx_immediate_compl)(struct wl1271 *wl);
int (*hw_init)(struct wl1271 *wl);
int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
- s8 (*get_pg_ver)(struct wl1271 *wl);
- void (*get_mac)(struct wl1271 *wl);
+ int (*get_pg_ver)(struct wl1271 *wl, s8 *ver);
+ int (*get_mac)(struct wl1271 *wl);
+ void (*set_tx_desc_csum)(struct wl1271 *wl,
+ struct wl1271_tx_hw_descr *desc,
+ struct sk_buff *skb);
+ void (*set_rx_csum)(struct wl1271 *wl,
+ struct wl1271_rx_descriptor *desc,
+ struct sk_buff *skb);
+ u32 (*ap_get_mimo_wide_rate_mask)(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
+ int (*debugfs_init)(struct wl1271 *wl, struct dentry *rootdir);
+ int (*handle_static_data)(struct wl1271 *wl,
+ struct wl1271_static_data *static_data);
+ int (*get_spare_blocks)(struct wl1271 *wl, bool is_gem);
+ int (*set_key)(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf);
+ u32 (*pre_pkt_send)(struct wl1271 *wl, u32 buf_offset, u32 last_len);
};
enum wlcore_partitions {
@@ -109,6 +129,15 @@ enum wlcore_registers {
REG_TABLE_LEN,
};
+struct wl1271_stats {
+ void *fw_stats;
+ unsigned long fw_stats_update;
+ size_t fw_stats_len;
+
+ unsigned int retry_count;
+ unsigned int excessive_retries;
+};
+
struct wl1271 {
struct ieee80211_hw *hw;
bool mac80211_registered;
@@ -121,13 +150,14 @@ struct wl1271 {
void (*set_power)(bool enable);
int irq;
- int ref_clock;
spinlock_t wl_lock;
enum wl1271_state state;
enum wl12xx_fw_type fw_type;
bool plt;
+ enum plt_mode plt_mode;
+ u8 fem_manuf;
u8 last_vif_count;
struct mutex mutex;
@@ -186,7 +216,7 @@ struct wl1271 {
/* Frames scheduled for transmission, not handled yet */
int tx_queue_count[NUM_TX_QUEUES];
- long stopped_queues_map;
+ unsigned long queue_stop_reasons[NUM_TX_QUEUES];
/* Frames received, not handled yet by mac80211 */
struct sk_buff_head deferred_rx_queue;
@@ -205,9 +235,6 @@ struct wl1271 {
/* FW Rx counter */
u32 rx_counter;
- /* Rx memory pool address */
- struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
-
/* Intermediate buffer, used for packet aggregation */
u8 *aggr_buf;
@@ -228,6 +255,7 @@ struct wl1271 {
/* Hardware recovery work */
struct work_struct recovery_work;
+ bool watchdog_recovery;
/* Pointer that holds DMA-friendly block for the mailbox */
struct event_mailbox *mbox;
@@ -263,7 +291,8 @@ struct wl1271 {
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
- struct wl_fw_status *fw_status;
+ struct wl_fw_status_1 *fw_status_1;
+ struct wl_fw_status_2 *fw_status_2;
struct wl1271_tx_hw_res_if *tx_res_if;
/* Current chipset configuration */
@@ -277,9 +306,7 @@ struct wl1271 {
s8 noise;
/* bands supported by this instance of wl12xx */
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
- int tcxo_clock;
+ struct ieee80211_supported_band bands[WLCORE_NUM_BANDS];
/*
* wowlan trigger was configured during suspend.
@@ -333,10 +360,8 @@ struct wl1271 {
/* number of TX descriptors the HW supports. */
u32 num_tx_desc;
-
- /* spare Tx blocks for normal/GEM operating modes */
- u32 normal_tx_spare;
- u32 gem_tx_spare;
+ /* number of RX descriptors the HW supports. */
+ u32 num_rx_desc;
/* translate HW Tx rates to standard rate-indices */
const u8 **band_rate_to_idx;
@@ -348,19 +373,57 @@ struct wl1271 {
u8 hw_min_ht_rate;
/* HW HT (11n) capabilities */
- struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
/* size of the private FW status data */
size_t fw_status_priv_len;
/* RX Data filter rule state - enabled/disabled */
bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
+
+ /* size of the private static data */
+ size_t static_data_priv_len;
+
+ /* the current channel type */
+ enum nl80211_channel_type channel_type;
+
+ /* mutex for protecting the tx_flush function */
+ struct mutex flush_mutex;
+
+ /* sleep auth value currently configured to FW */
+ int sleep_auth;
+
+ /* the minimum FW version required for the driver to work */
+ unsigned int min_fw_ver[NUM_FW_VER];
};
int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
int __devexit wlcore_remove(struct platform_device *pdev);
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size);
int wlcore_free_hw(struct wl1271 *wl);
+int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key_conf);
+
+static inline void
+wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
+}
+
+static inline void
+wlcore_set_min_fw_ver(struct wl1271 *wl, unsigned int chip,
+ unsigned int iftype, unsigned int major,
+ unsigned int subtype, unsigned int minor)
+{
+ wl->min_fw_ver[FW_VER_CHIP] = chip;
+ wl->min_fw_ver[FW_VER_IF_TYPE] = iftype;
+ wl->min_fw_ver[FW_VER_MAJOR] = major;
+ wl->min_fw_ver[FW_VER_SUBTYPE] = subtype;
+ wl->min_fw_ver[FW_VER_MINOR] = minor;
+}
/* Firmware image load chunk size */
#define CHUNK_SIZE 16384
@@ -385,6 +448,18 @@ int wlcore_free_hw(struct wl1271 *wl);
/* Some firmwares may not support ELP */
#define WLCORE_QUIRK_NO_ELP BIT(6)
+/* pad only the last frame in the aggregate buffer */
+#define WLCORE_QUIRK_TX_PAD_LAST_FRAME BIT(7)
+
+/* extra header space is required for TKIP */
+#define WLCORE_QUIRK_TKIP_HEADER_SPACE BIT(8)
+
+/* Some firmwares not support sched scans while connected */
+#define WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN BIT(9)
+
+/* separate probe response templates for one-shot and sched scans */
+#define WLCORE_QUIRK_DUAL_PROBE_TMPL BIT(10)
+
/* TODO: move to the lower drivers when all usages are abstracted */
#define CHIP_ID_1271_PG10 (0x4030101)
#define CHIP_ID_1271_PG20 (0x4030111)
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index f12bdf745180..c0505635bb00 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -22,8 +22,8 @@
*
*/
-#ifndef __WL12XX_H__
-#define __WL12XX_H__
+#ifndef __WLCORE_I_H__
+#define __WLCORE_I_H__
#include <linux/mutex.h>
#include <linux/completion.h>
@@ -35,15 +35,6 @@
#include "conf.h"
#include "ini.h"
-#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-4-mr.bin"
-#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-4-sr.bin"
-
-#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-4-mr.bin"
-#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-4-sr.bin"
-
-#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-4-plt.bin"
-#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-4-plt.bin"
-
/*
* wl127x and wl128x are using the same NVS file name. However, the
* ini parameters between them are different. The driver validates
@@ -71,6 +62,9 @@
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff
+/* the driver supports the 2.4Ghz and 5Ghz bands */
+#define WLCORE_NUM_BANDS 2
+
#define WL12XX_MAX_RATE_POLICIES 16
/* Defined by FW as 0. Will not be freed or allocated. */
@@ -89,7 +83,7 @@
#define WL1271_AP_BSS_INDEX 0
#define WL1271_AP_DEF_BEACON_EXP 20
-#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+#define WL1271_AGGR_BUFFER_SIZE (5 * PAGE_SIZE)
enum wl1271_state {
WL1271_STATE_OFF,
@@ -132,16 +126,7 @@ struct wl1271_chip {
unsigned int fw_ver[NUM_FW_VER];
};
-struct wl1271_stats {
- struct acx_statistics *fw_stats;
- unsigned long fw_stats_update;
-
- unsigned int retry_count;
- unsigned int excessive_retries;
-};
-
#define NUM_TX_QUEUES 4
-#define NUM_RX_PKT_DESC 8
#define AP_MAX_STATIONS 8
@@ -159,13 +144,26 @@ struct wl_fw_packet_counters {
} __packed;
/* FW status registers */
-struct wl_fw_status {
+struct wl_fw_status_1 {
__le32 intr;
u8 fw_rx_counter;
u8 drv_rx_counter;
u8 reserved;
u8 tx_results_counter;
- __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
+ __le32 rx_pkt_descs[0];
+} __packed;
+
+/*
+ * Each HW arch has a different number of Rx descriptors.
+ * The length of the status depends on it, since it holds an array
+ * of descriptors.
+ */
+#define WLCORE_FW_STATUS_1_LEN(num_rx_desc) \
+ (sizeof(struct wl_fw_status_1) + \
+ (sizeof(((struct wl_fw_status_1 *)0)->rx_pkt_descs[0])) * \
+ num_rx_desc)
+
+struct wl_fw_status_2 {
__le32 fw_localtime;
/*
@@ -194,11 +192,6 @@ struct wl_fw_status {
u8 priv[0];
} __packed;
-struct wl1271_rx_mem_pool_addr {
- u32 addr;
- u32 addr_extra;
-};
-
#define WL1271_MAX_CHANNELS 64
struct wl1271_scan {
struct cfg80211_scan_request *req;
@@ -210,10 +203,10 @@ struct wl1271_scan {
};
struct wl1271_if_operations {
- void (*read)(struct device *child, int addr, void *buf, size_t len,
- bool fixed);
- void (*write)(struct device *child, int addr, void *buf, size_t len,
- bool fixed);
+ int __must_check (*read)(struct device *child, int addr, void *buf,
+ size_t len, bool fixed);
+ int __must_check (*write)(struct device *child, int addr, void *buf,
+ size_t len, bool fixed);
void (*reset)(struct device *child);
void (*init)(struct device *child);
int (*power)(struct device *child, bool enable);
@@ -248,6 +241,7 @@ enum wl12xx_flags {
WL1271_FLAG_RECOVERY_IN_PROGRESS,
WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
WL1271_FLAG_INTENDED_FW_RECOVERY,
+ WL1271_FLAG_IO_FAILED,
};
enum wl12xx_vif_flags {
@@ -299,6 +293,12 @@ enum rx_filter_action {
FILTER_FW_HANDLE = 2
};
+enum plt_mode {
+ PLT_OFF = 0,
+ PLT_ON = 1,
+ PLT_FEM_DETECT = 2,
+};
+
struct wl12xx_rx_filter_field {
__le16 offset;
u8 len;
@@ -367,8 +367,9 @@ struct wl12xx_vif {
/* The current band */
enum ieee80211_band band;
int channel;
+ enum nl80211_channel_type channel_type;
- u32 bitrate_masks[IEEE80211_NUM_BANDS];
+ u32 bitrate_masks[WLCORE_NUM_BANDS];
u32 basic_rate_set;
/*
@@ -417,9 +418,6 @@ struct wl12xx_vif {
struct work_struct rx_streaming_disable_work;
struct timer_list rx_streaming_timer;
- /* does the current role use GEM for encryption (AP or STA) */
- bool is_gem;
-
/*
* This struct must be last!
* data that has to be saved acrossed reconfigs (e.g. recovery)
@@ -467,7 +465,7 @@ struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
#define wl12xx_for_each_wlvif_ap(wl, wlvif) \
wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
-int wl1271_plt_start(struct wl1271 *wl);
+int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode);
int wl1271_plt_stop(struct wl1271 *wl);
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_queue_recovery_work(struct wl1271 *wl);
@@ -501,7 +499,8 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
/* Macros to handle wl1271.sta_rate_set */
#define HW_BG_RATES_MASK 0xffff
#define HW_HT_RATES_OFFSET 16
+#define HW_MIMO_RATES_OFFSET 24
#define WL12XX_HW_BLOCK_SIZE 256
-#endif
+#endif /* __WLCORE_I_H__ */
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 117c4123943c..7ab922209b25 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -827,7 +827,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values,
static inline int zd_ioread32_locked(struct zd_chip *chip, u32 *value,
const zd_addr_t addr)
{
- return zd_ioread32v_locked(chip, value, (const zd_addr_t *)&addr, 1);
+ return zd_ioread32v_locked(chip, value, &addr, 1);
}
static inline int zd_iowrite16_locked(struct zd_chip *chip, u16 value,
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.h b/drivers/net/wireless/zd1211rw/zd_usb.h
index 99193b456a79..45e3bb28a01c 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.h
+++ b/drivers/net/wireless/zd1211rw/zd_usb.h
@@ -274,7 +274,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
static inline int zd_usb_ioread16(struct zd_usb *usb, u16 *value,
const zd_addr_t addr)
{
- return zd_usb_ioread16v(usb, value, (const zd_addr_t *)&addr, 1);
+ return zd_usb_ioread16v(usb, value, &addr, 1);
}
void zd_usb_iowrite16v_async_start(struct zd_usb *usb);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f4a6fcaeffb1..682633bfe00f 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1363,8 +1363,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
INVALID_PENDING_IDX);
}
- __skb_queue_tail(&netbk->tx_queue, skb);
-
netbk->pending_cons++;
request_gop = xen_netbk_get_requests(netbk, vif,
@@ -1376,6 +1374,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
}
gop = request_gop;
+ __skb_queue_tail(&netbk->tx_queue, skb);
+
vif->tx.req_cons = idx;
xen_netbk_check_rx_xenvif(vif);
diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
index 1f74a77d040d..e7fd4938f9bc 100644
--- a/drivers/nfc/nfcwilink.c
+++ b/drivers/nfc/nfcwilink.c
@@ -535,9 +535,10 @@ static int nfcwilink_probe(struct platform_device *pdev)
drv->pdev = pdev;
protocols = NFC_PROTO_JEWEL_MASK
- | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
- | NFC_PROTO_ISO14443_MASK
- | NFC_PROTO_NFC_DEP_MASK;
+ | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
+ | NFC_PROTO_ISO14443_MASK
+ | NFC_PROTO_ISO14443_B_MASK
+ | NFC_PROTO_NFC_DEP_MASK;
drv->ndev = nci_allocate_device(&nfcwilink_ops,
protocols,
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 19110f0eb15f..d606f52fec84 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -38,13 +38,51 @@
#define SCM_VENDOR_ID 0x4E6
#define SCL3711_PRODUCT_ID 0x5591
+#define SONY_VENDOR_ID 0x054c
+#define PASORI_PRODUCT_ID 0x02e1
+
+#define PN533_QUIRKS_TYPE_A BIT(0)
+#define PN533_QUIRKS_TYPE_F BIT(1)
+#define PN533_QUIRKS_DEP BIT(2)
+#define PN533_QUIRKS_RAW_EXCHANGE BIT(3)
+
+#define PN533_DEVICE_STD 0x1
+#define PN533_DEVICE_PASORI 0x2
+
+#define PN533_ALL_PROTOCOLS (NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK |\
+ NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK |\
+ NFC_PROTO_NFC_DEP_MASK |\
+ NFC_PROTO_ISO14443_B_MASK)
+
+#define PN533_NO_TYPE_B_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \
+ NFC_PROTO_MIFARE_MASK | \
+ NFC_PROTO_FELICA_MASK | \
+ NFC_PROTO_ISO14443_MASK | \
+ NFC_PROTO_NFC_DEP_MASK)
+
static const struct usb_device_id pn533_table[] = {
- { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID) },
- { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID) },
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = PN533_VENDOR_ID,
+ .idProduct = PN533_PRODUCT_ID,
+ .driver_info = PN533_DEVICE_STD,
+ },
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = SCM_VENDOR_ID,
+ .idProduct = SCL3711_PRODUCT_ID,
+ .driver_info = PN533_DEVICE_STD,
+ },
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = SONY_VENDOR_ID,
+ .idProduct = PASORI_PRODUCT_ID,
+ .driver_info = PN533_DEVICE_PASORI,
+ },
{ }
};
MODULE_DEVICE_TABLE(usb, pn533_table);
+/* How much time we spend listening for initiators */
+#define PN533_LISTEN_TIME 2
+
/* frame definitions */
#define PN533_FRAME_TAIL_SIZE 2
#define PN533_FRAME_SIZE(f) (sizeof(struct pn533_frame) + f->datalen + \
@@ -69,11 +107,16 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
#define PN533_CMD_RF_CONFIGURATION 0x32
#define PN533_CMD_IN_DATA_EXCHANGE 0x40
+#define PN533_CMD_IN_COMM_THRU 0x42
#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
#define PN533_CMD_IN_ATR 0x50
#define PN533_CMD_IN_RELEASE 0x52
#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
+#define PN533_CMD_TG_INIT_AS_TARGET 0x8c
+#define PN533_CMD_TG_GET_DATA 0x86
+#define PN533_CMD_TG_SET_DATA 0x8e
+
#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
/* PN533 Return codes */
@@ -81,6 +124,9 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_MI_MASK 0x40
#define PN533_CMD_RET_SUCCESS 0x00
+/* PN533 status codes */
+#define PN533_STATUS_TARGET_RELEASED 0x29
+
struct pn533;
typedef int (*pn533_cmd_complete_t) (struct pn533 *dev, void *arg,
@@ -97,7 +143,14 @@ struct pn533_fw_version {
};
/* PN533_CMD_RF_CONFIGURATION */
+#define PN533_CFGITEM_TIMING 0x02
#define PN533_CFGITEM_MAX_RETRIES 0x05
+#define PN533_CFGITEM_PASORI 0x82
+
+#define PN533_CONFIG_TIMING_102 0xb
+#define PN533_CONFIG_TIMING_204 0xc
+#define PN533_CONFIG_TIMING_409 0xd
+#define PN533_CONFIG_TIMING_819 0xe
#define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00
#define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF
@@ -108,6 +161,12 @@ struct pn533_config_max_retries {
u8 mx_rty_passive_act;
} __packed;
+struct pn533_config_timing {
+ u8 rfu;
+ u8 atr_res_timeout;
+ u8 dep_timeout;
+} __packed;
+
/* PN533_CMD_IN_LIST_PASSIVE_TARGET */
/* felica commands opcode */
@@ -144,6 +203,7 @@ enum {
PN533_POLL_MOD_424KBPS_FELICA,
PN533_POLL_MOD_106KBPS_JEWEL,
PN533_POLL_MOD_847KBPS_B,
+ PN533_LISTEN_MOD,
__PN533_POLL_MOD_AFTER_LAST,
};
@@ -211,6 +271,9 @@ const struct pn533_poll_modulations poll_mod[] = {
},
.len = 3,
},
+ [PN533_LISTEN_MOD] = {
+ .len = 0,
+ },
};
/* PN533_CMD_IN_ATR */
@@ -237,7 +300,7 @@ struct pn533_cmd_jump_dep {
u8 active;
u8 baud;
u8 next;
- u8 gt[];
+ u8 data[];
} __packed;
struct pn533_cmd_jump_dep_response {
@@ -253,6 +316,29 @@ struct pn533_cmd_jump_dep_response {
u8 gt[];
} __packed;
+
+/* PN533_TG_INIT_AS_TARGET */
+#define PN533_INIT_TARGET_PASSIVE 0x1
+#define PN533_INIT_TARGET_DEP 0x2
+
+#define PN533_INIT_TARGET_RESP_FRAME_MASK 0x3
+#define PN533_INIT_TARGET_RESP_ACTIVE 0x1
+#define PN533_INIT_TARGET_RESP_DEP 0x4
+
+struct pn533_cmd_init_target {
+ u8 mode;
+ u8 mifare[6];
+ u8 felica[18];
+ u8 nfcid3[10];
+ u8 gb_len;
+ u8 gb[];
+} __packed;
+
+struct pn533_cmd_init_target_response {
+ u8 mode;
+ u8 cmd[];
+} __packed;
+
struct pn533 {
struct usb_device *udev;
struct usb_interface *interface;
@@ -270,22 +356,33 @@ struct pn533 {
struct workqueue_struct *wq;
struct work_struct cmd_work;
+ struct work_struct poll_work;
struct work_struct mi_work;
+ struct work_struct tg_work;
+ struct timer_list listen_timer;
struct pn533_frame *wq_in_frame;
int wq_in_error;
+ int cancel_listen;
pn533_cmd_complete_t cmd_complete;
void *cmd_complete_arg;
- struct semaphore cmd_lock;
+ struct mutex cmd_lock;
u8 cmd;
struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
u8 poll_mod_count;
u8 poll_mod_curr;
u32 poll_protocols;
+ u32 listen_protocols;
+
+ u8 *gb;
+ size_t gb_len;
u8 tgt_available_prots;
u8 tgt_active_prot;
+ u8 tgt_mode;
+
+ u32 device_type;
};
struct pn533_frame {
@@ -405,7 +502,7 @@ static void pn533_wq_cmd_complete(struct work_struct *work)
PN533_FRAME_CMD_PARAMS_LEN(in_frame));
if (rc != -EINPROGRESS)
- up(&dev->cmd_lock);
+ mutex_unlock(&dev->cmd_lock);
}
static void pn533_recv_response(struct urb *urb)
@@ -583,7 +680,7 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (down_trylock(&dev->cmd_lock))
+ if (!mutex_trylock(&dev->cmd_lock))
return -EBUSY;
rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
@@ -593,7 +690,7 @@ static int pn533_send_cmd_frame_async(struct pn533 *dev,
return 0;
error:
- up(&dev->cmd_lock);
+ mutex_unlock(&dev->cmd_lock);
return rc;
}
@@ -892,7 +989,7 @@ static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data,
if (!pn533_target_type_b_is_valid(tgt_type_b, tgt_data_len))
return -EPROTO;
- nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK;
+ nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
return 0;
}
@@ -963,6 +1060,11 @@ static int pn533_target_found(struct pn533 *dev,
return 0;
}
+static inline void pn533_poll_next_mod(struct pn533 *dev)
+{
+ dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count;
+}
+
static void pn533_poll_reset_mod_list(struct pn533 *dev)
{
dev->poll_mod_count = 0;
@@ -975,102 +1077,283 @@ static void pn533_poll_add_mod(struct pn533 *dev, u8 mod_index)
dev->poll_mod_count++;
}
-static void pn533_poll_create_mod_list(struct pn533 *dev, u32 protocols)
+static void pn533_poll_create_mod_list(struct pn533 *dev,
+ u32 im_protocols, u32 tm_protocols)
{
pn533_poll_reset_mod_list(dev);
- if (protocols & NFC_PROTO_MIFARE_MASK
- || protocols & NFC_PROTO_ISO14443_MASK
- || protocols & NFC_PROTO_NFC_DEP_MASK)
+ if (im_protocols & NFC_PROTO_MIFARE_MASK
+ || im_protocols & NFC_PROTO_ISO14443_MASK
+ || im_protocols & NFC_PROTO_NFC_DEP_MASK)
pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A);
- if (protocols & NFC_PROTO_FELICA_MASK
- || protocols & NFC_PROTO_NFC_DEP_MASK) {
+ if (im_protocols & NFC_PROTO_FELICA_MASK
+ || im_protocols & NFC_PROTO_NFC_DEP_MASK) {
pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA);
pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA);
}
- if (protocols & NFC_PROTO_JEWEL_MASK)
+ if (im_protocols & NFC_PROTO_JEWEL_MASK)
pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL);
- if (protocols & NFC_PROTO_ISO14443_MASK)
+ if (im_protocols & NFC_PROTO_ISO14443_B_MASK)
pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B);
+
+ if (tm_protocols)
+ pn533_poll_add_mod(dev, PN533_LISTEN_MOD);
}
-static void pn533_start_poll_frame(struct pn533_frame *frame,
- struct pn533_poll_modulations *mod)
+static int pn533_start_poll_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
{
+ struct pn533_poll_response *resp;
+ int rc;
- pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET);
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len);
- frame->datalen += mod->len;
+ resp = (struct pn533_poll_response *) params;
+ if (resp->nbtg) {
+ rc = pn533_target_found(dev, resp, params_len);
+
+ /* We must stop the poll after a valid target found */
+ if (rc == 0) {
+ pn533_poll_reset_mod_list(dev);
+ return 0;
+ }
+ }
+
+ return -EAGAIN;
+}
+
+static int pn533_init_target_frame(struct pn533_frame *frame,
+ u8 *gb, size_t gb_len)
+{
+ struct pn533_cmd_init_target *cmd;
+ size_t cmd_len;
+ u8 felica_params[18] = {0x1, 0xfe, /* DEP */
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, /* random */
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0xff, 0xff}; /* System code */
+ u8 mifare_params[6] = {0x1, 0x1, /* SENS_RES */
+ 0x0, 0x0, 0x0,
+ 0x40}; /* SEL_RES for DEP */
+
+ cmd_len = sizeof(struct pn533_cmd_init_target) + gb_len + 1;
+ cmd = kzalloc(cmd_len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ pn533_tx_frame_init(frame, PN533_CMD_TG_INIT_AS_TARGET);
+
+ /* DEP support only */
+ cmd->mode |= PN533_INIT_TARGET_DEP;
+
+ /* Felica params */
+ memcpy(cmd->felica, felica_params, 18);
+ get_random_bytes(cmd->felica + 2, 6);
+
+ /* NFCID3 */
+ memset(cmd->nfcid3, 0, 10);
+ memcpy(cmd->nfcid3, cmd->felica, 8);
+
+ /* MIFARE params */
+ memcpy(cmd->mifare, mifare_params, 6);
+
+ /* General bytes */
+ cmd->gb_len = gb_len;
+ memcpy(cmd->gb, gb, gb_len);
+
+ /* Len Tk */
+ cmd->gb[gb_len] = 0;
+
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), cmd, cmd_len);
+
+ frame->datalen += cmd_len;
pn533_tx_frame_finish(frame);
+
+ kfree(cmd);
+
+ return 0;
}
-static int pn533_start_poll_complete(struct pn533 *dev, void *arg,
- u8 *params, int params_len)
+#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
+#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
{
- struct pn533_poll_response *resp;
- struct pn533_poll_modulations *next_mod;
- int rc;
+ struct sk_buff *skb_resp = arg;
+ struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (params_len == -ENOENT) {
- nfc_dev_dbg(&dev->interface->dev, "Polling operation has been"
- " stopped");
- goto stop_poll;
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when starting as a target",
+ params_len);
+
+ return params_len;
+ }
+
+ if (params_len > 0 && params[0] != 0) {
+ nfc_tm_deactivated(dev->nfc_dev);
+
+ dev->tgt_mode = 0;
+
+ kfree_skb(skb_resp);
+ return 0;
}
+ skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
+ skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN);
+ skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE);
+
+ return nfc_tm_data_received(dev->nfc_dev, skb_resp);
+}
+
+static void pn533_wq_tg_get_data(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, tg_work);
+ struct pn533_frame *in_frame;
+ struct sk_buff *skb_resp;
+ size_t skb_resp_len;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
+ PN533_CMD_DATAEXCH_DATA_MAXLEN +
+ PN533_FRAME_TAIL_SIZE;
+
+ skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
+ if (!skb_resp)
+ return;
+
+ in_frame = (struct pn533_frame *)skb_resp->data;
+
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_TG_GET_DATA);
+ pn533_tx_frame_finish(dev->out_frame);
+
+ pn533_send_cmd_frame_async(dev, dev->out_frame, in_frame,
+ skb_resp_len,
+ pn533_tm_get_data_complete,
+ skb_resp, GFP_KERNEL);
+
+ return;
+}
+
+#define ATR_REQ_GB_OFFSET 17
+static int pn533_init_target_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ struct pn533_cmd_init_target_response *resp;
+ u8 frame, comm_mode = NFC_COMM_PASSIVE, *gb;
+ size_t gb_len;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
if (params_len < 0) {
- nfc_dev_err(&dev->interface->dev, "Error %d when running poll",
- params_len);
- goto stop_poll;
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when starting as a target",
+ params_len);
+
+ return params_len;
}
- resp = (struct pn533_poll_response *) params;
- if (resp->nbtg) {
- rc = pn533_target_found(dev, resp, params_len);
+ if (params_len < ATR_REQ_GB_OFFSET + 1)
+ return -EINVAL;
- /* We must stop the poll after a valid target found */
- if (rc == 0)
- goto stop_poll;
+ resp = (struct pn533_cmd_init_target_response *) params;
+
+ nfc_dev_dbg(&dev->interface->dev, "Target mode 0x%x param len %d\n",
+ resp->mode, params_len);
+
+ frame = resp->mode & PN533_INIT_TARGET_RESP_FRAME_MASK;
+ if (frame == PN533_INIT_TARGET_RESP_ACTIVE)
+ comm_mode = NFC_COMM_ACTIVE;
- if (rc != -EAGAIN)
- nfc_dev_err(&dev->interface->dev, "The target found is"
- " not valid - continuing to poll");
+ /* Again, only DEP */
+ if ((resp->mode & PN533_INIT_TARGET_RESP_DEP) == 0)
+ return -EOPNOTSUPP;
+
+ gb = resp->cmd + ATR_REQ_GB_OFFSET;
+ gb_len = params_len - (ATR_REQ_GB_OFFSET + 1);
+
+ rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK,
+ comm_mode, gb, gb_len);
+ if (rc < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error when signaling target activation");
+ return rc;
}
- dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count;
+ dev->tgt_mode = 1;
- next_mod = dev->poll_mod_active[dev->poll_mod_curr];
+ queue_work(dev->wq, &dev->tg_work);
- nfc_dev_dbg(&dev->interface->dev, "Polling next modulation (0x%x)",
- dev->poll_mod_curr);
+ return 0;
+}
- pn533_start_poll_frame(dev->out_frame, next_mod);
+static void pn533_listen_mode_timer(unsigned long data)
+{
+ struct pn533 *dev = (struct pn533 *) data;
- /* Don't need to down the semaphore again */
- rc = __pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen, pn533_start_poll_complete,
- NULL, GFP_ATOMIC);
+ nfc_dev_dbg(&dev->interface->dev, "Listen mode timeout");
+
+ /* An ack will cancel the last issued command (poll) */
+ pn533_send_ack(dev, GFP_ATOMIC);
+
+ dev->cancel_listen = 1;
+
+ mutex_unlock(&dev->cmd_lock);
+
+ pn533_poll_next_mod(dev);
+
+ queue_work(dev->wq, &dev->poll_work);
+}
+
+static int pn533_poll_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ struct pn533_poll_modulations *cur_mod;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (params_len == -ENOENT) {
+ if (dev->poll_mod_count != 0)
+ return 0;
+
+ nfc_dev_err(&dev->interface->dev,
+ "Polling operation has been stopped");
- if (rc == -EPERM) {
- nfc_dev_dbg(&dev->interface->dev, "Cannot poll next modulation"
- " because poll has been stopped");
goto stop_poll;
}
- if (rc) {
- nfc_dev_err(&dev->interface->dev, "Error %d when trying to poll"
- " next modulation", rc);
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when running poll", params_len);
+
goto stop_poll;
}
- /* Inform caller function to do not up the semaphore */
- return -EINPROGRESS;
+ cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+ if (cur_mod->len == 0) {
+ del_timer(&dev->listen_timer);
+
+ return pn533_init_target_complete(dev, arg, params, params_len);
+ } else {
+ rc = pn533_start_poll_complete(dev, arg, params, params_len);
+ if (!rc)
+ return rc;
+ }
+
+ pn533_poll_next_mod(dev);
+
+ queue_work(dev->wq, &dev->poll_work);
+
+ return 0;
stop_poll:
pn533_poll_reset_mod_list(dev);
@@ -1078,61 +1361,104 @@ stop_poll:
return 0;
}
-static int pn533_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
+static void pn533_build_poll_frame(struct pn533 *dev,
+ struct pn533_frame *frame,
+ struct pn533_poll_modulations *mod)
{
- struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- struct pn533_poll_modulations *start_mod;
- int rc;
+ nfc_dev_dbg(&dev->interface->dev, "mod len %d\n", mod->len);
- nfc_dev_dbg(&dev->interface->dev, "%s - protocols=0x%x", __func__,
- protocols);
+ if (mod->len == 0) {
+ /* Listen mode */
+ pn533_init_target_frame(frame, dev->gb, dev->gb_len);
+ } else {
+ /* Polling mode */
+ pn533_tx_frame_init(frame, PN533_CMD_IN_LIST_PASSIVE_TARGET);
- if (dev->poll_mod_count) {
- nfc_dev_err(&dev->interface->dev, "Polling operation already"
- " active");
- return -EBUSY;
- }
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(frame), &mod->data, mod->len);
+ frame->datalen += mod->len;
- if (dev->tgt_active_prot) {
- nfc_dev_err(&dev->interface->dev, "Cannot poll with a target"
- " already activated");
- return -EBUSY;
+ pn533_tx_frame_finish(frame);
}
+}
+
+static int pn533_send_poll_frame(struct pn533 *dev)
+{
+ struct pn533_poll_modulations *cur_mod;
+ int rc;
- pn533_poll_create_mod_list(dev, protocols);
+ cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
- if (!dev->poll_mod_count) {
- nfc_dev_err(&dev->interface->dev, "No valid protocols"
- " specified");
- rc = -EINVAL;
- goto error;
+ pn533_build_poll_frame(dev, dev->out_frame, cur_mod);
+
+ rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen, pn533_poll_complete,
+ NULL, GFP_KERNEL);
+ if (rc)
+ nfc_dev_err(&dev->interface->dev, "Polling loop error %d", rc);
+
+ return rc;
+}
+
+static void pn533_wq_poll(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, poll_work);
+ struct pn533_poll_modulations *cur_mod;
+ int rc;
+
+ cur_mod = dev->poll_mod_active[dev->poll_mod_curr];
+
+ nfc_dev_dbg(&dev->interface->dev,
+ "%s cancel_listen %d modulation len %d",
+ __func__, dev->cancel_listen, cur_mod->len);
+
+ if (dev->cancel_listen == 1) {
+ dev->cancel_listen = 0;
+ usb_kill_urb(dev->in_urb);
}
- nfc_dev_dbg(&dev->interface->dev, "It will poll %d modulations types",
- dev->poll_mod_count);
+ rc = pn533_send_poll_frame(dev);
+ if (rc)
+ return;
- dev->poll_mod_curr = 0;
- start_mod = dev->poll_mod_active[dev->poll_mod_curr];
+ if (cur_mod->len == 0 && dev->poll_mod_count > 1)
+ mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ);
- pn533_start_poll_frame(dev->out_frame, start_mod);
+ return;
+}
- rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
- dev->in_maxlen, pn533_start_poll_complete,
- NULL, GFP_KERNEL);
+static int pn533_start_poll(struct nfc_dev *nfc_dev,
+ u32 im_protocols, u32 tm_protocols)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- if (rc) {
- nfc_dev_err(&dev->interface->dev, "Error %d when trying to"
- " start poll", rc);
- goto error;
+ nfc_dev_dbg(&dev->interface->dev,
+ "%s: im protocols 0x%x tm protocols 0x%x",
+ __func__, im_protocols, tm_protocols);
+
+ if (dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev,
+ "Cannot poll with a target already activated");
+ return -EBUSY;
}
- dev->poll_protocols = protocols;
+ if (dev->tgt_mode) {
+ nfc_dev_err(&dev->interface->dev,
+ "Cannot poll while already being activated");
+ return -EBUSY;
+ }
- return 0;
+ if (tm_protocols) {
+ dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
+ if (dev->gb == NULL)
+ tm_protocols = 0;
+ }
-error:
- pn533_poll_reset_mod_list(dev);
- return rc;
+ dev->poll_mod_curr = 0;
+ pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
+ dev->poll_protocols = im_protocols;
+ dev->listen_protocols = tm_protocols;
+
+ return pn533_send_poll_frame(dev);
}
static void pn533_stop_poll(struct nfc_dev *nfc_dev)
@@ -1141,6 +1467,8 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+ del_timer(&dev->listen_timer);
+
if (!dev->poll_mod_count) {
nfc_dev_dbg(&dev->interface->dev, "Polling operation was not"
" running");
@@ -1152,6 +1480,8 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
/* prevent pn533_start_poll_complete to issue a new poll meanwhile */
usb_kill_urb(dev->in_urb);
+
+ pn533_poll_reset_mod_list(dev);
}
static int pn533_activate_target_nfcdep(struct pn533 *dev)
@@ -1349,13 +1679,29 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
return 0;
}
+static int pn533_mod_to_baud(struct pn533 *dev)
+{
+ switch (dev->poll_mod_curr) {
+ case PN533_POLL_MOD_106KBPS_A:
+ return 0;
+ case PN533_POLL_MOD_212KBPS_FELICA:
+ return 1;
+ case PN533_POLL_MOD_424KBPS_FELICA:
+ return 2;
+ default:
+ return -EINVAL;
+ }
+}
+
+#define PASSIVE_DATA_LEN 5
static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 comm_mode, u8* gb, size_t gb_len)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct pn533_cmd_jump_dep *cmd;
- u8 cmd_len;
- int rc;
+ u8 cmd_len, *data_ptr;
+ u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
+ int rc, baud;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
@@ -1371,7 +1717,17 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
return -EBUSY;
}
+ baud = pn533_mod_to_baud(dev);
+ if (baud < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Invalid curr modulation %d", dev->poll_mod_curr);
+ return baud;
+ }
+
cmd_len = sizeof(struct pn533_cmd_jump_dep) + gb_len;
+ if (comm_mode == NFC_COMM_PASSIVE)
+ cmd_len += PASSIVE_DATA_LEN;
+
cmd = kzalloc(cmd_len, GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
@@ -1379,10 +1735,18 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
cmd->active = !comm_mode;
- cmd->baud = 0;
+ cmd->next = 0;
+ cmd->baud = baud;
+ data_ptr = cmd->data;
+ if (comm_mode == NFC_COMM_PASSIVE && cmd->baud > 0) {
+ memcpy(data_ptr, passive_data, PASSIVE_DATA_LEN);
+ cmd->next |= 1;
+ data_ptr += PASSIVE_DATA_LEN;
+ }
+
if (gb != NULL && gb_len > 0) {
- cmd->next = 4; /* We have some Gi */
- memcpy(cmd->gt, gb, gb_len);
+ cmd->next |= 4; /* We have some Gi */
+ memcpy(data_ptr, gb, gb_len);
} else {
cmd->next = 0;
}
@@ -1407,15 +1771,25 @@ out:
static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
{
- pn533_deactivate_target(nfc_dev, 0);
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+
+ pn533_poll_reset_mod_list(dev);
+
+ if (dev->tgt_mode || dev->tgt_active_prot) {
+ pn533_send_ack(dev, GFP_KERNEL);
+ usb_kill_urb(dev->in_urb);
+ }
+
+ dev->tgt_active_prot = 0;
+ dev->tgt_mode = 0;
+
+ skb_queue_purge(&dev->resp_q);
return 0;
}
-#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
-#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
-
-static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
+static int pn533_build_tx_frame(struct pn533 *dev, struct sk_buff *skb,
+ bool target)
{
int payload_len = skb->len;
struct pn533_frame *out_frame;
@@ -1432,14 +1806,37 @@ static int pn533_data_exchange_tx_frame(struct pn533 *dev, struct sk_buff *skb)
return -ENOSYS;
}
- skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN);
- out_frame = (struct pn533_frame *) skb->data;
+ if (target == true) {
+ switch (dev->device_type) {
+ case PN533_DEVICE_PASORI:
+ if (dev->tgt_active_prot == NFC_PROTO_FELICA) {
+ skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN - 1);
+ out_frame = (struct pn533_frame *) skb->data;
+ pn533_tx_frame_init(out_frame,
+ PN533_CMD_IN_COMM_THRU);
+
+ break;
+ }
+
+ default:
+ skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN);
+ out_frame = (struct pn533_frame *) skb->data;
+ pn533_tx_frame_init(out_frame,
+ PN533_CMD_IN_DATA_EXCHANGE);
+ tg = 1;
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame),
+ &tg, sizeof(u8));
+ out_frame->datalen += sizeof(u8);
+
+ break;
+ }
- pn533_tx_frame_init(out_frame, PN533_CMD_IN_DATA_EXCHANGE);
+ } else {
+ skb_push(skb, PN533_CMD_DATAEXCH_HEAD_LEN - 1);
+ out_frame = (struct pn533_frame *) skb->data;
+ pn533_tx_frame_init(out_frame, PN533_CMD_TG_SET_DATA);
+ }
- tg = 1;
- memcpy(PN533_FRAME_CMD_PARAMS_PTR(out_frame), &tg, sizeof(u8));
- out_frame->datalen += sizeof(u8);
/* The data is already in the out_frame, just update the datalen */
out_frame->datalen += payload_len;
@@ -1550,9 +1947,9 @@ error:
return 0;
}
-static int pn533_data_exchange(struct nfc_dev *nfc_dev,
- struct nfc_target *target, struct sk_buff *skb,
- data_exchange_cb_t cb, void *cb_context)
+static int pn533_transceive(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct pn533_frame *out_frame, *in_frame;
@@ -1570,7 +1967,7 @@ static int pn533_data_exchange(struct nfc_dev *nfc_dev,
goto error;
}
- rc = pn533_data_exchange_tx_frame(dev, skb);
+ rc = pn533_build_tx_frame(dev, skb, true);
if (rc)
goto error;
@@ -1618,6 +2015,63 @@ error:
return rc;
}
+static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when sending data",
+ params_len);
+
+ return params_len;
+ }
+
+ if (params_len > 0 && params[0] != 0) {
+ nfc_tm_deactivated(dev->nfc_dev);
+
+ dev->tgt_mode = 0;
+
+ return 0;
+ }
+
+ queue_work(dev->wq, &dev->tg_work);
+
+ return 0;
+}
+
+static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct pn533_frame *out_frame;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ rc = pn533_build_tx_frame(dev, skb, false);
+ if (rc)
+ goto error;
+
+ out_frame = (struct pn533_frame *) skb->data;
+
+ rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame,
+ dev->in_maxlen, pn533_tm_send_complete,
+ NULL, GFP_KERNEL);
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when trying to send data", rc);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ kfree_skb(skb);
+
+ return rc;
+}
+
static void pn533_wq_mi_recv(struct work_struct *work)
{
struct pn533 *dev = container_of(work, struct pn533, mi_work);
@@ -1638,7 +2092,7 @@ static void pn533_wq_mi_recv(struct work_struct *work)
skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN);
- rc = pn533_data_exchange_tx_frame(dev, skb_cmd);
+ rc = pn533_build_tx_frame(dev, skb_cmd, true);
if (rc)
goto error_frame;
@@ -1677,7 +2131,7 @@ error_cmd:
kfree(arg);
- up(&dev->cmd_lock);
+ mutex_unlock(&dev->cmd_lock);
}
static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
@@ -1703,7 +2157,28 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
return rc;
}
-struct nfc_ops pn533_nfc_ops = {
+static int pn533_fw_reset(struct pn533 *dev)
+{
+ int rc;
+ u8 *params;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ pn533_tx_frame_init(dev->out_frame, 0x18);
+
+ params = PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame);
+ params[0] = 0x1;
+ dev->out_frame->datalen += 1;
+
+ pn533_tx_frame_finish(dev->out_frame);
+
+ rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen);
+
+ return rc;
+}
+
+static struct nfc_ops pn533_nfc_ops = {
.dev_up = NULL,
.dev_down = NULL,
.dep_link_up = pn533_dep_link_up,
@@ -1712,9 +2187,88 @@ struct nfc_ops pn533_nfc_ops = {
.stop_poll = pn533_stop_poll,
.activate_target = pn533_activate_target,
.deactivate_target = pn533_deactivate_target,
- .data_exchange = pn533_data_exchange,
+ .im_transceive = pn533_transceive,
+ .tm_send = pn533_tm_send,
};
+static int pn533_setup(struct pn533 *dev)
+{
+ struct pn533_config_max_retries max_retries;
+ struct pn533_config_timing timing;
+ u8 pasori_cfg[3] = {0x08, 0x01, 0x08};
+ int rc;
+
+ switch (dev->device_type) {
+ case PN533_DEVICE_STD:
+ max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS;
+ max_retries.mx_rty_psl = 2;
+ max_retries.mx_rty_passive_act =
+ PN533_CONFIG_MAX_RETRIES_NO_RETRY;
+
+ timing.rfu = PN533_CONFIG_TIMING_102;
+ timing.atr_res_timeout = PN533_CONFIG_TIMING_204;
+ timing.dep_timeout = PN533_CONFIG_TIMING_409;
+
+ break;
+
+ case PN533_DEVICE_PASORI:
+ max_retries.mx_rty_atr = 0x2;
+ max_retries.mx_rty_psl = 0x1;
+ max_retries.mx_rty_passive_act =
+ PN533_CONFIG_MAX_RETRIES_NO_RETRY;
+
+ timing.rfu = PN533_CONFIG_TIMING_102;
+ timing.atr_res_timeout = PN533_CONFIG_TIMING_102;
+ timing.dep_timeout = PN533_CONFIG_TIMING_204;
+
+ break;
+
+ default:
+ nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
+ dev->device_type);
+ return -EINVAL;
+ }
+
+ rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
+ (u8 *)&max_retries, sizeof(max_retries));
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error on setting MAX_RETRIES config");
+ return rc;
+ }
+
+
+ rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING,
+ (u8 *)&timing, sizeof(timing));
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error on setting RF timings");
+ return rc;
+ }
+
+ switch (dev->device_type) {
+ case PN533_DEVICE_STD:
+ break;
+
+ case PN533_DEVICE_PASORI:
+ pn533_fw_reset(dev);
+
+ rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI,
+ pasori_cfg, 3);
+ if (rc) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error while settings PASORI config");
+ return rc;
+ }
+
+ pn533_fw_reset(dev);
+
+ break;
+ }
+
+ return 0;
+}
+
static int pn533_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -1722,7 +2276,6 @@ static int pn533_probe(struct usb_interface *interface,
struct pn533 *dev;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor *endpoint;
- struct pn533_config_max_retries max_retries;
int in_endpoint = 0;
int out_endpoint = 0;
int rc = -ENOMEM;
@@ -1735,7 +2288,7 @@ static int pn533_probe(struct usb_interface *interface,
dev->udev = usb_get_dev(interface_to_usbdev(interface));
dev->interface = interface;
- sema_init(&dev->cmd_lock, 1);
+ mutex_init(&dev->cmd_lock);
iface_desc = interface->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
@@ -1779,12 +2332,18 @@ static int pn533_probe(struct usb_interface *interface,
INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete);
INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
+ INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
+ INIT_WORK(&dev->poll_work, pn533_wq_poll);
dev->wq = alloc_workqueue("pn533",
WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
1);
if (dev->wq == NULL)
goto error;
+ init_timer(&dev->listen_timer);
+ dev->listen_timer.data = (unsigned long) dev;
+ dev->listen_timer.function = pn533_listen_mode_timer;
+
skb_queue_head_init(&dev->resp_q);
usb_set_intfdata(interface, dev);
@@ -1802,10 +2361,22 @@ static int pn533_probe(struct usb_interface *interface,
nfc_dev_info(&dev->interface->dev, "NXP PN533 firmware ver %d.%d now"
" attached", fw_ver->ver, fw_ver->rev);
- protocols = NFC_PROTO_JEWEL_MASK
- | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK
- | NFC_PROTO_ISO14443_MASK
- | NFC_PROTO_NFC_DEP_MASK;
+ dev->device_type = id->driver_info;
+ switch (dev->device_type) {
+ case PN533_DEVICE_STD:
+ protocols = PN533_ALL_PROTOCOLS;
+ break;
+
+ case PN533_DEVICE_PASORI:
+ protocols = PN533_NO_TYPE_B_PROTOCOLS;
+ break;
+
+ default:
+ nfc_dev_err(&dev->interface->dev, "Unknown device type %d\n",
+ dev->device_type);
+ rc = -EINVAL;
+ goto destroy_wq;
+ }
dev->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols,
PN533_CMD_DATAEXCH_HEAD_LEN,
@@ -1820,23 +2391,18 @@ static int pn533_probe(struct usb_interface *interface,
if (rc)
goto free_nfc_dev;
- max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS;
- max_retries.mx_rty_psl = 2;
- max_retries.mx_rty_passive_act = PN533_CONFIG_MAX_RETRIES_NO_RETRY;
-
- rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES,
- (u8 *) &max_retries, sizeof(max_retries));
-
- if (rc) {
- nfc_dev_err(&dev->interface->dev, "Error on setting MAX_RETRIES"
- " config");
- goto free_nfc_dev;
- }
+ rc = pn533_setup(dev);
+ if (rc)
+ goto unregister_nfc_dev;
return 0;
+unregister_nfc_dev:
+ nfc_unregister_device(dev->nfc_dev);
+
free_nfc_dev:
nfc_free_device(dev->nfc_dev);
+
destroy_wq:
destroy_workqueue(dev->wq);
error:
@@ -1865,6 +2431,8 @@ static void pn533_disconnect(struct usb_interface *interface)
skb_queue_purge(&dev->resp_q);
+ del_timer(&dev->listen_timer);
+
kfree(dev->in_frame);
usb_free_urb(dev->in_urb);
kfree(dev->out_frame);
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
index 281f18c2fb82..aa71807189ba 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544_hci.c
@@ -108,16 +108,22 @@ enum pn544_state {
#define PN544_NFC_WI_MGMT_GATE 0xA1
-static u8 pn544_custom_gates[] = {
- PN544_SYS_MGMT_GATE,
- PN544_SWP_MGMT_GATE,
- PN544_POLLING_LOOP_MGMT_GATE,
- PN544_NFC_WI_MGMT_GATE,
- PN544_RF_READER_F_GATE,
- PN544_RF_READER_JEWEL_GATE,
- PN544_RF_READER_ISO15693_GATE,
- PN544_RF_READER_NFCIP1_INITIATOR_GATE,
- PN544_RF_READER_NFCIP1_TARGET_GATE
+static struct nfc_hci_gate pn544_gates[] = {
+ {NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_LINK_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE},
+ {NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_SYS_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_SWP_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_POLLING_LOOP_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_NFC_WI_MGMT_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_RF_READER_JEWEL_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_RF_READER_NFCIP1_INITIATOR_GATE, NFC_HCI_INVALID_PIPE},
+ {PN544_RF_READER_NFCIP1_TARGET_GATE, NFC_HCI_INVALID_PIPE}
};
/* Largest headroom needed for outgoing custom commands */
@@ -377,6 +383,9 @@ static int pn544_hci_open(struct nfc_shdlc *shdlc)
r = pn544_hci_enable(info, HCI_MODE);
+ if (r == 0)
+ info->state = PN544_ST_READY;
+
out:
mutex_unlock(&info->info_lock);
return r;
@@ -393,6 +402,8 @@ static void pn544_hci_close(struct nfc_shdlc *shdlc)
pn544_hci_disable(info);
+ info->state = PN544_ST_COLD;
+
out:
mutex_unlock(&info->info_lock);
}
@@ -576,7 +587,8 @@ static int pn544_hci_xmit(struct nfc_shdlc *shdlc, struct sk_buff *skb)
return pn544_hci_i2c_write(client, skb->data, skb->len);
}
-static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
+static int pn544_hci_start_poll(struct nfc_shdlc *shdlc,
+ u32 im_protocols, u32 tm_protocols)
{
struct nfc_hci_dev *hdev = nfc_shdlc_get_hci_dev(shdlc);
u8 phases = 0;
@@ -584,7 +596,8 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
u8 duration[2];
u8 activated;
- pr_info(DRIVER_DESC ": %s protocols = %d\n", __func__, protocols);
+ pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n",
+ __func__, im_protocols, tm_protocols);
r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_END_OPERATION, NULL, 0);
@@ -604,10 +617,10 @@ static int pn544_hci_start_poll(struct nfc_shdlc *shdlc, u32 protocols)
if (r < 0)
return r;
- if (protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK |
+ if (im_protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK |
NFC_PROTO_JEWEL_MASK))
phases |= 1; /* Type A */
- if (protocols & NFC_PROTO_FELICA_MASK) {
+ if (im_protocols & NFC_PROTO_FELICA_MASK) {
phases |= (1 << 2); /* Type F 212 */
phases |= (1 << 3); /* Type F 424 */
}
@@ -842,10 +855,9 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
goto err_rti;
}
- init_data.gate_count = ARRAY_SIZE(pn544_custom_gates);
+ init_data.gate_count = ARRAY_SIZE(pn544_gates);
- memcpy(init_data.gates, pn544_custom_gates,
- ARRAY_SIZE(pn544_custom_gates));
+ memcpy(init_data.gates, pn544_gates, sizeof(pn544_gates));
/*
* TODO: Session id must include the driver name + some bus addr
@@ -857,6 +869,7 @@ static int __devinit pn544_hci_probe(struct i2c_client *client,
NFC_PROTO_MIFARE_MASK |
NFC_PROTO_FELICA_MASK |
NFC_PROTO_ISO14443_MASK |
+ NFC_PROTO_ISO14443_B_MASK |
NFC_PROTO_NFC_DEP_MASK;
info->shdlc = nfc_shdlc_allocate(&pn544_shdlc_ops,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d9bfd49b1935..c181b94abc36 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -173,9 +173,9 @@ struct property *of_find_property(const struct device_node *np,
return NULL;
read_lock(&devtree_lock);
- for (pp = np->properties; pp != 0; pp = pp->next) {
+ for (pp = np->properties; pp; pp = pp->next) {
if (of_prop_cmp(pp->name, name) == 0) {
- if (lenp != 0)
+ if (lenp)
*lenp = pp->length;
break;
}
@@ -497,7 +497,7 @@ struct device_node *of_find_node_with_property(struct device_node *from,
read_lock(&devtree_lock);
np = from ? from->allnext : allnodes;
for (; np; np = np->allnext) {
- for (pp = np->properties; pp != 0; pp = pp->next) {
+ for (pp = np->properties; pp; pp = pp->next) {
if (of_prop_cmp(pp->name, prop_name) == 0) {
of_node_get(np);
goto out;
@@ -902,7 +902,7 @@ int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
/* Retrieve the phandle list property */
list = of_get_property(np, list_name, &size);
if (!list)
- return -EINVAL;
+ return -ENOENT;
list_end = list + size / sizeof(*list);
/* Loop over the phandles until all the requested entry is found */
@@ -1051,7 +1051,8 @@ int prom_remove_property(struct device_node *np, struct property *prop)
}
/*
- * prom_update_property - Update a property in a node.
+ * prom_update_property - Update a property in a node, if the property does
+ * not exist, add it.
*
* Note that we don't actually remove it, since we have given out
* who-knows-how-many pointers to the data using get-property.
@@ -1059,13 +1060,19 @@ int prom_remove_property(struct device_node *np, struct property *prop)
* and add the new property to the property list
*/
int prom_update_property(struct device_node *np,
- struct property *newprop,
- struct property *oldprop)
+ struct property *newprop)
{
- struct property **next;
+ struct property **next, *oldprop;
unsigned long flags;
int found = 0;
+ if (!newprop->name)
+ return -EINVAL;
+
+ oldprop = of_find_property(np, newprop->name, NULL);
+ if (!oldprop)
+ return prom_add_property(np, newprop);
+
write_lock_irqsave(&devtree_lock, flags);
next = &np->properties;
while (*next) {
@@ -1173,7 +1180,7 @@ static void of_alias_add(struct alias_prop *ap, struct device_node *np,
ap->stem[stem_len] = 0;
list_add_tail(&ap->link, &aliases_lookup);
pr_debug("adding DT alias:%s: stem=%s id=%i node=%s\n",
- ap->alias, ap->stem, ap->id, np ? np->full_name : NULL);
+ ap->alias, ap->stem, ap->id, of_node_full_name(np));
}
/**
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 9cf00602f566..ff8ab7b27373 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -255,7 +255,7 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
skiplevel:
/* Iterate again with new parent */
- pr_debug(" -> new parent: %s\n", newpar ? newpar->full_name : "<>");
+ pr_debug(" -> new parent: %s\n", of_node_full_name(newpar));
of_node_put(ipar);
ipar = newpar;
newpar = NULL;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 2574abde8d99..8e6c25f35040 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -57,6 +57,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
const __be32 *paddr;
u32 addr;
int len;
+ bool is_c45;
/* A PHY must have a reg property in the range [0-31] */
paddr = of_get_property(child, "reg", &len);
@@ -79,11 +80,18 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
mdio->irq[addr] = PHY_POLL;
}
- phy = get_phy_device(mdio, addr);
+ is_c45 = of_device_is_compatible(child,
+ "ethernet-phy-ieee802.3-c45");
+ phy = get_phy_device(mdio, addr, is_c45);
+
if (!phy || IS_ERR(phy)) {
- dev_err(&mdio->dev, "error probing PHY at address %i\n",
- addr);
- continue;
+ phy = phy_device_create(mdio, addr, 0, false, NULL);
+ if (!phy || IS_ERR(phy)) {
+ dev_err(&mdio->dev,
+ "error creating PHY at address %i\n",
+ addr);
+ continue;
+ }
}
/* Associate the OF node with the device structure so it
diff --git a/drivers/of/of_mtd.c b/drivers/of/of_mtd.c
index e7cad627a5d1..a27ec94877e4 100644
--- a/drivers/of/of_mtd.c
+++ b/drivers/of/of_mtd.c
@@ -32,7 +32,7 @@ static const char *nand_ecc_modes[] = {
* The function gets ecc mode string from property 'nand-ecc-mode',
* and return its index in nand_ecc_modes table, or errno in error case.
*/
-const int of_get_nand_ecc_mode(struct device_node *np)
+int of_get_nand_ecc_mode(struct device_node *np)
{
const char *pm;
int err, i;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 343ad29e211c..e44f8c2d239d 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -317,10 +317,9 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
for(; lookup->compatible != NULL; lookup++) {
if (!of_device_is_compatible(np, lookup->compatible))
continue;
- if (of_address_to_resource(np, 0, &res))
- continue;
- if (res.start != lookup->phys_addr)
- continue;
+ if (!of_address_to_resource(np, 0, &res))
+ if (res.start != lookup->phys_addr)
+ continue;
pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
return lookup;
}
@@ -462,4 +461,5 @@ int of_platform_populate(struct device_node *root,
of_node_put(root);
return rc;
}
+EXPORT_SYMBOL_GPL(of_platform_populate);
#endif /* CONFIG_OF_ADDRESS */
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index da14432806c6..f3cfa0b9adfa 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -1,5 +1,6 @@
/*
* Copyright 2010 ARM Ltd.
+ * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
*
* Perf-events backend for OProfile.
*/
@@ -25,7 +26,7 @@ static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex);
static struct op_counter_config *counter_config;
-static struct perf_event **perf_events[nr_cpumask_bits];
+static DEFINE_PER_CPU(struct perf_event **, perf_events);
static int num_counters;
/*
@@ -38,7 +39,7 @@ static void op_overflow_handler(struct perf_event *event,
u32 cpu = smp_processor_id();
for (id = 0; id < num_counters; ++id)
- if (perf_events[cpu][id] == event)
+ if (per_cpu(perf_events, cpu)[id] == event)
break;
if (id != num_counters)
@@ -74,7 +75,7 @@ static int op_create_counter(int cpu, int event)
{
struct perf_event *pevent;
- if (!counter_config[event].enabled || perf_events[cpu][event])
+ if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
return 0;
pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
@@ -91,18 +92,18 @@ static int op_create_counter(int cpu, int event)
return -EBUSY;
}
- perf_events[cpu][event] = pevent;
+ per_cpu(perf_events, cpu)[event] = pevent;
return 0;
}
static void op_destroy_counter(int cpu, int event)
{
- struct perf_event *pevent = perf_events[cpu][event];
+ struct perf_event *pevent = per_cpu(perf_events, cpu)[event];
if (pevent) {
perf_event_release_kernel(pevent);
- perf_events[cpu][event] = NULL;
+ per_cpu(perf_events, cpu)[event] = NULL;
}
}
@@ -257,12 +258,12 @@ void oprofile_perf_exit(void)
for_each_possible_cpu(cpu) {
for (id = 0; id < num_counters; ++id) {
- event = perf_events[cpu][id];
+ event = per_cpu(perf_events, cpu)[id];
if (event)
perf_event_release_kernel(event);
}
- kfree(perf_events[cpu]);
+ kfree(per_cpu(perf_events, cpu));
}
kfree(counter_config);
@@ -277,8 +278,6 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
if (ret)
return ret;
- memset(&perf_events, 0, sizeof(perf_events));
-
num_counters = perf_num_counters();
if (num_counters <= 0) {
pr_info("oprofile: no performance counters\n");
@@ -298,9 +297,9 @@ int __init oprofile_perf_init(struct oprofile_operations *ops)
}
for_each_possible_cpu(cpu) {
- perf_events[cpu] = kcalloc(num_counters,
+ per_cpu(perf_events, cpu) = kcalloc(num_counters,
sizeof(struct perf_event *), GFP_KERNEL);
- if (!perf_events[cpu]) {
+ if (!per_cpu(perf_events, cpu)) {
pr_info("oprofile: failed to allocate %d perf events "
"for cpu %d\n", num_counters, cpu);
ret = -ENOMEM;
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index 432d4bbcc62a..ffddc4f64268 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -174,7 +174,7 @@ static int dino_cfg_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge));
- u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
+ u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3);
void __iomem *base_addr = d->hba.base_addr;
unsigned long flags;
@@ -209,7 +209,7 @@ static int dino_cfg_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
struct dino_device *d = DINO_DEV(parisc_walk_tree(bus->bridge));
- u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
+ u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
u32 v = DINO_CFG_TOK(local_bus, devfn, where & ~3);
void __iomem *base_addr = d->hba.base_addr;
unsigned long flags;
@@ -554,7 +554,7 @@ dino_fixup_bus(struct pci_bus *bus)
struct dino_device *dino_dev = DINO_DEV(parisc_walk_tree(bus->bridge));
DBG(KERN_WARNING "%s(0x%p) bus %d platform_data 0x%p\n",
- __func__, bus, bus->secondary,
+ __func__, bus, bus->busn_res.start,
bus->bridge->platform_data);
/* Firmware doesn't set up card-mode dino, so we have to */
@@ -898,6 +898,7 @@ static int __init dino_probe(struct parisc_device *dev)
LIST_HEAD(resources);
struct pci_bus *bus;
unsigned long hpa = dev->hpa.start;
+ int max;
name = "Dino";
if (is_card_dino(&dev->id)) {
@@ -983,6 +984,10 @@ static int __init dino_probe(struct parisc_device *dev)
if (dino_dev->hba.gmmio_space.flags)
pci_add_resource(&resources, &dino_dev->hba.gmmio_space);
+ dino_dev->hba.bus_num.start = dino_current_bus;
+ dino_dev->hba.bus_num.end = 255;
+ dino_dev->hba.bus_num.flags = IORESOURCE_BUS;
+ pci_add_resource(&resources, &dino_dev->hba.bus_num);
/*
** It's not used to avoid chicken/egg problems
** with configuration accessor functions.
@@ -998,12 +1003,13 @@ static int __init dino_probe(struct parisc_device *dev)
return 0;
}
- bus->subordinate = pci_scan_child_bus(bus);
+ max = pci_scan_child_bus(bus);
+ pci_bus_update_busn_res_end(bus, max);
/* This code *depends* on scanning being single threaded
* if it isn't, this global bus number count will fail
*/
- dino_current_bus = bus->subordinate + 1;
+ dino_current_bus = max + 1;
pci_bus_assign_resources(bus);
pci_bus_add_devices(bus);
return 0;
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 1f9e9fefb8e7..9544cdc0d1af 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -146,7 +146,7 @@
#endif
#include <asm/ropes.h>
-#include "./iosapic_private.h"
+#include "iosapic_private.h"
#define MODULE_NAME "iosapic"
@@ -532,7 +532,7 @@ iosapic_xlate_pin(struct iosapic_info *isi, struct pci_dev *pcidev)
intr_slot = PCI_SLOT(pcidev->devfn);
}
DBG_IRT("iosapic_xlate_pin: bus %d slot %d pin %d\n",
- pcidev->bus->secondary, intr_slot, intr_pin);
+ pcidev->bus->busn_res.start, intr_slot, intr_pin);
return irt_find_irqline(isi, intr_slot, intr_pin);
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 052fa230bc77..4f9cf2456f4e 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -189,8 +189,8 @@ lba_dump_res(struct resource *r, int d)
static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
{
- u8 first_bus = d->hba.hba_bus->secondary;
- u8 last_sub_bus = d->hba.hba_bus->subordinate;
+ u8 first_bus = d->hba.hba_bus->busn_res.start;
+ u8 last_sub_bus = d->hba.hba_bus->busn_res.end;
if ((bus < first_bus) ||
(bus > last_sub_bus) ||
@@ -364,7 +364,7 @@ lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
- u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
+ u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
u32 tok = LBA_CFG_TOK(local_bus, devfn);
void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
@@ -380,7 +380,7 @@ static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int
return 0;
}
- if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->secondary, devfn, d)) {
+ if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->busn_res.start, devfn, d)) {
DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos);
/* either don't want to look or know device isn't present. */
*data = ~0U;
@@ -431,7 +431,7 @@ lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
- u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
+ u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
u32 tok = LBA_CFG_TOK(local_bus,devfn);
if ((pos > 255) || (devfn > 255))
@@ -444,7 +444,7 @@ static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int
return 0;
}
- if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->secondary, devfn, d))) {
+ if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->busn_res.start, devfn, d))) {
DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos,data);
return 1; /* New Workaround */
}
@@ -481,7 +481,7 @@ static struct pci_ops elroy_cfg_ops = {
static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
- u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
+ u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
u32 tok = LBA_CFG_TOK(local_bus, devfn);
void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
@@ -514,7 +514,7 @@ static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, i
{
struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
- u32 local_bus = (bus->parent == NULL) ? 0 : bus->secondary;
+ u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
u32 tok = LBA_CFG_TOK(local_bus,devfn);
if ((pos > 255) || (devfn > 255))
@@ -636,7 +636,7 @@ lba_fixup_bus(struct pci_bus *bus)
struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));
DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
- bus, bus->secondary, bus->bridge->platform_data);
+ bus, (int)bus->busn_res.start, bus->bridge->platform_data);
/*
** Properly Setup MMIO resources for this bus.
@@ -989,6 +989,7 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
case PAT_PBNUM:
lba_dev->hba.bus_num.start = p->start;
lba_dev->hba.bus_num.end = p->end;
+ lba_dev->hba.bus_num.flags = IORESOURCE_BUS;
break;
case PAT_LMMIO:
@@ -1366,6 +1367,7 @@ lba_driver_probe(struct parisc_device *dev)
void *tmp_obj;
char *version;
void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
+ int max;
/* Read HW Rev First */
func_class = READ_REG32(addr + LBA_FCLASS);
@@ -1502,6 +1504,8 @@ lba_driver_probe(struct parisc_device *dev)
if (lba_dev->hba.gmmio_space.flags)
pci_add_resource(&resources, &lba_dev->hba.gmmio_space);
+ pci_add_resource(&resources, &lba_dev->hba.bus_num);
+
dev->dev.platform_data = lba_dev;
lba_bus = lba_dev->hba.hba_bus =
pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
@@ -1511,7 +1515,7 @@ lba_driver_probe(struct parisc_device *dev)
return 0;
}
- lba_bus->subordinate = pci_scan_child_bus(lba_bus);
+ max = pci_scan_child_bus(lba_bus);
/* This is in lieu of calling pci_assign_unassigned_resources() */
if (is_pdc_pat()) {
@@ -1541,7 +1545,7 @@ lba_driver_probe(struct parisc_device *dev)
lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
}
- lba_next_bus = lba_bus->subordinate + 1;
+ lba_next_bus = max + 1;
pci_bus_add_devices(lba_bus);
/* Whew! Finally done! Tell services we got this one covered. */
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 01c001f3b766..8d688b260e28 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_MN10300) += setup-bus.o
obj-$(CONFIG_MICROBLAZE) += setup-bus.o
obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
obj-$(CONFIG_SPARC_LEON) += setup-bus.o setup-irq.o
+obj-$(CONFIG_M68K) += setup-bus.o setup-irq.o
#
# ACPI Related PCI FW Functions
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 2a581642c237..ba91a7e17519 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -162,7 +162,8 @@ int pci_user_read_config_##size \
if (ret > 0) \
ret = -EINVAL; \
return ret; \
-}
+} \
+EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size,type) \
@@ -181,7 +182,8 @@ int pci_user_write_config_##size \
if (ret > 0) \
ret = -EINVAL; \
return ret; \
-}
+} \
+EXPORT_SYMBOL_GPL(pci_user_write_config_##size);
PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 4ce5ef2f2826..4b0970b46e0b 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -164,6 +164,8 @@ pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
int pci_bus_add_device(struct pci_dev *dev)
{
int retval;
+
+ pci_fixup_device(pci_fixup_final, dev);
retval = device_add(&dev->dev);
if (retval)
return retval;
diff --git a/drivers/pci/hotplug-pci.c b/drivers/pci/hotplug-pci.c
index d3509cdeb554..6258dc260d9f 100644
--- a/drivers/pci/hotplug-pci.c
+++ b/drivers/pci/hotplug-pci.c
@@ -4,18 +4,26 @@
#include <linux/export.h>
#include "pci.h"
-
-unsigned int __devinit pci_do_scan_bus(struct pci_bus *bus)
+int __ref pci_hp_add_bridge(struct pci_dev *dev)
{
- unsigned int max;
-
- max = pci_scan_child_bus(bus);
+ struct pci_bus *parent = dev->bus;
+ int pass, busnr, start = parent->busn_res.start;
+ int end = parent->busn_res.end;
- /*
- * Make the discovered devices available.
- */
- pci_bus_add_devices(bus);
+ for (busnr = start; busnr <= end; busnr++) {
+ if (!pci_find_bus(pci_domain_nr(parent), busnr))
+ break;
+ }
+ if (busnr-- > end) {
+ printk(KERN_ERR "No bus number available for hot-added bridge %s\n",
+ pci_name(dev));
+ return -1;
+ }
+ for (pass = 0; pass < 2; pass++)
+ busnr = pci_scan_bridge(parent, dev, busnr, pass);
+ if (!dev->subordinate)
+ return -1;
- return max;
+ return 0;
}
-EXPORT_SYMBOL(pci_do_scan_bus);
+EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 7722108e78df..a1afb5b39ad4 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -89,8 +89,6 @@ struct acpiphp_bridge {
/* PCI-to-PCI bridge device */
struct pci_dev *pci_dev;
-
- spinlock_t res_lock;
};
@@ -207,6 +205,6 @@ extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot);
extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot);
/* variables */
-extern int acpiphp_debug;
+extern bool acpiphp_debug;
#endif /* _ACPIPHP_H */
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index aa41631e9e02..96316b74969f 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -47,8 +47,7 @@
/* name size which is used for entries in pcihpfs */
#define SLOT_NAME_SIZE 21 /* {_SUN} */
-static bool debug;
-int acpiphp_debug;
+bool acpiphp_debug;
/* local variables */
static int num_slots;
@@ -62,7 +61,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
-module_param(debug, bool, 0644);
+module_param_named(debug, acpiphp_debug, bool, 0644);
/* export the attention callback registration methods */
EXPORT_SYMBOL_GPL(acpiphp_register_attention);
@@ -379,8 +378,6 @@ static int __init acpiphp_init(void)
if (acpi_pci_disabled)
return 0;
- acpiphp_debug = debug;
-
/* read all the ACPI info from the system */
return init_acpi();
}
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 806c44fa645a..ad6fd6695495 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -100,11 +100,11 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
PCI_PRIMARY_BUS,
&buses);
- if (((buses >> 8) & 0xff) != bus->secondary) {
+ if (((buses >> 8) & 0xff) != bus->busn_res.start) {
buses = (buses & 0xff000000)
| ((unsigned int)(bus->primary) << 0)
- | ((unsigned int)(bus->secondary) << 8)
- | ((unsigned int)(bus->subordinate) << 16);
+ | ((unsigned int)(bus->busn_res.start) << 8)
+ | ((unsigned int)(bus->busn_res.end) << 16);
pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
}
return NOTIFY_OK;
@@ -132,6 +132,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;
+ status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+ if (ACPI_FAILURE(status)) {
+ warn("can't evaluate _ADR (%#x)\n", status);
+ return AE_OK;
+ }
+
+ device = (adr >> 16) & 0xffff;
+ function = adr & 0xffff;
+
pdev = pbus->self;
if (pdev && pci_is_pcie(pdev)) {
tmp = acpi_find_root_bridge_handle(pdev);
@@ -144,10 +153,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
}
}
- acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
- device = (adr >> 16) & 0xffff;
- function = adr & 0xffff;
-
newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL);
if (!newfunc)
return AE_NO_MEMORY;
@@ -391,8 +396,6 @@ static void add_host_bridge(acpi_handle *handle)
bridge->pci_bus = root->bus;
- spin_lock_init(&bridge->res_lock);
-
init_bridge_misc(bridge);
}
@@ -425,7 +428,6 @@ static void add_p2p_bridge(acpi_handle *handle)
* (which we access during module unload).
*/
get_device(&bridge->pci_bus->dev);
- spin_lock_init(&bridge->res_lock);
init_bridge_misc(bridge);
return;
@@ -692,7 +694,7 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
* bus->subordinate value because it could have
* padding in it.
*/
- max = bus->secondary;
+ max = bus->busn_res.start;
list_for_each(tmp, &bus->children) {
n = pci_bus_max_busnr(pci_bus_b(tmp));
@@ -878,6 +880,24 @@ static void disable_bridges(struct pci_bus *bus)
}
}
+/* return first device in slot, acquiring a reference on it */
+static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
+{
+ struct pci_bus *bus = slot->bridge->pci_bus;
+ struct pci_dev *dev;
+ struct pci_dev *ret = NULL;
+
+ down_read(&pci_bus_sem);
+ list_for_each_entry(dev, &bus->devices, bus_list)
+ if (PCI_SLOT(dev->devfn) == slot->device) {
+ ret = pci_dev_get(dev);
+ break;
+ }
+ up_read(&pci_bus_sem);
+
+ return ret;
+}
+
/**
* disable_device - disable a slot
* @slot: ACPI PHP slot
@@ -893,6 +913,7 @@ static int disable_device(struct acpiphp_slot *slot)
pdev = pci_get_slot(bus, PCI_DEVFN(slot->device, 0));
if (!pdev)
goto err_exit;
+ pci_dev_put(pdev);
list_for_each_entry(func, &slot->funcs, sibling) {
if (func->bridge) {
@@ -901,18 +922,22 @@ static int disable_device(struct acpiphp_slot *slot)
(u32)1, NULL, NULL);
func->bridge = NULL;
}
+ }
- pdev = pci_get_slot(slot->bridge->pci_bus,
- PCI_DEVFN(slot->device, func->function));
- if (pdev) {
- pci_stop_bus_device(pdev);
- if (pdev->subordinate) {
- disable_bridges(pdev->subordinate);
- pci_disable_device(pdev);
- }
- __pci_remove_bus_device(pdev);
- pci_dev_put(pdev);
+ /*
+ * enable_device() enumerates all functions in this device via
+ * pci_scan_slot(), whether they have associated ACPI hotplug
+ * methods (_EJ0, etc.) or not. Therefore, we remove all functions
+ * here.
+ */
+ while ((pdev = dev_in_slot(slot))) {
+ pci_stop_bus_device(pdev);
+ if (pdev->subordinate) {
+ disable_bridges(pdev->subordinate);
+ pci_disable_device(pdev);
}
+ __pci_remove_bus_device(pdev);
+ pci_dev_put(pdev);
}
list_for_each_entry(func, &slot->funcs, sibling) {
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 3fadf2f135e8..2b4c412f94c3 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -225,7 +225,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
- int status = -ENOMEM;
+ int status;
int i;
if (!(controller && bus))
@@ -237,18 +237,24 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
*/
for (i = first; i <= last; ++i) {
slot = kzalloc(sizeof (struct slot), GFP_KERNEL);
- if (!slot)
+ if (!slot) {
+ status = -ENOMEM;
goto error;
+ }
hotplug_slot =
kzalloc(sizeof (struct hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
+ if (!hotplug_slot) {
+ status = -ENOMEM;
goto error_slot;
+ }
slot->hotplug_slot = hotplug_slot;
info = kzalloc(sizeof (struct hotplug_slot_info), GFP_KERNEL);
- if (!info)
+ if (!info) {
+ status = -ENOMEM;
goto error_hpslot;
+ }
hotplug_slot->info = info;
slot->bus = bus;
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index ae853ccd0cd5..dcc75c785443 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -285,42 +285,19 @@ int __ref cpci_configure_slot(struct slot *slot)
for (fn = 0; fn < 8; fn++) {
struct pci_dev *dev;
- dev = pci_get_slot(parent, PCI_DEVFN(PCI_SLOT(slot->devfn), fn));
+ dev = pci_get_slot(parent,
+ PCI_DEVFN(PCI_SLOT(slot->devfn), fn));
if (!dev)
continue;
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
- (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
- /* Find an unused bus number for the new bridge */
- struct pci_bus *child;
- unsigned char busnr, start = parent->secondary;
- unsigned char end = parent->subordinate;
-
- for (busnr = start; busnr <= end; busnr++) {
- if (!pci_find_bus(pci_domain_nr(parent),
- busnr))
- break;
- }
- if (busnr >= end) {
- err("No free bus for hot-added bridge\n");
- pci_dev_put(dev);
- continue;
- }
- child = pci_add_new_bus(parent, dev, busnr);
- if (!child) {
- err("Cannot add new bus for %s\n",
- pci_name(dev));
- pci_dev_put(dev);
- continue;
- }
- child->subordinate = pci_do_scan_bus(child);
- pci_bus_size_bridges(child);
- }
+ (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
+ pci_hp_add_bridge(dev);
pci_dev_put(dev);
}
- pci_bus_assign_resources(parent);
+ pci_assign_unassigned_bridge_resources(parent->self);
+
pci_bus_add_devices(parent);
- pci_enable_bridges(parent);
dbg("%s - exit", __func__);
return 0;
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 187a199da93c..c8eaeb43fa5d 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -611,7 +611,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
u32 tempdword;
char name[SLOT_NAME_SIZE];
void __iomem *slot_entry= NULL;
- int result = -ENOMEM;
+ int result;
dbg("%s\n", __func__);
@@ -623,19 +623,25 @@ static int ctrl_slot_setup(struct controller *ctrl,
while (number_of_slots) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
+ if (!slot) {
+ result = -ENOMEM;
goto error;
+ }
slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
GFP_KERNEL);
- if (!slot->hotplug_slot)
+ if (!slot->hotplug_slot) {
+ result = -ENOMEM;
goto error_slot;
+ }
hotplug_slot = slot->hotplug_slot;
hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
GFP_KERNEL);
- if (!hotplug_slot->info)
+ if (!hotplug_slot->info) {
+ result = -ENOMEM;
goto error_hpslot;
+ }
hotplug_slot_info = hotplug_slot->info;
slot->ctrl = ctrl;
diff --git a/drivers/pci/hotplug/cpqphp_pci.c b/drivers/pci/hotplug/cpqphp_pci.c
index 1c8494021a42..09801c6945ce 100644
--- a/drivers/pci/hotplug/cpqphp_pci.c
+++ b/drivers/pci/hotplug/cpqphp_pci.c
@@ -83,7 +83,6 @@ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iom
int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
{
- unsigned char bus;
struct pci_bus *child;
int num;
@@ -106,9 +105,10 @@ int cpqhp_configure_device (struct controller* ctrl, struct pci_func* func)
}
if (func->pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(func->pci_dev, PCI_SECONDARY_BUS, &bus);
- child = (struct pci_bus*) pci_add_new_bus(func->pci_dev->bus, (func->pci_dev), bus);
- pci_do_scan_bus(child);
+ pci_hp_add_bridge(func->pci_dev);
+ child = func->pci_dev->subordinate;
+ if (child)
+ pci_bus_add_devices(child);
}
pci_dev_put(func->pci_dev);
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 4fda7e6a86a7..cbd72d81d253 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -760,7 +760,7 @@ static u8 bus_structure_fixup(u8 busno)
for (dev->devfn = 0; dev->devfn < 256; dev->devfn += 8) {
if (!pci_read_config_word(dev, PCI_VENDOR_ID, &l) &&
(l != 0x0000) && (l != 0xffff)) {
- debug("%s - Inside bus_struture_fixup()\n",
+ debug("%s - Inside bus_structure_fixup()\n",
__func__);
pci_scan_bus(busno, ibmphp_pci_bus->ops, NULL);
break;
@@ -775,7 +775,6 @@ static u8 bus_structure_fixup(u8 busno)
static int ibm_configure_device(struct pci_func *func)
{
- unsigned char bus;
struct pci_bus *child;
int num;
int flag = 0; /* this is to make sure we don't double scan the bus,
@@ -805,9 +804,10 @@ static int ibm_configure_device(struct pci_func *func)
}
}
if (!(flag) && (func->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
- pci_read_config_byte(func->dev, PCI_SECONDARY_BUS, &bus);
- child = pci_add_new_bus(func->dev->bus, func->dev, bus);
- pci_do_scan_bus(child);
+ pci_hp_add_bridge(func->dev);
+ child = func->dev->subordinate;
+ if (child)
+ pci_bus_add_devices(child);
}
return 0;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 714ca5c4ed50..9df78bc14541 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -784,7 +784,7 @@ static int __init ebda_rsrc_controller (void)
hpc_ptr->ctlr_relative_id = ctlr;
hpc_ptr->slot_count = slot_num;
hpc_ptr->bus_count = bus_num;
- debug ("now enter ctlr data struture ---\n");
+ debug ("now enter ctlr data structure ---\n");
debug ("ctlr id: %x\n", ctlr_id);
debug ("ctlr_relative_id: %x\n", hpc_ptr->ctlr_relative_id);
debug ("count of slots controlled by this ctlr: %x\n", slot_num);
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 7b09e16173ad..c60f5f3e838d 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -109,7 +109,7 @@ int ibmphp_configure_card (struct pci_func *func, u8 slotno)
cur_func->function = function;
- debug ("inside the loop, cur_func->busno = %x, cur_func->device = %x, cur_func->funcion = %x\n",
+ debug ("inside the loop, cur_func->busno = %x, cur_func->device = %x, cur_func->function = %x\n",
cur_func->busno, cur_func->device, cur_func->function);
pci_bus_read_config_word (ibmphp_pci_bus, devfn, PCI_VENDOR_ID, &vendor_id);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 4b7cce1de6ec..26ffd3e3fb74 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -149,10 +149,6 @@ int pciehp_get_attention_status(struct slot *slot, u8 *status);
int pciehp_set_attention_status(struct slot *slot, u8 status);
int pciehp_get_latch_status(struct slot *slot, u8 *status);
int pciehp_get_adapter_status(struct slot *slot, u8 *status);
-int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *speed);
-int pciehp_get_max_link_width(struct slot *slot, enum pcie_link_width *val);
-int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *speed);
-int pciehp_get_cur_link_width(struct slot *slot, enum pcie_link_width *val);
int pciehp_query_power_fault(struct slot *slot);
void pciehp_green_led_on(struct slot *slot);
void pciehp_green_led_off(struct slot *slot);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index a960faec1021..302451e8289d 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -705,107 +705,6 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int pciehp_get_max_lnk_width(struct slot *slot,
- enum pcie_link_width *value)
-{
- struct controller *ctrl = slot->ctrl;
- enum pcie_link_width lnk_wdth;
- u32 lnk_cap;
- int retval = 0;
-
- retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
- return retval;
- }
-
- switch ((lnk_cap & PCI_EXP_LNKSTA_NLW) >> 4){
- case 0:
- lnk_wdth = PCIE_LNK_WIDTH_RESRV;
- break;
- case 1:
- lnk_wdth = PCIE_LNK_X1;
- break;
- case 2:
- lnk_wdth = PCIE_LNK_X2;
- break;
- case 4:
- lnk_wdth = PCIE_LNK_X4;
- break;
- case 8:
- lnk_wdth = PCIE_LNK_X8;
- break;
- case 12:
- lnk_wdth = PCIE_LNK_X12;
- break;
- case 16:
- lnk_wdth = PCIE_LNK_X16;
- break;
- case 32:
- lnk_wdth = PCIE_LNK_X32;
- break;
- default:
- lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
- break;
- }
-
- *value = lnk_wdth;
- ctrl_dbg(ctrl, "Max link width = %d\n", lnk_wdth);
-
- return retval;
-}
-
-int pciehp_get_cur_lnk_width(struct slot *slot,
- enum pcie_link_width *value)
-{
- struct controller *ctrl = slot->ctrl;
- enum pcie_link_width lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
- int retval = 0;
- u16 lnk_status;
-
- retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
- __func__);
- return retval;
- }
-
- switch ((lnk_status & PCI_EXP_LNKSTA_NLW) >> 4){
- case 0:
- lnk_wdth = PCIE_LNK_WIDTH_RESRV;
- break;
- case 1:
- lnk_wdth = PCIE_LNK_X1;
- break;
- case 2:
- lnk_wdth = PCIE_LNK_X2;
- break;
- case 4:
- lnk_wdth = PCIE_LNK_X4;
- break;
- case 8:
- lnk_wdth = PCIE_LNK_X8;
- break;
- case 12:
- lnk_wdth = PCIE_LNK_X12;
- break;
- case 16:
- lnk_wdth = PCIE_LNK_X16;
- break;
- case 32:
- lnk_wdth = PCIE_LNK_X32;
- break;
- default:
- lnk_wdth = PCIE_LNK_WIDTH_UNKNOWN;
- break;
- }
-
- *value = lnk_wdth;
- ctrl_dbg(ctrl, "Current link width = %d\n", lnk_wdth);
-
- return retval;
-}
-
int pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 47d9dc06b109..09cecaf450c5 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -34,29 +34,6 @@
#include "../pci.h"
#include "pciehp.h"
-static int __ref pciehp_add_bridge(struct pci_dev *dev)
-{
- struct pci_bus *parent = dev->bus;
- int pass, busnr, start = parent->secondary;
- int end = parent->subordinate;
-
- for (busnr = start; busnr <= end; busnr++) {
- if (!pci_find_bus(pci_domain_nr(parent), busnr))
- break;
- }
- if (busnr-- > end) {
- err("No bus number available for hot-added bridge %s\n",
- pci_name(dev));
- return -1;
- }
- for (pass = 0; pass < 2; pass++)
- busnr = pci_scan_bridge(parent, dev, busnr, pass);
- if (!dev->subordinate)
- return -1;
-
- return 0;
-}
-
int pciehp_configure_device(struct slot *p_slot)
{
struct pci_dev *dev;
@@ -85,9 +62,8 @@ int pciehp_configure_device(struct slot *p_slot)
if (!dev)
continue;
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
- (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
- pciehp_add_bridge(dev);
- }
+ (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
+ pci_hp_add_bridge(dev);
pci_dev_put(dev);
}
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index b20ceaaa31f4..1f00b937f721 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -252,7 +252,7 @@ static int __init init_slots(void)
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
- int retval = -ENOMEM;
+ int retval;
int i;
/*
@@ -261,17 +261,23 @@ static int __init init_slots(void)
*/
for (i = 0; i < num_slots; ++i) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
+ if (!slot) {
+ retval = -ENOMEM;
goto error;
+ }
hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
+ if (!hotplug_slot) {
+ retval = -ENOMEM;
goto error_slot;
+ }
slot->hotplug_slot = hotplug_slot;
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
+ if (!info) {
+ retval = -ENOMEM;
goto error_hpslot;
+ }
hotplug_slot->info = info;
slot->number = i;
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index de573113c102..f64ca92253da 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -397,13 +397,11 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
else
sn_io_slot_fixup(dev);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- unsigned char sec_bus;
- pci_read_config_byte(dev, PCI_SECONDARY_BUS,
- &sec_bus);
- new_bus = pci_add_new_bus(dev->bus, dev,
- sec_bus);
- pci_scan_child_bus(new_bus);
- new_ppb = 1;
+ pci_hp_add_bridge(dev);
+ if (dev->subordinate) {
+ new_bus = dev->subordinate;
+ new_ppb = 1;
+ }
}
pci_dev_put(dev);
}
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 7414fd9ad1d2..b6de307248e4 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -99,22 +99,28 @@ static int init_slots(struct controller *ctrl)
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
- int retval = -ENOMEM;
+ int retval;
int i;
for (i = 0; i < ctrl->num_slots; i++) {
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
+ if (!slot) {
+ retval = -ENOMEM;
goto error;
+ }
hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
+ if (!hotplug_slot) {
+ retval = -ENOMEM;
goto error_slot;
+ }
slot->hotplug_slot = hotplug_slot;
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
+ if (!info) {
+ retval = -ENOMEM;
goto error_hpslot;
+ }
hotplug_slot->info = info;
slot->hp_slot = i;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index b00b09bdd38a..f9b5a52e4115 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -262,9 +262,6 @@ static int board_added(struct slot *p_slot)
}
if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
- if (slots_not_empty)
- return WRONG_BUS_FREQUENCY;
-
if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) {
ctrl_err(ctrl, "%s: Issue of set bus speed mode command"
" failed\n", __func__);
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index df7e4bfadae3..c627ed9957d1 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -37,9 +37,10 @@
int __ref shpchp_configure_device(struct slot *p_slot)
{
struct pci_dev *dev;
- struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
- int num, fn;
struct controller *ctrl = p_slot->ctrl;
+ struct pci_dev *bridge = ctrl->pci_dev;
+ struct pci_bus *parent = bridge->subordinate;
+ int num, fn;
dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0));
if (dev) {
@@ -61,39 +62,23 @@ int __ref shpchp_configure_device(struct slot *p_slot)
if (!dev)
continue;
if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
- (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
- /* Find an unused bus number for the new bridge */
- struct pci_bus *child;
- unsigned char busnr, start = parent->secondary;
- unsigned char end = parent->subordinate;
- for (busnr = start; busnr <= end; busnr++) {
- if (!pci_find_bus(pci_domain_nr(parent),
- busnr))
- break;
- }
- if (busnr > end) {
- ctrl_err(ctrl,
- "No free bus for hot-added bridge\n");
- pci_dev_put(dev);
- continue;
- }
- child = pci_add_new_bus(parent, dev, busnr);
- if (!child) {
- ctrl_err(ctrl, "Cannot add new bus for %s\n",
- pci_name(dev));
- pci_dev_put(dev);
- continue;
- }
- child->subordinate = pci_do_scan_bus(child);
- pci_bus_size_bridges(child);
- }
+ (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS))
+ pci_hp_add_bridge(dev);
+ pci_dev_put(dev);
+ }
+
+ pci_assign_unassigned_bridge_resources(bridge);
+
+ for (fn = 0; fn < 8; fn++) {
+ dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, fn));
+ if (!dev)
+ continue;
pci_configure_slot(dev);
pci_dev_put(dev);
}
- pci_bus_assign_resources(parent);
pci_bus_add_devices(parent);
- pci_enable_bridges(parent);
+
return 0;
}
diff --git a/drivers/pci/hotplug/shpchp_sysfs.c b/drivers/pci/hotplug/shpchp_sysfs.c
index efa30da1ae8f..eeb23ceae4a8 100644
--- a/drivers/pci/hotplug/shpchp_sysfs.c
+++ b/drivers/pci/hotplug/shpchp_sysfs.c
@@ -73,13 +73,13 @@ static ssize_t show_ctrl (struct device *dev, struct device_attribute *attr, cha
}
}
out += sprintf(out, "Free resources: bus numbers\n");
- for (busnr = bus->secondary; busnr <= bus->subordinate; busnr++) {
+ for (busnr = bus->busn_res.start; busnr <= bus->busn_res.end; busnr++) {
if (!pci_find_bus(pci_domain_nr(bus), busnr))
break;
}
- if (busnr < bus->subordinate)
+ if (busnr < bus->busn_res.end)
out += sprintf(out, "start = %8.8x, length = %8.8x\n",
- busnr, (bus->subordinate - busnr));
+ busnr, (int)(bus->busn_res.end - busnr));
return out - buf;
}
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 6554e1a0f634..74bbaf82638d 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -47,7 +47,7 @@ static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
if (!child)
return NULL;
- child->subordinate = busnr;
+ pci_bus_insert_busn_res(child, busnr, busnr);
child->dev.parent = bus->bridge;
rc = pci_bus_add_child(child);
if (rc) {
@@ -327,7 +327,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
iov->offset = offset;
iov->stride = stride;
- if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
+ if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->busn_res.end) {
dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
return -ENOMEM;
}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 61e2fefeedab..fbf7b26c7c8a 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -48,6 +48,12 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
return;
+ if (pci_dev->current_state == PCI_D3cold) {
+ pci_wakeup_event(pci_dev);
+ pm_runtime_resume(&pci_dev->dev);
+ return;
+ }
+
if (!pci_dev->pm_cap || !pci_dev->pme_support
|| pci_check_pme_status(pci_dev)) {
if (pci_dev->pme_poll)
@@ -162,6 +168,20 @@ acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
return remove_pm_notifier(dev, pci_acpi_wake_dev);
}
+phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
+{
+ acpi_status status = AE_NOT_EXIST;
+ unsigned long long mcfg_addr;
+
+ if (handle)
+ status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
+ NULL, &mcfg_addr);
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ return (phys_addr_t)mcfg_addr;
+}
+
/*
* _SxD returns the D-state with the highest power
* (lowest D-state number) supported in the S-state "x".
@@ -187,9 +207,13 @@ acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev)
static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
- int acpi_state;
+ int acpi_state, d_max;
- acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL);
+ if (pdev->no_d3cold)
+ d_max = ACPI_STATE_D3_HOT;
+ else
+ d_max = ACPI_STATE_D3_COLD;
+ acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max);
if (acpi_state < 0)
return PCI_POWER_ERROR;
@@ -296,7 +320,13 @@ static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable)
static int acpi_pci_run_wake(struct pci_dev *dev, bool enable)
{
- if (dev->pme_interrupt)
+ /*
+ * Per PCI Express Base Specification Revision 2.0 section
+ * 5.3.3.2 Link Wakeup, platform support is needed for D3cold
+ * waking up to power on the main link even if there is PME
+ * support for D3cold
+ */
+ if (dev->pme_interrupt && !dev->runtime_d3cold)
return 0;
if (!acpi_pm_device_run_wake(&dev->dev, enable))
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index bf0cee629b60..185be3703343 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -459,16 +459,17 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev)
return 0;
}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+
static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
- pci_restore_standard_config(pci_dev);
+ pci_power_up(pci_dev);
+ pci_restore_state(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-
/*
* Default "suspend" method for devices that have no driver provided suspend,
* or not even a driver at all (second part).
@@ -748,6 +749,18 @@ static int pci_pm_suspend_noirq(struct device *dev)
pci_pm_set_unknown_state(pci_dev);
+ /*
+ * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
+ * PCI COMMAND register isn't 0, the BIOS assumes that the controller
+ * hasn't been quiesced and tries to turn it off. If the controller
+ * is already in D3, this can hang or cause memory corruption.
+ *
+ * Since the value of the COMMAND register doesn't matter once the
+ * device has been suspended, we can safely set it to 0 here.
+ */
+ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
+ pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+
return 0;
}
@@ -1019,10 +1032,13 @@ static int pci_pm_runtime_suspend(struct device *dev)
if (!pm || !pm->runtime_suspend)
return -ENOSYS;
+ pci_dev->no_d3cold = false;
error = pm->runtime_suspend(dev);
suspend_report_result(pm->runtime_suspend, error);
if (error)
return error;
+ if (!pci_dev->d3cold_allowed)
+ pci_dev->no_d3cold = true;
pci_fixup_device(pci_fixup_suspend, pci_dev);
@@ -1044,17 +1060,23 @@ static int pci_pm_runtime_suspend(struct device *dev)
static int pci_pm_runtime_resume(struct device *dev)
{
+ int rc;
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
if (!pm || !pm->runtime_resume)
return -ENOSYS;
- pci_pm_default_resume_early(pci_dev);
+ pci_restore_standard_config(pci_dev);
+ pci_fixup_device(pci_fixup_resume_early, pci_dev);
__pci_enable_wake(pci_dev, PCI_D0, true, false);
pci_fixup_device(pci_fixup_resume, pci_dev);
- return pm->runtime_resume(dev);
+ rc = pm->runtime_resume(dev);
+
+ pci_dev->runtime_d3cold = false;
+
+ return rc;
}
static int pci_pm_runtime_idle(struct device *dev)
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 86c63fe45d11..6869009c7393 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -28,6 +28,7 @@
#include <linux/pci-aspm.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
+#include <linux/pm_runtime.h>
#include "pci.h"
static int sysfs_initialized; /* = 0 */
@@ -378,6 +379,31 @@ dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
#endif
+#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
+static ssize_t d3cold_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ pdev->d3cold_allowed = !!val;
+ pm_runtime_resume(dev);
+
+ return count;
+}
+
+static ssize_t d3cold_allowed_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ return sprintf (buf, "%u\n", pdev->d3cold_allowed);
+}
+#endif
+
struct device_attribute pci_dev_attrs[] = {
__ATTR_RO(resource),
__ATTR_RO(vendor),
@@ -402,6 +428,9 @@ struct device_attribute pci_dev_attrs[] = {
__ATTR(remove, (S_IWUSR|S_IWGRP), NULL, remove_store),
__ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_rescan_store),
#endif
+#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
+ __ATTR(d3cold_allowed, 0644, d3cold_allowed_show, d3cold_allowed_store),
+#endif
__ATTR_NULL,
};
@@ -1112,7 +1141,7 @@ static struct bin_attribute pcie_config_attr = {
.write = pci_write_config,
};
-int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev)
+int __weak pcibios_add_platform_entries(struct pci_dev *dev)
{
return 0;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 77cb54a65cde..f3ea977a5b1b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -110,7 +110,7 @@ unsigned char pci_bus_max_busnr(struct pci_bus* bus)
struct list_head *tmp;
unsigned char max, n;
- max = bus->subordinate;
+ max = bus->busn_res.end;
list_for_each(tmp, &bus->children) {
n = pci_bus_max_busnr(pci_bus_b(tmp));
if(n > max)
@@ -136,30 +136,6 @@ void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
-#if 0
-/**
- * pci_max_busnr - returns maximum PCI bus number
- *
- * Returns the highest PCI bus number present in the system global list of
- * PCI buses.
- */
-unsigned char __devinit
-pci_max_busnr(void)
-{
- struct pci_bus *bus = NULL;
- unsigned char max, n;
-
- max = 0;
- while ((bus = pci_find_next_bus(bus)) != NULL) {
- n = pci_bus_max_busnr(bus);
- if(n > max)
- max = n;
- }
- return max;
-}
-
-#endif /* 0 */
-
#define PCI_FIND_CAP_TTL 48
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
@@ -278,6 +254,38 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
}
/**
+ * pci_pcie_cap2 - query for devices' PCI_CAP_ID_EXP v2 capability structure
+ * @dev: PCI device to check
+ *
+ * Like pci_pcie_cap() but also checks that the PCIe capability version is
+ * >= 2. Note that v1 capability structures could be sparse in that not
+ * all register fields were required. v2 requires the entire structure to
+ * be present size wise, while still allowing for non-implemented registers
+ * to exist but they must be hardwired to 0.
+ *
+ * Due to the differences in the versions of capability structures, one
+ * must be careful not to try and access non-existant registers that may
+ * exist in early versions - v1 - of Express devices.
+ *
+ * Returns the offset of the PCIe capability structure as long as the
+ * capability version is >= 2; otherwise 0 is returned.
+ */
+static int pci_pcie_cap2(struct pci_dev *dev)
+{
+ u16 flags;
+ int pos;
+
+ pos = pci_pcie_cap(dev);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
+ if ((flags & PCI_EXP_FLAGS_VERS) < 2)
+ pos = 0;
+ }
+
+ return pos;
+}
+
+/**
* pci_find_ext_capability - Find an extended capability
* @dev: PCI device to query
* @cap: capability code
@@ -329,49 +337,6 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
-/**
- * pci_bus_find_ext_capability - find an extended capability
- * @bus: the PCI bus to query
- * @devfn: PCI device to query
- * @cap: capability code
- *
- * Like pci_find_ext_capability() but works for pci devices that do not have a
- * pci_dev structure set up yet.
- *
- * Returns the address of the requested capability structure within the
- * device's PCI configuration space or 0 in case the device does not
- * support it.
- */
-int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
- int cap)
-{
- u32 header;
- int ttl;
- int pos = PCI_CFG_SPACE_SIZE;
-
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
-
- if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
- return 0;
- if (header == 0xffffffff || header == 0)
- return 0;
-
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap)
- return pos;
-
- pos = PCI_EXT_CAP_NEXT(header);
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
-
- if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
- break;
- }
-
- return 0;
-}
-
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
int rc, ttl = PCI_FIND_CAP_TTL;
@@ -622,7 +587,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
dev_info(&dev->dev, "Refused to change power state, "
"currently in D%d\n", dev->current_state);
- /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
+ /*
+ * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
* INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
* from D3hot to D0 _may_ perform an internal reset, thereby
* going to "D0 Uninitialized" rather than "D0 Initialized".
@@ -654,6 +620,16 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
if (dev->pm_cap) {
u16 pmcsr;
+ /*
+ * Configuration space is not accessible for device in
+ * D3cold, so just keep or set D3cold for safety
+ */
+ if (dev->current_state == PCI_D3cold)
+ return;
+ if (state == PCI_D3cold) {
+ dev->current_state = PCI_D3cold;
+ return;
+ }
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
} else {
@@ -662,6 +638,19 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
}
/**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+ if (platform_pci_power_manageable(dev))
+ platform_pci_set_power_state(dev, PCI_D0);
+
+ pci_raw_set_power_state(dev, PCI_D0);
+ pci_update_current_state(dev, PCI_D0);
+}
+
+/**
* pci_platform_power_transition - Use platform to change device power state
* @dev: PCI device to handle.
* @state: State to put the device into.
@@ -694,8 +683,50 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
*/
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
- if (state == PCI_D0)
+ if (state == PCI_D0) {
pci_platform_power_transition(dev, PCI_D0);
+ /*
+ * Mandatory power management transition delays, see
+ * PCI Express Base Specification Revision 2.0 Section
+ * 6.6.1: Conventional Reset. Do not delay for
+ * devices powered on/off by corresponding bridge,
+ * because have already delayed for the bridge.
+ */
+ if (dev->runtime_d3cold) {
+ msleep(dev->d3cold_delay);
+ /*
+ * When powering on a bridge from D3cold, the
+ * whole hierarchy may be powered on into
+ * D0uninitialized state, resume them to give
+ * them a chance to suspend again
+ */
+ pci_wakeup_bus(dev->subordinate);
+ }
+ }
+}
+
+/**
+ * __pci_dev_set_current_state - Set current state of a PCI device
+ * @dev: Device to handle
+ * @data: pointer to state to be set
+ */
+static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
+{
+ pci_power_t state = *(pci_power_t *)data;
+
+ dev->current_state = state;
+ return 0;
+}
+
+/**
+ * __pci_bus_set_current_state - Walk given bus and set current state of devices
+ * @bus: Top bus of the subtree to walk.
+ * @state: state to be set
+ */
+static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
+{
+ if (bus)
+ pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}
/**
@@ -707,8 +738,15 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
*/
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
- return state >= PCI_D0 ?
- pci_platform_power_transition(dev, state) : -EINVAL;
+ int ret;
+
+ if (state <= PCI_D0)
+ return -EINVAL;
+ ret = pci_platform_power_transition(dev, state);
+ /* Power off the bridge may power off the whole hierarchy */
+ if (!ret && state == PCI_D3cold)
+ __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
@@ -732,8 +770,8 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
int error;
/* bound the state we're entering */
- if (state > PCI_D3hot)
- state = PCI_D3hot;
+ if (state > PCI_D3cold)
+ state = PCI_D3cold;
else if (state < PCI_D0)
state = PCI_D0;
else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
@@ -744,14 +782,23 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
*/
return 0;
+ /* Check if we're already there */
+ if (dev->current_state == state)
+ return 0;
+
__pci_start_power_transition(dev, state);
/* This device is quirked not to be put into D3, so
don't put it in D3 */
- if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
+ if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
return 0;
- error = pci_raw_set_power_state(dev, state);
+ /*
+ * To put device in D3cold, we put device into D3hot in native
+ * way, then put device into D3cold with platform ops
+ */
+ error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
+ PCI_D3hot : state);
if (!__pci_complete_power_transition(dev, state))
error = 0;
@@ -822,12 +869,6 @@ EXPORT_SYMBOL(pci_choose_state);
((flags & PCI_EXP_FLAGS_VERS) > 1 || \
(type == PCI_EXP_TYPE_ROOT_PORT || \
type == PCI_EXP_TYPE_RC_EC))
-#define pcie_cap_has_devctl2(type, flags) \
- ((flags & PCI_EXP_FLAGS_VERS) > 1)
-#define pcie_cap_has_lnkctl2(type, flags) \
- ((flags & PCI_EXP_FLAGS_VERS) > 1)
-#define pcie_cap_has_sltctl2(type, flags) \
- ((flags & PCI_EXP_FLAGS_VERS) > 1)
static struct pci_cap_saved_state *pci_find_saved_cap(
struct pci_dev *pci_dev, char cap)
@@ -870,13 +911,14 @@ static int pci_save_pcie_state(struct pci_dev *dev)
pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
if (pcie_cap_has_rtctl(dev->pcie_type, flags))
pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
- if (pcie_cap_has_devctl2(dev->pcie_type, flags))
- pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
- if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
- pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
- if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
- pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
+ pos = pci_pcie_cap2(dev);
+ if (!pos)
+ return 0;
+
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
+ pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
+ pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
return 0;
}
@@ -903,12 +945,14 @@ static void pci_restore_pcie_state(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
if (pcie_cap_has_rtctl(dev->pcie_type, flags))
pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
- if (pcie_cap_has_devctl2(dev->pcie_type, flags))
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
- if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
- pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
- if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
- pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
+
+ pos = pci_pcie_cap2(dev);
+ if (!pos)
+ return;
+
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
+ pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
+ pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
@@ -1349,7 +1393,7 @@ void pcim_pin_device(struct pci_dev *pdev)
* is the default implementation. Architecture implementations can
* override this.
*/
-void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
+void __weak pcibios_disable_device (struct pci_dev *dev) {}
static void do_pci_disable_device(struct pci_dev *dev)
{
@@ -1413,8 +1457,8 @@ pci_disable_device(struct pci_dev *dev)
* Sets the PCIe reset state for the device. This is the default
* implementation. Architecture implementations can override this.
*/
-int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
- enum pcie_reset_state state)
+int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
+ enum pcie_reset_state state)
{
return -EINVAL;
}
@@ -1498,6 +1542,28 @@ void pci_pme_wakeup_bus(struct pci_bus *bus)
}
/**
+ * pci_wakeup - Wake up a PCI device
+ * @dev: Device to handle.
+ * @ign: ignored parameter
+ */
+static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
+{
+ pci_wakeup_event(pci_dev);
+ pm_request_resume(&pci_dev->dev);
+ return 0;
+}
+
+/**
+ * pci_wakeup_bus - Walk given bus and wake up devices on it
+ * @bus: Top bus of the subtree to walk.
+ */
+void pci_wakeup_bus(struct pci_bus *bus)
+{
+ if (bus)
+ pci_walk_bus(bus, pci_wakeup, NULL);
+}
+
+/**
* pci_pme_capable - check the capability of PCI device to generate PME#
* @dev: PCI device to handle.
* @state: PCI state from which device will issue PME#.
@@ -1518,6 +1584,16 @@ static void pci_pme_list_scan(struct work_struct *work)
if (!list_empty(&pci_pme_list)) {
list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
if (pme_dev->dev->pme_poll) {
+ struct pci_dev *bridge;
+
+ bridge = pme_dev->dev->bus->self;
+ /*
+ * If bridge is in low power state, the
+ * configuration space of subordinate devices
+ * may be not accessible
+ */
+ if (bridge && bridge->current_state != PCI_D0)
+ continue;
pci_pme_wakeup(pme_dev->dev, NULL);
} else {
list_del(&pme_dev->list);
@@ -1744,10 +1820,9 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
- /* Some devices mustn't be in D3 during system sleep */
- if (target_state == PCI_D3hot &&
- (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
- return 0;
+ /* D3cold during system suspend/hibernate is not supported */
+ if (target_state > PCI_D3hot)
+ target_state = PCI_D3hot;
pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
@@ -1786,12 +1861,16 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
if (target_state == PCI_POWER_ERROR)
return -EIO;
+ dev->runtime_d3cold = target_state == PCI_D3cold;
+
__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
error = pci_set_power_state(dev, target_state);
- if (error)
+ if (error) {
__pci_enable_wake(dev, target_state, true, false);
+ dev->runtime_d3cold = false;
+ }
return error;
}
@@ -1861,6 +1940,7 @@ void pci_pm_init(struct pci_dev *dev)
dev->pm_cap = pm;
dev->d3_delay = PCI_PM_D3_WAIT;
+ dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
dev->d1_support = false;
dev->d2_support = false;
@@ -1988,7 +2068,7 @@ void pci_enable_ari(struct pci_dev *dev)
{
int pos;
u32 cap;
- u16 flags, ctrl;
+ u16 ctrl;
struct pci_dev *bridge;
if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
@@ -1999,18 +2079,14 @@ void pci_enable_ari(struct pci_dev *dev)
return;
bridge = dev->bus->self;
- if (!bridge || !pci_is_pcie(bridge))
+ if (!bridge)
return;
- pos = pci_pcie_cap(bridge);
+ /* ARI is a PCIe cap v2 feature */
+ pos = pci_pcie_cap2(bridge);
if (!pos)
return;
- /* ARI is a PCIe v2 feature */
- pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
- if ((flags & PCI_EXP_FLAGS_VERS) < 2)
- return;
-
pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
if (!(cap & PCI_EXP_DEVCAP2_ARI))
return;
@@ -2023,7 +2099,7 @@ void pci_enable_ari(struct pci_dev *dev)
}
/**
- * pci_enable_ido - enable ID-based ordering on a device
+ * pci_enable_ido - enable ID-based Ordering on a device
* @dev: the PCI device
* @type: which types of IDO to enable
*
@@ -2036,7 +2112,8 @@ void pci_enable_ido(struct pci_dev *dev, unsigned long type)
int pos;
u16 ctrl;
- pos = pci_pcie_cap(dev);
+ /* ID-based Ordering is a PCIe cap v2 feature */
+ pos = pci_pcie_cap2(dev);
if (!pos)
return;
@@ -2059,10 +2136,8 @@ void pci_disable_ido(struct pci_dev *dev, unsigned long type)
int pos;
u16 ctrl;
- if (!pci_is_pcie(dev))
- return;
-
- pos = pci_pcie_cap(dev);
+ /* ID-based Ordering is a PCIe cap v2 feature */
+ pos = pci_pcie_cap2(dev);
if (!pos)
return;
@@ -2101,10 +2176,8 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
u16 ctrl;
int ret;
- if (!pci_is_pcie(dev))
- return -ENOTSUPP;
-
- pos = pci_pcie_cap(dev);
+ /* OBFF is a PCIe cap v2 feature */
+ pos = pci_pcie_cap2(dev);
if (!pos)
return -ENOTSUPP;
@@ -2113,7 +2186,7 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
return -ENOTSUPP; /* no OBFF support at all */
/* Make sure the topology supports OBFF as well */
- if (dev->bus) {
+ if (dev->bus->self) {
ret = pci_enable_obff(dev->bus->self, type);
if (ret)
return ret;
@@ -2154,10 +2227,8 @@ void pci_disable_obff(struct pci_dev *dev)
int pos;
u16 ctrl;
- if (!pci_is_pcie(dev))
- return;
-
- pos = pci_pcie_cap(dev);
+ /* OBFF is a PCIe cap v2 feature */
+ pos = pci_pcie_cap2(dev);
if (!pos)
return;
@@ -2174,15 +2245,13 @@ EXPORT_SYMBOL(pci_disable_obff);
* RETURNS:
* True if @dev supports latency tolerance reporting, false otherwise.
*/
-bool pci_ltr_supported(struct pci_dev *dev)
+static bool pci_ltr_supported(struct pci_dev *dev)
{
int pos;
u32 cap;
- if (!pci_is_pcie(dev))
- return false;
-
- pos = pci_pcie_cap(dev);
+ /* LTR is a PCIe cap v2 feature */
+ pos = pci_pcie_cap2(dev);
if (!pos)
return false;
@@ -2190,7 +2259,6 @@ bool pci_ltr_supported(struct pci_dev *dev)
return cap & PCI_EXP_DEVCAP2_LTR;
}
-EXPORT_SYMBOL(pci_ltr_supported);
/**
* pci_enable_ltr - enable latency tolerance reporting
@@ -2211,7 +2279,8 @@ int pci_enable_ltr(struct pci_dev *dev)
if (!pci_ltr_supported(dev))
return -ENOTSUPP;
- pos = pci_pcie_cap(dev);
+ /* LTR is a PCIe cap v2 feature */
+ pos = pci_pcie_cap2(dev);
if (!pos)
return -ENOTSUPP;
@@ -2220,7 +2289,7 @@ int pci_enable_ltr(struct pci_dev *dev)
return -EINVAL;
/* Enable upstream ports first */
- if (dev->bus) {
+ if (dev->bus->self) {
ret = pci_enable_ltr(dev->bus->self);
if (ret)
return ret;
@@ -2246,7 +2315,8 @@ void pci_disable_ltr(struct pci_dev *dev)
if (!pci_ltr_supported(dev))
return;
- pos = pci_pcie_cap(dev);
+ /* LTR is a PCIe cap v2 feature */
+ pos = pci_pcie_cap2(dev);
if (!pos)
return;
@@ -2365,6 +2435,75 @@ void pci_enable_acs(struct pci_dev *dev)
}
/**
+ * pci_acs_enabled - test ACS against required flags for a given device
+ * @pdev: device to test
+ * @acs_flags: required PCI ACS flags
+ *
+ * Return true if the device supports the provided flags. Automatically
+ * filters out flags that are not implemented on multifunction devices.
+ */
+bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
+{
+ int pos, ret;
+ u16 ctrl;
+
+ ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
+ if (ret >= 0)
+ return ret > 0;
+
+ if (!pci_is_pcie(pdev))
+ return false;
+
+ /* Filter out flags not applicable to multifunction */
+ if (pdev->multifunction)
+ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
+ PCI_ACS_EC | PCI_ACS_DT);
+
+ if (pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM ||
+ pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
+ pdev->multifunction) {
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return false;
+
+ pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+ if ((ctrl & acs_flags) != acs_flags)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
+ * @start: starting downstream device
+ * @end: ending upstream device or NULL to search to the root bus
+ * @acs_flags: required flags
+ *
+ * Walk up a device tree from start to end testing PCI ACS support. If
+ * any step along the way does not support the required flags, return false.
+ */
+bool pci_acs_path_enabled(struct pci_dev *start,
+ struct pci_dev *end, u16 acs_flags)
+{
+ struct pci_dev *pdev, *parent = start;
+
+ do {
+ pdev = parent;
+
+ if (!pci_acs_enabled(pdev, acs_flags))
+ return false;
+
+ if (pci_is_root_bus(pdev->bus))
+ return (end == NULL);
+
+ parent = pdev->bus->self;
+ } while (pdev != end);
+
+ return true;
+}
+
+/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
* @pin: the INTx pin (1=INTA, 2=INTB, 3=INTD, 4=INTD)
@@ -2671,6 +2810,18 @@ static void __pci_set_master(struct pci_dev *dev, bool enable)
}
/**
+ * pcibios_setup - process "pci=" kernel boot arguments
+ * @str: string used to pass in "pci=" kernel boot arguments
+ *
+ * Process kernel boot arguments. This is the default implementation.
+ * Architecture specific implementations can override this as necessary.
+ */
+char * __weak __init pcibios_setup(char *str)
+{
+ return str;
+}
+
+/**
* pcibios_set_master - enable PCI bus-mastering for device dev
* @dev: the PCI device to enable
*
@@ -2881,6 +3032,9 @@ bool pci_intx_mask_supported(struct pci_dev *dev)
bool mask_supported = false;
u16 orig, new;
+ if (dev->broken_intx_masking)
+ return false;
+
pci_cfg_access_lock(dev);
pci_read_config_word(dev, PCI_COMMAND, &orig);
@@ -3400,8 +3554,7 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
if (o != v) {
- if (v > o && dev->bus &&
- (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
+ if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
return -EIO;
cmd &= ~PCI_X_CMD_MAX_READ;
@@ -3856,7 +4009,7 @@ static void __devinit pci_no_domains(void)
* greater than 0xff). This is the default implementation. Architecture
* implementations can override this.
*/
-int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
+int __weak pci_ext_cfg_avail(struct pci_dev *dev)
{
return 1;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index e4943479b234..bacbcba69cf3 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -67,9 +67,11 @@ struct pci_platform_pm_ops {
extern int pci_set_platform_pm(struct pci_platform_pm_ops *ops);
extern void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
+extern void pci_power_up(struct pci_dev *dev);
extern void pci_disable_enabled_device(struct pci_dev *dev);
extern int pci_finish_runtime_suspend(struct pci_dev *dev);
extern int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
+extern void pci_wakeup_bus(struct pci_bus *bus);
extern void pci_pm_init(struct pci_dev *dev);
extern void platform_pci_wakeup_init(struct pci_dev *dev);
extern void pci_allocate_cap_save_buffers(struct pci_dev *dev);
@@ -86,13 +88,6 @@ static inline bool pci_is_bridge(struct pci_dev *pci_dev)
return !!(pci_dev->subordinate);
}
-extern int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
-extern int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
-extern int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
-extern int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
-extern int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
-extern int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
-
struct pci_vpd_ops {
ssize_t (*read)(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
ssize_t (*write)(struct pci_dev *dev, loff_t pos, size_t count, const void *buf);
@@ -124,7 +119,7 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) { return 0; }
#endif
/* Functions for PCI Hotplug drivers to use */
-extern unsigned int pci_do_scan_bus(struct pci_bus *bus);
+int pci_hp_add_bridge(struct pci_dev *dev);
#ifdef HAVE_PCI_LEGACY
extern void pci_create_legacy_files(struct pci_bus *bus);
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 275bf158ffa7..124f20ff11b2 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -59,7 +59,7 @@ static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
if (p->flags & ACPI_HEST_GLOBAL) {
- if ((info->pci_dev->is_pcie &&
+ if ((pci_is_pcie(info->pci_dev) &&
info->pci_dev->pcie_type == pcie_type) || bridge)
ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
} else
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index e0610bda1dea..3a7eefcb270a 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/init.h>
#include <linux/pcieport_if.h>
#include <linux/aer.h>
@@ -99,6 +100,51 @@ static int pcie_port_resume_noirq(struct device *dev)
return 0;
}
+#ifdef CONFIG_PM_RUNTIME
+struct d3cold_info {
+ bool no_d3cold;
+ unsigned int d3cold_delay;
+};
+
+static int pci_dev_d3cold_info(struct pci_dev *pdev, void *data)
+{
+ struct d3cold_info *info = data;
+
+ info->d3cold_delay = max_t(unsigned int, pdev->d3cold_delay,
+ info->d3cold_delay);
+ if (pdev->no_d3cold)
+ info->no_d3cold = true;
+ return 0;
+}
+
+static int pcie_port_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct d3cold_info d3cold_info = {
+ .no_d3cold = false,
+ .d3cold_delay = PCI_PM_D3_WAIT,
+ };
+
+ /*
+ * If any subordinate device disable D3cold, we should not put
+ * the port into D3cold. The D3cold delay of port should be
+ * the max of that of all subordinate devices.
+ */
+ pci_walk_bus(pdev->subordinate, pci_dev_d3cold_info, &d3cold_info);
+ pdev->no_d3cold = d3cold_info.no_d3cold;
+ pdev->d3cold_delay = d3cold_info.d3cold_delay;
+ return 0;
+}
+
+static int pcie_port_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+#else
+#define pcie_port_runtime_suspend NULL
+#define pcie_port_runtime_resume NULL
+#endif
+
static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.suspend = pcie_port_device_suspend,
.resume = pcie_port_device_resume,
@@ -107,6 +153,8 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.poweroff = pcie_port_device_suspend,
.restore = pcie_port_device_resume,
.resume_noirq = pcie_port_resume_noirq,
+ .runtime_suspend = pcie_port_runtime_suspend,
+ .runtime_resume = pcie_port_runtime_resume,
};
#define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops)
@@ -117,6 +165,14 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
#endif /* !PM */
/*
+ * PCIe port runtime suspend is broken for some chipsets, so use a
+ * black list to disable runtime PM for these chipsets.
+ */
+static const struct pci_device_id port_runtime_pm_black_list[] = {
+ { /* end: all zeroes */ }
+};
+
+/*
* pcie_portdrv_probe - Probe PCI-Express port devices
* @dev: PCI-Express port device being probed
*
@@ -144,12 +200,16 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
return status;
pci_save_state(dev);
+ if (!pci_match_id(port_runtime_pm_black_list, dev))
+ pm_runtime_put_noidle(&dev->dev);
return 0;
}
static void pcie_portdrv_remove(struct pci_dev *dev)
{
+ if (!pci_match_id(port_runtime_pm_black_list, dev))
+ pm_runtime_get_noresume(&dev->dev);
pcie_port_device_remove(dev);
pci_disable_device(dev);
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 658ac977cb56..6c143b4497ca 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -16,10 +16,47 @@
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR 3
+struct resource busn_resource = {
+ .name = "PCI busn",
+ .start = 0,
+ .end = 255,
+ .flags = IORESOURCE_BUS,
+};
+
/* Ugh. Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);
+static LIST_HEAD(pci_domain_busn_res_list);
+
+struct pci_domain_busn_res {
+ struct list_head list;
+ struct resource res;
+ int domain_nr;
+};
+
+static struct resource *get_pci_domain_busn_res(int domain_nr)
+{
+ struct pci_domain_busn_res *r;
+
+ list_for_each_entry(r, &pci_domain_busn_res_list, list)
+ if (r->domain_nr == domain_nr)
+ return &r->res;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ r->domain_nr = domain_nr;
+ r->res.start = 0;
+ r->res.end = 0xff;
+ r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
+
+ list_add_tail(&r->list, &pci_domain_busn_res_list);
+
+ return &r->res;
+}
+
static int find_anything(struct device *dev, void *data)
{
return 1;
@@ -152,9 +189,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
pci_read_config_dword(dev, pos, &sz);
pci_write_config_dword(dev, pos, l);
- if (!dev->mmio_always_on)
- pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
-
/*
* All bits set in sz means the device isn't working properly.
* If the BAR isn't implemented, all bits must be 0. If it's a
@@ -239,6 +273,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
}
out:
+ if (!dev->mmio_always_on)
+ pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
+
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
fail:
res->flags = 0;
@@ -269,34 +306,38 @@ static void __devinit pci_read_bridge_io(struct pci_bus *child)
{
struct pci_dev *dev = child->self;
u8 io_base_lo, io_limit_lo;
- unsigned long base, limit;
+ unsigned long io_mask, io_granularity, base, limit;
struct pci_bus_region region;
- struct resource *res, res2;
+ struct resource *res;
+
+ io_mask = PCI_IO_RANGE_MASK;
+ io_granularity = 0x1000;
+ if (dev->io_window_1k) {
+ /* Support 1K I/O space granularity */
+ io_mask = PCI_IO_1K_RANGE_MASK;
+ io_granularity = 0x400;
+ }
res = child->resource[0];
pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
- base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
- limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
+ base = (io_base_lo & io_mask) << 8;
+ limit = (io_limit_lo & io_mask) << 8;
if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
u16 io_base_hi, io_limit_hi;
+
pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
- base |= (io_base_hi << 16);
- limit |= (io_limit_hi << 16);
+ base |= ((unsigned long) io_base_hi << 16);
+ limit |= ((unsigned long) io_limit_hi << 16);
}
- if (base && base <= limit) {
+ if (base <= limit) {
res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
- res2.flags = res->flags;
region.start = base;
- region.end = limit + 0xfff;
- pcibios_bus_to_resource(dev, &res2, &region);
- if (!res->start)
- res->start = res2.start;
- if (!res->end)
- res->end = res2.end;
+ region.end = limit + io_granularity - 1;
+ pcibios_bus_to_resource(dev, res, &region);
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
}
}
@@ -312,9 +353,9 @@ static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
res = child->resource[1];
pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
- base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
- limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
- if (base && base <= limit) {
+ base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
+ limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
+ if (base <= limit) {
res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
region.start = base;
region.end = limit + 0xfffff;
@@ -334,11 +375,12 @@ static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
res = child->resource[2];
pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
- base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
- limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
+ base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
+ limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
u32 mem_base_hi, mem_limit_hi;
+
pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
@@ -349,8 +391,8 @@ static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
*/
if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
- base |= ((long) mem_base_hi) << 32;
- limit |= ((long) mem_limit_hi) << 32;
+ base |= ((unsigned long) mem_base_hi) << 32;
+ limit |= ((unsigned long) mem_limit_hi) << 32;
#else
if (mem_base_hi || mem_limit_hi) {
dev_err(&dev->dev, "can't handle 64-bit "
@@ -360,7 +402,7 @@ static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
#endif
}
}
- if (base && base <= limit) {
+ if (base <= limit) {
res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
IORESOURCE_MEM | IORESOURCE_PREFETCH;
if (res->flags & PCI_PREF_RANGE_TYPE_64)
@@ -381,8 +423,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
return;
- dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
- child->secondary, child->subordinate,
+ dev_info(&dev->dev, "PCI bridge to %pR%s\n",
+ &child->busn_res,
dev->transparent ? " (subtractive decode)" : "");
pci_bus_remove_resources(child);
@@ -599,9 +641,9 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
* Set up the primary, secondary and subordinate
* bus numbers.
*/
- child->number = child->secondary = busnr;
- child->primary = parent->secondary;
- child->subordinate = 0xff;
+ child->number = child->busn_res.start = busnr;
+ child->primary = parent->busn_res.start;
+ child->busn_res.end = 0xff;
if (!bridge)
return child;
@@ -643,8 +685,8 @@ static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
if (!pcibios_assign_all_busses())
return;
- while (parent->parent && parent->subordinate < max) {
- parent->subordinate = max;
+ while (parent->parent && parent->busn_res.end < max) {
+ parent->busn_res.end = max;
pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
parent = parent->parent;
}
@@ -718,15 +760,15 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
if (!child)
goto out;
child->primary = primary;
- child->subordinate = subordinate;
+ pci_bus_insert_busn_res(child, secondary, subordinate);
child->bridge_ctl = bctl;
}
cmax = pci_scan_child_bus(child);
if (cmax > max)
max = cmax;
- if (child->subordinate > max)
- max = child->subordinate;
+ if (child->busn_res.end > max)
+ max = child->busn_res.end;
} else {
/*
* We need to assign a number to this bus which we always
@@ -756,11 +798,12 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
child = pci_add_new_bus(bus, dev, ++max);
if (!child)
goto out;
+ pci_bus_insert_busn_res(child, max, 0xff);
}
buses = (buses & 0xff000000)
| ((unsigned int)(child->primary) << 0)
- | ((unsigned int)(child->secondary) << 8)
- | ((unsigned int)(child->subordinate) << 16);
+ | ((unsigned int)(child->busn_res.start) << 8)
+ | ((unsigned int)(child->busn_res.end) << 16);
/*
* yenta.c forces a secondary latency timer of 176.
@@ -805,8 +848,8 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
break;
while (parent->parent) {
if ((!pcibios_assign_all_busses()) &&
- (parent->subordinate > max) &&
- (parent->subordinate <= max+i)) {
+ (parent->busn_res.end > max) &&
+ (parent->busn_res.end <= max+i)) {
j = 1;
}
parent = parent->parent;
@@ -827,7 +870,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
/*
* Set the subordinate bus number to its real value.
*/
- child->subordinate = max;
+ pci_bus_update_busn_res_end(child, max);
pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
}
@@ -837,19 +880,19 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
/* Has only triggered on CardBus, fixup is in yenta_socket */
while (bus->parent) {
- if ((child->subordinate > bus->subordinate) ||
- (child->number > bus->subordinate) ||
+ if ((child->busn_res.end > bus->busn_res.end) ||
+ (child->number > bus->busn_res.end) ||
(child->number < bus->number) ||
- (child->subordinate < bus->number)) {
- dev_info(&child->dev, "[bus %02x-%02x] %s "
- "hidden behind%s bridge %s [bus %02x-%02x]\n",
- child->number, child->subordinate,
- (bus->number > child->subordinate &&
- bus->subordinate < child->number) ?
+ (child->busn_res.end < bus->number)) {
+ dev_info(&child->dev, "%pR %s "
+ "hidden behind%s bridge %s %pR\n",
+ &child->busn_res,
+ (bus->number > child->busn_res.end &&
+ bus->busn_res.end < child->number) ?
"wholly" : "partially",
bus->self->transparent ? " transparent" : "",
dev_name(&bus->dev),
- bus->number, bus->subordinate);
+ &bus->busn_res);
}
bus = bus->parent;
}
@@ -1548,7 +1591,7 @@ EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
{
- unsigned int devfn, pass, max = bus->secondary;
+ unsigned int devfn, pass, max = bus->busn_res.start;
struct pci_dev *dev;
dev_dbg(&bus->dev, "scanning bus\n");
@@ -1642,7 +1685,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(b);
- b->number = b->secondary = bus;
+ b->number = b->busn_res.start = bus;
if (parent)
dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
@@ -1654,7 +1697,10 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
list_move_tail(&window->list, &bridge->windows);
res = window->res;
offset = window->offset;
- pci_bus_add_resource(b, res, 0);
+ if (res->flags & IORESOURCE_BUS)
+ pci_bus_insert_busn_res(b, bus, res->end);
+ else
+ pci_bus_add_resource(b, res, 0);
if (offset) {
if (resource_type(res) == IORESOURCE_IO)
fmt = " (bus address [%#06llx-%#06llx])";
@@ -1684,16 +1730,104 @@ err_out:
return NULL;
}
+int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
+{
+ struct resource *res = &b->busn_res;
+ struct resource *parent_res, *conflict;
+
+ res->start = bus;
+ res->end = bus_max;
+ res->flags = IORESOURCE_BUS;
+
+ if (!pci_is_root_bus(b))
+ parent_res = &b->parent->busn_res;
+ else {
+ parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
+ res->flags |= IORESOURCE_PCI_FIXED;
+ }
+
+ conflict = insert_resource_conflict(parent_res, res);
+
+ if (conflict)
+ dev_printk(KERN_DEBUG, &b->dev,
+ "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
+ res, pci_is_root_bus(b) ? "domain " : "",
+ parent_res, conflict->name, conflict);
+ else
+ dev_printk(KERN_DEBUG, &b->dev,
+ "busn_res: %pR is inserted under %s%pR\n",
+ res, pci_is_root_bus(b) ? "domain " : "",
+ parent_res);
+
+ return conflict == NULL;
+}
+
+int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
+{
+ struct resource *res = &b->busn_res;
+ struct resource old_res = *res;
+ resource_size_t size;
+ int ret;
+
+ if (res->start > bus_max)
+ return -EINVAL;
+
+ size = bus_max - res->start + 1;
+ ret = adjust_resource(res, res->start, size);
+ dev_printk(KERN_DEBUG, &b->dev,
+ "busn_res: %pR end %s updated to %02x\n",
+ &old_res, ret ? "can not be" : "is", bus_max);
+
+ if (!ret && !res->parent)
+ pci_bus_insert_busn_res(b, res->start, res->end);
+
+ return ret;
+}
+
+void pci_bus_release_busn_res(struct pci_bus *b)
+{
+ struct resource *res = &b->busn_res;
+ int ret;
+
+ if (!res->flags || !res->parent)
+ return;
+
+ ret = release_resource(res);
+ dev_printk(KERN_DEBUG, &b->dev,
+ "busn_res: %pR %s released\n",
+ res, ret ? "can not be" : "is");
+}
+
struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
+ struct pci_host_bridge_window *window;
+ bool found = false;
struct pci_bus *b;
+ int max;
+
+ list_for_each_entry(window, resources, list)
+ if (window->res->flags & IORESOURCE_BUS) {
+ found = true;
+ break;
+ }
b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
if (!b)
return NULL;
- b->subordinate = pci_scan_child_bus(b);
+ if (!found) {
+ dev_info(&b->dev,
+ "No busn resource found for root bus, will use [bus %02x-ff]\n",
+ bus);
+ pci_bus_insert_busn_res(b, bus, 255);
+ }
+
+ max = pci_scan_child_bus(b);
+
+ if (!found)
+ pci_bus_update_busn_res_end(b, max);
+
pci_bus_add_devices(b);
return b;
}
@@ -1708,9 +1842,10 @@ struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
pci_add_resource(&resources, &ioport_resource);
pci_add_resource(&resources, &iomem_resource);
+ pci_add_resource(&resources, &busn_resource);
b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
if (b)
- b->subordinate = pci_scan_child_bus(b);
+ pci_scan_child_bus(b);
else
pci_free_resource_list(&resources);
return b;
@@ -1725,9 +1860,10 @@ struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
pci_add_resource(&resources, &ioport_resource);
pci_add_resource(&resources, &iomem_resource);
+ pci_add_resource(&resources, &busn_resource);
b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
if (b) {
- b->subordinate = pci_scan_child_bus(b);
+ pci_scan_child_bus(b);
pci_bus_add_devices(b);
} else {
pci_free_resource_list(&resources);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 194b243a2817..51553179e967 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -253,7 +253,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx)
* workaround applied too
* [Info kindly provided by ALi]
*/
-static void __init quirk_alimagik(struct pci_dev *dev)
+static void __devinit quirk_alimagik(struct pci_dev *dev)
{
if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) {
dev_info(&dev->dev, "Limiting direct PCI/PCI transfers\n");
@@ -789,7 +789,7 @@ static void __devinit quirk_amd_ioapic(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
-static void __init quirk_ioapic_rmw(struct pci_dev *dev)
+static void __devinit quirk_ioapic_rmw(struct pci_dev *dev)
{
if (dev->devfn == 0 && dev->bus->number == 0)
sis_apic_bug = 1;
@@ -801,7 +801,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw);
* Some settings of MMRBC can lead to data corruption so block changes.
* See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide
*/
-static void __init quirk_amd_8131_mmrbc(struct pci_dev *dev)
+static void __devinit quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
if (dev->subordinate && dev->revision <= 0x12) {
dev_info(&dev->dev, "AMD8131 rev %x detected; "
@@ -1039,7 +1039,7 @@ static void quirk_disable_pxb(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
-static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev)
+static void quirk_amd_ide_mode(struct pci_dev *pdev)
{
/* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */
u8 tmp;
@@ -1082,7 +1082,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB
/*
* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same
*/
-static void __init quirk_ide_samemode(struct pci_dev *pdev)
+static void __devinit quirk_ide_samemode(struct pci_dev *pdev)
{
u8 prog;
@@ -1121,7 +1121,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
/* This was originally an Alpha specific thing, but it really fits here.
* The i82375 PCI/EISA bridge appears as non-classified. Fix that.
*/
-static void __init quirk_eisa_bridge(struct pci_dev *dev)
+static void __devinit quirk_eisa_bridge(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_EISA << 8;
}
@@ -1155,7 +1155,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_e
*/
static int asus_hides_smbus;
-static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
+static void __devinit asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
@@ -1538,7 +1538,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB3
#endif
#ifdef CONFIG_X86_IO_APIC
-static void __init quirk_alder_ioapic(struct pci_dev *pdev)
+static void __devinit quirk_alder_ioapic(struct pci_dev *pdev)
{
int i;
@@ -1777,7 +1777,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, qui
* but the PIO transfers won't work if BAR0 falls at the odd 8 bytes.
* Re-allocate the region if needed...
*/
-static void __init quirk_tc86c001_ide(struct pci_dev *dev)
+static void __devinit quirk_tc86c001_ide(struct pci_dev *dev)
{
struct resource *r = &dev->resource[0];
@@ -1938,53 +1938,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1
static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev)
{
u16 en1k;
- u8 io_base_lo, io_limit_lo;
- unsigned long base, limit;
- struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES;
pci_read_config_word(dev, 0x40, &en1k);
if (en1k & 0x200) {
dev_info(&dev->dev, "Enable I/O Space to 1KB granularity\n");
-
- pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
- pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
- base = (io_base_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8;
- limit = (io_limit_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8;
-
- if (base <= limit) {
- res->start = base;
- res->end = limit + 0x3ff;
- }
+ dev->io_window_1k = 1;
}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
-/* Fix the IOBL_ADR for 1k I/O space granularity on the Intel P64H2
- * The IOBL_ADR gets re-written to 4k boundaries in pci_setup_bridge()
- * in drivers/pci/setup-bus.c
- */
-static void __devinit quirk_p64h2_1k_io_fix_iobl(struct pci_dev *dev)
-{
- u16 en1k, iobl_adr, iobl_adr_1k;
- struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES;
-
- pci_read_config_word(dev, 0x40, &en1k);
-
- if (en1k & 0x200) {
- pci_read_config_word(dev, PCI_IO_BASE, &iobl_adr);
-
- iobl_adr_1k = iobl_adr | (res->start >> 8) | (res->end & 0xfc00);
-
- if (iobl_adr != iobl_adr_1k) {
- dev_info(&dev->dev, "Fixing P64H2 IOBL_ADR from 0x%x to 0x%x for 1KB granularity\n",
- iobl_adr,iobl_adr_1k);
- pci_write_config_word(dev, PCI_IO_BASE, iobl_adr_1k);
- }
- }
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io_fix_iobl);
-
/* Under some circumstances, AER is not linked with extended capabilities.
* Force it to be linked by setting the corresponding control bit in the
* config space.
@@ -2104,7 +2067,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
PCI_DEVICE_ID_NX2_5709S,
quirk_brcm_570x_limit_vpd);
-static void __devinit quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
+static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
{
u32 rev;
@@ -2143,9 +2106,9 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
quirk_unhide_mch_dev6);
-#ifdef CONFIG_TILE
+#ifdef CONFIG_TILEPRO
/*
- * The Tilera TILEmpower platform needs to set the link speed
+ * The Tilera TILEmpower tilepro platform needs to set the link speed
* to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed
* setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe
* capability register of the PEX8624 PCIe switch. The switch
@@ -2160,7 +2123,7 @@ static void __devinit quirk_tile_plx_gen1(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
-#endif /* CONFIG_TILE */
+#endif /* CONFIG_TILEPRO */
#ifdef CONFIG_PCI_MSI
/* Some chipsets do not support MSI. We cannot easily rely on setting
@@ -2169,7 +2132,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
* aware of it. Instead of setting the flag on all busses in the
* machine, simply disable MSI globally.
*/
-static void __init quirk_disable_all_msi(struct pci_dev *dev)
+static void __devinit quirk_disable_all_msi(struct pci_dev *dev)
{
pci_no_msi();
dev_warn(&dev->dev, "MSI quirk detected; MSI disabled\n");
@@ -2217,7 +2180,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
-static int __devinit msi_ht_cap_enabled(struct pci_dev *dev)
+static int msi_ht_cap_enabled(struct pci_dev *dev)
{
int pos, ttl = 48;
@@ -2241,7 +2204,7 @@ static int __devinit msi_ht_cap_enabled(struct pci_dev *dev)
}
/* Check the hypertransport MSI mapping to know whether MSI is enabled or not */
-static void __devinit quirk_msi_ht_cap(struct pci_dev *dev)
+static void quirk_msi_ht_cap(struct pci_dev *dev)
{
if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
dev_warn(&dev->dev, "MSI quirk detected; "
@@ -2255,7 +2218,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2
/* The nVidia CK804 chipset may have 2 HT MSI mappings.
* MSI are supported if the MSI capability set in any of these mappings.
*/
-static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
+static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
struct pci_dev *pdev;
@@ -2279,7 +2242,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
quirk_nvidia_ck804_msi_ht_cap);
/* Force enable MSI mapping capability on HT bridges */
-static void __devinit ht_enable_msi_mapping(struct pci_dev *dev)
+static void ht_enable_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
@@ -2359,7 +2322,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
nvbridge_check_legacy_irq_routing);
-static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
+static int ht_check_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
int found = 0;
@@ -2387,7 +2350,7 @@ static int __devinit ht_check_msi_mapping(struct pci_dev *dev)
return found;
}
-static int __devinit host_bridge_with_leaf(struct pci_dev *host_bridge)
+static int host_bridge_with_leaf(struct pci_dev *host_bridge)
{
struct pci_dev *dev;
int pos;
@@ -2421,7 +2384,7 @@ static int __devinit host_bridge_with_leaf(struct pci_dev *host_bridge)
#define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control */
#define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control to */
-static int __devinit is_end_of_ht_chain(struct pci_dev *dev)
+static int is_end_of_ht_chain(struct pci_dev *dev)
{
int pos, ctrl_off;
int end = 0;
@@ -2445,7 +2408,7 @@ out:
return end;
}
-static void __devinit nv_ht_enable_msi_mapping(struct pci_dev *dev)
+static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
struct pci_dev *host_bridge;
int pos;
@@ -2484,7 +2447,7 @@ out:
pci_dev_put(host_bridge);
}
-static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
+static void ht_disable_msi_mapping(struct pci_dev *dev)
{
int pos, ttl = 48;
@@ -2504,7 +2467,7 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
}
}
-static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
+static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
struct pci_dev *host_bridge;
int pos;
@@ -2541,23 +2504,26 @@ static void __devinit __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
else
nv_ht_enable_msi_mapping(dev);
}
- return;
+ goto out;
}
/* HT MSI is not enabled */
if (found == 1)
- return;
+ goto out;
/* Host bridge is not to HT, disable HT MSI mapping on this device */
ht_disable_msi_mapping(dev);
+
+out:
+ pci_dev_put(host_bridge);
}
-static void __devinit nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
+static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 1);
}
-static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
+static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
return __nv_msi_ht_cap_quirk(dev, 0);
}
@@ -2879,20 +2845,34 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
-static void do_one_fixup_debug(void (*fn)(struct pci_dev *dev), struct pci_dev *dev)
+static ktime_t fixup_debug_start(struct pci_dev *dev,
+ void (*fn)(struct pci_dev *dev))
+{
+ ktime_t calltime = ktime_set(0, 0);
+
+ dev_dbg(&dev->dev, "calling %pF\n", fn);
+ if (initcall_debug) {
+ pr_debug("calling %pF @ %i for %s\n",
+ fn, task_pid_nr(current), dev_name(&dev->dev));
+ calltime = ktime_get();
+ }
+
+ return calltime;
+}
+
+static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
+ void (*fn)(struct pci_dev *dev))
{
- ktime_t calltime, delta, rettime;
+ ktime_t delta, rettime;
unsigned long long duration;
- printk(KERN_DEBUG "calling %pF @ %i for %s\n",
- fn, task_pid_nr(current), dev_name(&dev->dev));
- calltime = ktime_get();
- fn(dev);
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- printk(KERN_DEBUG "pci fixup %pF returned after %lld usecs for %s\n",
- fn, duration, dev_name(&dev->dev));
+ if (initcall_debug) {
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ pr_debug("pci fixup %pF returned after %lld usecs for %s\n",
+ fn, duration, dev_name(&dev->dev));
+ }
}
/*
@@ -2930,34 +2910,24 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
/*
- * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
- * ASUS motherboards will cause memory corruption or a system crash
- * if they are in D3 while the system is put into S3 sleep.
+ * Some devices may pass our check in pci_intx_mask_supported if
+ * PCI_COMMAND_INTX_DISABLE works though they actually do not properly
+ * support this feature.
*/
-static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
+static void __devinit quirk_broken_intx_masking(struct pci_dev *dev)
{
- const char *sys_info;
- static const char good_Asus_board[] = "P8Z68-V";
-
- if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
- return;
- if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
- return;
- sys_info = dmi_get_system_info(DMI_BOARD_NAME);
- if (sys_info && memcmp(sys_info, good_Asus_board,
- sizeof(good_Asus_board) - 1) == 0)
- return;
-
- dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
- dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
- device_set_wakeup_capable(&dev->dev, false);
+ dev->broken_intx_masking = 1;
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, 0x0030,
+ quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */
+ quirk_broken_intx_masking);
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
+ ktime_t calltime;
+
for (; f < end; f++)
if ((f->class == (u32) (dev->class >> f->class_shift) ||
f->class == (u32) PCI_ANY_ID) &&
@@ -2965,11 +2935,9 @@ static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
f->vendor == (u16) PCI_ANY_ID) &&
(f->device == dev->device ||
f->device == (u16) PCI_ANY_ID)) {
- dev_dbg(&dev->dev, "calling %pF\n", f->hook);
- if (initcall_debug)
- do_one_fixup_debug(f->hook, dev);
- else
- f->hook(dev);
+ calltime = fixup_debug_start(dev, f->hook);
+ f->hook(dev);
+ fixup_debug_report(dev, calltime, f->hook);
}
}
@@ -2988,6 +2956,7 @@ extern struct pci_fixup __end_pci_fixups_resume_early[];
extern struct pci_fixup __start_pci_fixups_suspend[];
extern struct pci_fixup __end_pci_fixups_suspend[];
+static bool pci_apply_fixup_final_quirks;
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
@@ -3005,6 +2974,8 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
break;
case pci_fixup_final:
+ if (!pci_apply_fixup_final_quirks)
+ return;
start = __start_pci_fixups_final;
end = __end_pci_fixups_final;
break;
@@ -3037,6 +3008,7 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_fixup_device);
+
static int __init pci_apply_final_quirks(void)
{
struct pci_dev *dev = NULL;
@@ -3047,6 +3019,7 @@ static int __init pci_apply_final_quirks(void)
printk(KERN_DEBUG "PCI: CLS %u bytes\n",
pci_cache_line_size << 2);
+ pci_apply_fixup_final_quirks = true;
for_each_pci_dev(dev) {
pci_fixup_device(pci_fixup_final, dev);
/*
@@ -3067,6 +3040,7 @@ static int __init pci_apply_final_quirks(void)
pci_cache_line_size = pci_dfl_cache_line_size;
}
}
+
if (!pci_cache_line_size) {
printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
cls << 2, pci_dfl_cache_line_size << 2);
@@ -3205,3 +3179,87 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
return -ENOTTY;
}
+
+static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
+{
+ if (!PCI_FUNC(dev->devfn))
+ return pci_dev_get(dev);
+
+ return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+}
+
+static const struct pci_dev_dma_source {
+ u16 vendor;
+ u16 device;
+ struct pci_dev *(*dma_source)(struct pci_dev *dev);
+} pci_dev_dma_source[] = {
+ /*
+ * https://bugzilla.redhat.com/show_bug.cgi?id=605888
+ *
+ * Some Ricoh devices use the function 0 source ID for DMA on
+ * other functions of a multifunction device. The DMA devices
+ * is therefore function 0, which will have implications of the
+ * iommu grouping of these devices.
+ */
+ { PCI_VENDOR_ID_RICOH, 0xe822, pci_func_0_dma_source },
+ { PCI_VENDOR_ID_RICOH, 0xe230, pci_func_0_dma_source },
+ { PCI_VENDOR_ID_RICOH, 0xe832, pci_func_0_dma_source },
+ { PCI_VENDOR_ID_RICOH, 0xe476, pci_func_0_dma_source },
+ { 0 }
+};
+
+/*
+ * IOMMUs with isolation capabilities need to be programmed with the
+ * correct source ID of a device. In most cases, the source ID matches
+ * the device doing the DMA, but sometimes hardware is broken and will
+ * tag the DMA as being sourced from a different device. This function
+ * allows that translation. Note that the reference count of the
+ * returned device is incremented on all paths.
+ */
+struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
+{
+ const struct pci_dev_dma_source *i;
+
+ for (i = pci_dev_dma_source; i->dma_source; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID))
+ return i->dma_source(dev);
+ }
+
+ return pci_dev_get(dev);
+}
+
+static const struct pci_dev_acs_enabled {
+ u16 vendor;
+ u16 device;
+ int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
+} pci_dev_acs_enabled[] = {
+ { 0 }
+};
+
+int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
+{
+ const struct pci_dev_acs_enabled *i;
+ int ret;
+
+ /*
+ * Allow devices that do not expose standard PCIe ACS capabilities
+ * or control to indicate their support here. Multi-function express
+ * devices which do not allow internal peer-to-peer between functions,
+ * but do not implement PCIe ACS may wish to return true here.
+ */
+ for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
+ if ((i->vendor == dev->vendor ||
+ i->vendor == (u16)PCI_ANY_ID) &&
+ (i->device == dev->device ||
+ i->device == (u16)PCI_ANY_ID)) {
+ ret = i->acs_enabled(dev, acs_flags);
+ if (ret >= 0)
+ return ret;
+ }
+ }
+
+ return -ENOTTY;
+}
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index fd77e2bde2e8..04a4861b4749 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -68,6 +68,7 @@ void pci_remove_bus(struct pci_bus *pci_bus)
down_write(&pci_bus_sem);
list_del(&pci_bus->node);
+ pci_bus_release_busn_res(pci_bus);
up_write(&pci_bus_sem);
if (!pci_bus->is_added)
return;
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 9d75dc8ca602..993d4a0a2469 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -15,6 +15,8 @@
#include "pci.h"
DECLARE_RWSEM(pci_bus_sem);
+EXPORT_SYMBOL_GPL(pci_bus_sem);
+
/*
* find the upstream PCIe-to-PCI bridge of a PCI device
* if the device is PCIE, return NULL
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 8fa2d4be88de..fb506137aaee 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -265,7 +265,7 @@ out:
* assign_requested_resources_sorted() - satisfy resource requests
*
* @head : head of the list tracking requests for resources
- * @failed_list : head of the list tracking requests that could
+ * @fail_head : head of the list tracking requests that could
* not be allocated
*
* Satisfy resource requests of each element in the list. Add
@@ -308,7 +308,7 @@ static void __assign_resources_sorted(struct list_head *head,
* Should not assign requested resources at first.
* they could be adjacent, so later reassign can not reallocate
* them one by one in parent resource window.
- * Try to assign requested + add_size at begining
+ * Try to assign requested + add_size at beginning
* if could do that, could get out early.
* if could not do that, we still try to assign requested at first,
* then try to reassign add_size for some resources.
@@ -404,8 +404,8 @@ void pci_setup_cardbus(struct pci_bus *bus)
struct resource *res;
struct pci_bus_region region;
- dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
- bus->secondary, bus->subordinate);
+ dev_info(&bridge->dev, "CardBus bridge to %pR\n",
+ &bus->busn_res);
res = bus->resource[0];
pcibios_resource_to_bus(bridge, &region, res);
@@ -469,16 +469,23 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
+ unsigned long io_mask;
+ u8 io_base_lo, io_limit_lo;
u32 l, io_upper16;
+ io_mask = PCI_IO_RANGE_MASK;
+ if (bridge->io_window_1k)
+ io_mask = PCI_IO_1K_RANGE_MASK;
+
/* Set up the top and bottom of the PCI I/O segment for this bus. */
res = bus->resource[0];
pcibios_resource_to_bus(bridge, &region, res);
if (res->flags & IORESOURCE_IO) {
pci_read_config_dword(bridge, PCI_IO_BASE, &l);
l &= 0xffff0000;
- l |= (region.start >> 8) & 0x00f0;
- l |= region.end & 0xf000;
+ io_base_lo = (region.start >> 8) & io_mask;
+ io_limit_lo = (region.end >> 8) & io_mask;
+ l |= ((u32) io_limit_lo << 8) | io_base_lo;
/* Set up upper 16 bits of I/O base/limit. */
io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
dev_info(&bridge->dev, " bridge window %pR\n", res);
@@ -553,8 +560,8 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
{
struct pci_dev *bridge = bus->self;
- dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
- bus->secondary, bus->subordinate);
+ dev_info(&bridge->dev, "PCI bridge to %pR\n",
+ &bus->busn_res);
if (type & IORESOURCE_IO)
pci_setup_bridge_io(bus);
@@ -699,7 +706,7 @@ static resource_size_t calculate_memsize(resource_size_t size,
* @realloc_head : track the additional io window on this list
*
* Sizing the IO windows of the PCI-PCI bridge is trivial,
- * since these windows have 4K granularity and the IO ranges
+ * since these windows have 1K or 4K granularity and the IO ranges
* of non-bridge PCI devices are limited to 256 bytes.
* We must be careful with the ISA aliasing though.
*/
@@ -710,10 +717,17 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
unsigned long size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
+ resource_size_t min_align = 4096, align;
if (!b_res)
return;
+ /*
+ * Per spec, I/O windows are 4K-aligned, but some bridges have an
+ * extension to support 1K alignment.
+ */
+ if (bus->self->io_window_1k)
+ min_align = 1024;
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
@@ -731,34 +745,43 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
else
size1 += r_size;
+ align = pci_resource_alignment(dev, r);
+ if (align > min_align)
+ min_align = align;
+
if (realloc_head)
children_add_size += get_res_add_size(realloc_head, r);
}
}
+
+ if (min_align > 4096)
+ min_align = 4096;
+
size0 = calculate_iosize(size, min_size, size1,
- resource_size(b_res), 4096);
+ resource_size(b_res), min_align);
if (children_add_size > add_size)
add_size = children_add_size;
size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
calculate_iosize(size, min_size, add_size + size1,
- resource_size(b_res), 4096);
+ resource_size(b_res), min_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
dev_info(&bus->self->dev, "disabling bridge window "
- "%pR to [bus %02x-%02x] (unused)\n", b_res,
- bus->secondary, bus->subordinate);
+ "%pR to %pR (unused)\n", b_res,
+ &bus->busn_res);
b_res->flags = 0;
return;
}
- /* Alignment of the IO window is always 4K */
- b_res->start = 4096;
+
+ b_res->start = min_align;
b_res->end = b_res->start + size0 - 1;
b_res->flags |= IORESOURCE_STARTALIGN;
if (size1 > size0 && realloc_head) {
- add_to_list(realloc_head, bus->self, b_res, size1-size0, 4096);
+ add_to_list(realloc_head, bus->self, b_res, size1-size0,
+ min_align);
dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
- "%pR to [bus %02x-%02x] add_size %lx\n", b_res,
- bus->secondary, bus->subordinate, size1-size0);
+ "%pR to %pR add_size %lx\n", b_res,
+ &bus->busn_res, size1-size0);
}
}
@@ -863,8 +886,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (!size0 && !size1) {
if (b_res->start || b_res->end)
dev_info(&bus->self->dev, "disabling bridge window "
- "%pR to [bus %02x-%02x] (unused)\n", b_res,
- bus->secondary, bus->subordinate);
+ "%pR to %pR (unused)\n", b_res,
+ &bus->busn_res);
b_res->flags = 0;
return 1;
}
@@ -874,8 +897,8 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
if (size1 > size0 && realloc_head) {
add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align);
dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
- "%pR to [bus %02x-%02x] add_size %llx\n", b_res,
- bus->secondary, bus->subordinate, (unsigned long long)size1-size0);
+ "%pR to %pR add_size %llx\n", b_res,
+ &bus->busn_res, (unsigned long long)size1-size0);
}
return 1;
}
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index eea85dafc763..81b88bda7930 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -30,6 +30,8 @@
void pci_update_resource(struct pci_dev *dev, int resno)
{
struct pci_bus_region region;
+ bool disable;
+ u16 cmd;
u32 new, check, mask;
int reg;
enum pci_bar_type type;
@@ -67,6 +69,18 @@ void pci_update_resource(struct pci_dev *dev, int resno)
new |= PCI_ROM_ADDRESS_ENABLE;
}
+ /*
+ * We can't update a 64-bit BAR atomically, so when possible,
+ * disable decoding so that a half-updated BAR won't conflict
+ * with another device.
+ */
+ disable = (res->flags & IORESOURCE_MEM_64) && !dev->mmio_always_on;
+ if (disable) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_write_config_word(dev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_MEMORY);
+ }
+
pci_write_config_dword(dev, reg, new);
pci_read_config_dword(dev, reg, &check);
@@ -84,6 +98,10 @@ void pci_update_resource(struct pci_dev *dev, int resno)
"(high %#08x != %#08x)\n", resno, new, check);
}
}
+
+ if (disable)
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
res->flags &= ~IORESOURCE_UNSET;
dev_dbg(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
resno, res, (unsigned long long)region.start,
@@ -127,33 +145,6 @@ void pci_disable_bridge_window(struct pci_dev *dev)
pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff);
}
-static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
- int resno, resource_size_t size, resource_size_t align)
-{
- struct resource *res = dev->resource + resno;
- resource_size_t min;
- int ret;
-
- min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
-
- /* First, try exact prefetching match.. */
- ret = pci_bus_alloc_resource(bus, res, size, align, min,
- IORESOURCE_PREFETCH,
- pcibios_align_resource, dev);
-
- if (ret < 0 && (res->flags & IORESOURCE_PREFETCH)) {
- /*
- * That failed.
- *
- * But a prefetching area can handle a non-prefetching
- * window (it will just not perform as well).
- */
- ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
- pcibios_align_resource, dev);
- }
- return ret;
-}
-
/*
* Generic function that returns a value indicating that the device's
* original BIOS BAR address was not saved and so is not available for
@@ -206,7 +197,35 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
return ret;
}
-static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resource_size_t min_align)
+static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
+ int resno, resource_size_t size, resource_size_t align)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t min;
+ int ret;
+
+ min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;
+
+ /* First, try exact prefetching match.. */
+ ret = pci_bus_alloc_resource(bus, res, size, align, min,
+ IORESOURCE_PREFETCH,
+ pcibios_align_resource, dev);
+
+ if (ret < 0 && (res->flags & IORESOURCE_PREFETCH)) {
+ /*
+ * That failed.
+ *
+ * But a prefetching area can handle a non-prefetching
+ * window (it will just not perform as well).
+ */
+ ret = pci_bus_alloc_resource(bus, res, size, align, min, 0,
+ pcibios_align_resource, dev);
+ }
+ return ret;
+}
+
+static int _pci_assign_resource(struct pci_dev *dev, int resno,
+ resource_size_t size, resource_size_t min_align)
{
struct resource *res = dev->resource + resno;
struct pci_bus *bus;
@@ -238,31 +257,6 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno, int size, resour
return ret;
}
-int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
- resource_size_t min_align)
-{
- struct resource *res = dev->resource + resno;
- resource_size_t new_size;
- int ret;
-
- if (!res->parent) {
- dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR "
- "\n", resno, res);
- return -EINVAL;
- }
-
- /* already aligned with min_align */
- new_size = resource_size(res) + addsize;
- ret = _pci_assign_resource(dev, resno, new_size, min_align);
- if (!ret) {
- res->flags &= ~IORESOURCE_STARTALIGN;
- dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res);
- if (resno < PCI_BRIDGE_RESOURCES)
- pci_update_resource(dev, resno);
- }
- return ret;
-}
-
int pci_assign_resource(struct pci_dev *dev, int resno)
{
struct resource *res = dev->resource + resno;
@@ -298,6 +292,31 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
return ret;
}
+int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize,
+ resource_size_t min_align)
+{
+ struct resource *res = dev->resource + resno;
+ resource_size_t new_size;
+ int ret;
+
+ if (!res->parent) {
+ dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR "
+ "\n", resno, res);
+ return -EINVAL;
+ }
+
+ /* already aligned with min_align */
+ new_size = resource_size(res) + addsize;
+ ret = _pci_assign_resource(dev, resno, new_size, min_align);
+ if (!ret) {
+ res->flags &= ~IORESOURCE_STARTALIGN;
+ dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res);
+ if (resno < PCI_BRIDGE_RESOURCES)
+ pci_update_resource(dev, resno);
+ }
+ return ret;
+}
+
int pci_enable_resources(struct pci_dev *dev, int mask)
{
u16 cmd, old_cmd;
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 6e75153c5b4f..24caeaf50529 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -73,7 +73,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
pci_fixup_cardbus(bus);
- max = bus->secondary;
+ max = bus->busn_res.start;
for (pass = 0; pass < 2; pass++)
list_for_each_entry(dev, &bus->devices, bus_list)
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index d07f9ac8c41d..667678db1153 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1048,8 +1048,8 @@ static void yenta_config_init(struct yenta_socket *socket)
config_writeb(socket, PCI_LATENCY_TIMER, 168);
config_writel(socket, PCI_PRIMARY_BUS,
(176 << 24) | /* sec. latency timer */
- (dev->subordinate->subordinate << 16) | /* subordinate bus */
- (dev->subordinate->secondary << 8) | /* secondary bus */
+ ((unsigned int)dev->subordinate->busn_res.end << 16) | /* subordinate bus */
+ ((unsigned int)dev->subordinate->busn_res.start << 8) | /* secondary bus */
dev->subordinate->primary); /* primary bus */
/*
@@ -1086,14 +1086,14 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
struct pci_bus *bridge_to_fix = cardbus_bridge->parent;
/* Check bus numbers are already set up correctly: */
- if (bridge_to_fix->subordinate >= cardbus_bridge->subordinate)
+ if (bridge_to_fix->busn_res.end >= cardbus_bridge->busn_res.end)
return; /* The subordinate number is ok, nothing to do */
if (!bridge_to_fix->parent)
return; /* Root bridges are ok */
/* stay within the limits of the bus range of the parent: */
- upper_limit = bridge_to_fix->parent->subordinate;
+ upper_limit = bridge_to_fix->parent->busn_res.end;
/* check the bus ranges of all silbling bridges to prevent overlap */
list_for_each(tmp, &bridge_to_fix->parent->children) {
@@ -1104,36 +1104,36 @@ static void yenta_fixup_parent_bridge(struct pci_bus *cardbus_bridge)
* current upper limit, set the new upper limit to
* the bus number below the silbling's range:
*/
- if (silbling->secondary > bridge_to_fix->subordinate
- && silbling->secondary <= upper_limit)
- upper_limit = silbling->secondary - 1;
+ if (silbling->busn_res.start > bridge_to_fix->busn_res.end
+ && silbling->busn_res.start <= upper_limit)
+ upper_limit = silbling->busn_res.start - 1;
}
/* Show that the wanted subordinate number is not possible: */
- if (cardbus_bridge->subordinate > upper_limit)
+ if (cardbus_bridge->busn_res.end > upper_limit)
dev_printk(KERN_WARNING, &cardbus_bridge->dev,
"Upper limit for fixing this "
"bridge's parent bridge: #%02x\n", upper_limit);
/* If we have room to increase the bridge's subordinate number, */
- if (bridge_to_fix->subordinate < upper_limit) {
+ if (bridge_to_fix->busn_res.end < upper_limit) {
/* use the highest number of the hidden bus, within limits */
unsigned char subordinate_to_assign =
- min(cardbus_bridge->subordinate, upper_limit);
+ min_t(int, cardbus_bridge->busn_res.end, upper_limit);
dev_printk(KERN_INFO, &bridge_to_fix->dev,
"Raising subordinate bus# of parent "
"bus (#%02x) from #%02x to #%02x\n",
bridge_to_fix->number,
- bridge_to_fix->subordinate, subordinate_to_assign);
+ (int)bridge_to_fix->busn_res.end, subordinate_to_assign);
/* Save the new subordinate in the bus struct of the bridge */
- bridge_to_fix->subordinate = subordinate_to_assign;
+ bridge_to_fix->busn_res.end = subordinate_to_assign;
/* and update the PCI config space with the new subordinate */
pci_write_config_byte(bridge_to_fix->self,
- PCI_SUBORDINATE_BUS, bridge_to_fix->subordinate);
+ PCI_SUBORDINATE_BUS, bridge_to_fix->busn_res.end);
}
}
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index c6e6ae0aa3b1..54e3588bef62 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -102,6 +102,14 @@ config PINCTRL_PXA910
select PINCTRL_PXA3xx
select PINCONF
+config PINCTRL_SINGLE
+ tristate "One-register-per-pin type device tree based pinctrl driver"
+ depends on OF
+ select PINMUX
+ select PINCONF
+ help
+ This selects the device tree based generic pinctrl driver.
+
config PINCTRL_SIRF
bool "CSR SiRFprimaII pin controller driver"
depends on ARCH_PRIMA2
@@ -130,7 +138,7 @@ config PINCTRL_U300
config PINCTRL_COH901
bool "ST-Ericsson U300 COH 901 335/571 GPIO"
- depends on GPIOLIB && ARCH_U300 && PINMUX_U300
+ depends on GPIOLIB && ARCH_U300 && PINCTRL_U300
help
Say yes here to support GPIO interface on ST-Ericsson U300.
The names of the two IP block variants supported are
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index 8c074376cdea..f40b1f81ff2c 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_PINCTRL_NOMADIK) += pinctrl-nomadik.o
obj-$(CONFIG_PINCTRL_DB8500) += pinctrl-nomadik-db8500.o
obj-$(CONFIG_PINCTRL_PXA168) += pinctrl-pxa168.o
obj-$(CONFIG_PINCTRL_PXA910) += pinctrl-pxa910.o
+obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += pinctrl-sirf.o
obj-$(CONFIG_PINCTRL_TEGRA) += pinctrl-tegra.o
obj-$(CONFIG_PINCTRL_TEGRA20) += pinctrl-tegra20.o
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 0cc053af70bd..fb7f3bebdc69 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -332,19 +332,16 @@ void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
}
EXPORT_SYMBOL_GPL(pinctrl_add_gpio_range);
-/**
- * pinctrl_remove_gpio_range() - remove a range of GPIOs fro a pin controller
- * @pctldev: pin controller device to remove the range from
- * @range: the GPIO range to remove
- */
-void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range)
+void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *ranges,
+ unsigned nranges)
{
- mutex_lock(&pinctrl_mutex);
- list_del(&range->node);
- mutex_unlock(&pinctrl_mutex);
+ int i;
+
+ for (i = 0; i < nranges; i++)
+ pinctrl_add_gpio_range(pctldev, &ranges[i]);
}
-EXPORT_SYMBOL_GPL(pinctrl_remove_gpio_range);
+EXPORT_SYMBOL_GPL(pinctrl_add_gpio_ranges);
/**
* pinctrl_get_group_selector() - returns the group selector for a group
@@ -1395,9 +1392,9 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct pinctrl_dev *pctldev;
int ret;
- if (pctldesc == NULL)
+ if (!pctldesc)
return NULL;
- if (pctldesc->name == NULL)
+ if (!pctldesc->name)
return NULL;
pctldev = kzalloc(sizeof(*pctldev), GFP_KERNEL);
@@ -1415,23 +1412,20 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
pctldev->dev = dev;
/* check core ops for sanity */
- ret = pinctrl_check_ops(pctldev);
- if (ret) {
+ if (pinctrl_check_ops(pctldev)) {
dev_err(dev, "pinctrl ops lacks necessary functions\n");
goto out_err;
}
/* If we're implementing pinmuxing, check the ops for sanity */
if (pctldesc->pmxops) {
- ret = pinmux_check_ops(pctldev);
- if (ret)
+ if (pinmux_check_ops(pctldev))
goto out_err;
}
/* If we're implementing pinconfig, check the ops for sanity */
if (pctldesc->confops) {
- ret = pinconf_check_ops(pctldev);
- if (ret)
+ if (pinconf_check_ops(pctldev))
goto out_err;
}
@@ -1457,11 +1451,9 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
if (IS_ERR(s)) {
dev_dbg(dev, "failed to lookup the default state\n");
} else {
- ret = pinctrl_select_state_locked(pctldev->p, s);
- if (ret) {
+ if (pinctrl_select_state_locked(pctldev->p, s))
dev_err(dev,
"failed to select default state\n");
- }
}
}
@@ -1485,6 +1477,7 @@ EXPORT_SYMBOL_GPL(pinctrl_register);
*/
void pinctrl_unregister(struct pinctrl_dev *pctldev)
{
+ struct pinctrl_gpio_range *range, *n;
if (pctldev == NULL)
return;
@@ -1500,6 +1493,10 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
/* Destroy descriptor tree */
pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
pctldev->desc->npins);
+ /* remove gpio ranges map */
+ list_for_each_entry_safe(range, n, &pctldev->gpio_ranges, node)
+ list_del(&range->node);
+
kfree(pctldev);
mutex_unlock(&pinctrl_mutex);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 55697a5d7482..cc0f00d73d15 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -770,7 +770,7 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
dev_err(gpio->dev, "could not get GPIO clock\n");
goto err_no_clk;
}
- err = clk_enable(gpio->clk);
+ err = clk_prepare_enable(gpio->clk);
if (err) {
dev_err(gpio->dev, "could not enable GPIO clock\n");
goto err_no_clk_enable;
@@ -912,7 +912,7 @@ err_no_ioremap:
release_mem_region(gpio->memres->start, resource_size(gpio->memres));
err_no_ioregion:
err_no_resource:
- clk_disable(gpio->clk);
+ clk_disable_unprepare(gpio->clk);
err_no_clk_enable:
clk_put(gpio->clk);
err_no_clk:
@@ -943,7 +943,7 @@ static int __exit u300_gpio_remove(struct platform_device *pdev)
iounmap(gpio->base);
release_mem_region(gpio->memres->start,
resource_size(gpio->memres));
- clk_disable(gpio->clk);
+ clk_disable_unprepare(gpio->clk);
clk_put(gpio->clk);
platform_set_drvdata(pdev, NULL);
kfree(gpio);
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index dd6d93aa5334..44e97265cd7d 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -146,7 +146,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
struct pinctrl_map *new_map;
struct device_node *parent;
int map_num = 1;
- int i;
+ int i, j;
/*
* first find the group of this node and check if we need create
@@ -184,13 +184,14 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create config map */
new_map++;
- for (i = 0; i < grp->npins; i++) {
+ for (i = j = 0; i < grp->npins; i++) {
if (!(grp->configs[i] & IMX_NO_PAD_CTL)) {
- new_map[i].type = PIN_MAP_TYPE_CONFIGS_PIN;
- new_map[i].data.configs.group_or_pin =
+ new_map[j].type = PIN_MAP_TYPE_CONFIGS_PIN;
+ new_map[j].data.configs.group_or_pin =
pin_get_name(pctldev, grp->pins[i]);
- new_map[i].data.configs.configs = &grp->configs[i];
- new_map[i].data.configs.num_configs = 1;
+ new_map[j].data.configs.configs = &grp->configs[i];
+ new_map[j].data.configs.num_configs = 1;
+ j++;
}
}
@@ -474,7 +475,9 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
grp->configs[j] = config & ~IMX_PAD_SION;
}
+#ifdef DEBUG
IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
+#endif
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-imx6q.c b/drivers/pinctrl/pinctrl-imx6q.c
index 7737d4d71a3c..e9bf71fbedca 100644
--- a/drivers/pinctrl/pinctrl-imx6q.c
+++ b/drivers/pinctrl/pinctrl-imx6q.c
@@ -1950,6 +1950,8 @@ static struct imx_pin_reg imx6q_pin_regs[] = {
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 5, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__GPIO_1_12 */
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 6, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__SJC_DONE */
IMX_PIN_REG(MX6Q_PAD_SD2_DAT3, 0x0744, 0x035C, 7, 0x0000, 0), /* MX6Q_PAD_SD2_DAT3__ANATOP_TESTO_3 */
+ IMX_PIN_REG(MX6Q_PAD_ENET_RX_ER, 0x04EC, 0x01D8, 0, 0x0000, 0), /* MX6Q_PAD_ENET_RX_ER__ANATOP_USBOTG_ID */
+ IMX_PIN_REG(MX6Q_PAD_GPIO_1, 0x05F4, 0x0224, 3, 0x0000, 0), /* MX6Q_PAD_GPIO_1__ANATOP_USBOTG_ID */
};
/* Pad names for the pinmux subsystem */
diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c
index 8b2022276f71..6f99769c6733 100644
--- a/drivers/pinctrl/pinctrl-nomadik-db8500.c
+++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c
@@ -467,9 +467,12 @@ static const unsigned mc1_a_1_pins[] = { DB8500_PIN_AH16, DB8500_PIN_AG15,
DB8500_PIN_AH15 };
static const unsigned mc1dir_a_1_pins[] = { DB8500_PIN_AH13, DB8500_PIN_AG12,
DB8500_PIN_AH12, DB8500_PIN_AH11 };
-static const unsigned hsir_a_1_pins[] = { DB8500_PIN_AG10, DB8500_PIN_AH10 };
-static const unsigned hsit_a_1_pins[] = { DB8500_PIN_AJ11, DB8500_PIN_AJ9,
- DB8500_PIN_AH9, DB8500_PIN_AG9, DB8500_PIN_AG8, DB8500_PIN_AF8 };
+static const unsigned hsir_a_1_pins[] = { DB8500_PIN_AG10, DB8500_PIN_AH10,
+ DB8500_PIN_AJ11 };
+static const unsigned hsit_a_1_pins[] = { DB8500_PIN_AJ9, DB8500_PIN_AH9,
+ DB8500_PIN_AG9, DB8500_PIN_AG8, DB8500_PIN_AF8 };
+static const unsigned hsit_a_2_pins[] = { DB8500_PIN_AJ9, DB8500_PIN_AH9,
+ DB8500_PIN_AG9, DB8500_PIN_AG8 };
static const unsigned clkout_a_1_pins[] = { DB8500_PIN_AH7, DB8500_PIN_AJ6 };
static const unsigned clkout_a_2_pins[] = { DB8500_PIN_AG7, DB8500_PIN_AF7 };
static const unsigned usb_a_1_pins[] = { DB8500_PIN_AF28, DB8500_PIN_AE29,
@@ -508,9 +511,11 @@ static const unsigned sm_b_1_pins[] = { DB8500_PIN_C6, DB8500_PIN_B3,
DB8500_PIN_D9, DB8500_PIN_A5, DB8500_PIN_B4, DB8500_PIN_C8,
DB8500_PIN_A12, DB8500_PIN_C10, DB8500_PIN_B10, DB8500_PIN_B9,
DB8500_PIN_A9, DB8500_PIN_C7, DB8500_PIN_A7, DB8500_PIN_C5,
- DB8500_PIN_C9, DB8500_PIN_B14 };
-/* This chip select pin can be "ps0" in alt B so have it separately */
+ DB8500_PIN_C9 };
+/* This chip select pin can be "ps0" in alt C so have it separately */
static const unsigned smcs0_b_1_pins[] = { DB8500_PIN_E8 };
+/* This chip select pin can be "ps1" in alt C so have it separately */
+static const unsigned smcs1_b_1_pins[] = { DB8500_PIN_B14 };
static const unsigned ipgpio7_b_1_pins[] = { DB8500_PIN_B11 };
static const unsigned ipgpio2_b_1_pins[] = { DB8500_PIN_C12 };
static const unsigned ipgpio3_b_1_pins[] = { DB8500_PIN_C11 };
@@ -572,6 +577,7 @@ static const unsigned mc2rstn_c_1_pins[] = { DB8500_PIN_C8 };
static const unsigned kp_c_1_pins[] = { DB8500_PIN_C9, DB8500_PIN_B11,
DB8500_PIN_C12, DB8500_PIN_C11, DB8500_PIN_D17, DB8500_PIN_D16,
DB8500_PIN_C23, DB8500_PIN_D23 };
+static const unsigned smps0_c_1_pins[] = { DB8500_PIN_E8 };
static const unsigned smps1_c_1_pins[] = { DB8500_PIN_B14 };
static const unsigned u2rxtx_c_3_pins[] = { DB8500_PIN_B17, DB8500_PIN_C16 };
static const unsigned stmape_c_2_pins[] = { DB8500_PIN_C19, DB8500_PIN_C17,
@@ -595,6 +601,8 @@ static const unsigned kp_oc1_1_pins[] = { DB8500_PIN_C6, DB8500_PIN_B3,
DB8500_PIN_D6, DB8500_PIN_B7 };
static const unsigned spi2_oc1_1_pins[] = { DB8500_PIN_AH13, DB8500_PIN_AG12,
DB8500_PIN_AH12, DB8500_PIN_AH11 };
+static const unsigned spi2_oc1_2_pins[] = { DB8500_PIN_AH13, DB8500_PIN_AH12,
+ DB8500_PIN_AH11 };
#define DB8500_PIN_GROUP(a,b) { .name = #a, .pins = a##_pins, \
.npins = ARRAY_SIZE(a##_pins), .altsetting = b }
@@ -610,6 +618,8 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(msp0tfstck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp0rfsrck_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(mc0_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(mc0_dat47_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(mc0dat31dir_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp1txrx_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(msp1_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(lcdb_a_1, NMK_GPIO_ALT_A),
@@ -631,6 +641,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(mc1_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(hsir_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(hsit_a_1, NMK_GPIO_ALT_A),
+ DB8500_PIN_GROUP(hsit_a_2, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(clkout_a_1, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(clkout_a_2, NMK_GPIO_ALT_A),
DB8500_PIN_GROUP(usb_a_1, NMK_GPIO_ALT_A),
@@ -653,6 +664,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B),
+ DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(ipgpio7_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(ipgpio2_b_1, NMK_GPIO_ALT_B),
DB8500_PIN_GROUP(ipgpio3_b_1, NMK_GPIO_ALT_B),
@@ -693,6 +705,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
DB8500_PIN_GROUP(mc5_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(mc2rstn_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(kp_c_1, NMK_GPIO_ALT_C),
+ DB8500_PIN_GROUP(smps0_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(smps1_c_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(u2rxtx_c_3, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(stmape_c_2, NMK_GPIO_ALT_C),
@@ -709,6 +722,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = {
/* Other alt C1 column, these are still configured as alt C */
DB8500_PIN_GROUP(kp_oc1_1, NMK_GPIO_ALT_C),
DB8500_PIN_GROUP(spi2_oc1_1, NMK_GPIO_ALT_C),
+ DB8500_PIN_GROUP(spi2_oc1_2, NMK_GPIO_ALT_C),
};
/* We use this macro to define the groups applicable to a function */
@@ -731,7 +745,7 @@ DB8500_FUNC_GROUPS(ipi2c, "ipi2c_a_1", "ipi2c_a_2");
*/
DB8500_FUNC_GROUPS(msp0, "msp0txrx_a_1", "msp0tfstck_a_1", "msp0rfstck_a_1",
"msp0txrx_b_1", "msp0sck_b_1");
-DB8500_FUNC_GROUPS(mc0, "mc0_a_1");
+DB8500_FUNC_GROUPS(mc0, "mc0_a_1", "mc0_dat47_a_1", "mc0dat31dir_a_1");
/* MSP0 can swap RX/TX like MSP0 but has no SCK pin available */
DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1");
DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1");
@@ -752,7 +766,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1",
DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1");
DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1");
DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1");
-DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1");
+DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2");
DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1");
DB8500_FUNC_GROUPS(usb, "usb_a_1");
DB8500_FUNC_GROUPS(trig, "trig_b_1");
@@ -768,7 +782,8 @@ DB8500_FUNC_GROUPS(uartmod, "uartmodtx_b_1", "uartmodrx_b_1", "uartmodrx_b_2",
DB8500_FUNC_GROUPS(stmmod, "stmmod_b_1", "stmmod_c_1");
DB8500_FUNC_GROUPS(spi3, "spi3_b_1");
/* Select between CS0 on alt B or PS1 on alt C */
-DB8500_FUNC_GROUPS(sm, "sm_b_1", "smcs0_b_1", "smcleale_c_1", "smps1_c_1");
+DB8500_FUNC_GROUPS(sm, "sm_b_1", "smcs0_b_1", "smcs1_b_1", "smcleale_c_1",
+ "smps0_c_1", "smps1_c_1");
DB8500_FUNC_GROUPS(lcda, "lcdaclk_b_1", "lcda_b_1");
DB8500_FUNC_GROUPS(ddrtrig, "ddrtrig_b_1");
DB8500_FUNC_GROUPS(pwl, "pwl_b_1", "pwl_b_2", "pwl_b_3", "pwl_b_4");
@@ -783,7 +798,7 @@ DB8500_FUNC_GROUPS(mc5, "mc5_c_1");
DB8500_FUNC_GROUPS(usbsim, "usbsim_c_1", "usbsim_c_2");
DB8500_FUNC_GROUPS(i2c3, "i2c3_c_1", "i2c3_c_2");
DB8500_FUNC_GROUPS(spi0, "spi0_c_1");
-DB8500_FUNC_GROUPS(spi2, "spi2_oc1_1");
+DB8500_FUNC_GROUPS(spi2, "spi2_oc1_1", "spi2_oc1_2");
#define FUNCTION(fname) \
{ \
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 3e7e47d6b385..53b0d49a7a1c 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -434,7 +434,7 @@ static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
/**
* nmk_config_pin - configure a pin's mux attributes
* @cfg: pin confguration
- *
+ * @sleep: Non-zero to apply the sleep mode configuration
* Configures a pin's mode (alternate function or GPIO), its pull up status,
* and its sleep mode based on the specified configuration. The @cfg is
* usually one of the SoC specific macros defined in mach/<soc>-pins.h. These
@@ -1194,11 +1194,11 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
}
if (np) {
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ pdata = devm_kzalloc(&dev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- if (of_get_property(np, "supports-sleepmode", NULL))
+ if (of_get_property(np, "st,supports-sleepmode", NULL))
pdata->supports_sleepmode = true;
if (of_property_read_u32(np, "gpio-bank", &dev->id)) {
@@ -1229,29 +1229,23 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
goto out;
}
- if (request_mem_region(res->start, resource_size(res),
- dev_name(&dev->dev)) == NULL) {
- ret = -EBUSY;
- goto out;
- }
-
- base = ioremap(res->start, resource_size(res));
+ base = devm_request_and_ioremap(&dev->dev, res);
if (!base) {
ret = -ENOMEM;
- goto out_release;
+ goto out;
}
- clk = clk_get(&dev->dev, NULL);
+ clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- goto out_unmap;
+ goto out;
}
clk_prepare(clk);
- nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
+ nmk_chip = devm_kzalloc(&dev->dev, sizeof(*nmk_chip), GFP_KERNEL);
if (!nmk_chip) {
ret = -ENOMEM;
- goto out_clk;
+ goto out;
}
/*
@@ -1286,7 +1280,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
ret = gpiochip_add(&nmk_chip->chip);
if (ret)
- goto out_free;
+ goto out;
BUG_ON(nmk_chip->bank >= ARRAY_SIZE(nmk_gpio_chips));
@@ -1300,7 +1294,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
if (!nmk_chip->domain) {
pr_err("%s: Failed to create irqdomain\n", np->full_name);
ret = -ENOSYS;
- goto out_free;
+ goto out;
}
nmk_gpio_init_irq(nmk_chip);
@@ -1309,20 +1303,9 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
return 0;
-out_free:
- kfree(nmk_chip);
-out_clk:
- clk_disable(clk);
- clk_put(clk);
-out_unmap:
- iounmap(base);
-out_release:
- release_mem_region(res->start, resource_size(res));
out:
dev_err(&dev->dev, "Failure %i for GPIO %i-%i\n", ret,
pdata->first_gpio, pdata->first_gpio+31);
- if (np)
- kfree(pdata);
return ret;
}
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
new file mode 100644
index 000000000000..76a4260f20f3
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -0,0 +1,987 @@
+/*
+ * Generic device tree based pinctrl driver for one register per pin
+ * type pinmux controllers
+ *
+ * Copyright (C) 2012 Texas Instruments, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/list.h>
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "core.h"
+
+#define DRIVER_NAME "pinctrl-single"
+#define PCS_MUX_NAME "pinctrl-single,pins"
+#define PCS_REG_NAME_LEN ((sizeof(unsigned long) * 2) + 1)
+#define PCS_OFF_DISABLED ~0U
+
+/**
+ * struct pcs_pingroup - pingroups for a function
+ * @np: pingroup device node pointer
+ * @name: pingroup name
+ * @gpins: array of the pins in the group
+ * @ngpins: number of pins in the group
+ * @node: list node
+ */
+struct pcs_pingroup {
+ struct device_node *np;
+ const char *name;
+ int *gpins;
+ int ngpins;
+ struct list_head node;
+};
+
+/**
+ * struct pcs_func_vals - mux function register offset and value pair
+ * @reg: register virtual address
+ * @val: register value
+ */
+struct pcs_func_vals {
+ void __iomem *reg;
+ unsigned val;
+};
+
+/**
+ * struct pcs_function - pinctrl function
+ * @name: pinctrl function name
+ * @vals: register and vals array
+ * @nvals: number of entries in vals array
+ * @pgnames: array of pingroup names the function uses
+ * @npgnames: number of pingroup names the function uses
+ * @node: list node
+ */
+struct pcs_function {
+ const char *name;
+ struct pcs_func_vals *vals;
+ unsigned nvals;
+ const char **pgnames;
+ int npgnames;
+ struct list_head node;
+};
+
+/**
+ * struct pcs_data - wrapper for data needed by pinctrl framework
+ * @pa: pindesc array
+ * @cur: index to current element
+ *
+ * REVISIT: We should be able to drop this eventually by adding
+ * support for registering pins individually in the pinctrl
+ * framework for those drivers that don't need a static array.
+ */
+struct pcs_data {
+ struct pinctrl_pin_desc *pa;
+ int cur;
+};
+
+/**
+ * struct pcs_name - register name for a pin
+ * @name: name of the pinctrl register
+ *
+ * REVISIT: We may want to make names optional in the pinctrl
+ * framework as some drivers may not care about pin names to
+ * avoid kernel bloat. The pin names can be deciphered by user
+ * space tools using debugfs based on the register address and
+ * SoC packaging information.
+ */
+struct pcs_name {
+ char name[PCS_REG_NAME_LEN];
+};
+
+/**
+ * struct pcs_device - pinctrl device instance
+ * @res: resources
+ * @base: virtual address of the controller
+ * @size: size of the ioremapped area
+ * @dev: device entry
+ * @pctl: pin controller device
+ * @mutex: mutex protecting the lists
+ * @width: bits per mux register
+ * @fmask: function register mask
+ * @fshift: function register shift
+ * @foff: value to turn mux off
+ * @fmax: max number of functions in fmask
+ * @names: array of register names for pins
+ * @pins: physical pins on the SoC
+ * @pgtree: pingroup index radix tree
+ * @ftree: function index radix tree
+ * @pingroups: list of pingroups
+ * @functions: list of functions
+ * @ngroups: number of pingroups
+ * @nfuncs: number of functions
+ * @desc: pin controller descriptor
+ * @read: register read function to use
+ * @write: register write function to use
+ */
+struct pcs_device {
+ struct resource *res;
+ void __iomem *base;
+ unsigned size;
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ struct mutex mutex;
+ unsigned width;
+ unsigned fmask;
+ unsigned fshift;
+ unsigned foff;
+ unsigned fmax;
+ struct pcs_name *names;
+ struct pcs_data pins;
+ struct radix_tree_root pgtree;
+ struct radix_tree_root ftree;
+ struct list_head pingroups;
+ struct list_head functions;
+ unsigned ngroups;
+ unsigned nfuncs;
+ struct pinctrl_desc desc;
+ unsigned (*read)(void __iomem *reg);
+ void (*write)(unsigned val, void __iomem *reg);
+};
+
+/*
+ * REVISIT: Reads and writes could eventually use regmap or something
+ * generic. But at least on omaps, some mux registers are performance
+ * critical as they may need to be remuxed every time before and after
+ * idle. Adding tests for register access width for every read and
+ * write like regmap is doing is not desired, and caching the registers
+ * does not help in this case.
+ */
+
+static unsigned __maybe_unused pcs_readb(void __iomem *reg)
+{
+ return readb(reg);
+}
+
+static unsigned __maybe_unused pcs_readw(void __iomem *reg)
+{
+ return readw(reg);
+}
+
+static unsigned __maybe_unused pcs_readl(void __iomem *reg)
+{
+ return readl(reg);
+}
+
+static void __maybe_unused pcs_writeb(unsigned val, void __iomem *reg)
+{
+ writeb(val, reg);
+}
+
+static void __maybe_unused pcs_writew(unsigned val, void __iomem *reg)
+{
+ writew(val, reg);
+}
+
+static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
+{
+ writel(val, reg);
+}
+
+static int pcs_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct pcs_device *pcs;
+
+ pcs = pinctrl_dev_get_drvdata(pctldev);
+
+ return pcs->ngroups;
+}
+
+static const char *pcs_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned gselector)
+{
+ struct pcs_device *pcs;
+ struct pcs_pingroup *group;
+
+ pcs = pinctrl_dev_get_drvdata(pctldev);
+ group = radix_tree_lookup(&pcs->pgtree, gselector);
+ if (!group) {
+ dev_err(pcs->dev, "%s could not find pingroup%i\n",
+ __func__, gselector);
+ return NULL;
+ }
+
+ return group->name;
+}
+
+static int pcs_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned gselector,
+ const unsigned **pins,
+ unsigned *npins)
+{
+ struct pcs_device *pcs;
+ struct pcs_pingroup *group;
+
+ pcs = pinctrl_dev_get_drvdata(pctldev);
+ group = radix_tree_lookup(&pcs->pgtree, gselector);
+ if (!group) {
+ dev_err(pcs->dev, "%s could not find pingroup%i\n",
+ __func__, gselector);
+ return -EINVAL;
+ }
+
+ *pins = group->gpins;
+ *npins = group->ngpins;
+
+ return 0;
+}
+
+static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s,
+ unsigned offset)
+{
+ seq_printf(s, " " DRIVER_NAME);
+}
+
+static void pcs_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps)
+{
+ struct pcs_device *pcs;
+
+ pcs = pinctrl_dev_get_drvdata(pctldev);
+ devm_kfree(pcs->dev, map);
+}
+
+static int pcs_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps);
+
+static struct pinctrl_ops pcs_pinctrl_ops = {
+ .get_groups_count = pcs_get_groups_count,
+ .get_group_name = pcs_get_group_name,
+ .get_group_pins = pcs_get_group_pins,
+ .pin_dbg_show = pcs_pin_dbg_show,
+ .dt_node_to_map = pcs_dt_node_to_map,
+ .dt_free_map = pcs_dt_free_map,
+};
+
+static int pcs_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct pcs_device *pcs;
+
+ pcs = pinctrl_dev_get_drvdata(pctldev);
+
+ return pcs->nfuncs;
+}
+
+static const char *pcs_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned fselector)
+{
+ struct pcs_device *pcs;
+ struct pcs_function *func;
+
+ pcs = pinctrl_dev_get_drvdata(pctldev);
+ func = radix_tree_lookup(&pcs->ftree, fselector);
+ if (!func) {
+ dev_err(pcs->dev, "%s could not find function%i\n",
+ __func__, fselector);
+ return NULL;
+ }
+
+ return func->name;
+}
+
+static int pcs_get_function_groups(struct pinctrl_dev *pctldev,
+ unsigned fselector,
+ const char * const **groups,
+ unsigned * const ngroups)
+{
+ struct pcs_device *pcs;
+ struct pcs_function *func;
+
+ pcs = pinctrl_dev_get_drvdata(pctldev);
+ func = radix_tree_lookup(&pcs->ftree, fselector);
+ if (!func) {
+ dev_err(pcs->dev, "%s could not find function%i\n",
+ __func__, fselector);
+ return -EINVAL;
+ }
+ *groups = func->pgnames;
+ *ngroups = func->npgnames;
+
+ return 0;
+}
+
+static int pcs_enable(struct pinctrl_dev *pctldev, unsigned fselector,
+ unsigned group)
+{
+ struct pcs_device *pcs;
+ struct pcs_function *func;
+ int i;
+
+ pcs = pinctrl_dev_get_drvdata(pctldev);
+ func = radix_tree_lookup(&pcs->ftree, fselector);
+ if (!func)
+ return -EINVAL;
+
+ dev_dbg(pcs->dev, "enabling %s function%i\n",
+ func->name, fselector);
+
+ for (i = 0; i < func->nvals; i++) {
+ struct pcs_func_vals *vals;
+ unsigned val;
+
+ vals = &func->vals[i];
+ val = pcs->read(vals->reg);
+ val &= ~pcs->fmask;
+ val |= vals->val;
+ pcs->write(val, vals->reg);
+ }
+
+ return 0;
+}
+
+static void pcs_disable(struct pinctrl_dev *pctldev, unsigned fselector,
+ unsigned group)
+{
+ struct pcs_device *pcs;
+ struct pcs_function *func;
+ int i;
+
+ pcs = pinctrl_dev_get_drvdata(pctldev);
+ func = radix_tree_lookup(&pcs->ftree, fselector);
+ if (!func) {
+ dev_err(pcs->dev, "%s could not find function%i\n",
+ __func__, fselector);
+ return;
+ }
+
+ /*
+ * Ignore disable if function-off is not specified. Some hardware
+ * does not have clearly defined disable function. For pin specific
+ * off modes, you can use alternate named states as described in
+ * pinctrl-bindings.txt.
+ */
+ if (pcs->foff == PCS_OFF_DISABLED) {
+ dev_dbg(pcs->dev, "ignoring disable for %s function%i\n",
+ func->name, fselector);
+ return;
+ }
+
+ dev_dbg(pcs->dev, "disabling function%i %s\n",
+ fselector, func->name);
+
+ for (i = 0; i < func->nvals; i++) {
+ struct pcs_func_vals *vals;
+ unsigned val;
+
+ vals = &func->vals[i];
+ val = pcs->read(vals->reg);
+ val &= ~pcs->fmask;
+ val |= pcs->foff << pcs->fshift;
+ pcs->write(val, vals->reg);
+ }
+}
+
+static int pcs_request_gpio(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range, unsigned offset)
+{
+ return -ENOTSUPP;
+}
+
+static struct pinmux_ops pcs_pinmux_ops = {
+ .get_functions_count = pcs_get_functions_count,
+ .get_function_name = pcs_get_function_name,
+ .get_function_groups = pcs_get_function_groups,
+ .enable = pcs_enable,
+ .disable = pcs_disable,
+ .gpio_request_enable = pcs_request_gpio,
+};
+
+static int pcs_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned pin, unsigned long *config)
+{
+ return -ENOTSUPP;
+}
+
+static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned pin, unsigned long config)
+{
+ return -ENOTSUPP;
+}
+
+static int pcs_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned group, unsigned long *config)
+{
+ return -ENOTSUPP;
+}
+
+static int pcs_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned group, unsigned long config)
+{
+ return -ENOTSUPP;
+}
+
+static void pcs_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned offset)
+{
+}
+
+static void pcs_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned selector)
+{
+}
+
+static struct pinconf_ops pcs_pinconf_ops = {
+ .pin_config_get = pcs_pinconf_get,
+ .pin_config_set = pcs_pinconf_set,
+ .pin_config_group_get = pcs_pinconf_group_get,
+ .pin_config_group_set = pcs_pinconf_group_set,
+ .pin_config_dbg_show = pcs_pinconf_dbg_show,
+ .pin_config_group_dbg_show = pcs_pinconf_group_dbg_show,
+};
+
+/**
+ * pcs_add_pin() - add a pin to the static per controller pin array
+ * @pcs: pcs driver instance
+ * @offset: register offset from base
+ */
+static int __devinit pcs_add_pin(struct pcs_device *pcs, unsigned offset)
+{
+ struct pinctrl_pin_desc *pin;
+ struct pcs_name *pn;
+ int i;
+
+ i = pcs->pins.cur;
+ if (i >= pcs->desc.npins) {
+ dev_err(pcs->dev, "too many pins, max %i\n",
+ pcs->desc.npins);
+ return -ENOMEM;
+ }
+
+ pin = &pcs->pins.pa[i];
+ pn = &pcs->names[i];
+ sprintf(pn->name, "%lx",
+ (unsigned long)pcs->res->start + offset);
+ pin->name = pn->name;
+ pin->number = i;
+ pcs->pins.cur++;
+
+ return i;
+}
+
+/**
+ * pcs_allocate_pin_table() - adds all the pins for the pinctrl driver
+ * @pcs: pcs driver instance
+ *
+ * In case of errors, resources are freed in pcs_free_resources.
+ *
+ * If your hardware needs holes in the address space, then just set
+ * up multiple driver instances.
+ */
+static int __devinit pcs_allocate_pin_table(struct pcs_device *pcs)
+{
+ int mux_bytes, nr_pins, i;
+
+ mux_bytes = pcs->width / BITS_PER_BYTE;
+ nr_pins = pcs->size / mux_bytes;
+
+ dev_dbg(pcs->dev, "allocating %i pins\n", nr_pins);
+ pcs->pins.pa = devm_kzalloc(pcs->dev,
+ sizeof(*pcs->pins.pa) * nr_pins,
+ GFP_KERNEL);
+ if (!pcs->pins.pa)
+ return -ENOMEM;
+
+ pcs->names = devm_kzalloc(pcs->dev,
+ sizeof(struct pcs_name) * nr_pins,
+ GFP_KERNEL);
+ if (!pcs->names)
+ return -ENOMEM;
+
+ pcs->desc.pins = pcs->pins.pa;
+ pcs->desc.npins = nr_pins;
+
+ for (i = 0; i < pcs->desc.npins; i++) {
+ unsigned offset;
+ int res;
+
+ offset = i * mux_bytes;
+ res = pcs_add_pin(pcs, offset);
+ if (res < 0) {
+ dev_err(pcs->dev, "error adding pins: %i\n", res);
+ return res;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * pcs_add_function() - adds a new function to the function list
+ * @pcs: pcs driver instance
+ * @np: device node of the mux entry
+ * @name: name of the function
+ * @vals: array of mux register value pairs used by the function
+ * @nvals: number of mux register value pairs
+ * @pgnames: array of pingroup names for the function
+ * @npgnames: number of pingroup names
+ */
+static struct pcs_function *pcs_add_function(struct pcs_device *pcs,
+ struct device_node *np,
+ const char *name,
+ struct pcs_func_vals *vals,
+ unsigned nvals,
+ const char **pgnames,
+ unsigned npgnames)
+{
+ struct pcs_function *function;
+
+ function = devm_kzalloc(pcs->dev, sizeof(*function), GFP_KERNEL);
+ if (!function)
+ return NULL;
+
+ function->name = name;
+ function->vals = vals;
+ function->nvals = nvals;
+ function->pgnames = pgnames;
+ function->npgnames = npgnames;
+
+ mutex_lock(&pcs->mutex);
+ list_add_tail(&function->node, &pcs->functions);
+ radix_tree_insert(&pcs->ftree, pcs->nfuncs, function);
+ pcs->nfuncs++;
+ mutex_unlock(&pcs->mutex);
+
+ return function;
+}
+
+static void pcs_remove_function(struct pcs_device *pcs,
+ struct pcs_function *function)
+{
+ int i;
+
+ mutex_lock(&pcs->mutex);
+ for (i = 0; i < pcs->nfuncs; i++) {
+ struct pcs_function *found;
+
+ found = radix_tree_lookup(&pcs->ftree, i);
+ if (found == function)
+ radix_tree_delete(&pcs->ftree, i);
+ }
+ list_del(&function->node);
+ mutex_unlock(&pcs->mutex);
+}
+
+/**
+ * pcs_add_pingroup() - add a pingroup to the pingroup list
+ * @pcs: pcs driver instance
+ * @np: device node of the mux entry
+ * @name: name of the pingroup
+ * @gpins: array of the pins that belong to the group
+ * @ngpins: number of pins in the group
+ */
+static int pcs_add_pingroup(struct pcs_device *pcs,
+ struct device_node *np,
+ const char *name,
+ int *gpins,
+ int ngpins)
+{
+ struct pcs_pingroup *pingroup;
+
+ pingroup = devm_kzalloc(pcs->dev, sizeof(*pingroup), GFP_KERNEL);
+ if (!pingroup)
+ return -ENOMEM;
+
+ pingroup->name = name;
+ pingroup->np = np;
+ pingroup->gpins = gpins;
+ pingroup->ngpins = ngpins;
+
+ mutex_lock(&pcs->mutex);
+ list_add_tail(&pingroup->node, &pcs->pingroups);
+ radix_tree_insert(&pcs->pgtree, pcs->ngroups, pingroup);
+ pcs->ngroups++;
+ mutex_unlock(&pcs->mutex);
+
+ return 0;
+}
+
+/**
+ * pcs_get_pin_by_offset() - get a pin index based on the register offset
+ * @pcs: pcs driver instance
+ * @offset: register offset from the base
+ *
+ * Note that this is OK as long as the pins are in a static array.
+ */
+static int pcs_get_pin_by_offset(struct pcs_device *pcs, unsigned offset)
+{
+ unsigned index;
+
+ if (offset >= pcs->size) {
+ dev_err(pcs->dev, "mux offset out of range: 0x%x (0x%x)\n",
+ offset, pcs->size);
+ return -EINVAL;
+ }
+
+ index = offset / (pcs->width / BITS_PER_BYTE);
+
+ return index;
+}
+
+/**
+ * smux_parse_one_pinctrl_entry() - parses a device tree mux entry
+ * @pcs: pinctrl driver instance
+ * @np: device node of the mux entry
+ * @map: map entry
+ * @pgnames: pingroup names
+ *
+ * Note that this binding currently supports only sets of one register + value.
+ *
+ * Also note that this driver tries to avoid understanding pin and function
+ * names because of the extra bloat they would cause especially in the case of
+ * a large number of pins. This driver just sets what is specified for the board
+ * in the .dts file. Further user space debugging tools can be developed to
+ * decipher the pin and function names using debugfs.
+ *
+ * If you are concerned about the boot time, set up the static pins in
+ * the bootloader, and only set up selected pins as device tree entries.
+ */
+static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
+ struct device_node *np,
+ struct pinctrl_map **map,
+ const char **pgnames)
+{
+ struct pcs_func_vals *vals;
+ const __be32 *mux;
+ int size, rows, *pins, index = 0, found = 0, res = -ENOMEM;
+ struct pcs_function *function;
+
+ mux = of_get_property(np, PCS_MUX_NAME, &size);
+ if ((!mux) || (size < sizeof(*mux) * 2)) {
+ dev_err(pcs->dev, "bad data for mux %s\n",
+ np->name);
+ return -EINVAL;
+ }
+
+ size /= sizeof(*mux); /* Number of elements in array */
+ rows = size / 2; /* Each row is a key value pair */
+
+ vals = devm_kzalloc(pcs->dev, sizeof(*vals) * rows, GFP_KERNEL);
+ if (!vals)
+ return -ENOMEM;
+
+ pins = devm_kzalloc(pcs->dev, sizeof(*pins) * rows, GFP_KERNEL);
+ if (!pins)
+ goto free_vals;
+
+ while (index < size) {
+ unsigned offset, val;
+ int pin;
+
+ offset = be32_to_cpup(mux + index++);
+ val = be32_to_cpup(mux + index++);
+ vals[found].reg = pcs->base + offset;
+ vals[found].val = val;
+
+ pin = pcs_get_pin_by_offset(pcs, offset);
+ if (pin < 0) {
+ dev_err(pcs->dev,
+ "could not add functions for %s %ux\n",
+ np->name, offset);
+ break;
+ }
+ pins[found++] = pin;
+ }
+
+ pgnames[0] = np->name;
+ function = pcs_add_function(pcs, np, np->name, vals, found, pgnames, 1);
+ if (!function)
+ goto free_pins;
+
+ res = pcs_add_pingroup(pcs, np, np->name, pins, found);
+ if (res < 0)
+ goto free_function;
+
+ (*map)->type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)->data.mux.group = np->name;
+ (*map)->data.mux.function = np->name;
+
+ return 0;
+
+free_function:
+ pcs_remove_function(pcs, function);
+
+free_pins:
+ devm_kfree(pcs->dev, pins);
+
+free_vals:
+ devm_kfree(pcs->dev, vals);
+
+ return res;
+}
+/**
+ * pcs_dt_node_to_map() - allocates and parses pinctrl maps
+ * @pctldev: pinctrl instance
+ * @np_config: device tree pinmux entry
+ * @map: array of map entries
+ * @num_maps: number of maps
+ */
+static int pcs_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ struct pcs_device *pcs;
+ const char **pgnames;
+ int ret;
+
+ pcs = pinctrl_dev_get_drvdata(pctldev);
+
+ *map = devm_kzalloc(pcs->dev, sizeof(**map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ *num_maps = 0;
+
+ pgnames = devm_kzalloc(pcs->dev, sizeof(*pgnames), GFP_KERNEL);
+ if (!pgnames) {
+ ret = -ENOMEM;
+ goto free_map;
+ }
+
+ ret = pcs_parse_one_pinctrl_entry(pcs, np_config, map, pgnames);
+ if (ret < 0) {
+ dev_err(pcs->dev, "no pins entries for %s\n",
+ np_config->name);
+ goto free_pgnames;
+ }
+ *num_maps = 1;
+
+ return 0;
+
+free_pgnames:
+ devm_kfree(pcs->dev, pgnames);
+free_map:
+ devm_kfree(pcs->dev, *map);
+
+ return ret;
+}
+
+/**
+ * pcs_free_funcs() - free memory used by functions
+ * @pcs: pcs driver instance
+ */
+static void pcs_free_funcs(struct pcs_device *pcs)
+{
+ struct list_head *pos, *tmp;
+ int i;
+
+ mutex_lock(&pcs->mutex);
+ for (i = 0; i < pcs->nfuncs; i++) {
+ struct pcs_function *func;
+
+ func = radix_tree_lookup(&pcs->ftree, i);
+ if (!func)
+ continue;
+ radix_tree_delete(&pcs->ftree, i);
+ }
+ list_for_each_safe(pos, tmp, &pcs->functions) {
+ struct pcs_function *function;
+
+ function = list_entry(pos, struct pcs_function, node);
+ list_del(&function->node);
+ }
+ mutex_unlock(&pcs->mutex);
+}
+
+/**
+ * pcs_free_pingroups() - free memory used by pingroups
+ * @pcs: pcs driver instance
+ */
+static void pcs_free_pingroups(struct pcs_device *pcs)
+{
+ struct list_head *pos, *tmp;
+ int i;
+
+ mutex_lock(&pcs->mutex);
+ for (i = 0; i < pcs->ngroups; i++) {
+ struct pcs_pingroup *pingroup;
+
+ pingroup = radix_tree_lookup(&pcs->pgtree, i);
+ if (!pingroup)
+ continue;
+ radix_tree_delete(&pcs->pgtree, i);
+ }
+ list_for_each_safe(pos, tmp, &pcs->pingroups) {
+ struct pcs_pingroup *pingroup;
+
+ pingroup = list_entry(pos, struct pcs_pingroup, node);
+ list_del(&pingroup->node);
+ }
+ mutex_unlock(&pcs->mutex);
+}
+
+/**
+ * pcs_free_resources() - free memory used by this driver
+ * @pcs: pcs driver instance
+ */
+static void pcs_free_resources(struct pcs_device *pcs)
+{
+ if (pcs->pctl)
+ pinctrl_unregister(pcs->pctl);
+
+ pcs_free_funcs(pcs);
+ pcs_free_pingroups(pcs);
+}
+
+#define PCS_GET_PROP_U32(name, reg, err) \
+ do { \
+ ret = of_property_read_u32(np, name, reg); \
+ if (ret) { \
+ dev_err(pcs->dev, err); \
+ return ret; \
+ } \
+ } while (0);
+
+static struct of_device_id pcs_of_match[];
+
+static int __devinit pcs_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *match;
+ struct resource *res;
+ struct pcs_device *pcs;
+ int ret;
+
+ match = of_match_device(pcs_of_match, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ pcs = devm_kzalloc(&pdev->dev, sizeof(*pcs), GFP_KERNEL);
+ if (!pcs) {
+ dev_err(&pdev->dev, "could not allocate\n");
+ return -ENOMEM;
+ }
+ pcs->dev = &pdev->dev;
+ mutex_init(&pcs->mutex);
+ INIT_LIST_HEAD(&pcs->pingroups);
+ INIT_LIST_HEAD(&pcs->functions);
+
+ PCS_GET_PROP_U32("pinctrl-single,register-width", &pcs->width,
+ "register width not specified\n");
+
+ PCS_GET_PROP_U32("pinctrl-single,function-mask", &pcs->fmask,
+ "function register mask not specified\n");
+ pcs->fshift = ffs(pcs->fmask) - 1;
+ pcs->fmax = pcs->fmask >> pcs->fshift;
+
+ ret = of_property_read_u32(np, "pinctrl-single,function-off",
+ &pcs->foff);
+ if (ret)
+ pcs->foff = PCS_OFF_DISABLED;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(pcs->dev, "could not get resource\n");
+ return -ENODEV;
+ }
+
+ pcs->res = devm_request_mem_region(pcs->dev, res->start,
+ resource_size(res), DRIVER_NAME);
+ if (!pcs->res) {
+ dev_err(pcs->dev, "could not get mem_region\n");
+ return -EBUSY;
+ }
+
+ pcs->size = resource_size(pcs->res);
+ pcs->base = devm_ioremap(pcs->dev, pcs->res->start, pcs->size);
+ if (!pcs->base) {
+ dev_err(pcs->dev, "could not ioremap\n");
+ return -ENODEV;
+ }
+
+ INIT_RADIX_TREE(&pcs->pgtree, GFP_KERNEL);
+ INIT_RADIX_TREE(&pcs->ftree, GFP_KERNEL);
+ platform_set_drvdata(pdev, pcs);
+
+ switch (pcs->width) {
+ case 8:
+ pcs->read = pcs_readb;
+ pcs->write = pcs_writeb;
+ break;
+ case 16:
+ pcs->read = pcs_readw;
+ pcs->write = pcs_writew;
+ break;
+ case 32:
+ pcs->read = pcs_readl;
+ pcs->write = pcs_writel;
+ break;
+ default:
+ break;
+ }
+
+ pcs->desc.name = DRIVER_NAME;
+ pcs->desc.pctlops = &pcs_pinctrl_ops;
+ pcs->desc.pmxops = &pcs_pinmux_ops;
+ pcs->desc.confops = &pcs_pinconf_ops;
+ pcs->desc.owner = THIS_MODULE;
+
+ ret = pcs_allocate_pin_table(pcs);
+ if (ret < 0)
+ goto free;
+
+ pcs->pctl = pinctrl_register(&pcs->desc, pcs->dev, pcs);
+ if (!pcs->pctl) {
+ dev_err(pcs->dev, "could not register single pinctrl driver\n");
+ ret = -EINVAL;
+ goto free;
+ }
+
+ dev_info(pcs->dev, "%i pins at pa %p size %u\n",
+ pcs->desc.npins, pcs->base, pcs->size);
+
+ return 0;
+
+free:
+ pcs_free_resources(pcs);
+
+ return ret;
+}
+
+static int __devexit pcs_remove(struct platform_device *pdev)
+{
+ struct pcs_device *pcs = platform_get_drvdata(pdev);
+
+ if (!pcs)
+ return 0;
+
+ pcs_free_resources(pcs);
+
+ return 0;
+}
+
+static struct of_device_id pcs_of_match[] __devinitdata = {
+ { .compatible = DRIVER_NAME, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pcs_of_match);
+
+static struct platform_driver pcs_driver = {
+ .probe = pcs_probe,
+ .remove = __devexit_p(pcs_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = pcs_of_match,
+ },
+};
+
+module_platform_driver(pcs_driver);
+
+MODULE_AUTHOR("Tony Lindgren <tony@atomide.com>");
+MODULE_DESCRIPTION("One-register-per-pin type device tree based pinctrl driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index e9f8e7d11001..2aae8a8978e9 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -8,24 +8,61 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <linux/irqdomain.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#define DRIVER_NAME "pinmux-sirf"
#define SIRFSOC_NUM_PADS 622
-#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
#define SIRFSOC_RSC_PIN_MUX 0x4
+#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
+#define SIRFSOC_GPIO_CTRL(g, i) ((g)*0x100 + (i)*4)
+#define SIRFSOC_GPIO_DSP_EN0 (0x80)
+#define SIRFSOC_GPIO_PAD_EN(g) ((g)*0x100 + 0x84)
+#define SIRFSOC_GPIO_INT_STATUS(g) ((g)*0x100 + 0x8C)
+
+#define SIRFSOC_GPIO_CTL_INTR_LOW_MASK 0x1
+#define SIRFSOC_GPIO_CTL_INTR_HIGH_MASK 0x2
+#define SIRFSOC_GPIO_CTL_INTR_TYPE_MASK 0x4
+#define SIRFSOC_GPIO_CTL_INTR_EN_MASK 0x8
+#define SIRFSOC_GPIO_CTL_INTR_STS_MASK 0x10
+#define SIRFSOC_GPIO_CTL_OUT_EN_MASK 0x20
+#define SIRFSOC_GPIO_CTL_DATAOUT_MASK 0x40
+#define SIRFSOC_GPIO_CTL_DATAIN_MASK 0x80
+#define SIRFSOC_GPIO_CTL_PULL_MASK 0x100
+#define SIRFSOC_GPIO_CTL_PULL_HIGH 0x200
+#define SIRFSOC_GPIO_CTL_DSP_INT 0x400
+
+#define SIRFSOC_GPIO_NO_OF_BANKS 5
+#define SIRFSOC_GPIO_BANK_SIZE 32
+#define SIRFSOC_GPIO_NUM(bank, index) (((bank)*(32)) + (index))
+
+struct sirfsoc_gpio_bank {
+ struct of_mm_gpio_chip chip;
+ struct irq_domain *domain;
+ int id;
+ int parent_irq;
+ spinlock_t lock;
+};
+
+static struct sirfsoc_gpio_bank sgpio_bank[SIRFSOC_GPIO_NO_OF_BANKS];
+static DEFINE_SPINLOCK(sgpio_lock);
+
/*
* pad list for the pinmux subsystem
* refer to CS-131858-DC-6A.xls
@@ -1204,7 +1241,457 @@ static int __init sirfsoc_pinmux_init(void)
}
arch_initcall(sirfsoc_pinmux_init);
+static inline int sirfsoc_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct sirfsoc_gpio_bank *bank = container_of(to_of_mm_gpio_chip(chip),
+ struct sirfsoc_gpio_bank, chip);
+
+ return irq_find_mapping(bank->domain, offset);
+}
+
+static inline int sirfsoc_gpio_to_offset(unsigned int gpio)
+{
+ return gpio % SIRFSOC_GPIO_BANK_SIZE;
+}
+
+static inline struct sirfsoc_gpio_bank *sirfsoc_gpio_to_bank(unsigned int gpio)
+{
+ return &sgpio_bank[gpio / SIRFSOC_GPIO_BANK_SIZE];
+}
+
+void sirfsoc_gpio_set_pull(unsigned gpio, unsigned mode)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_gpio_to_bank(gpio);
+ int idx = sirfsoc_gpio_to_offset(gpio);
+ u32 val, offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ val = readl(bank->chip.regs + offset);
+
+ switch (mode) {
+ case SIRFSOC_GPIO_PULL_NONE:
+ val &= ~SIRFSOC_GPIO_CTL_PULL_MASK;
+ break;
+ case SIRFSOC_GPIO_PULL_UP:
+ val |= SIRFSOC_GPIO_CTL_PULL_MASK;
+ val |= SIRFSOC_GPIO_CTL_PULL_HIGH;
+ break;
+ case SIRFSOC_GPIO_PULL_DOWN:
+ val |= SIRFSOC_GPIO_CTL_PULL_MASK;
+ val &= ~SIRFSOC_GPIO_CTL_PULL_HIGH;
+ break;
+ default:
+ break;
+ }
+
+ writel(val, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+}
+EXPORT_SYMBOL(sirfsoc_gpio_set_pull);
+
+static inline struct sirfsoc_gpio_bank *sirfsoc_irqchip_to_bank(struct gpio_chip *chip)
+{
+ return container_of(to_of_mm_gpio_chip(chip), struct sirfsoc_gpio_bank, chip);
+}
+
+static void sirfsoc_gpio_irq_ack(struct irq_data *d)
+{
+ struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ int idx = d->hwirq % SIRFSOC_GPIO_BANK_SIZE;
+ u32 val, offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ val = readl(bank->chip.regs + offset);
+
+ writel(val, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+}
+
+static void __sirfsoc_gpio_irq_mask(struct sirfsoc_gpio_bank *bank, int idx)
+{
+ u32 val, offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ val = readl(bank->chip.regs + offset);
+ val &= ~SIRFSOC_GPIO_CTL_INTR_EN_MASK;
+ val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
+ writel(val, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+}
+
+static void sirfsoc_gpio_irq_mask(struct irq_data *d)
+{
+ struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+
+ __sirfsoc_gpio_irq_mask(bank, d->hwirq % SIRFSOC_GPIO_BANK_SIZE);
+}
+
+static void sirfsoc_gpio_irq_unmask(struct irq_data *d)
+{
+ struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ int idx = d->hwirq % SIRFSOC_GPIO_BANK_SIZE;
+ u32 val, offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ val = readl(bank->chip.regs + offset);
+ val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
+ val |= SIRFSOC_GPIO_CTL_INTR_EN_MASK;
+ writel(val, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+}
+
+static int sirfsoc_gpio_irq_type(struct irq_data *d, unsigned type)
+{
+ struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
+ int idx = d->hwirq % SIRFSOC_GPIO_BANK_SIZE;
+ u32 val, offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ val = readl(bank->chip.regs + offset);
+ val &= ~SIRFSOC_GPIO_CTL_INTR_STS_MASK;
+
+ switch (type) {
+ case IRQ_TYPE_NONE:
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
+ val &= ~SIRFSOC_GPIO_CTL_INTR_LOW_MASK;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ val &= ~SIRFSOC_GPIO_CTL_INTR_HIGH_MASK;
+ val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_LOW_MASK |
+ SIRFSOC_GPIO_CTL_INTR_TYPE_MASK;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ val &= ~(SIRFSOC_GPIO_CTL_INTR_HIGH_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK);
+ val |= SIRFSOC_GPIO_CTL_INTR_LOW_MASK;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ val |= SIRFSOC_GPIO_CTL_INTR_HIGH_MASK;
+ val &= ~(SIRFSOC_GPIO_CTL_INTR_LOW_MASK | SIRFSOC_GPIO_CTL_INTR_TYPE_MASK);
+ break;
+ }
+
+ writel(val, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+
+ return 0;
+}
+
+static struct irq_chip sirfsoc_irq_chip = {
+ .name = "sirf-gpio-irq",
+ .irq_ack = sirfsoc_gpio_irq_ack,
+ .irq_mask = sirfsoc_gpio_irq_mask,
+ .irq_unmask = sirfsoc_gpio_irq_unmask,
+ .irq_set_type = sirfsoc_gpio_irq_type,
+};
+
+static void sirfsoc_gpio_handle_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct sirfsoc_gpio_bank *bank = irq_get_handler_data(irq);
+ u32 status, ctrl;
+ int idx = 0;
+ unsigned int first_irq;
+
+ status = readl(bank->chip.regs + SIRFSOC_GPIO_INT_STATUS(bank->id));
+ if (!status) {
+ printk(KERN_WARNING
+ "%s: gpio id %d status %#x no interrupt is flaged\n",
+ __func__, bank->id, status);
+ handle_bad_irq(irq, desc);
+ return;
+ }
+
+ first_irq = bank->domain->revmap_data.legacy.first_irq;
+
+ while (status) {
+ ctrl = readl(bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, idx));
+
+ /*
+ * Here we must check whether the corresponding GPIO's interrupt
+ * has been enabled, otherwise just skip it
+ */
+ if ((status & 0x1) && (ctrl & SIRFSOC_GPIO_CTL_INTR_EN_MASK)) {
+ pr_debug("%s: gpio id %d idx %d happens\n",
+ __func__, bank->id, idx);
+ generic_handle_irq(first_irq + idx);
+ }
+
+ idx++;
+ status = status >> 1;
+ }
+}
+
+static inline void sirfsoc_gpio_set_input(struct sirfsoc_gpio_bank *bank, unsigned ctrl_offset)
+{
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl(bank->chip.regs + ctrl_offset);
+ val &= ~SIRFSOC_GPIO_CTL_OUT_EN_MASK;
+ writel(val, bank->chip.regs + ctrl_offset);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+static int sirfsoc_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ unsigned long flags;
+
+ if (pinctrl_request_gpio(chip->base + offset))
+ return -ENODEV;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ /*
+ * default status:
+ * set direction as input and mask irq
+ */
+ sirfsoc_gpio_set_input(bank, SIRFSOC_GPIO_CTRL(bank->id, offset));
+ __sirfsoc_gpio_irq_mask(bank, offset);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static void sirfsoc_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ __sirfsoc_gpio_irq_mask(bank, offset);
+ sirfsoc_gpio_set_input(bank, SIRFSOC_GPIO_CTRL(bank->id, offset));
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ pinctrl_free_gpio(chip->base + offset);
+}
+
+static int sirfsoc_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ int idx = sirfsoc_gpio_to_offset(gpio);
+ unsigned long flags;
+ unsigned offset;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ sirfsoc_gpio_set_input(bank, offset);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
+}
+
+static inline void sirfsoc_gpio_set_output(struct sirfsoc_gpio_bank *bank, unsigned offset,
+ int value)
+{
+ u32 out_ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ out_ctrl = readl(bank->chip.regs + offset);
+ if (value)
+ out_ctrl |= SIRFSOC_GPIO_CTL_DATAOUT_MASK;
+ else
+ out_ctrl &= ~SIRFSOC_GPIO_CTL_DATAOUT_MASK;
+
+ out_ctrl &= ~SIRFSOC_GPIO_CTL_INTR_EN_MASK;
+ out_ctrl |= SIRFSOC_GPIO_CTL_OUT_EN_MASK;
+ writel(out_ctrl, bank->chip.regs + offset);
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+static int sirfsoc_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int value)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ int idx = sirfsoc_gpio_to_offset(gpio);
+ u32 offset;
+ unsigned long flags;
+
+ offset = SIRFSOC_GPIO_CTRL(bank->id, idx);
+
+ spin_lock_irqsave(&sgpio_lock, flags);
+
+ sirfsoc_gpio_set_output(bank, offset, value);
+
+ spin_unlock_irqrestore(&sgpio_lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_gpio_get_value(struct gpio_chip *chip, unsigned offset)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ u32 val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ val = readl(bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset));
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return !!(val & SIRFSOC_GPIO_CTL_DATAIN_MASK);
+}
+
+static void sirfsoc_gpio_set_value(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct sirfsoc_gpio_bank *bank = sirfsoc_irqchip_to_bank(chip);
+ u32 ctrl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->lock, flags);
+
+ ctrl = readl(bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset));
+ if (value)
+ ctrl |= SIRFSOC_GPIO_CTL_DATAOUT_MASK;
+ else
+ ctrl &= ~SIRFSOC_GPIO_CTL_DATAOUT_MASK;
+ writel(ctrl, bank->chip.regs + SIRFSOC_GPIO_CTRL(bank->id, offset));
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+}
+
+int sirfsoc_gpio_irq_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct sirfsoc_gpio_bank *bank = d->host_data;
+
+ if (!bank)
+ return -EINVAL;
+
+ irq_set_chip(irq, &sirfsoc_irq_chip);
+ irq_set_handler(irq, handle_level_irq);
+ irq_set_chip_data(irq, bank);
+ set_irq_flags(irq, IRQF_VALID);
+
+ return 0;
+}
+
+const struct irq_domain_ops sirfsoc_gpio_irq_simple_ops = {
+ .map = sirfsoc_gpio_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int __devinit sirfsoc_gpio_probe(struct device_node *np)
+{
+ int i, err = 0;
+ struct sirfsoc_gpio_bank *bank;
+ void *regs;
+ struct platform_device *pdev;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ return -ENODEV;
+
+ regs = of_iomap(np, 0);
+ if (!regs)
+ return -ENOMEM;
+
+ for (i = 0; i < SIRFSOC_GPIO_NO_OF_BANKS; i++) {
+ bank = &sgpio_bank[i];
+ spin_lock_init(&bank->lock);
+ bank->chip.gc.request = sirfsoc_gpio_request;
+ bank->chip.gc.free = sirfsoc_gpio_free;
+ bank->chip.gc.direction_input = sirfsoc_gpio_direction_input;
+ bank->chip.gc.get = sirfsoc_gpio_get_value;
+ bank->chip.gc.direction_output = sirfsoc_gpio_direction_output;
+ bank->chip.gc.set = sirfsoc_gpio_set_value;
+ bank->chip.gc.to_irq = sirfsoc_gpio_to_irq;
+ bank->chip.gc.base = i * SIRFSOC_GPIO_BANK_SIZE;
+ bank->chip.gc.ngpio = SIRFSOC_GPIO_BANK_SIZE;
+ bank->chip.gc.label = kstrdup(np->full_name, GFP_KERNEL);
+ bank->chip.gc.of_node = np;
+ bank->chip.regs = regs;
+ bank->id = i;
+ bank->parent_irq = platform_get_irq(pdev, i);
+ if (bank->parent_irq < 0) {
+ err = bank->parent_irq;
+ goto out;
+ }
+
+ err = gpiochip_add(&bank->chip.gc);
+ if (err) {
+ pr_err("%s: error in probe function with status %d\n",
+ np->full_name, err);
+ goto out;
+ }
+
+ bank->domain = irq_domain_add_legacy(np, SIRFSOC_GPIO_BANK_SIZE,
+ SIRFSOC_GPIO_IRQ_START + i * SIRFSOC_GPIO_BANK_SIZE, 0,
+ &sirfsoc_gpio_irq_simple_ops, bank);
+
+ if (!bank->domain) {
+ pr_err("%s: Failed to create irqdomain\n", np->full_name);
+ err = -ENOSYS;
+ goto out;
+ }
+
+ irq_set_chained_handler(bank->parent_irq, sirfsoc_gpio_handle_irq);
+ irq_set_handler_data(bank->parent_irq, bank);
+ }
+
+out:
+ iounmap(regs);
+ return err;
+}
+
+static int __init sirfsoc_gpio_init(void)
+{
+
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, pinmux_ids);
+
+ if (!np)
+ return -ENODEV;
+
+ return sirfsoc_gpio_probe(np);
+}
+subsys_initcall(sirfsoc_gpio_init);
+
MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+ "Yuping Luo <yuping.luo@csr.com>, "
"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC pin control driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index b6934867d8d3..ae52e4e5d098 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -745,9 +745,9 @@ int __devinit tegra_pinctrl_probe(struct platform_device *pdev,
}
pmx->pctl = pinctrl_register(&tegra_pinctrl_desc, &pdev->dev, pmx);
- if (IS_ERR(pmx->pctl)) {
+ if (!pmx->pctl) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
- return PTR_ERR(pmx->pctl);
+ return -ENODEV;
}
pinctrl_add_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
@@ -764,7 +764,6 @@ int __devexit tegra_pinctrl_remove(struct platform_device *pdev)
{
struct tegra_pmx *pmx = platform_get_drvdata(pdev);
- pinctrl_remove_gpio_range(pmx->pctl, &tegra_pinctrl_gpio_range);
pinctrl_unregister(pmx->pctl);
return 0;
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 05d029911be6..a7ad8c112d91 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1113,8 +1113,6 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev)
int ret;
int i;
- pr_err("U300 PMX PROBE\n");
-
/* Create state holders etc for this driver */
upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
if (!upmx)
@@ -1175,15 +1173,11 @@ out_no_resource:
static int __devexit u300_pmx_remove(struct platform_device *pdev)
{
struct u300_pmx *upmx = platform_get_drvdata(pdev);
- int i;
- for (i = 0; i < ARRAY_SIZE(u300_gpio_ranges); i++)
- pinctrl_remove_gpio_range(upmx->pctl, &u300_gpio_ranges[i]);
pinctrl_unregister(upmx->pctl);
iounmap(upmx->virtbase);
release_mem_region(upmx->phybase, upmx->physize);
platform_set_drvdata(pdev, NULL);
- devm_kfree(&pdev->dev, upmx);
return 0;
}
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index b3f6b2873fdd..5d4f44f462f0 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -336,9 +336,9 @@ int __devinit spear_pinctrl_probe(struct platform_device *pdev,
spear_pinctrl_desc.npins = machdata->npins;
pmx->pctl = pinctrl_register(&spear_pinctrl_desc, &pdev->dev, pmx);
- if (IS_ERR(pmx->pctl)) {
+ if (!pmx->pctl) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
- return PTR_ERR(pmx->pctl);
+ return -ENODEV;
}
return 0;
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index ce875dc365e5..c8f40c9c0428 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1877,8 +1877,7 @@ static int acer_platform_remove(struct platform_device *device)
return 0;
}
-static int acer_platform_suspend(struct platform_device *dev,
-pm_message_t state)
+static int acer_suspend(struct device *dev)
{
u32 value;
struct acer_data *data = &interface->data;
@@ -1900,7 +1899,7 @@ pm_message_t state)
return 0;
}
-static int acer_platform_resume(struct platform_device *device)
+static int acer_resume(struct device *dev)
{
struct acer_data *data = &interface->data;
@@ -1916,6 +1915,8 @@ static int acer_platform_resume(struct platform_device *device)
return 0;
}
+static SIMPLE_DEV_PM_OPS(acer_pm, acer_suspend, acer_resume);
+
static void acer_platform_shutdown(struct platform_device *device)
{
struct acer_data *data = &interface->data;
@@ -1931,11 +1932,10 @@ static struct platform_driver acer_platform_driver = {
.driver = {
.name = "acer-wmi",
.owner = THIS_MODULE,
+ .pm = &acer_pm,
},
.probe = acer_platform_probe,
.remove = acer_platform_remove,
- .suspend = acer_platform_suspend,
- .resume = acer_platform_resume,
.shutdown = acer_platform_shutdown,
};
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 94f93b621d7b..e2230a2b2f8e 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -362,15 +362,18 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type)
return cmpc_remove_acpi_notify_device(acpi);
}
-static int cmpc_tablet_resume(struct acpi_device *acpi)
+static int cmpc_tablet_resume(struct device *dev)
{
- struct input_dev *inputdev = dev_get_drvdata(&acpi->dev);
+ struct input_dev *inputdev = dev_get_drvdata(dev);
+
unsigned long long val = 0;
- if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val)))
+ if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val)))
input_report_switch(inputdev, SW_TABLET_MODE, !val);
return 0;
}
+static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume);
+
static const struct acpi_device_id cmpc_tablet_device_ids[] = {
{CMPC_TABLET_HID, 0},
{"", 0}
@@ -384,9 +387,9 @@ static struct acpi_driver cmpc_tablet_acpi_driver = {
.ops = {
.add = cmpc_tablet_add,
.remove = cmpc_tablet_remove,
- .resume = cmpc_tablet_resume,
.notify = cmpc_tablet_handler,
- }
+ },
+ .drv.pm = &cmpc_tablet_pm,
};
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index da267eae8ba8..d2e41735a47b 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -440,12 +440,14 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type)
return 0;
}
-static int acpi_fujitsu_resume(struct acpi_device *adev)
+static int acpi_fujitsu_resume(struct device *dev)
{
fujitsu_reset();
return 0;
}
+static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume);
+
static struct acpi_driver acpi_fujitsu_driver = {
.name = MODULENAME,
.class = "hotkey",
@@ -453,8 +455,8 @@ static struct acpi_driver acpi_fujitsu_driver = {
.ops = {
.add = acpi_fujitsu_add,
.remove = acpi_fujitsu_remove,
- .resume = acpi_fujitsu_resume,
- }
+ },
+ .drv.pm = &acpi_fujitsu_pm,
};
static int __init fujitsu_module_init(void)
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 24a3ae065f1b..d9ab6f64dcec 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -305,17 +305,19 @@ static int hdaps_probe(struct platform_device *dev)
return 0;
}
-static int hdaps_resume(struct platform_device *dev)
+static int hdaps_resume(struct device *dev)
{
return hdaps_device_init();
}
+static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume);
+
static struct platform_driver hdaps_driver = {
.probe = hdaps_probe,
- .resume = hdaps_resume,
.driver = {
.name = "hdaps",
.owner = THIS_MODULE,
+ .pm = &hdaps_pm,
},
};
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 22b2dfa73148..f4d91154ad67 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -353,20 +353,22 @@ static int lis3lv02d_remove(struct acpi_device *device, int type)
#ifdef CONFIG_PM
-static int lis3lv02d_suspend(struct acpi_device *device, pm_message_t state)
+static int lis3lv02d_suspend(struct device *dev)
{
/* make sure the device is off when we suspend */
lis3lv02d_poweroff(&lis3_dev);
return 0;
}
-static int lis3lv02d_resume(struct acpi_device *device)
+static int lis3lv02d_resume(struct device *dev)
{
return lis3lv02d_poweron(&lis3_dev);
}
+
+static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
+#define HP_ACCEL_PM (&hp_accel_pm)
#else
-#define lis3lv02d_suspend NULL
-#define lis3lv02d_resume NULL
+#define HP_ACCEL_PM NULL
#endif
/* For the HP MDPS aka 3D Driveguard */
@@ -377,9 +379,8 @@ static struct acpi_driver lis3lv02d_driver = {
.ops = {
.add = lis3lv02d_add,
.remove = lis3lv02d_remove,
- .suspend = lis3lv02d_suspend,
- .resume = lis3lv02d_resume,
- }
+ },
+ .drv.pm = HP_ACCEL_PM,
};
static int __init lis3lv02d_init_module(void)
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 4f20f8dd3d7c..17f6dfd8dbfb 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -694,10 +694,10 @@ MODULE_DEVICE_TABLE(acpi, ideapad_device_ids);
static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
{
int ret, i;
- unsigned long cfg;
+ int cfg;
struct ideapad_private *priv;
- if (read_method_int(adevice->handle, "_CFG", (int *)&cfg))
+ if (read_method_int(adevice->handle, "_CFG", &cfg))
return -ENODEV;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
@@ -721,7 +721,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice)
goto input_failed;
for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) {
- if (test_bit(ideapad_rfk_data[i].cfgbit, &cfg))
+ if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg))
ideapad_register_rfkill(adevice, i);
else
priv->rfk[i] = NULL;
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 0ffdb3cde2bb..5051aa970e0a 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -72,6 +72,7 @@
#include <linux/string.h>
#include <linux/tick.h>
#include <linux/timer.h>
+#include <linux/dmi.h>
#include <drm/i915_drm.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -1485,6 +1486,24 @@ static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
MODULE_DEVICE_TABLE(pci, ips_id_table);
+static int ips_blacklist_callback(const struct dmi_system_id *id)
+{
+ pr_info("Blacklisted intel_ips for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id ips_blacklist[] = {
+ {
+ .callback = ips_blacklist_callback,
+ .ident = "HP ProBook",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook"),
+ },
+ },
+ { } /* terminating entry */
+};
+
static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
u64 platform_info;
@@ -1494,6 +1513,9 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
u16 htshi, trc, trc_required_mask;
u8 tse;
+ if (dmi_check_system(ips_blacklist))
+ return -ENODEV;
+
ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
if (!ips)
return -ENOMEM;
@@ -1697,21 +1719,6 @@ static void ips_remove(struct pci_dev *dev)
dev_dbg(&dev->dev, "IPS driver removed\n");
}
-#ifdef CONFIG_PM
-static int ips_suspend(struct pci_dev *dev, pm_message_t state)
-{
- return 0;
-}
-
-static int ips_resume(struct pci_dev *dev)
-{
- return 0;
-}
-#else
-#define ips_suspend NULL
-#define ips_resume NULL
-#endif /* CONFIG_PM */
-
static void ips_shutdown(struct pci_dev *dev)
{
}
@@ -1721,8 +1728,6 @@ static struct pci_driver ips_pci_driver = {
.id_table = ips_id_table,
.probe = ips_probe,
.remove = ips_remove,
- .suspend = ips_suspend,
- .resume = ips_resume,
.shutdown = ips_shutdown,
};
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c
index 5ae9cd9c7e6e..ea7422f6fa03 100644
--- a/drivers/platform/x86/intel_mid_thermal.c
+++ b/drivers/platform/x86/intel_mid_thermal.c
@@ -418,23 +418,23 @@ static struct thermal_device_info *initialize_sensor(int index)
/**
* mid_thermal_resume - resume routine
- * @pdev: platform device structure
+ * @dev: device structure
*
* mid thermal resume: re-initializes the adc. Can sleep.
*/
-static int mid_thermal_resume(struct platform_device *pdev)
+static int mid_thermal_resume(struct device *dev)
{
- return mid_initialize_adc(&pdev->dev);
+ return mid_initialize_adc(dev);
}
/**
* mid_thermal_suspend - suspend routine
- * @pdev: platform device structure
+ * @dev: device structure
*
* mid thermal suspend implements the suspend functionality
* by stopping the ADC. Can sleep.
*/
-static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
+static int mid_thermal_suspend(struct device *dev)
{
/*
* This just stops the ADC and does not disable it.
@@ -444,6 +444,9 @@ static int mid_thermal_suspend(struct platform_device *pdev, pm_message_t mesg)
return configure_adc(0);
}
+static SIMPLE_DEV_PM_OPS(mid_thermal_pm,
+ mid_thermal_suspend, mid_thermal_resume);
+
/**
* read_curr_temp - reads the current temperature and stores in temp
* @temp: holds the current temperature value after reading
@@ -557,10 +560,9 @@ static struct platform_driver mid_thermal_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .pm = &mid_thermal_pm,
},
.probe = mid_thermal_probe,
- .suspend = mid_thermal_suspend,
- .resume = mid_thermal_resume,
.remove = __devexit_p(mid_thermal_remove),
.id_table = therm_id_table,
};
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index bb5132128b33..f64441844317 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -85,7 +85,8 @@
#define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4
#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4)
-static int msi_laptop_resume(struct platform_device *device);
+static int msi_laptop_resume(struct device *device);
+static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume);
#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f
@@ -437,8 +438,8 @@ static struct platform_driver msipf_driver = {
.driver = {
.name = "msi-laptop-pf",
.owner = THIS_MODULE,
+ .pm = &msi_laptop_pm,
},
- .resume = msi_laptop_resume,
};
static struct platform_device *msipf_device;
@@ -752,7 +753,7 @@ err_bluetooth:
return retval;
}
-static int msi_laptop_resume(struct platform_device *device)
+static int msi_laptop_resume(struct device *device)
{
u8 data;
int result;
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index ffff8b4b4949..24480074bcf0 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -177,7 +177,6 @@ enum SINF_BITS { SINF_NUM_BATTERIES = 0,
static int acpi_pcc_hotkey_add(struct acpi_device *device);
static int acpi_pcc_hotkey_remove(struct acpi_device *device, int type);
-static int acpi_pcc_hotkey_resume(struct acpi_device *device);
static void acpi_pcc_hotkey_notify(struct acpi_device *device, u32 event);
static const struct acpi_device_id pcc_device_ids[] = {
@@ -189,6 +188,9 @@ static const struct acpi_device_id pcc_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, pcc_device_ids);
+static int acpi_pcc_hotkey_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume);
+
static struct acpi_driver acpi_pcc_driver = {
.name = ACPI_PCC_DRIVER_NAME,
.class = ACPI_PCC_CLASS,
@@ -196,9 +198,9 @@ static struct acpi_driver acpi_pcc_driver = {
.ops = {
.add = acpi_pcc_hotkey_add,
.remove = acpi_pcc_hotkey_remove,
- .resume = acpi_pcc_hotkey_resume,
.notify = acpi_pcc_hotkey_notify,
},
+ .drv.pm = &acpi_pcc_hotkey_pm,
};
static const struct key_entry panasonic_keymap[] = {
@@ -538,11 +540,15 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc)
/* kernel module interface */
-static int acpi_pcc_hotkey_resume(struct acpi_device *device)
+static int acpi_pcc_hotkey_resume(struct device *dev)
{
- struct pcc_acpi *pcc = acpi_driver_data(device);
+ struct pcc_acpi *pcc;
+
+ if (!dev)
+ return -EINVAL;
- if (device == NULL || pcc == NULL)
+ pcc = acpi_driver_data(to_acpi_device(dev));
+ if (!pcc)
return -EINVAL;
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Sticky mode restore: %d\n",
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 210d4ae547c2..9363969ad07a 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -973,7 +973,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
struct device_attribute *attr,
const char *buffer, size_t count)
{
- unsigned long value = 0;
+ int value;
int ret = 0;
struct sony_nc_value *item =
container_of(attr, struct sony_nc_value, devattr);
@@ -984,7 +984,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
if (count > 31)
return -EINVAL;
- if (kstrtoul(buffer, 10, &value))
+ if (kstrtoint(buffer, 10, &value))
return -EINVAL;
if (item->validate)
@@ -994,7 +994,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
return value;
ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
- (int *)&value, NULL);
+ &value, NULL);
if (ret < 0)
return -EIO;
@@ -1010,6 +1010,7 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
struct sony_backlight_props {
struct backlight_device *dev;
int handle;
+ int cmd_base;
u8 offset;
u8 maxlvl;
};
@@ -1037,7 +1038,7 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
- sony_call_snc_handle(sdev->handle, 0x0200, &result);
+ sony_call_snc_handle(sdev->handle, sdev->cmd_base + 0x100, &result);
return (result & 0xff) - sdev->offset;
}
@@ -1049,7 +1050,8 @@ static int sony_nc_update_status_ng(struct backlight_device *bd)
(struct sony_backlight_props *)bl_get_data(bd);
value = bd->props.brightness + sdev->offset;
- if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
+ if (sony_call_snc_handle(sdev->handle, sdev->cmd_base | (value << 0x10),
+ &result))
return -EIO;
return value;
@@ -1172,6 +1174,11 @@ static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
/*
* ACPI callbacks
*/
+enum event_types {
+ HOTKEY = 1,
+ KILLSWITCH,
+ GFX_SWITCH
+};
static void sony_nc_notify(struct acpi_device *device, u32 event)
{
u32 real_ev = event;
@@ -1196,7 +1203,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
/* hotkey event */
case 0x0100:
case 0x0127:
- ev_type = 1;
+ ev_type = HOTKEY;
real_ev = sony_nc_hotkeys_decode(event, handle);
if (real_ev > 0)
@@ -1216,7 +1223,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
* update the rfkill device status when the
* switch is moved.
*/
- ev_type = 2;
+ ev_type = KILLSWITCH;
sony_call_snc_handle(handle, 0x0100, &result);
real_ev = result & 0x03;
@@ -1226,6 +1233,24 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
break;
+ case 0x0128:
+ case 0x0146:
+ /* Hybrid GFX switching */
+ sony_call_snc_handle(handle, 0x0000, &result);
+ dprintk("GFX switch event received (reason: %s)\n",
+ (result & 0x01) ?
+ "switch change" : "unknown");
+
+ /* verify the switch state
+ * 1: discrete GFX
+ * 0: integrated GFX
+ */
+ sony_call_snc_handle(handle, 0x0100, &result);
+
+ ev_type = GFX_SWITCH;
+ real_ev = result & 0xff;
+ break;
+
default:
dprintk("Unknown event 0x%x for handle 0x%x\n",
event, handle);
@@ -1238,7 +1263,7 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
} else {
/* old style event */
- ev_type = 1;
+ ev_type = HOTKEY;
sony_laptop_report_input_event(real_ev);
}
@@ -1452,7 +1477,7 @@ static void sony_nc_function_resume(void)
&result);
}
-static int sony_nc_resume(struct acpi_device *device)
+static int sony_nc_resume(struct device *dev)
{
struct sony_nc_value *item;
acpi_handle handle;
@@ -1484,6 +1509,8 @@ static int sony_nc_resume(struct acpi_device *device)
return 0;
}
+static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume);
+
static void sony_nc_rfkill_cleanup(void)
{
int i;
@@ -1893,32 +1920,33 @@ static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
* bits 4,5: store the limit into the EC
* bits 6,7: store the limit into the battery
*/
+ cmd = 0;
- /*
- * handle 0x0115 should allow storing on battery too;
- * handle 0x0136 same as 0x0115 + health status;
- * handle 0x013f, same as 0x0136 but no storing on the battery
- *
- * Store only inside the EC for now, regardless the handle number
- */
- if (value == 0)
- /* disable limits */
- cmd = 0x0;
+ if (value > 0) {
+ if (value <= 50)
+ cmd = 0x20;
- else if (value <= 50)
- cmd = 0x21;
+ else if (value <= 80)
+ cmd = 0x10;
- else if (value <= 80)
- cmd = 0x11;
+ else if (value <= 100)
+ cmd = 0x30;
- else if (value <= 100)
- cmd = 0x31;
+ else
+ return -EINVAL;
- else
- return -EINVAL;
+ /*
+ * handle 0x0115 should allow storing on battery too;
+ * handle 0x0136 same as 0x0115 + health status;
+ * handle 0x013f, same as 0x0136 but no storing on the battery
+ */
+ if (bcare_ctl->handle != 0x013f)
+ cmd = cmd | (cmd << 2);
- if (sony_call_snc_handle(bcare_ctl->handle, (cmd << 0x10) | 0x0100,
- &result))
+ cmd = (cmd | 0x1) << 0x10;
+ }
+
+ if (sony_call_snc_handle(bcare_ctl->handle, cmd | 0x0100, &result))
return -EIO;
return count;
@@ -2113,7 +2141,7 @@ static ssize_t sony_nc_thermal_mode_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
ssize_t count = 0;
- unsigned int mode = sony_nc_thermal_mode_get();
+ int mode = sony_nc_thermal_mode_get();
if (mode < 0)
return mode;
@@ -2472,6 +2500,7 @@ static void sony_nc_backlight_ng_read_limits(int handle,
{
u64 offset;
int i;
+ int lvl_table_len = 0;
u8 min = 0xff, max = 0x00;
unsigned char buffer[32] = { 0 };
@@ -2480,8 +2509,6 @@ static void sony_nc_backlight_ng_read_limits(int handle,
props->maxlvl = 0xff;
offset = sony_find_snc_handle(handle);
- if (offset < 0)
- return;
/* try to read the boundaries from ACPI tables, if we fail the above
* defaults should be reasonable
@@ -2491,11 +2518,21 @@ static void sony_nc_backlight_ng_read_limits(int handle,
if (i < 0)
return;
+ switch (handle) {
+ case 0x012f:
+ case 0x0137:
+ lvl_table_len = 9;
+ break;
+ case 0x143:
+ lvl_table_len = 16;
+ break;
+ }
+
/* the buffer lists brightness levels available, brightness levels are
* from position 0 to 8 in the array, other values are used by ALS
* control.
*/
- for (i = 0; i < 9 && i < ARRAY_SIZE(buffer); i++) {
+ for (i = 0; i < lvl_table_len && i < ARRAY_SIZE(buffer); i++) {
dprintk("Brightness level: %d\n", buffer[i]);
@@ -2520,16 +2557,24 @@ static void sony_nc_backlight_setup(void)
const struct backlight_ops *ops = NULL;
struct backlight_properties props;
- if (sony_find_snc_handle(0x12f) != -1) {
+ if (sony_find_snc_handle(0x12f) >= 0) {
ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x0100;
sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
- } else if (sony_find_snc_handle(0x137) != -1) {
+ } else if (sony_find_snc_handle(0x137) >= 0) {
ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x0100;
sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+ } else if (sony_find_snc_handle(0x143) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x3000;
+ sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
&unused))) {
ops = &sony_backlight_ops;
@@ -2597,6 +2642,12 @@ static int sony_nc_add(struct acpi_device *device)
}
}
+ result = sony_laptop_setup_input(device);
+ if (result) {
+ pr_err("Unable to create input devices\n");
+ goto outplatform;
+ }
+
if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
&handle))) {
int arg = 1;
@@ -2614,12 +2665,6 @@ static int sony_nc_add(struct acpi_device *device)
}
/* setup input devices and helper fifo */
- result = sony_laptop_setup_input(device);
- if (result) {
- pr_err("Unable to create input devices\n");
- goto outsnc;
- }
-
if (acpi_video_backlight_support()) {
pr_info("brightness ignored, must be controlled by ACPI video driver\n");
} else {
@@ -2667,22 +2712,21 @@ static int sony_nc_add(struct acpi_device *device)
return 0;
- out_sysfs:
+out_sysfs:
for (item = sony_nc_values; item->name; ++item) {
device_remove_file(&sony_pf_device->dev, &item->devattr);
}
sony_nc_backlight_cleanup();
-
- sony_laptop_remove_input();
-
- outsnc:
sony_nc_function_cleanup(sony_pf_device);
sony_nc_handles_cleanup(sony_pf_device);
- outpresent:
+outplatform:
+ sony_laptop_remove_input();
+
+outpresent:
sony_pf_remove();
- outwalk:
+outwalk:
sony_nc_rfkill_cleanup();
return result;
}
@@ -2728,9 +2772,9 @@ static struct acpi_driver sony_nc_driver = {
.ops = {
.add = sony_nc_add,
.remove = sony_nc_remove,
- .resume = sony_nc_resume,
.notify = sony_nc_notify,
},
+ .drv.pm = &sony_nc_pm,
};
/*********** SPIC (SNY6001) Device ***********/
@@ -4243,19 +4287,22 @@ err_free_resources:
return result;
}
-static int sony_pic_suspend(struct acpi_device *device, pm_message_t state)
+static int sony_pic_suspend(struct device *dev)
{
- if (sony_pic_disable(device))
+ if (sony_pic_disable(to_acpi_device(dev)))
return -ENXIO;
return 0;
}
-static int sony_pic_resume(struct acpi_device *device)
+static int sony_pic_resume(struct device *dev)
{
- sony_pic_enable(device, spic_dev.cur_ioport, spic_dev.cur_irq);
+ sony_pic_enable(to_acpi_device(dev),
+ spic_dev.cur_ioport, spic_dev.cur_irq);
return 0;
}
+static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume);
+
static const struct acpi_device_id sony_pic_device_ids[] = {
{SONY_PIC_HID, 0},
{"", 0},
@@ -4269,9 +4316,8 @@ static struct acpi_driver sony_pic_driver = {
.ops = {
.add = sony_pic_add,
.remove = sony_pic_remove,
- .suspend = sony_pic_suspend,
- .resume = sony_pic_resume,
},
+ .drv.pm = &sony_pic_pm,
};
static struct dmi_system_id __initdata sonypi_dmi_table[] = {
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 8b5610d88418..d5fd4a1193f8 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -277,7 +277,7 @@ struct ibm_struct {
int (*write) (char *);
void (*exit) (void);
void (*resume) (void);
- void (*suspend) (pm_message_t state);
+ void (*suspend) (void);
void (*shutdown) (void);
struct list_head all_drivers;
@@ -922,8 +922,7 @@ static struct input_dev *tpacpi_inputdev;
static struct mutex tpacpi_inputdev_send_mutex;
static LIST_HEAD(tpacpi_all_drivers);
-static int tpacpi_suspend_handler(struct platform_device *pdev,
- pm_message_t state)
+static int tpacpi_suspend_handler(struct device *dev)
{
struct ibm_struct *ibm, *itmp;
@@ -931,13 +930,13 @@ static int tpacpi_suspend_handler(struct platform_device *pdev,
&tpacpi_all_drivers,
all_drivers) {
if (ibm->suspend)
- (ibm->suspend)(state);
+ (ibm->suspend)();
}
return 0;
}
-static int tpacpi_resume_handler(struct platform_device *pdev)
+static int tpacpi_resume_handler(struct device *dev)
{
struct ibm_struct *ibm, *itmp;
@@ -951,6 +950,9 @@ static int tpacpi_resume_handler(struct platform_device *pdev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(tpacpi_pm,
+ tpacpi_suspend_handler, tpacpi_resume_handler);
+
static void tpacpi_shutdown_handler(struct platform_device *pdev)
{
struct ibm_struct *ibm, *itmp;
@@ -967,9 +969,8 @@ static struct platform_driver tpacpi_pdriver = {
.driver = {
.name = TPACPI_DRVR_NAME,
.owner = THIS_MODULE,
+ .pm = &tpacpi_pm,
},
- .suspend = tpacpi_suspend_handler,
- .resume = tpacpi_resume_handler,
.shutdown = tpacpi_shutdown_handler,
};
@@ -3758,7 +3759,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
}
}
-static void hotkey_suspend(pm_message_t state)
+static void hotkey_suspend(void)
{
/* Do these on suspend, we get the events on early resume! */
hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
@@ -6329,7 +6330,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
return 0;
}
-static void brightness_suspend(pm_message_t state)
+static void brightness_suspend(void)
{
tpacpi_brightness_checkpoint_nvram();
}
@@ -6748,7 +6749,7 @@ static struct snd_kcontrol_new volume_alsa_control_mute __devinitdata = {
.get = volume_alsa_mute_get,
};
-static void volume_suspend(pm_message_t state)
+static void volume_suspend(void)
{
tpacpi_volume_checkpoint_nvram();
}
@@ -8107,7 +8108,7 @@ static void fan_exit(void)
flush_workqueue(tpacpi_wq);
}
-static void fan_suspend(pm_message_t state)
+static void fan_suspend(void)
{
int rc;
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index dab10f6edcd4..c13ba5bac93f 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1296,10 +1296,9 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
}
}
-static int toshiba_acpi_suspend(struct acpi_device *acpi_dev,
- pm_message_t state)
+static int toshiba_acpi_suspend(struct device *device)
{
- struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
+ struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
u32 result;
if (dev->hotkey_dev)
@@ -1308,9 +1307,9 @@ static int toshiba_acpi_suspend(struct acpi_device *acpi_dev,
return 0;
}
-static int toshiba_acpi_resume(struct acpi_device *acpi_dev)
+static int toshiba_acpi_resume(struct device *device)
{
- struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
+ struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device));
u32 result;
if (dev->hotkey_dev)
@@ -1319,6 +1318,9 @@ static int toshiba_acpi_resume(struct acpi_device *acpi_dev)
return 0;
}
+static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm,
+ toshiba_acpi_suspend, toshiba_acpi_resume);
+
static struct acpi_driver toshiba_acpi_driver = {
.name = "Toshiba ACPI driver",
.owner = THIS_MODULE,
@@ -1328,9 +1330,8 @@ static struct acpi_driver toshiba_acpi_driver = {
.add = toshiba_acpi_add,
.remove = toshiba_acpi_remove,
.notify = toshiba_acpi_notify,
- .suspend = toshiba_acpi_suspend,
- .resume = toshiba_acpi_resume,
},
+ .drv.pm = &toshiba_acpi_pm,
};
static int __init toshiba_acpi_init(void)
diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c
index 5fb7186694df..715a43cb5e3c 100644
--- a/drivers/platform/x86/toshiba_bluetooth.c
+++ b/drivers/platform/x86/toshiba_bluetooth.c
@@ -34,7 +34,6 @@ MODULE_LICENSE("GPL");
static int toshiba_bt_rfkill_add(struct acpi_device *device);
static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type);
static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);
-static int toshiba_bt_resume(struct acpi_device *device);
static const struct acpi_device_id bt_device_ids[] = {
{ "TOS6205", 0},
@@ -42,6 +41,9 @@ static const struct acpi_device_id bt_device_ids[] = {
};
MODULE_DEVICE_TABLE(acpi, bt_device_ids);
+static int toshiba_bt_resume(struct device *dev);
+static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume);
+
static struct acpi_driver toshiba_bt_rfkill_driver = {
.name = "Toshiba BT",
.class = "Toshiba",
@@ -50,9 +52,9 @@ static struct acpi_driver toshiba_bt_rfkill_driver = {
.add = toshiba_bt_rfkill_add,
.remove = toshiba_bt_rfkill_remove,
.notify = toshiba_bt_rfkill_notify,
- .resume = toshiba_bt_resume,
},
.owner = THIS_MODULE,
+ .drv.pm = &toshiba_bt_pm,
};
@@ -88,9 +90,9 @@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
toshiba_bluetooth_enable(device->handle);
}
-static int toshiba_bt_resume(struct acpi_device *device)
+static int toshiba_bt_resume(struct device *dev)
{
- return toshiba_bluetooth_enable(device->handle);
+ return toshiba_bluetooth_enable(to_acpi_device(dev)->handle);
}
static int toshiba_bt_rfkill_add(struct acpi_device *device)
diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c
index fad153dc0355..849c07c13bf6 100644
--- a/drivers/platform/x86/xo15-ebook.c
+++ b/drivers/platform/x86/xo15-ebook.c
@@ -77,11 +77,13 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event)
}
}
-static int ebook_switch_resume(struct acpi_device *device)
+static int ebook_switch_resume(struct device *dev)
{
- return ebook_send_state(device);
+ return ebook_send_state(to_acpi_device(dev));
}
+static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume);
+
static int ebook_switch_add(struct acpi_device *device)
{
struct ebook_switch *button;
@@ -161,10 +163,10 @@ static struct acpi_driver xo15_ebook_driver = {
.ids = ebook_device_ids,
.ops = {
.add = ebook_switch_add,
- .resume = ebook_switch_resume,
.remove = ebook_switch_remove,
.notify = ebook_switch_notify,
},
+ .drv.pm = &ebook_switch_pm,
};
static int __init xo15_ebook_init(void)
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index d21e8f59c84e..507a8e2b9a4c 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -170,8 +170,8 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
}
if (acpi_bus_power_manageable(handle)) {
- int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL);
-
+ int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL,
+ ACPI_STATE_D3);
if (power_state < 0)
power_state = (state.event == PM_EVENT_ON) ?
ACPI_STATE_D0 : ACPI_STATE_D3;
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index e3a3b4956f08..aa764ecc4e60 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -301,7 +301,7 @@ config AB8500_BM
bool "AB8500 Battery Management Driver"
depends on AB8500_CORE && AB8500_GPADC
help
- Say Y to include support for AB5500 battery management.
+ Say Y to include support for AB8500 battery management.
config AB8500_BATTERY_THERM_ON_BATCTRL
bool "Thermistor connected on BATCTRL ADC"
@@ -310,3 +310,5 @@ config AB8500_BATTERY_THERM_ON_BATCTRL
Say Y to enable battery temperature measurements using
thermistor connected on BATCTRL ADC.
endif # POWER_SUPPLY
+
+source "drivers/power/avs/Kconfig"
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b6b243416c0e..ee58afb1e71f 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -43,4 +43,5 @@ obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
+obj-$(CONFIG_POWER_AVS) += avs/
obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
new file mode 100644
index 000000000000..2a1008b61121
--- /dev/null
+++ b/drivers/power/avs/Kconfig
@@ -0,0 +1,12 @@
+menuconfig POWER_AVS
+ bool "Adaptive Voltage Scaling class support"
+ help
+ AVS is a power management technique which finely controls the
+ operating voltage of a device in order to optimize (i.e. reduce)
+ its power consumption.
+ At a given operating point the voltage is adapted depending on
+ static factors (chip manufacturing process) and dynamic factors
+ (temperature depending performance).
+ AVS is also called SmartReflex on OMAP devices.
+
+ Say Y here to enable Adaptive Voltage Scaling class support.
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
new file mode 100644
index 000000000000..0843386a6c19
--- /dev/null
+++ b/drivers/power/avs/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
diff --git a/arch/arm/mach-omap2/smartreflex.c b/drivers/power/avs/smartreflex.c
index 008fbd7b9352..44efc6e202af 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/drivers/power/avs/smartreflex.c
@@ -3,7 +3,7 @@
*
* Author: Thara Gopinath <thara@ti.com>
*
- * Copyright (C) 2010 Texas Instruments, Inc.
+ * Copyright (C) 2012 Texas Instruments, Inc.
* Thara Gopinath <thara@ti.com>
*
* Copyright (C) 2008 Nokia Corporation
@@ -25,39 +25,12 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-
-#include "common.h"
-
-#include "pm.h"
-#include "smartreflex.h"
+#include <linux/power/smartreflex.h>
#define SMARTREFLEX_NAME_LEN 16
#define NVALUE_NAME_LEN 40
#define SR_DISABLE_TIMEOUT 200
-struct omap_sr {
- struct list_head node;
- struct platform_device *pdev;
- struct omap_sr_nvalue_table *nvalue_table;
- struct voltagedomain *voltdm;
- struct dentry *dbg_dir;
- unsigned int irq;
- int srid;
- int ip_type;
- int nvalue_count;
- bool autocomp_active;
- u32 clk_length;
- u32 err_weight;
- u32 err_minlimit;
- u32 err_maxlimit;
- u32 accum_data;
- u32 senn_avgweight;
- u32 senp_avgweight;
- u32 senp_mod;
- u32 senn_mod;
- void __iomem *base;
-};
-
/* sr_list contains all the instances of smartreflex module */
static LIST_HEAD(sr_list);
@@ -148,7 +121,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
}
if (sr_class->notify)
- sr_class->notify(sr_info->voltdm, status);
+ sr_class->notify(sr_info, status);
return IRQ_HANDLED;
}
@@ -207,7 +180,7 @@ static void sr_set_regfields(struct omap_sr *sr)
sr->err_weight = OMAP3430_SR_ERRWEIGHT;
sr->err_maxlimit = OMAP3430_SR_ERRMAXLIMIT;
sr->accum_data = OMAP3430_SR_ACCUMDATA;
- if (!(strcmp(sr->voltdm->name, "mpu"))) {
+ if (!(strcmp(sr->name, "smartreflex_mpu_iva"))) {
sr->senn_avgweight = OMAP3430_SR1_SENNAVGWEIGHT;
sr->senp_avgweight = OMAP3430_SR1_SENPAVGWEIGHT;
} else {
@@ -226,7 +199,7 @@ static void sr_start_vddautocomp(struct omap_sr *sr)
return;
}
- if (!sr_class->enable(sr->voltdm))
+ if (!sr_class->enable(sr))
sr->autocomp_active = true;
}
@@ -240,7 +213,7 @@ static void sr_stop_vddautocomp(struct omap_sr *sr)
}
if (sr->autocomp_active) {
- sr_class->disable(sr->voltdm, 1);
+ sr_class->disable(sr, 1);
sr->autocomp_active = false;
}
}
@@ -258,19 +231,13 @@ static void sr_stop_vddautocomp(struct omap_sr *sr)
*/
static int sr_late_init(struct omap_sr *sr_info)
{
- char *name;
struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data;
struct resource *mem;
int ret = 0;
if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
- name = kasprintf(GFP_KERNEL, "sr_%s", sr_info->voltdm->name);
- if (name == NULL) {
- ret = -ENOMEM;
- goto error;
- }
ret = request_irq(sr_info->irq, sr_interrupt,
- 0, name, sr_info);
+ 0, sr_info->name, sr_info);
if (ret)
goto error;
disable_irq(sr_info->irq);
@@ -289,7 +256,6 @@ error:
dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
"interrupt handler. Smartreflex will"
"not function as desired\n", __func__);
- kfree(name);
kfree(sr_info);
return ret;
@@ -320,9 +286,9 @@ static void sr_v1_disable(struct omap_sr *sr)
* Wait for SR to be disabled.
* wait until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us.
*/
- omap_test_timeout((sr_read_reg(sr, ERRCONFIG_V1) &
- ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT,
- timeout);
+ sr_test_cond_timeout((sr_read_reg(sr, ERRCONFIG_V1) &
+ ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT,
+ timeout);
if (timeout >= SR_DISABLE_TIMEOUT)
dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n",
@@ -365,9 +331,9 @@ static void sr_v2_disable(struct omap_sr *sr)
* Wait for SR to be disabled.
* wait until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us.
*/
- omap_test_timeout((sr_read_reg(sr, IRQSTATUS) &
- IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT,
- timeout);
+ sr_test_cond_timeout((sr_read_reg(sr, IRQSTATUS) &
+ IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT,
+ timeout);
if (timeout >= SR_DISABLE_TIMEOUT)
dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n",
@@ -378,22 +344,23 @@ static void sr_v2_disable(struct omap_sr *sr)
sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT);
}
-static u32 sr_retrieve_nvalue(struct omap_sr *sr, u32 efuse_offs)
+static struct omap_sr_nvalue_table *sr_retrieve_nvalue_row(
+ struct omap_sr *sr, u32 efuse_offs)
{
int i;
if (!sr->nvalue_table) {
dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n",
__func__);
- return 0;
+ return NULL;
}
for (i = 0; i < sr->nvalue_count; i++) {
if (sr->nvalue_table[i].efuse_offs == efuse_offs)
- return sr->nvalue_table[i].nvalue;
+ return &sr->nvalue_table[i];
}
- return 0;
+ return NULL;
}
/* Public Functions */
@@ -419,8 +386,7 @@ int sr_configure_errgen(struct voltagedomain *voltdm)
struct omap_sr *sr = _sr_lookup(voltdm);
if (IS_ERR(sr)) {
- pr_warning("%s: omap_sr struct for sr_%s not found\n",
- __func__, voltdm->name);
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
return PTR_ERR(sr);
}
@@ -487,8 +453,7 @@ int sr_disable_errgen(struct voltagedomain *voltdm)
struct omap_sr *sr = _sr_lookup(voltdm);
if (IS_ERR(sr)) {
- pr_warning("%s: omap_sr struct for sr_%s not found\n",
- __func__, voltdm->name);
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
return PTR_ERR(sr);
}
@@ -538,8 +503,7 @@ int sr_configure_minmax(struct voltagedomain *voltdm)
struct omap_sr *sr = _sr_lookup(voltdm);
if (IS_ERR(sr)) {
- pr_warning("%s: omap_sr struct for sr_%s not found\n",
- __func__, voltdm->name);
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
return PTR_ERR(sr);
}
@@ -620,12 +584,11 @@ int sr_enable(struct voltagedomain *voltdm, unsigned long volt)
{
struct omap_volt_data *volt_data;
struct omap_sr *sr = _sr_lookup(voltdm);
- u32 nvalue_reciprocal;
+ struct omap_sr_nvalue_table *nvalue_row;
int ret;
if (IS_ERR(sr)) {
- pr_warning("%s: omap_sr struct for sr_%s not found\n",
- __func__, voltdm->name);
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
return PTR_ERR(sr);
}
@@ -637,16 +600,16 @@ int sr_enable(struct voltagedomain *voltdm, unsigned long volt)
return PTR_ERR(volt_data);
}
- nvalue_reciprocal = sr_retrieve_nvalue(sr, volt_data->sr_efuse_offs);
+ nvalue_row = sr_retrieve_nvalue_row(sr, volt_data->sr_efuse_offs);
- if (!nvalue_reciprocal) {
- dev_warn(&sr->pdev->dev, "%s: NVALUE = 0 at voltage %ld\n",
- __func__, volt);
+ if (!nvalue_row) {
+ dev_warn(&sr->pdev->dev, "%s: failure getting SR data for this voltage %ld\n",
+ __func__, volt);
return -ENODATA;
}
/* errminlimit is opp dependent and hence linked to voltage */
- sr->err_minlimit = volt_data->sr_errminlimit;
+ sr->err_minlimit = nvalue_row->errminlimit;
pm_runtime_get_sync(&sr->pdev->dev);
@@ -655,11 +618,11 @@ int sr_enable(struct voltagedomain *voltdm, unsigned long volt)
return 0;
/* Configure SR */
- ret = sr_class->configure(voltdm);
+ ret = sr_class->configure(sr);
if (ret)
return ret;
- sr_write_reg(sr, NVALUERECIPROCAL, nvalue_reciprocal);
+ sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue);
/* SRCONFIG - enable SR */
sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE);
@@ -678,8 +641,7 @@ void sr_disable(struct voltagedomain *voltdm)
struct omap_sr *sr = _sr_lookup(voltdm);
if (IS_ERR(sr)) {
- pr_warning("%s: omap_sr struct for sr_%s not found\n",
- __func__, voltdm->name);
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
return;
}
@@ -759,8 +721,7 @@ void omap_sr_enable(struct voltagedomain *voltdm)
struct omap_sr *sr = _sr_lookup(voltdm);
if (IS_ERR(sr)) {
- pr_warning("%s: omap_sr struct for sr_%s not found\n",
- __func__, voltdm->name);
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
return;
}
@@ -773,7 +734,7 @@ void omap_sr_enable(struct voltagedomain *voltdm)
return;
}
- sr_class->enable(voltdm);
+ sr_class->enable(sr);
}
/**
@@ -792,8 +753,7 @@ void omap_sr_disable(struct voltagedomain *voltdm)
struct omap_sr *sr = _sr_lookup(voltdm);
if (IS_ERR(sr)) {
- pr_warning("%s: omap_sr struct for sr_%s not found\n",
- __func__, voltdm->name);
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
return;
}
@@ -806,7 +766,7 @@ void omap_sr_disable(struct voltagedomain *voltdm)
return;
}
- sr_class->disable(voltdm, 0);
+ sr_class->disable(sr, 0);
}
/**
@@ -825,8 +785,7 @@ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
struct omap_sr *sr = _sr_lookup(voltdm);
if (IS_ERR(sr)) {
- pr_warning("%s: omap_sr struct for sr_%s not found\n",
- __func__, voltdm->name);
+ pr_warning("%s: omap_sr struct for voltdm not found\n", __func__);
return;
}
@@ -839,7 +798,7 @@ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm)
return;
}
- sr_class->disable(voltdm, 1);
+ sr_class->disable(sr, 1);
}
/**
@@ -911,9 +870,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
struct omap_sr_data *pdata = pdev->dev.platform_data;
struct resource *mem, *irq;
struct dentry *nvalue_dir;
- struct omap_volt_data *volt_data;
int i, ret = 0;
- char *name;
sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL);
if (!sr_info) {
@@ -950,6 +907,14 @@ static int __init omap_sr_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_irq_safe(&pdev->dev);
+ sr_info->name = kasprintf(GFP_KERNEL, "%s", pdata->name);
+ if (!sr_info->name) {
+ dev_err(&pdev->dev, "%s: Unable to alloc SR instance name\n",
+ __func__);
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+
sr_info->pdev = pdev;
sr_info->srid = pdev->id;
sr_info->voltdm = pdata->voltdm;
@@ -997,20 +962,12 @@ static int __init omap_sr_probe(struct platform_device *pdev)
}
}
- name = kasprintf(GFP_KERNEL, "sr_%s", sr_info->voltdm->name);
- if (!name) {
- dev_err(&pdev->dev, "%s: Unable to alloc debugfs name\n",
- __func__);
- ret = -ENOMEM;
- goto err_iounmap;
- }
- sr_info->dbg_dir = debugfs_create_dir(name, sr_dbg_dir);
- kfree(name);
+ sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir);
if (IS_ERR_OR_NULL(sr_info->dbg_dir)) {
dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
__func__);
ret = PTR_ERR(sr_info->dbg_dir);
- goto err_iounmap;
+ goto err_free_name;
}
(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR,
@@ -1019,8 +976,6 @@ static int __init omap_sr_probe(struct platform_device *pdev)
&sr_info->err_weight);
(void) debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir,
&sr_info->err_maxlimit);
- (void) debugfs_create_x32("errminlimit", S_IRUGO, sr_info->dbg_dir,
- &sr_info->err_minlimit);
nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir);
if (IS_ERR_OR_NULL(nvalue_dir)) {
@@ -1030,12 +985,10 @@ static int __init omap_sr_probe(struct platform_device *pdev)
goto err_debugfs;
}
- omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
- if (!volt_data) {
- dev_warn(&pdev->dev, "%s: No Voltage table for the"
- " corresponding vdd vdd_%s. Cannot create debugfs"
- "entries for n-values\n",
- __func__, sr_info->voltdm->name);
+ if (sr_info->nvalue_count == 0 || !sr_info->nvalue_table) {
+ dev_warn(&pdev->dev, "%s: %s: No Voltage table for the corresponding vdd. Cannot create debugfs entries for n-values\n",
+ __func__, sr_info->name);
+
ret = -ENODATA;
goto err_debugfs;
}
@@ -1043,16 +996,23 @@ static int __init omap_sr_probe(struct platform_device *pdev)
for (i = 0; i < sr_info->nvalue_count; i++) {
char name[NVALUE_NAME_LEN + 1];
- snprintf(name, sizeof(name), "volt_%d",
- volt_data[i].volt_nominal);
+ snprintf(name, sizeof(name), "volt_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
&(sr_info->nvalue_table[i].nvalue));
+ snprintf(name, sizeof(name), "errminlimit_%lu",
+ sr_info->nvalue_table[i].volt_nominal);
+ (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
+ &(sr_info->nvalue_table[i].errminlimit));
+
}
return ret;
err_debugfs:
debugfs_remove_recursive(sr_info->dbg_dir);
+err_free_name:
+ kfree(sr_info->name);
err_iounmap:
list_del(&sr_info->node);
iounmap(sr_info->base);
@@ -1089,6 +1049,7 @@ static int __devexit omap_sr_remove(struct platform_device *pdev)
list_del(&sr_info->node);
iounmap(sr_info->base);
+ kfree(sr_info->name);
kfree(sr_info);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(mem->start, resource_size(mem));
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index c86b8864e411..f34c3be6c9fe 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -20,6 +20,7 @@ menuconfig REGULATOR
If unsure, say no.
+
if REGULATOR
config REGULATOR_DEBUG
@@ -88,6 +89,13 @@ config REGULATOR_AAT2870
If you have a AnalogicTech AAT2870 say Y to enable the
regulator driver.
+config REGULATOR_ARIZONA
+ tristate "Wolfson Arizona class devices"
+ depends on MFD_ARIZONA
+ help
+ Support for the regulators found on Wolfson Arizona class
+ devices.
+
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
@@ -195,6 +203,14 @@ config REGULATOR_MAX8998
via I2C bus. The provided regulator is suitable for S3C6410
and S5PC1XX chips to control VCC_CORE and VCC_USIM voltages.
+config REGULATOR_MAX77686
+ tristate "Maxim 77686 regulator"
+ depends on MFD_MAX77686
+ help
+ This driver controls a Maxim 77686 regulator
+ via I2C bus. The provided regulator is suitable for
+ Exynos-4 chips to control VARM and VINT voltages.
+
config REGULATOR_PCAP
tristate "Motorola PCAP2 regulator driver"
depends on EZX_PCAP
@@ -216,6 +232,19 @@ config REGULATOR_LP3972
Say Y here to support the voltage regulators and convertors
on National Semiconductors LP3972 PMIC
+config REGULATOR_LP872X
+ bool "TI/National Semiconductor LP8720/LP8725 voltage regulators"
+ depends on I2C=y
+ select REGMAP_I2C
+ help
+ This driver supports LP8720/LP8725 PMIC
+
+config REGULATOR_LP8788
+ bool "TI LP8788 Power Regulators"
+ depends on MFD_LP8788
+ help
+ This driver supports LP8788 voltage regulator chip.
+
config REGULATOR_PCF50633
tristate "NXP PCF50633 regulator driver"
depends on MFD_PCF50633
@@ -233,6 +262,14 @@ config REGULATOR_RC5T583
through regulator interface. The device supports multiple DCDC/LDO
outputs which can be controlled by i2c communication.
+config REGULATOR_S2MPS11
+ tristate "Samsung S2MPS11 voltage regulator"
+ depends on MFD_SEC_CORE
+ help
+ This driver supports a Samsung S2MPS11 voltage output regulator
+ via I2C bus. S2MPS11 is comprised of high efficient Buck converters
+ including Dual-Phase Buck converter, Buck-Boost converter, various LDOs.
+
config REGULATOR_S5M8767
tristate "Samsung S5M8767A voltage regulator"
depends on MFD_S5M_CORE
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 977fd46909ab..3342615cf25e 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
+obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
@@ -23,6 +24,9 @@ obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o
obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_LP3972) += lp3972.o
+obj-$(CONFIG_REGULATOR_LP872X) += lp872x.o
+obj-$(CONFIG_REGULATOR_LP8788) += lp8788-buck.o
+obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
@@ -30,6 +34,7 @@ obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o
obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
+obj-$(CONFIG_REGULATOR_MAX77686) += max77686.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
@@ -37,6 +42,7 @@ obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
+obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 06776ca945f2..6f45bfd22e83 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -33,11 +33,6 @@ struct aat2870_regulator {
struct aat2870_data *aat2870;
struct regulator_desc desc;
- const int *voltages; /* uV */
-
- int min_uV;
- int max_uV;
-
u8 enable_addr;
u8 enable_shift;
u8 enable_mask;
@@ -47,14 +42,6 @@ struct aat2870_regulator {
u8 voltage_mask;
};
-static int aat2870_ldo_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- struct aat2870_regulator *ri = rdev_get_drvdata(rdev);
-
- return ri->voltages[selector];
-}
-
static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
@@ -111,7 +98,7 @@ static int aat2870_ldo_is_enabled(struct regulator_dev *rdev)
}
static struct regulator_ops aat2870_ldo_ops = {
- .list_voltage = aat2870_ldo_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage_sel = aat2870_ldo_set_voltage_sel,
.get_voltage_sel = aat2870_ldo_get_voltage_sel,
.enable = aat2870_ldo_enable,
@@ -119,7 +106,7 @@ static struct regulator_ops aat2870_ldo_ops = {
.is_enabled = aat2870_ldo_is_enabled,
};
-static const int aat2870_ldo_voltages[] = {
+static const unsigned int aat2870_ldo_voltages[] = {
1200000, 1300000, 1500000, 1600000,
1800000, 2000000, 2200000, 2500000,
2600000, 2700000, 2800000, 2900000,
@@ -132,13 +119,11 @@ static const int aat2870_ldo_voltages[] = {
.name = #ids, \
.id = AAT2870_ID_##ids, \
.n_voltages = ARRAY_SIZE(aat2870_ldo_voltages), \
+ .volt_table = aat2870_ldo_voltages, \
.ops = &aat2870_ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
}, \
- .voltages = aat2870_ldo_voltages, \
- .min_uV = 1200000, \
- .max_uV = 3300000, \
}
static struct aat2870_regulator aat2870_regulators[] = {
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 03f4d9c604ec..182b553059c9 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -43,20 +43,12 @@
* @dev: handle to the device
* @plfdata: AB3100 platform data passed in at probe time
* @regreg: regulator register number in the AB3100
- * @fixed_voltage: a fixed voltage for this regulator, if this
- * 0 the voltages array is used instead.
- * @typ_voltages: an array of available typical voltages for
- * this regulator
- * @voltages_len: length of the array of available voltages
*/
struct ab3100_regulator {
struct regulator_dev *rdev;
struct device *dev;
struct ab3100_platform_data *plfdata;
u8 regreg;
- int fixed_voltage;
- int const *typ_voltages;
- u8 voltages_len;
};
/* The order in which registers are initialized */
@@ -80,7 +72,7 @@ static const u8 ab3100_reg_init_order[AB3100_NUM_REGULATORS+2] = {
#define LDO_C_VOLTAGE 2650000
#define LDO_D_VOLTAGE 2650000
-static const int ldo_e_buck_typ_voltages[] = {
+static const unsigned int ldo_e_buck_typ_voltages[] = {
1800000,
1400000,
1300000,
@@ -90,7 +82,7 @@ static const int ldo_e_buck_typ_voltages[] = {
900000,
};
-static const int ldo_f_typ_voltages[] = {
+static const unsigned int ldo_f_typ_voltages[] = {
1800000,
1400000,
1300000,
@@ -101,21 +93,21 @@ static const int ldo_f_typ_voltages[] = {
2650000,
};
-static const int ldo_g_typ_voltages[] = {
+static const unsigned int ldo_g_typ_voltages[] = {
2850000,
2750000,
1800000,
1500000,
};
-static const int ldo_h_typ_voltages[] = {
+static const unsigned int ldo_h_typ_voltages[] = {
2750000,
1800000,
1500000,
1200000,
};
-static const int ldo_k_typ_voltages[] = {
+static const unsigned int ldo_k_typ_voltages[] = {
2750000,
1800000,
};
@@ -126,40 +118,27 @@ static struct ab3100_regulator
ab3100_regulators[AB3100_NUM_REGULATORS] = {
{
.regreg = AB3100_LDO_A,
- .fixed_voltage = LDO_A_VOLTAGE,
},
{
.regreg = AB3100_LDO_C,
- .fixed_voltage = LDO_C_VOLTAGE,
},
{
.regreg = AB3100_LDO_D,
- .fixed_voltage = LDO_D_VOLTAGE,
},
{
.regreg = AB3100_LDO_E,
- .typ_voltages = ldo_e_buck_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_e_buck_typ_voltages),
},
{
.regreg = AB3100_LDO_F,
- .typ_voltages = ldo_f_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_f_typ_voltages),
},
{
.regreg = AB3100_LDO_G,
- .typ_voltages = ldo_g_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_g_typ_voltages),
},
{
.regreg = AB3100_LDO_H,
- .typ_voltages = ldo_h_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_h_typ_voltages),
},
{
.regreg = AB3100_LDO_K,
- .typ_voltages = ldo_k_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_k_typ_voltages),
},
{
.regreg = AB3100_LDO_EXT,
@@ -167,8 +146,6 @@ ab3100_regulators[AB3100_NUM_REGULATORS] = {
},
{
.regreg = AB3100_BUCK,
- .typ_voltages = ldo_e_buck_typ_voltages,
- .voltages_len = ARRAY_SIZE(ldo_e_buck_typ_voltages),
},
};
@@ -178,7 +155,7 @@ ab3100_regulators[AB3100_NUM_REGULATORS] = {
*/
static int ab3100_enable_regulator(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
int err;
u8 regval;
@@ -209,7 +186,7 @@ static int ab3100_enable_regulator(struct regulator_dev *reg)
static int ab3100_disable_regulator(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
int err;
u8 regval;
@@ -242,7 +219,7 @@ static int ab3100_disable_regulator(struct regulator_dev *reg)
static int ab3100_is_enabled_regulator(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
u8 regval;
int err;
@@ -257,26 +234,12 @@ static int ab3100_is_enabled_regulator(struct regulator_dev *reg)
return regval & AB3100_REG_ON_MASK;
}
-static int ab3100_list_voltage_regulator(struct regulator_dev *reg,
- unsigned selector)
-{
- struct ab3100_regulator *abreg = reg->reg_data;
-
- if (selector >= abreg->voltages_len)
- return -EINVAL;
- return abreg->typ_voltages[selector];
-}
-
static int ab3100_get_voltage_regulator(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
u8 regval;
int err;
- /* Return the voltage for fixed regulators immediately */
- if (abreg->fixed_voltage)
- return abreg->fixed_voltage;
-
/*
* For variable types, read out setting and index into
* supplied voltage list.
@@ -294,20 +257,20 @@ static int ab3100_get_voltage_regulator(struct regulator_dev *reg)
regval &= 0xE0;
regval >>= 5;
- if (regval >= abreg->voltages_len) {
+ if (regval >= reg->desc->n_voltages) {
dev_err(&reg->dev,
"regulator register %02x contains an illegal voltage setting\n",
abreg->regreg);
return -EINVAL;
}
- return abreg->typ_voltages[regval];
+ return reg->desc->volt_table[regval];
}
static int ab3100_set_voltage_regulator_sel(struct regulator_dev *reg,
unsigned selector)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
u8 regval;
int err;
@@ -336,7 +299,7 @@ static int ab3100_set_voltage_regulator_sel(struct regulator_dev *reg,
static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
int uV)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
u8 regval;
int err;
int bestindex;
@@ -379,42 +342,22 @@ static int ab3100_set_suspend_voltage_regulator(struct regulator_dev *reg,
*/
static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
+ struct ab3100_regulator *abreg = rdev_get_drvdata(reg);
return abreg->plfdata->external_voltage;
}
-static int ab3100_enable_time_regulator(struct regulator_dev *reg)
+static int ab3100_get_fixed_voltage_regulator(struct regulator_dev *reg)
{
- struct ab3100_regulator *abreg = reg->reg_data;
-
- /* Per-regulator power on delay from spec */
- switch (abreg->regreg) {
- case AB3100_LDO_A: /* Fallthrough */
- case AB3100_LDO_C: /* Fallthrough */
- case AB3100_LDO_D: /* Fallthrough */
- case AB3100_LDO_E: /* Fallthrough */
- case AB3100_LDO_H: /* Fallthrough */
- case AB3100_LDO_K:
- return 200;
- case AB3100_LDO_F:
- return 600;
- case AB3100_LDO_G:
- return 400;
- case AB3100_BUCK:
- return 1000;
- default:
- break;
- }
- return 0;
+ return reg->desc->min_uV;
}
static struct regulator_ops regulator_ops_fixed = {
+ .list_voltage = regulator_list_voltage_linear,
.enable = ab3100_enable_regulator,
.disable = ab3100_disable_regulator,
.is_enabled = ab3100_is_enabled_regulator,
- .get_voltage = ab3100_get_voltage_regulator,
- .enable_time = ab3100_enable_time_regulator,
+ .get_voltage = ab3100_get_fixed_voltage_regulator,
};
static struct regulator_ops regulator_ops_variable = {
@@ -423,8 +366,7 @@ static struct regulator_ops regulator_ops_variable = {
.is_enabled = ab3100_is_enabled_regulator,
.get_voltage = ab3100_get_voltage_regulator,
.set_voltage_sel = ab3100_set_voltage_regulator_sel,
- .list_voltage = ab3100_list_voltage_regulator,
- .enable_time = ab3100_enable_time_regulator,
+ .list_voltage = regulator_list_voltage_table,
};
static struct regulator_ops regulator_ops_variable_sleepable = {
@@ -434,8 +376,7 @@ static struct regulator_ops regulator_ops_variable_sleepable = {
.get_voltage = ab3100_get_voltage_regulator,
.set_voltage_sel = ab3100_set_voltage_regulator_sel,
.set_suspend_voltage = ab3100_set_suspend_voltage_regulator,
- .list_voltage = ab3100_list_voltage_regulator,
- .enable_time = ab3100_enable_time_regulator,
+ .list_voltage = regulator_list_voltage_table,
};
/*
@@ -457,62 +398,81 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.name = "LDO_A",
.id = AB3100_LDO_A,
.ops = &regulator_ops_fixed,
+ .n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .min_uV = LDO_A_VOLTAGE,
+ .enable_time = 200,
},
{
.name = "LDO_C",
.id = AB3100_LDO_C,
.ops = &regulator_ops_fixed,
+ .n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .min_uV = LDO_C_VOLTAGE,
+ .enable_time = 200,
},
{
.name = "LDO_D",
.id = AB3100_LDO_D,
.ops = &regulator_ops_fixed,
+ .n_voltages = 1,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .min_uV = LDO_D_VOLTAGE,
+ .enable_time = 200,
},
{
.name = "LDO_E",
.id = AB3100_LDO_E,
.ops = &regulator_ops_variable_sleepable,
.n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
+ .volt_table = ldo_e_buck_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 200,
},
{
.name = "LDO_F",
.id = AB3100_LDO_F,
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_f_typ_voltages),
+ .volt_table = ldo_f_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 600,
},
{
.name = "LDO_G",
.id = AB3100_LDO_G,
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_g_typ_voltages),
+ .volt_table = ldo_g_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 400,
},
{
.name = "LDO_H",
.id = AB3100_LDO_H,
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_h_typ_voltages),
+ .volt_table = ldo_h_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 200,
},
{
.name = "LDO_K",
.id = AB3100_LDO_K,
.ops = &regulator_ops_variable,
.n_voltages = ARRAY_SIZE(ldo_k_typ_voltages),
+ .volt_table = ldo_k_typ_voltages,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 200,
},
{
.name = "LDO_EXT",
@@ -528,6 +488,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
.n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages),
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .enable_time = 1000,
},
};
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index a739f5ca936a..13d424fc1c14 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -30,9 +30,6 @@
* @dev: device pointer
* @desc: regulator description
* @regulator_dev: regulator device
- * @max_uV: maximum voltage (for variable voltage supplies)
- * @min_uV: minimum voltage (for variable voltage supplies)
- * @fixed_uV: typical voltage (for fixed voltage supplies)
* @update_bank: bank to control on/off
* @update_reg: register to control on/off
* @update_mask: mask to enable/disable regulator
@@ -40,17 +37,12 @@
* @voltage_bank: bank to control regulator voltage
* @voltage_reg: register to control regulator voltage
* @voltage_mask: mask to control regulator voltage
- * @voltages: supported voltage table
- * @voltages_len: number of supported voltages for the regulator
* @delay: startup/set voltage delay in us
*/
struct ab8500_regulator_info {
struct device *dev;
struct regulator_desc desc;
struct regulator_dev *regulator;
- int max_uV;
- int min_uV;
- int fixed_uV;
u8 update_bank;
u8 update_reg;
u8 update_mask;
@@ -58,13 +50,11 @@ struct ab8500_regulator_info {
u8 voltage_bank;
u8 voltage_reg;
u8 voltage_mask;
- int const *voltages;
- int voltages_len;
unsigned int delay;
};
/* voltage tables for the vauxn/vintcore supplies */
-static const int ldo_vauxn_voltages[] = {
+static const unsigned int ldo_vauxn_voltages[] = {
1100000,
1200000,
1300000,
@@ -83,7 +73,7 @@ static const int ldo_vauxn_voltages[] = {
3300000,
};
-static const int ldo_vaux3_voltages[] = {
+static const unsigned int ldo_vaux3_voltages[] = {
1200000,
1500000,
1800000,
@@ -94,7 +84,7 @@ static const int ldo_vaux3_voltages[] = {
2910000,
};
-static const int ldo_vintcore_voltages[] = {
+static const unsigned int ldo_vintcore_voltages[] = {
1200000,
1225000,
1250000,
@@ -185,25 +175,6 @@ static int ab8500_regulator_is_enabled(struct regulator_dev *rdev)
return false;
}
-static int ab8500_list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
- struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-
- if (info == NULL) {
- dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
- return -EINVAL;
- }
-
- /* return the uV for the fixed regulators */
- if (info->fixed_uV)
- return info->fixed_uV;
-
- if (selector >= info->voltages_len)
- return -EINVAL;
-
- return info->voltages[selector];
-}
-
static int ab8500_regulator_get_voltage_sel(struct regulator_dev *rdev)
{
int ret, val;
@@ -279,14 +250,7 @@ static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_sel)
{
struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
- int ret;
- /* If the regulator isn't on, it won't take time here */
- ret = ab8500_regulator_is_enabled(rdev);
- if (ret < 0)
- return ret;
- if (!ret)
- return 0;
return info->delay;
}
@@ -296,21 +260,14 @@ static struct regulator_ops ab8500_regulator_ops = {
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage_sel = ab8500_regulator_get_voltage_sel,
.set_voltage_sel = ab8500_regulator_set_voltage_sel,
- .list_voltage = ab8500_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.enable_time = ab8500_regulator_enable_time,
.set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
static int ab8500_fixed_get_voltage(struct regulator_dev *rdev)
{
- struct ab8500_regulator_info *info = rdev_get_drvdata(rdev);
-
- if (info == NULL) {
- dev_err(rdev_get_dev(rdev), "regulator info null pointer\n");
- return -EINVAL;
- }
-
- return info->fixed_uV;
+ return rdev->desc->min_uV;
}
static struct regulator_ops ab8500_regulator_fixed_ops = {
@@ -318,9 +275,8 @@ static struct regulator_ops ab8500_regulator_fixed_ops = {
.disable = ab8500_regulator_disable,
.is_enabled = ab8500_regulator_is_enabled,
.get_voltage = ab8500_fixed_get_voltage,
- .list_voltage = ab8500_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
.enable_time = ab8500_regulator_enable_time,
- .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel,
};
static struct ab8500_regulator_info
@@ -329,7 +285,7 @@ static struct ab8500_regulator_info
* Variable Voltage Regulators
* name, min mV, max mV,
* update bank, reg, mask, enable val
- * volt bank, reg, mask, table, table length
+ * volt bank, reg, mask
*/
[AB8500_LDO_AUX1] = {
.desc = {
@@ -339,9 +295,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_AUX1,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
},
- .min_uV = 1100000,
- .max_uV = 3300000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x03,
@@ -349,8 +304,6 @@ static struct ab8500_regulator_info
.voltage_bank = 0x04,
.voltage_reg = 0x1f,
.voltage_mask = 0x0f,
- .voltages = ldo_vauxn_voltages,
- .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages),
},
[AB8500_LDO_AUX2] = {
.desc = {
@@ -360,9 +313,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_AUX2,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vauxn_voltages),
+ .volt_table = ldo_vauxn_voltages,
},
- .min_uV = 1100000,
- .max_uV = 3300000,
.update_bank = 0x04,
.update_reg = 0x09,
.update_mask = 0x0c,
@@ -370,8 +322,6 @@ static struct ab8500_regulator_info
.voltage_bank = 0x04,
.voltage_reg = 0x20,
.voltage_mask = 0x0f,
- .voltages = ldo_vauxn_voltages,
- .voltages_len = ARRAY_SIZE(ldo_vauxn_voltages),
},
[AB8500_LDO_AUX3] = {
.desc = {
@@ -381,9 +331,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_AUX3,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vaux3_voltages),
+ .volt_table = ldo_vaux3_voltages,
},
- .min_uV = 1100000,
- .max_uV = 3300000,
.update_bank = 0x04,
.update_reg = 0x0a,
.update_mask = 0x03,
@@ -391,8 +340,6 @@ static struct ab8500_regulator_info
.voltage_bank = 0x04,
.voltage_reg = 0x21,
.voltage_mask = 0x07,
- .voltages = ldo_vaux3_voltages,
- .voltages_len = ARRAY_SIZE(ldo_vaux3_voltages),
},
[AB8500_LDO_INTCORE] = {
.desc = {
@@ -402,9 +349,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_INTCORE,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(ldo_vintcore_voltages),
+ .volt_table = ldo_vintcore_voltages,
},
- .min_uV = 1100000,
- .max_uV = 3300000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x44,
@@ -412,8 +358,6 @@ static struct ab8500_regulator_info
.voltage_bank = 0x03,
.voltage_reg = 0x80,
.voltage_mask = 0x38,
- .voltages = ldo_vintcore_voltages,
- .voltages_len = ARRAY_SIZE(ldo_vintcore_voltages),
},
/*
@@ -429,9 +373,9 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_TVOUT,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 2000000,
},
.delay = 10000,
- .fixed_uV = 2000000,
.update_bank = 0x03,
.update_reg = 0x80,
.update_mask = 0x82,
@@ -445,8 +389,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_USB,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 3300000,
},
- .fixed_uV = 3300000,
.update_bank = 0x03,
.update_reg = 0x82,
.update_mask = 0x03,
@@ -460,8 +404,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_AUDIO,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 2000000,
},
- .fixed_uV = 2000000,
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x02,
@@ -475,8 +419,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_ANAMIC1,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 2050000,
},
- .fixed_uV = 2050000,
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x08,
@@ -490,8 +434,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_ANAMIC2,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 2050000,
},
- .fixed_uV = 2050000,
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x10,
@@ -505,8 +449,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_DMIC,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 1800000,
},
- .fixed_uV = 1800000,
.update_bank = 0x03,
.update_reg = 0x83,
.update_mask = 0x04,
@@ -520,8 +464,8 @@ static struct ab8500_regulator_info
.id = AB8500_LDO_ANA,
.owner = THIS_MODULE,
.n_voltages = 1,
+ .min_uV = 1200000,
},
- .fixed_uV = 1200000,
.update_bank = 0x04,
.update_reg = 0x06,
.update_mask = 0x0c,
@@ -769,9 +713,7 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev,
if (info->desc.id == AB8500_LDO_AUX3) {
info->desc.n_voltages =
ARRAY_SIZE(ldo_vauxn_voltages);
- info->voltages = ldo_vauxn_voltages;
- info->voltages_len =
- ARRAY_SIZE(ldo_vauxn_voltages);
+ info->desc.volt_table = ldo_vauxn_voltages;
info->voltage_mask = 0xf;
}
}
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 46d05f38baf8..f123f7e3b752 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -89,9 +89,12 @@ static int ad5398_set_current_limit(struct regulator_dev *rdev, int min_uA, int
unsigned short data;
int ret;
- if (min_uA > chip->max_uA || min_uA < chip->min_uA)
- return -EINVAL;
- if (max_uA > chip->max_uA || max_uA < chip->min_uA)
+ if (min_uA < chip->min_uA)
+ min_uA = chip->min_uA;
+ if (max_uA > chip->max_uA)
+ max_uA = chip->max_uA;
+
+ if (min_uA > chip->max_uA || max_uA < chip->min_uA)
return -EINVAL;
selector = DIV_ROUND_UP((min_uA - chip->min_uA) * chip->current_level,
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index e82e7eaac0f1..e9c2085f9dfb 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -43,33 +43,15 @@ struct anatop_regulator {
struct regulator_init_data *initdata;
};
-static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
- int max_uV, unsigned *selector)
+static int anatop_set_voltage_sel(struct regulator_dev *reg, unsigned selector)
{
struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
- u32 val, sel, mask;
- int uv;
-
- uv = min_uV;
- dev_dbg(&reg->dev, "%s: uv %d, min %d, max %d\n", __func__,
- uv, anatop_reg->min_voltage,
- anatop_reg->max_voltage);
-
- if (uv < anatop_reg->min_voltage) {
- if (max_uV > anatop_reg->min_voltage)
- uv = anatop_reg->min_voltage;
- else
- return -EINVAL;
- }
+ u32 val, mask;
if (!anatop_reg->control_reg)
return -ENOTSUPP;
- sel = DIV_ROUND_UP(uv - anatop_reg->min_voltage, 25000);
- if (sel * 25000 + anatop_reg->min_voltage > anatop_reg->max_voltage)
- return -EINVAL;
- val = anatop_reg->min_bit_val + sel;
- *selector = sel;
+ val = anatop_reg->min_bit_val + selector;
dev_dbg(&reg->dev, "%s: calculated val %d\n", __func__, val);
mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
anatop_reg->vol_bit_shift;
@@ -94,21 +76,11 @@ static int anatop_get_voltage_sel(struct regulator_dev *reg)
return val - anatop_reg->min_bit_val;
}
-static int anatop_list_voltage(struct regulator_dev *reg, unsigned selector)
-{
- struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
- int uv;
-
- uv = anatop_reg->min_voltage + selector * 25000;
- dev_dbg(&reg->dev, "vddio = %d, selector = %u\n", uv, selector);
-
- return uv;
-}
-
static struct regulator_ops anatop_rops = {
- .set_voltage = anatop_set_voltage,
+ .set_voltage_sel = anatop_set_voltage_sel,
.get_voltage_sel = anatop_get_voltage_sel,
- .list_voltage = anatop_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
};
static int __devinit anatop_regulator_probe(struct platform_device *pdev)
@@ -176,6 +148,8 @@ static int __devinit anatop_regulator_probe(struct platform_device *pdev)
rdesc->n_voltages = (sreg->max_voltage - sreg->min_voltage)
/ 25000 + 1;
+ rdesc->min_uV = sreg->min_voltage;
+ rdesc->uV_step = 25000;
config.dev = &pdev->dev;
config.init_data = initdata;
diff --git a/drivers/regulator/arizona-ldo1.c b/drivers/regulator/arizona-ldo1.c
new file mode 100644
index 000000000000..c8f95c07adb6
--- /dev/null
+++ b/drivers/regulator/arizona-ldo1.c
@@ -0,0 +1,138 @@
+/*
+ * arizona-ldo1.c -- LDO1 supply for Arizona devices
+ *
+ * Copyright 2012 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/pdata.h>
+#include <linux/mfd/arizona/registers.h>
+
+struct arizona_ldo1 {
+ struct regulator_dev *regulator;
+ struct arizona *arizona;
+
+ struct regulator_consumer_supply supply;
+ struct regulator_init_data init_data;
+};
+
+static struct regulator_ops arizona_ldo1_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc arizona_ldo1 = {
+ .name = "LDO1",
+ .supply_name = "LDOVDD",
+ .type = REGULATOR_VOLTAGE,
+ .ops = &arizona_ldo1_ops,
+
+ .vsel_reg = ARIZONA_LDO1_CONTROL_1,
+ .vsel_mask = ARIZONA_LDO1_VSEL_MASK,
+ .min_uV = 900000,
+ .uV_step = 50000,
+ .n_voltages = 7,
+
+ .owner = THIS_MODULE,
+};
+
+static const struct regulator_init_data arizona_ldo1_default = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .num_consumer_supplies = 1,
+};
+
+static __devinit int arizona_ldo1_probe(struct platform_device *pdev)
+{
+ struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ struct arizona_ldo1 *ldo1;
+ int ret;
+
+ ldo1 = devm_kzalloc(&pdev->dev, sizeof(*ldo1), GFP_KERNEL);
+ if (ldo1 == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ ldo1->arizona = arizona;
+
+ /*
+ * Since the chip usually supplies itself we provide some
+ * default init_data for it. This will be overridden with
+ * platform data if provided.
+ */
+ ldo1->init_data = arizona_ldo1_default;
+ ldo1->init_data.consumer_supplies = &ldo1->supply;
+ ldo1->supply.supply = "DCVDD";
+ ldo1->supply.dev_name = dev_name(arizona->dev);
+
+ config.dev = arizona->dev;
+ config.driver_data = ldo1;
+ config.regmap = arizona->regmap;
+ config.ena_gpio = arizona->pdata.ldoena;
+
+ if (arizona->pdata.ldo1)
+ config.init_data = arizona->pdata.ldo1;
+ else
+ config.init_data = &ldo1->init_data;
+
+ ldo1->regulator = regulator_register(&arizona_ldo1, &config);
+ if (IS_ERR(ldo1->regulator)) {
+ ret = PTR_ERR(ldo1->regulator);
+ dev_err(arizona->dev, "Failed to register LDO1 supply: %d\n",
+ ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ldo1);
+
+ return 0;
+}
+
+static __devexit int arizona_ldo1_remove(struct platform_device *pdev)
+{
+ struct arizona_ldo1 *ldo1 = platform_get_drvdata(pdev);
+
+ regulator_unregister(ldo1->regulator);
+
+ return 0;
+}
+
+static struct platform_driver arizona_ldo1_driver = {
+ .probe = arizona_ldo1_probe,
+ .remove = __devexit_p(arizona_ldo1_remove),
+ .driver = {
+ .name = "arizona-ldo1",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(arizona_ldo1_driver);
+
+/* Module information */
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("Arizona LDO1 driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:arizona-ldo1");
diff --git a/drivers/regulator/arizona-micsupp.c b/drivers/regulator/arizona-micsupp.c
new file mode 100644
index 000000000000..450a069aa9b6
--- /dev/null
+++ b/drivers/regulator/arizona-micsupp.c
@@ -0,0 +1,188 @@
+/*
+ * arizona-micsupp.c -- Microphone supply for Arizona devices
+ *
+ * Copyright 2012 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/pdata.h>
+#include <linux/mfd/arizona/registers.h>
+
+#define ARIZONA_MICSUPP_MAX_SELECTOR 0x1f
+
+struct arizona_micsupp {
+ struct regulator_dev *regulator;
+ struct arizona *arizona;
+
+ struct regulator_consumer_supply supply;
+ struct regulator_init_data init_data;
+};
+
+static int arizona_micsupp_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector > ARIZONA_MICSUPP_MAX_SELECTOR)
+ return -EINVAL;
+
+ if (selector == ARIZONA_MICSUPP_MAX_SELECTOR)
+ return 3300000;
+ else
+ return (selector * 50000) + 1700000;
+}
+
+static int arizona_micsupp_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ unsigned int voltage;
+ int selector;
+
+ if (min_uV < 1700000)
+ min_uV = 1700000;
+
+ if (min_uV > 3200000)
+ selector = ARIZONA_MICSUPP_MAX_SELECTOR;
+ else
+ selector = DIV_ROUND_UP(min_uV - 1700000, 50000);
+
+ if (selector < 0)
+ return -EINVAL;
+
+ voltage = arizona_micsupp_list_voltage(rdev, selector);
+ if (voltage < min_uV || voltage > max_uV)
+ return -EINVAL;
+
+ return selector;
+}
+
+static struct regulator_ops arizona_micsupp_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+
+ .list_voltage = arizona_micsupp_list_voltage,
+ .map_voltage = arizona_micsupp_map_voltage,
+
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+};
+
+static const struct regulator_desc arizona_micsupp = {
+ .name = "MICVDD",
+ .supply_name = "CPVDD",
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ARIZONA_MICSUPP_MAX_SELECTOR + 1,
+ .ops = &arizona_micsupp_ops,
+
+ .vsel_reg = ARIZONA_LDO2_CONTROL_1,
+ .vsel_mask = ARIZONA_LDO2_VSEL_MASK,
+ .enable_reg = ARIZONA_MIC_CHARGE_PUMP_1,
+ .enable_mask = ARIZONA_CPMIC_ENA,
+
+ .owner = THIS_MODULE,
+};
+
+static const struct regulator_init_data arizona_micsupp_default = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_VOLTAGE,
+ .min_uV = 1700000,
+ .max_uV = 3300000,
+ },
+
+ .num_consumer_supplies = 1,
+};
+
+static __devinit int arizona_micsupp_probe(struct platform_device *pdev)
+{
+ struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ struct arizona_micsupp *micsupp;
+ int ret;
+
+ micsupp = devm_kzalloc(&pdev->dev, sizeof(*micsupp), GFP_KERNEL);
+ if (micsupp == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ micsupp->arizona = arizona;
+
+ /*
+ * Since the chip usually supplies itself we provide some
+ * default init_data for it. This will be overridden with
+ * platform data if provided.
+ */
+ micsupp->init_data = arizona_micsupp_default;
+ micsupp->init_data.consumer_supplies = &micsupp->supply;
+ micsupp->supply.supply = "MICVDD";
+ micsupp->supply.dev_name = dev_name(arizona->dev);
+
+ config.dev = arizona->dev;
+ config.driver_data = micsupp;
+ config.regmap = arizona->regmap;
+
+ if (arizona->pdata.micvdd)
+ config.init_data = arizona->pdata.micvdd;
+ else
+ config.init_data = &micsupp->init_data;
+
+ /* Default to regulated mode until the API supports bypass */
+ regmap_update_bits(arizona->regmap, ARIZONA_MIC_CHARGE_PUMP_1,
+ ARIZONA_CPMIC_BYPASS, 0);
+
+ micsupp->regulator = regulator_register(&arizona_micsupp, &config);
+ if (IS_ERR(micsupp->regulator)) {
+ ret = PTR_ERR(micsupp->regulator);
+ dev_err(arizona->dev, "Failed to register mic supply: %d\n",
+ ret);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, micsupp);
+
+ return 0;
+}
+
+static __devexit int arizona_micsupp_remove(struct platform_device *pdev)
+{
+ struct arizona_micsupp *micsupp = platform_get_drvdata(pdev);
+
+ regulator_unregister(micsupp->regulator);
+
+ return 0;
+}
+
+static struct platform_driver arizona_micsupp_driver = {
+ .probe = arizona_micsupp_probe,
+ .remove = __devexit_p(arizona_micsupp_remove),
+ .driver = {
+ .name = "arizona-micsupp",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(arizona_micsupp_driver);
+
+/* Module information */
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("Arizona microphone supply driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:arizona-micsupp");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 09a737c868b5..f092588a078c 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -23,6 +23,7 @@
#include <linux/mutex.h>
#include <linux/suspend.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/regulator/of_regulator.h>
@@ -108,28 +109,6 @@ static const char *rdev_get_name(struct regulator_dev *rdev)
return "";
}
-/* gets the regulator for a given consumer device */
-static struct regulator *get_device_regulator(struct device *dev)
-{
- struct regulator *regulator = NULL;
- struct regulator_dev *rdev;
-
- mutex_lock(&regulator_list_mutex);
- list_for_each_entry(rdev, &regulator_list, list) {
- mutex_lock(&rdev->mutex);
- list_for_each_entry(regulator, &rdev->consumer_list, list) {
- if (regulator->dev == dev) {
- mutex_unlock(&rdev->mutex);
- mutex_unlock(&regulator_list_mutex);
- return regulator;
- }
- }
- mutex_unlock(&rdev->mutex);
- }
- mutex_unlock(&regulator_list_mutex);
- return NULL;
-}
-
/**
* of_get_regulator - get a regulator device node based on supply name
* @dev: Device pointer for the consumer (of regulator) device
@@ -303,18 +282,6 @@ static int regulator_check_drms(struct regulator_dev *rdev)
return 0;
}
-static ssize_t device_requested_uA_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct regulator *regulator;
-
- regulator = get_device_regulator(dev);
- if (regulator == NULL)
- return 0;
-
- return sprintf(buf, "%d\n", regulator->uA_load);
-}
-
static ssize_t regulator_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -427,6 +394,9 @@ static ssize_t regulator_status_show(struct device *dev,
case REGULATOR_STATUS_STANDBY:
label = "standby";
break;
+ case REGULATOR_STATUS_UNDEFINED:
+ label = "undefined";
+ break;
default:
return -ERANGE;
}
@@ -967,6 +937,14 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
+ if (rdev->constraints->ramp_delay && ops->set_ramp_delay) {
+ ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to set ramp_delay\n");
+ goto out;
+ }
+ }
+
print_constraints(rdev);
return 0;
out:
@@ -1097,48 +1075,29 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
list_add(&regulator->list, &rdev->consumer_list);
if (dev) {
- /* create a 'requested_microamps_name' sysfs entry */
- size = scnprintf(buf, REG_STR_SIZE,
- "microamps_requested_%s-%s",
- dev_name(dev), supply_name);
- if (size >= REG_STR_SIZE)
- goto overflow_err;
-
regulator->dev = dev;
- sysfs_attr_init(&regulator->dev_attr.attr);
- regulator->dev_attr.attr.name = kstrdup(buf, GFP_KERNEL);
- if (regulator->dev_attr.attr.name == NULL)
- goto attr_name_err;
-
- regulator->dev_attr.attr.mode = 0444;
- regulator->dev_attr.show = device_requested_uA_show;
- err = device_create_file(dev, &regulator->dev_attr);
- if (err < 0) {
- rdev_warn(rdev, "could not add regulator_dev requested microamps sysfs entry\n");
- goto attr_name_err;
- }
- /* also add a link to the device sysfs entry */
+ /* Add a link to the device sysfs entry */
size = scnprintf(buf, REG_STR_SIZE, "%s-%s",
dev->kobj.name, supply_name);
if (size >= REG_STR_SIZE)
- goto attr_err;
+ goto overflow_err;
regulator->supply_name = kstrdup(buf, GFP_KERNEL);
if (regulator->supply_name == NULL)
- goto attr_err;
+ goto overflow_err;
err = sysfs_create_link(&rdev->dev.kobj, &dev->kobj,
buf);
if (err) {
rdev_warn(rdev, "could not add device link %s err %d\n",
dev->kobj.name, err);
- goto link_name_err;
+ /* non-fatal */
}
} else {
regulator->supply_name = kstrdup(supply_name, GFP_KERNEL);
if (regulator->supply_name == NULL)
- goto attr_err;
+ goto overflow_err;
}
regulator->debugfs = debugfs_create_dir(regulator->supply_name,
@@ -1165,12 +1124,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
mutex_unlock(&rdev->mutex);
return regulator;
-link_name_err:
- kfree(regulator->supply_name);
-attr_err:
- device_remove_file(regulator->dev, &regulator->dev_attr);
-attr_name_err:
- kfree(regulator->dev_attr.attr.name);
overflow_err:
list_del(&regulator->list);
kfree(regulator);
@@ -1181,7 +1134,7 @@ overflow_err:
static int _regulator_get_enable_time(struct regulator_dev *rdev)
{
if (!rdev->desc->ops->enable_time)
- return 0;
+ return rdev->desc->enable_time;
return rdev->desc->ops->enable_time(rdev);
}
@@ -1420,11 +1373,8 @@ void regulator_put(struct regulator *regulator)
debugfs_remove_recursive(regulator->debugfs);
/* remove any sysfs entries */
- if (regulator->dev) {
+ if (regulator->dev)
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
- device_remove_file(regulator->dev, &regulator->dev_attr);
- kfree(regulator->dev_attr.attr.name);
- }
kfree(regulator->supply_name);
list_del(&regulator->list);
kfree(regulator);
@@ -1459,19 +1409,61 @@ void devm_regulator_put(struct regulator *regulator)
{
int rc;
- rc = devres_destroy(regulator->dev, devm_regulator_release,
+ rc = devres_release(regulator->dev, devm_regulator_release,
devm_regulator_match, regulator);
- if (rc == 0)
- regulator_put(regulator);
- else
+ if (rc != 0)
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regulator_put);
+static int _regulator_do_enable(struct regulator_dev *rdev)
+{
+ int ret, delay;
+
+ /* Query before enabling in case configuration dependent. */
+ ret = _regulator_get_enable_time(rdev);
+ if (ret >= 0) {
+ delay = ret;
+ } else {
+ rdev_warn(rdev, "enable_time() failed: %d\n", ret);
+ delay = 0;
+ }
+
+ trace_regulator_enable(rdev_get_name(rdev));
+
+ if (rdev->ena_gpio) {
+ gpio_set_value_cansleep(rdev->ena_gpio,
+ !rdev->ena_gpio_invert);
+ rdev->ena_gpio_state = 1;
+ } else if (rdev->desc->ops->enable) {
+ ret = rdev->desc->ops->enable(rdev);
+ if (ret < 0)
+ return ret;
+ } else {
+ return -EINVAL;
+ }
+
+ /* Allow the regulator to ramp; it would be useful to extend
+ * this for bulk operations so that the regulators can ramp
+ * together. */
+ trace_regulator_enable_delay(rdev_get_name(rdev));
+
+ if (delay >= 1000) {
+ mdelay(delay / 1000);
+ udelay(delay % 1000);
+ } else if (delay) {
+ udelay(delay);
+ }
+
+ trace_regulator_enable_complete(rdev_get_name(rdev));
+
+ return 0;
+}
+
/* locks held by regulator_enable() */
static int _regulator_enable(struct regulator_dev *rdev)
{
- int ret, delay;
+ int ret;
/* check voltage and requested load before enabling */
if (rdev->constraints &&
@@ -1485,40 +1477,10 @@ static int _regulator_enable(struct regulator_dev *rdev)
if (!_regulator_can_change_status(rdev))
return -EPERM;
- if (!rdev->desc->ops->enable)
- return -EINVAL;
-
- /* Query before enabling in case configuration
- * dependent. */
- ret = _regulator_get_enable_time(rdev);
- if (ret >= 0) {
- delay = ret;
- } else {
- rdev_warn(rdev, "enable_time() failed: %d\n",
- ret);
- delay = 0;
- }
-
- trace_regulator_enable(rdev_get_name(rdev));
-
- /* Allow the regulator to ramp; it would be useful
- * to extend this for bulk operations so that the
- * regulators can ramp together. */
- ret = rdev->desc->ops->enable(rdev);
+ ret = _regulator_do_enable(rdev);
if (ret < 0)
return ret;
- trace_regulator_enable_delay(rdev_get_name(rdev));
-
- if (delay >= 1000) {
- mdelay(delay / 1000);
- udelay(delay % 1000);
- } else if (delay) {
- udelay(delay);
- }
-
- trace_regulator_enable_complete(rdev_get_name(rdev));
-
} else if (ret < 0) {
rdev_err(rdev, "is_enabled() failed: %d\n", ret);
return ret;
@@ -1567,6 +1529,30 @@ int regulator_enable(struct regulator *regulator)
}
EXPORT_SYMBOL_GPL(regulator_enable);
+static int _regulator_do_disable(struct regulator_dev *rdev)
+{
+ int ret;
+
+ trace_regulator_disable(rdev_get_name(rdev));
+
+ if (rdev->ena_gpio) {
+ gpio_set_value_cansleep(rdev->ena_gpio,
+ rdev->ena_gpio_invert);
+ rdev->ena_gpio_state = 0;
+
+ } else if (rdev->desc->ops->disable) {
+ ret = rdev->desc->ops->disable(rdev);
+ if (ret != 0)
+ return ret;
+ }
+
+ trace_regulator_disable_complete(rdev_get_name(rdev));
+
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
+ return 0;
+}
+
/* locks held by regulator_disable() */
static int _regulator_disable(struct regulator_dev *rdev)
{
@@ -1581,20 +1567,12 @@ static int _regulator_disable(struct regulator_dev *rdev)
(rdev->constraints && !rdev->constraints->always_on)) {
/* we are last user */
- if (_regulator_can_change_status(rdev) &&
- rdev->desc->ops->disable) {
- trace_regulator_disable(rdev_get_name(rdev));
-
- ret = rdev->desc->ops->disable(rdev);
+ if (_regulator_can_change_status(rdev)) {
+ ret = _regulator_do_disable(rdev);
if (ret < 0) {
rdev_err(rdev, "failed to disable\n");
return ret;
}
-
- trace_regulator_disable_complete(rdev_get_name(rdev));
-
- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
- NULL);
}
rdev->use_count = 0;
@@ -1812,6 +1790,10 @@ EXPORT_SYMBOL_GPL(regulator_disable_regmap);
static int _regulator_is_enabled(struct regulator_dev *rdev)
{
+ /* A GPIO control always takes precedence */
+ if (rdev->ena_gpio)
+ return rdev->ena_gpio_state;
+
/* If we don't know then assume that the regulator is always on */
if (!rdev->desc->ops->is_enabled)
return 1;
@@ -1883,6 +1865,31 @@ int regulator_list_voltage_linear(struct regulator_dev *rdev,
EXPORT_SYMBOL_GPL(regulator_list_voltage_linear);
/**
+ * regulator_list_voltage_table - List voltages with table based mapping
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with table based mapping between voltages and
+ * selectors can set volt_table in the regulator descriptor
+ * and then use this function as their list_voltage() operation.
+ */
+int regulator_list_voltage_table(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (!rdev->desc->volt_table) {
+ BUG_ON(!rdev->desc->volt_table);
+ return -EINVAL;
+ }
+
+ if (selector >= rdev->desc->n_voltages)
+ return -EINVAL;
+
+ return rdev->desc->volt_table[selector];
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_table);
+
+/**
* regulator_list_voltage - enumerate supported voltages
* @regulator: regulator source
* @selector: identify voltage to list
@@ -1928,8 +1935,18 @@ EXPORT_SYMBOL_GPL(regulator_list_voltage);
int regulator_is_supported_voltage(struct regulator *regulator,
int min_uV, int max_uV)
{
+ struct regulator_dev *rdev = regulator->rdev;
int i, voltages, ret;
+ /* If we can't change voltage check the current voltage */
+ if (!(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_VOLTAGE)) {
+ ret = regulator_get_voltage(regulator);
+ if (ret >= 0)
+ return (min_uV >= ret && ret <= max_uV);
+ else
+ return ret;
+ }
+
ret = regulator_count_voltages(regulator);
if (ret < 0)
return ret;
@@ -2045,6 +2062,14 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
{
int ret, voltage;
+ /* Allow uV_step to be 0 for fixed voltage */
+ if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) {
+ if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV)
+ return 0;
+ else
+ return -EINVAL;
+ }
+
if (!rdev->desc->uV_step) {
BUG_ON(!rdev->desc->uV_step);
return -EINVAL;
@@ -2071,7 +2096,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
{
int ret;
int delay = 0;
- int best_val;
+ int best_val = 0;
unsigned int selector;
int old_selector = -1;
@@ -2084,7 +2109,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
* If we can't obtain the old selector there is not enough
* info to call set_voltage_time_sel().
*/
- if (rdev->desc->ops->set_voltage_time_sel &&
+ if (_regulator_is_enabled(rdev) &&
+ rdev->desc->ops->set_voltage_time_sel &&
rdev->desc->ops->get_voltage_sel) {
old_selector = rdev->desc->ops->get_voltage_sel(rdev);
if (old_selector < 0)
@@ -2094,29 +2120,45 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
if (rdev->desc->ops->set_voltage) {
ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
&selector);
+
+ if (ret >= 0) {
+ if (rdev->desc->ops->list_voltage)
+ best_val = rdev->desc->ops->list_voltage(rdev,
+ selector);
+ else
+ best_val = _regulator_get_voltage(rdev);
+ }
+
} else if (rdev->desc->ops->set_voltage_sel) {
- if (rdev->desc->ops->map_voltage)
+ if (rdev->desc->ops->map_voltage) {
ret = rdev->desc->ops->map_voltage(rdev, min_uV,
max_uV);
- else
- ret = regulator_map_voltage_iterate(rdev, min_uV,
- max_uV);
+ } else {
+ if (rdev->desc->ops->list_voltage ==
+ regulator_list_voltage_linear)
+ ret = regulator_map_voltage_linear(rdev,
+ min_uV, max_uV);
+ else
+ ret = regulator_map_voltage_iterate(rdev,
+ min_uV, max_uV);
+ }
if (ret >= 0) {
- selector = ret;
- ret = rdev->desc->ops->set_voltage_sel(rdev, ret);
+ best_val = rdev->desc->ops->list_voltage(rdev, ret);
+ if (min_uV <= best_val && max_uV >= best_val) {
+ selector = ret;
+ ret = rdev->desc->ops->set_voltage_sel(rdev,
+ ret);
+ } else {
+ ret = -EINVAL;
+ }
}
} else {
ret = -EINVAL;
}
- if (rdev->desc->ops->list_voltage)
- best_val = rdev->desc->ops->list_voltage(rdev, selector);
- else
- best_val = -1;
-
/* Call set_voltage_time_sel if successfully obtained old_selector */
- if (ret == 0 && old_selector >= 0 &&
+ if (ret == 0 && _regulator_is_enabled(rdev) && old_selector >= 0 &&
rdev->desc->ops->set_voltage_time_sel) {
delay = rdev->desc->ops->set_voltage_time_sel(rdev,
@@ -2126,19 +2168,19 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
delay);
delay = 0;
}
- }
- /* Insert any necessary delays */
- if (delay >= 1000) {
- mdelay(delay / 1000);
- udelay(delay % 1000);
- } else if (delay) {
- udelay(delay);
+ /* Insert any necessary delays */
+ if (delay >= 1000) {
+ mdelay(delay / 1000);
+ udelay(delay % 1000);
+ } else if (delay) {
+ udelay(delay);
+ }
}
- if (ret == 0)
+ if (ret == 0 && best_val >= 0)
_notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE,
- NULL);
+ (void *)best_val);
trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val);
@@ -2249,6 +2291,46 @@ int regulator_set_voltage_time(struct regulator *regulator,
EXPORT_SYMBOL_GPL(regulator_set_voltage_time);
/**
+ *regulator_set_voltage_time_sel - get raise/fall time
+ * @regulator: regulator source
+ * @old_selector: selector for starting voltage
+ * @new_selector: selector for target voltage
+ *
+ * Provided with the starting and target voltage selectors, this function
+ * returns time in microseconds required to rise or fall to this new voltage
+ *
+ * Drivers providing ramp_delay in regulation_constraints can use this as their
+ * set_voltage_time_sel() operation.
+ */
+int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector)
+{
+ unsigned int ramp_delay = 0;
+ int old_volt, new_volt;
+
+ if (rdev->constraints->ramp_delay)
+ ramp_delay = rdev->constraints->ramp_delay;
+ else if (rdev->desc->ramp_delay)
+ ramp_delay = rdev->desc->ramp_delay;
+
+ if (ramp_delay == 0) {
+ rdev_warn(rdev, "ramp_delay not set\n");
+ return 0;
+ }
+
+ /* sanity check */
+ if (!rdev->desc->ops->list_voltage)
+ return -EINVAL;
+
+ old_volt = rdev->desc->ops->list_voltage(rdev, old_selector);
+ new_volt = rdev->desc->ops->list_voltage(rdev, new_selector);
+
+ return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
+}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_time_sel);
+
+/**
* regulator_sync_voltage - re-apply last regulator output voltage
* @regulator: regulator source
*
@@ -2519,9 +2601,12 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
{
struct regulator_dev *rdev = regulator->rdev;
struct regulator *consumer;
- int ret, output_uV, input_uV, total_uA_load = 0;
+ int ret, output_uV, input_uV = 0, total_uA_load = 0;
unsigned int mode;
+ if (rdev->supply)
+ input_uV = regulator_get_voltage(rdev->supply);
+
mutex_lock(&rdev->mutex);
/*
@@ -2554,10 +2639,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
goto out;
}
- /* get input voltage */
- input_uV = 0;
- if (rdev->supply)
- input_uV = regulator_get_voltage(rdev->supply);
+ /* No supply? Use constraint voltage */
if (input_uV <= 0)
input_uV = rdev->constraints->input_uV;
if (input_uV <= 0) {
@@ -2628,7 +2710,7 @@ static void _notifier_call_chain(struct regulator_dev *rdev,
unsigned long event, void *data)
{
/* call rdev chain first */
- blocking_notifier_call_chain(&rdev->notifier, event, NULL);
+ blocking_notifier_call_chain(&rdev->notifier, event, data);
}
/**
@@ -2744,7 +2826,7 @@ static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
int regulator_bulk_enable(int num_consumers,
struct regulator_bulk_data *consumers)
{
- LIST_HEAD(async_domain);
+ ASYNC_DOMAIN_EXCLUSIVE(async_domain);
int i;
int ret = 0;
@@ -2909,10 +2991,10 @@ int regulator_mode_to_status(unsigned int mode)
return REGULATOR_STATUS_NORMAL;
case REGULATOR_MODE_IDLE:
return REGULATOR_STATUS_IDLE;
- case REGULATOR_STATUS_STANDBY:
+ case REGULATOR_MODE_STANDBY:
return REGULATOR_STATUS_STANDBY;
default:
- return 0;
+ return REGULATOR_STATUS_UNDEFINED;
}
}
EXPORT_SYMBOL_GPL(regulator_mode_to_status);
@@ -3105,7 +3187,10 @@ regulator_register(const struct regulator_desc *regulator_desc,
rdev->reg_data = config->driver_data;
rdev->owner = regulator_desc->owner;
rdev->desc = regulator_desc;
- rdev->regmap = config->regmap;
+ if (config->regmap)
+ rdev->regmap = config->regmap;
+ else
+ rdev->regmap = dev_get_regmap(dev, NULL);
INIT_LIST_HEAD(&rdev->consumer_list);
INIT_LIST_HEAD(&rdev->list);
BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier);
@@ -3132,6 +3217,26 @@ regulator_register(const struct regulator_desc *regulator_desc,
dev_set_drvdata(&rdev->dev, rdev);
+ if (config->ena_gpio) {
+ ret = gpio_request_one(config->ena_gpio,
+ GPIOF_DIR_OUT | config->ena_gpio_flags,
+ rdev_get_name(rdev));
+ if (ret != 0) {
+ rdev_err(rdev, "Failed to request enable GPIO%d: %d\n",
+ config->ena_gpio, ret);
+ goto clean;
+ }
+
+ rdev->ena_gpio = config->ena_gpio;
+ rdev->ena_gpio_invert = config->ena_gpio_invert;
+
+ if (config->ena_gpio_flags & GPIOF_OUT_INIT_HIGH)
+ rdev->ena_gpio_state = 1;
+
+ if (rdev->ena_gpio_invert)
+ rdev->ena_gpio_state = !rdev->ena_gpio_state;
+ }
+
/* set regulator constraints */
if (init_data)
constraints = &init_data->constraints;
@@ -3200,6 +3305,8 @@ unset_supplies:
scrub:
if (rdev->supply)
regulator_put(rdev->supply);
+ if (rdev->ena_gpio)
+ gpio_free(rdev->ena_gpio);
kfree(rdev->constraints);
device_unregister(&rdev->dev);
/* device core frees rdev */
@@ -3233,6 +3340,8 @@ void regulator_unregister(struct regulator_dev *rdev)
unset_regulator_supplies(rdev);
list_del(&rdev->list);
kfree(rdev->constraints);
+ if (rdev->ena_gpio)
+ gpio_free(rdev->ena_gpio);
device_unregister(&rdev->dev);
mutex_unlock(&regulator_list_mutex);
}
@@ -3472,6 +3581,15 @@ static int __init regulator_init_complete(void)
struct regulation_constraints *c;
int enabled, ret;
+ /*
+ * Since DT doesn't provide an idiomatic mechanism for
+ * enabling full constraints and since it's much more natural
+ * with DT to provide them just assume that a DT enabled
+ * system has full constraints.
+ */
+ if (of_have_populated_dt())
+ has_full_constraints = true;
+
mutex_lock(&regulator_list_mutex);
/* If we have a full configuration then disable any regulators
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 1005f5f7e603..36c5b92fe0af 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -107,6 +107,9 @@ static int da903x_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
struct device *da9034_dev = to_da903x_dev(rdev);
uint8_t val, mask;
+ if (rdev->desc->n_voltages == 1)
+ return -EINVAL;
+
val = selector << info->vol_shift;
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
@@ -120,6 +123,9 @@ static int da903x_get_voltage_sel(struct regulator_dev *rdev)
uint8_t val, mask;
int ret;
+ if (rdev->desc->n_voltages == 1)
+ return 0;
+
ret = da903x_read(da9034_dev, info->vol_reg, &val);
if (ret)
return ret;
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 88976d8d44ed..903299cf15cf 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -405,12 +405,12 @@ static int __devinit da9052_regulator_probe(struct platform_device *pdev)
if (!nproot)
return -ENODEV;
- for (np = of_get_next_child(nproot, NULL); np;
- np = of_get_next_child(nproot, np)) {
+ for_each_child_of_node(nproot, np) {
if (!of_node_cmp(np->name,
regulator->info->reg_desc.name)) {
config.init_data = of_get_regulator_init_data(
&pdev->dev, np);
+ config.of_node = np;
break;
}
}
diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c
index cacd33c9d042..f9d027992aae 100644
--- a/drivers/regulator/fixed-helper.c
+++ b/drivers/regulator/fixed-helper.c
@@ -1,4 +1,5 @@
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>
@@ -13,17 +14,20 @@ static void regulator_fixed_release(struct device *dev)
{
struct fixed_regulator_data *data = container_of(dev,
struct fixed_regulator_data, pdev.dev);
+ kfree(data->cfg.supply_name);
kfree(data);
}
/**
- * regulator_register_fixed - register a no-op fixed regulator
+ * regulator_register_fixed_name - register a no-op fixed regulator
* @id: platform device id
+ * @name: name to be used for the regulator
* @supplies: consumers for this regulator
* @num_supplies: number of consumers
+ * @uv: voltage in microvolts
*/
-struct platform_device *regulator_register_fixed(int id,
- struct regulator_consumer_supply *supplies, int num_supplies)
+struct platform_device *regulator_register_always_on(int id, const char *name,
+ struct regulator_consumer_supply *supplies, int num_supplies, int uv)
{
struct fixed_regulator_data *data;
@@ -31,8 +35,13 @@ struct platform_device *regulator_register_fixed(int id,
if (!data)
return NULL;
- data->cfg.supply_name = "fixed-dummy";
- data->cfg.microvolts = 0;
+ data->cfg.supply_name = kstrdup(name, GFP_KERNEL);
+ if (!data->cfg.supply_name) {
+ kfree(data);
+ return NULL;
+ }
+
+ data->cfg.microvolts = uv;
data->cfg.gpio = -EINVAL;
data->cfg.enabled_at_boot = 1;
data->cfg.init_data = &data->init_data;
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index f09fe7b20e82..185468c4d38f 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -35,10 +35,6 @@ struct fixed_voltage_data {
struct regulator_desc desc;
struct regulator_dev *dev;
int microvolts;
- int gpio;
- unsigned startup_delay;
- bool enable_high;
- bool is_enabled;
};
@@ -61,11 +57,11 @@ of_get_fixed_voltage_config(struct device *dev)
config = devm_kzalloc(dev, sizeof(struct fixed_voltage_config),
GFP_KERNEL);
if (!config)
- return NULL;
+ return ERR_PTR(-ENOMEM);
config->init_data = of_get_regulator_init_data(dev, dev->of_node);
if (!config->init_data)
- return NULL;
+ return ERR_PTR(-EINVAL);
init_data = config->init_data;
init_data->constraints.apply_uV = 0;
@@ -76,13 +72,26 @@ of_get_fixed_voltage_config(struct device *dev)
} else {
dev_err(dev,
"Fixed regulator specified with variable voltages\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
if (init_data->constraints.boot_on)
config->enabled_at_boot = true;
config->gpio = of_get_named_gpio(np, "gpio", 0);
+ /*
+ * of_get_named_gpio() currently returns ENODEV rather than
+ * EPROBE_DEFER. This code attempts to be compatible with both
+ * for now; the ENODEV check can be removed once the API is fixed.
+ * of_get_named_gpio() doesn't differentiate between a missing
+ * property (which would be fine here, since the GPIO is optional)
+ * and some other error. Patches have been posted for both issues.
+ * Once they are check in, we should replace this with:
+ * if (config->gpio < 0 && config->gpio != -ENOENT)
+ */
+ if ((config->gpio == -ENODEV) || (config->gpio == -EPROBE_DEFER))
+ return ERR_PTR(-EPROBE_DEFER);
+
delay = of_get_property(np, "startup-delay-us", NULL);
if (delay)
config->startup_delay = be32_to_cpu(*delay);
@@ -93,41 +102,10 @@ of_get_fixed_voltage_config(struct device *dev)
if (of_find_property(np, "gpio-open-drain", NULL))
config->gpio_is_open_drain = true;
- return config;
-}
-
-static int fixed_voltage_is_enabled(struct regulator_dev *dev)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- return data->is_enabled;
-}
-
-static int fixed_voltage_enable(struct regulator_dev *dev)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- gpio_set_value_cansleep(data->gpio, data->enable_high);
- data->is_enabled = true;
-
- return 0;
-}
-
-static int fixed_voltage_disable(struct regulator_dev *dev)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- gpio_set_value_cansleep(data->gpio, !data->enable_high);
- data->is_enabled = false;
-
- return 0;
-}
+ if (of_find_property(np, "vin-supply", NULL))
+ config->input_supply = "vin";
-static int fixed_voltage_enable_time(struct regulator_dev *dev)
-{
- struct fixed_voltage_data *data = rdev_get_drvdata(dev);
-
- return data->startup_delay;
+ return config;
}
static int fixed_voltage_get_voltage(struct regulator_dev *dev)
@@ -151,15 +129,6 @@ static int fixed_voltage_list_voltage(struct regulator_dev *dev,
return data->microvolts;
}
-static struct regulator_ops fixed_voltage_gpio_ops = {
- .is_enabled = fixed_voltage_is_enabled,
- .enable = fixed_voltage_enable,
- .disable = fixed_voltage_disable,
- .enable_time = fixed_voltage_enable_time,
- .get_voltage = fixed_voltage_get_voltage,
- .list_voltage = fixed_voltage_list_voltage,
-};
-
static struct regulator_ops fixed_voltage_ops = {
.get_voltage = fixed_voltage_get_voltage,
.list_voltage = fixed_voltage_list_voltage,
@@ -172,10 +141,13 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
struct regulator_config cfg = { };
int ret;
- if (pdev->dev.of_node)
+ if (pdev->dev.of_node) {
config = of_get_fixed_voltage_config(&pdev->dev);
- else
+ if (IS_ERR(config))
+ return PTR_ERR(config);
+ } else {
config = pdev->dev.platform_data;
+ }
if (!config)
return -ENOMEM;
@@ -196,59 +168,44 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
}
drvdata->desc.type = REGULATOR_VOLTAGE;
drvdata->desc.owner = THIS_MODULE;
+ drvdata->desc.ops = &fixed_voltage_ops;
- if (config->microvolts)
- drvdata->desc.n_voltages = 1;
+ drvdata->desc.enable_time = config->startup_delay;
- drvdata->microvolts = config->microvolts;
- drvdata->gpio = config->gpio;
- drvdata->startup_delay = config->startup_delay;
-
- if (gpio_is_valid(config->gpio)) {
- int gpio_flag;
- drvdata->enable_high = config->enable_high;
-
- /* FIXME: Remove below print warning
- *
- * config->gpio must be set to -EINVAL by platform code if
- * GPIO control is not required. However, early adopters
- * not requiring GPIO control may forget to initialize
- * config->gpio to -EINVAL. This will cause GPIO 0 to be used
- * for GPIO control.
- *
- * This warning will be removed once there are a couple of users
- * for this driver.
- */
- if (!config->gpio)
- dev_warn(&pdev->dev,
- "using GPIO 0 for regulator enable control\n");
-
- /*
- * set output direction without changing state
- * to prevent glitch
- */
- drvdata->is_enabled = config->enabled_at_boot;
- ret = drvdata->is_enabled ?
- config->enable_high : !config->enable_high;
- gpio_flag = ret ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
-
- if (config->gpio_is_open_drain)
- gpio_flag |= GPIOF_OPEN_DRAIN;
-
- ret = gpio_request_one(config->gpio, gpio_flag,
- config->supply_name);
- if (ret) {
+ if (config->input_supply) {
+ drvdata->desc.supply_name = kstrdup(config->input_supply,
+ GFP_KERNEL);
+ if (!drvdata->desc.supply_name) {
dev_err(&pdev->dev,
- "Could not obtain regulator enable GPIO %d: %d\n",
- config->gpio, ret);
+ "Failed to allocate input supply\n");
+ ret = -ENOMEM;
goto err_name;
}
+ }
+
+ if (config->microvolts)
+ drvdata->desc.n_voltages = 1;
- drvdata->desc.ops = &fixed_voltage_gpio_ops;
+ drvdata->microvolts = config->microvolts;
+ if (config->gpio >= 0)
+ cfg.ena_gpio = config->gpio;
+ cfg.ena_gpio_invert = !config->enable_high;
+ if (config->enabled_at_boot) {
+ if (config->enable_high) {
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ } else {
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
+ }
} else {
- drvdata->desc.ops = &fixed_voltage_ops;
+ if (config->enable_high) {
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
+ } else {
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ }
}
+ if (config->gpio_is_open_drain)
+ cfg.ena_gpio_flags |= GPIOF_OPEN_DRAIN;
cfg.dev = &pdev->dev;
cfg.init_data = config->init_data;
@@ -259,7 +216,7 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
- goto err_gpio;
+ goto err_input;
}
platform_set_drvdata(pdev, drvdata);
@@ -269,9 +226,8 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
return 0;
-err_gpio:
- if (gpio_is_valid(config->gpio))
- gpio_free(config->gpio);
+err_input:
+ kfree(drvdata->desc.supply_name);
err_name:
kfree(drvdata->desc.name);
err:
@@ -283,8 +239,7 @@ static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev)
struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
regulator_unregister(drvdata->dev);
- if (gpio_is_valid(drvdata->gpio))
- gpio_free(drvdata->gpio);
+ kfree(drvdata->desc.supply_name);
kfree(drvdata->desc.name);
return 0;
@@ -296,8 +251,6 @@ static const struct of_device_id fixed_of_match[] __devinitconst = {
{},
};
MODULE_DEVICE_TABLE(of, fixed_of_match);
-#else
-#define fixed_of_match NULL
#endif
static struct platform_driver regulator_fixed_voltage_driver = {
@@ -306,7 +259,7 @@ static struct platform_driver regulator_fixed_voltage_driver = {
.driver = {
.name = "reg-fixed-voltage",
.owner = THIS_MODULE,
- .of_match_table = fixed_of_match,
+ .of_match_table = of_match_ptr(fixed_of_match),
},
};
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 242851a4c1a6..34b67bee9323 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -36,11 +36,6 @@ struct gpio_regulator_data {
struct regulator_desc desc;
struct regulator_dev *dev;
- int enable_gpio;
- bool enable_high;
- bool is_enabled;
- unsigned startup_delay;
-
struct gpio *gpios;
int nr_gpios;
@@ -50,44 +45,6 @@ struct gpio_regulator_data {
int state;
};
-static int gpio_regulator_is_enabled(struct regulator_dev *dev)
-{
- struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-
- return data->is_enabled;
-}
-
-static int gpio_regulator_enable(struct regulator_dev *dev)
-{
- struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-
- if (gpio_is_valid(data->enable_gpio)) {
- gpio_set_value_cansleep(data->enable_gpio, data->enable_high);
- data->is_enabled = true;
- }
-
- return 0;
-}
-
-static int gpio_regulator_disable(struct regulator_dev *dev)
-{
- struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-
- if (gpio_is_valid(data->enable_gpio)) {
- gpio_set_value_cansleep(data->enable_gpio, !data->enable_high);
- data->is_enabled = false;
- }
-
- return 0;
-}
-
-static int gpio_regulator_enable_time(struct regulator_dev *dev)
-{
- struct gpio_regulator_data *data = rdev_get_drvdata(dev);
-
- return data->startup_delay;
-}
-
static int gpio_regulator_get_value(struct regulator_dev *dev)
{
struct gpio_regulator_data *data = rdev_get_drvdata(dev);
@@ -153,20 +110,12 @@ static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
}
static struct regulator_ops gpio_regulator_voltage_ops = {
- .is_enabled = gpio_regulator_is_enabled,
- .enable = gpio_regulator_enable,
- .disable = gpio_regulator_disable,
- .enable_time = gpio_regulator_enable_time,
.get_voltage = gpio_regulator_get_value,
.set_voltage = gpio_regulator_set_voltage,
.list_voltage = gpio_regulator_list_voltage,
};
static struct regulator_ops gpio_regulator_current_ops = {
- .is_enabled = gpio_regulator_is_enabled,
- .enable = gpio_regulator_enable,
- .disable = gpio_regulator_disable,
- .enable_time = gpio_regulator_enable_time,
.get_current_limit = gpio_regulator_get_value,
.set_current_limit = gpio_regulator_set_current_limit,
};
@@ -213,6 +162,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
drvdata->nr_states = config->nr_states;
drvdata->desc.owner = THIS_MODULE;
+ drvdata->desc.enable_time = config->startup_delay;
/* handle regulator type*/
switch (config->type) {
@@ -232,52 +182,12 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
break;
}
- drvdata->enable_gpio = config->enable_gpio;
- drvdata->startup_delay = config->startup_delay;
-
- if (gpio_is_valid(config->enable_gpio)) {
- drvdata->enable_high = config->enable_high;
-
- ret = gpio_request(config->enable_gpio, config->supply_name);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not obtain regulator enable GPIO %d: %d\n",
- config->enable_gpio, ret);
- goto err_memstate;
- }
-
- /* set output direction without changing state
- * to prevent glitch
- */
- if (config->enabled_at_boot) {
- drvdata->is_enabled = true;
- ret = gpio_direction_output(config->enable_gpio,
- config->enable_high);
- } else {
- drvdata->is_enabled = false;
- ret = gpio_direction_output(config->enable_gpio,
- !config->enable_high);
- }
-
- if (ret) {
- dev_err(&pdev->dev,
- "Could not configure regulator enable GPIO %d direction: %d\n",
- config->enable_gpio, ret);
- goto err_enablegpio;
- }
- } else {
- /* Regulator without GPIO control is considered
- * always enabled
- */
- drvdata->is_enabled = true;
- }
-
drvdata->nr_gpios = config->nr_gpios;
ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios);
if (ret) {
dev_err(&pdev->dev,
"Could not obtain regulator setting GPIOs: %d\n", ret);
- goto err_enablegpio;
+ goto err_memstate;
}
/* build initial state from gpio init data. */
@@ -292,6 +202,21 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
cfg.init_data = config->init_data;
cfg.driver_data = drvdata;
+ if (config->enable_gpio >= 0)
+ cfg.ena_gpio = config->enable_gpio;
+ cfg.ena_gpio_invert = !config->enable_high;
+ if (config->enabled_at_boot) {
+ if (config->enable_high)
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ else
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
+ } else {
+ if (config->enable_high)
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
+ else
+ cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+ }
+
drvdata->dev = regulator_register(&drvdata->desc, &cfg);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
@@ -305,9 +230,6 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
err_stategpio:
gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
-err_enablegpio:
- if (gpio_is_valid(config->enable_gpio))
- gpio_free(config->enable_gpio);
err_memstate:
kfree(drvdata->states);
err_memgpio:
@@ -329,9 +251,6 @@ static int __devexit gpio_regulator_remove(struct platform_device *pdev)
kfree(drvdata->states);
kfree(drvdata->gpios);
- if (gpio_is_valid(drvdata->enable_gpio))
- gpio_free(drvdata->enable_gpio);
-
kfree(drvdata->desc.name);
return 0;
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index 56d273f25603..1d145a07ada9 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -75,19 +75,12 @@ static struct regulator_ops isl_core_ops = {
static int isl6271a_get_fixed_voltage(struct regulator_dev *dev)
{
- int id = rdev_get_id(dev);
- return (id == 1) ? 1100000 : 1300000;
-}
-
-static int isl6271a_list_fixed_voltage(struct regulator_dev *dev, unsigned selector)
-{
- int id = rdev_get_id(dev);
- return (id == 1) ? 1100000 : 1300000;
+ return dev->desc->min_uV;
}
static struct regulator_ops isl_fixed_ops = {
.get_voltage = isl6271a_get_fixed_voltage,
- .list_voltage = isl6271a_list_fixed_voltage,
+ .list_voltage = regulator_list_voltage_linear,
};
static const struct regulator_desc isl_rd[] = {
@@ -107,6 +100,7 @@ static const struct regulator_desc isl_rd[] = {
.ops = &isl_fixed_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .min_uV = 1100000,
}, {
.name = "LDO2",
.id = 2,
@@ -114,6 +108,7 @@ static const struct regulator_desc isl_rd[] = {
.ops = &isl_fixed_ops,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
+ .min_uV = 1300000,
},
};
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 981bea9cb9d7..7c6e3b8ff484 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -65,11 +65,11 @@ static const int buck_base_addr[] = {
#define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x])
#define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1)
-static const int buck_voltage_map[] = {
- 0, 800, 850, 900, 950, 1000, 1050, 1100,
- 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
- 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
- 3000, 3300,
+static const unsigned int buck_voltage_map[] = {
+ 0, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
+ 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
+ 1550000, 1600000, 1650000, 1700000, 1800000, 1900000, 2500000, 2800000,
+ 3000000, 3300000,
};
#define BUCK_TARGET_VOL_MASK 0x3f
@@ -98,39 +98,19 @@ static const int buck_voltage_map[] = {
#define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2)
#define LDO_VOL_CONTR_MASK 0x0f
-static const int ldo45_voltage_map[] = {
- 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
- 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
+static const unsigned int ldo45_voltage_map[] = {
+ 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000,
+ 1400000, 1500000, 1800000, 1900000, 2500000, 2800000, 3000000, 3300000,
};
-static const int ldo123_voltage_map[] = {
- 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
- 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
+static const unsigned int ldo123_voltage_map[] = {
+ 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
+ 2600000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000,
};
-static const int *ldo_voltage_map[] = {
- ldo123_voltage_map, /* LDO1 */
- ldo123_voltage_map, /* LDO2 */
- ldo123_voltage_map, /* LDO3 */
- ldo45_voltage_map, /* LDO4 */
- ldo45_voltage_map, /* LDO5 */
-};
-
-#define LDO_VOL_VALUE_MAP(x) (ldo_voltage_map[(x - LP3971_LDO1)])
-
#define LDO_VOL_MIN_IDX 0x00
#define LDO_VOL_MAX_IDX 0x0f
-static int lp3971_ldo_list_voltage(struct regulator_dev *dev, unsigned index)
-{
- int ldo = rdev_get_id(dev) - LP3971_LDO1;
-
- if (index > LDO_VOL_MAX_IDX)
- return -EINVAL;
-
- return 1000 * LDO_VOL_VALUE_MAP(ldo)[index];
-}
-
static int lp3971_ldo_is_enabled(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
@@ -169,7 +149,7 @@ static int lp3971_ldo_get_voltage(struct regulator_dev *dev)
reg = lp3971_reg_read(lp3971, LP3971_LDO_VOL_CONTR_REG(ldo));
val = (reg >> LDO_VOL_CONTR_SHIFT(ldo)) & LDO_VOL_CONTR_MASK;
- return 1000 * LDO_VOL_VALUE_MAP(ldo)[val];
+ return dev->desc->volt_table[val];
}
static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
@@ -184,7 +164,7 @@ static int lp3971_ldo_set_voltage_sel(struct regulator_dev *dev,
}
static struct regulator_ops lp3971_ldo_ops = {
- .list_voltage = lp3971_ldo_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.is_enabled = lp3971_ldo_is_enabled,
.enable = lp3971_ldo_enable,
.disable = lp3971_ldo_disable,
@@ -192,14 +172,6 @@ static struct regulator_ops lp3971_ldo_ops = {
.set_voltage_sel = lp3971_ldo_set_voltage_sel,
};
-static int lp3971_dcdc_list_voltage(struct regulator_dev *dev, unsigned index)
-{
- if (index < BUCK_TARGET_VOL_MIN_IDX || index > BUCK_TARGET_VOL_MAX_IDX)
- return -EINVAL;
-
- return 1000 * buck_voltage_map[index];
-}
-
static int lp3971_dcdc_is_enabled(struct regulator_dev *dev)
{
struct lp3971 *lp3971 = rdev_get_drvdata(dev);
@@ -240,7 +212,7 @@ static int lp3971_dcdc_get_voltage(struct regulator_dev *dev)
reg &= BUCK_TARGET_VOL_MASK;
if (reg <= BUCK_TARGET_VOL_MAX_IDX)
- val = 1000 * buck_voltage_map[reg];
+ val = buck_voltage_map[reg];
else {
val = 0;
dev_warn(&dev->dev, "chip reported incorrect voltage value.\n");
@@ -273,7 +245,7 @@ static int lp3971_dcdc_set_voltage_sel(struct regulator_dev *dev,
}
static struct regulator_ops lp3971_dcdc_ops = {
- .list_voltage = lp3971_dcdc_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.is_enabled = lp3971_dcdc_is_enabled,
.enable = lp3971_dcdc_enable,
.disable = lp3971_dcdc_disable,
@@ -287,6 +259,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_LDO1,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo123_voltage_map),
+ .volt_table = ldo123_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -295,6 +268,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_LDO2,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo123_voltage_map),
+ .volt_table = ldo123_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -303,6 +277,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_LDO3,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo123_voltage_map),
+ .volt_table = ldo123_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -311,6 +286,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_LDO4,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo45_voltage_map),
+ .volt_table = ldo45_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -319,6 +295,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_LDO5,
.ops = &lp3971_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo45_voltage_map),
+ .volt_table = ldo45_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -327,6 +304,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_DCDC1,
.ops = &lp3971_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck_voltage_map),
+ .volt_table = buck_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -335,6 +313,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_DCDC2,
.ops = &lp3971_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck_voltage_map),
+ .volt_table = buck_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -343,6 +322,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3971_DCDC3,
.ops = &lp3971_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck_voltage_map),
+ .volt_table = buck_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index de073df7d344..3cdc755d9b22 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -74,54 +74,40 @@ struct lp3972 {
#define LP3972_OVER2_LDO4_EN BIT(4)
#define LP3972_OVER1_S_EN BIT(2)
-static const int ldo1_voltage_map[] = {
- 1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875,
- 1900, 1925, 1950, 1975, 2000,
+static const unsigned int ldo1_voltage_map[] = {
+ 1700000, 1725000, 1750000, 1775000, 1800000, 1825000, 1850000, 1875000,
+ 1900000, 1925000, 1950000, 1975000, 2000000,
};
-static const int ldo23_voltage_map[] = {
- 1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
- 2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
+static const unsigned int ldo23_voltage_map[] = {
+ 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
+ 2600000, 2700000, 2800000, 2900000, 3000000, 3100000, 3200000, 3300000,
};
-static const int ldo4_voltage_map[] = {
- 1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
- 1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
+static const unsigned int ldo4_voltage_map[] = {
+ 1000000, 1050000, 1100000, 1150000, 1200000, 1250000, 1300000, 1350000,
+ 1400000, 1500000, 1800000, 1900000, 2500000, 2800000, 3000000, 3300000,
};
-static const int ldo5_voltage_map[] = {
- 0, 0, 0, 0, 0, 850, 875, 900,
- 925, 950, 975, 1000, 1025, 1050, 1075, 1100,
- 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300,
- 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
+static const unsigned int ldo5_voltage_map[] = {
+ 0, 0, 0, 0, 0, 850000, 875000, 900000,
+ 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
+ 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
+ 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
};
-static const int buck1_voltage_map[] = {
- 725, 750, 775, 800, 825, 850, 875, 900,
- 925, 950, 975, 1000, 1025, 1050, 1075, 1100,
- 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300,
- 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
+static const unsigned int buck1_voltage_map[] = {
+ 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
+ 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
+ 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
+ 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
};
-static const int buck23_voltage_map[] = {
- 0, 800, 850, 900, 950, 1000, 1050, 1100,
- 1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
- 1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
- 3000, 3300,
-};
-
-static const int *ldo_voltage_map[] = {
- ldo1_voltage_map,
- ldo23_voltage_map,
- ldo23_voltage_map,
- ldo4_voltage_map,
- ldo5_voltage_map,
-};
-
-static const int *buck_voltage_map[] = {
- buck1_voltage_map,
- buck23_voltage_map,
- buck23_voltage_map,
+static const unsigned int buck23_voltage_map[] = {
+ 0, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
+ 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
+ 1550000, 1600000, 1650000, 1700000, 1800000, 1900000, 2500000, 2800000,
+ 3000000, 3300000,
};
static const int ldo_output_enable_mask[] = {
@@ -160,7 +146,6 @@ static const int buck_base_addr[] = {
LP3972_B3TV_REG,
};
-#define LP3972_LDO_VOL_VALUE_MAP(x) (ldo_voltage_map[x])
#define LP3972_LDO_OUTPUT_ENABLE_MASK(x) (ldo_output_enable_mask[x])
#define LP3972_LDO_OUTPUT_ENABLE_REG(x) (ldo_output_enable_addr[x])
@@ -177,7 +162,6 @@ static const int buck_base_addr[] = {
#define LP3972_LDO_VOL_MIN_IDX(x) (((x) == 4) ? 0x05 : 0x00)
#define LP3972_LDO_VOL_MAX_IDX(x) ((x) ? (((x) == 4) ? 0x1f : 0x0f) : 0x0c)
-#define LP3972_BUCK_VOL_VALUE_MAP(x) (buck_voltage_map[x])
#define LP3972_BUCK_VOL_ENABLE_REG(x) (buck_vol_enable_addr[x])
#define LP3972_BUCK_VOL1_REG(x) (buck_base_addr[x])
#define LP3972_BUCK_VOL_MASK 0x1f
@@ -242,17 +226,6 @@ static int lp3972_set_bits(struct lp3972 *lp3972, u8 reg, u16 mask, u16 val)
return ret;
}
-static int lp3972_ldo_list_voltage(struct regulator_dev *dev, unsigned index)
-{
- int ldo = rdev_get_id(dev) - LP3972_LDO1;
-
- if (index < LP3972_LDO_VOL_MIN_IDX(ldo) ||
- index > LP3972_LDO_VOL_MAX_IDX(ldo))
- return -EINVAL;
-
- return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[index];
-}
-
static int lp3972_ldo_is_enabled(struct regulator_dev *dev)
{
struct lp3972 *lp3972 = rdev_get_drvdata(dev);
@@ -294,7 +267,7 @@ static int lp3972_ldo_get_voltage(struct regulator_dev *dev)
reg = lp3972_reg_read(lp3972, LP3972_LDO_VOL_CONTR_REG(ldo));
val = (reg >> LP3972_LDO_VOL_CONTR_SHIFT(ldo)) & mask;
- return 1000 * LP3972_LDO_VOL_VALUE_MAP(ldo)[val];
+ return dev->desc->volt_table[val];
}
static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
@@ -337,7 +310,7 @@ static int lp3972_ldo_set_voltage_sel(struct regulator_dev *dev,
}
static struct regulator_ops lp3972_ldo_ops = {
- .list_voltage = lp3972_ldo_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.is_enabled = lp3972_ldo_is_enabled,
.enable = lp3972_ldo_enable,
.disable = lp3972_ldo_disable,
@@ -345,17 +318,6 @@ static struct regulator_ops lp3972_ldo_ops = {
.set_voltage_sel = lp3972_ldo_set_voltage_sel,
};
-static int lp3972_dcdc_list_voltage(struct regulator_dev *dev, unsigned index)
-{
- int buck = rdev_get_id(dev) - LP3972_DCDC1;
-
- if (index < LP3972_BUCK_VOL_MIN_IDX(buck) ||
- index > LP3972_BUCK_VOL_MAX_IDX(buck))
- return -EINVAL;
-
- return 1000 * buck_voltage_map[buck][index];
-}
-
static int lp3972_dcdc_is_enabled(struct regulator_dev *dev)
{
struct lp3972 *lp3972 = rdev_get_drvdata(dev);
@@ -401,7 +363,7 @@ static int lp3972_dcdc_get_voltage(struct regulator_dev *dev)
reg = lp3972_reg_read(lp3972, LP3972_BUCK_VOL1_REG(buck));
reg &= LP3972_BUCK_VOL_MASK;
if (reg <= LP3972_BUCK_VOL_MAX_IDX(buck))
- val = 1000 * buck_voltage_map[buck][reg];
+ val = dev->desc->volt_table[reg];
else {
val = 0;
dev_warn(&dev->dev, "chip reported incorrect voltage value."
@@ -436,7 +398,7 @@ static int lp3972_dcdc_set_voltage_sel(struct regulator_dev *dev,
}
static struct regulator_ops lp3972_dcdc_ops = {
- .list_voltage = lp3972_dcdc_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.is_enabled = lp3972_dcdc_is_enabled,
.enable = lp3972_dcdc_enable,
.disable = lp3972_dcdc_disable,
@@ -450,6 +412,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_LDO1,
.ops = &lp3972_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo1_voltage_map),
+ .volt_table = ldo1_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -458,6 +421,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_LDO2,
.ops = &lp3972_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo23_voltage_map),
+ .volt_table = ldo23_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -466,6 +430,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_LDO3,
.ops = &lp3972_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo23_voltage_map),
+ .volt_table = ldo23_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -474,6 +439,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_LDO4,
.ops = &lp3972_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo4_voltage_map),
+ .volt_table = ldo4_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -482,6 +448,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_LDO5,
.ops = &lp3972_ldo_ops,
.n_voltages = ARRAY_SIZE(ldo5_voltage_map),
+ .volt_table = ldo5_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -490,6 +457,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_DCDC1,
.ops = &lp3972_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck1_voltage_map),
+ .volt_table = buck1_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -498,6 +466,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_DCDC2,
.ops = &lp3972_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck23_voltage_map),
+ .volt_table = buck23_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
@@ -506,6 +475,7 @@ static const struct regulator_desc regulators[] = {
.id = LP3972_DCDC3,
.ops = &lp3972_dcdc_ops,
.n_voltages = ARRAY_SIZE(buck23_voltage_map),
+ .volt_table = buck23_voltage_map,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
},
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
new file mode 100644
index 000000000000..212c38eaba70
--- /dev/null
+++ b/drivers/regulator/lp872x.c
@@ -0,0 +1,943 @@
+/*
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/regulator/lp872x.h>
+#include <linux/regulator/driver.h>
+#include <linux/platform_device.h>
+
+/* Registers : LP8720/8725 shared */
+#define LP872X_GENERAL_CFG 0x00
+#define LP872X_LDO1_VOUT 0x01
+#define LP872X_LDO2_VOUT 0x02
+#define LP872X_LDO3_VOUT 0x03
+#define LP872X_LDO4_VOUT 0x04
+#define LP872X_LDO5_VOUT 0x05
+
+/* Registers : LP8720 */
+#define LP8720_BUCK_VOUT1 0x06
+#define LP8720_BUCK_VOUT2 0x07
+#define LP8720_ENABLE 0x08
+
+/* Registers : LP8725 */
+#define LP8725_LILO1_VOUT 0x06
+#define LP8725_LILO2_VOUT 0x07
+#define LP8725_BUCK1_VOUT1 0x08
+#define LP8725_BUCK1_VOUT2 0x09
+#define LP8725_BUCK2_VOUT1 0x0A
+#define LP8725_BUCK2_VOUT2 0x0B
+#define LP8725_BUCK_CTRL 0x0C
+#define LP8725_LDO_CTRL 0x0D
+
+/* Mask/shift : LP8720/LP8725 shared */
+#define LP872X_VOUT_M 0x1F
+#define LP872X_START_DELAY_M 0xE0
+#define LP872X_START_DELAY_S 5
+#define LP872X_EN_LDO1_M BIT(0)
+#define LP872X_EN_LDO2_M BIT(1)
+#define LP872X_EN_LDO3_M BIT(2)
+#define LP872X_EN_LDO4_M BIT(3)
+#define LP872X_EN_LDO5_M BIT(4)
+
+/* Mask/shift : LP8720 */
+#define LP8720_TIMESTEP_S 0 /* Addr 00h */
+#define LP8720_TIMESTEP_M BIT(0)
+#define LP8720_EXT_DVS_M BIT(2)
+#define LP8720_BUCK_FPWM_S 5 /* Addr 07h */
+#define LP8720_BUCK_FPWM_M BIT(5)
+#define LP8720_EN_BUCK_M BIT(5) /* Addr 08h */
+#define LP8720_DVS_SEL_M BIT(7)
+
+/* Mask/shift : LP8725 */
+#define LP8725_TIMESTEP_M 0xC0 /* Addr 00h */
+#define LP8725_TIMESTEP_S 6
+#define LP8725_BUCK1_EN_M BIT(0)
+#define LP8725_DVS1_M BIT(2)
+#define LP8725_DVS2_M BIT(3)
+#define LP8725_BUCK2_EN_M BIT(4)
+#define LP8725_BUCK_CL_M 0xC0 /* Addr 09h, 0Bh */
+#define LP8725_BUCK_CL_S 6
+#define LP8725_BUCK1_FPWM_S 1 /* Addr 0Ch */
+#define LP8725_BUCK1_FPWM_M BIT(1)
+#define LP8725_BUCK2_FPWM_S 5
+#define LP8725_BUCK2_FPWM_M BIT(5)
+#define LP8725_EN_LILO1_M BIT(5) /* Addr 0Dh */
+#define LP8725_EN_LILO2_M BIT(6)
+
+/* PWM mode */
+#define LP872X_FORCE_PWM 1
+#define LP872X_AUTO_PWM 0
+
+#define LP8720_NUM_REGULATORS 6
+#define LP8725_NUM_REGULATORS 9
+#define EXTERN_DVS_USED 0
+#define MAX_DELAY 6
+
+/* dump registers in regmap-debugfs */
+#define MAX_REGISTERS 0x0F
+
+enum lp872x_id {
+ LP8720,
+ LP8725,
+};
+
+struct lp872x {
+ struct regmap *regmap;
+ struct device *dev;
+ enum lp872x_id chipid;
+ struct lp872x_platform_data *pdata;
+ struct regulator_dev **regulators;
+ int num_regulators;
+ enum lp872x_dvs_state dvs_pin;
+ int dvs_gpio;
+};
+
+/* LP8720/LP8725 shared voltage table for LDOs */
+static const unsigned int lp872x_ldo_vtbl[] = {
+ 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000, 1550000,
+ 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000, 2000000,
+ 2100000, 2200000, 2300000, 2400000, 2500000, 2600000, 2650000, 2700000,
+ 2750000, 2800000, 2850000, 2900000, 2950000, 3000000, 3100000, 3300000,
+};
+
+/* LP8720 LDO4 voltage table */
+static const unsigned int lp8720_ldo4_vtbl[] = {
+ 800000, 850000, 900000, 1000000, 1100000, 1200000, 1250000, 1300000,
+ 1350000, 1400000, 1450000, 1500000, 1550000, 1600000, 1650000, 1700000,
+ 1750000, 1800000, 1850000, 1900000, 2000000, 2100000, 2200000, 2300000,
+ 2400000, 2500000, 2600000, 2650000, 2700000, 2750000, 2800000, 2850000,
+};
+
+/* LP8725 LILO(Low Input Low Output) voltage table */
+static const unsigned int lp8725_lilo_vtbl[] = {
+ 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
+ 1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1700000,
+ 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
+ 2600000, 2700000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
+};
+
+/* LP8720 BUCK voltage table */
+#define EXT_R 0 /* external resistor divider */
+static const unsigned int lp8720_buck_vtbl[] = {
+ EXT_R, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
+ 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
+ 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000,
+ 1950000, 2000000, 2050000, 2100000, 2150000, 2200000, 2250000, 2300000,
+};
+
+/* LP8725 BUCK voltage table */
+static const unsigned int lp8725_buck_vtbl[] = {
+ 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000, 1150000,
+ 1200000, 1250000, 1300000, 1350000, 1400000, 1500000, 1600000, 1700000,
+ 1750000, 1800000, 1850000, 1900000, 2000000, 2100000, 2200000, 2300000,
+ 2400000, 2500000, 2600000, 2700000, 2800000, 2850000, 2900000, 3000000,
+};
+
+/* LP8725 BUCK current limit */
+static const unsigned int lp8725_buck_uA[] = {
+ 460000, 780000, 1050000, 1370000,
+};
+
+static int lp872x_read_byte(struct lp872x *lp, u8 addr, u8 *data)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(lp->regmap, addr, &val);
+ if (ret < 0) {
+ dev_err(lp->dev, "failed to read 0x%.2x\n", addr);
+ return ret;
+ }
+
+ *data = (u8)val;
+ return 0;
+}
+
+static inline int lp872x_write_byte(struct lp872x *lp, u8 addr, u8 data)
+{
+ return regmap_write(lp->regmap, addr, data);
+}
+
+static inline int lp872x_update_bits(struct lp872x *lp, u8 addr,
+ unsigned int mask, u8 data)
+{
+ return regmap_update_bits(lp->regmap, addr, mask, data);
+}
+
+static int _rdev_to_offset(struct regulator_dev *rdev)
+{
+ enum lp872x_regulator_id id = rdev_get_id(rdev);
+
+ switch (id) {
+ case LP8720_ID_LDO1 ... LP8720_ID_BUCK:
+ return id;
+ case LP8725_ID_LDO1 ... LP8725_ID_BUCK2:
+ return id - LP8725_ID_BASE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lp872x_get_timestep_usec(struct lp872x *lp)
+{
+ enum lp872x_id chip = lp->chipid;
+ u8 val, mask, shift;
+ int *time_usec, size, ret;
+ int lp8720_time_usec[] = { 25, 50 };
+ int lp8725_time_usec[] = { 32, 64, 128, 256 };
+
+ switch (chip) {
+ case LP8720:
+ mask = LP8720_TIMESTEP_M;
+ shift = LP8720_TIMESTEP_S;
+ time_usec = &lp8720_time_usec[0];
+ size = ARRAY_SIZE(lp8720_time_usec);
+ break;
+ case LP8725:
+ mask = LP8725_TIMESTEP_M;
+ shift = LP8725_TIMESTEP_S;
+ time_usec = &lp8725_time_usec[0];
+ size = ARRAY_SIZE(lp8725_time_usec);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val);
+ if (ret)
+ return -EINVAL;
+
+ val = (val & mask) >> shift;
+ if (val >= size)
+ return -EINVAL;
+
+ return *(time_usec + val);
+}
+
+static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id regulator = rdev_get_id(rdev);
+ int time_step_us = lp872x_get_timestep_usec(lp);
+ int ret, offset;
+ u8 addr, val;
+
+ if (time_step_us < 0)
+ return -EINVAL;
+
+ switch (regulator) {
+ case LP8720_ID_LDO1 ... LP8720_ID_LDO5:
+ case LP8725_ID_LDO1 ... LP8725_ID_LILO2:
+ offset = _rdev_to_offset(rdev);
+ if (offset < 0)
+ return -EINVAL;
+
+ addr = LP872X_LDO1_VOUT + offset;
+ break;
+ case LP8720_ID_BUCK:
+ addr = LP8720_BUCK_VOUT1;
+ break;
+ case LP8725_ID_BUCK1:
+ addr = LP8725_BUCK1_VOUT1;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = LP8725_BUCK2_VOUT1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = lp872x_read_byte(lp, addr, &val);
+ if (ret)
+ return ret;
+
+ val = (val & LP872X_START_DELAY_M) >> LP872X_START_DELAY_S;
+
+ return val > MAX_DELAY ? 0 : val * time_step_us;
+}
+
+static void lp872x_set_dvs(struct lp872x *lp, int gpio)
+{
+ enum lp872x_dvs_sel dvs_sel = lp->pdata->dvs->vsel;
+ enum lp872x_dvs_state state;
+
+ state = dvs_sel == SEL_V1 ? DVS_HIGH : DVS_LOW;
+ gpio_set_value(gpio, state);
+ lp->dvs_pin = state;
+}
+
+static u8 lp872x_select_buck_vout_addr(struct lp872x *lp,
+ enum lp872x_regulator_id buck)
+{
+ u8 val, addr;
+
+ if (lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val))
+ return 0;
+
+ switch (buck) {
+ case LP8720_ID_BUCK:
+ if (val & LP8720_EXT_DVS_M) {
+ addr = (lp->dvs_pin == DVS_HIGH) ?
+ LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2;
+ } else {
+ if (lp872x_read_byte(lp, LP8720_ENABLE, &val))
+ return 0;
+
+ addr = val & LP8720_DVS_SEL_M ?
+ LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2;
+ }
+ break;
+ case LP8725_ID_BUCK1:
+ if (val & LP8725_DVS1_M)
+ addr = LP8725_BUCK1_VOUT1;
+ else
+ addr = (lp->dvs_pin == DVS_HIGH) ?
+ LP8725_BUCK1_VOUT1 : LP8725_BUCK1_VOUT2;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = val & LP8725_DVS2_M ?
+ LP8725_BUCK2_VOUT1 : LP8725_BUCK2_VOUT2;
+ break;
+ default:
+ return 0;
+ }
+
+ return addr;
+}
+
+static bool lp872x_is_valid_buck_addr(u8 addr)
+{
+ switch (addr) {
+ case LP8720_BUCK_VOUT1:
+ case LP8720_BUCK_VOUT2:
+ case LP8725_BUCK1_VOUT1:
+ case LP8725_BUCK1_VOUT2:
+ case LP8725_BUCK2_VOUT1:
+ case LP8725_BUCK2_VOUT2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int lp872x_buck_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ u8 addr, mask = LP872X_VOUT_M;
+ struct lp872x_dvs *dvs = lp->pdata->dvs;
+
+ if (dvs && gpio_is_valid(dvs->gpio))
+ lp872x_set_dvs(lp, dvs->gpio);
+
+ addr = lp872x_select_buck_vout_addr(lp, buck);
+ if (!lp872x_is_valid_buck_addr(addr))
+ return -EINVAL;
+
+ return lp872x_update_bits(lp, addr, mask, selector);
+}
+
+static int lp872x_buck_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ u8 addr, val;
+ int ret;
+
+ addr = lp872x_select_buck_vout_addr(lp, buck);
+ if (!lp872x_is_valid_buck_addr(addr))
+ return -EINVAL;
+
+ ret = lp872x_read_byte(lp, addr, &val);
+ if (ret)
+ return ret;
+
+ return val & LP872X_VOUT_M;
+}
+
+static int lp8725_buck_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ int i, max = ARRAY_SIZE(lp8725_buck_uA);
+ u8 addr, val;
+
+ switch (buck) {
+ case LP8725_ID_BUCK1:
+ addr = LP8725_BUCK1_VOUT2;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = LP8725_BUCK2_VOUT2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < max ; i++)
+ if (lp8725_buck_uA[i] >= min_uA &&
+ lp8725_buck_uA[i] <= max_uA)
+ break;
+
+ if (i == max)
+ return -EINVAL;
+
+ val = i << LP8725_BUCK_CL_S;
+
+ return lp872x_update_bits(lp, addr, LP8725_BUCK_CL_M, val);
+}
+
+static int lp8725_buck_get_current_limit(struct regulator_dev *rdev)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ u8 addr, val;
+ int ret;
+
+ switch (buck) {
+ case LP8725_ID_BUCK1:
+ addr = LP8725_BUCK1_VOUT2;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = LP8725_BUCK2_VOUT2;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = lp872x_read_byte(lp, addr, &val);
+ if (ret)
+ return ret;
+
+ val = (val & LP8725_BUCK_CL_M) >> LP8725_BUCK_CL_S;
+
+ return (val < ARRAY_SIZE(lp8725_buck_uA)) ?
+ lp8725_buck_uA[val] : -EINVAL;
+}
+
+static int lp872x_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ u8 addr, mask, shift, val;
+
+ switch (buck) {
+ case LP8720_ID_BUCK:
+ addr = LP8720_BUCK_VOUT2;
+ mask = LP8720_BUCK_FPWM_M;
+ shift = LP8720_BUCK_FPWM_S;
+ break;
+ case LP8725_ID_BUCK1:
+ addr = LP8725_BUCK_CTRL;
+ mask = LP8725_BUCK1_FPWM_M;
+ shift = LP8725_BUCK1_FPWM_S;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = LP8725_BUCK_CTRL;
+ mask = LP8725_BUCK2_FPWM_M;
+ shift = LP8725_BUCK2_FPWM_S;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (mode == REGULATOR_MODE_FAST)
+ val = LP872X_FORCE_PWM << shift;
+ else if (mode == REGULATOR_MODE_NORMAL)
+ val = LP872X_AUTO_PWM << shift;
+ else
+ return -EINVAL;
+
+ return lp872x_update_bits(lp, addr, mask, val);
+}
+
+static unsigned int lp872x_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct lp872x *lp = rdev_get_drvdata(rdev);
+ enum lp872x_regulator_id buck = rdev_get_id(rdev);
+ u8 addr, mask, val;
+ int ret;
+
+ switch (buck) {
+ case LP8720_ID_BUCK:
+ addr = LP8720_BUCK_VOUT2;
+ mask = LP8720_BUCK_FPWM_M;
+ break;
+ case LP8725_ID_BUCK1:
+ addr = LP8725_BUCK_CTRL;
+ mask = LP8725_BUCK1_FPWM_M;
+ break;
+ case LP8725_ID_BUCK2:
+ addr = LP8725_BUCK_CTRL;
+ mask = LP8725_BUCK2_FPWM_M;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = lp872x_read_byte(lp, addr, &val);
+ if (ret)
+ return ret;
+
+ return val & mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
+}
+
+static struct regulator_ops lp872x_ldo_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp872x_regulator_enable_time,
+};
+
+static struct regulator_ops lp8720_buck_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = lp872x_buck_set_voltage_sel,
+ .get_voltage_sel = lp872x_buck_get_voltage_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp872x_regulator_enable_time,
+ .set_mode = lp872x_buck_set_mode,
+ .get_mode = lp872x_buck_get_mode,
+};
+
+static struct regulator_ops lp8725_buck_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = lp872x_buck_set_voltage_sel,
+ .get_voltage_sel = lp872x_buck_get_voltage_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp872x_regulator_enable_time,
+ .set_mode = lp872x_buck_set_mode,
+ .get_mode = lp872x_buck_get_mode,
+ .set_current_limit = lp8725_buck_set_current_limit,
+ .get_current_limit = lp8725_buck_get_current_limit,
+};
+
+static struct regulator_desc lp8720_regulator_desc[] = {
+ {
+ .name = "ldo1",
+ .id = LP8720_ID_LDO1,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO1_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP872X_EN_LDO1_M,
+ },
+ {
+ .name = "ldo2",
+ .id = LP8720_ID_LDO2,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO2_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP872X_EN_LDO2_M,
+ },
+ {
+ .name = "ldo3",
+ .id = LP8720_ID_LDO3,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO3_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP872X_EN_LDO3_M,
+ },
+ {
+ .name = "ldo4",
+ .id = LP8720_ID_LDO4,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp8720_ldo4_vtbl),
+ .volt_table = lp8720_ldo4_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO4_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP872X_EN_LDO4_M,
+ },
+ {
+ .name = "ldo5",
+ .id = LP8720_ID_LDO5,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO5_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP872X_EN_LDO5_M,
+ },
+ {
+ .name = "buck",
+ .id = LP8720_ID_BUCK,
+ .ops = &lp8720_buck_ops,
+ .n_voltages = ARRAY_SIZE(lp8720_buck_vtbl),
+ .volt_table = lp8720_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8720_ENABLE,
+ .enable_mask = LP8720_EN_BUCK_M,
+ },
+};
+
+static struct regulator_desc lp8725_regulator_desc[] = {
+ {
+ .name = "ldo1",
+ .id = LP8725_ID_LDO1,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO1_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP872X_EN_LDO1_M,
+ },
+ {
+ .name = "ldo2",
+ .id = LP8725_ID_LDO2,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO2_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP872X_EN_LDO2_M,
+ },
+ {
+ .name = "ldo3",
+ .id = LP8725_ID_LDO3,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO3_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP872X_EN_LDO3_M,
+ },
+ {
+ .name = "ldo4",
+ .id = LP8725_ID_LDO4,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO4_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP872X_EN_LDO4_M,
+ },
+ {
+ .name = "ldo5",
+ .id = LP8725_ID_LDO5,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp872x_ldo_vtbl),
+ .volt_table = lp872x_ldo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP872X_LDO5_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP872X_EN_LDO5_M,
+ },
+ {
+ .name = "lilo1",
+ .id = LP8725_ID_LILO1,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp8725_lilo_vtbl),
+ .volt_table = lp8725_lilo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8725_LILO1_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP8725_EN_LILO1_M,
+ },
+ {
+ .name = "lilo2",
+ .id = LP8725_ID_LILO2,
+ .ops = &lp872x_ldo_ops,
+ .n_voltages = ARRAY_SIZE(lp8725_lilo_vtbl),
+ .volt_table = lp8725_lilo_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8725_LILO2_VOUT,
+ .vsel_mask = LP872X_VOUT_M,
+ .enable_reg = LP8725_LDO_CTRL,
+ .enable_mask = LP8725_EN_LILO2_M,
+ },
+ {
+ .name = "buck1",
+ .id = LP8725_ID_BUCK1,
+ .ops = &lp8725_buck_ops,
+ .n_voltages = ARRAY_SIZE(lp8725_buck_vtbl),
+ .volt_table = lp8725_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP872X_GENERAL_CFG,
+ .enable_mask = LP8725_BUCK1_EN_M,
+ },
+ {
+ .name = "buck2",
+ .id = LP8725_ID_BUCK2,
+ .ops = &lp8725_buck_ops,
+ .n_voltages = ARRAY_SIZE(lp8725_buck_vtbl),
+ .volt_table = lp8725_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP872X_GENERAL_CFG,
+ .enable_mask = LP8725_BUCK2_EN_M,
+ },
+};
+
+static int lp872x_check_dvs_validity(struct lp872x *lp)
+{
+ struct lp872x_dvs *dvs = lp->pdata->dvs;
+ u8 val = 0;
+ int ret;
+
+ ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val);
+ if (ret)
+ return ret;
+
+ ret = 0;
+ if (lp->chipid == LP8720) {
+ if (val & LP8720_EXT_DVS_M)
+ ret = dvs ? 0 : -EINVAL;
+ } else {
+ if ((val & LP8725_DVS1_M) == EXTERN_DVS_USED)
+ ret = dvs ? 0 : -EINVAL;
+ }
+
+ return ret;
+}
+
+static int lp872x_init_dvs(struct lp872x *lp)
+{
+ int ret, gpio;
+ struct lp872x_dvs *dvs = lp->pdata->dvs;
+ enum lp872x_dvs_state pinstate;
+
+ ret = lp872x_check_dvs_validity(lp);
+ if (ret) {
+ dev_warn(lp->dev, "invalid dvs data: %d\n", ret);
+ return ret;
+ }
+
+ gpio = dvs->gpio;
+ if (!gpio_is_valid(gpio)) {
+ dev_err(lp->dev, "invalid gpio: %d\n", gpio);
+ return -EINVAL;
+ }
+
+ pinstate = dvs->init_state;
+ ret = devm_gpio_request_one(lp->dev, gpio, pinstate, "LP872X DVS");
+ if (ret) {
+ dev_err(lp->dev, "gpio request err: %d\n", ret);
+ return ret;
+ }
+
+ lp->dvs_pin = pinstate;
+ lp->dvs_gpio = gpio;
+
+ return 0;
+}
+
+static int lp872x_config(struct lp872x *lp)
+{
+ struct lp872x_platform_data *pdata = lp->pdata;
+ int ret;
+
+ if (!pdata->update_config)
+ return 0;
+
+ ret = lp872x_write_byte(lp, LP872X_GENERAL_CFG, pdata->general_config);
+ if (ret)
+ return ret;
+
+ return lp872x_init_dvs(lp);
+}
+
+static struct regulator_init_data
+*lp872x_find_regulator_init_data(int id, struct lp872x *lp)
+{
+ int i;
+
+ for (i = 0; i < lp->num_regulators; i++) {
+ if (lp->pdata->regulator_data[i].id == id)
+ return lp->pdata->regulator_data[i].init_data;
+ }
+
+ return NULL;
+}
+
+static int lp872x_regulator_register(struct lp872x *lp)
+{
+ struct regulator_desc *desc;
+ struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
+ int i, ret;
+
+ for (i = 0 ; i < lp->num_regulators ; i++) {
+ desc = (lp->chipid == LP8720) ? &lp8720_regulator_desc[i] :
+ &lp8725_regulator_desc[i];
+
+ cfg.dev = lp->dev;
+ cfg.init_data = lp872x_find_regulator_init_data(desc->id, lp);
+ cfg.driver_data = lp;
+ cfg.regmap = lp->regmap;
+
+ rdev = regulator_register(desc, &cfg);
+ if (IS_ERR(rdev)) {
+ dev_err(lp->dev, "regulator register err");
+ ret = PTR_ERR(rdev);
+ goto err;
+ }
+
+ *(lp->regulators + i) = rdev;
+ }
+
+ return 0;
+err:
+ while (--i >= 0) {
+ rdev = *(lp->regulators + i);
+ regulator_unregister(rdev);
+ }
+ return ret;
+}
+
+static void lp872x_regulator_unregister(struct lp872x *lp)
+{
+ struct regulator_dev *rdev;
+ int i;
+
+ for (i = 0 ; i < lp->num_regulators ; i++) {
+ rdev = *(lp->regulators + i);
+ regulator_unregister(rdev);
+ }
+}
+
+static const struct regmap_config lp872x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX_REGISTERS,
+};
+
+static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
+{
+ struct lp872x *lp;
+ struct lp872x_platform_data *pdata = cl->dev.platform_data;
+ int ret, size, num_regulators;
+ const int lp872x_num_regulators[] = {
+ [LP8720] = LP8720_NUM_REGULATORS,
+ [LP8725] = LP8725_NUM_REGULATORS,
+ };
+
+ if (!pdata) {
+ dev_err(&cl->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL);
+ if (!lp)
+ goto err_mem;
+
+ num_regulators = lp872x_num_regulators[id->driver_data];
+ size = sizeof(struct regulator_dev *) * num_regulators;
+
+ lp->regulators = devm_kzalloc(&cl->dev, size, GFP_KERNEL);
+ if (!lp->regulators)
+ goto err_mem;
+
+ lp->regmap = devm_regmap_init_i2c(cl, &lp872x_regmap_config);
+ if (IS_ERR(lp->regmap)) {
+ ret = PTR_ERR(lp->regmap);
+ dev_err(&cl->dev, "regmap init i2c err: %d\n", ret);
+ goto err_dev;
+ }
+
+ lp->dev = &cl->dev;
+ lp->pdata = pdata;
+ lp->chipid = id->driver_data;
+ lp->num_regulators = num_regulators;
+ i2c_set_clientdata(cl, lp);
+
+ ret = lp872x_config(lp);
+ if (ret)
+ goto err_dev;
+
+ return lp872x_regulator_register(lp);
+
+err_mem:
+ return -ENOMEM;
+err_dev:
+ return ret;
+}
+
+static int __devexit lp872x_remove(struct i2c_client *cl)
+{
+ struct lp872x *lp = i2c_get_clientdata(cl);
+
+ lp872x_regulator_unregister(lp);
+ return 0;
+}
+
+static const struct i2c_device_id lp872x_ids[] = {
+ {"lp8720", LP8720},
+ {"lp8725", LP8725},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lp872x_ids);
+
+static struct i2c_driver lp872x_driver = {
+ .driver = {
+ .name = "lp872x",
+ .owner = THIS_MODULE,
+ },
+ .probe = lp872x_probe,
+ .remove = __devexit_p(lp872x_remove),
+ .id_table = lp872x_ids,
+};
+
+module_i2c_driver(lp872x_driver);
+
+MODULE_DESCRIPTION("TI/National Semiconductor LP872x PMU Regulator Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/lp8788-buck.c b/drivers/regulator/lp8788-buck.c
new file mode 100644
index 000000000000..6356e821400f
--- /dev/null
+++ b/drivers/regulator/lp8788-buck.c
@@ -0,0 +1,629 @@
+/*
+ * TI LP8788 MFD - buck regulator driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/mfd/lp8788.h>
+#include <linux/gpio.h>
+
+/* register address */
+#define LP8788_EN_BUCK 0x0C
+#define LP8788_BUCK_DVS_SEL 0x1D
+#define LP8788_BUCK1_VOUT0 0x1E
+#define LP8788_BUCK1_VOUT1 0x1F
+#define LP8788_BUCK1_VOUT2 0x20
+#define LP8788_BUCK1_VOUT3 0x21
+#define LP8788_BUCK2_VOUT0 0x22
+#define LP8788_BUCK2_VOUT1 0x23
+#define LP8788_BUCK2_VOUT2 0x24
+#define LP8788_BUCK2_VOUT3 0x25
+#define LP8788_BUCK3_VOUT 0x26
+#define LP8788_BUCK4_VOUT 0x27
+#define LP8788_BUCK1_TIMESTEP 0x28
+#define LP8788_BUCK_PWM 0x2D
+
+/* mask/shift bits */
+#define LP8788_EN_BUCK1_M BIT(0) /* Addr 0Ch */
+#define LP8788_EN_BUCK2_M BIT(1)
+#define LP8788_EN_BUCK3_M BIT(2)
+#define LP8788_EN_BUCK4_M BIT(3)
+#define LP8788_BUCK1_DVS_SEL_M 0x04 /* Addr 1Dh */
+#define LP8788_BUCK1_DVS_M 0x03
+#define LP8788_BUCK1_DVS_S 0
+#define LP8788_BUCK2_DVS_SEL_M 0x40
+#define LP8788_BUCK2_DVS_M 0x30
+#define LP8788_BUCK2_DVS_S 4
+#define LP8788_BUCK1_DVS_I2C BIT(2)
+#define LP8788_BUCK2_DVS_I2C BIT(6)
+#define LP8788_BUCK1_DVS_PIN (0 << 2)
+#define LP8788_BUCK2_DVS_PIN (0 << 6)
+#define LP8788_VOUT_M 0x1F /* Addr 1Eh ~ 27h */
+#define LP8788_STARTUP_TIME_M 0xF8 /* Addr 28h ~ 2Bh */
+#define LP8788_STARTUP_TIME_S 3
+#define LP8788_FPWM_BUCK1_M BIT(0) /* Addr 2Dh */
+#define LP8788_FPWM_BUCK1_S 0
+#define LP8788_FPWM_BUCK2_M BIT(1)
+#define LP8788_FPWM_BUCK2_S 1
+#define LP8788_FPWM_BUCK3_M BIT(2)
+#define LP8788_FPWM_BUCK3_S 2
+#define LP8788_FPWM_BUCK4_M BIT(3)
+#define LP8788_FPWM_BUCK4_S 3
+
+#define INVALID_ADDR 0xFF
+#define LP8788_FORCE_PWM 1
+#define LP8788_AUTO_PWM 0
+#define PIN_LOW 0
+#define PIN_HIGH 1
+#define ENABLE_TIME_USEC 32
+
+enum lp8788_dvs_state {
+ DVS_LOW = GPIOF_OUT_INIT_LOW,
+ DVS_HIGH = GPIOF_OUT_INIT_HIGH,
+};
+
+enum lp8788_dvs_mode {
+ REGISTER,
+ EXTPIN,
+};
+
+enum lp8788_buck_id {
+ BUCK1,
+ BUCK2,
+ BUCK3,
+ BUCK4,
+};
+
+struct lp8788_pwm_map {
+ u8 mask;
+ u8 shift;
+};
+
+struct lp8788_buck {
+ struct lp8788 *lp;
+ struct regulator_dev *regulator;
+ struct lp8788_pwm_map *pmap;
+ void *dvs;
+};
+
+/* BUCK 1 ~ 4 voltage table */
+static const int lp8788_buck_vtbl[] = {
+ 500000, 800000, 850000, 900000, 950000, 1000000, 1050000, 1100000,
+ 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000, 1500000,
+ 1550000, 1600000, 1650000, 1700000, 1750000, 1800000, 1850000, 1900000,
+ 1950000, 2000000,
+};
+
+/* buck pwm mode selection : used for set/get_mode in regulator ops
+ * @forced pwm : fast mode
+ * @auto pwm : normal mode
+ */
+static struct lp8788_pwm_map buck_pmap[] = {
+ [BUCK1] = {
+ .mask = LP8788_FPWM_BUCK1_M,
+ .shift = LP8788_FPWM_BUCK1_S,
+ },
+ [BUCK2] = {
+ .mask = LP8788_FPWM_BUCK2_M,
+ .shift = LP8788_FPWM_BUCK2_S,
+ },
+ [BUCK3] = {
+ .mask = LP8788_FPWM_BUCK3_M,
+ .shift = LP8788_FPWM_BUCK3_S,
+ },
+ [BUCK4] = {
+ .mask = LP8788_FPWM_BUCK4_M,
+ .shift = LP8788_FPWM_BUCK4_S,
+ },
+};
+
+static const u8 buck1_vout_addr[] = {
+ LP8788_BUCK1_VOUT0, LP8788_BUCK1_VOUT1,
+ LP8788_BUCK1_VOUT2, LP8788_BUCK1_VOUT3,
+};
+
+static const u8 buck2_vout_addr[] = {
+ LP8788_BUCK2_VOUT0, LP8788_BUCK2_VOUT1,
+ LP8788_BUCK2_VOUT2, LP8788_BUCK2_VOUT3,
+};
+
+static void lp8788_buck1_set_dvs(struct lp8788_buck *buck)
+{
+ struct lp8788_buck1_dvs *dvs = (struct lp8788_buck1_dvs *)buck->dvs;
+ enum lp8788_dvs_state pinstate;
+
+ if (!dvs)
+ return;
+
+ pinstate = dvs->vsel == DVS_SEL_V0 ? DVS_LOW : DVS_HIGH;
+ if (gpio_is_valid(dvs->gpio))
+ gpio_set_value(dvs->gpio, pinstate);
+}
+
+static void lp8788_buck2_set_dvs(struct lp8788_buck *buck)
+{
+ struct lp8788_buck2_dvs *dvs = (struct lp8788_buck2_dvs *)buck->dvs;
+ enum lp8788_dvs_state pin1, pin2;
+
+ if (!dvs)
+ return;
+
+ switch (dvs->vsel) {
+ case DVS_SEL_V0:
+ pin1 = DVS_LOW;
+ pin2 = DVS_LOW;
+ break;
+ case DVS_SEL_V1:
+ pin1 = DVS_HIGH;
+ pin2 = DVS_LOW;
+ break;
+ case DVS_SEL_V2:
+ pin1 = DVS_LOW;
+ pin2 = DVS_HIGH;
+ break;
+ case DVS_SEL_V3:
+ pin1 = DVS_HIGH;
+ pin2 = DVS_HIGH;
+ break;
+ default:
+ return;
+ }
+
+ if (gpio_is_valid(dvs->gpio[0]))
+ gpio_set_value(dvs->gpio[0], pin1);
+
+ if (gpio_is_valid(dvs->gpio[1]))
+ gpio_set_value(dvs->gpio[1], pin2);
+}
+
+static void lp8788_set_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
+{
+ switch (id) {
+ case BUCK1:
+ lp8788_buck1_set_dvs(buck);
+ break;
+ case BUCK2:
+ lp8788_buck2_set_dvs(buck);
+ break;
+ default:
+ break;
+ }
+}
+
+static enum lp8788_dvs_mode
+lp8788_get_buck_dvs_ctrl_mode(struct lp8788_buck *buck, enum lp8788_buck_id id)
+{
+ u8 val, mask;
+
+ switch (id) {
+ case BUCK1:
+ mask = LP8788_BUCK1_DVS_SEL_M;
+ break;
+ case BUCK2:
+ mask = LP8788_BUCK2_DVS_SEL_M;
+ break;
+ default:
+ return REGISTER;
+ }
+
+ lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
+
+ return val & mask ? REGISTER : EXTPIN;
+}
+
+static bool lp8788_is_valid_buck_addr(u8 addr)
+{
+ switch (addr) {
+ case LP8788_BUCK1_VOUT0:
+ case LP8788_BUCK1_VOUT1:
+ case LP8788_BUCK1_VOUT2:
+ case LP8788_BUCK1_VOUT3:
+ case LP8788_BUCK2_VOUT0:
+ case LP8788_BUCK2_VOUT1:
+ case LP8788_BUCK2_VOUT2:
+ case LP8788_BUCK2_VOUT3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static u8 lp8788_select_buck_vout_addr(struct lp8788_buck *buck,
+ enum lp8788_buck_id id)
+{
+ enum lp8788_dvs_mode mode = lp8788_get_buck_dvs_ctrl_mode(buck, id);
+ struct lp8788_buck1_dvs *b1_dvs;
+ struct lp8788_buck2_dvs *b2_dvs;
+ u8 val, idx, addr;
+ int pin1, pin2;
+
+ switch (id) {
+ case BUCK1:
+ if (mode == EXTPIN) {
+ b1_dvs = (struct lp8788_buck1_dvs *)buck->dvs;
+ if (!b1_dvs)
+ goto err;
+
+ idx = gpio_get_value(b1_dvs->gpio) ? 1 : 0;
+ } else {
+ lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
+ idx = (val & LP8788_BUCK1_DVS_M) >> LP8788_BUCK1_DVS_S;
+ }
+ addr = buck1_vout_addr[idx];
+ break;
+ case BUCK2:
+ if (mode == EXTPIN) {
+ b2_dvs = (struct lp8788_buck2_dvs *)buck->dvs;
+ if (!b2_dvs)
+ goto err;
+
+ pin1 = gpio_get_value(b2_dvs->gpio[0]);
+ pin2 = gpio_get_value(b2_dvs->gpio[1]);
+
+ if (pin1 == PIN_LOW && pin2 == PIN_LOW)
+ idx = 0;
+ else if (pin1 == PIN_LOW && pin2 == PIN_HIGH)
+ idx = 2;
+ else if (pin1 == PIN_HIGH && pin2 == PIN_LOW)
+ idx = 1;
+ else
+ idx = 3;
+ } else {
+ lp8788_read_byte(buck->lp, LP8788_BUCK_DVS_SEL, &val);
+ idx = (val & LP8788_BUCK2_DVS_M) >> LP8788_BUCK2_DVS_S;
+ }
+ addr = buck2_vout_addr[idx];
+ break;
+ default:
+ goto err;
+ }
+
+ return addr;
+err:
+ return INVALID_ADDR;
+}
+
+static int lp8788_buck12_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct lp8788_buck *buck = rdev_get_drvdata(rdev);
+ enum lp8788_buck_id id = rdev_get_id(rdev);
+ u8 addr;
+
+ if (buck->dvs)
+ lp8788_set_dvs(buck, id);
+
+ addr = lp8788_select_buck_vout_addr(buck, id);
+ if (!lp8788_is_valid_buck_addr(addr))
+ return -EINVAL;
+
+ return lp8788_update_bits(buck->lp, addr, LP8788_VOUT_M, selector);
+}
+
+static int lp8788_buck12_get_voltage_sel(struct regulator_dev *rdev)
+{
+ struct lp8788_buck *buck = rdev_get_drvdata(rdev);
+ enum lp8788_buck_id id = rdev_get_id(rdev);
+ int ret;
+ u8 val, addr;
+
+ addr = lp8788_select_buck_vout_addr(buck, id);
+ if (!lp8788_is_valid_buck_addr(addr))
+ return -EINVAL;
+
+ ret = lp8788_read_byte(buck->lp, addr, &val);
+ if (ret)
+ return ret;
+
+ return val & LP8788_VOUT_M;
+}
+
+static int lp8788_buck_enable_time(struct regulator_dev *rdev)
+{
+ struct lp8788_buck *buck = rdev_get_drvdata(rdev);
+ enum lp8788_buck_id id = rdev_get_id(rdev);
+ u8 val, addr = LP8788_BUCK1_TIMESTEP + id;
+
+ if (lp8788_read_byte(buck->lp, addr, &val))
+ return -EINVAL;
+
+ val = (val & LP8788_STARTUP_TIME_M) >> LP8788_STARTUP_TIME_S;
+
+ return ENABLE_TIME_USEC * val;
+}
+
+static int lp8788_buck_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct lp8788_buck *buck = rdev_get_drvdata(rdev);
+ struct lp8788_pwm_map *pmap = buck->pmap;
+ u8 val;
+
+ if (!pmap)
+ return -EINVAL;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = LP8788_FORCE_PWM << pmap->shift;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = LP8788_AUTO_PWM << pmap->shift;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return lp8788_update_bits(buck->lp, LP8788_BUCK_PWM, pmap->mask, val);
+}
+
+static unsigned int lp8788_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct lp8788_buck *buck = rdev_get_drvdata(rdev);
+ struct lp8788_pwm_map *pmap = buck->pmap;
+ u8 val;
+ int ret;
+
+ if (!pmap)
+ return -EINVAL;
+
+ ret = lp8788_read_byte(buck->lp, LP8788_BUCK_PWM, &val);
+ if (ret)
+ return ret;
+
+ return val & pmap->mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL;
+}
+
+static struct regulator_ops lp8788_buck12_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = lp8788_buck12_set_voltage_sel,
+ .get_voltage_sel = lp8788_buck12_get_voltage_sel,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp8788_buck_enable_time,
+ .set_mode = lp8788_buck_set_mode,
+ .get_mode = lp8788_buck_get_mode,
+};
+
+static struct regulator_ops lp8788_buck34_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable_time = lp8788_buck_enable_time,
+ .set_mode = lp8788_buck_set_mode,
+ .get_mode = lp8788_buck_get_mode,
+};
+
+static struct regulator_desc lp8788_buck_desc[] = {
+ {
+ .name = "buck1",
+ .id = BUCK1,
+ .ops = &lp8788_buck12_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
+ .volt_table = lp8788_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_BUCK,
+ .enable_mask = LP8788_EN_BUCK1_M,
+ },
+ {
+ .name = "buck2",
+ .id = BUCK2,
+ .ops = &lp8788_buck12_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
+ .volt_table = lp8788_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_BUCK,
+ .enable_mask = LP8788_EN_BUCK2_M,
+ },
+ {
+ .name = "buck3",
+ .id = BUCK3,
+ .ops = &lp8788_buck34_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
+ .volt_table = lp8788_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_BUCK3_VOUT,
+ .vsel_mask = LP8788_VOUT_M,
+ .enable_reg = LP8788_EN_BUCK,
+ .enable_mask = LP8788_EN_BUCK3_M,
+ },
+ {
+ .name = "buck4",
+ .id = BUCK4,
+ .ops = &lp8788_buck34_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_buck_vtbl),
+ .volt_table = lp8788_buck_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_BUCK4_VOUT,
+ .vsel_mask = LP8788_VOUT_M,
+ .enable_reg = LP8788_EN_BUCK,
+ .enable_mask = LP8788_EN_BUCK4_M,
+ },
+};
+
+static int lp8788_set_default_dvs_ctrl_mode(struct lp8788 *lp,
+ enum lp8788_buck_id id)
+{
+ u8 mask, val;
+
+ switch (id) {
+ case BUCK1:
+ mask = LP8788_BUCK1_DVS_SEL_M;
+ val = LP8788_BUCK1_DVS_I2C;
+ break;
+ case BUCK2:
+ mask = LP8788_BUCK2_DVS_SEL_M;
+ val = LP8788_BUCK2_DVS_I2C;
+ break;
+ default:
+ return 0;
+ }
+
+ return lp8788_update_bits(lp, LP8788_BUCK_DVS_SEL, mask, val);
+}
+
+static int _gpio_request(struct lp8788_buck *buck, int gpio, char *name)
+{
+ struct device *dev = buck->lp->dev;
+
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "invalid gpio: %d\n", gpio);
+ return -EINVAL;
+ }
+
+ return devm_gpio_request_one(dev, gpio, DVS_LOW, name);
+}
+
+static int lp8788_dvs_gpio_request(struct lp8788_buck *buck,
+ enum lp8788_buck_id id)
+{
+ struct lp8788_platform_data *pdata = buck->lp->pdata;
+ char *b1_name = "LP8788_B1_DVS";
+ char *b2_name[] = { "LP8788_B2_DVS1", "LP8788_B2_DVS2" };
+ int i, gpio, ret;
+
+ switch (id) {
+ case BUCK1:
+ gpio = pdata->buck1_dvs->gpio;
+ ret = _gpio_request(buck, gpio, b1_name);
+ if (ret)
+ return ret;
+
+ buck->dvs = pdata->buck1_dvs;
+ break;
+ case BUCK2:
+ for (i = 0 ; i < LP8788_NUM_BUCK2_DVS ; i++) {
+ gpio = pdata->buck2_dvs->gpio[i];
+ ret = _gpio_request(buck, gpio, b2_name[i]);
+ if (ret)
+ return ret;
+ }
+ buck->dvs = pdata->buck2_dvs;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id)
+{
+ struct lp8788_platform_data *pdata = buck->lp->pdata;
+ u8 mask[] = { LP8788_BUCK1_DVS_SEL_M, LP8788_BUCK2_DVS_SEL_M };
+ u8 val[] = { LP8788_BUCK1_DVS_PIN, LP8788_BUCK2_DVS_PIN };
+
+ /* no dvs for buck3, 4 */
+ if (id == BUCK3 || id == BUCK4)
+ return 0;
+
+ /* no dvs platform data, then dvs will be selected by I2C registers */
+ if (!pdata)
+ goto set_default_dvs_mode;
+
+ if ((id == BUCK1 && !pdata->buck1_dvs) ||
+ (id == BUCK2 && !pdata->buck2_dvs))
+ goto set_default_dvs_mode;
+
+ if (lp8788_dvs_gpio_request(buck, id))
+ goto set_default_dvs_mode;
+
+ return lp8788_update_bits(buck->lp, LP8788_BUCK_DVS_SEL, mask[id],
+ val[id]);
+
+set_default_dvs_mode:
+ return lp8788_set_default_dvs_ctrl_mode(buck->lp, id);
+}
+
+static __devinit int lp8788_buck_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ int id = pdev->id;
+ struct lp8788_buck *buck;
+ struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
+ int ret;
+
+ buck = devm_kzalloc(lp->dev, sizeof(struct lp8788_buck), GFP_KERNEL);
+ if (!buck)
+ return -ENOMEM;
+
+ buck->lp = lp;
+ buck->pmap = &buck_pmap[id];
+
+ ret = lp8788_init_dvs(buck, id);
+ if (ret)
+ return ret;
+
+ cfg.dev = lp->dev;
+ cfg.init_data = lp->pdata ? lp->pdata->buck_data[id] : NULL;
+ cfg.driver_data = buck;
+ cfg.regmap = lp->regmap;
+
+ rdev = regulator_register(&lp8788_buck_desc[id], &cfg);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(lp->dev, "BUCK%d regulator register err = %d\n",
+ id + 1, ret);
+ return ret;
+ }
+
+ buck->regulator = rdev;
+ platform_set_drvdata(pdev, buck);
+
+ return 0;
+}
+
+static int __devexit lp8788_buck_remove(struct platform_device *pdev)
+{
+ struct lp8788_buck *buck = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ regulator_unregister(buck->regulator);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_buck_driver = {
+ .probe = lp8788_buck_probe,
+ .remove = __devexit_p(lp8788_buck_remove),
+ .driver = {
+ .name = LP8788_DEV_BUCK,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init lp8788_buck_init(void)
+{
+ return platform_driver_register(&lp8788_buck_driver);
+}
+subsys_initcall(lp8788_buck_init);
+
+static void __exit lp8788_buck_exit(void)
+{
+ platform_driver_unregister(&lp8788_buck_driver);
+}
+module_exit(lp8788_buck_exit);
+
+MODULE_DESCRIPTION("TI LP8788 BUCK Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-buck");
diff --git a/drivers/regulator/lp8788-ldo.c b/drivers/regulator/lp8788-ldo.c
new file mode 100644
index 000000000000..d2122e41a96d
--- /dev/null
+++ b/drivers/regulator/lp8788-ldo.c
@@ -0,0 +1,842 @@
+/*
+ * TI LP8788 MFD - ldo regulator driver
+ *
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/gpio.h>
+#include <linux/mfd/lp8788.h>
+
+/* register address */
+#define LP8788_EN_LDO_A 0x0D /* DLDO 1 ~ 8 */
+#define LP8788_EN_LDO_B 0x0E /* DLDO 9 ~ 12, ALDO 1 ~ 4 */
+#define LP8788_EN_LDO_C 0x0F /* ALDO 5 ~ 10 */
+#define LP8788_EN_SEL 0x10
+#define LP8788_DLDO1_VOUT 0x2E
+#define LP8788_DLDO2_VOUT 0x2F
+#define LP8788_DLDO3_VOUT 0x30
+#define LP8788_DLDO4_VOUT 0x31
+#define LP8788_DLDO5_VOUT 0x32
+#define LP8788_DLDO6_VOUT 0x33
+#define LP8788_DLDO7_VOUT 0x34
+#define LP8788_DLDO8_VOUT 0x35
+#define LP8788_DLDO9_VOUT 0x36
+#define LP8788_DLDO10_VOUT 0x37
+#define LP8788_DLDO11_VOUT 0x38
+#define LP8788_DLDO12_VOUT 0x39
+#define LP8788_ALDO1_VOUT 0x3A
+#define LP8788_ALDO2_VOUT 0x3B
+#define LP8788_ALDO3_VOUT 0x3C
+#define LP8788_ALDO4_VOUT 0x3D
+#define LP8788_ALDO5_VOUT 0x3E
+#define LP8788_ALDO6_VOUT 0x3F
+#define LP8788_ALDO7_VOUT 0x40
+#define LP8788_ALDO8_VOUT 0x41
+#define LP8788_ALDO9_VOUT 0x42
+#define LP8788_ALDO10_VOUT 0x43
+#define LP8788_DLDO1_TIMESTEP 0x44
+
+/* mask/shift bits */
+#define LP8788_EN_DLDO1_M BIT(0) /* Addr 0Dh ~ 0Fh */
+#define LP8788_EN_DLDO2_M BIT(1)
+#define LP8788_EN_DLDO3_M BIT(2)
+#define LP8788_EN_DLDO4_M BIT(3)
+#define LP8788_EN_DLDO5_M BIT(4)
+#define LP8788_EN_DLDO6_M BIT(5)
+#define LP8788_EN_DLDO7_M BIT(6)
+#define LP8788_EN_DLDO8_M BIT(7)
+#define LP8788_EN_DLDO9_M BIT(0)
+#define LP8788_EN_DLDO10_M BIT(1)
+#define LP8788_EN_DLDO11_M BIT(2)
+#define LP8788_EN_DLDO12_M BIT(3)
+#define LP8788_EN_ALDO1_M BIT(4)
+#define LP8788_EN_ALDO2_M BIT(5)
+#define LP8788_EN_ALDO3_M BIT(6)
+#define LP8788_EN_ALDO4_M BIT(7)
+#define LP8788_EN_ALDO5_M BIT(0)
+#define LP8788_EN_ALDO6_M BIT(1)
+#define LP8788_EN_ALDO7_M BIT(2)
+#define LP8788_EN_ALDO8_M BIT(3)
+#define LP8788_EN_ALDO9_M BIT(4)
+#define LP8788_EN_ALDO10_M BIT(5)
+#define LP8788_EN_SEL_DLDO911_M BIT(0) /* Addr 10h */
+#define LP8788_EN_SEL_DLDO7_M BIT(1)
+#define LP8788_EN_SEL_ALDO7_M BIT(2)
+#define LP8788_EN_SEL_ALDO5_M BIT(3)
+#define LP8788_EN_SEL_ALDO234_M BIT(4)
+#define LP8788_EN_SEL_ALDO1_M BIT(5)
+#define LP8788_VOUT_5BIT_M 0x1F /* Addr 2Eh ~ 43h */
+#define LP8788_VOUT_4BIT_M 0x0F
+#define LP8788_VOUT_3BIT_M 0x07
+#define LP8788_VOUT_1BIT_M 0x01
+#define LP8788_STARTUP_TIME_M 0xF8 /* Addr 44h ~ 59h */
+#define LP8788_STARTUP_TIME_S 3
+
+#define ENABLE_TIME_USEC 32
+#define ENABLE GPIOF_OUT_INIT_HIGH
+#define DISABLE GPIOF_OUT_INIT_LOW
+
+enum lp8788_enable_mode {
+ REGISTER,
+ EXTPIN,
+};
+
+enum lp8788_ldo_id {
+ DLDO1,
+ DLDO2,
+ DLDO3,
+ DLDO4,
+ DLDO5,
+ DLDO6,
+ DLDO7,
+ DLDO8,
+ DLDO9,
+ DLDO10,
+ DLDO11,
+ DLDO12,
+ ALDO1,
+ ALDO2,
+ ALDO3,
+ ALDO4,
+ ALDO5,
+ ALDO6,
+ ALDO7,
+ ALDO8,
+ ALDO9,
+ ALDO10,
+};
+
+struct lp8788_ldo {
+ struct lp8788 *lp;
+ struct regulator_desc *desc;
+ struct regulator_dev *regulator;
+ struct lp8788_ldo_enable_pin *en_pin;
+};
+
+/* DLDO 1, 2, 3, 9 voltage table */
+const int lp8788_dldo1239_vtbl[] = {
+ 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
+ 2600000, 2700000, 2800000, 2900000, 3000000, 2850000, 2850000, 2850000,
+ 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000,
+ 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000, 2850000,
+};
+
+/* DLDO 4 voltage table */
+static const int lp8788_dldo4_vtbl[] = { 1800000, 3000000 };
+
+/* DLDO 5, 7, 8 and ALDO 6 voltage table */
+static const int lp8788_dldo578_aldo6_vtbl[] = {
+ 1800000, 1900000, 2000000, 2100000, 2200000, 2300000, 2400000, 2500000,
+ 2600000, 2700000, 2800000, 2900000, 3000000, 3000000, 3000000, 3000000,
+};
+
+/* DLDO 6 voltage table */
+static const int lp8788_dldo6_vtbl[] = {
+ 3000000, 3100000, 3200000, 3300000, 3400000, 3500000, 3600000, 3600000,
+};
+
+/* DLDO 10, 11 voltage table */
+static const int lp8788_dldo1011_vtbl[] = {
+ 1100000, 1150000, 1200000, 1250000, 1300000, 1350000, 1400000, 1450000,
+ 1500000, 1500000, 1500000, 1500000, 1500000, 1500000, 1500000, 1500000,
+};
+
+/* ALDO 1 voltage table */
+static const int lp8788_aldo1_vtbl[] = { 1800000, 2850000 };
+
+/* ALDO 7 voltage table */
+static const int lp8788_aldo7_vtbl[] = {
+ 1200000, 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1800000,
+};
+
+static enum lp8788_ldo_id lp8788_dldo_id[] = {
+ DLDO1,
+ DLDO2,
+ DLDO3,
+ DLDO4,
+ DLDO5,
+ DLDO6,
+ DLDO7,
+ DLDO8,
+ DLDO9,
+ DLDO10,
+ DLDO11,
+ DLDO12,
+};
+
+static enum lp8788_ldo_id lp8788_aldo_id[] = {
+ ALDO1,
+ ALDO2,
+ ALDO3,
+ ALDO4,
+ ALDO5,
+ ALDO6,
+ ALDO7,
+ ALDO8,
+ ALDO9,
+ ALDO10,
+};
+
+/* DLDO 7, 9 and 11, ALDO 1 ~ 5 and 7
+ : can be enabled either by external pin or by i2c register */
+static enum lp8788_enable_mode
+lp8788_get_ldo_enable_mode(struct lp8788_ldo *ldo, enum lp8788_ldo_id id)
+{
+ int ret;
+ u8 val, mask;
+
+ ret = lp8788_read_byte(ldo->lp, LP8788_EN_SEL, &val);
+ if (ret)
+ return ret;
+
+ switch (id) {
+ case DLDO7:
+ mask = LP8788_EN_SEL_DLDO7_M;
+ break;
+ case DLDO9:
+ case DLDO11:
+ mask = LP8788_EN_SEL_DLDO911_M;
+ break;
+ case ALDO1:
+ mask = LP8788_EN_SEL_ALDO1_M;
+ break;
+ case ALDO2 ... ALDO4:
+ mask = LP8788_EN_SEL_ALDO234_M;
+ break;
+ case ALDO5:
+ mask = LP8788_EN_SEL_ALDO5_M;
+ break;
+ case ALDO7:
+ mask = LP8788_EN_SEL_ALDO7_M;
+ break;
+ default:
+ return REGISTER;
+ }
+
+ return val & mask ? EXTPIN : REGISTER;
+}
+
+static int lp8788_ldo_ctrl_by_extern_pin(struct lp8788_ldo *ldo, int pinstate)
+{
+ struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
+
+ if (!pin)
+ return -EINVAL;
+
+ if (gpio_is_valid(pin->gpio))
+ gpio_set_value(pin->gpio, pinstate);
+
+ return 0;
+}
+
+static int lp8788_ldo_is_enabled_by_extern_pin(struct lp8788_ldo *ldo)
+{
+ struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
+
+ if (!pin)
+ return -EINVAL;
+
+ return gpio_get_value(pin->gpio) ? 1 : 0;
+}
+
+static int lp8788_ldo_enable(struct regulator_dev *rdev)
+{
+ struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
+ enum lp8788_ldo_id id = rdev_get_id(rdev);
+ enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
+
+ switch (mode) {
+ case EXTPIN:
+ return lp8788_ldo_ctrl_by_extern_pin(ldo, ENABLE);
+ case REGISTER:
+ return regulator_enable_regmap(rdev);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lp8788_ldo_disable(struct regulator_dev *rdev)
+{
+ struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
+ enum lp8788_ldo_id id = rdev_get_id(rdev);
+ enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
+
+ switch (mode) {
+ case EXTPIN:
+ return lp8788_ldo_ctrl_by_extern_pin(ldo, DISABLE);
+ case REGISTER:
+ return regulator_disable_regmap(rdev);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lp8788_ldo_is_enabled(struct regulator_dev *rdev)
+{
+ struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
+ enum lp8788_ldo_id id = rdev_get_id(rdev);
+ enum lp8788_enable_mode mode = lp8788_get_ldo_enable_mode(ldo, id);
+
+ switch (mode) {
+ case EXTPIN:
+ return lp8788_ldo_is_enabled_by_extern_pin(ldo);
+ case REGISTER:
+ return regulator_is_enabled_regmap(rdev);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lp8788_ldo_enable_time(struct regulator_dev *rdev)
+{
+ struct lp8788_ldo *ldo = rdev_get_drvdata(rdev);
+ enum lp8788_ldo_id id = rdev_get_id(rdev);
+ u8 val, addr = LP8788_DLDO1_TIMESTEP + id;
+
+ if (lp8788_read_byte(ldo->lp, addr, &val))
+ return -EINVAL;
+
+ val = (val & LP8788_STARTUP_TIME_M) >> LP8788_STARTUP_TIME_S;
+
+ return ENABLE_TIME_USEC * val;
+}
+
+static int lp8788_ldo_fixed_get_voltage(struct regulator_dev *rdev)
+{
+ enum lp8788_ldo_id id = rdev_get_id(rdev);
+
+ switch (id) {
+ case ALDO2 ... ALDO5:
+ return 2850000;
+ case DLDO12:
+ case ALDO8 ... ALDO9:
+ return 2500000;
+ case ALDO10:
+ return 1100000;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct regulator_ops lp8788_ldo_voltage_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = lp8788_ldo_enable,
+ .disable = lp8788_ldo_disable,
+ .is_enabled = lp8788_ldo_is_enabled,
+ .enable_time = lp8788_ldo_enable_time,
+};
+
+static struct regulator_ops lp8788_ldo_voltage_fixed_ops = {
+ .get_voltage = lp8788_ldo_fixed_get_voltage,
+ .enable = lp8788_ldo_enable,
+ .disable = lp8788_ldo_disable,
+ .is_enabled = lp8788_ldo_is_enabled,
+ .enable_time = lp8788_ldo_enable_time,
+};
+
+static struct regulator_desc lp8788_dldo_desc[] = {
+ {
+ .name = "dldo1",
+ .id = DLDO1,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
+ .volt_table = lp8788_dldo1239_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO1_VOUT,
+ .vsel_mask = LP8788_VOUT_5BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO1_M,
+ },
+ {
+ .name = "dldo2",
+ .id = DLDO2,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
+ .volt_table = lp8788_dldo1239_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO2_VOUT,
+ .vsel_mask = LP8788_VOUT_5BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO2_M,
+ },
+ {
+ .name = "dldo3",
+ .id = DLDO3,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
+ .volt_table = lp8788_dldo1239_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO3_VOUT,
+ .vsel_mask = LP8788_VOUT_5BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO3_M,
+ },
+ {
+ .name = "dldo4",
+ .id = DLDO4,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo4_vtbl),
+ .volt_table = lp8788_dldo4_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO4_VOUT,
+ .vsel_mask = LP8788_VOUT_1BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO4_M,
+ },
+ {
+ .name = "dldo5",
+ .id = DLDO5,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
+ .volt_table = lp8788_dldo578_aldo6_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO5_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO5_M,
+ },
+ {
+ .name = "dldo6",
+ .id = DLDO6,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo6_vtbl),
+ .volt_table = lp8788_dldo6_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO6_VOUT,
+ .vsel_mask = LP8788_VOUT_3BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO6_M,
+ },
+ {
+ .name = "dldo7",
+ .id = DLDO7,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
+ .volt_table = lp8788_dldo578_aldo6_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO7_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO7_M,
+ },
+ {
+ .name = "dldo8",
+ .id = DLDO8,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
+ .volt_table = lp8788_dldo578_aldo6_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO8_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_A,
+ .enable_mask = LP8788_EN_DLDO8_M,
+ },
+ {
+ .name = "dldo9",
+ .id = DLDO9,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1239_vtbl),
+ .volt_table = lp8788_dldo1239_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO9_VOUT,
+ .vsel_mask = LP8788_VOUT_5BIT_M,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_DLDO9_M,
+ },
+ {
+ .name = "dldo10",
+ .id = DLDO10,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1011_vtbl),
+ .volt_table = lp8788_dldo1011_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO10_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_DLDO10_M,
+ },
+ {
+ .name = "dldo11",
+ .id = DLDO11,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo1011_vtbl),
+ .volt_table = lp8788_dldo1011_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_DLDO11_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_DLDO11_M,
+ },
+ {
+ .name = "dldo12",
+ .id = DLDO12,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_DLDO12_M,
+ },
+};
+
+static struct regulator_desc lp8788_aldo_desc[] = {
+ {
+ .name = "aldo1",
+ .id = ALDO1,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_aldo1_vtbl),
+ .volt_table = lp8788_aldo1_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_ALDO1_VOUT,
+ .vsel_mask = LP8788_VOUT_1BIT_M,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_ALDO1_M,
+ },
+ {
+ .name = "aldo2",
+ .id = ALDO2,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_ALDO2_M,
+ },
+ {
+ .name = "aldo3",
+ .id = ALDO3,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_ALDO3_M,
+ },
+ {
+ .name = "aldo4",
+ .id = ALDO4,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_B,
+ .enable_mask = LP8788_EN_ALDO4_M,
+ },
+ {
+ .name = "aldo5",
+ .id = ALDO5,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO5_M,
+ },
+ {
+ .name = "aldo6",
+ .id = ALDO6,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_dldo578_aldo6_vtbl),
+ .volt_table = lp8788_dldo578_aldo6_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_ALDO6_VOUT,
+ .vsel_mask = LP8788_VOUT_4BIT_M,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO6_M,
+ },
+ {
+ .name = "aldo7",
+ .id = ALDO7,
+ .ops = &lp8788_ldo_voltage_table_ops,
+ .n_voltages = ARRAY_SIZE(lp8788_aldo7_vtbl),
+ .volt_table = lp8788_aldo7_vtbl,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .vsel_reg = LP8788_ALDO7_VOUT,
+ .vsel_mask = LP8788_VOUT_3BIT_M,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO7_M,
+ },
+ {
+ .name = "aldo8",
+ .id = ALDO8,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO8_M,
+ },
+ {
+ .name = "aldo9",
+ .id = ALDO9,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO9_M,
+ },
+ {
+ .name = "aldo10",
+ .id = ALDO10,
+ .ops = &lp8788_ldo_voltage_fixed_ops,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .enable_reg = LP8788_EN_LDO_C,
+ .enable_mask = LP8788_EN_ALDO10_M,
+ },
+};
+
+static int lp8788_gpio_request_ldo_en(struct lp8788_ldo *ldo,
+ enum lp8788_ext_ldo_en_id id)
+{
+ struct device *dev = ldo->lp->dev;
+ struct lp8788_ldo_enable_pin *pin = ldo->en_pin;
+ int ret, gpio, pinstate;
+ char *name[] = {
+ [EN_ALDO1] = "LP8788_EN_ALDO1",
+ [EN_ALDO234] = "LP8788_EN_ALDO234",
+ [EN_ALDO5] = "LP8788_EN_ALDO5",
+ [EN_ALDO7] = "LP8788_EN_ALDO7",
+ [EN_DLDO7] = "LP8788_EN_DLDO7",
+ [EN_DLDO911] = "LP8788_EN_DLDO911",
+ };
+
+ gpio = pin->gpio;
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "invalid gpio: %d\n", gpio);
+ return -EINVAL;
+ }
+
+ pinstate = pin->init_state;
+ ret = devm_gpio_request_one(dev, gpio, pinstate, name[id]);
+ if (ret == -EBUSY) {
+ dev_warn(dev, "gpio%d already used\n", gpio);
+ return 0;
+ }
+
+ return ret;
+}
+
+static int lp8788_config_ldo_enable_mode(struct lp8788_ldo *ldo,
+ enum lp8788_ldo_id id)
+{
+ int ret;
+ struct lp8788 *lp = ldo->lp;
+ struct lp8788_platform_data *pdata = lp->pdata;
+ enum lp8788_ext_ldo_en_id enable_id;
+ u8 en_mask[] = {
+ [EN_ALDO1] = LP8788_EN_SEL_ALDO1_M,
+ [EN_ALDO234] = LP8788_EN_SEL_ALDO234_M,
+ [EN_ALDO5] = LP8788_EN_SEL_ALDO5_M,
+ [EN_ALDO7] = LP8788_EN_SEL_ALDO7_M,
+ [EN_DLDO7] = LP8788_EN_SEL_DLDO7_M,
+ [EN_DLDO911] = LP8788_EN_SEL_DLDO911_M,
+ };
+ u8 val[] = {
+ [EN_ALDO1] = 0 << 5,
+ [EN_ALDO234] = 0 << 4,
+ [EN_ALDO5] = 0 << 3,
+ [EN_ALDO7] = 0 << 2,
+ [EN_DLDO7] = 0 << 1,
+ [EN_DLDO911] = 0 << 0,
+ };
+
+ switch (id) {
+ case DLDO7:
+ enable_id = EN_DLDO7;
+ break;
+ case DLDO9:
+ case DLDO11:
+ enable_id = EN_DLDO911;
+ break;
+ case ALDO1:
+ enable_id = EN_ALDO1;
+ break;
+ case ALDO2 ... ALDO4:
+ enable_id = EN_ALDO234;
+ break;
+ case ALDO5:
+ enable_id = EN_ALDO5;
+ break;
+ case ALDO7:
+ enable_id = EN_ALDO7;
+ break;
+ default:
+ return 0;
+ }
+
+ /* if no platform data for ldo pin, then set default enable mode */
+ if (!pdata || !pdata->ldo_pin || !pdata->ldo_pin[enable_id])
+ goto set_default_ldo_enable_mode;
+
+ ldo->en_pin = pdata->ldo_pin[enable_id];
+
+ ret = lp8788_gpio_request_ldo_en(ldo, enable_id);
+ if (ret)
+ goto set_default_ldo_enable_mode;
+
+ return ret;
+
+set_default_ldo_enable_mode:
+ return lp8788_update_bits(lp, LP8788_EN_SEL, en_mask[enable_id],
+ val[enable_id]);
+}
+
+static __devinit int lp8788_dldo_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ int id = pdev->id;
+ struct lp8788_ldo *ldo;
+ struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
+ int ret;
+
+ ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
+ if (!ldo)
+ return -ENOMEM;
+
+ ldo->lp = lp;
+ ret = lp8788_config_ldo_enable_mode(ldo, lp8788_dldo_id[id]);
+ if (ret)
+ return ret;
+
+ cfg.dev = lp->dev;
+ cfg.init_data = lp->pdata ? lp->pdata->dldo_data[id] : NULL;
+ cfg.driver_data = ldo;
+ cfg.regmap = lp->regmap;
+
+ rdev = regulator_register(&lp8788_dldo_desc[id], &cfg);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(lp->dev, "DLDO%d regulator register err = %d\n",
+ id + 1, ret);
+ return ret;
+ }
+
+ ldo->regulator = rdev;
+ platform_set_drvdata(pdev, ldo);
+
+ return 0;
+}
+
+static int __devexit lp8788_dldo_remove(struct platform_device *pdev)
+{
+ struct lp8788_ldo *ldo = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ regulator_unregister(ldo->regulator);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_dldo_driver = {
+ .probe = lp8788_dldo_probe,
+ .remove = __devexit_p(lp8788_dldo_remove),
+ .driver = {
+ .name = LP8788_DEV_DLDO,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __devinit int lp8788_aldo_probe(struct platform_device *pdev)
+{
+ struct lp8788 *lp = dev_get_drvdata(pdev->dev.parent);
+ int id = pdev->id;
+ struct lp8788_ldo *ldo;
+ struct regulator_config cfg = { };
+ struct regulator_dev *rdev;
+ int ret;
+
+ ldo = devm_kzalloc(lp->dev, sizeof(struct lp8788_ldo), GFP_KERNEL);
+ if (!ldo)
+ return -ENOMEM;
+
+ ldo->lp = lp;
+ ret = lp8788_config_ldo_enable_mode(ldo, lp8788_aldo_id[id]);
+ if (ret)
+ return ret;
+
+ cfg.dev = lp->dev;
+ cfg.init_data = lp->pdata ? lp->pdata->aldo_data[id] : NULL;
+ cfg.driver_data = ldo;
+ cfg.regmap = lp->regmap;
+
+ rdev = regulator_register(&lp8788_aldo_desc[id], &cfg);
+ if (IS_ERR(rdev)) {
+ ret = PTR_ERR(rdev);
+ dev_err(lp->dev, "ALDO%d regulator register err = %d\n",
+ id + 1, ret);
+ return ret;
+ }
+
+ ldo->regulator = rdev;
+ platform_set_drvdata(pdev, ldo);
+
+ return 0;
+}
+
+static int __devexit lp8788_aldo_remove(struct platform_device *pdev)
+{
+ struct lp8788_ldo *ldo = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+ regulator_unregister(ldo->regulator);
+
+ return 0;
+}
+
+static struct platform_driver lp8788_aldo_driver = {
+ .probe = lp8788_aldo_probe,
+ .remove = __devexit_p(lp8788_aldo_remove),
+ .driver = {
+ .name = LP8788_DEV_ALDO,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init lp8788_ldo_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&lp8788_dldo_driver);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&lp8788_aldo_driver);
+}
+subsys_initcall(lp8788_ldo_init);
+
+static void __exit lp8788_ldo_exit(void)
+{
+ platform_driver_unregister(&lp8788_aldo_driver);
+ platform_driver_unregister(&lp8788_dldo_driver);
+}
+module_exit(lp8788_ldo_exit);
+
+MODULE_DESCRIPTION("TI LP8788 LDO Driver");
+MODULE_AUTHOR("Milo Kim");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:lp8788-dldo");
+MODULE_ALIAS("platform:lp8788-aldo");
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index b9444ee08da9..f67af3c1b963 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -48,6 +48,14 @@ struct max1586_data {
};
/*
+ * V6 voltage
+ * On I2C bus, sending a "x" byte to the max1586 means :
+ * set V6 to either 0V, 1.8V, 2.5V, 3V depending on (x & 0x3)
+ * As regulator framework doesn't accept voltages to be 0V, we use 1uV.
+ */
+static int v6_voltages_uv[] = { 1, 1800000, 2500000, 3000000 };
+
+/*
* V3 voltage
* On I2C bus, sending a "x" byte to the max1586 means :
* set V3 to 0.700V + (x & 0x1f) * 0.025V
@@ -55,113 +63,49 @@ struct max1586_data {
* R24 and R25=100kOhm as described in the data sheet.
* The gain is approximately: 1 + R24/R25 + R24/185.5kOhm
*/
-static int max1586_v3_calc_voltage(struct max1586_data *max1586,
- unsigned selector)
-{
- unsigned range_uV = max1586->max_uV - max1586->min_uV;
-
- return max1586->min_uV + (selector * range_uV / MAX1586_V3_MAX_VSEL);
-}
-
-static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV,
- unsigned *selector)
+static int max1586_v3_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
{
struct max1586_data *max1586 = rdev_get_drvdata(rdev);
struct i2c_client *client = max1586->client;
- unsigned range_uV = max1586->max_uV - max1586->min_uV;
u8 v3_prog;
- if (min_uV > max1586->max_uV || max_uV < max1586->min_uV)
- return -EINVAL;
- if (min_uV < max1586->min_uV)
- min_uV = max1586->min_uV;
-
- *selector = DIV_ROUND_UP((min_uV - max1586->min_uV) *
- MAX1586_V3_MAX_VSEL, range_uV);
- if (max1586_v3_calc_voltage(max1586, *selector) > max_uV)
- return -EINVAL;
-
dev_dbg(&client->dev, "changing voltage v3 to %dmv\n",
- max1586_v3_calc_voltage(max1586, *selector) / 1000);
+ regulator_list_voltage_linear(rdev, selector) / 1000);
- v3_prog = I2C_V3_SELECT | (u8) *selector;
+ v3_prog = I2C_V3_SELECT | (u8) selector;
return i2c_smbus_write_byte(client, v3_prog);
}
-static int max1586_v3_list(struct regulator_dev *rdev, unsigned selector)
-{
- struct max1586_data *max1586 = rdev_get_drvdata(rdev);
-
- if (selector > MAX1586_V3_MAX_VSEL)
- return -EINVAL;
- return max1586_v3_calc_voltage(max1586, selector);
-}
-
-/*
- * V6 voltage
- * On I2C bus, sending a "x" byte to the max1586 means :
- * set V6 to either 0V, 1.8V, 2.5V, 3V depending on (x & 0x3)
- * As regulator framework doesn't accept voltages to be 0V, we use 1uV.
- */
-static int max1586_v6_calc_voltage(unsigned selector)
-{
- static int voltages_uv[] = { 1, 1800000, 2500000, 3000000 };
-
- return voltages_uv[selector];
-}
-
-static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV,
- unsigned int *selector)
+static int max1586_v6_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned int selector)
{
struct i2c_client *client = rdev_get_drvdata(rdev);
u8 v6_prog;
- if (min_uV < MAX1586_V6_MIN_UV || min_uV > MAX1586_V6_MAX_UV)
- return -EINVAL;
- if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV)
- return -EINVAL;
-
- if (min_uV < 1800000)
- *selector = 0;
- else if (min_uV < 2500000)
- *selector = 1;
- else if (min_uV < 3000000)
- *selector = 2;
- else if (min_uV >= 3000000)
- *selector = 3;
-
- if (max1586_v6_calc_voltage(*selector) > max_uV)
- return -EINVAL;
-
dev_dbg(&client->dev, "changing voltage v6 to %dmv\n",
- max1586_v6_calc_voltage(*selector) / 1000);
+ rdev->desc->volt_table[selector] / 1000);
- v6_prog = I2C_V6_SELECT | (u8) *selector;
+ v6_prog = I2C_V6_SELECT | (u8) selector;
return i2c_smbus_write_byte(client, v6_prog);
}
-static int max1586_v6_list(struct regulator_dev *rdev, unsigned selector)
-{
- if (selector > MAX1586_V6_MAX_VSEL)
- return -EINVAL;
- return max1586_v6_calc_voltage(selector);
-}
-
/*
* The Maxim 1586 controls V3 and V6 voltages, but offers no way of reading back
* the set up value.
*/
static struct regulator_ops max1586_v3_ops = {
- .set_voltage = max1586_v3_set,
- .list_voltage = max1586_v3_list,
+ .set_voltage_sel = max1586_v3_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
};
static struct regulator_ops max1586_v6_ops = {
- .set_voltage = max1586_v6_set,
- .list_voltage = max1586_v6_list,
+ .set_voltage_sel = max1586_v6_set_voltage_sel,
+ .list_voltage = regulator_list_voltage_table,
};
-static const struct regulator_desc max1586_reg[] = {
+static struct regulator_desc max1586_reg[] = {
{
.name = "Output_V3",
.id = MAX1586_V3,
@@ -176,6 +120,7 @@ static const struct regulator_desc max1586_reg[] = {
.ops = &max1586_v6_ops,
.type = REGULATOR_VOLTAGE,
.n_voltages = MAX1586_V6_MAX_VSEL + 1,
+ .volt_table = v6_voltages_uv,
.owner = THIS_MODULE,
},
};
@@ -213,6 +158,13 @@ static int __devinit max1586_pmic_probe(struct i2c_client *client,
goto err;
}
+ if (id == MAX1586_V3) {
+ max1586_reg[id].min_uV = max1586->min_uV;
+ max1586_reg[id].uV_step =
+ (max1586->max_uV - max1586->min_uV) /
+ MAX1586_V3_MAX_VSEL;
+ }
+
config.dev = &client->dev;
config.init_data = pdata->subdevs[i].platform_data;
config.driver_data = max1586;
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
new file mode 100644
index 000000000000..c564af6f05a3
--- /dev/null
+++ b/drivers/regulator/max77686.c
@@ -0,0 +1,389 @@
+/*
+ * max77686.c - Regulator driver for the Maxim 77686
+ *
+ * Copyright (C) 2012 Samsung Electronics
+ * Chiwoong Byun <woong.byun@smasung.com>
+ * Jonghwa Lee <jonghwa3.lee@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This driver is based on max8997.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/max77686.h>
+#include <linux/mfd/max77686-private.h>
+
+#define MAX77686_LDO_MINUV 800000
+#define MAX77686_LDO_UVSTEP 50000
+#define MAX77686_LDO_LOW_MINUV 800000
+#define MAX77686_LDO_LOW_UVSTEP 25000
+#define MAX77686_BUCK_MINUV 750000
+#define MAX77686_BUCK_UVSTEP 50000
+#define MAX77686_RAMP_DELAY 100000 /* uV/us */
+#define MAX77686_DVS_RAMP_DELAY 27500 /* uV/us */
+#define MAX77686_DVS_MINUV 600000
+#define MAX77686_DVS_UVSTEP 12500
+
+#define MAX77686_OPMODE_SHIFT 6
+#define MAX77686_OPMODE_BUCK234_SHIFT 4
+#define MAX77686_OPMODE_MASK 0x3
+
+#define MAX77686_VSEL_MASK 0x3F
+#define MAX77686_DVS_VSEL_MASK 0xFF
+
+#define MAX77686_RAMP_RATE_MASK 0xC0
+
+#define MAX77686_REGULATORS MAX77686_REG_MAX
+#define MAX77686_LDOS 26
+
+enum max77686_ramp_rate {
+ RAMP_RATE_13P75MV,
+ RAMP_RATE_27P5MV,
+ RAMP_RATE_55MV,
+ RAMP_RATE_NO_CTRL, /* 100mV/us */
+};
+
+struct max77686_data {
+ struct regulator_dev **rdev;
+};
+
+static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ unsigned int ramp_value = RAMP_RATE_NO_CTRL;
+
+ switch (ramp_delay) {
+ case 1 ... 13750:
+ ramp_value = RAMP_RATE_13P75MV;
+ break;
+ case 13751 ... 27500:
+ ramp_value = RAMP_RATE_27P5MV;
+ break;
+ case 27501 ... 55000:
+ ramp_value = RAMP_RATE_55MV;
+ break;
+ case 55001 ... 100000:
+ break;
+ default:
+ pr_warn("%s: ramp_delay: %d not supported, setting 100000\n",
+ rdev->desc->name, ramp_delay);
+ }
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ MAX77686_RAMP_RATE_MASK, ramp_value << 6);
+}
+
+static struct regulator_ops max77686_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops max77686_buck_dvs_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = max77686_set_ramp_delay,
+};
+
+#define regulator_desc_ldo(num) { \
+ .name = "LDO"#num, \
+ .id = MAX77686_LDO##num, \
+ .ops = &max77686_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_LDO_MINUV, \
+ .uV_step = MAX77686_LDO_UVSTEP, \
+ .ramp_delay = MAX77686_RAMP_DELAY, \
+ .n_voltages = MAX77686_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .vsel_mask = MAX77686_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .enable_mask = MAX77686_OPMODE_MASK \
+ << MAX77686_OPMODE_SHIFT, \
+}
+#define regulator_desc_ldo_low(num) { \
+ .name = "LDO"#num, \
+ .id = MAX77686_LDO##num, \
+ .ops = &max77686_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_LDO_LOW_MINUV, \
+ .uV_step = MAX77686_LDO_LOW_UVSTEP, \
+ .ramp_delay = MAX77686_RAMP_DELAY, \
+ .n_voltages = MAX77686_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .vsel_mask = MAX77686_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_LDO1CTRL1 + num - 1, \
+ .enable_mask = MAX77686_OPMODE_MASK \
+ << MAX77686_OPMODE_SHIFT, \
+}
+#define regulator_desc_buck(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX77686_BUCK##num, \
+ .ops = &max77686_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_BUCK_MINUV, \
+ .uV_step = MAX77686_BUCK_UVSTEP, \
+ .ramp_delay = MAX77686_RAMP_DELAY, \
+ .n_voltages = MAX77686_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_BUCK5OUT + (num - 5) * 2, \
+ .vsel_mask = MAX77686_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_BUCK5CTRL + (num - 5) * 2, \
+ .enable_mask = MAX77686_OPMODE_MASK, \
+}
+#define regulator_desc_buck1(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX77686_BUCK##num, \
+ .ops = &max77686_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_BUCK_MINUV, \
+ .uV_step = MAX77686_BUCK_UVSTEP, \
+ .ramp_delay = MAX77686_RAMP_DELAY, \
+ .n_voltages = MAX77686_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_BUCK1OUT, \
+ .vsel_mask = MAX77686_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_BUCK1CTRL, \
+ .enable_mask = MAX77686_OPMODE_MASK, \
+}
+#define regulator_desc_buck_dvs(num) { \
+ .name = "BUCK"#num, \
+ .id = MAX77686_BUCK##num, \
+ .ops = &max77686_buck_dvs_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = MAX77686_DVS_MINUV, \
+ .uV_step = MAX77686_DVS_UVSTEP, \
+ .ramp_delay = MAX77686_DVS_RAMP_DELAY, \
+ .n_voltages = MAX77686_DVS_VSEL_MASK + 1, \
+ .vsel_reg = MAX77686_REG_BUCK2DVS1 + (num - 2) * 10, \
+ .vsel_mask = MAX77686_DVS_VSEL_MASK, \
+ .enable_reg = MAX77686_REG_BUCK2CTRL1 + (num - 2) * 10, \
+ .enable_mask = MAX77686_OPMODE_MASK \
+ << MAX77686_OPMODE_BUCK234_SHIFT, \
+}
+
+static struct regulator_desc regulators[] = {
+ regulator_desc_ldo_low(1),
+ regulator_desc_ldo_low(2),
+ regulator_desc_ldo(3),
+ regulator_desc_ldo(4),
+ regulator_desc_ldo(5),
+ regulator_desc_ldo_low(6),
+ regulator_desc_ldo_low(7),
+ regulator_desc_ldo_low(8),
+ regulator_desc_ldo(9),
+ regulator_desc_ldo(10),
+ regulator_desc_ldo(11),
+ regulator_desc_ldo(12),
+ regulator_desc_ldo(13),
+ regulator_desc_ldo(14),
+ regulator_desc_ldo_low(15),
+ regulator_desc_ldo(16),
+ regulator_desc_ldo(17),
+ regulator_desc_ldo(18),
+ regulator_desc_ldo(19),
+ regulator_desc_ldo(20),
+ regulator_desc_ldo(21),
+ regulator_desc_ldo(22),
+ regulator_desc_ldo(23),
+ regulator_desc_ldo(24),
+ regulator_desc_ldo(25),
+ regulator_desc_ldo(26),
+ regulator_desc_buck1(1),
+ regulator_desc_buck_dvs(2),
+ regulator_desc_buck_dvs(3),
+ regulator_desc_buck_dvs(4),
+ regulator_desc_buck(5),
+ regulator_desc_buck(6),
+ regulator_desc_buck(7),
+ regulator_desc_buck(8),
+ regulator_desc_buck(9),
+};
+
+#ifdef CONFIG_OF
+static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
+ struct max77686_platform_data *pdata)
+{
+ struct device_node *pmic_np, *regulators_np;
+ struct max77686_regulator_data *rdata;
+ struct of_regulator_match rmatch;
+ unsigned int i;
+
+ pmic_np = iodev->dev->of_node;
+ regulators_np = of_find_node_by_name(pmic_np, "voltage-regulators");
+ if (!regulators_np) {
+ dev_err(iodev->dev, "could not find regulators sub-node\n");
+ return -EINVAL;
+ }
+
+ pdata->num_regulators = ARRAY_SIZE(regulators);
+ rdata = devm_kzalloc(iodev->dev, sizeof(*rdata) *
+ pdata->num_regulators, GFP_KERNEL);
+ if (!rdata) {
+ dev_err(iodev->dev,
+ "could not allocate memory for regulator data\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < pdata->num_regulators; i++) {
+ rmatch.name = regulators[i].name;
+ rmatch.init_data = NULL;
+ rmatch.of_node = NULL;
+ of_regulator_match(iodev->dev, regulators_np, &rmatch, 1);
+ rdata[i].initdata = rmatch.init_data;
+ }
+
+ pdata->regulators = rdata;
+
+ return 0;
+}
+#else
+static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev,
+ struct max77686_platform_data *pdata)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static __devinit int max77686_pmic_probe(struct platform_device *pdev)
+{
+ struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max77686_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct regulator_dev **rdev;
+ struct max77686_data *max77686;
+ int i, size;
+ int ret = 0;
+ struct regulator_config config = { };
+
+ dev_dbg(&pdev->dev, "%s\n", __func__);
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data found for regulator\n");
+ return -ENODEV;
+ }
+
+ if (iodev->dev->of_node) {
+ ret = max77686_pmic_dt_parse_pdata(iodev, pdata);
+ if (ret)
+ return ret;
+ }
+
+ if (pdata->num_regulators != MAX77686_REGULATORS) {
+ dev_err(&pdev->dev,
+ "Invalid initial data for regulator's initialiation\n");
+ return -EINVAL;
+ }
+
+ max77686 = devm_kzalloc(&pdev->dev, sizeof(struct max77686_data),
+ GFP_KERNEL);
+ if (!max77686)
+ return -ENOMEM;
+
+ size = sizeof(struct regulator_dev *) * MAX77686_REGULATORS;
+ max77686->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!max77686->rdev)
+ return -ENOMEM;
+
+ rdev = max77686->rdev;
+ config.dev = &pdev->dev;
+ config.regmap = iodev->regmap;
+ platform_set_drvdata(pdev, max77686);
+
+ for (i = 0; i < MAX77686_REGULATORS; i++) {
+ config.init_data = pdata->regulators[i].initdata;
+
+ rdev[i] = regulator_register(&regulators[i], &config);
+ if (IS_ERR(rdev[i])) {
+ ret = PTR_ERR(rdev[i]);
+ dev_err(&pdev->dev,
+ "regulator init failed for %d\n", i);
+ rdev[i] = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ while (--i >= 0)
+ regulator_unregister(rdev[i]);
+ return ret;
+}
+
+static int __devexit max77686_pmic_remove(struct platform_device *pdev)
+{
+ struct max77686_data *max77686 = platform_get_drvdata(pdev);
+ struct regulator_dev **rdev = max77686->rdev;
+ int i;
+
+ for (i = 0; i < MAX77686_REGULATORS; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+
+ return 0;
+}
+
+static const struct platform_device_id max77686_pmic_id[] = {
+ {"max77686-pmic", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(platform, max77686_pmic_id);
+
+static struct platform_driver max77686_pmic_driver = {
+ .driver = {
+ .name = "max77686-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = max77686_pmic_probe,
+ .remove = __devexit_p(max77686_pmic_remove),
+ .id_table = max77686_pmic_id,
+};
+
+static int __init max77686_pmic_init(void)
+{
+ return platform_driver_register(&max77686_pmic_driver);
+}
+subsys_initcall(max77686_pmic_init);
+
+static void __exit max77686_pmic_cleanup(void)
+{
+ platform_driver_unregister(&max77686_pmic_driver);
+}
+module_exit(max77686_pmic_cleanup);
+
+MODULE_DESCRIPTION("MAXIM 77686 Regulator Driver");
+MODULE_AUTHOR("Chiwoong Byun <woong.byun@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 910c9b26d499..355ca7bad9d5 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -51,7 +51,6 @@ struct max8952_data {
bool vid0;
bool vid1;
- bool en;
};
static int max8952_read_reg(struct max8952_data *max8952, u8 reg)
@@ -80,38 +79,6 @@ static int max8952_list_voltage(struct regulator_dev *rdev,
return (max8952->pdata->dvs_mode[selector] * 10 + 770) * 1000;
}
-static int max8952_is_enabled(struct regulator_dev *rdev)
-{
- struct max8952_data *max8952 = rdev_get_drvdata(rdev);
- return max8952->en;
-}
-
-static int max8952_enable(struct regulator_dev *rdev)
-{
- struct max8952_data *max8952 = rdev_get_drvdata(rdev);
-
- /* If not valid, assume "ALWAYS_HIGH" */
- if (gpio_is_valid(max8952->pdata->gpio_en))
- gpio_set_value(max8952->pdata->gpio_en, 1);
-
- max8952->en = true;
- return 0;
-}
-
-static int max8952_disable(struct regulator_dev *rdev)
-{
- struct max8952_data *max8952 = rdev_get_drvdata(rdev);
-
- /* If not valid, assume "ALWAYS_HIGH" -> not permitted */
- if (gpio_is_valid(max8952->pdata->gpio_en))
- gpio_set_value(max8952->pdata->gpio_en, 0);
- else
- return -EPERM;
-
- max8952->en = false;
- return 0;
-}
-
static int max8952_get_voltage_sel(struct regulator_dev *rdev)
{
struct max8952_data *max8952 = rdev_get_drvdata(rdev);
@@ -146,12 +113,8 @@ static int max8952_set_voltage_sel(struct regulator_dev *rdev,
static struct regulator_ops max8952_ops = {
.list_voltage = max8952_list_voltage,
- .is_enabled = max8952_is_enabled,
- .enable = max8952_enable,
- .disable = max8952_disable,
.get_voltage_sel = max8952_get_voltage_sel,
.set_voltage_sel = max8952_set_voltage_sel,
- .set_suspend_disable = max8952_disable,
};
static const struct regulator_desc regulator = {
@@ -194,6 +157,10 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client,
config.init_data = &pdata->reg_data;
config.driver_data = max8952;
+ config.ena_gpio = pdata->gpio_en;
+ if (pdata->reg_data.constraints.boot_on)
+ config.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
+
max8952->rdev = regulator_register(&regulator, &config);
if (IS_ERR(max8952->rdev)) {
@@ -202,27 +169,9 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client,
return ret;
}
- max8952->en = !!(pdata->reg_data.constraints.boot_on);
max8952->vid0 = pdata->default_mode & 0x1;
max8952->vid1 = (pdata->default_mode >> 1) & 0x1;
- if (gpio_is_valid(pdata->gpio_en)) {
- if (!gpio_request(pdata->gpio_en, "MAX8952 EN"))
- gpio_direction_output(pdata->gpio_en, max8952->en);
- else
- err = 1;
- } else
- err = 2;
-
- if (err) {
- dev_info(max8952->dev, "EN gpio invalid: assume that EN"
- "is always High\n");
- max8952->en = 1;
- pdata->gpio_en = -1; /* Mark invalid */
- }
-
- err = 0;
-
if (gpio_is_valid(pdata->gpio_vid0) &&
gpio_is_valid(pdata->gpio_vid1)) {
if (!gpio_request(pdata->gpio_vid0, "MAX8952 VID0"))
@@ -308,7 +257,6 @@ static int __devexit max8952_pmic_remove(struct i2c_client *client)
gpio_free(pdata->gpio_vid0);
gpio_free(pdata->gpio_vid1);
- gpio_free(pdata->gpio_en);
return 0;
}
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 704cd49ef375..e39a0c7260dc 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -1025,7 +1025,6 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
*/
if (pdata->buck1_gpiodvs || pdata->buck2_gpiodvs ||
pdata->buck5_gpiodvs) {
- bool gpio1set = false, gpio2set = false;
if (!gpio_is_valid(pdata->buck125_gpios[0]) ||
!gpio_is_valid(pdata->buck125_gpios[1]) ||
@@ -1035,40 +1034,20 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
goto err_out;
}
- ret = gpio_request(pdata->buck125_gpios[0],
- "MAX8997 SET1");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request"
- " on SET1\n");
- else if (ret)
+ ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[0],
+ "MAX8997 SET1");
+ if (ret)
goto err_out;
- else
- gpio1set = true;
-
- ret = gpio_request(pdata->buck125_gpios[1],
- "MAX8997 SET2");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request"
- " on SET2\n");
- else if (ret) {
- if (gpio1set)
- gpio_free(pdata->buck125_gpios[0]);
+
+ ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[1],
+ "MAX8997 SET2");
+ if (ret)
goto err_out;
- } else
- gpio2set = true;
- ret = gpio_request(pdata->buck125_gpios[2],
+ ret = devm_gpio_request(&pdev->dev, pdata->buck125_gpios[2],
"MAX8997 SET3");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request"
- " on SET3\n");
- else if (ret) {
- if (gpio1set)
- gpio_free(pdata->buck125_gpios[0]);
- if (gpio2set)
- gpio_free(pdata->buck125_gpios[1]);
+ if (ret)
goto err_out;
- }
gpio_direction_output(pdata->buck125_gpios[0],
(max8997->buck125_gpioindex >> 2)
@@ -1079,7 +1058,6 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
gpio_direction_output(pdata->buck125_gpios[2],
(max8997->buck125_gpioindex >> 0)
& 0x1); /* SET3 */
- ret = 0;
}
/* DVS-GPIO disabled */
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 18bb58b9b96e..5dfa920ff0c8 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -111,27 +111,6 @@ static const struct voltage_map_desc *ldo_voltage_map[] = {
&buck4_voltage_map_desc, /* BUCK4 */
};
-static int max8998_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- const struct voltage_map_desc *desc;
- int ldo = rdev_get_id(rdev);
- int val;
-
- if (ldo >= ARRAY_SIZE(ldo_voltage_map))
- return -EINVAL;
-
- desc = ldo_voltage_map[ldo];
- if (desc == NULL)
- return -EINVAL;
-
- val = desc->min + desc->step * selector;
- if (val > desc->max)
- return -EINVAL;
-
- return val * 1000;
-}
-
static int max8998_get_enable_register(struct regulator_dev *rdev,
int *reg, int *shift)
{
@@ -297,41 +276,18 @@ static int max8998_get_voltage_sel(struct regulator_dev *rdev)
return val;
}
-static int max8998_set_voltage_ldo(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int max8998_set_voltage_ldo_sel(struct regulator_dev *rdev,
+ unsigned selector)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
struct i2c_client *i2c = max8998->iodev->i2c;
- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
- const struct voltage_map_desc *desc;
- int ldo = rdev_get_id(rdev);
- int reg, shift = 0, mask, ret, i;
-
- if (ldo >= ARRAY_SIZE(ldo_voltage_map))
- return -EINVAL;
-
- desc = ldo_voltage_map[ldo];
- if (desc == NULL)
- return -EINVAL;
-
- if (max_vol < desc->min || min_vol > desc->max)
- return -EINVAL;
-
- if (min_vol < desc->min)
- min_vol = desc->min;
-
- i = DIV_ROUND_UP(min_vol - desc->min, desc->step);
-
- if (desc->min + desc->step*i > max_vol)
- return -EINVAL;
-
- *selector = i;
+ int reg, shift = 0, mask, ret;
ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
- ret = max8998_update_reg(i2c, reg, i<<shift, mask<<shift);
+ ret = max8998_update_reg(i2c, reg, selector<<shift, mask<<shift);
return ret;
}
@@ -347,41 +303,18 @@ static inline void buck2_gpio_set(int gpio, int v)
gpio_set_value(gpio, v & 0x1);
}
-static int max8998_set_voltage_buck(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int max8998_set_voltage_buck_sel(struct regulator_dev *rdev,
+ unsigned selector)
{
struct max8998_data *max8998 = rdev_get_drvdata(rdev);
struct max8998_platform_data *pdata =
dev_get_platdata(max8998->iodev->dev);
struct i2c_client *i2c = max8998->iodev->i2c;
- int min_vol = min_uV / 1000, max_vol = max_uV / 1000;
- const struct voltage_map_desc *desc;
int buck = rdev_get_id(rdev);
int reg, shift = 0, mask, ret;
- int i, j, previous_sel;
+ int j, previous_sel;
static u8 buck1_last_val;
- if (buck >= ARRAY_SIZE(ldo_voltage_map))
- return -EINVAL;
-
- desc = ldo_voltage_map[buck];
-
- if (desc == NULL)
- return -EINVAL;
-
- if (max_vol < desc->min || min_vol > desc->max)
- return -EINVAL;
-
- if (min_vol < desc->min)
- min_vol = desc->min;
-
- i = DIV_ROUND_UP(min_vol - desc->min, desc->step);
-
- if (desc->min + desc->step*i > max_vol)
- return -EINVAL;
-
- *selector = i;
-
ret = max8998_get_voltage_register(rdev, &reg, &shift, &mask);
if (ret)
return ret;
@@ -390,19 +323,19 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev,
/* Check if voltage needs to be changed */
/* if previous_voltage equal new voltage, return */
- if (previous_sel == i) {
+ if (previous_sel == selector) {
dev_dbg(max8998->dev, "No voltage change, old:%d, new:%d\n",
- max8998_list_voltage(rdev, previous_sel),
- max8998_list_voltage(rdev, i));
+ regulator_list_voltage_linear(rdev, previous_sel),
+ regulator_list_voltage_linear(rdev, selector));
return ret;
}
switch (buck) {
case MAX8998_BUCK1:
dev_dbg(max8998->dev,
- "BUCK1, i:%d, buck1_vol1:%d, buck1_vol2:%d\n"
+ "BUCK1, selector:%d, buck1_vol1:%d, buck1_vol2:%d\n"
"buck1_vol3:%d, buck1_vol4:%d\n",
- i, max8998->buck1_vol[0], max8998->buck1_vol[1],
+ selector, max8998->buck1_vol[0], max8998->buck1_vol[1],
max8998->buck1_vol[2], max8998->buck1_vol[3]);
if (gpio_is_valid(pdata->buck1_set1) &&
@@ -411,7 +344,7 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev,
/* check if requested voltage */
/* value is already defined */
for (j = 0; j < ARRAY_SIZE(max8998->buck1_vol); j++) {
- if (max8998->buck1_vol[j] == i) {
+ if (max8998->buck1_vol[j] == selector) {
max8998->buck1_idx = j;
buck1_gpio_set(pdata->buck1_set1,
pdata->buck1_set2, j);
@@ -426,11 +359,11 @@ static int max8998_set_voltage_buck(struct regulator_dev *rdev,
max8998->buck1_idx = (buck1_last_val % 2) + 2;
dev_dbg(max8998->dev, "max8998->buck1_idx:%d\n",
max8998->buck1_idx);
- max8998->buck1_vol[max8998->buck1_idx] = i;
+ max8998->buck1_vol[max8998->buck1_idx] = selector;
ret = max8998_get_voltage_register(rdev, &reg,
&shift,
&mask);
- ret = max8998_write_reg(i2c, reg, i);
+ ret = max8998_write_reg(i2c, reg, selector);
buck1_gpio_set(pdata->buck1_set1,
pdata->buck1_set2, max8998->buck1_idx);
buck1_last_val++;
@@ -440,20 +373,20 @@ buck1_exit:
gpio_get_value(pdata->buck1_set2));
break;
} else {
- ret = max8998_write_reg(i2c, reg, i);
+ ret = max8998_write_reg(i2c, reg, selector);
}
break;
case MAX8998_BUCK2:
dev_dbg(max8998->dev,
- "BUCK2, i:%d buck2_vol1:%d, buck2_vol2:%d\n"
- , i, max8998->buck2_vol[0], max8998->buck2_vol[1]);
+ "BUCK2, selector:%d buck2_vol1:%d, buck2_vol2:%d\n",
+ selector, max8998->buck2_vol[0], max8998->buck2_vol[1]);
if (gpio_is_valid(pdata->buck2_set3)) {
/* check if requested voltage */
/* value is already defined */
for (j = 0; j < ARRAY_SIZE(max8998->buck2_vol); j++) {
- if (max8998->buck2_vol[j] == i) {
+ if (max8998->buck2_vol[j] == selector) {
max8998->buck2_idx = j;
buck2_gpio_set(pdata->buck2_set3, j);
goto buck2_exit;
@@ -465,20 +398,21 @@ buck1_exit:
max8998_get_voltage_register(rdev,
&reg, &shift, &mask);
- ret = max8998_write_reg(i2c, reg, i);
- max8998->buck2_vol[max8998->buck2_idx] = i;
+ ret = max8998_write_reg(i2c, reg, selector);
+ max8998->buck2_vol[max8998->buck2_idx] = selector;
buck2_gpio_set(pdata->buck2_set3, max8998->buck2_idx);
buck2_exit:
dev_dbg(max8998->dev, "%s: SET3:%d\n", i2c->name,
gpio_get_value(pdata->buck2_set3));
} else {
- ret = max8998_write_reg(i2c, reg, i);
+ ret = max8998_write_reg(i2c, reg, selector);
}
break;
case MAX8998_BUCK3:
case MAX8998_BUCK4:
- ret = max8998_update_reg(i2c, reg, i<<shift, mask<<shift);
+ ret = max8998_update_reg(i2c, reg, selector<<shift,
+ mask<<shift);
break;
}
@@ -519,34 +453,30 @@ static int max8998_set_voltage_buck_time_sel(struct regulator_dev *rdev,
}
static struct regulator_ops max8998_ldo_ops = {
- .list_voltage = max8998_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.is_enabled = max8998_ldo_is_enabled,
.enable = max8998_ldo_enable,
.disable = max8998_ldo_disable,
.get_voltage_sel = max8998_get_voltage_sel,
- .set_voltage = max8998_set_voltage_ldo,
- .set_suspend_enable = max8998_ldo_enable,
- .set_suspend_disable = max8998_ldo_disable,
+ .set_voltage_sel = max8998_set_voltage_ldo_sel,
};
static struct regulator_ops max8998_buck_ops = {
- .list_voltage = max8998_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.is_enabled = max8998_ldo_is_enabled,
.enable = max8998_ldo_enable,
.disable = max8998_ldo_disable,
.get_voltage_sel = max8998_get_voltage_sel,
- .set_voltage = max8998_set_voltage_buck,
+ .set_voltage_sel = max8998_set_voltage_buck_sel,
.set_voltage_time_sel = max8998_set_voltage_buck_time_sel,
- .set_suspend_enable = max8998_ldo_enable,
- .set_suspend_disable = max8998_ldo_disable,
};
static struct regulator_ops max8998_others_ops = {
.is_enabled = max8998_ldo_is_enabled,
.enable = max8998_ldo_enable,
.disable = max8998_ldo_disable,
- .set_suspend_enable = max8998_ldo_enable,
- .set_suspend_disable = max8998_ldo_disable,
};
static struct regulator_desc regulators[] = {
@@ -860,7 +790,10 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
desc = ldo_voltage_map[id];
if (desc && regulators[index].ops != &max8998_others_ops) {
int count = (desc->max - desc->min) / desc->step + 1;
+
regulators[index].n_voltages = count;
+ regulators[index].min_uV = desc->min * 1000;
+ regulators[index].uV_step = desc->step * 1000;
}
config.dev = max8998->dev;
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 7dcdfa283e93..4932e3449fe1 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -93,78 +93,78 @@
/* Voltage Values */
-static const int mc13783_sw3_val[] = {
+static const unsigned int mc13783_sw3_val[] = {
5000000, 5000000, 5000000, 5500000,
};
-static const int mc13783_vaudio_val[] = {
+static const unsigned int mc13783_vaudio_val[] = {
2775000,
};
-static const int mc13783_viohi_val[] = {
+static const unsigned int mc13783_viohi_val[] = {
2775000,
};
-static const int mc13783_violo_val[] = {
+static const unsigned int mc13783_violo_val[] = {
1200000, 1300000, 1500000, 1800000,
};
-static const int mc13783_vdig_val[] = {
+static const unsigned int mc13783_vdig_val[] = {
1200000, 1300000, 1500000, 1800000,
};
-static const int mc13783_vgen_val[] = {
+static const unsigned int mc13783_vgen_val[] = {
1200000, 1300000, 1500000, 1800000,
1100000, 2000000, 2775000, 2400000,
};
-static const int mc13783_vrfdig_val[] = {
+static const unsigned int mc13783_vrfdig_val[] = {
1200000, 1500000, 1800000, 1875000,
};
-static const int mc13783_vrfref_val[] = {
+static const unsigned int mc13783_vrfref_val[] = {
2475000, 2600000, 2700000, 2775000,
};
-static const int mc13783_vrfcp_val[] = {
+static const unsigned int mc13783_vrfcp_val[] = {
2700000, 2775000,
};
-static const int mc13783_vsim_val[] = {
+static const unsigned int mc13783_vsim_val[] = {
1800000, 2900000, 3000000,
};
-static const int mc13783_vesim_val[] = {
+static const unsigned int mc13783_vesim_val[] = {
1800000, 2900000,
};
-static const int mc13783_vcam_val[] = {
+static const unsigned int mc13783_vcam_val[] = {
1500000, 1800000, 2500000, 2550000,
2600000, 2750000, 2800000, 3000000,
};
-static const int mc13783_vrfbg_val[] = {
+static const unsigned int mc13783_vrfbg_val[] = {
1250000,
};
-static const int mc13783_vvib_val[] = {
+static const unsigned int mc13783_vvib_val[] = {
1300000, 1800000, 2000000, 3000000,
};
-static const int mc13783_vmmc_val[] = {
+static const unsigned int mc13783_vmmc_val[] = {
1600000, 1800000, 2000000, 2600000,
2700000, 2800000, 2900000, 3000000,
};
-static const int mc13783_vrf_val[] = {
+static const unsigned int mc13783_vrf_val[] = {
1500000, 1875000, 2700000, 2775000,
};
-static const int mc13783_gpo_val[] = {
+static const unsigned int mc13783_gpo_val[] = {
3100000,
};
-static const int mc13783_pwgtdrv_val[] = {
+static const unsigned int mc13783_pwgtdrv_val[] = {
5500000,
};
@@ -328,7 +328,7 @@ static struct regulator_ops mc13783_gpo_regulator_ops = {
.enable = mc13783_gpo_regulator_enable,
.disable = mc13783_gpo_regulator_disable,
.is_enabled = mc13783_gpo_regulator_is_enabled,
- .list_voltage = mc13xxx_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage = mc13xxx_fixed_regulator_set_voltage,
.get_voltage = mc13xxx_fixed_regulator_get_voltage,
};
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 970a233dbe46..b388b746452e 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -150,12 +150,12 @@
#define MC13892_USB1 50
#define MC13892_USB1_VUSBEN (1<<3)
-static const int mc13892_vcoincell[] = {
+static const unsigned int mc13892_vcoincell[] = {
2500000, 2700000, 2800000, 2900000, 3000000, 3100000,
3200000, 3300000,
};
-static const int mc13892_sw1[] = {
+static const unsigned int mc13892_sw1[] = {
600000, 625000, 650000, 675000, 700000, 725000,
750000, 775000, 800000, 825000, 850000, 875000,
900000, 925000, 950000, 975000, 1000000, 1025000,
@@ -164,7 +164,7 @@ static const int mc13892_sw1[] = {
1350000, 1375000
};
-static const int mc13892_sw[] = {
+static const unsigned int mc13892_sw[] = {
600000, 625000, 650000, 675000, 700000, 725000,
750000, 775000, 800000, 825000, 850000, 875000,
900000, 925000, 950000, 975000, 1000000, 1025000,
@@ -176,65 +176,65 @@ static const int mc13892_sw[] = {
1800000, 1825000, 1850000, 1875000
};
-static const int mc13892_swbst[] = {
+static const unsigned int mc13892_swbst[] = {
5000000,
};
-static const int mc13892_viohi[] = {
+static const unsigned int mc13892_viohi[] = {
2775000,
};
-static const int mc13892_vpll[] = {
+static const unsigned int mc13892_vpll[] = {
1050000, 1250000, 1650000, 1800000,
};
-static const int mc13892_vdig[] = {
+static const unsigned int mc13892_vdig[] = {
1050000, 1250000, 1650000, 1800000,
};
-static const int mc13892_vsd[] = {
+static const unsigned int mc13892_vsd[] = {
1800000, 2000000, 2600000, 2700000,
2800000, 2900000, 3000000, 3150000,
};
-static const int mc13892_vusb2[] = {
+static const unsigned int mc13892_vusb2[] = {
2400000, 2600000, 2700000, 2775000,
};
-static const int mc13892_vvideo[] = {
+static const unsigned int mc13892_vvideo[] = {
2700000, 2775000, 2500000, 2600000,
};
-static const int mc13892_vaudio[] = {
+static const unsigned int mc13892_vaudio[] = {
2300000, 2500000, 2775000, 3000000,
};
-static const int mc13892_vcam[] = {
+static const unsigned int mc13892_vcam[] = {
2500000, 2600000, 2750000, 3000000,
};
-static const int mc13892_vgen1[] = {
+static const unsigned int mc13892_vgen1[] = {
1200000, 1500000, 2775000, 3150000,
};
-static const int mc13892_vgen2[] = {
+static const unsigned int mc13892_vgen2[] = {
1200000, 1500000, 1600000, 1800000,
2700000, 2800000, 3000000, 3150000,
};
-static const int mc13892_vgen3[] = {
+static const unsigned int mc13892_vgen3[] = {
1800000, 2900000,
};
-static const int mc13892_vusb[] = {
+static const unsigned int mc13892_vusb[] = {
3300000,
};
-static const int mc13892_gpo[] = {
+static const unsigned int mc13892_gpo[] = {
2750000,
};
-static const int mc13892_pwgtdrv[] = {
+static const unsigned int mc13892_pwgtdrv[] = {
5000000,
};
@@ -394,7 +394,7 @@ static struct regulator_ops mc13892_gpo_regulator_ops = {
.enable = mc13892_gpo_regulator_enable,
.disable = mc13892_gpo_regulator_disable,
.is_enabled = mc13892_gpo_regulator_is_enabled,
- .list_voltage = mc13xxx_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage = mc13xxx_fixed_regulator_set_voltage,
.get_voltage = mc13xxx_fixed_regulator_get_voltage,
};
@@ -436,7 +436,7 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev,
u32 valread;
int ret;
- value = mc13892_regulators[id].voltages[selector];
+ value = rdev->desc->volt_table[selector];
mc13xxx_lock(priv->mc13xxx);
ret = mc13xxx_reg_read(priv->mc13xxx,
@@ -469,8 +469,7 @@ err:
}
static struct regulator_ops mc13892_sw_regulator_ops = {
- .is_enabled = mc13xxx_sw_regulator_is_enabled,
- .list_voltage = mc13xxx_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage_sel = mc13892_sw_regulator_set_voltage_sel,
.get_voltage = mc13892_sw_regulator_get_voltage,
};
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 4fa9704739bc..d6eda28ca5d0 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -80,20 +80,6 @@ static int mc13xxx_regulator_is_enabled(struct regulator_dev *rdev)
return (val & mc13xxx_regulators[id].enable_bit) != 0;
}
-int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- int id = rdev_get_id(rdev);
- struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
- struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
-
- if (selector >= mc13xxx_regulators[id].desc.n_voltages)
- return -EINVAL;
-
- return mc13xxx_regulators[id].voltages[selector];
-}
-EXPORT_SYMBOL_GPL(mc13xxx_regulator_list_voltage);
-
static int mc13xxx_regulator_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
@@ -135,14 +121,14 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
- return mc13xxx_regulators[id].voltages[val];
+ return rdev->desc->volt_table[val];
}
struct regulator_ops mc13xxx_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
- .list_voltage = mc13xxx_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage_sel = mc13xxx_regulator_set_voltage_sel,
.get_voltage = mc13xxx_regulator_get_voltage,
};
@@ -151,15 +137,13 @@ EXPORT_SYMBOL_GPL(mc13xxx_regulator_ops);
int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV,
int max_uV, unsigned *selector)
{
- struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
- struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
__func__, id, min_uV, max_uV);
- if (min_uV >= mc13xxx_regulators[id].voltages[0] &&
- max_uV <= mc13xxx_regulators[id].voltages[0])
+ if (min_uV <= rdev->desc->volt_table[0] &&
+ rdev->desc->volt_table[0] <= max_uV)
return 0;
else
return -EINVAL;
@@ -168,13 +152,11 @@ EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage);
int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev)
{
- struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
- struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators;
int id = rdev_get_id(rdev);
dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
- return mc13xxx_regulators[id].voltages[0];
+ return rdev->desc->volt_table[0];
}
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_get_voltage);
@@ -182,18 +164,12 @@ struct regulator_ops mc13xxx_fixed_regulator_ops = {
.enable = mc13xxx_regulator_enable,
.disable = mc13xxx_regulator_disable,
.is_enabled = mc13xxx_regulator_is_enabled,
- .list_voltage = mc13xxx_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage = mc13xxx_fixed_regulator_set_voltage,
.get_voltage = mc13xxx_fixed_regulator_get_voltage,
};
EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops);
-int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
-{
- return 1;
-}
-EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
-
#ifdef CONFIG_OF
int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
{
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index 044aba4d28ec..eaff5510b6df 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -22,7 +22,6 @@ struct mc13xxx_regulator {
int vsel_shift;
int vsel_mask;
int hi_bit;
- int const *voltages;
};
struct mc13xxx_regulator_priv {
@@ -33,10 +32,6 @@ struct mc13xxx_regulator_priv {
struct regulator_dev *regulators[];
};
-extern int mc13xxx_sw_regulator(struct regulator_dev *rdev);
-extern int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev);
-extern int mc13xxx_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned selector);
extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector);
extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev);
@@ -68,6 +63,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.desc = { \
.name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
+ .volt_table = _voltages, \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
.id = prefix ## _name, \
@@ -78,7 +74,6 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.vsel_reg = prefix ## _vsel_reg, \
.vsel_shift = prefix ## _vsel_reg ## _ ## _name ## VSEL,\
.vsel_mask = prefix ## _vsel_reg ## _ ## _name ## VSEL_M,\
- .voltages = _voltages, \
}
#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \
@@ -86,6 +81,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.desc = { \
.name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
+ .volt_table = _voltages, \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
.id = prefix ## _name, \
@@ -93,7 +89,6 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
}, \
.reg = prefix ## _reg, \
.enable_bit = prefix ## _reg ## _ ## _name ## EN, \
- .voltages = _voltages, \
}
#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \
@@ -101,6 +96,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
.desc = { \
.name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
+ .volt_table = _voltages, \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
.id = prefix ## _name, \
@@ -108,7 +104,6 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
}, \
.reg = prefix ## _reg, \
.enable_bit = prefix ## _reg ## _ ## _name ## EN, \
- .voltages = _voltages, \
}
#define MC13xxx_DEFINE_SW(_name, _reg, _vsel_reg, _voltages, ops) \
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 56593b75168a..3e4106f2bda9 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -20,7 +20,7 @@ static void of_get_regulation_constraints(struct device_node *np,
struct regulator_init_data **init_data)
{
const __be32 *min_uV, *max_uV, *uV_offset;
- const __be32 *min_uA, *max_uA;
+ const __be32 *min_uA, *max_uA, *ramp_delay;
struct regulation_constraints *constraints = &(*init_data)->constraints;
constraints->name = of_get_property(np, "regulator-name", NULL);
@@ -60,6 +60,10 @@ static void of_get_regulation_constraints(struct device_node *np,
constraints->always_on = true;
else /* status change should be possible if not always on. */
constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+
+ ramp_delay = of_get_property(np, "regulator-ramp-delay", NULL);
+ if (ramp_delay)
+ constraints->ramp_delay = be32_to_cpu(*ramp_delay);
}
/**
@@ -88,15 +92,17 @@ struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
/**
- * of_regulator_match - extract regulator init data
+ * of_regulator_match - extract regulator init data when node
+ * property "regulator-compatible" matches with the regulator name.
* @dev: device requesting the data
* @node: parent device node of the regulators
* @matches: match table for the regulators
* @num_matches: number of entries in match table
*
* This function uses a match table specified by the regulator driver and
- * looks up the corresponding init data in the device tree. Note that the
- * match table is modified in place.
+ * looks up the corresponding init data in the device tree if
+ * regulator-compatible matches. Note that the match table is modified
+ * in place.
*
* Returns the number of matches found or a negative error code on failure.
*/
@@ -106,27 +112,40 @@ int of_regulator_match(struct device *dev, struct device_node *node,
{
unsigned int count = 0;
unsigned int i;
+ const char *regulator_comp;
+ struct device_node *child;
if (!dev || !node)
return -EINVAL;
- for (i = 0; i < num_matches; i++) {
- struct of_regulator_match *match = &matches[i];
- struct device_node *child;
-
- child = of_find_node_by_name(node, match->name);
- if (!child)
- continue;
-
- match->init_data = of_get_regulator_init_data(dev, child);
- if (!match->init_data) {
- dev_err(dev, "failed to parse DT for regulator %s\n",
+ for_each_child_of_node(node, child) {
+ regulator_comp = of_get_property(child,
+ "regulator-compatible", NULL);
+ if (!regulator_comp) {
+ dev_err(dev, "regulator-compatible is missing for node %s\n",
child->name);
- return -EINVAL;
+ continue;
+ }
+ for (i = 0; i < num_matches; i++) {
+ struct of_regulator_match *match = &matches[i];
+ if (match->of_node)
+ continue;
+
+ if (strcmp(match->name, regulator_comp))
+ continue;
+
+ match->init_data =
+ of_get_regulator_init_data(dev, child);
+ if (!match->init_data) {
+ dev_err(dev,
+ "failed to parse DT for regulator %s\n",
+ child->name);
+ return -EINVAL;
+ }
+ match->of_node = child;
+ count++;
+ break;
}
-
- match->of_node = child;
- count++;
}
return count;
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index 795f75a6ac33..17d19fbbc490 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -257,8 +257,7 @@ static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
unsigned int reg;
palmas_smps_read(pmic->palmas, palmas_regs_info[id].ctrl_addr, &reg);
- reg &= ~PALMAS_SMPS12_CTRL_STATUS_MASK;
- reg >>= PALMAS_SMPS12_CTRL_STATUS_SHIFT;
+ reg &= ~PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
switch (mode) {
case REGULATOR_MODE_NORMAL:
@@ -374,11 +373,22 @@ static int palmas_set_voltage_smps_sel(struct regulator_dev *dev,
static int palmas_map_voltage_smps(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
+ struct palmas_pmic *pmic = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
int ret, voltage;
- ret = ((min_uV - 500000) / 10000) + 1;
- if (ret < 0)
- return ret;
+ if (min_uV == 0)
+ return 0;
+
+ if (pmic->range[id]) { /* RANGE is x2 */
+ if (min_uV < 1000000)
+ min_uV = 1000000;
+ ret = DIV_ROUND_UP(min_uV - 1000000, 20000) + 1;
+ } else { /* RANGE is x1 */
+ if (min_uV < 500000)
+ min_uV = 500000;
+ ret = DIV_ROUND_UP(min_uV - 500000, 10000) + 1;
+ }
/* Map back into a voltage to verify we're still in bounds */
voltage = palmas_list_voltage_smps(rdev, ret);
@@ -400,19 +410,14 @@ static struct regulator_ops palmas_ops_smps = {
.map_voltage = palmas_map_voltage_smps,
};
-static int palmas_list_voltage_smps10(struct regulator_dev *dev,
- unsigned selector)
-{
- return 3750000 + (selector * 1250000);
-}
-
static struct regulator_ops palmas_ops_smps10 = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = palmas_list_voltage_smps10,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
};
static int palmas_is_enabled_ldo(struct regulator_dev *dev)
@@ -522,7 +527,15 @@ static int palmas_smps_init(struct palmas *palmas, int id,
if (ret)
return ret;
- if (id != PALMAS_REG_SMPS10) {
+ switch (id) {
+ case PALMAS_REG_SMPS10:
+ if (reg_init->mode_sleep) {
+ reg &= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK;
+ reg |= reg_init->mode_sleep <<
+ PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT;
+ }
+ break;
+ default:
if (reg_init->warm_reset)
reg |= PALMAS_SMPS12_CTRL_WR_S;
@@ -534,14 +547,8 @@ static int palmas_smps_init(struct palmas *palmas, int id,
reg |= reg_init->mode_sleep <<
PALMAS_SMPS12_CTRL_MODE_SLEEP_SHIFT;
}
- } else {
- if (reg_init->mode_sleep) {
- reg &= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK;
- reg |= reg_init->mode_sleep <<
- PALMAS_SMPS10_CTRL_MODE_SLEEP_SHIFT;
- }
-
}
+
ret = palmas_smps_write(palmas, addr, reg);
if (ret)
return ret;
@@ -665,10 +672,8 @@ static __devinit int palmas_probe(struct platform_device *pdev)
pmic->desc[id].name = palmas_regs_info[id].name;
pmic->desc[id].id = id;
- if (id != PALMAS_REG_SMPS10) {
- pmic->desc[id].ops = &palmas_ops_smps;
- pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES;
- } else {
+ switch (id) {
+ case PALMAS_REG_SMPS10:
pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES;
pmic->desc[id].ops = &palmas_ops_smps10;
pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL;
@@ -677,6 +682,12 @@ static __devinit int palmas_probe(struct platform_device *pdev)
PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
PALMAS_SMPS10_STATUS);
pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
+ pmic->desc[id].min_uV = 3750000;
+ pmic->desc[id].uV_step = 1250000;
+ break;
+ default:
+ pmic->desc[id].ops = &palmas_ops_smps;
+ pmic->desc[id].n_voltages = PALMAS_SMPS_NUM_VOLTAGES;
}
pmic->desc[id].type = REGULATOR_VOLTAGE;
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 8211101121f0..68777acc099f 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -18,80 +18,80 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/ezx-pcap.h>
-static const u16 V1_table[] = {
- 2775, 1275, 1600, 1725, 1825, 1925, 2075, 2275,
+static const unsigned int V1_table[] = {
+ 2775000, 1275000, 1600000, 1725000, 1825000, 1925000, 2075000, 2275000,
};
-static const u16 V2_table[] = {
- 2500, 2775,
+static const unsigned int V2_table[] = {
+ 2500000, 2775000,
};
-static const u16 V3_table[] = {
- 1075, 1275, 1550, 1725, 1876, 1950, 2075, 2275,
+static const unsigned int V3_table[] = {
+ 1075000, 1275000, 1550000, 1725000, 1876000, 1950000, 2075000, 2275000,
};
-static const u16 V4_table[] = {
- 1275, 1550, 1725, 1875, 1950, 2075, 2275, 2775,
+static const unsigned int V4_table[] = {
+ 1275000, 1550000, 1725000, 1875000, 1950000, 2075000, 2275000, 2775000,
};
-static const u16 V5_table[] = {
- 1875, 2275, 2475, 2775,
+static const unsigned int V5_table[] = {
+ 1875000, 2275000, 2475000, 2775000,
};
-static const u16 V6_table[] = {
- 2475, 2775,
+static const unsigned int V6_table[] = {
+ 2475000, 2775000,
};
-static const u16 V7_table[] = {
- 1875, 2775,
+static const unsigned int V7_table[] = {
+ 1875000, 2775000,
};
#define V8_table V4_table
-static const u16 V9_table[] = {
- 1575, 1875, 2475, 2775,
+static const unsigned int V9_table[] = {
+ 1575000, 1875000, 2475000, 2775000,
};
-static const u16 V10_table[] = {
- 5000,
+static const unsigned int V10_table[] = {
+ 5000000,
};
-static const u16 VAUX1_table[] = {
- 1875, 2475, 2775, 3000,
+static const unsigned int VAUX1_table[] = {
+ 1875000, 2475000, 2775000, 3000000,
};
#define VAUX2_table VAUX1_table
-static const u16 VAUX3_table[] = {
- 1200, 1200, 1200, 1200, 1400, 1600, 1800, 2000,
- 2200, 2400, 2600, 2800, 3000, 3200, 3400, 3600,
+static const unsigned int VAUX3_table[] = {
+ 1200000, 1200000, 1200000, 1200000, 1400000, 1600000, 1800000, 2000000,
+ 2200000, 2400000, 2600000, 2800000, 3000000, 3200000, 3400000, 3600000,
};
-static const u16 VAUX4_table[] = {
- 1800, 1800, 3000, 5000,
+static const unsigned int VAUX4_table[] = {
+ 1800000, 1800000, 3000000, 5000000,
};
-static const u16 VSIM_table[] = {
- 1875, 3000,
+static const unsigned int VSIM_table[] = {
+ 1875000, 3000000,
};
-static const u16 VSIM2_table[] = {
- 1875,
+static const unsigned int VSIM2_table[] = {
+ 1875000,
};
-static const u16 VVIB_table[] = {
- 1300, 1800, 2000, 3000,
+static const unsigned int VVIB_table[] = {
+ 1300000, 1800000, 2000000, 3000000,
};
-static const u16 SW1_table[] = {
- 900, 950, 1000, 1050, 1100, 1150, 1200, 1250,
- 1300, 1350, 1400, 1450, 1500, 1600, 1875, 2250,
+static const unsigned int SW1_table[] = {
+ 900000, 950000, 1000000, 1050000, 1100000, 1150000, 1200000, 1250000,
+ 1300000, 1350000, 1400000, 1450000, 1500000, 1600000, 1875000, 2250000,
};
#define SW2_table SW1_table
-static const u16 SW3_table[] = {
- 4000, 4500, 5000, 5500,
+static const unsigned int SW3_table[] = {
+ 4000000, 4500000, 5000000, 5500000,
};
struct pcap_regulator {
@@ -100,8 +100,6 @@ struct pcap_regulator {
const u8 index;
const u8 stby;
const u8 lowpwr;
- const u8 n_voltages;
- const u16 *voltage_table;
};
#define NA 0xff
@@ -113,8 +111,6 @@ struct pcap_regulator {
.index = _index, \
.stby = _stby, \
.lowpwr = _lowpwr, \
- .n_voltages = ARRAY_SIZE(_vreg##_table), \
- .voltage_table = _vreg##_table, \
}
static struct pcap_regulator vreg_table[] = {
@@ -157,11 +153,11 @@ static int pcap_regulator_set_voltage_sel(struct regulator_dev *rdev,
void *pcap = rdev_get_drvdata(rdev);
/* the regulator doesn't support voltage switching */
- if (vreg->n_voltages == 1)
+ if (rdev->desc->n_voltages == 1)
return -EINVAL;
return ezx_pcap_set_bits(pcap, vreg->reg,
- (vreg->n_voltages - 1) << vreg->index,
+ (rdev->desc->n_voltages - 1) << vreg->index,
selector << vreg->index);
}
@@ -171,11 +167,11 @@ static int pcap_regulator_get_voltage_sel(struct regulator_dev *rdev)
void *pcap = rdev_get_drvdata(rdev);
u32 tmp;
- if (vreg->n_voltages == 1)
+ if (rdev->desc->n_voltages == 1)
return 0;
ezx_pcap_read(pcap, vreg->reg, &tmp);
- tmp = ((tmp >> vreg->index) & (vreg->n_voltages - 1));
+ tmp = ((tmp >> vreg->index) & (rdev->desc->n_voltages - 1));
return tmp;
}
@@ -214,16 +210,8 @@ static int pcap_regulator_is_enabled(struct regulator_dev *rdev)
return (tmp >> vreg->en) & 1;
}
-static int pcap_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned int index)
-{
- struct pcap_regulator *vreg = &vreg_table[rdev_get_id(rdev)];
-
- return vreg->voltage_table[index] * 1000;
-}
-
static struct regulator_ops pcap_regulator_ops = {
- .list_voltage = pcap_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_voltage_sel = pcap_regulator_set_voltage_sel,
.get_voltage_sel = pcap_regulator_get_voltage_sel,
.enable = pcap_regulator_enable,
@@ -236,6 +224,7 @@ static struct regulator_ops pcap_regulator_ops = {
.name = #_vreg, \
.id = _vreg, \
.n_voltages = ARRAY_SIZE(_vreg##_table), \
+ .volt_table = _vreg##_table, \
.ops = &pcap_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 3c9d14c0017b..092e5cb848a1 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -100,13 +100,12 @@ static unsigned int ldo_voltage_value(u8 bits)
return 900 + (bits * 100);
}
-static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- unsigned *selector)
+static int pcf50633_regulator_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
struct pcf50633 *pcf;
int regulator_id, millivolts;
- u8 volt_bits, regnr;
+ u8 volt_bits;
pcf = rdev_get_drvdata(rdev);
@@ -116,15 +115,11 @@ static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
millivolts = min_uV / 1000;
- regnr = rdev->desc->vsel_reg;
-
switch (regulator_id) {
case PCF50633_REGULATOR_AUTO:
volt_bits = auto_voltage_bits(millivolts);
break;
case PCF50633_REGULATOR_DOWN1:
- volt_bits = down_voltage_bits(millivolts);
- break;
case PCF50633_REGULATOR_DOWN2:
volt_bits = down_voltage_bits(millivolts);
break;
@@ -142,9 +137,7 @@ static int pcf50633_regulator_set_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
- *selector = volt_bits;
-
- return pcf50633_reg_write(pcf, regnr, volt_bits);
+ return volt_bits;
}
static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev,
@@ -159,8 +152,6 @@ static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev,
millivolts = auto_voltage_value(index);
break;
case PCF50633_REGULATOR_DOWN1:
- millivolts = down_voltage_value(index);
- break;
case PCF50633_REGULATOR_DOWN2:
millivolts = down_voltage_value(index);
break;
@@ -182,9 +173,10 @@ static int pcf50633_regulator_list_voltage(struct regulator_dev *rdev,
}
static struct regulator_ops pcf50633_regulator_ops = {
- .set_voltage = pcf50633_regulator_set_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = pcf50633_regulator_list_voltage,
+ .map_voltage = pcf50633_regulator_map_voltage,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
diff --git a/drivers/regulator/rc5t583-regulator.c b/drivers/regulator/rc5t583-regulator.c
index 1d34e64a1307..8bf4e8c9de9a 100644
--- a/drivers/regulator/rc5t583-regulator.c
+++ b/drivers/regulator/rc5t583-regulator.c
@@ -42,7 +42,6 @@ struct rc5t583_regulator_info {
/* Regulator specific turn-on delay and voltage settling time*/
int enable_uv_per_us;
- int change_uv_per_us;
/* Used by regulator core */
struct regulator_desc desc;
@@ -66,25 +65,6 @@ static int rc5t583_regulator_enable_time(struct regulator_dev *rdev)
return DIV_ROUND_UP(curr_uV, reg->reg_info->enable_uv_per_us);
}
-static int rc5t583_set_voltage_time_sel(struct regulator_dev *rdev,
- unsigned int old_selector, unsigned int new_selector)
-{
- struct rc5t583_regulator *reg = rdev_get_drvdata(rdev);
- int old_uV, new_uV;
- old_uV = regulator_list_voltage_linear(rdev, old_selector);
-
- if (old_uV < 0)
- return old_uV;
-
- new_uV = regulator_list_voltage_linear(rdev, new_selector);
- if (new_uV < 0)
- return new_uV;
-
- return DIV_ROUND_UP(abs(old_uV - new_uV),
- reg->reg_info->change_uv_per_us);
-}
-
-
static struct regulator_ops rc5t583_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
@@ -94,7 +74,7 @@ static struct regulator_ops rc5t583_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
- .set_voltage_time_sel = rc5t583_set_voltage_time_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
};
#define RC5T583_REG(_id, _en_reg, _en_bit, _disc_reg, _disc_bit, \
@@ -104,7 +84,6 @@ static struct regulator_ops rc5t583_ops = {
.disc_bit = _disc_bit, \
.deepsleep_reg = RC5T583_REG_##_id##DAC_DS, \
.enable_uv_per_us = _enable_mv * 1000, \
- .change_uv_per_us = 40 * 1000, \
.deepsleep_id = RC5T583_DS_##_id, \
.desc = { \
.name = "rc5t583-regulator-"#_id, \
@@ -119,6 +98,7 @@ static struct regulator_ops rc5t583_ops = {
.enable_mask = BIT(_en_bit), \
.min_uV = _min_mv * 1000, \
.uV_step = _step_uV, \
+ .ramp_delay = 40 * 1000, \
}, \
}
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
new file mode 100644
index 000000000000..4669dc9ac74a
--- /dev/null
+++ b/drivers/regulator/s2mps11.c
@@ -0,0 +1,363 @@
+/*
+ * s2mps11.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/bug.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/samsung/core.h>
+#include <linux/mfd/samsung/s2mps11.h>
+
+struct s2mps11_info {
+ struct regulator_dev **rdev;
+
+ int ramp_delay2;
+ int ramp_delay34;
+ int ramp_delay5;
+ int ramp_delay16;
+ int ramp_delay7810;
+ int ramp_delay9;
+
+ bool buck6_ramp;
+ bool buck2_ramp;
+ bool buck3_ramp;
+ bool buck4_ramp;
+};
+
+static int get_ramp_delay(int ramp_delay)
+{
+ unsigned char cnt = 0;
+
+ ramp_delay /= 6;
+
+ while (true) {
+ ramp_delay = ramp_delay >> 1;
+ if (ramp_delay == 0)
+ break;
+ cnt++;
+ }
+ return cnt;
+}
+
+static struct regulator_ops s2mps11_ldo_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_ops s2mps11_buck_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .is_enabled = regulator_is_enabled_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+#define regulator_desc_ldo1(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPS11_LDO##num, \
+ .ops = &s2mps11_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_LDO_MIN, \
+ .uV_step = S2MPS11_LDO_STEP1, \
+ .n_voltages = S2MPS11_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPS11_LDO_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+#define regulator_desc_ldo2(num) { \
+ .name = "LDO"#num, \
+ .id = S2MPS11_LDO##num, \
+ .ops = &s2mps11_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_LDO_MIN, \
+ .uV_step = S2MPS11_LDO_STEP2, \
+ .n_voltages = S2MPS11_LDO_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_L1CTRL + num - 1, \
+ .vsel_mask = S2MPS11_LDO_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_L1CTRL + num - 1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+#define regulator_desc_buck1_4(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS11_BUCK##num, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_BUCK_MIN1, \
+ .uV_step = S2MPS11_BUCK_STEP1, \
+ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_B1CTRL2 + (num - 1) * 2, \
+ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B1CTRL1 + (num - 1) * 2, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+#define regulator_desc_buck5 { \
+ .name = "BUCK5", \
+ .id = S2MPS11_BUCK5, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_BUCK_MIN1, \
+ .uV_step = S2MPS11_BUCK_STEP1, \
+ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_B5CTRL2, \
+ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B5CTRL1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+#define regulator_desc_buck6_8(num) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS11_BUCK##num, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_BUCK_MIN1, \
+ .uV_step = S2MPS11_BUCK_STEP1, \
+ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_B6CTRL2 + (num - 6) * 2, \
+ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B6CTRL1 + (num - 6) * 2, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+#define regulator_desc_buck9 { \
+ .name = "BUCK9", \
+ .id = S2MPS11_BUCK9, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_BUCK_MIN3, \
+ .uV_step = S2MPS11_BUCK_STEP3, \
+ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_B9CTRL2, \
+ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B9CTRL1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+#define regulator_desc_buck10 { \
+ .name = "BUCK10", \
+ .id = S2MPS11_BUCK10, \
+ .ops = &s2mps11_buck_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = S2MPS11_BUCK_MIN2, \
+ .uV_step = S2MPS11_BUCK_STEP2, \
+ .n_voltages = S2MPS11_BUCK_N_VOLTAGES, \
+ .vsel_reg = S2MPS11_REG_B9CTRL2, \
+ .vsel_mask = S2MPS11_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS11_REG_B9CTRL1, \
+ .enable_mask = S2MPS11_ENABLE_MASK \
+}
+
+static struct regulator_desc regulators[] = {
+ regulator_desc_ldo2(1),
+ regulator_desc_ldo1(2),
+ regulator_desc_ldo1(3),
+ regulator_desc_ldo1(4),
+ regulator_desc_ldo1(5),
+ regulator_desc_ldo2(6),
+ regulator_desc_ldo1(7),
+ regulator_desc_ldo1(8),
+ regulator_desc_ldo1(9),
+ regulator_desc_ldo1(10),
+ regulator_desc_ldo2(11),
+ regulator_desc_ldo1(12),
+ regulator_desc_ldo1(13),
+ regulator_desc_ldo1(14),
+ regulator_desc_ldo1(15),
+ regulator_desc_ldo1(16),
+ regulator_desc_ldo1(17),
+ regulator_desc_ldo1(18),
+ regulator_desc_ldo1(19),
+ regulator_desc_ldo1(20),
+ regulator_desc_ldo1(21),
+ regulator_desc_ldo2(22),
+ regulator_desc_ldo2(23),
+ regulator_desc_ldo1(24),
+ regulator_desc_ldo1(25),
+ regulator_desc_ldo1(26),
+ regulator_desc_ldo2(27),
+ regulator_desc_ldo1(28),
+ regulator_desc_ldo1(29),
+ regulator_desc_ldo1(30),
+ regulator_desc_ldo1(31),
+ regulator_desc_ldo1(32),
+ regulator_desc_ldo1(33),
+ regulator_desc_ldo1(34),
+ regulator_desc_ldo1(35),
+ regulator_desc_ldo1(36),
+ regulator_desc_ldo1(37),
+ regulator_desc_ldo1(38),
+ regulator_desc_buck1_4(1),
+ regulator_desc_buck1_4(2),
+ regulator_desc_buck1_4(3),
+ regulator_desc_buck1_4(4),
+ regulator_desc_buck5,
+ regulator_desc_buck6_8(6),
+ regulator_desc_buck6_8(7),
+ regulator_desc_buck6_8(8),
+ regulator_desc_buck9,
+ regulator_desc_buck10,
+};
+
+static __devinit int s2mps11_pmic_probe(struct platform_device *pdev)
+{
+ struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct regulator_config config = { };
+ struct regulator_dev **rdev;
+ struct s2mps11_info *s2mps11;
+ int i, ret, size;
+ unsigned char ramp_enable, ramp_reg = 0;
+
+ if (!pdata) {
+ dev_err(pdev->dev.parent, "Platform data not supplied\n");
+ return -ENODEV;
+ }
+
+ s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info),
+ GFP_KERNEL);
+ if (!s2mps11)
+ return -ENOMEM;
+
+ size = sizeof(struct regulator_dev *) * S2MPS11_REGULATOR_MAX;
+ s2mps11->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!s2mps11->rdev) {
+ return -ENOMEM;
+ }
+
+ rdev = s2mps11->rdev;
+ platform_set_drvdata(pdev, s2mps11);
+
+ s2mps11->ramp_delay2 = pdata->buck2_ramp_delay;
+ s2mps11->ramp_delay34 = pdata->buck34_ramp_delay;
+ s2mps11->ramp_delay5 = pdata->buck5_ramp_delay;
+ s2mps11->ramp_delay16 = pdata->buck16_ramp_delay;
+ s2mps11->ramp_delay7810 = pdata->buck7810_ramp_delay;
+ s2mps11->ramp_delay9 = pdata->buck9_ramp_delay;
+
+ s2mps11->buck6_ramp = pdata->buck6_ramp_enable;
+ s2mps11->buck2_ramp = pdata->buck2_ramp_enable;
+ s2mps11->buck3_ramp = pdata->buck3_ramp_enable;
+ s2mps11->buck4_ramp = pdata->buck4_ramp_enable;
+
+ ramp_enable = (s2mps11->buck2_ramp << 3) | (s2mps11->buck3_ramp << 2) |
+ (s2mps11->buck4_ramp << 1) | s2mps11->buck6_ramp ;
+
+ if (ramp_enable) {
+ if (s2mps11->buck2_ramp)
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) >> 6;
+ if (s2mps11->buck3_ramp || s2mps11->buck4_ramp)
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) >> 4;
+ sec_reg_write(iodev, S2MPS11_REG_RAMP, ramp_reg | ramp_enable);
+ }
+
+ ramp_reg &= 0x00;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) >> 6;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) >> 4;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) >> 2;
+ ramp_reg |= get_ramp_delay(s2mps11->ramp_delay9);
+ sec_reg_write(iodev, S2MPS11_REG_RAMP_BUCK, ramp_reg);
+
+ for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
+
+ config.dev = &pdev->dev;
+ config.regmap = iodev->regmap;
+ config.init_data = pdata->regulators[i].initdata;
+ config.driver_data = s2mps11;
+
+ rdev[i] = regulator_register(&regulators[i], &config);
+ if (IS_ERR(rdev[i])) {
+ ret = PTR_ERR(rdev[i]);
+ dev_err(&pdev->dev, "regulator init failed for %d\n",
+ i);
+ rdev[i] = NULL;
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ for (i = 0; i < S2MPS11_REGULATOR_MAX; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+
+ return ret;
+}
+
+static int __devexit s2mps11_pmic_remove(struct platform_device *pdev)
+{
+ struct s2mps11_info *s2mps11 = platform_get_drvdata(pdev);
+ struct regulator_dev **rdev = s2mps11->rdev;
+ int i;
+
+ for (i = 0; i < S2MPS11_REGULATOR_MAX; i++)
+ if (rdev[i])
+ regulator_unregister(rdev[i]);
+
+ return 0;
+}
+
+static const struct platform_device_id s2mps11_pmic_id[] = {
+ { "s2mps11-pmic", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(platform, s2mps11_pmic_id);
+
+static struct platform_driver s2mps11_pmic_driver = {
+ .driver = {
+ .name = "s2mps11-pmic",
+ .owner = THIS_MODULE,
+ },
+ .probe = s2mps11_pmic_probe,
+ .remove = __devexit_p(s2mps11_pmic_remove),
+ .id_table = s2mps11_pmic_id,
+};
+
+static int __init s2mps11_pmic_init(void)
+{
+ return platform_driver_register(&s2mps11_pmic_driver);
+}
+subsys_initcall(s2mps11_pmic_init);
+
+static void __exit s2mps11_pmic_exit(void)
+{
+ platform_driver_unregister(&s2mps11_pmic_driver);
+}
+module_exit(s2mps11_pmic_exit);
+
+/* Module information */
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_DESCRIPTION("SAMSUNG S2MPS11 Regulator Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 9caadb482178..102287fa7ecb 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -41,6 +41,7 @@ struct s5m8767_info {
u8 buck3_vol[8];
u8 buck4_vol[8];
int buck_gpios[3];
+ int buck_ds[3];
int buck_gpioindex;
};
@@ -120,27 +121,6 @@ static const struct s5m_voltage_desc *reg_voltage_map[] = {
[S5M8767_BUCK9] = &buck_voltage_val3,
};
-static int s5m8767_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- const struct s5m_voltage_desc *desc;
- int reg_id = rdev_get_id(rdev);
- int val;
-
- if (reg_id >= ARRAY_SIZE(reg_voltage_map) || reg_id < 0)
- return -EINVAL;
-
- desc = reg_voltage_map[reg_id];
- if (desc == NULL)
- return -EINVAL;
-
- val = desc->min + desc->step * selector;
- if (val > desc->max)
- return -EINVAL;
-
- return val;
-}
-
static unsigned int s5m8767_opmode_reg[][4] = {
/* {OFF, ON, LOWPOWER, SUSPEND} */
/* LDO1 ... LDO28 */
@@ -283,17 +263,17 @@ static int s5m8767_get_voltage_register(struct regulator_dev *rdev, int *_reg)
reg = S5M8767_REG_BUCK1CTRL2;
break;
case S5M8767_BUCK2:
- reg = S5M8767_REG_BUCK2DVS1;
+ reg = S5M8767_REG_BUCK2DVS2;
if (s5m8767->buck2_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
case S5M8767_BUCK3:
- reg = S5M8767_REG_BUCK3DVS1;
+ reg = S5M8767_REG_BUCK3DVS2;
if (s5m8767->buck3_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
case S5M8767_BUCK4:
- reg = S5M8767_REG_BUCK4DVS1;
+ reg = S5M8767_REG_BUCK4DVS2;
if (s5m8767->buck4_gpiodvs)
reg += s5m8767->buck_gpioindex;
break;
@@ -357,32 +337,34 @@ static int s5m8767_convert_voltage_to_sel(
return selector;
}
-static inline void s5m8767_set_high(struct s5m8767_info *s5m8767)
+static inline int s5m8767_set_high(struct s5m8767_info *s5m8767)
{
int temp_index = s5m8767->buck_gpioindex;
gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1);
gpio_set_value(s5m8767->buck_gpios[1], (temp_index >> 1) & 0x1);
gpio_set_value(s5m8767->buck_gpios[2], temp_index & 0x1);
+
+ return 0;
}
-static inline void s5m8767_set_low(struct s5m8767_info *s5m8767)
+static inline int s5m8767_set_low(struct s5m8767_info *s5m8767)
{
int temp_index = s5m8767->buck_gpioindex;
gpio_set_value(s5m8767->buck_gpios[2], temp_index & 0x1);
gpio_set_value(s5m8767->buck_gpios[1], (temp_index >> 1) & 0x1);
gpio_set_value(s5m8767->buck_gpios[0], (temp_index >> 2) & 0x1);
+
+ return 0;
}
-static int s5m8767_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int s5m8767_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
{
struct s5m8767_info *s5m8767 = rdev_get_drvdata(rdev);
- const struct s5m_voltage_desc *desc;
int reg_id = rdev_get_id(rdev);
- int sel, reg, mask, ret = 0, old_index, index = 0;
- u8 val;
+ int reg, mask, ret = 0, old_index, index = 0;
u8 *buck234_vol = NULL;
switch (reg_id) {
@@ -407,15 +389,9 @@ static int s5m8767_set_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
- desc = reg_voltage_map[reg_id];
-
- sel = s5m8767_convert_voltage_to_sel(desc, min_uV, max_uV);
- if (sel < 0)
- return sel;
-
/* buck234_vol != NULL means to control buck234 voltage via DVS GPIO */
if (buck234_vol) {
- while (*buck234_vol != sel) {
+ while (*buck234_vol != selector) {
buck234_vol++;
index++;
}
@@ -423,22 +399,16 @@ static int s5m8767_set_voltage(struct regulator_dev *rdev,
s5m8767->buck_gpioindex = index;
if (index > old_index)
- s5m8767_set_high(s5m8767);
+ return s5m8767_set_high(s5m8767);
else
- s5m8767_set_low(s5m8767);
+ return s5m8767_set_low(s5m8767);
} else {
ret = s5m8767_get_voltage_register(rdev, &reg);
if (ret)
return ret;
- s5m_reg_read(s5m8767->iodev, reg, &val);
- val = (val & ~mask) | sel;
-
- ret = s5m_reg_write(s5m8767->iodev, reg, val);
+ return s5m_reg_update(s5m8767->iodev, reg, selector, mask);
}
-
- *selector = sel;
- return ret;
}
static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
@@ -458,15 +428,21 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
}
static struct regulator_ops s5m8767_ops = {
- .list_voltage = s5m8767_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
.is_enabled = s5m8767_reg_is_enabled,
.enable = s5m8767_reg_enable,
.disable = s5m8767_reg_disable,
.get_voltage_sel = s5m8767_get_voltage_sel,
- .set_voltage = s5m8767_set_voltage,
+ .set_voltage_sel = s5m8767_set_voltage_sel,
.set_voltage_time_sel = s5m8767_set_voltage_time_sel,
};
+static struct regulator_ops s5m8767_buck78_ops = {
+ .is_enabled = s5m8767_reg_is_enabled,
+ .enable = s5m8767_reg_enable,
+ .disable = s5m8767_reg_disable,
+};
+
#define s5m8767_regulator_desc(_name) { \
.name = #_name, \
.id = S5M8767_##_name, \
@@ -475,6 +451,14 @@ static struct regulator_ops s5m8767_ops = {
.owner = THIS_MODULE, \
}
+#define s5m8767_regulator_buck78_desc(_name) { \
+ .name = #_name, \
+ .id = S5M8767_##_name, \
+ .ops = &s5m8767_buck78_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+}
+
static struct regulator_desc regulators[] = {
s5m8767_regulator_desc(LDO1),
s5m8767_regulator_desc(LDO2),
@@ -510,8 +494,8 @@ static struct regulator_desc regulators[] = {
s5m8767_regulator_desc(BUCK4),
s5m8767_regulator_desc(BUCK5),
s5m8767_regulator_desc(BUCK6),
- s5m8767_regulator_desc(BUCK7),
- s5m8767_regulator_desc(BUCK8),
+ s5m8767_regulator_buck78_desc(BUCK7),
+ s5m8767_regulator_buck78_desc(BUCK8),
s5m8767_regulator_desc(BUCK9),
};
@@ -522,7 +506,7 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
struct regulator_config config = { };
struct regulator_dev **rdev;
struct s5m8767_info *s5m8767;
- int i, ret, size;
+ int i, ret, size, buck_init;
if (!pdata) {
dev_err(pdev->dev.parent, "Platform data not supplied\n");
@@ -573,12 +557,37 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
s5m8767->buck_gpios[0] = pdata->buck_gpios[0];
s5m8767->buck_gpios[1] = pdata->buck_gpios[1];
s5m8767->buck_gpios[2] = pdata->buck_gpios[2];
+ s5m8767->buck_ds[0] = pdata->buck_ds[0];
+ s5m8767->buck_ds[1] = pdata->buck_ds[1];
+ s5m8767->buck_ds[2] = pdata->buck_ds[2];
+
s5m8767->ramp_delay = pdata->buck_ramp_delay;
s5m8767->buck2_ramp = pdata->buck2_ramp_enable;
s5m8767->buck3_ramp = pdata->buck3_ramp_enable;
s5m8767->buck4_ramp = pdata->buck4_ramp_enable;
s5m8767->opmode = pdata->opmode;
+ buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
+ pdata->buck2_init,
+ pdata->buck2_init +
+ buck_voltage_val2.step);
+
+ s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK2DVS2, buck_init);
+
+ buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
+ pdata->buck3_init,
+ pdata->buck3_init +
+ buck_voltage_val2.step);
+
+ s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK3DVS2, buck_init);
+
+ buck_init = s5m8767_convert_voltage_to_sel(&buck_voltage_val2,
+ pdata->buck4_init,
+ pdata->buck4_init +
+ buck_voltage_val2.step);
+
+ s5m_reg_write(s5m8767->iodev, S5M8767_REG_BUCK4DVS2, buck_init);
+
for (i = 0; i < 8; i++) {
if (s5m8767->buck2_gpiodvs) {
s5m8767->buck2_vol[i] =
@@ -608,48 +617,70 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
}
}
- if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
- pdata->buck4_gpiodvs) {
- if (gpio_is_valid(pdata->buck_gpios[0]) &&
- gpio_is_valid(pdata->buck_gpios[1]) &&
- gpio_is_valid(pdata->buck_gpios[2])) {
- ret = gpio_request(pdata->buck_gpios[0],
- "S5M8767 SET1");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request for SET1\n");
-
- ret = gpio_request(pdata->buck_gpios[1],
- "S5M8767 SET2");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request for SET2\n");
-
- ret = gpio_request(pdata->buck_gpios[2],
- "S5M8767 SET3");
- if (ret == -EBUSY)
- dev_warn(&pdev->dev, "Duplicated gpio request for SET3\n");
- /* SET1 GPIO */
- gpio_direction_output(pdata->buck_gpios[0],
- (s5m8767->buck_gpioindex >> 2) & 0x1);
- /* SET2 GPIO */
- gpio_direction_output(pdata->buck_gpios[1],
- (s5m8767->buck_gpioindex >> 1) & 0x1);
- /* SET3 GPIO */
- gpio_direction_output(pdata->buck_gpios[2],
- (s5m8767->buck_gpioindex >> 0) & 0x1);
- ret = 0;
- } else {
- dev_err(&pdev->dev, "GPIO NOT VALID\n");
- ret = -EINVAL;
+ if (gpio_is_valid(pdata->buck_gpios[0]) &&
+ gpio_is_valid(pdata->buck_gpios[1]) &&
+ gpio_is_valid(pdata->buck_gpios[2])) {
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[0],
+ "S5M8767 SET1");
+ if (ret)
return ret;
- }
+
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[1],
+ "S5M8767 SET2");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_gpios[2],
+ "S5M8767 SET3");
+ if (ret)
+ return ret;
+
+ /* SET1 GPIO */
+ gpio_direction_output(pdata->buck_gpios[0],
+ (s5m8767->buck_gpioindex >> 2) & 0x1);
+ /* SET2 GPIO */
+ gpio_direction_output(pdata->buck_gpios[1],
+ (s5m8767->buck_gpioindex >> 1) & 0x1);
+ /* SET3 GPIO */
+ gpio_direction_output(pdata->buck_gpios[2],
+ (s5m8767->buck_gpioindex >> 0) & 0x1);
+ } else {
+ dev_err(&pdev->dev, "GPIO NOT VALID\n");
+ ret = -EINVAL;
+ return ret;
}
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL,
- (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1), 1 << 1);
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL,
- (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1), 1 << 1);
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL,
- (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1), 1 << 1);
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[0], "S5M8767 DS2");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[1], "S5M8767 DS3");
+ if (ret)
+ return ret;
+
+ ret = devm_gpio_request(&pdev->dev, pdata->buck_ds[2], "S5M8767 DS4");
+ if (ret)
+ return ret;
+
+ /* DS2 GPIO */
+ gpio_direction_output(pdata->buck_ds[0], 0x0);
+ /* DS3 GPIO */
+ gpio_direction_output(pdata->buck_ds[1], 0x0);
+ /* DS4 GPIO */
+ gpio_direction_output(pdata->buck_ds[2], 0x0);
+
+ if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
+ pdata->buck4_gpiodvs) {
+ s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL,
+ (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1),
+ 1 << 1);
+ s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL,
+ (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1),
+ 1 << 1);
+ s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL,
+ (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1),
+ 1 << 1);
+ }
/* Initialize GPIO DVS registers */
for (i = 0; i < 8; i++) {
@@ -668,9 +699,6 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
s5m8767->buck4_vol[i]);
}
}
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK2CTRL, 0x78, 0xff);
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK3CTRL, 0x58, 0xff);
- s5m_reg_update(s5m8767->iodev, S5M8767_REG_BUCK4CTRL, 0x78, 0xff);
if (s5m8767->buck2_ramp)
s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP, 0x08, 0x08);
@@ -684,9 +712,13 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
if (s5m8767->buck2_ramp || s5m8767->buck3_ramp
|| s5m8767->buck4_ramp) {
switch (s5m8767->ramp_delay) {
- case 15:
+ case 5:
s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
- 0xc0, 0xf0);
+ 0x40, 0xf0);
+ break;
+ case 10:
+ s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
+ 0x90, 0xf0);
break;
case 25:
s5m_reg_update(s5m8767->iodev, S5M8767_REG_DVSRAMP,
@@ -711,9 +743,12 @@ static __devinit int s5m8767_pmic_probe(struct platform_device *pdev)
int id = pdata->regulators[i].id;
desc = reg_voltage_map[id];
- if (desc)
+ if (desc) {
regulators[id].n_voltages =
(desc->max - desc->min) / desc->step + 1;
+ regulators[id].min_uV = desc->min;
+ regulators[id].uV_step = desc->step;
+ }
config.dev = s5m8767->dev;
config.init_data = pdata->regulators[i].initdata;
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index d840d8440a91..1378409efaec 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -20,7 +20,7 @@
#include <linux/mfd/core.h>
#include <linux/mfd/tps6105x.h>
-static const int tps6105x_voltages[] = {
+static const unsigned int tps6105x_voltages[] = {
4500000,
5000000,
5250000,
@@ -105,22 +105,13 @@ static int tps6105x_regulator_set_voltage_sel(struct regulator_dev *rdev,
return 0;
}
-static int tps6105x_regulator_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector >= ARRAY_SIZE(tps6105x_voltages))
- return -EINVAL;
-
- return tps6105x_voltages[selector];
-}
-
static struct regulator_ops tps6105x_regulator_ops = {
.enable = tps6105x_regulator_enable,
.disable = tps6105x_regulator_disable,
.is_enabled = tps6105x_regulator_is_enabled,
.get_voltage_sel = tps6105x_regulator_get_voltage_sel,
.set_voltage_sel = tps6105x_regulator_set_voltage_sel,
- .list_voltage = tps6105x_regulator_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
static const struct regulator_desc tps6105x_regulator_desc = {
@@ -130,6 +121,7 @@ static const struct regulator_desc tps6105x_regulator_desc = {
.id = 0,
.owner = THIS_MODULE,
.n_voltages = ARRAY_SIZE(tps6105x_voltages),
+ .volt_table = tps6105x_voltages,
};
/*
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index e534269ed44a..68729a7c8709 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -65,10 +65,8 @@ struct tps62360_chip {
struct regulator_desc desc;
struct regulator_dev *rdev;
struct regmap *regmap;
- int chip_id;
int vsel0_gpio;
int vsel1_gpio;
- int voltage_base;
u8 voltage_reg_mask;
bool en_internal_pulldn;
bool en_discharge;
@@ -76,7 +74,6 @@ struct tps62360_chip {
int lru_index[4];
int curr_vset_vsel[4];
int curr_vset_id;
- int change_uv_per_us;
};
/*
@@ -175,23 +172,6 @@ static int tps62360_dcdc_set_voltage_sel(struct regulator_dev *dev,
return 0;
}
-static int tps62360_set_voltage_time_sel(struct regulator_dev *rdev,
- unsigned int old_selector, unsigned int new_selector)
-{
- struct tps62360_chip *tps = rdev_get_drvdata(rdev);
- int old_uV, new_uV;
-
- old_uV = regulator_list_voltage_linear(rdev, old_selector);
- if (old_uV < 0)
- return old_uV;
-
- new_uV = regulator_list_voltage_linear(rdev, new_selector);
- if (new_uV < 0)
- return new_uV;
-
- return DIV_ROUND_UP(abs(old_uV - new_uV), tps->change_uv_per_us);
-}
-
static int tps62360_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct tps62360_chip *tps = rdev_get_drvdata(rdev);
@@ -258,7 +238,7 @@ static struct regulator_ops tps62360_dcdc_ops = {
.set_voltage_sel = tps62360_dcdc_set_voltage_sel,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
- .set_voltage_time_sel = tps62360_set_voltage_time_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
.set_mode = tps62360_set_mode,
.get_mode = tps62360_get_mode,
};
@@ -301,7 +281,7 @@ static int __devinit tps62360_init_dcdc(struct tps62360_chip *tps,
ramp_ctrl = (ramp_ctrl >> 4) & 0x7;
/* ramp mV/us = 32/(2^ramp_ctrl) */
- tps->change_uv_per_us = DIV_ROUND_UP(32000, BIT(ramp_ctrl));
+ tps->desc.ramp_delay = DIV_ROUND_UP(32000, BIT(ramp_ctrl));
return ret;
}
@@ -408,13 +388,13 @@ static int __devinit tps62360_probe(struct i2c_client *client,
switch (chip_id) {
case TPS62360:
case TPS62362:
- tps->voltage_base = TPS62360_BASE_VOLTAGE;
+ tps->desc.min_uV = TPS62360_BASE_VOLTAGE;
tps->voltage_reg_mask = 0x3F;
tps->desc.n_voltages = TPS62360_N_VOLTAGES;
break;
case TPS62361:
case TPS62363:
- tps->voltage_base = TPS62361_BASE_VOLTAGE;
+ tps->desc.min_uV = TPS62361_BASE_VOLTAGE;
tps->voltage_reg_mask = 0x7F;
tps->desc.n_voltages = TPS62361_N_VOLTAGES;
break;
@@ -427,7 +407,6 @@ static int __devinit tps62360_probe(struct i2c_client *client,
tps->desc.ops = &tps62360_dcdc_ops;
tps->desc.type = REGULATOR_VOLTAGE;
tps->desc.owner = THIS_MODULE;
- tps->desc.min_uV = tps->voltage_base;
tps->desc.uV_step = 10000;
tps->regmap = devm_regmap_init_i2c(client, &tps62360_regmap_config);
@@ -449,24 +428,24 @@ static int __devinit tps62360_probe(struct i2c_client *client,
int gpio_flags;
gpio_flags = (pdata->vsel0_def_state) ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- ret = gpio_request_one(tps->vsel0_gpio,
+ ret = devm_gpio_request_one(&client->dev, tps->vsel0_gpio,
gpio_flags, "tps62360-vsel0");
if (ret) {
dev_err(&client->dev,
"%s(): Could not obtain vsel0 GPIO %d: %d\n",
__func__, tps->vsel0_gpio, ret);
- goto err_gpio0;
+ return ret;
}
gpio_flags = (pdata->vsel1_def_state) ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- ret = gpio_request_one(tps->vsel1_gpio,
+ ret = devm_gpio_request_one(&client->dev, tps->vsel1_gpio,
gpio_flags, "tps62360-vsel1");
if (ret) {
dev_err(&client->dev,
"%s(): Could not obtain vsel1 GPIO %d: %d\n",
__func__, tps->vsel1_gpio, ret);
- goto err_gpio1;
+ return ret;
}
tps->valid_gpios = true;
@@ -484,7 +463,7 @@ static int __devinit tps62360_probe(struct i2c_client *client,
if (ret < 0) {
dev_err(tps->dev, "%s(): Init failed with err = %d\n",
__func__, ret);
- goto err_init;
+ return ret;
}
config.dev = &client->dev;
@@ -498,21 +477,11 @@ static int __devinit tps62360_probe(struct i2c_client *client,
dev_err(tps->dev,
"%s(): regulator register failed with err %s\n",
__func__, id->name);
- ret = PTR_ERR(rdev);
- goto err_init;
+ return PTR_ERR(rdev);
}
tps->rdev = rdev;
return 0;
-
-err_init:
- if (gpio_is_valid(tps->vsel1_gpio))
- gpio_free(tps->vsel1_gpio);
-err_gpio1:
- if (gpio_is_valid(tps->vsel0_gpio))
- gpio_free(tps->vsel0_gpio);
-err_gpio0:
- return ret;
}
/**
@@ -525,12 +494,6 @@ static int __devexit tps62360_remove(struct i2c_client *client)
{
struct tps62360_chip *tps = i2c_get_clientdata(client);
- if (gpio_is_valid(tps->vsel1_gpio))
- gpio_free(tps->vsel1_gpio);
-
- if (gpio_is_valid(tps->vsel0_gpio))
- gpio_free(tps->vsel0_gpio);
-
regulator_unregister(tps->rdev);
return 0;
}
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 8f1be8586c72..6998d579d07b 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -69,10 +69,6 @@
#define TPS65023_REG_CTRL2_DCDC1 BIT(1)
#define TPS65023_REG_CTRL2_DCDC3 BIT(0)
-/* LDO_CTRL bitfields */
-#define TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_id) ((ldo_id)*4)
-#define TPS65023_LDO_CTRL_LDOx_MASK(ldo_id) (0x07 << ((ldo_id)*4))
-
/* Number of step-down converters available */
#define TPS65023_NUM_DCDC 3
/* Number of LDO voltage regulators available */
@@ -91,48 +87,53 @@
#define TPS65023_MAX_REG_ID TPS65023_LDO_2
/* Supported voltage values for regulators */
-static const u16 VCORE_VSEL_table[] = {
- 800, 825, 850, 875,
- 900, 925, 950, 975,
- 1000, 1025, 1050, 1075,
- 1100, 1125, 1150, 1175,
- 1200, 1225, 1250, 1275,
- 1300, 1325, 1350, 1375,
- 1400, 1425, 1450, 1475,
- 1500, 1525, 1550, 1600,
+static const unsigned int VCORE_VSEL_table[] = {
+ 800000, 825000, 850000, 875000,
+ 900000, 925000, 950000, 975000,
+ 1000000, 1025000, 1050000, 1075000,
+ 1100000, 1125000, 1150000, 1175000,
+ 1200000, 1225000, 1250000, 1275000,
+ 1300000, 1325000, 1350000, 1375000,
+ 1400000, 1425000, 1450000, 1475000,
+ 1500000, 1525000, 1550000, 1600000,
+};
+
+static const unsigned int DCDC_FIXED_3300000_VSEL_table[] = {
+ 3300000,
+};
+
+static const unsigned int DCDC_FIXED_1800000_VSEL_table[] = {
+ 1800000,
};
/* Supported voltage values for LDO regulators for tps65020 */
-static const u16 TPS65020_LDO1_VSEL_table[] = {
- 1000, 1050, 1100, 1300,
- 1800, 2500, 3000, 3300,
+static const unsigned int TPS65020_LDO1_VSEL_table[] = {
+ 1000000, 1050000, 1100000, 1300000,
+ 1800000, 2500000, 3000000, 3300000,
};
-static const u16 TPS65020_LDO2_VSEL_table[] = {
- 1000, 1050, 1100, 1300,
- 1800, 2500, 3000, 3300,
+static const unsigned int TPS65020_LDO2_VSEL_table[] = {
+ 1000000, 1050000, 1100000, 1300000,
+ 1800000, 2500000, 3000000, 3300000,
};
/* Supported voltage values for LDO regulators
* for tps65021 and tps65023 */
-static const u16 TPS65023_LDO1_VSEL_table[] = {
- 1000, 1100, 1300, 1800,
- 2200, 2600, 2800, 3150,
+static const unsigned int TPS65023_LDO1_VSEL_table[] = {
+ 1000000, 1100000, 1300000, 1800000,
+ 2200000, 2600000, 2800000, 3150000,
};
-static const u16 TPS65023_LDO2_VSEL_table[] = {
- 1050, 1200, 1300, 1800,
- 2500, 2800, 3000, 3300,
+static const unsigned int TPS65023_LDO2_VSEL_table[] = {
+ 1050000, 1200000, 1300000, 1800000,
+ 2500000, 2800000, 3000000, 3300000,
};
/* Regulator specific details */
struct tps_info {
const char *name;
- unsigned min_uV;
- unsigned max_uV;
- bool fixed;
u8 table_len;
- const u16 *table;
+ const unsigned int *table;
};
/* PMIC details */
@@ -150,7 +151,7 @@ struct tps_driver_data {
u8 core_regulator;
};
-static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
+static int tps65023_dcdc_get_voltage_sel(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int ret;
@@ -164,9 +165,9 @@ static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
if (ret != 0)
return ret;
data &= (tps->info[dcdc]->table_len - 1);
- return tps->info[dcdc]->table[data] * 1000;
+ return data;
} else
- return tps->info[dcdc]->min_uV;
+ return 0;
}
static int tps65023_dcdc_set_voltage_sel(struct regulator_dev *dev,
@@ -193,76 +194,14 @@ out:
return ret;
}
-static int tps65023_ldo_get_voltage(struct regulator_dev *dev)
-{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
- int data, ldo = rdev_get_id(dev);
- int ret;
-
- if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
- return -EINVAL;
-
- ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
- if (ret != 0)
- return ret;
-
- data >>= (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1));
- data &= (tps->info[ldo]->table_len - 1);
- return tps->info[ldo]->table[data] * 1000;
-}
-
-static int tps65023_ldo_set_voltage_sel(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
- int ldo_index = rdev_get_id(dev) - TPS65023_LDO_1;
-
- return regmap_update_bits(tps->regmap, TPS65023_REG_LDO_CTRL,
- TPS65023_LDO_CTRL_LDOx_MASK(ldo_index),
- selector << TPS65023_LDO_CTRL_LDOx_SHIFT(ldo_index));
-}
-
-static int tps65023_dcdc_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
- int dcdc = rdev_get_id(dev);
-
- if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
- return -EINVAL;
-
- if (dcdc == tps->core_regulator) {
- if (selector >= tps->info[dcdc]->table_len)
- return -EINVAL;
- else
- return tps->info[dcdc]->table[selector] * 1000;
- } else
- return tps->info[dcdc]->min_uV;
-}
-
-static int tps65023_ldo_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps_pmic *tps = rdev_get_drvdata(dev);
- int ldo = rdev_get_id(dev);
-
- if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
- return -EINVAL;
-
- if (selector >= tps->info[ldo]->table_len)
- return -EINVAL;
- else
- return tps->info[ldo]->table[selector] * 1000;
-}
-
/* Operations permitted on VDCDCx */
static struct regulator_ops tps65023_dcdc_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .get_voltage = tps65023_dcdc_get_voltage,
+ .get_voltage_sel = tps65023_dcdc_get_voltage_sel,
.set_voltage_sel = tps65023_dcdc_set_voltage_sel,
- .list_voltage = tps65023_dcdc_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
/* Operations permitted on LDOx */
@@ -270,9 +209,9 @@ static struct regulator_ops tps65023_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .get_voltage = tps65023_ldo_get_voltage,
- .set_voltage_sel = tps65023_ldo_set_voltage_sel,
- .list_voltage = tps65023_ldo_list_voltage,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_table,
};
static struct regmap_config tps65023_regmap_config = {
@@ -325,19 +264,28 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
tps->desc[i].name = info->name;
tps->desc[i].id = i;
tps->desc[i].n_voltages = info->table_len;
+ tps->desc[i].volt_table = info->table;
tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
&tps65023_ldo_ops : &tps65023_dcdc_ops);
tps->desc[i].type = REGULATOR_VOLTAGE;
tps->desc[i].owner = THIS_MODULE;
tps->desc[i].enable_reg = TPS65023_REG_REG_CTRL;
- if (i == TPS65023_LDO_1)
+ switch (i) {
+ case TPS65023_LDO_1:
+ tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL;
+ tps->desc[i].vsel_mask = 0x07;
tps->desc[i].enable_mask = 1 << 1;
- else if (i == TPS65023_LDO_2)
+ break;
+ case TPS65023_LDO_2:
+ tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL;
+ tps->desc[i].vsel_mask = 0x70;
tps->desc[i].enable_mask = 1 << 2;
- else /* DCDCx */
+ break;
+ default: /* DCDCx */
tps->desc[i].enable_mask =
1 << (TPS65023_NUM_REGULATOR - i);
+ }
config.dev = &client->dev;
config.init_data = init_data;
@@ -384,35 +332,26 @@ static int __devexit tps_65023_remove(struct i2c_client *client)
static const struct tps_info tps65020_regs[] = {
{
.name = "VDCDC1",
- .min_uV = 3300000,
- .max_uV = 3300000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
+ .table = DCDC_FIXED_3300000_VSEL_table,
},
{
.name = "VDCDC2",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
+ .table = DCDC_FIXED_1800000_VSEL_table,
},
{
.name = "VDCDC3",
- .min_uV = 800000,
- .max_uV = 1600000,
.table_len = ARRAY_SIZE(VCORE_VSEL_table),
.table = VCORE_VSEL_table,
},
-
{
.name = "LDO1",
- .min_uV = 1000000,
- .max_uV = 3150000,
.table_len = ARRAY_SIZE(TPS65020_LDO1_VSEL_table),
.table = TPS65020_LDO1_VSEL_table,
},
{
.name = "LDO2",
- .min_uV = 1050000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(TPS65020_LDO2_VSEL_table),
.table = TPS65020_LDO2_VSEL_table,
},
@@ -421,34 +360,26 @@ static const struct tps_info tps65020_regs[] = {
static const struct tps_info tps65021_regs[] = {
{
.name = "VDCDC1",
- .min_uV = 3300000,
- .max_uV = 3300000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
+ .table = DCDC_FIXED_3300000_VSEL_table,
},
{
.name = "VDCDC2",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
+ .table = DCDC_FIXED_1800000_VSEL_table,
},
{
.name = "VDCDC3",
- .min_uV = 800000,
- .max_uV = 1600000,
.table_len = ARRAY_SIZE(VCORE_VSEL_table),
.table = VCORE_VSEL_table,
},
{
.name = "LDO1",
- .min_uV = 1000000,
- .max_uV = 3150000,
.table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table),
.table = TPS65023_LDO1_VSEL_table,
},
{
.name = "LDO2",
- .min_uV = 1050000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table),
.table = TPS65023_LDO2_VSEL_table,
},
@@ -457,34 +388,26 @@ static const struct tps_info tps65021_regs[] = {
static const struct tps_info tps65023_regs[] = {
{
.name = "VDCDC1",
- .min_uV = 800000,
- .max_uV = 1600000,
.table_len = ARRAY_SIZE(VCORE_VSEL_table),
.table = VCORE_VSEL_table,
},
{
.name = "VDCDC2",
- .min_uV = 3300000,
- .max_uV = 3300000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_3300000_VSEL_table),
+ .table = DCDC_FIXED_3300000_VSEL_table,
},
{
.name = "VDCDC3",
- .min_uV = 1800000,
- .max_uV = 1800000,
- .fixed = 1,
+ .table_len = ARRAY_SIZE(DCDC_FIXED_1800000_VSEL_table),
+ .table = DCDC_FIXED_1800000_VSEL_table,
},
{
.name = "LDO1",
- .min_uV = 1000000,
- .max_uV = 3150000,
.table_len = ARRAY_SIZE(TPS65023_LDO1_VSEL_table),
.table = TPS65023_LDO1_VSEL_table,
},
{
.name = "LDO2",
- .min_uV = 1050000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(TPS65023_LDO2_VSEL_table),
.table = TPS65023_LDO2_VSEL_table,
},
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index da38be1016aa..07d01ccdf308 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -43,58 +43,40 @@
/* Number of total regulators available */
#define TPS6507X_NUM_REGULATOR (TPS6507X_NUM_DCDC + TPS6507X_NUM_LDO)
-/* Supported voltage values for regulators (in milliVolts) */
-static const u16 VDCDCx_VSEL_table[] = {
- 725, 750, 775, 800,
- 825, 850, 875, 900,
- 925, 950, 975, 1000,
- 1025, 1050, 1075, 1100,
- 1125, 1150, 1175, 1200,
- 1225, 1250, 1275, 1300,
- 1325, 1350, 1375, 1400,
- 1425, 1450, 1475, 1500,
- 1550, 1600, 1650, 1700,
- 1750, 1800, 1850, 1900,
- 1950, 2000, 2050, 2100,
- 2150, 2200, 2250, 2300,
- 2350, 2400, 2450, 2500,
- 2550, 2600, 2650, 2700,
- 2750, 2800, 2850, 2900,
- 3000, 3100, 3200, 3300,
+/* Supported voltage values for regulators (in microVolts) */
+static const unsigned int VDCDCx_VSEL_table[] = {
+ 725000, 750000, 775000, 800000,
+ 825000, 850000, 875000, 900000,
+ 925000, 950000, 975000, 1000000,
+ 1025000, 1050000, 1075000, 1100000,
+ 1125000, 1150000, 1175000, 1200000,
+ 1225000, 1250000, 1275000, 1300000,
+ 1325000, 1350000, 1375000, 1400000,
+ 1425000, 1450000, 1475000, 1500000,
+ 1550000, 1600000, 1650000, 1700000,
+ 1750000, 1800000, 1850000, 1900000,
+ 1950000, 2000000, 2050000, 2100000,
+ 2150000, 2200000, 2250000, 2300000,
+ 2350000, 2400000, 2450000, 2500000,
+ 2550000, 2600000, 2650000, 2700000,
+ 2750000, 2800000, 2850000, 2900000,
+ 3000000, 3100000, 3200000, 3300000,
};
-static const u16 LDO1_VSEL_table[] = {
- 1000, 1100, 1200, 1250,
- 1300, 1350, 1400, 1500,
- 1600, 1800, 2500, 2750,
- 2800, 3000, 3100, 3300,
+static const unsigned int LDO1_VSEL_table[] = {
+ 1000000, 1100000, 1200000, 1250000,
+ 1300000, 1350000, 1400000, 1500000,
+ 1600000, 1800000, 2500000, 2750000,
+ 2800000, 3000000, 3100000, 3300000,
};
-static const u16 LDO2_VSEL_table[] = {
- 725, 750, 775, 800,
- 825, 850, 875, 900,
- 925, 950, 975, 1000,
- 1025, 1050, 1075, 1100,
- 1125, 1150, 1175, 1200,
- 1225, 1250, 1275, 1300,
- 1325, 1350, 1375, 1400,
- 1425, 1450, 1475, 1500,
- 1550, 1600, 1650, 1700,
- 1750, 1800, 1850, 1900,
- 1950, 2000, 2050, 2100,
- 2150, 2200, 2250, 2300,
- 2350, 2400, 2450, 2500,
- 2550, 2600, 2650, 2700,
- 2750, 2800, 2850, 2900,
- 3000, 3100, 3200, 3300,
-};
+/* The voltage mapping table for LDO2 is the same as VDCDCx */
+#define LDO2_VSEL_table VDCDCx_VSEL_table
struct tps_info {
const char *name;
- unsigned min_uV;
- unsigned max_uV;
u8 table_len;
- const u16 *table;
+ const unsigned int *table;
/* Does DCDC high or the low register defines output voltage? */
bool defdcdc_default;
@@ -103,36 +85,26 @@ struct tps_info {
static struct tps_info tps6507x_pmic_regs[] = {
{
.name = "VDCDC1",
- .min_uV = 725000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
.table = VDCDCx_VSEL_table,
},
{
.name = "VDCDC2",
- .min_uV = 725000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
.table = VDCDCx_VSEL_table,
},
{
.name = "VDCDC3",
- .min_uV = 725000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(VDCDCx_VSEL_table),
.table = VDCDCx_VSEL_table,
},
{
.name = "LDO1",
- .min_uV = 1000000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(LDO1_VSEL_table),
.table = LDO1_VSEL_table,
},
{
.name = "LDO2",
- .min_uV = 725000,
- .max_uV = 3300000,
.table_len = ARRAY_SIZE(LDO2_VSEL_table),
.table = LDO2_VSEL_table,
},
@@ -375,28 +347,13 @@ static int tps6507x_pmic_set_voltage_sel(struct regulator_dev *dev,
return tps6507x_pmic_reg_write(tps, reg, data);
}
-static int tps6507x_pmic_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps6507x_pmic *tps = rdev_get_drvdata(dev);
- int rid = rdev_get_id(dev);
-
- if (rid < TPS6507X_DCDC_1 || rid > TPS6507X_LDO_2)
- return -EINVAL;
-
- if (selector >= tps->info[rid]->table_len)
- return -EINVAL;
- else
- return tps->info[rid]->table[selector] * 1000;
-}
-
static struct regulator_ops tps6507x_pmic_ops = {
.is_enabled = tps6507x_pmic_is_enabled,
.enable = tps6507x_pmic_enable,
.disable = tps6507x_pmic_disable,
.get_voltage_sel = tps6507x_pmic_get_voltage_sel,
.set_voltage_sel = tps6507x_pmic_set_voltage_sel,
- .list_voltage = tps6507x_pmic_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
static __devinit int tps6507x_pmic_probe(struct platform_device *pdev)
@@ -449,6 +406,7 @@ static __devinit int tps6507x_pmic_probe(struct platform_device *pdev)
tps->desc[i].name = info->name;
tps->desc[i].id = i;
tps->desc[i].n_voltages = info->table_len;
+ tps->desc[i].volt_table = info->table;
tps->desc[i].ops = &tps6507x_pmic_ops;
tps->desc[i].type = REGULATOR_VOLTAGE;
tps->desc[i].owner = THIS_MODULE;
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 9d371d2cbcae..6caa222af77a 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -26,7 +26,7 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65217.h>
-#define TPS65217_REGULATOR(_name, _id, _ops, _n) \
+#define TPS65217_REGULATOR(_name, _id, _ops, _n, _vr, _vm, _em, _t) \
{ \
.name = _name, \
.id = _id, \
@@ -34,23 +34,23 @@
.n_voltages = _n, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .vsel_reg = _vr, \
+ .vsel_mask = _vm, \
+ .enable_reg = TPS65217_REG_ENABLE, \
+ .enable_mask = _em, \
+ .volt_table = _t, \
} \
-#define TPS65217_INFO(_nm, _min, _max, _f1, _f2, _t, _n, _em, _vr, _vm) \
+#define TPS65217_INFO(_nm, _min, _max, _f1, _f2) \
{ \
.name = _nm, \
.min_uV = _min, \
.max_uV = _max, \
.vsel_to_uv = _f1, \
.uv_to_vsel = _f2, \
- .table = _t, \
- .table_len = _n, \
- .enable_mask = _em, \
- .set_vout_reg = _vr, \
- .set_vout_mask = _vm, \
}
-static const int LDO1_VSEL_table[] = {
+static const unsigned int LDO1_VSEL_table[] = {
1000000, 1100000, 1200000, 1250000,
1300000, 1350000, 1400000, 1500000,
1600000, 1800000, 2500000, 2750000,
@@ -78,7 +78,7 @@ static int tps65217_vsel_to_uv1(unsigned int vsel)
static int tps65217_uv_to_vsel1(int uV, unsigned int *vsel)
{
- if ((uV < 0) && (uV > 3300000))
+ if (uV < 0 || uV > 3300000)
return -EINVAL;
if (uV <= 1500000)
@@ -112,7 +112,7 @@ static int tps65217_vsel_to_uv2(unsigned int vsel)
static int tps65217_uv_to_vsel2(int uV, unsigned int *vsel)
{
- if ((uV < 0) && (uV > 3300000))
+ if (uV < 0 || uV > 3300000)
return -EINVAL;
if (uV <= 1900000)
@@ -127,46 +127,20 @@ static int tps65217_uv_to_vsel2(int uV, unsigned int *vsel)
static struct tps_info tps65217_pmic_regs[] = {
TPS65217_INFO("DCDC1", 900000, 1800000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC1_EN,
- TPS65217_REG_DEFDCDC1, TPS65217_DEFDCDCX_DCDC_MASK),
+ tps65217_uv_to_vsel1),
TPS65217_INFO("DCDC2", 900000, 3300000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC2_EN,
- TPS65217_REG_DEFDCDC2, TPS65217_DEFDCDCX_DCDC_MASK),
+ tps65217_uv_to_vsel1),
TPS65217_INFO("DCDC3", 900000, 1500000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_DC3_EN,
- TPS65217_REG_DEFDCDC3, TPS65217_DEFDCDCX_DCDC_MASK),
- TPS65217_INFO("LDO1", 1000000, 3300000, NULL, NULL, LDO1_VSEL_table,
- 16, TPS65217_ENABLE_LDO1_EN, TPS65217_REG_DEFLDO1,
- TPS65217_DEFLDO1_LDO1_MASK),
+ tps65217_uv_to_vsel1),
+ TPS65217_INFO("LDO1", 1000000, 3300000, NULL, NULL),
TPS65217_INFO("LDO2", 900000, 3300000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1, NULL, 64, TPS65217_ENABLE_LDO2_EN,
- TPS65217_REG_DEFLDO2, TPS65217_DEFLDO2_LDO2_MASK),
+ tps65217_uv_to_vsel1),
TPS65217_INFO("LDO3", 1800000, 3300000, tps65217_vsel_to_uv2,
- tps65217_uv_to_vsel2, NULL, 32,
- TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN,
- TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK),
+ tps65217_uv_to_vsel2),
TPS65217_INFO("LDO4", 1800000, 3300000, tps65217_vsel_to_uv2,
- tps65217_uv_to_vsel2, NULL, 32,
- TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN,
- TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK),
+ tps65217_uv_to_vsel2),
};
-static int tps65217_pmic_is_enabled(struct regulator_dev *dev)
-{
- int ret;
- struct tps65217 *tps = rdev_get_drvdata(dev);
- unsigned int data, rid = rdev_get_id(dev);
-
- if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
- return -EINVAL;
-
- ret = tps65217_reg_read(tps, TPS65217_REG_ENABLE, &data);
- if (ret)
- return ret;
-
- return (data & tps->info[rid]->enable_mask) ? 1 : 0;
-}
-
static int tps65217_pmic_enable(struct regulator_dev *dev)
{
struct tps65217 *tps = rdev_get_drvdata(dev);
@@ -177,9 +151,8 @@ static int tps65217_pmic_enable(struct regulator_dev *dev)
/* Enable the regulator and password protection is level 1 */
return tps65217_set_bits(tps, TPS65217_REG_ENABLE,
- tps->info[rid]->enable_mask,
- tps->info[rid]->enable_mask,
- TPS65217_PROTECT_L1);
+ dev->desc->enable_mask, dev->desc->enable_mask,
+ TPS65217_PROTECT_L1);
}
static int tps65217_pmic_disable(struct regulator_dev *dev)
@@ -192,25 +165,7 @@ static int tps65217_pmic_disable(struct regulator_dev *dev)
/* Disable the regulator and password protection is level 1 */
return tps65217_clear_bits(tps, TPS65217_REG_ENABLE,
- tps->info[rid]->enable_mask, TPS65217_PROTECT_L1);
-}
-
-static int tps65217_pmic_get_voltage_sel(struct regulator_dev *dev)
-{
- int ret;
- struct tps65217 *tps = rdev_get_drvdata(dev);
- unsigned int selector, rid = rdev_get_id(dev);
-
- if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
- return -EINVAL;
-
- ret = tps65217_reg_read(tps, tps->info[rid]->set_vout_reg, &selector);
- if (ret)
- return ret;
-
- selector &= tps->info[rid]->set_vout_mask;
-
- return selector;
+ dev->desc->enable_mask, TPS65217_PROTECT_L1);
}
static int tps65217_pmic_set_voltage_sel(struct regulator_dev *dev,
@@ -221,8 +176,7 @@ static int tps65217_pmic_set_voltage_sel(struct regulator_dev *dev,
unsigned int rid = rdev_get_id(dev);
/* Set the voltage based on vsel value and write protect level is 2 */
- ret = tps65217_set_bits(tps, tps->info[rid]->set_vout_reg,
- tps->info[rid]->set_vout_mask,
+ ret = tps65217_set_bits(tps, dev->desc->vsel_reg, dev->desc->vsel_mask,
selector, TPS65217_PROTECT_L2);
/* Set GO bit for DCDCx to initiate voltage transistion */
@@ -252,10 +206,10 @@ static int tps65217_pmic_map_voltage(struct regulator_dev *dev,
if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
return -EINVAL;
- if (min_uV < tps->info[rid]->min_uV || min_uV > tps->info[rid]->max_uV)
- return -EINVAL;
+ if (min_uV < tps->info[rid]->min_uV)
+ min_uV = tps->info[rid]->min_uV;
- if (max_uV < tps->info[rid]->min_uV || max_uV > tps->info[rid]->max_uV)
+ if (max_uV < tps->info[rid]->min_uV || min_uV > tps->info[rid]->max_uV)
return -EINVAL;
ret = tps->info[rid]->uv_to_vsel(min_uV, &sel);
@@ -274,21 +228,18 @@ static int tps65217_pmic_list_voltage(struct regulator_dev *dev,
if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
return -EINVAL;
- if (selector >= tps->info[rid]->table_len)
+ if (selector >= dev->desc->n_voltages)
return -EINVAL;
- if (tps->info[rid]->table)
- return tps->info[rid]->table[selector];
-
return tps->info[rid]->vsel_to_uv(selector);
}
/* Operations permitted on DCDCx, LDO2, LDO3 and LDO4 */
static struct regulator_ops tps65217_pmic_ops = {
- .is_enabled = tps65217_pmic_is_enabled,
+ .is_enabled = regulator_is_enabled_regmap,
.enable = tps65217_pmic_enable,
.disable = tps65217_pmic_disable,
- .get_voltage_sel = tps65217_pmic_get_voltage_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = tps65217_pmic_set_voltage_sel,
.list_voltage = tps65217_pmic_list_voltage,
.map_voltage = tps65217_pmic_map_voltage,
@@ -296,22 +247,38 @@ static struct regulator_ops tps65217_pmic_ops = {
/* Operations permitted on LDO1 */
static struct regulator_ops tps65217_pmic_ldo1_ops = {
- .is_enabled = tps65217_pmic_is_enabled,
+ .is_enabled = regulator_is_enabled_regmap,
.enable = tps65217_pmic_enable,
.disable = tps65217_pmic_disable,
- .get_voltage_sel = tps65217_pmic_get_voltage_sel,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = tps65217_pmic_set_voltage_sel,
- .list_voltage = tps65217_pmic_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
static const struct regulator_desc regulators[] = {
- TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, tps65217_pmic_ops, 64),
- TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, tps65217_pmic_ops, 64),
- TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, tps65217_pmic_ops, 64),
- TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, tps65217_pmic_ldo1_ops, 16),
- TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, tps65217_pmic_ops, 64),
- TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, tps65217_pmic_ops, 32),
- TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, tps65217_pmic_ops, 32),
+ TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, tps65217_pmic_ops, 64,
+ TPS65217_REG_DEFDCDC1, TPS65217_DEFDCDCX_DCDC_MASK,
+ TPS65217_ENABLE_DC1_EN, NULL),
+ TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, tps65217_pmic_ops, 64,
+ TPS65217_REG_DEFDCDC2, TPS65217_DEFDCDCX_DCDC_MASK,
+ TPS65217_ENABLE_DC2_EN, NULL),
+ TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, tps65217_pmic_ops, 64,
+ TPS65217_REG_DEFDCDC3, TPS65217_DEFDCDCX_DCDC_MASK,
+ TPS65217_ENABLE_DC3_EN, NULL),
+ TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, tps65217_pmic_ldo1_ops, 16,
+ TPS65217_REG_DEFLDO1, TPS65217_DEFLDO1_LDO1_MASK,
+ TPS65217_ENABLE_LDO1_EN, LDO1_VSEL_table),
+ TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, tps65217_pmic_ops, 64,
+ TPS65217_REG_DEFLDO2, TPS65217_DEFLDO2_LDO2_MASK,
+ TPS65217_ENABLE_LDO2_EN, NULL),
+ TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, tps65217_pmic_ops, 32,
+ TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK,
+ TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN,
+ NULL),
+ TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, tps65217_pmic_ops, 32,
+ TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK,
+ TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN,
+ NULL),
};
static int __devinit tps65217_regulator_probe(struct platform_device *pdev)
@@ -326,6 +293,7 @@ static int __devinit tps65217_regulator_probe(struct platform_device *pdev)
tps->info[pdev->id] = info;
config.dev = &pdev->dev;
+ config.of_node = pdev->dev.of_node;
config.init_data = pdev->dev.platform_data;
config.driver_data = tps;
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 1b299aacf22f..947ece933d90 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -110,9 +110,6 @@
#define N_SWITCH 2
#define N_REGULATORS (N_DCDC + N_LDO + N_SWITCH)
-#define FIXED_ILIMSEL BIT(0)
-#define FIXED_VOLTAGE BIT(1)
-
#define CMD_READ(reg) ((reg) << 6)
#define CMD_WRITE(reg) (BIT(5) | (reg) << 6)
#define STAT_CLK BIT(3)
@@ -129,12 +126,9 @@ struct field {
struct supply_info {
const char *name;
int n_voltages;
- const int *voltages;
- int fixed_voltage;
+ const unsigned int *voltages;
int n_ilimsels;
- const int *ilimsels;
- int fixed_ilimsel;
- int flags;
+ const unsigned int *ilimsels;
struct field enable, voltage, ilimsel;
};
@@ -307,7 +301,7 @@ static int write_field(struct tps6524x *hw, const struct field *field,
val << field->shift);
}
-static const int dcdc1_voltages[] = {
+static const unsigned int dcdc1_voltages[] = {
800000, 825000, 850000, 875000,
900000, 925000, 950000, 975000,
1000000, 1025000, 1050000, 1075000,
@@ -318,7 +312,7 @@ static const int dcdc1_voltages[] = {
1500000, 1525000, 1550000, 1575000,
};
-static const int dcdc2_voltages[] = {
+static const unsigned int dcdc2_voltages[] = {
1400000, 1450000, 1500000, 1550000,
1600000, 1650000, 1700000, 1750000,
1800000, 1850000, 1900000, 1950000,
@@ -329,7 +323,7 @@ static const int dcdc2_voltages[] = {
2800000, 2850000, 2900000, 2950000,
};
-static const int dcdc3_voltages[] = {
+static const unsigned int dcdc3_voltages[] = {
2400000, 2450000, 2500000, 2550000, 2600000,
2650000, 2700000, 2750000, 2800000, 2850000,
2900000, 2950000, 3000000, 3050000, 3100000,
@@ -337,38 +331,54 @@ static const int dcdc3_voltages[] = {
3400000, 3450000, 3500000, 3550000, 3600000,
};
-static const int ldo1_voltages[] = {
+static const unsigned int ldo1_voltages[] = {
4300000, 4350000, 4400000, 4450000,
4500000, 4550000, 4600000, 4650000,
4700000, 4750000, 4800000, 4850000,
4900000, 4950000, 5000000, 5050000,
};
-static const int ldo2_voltages[] = {
+static const unsigned int ldo2_voltages[] = {
1100000, 1150000, 1200000, 1250000,
1300000, 1700000, 1750000, 1800000,
1850000, 1900000, 3150000, 3200000,
3250000, 3300000, 3350000, 3400000,
};
-static const int ldo_ilimsel[] = {
+static const unsigned int fixed_5000000_voltage[] = {
+ 5000000
+};
+
+static const unsigned int ldo_ilimsel[] = {
400000, 1500000
};
-static const int usb_ilimsel[] = {
+static const unsigned int usb_ilimsel[] = {
200000, 400000, 800000, 1000000
};
+static const unsigned int fixed_2400000_ilimsel[] = {
+ 2400000
+};
+
+static const unsigned int fixed_1200000_ilimsel[] = {
+ 1200000
+};
+
+static const unsigned int fixed_400000_ilimsel[] = {
+ 400000
+};
+
#define __MK_FIELD(_reg, _mask, _shift) \
{ .reg = (_reg), .mask = (_mask), .shift = (_shift), }
static const struct supply_info supply_info[N_REGULATORS] = {
{
.name = "DCDC1",
- .flags = FIXED_ILIMSEL,
.n_voltages = ARRAY_SIZE(dcdc1_voltages),
.voltages = dcdc1_voltages,
- .fixed_ilimsel = 2400000,
+ .n_ilimsels = ARRAY_SIZE(fixed_2400000_ilimsel),
+ .ilimsels = fixed_2400000_ilimsel,
.enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
DCDCDCDC1_EN_SHIFT),
.voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
@@ -376,10 +386,10 @@ static const struct supply_info supply_info[N_REGULATORS] = {
},
{
.name = "DCDC2",
- .flags = FIXED_ILIMSEL,
.n_voltages = ARRAY_SIZE(dcdc2_voltages),
.voltages = dcdc2_voltages,
- .fixed_ilimsel = 1200000,
+ .n_ilimsels = ARRAY_SIZE(fixed_1200000_ilimsel),
+ .ilimsels = fixed_1200000_ilimsel,
.enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
DCDCDCDC2_EN_SHIFT),
.voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
@@ -387,10 +397,10 @@ static const struct supply_info supply_info[N_REGULATORS] = {
},
{
.name = "DCDC3",
- .flags = FIXED_ILIMSEL,
.n_voltages = ARRAY_SIZE(dcdc3_voltages),
.voltages = dcdc3_voltages,
- .fixed_ilimsel = 1200000,
+ .n_ilimsels = ARRAY_SIZE(fixed_1200000_ilimsel),
+ .ilimsels = fixed_1200000_ilimsel,
.enable = __MK_FIELD(REG_DCDC_EN, DCDCDCDC_EN_MASK,
DCDCDCDC3_EN_SHIFT),
.voltage = __MK_FIELD(REG_DCDC_SET, DCDC_VDCDC_MASK,
@@ -424,8 +434,8 @@ static const struct supply_info supply_info[N_REGULATORS] = {
},
{
.name = "USB",
- .flags = FIXED_VOLTAGE,
- .fixed_voltage = 5000000,
+ .n_voltages = ARRAY_SIZE(fixed_5000000_voltage),
+ .voltages = fixed_5000000_voltage,
.n_ilimsels = ARRAY_SIZE(usb_ilimsel),
.ilimsels = usb_ilimsel,
.enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
@@ -435,29 +445,15 @@ static const struct supply_info supply_info[N_REGULATORS] = {
},
{
.name = "LCD",
- .flags = FIXED_VOLTAGE | FIXED_ILIMSEL,
- .fixed_voltage = 5000000,
- .fixed_ilimsel = 400000,
+ .n_voltages = ARRAY_SIZE(fixed_5000000_voltage),
+ .voltages = fixed_5000000_voltage,
+ .n_ilimsels = ARRAY_SIZE(fixed_400000_ilimsel),
+ .ilimsels = fixed_400000_ilimsel,
.enable = __MK_FIELD(REG_BLOCK_EN, BLOCK_MASK,
BLOCK_LCD_SHIFT),
},
};
-static int list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
- const struct supply_info *info;
- struct tps6524x *hw;
-
- hw = rdev_get_drvdata(rdev);
- info = &supply_info[rdev_get_id(rdev)];
-
- if (info->flags & FIXED_VOLTAGE)
- return selector ? -EINVAL : info->fixed_voltage;
-
- return ((selector < info->n_voltages) ?
- info->voltages[selector] : -EINVAL);
-}
-
static int set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
const struct supply_info *info;
@@ -466,7 +462,7 @@ static int set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
hw = rdev_get_drvdata(rdev);
info = &supply_info[rdev_get_id(rdev)];
- if (info->flags & FIXED_VOLTAGE)
+ if (rdev->desc->n_voltages == 1)
return -EINVAL;
return write_field(hw, &info->voltage, selector);
@@ -481,7 +477,7 @@ static int get_voltage_sel(struct regulator_dev *rdev)
hw = rdev_get_drvdata(rdev);
info = &supply_info[rdev_get_id(rdev)];
- if (info->flags & FIXED_VOLTAGE)
+ if (rdev->desc->n_voltages == 1)
return 0;
ret = read_field(hw, &info->voltage);
@@ -503,7 +499,7 @@ static int set_current_limit(struct regulator_dev *rdev, int min_uA,
hw = rdev_get_drvdata(rdev);
info = &supply_info[rdev_get_id(rdev)];
- if (info->flags & FIXED_ILIMSEL)
+ if (info->n_ilimsels == 1)
return -EINVAL;
for (i = 0; i < info->n_ilimsels; i++)
@@ -526,8 +522,8 @@ static int get_current_limit(struct regulator_dev *rdev)
hw = rdev_get_drvdata(rdev);
info = &supply_info[rdev_get_id(rdev)];
- if (info->flags & FIXED_ILIMSEL)
- return info->fixed_ilimsel;
+ if (info->n_ilimsels == 1)
+ return info->ilimsels[0];
ret = read_field(hw, &info->ilimsel);
if (ret < 0)
@@ -577,7 +573,7 @@ static struct regulator_ops regulator_ops = {
.disable = disable_supply,
.get_voltage_sel = get_voltage_sel,
.set_voltage_sel = set_voltage_sel,
- .list_voltage = list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.set_current_limit = set_current_limit,
.get_current_limit = get_current_limit,
};
@@ -629,13 +625,11 @@ static int __devinit pmic_probe(struct spi_device *spi)
hw->desc[i].name = info->name;
hw->desc[i].id = i;
hw->desc[i].n_voltages = info->n_voltages;
+ hw->desc[i].volt_table = info->voltages;
hw->desc[i].ops = &regulator_ops;
hw->desc[i].type = REGULATOR_VOLTAGE;
hw->desc[i].owner = THIS_MODULE;
- if (info->flags & FIXED_VOLTAGE)
- hw->desc[i].n_voltages = 1;
-
config.dev = dev;
config.init_data = init_data;
config.driver_data = hw;
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index c0a214575380..e6da90ab5153 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -63,8 +63,6 @@ struct tps6586x_regulator {
int enable_bit[2];
int enable_reg[2];
- int *voltages;
-
/* for DVM regulators */
int go_reg;
int go_bit;
@@ -72,22 +70,9 @@ struct tps6586x_regulator {
static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev)
{
- return rdev_get_dev(rdev)->parent->parent;
+ return rdev_get_dev(rdev)->parent;
}
-static int tps6586x_list_voltage(struct regulator_dev *rdev, unsigned selector)
-{
- struct tps6586x_regulator *info = rdev_get_drvdata(rdev);
- int rid = rdev_get_id(rdev);
-
- /* LDO0 has minimal voltage 1.2V rather than 1.25V */
- if ((rid == TPS6586X_ID_LDO_0) && (selector == 0))
- return (info->voltages[0] - 50) * 1000;
-
- return info->voltages[selector] * 1000;
-}
-
-
static int tps6586x_set_voltage_sel(struct regulator_dev *rdev,
unsigned selector)
{
@@ -168,7 +153,7 @@ static int tps6586x_regulator_is_enabled(struct regulator_dev *rdev)
}
static struct regulator_ops tps6586x_regulator_ops = {
- .list_voltage = tps6586x_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
.get_voltage_sel = tps6586x_get_voltage_sel,
.set_voltage_sel = tps6586x_set_voltage_sel,
@@ -177,39 +162,45 @@ static struct regulator_ops tps6586x_regulator_ops = {
.disable = tps6586x_regulator_disable,
};
-static int tps6586x_ldo_voltages[] = {
- 1250, 1500, 1800, 2500, 2700, 2850, 3100, 3300,
+static const unsigned int tps6586x_ldo0_voltages[] = {
+ 1200000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000,
+};
+
+static const unsigned int tps6586x_ldo4_voltages[] = {
+ 1700000, 1725000, 1750000, 1775000, 1800000, 1825000, 1850000, 1875000,
+ 1900000, 1925000, 1950000, 1975000, 2000000, 2025000, 2050000, 2075000,
+ 2100000, 2125000, 2150000, 2175000, 2200000, 2225000, 2250000, 2275000,
+ 2300000, 2325000, 2350000, 2375000, 2400000, 2425000, 2450000, 2475000,
};
-static int tps6586x_ldo4_voltages[] = {
- 1700, 1725, 1750, 1775, 1800, 1825, 1850, 1875,
- 1900, 1925, 1950, 1975, 2000, 2025, 2050, 2075,
- 2100, 2125, 2150, 2175, 2200, 2225, 2250, 2275,
- 2300, 2325, 2350, 2375, 2400, 2425, 2450, 2475,
+static const unsigned int tps6586x_ldo_voltages[] = {
+ 1250000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000,
};
-static int tps6586x_sm2_voltages[] = {
- 3000, 3050, 3100, 3150, 3200, 3250, 3300, 3350,
- 3400, 3450, 3500, 3550, 3600, 3650, 3700, 3750,
- 3800, 3850, 3900, 3950, 4000, 4050, 4100, 4150,
- 4200, 4250, 4300, 4350, 4400, 4450, 4500, 4550,
+static const unsigned int tps6586x_sm2_voltages[] = {
+ 3000000, 3050000, 3100000, 3150000, 3200000, 3250000, 3300000, 3350000,
+ 3400000, 3450000, 3500000, 3550000, 3600000, 3650000, 3700000, 3750000,
+ 3800000, 3850000, 3900000, 3950000, 4000000, 4050000, 4100000, 4150000,
+ 4200000, 4250000, 4300000, 4350000, 4400000, 4450000, 4500000, 4550000,
};
-static int tps6586x_dvm_voltages[] = {
- 725, 750, 775, 800, 825, 850, 875, 900,
- 925, 950, 975, 1000, 1025, 1050, 1075, 1100,
- 1125, 1150, 1175, 1200, 1225, 1250, 1275, 1300,
- 1325, 1350, 1375, 1400, 1425, 1450, 1475, 1500,
+static const unsigned int tps6586x_dvm_voltages[] = {
+ 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000,
+ 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000,
+ 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000,
+ 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000,
};
-#define TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits, \
+#define TPS6586X_REGULATOR(_id, _pin_name, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
.desc = { \
+ .supply_name = _pin_name, \
.name = "REG-" #_id, \
.ops = &tps6586x_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = TPS6586X_ID_##_id, \
.n_voltages = ARRAY_SIZE(tps6586x_##vdata##_voltages), \
+ .volt_table = tps6586x_##vdata##_voltages, \
.owner = THIS_MODULE, \
}, \
.volt_reg = TPS6586X_##vreg, \
@@ -218,44 +209,45 @@ static int tps6586x_dvm_voltages[] = {
.enable_reg[0] = TPS6586X_SUPPLY##ereg0, \
.enable_bit[0] = (ebit0), \
.enable_reg[1] = TPS6586X_SUPPLY##ereg1, \
- .enable_bit[1] = (ebit1), \
- .voltages = tps6586x_##vdata##_voltages,
+ .enable_bit[1] = (ebit1),
#define TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
.go_reg = TPS6586X_##goreg, \
.go_bit = (gobit),
-#define TPS6586X_LDO(_id, vdata, vreg, shift, nbits, \
+#define TPS6586X_LDO(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
{ \
- TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits, \
+ TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
}
-#define TPS6586X_DVM(_id, vdata, vreg, shift, nbits, \
+#define TPS6586X_DVM(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1, goreg, gobit) \
{ \
- TPS6586X_REGULATOR(_id, vdata, vreg, shift, nbits, \
+ TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \
ereg0, ebit0, ereg1, ebit1) \
TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \
}
static struct tps6586x_regulator tps6586x_regulator[] = {
- TPS6586X_LDO(LDO_0, ldo, SUPPLYV1, 5, 3, ENC, 0, END, 0),
- TPS6586X_LDO(LDO_3, ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2),
- TPS6586X_LDO(LDO_5, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6),
- TPS6586X_LDO(LDO_6, ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4),
- TPS6586X_LDO(LDO_7, ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5),
- TPS6586X_LDO(LDO_8, ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6),
- TPS6586X_LDO(LDO_9, ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
- TPS6586X_LDO(LDO_RTC, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
- TPS6586X_LDO(LDO_1, dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
- TPS6586X_LDO(SM_2, sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
-
- TPS6586X_DVM(LDO_2, dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6),
- TPS6586X_DVM(LDO_4, ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6),
- TPS6586X_DVM(SM_0, dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2),
- TPS6586X_DVM(SM_1, dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0),
+ TPS6586X_LDO(LDO_0, "vinldo01", ldo0, SUPPLYV1, 5, 3, ENC, 0, END, 0),
+ TPS6586X_LDO(LDO_3, "vinldo23", ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2),
+ TPS6586X_LDO(LDO_5, NULL, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6),
+ TPS6586X_LDO(LDO_6, "vinldo678", ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4),
+ TPS6586X_LDO(LDO_7, "vinldo678", ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5),
+ TPS6586X_LDO(LDO_8, "vinldo678", ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6),
+ TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7),
+ TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7),
+ TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1),
+ TPS6586X_LDO(SM_2, "sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7),
+
+ TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3,
+ ENB, 3, VCC2, 6),
+ TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3,
+ END, 3, VCC1, 6),
+ TPS6586X_DVM(SM_0, "sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2),
+ TPS6586X_DVM(SM_1, "sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0),
};
/*
@@ -362,7 +354,7 @@ static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
if (err)
return err;
- config.dev = &pdev->dev;
+ config.dev = pdev->dev.parent;
config.of_node = pdev->dev.of_node;
config.init_data = pdev->dev.platform_data;
config.driver_data = ri;
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 6bf864b4bdf6..793adda560c3 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -31,160 +31,147 @@
TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 | \
TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
-/* supported VIO voltages in millivolts */
-static const u16 VIO_VSEL_table[] = {
- 1500, 1800, 2500, 3300,
+/* supported VIO voltages in microvolts */
+static const unsigned int VIO_VSEL_table[] = {
+ 1500000, 1800000, 2500000, 3300000,
};
/* VSEL tables for TPS65910 specific LDOs and dcdc's */
-/* supported VDD3 voltages in millivolts */
-static const u16 VDD3_VSEL_table[] = {
- 5000,
+/* supported VDD3 voltages in microvolts */
+static const unsigned int VDD3_VSEL_table[] = {
+ 5000000,
};
-/* supported VDIG1 voltages in millivolts */
-static const u16 VDIG1_VSEL_table[] = {
- 1200, 1500, 1800, 2700,
+/* supported VDIG1 voltages in microvolts */
+static const unsigned int VDIG1_VSEL_table[] = {
+ 1200000, 1500000, 1800000, 2700000,
};
-/* supported VDIG2 voltages in millivolts */
-static const u16 VDIG2_VSEL_table[] = {
- 1000, 1100, 1200, 1800,
+/* supported VDIG2 voltages in microvolts */
+static const unsigned int VDIG2_VSEL_table[] = {
+ 1000000, 1100000, 1200000, 1800000,
};
-/* supported VPLL voltages in millivolts */
-static const u16 VPLL_VSEL_table[] = {
- 1000, 1100, 1800, 2500,
+/* supported VPLL voltages in microvolts */
+static const unsigned int VPLL_VSEL_table[] = {
+ 1000000, 1100000, 1800000, 2500000,
};
-/* supported VDAC voltages in millivolts */
-static const u16 VDAC_VSEL_table[] = {
- 1800, 2600, 2800, 2850,
+/* supported VDAC voltages in microvolts */
+static const unsigned int VDAC_VSEL_table[] = {
+ 1800000, 2600000, 2800000, 2850000,
};
-/* supported VAUX1 voltages in millivolts */
-static const u16 VAUX1_VSEL_table[] = {
- 1800, 2500, 2800, 2850,
+/* supported VAUX1 voltages in microvolts */
+static const unsigned int VAUX1_VSEL_table[] = {
+ 1800000, 2500000, 2800000, 2850000,
};
-/* supported VAUX2 voltages in millivolts */
-static const u16 VAUX2_VSEL_table[] = {
- 1800, 2800, 2900, 3300,
+/* supported VAUX2 voltages in microvolts */
+static const unsigned int VAUX2_VSEL_table[] = {
+ 1800000, 2800000, 2900000, 3300000,
};
-/* supported VAUX33 voltages in millivolts */
-static const u16 VAUX33_VSEL_table[] = {
- 1800, 2000, 2800, 3300,
+/* supported VAUX33 voltages in microvolts */
+static const unsigned int VAUX33_VSEL_table[] = {
+ 1800000, 2000000, 2800000, 3300000,
};
-/* supported VMMC voltages in millivolts */
-static const u16 VMMC_VSEL_table[] = {
- 1800, 2800, 3000, 3300,
+/* supported VMMC voltages in microvolts */
+static const unsigned int VMMC_VSEL_table[] = {
+ 1800000, 2800000, 3000000, 3300000,
};
struct tps_info {
const char *name;
- unsigned min_uV;
- unsigned max_uV;
+ const char *vin_name;
u8 n_voltages;
- const u16 *voltage_table;
+ const unsigned int *voltage_table;
int enable_time_us;
};
static struct tps_info tps65910_regs[] = {
{
.name = "vrtc",
+ .vin_name = "vcc7",
.enable_time_us = 2200,
},
{
.name = "vio",
- .min_uV = 1500000,
- .max_uV = 3300000,
+ .vin_name = "vccio",
.n_voltages = ARRAY_SIZE(VIO_VSEL_table),
.voltage_table = VIO_VSEL_table,
.enable_time_us = 350,
},
{
.name = "vdd1",
- .min_uV = 600000,
- .max_uV = 4500000,
+ .vin_name = "vcc1",
.enable_time_us = 350,
},
{
.name = "vdd2",
- .min_uV = 600000,
- .max_uV = 4500000,
+ .vin_name = "vcc2",
.enable_time_us = 350,
},
{
.name = "vdd3",
- .min_uV = 5000000,
- .max_uV = 5000000,
.n_voltages = ARRAY_SIZE(VDD3_VSEL_table),
.voltage_table = VDD3_VSEL_table,
.enable_time_us = 200,
},
{
.name = "vdig1",
- .min_uV = 1200000,
- .max_uV = 2700000,
+ .vin_name = "vcc6",
.n_voltages = ARRAY_SIZE(VDIG1_VSEL_table),
.voltage_table = VDIG1_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vdig2",
- .min_uV = 1000000,
- .max_uV = 1800000,
+ .vin_name = "vcc6",
.n_voltages = ARRAY_SIZE(VDIG2_VSEL_table),
.voltage_table = VDIG2_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vpll",
- .min_uV = 1000000,
- .max_uV = 2500000,
+ .vin_name = "vcc5",
.n_voltages = ARRAY_SIZE(VPLL_VSEL_table),
.voltage_table = VPLL_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vdac",
- .min_uV = 1800000,
- .max_uV = 2850000,
+ .vin_name = "vcc5",
.n_voltages = ARRAY_SIZE(VDAC_VSEL_table),
.voltage_table = VDAC_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vaux1",
- .min_uV = 1800000,
- .max_uV = 2850000,
+ .vin_name = "vcc4",
.n_voltages = ARRAY_SIZE(VAUX1_VSEL_table),
.voltage_table = VAUX1_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vaux2",
- .min_uV = 1800000,
- .max_uV = 3300000,
+ .vin_name = "vcc4",
.n_voltages = ARRAY_SIZE(VAUX2_VSEL_table),
.voltage_table = VAUX2_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vaux33",
- .min_uV = 1800000,
- .max_uV = 3300000,
+ .vin_name = "vcc3",
.n_voltages = ARRAY_SIZE(VAUX33_VSEL_table),
.voltage_table = VAUX33_VSEL_table,
.enable_time_us = 100,
},
{
.name = "vmmc",
- .min_uV = 1800000,
- .max_uV = 3300000,
+ .vin_name = "vcc3",
.n_voltages = ARRAY_SIZE(VMMC_VSEL_table),
.voltage_table = VMMC_VSEL_table,
.enable_time_us = 100,
@@ -194,91 +181,79 @@ static struct tps_info tps65910_regs[] = {
static struct tps_info tps65911_regs[] = {
{
.name = "vrtc",
+ .vin_name = "vcc7",
.enable_time_us = 2200,
},
{
.name = "vio",
- .min_uV = 1500000,
- .max_uV = 3300000,
+ .vin_name = "vccio",
.n_voltages = ARRAY_SIZE(VIO_VSEL_table),
.voltage_table = VIO_VSEL_table,
.enable_time_us = 350,
},
{
.name = "vdd1",
- .min_uV = 600000,
- .max_uV = 4500000,
- .n_voltages = 73,
+ .vin_name = "vcc1",
+ .n_voltages = 0x4C,
.enable_time_us = 350,
},
{
.name = "vdd2",
- .min_uV = 600000,
- .max_uV = 4500000,
- .n_voltages = 73,
+ .vin_name = "vcc2",
+ .n_voltages = 0x4C,
.enable_time_us = 350,
},
{
.name = "vddctrl",
- .min_uV = 600000,
- .max_uV = 1400000,
- .n_voltages = 65,
+ .n_voltages = 0x44,
.enable_time_us = 900,
},
{
.name = "ldo1",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 47,
+ .vin_name = "vcc6",
+ .n_voltages = 0x33,
.enable_time_us = 420,
},
{
.name = "ldo2",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 47,
+ .vin_name = "vcc6",
+ .n_voltages = 0x33,
.enable_time_us = 420,
},
{
.name = "ldo3",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 24,
+ .vin_name = "vcc5",
+ .n_voltages = 0x1A,
.enable_time_us = 230,
},
{
.name = "ldo4",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 47,
+ .vin_name = "vcc5",
+ .n_voltages = 0x33,
.enable_time_us = 230,
},
{
.name = "ldo5",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 24,
+ .vin_name = "vcc4",
+ .n_voltages = 0x1A,
.enable_time_us = 230,
},
{
.name = "ldo6",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 24,
+ .vin_name = "vcc3",
+ .n_voltages = 0x1A,
.enable_time_us = 230,
},
{
.name = "ldo7",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 24,
+ .vin_name = "vcc3",
+ .n_voltages = 0x1A,
.enable_time_us = 230,
},
{
.name = "ldo8",
- .min_uV = 1000000,
- .max_uV = 3300000,
- .n_voltages = 24,
+ .vin_name = "vcc3",
+ .n_voltages = 0x1A,
.enable_time_us = 230,
},
};
@@ -321,7 +296,6 @@ struct tps65910_reg {
struct tps65910 *mfd;
struct regulator_dev **rdev;
struct tps_info **info;
- struct mutex mutex;
int num_regulators;
int mode;
int (*get_ctrl_reg)(int);
@@ -329,71 +303,6 @@ struct tps65910_reg {
unsigned int board_ext_control[TPS65910_NUM_REGS];
};
-static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
-{
- unsigned int val;
- int err;
-
- err = tps65910_reg_read(pmic->mfd, reg, &val);
- if (err)
- return err;
-
- return val;
-}
-
-static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
- u8 set_mask, u8 clear_mask)
-{
- int err, data;
-
- mutex_lock(&pmic->mutex);
-
- data = tps65910_read(pmic, reg);
- if (data < 0) {
- dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
- err = data;
- goto out;
- }
-
- data &= ~clear_mask;
- data |= set_mask;
- err = tps65910_reg_write(pmic->mfd, reg, data);
- if (err)
- dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
-
-out:
- mutex_unlock(&pmic->mutex);
- return err;
-}
-
-static int tps65910_reg_read_locked(struct tps65910_reg *pmic, u8 reg)
-{
- int data;
-
- mutex_lock(&pmic->mutex);
-
- data = tps65910_read(pmic, reg);
- if (data < 0)
- dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
-
- mutex_unlock(&pmic->mutex);
- return data;
-}
-
-static int tps65910_reg_write_locked(struct tps65910_reg *pmic, u8 reg, u8 val)
-{
- int err;
-
- mutex_lock(&pmic->mutex);
-
- err = tps65910_reg_write(pmic->mfd, reg, val);
- if (err < 0)
- dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
-
- mutex_unlock(&pmic->mutex);
- return err;
-}
-
static int tps65910_get_ctrl_register(int id)
{
switch (id) {
@@ -462,13 +371,6 @@ static int tps65911_get_ctrl_register(int id)
}
}
-static int tps65910_enable_time(struct regulator_dev *dev)
-{
- struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev);
- return pmic->info[id]->enable_time_us;
-}
-
static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
@@ -481,8 +383,9 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_NORMAL:
- return tps65910_modify_bits(pmic, reg, LDO_ST_ON_BIT,
- LDO_ST_MODE_BIT);
+ return tps65910_reg_update_bits(pmic->mfd, reg,
+ LDO_ST_MODE_BIT | LDO_ST_ON_BIT,
+ LDO_ST_ON_BIT);
case REGULATOR_MODE_IDLE:
value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
return tps65910_reg_set_bits(mfd, reg, value);
@@ -496,15 +399,15 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
static unsigned int tps65910_get_mode(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int reg, value, id = rdev_get_id(dev);
+ int ret, reg, value, id = rdev_get_id(dev);
reg = pmic->get_ctrl_reg(id);
if (reg < 0)
return reg;
- value = tps65910_reg_read_locked(pmic, reg);
- if (value < 0)
- return value;
+ ret = tps65910_reg_read(pmic->mfd, reg, &value);
+ if (ret < 0)
+ return ret;
if (!(value & LDO_ST_ON_BIT))
return REGULATOR_MODE_STANDBY;
@@ -517,33 +420,51 @@ static unsigned int tps65910_get_mode(struct regulator_dev *dev)
static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev);
+ int ret, id = rdev_get_id(dev);
int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0;
switch (id) {
case TPS65910_REG_VDD1:
- opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_OP);
- mult = tps65910_reg_read_locked(pmic, TPS65910_VDD1);
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1_OP, &opvsel);
+ if (ret < 0)
+ return ret;
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1, &mult);
+ if (ret < 0)
+ return ret;
mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
- srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_SR);
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD1_SR, &srvsel);
+ if (ret < 0)
+ return ret;
sr = opvsel & VDD1_OP_CMD_MASK;
opvsel &= VDD1_OP_SEL_MASK;
srvsel &= VDD1_SR_SEL_MASK;
vselmax = 75;
break;
case TPS65910_REG_VDD2:
- opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_OP);
- mult = tps65910_reg_read_locked(pmic, TPS65910_VDD2);
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2_OP, &opvsel);
+ if (ret < 0)
+ return ret;
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2, &mult);
+ if (ret < 0)
+ return ret;
mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
- srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_SR);
+ ret = tps65910_reg_read(pmic->mfd, TPS65910_VDD2_SR, &srvsel);
+ if (ret < 0)
+ return ret;
sr = opvsel & VDD2_OP_CMD_MASK;
opvsel &= VDD2_OP_SEL_MASK;
srvsel &= VDD2_SR_SEL_MASK;
vselmax = 75;
break;
case TPS65911_REG_VDDCTRL:
- opvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_OP);
- srvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_SR);
+ ret = tps65910_reg_read(pmic->mfd, TPS65911_VDDCTRL_OP,
+ &opvsel);
+ if (ret < 0)
+ return ret;
+ ret = tps65910_reg_read(pmic->mfd, TPS65911_VDDCTRL_SR,
+ &srvsel);
+ if (ret < 0)
+ return ret;
sr = opvsel & VDDCTRL_OP_CMD_MASK;
opvsel &= VDDCTRL_OP_SEL_MASK;
srvsel &= VDDCTRL_SR_SEL_MASK;
@@ -577,15 +498,15 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
static int tps65910_get_voltage_sel(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int reg, value, id = rdev_get_id(dev);
+ int ret, reg, value, id = rdev_get_id(dev);
reg = pmic->get_ctrl_reg(id);
if (reg < 0)
return reg;
- value = tps65910_reg_read_locked(pmic, reg);
- if (value < 0)
- return value;
+ ret = tps65910_reg_read(pmic->mfd, reg, &value);
+ if (ret < 0)
+ return ret;
switch (id) {
case TPS65910_REG_VIO:
@@ -609,18 +530,20 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev)
static int tps65910_get_voltage_vdd3(struct regulator_dev *dev)
{
- return 5 * 1000 * 1000;
+ return dev->desc->volt_table[0];
}
static int tps65911_get_voltage_sel(struct regulator_dev *dev)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev);
- u8 value, reg;
+ int ret, id = rdev_get_id(dev);
+ unsigned int value, reg;
reg = pmic->get_ctrl_reg(id);
- value = tps65910_reg_read_locked(pmic, reg);
+ ret = tps65910_reg_read(pmic->mfd, reg, &value);
+ if (ret < 0)
+ return ret;
switch (id) {
case TPS65911_REG_LDO1:
@@ -662,10 +585,10 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
dcdc_mult--;
vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
- tps65910_modify_bits(pmic, TPS65910_VDD1,
- (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
- VDD1_VGAIN_SEL_MASK);
- tps65910_reg_write_locked(pmic, TPS65910_VDD1_OP, vsel);
+ tps65910_reg_update_bits(pmic->mfd, TPS65910_VDD1,
+ VDD1_VGAIN_SEL_MASK,
+ dcdc_mult << VDD1_VGAIN_SEL_SHIFT);
+ tps65910_reg_write(pmic->mfd, TPS65910_VDD1_OP, vsel);
break;
case TPS65910_REG_VDD2:
dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
@@ -673,14 +596,14 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
dcdc_mult--;
vsel = (selector % VDD1_2_NUM_VOLT_FINE) + 3;
- tps65910_modify_bits(pmic, TPS65910_VDD2,
- (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
- VDD1_VGAIN_SEL_MASK);
- tps65910_reg_write_locked(pmic, TPS65910_VDD2_OP, vsel);
+ tps65910_reg_update_bits(pmic->mfd, TPS65910_VDD2,
+ VDD1_VGAIN_SEL_MASK,
+ dcdc_mult << VDD2_VGAIN_SEL_SHIFT);
+ tps65910_reg_write(pmic->mfd, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
vsel = selector + 3;
- tps65910_reg_write_locked(pmic, TPS65911_VDDCTRL_OP, vsel);
+ tps65910_reg_write(pmic->mfd, TPS65911_VDDCTRL_OP, vsel);
}
return 0;
@@ -706,8 +629,8 @@ static int tps65910_set_voltage_sel(struct regulator_dev *dev,
case TPS65910_REG_VAUX2:
case TPS65910_REG_VAUX33:
case TPS65910_REG_VMMC:
- return tps65910_modify_bits(pmic, reg,
- (selector << LDO_SEL_SHIFT), LDO_SEL_MASK);
+ return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
}
return -EINVAL;
@@ -727,18 +650,18 @@ static int tps65911_set_voltage_sel(struct regulator_dev *dev,
case TPS65911_REG_LDO1:
case TPS65911_REG_LDO2:
case TPS65911_REG_LDO4:
- return tps65910_modify_bits(pmic, reg,
- (selector << LDO_SEL_SHIFT), LDO1_SEL_MASK);
+ return tps65910_reg_update_bits(pmic->mfd, reg, LDO1_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
case TPS65911_REG_LDO3:
case TPS65911_REG_LDO5:
case TPS65911_REG_LDO6:
case TPS65911_REG_LDO7:
case TPS65911_REG_LDO8:
- return tps65910_modify_bits(pmic, reg,
- (selector << LDO_SEL_SHIFT), LDO3_SEL_MASK);
+ return tps65910_reg_update_bits(pmic->mfd, reg, LDO3_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
case TPS65910_REG_VIO:
- return tps65910_modify_bits(pmic, reg,
- (selector << LDO_SEL_SHIFT), LDO_SEL_MASK);
+ return tps65910_reg_update_bits(pmic->mfd, reg, LDO_SEL_MASK,
+ selector << LDO_SEL_SHIFT);
}
return -EINVAL;
@@ -768,23 +691,6 @@ static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
return volt * 100 * mult;
}
-static int tps65910_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps65910_reg *pmic = rdev_get_drvdata(dev);
- int id = rdev_get_id(dev), voltage;
-
- if (id < TPS65910_REG_VIO || id > TPS65910_REG_VMMC)
- return -EINVAL;
-
- if (selector >= pmic->info[id]->n_voltages)
- return -EINVAL;
- else
- voltage = pmic->info[id]->voltage_table[selector] * 1000;
-
- return voltage;
-}
-
static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
{
struct tps65910_reg *pmic = rdev_get_drvdata(dev);
@@ -816,7 +722,7 @@ static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
step_mv = 100;
break;
case TPS65910_REG_VIO:
- return pmic->info[id]->voltage_table[selector] * 1000;
+ return pmic->info[id]->voltage_table[selector];
default:
return -EINVAL;
}
@@ -824,42 +730,16 @@ static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
return (LDO_MIN_VOLT + selector * step_mv) * 1000;
}
-static int tps65910_set_voltage_dcdc_time_sel(struct regulator_dev *dev,
- unsigned int old_selector, unsigned int new_selector)
-{
- int id = rdev_get_id(dev);
- int old_volt, new_volt;
-
- old_volt = tps65910_list_voltage_dcdc(dev, old_selector);
- if (old_volt < 0)
- return old_volt;
-
- new_volt = tps65910_list_voltage_dcdc(dev, new_selector);
- if (new_volt < 0)
- return new_volt;
-
- /* VDD1 and VDD2 are 12.5mV/us, VDDCTRL is 100mV/20us */
- switch (id) {
- case TPS65910_REG_VDD1:
- case TPS65910_REG_VDD2:
- return DIV_ROUND_UP(abs(old_volt - new_volt), 12500);
- case TPS65911_REG_VDDCTRL:
- return DIV_ROUND_UP(abs(old_volt - new_volt), 5000);
- }
- return -EINVAL;
-}
-
/* Regulator ops (except VRTC) */
static struct regulator_ops tps65910_ops_dcdc = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage_sel = tps65910_get_voltage_dcdc_sel,
.set_voltage_sel = tps65910_set_voltage_dcdc_sel,
- .set_voltage_time_sel = tps65910_set_voltage_dcdc_time_sel,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
.list_voltage = tps65910_list_voltage_dcdc,
};
@@ -867,30 +747,27 @@ static struct regulator_ops tps65910_ops_vdd3 = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage = tps65910_get_voltage_vdd3,
- .list_voltage = tps65910_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
static struct regulator_ops tps65910_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage_sel = tps65910_get_voltage_sel,
.set_voltage_sel = tps65910_set_voltage_sel,
- .list_voltage = tps65910_list_voltage,
+ .list_voltage = regulator_list_voltage_table,
};
static struct regulator_ops tps65911_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .enable_time = tps65910_enable_time,
.set_mode = tps65910_set_mode,
.get_mode = tps65910_get_mode,
.get_voltage_sel = tps65911_get_voltage_sel,
@@ -996,19 +873,27 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
(tps65910_chip_id(mfd) == TPS65911))) {
int op_reg_add = pmic->get_ctrl_reg(id) + 1;
int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
- int opvsel = tps65910_reg_read_locked(pmic, op_reg_add);
- int srvsel = tps65910_reg_read_locked(pmic, sr_reg_add);
+ int opvsel, srvsel;
+
+ ret = tps65910_reg_read(pmic->mfd, op_reg_add, &opvsel);
+ if (ret < 0)
+ return ret;
+ ret = tps65910_reg_read(pmic->mfd, sr_reg_add, &srvsel);
+ if (ret < 0)
+ return ret;
+
if (opvsel & VDD1_OP_CMD_MASK) {
u8 reg_val = srvsel & VDD1_OP_SEL_MASK;
- ret = tps65910_reg_write_locked(pmic, op_reg_add,
- reg_val);
+
+ ret = tps65910_reg_write(pmic->mfd, op_reg_add,
+ reg_val);
if (ret < 0) {
dev_err(mfd->dev,
"Error in configuring op register\n");
return ret;
}
}
- ret = tps65910_reg_write_locked(pmic, sr_reg_add, 0);
+ ret = tps65910_reg_write(pmic->mfd, sr_reg_add, 0);
if (ret < 0) {
dev_err(mfd->dev, "Error in settting sr register\n");
return ret;
@@ -1126,6 +1011,7 @@ static struct tps65910_board *tps65910_parse_dt_reg_data(
"ti,regulator-ext-sleep-control", &prop);
if (!ret)
pmic_plat_data->regulator_ext_sleep_control[idx] = prop;
+
}
return pmic_plat_data;
@@ -1136,7 +1022,7 @@ static inline struct tps65910_board *tps65910_parse_dt_reg_data(
struct of_regulator_match **tps65910_reg_matches)
{
*tps65910_reg_matches = NULL;
- return 0;
+ return NULL;
}
#endif
@@ -1168,7 +1054,6 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
return -ENOMEM;
}
- mutex_init(&pmic->mutex);
pmic->mfd = tps65910;
platform_set_drvdata(pdev, pmic);
@@ -1229,23 +1114,31 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
pmic->info[i] = info;
pmic->desc[i].name = info->name;
+ pmic->desc[i].supply_name = info->vin_name;
pmic->desc[i].id = i;
pmic->desc[i].n_voltages = info->n_voltages;
+ pmic->desc[i].enable_time = info->enable_time_us;
if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
pmic->desc[i].ops = &tps65910_ops_dcdc;
pmic->desc[i].n_voltages = VDD1_2_NUM_VOLT_FINE *
VDD1_2_NUM_VOLT_COARSE;
+ pmic->desc[i].ramp_delay = 12500;
} else if (i == TPS65910_REG_VDD3) {
- if (tps65910_chip_id(tps65910) == TPS65910)
+ if (tps65910_chip_id(tps65910) == TPS65910) {
pmic->desc[i].ops = &tps65910_ops_vdd3;
- else
+ pmic->desc[i].volt_table = info->voltage_table;
+ } else {
pmic->desc[i].ops = &tps65910_ops_dcdc;
+ pmic->desc[i].ramp_delay = 5000;
+ }
} else {
- if (tps65910_chip_id(tps65910) == TPS65910)
+ if (tps65910_chip_id(tps65910) == TPS65910) {
pmic->desc[i].ops = &tps65910_ops;
- else
+ pmic->desc[i].volt_table = info->voltage_table;
+ } else {
pmic->desc[i].ops = &tps65911_ops;
+ }
}
err = tps65910_set_ext_sleep_config(pmic, i,
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index c7390711d954..242fe90dc565 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -43,9 +43,6 @@ struct twlreg_info {
u8 table_len;
const u16 *table;
- /* regulator specific turn-on delay */
- u16 delay;
-
/* State REMAP default configuration */
u8 remap;
@@ -223,20 +220,6 @@ static int twl6030reg_enable(struct regulator_dev *rdev)
return ret;
}
-static int twl4030reg_enable_time(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- return info->delay;
-}
-
-static int twl6030reg_enable_time(struct regulator_dev *rdev)
-{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
-
- return info->delay;
-}
-
static int twl4030reg_disable(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -508,7 +491,6 @@ static struct regulator_ops twl4030ldo_ops = {
.enable = twl4030reg_enable,
.disable = twl4030reg_disable,
.is_enabled = twl4030reg_is_enabled,
- .enable_time = twl4030reg_enable_time,
.set_mode = twl4030reg_set_mode,
@@ -577,59 +559,53 @@ static struct regulator_ops twl6030coresmps_ops = {
.get_voltage = twl6030coresmps_get_voltage,
};
-static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
+static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel)
{
- struct twlreg_info *info = rdev_get_drvdata(rdev);
+ struct twlreg_info *info = rdev_get_drvdata(rdev);
- return ((info->min_mV + (index * 100)) * 1000);
+ switch (sel) {
+ case 0:
+ return 0;
+ case 1 ... 24:
+ /* Linear mapping from 00000001 to 00011000:
+ * Absolute voltage value = 1.0 V + 0.1 V × (sel – 00000001)
+ */
+ return (info->min_mV + 100 * (sel - 1)) * 1000;
+ case 25 ... 30:
+ return -EINVAL;
+ case 31:
+ return 2750000;
+ default:
+ return -EINVAL;
+ }
}
static int
-twl6030ldo_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
- unsigned *selector)
+twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
- int vsel;
-
- if ((min_uV/1000 < info->min_mV) || (max_uV/1000 > info->max_mV))
- return -EDOM;
-
- /*
- * Use the below formula to calculate vsel
- * mV = 1000mv + 100mv * (vsel - 1)
- */
- vsel = (min_uV/1000 - 1000)/100 + 1;
- *selector = vsel;
- return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE, vsel);
+ return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE,
+ selector);
}
-static int twl6030ldo_get_voltage(struct regulator_dev *rdev)
+static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev)
{
struct twlreg_info *info = rdev_get_drvdata(rdev);
- int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
- VREG_VOLTAGE);
-
- if (vsel < 0)
- return vsel;
+ int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);
- /*
- * Use the below formula to calculate vsel
- * mV = 1000mv + 100mv * (vsel - 1)
- */
- return (1000 + (100 * (vsel - 1))) * 1000;
+ return vsel;
}
static struct regulator_ops twl6030ldo_ops = {
.list_voltage = twl6030ldo_list_voltage,
- .set_voltage = twl6030ldo_set_voltage,
- .get_voltage = twl6030ldo_get_voltage,
+ .set_voltage_sel = twl6030ldo_set_voltage_sel,
+ .get_voltage_sel = twl6030ldo_get_voltage_sel,
.enable = twl6030reg_enable,
.disable = twl6030reg_disable,
.is_enabled = twl6030reg_is_enabled,
- .enable_time = twl6030reg_enable_time,
.set_mode = twl6030reg_set_mode,
@@ -663,7 +639,6 @@ static struct regulator_ops twl4030fixed_ops = {
.enable = twl4030reg_enable,
.disable = twl4030reg_disable,
.is_enabled = twl4030reg_is_enabled,
- .enable_time = twl4030reg_enable_time,
.set_mode = twl4030reg_set_mode,
@@ -678,7 +653,6 @@ static struct regulator_ops twl6030fixed_ops = {
.enable = twl6030reg_enable,
.disable = twl6030reg_disable,
.is_enabled = twl6030reg_is_enabled,
- .enable_time = twl6030reg_enable_time,
.set_mode = twl6030reg_set_mode,
@@ -689,7 +663,6 @@ static struct regulator_ops twl6030_fixed_resource = {
.enable = twl6030reg_enable,
.disable = twl6030reg_disable,
.is_enabled = twl6030reg_is_enabled,
- .enable_time = twl6030reg_enable_time,
.get_status = twl6030reg_get_status,
};
@@ -886,7 +859,6 @@ static struct regulator_ops twlsmps_ops = {
.enable = twl6030reg_enable,
.disable = twl6030reg_disable,
.is_enabled = twl6030reg_is_enabled,
- .enable_time = twl6030reg_enable_time,
.set_mode = twl6030reg_set_mode,
@@ -909,7 +881,6 @@ static struct twlreg_info TWL4030_INFO_##label = { \
.id = num, \
.table_len = ARRAY_SIZE(label##_VSEL_table), \
.table = label##_VSEL_table, \
- .delay = turnon_delay, \
.remap = remap_conf, \
.desc = { \
.name = #label, \
@@ -918,6 +889,7 @@ static struct twlreg_info TWL4030_INFO_##label = { \
.ops = &twl4030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .enable_time = turnon_delay, \
}, \
}
@@ -925,7 +897,6 @@ static struct twlreg_info TWL4030_INFO_##label = { \
static struct twlreg_info TWL4030_INFO_##label = { \
.base = offset, \
.id = num, \
- .delay = turnon_delay, \
.remap = remap_conf, \
.desc = { \
.name = #label, \
@@ -933,6 +904,7 @@ static struct twlreg_info TWL4030_INFO_##label = { \
.ops = &twl4030smps_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .enable_time = turnon_delay, \
}, \
}
@@ -955,7 +927,7 @@ static struct twlreg_info TWL6030_INFO_##label = { \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
- .n_voltages = (max_mVolts - min_mVolts)/100 + 1, \
+ .n_voltages = 32, \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -970,7 +942,7 @@ static struct twlreg_info TWL6025_INFO_##label = { \
.desc = { \
.name = #label, \
.id = TWL6025_REG_##label, \
- .n_voltages = ((max_mVolts - min_mVolts)/100) + 1, \
+ .n_voltages = 32, \
.ops = &twl6030ldo_ops, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
@@ -983,7 +955,6 @@ static struct twlreg_info TWLFIXED_INFO_##label = { \
.base = offset, \
.id = num, \
.min_mV = mVolts, \
- .delay = turnon_delay, \
.remap = remap_conf, \
.desc = { \
.name = #label, \
@@ -992,19 +963,20 @@ static struct twlreg_info TWLFIXED_INFO_##label = { \
.ops = &operations, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .enable_time = turnon_delay, \
}, \
}
#define TWL6030_FIXED_RESOURCE(label, offset, turnon_delay) \
static struct twlreg_info TWLRES_INFO_##label = { \
.base = offset, \
- .delay = turnon_delay, \
.desc = { \
.name = #label, \
.id = TWL6030_REG_##label, \
.ops = &twl6030_fixed_resource, \
.type = REGULATOR_VOLTAGE, \
.owner = THIS_MODULE, \
+ .enable_time = turnon_delay, \
}, \
}
@@ -1109,7 +1081,6 @@ static u8 twl_get_smps_mult(void)
#define TWL6030_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6030, label)
#define TWL6025_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6025, label)
#define TWLFIXED_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLFIXED, label)
-#define TWLRES_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLRES, label)
#define TWLSMPS_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLSMPS, label)
static const struct of_device_id twl_of_match[] __devinitconst = {
@@ -1157,7 +1128,6 @@ static const struct of_device_id twl_of_match[] __devinitconst = {
TWLFIXED_OF_MATCH("ti,twl6030-vusb", VUSB),
TWLFIXED_OF_MATCH("ti,twl6030-v1v8", V1V8),
TWLFIXED_OF_MATCH("ti,twl6030-v2v1", V2V1),
- TWLRES_OF_MATCH("ti,twl6030-clk32kg", CLK32KG),
TWLSMPS_OF_MATCH("ti,twl6025-smps3", SMPS3),
TWLSMPS_OF_MATCH("ti,twl6025-smps4", SMPS4),
TWLSMPS_OF_MATCH("ti,twl6025-vio", VIO),
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 099da11e989f..7413885be01b 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -215,8 +215,8 @@ static int wm831x_buckv_list_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
-static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
+static int wm831x_buckv_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
u16 vsel;
@@ -251,20 +251,14 @@ static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state)
return 0;
}
-static int wm831x_buckv_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int wm831x_buckv_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned vsel)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
struct wm831x *wm831x = dcdc->wm831x;
int on_reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
int dvs_reg = dcdc->base + WM831X_DCDC_DVS_CONTROL;
- int vsel, ret;
-
- vsel = wm831x_buckv_select_min_voltage(rdev, min_uV, max_uV);
- if (vsel < 0)
- return vsel;
-
- *selector = vsel;
+ int ret;
/* If this value is already set then do a GPIO update if we can */
if (dcdc->dvs_gpio && dcdc->on_vsel == vsel)
@@ -315,7 +309,7 @@ static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev,
u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
int vsel;
- vsel = wm831x_buckv_select_min_voltage(rdev, uV, uV);
+ vsel = wm831x_buckv_map_voltage(rdev, uV, uV);
if (vsel < 0)
return vsel;
@@ -373,9 +367,10 @@ static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev)
}
static struct regulator_ops wm831x_buckv_ops = {
- .set_voltage = wm831x_buckv_set_voltage,
+ .set_voltage_sel = wm831x_buckv_set_voltage_sel,
.get_voltage_sel = wm831x_buckv_get_voltage_sel,
.list_voltage = wm831x_buckv_list_voltage,
+ .map_voltage = wm831x_buckv_map_voltage,
.set_suspend_voltage = wm831x_buckv_set_suspend_voltage,
.set_current_limit = wm831x_buckv_set_current_limit,
.get_current_limit = wm831x_buckv_get_current_limit,
@@ -599,60 +594,25 @@ static struct platform_driver wm831x_buckv_driver = {
* BUCKP specifics
*/
-static int wm831x_buckp_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector <= WM831X_BUCKP_MAX_SELECTOR)
- return 850000 + (selector * 25000);
- else
- return -EINVAL;
-}
-
-static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV, int *selector)
+static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
struct wm831x *wm831x = dcdc->wm831x;
- u16 vsel;
-
- if (min_uV <= 34000000)
- vsel = (min_uV - 850000) / 25000;
- else
- return -EINVAL;
-
- if (wm831x_buckp_list_voltage(rdev, vsel) > max_uV)
- return -EINVAL;
-
- *selector = vsel;
-
- return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, vsel);
-}
-
-static int wm831x_buckp_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- unsigned *selector)
-{
- struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
- u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG;
-
- return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV,
- selector);
-}
-
-static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev,
- int uV)
-{
- struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev);
u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL;
- unsigned selector;
+ int sel;
+
+ sel = regulator_map_voltage_linear(rdev, uV, uV);
+ if (sel < 0)
+ return sel;
- return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV, &selector);
+ return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, sel);
}
static struct regulator_ops wm831x_buckp_ops = {
- .set_voltage = wm831x_buckp_set_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .list_voltage = wm831x_buckp_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.set_suspend_voltage = wm831x_buckp_set_suspend_voltage,
.is_enabled = regulator_is_enabled_regmap,
@@ -715,6 +675,8 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
dcdc->desc.vsel_mask = WM831X_DC3_ON_VSEL_MASK;
dcdc->desc.enable_reg = WM831X_DCDC_ENABLE;
dcdc->desc.enable_mask = 1 << id;
+ dcdc->desc.min_uV = 850000;
+ dcdc->desc.uV_step = 25000;
config.dev = pdev->dev.parent;
if (pdata)
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index a9a28d8ac185..5cb70ca1e98d 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -78,13 +78,10 @@ static int wm831x_gp_ldo_list_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
-static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV,
- unsigned *selector)
+static int wm831x_gp_ldo_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = ldo->wm831x;
- int vsel, ret;
+ int volt, vsel;
if (min_uV < 900000)
vsel = 0;
@@ -94,36 +91,25 @@ static int wm831x_gp_ldo_set_voltage_int(struct regulator_dev *rdev, int reg,
vsel = ((min_uV - 1700000) / 100000)
+ WM831X_GP_LDO_SELECTOR_LOW + 1;
- ret = wm831x_gp_ldo_list_voltage(rdev, vsel);
- if (ret < 0)
- return ret;
- if (ret < min_uV || ret > max_uV)
+ volt = wm831x_gp_ldo_list_voltage(rdev, vsel);
+ if (volt < min_uV || volt > max_uV)
return -EINVAL;
- *selector = vsel;
-
- return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, vsel);
-}
-
-static int wm831x_gp_ldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- unsigned *selector)
-{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_LDO_ON_CONTROL;
-
- return wm831x_gp_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
- selector);
+ return vsel;
}
static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
- unsigned int selector;
+ struct wm831x *wm831x = ldo->wm831x;
+ int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
- return wm831x_gp_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
+ sel = wm831x_gp_ldo_map_voltage(rdev, uV, uV);
+ if (sel < 0)
+ return sel;
+
+ return wm831x_set_bits(wm831x, reg, WM831X_LDO1_ON_VSEL_MASK, sel);
}
static unsigned int wm831x_gp_ldo_get_mode(struct regulator_dev *rdev)
@@ -243,8 +229,9 @@ static unsigned int wm831x_gp_ldo_get_optimum_mode(struct regulator_dev *rdev,
static struct regulator_ops wm831x_gp_ldo_ops = {
.list_voltage = wm831x_gp_ldo_list_voltage,
+ .map_voltage = wm831x_gp_ldo_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage = wm831x_gp_ldo_set_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage,
.get_mode = wm831x_gp_ldo_get_mode,
.set_mode = wm831x_gp_ldo_set_mode,
@@ -384,13 +371,10 @@ static int wm831x_aldo_list_voltage(struct regulator_dev *rdev,
return -EINVAL;
}
-static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg,
- int min_uV, int max_uV,
- unsigned *selector)
+static int wm831x_aldo_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = ldo->wm831x;
- int vsel, ret;
+ int volt, vsel;
if (min_uV < 1000000)
vsel = 0;
@@ -400,35 +384,26 @@ static int wm831x_aldo_set_voltage_int(struct regulator_dev *rdev, int reg,
vsel = ((min_uV - 1700000) / 100000)
+ WM831X_ALDO_SELECTOR_LOW + 1;
- ret = wm831x_aldo_list_voltage(rdev, vsel);
- if (ret < 0)
- return ret;
- if (ret < min_uV || ret > max_uV)
+ volt = wm831x_aldo_list_voltage(rdev, vsel);
+ if (volt < min_uV || volt > max_uV)
return -EINVAL;
- *selector = vsel;
-
- return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, vsel);
-}
-
-static int wm831x_aldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
-{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_LDO_ON_CONTROL;
+ return vsel;
- return wm831x_aldo_set_voltage_int(rdev, reg, min_uV, max_uV,
- selector);
}
static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
- unsigned int selector;
+ struct wm831x *wm831x = ldo->wm831x;
+ int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
+
+ sel = wm831x_aldo_map_voltage(rdev, uV, uV);
+ if (sel < 0)
+ return sel;
- return wm831x_aldo_set_voltage_int(rdev, reg, uV, uV, &selector);
+ return wm831x_set_bits(wm831x, reg, WM831X_LDO7_ON_VSEL_MASK, sel);
}
static unsigned int wm831x_aldo_get_mode(struct regulator_dev *rdev)
@@ -506,8 +481,9 @@ static int wm831x_aldo_get_status(struct regulator_dev *rdev)
static struct regulator_ops wm831x_aldo_ops = {
.list_voltage = wm831x_aldo_list_voltage,
+ .map_voltage = wm831x_aldo_map_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage = wm831x_aldo_set_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_aldo_set_suspend_voltage,
.get_mode = wm831x_aldo_get_mode,
.set_mode = wm831x_aldo_set_mode,
@@ -628,47 +604,18 @@ static struct platform_driver wm831x_aldo_driver = {
#define WM831X_ALIVE_LDO_MAX_SELECTOR 0xf
-static int wm831x_alive_ldo_set_voltage_int(struct regulator_dev *rdev,
- int reg,
- int min_uV, int max_uV,
- unsigned *selector)
-{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- struct wm831x *wm831x = ldo->wm831x;
- int vsel, ret;
-
- vsel = (min_uV - 800000) / 50000;
-
- ret = regulator_list_voltage_linear(rdev, vsel);
- if (ret < 0)
- return ret;
- if (ret < min_uV || ret > max_uV)
- return -EINVAL;
-
- *selector = vsel;
-
- return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, vsel);
-}
-
-static int wm831x_alive_ldo_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV,
- unsigned *selector)
-{
- struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL;
-
- return wm831x_alive_ldo_set_voltage_int(rdev, reg, min_uV, max_uV,
- selector);
-}
-
static int wm831x_alive_ldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
{
struct wm831x_ldo *ldo = rdev_get_drvdata(rdev);
- int reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL;
- unsigned selector;
+ struct wm831x *wm831x = ldo->wm831x;
+ int sel, reg = ldo->base + WM831X_ALIVE_LDO_SLEEP_CONTROL;
+
+ sel = regulator_map_voltage_linear(rdev, uV, uV);
+ if (sel < 0)
+ return sel;
- return wm831x_alive_ldo_set_voltage_int(rdev, reg, uV, uV, &selector);
+ return wm831x_set_bits(wm831x, reg, WM831X_LDO11_ON_VSEL_MASK, sel);
}
static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
@@ -690,8 +637,9 @@ static int wm831x_alive_ldo_get_status(struct regulator_dev *rdev)
static struct regulator_ops wm831x_alive_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .set_voltage = wm831x_alive_ldo_set_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_alive_ldo_set_suspend_voltage,
.get_status = wm831x_alive_ldo_get_status,
@@ -753,6 +701,7 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
ldo->desc.enable_mask = 1 << id;
ldo->desc.min_uV = 800000;
ldo->desc.uV_step = 50000;
+ ldo->desc.enable_time = 1000;
config.dev = pdev->dev.parent;
if (pdata)
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 94e550dc70b6..7f0fa22ef2aa 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -108,33 +108,6 @@ static int get_isink_val(int min_uA, int max_uA, u16 *setting)
return -EINVAL;
}
-static inline int wm8350_ldo_val_to_mvolts(unsigned int val)
-{
- if (val < 16)
- return (val * 50) + 900;
- else
- return ((val - 16) * 100) + 1800;
-
-}
-
-static inline unsigned int wm8350_ldo_mvolts_to_val(int mV)
-{
- if (mV < 1800)
- return (mV - 900) / 50;
- else
- return ((mV - 1800) / 100) + 16;
-}
-
-static inline int wm8350_dcdc_val_to_mvolts(unsigned int val)
-{
- return (val * 25) + 850;
-}
-
-static inline unsigned int wm8350_dcdc_mvolts_to_val(int mV)
-{
- return (mV - 850) / 25;
-}
-
static int wm8350_isink_set_current(struct regulator_dev *rdev, int min_uA,
int max_uA)
{
@@ -359,104 +332,13 @@ int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode,
}
EXPORT_SYMBOL_GPL(wm8350_isink_set_flash);
-static int wm8350_dcdc_set_voltage(struct regulator_dev *rdev, int min_uV,
- int max_uV, unsigned *selector)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, dcdc = rdev_get_id(rdev), mV,
- min_mV = min_uV / 1000, max_mV = max_uV / 1000;
- u16 val;
-
- if (min_mV < 850 || min_mV > 4025)
- return -EINVAL;
- if (max_mV < 850 || max_mV > 4025)
- return -EINVAL;
-
- /* step size is 25mV */
- mV = (min_mV - 826) / 25;
- if (wm8350_dcdc_val_to_mvolts(mV) > max_mV)
- return -EINVAL;
- BUG_ON(wm8350_dcdc_val_to_mvolts(mV) < min_mV);
-
- switch (dcdc) {
- case WM8350_DCDC_1:
- volt_reg = WM8350_DCDC1_CONTROL;
- break;
- case WM8350_DCDC_3:
- volt_reg = WM8350_DCDC3_CONTROL;
- break;
- case WM8350_DCDC_4:
- volt_reg = WM8350_DCDC4_CONTROL;
- break;
- case WM8350_DCDC_6:
- volt_reg = WM8350_DCDC6_CONTROL;
- break;
- case WM8350_DCDC_2:
- case WM8350_DCDC_5:
- default:
- return -EINVAL;
- }
-
- *selector = mV;
-
- /* all DCDCs have same mV bits */
- val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
- wm8350_reg_write(wm8350, volt_reg, val | mV);
- return 0;
-}
-
-static int wm8350_dcdc_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, dcdc = rdev_get_id(rdev);
-
- switch (dcdc) {
- case WM8350_DCDC_1:
- volt_reg = WM8350_DCDC1_CONTROL;
- break;
- case WM8350_DCDC_3:
- volt_reg = WM8350_DCDC3_CONTROL;
- break;
- case WM8350_DCDC_4:
- volt_reg = WM8350_DCDC4_CONTROL;
- break;
- case WM8350_DCDC_6:
- volt_reg = WM8350_DCDC6_CONTROL;
- break;
- case WM8350_DCDC_2:
- case WM8350_DCDC_5:
- default:
- return -EINVAL;
- }
-
- /* all DCDCs have same mV bits */
- return wm8350_reg_read(wm8350, volt_reg) & WM8350_DC1_VSEL_MASK;
-}
-
-static int wm8350_dcdc_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector > WM8350_DCDC_MAX_VSEL)
- return -EINVAL;
- return wm8350_dcdc_val_to_mvolts(selector) * 1000;
-}
-
static int wm8350_dcdc_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, mV = uV / 1000, dcdc = rdev_get_id(rdev);
+ int sel, volt_reg, dcdc = rdev_get_id(rdev);
u16 val;
- dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, dcdc, mV);
-
- if (mV && (mV < 850 || mV > 4025)) {
- dev_err(wm8350->dev,
- "DCDC%d suspend voltage %d mV out of range\n",
- dcdc, mV);
- return -EINVAL;
- }
- if (mV == 0)
- mV = 850;
+ dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, dcdc, uV / 1000);
switch (dcdc) {
case WM8350_DCDC_1:
@@ -477,10 +359,13 @@ static int wm8350_dcdc_set_suspend_voltage(struct regulator_dev *rdev, int uV)
return -EINVAL;
}
+ sel = regulator_map_voltage_linear(rdev, uV, uV);
+ if (sel < 0)
+ return -EINVAL;
+
/* all DCDCs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_DC1_VSEL_MASK;
- wm8350_reg_write(wm8350, volt_reg,
- val | wm8350_dcdc_mvolts_to_val(mV));
+ wm8350_reg_write(wm8350, volt_reg, val | sel);
return 0;
}
@@ -657,19 +542,49 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
return 0;
}
+static int wm8350_ldo_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ if (selector > WM8350_LDO1_VSEL_MASK)
+ return -EINVAL;
+
+ if (selector < 16)
+ return (selector * 50000) + 900000;
+ else
+ return ((selector - 16) * 100000) + 1800000;
+}
+
+static int wm8350_ldo_map_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV)
+{
+ int volt, sel;
+ int min_mV = min_uV / 1000;
+ int max_mV = max_uV / 1000;
+
+ if (min_mV < 900 || min_mV > 3300)
+ return -EINVAL;
+ if (max_mV < 900 || max_mV > 3300)
+ return -EINVAL;
+
+ if (min_mV < 1800) /* step size is 50mV < 1800mV */
+ sel = DIV_ROUND_UP(min_uV - 900, 50);
+ else /* step size is 100mV > 1800mV */
+ sel = DIV_ROUND_UP(min_uV - 1800, 100) + 16;
+
+ volt = wm8350_ldo_list_voltage(rdev, sel);
+ if (volt < min_uV || volt > max_uV)
+ return -EINVAL;
+
+ return sel;
+}
+
static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, mV = uV / 1000, ldo = rdev_get_id(rdev);
+ int sel, volt_reg, ldo = rdev_get_id(rdev);
u16 val;
- dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, ldo, mV);
-
- if (mV < 900 || mV > 3300) {
- dev_err(wm8350->dev, "LDO%d voltage %d mV out of range\n",
- ldo, mV);
- return -EINVAL;
- }
+ dev_dbg(wm8350->dev, "%s %d mV %d\n", __func__, ldo, uV / 1000);
switch (ldo) {
case WM8350_LDO_1:
@@ -688,10 +603,13 @@ static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
return -EINVAL;
}
+ sel = wm8350_ldo_map_voltage(rdev, uV, uV);
+ if (sel < 0)
+ return -EINVAL;
+
/* all LDOs have same mV bits */
val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
- wm8350_reg_write(wm8350, volt_reg,
- val | wm8350_ldo_mvolts_to_val(mV));
+ wm8350_reg_write(wm8350, volt_reg, val | sel);
return 0;
}
@@ -753,92 +671,6 @@ static int wm8350_ldo_set_suspend_disable(struct regulator_dev *rdev)
return 0;
}
-static int wm8350_ldo_set_voltage(struct regulator_dev *rdev, int min_uV,
- int max_uV, unsigned *selector)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, ldo = rdev_get_id(rdev), mV, min_mV = min_uV / 1000,
- max_mV = max_uV / 1000;
- u16 val;
-
- if (min_mV < 900 || min_mV > 3300)
- return -EINVAL;
- if (max_mV < 900 || max_mV > 3300)
- return -EINVAL;
-
- if (min_mV < 1800) {
- /* step size is 50mV < 1800mV */
- mV = (min_mV - 851) / 50;
- if (wm8350_ldo_val_to_mvolts(mV) > max_mV)
- return -EINVAL;
- BUG_ON(wm8350_ldo_val_to_mvolts(mV) < min_mV);
- } else {
- /* step size is 100mV > 1800mV */
- mV = ((min_mV - 1701) / 100) + 16;
- if (wm8350_ldo_val_to_mvolts(mV) > max_mV)
- return -EINVAL;
- BUG_ON(wm8350_ldo_val_to_mvolts(mV) < min_mV);
- }
-
- switch (ldo) {
- case WM8350_LDO_1:
- volt_reg = WM8350_LDO1_CONTROL;
- break;
- case WM8350_LDO_2:
- volt_reg = WM8350_LDO2_CONTROL;
- break;
- case WM8350_LDO_3:
- volt_reg = WM8350_LDO3_CONTROL;
- break;
- case WM8350_LDO_4:
- volt_reg = WM8350_LDO4_CONTROL;
- break;
- default:
- return -EINVAL;
- }
-
- *selector = mV;
-
- /* all LDOs have same mV bits */
- val = wm8350_reg_read(wm8350, volt_reg) & ~WM8350_LDO1_VSEL_MASK;
- wm8350_reg_write(wm8350, volt_reg, val | mV);
- return 0;
-}
-
-static int wm8350_ldo_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int volt_reg, ldo = rdev_get_id(rdev);
-
- switch (ldo) {
- case WM8350_LDO_1:
- volt_reg = WM8350_LDO1_CONTROL;
- break;
- case WM8350_LDO_2:
- volt_reg = WM8350_LDO2_CONTROL;
- break;
- case WM8350_LDO_3:
- volt_reg = WM8350_LDO3_CONTROL;
- break;
- case WM8350_LDO_4:
- volt_reg = WM8350_LDO4_CONTROL;
- break;
- default:
- return -EINVAL;
- }
-
- /* all LDOs have same mV bits */
- return wm8350_reg_read(wm8350, volt_reg) & WM8350_LDO1_VSEL_MASK;
-}
-
-static int wm8350_ldo_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector > WM8350_LDO1_VSEL_MASK)
- return -EINVAL;
- return wm8350_ldo_val_to_mvolts(selector) * 1000;
-}
-
int wm8350_dcdc_set_slot(struct wm8350 *wm8350, int dcdc, u16 start,
u16 stop, u16 fault)
{
@@ -959,63 +791,6 @@ int wm8350_dcdc25_set_mode(struct wm8350 *wm8350, int dcdc, u16 mode,
}
EXPORT_SYMBOL_GPL(wm8350_dcdc25_set_mode);
-static int wm8350_dcdc_enable(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int dcdc = rdev_get_id(rdev);
- u16 shift;
-
- if (dcdc < WM8350_DCDC_1 || dcdc > WM8350_DCDC_6)
- return -EINVAL;
-
- shift = dcdc - WM8350_DCDC_1;
- wm8350_set_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift);
- return 0;
-}
-
-static int wm8350_dcdc_disable(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int dcdc = rdev_get_id(rdev);
- u16 shift;
-
- if (dcdc < WM8350_DCDC_1 || dcdc > WM8350_DCDC_6)
- return -EINVAL;
-
- shift = dcdc - WM8350_DCDC_1;
- wm8350_clear_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift);
-
- return 0;
-}
-
-static int wm8350_ldo_enable(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int ldo = rdev_get_id(rdev);
- u16 shift;
-
- if (ldo < WM8350_LDO_1 || ldo > WM8350_LDO_4)
- return -EINVAL;
-
- shift = (ldo - WM8350_LDO_1) + 8;
- wm8350_set_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift);
- return 0;
-}
-
-static int wm8350_ldo_disable(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int ldo = rdev_get_id(rdev);
- u16 shift;
-
- if (ldo < WM8350_LDO_1 || ldo > WM8350_LDO_4)
- return -EINVAL;
-
- shift = (ldo - WM8350_LDO_1) + 8;
- wm8350_clear_bits(wm8350, WM8350_DCDC_LDO_REQUESTED, 1 << shift);
- return 0;
-}
-
static int force_continuous_enable(struct wm8350 *wm8350, int dcdc, int enable)
{
int reg = 0, ret;
@@ -1197,42 +972,17 @@ static unsigned int wm8350_dcdc_get_optimum_mode(struct regulator_dev *rdev,
return mode;
}
-static int wm8350_dcdc_is_enabled(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int dcdc = rdev_get_id(rdev), shift;
-
- if (dcdc < WM8350_DCDC_1 || dcdc > WM8350_DCDC_6)
- return -EINVAL;
-
- shift = dcdc - WM8350_DCDC_1;
- return wm8350_reg_read(wm8350, WM8350_DCDC_LDO_REQUESTED)
- & (1 << shift);
-}
-
-static int wm8350_ldo_is_enabled(struct regulator_dev *rdev)
-{
- struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- int ldo = rdev_get_id(rdev), shift;
-
- if (ldo < WM8350_LDO_1 || ldo > WM8350_LDO_4)
- return -EINVAL;
-
- shift = (ldo - WM8350_LDO_1) + 8;
- return wm8350_reg_read(wm8350, WM8350_DCDC_LDO_REQUESTED)
- & (1 << shift);
-}
-
static struct regulator_ops wm8350_dcdc_ops = {
- .set_voltage = wm8350_dcdc_set_voltage,
- .get_voltage_sel = wm8350_dcdc_get_voltage_sel,
- .list_voltage = wm8350_dcdc_list_voltage,
- .enable = wm8350_dcdc_enable,
- .disable = wm8350_dcdc_disable,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.get_mode = wm8350_dcdc_get_mode,
.set_mode = wm8350_dcdc_set_mode,
.get_optimum_mode = wm8350_dcdc_get_optimum_mode,
- .is_enabled = wm8350_dcdc_is_enabled,
.set_suspend_voltage = wm8350_dcdc_set_suspend_voltage,
.set_suspend_enable = wm8350_dcdc_set_suspend_enable,
.set_suspend_disable = wm8350_dcdc_set_suspend_disable,
@@ -1240,20 +990,21 @@ static struct regulator_ops wm8350_dcdc_ops = {
};
static struct regulator_ops wm8350_dcdc2_5_ops = {
- .enable = wm8350_dcdc_enable,
- .disable = wm8350_dcdc_disable,
- .is_enabled = wm8350_dcdc_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.set_suspend_enable = wm8350_dcdc25_set_suspend_enable,
.set_suspend_disable = wm8350_dcdc25_set_suspend_disable,
};
static struct regulator_ops wm8350_ldo_ops = {
- .set_voltage = wm8350_ldo_set_voltage,
- .get_voltage_sel = wm8350_ldo_get_voltage_sel,
+ .map_voltage = wm8350_ldo_map_voltage,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
.list_voltage = wm8350_ldo_list_voltage,
- .enable = wm8350_ldo_enable,
- .disable = wm8350_ldo_disable,
- .is_enabled = wm8350_ldo_is_enabled,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.get_mode = wm8350_ldo_get_mode,
.set_suspend_voltage = wm8350_ldo_set_suspend_voltage,
.set_suspend_enable = wm8350_ldo_set_suspend_enable,
@@ -1277,6 +1028,12 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_DC1,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
+ .min_uV = 850000,
+ .uV_step = 25000,
+ .vsel_reg = WM8350_DCDC1_CONTROL,
+ .vsel_mask = WM8350_DC1_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC1_ENA,
.owner = THIS_MODULE,
},
{
@@ -1285,6 +1042,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.ops = &wm8350_dcdc2_5_ops,
.irq = WM8350_IRQ_UV_DC2,
.type = REGULATOR_VOLTAGE,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC2_ENA,
.owner = THIS_MODULE,
},
{
@@ -1294,6 +1053,12 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_DC3,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
+ .min_uV = 850000,
+ .uV_step = 25000,
+ .vsel_reg = WM8350_DCDC3_CONTROL,
+ .vsel_mask = WM8350_DC3_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC3_ENA,
.owner = THIS_MODULE,
},
{
@@ -1303,6 +1068,12 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_DC4,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
+ .min_uV = 850000,
+ .uV_step = 25000,
+ .vsel_reg = WM8350_DCDC4_CONTROL,
+ .vsel_mask = WM8350_DC4_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC4_ENA,
.owner = THIS_MODULE,
},
{
@@ -1311,6 +1082,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.ops = &wm8350_dcdc2_5_ops,
.irq = WM8350_IRQ_UV_DC5,
.type = REGULATOR_VOLTAGE,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC5_ENA,
.owner = THIS_MODULE,
},
{
@@ -1320,6 +1093,12 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_DC6,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_DCDC_MAX_VSEL + 1,
+ .min_uV = 850000,
+ .uV_step = 25000,
+ .vsel_reg = WM8350_DCDC6_CONTROL,
+ .vsel_mask = WM8350_DC6_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_DC6_ENA,
.owner = THIS_MODULE,
},
{
@@ -1329,6 +1108,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO1,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO1_VSEL_MASK + 1,
+ .vsel_reg = WM8350_LDO1_CONTROL,
+ .vsel_mask = WM8350_LDO1_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_LDO1_ENA,
.owner = THIS_MODULE,
},
{
@@ -1338,6 +1121,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO2,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO2_VSEL_MASK + 1,
+ .vsel_reg = WM8350_LDO2_CONTROL,
+ .vsel_mask = WM8350_LDO2_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_LDO2_ENA,
.owner = THIS_MODULE,
},
{
@@ -1347,6 +1134,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO3,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO3_VSEL_MASK + 1,
+ .vsel_reg = WM8350_LDO3_CONTROL,
+ .vsel_mask = WM8350_LDO3_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_LDO3_ENA,
.owner = THIS_MODULE,
},
{
@@ -1356,6 +1147,10 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO4,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO4_VSEL_MASK + 1,
+ .vsel_reg = WM8350_LDO4_CONTROL,
+ .vsel_mask = WM8350_LDO4_VSEL_MASK,
+ .enable_reg = WM8350_DCDC_LDO_REQUESTED,
+ .enable_mask = WM8350_LDO4_ENA,
.owner = THIS_MODULE,
},
{
@@ -1429,6 +1224,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.init_data = pdev->dev.platform_data;
config.driver_data = dev_get_drvdata(&pdev->dev);
+ config.regmap = wm8350->regmap;
/* register regulator */
rdev = regulator_register(&wm8350_reg[pdev->id], &config);
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 69a2b7ce5e4a..9035dd053611 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -28,34 +28,26 @@ static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
if (selector < 15)
return 900000 + (selector * 50000);
else
- return 1600000 + ((selector - 14) * 100000);
+ return 1700000 + ((selector - 15) * 100000);
}
static int wm8400_ldo_map_voltage(struct regulator_dev *dev,
int min_uV, int max_uV)
{
u16 val;
+ int volt;
if (min_uV < 900000 || min_uV > 3300000)
return -EINVAL;
- if (min_uV < 1700000) {
- /* Steps of 50mV from 900mV; */
+ if (min_uV < 1700000) /* Steps of 50mV from 900mV; */
val = DIV_ROUND_UP(min_uV - 900000, 50000);
+ else /* Steps of 100mV from 1700mV */
+ val = DIV_ROUND_UP(min_uV - 1700000, 100000) + 15;
- if ((val * 50000) + 900000 > max_uV)
- return -EINVAL;
- BUG_ON((val * 50000) + 900000 < min_uV);
- } else {
- /* Steps of 100mV from 1700mV */
- val = DIV_ROUND_UP(min_uV - 1700000, 100000);
-
- if ((val * 100000) + 1700000 > max_uV)
- return -EINVAL;
- BUG_ON((val * 100000) + 1700000 < min_uV);
-
- val += 0xf;
- }
+ volt = wm8400_ldo_list_voltage(dev, val);
+ if (volt < min_uV || volt > max_uV)
+ return -EINVAL;
return val;
}
@@ -152,6 +144,7 @@ static struct regulator_ops wm8400_dcdc_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_mode = wm8400_dcdc_get_mode,
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 9a994316e63c..86bb48db149e 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -26,8 +26,6 @@
#include <linux/mfd/wm8994/pdata.h>
struct wm8994_ldo {
- int enable;
- bool is_enabled;
struct regulator_dev *regulator;
struct wm8994 *wm8994;
};
@@ -35,64 +33,9 @@ struct wm8994_ldo {
#define WM8994_LDO1_MAX_SELECTOR 0x7
#define WM8994_LDO2_MAX_SELECTOR 0x3
-static int wm8994_ldo_enable(struct regulator_dev *rdev)
-{
- struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
-
- /* If we have no soft control assume that the LDO is always enabled. */
- if (!ldo->enable)
- return 0;
-
- gpio_set_value_cansleep(ldo->enable, 1);
- ldo->is_enabled = true;
-
- return 0;
-}
-
-static int wm8994_ldo_disable(struct regulator_dev *rdev)
-{
- struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
-
- /* If we have no soft control assume that the LDO is always enabled. */
- if (!ldo->enable)
- return -EINVAL;
-
- gpio_set_value_cansleep(ldo->enable, 0);
- ldo->is_enabled = false;
-
- return 0;
-}
-
-static int wm8994_ldo_is_enabled(struct regulator_dev *rdev)
-{
- struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
-
- return ldo->is_enabled;
-}
-
-static int wm8994_ldo_enable_time(struct regulator_dev *rdev)
-{
- /* 3ms is fairly conservative but this shouldn't be too performance
- * critical; can be tweaked per-system if required. */
- return 3000;
-}
-
-static int wm8994_ldo1_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (selector > WM8994_LDO1_MAX_SELECTOR)
- return -EINVAL;
-
- return (selector * 100000) + 2400000;
-}
-
static struct regulator_ops wm8994_ldo1_ops = {
- .enable = wm8994_ldo_enable,
- .disable = wm8994_ldo_disable,
- .is_enabled = wm8994_ldo_is_enabled,
- .enable_time = wm8994_ldo_enable_time,
-
- .list_voltage = wm8994_ldo1_list_voltage,
+ .list_voltage = regulator_list_voltage_linear,
+ .map_voltage = regulator_map_voltage_linear,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
};
@@ -124,11 +67,6 @@ static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
}
static struct regulator_ops wm8994_ldo2_ops = {
- .enable = wm8994_ldo_enable,
- .disable = wm8994_ldo_disable,
- .is_enabled = wm8994_ldo_is_enabled,
- .enable_time = wm8994_ldo_enable_time,
-
.list_voltage = wm8994_ldo2_list_voltage,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
@@ -143,6 +81,9 @@ static const struct regulator_desc wm8994_ldo_desc[] = {
.vsel_reg = WM8994_LDO_1,
.vsel_mask = WM8994_LDO1_VSEL_MASK,
.ops = &wm8994_ldo1_ops,
+ .min_uV = 2400000,
+ .uV_step = 100000,
+ .enable_time = 3000,
.owner = THIS_MODULE,
},
{
@@ -153,6 +94,7 @@ static const struct regulator_desc wm8994_ldo_desc[] = {
.vsel_reg = WM8994_LDO_2,
.vsel_mask = WM8994_LDO2_VSEL_MASK,
.ops = &wm8994_ldo2_ops,
+ .enable_time = 3000,
.owner = THIS_MODULE,
},
};
@@ -176,39 +118,26 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
ldo->wm8994 = wm8994;
- if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) {
- ldo->enable = pdata->ldo[id].enable;
-
- ret = gpio_request_one(ldo->enable, 0, "WM8994 LDO enable");
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n",
- ret);
- goto err;
- }
- } else
- ldo->is_enabled = true;
-
config.dev = wm8994->dev;
config.driver_data = ldo;
config.regmap = wm8994->regmap;
- if (pdata)
+ if (pdata) {
config.init_data = pdata->ldo[id].init_data;
+ config.ena_gpio = pdata->ldo[id].enable;
+ }
ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &config);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
id + 1, ret);
- goto err_gpio;
+ goto err;
}
platform_set_drvdata(pdev, ldo);
return 0;
-err_gpio:
- if (gpio_is_valid(ldo->enable))
- gpio_free(ldo->enable);
err:
return ret;
}
@@ -220,8 +149,6 @@ static __devexit int wm8994_ldo_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
regulator_unregister(ldo->regulator);
- if (gpio_is_valid(ldo->enable))
- gpio_free(ldo->enable);
return 0;
}
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 24d880e78ec6..f8d818abf98c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -4,9 +4,11 @@ menu "Remoteproc drivers (EXPERIMENTAL)"
config REMOTEPROC
tristate
depends on EXPERIMENTAL
+ select FW_CONFIG
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
+ depends on EXPERIMENTAL
depends on ARCH_OMAP4
depends on OMAP_IOMMU
select REMOTEPROC
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 75506ec2840e..f56c8ba3a861 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -188,6 +188,26 @@ static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
rpdev->id.name);
}
+/**
+ * __ept_release() - deallocate an rpmsg endpoint
+ * @kref: the ept's reference count
+ *
+ * This function deallocates an ept, and is invoked when its @kref refcount
+ * drops to zero.
+ *
+ * Never invoke this function directly!
+ */
+static void __ept_release(struct kref *kref)
+{
+ struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
+ refcount);
+ /*
+ * At this point no one holds a reference to ept anymore,
+ * so we can directly free it
+ */
+ kfree(ept);
+}
+
/* for more info, see below documentation of rpmsg_create_ept() */
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
@@ -206,6 +226,9 @@ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
return NULL;
}
+ kref_init(&ept->refcount);
+ mutex_init(&ept->cb_lock);
+
ept->rpdev = rpdev;
ept->cb = cb;
ept->priv = priv;
@@ -238,7 +261,7 @@ rem_idr:
idr_remove(&vrp->endpoints, request);
free_ept:
mutex_unlock(&vrp->endpoints_lock);
- kfree(ept);
+ kref_put(&ept->refcount, __ept_release);
return NULL;
}
@@ -302,11 +325,17 @@ EXPORT_SYMBOL(rpmsg_create_ept);
static void
__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
{
+ /* make sure new inbound messages can't find this ept anymore */
mutex_lock(&vrp->endpoints_lock);
idr_remove(&vrp->endpoints, ept->addr);
mutex_unlock(&vrp->endpoints_lock);
- kfree(ept);
+ /* make sure in-flight inbound messages won't invoke cb anymore */
+ mutex_lock(&ept->cb_lock);
+ ept->cb = NULL;
+ mutex_unlock(&ept->cb_lock);
+
+ kref_put(&ept->refcount, __ept_release);
}
/**
@@ -790,12 +819,28 @@ static void rpmsg_recv_done(struct virtqueue *rvq)
/* use the dst addr to fetch the callback of the appropriate user */
mutex_lock(&vrp->endpoints_lock);
+
ept = idr_find(&vrp->endpoints, msg->dst);
+
+ /* let's make sure no one deallocates ept while we use it */
+ if (ept)
+ kref_get(&ept->refcount);
+
mutex_unlock(&vrp->endpoints_lock);
- if (ept && ept->cb)
- ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src);
- else
+ if (ept) {
+ /* make sure ept->cb doesn't go away while we use it */
+ mutex_lock(&ept->cb_lock);
+
+ if (ept->cb)
+ ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
+ msg->src);
+
+ mutex_unlock(&ept->cb_lock);
+
+ /* farewell, ept, we don't need you anymore */
+ kref_put(&ept->refcount, __ept_release);
+ } else
dev_warn(dev, "msg received with no recepient\n");
/* publish the real size of the buffer */
@@ -1040,7 +1085,7 @@ static int __init rpmsg_init(void)
return ret;
}
-module_init(rpmsg_init);
+subsys_initcall(rpmsg_init);
static void __exit rpmsg_fini(void)
{
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 4bcf9ca2818a..370889d0489b 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -17,6 +17,7 @@
#include <linux/mfd/abx500.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/delay.h>
+#include <linux/of.h>
#define AB8500_RTC_SOFF_STAT_REG 0x00
#define AB8500_RTC_CC_CONF_REG 0x01
@@ -422,7 +423,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
}
err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
- IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
+ IRQF_NO_SUSPEND | IRQF_ONESHOT, "ab8500-rtc", rtc);
if (err < 0) {
rtc_device_unregister(rtc);
return err;
@@ -430,7 +431,6 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
-
err = ab8500_sysfs_rtc_register(&pdev->dev);
if (err) {
dev_err(&pdev->dev, "sysfs RTC failed to register\n");
@@ -454,10 +454,16 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ab8500_rtc_match[] = {
+ { .compatible = "stericsson,ab8500-rtc", },
+ {}
+};
+
static struct platform_driver ab8500_rtc_driver = {
.driver = {
.name = "ab8500-rtc",
.owner = THIS_MODULE,
+ .of_match_table = ab8500_rtc_match,
},
.probe = ab8500_rtc_probe,
.remove = __devexit_p(ab8500_rtc_remove),
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index dc474bc6522d..fca9790c7de7 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/completion.h>
+#include <linux/io.h>
#include <asm/uaccess.h>
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 4267789ca995..132333d75408 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -568,6 +568,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
+ pm_wakeup_event(cmos_rtc.dev, 0);
}
spin_unlock(&rtc_lock);
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 5e1d64ee5228..e3e50d69baf8 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -202,10 +202,11 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
struct platform_device *pdev = dev_id;
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
+ unsigned long flags;
u32 status;
u32 events = 0;
- spin_lock_irq(&pdata->rtc->irq_lock);
+ spin_lock_irqsave(&pdata->rtc->irq_lock, flags);
status = readw(ioaddr + RTC_RTCISR) & readw(ioaddr + RTC_RTCIENR);
/* clear interrupt sources */
writew(status, ioaddr + RTC_RTCISR);
@@ -224,7 +225,7 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
events |= (RTC_PF | RTC_IRQF);
rtc_update_irq(pdata->rtc, 1, events);
- spin_unlock_irq(&pdata->rtc->irq_lock);
+ spin_unlock_irqrestore(&pdata->rtc->irq_lock, flags);
return IRQ_HANDLED;
}
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 1f76320e545b..e2785479113c 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -458,12 +458,12 @@ static int __devexit spear_rtc_remove(struct platform_device *pdev)
clk_disable(config->clk);
clk_put(config->clk);
iounmap(config->ioaddr);
- kfree(config);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
release_mem_region(res->start, resource_size(res));
platform_set_drvdata(pdev, NULL);
rtc_device_unregister(config->rtc);
+ kfree(config);
return 0;
}
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 10287865e330..739ef55694f4 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/rtc.h>
#include <linux/slab.h>
+#include <linux/of_device.h>
#include <mach/common.h>
@@ -265,6 +266,12 @@ static int stmp3xxx_rtc_resume(struct platform_device *dev)
#define stmp3xxx_rtc_resume NULL
#endif
+static const struct of_device_id rtc_dt_ids[] = {
+ { .compatible = "fsl,stmp3xxx-rtc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rtc_dt_ids);
+
static struct platform_driver stmp3xxx_rtcdrv = {
.probe = stmp3xxx_rtc_probe,
.remove = stmp3xxx_rtc_remove,
@@ -273,6 +280,7 @@ static struct platform_driver stmp3xxx_rtcdrv = {
.driver = {
.name = "stmp3xxx-rtc",
.owner = THIS_MODULE,
+ .of_match_table = rtc_dt_ids,
},
};
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 258abeabf624..c5d06fe83bba 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -510,7 +510,7 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev)
}
ret = request_threaded_irq(irq, NULL, twl_rtc_interrupt,
- IRQF_TRIGGER_RISING,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
dev_name(&rtc->dev), rtc);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ is not free.\n");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index f3509120a507..15370a2c5ff0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1,5 +1,4 @@
/*
- * File...........: linux/drivers/s390/block/dasd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
@@ -52,7 +51,7 @@ void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
- " Copyright 2000 IBM Corporation");
+ " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
@@ -82,6 +81,7 @@ static void dasd_profile_exit(struct dasd_profile *);
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
+static wait_queue_head_t shutdown_waitq;
/*
* Allocate memory for a new device structure.
@@ -1994,6 +1994,8 @@ static void dasd_device_tasklet(struct dasd_device *device)
/* Now check if the head of the ccw queue needs to be started. */
__dasd_device_start_head(device);
spin_unlock_irq(get_ccwdev_lock(device->cdev));
+ if (waitqueue_active(&shutdown_waitq))
+ wake_up(&shutdown_waitq);
dasd_put_device(device);
}
@@ -2632,6 +2634,8 @@ static void dasd_block_tasklet(struct dasd_block *block)
__dasd_block_start_head(block);
spin_unlock(&block->queue_lock);
spin_unlock_irq(&block->request_queue_lock);
+ if (waitqueue_active(&shutdown_waitq))
+ wake_up(&shutdown_waitq);
dasd_put_device(block->base);
}
@@ -3474,6 +3478,32 @@ char *dasd_get_sense(struct irb *irb)
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
+static inline int _wait_for_empty_queues(struct dasd_device *device)
+{
+ if (device->block)
+ return list_empty(&device->ccw_queue) &&
+ list_empty(&device->block->ccw_queue);
+ else
+ return list_empty(&device->ccw_queue);
+}
+
+void dasd_generic_shutdown(struct ccw_device *cdev)
+{
+ struct dasd_device *device;
+
+ device = dasd_device_from_cdev(cdev);
+ if (IS_ERR(device))
+ return;
+
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+
+ dasd_schedule_device_bh(device);
+
+ wait_event(shutdown_waitq, _wait_for_empty_queues(device));
+}
+EXPORT_SYMBOL_GPL(dasd_generic_shutdown);
+
static int __init dasd_init(void)
{
int rc;
@@ -3481,6 +3511,7 @@ static int __init dasd_init(void)
init_waitqueue_head(&dasd_init_waitq);
init_waitqueue_head(&dasd_flush_wq);
init_waitqueue_head(&generic_waitq);
+ init_waitqueue_head(&shutdown_waitq);
/* register 'common' DASD debug area, used for all DBF_XXX calls */
dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 0326571e7ffa..f8212d54013a 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1,9 +1,8 @@
/*
- * File...........: linux/drivers/s390/block/dasd_3990_erp.c
* Author(s)......: Horst Hummel <Horst.Hummel@de.ibm.com>
* Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000, 2001
+ * Copyright IBM Corp. 2000, 2001
*
*/
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index b3beed5434e4..157defe5e069 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -1,7 +1,7 @@
/*
* PAV alias management for the DASD ECKD discipline
*
- * Copyright IBM Corporation, 2007
+ * Copyright IBM Corp. 2007
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index d71511c7850a..b2b8c18eeced 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1,11 +1,10 @@
/*
- * File...........: linux/drivers/s390/block/dasd_devmap.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ * Copyright IBM Corp. 1999,2001
*
* Device mapping and dasd= parameter parsing functions. All devmap
* functions may not be called from interrupt context. In particular
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 0cea7e98f464..9bd5da36f99e 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -1,10 +1,9 @@
/*
- * File...........: linux/drivers/s390/block/dasd_diag.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.c
* ...............: by Hartmunt Penner <hpenner@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2000
*
*/
diff --git a/drivers/s390/block/dasd_diag.h b/drivers/s390/block/dasd_diag.h
index 4f71fbe60c82..a803cc731586 100644
--- a/drivers/s390/block/dasd_diag.h
+++ b/drivers/s390/block/dasd_diag.h
@@ -1,10 +1,9 @@
/*
- * File...........: linux/drivers/s390/block/dasd_diag.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.h
* ...............: by Hartmunt Penner <hpenner@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2000
*
*/
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bc2e8a7c265b..40a826a7295f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1,5 +1,4 @@
/*
- * File...........: linux/drivers/s390/block/dasd_eckd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
@@ -4247,6 +4246,7 @@ static struct ccw_driver dasd_eckd_driver = {
.set_online = dasd_eckd_set_online,
.notify = dasd_generic_notify,
.path_event = dasd_generic_path_event,
+ .shutdown = dasd_generic_shutdown,
.freeze = dasd_generic_pm_freeze,
.thaw = dasd_generic_restore_device,
.restore = dasd_generic_restore_device,
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 4a688a873a77..2555e494591f 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -1,9 +1,8 @@
/*
- * File...........: linux/drivers/s390/block/dasd_eckd.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Copyright IBM Corp. 1999, 2000
*
*/
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 16c5208c3dc7..ff901b5509c1 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -1,7 +1,7 @@
/*
* Character device driver for extended error reporting.
*
- * Copyright (C) 2005 IBM Corporation
+ * Copyright IBM Corp. 2005
* extended error reporting for DASD ECKD devices
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 0eafe2e421e7..d01ef82f8757 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -1,11 +1,10 @@
/*
- * File...........: linux/drivers/s390/block/dasd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ * Copyright IBM Corp. 1999, 2001
*
*/
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a62a75358eb9..fb7f3bdc6604 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -1,5 +1,4 @@
/*
- * File...........: linux/drivers/s390/block/dasd_fba.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* Copyright IBM Corp. 1999, 2009
diff --git a/drivers/s390/block/dasd_fba.h b/drivers/s390/block/dasd_fba.h
index 14c910baa5fe..b5d3db0e5efb 100644
--- a/drivers/s390/block/dasd_fba.h
+++ b/drivers/s390/block/dasd_fba.h
@@ -1,8 +1,7 @@
/*
- * File...........: linux/drivers/s390/block/dasd_fba.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
+ * Coypright IBM Corp. 1999, 2000
*
*/
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 19a1ff03d65e..f64921756ad6 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -1,11 +1,10 @@
/*
- * File...........: linux/drivers/s390/block/dasd_genhd.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ * Copyright IBM Corp. 1999, 2001
*
* gendisk related functions for the dasd driver.
*
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index c05da00583f0..7ff93eea673d 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -1,5 +1,4 @@
/*
- * File...........: linux/drivers/s390/block/dasd_int.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
@@ -686,6 +685,7 @@ int dasd_generic_set_offline (struct ccw_device *cdev);
int dasd_generic_notify(struct ccw_device *, int);
int dasd_generic_last_path_gone(struct dasd_device *);
int dasd_generic_path_operational(struct dasd_device *);
+void dasd_generic_shutdown(struct ccw_device *);
void dasd_generic_handle_state_change(struct dasd_device *);
int dasd_generic_pm_freeze(struct ccw_device *);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 792c69e78fe2..cceae70279f6 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -1,11 +1,10 @@
/*
- * File...........: linux/drivers/s390/block/dasd_ioctl.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
+ * Copyright IBM Corp. 1999, 2001
*
* i/o controls for the dasd driver.
*/
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index e12989fff4ff..78ac905a5b7f 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -1,11 +1,10 @@
/*
- * File...........: linux/drivers/s390/block/dasd_proc.c
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
* Carsten Otte <Cotte@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
- * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2002
+ * Coypright IBM Corp. 1999, 2002
*
* /proc interface for the dasd driver.
*
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index 0e9a309b9669..8de2deb176d7 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/ctrlchar.c
* Unified handling of special chars.
*
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2001
* Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
*
*/
diff --git a/drivers/s390/char/ctrlchar.h b/drivers/s390/char/ctrlchar.h
index 935ffa0ea7c6..1a53552f4981 100644
--- a/drivers/s390/char/ctrlchar.h
+++ b/drivers/s390/char/ctrlchar.h
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/ctrlchar.c
* Unified handling of special chars.
*
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2001
* Author(s): Fritz Elfert <felfert@millenux.com> <elfert@de.ibm.com>
*
*/
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 7ef9cfdc17d8..01463b052ae7 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/keyboard.c
* ebcdic keycode functions for s390 console drivers
*
* S390 version
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index f682f4e49680..d0ae2be58191 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/keyboard.h
* ebcdic keycode functions for s390 console drivers
*
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
*/
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 30f29a0020a1..3fcc000efc53 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -654,16 +654,6 @@ sclp_remove_processed(struct sccb_header *sccb)
EXPORT_SYMBOL(sclp_remove_processed);
-struct init_sccb {
- struct sccb_header header;
- u16 _reserved;
- u16 mask_length;
- sccb_mask_t receive_mask;
- sccb_mask_t send_mask;
- sccb_mask_t sclp_receive_mask;
- sccb_mask_t sclp_send_mask;
-} __attribute__((packed));
-
/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index 49a1bb52bc87..d7e97ae9ef6d 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -88,6 +88,16 @@ struct sccb_header {
u16 response_code;
} __attribute__((packed));
+struct init_sccb {
+ struct sccb_header header;
+ u16 _reserved;
+ u16 mask_length;
+ sccb_mask_t receive_mask;
+ sccb_mask_t send_mask;
+ sccb_mask_t sclp_receive_mask;
+ sccb_mask_t sclp_send_mask;
+} __attribute__((packed));
+
extern u64 sclp_facilities;
#define SCLP_HAS_CHP_INFO (sclp_facilities & 0x8000000000000000ULL)
#define SCLP_HAS_CHP_RECONFIG (sclp_facilities & 0x2000000000000000ULL)
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 766cb7b19b40..71ea923c322d 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -48,6 +48,7 @@ struct read_info_sccb {
u8 _reserved5[4096 - 112]; /* 112-4095 */
} __attribute__((packed, aligned(PAGE_SIZE)));
+static struct init_sccb __initdata early_event_mask_sccb __aligned(PAGE_SIZE);
static struct read_info_sccb __initdata early_read_info_sccb;
static int __initdata early_read_info_sccb_valid;
@@ -104,6 +105,19 @@ static void __init sclp_read_info_early(void)
}
}
+static void __init sclp_event_mask_early(void)
+{
+ struct init_sccb *sccb = &early_event_mask_sccb;
+ int rc;
+
+ do {
+ memset(sccb, 0, sizeof(*sccb));
+ sccb->header.length = sizeof(*sccb);
+ sccb->mask_length = sizeof(sccb_mask_t);
+ rc = sclp_cmd_sync_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
+ } while (rc == -EBUSY);
+}
+
void __init sclp_facilities_detect(void)
{
struct read_info_sccb *sccb;
@@ -119,6 +133,30 @@ void __init sclp_facilities_detect(void)
rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
rzm <<= 20;
+
+ sclp_event_mask_early();
+}
+
+bool __init sclp_has_linemode(void)
+{
+ struct init_sccb *sccb = &early_event_mask_sccb;
+
+ if (sccb->header.response_code != 0x20)
+ return 0;
+ if (sccb->sclp_send_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK))
+ return 1;
+ return 0;
+}
+
+bool __init sclp_has_vt220(void)
+{
+ struct init_sccb *sccb = &early_event_mask_sccb;
+
+ if (sccb->header.response_code != 0x20)
+ return 0;
+ if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
+ return 1;
+ return 0;
}
unsigned long long sclp_get_rnmax(void)
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 3c03c1060be6..444d36183a25 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/char/sclp_config.c
- *
* Copyright IBM Corp. 2007
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
*/
diff --git a/drivers/s390/char/sclp_cpi.c b/drivers/s390/char/sclp_cpi.c
index 5716487b8c9d..d70d8c20229c 100644
--- a/drivers/s390/char/sclp_cpi.c
+++ b/drivers/s390/char/sclp_cpi.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/sclp_cpi.c
* SCLP control programm identification
*
* Copyright IBM Corp. 2001, 2007
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index bd1b9c919051..2acea809e2ac 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/sclp_cpi_sys.c
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2001, 2007
diff --git a/drivers/s390/char/sclp_cpi_sys.h b/drivers/s390/char/sclp_cpi_sys.h
index deef3e6ff496..65bb6a99c97f 100644
--- a/drivers/s390/char/sclp_cpi_sys.h
+++ b/drivers/s390/char/sclp_cpi_sys.h
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/sclp_cpi_sys.h
* SCLP control program identification sysfs interface
*
* Copyright IBM Corp. 2007
diff --git a/drivers/s390/char/sclp_ocf.c b/drivers/s390/char/sclp_ocf.c
index ab294d5a534e..2553db0fdb52 100644
--- a/drivers/s390/char/sclp_ocf.c
+++ b/drivers/s390/char/sclp_ocf.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/sclp_ocf.c
* SCLP OCF communication parameters sysfs interface
*
* Copyright IBM Corp. 2011
diff --git a/drivers/s390/char/sclp_quiesce.c b/drivers/s390/char/sclp_quiesce.c
index 69df137310bc..475e470d9768 100644
--- a/drivers/s390/char/sclp_quiesce.c
+++ b/drivers/s390/char/sclp_quiesce.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/sclp_quiesce.c
* signal quiesce handler
*
- * (C) Copyright IBM Corp. 1999,2004
+ * Copyright IBM Corp. 1999, 2004
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 50f7115990ff..6a6f76bf6e3d 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -1,7 +1,7 @@
/*
* Sclp "store data in absolut storage"
*
- * Copyright IBM Corp. 2003,2007
+ * Copyright IBM Corp. 2003, 2007
* Author(s): Michael Holzheu
*/
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c
index e66a75b3822c..0792c85baafe 100644
--- a/drivers/s390/char/sclp_tty.c
+++ b/drivers/s390/char/sclp_tty.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/sclp_tty.c
* SCLP line mode terminal driver.
*
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
diff --git a/drivers/s390/char/sclp_tty.h b/drivers/s390/char/sclp_tty.h
index 4b965b22fecd..c8773421c31f 100644
--- a/drivers/s390/char/sclp_tty.h
+++ b/drivers/s390/char/sclp_tty.h
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/sclp_tty.h
* interface to the SCLP-read/write driver
*
* S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999
* Author(s): Martin Peschke <mpeschke@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index bc6c7cfd36b6..c06be6cc2fc3 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/tape.h
* tape device driver for 3480/3490E/3590 tapes.
*
* S390 and zSeries version
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index b28de80b7ca4..6ae929c024ae 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/tape_34xx.c
* tape device discipline for 3480/3490 tapes.
*
* Copyright IBM Corp. 2001, 2009
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index a5c6614b0db2..1b0eb49f739c 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/tape_3590.c
* tape device discipline for 3590 tapes.
*
* Copyright IBM Corp. 2001, 2009
diff --git a/drivers/s390/char/tape_3590.h b/drivers/s390/char/tape_3590.h
index 4534055f1376..36b759e89d22 100644
--- a/drivers/s390/char/tape_3590.h
+++ b/drivers/s390/char/tape_3590.h
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/tape_3590.h
* tape device discipline for 3590 tapes.
*
- * Copyright IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Stefan Bader <shbader@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c
index 46886a7578c6..2d61db3fc62a 100644
--- a/drivers/s390/char/tape_char.c
+++ b/drivers/s390/char/tape_char.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/tape_char.c
* character device frontend for tape device driver
*
* S390 and zSeries version
- * Copyright IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 55343df61edd..54b3c79203f5 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -1,6 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2004
- * tape_class.c
+ * Copyright IBM Corp. 2004
*
* Tape class device support
*
@@ -17,7 +16,7 @@
MODULE_AUTHOR("Stefan Bader <shbader@de.ibm.com>");
MODULE_DESCRIPTION(
- "(C) Copyright IBM Corp. 2004 All Rights Reserved.\n"
+ "Copyright IBM Corp. 2004 All Rights Reserved.\n"
"tape_class.c"
);
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index ba2092f741d5..a332c10d50ad 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -1,6 +1,5 @@
/*
- * (C) Copyright IBM Corp. 2004 All Rights Reserved.
- * tape_class.h
+ * Copyright IBM Corp. 2004 All Rights Reserved.
*
* Tape class device support
*
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 585618663ba4..f3b5123faf08 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/tape_core.c
* basic function of the tape device driver
*
* S390 and zSeries version
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c
index 0ceb37984f77..8733b232a116 100644
--- a/drivers/s390/char/tape_proc.c
+++ b/drivers/s390/char/tape_proc.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/tape.c
* tape device driver for S/390 and zSeries tapes.
*
* S390 and zSeries version
- * Copyright (C) 2001 IBM Corporation
+ * Copyright IBM Corp. 2001
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c
index e7650170274a..981a99fd8d42 100644
--- a/drivers/s390/char/tape_std.c
+++ b/drivers/s390/char/tape_std.c
@@ -1,9 +1,8 @@
/*
- * drivers/s390/char/tape_std.c
* standard tape device functions for ibm tapes.
*
* S390 and zSeries version
- * Copyright (C) 2001,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2001, 2002
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Michael Holzheu <holzheu@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
diff --git a/drivers/s390/char/tape_std.h b/drivers/s390/char/tape_std.h
index 1fc952359341..c5816ad9ed7d 100644
--- a/drivers/s390/char/tape_std.h
+++ b/drivers/s390/char/tape_std.h
@@ -1,8 +1,7 @@
/*
- * drivers/s390/char/tape_std.h
* standard tape device functions for ibm tapes.
*
- * Copyright (C) IBM Corp. 2001,2006
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Tuan Ngo-Anh <ngoanh@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 10ec690197cb..1928f3458d10 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -1,11 +1,10 @@
/*
- * drivers/s390/char/tty3270.c
* IBM/3270 Driver - tty functions.
*
* Author(s):
* Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
* Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * -- Copyright IBM Corp. 2003
*/
#include <linux/module.h>
diff --git a/drivers/s390/char/tty3270.h b/drivers/s390/char/tty3270.h
index 799da57f0390..11141a8f8974 100644
--- a/drivers/s390/char/tty3270.h
+++ b/drivers/s390/char/tty3270.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/char/tty3270.h
- *
* Copyright IBM Corp. 2007
*
*/
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 89c03e6b1c0c..0fdedadff7bc 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -1,5 +1,5 @@
/*
- * Copyright IBM Corp. 2004,2010
+ * Copyright IBM Corp. 2004, 2010
* Interface implementation for communication with the z/VM control program
*
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
diff --git a/drivers/s390/char/vmcp.h b/drivers/s390/char/vmcp.h
index 6a993948e188..1e29b0418382 100644
--- a/drivers/s390/char/vmcp.h
+++ b/drivers/s390/char/vmcp.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2004, 2005 IBM Corporation
+ * Copyright IBM Corp. 2004, 2005
* Interface implementation for communication with the z/VM control program
* Version 1.0
* Author(s): Christian Borntraeger <cborntra@de.ibm.com>
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 524d988d89dd..c131bc40f962 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/char/vmlogrdr.c
* character device driver for reading z/VM system service records
*
*
@@ -656,10 +655,19 @@ static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
len = strlen(buf);
return len;
}
-
-
static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
NULL);
+static struct attribute *vmlogrdr_drv_attrs[] = {
+ &driver_attr_recording_status.attr,
+ NULL,
+};
+static struct attribute_group vmlogrdr_drv_attr_group = {
+ .attrs = vmlogrdr_drv_attrs,
+};
+static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
+ &vmlogrdr_drv_attr_group,
+ NULL,
+};
static struct attribute *vmlogrdr_attrs[] = {
&dev_attr_autopurge.attr,
@@ -668,6 +676,13 @@ static struct attribute *vmlogrdr_attrs[] = {
&dev_attr_recording.attr,
NULL,
};
+static struct attribute_group vmlogrdr_attr_group = {
+ .attrs = vmlogrdr_attrs,
+};
+static const struct attribute_group *vmlogrdr_attr_groups[] = {
+ &vmlogrdr_attr_group,
+ NULL,
+};
static int vmlogrdr_pm_prepare(struct device *dev)
{
@@ -692,18 +707,14 @@ static const struct dev_pm_ops vmlogrdr_pm_ops = {
.prepare = vmlogrdr_pm_prepare,
};
-static struct attribute_group vmlogrdr_attr_group = {
- .attrs = vmlogrdr_attrs,
-};
-
static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
.pm = &vmlogrdr_pm_ops,
+ .groups = vmlogrdr_drv_attr_groups,
};
-
static int vmlogrdr_register_driver(void)
{
int ret;
@@ -717,21 +728,14 @@ static int vmlogrdr_register_driver(void)
if (ret)
goto out_iucv;
- ret = driver_create_file(&vmlogrdr_driver,
- &driver_attr_recording_status);
- if (ret)
- goto out_driver;
-
vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
if (IS_ERR(vmlogrdr_class)) {
ret = PTR_ERR(vmlogrdr_class);
vmlogrdr_class = NULL;
- goto out_attr;
+ goto out_driver;
}
return 0;
-out_attr:
- driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
out_driver:
driver_unregister(&vmlogrdr_driver);
out_iucv:
@@ -745,7 +749,6 @@ static void vmlogrdr_unregister_driver(void)
{
class_destroy(vmlogrdr_class);
vmlogrdr_class = NULL;
- driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
driver_unregister(&vmlogrdr_driver);
iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
@@ -762,6 +765,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
dev->bus = &iucv_bus;
dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver;
+ dev->groups = vmlogrdr_attr_groups;
dev_set_drvdata(dev, priv);
/*
* The release function could be called after the
@@ -779,11 +783,6 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
return ret;
}
- ret = sysfs_create_group(&dev->kobj, &vmlogrdr_attr_group);
- if (ret) {
- device_unregister(dev);
- return ret;
- }
priv->class_device = device_create(vmlogrdr_class, dev,
MKDEV(vmlogrdr_major,
priv->minor_num),
@@ -791,7 +790,6 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
if (IS_ERR(priv->class_device)) {
ret = PTR_ERR(priv->class_device);
priv->class_device=NULL;
- sysfs_remove_group(&dev->kobj, &vmlogrdr_attr_group);
device_unregister(dev);
return ret;
}
@@ -804,7 +802,6 @@ static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
if (priv->device != NULL) {
- sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
device_unregister(priv->device);
priv->device=NULL;
}
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 2211277a1079..e9b72311e254 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -1,7 +1,7 @@
/*
* Watchdog implementation based on z/VM Watchdog Timer API
*
- * Copyright IBM Corp. 2004,2009
+ * Copyright IBM Corp. 2004, 2009
*
* The user space watchdog daemon can use this driver as
* /dev/vmwatchdog to have z/VM execute the specified CP
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index 3303d66b2794..e3b9308b0fe3 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -5,7 +5,7 @@
*
* For more information please refer to Documentation/s390/zfcpdump.txt
*
- * Copyright IBM Corp. 2003,2008
+ * Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
*/
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 65d2e769dfa1..bc10220f6847 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/airq.c
* Support for adapter interruptions
*
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999, 2007
* Author(s): Ingo Adlung <adlung@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Arnd Bergmann <arndb@de.ibm.com>
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 08c66035dd19..2d2a966a3b39 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -1,9 +1,7 @@
/*
- * drivers/s390/cio/blacklist.c
* S/390 common I/O routines -- blacklisting of specific devices
*
- * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 1999, 2002
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index e792436c9270..50ad5fdd815d 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/chp.c
- *
- * Copyright IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -362,10 +360,13 @@ static struct attribute *chp_attrs[] = {
&dev_attr_shared.attr,
NULL,
};
-
static struct attribute_group chp_attr_group = {
.attrs = chp_attrs,
};
+static const struct attribute_group *chp_attr_groups[] = {
+ &chp_attr_group,
+ NULL,
+};
static void chp_release(struct device *dev)
{
@@ -397,6 +398,7 @@ int chp_new(struct chp_id chpid)
chp->chpid = chpid;
chp->state = 1;
chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
+ chp->dev.groups = chp_attr_groups;
chp->dev.release = chp_release;
mutex_init(&chp->lock);
@@ -426,16 +428,10 @@ int chp_new(struct chp_id chpid)
put_device(&chp->dev);
goto out;
}
- ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
- if (ret) {
- device_unregister(&chp->dev);
- goto out;
- }
mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
if (channel_subsystems[chpid.cssid]->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
- sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
device_unregister(&chp->dev);
mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
goto out;
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 12b4903d6fe3..e1399dbee834 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/chp.h
- *
- * Copyright IBM Corp. 2007,2010
+ * Copyright IBM Corp. 2007, 2010
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a84631a7391d..cfe0c087fe5c 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/chsc.c
* S/390 common I/O routines -- channel subsystem call
*
- * Copyright IBM Corp. 1999,2010
+ * Copyright IBM Corp. 1999, 2010
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index a6ddaed8793d..33d1ef703593 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/cio.c
* S/390 common I/O routines -- low level i/o calls
*
- * Copyright IBM Corp. 1999,2008
+ * Copyright IBM Corp. 1999, 2008
* Author(s): Ingo Adlung (adlung@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Arnd Bergmann (arndb@de.ibm.com)
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 204ca728e7fd..c9fc61c0a866 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/cio/cmf.c
- *
* Linux on zSeries Channel Measurement Facility support
*
- * Copyright 2000,2006 IBM Corporation
+ * Copyright IBM Corp. 2000, 2006
*
* Authors: Arnd Bergmann <arndb@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -1341,7 +1339,7 @@ module_init(init_cmf);
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
- "Copyright 2003 IBM Corporation\n");
+ "Copyright IBM Corp. 2003\n");
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index d0a2dff43fb4..0f8a25f98b10 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -1,7 +1,7 @@
/*
* Channel report handling code
*
- * Copyright IBM Corp. 2000,2009
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Ingo Adlung <adlung@de.ibm.com>,
* Martin Schwidefsky <schwidefsky@de.ibm.com>,
* Cornelia Huck <cornelia.huck@de.ibm.com>,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index f8f952d52045..ed25c8740a9c 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/device.c
* bus driver for ccw devices
*
- * Copyright IBM Corp. 2002,2008
+ * Copyright IBM Corp. 2002, 2008
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 1b853513c891..1bb1d00095af 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -1,8 +1,7 @@
/*
- * drivers/s390/cio/device_fsm.c
* finite state machine for device handling
*
- * Copyright IBM Corp. 2002,2008
+ * Copyright IBM Corp. 2002, 2008
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index 78a0b43862c5..d4fa30541a33 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -1,7 +1,7 @@
/*
* CCW device SENSE ID I/O handling.
*
- * Copyright IBM Corp. 2002,2009
+ * Copyright IBM Corp. 2002, 2009
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 07a4fd29f096..368368fe04b2 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -1,7 +1,7 @@
/*
* CCW device PGID and path verification I/O handling.
*
- * Copyright IBM Corp. 2002,2009
+ * Copyright IBM Corp. 2002, 2009
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 66d8066ef22a..15b56a15db15 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -1,8 +1,5 @@
/*
- * drivers/s390/cio/device_status.c
- *
- * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
- * IBM Corporation
+ * Copyright IBM Corp. 2002
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index 4d10981c7cc1..e6d5f8c49524 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/cio/idset.c
- *
* Copyright IBM Corp. 2007
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 7543da4529f9..3d943f03591e 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/cio/idset.h
- *
* Copyright IBM Corp. 2007
* Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
*/
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index b962ffbc0803..5132554d7917 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -1,7 +1,5 @@
/*
- * linux/drivers/s390/cio/qdio.h
- *
- * Copyright 2000,2009 IBM Corp.
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
*/
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 29021f4e96b6..e6e0d31c02ac 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -1,7 +1,5 @@
/*
- * drivers/s390/cio/qdio_debug.c
- *
- * Copyright IBM Corp. 2008,2009
+ * Copyright IBM Corp. 2008, 2009
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
*/
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 5d70bd162ae9..e1f646800ddb 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/cio/qdio_debug.h
- *
* Copyright IBM Corp. 2008
*
* Author: Jan Glauber (jang@linux.vnet.ibm.com)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 7493efafa0d5..e06fa03ea1e4 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/cio/qdio_main.c
- *
* Linux for s390 qdio support, buffer handling, qdio API and module support.
*
- * Copyright 2000,2008 IBM Corp.
+ * Copyright IBM Corp. 2000, 2008
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
* 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index ecf12f0aca7b..6c973db14983 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -1,9 +1,7 @@
/*
- * driver/s390/cio/qdio_setup.c
- *
* qdio queue initialization
*
- * Copyright (C) IBM Corp. 2008
+ * Copyright IBM Corp. 2008
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 011eadea3ee4..2e060088fa87 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -1,7 +1,5 @@
/*
- * linux/drivers/s390/cio/thinint_qdio.c
- *
- * Copyright 2000,2009 IBM Corp.
+ * Copyright IBM Corp. 2000, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
* Cornelia Huck <cornelia.huck@de.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index b987d4619586..ae258a4b4e5e 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1,7 +1,5 @@
/*
- * linux/drivers/s390/crypto/ap_bus.c
- *
- * Copyright (C) 2006 IBM Corporation
+ * Copyright IBM Corp. 2006
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -70,7 +68,7 @@ static int ap_select_domain(void);
*/
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
- "Copyright 2006 IBM Corporation");
+ "Copyright IBM Corp. 2006");
MODULE_LICENSE("GPL");
/*
@@ -338,6 +336,12 @@ static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
break;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
+ if (i < AP_MAX_RESET - 1) {
+ udelay(5);
+ status = ap_queue_interruption_control(qid,
+ ind);
+ continue;
+ }
break;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 726fc65809d8..52d61995af88 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,7 +1,5 @@
/*
- * linux/drivers/s390/crypto/ap_bus.h
- *
- * Copyright (C) 2006 IBM Corporation
+ * Copyright IBM Corp. 2006
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 88523208d47d..2f94132246a1 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_api.c
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -47,7 +45,7 @@
*/
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, "
- "Copyright 2001, 2006 IBM Corporation");
+ "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");
static DEFINE_SPINLOCK(zcrypt_device_lock);
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 9688f3985b07..7a32c4bc8ef9 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_api.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index ed82f2f59b17..1f42f103c761 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_cca_key.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 46812440425a..744c668f586c 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_cex2a.c
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -66,7 +64,7 @@ static struct ap_device_id zcrypt_cex2a_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("CEX2A Cryptographic Coprocessor device driver, "
- "Copyright 2001, 2006 IBM Corporation");
+ "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");
static int zcrypt_cex2a_probe(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 0350665810cf..0dce4b9af184 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_cex2a.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 03ba27f05f92..0965e2626d18 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_error.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c
index ad7951c21b79..f2b71d8df01f 100644
--- a/drivers/s390/crypto/zcrypt_pcica.c
+++ b/drivers/s390/crypto/zcrypt_pcica.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcica.c
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -56,7 +54,7 @@ static struct ap_device_id zcrypt_pcica_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_pcica_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCICA Cryptographic Coprocessor device driver, "
- "Copyright 2001, 2006 IBM Corporation");
+ "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");
static int zcrypt_pcica_probe(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/zcrypt_pcica.h b/drivers/s390/crypto/zcrypt_pcica.h
index 3be11187f6df..9a59155cad51 100644
--- a/drivers/s390/crypto/zcrypt_pcica.h
+++ b/drivers/s390/crypto/zcrypt_pcica.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcica.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c
index e5dd335fda53..0d90a4334055 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.c
+++ b/drivers/s390/crypto/zcrypt_pcicc.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcicc.c
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -68,7 +66,7 @@ static struct ap_device_id zcrypt_pcicc_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_pcicc_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCICC Cryptographic Coprocessor device driver, "
- "Copyright 2001, 2006 IBM Corporation");
+ "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");
static int zcrypt_pcicc_probe(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/zcrypt_pcicc.h b/drivers/s390/crypto/zcrypt_pcicc.h
index 6d4454846c8f..7fe27e15075b 100644
--- a/drivers/s390/crypto/zcrypt_pcicc.h
+++ b/drivers/s390/crypto/zcrypt_pcicc.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcicc.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index f7cc43401816..ccb4f8b60c75 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcixcc.c
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -78,7 +76,7 @@ static struct ap_device_id zcrypt_pcixcc_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_pcixcc_ids);
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("PCIXCC Cryptographic Coprocessor device driver, "
- "Copyright 2001, 2006 IBM Corporation");
+ "Copyright IBM Corp. 2001, 2006");
MODULE_LICENSE("GPL");
static int zcrypt_pcixcc_probe(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.h b/drivers/s390/crypto/zcrypt_pcixcc.h
index 8cb7d7a6973b..c7cdf599e46b 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.h
+++ b/drivers/s390/crypto/zcrypt_pcixcc.h
@@ -1,9 +1,7 @@
/*
- * linux/drivers/s390/crypto/zcrypt_pcixcc.h
- *
* zcrypt 2.1.0
*
- * Copyright (C) 2001, 2006 IBM Corporation
+ * Copyright IBM Corp. 2001, 2006
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index d74e9ae6dfb3..47cccd52aae8 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -1,5 +1,5 @@
/*
- * kvm_virtio.c - virtio for kvm on s390
+ * virtio for kvm on s390
*
* Copyright IBM Corp. 2008
*
@@ -25,6 +25,7 @@
#include <asm/io.h>
#include <asm/kvm_para.h>
#include <asm/kvm_virtio.h>
+#include <asm/sclp.h>
#include <asm/setup.h>
#include <asm/irq.h>
@@ -468,7 +469,7 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
static int __init s390_virtio_console_init(void)
{
- if (!MACHINE_IS_KVM)
+ if (sclp_has_vt220() || sclp_has_linemode())
return -ENODEV;
return virtio_cons_early_init(early_put_chars);
}
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 6b1ff90d2f00..a0a4afe537d0 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -1,5 +1,4 @@
/*
- * drivers/s390/net/claw.c
* ESCON CLAW network driver
*
* Linux for zSeries version
@@ -3380,5 +3379,5 @@ module_exit(claw_cleanup);
MODULE_AUTHOR("Andy Richter <richtera@us.ibm.com>");
MODULE_DESCRIPTION("Linux for System z CLAW Driver\n" \
- "Copyright 2000,2008 IBM Corporation\n");
+ "Copyright IBM Corp. 2000, 2008\n");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
index d962fd741a23..6514e1cb3f1c 100644
--- a/drivers/s390/net/ctcm_dbug.c
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_dbug.c
- *
* Copyright IBM Corp. 2001, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
index 26966d0b9abd..47bf0501995e 100644
--- a/drivers/s390/net/ctcm_dbug.h
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_dbug.h
- *
* Copyright IBM Corp. 2001, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index a69766900a17..d4ade9e92fbb 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_fsms.c
- *
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
* Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 046d077fabbb..c963d04799c0 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_fsms.h
- *
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
* Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index 3cd25544a27a..5227e5734a9d 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_main.c
- *
* Copyright IBM Corp. 2001, 2009
* Author(s):
* Original CTC driver(s):
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index b9056a55d995..477c933685f3 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_main.h
- *
* Copyright IBM Corp. 2001, 2007
* Authors: Fritz Elfert (felfert@millenux.com)
* Peter Tiedemann (ptiedem@de.ibm.com)
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index ac7975b7a837..05b734a2b5b7 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_mpc.c
- *
* Copyright IBM Corp. 2004, 2007
* Authors: Belinda Thompson (belindat@us.ibm.com)
* Andy Richter (richtera@us.ibm.com)
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
index 1fa07b0c11c0..bd1b1cc54ffa 100644
--- a/drivers/s390/net/ctcm_mpc.h
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_mpc.h
- *
* Copyright IBM Corp. 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 0c27ae726475..985b5dcbdac8 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/ctcm_sysfs.c
- *
* Copyright IBM Corp. 2007, 2007
* Authors: Peter Tiedemann (ptiedem@de.ibm.com)
*
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 06e8f31ff3dc..fa7adad6f9ba 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_core.h
- *
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e118e1e1e1c1..7a8b09612c41 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_core_main.c
- *
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c
index 7fab6544def6..5cebfddb86bd 100644
--- a/drivers/s390/net/qeth_core_mpc.c
+++ b/drivers/s390/net/qeth_core_mpc.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_core_mpc.c
- *
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index a11b30c38423..3690bbf2cb3c 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_core_mpc.h
- *
* Copyright IBM Corp. 2007
* Author(s): Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index f163af575c48..9655dc0ea0ec 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_core_sys.c
- *
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 426986518e96..2db409330c21 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_l2_main.c
- *
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
@@ -647,7 +645,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
}
QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
} else {
- random_ether_addr(card->dev->dev_addr);
+ eth_random_addr(card->dev->dev_addr);
memcpy(card->dev->dev_addr, vendor_pre, 3);
}
return 0;
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index e367315a63f0..29c1c00e3a0f 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_l3.h
- *
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7be5e9775691..0cf706699a04 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_l3_main.c
- *
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
@@ -1473,7 +1471,7 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
memcpy(card->dev->dev_addr,
cmd->data.create_destroy_addr.unique_id, ETH_ALEN);
else
- random_ether_addr(card->dev->dev_addr);
+ eth_random_addr(card->dev->dev_addr);
return 0;
}
@@ -2700,10 +2698,11 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour_noref(dst);
+ n = dst_neigh_lookup_skb(dst, skb);
if (n) {
cast_type = n->type;
rcu_read_unlock();
+ neigh_release(n);
if ((cast_type == RTN_BROADCAST) ||
(cast_type == RTN_MULTICAST) ||
(cast_type == RTN_ANYCAST))
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 4cafedf950ad..ebc379486267 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -1,6 +1,4 @@
/*
- * drivers/s390/net/qeth_l3_sys.c
- *
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h
index 149a1151608d..45bc925928ca 100644
--- a/drivers/s390/net/smsgiucv.h
+++ b/drivers/s390/net/smsgiucv.h
@@ -1,7 +1,7 @@
/*
* IUCV special message driver
*
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2003
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 086018109662..aff8621de806 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -3,7 +3,7 @@
*
* Module interface and handling of zfcp data structures.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
/*
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 96f13ad88123..e37f04551948 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -3,7 +3,7 @@
*
* Registration and callback for the s390 common I/O layer.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index fab2c2592a97..fbd8b4db6025 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -5,7 +5,7 @@
* Access Control Lists / Control File Data Channel;
* handling of response code and states for ports and LUNs.
*
- * Copyright IBM Corporation 2008, 2010
+ * Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index a9a816e4aa55..3c1d22097ad0 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index ed5d921e82cd..2955e1a3deaf 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -3,7 +3,7 @@
*
* Global definitions for the zfcp device driver.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#ifndef ZFCP_DEF_H
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index e1b4f800e226..92d3df6ac8ba 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -3,7 +3,7 @@
*
* Error Recovery Procedures (ERP).
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 2302e1cfb76c..36f422770ff5 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#ifndef ZFCP_EXT_H
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 297e6b71ce9c..88688a80b2c1 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -3,7 +3,7 @@
*
* Fibre Channel related functions for the zfcp device driver.
*
- * Copyright IBM Corporation 2008, 2010
+ * Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 4561f3bf7300..b1d2024ed513 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -4,7 +4,7 @@
* Fibre Channel related definitions and inline functions for the zfcp
* device driver
*
- * Copyright IBM Corporation 2009
+ * Copyright IBM Corp. 2009
*/
#ifndef ZFCP_FC_H
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e9a787e2e6a5..e1c1efc2c5a0 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -3,7 +3,7 @@
*
* Implementation of FSF commands.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index db8c85382dca..5e795b86931b 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,7 +3,7 @@
*
* Interface to the FSF support functions.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#ifndef FSF_H
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e14da5751d32..b9fffc8d94a7 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@
*
* Setup and helper functions to access QDIO.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 8ac7f5342d29..497cd379b0d1 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -3,7 +3,7 @@
*
* Header file for zfcp qdio interface
*
- * Copyright IBM Corporation 2010
+ * Copyright IBM Corp. 2010
*/
#ifndef ZFCP_QDIO_H
diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h
index a72d1b730aba..7c2c6194dfca 100644
--- a/drivers/s390/scsi/zfcp_reqlist.h
+++ b/drivers/s390/scsi/zfcp_reqlist.h
@@ -4,7 +4,7 @@
* Data structure and helper functions for tracking pending FSF
* requests.
*
- * Copyright IBM Corporation 2009
+ * Copyright IBM Corp. 2009
*/
#ifndef ZFCP_REQLIST_H
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index b79576b64f45..7b31e3f403f9 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corporation 2002, 2010
+ * Copyright IBM Corp. 2002, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index cdc4ff78a7ba..c66af27b230b 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -3,7 +3,7 @@
*
* sysfs attributes.
*
- * Copyright IBM Corporation 2008, 2010
+ * Copyright IBM Corp. 2008, 2010
*/
#define KMSG_COMPONENT "zfcp"
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 20796ebc33ce..3f2bff0d3aa2 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -4,7 +4,7 @@
* Tracking of manually configured LUNs and helper functions to
* register the LUNs with the SCSI midlayer.
*
- * Copyright IBM Corporation 2010
+ * Copyright IBM Corp. 2010
*/
#include "zfcp_def.h"
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index e9559782d3ec..74bf1aa7af46 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -263,23 +263,6 @@ config SCSI_SCAN_ASYNC
You can override this choice by specifying "scsi_mod.scan=sync"
or async on the kernel's command line.
-config SCSI_WAIT_SCAN
- tristate # No prompt here, this is an invisible symbol.
- default m
- depends on SCSI
- depends on MODULES
-# scsi_wait_scan is a loadable module which waits until all the async scans are
-# complete. The idea is to use it in initrd/ initramfs scripts. You modprobe
-# it after all the modprobes of the root SCSI drivers and it will wait until
-# they have all finished scanning their buses before allowing the boot to
-# proceed. (This method is not applicable if targets boot independently in
-# parallel with the initiator, or with transports with non-deterministic target
-# discovery schemes, or if a transport driver does not support scsi_wait_scan.)
-#
-# This symbol is not exposed as a prompt because little is to be gained by
-# disabling it, whereas people who accidentally switch it off may wonder why
-# their mkinitrd gets into trouble.
-
menu "SCSI Transports"
depends on SCSI
@@ -461,7 +444,7 @@ config SCSI_ACARD
config SCSI_AHA152X
tristate "Adaptec AHA152X/2825 support"
- depends on ISA && SCSI && !64BIT
+ depends on ISA && SCSI
select SCSI_SPI_ATTRS
select CHECK_SIGNATURE
---help---
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 1a3368b08615..888f73a4aae1 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -159,8 +159,6 @@ obj-$(CONFIG_SCSI_OSD_INITIATOR) += osd/
# This goes last, so that "real" scsi devices probe earlier
obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o
-obj-$(CONFIG_SCSI_WAIT_SCAN) += scsi_wait_scan.o
-
scsi_mod-y += scsi.o hosts.o scsi_ioctl.o constants.o \
scsicam.o scsi_error.o scsi_lib.o
scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 52551662d107..d79457ac8bef 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -135,6 +135,8 @@ struct inquiry_data {
static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
+static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max);
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
@@ -152,10 +154,14 @@ int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
int aac_sync_mode; /* Only Sync. transfer - disabled */
+int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
" 0=off, 1=on");
+module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
+ " 0=off, 1=on");
module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
" 0=off, 1=on");
@@ -963,25 +969,44 @@ static void io_callback(void *context, struct fib * fibptr);
static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
{
- u16 fibsize;
- struct aac_raw_io *readcmd;
+ struct aac_dev *dev = fib->dev;
+ u16 fibsize, command;
+
aac_fib_init(fib);
- readcmd = (struct aac_raw_io *) fib_data(fib);
- readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
- readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
- readcmd->count = cpu_to_le32(count<<9);
- readcmd->cid = cpu_to_le16(scmd_id(cmd));
- readcmd->flags = cpu_to_le16(IO_TYPE_READ);
- readcmd->bpTotal = 0;
- readcmd->bpComplete = 0;
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+ struct aac_raw_io2 *readcmd2;
+ readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
+ memset(readcmd2, 0, sizeof(struct aac_raw_io2));
+ readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ readcmd2->byteCount = cpu_to_le32(count<<9);
+ readcmd2->cid = cpu_to_le16(scmd_id(cmd));
+ readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
+ aac_build_sgraw2(cmd, readcmd2, dev->scsi_host_ptr->sg_tablesize);
+ command = ContainerRawIo2;
+ fibsize = sizeof(struct aac_raw_io2) +
+ ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ } else {
+ struct aac_raw_io *readcmd;
+ readcmd = (struct aac_raw_io *) fib_data(fib);
+ readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+ readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ readcmd->count = cpu_to_le32(count<<9);
+ readcmd->cid = cpu_to_le16(scmd_id(cmd));
+ readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
+ readcmd->bpTotal = 0;
+ readcmd->bpComplete = 0;
+ aac_build_sgraw(cmd, &readcmd->sg);
+ command = ContainerRawIo;
+ fibsize = sizeof(struct aac_raw_io) +
+ ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
+ }
- aac_build_sgraw(cmd, &readcmd->sg);
- fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(readcmd->sg.count) - 1) * sizeof (struct sgentryraw));
BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
- return aac_fib_send(ContainerRawIo,
+ return aac_fib_send(command,
fib,
fibsize,
FsaNormal,
@@ -1052,28 +1077,50 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
{
- u16 fibsize;
- struct aac_raw_io *writecmd;
+ struct aac_dev *dev = fib->dev;
+ u16 fibsize, command;
+
aac_fib_init(fib);
- writecmd = (struct aac_raw_io *) fib_data(fib);
- writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
- writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
- writecmd->count = cpu_to_le32(count<<9);
- writecmd->cid = cpu_to_le16(scmd_id(cmd));
- writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
- (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
- cpu_to_le16(IO_TYPE_WRITE|IO_SUREWRITE) :
- cpu_to_le16(IO_TYPE_WRITE);
- writecmd->bpTotal = 0;
- writecmd->bpComplete = 0;
-
- aac_build_sgraw(cmd, &writecmd->sg);
- fibsize = sizeof(struct aac_raw_io) + ((le32_to_cpu(writecmd->sg.count) - 1) * sizeof (struct sgentryraw));
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
+ struct aac_raw_io2 *writecmd2;
+ writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
+ memset(writecmd2, 0, sizeof(struct aac_raw_io2));
+ writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ writecmd2->byteCount = cpu_to_le32(count<<9);
+ writecmd2->cid = cpu_to_le16(scmd_id(cmd));
+ writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
+ (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
+ cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
+ cpu_to_le16(RIO2_IO_TYPE_WRITE);
+ aac_build_sgraw2(cmd, writecmd2, dev->scsi_host_ptr->sg_tablesize);
+ command = ContainerRawIo2;
+ fibsize = sizeof(struct aac_raw_io2) +
+ ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+ } else {
+ struct aac_raw_io *writecmd;
+ writecmd = (struct aac_raw_io *) fib_data(fib);
+ writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+ writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+ writecmd->count = cpu_to_le32(count<<9);
+ writecmd->cid = cpu_to_le16(scmd_id(cmd));
+ writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
+ (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
+ cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
+ cpu_to_le16(RIO_TYPE_WRITE);
+ writecmd->bpTotal = 0;
+ writecmd->bpComplete = 0;
+ aac_build_sgraw(cmd, &writecmd->sg);
+ command = ContainerRawIo;
+ fibsize = sizeof(struct aac_raw_io) +
+ ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
+ }
+
BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
/*
* Now send the Fib to the adapter
*/
- return aac_fib_send(ContainerRawIo,
+ return aac_fib_send(command,
fib,
fibsize,
FsaNormal,
@@ -1492,8 +1539,6 @@ int aac_get_adapter_info(struct aac_dev* dev)
dev->a_ops.adapter_write = aac_write_block;
}
dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
- if (dev->adapter_info.options & AAC_OPT_NEW_COMM_TYPE1)
- dev->adapter_info.options |= AAC_OPT_NEW_COMM;
if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
/*
* Worst case size that could cause sg overflow when
@@ -2616,12 +2661,18 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
- /*
- * Calculate resid for sg
- */
- scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
- - le32_to_cpu(srbreply->data_xfer_length));
+ if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+ /* fast response */
+ srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
+ srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
+ } else {
+ /*
+ * Calculate resid for sg
+ */
+ scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
+ - le32_to_cpu(srbreply->data_xfer_length));
+ }
scsi_dma_unmap(scsicmd);
@@ -2954,6 +3005,118 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
return byte_count;
}
+static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max)
+{
+ unsigned long byte_count = 0;
+ int nseg;
+
+ nseg = scsi_dma_map(scsicmd);
+ BUG_ON(nseg < 0);
+ if (nseg) {
+ struct scatterlist *sg;
+ int i, conformable = 0;
+ u32 min_size = PAGE_SIZE, cur_size;
+
+ scsi_for_each_sg(scsicmd, sg, nseg, i) {
+ int count = sg_dma_len(sg);
+ u64 addr = sg_dma_address(sg);
+
+ BUG_ON(i >= sg_max);
+ rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
+ rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
+ cur_size = cpu_to_le32(count);
+ rio2->sge[i].length = cur_size;
+ rio2->sge[i].flags = 0;
+ if (i == 0) {
+ conformable = 1;
+ rio2->sgeFirstSize = cur_size;
+ } else if (i == 1) {
+ rio2->sgeNominalSize = cur_size;
+ min_size = cur_size;
+ } else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
+ conformable = 0;
+ if (cur_size < min_size)
+ min_size = cur_size;
+ }
+ byte_count += count;
+ }
+
+ /* hba wants the size to be exact */
+ if (byte_count > scsi_bufflen(scsicmd)) {
+ u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
+ (byte_count - scsi_bufflen(scsicmd));
+ rio2->sge[i-1].length = cpu_to_le32(temp);
+ byte_count = scsi_bufflen(scsicmd);
+ }
+
+ rio2->sgeCnt = cpu_to_le32(nseg);
+ rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
+ /* not conformable: evaluate required sg elements */
+ if (!conformable) {
+ int j, nseg_new = nseg, err_found;
+ for (i = min_size / PAGE_SIZE; i >= 1; --i) {
+ err_found = 0;
+ nseg_new = 2;
+ for (j = 1; j < nseg - 1; ++j) {
+ if (rio2->sge[j].length % (i*PAGE_SIZE)) {
+ err_found = 1;
+ break;
+ }
+ nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
+ }
+ if (!err_found)
+ break;
+ }
+ if (i > 0 && nseg_new <= sg_max)
+ aac_convert_sgraw2(rio2, i, nseg, nseg_new);
+ } else
+ rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+
+ /* Check for command underflow */
+ if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+ printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+ byte_count, scsicmd->underflow);
+ }
+ }
+
+ return byte_count;
+}
+
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
+{
+ struct sge_ieee1212 *sge;
+ int i, j, pos;
+ u32 addr_low;
+
+ if (aac_convert_sgl == 0)
+ return 0;
+
+ sge = kmalloc(nseg_new * sizeof(struct sge_ieee1212), GFP_ATOMIC);
+ if (sge == NULL)
+ return -1;
+
+ for (i = 1, pos = 1; i < nseg-1; ++i) {
+ for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
+ addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
+ sge[pos].addrLow = addr_low;
+ sge[pos].addrHigh = rio2->sge[i].addrHigh;
+ if (addr_low < rio2->sge[i].addrLow)
+ sge[pos].addrHigh++;
+ sge[pos].length = pages * PAGE_SIZE;
+ sge[pos].flags = 0;
+ pos++;
+ }
+ }
+ sge[pos] = rio2->sge[nseg-1];
+ memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
+
+ kfree(sge);
+ rio2->sgeCnt = cpu_to_le32(nseg_new);
+ rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+ rio2->sgeNominalSize = pages * PAGE_SIZE;
+ return 0;
+}
+
#ifdef AAC_DETAILED_STATUS_INFO
struct aac_srb_status_info {
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3fcf62724fad..9e933a88a8bc 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,7 +12,7 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 28900
+# define AAC_DRIVER_BUILD 29800
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
@@ -100,6 +100,13 @@ struct user_sgentryraw {
u32 flags; /* reserved for F/W use */
};
+struct sge_ieee1212 {
+ u32 addrLow;
+ u32 addrHigh;
+ u32 length;
+ u32 flags;
+};
+
/*
* SGMAP
*
@@ -270,6 +277,8 @@ enum aac_queue_types {
*/
#define FIB_MAGIC 0x0001
+#define FIB_MAGIC2 0x0004
+#define FIB_MAGIC2_64 0x0005
/*
* Define the priority levels the FSA communication routines support.
@@ -296,22 +305,20 @@ struct aac_fibhdr {
__le32 XferState; /* Current transfer state for this CCB */
__le16 Command; /* Routing information for the destination */
u8 StructType; /* Type FIB */
- u8 Flags; /* Flags for FIB */
+ u8 Unused; /* Unused */
__le16 Size; /* Size of this FIB in bytes */
__le16 SenderSize; /* Size of the FIB in the sender
(for response sizing) */
__le32 SenderFibAddress; /* Host defined data in the FIB */
- __le32 ReceiverFibAddress;/* Logical address of this FIB for
- the adapter */
- u32 SenderData; /* Place holder for the sender to store data */
union {
- struct {
- __le32 _ReceiverTimeStart; /* Timestamp for
- receipt of fib */
- __le32 _ReceiverTimeDone; /* Timestamp for
- completion of fib */
- } _s;
- } _u;
+ __le32 ReceiverFibAddress;/* Logical address of this FIB for
+ the adapter (old) */
+ __le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */
+ __le32 TimeStamp; /* otherwise timestamp for FW internal use */
+ } u;
+ u32 Handle; /* FIB handle used for MSGU commnunication */
+ u32 Previous; /* FW internal use */
+ u32 Next; /* FW internal use */
};
struct hw_fib {
@@ -361,6 +368,7 @@ struct hw_fib {
#define ContainerCommand 500
#define ContainerCommand64 501
#define ContainerRawIo 502
+#define ContainerRawIo2 503
/*
* Scsi Port commands (scsi passthrough)
*/
@@ -417,6 +425,7 @@ enum fib_xfer_state {
#define ADAPTER_INIT_STRUCT_REVISION 3
#define ADAPTER_INIT_STRUCT_REVISION_4 4 // rocket science
#define ADAPTER_INIT_STRUCT_REVISION_6 6 /* PMC src */
+#define ADAPTER_INIT_STRUCT_REVISION_7 7 /* Denali */
struct aac_init
{
@@ -441,7 +450,9 @@ struct aac_init
#define INITFLAGS_NEW_COMM_SUPPORTED 0x00000001
#define INITFLAGS_DRIVER_USES_UTC_TIME 0x00000010
#define INITFLAGS_DRIVER_SUPPORTS_PM 0x00000020
-#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000041
+#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED 0x00000040
+#define INITFLAGS_FAST_JBOD_SUPPORTED 0x00000080
+#define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED 0x00000100
__le32 MaxIoCommands; /* max outstanding commands */
__le32 MaxIoSize; /* largest I/O command */
__le32 MaxFibSize; /* largest FIB to adapter */
@@ -1052,10 +1063,11 @@ struct aac_dev
struct adapter_ops a_ops;
unsigned long fsrev; /* Main driver's revision number */
- unsigned long dbg_base; /* address of UART
+ resource_size_t base_start; /* main IO base */
+ resource_size_t dbg_base; /* address of UART
* debug buffer */
- unsigned base_size, dbg_size; /* Size of
+ resource_size_t base_size, dbg_size; /* Size of
* mapped in region */
struct aac_init *init; /* Holds initialization info to communicate with adapter */
@@ -1123,6 +1135,7 @@ struct aac_dev
# define AAC_COMM_PRODUCER 0
# define AAC_COMM_MESSAGE 1
# define AAC_COMM_MESSAGE_TYPE1 3
+# define AAC_COMM_MESSAGE_TYPE2 4
u8 raw_io_interface;
u8 raw_io_64;
u8 printf_enabled;
@@ -1181,6 +1194,7 @@ struct aac_dev
#define FIB_CONTEXT_FLAG_TIMED_OUT (0x00000001)
#define FIB_CONTEXT_FLAG (0x00000002)
#define FIB_CONTEXT_FLAG_WAIT (0x00000004)
+#define FIB_CONTEXT_FLAG_FASTRESP (0x00000008)
/*
* Define the command values
@@ -1287,6 +1301,22 @@ struct aac_dev
#define CMDATA_SYNCH 4
#define CMUNSTABLE 5
+#define RIO_TYPE_WRITE 0x0000
+#define RIO_TYPE_READ 0x0001
+#define RIO_SUREWRITE 0x0008
+
+#define RIO2_IO_TYPE 0x0003
+#define RIO2_IO_TYPE_WRITE 0x0000
+#define RIO2_IO_TYPE_READ 0x0001
+#define RIO2_IO_TYPE_VERIFY 0x0002
+#define RIO2_IO_ERROR 0x0004
+#define RIO2_IO_SUREWRITE 0x0008
+#define RIO2_SGL_CONFORMANT 0x0010
+#define RIO2_SG_FORMAT 0xF000
+#define RIO2_SG_FORMAT_ARC 0x0000
+#define RIO2_SG_FORMAT_SRL 0x1000
+#define RIO2_SG_FORMAT_IEEE1212 0x2000
+
struct aac_read
{
__le32 command;
@@ -1331,9 +1361,6 @@ struct aac_write64
__le32 block;
__le16 pad;
__le16 flags;
-#define IO_TYPE_WRITE 0x00000000
-#define IO_TYPE_READ 0x00000001
-#define IO_SUREWRITE 0x00000008
struct sgmap64 sg; // Must be last in struct because it is variable
};
struct aac_write_reply
@@ -1354,6 +1381,22 @@ struct aac_raw_io
struct sgmapraw sg;
};
+struct aac_raw_io2 {
+ __le32 blockLow;
+ __le32 blockHigh;
+ __le32 byteCount;
+ __le16 cid;
+ __le16 flags; /* RIO2 flags */
+ __le32 sgeFirstSize; /* size of first sge el. */
+ __le32 sgeNominalSize; /* size of 2nd sge el. (if conformant) */
+ u8 sgeCnt; /* only 8 bits required */
+ u8 bpTotal; /* reserved for F/W use */
+ u8 bpComplete; /* reserved for F/W use */
+ u8 sgeFirstIndex; /* reserved for F/W use */
+ u8 unused[4];
+ struct sge_ieee1212 sge[1];
+};
+
#define CT_FLUSH_CACHE 129
struct aac_synchronize {
__le32 command; /* VM_ContainerConfig */
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 0bd38da4ada0..1ef041bc60c8 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -498,6 +498,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
return -ENOMEM;
}
aac_fib_init(srbfib);
+ /* raw_srb FIB is not FastResponseCapable */
+ srbfib->hw_fib_va->header.XferState &= ~cpu_to_le32(FastResponseCapable);
srbcmd = (struct aac_srb*) fib_data(srbfib);
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index a35f54ebdce0..8e5d3be16127 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -58,7 +58,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
dma_addr_t phys;
unsigned long aac_max_hostphysmempages;
- if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1)
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE2)
host_rrq_size = (dev->scsi_host_ptr->can_queue
+ AAC_NUM_MGT_FIB) * sizeof(u32);
size = fibsize + sizeof(struct aac_init) + commsize +
@@ -75,7 +76,8 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
dev->comm_phys = phys;
dev->comm_size = size;
- if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
dev->host_rrq = (u32 *)(base + fibsize);
dev->host_rrq_pa = phys + fibsize;
memset(dev->host_rrq, 0, host_rrq_size);
@@ -115,26 +117,32 @@ static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long co
else
init->HostPhysMemPages = cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
- init->InitFlags = 0;
+ init->InitFlags = cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
+ INITFLAGS_DRIVER_SUPPORTS_PM);
+ init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+ init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
+ init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
+ init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
+
if (dev->comm_interface == AAC_COMM_MESSAGE) {
init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
dprintk((KERN_WARNING"aacraid: New Comm Interface enabled\n"));
} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
- init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_TYPE1_SUPPORTED);
- dprintk((KERN_WARNING
- "aacraid: New Comm Interface type1 enabled\n"));
+ init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+ INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
+ init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
+ init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
+ dprintk((KERN_WARNING"aacraid: New Comm Interface type1 enabled\n"));
+ } else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ init->InitStructRevision = cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
+ init->InitFlags |= cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+ INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | INITFLAGS_FAST_JBOD_SUPPORTED);
+ init->HostRRQ_AddrHigh = cpu_to_le32((u32)((u64)dev->host_rrq_pa >> 32));
+ init->HostRRQ_AddrLow = cpu_to_le32((u32)(dev->host_rrq_pa & 0xffffffff));
+ init->MiniPortRevision = cpu_to_le32(0L); /* number of MSI-X */
+ dprintk((KERN_WARNING"aacraid: New Comm Interface type2 enabled\n"));
}
- init->InitFlags |= cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
- INITFLAGS_DRIVER_SUPPORTS_PM);
- init->MaxIoCommands = cpu_to_le32(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
- init->MaxIoSize = cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
- init->MaxFibSize = cpu_to_le32(dev->max_fib_size);
-
- init->MaxNumAif = cpu_to_le32(dev->max_num_aif);
- init->HostRRQ_AddrHigh = (u32)((u64)dev->host_rrq_pa >> 32);
- init->HostRRQ_AddrLow = (u32)(dev->host_rrq_pa & 0xffffffff);
-
/*
* Increment the base address by the amount already used
@@ -354,13 +362,15 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE1))) {
/* driver supports TYPE1 (Tupelo) */
dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+ } else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
+ /* driver supports TYPE2 (Denali) */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
} else if ((status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE4)) ||
- (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3)) ||
- (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE2))) {
- /* driver doesn't support TYPE2 (Series7), TYPE3 and TYPE4 */
- /* switch to sync. mode */
- dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
- dev->sync_mode = 1;
+ (status[1] & le32_to_cpu(AAC_OPT_NEW_COMM_TYPE3))) {
+ /* driver doesn't TYPE3 and TYPE4 */
+ /* switch to sync. mode */
+ dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
+ dev->sync_mode = 1;
}
}
if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 4b32ca442433..1be0776a80c4 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -136,6 +136,7 @@ int aac_fib_setup(struct aac_dev * dev)
i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
i++, fibptr++)
{
+ fibptr->flags = 0;
fibptr->dev = dev;
fibptr->hw_fib_va = hw_fib;
fibptr->data = (void *) fibptr->hw_fib_va->data;
@@ -240,11 +241,11 @@ void aac_fib_init(struct fib *fibptr)
{
struct hw_fib *hw_fib = fibptr->hw_fib_va;
+ memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
hw_fib->header.StructType = FIB_MAGIC;
hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
- hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
- hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
+ hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
@@ -259,7 +260,6 @@ void aac_fib_init(struct fib *fibptr)
static void fib_dealloc(struct fib * fibptr)
{
struct hw_fib *hw_fib = fibptr->hw_fib_va;
- BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
hw_fib->header.XferState = 0;
}
@@ -370,7 +370,7 @@ int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw
entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
entry->addr = hw_fib->header.SenderFibAddress;
/* Restore adapters pointer to the FIB */
- hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */
+ hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress; /* Let the adapter now where to find its data */
map = 0;
}
/*
@@ -450,7 +450,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
*/
hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
- hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
+ hw_fib->header.Handle = (u32)(fibptr - dev->fibs) + 1;
/*
* Set FIB state to indicate where it came from and if we want a
* response from the adapter. Also load the command from the
@@ -460,7 +460,6 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
*/
hw_fib->header.Command = cpu_to_le16(command);
hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
- fibptr->hw_fib_va->header.Flags = 0; /* 0 the flags field - internal only*/
/*
* Set the size of the Fib we want to send to the adapter
*/
@@ -564,10 +563,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
* functioning because an interrupt routing or other
* hardware failure has occurred.
*/
- unsigned long count = 36000000L; /* 3 minutes */
+ unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
while (down_trylock(&fibptr->event_wait)) {
int blink;
- if (--count == 0) {
+ if (time_is_before_eq_jiffies(timeout)) {
struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
spin_lock_irqsave(q->lock, qflags);
q->numpending--;
@@ -588,7 +587,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
return -EFAULT;
}
- udelay(5);
+ /* We used to udelay() here but that absorbed
+ * a CPU when a timeout occured. Not very
+ * useful. */
+ cpu_relax();
}
} else if (down_interruptible(&fibptr->event_wait)) {
/* Do nothing ... satisfy
@@ -708,7 +710,8 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
unsigned long nointr = 0;
unsigned long qflags;
- if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+ dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
kfree(hw_fib);
return 0;
}
@@ -721,7 +724,9 @@ int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
/*
* If we plan to do anything check the structure type first.
*/
- if (hw_fib->header.StructType != FIB_MAGIC) {
+ if (hw_fib->header.StructType != FIB_MAGIC &&
+ hw_fib->header.StructType != FIB_MAGIC2 &&
+ hw_fib->header.StructType != FIB_MAGIC2_64) {
if (dev->comm_interface == AAC_COMM_MESSAGE)
kfree(hw_fib);
return -EINVAL;
@@ -783,7 +788,9 @@ int aac_fib_complete(struct fib *fibptr)
* If we plan to do anything check the structure type first.
*/
- if (hw_fib->header.StructType != FIB_MAGIC)
+ if (hw_fib->header.StructType != FIB_MAGIC &&
+ hw_fib->header.StructType != FIB_MAGIC2 &&
+ hw_fib->header.StructType != FIB_MAGIC2_64)
return -EINVAL;
/*
* This block completes a cdb which orginated on the host and we
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index f0c66a80ad13..d81b2810f0f7 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -101,6 +101,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
*/
*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
}
FIB_COUNTER_INCREMENT(aac_config.FibRecved);
@@ -121,7 +122,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
- fib->flags = 0;
+ fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
@@ -367,6 +368,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
*/
*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+ fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
}
FIB_COUNTER_INCREMENT(aac_config.FibRecved);
@@ -387,7 +389,7 @@ unsigned int aac_intr_normal(struct aac_dev *dev, u32 index,
* NOTE: we cannot touch the fib after this
* call, because it may have been deallocated.
*/
- fib->flags = 0;
+ fib->flags &= FIB_CONTEXT_FLAG_FASTRESP;
fib->callback(fib->callback_data, fib);
} else {
unsigned long flagv;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 0d279c445a30..7199534cd07d 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1089,8 +1089,17 @@ static struct scsi_host_template aac_driver_template = {
static void __aac_shutdown(struct aac_dev * aac)
{
- if (aac->aif_thread)
+ if (aac->aif_thread) {
+ int i;
+ /* Clear out events first */
+ for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
+ struct fib *fib = &aac->fibs[i];
+ if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+ (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
+ up(&fib->event_wait);
+ }
kthread_stop(aac->thread);
+ }
aac_send_shutdown(aac);
aac_adapter_disable_int(aac);
free_irq(aac->pdev->irq, aac);
@@ -1145,11 +1154,11 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
goto out_disable_pdev;
shost->irq = pdev->irq;
- shost->base = pci_resource_start(pdev, 0);
shost->unique_id = unique_id;
shost->max_cmd_len = 16;
aac = (struct aac_dev *)shost->hostdata;
+ aac->base_start = pci_resource_start(pdev, 0);
aac->scsi_host_ptr = shost;
aac->pdev = pdev;
aac->name = aac_driver_template.name;
@@ -1157,7 +1166,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
aac->cardtype = index;
INIT_LIST_HEAD(&aac->entry);
- aac->fibs = kmalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
+ aac->fibs = kzalloc(sizeof(struct fib) * (shost->can_queue + AAC_NUM_MGT_FIB), GFP_KERNEL);
if (!aac->fibs)
goto out_free_host;
spin_lock_init(&aac->fib_lock);
@@ -1191,6 +1200,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev,
if (IS_ERR(aac->thread)) {
printk(KERN_ERR "aacraid: Unable to create command thread.\n");
error = PTR_ERR(aac->thread);
+ aac->thread = NULL;
goto out_deinit;
}
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
index f397d21a0c06..6c53b1d8b2ba 100644
--- a/drivers/scsi/aacraid/nark.c
+++ b/drivers/scsi/aacraid/nark.c
@@ -49,14 +49,14 @@ static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
dev->base = NULL;
return 0;
}
- dev->scsi_host_ptr->base = pci_resource_start(dev->pdev, 2);
+ dev->base_start = pci_resource_start(dev->pdev, 2);
dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) |
((u64)pci_resource_start(dev->pdev, 1) << 32),
sizeof(struct rx_registers) - sizeof(struct rx_inbound));
dev->base = NULL;
if (dev->regs.rx == NULL)
return -1;
- dev->base = ioremap(dev->scsi_host_ptr->base, size);
+ dev->base = ioremap(dev->base_start, size);
if (dev->base == NULL) {
iounmap(dev->regs.rx);
dev->regs.rx = NULL;
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
index be44de92429a..7d8013feedde 100644
--- a/drivers/scsi/aacraid/rkt.c
+++ b/drivers/scsi/aacraid/rkt.c
@@ -79,7 +79,7 @@ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
iounmap(dev->regs.rkt);
return 0;
}
- dev->base = dev->regs.rkt = ioremap(dev->scsi_host_ptr->base, size);
+ dev->base = dev->regs.rkt = ioremap(dev->base_start, size);
if (dev->base == NULL)
return -1;
dev->IndexRegs = &dev->regs.rkt->IndexRegs;
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
index b029c7cc785b..dada38aeacc0 100644
--- a/drivers/scsi/aacraid/rx.c
+++ b/drivers/scsi/aacraid/rx.c
@@ -471,7 +471,7 @@ static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
iounmap(dev->regs.rx);
return 0;
}
- dev->base = dev->regs.rx = ioremap(dev->scsi_host_ptr->base, size);
+ dev->base = dev->regs.rx = ioremap(dev->base_start, size);
if (dev->base == NULL)
return -1;
dev->IndexRegs = &dev->regs.rx->IndexRegs;
@@ -653,7 +653,7 @@ int _aac_rx_init(struct aac_dev *dev)
name, instance);
goto error_iounmap;
}
- dev->dbg_base = dev->scsi_host_ptr->base;
+ dev->dbg_base = dev->base_start;
dev->dbg_base_mapped = dev->base;
dev->dbg_size = dev->base_size;
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index beb533630d4b..2244f315f33b 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -305,7 +305,7 @@ static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
iounmap(dev->regs.sa);
return 0;
}
- dev->base = dev->regs.sa = ioremap(dev->scsi_host_ptr->base, size);
+ dev->base = dev->regs.sa = ioremap(dev->base_start, size);
return (dev->base == NULL) ? -1 : 0;
}
@@ -393,7 +393,7 @@ int aac_sa_init(struct aac_dev *dev)
name, instance);
goto error_iounmap;
}
- dev->dbg_base = dev->scsi_host_ptr->base;
+ dev->dbg_base = dev->base_start;
dev->dbg_base_mapped = dev->base;
dev->dbg_size = dev->base_size;
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 762820636304..3b021ec63255 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -56,25 +56,14 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
if (bellbits & PmDoorBellResponseSent) {
bellbits = PmDoorBellResponseSent;
/* handle async. status */
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
our_interrupt = 1;
index = dev->host_rrq_idx;
- if (dev->host_rrq[index] == 0) {
- u32 old_index = index;
- /* adjust index */
- do {
- index++;
- if (index == dev->scsi_host_ptr->can_queue +
- AAC_NUM_MGT_FIB)
- index = 0;
- if (dev->host_rrq[index] != 0)
- break;
- } while (index != old_index);
- dev->host_rrq_idx = index;
- }
for (;;) {
isFastResponse = 0;
/* remove toggle bit (31) */
- handle = (dev->host_rrq[index] & 0x7fffffff);
+ handle = le32_to_cpu(dev->host_rrq[index]) & 0x7fffffff;
/* check fast response bit (30) */
if (handle & 0x40000000)
isFastResponse = 1;
@@ -93,6 +82,8 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
} else {
bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
if (bellbits_shifted & DoorBellAifPending) {
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
our_interrupt = 1;
/* handle AIF */
aac_intr_normal(dev, 0, 2, 0, NULL);
@@ -100,6 +91,13 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
unsigned long sflags;
struct list_head *entry;
int send_it = 0;
+ extern int aac_sync_mode;
+
+ if (!aac_sync_mode) {
+ src_writel(dev, MUnit.ODR_C, bellbits);
+ src_readl(dev, MUnit.ODR_C);
+ our_interrupt = 1;
+ }
if (dev->sync_fib) {
our_interrupt = 1;
@@ -132,7 +130,6 @@ static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
}
if (our_interrupt) {
- src_writel(dev, MUnit.ODR_C, bellbits);
return IRQ_HANDLED;
}
return IRQ_NONE;
@@ -336,6 +333,9 @@ static void aac_src_start_adapter(struct aac_dev *dev)
{
struct aac_init *init;
+ /* reset host_rrq_idx first */
+ dev->host_rrq_idx = 0;
+
init = dev->init;
init->HostElapsedSeconds = cpu_to_le32(get_seconds());
@@ -389,30 +389,51 @@ static int aac_src_deliver_message(struct fib *fib)
struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
unsigned long qflags;
u32 fibsize;
- u64 address;
+ dma_addr_t address;
struct aac_fib_xporthdr *pFibX;
+ u16 hdr_size = le16_to_cpu(fib->hw_fib_va->header.Size);
spin_lock_irqsave(q->lock, qflags);
q->numpending++;
spin_unlock_irqrestore(q->lock, qflags);
- /* Calculate the amount to the fibsize bits */
- fibsize = (sizeof(struct aac_fib_xporthdr) +
- fib->hw_fib_va->header.Size + 127) / 128 - 1;
- if (fibsize > (ALIGN32 - 1))
- fibsize = ALIGN32 - 1;
-
- /* Fill XPORT header */
- pFibX = (struct aac_fib_xporthdr *)
- ((unsigned char *)fib->hw_fib_va -
- sizeof(struct aac_fib_xporthdr));
- pFibX->Handle = fib->hw_fib_va->header.SenderData + 1;
- pFibX->HostAddress = fib->hw_fib_pa;
- pFibX->Size = fib->hw_fib_va->header.Size;
- address = fib->hw_fib_pa - (u64)sizeof(struct aac_fib_xporthdr);
-
- src_writel(dev, MUnit.IQ_H, (u32)(address >> 32));
- src_writel(dev, MUnit.IQ_L, (u32)(address & 0xffffffff) + fibsize);
+ if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+ /* Calculate the amount to the fibsize bits */
+ fibsize = (hdr_size + 127) / 128 - 1;
+ if (fibsize > (ALIGN32 - 1))
+ return -EMSGSIZE;
+ /* New FIB header, 32-bit */
+ address = fib->hw_fib_pa;
+ fib->hw_fib_va->header.StructType = FIB_MAGIC2;
+ fib->hw_fib_va->header.SenderFibAddress = (u32)address;
+ fib->hw_fib_va->header.u.TimeStamp = 0;
+ BUG_ON((u32)(address >> 32) != 0L);
+ address |= fibsize;
+ } else {
+ /* Calculate the amount to the fibsize bits */
+ fibsize = (sizeof(struct aac_fib_xporthdr) + hdr_size + 127) / 128 - 1;
+ if (fibsize > (ALIGN32 - 1))
+ return -EMSGSIZE;
+
+ /* Fill XPORT header */
+ pFibX = (void *)fib->hw_fib_va - sizeof(struct aac_fib_xporthdr);
+ pFibX->Handle = cpu_to_le32(fib->hw_fib_va->header.Handle);
+ pFibX->HostAddress = cpu_to_le64(fib->hw_fib_pa);
+ pFibX->Size = cpu_to_le32(hdr_size);
+
+ /*
+ * The xport header has been 32-byte aligned for us so that fibsize
+ * can be masked out of this address by hardware. -- BenC
+ */
+ address = fib->hw_fib_pa - sizeof(struct aac_fib_xporthdr);
+ if (address & (ALIGN32 - 1))
+ return -EINVAL;
+ address |= fibsize;
+ }
+
+ src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff);
+ src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
+
return 0;
}
@@ -435,8 +456,7 @@ static int aac_src_ioremap(struct aac_dev *dev, u32 size)
dev->base = NULL;
if (dev->regs.src.bar1 == NULL)
return -1;
- dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base,
- size);
+ dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
if (dev->base == NULL) {
iounmap(dev->regs.src.bar1);
dev->regs.src.bar1 = NULL;
@@ -459,7 +479,7 @@ static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
dev->base = dev->regs.src.bar0 = NULL;
return 0;
}
- dev->base = dev->regs.src.bar0 = ioremap(dev->scsi_host_ptr->base, size);
+ dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
if (dev->base == NULL)
return -1;
dev->IndexRegs = &((struct src_registers __iomem *)
@@ -753,7 +773,7 @@ int aac_srcv_init(struct aac_dev *dev)
if (aac_init_adapter(dev) == NULL)
goto error_iounmap;
- if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
+ if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE2)
goto error_iounmap;
dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
@@ -764,7 +784,7 @@ int aac_srcv_init(struct aac_dev *dev)
name, instance);
goto error_iounmap;
}
- dev->dbg_base = dev->scsi_host_ptr->base;
+ dev->dbg_base = dev->base_start;
dev->dbg_base_mapped = dev->base;
dev->dbg_size = dev->base_size;
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 19a36945e6fd..dd4547bf6881 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -2984,8 +2984,8 @@ static int get_command(char *pos, Scsi_Cmnd * ptr)
char *start = pos;
int i;
- SPRINTF("0x%08x: target=%d; lun=%d; cmnd=( ",
- (unsigned int) ptr, ptr->device->id, ptr->device->lun);
+ SPRINTF("%p: target=%d; lun=%d; cmnd=( ",
+ ptr, ptr->device->id, ptr->device->lun);
for (i = 0; i < COMMAND_SIZE(ptr->cmnd[0]); i++)
SPRINTF("0x%02x ", ptr->cmnd[i]);
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index f79c8f9e33a4..770c48ddbe5e 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -49,7 +49,7 @@
#define SCSI_BUF_PA(address) isa_virt_to_bus(address)
#define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset)
-#include<linux/stat.h>
+#include <linux/stat.h>
#ifdef DEBUG
#define DEB(x) x
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 532d212b6b2c..393e7ce8e95a 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -201,7 +201,7 @@ static void asd_get_response_tasklet(struct asd_ascb *ascb,
if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) {
resp->frame_len = le16_to_cpu(*(__le16 *)(r+6));
- memcpy(&resp->ending_fis[0], r+16, 24);
+ memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE);
ts->buf_valid_size = sizeof(*resp);
}
}
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index cbde1dca45ad..def24a1079ad 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2821,7 +2821,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
int i, count = 0;
struct MessageUnit_A __iomem *pmuA = acb->pmuA;
struct MessageUnit_C __iomem *pmuC = acb->pmuC;
- u32 temp = 0;
+
/* backup pci config data */
printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
for (i = 0; i < 64; i++) {
@@ -2839,7 +2839,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
writel(0x2, &pmuC->write_sequence);
writel(0x7, &pmuC->write_sequence);
writel(0xD, &pmuC->write_sequence);
- } while ((((temp = readl(&pmuC->host_diagnostic)) | ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
+ } while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
} else {
pci_write_config_byte(acb->pdev, 0x84, 0x20);
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index 937000db62a8..bcc4966e8ba4 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -5722,9 +5722,7 @@ bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport)
* The memory for the bfad_vport_s is freed from the FC function
* template vport_delete entry point.
*/
- if (vport_drv)
- bfad_im_port_delete(vport_drv->drv_port.bfad,
- &vport_drv->drv_port);
+ bfad_im_port_delete(vport_drv->drv_port.bfad, &vport_drv->drv_port);
}
/*
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 14e6284e48e4..8cdb79c2fcdf 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -2357,7 +2357,7 @@ bfa_ioc_mbox_isr(struct bfa_ioc_s *ioc)
return;
}
- if ((mc > BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
+ if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
return;
mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 2e4b0be14a20..2c8f0c713076 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1383,6 +1383,8 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
bfa_sm_set_state(bfad, bfad_sm_uninit);
spin_lock_init(&bfad->bfad_lock);
+ spin_lock_init(&bfad->bfad_aen_spinlock);
+
pci_set_drvdata(pdev, bfad);
bfad->ref_count = 0;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index e1f4b10df42a..9c1495b321d9 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3008,12 +3008,15 @@ bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
* buffer of size bsg_data->payload_len
*/
bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
- if (!bsg_fcpt)
+ if (!bsg_fcpt) {
+ rc = -ENOMEM;
goto out;
+ }
if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
bsg_data->payload_len)) {
kfree(bsg_fcpt);
+ rc = -EIO;
goto out;
}
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 1ac09afe35ee..2eebf8d4d58b 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -687,25 +687,21 @@ bfa_status_t
bfad_im_probe(struct bfad_s *bfad)
{
struct bfad_im_s *im;
- bfa_status_t rc = BFA_STATUS_OK;
im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
- if (im == NULL) {
- rc = BFA_STATUS_ENOMEM;
- goto ext;
- }
+ if (im == NULL)
+ return BFA_STATUS_ENOMEM;
bfad->im = im;
im->bfad = bfad;
if (bfad_thread_workq(bfad) != BFA_STATUS_OK) {
kfree(im);
- rc = BFA_STATUS_FAILED;
+ return BFA_STATUS_FAILED;
}
INIT_WORK(&im->aen_im_notify_work, bfad_aen_im_notify_handler);
-ext:
- return rc;
+ return BFA_STATUS_OK;
}
void
diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile
index a92695a25176..141149e8cdad 100644
--- a/drivers/scsi/bnx2fc/Makefile
+++ b/drivers/scsi/bnx2fc/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o
-bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o
+bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o \
+ bnx2fc_debug.o
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 0578fa0dc14b..3486845ba301 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -11,6 +11,8 @@
* Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -57,12 +59,12 @@
#include <scsi/fc/fc_fcp.h>
#include "57xx_hsi_bnx2fc.h"
-#include "bnx2fc_debug.h"
#include "../../net/ethernet/broadcom/cnic_if.h"
+#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.11"
+#define BNX2FC_VERSION "1.0.12"
#define PFX "bnx2fc: "
@@ -84,6 +86,8 @@
#define BNX2FC_NUM_MAX_SESS 1024
#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS))
+#define BNX2FC_MAX_NPIV 256
+
#define BNX2FC_MAX_OUTSTANDING_CMNDS 2048
#define BNX2FC_CAN_QUEUE BNX2FC_MAX_OUTSTANDING_CMNDS
#define BNX2FC_ELSTM_XIDS BNX2FC_CAN_QUEUE
@@ -206,6 +210,7 @@ struct bnx2fc_hba {
struct fcoe_statistics_params *stats_buffer;
dma_addr_t stats_buf_dma;
struct completion stat_req_done;
+ struct fcoe_capabilities fcoe_cap;
/*destroy handling */
struct timer_list destroy_timer;
@@ -274,6 +279,7 @@ struct bnx2fc_rport {
#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6
#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
#define BNX2FC_FLAG_EXPL_LOGO 0x8
+#define BNX2FC_FLAG_DISABLE_FAILED 0x9
u8 src_addr[ETH_ALEN];
u32 max_sqes;
@@ -554,4 +560,7 @@ void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req,
int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
enum fc_rctl r_ctl);
+
+#include "bnx2fc_debug.h"
+
#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c
new file mode 100644
index 000000000000..0cbee1b23ee2
--- /dev/null
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c
@@ -0,0 +1,70 @@
+#include "bnx2fc.h"
+
+void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (likely(!(bnx2fc_debug_level & LOG_IO)))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (io_req && io_req->port && io_req->port->lport &&
+ io_req->port->lport->host)
+ shost_printk(KERN_INFO, io_req->port->lport->host,
+ PFX "xid:0x%x %pV",
+ io_req->xid, &vaf);
+ else
+ pr_info("NULL %pV", &vaf);
+
+ va_end(args);
+}
+
+void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (likely(!(bnx2fc_debug_level & LOG_TGT)))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host &&
+ tgt->rport)
+ shost_printk(KERN_INFO, tgt->port->lport->host,
+ PFX "port:%x %pV",
+ tgt->rport->port_id, &vaf);
+ else
+ pr_info("NULL %pV", &vaf);
+
+ va_end(args);
+}
+
+void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (likely(!(bnx2fc_debug_level & LOG_HBA)))
+ return;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (lport && lport->host)
+ shost_printk(KERN_INFO, lport->host, PFX "%pV", &vaf);
+ else
+ pr_info("NULL %pV", &vaf);
+
+ va_end(args);
+}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h
index 3416d9a746c7..4808ff99621f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_debug.h
+++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h
@@ -11,60 +11,23 @@
extern unsigned int bnx2fc_debug_level;
-#define BNX2FC_CHK_LOGGING(LEVEL, CMD) \
- do { \
- if (unlikely(bnx2fc_debug_level & LEVEL)) \
- do { \
- CMD; \
- } while (0); \
- } while (0)
-
-#define BNX2FC_ELS_DBG(fmt, arg...) \
- BNX2FC_CHK_LOGGING(LOG_ELS, \
- printk(KERN_INFO PFX fmt, ##arg))
-
-#define BNX2FC_MISC_DBG(fmt, arg...) \
- BNX2FC_CHK_LOGGING(LOG_MISC, \
- printk(KERN_INFO PFX fmt, ##arg))
-
-#define BNX2FC_IO_DBG(io_req, fmt, arg...) \
- do { \
- if (!io_req || !io_req->port || !io_req->port->lport || \
- !io_req->port->lport->host) \
- BNX2FC_CHK_LOGGING(LOG_IO, \
- printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
- else \
- BNX2FC_CHK_LOGGING(LOG_IO, \
- shost_printk(KERN_INFO, \
- (io_req)->port->lport->host, \
- PFX "xid:0x%x " fmt, \
- (io_req)->xid, ##arg)); \
- } while (0)
-
-#define BNX2FC_TGT_DBG(tgt, fmt, arg...) \
- do { \
- if (!tgt || !tgt->port || !tgt->port->lport || \
- !tgt->port->lport->host || !tgt->rport) \
- BNX2FC_CHK_LOGGING(LOG_TGT, \
- printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
- else \
- BNX2FC_CHK_LOGGING(LOG_TGT, \
- shost_printk(KERN_INFO, \
- (tgt)->port->lport->host, \
- PFX "port:%x " fmt, \
- (tgt)->rport->port_id, ##arg)); \
- } while (0)
-
-
-#define BNX2FC_HBA_DBG(lport, fmt, arg...) \
- do { \
- if (!lport || !lport->host) \
- BNX2FC_CHK_LOGGING(LOG_HBA, \
- printk(KERN_INFO PFX "NULL " fmt, ##arg)); \
- else \
- BNX2FC_CHK_LOGGING(LOG_HBA, \
- shost_printk(KERN_INFO, lport->host, \
- PFX fmt, ##arg)); \
- } while (0)
+#define BNX2FC_ELS_DBG(fmt, ...) \
+do { \
+ if (unlikely(bnx2fc_debug_level & LOG_ELS)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define BNX2FC_MISC_DBG(fmt, ...) \
+do { \
+ if (unlikely(bnx2fc_debug_level & LOG_MISC)) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0)
+
+__printf(2, 3)
+void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...);
+__printf(2, 3)
+void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...);
+__printf(2, 3)
+void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...);
#endif
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f52f668fd247..ae1cb7639d99 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Apr 24, 2012"
+#define DRV_MODULE_RELDATE "Jun 04, 2012"
static char version[] __devinitdata =
@@ -286,7 +286,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct fcoe_port *port;
struct fcoe_hdr *hp;
struct bnx2fc_rport *tgt;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
u8 sof, eof;
u32 crc;
unsigned int hlen, tlen, elen;
@@ -412,7 +412,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
}
/*update tx stats */
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
stats->TxFrames++;
stats->TxWords += wlen;
put_cpu();
@@ -522,7 +522,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
u32 fr_len;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
struct fc_frame_header *fh;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
@@ -551,7 +551,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
stats->RxFrames++;
stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
@@ -942,7 +942,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
FC_PORTTYPE_UNKNOWN;
mutex_unlock(&lport->lp_mutex);
fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
- per_cpu_ptr(lport->dev_stats,
+ per_cpu_ptr(lport->stats,
get_cpu())->LinkFailureCount++;
put_cpu();
fcoe_clean_pending_queue(lport);
@@ -1326,6 +1326,7 @@ static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba)
static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
{
struct bnx2fc_hba *hba;
+ struct fcoe_capabilities *fcoe_cap;
int rc;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
@@ -1361,6 +1362,21 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n");
goto cmgr_err;
}
+ fcoe_cap = &hba->fcoe_cap;
+
+ fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES <<
+ FCOE_IOS_PER_CONNECTION_SHIFT;
+ fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS <<
+ FCOE_LOGINS_PER_PORT_SHIFT;
+ fcoe_cap->capability2 = BNX2FC_MAX_OUTSTANDING_CMNDS <<
+ FCOE_NUMBER_OF_EXCHANGES_SHIFT;
+ fcoe_cap->capability2 |= BNX2FC_MAX_NPIV <<
+ FCOE_NPIV_WWN_PER_PORT_SHIFT;
+ fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS <<
+ FCOE_TARGETS_SUPPORTED_SHIFT;
+ fcoe_cap->capability3 |= BNX2FC_MAX_OUTSTANDING_CMNDS <<
+ FCOE_OUTSTANDING_COMMANDS_SHIFT;
+ fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL;
init_waitqueue_head(&hba->shutdown_wait);
init_waitqueue_head(&hba->destroy_wait);
@@ -1691,6 +1707,32 @@ static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba)
hba->pcidev = NULL;
}
+/**
+ * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats
+ *
+ * @handle: transport handle pointing to adapter struture
+ */
+static int bnx2fc_ulp_get_stats(void *handle)
+{
+ struct bnx2fc_hba *hba = handle;
+ struct cnic_dev *cnic;
+ struct fcoe_stats_info *stats_addr;
+
+ if (!hba)
+ return -EINVAL;
+
+ cnic = hba->cnic;
+ stats_addr = &cnic->stats_addr->fcoe_stat;
+ if (!stats_addr)
+ return -EINVAL;
+
+ strncpy(stats_addr->version, BNX2FC_VERSION,
+ sizeof(stats_addr->version));
+ stats_addr->txq_size = BNX2FC_SQ_WQES_MAX;
+ stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX;
+
+ return 0;
+}
/**
@@ -1944,6 +1986,7 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
adapter_count++;
mutex_unlock(&bnx2fc_dev_lock);
+ dev->fcoe_cap = &hba->fcoe_cap;
clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
rc = dev->register_device(dev, CNIC_ULP_FCOE,
(void *) hba);
@@ -2019,11 +2062,11 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
struct fcoe_ctlr *ctlr;
struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
- struct net_device *phys_dev;
+ struct net_device *phys_dev = netdev;
struct fc_lport *lport;
struct ethtool_drvinfo drvinfo;
int rc = 0;
- int vlan_id;
+ int vlan_id = 0;
BNX2FC_MISC_DBG("Entered bnx2fc_create\n");
if (fip_mode != FIP_MODE_FABRIC) {
@@ -2041,14 +2084,9 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
}
/* obtain physical netdev */
- if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
phys_dev = vlan_dev_real_dev(netdev);
- vlan_id = vlan_dev_vlan_id(netdev);
- } else {
- printk(KERN_ERR PFX "Not a vlan device\n");
- rc = -EINVAL;
- goto netdev_err;
- }
+
/* verify if the physical device is a netxtreme2 device */
if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) {
memset(&drvinfo, 0, sizeof(drvinfo));
@@ -2083,9 +2121,13 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
goto ifput_err;
}
+ if (netdev->priv_flags & IFF_802_1Q_VLAN) {
+ vlan_id = vlan_dev_vlan_id(netdev);
+ interface->vlan_enabled = 1;
+ }
+
ctlr = bnx2fc_to_ctlr(interface);
interface->vlan_id = vlan_id;
- interface->vlan_enabled = 1;
interface->timer_work_queue =
create_singlethread_workqueue("bnx2fc_timer_wq");
@@ -2152,13 +2194,10 @@ mod_err:
**/
static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic)
{
- struct list_head *list;
- struct list_head *temp;
struct bnx2fc_hba *hba;
/* Called with bnx2fc_dev_lock held */
- list_for_each_safe(list, temp, &adapter_list) {
- hba = (struct bnx2fc_hba *)list;
+ list_for_each_entry(hba, &adapter_list, list) {
if (hba->cnic == cnic)
return hba;
}
@@ -2252,15 +2291,17 @@ static int bnx2fc_fcoe_reset(struct Scsi_Host *shost)
static bool bnx2fc_match(struct net_device *netdev)
{
+ struct net_device *phys_dev = netdev;
+
mutex_lock(&bnx2fc_dev_lock);
- if (netdev->priv_flags & IFF_802_1Q_VLAN) {
- struct net_device *phys_dev = vlan_dev_real_dev(netdev);
+ if (netdev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(netdev);
- if (bnx2fc_hba_lookup(phys_dev)) {
- mutex_unlock(&bnx2fc_dev_lock);
- return true;
- }
+ if (bnx2fc_hba_lookup(phys_dev)) {
+ mutex_unlock(&bnx2fc_dev_lock);
+ return true;
}
+
mutex_unlock(&bnx2fc_dev_lock);
return false;
}
@@ -2290,9 +2331,9 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu)
p = &per_cpu(bnx2fc_percpu, cpu);
- thread = kthread_create(bnx2fc_percpu_io_thread,
- (void *)p,
- "bnx2fc_thread/%d", cpu);
+ thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
+ (void *)p, cpu_to_node(cpu),
+ "bnx2fc_thread/%d", cpu);
/* bind thread to the cpu */
if (likely(!IS_ERR(thread))) {
kthread_bind(thread, cpu);
@@ -2643,4 +2684,5 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb = {
.cnic_stop = bnx2fc_ulp_stop,
.indicate_kcqes = bnx2fc_indicate_kcqe,
.indicate_netevent = bnx2fc_indicate_netevent,
+ .cnic_get_stats = bnx2fc_ulp_get_stats,
};
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 2ca6bfe4ce5e..6d6eee42ac7d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -1244,7 +1244,9 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
if (disable_kcqe->completion_status) {
printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
disable_kcqe->completion_status);
- return;
+ set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
+ set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
+ wake_up_interruptible(&tgt->upld_wait);
} else {
/* disable successful */
BNX2FC_TGT_DBG(tgt, "disable successful\n");
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4f7453b9e41e..73f231ccd45b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -405,11 +405,10 @@ free_cmd_pool:
goto free_cmgr;
for (i = 0; i < num_possible_cpus() + 1; i++) {
- struct list_head *list;
- struct list_head *tmp;
+ struct bnx2fc_cmd *tmp, *io_req;
- list_for_each_safe(list, tmp, &cmgr->free_list[i]) {
- struct bnx2fc_cmd *io_req = (struct bnx2fc_cmd *)list;
+ list_for_each_entry_safe(io_req, tmp,
+ &cmgr->free_list[i], link) {
list_del(&io_req->link);
kfree(io_req);
}
@@ -1436,9 +1435,7 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
{
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
struct bnx2fc_rport *tgt = io_req->tgt;
- struct list_head *list;
- struct list_head *tmp;
- struct bnx2fc_cmd *cmd;
+ struct bnx2fc_cmd *cmd, *tmp;
int tm_lun = sc_cmd->device->lun;
int rc = 0;
int lun;
@@ -1449,9 +1446,8 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
* Walk thru the active_ios queue and ABORT the IO
* that matches with the LUN that was reset
*/
- list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n");
- cmd = (struct bnx2fc_cmd *)list;
lun = cmd->sc_cmd->device->lun;
if (lun == tm_lun) {
/* Initiate ABTS on this cmd */
@@ -1476,9 +1472,7 @@ static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req)
static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
{
struct bnx2fc_rport *tgt = io_req->tgt;
- struct list_head *list;
- struct list_head *tmp;
- struct bnx2fc_cmd *cmd;
+ struct bnx2fc_cmd *cmd, *tmp;
int rc = 0;
/* called with tgt_lock held */
@@ -1487,9 +1481,8 @@ static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
* Walk thru the active_ios queue and ABORT the IO
* that matches with the LUN that was reset
*/
- list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) {
BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n");
- cmd = (struct bnx2fc_cmd *)list;
/* Initiate ABTS */
if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
&cmd->req_flags)) {
@@ -1980,7 +1973,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct fc_lport *lport = port->lport;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
int task_idx, index;
u16 xid;
@@ -1991,7 +1984,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
io_req->data_xfer_len = scsi_bufflen(sc_cmd);
sc_cmd->SCp.ptr = (char *)io_req;
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
io_req->io_req_flags = BNX2FC_READ;
stats->InputRequests++;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 082a25c3117e..b9d0d9cb17f9 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -150,8 +150,7 @@ tgt_init_err:
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
{
struct bnx2fc_cmd *io_req;
- struct list_head *list;
- struct list_head *tmp;
+ struct bnx2fc_cmd *tmp;
int rc;
int i = 0;
BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
@@ -160,9 +159,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
spin_lock_bh(&tgt->tgt_lock);
tgt->flush_in_prog = 1;
- list_for_each_safe(list, tmp, &tgt->active_cmd_queue) {
+ list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {
i++;
- io_req = (struct bnx2fc_cmd *)list;
list_del_init(&io_req->link);
io_req->on_active_queue = 0;
BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");
@@ -181,13 +179,18 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
- rc = bnx2fc_initiate_cleanup(io_req);
- BUG_ON(rc);
+
+ /* Do not issue cleanup when disable request failed */
+ if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
+ bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
+ else {
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
}
- list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
+ list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) {
i++;
- io_req = (struct bnx2fc_cmd *)list;
list_del_init(&io_req->link);
io_req->on_tmf_queue = 0;
BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
@@ -195,9 +198,8 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
complete(&io_req->tm_done);
}
- list_for_each_safe(list, tmp, &tgt->els_queue) {
+ list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
i++;
- io_req = (struct bnx2fc_cmd *)list;
list_del_init(&io_req->link);
io_req->on_active_queue = 0;
@@ -212,13 +214,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
io_req->cb_arg = NULL;
}
- rc = bnx2fc_initiate_cleanup(io_req);
- BUG_ON(rc);
+ /* Do not issue cleanup when disable request failed */
+ if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags))
+ bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
+ else {
+ rc = bnx2fc_initiate_cleanup(io_req);
+ BUG_ON(rc);
+ }
}
- list_for_each_safe(list, tmp, &tgt->io_retire_queue) {
+ list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) {
i++;
- io_req = (struct bnx2fc_cmd *)list;
list_del_init(&io_req->link);
BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
@@ -321,9 +327,13 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
del_timer_sync(&tgt->upld_timer);
- } else
+ } else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
+ printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
+ " not sent to FW\n");
+ } else {
printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
" not sent to FW\n");
+ }
/* Free session resources */
bnx2fc_free_session_resc(hba, tgt);
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index dc0a08e69c82..f2db5fe7bdc2 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -267,7 +267,13 @@ struct bnx2i_cmd_request {
* task statistics for write response
*/
struct bnx2i_write_resp_task_stat {
- u32 num_data_ins;
+#if defined(__BIG_ENDIAN)
+ u16 num_r2ts;
+ u16 num_data_outs;
+#elif defined(__LITTLE_ENDIAN)
+ u16 num_data_outs;
+ u16 num_r2ts;
+#endif
};
/*
@@ -275,11 +281,11 @@ struct bnx2i_write_resp_task_stat {
*/
struct bnx2i_read_resp_task_stat {
#if defined(__BIG_ENDIAN)
- u16 num_data_outs;
- u16 num_r2ts;
+ u16 reserved;
+ u16 num_data_ins;
#elif defined(__LITTLE_ENDIAN)
- u16 num_r2ts;
- u16 num_data_outs;
+ u16 num_data_ins;
+ u16 reserved;
#endif
};
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 0c53c28dc3d3..3f9e7061258e 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -44,6 +44,8 @@
#include "57xx_iscsi_hsi.h"
#include "57xx_iscsi_constants.h"
+#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h"
+
#define BNX2_ISCSI_DRIVER_NAME "bnx2i"
#define BNX2I_MAX_ADAPTERS 8
@@ -126,6 +128,43 @@
#define REG_WR(__hba, offset, val) \
writel(val, __hba->regview + offset)
+#ifdef CONFIG_32BIT
+#define GET_STATS_64(__hba, dst, field) \
+ do { \
+ spin_lock_bh(&__hba->stat_lock); \
+ dst->field##_lo = __hba->stats.field##_lo; \
+ dst->field##_hi = __hba->stats.field##_hi; \
+ spin_unlock_bh(&__hba->stat_lock); \
+ } while (0)
+
+#define ADD_STATS_64(__hba, field, len) \
+ do { \
+ if (spin_trylock(&__hba->stat_lock)) { \
+ if (__hba->stats.field##_lo + len < \
+ __hba->stats.field##_lo) \
+ __hba->stats.field##_hi++; \
+ __hba->stats.field##_lo += len; \
+ spin_unlock(&__hba->stat_lock); \
+ } \
+ } while (0)
+
+#else
+#define GET_STATS_64(__hba, dst, field) \
+ do { \
+ u64 val, *out; \
+ \
+ val = __hba->bnx2i_stats.field; \
+ out = (u64 *)&__hba->stats.field##_lo; \
+ *out = cpu_to_le64(val); \
+ out = (u64 *)&dst->field##_lo; \
+ *out = cpu_to_le64(val); \
+ } while (0)
+
+#define ADD_STATS_64(__hba, field, len) \
+ do { \
+ __hba->bnx2i_stats.field += len; \
+ } while (0)
+#endif
/**
* struct generic_pdu_resc - login pdu resource structure
@@ -288,6 +327,15 @@ struct iscsi_cid_queue {
struct bnx2i_conn **conn_cid_tbl;
};
+
+struct bnx2i_stats_info {
+ u64 rx_pdus;
+ u64 rx_bytes;
+ u64 tx_pdus;
+ u64 tx_bytes;
+};
+
+
/**
* struct bnx2i_hba - bnx2i adapter structure
*
@@ -341,6 +389,8 @@ struct iscsi_cid_queue {
* @ctx_ccell_tasks: captures number of ccells and tasks supported by
* currently offloaded connection, used to decode
* context memory
+ * @stat_lock: spin lock used by the statistic collector (32 bit)
+ * @stats: local iSCSI statistic collection place holder
*
* Adapter Data Structure
*/
@@ -350,6 +400,7 @@ struct bnx2i_hba {
struct pci_dev *pcidev;
struct net_device *netdev;
void __iomem *regview;
+ resource_size_t reg_base;
u32 age;
unsigned long cnic_dev_type;
@@ -426,6 +477,12 @@ struct bnx2i_hba {
u32 num_sess_opened;
u32 num_conn_opened;
unsigned int ctx_ccell_tasks;
+
+#ifdef CONFIG_32BIT
+ spinlock_t stat_lock;
+#endif
+ struct bnx2i_stats_info bnx2i_stats;
+ struct iscsi_stats_info stats;
};
@@ -749,6 +806,8 @@ extern void bnx2i_ulp_init(struct cnic_dev *dev);
extern void bnx2i_ulp_exit(struct cnic_dev *dev);
extern void bnx2i_start(void *handle);
extern void bnx2i_stop(void *handle);
+extern int bnx2i_get_stats(void *handle);
+
extern struct bnx2i_hba *get_adapter_list_head(void);
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index ece47e502282..33d6630529de 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1350,6 +1350,7 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
struct cqe *cqe)
{
struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
struct bnx2i_cmd_response *resp_cqe;
struct bnx2i_cmd *bnx2i_cmd;
struct iscsi_task *task;
@@ -1367,16 +1368,26 @@ int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
conn->datain_pdus_cnt +=
- resp_cqe->task_stat.read_stat.num_data_outs;
+ resp_cqe->task_stat.read_stat.num_data_ins;
conn->rxdata_octets +=
bnx2i_cmd->req.total_data_transfer_length;
+ ADD_STATS_64(hba, rx_pdus,
+ resp_cqe->task_stat.read_stat.num_data_ins);
+ ADD_STATS_64(hba, rx_bytes,
+ bnx2i_cmd->req.total_data_transfer_length);
} else {
conn->dataout_pdus_cnt +=
- resp_cqe->task_stat.read_stat.num_data_outs;
+ resp_cqe->task_stat.write_stat.num_data_outs;
conn->r2t_pdus_cnt +=
- resp_cqe->task_stat.read_stat.num_r2ts;
+ resp_cqe->task_stat.write_stat.num_r2ts;
conn->txdata_octets +=
bnx2i_cmd->req.total_data_transfer_length;
+ ADD_STATS_64(hba, tx_pdus,
+ resp_cqe->task_stat.write_stat.num_data_outs);
+ ADD_STATS_64(hba, tx_bytes,
+ bnx2i_cmd->req.total_data_transfer_length);
+ ADD_STATS_64(hba, rx_pdus,
+ resp_cqe->task_stat.write_stat.num_r2ts);
}
bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
@@ -1961,6 +1972,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
{
struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
struct qp_info *qp;
struct bnx2i_nop_in_msg *nopin;
int tgt_async_msg;
@@ -1973,7 +1985,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (!qp->cq_virt) {
printk(KERN_ALERT "bnx2i (%s): cq resr freed in bh execution!",
- bnx2i_conn->hba->netdev->name);
+ hba->netdev->name);
goto out;
}
while (1) {
@@ -1985,9 +1997,9 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
if (nopin->op_code == ISCSI_OP_NOOP_IN &&
nopin->itt == (u16) RESERVED_ITT) {
printk(KERN_ALERT "bnx2i: Unsolicited "
- "NOP-In detected for suspended "
- "connection dev=%s!\n",
- bnx2i_conn->hba->netdev->name);
+ "NOP-In detected for suspended "
+ "connection dev=%s!\n",
+ hba->netdev->name);
bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
goto cqe_out;
}
@@ -2001,7 +2013,7 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
/* Run the kthread engine only for data cmds
All other cmds will be completed in this bh! */
bnx2i_queue_scsi_cmd_resp(session, bnx2i_conn, nopin);
- break;
+ goto done;
case ISCSI_OP_LOGIN_RSP:
bnx2i_process_login_resp(session, bnx2i_conn,
qp->cq_cons_qe);
@@ -2044,11 +2056,15 @@ static int bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
nopin->op_code);
}
+
+ ADD_STATS_64(hba, rx_pdus, 1);
+ ADD_STATS_64(hba, rx_bytes, nopin->data_length);
+done:
if (!tgt_async_msg) {
if (!atomic_read(&bnx2i_conn->ep->num_active_cmds))
printk(KERN_ALERT "bnx2i (%s): no active cmd! "
"op 0x%x\n",
- bnx2i_conn->hba->netdev->name,
+ hba->netdev->name,
nopin->op_code);
else
atomic_dec(&bnx2i_conn->ep->num_active_cmds);
@@ -2692,6 +2708,7 @@ struct cnic_ulp_ops bnx2i_cnic_cb = {
.cm_remote_close = bnx2i_cm_remote_close,
.cm_remote_abort = bnx2i_cm_remote_abort,
.iscsi_nl_send_msg = bnx2i_send_nl_mesg,
+ .cnic_get_stats = bnx2i_get_stats,
.owner = THIS_MODULE
};
@@ -2724,7 +2741,6 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
goto arm_cq;
}
- reg_base = ep->hba->netdev->base_addr;
if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
(ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
@@ -2740,7 +2756,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
/* 5709 device in normal node and 5706/5708 devices */
reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
- ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+ ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
MB_KERNEL_CTX_SIZE);
if (!ep->qp.ctx_base)
return -ENOMEM;
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 8b6816706ee5..b17637aab9a7 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -381,6 +381,46 @@ void bnx2i_ulp_exit(struct cnic_dev *dev)
/**
+ * bnx2i_get_stats - Retrieve various statistic from iSCSI offload
+ * @handle: bnx2i_hba
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ * retrieve various iSCSI offload related statistics.
+ */
+int bnx2i_get_stats(void *handle)
+{
+ struct bnx2i_hba *hba = handle;
+ struct iscsi_stats_info *stats;
+
+ if (!hba)
+ return -EINVAL;
+
+ stats = (struct iscsi_stats_info *)hba->cnic->stats_addr;
+
+ if (!stats)
+ return -ENOMEM;
+
+ strlcpy(stats->version, DRV_MODULE_VERSION, sizeof(stats->version));
+ memcpy(stats->mac_add1 + 2, hba->cnic->mac_addr, ETH_ALEN);
+
+ stats->max_frame_size = hba->netdev->mtu;
+ stats->txq_size = hba->max_sqes;
+ stats->rxq_size = hba->max_cqes;
+
+ stats->txq_avg_depth = 0;
+ stats->rxq_avg_depth = 0;
+
+ GET_STATS_64(hba, stats, rx_pdus);
+ GET_STATS_64(hba, stats, rx_bytes);
+
+ GET_STATS_64(hba, stats, tx_pdus);
+ GET_STATS_64(hba, stats, tx_bytes);
+
+ return 0;
+}
+
+
+/**
* bnx2i_percpu_thread_create - Create a receive thread for an
* online CPU
*
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f8d516b53161..3b34c13e2f02 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -811,13 +811,13 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
bnx2i_identify_device(hba);
bnx2i_setup_host_queue_size(hba, shost);
+ hba->reg_base = pci_resource_start(hba->pcidev, 0);
if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
- hba->regview = ioremap_nocache(hba->netdev->base_addr,
- BNX2_MQ_CONFIG2);
+ hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
if (!hba->regview)
goto ioreg_map_err;
} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
- hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+ hba->regview = pci_iomap(hba->pcidev, 0, 4096);
if (!hba->regview)
goto ioreg_map_err;
}
@@ -874,6 +874,11 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
hba->conn_ctx_destroy_tmo = 2 * HZ;
}
+#ifdef CONFIG_32BIT
+ spin_lock_init(&hba->stat_lock);
+#endif
+ memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));
+
if (iscsi_host_add(shost, &hba->pcidev->dev))
goto free_dump_mem;
return hba;
@@ -884,7 +889,7 @@ cid_que_err:
bnx2i_free_mp_bdt(hba);
mp_bdt_mem_err:
if (hba->regview) {
- iounmap(hba->regview);
+ pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
ioreg_map_err:
@@ -910,7 +915,7 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
pci_dev_put(hba->pcidev);
if (hba->regview) {
- iounmap(hba->regview);
+ pci_iounmap(hba->pcidev, hba->regview);
hba->regview = NULL;
}
bnx2i_free_mp_bdt(hba);
@@ -1181,12 +1186,18 @@ static int
bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+ struct bnx2i_hba *hba = bnx2i_conn->hba;
struct bnx2i_cmd *cmd = task->dd_data;
memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
bnx2i_setup_cmd_wqe_template(cmd);
bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+
+ /* Tx PDU/data length count */
+ ADD_STATS_64(hba, tx_pdus, 1);
+ ADD_STATS_64(hba, tx_bytes, task->data_count);
+
if (task->data_count) {
memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
task->data_count);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 36739da8bc15..49692a1ac44a 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,8 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->saddr.sin_addr.s_addr = chba->ipv4addr;
csk->rss_qid = 0;
- csk->l2t = t3_l2t_get(t3dev, dst, ndev);
+ csk->l2t = t3_l2t_get(t3dev, dst, ndev,
+ &csk->daddr.sin_addr.s_addr);
if (!csk->l2t) {
pr_err("NO l2t available.\n");
return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 5a4a3bfc60cf..f924b3c3720e 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -438,8 +438,8 @@ static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb,
if (submode)
wr_ulp_mode = FW_OFLD_TX_DATA_WR_ULPMODE(ULP2_MODE_ISCSI) |
FW_OFLD_TX_DATA_WR_ULPSUBMODE(submode);
- req->tunnel_to_proxy = htonl(wr_ulp_mode) |
- FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1);
+ req->tunnel_to_proxy = htonl(wr_ulp_mode |
+ FW_OFLD_TX_DATA_WR_SHOVE(skb_peek(&csk->write_queue) ? 0 : 1));
req->plen = htonl(len);
if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT))
cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
@@ -1142,7 +1142,7 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
- n = dst_get_neighbour_noref(csk->dst);
+ n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr);
if (!n) {
pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
goto rel_resource;
@@ -1182,9 +1182,12 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
send_act_open_req(csk, skb, csk->l2t);
+ neigh_release(n);
return 0;
rel_resource:
+ if (n)
+ neigh_release(n);
if (skb)
__kfree_skb(skb);
return -EINVAL;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d9253db1d0e2..b44c1cff3114 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -494,7 +494,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
goto err_out;
}
dst = &rt->dst;
- n = dst_get_neighbour_noref(dst);
+ n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr);
if (!n) {
err = -ENODEV;
goto rel_rt;
@@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
ndev->name);
err = -ENETUNREACH;
- goto rel_rt;
+ goto rel_neigh;
}
if (ndev->flags & IFF_LOOPBACK) {
@@ -521,7 +521,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
pr_info("dst %pI4, %s, NOT cxgbi device.\n",
&daddr->sin_addr.s_addr, ndev->name);
err = -ENETUNREACH;
- goto rel_rt;
+ goto rel_neigh;
}
log_debug(1 << CXGBI_DBG_SOCK,
"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
@@ -531,7 +531,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
csk = cxgbi_sock_create(cdev);
if (!csk) {
err = -ENOMEM;
- goto rel_rt;
+ goto rel_neigh;
}
csk->cdev = cdev;
csk->port_id = port;
@@ -541,9 +541,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
csk->daddr.sin_port = daddr->sin_port;
csk->daddr.sin_family = daddr->sin_family;
csk->saddr.sin_addr.s_addr = fl4.saddr;
+ neigh_release(n);
return csk;
+rel_neigh:
+ neigh_release(n);
+
rel_rt:
ip_rt_put(rt);
if (csk)
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 48e46f5b77cc..33e422e75835 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -468,7 +468,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_handler_exist);
/*
* scsi_dh_attach - Attach device handler
- * @sdev - sdev the handler should be attached to
+ * @q - Request queue that is associated with the scsi_device
+ * the handler should be attached to
* @name - name of the handler to attach
*/
int scsi_dh_attach(struct request_queue *q, const char *name)
@@ -498,7 +499,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_attach);
/*
* scsi_dh_detach - Detach device handler
- * @sdev - sdev the handler should be detached from
+ * @q - Request queue that is associated with the scsi_device
+ * the handler should be detached from
*
* This function will detach the device handler only
* if the sdev is not part of the internal list, ie
@@ -527,6 +529,38 @@ void scsi_dh_detach(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(scsi_dh_detach);
+/*
+ * scsi_dh_attached_handler_name - Get attached device handler's name
+ * @q - Request queue that is associated with the scsi_device
+ * that may have a device handler attached
+ * @gfp - the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Returns name of attached handler, NULL if no handler is attached.
+ * Caller must take care to free the returned string.
+ */
+const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
+{
+ unsigned long flags;
+ struct scsi_device *sdev;
+ const char *handler_name = NULL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ sdev = q->queuedata;
+ if (!sdev || !get_device(&sdev->sdev_gendev))
+ sdev = NULL;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ if (!sdev)
+ return NULL;
+
+ if (sdev->scsi_dh_data)
+ handler_name = kstrdup(sdev->scsi_dh_data->scsi_dh->name, gfp);
+
+ put_device(&sdev->sdev_gendev);
+ return handler_name;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_attached_handler_name);
+
static struct notifier_block scsi_dh_nb = {
.notifier_call = scsi_dh_notifier
};
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index fda9cdea0e60..08d80a6d272a 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -46,13 +46,16 @@
#define TPGS_SUPPORT_OFFLINE 0x40
#define TPGS_SUPPORT_TRANSITION 0x80
+#define RTPG_FMT_MASK 0x70
+#define RTPG_FMT_EXT_HDR 0x10
+
#define TPGS_MODE_UNINITIALIZED -1
#define TPGS_MODE_NONE 0x0
#define TPGS_MODE_IMPLICIT 0x1
#define TPGS_MODE_EXPLICIT 0x2
#define ALUA_INQUIRY_SIZE 36
-#define ALUA_FAILOVER_TIMEOUT (60 * HZ)
+#define ALUA_FAILOVER_TIMEOUT 60
#define ALUA_FAILOVER_RETRIES 5
/* flags passed from user level */
@@ -68,6 +71,7 @@ struct alua_dh_data {
unsigned char inq[ALUA_INQUIRY_SIZE];
unsigned char *buff;
int bufflen;
+ unsigned char transition_tmo;
unsigned char sense[SCSI_SENSE_BUFFERSIZE];
int senselen;
struct scsi_device *sdev;
@@ -128,7 +132,7 @@ static struct request *get_alua_req(struct scsi_device *sdev,
rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER;
rq->retries = ALUA_FAILOVER_RETRIES;
- rq->timeout = ALUA_FAILOVER_TIMEOUT;
+ rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ;
return rq;
}
@@ -174,7 +178,8 @@ done:
* submit_rtpg - Issue a REPORT TARGET GROUP STATES command
* @sdev: sdev the command should be sent to
*/
-static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
+static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h,
+ bool rtpg_ext_hdr_req)
{
struct request *rq;
int err = SCSI_DH_RES_TEMP_UNAVAIL;
@@ -185,7 +190,10 @@ static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
/* Prepare the command. */
rq->cmd[0] = MAINTENANCE_IN;
- rq->cmd[1] = MI_REPORT_TARGET_PGS;
+ if (rtpg_ext_hdr_req)
+ rq->cmd[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
+ else
+ rq->cmd[1] = MI_REPORT_TARGET_PGS;
rq->cmd[6] = (h->bufflen >> 24) & 0xff;
rq->cmd[7] = (h->bufflen >> 16) & 0xff;
rq->cmd[8] = (h->bufflen >> 8) & 0xff;
@@ -518,11 +526,18 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
int len, k, off, valid_states = 0;
unsigned char *ucp;
unsigned err;
- unsigned long expiry, interval = 1000;
+ bool rtpg_ext_hdr_req = 1;
+ unsigned long expiry, interval = 0;
+ unsigned int tpg_desc_tbl_off;
+ unsigned char orig_transition_tmo;
+
+ if (!h->transition_tmo)
+ expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT * HZ);
+ else
+ expiry = round_jiffies_up(jiffies + h->transition_tmo * HZ);
- expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
retry:
- err = submit_rtpg(sdev, h);
+ err = submit_rtpg(sdev, h, rtpg_ext_hdr_req);
if (err == SCSI_DH_IO && h->senselen > 0) {
err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
@@ -530,6 +545,21 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
if (!err)
return SCSI_DH_IO;
+ /*
+ * submit_rtpg() has failed on existing arrays
+ * when requesting extended header info, and
+ * the array doesn't support extended headers,
+ * even though it shouldn't according to T10.
+ * The retry without rtpg_ext_hdr_req set
+ * handles this.
+ */
+ if (rtpg_ext_hdr_req == 1 &&
+ sense_hdr.sense_key == ILLEGAL_REQUEST &&
+ sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
+ rtpg_ext_hdr_req = 0;
+ goto retry;
+ }
+
err = alua_check_sense(sdev, &sense_hdr);
if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry))
goto retry;
@@ -556,7 +586,28 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
goto retry;
}
- for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) {
+ orig_transition_tmo = h->transition_tmo;
+ if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && h->buff[5] != 0)
+ h->transition_tmo = h->buff[5];
+ else
+ h->transition_tmo = ALUA_FAILOVER_TIMEOUT;
+
+ if (orig_transition_tmo != h->transition_tmo) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s: transition timeout set to %d seconds\n",
+ ALUA_DH_NAME, h->transition_tmo);
+ expiry = jiffies + h->transition_tmo * HZ;
+ }
+
+ if ((h->buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
+ tpg_desc_tbl_off = 8;
+ else
+ tpg_desc_tbl_off = 4;
+
+ for (k = tpg_desc_tbl_off, ucp = h->buff + tpg_desc_tbl_off;
+ k < len;
+ k += off, ucp += off) {
+
if (h->group_id == (ucp[2] << 8) + ucp[3]) {
h->state = ucp[0] & 0x0f;
h->pref = ucp[0] >> 7;
@@ -581,7 +632,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h)
case TPGS_STATE_TRANSITIONING:
if (time_before(jiffies, expiry)) {
/* State transition, retry */
- interval *= 2;
+ interval += 2000;
msleep(interval);
goto retry;
}
@@ -691,9 +742,9 @@ static int alua_activate(struct scsi_device *sdev,
stpg = 0;
break;
case TPGS_STATE_STANDBY:
+ case TPGS_STATE_UNAVAILABLE:
stpg = 1;
break;
- case TPGS_STATE_UNAVAILABLE:
case TPGS_STATE_OFFLINE:
err = SCSI_DH_IO;
break;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index fe30b1b65e1d..078d262ac7cc 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1529,7 +1529,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
return 0;
err:
- per_cpu_ptr(lport->dev_stats, get_cpu())->ErrorFrames++;
+ per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
put_cpu();
err2:
kfree_skb(skb);
@@ -1569,7 +1569,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
struct ethhdr *eh;
struct fcoe_crc_eof *cp;
struct sk_buff *skb;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
struct fc_frame_header *fh;
unsigned int hlen; /* header length implies the version */
unsigned int tlen; /* trailer length */
@@ -1680,7 +1680,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_shinfo(skb)->gso_size = 0;
}
/* update tx stats: regardless if LLD fails */
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
stats->TxFrames++;
stats->TxWords += wlen;
put_cpu();
@@ -1714,7 +1714,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
struct fcoe_interface *fcoe;
struct fc_frame_header *fh;
struct sk_buff *skb = (struct sk_buff *)fp;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
/*
* We only check CRC if no offload is available and if it is
@@ -1745,7 +1745,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
return 0;
}
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
@@ -1762,7 +1762,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
u32 fr_len;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
struct fcoe_crc_eof crc_eof;
struct fc_frame *fp;
struct fcoe_port *port;
@@ -1793,7 +1793,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
printk(KERN_WARNING "fcoe: FCoE version "
@@ -1851,23 +1851,25 @@ static int fcoe_percpu_receive_thread(void *arg)
set_user_nice(current, -20);
+retry:
while (!kthread_should_stop()) {
spin_lock_bh(&p->fcoe_rx_list.lock);
skb_queue_splice_init(&p->fcoe_rx_list, &tmp);
- spin_unlock_bh(&p->fcoe_rx_list.lock);
-
- while ((skb = __skb_dequeue(&tmp)) != NULL)
- fcoe_recv_frame(skb);
- spin_lock_bh(&p->fcoe_rx_list.lock);
- if (!skb_queue_len(&p->fcoe_rx_list)) {
+ if (!skb_queue_len(&tmp)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_bh(&p->fcoe_rx_list.lock);
schedule();
set_current_state(TASK_RUNNING);
- } else
- spin_unlock_bh(&p->fcoe_rx_list.lock);
+ goto retry;
+ }
+
+ spin_unlock_bh(&p->fcoe_rx_list.lock);
+
+ while ((skb = __skb_dequeue(&tmp)) != NULL)
+ fcoe_recv_frame(skb);
+
}
return 0;
}
@@ -1970,7 +1972,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
struct fcoe_ctlr *ctlr;
struct fcoe_interface *fcoe;
struct fcoe_port *port;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
u32 link_possible = 1;
u32 mfs;
int rc = NOTIFY_OK;
@@ -2024,7 +2026,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
if (link_possible && !fcoe_link_ok(lport))
fcoe_ctlr_link_up(ctlr);
else if (fcoe_ctlr_link_down(ctlr)) {
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
stats->LinkFailureCount++;
put_cpu();
fcoe_clean_pending_queue(lport);
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index d68d57241ee6..2ebe03a4b51d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -788,11 +788,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
unsigned long deadline;
unsigned long sel_time = 0;
struct list_head del_list;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
INIT_LIST_HEAD(&del_list);
- stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
+ stats = per_cpu_ptr(fip->lp->stats, get_cpu());
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
@@ -1104,8 +1104,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct fc_frame_header *fh = NULL;
struct fip_desc *desc;
struct fip_encaps *els;
- struct fcoe_dev_stats *stats;
struct fcoe_fcf *sel;
+ struct fc_stats *stats;
enum fip_desc_type els_dtype = 0;
u8 els_op;
u8 sub;
@@ -1249,7 +1249,7 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
fr_dev(fp) = lport;
fr_encaps(fp) = els_dtype;
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
stats->RxFrames++;
stats->RxWords += skb->len / FIP_BPW;
put_cpu();
@@ -1353,7 +1353,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
ntoh24(vp->fd_fc_id));
if (vn_port && (vn_port == lport)) {
mutex_lock(&fip->ctlr_mutex);
- per_cpu_ptr(lport->dev_stats,
+ per_cpu_ptr(lport->stats,
get_cpu())->VLinkFailureCount++;
put_cpu();
fcoe_ctlr_reset(fip);
@@ -1383,8 +1383,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
* followed by physical port
*/
mutex_lock(&fip->ctlr_mutex);
- per_cpu_ptr(lport->dev_stats,
- get_cpu())->VLinkFailureCount++;
+ per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++;
put_cpu();
fcoe_ctlr_reset(fip);
mutex_unlock(&fip->ctlr_mutex);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 2bc163198d33..5e751689a089 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -102,7 +102,7 @@ static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
int ret;
ret = kstrtoul(buf, 0, val);
- if (ret || *val < 0)
+ if (ret)
return -EINVAL;
/*
* Check for overflow; dev_loss_tmo is u32
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index b46f43dced78..ac76d8a042d7 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -89,7 +89,7 @@ void __fcoe_get_lesb(struct fc_lport *lport,
{
unsigned int cpu;
u32 lfc, vlfc, mdac;
- struct fcoe_dev_stats *devst;
+ struct fc_stats *stats;
struct fcoe_fc_els_lesb *lesb;
struct rtnl_link_stats64 temp;
@@ -99,10 +99,10 @@ void __fcoe_get_lesb(struct fc_lport *lport,
lesb = (struct fcoe_fc_els_lesb *)fc_lesb;
memset(lesb, 0, sizeof(*lesb));
for_each_possible_cpu(cpu) {
- devst = per_cpu_ptr(lport->dev_stats, cpu);
- lfc += devst->LinkFailureCount;
- vlfc += devst->VLinkFailureCount;
- mdac += devst->MissDiscAdvCount;
+ stats = per_cpu_ptr(lport->stats, cpu);
+ lfc += stats->LinkFailureCount;
+ vlfc += stats->VLinkFailureCount;
+ mdac += stats->MissDiscAdvCount;
}
lesb->lesb_link_fail = htonl(lfc);
lesb->lesb_vlink_fail = htonl(vlfc);
@@ -502,7 +502,7 @@ static int __init fcoe_transport_init(void)
return 0;
}
-static int __exit fcoe_transport_exit(void)
+static int fcoe_transport_exit(void)
{
struct fcoe_transport *ft;
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index a3a056a9db67..593085a52275 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -42,7 +42,7 @@
#include "scsi_logging.h"
-static atomic_t scsi_host_next_hn; /* host_no for next new host */
+static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
static void scsi_host_cls_release(struct device *dev)
@@ -290,6 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
struct Scsi_Host *shost = dev_to_shost(dev);
struct device *parent = dev->parent;
struct request_queue *q;
+ void *queuedata;
scsi_proc_hostdir_rm(shost->hostt);
@@ -299,9 +300,9 @@ static void scsi_host_dev_release(struct device *dev)
destroy_workqueue(shost->work_q);
q = shost->uspace_req_q;
if (q) {
- kfree(q->queuedata);
- q->queuedata = NULL;
- scsi_free_queue(q);
+ queuedata = q->queuedata;
+ blk_cleanup_queue(q);
+ kfree(queuedata);
}
scsi_destroy_command_freelist(shost);
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 10b65556937b..192724ed7a32 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
-static const char driver_ver[] = "v1.6 (090910)";
+static const char driver_ver[] = "v1.6 (091225)";
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
@@ -958,6 +958,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
{
struct Scsi_Host *host = NULL;
struct hptiop_hba *hba;
+ struct hptiop_adapter_ops *iop_ops;
struct hpt_iop_request_get_config iop_config;
struct hpt_iop_request_set_config set_config;
dma_addr_t start_phy;
@@ -978,7 +979,8 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
pci_set_master(pcidev);
/* Enable 64bit DMA if possible */
- if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
+ iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
+ if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(iop_ops->hw_dma_bit_mask))) {
if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "hptiop: fail to set dma_mask\n");
goto disable_pci_device;
@@ -998,7 +1000,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
hba = (struct hptiop_hba *)host->hostdata;
- hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
+ hba->ops = iop_ops;
hba->pcidev = pcidev;
hba->host = host;
hba->initialized = 0;
@@ -1239,6 +1241,7 @@ static struct hptiop_adapter_ops hptiop_itl_ops = {
.iop_intr = iop_intr_itl,
.post_msg = hptiop_post_msg_itl,
.post_req = hptiop_post_req_itl,
+ .hw_dma_bit_mask = 64,
};
static struct hptiop_adapter_ops hptiop_mv_ops = {
@@ -1254,6 +1257,7 @@ static struct hptiop_adapter_ops hptiop_mv_ops = {
.iop_intr = iop_intr_mv,
.post_msg = hptiop_post_msg_mv,
.post_req = hptiop_post_req_mv,
+ .hw_dma_bit_mask = 33,
};
static struct pci_device_id hptiop_id_table[] = {
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index 0b871c0ae568..baa648d87fde 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -297,6 +297,7 @@ struct hptiop_adapter_ops {
int (*iop_intr)(struct hptiop_hba *hba);
void (*post_msg)(struct hptiop_hba *hba, u32 msg);
void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
+ int hw_dma_bit_mask;
};
#define HPT_IOCTL_RESULT_OK 0
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 47e28b555029..92c1d86d1fc6 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -166,6 +166,9 @@ static struct scsi_host_template isci_sht = {
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
.use_clustering = ENABLE_CLUSTERING,
+ .eh_abort_handler = sas_eh_abort_handler,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_bus_reset_handler = sas_eh_bus_reset_handler,
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = isci_host_attrs,
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index aceffadb21c7..c772d8d27159 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -99,11 +99,6 @@ struct fc_exch_mgr {
u16 max_xid;
u16 pool_max_index;
- /*
- * currently exchange mgr stats are updated but not used.
- * either stats can be expose via sysfs or remove them
- * all together if not used XXX
- */
struct {
atomic_t no_free_exch;
atomic_t no_free_exch_xid;
@@ -124,7 +119,7 @@ struct fc_exch_mgr {
* for each anchor to determine if that EM should be used. The last
* anchor in the list will always match to handle any exchanges not
* handled by other EMs. The non-default EMs would be added to the
- * anchor list by HW that provides FCoE offloads.
+ * anchor list by HW that provides offloads.
*/
struct fc_exch_mgr_anchor {
struct list_head ema_list;
@@ -339,6 +334,52 @@ static void fc_exch_release(struct fc_exch *ep)
}
/**
+ * fc_exch_timer_cancel() - cancel exch timer
+ * @ep: The exchange whose timer to be canceled
+ */
+static inline void fc_exch_timer_cancel(struct fc_exch *ep)
+{
+ if (cancel_delayed_work(&ep->timeout_work)) {
+ FC_EXCH_DBG(ep, "Exchange timer canceled\n");
+ atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+ }
+}
+
+/**
+ * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the
+ * the exchange lock held
+ * @ep: The exchange whose timer will start
+ * @timer_msec: The timeout period
+ *
+ * Used for upper level protocols to time out the exchange.
+ * The timer is cancelled when it fires or when the exchange completes.
+ */
+static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
+ unsigned int timer_msec)
+{
+ if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+ return;
+
+ FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
+
+ if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
+ msecs_to_jiffies(timer_msec)))
+ fc_exch_hold(ep); /* hold for timer */
+}
+
+/**
+ * fc_exch_timer_set() - Lock the exchange and set the timer
+ * @ep: The exchange whose timer will start
+ * @timer_msec: The timeout period
+ */
+static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
+{
+ spin_lock_bh(&ep->ex_lock);
+ fc_exch_timer_set_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+}
+
+/**
* fc_exch_done_locked() - Complete an exchange with the exchange lock held
* @ep: The exchange that is complete
*/
@@ -359,8 +400,7 @@ static int fc_exch_done_locked(struct fc_exch *ep)
if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
ep->state |= FC_EX_DONE;
- if (cancel_delayed_work(&ep->timeout_work))
- atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+ fc_exch_timer_cancel(ep);
rc = 0;
}
return rc;
@@ -424,40 +464,6 @@ static void fc_exch_delete(struct fc_exch *ep)
}
/**
- * fc_exch_timer_set_locked() - Start a timer for an exchange w/ the
- * the exchange lock held
- * @ep: The exchange whose timer will start
- * @timer_msec: The timeout period
- *
- * Used for upper level protocols to time out the exchange.
- * The timer is cancelled when it fires or when the exchange completes.
- */
-static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
- unsigned int timer_msec)
-{
- if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
- return;
-
- FC_EXCH_DBG(ep, "Exchange timer armed\n");
-
- if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
- msecs_to_jiffies(timer_msec)))
- fc_exch_hold(ep); /* hold for timer */
-}
-
-/**
- * fc_exch_timer_set() - Lock the exchange and set the timer
- * @ep: The exchange whose timer will start
- * @timer_msec: The timeout period
- */
-static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
-{
- spin_lock_bh(&ep->ex_lock);
- fc_exch_timer_set_locked(ep, timer_msec);
- spin_unlock_bh(&ep->ex_lock);
-}
-
-/**
* fc_seq_send() - Send a frame using existing sequence/exchange pair
* @lport: The local port that the exchange will be sent on
* @sp: The sequence to be sent
@@ -986,7 +992,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
/*
* Update sequence_id based on incoming last
* frame of sequence exchange. This is needed
- * for FCoE target where DDP has been used
+ * for FC target where DDP has been used
* on target where, stack is indicated only
* about last frame's (payload _header) header.
* Whereas "seq_id" which is part of
@@ -1549,8 +1555,10 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
fc_exch_rctl_name(fh->fh_r_ctl));
- if (cancel_delayed_work_sync(&ep->timeout_work))
+ if (cancel_delayed_work_sync(&ep->timeout_work)) {
+ FC_EXCH_DBG(ep, "Exchange timer canceled\n");
fc_exch_release(ep); /* release from pending timer hold */
+ }
spin_lock_bh(&ep->ex_lock);
switch (fh->fh_r_ctl) {
@@ -1737,8 +1745,7 @@ static void fc_exch_reset(struct fc_exch *ep)
spin_lock_bh(&ep->ex_lock);
fc_exch_abort_locked(ep, 0);
ep->state |= FC_EX_RST_CLEANUP;
- if (cancel_delayed_work(&ep->timeout_work))
- atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+ fc_exch_timer_cancel(ep);
resp = ep->resp;
ep->resp = NULL;
if (ep->esb_stat & ESB_ST_REC_QUAL)
@@ -2133,10 +2140,8 @@ static void fc_exch_els_rrq(struct fc_frame *fp)
ep->esb_stat &= ~ESB_ST_REC_QUAL;
atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
}
- if (ep->esb_stat & ESB_ST_COMPLETE) {
- if (cancel_delayed_work(&ep->timeout_work))
- atomic_dec(&ep->ex_refcnt); /* drop timer hold */
- }
+ if (ep->esb_stat & ESB_ST_COMPLETE)
+ fc_exch_timer_cancel(ep);
spin_unlock_bh(&ep->ex_lock);
@@ -2156,6 +2161,31 @@ out:
}
/**
+ * fc_exch_update_stats() - update exches stats to lport
+ * @lport: The local port to update exchange manager stats
+ */
+void fc_exch_update_stats(struct fc_lport *lport)
+{
+ struct fc_host_statistics *st;
+ struct fc_exch_mgr_anchor *ema;
+ struct fc_exch_mgr *mp;
+
+ st = &lport->host_stats;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ mp = ema->mp;
+ st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
+ st->fc_no_free_exch_xid +=
+ atomic_read(&mp->stats.no_free_exch_xid);
+ st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
+ st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
+ st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
+ st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
+ }
+}
+EXPORT_SYMBOL(fc_exch_update_stats);
+
+/**
* fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
* @lport: The local port to add the exchange manager to
* @mp: The exchange manager to be added to the local port
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index f7357308655a..14243fa5f8e8 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -158,6 +158,9 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
fsp->timer.data = (unsigned long)fsp;
INIT_LIST_HEAD(&fsp->list);
spin_lock_init(&fsp->scsi_pkt_lock);
+ } else {
+ per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++;
+ put_cpu();
}
return fsp;
}
@@ -264,6 +267,9 @@ static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
if (!fsp->seq_ptr)
return -EINVAL;
+ per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++;
+ put_cpu();
+
fsp->state |= FC_SRB_ABORT_PENDING;
return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
}
@@ -420,6 +426,8 @@ static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
if (likely(fp))
return fp;
+ per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++;
+ put_cpu();
/* error case */
fc_fcp_can_queue_ramp_down(lport);
return NULL;
@@ -434,7 +442,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
struct scsi_cmnd *sc = fsp->cmd;
struct fc_lport *lport = fsp->lp;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
struct fc_frame_header *fh;
size_t start_offset;
size_t offset;
@@ -496,7 +504,7 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
stats->ErrorFrames++;
/* per cpu count, not total count, but OK for limit */
if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT)
@@ -1372,10 +1380,10 @@ static void fc_fcp_timeout(unsigned long data)
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
- if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
- fc_fcp_rec(fsp);
- else if (fsp->state & FC_SRB_RCV_STATUS)
+ if (fsp->state & FC_SRB_RCV_STATUS)
fc_fcp_complete_locked(fsp);
+ else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ fc_fcp_rec(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
@@ -1786,7 +1794,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
struct fc_rport_libfc_priv *rpriv;
int rval;
int rc = 0;
- struct fcoe_dev_stats *stats;
+ struct fc_stats *stats;
rval = fc_remote_port_chkready(rport);
if (rval) {
@@ -1835,7 +1843,7 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
/*
* setup the data direction
*/
- stats = per_cpu_ptr(lport->dev_stats, get_cpu());
+ stats = per_cpu_ptr(lport->stats, get_cpu());
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
fsp->req_flags = FC_SRB_READ;
stats->InputRequests++;
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
index 981329a17c48..0382ac06906e 100644
--- a/drivers/scsi/libfc/fc_frame.c
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -49,7 +49,7 @@ u32 fc_frame_crc_check(struct fc_frame *fp)
EXPORT_SYMBOL(fc_frame_crc_check);
/*
- * Allocate a frame intended to be sent via fcoe_xmit.
+ * Allocate a frame intended to be sent.
* Get an sk_buff for the frame and set the length.
*/
struct fc_frame *_fc_frame_alloc(size_t len)
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index c1402fb499ab..f04d15c67df3 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -299,47 +299,54 @@ EXPORT_SYMBOL(fc_get_host_speed);
*/
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
- struct fc_host_statistics *fcoe_stats;
+ struct fc_host_statistics *fc_stats;
struct fc_lport *lport = shost_priv(shost);
struct timespec v0, v1;
unsigned int cpu;
u64 fcp_in_bytes = 0;
u64 fcp_out_bytes = 0;
- fcoe_stats = &lport->host_stats;
- memset(fcoe_stats, 0, sizeof(struct fc_host_statistics));
+ fc_stats = &lport->host_stats;
+ memset(fc_stats, 0, sizeof(struct fc_host_statistics));
jiffies_to_timespec(jiffies, &v0);
jiffies_to_timespec(lport->boot_time, &v1);
- fcoe_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
+ fc_stats->seconds_since_last_reset = (v0.tv_sec - v1.tv_sec);
for_each_possible_cpu(cpu) {
- struct fcoe_dev_stats *stats;
-
- stats = per_cpu_ptr(lport->dev_stats, cpu);
-
- fcoe_stats->tx_frames += stats->TxFrames;
- fcoe_stats->tx_words += stats->TxWords;
- fcoe_stats->rx_frames += stats->RxFrames;
- fcoe_stats->rx_words += stats->RxWords;
- fcoe_stats->error_frames += stats->ErrorFrames;
- fcoe_stats->invalid_crc_count += stats->InvalidCRCCount;
- fcoe_stats->fcp_input_requests += stats->InputRequests;
- fcoe_stats->fcp_output_requests += stats->OutputRequests;
- fcoe_stats->fcp_control_requests += stats->ControlRequests;
+ struct fc_stats *stats;
+
+ stats = per_cpu_ptr(lport->stats, cpu);
+
+ fc_stats->tx_frames += stats->TxFrames;
+ fc_stats->tx_words += stats->TxWords;
+ fc_stats->rx_frames += stats->RxFrames;
+ fc_stats->rx_words += stats->RxWords;
+ fc_stats->error_frames += stats->ErrorFrames;
+ fc_stats->invalid_crc_count += stats->InvalidCRCCount;
+ fc_stats->fcp_input_requests += stats->InputRequests;
+ fc_stats->fcp_output_requests += stats->OutputRequests;
+ fc_stats->fcp_control_requests += stats->ControlRequests;
fcp_in_bytes += stats->InputBytes;
fcp_out_bytes += stats->OutputBytes;
- fcoe_stats->link_failure_count += stats->LinkFailureCount;
+ fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails;
+ fc_stats->fcp_packet_aborts += stats->FcpPktAborts;
+ fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails;
+ fc_stats->link_failure_count += stats->LinkFailureCount;
}
- fcoe_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
- fcoe_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
- fcoe_stats->lip_count = -1;
- fcoe_stats->nos_count = -1;
- fcoe_stats->loss_of_sync_count = -1;
- fcoe_stats->loss_of_signal_count = -1;
- fcoe_stats->prim_seq_protocol_err_count = -1;
- fcoe_stats->dumped_frames = -1;
- return fcoe_stats;
+ fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
+ fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
+ fc_stats->lip_count = -1;
+ fc_stats->nos_count = -1;
+ fc_stats->loss_of_sync_count = -1;
+ fc_stats->loss_of_signal_count = -1;
+ fc_stats->prim_seq_protocol_err_count = -1;
+ fc_stats->dumped_frames = -1;
+
+ /* update exches stats */
+ fc_exch_update_stats(lport);
+
+ return fc_stats;
}
EXPORT_SYMBOL(fc_get_host_stats);
@@ -973,7 +980,8 @@ drop:
rcu_read_unlock();
FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
fc_frame_free(fp);
- lport->tt.exch_done(sp);
+ if (sp)
+ lport->tt.exch_done(sp);
}
/**
@@ -1590,8 +1598,9 @@ static void fc_lport_timeout(struct work_struct *work)
case LPORT_ST_RPA:
case LPORT_ST_DHBA:
case LPORT_ST_DPRT:
- fc_lport_enter_ms(lport, lport->state);
- break;
+ FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
+ fc_lport_state(lport));
+ /* fall thru */
case LPORT_ST_SCR:
fc_lport_enter_scr(lport);
break;
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 441d88ad99a7..a59fcdc8fd63 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -139,12 +139,12 @@ static void sas_ata_task_done(struct sas_task *task)
if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
((stat->stat == SAM_STAT_CHECK_CONDITION &&
dev->sata_dev.command_set == ATAPI_COMMAND_SET))) {
- ata_tf_from_fis(resp->ending_fis, &dev->sata_dev.tf);
+ memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
if (!link->sactive) {
- qc->err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
} else {
- link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.tf.command);
+ link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
if (unlikely(link->eh_info.err_mask))
qc->flags |= ATA_QCFLAG_FAILED;
}
@@ -161,8 +161,8 @@ static void sas_ata_task_done(struct sas_task *task)
qc->flags |= ATA_QCFLAG_FAILED;
}
- dev->sata_dev.tf.feature = 0x04; /* status err */
- dev->sata_dev.tf.command = ATA_ERR;
+ dev->sata_dev.fis[3] = 0x04; /* status err */
+ dev->sata_dev.fis[2] = ATA_ERR;
}
}
@@ -269,7 +269,7 @@ static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct domain_device *dev = qc->ap->private_data;
- memcpy(&qc->result_tf, &dev->sata_dev.tf, sizeof(qc->result_tf));
+ ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
return true;
}
@@ -523,6 +523,31 @@ static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
i->dft->lldd_ata_set_dmamode(dev);
}
+static void sas_ata_sched_eh(struct ata_port *ap)
+{
+ struct domain_device *dev = ap->private_data;
+ struct sas_ha_struct *ha = dev->port->ha;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->lock, flags);
+ if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state))
+ ha->eh_active++;
+ ata_std_sched_eh(ap);
+ spin_unlock_irqrestore(&ha->lock, flags);
+}
+
+void sas_ata_end_eh(struct ata_port *ap)
+{
+ struct domain_device *dev = ap->private_data;
+ struct sas_ha_struct *ha = dev->port->ha;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ha->lock, flags);
+ if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state))
+ ha->eh_active--;
+ spin_unlock_irqrestore(&ha->lock, flags);
+}
+
static struct ata_port_operations sas_sata_ops = {
.prereset = ata_std_prereset,
.hardreset = sas_ata_hard_reset,
@@ -536,6 +561,8 @@ static struct ata_port_operations sas_sata_ops = {
.port_start = ata_sas_port_start,
.port_stop = ata_sas_port_stop,
.set_dmamode = sas_ata_set_dmamode,
+ .sched_eh = sas_ata_sched_eh,
+ .end_eh = sas_ata_end_eh,
};
static struct ata_port_info sata_port_info = {
@@ -591,7 +618,6 @@ void sas_ata_task_abort(struct sas_task *task)
spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(qc->scsicmd->request);
spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_schedule_eh(qc->scsicmd->device->host);
return;
}
@@ -708,10 +734,6 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
struct ata_port *ap = dev->sata_dev.ap;
struct sas_ha_struct *ha = dev->port->ha;
- /* hold a reference over eh since we may be racing with final
- * remove once all commands are completed
- */
- kref_get(&dev->kref);
sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
ata_scsi_port_error_handler(ha->core.shost, ap);
sas_put_device(dev);
@@ -720,7 +742,7 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
void sas_ata_strategy_handler(struct Scsi_Host *shost)
{
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
- LIST_HEAD(async);
+ ASYNC_DOMAIN_EXCLUSIVE(async);
int i;
/* it's ok to defer revalidation events during ata eh, these
@@ -742,6 +764,13 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost)
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
if (!dev_is_sata(dev))
continue;
+
+ /* hold a reference over eh since we may be
+ * racing with final remove once all commands
+ * are completed
+ */
+ kref_get(&dev->kref);
+
async_schedule_domain(async_sas_ata_eh, dev, &async);
}
spin_unlock(&port->dev_list_lock);
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 629a0865b130..3e9dc1a84358 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -39,18 +39,13 @@ void sas_init_dev(struct domain_device *dev)
{
switch (dev->dev_type) {
case SAS_END_DEV:
+ INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
break;
case EDGE_DEV:
case FANOUT_DEV:
INIT_LIST_HEAD(&dev->ex_dev.children);
mutex_init(&dev->ex_dev.cmd_mutex);
break;
- case SATA_DEV:
- case SATA_PM:
- case SATA_PM_PORT:
- case SATA_PENDING:
- INIT_LIST_HEAD(&dev->sata_dev.children);
- break;
default:
break;
}
@@ -286,6 +281,8 @@ void sas_free_device(struct kref *kref)
static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
{
+ struct sas_ha_struct *ha = port->ha;
+
sas_notify_lldd_dev_gone(dev);
if (!dev->parent)
dev->port->port_dev = NULL;
@@ -294,8 +291,18 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d
spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
+ if (dev_is_sata(dev))
+ sas_ata_end_eh(dev->sata_dev.ap);
spin_unlock_irq(&port->dev_list_lock);
+ spin_lock_irq(&ha->lock);
+ if (dev->dev_type == SAS_END_DEV &&
+ !list_empty(&dev->ssp_dev.eh_list_node)) {
+ list_del_init(&dev->ssp_dev.eh_list_node);
+ ha->eh_active--;
+ }
+ spin_unlock_irq(&ha->lock);
+
sas_put_device(dev);
}
@@ -488,9 +495,9 @@ static void sas_chain_event(int event, unsigned long *pending,
if (!test_and_set_bit(event, pending)) {
unsigned long flags;
- spin_lock_irqsave(&ha->state_lock, flags);
+ spin_lock_irqsave(&ha->lock, flags);
sas_chain_work(ha, sw);
- spin_unlock_irqrestore(&ha->state_lock, flags);
+ spin_unlock_irqrestore(&ha->lock, flags);
}
}
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index 4e4292d210c1..789c4d8bb7a7 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -47,9 +47,9 @@ static void sas_queue_event(int event, unsigned long *pending,
if (!test_and_set_bit(event, pending)) {
unsigned long flags;
- spin_lock_irqsave(&ha->state_lock, flags);
+ spin_lock_irqsave(&ha->lock, flags);
sas_queue_work(ha, work);
- spin_unlock_irqrestore(&ha->state_lock, flags);
+ spin_unlock_irqrestore(&ha->lock, flags);
}
}
@@ -61,18 +61,18 @@ void __sas_drain_work(struct sas_ha_struct *ha)
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
- spin_lock_irq(&ha->state_lock);
- spin_unlock_irq(&ha->state_lock);
+ spin_lock_irq(&ha->lock);
+ spin_unlock_irq(&ha->lock);
drain_workqueue(wq);
- spin_lock_irq(&ha->state_lock);
+ spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
sas_queue_work(ha, sw);
}
- spin_unlock_irq(&ha->state_lock);
+ spin_unlock_irq(&ha->lock);
}
int sas_drain_work(struct sas_ha_struct *ha)
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index caa0525d2523..efc6e72f09f3 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -51,14 +51,14 @@ static void smp_task_timedout(unsigned long _task)
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- complete(&task->completion);
+ complete(&task->slow_task->completion);
}
static void smp_task_done(struct sas_task *task)
{
- if (!del_timer(&task->timer))
+ if (!del_timer(&task->slow_task->timer))
return;
- complete(&task->completion);
+ complete(&task->slow_task->completion);
}
/* Give it some long enough timeout. In seconds. */
@@ -79,7 +79,7 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
break;
}
- task = sas_alloc_task(GFP_KERNEL);
+ task = sas_alloc_slow_task(GFP_KERNEL);
if (!task) {
res = -ENOMEM;
break;
@@ -91,20 +91,20 @@ static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
task->task_done = smp_task_done;
- task->timer.data = (unsigned long) task;
- task->timer.function = smp_task_timedout;
- task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
- add_timer(&task->timer);
+ task->slow_task->timer.data = (unsigned long) task;
+ task->slow_task->timer.function = smp_task_timedout;
+ task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
+ add_timer(&task->slow_task->timer);
res = i->dft->lldd_execute_task(task, 1, GFP_KERNEL);
if (res) {
- del_timer(&task->timer);
+ del_timer(&task->slow_task->timer);
SAS_DPRINTK("executing SMP task failed:%d\n", res);
break;
}
- wait_for_completion(&task->completion);
+ wait_for_completion(&task->slow_task->completion);
res = -ECOMM;
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
SAS_DPRINTK("smp task timed out or aborted\n");
@@ -868,7 +868,7 @@ static struct domain_device *sas_ex_discover_end_dev(
}
/* See if this phy is part of a wide port */
-static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
+static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
{
struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
int i;
@@ -884,11 +884,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
sas_port_add_phy(ephy->port, phy->phy);
phy->port = ephy->port;
phy->phy_state = PHY_DEVICE_DISCOVERED;
- return 0;
+ return true;
}
}
- return -ENODEV;
+ return false;
}
static struct domain_device *sas_ex_discover_expander(
@@ -1030,8 +1030,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
return res;
}
- res = sas_ex_join_wide_port(dev, phy_id);
- if (!res) {
+ if (sas_ex_join_wide_port(dev, phy_id)) {
SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
return res;
@@ -1077,8 +1076,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
SAS_ADDR(child->sas_addr)) {
ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
- res = sas_ex_join_wide_port(dev, i);
- if (!res)
+ if (sas_ex_join_wide_port(dev, i))
SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
@@ -1943,32 +1941,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
{
struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
struct domain_device *child;
- bool found = false;
- int res, i;
+ int res;
SAS_DPRINTK("ex %016llx phy%d new device attached\n",
SAS_ADDR(dev->sas_addr), phy_id);
res = sas_ex_phy_discover(dev, phy_id);
if (res)
- goto out;
- /* to support the wide port inserted */
- for (i = 0; i < dev->ex_dev.num_phys; i++) {
- struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
- if (i == phy_id)
- continue;
- if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
- SAS_ADDR(ex_phy->attached_sas_addr)) {
- found = true;
- break;
- }
- }
- if (found) {
- sas_ex_join_wide_port(dev, phy_id);
+ return res;
+
+ if (sas_ex_join_wide_port(dev, phy_id))
return 0;
- }
+
res = sas_ex_discover_devices(dev, phy_id);
- if (!res)
- goto out;
+ if (res)
+ return res;
list_for_each_entry(child, &dev->ex_dev.children, siblings) {
if (SAS_ADDR(child->sas_addr) ==
SAS_ADDR(ex_phy->attached_sas_addr)) {
@@ -1978,7 +1964,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
break;
}
}
-out:
return res;
}
@@ -2005,6 +1990,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
u8 sas_addr[8];
int res;
+ memset(sas_addr, 0, 8);
res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
switch (res) {
case SMP_RESP_NO_PHY:
@@ -2017,9 +2003,13 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
return res;
case SMP_RESP_FUNC_ACC:
break;
+ case -ECOMM:
+ break;
+ default:
+ return res;
}
- if (SAS_ADDR(sas_addr) == 0) {
+ if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
phy->phy_state = PHY_EMPTY;
sas_unregister_devs_sas_addr(dev, phy_id, last);
return res;
@@ -2109,9 +2099,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
struct domain_device *dev = NULL;
res = sas_find_bcast_dev(port_dev, &dev);
- if (res)
- goto out;
- if (dev) {
+ while (res == 0 && dev) {
struct expander_device *ex = &dev->ex_dev;
int i = 0, phy_id;
@@ -2123,8 +2111,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
res = sas_rediscover(dev, phy_id);
i = phy_id + 1;
} while (i < ex->num_phys);
+
+ dev = NULL;
+ res = sas_find_bcast_dev(port_dev, &dev);
}
-out:
return res;
}
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 10cb5ae30977..014297c05880 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -48,18 +48,37 @@ struct sas_task *sas_alloc_task(gfp_t flags)
INIT_LIST_HEAD(&task->list);
spin_lock_init(&task->task_state_lock);
task->task_state_flags = SAS_TASK_STATE_PENDING;
- init_timer(&task->timer);
- init_completion(&task->completion);
}
return task;
}
EXPORT_SYMBOL_GPL(sas_alloc_task);
+struct sas_task *sas_alloc_slow_task(gfp_t flags)
+{
+ struct sas_task *task = sas_alloc_task(flags);
+ struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);
+
+ if (!task || !slow) {
+ if (task)
+ kmem_cache_free(sas_task_cache, task);
+ kfree(slow);
+ return NULL;
+ }
+
+ task->slow_task = slow;
+ init_timer(&slow->timer);
+ init_completion(&slow->completion);
+
+ return task;
+}
+EXPORT_SYMBOL_GPL(sas_alloc_slow_task);
+
void sas_free_task(struct sas_task *task)
{
if (task) {
BUG_ON(!list_empty(&task->list));
+ kfree(task->slow_task);
kmem_cache_free(sas_task_cache, task);
}
}
@@ -114,9 +133,11 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
sas_ha->lldd_queue_size = 128; /* Sanity */
set_bit(SAS_HA_REGISTERED, &sas_ha->state);
- spin_lock_init(&sas_ha->state_lock);
+ spin_lock_init(&sas_ha->lock);
mutex_init(&sas_ha->drain_mutex);
+ init_waitqueue_head(&sas_ha->eh_wait_q);
INIT_LIST_HEAD(&sas_ha->defer_q);
+ INIT_LIST_HEAD(&sas_ha->eh_dev_q);
error = sas_register_phys(sas_ha);
if (error) {
@@ -163,9 +184,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
* events to be queued, and flush any in-progress drainers
*/
mutex_lock(&sas_ha->drain_mutex);
- spin_lock_irq(&sas_ha->state_lock);
+ spin_lock_irq(&sas_ha->lock);
clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
- spin_unlock_irq(&sas_ha->state_lock);
+ spin_unlock_irq(&sas_ha->lock);
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
@@ -411,9 +432,9 @@ static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
d->reset_result = 0;
d->hard_reset = hard_reset;
- spin_lock_irq(&ha->state_lock);
+ spin_lock_irq(&ha->lock);
sas_queue_work(ha, &d->reset_work);
- spin_unlock_irq(&ha->state_lock);
+ spin_unlock_irq(&ha->lock);
rc = sas_drain_work(ha);
if (rc == 0)
@@ -438,9 +459,9 @@ static int queue_phy_enable(struct sas_phy *phy, int enable)
d->enable_result = 0;
d->enable = enable;
- spin_lock_irq(&ha->state_lock);
+ spin_lock_irq(&ha->lock);
sas_queue_work(ha, &d->enable_work);
- spin_unlock_irq(&ha->state_lock);
+ spin_unlock_irq(&ha->lock);
rc = sas_drain_work(ha);
if (rc == 0)
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index f0b9b7bf1882..6e795a174a12 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -460,14 +460,109 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev)
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
+static void sas_wait_eh(struct domain_device *dev)
+{
+ struct sas_ha_struct *ha = dev->port->ha;
+ DEFINE_WAIT(wait);
+
+ if (dev_is_sata(dev)) {
+ ata_port_wait_eh(dev->sata_dev.ap);
+ return;
+ }
+ retry:
+ spin_lock_irq(&ha->lock);
+
+ while (test_bit(SAS_DEV_EH_PENDING, &dev->state)) {
+ prepare_to_wait(&ha->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_irq(&ha->lock);
+ schedule();
+ spin_lock_irq(&ha->lock);
+ }
+ finish_wait(&ha->eh_wait_q, &wait);
+
+ spin_unlock_irq(&ha->lock);
+
+ /* make sure SCSI EH is complete */
+ if (scsi_host_in_recovery(ha->core.shost)) {
+ msleep(10);
+ goto retry;
+ }
+}
+EXPORT_SYMBOL(sas_wait_eh);
+
+static int sas_queue_reset(struct domain_device *dev, int reset_type, int lun, int wait)
+{
+ struct sas_ha_struct *ha = dev->port->ha;
+ int scheduled = 0, tries = 100;
+
+ /* ata: promote lun reset to bus reset */
+ if (dev_is_sata(dev)) {
+ sas_ata_schedule_reset(dev);
+ if (wait)
+ sas_ata_wait_eh(dev);
+ return SUCCESS;
+ }
+
+ while (!scheduled && tries--) {
+ spin_lock_irq(&ha->lock);
+ if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
+ !test_bit(reset_type, &dev->state)) {
+ scheduled = 1;
+ ha->eh_active++;
+ list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
+ set_bit(SAS_DEV_EH_PENDING, &dev->state);
+ set_bit(reset_type, &dev->state);
+ int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
+ scsi_schedule_eh(ha->core.shost);
+ }
+ spin_unlock_irq(&ha->lock);
+
+ if (wait)
+ sas_wait_eh(dev);
+
+ if (scheduled)
+ return SUCCESS;
+ }
+
+ SAS_DPRINTK("%s reset of %s failed\n",
+ reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
+ dev_name(&dev->rphy->dev));
+
+ return FAILED;
+}
+
+int sas_eh_abort_handler(struct scsi_cmnd *cmd)
+{
+ int res;
+ struct sas_task *task = TO_SAS_TASK(cmd);
+ struct Scsi_Host *host = cmd->device->host;
+ struct sas_internal *i = to_sas_internal(host->transportt);
+
+ if (current != host->ehandler)
+ return FAILED;
+
+ if (!i->dft->lldd_abort_task)
+ return FAILED;
+
+ res = i->dft->lldd_abort_task(task);
+ if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
+ return SUCCESS;
+
+ return FAILED;
+}
+EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
+
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
- struct domain_device *dev = cmd_to_domain_dev(cmd);
- struct sas_internal *i =
- to_sas_internal(dev->port->ha->core.shost->transportt);
- struct scsi_lun lun;
int res;
+ struct scsi_lun lun;
+ struct Scsi_Host *host = cmd->device->host;
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct sas_internal *i = to_sas_internal(host->transportt);
+
+ if (current != host->ehandler)
+ return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun, 0);
int_to_scsilun(cmd->device->lun, &lun);
@@ -481,21 +576,22 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
return FAILED;
}
-/* Attempt to send a phy (bus) reset */
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
- struct domain_device *dev = cmd_to_domain_dev(cmd);
- struct sas_phy *phy = sas_get_local_phy(dev);
int res;
+ struct Scsi_Host *host = cmd->device->host;
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
+ struct sas_internal *i = to_sas_internal(host->transportt);
- res = sas_phy_reset(phy, 1);
- if (res)
- SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
- kobject_name(&phy->dev.kobj),
- res);
- sas_put_local_phy(phy);
+ if (current != host->ehandler)
+ return sas_queue_reset(dev, SAS_DEV_RESET, 0, 0);
- if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
+ if (!i->dft->lldd_I_T_nexus_reset)
+ return FAILED;
+
+ res = i->dft->lldd_I_T_nexus_reset(dev);
+ if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
+ res == -ENODEV)
return SUCCESS;
return FAILED;
@@ -667,16 +763,53 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
goto out;
}
+static void sas_eh_handle_resets(struct Scsi_Host *shost)
+{
+ struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
+ struct sas_internal *i = to_sas_internal(shost->transportt);
+
+ /* handle directed resets to sas devices */
+ spin_lock_irq(&ha->lock);
+ while (!list_empty(&ha->eh_dev_q)) {
+ struct domain_device *dev;
+ struct ssp_device *ssp;
+
+ ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
+ list_del_init(&ssp->eh_list_node);
+ dev = container_of(ssp, typeof(*dev), ssp_dev);
+ kref_get(&dev->kref);
+ WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");
+
+ spin_unlock_irq(&ha->lock);
+
+ if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
+ i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);
+
+ if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
+ i->dft->lldd_I_T_nexus_reset(dev);
+
+ sas_put_device(dev);
+ spin_lock_irq(&ha->lock);
+ clear_bit(SAS_DEV_EH_PENDING, &dev->state);
+ ha->eh_active--;
+ }
+ spin_unlock_irq(&ha->lock);
+}
+
+
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
- unsigned long flags;
LIST_HEAD(eh_work_q);
+ int tries = 0;
+ bool retry;
- spin_lock_irqsave(shost->host_lock, flags);
+retry:
+ tries++;
+ retry = true;
+ spin_lock_irq(shost->host_lock);
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
- shost->host_eh_scheduled = 0;
- spin_unlock_irqrestore(shost->host_lock, flags);
+ spin_unlock_irq(shost->host_lock);
SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
__func__, shost->host_busy, shost->host_failed);
@@ -705,13 +838,26 @@ out:
if (ha->lldd_max_execute_num > 1)
wake_up_process(ha->core.queue_thread);
+ sas_eh_handle_resets(shost);
+
/* now link into libata eh --- if we have any ata devices */
sas_ata_strategy_handler(shost);
scsi_eh_flush_done_q(&ha->eh_done_q);
- SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n",
- __func__, shost->host_busy, shost->host_failed);
+ /* check if any new eh work was scheduled during the last run */
+ spin_lock_irq(&ha->lock);
+ if (ha->eh_active == 0) {
+ shost->host_eh_scheduled = 0;
+ retry = false;
+ }
+ spin_unlock_irq(&ha->lock);
+
+ if (retry)
+ goto retry;
+
+ SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
+ __func__, shost->host_busy, shost->host_failed, tries);
}
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
@@ -988,9 +1134,13 @@ void sas_task_abort(struct sas_task *task)
/* Escape for libsas internal commands */
if (!sc) {
- if (!del_timer(&task->timer))
+ struct sas_task_slow *slow = task->slow_task;
+
+ if (!slow)
+ return;
+ if (!del_timer(&slow->timer))
return;
- task->timer.function(task->timer.data);
+ slow->timer.function(slow->timer.data);
return;
}
@@ -1003,7 +1153,6 @@ void sas_task_abort(struct sas_task *task)
spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(sc->request);
spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_schedule_eh(sc->device->host);
}
}
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index fe5d396aca73..e2516ba8ebfa 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -22,7 +22,9 @@
ccflags-$(GCOV) := -fprofile-arcs -ftest-coverage
ccflags-$(GCOV) += -O0
+ifdef WARNINGS_BECOME_ERRORS
ccflags-y += -Werror
+endif
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e5da6da20f8a..a65c05a8d488 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -96,6 +96,10 @@ struct lpfc_sli2_slim;
/* queue dump line buffer size */
#define LPFC_LBUF_SZ 128
+/* mailbox system shutdown options */
+#define LPFC_MBX_NO_WAIT 0
+#define LPFC_MBX_WAIT 1
+
enum lpfc_polling_flags {
ENABLE_FCP_RING_POLLING = 0x1,
DISABLE_FCP_RING_INT = 0x2
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5eb2bc116183..adef5bb2100e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3617,6 +3617,91 @@ lpfc_sriov_nr_virtfn_init(struct lpfc_hba *phba, int val)
static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
+/**
+ * lpfc_fcp_imax_store
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string with the number of fast-path FCP interrupts per second.
+ * @count: unused variable.
+ *
+ * Description:
+ * If val is in a valid range [636,651042], then set the adapter's
+ * maximum number of fast-path FCP interrupts per second.
+ *
+ * Returns:
+ * length of the buf on success if val is in range the intended mode
+ * is supported.
+ * -EINVAL if val out of range or intended mode is not supported.
+ **/
+static ssize_t
+lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int val = 0, i;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+
+ /* Value range is [636,651042] */
+ if (val < LPFC_MIM_IMAX || val > LPFC_DMULT_CONST)
+ return -EINVAL;
+
+ phba->cfg_fcp_imax = (uint32_t)val;
+ for (i = 0; i < phba->cfg_fcp_eq_count; i += LPFC_MAX_EQ_DELAY)
+ lpfc_modify_fcp_eq_delay(phba, i);
+
+ return strlen(buf);
+}
+
+/*
+# lpfc_fcp_imax: The maximum number of fast-path FCP interrupts per second
+#
+# Value range is [636,651042]. Default value is 10000.
+*/
+static int lpfc_fcp_imax = LPFC_FP_DEF_IMAX;
+module_param(lpfc_fcp_imax, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_fcp_imax,
+ "Set the maximum number of fast-path FCP interrupts per second");
+lpfc_param_show(fcp_imax)
+
+/**
+ * lpfc_fcp_imax_init - Set the initial sr-iov virtual function enable
+ * @phba: lpfc_hba pointer.
+ * @val: link speed value.
+ *
+ * Description:
+ * If val is in a valid range [636,651042], then initialize the adapter's
+ * maximum number of fast-path FCP interrupts per second.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= LPFC_MIM_IMAX && val <= LPFC_DMULT_CONST) {
+ phba->cfg_fcp_imax = val;
+ return 0;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3016 fcp_imax: %d out of range, using default\n", val);
+ phba->cfg_fcp_imax = LPFC_FP_DEF_IMAX;
+
+ return 0;
+}
+
+static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
+ lpfc_fcp_imax_show, lpfc_fcp_imax_store);
+
/*
# lpfc_fcp_class: Determines FC class to use for the FCP protocol.
# Value range is [2,3]. Default value is 3.
@@ -3758,14 +3843,6 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
-# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
-#
-# Value range is [636,651042]. Default value is 10000.
-*/
-LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
- "Set the maximum number of fast-path FCP interrupts per second");
-
-/*
# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
#
# Value range is [1,31]. Default value is 4.
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 9b2a16f3bc79..8a2a514a2553 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -183,7 +183,7 @@ int lpfc_post_buffer(struct lpfc_hba *, struct lpfc_sli_ring *, int);
void lpfc_decode_firmware_rev(struct lpfc_hba *, char *, int);
int lpfc_online(struct lpfc_hba *);
void lpfc_unblock_mgmt_io(struct lpfc_hba *);
-void lpfc_offline_prep(struct lpfc_hba *);
+void lpfc_offline_prep(struct lpfc_hba *, int);
void lpfc_offline(struct lpfc_hba *);
void lpfc_reset_hba(struct lpfc_hba *);
@@ -273,7 +273,7 @@ int lpfc_sli_host_down(struct lpfc_vport *);
int lpfc_sli_hba_down(struct lpfc_hba *);
int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
int lpfc_sli_handle_mb_event(struct lpfc_hba *);
-void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *, int);
int lpfc_sli_check_eratt(struct lpfc_hba *);
void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
struct lpfc_sli_ring *, uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 616c400dae14..afe368fd1b98 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -395,8 +395,13 @@ lpfc_debug_dump_fcp_cq(struct lpfc_hba *phba, int fcp_wqidx)
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
break;
- if (fcp_cqidx >= phba->cfg_fcp_eq_count)
- return;
+ if (phba->intr_type == MSIX) {
+ if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+ return;
+ } else {
+ if (fcp_cqidx > 0)
+ return;
+ }
printk(KERN_ERR "FCP CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]:\n",
fcp_wqidx, phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
@@ -426,8 +431,13 @@ lpfc_debug_dump_fcp_eq(struct lpfc_hba *phba, int fcp_wqidx)
for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++)
if (phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id == fcp_cqid)
break;
- if (fcp_cqidx >= phba->cfg_fcp_eq_count)
- return;
+ if (phba->intr_type == MSIX) {
+ if (fcp_cqidx >= phba->cfg_fcp_eq_count)
+ return;
+ } else {
+ if (fcp_cqidx > 0)
+ return;
+ }
if (phba->cfg_fcp_eq_count == 0) {
fcp_eqidx = -1;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 5bb269e224f6..9b4f92941dce 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -530,7 +530,7 @@ lpfc_work_list_done(struct lpfc_hba *phba)
break;
case LPFC_EVT_OFFLINE_PREP:
if (phba->link_state >= LPFC_LINK_DOWN)
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
*(int *)(evtp->evt_arg1) = 0;
complete((struct completion *)(evtp->evt_arg2));
break;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index f1946dfda5b4..953603a7a43c 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -874,6 +874,7 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15
#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20
#define LPFC_MBOX_OPCODE_NOP 0x21
+#define LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY 0x29
#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35
#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36
#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37
@@ -940,6 +941,13 @@ struct eq_context {
uint32_t reserved3;
};
+struct eq_delay_info {
+ uint32_t eq_id;
+ uint32_t phase;
+ uint32_t delay_multi;
+};
+#define LPFC_MAX_EQ_DELAY 8
+
struct sgl_page_pairs {
uint32_t sgl_pg0_addr_lo;
uint32_t sgl_pg0_addr_hi;
@@ -1002,6 +1010,19 @@ struct lpfc_mbx_eq_create {
} u;
};
+struct lpfc_mbx_modify_eq_delay {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t num_eq;
+ struct eq_delay_info eq[LPFC_MAX_EQ_DELAY];
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
struct lpfc_mbx_eq_destroy {
struct mbox_header header;
union {
@@ -2875,6 +2896,7 @@ struct lpfc_mqe {
struct lpfc_mbx_mq_create mq_create;
struct lpfc_mbx_mq_create_ext mq_create_ext;
struct lpfc_mbx_eq_create eq_create;
+ struct lpfc_mbx_modify_eq_delay eq_delay;
struct lpfc_mbx_cq_create cq_create;
struct lpfc_mbx_wq_create wq_create;
struct lpfc_mbx_rq_create rq_create;
@@ -3084,6 +3106,28 @@ struct lpfc_acqe_fc_la {
#define LPFC_FC_LA_EVENT_TYPE_SHARED_LINK 0x2
};
+struct lpfc_acqe_misconfigured_event {
+ struct {
+ uint32_t word0;
+#define lpfc_sli_misconfigured_port0_SHIFT 0
+#define lpfc_sli_misconfigured_port0_MASK 0x000000FF
+#define lpfc_sli_misconfigured_port0_WORD word0
+#define lpfc_sli_misconfigured_port1_SHIFT 8
+#define lpfc_sli_misconfigured_port1_MASK 0x000000FF
+#define lpfc_sli_misconfigured_port1_WORD word0
+#define lpfc_sli_misconfigured_port2_SHIFT 16
+#define lpfc_sli_misconfigured_port2_MASK 0x000000FF
+#define lpfc_sli_misconfigured_port2_WORD word0
+#define lpfc_sli_misconfigured_port3_SHIFT 24
+#define lpfc_sli_misconfigured_port3_MASK 0x000000FF
+#define lpfc_sli_misconfigured_port3_WORD word0
+ } theEvent;
+#define LPFC_SLI_EVENT_STATUS_VALID 0x00
+#define LPFC_SLI_EVENT_STATUS_NOT_PRESENT 0x01
+#define LPFC_SLI_EVENT_STATUS_WRONG_TYPE 0x02
+#define LPFC_SLI_EVENT_STATUS_UNSUPPORTED 0x03
+};
+
struct lpfc_acqe_sli {
uint32_t event_data1;
uint32_t event_data2;
@@ -3094,6 +3138,7 @@ struct lpfc_acqe_sli {
#define LPFC_SLI_EVENT_TYPE_NORM_TEMP 0x3
#define LPFC_SLI_EVENT_TYPE_NVLOG_POST 0x4
#define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5
+#define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9
};
/*
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 411ed48d79da..45c15208be9f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -73,6 +73,8 @@ static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
+static void lpfc_sli4_disable_intr(struct lpfc_hba *);
+static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1169,7 +1171,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
lpfc_offline(phba);
lpfc_reset_barrier(phba);
@@ -1193,7 +1195,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
static void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
lpfc_offline(phba);
lpfc_sli4_brdreset(phba);
lpfc_hba_down_post(phba);
@@ -1251,7 +1253,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
* There was a firmware error. Take the hba offline and then
* attempt to restart it.
*/
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);
/* Wait for the ER1 bit to clear.*/
@@ -1372,7 +1374,7 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
* There was a firmware error. Take the hba offline and then
* attempt to restart it.
*/
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
if (lpfc_online(phba) == 0) { /* Initialize the HBA */
@@ -1428,6 +1430,54 @@ lpfc_handle_eratt_s3(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
+ * @phba: pointer to lpfc hba data structure.
+ * @mbx_action: flag for mailbox shutdown action.
+ *
+ * This routine is invoked to perform an SLI4 port PCI function reset in
+ * response to port status register polling attention. It waits for port
+ * status register (ERR, RDY, RN) bits before proceeding with function reset.
+ * During this process, interrupt vectors are freed and later requested
+ * for handling possible port resource change.
+ **/
+static int
+lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action)
+{
+ int rc;
+ uint32_t intr_mode;
+
+ /*
+ * On error status condition, driver need to wait for port
+ * ready before performing reset.
+ */
+ rc = lpfc_sli4_pdev_status_reg_wait(phba);
+ if (!rc) {
+ /* need reset: attempt for port recovery */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2887 Reset Needed: Attempting Port "
+ "Recovery...\n");
+ lpfc_offline_prep(phba, mbx_action);
+ lpfc_offline(phba);
+ /* release interrupt for possible resource change */
+ lpfc_sli4_disable_intr(phba);
+ lpfc_sli_brdrestart(phba);
+ /* request and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3175 Failed to enable interrupt\n");
+ return -EIO;
+ } else {
+ phba->intr_mode = intr_mode;
+ }
+ rc = lpfc_online(phba);
+ if (rc == 0)
+ lpfc_unblock_mgmt_io(phba);
+ }
+ return rc;
+}
+
+/**
* lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
* @phba: pointer to lpfc hba data structure.
*
@@ -1506,30 +1556,18 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3145 Port Down: Provisioning\n");
- /*
- * On error status condition, driver need to wait for port
- * ready before performing reset.
- */
- rc = lpfc_sli4_pdev_status_reg_wait(phba);
- if (!rc) {
- /* need reset: attempt for port recovery */
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2887 Reset Needed: Attempting Port "
- "Recovery...\n");
- lpfc_offline_prep(phba);
- lpfc_offline(phba);
- lpfc_sli_brdrestart(phba);
- if (lpfc_online(phba) == 0) {
- lpfc_unblock_mgmt_io(phba);
- /* don't report event on forced debug dump */
- if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
- reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
- return;
- else
- break;
- }
- /* fall through for not able to recover */
+
+ /* Check port status register for function reset */
+ rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT);
+ if (rc == 0) {
+ /* don't report event on forced debug dump */
+ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+ return;
+ else
+ break;
}
+ /* fall through for not able to recover */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3152 Unrecoverable error, bring the port "
"offline\n");
@@ -2494,15 +2532,19 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
* driver prepares the HBA interface for online or offline.
**/
static void
-lpfc_block_mgmt_io(struct lpfc_hba * phba)
+lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
{
unsigned long iflag;
uint8_t actcmd = MBX_HEARTBEAT;
unsigned long timeout;
- timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
spin_lock_irqsave(&phba->hbalock, iflag);
phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ if (mbx_action == LPFC_MBX_NO_WAIT)
+ return;
+ timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->sli.mbox_active) {
actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
/* Determine how long we might wait for the active mailbox
@@ -2592,7 +2634,7 @@ lpfc_online(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0458 Bring Adapter online\n");
- lpfc_block_mgmt_io(phba);
+ lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
if (!lpfc_sli_queue_setup(phba)) {
lpfc_unblock_mgmt_io(phba);
@@ -2660,7 +2702,7 @@ lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
* queue to make it ready to be brought offline.
**/
void
-lpfc_offline_prep(struct lpfc_hba * phba)
+lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
{
struct lpfc_vport *vport = phba->pport;
struct lpfc_nodelist *ndlp, *next_ndlp;
@@ -2671,7 +2713,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
if (vport->fc_flag & FC_OFFLINE_MODE)
return;
- lpfc_block_mgmt_io(phba);
+ lpfc_block_mgmt_io(phba, mbx_action);
lpfc_linkdown(phba);
@@ -2718,7 +2760,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
}
lpfc_destroy_vport_work_array(phba, vports);
- lpfc_sli_mbox_sys_shutdown(phba);
+ lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
}
/**
@@ -3684,12 +3726,76 @@ out_free_pmb:
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2901 Async SLI event - Event Data1:x%08x Event Data2:"
- "x%08x SLI Event Type:%d",
- acqe_sli->event_data1, acqe_sli->event_data2,
- bf_get(lpfc_trailer_type, acqe_sli));
- return;
+ char port_name;
+ char message[80];
+ uint8_t status;
+ struct lpfc_acqe_misconfigured_event *misconfigured;
+
+ /* special case misconfigured event as it contains data for all ports */
+ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2) ||
+ (bf_get(lpfc_trailer_type, acqe_sli) !=
+ LPFC_SLI_EVENT_TYPE_MISCONFIGURED)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "2901 Async SLI event - Event Data1:x%08x Event Data2:"
+ "x%08x SLI Event Type:%d\n",
+ acqe_sli->event_data1, acqe_sli->event_data2,
+ bf_get(lpfc_trailer_type, acqe_sli));
+ return;
+ }
+
+ port_name = phba->Port[0];
+ if (port_name == 0x00)
+ port_name = '?'; /* get port name is empty */
+
+ misconfigured = (struct lpfc_acqe_misconfigured_event *)
+ &acqe_sli->event_data1;
+
+ /* fetch the status for this port */
+ switch (phba->sli4_hba.lnk_info.lnk_no) {
+ case LPFC_LINK_NUMBER_0:
+ status = bf_get(lpfc_sli_misconfigured_port0,
+ &misconfigured->theEvent);
+ break;
+ case LPFC_LINK_NUMBER_1:
+ status = bf_get(lpfc_sli_misconfigured_port1,
+ &misconfigured->theEvent);
+ break;
+ case LPFC_LINK_NUMBER_2:
+ status = bf_get(lpfc_sli_misconfigured_port2,
+ &misconfigured->theEvent);
+ break;
+ case LPFC_LINK_NUMBER_3:
+ status = bf_get(lpfc_sli_misconfigured_port3,
+ &misconfigured->theEvent);
+ break;
+ default:
+ status = ~LPFC_SLI_EVENT_STATUS_VALID;
+ break;
+ }
+
+ switch (status) {
+ case LPFC_SLI_EVENT_STATUS_VALID:
+ return; /* no message if the sfp is okay */
+ case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
+ sprintf(message, "Not installed");
+ break;
+ case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
+ sprintf(message,
+ "Optics of two types installed");
+ break;
+ case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
+ sprintf(message, "Incompatible optics");
+ break;
+ default:
+ /* firmware is reporting a status we don't know about */
+ sprintf(message, "Unknown event status x%02x", status);
+ break;
+ }
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3176 Misconfigured Physical Port - "
+ "Port Name %c %s\n", port_name, message);
}
/**
@@ -4312,7 +4418,7 @@ lpfc_reset_hba(struct lpfc_hba *phba)
phba->link_state = LPFC_HBA_ERROR;
return;
}
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
lpfc_online(phba);
@@ -5514,14 +5620,45 @@ lpfc_destroy_shost(struct lpfc_hba *phba)
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
+ uint32_t old_mask;
+ uint32_t old_guard;
+
int pagecnt = 10;
if (lpfc_prot_mask && lpfc_prot_guard) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"1478 Registering BlockGuard with the "
"SCSI layer\n");
- scsi_host_set_prot(shost, lpfc_prot_mask);
- scsi_host_set_guard(shost, lpfc_prot_guard);
+
+ old_mask = lpfc_prot_mask;
+ old_guard = lpfc_prot_guard;
+
+ /* Only allow supported values */
+ lpfc_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIX_TYPE0_PROTECTION |
+ SHOST_DIX_TYPE1_PROTECTION);
+ lpfc_prot_guard &= (SHOST_DIX_GUARD_IP | SHOST_DIX_GUARD_CRC);
+
+ /* DIF Type 1 protection for profiles AST1/C1 is end to end */
+ if (lpfc_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
+ lpfc_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
+
+ if (lpfc_prot_mask && lpfc_prot_guard) {
+ if ((old_mask != lpfc_prot_mask) ||
+ (old_guard != lpfc_prot_guard))
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1475 Registering BlockGuard with the "
+ "SCSI layer: mask %d guard %d\n",
+ lpfc_prot_mask, lpfc_prot_guard);
+
+ scsi_host_set_prot(shost, lpfc_prot_mask);
+ scsi_host_set_guard(shost, lpfc_prot_guard);
+ } else
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1479 Not Registering BlockGuard with the SCSI "
+ "layer, Bad protection parameters: %d %d\n",
+ old_mask, old_guard);
}
+
if (!_dump_buf_data) {
while (pagecnt) {
spin_lock_init(&_dump_buf_lock);
@@ -8859,7 +8996,7 @@ lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
"0473 PCI device Power Management suspend.\n");
/* Bring down the device */
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);
kthread_stop(phba->worker_thread);
@@ -8985,7 +9122,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
"2710 PCI channel disable preparing for reset\n");
/* Block any management I/Os to the device */
- lpfc_block_mgmt_io(phba);
+ lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
@@ -9129,7 +9266,7 @@ lpfc_io_slot_reset_s3(struct pci_dev *pdev)
phba->intr_mode = intr_mode;
/* Take device offline, it will perform cleanup */
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
@@ -9603,7 +9740,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
"2843 PCI device Power Management suspend.\n");
/* Bring down the device */
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);
kthread_stop(phba->worker_thread);
@@ -9729,7 +9866,7 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
"2826 PCI channel disable preparing for reset\n");
/* Block any management I/Os to the device */
- lpfc_block_mgmt_io(phba);
+ lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
/* Block all SCSI devices' I/Os on the host */
lpfc_scsi_dev_block(phba);
@@ -9902,7 +10039,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
*/
if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
/* Perform device reset */
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
/* Bring the device back online */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 66e09069f281..925975d2d765 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4275,10 +4275,8 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
* Catch race where our node has transitioned, but the
* transport is still transitioning.
*/
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
- goto out_fail_command;
- }
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ goto out_tgt_busy;
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth)
goto out_tgt_busy;
@@ -4412,12 +4410,12 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
struct lpfc_iocbq *abtsiocb;
struct lpfc_scsi_buf *lpfc_cmd;
IOCB_t *cmd, *icmd;
- int ret = SUCCESS;
+ int ret = SUCCESS, status = 0;
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
- ret = fc_block_scsi_eh(cmnd);
- if (ret)
- return ret;
+ status = fc_block_scsi_eh(cmnd);
+ if (status)
+ return status;
spin_lock_irq(&phba->hbalock);
/* driver queued commands are in process of being flushed */
@@ -4435,7 +4433,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
"2873 SCSI Layer I/O Abort Request IO CMPL Status "
"x%x ID %d LUN %d\n",
- ret, cmnd->device->id, cmnd->device->lun);
+ SUCCESS, cmnd->device->id, cmnd->device->lun);
return SUCCESS;
}
@@ -4762,7 +4760,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
- int status;
+ int status, ret = SUCCESS;
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -4803,9 +4801,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
* So, continue on.
* We will report success if all the i/o aborts successfully.
*/
- status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_LUN);
- return status;
+ return ret;
}
/**
@@ -4829,7 +4827,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
unsigned tgt_id = cmnd->device->id;
unsigned int lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
- int status;
+ int status, ret = SUCCESS;
if (!rdata) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
@@ -4870,9 +4868,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
* So, continue on.
* We will report success if all the i/o aborts successfully.
*/
- status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
- LPFC_CTX_TGT);
- return status;
+ ret = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+ LPFC_CTX_TGT);
+ return ret;
}
/**
@@ -4982,7 +4980,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_hba *phba = vport->phba;
int rc, ret = SUCCESS;
- lpfc_offline_prep(phba);
+ lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);
rc = lpfc_sli_brdrestart(phba);
if (rc)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index b4720a109817..9cbd20b1328b 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -8984,7 +8984,7 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
int i;
/* Shutdown the mailbox command sub-system */
- lpfc_sli_mbox_sys_shutdown(phba);
+ lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
lpfc_hba_down_prep(phba);
@@ -9996,11 +9996,17 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
* sub-system flush routine to gracefully bring down mailbox sub-system.
**/
void
-lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
+lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
struct lpfc_sli *psli = &phba->sli;
unsigned long timeout;
+ if (mbx_action == LPFC_MBX_NO_WAIT) {
+ /* delay 100ms for port state */
+ msleep(100);
+ lpfc_sli_mbox_sys_flush(phba);
+ return;
+ }
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
spin_lock_irq(&phba->hbalock);
@@ -12042,6 +12048,83 @@ out_fail:
}
/**
+ * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @startq: The starting FCP EQ to modify
+ *
+ * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @startq
+ * is used to get the starting FCP EQ to change.
+ * This function is asynchronous and will wait for the mailbox
+ * command to finish before continuing.
+ *
+ * On success this function will return a zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox command
+ * fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
+{
+ struct lpfc_mbx_modify_eq_delay *eq_delay;
+ LPFC_MBOXQ_t *mbox;
+ struct lpfc_queue *eq;
+ int cnt, rc, length, status = 0;
+ uint32_t shdr_status, shdr_add_status;
+ int fcp_eqidx;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint16_t dmult;
+
+ if (startq >= phba->cfg_fcp_eq_count)
+ return 0;
+
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
+ length, LPFC_SLI4_MBX_EMBED);
+ eq_delay = &mbox->u.mqe.un.eq_delay;
+
+ /* Calculate delay multiper from maximum interrupt per second */
+ dmult = LPFC_DMULT_CONST/phba->cfg_fcp_imax - 1;
+
+ cnt = 0;
+ for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_eq_count;
+ fcp_eqidx++) {
+ eq = phba->sli4_hba.fp_eq[fcp_eqidx];
+ if (!eq)
+ continue;
+ eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
+ eq_delay->u.request.eq[cnt].phase = 0;
+ eq_delay->u.request.eq[cnt].delay_multi = dmult;
+ cnt++;
+ if (cnt >= LPFC_MAX_EQ_DELAY)
+ break;
+ }
+ eq_delay->u.request.num_eq = cnt;
+
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->context1 = NULL;
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2512 MODIFY_EQ_DELAY mailbox failed with "
+ "status x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ status = -ENXIO;
+ }
+ mempool_free(mbox, phba->mbox_mem_pool);
+ return status;
+}
+
+/**
* lpfc_eq_create - Create an Event Queue on the HBA
* @phba: HBA structure that indicates port to create a queue on.
* @eq: The queue structure to use to create the event queue.
@@ -12228,8 +12311,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0361 Unsupported CQ count. (%d)\n",
cq->entry_count);
- if (cq->entry_count < 256)
- return -EINVAL;
+ if (cq->entry_count < 256) {
+ status = -EINVAL;
+ goto out;
+ }
/* otherwise default to smallest count (drop through) */
case 256:
bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
@@ -12420,8 +12505,10 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0362 Unsupported MQ count. (%d)\n",
mq->entry_count);
- if (mq->entry_count < 16)
- return -EINVAL;
+ if (mq->entry_count < 16) {
+ status = -EINVAL;
+ goto out;
+ }
/* otherwise default to smallest count (drop through) */
case 16:
bf_set(lpfc_mq_context_ring_size,
@@ -12710,8 +12797,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2535 Unsupported RQ count. (%d)\n",
hrq->entry_count);
- if (hrq->entry_count < 512)
- return -EINVAL;
+ if (hrq->entry_count < 512) {
+ status = -EINVAL;
+ goto out;
+ }
/* otherwise default to smallest count (drop through) */
case 512:
bf_set(lpfc_rq_context_rqe_count,
@@ -12791,8 +12880,10 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2536 Unsupported RQ count. (%d)\n",
drq->entry_count);
- if (drq->entry_count < 512)
- return -EINVAL;
+ if (drq->entry_count < 512) {
+ status = -EINVAL;
+ goto out;
+ }
/* otherwise default to smallest count (drop through) */
case 512:
bf_set(lpfc_rq_context_rqe_count,
@@ -15855,24 +15946,18 @@ lpfc_drain_txq(struct lpfc_hba *phba)
spin_lock_irqsave(&phba->hbalock, iflags);
piocbq = lpfc_sli_ringtx_get(phba, pring);
+ if (!piocbq) {
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "2823 txq empty and txq_cnt is %d\n ",
+ pring->txq_cnt);
+ break;
+ }
sglq = __lpfc_sli_get_sglq(phba, piocbq);
if (!sglq) {
__lpfc_sli_ringtx_put(phba, pring, piocbq);
spin_unlock_irqrestore(&phba->hbalock, iflags);
break;
- } else {
- if (!piocbq) {
- /* The txq_cnt out of sync. This should
- * never happen
- */
- sglq = __lpfc_clear_active_sglq(phba,
- sglq->sli4_lxritag);
- spin_unlock_irqrestore(&phba->hbalock, iflags);
- lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "2823 txq empty and txq_cnt is %d\n ",
- pring->txq_cnt);
- break;
- }
}
/* The xri and iocb resources secured,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a4a77080091b..ec756118c5c1 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -598,6 +598,7 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
+uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 59c57a409981..4704e5b5088e 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.31"
+#define LPFC_DRIVER_VERSION "8.3.32"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 4d39a9ffc081..97825f116954 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -524,7 +524,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
mega_passthru *pthru;
scb_t *scb;
mbox_t *mbox;
- long seg;
+ u32 seg;
char islogical;
int max_ldrv_num;
int channel = 0;
@@ -858,7 +858,7 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
/* Calculate Scatter-Gather info */
mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
- (u32 *)&mbox->m_out.xferaddr, (u32 *)&seg);
+ (u32 *)&mbox->m_out.xferaddr, &seg);
return scb;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 35bd13879fed..54b1c5bb310f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -2731,7 +2731,7 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
}
out:
- spin_unlock_irq(&adapter->lock);
+ spin_unlock(&adapter->lock);
return rval;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index b6dd3a5de7f9..b3a1a30055d6 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -1158,6 +1158,7 @@ extern struct scsi_transport_template *mpt2sas_transport_template;
extern int scsi_internal_device_block(struct scsi_device *sdev);
extern u8 mpt2sas_stm_zero_smid_handler(struct MPT2SAS_ADAPTER *ioc,
u8 msix_index, u32 reply);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev);
+extern int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state);
#endif /* MPT2SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 76973e8ca4ba..b1ebd6f8dab3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2904,7 +2904,7 @@ _scsih_ublock_io_all_device(struct MPT2SAS_ADAPTER *ioc)
dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, "device_running, "
"handle(0x%04x)\n",
sas_device_priv_data->sas_target->handle));
- scsi_internal_device_unblock(sdev);
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
}
}
/**
@@ -2933,7 +2933,7 @@ _scsih_ublock_io_device(struct MPT2SAS_ADAPTER *ioc, u64 sas_address)
"sas address(0x%016llx)\n", ioc->name,
(unsigned long long)sas_address));
sas_device_priv_data->block = 0;
- scsi_internal_device_unblock(sdev);
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
}
}
}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index fd3b2839843b..4539d59a0857 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -885,7 +885,6 @@ static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
struct completion *completion, int is_tmf,
struct mvs_tmf_task *tmf)
{
- struct domain_device *dev = task->dev;
struct mvs_info *mvi = NULL;
u32 rc = 0;
u32 pass = 0;
@@ -1365,9 +1364,9 @@ void mvs_dev_gone(struct domain_device *dev)
static void mvs_task_done(struct sas_task *task)
{
- if (!del_timer(&task->timer))
+ if (!del_timer(&task->slow_task->timer))
return;
- complete(&task->completion);
+ complete(&task->slow_task->completion);
}
static void mvs_tmf_timedout(unsigned long data)
@@ -1375,7 +1374,7 @@ static void mvs_tmf_timedout(unsigned long data)
struct sas_task *task = (struct sas_task *)data;
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->completion);
+ complete(&task->slow_task->completion);
}
#define MVS_TASK_TIMEOUT 20
@@ -1386,7 +1385,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
struct sas_task *task = NULL;
for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_task(GFP_KERNEL);
+ task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
@@ -1396,20 +1395,20 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev,
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = mvs_task_done;
- task->timer.data = (unsigned long) task;
- task->timer.function = mvs_tmf_timedout;
- task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
- add_timer(&task->timer);
+ task->slow_task->timer.data = (unsigned long) task;
+ task->slow_task->timer.function = mvs_tmf_timedout;
+ task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
+ add_timer(&task->slow_task->timer);
res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
if (res) {
- del_timer(&task->timer);
+ del_timer(&task->slow_task->timer);
mv_printk("executing internel task failed:%d\n", res);
goto ex_err;
}
- wait_for_completion(&task->completion);
+ wait_for_completion(&task->slow_task->completion);
res = TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 3b11edd4a50c..b961112395d5 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -650,9 +650,9 @@ int pm8001_dev_found(struct domain_device *dev)
static void pm8001_task_done(struct sas_task *task)
{
- if (!del_timer(&task->timer))
+ if (!del_timer(&task->slow_task->timer))
return;
- complete(&task->completion);
+ complete(&task->slow_task->completion);
}
static void pm8001_tmf_timedout(unsigned long data)
@@ -660,7 +660,7 @@ static void pm8001_tmf_timedout(unsigned long data)
struct sas_task *task = (struct sas_task *)data;
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
- complete(&task->completion);
+ complete(&task->slow_task->completion);
}
#define PM8001_TASK_TIMEOUT 20
@@ -683,7 +683,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev);
for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_task(GFP_KERNEL);
+ task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
@@ -691,21 +691,21 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
task->task_proto = dev->tproto;
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = pm8001_task_done;
- task->timer.data = (unsigned long)task;
- task->timer.function = pm8001_tmf_timedout;
- task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
- add_timer(&task->timer);
+ task->slow_task->timer.data = (unsigned long)task;
+ task->slow_task->timer.function = pm8001_tmf_timedout;
+ task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
+ add_timer(&task->slow_task->timer);
res = pm8001_task_exec(task, 1, GFP_KERNEL, 1, tmf);
if (res) {
- del_timer(&task->timer);
+ del_timer(&task->slow_task->timer);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Executing internal task "
"failed\n"));
goto ex_err;
}
- wait_for_completion(&task->completion);
+ wait_for_completion(&task->slow_task->completion);
res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
@@ -765,17 +765,17 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task = NULL;
for (retry = 0; retry < 3; retry++) {
- task = sas_alloc_task(GFP_KERNEL);
+ task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
task->dev = dev;
task->task_proto = dev->tproto;
task->task_done = pm8001_task_done;
- task->timer.data = (unsigned long)task;
- task->timer.function = pm8001_tmf_timedout;
- task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
- add_timer(&task->timer);
+ task->slow_task->timer.data = (unsigned long)task;
+ task->slow_task->timer.function = pm8001_tmf_timedout;
+ task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
+ add_timer(&task->slow_task->timer);
res = pm8001_tag_alloc(pm8001_ha, &ccb_tag);
if (res)
@@ -789,13 +789,13 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha,
pm8001_dev, flag, task_tag, ccb_tag);
if (res) {
- del_timer(&task->timer);
+ del_timer(&task->slow_task->timer);
PM8001_FAIL_DBG(pm8001_ha,
pm8001_printk("Executing internal task "
"failed\n"));
goto ex_err;
}
- wait_for_completion(&task->completion);
+ wait_for_completion(&task->slow_task->completion);
res = TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
@@ -962,8 +962,9 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev)
struct pm8001_device *pm8001_dev;
struct pm8001_hba_info *pm8001_ha;
struct sas_phy *phy;
+
if (!dev || !dev->lldd_dev)
- return -1;
+ return -ENODEV;
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index ca5084743135..a44653b42161 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -685,7 +685,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha)
pcix_set_mmrbc(ha->pdev, 2048);
/* PCIe -- adjust Maximum Read Request Size (2048). */
- if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+ if (pci_is_pcie(ha->pdev))
pcie_set_readrq(ha->pdev, 2048);
pci_disable_rom(ha->pdev);
@@ -721,7 +721,7 @@ qla25xx_pci_config(scsi_qla_host_t *vha)
pci_write_config_word(ha->pdev, PCI_COMMAND, w);
/* PCIe -- adjust Maximum Read Request Size (2048). */
- if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
+ if (pci_is_pcie(ha->pdev))
pcie_set_readrq(ha->pdev, 2048);
pci_disable_rom(ha->pdev);
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index caf627ba7fa8..9ce3a8f8754f 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1620,7 +1620,7 @@ qla82xx_pci_info_str(struct scsi_qla_host *vha, char *str)
char lwstr[6];
uint16_t lnk;
- pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ pcie_reg = pci_pcie_cap(ha->pdev);
pci_read_config_word(ha->pdev, pcie_reg + PCI_EXP_LNKSTA, &lnk);
ha->link_width = (lnk >> 4) & 0x3f;
@@ -2528,7 +2528,7 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
}
/* Negotiated Link width */
- pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ pcie_cap = pci_pcie_cap(ha->pdev);
pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
ha->link_width = (lnk >> 4) & 0x3f;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 6d1d873a20e2..fb8cd3847d4b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -482,12 +482,12 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
uint32_t pci_bus;
int pcie_reg;
- pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ pcie_reg = pci_pcie_cap(ha->pdev);
if (pcie_reg) {
char lwstr[6];
uint16_t pcie_lstat, lspeed, lwidth;
- pcie_reg += 0x12;
+ pcie_reg += PCI_EXP_LNKCAP;
pci_read_config_word(ha->pdev, pcie_reg, &pcie_lstat);
lspeed = pcie_lstat & (BIT_0 | BIT_1 | BIT_2 | BIT_3);
lwidth = (pcie_lstat &
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 6986552b47e6..5b30132960c7 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2643,19 +2643,9 @@ static void qlt_do_work(struct work_struct *work)
spin_lock_irqsave(&ha->hardware_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
atio->u.isp24.fcp_hdr.s_id);
- if (sess) {
- if (unlikely(sess->tearing_down)) {
- sess = NULL;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- goto out_term;
- } else {
- /*
- * Do the extra kref_get() before dropping
- * qla_hw_data->hardware_lock.
- */
- kref_get(&sess->se_sess->sess_kref);
- }
- }
+ /* Do kref_get() before dropping qla_hw_data->hardware_lock. */
+ if (sess)
+ kref_get(&sess->se_sess->sess_kref);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (unlikely(!sess)) {
@@ -3960,7 +3950,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
{
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = ha->tgt.qla_tgt;
- int reason_code;
+ int login_code;
ql_dbg(ql_dbg_tgt, vha, 0xe039,
"scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
@@ -4003,9 +3993,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
{
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
"qla_target(%d): Async LOOP_UP occured "
- "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
- le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
- le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+ "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
if (tgt->link_reinit_iocb_pending) {
qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
0, 0, 0, 0, 0, 0);
@@ -4020,23 +4010,24 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
case MBA_RSCN_UPDATE:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
"qla_target(%d): Async event %#x occured "
- "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code,
- le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
- le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+ "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
break;
case MBA_PORT_UPDATE:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
"qla_target(%d): Port update async event %#x "
- "occured: updating the ports database (m[1]=%x, m[2]=%x, "
- "m[3]=%x, m[4]=%x)", vha->vp_idx, code,
- le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
- le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
- reason_code = le16_to_cpu(mailbox[2]);
- if (reason_code == 0x4)
+ "occured: updating the ports database (m[0]=%x, m[1]=%x, "
+ "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
+ le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
+
+ login_code = le16_to_cpu(mailbox[2]);
+ if (login_code == 0x4)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
"Async MB 2: Got PLOGI Complete\n");
- else if (reason_code == 0x7)
+ else if (login_code == 0x7)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
"Async MB 2: Port Logged Out\n");
break;
@@ -4044,9 +4035,9 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
"qla_target(%d): Async event %#x occured: "
- "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
- code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
- le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
+ "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
+ code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
+ le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
break;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 9f9ef1644fd9..170af1571214 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -639,7 +639,7 @@ struct qla_tgt_func_tmpl {
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
- int (*handle_data)(struct qla_tgt_cmd *);
+ void (*handle_data)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
@@ -813,7 +813,6 @@ struct qla_tgt_sess {
unsigned int conf_compl_supported:1;
unsigned int deleted:1;
unsigned int local:1;
- unsigned int tearing_down:1;
struct se_session *se_sess;
struct scsi_qla_host *vha;
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 6e64314dbbb3..4752f65a9272 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -38,8 +38,6 @@
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -466,8 +464,7 @@ static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
vha = sess->vha;
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
- sess->tearing_down = 1;
- target_splice_sess_cmd_list(se_sess);
+ target_sess_cmd_list_set_waiting(se_sess);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
return 1;
@@ -600,28 +597,15 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
return -EINVAL;
}
- target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
+ return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
cmd->unpacked_lun, data_length, fcp_task_attr,
data_dir, flags);
- return 0;
}
-static void tcm_qla2xxx_do_rsp(struct work_struct *work)
+static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
- /*
- * Dispatch ->queue_status from workqueue process context
- */
- transport_generic_request_failure(&cmd->se_cmd);
-}
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
-{
- struct se_cmd *se_cmd = &cmd->se_cmd;
- unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
* Otherwise return an exception via CHECK_CONDITION status.
@@ -631,24 +615,26 @@ static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
* Check if se_cmd has already been aborted via LUN_RESET, and
* waiting upon completion in tcm_qla2xxx_write_pending_status()
*/
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- if (se_cmd->transport_state & CMD_T_ABORTED) {
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
- complete(&se_cmd->t_transport_stop_comp);
- return 0;
+ if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
+ complete(&cmd->se_cmd.t_transport_stop_comp);
+ return;
}
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
- se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
- INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
- queue_work(tcm_qla2xxx_free_wq, &cmd->work);
- return 0;
+ cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
+ transport_generic_request_failure(&cmd->se_cmd);
+ return;
}
- /*
- * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
- * status to the backstore processing thread.
- */
- return transport_generic_handle_data(&cmd->se_cmd);
+
+ return target_execute_cmd(&cmd->se_cmd);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
+{
+ INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
+ queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
/*
@@ -1690,7 +1676,6 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
.tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
.tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
.tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
- .new_cmd_map = NULL,
.check_stop_free = tcm_qla2xxx_check_stop_free,
.release_cmd = tcm_qla2xxx_release_cmd,
.put_session = tcm_qla2xxx_put_session,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 96a5616a8fda..7fdba7f1ffb7 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -279,6 +279,7 @@ struct qla_ddb_index {
struct list_head list;
uint16_t fw_ddb_idx;
struct dev_db_entry fw_ddb;
+ uint8_t flash_isid[6];
};
#define DDB_IPADDR_LEN 64
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 20b49d019043..5b2525c4139e 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -183,7 +183,8 @@ int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
struct ddb_entry *ddb_entry, uint32_t state);
void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
-int qla4xxx_post_aen_work(struct scsi_qla_host *ha, uint32_t aen_code,
+int qla4xxx_post_aen_work(struct scsi_qla_host *ha,
+ enum iscsi_host_event_code aen_code,
uint32_t data_size, uint8_t *data);
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
uint32_t payload_size, uint32_t pid, uint8_t *ipaddr);
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index bf36723b84e1..ddd9472066cb 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -126,7 +126,7 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
qla4xxx_init_response_q_entries(ha);
- /* Initialize mabilbox active array */
+ /* Initialize mailbox active array */
for (i = 0; i < MAX_MRB; i++)
ha->active_mrb_array[i] = NULL;
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 228b67020d2c..939d7261c37a 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1590,7 +1590,7 @@ qla4_8xxx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
}
/* Negotiated Link width */
- pcie_cap = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
+ pcie_cap = pci_pcie_cap(ha->pdev);
pci_read_config_word(ha->pdev, pcie_cap + PCI_EXP_LNKSTA, &lnk);
ha->link_width = (lnk >> 4) & 0x3f;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index cd15678f9ada..9da426628b97 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -4299,7 +4299,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
}
static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
- struct ql4_tuple_ddb *tddb)
+ struct ql4_tuple_ddb *tddb,
+ uint8_t *flash_isid)
{
uint16_t options = 0;
@@ -4314,7 +4315,12 @@ static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
tddb->port = le16_to_cpu(fw_ddb_entry->port);
- memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0], sizeof(tddb->isid));
+
+ if (flash_isid == NULL)
+ memcpy(&tddb->isid[0], &fw_ddb_entry->isid[0],
+ sizeof(tddb->isid));
+ else
+ memcpy(&tddb->isid[0], &flash_isid[0], sizeof(tddb->isid));
}
static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
@@ -4385,7 +4391,7 @@ static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
goto exit_check;
}
- qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
@@ -4407,6 +4413,102 @@ exit_check:
return ret;
}
+/**
+ * qla4xxx_check_existing_isid - check if target with same isid exist
+ * in target list
+ * @list_nt: list of target
+ * @isid: isid to check
+ *
+ * This routine return QLA_SUCCESS if target with same isid exist
+ **/
+static int qla4xxx_check_existing_isid(struct list_head *list_nt, uint8_t *isid)
+{
+ struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct dev_db_entry *fw_ddb_entry;
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ fw_ddb_entry = &nt_ddb_idx->fw_ddb;
+
+ if (memcmp(&fw_ddb_entry->isid[0], &isid[0],
+ sizeof(nt_ddb_idx->fw_ddb.isid)) == 0) {
+ return QLA_SUCCESS;
+ }
+ }
+ return QLA_ERROR;
+}
+
+/**
+ * qla4xxx_update_isid - compare ddbs and updated isid
+ * @ha: Pointer to host adapter structure.
+ * @list_nt: list of nt target
+ * @fw_ddb_entry: firmware ddb entry
+ *
+ * This routine update isid if ddbs have same iqn, same isid and
+ * different IP addr.
+ * Return QLA_SUCCESS if isid is updated.
+ **/
+static int qla4xxx_update_isid(struct scsi_qla_host *ha,
+ struct list_head *list_nt,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ uint8_t base_value, i;
+
+ base_value = fw_ddb_entry->isid[1] & 0x1f;
+ for (i = 0; i < 8; i++) {
+ fw_ddb_entry->isid[1] = (base_value | (i << 5));
+ if (qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
+ break;
+ }
+
+ if (!qla4xxx_check_existing_isid(list_nt, fw_ddb_entry->isid))
+ return QLA_ERROR;
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_should_update_isid - check if isid need to update
+ * @ha: Pointer to host adapter structure.
+ * @old_tddb: ddb tuple
+ * @new_tddb: ddb tuple
+ *
+ * Return QLA_SUCCESS if different IP, different PORT, same iqn,
+ * same isid
+ **/
+static int qla4xxx_should_update_isid(struct scsi_qla_host *ha,
+ struct ql4_tuple_ddb *old_tddb,
+ struct ql4_tuple_ddb *new_tddb)
+{
+ if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr) == 0) {
+ /* Same ip */
+ if (old_tddb->port == new_tddb->port)
+ return QLA_ERROR;
+ }
+
+ if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+ /* different iqn */
+ return QLA_ERROR;
+
+ if (memcmp(&old_tddb->isid[0], &new_tddb->isid[0],
+ sizeof(old_tddb->isid)))
+ /* different isid */
+ return QLA_ERROR;
+
+ return QLA_SUCCESS;
+}
+
+/**
+ * qla4xxx_is_flash_ddb_exists - check if fw_ddb_entry already exists in list_nt
+ * @ha: Pointer to host adapter structure.
+ * @list_nt: list of nt target.
+ * @fw_ddb_entry: firmware ddb entry.
+ *
+ * This routine check if fw_ddb_entry already exists in list_nt to avoid
+ * duplicate ddb in list_nt.
+ * Return QLA_SUCCESS if duplicate ddb exit in list_nl.
+ * Note: This function also update isid of DDB if required.
+ **/
+
static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
struct list_head *list_nt,
struct dev_db_entry *fw_ddb_entry)
@@ -4414,7 +4516,7 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
struct ql4_tuple_ddb *fw_tddb = NULL;
struct ql4_tuple_ddb *tmp_tddb = NULL;
- int ret = QLA_ERROR;
+ int rval, ret = QLA_ERROR;
fw_tddb = vzalloc(sizeof(*fw_tddb));
if (!fw_tddb) {
@@ -4432,12 +4534,28 @@ static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
goto exit_check;
}
- qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+ qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, NULL);
list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
- qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
- if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true)) {
- ret = QLA_SUCCESS; /* found */
+ qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb,
+ nt_ddb_idx->flash_isid);
+ ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, true);
+ /* found duplicate ddb */
+ if (ret == QLA_SUCCESS)
+ goto exit_check;
+ }
+
+ list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+ qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb, NULL);
+
+ ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb);
+ if (ret == QLA_SUCCESS) {
+ rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry);
+ if (rval == QLA_SUCCESS)
+ ret = QLA_ERROR;
+ else
+ ret = QLA_SUCCESS;
+
goto exit_check;
}
}
@@ -4788,14 +4906,26 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
nt_ddb_idx->fw_ddb_idx = idx;
- memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
- sizeof(struct dev_db_entry));
-
- if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
- fw_ddb_entry) == QLA_SUCCESS) {
+ /* Copy original isid as it may get updated in function
+ * qla4xxx_update_isid(). We need original isid in
+ * function qla4xxx_compare_tuple_ddb to find duplicate
+ * target */
+ memcpy(&nt_ddb_idx->flash_isid[0],
+ &fw_ddb_entry->isid[0],
+ sizeof(nt_ddb_idx->flash_isid));
+
+ ret = qla4xxx_is_flash_ddb_exists(ha, list_nt,
+ fw_ddb_entry);
+ if (ret == QLA_SUCCESS) {
+ /* free nt_ddb_idx and do not add to list_nt */
vfree(nt_ddb_idx);
goto continue_next_nt;
}
+
+ /* Copy updated isid */
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
list_add_tail(&nt_ddb_idx->list, list_nt);
} else if (is_reset == RESET_ADAPTER) {
if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index cc1cc3518b87..725034f4252c 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k18"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index bbbc9c918d4c..2936b447cae9 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -54,6 +54,7 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
+#include <linux/async.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -91,7 +92,7 @@ EXPORT_SYMBOL(scsi_logging_level);
#endif
/* sd, scsi core and power management need to coordinate flushing async actions */
-LIST_HEAD(scsi_sd_probe_domain);
+ASYNC_DOMAIN(scsi_sd_probe_domain);
EXPORT_SYMBOL(scsi_sd_probe_domain);
/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
@@ -1354,6 +1355,7 @@ static void __exit exit_scsi(void)
scsi_exit_devinfo();
scsi_exit_procfs();
scsi_exit_queue();
+ async_unregister_domain(&scsi_sd_probe_domain);
}
subsys_initcall(init_scsi);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index d0f71e5d065f..4a6381c87253 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1687,6 +1687,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* requests are started.
*/
scsi_run_host_queues(shost);
+
+ /*
+ * if eh is active and host_eh_scheduled is pending we need to re-run
+ * recovery. we do this check after scsi_run_host_queues() to allow
+ * everything pent up since the last eh run a chance to make forward
+ * progress before we sync again. Either we'll immediately re-run
+ * recovery or scsi_device_unbusy() will wake us again when these
+ * pending commands complete.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->host_eh_scheduled)
+ if (scsi_host_set_state(shost, SHOST_RECOVERY))
+ WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
+ spin_unlock_irqrestore(shost->host_lock, flags);
}
/**
@@ -1804,15 +1818,14 @@ int scsi_error_handler(void *data)
* We never actually get interrupted because kthread_run
* disables signal delivery for the created thread.
*/
- set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
if ((shost->host_failed == 0 && shost->host_eh_scheduled == 0) ||
shost->host_failed != shost->host_busy) {
SCSI_LOG_ERROR_RECOVERY(1,
printk("Error handler scsi_eh_%d sleeping\n",
shost->host_no));
schedule();
- set_current_state(TASK_INTERRUPTIBLE);
continue;
}
@@ -1849,7 +1862,6 @@ int scsi_error_handler(void *data)
scsi_restart_operations(shost);
if (!shost->eh_noresume)
scsi_autopm_put_host(shost);
- set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 08f1e297c735..ffd77739ae3e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -126,7 +126,7 @@ static void scsi_unprep_request(struct request *req)
* for a requeue after completion, which should only occur in this
* file.
*/
-static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
+static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
struct Scsi_Host *host = cmd->device->host;
struct scsi_device *device = cmd->device;
@@ -172,15 +172,14 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
/*
* Requeue this command. It will go before all other commands
- * that are already in the queue.
+ * that are already in the queue. Schedule requeue work under
+ * lock such that the kblockd_schedule_work() call happens
+ * before blk_cleanup_queue() finishes.
*/
spin_lock_irqsave(q->queue_lock, flags);
blk_requeue_request(q, cmd->request);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
kblockd_schedule_work(q, &device->requeue_work);
-
- return 0;
+ spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
@@ -202,9 +201,9 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
* Notes: This could be called either from an interrupt context or a
* normal process context.
*/
-int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
+void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
- return __scsi_queue_insert(cmd, reason, 1);
+ __scsi_queue_insert(cmd, reason, 1);
}
/**
* scsi_execute - insert request and wait for the result
@@ -423,10 +422,6 @@ static void scsi_run_queue(struct request_queue *q)
LIST_HEAD(starved_list);
unsigned long flags;
- /* if the device is dead, sdev will be NULL, so no queue to run */
- if (!sdev)
- return;
-
shost = sdev->host;
if (scsi_target(sdev)->single_lun)
scsi_single_lun_run(sdev);
@@ -500,15 +495,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
*/
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
+ struct scsi_device *sdev = cmd->device;
struct request *req = cmd->request;
unsigned long flags;
+ /*
+ * We need to hold a reference on the device to avoid the queue being
+ * killed after the unlock and before scsi_run_queue is invoked which
+ * may happen because scsi_unprep_request() puts the command which
+ * releases its reference on the device.
+ */
+ get_device(&sdev->sdev_gendev);
+
spin_lock_irqsave(q->queue_lock, flags);
scsi_unprep_request(req);
blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q);
+
+ put_device(&sdev->sdev_gendev);
}
void scsi_next_command(struct scsi_cmnd *cmd)
@@ -1190,6 +1196,7 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
switch (sdev->sdev_state) {
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
/*
* If the device is offline we refuse to process any
* commands. The device must be brought online
@@ -1387,16 +1394,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
* may be changed after request stacking drivers call the function,
* regardless of taking lock or not.
*
- * When scsi can't dispatch I/Os anymore and needs to kill I/Os
- * (e.g. !sdev), scsi needs to return 'not busy'.
- * Otherwise, request stacking drivers may hold requests forever.
+ * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
+ * needs to return 'not busy'. Otherwise, request stacking drivers
+ * may hold requests forever.
*/
static int scsi_lld_busy(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
- if (!sdev)
+ if (blk_queue_dead(q))
return 0;
shost = sdev->host;
@@ -1507,12 +1514,6 @@ static void scsi_request_fn(struct request_queue *q)
struct scsi_cmnd *cmd;
struct request *req;
- if (!sdev) {
- while ((req = blk_peek_request(q)) != NULL)
- scsi_kill_request(req, q);
- return;
- }
-
if(!get_device(&sdev->sdev_gendev))
/* We must be tearing the block queue down already */
return;
@@ -1714,20 +1715,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
return q;
}
-void scsi_free_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- WARN_ON(q->queuedata);
-
- /* cause scsi_request_fn() to kill all non-finished requests */
- spin_lock_irqsave(q->queue_lock, flags);
- q->request_fn(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-
- blk_cleanup_queue(q);
-}
-
/*
* Function: scsi_block_requests()
*
@@ -2098,6 +2085,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (oldstate) {
case SDEV_CREATED:
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
case SDEV_QUIESCE:
case SDEV_BLOCK:
break;
@@ -2110,6 +2098,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (oldstate) {
case SDEV_RUNNING:
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
break;
default:
goto illegal;
@@ -2117,6 +2106,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
break;
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
switch (oldstate) {
case SDEV_CREATED:
case SDEV_RUNNING:
@@ -2153,6 +2143,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_RUNNING:
case SDEV_QUIESCE:
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
case SDEV_BLOCK:
break;
default:
@@ -2165,6 +2156,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
case SDEV_CREATED:
case SDEV_RUNNING:
case SDEV_OFFLINE:
+ case SDEV_TRANSPORT_OFFLINE:
case SDEV_CANCEL:
break;
default:
@@ -2422,7 +2414,6 @@ EXPORT_SYMBOL(scsi_target_resume);
* (which must be a legal transition). When the device is in this
* state, all commands are deferred until the scsi lld reenables
* the device with scsi_device_unblock or device_block_tmo fires.
- * This routine assumes the host_lock is held on entry.
*/
int
scsi_internal_device_block(struct scsi_device *sdev)
@@ -2455,6 +2446,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
/**
* scsi_internal_device_unblock - resume a device after a block request
* @sdev: device to resume
+ * @new_state: state to set devices to after unblocking
*
* Called by scsi lld's or the midlayer to restart the device queue
* for the previously suspended scsi device. Called from interrupt or
@@ -2464,25 +2456,29 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block);
*
* Notes:
* This routine transitions the device to the SDEV_RUNNING state
- * (which must be a legal transition) allowing the midlayer to
- * goose the queue for this device. This routine assumes the
- * host_lock is held upon entry.
+ * or to one of the offline states (which must be a legal transition)
+ * allowing the midlayer to goose the queue for this device.
*/
int
-scsi_internal_device_unblock(struct scsi_device *sdev)
+scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state)
{
struct request_queue *q = sdev->request_queue;
unsigned long flags;
-
- /*
- * Try to transition the scsi device to SDEV_RUNNING
- * and goose the device queue if successful.
+
+ /*
+ * Try to transition the scsi device to SDEV_RUNNING or one of the
+ * offlined states and goose the device queue if successful.
*/
if (sdev->sdev_state == SDEV_BLOCK)
- sdev->sdev_state = SDEV_RUNNING;
- else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
- sdev->sdev_state = SDEV_CREATED;
- else if (sdev->sdev_state != SDEV_CANCEL &&
+ sdev->sdev_state = new_state;
+ else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
+ if (new_state == SDEV_TRANSPORT_OFFLINE ||
+ new_state == SDEV_OFFLINE)
+ sdev->sdev_state = new_state;
+ else
+ sdev->sdev_state = SDEV_CREATED;
+ } else if (sdev->sdev_state != SDEV_CANCEL &&
sdev->sdev_state != SDEV_OFFLINE)
return -EINVAL;
@@ -2523,26 +2519,26 @@ EXPORT_SYMBOL_GPL(scsi_target_block);
static void
device_unblock(struct scsi_device *sdev, void *data)
{
- scsi_internal_device_unblock(sdev);
+ scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}
static int
target_unblock(struct device *dev, void *data)
{
if (scsi_is_target_device(dev))
- starget_for_each_device(to_scsi_target(dev), NULL,
+ starget_for_each_device(to_scsi_target(dev), data,
device_unblock);
return 0;
}
void
-scsi_target_unblock(struct device *dev)
+scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
if (scsi_is_target_device(dev))
- starget_for_each_device(to_scsi_target(dev), NULL,
+ starget_for_each_device(to_scsi_target(dev), &new_state,
device_unblock);
else
- device_for_each_child(dev, NULL, target_unblock);
+ device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index c77628afbf9f..8818dd681c19 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -486,6 +486,10 @@ void
scsi_netlink_init(void)
{
int error;
+ struct netlink_kernel_cfg cfg = {
+ .input = scsi_nl_rcv_msg,
+ .groups = SCSI_NL_GRP_CNT,
+ };
INIT_LIST_HEAD(&scsi_nl_drivers);
@@ -497,8 +501,7 @@ scsi_netlink_init(void)
}
scsi_nl_sock = netlink_kernel_create(&init_net, NETLINK_SCSITRANSPORT,
- SCSI_NL_GRP_CNT, scsi_nl_rcv_msg, NULL,
- THIS_MODULE);
+ THIS_MODULE, &cfg);
if (!scsi_nl_sock) {
printk(KERN_ERR "%s: register of receive handler failed\n",
__func__);
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d4201ded3b22..dc0ad85853e2 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -76,23 +76,24 @@ static int scsi_bus_resume_common(struct device *dev)
{
int err = 0;
- if (scsi_is_sdev_device(dev)) {
- /*
- * Parent device may have runtime suspended as soon as
- * it is woken up during the system resume.
- *
- * Resume it on behalf of child.
- */
- pm_runtime_get_sync(dev->parent);
- err = scsi_dev_type_resume(dev);
- pm_runtime_put_sync(dev->parent);
- }
+ /*
+ * Parent device may have runtime suspended as soon as
+ * it is woken up during the system resume.
+ *
+ * Resume it on behalf of child.
+ */
+ pm_runtime_get_sync(dev->parent);
+ if (scsi_is_sdev_device(dev))
+ err = scsi_dev_type_resume(dev);
if (err == 0) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
+
+ pm_runtime_put_sync(dev->parent);
+
return err;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 07ce3f51701d..8f9a0cadc296 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -2,6 +2,8 @@
#define _SCSI_PRIV_H
#include <linux/device.h>
+#include <linux/async.h>
+#include <scsi/scsi_device.h>
struct request_queue;
struct request;
@@ -79,12 +81,11 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd);
/* scsi_lib.c */
extern int scsi_maybe_unblock_host(struct scsi_device *sdev);
extern void scsi_device_unbusy(struct scsi_device *sdev);
-extern int scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
+extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason);
extern void scsi_next_command(struct scsi_cmnd *cmd);
extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
-extern void scsi_free_queue(struct request_queue *q);
extern int scsi_init_queue(void);
extern void scsi_exit_queue(void);
struct request_queue;
@@ -163,7 +164,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
#endif /* CONFIG_PM_RUNTIME */
-extern struct list_head scsi_sd_probe_domain;
+extern struct async_domain scsi_sd_probe_domain;
/*
* internal scsi timeout functions: for use by mid-layer and transport
@@ -172,6 +173,7 @@ extern struct list_head scsi_sd_probe_domain;
#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev);
+extern int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state);
#endif /* _SCSI_PRIV_H */
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 2e5fe584aad3..56a93794c470 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
do {
if (list_empty(&scanning_hosts))
- goto out;
+ return 0;
/* If we can't get memory immediately, that's OK. Just
* sleep a little. Even if we never get memory, the async
* scans will finish eventually.
@@ -179,26 +179,11 @@ int scsi_complete_async_scans(void)
}
done:
spin_unlock(&async_scan_lock);
- kfree(data);
-
- out:
- async_synchronize_full_domain(&scsi_sd_probe_domain);
+ kfree(data);
return 0;
}
-/* Only exported for the benefit of scsi_wait_scan */
-EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
-
-#ifndef MODULE
-/*
- * For async scanning we need to wait for all the scans to complete before
- * trying to mount the root fs. Otherwise non-modular drivers may not be ready
- * yet.
- */
-late_initcall(scsi_complete_async_scans);
-#endif
-
/**
* scsi_unlock_floptical - unlock device via a special MODE SENSE command
* @sdev: scsi device to send command to
@@ -1717,6 +1702,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
shost_for_each_device(sdev, shost) {
+ /* target removed before the device could be added */
+ if (sdev->sdev_state == SDEV_DEL)
+ continue;
if (!scsi_host_scan_allowed(shost) ||
scsi_sysfs_add_sdev(sdev) != 0)
__scsi_remove_device(sdev);
@@ -1842,14 +1830,13 @@ static void do_scsi_scan_host(struct Scsi_Host *shost)
}
}
-static int do_scan_async(void *_data)
+static void do_scan_async(void *_data, async_cookie_t c)
{
struct async_scan_data *data = _data;
struct Scsi_Host *shost = data->shost;
do_scsi_scan_host(shost);
scsi_finish_async_scan(data);
- return 0;
}
/**
@@ -1858,7 +1845,6 @@ static int do_scan_async(void *_data)
**/
void scsi_scan_host(struct Scsi_Host *shost)
{
- struct task_struct *p;
struct async_scan_data *data;
if (strncmp(scsi_scan_type, "none", 4) == 0)
@@ -1873,9 +1859,11 @@ void scsi_scan_host(struct Scsi_Host *shost)
return;
}
- p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
- if (IS_ERR(p))
- do_scan_async(data);
+ /* register with the async subsystem so wait_for_device_probe()
+ * will flush this work
+ */
+ async_schedule(do_scan_async, data);
+
/* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
}
EXPORT_SYMBOL(scsi_scan_host);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 04c2a278076e..093d4f6a54d2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -35,6 +35,7 @@ static const struct {
{ SDEV_DEL, "deleted" },
{ SDEV_QUIESCE, "quiesce" },
{ SDEV_OFFLINE, "offline" },
+ { SDEV_TRANSPORT_OFFLINE, "transport-offline" },
{ SDEV_BLOCK, "blocked" },
{ SDEV_CREATED_BLOCK, "created-blocked" },
};
@@ -966,16 +967,20 @@ void __scsi_remove_device(struct scsi_device *sdev)
device_del(dev);
} else
put_device(&sdev->sdev_dev);
+
+ /*
+ * Stop accepting new requests and wait until all queuecommand() and
+ * scsi_run_queue() invocations have finished before tearing down the
+ * device.
+ */
scsi_device_set_state(sdev, SDEV_DEL);
+ blk_cleanup_queue(sdev->request_queue);
+ cancel_work_sync(&sdev->requeue_work);
+
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
transport_destroy_device(dev);
- /* cause the request function to reject all I/O requests */
- sdev->request_queue->queuedata = NULL;
-
- /* Freeing the queue signals to block that we're done */
- scsi_free_queue(sdev->request_queue);
put_device(dev);
}
@@ -1000,7 +1005,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
struct scsi_device *sdev;
spin_lock_irqsave(shost->host_lock, flags);
- starget->reap_ref++;
restart:
list_for_each_entry(sdev, &shost->__devices, siblings) {
if (sdev->channel != starget->channel ||
@@ -1014,14 +1018,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
- scsi_target_reap(starget);
-}
-
-static int __remove_child (struct device * dev, void * data)
-{
- if (scsi_is_target_device(dev))
- __scsi_remove_target(to_scsi_target(dev));
- return 0;
}
/**
@@ -1034,14 +1030,34 @@ static int __remove_child (struct device * dev, void * data)
*/
void scsi_remove_target(struct device *dev)
{
- if (scsi_is_target_device(dev)) {
- __scsi_remove_target(to_scsi_target(dev));
- return;
+ struct Scsi_Host *shost = dev_to_shost(dev->parent);
+ struct scsi_target *starget, *found;
+ unsigned long flags;
+
+ restart:
+ found = NULL;
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry(starget, &shost->__targets, siblings) {
+ if (starget->state == STARGET_DEL)
+ continue;
+ if (starget->dev.parent == dev || &starget->dev == dev) {
+ found = starget;
+ found->reap_ref++;
+ break;
+ }
}
+ spin_unlock_irqrestore(shost->host_lock, flags);
- get_device(dev);
- device_for_each_child(dev, NULL, __remove_child);
- put_device(dev);
+ if (found) {
+ __scsi_remove_target(found);
+ scsi_target_reap(found);
+ /* in the case where @dev has multiple starget children,
+ * continue removing.
+ *
+ * FIXME: does such a case exist?
+ */
+ goto restart;
+ }
}
EXPORT_SYMBOL(scsi_remove_target);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 579760420d53..2d1e68db9b3f 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1744,6 +1744,15 @@ fc_host_statistic(fcp_output_requests);
fc_host_statistic(fcp_control_requests);
fc_host_statistic(fcp_input_megabytes);
fc_host_statistic(fcp_output_megabytes);
+fc_host_statistic(fcp_packet_alloc_failures);
+fc_host_statistic(fcp_packet_aborts);
+fc_host_statistic(fcp_frame_alloc_failures);
+fc_host_statistic(fc_no_free_exch);
+fc_host_statistic(fc_no_free_exch_xid);
+fc_host_statistic(fc_xid_not_found);
+fc_host_statistic(fc_xid_busy);
+fc_host_statistic(fc_seq_not_found);
+fc_host_statistic(fc_non_bls_resp);
static ssize_t
fc_reset_statistics(struct device *dev, struct device_attribute *attr,
@@ -1784,6 +1793,15 @@ static struct attribute *fc_statistics_attrs[] = {
&device_attr_host_fcp_control_requests.attr,
&device_attr_host_fcp_input_megabytes.attr,
&device_attr_host_fcp_output_megabytes.attr,
+ &device_attr_host_fcp_packet_alloc_failures.attr,
+ &device_attr_host_fcp_packet_aborts.attr,
+ &device_attr_host_fcp_frame_alloc_failures.attr,
+ &device_attr_host_fc_no_free_exch.attr,
+ &device_attr_host_fc_no_free_exch_xid.attr,
+ &device_attr_host_fc_xid_not_found.attr,
+ &device_attr_host_fc_xid_busy.attr,
+ &device_attr_host_fc_seq_not_found.attr,
+ &device_attr_host_fc_non_bls_resp.attr,
&device_attr_host_reset_statistics.attr,
NULL
};
@@ -2477,11 +2495,9 @@ static void fc_terminate_rport_io(struct fc_rport *rport)
i->f->terminate_rport_io(rport);
/*
- * must unblock to flush queued IO. The caller will have set
- * the port_state or flags, so that fc_remote_port_chkready will
- * fail IO.
+ * Must unblock to flush queued IO. scsi-ml will fail incoming reqs.
*/
- scsi_target_unblock(&rport->dev);
+ scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
}
/**
@@ -2812,8 +2828,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
/* if target, initiate a scan */
if (rport->scsi_target_id != -1) {
- scsi_target_unblock(&rport->dev);
-
+ scsi_target_unblock(&rport->dev,
+ SDEV_RUNNING);
spin_lock_irqsave(shost->host_lock,
flags);
rport->flags |= FC_RPORT_SCAN_PENDING;
@@ -2882,7 +2898,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
spin_unlock_irqrestore(shost->host_lock, flags);
if (ids->roles & FC_PORT_ROLE_FCP_TARGET) {
- scsi_target_unblock(&rport->dev);
+ scsi_target_unblock(&rport->dev, SDEV_RUNNING);
/* initiate a scan of the target */
spin_lock_irqsave(shost->host_lock, flags);
@@ -3087,7 +3103,7 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
/* ensure any stgt delete functions are done */
fc_flush_work(shost);
- scsi_target_unblock(&rport->dev);
+ scsi_target_unblock(&rport->dev, SDEV_RUNNING);
/* initiate a scan of the target */
spin_lock_irqsave(shost->host_lock, flags);
rport->flags |= FC_RPORT_SCAN_PENDING;
@@ -3131,7 +3147,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
"blocked FC remote port time out: no longer"
" a FCP target, removing starget\n");
spin_unlock_irqrestore(shost->host_lock, flags);
- scsi_target_unblock(&rport->dev);
+ scsi_target_unblock(&rport->dev, SDEV_TRANSPORT_OFFLINE);
fc_queue_work(shost, &rport->stgt_delete_work);
return;
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 1cf640e575da..09809d06eccb 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -907,7 +907,7 @@ static void session_recovery_timedout(struct work_struct *work)
session->transport->session_recovery_timedout(session);
ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
- scsi_target_unblock(&session->dev);
+ scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
}
@@ -930,7 +930,7 @@ static void __iscsi_unblock_session(struct work_struct *work)
session->state = ISCSI_SESSION_LOGGED_IN;
spin_unlock_irqrestore(&session->lock, flags);
/* start IO */
- scsi_target_unblock(&session->dev);
+ scsi_target_unblock(&session->dev, SDEV_RUNNING);
/*
* Only do kernel scanning if the driver is properly hooked into
* the async scanning code (drivers like iscsi_tcp do login and
@@ -1180,7 +1180,7 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
session->state = ISCSI_SESSION_FREE;
spin_unlock_irqrestore(&session->lock, flags);
- scsi_target_unblock(&session->dev);
+ scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
/* flush running scans then delete devices */
scsi_flush_work(shost);
__iscsi_unbind_session(&session->unbind_work);
@@ -2936,7 +2936,10 @@ EXPORT_SYMBOL_GPL(iscsi_unregister_transport);
static __init int iscsi_transport_init(void)
{
int err;
-
+ struct netlink_kernel_cfg cfg = {
+ .groups = 1,
+ .input = iscsi_if_rx,
+ };
printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
ISCSI_TRANSPORT_VERSION);
@@ -2966,8 +2969,8 @@ static __init int iscsi_transport_init(void)
if (err)
goto unregister_conn_class;
- nls = netlink_kernel_create(&init_net, NETLINK_ISCSI, 1, iscsi_if_rx,
- NULL, THIS_MODULE);
+ nls = netlink_kernel_create(&init_net, NETLINK_ISCSI,
+ THIS_MODULE, &cfg);
if (!nls) {
err = -ENOBUFS;
goto unregister_session_class;
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
deleted file mode 100644
index ae7814874618..000000000000
--- a/drivers/scsi/scsi_wait_scan.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * scsi_wait_scan.c
- *
- * Copyright (C) 2006 James Bottomley <James.Bottomley@SteelEye.com>
- *
- * This is a simple module to wait until all the async scans are
- * complete. The idea is to use it in initrd/initramfs scripts. You
- * modprobe it after all the modprobes of the root SCSI drivers and it
- * will wait until they have all finished scanning their busses before
- * allowing the boot to proceed
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include "scsi_priv.h"
-
-static int __init wait_scan_init(void)
-{
- /*
- * First we need to wait for device probing to finish;
- * the drivers we just loaded might just still be probing
- * and might not yet have reached the scsi async scanning
- */
- wait_for_device_probe();
- /*
- * and then we wait for the actual asynchronous scsi scan
- * to finish.
- */
- scsi_complete_async_scans();
- return 0;
-}
-
-static void __exit wait_scan_exit(void)
-{
-}
-
-MODULE_DESCRIPTION("SCSI wait for scans");
-MODULE_AUTHOR("James Bottomley");
-MODULE_LICENSE("GPL");
-
-late_initcall(wait_scan_init);
-module_exit(wait_scan_exit);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6f72b80121a0..4df73e52a4f9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2261,8 +2261,13 @@ bad_sense:
sd_printk(KERN_ERR, sdkp, "Asking for cache data failed\n");
defaults:
- sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
- sdkp->WCE = 0;
+ if (sdp->wce_default_on) {
+ sd_printk(KERN_NOTICE, sdkp, "Assuming drive cache: write back\n");
+ sdkp->WCE = 1;
+ } else {
+ sd_printk(KERN_ERR, sdkp, "Assuming drive cache: write through\n");
+ sdkp->WCE = 0;
+ }
sdkp->RCD = 0;
sdkp->DPOFUA = 0;
}
@@ -2704,6 +2709,7 @@ static int sd_probe(struct device *dev)
sdkp->disk = gd;
sdkp->index = index;
atomic_set(&sdkp->openers, 0);
+ atomic_set(&sdkp->device->ioerr_cnt, 0);
if (!sdp->request_queue->rq_timeout) {
if (sdp->type != TYPE_MOD)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 6a4fd00117ca..58f4ba6fe412 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -232,11 +232,11 @@ static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
* the host controller
* @reg_hcs - host controller status register value
*
- * Returns 0 if device present, non-zero if no device detected
+ * Returns 1 if device present, 0 if no device detected
*/
static inline int ufshcd_is_device_present(u32 reg_hcs)
{
- return (DEVICE_PRESENT & reg_hcs) ? 0 : -1;
+ return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
}
/**
@@ -911,7 +911,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
/* check if device present */
reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
- if (ufshcd_is_device_present(reg)) {
+ if (!ufshcd_is_device_present(reg)) {
dev_err(&hba->pdev->dev, "cc: Device not present\n");
err = -ENXIO;
goto out;
@@ -1163,6 +1163,8 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
task_result = FAILED;
+ else
+ task_result = SUCCESS;
} else {
task_result = FAILED;
dev_err(&hba->pdev->dev,
@@ -1556,7 +1558,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
goto out;
}
clear_bit(free_slot, &hba->tm_condition);
- return ufshcd_task_req_compl(hba, free_slot);
+ err = ufshcd_task_req_compl(hba, free_slot);
out:
return err;
}
@@ -1580,7 +1582,7 @@ static int ufshcd_device_reset(struct scsi_cmnd *cmd)
tag = cmd->request->tag;
err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
- if (err)
+ if (err == FAILED)
goto out;
for (pos = 0; pos < hba->nutrs; pos++) {
@@ -1620,7 +1622,7 @@ static int ufshcd_host_reset(struct scsi_cmnd *cmd)
if (hba->ufshcd_state == UFSHCD_STATE_RESET)
return SUCCESS;
- return (ufshcd_do_reset(hba) == SUCCESS) ? SUCCESS : FAILED;
+ return ufshcd_do_reset(hba);
}
/**
@@ -1652,7 +1654,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
spin_unlock_irqrestore(host->host_lock, flags);
err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
- if (err)
+ if (err == FAILED)
goto out;
scsi_dma_unmap(cmd);
@@ -1953,24 +1955,7 @@ static struct pci_driver ufshcd_pci_driver = {
#endif
};
-/**
- * ufshcd_init - Driver registration routine
- */
-static int __init ufshcd_init(void)
-{
- return pci_register_driver(&ufshcd_pci_driver);
-}
-module_init(ufshcd_init);
-
-/**
- * ufshcd_exit - Driver exit clean-up routine
- */
-static void __exit ufshcd_exit(void)
-{
- pci_unregister_driver(&ufshcd_pci_driver);
-}
-module_exit(ufshcd_exit);
-
+module_pci_driver(ufshcd_pci_driver);
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
"Vinayak Holikatti <h.vinayak@samsung.com>");
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 1b3843117268..c7030fbee79c 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -25,6 +25,7 @@
#include <scsi/scsi_cmnd.h>
#define VIRTIO_SCSI_MEMPOOL_SZ 64
+#define VIRTIO_SCSI_EVENT_LEN 8
/* Command queue element */
struct virtio_scsi_cmd {
@@ -43,20 +44,42 @@ struct virtio_scsi_cmd {
} resp;
} ____cacheline_aligned_in_smp;
-/* Driver instance state */
-struct virtio_scsi {
- /* Protects ctrl_vq, req_vq and sg[] */
+struct virtio_scsi_event_node {
+ struct virtio_scsi *vscsi;
+ struct virtio_scsi_event event;
+ struct work_struct work;
+};
+
+struct virtio_scsi_vq {
+ /* Protects vq */
spinlock_t vq_lock;
- struct virtio_device *vdev;
- struct virtqueue *ctrl_vq;
- struct virtqueue *event_vq;
- struct virtqueue *req_vq;
+ struct virtqueue *vq;
+};
+
+/* Per-target queue state */
+struct virtio_scsi_target_state {
+ /* Protects sg. Lock hierarchy is tgt_lock -> vq_lock. */
+ spinlock_t tgt_lock;
/* For sglist construction when adding commands to the virtqueue. */
struct scatterlist sg[];
};
+/* Driver instance state */
+struct virtio_scsi {
+ struct virtio_device *vdev;
+
+ struct virtio_scsi_vq ctrl_vq;
+ struct virtio_scsi_vq event_vq;
+ struct virtio_scsi_vq req_vq;
+
+ /* Get some buffers ready for event vq */
+ struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];
+
+ struct virtio_scsi_target_state *tgt[];
+};
+
static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;
@@ -147,26 +170,25 @@ static void virtscsi_complete_cmd(void *buf)
static void virtscsi_vq_done(struct virtqueue *vq, void (*fn)(void *buf))
{
- struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
- struct virtio_scsi *vscsi = shost_priv(sh);
void *buf;
- unsigned long flags;
unsigned int len;
- spin_lock_irqsave(&vscsi->vq_lock, flags);
-
do {
virtqueue_disable_cb(vq);
while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
fn(buf);
} while (!virtqueue_enable_cb(vq));
-
- spin_unlock_irqrestore(&vscsi->vq_lock, flags);
}
static void virtscsi_req_done(struct virtqueue *vq)
{
+ struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vscsi->req_vq.vq_lock, flags);
virtscsi_vq_done(vq, virtscsi_complete_cmd);
+ spin_unlock_irqrestore(&vscsi->req_vq.vq_lock, flags);
};
static void virtscsi_complete_free(void *buf)
@@ -181,12 +203,123 @@ static void virtscsi_complete_free(void *buf)
static void virtscsi_ctrl_done(struct virtqueue *vq)
{
+ struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vscsi->ctrl_vq.vq_lock, flags);
virtscsi_vq_done(vq, virtscsi_complete_free);
+ spin_unlock_irqrestore(&vscsi->ctrl_vq.vq_lock, flags);
};
+static int virtscsi_kick_event(struct virtio_scsi *vscsi,
+ struct virtio_scsi_event_node *event_node)
+{
+ int ret;
+ struct scatterlist sg;
+ unsigned long flags;
+
+ sg_set_buf(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
+
+ spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
+
+ ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
+ if (ret >= 0)
+ virtqueue_kick(vscsi->event_vq.vq);
+
+ spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
+
+ return ret;
+}
+
+static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
+ vscsi->event_list[i].vscsi = vscsi;
+ virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
+ }
+
+ return 0;
+}
+
+static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
+{
+ int i;
+
+ for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
+ cancel_work_sync(&vscsi->event_list[i].work);
+}
+
+static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
+ struct virtio_scsi_event *event)
+{
+ struct scsi_device *sdev;
+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ unsigned int target = event->lun[1];
+ unsigned int lun = (event->lun[2] << 8) | event->lun[3];
+
+ switch (event->reason) {
+ case VIRTIO_SCSI_EVT_RESET_RESCAN:
+ scsi_add_device(shost, 0, target, lun);
+ break;
+ case VIRTIO_SCSI_EVT_RESET_REMOVED:
+ sdev = scsi_device_lookup(shost, 0, target, lun);
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ } else {
+ pr_err("SCSI device %d 0 %d %d not found\n",
+ shost->host_no, target, lun);
+ }
+ break;
+ default:
+ pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+ }
+}
+
+static void virtscsi_handle_event(struct work_struct *work)
+{
+ struct virtio_scsi_event_node *event_node =
+ container_of(work, struct virtio_scsi_event_node, work);
+ struct virtio_scsi *vscsi = event_node->vscsi;
+ struct virtio_scsi_event *event = &event_node->event;
+
+ if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
+ event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
+ scsi_scan_host(virtio_scsi_host(vscsi->vdev));
+ }
+
+ switch (event->event) {
+ case VIRTIO_SCSI_T_NO_EVENT:
+ break;
+ case VIRTIO_SCSI_T_TRANSPORT_RESET:
+ virtscsi_handle_transport_reset(vscsi, event);
+ break;
+ default:
+ pr_err("Unsupport virtio scsi event %x\n", event->event);
+ }
+ virtscsi_kick_event(vscsi, event_node);
+}
+
+static void virtscsi_complete_event(void *buf)
+{
+ struct virtio_scsi_event_node *event_node = buf;
+
+ INIT_WORK(&event_node->work, virtscsi_handle_event);
+ schedule_work(&event_node->work);
+}
+
static void virtscsi_event_done(struct virtqueue *vq)
{
- virtscsi_vq_done(vq, virtscsi_complete_free);
+ struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
+ virtscsi_vq_done(vq, virtscsi_complete_event);
+ spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
};
static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
@@ -212,25 +345,17 @@ static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx,
* @req_size : size of the request buffer
* @resp_size : size of the response buffer
*
- * Called with vq_lock held.
+ * Called with tgt_lock held.
*/
-static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
+static void virtscsi_map_cmd(struct virtio_scsi_target_state *tgt,
struct virtio_scsi_cmd *cmd,
unsigned *out_num, unsigned *in_num,
size_t req_size, size_t resp_size)
{
struct scsi_cmnd *sc = cmd->sc;
- struct scatterlist *sg = vscsi->sg;
+ struct scatterlist *sg = tgt->sg;
unsigned int idx = 0;
- if (sc) {
- struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
- BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
-
- /* TODO: check feature bit and fail if unsupported? */
- BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
- }
-
/* Request header. */
sg_set_buf(&sg[idx++], &cmd->req, req_size);
@@ -250,7 +375,8 @@ static void virtscsi_map_cmd(struct virtio_scsi *vscsi,
*in_num = idx - *out_num;
}
-static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
+static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
+ struct virtio_scsi_vq *vq,
struct virtio_scsi_cmd *cmd,
size_t req_size, size_t resp_size, gfp_t gfp)
{
@@ -258,24 +384,35 @@ static int virtscsi_kick_cmd(struct virtio_scsi *vscsi, struct virtqueue *vq,
unsigned long flags;
int ret;
- spin_lock_irqsave(&vscsi->vq_lock, flags);
-
- virtscsi_map_cmd(vscsi, cmd, &out_num, &in_num, req_size, resp_size);
+ spin_lock_irqsave(&tgt->tgt_lock, flags);
+ virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
- ret = virtqueue_add_buf(vq, vscsi->sg, out_num, in_num, cmd, gfp);
+ spin_lock(&vq->vq_lock);
+ ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
+ spin_unlock(&tgt->tgt_lock);
if (ret >= 0)
- virtqueue_kick(vq);
+ ret = virtqueue_kick_prepare(vq->vq);
+
+ spin_unlock_irqrestore(&vq->vq_lock, flags);
- spin_unlock_irqrestore(&vscsi->vq_lock, flags);
+ if (ret > 0)
+ virtqueue_notify(vq->vq);
return ret;
}
static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
struct virtio_scsi *vscsi = shost_priv(sh);
+ struct virtio_scsi_target_state *tgt = vscsi->tgt[sc->device->id];
struct virtio_scsi_cmd *cmd;
int ret;
+ struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+ BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
+
+ /* TODO: check feature bit and fail if unsupported? */
+ BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);
+
dev_dbg(&sc->device->sdev_gendev,
"cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
@@ -300,7 +437,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);
- if (virtscsi_kick_cmd(vscsi, vscsi->req_vq, cmd,
+ if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
GFP_ATOMIC) >= 0)
ret = 0;
@@ -312,10 +449,11 @@ out:
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
DECLARE_COMPLETION_ONSTACK(comp);
+ struct virtio_scsi_target_state *tgt = vscsi->tgt[cmd->sc->device->id];
int ret = FAILED;
cmd->comp = &comp;
- if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
+ if (virtscsi_kick_cmd(tgt, &vscsi->ctrl_vq, cmd,
sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
GFP_NOIO) < 0)
goto out;
@@ -408,11 +546,63 @@ static struct scsi_host_template virtscsi_host_template = {
&__val, sizeof(__val)); \
})
+static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
+ struct virtqueue *vq)
+{
+ spin_lock_init(&virtscsi_vq->vq_lock);
+ virtscsi_vq->vq = vq;
+}
+
+static struct virtio_scsi_target_state *virtscsi_alloc_tgt(
+ struct virtio_device *vdev, int sg_elems)
+{
+ struct virtio_scsi_target_state *tgt;
+ gfp_t gfp_mask = GFP_KERNEL;
+
+ /* We need extra sg elements at head and tail. */
+ tgt = kmalloc(sizeof(*tgt) + sizeof(tgt->sg[0]) * (sg_elems + 2),
+ gfp_mask);
+
+ if (!tgt)
+ return NULL;
+
+ spin_lock_init(&tgt->tgt_lock);
+ sg_init_table(tgt->sg, sg_elems + 2);
+ return tgt;
+}
+
+static void virtscsi_scan(struct virtio_device *vdev)
+{
+ struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;
+
+ scsi_scan_host(shost);
+}
+
+static void virtscsi_remove_vqs(struct virtio_device *vdev)
+{
+ struct Scsi_Host *sh = virtio_scsi_host(vdev);
+ struct virtio_scsi *vscsi = shost_priv(sh);
+ u32 i, num_targets;
+
+ /* Stop all the virtqueues. */
+ vdev->config->reset(vdev);
+
+ num_targets = sh->max_id;
+ for (i = 0; i < num_targets; i++) {
+ kfree(vscsi->tgt[i]);
+ vscsi->tgt[i] = NULL;
+ }
+
+ vdev->config->del_vqs(vdev);
+}
+
static int virtscsi_init(struct virtio_device *vdev,
- struct virtio_scsi *vscsi)
+ struct virtio_scsi *vscsi, int num_targets)
{
int err;
struct virtqueue *vqs[3];
+ u32 i, sg_elems;
+
vq_callback_t *callbacks[] = {
virtscsi_ctrl_done,
virtscsi_event_done,
@@ -429,13 +619,32 @@ static int virtscsi_init(struct virtio_device *vdev,
if (err)
return err;
- vscsi->ctrl_vq = vqs[0];
- vscsi->event_vq = vqs[1];
- vscsi->req_vq = vqs[2];
+ virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
+ virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
+ virtscsi_init_vq(&vscsi->req_vq, vqs[2]);
virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
- return 0;
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+ virtscsi_kick_event_all(vscsi);
+
+ /* We need to know how many segments before we allocate. */
+ sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
+
+ for (i = 0; i < num_targets; i++) {
+ vscsi->tgt[i] = virtscsi_alloc_tgt(vdev, sg_elems);
+ if (!vscsi->tgt[i]) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+ err = 0;
+
+out:
+ if (err)
+ virtscsi_remove_vqs(vdev);
+ return err;
}
static int __devinit virtscsi_probe(struct virtio_device *vdev)
@@ -443,31 +652,25 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
struct Scsi_Host *shost;
struct virtio_scsi *vscsi;
int err;
- u32 sg_elems;
+ u32 sg_elems, num_targets;
u32 cmd_per_lun;
- /* We need to know how many segments before we allocate.
- * We need an extra sg elements at head and tail.
- */
- sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
-
/* Allocate memory and link the structs together. */
+ num_targets = virtscsi_config_get(vdev, max_target) + 1;
shost = scsi_host_alloc(&virtscsi_host_template,
- sizeof(*vscsi) + sizeof(vscsi->sg[0]) * (sg_elems + 2));
+ sizeof(*vscsi)
+ + num_targets * sizeof(struct virtio_scsi_target_state));
if (!shost)
return -ENOMEM;
+ sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
shost->sg_tablesize = sg_elems;
vscsi = shost_priv(shost);
vscsi->vdev = vdev;
vdev->priv = shost;
- /* Random initializations. */
- spin_lock_init(&vscsi->vq_lock);
- sg_init_table(vscsi->sg, sg_elems + 2);
-
- err = virtscsi_init(vdev, vscsi);
+ err = virtscsi_init(vdev, vscsi, num_targets);
if (err)
goto virtscsi_init_failed;
@@ -475,15 +678,16 @@ static int __devinit virtscsi_probe(struct virtio_device *vdev)
shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;
shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1;
- shost->max_id = virtscsi_config_get(vdev, max_target) + 1;
+ shost->max_id = num_targets;
shost->max_channel = 0;
shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;
err = scsi_add_host(shost, &vdev->dev);
if (err)
goto scsi_add_host_failed;
-
- scsi_scan_host(shost);
-
+ /*
+ * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
+ * after VIRTIO_CONFIG_S_DRIVER_OK has been set..
+ */
return 0;
scsi_add_host_failed:
@@ -493,17 +697,13 @@ virtscsi_init_failed:
return err;
}
-static void virtscsi_remove_vqs(struct virtio_device *vdev)
-{
- /* Stop all the virtqueues. */
- vdev->config->reset(vdev);
-
- vdev->config->del_vqs(vdev);
-}
-
static void __devexit virtscsi_remove(struct virtio_device *vdev)
{
struct Scsi_Host *shost = virtio_scsi_host(vdev);
+ struct virtio_scsi *vscsi = shost_priv(shost);
+
+ if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
+ virtscsi_cancel_event_work(vscsi);
scsi_remove_host(shost);
@@ -523,7 +723,7 @@ static int virtscsi_restore(struct virtio_device *vdev)
struct Scsi_Host *sh = virtio_scsi_host(vdev);
struct virtio_scsi *vscsi = shost_priv(sh);
- return virtscsi_init(vdev, vscsi);
+ return virtscsi_init(vdev, vscsi, sh->max_id);
}
#endif
@@ -532,11 +732,18 @@ static struct virtio_device_id id_table[] = {
{ 0 },
};
+static unsigned int features[] = {
+ VIRTIO_SCSI_F_HOTPLUG
+};
+
static struct virtio_driver virtio_scsi_driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = virtscsi_probe,
+ .scan = virtscsi_scan,
#ifdef CONFIG_PM
.freeze = virtscsi_freeze,
.restore = virtscsi_restore,
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
index f168a6159961..d860ef743568 100644
--- a/drivers/sh/Kconfig
+++ b/drivers/sh/Kconfig
@@ -1,5 +1,6 @@
menu "SuperH / SH-Mobile Driver Options"
source "drivers/sh/intc/Kconfig"
+source "drivers/sh/pfc/Kconfig"
endmenu
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 7139ad2f2086..e57895b1a425 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -5,6 +5,7 @@ obj-y := intc/
obj-$(CONFIG_HAVE_CLK) += clk/
obj-$(CONFIG_MAPLE) += maple/
+obj-$(CONFIG_SH_PFC) += pfc/
obj-$(CONFIG_SUPERHYWAY) += superhyway/
-obj-$(CONFIG_GENERIC_GPIO) += pfc.o
+
obj-y += pm_runtime.o
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index f0d015dd0fef..07e9fb4f8041 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -14,6 +14,8 @@
#include <linux/io.h>
#include <linux/sh_clk.h>
+#define CPG_CKSTP_BIT BIT(8)
+
static unsigned int sh_clk_read(struct clk *clk)
{
if (clk->flags & CLK_ENABLE_REG_8BIT)
@@ -66,71 +68,43 @@ int __init sh_clk_mstp_register(struct clk *clks, int nr)
return ret;
}
-static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
+/*
+ * Div/mult table lookup helpers
+ */
+static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
- return clk_rate_table_round(clk, clk->freq_table, rate);
+ return clk->priv;
}
-static int sh_clk_div6_divisors[64] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
- 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
-};
+static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
+{
+ return clk_to_div_table(clk)->div_mult_table;
+}
-static struct clk_div_mult_table sh_clk_div6_table = {
- .divisors = sh_clk_div6_divisors,
- .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
-};
+/*
+ * Common div ops
+ */
+static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
+{
+ return clk_rate_table_round(clk, clk->freq_table, rate);
+}
-static unsigned long sh_clk_div6_recalc(struct clk *clk)
+static unsigned long sh_clk_div_recalc(struct clk *clk)
{
- struct clk_div_mult_table *table = &sh_clk_div6_table;
+ struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
unsigned int idx;
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
- table, NULL);
+ table, clk->arch_flags ? &clk->arch_flags : NULL);
- idx = sh_clk_read(clk) & 0x003f;
+ idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
return clk->freq_table[idx].frequency;
}
-static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
-{
- struct clk_div_mult_table *table = &sh_clk_div6_table;
- u32 value;
- int ret, i;
-
- if (!clk->parent_table || !clk->parent_num)
- return -EINVAL;
-
- /* Search the parent */
- for (i = 0; i < clk->parent_num; i++)
- if (clk->parent_table[i] == parent)
- break;
-
- if (i == clk->parent_num)
- return -ENODEV;
-
- ret = clk_reparent(clk, parent);
- if (ret < 0)
- return ret;
-
- value = sh_clk_read(clk) &
- ~(((1 << clk->src_width) - 1) << clk->src_shift);
-
- sh_clk_write(value | (i << clk->src_shift), clk);
-
- /* Rebuild the frequency table */
- clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
- table, NULL);
-
- return 0;
-}
-
-static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
+static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
{
+ struct clk_div_table *dt = clk_to_div_table(clk);
unsigned long value;
int idx;
@@ -139,51 +113,53 @@ static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
return idx;
value = sh_clk_read(clk);
- value &= ~0x3f;
- value |= idx;
+ value &= ~(clk->div_mask << clk->enable_bit);
+ value |= (idx << clk->enable_bit);
sh_clk_write(value, clk);
+
+ /* XXX: Should use a post-change notifier */
+ if (dt->kick)
+ dt->kick(clk);
+
return 0;
}
-static int sh_clk_div6_enable(struct clk *clk)
+static int sh_clk_div_enable(struct clk *clk)
{
- unsigned long value;
- int ret;
-
- ret = sh_clk_div6_set_rate(clk, clk->rate);
- if (ret == 0) {
- value = sh_clk_read(clk);
- value &= ~0x100; /* clear stop bit to enable clock */
- sh_clk_write(value, clk);
- }
- return ret;
+ sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
+ return 0;
}
-static void sh_clk_div6_disable(struct clk *clk)
+static void sh_clk_div_disable(struct clk *clk)
{
- unsigned long value;
+ unsigned int val;
- value = sh_clk_read(clk);
- value |= 0x100; /* stop clock */
- value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
- sh_clk_write(value, clk);
+ val = sh_clk_read(clk);
+ val |= CPG_CKSTP_BIT;
+
+ /*
+ * div6 clocks require the divisor field to be non-zero or the
+ * above CKSTP toggle silently fails. Ensure that the divisor
+ * array is reset to its initial state on disable.
+ */
+ if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
+ val |= clk->div_mask;
+
+ sh_clk_write(val, clk);
}
-static struct sh_clk_ops sh_clk_div6_clk_ops = {
- .recalc = sh_clk_div6_recalc,
+static struct sh_clk_ops sh_clk_div_clk_ops = {
+ .recalc = sh_clk_div_recalc,
+ .set_rate = sh_clk_div_set_rate,
.round_rate = sh_clk_div_round_rate,
- .set_rate = sh_clk_div6_set_rate,
- .enable = sh_clk_div6_enable,
- .disable = sh_clk_div6_disable,
};
-static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
- .recalc = sh_clk_div6_recalc,
+static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
+ .recalc = sh_clk_div_recalc,
+ .set_rate = sh_clk_div_set_rate,
.round_rate = sh_clk_div_round_rate,
- .set_rate = sh_clk_div6_set_rate,
- .enable = sh_clk_div6_enable,
- .disable = sh_clk_div6_disable,
- .set_parent = sh_clk_div6_set_parent,
+ .enable = sh_clk_div_enable,
+ .disable = sh_clk_div_disable,
};
static int __init sh_clk_init_parent(struct clk *clk)
@@ -218,12 +194,12 @@ static int __init sh_clk_init_parent(struct clk *clk)
return 0;
}
-static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
- struct sh_clk_ops *ops)
+static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
+ struct clk_div_table *table, struct sh_clk_ops *ops)
{
struct clk *clkp;
void *freq_table;
- int nr_divs = sh_clk_div6_table.nr_divisors;
+ int nr_divs = table->div_mult_table->nr_divisors;
int freq_table_size = sizeof(struct cpufreq_frequency_table);
int ret = 0;
int k;
@@ -231,7 +207,7 @@ static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
freq_table_size *= (nr_divs + 1);
freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
if (!freq_table) {
- pr_err("sh_clk_div6_register: unable to alloc memory\n");
+ pr_err("%s: unable to alloc memory\n", __func__);
return -ENOMEM;
}
@@ -239,47 +215,98 @@ static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
clkp = clks + k;
clkp->ops = ops;
+ clkp->priv = table;
+
clkp->freq_table = freq_table + (k * freq_table_size);
clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
- ret = clk_register(clkp);
- if (ret < 0)
- break;
- ret = sh_clk_init_parent(clkp);
+ ret = clk_register(clkp);
+ if (ret == 0)
+ ret = sh_clk_init_parent(clkp);
}
return ret;
}
-int __init sh_clk_div6_register(struct clk *clks, int nr)
-{
- return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
-}
+/*
+ * div6 support
+ */
+static int sh_clk_div6_divisors[64] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
-int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
-{
- return sh_clk_div6_register_ops(clks, nr,
- &sh_clk_div6_reparent_clk_ops);
-}
+static struct clk_div_mult_table div6_div_mult_table = {
+ .divisors = sh_clk_div6_divisors,
+ .nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
+};
-static unsigned long sh_clk_div4_recalc(struct clk *clk)
+static struct clk_div_table sh_clk_div6_table = {
+ .div_mult_table = &div6_div_mult_table,
+};
+
+static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
- struct clk_div4_table *d4t = clk->priv;
- struct clk_div_mult_table *table = d4t->div_mult_table;
- unsigned int idx;
+ struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
+ u32 value;
+ int ret, i;
+ if (!clk->parent_table || !clk->parent_num)
+ return -EINVAL;
+
+ /* Search the parent */
+ for (i = 0; i < clk->parent_num; i++)
+ if (clk->parent_table[i] == parent)
+ break;
+
+ if (i == clk->parent_num)
+ return -ENODEV;
+
+ ret = clk_reparent(clk, parent);
+ if (ret < 0)
+ return ret;
+
+ value = sh_clk_read(clk) &
+ ~(((1 << clk->src_width) - 1) << clk->src_shift);
+
+ sh_clk_write(value | (i << clk->src_shift), clk);
+
+ /* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
- table, &clk->arch_flags);
+ table, NULL);
- idx = (sh_clk_read(clk) >> clk->enable_bit) & 0x000f;
+ return 0;
+}
- return clk->freq_table[idx].frequency;
+static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
+ .recalc = sh_clk_div_recalc,
+ .round_rate = sh_clk_div_round_rate,
+ .set_rate = sh_clk_div_set_rate,
+ .enable = sh_clk_div_enable,
+ .disable = sh_clk_div_disable,
+ .set_parent = sh_clk_div6_set_parent,
+};
+
+int __init sh_clk_div6_register(struct clk *clks, int nr)
+{
+ return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
+ &sh_clk_div_enable_clk_ops);
+}
+
+int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
+{
+ return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
+ &sh_clk_div6_reparent_clk_ops);
}
+/*
+ * div4 support
+ */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
- struct clk_div4_table *d4t = clk->priv;
- struct clk_div_mult_table *table = d4t->div_mult_table;
+ struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
u32 value;
int ret;
@@ -306,107 +333,31 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
return 0;
}
-static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
-{
- struct clk_div4_table *d4t = clk->priv;
- unsigned long value;
- int idx = clk_rate_table_find(clk, clk->freq_table, rate);
- if (idx < 0)
- return idx;
-
- value = sh_clk_read(clk);
- value &= ~(0xf << clk->enable_bit);
- value |= (idx << clk->enable_bit);
- sh_clk_write(value, clk);
-
- if (d4t->kick)
- d4t->kick(clk);
-
- return 0;
-}
-
-static int sh_clk_div4_enable(struct clk *clk)
-{
- sh_clk_write(sh_clk_read(clk) & ~(1 << 8), clk);
- return 0;
-}
-
-static void sh_clk_div4_disable(struct clk *clk)
-{
- sh_clk_write(sh_clk_read(clk) | (1 << 8), clk);
-}
-
-static struct sh_clk_ops sh_clk_div4_clk_ops = {
- .recalc = sh_clk_div4_recalc,
- .set_rate = sh_clk_div4_set_rate,
- .round_rate = sh_clk_div_round_rate,
-};
-
-static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
- .recalc = sh_clk_div4_recalc,
- .set_rate = sh_clk_div4_set_rate,
- .round_rate = sh_clk_div_round_rate,
- .enable = sh_clk_div4_enable,
- .disable = sh_clk_div4_disable,
-};
-
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
- .recalc = sh_clk_div4_recalc,
- .set_rate = sh_clk_div4_set_rate,
+ .recalc = sh_clk_div_recalc,
+ .set_rate = sh_clk_div_set_rate,
.round_rate = sh_clk_div_round_rate,
- .enable = sh_clk_div4_enable,
- .disable = sh_clk_div4_disable,
+ .enable = sh_clk_div_enable,
+ .disable = sh_clk_div_disable,
.set_parent = sh_clk_div4_set_parent,
};
-static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
- struct clk_div4_table *table, struct sh_clk_ops *ops)
-{
- struct clk *clkp;
- void *freq_table;
- int nr_divs = table->div_mult_table->nr_divisors;
- int freq_table_size = sizeof(struct cpufreq_frequency_table);
- int ret = 0;
- int k;
-
- freq_table_size *= (nr_divs + 1);
- freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
- if (!freq_table) {
- pr_err("sh_clk_div4_register: unable to alloc memory\n");
- return -ENOMEM;
- }
-
- for (k = 0; !ret && (k < nr); k++) {
- clkp = clks + k;
-
- clkp->ops = ops;
- clkp->priv = table;
-
- clkp->freq_table = freq_table + (k * freq_table_size);
- clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
-
- ret = clk_register(clkp);
- }
-
- return ret;
-}
-
int __init sh_clk_div4_register(struct clk *clks, int nr,
struct clk_div4_table *table)
{
- return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
+ return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
struct clk_div4_table *table)
{
- return sh_clk_div4_register_ops(clks, nr, table,
- &sh_clk_div4_enable_clk_ops);
+ return sh_clk_div_register_ops(clks, nr, table,
+ &sh_clk_div_enable_clk_ops);
}
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
struct clk_div4_table *table)
{
- return sh_clk_div4_register_ops(clks, nr, table,
- &sh_clk_div4_reparent_clk_ops);
+ return sh_clk_div_register_ops(clks, nr, table,
+ &sh_clk_div4_reparent_clk_ops);
}
diff --git a/drivers/sh/intc/Makefile b/drivers/sh/intc/Makefile
index bb5df868d77a..44f006d09471 100644
--- a/drivers/sh/intc/Makefile
+++ b/drivers/sh/intc/Makefile
@@ -1,4 +1,4 @@
-obj-y := access.o chip.o core.o dynamic.o handle.o virq.o
+obj-y := access.o chip.o core.o handle.o virq.o
obj-$(CONFIG_INTC_BALANCING) += balancing.o
obj-$(CONFIG_INTC_USERIMASK) += userimask.o
diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c
deleted file mode 100644
index 14eb01ef5d72..000000000000
--- a/drivers/sh/intc/dynamic.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Dynamic IRQ management
- *
- * Copyright (C) 2010 Paul Mundt
- *
- * Modelled after arch/x86/kernel/apic/io_apic.c
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#define pr_fmt(fmt) "intc: " fmt
-
-#include <linux/irq.h>
-#include <linux/bitmap.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include "internals.h" /* only for activate_irq() damage.. */
-
-/*
- * The IRQ bitmap provides a global map of bound IRQ vectors for a
- * given platform. Allocation of IRQs are either static through the CPU
- * vector map, or dynamic in the case of board mux vectors or MSI.
- *
- * As this is a central point for all IRQ controllers on the system,
- * each of the available sources are mapped out here. This combined with
- * sparseirq makes it quite trivial to keep the vector map tightly packed
- * when dynamically creating IRQs, as well as tying in to otherwise
- * unused irq_desc positions in the sparse array.
- */
-
-/*
- * Dynamic IRQ allocation and deallocation
- */
-unsigned int create_irq_nr(unsigned int irq_want, int node)
-{
- int irq = irq_alloc_desc_at(irq_want, node);
- if (irq < 0)
- return 0;
-
- activate_irq(irq);
- return irq;
-}
-
-int create_irq(void)
-{
- int irq = irq_alloc_desc(numa_node_id());
- if (irq >= 0)
- activate_irq(irq);
-
- return irq;
-}
-
-void destroy_irq(unsigned int irq)
-{
- irq_free_desc(irq);
-}
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index 93cec21e788b..f30ac9354ff2 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -219,12 +219,14 @@ restart:
if (radix_tree_deref_retry(entry))
goto restart;
- irq = create_irq();
+ irq = irq_alloc_desc(numa_node_id());
if (unlikely(irq < 0)) {
pr_err("no more free IRQs, bailing..\n");
break;
}
+ activate_irq(irq);
+
pr_info("Setting up a chained VIRQ from %d -> %d\n",
irq, entry->pirq);
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
deleted file mode 100644
index 522c6c46d1be..000000000000
--- a/drivers/sh/pfc.c
+++ /dev/null
@@ -1,739 +0,0 @@
-/*
- * Pinmuxed GPIO support for SuperH.
- *
- * Copyright (C) 2008 Magnus Damm
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/bitops.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-
-static void pfc_iounmap(struct pinmux_info *pip)
-{
- int k;
-
- for (k = 0; k < pip->num_resources; k++)
- if (pip->window[k].virt)
- iounmap(pip->window[k].virt);
-
- kfree(pip->window);
- pip->window = NULL;
-}
-
-static int pfc_ioremap(struct pinmux_info *pip)
-{
- struct resource *res;
- int k;
-
- if (!pip->num_resources)
- return 0;
-
- pip->window = kzalloc(pip->num_resources * sizeof(*pip->window),
- GFP_NOWAIT);
- if (!pip->window)
- goto err1;
-
- for (k = 0; k < pip->num_resources; k++) {
- res = pip->resource + k;
- WARN_ON(resource_type(res) != IORESOURCE_MEM);
- pip->window[k].phys = res->start;
- pip->window[k].size = resource_size(res);
- pip->window[k].virt = ioremap_nocache(res->start,
- resource_size(res));
- if (!pip->window[k].virt)
- goto err2;
- }
-
- return 0;
-
-err2:
- pfc_iounmap(pip);
-err1:
- return -1;
-}
-
-static void __iomem *pfc_phys_to_virt(struct pinmux_info *pip,
- unsigned long address)
-{
- struct pfc_window *window;
- int k;
-
- /* scan through physical windows and convert address */
- for (k = 0; k < pip->num_resources; k++) {
- window = pip->window + k;
-
- if (address < window->phys)
- continue;
-
- if (address >= (window->phys + window->size))
- continue;
-
- return window->virt + (address - window->phys);
- }
-
- /* no windows defined, register must be 1:1 mapped virt:phys */
- return (void __iomem *)address;
-}
-
-static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
-{
- if (enum_id < r->begin)
- return 0;
-
- if (enum_id > r->end)
- return 0;
-
- return 1;
-}
-
-static unsigned long gpio_read_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width)
-{
- switch (reg_width) {
- case 8:
- return ioread8(mapped_reg);
- case 16:
- return ioread16(mapped_reg);
- case 32:
- return ioread32(mapped_reg);
- }
-
- BUG();
- return 0;
-}
-
-static void gpio_write_raw_reg(void __iomem *mapped_reg,
- unsigned long reg_width,
- unsigned long data)
-{
- switch (reg_width) {
- case 8:
- iowrite8(data, mapped_reg);
- return;
- case 16:
- iowrite16(data, mapped_reg);
- return;
- case 32:
- iowrite32(data, mapped_reg);
- return;
- }
-
- BUG();
-}
-
-static int gpio_read_bit(struct pinmux_data_reg *dr,
- unsigned long in_pos)
-{
- unsigned long pos;
-
- pos = dr->reg_width - (in_pos + 1);
-
- pr_debug("read_bit: addr = %lx, pos = %ld, "
- "r_width = %ld\n", dr->reg, pos, dr->reg_width);
-
- return (gpio_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
-}
-
-static void gpio_write_bit(struct pinmux_data_reg *dr,
- unsigned long in_pos, unsigned long value)
-{
- unsigned long pos;
-
- pos = dr->reg_width - (in_pos + 1);
-
- pr_debug("write_bit addr = %lx, value = %d, pos = %ld, "
- "r_width = %ld\n",
- dr->reg, !!value, pos, dr->reg_width);
-
- if (value)
- set_bit(pos, &dr->reg_shadow);
- else
- clear_bit(pos, &dr->reg_shadow);
-
- gpio_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
-}
-
-static void config_reg_helper(struct pinmux_info *gpioc,
- struct pinmux_cfg_reg *crp,
- unsigned long in_pos,
- void __iomem **mapped_regp,
- unsigned long *maskp,
- unsigned long *posp)
-{
- int k;
-
- *mapped_regp = pfc_phys_to_virt(gpioc, crp->reg);
-
- if (crp->field_width) {
- *maskp = (1 << crp->field_width) - 1;
- *posp = crp->reg_width - ((in_pos + 1) * crp->field_width);
- } else {
- *maskp = (1 << crp->var_field_width[in_pos]) - 1;
- *posp = crp->reg_width;
- for (k = 0; k <= in_pos; k++)
- *posp -= crp->var_field_width[k];
- }
-}
-
-static int read_config_reg(struct pinmux_info *gpioc,
- struct pinmux_cfg_reg *crp,
- unsigned long field)
-{
- void __iomem *mapped_reg;
- unsigned long mask, pos;
-
- config_reg_helper(gpioc, crp, field, &mapped_reg, &mask, &pos);
-
- pr_debug("read_reg: addr = %lx, field = %ld, "
- "r_width = %ld, f_width = %ld\n",
- crp->reg, field, crp->reg_width, crp->field_width);
-
- return (gpio_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
-}
-
-static void write_config_reg(struct pinmux_info *gpioc,
- struct pinmux_cfg_reg *crp,
- unsigned long field, unsigned long value)
-{
- void __iomem *mapped_reg;
- unsigned long mask, pos, data;
-
- config_reg_helper(gpioc, crp, field, &mapped_reg, &mask, &pos);
-
- pr_debug("write_reg addr = %lx, value = %ld, field = %ld, "
- "r_width = %ld, f_width = %ld\n",
- crp->reg, value, field, crp->reg_width, crp->field_width);
-
- mask = ~(mask << pos);
- value = value << pos;
-
- data = gpio_read_raw_reg(mapped_reg, crp->reg_width);
- data &= mask;
- data |= value;
-
- if (gpioc->unlock_reg)
- gpio_write_raw_reg(pfc_phys_to_virt(gpioc, gpioc->unlock_reg),
- 32, ~data);
-
- gpio_write_raw_reg(mapped_reg, crp->reg_width, data);
-}
-
-static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio)
-{
- struct pinmux_gpio *gpiop = &gpioc->gpios[gpio];
- struct pinmux_data_reg *data_reg;
- int k, n;
-
- if (!enum_in_range(gpiop->enum_id, &gpioc->data))
- return -1;
-
- k = 0;
- while (1) {
- data_reg = gpioc->data_regs + k;
-
- if (!data_reg->reg_width)
- break;
-
- data_reg->mapped_reg = pfc_phys_to_virt(gpioc, data_reg->reg);
-
- for (n = 0; n < data_reg->reg_width; n++) {
- if (data_reg->enum_ids[n] == gpiop->enum_id) {
- gpiop->flags &= ~PINMUX_FLAG_DREG;
- gpiop->flags |= (k << PINMUX_FLAG_DREG_SHIFT);
- gpiop->flags &= ~PINMUX_FLAG_DBIT;
- gpiop->flags |= (n << PINMUX_FLAG_DBIT_SHIFT);
- return 0;
- }
- }
- k++;
- }
-
- BUG();
-
- return -1;
-}
-
-static void setup_data_regs(struct pinmux_info *gpioc)
-{
- struct pinmux_data_reg *drp;
- int k;
-
- for (k = gpioc->first_gpio; k <= gpioc->last_gpio; k++)
- setup_data_reg(gpioc, k);
-
- k = 0;
- while (1) {
- drp = gpioc->data_regs + k;
-
- if (!drp->reg_width)
- break;
-
- drp->reg_shadow = gpio_read_raw_reg(drp->mapped_reg,
- drp->reg_width);
- k++;
- }
-}
-
-static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio,
- struct pinmux_data_reg **drp, int *bitp)
-{
- struct pinmux_gpio *gpiop = &gpioc->gpios[gpio];
- int k, n;
-
- if (!enum_in_range(gpiop->enum_id, &gpioc->data))
- return -1;
-
- k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
- n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
- *drp = gpioc->data_regs + k;
- *bitp = n;
- return 0;
-}
-
-static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
- struct pinmux_cfg_reg **crp,
- int *fieldp, int *valuep,
- unsigned long **cntp)
-{
- struct pinmux_cfg_reg *config_reg;
- unsigned long r_width, f_width, curr_width, ncomb;
- int k, m, n, pos, bit_pos;
-
- k = 0;
- while (1) {
- config_reg = gpioc->cfg_regs + k;
-
- r_width = config_reg->reg_width;
- f_width = config_reg->field_width;
-
- if (!r_width)
- break;
-
- pos = 0;
- m = 0;
- for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width) {
- if (f_width)
- curr_width = f_width;
- else
- curr_width = config_reg->var_field_width[m];
-
- ncomb = 1 << curr_width;
- for (n = 0; n < ncomb; n++) {
- if (config_reg->enum_ids[pos + n] == enum_id) {
- *crp = config_reg;
- *fieldp = m;
- *valuep = n;
- *cntp = &config_reg->cnt[m];
- return 0;
- }
- }
- pos += ncomb;
- m++;
- }
- k++;
- }
-
- return -1;
-}
-
-static int get_gpio_enum_id(struct pinmux_info *gpioc, unsigned gpio,
- int pos, pinmux_enum_t *enum_idp)
-{
- pinmux_enum_t enum_id = gpioc->gpios[gpio].enum_id;
- pinmux_enum_t *data = gpioc->gpio_data;
- int k;
-
- if (!enum_in_range(enum_id, &gpioc->data)) {
- if (!enum_in_range(enum_id, &gpioc->mark)) {
- pr_err("non data/mark enum_id for gpio %d\n", gpio);
- return -1;
- }
- }
-
- if (pos) {
- *enum_idp = data[pos + 1];
- return pos + 1;
- }
-
- for (k = 0; k < gpioc->gpio_data_size; k++) {
- if (data[k] == enum_id) {
- *enum_idp = data[k + 1];
- return k + 1;
- }
- }
-
- pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
- return -1;
-}
-
-enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
-
-static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
- int pinmux_type, int cfg_mode)
-{
- struct pinmux_cfg_reg *cr = NULL;
- pinmux_enum_t enum_id;
- struct pinmux_range *range;
- int in_range, pos, field, value;
- unsigned long *cntp;
-
- switch (pinmux_type) {
-
- case PINMUX_TYPE_FUNCTION:
- range = NULL;
- break;
-
- case PINMUX_TYPE_OUTPUT:
- range = &gpioc->output;
- break;
-
- case PINMUX_TYPE_INPUT:
- range = &gpioc->input;
- break;
-
- case PINMUX_TYPE_INPUT_PULLUP:
- range = &gpioc->input_pu;
- break;
-
- case PINMUX_TYPE_INPUT_PULLDOWN:
- range = &gpioc->input_pd;
- break;
-
- default:
- goto out_err;
- }
-
- pos = 0;
- enum_id = 0;
- field = 0;
- value = 0;
- while (1) {
- pos = get_gpio_enum_id(gpioc, gpio, pos, &enum_id);
- if (pos <= 0)
- goto out_err;
-
- if (!enum_id)
- break;
-
- /* first check if this is a function enum */
- in_range = enum_in_range(enum_id, &gpioc->function);
- if (!in_range) {
- /* not a function enum */
- if (range) {
- /*
- * other range exists, so this pin is
- * a regular GPIO pin that now is being
- * bound to a specific direction.
- *
- * for this case we only allow function enums
- * and the enums that match the other range.
- */
- in_range = enum_in_range(enum_id, range);
-
- /*
- * special case pass through for fixed
- * input-only or output-only pins without
- * function enum register association.
- */
- if (in_range && enum_id == range->force)
- continue;
- } else {
- /*
- * no other range exists, so this pin
- * must then be of the function type.
- *
- * allow function type pins to select
- * any combination of function/in/out
- * in their MARK lists.
- */
- in_range = 1;
- }
- }
-
- if (!in_range)
- continue;
-
- if (get_config_reg(gpioc, enum_id, &cr,
- &field, &value, &cntp) != 0)
- goto out_err;
-
- switch (cfg_mode) {
- case GPIO_CFG_DRYRUN:
- if (!*cntp ||
- (read_config_reg(gpioc, cr, field) != value))
- continue;
- break;
-
- case GPIO_CFG_REQ:
- write_config_reg(gpioc, cr, field, value);
- *cntp = *cntp + 1;
- break;
-
- case GPIO_CFG_FREE:
- *cntp = *cntp - 1;
- break;
- }
- }
-
- return 0;
- out_err:
- return -1;
-}
-
-static DEFINE_SPINLOCK(gpio_lock);
-
-static struct pinmux_info *chip_to_pinmux(struct gpio_chip *chip)
-{
- return container_of(chip, struct pinmux_info, chip);
-}
-
-static int sh_gpio_request(struct gpio_chip *chip, unsigned offset)
-{
- struct pinmux_info *gpioc = chip_to_pinmux(chip);
- struct pinmux_data_reg *dummy;
- unsigned long flags;
- int i, ret, pinmux_type;
-
- ret = -EINVAL;
-
- if (!gpioc)
- goto err_out;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- if ((gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE) != PINMUX_TYPE_NONE)
- goto err_unlock;
-
- /* setup pin function here if no data is associated with pin */
-
- if (get_data_reg(gpioc, offset, &dummy, &i) != 0)
- pinmux_type = PINMUX_TYPE_FUNCTION;
- else
- pinmux_type = PINMUX_TYPE_GPIO;
-
- if (pinmux_type == PINMUX_TYPE_FUNCTION) {
- if (pinmux_config_gpio(gpioc, offset,
- pinmux_type,
- GPIO_CFG_DRYRUN) != 0)
- goto err_unlock;
-
- if (pinmux_config_gpio(gpioc, offset,
- pinmux_type,
- GPIO_CFG_REQ) != 0)
- BUG();
- }
-
- gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
- gpioc->gpios[offset].flags |= pinmux_type;
-
- ret = 0;
- err_unlock:
- spin_unlock_irqrestore(&gpio_lock, flags);
- err_out:
- return ret;
-}
-
-static void sh_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
- struct pinmux_info *gpioc = chip_to_pinmux(chip);
- unsigned long flags;
- int pinmux_type;
-
- if (!gpioc)
- return;
-
- spin_lock_irqsave(&gpio_lock, flags);
-
- pinmux_type = gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE;
- pinmux_config_gpio(gpioc, offset, pinmux_type, GPIO_CFG_FREE);
- gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
- gpioc->gpios[offset].flags |= PINMUX_TYPE_NONE;
-
- spin_unlock_irqrestore(&gpio_lock, flags);
-}
-
-static int pinmux_direction(struct pinmux_info *gpioc,
- unsigned gpio, int new_pinmux_type)
-{
- int pinmux_type;
- int ret = -EINVAL;
-
- if (!gpioc)
- goto err_out;
-
- pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE;
-
- switch (pinmux_type) {
- case PINMUX_TYPE_GPIO:
- break;
- case PINMUX_TYPE_OUTPUT:
- case PINMUX_TYPE_INPUT:
- case PINMUX_TYPE_INPUT_PULLUP:
- case PINMUX_TYPE_INPUT_PULLDOWN:
- pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE);
- break;
- default:
- goto err_out;
- }
-
- if (pinmux_config_gpio(gpioc, gpio,
- new_pinmux_type,
- GPIO_CFG_DRYRUN) != 0)
- goto err_out;
-
- if (pinmux_config_gpio(gpioc, gpio,
- new_pinmux_type,
- GPIO_CFG_REQ) != 0)
- BUG();
-
- gpioc->gpios[gpio].flags &= ~PINMUX_FLAG_TYPE;
- gpioc->gpios[gpio].flags |= new_pinmux_type;
-
- ret = 0;
- err_out:
- return ret;
-}
-
-static int sh_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct pinmux_info *gpioc = chip_to_pinmux(chip);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&gpio_lock, flags);
- ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_INPUT);
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return ret;
-}
-
-static void sh_gpio_set_value(struct pinmux_info *gpioc,
- unsigned gpio, int value)
-{
- struct pinmux_data_reg *dr = NULL;
- int bit = 0;
-
- if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0)
- BUG();
- else
- gpio_write_bit(dr, bit, value);
-}
-
-static int sh_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct pinmux_info *gpioc = chip_to_pinmux(chip);
- unsigned long flags;
- int ret;
-
- sh_gpio_set_value(gpioc, offset, value);
- spin_lock_irqsave(&gpio_lock, flags);
- ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_OUTPUT);
- spin_unlock_irqrestore(&gpio_lock, flags);
-
- return ret;
-}
-
-static int sh_gpio_get_value(struct pinmux_info *gpioc, unsigned gpio)
-{
- struct pinmux_data_reg *dr = NULL;
- int bit = 0;
-
- if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0)
- return -EINVAL;
-
- return gpio_read_bit(dr, bit);
-}
-
-static int sh_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- return sh_gpio_get_value(chip_to_pinmux(chip), offset);
-}
-
-static void sh_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- sh_gpio_set_value(chip_to_pinmux(chip), offset, value);
-}
-
-static int sh_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct pinmux_info *gpioc = chip_to_pinmux(chip);
- pinmux_enum_t enum_id;
- pinmux_enum_t *enum_ids;
- int i, k, pos;
-
- pos = 0;
- enum_id = 0;
- while (1) {
- pos = get_gpio_enum_id(gpioc, offset, pos, &enum_id);
- if (pos <= 0 || !enum_id)
- break;
-
- for (i = 0; i < gpioc->gpio_irq_size; i++) {
- enum_ids = gpioc->gpio_irq[i].enum_ids;
- for (k = 0; enum_ids[k]; k++) {
- if (enum_ids[k] == enum_id)
- return gpioc->gpio_irq[i].irq;
- }
- }
- }
-
- return -ENOSYS;
-}
-
-int register_pinmux(struct pinmux_info *pip)
-{
- struct gpio_chip *chip = &pip->chip;
- int ret;
-
- pr_info("%s handling gpio %d -> %d\n",
- pip->name, pip->first_gpio, pip->last_gpio);
-
- ret = pfc_ioremap(pip);
- if (ret < 0)
- return ret;
-
- setup_data_regs(pip);
-
- chip->request = sh_gpio_request;
- chip->free = sh_gpio_free;
- chip->direction_input = sh_gpio_direction_input;
- chip->get = sh_gpio_get;
- chip->direction_output = sh_gpio_direction_output;
- chip->set = sh_gpio_set;
- chip->to_irq = sh_gpio_to_irq;
-
- WARN_ON(pip->first_gpio != 0); /* needs testing */
-
- chip->label = pip->name;
- chip->owner = THIS_MODULE;
- chip->base = pip->first_gpio;
- chip->ngpio = (pip->last_gpio - pip->first_gpio) + 1;
-
- ret = gpiochip_add(chip);
- if (ret < 0)
- pfc_iounmap(pip);
-
- return ret;
-}
-
-int unregister_pinmux(struct pinmux_info *pip)
-{
- pr_info("%s deregistering\n", pip->name);
- pfc_iounmap(pip);
- return gpiochip_remove(&pip->chip);
-}
diff --git a/drivers/sh/pfc/Kconfig b/drivers/sh/pfc/Kconfig
new file mode 100644
index 000000000000..804f9ad1bf4a
--- /dev/null
+++ b/drivers/sh/pfc/Kconfig
@@ -0,0 +1,26 @@
+comment "Pin function controller options"
+
+config SH_PFC
+ # XXX move off the gpio dependency
+ depends on GENERIC_GPIO
+ select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB
+ select PINCTRL_SH_PFC
+ def_bool y
+
+#
+# Placeholder for now, rehome to drivers/pinctrl once the PFC APIs
+# have settled.
+#
+config PINCTRL_SH_PFC
+ tristate "SuperH PFC pin controller driver"
+ depends on SH_PFC
+ select PINCTRL
+ select PINMUX
+ select PINCONF
+
+config GPIO_SH_PFC
+ tristate "SuperH PFC GPIO support"
+ depends on SH_PFC && GPIOLIB
+ help
+ This enables support for GPIOs within the SoC's pin function
+ controller.
diff --git a/drivers/sh/pfc/Makefile b/drivers/sh/pfc/Makefile
new file mode 100644
index 000000000000..7916027cce37
--- /dev/null
+++ b/drivers/sh/pfc/Makefile
@@ -0,0 +1,3 @@
+obj-y += core.o
+obj-$(CONFIG_PINCTRL_SH_PFC) += pinctrl.o
+obj-$(CONFIG_GPIO_SH_PFC) += gpio.o
diff --git a/drivers/sh/pfc/core.c b/drivers/sh/pfc/core.c
new file mode 100644
index 000000000000..68169373c98b
--- /dev/null
+++ b/drivers/sh/pfc/core.c
@@ -0,0 +1,572 @@
+/*
+ * SuperH Pin Function Controller support.
+ *
+ * Copyright (C) 2008 Magnus Damm
+ * Copyright (C) 2009 - 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "sh_pfc " KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sh_pfc.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+#include <linux/pinctrl/machine.h>
+
+static struct sh_pfc *sh_pfc __read_mostly;
+
+static inline bool sh_pfc_initialized(void)
+{
+ return !!sh_pfc;
+}
+
+static void pfc_iounmap(struct sh_pfc *pfc)
+{
+ int k;
+
+ for (k = 0; k < pfc->num_resources; k++)
+ if (pfc->window[k].virt)
+ iounmap(pfc->window[k].virt);
+
+ kfree(pfc->window);
+ pfc->window = NULL;
+}
+
+static int pfc_ioremap(struct sh_pfc *pfc)
+{
+ struct resource *res;
+ int k;
+
+ if (!pfc->num_resources)
+ return 0;
+
+ pfc->window = kzalloc(pfc->num_resources * sizeof(*pfc->window),
+ GFP_NOWAIT);
+ if (!pfc->window)
+ goto err1;
+
+ for (k = 0; k < pfc->num_resources; k++) {
+ res = pfc->resource + k;
+ WARN_ON(resource_type(res) != IORESOURCE_MEM);
+ pfc->window[k].phys = res->start;
+ pfc->window[k].size = resource_size(res);
+ pfc->window[k].virt = ioremap_nocache(res->start,
+ resource_size(res));
+ if (!pfc->window[k].virt)
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ pfc_iounmap(pfc);
+err1:
+ return -1;
+}
+
+static void __iomem *pfc_phys_to_virt(struct sh_pfc *pfc,
+ unsigned long address)
+{
+ struct pfc_window *window;
+ int k;
+
+ /* scan through physical windows and convert address */
+ for (k = 0; k < pfc->num_resources; k++) {
+ window = pfc->window + k;
+
+ if (address < window->phys)
+ continue;
+
+ if (address >= (window->phys + window->size))
+ continue;
+
+ return window->virt + (address - window->phys);
+ }
+
+ /* no windows defined, register must be 1:1 mapped virt:phys */
+ return (void __iomem *)address;
+}
+
+static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
+{
+ if (enum_id < r->begin)
+ return 0;
+
+ if (enum_id > r->end)
+ return 0;
+
+ return 1;
+}
+
+static unsigned long gpio_read_raw_reg(void __iomem *mapped_reg,
+ unsigned long reg_width)
+{
+ switch (reg_width) {
+ case 8:
+ return ioread8(mapped_reg);
+ case 16:
+ return ioread16(mapped_reg);
+ case 32:
+ return ioread32(mapped_reg);
+ }
+
+ BUG();
+ return 0;
+}
+
+static void gpio_write_raw_reg(void __iomem *mapped_reg,
+ unsigned long reg_width,
+ unsigned long data)
+{
+ switch (reg_width) {
+ case 8:
+ iowrite8(data, mapped_reg);
+ return;
+ case 16:
+ iowrite16(data, mapped_reg);
+ return;
+ case 32:
+ iowrite32(data, mapped_reg);
+ return;
+ }
+
+ BUG();
+}
+
+int sh_pfc_read_bit(struct pinmux_data_reg *dr, unsigned long in_pos)
+{
+ unsigned long pos;
+
+ pos = dr->reg_width - (in_pos + 1);
+
+ pr_debug("read_bit: addr = %lx, pos = %ld, "
+ "r_width = %ld\n", dr->reg, pos, dr->reg_width);
+
+ return (gpio_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_read_bit);
+
+void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
+ unsigned long value)
+{
+ unsigned long pos;
+
+ pos = dr->reg_width - (in_pos + 1);
+
+ pr_debug("write_bit addr = %lx, value = %d, pos = %ld, "
+ "r_width = %ld\n",
+ dr->reg, !!value, pos, dr->reg_width);
+
+ if (value)
+ set_bit(pos, &dr->reg_shadow);
+ else
+ clear_bit(pos, &dr->reg_shadow);
+
+ gpio_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
+}
+EXPORT_SYMBOL_GPL(sh_pfc_write_bit);
+
+static void config_reg_helper(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long in_pos,
+ void __iomem **mapped_regp,
+ unsigned long *maskp,
+ unsigned long *posp)
+{
+ int k;
+
+ *mapped_regp = pfc_phys_to_virt(pfc, crp->reg);
+
+ if (crp->field_width) {
+ *maskp = (1 << crp->field_width) - 1;
+ *posp = crp->reg_width - ((in_pos + 1) * crp->field_width);
+ } else {
+ *maskp = (1 << crp->var_field_width[in_pos]) - 1;
+ *posp = crp->reg_width;
+ for (k = 0; k <= in_pos; k++)
+ *posp -= crp->var_field_width[k];
+ }
+}
+
+static int read_config_reg(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field)
+{
+ void __iomem *mapped_reg;
+ unsigned long mask, pos;
+
+ config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
+
+ pr_debug("read_reg: addr = %lx, field = %ld, "
+ "r_width = %ld, f_width = %ld\n",
+ crp->reg, field, crp->reg_width, crp->field_width);
+
+ return (gpio_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
+}
+
+static void write_config_reg(struct sh_pfc *pfc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field, unsigned long value)
+{
+ void __iomem *mapped_reg;
+ unsigned long mask, pos, data;
+
+ config_reg_helper(pfc, crp, field, &mapped_reg, &mask, &pos);
+
+ pr_debug("write_reg addr = %lx, value = %ld, field = %ld, "
+ "r_width = %ld, f_width = %ld\n",
+ crp->reg, value, field, crp->reg_width, crp->field_width);
+
+ mask = ~(mask << pos);
+ value = value << pos;
+
+ data = gpio_read_raw_reg(mapped_reg, crp->reg_width);
+ data &= mask;
+ data |= value;
+
+ if (pfc->unlock_reg)
+ gpio_write_raw_reg(pfc_phys_to_virt(pfc, pfc->unlock_reg),
+ 32, ~data);
+
+ gpio_write_raw_reg(mapped_reg, crp->reg_width, data);
+}
+
+static int setup_data_reg(struct sh_pfc *pfc, unsigned gpio)
+{
+ struct pinmux_gpio *gpiop = &pfc->gpios[gpio];
+ struct pinmux_data_reg *data_reg;
+ int k, n;
+
+ if (!enum_in_range(gpiop->enum_id, &pfc->data))
+ return -1;
+
+ k = 0;
+ while (1) {
+ data_reg = pfc->data_regs + k;
+
+ if (!data_reg->reg_width)
+ break;
+
+ data_reg->mapped_reg = pfc_phys_to_virt(pfc, data_reg->reg);
+
+ for (n = 0; n < data_reg->reg_width; n++) {
+ if (data_reg->enum_ids[n] == gpiop->enum_id) {
+ gpiop->flags &= ~PINMUX_FLAG_DREG;
+ gpiop->flags |= (k << PINMUX_FLAG_DREG_SHIFT);
+ gpiop->flags &= ~PINMUX_FLAG_DBIT;
+ gpiop->flags |= (n << PINMUX_FLAG_DBIT_SHIFT);
+ return 0;
+ }
+ }
+ k++;
+ }
+
+ BUG();
+
+ return -1;
+}
+
+static void setup_data_regs(struct sh_pfc *pfc)
+{
+ struct pinmux_data_reg *drp;
+ int k;
+
+ for (k = pfc->first_gpio; k <= pfc->last_gpio; k++)
+ setup_data_reg(pfc, k);
+
+ k = 0;
+ while (1) {
+ drp = pfc->data_regs + k;
+
+ if (!drp->reg_width)
+ break;
+
+ drp->reg_shadow = gpio_read_raw_reg(drp->mapped_reg,
+ drp->reg_width);
+ k++;
+ }
+}
+
+int sh_pfc_get_data_reg(struct sh_pfc *pfc, unsigned gpio,
+ struct pinmux_data_reg **drp, int *bitp)
+{
+ struct pinmux_gpio *gpiop = &pfc->gpios[gpio];
+ int k, n;
+
+ if (!enum_in_range(gpiop->enum_id, &pfc->data))
+ return -1;
+
+ k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
+ n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
+ *drp = pfc->data_regs + k;
+ *bitp = n;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_get_data_reg);
+
+static int get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
+ struct pinmux_cfg_reg **crp,
+ int *fieldp, int *valuep,
+ unsigned long **cntp)
+{
+ struct pinmux_cfg_reg *config_reg;
+ unsigned long r_width, f_width, curr_width, ncomb;
+ int k, m, n, pos, bit_pos;
+
+ k = 0;
+ while (1) {
+ config_reg = pfc->cfg_regs + k;
+
+ r_width = config_reg->reg_width;
+ f_width = config_reg->field_width;
+
+ if (!r_width)
+ break;
+
+ pos = 0;
+ m = 0;
+ for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width) {
+ if (f_width)
+ curr_width = f_width;
+ else
+ curr_width = config_reg->var_field_width[m];
+
+ ncomb = 1 << curr_width;
+ for (n = 0; n < ncomb; n++) {
+ if (config_reg->enum_ids[pos + n] == enum_id) {
+ *crp = config_reg;
+ *fieldp = m;
+ *valuep = n;
+ *cntp = &config_reg->cnt[m];
+ return 0;
+ }
+ }
+ pos += ncomb;
+ m++;
+ }
+ k++;
+ }
+
+ return -1;
+}
+
+int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
+ pinmux_enum_t *enum_idp)
+{
+ pinmux_enum_t enum_id = pfc->gpios[gpio].enum_id;
+ pinmux_enum_t *data = pfc->gpio_data;
+ int k;
+
+ if (!enum_in_range(enum_id, &pfc->data)) {
+ if (!enum_in_range(enum_id, &pfc->mark)) {
+ pr_err("non data/mark enum_id for gpio %d\n", gpio);
+ return -1;
+ }
+ }
+
+ if (pos) {
+ *enum_idp = data[pos + 1];
+ return pos + 1;
+ }
+
+ for (k = 0; k < pfc->gpio_data_size; k++) {
+ if (data[k] == enum_id) {
+ *enum_idp = data[k + 1];
+ return k + 1;
+ }
+ }
+
+ pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
+ return -1;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_gpio_to_enum);
+
+int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
+ int cfg_mode)
+{
+ struct pinmux_cfg_reg *cr = NULL;
+ pinmux_enum_t enum_id;
+ struct pinmux_range *range;
+ int in_range, pos, field, value;
+ unsigned long *cntp;
+
+ switch (pinmux_type) {
+
+ case PINMUX_TYPE_FUNCTION:
+ range = NULL;
+ break;
+
+ case PINMUX_TYPE_OUTPUT:
+ range = &pfc->output;
+ break;
+
+ case PINMUX_TYPE_INPUT:
+ range = &pfc->input;
+ break;
+
+ case PINMUX_TYPE_INPUT_PULLUP:
+ range = &pfc->input_pu;
+ break;
+
+ case PINMUX_TYPE_INPUT_PULLDOWN:
+ range = &pfc->input_pd;
+ break;
+
+ default:
+ goto out_err;
+ }
+
+ pos = 0;
+ enum_id = 0;
+ field = 0;
+ value = 0;
+ while (1) {
+ pos = sh_pfc_gpio_to_enum(pfc, gpio, pos, &enum_id);
+ if (pos <= 0)
+ goto out_err;
+
+ if (!enum_id)
+ break;
+
+ /* first check if this is a function enum */
+ in_range = enum_in_range(enum_id, &pfc->function);
+ if (!in_range) {
+ /* not a function enum */
+ if (range) {
+ /*
+ * other range exists, so this pin is
+ * a regular GPIO pin that now is being
+ * bound to a specific direction.
+ *
+ * for this case we only allow function enums
+ * and the enums that match the other range.
+ */
+ in_range = enum_in_range(enum_id, range);
+
+ /*
+ * special case pass through for fixed
+ * input-only or output-only pins without
+ * function enum register association.
+ */
+ if (in_range && enum_id == range->force)
+ continue;
+ } else {
+ /*
+ * no other range exists, so this pin
+ * must then be of the function type.
+ *
+ * allow function type pins to select
+ * any combination of function/in/out
+ * in their MARK lists.
+ */
+ in_range = 1;
+ }
+ }
+
+ if (!in_range)
+ continue;
+
+ if (get_config_reg(pfc, enum_id, &cr,
+ &field, &value, &cntp) != 0)
+ goto out_err;
+
+ switch (cfg_mode) {
+ case GPIO_CFG_DRYRUN:
+ if (!*cntp ||
+ (read_config_reg(pfc, cr, field) != value))
+ continue;
+ break;
+
+ case GPIO_CFG_REQ:
+ write_config_reg(pfc, cr, field, value);
+ *cntp = *cntp + 1;
+ break;
+
+ case GPIO_CFG_FREE:
+ *cntp = *cntp - 1;
+ break;
+ }
+ }
+
+ return 0;
+ out_err:
+ return -1;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_config_gpio);
+
+int register_sh_pfc(struct sh_pfc *pfc)
+{
+ int (*initroutine)(struct sh_pfc *) = NULL;
+ int ret;
+
+ /*
+ * Ensure that the type encoding fits
+ */
+ BUILD_BUG_ON(PINMUX_FLAG_TYPE > ((1 << PINMUX_FLAG_DBIT_SHIFT) - 1));
+
+ if (sh_pfc)
+ return -EBUSY;
+
+ ret = pfc_ioremap(pfc);
+ if (unlikely(ret < 0))
+ return ret;
+
+ spin_lock_init(&pfc->lock);
+
+ pinctrl_provide_dummies();
+ setup_data_regs(pfc);
+
+ sh_pfc = pfc;
+
+ /*
+ * Initialize pinctrl bindings first
+ */
+ initroutine = symbol_request(sh_pfc_register_pinctrl);
+ if (initroutine) {
+ ret = (*initroutine)(pfc);
+ symbol_put_addr(initroutine);
+
+ if (unlikely(ret != 0))
+ goto err;
+ } else {
+ pr_err("failed to initialize pinctrl bindings\n");
+ goto err;
+ }
+
+ /*
+ * Then the GPIO chip
+ */
+ initroutine = symbol_request(sh_pfc_register_gpiochip);
+ if (initroutine) {
+ ret = (*initroutine)(pfc);
+ symbol_put_addr(initroutine);
+
+ /*
+ * If the GPIO chip fails to come up we still leave the
+ * PFC state as it is, given that there are already
+ * extant users of it that have succeeded by this point.
+ */
+ if (unlikely(ret != 0)) {
+ pr_notice("failed to init GPIO chip, ignoring...\n");
+ ret = 0;
+ }
+ }
+
+ pr_info("%s support registered\n", pfc->name);
+
+ return 0;
+
+err:
+ pfc_iounmap(pfc);
+ sh_pfc = NULL;
+
+ return ret;
+}
diff --git a/drivers/sh/pfc/gpio.c b/drivers/sh/pfc/gpio.c
new file mode 100644
index 000000000000..62bca98474a9
--- /dev/null
+++ b/drivers/sh/pfc/gpio.c
@@ -0,0 +1,239 @@
+/*
+ * SuperH Pin Function Controller GPIO driver.
+ *
+ * Copyright (C) 2008 Magnus Damm
+ * Copyright (C) 2009 - 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define pr_fmt(fmt) "sh_pfc " KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+
+struct sh_pfc_chip {
+ struct sh_pfc *pfc;
+ struct gpio_chip gpio_chip;
+};
+
+static struct sh_pfc_chip *gpio_to_pfc_chip(struct gpio_chip *gc)
+{
+ return container_of(gc, struct sh_pfc_chip, gpio_chip);
+}
+
+static struct sh_pfc *gpio_to_pfc(struct gpio_chip *gc)
+{
+ return gpio_to_pfc_chip(gc)->pfc;
+}
+
+static int sh_gpio_request(struct gpio_chip *gc, unsigned offset)
+{
+ return pinctrl_request_gpio(offset);
+}
+
+static void sh_gpio_free(struct gpio_chip *gc, unsigned offset)
+{
+ pinctrl_free_gpio(offset);
+}
+
+static void sh_gpio_set_value(struct sh_pfc *pfc, unsigned gpio, int value)
+{
+ struct pinmux_data_reg *dr = NULL;
+ int bit = 0;
+
+ if (!pfc || sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
+ BUG();
+ else
+ sh_pfc_write_bit(dr, bit, value);
+}
+
+static int sh_gpio_get_value(struct sh_pfc *pfc, unsigned gpio)
+{
+ struct pinmux_data_reg *dr = NULL;
+ int bit = 0;
+
+ if (!pfc || sh_pfc_get_data_reg(pfc, gpio, &dr, &bit) != 0)
+ return -EINVAL;
+
+ return sh_pfc_read_bit(dr, bit);
+}
+
+static int sh_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ return pinctrl_gpio_direction_input(offset);
+}
+
+static int sh_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
+ int value)
+{
+ sh_gpio_set_value(gpio_to_pfc(gc), offset, value);
+
+ return pinctrl_gpio_direction_output(offset);
+}
+
+static int sh_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ return sh_gpio_get_value(gpio_to_pfc(gc), offset);
+}
+
+static void sh_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ sh_gpio_set_value(gpio_to_pfc(gc), offset, value);
+}
+
+static int sh_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct sh_pfc *pfc = gpio_to_pfc(gc);
+ pinmux_enum_t enum_id;
+ pinmux_enum_t *enum_ids;
+ int i, k, pos;
+
+ pos = 0;
+ enum_id = 0;
+ while (1) {
+ pos = sh_pfc_gpio_to_enum(pfc, offset, pos, &enum_id);
+ if (pos <= 0 || !enum_id)
+ break;
+
+ for (i = 0; i < pfc->gpio_irq_size; i++) {
+ enum_ids = pfc->gpio_irq[i].enum_ids;
+ for (k = 0; enum_ids[k]; k++) {
+ if (enum_ids[k] == enum_id)
+ return pfc->gpio_irq[i].irq;
+ }
+ }
+ }
+
+ return -ENOSYS;
+}
+
+static void sh_pfc_gpio_setup(struct sh_pfc_chip *chip)
+{
+ struct sh_pfc *pfc = chip->pfc;
+ struct gpio_chip *gc = &chip->gpio_chip;
+
+ gc->request = sh_gpio_request;
+ gc->free = sh_gpio_free;
+ gc->direction_input = sh_gpio_direction_input;
+ gc->get = sh_gpio_get;
+ gc->direction_output = sh_gpio_direction_output;
+ gc->set = sh_gpio_set;
+ gc->to_irq = sh_gpio_to_irq;
+
+ WARN_ON(pfc->first_gpio != 0); /* needs testing */
+
+ gc->label = pfc->name;
+ gc->owner = THIS_MODULE;
+ gc->base = pfc->first_gpio;
+ gc->ngpio = (pfc->last_gpio - pfc->first_gpio) + 1;
+}
+
+int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
+{
+ struct sh_pfc_chip *chip;
+ int ret;
+
+ chip = kzalloc(sizeof(struct sh_pfc_chip), GFP_KERNEL);
+ if (unlikely(!chip))
+ return -ENOMEM;
+
+ chip->pfc = pfc;
+
+ sh_pfc_gpio_setup(chip);
+
+ ret = gpiochip_add(&chip->gpio_chip);
+ if (unlikely(ret < 0))
+ kfree(chip);
+
+ pr_info("%s handling gpio %d -> %d\n",
+ pfc->name, pfc->first_gpio, pfc->last_gpio);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_register_gpiochip);
+
+static int sh_pfc_gpio_match(struct gpio_chip *gc, void *data)
+{
+ return !!strstr(gc->label, data);
+}
+
+static int __devinit sh_pfc_gpio_probe(struct platform_device *pdev)
+{
+ struct sh_pfc_chip *chip;
+ struct gpio_chip *gc;
+
+ gc = gpiochip_find("_pfc", sh_pfc_gpio_match);
+ if (unlikely(!gc)) {
+ pr_err("Cant find gpio chip\n");
+ return -ENODEV;
+ }
+
+ chip = gpio_to_pfc_chip(gc);
+ platform_set_drvdata(pdev, chip);
+
+ pr_info("attaching to GPIO chip %s\n", chip->pfc->name);
+
+ return 0;
+}
+
+static int __devexit sh_pfc_gpio_remove(struct platform_device *pdev)
+{
+ struct sh_pfc_chip *chip = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&chip->gpio_chip);
+ if (unlikely(ret < 0))
+ return ret;
+
+ kfree(chip);
+ return 0;
+}
+
+static struct platform_driver sh_pfc_gpio_driver = {
+ .probe = sh_pfc_gpio_probe,
+ .remove = __devexit_p(sh_pfc_gpio_remove),
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct platform_device sh_pfc_gpio_device = {
+ .name = KBUILD_MODNAME,
+ .id = -1,
+};
+
+static int __init sh_pfc_gpio_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&sh_pfc_gpio_driver);
+ if (likely(!rc)) {
+ rc = platform_device_register(&sh_pfc_gpio_device);
+ if (unlikely(rc))
+ platform_driver_unregister(&sh_pfc_gpio_driver);
+ }
+
+ return rc;
+}
+
+static void __exit sh_pfc_gpio_exit(void)
+{
+ platform_device_unregister(&sh_pfc_gpio_device);
+ platform_driver_unregister(&sh_pfc_gpio_driver);
+}
+
+module_init(sh_pfc_gpio_init);
+module_exit(sh_pfc_gpio_exit);
+
+MODULE_AUTHOR("Magnus Damm, Paul Mundt");
+MODULE_DESCRIPTION("GPIO driver for SuperH pin function controller");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:pfc-gpio");
diff --git a/drivers/sh/pfc/pinctrl.c b/drivers/sh/pfc/pinctrl.c
new file mode 100644
index 000000000000..0802b6c0d653
--- /dev/null
+++ b/drivers/sh/pfc/pinctrl.c
@@ -0,0 +1,530 @@
+/*
+ * SuperH Pin Function Controller pinmux support.
+ *
+ * Copyright (C) 2012 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#define DRV_NAME "pinctrl-sh_pfc"
+
+#define pr_fmt(fmt) DRV_NAME " " KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sh_pfc.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+
+struct sh_pfc_pinctrl {
+ struct pinctrl_dev *pctl;
+ struct sh_pfc *pfc;
+
+ struct pinmux_gpio **functions;
+ unsigned int nr_functions;
+
+ struct pinctrl_pin_desc *pads;
+ unsigned int nr_pads;
+
+ spinlock_t lock;
+};
+
+static struct sh_pfc_pinctrl *sh_pfc_pmx;
+
+static int sh_pfc_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pmx->nr_pads;
+}
+
+static const char *sh_pfc_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pmx->pads[selector].name;
+}
+
+static int sh_pfc_get_group_pins(struct pinctrl_dev *pctldev, unsigned group,
+ const unsigned **pins, unsigned *num_pins)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = &pmx->pads[group].number;
+ *num_pins = 1;
+
+ return 0;
+}
+
+static void sh_pfc_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
+ unsigned offset)
+{
+ seq_printf(s, "%s", DRV_NAME);
+}
+
+static struct pinctrl_ops sh_pfc_pinctrl_ops = {
+ .get_groups_count = sh_pfc_get_groups_count,
+ .get_group_name = sh_pfc_get_group_name,
+ .get_group_pins = sh_pfc_get_group_pins,
+ .pin_dbg_show = sh_pfc_pin_dbg_show,
+};
+
+static int sh_pfc_get_functions_count(struct pinctrl_dev *pctldev)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pmx->nr_functions;
+}
+
+static const char *sh_pfc_get_function_name(struct pinctrl_dev *pctldev,
+ unsigned selector)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ return pmx->functions[selector]->name;
+}
+
+static int sh_pfc_get_function_groups(struct pinctrl_dev *pctldev, unsigned func,
+ const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = &pmx->functions[func]->name;
+ *num_groups = 1;
+
+ return 0;
+}
+
+static int sh_pfc_noop_enable(struct pinctrl_dev *pctldev, unsigned func,
+ unsigned group)
+{
+ return 0;
+}
+
+static void sh_pfc_noop_disable(struct pinctrl_dev *pctldev, unsigned func,
+ unsigned group)
+{
+}
+
+static inline int sh_pfc_config_function(struct sh_pfc *pfc, unsigned offset)
+{
+ if (sh_pfc_config_gpio(pfc, offset,
+ PINMUX_TYPE_FUNCTION,
+ GPIO_CFG_DRYRUN) != 0)
+ return -EINVAL;
+
+ if (sh_pfc_config_gpio(pfc, offset,
+ PINMUX_TYPE_FUNCTION,
+ GPIO_CFG_REQ) != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sh_pfc_reconfig_pin(struct sh_pfc *pfc, unsigned offset,
+ int new_type)
+{
+ unsigned long flags;
+ int pinmux_type;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&pfc->lock, flags);
+
+ pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+
+ /*
+ * See if the present config needs to first be de-configured.
+ */
+ switch (pinmux_type) {
+ case PINMUX_TYPE_GPIO:
+ break;
+ case PINMUX_TYPE_OUTPUT:
+ case PINMUX_TYPE_INPUT:
+ case PINMUX_TYPE_INPUT_PULLUP:
+ case PINMUX_TYPE_INPUT_PULLDOWN:
+ sh_pfc_config_gpio(pfc, offset, pinmux_type, GPIO_CFG_FREE);
+ break;
+ default:
+ goto err;
+ }
+
+ /*
+ * Dry run
+ */
+ if (sh_pfc_config_gpio(pfc, offset, new_type,
+ GPIO_CFG_DRYRUN) != 0)
+ goto err;
+
+ /*
+ * Request
+ */
+ if (sh_pfc_config_gpio(pfc, offset, new_type,
+ GPIO_CFG_REQ) != 0)
+ goto err;
+
+ pfc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
+ pfc->gpios[offset].flags |= new_type;
+
+ ret = 0;
+
+err:
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ return ret;
+}
+
+
+static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+ unsigned long flags;
+ int ret, pinmux_type;
+
+ spin_lock_irqsave(&pfc->lock, flags);
+
+ pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+
+ switch (pinmux_type) {
+ case PINMUX_TYPE_FUNCTION:
+ pr_notice_once("Use of GPIO API for function requests is "
+ "deprecated, convert to pinctrl\n");
+ /* handle for now */
+ ret = sh_pfc_config_function(pfc, offset);
+ if (unlikely(ret < 0))
+ goto err;
+
+ break;
+ case PINMUX_TYPE_GPIO:
+ break;
+ default:
+ pr_err("Unsupported mux type (%d), bailing...\n", pinmux_type);
+ return -ENOTSUPP;
+ }
+
+ ret = 0;
+
+err:
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ return ret;
+}
+
+static void sh_pfc_gpio_disable_free(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+ unsigned long flags;
+ int pinmux_type;
+
+ spin_lock_irqsave(&pfc->lock, flags);
+
+ pinmux_type = pfc->gpios[offset].flags & PINMUX_FLAG_TYPE;
+
+ sh_pfc_config_gpio(pfc, offset, pinmux_type, GPIO_CFG_FREE);
+
+ spin_unlock_irqrestore(&pfc->lock, flags);
+}
+
+static int sh_pfc_gpio_set_direction(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned offset, bool input)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ int type = input ? PINMUX_TYPE_INPUT : PINMUX_TYPE_OUTPUT;
+
+ return sh_pfc_reconfig_pin(pmx->pfc, offset, type);
+}
+
+static struct pinmux_ops sh_pfc_pinmux_ops = {
+ .get_functions_count = sh_pfc_get_functions_count,
+ .get_function_name = sh_pfc_get_function_name,
+ .get_function_groups = sh_pfc_get_function_groups,
+ .enable = sh_pfc_noop_enable,
+ .disable = sh_pfc_noop_disable,
+ .gpio_request_enable = sh_pfc_gpio_request_enable,
+ .gpio_disable_free = sh_pfc_gpio_disable_free,
+ .gpio_set_direction = sh_pfc_gpio_set_direction,
+};
+
+static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+
+ *config = pfc->gpios[pin].flags & PINMUX_FLAG_TYPE;
+
+ return 0;
+}
+
+static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long config)
+{
+ struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
+ struct sh_pfc *pfc = pmx->pfc;
+
+ /* Validate the new type */
+ if (config >= PINMUX_FLAG_TYPE)
+ return -EINVAL;
+
+ return sh_pfc_reconfig_pin(pmx->pfc, pin, config);
+}
+
+static void sh_pfc_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned pin)
+{
+ const char *pinmux_type_str[] = {
+ [PINMUX_TYPE_NONE] = "none",
+ [PINMUX_TYPE_FUNCTION] = "function",
+ [PINMUX_TYPE_GPIO] = "gpio",
+ [PINMUX_TYPE_OUTPUT] = "output",
+ [PINMUX_TYPE_INPUT] = "input",
+ [PINMUX_TYPE_INPUT_PULLUP] = "input bias pull up",
+ [PINMUX_TYPE_INPUT_PULLDOWN] = "input bias pull down",
+ };
+ unsigned long config;
+ int rc;
+
+ rc = sh_pfc_pinconf_get(pctldev, pin, &config);
+ if (unlikely(rc != 0))
+ return;
+
+ seq_printf(s, " %s", pinmux_type_str[config]);
+}
+
+static struct pinconf_ops sh_pfc_pinconf_ops = {
+ .pin_config_get = sh_pfc_pinconf_get,
+ .pin_config_set = sh_pfc_pinconf_set,
+ .pin_config_dbg_show = sh_pfc_pinconf_dbg_show,
+};
+
+static struct pinctrl_gpio_range sh_pfc_gpio_range = {
+ .name = DRV_NAME,
+ .id = 0,
+};
+
+static struct pinctrl_desc sh_pfc_pinctrl_desc = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .pctlops = &sh_pfc_pinctrl_ops,
+ .pmxops = &sh_pfc_pinmux_ops,
+ .confops = &sh_pfc_pinconf_ops,
+};
+
+int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
+{
+ sh_pfc_pmx = kzalloc(sizeof(struct sh_pfc_pinctrl), GFP_KERNEL);
+ if (unlikely(!sh_pfc_pmx))
+ return -ENOMEM;
+
+ spin_lock_init(&sh_pfc_pmx->lock);
+
+ sh_pfc_pmx->pfc = pfc;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sh_pfc_register_pinctrl);
+
+static inline void __devinit sh_pfc_map_one_gpio(struct sh_pfc *pfc,
+ struct sh_pfc_pinctrl *pmx,
+ struct pinmux_gpio *gpio,
+ unsigned offset)
+{
+ struct pinmux_data_reg *dummy;
+ unsigned long flags;
+ int bit;
+
+ gpio->flags &= ~PINMUX_FLAG_TYPE;
+
+ if (sh_pfc_get_data_reg(pfc, offset, &dummy, &bit) == 0)
+ gpio->flags |= PINMUX_TYPE_GPIO;
+ else {
+ gpio->flags |= PINMUX_TYPE_FUNCTION;
+
+ spin_lock_irqsave(&pmx->lock, flags);
+ pmx->nr_functions++;
+ spin_unlock_irqrestore(&pmx->lock, flags);
+ }
+}
+
+/* pinmux ranges -> pinctrl pin descs */
+static int __devinit sh_pfc_map_gpios(struct sh_pfc *pfc,
+ struct sh_pfc_pinctrl *pmx)
+{
+ unsigned long flags;
+ int i;
+
+ pmx->nr_pads = pfc->last_gpio - pfc->first_gpio + 1;
+
+ pmx->pads = kmalloc(sizeof(struct pinctrl_pin_desc) * pmx->nr_pads,
+ GFP_KERNEL);
+ if (unlikely(!pmx->pads)) {
+ pmx->nr_pads = 0;
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&pfc->lock, flags);
+
+ /*
+ * We don't necessarily have a 1:1 mapping between pin and linux
+ * GPIO number, as the latter maps to the associated enum_id.
+ * Care needs to be taken to translate back to pin space when
+ * dealing with any pin configurations.
+ */
+ for (i = 0; i < pmx->nr_pads; i++) {
+ struct pinctrl_pin_desc *pin = pmx->pads + i;
+ struct pinmux_gpio *gpio = pfc->gpios + i;
+
+ pin->number = pfc->first_gpio + i;
+ pin->name = gpio->name;
+
+ /* XXX */
+ if (unlikely(!gpio->enum_id))
+ continue;
+
+ sh_pfc_map_one_gpio(pfc, pmx, gpio, i);
+ }
+
+ spin_unlock_irqrestore(&pfc->lock, flags);
+
+ sh_pfc_pinctrl_desc.pins = pmx->pads;
+ sh_pfc_pinctrl_desc.npins = pmx->nr_pads;
+
+ return 0;
+}
+
+static int __devinit sh_pfc_map_functions(struct sh_pfc *pfc,
+ struct sh_pfc_pinctrl *pmx)
+{
+ unsigned long flags;
+ int i, fn;
+
+ pmx->functions = kzalloc(pmx->nr_functions * sizeof(void *),
+ GFP_KERNEL);
+ if (unlikely(!pmx->functions))
+ return -ENOMEM;
+
+ spin_lock_irqsave(&pmx->lock, flags);
+
+ for (i = fn = 0; i < pmx->nr_pads; i++) {
+ struct pinmux_gpio *gpio = pfc->gpios + i;
+
+ if ((gpio->flags & PINMUX_FLAG_TYPE) == PINMUX_TYPE_FUNCTION)
+ pmx->functions[fn++] = gpio;
+ }
+
+ spin_unlock_irqrestore(&pmx->lock, flags);
+
+ return 0;
+}
+
+static int __devinit sh_pfc_pinctrl_probe(struct platform_device *pdev)
+{
+ struct sh_pfc *pfc;
+ int ret;
+
+ if (unlikely(!sh_pfc_pmx))
+ return -ENODEV;
+
+ pfc = sh_pfc_pmx->pfc;
+
+ ret = sh_pfc_map_gpios(pfc, sh_pfc_pmx);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = sh_pfc_map_functions(pfc, sh_pfc_pmx);
+ if (unlikely(ret != 0))
+ goto free_pads;
+
+ sh_pfc_pmx->pctl = pinctrl_register(&sh_pfc_pinctrl_desc, &pdev->dev,
+ sh_pfc_pmx);
+ if (IS_ERR(sh_pfc_pmx->pctl)) {
+ ret = PTR_ERR(sh_pfc_pmx->pctl);
+ goto free_functions;
+ }
+
+ sh_pfc_gpio_range.npins = pfc->last_gpio - pfc->first_gpio + 1;
+ sh_pfc_gpio_range.base = pfc->first_gpio;
+ sh_pfc_gpio_range.pin_base = pfc->first_gpio;
+
+ pinctrl_add_gpio_range(sh_pfc_pmx->pctl, &sh_pfc_gpio_range);
+
+ platform_set_drvdata(pdev, sh_pfc_pmx);
+
+ return 0;
+
+free_functions:
+ kfree(sh_pfc_pmx->functions);
+free_pads:
+ kfree(sh_pfc_pmx->pads);
+ kfree(sh_pfc_pmx);
+
+ return ret;
+}
+
+static int __devexit sh_pfc_pinctrl_remove(struct platform_device *pdev)
+{
+ struct sh_pfc_pinctrl *pmx = platform_get_drvdata(pdev);
+
+ pinctrl_remove_gpio_range(pmx->pctl, &sh_pfc_gpio_range);
+ pinctrl_unregister(pmx->pctl);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(sh_pfc_pmx->functions);
+ kfree(sh_pfc_pmx->pads);
+ kfree(sh_pfc_pmx);
+
+ return 0;
+}
+
+static struct platform_driver sh_pfc_pinctrl_driver = {
+ .probe = sh_pfc_pinctrl_probe,
+ .remove = __devexit_p(sh_pfc_pinctrl_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct platform_device sh_pfc_pinctrl_device = {
+ .name = DRV_NAME,
+ .id = -1,
+};
+
+static int __init sh_pfc_pinctrl_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&sh_pfc_pinctrl_driver);
+ if (likely(!rc)) {
+ rc = platform_device_register(&sh_pfc_pinctrl_device);
+ if (unlikely(rc))
+ platform_driver_unregister(&sh_pfc_pinctrl_driver);
+ }
+
+ return rc;
+}
+
+static void __exit sh_pfc_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sh_pfc_pinctrl_driver);
+}
+
+subsys_initcall(sh_pfc_pinctrl_init);
+module_exit(sh_pfc_pinctrl_exit);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index cd2fe350e724..cb90bc62d0a9 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -125,7 +125,7 @@ config SPI_BUTTERFLY
config SPI_COLDFIRE_QSPI
tristate "Freescale Coldfire QSPI controller"
- depends on (M520x || M523x || M5249 || M527x || M528x || M532x)
+ depends on (M520x || M523x || M5249 || M525x || M527x || M528x || M532x)
help
This enables support for the Coldfire QSPI controller in master
mode.
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 972a94c58be3..646a7657fe62 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -27,10 +27,15 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <mach/dma.h>
#include <plat/s3c64xx-spi.h>
+#define MAX_SPI_PORTS 3
+
/* Registers and bit-fields */
#define S3C64XX_SPI_CH_CFG 0x00
@@ -74,11 +79,6 @@
#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
-#define S3C64XX_SPI_ACT(c) writel(0, (c)->regs + S3C64XX_SPI_SLAVE_SEL)
-
-#define S3C64XX_SPI_DEACT(c) writel(S3C64XX_SPI_SLAVE_SIG_INACT, \
- (c)->regs + S3C64XX_SPI_SLAVE_SEL)
-
#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
@@ -113,13 +113,12 @@
#define S3C64XX_SPI_FBCLK_MSK (3<<0)
-#define S3C64XX_SPI_ST_TRLCNTZ(v, i) ((((v) >> (i)->rx_lvl_offset) & \
- (((i)->fifo_lvl_mask + 1))) \
- ? 1 : 0)
-
-#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & (1 << (i)->tx_st_done)) ? 1 : 0)
-#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
-#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
+#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
+#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
+ (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
+#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
+#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
+ FIFO_LVL_MASK(i))
#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
#define S3C64XX_SPI_TRAILCNT_OFF 19
@@ -135,6 +134,29 @@ struct s3c64xx_spi_dma_data {
unsigned ch;
enum dma_data_direction direction;
enum dma_ch dmach;
+ struct property *dma_prop;
+};
+
+/**
+ * struct s3c64xx_spi_info - SPI Controller hardware info
+ * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
+ * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS regiter.
+ * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS regiter.
+ * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
+ * @clk_from_cmu: True, if the controller does not include a clock mux and
+ * prescaler unit.
+ *
+ * The Samsung s3c64xx SPI controller are used on various Samsung SoC's but
+ * differ in some aspects such as the size of the fifo and spi bus clock
+ * setup. Such differences are specified to the driver using this structure
+ * which is provided as driver data to the driver.
+ */
+struct s3c64xx_spi_port_config {
+ int fifo_lvl_mask[MAX_SPI_PORTS];
+ int rx_lvl_offset;
+ int tx_st_done;
+ bool high_speed;
+ bool clk_from_cmu;
};
/**
@@ -175,6 +197,9 @@ struct s3c64xx_spi_driver_data {
struct s3c64xx_spi_dma_data rx_dma;
struct s3c64xx_spi_dma_data tx_dma;
struct samsung_dma_ops *ops;
+ struct s3c64xx_spi_port_config *port_conf;
+ unsigned int port_id;
+ unsigned long gpios[4];
};
static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
@@ -183,7 +208,6 @@ static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned long loops;
u32 val;
@@ -199,7 +223,7 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
loops = msecs_to_loops(1);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
- } while (TX_FIFO_LVL(val, sci) && loops--);
+ } while (TX_FIFO_LVL(val, sdd) && loops--);
if (loops == 0)
dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
@@ -208,7 +232,7 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
loops = msecs_to_loops(1);
do {
val = readl(regs + S3C64XX_SPI_STATUS);
- if (RX_FIFO_LVL(val, sci))
+ if (RX_FIFO_LVL(val, sdd))
readl(regs + S3C64XX_SPI_RX_DATA);
else
break;
@@ -262,14 +286,24 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
unsigned len, dma_addr_t buf)
{
struct s3c64xx_spi_driver_data *sdd;
- struct samsung_dma_prep_info info;
+ struct samsung_dma_prep info;
+ struct samsung_dma_config config;
- if (dma->direction == DMA_DEV_TO_MEM)
+ if (dma->direction == DMA_DEV_TO_MEM) {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, rx_dma);
- else
+ config.direction = sdd->rx_dma.direction;
+ config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+ config.width = sdd->cur_bpw / 8;
+ sdd->ops->config(sdd->rx_dma.ch, &config);
+ } else {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, tx_dma);
+ config.direction = sdd->tx_dma.direction;
+ config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+ config.width = sdd->cur_bpw / 8;
+ sdd->ops->config(sdd->tx_dma.ch, &config);
+ }
info.cap = DMA_SLAVE;
info.len = len;
@@ -284,20 +318,17 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
{
- struct samsung_dma_info info;
+ struct samsung_dma_req req;
sdd->ops = samsung_dma_get_ops();
- info.cap = DMA_SLAVE;
- info.client = &s3c64xx_spi_dma_client;
- info.width = sdd->cur_bpw / 8;
+ req.cap = DMA_SLAVE;
+ req.client = &s3c64xx_spi_dma_client;
- info.direction = sdd->rx_dma.direction;
- info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
- sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info);
- info.direction = sdd->tx_dma.direction;
- info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
- sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info);
+ req.dt_dmach_prop = sdd->rx_dma.dma_prop;
+ sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &req);
+ req.dt_dmach_prop = sdd->tx_dma.dma_prop;
+ sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &req);
return 1;
}
@@ -306,7 +337,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi,
struct spi_transfer *xfer, int dma_mode)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
u32 modecfg, chcfg;
@@ -356,7 +386,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
if (xfer->rx_buf != NULL) {
sdd->state |= RXBUSY;
- if (sci->high_speed && sdd->cur_speed >= 30000000UL
+ if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
&& !(sdd->cur_mode & SPI_CPHA))
chcfg |= S3C64XX_SPI_CH_HS_EN;
@@ -383,20 +413,19 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
/* Deselect the last toggled device */
cs = sdd->tgl_spi->controller_data;
- cs->set_level(cs->line,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
+ gpio_set_value(cs->line,
+ spi->mode & SPI_CS_HIGH ? 0 : 1);
}
sdd->tgl_spi = NULL;
}
cs = spi->controller_data;
- cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
+ gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
}
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
struct spi_transfer *xfer, int dma_mode)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
unsigned long val;
int ms;
@@ -413,7 +442,7 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
val = msecs_to_loops(ms);
do {
status = readl(regs + S3C64XX_SPI_STATUS);
- } while (RX_FIFO_LVL(status, sci) < xfer->len && --val);
+ } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
}
if (!val)
@@ -432,8 +461,8 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
if (xfer->rx_buf == NULL) {
val = msecs_to_loops(10);
status = readl(regs + S3C64XX_SPI_STATUS);
- while ((TX_FIFO_LVL(status, sci)
- || !S3C64XX_SPI_ST_TX_DONE(status, sci))
+ while ((TX_FIFO_LVL(status, sdd)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
&& --val) {
cpu_relax();
status = readl(regs + S3C64XX_SPI_STATUS);
@@ -477,17 +506,16 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
if (sdd->tgl_spi == spi)
sdd->tgl_spi = NULL;
- cs->set_level(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
void __iomem *regs = sdd->regs;
u32 val;
/* Disable Clock */
- if (sci->clk_from_cmu) {
+ if (sdd->port_conf->clk_from_cmu) {
clk_disable(sdd->src_clk);
} else {
val = readl(regs + S3C64XX_SPI_CLK_CFG);
@@ -531,7 +559,7 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
writel(val, regs + S3C64XX_SPI_MODE_CFG);
- if (sci->clk_from_cmu) {
+ if (sdd->port_conf->clk_from_cmu) {
/* Configure Clock */
/* There is half-multiplier before the SPI */
clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
@@ -557,7 +585,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
@@ -573,7 +600,7 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
/* Map until end or first fail */
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
+ if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
continue;
if (xfer->tx_buf != NULL) {
@@ -607,7 +634,6 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
struct spi_message *msg)
{
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
@@ -616,7 +642,7 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
+ if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
continue;
if (xfer->rx_buf != NULL
@@ -635,7 +661,6 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
struct spi_device *spi = msg->spi;
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct spi_transfer *xfer;
@@ -690,7 +715,7 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
}
/* Polling method for xfers not bigger than FIFO capacity */
- if (xfer->len <= ((sci->fifo_lvl_mask >> 1) + 1))
+ if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
use_dma = 0;
else
use_dma = 1;
@@ -707,14 +732,15 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
enable_cs(sdd, spi);
/* Start the signals */
- S3C64XX_SPI_ACT(sdd);
+ writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
spin_unlock_irqrestore(&sdd->lock, flags);
status = wait_for_xfer(sdd, xfer, use_dma);
/* Quiese the signals */
- S3C64XX_SPI_DEACT(sdd);
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT,
+ sdd->regs + S3C64XX_SPI_SLAVE_SEL);
if (status) {
dev_err(&spi->dev, "I/O Error: "
@@ -795,6 +821,48 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
return 0;
}
+static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
+ struct s3c64xx_spi_driver_data *sdd,
+ struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs;
+ struct device_node *slave_np, *data_np;
+ u32 fb_delay = 0;
+
+ slave_np = spi->dev.of_node;
+ if (!slave_np) {
+ dev_err(&spi->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ for_each_child_of_node(slave_np, data_np)
+ if (!strcmp(data_np->name, "controller-data"))
+ break;
+ if (!data_np) {
+ dev_err(&spi->dev, "child node 'controller-data' not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs) {
+ dev_err(&spi->dev, "could not allocate memory for controller"
+ " data\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
+ if (!gpio_is_valid(cs->line)) {
+ dev_err(&spi->dev, "chip select gpio is not specified or "
+ "invalid\n");
+ kfree(cs);
+ return ERR_PTR(-EINVAL);
+ }
+
+ of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
+ cs->fb_delay = fb_delay;
+ return cs;
+}
+
/*
* Here we only check the validity of requested configuration
* and save the configuration in a local data-structure.
@@ -808,14 +876,31 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
struct s3c64xx_spi_info *sci;
struct spi_message *msg;
unsigned long flags;
- int err = 0;
+ int err;
- if (cs == NULL || cs->set_level == NULL) {
+ sdd = spi_master_get_devdata(spi->master);
+ if (!cs && spi->dev.of_node) {
+ cs = s3c64xx_get_slave_ctrldata(sdd, spi);
+ spi->controller_data = cs;
+ }
+
+ if (IS_ERR_OR_NULL(cs)) {
dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
return -ENODEV;
}
- sdd = spi_master_get_devdata(spi->master);
+ if (!spi_get_ctldata(spi)) {
+ err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (err) {
+ dev_err(&spi->dev,
+ "Failed to get /CS gpio [%d]: %d\n",
+ cs->line, err);
+ goto err_gpio_req;
+ }
+ spi_set_ctldata(spi, cs);
+ }
+
sci = sdd->cntrlr_info;
spin_lock_irqsave(&sdd->lock, flags);
@@ -826,7 +911,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
dev_err(&spi->dev,
"setup: attempt while mssg in queue!\n");
spin_unlock_irqrestore(&sdd->lock, flags);
- return -EBUSY;
+ err = -EBUSY;
+ goto err_msgq;
}
}
@@ -844,7 +930,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
pm_runtime_get_sync(&sdd->pdev->dev);
/* Check if we can provide the requested rate */
- if (!sci->clk_from_cmu) {
+ if (!sdd->port_conf->clk_from_cmu) {
u32 psr, speed;
/* Max possible */
@@ -869,22 +955,44 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
}
speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
- if (spi->max_speed_hz >= speed)
+ if (spi->max_speed_hz >= speed) {
spi->max_speed_hz = speed;
- else
+ } else {
err = -EINVAL;
+ goto setup_exit;
+ }
}
pm_runtime_put(&sdd->pdev->dev);
+ disable_cs(sdd, spi);
+ return 0;
setup_exit:
-
/* setup() returns with device de-selected */
disable_cs(sdd, spi);
+err_msgq:
+ gpio_free(cs->line);
+ spi_set_ctldata(spi, NULL);
+
+err_gpio_req:
+ kfree(cs);
+
return err;
}
+static void s3c64xx_spi_cleanup(struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
+
+ if (cs) {
+ gpio_free(cs->line);
+ if (spi->dev.of_node)
+ kfree(cs);
+ }
+ spi_set_ctldata(spi, NULL);
+}
+
static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
struct s3c64xx_spi_driver_data *sdd = data;
@@ -920,12 +1028,12 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
sdd->cur_speed = 0;
- S3C64XX_SPI_DEACT(sdd);
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
/* Disable Interrupts - we use Polling if not DMA mode */
writel(0, regs + S3C64XX_SPI_INT_EN);
- if (!sci->clk_from_cmu)
+ if (!sdd->port_conf->clk_from_cmu)
writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
regs + S3C64XX_SPI_CLK_CFG);
writel(0, regs + S3C64XX_SPI_MODE_CFG);
@@ -946,40 +1054,165 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
flush_fifo(sdd);
}
-static int __init s3c64xx_spi_probe(struct platform_device *pdev)
+static int __devinit s3c64xx_spi_get_dmares(
+ struct s3c64xx_spi_driver_data *sdd, bool tx)
+{
+ struct platform_device *pdev = sdd->pdev;
+ struct s3c64xx_spi_dma_data *dma_data;
+ struct property *prop;
+ struct resource *res;
+ char prop_name[15], *chan_str;
+
+ if (tx) {
+ dma_data = &sdd->tx_dma;
+ dma_data->direction = DMA_TO_DEVICE;
+ chan_str = "tx";
+ } else {
+ dma_data = &sdd->rx_dma;
+ dma_data->direction = DMA_FROM_DEVICE;
+ chan_str = "rx";
+ }
+
+ if (!sdd->pdev->dev.of_node) {
+ res = platform_get_resource(pdev, IORESOURCE_DMA, tx ? 0 : 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get SPI-%s dma "
+ "resource\n", chan_str);
+ return -ENXIO;
+ }
+ dma_data->dmach = res->start;
+ return 0;
+ }
+
+ sprintf(prop_name, "%s-dma-channel", chan_str);
+ prop = of_find_property(pdev->dev.of_node, prop_name, NULL);
+ if (!prop) {
+ dev_err(&pdev->dev, "%s dma channel property not specified\n",
+ chan_str);
+ return -ENXIO;
+ }
+
+ dma_data->dmach = DMACH_DT_PROP;
+ dma_data->dma_prop = prop;
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
+{
+ struct device *dev = &sdd->pdev->dev;
+ int idx, gpio, ret;
+
+ /* find gpios for mosi, miso and clock lines */
+ for (idx = 0; idx < 3; idx++) {
+ gpio = of_get_gpio(dev->of_node, idx);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "invalid gpio[%d]: %d\n", idx, gpio);
+ goto free_gpio;
+ }
+
+ ret = gpio_request(gpio, "spi-bus");
+ if (ret) {
+ dev_err(dev, "gpio [%d] request failed: %d\n",
+ gpio, ret);
+ goto free_gpio;
+ }
+ }
+ return 0;
+
+free_gpio:
+ while (--idx >= 0)
+ gpio_free(sdd->gpios[idx]);
+ return -EINVAL;
+}
+
+static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
+{
+ unsigned int idx;
+ for (idx = 0; idx < 3; idx++)
+ gpio_free(sdd->gpios[idx]);
+}
+
+static struct __devinit s3c64xx_spi_info * s3c64xx_spi_parse_dt(
+ struct device *dev)
{
- struct resource *mem_res, *dmatx_res, *dmarx_res;
- struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci;
- struct spi_master *master;
- int ret, irq;
- char clk_name[16];
+ u32 temp;
- if (pdev->id < 0) {
- dev_err(&pdev->dev,
- "Invalid platform device id-%d\n", pdev->id);
- return -ENODEV;
+ sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
+ if (!sci) {
+ dev_err(dev, "memory allocation for spi_info failed\n");
+ return ERR_PTR(-ENOMEM);
}
- if (pdev->dev.platform_data == NULL) {
- dev_err(&pdev->dev, "platform_data missing!\n");
- return -ENODEV;
+ if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
+ dev_warn(dev, "spi bus clock parent not specified, using "
+ "clock at index 0 as parent\n");
+ sci->src_clk_nr = 0;
+ } else {
+ sci->src_clk_nr = temp;
}
- sci = pdev->dev.platform_data;
+ if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
+ dev_warn(dev, "number of chip select lines not specified, "
+ "assuming 1 chip select line\n");
+ sci->num_cs = 1;
+ } else {
+ sci->num_cs = temp;
+ }
+
+ return sci;
+}
+#else
+static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
+{
+ return dev->platform_data;
+}
+
+static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd)
+{
+ return -EINVAL;
+}
+
+static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd)
+{
+}
+#endif
- /* Check for availability of necessary resource */
+static const struct of_device_id s3c64xx_spi_dt_match[];
- dmatx_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (dmatx_res == NULL) {
- dev_err(&pdev->dev, "Unable to get SPI-Tx dma resource\n");
- return -ENXIO;
+static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
+ return (struct s3c64xx_spi_port_config *)match->data;
}
+#endif
+ return (struct s3c64xx_spi_port_config *)
+ platform_get_device_id(pdev)->driver_data;
+}
- dmarx_res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (dmarx_res == NULL) {
- dev_err(&pdev->dev, "Unable to get SPI-Rx dma resource\n");
- return -ENXIO;
+static int __init s3c64xx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *mem_res;
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_info *sci = pdev->dev.platform_data;
+ struct spi_master *master;
+ int ret, irq;
+ char clk_name[16];
+
+ if (!sci && pdev->dev.of_node) {
+ sci = s3c64xx_spi_parse_dt(&pdev->dev);
+ if (IS_ERR(sci))
+ return PTR_ERR(sci);
+ }
+
+ if (!sci) {
+ dev_err(&pdev->dev, "platform_data missing!\n");
+ return -ENODEV;
}
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1004,19 +1237,37 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sdd = spi_master_get_devdata(master);
+ sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
sdd->master = master;
sdd->cntrlr_info = sci;
sdd->pdev = pdev;
sdd->sfr_start = mem_res->start;
- sdd->tx_dma.dmach = dmatx_res->start;
- sdd->tx_dma.direction = DMA_MEM_TO_DEV;
- sdd->rx_dma.dmach = dmarx_res->start;
- sdd->rx_dma.direction = DMA_DEV_TO_MEM;
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "spi");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, "
+ "errno %d\n", ret);
+ goto err0;
+ }
+ sdd->port_id = ret;
+ } else {
+ sdd->port_id = pdev->id;
+ }
sdd->cur_bpw = 8;
- master->bus_num = pdev->id;
+ ret = s3c64xx_spi_get_dmares(sdd, true);
+ if (ret)
+ goto err0;
+
+ ret = s3c64xx_spi_get_dmares(sdd, false);
+ if (ret)
+ goto err0;
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = sdd->port_id;
master->setup = s3c64xx_spi_setup;
+ master->cleanup = s3c64xx_spi_cleanup;
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
master->transfer_one_message = s3c64xx_spi_transfer_one_message;
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
@@ -1025,21 +1276,17 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- if (request_mem_region(mem_res->start,
- resource_size(mem_res), pdev->name) == NULL) {
- dev_err(&pdev->dev, "Req mem region failed\n");
- ret = -ENXIO;
- goto err0;
- }
-
- sdd->regs = ioremap(mem_res->start, resource_size(mem_res));
+ sdd->regs = devm_request_and_ioremap(&pdev->dev, mem_res);
if (sdd->regs == NULL) {
dev_err(&pdev->dev, "Unable to remap IO\n");
ret = -ENXIO;
goto err1;
}
- if (sci->cfg_gpio == NULL || sci->cfg_gpio(pdev)) {
+ if (!sci->cfg_gpio && pdev->dev.of_node) {
+ if (s3c64xx_spi_parse_dt_gpio(sdd))
+ return -EBUSY;
+ } else if (sci->cfg_gpio == NULL || sci->cfg_gpio()) {
dev_err(&pdev->dev, "Unable to config gpio\n");
ret = -EBUSY;
goto err2;
@@ -1075,7 +1322,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
}
/* Setup Deufult Mode */
- s3c64xx_spi_hwinit(sdd, pdev->id);
+ s3c64xx_spi_hwinit(sdd, sdd->port_id);
spin_lock_init(&sdd->lock);
init_completion(&sdd->xfer_completion);
@@ -1100,7 +1347,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d "
"with %d Slaves attached\n",
- pdev->id, master->num_chipselect);
+ sdd->port_id, master->num_chipselect);
dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
mem_res->end, mem_res->start,
sdd->rx_dma.dmach, sdd->tx_dma.dmach);
@@ -1120,10 +1367,10 @@ err5:
err4:
clk_put(sdd->clk);
err3:
+ if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
+ s3c64xx_spi_dt_gpio_free(sdd);
err2:
- iounmap((void *) sdd->regs);
err1:
- release_mem_region(mem_res->start, resource_size(mem_res));
err0:
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
@@ -1135,7 +1382,6 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
- struct resource *mem_res;
pm_runtime_disable(&pdev->dev);
@@ -1151,11 +1397,8 @@ static int s3c64xx_spi_remove(struct platform_device *pdev)
clk_disable(sdd->clk);
clk_put(sdd->clk);
- iounmap((void *) sdd->regs);
-
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem_res != NULL)
- release_mem_region(mem_res->start, resource_size(mem_res));
+ if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node)
+ s3c64xx_spi_dt_gpio_free(sdd);
platform_set_drvdata(pdev, NULL);
spi_master_put(master);
@@ -1175,6 +1418,9 @@ static int s3c64xx_spi_suspend(struct device *dev)
clk_disable(sdd->src_clk);
clk_disable(sdd->clk);
+ if (!sdd->cntrlr_info->cfg_gpio && dev->of_node)
+ s3c64xx_spi_dt_gpio_free(sdd);
+
sdd->cur_speed = 0; /* Output Clock is stopped */
return 0;
@@ -1182,18 +1428,20 @@ static int s3c64xx_spi_suspend(struct device *dev)
static int s3c64xx_spi_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
- sci->cfg_gpio(pdev);
+ if (!sci->cfg_gpio && dev->of_node)
+ s3c64xx_spi_parse_dt_gpio(sdd);
+ else
+ sci->cfg_gpio();
/* Enable the clock */
clk_enable(sdd->src_clk);
clk_enable(sdd->clk);
- s3c64xx_spi_hwinit(sdd, pdev->id);
+ s3c64xx_spi_hwinit(sdd, sdd->port_id);
spi_master_resume(master);
@@ -1231,13 +1479,89 @@ static const struct dev_pm_ops s3c64xx_spi_pm = {
s3c64xx_spi_runtime_resume, NULL)
};
+struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f },
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+ .high_speed = true,
+};
+
+struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f, 0x7F },
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+};
+
+struct s3c64xx_spi_port_config s5p64x0_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+};
+
+struct s3c64xx_spi_port_config s5pc100_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f, 0x7F },
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+ .high_speed = true,
+};
+
+struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+};
+
+struct s3c64xx_spi_port_config exynos4_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+ .clk_from_cmu = true,
+};
+
+static struct platform_device_id s3c64xx_spi_driver_ids[] = {
+ {
+ .name = "s3c2443-spi",
+ .driver_data = (kernel_ulong_t)&s3c2443_spi_port_config,
+ }, {
+ .name = "s3c6410-spi",
+ .driver_data = (kernel_ulong_t)&s3c6410_spi_port_config,
+ }, {
+ .name = "s5p64x0-spi",
+ .driver_data = (kernel_ulong_t)&s5p64x0_spi_port_config,
+ }, {
+ .name = "s5pc100-spi",
+ .driver_data = (kernel_ulong_t)&s5pc100_spi_port_config,
+ }, {
+ .name = "s5pv210-spi",
+ .driver_data = (kernel_ulong_t)&s5pv210_spi_port_config,
+ }, {
+ .name = "exynos4210-spi",
+ .driver_data = (kernel_ulong_t)&exynos4_spi_port_config,
+ },
+ { },
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id s3c64xx_spi_dt_match[] = {
+ { .compatible = "samsung,exynos4210-spi",
+ .data = (void *)&exynos4_spi_port_config,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
+#endif /* CONFIG_OF */
+
static struct platform_driver s3c64xx_spi_driver = {
.driver = {
.name = "s3c64xx-spi",
.owner = THIS_MODULE,
.pm = &s3c64xx_spi_pm,
+ .of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
},
.remove = s3c64xx_spi_remove,
+ .id_table = s3c64xx_spi_driver_ids,
};
MODULE_ALIAS("platform:s3c64xx-spi");
diff --git a/drivers/spi/spi-tegra.c b/drivers/spi/spi-tegra.c
index ae6d78a3e912..7f99ff3553a6 100644
--- a/drivers/spi/spi-tegra.c
+++ b/drivers/spi/spi-tegra.c
@@ -261,7 +261,7 @@ static void spi_tegra_start_transfer(struct spi_device *spi,
clk_set_rate(tspi->clk, speed);
if (tspi->cur_speed == 0)
- clk_enable(tspi->clk);
+ clk_prepare_enable(tspi->clk);
tspi->cur_speed = speed;
@@ -373,7 +373,7 @@ static void tegra_spi_rx_dma_complete(struct tegra_dma_req *req)
spi = m->state;
spi_tegra_start_message(spi, m);
} else {
- clk_disable(tspi->clk);
+ clk_disable_unprepare(tspi->clk);
tspi->cur_speed = 0;
}
}
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index f551e5376147..266aa1648a02 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -36,6 +36,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4328) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4329) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) },
+ { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl);
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index 266c7c5c86dc..ab4627cf1114 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -90,6 +90,8 @@ const char *ssb_core_name(u16 coreid)
return "ARM 1176";
case SSB_DEV_ARM_7TDMI:
return "ARM 7TDMI";
+ case SSB_DEV_ARM_CM3:
+ return "ARM Cortex M3";
}
return "UNKNOWN";
}
diff --git a/drivers/staging/comedi/drivers/s626.h b/drivers/staging/comedi/drivers/s626.h
index 2d1afecbbb60..92d3ea5eb44d 100644
--- a/drivers/staging/comedi/drivers/s626.h
+++ b/drivers/staging/comedi/drivers/s626.h
@@ -80,7 +80,7 @@
#define INLINE static __inline
#endif
-#include<linux/slab.h>
+#include <linux/slab.h>
#define S626_SIZE 0x0200
#define SIZEOF_ADDRESS_SPACE 0x0200
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 51665132c61b..87c3a07ed80e 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -88,13 +88,15 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
void *msg, int len))
{
struct sock *sock;
+ struct netlink_kernel_cfg cfg = {
+ .input = netlink_rcv,
+ };
#if !defined(DEFINE_MUTEX)
init_MUTEX(&netlink_mutex);
#endif
- sock = netlink_kernel_create(&init_net, unit, 0, netlink_rcv, NULL,
- THIS_MODULE);
+ sock = netlink_kernel_create(&init_net, unit, THIS_MODULE, &cfg);
if (sock)
rcv_cb = cb;
@@ -127,8 +129,12 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
}
seq++;
- nlh = NLMSG_PUT(skb, 0, seq, type, len);
- memcpy(NLMSG_DATA(nlh), msg, len);
+ nlh = nlmsg_put(skb, 0, seq, type, len, 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ return -EMSGSIZE;
+ }
+ memcpy(nlmsg_data(nlh), msg, len);
NETLINK_CB(skb).pid = 0;
NETLINK_CB(skb).dst_group = 0;
@@ -144,7 +150,5 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
}
ret = 0;
}
-
-nlmsg_failure:
return ret;
}
diff --git a/drivers/staging/media/go7007/wis-i2c.h b/drivers/staging/media/go7007/wis-i2c.h
index 3c2b9be455df..6d09c06c8560 100644
--- a/drivers/staging/media/go7007/wis-i2c.h
+++ b/drivers/staging/media/go7007/wis-i2c.h
@@ -25,11 +25,6 @@
#define I2C_DRIVERID_WIS_TW2804 0xf0f6
#define I2C_DRIVERID_S2250 0xf0f7
-/* Flag to indicate that the client needs to be accessed with SCCB semantics */
-/* We re-use the I2C_M_TEN value so the flag passes through the masks in the
- * core I2C code. Major kludge, but the I2C layer ain't exactly flexible. */
-#define I2C_CLIENT_SCCB 0x10
-
/* Definitions for new video decoder commands */
struct video_decoder_resolution {
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 3c60088871e0..9356886f489b 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -675,7 +675,7 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
u32 val;
- clk_enable(nvec->i2c_clk);
+ clk_prepare_enable(nvec->i2c_clk);
tegra_periph_reset_assert(nvec->i2c_clk);
udelay(2);
@@ -695,14 +695,14 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
enable_irq(nvec->irq);
- clk_disable(nvec->i2c_clk);
+ clk_disable_unprepare(nvec->i2c_clk);
}
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
disable_irq(nvec->irq);
writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
- clk_disable(nvec->i2c_clk);
+ clk_disable_unprepare(nvec->i2c_clk);
}
static void nvec_power_off(void)
@@ -812,7 +812,7 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
tegra_init_i2c_slave(nvec);
- clk_enable(i2c_clk);
+ clk_prepare_enable(i2c_clk);
/* enable event reporting */
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
index f238d574da0c..2092a9167d29 100644
--- a/drivers/staging/omapdrm/omap_drv.h
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -25,8 +25,8 @@
#include <linux/types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <linux/platform_data/omap_drm.h>
#include "omap_drm.h"
-#include "omap_priv.h"
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 61648d84fbb6..9fdcb561422f 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -9,7 +9,8 @@ target_core_mod-y := target_core_configfs.o \
target_core_tmr.o \
target_core_tpg.o \
target_core_transport.o \
- target_core_cdb.o \
+ target_core_sbc.o \
+ target_core_spc.o \
target_core_ua.o \
target_core_rd.o \
target_core_stat.o
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d57d10cb2e47..97c0f78c3c9c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -429,19 +429,8 @@ int iscsit_reset_np_thread(
int iscsit_del_np_comm(struct iscsi_np *np)
{
- if (!np->np_socket)
- return 0;
-
- /*
- * Some network transports allocate their own struct sock->file,
- * see if we need to free any additional allocated resources.
- */
- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
- kfree(np->np_socket->file);
- np->np_socket->file = NULL;
- }
-
- sock_release(np->np_socket);
+ if (np->np_socket)
+ sock_release(np->np_socket);
return 0;
}
@@ -1413,8 +1402,10 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
spin_unlock_bh(&cmd->istate_lock);
iscsit_stop_dataout_timer(cmd);
- return (!ooo_cmdsn) ? transport_generic_handle_data(
- &cmd->se_cmd) : 0;
+ if (ooo_cmdsn)
+ return 0;
+ target_execute_cmd(&cmd->se_cmd);
+ return 0;
} else /* DATAOUT_CANNOT_RECOVER */
return -1;
@@ -2683,7 +2674,7 @@ static int iscsit_send_logout_response(
*/
logout_conn = iscsit_get_conn_from_cid_rcfr(sess,
cmd->logout_cid);
- if ((logout_conn)) {
+ if (logout_conn) {
iscsit_connection_reinstatement_rcfr(logout_conn);
iscsit_dec_conn_usage_count(logout_conn);
}
@@ -4077,13 +4068,8 @@ int iscsit_close_connection(
kfree(conn->conn_ops);
conn->conn_ops = NULL;
- if (conn->sock) {
- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
- kfree(conn->sock->file);
- conn->sock->file = NULL;
- }
+ if (conn->sock)
sock_release(conn->sock);
- }
conn->thread_set = NULL;
pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 69dc8e35c03a..a7b25e783b58 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -47,28 +47,6 @@ struct lio_target_configfs_attribute {
ssize_t (*store)(void *, const char *, size_t);
};
-struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
- struct config_item *item,
- struct iscsi_tiqn **tiqn_out)
-{
- struct se_portal_group *se_tpg = container_of(to_config_group(item),
- struct se_portal_group, tpg_group);
- struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
- int ret;
-
- if (!tpg) {
- pr_err("Unable to locate struct iscsi_portal_group "
- "pointer\n");
- return NULL;
- }
- ret = iscsit_get_tpg(tpg);
- if (ret < 0)
- return NULL;
-
- *tiqn_out = tpg->tpg_tiqn;
- return tpg;
-}
-
/* Start items for lio_target_portal_cit */
static ssize_t lio_target_np_show_sctp(
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 1c70144cdaf1..8a908b28d8b2 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -224,7 +224,6 @@ enum iscsi_timer_flags_table {
/* Used for struct iscsi_np->np_flags */
enum np_flags_table {
NPF_IP_NETWORK = 0x00,
- NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
};
/* Used for struct iscsi_np->np_thread_state */
@@ -481,6 +480,7 @@ struct iscsi_tmr_req {
bool task_reassign:1;
u32 ref_cmd_sn;
u32 exp_data_sn;
+ struct iscsi_cmd *ref_cmd;
struct iscsi_conn_recovery *conn_recovery;
struct se_tmr_req *se_tmr_req;
};
@@ -503,7 +503,6 @@ struct iscsi_conn {
u16 local_port;
int net_size;
u32 auth_id;
-#define CONNFLAG_SCTP_STRUCT_FILE 0x01
u32 conn_flags;
/* Used for iscsi_tx_login_rsp() */
u32 login_itt;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index ecdd46deedda..3df8a2cef86f 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -965,8 +965,8 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
if (cmd->immediate_data) {
if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) {
spin_unlock_bh(&cmd->istate_lock);
- return transport_generic_handle_data(
- &cmd->se_cmd);
+ target_execute_cmd(&cmd->se_cmd);
+ return 0;
}
spin_unlock_bh(&cmd->istate_lock);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index a3656c9903a1..0694d9b1bce6 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -518,7 +518,7 @@ int iscsi_login_post_auth_non_zero_tsih(
* initiator and release the new connection.
*/
conn_ptr = iscsit_get_conn_from_cid_rcfr(sess, cid);
- if ((conn_ptr)) {
+ if (conn_ptr) {
pr_err("Connection exists with CID %hu for %s,"
" performing connection reinstatement.\n",
conn_ptr->cid, sess->sess_ops->InitiatorName);
@@ -539,7 +539,7 @@ int iscsi_login_post_auth_non_zero_tsih(
if (sess->sess_ops->ErrorRecoveryLevel == 2) {
cr = iscsit_get_inactive_connection_recovery_entry(
sess, cid);
- if ((cr)) {
+ if (cr) {
pr_debug("Performing implicit logout"
" for connection recovery on CID: %hu\n",
conn->cid);
@@ -795,22 +795,6 @@ int iscsi_target_setup_login_socket(
}
np->np_socket = sock;
/*
- * The SCTP stack needs struct socket->file.
- */
- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
- (np->np_network_transport == ISCSI_SCTP_UDP)) {
- if (!sock->file) {
- sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
- if (!sock->file) {
- pr_err("Unable to allocate struct"
- " file for SCTP\n");
- ret = -ENOMEM;
- goto fail;
- }
- np->np_flags |= NPF_SCTP_STRUCT_FILE;
- }
- }
- /*
* Setup the np->np_sockaddr from the passed sockaddr setup
* in iscsi_target_configfs.c code..
*/
@@ -869,21 +853,15 @@ int iscsi_target_setup_login_socket(
fail:
np->np_socket = NULL;
- if (sock) {
- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
- kfree(sock->file);
- sock->file = NULL;
- }
-
+ if (sock)
sock_release(sock);
- }
return ret;
}
static int __iscsi_target_login_thread(struct iscsi_np *np)
{
u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
- int err, ret = 0, set_sctp_conn_flag, stop;
+ int err, ret = 0, stop;
struct iscsi_conn *conn = NULL;
struct iscsi_login *login;
struct iscsi_portal_group *tpg = NULL;
@@ -894,7 +872,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
struct sockaddr_in6 sock_in6;
flush_signals(current);
- set_sctp_conn_flag = 0;
sock = np->np_socket;
spin_lock_bh(&np->np_thread_lock);
@@ -917,35 +894,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
spin_unlock_bh(&np->np_thread_lock);
goto out;
}
- /*
- * The SCTP stack needs struct socket->file.
- */
- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
- (np->np_network_transport == ISCSI_SCTP_UDP)) {
- if (!new_sock->file) {
- new_sock->file = kzalloc(
- sizeof(struct file), GFP_KERNEL);
- if (!new_sock->file) {
- pr_err("Unable to allocate struct"
- " file for SCTP\n");
- sock_release(new_sock);
- /* Get another socket */
- return 1;
- }
- set_sctp_conn_flag = 1;
- }
- }
-
iscsi_start_login_thread_timer(np);
conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
if (!conn) {
pr_err("Could not allocate memory for"
" new connection\n");
- if (set_sctp_conn_flag) {
- kfree(new_sock->file);
- new_sock->file = NULL;
- }
sock_release(new_sock);
/* Get another socket */
return 1;
@@ -955,9 +909,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
conn->conn_state = TARG_CONN_STATE_FREE;
conn->sock = new_sock;
- if (set_sctp_conn_flag)
- conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
-
pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
conn->conn_state = TARG_CONN_STATE_XPT_UP;
@@ -1081,7 +1032,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
goto new_sess_out;
zero_tsih = (pdu->tsih == 0x0000);
- if ((zero_tsih)) {
+ if (zero_tsih) {
/*
* This is the leading connection of a new session.
* We wait until after authentication to check for
@@ -1205,13 +1156,8 @@ old_sess_out:
iscsi_release_param_list(conn->param_list);
conn->param_list = NULL;
}
- if (conn->sock) {
- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
- kfree(conn->sock->file);
- conn->sock->file = NULL;
- }
+ if (conn->sock)
sock_release(conn->sock);
- }
kfree(conn);
if (tpg) {
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index ed5241e7f12a..0c4760fabfc0 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -681,7 +681,7 @@ int iscsi_update_param_value(struct iscsi_param *param, char *value)
param->value = kzalloc(strlen(value) + 1, GFP_KERNEL);
if (!param->value) {
pr_err("Unable to allocate memory for value.\n");
- return -1;
+ return -ENOMEM;
}
memcpy(param->value, value, strlen(value));
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index f4e640b51fd1..f62fe123d902 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -19,6 +19,7 @@
******************************************************************************/
#include <asm/unaligned.h>
+#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -61,7 +62,7 @@ u8 iscsit_tmr_abort_task(
}
se_tmr->ref_task_tag = hdr->rtt;
- se_tmr->ref_cmd = &ref_cmd->se_cmd;
+ tmr_req->ref_cmd = ref_cmd;
tmr_req->ref_cmd_sn = hdr->refcmdsn;
tmr_req->exp_data_sn = hdr->exp_datasn;
@@ -121,7 +122,7 @@ u8 iscsit_tmr_task_reassign(
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
- int ret;
+ int ret, ref_lun;
pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",
@@ -155,9 +156,16 @@ u8 iscsit_tmr_task_reassign(
return ISCSI_TMF_RSP_REJECTED;
}
+ ref_lun = scsilun_to_int(&hdr->lun);
+ if (ref_lun != ref_cmd->se_cmd.orig_fe_lun) {
+ pr_err("Unable to perform connection recovery for"
+ " differing ref_lun: %d ref_cmd orig_fe_lun: %u\n",
+ ref_lun, ref_cmd->se_cmd.orig_fe_lun);
+ return ISCSI_TMF_RSP_REJECTED;
+ }
+
se_tmr->ref_task_tag = hdr->rtt;
- se_tmr->ref_cmd = &ref_cmd->se_cmd;
- se_tmr->ref_task_lun = get_unaligned_le64(&hdr->lun);
+ tmr_req->ref_cmd = ref_cmd;
tmr_req->ref_cmd_sn = hdr->refcmdsn;
tmr_req->exp_data_sn = hdr->exp_datasn;
tmr_req->conn_recovery = cr;
@@ -191,9 +199,7 @@ static int iscsit_task_reassign_complete_nop_out(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
@@ -251,7 +257,8 @@ static int iscsit_task_reassign_complete_write(
pr_debug("WRITE ITT: 0x%08x: t_state: %d"
" never sent to transport\n",
cmd->init_task_tag, cmd->se_cmd.t_state);
- return transport_generic_handle_data(se_cmd);
+ target_execute_cmd(se_cmd);
+ return 0;
}
cmd->i_state = ISTATE_SEND_STATUS;
@@ -360,9 +367,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_conn_recovery *cr;
if (!cmd->cr) {
@@ -385,7 +390,7 @@ static int iscsit_task_reassign_complete_scsi_cmnd(
list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
spin_unlock_bh(&conn->cmd_lock);
- if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
+ if (cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
cmd->i_state = ISTATE_SEND_STATUS;
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
return 0;
@@ -411,17 +416,14 @@ static int iscsit_task_reassign_complete(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd;
struct iscsi_cmd *cmd;
int ret = 0;
- if (!se_tmr->ref_cmd) {
+ if (!tmr_req->ref_cmd) {
pr_err("TMR Request is missing a RefCmd struct iscsi_cmd.\n");
return -1;
}
- se_cmd = se_tmr->ref_cmd;
- cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ cmd = tmr_req->ref_cmd;
cmd->conn = conn;
@@ -547,9 +549,7 @@ int iscsit_task_reassign_prepare_write(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *cmd = tmr_req->ref_cmd;
struct iscsi_pdu *pdu = NULL;
struct iscsi_r2t *r2t = NULL, *r2t_tmp;
int first_incomplete_r2t = 1, i = 0;
@@ -782,14 +782,12 @@ int iscsit_check_task_reassign_expdatasn(
struct iscsi_tmr_req *tmr_req,
struct iscsi_conn *conn)
{
- struct se_tmr_req *se_tmr = tmr_req->se_tmr_req;
- struct se_cmd *se_cmd = se_tmr->ref_cmd;
- struct iscsi_cmd *ref_cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+ struct iscsi_cmd *ref_cmd = tmr_req->ref_cmd;
if (ref_cmd->iscsi_opcode != ISCSI_OP_SCSI_CMD)
return 0;
- if (se_cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
+ if (ref_cmd->se_cmd.se_cmd_flags & SCF_SENT_CHECK_CONDITION)
return 0;
if (ref_cmd->data_direction == DMA_NONE)
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 879d8d0fa3fe..a38a3f8ab0d9 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -303,6 +303,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
{
struct iscsi_param *param;
struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+ int ret;
spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state == TPG_STATE_ACTIVE) {
@@ -319,19 +320,19 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param) {
spin_unlock(&tpg->tpg_state_lock);
- return -ENOMEM;
+ return -EINVAL;
}
if (ISCSI_TPG_ATTRIB(tpg)->authentication) {
- if (!strcmp(param->value, NONE))
- if (iscsi_update_param_value(param, CHAP) < 0) {
- spin_unlock(&tpg->tpg_state_lock);
- return -ENOMEM;
- }
- if (iscsit_ta_authentication(tpg, 1) < 0) {
- spin_unlock(&tpg->tpg_state_lock);
- return -ENOMEM;
+ if (!strcmp(param->value, NONE)) {
+ ret = iscsi_update_param_value(param, CHAP);
+ if (ret)
+ goto err;
}
+
+ ret = iscsit_ta_authentication(tpg, 1);
+ if (ret < 0)
+ goto err;
}
tpg->tpg_state = TPG_STATE_ACTIVE;
@@ -344,6 +345,10 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg)
spin_unlock(&tiqn->tiqn_tpg_lock);
return 0;
+
+err:
+ spin_unlock(&tpg->tpg_state_lock);
+ return ret;
}
int iscsit_tpg_disable_portal_group(struct iscsi_portal_group *tpg, int force)
@@ -558,7 +563,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
if ((authentication != 1) && (authentication != 0)) {
pr_err("Illegal value for authentication parameter:"
" %u, ignoring request.\n", authentication);
- return -1;
+ return -EINVAL;
}
memset(buf1, 0, sizeof(buf1));
@@ -593,7 +598,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
} else {
snprintf(buf1, sizeof(buf1), "%s", param->value);
none = strstr(buf1, NONE);
- if ((none))
+ if (none)
goto out;
strncat(buf1, ",", strlen(","));
strncat(buf1, NONE, strlen(NONE));
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 38dfac2b0a1c..5491c632a15e 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -211,12 +211,11 @@ static void tcm_loop_submission_work(struct work_struct *work)
/*
* Because some userspace code via scsi-generic do not memset their
* associated read buffers, go ahead and do that here for type
- * SCF_SCSI_CONTROL_SG_IO_CDB. Also note that this is currently
- * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
- * by target core in target_setup_cmd_from_cdb() ->
- * transport_generic_cmd_sequencer().
+ * non-data CDBs. Also note that this is currently guaranteed to be a
+ * single SGL for this case by target core in
+ * target_setup_cmd_from_cdb() -> transport_generic_cmd_sequencer().
*/
- if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
+ if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
se_cmd->data_direction == DMA_FROM_DEVICE) {
struct scatterlist *sg = scsi_sglist(sc);
unsigned char *buf = kmap(sg_page(sg)) + sg->offset;
@@ -779,7 +778,7 @@ static int tcm_loop_write_pending(struct se_cmd *se_cmd)
* We now tell TCM to add this WRITE CDB directly into the TCM storage
* object execution queue.
*/
- transport_generic_process_write(se_cmd);
+ target_execute_cmd(se_cmd);
return 0;
}
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 7e6136e2ce81..39ddba584b30 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -1219,28 +1219,14 @@ static void sbp_handle_command(struct sbp_target_request *req)
ret = sbp_fetch_command(req);
if (ret) {
pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
- req->status.status |= cpu_to_be32(
- STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
- STATUS_BLOCK_DEAD(0) |
- STATUS_BLOCK_LEN(1) |
- STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
- sbp_send_status(req);
- sbp_free_request(req);
- return;
+ goto err;
}
ret = sbp_fetch_page_table(req);
if (ret) {
pr_debug("sbp_handle_command: fetch page table failed: %d\n",
ret);
- req->status.status |= cpu_to_be32(
- STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
- STATUS_BLOCK_DEAD(0) |
- STATUS_BLOCK_LEN(1) |
- STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
- sbp_send_status(req);
- sbp_free_request(req);
- return;
+ goto err;
}
unpacked_lun = req->login->lun->unpacked_lun;
@@ -1249,9 +1235,21 @@ static void sbp_handle_command(struct sbp_target_request *req)
pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
req->orb_pointer, unpacked_lun, data_length, data_dir);
- target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
- req->sense_buf, unpacked_lun, data_length,
- MSG_SIMPLE_TAG, data_dir, 0);
+ if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
+ req->sense_buf, unpacked_lun, data_length,
+ MSG_SIMPLE_TAG, data_dir, 0))
+ goto err;
+
+ return;
+
+err:
+ req->status.status |= cpu_to_be32(
+ STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
+ STATUS_BLOCK_DEAD(0) |
+ STATUS_BLOCK_LEN(1) |
+ STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
+ sbp_send_status(req);
+ sbp_free_request(req);
}
/*
@@ -1784,8 +1782,7 @@ static int sbp_write_pending(struct se_cmd *se_cmd)
return ret;
}
- transport_generic_process_write(se_cmd);
-
+ target_execute_cmd(se_cmd);
return 0;
}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 5ad972856a8d..cf2c66f3c116 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -300,8 +300,8 @@ int core_free_device_list_for_node(
lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock);
- core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+ core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
}
spin_unlock_irq(&nacl->device_list_lock);
@@ -342,72 +342,46 @@ void core_update_device_list_access(
spin_unlock_irq(&nacl->device_list_lock);
}
-/* core_update_device_list_for_node():
+/* core_enable_device_list_for_node():
*
*
*/
-int core_update_device_list_for_node(
+int core_enable_device_list_for_node(
struct se_lun *lun,
struct se_lun_acl *lun_acl,
u32 mapped_lun,
u32 lun_access,
struct se_node_acl *nacl,
- struct se_portal_group *tpg,
- int enable)
+ struct se_portal_group *tpg)
{
struct se_port *port = lun->lun_sep;
- struct se_dev_entry *deve = nacl->device_list[mapped_lun];
- int trans = 0;
- /*
- * If the MappedLUN entry is being disabled, the entry in
- * port->sep_alua_list must be removed now before clearing the
- * struct se_dev_entry pointers below as logic in
- * core_alua_do_transition_tg_pt() depends on these being present.
- */
- if (!enable) {
- /*
- * deve->se_lun_acl will be NULL for demo-mode created LUNs
- * that have not been explicitly concerted to MappedLUNs ->
- * struct se_lun_acl, but we remove deve->alua_port_list from
- * port->sep_alua_list. This also means that active UAs and
- * NodeACL context specific PR metadata for demo-mode
- * MappedLUN *deve will be released below..
- */
- spin_lock_bh(&port->sep_alua_lock);
- list_del(&deve->alua_port_list);
- spin_unlock_bh(&port->sep_alua_lock);
- }
+ struct se_dev_entry *deve;
spin_lock_irq(&nacl->device_list_lock);
- if (enable) {
- /*
- * Check if the call is handling demo mode -> explict LUN ACL
- * transition. This transition must be for the same struct se_lun
- * + mapped_lun that was setup in demo mode..
- */
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- if (deve->se_lun_acl != NULL) {
- pr_err("struct se_dev_entry->se_lun_acl"
- " already set for demo mode -> explict"
- " LUN ACL transition\n");
- spin_unlock_irq(&nacl->device_list_lock);
- return -EINVAL;
- }
- if (deve->se_lun != lun) {
- pr_err("struct se_dev_entry->se_lun does"
- " match passed struct se_lun for demo mode"
- " -> explict LUN ACL transition\n");
- spin_unlock_irq(&nacl->device_list_lock);
- return -EINVAL;
- }
- deve->se_lun_acl = lun_acl;
- trans = 1;
- } else {
- deve->se_lun = lun;
- deve->se_lun_acl = lun_acl;
- deve->mapped_lun = mapped_lun;
- deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
+
+ deve = nacl->device_list[mapped_lun];
+
+ /*
+ * Check if the call is handling demo mode -> explict LUN ACL
+ * transition. This transition must be for the same struct se_lun
+ * + mapped_lun that was setup in demo mode..
+ */
+ if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ if (deve->se_lun_acl != NULL) {
+ pr_err("struct se_dev_entry->se_lun_acl"
+ " already set for demo mode -> explict"
+ " LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -EINVAL;
}
+ if (deve->se_lun != lun) {
+ pr_err("struct se_dev_entry->se_lun does"
+ " match passed struct se_lun for demo mode"
+ " -> explict LUN ACL transition\n");
+ spin_unlock_irq(&nacl->device_list_lock);
+ return -EINVAL;
+ }
+ deve->se_lun_acl = lun_acl;
if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
@@ -417,27 +391,72 @@ int core_update_device_list_for_node(
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
- if (trans) {
- spin_unlock_irq(&nacl->device_list_lock);
- return 0;
- }
- deve->creation_time = get_jiffies_64();
- deve->attach_count++;
spin_unlock_irq(&nacl->device_list_lock);
+ return 0;
+ }
- spin_lock_bh(&port->sep_alua_lock);
- list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
- spin_unlock_bh(&port->sep_alua_lock);
+ deve->se_lun = lun;
+ deve->se_lun_acl = lun_acl;
+ deve->mapped_lun = mapped_lun;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
- return 0;
+ if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
+ } else {
+ deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
+ deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
+
+ deve->creation_time = get_jiffies_64();
+ deve->attach_count++;
+ spin_unlock_irq(&nacl->device_list_lock);
+
+ spin_lock_bh(&port->sep_alua_lock);
+ list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
+ spin_unlock_bh(&port->sep_alua_lock);
+
+ return 0;
+}
+
+/* core_disable_device_list_for_node():
+ *
+ *
+ */
+int core_disable_device_list_for_node(
+ struct se_lun *lun,
+ struct se_lun_acl *lun_acl,
+ u32 mapped_lun,
+ u32 lun_access,
+ struct se_node_acl *nacl,
+ struct se_portal_group *tpg)
+{
+ struct se_port *port = lun->lun_sep;
+ struct se_dev_entry *deve = nacl->device_list[mapped_lun];
+
+ /*
+ * If the MappedLUN entry is being disabled, the entry in
+ * port->sep_alua_list must be removed now before clearing the
+ * struct se_dev_entry pointers below as logic in
+ * core_alua_do_transition_tg_pt() depends on these being present.
+ *
+ * deve->se_lun_acl will be NULL for demo-mode created LUNs
+ * that have not been explicitly converted to MappedLUNs ->
+ * struct se_lun_acl, but we remove deve->alua_port_list from
+ * port->sep_alua_list. This also means that active UAs and
+ * NodeACL context specific PR metadata for demo-mode
+ * MappedLUN *deve will be released below..
+ */
+ spin_lock_bh(&port->sep_alua_lock);
+ list_del(&deve->alua_port_list);
+ spin_unlock_bh(&port->sep_alua_lock);
/*
* Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
* PR operation to complete.
*/
- spin_unlock_irq(&nacl->device_list_lock);
while (atomic_read(&deve->pr_ref_count) != 0)
cpu_relax();
+
spin_lock_irq(&nacl->device_list_lock);
/*
* Disable struct se_dev_entry LUN ACL mapping
@@ -475,9 +494,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
continue;
spin_unlock_irq(&nacl->device_list_lock);
- core_update_device_list_for_node(lun, NULL,
+ core_disable_device_list_for_node(lun, NULL,
deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
- nacl, tpg, 0);
+ nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
}
@@ -715,7 +734,7 @@ void se_release_device_for_hba(struct se_device *dev)
se_dev_stop(dev);
if (dev->dev_ptr) {
- kthread_stop(dev->process_thread);
+ destroy_workqueue(dev->tmr_wq);
if (dev->transport->free_device)
dev->transport->free_device(dev->dev_ptr);
}
@@ -822,7 +841,7 @@ int se_dev_check_shutdown(struct se_device *dev)
return ret;
}
-u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
u32 tmp, aligned_max_sectors;
/*
@@ -1273,7 +1292,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
struct se_lun *core_dev_add_lun(
struct se_portal_group *tpg,
- struct se_hba *hba,
struct se_device *dev,
u32 lun)
{
@@ -1298,7 +1316,7 @@ struct se_lun *core_dev_add_lun(
pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
- tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
+ tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
@@ -1470,8 +1488,8 @@ int core_dev_add_initiator_node_lun_acl(
lacl->se_lun = lun;
- if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
- lun_access, nacl, tpg, 1) < 0)
+ if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
+ lun_access, nacl, tpg) < 0)
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
@@ -1514,8 +1532,8 @@ int core_dev_del_initiator_node_lun_acl(
smp_mb__after_atomic_dec();
spin_unlock(&lun->lun_acl_lock);
- core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+ core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
lacl->se_lun = NULL;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 405cc98eaed6..ea479e54f5fd 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -764,8 +764,7 @@ static int target_fabric_port_link(
goto out;
}
- lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
- lun->unpacked_lun);
+ lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun);
if (IS_ERR(lun_p)) {
pr_err("core_dev_add_lun() failed\n");
ret = PTR_ERR(lun_p);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 9f99d0404908..9e2100551c78 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -331,7 +331,7 @@ static int fd_do_writev(struct se_cmd *cmd, struct scatterlist *sgl,
return 1;
}
-static void fd_emulate_sync_cache(struct se_cmd *cmd)
+static int fd_execute_sync_cache(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
@@ -365,7 +365,7 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (immed)
- return;
+ return 0;
if (ret) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -373,11 +373,15 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
} else {
target_complete_cmd(cmd, SAM_STAT_GOOD);
}
+
+ return 0;
}
-static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, enum dma_data_direction data_direction)
+static int fd_execute_rw(struct se_cmd *cmd)
{
+ struct scatterlist *sgl = cmd->t_data_sg;
+ u32 sgl_nents = cmd->t_data_nents;
+ enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *dev = cmd->se_dev;
int ret = 0;
@@ -550,6 +554,16 @@ static sector_t fd_get_blocks(struct se_device *dev)
return div_u64(dev_size, dev->se_sub_dev->se_dev_attrib.block_size);
}
+static struct spc_ops fd_spc_ops = {
+ .execute_rw = fd_execute_rw,
+ .execute_sync_cache = fd_execute_sync_cache,
+};
+
+static int fd_parse_cdb(struct se_cmd *cmd)
+{
+ return sbc_parse_cdb(cmd, &fd_spc_ops);
+}
+
static struct se_subsystem_api fileio_template = {
.name = "fileio",
.owner = THIS_MODULE,
@@ -561,8 +575,7 @@ static struct se_subsystem_api fileio_template = {
.allocate_virtdevice = fd_allocate_virtdevice,
.create_virtdevice = fd_create_virtdevice,
.free_device = fd_free_device,
- .execute_cmd = fd_execute_cmd,
- .do_sync_cache = fd_emulate_sync_cache,
+ .parse_cdb = fd_parse_cdb,
.check_configfs_dev_params = fd_check_configfs_dev_params,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index fd47950727b4..76db75e836ed 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -40,6 +40,7 @@
#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
+#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
@@ -96,6 +97,7 @@ static struct se_device *iblock_create_virtdevice(
struct request_queue *q;
struct queue_limits *limits;
u32 dev_flags = 0;
+ fmode_t mode;
int ret = -EINVAL;
if (!ib_dev) {
@@ -117,8 +119,11 @@ static struct se_device *iblock_create_virtdevice(
pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
- bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
- FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
+ mode = FMODE_READ|FMODE_EXCL;
+ if (!ib_dev->ibd_readonly)
+ mode |= FMODE_WRITE;
+
+ bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
if (IS_ERR(bd)) {
ret = PTR_ERR(bd);
goto failed;
@@ -292,7 +297,7 @@ static void iblock_end_io_flush(struct bio *bio, int err)
* Implement SYCHRONIZE CACHE. Note that we can't handle lba ranges and must
* always flush the whole cache.
*/
-static void iblock_emulate_sync_cache(struct se_cmd *cmd)
+static int iblock_execute_sync_cache(struct se_cmd *cmd)
{
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
int immed = (cmd->t_task_cdb[1] & 0x2);
@@ -311,23 +316,98 @@ static void iblock_emulate_sync_cache(struct se_cmd *cmd)
if (!immed)
bio->bi_private = cmd;
submit_bio(WRITE_FLUSH, bio);
+ return 0;
}
-static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
+static int iblock_execute_unmap(struct se_cmd *cmd)
{
+ struct se_device *dev = cmd->se_dev;
struct iblock_dev *ibd = dev->dev_ptr;
- struct block_device *bd = ibd->ibd_bd;
- int barrier = 0;
+ unsigned char *buf, *ptr = NULL;
+ sector_t lba;
+ int size = cmd->data_length;
+ u32 range;
+ int ret = 0;
+ int dl, bd_dl;
+
+ buf = transport_kmap_data_sg(cmd);
+
+ dl = get_unaligned_be16(&buf[0]);
+ bd_dl = get_unaligned_be16(&buf[2]);
+
+ size = min(size - 8, bd_dl);
+ if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* First UNMAP block descriptor starts at 8 byte offset */
+ ptr = &buf[8];
+ pr_debug("UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u"
+ " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
+
+ while (size >= 16) {
+ lba = get_unaligned_be64(&ptr[0]);
+ range = get_unaligned_be32(&ptr[8]);
+ pr_debug("UNMAP: Using lba: %llu and range: %u\n",
+ (unsigned long long)lba, range);
+
+ if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (lba + range > dev->transport->get_blocks(dev) + 1) {
+ cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
+ ret = -EINVAL;
+ goto err;
+ }
- return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
+ ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
+ GFP_KERNEL, 0);
+ if (ret < 0) {
+ pr_err("blkdev_issue_discard() failed: %d\n",
+ ret);
+ goto err;
+ }
+
+ ptr += 16;
+ size -= 16;
+ }
+
+err:
+ transport_kunmap_data_sg(cmd);
+ if (!ret)
+ target_complete_cmd(cmd, GOOD);
+ return ret;
+}
+
+static int iblock_execute_write_same(struct se_cmd *cmd)
+{
+ struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
+ int ret;
+
+ ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
+ spc_get_write_same_sectors(cmd), GFP_KERNEL,
+ 0);
+ if (ret < 0) {
+ pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
+ return ret;
+ }
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
}
enum {
- Opt_udev_path, Opt_force, Opt_err
+ Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};
static match_table_t tokens = {
{Opt_udev_path, "udev_path=%s"},
+ {Opt_readonly, "readonly=%d"},
{Opt_force, "force=%d"},
{Opt_err, NULL}
};
@@ -340,6 +420,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
+ unsigned long tmp_readonly;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -372,6 +453,22 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break;
+ case Opt_readonly:
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = strict_strtoul(arg_p, 0, &tmp_readonly);
+ kfree(arg_p);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed for"
+ " readonly=\n");
+ goto out;
+ }
+ ib_dev->ibd_readonly = tmp_readonly;
+ pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
+ break;
case Opt_force:
break;
default:
@@ -411,11 +508,10 @@ static ssize_t iblock_show_configfs_dev_params(
if (bd)
bl += sprintf(b + bl, "iBlock device: %s",
bdevname(bd, buf));
- if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
- bl += sprintf(b + bl, " UDEV PATH: %s\n",
+ if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
+ bl += sprintf(b + bl, " UDEV PATH: %s",
ibd->ibd_udev_path);
- } else
- bl += sprintf(b + bl, "\n");
+ bl += sprintf(b + bl, " readonly: %d\n", ibd->ibd_readonly);
bl += sprintf(b + bl, " ");
if (bd) {
@@ -493,9 +589,11 @@ static void iblock_submit_bios(struct bio_list *list, int rw)
blk_finish_plug(&plug);
}
-static int iblock_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, enum dma_data_direction data_direction)
+static int iblock_execute_rw(struct se_cmd *cmd)
{
+ struct scatterlist *sgl = cmd->t_data_sg;
+ u32 sgl_nents = cmd->t_data_nents;
+ enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *dev = cmd->se_dev;
struct iblock_req *ibr;
struct bio *bio;
@@ -642,6 +740,18 @@ static void iblock_bio_done(struct bio *bio, int err)
iblock_complete_cmd(cmd);
}
+static struct spc_ops iblock_spc_ops = {
+ .execute_rw = iblock_execute_rw,
+ .execute_sync_cache = iblock_execute_sync_cache,
+ .execute_write_same = iblock_execute_write_same,
+ .execute_unmap = iblock_execute_unmap,
+};
+
+static int iblock_parse_cdb(struct se_cmd *cmd)
+{
+ return sbc_parse_cdb(cmd, &iblock_spc_ops);
+}
+
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.owner = THIS_MODULE,
@@ -653,9 +763,7 @@ static struct se_subsystem_api iblock_template = {
.allocate_virtdevice = iblock_allocate_virtdevice,
.create_virtdevice = iblock_create_virtdevice,
.free_device = iblock_free_device,
- .execute_cmd = iblock_execute_cmd,
- .do_discard = iblock_do_discard,
- .do_sync_cache = iblock_emulate_sync_cache,
+ .parse_cdb = iblock_parse_cdb,
.check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 66cf7b9e205e..533627ae79ec 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -18,6 +18,7 @@ struct iblock_dev {
u32 ibd_flags;
struct bio_set *ibd_bio_set;
struct block_device *ibd_bd;
+ bool ibd_readonly;
} ____cacheline_aligned;
#endif /* TARGET_CORE_IBLOCK_H */
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 165e82429687..0fd428225d11 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -4,25 +4,16 @@
/* target_core_alua.c */
extern struct t10_alua_lu_gp *default_lu_gp;
-/* target_core_cdb.c */
-int target_emulate_inquiry(struct se_cmd *cmd);
-int target_emulate_readcapacity(struct se_cmd *cmd);
-int target_emulate_readcapacity_16(struct se_cmd *cmd);
-int target_emulate_modesense(struct se_cmd *cmd);
-int target_emulate_request_sense(struct se_cmd *cmd);
-int target_emulate_unmap(struct se_cmd *cmd);
-int target_emulate_write_same(struct se_cmd *cmd);
-int target_emulate_synchronize_cache(struct se_cmd *cmd);
-int target_emulate_noop(struct se_cmd *cmd);
-
/* target_core_device.c */
struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
void core_update_device_list_access(u32, u32, struct se_node_acl *);
-int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
- u32, u32, struct se_node_acl *, struct se_portal_group *, int);
+int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+ u32, u32, struct se_node_acl *, struct se_portal_group *);
+int core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+ u32, u32, struct se_node_acl *, struct se_portal_group *);
void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
@@ -56,8 +47,7 @@ int se_dev_set_max_sectors(struct se_device *, u32);
int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
-struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
- struct se_device *, u32);
+struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
int core_dev_del_lun(struct se_portal_group *, u32);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
@@ -104,7 +94,6 @@ void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
void transport_cmd_finish_abort(struct se_cmd *, int);
-void __target_remove_from_execute_list(struct se_cmd *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
@@ -116,6 +105,7 @@ int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
int transport_clear_lun_from_sessions(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
+int target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
/* target_core_stat.c */
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 85564998500a..1e946502c378 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -507,7 +507,7 @@ static int core_scsi3_pr_seq_non_holder(
* Check if write exclusive initiator ports *NOT* holding the
* WRITE_EXCLUSIVE_* reservation.
*/
- if ((we) && !(registered_nexus)) {
+ if (we && !registered_nexus) {
if (cmd->data_direction == DMA_TO_DEVICE) {
/*
* Conflict for write exclusive
@@ -2031,7 +2031,7 @@ static int __core_scsi3_write_aptpl_to_file(
if (IS_ERR(file) || !file || !file->f_dentry) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
- return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
+ return IS_ERR(file) ? PTR_ERR(file) : -ENOENT;
}
iov[0].iov_base = &buf[0];
@@ -2486,7 +2486,7 @@ static int core_scsi3_pro_reserve(
*/
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if ((pr_res_holder)) {
+ if (pr_res_holder) {
/*
* From spc4r17 Section 5.7.9: Reserving:
*
@@ -3818,7 +3818,7 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- ret = EINVAL;
+ ret = -EINVAL;
goto out;
}
@@ -3828,7 +3828,8 @@ int target_scsi3_emulate_pr_out(struct se_cmd *cmd)
*/
if (!cmd->se_sess) {
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (cmd->data_length < 24) {
@@ -4029,7 +4030,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
spin_lock(&se_dev->dev_reservation_lock);
pr_reg = se_dev->dev_pr_res_holder;
- if ((pr_reg)) {
+ if (pr_reg) {
/*
* Set the hardcoded Additional Length
*/
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 4ce2cf642fce..6e32ff6f2fa0 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -35,8 +35,10 @@
#include <linux/spinlock.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
-#include <linux/file.h>
+#include <linux/ratelimit.h>
#include <linux/module.h>
+#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
@@ -46,12 +48,14 @@
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
+#include "target_core_alua.h"
#include "target_core_pscsi.h"
#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
static struct se_subsystem_api pscsi_template;
+static int pscsi_execute_cmd(struct se_cmd *cmd);
static void pscsi_req_done(struct request *, int);
/* pscsi_attach_hba():
@@ -1019,9 +1023,79 @@ fail:
return -ENOMEM;
}
-static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, enum dma_data_direction data_direction)
+/*
+ * Clear a lun set in the cdb if the initiator talking to use spoke
+ * and old standards version, as we can't assume the underlying device
+ * won't choke up on it.
+ */
+static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
+{
+ switch (cdb[0]) {
+ case READ_10: /* SBC - RDProtect */
+ case READ_12: /* SBC - RDProtect */
+ case READ_16: /* SBC - RDProtect */
+ case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+ case VERIFY: /* SBC - VRProtect */
+ case VERIFY_16: /* SBC - VRProtect */
+ case WRITE_VERIFY: /* SBC - VRProtect */
+ case WRITE_VERIFY_12: /* SBC - VRProtect */
+ case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+ break;
+ default:
+ cdb[1] &= 0x1f; /* clear logical unit number */
+ break;
+ }
+}
+
+static int pscsi_parse_cdb(struct se_cmd *cmd)
+{
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned int dummy_size;
+ int ret;
+
+ if (cmd->se_cmd_flags & SCF_BIDI) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -EINVAL;
+ }
+
+ pscsi_clear_cdb_lun(cdb);
+
+ /*
+ * For REPORT LUNS we always need to emulate the response, for everything
+ * else the default for pSCSI is to pass the command to the underlying
+ * LLD / physical hardware.
+ */
+ switch (cdb[0]) {
+ case REPORT_LUNS:
+ ret = spc_parse_cdb(cmd, &dummy_size);
+ if (ret)
+ return ret;
+ break;
+ case READ_6:
+ case READ_10:
+ case READ_12:
+ case READ_16:
+ case WRITE_6:
+ case WRITE_10:
+ case WRITE_12:
+ case WRITE_16:
+ case WRITE_VERIFY:
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ /* FALLTHROUGH*/
+ default:
+ cmd->execute_cmd = pscsi_execute_cmd;
+ break;
+ }
+
+ return 0;
+}
+
+static int pscsi_execute_cmd(struct se_cmd *cmd)
{
+ struct scatterlist *sgl = cmd->t_data_sg;
+ u32 sgl_nents = cmd->t_data_nents;
+ enum dma_data_direction data_direction = cmd->data_direction;
struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt;
struct request *req;
@@ -1042,7 +1116,7 @@ static int pscsi_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
scsi_command_size(cmd->t_task_cdb));
- if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
+ if (!sgl) {
req = blk_get_request(pdv->pdv_sd->request_queue,
(data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
@@ -1188,7 +1262,7 @@ static struct se_subsystem_api pscsi_template = {
.create_virtdevice = pscsi_create_virtdevice,
.free_device = pscsi_free_device,
.transport_complete = pscsi_transport_complete,
- .execute_cmd = pscsi_execute_cmd,
+ .parse_cdb = pscsi_parse_cdb,
.check_configfs_dev_params = pscsi_check_configfs_dev_params,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index d0ceb873c0e5..d00bbe33ff8b 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -284,9 +284,11 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return NULL;
}
-static int rd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
- u32 sgl_nents, enum dma_data_direction data_direction)
+static int rd_execute_rw(struct se_cmd *cmd)
{
+ struct scatterlist *sgl = cmd->t_data_sg;
+ u32 sgl_nents = cmd->t_data_nents;
+ enum dma_data_direction data_direction = cmd->data_direction;
struct se_device *se_dev = cmd->se_dev;
struct rd_dev *dev = se_dev->dev_ptr;
struct rd_dev_sg_table *table;
@@ -460,6 +462,15 @@ static sector_t rd_get_blocks(struct se_device *dev)
return blocks_long;
}
+static struct spc_ops rd_spc_ops = {
+ .execute_rw = rd_execute_rw,
+};
+
+static int rd_parse_cdb(struct se_cmd *cmd)
+{
+ return sbc_parse_cdb(cmd, &rd_spc_ops);
+}
+
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
@@ -468,7 +479,7 @@ static struct se_subsystem_api rd_mcp_template = {
.allocate_virtdevice = rd_allocate_virtdevice,
.create_virtdevice = rd_create_virtdevice,
.free_device = rd_free_device,
- .execute_cmd = rd_execute_cmd,
+ .parse_cdb = rd_parse_cdb,
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
new file mode 100644
index 000000000000..a9dd9469e3bd
--- /dev/null
+++ b/drivers/target/target_core_sbc.c
@@ -0,0 +1,581 @@
+/*
+ * SCSI Block Commands (SBC) parsing and emulation.
+ *
+ * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
+ * Copyright (c) 2005, 2006, 2007 SBE, Inc.
+ * Copyright (c) 2007-2010 Rising Tide Systems
+ * Copyright (c) 2008-2010 Linux-iSCSI.org
+ *
+ * Nicholas A. Bellinger <nab@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ratelimit.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+
+#include <target/target_core_base.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
+#include "target_core_ua.h"
+
+
+static int sbc_emulate_readcapacity(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
+ unsigned long long blocks_long = dev->transport->get_blocks(dev);
+ u32 blocks;
+
+ if (blocks_long >= 0x00000000ffffffff)
+ blocks = 0xffffffff;
+ else
+ blocks = (u32)blocks_long;
+
+ buf = transport_kmap_data_sg(cmd);
+
+ buf[0] = (blocks >> 24) & 0xff;
+ buf[1] = (blocks >> 16) & 0xff;
+ buf[2] = (blocks >> 8) & 0xff;
+ buf[3] = blocks & 0xff;
+ buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+ buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+ buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+ buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+static int sbc_emulate_readcapacity_16(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
+ unsigned long long blocks = dev->transport->get_blocks(dev);
+
+ buf = transport_kmap_data_sg(cmd);
+
+ buf[0] = (blocks >> 56) & 0xff;
+ buf[1] = (blocks >> 48) & 0xff;
+ buf[2] = (blocks >> 40) & 0xff;
+ buf[3] = (blocks >> 32) & 0xff;
+ buf[4] = (blocks >> 24) & 0xff;
+ buf[5] = (blocks >> 16) & 0xff;
+ buf[6] = (blocks >> 8) & 0xff;
+ buf[7] = blocks & 0xff;
+ buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+ buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+ buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+ buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
+ /*
+ * Set Thin Provisioning Enable bit following sbc3r22 in section
+ * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
+ */
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
+ buf[14] = 0x80;
+
+ transport_kunmap_data_sg(cmd);
+
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+int spc_get_write_same_sectors(struct se_cmd *cmd)
+{
+ u32 num_blocks;
+
+ if (cmd->t_task_cdb[0] == WRITE_SAME)
+ num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
+ else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
+ else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
+
+ /*
+ * Use the explicit range when non zero is supplied, otherwise calculate
+ * the remaining range based on ->get_blocks() - starting LBA.
+ */
+ if (num_blocks)
+ return num_blocks;
+
+ return cmd->se_dev->transport->get_blocks(cmd->se_dev) -
+ cmd->t_task_lba + 1;
+}
+EXPORT_SYMBOL(spc_get_write_same_sectors);
+
+static int sbc_emulate_verify(struct se_cmd *cmd)
+{
+ target_complete_cmd(cmd, GOOD);
+ return 0;
+}
+
+static inline u32 sbc_get_size(struct se_cmd *cmd, u32 sectors)
+{
+ return cmd->se_dev->se_sub_dev->se_dev_attrib.block_size * sectors;
+}
+
+static int sbc_check_valid_sectors(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ unsigned long long end_lba;
+ u32 sectors;
+
+ sectors = cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size;
+ end_lba = dev->transport->get_blocks(dev) + 1;
+
+ if (cmd->t_task_lba + sectors > end_lba) {
+ pr_err("target: lba %llu, sectors %u exceeds end lba %llu\n",
+ cmd->t_task_lba, sectors, end_lba);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static inline u32 transport_get_sectors_6(unsigned char *cdb)
+{
+ /*
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
+ */
+ return cdb[4] ? : 256;
+}
+
+static inline u32 transport_get_sectors_10(unsigned char *cdb)
+{
+ return (u32)(cdb[7] << 8) + cdb[8];
+}
+
+static inline u32 transport_get_sectors_12(unsigned char *cdb)
+{
+ return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
+}
+
+static inline u32 transport_get_sectors_16(unsigned char *cdb)
+{
+ return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
+ (cdb[12] << 8) + cdb[13];
+}
+
+/*
+ * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
+ */
+static inline u32 transport_get_sectors_32(unsigned char *cdb)
+{
+ return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
+ (cdb[30] << 8) + cdb[31];
+
+}
+
+static inline u32 transport_lba_21(unsigned char *cdb)
+{
+ return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
+}
+
+static inline u32 transport_lba_32(unsigned char *cdb)
+{
+ return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+}
+
+static inline unsigned long long transport_lba_64(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
+ __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+/*
+ * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
+ */
+static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
+{
+ unsigned int __v1, __v2;
+
+ __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
+ __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
+
+ return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
+}
+
+static int sbc_write_same_supported(struct se_device *dev,
+ unsigned char *flags)
+{
+ if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+ pr_err("WRITE_SAME PBDATA and LBDATA"
+ " bits not supported for Block Discard"
+ " Emulation\n");
+ return -ENOSYS;
+ }
+
+ /*
+ * Currently for the emulated case we only accept
+ * tpws with the UNMAP=1 bit set.
+ */
+ if (!(flags[0] & 0x08)) {
+ pr_err("WRITE_SAME w/o UNMAP bit not"
+ " supported for Block Discard Emulation\n");
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static void xdreadwrite_callback(struct se_cmd *cmd)
+{
+ unsigned char *buf, *addr;
+ struct scatterlist *sg;
+ unsigned int offset;
+ int i;
+ int count;
+ /*
+ * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
+ *
+ * 1) read the specified logical block(s);
+ * 2) transfer logical blocks from the data-out buffer;
+ * 3) XOR the logical blocks transferred from the data-out buffer with
+ * the logical blocks read, storing the resulting XOR data in a buffer;
+ * 4) if the DISABLE WRITE bit is set to zero, then write the logical
+ * blocks transferred from the data-out buffer; and
+ * 5) transfer the resulting XOR data to the data-in buffer.
+ */
+ buf = kmalloc(cmd->data_length, GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate xor_callback buf\n");
+ return;
+ }
+ /*
+ * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
+ * into the locally allocated *buf
+ */
+ sg_copy_to_buffer(cmd->t_data_sg,
+ cmd->t_data_nents,
+ buf,
+ cmd->data_length);
+
+ /*
+ * Now perform the XOR against the BIDI read memory located at
+ * cmd->t_mem_bidi_list
+ */
+
+ offset = 0;
+ for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
+ addr = kmap_atomic(sg_page(sg));
+ if (!addr)
+ goto out;
+
+ for (i = 0; i < sg->length; i++)
+ *(addr + sg->offset + i) ^= *(buf + offset + i);
+
+ offset += sg->length;
+ kunmap_atomic(addr);
+ }
+
+out:
+ kfree(buf);
+}
+
+int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops)
+{
+ struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned int size;
+ u32 sectors = 0;
+ int ret;
+
+ switch (cdb[0]) {
+ case READ_6:
+ sectors = transport_get_sectors_6(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case READ_10:
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case READ_12:
+ sectors = transport_get_sectors_12(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case READ_16:
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case WRITE_6:
+ sectors = transport_get_sectors_6(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case WRITE_10:
+ case WRITE_VERIFY:
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case WRITE_12:
+ sectors = transport_get_sectors_12(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case WRITE_16:
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+ cmd->execute_cmd = ops->execute_rw;
+ break;
+ case XDWRITEREAD_10:
+ if ((cmd->data_direction != DMA_TO_DEVICE) ||
+ !(cmd->se_cmd_flags & SCF_BIDI))
+ goto out_invalid_cdb_field;
+ sectors = transport_get_sectors_10(cdb);
+
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+
+ /*
+ * Setup BIDI XOR callback to be run after I/O completion.
+ */
+ cmd->execute_cmd = ops->execute_rw;
+ cmd->transport_complete_callback = &xdreadwrite_callback;
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ break;
+ case VARIABLE_LENGTH_CMD:
+ {
+ u16 service_action = get_unaligned_be16(&cdb[8]);
+ switch (service_action) {
+ case XDWRITEREAD_32:
+ sectors = transport_get_sectors_32(cdb);
+
+ /*
+ * Use WRITE_32 and READ_32 opcodes for the emulated
+ * XDWRITE_READ_32 logic.
+ */
+ cmd->t_task_lba = transport_lba_64_ext(cdb);
+ cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+
+ /*
+ * Setup BIDI XOR callback to be run during after I/O
+ * completion.
+ */
+ cmd->execute_cmd = ops->execute_rw;
+ cmd->transport_complete_callback = &xdreadwrite_callback;
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
+ break;
+ case WRITE_SAME_32:
+ if (!ops->execute_write_same)
+ goto out_unsupported_cdb;
+
+ sectors = transport_get_sectors_32(cdb);
+ if (!sectors) {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+ " supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
+
+ if (sbc_write_same_supported(dev, &cdb[10]) < 0)
+ goto out_unsupported_cdb;
+ cmd->execute_cmd = ops->execute_write_same;
+ break;
+ default:
+ pr_err("VARIABLE_LENGTH_CMD service action"
+ " 0x%04x not supported\n", service_action);
+ goto out_unsupported_cdb;
+ }
+ break;
+ }
+ case READ_CAPACITY:
+ size = READ_CAP_LEN;
+ cmd->execute_cmd = sbc_emulate_readcapacity;
+ break;
+ case SERVICE_ACTION_IN:
+ switch (cmd->t_task_cdb[1] & 0x1f) {
+ case SAI_READ_CAPACITY_16:
+ cmd->execute_cmd = sbc_emulate_readcapacity_16;
+ break;
+ default:
+ pr_err("Unsupported SA: 0x%02x\n",
+ cmd->t_task_cdb[1] & 0x1f);
+ goto out_invalid_cdb_field;
+ }
+ size = (cdb[10] << 24) | (cdb[11] << 16) |
+ (cdb[12] << 8) | cdb[13];
+ break;
+ case SYNCHRONIZE_CACHE:
+ case SYNCHRONIZE_CACHE_16:
+ if (!ops->execute_sync_cache)
+ goto out_unsupported_cdb;
+
+ /*
+ * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
+ */
+ if (cdb[0] == SYNCHRONIZE_CACHE) {
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ }
+
+ size = sbc_get_size(cmd, sectors);
+
+ /*
+ * Check to ensure that LBA + Range does not exceed past end of
+ * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
+ */
+ if (cmd->t_task_lba || sectors) {
+ if (sbc_check_valid_sectors(cmd) < 0)
+ goto out_invalid_cdb_field;
+ }
+ cmd->execute_cmd = ops->execute_sync_cache;
+ break;
+ case UNMAP:
+ if (!ops->execute_unmap)
+ goto out_unsupported_cdb;
+
+ size = get_unaligned_be16(&cdb[7]);
+ cmd->execute_cmd = ops->execute_unmap;
+ break;
+ case WRITE_SAME_16:
+ if (!ops->execute_write_same)
+ goto out_unsupported_cdb;
+
+ sectors = transport_get_sectors_16(cdb);
+ if (!sectors) {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+
+ if (sbc_write_same_supported(dev, &cdb[1]) < 0)
+ goto out_unsupported_cdb;
+ cmd->execute_cmd = ops->execute_write_same;
+ break;
+ case WRITE_SAME:
+ if (!ops->execute_write_same)
+ goto out_unsupported_cdb;
+
+ sectors = transport_get_sectors_10(cdb);
+ if (!sectors) {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ size = sbc_get_size(cmd, 1);
+ cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
+
+ /*
+ * Follow sbcr26 with WRITE_SAME (10) and check for the existence
+ * of byte 1 bit 3 UNMAP instead of original reserved field
+ */
+ if (sbc_write_same_supported(dev, &cdb[1]) < 0)
+ goto out_unsupported_cdb;
+ cmd->execute_cmd = ops->execute_write_same;
+ break;
+ case VERIFY:
+ size = 0;
+ cmd->execute_cmd = sbc_emulate_verify;
+ break;
+ default:
+ ret = spc_parse_cdb(cmd, &size);
+ if (ret)
+ return ret;
+ }
+
+ /* reject any command that we don't have a handler for */
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd)
+ goto out_unsupported_cdb;
+
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+ unsigned long long end_lba;
+
+ if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
+ printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+ " big sectors %u exceeds fabric_max_sectors:"
+ " %u\n", cdb[0], sectors,
+ su_dev->se_dev_attrib.fabric_max_sectors);
+ goto out_invalid_cdb_field;
+ }
+ if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
+ printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
+ " big sectors %u exceeds backend hw_max_sectors:"
+ " %u\n", cdb[0], sectors,
+ su_dev->se_dev_attrib.hw_max_sectors);
+ goto out_invalid_cdb_field;
+ }
+
+ end_lba = dev->transport->get_blocks(dev) + 1;
+ if (cmd->t_task_lba + sectors > end_lba) {
+ pr_err("cmd exceeds last lba %llu "
+ "(lba %llu, sectors %u)\n",
+ end_lba, cmd->t_task_lba, sectors);
+ goto out_invalid_cdb_field;
+ }
+
+ size = sbc_get_size(cmd, sectors);
+ }
+
+ ret = target_cmd_size_check(cmd, size);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+
+out_unsupported_cdb:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -EINVAL;
+out_invalid_cdb_field:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+}
+EXPORT_SYMBOL(sbc_parse_cdb);
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_spc.c
index 9888693a18fe..4c861de538c9 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_spc.c
@@ -1,5 +1,5 @@
/*
- * CDB emulation for non-READ/WRITE commands.
+ * SCSI Primary Commands (SPC) parsing and emulation.
*
* Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
@@ -26,17 +26,21 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>
+
#include <scsi/scsi.h>
+#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>
#include "target_core_internal.h"
+#include "target_core_alua.h"
+#include "target_core_pr.h"
#include "target_core_ua.h"
-static void
-target_fill_alua_data(struct se_port *port, unsigned char *buf)
+
+static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
@@ -65,8 +69,7 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf)
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
-static int
-target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
+static int spc_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
{
struct se_lun *lun = cmd->se_lun;
struct se_device *dev = cmd->se_dev;
@@ -93,7 +96,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
* Enable SCCS and TPGS fields for Emulated ALUA
*/
if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
- target_fill_alua_data(lun->lun_sep, buf);
+ spc_fill_alua_data(lun->lun_sep, buf);
buf[7] = 0x2; /* CmdQue=1 */
@@ -106,8 +109,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd, char *buf)
}
/* unit serial number */
-static int
-target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u16 len = 0;
@@ -127,8 +129,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-static void
-target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
+static void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
+ unsigned char *buf)
{
unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0];
int cnt;
@@ -162,8 +164,7 @@ target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf)
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
*/
-static int
-target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
struct se_lun *lun = cmd->se_lun;
@@ -220,7 +221,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* VENDOR_SPECIFIC_IDENTIFIER and
* VENDOR_SPECIFIC_IDENTIFIER_EXTENTION
*/
- target_parse_naa_6h_vendor_specific(dev, &buf[off]);
+ spc_parse_naa_6h_vendor_specific(dev, &buf[off]);
len = 20;
off = (len + 4);
@@ -414,8 +415,7 @@ check_scsi_name:
}
/* Extended INQUIRY Data VPD Page */
-static int
-target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
@@ -428,15 +428,14 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
}
/* Block Limits VPD page */
-static int
-target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
u32 max_sectors;
int have_tp = 0;
/*
- * Following sbc3r22 section 6.5.3 Block Limits VPD page, when
+ * Following spc3r22 section 6.5.3 Block Limits VPD page, when
* emulate_tpu=1 or emulate_tpws=1 we will be expect a
* different page length for Thin Provisioning.
*/
@@ -500,8 +499,7 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
}
/* Block Device Characteristics VPD page */
-static int
-target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
@@ -513,13 +511,12 @@ target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
}
/* Thin Provisioning VPD */
-static int
-target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
/*
- * From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
+ * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
*
* The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
* zero, then the page length shall be set to 0004h. If the DP bit
@@ -564,25 +561,23 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-static int
-target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
static struct {
uint8_t page;
int (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
- { .page = 0x00, .emulate = target_emulate_evpd_00 },
- { .page = 0x80, .emulate = target_emulate_evpd_80 },
- { .page = 0x83, .emulate = target_emulate_evpd_83 },
- { .page = 0x86, .emulate = target_emulate_evpd_86 },
- { .page = 0xb0, .emulate = target_emulate_evpd_b0 },
- { .page = 0xb1, .emulate = target_emulate_evpd_b1 },
- { .page = 0xb2, .emulate = target_emulate_evpd_b2 },
+ { .page = 0x00, .emulate = spc_emulate_evpd_00 },
+ { .page = 0x80, .emulate = spc_emulate_evpd_80 },
+ { .page = 0x83, .emulate = spc_emulate_evpd_83 },
+ { .page = 0x86, .emulate = spc_emulate_evpd_86 },
+ { .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
+ { .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
+ { .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
};
/* supported vital product data pages */
-static int
-target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+static int spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
int p;
@@ -601,7 +596,7 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
-int target_emulate_inquiry(struct se_cmd *cmd)
+static int spc_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
@@ -643,7 +638,7 @@ int target_emulate_inquiry(struct se_cmd *cmd)
goto out;
}
- ret = target_emulate_inquiry_std(cmd, buf);
+ ret = spc_emulate_inquiry_std(cmd, buf);
goto out;
}
@@ -671,70 +666,7 @@ out:
return ret;
}
-int target_emulate_readcapacity(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- unsigned char *buf;
- unsigned long long blocks_long = dev->transport->get_blocks(dev);
- u32 blocks;
-
- if (blocks_long >= 0x00000000ffffffff)
- blocks = 0xffffffff;
- else
- blocks = (u32)blocks_long;
-
- buf = transport_kmap_data_sg(cmd);
-
- buf[0] = (blocks >> 24) & 0xff;
- buf[1] = (blocks >> 16) & 0xff;
- buf[2] = (blocks >> 8) & 0xff;
- buf[3] = blocks & 0xff;
- buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
- buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
- buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
- buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
-
- transport_kunmap_data_sg(cmd);
-
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
-
-int target_emulate_readcapacity_16(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- unsigned char *buf;
- unsigned long long blocks = dev->transport->get_blocks(dev);
-
- buf = transport_kmap_data_sg(cmd);
-
- buf[0] = (blocks >> 56) & 0xff;
- buf[1] = (blocks >> 48) & 0xff;
- buf[2] = (blocks >> 40) & 0xff;
- buf[3] = (blocks >> 32) & 0xff;
- buf[4] = (blocks >> 24) & 0xff;
- buf[5] = (blocks >> 16) & 0xff;
- buf[6] = (blocks >> 8) & 0xff;
- buf[7] = blocks & 0xff;
- buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
- buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
- buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
- buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
- /*
- * Set Thin Provisioning Enable bit following sbc3r22 in section
- * READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
- */
- if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
- buf[14] = 0x80;
-
- transport_kunmap_data_sg(cmd);
-
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
-
-static int
-target_modesense_rwrecovery(unsigned char *p)
+static int spc_modesense_rwrecovery(unsigned char *p)
{
p[0] = 0x01;
p[1] = 0x0a;
@@ -742,8 +674,7 @@ target_modesense_rwrecovery(unsigned char *p)
return 12;
}
-static int
-target_modesense_control(struct se_device *dev, unsigned char *p)
+static int spc_modesense_control(struct se_device *dev, unsigned char *p)
{
p[0] = 0x0a;
p[1] = 0x0a;
@@ -828,8 +759,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
return 12;
}
-static int
-target_modesense_caching(struct se_device *dev, unsigned char *p)
+static int spc_modesense_caching(struct se_device *dev, unsigned char *p)
{
p[0] = 0x08;
p[1] = 0x12;
@@ -840,8 +770,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p)
return 20;
}
-static void
-target_modesense_write_protect(unsigned char *buf, int type)
+static void spc_modesense_write_protect(unsigned char *buf, int type)
{
/*
* I believe that the WP bit (bit 7) in the mode header is the same for
@@ -856,8 +785,7 @@ target_modesense_write_protect(unsigned char *buf, int type)
}
}
-static void
-target_modesense_dpofua(unsigned char *buf, int type)
+static void spc_modesense_dpofua(unsigned char *buf, int type)
{
switch (type) {
case TYPE_DISK:
@@ -868,7 +796,7 @@ target_modesense_dpofua(unsigned char *buf, int type)
}
}
-int target_emulate_modesense(struct se_cmd *cmd)
+static int spc_emulate_modesense(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
char *cdb = cmd->t_task_cdb;
@@ -883,18 +811,18 @@ int target_emulate_modesense(struct se_cmd *cmd)
switch (cdb[2] & 0x3f) {
case 0x01:
- length = target_modesense_rwrecovery(&buf[offset]);
+ length = spc_modesense_rwrecovery(&buf[offset]);
break;
case 0x08:
- length = target_modesense_caching(dev, &buf[offset]);
+ length = spc_modesense_caching(dev, &buf[offset]);
break;
case 0x0a:
- length = target_modesense_control(dev, &buf[offset]);
+ length = spc_modesense_control(dev, &buf[offset]);
break;
case 0x3f:
- length = target_modesense_rwrecovery(&buf[offset]);
- length += target_modesense_caching(dev, &buf[offset+length]);
- length += target_modesense_control(dev, &buf[offset+length]);
+ length = spc_modesense_rwrecovery(&buf[offset]);
+ length += spc_modesense_caching(dev, &buf[offset+length]);
+ length += spc_modesense_control(dev, &buf[offset+length]);
break;
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
@@ -912,11 +840,11 @@ int target_emulate_modesense(struct se_cmd *cmd)
if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
- target_modesense_write_protect(&buf[3], type);
+ spc_modesense_write_protect(&buf[3], type);
if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
- target_modesense_dpofua(&buf[3], type);
+ spc_modesense_dpofua(&buf[3], type);
if ((offset + 2) > cmd->data_length)
offset = cmd->data_length;
@@ -928,11 +856,11 @@ int target_emulate_modesense(struct se_cmd *cmd)
if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
- target_modesense_write_protect(&buf[2], type);
+ spc_modesense_write_protect(&buf[2], type);
if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
- target_modesense_dpofua(&buf[2], type);
+ spc_modesense_dpofua(&buf[2], type);
if ((offset + 1) > cmd->data_length)
offset = cmd->data_length;
@@ -946,7 +874,7 @@ int target_emulate_modesense(struct se_cmd *cmd)
return 0;
}
-int target_emulate_request_sense(struct se_cmd *cmd)
+static int spc_emulate_request_sense(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task_cdb;
unsigned char *buf;
@@ -1005,126 +933,172 @@ end:
return 0;
}
-/*
- * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
- * Note this is not used for TCM/pSCSI passthrough
- */
-int target_emulate_unmap(struct se_cmd *cmd)
+static int spc_emulate_testunitready(struct se_cmd *cmd)
{
- struct se_device *dev = cmd->se_dev;
- unsigned char *buf, *ptr = NULL;
- unsigned char *cdb = &cmd->t_task_cdb[0];
- sector_t lba;
- unsigned int size = cmd->data_length, range;
- int ret = 0, offset;
- unsigned short dl, bd_dl;
-
- if (!dev->transport->do_discard) {
- pr_err("UNMAP emulation not supported for: %s\n",
- dev->transport->name);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -ENOSYS;
- }
-
- /* First UNMAP block descriptor starts at 8 byte offset */
- offset = 8;
- size -= 8;
- dl = get_unaligned_be16(&cdb[0]);
- bd_dl = get_unaligned_be16(&cdb[2]);
-
- buf = transport_kmap_data_sg(cmd);
-
- ptr = &buf[offset];
- pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
- " ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
-
- while (size) {
- lba = get_unaligned_be64(&ptr[0]);
- range = get_unaligned_be32(&ptr[8]);
- pr_debug("UNMAP: Using lba: %llu and range: %u\n",
- (unsigned long long)lba, range);
-
- ret = dev->transport->do_discard(dev, lba, range);
- if (ret < 0) {
- pr_err("blkdev_issue_discard() failed: %d\n",
- ret);
- goto err;
- }
-
- ptr += 16;
- size -= 16;
- }
-
-err:
- transport_kunmap_data_sg(cmd);
- if (!ret)
- target_complete_cmd(cmd, GOOD);
- return ret;
+ target_complete_cmd(cmd, GOOD);
+ return 0;
}
-/*
- * Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
- * Note this is not used for TCM/pSCSI passthrough
- */
-int target_emulate_write_same(struct se_cmd *cmd)
+int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
struct se_device *dev = cmd->se_dev;
- sector_t range;
- sector_t lba = cmd->t_task_lba;
- u32 num_blocks;
- int ret;
-
- if (!dev->transport->do_discard) {
- pr_err("WRITE_SAME emulation not supported"
- " for: %s\n", dev->transport->name);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -ENOSYS;
- }
-
- if (cmd->t_task_cdb[0] == WRITE_SAME)
- num_blocks = get_unaligned_be16(&cmd->t_task_cdb[7]);
- else if (cmd->t_task_cdb[0] == WRITE_SAME_16)
- num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
- else /* WRITE_SAME_32 via VARIABLE_LENGTH_CMD */
- num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
-
- /*
- * Use the explicit range when non zero is supplied, otherwise calculate
- * the remaining range based on ->get_blocks() - starting LBA.
- */
- if (num_blocks != 0)
- range = num_blocks;
- else
- range = (dev->transport->get_blocks(dev) - lba);
-
- pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
- (unsigned long long)lba, (unsigned long long)range);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ unsigned char *cdb = cmd->t_task_cdb;
- ret = dev->transport->do_discard(dev, lba, range);
- if (ret < 0) {
- pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
- return ret;
- }
+ switch (cdb[0]) {
+ case MODE_SELECT:
+ *size = cdb[4];
+ break;
+ case MODE_SELECT_10:
+ *size = (cdb[7] << 8) + cdb[8];
+ break;
+ case MODE_SENSE:
+ *size = cdb[4];
+ cmd->execute_cmd = spc_emulate_modesense;
+ break;
+ case MODE_SENSE_10:
+ *size = (cdb[7] << 8) + cdb[8];
+ cmd->execute_cmd = spc_emulate_modesense;
+ break;
+ case LOG_SELECT:
+ case LOG_SENSE:
+ *size = (cdb[7] << 8) + cdb[8];
+ break;
+ case PERSISTENT_RESERVE_IN:
+ if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+ cmd->execute_cmd = target_scsi3_emulate_pr_in;
+ *size = (cdb[7] << 8) + cdb[8];
+ break;
+ case PERSISTENT_RESERVE_OUT:
+ if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
+ cmd->execute_cmd = target_scsi3_emulate_pr_out;
+ *size = (cdb[7] << 8) + cdb[8];
+ break;
+ case RELEASE:
+ case RELEASE_10:
+ if (cdb[0] == RELEASE_10)
+ *size = (cdb[7] << 8) | cdb[8];
+ else
+ *size = cmd->data_length;
+
+ if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+ cmd->execute_cmd = target_scsi2_reservation_release;
+ break;
+ case RESERVE:
+ case RESERVE_10:
+ /*
+ * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
+ * Assume the passthrough or $FABRIC_MOD will tell us about it.
+ */
+ if (cdb[0] == RESERVE_10)
+ *size = (cdb[7] << 8) | cdb[8];
+ else
+ *size = cmd->data_length;
- target_complete_cmd(cmd, GOOD);
- return 0;
-}
+ /*
+ * Setup the legacy emulated handler for SPC-2 and
+ * >= SPC-3 compatible reservation handling (CRH=1)
+ * Otherwise, we assume the underlying SCSI logic is
+ * is running in SPC_PASSTHROUGH, and wants reservations
+ * emulation disabled.
+ */
+ if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
+ cmd->execute_cmd = target_scsi2_reservation_reserve;
+ break;
+ case REQUEST_SENSE:
+ *size = cdb[4];
+ cmd->execute_cmd = spc_emulate_request_sense;
+ break;
+ case INQUIRY:
+ *size = (cdb[3] << 8) + cdb[4];
-int target_emulate_synchronize_cache(struct se_cmd *cmd)
-{
- if (!cmd->se_dev->transport->do_sync_cache) {
- pr_err("SYNCHRONIZE_CACHE emulation not supported"
- " for: %s\n", cmd->se_dev->transport->name);
+ /*
+ * Do implict HEAD_OF_QUEUE processing for INQUIRY.
+ * See spc4r17 section 5.3
+ */
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ cmd->execute_cmd = spc_emulate_inquiry;
+ break;
+ case SECURITY_PROTOCOL_IN:
+ case SECURITY_PROTOCOL_OUT:
+ *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ break;
+ case EXTENDED_COPY:
+ case READ_ATTRIBUTE:
+ case RECEIVE_COPY_RESULTS:
+ case WRITE_ATTRIBUTE:
+ *size = (cdb[10] << 24) | (cdb[11] << 16) |
+ (cdb[12] << 8) | cdb[13];
+ break;
+ case RECEIVE_DIAGNOSTIC:
+ case SEND_DIAGNOSTIC:
+ *size = (cdb[3] << 8) | cdb[4];
+ break;
+ case WRITE_BUFFER:
+ *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
+ break;
+ case REPORT_LUNS:
+ cmd->execute_cmd = target_report_luns;
+ *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
+ /*
+ * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
+ * See spc4r17 section 5.3
+ */
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ break;
+ case TEST_UNIT_READY:
+ cmd->execute_cmd = spc_emulate_testunitready;
+ *size = 0;
+ break;
+ case MAINTENANCE_IN:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
+ /*
+ * MAINTENANCE_IN from SCC-2
+ * Check for emulated MI_REPORT_TARGET_PGS
+ */
+ if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
+ su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ cmd->execute_cmd =
+ target_emulate_report_target_port_groups;
+ }
+ *size = get_unaligned_be32(&cdb[6]);
+ } else {
+ /*
+ * GPCMD_SEND_KEY from multi media commands
+ */
+ *size = get_unaligned_be16(&cdb[8]);
+ }
+ break;
+ case MAINTENANCE_OUT:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
+ /*
+ * MAINTENANCE_OUT from SCC-2
+ * Check for emulated MO_SET_TARGET_PGS.
+ */
+ if (cdb[1] == MO_SET_TARGET_PGS &&
+ su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
+ cmd->execute_cmd =
+ target_emulate_set_target_port_groups;
+ }
+ *size = get_unaligned_be32(&cdb[6]);
+ } else {
+ /*
+ * GPCMD_SEND_KEY from multi media commands
+ */
+ *size = get_unaligned_be16(&cdb[8]);
+ }
+ break;
+ default:
+ pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
+ " 0x%02x, sending CHECK_CONDITION.\n",
+ cmd->se_tfo->get_fabric_name(), cdb[0]);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -ENOSYS;
+ return -EINVAL;
}
- cmd->se_dev->transport->do_sync_cache(cmd);
- return 0;
-}
-
-int target_emulate_noop(struct se_cmd *cmd)
-{
- target_complete_cmd(cmd, GOOD);
return 0;
}
+EXPORT_SYMBOL(spc_parse_cdb);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 84caf1bed9a3..1c59a3c23b2c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -295,9 +295,6 @@ static void core_tmr_drain_state_list(
list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false;
-
- if (!list_empty(&cmd->execute_list))
- __target_remove_from_execute_list(cmd);
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
@@ -354,57 +351,6 @@ static void core_tmr_drain_state_list(
}
}
-static void core_tmr_drain_cmd_list(
- struct se_device *dev,
- struct se_cmd *prout_cmd,
- struct se_node_acl *tmr_nacl,
- int tas,
- struct list_head *preempt_and_abort_list)
-{
- LIST_HEAD(drain_cmd_list);
- struct se_queue_obj *qobj = &dev->dev_queue_obj;
- struct se_cmd *cmd, *tcmd;
- unsigned long flags;
-
- /*
- * Release all commands remaining in the per-device command queue.
- *
- * This follows the same logic as above for the state list.
- */
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
- /*
- * For PREEMPT_AND_ABORT usage, only process commands
- * with a matching reservation key.
- */
- if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
- continue;
- /*
- * Not aborting PROUT PREEMPT_AND_ABORT CDB..
- */
- if (prout_cmd == cmd)
- continue;
-
- cmd->transport_state &= ~CMD_T_QUEUED;
- atomic_dec(&qobj->queue_cnt);
- list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
- }
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- while (!list_empty(&drain_cmd_list)) {
- cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
- list_del_init(&cmd->se_queue_node);
-
- pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
- " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
- "Preempt" : "", cmd, cmd->t_state,
- atomic_read(&cmd->t_fe_count));
-
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
- atomic_read(&cmd->t_fe_count));
- }
-}
-
int core_tmr_lun_reset(
struct se_device *dev,
struct se_tmr_req *tmr,
@@ -447,8 +393,7 @@ int core_tmr_lun_reset(
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
preempt_and_abort_list);
- core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
- preempt_and_abort_list);
+
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 8bd58e284185..b8628a5014b9 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -77,8 +77,8 @@ static void core_clear_initiator_node_from_tpg(
lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock);
- core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
- TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
+ core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
+ TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
spin_lock_irq(&nacl->device_list_lock);
}
@@ -172,8 +172,8 @@ void core_tpg_add_node_to_devs(
(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
"READ-WRITE" : "READ-ONLY");
- core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
- lun_access, acl, tpg, 1);
+ core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
+ lun_access, acl, tpg);
spin_lock(&tpg->tpg_lun_lock);
}
spin_unlock(&tpg->tpg_lun_lock);
@@ -306,10 +306,8 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
* TPG LUNs if the fabric is not explictly asking for
* tpg_check_demo_mode_login_only() == 1.
*/
- if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
- (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
- do { ; } while (0);
- else
+ if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
+ (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
core_tpg_add_node_to_devs(acl, tpg);
spin_lock_irq(&tpg->acl_node_lock);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 634d0f31a28c..0eaae23d12b5 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -66,15 +66,12 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
-static int transport_generic_write_pending(struct se_cmd *);
-static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev);
static int transport_generic_get_mem(struct se_cmd *cmd);
+static int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
static void transport_put_cmd(struct se_cmd *cmd);
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void target_complete_ok_work(struct work_struct *work);
@@ -195,14 +192,6 @@ u32 scsi_get_new_index(scsi_index_t type)
return new_index;
}
-static void transport_init_queue_obj(struct se_queue_obj *qobj)
-{
- atomic_set(&qobj->queue_cnt, 0);
- INIT_LIST_HEAD(&qobj->qobj_list);
- init_waitqueue_head(&qobj->thread_wq);
- spin_lock_init(&qobj->cmd_queue_lock);
-}
-
void transport_subsystem_check_init(void)
{
int ret;
@@ -243,7 +232,6 @@ struct se_session *transport_init_session(void)
INIT_LIST_HEAD(&se_sess->sess_list);
INIT_LIST_HEAD(&se_sess->sess_acl_list);
INIT_LIST_HEAD(&se_sess->sess_cmd_list);
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
spin_lock_init(&se_sess->sess_cmd_lock);
kref_init(&se_sess->sess_kref);
@@ -468,18 +456,7 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
-/* transport_cmd_check_stop():
- *
- * 'transport_off = 1' determines if CMD_T_ACTIVE should be cleared.
- * 'transport_off = 2' determines if task_dev_state should be removed.
- *
- * A non-zero u8 t_state sets cmd->t_state.
- * Returns 1 when command is stopped, else 0.
- */
-static int transport_cmd_check_stop(
- struct se_cmd *cmd,
- int transport_off,
- u8 t_state)
+static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
{
unsigned long flags;
@@ -493,13 +470,23 @@ static int transport_cmd_check_stop(
__func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
cmd->transport_state &= ~CMD_T_ACTIVE;
- if (transport_off == 2)
+ if (remove_from_lists)
target_remove_from_state_list(cmd);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->transport_lun_stop_comp);
return 1;
}
+
+ if (remove_from_lists) {
+ target_remove_from_state_list(cmd);
+
+ /*
+ * Clear struct se_cmd->se_lun before the handoff to FE.
+ */
+ cmd->se_lun = NULL;
+ }
+
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
@@ -509,58 +496,36 @@ static int transport_cmd_check_stop(
__func__, __LINE__,
cmd->se_tfo->get_task_tag(cmd));
- if (transport_off == 2)
- target_remove_from_state_list(cmd);
-
- /*
- * Clear struct se_cmd->se_lun before the transport_off == 2 handoff
- * to FE.
- */
- if (transport_off == 2)
- cmd->se_lun = NULL;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->t_transport_stop_comp);
return 1;
}
- if (transport_off) {
- cmd->transport_state &= ~CMD_T_ACTIVE;
- if (transport_off == 2) {
- target_remove_from_state_list(cmd);
- /*
- * Clear struct se_cmd->se_lun before the transport_off == 2
- * handoff to fabric module.
- */
- cmd->se_lun = NULL;
- /*
- * Some fabric modules like tcm_loop can release
- * their internally allocated I/O reference now and
- * struct se_cmd now.
- *
- * Fabric modules are expected to return '1' here if the
- * se_cmd being passed is released at this point,
- * or zero if not being released.
- */
- if (cmd->se_tfo->check_stop_free != NULL) {
- spin_unlock_irqrestore(
- &cmd->t_state_lock, flags);
-
- return cmd->se_tfo->check_stop_free(cmd);
- }
+
+ cmd->transport_state &= ~CMD_T_ACTIVE;
+ if (remove_from_lists) {
+ /*
+ * Some fabric modules like tcm_loop can release
+ * their internally allocated I/O reference now and
+ * struct se_cmd now.
+ *
+ * Fabric modules are expected to return '1' here if the
+ * se_cmd being passed is released at this point,
+ * or zero if not being released.
+ */
+ if (cmd->se_tfo->check_stop_free != NULL) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ return cmd->se_tfo->check_stop_free(cmd);
}
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ }
- return 0;
- } else if (t_state)
- cmd->t_state = t_state;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
return 0;
}
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
- return transport_cmd_check_stop(cmd, 2, 0);
+ return transport_cmd_check_stop(cmd, true);
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
@@ -591,79 +556,8 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- if (remove) {
- transport_remove_cmd_from_queue(cmd);
+ if (remove)
transport_put_cmd(cmd);
- }
-}
-
-static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state,
- bool at_head)
-{
- struct se_device *dev = cmd->se_dev;
- struct se_queue_obj *qobj = &dev->dev_queue_obj;
- unsigned long flags;
-
- if (t_state) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->t_state = t_state;
- cmd->transport_state |= CMD_T_ACTIVE;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- }
-
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-
- /* If the cmd is already on the list, remove it before we add it */
- if (!list_empty(&cmd->se_queue_node))
- list_del(&cmd->se_queue_node);
- else
- atomic_inc(&qobj->queue_cnt);
-
- if (at_head)
- list_add(&cmd->se_queue_node, &qobj->qobj_list);
- else
- list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
- cmd->transport_state |= CMD_T_QUEUED;
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- wake_up_interruptible(&qobj->thread_wq);
-}
-
-static struct se_cmd *
-transport_get_cmd_from_queue(struct se_queue_obj *qobj)
-{
- struct se_cmd *cmd;
- unsigned long flags;
-
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (list_empty(&qobj->qobj_list)) {
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return NULL;
- }
- cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
-
- cmd->transport_state &= ~CMD_T_QUEUED;
- list_del_init(&cmd->se_queue_node);
- atomic_dec(&qobj->queue_cnt);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
-
- return cmd;
-}
-
-static void transport_remove_cmd_from_queue(struct se_cmd *cmd)
-{
- struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;
- unsigned long flags;
-
- spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (!(cmd->transport_state & CMD_T_QUEUED)) {
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return;
- }
- cmd->transport_state &= ~CMD_T_QUEUED;
- atomic_dec(&qobj->queue_cnt);
- list_del_init(&cmd->se_queue_node);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
}
static void target_complete_failure_work(struct work_struct *work)
@@ -742,68 +636,11 @@ static void target_add_to_state_list(struct se_cmd *cmd)
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
-static void __target_add_to_execute_list(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- bool head_of_queue = false;
-
- if (!list_empty(&cmd->execute_list))
- return;
-
- if (dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED &&
- cmd->sam_task_attr == MSG_HEAD_TAG)
- head_of_queue = true;
-
- if (head_of_queue)
- list_add(&cmd->execute_list, &dev->execute_list);
- else
- list_add_tail(&cmd->execute_list, &dev->execute_list);
-
- atomic_inc(&dev->execute_tasks);
-
- if (cmd->state_active)
- return;
-
- if (head_of_queue)
- list_add(&cmd->state_list, &dev->state_list);
- else
- list_add_tail(&cmd->state_list, &dev->state_list);
-
- cmd->state_active = true;
-}
-
-static void target_add_to_execute_list(struct se_cmd *cmd)
-{
- unsigned long flags;
- struct se_device *dev = cmd->se_dev;
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- __target_add_to_execute_list(cmd);
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-}
-
-void __target_remove_from_execute_list(struct se_cmd *cmd)
-{
- list_del_init(&cmd->execute_list);
- atomic_dec(&cmd->se_dev->execute_tasks);
-}
-
-static void target_remove_from_execute_list(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- unsigned long flags;
-
- if (WARN_ON(list_empty(&cmd->execute_list)))
- return;
-
- spin_lock_irqsave(&dev->execute_task_lock, flags);
- __target_remove_from_execute_list(cmd);
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-}
-
/*
* Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
*/
+static void transport_write_pending_qf(struct se_cmd *cmd);
+static void transport_complete_qf(struct se_cmd *cmd);
static void target_qf_do_work(struct work_struct *work)
{
@@ -827,7 +664,10 @@ static void target_qf_do_work(struct work_struct *work)
(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
: "UNKNOWN");
- transport_add_cmd_to_queue(cmd, cmd->t_state, true);
+ if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
+ transport_write_pending_qf(cmd);
+ else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK)
+ transport_complete_qf(cmd);
}
}
@@ -874,8 +714,7 @@ void transport_dump_dev_state(
break;
}
- *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
- atomic_read(&dev->execute_tasks), dev->queue_depth);
+ *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n",
dev->se_sub_dev->se_dev_attrib.block_size,
dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
@@ -1212,7 +1051,6 @@ struct se_device *transport_add_device_to_core_hba(
return NULL;
}
- transport_init_queue_obj(&dev->dev_queue_obj);
dev->dev_flags = device_flags;
dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
dev->dev_ptr = transport_dev;
@@ -1222,7 +1060,6 @@ struct se_device *transport_add_device_to_core_hba(
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
- INIT_LIST_HEAD(&dev->execute_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->state_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
@@ -1261,17 +1098,17 @@ struct se_device *transport_add_device_to_core_hba(
* Setup the Asymmetric Logical Unit Assignment for struct se_device
*/
if (core_setup_alua(dev, force_pt) < 0)
- goto out;
+ goto err_dev_list;
/*
* Startup the struct se_device processing thread
*/
- dev->process_thread = kthread_run(transport_processing_thread, dev,
- "LIO_%s", dev->transport->name);
- if (IS_ERR(dev->process_thread)) {
- pr_err("Unable to create kthread: LIO_%s\n",
+ dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
+ dev->transport->name);
+ if (!dev->tmr_wq) {
+ pr_err("Unable to create tmr workqueue for %s\n",
dev->transport->name);
- goto out;
+ goto err_dev_list;
}
/*
* Setup work_queue for QUEUE_FULL
@@ -1289,7 +1126,7 @@ struct se_device *transport_add_device_to_core_hba(
if (!inquiry_prod || !inquiry_rev) {
pr_err("All non TCM/pSCSI plugins require"
" INQUIRY consts\n");
- goto out;
+ goto err_wq;
}
strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
@@ -1299,9 +1136,10 @@ struct se_device *transport_add_device_to_core_hba(
scsi_dump_inquiry(dev);
return dev;
-out:
- kthread_stop(dev->process_thread);
+err_wq:
+ destroy_workqueue(dev->tmr_wq);
+err_dev_list:
spin_lock(&hba->device_lock);
list_del(&dev->dev_list);
hba->dev_count--;
@@ -1315,35 +1153,54 @@ out:
}
EXPORT_SYMBOL(transport_add_device_to_core_hba);
-/* transport_generic_prepare_cdb():
- *
- * Since the Initiator sees iSCSI devices as LUNs, the SCSI CDB will
- * contain the iSCSI LUN in bits 7-5 of byte 1 as per SAM-2.
- * The point of this is since we are mapping iSCSI LUNs to
- * SCSI Target IDs having a non-zero LUN in the CDB will throw the
- * devices and HBAs for a loop.
- */
-static inline void transport_generic_prepare_cdb(
- unsigned char *cdb)
+int target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
- switch (cdb[0]) {
- case READ_10: /* SBC - RDProtect */
- case READ_12: /* SBC - RDProtect */
- case READ_16: /* SBC - RDProtect */
- case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
- case VERIFY: /* SBC - VRProtect */
- case VERIFY_16: /* SBC - VRProtect */
- case WRITE_VERIFY: /* SBC - VRProtect */
- case WRITE_VERIFY_12: /* SBC - VRProtect */
- case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
- break;
- default:
- cdb[1] &= 0x1f; /* clear logical unit number */
- break;
+ struct se_device *dev = cmd->se_dev;
+
+ if (cmd->unknown_data_length) {
+ cmd->data_length = size;
+ } else if (size != cmd->data_length) {
+ pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+ " %u does not match SCSI CDB Length: %u for SAM Opcode:"
+ " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
+ cmd->data_length, size, cmd->t_task_cdb[0]);
+
+ cmd->cmd_spdtl = size;
+
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ pr_err("Rejecting underflow/overflow"
+ " WRITE data\n");
+ goto out_invalid_cdb_field;
+ }
+ /*
+ * Reject READ_* or WRITE_* with overflow/underflow for
+ * type SCF_SCSI_DATA_CDB.
+ */
+ if (dev->se_sub_dev->se_dev_attrib.block_size != 512) {
+ pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
+ " CDB on non 512-byte sector setup subsystem"
+ " plugin: %s\n", dev->transport->name);
+ /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
+ goto out_invalid_cdb_field;
+ }
+
+ if (size > cmd->data_length) {
+ cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+ cmd->residual_count = (size - cmd->data_length);
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = (cmd->data_length - size);
+ }
+ cmd->data_length = size;
}
-}
-static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
+ return 0;
+
+out_invalid_cdb_field:
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+}
/*
* Used by fabric modules containing a local struct se_cmd within their
@@ -1361,9 +1218,7 @@ void transport_init_se_cmd(
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
- INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
- INIT_LIST_HEAD(&cmd->execute_list);
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->transport_lun_fe_stop_comp);
init_completion(&cmd->transport_lun_stop_comp);
@@ -1418,9 +1273,12 @@ int target_setup_cmd_from_cdb(
struct se_cmd *cmd,
unsigned char *cdb)
{
+ struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
+ u32 pr_reg_type = 0;
+ u8 alua_ascq = 0;
+ unsigned long flags;
int ret;
- transport_generic_prepare_cdb(cdb);
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
@@ -1457,15 +1315,66 @@ int target_setup_cmd_from_cdb(
* Copy the original CDB into cmd->
*/
memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
+
/*
- * Setup the received CDB based on SCSI defined opcodes and
- * perform unit attention, persistent reservations and ALUA
- * checks for virtual device backends. The cmd->t_task_cdb
- * pointer is expected to be setup before we reach this point.
+ * Check for an existing UNIT ATTENTION condition
*/
- ret = transport_generic_cmd_sequencer(cmd, cdb);
+ if (core_scsi3_ua_check(cmd, cdb) < 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
+ return -EINVAL;
+ }
+
+ ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
+ if (ret != 0) {
+ /*
+ * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+ * The ALUA additional sense code qualifier (ASCQ) is determined
+ * by the ALUA primary or secondary access state..
+ */
+ if (ret > 0) {
+ pr_debug("[%s]: ALUA TG Port not available, "
+ "SenseKey: NOT_READY, ASC/ASCQ: "
+ "0x04/0x%02x\n",
+ cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+ transport_set_sense_codes(cmd, 0x04, alua_ascq);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
+ return -EINVAL;
+ }
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
+
+ /*
+ * Check status for SPC-3 Persistent Reservations
+ */
+ if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type)) {
+ if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
+ cmd, cdb, pr_reg_type) != 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EBUSY;
+ }
+ /*
+ * This means the CDB is allowed for the SCSI Initiator port
+ * when said port is *NOT* holding the legacy SPC-2 or
+ * SPC-3 Persistent Reservation.
+ */
+ }
+
+ ret = cmd->se_dev->transport->parse_cdb(cmd);
if (ret < 0)
return ret;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
/*
* Check for SAM Task Attribute Emulation
*/
@@ -1503,10 +1412,9 @@ int transport_handle_cdb_direct(
return -EINVAL;
}
/*
- * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE following
- * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()
- * in existing usage to ensure that outstanding descriptors are handled
- * correctly during shutdown via transport_wait_for_tasks()
+ * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
+ * outstanding descriptors are handled correctly during shutdown via
+ * transport_wait_for_tasks()
*
* Also, we don't take cmd->t_state_lock here as we only expect
* this to be called for initial descriptor submission.
@@ -1540,10 +1448,14 @@ EXPORT_SYMBOL(transport_handle_cdb_direct);
* @data_dir: DMA data direction
* @flags: flags for command submission from target_sc_flags_tables
*
+ * Returns non zero to signal active I/O shutdown failure. All other
+ * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
+ * but still return zero here.
+ *
* This may only be called from process context, and also currently
* assumes internal allocation of fabric payload buffer by target-core.
**/
-void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags)
{
@@ -1569,7 +1481,9 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
* for fabrics using TARGET_SCF_ACK_KREF that expect a second
* kref_put() to happen during fabric packet acknowledgement.
*/
- target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ rc = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ if (rc)
+ return rc;
/*
* Signal bidirectional data payloads to target-core
*/
@@ -1582,16 +1496,13 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
target_put_sess_cmd(se_sess, se_cmd);
- return;
+ return 0;
}
- /*
- * Sanitize CDBs via transport_generic_cmd_sequencer() and
- * allocate the necessary tasks to complete the received CDB+data
- */
+
rc = target_setup_cmd_from_cdb(se_cmd, cdb);
if (rc != 0) {
transport_generic_request_failure(se_cmd);
- return;
+ return 0;
}
/*
@@ -1600,14 +1511,8 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
*/
core_alua_check_nonop_delay(se_cmd);
- /*
- * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
- * for immediate execution of READs, otherwise wait for
- * transport_generic_handle_data() to be called for WRITEs
- * when fabric has filled the incoming buffer.
- */
transport_handle_cdb_direct(se_cmd);
- return;
+ return 0;
}
EXPORT_SYMBOL(target_submit_cmd);
@@ -1662,7 +1567,11 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
se_cmd->se_tmr_req->ref_task_tag = tag;
/* See target_submit_cmd for commentary */
- target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ ret = target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ if (ret) {
+ core_tmr_release_req(se_cmd->se_tmr_req);
+ return ret;
+ }
ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
if (ret) {
@@ -1680,67 +1589,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
EXPORT_SYMBOL(target_submit_tmr);
/*
- * Used by fabric module frontends defining a TFO->new_cmd_map() caller
- * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
- * complete setup in TCM process context w/ TFO->new_cmd_map().
- */
-int transport_generic_handle_cdb_map(
- struct se_cmd *cmd)
-{
- if (!cmd->se_lun) {
- dump_stack();
- pr_err("cmd->se_lun is NULL\n");
- return -EINVAL;
- }
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);
- return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_cdb_map);
-
-/* transport_generic_handle_data():
- *
- *
- */
-int transport_generic_handle_data(
- struct se_cmd *cmd)
-{
- /*
- * For the software fabric case, then we assume the nexus is being
- * failed/shutdown when signals are pending from the kthread context
- * caller, so we return a failure. For the HW target mode case running
- * in interrupt code, the signal_pending() check is skipped.
- */
- if (!in_interrupt() && signal_pending(current))
- return -EPERM;
- /*
- * If the received CDB has aleady been ABORTED by the generic
- * target engine, we now call transport_check_aborted_status()
- * to queue any delated TASK_ABORTED status for the received CDB to the
- * fabric module as we are expecting no further incoming DATA OUT
- * sequences at this point.
- */
- if (transport_check_aborted_status(cmd, 1) != 0)
- return 0;
-
- transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);
- return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_data);
-
-/* transport_generic_handle_tmr():
- *
- *
- */
-int transport_generic_handle_tmr(
- struct se_cmd *cmd)
-{
- transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);
- return 0;
-}
-EXPORT_SYMBOL(transport_generic_handle_tmr);
-
-/*
* If the cmd is active, request it to be stopped and sleep until it
* has completed.
*/
@@ -1797,6 +1645,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
case TCM_UNKNOWN_MODE_PAGE:
case TCM_WRITE_PROTECTED:
+ case TCM_ADDRESS_OUT_OF_RANGE:
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
@@ -1832,13 +1681,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
- /*
- * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
- * make the call to transport_send_check_condition_and_sense()
- * directly. Otherwise expect the fabric to make the call to
- * transport_send_check_condition_and_sense() after handling
- * possible unsoliticied write data payloads.
- */
+
ret = transport_send_check_condition_and_sense(cmd,
cmd->scsi_sense_reason, 0);
if (ret == -EAGAIN || ret == -ENOMEM)
@@ -1856,406 +1699,123 @@ queue_full:
}
EXPORT_SYMBOL(transport_generic_request_failure);
-static inline u32 transport_lba_21(unsigned char *cdb)
+static void __target_execute_cmd(struct se_cmd *cmd)
{
- return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3];
-}
+ int error = 0;
-static inline u32 transport_lba_32(unsigned char *cdb)
-{
- return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
-}
-
-static inline unsigned long long transport_lba_64(unsigned char *cdb)
-{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5];
- __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
-}
-
-/*
- * For VARIABLE_LENGTH_CDB w/ 32 byte extended CDBs
- */
-static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
-{
- unsigned int __v1, __v2;
-
- __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15];
- __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19];
-
- return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32;
-}
-
-static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
-/*
- * Called from Fabric Module context from transport_execute_tasks()
- *
- * The return of this function determins if the tasks from struct se_cmd
- * get added to the execution queue in transport_execute_tasks(),
- * or are added to the delayed or ordered lists here.
- */
-static inline int transport_execute_task_attr(struct se_cmd *cmd)
-{
- if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
- return 1;
- /*
- * Check for the existence of HEAD_OF_QUEUE, and if true return 1
- * to allow the passed struct se_cmd list of tasks to the front of the list.
- */
- if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- pr_debug("Added HEAD_OF_QUEUE for CDB:"
- " 0x%02x, se_ordered_id: %u\n",
- cmd->t_task_cdb[0],
- cmd->se_ordered_id);
- return 1;
- } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- atomic_inc(&cmd->se_dev->dev_ordered_sync);
- smp_mb__after_atomic_inc();
-
- pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
- " list, se_ordered_id: %u\n",
- cmd->t_task_cdb[0],
- cmd->se_ordered_id);
- /*
- * Add ORDERED command to tail of execution queue if
- * no other older commands exist that need to be
- * completed first.
- */
- if (!atomic_read(&cmd->se_dev->simple_cmds))
- return 1;
- } else {
- /*
- * For SIMPLE and UNTAGGED Task Attribute commands
- */
- atomic_inc(&cmd->se_dev->simple_cmds);
- smp_mb__after_atomic_inc();
- }
- /*
- * Otherwise if one or more outstanding ORDERED task attribute exist,
- * add the dormant task(s) built for the passed struct se_cmd to the
- * execution queue and become in Active state for this struct se_device.
- */
- if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
- /*
- * Otherwise, add cmd w/ tasks to delayed cmd queue that
- * will be drained upon completion of HEAD_OF_QUEUE task.
- */
- spin_lock(&cmd->se_dev->delayed_cmd_lock);
- cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
- list_add_tail(&cmd->se_delayed_node,
- &cmd->se_dev->delayed_cmd_list);
- spin_unlock(&cmd->se_dev->delayed_cmd_lock);
-
- pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
- " delayed CMD list, se_ordered_id: %u\n",
- cmd->t_task_cdb[0], cmd->sam_task_attr,
- cmd->se_ordered_id);
- /*
- * Return zero to let transport_execute_tasks() know
- * not to add the delayed tasks to the execution list.
- */
- return 0;
- }
- /*
- * Otherwise, no ORDERED task attributes exist..
- */
- return 1;
-}
-
-/*
- * Called from fabric module context in transport_generic_new_cmd() and
- * transport_generic_process_write()
- */
-static void transport_execute_tasks(struct se_cmd *cmd)
-{
- int add_tasks;
- struct se_device *se_dev = cmd->se_dev;
- /*
- * Call transport_cmd_check_stop() to see if a fabric exception
- * has occurred that prevents execution.
- */
- if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
- /*
- * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
- * attribute for the tasks of the received struct se_cmd CDB
- */
- add_tasks = transport_execute_task_attr(cmd);
- if (add_tasks) {
- __transport_execute_tasks(se_dev, cmd);
- return;
- }
- }
- __transport_execute_tasks(se_dev, NULL);
-}
-
-static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
-{
- int error;
- struct se_cmd *cmd = NULL;
- unsigned long flags;
-
-check_depth:
- spin_lock_irq(&dev->execute_task_lock);
- if (new_cmd != NULL)
- __target_add_to_execute_list(new_cmd);
-
- if (list_empty(&dev->execute_list)) {
- spin_unlock_irq(&dev->execute_task_lock);
- return 0;
- }
- cmd = list_first_entry(&dev->execute_list, struct se_cmd, execute_list);
- __target_remove_from_execute_list(cmd);
- spin_unlock_irq(&dev->execute_task_lock);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->transport_state |= CMD_T_BUSY;
- cmd->transport_state |= CMD_T_SENT;
-
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state |= (CMD_T_BUSY|CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
if (cmd->execute_cmd)
error = cmd->execute_cmd(cmd);
- else {
- error = dev->transport->execute_cmd(cmd, cmd->t_data_sg,
- cmd->t_data_nents, cmd->data_direction);
- }
- if (error != 0) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->transport_state &= ~CMD_T_BUSY;
- cmd->transport_state &= ~CMD_T_SENT;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ if (error) {
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
+ spin_unlock_irq(&cmd->t_state_lock);
transport_generic_request_failure(cmd);
}
-
- new_cmd = NULL;
- goto check_depth;
-
- return 0;
}
-static inline u32 transport_get_sectors_6(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
+void target_execute_cmd(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
/*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 8-bit sector value.
- */
- if (!dev)
- goto type_disk;
-
- /*
- * Use 24-bit allocation length for TYPE_TAPE.
+ * If the received CDB has aleady been aborted stop processing it here.
*/
- if (dev->transport->get_device_type(dev) == TYPE_TAPE)
- return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
-
- /*
- * Everything else assume TYPE_DISK Sector CDB location.
- * Use 8-bit sector value. SBC-3 says:
- *
- * A TRANSFER LENGTH field set to zero specifies that 256
- * logical blocks shall be written. Any other value
- * specifies the number of logical blocks that shall be
- * written.
- */
-type_disk:
- return cdb[4] ? : 256;
-}
-
-static inline u32 transport_get_sectors_10(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
- struct se_device *dev = cmd->se_dev;
+ if (transport_check_aborted_status(cmd, 1))
+ return;
/*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 16-bit sector value.
+ * Determine if IOCTL context caller in requesting the stopping of this
+ * command for LUN shutdown purposes.
*/
- if (!dev)
- goto type_disk;
+ spin_lock_irq(&cmd->t_state_lock);
+ if (cmd->transport_state & CMD_T_LUN_STOP) {
+ pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
+ __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
- /*
- * XXX_10 is not defined in SSC, throw an exception
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
- *ret = -EINVAL;
- return 0;
+ cmd->transport_state &= ~CMD_T_ACTIVE;
+ spin_unlock_irq(&cmd->t_state_lock);
+ complete(&cmd->transport_lun_stop_comp);
+ return;
}
-
/*
- * Everything else assume TYPE_DISK Sector CDB location.
- * Use 16-bit sector value.
- */
-type_disk:
- return (u32)(cdb[7] << 8) + cdb[8];
-}
-
-static inline u32 transport_get_sectors_12(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
- struct se_device *dev = cmd->se_dev;
-
- /*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 32-bit sector value.
+ * Determine if frontend context caller is requesting the stopping of
+ * this command for frontend exceptions.
*/
- if (!dev)
- goto type_disk;
+ if (cmd->transport_state & CMD_T_STOP) {
+ pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
+ __func__, __LINE__,
+ cmd->se_tfo->get_task_tag(cmd));
- /*
- * XXX_12 is not defined in SSC, throw an exception
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
- *ret = -EINVAL;
- return 0;
+ spin_unlock_irq(&cmd->t_state_lock);
+ complete(&cmd->t_transport_stop_comp);
+ return;
}
- /*
- * Everything else assume TYPE_DISK Sector CDB location.
- * Use 32-bit sector value.
- */
-type_disk:
- return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9];
-}
-
-static inline u32 transport_get_sectors_16(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
- struct se_device *dev = cmd->se_dev;
-
- /*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 32-bit sector value.
- */
- if (!dev)
- goto type_disk;
+ cmd->t_state = TRANSPORT_PROCESSING;
+ spin_unlock_irq(&cmd->t_state_lock);
- /*
- * Use 24-bit allocation length for TYPE_TAPE.
- */
- if (dev->transport->get_device_type(dev) == TYPE_TAPE)
- return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
-
-type_disk:
- return (u32)(cdb[10] << 24) + (cdb[11] << 16) +
- (cdb[12] << 8) + cdb[13];
-}
+ if (dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ goto execute;
-/*
- * Used for VARIABLE_LENGTH_CDB WRITE_32 and READ_32 variants
- */
-static inline u32 transport_get_sectors_32(
- unsigned char *cdb,
- struct se_cmd *cmd,
- int *ret)
-{
/*
- * Assume TYPE_DISK for non struct se_device objects.
- * Use 32-bit sector value.
+ * Check for the existence of HEAD_OF_QUEUE, and if true return 1
+ * to allow the passed struct se_cmd list of tasks to the front of the list.
*/
- return (u32)(cdb[28] << 24) + (cdb[29] << 16) +
- (cdb[30] << 8) + cdb[31];
+ switch (cmd->sam_task_attr) {
+ case MSG_HEAD_TAG:
+ pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
+ "se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->se_ordered_id);
+ goto execute;
+ case MSG_ORDERED_TAG:
+ atomic_inc(&dev->dev_ordered_sync);
+ smp_mb__after_atomic_inc();
-}
+ pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
+ " se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->se_ordered_id);
-static inline u32 transport_get_size(
- u32 sectors,
- unsigned char *cdb,
- struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
-
- if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
- if (cdb[1] & 1) { /* sectors */
- return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
- } else /* bytes */
- return sectors;
+ /*
+ * Execute an ORDERED command if no other older commands
+ * exist that need to be completed first.
+ */
+ if (!atomic_read(&dev->simple_cmds))
+ goto execute;
+ break;
+ default:
+ /*
+ * For SIMPLE and UNTAGGED Task Attribute commands
+ */
+ atomic_inc(&dev->simple_cmds);
+ smp_mb__after_atomic_inc();
+ break;
}
- pr_debug("Returning block_size: %u, sectors: %u == %u for"
- " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size,
- sectors, dev->se_sub_dev->se_dev_attrib.block_size * sectors,
- dev->transport->name);
-
- return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
-}
+ if (atomic_read(&dev->dev_ordered_sync) != 0) {
+ spin_lock(&dev->delayed_cmd_lock);
+ list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
+ spin_unlock(&dev->delayed_cmd_lock);
-static void transport_xor_callback(struct se_cmd *cmd)
-{
- unsigned char *buf, *addr;
- struct scatterlist *sg;
- unsigned int offset;
- int i;
- int count;
- /*
- * From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
- *
- * 1) read the specified logical block(s);
- * 2) transfer logical blocks from the data-out buffer;
- * 3) XOR the logical blocks transferred from the data-out buffer with
- * the logical blocks read, storing the resulting XOR data in a buffer;
- * 4) if the DISABLE WRITE bit is set to zero, then write the logical
- * blocks transferred from the data-out buffer; and
- * 5) transfer the resulting XOR data to the data-in buffer.
- */
- buf = kmalloc(cmd->data_length, GFP_KERNEL);
- if (!buf) {
- pr_err("Unable to allocate xor_callback buf\n");
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
+ " delayed CMD list, se_ordered_id: %u\n",
+ cmd->t_task_cdb[0], cmd->sam_task_attr,
+ cmd->se_ordered_id);
return;
}
- /*
- * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
- * into the locally allocated *buf
- */
- sg_copy_to_buffer(cmd->t_data_sg,
- cmd->t_data_nents,
- buf,
- cmd->data_length);
+execute:
/*
- * Now perform the XOR against the BIDI read memory located at
- * cmd->t_mem_bidi_list
+ * Otherwise, no ORDERED task attributes exist..
*/
-
- offset = 0;
- for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
- addr = kmap_atomic(sg_page(sg));
- if (!addr)
- goto out;
-
- for (i = 0; i < sg->length; i++)
- *(addr + sg->offset + i) ^= *(buf + offset + i);
-
- offset += sg->length;
- kunmap_atomic(addr);
- }
-
-out:
- kfree(buf);
+ __target_execute_cmd(cmd);
}
+EXPORT_SYMBOL(target_execute_cmd);
/*
* Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd
@@ -2312,737 +1872,31 @@ out:
return -1;
}
-static inline long long transport_dev_end_lba(struct se_device *dev)
-{
- return dev->transport->get_blocks(dev) + 1;
-}
-
-static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
-{
- struct se_device *dev = cmd->se_dev;
- u32 sectors;
-
- if (dev->transport->get_device_type(dev) != TYPE_DISK)
- return 0;
-
- sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
-
- if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
- pr_err("LBA: %llu Sectors: %u exceeds"
- " transport_dev_end_lba(): %llu\n",
- cmd->t_task_lba, sectors,
- transport_dev_end_lba(dev));
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
-{
- /*
- * Determine if the received WRITE_SAME is used to for direct
- * passthrough into Linux/SCSI with struct request via TCM/pSCSI
- * or we are signaling the use of internal WRITE_SAME + UNMAP=1
- * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code.
- */
- int passthrough = (dev->transport->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
-
- if (!passthrough) {
- if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
- pr_err("WRITE_SAME PBDATA and LBDATA"
- " bits not supported for Block Discard"
- " Emulation\n");
- return -ENOSYS;
- }
- /*
- * Currently for the emulated case we only accept
- * tpws with the UNMAP=1 bit set.
- */
- if (!(flags[0] & 0x08)) {
- pr_err("WRITE_SAME w/o UNMAP bit not"
- " supported for Block Discard Emulation\n");
- return -ENOSYS;
- }
- }
-
- return 0;
-}
-
-/* transport_generic_cmd_sequencer():
- *
- * Generic Command Sequencer that should work for most DAS transport
- * drivers.
- *
- * Called from target_setup_cmd_from_cdb() in the $FABRIC_MOD
- * RX Thread.
- *
- * FIXME: Need to support other SCSI OPCODES where as well.
+/*
+ * Process all commands up to the last received ORDERED task attribute which
+ * requires another blocking boundary
*/
-static int transport_generic_cmd_sequencer(
- struct se_cmd *cmd,
- unsigned char *cdb)
+static void target_restart_delayed_cmds(struct se_device *dev)
{
- struct se_device *dev = cmd->se_dev;
- struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- int ret = 0, sector_ret = 0, passthrough;
- u32 sectors = 0, size = 0, pr_reg_type = 0;
- u16 service_action;
- u8 alua_ascq = 0;
- /*
- * Check for an existing UNIT ATTENTION condition
- */
- if (core_scsi3_ua_check(cmd, cdb) < 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
- return -EINVAL;
- }
- /*
- * Check status of Asymmetric Logical Unit Assignment port
- */
- ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
- if (ret != 0) {
- /*
- * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
- * The ALUA additional sense code qualifier (ASCQ) is determined
- * by the ALUA primary or secondary access state..
- */
- if (ret > 0) {
- pr_debug("[%s]: ALUA TG Port not available,"
- " SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
- cmd->se_tfo->get_fabric_name(), alua_ascq);
+ for (;;) {
+ struct se_cmd *cmd;
- transport_set_sense_codes(cmd, 0x04, alua_ascq);
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
- return -EINVAL;
- }
- goto out_invalid_cdb_field;
- }
- /*
- * Check status for SPC-3 Persistent Reservations
- */
- if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
- if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return -EBUSY;
- }
- /*
- * This means the CDB is allowed for the SCSI Initiator port
- * when said port is *NOT* holding the legacy SPC-2 or
- * SPC-3 Persistent Reservation.
- */
- }
-
- /*
- * If we operate in passthrough mode we skip most CDB emulation and
- * instead hand the commands down to the physical SCSI device.
- */
- passthrough =
- (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV);
-
- switch (cdb[0]) {
- case READ_6:
- sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_21(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case READ_10:
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case READ_12:
- sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case READ_16:
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_64(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_6:
- sectors = transport_get_sectors_6(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_21(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_10:
- case WRITE_VERIFY:
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_12:
- sectors = transport_get_sectors_12(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case WRITE_16:
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_64(cdb);
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- break;
- case XDWRITEREAD_10:
- if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->se_cmd_flags & SCF_BIDI))
- goto out_invalid_cdb_field;
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- cmd->t_task_lba = transport_lba_32(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
-
- /*
- * Do now allow BIDI commands for passthrough mode.
- */
- if (passthrough)
- goto out_unsupported_cdb;
-
- /*
- * Setup BIDI XOR callback to be run after I/O completion.
- */
- cmd->transport_complete_callback = &transport_xor_callback;
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- break;
- case VARIABLE_LENGTH_CMD:
- service_action = get_unaligned_be16(&cdb[8]);
- switch (service_action) {
- case XDWRITEREAD_32:
- sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- /*
- * Use WRITE_32 and READ_32 opcodes for the emulated
- * XDWRITE_READ_32 logic.
- */
- cmd->t_task_lba = transport_lba_64_ext(cdb);
- cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
-
- /*
- * Do now allow BIDI commands for passthrough mode.
- */
- if (passthrough)
- goto out_unsupported_cdb;
-
- /*
- * Setup BIDI XOR callback to be run during after I/O
- * completion.
- */
- cmd->transport_complete_callback = &transport_xor_callback;
- if (cdb[1] & 0x8)
- cmd->se_cmd_flags |= SCF_FUA;
- break;
- case WRITE_SAME_32:
- sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
-
- if (sectors)
- size = transport_get_size(1, cdb, cmd);
- else {
- pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
- " supported\n");
- goto out_invalid_cdb_field;
- }
-
- cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
-
- if (target_check_write_same_discard(&cdb[10], dev) < 0)
- goto out_unsupported_cdb;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_write_same;
- break;
- default:
- pr_err("VARIABLE_LENGTH_CMD service action"
- " 0x%04x not supported\n", service_action);
- goto out_unsupported_cdb;
- }
- break;
- case MAINTENANCE_IN:
- if (dev->transport->get_device_type(dev) != TYPE_ROM) {
- /* MAINTENANCE_IN from SCC-2 */
- /*
- * Check for emulated MI_REPORT_TARGET_PGS.
- */
- if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS &&
- su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
- cmd->execute_cmd =
- target_emulate_report_target_port_groups;
- }
- size = (cdb[6] << 24) | (cdb[7] << 16) |
- (cdb[8] << 8) | cdb[9];
- } else {
- /* GPCMD_SEND_KEY from multi media commands */
- size = (cdb[8] << 8) + cdb[9];
- }
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MODE_SELECT:
- size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MODE_SELECT_10:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MODE_SENSE:
- size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_modesense;
- break;
- case MODE_SENSE_10:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_modesense;
- break;
- case GPCMD_READ_BUFFER_CAPACITY:
- case GPCMD_SEND_OPC:
- case LOG_SELECT:
- case LOG_SENSE:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case READ_BLOCK_LIMITS:
- size = READ_BLOCK_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case GPCMD_GET_CONFIGURATION:
- case GPCMD_READ_FORMAT_CAPACITIES:
- case GPCMD_READ_DISC_INFO:
- case GPCMD_READ_TRACK_RZONE_INFO:
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case PERSISTENT_RESERVE_IN:
- if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
- cmd->execute_cmd = target_scsi3_emulate_pr_in;
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case PERSISTENT_RESERVE_OUT:
- if (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS)
- cmd->execute_cmd = target_scsi3_emulate_pr_out;
- size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case GPCMD_MECHANISM_STATUS:
- case GPCMD_READ_DVD_STRUCTURE:
- size = (cdb[8] << 8) + cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case READ_POSITION:
- size = READ_POSITION_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case MAINTENANCE_OUT:
- if (dev->transport->get_device_type(dev) != TYPE_ROM) {
- /* MAINTENANCE_OUT from SCC-2
- *
- * Check for emulated MO_SET_TARGET_PGS.
- */
- if (cdb[1] == MO_SET_TARGET_PGS &&
- su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
- cmd->execute_cmd =
- target_emulate_set_target_port_groups;
- }
-
- size = (cdb[6] << 24) | (cdb[7] << 16) |
- (cdb[8] << 8) | cdb[9];
- } else {
- /* GPCMD_REPORT_KEY from multi media commands */
- size = (cdb[8] << 8) + cdb[9];
- }
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case INQUIRY:
- size = (cdb[3] << 8) + cdb[4];
- /*
- * Do implict HEAD_OF_QUEUE processing for INQUIRY.
- * See spc4r17 section 5.3
- */
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_inquiry;
- break;
- case READ_BUFFER:
- size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case READ_CAPACITY:
- size = READ_CAP_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_readcapacity;
- break;
- case READ_MEDIA_SERIAL_NUMBER:
- case SECURITY_PROTOCOL_IN:
- case SECURITY_PROTOCOL_OUT:
- size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case SERVICE_ACTION_IN:
- switch (cmd->t_task_cdb[1] & 0x1f) {
- case SAI_READ_CAPACITY_16:
- if (!passthrough)
- cmd->execute_cmd =
- target_emulate_readcapacity_16;
- break;
- default:
- if (passthrough)
- break;
-
- pr_err("Unsupported SA: 0x%02x\n",
- cmd->t_task_cdb[1] & 0x1f);
- goto out_invalid_cdb_field;
- }
- /*FALLTHROUGH*/
- case ACCESS_CONTROL_IN:
- case ACCESS_CONTROL_OUT:
- case EXTENDED_COPY:
- case READ_ATTRIBUTE:
- case RECEIVE_COPY_RESULTS:
- case WRITE_ATTRIBUTE:
- size = (cdb[10] << 24) | (cdb[11] << 16) |
- (cdb[12] << 8) | cdb[13];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case RECEIVE_DIAGNOSTIC:
- case SEND_DIAGNOSTIC:
- size = (cdb[3] << 8) | cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
-/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
-#if 0
- case GPCMD_READ_CD:
- sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- size = (2336 * sectors);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
-#endif
- case READ_TOC:
- size = cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case REQUEST_SENSE:
- size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_request_sense;
- break;
- case READ_ELEMENT_STATUS:
- size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case WRITE_BUFFER:
- size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case RESERVE:
- case RESERVE_10:
- /*
- * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
- * Assume the passthrough or $FABRIC_MOD will tell us about it.
- */
- if (cdb[0] == RESERVE_10)
- size = (cdb[7] << 8) | cdb[8];
- else
- size = cmd->data_length;
-
- /*
- * Setup the legacy emulated handler for SPC-2 and
- * >= SPC-3 compatible reservation handling (CRH=1)
- * Otherwise, we assume the underlying SCSI logic is
- * is running in SPC_PASSTHROUGH, and wants reservations
- * emulation disabled.
- */
- if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
- cmd->execute_cmd = target_scsi2_reservation_reserve;
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- break;
- case RELEASE:
- case RELEASE_10:
- /*
- * The SPC-2 RELEASE does not contain a size in the SCSI CDB.
- * Assume the passthrough or $FABRIC_MOD will tell us about it.
- */
- if (cdb[0] == RELEASE_10)
- size = (cdb[7] << 8) | cdb[8];
- else
- size = cmd->data_length;
-
- if (su_dev->t10_pr.res_type != SPC_PASSTHROUGH)
- cmd->execute_cmd = target_scsi2_reservation_release;
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- break;
- case SYNCHRONIZE_CACHE:
- case SYNCHRONIZE_CACHE_16:
- /*
- * Extract LBA and range to be flushed for emulated SYNCHRONIZE_CACHE
- */
- if (cdb[0] == SYNCHRONIZE_CACHE) {
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- cmd->t_task_lba = transport_lba_32(cdb);
- } else {
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- cmd->t_task_lba = transport_lba_64(cdb);
- }
- if (sector_ret)
- goto out_unsupported_cdb;
-
- size = transport_get_size(sectors, cdb, cmd);
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
-
- if (passthrough)
+ spin_lock(&dev->delayed_cmd_lock);
+ if (list_empty(&dev->delayed_cmd_list)) {
+ spin_unlock(&dev->delayed_cmd_lock);
break;
-
- /*
- * Check to ensure that LBA + Range does not exceed past end of
- * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
- */
- if ((cmd->t_task_lba != 0) || (sectors != 0)) {
- if (transport_cmd_get_valid_sectors(cmd) < 0)
- goto out_invalid_cdb_field;
- }
- cmd->execute_cmd = target_emulate_synchronize_cache;
- break;
- case UNMAP:
- size = get_unaligned_be16(&cdb[7]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_unmap;
- break;
- case WRITE_SAME_16:
- sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
-
- if (sectors)
- size = transport_get_size(1, cdb, cmd);
- else {
- pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
- goto out_invalid_cdb_field;
}
- cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
-
- if (target_check_write_same_discard(&cdb[1], dev) < 0)
- goto out_unsupported_cdb;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_write_same;
- break;
- case WRITE_SAME:
- sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- if (sector_ret)
- goto out_unsupported_cdb;
+ cmd = list_entry(dev->delayed_cmd_list.next,
+ struct se_cmd, se_delayed_node);
+ list_del(&cmd->se_delayed_node);
+ spin_unlock(&dev->delayed_cmd_lock);
- if (sectors)
- size = transport_get_size(1, cdb, cmd);
- else {
- pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
- goto out_invalid_cdb_field;
- }
+ __target_execute_cmd(cmd);
- cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- /*
- * Follow sbcr26 with WRITE_SAME (10) and check for the existence
- * of byte 1 bit 3 UNMAP instead of original reserved field
- */
- if (target_check_write_same_discard(&cdb[1], dev) < 0)
- goto out_unsupported_cdb;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_write_same;
- break;
- case ALLOW_MEDIUM_REMOVAL:
- case ERASE:
- case REZERO_UNIT:
- case SEEK_10:
- case SPACE:
- case START_STOP:
- case TEST_UNIT_READY:
- case VERIFY:
- case WRITE_FILEMARKS:
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- if (!passthrough)
- cmd->execute_cmd = target_emulate_noop;
- break;
- case GPCMD_CLOSE_TRACK:
- case INITIALIZE_ELEMENT_STATUS:
- case GPCMD_LOAD_UNLOAD:
- case GPCMD_SET_SPEED:
- case MOVE_MEDIUM:
- cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
- break;
- case REPORT_LUNS:
- cmd->execute_cmd = target_report_luns;
- size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
- /*
- * Do implict HEAD_OF_QUEUE processing for REPORT_LUNS
- * See spc4r17 section 5.3
- */
- if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
- cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case GET_EVENT_STATUS_NOTIFICATION:
- size = (cdb[7] << 8) | cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- case ATA_16:
- /* Only support ATA passthrough to pSCSI backends.. */
- if (!passthrough)
- goto out_unsupported_cdb;
-
- /* T_LENGTH */
- switch (cdb[2] & 0x3) {
- case 0x0:
- sectors = 0;
- break;
- case 0x1:
- sectors = (((cdb[1] & 0x1) ? cdb[3] : 0) << 8) | cdb[4];
- break;
- case 0x2:
- sectors = (((cdb[1] & 0x1) ? cdb[5] : 0) << 8) | cdb[6];
+ if (cmd->sam_task_attr == MSG_ORDERED_TAG)
break;
- case 0x3:
- pr_err("T_LENGTH=0x3 not supported for ATA_16\n");
- goto out_invalid_cdb_field;
- }
-
- /* BYTE_BLOCK */
- if (cdb[2] & 0x4) {
- /* BLOCK T_TYPE: 512 or sector */
- size = sectors * ((cdb[2] & 0x10) ?
- dev->se_sub_dev->se_dev_attrib.block_size : 512);
- } else {
- /* BYTE */
- size = sectors;
- }
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
- break;
- default:
- pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
- " 0x%02x, sending CHECK_CONDITION.\n",
- cmd->se_tfo->get_fabric_name(), cdb[0]);
- goto out_unsupported_cdb;
- }
-
- if (cmd->unknown_data_length)
- cmd->data_length = size;
-
- if (size != cmd->data_length) {
- pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
- " %u does not match SCSI CDB Length: %u for SAM Opcode:"
- " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
- cmd->data_length, size, cdb[0]);
-
- cmd->cmd_spdtl = size;
-
- if (cmd->data_direction == DMA_TO_DEVICE) {
- pr_err("Rejecting underflow/overflow"
- " WRITE data\n");
- goto out_invalid_cdb_field;
- }
- /*
- * Reject READ_* or WRITE_* with overflow/underflow for
- * type SCF_SCSI_DATA_SG_IO_CDB.
- */
- if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
- pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
- " CDB on non 512-byte sector setup subsystem"
- " plugin: %s\n", dev->transport->name);
- /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
- goto out_invalid_cdb_field;
- }
-
- if (size > cmd->data_length) {
- cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
- cmd->residual_count = (size - cmd->data_length);
- } else {
- cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
- cmd->residual_count = (cmd->data_length - size);
- }
- cmd->data_length = size;
}
-
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- if (sectors > su_dev->se_dev_attrib.fabric_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds fabric_max_sectors:"
- " %u\n", cdb[0], sectors,
- su_dev->se_dev_attrib.fabric_max_sectors);
- goto out_invalid_cdb_field;
- }
- if (sectors > su_dev->se_dev_attrib.hw_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds backend hw_max_sectors:"
- " %u\n", cdb[0], sectors,
- su_dev->se_dev_attrib.hw_max_sectors);
- goto out_invalid_cdb_field;
- }
- }
-
- /* reject any command that we don't have a handler for */
- if (!(passthrough || cmd->execute_cmd ||
- (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
- goto out_unsupported_cdb;
-
- transport_set_supported_SAM_opcode(cmd);
- return ret;
-
-out_unsupported_cdb:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -EINVAL;
-out_invalid_cdb_field:
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
}
/*
@@ -3052,8 +1906,6 @@ out_invalid_cdb_field:
static void transport_complete_task_attr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
- struct se_cmd *cmd_p, *cmd_tmp;
- int new_active_tasks = 0;
if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
atomic_dec(&dev->simple_cmds);
@@ -3075,38 +1927,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
}
- /*
- * Process all commands up to the last received
- * ORDERED task attribute which requires another blocking
- * boundary
- */
- spin_lock(&dev->delayed_cmd_lock);
- list_for_each_entry_safe(cmd_p, cmd_tmp,
- &dev->delayed_cmd_list, se_delayed_node) {
-
- list_del(&cmd_p->se_delayed_node);
- spin_unlock(&dev->delayed_cmd_lock);
-
- pr_debug("Calling add_tasks() for"
- " cmd_p: 0x%02x Task Attr: 0x%02x"
- " Dormant -> Active, se_ordered_id: %u\n",
- cmd_p->t_task_cdb[0],
- cmd_p->sam_task_attr, cmd_p->se_ordered_id);
- target_add_to_execute_list(cmd_p);
- new_active_tasks++;
-
- spin_lock(&dev->delayed_cmd_lock);
- if (cmd_p->sam_task_attr == MSG_ORDERED_TAG)
- break;
- }
- spin_unlock(&dev->delayed_cmd_lock);
- /*
- * If new tasks have become active, wake up the transport thread
- * to do the processing of the Active tasks.
- */
- if (new_active_tasks != 0)
- wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
+ target_restart_delayed_cmds(dev);
}
static void transport_complete_qf(struct se_cmd *cmd)
@@ -3365,31 +2187,27 @@ int transport_generic_map_mem_to_cmd(
if (!sgl || !sgl_count)
return 0;
- if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
- (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
- /*
- * Reject SCSI data overflow with map_mem_to_cmd() as incoming
- * scatterlists already have been set to follow what the fabric
- * passes for the original expected data transfer length.
- */
- if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
- pr_warn("Rejecting SCSI DATA overflow for fabric using"
- " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -EINVAL;
- }
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
- cmd->t_data_sg = sgl;
- cmd->t_data_nents = sgl_count;
+ cmd->t_data_sg = sgl;
+ cmd->t_data_nents = sgl_count;
- if (sgl_bidi && sgl_bidi_count) {
- cmd->t_bidi_data_sg = sgl_bidi;
- cmd->t_bidi_data_nents = sgl_bidi_count;
- }
- cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
+ if (sgl_bidi && sgl_bidi_count) {
+ cmd->t_bidi_data_sg = sgl_bidi;
+ cmd->t_bidi_data_nents = sgl_bidi_count;
}
-
+ cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
@@ -3461,7 +2279,7 @@ transport_generic_get_mem(struct se_cmd *cmd)
cmd->t_data_nents = nents;
sg_init_table(cmd->t_data_sg, nents);
- zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
+ zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_CDB ? 0 : __GFP_ZERO;
while (length) {
u32 page_len = min_t(u32, length, PAGE_SIZE);
@@ -3492,7 +2310,6 @@ out:
*/
int transport_generic_new_cmd(struct se_cmd *cmd)
{
- struct se_device *dev = cmd->se_dev;
int ret = 0;
/*
@@ -3508,8 +2325,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
}
/* Workaround for handling zero-length control CDBs */
- if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
- !cmd->data_length) {
+ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) {
spin_lock_irq(&cmd->t_state_lock);
cmd->t_state = TRANSPORT_COMPLETE;
cmd->transport_state |= CMD_T_ACTIVE;
@@ -3527,52 +2343,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
return 0;
}
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- struct se_dev_attrib *attr = &dev->se_sub_dev->se_dev_attrib;
-
- if (transport_cmd_get_valid_sectors(cmd) < 0)
- return -EINVAL;
-
- BUG_ON(cmd->data_length % attr->block_size);
- BUG_ON(DIV_ROUND_UP(cmd->data_length, attr->block_size) >
- attr->hw_max_sectors);
- }
-
atomic_inc(&cmd->t_fe_count);
/*
- * For WRITEs, let the fabric know its buffer is ready.
- *
- * The command will be added to the execution queue after its write
- * data has arrived.
+ * If this command is not a write we can execute it right here,
+ * for write buffers we need to notify the fabric driver first
+ * and let it call back once the write buffers are ready.
*/
- if (cmd->data_direction == DMA_TO_DEVICE) {
- target_add_to_state_list(cmd);
- return transport_generic_write_pending(cmd);
+ target_add_to_state_list(cmd);
+ if (cmd->data_direction != DMA_TO_DEVICE) {
+ target_execute_cmd(cmd);
+ return 0;
}
- /*
- * Everything else but a WRITE, add the command to the execution queue.
- */
- transport_execute_tasks(cmd);
- return 0;
+
+ spin_lock_irq(&cmd->t_state_lock);
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ transport_cmd_check_stop(cmd, false);
+
+ ret = cmd->se_tfo->write_pending(cmd);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
+
+ if (ret < 0)
+ return ret;
+ return 1;
out_fail:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
+queue_full:
+ pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+ transport_handle_queue_full(cmd, cmd->se_dev);
+ return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
-/* transport_generic_process_write():
- *
- *
- */
-void transport_generic_process_write(struct se_cmd *cmd)
-{
- transport_execute_tasks(cmd);
-}
-EXPORT_SYMBOL(transport_generic_process_write);
-
static void transport_write_pending_qf(struct se_cmd *cmd)
{
int ret;
@@ -3585,43 +2394,6 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
}
}
-static int transport_generic_write_pending(struct se_cmd *cmd)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->t_state = TRANSPORT_WRITE_PENDING;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- /*
- * Clear the se_cmd for WRITE_PENDING status in order to set
- * CMD_T_ACTIVE so that transport_generic_handle_data can be called
- * from HW target mode interrupt code. This is safe to be called
- * with transport_off=1 before the cmd->se_tfo->write_pending
- * because the se_cmd->se_lun pointer is not being cleared.
- */
- transport_cmd_check_stop(cmd, 1, 0);
-
- /*
- * Call the fabric write_pending function here to let the
- * frontend know that WRITE buffers are ready.
- */
- ret = cmd->se_tfo->write_pending(cmd);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- else if (ret < 0)
- return ret;
-
- return 1;
-
-queue_full:
- pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
- cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
- transport_handle_queue_full(cmd, cmd->se_dev);
- return 0;
-}
-
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -3648,10 +2420,11 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
* @se_cmd: command descriptor to add
* @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
-void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
- bool ack_kref)
+static int target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+ bool ack_kref)
{
unsigned long flags;
+ int ret = 0;
kref_init(&se_cmd->cmd_kref);
/*
@@ -3665,11 +2438,17 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
}
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
+ if (se_sess->sess_tearing_down) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
se_cmd->check_release = 1;
+
+out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ return ret;
}
-EXPORT_SYMBOL(target_get_sess_cmd);
static void target_release_cmd_kref(struct kref *kref)
{
@@ -3704,28 +2483,27 @@ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
}
EXPORT_SYMBOL(target_put_sess_cmd);
-/* target_splice_sess_cmd_list - Split active cmds into sess_wait_list
- * @se_sess: session to split
+/* target_sess_cmd_list_set_waiting - Flag all commands in
+ * sess_cmd_list to complete cmd_wait_comp. Set
+ * sess_tearing_down so no more commands are queued.
+ * @se_sess: session to flag
*/
-void target_splice_sess_cmd_list(struct se_session *se_sess)
+void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
struct se_cmd *se_cmd;
unsigned long flags;
- WARN_ON(!list_empty(&se_sess->sess_wait_list));
- INIT_LIST_HEAD(&se_sess->sess_wait_list);
-
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
- se_sess->sess_tearing_down = 1;
- list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
+ WARN_ON(se_sess->sess_tearing_down);
+ se_sess->sess_tearing_down = 1;
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
+ list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list)
se_cmd->cmd_wait_set = 1;
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
-EXPORT_SYMBOL(target_splice_sess_cmd_list);
+EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);
/* target_wait_for_sess_cmds - Wait for outstanding descriptors
* @se_sess: session to wait for active I/O
@@ -3739,7 +2517,7 @@ void target_wait_for_sess_cmds(
bool rc = false;
list_for_each_entry_safe(se_cmd, tmp_cmd,
- &se_sess->sess_wait_list, se_cmd_list) {
+ &se_sess->sess_cmd_list, se_cmd_list) {
list_del(&se_cmd->se_cmd_list);
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
@@ -3791,26 +2569,20 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
cmd->se_tfo->get_task_tag(cmd));
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- transport_cmd_check_stop(cmd, 1, 0);
+ transport_cmd_check_stop(cmd, false);
return -EPERM;
}
cmd->transport_state |= CMD_T_LUN_FE_STOP;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
-
// XXX: audit task_flags checks.
spin_lock_irqsave(&cmd->t_state_lock, flags);
if ((cmd->transport_state & CMD_T_BUSY) &&
(cmd->transport_state & CMD_T_SENT)) {
if (!target_stop_cmd(cmd, &flags))
ret++;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- } else {
- spin_unlock_irqrestore(&cmd->t_state_lock,
- flags);
- target_remove_from_execute_list(cmd);
}
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
pr_debug("ConfigFS: cmd: %p stop tasks ret:"
" %d\n", cmd, ret);
@@ -3821,7 +2593,6 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
cmd->se_tfo->get_task_tag(cmd));
}
- transport_remove_cmd_from_queue(cmd);
return 0;
}
@@ -3840,11 +2611,6 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
struct se_cmd, se_lun_node);
list_del_init(&cmd->se_lun_node);
- /*
- * This will notify iscsi_target_transport.c:
- * transport_cmd_check_stop() that a LUN shutdown is in
- * progress for the iscsi_cmd_t.
- */
spin_lock(&cmd->t_state_lock);
pr_debug("SE_LUN[%d] - Setting cmd->transport"
"_lun_stop for ITT: 0x%08x\n",
@@ -3911,7 +2677,7 @@ check_cond:
spin_unlock_irqrestore(&cmd->t_state_lock,
cmd_flags);
- transport_cmd_check_stop(cmd, 1, 0);
+ transport_cmd_check_stop(cmd, false);
complete(&cmd->transport_lun_fe_stop_comp);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
@@ -3967,10 +2733,7 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return false;
}
- /*
- * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE
- * has been set in transport_set_supported_SAM_opcode().
- */
+
if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -4028,8 +2791,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
-
wait_for_completion(&cmd->t_transport_stop_comp);
spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -4212,6 +2973,15 @@ int transport_send_check_condition_and_sense(
/* WRITE PROTECTED */
buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
break;
+ case TCM_ADDRESS_OUT_OF_RANGE:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+ /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
+ break;
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
/* CURRENT ERROR */
buffer[offset] = 0x70;
@@ -4312,8 +3082,9 @@ void transport_send_task_abort(struct se_cmd *cmd)
cmd->se_tfo->queue_status(cmd);
}
-static int transport_generic_do_tmr(struct se_cmd *cmd)
+static void target_tmr_work(struct work_struct *work)
{
+ struct se_cmd *cmd = container_of(work, struct se_cmd, work);
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
int ret;
@@ -4349,80 +3120,13 @@ static int transport_generic_do_tmr(struct se_cmd *cmd)
cmd->se_tfo->queue_tm_rsp(cmd);
transport_cmd_check_stop_to_fabric(cmd);
- return 0;
}
-/* transport_processing_thread():
- *
- *
- */
-static int transport_processing_thread(void *param)
+int transport_generic_handle_tmr(
+ struct se_cmd *cmd)
{
- int ret;
- struct se_cmd *cmd;
- struct se_device *dev = param;
-
- while (!kthread_should_stop()) {
- ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
- atomic_read(&dev->dev_queue_obj.queue_cnt) ||
- kthread_should_stop());
- if (ret < 0)
- goto out;
-
-get_cmd:
- cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
- if (!cmd)
- continue;
-
- switch (cmd->t_state) {
- case TRANSPORT_NEW_CMD:
- BUG();
- break;
- case TRANSPORT_NEW_CMD_MAP:
- if (!cmd->se_tfo->new_cmd_map) {
- pr_err("cmd->se_tfo->new_cmd_map is"
- " NULL for TRANSPORT_NEW_CMD_MAP\n");
- BUG();
- }
- ret = cmd->se_tfo->new_cmd_map(cmd);
- if (ret < 0) {
- transport_generic_request_failure(cmd);
- break;
- }
- ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
- transport_generic_request_failure(cmd);
- break;
- }
- break;
- case TRANSPORT_PROCESS_WRITE:
- transport_generic_process_write(cmd);
- break;
- case TRANSPORT_PROCESS_TMR:
- transport_generic_do_tmr(cmd);
- break;
- case TRANSPORT_COMPLETE_QF_WP:
- transport_write_pending_qf(cmd);
- break;
- case TRANSPORT_COMPLETE_QF_OK:
- transport_complete_qf(cmd);
- break;
- default:
- pr_err("Unknown t_state: %d for ITT: 0x%08x "
- "i_state: %d on SE LUN: %u\n",
- cmd->t_state,
- cmd->se_tfo->get_task_tag(cmd),
- cmd->se_tfo->get_cmd_state(cmd),
- cmd->se_lun->unpacked_lun);
- BUG();
- }
-
- goto get_cmd;
- }
-
-out:
- WARN_ON(!list_empty(&dev->state_list));
- WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));
- dev->process_thread = NULL;
+ INIT_WORK(&cmd->work, target_tmr_work);
+ queue_work(cmd->se_dev->tmr_wq, &cmd->work);
return 0;
}
+EXPORT_SYMBOL(transport_generic_handle_tmr);
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index f03fb9730f5b..b9cb5006177e 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -215,7 +215,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
*/
if ((ep->xid <= lport->lro_xid) &&
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
- if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) &&
+ if ((se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
lport->tt.ddp_target(lport, ep->xid,
se_cmd->t_data_sg,
se_cmd->t_data_nents))
@@ -230,6 +230,8 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
+ if (cmd->aborted)
+ return ~0;
return fc_seq_exch(cmd->seq)->rxid;
}
@@ -541,9 +543,11 @@ static void ft_send_work(struct work_struct *work)
* Use a single se_cmd->cmd_kref as we expect to release se_cmd
* directly from ft_check_stop_free callback in response path.
*/
- target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
- &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
- ntohl(fcp->fc_dl), task_attr, data_dir, 0);
+ if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
+ &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
+ ntohl(fcp->fc_dl), task_attr, data_dir, 0))
+ goto err;
+
pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
return;
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 071a505f98fc..ad36ede1a1ea 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -183,6 +183,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
return ft_queue_status(se_cmd);
}
+static void ft_execute_work(struct work_struct *work)
+{
+ struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
+
+ target_execute_cmd(&cmd->se_cmd);
+}
+
/*
* Receive write data frame.
*/
@@ -307,8 +314,10 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
cmd->write_data_len += tlen;
}
last_frame:
- if (cmd->write_data_len == se_cmd->data_length)
- transport_generic_handle_data(se_cmd);
+ if (cmd->write_data_len == se_cmd->data_length) {
+ INIT_WORK(&cmd->work, ft_execute_work);
+ queue_work(cmd->sess->tport->tpg->workqueue, &cmd->work);
+ }
drop:
fc_frame_free(fp);
}
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index cb99da920068..87901fa74dd7 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -58,7 +58,8 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
struct ft_tport *tport;
int i;
- tport = rcu_dereference(lport->prov[FC_TYPE_FCP]);
+ tport = rcu_dereference_protected(lport->prov[FC_TYPE_FCP],
+ lockdep_is_held(&ft_lport_lock));
if (tport && tport->tpg)
return tport;
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index ced26c8ccd57..0d2ea0c224c3 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -401,7 +401,7 @@ out:
}
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL_RAW
-void __init udbg_init_debug_opal(void)
+void __init udbg_init_debug_opal_raw(void)
{
u32 index = CONFIG_PPC_EARLY_DEBUG_OPAL_VTERMNO;
hvc_opal_privs[index] = &hvc_opal_boot_priv;
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 944eaeb8e0cf..1e456dca4f60 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -209,11 +209,10 @@ static int xen_hvm_console_init(void)
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
if (!info)
return -ENOMEM;
- }
-
- /* already configured */
- if (info->intf != NULL)
+ } else if (info->intf != NULL) {
+ /* already configured */
return 0;
+ }
/*
* If the toolstack (or the hypervisor) hasn't set these values, the
* default value is 0. Even though mfn = 0 and evtchn = 0 are
@@ -259,12 +258,10 @@ static int xen_pv_console_init(void)
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
if (!info)
return -ENOMEM;
- }
-
- /* already configured */
- if (info->intf != NULL)
+ } else if (info->intf != NULL) {
+ /* already configured */
return 0;
-
+ }
info->evtchn = xen_start_info->console.domU.evtchn;
info->intf = mfn_to_virt(xen_start_info->console.domU.mfn);
info->vtermno = HVC_COOKIE;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 4ef747307ecb..d5c689d6217e 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -169,7 +169,6 @@
#define SERIAL_IMX_MAJOR 207
#define MINOR_START 16
#define DEV_NAME "ttymxc"
-#define MAX_INTERNAL_IRQ MXC_INTERNAL_IRQS
/*
* This determines how often we check the modem status signals
@@ -741,10 +740,7 @@ static int imx_startup(struct uart_port *port)
/* do not use RTS IRQ on IrDA */
if (!USE_IRDA(sport)) {
- retval = request_irq(sport->rtsirq, imx_rtsint,
- (sport->rtsirq < MAX_INTERNAL_IRQ) ? 0 :
- IRQF_TRIGGER_FALLING |
- IRQF_TRIGGER_RISING,
+ retval = request_irq(sport->rtsirq, imx_rtsint, 0,
DRIVER_NAME, sport);
if (retval)
goto error_out3;
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index ec56d8397aae..2e341b81ff89 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -33,6 +33,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/of_device.h>
#include <asm/cacheflush.h>
@@ -675,6 +676,30 @@ static struct uart_driver auart_driver = {
#endif
};
+/*
+ * This function returns 1 if pdev isn't a device instatiated by dt, 0 if it
+ * could successfully get all information from dt or a negative errno.
+ */
+static int serial_mxs_probe_dt(struct mxs_auart_port *s,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ if (!np)
+ /* no device tree device */
+ return 1;
+
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id: %d\n", ret);
+ return ret;
+ }
+ s->port.line = ret;
+
+ return 0;
+}
+
static int __devinit mxs_auart_probe(struct platform_device *pdev)
{
struct mxs_auart_port *s;
@@ -689,6 +714,12 @@ static int __devinit mxs_auart_probe(struct platform_device *pdev)
goto out;
}
+ ret = serial_mxs_probe_dt(s, pdev);
+ if (ret > 0)
+ s->port.line = pdev->id < 0 ? 0 : pdev->id;
+ else if (ret < 0)
+ goto out_free;
+
pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
if (IS_ERR(pinctrl)) {
ret = PTR_ERR(pinctrl);
@@ -711,7 +742,6 @@ static int __devinit mxs_auart_probe(struct platform_device *pdev)
s->port.membase = ioremap(r->start, resource_size(r));
s->port.ops = &mxs_auart_ops;
s->port.iotype = UPIO_MEM;
- s->port.line = pdev->id < 0 ? 0 : pdev->id;
s->port.fifosize = 16;
s->port.uartclk = clk_get_rate(s->clk);
s->port.type = PORT_IMX;
@@ -728,7 +758,7 @@ static int __devinit mxs_auart_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, s);
- auart_port[pdev->id] = s;
+ auart_port[s->port.line] = s;
mxs_auart_reset(&s->port);
@@ -769,12 +799,19 @@ static int __devexit mxs_auart_remove(struct platform_device *pdev)
return 0;
}
+static struct of_device_id mxs_auart_dt_ids[] = {
+ { .compatible = "fsl,imx23-auart", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_auart_dt_ids);
+
static struct platform_driver mxs_auart_driver = {
.probe = mxs_auart_probe,
.remove = __devexit_p(mxs_auart_remove),
.driver = {
.name = "mxs-auart",
.owner = THIS_MODULE,
+ .of_match_table = mxs_auart_dt_ids,
},
};
@@ -807,3 +844,4 @@ module_init(mxs_auart_init);
module_exit(mxs_auart_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Freescale MXS application uart driver");
+MODULE_ALIAS("platform:mxs-auart");
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 1bd9163bc118..d4d8c9453cd8 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1615,9 +1615,9 @@ static bool filter(struct dma_chan *chan, void *slave)
struct sh_dmae_slave *param = slave;
dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
- param->slave_id);
+ param->shdma_slave.slave_id);
- chan->private = param;
+ chan->private = &param->shdma_slave;
return true;
}
@@ -1656,7 +1656,7 @@ static void sci_request_dma(struct uart_port *port)
param = &s->param_tx;
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
- param->slave_id = s->cfg->dma_slave_tx;
+ param->shdma_slave.slave_id = s->cfg->dma_slave_tx;
s->cookie_tx = -EINVAL;
chan = dma_request_channel(mask, filter, param);
@@ -1684,7 +1684,7 @@ static void sci_request_dma(struct uart_port *port)
param = &s->param_rx;
/* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
- param->slave_id = s->cfg->dma_slave_rx;
+ param->shdma_slave.slave_id = s->cfg->dma_slave_rx;
chan = dma_request_channel(mask, filter, param);
dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
diff --git a/drivers/usb/atm/xusbatm.c b/drivers/usb/atm/xusbatm.c
index 14ec9f0c5924..b3b1bb78b2ef 100644
--- a/drivers/usb/atm/xusbatm.c
+++ b/drivers/usb/atm/xusbatm.c
@@ -20,7 +20,7 @@
******************************************************************************/
#include <linux/module.h>
-#include <linux/etherdevice.h> /* for random_ether_addr() */
+#include <linux/etherdevice.h> /* for eth_random_addr() */
#include "usbatm.h"
@@ -163,7 +163,7 @@ static int xusbatm_atm_start(struct usbatm_data *usbatm,
atm_dbg(usbatm, "%s entered\n", __func__);
/* use random MAC as we've no way to get it from the device */
- random_ether_addr(atm_dev->esi);
+ eth_random_addr(atm_dev->esi);
return 0;
}
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 8fd398dffced..ee469274a3fe 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -500,6 +500,8 @@ retry:
goto retry;
}
if (!desc->reslength) { /* zero length read */
+ dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__);
+ clear_bit(WDM_READ, &desc->flags);
spin_unlock_irq(&desc->iuspin);
goto retry;
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 25a7422ee657..8fb484984c86 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2324,12 +2324,16 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay, bool warm);
-/* Is a USB 3.0 port in the Inactive state? */
-static bool hub_port_inactive(struct usb_hub *hub, u16 portstatus)
+/* Is a USB 3.0 port in the Inactive or Complinance Mode state?
+ * Port worm reset is required to recover
+ */
+static bool hub_port_warm_reset_required(struct usb_hub *hub, u16 portstatus)
{
return hub_is_superspeed(hub->hdev) &&
- (portstatus & USB_PORT_STAT_LINK_STATE) ==
- USB_SS_PORT_LS_SS_INACTIVE;
+ (((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_SS_INACTIVE) ||
+ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_COMP_MOD)) ;
}
static int hub_port_wait_reset(struct usb_hub *hub, int port1,
@@ -2365,7 +2369,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
*
* See https://bugzilla.kernel.org/show_bug.cgi?id=41752
*/
- if (hub_port_inactive(hub, portstatus)) {
+ if (hub_port_warm_reset_required(hub, portstatus)) {
int ret;
if ((portchange & USB_PORT_STAT_C_CONNECTION))
@@ -4408,9 +4412,7 @@ static void hub_events(void)
/* Warm reset a USB3 protocol port if it's in
* SS.Inactive state.
*/
- if (hub_is_superspeed(hub->hdev) &&
- (portstatus & USB_PORT_STAT_LINK_STATE)
- == USB_SS_PORT_LS_SS_INACTIVE) {
+ if (hub_port_warm_reset_required(hub, portstatus)) {
dev_dbg(hub_dev, "warm reset port %d\n", i);
hub_port_reset(hub, i, NULL,
HUB_BH_RESET_TIME, true);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index bddc8fd9a7be..271ca161d7ef 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -185,7 +185,7 @@ config USB_FUSB300
config USB_OMAP
tristate "OMAP USB Device Controller"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP1
select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_H4_OTG
select USB_OTG_UTILS if ARCH_OMAP
help
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index a460e8c204f4..89cbd2b22ab0 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -44,7 +44,8 @@
#include <asm/mach-types.h>
#include <plat/dma.h>
-#include <plat/usb.h>
+
+#include <mach/usb.h>
#include "omap_udc.h"
diff --git a/drivers/usb/gadget/tcm_usb_gadget.c b/drivers/usb/gadget/tcm_usb_gadget.c
index c46439c8dd74..5444866e13ef 100644
--- a/drivers/usb/gadget/tcm_usb_gadget.c
+++ b/drivers/usb/gadget/tcm_usb_gadget.c
@@ -294,7 +294,7 @@ static int bot_send_write_request(struct usbg_cmd *cmd)
pr_err("%s(%d)\n", __func__, __LINE__);
wait_for_completion(&cmd->write_complete);
- transport_generic_process_write(se_cmd);
+ target_execute_cmd(se_cmd);
cleanup:
return ret;
}
@@ -725,7 +725,7 @@ static int uasp_send_write_request(struct usbg_cmd *cmd)
}
wait_for_completion(&cmd->write_complete);
- transport_generic_process_write(se_cmd);
+ target_execute_cmd(se_cmd);
cleanup:
return ret;
}
@@ -1065,16 +1065,20 @@ static void usbg_cmd_work(struct work_struct *work)
tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
cmd->prio_attr, cmd->sense_iu.sense);
-
- transport_send_check_condition_and_sense(se_cmd,
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- usbg_cleanup_cmd(cmd);
- return;
+ goto out;
}
- target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+ if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE);
+ 0, cmd->prio_attr, dir, TARGET_SCF_UNKNOWN_SIZE) < 0)
+ goto out;
+
+ return;
+
+out:
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+ usbg_cleanup_cmd(cmd);
}
static int usbg_submit_command(struct f_uas *fu,
@@ -1177,16 +1181,20 @@ static void bot_cmd_work(struct work_struct *work)
tv_nexus->tvn_se_sess->se_tpg->se_tpg_tfo,
tv_nexus->tvn_se_sess, cmd->data_len, DMA_NONE,
cmd->prio_attr, cmd->sense_iu.sense);
-
- transport_send_check_condition_and_sense(se_cmd,
- TCM_UNSUPPORTED_SCSI_OPCODE, 1);
- usbg_cleanup_cmd(cmd);
- return;
+ goto out;
}
- target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
+ if (target_submit_cmd(se_cmd, tv_nexus->tvn_se_sess,
cmd->cmd_buf, cmd->sense_iu.sense, cmd->unpacked_lun,
- cmd->data_len, cmd->prio_attr, dir, 0);
+ cmd->data_len, cmd->prio_attr, dir, 0) < 0)
+ goto out;
+
+ return;
+
+out:
+ transport_send_check_condition_and_sense(se_cmd,
+ TCM_UNSUPPORTED_SCSI_OPCODE, 1);
+ usbg_cleanup_cmd(cmd);
}
static int bot_submit_command(struct f_uas *fu,
@@ -1400,19 +1408,6 @@ static u32 usbg_tpg_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}
-static int usbg_new_cmd(struct se_cmd *se_cmd)
-{
- struct usbg_cmd *cmd = container_of(se_cmd, struct usbg_cmd,
- se_cmd);
- int ret;
-
- ret = target_setup_cmd_from_cdb(se_cmd, cmd->cmd_buf);
- if (ret)
- return ret;
-
- return transport_generic_map_mem_to_cmd(se_cmd, NULL, 0, NULL, 0);
-}
-
static void usbg_cmd_release(struct kref *ref)
{
struct usbg_cmd *cmd = container_of(ref, struct usbg_cmd,
@@ -1902,7 +1897,6 @@ static struct target_core_fabric_ops usbg_ops = {
.tpg_alloc_fabric_acl = usbg_alloc_fabric_acl,
.tpg_release_fabric_acl = usbg_release_fabric_acl,
.tpg_get_inst_index = usbg_tpg_get_inst_index,
- .new_cmd_map = usbg_new_cmd,
.release_cmd = usbg_release_cmd,
.shutdown_session = usbg_shutdown_session,
.close_session = usbg_close_session,
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 47cf48b51c9d..b9e1925b2df0 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -724,7 +724,7 @@ static int get_ether_addr(const char *str, u8 *dev_addr)
if (is_valid_ether_addr(dev_addr))
return 0;
}
- random_ether_addr(dev_addr);
+ eth_random_addr(dev_addr);
return 1;
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 83e58df29fe3..dcfaaa91a3fb 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -308,7 +308,7 @@ config USB_OHCI_HCD
config USB_OHCI_HCD_OMAP1
bool "OHCI support for OMAP1/2 chips"
- depends on USB_OHCI_HCD && (ARCH_OMAP1 || ARCH_OMAP2)
+ depends on USB_OHCI_HCD && ARCH_OMAP1
default y
---help---
Enables support for the OHCI controller on OMAP1/2 chips.
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 800be38c78b4..1d9401e0990a 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1349,6 +1349,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_msm_driver
#endif
+#ifdef CONFIG_TILE_USB
+#include "ehci-tilegx.c"
+#define PLATFORM_DRIVER ehci_hcd_tilegx_driver
+#endif
+
#ifdef CONFIG_USB_EHCI_HCD_PMC_MSP
#include "ehci-pmcmsp.c"
#define PLATFORM_DRIVER ehci_hcd_msp_driver
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 17cfb8a1131c..c30435499a02 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -281,14 +281,13 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
}
}
+ /* Hold PHYs in reset while initializing EHCI controller */
if (pdata->phy_reset) {
if (gpio_is_valid(pdata->reset_gpio_port[0]))
- gpio_request_one(pdata->reset_gpio_port[0],
- GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+ gpio_set_value_cansleep(pdata->reset_gpio_port[0], 0);
if (gpio_is_valid(pdata->reset_gpio_port[1]))
- gpio_request_one(pdata->reset_gpio_port[1],
- GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+ gpio_set_value_cansleep(pdata->reset_gpio_port[1], 0);
/* Hold the PHY in RESET for enough time till DIR is high */
udelay(10);
@@ -330,6 +329,11 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
ehci_reset(omap_ehci);
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret) {
+ dev_err(dev, "failed to add hcd with err %d\n", ret);
+ goto err_add_hcd;
+ }
if (pdata->phy_reset) {
/* Hold the PHY in RESET for enough time till
@@ -344,12 +348,6 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
gpio_set_value_cansleep(pdata->reset_gpio_port[1], 1);
}
- ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (ret) {
- dev_err(dev, "failed to add hcd with err %d\n", ret);
- goto err_add_hcd;
- }
-
/* root ports should always stay powered */
ehci_port_power(omap_ehci, 1);
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 68548236ec42..ab8a3bf628e3 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -46,8 +46,8 @@ static void tegra_ehci_power_up(struct usb_hcd *hcd)
{
struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller);
- clk_enable(tegra->emc_clk);
- clk_enable(tegra->clk);
+ clk_prepare_enable(tegra->emc_clk);
+ clk_prepare_enable(tegra->clk);
tegra_usb_phy_power_on(tegra->phy);
tegra->host_resumed = 1;
}
@@ -58,8 +58,8 @@ static void tegra_ehci_power_down(struct usb_hcd *hcd)
tegra->host_resumed = 0;
tegra_usb_phy_power_off(tegra->phy);
- clk_disable(tegra->clk);
- clk_disable(tegra->emc_clk);
+ clk_disable_unprepare(tegra->clk);
+ clk_disable_unprepare(tegra->emc_clk);
}
static int tegra_ehci_internal_port_reset(
@@ -671,7 +671,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail_clk;
}
- err = clk_enable(tegra->clk);
+ err = clk_prepare_enable(tegra->clk);
if (err)
goto fail_clken;
@@ -682,7 +682,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail_emc_clk;
}
- clk_enable(tegra->emc_clk);
+ clk_prepare_enable(tegra->emc_clk);
clk_set_rate(tegra->emc_clk, 400000000);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -782,10 +782,10 @@ fail:
fail_phy:
iounmap(hcd->regs);
fail_io:
- clk_disable(tegra->emc_clk);
+ clk_disable_unprepare(tegra->emc_clk);
clk_put(tegra->emc_clk);
fail_emc_clk:
- clk_disable(tegra->clk);
+ clk_disable_unprepare(tegra->clk);
fail_clken:
clk_put(tegra->clk);
fail_clk:
@@ -820,10 +820,10 @@ static int tegra_ehci_remove(struct platform_device *pdev)
tegra_usb_phy_close(tegra->phy);
iounmap(hcd->regs);
- clk_disable(tegra->clk);
+ clk_disable_unprepare(tegra->clk);
clk_put(tegra->clk);
- clk_disable(tegra->emc_clk);
+ clk_disable_unprepare(tegra->emc_clk);
clk_put(tegra->emc_clk);
kfree(tegra);
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
new file mode 100644
index 000000000000..1d215cdb9dea
--- /dev/null
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Tilera TILE-Gx USB EHCI host controller driver.
+ */
+
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/usb/tilegx.h>
+#include <linux/usb.h>
+
+#include <asm/homecache.h>
+
+#include <gxio/iorpc_usb_host.h>
+#include <gxio/usb_host.h>
+
+static void tilegx_start_ehc(void)
+{
+}
+
+static void tilegx_stop_ehc(void)
+{
+}
+
+static int tilegx_ehci_setup(struct usb_hcd *hcd)
+{
+ int ret = ehci_init(hcd);
+
+ /*
+ * Some drivers do:
+ *
+ * struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ * ehci->need_io_watchdog = 0;
+ *
+ * here, but since this is a new driver we're going to leave the
+ * watchdog enabled. Later we may try to turn it off and see
+ * whether we run into any problems.
+ */
+
+ return ret;
+}
+
+static const struct hc_driver ehci_tilegx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Tile-Gx EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * Generic hardware linkage.
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * Basic lifecycle operations.
+ */
+ .reset = tilegx_ehci_setup,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * Managing I/O requests and associated device resources.
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+
+ /*
+ * Scheduling support.
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * Root hub support.
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+ .relinquish_port = ehci_relinquish_port,
+ .port_handed_over = ehci_port_handed_over,
+
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+};
+
+static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data;
+ pte_t pte = { 0 };
+ int my_cpu = smp_processor_id();
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Try to initialize our GXIO context; if we can't, the device
+ * doesn't exist.
+ */
+ if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 1) != 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ /*
+ * We don't use rsrc_start to map in our registers, but seems like
+ * we ought to set it to something, so we use the register VA.
+ */
+ hcd->rsrc_start =
+ (ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+ hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
+ hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+
+ tilegx_start_ehc();
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = hcd->regs;
+ ehci->regs =
+ hcd->regs + HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase));
+ /* cache this readonly data; minimize chip reads */
+ ehci->hcs_params = readl(&ehci->caps->hcs_params);
+
+ /* Create our IRQs and register them. */
+ pdata->irq = create_irq();
+ if (pdata->irq < 0) {
+ ret = -ENXIO;
+ goto err_no_irq;
+ }
+
+ tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);
+
+ /* Configure interrupts. */
+ ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
+ cpu_x(my_cpu), cpu_y(my_cpu),
+ KERNEL_PL, pdata->irq);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ /* Register all of our memory. */
+ pte = pte_set_home(pte, PAGE_HOME_HASH);
+ ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
+ if (ret == 0) {
+ platform_set_drvdata(pdev, hcd);
+ return ret;
+ }
+
+err_have_irq:
+ destroy_irq(pdata->irq);
+err_no_irq:
+ tilegx_stop_ehc();
+ usb_put_hcd(hcd);
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ return ret;
+}
+
+static int ehci_hcd_tilegx_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data;
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+ tilegx_stop_ehc();
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ destroy_irq(pdata->irq);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void ehci_hcd_tilegx_drv_shutdown(struct platform_device *pdev)
+{
+ usb_hcd_platform_shutdown(pdev);
+ ehci_hcd_tilegx_drv_remove(pdev);
+}
+
+static struct platform_driver ehci_hcd_tilegx_driver = {
+ .probe = ehci_hcd_tilegx_drv_probe,
+ .remove = ehci_hcd_tilegx_drv_remove,
+ .shutdown = ehci_hcd_tilegx_drv_shutdown,
+ .driver = {
+ .name = "tilegx-ehci",
+ .owner = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS("platform:tilegx-ehci");
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index e0adf5c0cf55..2b1e8d84c873 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1100,6 +1100,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_octeon_driver
#endif
+#ifdef CONFIG_TILE_USB
+#include "ohci-tilegx.c"
+#define PLATFORM_DRIVER ohci_hcd_tilegx_driver
+#endif
+
#ifdef CONFIG_USB_CNS3XXX_OHCI
#include "ohci-cns3xxx.c"
#define PLATFORM_DRIVER ohci_hcd_cns3xxx_driver
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 9ce35d0d9d5d..b02c344e2cc9 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -20,14 +20,15 @@
#include <linux/clk.h>
#include <linux/gpio.h>
-#include <mach/hardware.h>
#include <asm/io.h>
#include <asm/mach-types.h>
#include <plat/mux.h>
-#include <mach/irqs.h>
#include <plat/fpga.h>
-#include <plat/usb.h>
+
+#include <mach/hardware.h>
+#include <mach/irqs.h>
+#include <mach/usb.h>
/* OMAP-1510 OHCI has its own MMU for DMA */
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
new file mode 100644
index 000000000000..1ae7b28a71c2
--- /dev/null
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ */
+
+/*
+ * Tilera TILE-Gx USB OHCI host controller driver.
+ */
+
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/usb/tilegx.h>
+#include <linux/usb.h>
+
+#include <asm/homecache.h>
+
+#include <gxio/iorpc_usb_host.h>
+#include <gxio/usb_host.h>
+
+static void tilegx_start_ohc(void)
+{
+}
+
+static void tilegx_stop_ohc(void)
+{
+}
+
+static int tilegx_ohci_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ret = ohci_init(ohci);
+ if (ret < 0)
+ return ret;
+
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ dev_err(hcd->self.controller, "can't start %s\n",
+ hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver ohci_tilegx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Tile-Gx OHCI",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * Generic hardware linkage.
+ */
+ .irq = ohci_irq,
+ .flags = HCD_MEMORY | HCD_LOCAL_MEM | HCD_USB11,
+
+ /*
+ * Basic lifecycle operations.
+ */
+ .start = tilegx_ohci_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * Managing I/O requests and associated device resources.
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * Scheduling support.
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * Root hub support.
+ */
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data;
+ pte_t pte = { 0 };
+ int my_cpu = smp_processor_id();
+ int ret;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ /*
+ * Try to initialize our GXIO context; if we can't, the device
+ * doesn't exist.
+ */
+ if (gxio_usb_host_init(&pdata->usb_ctx, pdata->dev_index, 0) != 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd)
+ return -ENOMEM;
+
+ /*
+ * We don't use rsrc_start to map in our registers, but seems like
+ * we ought to set it to something, so we use the register VA.
+ */
+ hcd->rsrc_start =
+ (ulong) gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+ hcd->rsrc_len = gxio_usb_host_get_reg_len(&pdata->usb_ctx);
+ hcd->regs = gxio_usb_host_get_reg_start(&pdata->usb_ctx);
+
+ tilegx_start_ohc();
+
+ /* Create our IRQs and register them. */
+ pdata->irq = create_irq();
+ if (pdata->irq < 0) {
+ ret = -ENXIO;
+ goto err_no_irq;
+ }
+
+ tile_irq_activate(pdata->irq, TILE_IRQ_PERCPU);
+
+ /* Configure interrupts. */
+ ret = gxio_usb_host_cfg_interrupt(&pdata->usb_ctx,
+ cpu_x(my_cpu), cpu_y(my_cpu),
+ KERNEL_PL, pdata->irq);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ /* Register all of our memory. */
+ pte = pte_set_home(pte, PAGE_HOME_HASH);
+ ret = gxio_usb_host_register_client_memory(&pdata->usb_ctx, pte, 0);
+ if (ret) {
+ ret = -ENXIO;
+ goto err_have_irq;
+ }
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ ret = usb_add_hcd(hcd, pdata->irq, IRQF_SHARED);
+ if (ret == 0) {
+ platform_set_drvdata(pdev, hcd);
+ return ret;
+ }
+
+err_have_irq:
+ destroy_irq(pdata->irq);
+err_no_irq:
+ tilegx_stop_ohc();
+ usb_put_hcd(hcd);
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ return ret;
+}
+
+static int ohci_hcd_tilegx_drv_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct tilegx_usb_platform_data* pdata = pdev->dev.platform_data;
+
+ usb_remove_hcd(hcd);
+ usb_put_hcd(hcd);
+ tilegx_stop_ohc();
+ gxio_usb_host_destroy(&pdata->usb_ctx);
+ destroy_irq(pdata->irq);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void ohci_hcd_tilegx_drv_shutdown(struct platform_device *pdev)
+{
+ usb_hcd_platform_shutdown(pdev);
+ ohci_hcd_tilegx_drv_remove(pdev);
+}
+
+static struct platform_driver ohci_hcd_tilegx_driver = {
+ .probe = ohci_hcd_tilegx_drv_probe,
+ .remove = ohci_hcd_tilegx_drv_remove,
+ .shutdown = ohci_hcd_tilegx_drv_shutdown,
+ .driver = {
+ .name = "tilegx-ohci",
+ .owner = THIS_MODULE,
+ }
+};
+
+MODULE_ALIAS("platform:tilegx-ohci");
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 2732ef660c5c..7b01094d7993 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -462,6 +462,42 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
}
}
+/* Updates Link Status for super Speed port */
+static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+{
+ u32 pls = status_reg & PORT_PLS_MASK;
+
+ /* resume state is a xHCI internal state.
+ * Do not report it to usb core.
+ */
+ if (pls == XDEV_RESUME)
+ return;
+
+ /* When the CAS bit is set then warm reset
+ * should be performed on port
+ */
+ if (status_reg & PORT_CAS) {
+ /* The CAS bit can be set while the port is
+ * in any link state.
+ * Only roothubs have CAS bit, so we
+ * pretend to be in compliance mode
+ * unless we're already in compliance
+ * or the inactive state.
+ */
+ if (pls != USB_SS_PORT_LS_COMP_MOD &&
+ pls != USB_SS_PORT_LS_SS_INACTIVE) {
+ pls = USB_SS_PORT_LS_COMP_MOD;
+ }
+ /* Return also connection bit -
+ * hub state machine resets port
+ * when this bit is set.
+ */
+ pls |= USB_PORT_STAT_CONNECTION;
+ }
+ /* update status field */
+ *status |= pls;
+}
+
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
@@ -606,13 +642,9 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
else
status |= USB_PORT_STAT_POWER;
}
- /* Port Link State */
+ /* Update Port Link State for super speed ports*/
if (hcd->speed == HCD_USB3) {
- /* resume state is a xHCI internal state.
- * Do not report it to usb core.
- */
- if ((temp & PORT_PLS_MASK) != XDEV_RESUME)
- status |= (temp & PORT_PLS_MASK);
+ xhci_hub_report_link_state(&status, temp);
}
if (bus_state->port_c_suspend & (1 << wIndex))
status |= 1 << USB_PORT_FEAT_C_SUSPEND;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 23b4aefd1036..8275645889da 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -885,6 +885,17 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
num_trbs_free_temp = ep_ring->num_trbs_free;
dequeue_temp = ep_ring->dequeue;
+ /* If we get two back-to-back stalls, and the first stalled transfer
+ * ends just before a link TRB, the dequeue pointer will be left on
+ * the link TRB by the code in the while loop. So we have to update
+ * the dequeue pointer one segment further, or we'll jump off
+ * the segment into la-la-land.
+ */
+ if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+ ep_ring->deq_seg = ep_ring->deq_seg->next;
+ ep_ring->dequeue = ep_ring->deq_seg->trbs;
+ }
+
while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
/* We have more usable TRBs */
ep_ring->num_trbs_free++;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index de3d6e3e57be..55c0785810c9 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -341,7 +341,11 @@ struct xhci_op_regs {
#define PORT_PLC (1 << 22)
/* port configure error change - port failed to configure its link partner */
#define PORT_CEC (1 << 23)
-/* bit 24 reserved */
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be perfomed to clear this bit and move port
+ * to connected state.
+ */
+#define PORT_CAS (1 << 24)
/* wake on connect (enable) */
#define PORT_WKCONN_E (1 << 25)
/* wake on disconnect (enable) */
diff --git a/drivers/usb/otg/isp1301_omap.c b/drivers/usb/otg/isp1301_omap.c
index 70cf5d7bca48..e0558dfcfafc 100644
--- a/drivers/usb/otg/isp1301_omap.c
+++ b/drivers/usb/otg/isp1301_omap.c
@@ -36,9 +36,9 @@
#include <asm/irq.h>
#include <asm/mach-types.h>
-#include <plat/usb.h>
#include <plat/mux.h>
+#include <mach/usb.h>
#ifndef DEBUG
#undef VERBOSE
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index a165490bae48..8c9bb1ad3069 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -19,7 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
-#include "./common.h"
+#include "common.h"
/*
* image of renesas_usbhs
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 3f3ccd358753..dddf40a59ded 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -22,8 +22,8 @@
struct usbhs_priv;
-#include "./mod.h"
-#include "./pipe.h"
+#include "mod.h"
+#include "pipe.h"
/*
*
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 6ec7f838d7fa..30b757a3f59e 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -17,8 +17,8 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
-#include "./common.h"
-#include "./pipe.h"
+#include "common.h"
+#include "pipe.h"
#define usbhsf_get_cfifo(p) (&((p)->fifo_info.cfifo))
#define usbhsf_get_d0fifo(p) (&((p)->fifo_info.d0fifo))
@@ -994,7 +994,7 @@ static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
*
* usbhs doesn't recognize id = 0 as valid DMA
*/
- if (0 == slave->slave_id)
+ if (0 == slave->shdma_slave.slave_id)
return false;
chan->private = slave;
@@ -1173,8 +1173,8 @@ int usbhs_fifo_probe(struct usbhs_priv *priv)
fifo->port = D0FIFO;
fifo->sel = D0FIFOSEL;
fifo->ctr = D0FIFOCTR;
- fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
- fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
+ fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
+ fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
/* D1FIFO */
fifo = usbhsf_get_d1fifo(priv);
@@ -1182,8 +1182,8 @@ int usbhs_fifo_probe(struct usbhs_priv *priv)
fifo->port = D1FIFO;
fifo->sel = D1FIFOSEL;
fifo->ctr = D1FIFOCTR;
- fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
- fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
+ fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
+ fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
return 0;
}
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index 0871e816df45..82a628f96c03 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -16,8 +16,8 @@
*/
#include <linux/interrupt.h>
-#include "./common.h"
-#include "./mod.h"
+#include "common.h"
+#include "mod.h"
#define usbhs_priv_to_modinfo(priv) (&priv->mod_info)
#define usbhs_mod_info_call(priv, func, param...) \
diff --git a/drivers/usb/renesas_usbhs/mod.h b/drivers/usb/renesas_usbhs/mod.h
index 6c6875533f01..1ef5bf604070 100644
--- a/drivers/usb/renesas_usbhs/mod.h
+++ b/drivers/usb/renesas_usbhs/mod.h
@@ -19,7 +19,7 @@
#include <linux/spinlock.h>
#include <linux/usb/renesas_usbhs.h>
-#include "./common.h"
+#include "common.h"
/*
* struct
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index feb06d6d2814..122526cfd32b 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -16,8 +16,8 @@
*/
#include <linux/delay.h>
#include <linux/slab.h>
-#include "./common.h"
-#include "./pipe.h"
+#include "common.h"
+#include "pipe.h"
/*
* macros
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index fa18b7dc2b2a..08786c06dcf1 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -17,8 +17,8 @@
#ifndef RENESAS_USB_PIPE_H
#define RENESAS_USB_PIPE_H
-#include "./common.h"
-#include "./fifo.h"
+#include "common.h"
+#include "fifo.h"
/*
* struct
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 81423f7361db..d47eb06fe463 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -222,14 +222,6 @@ static int metrousb_open(struct tty_struct *tty, struct usb_serial_port *port)
metro_priv->throttled = 0;
spin_unlock_irqrestore(&metro_priv->lock, flags);
- /*
- * Force low_latency on so that our tty_push actually forces the data
- * through, otherwise it is scheduled, and with high data rates (like
- * with OHCI) data can get lost.
- */
- if (tty)
- tty->low_latency = 1;
-
/* Clear the urb pipe. */
usb_clear_halt(serial->dev, port->interrupt_in_urb->pipe);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index adf8ce72be50..417ab1b0aa30 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -497,6 +497,15 @@ static void option_instat_callback(struct urb *urb);
/* MediaTek products */
#define MEDIATEK_VENDOR_ID 0x0e8d
+#define MEDIATEK_PRODUCT_DC_1COM 0x00a0
+#define MEDIATEK_PRODUCT_DC_4COM 0x00a5
+#define MEDIATEK_PRODUCT_DC_5COM 0x00a4
+#define MEDIATEK_PRODUCT_7208_1COM 0x7101
+#define MEDIATEK_PRODUCT_7208_2COM 0x7102
+#define MEDIATEK_PRODUCT_FP_1COM 0x0003
+#define MEDIATEK_PRODUCT_FP_2COM 0x0023
+#define MEDIATEK_PRODUCT_FPDC_1COM 0x0043
+#define MEDIATEK_PRODUCT_FPDC_2COM 0x0033
/* Cellient products */
#define CELLIENT_VENDOR_ID 0x2692
@@ -554,6 +563,10 @@ static const struct option_blacklist_info net_intf1_blacklist = {
.reserved = BIT(1),
};
+static const struct option_blacklist_info net_intf2_blacklist = {
+ .reserved = BIT(2),
+};
+
static const struct option_blacklist_info net_intf3_blacklist = {
.reserved = BIT(3),
};
@@ -1099,6 +1112,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
@@ -1240,6 +1255,17 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_5COM, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_1COM, 0x02, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7208_2COM, 0x02, 0x02, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FP_2COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_1COM, 0x0a, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_FPDC_2COM, 0x0a, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 11418da9bc09..a3d54366afcc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -236,6 +236,11 @@ static int slave_configure(struct scsi_device *sdev)
US_FL_SCM_MULT_TARG)) &&
us->protocol == USB_PR_BULK)
us->use_last_sector_hacks = 1;
+
+ /* Check if write cache default on flag is set or not */
+ if (us->fflags & US_FL_WRITE_CACHE)
+ sdev->wce_default_on = 1;
+
} else {
/* Non-disk-type devices don't need to blacklist any pages
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1719886bb9be..62a31bea0634 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1267,6 +1267,12 @@ UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
0 ),
+/* Reported by Namjae Jeon <namjae.jeon@samsung.com> */
+UNUSUAL_DEV(0x0bc2, 0x2300, 0x0000, 0x9999,
+ "Seagate",
+ "Portable HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_WRITE_CACHE),
+
/* Reported by Ben Efros <ben@pc-doctor.com> */
UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
"Seagate",
@@ -1468,6 +1474,12 @@ UNUSUAL_DEV( 0x1058, 0x0704, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_SANE_SENSE),
+/* Reported by Namjae Jeon <namjae.jeon@samsung.com> */
+UNUSUAL_DEV(0x1058, 0x070a, 0x0000, 0x9999,
+ "Western Digital",
+ "My Passport HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_WRITE_CACHE),
+
/* Reported by Fabio Venturi <f.venturi@tdnet.it>
* The device reports a vendor-specific bDeviceClass.
*/
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index e23c30ab66da..d012fe4329e7 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -473,7 +473,7 @@ static void adjust_quirks(struct us_data *us)
US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
- US_FL_INITIAL_READ10);
+ US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE);
p = quirks;
while (*p) {
@@ -529,6 +529,9 @@ static void adjust_quirks(struct us_data *us)
case 'o':
f |= US_FL_CAPACITY_OK;
break;
+ case 'p':
+ f |= US_FL_WRITE_CACHE;
+ break;
case 'r':
f |= US_FL_IGNORE_RESIDUE;
break;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index f82a7394756e..072cbbadbc36 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -823,14 +823,14 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
return -EFAULT;
return vhost_net_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
- features = VHOST_FEATURES;
+ features = VHOST_NET_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
- if (features & ~VHOST_FEATURES)
+ if (features & ~VHOST_NET_FEATURES)
return -EOPNOTSUPP;
return vhost_net_set_features(n, features);
case VHOST_RESET_OWNER:
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 3de00d9fae2e..91d6f060aade 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -261,14 +261,14 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
return -EFAULT;
return vhost_test_run(n, test);
case VHOST_GET_FEATURES:
- features = VHOST_FEATURES;
+ features = VHOST_NET_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
- if (features & ~VHOST_FEATURES)
+ if (features & ~VHOST_NET_FEATURES)
return -EOPNOTSUPP;
return vhost_test_set_features(n, features);
case VHOST_RESET_OWNER:
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 112156f68afb..ef82a0d18489 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -64,7 +64,7 @@ static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
return 0;
}
-static void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
+void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
INIT_LIST_HEAD(&work->node);
work->fn = fn;
@@ -137,8 +137,7 @@ void vhost_poll_flush(struct vhost_poll *poll)
vhost_work_flush(poll->dev, &poll->work);
}
-static inline void vhost_work_queue(struct vhost_dev *dev,
- struct vhost_work *work)
+void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
unsigned long flags;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 8de1fd5b8efb..1125af3d27d1 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -43,6 +43,9 @@ struct vhost_poll {
struct vhost_dev *dev;
};
+void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
+void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
+
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
unsigned long mask, struct vhost_dev *dev);
void vhost_poll_start(struct vhost_poll *poll, struct file *file);
@@ -201,7 +204,8 @@ enum {
VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
(1ULL << VIRTIO_RING_F_EVENT_IDX) |
- (1ULL << VHOST_F_LOG_ALL) |
+ (1ULL << VHOST_F_LOG_ALL),
+ VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
(1ULL << VIRTIO_NET_F_MRG_RXBUF),
};
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index c22e8d39a2cb..a1d58e9d3073 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -336,8 +336,8 @@ static void arcfb_lcd_update_horiz(struct arcfb_par *par, unsigned int left,
}
/*
- * here we start the process of spliting out the fb update into
- * individual blocks of pixels. we end up spliting into 64x64 blocks
+ * here we start the process of splitting out the fb update into
+ * individual blocks of pixels. we end up splitting into 64x64 blocks
* and finally down to 64x8 pages.
*/
static void arcfb_lcd_update(struct arcfb_par *par, unsigned int dx,
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index d99505b16374..15055395cd95 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -939,7 +939,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
* up a splash image.
*/
} else {
- /* alocate memory buffer */
+ /* allocate memory buffer */
ret = atmel_lcdfb_alloc_video_memory(sinfo);
if (ret < 0) {
dev_err(dev, "cannot allocate framebuffer: %d\n", ret);
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 622f12b62a47..3f2e8c13f1ca 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -863,7 +863,7 @@ static int aty_var_to_crtc(const struct fb_info *info,
if ((xres > 1600) || (yres > 1200)) {
FAIL("MACH64 chips are designed for max 1600x1200\n"
- "select anoter resolution.");
+ "select another resolution.");
}
h_sync_strt = h_disp + var->right_margin;
h_sync_end = h_sync_strt + var->hsync_len;
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index ce1506b75adf..9e279ee38da8 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -2018,7 +2018,7 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
if ((rinfo->family == CHIP_FAMILY_RS100) ||
(rinfo->family == CHIP_FAMILY_RS200)) {
/* This is to workaround the asic bug for RMX, some versions
- of BIOS dosen't have this register initialized correctly.
+ of BIOS doesn't have this register initialized correctly.
*/
OUTREGP(CRTC_MORE_CNTL, CRTC_H_CUTOFF_ACTIVE_EN,
~CRTC_H_CUTOFF_ACTIVE_EN);
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index f49181c73113..f75da8758adc 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -228,6 +228,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
data->port = pdata->flags;
if (data->port < 0) {
dev_err(&pdev->dev, "wrong platform data is assigned");
+ kfree(data);
return -EINVAL;
}
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c
index 9bdd4b0c18c8..d0f121bd8b25 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/bfin_adv7393fb.c
@@ -58,7 +58,7 @@ static const unsigned short ppi_pins[] = {
*/
static struct bfin_adv7393_fb_par {
- /* structure holding blackfin / adv7393 paramters when
+ /* structure holding blackfin / adv7393 parameters when
screen is blanked */
struct {
u8 Mode; /* ntsc/pal/? */
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 738c8ce7d132..bc67d05cad60 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -1611,7 +1611,7 @@ static void init_vgachip(struct fb_info *info)
/* ext. display controls: ext.adr. wrap */
vga_wcrt(cinfo->regbase, CL_CRT1B, 0x02);
- /* Set/Reset registes: - */
+ /* Set/Reset registers: - */
vga_wgfx(cinfo->regbase, VGA_GFX_SR_VALUE, 0x00);
/* Set/Reset enable: - */
vga_wgfx(cinfo->regbase, VGA_GFX_SR_ENABLE, 0x00);
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index 6ce76d56c3a1..bcb0e3ae1e9d 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -752,7 +752,7 @@ int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp,
/*
* If Rx sends defer, Tx sends only reads
- * request without sending addres
+ * request without sending address
*/
if (!defer)
retval = exynos_dp_select_i2c_device(dp,
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 6c1f5c314a42..9908e75ae761 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -106,7 +106,7 @@ static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim)
/*
* data from Display controller(FIMD) is transferred in video mode
- * but in case of command mode, all settigs is updated to registers.
+ * but in case of command mode, all settings are updated to registers.
*/
exynos_mipi_dsi_stand_by(dsim, 1);
}
diff --git a/drivers/video/i740fb.c b/drivers/video/i740fb.c
index fe574d84ed99..ff3f8808e4e9 100644
--- a/drivers/video/i740fb.c
+++ b/drivers/video/i740fb.c
@@ -497,7 +497,7 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
mem = vxres * vyres * ((bpp + 1) / 8);
if (mem > info->screen_size) {
- dev_err(info->device, "not enough video memory (%d KB requested, %ld KB avaliable)\n",
+ dev_err(info->device, "not enough video memory (%d KB requested, %ld KB available)\n",
mem >> 10, info->screen_size >> 10);
return -ENOMEM;
}
@@ -728,7 +728,7 @@ static void vga_protect(struct i740fb_par *par)
i740outreg_mask(par, VGA_SEQ_I, VGA_SEQ_CLOCK_MODE, 0x20, 0x20);
i740inb(par, 0x3DA);
- i740outb(par, VGA_ATT_W, 0x00); /* enable pallete access */
+ i740outb(par, VGA_ATT_W, 0x00); /* enable palette access */
}
static void vga_unprotect(struct i740fb_par *par)
@@ -737,7 +737,7 @@ static void vga_unprotect(struct i740fb_par *par)
i740outreg_mask(par, VGA_SEQ_I, VGA_SEQ_CLOCK_MODE, 0, 0x20);
i740inb(par, 0x3DA);
- i740outb(par, VGA_ATT_W, 0x20); /* disable pallete access */
+ i740outb(par, VGA_ATT_W, 0x20); /* disable palette access */
}
static int i740fb_set_par(struct fb_info *info)
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index abbe691047bd..49619b441500 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -41,12 +41,14 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/pinctrl/consumer.h>
-#include <mach/mxsfb.h>
+#include <linux/mxsfb.h>
#define REG_SET 4
#define REG_CLR 8
@@ -750,16 +752,43 @@ static void __devexit mxsfb_free_videomem(struct mxsfb_info *host)
}
}
+static struct platform_device_id mxsfb_devtype[] = {
+ {
+ .name = "imx23-fb",
+ .driver_data = MXSFB_V3,
+ }, {
+ .name = "imx28-fb",
+ .driver_data = MXSFB_V4,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
+
+static const struct of_device_id mxsfb_dt_ids[] = {
+ { .compatible = "fsl,imx23-lcdif", .data = &mxsfb_devtype[0], },
+ { .compatible = "fsl,imx28-lcdif", .data = &mxsfb_devtype[1], },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxsfb_dt_ids);
+
static int __devinit mxsfb_probe(struct platform_device *pdev)
{
+ const struct of_device_id *of_id =
+ of_match_device(mxsfb_dt_ids, &pdev->dev);
struct mxsfb_platform_data *pdata = pdev->dev.platform_data;
struct resource *res;
struct mxsfb_info *host;
struct fb_info *fb_info;
struct fb_modelist *modelist;
struct pinctrl *pinctrl;
+ int panel_enable;
+ enum of_gpio_flags flags;
int i, ret;
+ if (of_id)
+ pdev->id_entry = of_id->data;
+
if (!pdata) {
dev_err(&pdev->dev, "No platformdata. Giving up\n");
return -ENODEV;
@@ -807,6 +836,22 @@ static int __devinit mxsfb_probe(struct platform_device *pdev)
goto error_getclock;
}
+ panel_enable = of_get_named_gpio_flags(pdev->dev.of_node,
+ "panel-enable-gpios", 0, &flags);
+ if (gpio_is_valid(panel_enable)) {
+ unsigned long f = GPIOF_OUT_INIT_HIGH;
+ if (flags == OF_GPIO_ACTIVE_LOW)
+ f = GPIOF_OUT_INIT_LOW;
+ ret = devm_gpio_request_one(&pdev->dev, panel_enable,
+ f, "panel-enable");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to request gpio %d: %d\n",
+ panel_enable, ret);
+ goto error_panel_enable;
+ }
+ }
+
fb_info->pseudo_palette = kmalloc(sizeof(u32) * 16, GFP_KERNEL);
if (!fb_info->pseudo_palette) {
ret = -ENOMEM;
@@ -854,6 +899,7 @@ error_register:
error_init_fb:
kfree(fb_info->pseudo_palette);
error_pseudo_pallette:
+error_panel_enable:
clk_put(host->clk);
error_getclock:
error_getpin:
@@ -901,19 +947,6 @@ static void mxsfb_shutdown(struct platform_device *pdev)
writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
}
-static struct platform_device_id mxsfb_devtype[] = {
- {
- .name = "imx23-fb",
- .driver_data = MXSFB_V3,
- }, {
- .name = "imx28-fb",
- .driver_data = MXSFB_V4,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
-
static struct platform_driver mxsfb_driver = {
.probe = mxsfb_probe,
.remove = __devexit_p(mxsfb_remove),
@@ -921,6 +954,7 @@ static struct platform_driver mxsfb_driver = {
.id_table = mxsfb_devtype,
.driver = {
.name = DRIVER_NAME,
+ .of_match_table = mxsfb_dt_ids,
},
};
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 5066eee10ccf..58bd9c27369d 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -32,6 +32,7 @@
#include <linux/io.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>
+#include <linux/suspend.h>
#include <video/omapdss.h>
@@ -201,6 +202,28 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
/* PLATFORM DEVICE */
+static int omap_dss_pm_notif(struct notifier_block *b, unsigned long v, void *d)
+{
+ DSSDBG("pm notif %lu\n", v);
+
+ switch (v) {
+ case PM_SUSPEND_PREPARE:
+ DSSDBG("suspending displays\n");
+ return dss_suspend_all_devices();
+
+ case PM_POST_SUSPEND:
+ DSSDBG("resuming displays\n");
+ return dss_resume_all_devices();
+
+ default:
+ return 0;
+ }
+}
+
+static struct notifier_block omap_dss_pm_notif_block = {
+ .notifier_call = omap_dss_pm_notif,
+};
+
static int __init omap_dss_probe(struct platform_device *pdev)
{
struct omap_dss_board_info *pdata = pdev->dev.platform_data;
@@ -224,6 +247,8 @@ static int __init omap_dss_probe(struct platform_device *pdev)
else if (pdata->default_device)
core.default_display_name = pdata->default_device->name;
+ register_pm_notifier(&omap_dss_pm_notif_block);
+
return 0;
err_debugfs:
@@ -233,6 +258,8 @@ err_debugfs:
static int omap_dss_remove(struct platform_device *pdev)
{
+ unregister_pm_notifier(&omap_dss_pm_notif_block);
+
dss_uninitialize_debugfs();
dss_uninit_overlays(pdev);
@@ -247,25 +274,9 @@ static void omap_dss_shutdown(struct platform_device *pdev)
dss_disable_all_devices();
}
-static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
-{
- DSSDBG("suspend %d\n", state.event);
-
- return dss_suspend_all_devices();
-}
-
-static int omap_dss_resume(struct platform_device *pdev)
-{
- DSSDBG("resume\n");
-
- return dss_resume_all_devices();
-}
-
static struct platform_driver omap_dss_driver = {
.remove = omap_dss_remove,
.shutdown = omap_dss_shutdown,
- .suspend = omap_dss_suspend,
- .resume = omap_dss_resume,
.driver = {
.name = "omapdss",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 4749ac356469..397d4eee11bb 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -384,7 +384,7 @@ void dispc_runtime_put(void)
DSSDBG("dispc_runtime_put\n");
r = pm_runtime_put_sync(&dispc.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index ca8382d346e9..14ce8cc079e3 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -1075,7 +1075,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
DSSDBG("dsi_runtime_put\n");
r = pm_runtime_put_sync(&dsi->pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 770632359a17..d2b57197b292 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -731,7 +731,7 @@ static void dss_runtime_put(void)
DSSDBG("dss_runtime_put\n");
r = pm_runtime_put_sync(&dss.pdev->dev);
- WARN_ON(r < 0 && r != -EBUSY);
+ WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY);
}
/* DEBUGFS */
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 8195c7166d20..26a2430a7028 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -138,7 +138,7 @@ static void hdmi_runtime_put(void)
DSSDBG("hdmi_runtime_put\n");
r = pm_runtime_put_sync(&hdmi.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
static int __init hdmi_init_display(struct omap_dss_device *dssdev)
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 3d8c206e90e5..7985fa12b9b4 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -141,7 +141,7 @@ static void rfbi_runtime_put(void)
DSSDBG("rfbi_runtime_put\n");
r = pm_runtime_put_sync(&rfbi.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
void rfbi_bus_lock(void)
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 2b8973931ff4..3907c8b6ecbc 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -402,7 +402,7 @@ static void venc_runtime_put(void)
DSSDBG("venc_runtime_put\n");
r = pm_runtime_put_sync(&venc.pdev->dev);
- WARN_ON(r < 0);
+ WARN_ON(r < 0 && r != -ENOSYS);
}
static const struct venc_config *venc_timings_to_config(
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index ea7b661e7229..69bf9d07c237 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -189,7 +189,7 @@ struct s3c_fb_vsync {
/**
* struct s3c_fb - overall hardware state of the hardware
- * @slock: The spinlock protection for this data sturcture.
+ * @slock: The spinlock protection for this data sturucture.
* @dev: The device that we bound to, for printing, etc.
* @bus_clk: The clk (hclk) feeding our interface and possibly pixclk.
* @lcd_clk: The clk (sclk) feeding pixclk.
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index f3d3b9ce4751..0d0f52c18fd8 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -662,7 +662,7 @@ static void savage_get_default_par(struct savagefb_par *par, struct savage_reg *
vga_out8(0x3c4, 0x18, par);
reg->SR18 = vga_in8(0x3c5, par);
- /* Save flat panel expansion regsters. */
+ /* Save flat panel expansion registers. */
if (par->chip == S3_SAVAGE_MX) {
int i;
@@ -815,7 +815,7 @@ static void savage_set_default_par(struct savagefb_par *par,
vga_out8(0x3c4, 0x18, par);
vga_out8(0x3c5, reg->SR18, par);
- /* Save flat panel expansion regsters. */
+ /* Save flat panel expansion registers. */
if (par->chip == S3_SAVAGE_MX) {
int i;
@@ -1318,7 +1318,7 @@ static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *r
vga_out8(0x3c4, 0x15, par);
vga_out8(0x3c5, reg->SR15, par);
- /* Restore flat panel expansion regsters. */
+ /* Restore flat panel expansion registers. */
if (par->chip == S3_SAVAGE_MX) {
int i;
diff --git a/drivers/video/sis/init.c b/drivers/video/sis/init.c
index 66de832361cc..f082ae55c0c9 100644
--- a/drivers/video/sis/init.c
+++ b/drivers/video/sis/init.c
@@ -2628,7 +2628,8 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
else if(VCLK >= 135) data = 0x02;
if(SiS_Pr->ChipType == SIS_540) {
- if((VCLK == 203) || (VCLK < 234)) data = 0x02;
+ /* Was == 203 or < 234 which made no sense */
+ if (VCLK < 234) data = 0x02;
}
if(SiS_Pr->ChipType < SIS_315H) {
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index af3ef27ad36c..26f864289498 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -1,7 +1,7 @@
/*
* smscufx.c -- Framebuffer driver for SMSC UFX USB controller
*
- * Copyright (C) 2011 Steve Glendinning <steve.glendinning@smsc.com>
+ * Copyright (C) 2011 Steve Glendinning <steve.glendinning@shawell.net>
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
@@ -1002,7 +1002,7 @@ static int ufx_ops_ioctl(struct fb_info *info, unsigned int cmd,
/* TODO: Help propose a standard fb.h ioctl to report mmap damage */
if (cmd == UFX_IOCTL_REPORT_DAMAGE) {
/* If we have a damage-aware client, turn fb_defio "off"
- * To avoid perf imact of unecessary page fault handling.
+ * To avoid perf imact of unnecessary page fault handling.
* Done by resetting the delay for this fb_info to a very
* long period. Pages will become writable and stay that way.
* Reset to normal value when all clients have closed this fb.
@@ -1466,7 +1466,7 @@ static int ufx_read_edid(struct ufx_data *dev, u8 *edid, int edid_len)
/* all FF's in the first 16 bytes indicates nothing is connected */
for (i = 0; i < 16; i++) {
if (edid[i] != 0xFF) {
- pr_debug("edid data read succesfully");
+ pr_debug("edid data read successfully");
return EDID_LENGTH;
}
}
@@ -1972,6 +1972,6 @@ MODULE_PARM_DESC(console, "Allow fbcon to be used on this display");
module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(fb_defio, "Enable fb_defio mmap support");
-MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
+MODULE_AUTHOR("Steve Glendinning <steve.glendinning@shawell.net>");
MODULE_DESCRIPTION("SMSC UFX kernel framebuffer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
index b9c2b948d34d..eb931b8626fa 100644
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/sunxvr500.c
@@ -12,7 +12,7 @@
#include <asm/io.h>
-/* XXX This device has a 'dev-comm' property which aparently is
+/* XXX This device has a 'dev-comm' property which apparently is
* XXX a pointer into the openfirmware's address space which is
* XXX a shared area the kernel driver can use to keep OBP
* XXX informed about the current resolution setting. The idea
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index f3558070e375..c3b3f7f0d9d1 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -141,8 +141,11 @@ static int virtio_dev_probe(struct device *_d)
err = drv->probe(dev);
if (err)
add_status(dev, VIRTIO_CONFIG_S_FAILED);
- else
+ else {
add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+ if (drv->scan)
+ drv->scan(dev);
+ }
return err;
}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index bfbc15ca38dd..0908e6044333 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -47,7 +47,7 @@ struct virtio_balloon
struct task_struct *thread;
/* Waiting for host to ack the pages we released. */
- struct completion acked;
+ wait_queue_head_t acked;
/* Number of balloon pages we've told the Host we're not using. */
unsigned int num_pages;
@@ -89,29 +89,25 @@ static struct page *balloon_pfn_to_page(u32 pfn)
static void balloon_ack(struct virtqueue *vq)
{
- struct virtio_balloon *vb;
- unsigned int len;
+ struct virtio_balloon *vb = vq->vdev->priv;
- vb = virtqueue_get_buf(vq, &len);
- if (vb)
- complete(&vb->acked);
+ wake_up(&vb->acked);
}
static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
struct scatterlist sg;
+ unsigned int len;
sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);
- init_completion(&vb->acked);
-
/* We should always be able to add one buffer to an empty queue. */
if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
/* When host has read buffer, this completes via balloon_ack */
- wait_for_completion(&vb->acked);
+ wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}
static void set_page_pfns(u32 pfns[], struct page *page)
@@ -231,12 +227,8 @@ static void update_balloon_stats(struct virtio_balloon *vb)
*/
static void stats_request(struct virtqueue *vq)
{
- struct virtio_balloon *vb;
- unsigned int len;
+ struct virtio_balloon *vb = vq->vdev->priv;
- vb = virtqueue_get_buf(vq, &len);
- if (!vb)
- return;
vb->need_stats_update = 1;
wake_up(&vb->config_change);
}
@@ -245,11 +237,14 @@ static void stats_handle_request(struct virtio_balloon *vb)
{
struct virtqueue *vq;
struct scatterlist sg;
+ unsigned int len;
vb->need_stats_update = 0;
update_balloon_stats(vb);
vq = vb->stats_vq;
+ if (!virtqueue_get_buf(vq, &len))
+ return;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
BUG();
@@ -358,6 +353,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
INIT_LIST_HEAD(&vb->pages);
vb->num_pages = 0;
init_waitqueue_head(&vb->config_change);
+ init_waitqueue_head(&vb->acked);
vb->vdev = vdev;
vb->need_stats_update = 0;
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 979d6eed9a0f..5ceb1cd50195 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -60,7 +60,7 @@ config W1_MASTER_GPIO
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
- depends on SOC_OMAP2430 || ARCH_OMAP3
+ depends on ARCH_OMAP2PLUS
help
Say Y here if you want support for the 1-wire or HDQ Interface
on an OMAP processor.
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 5ef385bfed18..291897c881be 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -1,7 +1,7 @@
/*
* drivers/w1/masters/omap_hdq.c
*
- * Copyright (C) 2007 Texas Instruments, Inc.
+ * Copyright (C) 2007,2012 Texas Instruments, Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
@@ -14,9 +14,9 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/sched.h>
+#include <linux/pm_runtime.h>
#include <asm/irq.h>
#include <mach/hardware.h>
@@ -61,8 +61,6 @@ struct hdq_data {
/* lock status update */
struct mutex hdq_mutex;
int hdq_usecount;
- struct clk *hdq_ick;
- struct clk *hdq_fck;
u8 hdq_irqstatus;
/* device lock */
spinlock_t hdq_spinlock;
@@ -102,20 +100,20 @@ static struct w1_bus_master omap_w1_master = {
/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
- return __raw_readb(hdq_data->hdq_base + offset);
+ return __raw_readl(hdq_data->hdq_base + offset);
}
static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
- __raw_writeb(val, hdq_data->hdq_base + offset);
+ __raw_writel(val, hdq_data->hdq_base + offset);
}
static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
u8 val, u8 mask)
{
- u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask)
+ u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
| (val & mask);
- __raw_writeb(new_val, hdq_data->hdq_base + offset);
+ __raw_writel(new_val, hdq_data->hdq_base + offset);
return new_val;
}
@@ -419,17 +417,8 @@ static int omap_hdq_get(struct hdq_data *hdq_data)
hdq_data->hdq_usecount++;
try_module_get(THIS_MODULE);
if (1 == hdq_data->hdq_usecount) {
- if (clk_enable(hdq_data->hdq_ick)) {
- dev_dbg(hdq_data->dev, "Can not enable ick\n");
- ret = -ENODEV;
- goto clk_err;
- }
- if (clk_enable(hdq_data->hdq_fck)) {
- dev_dbg(hdq_data->dev, "Can not enable fck\n");
- clk_disable(hdq_data->hdq_ick);
- ret = -ENODEV;
- goto clk_err;
- }
+
+ pm_runtime_get_sync(hdq_data->dev);
/* make sure HDQ is out of reset */
if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
@@ -450,9 +439,6 @@ static int omap_hdq_get(struct hdq_data *hdq_data)
}
}
-clk_err:
- clk_put(hdq_data->hdq_ick);
- clk_put(hdq_data->hdq_fck);
out:
mutex_unlock(&hdq_data->hdq_mutex);
rtn:
@@ -475,10 +461,8 @@ static int omap_hdq_put(struct hdq_data *hdq_data)
} else {
hdq_data->hdq_usecount--;
module_put(THIS_MODULE);
- if (0 == hdq_data->hdq_usecount) {
- clk_disable(hdq_data->hdq_ick);
- clk_disable(hdq_data->hdq_fck);
- }
+ if (0 == hdq_data->hdq_usecount)
+ pm_runtime_put_sync(hdq_data->dev);
}
mutex_unlock(&hdq_data->hdq_mutex);
@@ -591,35 +575,11 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
goto err_ioremap;
}
- /* get interface & functional clock objects */
- hdq_data->hdq_ick = clk_get(&pdev->dev, "ick");
- if (IS_ERR(hdq_data->hdq_ick)) {
- dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n");
- ret = PTR_ERR(hdq_data->hdq_ick);
- goto err_ick;
- }
-
- hdq_data->hdq_fck = clk_get(&pdev->dev, "fck");
- if (IS_ERR(hdq_data->hdq_fck)) {
- dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n");
- ret = PTR_ERR(hdq_data->hdq_fck);
- goto err_fck;
- }
-
hdq_data->hdq_usecount = 0;
mutex_init(&hdq_data->hdq_mutex);
- if (clk_enable(hdq_data->hdq_ick)) {
- dev_dbg(&pdev->dev, "Can not enable ick\n");
- ret = -ENODEV;
- goto err_intfclk;
- }
-
- if (clk_enable(hdq_data->hdq_fck)) {
- dev_dbg(&pdev->dev, "Can not enable fck\n");
- ret = -ENODEV;
- goto err_fnclk;
- }
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
@@ -641,9 +601,7 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
omap_hdq_break(hdq_data);
- /* don't clock the HDQ until it is needed */
- clk_disable(hdq_data->hdq_ick);
- clk_disable(hdq_data->hdq_fck);
+ pm_runtime_put_sync(&pdev->dev);
omap_w1_master.data = hdq_data;
@@ -655,20 +613,11 @@ static int __devinit omap_hdq_probe(struct platform_device *pdev)
return 0;
-err_w1:
err_irq:
- clk_disable(hdq_data->hdq_fck);
-
-err_fnclk:
- clk_disable(hdq_data->hdq_ick);
-
-err_intfclk:
- clk_put(hdq_data->hdq_fck);
-
-err_fck:
- clk_put(hdq_data->hdq_ick);
+ pm_runtime_put_sync(&pdev->dev);
+err_w1:
+ pm_runtime_disable(&pdev->dev);
-err_ick:
iounmap(hdq_data->hdq_base);
err_ioremap:
@@ -696,8 +645,7 @@ static int omap_hdq_remove(struct platform_device *pdev)
mutex_unlock(&hdq_data->hdq_mutex);
/* remove module dependency */
- clk_put(hdq_data->hdq_ick);
- clk_put(hdq_data->hdq_fck);
+ pm_runtime_disable(&pdev->dev);
free_irq(INT_24XX_HDQ_IRQ, hdq_data);
platform_set_drvdata(pdev, NULL);
iounmap(hdq_data->hdq_base);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index fe819b76de56..53d75719078e 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -279,6 +279,7 @@ config DAVINCI_WATCHDOG
config ORION_WATCHDOG
tristate "Orion watchdog"
depends on ARCH_ORION5X || ARCH_KIRKWOOD
+ select WATCHDOG_CORE
help
Say Y here if to include support for the watchdog timer
in the Marvell Orion5x and Kirkwood ARM SoCs.
@@ -578,6 +579,7 @@ config INTEL_SCU_WATCHDOG
config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
+ select WATCHDOG_CORE
select LPC_ICH
---help---
Hardware driver for the intel TCO timer based watchdog devices.
@@ -1115,10 +1117,10 @@ config BOOKE_WDT
config BOOKE_WDT_DEFAULT_TIMEOUT
int "PowerPC Book-E Watchdog Timer Default Timeout"
depends on BOOKE_WDT
- default 38 if FSL_BOOKE
- range 0 63 if FSL_BOOKE
- default 3 if !FSL_BOOKE
- range 0 3 if !FSL_BOOKE
+ default 38 if PPC_FSL_BOOK3E
+ range 0 63 if PPC_FSL_BOOK3E
+ default 3 if !PPC_FSL_BOOK3E
+ range 0 3 if !PPC_FSL_BOOK3E
help
Select the default watchdog timer period to be used by the PowerPC
Book-E watchdog driver. A watchdog "event" occurs when the bit
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 8379dc32fd90..551880bfd629 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -302,7 +302,7 @@ static void bcm63xx_wdt_shutdown(struct platform_device *pdev)
bcm63xx_wdt_pause();
}
-static struct platform_driver bcm63xx_wdt = {
+static struct platform_driver bcm63xx_wdt_driver = {
.probe = bcm63xx_wdt_probe,
.remove = __devexit_p(bcm63xx_wdt_remove),
.shutdown = bcm63xx_wdt_shutdown,
@@ -312,7 +312,7 @@ static struct platform_driver bcm63xx_wdt = {
}
};
-module_platform_driver(bcm63xx_wdt);
+module_platform_driver(bcm63xx_wdt_driver);
MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index ce0ab4415eff..3fe82d0e8caa 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -37,7 +37,7 @@
u32 booke_wdt_enabled;
u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
#define WDTP_MASK (WDTP(0x3f))
#else
@@ -190,7 +190,7 @@ static long booke_wdt_ioctl(struct file *file,
case WDIOC_SETTIMEOUT:
if (get_user(tmp, p))
return -EFAULT;
-#ifdef CONFIG_FSL_BOOKE
+#ifdef CONFIG_PPC_FSL_BOOK3E
/* period of 1 gives the largest possible timeout */
if (tmp > period_to_sec(1))
return -EINVAL;
diff --git a/drivers/watchdog/coh901327_wdt.c b/drivers/watchdog/coh901327_wdt.c
index 6876430a9f5e..cb5da5c3ece2 100644
--- a/drivers/watchdog/coh901327_wdt.c
+++ b/drivers/watchdog/coh901327_wdt.c
@@ -263,6 +263,7 @@ static int __exit coh901327_remove(struct platform_device *pdev)
watchdog_unregister_device(&coh901327_wdt);
coh901327_disable();
free_irq(irq, pdev);
+ clk_unprepare(clk);
clk_put(clk);
iounmap(virtbase);
release_mem_region(phybase, physize);
@@ -300,9 +301,9 @@ static int __init coh901327_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "could not get clock\n");
goto out_no_clk;
}
- ret = clk_enable(clk);
+ ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(&pdev->dev, "could not enable clock\n");
+ dev_err(&pdev->dev, "could not prepare and enable clock\n");
goto out_no_clk_enable;
}
@@ -369,7 +370,7 @@ static int __init coh901327_probe(struct platform_device *pdev)
out_no_wdog:
free_irq(irq, pdev);
out_no_irq:
- clk_disable(clk);
+ clk_disable_unprepare(clk);
out_no_clk_enable:
clk_put(clk);
out_no_clk:
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index c65b0a5a020c..016bd9355190 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -56,6 +56,7 @@
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
#define SIO_F71869_ID 0x0814 /* Chipset ID */
+#define SIO_F71869A_ID 0x1007 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
@@ -195,7 +196,7 @@ static inline int superio_enter(int base)
return -EBUSY;
}
- /* according to the datasheet the key must be send twice! */
+ /* according to the datasheet the key must be sent twice! */
outb(SIO_UNLOCK_KEY, base);
outb(SIO_UNLOCK_KEY, base);
@@ -756,6 +757,7 @@ static int __init f71808e_find(int sioaddr)
err = f71862fg_pin_configure(0); /* validate module parameter */
break;
case SIO_F71869_ID:
+ case SIO_F71869A_ID:
watchdog.type = f71869;
break;
case SIO_F71882_ID:
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 9c2c27c3b424..ceed39f26011 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -47,7 +47,7 @@
/* Module and version information */
#define DRV_NAME "iTCO_wdt"
-#define DRV_VERSION "1.07"
+#define DRV_VERSION "1.10"
/* Includes */
#include <linux/module.h> /* For module specific items */
@@ -88,8 +88,6 @@
#define TCOv2_TMR (TCOBASE + 0x12) /* TCOv2 Timer Initial Value */
/* internal variables */
-static unsigned long is_active;
-static char expect_release;
static struct { /* this is private data for the iTCO_wdt device */
/* TCO version/generation */
unsigned int iTCO_version;
@@ -106,12 +104,12 @@ static struct { /* this is private data for the iTCO_wdt device */
} iTCO_wdt_private;
/* module parameters */
-#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
-static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
+#define WATCHDOG_TIMEOUT 30 /* 30 sec default heartbeat */
+static int heartbeat = WATCHDOG_TIMEOUT; /* in seconds */
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog timeout in seconds. "
"5..76 (TCO v1) or 3..614 (TCO v2), default="
- __MODULE_STRING(WATCHDOG_HEARTBEAT) ")");
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
@@ -178,13 +176,13 @@ static int iTCO_wdt_unset_NO_REBOOT_bit(void)
return ret; /* returns: 0 = OK, -EIO = Error */
}
-static int iTCO_wdt_start(void)
+static int iTCO_wdt_start(struct watchdog_device *wd_dev)
{
unsigned int val;
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, heartbeat);
+ iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, wd_dev->timeout);
/* disable chipset's NO_REBOOT bit */
if (iTCO_wdt_unset_NO_REBOOT_bit()) {
@@ -212,7 +210,7 @@ static int iTCO_wdt_start(void)
return 0;
}
-static int iTCO_wdt_stop(void)
+static int iTCO_wdt_stop(struct watchdog_device *wd_dev)
{
unsigned int val;
@@ -236,11 +234,11 @@ static int iTCO_wdt_stop(void)
return 0;
}
-static int iTCO_wdt_keepalive(void)
+static int iTCO_wdt_ping(struct watchdog_device *wd_dev)
{
spin_lock(&iTCO_wdt_private.io_lock);
- iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, heartbeat);
+ iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, wd_dev->timeout);
/* Reload the timer by writing to the TCO Timer Counter register */
if (iTCO_wdt_private.iTCO_version == 2)
@@ -257,7 +255,7 @@ static int iTCO_wdt_keepalive(void)
return 0;
}
-static int iTCO_wdt_set_heartbeat(int t)
+static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
{
unsigned int val16;
unsigned char val8;
@@ -304,14 +302,15 @@ static int iTCO_wdt_set_heartbeat(int t)
return -EINVAL;
}
- heartbeat = t;
+ wd_dev->timeout = t;
return 0;
}
-static int iTCO_wdt_get_timeleft(int *time_left)
+static unsigned int iTCO_wdt_get_timeleft(struct watchdog_device *wd_dev)
{
unsigned int val16;
unsigned char val8;
+ unsigned int time_left = 0;
/* read the TCO Timer */
if (iTCO_wdt_private.iTCO_version == 2) {
@@ -320,7 +319,7 @@ static int iTCO_wdt_get_timeleft(int *time_left)
val16 &= 0x3ff;
spin_unlock(&iTCO_wdt_private.io_lock);
- *time_left = (val16 * 6) / 10;
+ time_left = (val16 * 6) / 10;
} else if (iTCO_wdt_private.iTCO_version == 1) {
spin_lock(&iTCO_wdt_private.io_lock);
val8 = inb(TCO_RLD);
@@ -329,156 +328,35 @@ static int iTCO_wdt_get_timeleft(int *time_left)
val8 += (inb(TCOv1_TMR) & 0x3f);
spin_unlock(&iTCO_wdt_private.io_lock);
- *time_left = (val8 * 6) / 10;
- } else
- return -EINVAL;
- return 0;
-}
-
-/*
- * /dev/watchdog handling
- */
-
-static int iTCO_wdt_open(struct inode *inode, struct file *file)
-{
- /* /dev/watchdog can only be opened once */
- if (test_and_set_bit(0, &is_active))
- return -EBUSY;
-
- /*
- * Reload and activate timer
- */
- iTCO_wdt_start();
- return nonseekable_open(inode, file);
-}
-
-static int iTCO_wdt_release(struct inode *inode, struct file *file)
-{
- /*
- * Shut off the timer.
- */
- if (expect_release == 42) {
- iTCO_wdt_stop();
- } else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- iTCO_wdt_keepalive();
- }
- clear_bit(0, &is_active);
- expect_release = 0;
- return 0;
-}
-
-static ssize_t iTCO_wdt_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- /* See if we got the magic character 'V' and reload the timer */
- if (len) {
- if (!nowayout) {
- size_t i;
-
- /* note: just in case someone wrote the magic
- character five months ago... */
- expect_release = 0;
-
- /* scan to see whether or not we got the
- magic character */
- for (i = 0; i != len; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- expect_release = 42;
- }
- }
-
- /* someone wrote to us, we should reload the timer */
- iTCO_wdt_keepalive();
- }
- return len;
-}
-
-static long iTCO_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int new_options, retval = -EINVAL;
- int new_heartbeat;
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- static const struct watchdog_info ident = {
- .options = WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING |
- WDIOF_MAGICCLOSE,
- .firmware_version = 0,
- .identity = DRV_NAME,
- };
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_SETOPTIONS:
- {
- if (get_user(new_options, p))
- return -EFAULT;
-
- if (new_options & WDIOS_DISABLECARD) {
- iTCO_wdt_stop();
- retval = 0;
- }
- if (new_options & WDIOS_ENABLECARD) {
- iTCO_wdt_keepalive();
- iTCO_wdt_start();
- retval = 0;
- }
- return retval;
- }
- case WDIOC_KEEPALIVE:
- iTCO_wdt_keepalive();
- return 0;
-
- case WDIOC_SETTIMEOUT:
- {
- if (get_user(new_heartbeat, p))
- return -EFAULT;
- if (iTCO_wdt_set_heartbeat(new_heartbeat))
- return -EINVAL;
- iTCO_wdt_keepalive();
- /* Fall */
- }
- case WDIOC_GETTIMEOUT:
- return put_user(heartbeat, p);
- case WDIOC_GETTIMELEFT:
- {
- int time_left;
- if (iTCO_wdt_get_timeleft(&time_left))
- return -EINVAL;
- return put_user(time_left, p);
- }
- default:
- return -ENOTTY;
+ time_left = (val8 * 6) / 10;
}
+ return time_left;
}
/*
* Kernel Interfaces
*/
-static const struct file_operations iTCO_wdt_fops = {
+static const struct watchdog_info ident = {
+ .options = WDIOF_SETTIMEOUT |
+ WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE,
+ .firmware_version = 0,
+ .identity = DRV_NAME,
+};
+
+static const struct watchdog_ops iTCO_wdt_ops = {
.owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = iTCO_wdt_write,
- .unlocked_ioctl = iTCO_wdt_ioctl,
- .open = iTCO_wdt_open,
- .release = iTCO_wdt_release,
+ .start = iTCO_wdt_start,
+ .stop = iTCO_wdt_stop,
+ .ping = iTCO_wdt_ping,
+ .set_timeout = iTCO_wdt_set_timeout,
+ .get_timeleft = iTCO_wdt_get_timeleft,
};
-static struct miscdevice iTCO_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &iTCO_wdt_fops,
+static struct watchdog_device iTCO_wdt_watchdog_dev = {
+ .info = &ident,
+ .ops = &iTCO_wdt_ops,
};
/*
@@ -489,10 +367,10 @@ static void __devexit iTCO_wdt_cleanup(void)
{
/* Stop the timer before we leave */
if (!nowayout)
- iTCO_wdt_stop();
+ iTCO_wdt_stop(&iTCO_wdt_watchdog_dev);
/* Deregister */
- misc_deregister(&iTCO_wdt_miscdev);
+ watchdog_unregister_device(&iTCO_wdt_watchdog_dev);
/* release resources */
release_region(iTCO_wdt_private.tco_res->start,
@@ -605,20 +483,25 @@ static int __devinit iTCO_wdt_probe(struct platform_device *dev)
outw(0x0002, TCO2_STS); /* Clear SECOND_TO_STS bit */
outw(0x0004, TCO2_STS); /* Clear BOOT_STS bit */
+ iTCO_wdt_watchdog_dev.bootstatus = 0;
+ iTCO_wdt_watchdog_dev.timeout = WATCHDOG_TIMEOUT;
+ watchdog_set_nowayout(&iTCO_wdt_watchdog_dev, nowayout);
+ iTCO_wdt_watchdog_dev.parent = dev->dev.parent;
+
/* Make sure the watchdog is not running */
- iTCO_wdt_stop();
+ iTCO_wdt_stop(&iTCO_wdt_watchdog_dev);
/* Check that the heartbeat value is within it's range;
if not reset to the default */
- if (iTCO_wdt_set_heartbeat(heartbeat)) {
- iTCO_wdt_set_heartbeat(WATCHDOG_HEARTBEAT);
- pr_info("timeout value out of range, using %d\n", heartbeat);
+ if (iTCO_wdt_set_timeout(&iTCO_wdt_watchdog_dev, heartbeat)) {
+ iTCO_wdt_set_timeout(&iTCO_wdt_watchdog_dev, WATCHDOG_TIMEOUT);
+ pr_info("timeout value out of range, using %d\n",
+ WATCHDOG_TIMEOUT);
}
- ret = misc_register(&iTCO_wdt_miscdev);
+ ret = watchdog_register_device(&iTCO_wdt_watchdog_dev);
if (ret != 0) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
+ pr_err("cannot register watchdog device (err=%d)\n", ret);
goto unreg_tco;
}
@@ -659,7 +542,7 @@ static int __devexit iTCO_wdt_remove(struct platform_device *dev)
static void iTCO_wdt_shutdown(struct platform_device *dev)
{
- iTCO_wdt_stop();
+ iTCO_wdt_stop(NULL);
}
static struct platform_driver iTCO_wdt_driver = {
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index 5f0d776f902c..8f541b940053 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -232,7 +232,7 @@ static void __devinit ie6xx_wdt_debugfs_init(void)
S_IFREG | S_IRUGO, NULL, NULL, &ie6xx_wdt_dbg_operations);
}
-static void __devexit ie6xx_wdt_debugfs_exit(void)
+static void ie6xx_wdt_debugfs_exit(void)
{
debugfs_remove(ie6xx_wdt_data.debugfs);
}
@@ -242,7 +242,7 @@ static void __devinit ie6xx_wdt_debugfs_init(void)
{
}
-static void __devexit ie6xx_wdt_debugfs_exit(void)
+static void ie6xx_wdt_debugfs_exit(void)
{
}
#endif
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 55d2f66dbeae..294fb4e00521 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -297,7 +297,7 @@ static int __devinit xwdt_probe(struct platform_device *pdev)
no_timeout = 0;
- pfreq = (u32 *)of_get_property(pdev->dev.of_node->parent,
+ pfreq = (u32 *)of_get_property(pdev->dev.of_node,
"clock-frequency", NULL);
if (pfreq == NULL) {
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 8285d65cd207..fceec4f4eb7e 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -126,8 +126,6 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
u32 pre_margin = GET_WLDR_VAL(timer_margin);
void __iomem *base = wdev->base;
- pm_runtime_get_sync(wdev->dev);
-
/* just count up at 32 KHz */
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
@@ -135,8 +133,6 @@ static void omap_wdt_set_timeout(struct omap_wdt_dev *wdev)
__raw_writel(pre_margin, base + OMAP_WATCHDOG_LDR);
while (__raw_readl(base + OMAP_WATCHDOG_WPS) & 0x04)
cpu_relax();
-
- pm_runtime_put_sync(wdev->dev);
}
/*
@@ -166,8 +162,6 @@ static int omap_wdt_open(struct inode *inode, struct file *file)
omap_wdt_ping(wdev); /* trigger loading of new timeout value */
omap_wdt_enable(wdev);
- pm_runtime_put_sync(wdev->dev);
-
return nonseekable_open(inode, file);
}
@@ -179,8 +173,6 @@ static int omap_wdt_release(struct inode *inode, struct file *file)
* Shut off the timer unless NOWAYOUT is defined.
*/
#ifndef CONFIG_WATCHDOG_NOWAYOUT
- pm_runtime_get_sync(wdev->dev);
-
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
@@ -199,11 +191,9 @@ static ssize_t omap_wdt_write(struct file *file, const char __user *data,
/* Refresh LOAD_TIME. */
if (len) {
- pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
- pm_runtime_put_sync(wdev->dev);
}
return len;
}
@@ -236,18 +226,15 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
(int __user *)arg);
return put_user(0, (int __user *)arg);
case WDIOC_KEEPALIVE:
- pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
- pm_runtime_put_sync(wdev->dev);
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_margin, (int __user *)arg))
return -EFAULT;
omap_wdt_adjust_timeout(new_margin);
- pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
omap_wdt_disable(wdev);
omap_wdt_set_timeout(wdev);
@@ -255,7 +242,6 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
omap_wdt_ping(wdev);
spin_unlock(&wdt_lock);
- pm_runtime_put_sync(wdev->dev);
/* Fall */
case WDIOC_GETTIMEOUT:
return put_user(timer_margin, (int __user *)arg);
@@ -363,7 +349,6 @@ static void omap_wdt_shutdown(struct platform_device *pdev)
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
if (wdev->omap_wdt_users) {
- pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
}
@@ -403,7 +388,6 @@ static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state)
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
if (wdev->omap_wdt_users) {
- pm_runtime_get_sync(wdev->dev);
omap_wdt_disable(wdev);
pm_runtime_put_sync(wdev->dev);
}
@@ -419,7 +403,6 @@ static int omap_wdt_resume(struct platform_device *pdev)
pm_runtime_get_sync(wdev->dev);
omap_wdt_enable(wdev);
omap_wdt_ping(wdev);
- pm_runtime_put_sync(wdev->dev);
}
return 0;
@@ -430,6 +413,12 @@ static int omap_wdt_resume(struct platform_device *pdev)
#define omap_wdt_resume NULL
#endif
+static const struct of_device_id omap_wdt_of_match[] = {
+ { .compatible = "ti,omap3-wdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_wdt_of_match);
+
static struct platform_driver omap_wdt_driver = {
.probe = omap_wdt_probe,
.remove = __devexit_p(omap_wdt_remove),
@@ -439,6 +428,7 @@ static struct platform_driver omap_wdt_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "omap_wdt",
+ .of_match_table = omap_wdt_of_match,
},
};
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 0f5736949c61..a73bea4aa1ba 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -16,22 +16,21 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/init.h>
-#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <mach/bridge-regs.h>
/*
* Watchdog timer block registers.
*/
#define TIMER_CTRL 0x0000
-#define WDT_EN 0x0010
+#define WDT_EN 0x0010
#define WDT_VAL 0x0024
#define WDT_MAX_CYCLE_COUNT 0xffffffff
@@ -44,27 +43,27 @@ static unsigned int wdt_max_duration; /* (seconds) */
static struct clk *clk;
static unsigned int wdt_tclk;
static void __iomem *wdt_reg;
-static unsigned long wdt_status;
static DEFINE_SPINLOCK(wdt_lock);
-static void orion_wdt_ping(void)
+static int orion_wdt_ping(struct watchdog_device *wdt_dev)
{
spin_lock(&wdt_lock);
/* Reload watchdog duration */
- writel(wdt_tclk * heartbeat, wdt_reg + WDT_VAL);
+ writel(wdt_tclk * wdt_dev->timeout, wdt_reg + WDT_VAL);
spin_unlock(&wdt_lock);
+ return 0;
}
-static void orion_wdt_enable(void)
+static int orion_wdt_start(struct watchdog_device *wdt_dev)
{
u32 reg;
spin_lock(&wdt_lock);
/* Set watchdog duration */
- writel(wdt_tclk * heartbeat, wdt_reg + WDT_VAL);
+ writel(wdt_tclk * wdt_dev->timeout, wdt_reg + WDT_VAL);
/* Clear watchdog timer interrupt */
reg = readl(BRIDGE_CAUSE);
@@ -82,9 +81,10 @@ static void orion_wdt_enable(void)
writel(reg, RSTOUTn_MASK);
spin_unlock(&wdt_lock);
+ return 0;
}
-static void orion_wdt_disable(void)
+static int orion_wdt_stop(struct watchdog_device *wdt_dev)
{
u32 reg;
@@ -101,139 +101,44 @@ static void orion_wdt_disable(void)
writel(reg, wdt_reg + TIMER_CTRL);
spin_unlock(&wdt_lock);
+ return 0;
}
-static int orion_wdt_get_timeleft(int *time_left)
+static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev)
{
+ unsigned int time_left;
+
spin_lock(&wdt_lock);
- *time_left = readl(wdt_reg + WDT_VAL) / wdt_tclk;
+ time_left = readl(wdt_reg + WDT_VAL) / wdt_tclk;
spin_unlock(&wdt_lock);
- return 0;
-}
-static int orion_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(WDT_IN_USE, &wdt_status))
- return -EBUSY;
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
- orion_wdt_enable();
- return nonseekable_open(inode, file);
+ return time_left;
}
-static ssize_t orion_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
+static int orion_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ unsigned int timeout)
{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
- for (i = 0; i != len; i++) {
- char c;
-
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- set_bit(WDT_OK_TO_CLOSE, &wdt_status);
- }
- }
- orion_wdt_ping();
- }
- return len;
-}
-
-static int orion_wdt_settimeout(int new_time)
-{
- if ((new_time <= 0) || (new_time > wdt_max_duration))
- return -EINVAL;
-
- /* Set new watchdog time to be used when
- * orion_wdt_enable() or orion_wdt_ping() is called. */
- heartbeat = new_time;
+ wdt_dev->timeout = timeout;
return 0;
}
-static const struct watchdog_info ident = {
- .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING,
- .identity = "Orion Watchdog",
+static const struct watchdog_info orion_wdt_info = {
+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+ .identity = "Orion Watchdog",
};
-static long orion_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret = -ENOTTY;
- int time;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
- sizeof(ident)) ? -EFAULT : 0;
- break;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int *)arg);
- break;
-
- case WDIOC_KEEPALIVE:
- orion_wdt_ping();
- ret = 0;
- break;
-
- case WDIOC_SETTIMEOUT:
- ret = get_user(time, (int *)arg);
- if (ret)
- break;
-
- if (orion_wdt_settimeout(time)) {
- ret = -EINVAL;
- break;
- }
- orion_wdt_ping();
- /* Fall through */
-
- case WDIOC_GETTIMEOUT:
- ret = put_user(heartbeat, (int *)arg);
- break;
-
- case WDIOC_GETTIMELEFT:
- if (orion_wdt_get_timeleft(&time)) {
- ret = -EINVAL;
- break;
- }
- ret = put_user(time, (int *)arg);
- break;
- }
- return ret;
-}
-
-static int orion_wdt_release(struct inode *inode, struct file *file)
-{
- if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
- orion_wdt_disable();
- else
- pr_crit("Device closed unexpectedly - timer will not stop\n");
- clear_bit(WDT_IN_USE, &wdt_status);
- clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
-
- return 0;
-}
-
-
-static const struct file_operations orion_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = orion_wdt_write,
- .unlocked_ioctl = orion_wdt_ioctl,
- .open = orion_wdt_open,
- .release = orion_wdt_release,
+static const struct watchdog_ops orion_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = orion_wdt_start,
+ .stop = orion_wdt_stop,
+ .ping = orion_wdt_ping,
+ .set_timeout = orion_wdt_set_timeout,
+ .get_timeleft = orion_wdt_get_timeleft,
};
-static struct miscdevice orion_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &orion_wdt_fops,
+static struct watchdog_device orion_wdt = {
+ .info = &orion_wdt_info,
+ .ops = &orion_wdt_ops,
};
static int __devinit orion_wdt_probe(struct platform_device *pdev)
@@ -241,29 +146,34 @@ static int __devinit orion_wdt_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- clk = clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- printk(KERN_ERR "Orion Watchdog missing clock\n");
+ dev_err(&pdev->dev, "Orion Watchdog missing clock\n");
return -ENODEV;
}
clk_prepare_enable(clk);
wdt_tclk = clk_get_rate(clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- wdt_reg = ioremap(res->start, resource_size(res));
-
- if (orion_wdt_miscdev.parent)
- return -EBUSY;
- orion_wdt_miscdev.parent = &pdev->dev;
+ wdt_reg = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!wdt_reg)
+ return -ENOMEM;
wdt_max_duration = WDT_MAX_CYCLE_COUNT / wdt_tclk;
- if (orion_wdt_settimeout(heartbeat))
+
+ if ((heartbeat < 1) || (heartbeat > wdt_max_duration))
heartbeat = wdt_max_duration;
- ret = misc_register(&orion_wdt_miscdev);
- if (ret)
+ orion_wdt.timeout = heartbeat;
+ orion_wdt.min_timeout = 1;
+ orion_wdt.max_timeout = wdt_max_duration;
+
+ watchdog_set_nowayout(&orion_wdt, nowayout);
+ ret = watchdog_register_device(&orion_wdt);
+ if (ret) {
+ clk_disable_unprepare(clk);
return ret;
+ }
pr_info("Initial timeout %d sec%s\n",
heartbeat, nowayout ? ", nowayout" : "");
@@ -272,27 +182,14 @@ static int __devinit orion_wdt_probe(struct platform_device *pdev)
static int __devexit orion_wdt_remove(struct platform_device *pdev)
{
- int ret;
-
- if (test_bit(WDT_IN_USE, &wdt_status)) {
- orion_wdt_disable();
- clear_bit(WDT_IN_USE, &wdt_status);
- }
-
- ret = misc_deregister(&orion_wdt_miscdev);
- if (!ret)
- orion_wdt_miscdev.parent = NULL;
-
+ watchdog_unregister_device(&orion_wdt);
clk_disable_unprepare(clk);
- clk_put(clk);
-
- return ret;
+ return 0;
}
static void orion_wdt_shutdown(struct platform_device *pdev)
{
- if (test_bit(WDT_IN_USE, &wdt_status))
- orion_wdt_disable();
+ orion_wdt_stop(&orion_wdt);
}
static struct platform_driver orion_wdt_driver = {
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 200ece5e2a22..9245b4d23bfe 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -519,21 +519,7 @@ static struct platform_driver s3c2410wdt_driver = {
},
};
-
-static int __init watchdog_init(void)
-{
- pr_info("S3C2410 Watchdog Timer, (c) 2004 Simtec Electronics\n");
-
- return platform_driver_register(&s3c2410wdt_driver);
-}
-
-static void __exit watchdog_exit(void)
-{
- platform_driver_unregister(&s3c2410wdt_driver);
-}
-
-module_init(watchdog_init);
-module_exit(watchdog_exit);
+module_platform_driver(s3c2410wdt_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, "
"Dimitry Andric <dimitry.andric@tomtom.com>");
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index f8477002b728..9681ada0f252 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -136,6 +136,8 @@ static void sch311x_wdt_set_timeout(int t)
static void sch311x_wdt_start(void)
{
+ unsigned char t;
+
spin_lock(&sch311x_wdt_data.io_lock);
/* set watchdog's timeout */
@@ -149,7 +151,8 @@ static void sch311x_wdt_start(void)
* Bit 4-6 (Reserved)
* Bit 7, Output Type: 0 = Push Pull Bit, 1 = Open Drain
*/
- outb(0x0e, sch311x_wdt_data.runtime_reg + GP60);
+ t = inb(sch311x_wdt_data.runtime_reg + GP60);
+ outb((t & ~0x0d) | 0x0c, sch311x_wdt_data.runtime_reg + GP60);
spin_unlock(&sch311x_wdt_data.io_lock);
@@ -157,10 +160,13 @@ static void sch311x_wdt_start(void)
static void sch311x_wdt_stop(void)
{
+ unsigned char t;
+
spin_lock(&sch311x_wdt_data.io_lock);
/* stop the watchdog */
- outb(0x01, sch311x_wdt_data.runtime_reg + GP60);
+ t = inb(sch311x_wdt_data.runtime_reg + GP60);
+ outb((t & ~0x0d) | 0x01, sch311x_wdt_data.runtime_reg + GP60);
/* disable timeout by setting it to 0 */
sch311x_wdt_set_timeout(0);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8d2501e604dd..d4dffcd52873 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -196,4 +196,12 @@ config XEN_ACPI_PROCESSOR
called xen_acpi_processor If you do not know what to choose, select
M here. If the CPUFREQ drivers are built in, select Y here.
+config XEN_MCE_LOG
+ bool "Xen platform mcelog"
+ depends on XEN_DOM0 && X86_64 && X86_MCE
+ default n
+ help
+ Allow kernel fetching MCE error from Xen platform and
+ converting it into Linux mcelog format for mcelog tools
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index fc3488631136..d80bea5535a2 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -17,7 +17,9 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
+obj-$(CONFIG_XEN_DOM0) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o
+obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
diff --git a/drivers/xen/mcelog.c b/drivers/xen/mcelog.c
new file mode 100644
index 000000000000..8feee08bcb43
--- /dev/null
+++ b/drivers/xen/mcelog.c
@@ -0,0 +1,414 @@
+/******************************************************************************
+ * mcelog.c
+ * Driver for receiving and transferring machine check error infomation
+ *
+ * Copyright (c) 2012 Intel Corporation
+ * Author: Liu, Jinsong <jinsong.liu@intel.com>
+ * Author: Jiang, Yunhong <yunhong.jiang@intel.com>
+ * Author: Ke, Liping <liping.ke@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/capability.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+
+#include <xen/interface/xen.h>
+#include <xen/events.h>
+#include <xen/interface/vcpu.h>
+#include <xen/xen.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/hypervisor.h>
+
+#define XEN_MCELOG "xen_mcelog: "
+
+static struct mc_info g_mi;
+static struct mcinfo_logical_cpu *g_physinfo;
+static uint32_t ncpus;
+
+static DEFINE_MUTEX(mcelog_lock);
+
+static struct xen_mce_log xen_mcelog = {
+ .signature = XEN_MCE_LOG_SIGNATURE,
+ .len = XEN_MCE_LOG_LEN,
+ .recordlen = sizeof(struct xen_mce),
+};
+
+static DEFINE_SPINLOCK(xen_mce_chrdev_state_lock);
+static int xen_mce_chrdev_open_count; /* #times opened */
+static int xen_mce_chrdev_open_exclu; /* already open exclusive? */
+
+static DECLARE_WAIT_QUEUE_HEAD(xen_mce_chrdev_wait);
+
+static int xen_mce_chrdev_open(struct inode *inode, struct file *file)
+{
+ spin_lock(&xen_mce_chrdev_state_lock);
+
+ if (xen_mce_chrdev_open_exclu ||
+ (xen_mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
+ spin_unlock(&xen_mce_chrdev_state_lock);
+
+ return -EBUSY;
+ }
+
+ if (file->f_flags & O_EXCL)
+ xen_mce_chrdev_open_exclu = 1;
+ xen_mce_chrdev_open_count++;
+
+ spin_unlock(&xen_mce_chrdev_state_lock);
+
+ return nonseekable_open(inode, file);
+}
+
+static int xen_mce_chrdev_release(struct inode *inode, struct file *file)
+{
+ spin_lock(&xen_mce_chrdev_state_lock);
+
+ xen_mce_chrdev_open_count--;
+ xen_mce_chrdev_open_exclu = 0;
+
+ spin_unlock(&xen_mce_chrdev_state_lock);
+
+ return 0;
+}
+
+static ssize_t xen_mce_chrdev_read(struct file *filp, char __user *ubuf,
+ size_t usize, loff_t *off)
+{
+ char __user *buf = ubuf;
+ unsigned num;
+ int i, err;
+
+ mutex_lock(&mcelog_lock);
+
+ num = xen_mcelog.next;
+
+ /* Only supports full reads right now */
+ err = -EINVAL;
+ if (*off != 0 || usize < XEN_MCE_LOG_LEN*sizeof(struct xen_mce))
+ goto out;
+
+ err = 0;
+ for (i = 0; i < num; i++) {
+ struct xen_mce *m = &xen_mcelog.entry[i];
+
+ err |= copy_to_user(buf, m, sizeof(*m));
+ buf += sizeof(*m);
+ }
+
+ memset(xen_mcelog.entry, 0, num * sizeof(struct xen_mce));
+ xen_mcelog.next = 0;
+
+ if (err)
+ err = -EFAULT;
+
+out:
+ mutex_unlock(&mcelog_lock);
+
+ return err ? err : buf - ubuf;
+}
+
+static unsigned int xen_mce_chrdev_poll(struct file *file, poll_table *wait)
+{
+ poll_wait(file, &xen_mce_chrdev_wait, wait);
+
+ if (xen_mcelog.next)
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static long xen_mce_chrdev_ioctl(struct file *f, unsigned int cmd,
+ unsigned long arg)
+{
+ int __user *p = (int __user *)arg;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case MCE_GET_RECORD_LEN:
+ return put_user(sizeof(struct xen_mce), p);
+ case MCE_GET_LOG_LEN:
+ return put_user(XEN_MCE_LOG_LEN, p);
+ case MCE_GETCLEAR_FLAGS: {
+ unsigned flags;
+
+ do {
+ flags = xen_mcelog.flags;
+ } while (cmpxchg(&xen_mcelog.flags, flags, 0) != flags);
+
+ return put_user(flags, p);
+ }
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations xen_mce_chrdev_ops = {
+ .open = xen_mce_chrdev_open,
+ .release = xen_mce_chrdev_release,
+ .read = xen_mce_chrdev_read,
+ .poll = xen_mce_chrdev_poll,
+ .unlocked_ioctl = xen_mce_chrdev_ioctl,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice xen_mce_chrdev_device = {
+ MISC_MCELOG_MINOR,
+ "mcelog",
+ &xen_mce_chrdev_ops,
+};
+
+/*
+ * Caller should hold the mcelog_lock
+ */
+static void xen_mce_log(struct xen_mce *mce)
+{
+ unsigned entry;
+
+ entry = xen_mcelog.next;
+
+ /*
+ * When the buffer fills up discard new entries.
+ * Assume that the earlier errors are the more
+ * interesting ones:
+ */
+ if (entry >= XEN_MCE_LOG_LEN) {
+ set_bit(XEN_MCE_OVERFLOW,
+ (unsigned long *)&xen_mcelog.flags);
+ return;
+ }
+
+ memcpy(xen_mcelog.entry + entry, mce, sizeof(struct xen_mce));
+
+ xen_mcelog.next++;
+}
+
+static int convert_log(struct mc_info *mi)
+{
+ struct mcinfo_common *mic;
+ struct mcinfo_global *mc_global;
+ struct mcinfo_bank *mc_bank;
+ struct xen_mce m;
+ uint32_t i;
+
+ mic = NULL;
+ x86_mcinfo_lookup(&mic, mi, MC_TYPE_GLOBAL);
+ if (unlikely(!mic)) {
+ pr_warning(XEN_MCELOG "Failed to find global error info\n");
+ return -ENODEV;
+ }
+
+ memset(&m, 0, sizeof(struct xen_mce));
+
+ mc_global = (struct mcinfo_global *)mic;
+ m.mcgstatus = mc_global->mc_gstatus;
+ m.apicid = mc_global->mc_apicid;
+
+ for (i = 0; i < ncpus; i++)
+ if (g_physinfo[i].mc_apicid == m.apicid)
+ break;
+ if (unlikely(i == ncpus)) {
+ pr_warning(XEN_MCELOG "Failed to match cpu with apicid %d\n",
+ m.apicid);
+ return -ENODEV;
+ }
+
+ m.socketid = g_physinfo[i].mc_chipid;
+ m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
+ m.cpuvendor = (__u8)g_physinfo[i].mc_vendor;
+ m.mcgcap = g_physinfo[i].mc_msrvalues[__MC_MSR_MCGCAP].value;
+
+ mic = NULL;
+ x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK);
+ if (unlikely(!mic)) {
+ pr_warning(XEN_MCELOG "Fail to find bank error info\n");
+ return -ENODEV;
+ }
+
+ do {
+ if ((!mic) || (mic->size == 0) ||
+ (mic->type != MC_TYPE_GLOBAL &&
+ mic->type != MC_TYPE_BANK &&
+ mic->type != MC_TYPE_EXTENDED &&
+ mic->type != MC_TYPE_RECOVERY))
+ break;
+
+ if (mic->type == MC_TYPE_BANK) {
+ mc_bank = (struct mcinfo_bank *)mic;
+ m.misc = mc_bank->mc_misc;
+ m.status = mc_bank->mc_status;
+ m.addr = mc_bank->mc_addr;
+ m.tsc = mc_bank->mc_tsc;
+ m.bank = mc_bank->mc_bank;
+ m.finished = 1;
+ /*log this record*/
+ xen_mce_log(&m);
+ }
+ mic = x86_mcinfo_next(mic);
+ } while (1);
+
+ return 0;
+}
+
+static int mc_queue_handle(uint32_t flags)
+{
+ struct xen_mc mc_op;
+ int ret = 0;
+
+ mc_op.cmd = XEN_MC_fetch;
+ mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
+ set_xen_guest_handle(mc_op.u.mc_fetch.data, &g_mi);
+ do {
+ mc_op.u.mc_fetch.flags = flags;
+ ret = HYPERVISOR_mca(&mc_op);
+ if (ret) {
+ pr_err(XEN_MCELOG "Failed to fetch %s error log\n",
+ (flags == XEN_MC_URGENT) ?
+ "urgnet" : "nonurgent");
+ break;
+ }
+
+ if (mc_op.u.mc_fetch.flags & XEN_MC_NODATA ||
+ mc_op.u.mc_fetch.flags & XEN_MC_FETCHFAILED)
+ break;
+ else {
+ ret = convert_log(&g_mi);
+ if (ret)
+ pr_warning(XEN_MCELOG
+ "Failed to convert this error log, "
+ "continue acking it anyway\n");
+
+ mc_op.u.mc_fetch.flags = flags | XEN_MC_ACK;
+ ret = HYPERVISOR_mca(&mc_op);
+ if (ret) {
+ pr_err(XEN_MCELOG
+ "Failed to ack previous error log\n");
+ break;
+ }
+ }
+ } while (1);
+
+ return ret;
+}
+
+/* virq handler for machine check error info*/
+static void xen_mce_work_fn(struct work_struct *work)
+{
+ int err;
+
+ mutex_lock(&mcelog_lock);
+
+ /* urgent mc_info */
+ err = mc_queue_handle(XEN_MC_URGENT);
+ if (err)
+ pr_err(XEN_MCELOG
+ "Failed to handle urgent mc_info queue, "
+ "continue handling nonurgent mc_info queue anyway.\n");
+
+ /* nonurgent mc_info */
+ err = mc_queue_handle(XEN_MC_NONURGENT);
+ if (err)
+ pr_err(XEN_MCELOG
+ "Failed to handle nonurgent mc_info queue.\n");
+
+ /* wake processes polling /dev/mcelog */
+ wake_up_interruptible(&xen_mce_chrdev_wait);
+
+ mutex_unlock(&mcelog_lock);
+}
+static DECLARE_WORK(xen_mce_work, xen_mce_work_fn);
+
+static irqreturn_t xen_mce_interrupt(int irq, void *dev_id)
+{
+ schedule_work(&xen_mce_work);
+ return IRQ_HANDLED;
+}
+
+static int bind_virq_for_mce(void)
+{
+ int ret;
+ struct xen_mc mc_op;
+
+ memset(&mc_op, 0, sizeof(struct xen_mc));
+
+ /* Fetch physical CPU Numbers */
+ mc_op.cmd = XEN_MC_physcpuinfo;
+ mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
+ set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
+ ret = HYPERVISOR_mca(&mc_op);
+ if (ret) {
+ pr_err(XEN_MCELOG "Failed to get CPU numbers\n");
+ return ret;
+ }
+
+ /* Fetch each CPU Physical Info for later reference*/
+ ncpus = mc_op.u.mc_physcpuinfo.ncpus;
+ g_physinfo = kcalloc(ncpus, sizeof(struct mcinfo_logical_cpu),
+ GFP_KERNEL);
+ if (!g_physinfo)
+ return -ENOMEM;
+ set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
+ ret = HYPERVISOR_mca(&mc_op);
+ if (ret) {
+ pr_err(XEN_MCELOG "Failed to get CPU info\n");
+ kfree(g_physinfo);
+ return ret;
+ }
+
+ ret = bind_virq_to_irqhandler(VIRQ_MCA, 0,
+ xen_mce_interrupt, 0, "mce", NULL);
+ if (ret < 0) {
+ pr_err(XEN_MCELOG "Failed to bind virq\n");
+ kfree(g_physinfo);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init xen_late_init_mcelog(void)
+{
+ /* Only DOM0 is responsible for MCE logging */
+ if (xen_initial_domain()) {
+ /* register character device /dev/mcelog for xen mcelog */
+ if (misc_register(&xen_mce_chrdev_device))
+ return -ENODEV;
+ return bind_virq_for_mce();
+ }
+
+ return -ENODEV;
+}
+device_initcall(xen_late_init_mcelog);
diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c
new file mode 100644
index 000000000000..067fcfa1723e
--- /dev/null
+++ b/drivers/xen/pcpu.c
@@ -0,0 +1,371 @@
+/******************************************************************************
+ * pcpu.c
+ * Management physical cpu in dom0, get pcpu info and provide sys interface
+ *
+ * Copyright (c) 2012 Intel Corporation
+ * Author: Liu, Jinsong <jinsong.liu@intel.com>
+ * Author: Jiang, Yunhong <yunhong.jiang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/stat.h>
+#include <linux/capability.h>
+
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/interface/platform.h>
+#include <asm/xen/hypervisor.h>
+#include <asm/xen/hypercall.h>
+
+#define XEN_PCPU "xen_cpu: "
+
+/*
+ * @cpu_id: Xen physical cpu logic number
+ * @flags: Xen physical cpu status flag
+ * - XEN_PCPU_FLAGS_ONLINE: cpu is online
+ * - XEN_PCPU_FLAGS_INVALID: cpu is not present
+ */
+struct pcpu {
+ struct list_head list;
+ struct device dev;
+ uint32_t cpu_id;
+ uint32_t flags;
+};
+
+static struct bus_type xen_pcpu_subsys = {
+ .name = "xen_cpu",
+ .dev_name = "xen_cpu",
+};
+
+static DEFINE_MUTEX(xen_pcpu_lock);
+
+static LIST_HEAD(xen_pcpus);
+
+static int xen_pcpu_down(uint32_t cpu_id)
+{
+ struct xen_platform_op op = {
+ .cmd = XENPF_cpu_offline,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u.cpu_ol.cpuid = cpu_id,
+ };
+
+ return HYPERVISOR_dom0_op(&op);
+}
+
+static int xen_pcpu_up(uint32_t cpu_id)
+{
+ struct xen_platform_op op = {
+ .cmd = XENPF_cpu_online,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u.cpu_ol.cpuid = cpu_id,
+ };
+
+ return HYPERVISOR_dom0_op(&op);
+}
+
+static ssize_t show_online(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pcpu *cpu = container_of(dev, struct pcpu, dev);
+
+ return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
+}
+
+static ssize_t __ref store_online(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pcpu *pcpu = container_of(dev, struct pcpu, dev);
+ unsigned long long val;
+ ssize_t ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (kstrtoull(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ switch (val) {
+ case 0:
+ ret = xen_pcpu_down(pcpu->cpu_id);
+ break;
+ case 1:
+ ret = xen_pcpu_up(pcpu->cpu_id);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret >= 0)
+ ret = count;
+ return ret;
+}
+static DEVICE_ATTR(online, S_IRUGO | S_IWUSR, show_online, store_online);
+
+static bool xen_pcpu_online(uint32_t flags)
+{
+ return !!(flags & XEN_PCPU_FLAGS_ONLINE);
+}
+
+static void pcpu_online_status(struct xenpf_pcpuinfo *info,
+ struct pcpu *pcpu)
+{
+ if (xen_pcpu_online(info->flags) &&
+ !xen_pcpu_online(pcpu->flags)) {
+ /* the pcpu is onlined */
+ pcpu->flags |= XEN_PCPU_FLAGS_ONLINE;
+ kobject_uevent(&pcpu->dev.kobj, KOBJ_ONLINE);
+ } else if (!xen_pcpu_online(info->flags) &&
+ xen_pcpu_online(pcpu->flags)) {
+ /* The pcpu is offlined */
+ pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE;
+ kobject_uevent(&pcpu->dev.kobj, KOBJ_OFFLINE);
+ }
+}
+
+static struct pcpu *get_pcpu(uint32_t cpu_id)
+{
+ struct pcpu *pcpu;
+
+ list_for_each_entry(pcpu, &xen_pcpus, list) {
+ if (pcpu->cpu_id == cpu_id)
+ return pcpu;
+ }
+
+ return NULL;
+}
+
+static void pcpu_release(struct device *dev)
+{
+ struct pcpu *pcpu = container_of(dev, struct pcpu, dev);
+
+ list_del(&pcpu->list);
+ kfree(pcpu);
+}
+
+static void unregister_and_remove_pcpu(struct pcpu *pcpu)
+{
+ struct device *dev;
+
+ if (!pcpu)
+ return;
+
+ dev = &pcpu->dev;
+ if (dev->id)
+ device_remove_file(dev, &dev_attr_online);
+
+ /* pcpu remove would be implicitly done */
+ device_unregister(dev);
+}
+
+static int register_pcpu(struct pcpu *pcpu)
+{
+ struct device *dev;
+ int err = -EINVAL;
+
+ if (!pcpu)
+ return err;
+
+ dev = &pcpu->dev;
+ dev->bus = &xen_pcpu_subsys;
+ dev->id = pcpu->cpu_id;
+ dev->release = pcpu_release;
+
+ err = device_register(dev);
+ if (err) {
+ pcpu_release(dev);
+ return err;
+ }
+
+ /*
+ * Xen never offline cpu0 due to several restrictions
+ * and assumptions. This basically doesn't add a sys control
+ * to user, one cannot attempt to offline BSP.
+ */
+ if (dev->id) {
+ err = device_create_file(dev, &dev_attr_online);
+ if (err) {
+ device_unregister(dev);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static struct pcpu *create_and_register_pcpu(struct xenpf_pcpuinfo *info)
+{
+ struct pcpu *pcpu;
+ int err;
+
+ if (info->flags & XEN_PCPU_FLAGS_INVALID)
+ return ERR_PTR(-ENODEV);
+
+ pcpu = kzalloc(sizeof(struct pcpu), GFP_KERNEL);
+ if (!pcpu)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&pcpu->list);
+ pcpu->cpu_id = info->xen_cpuid;
+ pcpu->flags = info->flags;
+
+ /* Need hold on xen_pcpu_lock before pcpu list manipulations */
+ list_add_tail(&pcpu->list, &xen_pcpus);
+
+ err = register_pcpu(pcpu);
+ if (err) {
+ pr_warning(XEN_PCPU "Failed to register pcpu%u\n",
+ info->xen_cpuid);
+ return ERR_PTR(-ENOENT);
+ }
+
+ return pcpu;
+}
+
+/*
+ * Caller should hold the xen_pcpu_lock
+ */
+static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
+{
+ int ret;
+ struct pcpu *pcpu = NULL;
+ struct xenpf_pcpuinfo *info;
+ struct xen_platform_op op = {
+ .cmd = XENPF_get_cpuinfo,
+ .interface_version = XENPF_INTERFACE_VERSION,
+ .u.pcpu_info.xen_cpuid = cpu,
+ };
+
+ ret = HYPERVISOR_dom0_op(&op);
+ if (ret)
+ return ret;
+
+ info = &op.u.pcpu_info;
+ if (max_cpu)
+ *max_cpu = info->max_present;
+
+ pcpu = get_pcpu(cpu);
+
+ /*
+ * Only those at cpu present map has its sys interface.
+ */
+ if (info->flags & XEN_PCPU_FLAGS_INVALID) {
+ if (pcpu)
+ unregister_and_remove_pcpu(pcpu);
+ return 0;
+ }
+
+ if (!pcpu) {
+ pcpu = create_and_register_pcpu(info);
+ if (IS_ERR_OR_NULL(pcpu))
+ return -ENODEV;
+ } else
+ pcpu_online_status(info, pcpu);
+
+ return 0;
+}
+
+/*
+ * Sync dom0's pcpu information with xen hypervisor's
+ */
+static int xen_sync_pcpus(void)
+{
+ /*
+ * Boot cpu always have cpu_id 0 in xen
+ */
+ uint32_t cpu = 0, max_cpu = 0;
+ int err = 0;
+ struct pcpu *pcpu, *tmp;
+
+ mutex_lock(&xen_pcpu_lock);
+
+ while (!err && (cpu <= max_cpu)) {
+ err = sync_pcpu(cpu, &max_cpu);
+ cpu++;
+ }
+
+ if (err)
+ list_for_each_entry_safe(pcpu, tmp, &xen_pcpus, list)
+ unregister_and_remove_pcpu(pcpu);
+
+ mutex_unlock(&xen_pcpu_lock);
+
+ return err;
+}
+
+static void xen_pcpu_work_fn(struct work_struct *work)
+{
+ xen_sync_pcpus();
+}
+static DECLARE_WORK(xen_pcpu_work, xen_pcpu_work_fn);
+
+static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
+{
+ schedule_work(&xen_pcpu_work);
+ return IRQ_HANDLED;
+}
+
+static int __init xen_pcpu_init(void)
+{
+ int irq, ret;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ irq = bind_virq_to_irqhandler(VIRQ_PCPU_STATE, 0,
+ xen_pcpu_interrupt, 0,
+ "xen-pcpu", NULL);
+ if (irq < 0) {
+ pr_warning(XEN_PCPU "Failed to bind pcpu virq\n");
+ return irq;
+ }
+
+ ret = subsys_system_register(&xen_pcpu_subsys, NULL);
+ if (ret) {
+ pr_warning(XEN_PCPU "Failed to register pcpu subsys\n");
+ goto err1;
+ }
+
+ ret = xen_sync_pcpus();
+ if (ret) {
+ pr_warning(XEN_PCPU "Failed to sync pcpu info\n");
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ bus_unregister(&xen_pcpu_subsys);
+err1:
+ unbind_from_irqhandler(irq, NULL);
+ return ret;
+}
+arch_initcall(xen_pcpu_init);
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 2389e581e23c..d4c50d63acbc 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -101,6 +101,19 @@ static int platform_pci_resume(struct pci_dev *pdev)
return 0;
}
+static void __devinit prepare_shared_info(void)
+{
+#ifdef CONFIG_KEXEC
+ unsigned long addr;
+ struct shared_info *hvm_shared_info;
+
+ addr = alloc_xen_mmio(PAGE_SIZE);
+ hvm_shared_info = ioremap(addr, PAGE_SIZE);
+ memset(hvm_shared_info, 0, PAGE_SIZE);
+ xen_hvm_prepare_kexec(hvm_shared_info, addr >> PAGE_SHIFT);
+#endif
+}
+
static int __devinit platform_pci_init(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -109,6 +122,9 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
long mmio_addr, mmio_len;
unsigned int max_nr_gframes;
+ if (!xen_domain())
+ return -ENODEV;
+
i = pci_enable_device(pdev);
if (i)
return i;
@@ -135,6 +151,8 @@ static int __devinit platform_pci_init(struct pci_dev *pdev,
platform_mmio = mmio_addr;
platform_mmiolen = mmio_len;
+ prepare_shared_info();
+
if (!xen_have_vector_callback) {
ret = xen_allocate_irq(pdev);
if (ret) {
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 7ff2569e17ae..b590ee067fcd 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -520,15 +520,18 @@ static int __init xen_acpi_processor_init(void)
if (!pr_backup) {
pr_backup = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
- memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
+ if (pr_backup)
+ memcpy(pr_backup, _pr, sizeof(struct acpi_processor));
}
(void)upload_pm_data(_pr);
}
rc = check_acpi_ids(pr_backup);
- if (rc)
- goto err_unregister;
kfree(pr_backup);
+ pr_backup = NULL;
+
+ if (rc)
+ goto err_unregister;
return 0;
err_unregister:
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 30d7be026c18..46ae0f9f02ad 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -124,7 +124,7 @@ static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
return val;
}
-static int pcibios_err_to_errno(int err)
+static int xen_pcibios_err_to_errno(int err)
{
switch (err) {
case PCIBIOS_SUCCESSFUL:
@@ -202,7 +202,7 @@ out:
pci_name(dev), size, offset, value);
*ret_val = value;
- return pcibios_err_to_errno(err);
+ return xen_pcibios_err_to_errno(err);
}
int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
@@ -290,7 +290,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
}
}
- return pcibios_err_to_errno(err);
+ return xen_pcibios_err_to_errno(err);
}
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index d1c217b23a42..bce15cf4a8df 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -618,6 +618,23 @@ static struct xenbus_watch *find_watch(const char *token)
return NULL;
}
+static void xs_reset_watches(void)
+{
+ int err, supported = 0;
+
+ if (!xen_hvm_domain())
+ return;
+
+ err = xenbus_scanf(XBT_NIL, "control",
+ "platform-feature-xs_reset_watches", "%d", &supported);
+ if (err != 1 || !supported)
+ return;
+
+ err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
+ if (err && err != -EEXIST)
+ printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
+}
+
/* Register callback to watch this node. */
int register_xenbus_watch(struct xenbus_watch *watch)
{
@@ -900,5 +917,8 @@ int xs_init(void)
if (IS_ERR(task))
return PTR_ERR(task);
+ /* shutdown watches for kexec boot */
+ xs_reset_watches();
+
return 0;
}
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index e78956cbd702..34c59f14a1c9 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -144,7 +144,7 @@ extern void v9fs_session_close(struct v9fs_session_info *v9ses);
extern void v9fs_session_cancel(struct v9fs_session_info *v9ses);
extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses);
extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nameidata);
+ unsigned int flags);
extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d);
extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d);
extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
diff --git a/fs/9p/vfs_dentry.c b/fs/9p/vfs_dentry.c
index d529437ff442..64600b5d0522 100644
--- a/fs/9p/vfs_dentry.c
+++ b/fs/9p/vfs_dentry.c
@@ -100,13 +100,13 @@ static void v9fs_dentry_release(struct dentry *dentry)
}
}
-static int v9fs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
struct p9_fid *fid;
struct inode *inode;
struct v9fs_inode *v9inode;
- if (nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
inode = dentry->d_inode;
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 57ccb7537dae..cbf9dbb1b2a2 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -712,88 +712,34 @@ error:
}
/**
- * v9fs_vfs_create - VFS hook to create files
+ * v9fs_vfs_create - VFS hook to create a regular file
+ *
+ * open(.., O_CREAT) is handled in v9fs_vfs_atomic_open(). This is only called
+ * for mknod(2).
+ *
* @dir: directory inode that is being created
* @dentry: dentry that is being deleted
* @mode: create permissions
- * @nd: path information
*
*/
static int
v9fs_vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
- int err;
- u32 perm;
- int flags;
- struct file *filp;
- struct v9fs_inode *v9inode;
- struct v9fs_session_info *v9ses;
- struct p9_fid *fid, *inode_fid;
-
- err = 0;
- fid = NULL;
- v9ses = v9fs_inode2v9ses(dir);
- perm = unixmode2p9mode(v9ses, mode);
- if (nd)
- flags = nd->intent.open.flags;
- else
- flags = O_RDWR;
+ struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
+ u32 perm = unixmode2p9mode(v9ses, mode);
+ struct p9_fid *fid;
- fid = v9fs_create(v9ses, dir, dentry, NULL, perm,
- v9fs_uflags2omode(flags,
- v9fs_proto_dotu(v9ses)));
- if (IS_ERR(fid)) {
- err = PTR_ERR(fid);
- fid = NULL;
- goto error;
- }
+ /* P9_OEXCL? */
+ fid = v9fs_create(v9ses, dir, dentry, NULL, perm, P9_ORDWR);
+ if (IS_ERR(fid))
+ return PTR_ERR(fid);
v9fs_invalidate_inode_attr(dir);
- /* if we are opening a file, assign the open fid to the file */
- if (nd) {
- v9inode = V9FS_I(dentry->d_inode);
- mutex_lock(&v9inode->v_mutex);
- if (v9ses->cache && !v9inode->writeback_fid &&
- ((flags & O_ACCMODE) != O_RDONLY)) {
- /*
- * clone a fid and add it to writeback_fid
- * we do it during open time instead of
- * page dirty time via write_begin/page_mkwrite
- * because we want write after unlink usecase
- * to work.
- */
- inode_fid = v9fs_writeback_fid(dentry);
- if (IS_ERR(inode_fid)) {
- err = PTR_ERR(inode_fid);
- mutex_unlock(&v9inode->v_mutex);
- goto error;
- }
- v9inode->writeback_fid = (void *) inode_fid;
- }
- mutex_unlock(&v9inode->v_mutex);
- filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
- if (IS_ERR(filp)) {
- err = PTR_ERR(filp);
- goto error;
- }
-
- filp->private_data = fid;
-#ifdef CONFIG_9P_FSCACHE
- if (v9ses->cache)
- v9fs_cache_inode_set_cookie(dentry->d_inode, filp);
-#endif
- } else
- p9_client_clunk(fid);
+ p9_client_clunk(fid);
return 0;
-
-error:
- if (fid)
- p9_client_clunk(fid);
-
- return err;
}
/**
@@ -839,7 +785,7 @@ static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
*/
struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nameidata)
+ unsigned int flags)
{
struct dentry *res;
struct super_block *sb;
@@ -849,8 +795,8 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
char *name;
int result = 0;
- p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p nameidata: %p\n",
- dir, dentry->d_name.name, dentry, nameidata);
+ p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p flags: %x\n",
+ dir, dentry->d_name.name, dentry, flags);
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
@@ -910,6 +856,86 @@ error:
return ERR_PTR(result);
}
+static int
+v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned flags, umode_t mode,
+ int *opened)
+{
+ int err;
+ u32 perm;
+ struct v9fs_inode *v9inode;
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid, *inode_fid;
+ struct dentry *res = NULL;
+
+ if (d_unhashed(dentry)) {
+ res = v9fs_vfs_lookup(dir, dentry, 0);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ if (res)
+ dentry = res;
+ }
+
+ /* Only creates */
+ if (!(flags & O_CREAT) || dentry->d_inode)
+ return finish_no_open(file, res);
+
+ err = 0;
+ fid = NULL;
+ v9ses = v9fs_inode2v9ses(dir);
+ perm = unixmode2p9mode(v9ses, mode);
+ fid = v9fs_create(v9ses, dir, dentry, NULL, perm,
+ v9fs_uflags2omode(flags,
+ v9fs_proto_dotu(v9ses)));
+ if (IS_ERR(fid)) {
+ err = PTR_ERR(fid);
+ fid = NULL;
+ goto error;
+ }
+
+ v9fs_invalidate_inode_attr(dir);
+ v9inode = V9FS_I(dentry->d_inode);
+ mutex_lock(&v9inode->v_mutex);
+ if (v9ses->cache && !v9inode->writeback_fid &&
+ ((flags & O_ACCMODE) != O_RDONLY)) {
+ /*
+ * clone a fid and add it to writeback_fid
+ * we do it during open time instead of
+ * page dirty time via write_begin/page_mkwrite
+ * because we want write after unlink usecase
+ * to work.
+ */
+ inode_fid = v9fs_writeback_fid(dentry);
+ if (IS_ERR(inode_fid)) {
+ err = PTR_ERR(inode_fid);
+ mutex_unlock(&v9inode->v_mutex);
+ goto error;
+ }
+ v9inode->writeback_fid = (void *) inode_fid;
+ }
+ mutex_unlock(&v9inode->v_mutex);
+ err = finish_open(file, dentry, generic_file_open, opened);
+ if (err)
+ goto error;
+
+ file->private_data = fid;
+#ifdef CONFIG_9P_FSCACHE
+ if (v9ses->cache)
+ v9fs_cache_inode_set_cookie(dentry->d_inode, file);
+#endif
+
+ *opened |= FILE_CREATED;
+out:
+ dput(res);
+ return err;
+
+error:
+ if (fid)
+ p9_client_clunk(fid);
+ goto out;
+}
+
/**
* v9fs_vfs_unlink - VFS unlink hook to delete an inode
* @i: inode that is being unlinked
@@ -1488,6 +1514,7 @@ out:
static const struct inode_operations v9fs_dir_inode_operations_dotu = {
.create = v9fs_vfs_create,
.lookup = v9fs_vfs_lookup,
+ .atomic_open = v9fs_vfs_atomic_open,
.symlink = v9fs_vfs_symlink,
.link = v9fs_vfs_link,
.unlink = v9fs_vfs_unlink,
@@ -1502,6 +1529,7 @@ static const struct inode_operations v9fs_dir_inode_operations_dotu = {
static const struct inode_operations v9fs_dir_inode_operations = {
.create = v9fs_vfs_create,
.lookup = v9fs_vfs_lookup,
+ .atomic_open = v9fs_vfs_atomic_open,
.unlink = v9fs_vfs_unlink,
.mkdir = v9fs_vfs_mkdir,
.rmdir = v9fs_vfs_rmdir,
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index e3dd2a1e2bfc..40895546e103 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -230,20 +230,25 @@ int v9fs_open_to_dotl_flags(int flags)
* @dir: directory inode that is being created
* @dentry: dentry that is being deleted
* @mode: create permissions
- * @nd: path information
*
*/
static int
v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
- struct nameidata *nd)
+ bool excl)
+{
+ return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
+}
+
+static int
+v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned flags, umode_t omode,
+ int *opened)
{
int err = 0;
gid_t gid;
- int flags;
umode_t mode;
char *name = NULL;
- struct file *filp;
struct p9_qid qid;
struct inode *inode;
struct p9_fid *fid = NULL;
@@ -251,19 +256,23 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
struct p9_fid *dfid, *ofid, *inode_fid;
struct v9fs_session_info *v9ses;
struct posix_acl *pacl = NULL, *dacl = NULL;
+ struct dentry *res = NULL;
- v9ses = v9fs_inode2v9ses(dir);
- if (nd)
- flags = nd->intent.open.flags;
- else {
- /*
- * create call without LOOKUP_OPEN is due
- * to mknod of regular files. So use mknod
- * operation.
- */
- return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0);
+ if (d_unhashed(dentry)) {
+ res = v9fs_vfs_lookup(dir, dentry, 0);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ if (res)
+ dentry = res;
}
+ /* Only creates */
+ if (!(flags & O_CREAT) || dentry->d_inode)
+ return finish_no_open(file, res);
+
+ v9ses = v9fs_inode2v9ses(dir);
+
name = (char *) dentry->d_name.name;
p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%hx\n",
name, flags, omode);
@@ -272,7 +281,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
- return err;
+ goto out;
}
/* clone a fid to use for creation */
@@ -280,7 +289,7 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
- return err;
+ goto out;
}
gid = v9fs_get_fsgid_for_create(dir);
@@ -345,17 +354,18 @@ v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
}
mutex_unlock(&v9inode->v_mutex);
/* Since we are opening a file, assign the open fid to the file */
- filp = lookup_instantiate_filp(nd, dentry, generic_file_open);
- if (IS_ERR(filp)) {
- err = PTR_ERR(filp);
+ err = finish_open(file, dentry, generic_file_open, opened);
+ if (err)
goto err_clunk_old_fid;
- }
- filp->private_data = ofid;
+ file->private_data = ofid;
#ifdef CONFIG_9P_FSCACHE
if (v9ses->cache)
- v9fs_cache_inode_set_cookie(inode, filp);
+ v9fs_cache_inode_set_cookie(inode, file);
#endif
- return 0;
+ *opened |= FILE_CREATED;
+out:
+ dput(res);
+ return err;
error:
if (fid)
@@ -364,7 +374,7 @@ err_clunk_old_fid:
if (ofid)
p9_client_clunk(ofid);
v9fs_set_create_acl(NULL, &dacl, &pacl);
- return err;
+ goto out;
}
/**
@@ -982,6 +992,7 @@ out:
const struct inode_operations v9fs_dir_inode_operations_dotl = {
.create = v9fs_vfs_create_dotl,
+ .atomic_open = v9fs_vfs_atomic_open_dotl,
.lookup = v9fs_vfs_lookup,
.link = v9fs_vfs_link_dotl,
.symlink = v9fs_vfs_symlink_dotl,
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 8c92a9ba8330..137d50396898 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -89,7 +89,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
if (v9ses->cache)
sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE;
- sb->s_flags = flags | MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
+ sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
if (!v9ses->cache)
sb->s_flags |= MS_SYNCHRONOUS;
@@ -137,7 +137,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
goto close_session;
}
- sb = sget(fs_type, NULL, v9fs_set_super, v9ses);
+ sb = sget(fs_type, NULL, v9fs_set_super, flags, v9ses);
if (IS_ERR(sb)) {
retval = PTR_ERR(sb);
goto clunk_fid;
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index 3d83075aaa2e..b3be2e7c5643 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -266,7 +266,7 @@ const struct dentry_operations adfs_dentry_operations = {
};
static struct dentry *
-adfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+adfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct inode *inode = NULL;
struct object_info obj;
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 06fdcc9382c4..bdaec92353c2 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -246,7 +246,6 @@ static struct inode *adfs_alloc_inode(struct super_block *sb)
static void adfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
}
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 1fceb320d2f2..6e216419f340 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -3,6 +3,7 @@
#include <linux/buffer_head.h>
#include <linux/amigaffs.h>
#include <linux/mutex.h>
+#include <linux/workqueue.h>
/* AmigaOS allows file names with up to 30 characters length.
* Names longer than that will be silently truncated. If you
@@ -100,6 +101,10 @@ struct affs_sb_info {
char *s_prefix; /* Prefix for volumes and assigns. */
char s_volume[32]; /* Volume prefix for absolute symlinks. */
spinlock_t symlink_lock; /* protects the previous two */
+ struct super_block *sb; /* the VFS superblock object */
+ int work_queued; /* non-zero delayed work is queued */
+ struct delayed_work sb_work; /* superblock flush delayed work */
+ spinlock_t work_lock; /* protects sb_work and work_queued */
};
#define SF_INTL 0x0001 /* International filesystem. */
@@ -120,6 +125,8 @@ static inline struct affs_sb_info *AFFS_SB(struct super_block *sb)
return sb->s_fs_info;
}
+void affs_mark_sb_dirty(struct super_block *sb);
+
/* amigaffs.c */
extern int affs_insert_hash(struct inode *inode, struct buffer_head *bh);
@@ -146,9 +153,9 @@ extern void affs_free_bitmap(struct super_block *sb);
/* namei.c */
extern int affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len);
-extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *);
+extern struct dentry *affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int);
extern int affs_unlink(struct inode *dir, struct dentry *dentry);
-extern int affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *);
+extern int affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool);
extern int affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
extern int affs_rmdir(struct inode *dir, struct dentry *dentry);
extern int affs_link(struct dentry *olddentry, struct inode *dir,
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index 52a6407682e6..eb82ee53ee0b 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -122,22 +122,16 @@ affs_remove_hash(struct inode *dir, struct buffer_head *rem_bh)
}
static void
-affs_fix_dcache(struct dentry *dentry, u32 entry_ino)
+affs_fix_dcache(struct inode *inode, u32 entry_ino)
{
- struct inode *inode = dentry->d_inode;
- void *data = dentry->d_fsdata;
- struct list_head *head, *next;
-
+ struct dentry *dentry;
+ struct hlist_node *p;
spin_lock(&inode->i_lock);
- head = &inode->i_dentry;
- next = head->next;
- while (next != head) {
- dentry = list_entry(next, struct dentry, d_alias);
+ hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
if (entry_ino == (u32)(long)dentry->d_fsdata) {
- dentry->d_fsdata = data;
+ dentry->d_fsdata = (void *)inode->i_ino;
break;
}
- next = next->next;
}
spin_unlock(&inode->i_lock);
}
@@ -177,7 +171,11 @@ affs_remove_link(struct dentry *dentry)
}
affs_lock_dir(dir);
- affs_fix_dcache(dentry, link_ino);
+ /*
+ * if there's a dentry for that block, make it
+ * refer to inode itself.
+ */
+ affs_fix_dcache(inode, link_ino);
retval = affs_remove_hash(dir, link_bh);
if (retval) {
affs_unlock_dir(dir);
diff --git a/fs/affs/bitmap.c b/fs/affs/bitmap.c
index 3e262711ae06..6e0be43ef6ef 100644
--- a/fs/affs/bitmap.c
+++ b/fs/affs/bitmap.c
@@ -103,7 +103,7 @@ affs_free_block(struct super_block *sb, u32 block)
*(__be32 *)bh->b_data = cpu_to_be32(tmp - mask);
mark_buffer_dirty(bh);
- sb->s_dirt = 1;
+ affs_mark_sb_dirty(sb);
bm->bm_free++;
mutex_unlock(&sbi->s_bmlock);
@@ -248,7 +248,7 @@ find_bit:
*(__be32 *)bh->b_data = cpu_to_be32(tmp + mask);
mark_buffer_dirty(bh);
- sb->s_dirt = 1;
+ affs_mark_sb_dirty(sb);
mutex_unlock(&sbi->s_bmlock);
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 47806940aac0..ff65884a7839 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -211,7 +211,7 @@ affs_find_entry(struct inode *dir, struct dentry *dentry)
}
struct dentry *
-affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+affs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct super_block *sb = dir->i_sb;
struct buffer_head *bh;
@@ -255,7 +255,7 @@ affs_unlink(struct inode *dir, struct dentry *dentry)
}
int
-affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
+affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 0782653a05a2..c70f1e5fc024 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -17,6 +17,7 @@
#include <linux/magic.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/writeback.h>
#include "affs.h"
extern struct timezone sys_tz;
@@ -25,15 +26,17 @@ static int affs_statfs(struct dentry *dentry, struct kstatfs *buf);
static int affs_remount (struct super_block *sb, int *flags, char *data);
static void
-affs_commit_super(struct super_block *sb, int wait, int clean)
+affs_commit_super(struct super_block *sb, int wait)
{
struct affs_sb_info *sbi = AFFS_SB(sb);
struct buffer_head *bh = sbi->s_root_bh;
struct affs_root_tail *tail = AFFS_ROOT_TAIL(sb, bh);
- tail->bm_flag = cpu_to_be32(clean);
+ lock_buffer(bh);
secs_to_datestamp(get_seconds(), &tail->disk_change);
affs_fix_checksum(sb, bh);
+ unlock_buffer(bh);
+
mark_buffer_dirty(bh);
if (wait)
sync_dirty_buffer(bh);
@@ -45,9 +48,7 @@ affs_put_super(struct super_block *sb)
struct affs_sb_info *sbi = AFFS_SB(sb);
pr_debug("AFFS: put_super()\n");
- if (!(sb->s_flags & MS_RDONLY) && sb->s_dirt)
- affs_commit_super(sb, 1, 1);
-
+ cancel_delayed_work_sync(&sbi->sb_work);
kfree(sbi->s_prefix);
affs_free_bitmap(sb);
affs_brelse(sbi->s_root_bh);
@@ -55,26 +56,43 @@ affs_put_super(struct super_block *sb)
sb->s_fs_info = NULL;
}
-static void
-affs_write_super(struct super_block *sb)
+static int
+affs_sync_fs(struct super_block *sb, int wait)
{
- lock_super(sb);
- if (!(sb->s_flags & MS_RDONLY))
- affs_commit_super(sb, 1, 2);
- sb->s_dirt = 0;
- unlock_super(sb);
+ affs_commit_super(sb, wait);
+ return 0;
+}
+
+static void flush_superblock(struct work_struct *work)
+{
+ struct affs_sb_info *sbi;
+ struct super_block *sb;
+
+ sbi = container_of(work, struct affs_sb_info, sb_work.work);
+ sb = sbi->sb;
- pr_debug("AFFS: write_super() at %lu, clean=2\n", get_seconds());
+ spin_lock(&sbi->work_lock);
+ sbi->work_queued = 0;
+ spin_unlock(&sbi->work_lock);
+
+ affs_commit_super(sb, 1);
}
-static int
-affs_sync_fs(struct super_block *sb, int wait)
+void affs_mark_sb_dirty(struct super_block *sb)
{
- lock_super(sb);
- affs_commit_super(sb, wait, 2);
- sb->s_dirt = 0;
- unlock_super(sb);
- return 0;
+ struct affs_sb_info *sbi = AFFS_SB(sb);
+ unsigned long delay;
+
+ if (sb->s_flags & MS_RDONLY)
+ return;
+
+ spin_lock(&sbi->work_lock);
+ if (!sbi->work_queued) {
+ delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+ queue_delayed_work(system_long_wq, &sbi->sb_work, delay);
+ sbi->work_queued = 1;
+ }
+ spin_unlock(&sbi->work_lock);
}
static struct kmem_cache * affs_inode_cachep;
@@ -138,7 +156,6 @@ static const struct super_operations affs_sops = {
.write_inode = affs_write_inode,
.evict_inode = affs_evict_inode,
.put_super = affs_put_super,
- .write_super = affs_write_super,
.sync_fs = affs_sync_fs,
.statfs = affs_statfs,
.remount_fs = affs_remount,
@@ -305,8 +322,11 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;
sb->s_fs_info = sbi;
+ sbi->sb = sb;
mutex_init(&sbi->s_bmlock);
spin_lock_init(&sbi->symlink_lock);
+ spin_lock_init(&sbi->work_lock);
+ INIT_DELAYED_WORK(&sbi->sb_work, flush_superblock);
if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
&blocksize,&sbi->s_prefix,
@@ -531,6 +551,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
return -EINVAL;
}
+ flush_delayed_work_sync(&sbi->sb_work);
replace_mount_options(sb, new_opts);
sbi->s_flags = mount_flags;
@@ -549,10 +570,9 @@ affs_remount(struct super_block *sb, int *flags, char *data)
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
return 0;
- if (*flags & MS_RDONLY) {
- affs_write_super(sb);
+ if (*flags & MS_RDONLY)
affs_free_bitmap(sb);
- } else
+ else
res = affs_init_bitmap(sb, flags);
return res;
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e22dc4b4a503..db477906ba4f 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -20,16 +20,16 @@
#include "internal.h"
static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd);
+ unsigned int flags);
static int afs_dir_open(struct inode *inode, struct file *file);
static int afs_readdir(struct file *file, void *dirent, filldir_t filldir);
-static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd);
+static int afs_d_revalidate(struct dentry *dentry, unsigned int flags);
static int afs_d_delete(const struct dentry *dentry);
static void afs_d_release(struct dentry *dentry);
static int afs_lookup_filldir(void *_cookie, const char *name, int nlen,
loff_t fpos, u64 ino, unsigned dtype);
static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd);
+ bool excl);
static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
static int afs_rmdir(struct inode *dir, struct dentry *dentry);
static int afs_unlink(struct inode *dir, struct dentry *dentry);
@@ -516,7 +516,7 @@ out:
* look up an entry in a directory
*/
static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct afs_vnode *vnode;
struct afs_fid fid;
@@ -598,7 +598,7 @@ success:
* - NOTE! the hit can be a negative hit too, so we can't assume we have an
* inode
*/
-static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct afs_vnode *vnode, *dir;
struct afs_fid uninitialized_var(fid);
@@ -607,7 +607,7 @@ static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
void *dir_version;
int ret;
- if (nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
vnode = AFS_FS_I(dentry->d_inode);
@@ -949,7 +949,7 @@ error:
* create a regular file on an AFS filesystem
*/
static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct afs_file_status status;
struct afs_callback cb;
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 298cf8919ec7..9682c33d5daf 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -22,7 +22,7 @@
static struct dentry *afs_mntpt_lookup(struct inode *dir,
struct dentry *dentry,
- struct nameidata *nd);
+ unsigned int flags);
static int afs_mntpt_open(struct inode *inode, struct file *file);
static void afs_mntpt_expiry_timed_out(struct work_struct *work);
@@ -104,7 +104,7 @@ out:
*/
static struct dentry *afs_mntpt_lookup(struct inode *dir,
struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
_enter("%p,%p{%p{%s},%s}",
dir,
diff --git a/fs/afs/super.c b/fs/afs/super.c
index f02b31e7e648..df8c6047c2a1 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -395,7 +395,7 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
as->volume = vol;
/* allocate a deviceless superblock */
- sb = sget(fs_type, afs_test_super, afs_set_super, as);
+ sb = sget(fs_type, afs_test_super, afs_set_super, flags, as);
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
afs_put_volume(vol);
@@ -406,7 +406,6 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
if (!sb->s_root) {
/* initial superblock/root creation */
_debug("create");
- sb->s_flags = flags;
ret = afs_fill_super(sb, &params);
if (ret < 0) {
deactivate_locked_super(sb);
diff --git a/fs/aio.c b/fs/aio.c
index 55c4c7656053..71f613cf4a85 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -56,13 +56,6 @@ static struct kmem_cache *kioctx_cachep;
static struct workqueue_struct *aio_wq;
-/* Used for rare fput completion. */
-static void aio_fput_routine(struct work_struct *);
-static DECLARE_WORK(fput_work, aio_fput_routine);
-
-static DEFINE_SPINLOCK(fput_lock);
-static LIST_HEAD(fput_head);
-
static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
@@ -479,7 +472,6 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
{
unsigned short allocated, to_alloc;
long avail;
- bool called_fput = false;
struct kiocb *req, *n;
struct aio_ring *ring;
@@ -495,28 +487,11 @@ static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
if (allocated == 0)
goto out;
-retry:
spin_lock_irq(&ctx->ctx_lock);
ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
BUG_ON(avail < 0);
- if (avail == 0 && !called_fput) {
- /*
- * Handle a potential starvation case. It is possible that
- * we hold the last reference on a struct file, causing us
- * to delay the final fput to non-irq context. In this case,
- * ctx->reqs_active is artificially high. Calling the fput
- * routine here may free up a slot in the event completion
- * ring, allowing this allocation to succeed.
- */
- kunmap_atomic(ring);
- spin_unlock_irq(&ctx->ctx_lock);
- aio_fput_routine(NULL);
- called_fput = true;
- goto retry;
- }
-
if (avail < allocated) {
/* Trim back the number of requests. */
list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
@@ -570,36 +545,6 @@ static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
wake_up_all(&ctx->wait);
}
-static void aio_fput_routine(struct work_struct *data)
-{
- spin_lock_irq(&fput_lock);
- while (likely(!list_empty(&fput_head))) {
- struct kiocb *req = list_kiocb(fput_head.next);
- struct kioctx *ctx = req->ki_ctx;
-
- list_del(&req->ki_list);
- spin_unlock_irq(&fput_lock);
-
- /* Complete the fput(s) */
- if (req->ki_filp != NULL)
- fput(req->ki_filp);
-
- /* Link the iocb into the context's free list */
- rcu_read_lock();
- spin_lock_irq(&ctx->ctx_lock);
- really_put_req(ctx, req);
- /*
- * at that point ctx might've been killed, but actual
- * freeing is RCU'd
- */
- spin_unlock_irq(&ctx->ctx_lock);
- rcu_read_unlock();
-
- spin_lock_irq(&fput_lock);
- }
- spin_unlock_irq(&fput_lock);
-}
-
/* __aio_put_req
* Returns true if this put was the last user of the request.
*/
@@ -618,21 +563,9 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
req->ki_cancel = NULL;
req->ki_retry = NULL;
- /*
- * Try to optimize the aio and eventfd file* puts, by avoiding to
- * schedule work in case it is not final fput() time. In normal cases,
- * we would not be holding the last reference to the file*, so
- * this function will be executed w/out any aio kthread wakeup.
- */
- if (unlikely(!fput_atomic(req->ki_filp))) {
- spin_lock(&fput_lock);
- list_add(&req->ki_list, &fput_head);
- spin_unlock(&fput_lock);
- schedule_work(&fput_work);
- } else {
- req->ki_filp = NULL;
- really_put_req(ctx, req);
- }
+ fput(req->ki_filp);
+ req->ki_filp = NULL;
+ really_put_req(ctx, req);
return 1;
}
diff --git a/fs/attr.c b/fs/attr.c
index 0da90951d277..29e38a1f7f77 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -171,6 +171,8 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
struct timespec now;
unsigned int ia_valid = attr->ia_valid;
+ WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
+
if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_TIMES_SET)) {
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
@@ -250,5 +252,4 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
return error;
}
-
EXPORT_SYMBOL(notify_change);
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index aa9103f8f01b..abf645c1703b 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -257,8 +257,8 @@ static int autofs_dev_ioctl_open_mountpoint(const char *name, dev_t devid)
* corresponding to the autofs fs we want to open.
*/
- filp = dentry_open(path.dentry, path.mnt, O_RDONLY,
- current_cred());
+ filp = dentry_open(&path, O_RDONLY, current_cred());
+ path_put(&path);
if (IS_ERR(filp)) {
err = PTR_ERR(filp);
goto out;
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 75e5f1c8e028..e7396cfdb109 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -32,7 +32,7 @@ static long autofs4_root_ioctl(struct file *,unsigned int,unsigned long);
static long autofs4_root_compat_ioctl(struct file *,unsigned int,unsigned long);
#endif
static int autofs4_dir_open(struct inode *inode, struct file *file);
-static struct dentry *autofs4_lookup(struct inode *,struct dentry *, struct nameidata *);
+static struct dentry *autofs4_lookup(struct inode *,struct dentry *, unsigned int);
static struct vfsmount *autofs4_d_automount(struct path *);
static int autofs4_d_manage(struct dentry *, bool);
static void autofs4_dentry_release(struct dentry *);
@@ -458,7 +458,7 @@ int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
}
/* Lookups in the root directory */
-static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct autofs_sb_info *sbi;
struct autofs_info *ino;
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 1b35d6bd06b0..b1342ffb3cf6 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -173,13 +173,13 @@ static const struct file_operations bad_file_ops =
};
static int bad_inode_create (struct inode *dir, struct dentry *dentry,
- umode_t mode, struct nameidata *nd)
+ umode_t mode, bool excl)
{
return -EIO;
}
static struct dentry *bad_inode_lookup(struct inode *dir,
- struct dentry *dentry, struct nameidata *nd)
+ struct dentry *dentry, unsigned int flags)
{
return ERR_PTR(-EIO);
}
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index e18da23d42b5..cf7f3c67c8b7 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -34,7 +34,7 @@ static int befs_readdir(struct file *, void *, filldir_t);
static int befs_get_block(struct inode *, sector_t, struct buffer_head *, int);
static int befs_readpage(struct file *file, struct page *page);
static sector_t befs_bmap(struct address_space *mapping, sector_t block);
-static struct dentry *befs_lookup(struct inode *, struct dentry *, struct nameidata *);
+static struct dentry *befs_lookup(struct inode *, struct dentry *, unsigned int);
static struct inode *befs_iget(struct super_block *, unsigned long);
static struct inode *befs_alloc_inode(struct super_block *sb);
static void befs_destroy_inode(struct inode *inode);
@@ -159,7 +159,7 @@ befs_get_block(struct inode *inode, sector_t block,
}
static struct dentry *
-befs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct inode *inode = NULL;
struct super_block *sb = dir->i_sb;
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c
index d12c7966db27..2785ef91191a 100644
--- a/fs/bfs/dir.c
+++ b/fs/bfs/dir.c
@@ -85,7 +85,7 @@ const struct file_operations bfs_dir_operations = {
extern void dump_imap(const char *, struct super_block *);
static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
int err;
struct inode *inode;
@@ -133,7 +133,7 @@ static int bfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
}
static struct dentry *bfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct inode *inode = NULL;
struct buffer_head *bh;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c2bbe1fb1326..1e519195d45b 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1710,3 +1710,39 @@ int __invalidate_device(struct block_device *bdev, bool kill_dirty)
return res;
}
EXPORT_SYMBOL(__invalidate_device);
+
+void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+{
+ struct inode *inode, *old_inode = NULL;
+
+ spin_lock(&inode_sb_list_lock);
+ list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
+ struct address_space *mapping = inode->i_mapping;
+
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
+ mapping->nrpages == 0) {
+ spin_unlock(&inode->i_lock);
+ continue;
+ }
+ __iget(inode);
+ spin_unlock(&inode->i_lock);
+ spin_unlock(&inode_sb_list_lock);
+ /*
+ * We hold a reference to 'inode' so it couldn't have been
+ * removed from s_inodes list while we dropped the
+ * inode_sb_list_lock. We cannot iput the inode now as we can
+ * be holding the last reference and we cannot iput it under
+ * inode_sb_list_lock. So we keep the reference and iput it
+ * later.
+ */
+ iput(old_inode);
+ old_inode = inode;
+
+ func(I_BDEV(inode), arg);
+
+ spin_lock(&inode_sb_list_lock);
+ }
+ spin_unlock(&inode_sb_list_lock);
+ iput(old_inode);
+}
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 7301cdb4b2cb..a383c18e74e8 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -301,10 +301,14 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
goto out;
eb = path->nodes[level];
- if (!eb) {
- WARN_ON(1);
- ret = 1;
- goto out;
+ while (!eb) {
+ if (!level) {
+ WARN_ON(1);
+ ret = 1;
+ goto out;
+ }
+ level--;
+ eb = path->nodes[level];
}
ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
@@ -835,6 +839,7 @@ again:
}
ret = __add_delayed_refs(head, delayed_ref_seq,
&prefs_delayed);
+ mutex_unlock(&head->mutex);
if (ret) {
spin_unlock(&delayed_refs->lock);
goto out;
@@ -928,8 +933,6 @@ again:
}
out:
- if (head)
- mutex_unlock(&head->mutex);
btrfs_free_path(path);
while (!list_empty(&prefs)) {
ref = list_first_entry(&prefs, struct __prelim_ref, list);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 15cbc2bf4ff0..8206b3900587 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1024,11 +1024,18 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
if (!looped && !tm)
return 0;
/*
- * we must have key remove operations in the log before the
- * replace operation.
+ * if there are no tree operation for the oldest root, we simply
+ * return it. this should only happen if that (old) root is at
+ * level 0.
*/
- BUG_ON(!tm);
+ if (!tm)
+ break;
+ /*
+ * if there's an operation that's not a root replacement, we
+ * found the oldest version of our root. normally, we'll find a
+ * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
+ */
if (tm->op != MOD_LOG_ROOT_REPLACE)
break;
@@ -1087,11 +1094,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
tm->generation);
break;
case MOD_LOG_KEY_ADD:
- if (tm->slot != n - 1) {
- o_dst = btrfs_node_key_ptr_offset(tm->slot);
- o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
- memmove_extent_buffer(eb, o_dst, o_src, p_size);
- }
+ /* if a move operation is needed it's in the log */
n--;
break;
case MOD_LOG_MOVE_KEYS:
@@ -1192,16 +1195,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
}
tm = tree_mod_log_search(root->fs_info, logical, time_seq);
- /*
- * there was an item in the log when __tree_mod_log_oldest_root
- * returned. this one must not go away, because the time_seq passed to
- * us must be blocking its removal.
- */
- BUG_ON(!tm);
-
if (old_root)
- eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
- root->nodesize);
+ eb = alloc_dummy_extent_buffer(logical, root->nodesize);
else
eb = btrfs_clone_extent_buffer(root->node);
btrfs_tree_read_unlock(root->node);
@@ -1216,7 +1211,10 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
btrfs_set_header_level(eb, old_root->level);
btrfs_set_header_generation(eb, old_generation);
}
- __tree_mod_log_rewind(eb, time_seq, tm);
+ if (tm)
+ __tree_mod_log_rewind(eb, time_seq, tm);
+ else
+ WARN_ON(btrfs_header_level(eb) != 0);
extent_buffer_get(eb);
return eb;
@@ -2995,7 +2993,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
static void insert_ptr(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_disk_key *key, u64 bytenr,
- int slot, int level, int tree_mod_log)
+ int slot, int level)
{
struct extent_buffer *lower;
int nritems;
@@ -3008,7 +3006,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
BUG_ON(slot > nritems);
BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
if (slot != nritems) {
- if (tree_mod_log && level)
+ if (level)
tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
slot, nritems - slot);
memmove_extent_buffer(lower,
@@ -3016,7 +3014,7 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
btrfs_node_key_ptr_offset(slot),
(nritems - slot) * sizeof(struct btrfs_key_ptr));
}
- if (tree_mod_log && level) {
+ if (level) {
ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
MOD_LOG_KEY_ADD);
BUG_ON(ret < 0);
@@ -3104,7 +3102,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(split);
insert_ptr(trans, root, path, &disk_key, split->start,
- path->slots[level + 1] + 1, level + 1, 1);
+ path->slots[level + 1] + 1, level + 1);
if (path->slots[level] >= mid) {
path->slots[level] -= mid;
@@ -3641,7 +3639,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
btrfs_set_header_nritems(l, mid);
btrfs_item_key(right, &disk_key, 0);
insert_ptr(trans, root, path, &disk_key, right->start,
- path->slots[1] + 1, 1, 0);
+ path->slots[1] + 1, 1);
btrfs_mark_buffer_dirty(right);
btrfs_mark_buffer_dirty(l);
@@ -3848,7 +3846,7 @@ again:
if (mid <= slot) {
btrfs_set_header_nritems(right, 0);
insert_ptr(trans, root, path, &disk_key, right->start,
- path->slots[1] + 1, 1, 0);
+ path->slots[1] + 1, 1);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
@@ -3857,7 +3855,7 @@ again:
} else {
btrfs_set_header_nritems(right, 0);
insert_ptr(trans, root, path, &disk_key, right->start,
- path->slots[1], 1, 0);
+ path->slots[1], 1);
btrfs_tree_unlock(path->nodes[0]);
free_extent_buffer(path->nodes[0]);
path->nodes[0] = right;
@@ -5121,6 +5119,18 @@ again:
if (!path->skip_locking) {
ret = btrfs_try_tree_read_lock(next);
+ if (!ret && time_seq) {
+ /*
+ * If we don't get the lock, we may be racing
+ * with push_leaf_left, holding that lock while
+ * itself waiting for the leaf we've currently
+ * locked. To solve this situation, we give up
+ * on our lock and cycle.
+ */
+ btrfs_release_path(path);
+ cond_resched();
+ goto again;
+ }
if (!ret) {
btrfs_set_path_blocking(path);
btrfs_tree_read_lock(next);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7b845ff4af99..2936ca49b3b4 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2354,12 +2354,17 @@ retry_root_backup:
BTRFS_CSUM_TREE_OBJECTID, csum_root);
if (ret)
goto recovery_tree_root;
-
csum_root->track_dirty = 1;
fs_info->generation = generation;
fs_info->last_trans_committed = generation;
+ ret = btrfs_recover_balance(fs_info);
+ if (ret) {
+ printk(KERN_WARNING "btrfs: failed to recover balance\n");
+ goto fail_block_groups;
+ }
+
ret = btrfs_init_dev_stats(fs_info);
if (ret) {
printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
@@ -2485,20 +2490,23 @@ retry_root_backup:
goto fail_trans_kthread;
}
- if (!(sb->s_flags & MS_RDONLY)) {
- down_read(&fs_info->cleanup_work_sem);
- err = btrfs_orphan_cleanup(fs_info->fs_root);
- if (!err)
- err = btrfs_orphan_cleanup(fs_info->tree_root);
- up_read(&fs_info->cleanup_work_sem);
+ if (sb->s_flags & MS_RDONLY)
+ return 0;
- if (!err)
- err = btrfs_recover_balance(fs_info->tree_root);
+ down_read(&fs_info->cleanup_work_sem);
+ if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
+ (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
+ up_read(&fs_info->cleanup_work_sem);
+ close_ctree(tree_root);
+ return ret;
+ }
+ up_read(&fs_info->cleanup_work_sem);
- if (err) {
- close_ctree(tree_root);
- return err;
- }
+ ret = btrfs_resume_balance_async(fs_info);
+ if (ret) {
+ printk(KERN_WARNING "btrfs: failed to resume balance\n");
+ close_ctree(tree_root);
+ return ret;
}
return 0;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4b5a1e1bdefb..6e1d36702ff7 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2347,12 +2347,10 @@ next:
return count;
}
-
static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
- unsigned long num_refs)
+ unsigned long num_refs,
+ struct list_head *first_seq)
{
- struct list_head *first_seq = delayed_refs->seq_head.next;
-
spin_unlock(&delayed_refs->lock);
pr_debug("waiting for more refs (num %ld, first %p)\n",
num_refs, first_seq);
@@ -2381,6 +2379,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_delayed_ref_node *ref;
struct list_head cluster;
+ struct list_head *first_seq = NULL;
int ret;
u64 delayed_start;
int run_all = count == (unsigned long)-1;
@@ -2436,8 +2435,10 @@ again:
*/
consider_waiting = 1;
num_refs = delayed_refs->num_entries;
+ first_seq = root->fs_info->tree_mod_seq_list.next;
} else {
- wait_for_more_refs(delayed_refs, num_refs);
+ wait_for_more_refs(delayed_refs,
+ num_refs, first_seq);
/*
* after waiting, things have changed. we
* dropped the lock and someone else might have
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index aaa12c1eb348..deafe19c34b5 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -929,7 +929,8 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits,
/**
- * convert_extent - convert all bits in a given range from one bit to another
+ * convert_extent_bit - convert all bits in a given range from one bit to
+ * another
* @tree: the io tree to search
* @start: the start offset in bytes
* @end: the end offset in bytes (inclusive)
@@ -3324,6 +3325,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
writepage_t writepage, void *data,
void (*flush_fn)(void *))
{
+ struct inode *inode = mapping->host;
int ret = 0;
int done = 0;
int nr_to_write_done = 0;
@@ -3334,6 +3336,18 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
int scanned = 0;
int tag;
+ /*
+ * We have to hold onto the inode so that ordered extents can do their
+ * work when the IO finishes. The alternative to this is failing to add
+ * an ordered extent if the igrab() fails there and that is a huge pain
+ * to deal with, so instead just hold onto the inode throughout the
+ * writepages operation. If it fails here we are freeing up the inode
+ * anyway and we'd rather not waste our time writing out stuff that is
+ * going to be truncated anyway.
+ */
+ if (!igrab(inode))
+ return 0;
+
pagevec_init(&pvec, 0);
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
@@ -3428,6 +3442,7 @@ retry:
index = 0;
goto retry;
}
+ btrfs_add_delayed_iput(inode);
return ret;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 70dc8ca73e25..9aa01ec2138d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1334,7 +1334,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
loff_t *ppos, size_t count, size_t ocount)
{
struct file *file = iocb->ki_filp;
- struct inode *inode = fdentry(file)->d_inode;
struct iov_iter i;
ssize_t written;
ssize_t written_buffered;
@@ -1344,18 +1343,6 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
count, ocount);
- /*
- * the generic O_DIRECT will update in-memory i_size after the
- * DIOs are done. But our endio handlers that update the on
- * disk i_size never update past the in memory i_size. So we
- * need one more update here to catch any additions to the
- * file
- */
- if (inode->i_size != BTRFS_I(inode)->disk_i_size) {
- btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
- mark_inode_dirty(inode);
- }
-
if (written < 0 || written == count)
return written;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 81296c57405a..6c4e2baa9290 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1543,29 +1543,26 @@ again:
end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
/*
- * XXX - this can go away after a few releases.
- *
- * since the only user of btrfs_remove_free_space is the tree logging
- * stuff, and the only way to test that is under crash conditions, we
- * want to have this debug stuff here just in case somethings not
- * working. Search the bitmap for the space we are trying to use to
- * make sure its actually there. If its not there then we need to stop
- * because something has gone wrong.
+ * We need to search for bits in this bitmap. We could only cover some
+ * of the extent in this bitmap thanks to how we add space, so we need
+ * to search for as much as it as we can and clear that amount, and then
+ * go searching for the next bit.
*/
search_start = *offset;
- search_bytes = *bytes;
+ search_bytes = ctl->unit;
search_bytes = min(search_bytes, end - search_start + 1);
ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
BUG_ON(ret < 0 || search_start != *offset);
- if (*offset > bitmap_info->offset && *offset + *bytes > end) {
- bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
- *bytes -= end - *offset + 1;
- *offset = end + 1;
- } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
- bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
- *bytes = 0;
- }
+ /* We may have found more bits than what we need */
+ search_bytes = min(search_bytes, *bytes);
+
+ /* Cannot clear past the end of the bitmap */
+ search_bytes = min(search_bytes, end - search_start + 1);
+
+ bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
+ *offset += search_bytes;
+ *bytes -= search_bytes;
if (*bytes) {
struct rb_node *next = rb_next(&bitmap_info->offset_index);
@@ -1596,7 +1593,7 @@ again:
* everything over again.
*/
search_start = *offset;
- search_bytes = *bytes;
+ search_bytes = ctl->unit;
ret = search_bitmap(ctl, bitmap_info, &search_start,
&search_bytes);
if (ret < 0 || search_start != *offset)
@@ -1879,12 +1876,14 @@ int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
{
struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
struct btrfs_free_space *info;
- struct btrfs_free_space *next_info = NULL;
int ret = 0;
spin_lock(&ctl->tree_lock);
again:
+ if (!bytes)
+ goto out_lock;
+
info = tree_search_offset(ctl, offset, 0, 0);
if (!info) {
/*
@@ -1905,88 +1904,48 @@ again:
}
}
- if (info->bytes < bytes && rb_next(&info->offset_index)) {
- u64 end;
- next_info = rb_entry(rb_next(&info->offset_index),
- struct btrfs_free_space,
- offset_index);
-
- if (next_info->bitmap)
- end = next_info->offset +
- BITS_PER_BITMAP * ctl->unit - 1;
- else
- end = next_info->offset + next_info->bytes;
-
- if (next_info->bytes < bytes ||
- next_info->offset > offset || offset > end) {
- printk(KERN_CRIT "Found free space at %llu, size %llu,"
- " trying to use %llu\n",
- (unsigned long long)info->offset,
- (unsigned long long)info->bytes,
- (unsigned long long)bytes);
- WARN_ON(1);
- ret = -EINVAL;
- goto out_lock;
- }
-
- info = next_info;
- }
-
- if (info->bytes == bytes) {
+ if (!info->bitmap) {
unlink_free_space(ctl, info);
- if (info->bitmap) {
- kfree(info->bitmap);
- ctl->total_bitmaps--;
- }
- kmem_cache_free(btrfs_free_space_cachep, info);
- ret = 0;
- goto out_lock;
- }
-
- if (!info->bitmap && info->offset == offset) {
- unlink_free_space(ctl, info);
- info->offset += bytes;
- info->bytes -= bytes;
- ret = link_free_space(ctl, info);
- WARN_ON(ret);
- goto out_lock;
- }
+ if (offset == info->offset) {
+ u64 to_free = min(bytes, info->bytes);
+
+ info->bytes -= to_free;
+ info->offset += to_free;
+ if (info->bytes) {
+ ret = link_free_space(ctl, info);
+ WARN_ON(ret);
+ } else {
+ kmem_cache_free(btrfs_free_space_cachep, info);
+ }
- if (!info->bitmap && info->offset <= offset &&
- info->offset + info->bytes >= offset + bytes) {
- u64 old_start = info->offset;
- /*
- * we're freeing space in the middle of the info,
- * this can happen during tree log replay
- *
- * first unlink the old info and then
- * insert it again after the hole we're creating
- */
- unlink_free_space(ctl, info);
- if (offset + bytes < info->offset + info->bytes) {
- u64 old_end = info->offset + info->bytes;
+ offset += to_free;
+ bytes -= to_free;
+ goto again;
+ } else {
+ u64 old_end = info->bytes + info->offset;
- info->offset = offset + bytes;
- info->bytes = old_end - info->offset;
+ info->bytes = offset - info->offset;
ret = link_free_space(ctl, info);
WARN_ON(ret);
if (ret)
goto out_lock;
- } else {
- /* the hole we're creating ends at the end
- * of the info struct, just free the info
- */
- kmem_cache_free(btrfs_free_space_cachep, info);
- }
- spin_unlock(&ctl->tree_lock);
- /* step two, insert a new info struct to cover
- * anything before the hole
- */
- ret = btrfs_add_free_space(block_group, old_start,
- offset - old_start);
- WARN_ON(ret); /* -ENOMEM */
- goto out;
+ /* Not enough bytes in this entry to satisfy us */
+ if (old_end < offset + bytes) {
+ bytes -= old_end - offset;
+ offset = old_end;
+ goto again;
+ } else if (old_end == offset + bytes) {
+ /* all done */
+ goto out_lock;
+ }
+ spin_unlock(&ctl->tree_lock);
+
+ ret = btrfs_add_free_space(block_group, offset + bytes,
+ old_end - (offset + bytes));
+ WARN_ON(ret);
+ goto out;
+ }
}
ret = remove_from_bitmap(ctl, info, &offset, &bytes);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d8bb0dbc4941..fb8d671d00e6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3754,7 +3754,7 @@ void btrfs_evict_inode(struct inode *inode)
btrfs_wait_ordered_range(inode, 0, (u64)-1);
if (root->fs_info->log_root_recovering) {
- BUG_ON(!test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+ BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags));
goto no_delete;
}
@@ -4247,7 +4247,7 @@ static void btrfs_dentry_release(struct dentry *dentry)
}
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct dentry *ret;
@@ -4893,7 +4893,7 @@ out_unlock:
}
static int btrfs_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, struct nameidata *nd)
+ umode_t mode, bool excl)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(dir)->root;
@@ -5876,8 +5876,17 @@ map:
bh_result->b_size = len;
bh_result->b_bdev = em->bdev;
set_buffer_mapped(bh_result);
- if (create && !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
- set_buffer_new(bh_result);
+ if (create) {
+ if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
+ set_buffer_new(bh_result);
+
+ /*
+ * Need to update the i_size under the extent lock so buffered
+ * readers will get the updated i_size when we unlock.
+ */
+ if (start + len > i_size_read(inode))
+ i_size_write(inode, start + len);
+ }
free_extent_map(em);
@@ -6360,12 +6369,48 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
*/
ordered = btrfs_lookup_ordered_range(inode, lockstart,
lockend - lockstart + 1);
- if (!ordered)
+
+ /*
+ * We need to make sure there are no buffered pages in this
+ * range either, we could have raced between the invalidate in
+ * generic_file_direct_write and locking the extent. The
+ * invalidate needs to happen so that reads after a write do not
+ * get stale data.
+ */
+ if (!ordered && (!writing ||
+ !test_range_bit(&BTRFS_I(inode)->io_tree,
+ lockstart, lockend, EXTENT_UPTODATE, 0,
+ cached_state)))
break;
+
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state, GFP_NOFS);
- btrfs_start_ordered_extent(inode, ordered, 1);
- btrfs_put_ordered_extent(ordered);
+
+ if (ordered) {
+ btrfs_start_ordered_extent(inode, ordered, 1);
+ btrfs_put_ordered_extent(ordered);
+ } else {
+ /* Screw you mmap */
+ ret = filemap_write_and_wait_range(file->f_mapping,
+ lockstart,
+ lockend);
+ if (ret)
+ goto out;
+
+ /*
+ * If we found a page that couldn't be invalidated just
+ * fall back to buffered.
+ */
+ ret = invalidate_inode_pages2_range(file->f_mapping,
+ lockstart >> PAGE_CACHE_SHIFT,
+ lockend >> PAGE_CACHE_SHIFT);
+ if (ret) {
+ if (ret == -EBUSY)
+ ret = 0;
+ goto out;
+ }
+ }
+
cond_resched();
}
@@ -6942,7 +6987,7 @@ void btrfs_destroy_inode(struct inode *inode)
struct btrfs_ordered_extent *ordered;
struct btrfs_root *root = BTRFS_I(inode)->root;
- WARN_ON(!list_empty(&inode->i_dentry));
+ WARN_ON(!hlist_empty(&inode->i_dentry));
WARN_ON(inode->i_data.nrpages);
WARN_ON(BTRFS_I(inode)->outstanding_extents);
WARN_ON(BTRFS_I(inode)->reserved_extents);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0e92e5763005..1e9f6c019ad0 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3268,7 +3268,7 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
if (fs_info->sb->s_flags & MS_RDONLY)
return -EROFS;
- ret = mnt_want_write(file->f_path.mnt);
+ ret = mnt_want_write_file(file);
if (ret)
return ret;
@@ -3338,7 +3338,7 @@ out_bargs:
out:
mutex_unlock(&fs_info->balance_mutex);
mutex_unlock(&fs_info->volume_mutex);
- mnt_drop_write(file->f_path.mnt);
+ mnt_drop_write_file(file);
return ret;
}
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 497c530724cf..e440aa653c30 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -339,7 +339,7 @@ struct btrfs_ioctl_get_dev_stats {
#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
struct btrfs_ioctl_vol_args_v2)
-#define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
+#define BTRFS_IOC_SUBVOL_GETFLAGS _IOR(BTRFS_IOCTL_MAGIC, 25, __u64)
#define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
#define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
struct btrfs_ioctl_scrub_args)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 0eb9a4da069e..b19d75567728 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1068,7 +1068,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
}
bdev = fs_devices->latest_bdev;
- s = sget(fs_type, btrfs_test_super, btrfs_set_super, fs_info);
+ s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | MS_NOSEC,
+ fs_info);
if (IS_ERR(s)) {
error = PTR_ERR(s);
goto error_close_devices;
@@ -1082,7 +1083,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
} else {
char b[BDEVNAME_SIZE];
- s->s_flags = flags | MS_NOSEC;
strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
btrfs_sb(s)->bdev_holder = fs_type;
error = btrfs_fill_super(s, fs_devices, data,
@@ -1187,6 +1187,10 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
if (ret)
goto restore;
+ ret = btrfs_resume_balance_async(fs_info);
+ if (ret)
+ goto restore;
+
sb->s_flags &= ~MS_RDONLY;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 2017d0ff511c..8abeae4224f9 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -690,6 +690,8 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
kfree(name);
iput(inode);
+
+ btrfs_run_delayed_items(trans, root);
return ret;
}
@@ -895,6 +897,7 @@ again:
ret = btrfs_unlink_inode(trans, root, dir,
inode, victim_name,
victim_name_len);
+ btrfs_run_delayed_items(trans, root);
}
kfree(victim_name);
ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
@@ -1475,6 +1478,9 @@ again:
ret = btrfs_unlink_inode(trans, root, dir, inode,
name, name_len);
BUG_ON(ret);
+
+ btrfs_run_delayed_items(trans, root);
+
kfree(name);
iput(inode);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 8a3d2594b807..ecaad40e7ef4 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2845,31 +2845,48 @@ out:
static int balance_kthread(void *data)
{
- struct btrfs_balance_control *bctl =
- (struct btrfs_balance_control *)data;
- struct btrfs_fs_info *fs_info = bctl->fs_info;
+ struct btrfs_fs_info *fs_info = data;
int ret = 0;
mutex_lock(&fs_info->volume_mutex);
mutex_lock(&fs_info->balance_mutex);
- set_balance_control(bctl);
-
- if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
- printk(KERN_INFO "btrfs: force skipping balance\n");
- } else {
+ if (fs_info->balance_ctl) {
printk(KERN_INFO "btrfs: continuing balance\n");
- ret = btrfs_balance(bctl, NULL);
+ ret = btrfs_balance(fs_info->balance_ctl, NULL);
}
mutex_unlock(&fs_info->balance_mutex);
mutex_unlock(&fs_info->volume_mutex);
+
return ret;
}
-int btrfs_recover_balance(struct btrfs_root *tree_root)
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
struct task_struct *tsk;
+
+ spin_lock(&fs_info->balance_lock);
+ if (!fs_info->balance_ctl) {
+ spin_unlock(&fs_info->balance_lock);
+ return 0;
+ }
+ spin_unlock(&fs_info->balance_lock);
+
+ if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+ printk(KERN_INFO "btrfs: force skipping balance\n");
+ return 0;
+ }
+
+ tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
+ if (IS_ERR(tsk))
+ return PTR_ERR(tsk);
+
+ return 0;
+}
+
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
+{
struct btrfs_balance_control *bctl;
struct btrfs_balance_item *item;
struct btrfs_disk_balance_args disk_bargs;
@@ -2882,29 +2899,30 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
if (!path)
return -ENOMEM;
- bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
- if (!bctl) {
- ret = -ENOMEM;
- goto out;
- }
-
key.objectid = BTRFS_BALANCE_OBJECTID;
key.type = BTRFS_BALANCE_ITEM_KEY;
key.offset = 0;
- ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+ ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
if (ret < 0)
- goto out_bctl;
+ goto out;
if (ret > 0) { /* ret = -ENOENT; */
ret = 0;
- goto out_bctl;
+ goto out;
+ }
+
+ bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+ if (!bctl) {
+ ret = -ENOMEM;
+ goto out;
}
leaf = path->nodes[0];
item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
- bctl->fs_info = tree_root->fs_info;
- bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
+ bctl->fs_info = fs_info;
+ bctl->flags = btrfs_balance_flags(leaf, item);
+ bctl->flags |= BTRFS_BALANCE_RESUME;
btrfs_balance_data(leaf, item, &disk_bargs);
btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
@@ -2913,14 +2931,13 @@ int btrfs_recover_balance(struct btrfs_root *tree_root)
btrfs_balance_sys(leaf, item, &disk_bargs);
btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
- tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
- if (IS_ERR(tsk))
- ret = PTR_ERR(tsk);
- else
- goto out;
+ mutex_lock(&fs_info->volume_mutex);
+ mutex_lock(&fs_info->balance_mutex);
-out_bctl:
- kfree(bctl);
+ set_balance_control(bctl);
+
+ mutex_unlock(&fs_info->balance_mutex);
+ mutex_unlock(&fs_info->volume_mutex);
out:
btrfs_free_path(path);
return ret;
@@ -4061,16 +4078,18 @@ static void btrfs_end_bio(struct bio *bio, int err)
BUG_ON(stripe_index >= bbio->num_stripes);
dev = bbio->stripes[stripe_index].dev;
- if (bio->bi_rw & WRITE)
- btrfs_dev_stat_inc(dev,
- BTRFS_DEV_STAT_WRITE_ERRS);
- else
- btrfs_dev_stat_inc(dev,
- BTRFS_DEV_STAT_READ_ERRS);
- if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
- btrfs_dev_stat_inc(dev,
- BTRFS_DEV_STAT_FLUSH_ERRS);
- btrfs_dev_stat_print_on_error(dev);
+ if (dev->bdev) {
+ if (bio->bi_rw & WRITE)
+ btrfs_dev_stat_inc(dev,
+ BTRFS_DEV_STAT_WRITE_ERRS);
+ else
+ btrfs_dev_stat_inc(dev,
+ BTRFS_DEV_STAT_READ_ERRS);
+ if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+ btrfs_dev_stat_inc(dev,
+ BTRFS_DEV_STAT_FLUSH_ERRS);
+ btrfs_dev_stat_print_on_error(dev);
+ }
}
}
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 74366f27a76b..95f6637614db 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -281,7 +281,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_root *root, char *path);
int btrfs_balance(struct btrfs_balance_control *bctl,
struct btrfs_ioctl_balance_args *bargs);
-int btrfs_recover_balance(struct btrfs_root *tree_root);
+int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info);
+int btrfs_recover_balance(struct btrfs_fs_info *fs_info);
int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
diff --git a/fs/buffer.c b/fs/buffer.c
index 838a9cf246bd..c7062c896d7c 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1036,6 +1036,9 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
static struct buffer_head *
__getblk_slow(struct block_device *bdev, sector_t block, int size)
{
+ int ret;
+ struct buffer_head *bh;
+
/* Size must be multiple of hard sectorsize */
if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
(size < 512 || size > PAGE_SIZE))) {
@@ -1048,20 +1051,21 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
return NULL;
}
- for (;;) {
- struct buffer_head * bh;
- int ret;
+retry:
+ bh = __find_get_block(bdev, block, size);
+ if (bh)
+ return bh;
+ ret = grow_buffers(bdev, block, size);
+ if (ret == 0) {
+ free_more_memory();
+ goto retry;
+ } else if (ret > 0) {
bh = __find_get_block(bdev, block, size);
if (bh)
return bh;
-
- ret = grow_buffers(bdev, block, size);
- if (ret < 0)
- return NULL;
- if (ret == 0)
- free_more_memory();
}
+ return NULL;
}
/*
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 7f0771d3894e..b0b5f7cdfffa 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -567,7 +567,7 @@ lookup_again:
if (ret < 0)
goto create_error;
start = jiffies;
- ret = vfs_create(dir->d_inode, next, S_IFREG, NULL);
+ ret = vfs_create(dir->d_inode, next, S_IFREG, true);
cachefiles_hist(cachefiles_create_histogram, start);
if (ret < 0)
goto create_error;
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 0e3c0924cc3a..c0353dfac51f 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -891,6 +891,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
struct cachefiles_cache *cache;
mm_segment_t old_fs;
struct file *file;
+ struct path path;
loff_t pos, eof;
size_t len;
void *data;
@@ -916,10 +917,9 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
/* write the page to the backing filesystem and let it store it in its
* own time */
- dget(object->backer);
- mntget(cache->mnt);
- file = dentry_open(object->backer, cache->mnt, O_RDWR,
- cache->cache_cred);
+ path.mnt = cache->mnt;
+ path.dentry = object->backer;
+ file = dentry_open(&path, O_RDWR, cache->cache_cred);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
} else {
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 3e8094be4604..00894ff9246c 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -576,7 +576,7 @@ static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
* the MDS so that it gets our 'caps wanted' value in a single op.
*/
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -594,14 +594,6 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
if (err < 0)
return ERR_PTR(err);
- /* open (but not create!) intent? */
- if (nd &&
- (nd->flags & LOOKUP_OPEN) &&
- !(nd->intent.open.flags & O_CREAT)) {
- int mode = nd->intent.open.create_mode & ~current->fs->umask;
- return ceph_lookup_open(dir, dentry, nd, mode, 1);
- }
-
/* can we conclude ENOENT locally? */
if (dentry->d_inode == NULL) {
struct ceph_inode_info *ci = ceph_inode(dir);
@@ -642,13 +634,51 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
return dentry;
}
+int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned flags, umode_t mode,
+ int *opened)
+{
+ int err;
+ struct dentry *res = NULL;
+
+ if (!(flags & O_CREAT)) {
+ if (dentry->d_name.len > NAME_MAX)
+ return -ENAMETOOLONG;
+
+ err = ceph_init_dentry(dentry);
+ if (err < 0)
+ return err;
+
+ return ceph_lookup_open(dir, dentry, file, flags, mode, opened);
+ }
+
+ if (d_unhashed(dentry)) {
+ res = ceph_lookup(dir, dentry, 0);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ if (res)
+ dentry = res;
+ }
+
+ /* We don't deal with positive dentries here */
+ if (dentry->d_inode)
+ return finish_no_open(file, res);
+
+ *opened |= FILE_CREATED;
+ err = ceph_lookup_open(dir, dentry, file, flags, mode, opened);
+ dput(res);
+
+ return err;
+}
+
/*
* If we do a create but get no trace back from the MDS, follow up with
* a lookup (the VFS expects us to link up the provided dentry).
*/
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
- struct dentry *result = ceph_lookup(dir, dentry, NULL);
+ struct dentry *result = ceph_lookup(dir, dentry, 0);
if (result && !IS_ERR(result)) {
/*
@@ -700,25 +730,9 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry,
}
static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
- dout("create in dir %p dentry %p name '%.*s'\n",
- dir, dentry, dentry->d_name.len, dentry->d_name.name);
-
- if (ceph_snap(dir) != CEPH_NOSNAP)
- return -EROFS;
-
- if (nd) {
- BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
- dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
- /* hrm, what should i do here if we get aliased? */
- if (IS_ERR(dentry))
- return PTR_ERR(dentry);
- return 0;
- }
-
- /* fall back to mknod */
- return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
+ return ceph_mknod(dir, dentry, mode, 0);
}
static int ceph_symlink(struct inode *dir, struct dentry *dentry,
@@ -1028,12 +1042,12 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
/*
* Check if cached dentry can be trusted.
*/
-static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
int valid = 0;
struct inode *dir;
- if (nd && nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
@@ -1080,7 +1094,7 @@ static void ceph_d_release(struct dentry *dentry)
}
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
/*
* Eventually, we'll want to revalidate snapped metadata
@@ -1357,6 +1371,7 @@ const struct inode_operations ceph_dir_iops = {
.rmdir = ceph_unlink,
.rename = ceph_rename,
.create = ceph_create,
+ .atomic_open = ceph_atomic_open,
};
const struct dentry_operations ceph_dentry_ops = {
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 988d4f302e48..1b81d6c31878 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -213,22 +213,15 @@ out:
* may_open() fails, the struct *file gets cleaned up (i.e.
* ceph_release gets called). So fear not!
*/
-/*
- * flags
- * path_lookup_open -> LOOKUP_OPEN
- * path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
- */
-struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd, int mode,
- int locked_dir)
+int ceph_lookup_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned flags, umode_t mode,
+ int *opened)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
- struct file *file;
struct ceph_mds_request *req;
struct dentry *ret;
int err;
- int flags = nd->intent.open.flags;
dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);
@@ -236,7 +229,7 @@ struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
/* do the open */
req = prepare_open_request(dir->i_sb, flags, mode);
if (IS_ERR(req))
- return ERR_CAST(req);
+ return PTR_ERR(req);
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
if (flags & O_CREAT) {
@@ -254,14 +247,17 @@ struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
err = ceph_handle_notrace_create(dir, dentry);
if (err)
goto out;
- file = lookup_instantiate_filp(nd, req->r_dentry, ceph_open);
- if (IS_ERR(file))
- err = PTR_ERR(file);
+ err = finish_open(file, req->r_dentry, ceph_open, opened);
out:
ret = ceph_finish_lookup(req, dentry, err);
ceph_mdsc_put_request(req);
dout("ceph_lookup_open result=%p\n", ret);
- return ret;
+
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ dput(ret);
+ return err;
}
int ceph_release(struct inode *inode, struct file *file)
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 1e67dd7305a4..7076109f014d 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -871,7 +871,7 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
if (ceph_test_opt(fsc->client, NOSHARE))
compare_super = NULL;
- sb = sget(fs_type, compare_super, ceph_set_super, fsc);
+ sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
if (IS_ERR(sb)) {
res = ERR_CAST(sb);
goto out;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index fc35036d258d..f4d5522cb619 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -806,9 +806,9 @@ extern int ceph_copy_from_page_vector(struct page **pages,
loff_t off, size_t len);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
extern int ceph_open(struct inode *inode, struct file *file);
-extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd, int mode,
- int locked_dir);
+extern int ceph_lookup_open(struct inode *dir, struct dentry *dentry,
+ struct file *od, unsigned flags,
+ umode_t mode, int *opened);
extern int ceph_release(struct inode *inode, struct file *filp);
/* dir.c */
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8b6e344eb0ba..a7610cfedf0a 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -257,7 +257,6 @@ cifs_alloc_inode(struct super_block *sb)
static void cifs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
}
@@ -638,7 +637,10 @@ cifs_do_mount(struct file_system_type *fs_type,
mnt_data.cifs_sb = cifs_sb;
mnt_data.flags = flags;
- sb = sget(fs_type, cifs_match_super, cifs_set_super, &mnt_data);
+ /* BB should we make this contingent on mount parm? */
+ flags |= MS_NODIRATIME | MS_NOATIME;
+
+ sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
if (IS_ERR(sb)) {
root = ERR_CAST(sb);
cifs_umount(cifs_sb);
@@ -649,10 +651,6 @@ cifs_do_mount(struct file_system_type *fs_type,
cFYI(1, "Use existing superblock");
cifs_umount(cifs_sb);
} else {
- sb->s_flags = flags;
- /* BB should we make this contingent on mount parm? */
- sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
-
rc = cifs_read_super(sb);
if (rc) {
root = ERR_PTR(rc);
@@ -778,6 +776,7 @@ struct file_system_type cifs_fs_type = {
};
const struct inode_operations cifs_dir_inode_ops = {
.create = cifs_create,
+ .atomic_open = cifs_atomic_open,
.lookup = cifs_lookup,
.getattr = cifs_getattr,
.unlink = cifs_unlink,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 65365358c976..1c49c5a9b27a 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -45,9 +45,12 @@ extern const struct address_space_operations cifs_addr_ops_smallbuf;
extern const struct inode_operations cifs_dir_inode_ops;
extern struct inode *cifs_root_iget(struct super_block *);
extern int cifs_create(struct inode *, struct dentry *, umode_t,
- struct nameidata *);
+ bool excl);
+extern int cifs_atomic_open(struct inode *, struct dentry *,
+ struct file *, unsigned, umode_t,
+ int *);
extern struct dentry *cifs_lookup(struct inode *, struct dentry *,
- struct nameidata *);
+ unsigned int);
extern int cifs_unlink(struct inode *dir, struct dentry *dentry);
extern int cifs_hardlink(struct dentry *, struct inode *, struct dentry *);
extern int cifs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5b400730c213..4ee522b3f66f 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -86,7 +86,31 @@ static struct {
#endif /* CONFIG_CIFS_WEAK_PW_HASH */
#endif /* CIFS_POSIX */
-/* Forward declarations */
+#ifdef CONFIG_HIGHMEM
+/*
+ * On arches that have high memory, kmap address space is limited. By
+ * serializing the kmap operations on those arches, we ensure that we don't
+ * end up with a bunch of threads in writeback with partially mapped page
+ * arrays, stuck waiting for kmap to come back. That situation prevents
+ * progress and can deadlock.
+ */
+static DEFINE_MUTEX(cifs_kmap_mutex);
+
+static inline void
+cifs_kmap_lock(void)
+{
+ mutex_lock(&cifs_kmap_mutex);
+}
+
+static inline void
+cifs_kmap_unlock(void)
+{
+ mutex_unlock(&cifs_kmap_mutex);
+}
+#else /* !CONFIG_HIGHMEM */
+#define cifs_kmap_lock() do { ; } while(0)
+#define cifs_kmap_unlock() do { ; } while(0)
+#endif /* CONFIG_HIGHMEM */
/* Mark as invalid, all open files on tree connections since they
were closed when session to server was lost */
@@ -1503,7 +1527,9 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
}
/* marshal up the page array */
+ cifs_kmap_lock();
len = rdata->marshal_iov(rdata, data_len);
+ cifs_kmap_unlock();
data_len -= len;
/* issue the read if we have any iovecs left to fill */
@@ -2069,7 +2095,9 @@ cifs_async_writev(struct cifs_writedata *wdata)
* and set the iov_len properly for each one. It may also set
* wdata->bytes too.
*/
+ cifs_kmap_lock();
wdata->marshal_iov(iov, wdata);
+ cifs_kmap_unlock();
cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 78db68a5cf44..94b7788c3189 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1653,24 +1653,26 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
* If yes, we have encountered a double deliminator
* reset the NULL character to the deliminator
*/
- if (tmp_end < end && tmp_end[1] == delim)
+ if (tmp_end < end && tmp_end[1] == delim) {
tmp_end[0] = delim;
- /* Keep iterating until we get to a single deliminator
- * OR the end
- */
- while ((tmp_end = strchr(tmp_end, delim)) != NULL &&
- (tmp_end[1] == delim)) {
- tmp_end = (char *) &tmp_end[2];
- }
+ /* Keep iterating until we get to a single
+ * deliminator OR the end
+ */
+ while ((tmp_end = strchr(tmp_end, delim))
+ != NULL && (tmp_end[1] == delim)) {
+ tmp_end = (char *) &tmp_end[2];
+ }
- /* Reset var options to point to next element */
- if (tmp_end) {
- tmp_end[0] = '\0';
- options = (char *) &tmp_end[1];
- } else
- /* Reached the end of the mount option string */
- options = end;
+ /* Reset var options to point to next element */
+ if (tmp_end) {
+ tmp_end[0] = '\0';
+ options = (char *) &tmp_end[1];
+ } else
+ /* Reached the end of the mount option
+ * string */
+ options = end;
+ }
/* Now build new password string */
temp_len = strlen(value);
@@ -3443,6 +3445,18 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
+/*
+ * On hosts with high memory, we can't currently support wsize/rsize that are
+ * larger than we can kmap at once. Cap the rsize/wsize at
+ * LAST_PKMAP * PAGE_SIZE. We'll never be able to fill a read or write request
+ * larger than that anyway.
+ */
+#ifdef CONFIG_HIGHMEM
+#define CIFS_KMAP_SIZE_LIMIT (LAST_PKMAP * PAGE_CACHE_SIZE)
+#else /* CONFIG_HIGHMEM */
+#define CIFS_KMAP_SIZE_LIMIT (1<<24)
+#endif /* CONFIG_HIGHMEM */
+
static unsigned int
cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
{
@@ -3473,6 +3487,9 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
wsize = min_t(unsigned int, wsize,
server->maxBuf - sizeof(WRITE_REQ) + 4);
+ /* limit to the amount that we can kmap at once */
+ wsize = min_t(unsigned int, wsize, CIFS_KMAP_SIZE_LIMIT);
+
/* hard limit of CIFS_MAX_WSIZE */
wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
@@ -3493,18 +3510,15 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
* MS-CIFS indicates that servers are only limited by the client's
* bufsize for reads, testing against win98se shows that it throws
* INVALID_PARAMETER errors if you try to request too large a read.
+ * OS/2 just sends back short reads.
*
- * If the server advertises a MaxBufferSize of less than one page,
- * assume that it also can't satisfy reads larger than that either.
- *
- * FIXME: Is there a better heuristic for this?
+ * If the server doesn't advertise CAP_LARGE_READ_X, then assume that
+ * it can't handle a read request larger than its MaxBufferSize either.
*/
if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
defsize = CIFS_DEFAULT_IOSIZE;
else if (server->capabilities & CAP_LARGE_READ_X)
defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
- else if (server->maxBuf >= PAGE_CACHE_SIZE)
- defsize = CIFSMaxBufSize;
else
defsize = server->maxBuf - sizeof(READ_RSP);
@@ -3517,6 +3531,9 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
if (!(server->capabilities & CAP_LARGE_READ_X))
rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
+ /* limit to the amount that we can kmap at once */
+ rsize = min_t(unsigned int, rsize, CIFS_KMAP_SIZE_LIMIT);
+
/* hard limit of CIFS_MAX_RSIZE */
rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index ec4e9a2a12f8..a180265a10b5 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -133,108 +133,141 @@ cifs_bp_rename_retry:
return full_path;
}
+/*
+ * Don't allow the separator character in a path component.
+ * The VFS will not allow "/", but "\" is allowed by posix.
+ */
+static int
+check_name(struct dentry *direntry)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
+ int i;
+
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
+ for (i = 0; i < direntry->d_name.len; i++) {
+ if (direntry->d_name.name[i] == '\\') {
+ cFYI(1, "Invalid file name");
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
+
/* Inode operations in similar order to how they appear in Linux file fs.h */
-int
-cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
- struct nameidata *nd)
+static int cifs_do_create(struct inode *inode, struct dentry *direntry,
+ int xid, struct tcon_link *tlink, unsigned oflags,
+ umode_t mode, __u32 *oplock, __u16 *fileHandle,
+ int *created)
{
int rc = -ENOENT;
- int xid;
int create_options = CREATE_NOT_DIR;
- __u32 oplock = 0;
- int oflags;
- /*
- * BB below access is probably too much for mknod to request
- * but we have to do query and setpathinfo so requesting
- * less could fail (unless we want to request getatr and setatr
- * permissions (only). At least for POSIX we do not have to
- * request so much.
- */
- int desiredAccess = GENERIC_READ | GENERIC_WRITE;
- __u16 fileHandle;
- struct cifs_sb_info *cifs_sb;
- struct tcon_link *tlink;
- struct cifs_tcon *tcon;
+ int desiredAccess;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_tcon *tcon = tlink_tcon(tlink);
char *full_path = NULL;
FILE_ALL_INFO *buf = NULL;
struct inode *newinode = NULL;
- int disposition = FILE_OVERWRITE_IF;
-
- xid = GetXid();
-
- cifs_sb = CIFS_SB(inode->i_sb);
- tlink = cifs_sb_tlink(cifs_sb);
- if (IS_ERR(tlink)) {
- FreeXid(xid);
- return PTR_ERR(tlink);
- }
- tcon = tlink_tcon(tlink);
+ int disposition;
+ *oplock = 0;
if (tcon->ses->server->oplocks)
- oplock = REQ_OPLOCK;
-
- if (nd)
- oflags = nd->intent.open.file->f_flags;
- else
- oflags = O_RDONLY | O_CREAT;
+ *oplock = REQ_OPLOCK;
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
rc = -ENOMEM;
- goto cifs_create_out;
+ goto out;
}
if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
+ !tcon->broken_posix_open &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = cifs_posix_open(full_path, &newinode,
- inode->i_sb, mode, oflags, &oplock, &fileHandle, xid);
- /* EIO could indicate that (posix open) operation is not
- supported, despite what server claimed in capability
- negotiation. EREMOTE indicates DFS junction, which is not
- handled in posix open */
-
- if (rc == 0) {
- if (newinode == NULL) /* query inode info */
+ inode->i_sb, mode, oflags, oplock, fileHandle, xid);
+ switch (rc) {
+ case 0:
+ if (newinode == NULL) {
+ /* query inode info */
goto cifs_create_get_file_info;
- else /* success, no need to query */
- goto cifs_create_set_dentry;
- } else if ((rc != -EIO) && (rc != -EREMOTE) &&
- (rc != -EOPNOTSUPP) && (rc != -EINVAL))
- goto cifs_create_out;
- /* else fallthrough to retry, using older open call, this is
- case where server does not support this SMB level, and
- falsely claims capability (also get here for DFS case
- which should be rare for path not covered on files) */
- }
+ }
+
+ if (!S_ISREG(newinode->i_mode)) {
+ /*
+ * The server may allow us to open things like
+ * FIFOs, but the client isn't set up to deal
+ * with that. If it's not a regular file, just
+ * close it and proceed as if it were a normal
+ * lookup.
+ */
+ CIFSSMBClose(xid, tcon, *fileHandle);
+ goto cifs_create_get_file_info;
+ }
+ /* success, no need to query */
+ goto cifs_create_set_dentry;
+
+ case -ENOENT:
+ goto cifs_create_get_file_info;
+
+ case -EIO:
+ case -EINVAL:
+ /*
+ * EIO could indicate that (posix open) operation is not
+ * supported, despite what server claimed in capability
+ * negotiation.
+ *
+ * POSIX open in samba versions 3.3.1 and earlier could
+ * incorrectly fail with invalid parameter.
+ */
+ tcon->broken_posix_open = true;
+ break;
+
+ case -EREMOTE:
+ case -EOPNOTSUPP:
+ /*
+ * EREMOTE indicates DFS junction, which is not handled
+ * in posix open. If either that or op not supported
+ * returned, follow the normal lookup.
+ */
+ break;
- if (nd) {
- /* if the file is going to stay open, then we
- need to set the desired access properly */
- desiredAccess = 0;
- if (OPEN_FMODE(oflags) & FMODE_READ)
- desiredAccess |= GENERIC_READ; /* is this too little? */
- if (OPEN_FMODE(oflags) & FMODE_WRITE)
- desiredAccess |= GENERIC_WRITE;
-
- if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
- disposition = FILE_CREATE;
- else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
- disposition = FILE_OVERWRITE_IF;
- else if ((oflags & O_CREAT) == O_CREAT)
- disposition = FILE_OPEN_IF;
- else
- cFYI(1, "Create flag not set in create function");
+ default:
+ goto out;
+ }
+ /*
+ * fallthrough to retry, using older open call, this is case
+ * where server does not support this SMB level, and falsely
+ * claims capability (also get here for DFS case which should be
+ * rare for path not covered on files)
+ */
}
+ desiredAccess = 0;
+ if (OPEN_FMODE(oflags) & FMODE_READ)
+ desiredAccess |= GENERIC_READ; /* is this too little? */
+ if (OPEN_FMODE(oflags) & FMODE_WRITE)
+ desiredAccess |= GENERIC_WRITE;
+
+ disposition = FILE_OVERWRITE_IF;
+ if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+ disposition = FILE_CREATE;
+ else if ((oflags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
+ disposition = FILE_OVERWRITE_IF;
+ else if ((oflags & O_CREAT) == O_CREAT)
+ disposition = FILE_OPEN_IF;
+ else
+ cFYI(1, "Create flag not set in create function");
+
/* BB add processing to set equivalent of mode - e.g. via CreateX with
ACLs */
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
rc = -ENOMEM;
- goto cifs_create_out;
+ goto out;
}
/*
@@ -250,7 +283,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
if (tcon->ses->capabilities & CAP_NT_SMBS)
rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
desiredAccess, create_options,
- &fileHandle, &oplock, buf, cifs_sb->local_nls,
+ fileHandle, oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
else
rc = -EIO; /* no NT SMB support fall into legacy open below */
@@ -259,17 +292,17 @@ cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
/* old server, retry the open legacy style */
rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
desiredAccess, create_options,
- &fileHandle, &oplock, buf, cifs_sb->local_nls,
+ fileHandle, oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
}
if (rc) {
cFYI(1, "cifs_create returned 0x%x", rc);
- goto cifs_create_out;
+ goto out;
}
/* If Open reported that we actually created a file
then we now have to set the mode if possible */
- if ((tcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) {
+ if ((tcon->unix_ext) && (*oplock & CIFS_CREATE_ACTION)) {
struct cifs_unix_set_info_args args = {
.mode = mode,
.ctime = NO_CHANGE_64,
@@ -278,6 +311,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
.device = 0,
};
+ *created |= FILE_CREATED;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
args.uid = (__u64) current_fsuid();
if (inode->i_mode & S_ISGID)
@@ -288,7 +322,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
args.uid = NO_CHANGE_64;
args.gid = NO_CHANGE_64;
}
- CIFSSMBUnixSetFileInfo(xid, tcon, &args, fileHandle,
+ CIFSSMBUnixSetFileInfo(xid, tcon, &args, *fileHandle,
current->tgid);
} else {
/* BB implement mode setting via Windows security
@@ -305,11 +339,11 @@ cifs_create_get_file_info:
inode->i_sb, xid);
else {
rc = cifs_get_inode_info(&newinode, full_path, buf,
- inode->i_sb, xid, &fileHandle);
+ inode->i_sb, xid, fileHandle);
if (newinode) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
newinode->i_mode = mode;
- if ((oplock & CIFS_CREATE_ACTION) &&
+ if ((*oplock & CIFS_CREATE_ACTION) &&
(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) {
newinode->i_uid = current_fsuid();
if (inode->i_mode & S_ISGID)
@@ -321,40 +355,139 @@ cifs_create_get_file_info:
}
cifs_create_set_dentry:
- if (rc == 0)
- d_instantiate(direntry, newinode);
- else
+ if (rc != 0) {
cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
+ goto out;
+ }
+ d_drop(direntry);
+ d_add(direntry, newinode);
- if (newinode && nd) {
- struct cifsFileInfo *pfile_info;
- struct file *filp;
+ /* ENOENT for create? How weird... */
+ rc = -ENOENT;
+ if (!newinode) {
+ CIFSSMBClose(xid, tcon, *fileHandle);
+ goto out;
+ }
+ rc = 0;
- filp = lookup_instantiate_filp(nd, direntry, generic_file_open);
- if (IS_ERR(filp)) {
- rc = PTR_ERR(filp);
- CIFSSMBClose(xid, tcon, fileHandle);
- goto cifs_create_out;
- }
+out:
+ kfree(buf);
+ kfree(full_path);
+ return rc;
+}
- pfile_info = cifs_new_fileinfo(fileHandle, filp, tlink, oplock);
- if (pfile_info == NULL) {
- fput(filp);
- CIFSSMBClose(xid, tcon, fileHandle);
- rc = -ENOMEM;
- }
- } else {
+int
+cifs_atomic_open(struct inode *inode, struct dentry *direntry,
+ struct file *file, unsigned oflags, umode_t mode,
+ int *opened)
+{
+ int rc;
+ int xid;
+ struct tcon_link *tlink;
+ struct cifs_tcon *tcon;
+ __u16 fileHandle;
+ __u32 oplock;
+ struct file *filp;
+ struct cifsFileInfo *pfile_info;
+
+ /* Posix open is only called (at lookup time) for file create now. For
+ * opens (rather than creates), because we do not know if it is a file
+ * or directory yet, and current Samba no longer allows us to do posix
+ * open on dirs, we could end up wasting an open call on what turns out
+ * to be a dir. For file opens, we wait to call posix open till
+ * cifs_open. It could be added to atomic_open in the future but the
+ * performance tradeoff of the extra network request when EISDIR or
+ * EACCES is returned would have to be weighed against the 50% reduction
+ * in network traffic in the other paths.
+ */
+ if (!(oflags & O_CREAT)) {
+ struct dentry *res = cifs_lookup(inode, direntry, 0);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ return finish_no_open(file, res);
+ }
+
+ rc = check_name(direntry);
+ if (rc)
+ return rc;
+
+ xid = GetXid();
+
+ cFYI(1, "parent inode = 0x%p name is: %s and dentry = 0x%p",
+ inode, direntry->d_name.name, direntry);
+
+ tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
+ filp = ERR_CAST(tlink);
+ if (IS_ERR(tlink))
+ goto free_xid;
+
+ tcon = tlink_tcon(tlink);
+
+ rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
+ &oplock, &fileHandle, opened);
+
+ if (rc)
+ goto out;
+
+ rc = finish_open(file, direntry, generic_file_open, opened);
+ if (rc) {
CIFSSMBClose(xid, tcon, fileHandle);
+ goto out;
}
-cifs_create_out:
- kfree(buf);
- kfree(full_path);
+ pfile_info = cifs_new_fileinfo(fileHandle, filp, tlink, oplock);
+ if (pfile_info == NULL) {
+ CIFSSMBClose(xid, tcon, fileHandle);
+ fput(filp);
+ rc = -ENOMEM;
+ }
+
+out:
cifs_put_tlink(tlink);
+free_xid:
FreeXid(xid);
return rc;
}
+int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
+ bool excl)
+{
+ int rc;
+ int xid = GetXid();
+ /*
+ * BB below access is probably too much for mknod to request
+ * but we have to do query and setpathinfo so requesting
+ * less could fail (unless we want to request getatr and setatr
+ * permissions (only). At least for POSIX we do not have to
+ * request so much.
+ */
+ unsigned oflags = O_EXCL | O_CREAT | O_RDWR;
+ struct tcon_link *tlink;
+ __u16 fileHandle;
+ __u32 oplock;
+ int created = FILE_CREATED;
+
+ cFYI(1, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p",
+ inode, direntry->d_name.name, direntry);
+
+ tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
+ rc = PTR_ERR(tlink);
+ if (IS_ERR(tlink))
+ goto free_xid;
+
+ rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
+ &oplock, &fileHandle, &created);
+ if (!rc)
+ CIFSSMBClose(xid, tlink_tcon(tlink), fileHandle);
+
+ cifs_put_tlink(tlink);
+free_xid:
+ FreeXid(xid);
+
+ return rc;
+}
+
int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
dev_t device_number)
{
@@ -488,20 +621,15 @@ mknod_out:
struct dentry *
cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
- struct nameidata *nd)
+ unsigned int flags)
{
int xid;
int rc = 0; /* to get around spurious gcc warning, set to zero here */
- __u32 oplock;
- __u16 fileHandle = 0;
- bool posix_open = false;
struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
- struct cifsFileInfo *cfile;
struct inode *newInode = NULL;
char *full_path = NULL;
- struct file *filp;
xid = GetXid();
@@ -518,31 +646,9 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
}
pTcon = tlink_tcon(tlink);
- oplock = pTcon->ses->server->oplocks ? REQ_OPLOCK : 0;
-
- /*
- * Don't allow the separator character in a path component.
- * The VFS will not allow "/", but "\" is allowed by posix.
- */
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
- int i;
- for (i = 0; i < direntry->d_name.len; i++)
- if (direntry->d_name.name[i] == '\\') {
- cFYI(1, "Invalid file name");
- rc = -EINVAL;
- goto lookup_out;
- }
- }
-
- /*
- * O_EXCL: optimize away the lookup, but don't hash the dentry. Let
- * the VFS handle the create.
- */
- if (nd && (nd->flags & LOOKUP_EXCL)) {
- d_instantiate(direntry, NULL);
- rc = 0;
+ rc = check_name(direntry);
+ if (rc)
goto lookup_out;
- }
/* can not grab the rename sem here since it would
deadlock in the cases (beginning of sys_rename itself)
@@ -560,80 +666,16 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
}
cFYI(1, "Full path: %s inode = 0x%p", full_path, direntry->d_inode);
- /* Posix open is only called (at lookup time) for file create now.
- * For opens (rather than creates), because we do not know if it
- * is a file or directory yet, and current Samba no longer allows
- * us to do posix open on dirs, we could end up wasting an open call
- * on what turns out to be a dir. For file opens, we wait to call posix
- * open till cifs_open. It could be added here (lookup) in the future
- * but the performance tradeoff of the extra network request when EISDIR
- * or EACCES is returned would have to be weighed against the 50%
- * reduction in network traffic in the other paths.
- */
if (pTcon->unix_ext) {
- if (nd && !(nd->flags & LOOKUP_DIRECTORY) &&
- (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
- (nd->intent.open.file->f_flags & O_CREAT)) {
- rc = cifs_posix_open(full_path, &newInode,
- parent_dir_inode->i_sb,
- nd->intent.open.create_mode,
- nd->intent.open.file->f_flags, &oplock,
- &fileHandle, xid);
- /*
- * The check below works around a bug in POSIX
- * open in samba versions 3.3.1 and earlier where
- * open could incorrectly fail with invalid parameter.
- * If either that or op not supported returned, follow
- * the normal lookup.
- */
- switch (rc) {
- case 0:
- /*
- * The server may allow us to open things like
- * FIFOs, but the client isn't set up to deal
- * with that. If it's not a regular file, just
- * close it and proceed as if it were a normal
- * lookup.
- */
- if (newInode && !S_ISREG(newInode->i_mode)) {
- CIFSSMBClose(xid, pTcon, fileHandle);
- break;
- }
- case -ENOENT:
- posix_open = true;
- case -EOPNOTSUPP:
- break;
- default:
- pTcon->broken_posix_open = true;
- }
- }
- if (!posix_open)
- rc = cifs_get_inode_info_unix(&newInode, full_path,
- parent_dir_inode->i_sb, xid);
- } else
+ rc = cifs_get_inode_info_unix(&newInode, full_path,
+ parent_dir_inode->i_sb, xid);
+ } else {
rc = cifs_get_inode_info(&newInode, full_path, NULL,
parent_dir_inode->i_sb, xid, NULL);
+ }
if ((rc == 0) && (newInode != NULL)) {
d_add(direntry, newInode);
- if (posix_open) {
- filp = lookup_instantiate_filp(nd, direntry,
- generic_file_open);
- if (IS_ERR(filp)) {
- rc = PTR_ERR(filp);
- CIFSSMBClose(xid, pTcon, fileHandle);
- goto lookup_out;
- }
-
- cfile = cifs_new_fileinfo(fileHandle, filp, tlink,
- oplock);
- if (cfile == NULL) {
- fput(filp);
- CIFSSMBClose(xid, pTcon, fileHandle);
- rc = -ENOMEM;
- goto lookup_out;
- }
- }
/* since paths are not looked up by component - the parent
directories are presumed to be good here */
renew_parental_timestamps(direntry);
@@ -658,9 +700,9 @@ lookup_out:
}
static int
-cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
+cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
{
- if (nd && (nd->flags & LOOKUP_RCU))
+ if (flags & LOOKUP_RCU)
return -ECHILD;
if (direntry->d_inode) {
@@ -689,7 +731,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
* This may be nfsd (or something), anyway, we can't see the
* intent of this. So, since this can be for creation, drop it.
*/
- if (!nd)
+ if (!flags)
return 0;
/*
@@ -697,7 +739,7 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
* case sensitive name which is specified by user if this is
* for creation.
*/
- if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+ if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
if (time_after(jiffies, direntry->d_time + HZ) || !lookupCacheEnabled)
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 745da3d0653e..8e8bb49112ff 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -800,7 +800,7 @@ cifs_find_inode(struct inode *inode, void *opaque)
return 0;
/* if it's not a directory or has no dentries, then flag it */
- if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry))
+ if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry))
fattr->cf_flags |= CIFS_FATTR_INO_COLLISION;
return 1;
@@ -825,9 +825,10 @@ static bool
inode_has_hashed_dentries(struct inode *inode)
{
struct dentry *dentry;
+ struct hlist_node *p;
spin_lock(&inode->i_lock);
- list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
spin_unlock(&inode->i_lock);
return true;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 0a8224d1c4c5..a4217f02fab2 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -86,9 +86,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
dentry = d_lookup(parent, name);
if (dentry) {
- /* FIXME: check for inode number changes? */
- if (dentry->d_inode != NULL)
+ inode = dentry->d_inode;
+ /* update inode in place if i_ino didn't change */
+ if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+ cifs_fattr_to_inode(inode, fattr);
return dentry;
+ }
d_drop(dentry);
dput(dentry);
}
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 3097ee58fd7d..f25d4ea14be4 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -365,16 +365,14 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
if (mid == NULL)
return -ENOMEM;
- /* put it on the pending_mid_q */
- spin_lock(&GlobalMid_Lock);
- list_add_tail(&mid->qhead, &server->pending_mid_q);
- spin_unlock(&GlobalMid_Lock);
-
rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
- if (rc)
- delete_mid(mid);
+ if (rc) {
+ DeleteMidQEntry(mid);
+ return rc;
+ }
+
*ret_mid = mid;
- return rc;
+ return 0;
}
/*
@@ -407,17 +405,21 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
mid->callback_data = cbdata;
mid->mid_state = MID_REQUEST_SUBMITTED;
+ /* put it on the pending_mid_q */
+ spin_lock(&GlobalMid_Lock);
+ list_add_tail(&mid->qhead, &server->pending_mid_q);
+ spin_unlock(&GlobalMid_Lock);
+
+
cifs_in_send_inc(server);
rc = smb_sendv(server, iov, nvec);
cifs_in_send_dec(server);
cifs_save_when_sent(mid);
mutex_unlock(&server->srv_mutex);
- if (rc)
- goto out_err;
+ if (rc == 0)
+ return 0;
- return rc;
-out_err:
delete_mid(mid);
add_credits(server, 1);
wake_up(&server->request_q);
diff --git a/fs/coda/cache.c b/fs/coda/cache.c
index 690157876184..958ae0e0ff8c 100644
--- a/fs/coda/cache.c
+++ b/fs/coda/cache.c
@@ -89,17 +89,13 @@ int coda_cache_check(struct inode *inode, int mask)
/* this won't do any harm: just flag all children */
static void coda_flag_children(struct dentry *parent, int flag)
{
- struct list_head *child;
struct dentry *de;
spin_lock(&parent->d_lock);
- list_for_each(child, &parent->d_subdirs)
- {
- de = list_entry(child, struct dentry, d_u.d_child);
+ list_for_each_entry(de, &parent->d_subdirs, d_u.d_child) {
/* don't know what to do with negative dentries */
- if ( ! de->d_inode )
- continue;
- coda_flag_inode(de->d_inode, flag);
+ if (de->d_inode )
+ coda_flag_inode(de->d_inode, flag);
}
spin_unlock(&parent->d_lock);
return;
diff --git a/fs/coda/dir.c b/fs/coda/dir.c
index 177515829062..49fe52d25600 100644
--- a/fs/coda/dir.c
+++ b/fs/coda/dir.c
@@ -30,8 +30,8 @@
#include "coda_int.h"
/* dir inode-ops */
-static int coda_create(struct inode *dir, struct dentry *new, umode_t mode, struct nameidata *nd);
-static struct dentry *coda_lookup(struct inode *dir, struct dentry *target, struct nameidata *nd);
+static int coda_create(struct inode *dir, struct dentry *new, umode_t mode, bool excl);
+static struct dentry *coda_lookup(struct inode *dir, struct dentry *target, unsigned int flags);
static int coda_link(struct dentry *old_dentry, struct inode *dir_inode,
struct dentry *entry);
static int coda_unlink(struct inode *dir_inode, struct dentry *entry);
@@ -46,7 +46,7 @@ static int coda_rename(struct inode *old_inode, struct dentry *old_dentry,
static int coda_readdir(struct file *file, void *buf, filldir_t filldir);
/* dentry ops */
-static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd);
+static int coda_dentry_revalidate(struct dentry *de, unsigned int flags);
static int coda_dentry_delete(const struct dentry *);
/* support routines */
@@ -94,7 +94,7 @@ const struct file_operations coda_dir_operations = {
/* inode operations for directories */
/* access routines: lookup, readlink, permission */
-static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, struct nameidata *nd)
+static struct dentry *coda_lookup(struct inode *dir, struct dentry *entry, unsigned int flags)
{
struct super_block *sb = dir->i_sb;
const char *name = entry->d_name.name;
@@ -188,7 +188,7 @@ static inline void coda_dir_drop_nlink(struct inode *dir)
}
/* creation routines: create, mknod, mkdir, link, symlink */
-static int coda_create(struct inode *dir, struct dentry *de, umode_t mode, struct nameidata *nd)
+static int coda_create(struct inode *dir, struct dentry *de, umode_t mode, bool excl)
{
int error;
const char *name=de->d_name.name;
@@ -536,12 +536,12 @@ out:
}
/* called when a cache lookup succeeds */
-static int coda_dentry_revalidate(struct dentry *de, struct nameidata *nd)
+static int coda_dentry_revalidate(struct dentry *de, unsigned int flags)
{
struct inode *inode;
struct coda_inode_info *cii;
- if (nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
inode = de->d_inode;
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 7e6c52d8a207..7414ae24a79b 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -442,7 +442,7 @@ static int configfs_attach_attr(struct configfs_dirent * sd, struct dentry * den
static struct dentry * configfs_lookup(struct inode *dir,
struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct configfs_dirent * parent_sd = dentry->d_parent->d_fsdata;
struct configfs_dirent * sd;
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index d013c46402ed..28cca01ca9c9 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -417,7 +417,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
/*
* Lookup and fill in the inode data..
*/
-static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
unsigned int offset = 0;
struct inode *inode = NULL;
diff --git a/fs/dcache.c b/fs/dcache.c
index 40469044088d..8086636bf796 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -218,7 +218,7 @@ static void __d_free(struct rcu_head *head)
{
struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
- WARN_ON(!list_empty(&dentry->d_alias));
+ WARN_ON(!hlist_unhashed(&dentry->d_alias));
if (dname_external(dentry))
kfree(dentry->d_name.name);
kmem_cache_free(dentry_cache, dentry);
@@ -267,7 +267,7 @@ static void dentry_iput(struct dentry * dentry)
struct inode *inode = dentry->d_inode;
if (inode) {
dentry->d_inode = NULL;
- list_del_init(&dentry->d_alias);
+ hlist_del_init(&dentry->d_alias);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
if (!inode->i_nlink)
@@ -291,7 +291,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
{
struct inode *inode = dentry->d_inode;
dentry->d_inode = NULL;
- list_del_init(&dentry->d_alias);
+ hlist_del_init(&dentry->d_alias);
dentry_rcuwalk_barrier(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
@@ -699,10 +699,11 @@ EXPORT_SYMBOL(dget_parent);
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
struct dentry *alias, *discon_alias;
+ struct hlist_node *p;
again:
discon_alias = NULL;
- list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
spin_lock(&alias->d_lock);
if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
if (IS_ROOT(alias) &&
@@ -737,7 +738,7 @@ struct dentry *d_find_alias(struct inode *inode)
{
struct dentry *de = NULL;
- if (!list_empty(&inode->i_dentry)) {
+ if (!hlist_empty(&inode->i_dentry)) {
spin_lock(&inode->i_lock);
de = __d_find_alias(inode, 0);
spin_unlock(&inode->i_lock);
@@ -753,9 +754,10 @@ EXPORT_SYMBOL(d_find_alias);
void d_prune_aliases(struct inode *inode)
{
struct dentry *dentry;
+ struct hlist_node *p;
restart:
spin_lock(&inode->i_lock);
- list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
if (!dentry->d_count) {
__dget_dlock(dentry);
@@ -977,7 +979,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
inode = dentry->d_inode;
if (inode) {
dentry->d_inode = NULL;
- list_del_init(&dentry->d_alias);
+ hlist_del_init(&dentry->d_alias);
if (dentry->d_op && dentry->d_op->d_iput)
dentry->d_op->d_iput(dentry, inode);
else
@@ -1312,7 +1314,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
INIT_HLIST_BL_NODE(&dentry->d_hash);
INIT_LIST_HEAD(&dentry->d_lru);
INIT_LIST_HEAD(&dentry->d_subdirs);
- INIT_LIST_HEAD(&dentry->d_alias);
+ INIT_HLIST_NODE(&dentry->d_alias);
INIT_LIST_HEAD(&dentry->d_u.d_child);
d_set_d_op(dentry, dentry->d_sb->s_d_op);
@@ -1400,7 +1402,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
if (inode) {
if (unlikely(IS_AUTOMOUNT(inode)))
dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
- list_add(&dentry->d_alias, &inode->i_dentry);
+ hlist_add_head(&dentry->d_alias, &inode->i_dentry);
}
dentry->d_inode = inode;
dentry_rcuwalk_barrier(dentry);
@@ -1425,7 +1427,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
void d_instantiate(struct dentry *entry, struct inode * inode)
{
- BUG_ON(!list_empty(&entry->d_alias));
+ BUG_ON(!hlist_unhashed(&entry->d_alias));
if (inode)
spin_lock(&inode->i_lock);
__d_instantiate(entry, inode);
@@ -1458,13 +1460,14 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
int len = entry->d_name.len;
const char *name = entry->d_name.name;
unsigned int hash = entry->d_name.hash;
+ struct hlist_node *p;
if (!inode) {
__d_instantiate(entry, NULL);
return NULL;
}
- list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
/*
* Don't need alias->d_lock here, because aliases with
* d_parent == entry->d_parent are not subject to name or
@@ -1490,7 +1493,7 @@ struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
struct dentry *result;
- BUG_ON(!list_empty(&entry->d_alias));
+ BUG_ON(!hlist_unhashed(&entry->d_alias));
if (inode)
spin_lock(&inode->i_lock);
@@ -1531,9 +1534,9 @@ static struct dentry * __d_find_any_alias(struct inode *inode)
{
struct dentry *alias;
- if (list_empty(&inode->i_dentry))
+ if (hlist_empty(&inode->i_dentry))
return NULL;
- alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
+ alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
__dget(alias);
return alias;
}
@@ -1607,7 +1610,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
spin_lock(&tmp->d_lock);
tmp->d_inode = inode;
tmp->d_flags |= DCACHE_DISCONNECTED;
- list_add(&tmp->d_alias, &inode->i_dentry);
+ hlist_add_head(&tmp->d_alias, &inode->i_dentry);
hlist_bl_lock(&tmp->d_sb->s_anon);
hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
hlist_bl_unlock(&tmp->d_sb->s_anon);
@@ -2384,14 +2387,13 @@ static struct dentry *__d_unalias(struct inode *inode,
struct dentry *dentry, struct dentry *alias)
{
struct mutex *m1 = NULL, *m2 = NULL;
- struct dentry *ret;
+ struct dentry *ret = ERR_PTR(-EBUSY);
/* If alias and dentry share a parent, then no extra locks required */
if (alias->d_parent == dentry->d_parent)
goto out_unalias;
/* See lock_rename() */
- ret = ERR_PTR(-EBUSY);
if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
goto out_err;
m1 = &dentry->d_sb->s_vfs_rename_mutex;
@@ -2399,8 +2401,10 @@ static struct dentry *__d_unalias(struct inode *inode,
goto out_err;
m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
- __d_move(alias, dentry);
- ret = alias;
+ if (likely(!d_mountpoint(alias))) {
+ __d_move(alias, dentry);
+ ret = alias;
+ }
out_err:
spin_unlock(&inode->i_lock);
if (m2)
@@ -2622,7 +2626,7 @@ global_root:
if (!slash)
error = prepend(buffer, buflen, "/", 1);
if (!error)
- error = real_mount(vfsmnt)->mnt_ns ? 1 : 2;
+ error = is_mounted(vfsmnt) ? 1 : 2;
goto out;
}
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index b80bc846a15a..d17c20fd74e6 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -54,13 +54,12 @@ static struct inode *debugfs_get_inode(struct super_block *sb, umode_t mode, dev
break;
case S_IFLNK:
inode->i_op = &debugfs_link_operations;
- inode->i_fop = fops;
inode->i_private = data;
break;
case S_IFDIR:
inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = fops ? fops : &simple_dir_operations;
- inode->i_private = data;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_private = NULL;
/* directory inodes start off with i_nlink == 2
* (for "." entry) */
@@ -91,13 +90,12 @@ static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
return error;
}
-static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode,
- void *data, const struct file_operations *fops)
+static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int res;
mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
- res = debugfs_mknod(dir, dentry, mode, 0, data, fops);
+ res = debugfs_mknod(dir, dentry, mode, 0, NULL, NULL);
if (!res) {
inc_nlink(dir);
fsnotify_mkdir(dir, dentry);
@@ -106,10 +104,10 @@ static int debugfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode,
}
static int debugfs_link(struct inode *dir, struct dentry *dentry, umode_t mode,
- void *data, const struct file_operations *fops)
+ void *data)
{
mode = (mode & S_IALLUGO) | S_IFLNK;
- return debugfs_mknod(dir, dentry, mode, 0, data, fops);
+ return debugfs_mknod(dir, dentry, mode, 0, data, NULL);
}
static int debugfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
@@ -293,13 +291,19 @@ static struct file_system_type debug_fs_type = {
.kill_sb = kill_litter_super,
};
-static int debugfs_create_by_name(const char *name, umode_t mode,
- struct dentry *parent,
- struct dentry **dentry,
- void *data,
- const struct file_operations *fops)
+struct dentry *__create_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops)
{
- int error = 0;
+ struct dentry *dentry = NULL;
+ int error;
+
+ pr_debug("debugfs: creating file '%s'\n",name);
+
+ error = simple_pin_fs(&debug_fs_type, &debugfs_mount,
+ &debugfs_mount_count);
+ if (error)
+ goto exit;
/* If the parent is not specified, we create it in the root.
* We need the root dentry to do this, which is in the super
@@ -309,30 +313,35 @@ static int debugfs_create_by_name(const char *name, umode_t mode,
if (!parent)
parent = debugfs_mount->mnt_root;
- *dentry = NULL;
+ dentry = NULL;
mutex_lock(&parent->d_inode->i_mutex);
- *dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(*dentry)) {
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (!IS_ERR(dentry)) {
switch (mode & S_IFMT) {
case S_IFDIR:
- error = debugfs_mkdir(parent->d_inode, *dentry, mode,
- data, fops);
+ error = debugfs_mkdir(parent->d_inode, dentry, mode);
+
break;
case S_IFLNK:
- error = debugfs_link(parent->d_inode, *dentry, mode,
- data, fops);
+ error = debugfs_link(parent->d_inode, dentry, mode,
+ data);
break;
default:
- error = debugfs_create(parent->d_inode, *dentry, mode,
+ error = debugfs_create(parent->d_inode, dentry, mode,
data, fops);
break;
}
- dput(*dentry);
+ dput(dentry);
} else
- error = PTR_ERR(*dentry);
+ error = PTR_ERR(dentry);
mutex_unlock(&parent->d_inode->i_mutex);
- return error;
+ if (error) {
+ dentry = NULL;
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ }
+exit:
+ return dentry;
}
/**
@@ -365,25 +374,15 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
- struct dentry *dentry = NULL;
- int error;
-
- pr_debug("debugfs: creating file '%s'\n",name);
-
- error = simple_pin_fs(&debug_fs_type, &debugfs_mount,
- &debugfs_mount_count);
- if (error)
- goto exit;
-
- error = debugfs_create_by_name(name, mode, parent, &dentry,
- data, fops);
- if (error) {
- dentry = NULL;
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
- goto exit;
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ case 0:
+ break;
+ default:
+ BUG();
}
-exit:
- return dentry;
+
+ return __create_file(name, mode, parent, data, fops);
}
EXPORT_SYMBOL_GPL(debugfs_create_file);
@@ -407,8 +406,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
*/
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
{
- return debugfs_create_file(name,
- S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
+ return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
parent, NULL, NULL);
}
EXPORT_SYMBOL_GPL(debugfs_create_dir);
@@ -446,8 +444,7 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
if (!link)
return NULL;
- result = debugfs_create_file(name, S_IFLNK | S_IRWXUGO, parent, link,
- NULL);
+ result = __create_file(name, S_IFLNK | S_IRWXUGO, parent, link, NULL);
if (!result)
kfree(link);
return result;
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 979c1e309c73..14afbabe6546 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -439,15 +439,15 @@ static struct dentry *devpts_mount(struct file_system_type *fs_type,
return ERR_PTR(error);
if (opts.newinstance)
- s = sget(fs_type, NULL, set_anon_super, NULL);
+ s = sget(fs_type, NULL, set_anon_super, flags, NULL);
else
- s = sget(fs_type, compare_init_pts_sb, set_anon_super, NULL);
+ s = sget(fs_type, compare_init_pts_sb, set_anon_super, flags,
+ NULL);
if (IS_ERR(s))
return ERR_CAST(s);
if (!s->s_root) {
- s->s_flags = flags;
error = devpts_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
if (error)
goto out_undo_sget;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0c85fae37666..1faf4cb56f39 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -1258,7 +1258,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
*/
BUG_ON(retval == -EIOCBQUEUED);
if (dio->is_async && retval == 0 && dio->result &&
- ((rw & READ) || (dio->result == sdio.size)))
+ ((rw == READ) || (dio->result == sdio.size)))
retval = -EIOCBQUEUED;
if (retval != -EIOCBQUEUED)
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 534c1d46e69e..1b5d9af937df 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -32,7 +32,7 @@
/**
* ecryptfs_d_revalidate - revalidate an ecryptfs dentry
* @dentry: The ecryptfs dentry
- * @nd: The associated nameidata
+ * @flags: lookup flags
*
* Called when the VFS needs to revalidate a dentry. This
* is called whenever a name lookup finds a dentry in the
@@ -42,32 +42,20 @@
* Returns 1 if valid, 0 otherwise.
*
*/
-static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *lower_dentry;
struct vfsmount *lower_mnt;
- struct dentry *dentry_save = NULL;
- struct vfsmount *vfsmount_save = NULL;
int rc = 1;
- if (nd && nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
goto out;
- if (nd) {
- dentry_save = nd->path.dentry;
- vfsmount_save = nd->path.mnt;
- nd->path.dentry = lower_dentry;
- nd->path.mnt = lower_mnt;
- }
- rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
- if (nd) {
- nd->path.dentry = dentry_save;
- nd->path.mnt = vfsmount_save;
- }
+ rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
if (dentry->d_inode) {
struct inode *lower_inode =
ecryptfs_inode_to_lower(dentry->d_inode);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 867b64c5d84f..989e034f02bd 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -550,20 +550,6 @@ extern struct kmem_cache *ecryptfs_key_record_cache;
extern struct kmem_cache *ecryptfs_key_sig_cache;
extern struct kmem_cache *ecryptfs_global_auth_tok_cache;
extern struct kmem_cache *ecryptfs_key_tfm_cache;
-extern struct kmem_cache *ecryptfs_open_req_cache;
-
-struct ecryptfs_open_req {
-#define ECRYPTFS_REQ_PROCESSED 0x00000001
-#define ECRYPTFS_REQ_DROPPED 0x00000002
-#define ECRYPTFS_REQ_ZOMBIE 0x00000004
- u32 flags;
- struct file **lower_file;
- struct dentry *lower_dentry;
- struct vfsmount *lower_mnt;
- wait_queue_head_t wait;
- struct mutex mux;
- struct list_head kthread_ctl_list;
-};
struct inode *ecryptfs_get_inode(struct inode *lower_inode,
struct super_block *sb);
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index a07441a0a878..ffa2be57804d 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -173,7 +173,7 @@ ecryptfs_do_create(struct inode *directory_inode,
inode = ERR_CAST(lower_dir_dentry);
goto out;
}
- rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, NULL);
+ rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, true);
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
"rc = [%d]\n", __func__, rc);
@@ -240,7 +240,6 @@ out:
* @dir: The inode of the directory in which to create the file.
* @dentry: The eCryptfs dentry
* @mode: The mode of the new file.
- * @nd: nameidata
*
* Creates a new file.
*
@@ -248,7 +247,7 @@ out:
*/
static int
ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
- umode_t mode, struct nameidata *nd)
+ umode_t mode, bool excl)
{
struct inode *ecryptfs_inode;
int rc;
@@ -270,8 +269,8 @@ ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
iput(ecryptfs_inode);
goto out;
}
- d_instantiate(ecryptfs_dentry, ecryptfs_inode);
unlock_new_inode(ecryptfs_inode);
+ d_instantiate(ecryptfs_dentry, ecryptfs_inode);
out:
return rc;
}
@@ -374,7 +373,7 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry,
*/
static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
struct dentry *ecryptfs_dentry,
- struct nameidata *ecryptfs_nd)
+ unsigned int flags)
{
char *encrypted_and_encoded_name = NULL;
size_t encrypted_and_encoded_name_size;
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c
index 69f994a7d524..809e67d05ca3 100644
--- a/fs/ecryptfs/kthread.c
+++ b/fs/ecryptfs/kthread.c
@@ -27,7 +27,12 @@
#include <linux/mount.h>
#include "ecryptfs_kernel.h"
-struct kmem_cache *ecryptfs_open_req_cache;
+struct ecryptfs_open_req {
+ struct file **lower_file;
+ struct path path;
+ struct completion done;
+ struct list_head kthread_ctl_list;
+};
static struct ecryptfs_kthread_ctl {
#define ECRYPTFS_KTHREAD_ZOMBIE 0x00000001
@@ -67,18 +72,10 @@ static int ecryptfs_threadfn(void *ignored)
req = list_first_entry(&ecryptfs_kthread_ctl.req_list,
struct ecryptfs_open_req,
kthread_ctl_list);
- mutex_lock(&req->mux);
list_del(&req->kthread_ctl_list);
- if (!(req->flags & ECRYPTFS_REQ_ZOMBIE)) {
- dget(req->lower_dentry);
- mntget(req->lower_mnt);
- (*req->lower_file) = dentry_open(
- req->lower_dentry, req->lower_mnt,
- (O_RDWR | O_LARGEFILE), current_cred());
- req->flags |= ECRYPTFS_REQ_PROCESSED;
- }
- wake_up(&req->wait);
- mutex_unlock(&req->mux);
+ *req->lower_file = dentry_open(&req->path,
+ (O_RDWR | O_LARGEFILE), current_cred());
+ complete(&req->done);
}
mutex_unlock(&ecryptfs_kthread_ctl.mux);
}
@@ -111,10 +108,9 @@ void ecryptfs_destroy_kthread(void)
ecryptfs_kthread_ctl.flags |= ECRYPTFS_KTHREAD_ZOMBIE;
list_for_each_entry(req, &ecryptfs_kthread_ctl.req_list,
kthread_ctl_list) {
- mutex_lock(&req->mux);
- req->flags |= ECRYPTFS_REQ_ZOMBIE;
- wake_up(&req->wait);
- mutex_unlock(&req->mux);
+ list_del(&req->kthread_ctl_list);
+ *req->lower_file = ERR_PTR(-EIO);
+ complete(&req->done);
}
mutex_unlock(&ecryptfs_kthread_ctl.mux);
kthread_stop(ecryptfs_kthread);
@@ -136,34 +132,26 @@ int ecryptfs_privileged_open(struct file **lower_file,
struct vfsmount *lower_mnt,
const struct cred *cred)
{
- struct ecryptfs_open_req *req;
+ struct ecryptfs_open_req req;
int flags = O_LARGEFILE;
int rc = 0;
+ init_completion(&req.done);
+ req.lower_file = lower_file;
+ req.path.dentry = lower_dentry;
+ req.path.mnt = lower_mnt;
+
/* Corresponding dput() and mntput() are done when the
* lower file is fput() when all eCryptfs files for the inode are
* released. */
- dget(lower_dentry);
- mntget(lower_mnt);
flags |= IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR;
- (*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
+ (*lower_file) = dentry_open(&req.path, flags, cred);
if (!IS_ERR(*lower_file))
goto out;
- if (flags & O_RDONLY) {
+ if ((flags & O_ACCMODE) == O_RDONLY) {
rc = PTR_ERR((*lower_file));
goto out;
}
- req = kmem_cache_alloc(ecryptfs_open_req_cache, GFP_KERNEL);
- if (!req) {
- rc = -ENOMEM;
- goto out;
- }
- mutex_init(&req->mux);
- req->lower_file = lower_file;
- req->lower_dentry = lower_dentry;
- req->lower_mnt = lower_mnt;
- init_waitqueue_head(&req->wait);
- req->flags = 0;
mutex_lock(&ecryptfs_kthread_ctl.mux);
if (ecryptfs_kthread_ctl.flags & ECRYPTFS_KTHREAD_ZOMBIE) {
rc = -EIO;
@@ -171,27 +159,14 @@ int ecryptfs_privileged_open(struct file **lower_file,
printk(KERN_ERR "%s: We are in the middle of shutting down; "
"aborting privileged request to open lower file\n",
__func__);
- goto out_free;
+ goto out;
}
- list_add_tail(&req->kthread_ctl_list, &ecryptfs_kthread_ctl.req_list);
+ list_add_tail(&req.kthread_ctl_list, &ecryptfs_kthread_ctl.req_list);
mutex_unlock(&ecryptfs_kthread_ctl.mux);
wake_up(&ecryptfs_kthread_ctl.wait);
- wait_event(req->wait, (req->flags != 0));
- mutex_lock(&req->mux);
- BUG_ON(req->flags == 0);
- if (req->flags & ECRYPTFS_REQ_DROPPED
- || req->flags & ECRYPTFS_REQ_ZOMBIE) {
- rc = -EIO;
- printk(KERN_WARNING "%s: Privileged open request dropped\n",
- __func__);
- goto out_unlock;
- }
- if (IS_ERR(*req->lower_file))
- rc = PTR_ERR(*req->lower_file);
-out_unlock:
- mutex_unlock(&req->mux);
-out_free:
- kmem_cache_free(ecryptfs_open_req_cache, req);
+ wait_for_completion(&req.done);
+ if (IS_ERR(*lower_file))
+ rc = PTR_ERR(*lower_file);
out:
return rc;
}
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 68954937a071..1c0b3b6b75c6 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -499,13 +499,12 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
goto out;
}
- s = sget(fs_type, NULL, set_anon_super, NULL);
+ s = sget(fs_type, NULL, set_anon_super, flags, NULL);
if (IS_ERR(s)) {
rc = PTR_ERR(s);
goto out;
}
- s->s_flags = flags;
rc = bdi_setup_and_register(&sbi->bdi, "ecryptfs", BDI_CAP_MAP_COPY);
if (rc)
goto out1;
@@ -682,11 +681,6 @@ static struct ecryptfs_cache_info {
.name = "ecryptfs_key_tfm_cache",
.size = sizeof(struct ecryptfs_key_tfm),
},
- {
- .cache = &ecryptfs_open_req_cache,
- .name = "ecryptfs_open_req_cache",
- .size = sizeof(struct ecryptfs_open_req),
- },
};
static void ecryptfs_free_kmem_caches(void)
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 3a06f4043df4..c0038f6566d4 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -49,7 +49,10 @@ ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
mutex_lock(&ecryptfs_daemon_hash_mux);
/* TODO: Just use file->private_data? */
rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
- BUG_ON(rc || !daemon);
+ if (rc || !daemon) {
+ mutex_unlock(&ecryptfs_daemon_hash_mux);
+ return -EINVAL;
+ }
mutex_lock(&daemon->mux);
mutex_unlock(&ecryptfs_daemon_hash_mux);
if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
@@ -122,6 +125,7 @@ ecryptfs_miscdev_open(struct inode *inode, struct file *file)
goto out_unlock_daemon;
}
daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
+ file->private_data = daemon;
atomic_inc(&ecryptfs_num_miscdev_opens);
out_unlock_daemon:
mutex_unlock(&daemon->mux);
@@ -152,9 +156,9 @@ ecryptfs_miscdev_release(struct inode *inode, struct file *file)
mutex_lock(&ecryptfs_daemon_hash_mux);
rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
- BUG_ON(rc || !daemon);
+ if (rc || !daemon)
+ daemon = file->private_data;
mutex_lock(&daemon->mux);
- BUG_ON(daemon->pid != task_pid(current));
BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
atomic_dec(&ecryptfs_num_miscdev_opens);
@@ -191,31 +195,32 @@ int ecryptfs_send_miscdev(char *data, size_t data_size,
struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
u16 msg_flags, struct ecryptfs_daemon *daemon)
{
- int rc = 0;
+ struct ecryptfs_message *msg;
- mutex_lock(&msg_ctx->mux);
- msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
- GFP_KERNEL);
- if (!msg_ctx->msg) {
- rc = -ENOMEM;
+ msg = kmalloc((sizeof(*msg) + data_size), GFP_KERNEL);
+ if (!msg) {
printk(KERN_ERR "%s: Out of memory whilst attempting "
"to kmalloc(%zd, GFP_KERNEL)\n", __func__,
- (sizeof(*msg_ctx->msg) + data_size));
- goto out_unlock;
+ (sizeof(*msg) + data_size));
+ return -ENOMEM;
}
+
+ mutex_lock(&msg_ctx->mux);
+ msg_ctx->msg = msg;
msg_ctx->msg->index = msg_ctx->index;
msg_ctx->msg->data_len = data_size;
msg_ctx->type = msg_type;
memcpy(msg_ctx->msg->data, data, data_size);
msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
- mutex_lock(&daemon->mux);
list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
+ mutex_unlock(&msg_ctx->mux);
+
+ mutex_lock(&daemon->mux);
daemon->num_queued_msg_ctx++;
wake_up_interruptible(&daemon->wait);
mutex_unlock(&daemon->mux);
-out_unlock:
- mutex_unlock(&msg_ctx->mux);
- return rc;
+
+ return 0;
}
/*
@@ -269,8 +274,16 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
mutex_lock(&ecryptfs_daemon_hash_mux);
/* TODO: Just use file->private_data? */
rc = ecryptfs_find_daemon_by_euid(&daemon, euid, current_user_ns());
- BUG_ON(rc || !daemon);
+ if (rc || !daemon) {
+ mutex_unlock(&ecryptfs_daemon_hash_mux);
+ return -EINVAL;
+ }
mutex_lock(&daemon->mux);
+ if (task_pid(current) != daemon->pid) {
+ mutex_unlock(&daemon->mux);
+ mutex_unlock(&ecryptfs_daemon_hash_mux);
+ return -EPERM;
+ }
if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
rc = 0;
mutex_unlock(&ecryptfs_daemon_hash_mux);
@@ -307,9 +320,6 @@ check_list:
* message from the queue; try again */
goto check_list;
}
- BUG_ON(euid != daemon->euid);
- BUG_ON(current_user_ns() != daemon->user_ns);
- BUG_ON(task_pid(current) != daemon->pid);
msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
struct ecryptfs_msg_ctx, daemon_out_list);
BUG_ON(!msg_ctx);
diff --git a/fs/efs/efs.h b/fs/efs/efs.h
index d8305b582ab0..5528926ac7f6 100644
--- a/fs/efs/efs.h
+++ b/fs/efs/efs.h
@@ -129,7 +129,7 @@ extern struct inode *efs_iget(struct super_block *, unsigned long);
extern efs_block_t efs_map_block(struct inode *, efs_block_t);
extern int efs_get_block(struct inode *, sector_t, struct buffer_head *, int);
-extern struct dentry *efs_lookup(struct inode *, struct dentry *, struct nameidata *);
+extern struct dentry *efs_lookup(struct inode *, struct dentry *, unsigned int);
extern struct dentry *efs_fh_to_dentry(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type);
extern struct dentry *efs_fh_to_parent(struct super_block *sb, struct fid *fid,
diff --git a/fs/efs/namei.c b/fs/efs/namei.c
index 832b10ded82f..96f66d213a19 100644
--- a/fs/efs/namei.c
+++ b/fs/efs/namei.c
@@ -58,7 +58,8 @@ static efs_ino_t efs_find_entry(struct inode *inode, const char *name, int len)
return(0);
}
-struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) {
+struct dentry *efs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
+{
efs_ino_t inodenum;
struct inode *inode = NULL;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 74598f67efeb..1c8b55670804 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1710,7 +1710,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
goto error_tgt_fput;
/* Check if EPOLLWAKEUP is allowed */
- if ((epds.events & EPOLLWAKEUP) && !capable(CAP_EPOLLWAKEUP))
+ if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
epds.events &= ~EPOLLWAKEUP;
/*
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index fc7161d6bf6b..4731fd991efe 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -46,7 +46,7 @@ static inline int exofs_add_nondir(struct dentry *dentry, struct inode *inode)
}
static struct dentry *exofs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct inode *inode;
ino_t ino;
@@ -60,7 +60,7 @@ static struct dentry *exofs_lookup(struct inode *dir, struct dentry *dentry,
}
static int exofs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct inode *inode = exofs_new_inode(dir, mode);
int err = PTR_ERR(inode);
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 49cf230554a2..24a49d47e935 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -735,13 +735,7 @@ static int _prepare_for_striping(struct ore_io_state *ios)
out:
ios->numdevs = devs_in_group;
ios->pages_consumed = cur_pg;
- if (unlikely(ret)) {
- if (length == ios->length)
- return ret;
- else
- ios->length -= length;
- }
- return 0;
+ return ret;
}
int ore_create(struct ore_io_state *ios)
diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
index d222c77cfa1b..5f376d14fdcc 100644
--- a/fs/exofs/ore_raid.c
+++ b/fs/exofs/ore_raid.c
@@ -144,26 +144,26 @@ static void _sp2d_reset(struct __stripe_pages_2d *sp2d,
{
unsigned data_devs = sp2d->data_devs;
unsigned group_width = data_devs + sp2d->parity;
- unsigned p;
+ int p, c;
if (!sp2d->needed)
return;
- for (p = 0; p < sp2d->pages_in_unit; p++) {
- struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
-
- if (_1ps->write_count < group_width) {
- unsigned c;
+ for (c = data_devs - 1; c >= 0; --c)
+ for (p = sp2d->pages_in_unit - 1; p >= 0; --p) {
+ struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
- for (c = 0; c < data_devs; c++)
- if (_1ps->page_is_read[c]) {
- struct page *page = _1ps->pages[c];
+ if (_1ps->page_is_read[c]) {
+ struct page *page = _1ps->pages[c];
- r4w->put_page(priv, page);
- _1ps->page_is_read[c] = false;
- }
+ r4w->put_page(priv, page);
+ _1ps->page_is_read[c] = false;
+ }
}
+ for (p = 0; p < sp2d->pages_in_unit; p++) {
+ struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
+
memset(_1ps->pages, 0, group_width * sizeof(*_1ps->pages));
_1ps->write_count = 0;
_1ps->tx = NULL;
@@ -461,16 +461,12 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret)
* ios->sp2d[p][*], xor is calculated the same way. These pages are
* allocated/freed and don't go through cache
*/
-static int _read_4_write(struct ore_io_state *ios)
+static int _read_4_write_first_stripe(struct ore_io_state *ios)
{
- struct ore_io_state *ios_read;
struct ore_striping_info read_si;
struct __stripe_pages_2d *sp2d = ios->sp2d;
u64 offset = ios->si.first_stripe_start;
- u64 last_stripe_end;
- unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
- unsigned i, c, p, min_p = sp2d->pages_in_unit, max_p = -1;
- int ret;
+ unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
if (offset == ios->offset) /* Go to start collect $200 */
goto read_last_stripe;
@@ -478,6 +474,9 @@ static int _read_4_write(struct ore_io_state *ios)
min_p = _sp2d_min_pg(sp2d);
max_p = _sp2d_max_pg(sp2d);
+ ORE_DBGMSG("stripe_start=0x%llx ios->offset=0x%llx min_p=%d max_p=%d\n",
+ offset, ios->offset, min_p, max_p);
+
for (c = 0; ; c++) {
ore_calc_stripe_info(ios->layout, offset, 0, &read_si);
read_si.obj_offset += min_p * PAGE_SIZE;
@@ -512,6 +511,18 @@ static int _read_4_write(struct ore_io_state *ios)
}
read_last_stripe:
+ return 0;
+}
+
+static int _read_4_write_last_stripe(struct ore_io_state *ios)
+{
+ struct ore_striping_info read_si;
+ struct __stripe_pages_2d *sp2d = ios->sp2d;
+ u64 offset;
+ u64 last_stripe_end;
+ unsigned bytes_in_stripe = ios->si.bytes_in_stripe;
+ unsigned c, p, min_p = sp2d->pages_in_unit, max_p = -1;
+
offset = ios->offset + ios->length;
if (offset % PAGE_SIZE)
_add_to_r4w_last_page(ios, &offset);
@@ -527,15 +538,15 @@ read_last_stripe:
c = _dev_order(ios->layout->group_width * ios->layout->mirrors_p1,
ios->layout->mirrors_p1, read_si.par_dev, read_si.dev);
- BUG_ON(ios->si.first_stripe_start + bytes_in_stripe != last_stripe_end);
- /* unaligned IO must be within a single stripe */
-
if (min_p == sp2d->pages_in_unit) {
/* Didn't do it yet */
min_p = _sp2d_min_pg(sp2d);
max_p = _sp2d_max_pg(sp2d);
}
+ ORE_DBGMSG("offset=0x%llx stripe_end=0x%llx min_p=%d max_p=%d\n",
+ offset, last_stripe_end, min_p, max_p);
+
while (offset < last_stripe_end) {
struct __1_page_stripe *_1ps = &sp2d->_1p_stripes[p];
@@ -568,6 +579,15 @@ read_last_stripe:
}
read_it:
+ return 0;
+}
+
+static int _read_4_write_execute(struct ore_io_state *ios)
+{
+ struct ore_io_state *ios_read;
+ unsigned i;
+ int ret;
+
ios_read = ios->ios_read_4_write;
if (!ios_read)
return 0;
@@ -591,6 +611,8 @@ read_it:
}
_mark_read4write_pages_uptodate(ios_read, ret);
+ ore_put_io_state(ios_read);
+ ios->ios_read_4_write = NULL; /* Might need a reuse at last stripe */
return 0;
}
@@ -626,8 +648,11 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
/* If first stripe, Read in all read4write pages
* (if needed) before we calculate the first parity.
*/
- _read_4_write(ios);
+ _read_4_write_first_stripe(ios);
}
+ if (!cur_len) /* If last stripe r4w pages of last stripe */
+ _read_4_write_last_stripe(ios);
+ _read_4_write_execute(ios);
for (i = 0; i < num_pages; i++) {
pages[i] = _raid_page_alloc();
@@ -654,34 +679,14 @@ int _ore_add_parity_unit(struct ore_io_state *ios,
int _ore_post_alloc_raid_stuff(struct ore_io_state *ios)
{
- struct ore_layout *layout = ios->layout;
-
if (ios->parity_pages) {
+ struct ore_layout *layout = ios->layout;
unsigned pages_in_unit = layout->stripe_unit / PAGE_SIZE;
- unsigned stripe_size = ios->si.bytes_in_stripe;
- u64 last_stripe, first_stripe;
if (_sp2d_alloc(pages_in_unit, layout->group_width,
layout->parity, &ios->sp2d)) {
return -ENOMEM;
}
-
- /* Round io down to last full strip */
- first_stripe = div_u64(ios->offset, stripe_size);
- last_stripe = div_u64(ios->offset + ios->length, stripe_size);
-
- /* If an IO spans more then a single stripe it must end at
- * a stripe boundary. The reminder at the end is pushed into the
- * next IO.
- */
- if (last_stripe != first_stripe) {
- ios->length = last_stripe * stripe_size - ios->offset;
-
- BUG_ON(!ios->length);
- ios->nr_pages = (ios->length + PAGE_SIZE - 1) /
- PAGE_SIZE;
- ios->si.length = ios->length; /*make it consistent */
- }
}
return 0;
}
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index b0201ca6e9c6..29ab099e3e08 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -19,19 +19,19 @@
#define dprintk(fmt, args...) do{}while(0)
-static int get_name(struct vfsmount *mnt, struct dentry *dentry, char *name,
- struct dentry *child);
+static int get_name(const struct path *path, char *name, struct dentry *child);
static int exportfs_get_name(struct vfsmount *mnt, struct dentry *dir,
char *name, struct dentry *child)
{
const struct export_operations *nop = dir->d_sb->s_export_op;
+ struct path path = {.mnt = mnt, .dentry = dir};
if (nop->get_name)
return nop->get_name(dir, name, child);
else
- return get_name(mnt, dir, name, child);
+ return get_name(&path, name, child);
}
/*
@@ -44,13 +44,14 @@ find_acceptable_alias(struct dentry *result,
{
struct dentry *dentry, *toput = NULL;
struct inode *inode;
+ struct hlist_node *p;
if (acceptable(context, result))
return result;
inode = result->d_inode;
spin_lock(&inode->i_lock);
- list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
dget(dentry);
spin_unlock(&inode->i_lock);
if (toput)
@@ -248,11 +249,10 @@ static int filldir_one(void * __buf, const char * name, int len,
* calls readdir on the parent until it finds an entry with
* the same inode number as the child, and returns that.
*/
-static int get_name(struct vfsmount *mnt, struct dentry *dentry,
- char *name, struct dentry *child)
+static int get_name(const struct path *path, char *name, struct dentry *child)
{
const struct cred *cred = current_cred();
- struct inode *dir = dentry->d_inode;
+ struct inode *dir = path->dentry->d_inode;
int error;
struct file *file;
struct getdents_callback buffer;
@@ -266,7 +266,7 @@ static int get_name(struct vfsmount *mnt, struct dentry *dentry,
/*
* Open the directory ...
*/
- file = dentry_open(dget(dentry), mntget(mnt), O_RDONLY, cred);
+ file = dentry_open(path, O_RDONLY, cred);
error = PTR_ERR(file);
if (IS_ERR(file))
goto out;
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index f663a67d7bf0..73b0d9519836 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -41,8 +41,8 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
{
int err = ext2_add_link(dentry, inode);
if (!err) {
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
return 0;
}
inode_dec_link_count(inode);
@@ -55,7 +55,7 @@ static inline int ext2_add_nondir(struct dentry *dentry, struct inode *inode)
* Methods themselves.
*/
-static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags)
{
struct inode * inode;
ino_t ino;
@@ -94,7 +94,7 @@ struct dentry *ext2_get_parent(struct dentry *child)
* If the create succeeds, we fill in the inode information
* with d_instantiate().
*/
-static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, struct nameidata *nd)
+static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode, bool excl)
{
struct inode *inode;
@@ -242,8 +242,8 @@ static int ext2_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
if (err)
goto out_fail;
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
out:
return err;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index b3621cb7ea31..9f311d27b16f 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -771,13 +771,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
err = -ENOMEM;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
- goto failed_unlock;
+ goto failed;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
kfree(sbi);
- goto failed_unlock;
+ goto failed;
}
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
@@ -1130,7 +1130,7 @@ failed_sbi:
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
-failed_unlock:
+failed:
return ret;
}
@@ -1184,6 +1184,12 @@ static int ext2_sync_fs(struct super_block *sb, int wait)
struct ext2_sb_info *sbi = EXT2_SB(sb);
struct ext2_super_block *es = EXT2_SB(sb)->s_es;
+ /*
+ * Write quota structures to quota file, sync_blockdev() will write
+ * them to disk later
+ */
+ dquot_writeback_dquots(sb, -1);
+
spin_lock(&sbi->s_lock);
if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
ext2_debug("setting valid to 0\n");
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index 92490e9f85ca..c8fff930790d 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -300,10 +300,11 @@ loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
{
struct inode *inode = file->f_mapping->host;
int dx_dir = is_dx_dir(inode);
+ loff_t htree_max = ext3_get_htree_eof(file);
if (likely(dx_dir))
return generic_file_llseek_size(file, offset, origin,
- ext3_get_htree_eof(file));
+ htree_max, htree_max);
else
return generic_file_llseek(file, offset, origin);
}
diff --git a/fs/ext3/fsync.c b/fs/ext3/fsync.c
index d4dff278cbd8..b31dbd4c46ad 100644
--- a/fs/ext3/fsync.c
+++ b/fs/ext3/fsync.c
@@ -92,8 +92,13 @@ int ext3_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* disk caches manually so that data really is on persistent
* storage
*/
- if (needs_barrier)
- blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ if (needs_barrier) {
+ int err;
+
+ err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
+ if (!ret)
+ ret = err;
+ }
out:
trace_ext3_sync_file_exit(inode, ret);
return ret;
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index eeb63dfc5d20..8f4fddac01a6 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1011,7 +1011,7 @@ errout:
return NULL;
}
-static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags)
{
struct inode * inode;
struct ext3_dir_entry_2 * de;
@@ -1671,8 +1671,8 @@ static int ext3_add_nondir(handle_t *handle,
int err = ext3_add_entry(handle, dentry, inode);
if (!err) {
ext3_mark_inode_dirty(handle, inode);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
return 0;
}
drop_nlink(inode);
@@ -1690,7 +1690,7 @@ static int ext3_add_nondir(handle_t *handle,
* with d_instantiate().
*/
static int ext3_create (struct inode * dir, struct dentry * dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
handle_t *handle;
struct inode * inode;
@@ -1836,8 +1836,8 @@ out_clear_inode:
if (err)
goto out_clear_inode;
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
out_stop:
brelse(dir_block);
ext3_journal_stop(handle);
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 8c3a44b7c375..ff9bcdc5b0d5 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -2058,7 +2058,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent)
goto failed_mount3;
}
- ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY);
+ if (ext3_setup_super(sb, es, sb->s_flags & MS_RDONLY))
+ sb->s_flags |= MS_RDONLY;
EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS;
ext3_orphan_cleanup(sb, es);
@@ -2526,6 +2527,11 @@ static int ext3_sync_fs(struct super_block *sb, int wait)
tid_t target;
trace_ext3_sync_fs(sb, wait);
+ /*
+ * Writeback quota in non-journalled quota case - journalled quota has
+ * no dirty dquots
+ */
+ dquot_writeback_dquots(sb, -1);
if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
if (wait)
log_wait_commit(EXT3_SB(sb)->s_journal, target);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index aa39e600d159..8e07d2a5a139 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -324,74 +324,27 @@ static inline loff_t ext4_get_htree_eof(struct file *filp)
/*
- * ext4_dir_llseek() based on generic_file_llseek() to handle both
- * non-htree and htree directories, where the "offset" is in terms
- * of the filename hash value instead of the byte offset.
+ * ext4_dir_llseek() calls generic_file_llseek_size to handle htree
+ * directories, where the "offset" is in terms of the filename hash
+ * value instead of the byte offset.
*
- * NOTE: offsets obtained *before* ext4_set_inode_flag(dir, EXT4_INODE_INDEX)
- * will be invalid once the directory was converted into a dx directory
+ * Because we may return a 64-bit hash that is well beyond offset limits,
+ * we need to pass the max hash as the maximum allowable offset in
+ * the htree directory case.
+ *
+ * For non-htree, ext4_llseek already chooses the proper max offset.
*/
loff_t ext4_dir_llseek(struct file *file, loff_t offset, int origin)
{
struct inode *inode = file->f_mapping->host;
- loff_t ret = -EINVAL;
int dx_dir = is_dx_dir(inode);
+ loff_t htree_max = ext4_get_htree_eof(file);
- mutex_lock(&inode->i_mutex);
-
- /* NOTE: relative offsets with dx directories might not work
- * as expected, as it is difficult to figure out the
- * correct offset between dx hashes */
-
- switch (origin) {
- case SEEK_END:
- if (unlikely(offset > 0))
- goto out_err; /* not supported for directories */
-
- /* so only negative offsets are left, does that have a
- * meaning for directories at all? */
- if (dx_dir)
- offset += ext4_get_htree_eof(file);
- else
- offset += inode->i_size;
- break;
- case SEEK_CUR:
- /*
- * Here we special-case the lseek(fd, 0, SEEK_CUR)
- * position-querying operation. Avoid rewriting the "same"
- * f_pos value back to the file because a concurrent read(),
- * write() or lseek() might have altered it
- */
- if (offset == 0) {
- offset = file->f_pos;
- goto out_ok;
- }
-
- offset += file->f_pos;
- break;
- }
-
- if (unlikely(offset < 0))
- goto out_err;
-
- if (!dx_dir) {
- if (offset > inode->i_sb->s_maxbytes)
- goto out_err;
- } else if (offset > ext4_get_htree_eof(file))
- goto out_err;
-
- /* Special lock needed here? */
- if (offset != file->f_pos) {
- file->f_pos = offset;
- file->f_version = 0;
- }
-
-out_ok:
- ret = offset;
-out_err:
- mutex_unlock(&inode->i_mutex);
-
- return ret;
+ if (likely(dx_dir))
+ return generic_file_llseek_size(file, offset, origin,
+ htree_max, htree_max);
+ else
+ return ext4_llseek(file, offset, origin);
}
/*
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8c7642a00054..782eecb57e43 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -211,9 +211,9 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
}
/*
- * ext4_llseek() copied from generic_file_llseek() to handle both
- * block-mapped and extent-mapped maxbytes values. This should
- * otherwise be identical with generic_file_llseek().
+ * ext4_llseek() handles both block-mapped and extent-mapped maxbytes values
+ * by calling generic_file_llseek_size() with the appropriate maxbytes
+ * value for each.
*/
loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
{
@@ -225,7 +225,8 @@ loff_t ext4_llseek(struct file *file, loff_t offset, int origin)
else
maxbytes = inode->i_sb->s_maxbytes;
- return generic_file_llseek_size(file, offset, origin, maxbytes);
+ return generic_file_llseek_size(file, offset, origin,
+ maxbytes, i_size_read(inode));
}
const struct file_operations ext4_file_operations = {
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index bb6c7d811313..2a1dcea4f12e 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -135,14 +135,7 @@ static int ext4_sync_parent(struct inode *inode)
inode = igrab(inode);
while (ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
- dentry = NULL;
- spin_lock(&inode->i_lock);
- if (!list_empty(&inode->i_dentry)) {
- dentry = list_first_entry(&inode->i_dentry,
- struct dentry, d_alias);
- dget(dentry);
- }
- spin_unlock(&inode->i_lock);
+ dentry = d_find_any_alias(inode);
if (!dentry)
break;
next = igrab(dentry->d_parent->d_inode);
@@ -232,7 +225,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (!journal) {
ret = __sync_inode(inode, datasync);
- if (!ret && !list_empty(&inode->i_dentry))
+ if (!ret && !hlist_empty(&inode->i_dentry))
ret = ext4_sync_parent(inode);
goto out;
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index e34deac3f366..7f7dad787603 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -268,7 +268,6 @@ group_extend_out:
err = ext4_move_extents(filp, donor_filp, me.orig_start,
me.donor_start, me.len, &me.moved_len);
mnt_drop_write_file(filp);
- mnt_drop_write(filp->f_path.mnt);
if (copy_to_user((struct move_extent __user *)arg,
&me, sizeof(me)))
@@ -390,7 +389,7 @@ group_add_out:
if (err)
return err;
- err = mnt_want_write(filp->f_path.mnt);
+ err = mnt_want_write_file(filp);
if (err)
goto resizefs_out;
@@ -402,7 +401,7 @@ group_add_out:
}
if (err == 0)
err = err2;
- mnt_drop_write(filp->f_path.mnt);
+ mnt_drop_write_file(filp);
resizefs_out:
ext4_resize_end(sb);
return err;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 5845cd97bf8b..d0d3f0e87f99 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1312,7 +1312,7 @@ errout:
return NULL;
}
-static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
struct ext4_dir_entry_2 *de;
@@ -2072,8 +2072,8 @@ static int ext4_add_nondir(handle_t *handle,
int err = ext4_add_entry(handle, dentry, inode);
if (!err) {
ext4_mark_inode_dirty(handle, inode);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
return 0;
}
drop_nlink(inode);
@@ -2091,7 +2091,7 @@ static int ext4_add_nondir(handle_t *handle,
* with d_instantiate().
*/
static int ext4_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
handle_t *handle;
struct inode *inode;
@@ -2249,8 +2249,8 @@ out_clear_inode:
err = ext4_mark_inode_dirty(handle, dir);
if (err)
goto out_clear_inode;
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
out_stop:
brelse(dir_block);
ext4_journal_stop(handle);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index eb7aa3e4ef05..d8759401ecae 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4325,6 +4325,11 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
trace_ext4_sync_fs(sb, wait);
flush_workqueue(sbi->dio_unwritten_wq);
+ /*
+ * Writeback quota in non-journalled quota case - journalled quota has
+ * no dirty dquots
+ */
+ dquot_writeback_dquots(sb, -1);
if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
if (wait)
jbd2_log_wait_commit(sbi->s_journal, target);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index a3d81ebf6d86..0038b32cb362 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -738,22 +738,21 @@ static int
fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent)
{
int len = *lenp;
- u32 ipos_h, ipos_m, ipos_l;
+ struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
+ loff_t i_pos;
if (len < 5) {
*lenp = 5;
return 255; /* no room */
}
- ipos_h = MSDOS_I(inode)->i_pos >> 8;
- ipos_m = (MSDOS_I(inode)->i_pos & 0xf0) << 24;
- ipos_l = (MSDOS_I(inode)->i_pos & 0x0f) << 28;
+ i_pos = fat_i_pos_read(sbi, inode);
*lenp = 5;
fh[0] = inode->i_ino;
fh[1] = inode->i_generation;
- fh[2] = ipos_h;
- fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
- fh[4] = ipos_l;
+ fh[2] = i_pos >> 8;
+ fh[3] = ((i_pos & 0xf0) << 24) | MSDOS_I(inode)->i_logstart;
+ fh[4] = (i_pos & 0x0f) << 28;
if (parent)
fh[4] |= MSDOS_I(parent)->i_logstart;
return 3;
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index c5938c9084b9..70d993a93805 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -201,7 +201,7 @@ static const struct dentry_operations msdos_dentry_operations = {
/***** Get inode using directory and name */
static struct dentry *msdos_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
@@ -265,7 +265,7 @@ static int msdos_add_entry(struct inode *dir, const unsigned char *name,
/***** Create a file */
static int msdos_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct super_block *sb = dir->i_sb;
struct inode *inode = NULL;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 98ae804f5273..6cc480652433 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -41,9 +41,9 @@ static int vfat_revalidate_shortname(struct dentry *dentry)
return ret;
}
-static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int vfat_revalidate(struct dentry *dentry, unsigned int flags)
{
- if (nd && nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
/* This is not negative dentry. Always valid. */
@@ -52,9 +52,9 @@ static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
return vfat_revalidate_shortname(dentry);
}
-static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
+static int vfat_revalidate_ci(struct dentry *dentry, unsigned int flags)
{
- if (nd && nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
/*
@@ -74,7 +74,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
* This may be nfsd (or something), anyway, we can't see the
* intent of this. So, since this can be for creation, drop it.
*/
- if (!nd)
+ if (!flags)
return 0;
/*
@@ -82,7 +82,7 @@ static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
* case sensitive name which is specified by user if this is
* for creation.
*/
- if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+ if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
return vfat_revalidate_shortname(dentry);
@@ -714,7 +714,7 @@ static int vfat_d_anon_disconn(struct dentry *dentry)
}
static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
@@ -772,7 +772,7 @@ error:
}
static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
diff --git a/fs/fifo.c b/fs/fifo.c
index b1a524d798e7..cf6f4345ceb0 100644
--- a/fs/fifo.c
+++ b/fs/fifo.c
@@ -14,7 +14,7 @@
#include <linux/sched.h>
#include <linux/pipe_fs_i.h>
-static void wait_for_partner(struct inode* inode, unsigned int *cnt)
+static int wait_for_partner(struct inode* inode, unsigned int *cnt)
{
int cur = *cnt;
@@ -23,6 +23,7 @@ static void wait_for_partner(struct inode* inode, unsigned int *cnt)
if (signal_pending(current))
break;
}
+ return cur == *cnt ? -ERESTARTSYS : 0;
}
static void wake_up_partner(struct inode* inode)
@@ -67,8 +68,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
* seen a writer */
filp->f_version = pipe->w_counter;
} else {
- wait_for_partner(inode, &pipe->w_counter);
- if(signal_pending(current))
+ if (wait_for_partner(inode, &pipe->w_counter))
goto err_rd;
}
}
@@ -90,8 +90,7 @@ static int fifo_open(struct inode *inode, struct file *filp)
wake_up_partner(inode);
if (!pipe->readers) {
- wait_for_partner(inode, &pipe->r_counter);
- if (signal_pending(current))
+ if (wait_for_partner(inode, &pipe->r_counter))
goto err_wr;
}
break;
diff --git a/fs/file_table.c b/fs/file_table.c
index a305d9e2d1b2..b3fc4d67a26b 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -23,6 +23,8 @@
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/atomic.h>
@@ -251,7 +253,6 @@ static void __fput(struct file *file)
}
fops_put(file->f_op);
put_pid(file->f_owner.pid);
- file_sb_list_del(file);
if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
i_readcount_dec(inode);
if (file->f_mode & FMODE_WRITE)
@@ -263,10 +264,77 @@ static void __fput(struct file *file)
mntput(mnt);
}
+static DEFINE_SPINLOCK(delayed_fput_lock);
+static LIST_HEAD(delayed_fput_list);
+static void delayed_fput(struct work_struct *unused)
+{
+ LIST_HEAD(head);
+ spin_lock_irq(&delayed_fput_lock);
+ list_splice_init(&delayed_fput_list, &head);
+ spin_unlock_irq(&delayed_fput_lock);
+ while (!list_empty(&head)) {
+ struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
+ list_del_init(&f->f_u.fu_list);
+ __fput(f);
+ }
+}
+
+static void ____fput(struct callback_head *work)
+{
+ __fput(container_of(work, struct file, f_u.fu_rcuhead));
+}
+
+/*
+ * If kernel thread really needs to have the final fput() it has done
+ * to complete, call this. The only user right now is the boot - we
+ * *do* need to make sure our writes to binaries on initramfs has
+ * not left us with opened struct file waiting for __fput() - execve()
+ * won't work without that. Please, don't add more callers without
+ * very good reasons; in particular, never call that with locks
+ * held and never call that from a thread that might need to do
+ * some work on any kind of umount.
+ */
+void flush_delayed_fput(void)
+{
+ delayed_fput(NULL);
+}
+
+static DECLARE_WORK(delayed_fput_work, delayed_fput);
+
void fput(struct file *file)
{
- if (atomic_long_dec_and_test(&file->f_count))
+ if (atomic_long_dec_and_test(&file->f_count)) {
+ struct task_struct *task = current;
+ file_sb_list_del(file);
+ if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
+ unsigned long flags;
+ spin_lock_irqsave(&delayed_fput_lock, flags);
+ list_add(&file->f_u.fu_list, &delayed_fput_list);
+ schedule_work(&delayed_fput_work);
+ spin_unlock_irqrestore(&delayed_fput_lock, flags);
+ return;
+ }
+ init_task_work(&file->f_u.fu_rcuhead, ____fput);
+ task_work_add(task, &file->f_u.fu_rcuhead, true);
+ }
+}
+
+/*
+ * synchronous analog of fput(); for kernel threads that might be needed
+ * in some umount() (and thus can't use flush_delayed_fput() without
+ * risking deadlocks), need to wait for completion of __fput() and know
+ * for this specific struct file it won't involve anything that would
+ * need them. Use only if you really need it - at the very least,
+ * don't blindly convert fput() by kernel thread to that.
+ */
+void __fput_sync(struct file *file)
+{
+ if (atomic_long_dec_and_test(&file->f_count)) {
+ struct task_struct *task = current;
+ file_sb_list_del(file);
+ BUG_ON(!(task->flags & PF_KTHREAD));
__fput(file);
+ }
}
EXPORT_SYMBOL(fput);
@@ -483,10 +551,8 @@ void mark_files_ro(struct super_block *sb)
{
struct file *f;
-retry:
lg_global_lock(&files_lglock);
do_file_list_for_each_entry(sb, f) {
- struct vfsmount *mnt;
if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
continue;
if (!file_count(f))
@@ -499,12 +565,7 @@ retry:
if (file_check_writeable(f) != 0)
continue;
file_release_write(f);
- mnt = mntget(f->f_path.mnt);
- /* This can sleep, so we can't hold the spinlock. */
- lg_global_unlock(&files_lglock);
- mnt_drop_write(mnt);
- mntput(mnt);
- goto retry;
+ mnt_drop_write_file(f);
} while_file_list_for_each_entry;
lg_global_unlock(&files_lglock);
}
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 3360f1e678ad..bd447e88f208 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -48,7 +48,7 @@
#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_CACHE_SIZE / (sbp)->s_blocksize))
-static struct dentry * vxfs_lookup(struct inode *, struct dentry *, struct nameidata *);
+static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int);
static int vxfs_readdir(struct file *, void *, filldir_t);
const struct inode_operations vxfs_dir_inode_ops = {
@@ -203,7 +203,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp)
* in the return pointer.
*/
static struct dentry *
-vxfs_lookup(struct inode *dip, struct dentry *dp, struct nameidata *nd)
+vxfs_lookup(struct inode *dip, struct dentry *dp, unsigned int flags)
{
struct inode *ip = NULL;
ino_t ino;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 41a3ccff18d8..8f660dd6137a 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1315,6 +1315,8 @@ void writeback_inodes_sb_nr(struct super_block *sb,
.reason = reason,
};
+ if (sb->s_bdi == &noop_backing_dev_info)
+ return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
bdi_queue_work(sb->s_bdi, &work);
wait_for_completion(&done);
@@ -1398,6 +1400,9 @@ void sync_inodes_sb(struct super_block *sb)
.reason = WB_REASON_SYNC,
};
+ /* Nothing to do? */
+ if (sb->s_bdi == &noop_backing_dev_info)
+ return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
bdi_queue_work(sb->s_bdi, &work);
diff --git a/fs/fs_struct.c b/fs/fs_struct.c
index e159e682ad4c..5df4775fea03 100644
--- a/fs/fs_struct.c
+++ b/fs/fs_struct.c
@@ -6,18 +6,6 @@
#include <linux/fs_struct.h>
#include "internal.h"
-static inline void path_get_longterm(struct path *path)
-{
- path_get(path);
- mnt_make_longterm(path->mnt);
-}
-
-static inline void path_put_longterm(struct path *path)
-{
- mnt_make_shortterm(path->mnt);
- path_put(path);
-}
-
/*
* Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
* It can block.
@@ -26,7 +14,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
{
struct path old_root;
- path_get_longterm(path);
+ path_get(path);
spin_lock(&fs->lock);
write_seqcount_begin(&fs->seq);
old_root = fs->root;
@@ -34,7 +22,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
write_seqcount_end(&fs->seq);
spin_unlock(&fs->lock);
if (old_root.dentry)
- path_put_longterm(&old_root);
+ path_put(&old_root);
}
/*
@@ -45,7 +33,7 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
{
struct path old_pwd;
- path_get_longterm(path);
+ path_get(path);
spin_lock(&fs->lock);
write_seqcount_begin(&fs->seq);
old_pwd = fs->pwd;
@@ -54,7 +42,7 @@ void set_fs_pwd(struct fs_struct *fs, struct path *path)
spin_unlock(&fs->lock);
if (old_pwd.dentry)
- path_put_longterm(&old_pwd);
+ path_put(&old_pwd);
}
static inline int replace_path(struct path *p, const struct path *old, const struct path *new)
@@ -84,7 +72,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
write_seqcount_end(&fs->seq);
while (hits--) {
count++;
- path_get_longterm(new_root);
+ path_get(new_root);
}
spin_unlock(&fs->lock);
}
@@ -92,13 +80,13 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
while (count--)
- path_put_longterm(old_root);
+ path_put(old_root);
}
void free_fs_struct(struct fs_struct *fs)
{
- path_put_longterm(&fs->root);
- path_put_longterm(&fs->pwd);
+ path_put(&fs->root);
+ path_put(&fs->pwd);
kmem_cache_free(fs_cachep, fs);
}
@@ -132,9 +120,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
spin_lock(&old->lock);
fs->root = old->root;
- path_get_longterm(&fs->root);
+ path_get(&fs->root);
fs->pwd = old->pwd;
- path_get_longterm(&fs->pwd);
+ path_get(&fs->pwd);
spin_unlock(&old->lock);
}
return fs;
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 334e0b18a014..8964cf3999b2 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -154,7 +154,7 @@ u64 fuse_get_attr_version(struct fuse_conn *fc)
* the lookup once more. If the lookup results in the same inode,
* then refresh the attributes, timeouts and mark the dentry valid.
*/
-static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
+static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
{
struct inode *inode;
@@ -174,7 +174,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
if (!inode)
return 0;
- if (nd && (nd->flags & LOOKUP_RCU))
+ if (flags & LOOKUP_RCU)
return -ECHILD;
fc = get_fuse_conn(inode);
@@ -249,7 +249,7 @@ static struct dentry *fuse_d_add_directory(struct dentry *entry,
/* This tries to shrink the subtree below alias */
fuse_invalidate_entry(alias);
dput(alias);
- if (!list_empty(&inode->i_dentry))
+ if (!hlist_empty(&inode->i_dentry))
return ERR_PTR(-EBUSY);
} else {
dput(alias);
@@ -316,7 +316,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
}
static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
- struct nameidata *nd)
+ unsigned int flags)
{
int err;
struct fuse_entry_out outarg;
@@ -370,7 +370,8 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
* 'mknod' + 'open' requests.
*/
static int fuse_create_open(struct inode *dir, struct dentry *entry,
- umode_t mode, struct nameidata *nd)
+ struct file *file, unsigned flags,
+ umode_t mode, int *opened)
{
int err;
struct inode *inode;
@@ -381,15 +382,11 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
struct fuse_open_out outopen;
struct fuse_entry_out outentry;
struct fuse_file *ff;
- struct file *file;
- int flags = nd->intent.open.flags;
-
- if (fc->no_create)
- return -ENOSYS;
forget = fuse_alloc_forget();
+ err = -ENOMEM;
if (!forget)
- return -ENOMEM;
+ goto out_err;
req = fuse_get_req(fc);
err = PTR_ERR(req);
@@ -428,11 +425,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
req->out.args[1].value = &outopen;
fuse_request_send(fc, req);
err = req->out.h.error;
- if (err) {
- if (err == -ENOSYS)
- fc->no_create = 1;
+ if (err)
goto out_free_ff;
- }
err = -EIO;
if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
@@ -448,28 +442,74 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
flags &= ~(O_CREAT | O_EXCL | O_TRUNC);
fuse_sync_release(ff, flags);
fuse_queue_forget(fc, forget, outentry.nodeid, 1);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_err;
}
kfree(forget);
d_instantiate(entry, inode);
fuse_change_entry_timeout(entry, &outentry);
fuse_invalidate_attr(dir);
- file = lookup_instantiate_filp(nd, entry, generic_file_open);
- if (IS_ERR(file)) {
+ err = finish_open(file, entry, generic_file_open, opened);
+ if (err) {
fuse_sync_release(ff, flags);
- return PTR_ERR(file);
+ } else {
+ file->private_data = fuse_file_get(ff);
+ fuse_finish_open(inode, file);
}
- file->private_data = fuse_file_get(ff);
- fuse_finish_open(inode, file);
- return 0;
+ return err;
- out_free_ff:
+out_free_ff:
fuse_file_free(ff);
- out_put_request:
+out_put_request:
fuse_put_request(fc, req);
- out_put_forget_req:
+out_put_forget_req:
kfree(forget);
+out_err:
+ return err;
+}
+
+static int fuse_mknod(struct inode *, struct dentry *, umode_t, dev_t);
+static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
+ struct file *file, unsigned flags,
+ umode_t mode, int *opened)
+{
+ int err;
+ struct fuse_conn *fc = get_fuse_conn(dir);
+ struct dentry *res = NULL;
+
+ if (d_unhashed(entry)) {
+ res = fuse_lookup(dir, entry, 0);
+ if (IS_ERR(res))
+ return PTR_ERR(res);
+
+ if (res)
+ entry = res;
+ }
+
+ if (!(flags & O_CREAT) || entry->d_inode)
+ goto no_open;
+
+ /* Only creates */
+ *opened |= FILE_CREATED;
+
+ if (fc->no_create)
+ goto mknod;
+
+ err = fuse_create_open(dir, entry, file, flags, mode, opened);
+ if (err == -ENOSYS) {
+ fc->no_create = 1;
+ goto mknod;
+ }
+out_dput:
+ dput(res);
return err;
+
+mknod:
+ err = fuse_mknod(dir, entry, mode, 0);
+ if (err)
+ goto out_dput;
+no_open:
+ return finish_no_open(file, res);
}
/*
@@ -571,14 +611,8 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
}
static int fuse_create(struct inode *dir, struct dentry *entry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
- if (nd) {
- int err = fuse_create_open(dir, entry, mode, nd);
- if (err != -ENOSYS)
- return err;
- /* Fall back on mknod */
- }
return fuse_mknod(dir, entry, mode, 0);
}
@@ -1646,6 +1680,7 @@ static const struct inode_operations fuse_dir_inode_operations = {
.link = fuse_link,
.setattr = fuse_setattr,
.create = fuse_create,
+ .atomic_open = fuse_atomic_open,
.mknod = fuse_mknod,
.permission = fuse_permission,
.getattr = fuse_getattr,
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index e80a464850c8..d6526347d386 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -614,7 +614,6 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
int alloc_required;
int error = 0;
- struct gfs2_qadata *qa = NULL;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
struct page *page;
@@ -638,15 +637,9 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
if (alloc_required) {
- qa = gfs2_qadata_get(ip);
- if (!qa) {
- error = -ENOMEM;
- goto out_unlock;
- }
-
error = gfs2_quota_lock_check(ip);
if (error)
- goto out_alloc_put;
+ goto out_unlock;
error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (error)
@@ -708,8 +701,6 @@ out_trans_fail:
gfs2_inplace_release(ip);
out_qunlock:
gfs2_quota_unlock(ip);
-out_alloc_put:
- gfs2_qadata_put(ip);
}
out_unlock:
if (&ip->i_inode == sdp->sd_rindex) {
@@ -846,7 +837,6 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
struct buffer_head *dibh;
- struct gfs2_qadata *qa = ip->i_qadata;
unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
unsigned int to = from + len;
int ret;
@@ -878,12 +868,10 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
brelse(dibh);
failed:
gfs2_trans_end(sdp);
- if (ip->i_res)
+ if (gfs2_mb_reserved(ip))
gfs2_inplace_release(ip);
- if (qa) {
+ if (ip->i_res->rs_qa_qd_num)
gfs2_quota_unlock(ip);
- gfs2_qadata_put(ip);
- }
if (inode == sdp->sd_rindex) {
gfs2_glock_dq(&m_ip->i_gh);
gfs2_holder_uninit(&m_ip->i_gh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index dab54099dd98..49cd7dd4a9fa 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -785,6 +785,9 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
if (error)
goto out_rlist;
+ if (gfs2_rs_active(ip->i_res)) /* needs to be done with the rgrp glock held */
+ gfs2_rs_deltree(ip->i_res);
+
error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
RES_INDIRECT + RES_STATFS + RES_QUOTA,
revokes);
@@ -1045,12 +1048,13 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
find_metapath(sdp, lblock, &mp, ip->i_height);
- if (!gfs2_qadata_get(ip))
- return -ENOMEM;
+ error = gfs2_rindex_update(sdp);
+ if (error)
+ return error;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
- goto out;
+ return error;
while (height--) {
struct strip_mine sm;
@@ -1064,8 +1068,6 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
gfs2_quota_unhold(ip);
-out:
- gfs2_qadata_put(ip);
return error;
}
@@ -1167,19 +1169,14 @@ static int do_grow(struct inode *inode, u64 size)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct buffer_head *dibh;
- struct gfs2_qadata *qa = NULL;
int error;
int unstuff = 0;
if (gfs2_is_stuffed(ip) &&
(size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
- qa = gfs2_qadata_get(ip);
- if (qa == NULL)
- return -ENOMEM;
-
error = gfs2_quota_lock_check(ip);
if (error)
- goto do_grow_alloc_put;
+ return error;
error = gfs2_inplace_reserve(ip, 1);
if (error)
@@ -1214,8 +1211,6 @@ do_grow_release:
gfs2_inplace_release(ip);
do_grow_qunlock:
gfs2_quota_unlock(ip);
-do_grow_alloc_put:
- gfs2_qadata_put(ip);
}
return error;
}
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 0da8da2c991d..4fddb3c22d25 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -25,7 +25,7 @@
/**
* gfs2_drevalidate - Check directory lookup consistency
* @dentry: the mapping to check
- * @nd:
+ * @flags: lookup flags
*
* Check to make sure the lookup necessary to arrive at this inode from its
* parent is still good.
@@ -33,7 +33,7 @@
* Returns: 1 if the dentry is ok, 0 if it isn't
*/
-static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
+static int gfs2_drevalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *parent;
struct gfs2_sbd *sdp;
@@ -44,7 +44,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
int error;
int had_lock = 0;
- if (nd && nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
parent = dget_parent(dentry);
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 8aaeb07a07b5..259b088cfc4c 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1854,14 +1854,9 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
if (!ht)
return -ENOMEM;
- if (!gfs2_qadata_get(dip)) {
- error = -ENOMEM;
- goto out;
- }
-
error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
- goto out_put;
+ goto out;
/* Count the number of leaves */
bh = leaf_bh;
@@ -1942,8 +1937,6 @@ out_rg_gunlock:
out_rlist:
gfs2_rlist_free(&rlist);
gfs2_quota_unhold(dip);
-out_put:
- gfs2_qadata_put(dip);
out:
kfree(ht);
return error;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 31b199f6efc1..9aa6af13823c 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -142,6 +142,7 @@ static const u32 fsflags_to_gfs2[32] = {
[7] = GFS2_DIF_NOATIME,
[12] = GFS2_DIF_EXHASH,
[14] = GFS2_DIF_INHERIT_JDATA,
+ [17] = GFS2_DIF_TOPDIR,
};
static const u32 gfs2_to_fsflags[32] = {
@@ -150,6 +151,7 @@ static const u32 gfs2_to_fsflags[32] = {
[gfs2fl_AppendOnly] = FS_APPEND_FL,
[gfs2fl_NoAtime] = FS_NOATIME_FL,
[gfs2fl_ExHash] = FS_INDEX_FL,
+ [gfs2fl_TopLevel] = FS_TOPDIR_FL,
[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};
@@ -203,6 +205,7 @@ void gfs2_set_inode_flags(struct inode *inode)
GFS2_DIF_NOATIME| \
GFS2_DIF_SYNC| \
GFS2_DIF_SYSTEM| \
+ GFS2_DIF_TOPDIR| \
GFS2_DIF_INHERIT_JDATA)
/**
@@ -298,6 +301,7 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
if (!S_ISDIR(inode->i_mode)) {
+ gfsflags &= ~GFS2_DIF_TOPDIR;
if (gfsflags & GFS2_DIF_INHERIT_JDATA)
gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
return do_gfs2_set_flags(filp, gfsflags, ~0);
@@ -366,7 +370,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
u64 pos = page->index << PAGE_CACHE_SHIFT;
unsigned int data_blocks, ind_blocks, rblocks;
struct gfs2_holder gh;
- struct gfs2_qadata *qa;
loff_t size;
int ret;
@@ -376,6 +379,13 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
*/
vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+ ret = gfs2_rs_alloc(ip);
+ if (ret)
+ return ret;
+
+ atomic_set(&ip->i_res->rs_sizehint,
+ PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift);
+
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh);
if (ret)
@@ -393,14 +403,13 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
goto out_unlock;
}
- ret = -ENOMEM;
- qa = gfs2_qadata_get(ip);
- if (qa == NULL)
+ ret = gfs2_rindex_update(sdp);
+ if (ret)
goto out_unlock;
ret = gfs2_quota_lock_check(ip);
if (ret)
- goto out_alloc_put;
+ goto out_unlock;
gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
if (ret)
@@ -447,8 +456,6 @@ out_trans_fail:
gfs2_inplace_release(ip);
out_quota_unlock:
gfs2_quota_unlock(ip);
-out_alloc_put:
- gfs2_qadata_put(ip);
out_unlock:
gfs2_glock_dq(&gh);
out:
@@ -567,16 +574,14 @@ fail:
static int gfs2_release(struct inode *inode, struct file *file)
{
- struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
- struct gfs2_file *fp;
+ struct gfs2_inode *ip = GFS2_I(inode);
- fp = file->private_data;
+ kfree(file->private_data);
file->private_data = NULL;
- if (gfs2_assert_warn(sdp, fp))
- return -EIO;
-
- kfree(fp);
+ if ((file->f_mode & FMODE_WRITE) &&
+ (atomic_read(&inode->i_writecount) == 1))
+ gfs2_rs_delete(ip);
return 0;
}
@@ -653,12 +658,20 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
+ size_t writesize = iov_length(iov, nr_segs);
+ struct dentry *dentry = file->f_dentry;
+ struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+ struct gfs2_sbd *sdp;
+ int ret;
+ sdp = GFS2_SB(file->f_mapping->host);
+ ret = gfs2_rs_alloc(ip);
+ if (ret)
+ return ret;
+
+ atomic_set(&ip->i_res->rs_sizehint, writesize >> sdp->sd_sb.sb_bsize_shift);
if (file->f_flags & O_APPEND) {
- struct dentry *dentry = file->f_dentry;
- struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
struct gfs2_holder gh;
- int ret;
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
if (ret)
@@ -751,7 +764,6 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
loff_t bytes, max_bytes;
- struct gfs2_qadata *qa;
int error;
const loff_t pos = offset;
const loff_t count = len;
@@ -774,11 +786,17 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
if (bytes == 0)
bytes = sdp->sd_sb.sb_bsize;
+ error = gfs2_rs_alloc(ip);
+ if (error)
+ return error;
+
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
error = gfs2_glock_nq(&ip->i_gh);
if (unlikely(error))
goto out_uninit;
+ atomic_set(&ip->i_res->rs_sizehint, len >> sdp->sd_sb.sb_bsize_shift);
+
while (len > 0) {
if (len < bytes)
bytes = len;
@@ -787,15 +805,9 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
offset += bytes;
continue;
}
- qa = gfs2_qadata_get(ip);
- if (!qa) {
- error = -ENOMEM;
- goto out_unlock;
- }
-
error = gfs2_quota_lock_check(ip);
if (error)
- goto out_alloc_put;
+ goto out_unlock;
retry:
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
@@ -835,7 +847,6 @@ retry:
offset += max_bytes;
gfs2_inplace_release(ip);
gfs2_quota_unlock(ip);
- gfs2_qadata_put(ip);
}
if (error == 0)
@@ -846,8 +857,6 @@ out_trans_fail:
gfs2_inplace_release(ip);
out_qunlock:
gfs2_quota_unlock(ip);
-out_alloc_put:
- gfs2_qadata_put(ip);
out_unlock:
gfs2_glock_dq(&ip->i_gh);
out_uninit:
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index dab2526071cc..1ed81f40da0d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -46,10 +46,11 @@
#include "trace_gfs2.h"
struct gfs2_glock_iter {
- int hash; /* hash bucket index */
- struct gfs2_sbd *sdp; /* incore superblock */
- struct gfs2_glock *gl; /* current glock struct */
- char string[512]; /* scratch space */
+ int hash; /* hash bucket index */
+ unsigned nhash; /* Index within current bucket */
+ struct gfs2_sbd *sdp; /* incore superblock */
+ struct gfs2_glock *gl; /* current glock struct */
+ loff_t last_pos; /* last position */
};
typedef void (*glock_examiner) (struct gfs2_glock * gl);
@@ -767,6 +768,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_stats.stats[GFS2_LKS_DCOUNT] = 0;
gl->gl_stats.stats[GFS2_LKS_QCOUNT] = 0;
memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
+ memset(gl->gl_lvb, 0, 32 * sizeof(char));
gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
@@ -948,9 +950,7 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
va_start(args, fmt);
if (seq) {
- struct gfs2_glock_iter *gi = seq->private;
- vsprintf(gi->string, fmt, args);
- seq_printf(seq, gi->string);
+ seq_vprintf(seq, fmt, args);
} else {
vaf.fmt = fmt;
vaf.va = &args;
@@ -1854,8 +1854,14 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
gl = gi->gl;
if (gl) {
gi->gl = glock_hash_next(gl);
+ gi->nhash++;
} else {
+ if (gi->hash >= GFS2_GL_HASH_SIZE) {
+ rcu_read_unlock();
+ return 1;
+ }
gi->gl = glock_hash_chain(gi->hash);
+ gi->nhash = 0;
}
while (gi->gl == NULL) {
gi->hash++;
@@ -1864,6 +1870,7 @@ static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
return 1;
}
gi->gl = glock_hash_chain(gi->hash);
+ gi->nhash = 0;
}
/* Skip entries for other sb and dead entries */
} while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
@@ -1876,7 +1883,12 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
struct gfs2_glock_iter *gi = seq->private;
loff_t n = *pos;
- gi->hash = 0;
+ if (gi->last_pos <= *pos)
+ n = gi->nhash + (*pos - gi->last_pos);
+ else
+ gi->hash = 0;
+
+ gi->nhash = 0;
rcu_read_lock();
do {
@@ -1884,6 +1896,7 @@ static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
return NULL;
} while (n--);
+ gi->last_pos = *pos;
return gi->gl;
}
@@ -1893,7 +1906,7 @@ static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
struct gfs2_glock_iter *gi = seq->private;
(*pos)++;
-
+ gi->last_pos = *pos;
if (gfs2_glock_iter_next(gi))
return NULL;
@@ -1964,6 +1977,8 @@ static const struct seq_operations gfs2_sbstats_seq_ops = {
.show = gfs2_sbstats_seq_show,
};
+#define GFS2_SEQ_GOODSIZE min(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER, 65536UL)
+
static int gfs2_glocks_open(struct inode *inode, struct file *file)
{
int ret = seq_open_private(file, &gfs2_glock_seq_ops,
@@ -1972,6 +1987,9 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data;
struct gfs2_glock_iter *gi = seq->private;
gi->sdp = inode->i_private;
+ seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
+ if (seq->buf)
+ seq->size = GFS2_SEQ_GOODSIZE;
}
return ret;
}
@@ -1984,6 +2002,9 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file)
struct seq_file *seq = file->private_data;
struct gfs2_glock_iter *gi = seq->private;
gi->sdp = inode->i_private;
+ seq->buf = kmalloc(GFS2_SEQ_GOODSIZE, GFP_KERNEL | __GFP_NOWARN);
+ if (seq->buf)
+ seq->size = GFS2_SEQ_GOODSIZE;
}
return ret;
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 67fd6beffece..aaecc8085fc5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -84,17 +84,22 @@ struct gfs2_rgrpd {
u32 rd_data; /* num of data blocks in rgrp */
u32 rd_bitbytes; /* number of bytes in data bitmaps */
u32 rd_free;
+ u32 rd_reserved; /* number of blocks reserved */
u32 rd_free_clone;
u32 rd_dinodes;
u64 rd_igeneration;
struct gfs2_bitmap *rd_bits;
struct gfs2_sbd *rd_sbd;
+ struct gfs2_rgrp_lvb *rd_rgl;
u32 rd_last_alloc;
u32 rd_flags;
#define GFS2_RDF_CHECK 0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE 0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR 0x40000000 /* error in rg */
#define GFS2_RDF_MASK 0xf0000000 /* mask for internal flags */
+ spinlock_t rd_rsspin; /* protects reservation related vars */
+ struct rb_root rd_rstree; /* multi-block reservation tree */
+ u32 rd_rs_cnt; /* count of current reservations */
};
enum gfs2_state_bits {
@@ -232,6 +237,38 @@ struct gfs2_holder {
unsigned long gh_ip;
};
+/* Resource group multi-block reservation, in order of appearance:
+
+ Step 1. Function prepares to write, allocates a mb, sets the size hint.
+ Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
+ Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
+ Step 4. Bits are assigned from the rgrp based on either the reservation
+ or wherever it can.
+*/
+
+struct gfs2_blkreserv {
+ /* components used during write (step 1): */
+ atomic_t rs_sizehint; /* hint of the write size */
+
+ /* components used during inplace_reserve (step 2): */
+ u32 rs_requested; /* Filled in by caller of gfs2_inplace_reserve() */
+
+ /* components used during get_local_rgrp (step 3): */
+ struct gfs2_rgrpd *rs_rgd; /* pointer to the gfs2_rgrpd */
+ struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */
+ struct rb_node rs_node; /* link to other block reservations */
+
+ /* components used during block searches and assignments (step 4): */
+ struct gfs2_bitmap *rs_bi; /* bitmap for the current allocation */
+ u32 rs_biblk; /* start block relative to the bi */
+ u32 rs_free; /* how many blocks are still free */
+
+ /* ancillary quota stuff */
+ struct gfs2_quota_data *rs_qa_qd[2 * MAXQUOTAS];
+ struct gfs2_holder rs_qa_qd_ghs[2 * MAXQUOTAS];
+ unsigned int rs_qa_qd_num;
+};
+
enum {
GLF_LOCK = 1,
GLF_DEMOTE = 3,
@@ -289,18 +326,6 @@ struct gfs2_glock {
#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
-struct gfs2_qadata { /* quota allocation data */
- /* Quota stuff */
- struct gfs2_quota_data *qa_qd[2*MAXQUOTAS];
- struct gfs2_holder qa_qd_ghs[2*MAXQUOTAS];
- unsigned int qa_qd_num;
-};
-
-struct gfs2_blkreserv {
- u32 rs_requested; /* Filled in by caller of gfs2_inplace_reserve() */
- struct gfs2_holder rs_rgd_gh; /* Filled in by gfs2_inplace_reserve() */
-};
-
enum {
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
@@ -308,7 +333,6 @@ enum {
GIF_SW_PAGED = 3,
};
-
struct gfs2_inode {
struct inode i_inode;
u64 i_no_addr;
@@ -319,8 +343,7 @@ struct gfs2_inode {
struct gfs2_glock *i_gl; /* Move into i_gh? */
struct gfs2_holder i_iopen_gh;
struct gfs2_holder i_gh; /* for prepare/commit_write only */
- struct gfs2_qadata *i_qadata; /* quota allocation data */
- struct gfs2_blkreserv *i_res; /* resource group block reservation */
+ struct gfs2_blkreserv *i_res; /* rgrp multi-block reservation */
struct gfs2_rgrpd *i_rgd;
u64 i_goal; /* goal block for allocations */
struct rw_semaphore i_rw_mutex;
@@ -473,6 +496,7 @@ struct gfs2_args {
unsigned int ar_discard:1; /* discard requests */
unsigned int ar_errors:2; /* errors=withdraw | panic */
unsigned int ar_nobarrier:1; /* do not send barriers */
+ unsigned int ar_rgrplvb:1; /* use lvbs for rgrp info */
int ar_commit; /* Commit interval */
int ar_statfs_quantum; /* The fast statfs interval */
int ar_quota_quantum; /* The quota interval */
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index a9ba2444e077..4ce22e547308 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -521,12 +521,13 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
int error;
munge_mode_uid_gid(dip, &mode, &uid, &gid);
- if (!gfs2_qadata_get(dip))
- return -ENOMEM;
+ error = gfs2_rindex_update(sdp);
+ if (error)
+ return error;
error = gfs2_quota_lock(dip, uid, gid);
if (error)
- goto out;
+ return error;
error = gfs2_quota_check(dip, uid, gid);
if (error)
@@ -542,8 +543,6 @@ static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl,
out_quota:
gfs2_quota_unlock(dip);
-out:
- gfs2_qadata_put(dip);
return error;
}
@@ -551,14 +550,13 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name,
struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
- struct gfs2_qadata *qa;
int alloc_required;
struct buffer_head *dibh;
int error;
- qa = gfs2_qadata_get(dip);
- if (!qa)
- return -ENOMEM;
+ error = gfs2_rindex_update(sdp);
+ if (error)
+ return error;
error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
@@ -605,13 +603,13 @@ fail_end_trans:
gfs2_trans_end(sdp);
fail_ipreserv:
- gfs2_inplace_release(dip);
+ if (alloc_required)
+ gfs2_inplace_release(dip);
fail_quota_locks:
gfs2_quota_unlock(dip);
fail:
- gfs2_qadata_put(dip);
return error;
}
@@ -657,7 +655,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
const struct qstr *name = &dentry->d_name;
struct gfs2_holder ghs[2];
struct inode *inode = NULL;
- struct gfs2_inode *dip = GFS2_I(dir);
+ struct gfs2_inode *dip = GFS2_I(dir), *ip;
struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 };
int error;
@@ -667,6 +665,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (!name->len || name->len > GFS2_FNAMESIZE)
return -ENAMETOOLONG;
+ /* We need a reservation to allocate the new dinode block. The
+ directory ip temporarily points to the reservation, but this is
+ being done to get a set of contiguous blocks for the new dinode.
+ Since this is a create, we don't have a sizehint yet, so it will
+ have to use the minimum reservation size. */
+ error = gfs2_rs_alloc(dip);
+ if (error)
+ return error;
+
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
if (error)
goto fail;
@@ -700,19 +707,29 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (IS_ERR(inode))
goto fail_gunlock2;
- error = gfs2_inode_refresh(GFS2_I(inode));
+ ip = GFS2_I(inode);
+ error = gfs2_inode_refresh(ip);
if (error)
goto fail_gunlock2;
+ /* The newly created inode needs a reservation so it can allocate
+ xattrs. At the same time, we want new blocks allocated to the new
+ dinode to be as contiguous as possible. Since we allocated the
+ dinode block under the directory's reservation, we transfer
+ ownership of that reservation to the new inode. The directory
+ doesn't need a reservation unless it needs a new allocation. */
+ ip->i_res = dip->i_res;
+ dip->i_res = NULL;
+
error = gfs2_acl_create(dip, inode);
if (error)
goto fail_gunlock2;
- error = gfs2_security_init(dip, GFS2_I(inode), name);
+ error = gfs2_security_init(dip, ip, name);
if (error)
goto fail_gunlock2;
- error = link_dinode(dip, name, GFS2_I(inode));
+ error = link_dinode(dip, name, ip);
if (error)
goto fail_gunlock2;
@@ -722,10 +739,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
gfs2_trans_end(sdp);
/* Check if we reserved space in the rgrp. Function link_dinode may
not, depending on whether alloc is required. */
- if (dip->i_res)
+ if (gfs2_mb_reserved(dip))
gfs2_inplace_release(dip);
gfs2_quota_unlock(dip);
- gfs2_qadata_put(dip);
mark_inode_dirty(inode);
gfs2_glock_dq_uninit_m(2, ghs);
d_instantiate(dentry, inode);
@@ -740,6 +756,7 @@ fail_gunlock:
iput(inode);
}
fail:
+ gfs2_rs_delete(dip);
if (bh)
brelse(bh);
return error;
@@ -755,11 +772,8 @@ fail:
*/
static int gfs2_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, struct nameidata *nd)
+ umode_t mode, bool excl)
{
- int excl = 0;
- if (nd && (nd->flags & LOOKUP_EXCL))
- excl = 1;
return gfs2_create_inode(dir, dentry, S_IFREG | mode, 0, NULL, 0, excl);
}
@@ -775,7 +789,7 @@ static int gfs2_create(struct inode *dir, struct dentry *dentry,
*/
static struct dentry *gfs2_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct inode *inode = gfs2_lookupi(dir, &dentry->d_name, 0);
if (inode && !IS_ERR(inode)) {
@@ -819,6 +833,10 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (S_ISDIR(inode->i_mode))
return -EPERM;
+ error = gfs2_rs_alloc(dip);
+ if (error)
+ return error;
+
gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
@@ -870,16 +888,9 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
error = 0;
if (alloc_required) {
- struct gfs2_qadata *qa = gfs2_qadata_get(dip);
-
- if (!qa) {
- error = -ENOMEM;
- goto out_gunlock;
- }
-
error = gfs2_quota_lock_check(dip);
if (error)
- goto out_alloc;
+ goto out_gunlock;
error = gfs2_inplace_reserve(dip, sdp->sd_max_dirres);
if (error)
@@ -922,9 +933,6 @@ out_ipres:
out_gunlock_q:
if (alloc_required)
gfs2_quota_unlock(dip);
-out_alloc:
- if (alloc_required)
- gfs2_qadata_put(dip);
out_gunlock:
gfs2_glock_dq(ghs + 1);
out_child:
@@ -1234,6 +1242,10 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (error)
return error;
+ error = gfs2_rs_alloc(ndip);
+ if (error)
+ return error;
+
if (odip != ndip) {
error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
0, &r_gh);
@@ -1357,16 +1369,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
goto out_gunlock;
if (alloc_required) {
- struct gfs2_qadata *qa = gfs2_qadata_get(ndip);
-
- if (!qa) {
- error = -ENOMEM;
- goto out_gunlock;
- }
-
error = gfs2_quota_lock_check(ndip);
if (error)
- goto out_alloc;
+ goto out_gunlock;
error = gfs2_inplace_reserve(ndip, sdp->sd_max_dirres);
if (error)
@@ -1427,9 +1432,6 @@ out_ipreserv:
out_gunlock_q:
if (alloc_required)
gfs2_quota_unlock(ndip);
-out_alloc:
- if (alloc_required)
- gfs2_qadata_put(ndip);
out_gunlock:
while (x--) {
gfs2_glock_dq(ghs + x);
@@ -1590,12 +1592,9 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
if (!(attr->ia_valid & ATTR_GID) || ogid == ngid)
ogid = ngid = NO_QUOTA_CHANGE;
- if (!gfs2_qadata_get(ip))
- return -ENOMEM;
-
error = gfs2_quota_lock(ip, nuid, ngid);
if (error)
- goto out_alloc;
+ return error;
if (ouid != NO_QUOTA_CHANGE || ogid != NO_QUOTA_CHANGE) {
error = gfs2_quota_check(ip, nuid, ngid);
@@ -1621,8 +1620,6 @@ out_end_trans:
gfs2_trans_end(sdp);
out_gunlock_q:
gfs2_quota_unlock(ip);
-out_alloc:
- gfs2_qadata_put(ip);
return error;
}
@@ -1644,6 +1641,10 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
struct gfs2_holder i_gh;
int error;
+ error = gfs2_rs_alloc(ip);
+ if (error)
+ return error;
+
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
if (error)
return error;
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 852c1be1dd3b..8ff95a2d54ee 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -401,9 +401,14 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
goto out;
set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
- gfs2_meta_check(sdp, bd->bd_bh);
- gfs2_pin(sdp, bd->bd_bh);
mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
+ if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
+ printk(KERN_ERR
+ "Attempting to add uninitialised block to journal (inplace block=%lld)\n",
+ (unsigned long long)bd->bd_bh->b_blocknr);
+ BUG();
+ }
+ gfs2_pin(sdp, bd->bd_bh);
mh->__pad0 = cpu_to_be64(0);
mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
sdp->sd_log_num_buf++;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 6cdb0f2a1b09..e04d0e09ee7b 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -43,7 +43,6 @@ static void gfs2_init_inode_once(void *foo)
inode_init_once(&ip->i_inode);
init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
- ip->i_qadata = NULL;
ip->i_res = NULL;
ip->i_hash_cache = NULL;
}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 6c1e5d1c404a..3a56c8d94de0 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -213,8 +213,10 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
struct gfs2_sbd *sdp = gl->gl_sbd;
struct buffer_head *bh;
- if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+ if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
+ *bhp = NULL;
return -EIO;
+ }
*bhp = bh = gfs2_getbuf(gl, blkno, CREATE);
@@ -235,6 +237,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
if (tr && tr->tr_touched)
gfs2_io_error_bh(sdp, bh);
brelse(bh);
+ *bhp = NULL;
return -EIO;
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index b8c250fc4922..e5af9dc420ef 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1118,20 +1118,33 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
}
error = init_names(sdp, silent);
- if (error)
- goto fail;
+ if (error) {
+ /* In this case, we haven't initialized sysfs, so we have to
+ manually free the sdp. */
+ free_percpu(sdp->sd_lkstats);
+ kfree(sdp);
+ sb->s_fs_info = NULL;
+ return error;
+ }
snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s", sdp->sd_table_name);
- gfs2_create_debugfs_file(sdp);
-
error = gfs2_sys_fs_add(sdp);
+ /*
+ * If we hit an error here, gfs2_sys_fs_add will have called function
+ * kobject_put which causes the sysfs usage count to go to zero, which
+ * causes sysfs to call function gfs2_sbd_release, which frees sdp.
+ * Subsequent error paths here will call gfs2_sys_fs_del, which also
+ * kobject_put to free sdp.
+ */
if (error)
- goto fail;
+ return error;
+
+ gfs2_create_debugfs_file(sdp);
error = gfs2_lm_mount(sdp, silent);
if (error)
- goto fail_sys;
+ goto fail_debug;
error = init_locking(sdp, &mount_gh, DO);
if (error)
@@ -1215,12 +1228,12 @@ fail_locking:
fail_lm:
gfs2_gl_hash_clear(sdp);
gfs2_lm_unmount(sdp);
-fail_sys:
- gfs2_sys_fs_del(sdp);
-fail:
+fail_debug:
gfs2_delete_debugfs_file(sdp);
free_percpu(sdp->sd_lkstats);
- kfree(sdp);
+ /* gfs2_sys_fs_del must be the last thing we do, since it causes
+ * sysfs to call function gfs2_sbd_release, which frees sdp. */
+ gfs2_sys_fs_del(sdp);
sb->s_fs_info = NULL;
return error;
}
@@ -1286,7 +1299,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
error = -EBUSY;
goto error_bdev;
}
- s = sget(fs_type, test_gfs2_super, set_gfs2_super, bdev);
+ s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
error = PTR_ERR(s);
if (IS_ERR(s))
@@ -1316,7 +1329,6 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
} else {
char b[BDEVNAME_SIZE];
- s->s_flags = flags;
s->s_mode = mode;
strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
sb_set_blocksize(s, block_size(bdev));
@@ -1360,7 +1372,7 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
dev_name, error);
return ERR_PTR(error);
}
- s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super,
+ s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags,
path.dentry->d_inode->i_sb->s_bdev);
path_put(&path);
if (IS_ERR(s)) {
@@ -1390,10 +1402,9 @@ static void gfs2_kill_sb(struct super_block *sb)
sdp->sd_root_dir = NULL;
sdp->sd_master_dir = NULL;
shrink_dcache_sb(sb);
- kill_block_super(sb);
gfs2_delete_debugfs_file(sdp);
free_percpu(sdp->sd_lkstats);
- kfree(sdp);
+ kill_block_super(sb);
}
struct file_system_type gfs2_fs_type = {
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index b97178e7d397..a3bde91645c2 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -494,11 +494,15 @@ static void qdsb_put(struct gfs2_quota_data *qd)
int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_qadata *qa = ip->i_qadata;
- struct gfs2_quota_data **qd = qa->qa_qd;
+ struct gfs2_quota_data **qd;
int error;
- if (gfs2_assert_warn(sdp, !qa->qa_qd_num) ||
+ if (ip->i_res == NULL)
+ gfs2_rs_alloc(ip);
+
+ qd = ip->i_res->rs_qa_qd;
+
+ if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
return -EIO;
@@ -508,20 +512,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
if (error)
goto out;
- qa->qa_qd_num++;
+ ip->i_res->rs_qa_qd_num++;
qd++;
error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
if (error)
goto out;
- qa->qa_qd_num++;
+ ip->i_res->rs_qa_qd_num++;
qd++;
if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
error = qdsb_get(sdp, QUOTA_USER, uid, qd);
if (error)
goto out;
- qa->qa_qd_num++;
+ ip->i_res->rs_qa_qd_num++;
qd++;
}
@@ -529,7 +533,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
if (error)
goto out;
- qa->qa_qd_num++;
+ ip->i_res->rs_qa_qd_num++;
qd++;
}
@@ -542,16 +546,17 @@ out:
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_qadata *qa = ip->i_qadata;
unsigned int x;
+ if (ip->i_res == NULL)
+ return;
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
- for (x = 0; x < qa->qa_qd_num; x++) {
- qdsb_put(qa->qa_qd[x]);
- qa->qa_qd[x] = NULL;
+ for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
+ qdsb_put(ip->i_res->rs_qa_qd[x]);
+ ip->i_res->rs_qa_qd[x] = NULL;
}
- qa->qa_qd_num = 0;
+ ip->i_res->rs_qa_qd_num = 0;
}
static int sort_qd(const void *a, const void *b)
@@ -764,6 +769,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
unsigned int nalloc = 0, blocks;
int error;
+ error = gfs2_rs_alloc(ip);
+ if (error)
+ return error;
+
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
@@ -915,7 +924,6 @@ fail:
int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
unsigned int x;
int error = 0;
@@ -928,15 +936,15 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
- sort(qa->qa_qd, qa->qa_qd_num, sizeof(struct gfs2_quota_data *),
- sort_qd, NULL);
+ sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
+ sizeof(struct gfs2_quota_data *), sort_qd, NULL);
- for (x = 0; x < qa->qa_qd_num; x++) {
+ for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
int force = NO_FORCE;
- qd = qa->qa_qd[x];
+ qd = ip->i_res->rs_qa_qd[x];
if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
force = FORCE;
- error = do_glock(qd, force, &qa->qa_qd_ghs[x]);
+ error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
if (error)
break;
}
@@ -945,7 +953,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
set_bit(GIF_QD_LOCKED, &ip->i_flags);
else {
while (x--)
- gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
+ gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
gfs2_quota_unhold(ip);
}
@@ -990,7 +998,6 @@ static int need_sync(struct gfs2_quota_data *qd)
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
- struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qda[4];
unsigned int count = 0;
unsigned int x;
@@ -998,14 +1005,14 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
goto out;
- for (x = 0; x < qa->qa_qd_num; x++) {
+ for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
struct gfs2_quota_data *qd;
int sync;
- qd = qa->qa_qd[x];
+ qd = ip->i_res->rs_qa_qd[x];
sync = need_sync(qd);
- gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
+ gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
if (sync && qd_trylock(qd))
qda[count++] = qd;
@@ -1038,7 +1045,6 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
s64 value;
unsigned int x;
@@ -1050,8 +1056,8 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
- for (x = 0; x < qa->qa_qd_num; x++) {
- qd = qa->qa_qd[x];
+ for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
+ qd = ip->i_res->rs_qa_qd[x];
if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
(qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
@@ -1089,7 +1095,6 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 uid, u32 gid)
{
- struct gfs2_qadata *qa = ip->i_qadata;
struct gfs2_quota_data *qd;
unsigned int x;
@@ -1098,8 +1103,8 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
return;
- for (x = 0; x < qa->qa_qd_num; x++) {
- qd = qa->qa_qd[x];
+ for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
+ qd = ip->i_res->rs_qa_qd[x];
if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
(qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
@@ -1108,7 +1113,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
}
}
-int gfs2_quota_sync(struct super_block *sb, int type, int wait)
+int gfs2_quota_sync(struct super_block *sb, int type)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_data **qda;
@@ -1154,7 +1159,7 @@ int gfs2_quota_sync(struct super_block *sb, int type, int wait)
static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
{
- return gfs2_quota_sync(sb, type, 0);
+ return gfs2_quota_sync(sb, type);
}
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
@@ -1549,10 +1554,14 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
if (error)
return error;
+ error = gfs2_rs_alloc(ip);
+ if (error)
+ goto out_put;
+
mutex_lock(&ip->i_inode.i_mutex);
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
if (error)
- goto out_put;
+ goto out_unlockput;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
if (error)
goto out_q;
@@ -1609,8 +1618,9 @@ out_i:
gfs2_glock_dq_uninit(&i_gh);
out_q:
gfs2_glock_dq_uninit(&q_gh);
-out_put:
+out_unlockput:
mutex_unlock(&ip->i_inode.i_mutex);
+out_put:
qd_put(qd);
return error;
}
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index 90bf1c302a98..f25d98b87904 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -26,7 +26,7 @@ extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid);
extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
u32 uid, u32 gid);
-extern int gfs2_quota_sync(struct super_block *sb, int type, int wait);
+extern int gfs2_quota_sync(struct super_block *sb, int type);
extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id);
extern int gfs2_quota_init(struct gfs2_sbd *sdp);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index f74fb9bd1973..4d34887a601d 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -35,6 +35,9 @@
#define BFITNOENT ((u32)~0)
#define NO_BLOCK ((u64)~0)
+#define RSRV_CONTENTION_FACTOR 4
+#define RGRP_RSRV_MAX_CONTENDERS 2
+
#if BITS_PER_LONG == 32
#define LBITMASK (0x55555555UL)
#define LBITSKIP55 (0x55555555UL)
@@ -178,6 +181,57 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
}
/**
+ * rs_cmp - multi-block reservation range compare
+ * @blk: absolute file system block number of the new reservation
+ * @len: number of blocks in the new reservation
+ * @rs: existing reservation to compare against
+ *
+ * returns: 1 if the block range is beyond the reach of the reservation
+ * -1 if the block range is before the start of the reservation
+ * 0 if the block range overlaps with the reservation
+ */
+static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
+{
+ u64 startblk = gfs2_rs_startblk(rs);
+
+ if (blk >= startblk + rs->rs_free)
+ return 1;
+ if (blk + len - 1 < startblk)
+ return -1;
+ return 0;
+}
+
+/**
+ * rs_find - Find a rgrp multi-block reservation that contains a given block
+ * @rgd: The rgrp
+ * @rgblk: The block we're looking for, relative to the rgrp
+ */
+static struct gfs2_blkreserv *rs_find(struct gfs2_rgrpd *rgd, u32 rgblk)
+{
+ struct rb_node **newn;
+ int rc;
+ u64 fsblk = rgblk + rgd->rd_data0;
+
+ spin_lock(&rgd->rd_rsspin);
+ newn = &rgd->rd_rstree.rb_node;
+ while (*newn) {
+ struct gfs2_blkreserv *cur =
+ rb_entry(*newn, struct gfs2_blkreserv, rs_node);
+ rc = rs_cmp(fsblk, 1, cur);
+ if (rc < 0)
+ newn = &((*newn)->rb_left);
+ else if (rc > 0)
+ newn = &((*newn)->rb_right);
+ else {
+ spin_unlock(&rgd->rd_rsspin);
+ return cur;
+ }
+ }
+ spin_unlock(&rgd->rd_rsspin);
+ return NULL;
+}
+
+/**
* gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
* a block in a given allocation state.
* @buf: the buffer that holds the bitmaps
@@ -417,6 +471,137 @@ void gfs2_free_clones(struct gfs2_rgrpd *rgd)
}
}
+/**
+ * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
+ * @ip: the inode for this reservation
+ */
+int gfs2_rs_alloc(struct gfs2_inode *ip)
+{
+ int error = 0;
+ struct gfs2_blkreserv *res;
+
+ if (ip->i_res)
+ return 0;
+
+ res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
+ if (!res)
+ error = -ENOMEM;
+
+ down_write(&ip->i_rw_mutex);
+ if (ip->i_res)
+ kmem_cache_free(gfs2_rsrv_cachep, res);
+ else
+ ip->i_res = res;
+ up_write(&ip->i_rw_mutex);
+ return error;
+}
+
+static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs)
+{
+ gfs2_print_dbg(seq, " r: %llu s:%llu b:%u f:%u\n",
+ rs->rs_rgd->rd_addr, gfs2_rs_startblk(rs), rs->rs_biblk,
+ rs->rs_free);
+}
+
+/**
+ * __rs_deltree - remove a multi-block reservation from the rgd tree
+ * @rs: The reservation to remove
+ *
+ */
+static void __rs_deltree(struct gfs2_blkreserv *rs)
+{
+ struct gfs2_rgrpd *rgd;
+
+ if (!gfs2_rs_active(rs))
+ return;
+
+ rgd = rs->rs_rgd;
+ /* We can't do this: The reason is that when the rgrp is invalidated,
+ it's in the "middle" of acquiring the glock, but the HOLDER bit
+ isn't set yet:
+ BUG_ON(!gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl));*/
+ trace_gfs2_rs(NULL, rs, TRACE_RS_TREEDEL);
+
+ if (!RB_EMPTY_ROOT(&rgd->rd_rstree))
+ rb_erase(&rs->rs_node, &rgd->rd_rstree);
+ BUG_ON(!rgd->rd_rs_cnt);
+ rgd->rd_rs_cnt--;
+
+ if (rs->rs_free) {
+ /* return reserved blocks to the rgrp and the ip */
+ BUG_ON(rs->rs_rgd->rd_reserved < rs->rs_free);
+ rs->rs_rgd->rd_reserved -= rs->rs_free;
+ rs->rs_free = 0;
+ clear_bit(GBF_FULL, &rs->rs_bi->bi_flags);
+ smp_mb__after_clear_bit();
+ }
+ /* We can't change any of the step 1 or step 2 components of the rs.
+ E.g. We can't set rs_rgd to NULL because the rgd glock is held and
+ dequeued through this pointer.
+ Can't: atomic_set(&rs->rs_sizehint, 0);
+ Can't: rs->rs_requested = 0;
+ Can't: rs->rs_rgd = NULL;*/
+ rs->rs_bi = NULL;
+ rs->rs_biblk = 0;
+}
+
+/**
+ * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
+ * @rs: The reservation to remove
+ *
+ */
+void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
+{
+ struct gfs2_rgrpd *rgd;
+
+ if (!gfs2_rs_active(rs))
+ return;
+
+ rgd = rs->rs_rgd;
+ spin_lock(&rgd->rd_rsspin);
+ __rs_deltree(rs);
+ spin_unlock(&rgd->rd_rsspin);
+}
+
+/**
+ * gfs2_rs_delete - delete a multi-block reservation
+ * @ip: The inode for this reservation
+ *
+ */
+void gfs2_rs_delete(struct gfs2_inode *ip)
+{
+ down_write(&ip->i_rw_mutex);
+ if (ip->i_res) {
+ gfs2_rs_deltree(ip->i_res);
+ trace_gfs2_rs(ip, ip->i_res, TRACE_RS_DELETE);
+ BUG_ON(ip->i_res->rs_free);
+ kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
+ ip->i_res = NULL;
+ }
+ up_write(&ip->i_rw_mutex);
+}
+
+/**
+ * return_all_reservations - return all reserved blocks back to the rgrp.
+ * @rgd: the rgrp that needs its space back
+ *
+ * We previously reserved a bunch of blocks for allocation. Now we need to
+ * give them back. This leave the reservation structures in tact, but removes
+ * all of their corresponding "no-fly zones".
+ */
+static void return_all_reservations(struct gfs2_rgrpd *rgd)
+{
+ struct rb_node *n;
+ struct gfs2_blkreserv *rs;
+
+ spin_lock(&rgd->rd_rsspin);
+ while ((n = rb_first(&rgd->rd_rstree))) {
+ rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
+ __rs_deltree(rs);
+ }
+ spin_unlock(&rgd->rd_rsspin);
+}
+
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
struct rb_node *n;
@@ -439,6 +624,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
gfs2_free_clones(rgd);
kfree(rgd->rd_bits);
+ return_all_reservations(rgd);
kmem_cache_free(gfs2_rgrpd_cachep, rgd);
}
}
@@ -616,6 +802,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
rgd->rd_data = be32_to_cpu(buf.ri_data);
rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
+ spin_lock_init(&rgd->rd_rsspin);
error = compute_bitstructs(rgd);
if (error)
@@ -627,6 +814,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
goto fail;
rgd->rd_gl->gl_object = rgd;
+ rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
if (rgd->rd_data > sdp->sd_max_rg_data)
sdp->sd_max_rg_data = rgd->rd_data;
@@ -736,9 +924,65 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}
+static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
+{
+ struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
+ struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
+
+ if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
+ rgl->rl_dinodes != str->rg_dinodes ||
+ rgl->rl_igeneration != str->rg_igeneration)
+ return 0;
+ return 1;
+}
+
+static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
+{
+ const struct gfs2_rgrp *str = buf;
+
+ rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
+ rgl->rl_flags = str->rg_flags;
+ rgl->rl_free = str->rg_free;
+ rgl->rl_dinodes = str->rg_dinodes;
+ rgl->rl_igeneration = str->rg_igeneration;
+ rgl->__pad = 0UL;
+}
+
+static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
+{
+ struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
+ u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
+ rgl->rl_unlinked = cpu_to_be32(unlinked);
+}
+
+static u32 count_unlinked(struct gfs2_rgrpd *rgd)
+{
+ struct gfs2_bitmap *bi;
+ const u32 length = rgd->rd_length;
+ const u8 *buffer = NULL;
+ u32 i, goal, count = 0;
+
+ for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
+ goal = 0;
+ buffer = bi->bi_bh->b_data + bi->bi_offset;
+ WARN_ON(!buffer_uptodate(bi->bi_bh));
+ while (goal < bi->bi_len * GFS2_NBBY) {
+ goal = gfs2_bitfit(buffer, bi->bi_len, goal,
+ GFS2_BLKST_UNLINKED);
+ if (goal == BFITNOENT)
+ break;
+ count++;
+ goal++;
+ }
+ }
+
+ return count;
+}
+
+
/**
- * gfs2_rgrp_go_lock - Read in a RG's header and bitmaps
- * @gh: The glock holder for the resource group
+ * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
+ * @rgd: the struct gfs2_rgrpd describing the RG to read in
*
* Read in all of a Resource Group's header and bitmap blocks.
* Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
@@ -746,9 +990,8 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
* Returns: errno
*/
-int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
+int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
- struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
struct gfs2_sbd *sdp = rgd->rd_sbd;
struct gfs2_glock *gl = rgd->rd_gl;
unsigned int length = rgd->rd_length;
@@ -756,6 +999,9 @@ int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
unsigned int x, y;
int error;
+ if (rgd->rd_bits[0].bi_bh != NULL)
+ return 0;
+
for (x = 0; x < length; x++) {
bi = rgd->rd_bits + x;
error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
@@ -782,7 +1028,20 @@ int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
rgd->rd_free_clone = rgd->rd_free;
}
-
+ if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
+ rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
+ gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
+ rgd->rd_bits[0].bi_bh->b_data);
+ }
+ else if (sdp->sd_args.ar_rgrplvb) {
+ if (!gfs2_rgrp_lvb_valid(rgd)){
+ gfs2_consist_rgrpd(rgd);
+ error = -EIO;
+ goto fail;
+ }
+ if (rgd->rd_rgl->rl_unlinked == 0)
+ rgd->rd_flags &= ~GFS2_RDF_CHECK;
+ }
return 0;
fail:
@@ -796,6 +1055,39 @@ fail:
return error;
}
+int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
+{
+ u32 rl_flags;
+
+ if (rgd->rd_flags & GFS2_RDF_UPTODATE)
+ return 0;
+
+ if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
+ return gfs2_rgrp_bh_get(rgd);
+
+ rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
+ rl_flags &= ~GFS2_RDF_MASK;
+ rgd->rd_flags &= GFS2_RDF_MASK;
+ rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
+ if (rgd->rd_rgl->rl_unlinked == 0)
+ rgd->rd_flags &= ~GFS2_RDF_CHECK;
+ rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
+ rgd->rd_free_clone = rgd->rd_free;
+ rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
+ rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
+ return 0;
+}
+
+int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
+{
+ struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
+ struct gfs2_sbd *sdp = rgd->rd_sbd;
+
+ if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
+ return 0;
+ return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
+}
+
/**
* gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
* @gh: The glock holder for the resource group
@@ -809,8 +1101,10 @@ void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
for (x = 0; x < length; x++) {
struct gfs2_bitmap *bi = rgd->rd_bits + x;
- brelse(bi->bi_bh);
- bi->bi_bh = NULL;
+ if (bi->bi_bh) {
+ brelse(bi->bi_bh);
+ bi->bi_bh = NULL;
+ }
}
}
@@ -954,6 +1248,7 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
rgd->rd_flags |= GFS2_RGF_TRIMMED;
gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
gfs2_rgrp_out(rgd, bh->b_data);
+ gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
gfs2_trans_end(sdp);
}
}
@@ -974,38 +1269,184 @@ out:
}
/**
- * gfs2_qadata_get - get the struct gfs2_qadata structure for an inode
- * @ip: the incore GFS2 inode structure
+ * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
+ * @bi: the bitmap with the blocks
+ * @ip: the inode structure
+ * @biblk: the 32-bit block number relative to the start of the bitmap
+ * @amount: the number of blocks to reserve
*
- * Returns: the struct gfs2_qadata
+ * Returns: NULL - reservation was already taken, so not inserted
+ * pointer to the inserted reservation
*/
+static struct gfs2_blkreserv *rs_insert(struct gfs2_bitmap *bi,
+ struct gfs2_inode *ip, u32 biblk,
+ int amount)
+{
+ struct rb_node **newn, *parent = NULL;
+ int rc;
+ struct gfs2_blkreserv *rs = ip->i_res;
+ struct gfs2_rgrpd *rgd = rs->rs_rgd;
+ u64 fsblock = gfs2_bi2rgd_blk(bi, biblk) + rgd->rd_data0;
-struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip)
+ spin_lock(&rgd->rd_rsspin);
+ newn = &rgd->rd_rstree.rb_node;
+ BUG_ON(!ip->i_res);
+ BUG_ON(gfs2_rs_active(rs));
+ /* Figure out where to put new node */
+ /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/
+ while (*newn) {
+ struct gfs2_blkreserv *cur =
+ rb_entry(*newn, struct gfs2_blkreserv, rs_node);
+
+ parent = *newn;
+ rc = rs_cmp(fsblock, amount, cur);
+ if (rc > 0)
+ newn = &((*newn)->rb_right);
+ else if (rc < 0)
+ newn = &((*newn)->rb_left);
+ else {
+ spin_unlock(&rgd->rd_rsspin);
+ return NULL; /* reservation already in use */
+ }
+ }
+
+ /* Do our reservation work */
+ rs = ip->i_res;
+ rs->rs_free = amount;
+ rs->rs_biblk = biblk;
+ rs->rs_bi = bi;
+ rb_link_node(&rs->rs_node, parent, newn);
+ rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
+
+ /* Do our inode accounting for the reservation */
+ /*BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));*/
+
+ /* Do our rgrp accounting for the reservation */
+ rgd->rd_reserved += amount; /* blocks reserved */
+ rgd->rd_rs_cnt++; /* number of in-tree reservations */
+ spin_unlock(&rgd->rd_rsspin);
+ trace_gfs2_rs(ip, rs, TRACE_RS_INSERT);
+ return rs;
+}
+
+/**
+ * unclaimed_blocks - return number of blocks that aren't spoken for
+ */
+static u32 unclaimed_blocks(struct gfs2_rgrpd *rgd)
{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- int error;
- BUG_ON(ip->i_qadata != NULL);
- ip->i_qadata = kzalloc(sizeof(struct gfs2_qadata), GFP_NOFS);
- error = gfs2_rindex_update(sdp);
- if (error)
- fs_warn(sdp, "rindex update returns %d\n", error);
- return ip->i_qadata;
+ return rgd->rd_free_clone - rgd->rd_reserved;
}
/**
- * gfs2_blkrsv_get - get the struct gfs2_blkreserv structure for an inode
- * @ip: the incore GFS2 inode structure
+ * rg_mblk_search - find a group of multiple free blocks
+ * @rgd: the resource group descriptor
+ * @rs: the block reservation
+ * @ip: pointer to the inode for which we're reserving blocks
*
- * Returns: the struct gfs2_qadata
+ * This is very similar to rgblk_search, except we're looking for whole
+ * 64-bit words that represent a chunk of 32 free blocks. I'm only focusing
+ * on aligned dwords for speed's sake.
+ *
+ * Returns: 0 if successful or BFITNOENT if there isn't enough free space
*/
-static int gfs2_blkrsv_get(struct gfs2_inode *ip)
+static int rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
- BUG_ON(ip->i_res != NULL);
- ip->i_res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
- if (!ip->i_res)
- return -ENOMEM;
- return 0;
+ struct gfs2_bitmap *bi = rgd->rd_bits;
+ const u32 length = rgd->rd_length;
+ u32 blk;
+ unsigned int buf, x, search_bytes;
+ u8 *buffer = NULL;
+ u8 *ptr, *end, *nonzero;
+ u32 goal, rsv_bytes;
+ struct gfs2_blkreserv *rs;
+ u32 best_rs_bytes, unclaimed;
+ int best_rs_blocks;
+
+ /* Find bitmap block that contains bits for goal block */
+ if (rgrp_contains_block(rgd, ip->i_goal))
+ goal = ip->i_goal - rgd->rd_data0;
+ else
+ goal = rgd->rd_last_alloc;
+ for (buf = 0; buf < length; buf++) {
+ bi = rgd->rd_bits + buf;
+ /* Convert scope of "goal" from rgrp-wide to within
+ found bit block */
+ if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
+ goal -= bi->bi_start * GFS2_NBBY;
+ goto do_search;
+ }
+ }
+ buf = 0;
+ goal = 0;
+
+do_search:
+ best_rs_blocks = max_t(int, atomic_read(&ip->i_res->rs_sizehint),
+ (RGRP_RSRV_MINBLKS * rgd->rd_length));
+ best_rs_bytes = (best_rs_blocks *
+ (1 + (RSRV_CONTENTION_FACTOR * rgd->rd_rs_cnt))) /
+ GFS2_NBBY; /* 1 + is for our not-yet-created reservation */
+ best_rs_bytes = ALIGN(best_rs_bytes, sizeof(u64));
+ unclaimed = unclaimed_blocks(rgd);
+ if (best_rs_bytes * GFS2_NBBY > unclaimed)
+ best_rs_bytes = unclaimed >> GFS2_BIT_SIZE;
+
+ for (x = 0; x <= length; x++) {
+ bi = rgd->rd_bits + buf;
+
+ if (test_bit(GBF_FULL, &bi->bi_flags))
+ goto skip;
+
+ WARN_ON(!buffer_uptodate(bi->bi_bh));
+ if (bi->bi_clone)
+ buffer = bi->bi_clone + bi->bi_offset;
+ else
+ buffer = bi->bi_bh->b_data + bi->bi_offset;
+
+ /* We have to keep the reservations aligned on u64 boundaries
+ otherwise we could get situations where a byte can't be
+ used because it's after a reservation, but a free bit still
+ is within the reservation's area. */
+ ptr = buffer + ALIGN(goal >> GFS2_BIT_SIZE, sizeof(u64));
+ end = (buffer + bi->bi_len);
+ while (ptr < end) {
+ rsv_bytes = 0;
+ if ((ptr + best_rs_bytes) <= end)
+ search_bytes = best_rs_bytes;
+ else
+ search_bytes = end - ptr;
+ BUG_ON(!search_bytes);
+ nonzero = memchr_inv(ptr, 0, search_bytes);
+ /* If the lot is all zeroes, reserve the whole size. If
+ there's enough zeroes to satisfy the request, use
+ what we can. If there's not enough, keep looking. */
+ if (nonzero == NULL)
+ rsv_bytes = search_bytes;
+ else if ((nonzero - ptr) * GFS2_NBBY >=
+ ip->i_res->rs_requested)
+ rsv_bytes = (nonzero - ptr);
+
+ if (rsv_bytes) {
+ blk = ((ptr - buffer) * GFS2_NBBY);
+ BUG_ON(blk >= bi->bi_len * GFS2_NBBY);
+ rs = rs_insert(bi, ip, blk,
+ rsv_bytes * GFS2_NBBY);
+ if (IS_ERR(rs))
+ return PTR_ERR(rs);
+ if (rs)
+ return 0;
+ }
+ ptr += ALIGN(search_bytes, sizeof(u64));
+ }
+skip:
+ /* Try next bitmap block (wrap back to rgrp header
+ if at end) */
+ buf++;
+ buf %= length;
+ goal = 0;
+ }
+
+ return BFITNOENT;
}
/**
@@ -1014,24 +1455,26 @@ static int gfs2_blkrsv_get(struct gfs2_inode *ip)
* @ip: the inode
*
* If there's room for the requested blocks to be allocated from the RG:
+ * This will try to get a multi-block reservation first, and if that doesn't
+ * fit, it will take what it can.
*
* Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
*/
-static int try_rgrp_fit(const struct gfs2_rgrpd *rgd, const struct gfs2_inode *ip)
+static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
{
- const struct gfs2_blkreserv *rs = ip->i_res;
+ struct gfs2_blkreserv *rs = ip->i_res;
if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
return 0;
- if (rgd->rd_free_clone >= rs->rs_requested)
+ /* Look for a multi-block reservation. */
+ if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS &&
+ rg_mblk_search(rgd, ip) != BFITNOENT)
+ return 1;
+ if (unclaimed_blocks(rgd) >= rs->rs_requested)
return 1;
- return 0;
-}
-static inline u32 gfs2_bi2rgd_blk(struct gfs2_bitmap *bi, u32 blk)
-{
- return (bi->bi_start * GFS2_NBBY) + blk;
+ return 0;
}
/**
@@ -1101,119 +1544,120 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
}
/**
- * get_local_rgrp - Choose and lock a rgrp for allocation
+ * gfs2_inplace_reserve - Reserve space in the filesystem
* @ip: the inode to reserve space for
- * @last_unlinked: the last unlinked block
- *
- * Try to acquire rgrp in way which avoids contending with others.
+ * @requested: the number of blocks to be reserved
*
* Returns: errno
*/
-static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
+int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_rgrpd *rgd, *begin = NULL;
+ struct gfs2_rgrpd *begin = NULL;
struct gfs2_blkreserv *rs = ip->i_res;
- int error, rg_locked, flags = LM_FLAG_TRY;
+ int error = 0, rg_locked, flags = LM_FLAG_TRY;
+ u64 last_unlinked = NO_BLOCK;
int loops = 0;
- if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
- rgd = begin = ip->i_rgd;
- else
- rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
-
- if (rgd == NULL)
+ if (sdp->sd_args.ar_rgrplvb)
+ flags |= GL_SKIP;
+ rs->rs_requested = requested;
+ if (gfs2_assert_warn(sdp, requested)) {
+ error = -EINVAL;
+ goto out;
+ }
+ if (gfs2_rs_active(rs)) {
+ begin = rs->rs_rgd;
+ flags = 0; /* Yoda: Do or do not. There is no try */
+ } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
+ rs->rs_rgd = begin = ip->i_rgd;
+ } else {
+ rs->rs_rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
+ }
+ if (rs->rs_rgd == NULL)
return -EBADSLT;
while (loops < 3) {
rg_locked = 0;
- if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
+ if (gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl)) {
rg_locked = 1;
error = 0;
+ } else if (!loops && !gfs2_rs_active(rs) &&
+ rs->rs_rgd->rd_rs_cnt > RGRP_RSRV_MAX_CONTENDERS) {
+ /* If the rgrp already is maxed out for contenders,
+ we can eliminate it as a "first pass" without even
+ requesting the rgrp glock. */
+ error = GLR_TRYFAILED;
} else {
- error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
- flags, &rs->rs_rgd_gh);
+ error = gfs2_glock_nq_init(rs->rs_rgd->rd_gl,
+ LM_ST_EXCLUSIVE, flags,
+ &rs->rs_rgd_gh);
+ if (!error && sdp->sd_args.ar_rgrplvb) {
+ error = update_rgrp_lvb(rs->rs_rgd);
+ if (error) {
+ gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
+ return error;
+ }
+ }
}
switch (error) {
case 0:
- if (try_rgrp_fit(rgd, ip)) {
- ip->i_rgd = rgd;
+ if (gfs2_rs_active(rs)) {
+ if (unclaimed_blocks(rs->rs_rgd) +
+ rs->rs_free >= rs->rs_requested) {
+ ip->i_rgd = rs->rs_rgd;
+ return 0;
+ }
+ /* We have a multi-block reservation, but the
+ rgrp doesn't have enough free blocks to
+ satisfy the request. Free the reservation
+ and look for a suitable rgrp. */
+ gfs2_rs_deltree(rs);
+ }
+ if (try_rgrp_fit(rs->rs_rgd, ip)) {
+ if (sdp->sd_args.ar_rgrplvb)
+ gfs2_rgrp_bh_get(rs->rs_rgd);
+ ip->i_rgd = rs->rs_rgd;
return 0;
}
- if (rgd->rd_flags & GFS2_RDF_CHECK)
- try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr);
+ if (rs->rs_rgd->rd_flags & GFS2_RDF_CHECK) {
+ if (sdp->sd_args.ar_rgrplvb)
+ gfs2_rgrp_bh_get(rs->rs_rgd);
+ try_rgrp_unlink(rs->rs_rgd, &last_unlinked,
+ ip->i_no_addr);
+ }
if (!rg_locked)
gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
/* fall through */
case GLR_TRYFAILED:
- rgd = gfs2_rgrpd_get_next(rgd);
- if (rgd == begin) {
- flags = 0;
- loops++;
- }
+ rs->rs_rgd = gfs2_rgrpd_get_next(rs->rs_rgd);
+ rs->rs_rgd = rs->rs_rgd ? : begin; /* if NULL, wrap */
+ if (rs->rs_rgd != begin) /* If we didn't wrap */
+ break;
+
+ flags &= ~LM_FLAG_TRY;
+ loops++;
+ /* Check that fs hasn't grown if writing to rindex */
+ if (ip == GFS2_I(sdp->sd_rindex) &&
+ !sdp->sd_rindex_uptodate) {
+ error = gfs2_ri_update(ip);
+ if (error)
+ goto out;
+ } else if (loops == 2)
+ /* Flushing the log may release space */
+ gfs2_log_flush(sdp, NULL);
break;
default:
- return error;
+ goto out;
}
}
-
- return -ENOSPC;
-}
-
-static void gfs2_blkrsv_put(struct gfs2_inode *ip)
-{
- BUG_ON(ip->i_res == NULL);
- kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
- ip->i_res = NULL;
-}
-
-/**
- * gfs2_inplace_reserve - Reserve space in the filesystem
- * @ip: the inode to reserve space for
- * @requested: the number of blocks to be reserved
- *
- * Returns: errno
- */
-
-int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
-{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_blkreserv *rs;
- int error;
- u64 last_unlinked = NO_BLOCK;
- int tries = 0;
-
- error = gfs2_blkrsv_get(ip);
- if (error)
- return error;
-
- rs = ip->i_res;
- rs->rs_requested = requested;
- if (gfs2_assert_warn(sdp, requested)) {
- error = -EINVAL;
- goto out;
- }
-
- do {
- error = get_local_rgrp(ip, &last_unlinked);
- if (error != -ENOSPC)
- break;
- /* Check that fs hasn't grown if writing to rindex */
- if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) {
- error = gfs2_ri_update(ip);
- if (error)
- break;
- continue;
- }
- /* Flushing the log may release space */
- gfs2_log_flush(sdp, NULL);
- } while (tries++ < 3);
+ error = -ENOSPC;
out:
if (error)
- gfs2_blkrsv_put(ip);
+ rs->rs_requested = 0;
return error;
}
@@ -1228,9 +1672,15 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
{
struct gfs2_blkreserv *rs = ip->i_res;
+ if (!rs)
+ return;
+
+ if (!rs->rs_free)
+ gfs2_rs_deltree(rs);
+
if (rs->rs_rgd_gh.gh_gl)
gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
- gfs2_blkrsv_put(ip);
+ rs->rs_requested = 0;
}
/**
@@ -1326,7 +1776,27 @@ do_search:
if (state != GFS2_BLKST_UNLINKED && bi->bi_clone)
buffer = bi->bi_clone + bi->bi_offset;
- biblk = gfs2_bitfit(buffer, bi->bi_len, goal, state);
+ while (1) {
+ struct gfs2_blkreserv *rs;
+ u32 rgblk;
+
+ biblk = gfs2_bitfit(buffer, bi->bi_len, goal, state);
+ if (biblk == BFITNOENT)
+ break;
+ /* Check if this block is reserved() */
+ rgblk = gfs2_bi2rgd_blk(bi, biblk);
+ rs = rs_find(rgd, rgblk);
+ if (rs == NULL)
+ break;
+
+ BUG_ON(rs->rs_bi != bi);
+ biblk = BFITNOENT;
+ /* This should jump to the first block after the
+ reservation. */
+ goal = rs->rs_biblk + rs->rs_free;
+ if (goal >= bi->bi_len * GFS2_NBBY)
+ break;
+ }
if (biblk != BFITNOENT)
break;
@@ -1362,8 +1832,9 @@ static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi,
u32 blk, bool dinode, unsigned int *n)
{
const unsigned int elen = *n;
- u32 goal;
+ u32 goal, rgblk;
const u8 *buffer = NULL;
+ struct gfs2_blkreserv *rs;
*n = 0;
buffer = bi->bi_bh->b_data + bi->bi_offset;
@@ -1376,6 +1847,10 @@ static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi,
goal++;
if (goal >= (bi->bi_len * GFS2_NBBY))
break;
+ rgblk = gfs2_bi2rgd_blk(bi, goal);
+ rs = rs_find(rgd, rgblk);
+ if (rs) /* Oops, we bumped into someone's reservation */
+ break;
if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
GFS2_BLKST_FREE)
break;
@@ -1451,12 +1926,22 @@ static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
{
- const struct gfs2_rgrpd *rgd = gl->gl_object;
+ struct gfs2_rgrpd *rgd = gl->gl_object;
+ struct gfs2_blkreserv *trs;
+ const struct rb_node *n;
+
if (rgd == NULL)
return 0;
- gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u\n",
+ gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
(unsigned long long)rgd->rd_addr, rgd->rd_flags,
- rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes);
+ rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
+ rgd->rd_reserved);
+ spin_lock(&rgd->rd_rsspin);
+ for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
+ trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
+ dump_rs(seq, trs);
+ }
+ spin_unlock(&rgd->rd_rsspin);
return 0;
}
@@ -1471,10 +1956,63 @@ static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
}
/**
+ * claim_reserved_blks - Claim previously reserved blocks
+ * @ip: the inode that's claiming the reservation
+ * @dinode: 1 if this block is a dinode block, otherwise data block
+ * @nblocks: desired extent length
+ *
+ * Lay claim to previously allocated block reservation blocks.
+ * Returns: Starting block number of the blocks claimed.
+ * Sets *nblocks to the actual extent length allocated.
+ */
+static u64 claim_reserved_blks(struct gfs2_inode *ip, bool dinode,
+ unsigned int *nblocks)
+{
+ struct gfs2_blkreserv *rs = ip->i_res;
+ struct gfs2_rgrpd *rgd = rs->rs_rgd;
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+ struct gfs2_bitmap *bi;
+ u64 start_block = gfs2_rs_startblk(rs);
+ const unsigned int elen = *nblocks;
+
+ /*BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));*/
+ gfs2_assert_withdraw(sdp, rgd);
+ /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/
+ bi = rs->rs_bi;
+ gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
+
+ for (*nblocks = 0; *nblocks < elen && rs->rs_free; (*nblocks)++) {
+ /* Make sure the bitmap hasn't changed */
+ gfs2_setbit(rgd, bi->bi_clone, bi, rs->rs_biblk,
+ dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
+ rs->rs_biblk++;
+ rs->rs_free--;
+
+ BUG_ON(!rgd->rd_reserved);
+ rgd->rd_reserved--;
+ dinode = false;
+ trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
+ }
+
+ if (!rs->rs_free) {
+ struct gfs2_rgrpd *rgd = ip->i_res->rs_rgd;
+
+ gfs2_rs_deltree(rs);
+ /* -nblocks because we haven't returned to do the math yet.
+ I'm doing the math backwards to prevent negative numbers,
+ but think of it as:
+ if (unclaimed_blocks(rgd) - *nblocks >= RGRP_RSRV_MINBLKS */
+ if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS + *nblocks)
+ rg_mblk_search(rgd, ip);
+ }
+ return start_block;
+}
+
+/**
* gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
* @ip: the inode to allocate the block for
* @bn: Used to return the starting block number
- * @ndata: requested number of blocks/extent length (value/result)
+ * @nblocks: requested number of blocks/extent length (value/result)
* @dinode: 1 if we're allocating a dinode block, else 0
* @generation: the generation number of the inode
*
@@ -1496,23 +2034,37 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
/* Only happens if there is a bug in gfs2, return something distinctive
* to ensure that it is noticed.
*/
- if (ip->i_res == NULL)
+ if (ip->i_res->rs_requested == 0)
return -ECANCELED;
- rgd = ip->i_rgd;
-
- if (!dinode && rgrp_contains_block(rgd, ip->i_goal))
- goal = ip->i_goal - rgd->rd_data0;
- else
- goal = rgd->rd_last_alloc;
-
- blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
+ /* Check if we have a multi-block reservation, and if so, claim the
+ next free block from it. */
+ if (gfs2_rs_active(ip->i_res)) {
+ BUG_ON(!ip->i_res->rs_free);
+ rgd = ip->i_res->rs_rgd;
+ block = claim_reserved_blks(ip, dinode, nblocks);
+ } else {
+ rgd = ip->i_rgd;
- /* Since all blocks are reserved in advance, this shouldn't happen */
- if (blk == BFITNOENT)
- goto rgrp_error;
+ if (!dinode && rgrp_contains_block(rgd, ip->i_goal))
+ goal = ip->i_goal - rgd->rd_data0;
+ else
+ goal = rgd->rd_last_alloc;
+
+ blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi);
+
+ /* Since all blocks are reserved in advance, this shouldn't
+ happen */
+ if (blk == BFITNOENT) {
+ printk(KERN_WARNING "BFITNOENT, nblocks=%u\n",
+ *nblocks);
+ printk(KERN_WARNING "FULL=%d\n",
+ test_bit(GBF_FULL, &rgd->rd_bits->bi_flags));
+ goto rgrp_error;
+ }
- block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
+ block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks);
+ }
ndata = *nblocks;
if (dinode)
ndata--;
@@ -1529,8 +2081,10 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
brelse(dibh);
}
}
- if (rgd->rd_free < *nblocks)
+ if (rgd->rd_free < *nblocks) {
+ printk(KERN_WARNING "nblocks=%u\n", *nblocks);
goto rgrp_error;
+ }
rgd->rd_free -= *nblocks;
if (dinode) {
@@ -1542,6 +2096,7 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+ gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
if (dinode)
@@ -1588,6 +2143,7 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+ gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
/* Directories keep their data in the metadata address space */
if (meta || ip->i_depth)
@@ -1624,6 +2180,8 @@ void gfs2_unlink_di(struct inode *inode)
trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+ gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
+ update_rgrp_lvb_unlinked(rgd, 1);
}
static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
@@ -1643,6 +2201,8 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
+ gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
+ update_rgrp_lvb_unlinked(rgd, -1);
gfs2_statfs_change(sdp, 0, +1, -1);
}
@@ -1784,6 +2344,7 @@ void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
for (x = 0; x < rlist->rl_rgrps; x++)
gfs2_holder_uninit(&rlist->rl_ghs[x]);
kfree(rlist->rl_ghs);
+ rlist->rl_ghs = NULL;
}
}
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index b4b10f4de25f..ca6e26729b86 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -13,6 +13,14 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
+/* Since each block in the file system is represented by two bits in the
+ * bitmap, one 64-bit word in the bitmap will represent 32 blocks.
+ * By reserving 32 blocks at a time, we can optimize / shortcut how we search
+ * through the bitmaps by looking a word at a time.
+ */
+#define RGRP_RSRV_MINBYTES 8
+#define RGRP_RSRV_MINBLKS ((u32)(RGRP_RSRV_MINBYTES * GFS2_NBBY))
+
struct gfs2_rgrpd;
struct gfs2_sbd;
struct gfs2_holder;
@@ -29,13 +37,7 @@ extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
-extern struct gfs2_qadata *gfs2_qadata_get(struct gfs2_inode *ip);
-static inline void gfs2_qadata_put(struct gfs2_inode *ip)
-{
- BUG_ON(ip->i_qadata == NULL);
- kfree(ip->i_qadata);
- ip->i_qadata = NULL;
-}
+extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
extern int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested);
extern void gfs2_inplace_release(struct gfs2_inode *ip);
@@ -43,6 +45,9 @@ extern void gfs2_inplace_release(struct gfs2_inode *ip);
extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
bool dinode, u64 *generation);
+extern int gfs2_rs_alloc(struct gfs2_inode *ip);
+extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
+extern void gfs2_rs_delete(struct gfs2_inode *ip);
extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
@@ -68,4 +73,30 @@ extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed);
extern int gfs2_fitrim(struct file *filp, void __user *argp);
+/* This is how to tell if a multi-block reservation is "inplace" reserved: */
+static inline int gfs2_mb_reserved(struct gfs2_inode *ip)
+{
+ if (ip->i_res && ip->i_res->rs_requested)
+ return 1;
+ return 0;
+}
+
+/* This is how to tell if a multi-block reservation is in the rgrp tree: */
+static inline int gfs2_rs_active(struct gfs2_blkreserv *rs)
+{
+ if (rs && rs->rs_bi)
+ return 1;
+ return 0;
+}
+
+static inline u32 gfs2_bi2rgd_blk(const struct gfs2_bitmap *bi, u32 blk)
+{
+ return (bi->bi_start * GFS2_NBBY) + blk;
+}
+
+static inline u64 gfs2_rs_startblk(const struct gfs2_blkreserv *rs)
+{
+ return gfs2_bi2rgd_blk(rs->rs_bi, rs->rs_biblk) + rs->rs_rgd->rd_data0;
+}
+
#endif /* __RGRP_DOT_H__ */
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 713e621c240b..fc3168f47a14 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -78,6 +78,8 @@ enum {
Opt_quota_quantum,
Opt_barrier,
Opt_nobarrier,
+ Opt_rgrplvb,
+ Opt_norgrplvb,
Opt_error,
};
@@ -115,6 +117,8 @@ static const match_table_t tokens = {
{Opt_quota_quantum, "quota_quantum=%d"},
{Opt_barrier, "barrier"},
{Opt_nobarrier, "nobarrier"},
+ {Opt_rgrplvb, "rgrplvb"},
+ {Opt_norgrplvb, "norgrplvb"},
{Opt_error, NULL}
};
@@ -267,6 +271,12 @@ int gfs2_mount_args(struct gfs2_args *args, char *options)
case Opt_nobarrier:
args->ar_nobarrier = 1;
break;
+ case Opt_rgrplvb:
+ args->ar_rgrplvb = 1;
+ break;
+ case Opt_norgrplvb:
+ args->ar_rgrplvb = 0;
+ break;
case Opt_error:
default:
printk(KERN_WARNING "GFS2: invalid mount option: %s\n", o);
@@ -838,7 +848,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
int error;
flush_workqueue(gfs2_delete_workqueue);
- gfs2_quota_sync(sdp->sd_vfs, 0, 1);
+ gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_statfs_sync(sdp->sd_vfs, 0);
error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, GL_NOCACHE,
@@ -952,6 +962,8 @@ restart:
static int gfs2_sync_fs(struct super_block *sb, int wait)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
+
+ gfs2_quota_sync(sb, -1);
if (wait && sdp)
gfs2_log_flush(sdp, NULL);
return 0;
@@ -1379,6 +1391,8 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
seq_printf(s, ",nobarrier");
if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
seq_printf(s, ",demote_interface_used");
+ if (args->ar_rgrplvb)
+ seq_printf(s, ",rgrplvb");
return 0;
}
@@ -1399,7 +1413,6 @@ static void gfs2_final_release_pages(struct gfs2_inode *ip)
static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_qadata *qa;
struct gfs2_rgrpd *rgd;
struct gfs2_holder gh;
int error;
@@ -1409,13 +1422,13 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
return -EIO;
}
- qa = gfs2_qadata_get(ip);
- if (!qa)
- return -ENOMEM;
+ error = gfs2_rindex_update(sdp);
+ if (error)
+ return error;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
- goto out;
+ return error;
rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
if (!rgd) {
@@ -1443,8 +1456,6 @@ out_rg_gunlock:
gfs2_glock_dq_uninit(&gh);
out_qs:
gfs2_quota_unhold(ip);
-out:
- gfs2_qadata_put(ip);
return error;
}
@@ -1545,6 +1556,9 @@ out_truncate:
out_unlock:
/* Error path for case 1 */
+ if (gfs2_rs_active(ip->i_res))
+ gfs2_rs_deltree(ip->i_res);
+
if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags))
gfs2_glock_dq(&ip->i_iopen_gh);
gfs2_holder_uninit(&ip->i_iopen_gh);
@@ -1554,6 +1568,7 @@ out_unlock:
out:
/* Case 3 starts here */
truncate_inode_pages(&inode->i_data, 0);
+ gfs2_rs_delete(ip);
clear_inode(inode);
gfs2_dir_hash_inval(ip);
ip->i_gl->gl_object = NULL;
@@ -1576,6 +1591,7 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
ip->i_flags = 0;
ip->i_gl = NULL;
ip->i_rgd = NULL;
+ ip->i_res = NULL;
}
return &ip->i_inode;
}
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 9c2592b1d5ff..8056b7b7238e 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -168,7 +168,7 @@ static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
if (simple_strtol(buf, NULL, 0) != 1)
return -EINVAL;
- gfs2_quota_sync(sdp->sd_vfs, 0, 1);
+ gfs2_quota_sync(sdp->sd_vfs, 0);
return len;
}
@@ -276,7 +276,15 @@ static struct attribute *gfs2_attrs[] = {
NULL,
};
+static void gfs2_sbd_release(struct kobject *kobj)
+{
+ struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
+
+ kfree(sdp);
+}
+
static struct kobj_type gfs2_ktype = {
+ .release = gfs2_sbd_release,
.default_attrs = gfs2_attrs,
.sysfs_ops = &gfs2_attr_ops,
};
@@ -583,6 +591,7 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
char ro[20];
char spectator[20];
char *envp[] = { ro, spectator, NULL };
+ int sysfs_frees_sdp = 0;
sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0);
sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
@@ -591,8 +600,10 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
"%s", sdp->sd_table_name);
if (error)
- goto fail;
+ goto fail_reg;
+ sysfs_frees_sdp = 1; /* Freeing sdp is now done by sysfs calling
+ function gfs2_sbd_release. */
error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
if (error)
goto fail_reg;
@@ -615,9 +626,13 @@ fail_lock_module:
fail_tune:
sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
- kobject_put(&sdp->sd_kobj);
-fail:
+ free_percpu(sdp->sd_lkstats);
fs_err(sdp, "error %d adding sysfs files", error);
+ if (sysfs_frees_sdp)
+ kobject_put(&sdp->sd_kobj);
+ else
+ kfree(sdp);
+ sb->s_fs_info = NULL;
return error;
}
diff --git a/fs/gfs2/trace_gfs2.h b/fs/gfs2/trace_gfs2.h
index 1b8b81588199..a25c252fe412 100644
--- a/fs/gfs2/trace_gfs2.h
+++ b/fs/gfs2/trace_gfs2.h
@@ -14,6 +14,7 @@
#include <linux/ktime.h>
#include "incore.h"
#include "glock.h"
+#include "rgrp.h"
#define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }
#define glock_trace_name(x) __print_symbolic(x, \
@@ -31,6 +32,17 @@
{ GFS2_BLKST_DINODE, "dinode" }, \
{ GFS2_BLKST_UNLINKED, "unlinked" })
+#define TRACE_RS_DELETE 0
+#define TRACE_RS_TREEDEL 1
+#define TRACE_RS_INSERT 2
+#define TRACE_RS_CLAIM 3
+
+#define rs_func_name(x) __print_symbolic(x, \
+ { 0, "del " }, \
+ { 1, "tdel" }, \
+ { 2, "ins " }, \
+ { 3, "clm " })
+
#define show_glock_flags(flags) __print_flags(flags, "", \
{(1UL << GLF_LOCK), "l" }, \
{(1UL << GLF_DEMOTE), "D" }, \
@@ -470,6 +482,7 @@ TRACE_EVENT(gfs2_block_alloc,
__field( u8, block_state )
__field( u64, rd_addr )
__field( u32, rd_free_clone )
+ __field( u32, rd_reserved )
),
TP_fast_assign(
@@ -480,16 +493,58 @@ TRACE_EVENT(gfs2_block_alloc,
__entry->block_state = block_state;
__entry->rd_addr = rgd->rd_addr;
__entry->rd_free_clone = rgd->rd_free_clone;
+ __entry->rd_reserved = rgd->rd_reserved;
),
- TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u",
+ TP_printk("%u,%u bmap %llu alloc %llu/%lu %s rg:%llu rf:%u rr:%lu",
MAJOR(__entry->dev), MINOR(__entry->dev),
(unsigned long long)__entry->inum,
(unsigned long long)__entry->start,
(unsigned long)__entry->len,
block_state_name(__entry->block_state),
(unsigned long long)__entry->rd_addr,
- __entry->rd_free_clone)
+ __entry->rd_free_clone, (unsigned long)__entry->rd_reserved)
+);
+
+/* Keep track of multi-block reservations as they are allocated/freed */
+TRACE_EVENT(gfs2_rs,
+
+ TP_PROTO(const struct gfs2_inode *ip, const struct gfs2_blkreserv *rs,
+ u8 func),
+
+ TP_ARGS(ip, rs, func),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( u64, rd_addr )
+ __field( u32, rd_free_clone )
+ __field( u32, rd_reserved )
+ __field( u64, inum )
+ __field( u64, start )
+ __field( u32, free )
+ __field( u8, func )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rs->rs_rgd ? rs->rs_rgd->rd_sbd->sd_vfs->s_dev : 0;
+ __entry->rd_addr = rs->rs_rgd ? rs->rs_rgd->rd_addr : 0;
+ __entry->rd_free_clone = rs->rs_rgd ? rs->rs_rgd->rd_free_clone : 0;
+ __entry->rd_reserved = rs->rs_rgd ? rs->rs_rgd->rd_reserved : 0;
+ __entry->inum = ip ? ip->i_no_addr : 0;
+ __entry->start = gfs2_rs_startblk(rs);
+ __entry->free = rs->rs_free;
+ __entry->func = func;
+ ),
+
+ TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s "
+ "f:%lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->inum,
+ (unsigned long long)__entry->start,
+ (unsigned long long)__entry->rd_addr,
+ (unsigned long)__entry->rd_free_clone,
+ (unsigned long)__entry->rd_reserved,
+ rs_func_name(__entry->func), (unsigned long)__entry->free)
);
#endif /* _TRACE_GFS2_H */
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h
index 125d4572e1c0..41f42cdccbb8 100644
--- a/fs/gfs2/trans.h
+++ b/fs/gfs2/trans.h
@@ -31,7 +31,7 @@ struct gfs2_glock;
static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip)
{
const struct gfs2_blkreserv *rs = ip->i_res;
- if (rs->rs_requested < ip->i_rgd->rd_length)
+ if (rs && rs->rs_requested < ip->i_rgd->rd_length)
return rs->rs_requested + 1;
return ip->i_rgd->rd_length;
}
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 3586b0dd6aa7..80535739ac7b 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -79,23 +79,19 @@ int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
const char *type, const char *function,
char *file, unsigned int line);
-static inline int gfs2_meta_check_i(struct gfs2_sbd *sdp,
- struct buffer_head *bh,
- const char *function,
- char *file, unsigned int line)
+static inline int gfs2_meta_check(struct gfs2_sbd *sdp,
+ struct buffer_head *bh)
{
struct gfs2_meta_header *mh = (struct gfs2_meta_header *)bh->b_data;
u32 magic = be32_to_cpu(mh->mh_magic);
- if (unlikely(magic != GFS2_MAGIC))
- return gfs2_meta_check_ii(sdp, bh, "magic number", function,
- file, line);
+ if (unlikely(magic != GFS2_MAGIC)) {
+ printk(KERN_ERR "GFS2: Magic number missing at %llu\n",
+ (unsigned long long)bh->b_blocknr);
+ return -EIO;
+ }
return 0;
}
-#define gfs2_meta_check(sdp, bh) \
-gfs2_meta_check_i((sdp), (bh), __func__, __FILE__, __LINE__)
-
-
int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
u16 type, u16 t,
const char *function,
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 927f4df874ae..27a0b4a901f5 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -325,12 +325,11 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
struct gfs2_ea_header *ea,
struct gfs2_ea_header *prev, int leave)
{
- struct gfs2_qadata *qa;
int error;
- qa = gfs2_qadata_get(ip);
- if (!qa)
- return -ENOMEM;
+ error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
+ if (error)
+ return error;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
@@ -340,7 +339,6 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
gfs2_quota_unhold(ip);
out_alloc:
- gfs2_qadata_put(ip);
return error;
}
@@ -713,17 +711,16 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
unsigned int blks,
ea_skeleton_call_t skeleton_call, void *private)
{
- struct gfs2_qadata *qa;
struct buffer_head *dibh;
int error;
- qa = gfs2_qadata_get(ip);
- if (!qa)
- return -ENOMEM;
+ error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
+ if (error)
+ return error;
error = gfs2_quota_lock_check(ip);
if (error)
- goto out;
+ return error;
error = gfs2_inplace_reserve(ip, blks);
if (error)
@@ -753,8 +750,6 @@ out_ipres:
gfs2_inplace_release(ip);
out_gunlock_q:
gfs2_quota_unlock(ip);
-out:
- gfs2_qadata_put(ip);
return error;
}
@@ -1494,16 +1489,15 @@ out_gunlock:
int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
- struct gfs2_qadata *qa;
int error;
- qa = gfs2_qadata_get(ip);
- if (!qa)
- return -ENOMEM;
+ error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
+ if (error)
+ return error;
error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
if (error)
- goto out_alloc;
+ return error;
error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
if (error)
@@ -1519,8 +1513,6 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip)
out_quota:
gfs2_quota_unhold(ip);
-out_alloc:
- gfs2_qadata_put(ip);
return error;
}
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 62fc14ea4b73..422dde2ec0a1 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -18,7 +18,7 @@
* hfs_lookup()
*/
static struct dentry *hfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
hfs_cat_rec rec;
struct hfs_find_data fd;
@@ -187,7 +187,7 @@ static int hfs_dir_release(struct inode *inode, struct file *file)
* the directory and the name (and its length) of the new file.
*/
static int hfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct inode *inode;
int res;
diff --git a/fs/hfs/extent.c b/fs/hfs/extent.c
index 2c16316d2917..a67955a0c36f 100644
--- a/fs/hfs/extent.c
+++ b/fs/hfs/extent.c
@@ -432,7 +432,7 @@ out:
if (inode->i_ino < HFS_FIRSTUSER_CNID)
set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
- sb->s_dirt = 1;
+ hfs_mark_mdb_dirty(sb);
}
return res;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index 1bf967c6bfdc..8275175acf6e 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>
@@ -137,16 +138,15 @@ struct hfs_sb_info {
gid_t s_gid; /* The gid of all files */
int session, part;
-
struct nls_table *nls_io, *nls_disk;
-
struct mutex bitmap_lock;
-
unsigned long flags;
-
u16 blockoffset;
-
int fs_div;
+ struct super_block *sb;
+ int work_queued; /* non-zero delayed work is queued */
+ struct delayed_work mdb_work; /* MDB flush delayed work */
+ spinlock_t work_lock; /* protects mdb_work and work_queued */
};
#define HFS_FLG_BITMAP_DIRTY 0
@@ -226,6 +226,9 @@ extern int hfs_compare_dentry(const struct dentry *parent,
extern void hfs_asc2mac(struct super_block *, struct hfs_name *, struct qstr *);
extern int hfs_mac2asc(struct super_block *, char *, const struct hfs_name *);
+/* super.c */
+extern void hfs_mark_mdb_dirty(struct super_block *sb);
+
extern struct timezone sys_tz;
/*
@@ -253,7 +256,7 @@ static inline const char *hfs_mdb_name(struct super_block *sb)
static inline void hfs_bitmap_dirty(struct super_block *sb)
{
set_bit(HFS_FLG_BITMAP_DIRTY, &HFS_SB(sb)->flags);
- sb->s_dirt = 1;
+ hfs_mark_mdb_dirty(sb);
}
#define sb_bread512(sb, sec, data) ({ \
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 761ec06354b4..ee1bc55677f1 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -220,7 +220,7 @@ struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, umode_t mode)
insert_inode_hash(inode);
mark_inode_dirty(inode);
set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
- sb->s_dirt = 1;
+ hfs_mark_mdb_dirty(sb);
return inode;
}
@@ -235,7 +235,7 @@ void hfs_delete_inode(struct inode *inode)
if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
HFS_SB(sb)->root_dirs--;
set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
- sb->s_dirt = 1;
+ hfs_mark_mdb_dirty(sb);
return;
}
HFS_SB(sb)->file_count--;
@@ -248,7 +248,7 @@ void hfs_delete_inode(struct inode *inode)
}
}
set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
- sb->s_dirt = 1;
+ hfs_mark_mdb_dirty(sb);
}
void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
@@ -489,7 +489,7 @@ out:
}
static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct inode *inode = NULL;
hfs_cat_rec rec;
@@ -644,13 +644,7 @@ static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
/* sync the superblock to buffers */
sb = inode->i_sb;
- if (sb->s_dirt) {
- lock_super(sb);
- sb->s_dirt = 0;
- if (!(sb->s_flags & MS_RDONLY))
- hfs_mdb_commit(sb);
- unlock_super(sb);
- }
+ flush_delayed_work_sync(&HFS_SB(sb)->mdb_work);
/* .. finally sync the buffers to disk */
err = sync_blockdev(sb->s_bdev);
if (!ret)
diff --git a/fs/hfs/mdb.c b/fs/hfs/mdb.c
index 1563d5ce5764..5fd51a5833ff 100644
--- a/fs/hfs/mdb.c
+++ b/fs/hfs/mdb.c
@@ -260,6 +260,10 @@ void hfs_mdb_commit(struct super_block *sb)
{
struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
+ if (sb->s_flags & MS_RDONLY)
+ return;
+
+ lock_buffer(HFS_SB(sb)->mdb_bh);
if (test_and_clear_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags)) {
/* These parameters may have been modified, so write them back */
mdb->drLsMod = hfs_mtime();
@@ -283,9 +287,13 @@ void hfs_mdb_commit(struct super_block *sb)
&mdb->drXTFlSize, NULL);
hfs_inode_write_fork(HFS_SB(sb)->cat_tree->inode, mdb->drCTExtRec,
&mdb->drCTFlSize, NULL);
+
+ lock_buffer(HFS_SB(sb)->alt_mdb_bh);
memcpy(HFS_SB(sb)->alt_mdb, HFS_SB(sb)->mdb, HFS_SECTOR_SIZE);
HFS_SB(sb)->alt_mdb->drAtrb |= cpu_to_be16(HFS_SB_ATTRIB_UNMNT);
HFS_SB(sb)->alt_mdb->drAtrb &= cpu_to_be16(~HFS_SB_ATTRIB_INCNSTNT);
+ unlock_buffer(HFS_SB(sb)->alt_mdb_bh);
+
mark_buffer_dirty(HFS_SB(sb)->alt_mdb_bh);
sync_dirty_buffer(HFS_SB(sb)->alt_mdb_bh);
}
@@ -308,7 +316,11 @@ void hfs_mdb_commit(struct super_block *sb)
break;
}
len = min((int)sb->s_blocksize - off, size);
+
+ lock_buffer(bh);
memcpy(bh->b_data + off, ptr, len);
+ unlock_buffer(bh);
+
mark_buffer_dirty(bh);
brelse(bh);
block++;
@@ -317,6 +329,7 @@ void hfs_mdb_commit(struct super_block *sb)
size -= len;
}
}
+ unlock_buffer(HFS_SB(sb)->mdb_bh);
}
void hfs_mdb_close(struct super_block *sb)
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 7b4c537d6e13..4eb873e0c07b 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -29,43 +29,9 @@ static struct kmem_cache *hfs_inode_cachep;
MODULE_LICENSE("GPL");
-/*
- * hfs_write_super()
- *
- * Description:
- * This function is called by the VFS only. When the filesystem
- * is mounted r/w it updates the MDB on disk.
- * Input Variable(s):
- * struct super_block *sb: Pointer to the hfs superblock
- * Output Variable(s):
- * NONE
- * Returns:
- * void
- * Preconditions:
- * 'sb' points to a "valid" (struct super_block).
- * Postconditions:
- * The MDB is marked 'unsuccessfully unmounted' by clearing bit 8 of drAtrb
- * (hfs_put_super() must set this flag!). Some MDB fields are updated
- * and the MDB buffer is written to disk by calling hfs_mdb_commit().
- */
-static void hfs_write_super(struct super_block *sb)
-{
- lock_super(sb);
- sb->s_dirt = 0;
-
- /* sync everything to the buffers */
- if (!(sb->s_flags & MS_RDONLY))
- hfs_mdb_commit(sb);
- unlock_super(sb);
-}
-
static int hfs_sync_fs(struct super_block *sb, int wait)
{
- lock_super(sb);
hfs_mdb_commit(sb);
- sb->s_dirt = 0;
- unlock_super(sb);
-
return 0;
}
@@ -78,13 +44,44 @@ static int hfs_sync_fs(struct super_block *sb, int wait)
*/
static void hfs_put_super(struct super_block *sb)
{
- if (sb->s_dirt)
- hfs_write_super(sb);
+ cancel_delayed_work_sync(&HFS_SB(sb)->mdb_work);
hfs_mdb_close(sb);
/* release the MDB's resources */
hfs_mdb_put(sb);
}
+static void flush_mdb(struct work_struct *work)
+{
+ struct hfs_sb_info *sbi;
+ struct super_block *sb;
+
+ sbi = container_of(work, struct hfs_sb_info, mdb_work.work);
+ sb = sbi->sb;
+
+ spin_lock(&sbi->work_lock);
+ sbi->work_queued = 0;
+ spin_unlock(&sbi->work_lock);
+
+ hfs_mdb_commit(sb);
+}
+
+void hfs_mark_mdb_dirty(struct super_block *sb)
+{
+ struct hfs_sb_info *sbi = HFS_SB(sb);
+ unsigned long delay;
+
+ if (sb->s_flags & MS_RDONLY)
+ return;
+
+ spin_lock(&sbi->work_lock);
+ if (!sbi->work_queued) {
+ delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+ queue_delayed_work(system_long_wq, &sbi->mdb_work, delay);
+ sbi->work_queued = 1;
+ }
+ spin_unlock(&sbi->work_lock);
+}
+
/*
* hfs_statfs()
*
@@ -184,7 +181,6 @@ static const struct super_operations hfs_super_operations = {
.write_inode = hfs_write_inode,
.evict_inode = hfs_evict_inode,
.put_super = hfs_put_super,
- .write_super = hfs_write_super,
.sync_fs = hfs_sync_fs,
.statfs = hfs_statfs,
.remount_fs = hfs_remount,
@@ -387,7 +383,10 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi)
return -ENOMEM;
+ sbi->sb = sb;
sb->s_fs_info = sbi;
+ spin_lock_init(&sbi->work_lock);
+ INIT_DELAYED_WORK(&sbi->mdb_work, flush_mdb);
res = -EINVAL;
if (!parse_options((char *)data, sbi)) {
diff --git a/fs/hfs/sysdep.c b/fs/hfs/sysdep.c
index 19cf291eb91f..91b91fd3a901 100644
--- a/fs/hfs/sysdep.c
+++ b/fs/hfs/sysdep.c
@@ -13,12 +13,12 @@
/* dentry case-handling: just lowercase everything */
-static int hfs_revalidate_dentry(struct dentry *dentry, struct nameidata *nd)
+static int hfs_revalidate_dentry(struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
int diff;
- if (nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
inode = dentry->d_inode;
diff --git a/fs/hfsplus/bitmap.c b/fs/hfsplus/bitmap.c
index 1cad80c789cb..4cfbe2edd296 100644
--- a/fs/hfsplus/bitmap.c
+++ b/fs/hfsplus/bitmap.c
@@ -153,7 +153,7 @@ done:
kunmap(page);
*max = offset + (curr - pptr) * 32 + i - start;
sbi->free_blocks -= *max;
- sb->s_dirt = 1;
+ hfsplus_mark_mdb_dirty(sb);
dprint(DBG_BITMAP, "-> %u,%u\n", start, *max);
out:
mutex_unlock(&sbi->alloc_mutex);
@@ -228,7 +228,7 @@ out:
set_page_dirty(page);
kunmap(page);
sbi->free_blocks += len;
- sb->s_dirt = 1;
+ hfsplus_mark_mdb_dirty(sb);
mutex_unlock(&sbi->alloc_mutex);
return 0;
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 26b53fb09f68..6b9f921ef2fa 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -25,7 +25,7 @@ static inline void hfsplus_instantiate(struct dentry *dentry,
/* Find the entry inside dir named dentry->d_name */
static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct inode *inode = NULL;
struct hfs_find_data fd;
@@ -316,7 +316,7 @@ static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir,
inode->i_ctime = CURRENT_TIME_SEC;
mark_inode_dirty(inode);
sbi->file_count++;
- dst_dir->i_sb->s_dirt = 1;
+ hfsplus_mark_mdb_dirty(dst_dir->i_sb);
out:
mutex_unlock(&sbi->vh_mutex);
return res;
@@ -465,7 +465,7 @@ out:
}
static int hfsplus_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
return hfsplus_mknod(dir, dentry, mode, 0);
}
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 4e75ac646fea..558dbb463a4e 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -153,8 +153,11 @@ struct hfsplus_sb_info {
gid_t gid;
int part, session;
-
unsigned long flags;
+
+ int work_queued; /* non-zero delayed work is queued */
+ struct delayed_work sync_work; /* FS sync delayed work */
+ spinlock_t work_lock; /* protects sync_work and work_queued */
};
#define HFSPLUS_SB_WRITEBACKUP 0
@@ -428,7 +431,7 @@ int hfsplus_show_options(struct seq_file *, struct dentry *);
/* super.c */
struct inode *hfsplus_iget(struct super_block *, unsigned long);
-int hfsplus_sync_fs(struct super_block *sb, int wait);
+void hfsplus_mark_mdb_dirty(struct super_block *sb);
/* tables.c */
extern u16 hfsplus_case_fold_table[];
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 82b69ee4dacc..3d8b4a675ba0 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -168,7 +168,7 @@ const struct dentry_operations hfsplus_dentry_operations = {
};
static struct dentry *hfsplus_file_lookup(struct inode *dir,
- struct dentry *dentry, struct nameidata *nd)
+ struct dentry *dentry, unsigned int flags)
{
struct hfs_find_data fd;
struct super_block *sb = dir->i_sb;
@@ -431,7 +431,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
sbi->file_count++;
insert_inode_hash(inode);
mark_inode_dirty(inode);
- sb->s_dirt = 1;
+ hfsplus_mark_mdb_dirty(sb);
return inode;
}
@@ -442,7 +442,7 @@ void hfsplus_delete_inode(struct inode *inode)
if (S_ISDIR(inode->i_mode)) {
HFSPLUS_SB(sb)->folder_count--;
- sb->s_dirt = 1;
+ hfsplus_mark_mdb_dirty(sb);
return;
}
HFSPLUS_SB(sb)->file_count--;
@@ -455,7 +455,7 @@ void hfsplus_delete_inode(struct inode *inode)
inode->i_size = 0;
hfsplus_file_truncate(inode);
}
- sb->s_dirt = 1;
+ hfsplus_mark_mdb_dirty(sb);
}
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index a9bca4b8768b..473332098013 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -124,7 +124,7 @@ static int hfsplus_system_write_inode(struct inode *inode)
if (fork->total_size != cpu_to_be64(inode->i_size)) {
set_bit(HFSPLUS_SB_WRITEBACKUP, &sbi->flags);
- inode->i_sb->s_dirt = 1;
+ hfsplus_mark_mdb_dirty(inode->i_sb);
}
hfsplus_inode_write_fork(inode, fork);
if (tree)
@@ -161,7 +161,7 @@ static void hfsplus_evict_inode(struct inode *inode)
}
}
-int hfsplus_sync_fs(struct super_block *sb, int wait)
+static int hfsplus_sync_fs(struct super_block *sb, int wait)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
struct hfsplus_vh *vhdr = sbi->s_vhdr;
@@ -171,9 +171,7 @@ int hfsplus_sync_fs(struct super_block *sb, int wait)
if (!wait)
return 0;
- dprint(DBG_SUPER, "hfsplus_write_super\n");
-
- sb->s_dirt = 0;
+ dprint(DBG_SUPER, "hfsplus_sync_fs\n");
/*
* Explicitly write out the special metadata inodes.
@@ -226,12 +224,34 @@ out:
return error;
}
-static void hfsplus_write_super(struct super_block *sb)
+static void delayed_sync_fs(struct work_struct *work)
{
- if (!(sb->s_flags & MS_RDONLY))
- hfsplus_sync_fs(sb, 1);
- else
- sb->s_dirt = 0;
+ struct hfsplus_sb_info *sbi;
+
+ sbi = container_of(work, struct hfsplus_sb_info, sync_work.work);
+
+ spin_lock(&sbi->work_lock);
+ sbi->work_queued = 0;
+ spin_unlock(&sbi->work_lock);
+
+ hfsplus_sync_fs(sbi->alloc_file->i_sb, 1);
+}
+
+void hfsplus_mark_mdb_dirty(struct super_block *sb)
+{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
+ unsigned long delay;
+
+ if (sb->s_flags & MS_RDONLY)
+ return;
+
+ spin_lock(&sbi->work_lock);
+ if (!sbi->work_queued) {
+ delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+ queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
+ sbi->work_queued = 1;
+ }
+ spin_unlock(&sbi->work_lock);
}
static void hfsplus_put_super(struct super_block *sb)
@@ -240,8 +260,7 @@ static void hfsplus_put_super(struct super_block *sb)
dprint(DBG_SUPER, "hfsplus_put_super\n");
- if (!sb->s_fs_info)
- return;
+ cancel_delayed_work_sync(&sbi->sync_work);
if (!(sb->s_flags & MS_RDONLY) && sbi->s_vhdr) {
struct hfsplus_vh *vhdr = sbi->s_vhdr;
@@ -328,7 +347,6 @@ static const struct super_operations hfsplus_sops = {
.write_inode = hfsplus_write_inode,
.evict_inode = hfsplus_evict_inode,
.put_super = hfsplus_put_super,
- .write_super = hfsplus_write_super,
.sync_fs = hfsplus_sync_fs,
.statfs = hfsplus_statfs,
.remount_fs = hfsplus_remount,
@@ -355,6 +373,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = sbi;
mutex_init(&sbi->alloc_mutex);
mutex_init(&sbi->vh_mutex);
+ spin_lock_init(&sbi->work_lock);
+ INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
hfsplus_fill_defaults(sbi);
err = -EINVAL;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2afa5bbccf9b..124146543aa7 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -553,7 +553,7 @@ static int read_name(struct inode *ino, char *name)
}
int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct inode *inode;
char *name;
@@ -595,7 +595,7 @@ int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
}
struct dentry *hostfs_lookup(struct inode *ino, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct inode *inode;
char *name;
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index b8472f803f4e..78e12b2e0ea2 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -189,7 +189,7 @@ out:
* to tell read_inode to read fnode or not.
*/
-struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index c07ef1f1ced6..ac1ead194db5 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -220,7 +220,7 @@ extern const struct dentry_operations hpfs_dentry_operations;
/* dir.c */
-struct dentry *hpfs_lookup(struct inode *, struct dentry *, struct nameidata *);
+struct dentry *hpfs_lookup(struct inode *, struct dentry *, unsigned int);
extern const struct file_operations hpfs_dir_ops;
/* dnode.c */
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 9083ef8af58c..bc9082482f68 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -115,7 +115,7 @@ bail:
return err;
}
-static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
+static int hpfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
const unsigned char *name = dentry->d_name.name;
unsigned len = dentry->d_name.len;
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index d4f93b52cec5..c1dffe47fde2 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -138,7 +138,7 @@ static int file_removed(struct dentry *dentry, const char *file)
}
static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct dentry *proc_dentry, *parent;
struct qstr *name = &dentry->d_name;
@@ -420,8 +420,7 @@ static int hppfs_open(struct inode *inode, struct file *file)
{
const struct cred *cred = file->f_cred;
struct hppfs_private *data;
- struct vfsmount *proc_mnt;
- struct dentry *proc_dentry;
+ struct path path;
char *host_file;
int err, fd, type, filter;
@@ -434,12 +433,11 @@ static int hppfs_open(struct inode *inode, struct file *file)
if (host_file == NULL)
goto out_free2;
- proc_dentry = HPPFS_I(inode)->proc_dentry;
- proc_mnt = inode->i_sb->s_fs_info;
+ path.mnt = inode->i_sb->s_fs_info;
+ path.dentry = HPPFS_I(inode)->proc_dentry;
/* XXX This isn't closed anywhere */
- data->proc_file = dentry_open(dget(proc_dentry), mntget(proc_mnt),
- file_mode(file->f_mode), cred);
+ data->proc_file = dentry_open(&path, file_mode(file->f_mode), cred);
err = PTR_ERR(data->proc_file);
if (IS_ERR(data->proc_file))
goto out_free1;
@@ -484,8 +482,7 @@ static int hppfs_dir_open(struct inode *inode, struct file *file)
{
const struct cred *cred = file->f_cred;
struct hppfs_private *data;
- struct vfsmount *proc_mnt;
- struct dentry *proc_dentry;
+ struct path path;
int err;
err = -ENOMEM;
@@ -493,10 +490,9 @@ static int hppfs_dir_open(struct inode *inode, struct file *file)
if (data == NULL)
goto out;
- proc_dentry = HPPFS_I(inode)->proc_dentry;
- proc_mnt = inode->i_sb->s_fs_info;
- data->proc_file = dentry_open(dget(proc_dentry), mntget(proc_mnt),
- file_mode(file->f_mode), cred);
+ path.mnt = inode->i_sb->s_fs_info;
+ path.dentry = HPPFS_I(inode)->proc_dentry;
+ data->proc_file = dentry_open(&path, file_mode(file->f_mode), cred);
err = PTR_ERR(data->proc_file);
if (IS_ERR(data->proc_file))
goto out_free;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index cc9281b6c628..e13e9bdb0bf5 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -565,7 +565,7 @@ static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mod
return retval;
}
-static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
+static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
diff --git a/fs/inode.c b/fs/inode.c
index c99163b1b310..775cbabd4fa5 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -182,7 +182,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
}
inode->i_private = NULL;
inode->i_mapping = mapping;
- INIT_LIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
+ INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif
diff --git a/fs/internal.h b/fs/internal.h
index 18bc216ea09d..a6fd56c68b11 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -42,6 +42,11 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
extern void __init chrdev_init(void);
/*
+ * namei.c
+ */
+extern int __inode_permission(struct inode *, int);
+
+/*
* namespace.c
*/
extern int copy_mount_options(const void __user *, unsigned long *);
@@ -50,8 +55,6 @@ extern int copy_mount_string(const void __user *, char **);
extern struct vfsmount *lookup_mnt(struct path *);
extern int finish_automount(struct vfsmount *, struct path *);
-extern void mnt_make_longterm(struct vfsmount *);
-extern void mnt_make_shortterm(struct vfsmount *);
extern int sb_prepare_remount_readonly(struct super_block *);
extern void __init mnt_init(void);
@@ -84,9 +87,6 @@ extern struct super_block *user_get_super(dev_t);
/*
* open.c
*/
-struct nameidata;
-extern struct file *nameidata_to_filp(struct nameidata *);
-extern void release_open_intent(struct nameidata *);
struct open_flags {
int open_flag;
umode_t mode;
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index aa4356d09eee..1d3804492aa7 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -134,6 +134,7 @@ isofs_export_encode_fh(struct inode *inode,
len = 3;
fh32[0] = ei->i_iget5_block;
fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
+ fh16[3] = 0; /* avoid leaking uninitialized data */
fh32[2] = inode->i_generation;
if (parent) {
struct iso_inode_info *eparent;
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
index 0e73f63d9274..3620ad1ea9bc 100644
--- a/fs/isofs/isofs.h
+++ b/fs/isofs/isofs.h
@@ -114,7 +114,7 @@ extern int isofs_name_translate(struct iso_directory_record *, char *, struct in
int get_joliet_filename(struct iso_directory_record *, unsigned char *, struct inode *);
int get_acorn_filename(struct iso_directory_record *, char *, struct inode *);
-extern struct dentry *isofs_lookup(struct inode *, struct dentry *, struct nameidata *);
+extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int flags);
extern struct buffer_head *isofs_bread(struct inode *, sector_t);
extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long);
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 1e2946f2a69e..c167028844ed 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -163,7 +163,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
return 0;
}
-struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
int found;
unsigned long uninitialized_var(block);
diff --git a/fs/jbd/recovery.c b/fs/jbd/recovery.c
index 008bf062fd26..a748fe21465a 100644
--- a/fs/jbd/recovery.c
+++ b/fs/jbd/recovery.c
@@ -265,8 +265,11 @@ int journal_recover(journal_t *journal)
if (!err)
err = err2;
/* Flush disk caches to get replayed data on the permanent storage */
- if (journal->j_flags & JFS_BARRIER)
- blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+ if (journal->j_flags & JFS_BARRIER) {
+ err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
+ if (!err)
+ err = err2;
+ }
return err;
}
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index b56018896d5e..ad7774d32095 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -25,9 +25,9 @@
static int jffs2_readdir (struct file *, void *, filldir_t);
static int jffs2_create (struct inode *,struct dentry *,umode_t,
- struct nameidata *);
+ bool);
static struct dentry *jffs2_lookup (struct inode *,struct dentry *,
- struct nameidata *);
+ unsigned int);
static int jffs2_link (struct dentry *,struct inode *,struct dentry *);
static int jffs2_unlink (struct inode *,struct dentry *);
static int jffs2_symlink (struct inode *,struct dentry *,const char *);
@@ -74,7 +74,7 @@ const struct inode_operations jffs2_dir_inode_operations =
nice and simple
*/
static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
- struct nameidata *nd)
+ unsigned int flags)
{
struct jffs2_inode_info *dir_f;
struct jffs2_full_dirent *fd = NULL, *fd_list;
@@ -175,7 +175,7 @@ static int jffs2_readdir(struct file *filp, void *dirent, filldir_t filldir)
static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
- umode_t mode, struct nameidata *nd)
+ umode_t mode, bool excl)
{
struct jffs2_raw_inode *ri;
struct jffs2_inode_info *f, *dir_f;
@@ -226,8 +226,8 @@ static int jffs2_create(struct inode *dir_i, struct dentry *dentry,
__func__, inode->i_ino, inode->i_mode, inode->i_nlink,
f->inocache->pino_nlink, inode->i_mapping->nrpages);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
return 0;
fail:
@@ -446,8 +446,8 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
mutex_unlock(&dir_f->sem);
jffs2_complete_reservation(c);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
return 0;
fail:
@@ -591,8 +591,8 @@ static int jffs2_mkdir (struct inode *dir_i, struct dentry *dentry, umode_t mode
mutex_unlock(&dir_f->sem);
jffs2_complete_reservation(c);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
return 0;
fail:
@@ -766,8 +766,8 @@ static int jffs2_mknod (struct inode *dir_i, struct dentry *dentry, umode_t mode
mutex_unlock(&dir_f->sem);
jffs2_complete_reservation(c);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
return 0;
fail:
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 07c91ca6017d..3b91a7ad6086 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -73,7 +73,7 @@ static inline void free_ea_wmap(struct inode *inode)
*
*/
static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
int rc = 0;
tid_t tid; /* transaction id */
@@ -176,8 +176,8 @@ static int jfs_create(struct inode *dip, struct dentry *dentry, umode_t mode,
unlock_new_inode(ip);
iput(ip);
} else {
- d_instantiate(dentry, ip);
unlock_new_inode(ip);
+ d_instantiate(dentry, ip);
}
out2:
@@ -309,8 +309,8 @@ static int jfs_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
unlock_new_inode(ip);
iput(ip);
} else {
- d_instantiate(dentry, ip);
unlock_new_inode(ip);
+ d_instantiate(dentry, ip);
}
out2:
@@ -1043,8 +1043,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
unlock_new_inode(ip);
iput(ip);
} else {
- d_instantiate(dentry, ip);
unlock_new_inode(ip);
+ d_instantiate(dentry, ip);
}
out2:
@@ -1424,8 +1424,8 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
unlock_new_inode(ip);
iput(ip);
} else {
- d_instantiate(dentry, ip);
unlock_new_inode(ip);
+ d_instantiate(dentry, ip);
}
out1:
@@ -1436,7 +1436,7 @@ static int jfs_mknod(struct inode *dir, struct dentry *dentry,
return rc;
}
-static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, unsigned int flags)
{
struct btstack btstack;
ino_t inum;
@@ -1570,7 +1570,7 @@ out:
return result;
}
-static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int jfs_ci_revalidate(struct dentry *dentry, unsigned int flags)
{
/*
* This is not negative dentry. Always valid.
@@ -1589,7 +1589,7 @@ static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd)
* This may be nfsd (or something), anyway, we can't see the
* intent of this. So, since this can be for creation, drop it.
*/
- if (!nd)
+ if (!flags)
return 0;
/*
@@ -1597,7 +1597,7 @@ static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd)
* case sensitive name which is specified by user if this is
* for creation.
*/
- if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
+ if (flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
return 1;
}
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 4a82950f412f..c55c7452d285 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -601,6 +601,11 @@ static int jfs_sync_fs(struct super_block *sb, int wait)
/* log == NULL indicates read-only mount */
if (log) {
+ /*
+ * Write quota structures to quota file, sync_blockdev() will
+ * write them to disk later
+ */
+ dquot_writeback_dquots(sb, -1);
jfs_flush_journal(log, wait);
jfs_syncpt(log, 0);
}
diff --git a/fs/libfs.c b/fs/libfs.c
index f86ec27a4230..a74cb1725ac6 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -53,7 +53,7 @@ static int simple_delete_dentry(const struct dentry *dentry)
* Lookup the data. This is trivial - if the dentry didn't already
* exist, we know it is negative. Set d_op to delete negative dentries.
*/
-struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+struct dentry *simple_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
static const struct dentry_operations simple_dentry_operations = {
.d_delete = simple_delete_dentry,
@@ -222,15 +222,15 @@ struct dentry *mount_pseudo(struct file_system_type *fs_type, char *name,
const struct super_operations *ops,
const struct dentry_operations *dops, unsigned long magic)
{
- struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
+ struct super_block *s;
struct dentry *dentry;
struct inode *root;
struct qstr d_name = QSTR_INIT(name, strlen(name));
+ s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
if (IS_ERR(s))
return ERR_CAST(s);
- s->s_flags = MS_NOUSER;
s->s_maxbytes = MAX_LFS_FILESIZE;
s->s_blocksize = PAGE_SIZE;
s->s_blocksize_bits = PAGE_SHIFT;
diff --git a/fs/locks.c b/fs/locks.c
index 814c51d0de47..82c353304f9e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
return 0;
}
-static int assign_type(struct file_lock *fl, int type)
+static int assign_type(struct file_lock *fl, long type)
{
switch (type) {
case F_RDLCK:
@@ -445,7 +445,7 @@ static const struct lock_manager_operations lease_manager_ops = {
/*
* Initialize a lease, use the default lock manager operations
*/
-static int lease_init(struct file *filp, int type, struct file_lock *fl)
+static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
if (assign_type(fl, type) != 0)
return -EINVAL;
@@ -463,7 +463,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
}
/* Allocate a file_lock initialised to this type of lease */
-static struct file_lock *lease_alloc(struct file *filp, int type)
+static struct file_lock *lease_alloc(struct file *filp, long type)
{
struct file_lock *fl = locks_alloc_lock();
int error = -ENOMEM;
@@ -1465,7 +1465,7 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
case F_WRLCK:
return generic_add_lease(filp, arg, flp);
default:
- BUG();
+ return -EINVAL;
}
}
EXPORT_SYMBOL(generic_setlease);
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index bea5d1b9954b..26e4a941532f 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -349,7 +349,7 @@ static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr *name)
}
static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct page *page;
struct logfs_disk_dentry *dd;
@@ -502,7 +502,7 @@ static int logfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
}
static int logfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct inode *inode;
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index 97bca623d893..345c24b8a6f8 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -519,7 +519,7 @@ static struct dentry *logfs_get_sb_device(struct logfs_super *super,
log_super("LogFS: Start mount %x\n", mount_count++);
err = -EINVAL;
- sb = sget(type, logfs_sb_test, logfs_sb_set, super);
+ sb = sget(type, logfs_sb_test, logfs_sb_set, flags | MS_NOATIME, super);
if (IS_ERR(sb)) {
super->s_devops->put_device(super);
kfree(super);
@@ -542,7 +542,6 @@ static struct dentry *logfs_get_sb_device(struct logfs_super *super,
sb->s_maxbytes = (1ull << 43) - 1;
sb->s_max_links = LOGFS_LINK_MAX;
sb->s_op = &logfs_super_operations;
- sb->s_flags = flags | MS_NOATIME;
err = logfs_read_sb(sb, sb->s_flags & MS_RDONLY);
if (err)
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index 2d0ee1786305..0db73d9dd668 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -18,7 +18,7 @@ static int add_nondir(struct dentry *dentry, struct inode *inode)
return err;
}
-static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *minix_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags)
{
struct inode * inode = NULL;
ino_t ino;
@@ -55,7 +55,7 @@ static int minix_mknod(struct inode * dir, struct dentry *dentry, umode_t mode,
}
static int minix_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
return minix_mknod(dir, dentry, mode, 0);
}
diff --git a/fs/mount.h b/fs/mount.h
index 4ef36d93e5a2..4f291f9de641 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -22,7 +22,6 @@ struct mount {
struct vfsmount mnt;
#ifdef CONFIG_SMP
struct mnt_pcp __percpu *mnt_pcp;
- atomic_t mnt_longterm; /* how many of the refs are longterm */
#else
int mnt_count;
int mnt_writers;
@@ -49,6 +48,8 @@ struct mount {
int mnt_ghosts;
};
+#define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
+
static inline struct mount *real_mount(struct vfsmount *mnt)
{
return container_of(mnt, struct mount, mnt);
@@ -59,6 +60,12 @@ static inline int mnt_has_parent(struct mount *mnt)
return mnt != mnt->mnt_parent;
}
+static inline int is_mounted(struct vfsmount *mnt)
+{
+ /* neither detached nor internal? */
+ return !IS_ERR_OR_NULL(real_mount(mnt));
+}
+
extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
static inline void get_mnt_ns(struct mnt_namespace *ns)
@@ -67,10 +74,12 @@ static inline void get_mnt_ns(struct mnt_namespace *ns)
}
struct proc_mounts {
- struct seq_file m; /* must be the first element */
+ struct seq_file m;
struct mnt_namespace *ns;
struct path root;
int (*show)(struct seq_file *, struct vfsmount *);
};
+#define proc_mounts(p) (container_of((p), struct proc_mounts, m))
+
extern const struct seq_operations mounts_op;
diff --git a/fs/namei.c b/fs/namei.c
index 7d694194024a..2ccc35c4dc24 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -315,31 +315,22 @@ static inline int do_inode_permission(struct inode *inode, int mask)
}
/**
- * inode_permission - check for access rights to a given inode
- * @inode: inode to check permission on
- * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
+ * __inode_permission - Check for access rights to a given inode
+ * @inode: Inode to check permission on
+ * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
*
- * Used to check for read/write/execute permissions on an inode.
- * We use "fsuid" for this, letting us set arbitrary permissions
- * for filesystem access without changing the "normal" uids which
- * are used for other things.
+ * Check for read/write/execute permissions on an inode.
*
* When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
+ *
+ * This does not check for a read-only file system. You probably want
+ * inode_permission().
*/
-int inode_permission(struct inode *inode, int mask)
+int __inode_permission(struct inode *inode, int mask)
{
int retval;
if (unlikely(mask & MAY_WRITE)) {
- umode_t mode = inode->i_mode;
-
- /*
- * Nobody gets write access to a read-only fs.
- */
- if (IS_RDONLY(inode) &&
- (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
- return -EROFS;
-
/*
* Nobody gets write access to an immutable file.
*/
@@ -359,6 +350,47 @@ int inode_permission(struct inode *inode, int mask)
}
/**
+ * sb_permission - Check superblock-level permissions
+ * @sb: Superblock of inode to check permission on
+ * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
+ *
+ * Separate out file-system wide checks from inode-specific permission checks.
+ */
+static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
+{
+ if (unlikely(mask & MAY_WRITE)) {
+ umode_t mode = inode->i_mode;
+
+ /* Nobody gets write access to a read-only fs. */
+ if ((sb->s_flags & MS_RDONLY) &&
+ (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
+ return -EROFS;
+ }
+ return 0;
+}
+
+/**
+ * inode_permission - Check for access rights to a given inode
+ * @inode: Inode to check permission on
+ * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
+ *
+ * Check for read/write/execute permissions on an inode. We use fs[ug]id for
+ * this, letting us set arbitrary permissions for filesystem access without
+ * changing the "normal" UIDs which are used for other things.
+ *
+ * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
+ */
+int inode_permission(struct inode *inode, int mask)
+{
+ int retval;
+
+ retval = sb_permission(inode->i_sb, inode, mask);
+ if (retval)
+ return retval;
+ return __inode_permission(inode, mask);
+}
+
+/**
* path_get - get a reference to a path
* @path: path to get the reference to
*
@@ -395,6 +427,18 @@ EXPORT_SYMBOL(path_put);
* to restart the path walk from the beginning in ref-walk mode.
*/
+static inline void lock_rcu_walk(void)
+{
+ br_read_lock(&vfsmount_lock);
+ rcu_read_lock();
+}
+
+static inline void unlock_rcu_walk(void)
+{
+ rcu_read_unlock();
+ br_read_unlock(&vfsmount_lock);
+}
+
/**
* unlazy_walk - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
@@ -448,8 +492,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
}
mntget(nd->path.mnt);
- rcu_read_unlock();
- br_read_unlock(&vfsmount_lock);
+ unlock_rcu_walk();
nd->flags &= ~LOOKUP_RCU;
return 0;
@@ -463,25 +506,9 @@ err_root:
return -ECHILD;
}
-/**
- * release_open_intent - free up open intent resources
- * @nd: pointer to nameidata
- */
-void release_open_intent(struct nameidata *nd)
+static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
{
- struct file *file = nd->intent.open.file;
-
- if (file && !IS_ERR(file)) {
- if (file->f_path.dentry == NULL)
- put_filp(file);
- else
- fput(file);
- }
-}
-
-static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd)
-{
- return dentry->d_op->d_revalidate(dentry, nd);
+ return dentry->d_op->d_revalidate(dentry, flags);
}
/**
@@ -506,15 +533,13 @@ static int complete_walk(struct nameidata *nd)
spin_lock(&dentry->d_lock);
if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
spin_unlock(&dentry->d_lock);
- rcu_read_unlock();
- br_read_unlock(&vfsmount_lock);
+ unlock_rcu_walk();
return -ECHILD;
}
BUG_ON(nd->inode != dentry->d_inode);
spin_unlock(&dentry->d_lock);
mntget(nd->path.mnt);
- rcu_read_unlock();
- br_read_unlock(&vfsmount_lock);
+ unlock_rcu_walk();
}
if (likely(!(nd->flags & LOOKUP_JUMPED)))
@@ -527,7 +552,7 @@ static int complete_walk(struct nameidata *nd)
return 0;
/* Note: we do not d_invalidate() */
- status = d_revalidate(dentry, nd);
+ status = d_revalidate(dentry, nd->flags);
if (status > 0)
return 0;
@@ -602,10 +627,25 @@ static inline void path_to_nameidata(const struct path *path,
nd->path.dentry = path->dentry;
}
+/*
+ * Helper to directly jump to a known parsed path from ->follow_link,
+ * caller must have taken a reference to path beforehand.
+ */
+void nd_jump_link(struct nameidata *nd, struct path *path)
+{
+ path_put(&nd->path);
+
+ nd->path = *path;
+ nd->inode = nd->path.dentry->d_inode;
+ nd->flags |= LOOKUP_JUMPED;
+
+ BUG_ON(nd->inode->i_op->follow_link);
+}
+
static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
{
struct inode *inode = link->dentry->d_inode;
- if (!IS_ERR(cookie) && inode->i_op->put_link)
+ if (inode->i_op->put_link)
inode->i_op->put_link(link->dentry, nd, cookie);
path_put(link);
}
@@ -613,19 +653,19 @@ static inline void put_link(struct nameidata *nd, struct path *link, void *cooki
static __always_inline int
follow_link(struct path *link, struct nameidata *nd, void **p)
{
- int error;
struct dentry *dentry = link->dentry;
+ int error;
+ char *s;
BUG_ON(nd->flags & LOOKUP_RCU);
if (link->mnt == nd->path.mnt)
mntget(link->mnt);
- if (unlikely(current->total_link_count >= 40)) {
- *p = ERR_PTR(-ELOOP); /* no ->put_link(), please */
- path_put(&nd->path);
- return -ELOOP;
- }
+ error = -ELOOP;
+ if (unlikely(current->total_link_count >= 40))
+ goto out_put_nd_path;
+
cond_resched();
current->total_link_count++;
@@ -633,30 +673,28 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
nd_set_link(nd, NULL);
error = security_inode_follow_link(link->dentry, nd);
- if (error) {
- *p = ERR_PTR(error); /* no ->put_link(), please */
- path_put(&nd->path);
- return error;
- }
+ if (error)
+ goto out_put_nd_path;
nd->last_type = LAST_BIND;
*p = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(*p);
- if (!IS_ERR(*p)) {
- char *s = nd_get_link(nd);
- error = 0;
- if (s)
- error = __vfs_follow_link(nd, s);
- else if (nd->last_type == LAST_BIND) {
- nd->flags |= LOOKUP_JUMPED;
- nd->inode = nd->path.dentry->d_inode;
- if (nd->inode->i_op->follow_link) {
- /* stepped on a _really_ weird one */
- path_put(&nd->path);
- error = -ELOOP;
- }
- }
+ if (IS_ERR(*p))
+ goto out_put_nd_path;
+
+ error = 0;
+ s = nd_get_link(nd);
+ if (s) {
+ error = __vfs_follow_link(nd, s);
+ if (unlikely(error))
+ put_link(nd, link, *p);
}
+
+ return error;
+
+out_put_nd_path:
+ path_put(&nd->path);
+ path_put(link);
return error;
}
@@ -675,6 +713,16 @@ static int follow_up_rcu(struct path *path)
return 1;
}
+/*
+ * follow_up - Find the mountpoint of path's vfsmount
+ *
+ * Given a path, find the mountpoint of its source file system.
+ * Replace @path with the path of the mountpoint in the parent mount.
+ * Up is towards /.
+ *
+ * Return 1 if we went up a level and 0 if we were already at the
+ * root.
+ */
int follow_up(struct path *path)
{
struct mount *mnt = real_mount(path->mnt);
@@ -683,7 +731,7 @@ int follow_up(struct path *path)
br_read_lock(&vfsmount_lock);
parent = mnt->mnt_parent;
- if (&parent->mnt == path->mnt) {
+ if (parent == mnt) {
br_read_unlock(&vfsmount_lock);
return 0;
}
@@ -946,8 +994,7 @@ failed:
nd->flags &= ~LOOKUP_RCU;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
- rcu_read_unlock();
- br_read_unlock(&vfsmount_lock);
+ unlock_rcu_walk();
return -ECHILD;
}
@@ -1048,7 +1095,7 @@ static void follow_dotdot(struct nameidata *nd)
* dir->d_inode->i_mutex must be held
*/
static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
- struct nameidata *nd, bool *need_lookup)
+ unsigned int flags, bool *need_lookup)
{
struct dentry *dentry;
int error;
@@ -1059,7 +1106,7 @@ static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
if (d_need_lookup(dentry)) {
*need_lookup = true;
} else if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
- error = d_revalidate(dentry, nd);
+ error = d_revalidate(dentry, flags);
if (unlikely(error <= 0)) {
if (error < 0) {
dput(dentry);
@@ -1089,7 +1136,7 @@ static struct dentry *lookup_dcache(struct qstr *name, struct dentry *dir,
* dir->d_inode->i_mutex must be held
*/
static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct dentry *old;
@@ -1099,7 +1146,7 @@ static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
return ERR_PTR(-ENOENT);
}
- old = dir->i_op->lookup(dir, dentry, nd);
+ old = dir->i_op->lookup(dir, dentry, flags);
if (unlikely(old)) {
dput(dentry);
dentry = old;
@@ -1108,16 +1155,16 @@ static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
}
static struct dentry *__lookup_hash(struct qstr *name,
- struct dentry *base, struct nameidata *nd)
+ struct dentry *base, unsigned int flags)
{
bool need_lookup;
struct dentry *dentry;
- dentry = lookup_dcache(name, base, nd, &need_lookup);
+ dentry = lookup_dcache(name, base, flags, &need_lookup);
if (!need_lookup)
return dentry;
- return lookup_real(base->d_inode, dentry, nd);
+ return lookup_real(base->d_inode, dentry, flags);
}
/*
@@ -1167,7 +1214,7 @@ static int lookup_fast(struct nameidata *nd, struct qstr *name,
if (unlikely(d_need_lookup(dentry)))
goto unlazy;
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
- status = d_revalidate(dentry, nd);
+ status = d_revalidate(dentry, nd->flags);
if (unlikely(status <= 0)) {
if (status != -ECHILD)
need_reval = 0;
@@ -1197,7 +1244,7 @@ unlazy:
}
if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval)
- status = d_revalidate(dentry, nd);
+ status = d_revalidate(dentry, nd->flags);
if (unlikely(status <= 0)) {
if (status < 0) {
dput(dentry);
@@ -1236,7 +1283,7 @@ static int lookup_slow(struct nameidata *nd, struct qstr *name,
BUG_ON(nd->inode != parent->d_inode);
mutex_lock(&parent->d_inode->i_mutex);
- dentry = __lookup_hash(name, parent, nd);
+ dentry = __lookup_hash(name, parent, nd->flags);
mutex_unlock(&parent->d_inode->i_mutex);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@@ -1284,8 +1331,7 @@ static void terminate_walk(struct nameidata *nd)
nd->flags &= ~LOOKUP_RCU;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
- rcu_read_unlock();
- br_read_unlock(&vfsmount_lock);
+ unlock_rcu_walk();
}
}
@@ -1383,9 +1429,10 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
void *cookie;
res = follow_link(&link, nd, &cookie);
- if (!res)
- res = walk_component(nd, path, &nd->last,
- nd->last_type, LOOKUP_FOLLOW);
+ if (res)
+ break;
+ res = walk_component(nd, path, &nd->last,
+ nd->last_type, LOOKUP_FOLLOW);
put_link(nd, &link, cookie);
} while (res > 0);
@@ -1651,8 +1698,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
nd->path = nd->root;
nd->inode = inode;
if (flags & LOOKUP_RCU) {
- br_read_lock(&vfsmount_lock);
- rcu_read_lock();
+ lock_rcu_walk();
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
} else {
path_get(&nd->path);
@@ -1664,8 +1710,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
if (*name=='/') {
if (flags & LOOKUP_RCU) {
- br_read_lock(&vfsmount_lock);
- rcu_read_lock();
+ lock_rcu_walk();
set_root_rcu(nd);
} else {
set_root(nd);
@@ -1677,8 +1722,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
struct fs_struct *fs = current->fs;
unsigned seq;
- br_read_lock(&vfsmount_lock);
- rcu_read_lock();
+ lock_rcu_walk();
do {
seq = read_seqcount_begin(&fs->seq);
@@ -1713,8 +1757,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
if (fput_needed)
*fp = file;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
- br_read_lock(&vfsmount_lock);
- rcu_read_lock();
+ lock_rcu_walk();
} else {
path_get(&file->f_path);
fput_light(file, fput_needed);
@@ -1777,8 +1820,9 @@ static int path_lookupat(int dfd, const char *name,
struct path link = path;
nd->flags |= LOOKUP_PARENT;
err = follow_link(&link, nd, &cookie);
- if (!err)
- err = lookup_last(nd, &path);
+ if (err)
+ break;
+ err = lookup_last(nd, &path);
put_link(nd, &link, cookie);
}
}
@@ -1821,9 +1865,27 @@ static int do_path_lookup(int dfd, const char *name,
return retval;
}
-int kern_path_parent(const char *name, struct nameidata *nd)
+/* does lookup, returns the object with parent locked */
+struct dentry *kern_path_locked(const char *name, struct path *path)
{
- return do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, nd);
+ struct nameidata nd;
+ struct dentry *d;
+ int err = do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, &nd);
+ if (err)
+ return ERR_PTR(err);
+ if (nd.last_type != LAST_NORM) {
+ path_put(&nd.path);
+ return ERR_PTR(-EINVAL);
+ }
+ mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+ d = __lookup_hash(&nd.last, nd.path.dentry, 0);
+ if (IS_ERR(d)) {
+ mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+ path_put(&nd.path);
+ return d;
+ }
+ *path = nd.path;
+ return d;
}
int kern_path(const char *name, unsigned int flags, struct path *path)
@@ -1866,7 +1928,7 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
*/
static struct dentry *lookup_hash(struct nameidata *nd)
{
- return __lookup_hash(&nd->last, nd->path.dentry, nd);
+ return __lookup_hash(&nd->last, nd->path.dentry, nd->flags);
}
/**
@@ -1913,7 +1975,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
if (err)
return ERR_PTR(err);
- return __lookup_hash(&this, base, NULL);
+ return __lookup_hash(&this, base, 0);
}
int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
@@ -2086,10 +2148,9 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
}
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool want_excl)
{
int error = may_create(dir, dentry);
-
if (error)
return error;
@@ -2100,7 +2161,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
error = security_inode_create(dir, dentry, mode);
if (error)
return error;
- error = dir->i_op->create(dir, dentry, mode, nd);
+ error = dir->i_op->create(dir, dentry, mode, want_excl);
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -2187,21 +2248,275 @@ static inline int open_to_namei_flags(int flag)
return flag;
}
+static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode)
+{
+ int error = security_path_mknod(dir, dentry, mode, 0);
+ if (error)
+ return error;
+
+ error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
+ if (error)
+ return error;
+
+ return security_inode_create(dir->dentry->d_inode, dentry, mode);
+}
+
/*
- * Handle the last step of open()
+ * Attempt to atomically look up, create and open a file from a negative
+ * dentry.
+ *
+ * Returns 0 if successful. The file will have been created and attached to
+ * @file by the filesystem calling finish_open().
+ *
+ * Returns 1 if the file was looked up only or didn't need creating. The
+ * caller will need to perform the open themselves. @path will have been
+ * updated to point to the new dentry. This may be negative.
+ *
+ * Returns an error code otherwise.
+ */
+static int atomic_open(struct nameidata *nd, struct dentry *dentry,
+ struct path *path, struct file *file,
+ const struct open_flags *op,
+ bool *want_write, bool need_lookup,
+ int *opened)
+{
+ struct inode *dir = nd->path.dentry->d_inode;
+ unsigned open_flag = open_to_namei_flags(op->open_flag);
+ umode_t mode;
+ int error;
+ int acc_mode;
+ int create_error = 0;
+ struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
+
+ BUG_ON(dentry->d_inode);
+
+ /* Don't create child dentry for a dead directory. */
+ if (unlikely(IS_DEADDIR(dir))) {
+ error = -ENOENT;
+ goto out;
+ }
+
+ mode = op->mode & S_IALLUGO;
+ if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
+ mode &= ~current_umask();
+
+ if (open_flag & O_EXCL) {
+ open_flag &= ~O_TRUNC;
+ *opened |= FILE_CREATED;
+ }
+
+ /*
+ * Checking write permission is tricky, bacuse we don't know if we are
+ * going to actually need it: O_CREAT opens should work as long as the
+ * file exists. But checking existence breaks atomicity. The trick is
+ * to check access and if not granted clear O_CREAT from the flags.
+ *
+ * Another problem is returing the "right" error value (e.g. for an
+ * O_EXCL open we want to return EEXIST not EROFS).
+ */
+ if ((open_flag & (O_CREAT | O_TRUNC)) ||
+ (open_flag & O_ACCMODE) != O_RDONLY) {
+ error = mnt_want_write(nd->path.mnt);
+ if (!error) {
+ *want_write = true;
+ } else if (!(open_flag & O_CREAT)) {
+ /*
+ * No O_CREATE -> atomicity not a requirement -> fall
+ * back to lookup + open
+ */
+ goto no_open;
+ } else if (open_flag & (O_EXCL | O_TRUNC)) {
+ /* Fall back and fail with the right error */
+ create_error = error;
+ goto no_open;
+ } else {
+ /* No side effects, safe to clear O_CREAT */
+ create_error = error;
+ open_flag &= ~O_CREAT;
+ }
+ }
+
+ if (open_flag & O_CREAT) {
+ error = may_o_create(&nd->path, dentry, op->mode);
+ if (error) {
+ create_error = error;
+ if (open_flag & O_EXCL)
+ goto no_open;
+ open_flag &= ~O_CREAT;
+ }
+ }
+
+ if (nd->flags & LOOKUP_DIRECTORY)
+ open_flag |= O_DIRECTORY;
+
+ file->f_path.dentry = DENTRY_NOT_SET;
+ file->f_path.mnt = nd->path.mnt;
+ error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode,
+ opened);
+ if (error < 0) {
+ if (create_error && error == -ENOENT)
+ error = create_error;
+ goto out;
+ }
+
+ acc_mode = op->acc_mode;
+ if (*opened & FILE_CREATED) {
+ fsnotify_create(dir, dentry);
+ acc_mode = MAY_OPEN;
+ }
+
+ if (error) { /* returned 1, that is */
+ if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
+ error = -EIO;
+ goto out;
+ }
+ if (file->f_path.dentry) {
+ dput(dentry);
+ dentry = file->f_path.dentry;
+ }
+ goto looked_up;
+ }
+
+ /*
+ * We didn't have the inode before the open, so check open permission
+ * here.
+ */
+ error = may_open(&file->f_path, acc_mode, open_flag);
+ if (error)
+ fput(file);
+
+out:
+ dput(dentry);
+ return error;
+
+no_open:
+ if (need_lookup) {
+ dentry = lookup_real(dir, dentry, nd->flags);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ if (create_error) {
+ int open_flag = op->open_flag;
+
+ error = create_error;
+ if ((open_flag & O_EXCL)) {
+ if (!dentry->d_inode)
+ goto out;
+ } else if (!dentry->d_inode) {
+ goto out;
+ } else if ((open_flag & O_TRUNC) &&
+ S_ISREG(dentry->d_inode->i_mode)) {
+ goto out;
+ }
+ /* will fail later, go on to get the right error */
+ }
+ }
+looked_up:
+ path->dentry = dentry;
+ path->mnt = nd->path.mnt;
+ return 1;
+}
+
+/*
+ * Look up and maybe create and open the last component.
+ *
+ * Must be called with i_mutex held on parent.
+ *
+ * Returns 0 if the file was successfully atomically created (if necessary) and
+ * opened. In this case the file will be returned attached to @file.
+ *
+ * Returns 1 if the file was not completely opened at this time, though lookups
+ * and creations will have been performed and the dentry returned in @path will
+ * be positive upon return if O_CREAT was specified. If O_CREAT wasn't
+ * specified then a negative dentry may be returned.
+ *
+ * An error code is returned otherwise.
+ *
+ * FILE_CREATE will be set in @*opened if the dentry was created and will be
+ * cleared otherwise prior to returning.
*/
-static struct file *do_last(struct nameidata *nd, struct path *path,
- const struct open_flags *op, const char *pathname)
+static int lookup_open(struct nameidata *nd, struct path *path,
+ struct file *file,
+ const struct open_flags *op,
+ bool *want_write, int *opened)
{
struct dentry *dir = nd->path.dentry;
+ struct inode *dir_inode = dir->d_inode;
struct dentry *dentry;
+ int error;
+ bool need_lookup;
+
+ *opened &= ~FILE_CREATED;
+ dentry = lookup_dcache(&nd->last, dir, nd->flags, &need_lookup);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ /* Cached positive dentry: will open in f_op->open */
+ if (!need_lookup && dentry->d_inode)
+ goto out_no_open;
+
+ if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) {
+ return atomic_open(nd, dentry, path, file, op, want_write,
+ need_lookup, opened);
+ }
+
+ if (need_lookup) {
+ BUG_ON(dentry->d_inode);
+
+ dentry = lookup_real(dir_inode, dentry, nd->flags);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ }
+
+ /* Negative dentry, just create the file */
+ if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
+ umode_t mode = op->mode;
+ if (!IS_POSIXACL(dir->d_inode))
+ mode &= ~current_umask();
+ /*
+ * This write is needed to ensure that a
+ * rw->ro transition does not occur between
+ * the time when the file is created and when
+ * a permanent write count is taken through
+ * the 'struct file' in finish_open().
+ */
+ error = mnt_want_write(nd->path.mnt);
+ if (error)
+ goto out_dput;
+ *want_write = true;
+ *opened |= FILE_CREATED;
+ error = security_path_mknod(&nd->path, dentry, mode, 0);
+ if (error)
+ goto out_dput;
+ error = vfs_create(dir->d_inode, dentry, mode,
+ nd->flags & LOOKUP_EXCL);
+ if (error)
+ goto out_dput;
+ }
+out_no_open:
+ path->dentry = dentry;
+ path->mnt = nd->path.mnt;
+ return 1;
+
+out_dput:
+ dput(dentry);
+ return error;
+}
+
+/*
+ * Handle the last step of open()
+ */
+static int do_last(struct nameidata *nd, struct path *path,
+ struct file *file, const struct open_flags *op,
+ int *opened, const char *pathname)
+{
+ struct dentry *dir = nd->path.dentry;
int open_flag = op->open_flag;
- int will_truncate = open_flag & O_TRUNC;
- int want_write = 0;
+ bool will_truncate = (open_flag & O_TRUNC) != 0;
+ bool want_write = false;
int acc_mode = op->acc_mode;
- struct file *filp;
struct inode *inode;
- int symlink_ok = 0;
+ bool symlink_ok = false;
struct path save_parent = { .dentry = NULL, .mnt = NULL };
bool retried = false;
int error;
@@ -2214,112 +2529,99 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
case LAST_DOT:
error = handle_dots(nd, nd->last_type);
if (error)
- return ERR_PTR(error);
+ return error;
/* fallthrough */
case LAST_ROOT:
error = complete_walk(nd);
if (error)
- return ERR_PTR(error);
+ return error;
audit_inode(pathname, nd->path.dentry);
if (open_flag & O_CREAT) {
error = -EISDIR;
- goto exit;
+ goto out;
}
- goto ok;
+ goto finish_open;
case LAST_BIND:
error = complete_walk(nd);
if (error)
- return ERR_PTR(error);
+ return error;
audit_inode(pathname, dir);
- goto ok;
+ goto finish_open;
}
if (!(open_flag & O_CREAT)) {
if (nd->last.name[nd->last.len])
nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
- symlink_ok = 1;
+ symlink_ok = true;
/* we _can_ be in RCU mode here */
error = lookup_fast(nd, &nd->last, path, &inode);
- if (unlikely(error)) {
- if (error < 0)
- goto exit;
+ if (likely(!error))
+ goto finish_lookup;
- error = lookup_slow(nd, &nd->last, path);
- if (error < 0)
- goto exit;
+ if (error < 0)
+ goto out;
- inode = path->dentry->d_inode;
- }
- goto finish_lookup;
- }
-
- /* create side of things */
- /*
- * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED has been
- * cleared when we got to the last component we are about to look up
- */
- error = complete_walk(nd);
- if (error)
- return ERR_PTR(error);
+ BUG_ON(nd->inode != dir->d_inode);
+ } else {
+ /* create side of things */
+ /*
+ * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
+ * has been cleared when we got to the last component we are
+ * about to look up
+ */
+ error = complete_walk(nd);
+ if (error)
+ return error;
- audit_inode(pathname, dir);
- error = -EISDIR;
- /* trailing slashes? */
- if (nd->last.name[nd->last.len])
- goto exit;
+ audit_inode(pathname, dir);
+ error = -EISDIR;
+ /* trailing slashes? */
+ if (nd->last.name[nd->last.len])
+ goto out;
+ }
retry_lookup:
mutex_lock(&dir->d_inode->i_mutex);
+ error = lookup_open(nd, path, file, op, &want_write, opened);
+ mutex_unlock(&dir->d_inode->i_mutex);
- dentry = lookup_hash(nd);
- error = PTR_ERR(dentry);
- if (IS_ERR(dentry)) {
- mutex_unlock(&dir->d_inode->i_mutex);
- goto exit;
- }
+ if (error <= 0) {
+ if (error)
+ goto out;
- path->dentry = dentry;
- path->mnt = nd->path.mnt;
+ if ((*opened & FILE_CREATED) ||
+ !S_ISREG(file->f_path.dentry->d_inode->i_mode))
+ will_truncate = false;
- /* Negative dentry, just create the file */
- if (!dentry->d_inode) {
- umode_t mode = op->mode;
- if (!IS_POSIXACL(dir->d_inode))
- mode &= ~current_umask();
- /*
- * This write is needed to ensure that a
- * rw->ro transition does not occur between
- * the time when the file is created and when
- * a permanent write count is taken through
- * the 'struct file' in nameidata_to_filp().
- */
- error = mnt_want_write(nd->path.mnt);
- if (error)
- goto exit_mutex_unlock;
- want_write = 1;
+ audit_inode(pathname, file->f_path.dentry);
+ goto opened;
+ }
+
+ if (*opened & FILE_CREATED) {
/* Don't check for write permission, don't truncate */
open_flag &= ~O_TRUNC;
- will_truncate = 0;
+ will_truncate = false;
acc_mode = MAY_OPEN;
- error = security_path_mknod(&nd->path, dentry, mode, 0);
- if (error)
- goto exit_mutex_unlock;
- error = vfs_create(dir->d_inode, dentry, mode, nd);
- if (error)
- goto exit_mutex_unlock;
- mutex_unlock(&dir->d_inode->i_mutex);
- dput(nd->path.dentry);
- nd->path.dentry = dentry;
- goto common;
+ path_to_nameidata(path, nd);
+ goto finish_open_created;
}
/*
* It already exists.
*/
- mutex_unlock(&dir->d_inode->i_mutex);
audit_inode(pathname, path->dentry);
+ /*
+ * If atomic_open() acquired write access it is dropped now due to
+ * possible mount and symlink following (this might be optimized away if
+ * necessary...)
+ */
+ if (want_write) {
+ mnt_drop_write(nd->path.mnt);
+ want_write = false;
+ }
+
error = -EEXIST;
if (open_flag & O_EXCL)
goto exit_dput;
@@ -2338,18 +2640,18 @@ finish_lookup:
error = -ENOENT;
if (!inode) {
path_to_nameidata(path, nd);
- goto exit;
+ goto out;
}
if (should_follow_link(inode, !symlink_ok)) {
if (nd->flags & LOOKUP_RCU) {
if (unlikely(unlazy_walk(nd, path->dentry))) {
error = -ECHILD;
- goto exit;
+ goto out;
}
}
BUG_ON(inode != path->dentry->d_inode);
- return NULL;
+ return 1;
}
if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) {
@@ -2365,119 +2667,122 @@ finish_lookup:
error = complete_walk(nd);
if (error) {
path_put(&save_parent);
- return ERR_PTR(error);
+ return error;
}
error = -EISDIR;
if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
- goto exit;
+ goto out;
error = -ENOTDIR;
if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
- goto exit;
+ goto out;
audit_inode(pathname, nd->path.dentry);
-ok:
+finish_open:
if (!S_ISREG(nd->inode->i_mode))
- will_truncate = 0;
+ will_truncate = false;
if (will_truncate) {
error = mnt_want_write(nd->path.mnt);
if (error)
- goto exit;
- want_write = 1;
+ goto out;
+ want_write = true;
}
-common:
+finish_open_created:
error = may_open(&nd->path, acc_mode, open_flag);
if (error)
- goto exit;
- filp = nameidata_to_filp(nd);
- if (filp == ERR_PTR(-EOPENSTALE) && save_parent.dentry && !retried) {
- BUG_ON(save_parent.dentry != dir);
- path_put(&nd->path);
- nd->path = save_parent;
- nd->inode = dir->d_inode;
- save_parent.mnt = NULL;
- save_parent.dentry = NULL;
- if (want_write) {
- mnt_drop_write(nd->path.mnt);
- want_write = 0;
- }
- retried = true;
- goto retry_lookup;
- }
- if (!IS_ERR(filp)) {
- error = ima_file_check(filp, op->acc_mode);
- if (error) {
- fput(filp);
- filp = ERR_PTR(error);
- }
+ goto out;
+ file->f_path.mnt = nd->path.mnt;
+ error = finish_open(file, nd->path.dentry, NULL, opened);
+ if (error) {
+ if (error == -EOPENSTALE)
+ goto stale_open;
+ goto out;
}
- if (!IS_ERR(filp)) {
- if (will_truncate) {
- error = handle_truncate(filp);
- if (error) {
- fput(filp);
- filp = ERR_PTR(error);
- }
- }
+opened:
+ error = open_check_o_direct(file);
+ if (error)
+ goto exit_fput;
+ error = ima_file_check(file, op->acc_mode);
+ if (error)
+ goto exit_fput;
+
+ if (will_truncate) {
+ error = handle_truncate(file);
+ if (error)
+ goto exit_fput;
}
out:
if (want_write)
mnt_drop_write(nd->path.mnt);
path_put(&save_parent);
terminate_walk(nd);
- return filp;
+ return error;
-exit_mutex_unlock:
- mutex_unlock(&dir->d_inode->i_mutex);
exit_dput:
path_put_conditional(path, nd);
-exit:
- filp = ERR_PTR(error);
goto out;
+exit_fput:
+ fput(file);
+ goto out;
+
+stale_open:
+ /* If no saved parent or already retried then can't retry */
+ if (!save_parent.dentry || retried)
+ goto out;
+
+ BUG_ON(save_parent.dentry != dir);
+ path_put(&nd->path);
+ nd->path = save_parent;
+ nd->inode = dir->d_inode;
+ save_parent.mnt = NULL;
+ save_parent.dentry = NULL;
+ if (want_write) {
+ mnt_drop_write(nd->path.mnt);
+ want_write = false;
+ }
+ retried = true;
+ goto retry_lookup;
}
static struct file *path_openat(int dfd, const char *pathname,
struct nameidata *nd, const struct open_flags *op, int flags)
{
struct file *base = NULL;
- struct file *filp;
+ struct file *file;
struct path path;
+ int opened = 0;
int error;
- filp = get_empty_filp();
- if (!filp)
+ file = get_empty_filp();
+ if (!file)
return ERR_PTR(-ENFILE);
- filp->f_flags = op->open_flag;
- nd->intent.open.file = filp;
- nd->intent.open.flags = open_to_namei_flags(op->open_flag);
- nd->intent.open.create_mode = op->mode;
+ file->f_flags = op->open_flag;
error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base);
if (unlikely(error))
- goto out_filp;
+ goto out;
current->total_link_count = 0;
error = link_path_walk(pathname, nd);
if (unlikely(error))
- goto out_filp;
+ goto out;
- filp = do_last(nd, &path, op, pathname);
- while (unlikely(!filp)) { /* trailing symlink */
+ error = do_last(nd, &path, file, op, &opened, pathname);
+ while (unlikely(error > 0)) { /* trailing symlink */
struct path link = path;
void *cookie;
if (!(nd->flags & LOOKUP_FOLLOW)) {
path_put_conditional(&path, nd);
path_put(&nd->path);
- filp = ERR_PTR(-ELOOP);
+ error = -ELOOP;
break;
}
nd->flags |= LOOKUP_PARENT;
nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
error = follow_link(&link, nd, &cookie);
if (unlikely(error))
- filp = ERR_PTR(error);
- else
- filp = do_last(nd, &path, op, pathname);
+ break;
+ error = do_last(nd, &path, file, op, &opened, pathname);
put_link(nd, &link, cookie);
}
out:
@@ -2485,18 +2790,20 @@ out:
path_put(&nd->root);
if (base)
fput(base);
- release_open_intent(nd);
- if (filp == ERR_PTR(-EOPENSTALE)) {
- if (flags & LOOKUP_RCU)
- filp = ERR_PTR(-ECHILD);
- else
- filp = ERR_PTR(-ESTALE);
+ if (!(opened & FILE_OPENED)) {
+ BUG_ON(!error);
+ put_filp(file);
}
- return filp;
-
-out_filp:
- filp = ERR_PTR(error);
- goto out;
+ if (unlikely(error)) {
+ if (error == -EOPENSTALE) {
+ if (flags & LOOKUP_RCU)
+ error = -ECHILD;
+ else
+ error = -ESTALE;
+ }
+ file = ERR_PTR(error);
+ }
+ return file;
}
struct file *do_filp_open(int dfd, const char *pathname,
@@ -2551,7 +2858,6 @@ struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path
goto out;
nd.flags &= ~LOOKUP_PARENT;
nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL;
- nd.intent.open.flags = O_EXCL;
/*
* Do the final lookup.
@@ -2670,7 +2976,7 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
goto out_drop_write;
switch (mode & S_IFMT) {
case 0: case S_IFREG:
- error = vfs_create(path.dentry->d_inode,dentry,mode,NULL);
+ error = vfs_create(path.dentry->d_inode,dentry,mode,true);
break;
case S_IFCHR: case S_IFBLK:
error = vfs_mknod(path.dentry->d_inode,dentry,mode,
diff --git a/fs/namespace.c b/fs/namespace.c
index 1e4a5fe3d7b7..c53d3381b0d0 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -515,8 +515,20 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
}
/*
- * lookup_mnt increments the ref count before returning
- * the vfsmount struct.
+ * lookup_mnt - Return the first child mount mounted at path
+ *
+ * "First" means first mounted chronologically. If you create the
+ * following mounts:
+ *
+ * mount /dev/sda1 /mnt
+ * mount /dev/sda2 /mnt
+ * mount /dev/sda3 /mnt
+ *
+ * Then lookup_mnt() on the base /mnt dentry in the root mount will
+ * return successively the root dentry and vfsmount of /dev/sda1, then
+ * /dev/sda2, then /dev/sda3, then NULL.
+ *
+ * lookup_mnt takes a reference to the found vfsmount.
*/
struct vfsmount *lookup_mnt(struct path *path)
{
@@ -621,21 +633,6 @@ static void attach_mnt(struct mount *mnt, struct path *path)
list_add_tail(&mnt->mnt_child, &real_mount(path->mnt)->mnt_mounts);
}
-static inline void __mnt_make_longterm(struct mount *mnt)
-{
-#ifdef CONFIG_SMP
- atomic_inc(&mnt->mnt_longterm);
-#endif
-}
-
-/* needs vfsmount lock for write */
-static inline void __mnt_make_shortterm(struct mount *mnt)
-{
-#ifdef CONFIG_SMP
- atomic_dec(&mnt->mnt_longterm);
-#endif
-}
-
/*
* vfsmount lock must be held for write
*/
@@ -649,10 +646,8 @@ static void commit_tree(struct mount *mnt)
BUG_ON(parent == mnt);
list_add_tail(&head, &mnt->mnt_list);
- list_for_each_entry(m, &head, mnt_list) {
+ list_for_each_entry(m, &head, mnt_list)
m->mnt_ns = n;
- __mnt_make_longterm(m);
- }
list_splice(&head, n->list.prev);
@@ -725,56 +720,60 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
int flag)
{
struct super_block *sb = old->mnt.mnt_sb;
- struct mount *mnt = alloc_vfsmnt(old->mnt_devname);
+ struct mount *mnt;
+ int err;
- if (mnt) {
- if (flag & (CL_SLAVE | CL_PRIVATE))
- mnt->mnt_group_id = 0; /* not a peer of original */
- else
- mnt->mnt_group_id = old->mnt_group_id;
-
- if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
- int err = mnt_alloc_group_id(mnt);
- if (err)
- goto out_free;
- }
+ mnt = alloc_vfsmnt(old->mnt_devname);
+ if (!mnt)
+ return ERR_PTR(-ENOMEM);
- mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
- atomic_inc(&sb->s_active);
- mnt->mnt.mnt_sb = sb;
- mnt->mnt.mnt_root = dget(root);
- mnt->mnt_mountpoint = mnt->mnt.mnt_root;
- mnt->mnt_parent = mnt;
- br_write_lock(&vfsmount_lock);
- list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
- br_write_unlock(&vfsmount_lock);
+ if (flag & (CL_SLAVE | CL_PRIVATE))
+ mnt->mnt_group_id = 0; /* not a peer of original */
+ else
+ mnt->mnt_group_id = old->mnt_group_id;
- if (flag & CL_SLAVE) {
- list_add(&mnt->mnt_slave, &old->mnt_slave_list);
- mnt->mnt_master = old;
- CLEAR_MNT_SHARED(mnt);
- } else if (!(flag & CL_PRIVATE)) {
- if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
- list_add(&mnt->mnt_share, &old->mnt_share);
- if (IS_MNT_SLAVE(old))
- list_add(&mnt->mnt_slave, &old->mnt_slave);
- mnt->mnt_master = old->mnt_master;
- }
- if (flag & CL_MAKE_SHARED)
- set_mnt_shared(mnt);
-
- /* stick the duplicate mount on the same expiry list
- * as the original if that was on one */
- if (flag & CL_EXPIRE) {
- if (!list_empty(&old->mnt_expire))
- list_add(&mnt->mnt_expire, &old->mnt_expire);
- }
+ if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
+ err = mnt_alloc_group_id(mnt);
+ if (err)
+ goto out_free;
}
+
+ mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+ atomic_inc(&sb->s_active);
+ mnt->mnt.mnt_sb = sb;
+ mnt->mnt.mnt_root = dget(root);
+ mnt->mnt_mountpoint = mnt->mnt.mnt_root;
+ mnt->mnt_parent = mnt;
+ br_write_lock(&vfsmount_lock);
+ list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
+ br_write_unlock(&vfsmount_lock);
+
+ if (flag & CL_SLAVE) {
+ list_add(&mnt->mnt_slave, &old->mnt_slave_list);
+ mnt->mnt_master = old;
+ CLEAR_MNT_SHARED(mnt);
+ } else if (!(flag & CL_PRIVATE)) {
+ if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
+ list_add(&mnt->mnt_share, &old->mnt_share);
+ if (IS_MNT_SLAVE(old))
+ list_add(&mnt->mnt_slave, &old->mnt_slave);
+ mnt->mnt_master = old->mnt_master;
+ }
+ if (flag & CL_MAKE_SHARED)
+ set_mnt_shared(mnt);
+
+ /* stick the duplicate mount on the same expiry list
+ * as the original if that was on one */
+ if (flag & CL_EXPIRE) {
+ if (!list_empty(&old->mnt_expire))
+ list_add(&mnt->mnt_expire, &old->mnt_expire);
+ }
+
return mnt;
out_free:
free_vfsmnt(mnt);
- return NULL;
+ return ERR_PTR(err);
}
static inline void mntfree(struct mount *mnt)
@@ -804,7 +803,8 @@ static void mntput_no_expire(struct mount *mnt)
put_again:
#ifdef CONFIG_SMP
br_read_lock(&vfsmount_lock);
- if (likely(atomic_read(&mnt->mnt_longterm))) {
+ if (likely(mnt->mnt_ns)) {
+ /* shouldn't be the last one */
mnt_add_count(mnt, -1);
br_read_unlock(&vfsmount_lock);
return;
@@ -939,7 +939,7 @@ EXPORT_SYMBOL(replace_mount_options);
/* iterator; we want it to have access to namespace_sem, thus here... */
static void *m_start(struct seq_file *m, loff_t *pos)
{
- struct proc_mounts *p = container_of(m, struct proc_mounts, m);
+ struct proc_mounts *p = proc_mounts(m);
down_read(&namespace_sem);
return seq_list_start(&p->ns->list, *pos);
@@ -947,7 +947,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct proc_mounts *p = container_of(m, struct proc_mounts, m);
+ struct proc_mounts *p = proc_mounts(m);
return seq_list_next(v, &p->ns->list, pos);
}
@@ -959,7 +959,7 @@ static void m_stop(struct seq_file *m, void *v)
static int m_show(struct seq_file *m, void *v)
{
- struct proc_mounts *p = container_of(m, struct proc_mounts, m);
+ struct proc_mounts *p = proc_mounts(m);
struct mount *r = list_entry(v, struct mount, mnt_list);
return p->show(m, &r->mnt);
}
@@ -1074,8 +1074,6 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
__touch_mnt_namespace(p->mnt_ns);
- if (p->mnt_ns)
- __mnt_make_shortterm(p);
p->mnt_ns = NULL;
list_del_init(&p->mnt_child);
if (mnt_has_parent(p)) {
@@ -1260,11 +1258,12 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
struct path path;
if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
- return NULL;
+ return ERR_PTR(-EINVAL);
res = q = clone_mnt(mnt, dentry, flag);
- if (!q)
- goto Enomem;
+ if (IS_ERR(q))
+ return q;
+
q->mnt_mountpoint = mnt->mnt_mountpoint;
p = mnt;
@@ -1286,8 +1285,8 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
path.mnt = &q->mnt;
path.dentry = p->mnt_mountpoint;
q = clone_mnt(p, p->mnt.mnt_root, flag);
- if (!q)
- goto Enomem;
+ if (IS_ERR(q))
+ goto out;
br_write_lock(&vfsmount_lock);
list_add_tail(&q->mnt_list, &res->mnt_list);
attach_mnt(q, &path);
@@ -1295,7 +1294,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
}
}
return res;
-Enomem:
+out:
if (res) {
LIST_HEAD(umount_list);
br_write_lock(&vfsmount_lock);
@@ -1303,9 +1302,11 @@ Enomem:
br_write_unlock(&vfsmount_lock);
release_mounts(&umount_list);
}
- return NULL;
+ return q;
}
+/* Caller should check returned pointer for errors */
+
struct vfsmount *collect_mounts(struct path *path)
{
struct mount *tree;
@@ -1313,7 +1314,9 @@ struct vfsmount *collect_mounts(struct path *path)
tree = copy_tree(real_mount(path->mnt), path->dentry,
CL_COPY_ALL | CL_PRIVATE);
up_write(&namespace_sem);
- return tree ? &tree->mnt : NULL;
+ if (IS_ERR(tree))
+ return NULL;
+ return &tree->mnt;
}
void drop_collected_mounts(struct vfsmount *mnt)
@@ -1608,14 +1611,15 @@ static int do_loopback(struct path *path, char *old_name,
if (!check_mnt(real_mount(path->mnt)) || !check_mnt(old))
goto out2;
- err = -ENOMEM;
if (recurse)
mnt = copy_tree(old, old_path.dentry, 0);
else
mnt = clone_mnt(old, old_path.dentry, 0);
- if (!mnt)
- goto out2;
+ if (IS_ERR(mnt)) {
+ err = PTR_ERR(mnt);
+ goto out;
+ }
err = graft_tree(mnt, path);
if (err) {
@@ -2209,23 +2213,6 @@ static struct mnt_namespace *alloc_mnt_ns(void)
return new_ns;
}
-void mnt_make_longterm(struct vfsmount *mnt)
-{
- __mnt_make_longterm(real_mount(mnt));
-}
-
-void mnt_make_shortterm(struct vfsmount *m)
-{
-#ifdef CONFIG_SMP
- struct mount *mnt = real_mount(m);
- if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
- return;
- br_write_lock(&vfsmount_lock);
- atomic_dec(&mnt->mnt_longterm);
- br_write_unlock(&vfsmount_lock);
-#endif
-}
-
/*
* Allocate a new namespace structure and populate it with contents
* copied from the namespace of the passed in task structure.
@@ -2246,10 +2233,10 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
down_write(&namespace_sem);
/* First pass: copy the tree topology */
new = copy_tree(old, old->mnt.mnt_root, CL_COPY_ALL | CL_EXPIRE);
- if (!new) {
+ if (IS_ERR(new)) {
up_write(&namespace_sem);
kfree(new_ns);
- return ERR_PTR(-ENOMEM);
+ return ERR_CAST(new);
}
new_ns->root = new;
br_write_lock(&vfsmount_lock);
@@ -2265,18 +2252,13 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
q = new;
while (p) {
q->mnt_ns = new_ns;
- __mnt_make_longterm(q);
if (fs) {
if (&p->mnt == fs->root.mnt) {
fs->root.mnt = mntget(&q->mnt);
- __mnt_make_longterm(q);
- mnt_make_shortterm(&p->mnt);
rootmnt = &p->mnt;
}
if (&p->mnt == fs->pwd.mnt) {
fs->pwd.mnt = mntget(&q->mnt);
- __mnt_make_longterm(q);
- mnt_make_shortterm(&p->mnt);
pwdmnt = &p->mnt;
}
}
@@ -2320,7 +2302,6 @@ static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
if (!IS_ERR(new_ns)) {
struct mount *mnt = real_mount(m);
mnt->mnt_ns = new_ns;
- __mnt_make_longterm(mnt);
new_ns->root = mnt;
list_add(&new_ns->list, &mnt->mnt_list);
} else {
@@ -2615,7 +2596,7 @@ struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
* it is a longterm mount, don't release mnt until
* we unmount before file sys is unregistered
*/
- mnt_make_longterm(mnt);
+ real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
}
return mnt;
}
@@ -2625,7 +2606,9 @@ void kern_unmount(struct vfsmount *mnt)
{
/* release long term mount so mount point can be released */
if (!IS_ERR_OR_NULL(mnt)) {
- mnt_make_shortterm(mnt);
+ br_write_lock(&vfsmount_lock);
+ real_mount(mnt)->mnt_ns = NULL;
+ br_write_unlock(&vfsmount_lock);
mntput(mnt);
}
}
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index aeed93a6bde0..4117e7b377bb 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -30,8 +30,8 @@ static void ncp_do_readdir(struct file *, void *, filldir_t,
static int ncp_readdir(struct file *, void *, filldir_t);
-static int ncp_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
-static struct dentry *ncp_lookup(struct inode *, struct dentry *, struct nameidata *);
+static int ncp_create(struct inode *, struct dentry *, umode_t, bool);
+static struct dentry *ncp_lookup(struct inode *, struct dentry *, unsigned int);
static int ncp_unlink(struct inode *, struct dentry *);
static int ncp_mkdir(struct inode *, struct dentry *, umode_t);
static int ncp_rmdir(struct inode *, struct dentry *);
@@ -72,7 +72,7 @@ const struct inode_operations ncp_dir_inode_operations =
/*
* Dentry operations routines
*/
-static int ncp_lookup_validate(struct dentry *, struct nameidata *);
+static int ncp_lookup_validate(struct dentry *, unsigned int);
static int ncp_hash_dentry(const struct dentry *, const struct inode *,
struct qstr *);
static int ncp_compare_dentry(const struct dentry *, const struct inode *,
@@ -290,7 +290,7 @@ leave_me:;
static int
-ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd)
+ncp_lookup_validate(struct dentry *dentry, unsigned int flags)
{
struct ncp_server *server;
struct dentry *parent;
@@ -302,7 +302,7 @@ ncp_lookup_validate(struct dentry *dentry, struct nameidata *nd)
if (dentry == dentry->d_sb->s_root)
return 1;
- if (nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
parent = dget_parent(dentry);
@@ -836,7 +836,7 @@ out:
return result;
}
-static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct ncp_server *server = NCP_SERVER(dir);
struct inode *inode = NULL;
@@ -980,7 +980,7 @@ out:
}
static int ncp_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
return ncp_create_new(dir, dentry, mode, 0, 0);
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index f430057ff3b3..a6b1c7fb8232 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -46,8 +46,8 @@
static int nfs_opendir(struct inode *, struct file *);
static int nfs_closedir(struct inode *, struct file *);
static int nfs_readdir(struct file *, void *, filldir_t);
-static struct dentry *nfs_lookup(struct inode *, struct dentry *, struct nameidata *);
-static int nfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
+static struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
+static int nfs_create(struct inode *, struct dentry *, umode_t, bool);
static int nfs_mkdir(struct inode *, struct dentry *, umode_t);
static int nfs_rmdir(struct inode *, struct dentry *);
static int nfs_unlink(struct inode *, struct dentry *);
@@ -111,11 +111,13 @@ const struct inode_operations nfs3_dir_inode_operations = {
#ifdef CONFIG_NFS_V4
-static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
-static int nfs_open_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd);
+static int nfs_atomic_open(struct inode *, struct dentry *,
+ struct file *, unsigned, umode_t,
+ int *);
const struct inode_operations nfs4_dir_inode_operations = {
- .create = nfs_open_create,
- .lookup = nfs_atomic_lookup,
+ .create = nfs_create,
+ .lookup = nfs_lookup,
+ .atomic_open = nfs_atomic_open,
.link = nfs_link,
.unlink = nfs_unlink,
.symlink = nfs_symlink,
@@ -1029,27 +1031,14 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry)
}
/*
- * Return the intent data that applies to this particular path component
- *
- * Note that the current set of intents only apply to the very last
- * component of the path and none of them is set before that last
- * component.
- */
-static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd,
- unsigned int mask)
-{
- return nd->flags & mask;
-}
-
-/*
* Use intent information to check whether or not we're going to do
* an O_EXCL create using this path component.
*/
-static int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd)
+static int nfs_is_exclusive_create(struct inode *dir, unsigned int flags)
{
if (NFS_PROTO(dir)->version == 2)
return 0;
- return nd && nfs_lookup_check_intent(nd, LOOKUP_EXCL);
+ return flags & LOOKUP_EXCL;
}
/*
@@ -1061,25 +1050,20 @@ static int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd)
*
*/
static inline
-int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd)
+int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags)
{
struct nfs_server *server = NFS_SERVER(inode);
if (IS_AUTOMOUNT(inode))
return 0;
- if (nd != NULL) {
- /* VFS wants an on-the-wire revalidation */
- if (nd->flags & LOOKUP_REVAL)
- goto out_force;
- /* This is an open(2) */
- if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 &&
- !(server->flags & NFS_MOUNT_NOCTO) &&
- (S_ISREG(inode->i_mode) ||
- S_ISDIR(inode->i_mode)))
- goto out_force;
- return 0;
- }
- return nfs_revalidate_inode(server, inode);
+ /* VFS wants an on-the-wire revalidation */
+ if (flags & LOOKUP_REVAL)
+ goto out_force;
+ /* This is an open(2) */
+ if ((flags & LOOKUP_OPEN) && !(server->flags & NFS_MOUNT_NOCTO) &&
+ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
+ goto out_force;
+ return 0;
out_force:
return __nfs_revalidate_inode(server, inode);
}
@@ -1093,10 +1077,10 @@ out_force:
*/
static inline
int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
/* Don't revalidate a negative dentry if we're creating a new file */
- if (nd != NULL && nfs_lookup_check_intent(nd, LOOKUP_CREATE) != 0)
+ if (flags & LOOKUP_CREATE)
return 0;
if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG)
return 1;
@@ -1114,7 +1098,7 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
* If the parent directory is seen to have changed, we throw out the
* cached dentry and do a new lookup.
*/
-static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
struct inode *dir;
struct inode *inode;
@@ -1123,7 +1107,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
struct nfs_fattr *fattr = NULL;
int error;
- if (nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
parent = dget_parent(dentry);
@@ -1132,7 +1116,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
inode = dentry->d_inode;
if (!inode) {
- if (nfs_neg_need_reval(dir, dentry, nd))
+ if (nfs_neg_need_reval(dir, dentry, flags))
goto out_bad;
goto out_valid_noent;
}
@@ -1148,8 +1132,8 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
goto out_set_verifier;
/* Force a full look up iff the parent directory has changed */
- if (!nfs_is_exclusive_create(dir, nd) && nfs_check_verifier(dir, dentry)) {
- if (nfs_lookup_verify_inode(inode, nd))
+ if (!nfs_is_exclusive_create(dir, flags) && nfs_check_verifier(dir, dentry)) {
+ if (nfs_lookup_verify_inode(inode, flags))
goto out_zap_parent;
goto out_valid;
}
@@ -1286,7 +1270,7 @@ const struct dentry_operations nfs_dentry_operations = {
.d_release = nfs_d_release,
};
-static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
+static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
struct dentry *res;
struct dentry *parent;
@@ -1307,7 +1291,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
* If we're doing an exclusive create, optimize away the lookup
* but don't hash the dentry.
*/
- if (nfs_is_exclusive_create(dir, nd)) {
+ if (nfs_is_exclusive_create(dir, flags)) {
d_instantiate(dentry, NULL);
res = NULL;
goto out;
@@ -1354,7 +1338,7 @@ out:
}
#ifdef CONFIG_NFS_V4
-static int nfs4_lookup_revalidate(struct dentry *, struct nameidata *);
+static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
const struct dentry_operations nfs4_dentry_operations = {
.d_revalidate = nfs4_lookup_revalidate,
@@ -1364,24 +1348,6 @@ const struct dentry_operations nfs4_dentry_operations = {
.d_release = nfs_d_release,
};
-/*
- * Use intent information to determine whether we need to substitute
- * the NFSv4-style stateful OPEN for the LOOKUP call
- */
-static int is_atomic_open(struct nameidata *nd)
-{
- if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_OPEN) == 0)
- return 0;
- /* NFS does not (yet) have a stateful open for directories */
- if (nd->flags & LOOKUP_DIRECTORY)
- return 0;
- /* Are we trying to write to a read only partition? */
- if (__mnt_is_readonly(nd->path.mnt) &&
- (nd->intent.open.flags & (O_CREAT|O_TRUNC|O_ACCMODE)))
- return 0;
- return 1;
-}
-
static fmode_t flags_to_mode(int flags)
{
fmode_t res = (__force fmode_t)flags & FMODE_EXEC;
@@ -1403,136 +1369,143 @@ static int do_open(struct inode *inode, struct file *filp)
return 0;
}
-static int nfs_intent_set_file(struct nameidata *nd, struct nfs_open_context *ctx)
+static int nfs_finish_open(struct nfs_open_context *ctx,
+ struct dentry *dentry,
+ struct file *file, unsigned open_flags,
+ int *opened)
{
- struct file *filp;
- int ret = 0;
+ int err;
+
+ if (ctx->dentry != dentry) {
+ dput(ctx->dentry);
+ ctx->dentry = dget(dentry);
+ }
/* If the open_intent is for execute, we have an extra check to make */
if (ctx->mode & FMODE_EXEC) {
- ret = nfs_may_open(ctx->dentry->d_inode,
- ctx->cred,
- nd->intent.open.flags);
- if (ret < 0)
+ err = nfs_may_open(dentry->d_inode, ctx->cred, open_flags);
+ if (err < 0)
goto out;
}
- filp = lookup_instantiate_filp(nd, ctx->dentry, do_open);
- if (IS_ERR(filp))
- ret = PTR_ERR(filp);
- else
- nfs_file_set_open_context(filp, ctx);
+
+ err = finish_open(file, dentry, do_open, opened);
+ if (err)
+ goto out;
+ nfs_file_set_open_context(file, ctx);
+
out:
put_nfs_open_context(ctx);
- return ret;
+ return err;
}
-static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+static int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
+ struct file *file, unsigned open_flags,
+ umode_t mode, int *opened)
{
struct nfs_open_context *ctx;
- struct iattr attr;
- struct dentry *res = NULL;
+ struct dentry *res;
+ struct iattr attr = { .ia_valid = ATTR_OPEN };
struct inode *inode;
- int open_flags;
int err;
- dfprintk(VFS, "NFS: atomic_lookup(%s/%ld), %s\n",
+ /* Expect a negative dentry */
+ BUG_ON(dentry->d_inode);
+
+ dfprintk(VFS, "NFS: atomic_open(%s/%ld), %s\n",
dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
- /* Check that we are indeed trying to open this file */
- if (!is_atomic_open(nd))
+ /* NFS only supports OPEN on regular files */
+ if ((open_flags & O_DIRECTORY)) {
+ if (!d_unhashed(dentry)) {
+ /*
+ * Hashed negative dentry with O_DIRECTORY: dentry was
+ * revalidated and is fine, no need to perform lookup
+ * again
+ */
+ return -ENOENT;
+ }
goto no_open;
-
- if (dentry->d_name.len > NFS_SERVER(dir)->namelen) {
- res = ERR_PTR(-ENAMETOOLONG);
- goto out;
- }
-
- /* Let vfs_create() deal with O_EXCL. Instantiate, but don't hash
- * the dentry. */
- if (nd->flags & LOOKUP_EXCL) {
- d_instantiate(dentry, NULL);
- goto out;
}
- open_flags = nd->intent.open.flags;
- attr.ia_valid = ATTR_OPEN;
-
- ctx = create_nfs_open_context(dentry, open_flags);
- res = ERR_CAST(ctx);
- if (IS_ERR(ctx))
- goto out;
+ if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
+ return -ENAMETOOLONG;
- if (nd->flags & LOOKUP_CREATE) {
- attr.ia_mode = nd->intent.open.create_mode;
+ if (open_flags & O_CREAT) {
attr.ia_valid |= ATTR_MODE;
- attr.ia_mode &= ~current_umask();
- } else
- open_flags &= ~(O_EXCL | O_CREAT);
-
+ attr.ia_mode = mode & ~current_umask();
+ }
if (open_flags & O_TRUNC) {
attr.ia_valid |= ATTR_SIZE;
attr.ia_size = 0;
}
- /* Open the file on the server */
+ ctx = create_nfs_open_context(dentry, open_flags);
+ err = PTR_ERR(ctx);
+ if (IS_ERR(ctx))
+ goto out;
+
nfs_block_sillyrename(dentry->d_parent);
inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr);
+ d_drop(dentry);
if (IS_ERR(inode)) {
nfs_unblock_sillyrename(dentry->d_parent);
put_nfs_open_context(ctx);
- switch (PTR_ERR(inode)) {
- /* Make a negative dentry */
- case -ENOENT:
- d_add(dentry, NULL);
- res = NULL;
- goto out;
- /* This turned out not to be a regular file */
- case -EISDIR:
- case -ENOTDIR:
+ err = PTR_ERR(inode);
+ switch (err) {
+ case -ENOENT:
+ d_add(dentry, NULL);
+ break;
+ case -EISDIR:
+ case -ENOTDIR:
+ goto no_open;
+ case -ELOOP:
+ if (!(open_flags & O_NOFOLLOW))
goto no_open;
- case -ELOOP:
- if (!(nd->intent.open.flags & O_NOFOLLOW))
- goto no_open;
+ break;
/* case -EINVAL: */
- default:
- res = ERR_CAST(inode);
- goto out;
+ default:
+ break;
}
+ goto out;
}
res = d_add_unique(dentry, inode);
- nfs_unblock_sillyrename(dentry->d_parent);
- if (res != NULL) {
- dput(ctx->dentry);
- ctx->dentry = dget(res);
+ if (res != NULL)
dentry = res;
- }
- err = nfs_intent_set_file(nd, ctx);
- if (err < 0) {
- if (res != NULL)
- dput(res);
- return ERR_PTR(err);
- }
-out:
+
+ nfs_unblock_sillyrename(dentry->d_parent);
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- return res;
+
+ err = nfs_finish_open(ctx, dentry, file, open_flags, opened);
+
+ dput(res);
+out:
+ return err;
+
no_open:
- return nfs_lookup(dir, dentry, nd);
+ res = nfs_lookup(dir, dentry, 0);
+ err = PTR_ERR(res);
+ if (IS_ERR(res))
+ goto out;
+
+ return finish_no_open(file, res);
}
-static int nfs4_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
{
struct dentry *parent = NULL;
struct inode *inode;
struct inode *dir;
- int openflags, ret = 0;
+ int ret = 0;
- if (nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
- inode = dentry->d_inode;
- if (!is_atomic_open(nd) || d_mountpoint(dentry))
+ if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
+ goto no_open;
+ if (d_mountpoint(dentry))
goto no_open;
+ inode = dentry->d_inode;
parent = dget_parent(dentry);
dir = parent->d_inode;
@@ -1540,7 +1513,7 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
* optimize away revalidation of negative dentries.
*/
if (inode == NULL) {
- if (!nfs_neg_need_reval(dir, dentry, nd))
+ if (!nfs_neg_need_reval(dir, dentry, flags))
ret = 1;
goto out;
}
@@ -1548,9 +1521,8 @@ static int nfs4_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
/* NFS only supports OPEN on regular files */
if (!S_ISREG(inode->i_mode))
goto no_open_dput;
- openflags = nd->intent.open.flags;
/* We cannot do exclusive creation on a positive dentry */
- if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
+ if (flags & LOOKUP_EXCL)
goto no_open_dput;
/* Let f_op->open() actually open (and revalidate) the file */
@@ -1563,48 +1535,7 @@ out:
no_open_dput:
dput(parent);
no_open:
- return nfs_lookup_revalidate(dentry, nd);
-}
-
-static int nfs_open_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, struct nameidata *nd)
-{
- struct nfs_open_context *ctx = NULL;
- struct iattr attr;
- int error;
- int open_flags = O_CREAT|O_EXCL;
-
- dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
- dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
-
- attr.ia_mode = mode;
- attr.ia_valid = ATTR_MODE;
-
- if (nd)
- open_flags = nd->intent.open.flags;
-
- ctx = create_nfs_open_context(dentry, open_flags);
- error = PTR_ERR(ctx);
- if (IS_ERR(ctx))
- goto out_err_drop;
-
- error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, ctx);
- if (error != 0)
- goto out_put_ctx;
- if (nd) {
- error = nfs_intent_set_file(nd, ctx);
- if (error < 0)
- goto out_err;
- } else {
- put_nfs_open_context(ctx);
- }
- return 0;
-out_put_ctx:
- put_nfs_open_context(ctx);
-out_err_drop:
- d_drop(dentry);
-out_err:
- return error;
+ return nfs_lookup_revalidate(dentry, flags);
}
#endif /* CONFIG_NFSV4 */
@@ -1658,11 +1589,11 @@ out_error:
* reply path made it appear to have failed.
*/
static int nfs_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, struct nameidata *nd)
+ umode_t mode, bool excl)
{
struct iattr attr;
+ int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT;
int error;
- int open_flags = O_CREAT|O_EXCL;
dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);
@@ -1670,10 +1601,7 @@ static int nfs_create(struct inode *dir, struct dentry *dentry,
attr.ia_mode = mode;
attr.ia_valid = ATTR_MODE;
- if (nd)
- open_flags = nd->intent.open.flags;
-
- error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, NULL);
+ error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags);
if (error != 0)
goto out_err;
return 0;
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 9a4cbfc85d81..48253372ab1d 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -484,6 +484,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
if (!nfs_pageio_add_request(&desc, req)) {
+ nfs_list_remove_request(req);
nfs_list_add_request(req, &failed);
spin_lock(cinfo.lock);
dreq->flags = 0;
@@ -494,8 +495,11 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
}
nfs_pageio_complete(&desc);
- while (!list_empty(&failed))
+ while (!list_empty(&failed)) {
+ req = nfs_list_entry(failed.next);
+ nfs_list_remove_request(req);
nfs_unlock_and_release_request(req);
+ }
if (put_dreq(dreq))
nfs_direct_write_complete(dreq, dreq->inode);
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 8abfb19bd3aa..a67990f90bd7 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -62,7 +62,7 @@ static int nfs_superblock_set_dummy_root(struct super_block *sb, struct inode *i
*/
spin_lock(&sb->s_root->d_inode->i_lock);
spin_lock(&sb->s_root->d_lock);
- list_del_init(&sb->s_root->d_alias);
+ hlist_del_init(&sb->s_root->d_alias);
spin_unlock(&sb->s_root->d_lock);
spin_unlock(&sb->s_root->d_inode->i_lock);
}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 2292a0fd2bff..3187e24e8f78 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -314,7 +314,7 @@ static void nfs3_free_createdata(struct nfs3_createdata *data)
*/
static int
nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
- int flags, struct nfs_open_context *ctx)
+ int flags)
{
struct nfs3_createdata *data;
umode_t mode = sattr->ia_mode;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 15fc7e4664ed..c157b2089b47 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2806,37 +2806,22 @@ static int nfs4_proc_readlink(struct inode *inode, struct page *page,
}
/*
- * Got race?
- * We will need to arrange for the VFS layer to provide an atomic open.
- * Until then, this create/open method is prone to inefficiency and race
- * conditions due to the lookup, create, and open VFS calls from sys_open()
- * placed on the wire.
- *
- * Given the above sorry state of affairs, I'm simply sending an OPEN.
- * The file will be opened again in the subsequent VFS open call
- * (nfs4_proc_file_open).
- *
- * The open for read will just hang around to be used by any process that
- * opens the file O_RDONLY. This will all be resolved with the VFS changes.
+ * This is just for mknod. open(O_CREAT) will always do ->open_context().
*/
-
static int
nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
- int flags, struct nfs_open_context *ctx)
+ int flags)
{
- struct dentry *de = dentry;
+ struct nfs_open_context *ctx;
struct nfs4_state *state;
- struct rpc_cred *cred = NULL;
- fmode_t fmode = 0;
int status = 0;
- if (ctx != NULL) {
- cred = ctx->cred;
- de = ctx->dentry;
- fmode = ctx->mode;
- }
+ ctx = alloc_nfs_open_context(dentry, FMODE_READ);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
sattr->ia_mode &= ~current_umask();
- state = nfs4_do_open(dir, de, fmode, flags, sattr, cred, NULL);
+ state = nfs4_do_open(dir, dentry, ctx->mode, flags, sattr, ctx->cred, NULL);
d_drop(dentry);
if (IS_ERR(state)) {
status = PTR_ERR(state);
@@ -2844,11 +2829,9 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
}
d_add(dentry, igrab(state->inode));
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- if (ctx != NULL)
- ctx->state = state;
- else
- nfs4_close_sync(state, fmode);
+ ctx->state = state;
out:
+ put_nfs_open_context(ctx);
return status;
}
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index b47277baebab..f50d3e8d6f22 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -454,7 +454,10 @@ int objio_read_pagelist(struct nfs_read_data *rdata)
objios->ios->done = _read_done;
dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
rdata->args.offset, rdata->args.count);
- return ore_read(objios->ios);
+ ret = ore_read(objios->ios);
+ if (unlikely(ret))
+ objio_free_result(&objios->oir);
+ return ret;
}
/*
@@ -486,8 +489,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
struct nfs_write_data *wdata = objios->oir.rpcdata;
struct address_space *mapping = wdata->header->inode->i_mapping;
pgoff_t index = offset / PAGE_SIZE;
- struct page *page = find_get_page(mapping, index);
+ struct page *page;
+ loff_t i_size = i_size_read(wdata->header->inode);
+
+ if (offset >= i_size) {
+ *uptodate = true;
+ dprintk("%s: g_zero_page index=0x%lx\n", __func__, index);
+ return ZERO_PAGE(0);
+ }
+ page = find_get_page(mapping, index);
if (!page) {
page = find_or_create_page(mapping, index, GFP_NOFS);
if (unlikely(!page)) {
@@ -507,8 +518,10 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
static void __r4w_put_page(void *priv, struct page *page)
{
- dprintk("%s: index=0x%lx\n", __func__, page->index);
- page_cache_release(page);
+ dprintk("%s: index=0x%lx\n", __func__,
+ (page == ZERO_PAGE(0)) ? -1UL : page->index);
+ if (ZERO_PAGE(0) != page)
+ page_cache_release(page);
return;
}
@@ -539,8 +552,10 @@ int objio_write_pagelist(struct nfs_write_data *wdata, int how)
dprintk("%s: offset=0x%llx length=0x%x\n", __func__,
wdata->args.offset, wdata->args.count);
ret = ore_write(objios->ios);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ objio_free_result(&objios->oir);
return ret;
+ }
if (objios->sync)
_write_done(objios->ios, objios);
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 617c7419a08e..4433806e116f 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -259,7 +259,7 @@ static void nfs_free_createdata(const struct nfs_createdata *data)
static int
nfs_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
- int flags, struct nfs_open_context *ctx)
+ int flags)
{
struct nfs_createdata *data;
struct rpc_message msg = {
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 906f09c7d842..8b2a2977b720 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2419,7 +2419,7 @@ static struct dentry *nfs_fs_mount_common(struct file_system_type *fs_type,
sb_mntdata.mntflags |= MS_SYNCHRONOUS;
/* Get a superblock - note that we may end up sharing one that already exists */
- s = sget(fs_type, compare_super, nfs_set_super, &sb_mntdata);
+ s = sget(fs_type, compare_super, nfs_set_super, flags, &sb_mntdata);
if (IS_ERR(s)) {
mntroot = ERR_CAST(s);
goto out_err_nosb;
@@ -2860,6 +2860,8 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
dfprintk(MOUNT, "--> nfs4_try_mount()\n");
+ mount_info->fill_super = nfs4_fill_super;
+
export_path = data->nfs_server.export_path;
data->nfs_server.export_path = "/";
root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c8bd9c3be7f7..4700a0a929d7 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -745,7 +745,7 @@ __be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int may_flags, struct file **filp)
{
- struct dentry *dentry;
+ struct path path;
struct inode *inode;
int flags = O_RDONLY|O_LARGEFILE;
__be32 err;
@@ -762,8 +762,9 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
if (err)
goto out;
- dentry = fhp->fh_dentry;
- inode = dentry->d_inode;
+ path.mnt = fhp->fh_export->ex_path.mnt;
+ path.dentry = fhp->fh_dentry;
+ inode = path.dentry->d_inode;
/* Disallow write access to files with the append-only bit set
* or any access when mandatory locking enabled
@@ -792,8 +793,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
else
flags = O_WRONLY|O_LARGEFILE;
}
- *filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
- flags, current_cred());
+ *filp = dentry_open(&path, flags, current_cred());
if (IS_ERR(*filp))
host_err = PTR_ERR(*filp);
else {
@@ -1329,7 +1329,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
err = 0;
switch (type) {
case S_IFREG:
- host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
+ host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
if (!host_err)
nfsd_check_ignore_resizing(iap);
break;
@@ -1492,7 +1492,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out;
}
- host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
+ host_err = vfs_create(dirp, dchild, iap->ia_mode, true);
if (host_err < 0) {
fh_drop_write(fhp);
goto out_nfserr;
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index b72847988b78..1d0c0b84c5a3 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -63,7 +63,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
*/
static struct dentry *
-nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+nilfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
ino_t ino;
@@ -85,7 +85,7 @@ nilfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
* with d_instantiate().
*/
static int nilfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct inode *inode;
struct nilfs_transaction_info ti;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 1099a76cee59..d57c42f974ea 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1288,7 +1288,8 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
err = -EBUSY;
goto failed;
}
- s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, sd.bdev);
+ s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, flags,
+ sd.bdev);
mutex_unlock(&sd.bdev->bd_fsfreeze_mutex);
if (IS_ERR(s)) {
err = PTR_ERR(s);
@@ -1301,7 +1302,6 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
s_new = true;
/* New superblock instance created */
- s->s_flags = flags;
s->s_mode = mode;
strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id));
sb_set_blocksize(s, block_size(sd.bdev));
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 3568c8a8b138..d43803669739 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -61,8 +61,6 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
int client_fd;
- struct dentry *dentry;
- struct vfsmount *mnt;
struct file *new_file;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
@@ -81,12 +79,10 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
* we need a new file handle for the userspace program so it can read even if it was
* originally opened O_WRONLY.
*/
- dentry = dget(event->path.dentry);
- mnt = mntget(event->path.mnt);
/* it's possible this event was an overflow event. in that case dentry and mnt
* are NULL; That's fine, just don't call dentry open */
- if (dentry && mnt)
- new_file = dentry_open(dentry, mnt,
+ if (event->path.dentry && event->path.mnt)
+ new_file = dentry_open(&event->path,
group->fanotify_data.f_flags | FMODE_NONOTIFY,
current_cred());
else
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index b39c5c161adb..6baadb5a8430 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -52,6 +52,7 @@ void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
void __fsnotify_update_child_dentry_flags(struct inode *inode)
{
struct dentry *alias;
+ struct hlist_node *p;
int watched;
if (!S_ISDIR(inode->i_mode))
@@ -63,7 +64,7 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
spin_lock(&inode->i_lock);
/* run all of the dentries associated with this inode. Since this is a
* directory, there damn well better only be one item on this list */
- list_for_each_entry(alias, &inode->i_dentry, d_alias) {
+ hlist_for_each_entry(alias, p, &inode->i_dentry, d_alias) {
struct dentry *child;
/* run all of the children of the original inode and fix their
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 358273e59ade..436f36037e09 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -101,7 +101,7 @@
* Locking: Caller must hold i_mutex on the directory.
*/
static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent,
- struct nameidata *nd)
+ unsigned int flags)
{
ntfs_volume *vol = NTFS_SB(dir_ino->i_sb);
struct inode *dent_inode;
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index e5ba34818332..8db4b58b2e4b 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -49,14 +49,13 @@ void ocfs2_dentry_attach_gen(struct dentry *dentry)
}
-static int ocfs2_dentry_revalidate(struct dentry *dentry,
- struct nameidata *nd)
+static int ocfs2_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
int ret = 0; /* if all else fails, just return false */
struct ocfs2_super *osb;
- if (nd && nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
inode = dentry->d_inode;
@@ -170,13 +169,11 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
u64 parent_blkno,
int skip_unhashed)
{
- struct list_head *p;
- struct dentry *dentry = NULL;
+ struct hlist_node *p;
+ struct dentry *dentry;
spin_lock(&inode->i_lock);
- list_for_each(p, &inode->i_dentry) {
- dentry = list_entry(p, struct dentry, d_alias);
-
+ hlist_for_each_entry(dentry, p, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
trace_ocfs2_find_local_alias(dentry->d_name.len,
@@ -184,16 +181,13 @@ struct dentry *ocfs2_find_local_alias(struct inode *inode,
dget_dlock(dentry);
spin_unlock(&dentry->d_lock);
- break;
+ spin_unlock(&inode->i_lock);
+ return dentry;
}
spin_unlock(&dentry->d_lock);
-
- dentry = NULL;
}
-
spin_unlock(&inode->i_lock);
-
- return dentry;
+ return NULL;
}
DEFINE_SPINLOCK(dentry_attach_lock);
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index e31d6ae013ab..83b6f98e0665 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -526,7 +526,7 @@ bail:
static int dlmfs_create(struct inode *dir,
struct dentry *dentry,
umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
int status = 0;
struct inode *inode;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 81a4cd22f80b..4f7795fb5fc0 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -456,7 +456,7 @@ static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
stats->ls_gets++;
stats->ls_total += ktime_to_ns(kt);
/* overflow */
- if (unlikely(stats->ls_gets) == 0) {
+ if (unlikely(stats->ls_gets == 0)) {
stats->ls_gets++;
stats->ls_total = ktime_to_ns(kt);
}
@@ -3932,6 +3932,8 @@ unqueue:
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
+ unsigned long flags;
+
assert_spin_locked(&lockres->l_lock);
if (lockres->l_flags & OCFS2_LOCK_FREEING) {
@@ -3945,21 +3947,22 @@ static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
- spin_lock(&osb->dc_task_lock);
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
if (list_empty(&lockres->l_blocked_list)) {
list_add_tail(&lockres->l_blocked_list,
&osb->blocked_lock_list);
osb->blocked_lock_count++;
}
- spin_unlock(&osb->dc_task_lock);
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
}
static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
{
unsigned long processed;
+ unsigned long flags;
struct ocfs2_lock_res *lockres;
- spin_lock(&osb->dc_task_lock);
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
/* grab this early so we know to try again if a state change and
* wake happens part-way through our work */
osb->dc_work_sequence = osb->dc_wake_sequence;
@@ -3972,38 +3975,40 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
struct ocfs2_lock_res, l_blocked_list);
list_del_init(&lockres->l_blocked_list);
osb->blocked_lock_count--;
- spin_unlock(&osb->dc_task_lock);
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
BUG_ON(!processed);
processed--;
ocfs2_process_blocked_lock(osb, lockres);
- spin_lock(&osb->dc_task_lock);
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
}
- spin_unlock(&osb->dc_task_lock);
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
}
static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
{
int empty = 0;
+ unsigned long flags;
- spin_lock(&osb->dc_task_lock);
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
if (list_empty(&osb->blocked_lock_list))
empty = 1;
- spin_unlock(&osb->dc_task_lock);
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
return empty;
}
static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
{
int should_wake = 0;
+ unsigned long flags;
- spin_lock(&osb->dc_task_lock);
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
if (osb->dc_work_sequence != osb->dc_wake_sequence)
should_wake = 1;
- spin_unlock(&osb->dc_task_lock);
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
return should_wake;
}
@@ -4033,10 +4038,12 @@ static int ocfs2_downconvert_thread(void *arg)
void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
{
- spin_lock(&osb->dc_task_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&osb->dc_task_lock, flags);
/* make sure the voting thread gets a swipe at whatever changes
* the caller may have made to the voting state */
osb->dc_wake_sequence++;
- spin_unlock(&osb->dc_task_lock);
+ spin_unlock_irqrestore(&osb->dc_task_lock, flags);
wake_up(&osb->dc_event);
}
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 2f5b92ef0e53..70b5863a2d64 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -923,8 +923,6 @@ out_unlock:
ocfs2_inode_unlock(inode, 0);
out:
- if (ret && ret != -ENXIO)
- ret = -ENXIO;
return ret;
}
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 061591a3ab08..7602783d7f41 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1950,7 +1950,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
if (ret < 0)
mlog_errno(ret);
- if (file->f_flags & O_SYNC)
+ if (file && (file->f_flags & O_SYNC))
handle->h_sync = 1;
ocfs2_commit_trans(osb, handle);
@@ -2422,8 +2422,10 @@ out_dio:
unaligned_dio = 0;
}
- if (unaligned_dio)
+ if (unaligned_dio) {
+ ocfs2_iocb_clear_unaligned_aio(iocb);
atomic_dec(&OCFS2_I(inode)->ip_unaligned_aio);
+ }
out:
if (rw_level != -1)
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 9f39c640cddf..f1fd0741162b 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -98,7 +98,7 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
static struct dentry *ocfs2_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
int status;
u64 blkno;
@@ -618,7 +618,7 @@ static int ocfs2_mkdir(struct inode *dir,
static int ocfs2_create(struct inode *dir,
struct dentry *dentry,
umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
int ret;
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 92fcd575775a..0a86e302655f 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -399,8 +399,6 @@ int ocfs2_global_read_info(struct super_block *sb, int type)
msecs_to_jiffies(oinfo->dqi_syncms));
out_err:
- if (status)
- mlog_errno(status);
return status;
out_unlock:
ocfs2_unlock_global_qf(oinfo, 0);
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c
index f00576ec320f..fb5b3ff79dc6 100644
--- a/fs/omfs/dir.c
+++ b/fs/omfs/dir.c
@@ -285,13 +285,13 @@ static int omfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
}
static int omfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
return omfs_add_node(dir, dentry, mode | S_IFREG);
}
static struct dentry *omfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct buffer_head *bh;
struct inode *inode = NULL;
diff --git a/fs/open.c b/fs/open.c
index d6c79a0dffc7..1e914b397e12 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -397,10 +397,10 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
{
struct file *file;
struct inode *inode;
- int error;
+ int error, fput_needed;
error = -EBADF;
- file = fget(fd);
+ file = fget_raw_light(fd, &fput_needed);
if (!file)
goto out;
@@ -414,7 +414,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
if (!error)
set_fs_pwd(current->fs, &file->f_path);
out_putf:
- fput(file);
+ fput_light(file, fput_needed);
out:
return error;
}
@@ -537,25 +537,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
return error;
}
-SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
-{
- struct path path;
- int error;
-
- error = user_path(filename, &path);
- if (error)
- goto out;
- error = mnt_want_write(path.mnt);
- if (error)
- goto out_release;
- error = chown_common(&path, user, group);
- mnt_drop_write(path.mnt);
-out_release:
- path_put(&path);
-out:
- return error;
-}
-
SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
gid_t, group, int, flag)
{
@@ -583,23 +564,15 @@ out:
return error;
}
-SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group)
+SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
{
- struct path path;
- int error;
+ return sys_fchownat(AT_FDCWD, filename, user, group, 0);
+}
- error = user_lpath(filename, &path);
- if (error)
- goto out;
- error = mnt_want_write(path.mnt);
- if (error)
- goto out_release;
- error = chown_common(&path, user, group);
- mnt_drop_write(path.mnt);
-out_release:
- path_put(&path);
-out:
- return error;
+SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group)
+{
+ return sys_fchownat(AT_FDCWD, filename, user, group,
+ AT_SYMLINK_NOFOLLOW);
}
SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
@@ -667,10 +640,9 @@ int open_check_o_direct(struct file *f)
return 0;
}
-static struct file *do_dentry_open(struct dentry *dentry, struct vfsmount *mnt,
- struct file *f,
- int (*open)(struct inode *, struct file *),
- const struct cred *cred)
+static int do_dentry_open(struct file *f,
+ int (*open)(struct inode *, struct file *),
+ const struct cred *cred)
{
static const struct file_operations empty_fops = {};
struct inode *inode;
@@ -682,9 +654,9 @@ static struct file *do_dentry_open(struct dentry *dentry, struct vfsmount *mnt,
if (unlikely(f->f_flags & O_PATH))
f->f_mode = FMODE_PATH;
- inode = dentry->d_inode;
+ inode = f->f_path.dentry->d_inode;
if (f->f_mode & FMODE_WRITE) {
- error = __get_file_write_access(inode, mnt);
+ error = __get_file_write_access(inode, f->f_path.mnt);
if (error)
goto cleanup_file;
if (!special_file(inode->i_mode))
@@ -692,14 +664,12 @@ static struct file *do_dentry_open(struct dentry *dentry, struct vfsmount *mnt,
}
f->f_mapping = inode->i_mapping;
- f->f_path.dentry = dentry;
- f->f_path.mnt = mnt;
f->f_pos = 0;
file_sb_list_add(f, inode->i_sb);
if (unlikely(f->f_mode & FMODE_PATH)) {
f->f_op = &empty_fops;
- return f;
+ return 0;
}
f->f_op = fops_get(inode->i_fop);
@@ -726,10 +696,11 @@ static struct file *do_dentry_open(struct dentry *dentry, struct vfsmount *mnt,
file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
- return f;
+ return 0;
cleanup_all:
fops_put(f->f_op);
+ file_sb_list_del(f);
if (f->f_mode & FMODE_WRITE) {
put_write_access(inode);
if (!special_file(inode->i_mode)) {
@@ -740,124 +711,62 @@ cleanup_all:
* here, so just reset the state.
*/
file_reset_write(f);
- mnt_drop_write(mnt);
+ mnt_drop_write(f->f_path.mnt);
}
}
- file_sb_list_del(f);
- f->f_path.dentry = NULL;
- f->f_path.mnt = NULL;
cleanup_file:
- dput(dentry);
- mntput(mnt);
- return ERR_PTR(error);
-}
-
-static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
- struct file *f,
- int (*open)(struct inode *, struct file *),
- const struct cred *cred)
-{
- struct file *res = do_dentry_open(dentry, mnt, f, open, cred);
- if (!IS_ERR(res)) {
- int error = open_check_o_direct(f);
- if (error) {
- fput(res);
- res = ERR_PTR(error);
- }
- } else {
- put_filp(f);
- }
- return res;
+ path_put(&f->f_path);
+ f->f_path.mnt = NULL;
+ f->f_path.dentry = NULL;
+ return error;
}
/**
- * lookup_instantiate_filp - instantiates the open intent filp
- * @nd: pointer to nameidata
+ * finish_open - finish opening a file
+ * @od: opaque open data
* @dentry: pointer to dentry
* @open: open callback
*
- * Helper for filesystems that want to use lookup open intents and pass back
- * a fully instantiated struct file to the caller.
- * This function is meant to be called from within a filesystem's
- * lookup method.
- * Beware of calling it for non-regular files! Those ->open methods might block
- * (e.g. in fifo_open), leaving you with parent locked (and in case of fifo,
- * leading to a deadlock, as nobody can open that fifo anymore, because
- * another process to open fifo will block on locked parent when doing lookup).
- * Note that in case of error, nd->intent.open.file is destroyed, but the
- * path information remains valid.
+ * This can be used to finish opening a file passed to i_op->atomic_open().
+ *
* If the open callback is set to NULL, then the standard f_op->open()
* filesystem callback is substituted.
*/
-struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
- int (*open)(struct inode *, struct file *))
+int finish_open(struct file *file, struct dentry *dentry,
+ int (*open)(struct inode *, struct file *),
+ int *opened)
{
- const struct cred *cred = current_cred();
+ int error;
+ BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
- if (IS_ERR(nd->intent.open.file))
- goto out;
- if (IS_ERR(dentry))
- goto out_err;
- nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt),
- nd->intent.open.file,
- open, cred);
-out:
- return nd->intent.open.file;
-out_err:
- release_open_intent(nd);
- nd->intent.open.file = ERR_CAST(dentry);
- goto out;
+ mntget(file->f_path.mnt);
+ file->f_path.dentry = dget(dentry);
+
+ error = do_dentry_open(file, open, current_cred());
+ if (!error)
+ *opened |= FILE_OPENED;
+
+ return error;
}
-EXPORT_SYMBOL_GPL(lookup_instantiate_filp);
+EXPORT_SYMBOL(finish_open);
/**
- * nameidata_to_filp - convert a nameidata to an open filp.
- * @nd: pointer to nameidata
- * @flags: open flags
+ * finish_no_open - finish ->atomic_open() without opening the file
+ *
+ * @od: opaque open data
+ * @dentry: dentry or NULL (as returned from ->lookup())
*
- * Note that this function destroys the original nameidata
+ * This can be used to set the result of a successful lookup in ->atomic_open().
+ * The filesystem's atomic_open() method shall return NULL after calling this.
*/
-struct file *nameidata_to_filp(struct nameidata *nd)
+int finish_no_open(struct file *file, struct dentry *dentry)
{
- const struct cred *cred = current_cred();
- struct file *filp;
-
- /* Pick up the filp from the open intent */
- filp = nd->intent.open.file;
-
- /* Has the filesystem initialised the file for us? */
- if (filp->f_path.dentry != NULL) {
- nd->intent.open.file = NULL;
- } else {
- struct file *res;
-
- path_get(&nd->path);
- res = do_dentry_open(nd->path.dentry, nd->path.mnt,
- filp, NULL, cred);
- if (!IS_ERR(res)) {
- int error;
-
- nd->intent.open.file = NULL;
- BUG_ON(res != filp);
-
- error = open_check_o_direct(filp);
- if (error) {
- fput(filp);
- filp = ERR_PTR(error);
- }
- } else {
- /* Allow nd->intent.open.file to be recycled */
- filp = res;
- }
- }
- return filp;
+ file->f_path.dentry = dentry;
+ return 1;
}
+EXPORT_SYMBOL(finish_no_open);
-/*
- * dentry_open() will have done dput(dentry) and mntput(mnt) if it returns an
- * error.
- */
-struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
+struct file *dentry_open(const struct path *path, int flags,
const struct cred *cred)
{
int error;
@@ -866,18 +775,28 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags,
validate_creds(cred);
/* We must always pass in a valid mount pointer. */
- BUG_ON(!mnt);
+ BUG_ON(!path->mnt);
error = -ENFILE;
f = get_empty_filp();
- if (f == NULL) {
- dput(dentry);
- mntput(mnt);
+ if (f == NULL)
return ERR_PTR(error);
- }
f->f_flags = flags;
- return __dentry_open(dentry, mnt, f, NULL, cred);
+ f->f_path = *path;
+ path_get(&f->f_path);
+ error = do_dentry_open(f, NULL, cred);
+ if (!error) {
+ error = open_check_o_direct(f);
+ if (error) {
+ fput(f);
+ f = ERR_PTR(error);
+ }
+ } else {
+ put_filp(f);
+ f = ERR_PTR(error);
+ }
+ return f;
}
EXPORT_SYMBOL(dentry_open);
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index bc49c975d501..4a3477949bca 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -170,13 +170,13 @@ static const struct file_operations openprom_operations = {
.llseek = generic_file_llseek,
};
-static struct dentry *openpromfs_lookup(struct inode *, struct dentry *, struct nameidata *);
+static struct dentry *openpromfs_lookup(struct inode *, struct dentry *, unsigned int);
static const struct inode_operations openprom_inode_operations = {
.lookup = openpromfs_lookup,
};
-static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
struct op_inode_info *ent_oi, *oi = OP_I(dir);
struct device_node *dp, *child;
diff --git a/fs/pnode.c b/fs/pnode.c
index bed378db0758..3e000a51ac0d 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -237,8 +237,9 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
source = get_source(m, prev_dest_mnt, prev_src_mnt, &type);
- if (!(child = copy_tree(source, source->mnt.mnt_root, type))) {
- ret = -ENOMEM;
+ child = copy_tree(source, source->mnt.mnt_root, type);
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
list_splice(tree_list, tmp_list.prev);
goto out;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 437195f204e1..2772208338f8 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1427,16 +1427,19 @@ static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
+ struct path path;
int error = -EACCES;
- /* We don't need a base pointer in the /proc filesystem */
- path_put(&nd->path);
-
/* Are we allowed to snoop on the tasks file descriptors? */
if (!proc_fd_access_allowed(inode))
goto out;
- error = PROC_I(inode)->op.proc_get_link(dentry, &nd->path);
+ error = PROC_I(inode)->op.proc_get_link(dentry, &path);
+ if (error)
+ goto out;
+
+ nd_jump_link(nd, &path);
+ return NULL;
out:
return ERR_PTR(error);
}
@@ -1601,13 +1604,13 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
* made this apply to all per process world readable and executable
* directories.
*/
-int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
+int pid_revalidate(struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
struct task_struct *task;
const struct cred *cred;
- if (nd && nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
inode = dentry->d_inode;
@@ -1781,7 +1784,7 @@ static int proc_fd_link(struct dentry *dentry, struct path *path)
return proc_fd_info(dentry->d_inode, path, NULL);
}
-static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int tid_fd_revalidate(struct dentry *dentry, unsigned int flags)
{
struct inode *inode;
struct task_struct *task;
@@ -1789,7 +1792,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
struct files_struct *files;
const struct cred *cred;
- if (nd && nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
inode = dentry->d_inode;
@@ -1868,7 +1871,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
d_set_d_op(dentry, &tid_fd_dentry_operations);
d_add(dentry, inode);
/* Close the race of the process dying before we return the dentry */
- if (tid_fd_revalidate(dentry, NULL))
+ if (tid_fd_revalidate(dentry, 0))
error = NULL;
out:
@@ -1956,7 +1959,7 @@ out_no_task:
}
static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
return proc_lookupfd_common(dir, dentry, proc_fd_instantiate);
}
@@ -2003,7 +2006,7 @@ static int dname_to_vma_addr(struct dentry *dentry,
return 0;
}
-static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
{
unsigned long vm_start, vm_end;
bool exact_vma_exists = false;
@@ -2013,7 +2016,7 @@ static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
struct inode *inode;
int status = 0;
- if (nd && nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
if (!capable(CAP_SYS_ADMIN)) {
@@ -2145,7 +2148,7 @@ proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
}
static struct dentry *proc_map_files_lookup(struct inode *dir,
- struct dentry *dentry, struct nameidata *nd)
+ struct dentry *dentry, unsigned int flags)
{
unsigned long vm_start, vm_end;
struct vm_area_struct *vma;
@@ -2371,7 +2374,7 @@ static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
d_set_d_op(dentry, &tid_fd_dentry_operations);
d_add(dentry, inode);
/* Close the race of the process dying before we return the dentry */
- if (tid_fd_revalidate(dentry, NULL))
+ if (tid_fd_revalidate(dentry, 0))
error = NULL;
out:
@@ -2380,7 +2383,7 @@ static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
static struct dentry *proc_lookupfdinfo(struct inode *dir,
struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
return proc_lookupfd_common(dir, dentry, proc_fdinfo_instantiate);
}
@@ -2430,7 +2433,7 @@ static struct dentry *proc_pident_instantiate(struct inode *dir,
d_set_d_op(dentry, &pid_dentry_operations);
d_add(dentry, inode);
/* Close the race of the process dying before we return the dentry */
- if (pid_revalidate(dentry, NULL))
+ if (pid_revalidate(dentry, 0))
error = NULL;
out:
return error;
@@ -2630,7 +2633,7 @@ static const struct file_operations proc_attr_dir_operations = {
};
static struct dentry *proc_attr_dir_lookup(struct inode *dir,
- struct dentry *dentry, struct nameidata *nd)
+ struct dentry *dentry, unsigned int flags)
{
return proc_pident_lookup(dir, dentry,
attr_dir_stuff, ARRAY_SIZE(attr_dir_stuff));
@@ -3114,7 +3117,8 @@ static const struct file_operations proc_tgid_base_operations = {
.llseek = default_llseek,
};
-static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
+static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
+{
return proc_pident_lookup(dir, dentry,
tgid_base_stuff, ARRAY_SIZE(tgid_base_stuff));
}
@@ -3237,13 +3241,13 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
d_add(dentry, inode);
/* Close the race of the process dying before we return the dentry */
- if (pid_revalidate(dentry, NULL))
+ if (pid_revalidate(dentry, 0))
error = NULL;
out:
return error;
}
-struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
+struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
struct dentry *result;
struct task_struct *task;
@@ -3470,7 +3474,8 @@ static int proc_tid_base_readdir(struct file * filp,
tid_base_stuff,ARRAY_SIZE(tid_base_stuff));
}
-static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd){
+static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
+{
return proc_pident_lookup(dir, dentry,
tid_base_stuff, ARRAY_SIZE(tid_base_stuff));
}
@@ -3508,13 +3513,13 @@ static struct dentry *proc_task_instantiate(struct inode *dir,
d_add(dentry, inode);
/* Close the race of the process dying before we return the dentry */
- if (pid_revalidate(dentry, NULL))
+ if (pid_revalidate(dentry, 0))
error = NULL;
out:
return error;
}
-static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
+static struct dentry *proc_task_lookup(struct inode *dir, struct dentry * dentry, unsigned int flags)
{
struct dentry *result = ERR_PTR(-ENOENT);
struct task_struct *task;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 2edf34f2eb61..b3647fe6a608 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -446,7 +446,7 @@ out_unlock:
}
struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
return proc_lookup_de(PDE(dir), dir, dentry);
}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index eca4aca5b6e2..e1167a1c9126 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -106,7 +106,7 @@ void pde_users_dec(struct proc_dir_entry *pde);
extern spinlock_t proc_subdir_lock;
-struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *);
+struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsigned int);
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
unsigned long task_vsize(struct mm_struct *);
unsigned long task_statm(struct mm_struct *,
@@ -132,7 +132,7 @@ int proc_remount(struct super_block *sb, int *flags, char *data);
* of the /proc/<pid> subdirectories.
*/
int proc_readdir(struct file *, void *, filldir_t);
-struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *);
+struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
@@ -142,7 +142,7 @@ typedef struct dentry *instantiate_t(struct inode *, struct dentry *,
int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
const char *name, int len,
instantiate_t instantiate, struct task_struct *task, const void *ptr);
-int pid_revalidate(struct dentry *dentry, struct nameidata *nd);
+int pid_revalidate(struct dentry *dentry, unsigned int flags);
struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *task);
extern const struct dentry_operations pid_dentry_operations;
int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 0d9e23a39e49..b178ed733c36 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -56,7 +56,7 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
d_set_d_op(dentry, &pid_dentry_operations);
d_add(dentry, inode);
/* Close the race of the process dying before we return the dentry */
- if (pid_revalidate(dentry, NULL))
+ if (pid_revalidate(dentry, 0))
error = NULL;
out:
return error;
@@ -140,7 +140,7 @@ const struct file_operations proc_ns_dir_operations = {
};
static struct dentry *proc_ns_dir_lookup(struct inode *dir,
- struct dentry *dentry, struct nameidata *nd)
+ struct dentry *dentry, unsigned int flags)
{
struct dentry *error;
struct task_struct *task = get_proc_task(dir);
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index 927cbd115e53..df7dd08d4391 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -101,6 +101,11 @@ void proc_device_tree_update_prop(struct proc_dir_entry *pde,
{
struct proc_dir_entry *ent;
+ if (!oldprop) {
+ proc_device_tree_add_prop(pde, newprop);
+ return;
+ }
+
for (ent = pde->subdir; ent != NULL; ent = ent->next)
if (ent->data == oldprop)
break;
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 06e1cc17caf6..fe72cd073dea 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -119,7 +119,7 @@ static struct net *get_proc_task_net(struct inode *dir)
}
static struct dentry *proc_tgid_net_lookup(struct inode *dir,
- struct dentry *dentry, struct nameidata *nd)
+ struct dentry *dentry, unsigned int flags)
{
struct dentry *de;
struct net *net;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 3476bca8f7af..dfafeb2b05a0 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -433,7 +433,7 @@ static struct ctl_table_header *grab_header(struct inode *inode)
}
static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct ctl_table_header *head = grab_header(dir);
struct ctl_table_header *h = NULL;
@@ -794,9 +794,9 @@ static const struct inode_operations proc_sys_dir_operations = {
.getattr = proc_sys_getattr,
};
-static int proc_sys_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int proc_sys_revalidate(struct dentry *dentry, unsigned int flags)
{
- if (nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
return !PROC_I(dentry->d_inode)->sysctl->unregistering;
}
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 7c30fce037c0..9a2d9fd7cadd 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -111,7 +111,7 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
options = data;
}
- sb = sget(fs_type, proc_test_super, proc_set_super, ns);
+ sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns);
if (IS_ERR(sb))
return ERR_CAST(sb);
@@ -121,7 +121,6 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
}
if (!sb->s_root) {
- sb->s_flags = flags;
err = proc_fill_super(sb);
if (err) {
deactivate_locked_super(sb);
@@ -200,13 +199,12 @@ static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
return 0;
}
-static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, unsigned int flags)
{
- if (!proc_lookup(dir, dentry, nd)) {
+ if (!proc_lookup(dir, dentry, flags))
return NULL;
- }
- return proc_pid_lookup(dir, dentry, nd);
+ return proc_pid_lookup(dir, dentry, flags);
}
static int proc_root_readdir(struct file * filp,
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 5e289a7cbad1..5fe34c355e85 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -17,7 +17,7 @@
static unsigned mounts_poll(struct file *file, poll_table *wait)
{
- struct proc_mounts *p = file->private_data;
+ struct proc_mounts *p = proc_mounts(file->private_data);
struct mnt_namespace *ns = p->ns;
unsigned res = POLLIN | POLLRDNORM;
@@ -121,7 +121,7 @@ out:
static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
{
- struct proc_mounts *p = m->private;
+ struct proc_mounts *p = proc_mounts(m);
struct mount *r = real_mount(mnt);
struct super_block *sb = mnt->mnt_sb;
struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -268,7 +268,6 @@ static int mounts_open_common(struct inode *inode, struct file *file,
if (ret)
goto err_free;
- p->m.private = p;
p->ns = ns;
p->root = root;
p->m.poll_event = ns->event;
@@ -288,7 +287,7 @@ static int mounts_open_common(struct inode *inode, struct file *file,
static int mounts_release(struct inode *inode, struct file *file)
{
- struct proc_mounts *p = file->private_data;
+ struct proc_mounts *p = proc_mounts(file->private_data);
path_put(&p->root);
put_mnt_ns(p->ns);
return seq_release(inode, file);
diff --git a/fs/qnx4/namei.c b/fs/qnx4/namei.c
index a512c0b30e8e..d024505ba007 100644
--- a/fs/qnx4/namei.c
+++ b/fs/qnx4/namei.c
@@ -95,7 +95,7 @@ static struct buffer_head *qnx4_find_entry(int len, struct inode *dir,
return NULL;
}
-struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
int ino;
struct qnx4_inode_entry *de;
diff --git a/fs/qnx4/qnx4.h b/fs/qnx4/qnx4.h
index 244d4620189b..34e2d329c97e 100644
--- a/fs/qnx4/qnx4.h
+++ b/fs/qnx4/qnx4.h
@@ -23,7 +23,7 @@ struct qnx4_inode_info {
};
extern struct inode *qnx4_iget(struct super_block *, unsigned long);
-extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd);
+extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags);
extern unsigned long qnx4_count_free_blocks(struct super_block *sb);
extern unsigned long qnx4_block_map(struct inode *inode, long iblock);
diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c
index e44012dc5645..2049c814bda4 100644
--- a/fs/qnx6/inode.c
+++ b/fs/qnx6/inode.c
@@ -622,7 +622,6 @@ static struct inode *qnx6_alloc_inode(struct super_block *sb)
static void qnx6_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
kmem_cache_free(qnx6_inode_cachep, QNX6_I(inode));
}
diff --git a/fs/qnx6/namei.c b/fs/qnx6/namei.c
index 8a97289e04ad..0561326a94f5 100644
--- a/fs/qnx6/namei.c
+++ b/fs/qnx6/namei.c
@@ -13,7 +13,7 @@
#include "qnx6.h"
struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
unsigned ino;
struct page *page;
diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
index 6c5e02a0b6a8..b00fcc960d37 100644
--- a/fs/qnx6/qnx6.h
+++ b/fs/qnx6/qnx6.h
@@ -45,7 +45,7 @@ struct qnx6_inode_info {
extern struct inode *qnx6_iget(struct super_block *sb, unsigned ino);
extern struct dentry *qnx6_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd);
+ unsigned int flags);
#ifdef CONFIG_QNX6FS_DEBUG
extern void qnx6_superblock_debug(struct qnx6_super_block *,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 10cbe841cb7e..36a29b753c79 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -78,7 +78,7 @@
#include <linux/quotaops.h>
#include "../internal.h" /* ugh */
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
* There are three quota SMP locks. dq_list_lock protects all lists with quotas
@@ -595,12 +595,14 @@ out:
}
EXPORT_SYMBOL(dquot_scan_active);
-int dquot_quota_sync(struct super_block *sb, int type, int wait)
+/* Write all dquot structures to quota files */
+int dquot_writeback_dquots(struct super_block *sb, int type)
{
struct list_head *dirty;
struct dquot *dquot;
struct quota_info *dqopt = sb_dqopt(sb);
int cnt;
+ int err, ret = 0;
mutex_lock(&dqopt->dqonoff_mutex);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -624,7 +626,9 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
atomic_inc(&dquot->dq_count);
spin_unlock(&dq_list_lock);
dqstats_inc(DQST_LOOKUPS);
- sb->dq_op->write_dquot(dquot);
+ err = sb->dq_op->write_dquot(dquot);
+ if (!ret && err)
+ err = ret;
dqput(dquot);
spin_lock(&dq_list_lock);
}
@@ -638,7 +642,21 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
dqstats_inc(DQST_SYNCS);
mutex_unlock(&dqopt->dqonoff_mutex);
- if (!wait || (dqopt->flags & DQUOT_QUOTA_SYS_FILE))
+ return ret;
+}
+EXPORT_SYMBOL(dquot_writeback_dquots);
+
+/* Write all dquot structures to disk and make them visible from userspace */
+int dquot_quota_sync(struct super_block *sb, int type)
+{
+ struct quota_info *dqopt = sb_dqopt(sb);
+ int cnt;
+ int ret;
+
+ ret = dquot_writeback_dquots(sb, type);
+ if (ret)
+ return ret;
+ if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
return 0;
/* This is not very clever (and fast) but currently I don't know about
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 9a391204ca27..6f155788cbc6 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -9,7 +9,7 @@
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
@@ -47,7 +47,7 @@ static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
static void quota_sync_one(struct super_block *sb, void *arg)
{
if (sb->s_qcop && sb->s_qcop->quota_sync)
- sb->s_qcop->quota_sync(sb, *(int *)arg, 1);
+ sb->s_qcop->quota_sync(sb, *(int *)arg);
}
static int quota_sync_all(int type)
@@ -270,7 +270,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
case Q_SYNC:
if (!sb->s_qcop->quota_sync)
return -ENOSYS;
- return sb->s_qcop->quota_sync(sb, type, 1);
+ return sb->s_qcop->quota_sync(sb, type);
case Q_XQUOTAON:
case Q_XQUOTAOFF:
case Q_XQUOTARM:
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index fbb0b478a346..d5378d028589 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -110,6 +110,7 @@ int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
/* prevent the page from being discarded on memory pressure */
SetPageDirty(page);
+ SetPageUptodate(page);
unlock_page(page);
put_page(page);
diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c
index a1fdabe21dec..eab8c09d3801 100644
--- a/fs/ramfs/inode.c
+++ b/fs/ramfs/inode.c
@@ -114,7 +114,7 @@ static int ramfs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode)
return retval;
}
-static int ramfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
+static int ramfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
return ramfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
diff --git a/fs/read_write.c b/fs/read_write.c
index c20614f86c01..1adfb691e4f1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -55,10 +55,11 @@ static loff_t lseek_execute(struct file *file, struct inode *inode,
* @file: file structure to seek on
* @offset: file offset to seek to
* @origin: type of seek
- * @size: max size of file system
+ * @size: max size of this file in file system
+ * @eof: offset used for SEEK_END position
*
* This is a variant of generic_file_llseek that allows passing in a custom
- * file size.
+ * maximum file size and a custom EOF position, for e.g. hashed directories
*
* Synchronization:
* SEEK_SET and SEEK_END are unsynchronized (but atomic on 64bit platforms)
@@ -67,13 +68,13 @@ static loff_t lseek_execute(struct file *file, struct inode *inode,
*/
loff_t
generic_file_llseek_size(struct file *file, loff_t offset, int origin,
- loff_t maxsize)
+ loff_t maxsize, loff_t eof)
{
struct inode *inode = file->f_mapping->host;
switch (origin) {
case SEEK_END:
- offset += i_size_read(inode);
+ offset += eof;
break;
case SEEK_CUR:
/*
@@ -99,7 +100,7 @@ generic_file_llseek_size(struct file *file, loff_t offset, int origin,
* In the generic case the entire file is data, so as long as
* offset isn't at the end of the file then the offset is data.
*/
- if (offset >= i_size_read(inode))
+ if (offset >= eof)
return -ENXIO;
break;
case SEEK_HOLE:
@@ -107,9 +108,9 @@ generic_file_llseek_size(struct file *file, loff_t offset, int origin,
* There is a virtual hole at the end of the file, so as long as
* offset isn't i_size or larger, return i_size.
*/
- if (offset >= i_size_read(inode))
+ if (offset >= eof)
return -ENXIO;
- offset = i_size_read(inode);
+ offset = eof;
break;
}
@@ -132,7 +133,8 @@ loff_t generic_file_llseek(struct file *file, loff_t offset, int origin)
struct inode *inode = file->f_mapping->host;
return generic_file_llseek_size(file, offset, origin,
- inode->i_sb->s_maxbytes);
+ inode->i_sb->s_maxbytes,
+ i_size_read(inode));
}
EXPORT_SYMBOL(generic_file_llseek);
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 84e8a69cee9d..8567fb847601 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -322,7 +322,7 @@ static int reiserfs_find_entry(struct inode *dir, const char *name, int namelen,
}
static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
int retval;
int lock_depth;
@@ -573,7 +573,7 @@ static int new_inode_init(struct inode *inode, struct inode *dir, umode_t mode)
}
static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
int retval;
struct inode *inode;
@@ -634,8 +634,8 @@ static int reiserfs_create(struct inode *dir, struct dentry *dentry, umode_t mod
reiserfs_update_inode_transaction(inode);
reiserfs_update_inode_transaction(dir);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
retval = journal_end(&th, dir->i_sb, jbegin_count);
out_failed:
@@ -712,8 +712,8 @@ static int reiserfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode
goto out_failed;
}
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
retval = journal_end(&th, dir->i_sb, jbegin_count);
out_failed:
@@ -800,8 +800,8 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
// the above add_entry did not update dir's stat data
reiserfs_update_sd(&th, dir);
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
retval = journal_end(&th, dir->i_sb, jbegin_count);
out_failed:
reiserfs_write_unlock_once(dir->i_sb, lock_depth);
@@ -1096,8 +1096,8 @@ static int reiserfs_symlink(struct inode *parent_dir,
goto out_failed;
}
- d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ d_instantiate(dentry, inode);
retval = journal_end(&th, parent_dir->i_sb, jbegin_count);
out_failed:
reiserfs_write_unlock(parent_dir->i_sb);
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 2c1ade692cc8..e60e87035bb3 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -403,7 +403,7 @@ static void *r_start(struct seq_file *m, loff_t * pos)
if (l)
return NULL;
- if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, s)))
+ if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, s)))
return NULL;
up_write(&s->s_umount);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 651ce767b55d..7a37dabf5a96 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -68,6 +68,11 @@ static int reiserfs_sync_fs(struct super_block *s, int wait)
{
struct reiserfs_transaction_handle th;
+ /*
+ * Writeback quota in non-journalled quota case - journalled quota has
+ * no dirty dquots
+ */
+ dquot_writeback_dquots(s, -1);
reiserfs_write_lock(s);
if (!journal_begin(&th, s, 1))
if (!journal_end_sync(&th, s, 1))
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 46fc1c20a6b1..d319963aeb11 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -62,7 +62,7 @@
static int xattr_create(struct inode *dir, struct dentry *dentry, int mode)
{
BUG_ON(!mutex_is_locked(&dir->i_mutex));
- return dir->i_op->create(dir, dentry, mode, NULL);
+ return dir->i_op->create(dir, dentry, mode, true);
}
#endif
@@ -942,7 +942,7 @@ int reiserfs_permission(struct inode *inode, int mask)
return generic_permission(inode, mask);
}
-static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int xattr_hide_revalidate(struct dentry *dentry, unsigned int flags)
{
return -EPERM;
}
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index e64f6b5f7ae5..77c5f2173983 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -210,7 +210,7 @@ out:
* look up an entry in a directory
*/
static struct dentry *romfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
unsigned long offset, maxoff;
struct inode *inode;
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 0cbd0494b79e..14cf9de1dbe1 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -385,15 +385,12 @@ int seq_escape(struct seq_file *m, const char *s, const char *esc)
}
EXPORT_SYMBOL(seq_escape);
-int seq_printf(struct seq_file *m, const char *f, ...)
+int seq_vprintf(struct seq_file *m, const char *f, va_list args)
{
- va_list args;
int len;
if (m->count < m->size) {
- va_start(args, f);
len = vsnprintf(m->buf + m->count, m->size - m->count, f, args);
- va_end(args);
if (m->count + len < m->size) {
m->count += len;
return 0;
@@ -402,6 +399,19 @@ int seq_printf(struct seq_file *m, const char *f, ...)
seq_set_overflow(m);
return -1;
}
+EXPORT_SYMBOL(seq_vprintf);
+
+int seq_printf(struct seq_file *m, const char *f, ...)
+{
+ int ret;
+ va_list args;
+
+ va_start(args, f);
+ ret = seq_vprintf(m, f, args);
+ va_end(args);
+
+ return ret;
+}
EXPORT_SYMBOL(seq_printf);
/**
diff --git a/fs/splice.c b/fs/splice.c
index c9f1318a3b82..7bf08fa22ec9 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -273,13 +273,16 @@ void spd_release_page(struct splice_pipe_desc *spd, unsigned int i)
* Check if we need to grow the arrays holding pages and partial page
* descriptions.
*/
-int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
+int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
{
- if (pipe->buffers <= PIPE_DEF_BUFFERS)
+ unsigned int buffers = ACCESS_ONCE(pipe->buffers);
+
+ spd->nr_pages_max = buffers;
+ if (buffers <= PIPE_DEF_BUFFERS)
return 0;
- spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL);
- spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL);
+ spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL);
+ spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL);
if (spd->pages && spd->partial)
return 0;
@@ -289,10 +292,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd)
return -ENOMEM;
}
-void splice_shrink_spd(struct pipe_inode_info *pipe,
- struct splice_pipe_desc *spd)
+void splice_shrink_spd(struct splice_pipe_desc *spd)
{
- if (pipe->buffers <= PIPE_DEF_BUFFERS)
+ if (spd->nr_pages_max <= PIPE_DEF_BUFFERS)
return;
kfree(spd->pages);
@@ -315,6 +317,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
+ .nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &page_cache_pipe_buf_ops,
.spd_release = spd_release_page,
@@ -326,7 +329,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
index = *ppos >> PAGE_CACHE_SHIFT;
loff = *ppos & ~PAGE_CACHE_MASK;
req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- nr_pages = min(req_pages, pipe->buffers);
+ nr_pages = min(req_pages, spd.nr_pages_max);
/*
* Lookup the (hopefully) full range of pages we need.
@@ -497,7 +500,7 @@ fill_it:
if (spd.nr_pages)
error = splice_to_pipe(pipe, &spd);
- splice_shrink_spd(pipe, &spd);
+ splice_shrink_spd(&spd);
return error;
}
@@ -598,6 +601,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
+ .nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &default_pipe_buf_ops,
.spd_release = spd_release_page,
@@ -608,8 +612,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
res = -ENOMEM;
vec = __vec;
- if (pipe->buffers > PIPE_DEF_BUFFERS) {
- vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL);
+ if (spd.nr_pages_max > PIPE_DEF_BUFFERS) {
+ vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL);
if (!vec)
goto shrink_ret;
}
@@ -617,7 +621,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
offset = *ppos & ~PAGE_CACHE_MASK;
nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) {
+ for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) {
struct page *page;
page = alloc_page(GFP_USER);
@@ -665,7 +669,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
shrink_ret:
if (vec != __vec)
kfree(vec);
- splice_shrink_spd(pipe, &spd);
+ splice_shrink_spd(&spd);
return res;
err:
@@ -1614,6 +1618,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
+ .nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &user_page_pipe_buf_ops,
.spd_release = spd_release_page,
@@ -1629,13 +1634,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages,
spd.partial, false,
- pipe->buffers);
+ spd.nr_pages_max);
if (spd.nr_pages <= 0)
ret = spd.nr_pages;
else
ret = splice_to_pipe(pipe, &spd);
- splice_shrink_spd(pipe, &spd);
+ splice_shrink_spd(&spd);
return ret;
}
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c
index abcc58f3c152..7834a517f7f4 100644
--- a/fs/squashfs/namei.c
+++ b/fs/squashfs/namei.c
@@ -134,7 +134,7 @@ out:
static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
const unsigned char *name = dentry->d_name.name;
int len = dentry->d_name.len;
diff --git a/fs/super.c b/fs/super.c
index cf001775617f..c743fb3be4b8 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -105,11 +105,12 @@ static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
/**
* alloc_super - create new superblock
* @type: filesystem type superblock should belong to
+ * @flags: the mount flags
*
* Allocates and initializes a new &struct super_block. alloc_super()
* returns a pointer new superblock or %NULL if allocation had failed.
*/
-static struct super_block *alloc_super(struct file_system_type *type)
+static struct super_block *alloc_super(struct file_system_type *type, int flags)
{
struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
static const struct super_operations default_op;
@@ -136,6 +137,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
#else
INIT_LIST_HEAD(&s->s_files);
#endif
+ s->s_flags = flags;
s->s_bdi = &default_backing_dev_info;
INIT_HLIST_NODE(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_anon);
@@ -415,11 +417,13 @@ EXPORT_SYMBOL(generic_shutdown_super);
* @type: filesystem type superblock should belong to
* @test: comparison callback
* @set: setup callback
+ * @flags: mount flags
* @data: argument to each of them
*/
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
+ int flags,
void *data)
{
struct super_block *s = NULL;
@@ -450,7 +454,7 @@ retry:
}
if (!s) {
spin_unlock(&sb_lock);
- s = alloc_super(type);
+ s = alloc_super(type, flags);
if (!s)
return ERR_PTR(-ENOMEM);
goto retry;
@@ -925,13 +929,12 @@ struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
{
struct super_block *sb;
- sb = sget(fs_type, ns_test_super, ns_set_super, data);
+ sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
if (IS_ERR(sb))
return ERR_CAST(sb);
if (!sb->s_root) {
int err;
- sb->s_flags = flags;
err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (err) {
deactivate_locked_super(sb);
@@ -992,7 +995,8 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
error = -EBUSY;
goto error_bdev;
}
- s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
+ s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
+ bdev);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
if (IS_ERR(s))
goto error_s;
@@ -1017,7 +1021,6 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
} else {
char b[BDEVNAME_SIZE];
- s->s_flags = flags | MS_NOSEC;
s->s_mode = mode;
strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
sb_set_blocksize(s, block_size(bdev));
@@ -1062,13 +1065,11 @@ struct dentry *mount_nodev(struct file_system_type *fs_type,
int (*fill_super)(struct super_block *, void *, int))
{
int error;
- struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
+ struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
if (IS_ERR(s))
return ERR_CAST(s);
- s->s_flags = flags;
-
error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
if (error) {
deactivate_locked_super(s);
@@ -1091,11 +1092,10 @@ struct dentry *mount_single(struct file_system_type *fs_type,
struct super_block *s;
int error;
- s = sget(fs_type, compare_single, set_anon_super, NULL);
+ s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
if (IS_ERR(s))
return ERR_CAST(s);
if (!s->s_root) {
- s->s_flags = flags;
error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
if (error) {
deactivate_locked_super(s);
diff --git a/fs/sync.c b/fs/sync.c
index 11e3d1c44901..eb8722dc556f 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -29,16 +29,6 @@
*/
static int __sync_filesystem(struct super_block *sb, int wait)
{
- /*
- * This should be safe, as we require bdi backing to actually
- * write out data in the first place
- */
- if (sb->s_bdi == &noop_backing_dev_info)
- return 0;
-
- if (sb->s_qcop && sb->s_qcop->quota_sync)
- sb->s_qcop->quota_sync(sb, -1, wait);
-
if (wait)
sync_inodes_sb(sb);
else
@@ -77,29 +67,48 @@ int sync_filesystem(struct super_block *sb)
}
EXPORT_SYMBOL_GPL(sync_filesystem);
-static void sync_one_sb(struct super_block *sb, void *arg)
+static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
if (!(sb->s_flags & MS_RDONLY))
- __sync_filesystem(sb, *(int *)arg);
+ sync_inodes_sb(sb);
}
-/*
- * Sync all the data for all the filesystems (called by sys_sync() and
- * emergency sync)
- */
-static void sync_filesystems(int wait)
+
+static void sync_fs_one_sb(struct super_block *sb, void *arg)
{
- iterate_supers(sync_one_sb, &wait);
+ if (!(sb->s_flags & MS_RDONLY) && sb->s_op->sync_fs)
+ sb->s_op->sync_fs(sb, *(int *)arg);
+}
+
+static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
+{
+ filemap_fdatawrite(bdev->bd_inode->i_mapping);
+}
+
+static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
+{
+ filemap_fdatawait(bdev->bd_inode->i_mapping);
}
/*
- * sync everything. Start out by waking pdflush, because that writes back
- * all queues in parallel.
+ * Sync everything. We start by waking flusher threads so that most of
+ * writeback runs on all devices in parallel. Then we sync all inodes reliably
+ * which effectively also waits for all flusher threads to finish doing
+ * writeback. At this point all data is on disk so metadata should be stable
+ * and we tell filesystems to sync their metadata via ->sync_fs() calls.
+ * Finally, we writeout all block devices because some filesystems (e.g. ext2)
+ * just write metadata (such as inodes or bitmaps) to block device page cache
+ * and do not sync it on their own in ->sync_fs().
*/
SYSCALL_DEFINE0(sync)
{
+ int nowait = 0, wait = 1;
+
wakeup_flusher_threads(0, WB_REASON_SYNC);
- sync_filesystems(0);
- sync_filesystems(1);
+ iterate_supers(sync_inodes_one_sb, NULL);
+ iterate_supers(sync_fs_one_sb, &nowait);
+ iterate_supers(sync_fs_one_sb, &wait);
+ iterate_bdevs(fdatawrite_one_bdev, NULL);
+ iterate_bdevs(fdatawait_one_bdev, NULL);
if (unlikely(laptop_mode))
laptop_sync_completion();
return 0;
@@ -107,12 +116,18 @@ SYSCALL_DEFINE0(sync)
static void do_sync_work(struct work_struct *work)
{
+ int nowait = 0;
+
/*
* Sync twice to reduce the possibility we skipped some inodes / pages
* because they were temporarily locked
*/
- sync_filesystems(0);
- sync_filesystems(0);
+ iterate_supers(sync_inodes_one_sb, &nowait);
+ iterate_supers(sync_fs_one_sb, &nowait);
+ iterate_bdevs(fdatawrite_one_bdev, NULL);
+ iterate_supers(sync_inodes_one_sb, &nowait);
+ iterate_supers(sync_fs_one_sb, &nowait);
+ iterate_bdevs(fdatawrite_one_bdev, NULL);
printk("Emergency Sync complete\n");
kfree(work);
}
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index e6bb9b2a4cbe..a5cf784f9cc2 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -300,15 +300,15 @@ void release_sysfs_dirent(struct sysfs_dirent * sd)
static int sysfs_dentry_delete(const struct dentry *dentry)
{
struct sysfs_dirent *sd = dentry->d_fsdata;
- return !!(sd->s_flags & SYSFS_FLAG_REMOVED);
+ return !(sd && !(sd->s_flags & SYSFS_FLAG_REMOVED));
}
-static int sysfs_dentry_revalidate(struct dentry *dentry, struct nameidata *nd)
+static int sysfs_dentry_revalidate(struct dentry *dentry, unsigned int flags)
{
struct sysfs_dirent *sd;
int is_dir;
- if (nd->flags & LOOKUP_RCU)
+ if (flags & LOOKUP_RCU)
return -ECHILD;
sd = dentry->d_fsdata;
@@ -355,18 +355,15 @@ out_bad:
return 0;
}
-static void sysfs_dentry_iput(struct dentry *dentry, struct inode *inode)
+static void sysfs_dentry_release(struct dentry *dentry)
{
- struct sysfs_dirent * sd = dentry->d_fsdata;
-
- sysfs_put(sd);
- iput(inode);
+ sysfs_put(dentry->d_fsdata);
}
-static const struct dentry_operations sysfs_dentry_ops = {
+const struct dentry_operations sysfs_dentry_ops = {
.d_revalidate = sysfs_dentry_revalidate,
.d_delete = sysfs_dentry_delete,
- .d_iput = sysfs_dentry_iput,
+ .d_release = sysfs_dentry_release,
};
struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
@@ -764,7 +761,7 @@ int sysfs_create_dir(struct kobject * kobj)
}
static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct dentry *ret = NULL;
struct dentry *parent = dentry->d_parent;
@@ -786,6 +783,7 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
ret = ERR_PTR(-ENOENT);
goto out_unlock;
}
+ dentry->d_fsdata = sysfs_get(sd);
/* attach dentry and inode */
inode = sysfs_get_inode(dir->i_sb, sd);
@@ -795,16 +793,7 @@ static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
}
/* instantiate and hash dentry */
- ret = d_find_alias(inode);
- if (!ret) {
- d_set_d_op(dentry, &sysfs_dentry_ops);
- dentry->d_fsdata = sysfs_get(sd);
- d_add(dentry, inode);
- } else {
- d_move(ret, dentry);
- iput(inode);
- }
-
+ ret = d_materialise_unique(dentry, inode);
out_unlock:
mutex_unlock(&sysfs_mutex);
return ret;
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 52c3bdb66a84..71eb7e253927 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -68,6 +68,7 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
}
root->d_fsdata = &sysfs_root;
sb->s_root = root;
+ sb->s_d_op = &sysfs_dentry_ops;
return 0;
}
@@ -117,13 +118,12 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
info->ns[type] = kobj_ns_grab_current(type);
- sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info);
+ sb = sget(fs_type, sysfs_test_super, sysfs_set_super, flags, info);
if (IS_ERR(sb) || sb->s_fs_info != info)
free_sysfs_super_info(info);
if (IS_ERR(sb))
return ERR_CAST(sb);
if (!sb->s_root) {
- sb->s_flags = flags;
error = sysfs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (error) {
deactivate_locked_super(sb);
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 661a9639570b..d73c0932bbd6 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -157,6 +157,7 @@ extern struct kmem_cache *sysfs_dir_cachep;
*/
extern struct mutex sysfs_mutex;
extern spinlock_t sysfs_assoc_lock;
+extern const struct dentry_operations sysfs_dentry_ops;
extern const struct file_operations sysfs_dir_operations;
extern const struct inode_operations sysfs_dir_inode_operations;
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 08d0b2568cd3..80e1e2b18df1 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -43,7 +43,6 @@ static int sysv_sync_fs(struct super_block *sb, int wait)
* then attach current time stamp.
* But if the filesystem was marked clean, keep it clean.
*/
- sb->s_dirt = 0;
old_time = fs32_to_cpu(sbi, *sbi->s_sb_time);
if (sbi->s_type == FSTYPE_SYSV4) {
if (*sbi->s_sb_state == cpu_to_fs32(sbi, 0x7c269d38 - old_time))
@@ -57,23 +56,12 @@ static int sysv_sync_fs(struct super_block *sb, int wait)
return 0;
}
-static void sysv_write_super(struct super_block *sb)
-{
- if (!(sb->s_flags & MS_RDONLY))
- sysv_sync_fs(sb, 1);
- else
- sb->s_dirt = 0;
-}
-
static int sysv_remount(struct super_block *sb, int *flags, char *data)
{
struct sysv_sb_info *sbi = SYSV_SB(sb);
- lock_super(sb);
+
if (sbi->s_forced_ro)
*flags |= MS_RDONLY;
- if (*flags & MS_RDONLY)
- sysv_write_super(sb);
- unlock_super(sb);
return 0;
}
@@ -81,9 +69,6 @@ static void sysv_put_super(struct super_block *sb)
{
struct sysv_sb_info *sbi = SYSV_SB(sb);
- if (sb->s_dirt)
- sysv_write_super(sb);
-
if (!(sb->s_flags & MS_RDONLY)) {
/* XXX ext2 also updates the state here */
mark_buffer_dirty(sbi->s_bh1);
@@ -357,7 +342,6 @@ const struct super_operations sysv_sops = {
.write_inode = sysv_write_inode,
.evict_inode = sysv_evict_inode,
.put_super = sysv_put_super,
- .write_super = sysv_write_super,
.sync_fs = sysv_sync_fs,
.remount_fs = sysv_remount,
.statfs = sysv_statfs,
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index d7466e293614..1c0d5f264767 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -43,7 +43,7 @@ const struct dentry_operations sysv_dentry_operations = {
.d_hash = sysv_hash,
};
-static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+static struct dentry *sysv_lookup(struct inode * dir, struct dentry * dentry, unsigned int flags)
{
struct inode * inode = NULL;
ino_t ino;
@@ -80,7 +80,7 @@ static int sysv_mknod(struct inode * dir, struct dentry * dentry, umode_t mode,
return err;
}
-static int sysv_create(struct inode * dir, struct dentry * dentry, umode_t mode, struct nameidata *nd)
+static int sysv_create(struct inode * dir, struct dentry * dentry, umode_t mode, bool excl)
{
return sysv_mknod(dir, dentry, mode, 0);
}
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
index 11b07672f6c5..0bc35fdc58e2 100644
--- a/fs/sysv/sysv.h
+++ b/fs/sysv/sysv.h
@@ -117,7 +117,6 @@ static inline void dirty_sb(struct super_block *sb)
mark_buffer_dirty(sbi->s_bh1);
if (sbi->s_bh1 != sbi->s_bh2)
mark_buffer_dirty(sbi->s_bh2);
- sb->s_dirt = 1;
}
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 92df3b081539..bb3167257aab 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2802,6 +2802,8 @@ static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count,
val = d->chk_fs;
else if (dent == d->dfs_tst_rcvry)
val = d->tst_rcvry;
+ else if (dent == d->dfs_ro_error)
+ val = c->ro_error;
else
return -EINVAL;
@@ -2885,6 +2887,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u,
d->chk_fs = val;
else if (dent == d->dfs_tst_rcvry)
d->tst_rcvry = val;
+ else if (dent == d->dfs_ro_error)
+ c->ro_error = !!val;
else
return -EINVAL;
@@ -2996,6 +3000,13 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
goto out_remove;
d->dfs_tst_rcvry = dent;
+ fname = "ro_error";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_ro_error = dent;
+
return 0;
out_remove:
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 486a8e024fb6..8b8cc4e945f4 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -79,6 +79,10 @@ typedef int (*dbg_znode_callback)(struct ubifs_info *c,
* @dfs_chk_lprops: debugfs knob to enable UBIFS LEP properties extra checks
* @dfs_chk_fs: debugfs knob to enable UBIFS contents extra checks
* @dfs_tst_rcvry: debugfs knob to enable UBIFS recovery testing
+ * @dfs_ro_error: debugfs knob to switch UBIFS to R/O mode (different to
+ * re-mounting to R/O mode because it does not flush any buffers
+ * and UBIFS just starts returning -EROFS on all write
+ * operations)
*/
struct ubifs_debug_info {
struct ubifs_zbranch old_zroot;
@@ -122,6 +126,7 @@ struct ubifs_debug_info {
struct dentry *dfs_chk_lprops;
struct dentry *dfs_chk_fs;
struct dentry *dfs_tst_rcvry;
+ struct dentry *dfs_ro_error;
};
/**
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index a6d42efc76d2..c95681cf1b71 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -184,7 +184,7 @@ static int dbg_check_name(const struct ubifs_info *c,
}
static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
int err;
union ubifs_key key;
@@ -246,7 +246,7 @@ out:
}
static int ubifs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct inode *inode;
struct ubifs_info *c = dir->i_sb->s_fs_info;
@@ -969,7 +969,7 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct ubifs_budget_req ino_req = { .dirtied_ino = 1,
.dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
struct timespec time;
- unsigned int saved_nlink;
+ unsigned int uninitialized_var(saved_nlink);
/*
* Budget request settings: deletion direntry, new direntry, removing
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index b02734db187c..cebf17ea0458 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -176,7 +176,7 @@ int ubifs_orphan_start_commit(struct ubifs_info *c)
*last = orphan;
last = &orphan->cnext;
}
- *last = orphan->cnext;
+ *last = NULL;
c->cmt_orphans = c->new_orphans;
c->new_orphans = 0;
dbg_cmt("%d orphans to commit", c->cmt_orphans);
@@ -382,7 +382,7 @@ static int consolidate(struct ubifs_info *c)
last = &orphan->cnext;
cnt += 1;
}
- *last = orphan->cnext;
+ *last = NULL;
ubifs_assert(cnt == c->tot_orphans - c->new_orphans);
c->cmt_orphans = cnt;
c->ohead_lnum = c->orph_first;
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 3a2da7e476e5..eba46d4a7619 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -1007,7 +1007,7 @@ out:
*/
int ubifs_replay_journal(struct ubifs_info *c)
{
- int err, i, lnum, offs, free;
+ int err, lnum, free;
BUILD_BUG_ON(UBIFS_TRUN_KEY > 5);
@@ -1025,25 +1025,17 @@ int ubifs_replay_journal(struct ubifs_info *c)
dbg_mnt("start replaying the journal");
c->replaying = 1;
lnum = c->ltail_lnum = c->lhead_lnum;
- offs = c->lhead_offs;
- for (i = 0; i < c->log_lebs; i++, lnum++) {
- if (lnum >= UBIFS_LOG_LNUM + c->log_lebs) {
- /*
- * The log is logically circular, we reached the last
- * LEB, switch to the first one.
- */
- lnum = UBIFS_LOG_LNUM;
- offs = 0;
- }
- err = replay_log_leb(c, lnum, offs, c->sbuf);
+ lnum = UBIFS_LOG_LNUM;
+ do {
+ err = replay_log_leb(c, lnum, 0, c->sbuf);
if (err == 1)
/* We hit the end of the log */
break;
if (err)
goto out;
- offs = 0;
- }
+ lnum = ubifs_next_log_lnum(c, lnum);
+ } while (lnum != UBIFS_LOG_LNUM);
err = replay_buds(c);
if (err)
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index ef3d1ba6d992..15e2fc5aa60b 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -718,8 +718,12 @@ static int fixup_free_space(struct ubifs_info *c)
lnum = ubifs_next_log_lnum(c, lnum);
}
- /* Fixup the current log head */
- err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
+ /*
+ * Fixup the log head which contains the only a CS node at the
+ * beginning.
+ */
+ err = fixup_leb(c, c->lhead_lnum,
+ ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
if (err)
goto out;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 5862dd9d2784..1c766c39c038 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -2136,7 +2136,7 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
dbg_gen("opened ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
- sb = sget(fs_type, sb_test, sb_set, c);
+ sb = sget(fs_type, sb_test, sb_set, flags, c);
if (IS_ERR(sb)) {
err = PTR_ERR(sb);
kfree(c);
@@ -2153,7 +2153,6 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
goto out_deact;
}
} else {
- sb->s_flags = flags;
err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
if (err)
goto out_deact;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 873e1bab9c4c..fafaad795cd6 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1247,7 +1247,6 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
{
struct fileEntry *fe;
struct extendedFileEntry *efe;
- int offset;
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
struct udf_inode_info *iinfo = UDF_I(inode);
unsigned int link_count;
@@ -1359,7 +1358,6 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
- offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr;
} else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
(inode->i_sb->s_blocksize_bits - 9);
@@ -1381,8 +1379,6 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
- offset = sizeof(struct extendedFileEntry) +
- iinfo->i_lenEAttr;
}
switch (fe->icbTag.fileType) {
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 18024178ac4c..95fee278ab9d 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -251,7 +251,7 @@ out_ok:
}
static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct inode *inode = NULL;
struct fileIdentDesc cfi;
@@ -551,7 +551,7 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
}
static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct udf_fileident_bh fibh;
struct inode *inode;
@@ -1279,6 +1279,7 @@ static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
*lenp = 3;
fid->udf.block = location.logicalBlockNum;
fid->udf.partref = location.partitionReferenceNum;
+ fid->udf.parent_partref = 0;
fid->udf.generation = inode->i_generation;
if (parent) {
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 8d86a8706c0e..dcbf98722afc 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -252,6 +252,63 @@ static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
return 0;
}
+static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
+{
+ int i;
+ int nr_groups = bitmap->s_nr_groups;
+ int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) *
+ nr_groups);
+
+ for (i = 0; i < nr_groups; i++)
+ if (bitmap->s_block_bitmap[i])
+ brelse(bitmap->s_block_bitmap[i]);
+
+ if (size <= PAGE_SIZE)
+ kfree(bitmap);
+ else
+ vfree(bitmap);
+}
+
+static void udf_free_partition(struct udf_part_map *map)
+{
+ int i;
+ struct udf_meta_data *mdata;
+
+ if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
+ iput(map->s_uspace.s_table);
+ if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
+ iput(map->s_fspace.s_table);
+ if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
+ udf_sb_free_bitmap(map->s_uspace.s_bitmap);
+ if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
+ udf_sb_free_bitmap(map->s_fspace.s_bitmap);
+ if (map->s_partition_type == UDF_SPARABLE_MAP15)
+ for (i = 0; i < 4; i++)
+ brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
+ else if (map->s_partition_type == UDF_METADATA_MAP25) {
+ mdata = &map->s_type_specific.s_metadata;
+ iput(mdata->s_metadata_fe);
+ mdata->s_metadata_fe = NULL;
+
+ iput(mdata->s_mirror_fe);
+ mdata->s_mirror_fe = NULL;
+
+ iput(mdata->s_bitmap_fe);
+ mdata->s_bitmap_fe = NULL;
+ }
+}
+
+static void udf_sb_free_partitions(struct super_block *sb)
+{
+ struct udf_sb_info *sbi = UDF_SB(sb);
+ int i;
+
+ for (i = 0; i < sbi->s_partitions; i++)
+ udf_free_partition(&sbi->s_partmaps[i]);
+ kfree(sbi->s_partmaps);
+ sbi->s_partmaps = NULL;
+}
+
static int udf_show_options(struct seq_file *seq, struct dentry *root)
{
struct super_block *sb = root->d_sb;
@@ -1283,7 +1340,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
BUG_ON(ident != TAG_IDENT_LVD);
lvd = (struct logicalVolDesc *)bh->b_data;
table_len = le32_to_cpu(lvd->mapTableLength);
- if (sizeof(*lvd) + table_len > sb->s_blocksize) {
+ if (table_len > sb->s_blocksize - sizeof(*lvd)) {
udf_err(sb, "error loading logical volume descriptor: "
"Partition table too long (%u > %lu)\n", table_len,
sb->s_blocksize - sizeof(*lvd));
@@ -1596,7 +1653,11 @@ static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
/* responsible for finding the PartitionDesc(s) */
if (!udf_process_sequence(sb, main_s, main_e, fileset))
return 1;
- return !udf_process_sequence(sb, reserve_s, reserve_e, fileset);
+ udf_sb_free_partitions(sb);
+ if (!udf_process_sequence(sb, reserve_s, reserve_e, fileset))
+ return 1;
+ udf_sb_free_partitions(sb);
+ return 0;
}
/*
@@ -1861,55 +1922,8 @@ u64 lvid_get_unique_id(struct super_block *sb)
return ret;
}
-static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
-{
- int i;
- int nr_groups = bitmap->s_nr_groups;
- int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) *
- nr_groups);
-
- for (i = 0; i < nr_groups; i++)
- if (bitmap->s_block_bitmap[i])
- brelse(bitmap->s_block_bitmap[i]);
-
- if (size <= PAGE_SIZE)
- kfree(bitmap);
- else
- vfree(bitmap);
-}
-
-static void udf_free_partition(struct udf_part_map *map)
-{
- int i;
- struct udf_meta_data *mdata;
-
- if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
- iput(map->s_uspace.s_table);
- if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
- iput(map->s_fspace.s_table);
- if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
- udf_sb_free_bitmap(map->s_uspace.s_bitmap);
- if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
- udf_sb_free_bitmap(map->s_fspace.s_bitmap);
- if (map->s_partition_type == UDF_SPARABLE_MAP15)
- for (i = 0; i < 4; i++)
- brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
- else if (map->s_partition_type == UDF_METADATA_MAP25) {
- mdata = &map->s_type_specific.s_metadata;
- iput(mdata->s_metadata_fe);
- mdata->s_metadata_fe = NULL;
-
- iput(mdata->s_mirror_fe);
- mdata->s_mirror_fe = NULL;
-
- iput(mdata->s_bitmap_fe);
- mdata->s_bitmap_fe = NULL;
- }
-}
-
static int udf_fill_super(struct super_block *sb, void *options, int silent)
{
- int i;
int ret;
struct inode *inode = NULL;
struct udf_options uopt;
@@ -1974,7 +1988,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sb->s_op = &udf_sb_ops;
sb->s_export_op = &udf_export_ops;
- sb->s_dirt = 0;
sb->s_magic = UDF_SUPER_MAGIC;
sb->s_time_gran = 1000;
@@ -2072,9 +2085,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
error_out:
if (sbi->s_vat_inode)
iput(sbi->s_vat_inode);
- if (sbi->s_partitions)
- for (i = 0; i < sbi->s_partitions; i++)
- udf_free_partition(&sbi->s_partmaps[i]);
#ifdef CONFIG_UDF_NLS
if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
unload_nls(sbi->s_nls_map);
@@ -2082,8 +2092,7 @@ error_out:
if (!(sb->s_flags & MS_RDONLY))
udf_close_lvid(sb);
brelse(sbi->s_lvid_bh);
-
- kfree(sbi->s_partmaps);
+ udf_sb_free_partitions(sb);
kfree(sbi);
sb->s_fs_info = NULL;
@@ -2096,10 +2105,6 @@ void _udf_err(struct super_block *sb, const char *function,
struct va_format vaf;
va_list args;
- /* mark sb error */
- if (!(sb->s_flags & MS_RDONLY))
- sb->s_dirt = 1;
-
va_start(args, fmt);
vaf.fmt = fmt;
@@ -2128,16 +2133,12 @@ void _udf_warn(struct super_block *sb, const char *function,
static void udf_put_super(struct super_block *sb)
{
- int i;
struct udf_sb_info *sbi;
sbi = UDF_SB(sb);
if (sbi->s_vat_inode)
iput(sbi->s_vat_inode);
- if (sbi->s_partitions)
- for (i = 0; i < sbi->s_partitions; i++)
- udf_free_partition(&sbi->s_partmaps[i]);
#ifdef CONFIG_UDF_NLS
if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
unload_nls(sbi->s_nls_map);
@@ -2145,7 +2146,7 @@ static void udf_put_super(struct super_block *sb)
if (!(sb->s_flags & MS_RDONLY))
udf_close_lvid(sb);
brelse(sbi->s_lvid_bh);
- kfree(sbi->s_partmaps);
+ udf_sb_free_partitions(sb);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
}
@@ -2161,7 +2162,6 @@ static int udf_sync_fs(struct super_block *sb, int wait)
* the buffer for IO
*/
mark_buffer_dirty(sbi->s_lvid_bh);
- sb->s_dirt = 0;
sbi->s_lvid_dirty = 0;
}
mutex_unlock(&sbi->s_alloc_mutex);
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 4b98fee8e161..8a9657d7f7c6 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -248,7 +248,7 @@ void udf_truncate_extents(struct inode *inode)
/* We managed to free all extents in the
* indirect extent - free it too */
BUG_ON(!epos.bh);
- udf_free_blocks(sb, inode, &epos.block,
+ udf_free_blocks(sb, NULL, &epos.block,
0, indirect_ext_len);
} else if (!epos.bh) {
iinfo->i_lenAlloc = lenalloc;
@@ -275,7 +275,7 @@ void udf_truncate_extents(struct inode *inode)
if (indirect_ext_len) {
BUG_ON(!epos.bh);
- udf_free_blocks(sb, inode, &epos.block, 0, indirect_ext_len);
+ udf_free_blocks(sb, NULL, &epos.block, 0, indirect_ext_len);
} else if (!epos.bh) {
iinfo->i_lenAlloc = lenalloc;
mark_inode_dirty(inode);
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index ebe10314e512..de038da6f6bd 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -129,7 +129,6 @@ static inline void udf_updated_lvid(struct super_block *sb)
WARN_ON_ONCE(((struct logicalVolIntegrityDesc *)
bh->b_data)->integrityType !=
cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN));
- sb->s_dirt = 1;
UDF_SB(sb)->s_lvid_dirty = 1;
}
extern u64 lvid_get_unique_id(struct super_block *sb);
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 42694e11c23d..1b3e410bf334 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -116,7 +116,7 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
if (sb->s_flags & MS_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
- sb->s_dirt = 1;
+ ufs_mark_sb_dirty(sb);
unlock_super (sb);
UFSD("EXIT\n");
@@ -214,7 +214,7 @@ do_more:
goto do_more;
}
- sb->s_dirt = 1;
+ ufs_mark_sb_dirty(sb);
unlock_super (sb);
UFSD("EXIT\n");
return;
@@ -557,7 +557,7 @@ static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
if (sb->s_flags & MS_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
- sb->s_dirt = 1;
+ ufs_mark_sb_dirty(sb);
UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
@@ -677,7 +677,7 @@ succed:
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
if (sb->s_flags & MS_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
- sb->s_dirt = 1;
+ ufs_mark_sb_dirty(sb);
result += cgno * uspi->s_fpg;
UFSD("EXIT3, result %llu\n", (unsigned long long)result);
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c
index 4ec5c1085a87..e84cbe21b986 100644
--- a/fs/ufs/ialloc.c
+++ b/fs/ufs/ialloc.c
@@ -116,7 +116,7 @@ void ufs_free_inode (struct inode * inode)
if (sb->s_flags & MS_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
- sb->s_dirt = 1;
+ ufs_mark_sb_dirty(sb);
unlock_super (sb);
UFSD("EXIT\n");
}
@@ -288,7 +288,7 @@ cg_found:
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
if (sb->s_flags & MS_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
- sb->s_dirt = 1;
+ ufs_mark_sb_dirty(sb);
inode->i_ino = cg * uspi->s_ipg + bit;
inode_init_owner(inode, dir, mode);
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index a2281cadefa1..90d74b8f8eba 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -46,7 +46,7 @@ static inline int ufs_add_nondir(struct dentry *dentry, struct inode *inode)
return err;
}
-static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, unsigned int flags)
{
struct inode * inode = NULL;
ino_t ino;
@@ -71,7 +71,7 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru
* with d_instantiate().
*/
static int ufs_create (struct inode * dir, struct dentry * dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
struct inode *inode;
int err;
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 302f340d0071..444927e5706b 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -302,7 +302,7 @@ void ufs_error (struct super_block * sb, const char * function,
if (!(sb->s_flags & MS_RDONLY)) {
usb1->fs_clean = UFS_FSBAD;
ubh_mark_buffer_dirty(USPI_UBH(uspi));
- sb->s_dirt = 1;
+ ufs_mark_sb_dirty(sb);
sb->s_flags |= MS_RDONLY;
}
va_start (args, fmt);
@@ -334,7 +334,7 @@ void ufs_panic (struct super_block * sb, const char * function,
if (!(sb->s_flags & MS_RDONLY)) {
usb1->fs_clean = UFS_FSBAD;
ubh_mark_buffer_dirty(USPI_UBH(uspi));
- sb->s_dirt = 1;
+ ufs_mark_sb_dirty(sb);
}
va_start (args, fmt);
vsnprintf (error_buf, sizeof(error_buf), fmt, args);
@@ -691,6 +691,83 @@ static void ufs_put_super_internal(struct super_block *sb)
UFSD("EXIT\n");
}
+static int ufs_sync_fs(struct super_block *sb, int wait)
+{
+ struct ufs_sb_private_info * uspi;
+ struct ufs_super_block_first * usb1;
+ struct ufs_super_block_third * usb3;
+ unsigned flags;
+
+ lock_ufs(sb);
+ lock_super(sb);
+
+ UFSD("ENTER\n");
+
+ flags = UFS_SB(sb)->s_flags;
+ uspi = UFS_SB(sb)->s_uspi;
+ usb1 = ubh_get_usb_first(uspi);
+ usb3 = ubh_get_usb_third(uspi);
+
+ usb1->fs_time = cpu_to_fs32(sb, get_seconds());
+ if ((flags & UFS_ST_MASK) == UFS_ST_SUN ||
+ (flags & UFS_ST_MASK) == UFS_ST_SUNOS ||
+ (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
+ ufs_set_fs_state(sb, usb1, usb3,
+ UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
+ ufs_put_cstotal(sb);
+
+ UFSD("EXIT\n");
+ unlock_super(sb);
+ unlock_ufs(sb);
+
+ return 0;
+}
+
+static void delayed_sync_fs(struct work_struct *work)
+{
+ struct ufs_sb_info *sbi;
+
+ sbi = container_of(work, struct ufs_sb_info, sync_work.work);
+
+ spin_lock(&sbi->work_lock);
+ sbi->work_queued = 0;
+ spin_unlock(&sbi->work_lock);
+
+ ufs_sync_fs(sbi->sb, 1);
+}
+
+void ufs_mark_sb_dirty(struct super_block *sb)
+{
+ struct ufs_sb_info *sbi = UFS_SB(sb);
+ unsigned long delay;
+
+ spin_lock(&sbi->work_lock);
+ if (!sbi->work_queued) {
+ delay = msecs_to_jiffies(dirty_writeback_interval * 10);
+ queue_delayed_work(system_long_wq, &sbi->sync_work, delay);
+ sbi->work_queued = 1;
+ }
+ spin_unlock(&sbi->work_lock);
+}
+
+static void ufs_put_super(struct super_block *sb)
+{
+ struct ufs_sb_info * sbi = UFS_SB(sb);
+
+ UFSD("ENTER\n");
+
+ if (!(sb->s_flags & MS_RDONLY))
+ ufs_put_super_internal(sb);
+ cancel_delayed_work_sync(&sbi->sync_work);
+
+ ubh_brelse_uspi (sbi->s_uspi);
+ kfree (sbi->s_uspi);
+ kfree (sbi);
+ sb->s_fs_info = NULL;
+ UFSD("EXIT\n");
+ return;
+}
+
static int ufs_fill_super(struct super_block *sb, void *data, int silent)
{
struct ufs_sb_info * sbi;
@@ -716,6 +793,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi)
goto failed_nomem;
sb->s_fs_info = sbi;
+ sbi->sb = sb;
UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY));
@@ -727,6 +805,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
}
#endif
mutex_init(&sbi->mutex);
+ spin_lock_init(&sbi->work_lock);
+ INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs);
/*
* Set default mount options
* Parse mount options
@@ -1191,68 +1271,6 @@ failed_nomem:
return -ENOMEM;
}
-static int ufs_sync_fs(struct super_block *sb, int wait)
-{
- struct ufs_sb_private_info * uspi;
- struct ufs_super_block_first * usb1;
- struct ufs_super_block_third * usb3;
- unsigned flags;
-
- lock_ufs(sb);
- lock_super(sb);
-
- UFSD("ENTER\n");
-
- flags = UFS_SB(sb)->s_flags;
- uspi = UFS_SB(sb)->s_uspi;
- usb1 = ubh_get_usb_first(uspi);
- usb3 = ubh_get_usb_third(uspi);
-
- usb1->fs_time = cpu_to_fs32(sb, get_seconds());
- if ((flags & UFS_ST_MASK) == UFS_ST_SUN ||
- (flags & UFS_ST_MASK) == UFS_ST_SUNOS ||
- (flags & UFS_ST_MASK) == UFS_ST_SUNx86)
- ufs_set_fs_state(sb, usb1, usb3,
- UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
- ufs_put_cstotal(sb);
- sb->s_dirt = 0;
-
- UFSD("EXIT\n");
- unlock_super(sb);
- unlock_ufs(sb);
-
- return 0;
-}
-
-static void ufs_write_super(struct super_block *sb)
-{
- if (!(sb->s_flags & MS_RDONLY))
- ufs_sync_fs(sb, 1);
- else
- sb->s_dirt = 0;
-}
-
-static void ufs_put_super(struct super_block *sb)
-{
- struct ufs_sb_info * sbi = UFS_SB(sb);
-
- UFSD("ENTER\n");
-
- if (sb->s_dirt)
- ufs_write_super(sb);
-
- if (!(sb->s_flags & MS_RDONLY))
- ufs_put_super_internal(sb);
-
- ubh_brelse_uspi (sbi->s_uspi);
- kfree (sbi->s_uspi);
- kfree (sbi);
- sb->s_fs_info = NULL;
- UFSD("EXIT\n");
- return;
-}
-
-
static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
{
struct ufs_sb_private_info * uspi;
@@ -1308,7 +1326,6 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
ufs_set_fs_state(sb, usb1, usb3,
UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
ubh_mark_buffer_dirty (USPI_UBH(uspi));
- sb->s_dirt = 0;
sb->s_flags |= MS_RDONLY;
} else {
/*
@@ -1458,7 +1475,6 @@ static const struct super_operations ufs_super_ops = {
.write_inode = ufs_write_inode,
.evict_inode = ufs_evict_inode,
.put_super = ufs_put_super,
- .write_super = ufs_write_super,
.sync_fs = ufs_sync_fs,
.statfs = ufs_statfs,
.remount_fs = ufs_remount,
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 528750b7e701..343e6fc571e5 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -20,6 +20,10 @@ struct ufs_sb_info {
unsigned s_mount_opt;
struct mutex mutex;
struct task_struct *mutex_owner;
+ struct super_block *sb;
+ int work_queued; /* non-zero if the delayed work is queued */
+ struct delayed_work sync_work; /* FS sync delayed work */
+ spinlock_t work_lock; /* protects sync_work and work_queued */
};
struct ufs_inode_info {
@@ -123,6 +127,7 @@ extern __printf(3, 4)
void ufs_error(struct super_block *, const char *, const char *, ...);
extern __printf(3, 4)
void ufs_panic(struct super_block *, const char *, const char *, ...);
+void ufs_mark_sb_dirty(struct super_block *sb);
/* symlink.c */
extern const struct inode_operations ufs_fast_symlink_inode_operations;
diff --git a/fs/ufs/ufs_fs.h b/fs/ufs/ufs_fs.h
index 8aba544f9fad..0cbd5d340b67 100644
--- a/fs/ufs/ufs_fs.h
+++ b/fs/ufs/ufs_fs.h
@@ -34,6 +34,7 @@
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/fs.h>
+#include <linux/workqueue.h>
#include <asm/div64.h>
typedef __u64 __bitwise __fs64;
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 9d1aeb7e2734..4f33c32affe3 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1074,13 +1074,13 @@ restart:
* If we couldn't get anything, give up.
*/
if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
+ xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
+
if (!forced++) {
trace_xfs_alloc_near_busy(args);
xfs_log_force(args->mp, XFS_LOG_SYNC);
goto restart;
}
-
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
trace_xfs_alloc_size_neither(args);
args->agbno = NULLAGBLOCK;
return 0;
@@ -2434,13 +2434,22 @@ xfs_alloc_vextent_worker(
current_restore_flags_nested(&pflags, PF_FSTRANS);
}
-
-int /* error */
+/*
+ * Data allocation requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. Metadata
+ * requests, OTOH, are generally from low stack usage paths, so avoid the
+ * context switch overhead here.
+ */
+int
xfs_alloc_vextent(
- xfs_alloc_arg_t *args) /* allocation argument structure */
+ struct xfs_alloc_arg *args)
{
DECLARE_COMPLETION_ONSTACK(done);
+ if (!args->userdata)
+ return __xfs_alloc_vextent(args);
+
+
args->done = &done;
INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
queue_work(xfs_alloc_wq, &args->work);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index a4beb421018a..269b35c084da 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -989,27 +989,6 @@ xfs_buf_ioerror_alert(
(__uint64_t)XFS_BUF_ADDR(bp), func, bp->b_error, bp->b_length);
}
-int
-xfs_bwrite(
- struct xfs_buf *bp)
-{
- int error;
-
- ASSERT(xfs_buf_islocked(bp));
-
- bp->b_flags |= XBF_WRITE;
- bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
-
- xfs_bdstrat_cb(bp);
-
- error = xfs_buf_iowait(bp);
- if (error) {
- xfs_force_shutdown(bp->b_target->bt_mount,
- SHUTDOWN_META_IO_ERROR);
- }
- return error;
-}
-
/*
* Called when we want to stop a buffer from getting written or read.
* We attach the EIO error, muck with its flags, and call xfs_buf_ioend
@@ -1079,14 +1058,7 @@ xfs_bioerror_relse(
return EIO;
}
-
-/*
- * All xfs metadata buffers except log state machine buffers
- * get this attached as their b_bdstrat callback function.
- * This is so that we can catch a buffer
- * after prematurely unpinning it to forcibly shutdown the filesystem.
- */
-int
+STATIC int
xfs_bdstrat_cb(
struct xfs_buf *bp)
{
@@ -1107,6 +1079,27 @@ xfs_bdstrat_cb(
return 0;
}
+int
+xfs_bwrite(
+ struct xfs_buf *bp)
+{
+ int error;
+
+ ASSERT(xfs_buf_islocked(bp));
+
+ bp->b_flags |= XBF_WRITE;
+ bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+
+ xfs_bdstrat_cb(bp);
+
+ error = xfs_buf_iowait(bp);
+ if (error) {
+ xfs_force_shutdown(bp->b_target->bt_mount,
+ SHUTDOWN_META_IO_ERROR);
+ }
+ return error;
+}
+
/*
* Wrapper around bdstrat so that we can stop data from going to disk in case
* we are shutting down the filesystem. Typically user data goes thru this
@@ -1243,7 +1236,7 @@ xfs_buf_iorequest(
*/
atomic_set(&bp->b_io_remaining, 1);
_xfs_buf_ioapply(bp);
- _xfs_buf_ioend(bp, 0);
+ _xfs_buf_ioend(bp, 1);
xfs_buf_rele(bp);
}
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 7f1d1392ce37..79344c48008e 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -180,7 +180,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-extern int xfs_bdstrat_cb(struct xfs_buf *);
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 45df2b857d48..d9e451115f98 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -954,7 +954,7 @@ xfs_buf_iodone_callbacks(
if (!XFS_BUF_ISSTALE(bp)) {
bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
- xfs_bdstrat_cb(bp);
+ xfs_buf_iorequest(bp);
} else {
xfs_buf_relse(bp);
}
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 3a05a41b5d76..1f1535d25a9b 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -208,6 +208,7 @@ xfs_open_by_handle(
struct inode *inode;
struct dentry *dentry;
fmode_t fmode;
+ struct path path;
if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
@@ -252,8 +253,10 @@ xfs_open_by_handle(
goto out_dput;
}
- filp = dentry_open(dentry, mntget(parfilp->f_path.mnt),
- hreq->oflags, cred);
+ path.mnt = parfilp->f_path.mnt;
+ path.dentry = dentry;
+ filp = dentry_open(&path, hreq->oflags, cred);
+ dput(dentry);
if (IS_ERR(filp)) {
put_unused_fd(fd);
return PTR_ERR(filp);
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 1a25fd802798..9c4340f5c3e0 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -179,7 +179,7 @@ xfs_vn_create(
struct inode *dir,
struct dentry *dentry,
umode_t mode,
- struct nameidata *nd)
+ bool flags)
{
return xfs_vn_mknod(dir, dentry, mode, 0);
}
@@ -197,7 +197,7 @@ STATIC struct dentry *
xfs_vn_lookup(
struct inode *dir,
struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct xfs_inode *cip;
struct xfs_name name;
@@ -222,7 +222,7 @@ STATIC struct dentry *
xfs_vn_ci_lookup(
struct inode *dir,
struct dentry *dentry,
- struct nameidata *nd)
+ unsigned int flags)
{
struct xfs_inode *ip;
struct xfs_name xname;
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 38f508816e4a..b177f97f53b6 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -62,6 +62,7 @@
#define METHOD_NAME__AEI "_AEI"
#define METHOD_NAME__PRW "_PRW"
#define METHOD_NAME__SRS "_SRS"
+#define METHOD_NAME__CBA "_CBA"
/* Method names - these methods must appear at the namespace root */
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 9e6e1c6eb60a..01e2925523ea 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -117,9 +117,6 @@ struct acpi_device;
typedef int (*acpi_op_add) (struct acpi_device * device);
typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
typedef int (*acpi_op_start) (struct acpi_device * device);
-typedef int (*acpi_op_suspend) (struct acpi_device * device,
- pm_message_t state);
-typedef int (*acpi_op_resume) (struct acpi_device * device);
typedef int (*acpi_op_bind) (struct acpi_device * device);
typedef int (*acpi_op_unbind) (struct acpi_device * device);
typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event);
@@ -133,8 +130,6 @@ struct acpi_device_ops {
acpi_op_add add;
acpi_op_remove remove;
acpi_op_start start;
- acpi_op_suspend suspend;
- acpi_op_resume resume;
acpi_op_bind bind;
acpi_op_unbind unbind;
acpi_op_notify notify;
@@ -401,6 +396,7 @@ struct acpi_pci_root {
u32 osc_support_set; /* _OSC state of support bits */
u32 osc_control_set; /* _OSC state of control bits */
+ phys_addr_t mcfg_addr;
};
/* helper */
@@ -414,13 +410,13 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
#ifdef CONFIG_PM
-int acpi_pm_device_sleep_state(struct device *, int *);
+int acpi_pm_device_sleep_state(struct device *, int *, int);
#else
-static inline int acpi_pm_device_sleep_state(struct device *d, int *p)
+static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
{
if (p)
*p = ACPI_STATE_D0;
- return ACPI_STATE_D3;
+ return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? m : ACPI_STATE_D0;
}
#endif
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 9d650476d5dc..64ec644808bc 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -59,10 +59,7 @@ struct acpi_processor_cx {
u8 entry_method;
u8 index;
u32 latency;
- u32 latency_ticks;
u32 power;
- u32 usage;
- u64 time;
u8 bm_sts_skip;
char desc[ACPI_CX_DESC_LEN];
};
@@ -334,8 +331,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr);
int acpi_processor_hotplug(struct acpi_processor *pr);
int acpi_processor_power_exit(struct acpi_processor *pr,
struct acpi_device *device);
-int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
-int acpi_processor_resume(struct acpi_device * device);
+int acpi_processor_suspend(struct device *dev);
+int acpi_processor_resume(struct device *dev);
extern struct cpuidle_driver acpi_idle_driver;
/* in processor_thermal.c */
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
index c544356b374b..294b1e755ab2 100644
--- a/include/asm-generic/dma-contiguous.h
+++ b/include/asm-generic/dma-contiguous.h
@@ -18,7 +18,7 @@ static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
{
if (dev)
dev->cma_area = cma;
- if (!dev || !dma_contiguous_default_area)
+ if (!dev && !dma_contiguous_default_area)
dma_contiguous_default_area = cma;
}
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 8760be30b375..cb2a7d1ad47b 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -376,6 +376,7 @@ header-y += tty.h
header-y += types.h
header-y += udf_fs_i.h
header-y += udp.h
+header-y += uhid.h
header-y += uinput.h
header-y += uio.h
header-y += ultrasound.h
diff --git a/include/linux/ac97_codec.h b/include/linux/ac97_codec.h
deleted file mode 100644
index 0260c3e79fdd..000000000000
--- a/include/linux/ac97_codec.h
+++ /dev/null
@@ -1,362 +0,0 @@
-#ifndef _AC97_CODEC_H_
-#define _AC97_CODEC_H_
-
-#include <linux/types.h>
-#include <linux/soundcard.h>
-
-/* AC97 1.0 */
-#define AC97_RESET 0x0000 //
-#define AC97_MASTER_VOL_STEREO 0x0002 // Line Out
-#define AC97_HEADPHONE_VOL 0x0004 //
-#define AC97_MASTER_VOL_MONO 0x0006 // TAD Output
-#define AC97_MASTER_TONE 0x0008 //
-#define AC97_PCBEEP_VOL 0x000a // none
-#define AC97_PHONE_VOL 0x000c // TAD Input (mono)
-#define AC97_MIC_VOL 0x000e // MIC Input (mono)
-#define AC97_LINEIN_VOL 0x0010 // Line Input (stereo)
-#define AC97_CD_VOL 0x0012 // CD Input (stereo)
-#define AC97_VIDEO_VOL 0x0014 // none
-#define AC97_AUX_VOL 0x0016 // Aux Input (stereo)
-#define AC97_PCMOUT_VOL 0x0018 // Wave Output (stereo)
-#define AC97_RECORD_SELECT 0x001a //
-#define AC97_RECORD_GAIN 0x001c
-#define AC97_RECORD_GAIN_MIC 0x001e
-#define AC97_GENERAL_PURPOSE 0x0020
-#define AC97_3D_CONTROL 0x0022
-#define AC97_MODEM_RATE 0x0024
-#define AC97_POWER_CONTROL 0x0026
-
-/* AC'97 2.0 */
-#define AC97_EXTENDED_ID 0x0028 /* Extended Audio ID */
-#define AC97_EXTENDED_STATUS 0x002A /* Extended Audio Status */
-#define AC97_PCM_FRONT_DAC_RATE 0x002C /* PCM Front DAC Rate */
-#define AC97_PCM_SURR_DAC_RATE 0x002E /* PCM Surround DAC Rate */
-#define AC97_PCM_LFE_DAC_RATE 0x0030 /* PCM LFE DAC Rate */
-#define AC97_PCM_LR_ADC_RATE 0x0032 /* PCM LR ADC Rate */
-#define AC97_PCM_MIC_ADC_RATE 0x0034 /* PCM MIC ADC Rate */
-#define AC97_CENTER_LFE_MASTER 0x0036 /* Center + LFE Master Volume */
-#define AC97_SURROUND_MASTER 0x0038 /* Surround (Rear) Master Volume */
-#define AC97_RESERVED_3A 0x003A /* Reserved in AC '97 < 2.2 */
-
-/* AC'97 2.2 */
-#define AC97_SPDIF_CONTROL 0x003A /* S/PDIF Control */
-
-/* range 0x3c-0x58 - MODEM */
-#define AC97_EXTENDED_MODEM_ID 0x003C
-#define AC97_EXTEND_MODEM_STAT 0x003E
-#define AC97_LINE1_RATE 0x0040
-#define AC97_LINE2_RATE 0x0042
-#define AC97_HANDSET_RATE 0x0044
-#define AC97_LINE1_LEVEL 0x0046
-#define AC97_LINE2_LEVEL 0x0048
-#define AC97_HANDSET_LEVEL 0x004A
-#define AC97_GPIO_CONFIG 0x004C
-#define AC97_GPIO_POLARITY 0x004E
-#define AC97_GPIO_STICKY 0x0050
-#define AC97_GPIO_WAKE_UP 0x0052
-#define AC97_GPIO_STATUS 0x0054
-#define AC97_MISC_MODEM_STAT 0x0056
-#define AC97_RESERVED_58 0x0058
-
-/* registers 0x005a - 0x007a are vendor reserved */
-
-#define AC97_VENDOR_ID1 0x007c
-#define AC97_VENDOR_ID2 0x007e
-
-/* volume control bit defines */
-#define AC97_MUTE 0x8000
-#define AC97_MICBOOST 0x0040
-#define AC97_LEFTVOL 0x3f00
-#define AC97_RIGHTVOL 0x003f
-
-/* record mux defines */
-#define AC97_RECMUX_MIC 0x0000
-#define AC97_RECMUX_CD 0x0101
-#define AC97_RECMUX_VIDEO 0x0202
-#define AC97_RECMUX_AUX 0x0303
-#define AC97_RECMUX_LINE 0x0404
-#define AC97_RECMUX_STEREO_MIX 0x0505
-#define AC97_RECMUX_MONO_MIX 0x0606
-#define AC97_RECMUX_PHONE 0x0707
-
-/* general purpose register bit defines */
-#define AC97_GP_LPBK 0x0080 /* Loopback mode */
-#define AC97_GP_MS 0x0100 /* Mic Select 0=Mic1, 1=Mic2 */
-#define AC97_GP_MIX 0x0200 /* Mono output select 0=Mix, 1=Mic */
-#define AC97_GP_RLBK 0x0400 /* Remote Loopback - Modem line codec */
-#define AC97_GP_LLBK 0x0800 /* Local Loopback - Modem Line codec */
-#define AC97_GP_LD 0x1000 /* Loudness 1=on */
-#define AC97_GP_3D 0x2000 /* 3D Enhancement 1=on */
-#define AC97_GP_ST 0x4000 /* Stereo Enhancement 1=on */
-#define AC97_GP_POP 0x8000 /* Pcm Out Path, 0=pre 3D, 1=post 3D */
-
-/* extended audio status and control bit defines */
-#define AC97_EA_VRA 0x0001 /* Variable bit rate enable bit */
-#define AC97_EA_DRA 0x0002 /* Double-rate audio enable bit */
-#define AC97_EA_SPDIF 0x0004 /* S/PDIF Enable bit */
-#define AC97_EA_VRM 0x0008 /* Variable bit rate for MIC enable bit */
-#define AC97_EA_CDAC 0x0040 /* PCM Center DAC is ready (Read only) */
-#define AC97_EA_SDAC 0x0040 /* PCM Surround DACs are ready (Read only) */
-#define AC97_EA_LDAC 0x0080 /* PCM LFE DAC is ready (Read only) */
-#define AC97_EA_MDAC 0x0100 /* MIC ADC is ready (Read only) */
-#define AC97_EA_SPCV 0x0400 /* S/PDIF configuration valid (Read only) */
-#define AC97_EA_PRI 0x0800 /* Turns the PCM Center DAC off */
-#define AC97_EA_PRJ 0x1000 /* Turns the PCM Surround DACs off */
-#define AC97_EA_PRK 0x2000 /* Turns the PCM LFE DAC off */
-#define AC97_EA_PRL 0x4000 /* Turns the MIC ADC off */
-#define AC97_EA_SLOT_MASK 0xffcf /* Mask for slot assignment bits */
-#define AC97_EA_SPSA_3_4 0x0000 /* Slot assigned to 3 & 4 */
-#define AC97_EA_SPSA_7_8 0x0010 /* Slot assigned to 7 & 8 */
-#define AC97_EA_SPSA_6_9 0x0020 /* Slot assigned to 6 & 9 */
-#define AC97_EA_SPSA_10_11 0x0030 /* Slot assigned to 10 & 11 */
-
-/* S/PDIF control bit defines */
-#define AC97_SC_PRO 0x0001 /* Professional status */
-#define AC97_SC_NAUDIO 0x0002 /* Non audio stream */
-#define AC97_SC_COPY 0x0004 /* Copyright status */
-#define AC97_SC_PRE 0x0008 /* Preemphasis status */
-#define AC97_SC_CC_MASK 0x07f0 /* Category Code mask */
-#define AC97_SC_L 0x0800 /* Generation Level status */
-#define AC97_SC_SPSR_MASK 0xcfff /* S/PDIF Sample Rate bits */
-#define AC97_SC_SPSR_44K 0x0000 /* Use 44.1kHz Sample rate */
-#define AC97_SC_SPSR_48K 0x2000 /* Use 48kHz Sample rate */
-#define AC97_SC_SPSR_32K 0x3000 /* Use 32kHz Sample rate */
-#define AC97_SC_DRS 0x4000 /* Double Rate S/PDIF */
-#define AC97_SC_V 0x8000 /* Validity status */
-
-/* powerdown control and status bit defines */
-
-/* status */
-#define AC97_PWR_MDM 0x0010 /* Modem section ready */
-#define AC97_PWR_REF 0x0008 /* Vref nominal */
-#define AC97_PWR_ANL 0x0004 /* Analog section ready */
-#define AC97_PWR_DAC 0x0002 /* DAC section ready */
-#define AC97_PWR_ADC 0x0001 /* ADC section ready */
-
-/* control */
-#define AC97_PWR_PR0 0x0100 /* ADC and Mux powerdown */
-#define AC97_PWR_PR1 0x0200 /* DAC powerdown */
-#define AC97_PWR_PR2 0x0400 /* Output mixer powerdown (Vref on) */
-#define AC97_PWR_PR3 0x0800 /* Output mixer powerdown (Vref off) */
-#define AC97_PWR_PR4 0x1000 /* AC-link powerdown */
-#define AC97_PWR_PR5 0x2000 /* Internal Clk disable */
-#define AC97_PWR_PR6 0x4000 /* HP amp powerdown */
-#define AC97_PWR_PR7 0x8000 /* Modem off - if supported */
-
-/* extended audio ID register bit defines */
-#define AC97_EXTID_VRA 0x0001
-#define AC97_EXTID_DRA 0x0002
-#define AC97_EXTID_SPDIF 0x0004
-#define AC97_EXTID_VRM 0x0008
-#define AC97_EXTID_DSA0 0x0010
-#define AC97_EXTID_DSA1 0x0020
-#define AC97_EXTID_CDAC 0x0040
-#define AC97_EXTID_SDAC 0x0080
-#define AC97_EXTID_LDAC 0x0100
-#define AC97_EXTID_AMAP 0x0200
-#define AC97_EXTID_REV0 0x0400
-#define AC97_EXTID_REV1 0x0800
-#define AC97_EXTID_ID0 0x4000
-#define AC97_EXTID_ID1 0x8000
-
-/* extended status register bit defines */
-#define AC97_EXTSTAT_VRA 0x0001
-#define AC97_EXTSTAT_DRA 0x0002
-#define AC97_EXTSTAT_SPDIF 0x0004
-#define AC97_EXTSTAT_VRM 0x0008
-#define AC97_EXTSTAT_SPSA0 0x0010
-#define AC97_EXTSTAT_SPSA1 0x0020
-#define AC97_EXTSTAT_CDAC 0x0040
-#define AC97_EXTSTAT_SDAC 0x0080
-#define AC97_EXTSTAT_LDAC 0x0100
-#define AC97_EXTSTAT_MADC 0x0200
-#define AC97_EXTSTAT_SPCV 0x0400
-#define AC97_EXTSTAT_PRI 0x0800
-#define AC97_EXTSTAT_PRJ 0x1000
-#define AC97_EXTSTAT_PRK 0x2000
-#define AC97_EXTSTAT_PRL 0x4000
-
-/* extended audio ID register bit defines */
-#define AC97_EXTID_VRA 0x0001
-#define AC97_EXTID_DRA 0x0002
-#define AC97_EXTID_SPDIF 0x0004
-#define AC97_EXTID_VRM 0x0008
-#define AC97_EXTID_DSA0 0x0010
-#define AC97_EXTID_DSA1 0x0020
-#define AC97_EXTID_CDAC 0x0040
-#define AC97_EXTID_SDAC 0x0080
-#define AC97_EXTID_LDAC 0x0100
-#define AC97_EXTID_AMAP 0x0200
-#define AC97_EXTID_REV0 0x0400
-#define AC97_EXTID_REV1 0x0800
-#define AC97_EXTID_ID0 0x4000
-#define AC97_EXTID_ID1 0x8000
-
-/* extended status register bit defines */
-#define AC97_EXTSTAT_VRA 0x0001
-#define AC97_EXTSTAT_DRA 0x0002
-#define AC97_EXTSTAT_SPDIF 0x0004
-#define AC97_EXTSTAT_VRM 0x0008
-#define AC97_EXTSTAT_SPSA0 0x0010
-#define AC97_EXTSTAT_SPSA1 0x0020
-#define AC97_EXTSTAT_CDAC 0x0040
-#define AC97_EXTSTAT_SDAC 0x0080
-#define AC97_EXTSTAT_LDAC 0x0100
-#define AC97_EXTSTAT_MADC 0x0200
-#define AC97_EXTSTAT_SPCV 0x0400
-#define AC97_EXTSTAT_PRI 0x0800
-#define AC97_EXTSTAT_PRJ 0x1000
-#define AC97_EXTSTAT_PRK 0x2000
-#define AC97_EXTSTAT_PRL 0x4000
-
-/* useful power states */
-#define AC97_PWR_D0 0x0000 /* everything on */
-#define AC97_PWR_D1 AC97_PWR_PR0|AC97_PWR_PR1|AC97_PWR_PR4
-#define AC97_PWR_D2 AC97_PWR_PR0|AC97_PWR_PR1|AC97_PWR_PR2|AC97_PWR_PR3|AC97_PWR_PR4
-#define AC97_PWR_D3 AC97_PWR_PR0|AC97_PWR_PR1|AC97_PWR_PR2|AC97_PWR_PR3|AC97_PWR_PR4
-#define AC97_PWR_ANLOFF AC97_PWR_PR2|AC97_PWR_PR3 /* analog section off */
-
-/* Total number of defined registers. */
-#define AC97_REG_CNT 64
-
-
-/* OSS interface to the ac97s.. */
-#define AC97_STEREO_MASK (SOUND_MASK_VOLUME|SOUND_MASK_PCM|\
- SOUND_MASK_LINE|SOUND_MASK_CD|\
- SOUND_MASK_ALTPCM|SOUND_MASK_IGAIN|\
- SOUND_MASK_LINE1|SOUND_MASK_VIDEO)
-
-#define AC97_SUPPORTED_MASK (AC97_STEREO_MASK | \
- SOUND_MASK_BASS|SOUND_MASK_TREBLE|\
- SOUND_MASK_SPEAKER|SOUND_MASK_MIC|\
- SOUND_MASK_PHONEIN|SOUND_MASK_PHONEOUT)
-
-#define AC97_RECORD_MASK (SOUND_MASK_MIC|\
- SOUND_MASK_CD|SOUND_MASK_IGAIN|SOUND_MASK_VIDEO|\
- SOUND_MASK_LINE1| SOUND_MASK_LINE|\
- SOUND_MASK_PHONEIN)
-
-/* original check is not good enough in case FOO is greater than
- * SOUND_MIXER_NRDEVICES because the supported_mixers has exactly
- * SOUND_MIXER_NRDEVICES elements.
- * before matching the given mixer against the bitmask in supported_mixers we
- * check if mixer number exceeds maximum allowed size which is as mentioned
- * above SOUND_MIXER_NRDEVICES */
-#define supported_mixer(CODEC,FOO) ((FOO >= 0) && \
- (FOO < SOUND_MIXER_NRDEVICES) && \
- (CODEC)->supported_mixers & (1<<FOO) )
-
-struct ac97_codec {
- /* Linked list of codecs */
- struct list_head list;
-
- /* AC97 controller connected with */
- void *private_data;
-
- char *name;
- int id;
- int dev_mixer;
- int type;
- u32 model;
-
- unsigned int modem:1;
-
- struct ac97_ops *codec_ops;
-
- /* controller specific lower leverl ac97 accessing routines.
- must be re-entrant safe */
- u16 (*codec_read) (struct ac97_codec *codec, u8 reg);
- void (*codec_write) (struct ac97_codec *codec, u8 reg, u16 val);
-
- /* Wait for codec-ready. Ok to sleep here. */
- void (*codec_wait) (struct ac97_codec *codec);
-
- /* callback used by helper drivers for interesting ac97 setups */
- void (*codec_unregister) (struct ac97_codec *codec);
-
- struct ac97_driver *driver;
- void *driver_private; /* Private data for the driver */
-
- spinlock_t lock;
-
- /* OSS mixer masks */
- int modcnt;
- int supported_mixers;
- int stereo_mixers;
- int record_sources;
-
- /* Property flags */
- int flags;
-
- int bit_resolution;
-
- /* OSS mixer interface */
- int (*read_mixer) (struct ac97_codec *codec, int oss_channel);
- void (*write_mixer)(struct ac97_codec *codec, int oss_channel,
- unsigned int left, unsigned int right);
- int (*recmask_io) (struct ac97_codec *codec, int rw, int mask);
- int (*mixer_ioctl)(struct ac97_codec *codec, unsigned int cmd, unsigned long arg);
-
- /* saved OSS mixer states */
- unsigned int mixer_state[SOUND_MIXER_NRDEVICES];
-
- /* Software Modem interface */
- int (*modem_ioctl)(struct ac97_codec *codec, unsigned int cmd, unsigned long arg);
-};
-
-/*
- * Operation structures for each known AC97 chip
- */
-
-struct ac97_ops
-{
- /* Initialise */
- int (*init)(struct ac97_codec *c);
- /* Amplifier control */
- int (*amplifier)(struct ac97_codec *codec, int on);
- /* Digital mode control */
- int (*digital)(struct ac97_codec *codec, int slots, int rate, int mode);
-#define AUDIO_DIGITAL 0x8000
-#define AUDIO_PRO 0x4000
-#define AUDIO_DRS 0x2000
-#define AUDIO_CCMASK 0x003F
-
-#define AC97_DELUDED_MODEM 1 /* Audio codec reports its a modem */
-#define AC97_NO_PCM_VOLUME 2 /* Volume control is missing */
-#define AC97_DEFAULT_POWER_OFF 4 /* Needs warm reset to power up */
-};
-
-extern int ac97_probe_codec(struct ac97_codec *);
-
-extern struct ac97_codec *ac97_alloc_codec(void);
-extern void ac97_release_codec(struct ac97_codec *codec);
-
-struct ac97_driver {
- struct list_head list;
- char *name;
- u32 codec_id;
- u32 codec_mask;
- int (*probe) (struct ac97_codec *codec, struct ac97_driver *driver);
- void (*remove) (struct ac97_codec *codec, struct ac97_driver *driver);
-};
-
-/* quirk types */
-enum {
- AC97_TUNE_DEFAULT = -1, /* use default from quirk list (not valid in list) */
- AC97_TUNE_NONE = 0, /* nothing extra to do */
- AC97_TUNE_HP_ONLY, /* headphone (true line-out) control as master only */
- AC97_TUNE_SWAP_HP, /* swap headphone and master controls */
- AC97_TUNE_SWAP_SURROUND, /* swap master and surround controls */
- AC97_TUNE_AD_SHARING, /* for AD1985, turn on OMS bit and use headphone */
- AC97_TUNE_ALC_JACK, /* for Realtek, enable JACK detection */
-};
-
-struct ac97_quirk {
- unsigned short vendor; /* PCI vendor id */
- unsigned short device; /* PCI device id */
- unsigned short mask; /* device id bit mask, 0 = accept all */
- const char *name; /* name shown as info */
- int type; /* quirk type above */
-};
-
-#endif /* _AC97_CODEC_H_ */
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 2314ad8b3c9c..b1a520ec8b59 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -140,6 +140,7 @@ struct kiocb {
(x)->ki_dtor = NULL; \
(x)->ki_obj.tsk = tsk; \
(x)->ki_user_data = 0; \
+ (x)->private = NULL; \
} while (0)
#define AIO_RING_MAGIC 0xa10a10a1
diff --git a/include/linux/async.h b/include/linux/async.h
index 68a9530196f2..7a24fe9b44b4 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -9,19 +9,47 @@
* as published by the Free Software Foundation; version 2
* of the License.
*/
+#ifndef __ASYNC_H__
+#define __ASYNC_H__
#include <linux/types.h>
#include <linux/list.h>
typedef u64 async_cookie_t;
typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
+struct async_domain {
+ struct list_head node;
+ struct list_head domain;
+ int count;
+ unsigned registered:1;
+};
+
+/*
+ * domain participates in global async_synchronize_full
+ */
+#define ASYNC_DOMAIN(_name) \
+ struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
+ .domain = LIST_HEAD_INIT(_name.domain), \
+ .count = 0, \
+ .registered = 1 }
+
+/*
+ * domain is free to go out of scope as soon as all pending work is
+ * complete, this domain does not participate in async_synchronize_full
+ */
+#define ASYNC_DOMAIN_EXCLUSIVE(_name) \
+ struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
+ .domain = LIST_HEAD_INIT(_name.domain), \
+ .count = 0, \
+ .registered = 0 }
extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
- struct list_head *list);
+ struct async_domain *domain);
+void async_unregister_domain(struct async_domain *domain);
extern void async_synchronize_full(void);
-extern void async_synchronize_full_domain(struct list_head *list);
+extern void async_synchronize_full_domain(struct async_domain *domain);
extern void async_synchronize_cookie(async_cookie_t cookie);
extern void async_synchronize_cookie_domain(async_cookie_t cookie,
- struct list_head *list);
-
+ struct async_domain *domain);
+#endif
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 8deaf6d050c3..1954a4e305a3 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -7,6 +7,7 @@
#include <linux/bcma/bcma_driver_chipcommon.h>
#include <linux/bcma/bcma_driver_pci.h>
#include <linux/bcma/bcma_driver_mips.h>
+#include <linux/bcma/bcma_driver_gmac_cmn.h>
#include <linux/ssb/ssb.h> /* SPROM sharing */
#include "bcma_regs.h"
@@ -70,6 +71,13 @@ struct bcma_host_ops {
/* Core-ID values. */
#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */
+#define BCMA_CORE_4706_CHIPCOMMON 0x500
+#define BCMA_CORE_4706_SOC_RAM 0x50E
+#define BCMA_CORE_4706_MAC_GBIT 0x52D
+#define BCMA_CORE_AMEMC 0x52E /* DDR1/2 memory controller core */
+#define BCMA_CORE_ALTA 0x534 /* I2S core */
+#define BCMA_CORE_4706_MAC_GBIT_COMMON 0x5DC
+#define BCMA_CORE_DDR23_PHY 0x5DD
#define BCMA_CORE_INVALID 0x700
#define BCMA_CORE_CHIPCOMMON 0x800
#define BCMA_CORE_ILINE20 0x801
@@ -130,6 +138,36 @@ struct bcma_host_ops {
#define BCMA_MAX_NR_CORES 16
+/* Chip IDs of PCIe devices */
+#define BCMA_CHIP_ID_BCM4313 0x4313
+#define BCMA_CHIP_ID_BCM43224 43224
+#define BCMA_PKG_ID_BCM43224_FAB_CSM 0x8
+#define BCMA_PKG_ID_BCM43224_FAB_SMIC 0xa
+#define BCMA_CHIP_ID_BCM43225 43225
+#define BCMA_CHIP_ID_BCM43227 43227
+#define BCMA_CHIP_ID_BCM43228 43228
+#define BCMA_CHIP_ID_BCM43421 43421
+#define BCMA_CHIP_ID_BCM43428 43428
+#define BCMA_CHIP_ID_BCM43431 43431
+#define BCMA_CHIP_ID_BCM43460 43460
+#define BCMA_CHIP_ID_BCM4331 0x4331
+#define BCMA_CHIP_ID_BCM6362 0x6362
+#define BCMA_CHIP_ID_BCM4360 0x4360
+#define BCMA_CHIP_ID_BCM4352 0x4352
+
+/* Chip IDs of SoCs */
+#define BCMA_CHIP_ID_BCM4706 0x5300
+#define BCMA_CHIP_ID_BCM4716 0x4716
+#define BCMA_PKG_ID_BCM4716 8
+#define BCMA_PKG_ID_BCM4717 9
+#define BCMA_PKG_ID_BCM4718 10
+#define BCMA_CHIP_ID_BCM47162 47162
+#define BCMA_CHIP_ID_BCM4748 0x4748
+#define BCMA_CHIP_ID_BCM4749 0x4749
+#define BCMA_CHIP_ID_BCM5356 0x5356
+#define BCMA_CHIP_ID_BCM5357 0x5357
+#define BCMA_CHIP_ID_BCM53572 53572
+
struct bcma_device {
struct bcma_bus *bus;
struct bcma_device_id id;
@@ -215,6 +253,7 @@ struct bcma_bus {
struct bcma_drv_cc drv_cc;
struct bcma_drv_pci drv_pci;
struct bcma_drv_mips drv_mips;
+ struct bcma_drv_gmac_cmn drv_gmac_cmn;
/* We decided to share SPROM struct with SSB as long as we do not need
* any hacks for BCMA. This simplifies drivers code. */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 8bbfe31fbac8..3c80885fa829 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -24,7 +24,7 @@
#define BCMA_CC_FLASHT_NONE 0x00000000 /* No flash */
#define BCMA_CC_FLASHT_STSER 0x00000100 /* ST serial flash */
#define BCMA_CC_FLASHT_ATSER 0x00000200 /* Atmel serial flash */
-#define BCMA_CC_FLASHT_NFLASH 0x00000200
+#define BCMA_CC_FLASHT_NFLASH 0x00000200 /* NAND flash */
#define BCMA_CC_FLASHT_PARA 0x00000700 /* Parallel flash */
#define BCMA_CC_CAP_PLLT 0x00038000 /* PLL Type */
#define BCMA_PLLTYPE_NONE 0x00000000
@@ -45,6 +45,7 @@
#define BCMA_CC_CAP_PMU 0x10000000 /* PMU available (rev >= 20) */
#define BCMA_CC_CAP_ECI 0x20000000 /* ECI available (rev >= 20) */
#define BCMA_CC_CAP_SPROM 0x40000000 /* SPROM present */
+#define BCMA_CC_CAP_NFLASH 0x80000000 /* NAND flash present (rev >= 35 or BCM4706?) */
#define BCMA_CC_CORECTL 0x0008
#define BCMA_CC_CORECTL_UARTCLK0 0x00000001 /* Drive UART with internal clock */
#define BCMA_CC_CORECTL_SE 0x00000002 /* sync clk out enable (corerev >= 3) */
@@ -88,6 +89,11 @@
#define BCMA_CC_CHIPST_4313_OTP_PRESENT 2
#define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2
#define BCMA_CC_CHIPST_4331_OTP_PRESENT 4
+#define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */
+#define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */
+#define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */
+#define BCMA_CC_CHIPST_4706_MIPS_BENDIAN BIT(3) /* 0: little, 1: big endian */
+#define BCMA_CC_CHIPST_4706_PCIE1_DISABLE BIT(5) /* PCIE1 enable strap pin */
#define BCMA_CC_JCMD 0x0030 /* Rev >= 10 only */
#define BCMA_CC_JCMD_START 0x80000000
#define BCMA_CC_JCMD_BUSY 0x80000000
@@ -117,10 +123,58 @@
#define BCMA_CC_JCTL_EXT_EN 2 /* Enable external targets */
#define BCMA_CC_JCTL_EN 1 /* Enable Jtag master */
#define BCMA_CC_FLASHCTL 0x0040
+/* Start/busy bit in flashcontrol */
+#define BCMA_CC_FLASHCTL_OPCODE 0x000000ff
+#define BCMA_CC_FLASHCTL_ACTION 0x00000700
+#define BCMA_CC_FLASHCTL_CS_ACTIVE 0x00001000 /* Chip Select Active, rev >= 20 */
#define BCMA_CC_FLASHCTL_START 0x80000000
#define BCMA_CC_FLASHCTL_BUSY BCMA_CC_FLASHCTL_START
+/* Flashcontrol action + opcodes for ST flashes */
+#define BCMA_CC_FLASHCTL_ST_WREN 0x0006 /* Write Enable */
+#define BCMA_CC_FLASHCTL_ST_WRDIS 0x0004 /* Write Disable */
+#define BCMA_CC_FLASHCTL_ST_RDSR 0x0105 /* Read Status Register */
+#define BCMA_CC_FLASHCTL_ST_WRSR 0x0101 /* Write Status Register */
+#define BCMA_CC_FLASHCTL_ST_READ 0x0303 /* Read Data Bytes */
+#define BCMA_CC_FLASHCTL_ST_PP 0x0302 /* Page Program */
+#define BCMA_CC_FLASHCTL_ST_SE 0x02d8 /* Sector Erase */
+#define BCMA_CC_FLASHCTL_ST_BE 0x00c7 /* Bulk Erase */
+#define BCMA_CC_FLASHCTL_ST_DP 0x00b9 /* Deep Power-down */
+#define BCMA_CC_FLASHCTL_ST_RES 0x03ab /* Read Electronic Signature */
+#define BCMA_CC_FLASHCTL_ST_CSA 0x1000 /* Keep chip select asserted */
+#define BCMA_CC_FLASHCTL_ST_SSE 0x0220 /* Sub-sector Erase */
+/* Flashcontrol action + opcodes for Atmel flashes */
+#define BCMA_CC_FLASHCTL_AT_READ 0x07e8
+#define BCMA_CC_FLASHCTL_AT_PAGE_READ 0x07d2
+#define BCMA_CC_FLASHCTL_AT_STATUS 0x01d7
+#define BCMA_CC_FLASHCTL_AT_BUF1_WRITE 0x0384
+#define BCMA_CC_FLASHCTL_AT_BUF2_WRITE 0x0387
+#define BCMA_CC_FLASHCTL_AT_BUF1_ERASE_PROGRAM 0x0283
+#define BCMA_CC_FLASHCTL_AT_BUF2_ERASE_PROGRAM 0x0286
+#define BCMA_CC_FLASHCTL_AT_BUF1_PROGRAM 0x0288
+#define BCMA_CC_FLASHCTL_AT_BUF2_PROGRAM 0x0289
+#define BCMA_CC_FLASHCTL_AT_PAGE_ERASE 0x0281
+#define BCMA_CC_FLASHCTL_AT_BLOCK_ERASE 0x0250
+#define BCMA_CC_FLASHCTL_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382
+#define BCMA_CC_FLASHCTL_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385
+#define BCMA_CC_FLASHCTL_AT_BUF1_LOAD 0x0253
+#define BCMA_CC_FLASHCTL_AT_BUF2_LOAD 0x0255
+#define BCMA_CC_FLASHCTL_AT_BUF1_COMPARE 0x0260
+#define BCMA_CC_FLASHCTL_AT_BUF2_COMPARE 0x0261
+#define BCMA_CC_FLASHCTL_AT_BUF1_REPROGRAM 0x0258
+#define BCMA_CC_FLASHCTL_AT_BUF2_REPROGRAM 0x0259
#define BCMA_CC_FLASHADDR 0x0044
#define BCMA_CC_FLASHDATA 0x0048
+/* Status register bits for ST flashes */
+#define BCMA_CC_FLASHDATA_ST_WIP 0x01 /* Write In Progress */
+#define BCMA_CC_FLASHDATA_ST_WEL 0x02 /* Write Enable Latch */
+#define BCMA_CC_FLASHDATA_ST_BP_MASK 0x1c /* Block Protect */
+#define BCMA_CC_FLASHDATA_ST_BP_SHIFT 2
+#define BCMA_CC_FLASHDATA_ST_SRWD 0x80 /* Status Register Write Disable */
+/* Status register bits for Atmel flashes */
+#define BCMA_CC_FLASHDATA_AT_READY 0x80
+#define BCMA_CC_FLASHDATA_AT_MISMATCH 0x40
+#define BCMA_CC_FLASHDATA_AT_ID_MASK 0x38
+#define BCMA_CC_FLASHDATA_AT_ID_SHIFT 3
#define BCMA_CC_BCAST_ADDR 0x0050
#define BCMA_CC_BCAST_DATA 0x0054
#define BCMA_CC_GPIOPULLUP 0x0058 /* Rev >= 20 only */
@@ -280,6 +334,15 @@
/* 4706 PMU */
#define BCMA_CC_PMU4706_MAINPLL_PLL0 0
+#define BCMA_CC_PMU6_4706_PROCPLL_OFF 4 /* The CPU PLL */
+#define BCMA_CC_PMU6_4706_PROC_P2DIV_MASK 0x000f0000
+#define BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT 16
+#define BCMA_CC_PMU6_4706_PROC_P1DIV_MASK 0x0000f000
+#define BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT 12
+#define BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8
+#define BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT 3
+#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007
+#define BCMA_CC_PMU6_4706_PROC_NDIV_MODE_SHIFT 0
/* ALP clock on pre-PMU chips */
#define BCMA_CC_PMU_ALP_CLOCK 20000000
@@ -308,6 +371,19 @@
#define BCMA_CC_PPL_PCHI_OFF 5
#define BCMA_CC_PPL_PCHI_MASK 0x0000003f
+#define BCMA_CC_PMU_PLL_CTL0 0
+#define BCMA_CC_PMU_PLL_CTL1 1
+#define BCMA_CC_PMU_PLL_CTL2 2
+#define BCMA_CC_PMU_PLL_CTL3 3
+#define BCMA_CC_PMU_PLL_CTL4 4
+#define BCMA_CC_PMU_PLL_CTL5 5
+
+#define BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000
+#define BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT 20
+
+#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000
+#define BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT 20
+
/* BCM4331 ChipControl numbers. */
#define BCMA_CHIPCTL_4331_BT_COEXIST BIT(0) /* 0 disable */
#define BCMA_CHIPCTL_4331_SECI BIT(1) /* 0 SECI is disabled (JATG functional) */
@@ -321,9 +397,18 @@
#define BCMA_CHIPCTL_4331_OVR_PIPEAUXPWRDOWN BIT(9) /* override core control on pipe_AuxPowerDown */
#define BCMA_CHIPCTL_4331_PCIE_AUXCLKEN BIT(10) /* pcie_auxclkenable */
#define BCMA_CHIPCTL_4331_PCIE_PIPE_PLLDOWN BIT(11) /* pcie_pipe_pllpowerdown */
+#define BCMA_CHIPCTL_4331_EXTPA_EN2 BIT(12) /* 0 ext pa disable, 1 ext pa enabled */
#define BCMA_CHIPCTL_4331_BT_SHD0_ON_GPIO4 BIT(16) /* enable bt_shd0 at gpio4 */
#define BCMA_CHIPCTL_4331_BT_SHD1_ON_GPIO5 BIT(17) /* enable bt_shd1 at gpio5 */
+/* 43224 chip-specific ChipControl register bits */
+#define BCMA_CCTRL_43224_GPIO_TOGGLE 0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */
+#define BCMA_CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */
+#define BCMA_CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */
+
+/* 4313 Chip specific ChipControl register bits */
+#define BCMA_CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strengh for later 4313 */
+
/* Data for the PMU, if available.
* Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
*/
@@ -411,5 +496,6 @@ extern void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
u32 offset, u32 mask, u32 set);
extern void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc,
u32 offset, u32 mask, u32 set);
+extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid);
#endif /* LINUX_BCMA_DRIVER_CC_H_ */
diff --git a/include/linux/bcma/bcma_driver_gmac_cmn.h b/include/linux/bcma/bcma_driver_gmac_cmn.h
new file mode 100644
index 000000000000..def894b83b0d
--- /dev/null
+++ b/include/linux/bcma/bcma_driver_gmac_cmn.h
@@ -0,0 +1,100 @@
+#ifndef LINUX_BCMA_DRIVER_GMAC_CMN_H_
+#define LINUX_BCMA_DRIVER_GMAC_CMN_H_
+
+#include <linux/types.h>
+
+#define BCMA_GMAC_CMN_STAG0 0x000
+#define BCMA_GMAC_CMN_STAG1 0x004
+#define BCMA_GMAC_CMN_STAG2 0x008
+#define BCMA_GMAC_CMN_STAG3 0x00C
+#define BCMA_GMAC_CMN_PARSER_CTL 0x020
+#define BCMA_GMAC_CMN_MIB_MAX_LEN 0x024
+#define BCMA_GMAC_CMN_PHY_ACCESS 0x100
+#define BCMA_GMAC_CMN_PA_DATA_MASK 0x0000ffff
+#define BCMA_GMAC_CMN_PA_ADDR_MASK 0x001f0000
+#define BCMA_GMAC_CMN_PA_ADDR_SHIFT 16
+#define BCMA_GMAC_CMN_PA_REG_MASK 0x1f000000
+#define BCMA_GMAC_CMN_PA_REG_SHIFT 24
+#define BCMA_GMAC_CMN_PA_WRITE 0x20000000
+#define BCMA_GMAC_CMN_PA_START 0x40000000
+#define BCMA_GMAC_CMN_PHY_CTL 0x104
+#define BCMA_GMAC_CMN_PC_EPA_MASK 0x0000001f
+#define BCMA_GMAC_CMN_PC_MCT_MASK 0x007f0000
+#define BCMA_GMAC_CMN_PC_MCT_SHIFT 16
+#define BCMA_GMAC_CMN_PC_MTE 0x00800000
+#define BCMA_GMAC_CMN_GMAC0_RGMII_CTL 0x110
+#define BCMA_GMAC_CMN_CFP_ACCESS 0x200
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA0 0x210
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA1 0x214
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA2 0x218
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA3 0x21C
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA4 0x220
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA5 0x224
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA6 0x228
+#define BCMA_GMAC_CMN_CFP_TCAM_DATA7 0x22C
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK0 0x230
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK1 0x234
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK2 0x238
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK3 0x23C
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK4 0x240
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK5 0x244
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK6 0x248
+#define BCMA_GMAC_CMN_CFP_TCAM_MASK7 0x24C
+#define BCMA_GMAC_CMN_CFP_ACTION_DATA 0x250
+#define BCMA_GMAC_CMN_TCAM_BIST_CTL 0x2A0
+#define BCMA_GMAC_CMN_TCAM_BIST_STATUS 0x2A4
+#define BCMA_GMAC_CMN_TCAM_CMP_STATUS 0x2A8
+#define BCMA_GMAC_CMN_TCAM_DISABLE 0x2AC
+#define BCMA_GMAC_CMN_TCAM_TEST_CTL 0x2F0
+#define BCMA_GMAC_CMN_UDF_0_A3_A0 0x300
+#define BCMA_GMAC_CMN_UDF_0_A7_A4 0x304
+#define BCMA_GMAC_CMN_UDF_0_A8 0x308
+#define BCMA_GMAC_CMN_UDF_1_A3_A0 0x310
+#define BCMA_GMAC_CMN_UDF_1_A7_A4 0x314
+#define BCMA_GMAC_CMN_UDF_1_A8 0x318
+#define BCMA_GMAC_CMN_UDF_2_A3_A0 0x320
+#define BCMA_GMAC_CMN_UDF_2_A7_A4 0x324
+#define BCMA_GMAC_CMN_UDF_2_A8 0x328
+#define BCMA_GMAC_CMN_UDF_0_B3_B0 0x330
+#define BCMA_GMAC_CMN_UDF_0_B7_B4 0x334
+#define BCMA_GMAC_CMN_UDF_0_B8 0x338
+#define BCMA_GMAC_CMN_UDF_1_B3_B0 0x340
+#define BCMA_GMAC_CMN_UDF_1_B7_B4 0x344
+#define BCMA_GMAC_CMN_UDF_1_B8 0x348
+#define BCMA_GMAC_CMN_UDF_2_B3_B0 0x350
+#define BCMA_GMAC_CMN_UDF_2_B7_B4 0x354
+#define BCMA_GMAC_CMN_UDF_2_B8 0x358
+#define BCMA_GMAC_CMN_UDF_0_C3_C0 0x360
+#define BCMA_GMAC_CMN_UDF_0_C7_C4 0x364
+#define BCMA_GMAC_CMN_UDF_0_C8 0x368
+#define BCMA_GMAC_CMN_UDF_1_C3_C0 0x370
+#define BCMA_GMAC_CMN_UDF_1_C7_C4 0x374
+#define BCMA_GMAC_CMN_UDF_1_C8 0x378
+#define BCMA_GMAC_CMN_UDF_2_C3_C0 0x380
+#define BCMA_GMAC_CMN_UDF_2_C7_C4 0x384
+#define BCMA_GMAC_CMN_UDF_2_C8 0x388
+#define BCMA_GMAC_CMN_UDF_0_D3_D0 0x390
+#define BCMA_GMAC_CMN_UDF_0_D7_D4 0x394
+#define BCMA_GMAC_CMN_UDF_0_D11_D8 0x394
+
+struct bcma_drv_gmac_cmn {
+ struct bcma_device *core;
+
+ /* Drivers accessing BCMA_GMAC_CMN_PHY_ACCESS and
+ * BCMA_GMAC_CMN_PHY_CTL need to take that mutex first. */
+ struct mutex phy_mutex;
+};
+
+/* Register access */
+#define gmac_cmn_read16(gc, offset) bcma_read16((gc)->core, offset)
+#define gmac_cmn_read32(gc, offset) bcma_read32((gc)->core, offset)
+#define gmac_cmn_write16(gc, offset, val) bcma_write16((gc)->core, offset, val)
+#define gmac_cmn_write32(gc, offset, val) bcma_write32((gc)->core, offset, val)
+
+#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
+extern void __devinit bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
+#else
+static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc) { }
+#endif
+
+#endif /* LINUX_BCMA_DRIVER_GMAC_CMN_H_ */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ba43f408baa3..07954b05b86c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -827,7 +827,6 @@ extern bool __blk_end_request_err(struct request *rq, int error);
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
-extern void blk_abort_queue(struct request_queue *);
extern void blk_unprep_request(struct request *);
/*
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 324fe08ea3b1..6d6795d46a75 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -91,6 +91,11 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal);
+void *___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+ unsigned long size,
+ unsigned long align,
+ unsigned long goal,
+ unsigned long limit);
extern void *__alloc_bootmem_low(unsigned long size,
unsigned long align,
unsigned long goal);
diff --git a/include/linux/can.h b/include/linux/can.h
index 9a19bcb3eeaf..018055efc034 100644
--- a/include/linux/can.h
+++ b/include/linux/can.h
@@ -21,7 +21,7 @@
/* special address description flags for the CAN_ID */
#define CAN_EFF_FLAG 0x80000000U /* EFF/SFF is set in the MSB */
#define CAN_RTR_FLAG 0x40000000U /* remote transmission request */
-#define CAN_ERR_FLAG 0x20000000U /* error frame */
+#define CAN_ERR_FLAG 0x20000000U /* error message frame */
/* valid bits in CAN ID for frame formats */
#define CAN_SFF_MASK 0x000007FFU /* standard frame format (SFF) */
@@ -32,32 +32,84 @@
* Controller Area Network Identifier structure
*
* bit 0-28 : CAN identifier (11/29 bit)
- * bit 29 : error frame flag (0 = data frame, 1 = error frame)
+ * bit 29 : error message frame flag (0 = data frame, 1 = error message)
* bit 30 : remote transmission request flag (1 = rtr frame)
* bit 31 : frame format flag (0 = standard 11 bit, 1 = extended 29 bit)
*/
typedef __u32 canid_t;
+#define CAN_SFF_ID_BITS 11
+#define CAN_EFF_ID_BITS 29
+
/*
- * Controller Area Network Error Frame Mask structure
+ * Controller Area Network Error Message Frame Mask structure
*
* bit 0-28 : error class mask (see include/linux/can/error.h)
* bit 29-31 : set to zero
*/
typedef __u32 can_err_mask_t;
+/* CAN payload length and DLC definitions according to ISO 11898-1 */
+#define CAN_MAX_DLC 8
+#define CAN_MAX_DLEN 8
+
+/* CAN FD payload length and DLC definitions according to ISO 11898-7 */
+#define CANFD_MAX_DLC 15
+#define CANFD_MAX_DLEN 64
+
/**
* struct can_frame - basic CAN frame structure
- * @can_id: the CAN ID of the frame and CAN_*_FLAG flags, see above.
- * @can_dlc: the data length field of the CAN frame
- * @data: the CAN frame payload.
+ * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition
+ * @can_dlc: frame payload length in byte (0 .. 8) aka data length code
+ * N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1
+ * mapping of the 'data length code' to the real payload length
+ * @data: CAN frame payload (up to 8 byte)
*/
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
- __u8 can_dlc; /* data length code: 0 .. 8 */
- __u8 data[8] __attribute__((aligned(8)));
+ __u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */
+ __u8 data[CAN_MAX_DLEN] __attribute__((aligned(8)));
+};
+
+/*
+ * defined bits for canfd_frame.flags
+ *
+ * As the default for CAN FD should be to support the high data rate in the
+ * payload section of the frame (HDR) and to support up to 64 byte in the
+ * data section (EDL) the bits are only set in the non-default case.
+ * Btw. as long as there's no real implementation for CAN FD network driver
+ * these bits are only preliminary.
+ *
+ * RX: NOHDR/NOEDL - info about received CAN FD frame
+ * ESI - bit from originating CAN controller
+ * TX: NOHDR/NOEDL - control per-frame settings if supported by CAN controller
+ * ESI - bit is set by local CAN controller
+ */
+#define CANFD_NOHDR 0x01 /* frame without high data rate */
+#define CANFD_NOEDL 0x02 /* frame without extended data length */
+#define CANFD_ESI 0x04 /* error state indicator */
+
+/**
+ * struct canfd_frame - CAN flexible data rate frame structure
+ * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition
+ * @len: frame payload length in byte (0 .. CANFD_MAX_DLEN)
+ * @flags: additional flags for CAN FD
+ * @__res0: reserved / padding
+ * @__res1: reserved / padding
+ * @data: CAN FD frame payload (up to CANFD_MAX_DLEN byte)
+ */
+struct canfd_frame {
+ canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
+ __u8 len; /* frame payload length in byte */
+ __u8 flags; /* additional flags for CAN FD */
+ __u8 __res0; /* reserved / padding */
+ __u8 __res1; /* reserved / padding */
+ __u8 data[CANFD_MAX_DLEN] __attribute__((aligned(8)));
};
+#define CAN_MTU (sizeof(struct can_frame))
+#define CANFD_MTU (sizeof(struct canfd_frame))
+
/* particular protocols of the protocol family PF_CAN */
#define CAN_RAW 1 /* RAW sockets */
#define CAN_BCM 2 /* Broadcast Manager */
@@ -97,7 +149,7 @@ struct sockaddr_can {
* <received_can_id> & mask == can_id & mask
*
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
- * filter for error frames (CAN_ERR_FLAG bit set in mask).
+ * filter for error message frames (CAN_ERR_FLAG bit set in mask).
*/
struct can_filter {
canid_t can_id;
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index 0ccc1cd28b95..78c6c52073ad 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -17,10 +17,10 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#define CAN_VERSION "20090105"
+#define CAN_VERSION "20120528"
/* increment this number each time you change some user-space interface */
-#define CAN_ABI_VERSION "8"
+#define CAN_ABI_VERSION "9"
#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5d2efe7e3f1b..2b2fc345afca 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -33,7 +33,7 @@ struct can_priv {
struct can_device_stats can_stats;
struct can_bittiming bittiming;
- struct can_bittiming_const *bittiming_const;
+ const struct can_bittiming_const *bittiming_const;
struct can_clock clock;
enum can_state state;
@@ -61,23 +61,40 @@ struct can_priv {
* To be used in the CAN netdriver receive path to ensure conformance with
* ISO 11898-1 Chapter 8.4.2.3 (DLC field)
*/
-#define get_can_dlc(i) (min_t(__u8, (i), 8))
+#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
+#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
static inline int can_dropped_invalid_skb(struct net_device *dev,
struct sk_buff *skb)
{
- const struct can_frame *cf = (struct can_frame *)skb->data;
-
- if (unlikely(skb->len != sizeof(*cf) || cf->can_dlc > 8)) {
- kfree_skb(skb);
- dev->stats.tx_dropped++;
- return 1;
- }
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (skb->protocol == htons(ETH_P_CAN)) {
+ if (unlikely(skb->len != CAN_MTU ||
+ cfd->len > CAN_MAX_DLEN))
+ goto inval_skb;
+ } else if (skb->protocol == htons(ETH_P_CANFD)) {
+ if (unlikely(skb->len != CANFD_MTU ||
+ cfd->len > CANFD_MAX_DLEN))
+ goto inval_skb;
+ } else
+ goto inval_skb;
return 0;
+
+inval_skb:
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return 1;
}
+/* get data length from can_dlc with sanitized can_dlc */
+u8 can_dlc2len(u8 can_dlc);
+
+/* map the sanitized data length to an appropriate data length code */
+u8 can_len2dlc(u8 len);
+
struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
void free_candev(struct net_device *dev);
diff --git a/include/linux/can/error.h b/include/linux/can/error.h
index 63e855ea6b84..7b7148bded71 100644
--- a/include/linux/can/error.h
+++ b/include/linux/can/error.h
@@ -1,7 +1,7 @@
/*
* linux/can/error.h
*
- * Definitions of the CAN error frame to be filtered and passed to the user.
+ * Definitions of the CAN error messages to be filtered and passed to the user.
*
* Author: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
@@ -12,7 +12,7 @@
#ifndef CAN_ERROR_H
#define CAN_ERROR_H
-#define CAN_ERR_DLC 8 /* dlc for error frames */
+#define CAN_ERR_DLC 8 /* dlc for error message frames */
/* error class (mask) in can_id */
#define CAN_ERR_TX_TIMEOUT 0x00000001U /* TX timeout (by netdevice driver) */
diff --git a/include/linux/can/raw.h b/include/linux/can/raw.h
index 781f3a3701be..a814062b0719 100644
--- a/include/linux/can/raw.h
+++ b/include/linux/can/raw.h
@@ -23,7 +23,8 @@ enum {
CAN_RAW_FILTER = 1, /* set 0 .. n can_filter(s) */
CAN_RAW_ERR_FILTER, /* set filter for error frames */
CAN_RAW_LOOPBACK, /* local loopback (default:on) */
- CAN_RAW_RECV_OWN_MSGS /* receive my own msgs (default:off) */
+ CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */
+ CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */
};
#endif
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 68d56effc328..d10b7ed595b1 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -360,11 +360,11 @@ struct cpu_vfs_cap_data {
#define CAP_WAKE_ALARM 35
-/* Allow preventing system suspends while epoll events are pending */
+/* Allow preventing system suspends */
-#define CAP_EPOLLWAKEUP 36
+#define CAP_BLOCK_SUSPEND 36
-#define CAP_LAST_CAP CAP_EPOLLWAKEUP
+#define CAP_LAST_CAP CAP_BLOCK_SUSPEND
#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 2521a95fa6d9..44c87e731e9d 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -163,16 +163,8 @@ struct ceph_connection {
/* connection negotiation temps */
char in_banner[CEPH_BANNER_MAX_LEN];
- union {
- struct { /* outgoing connection */
- struct ceph_msg_connect out_connect;
- struct ceph_msg_connect_reply in_reply;
- };
- struct { /* incoming */
- struct ceph_msg_connect in_connect;
- struct ceph_msg_connect_reply out_reply;
- };
- };
+ struct ceph_msg_connect out_connect;
+ struct ceph_msg_connect_reply in_reply;
struct ceph_entity_addr actual_peer_addr;
/* message out temps */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index d3f5fba2c159..c90eaa803440 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -500,21 +500,8 @@ struct cgroup_subsys {
const char *name;
/*
- * Protects sibling/children links of cgroups in this
- * hierarchy, plus protects which hierarchy (or none) the
- * subsystem is a part of (i.e. root/sibling). To avoid
- * potential deadlocks, the following operations should not be
- * undertaken while holding any hierarchy_mutex:
- *
- * - allocating memory
- * - initiating hotplug events
- */
- struct mutex hierarchy_mutex;
- struct lock_class_key subsys_key;
-
- /*
* Link to parent, and list entry in parent's children.
- * Protected by this->hierarchy_mutex and cgroup_lock()
+ * Protected by cgroup_lock()
*/
struct cgroupfs_root *root;
struct list_head sibling;
@@ -602,7 +589,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
* the lifetime of cgroup_subsys_state is subsys's matter.
*
* Looking up and scanning function should be called under rcu_read_lock().
- * Taking cgroup_mutex()/hierarchy_mutex() is not necessary for following calls.
+ * Taking cgroup_mutex is not necessary for following calls.
* But the css returned by this routine can be "not populated yet" or "being
* destroyed". The caller should check css and cgroup's status.
*/
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index eb3f84bc5325..9c7f5807824b 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -64,7 +64,7 @@ struct clk {
.parent_names = _parent_names, \
.num_parents = ARRAY_SIZE(_parent_names), \
.parents = _parents, \
- .flags = _flags, \
+ .flags = _flags | CLK_IS_BASIC, \
}
#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \
@@ -103,9 +103,9 @@ struct clk {
DEFINE_CLK(_name, clk_gate_ops, _flags, \
_name##_parent_names, _name##_parents);
-#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
+#define _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
_flags, _reg, _shift, _width, \
- _divider_flags, _lock) \
+ _divider_flags, _table, _lock) \
static struct clk _name; \
static const char *_name##_parent_names[] = { \
_parent_name, \
@@ -121,11 +121,27 @@ struct clk {
.shift = _shift, \
.width = _width, \
.flags = _divider_flags, \
+ .table = _table, \
.lock = _lock, \
}; \
DEFINE_CLK(_name, clk_divider_ops, _flags, \
_name##_parent_names, _name##_parents);
+#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
+ _flags, _reg, _shift, _width, \
+ _divider_flags, _lock) \
+ _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
+ _flags, _reg, _shift, _width, \
+ _divider_flags, NULL, _lock)
+
+#define DEFINE_CLK_DIVIDER_TABLE(_name, _parent_name, \
+ _parent_ptr, _flags, _reg, \
+ _shift, _width, _divider_flags, \
+ _table, _lock) \
+ _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
+ _flags, _reg, _shift, _width, \
+ _divider_flags, _table, _lock) \
+
#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
_reg, _shift, _width, \
_mux_flags, _lock) \
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 4a0b483986c3..77335fac943e 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -25,6 +25,7 @@
#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */
#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */
+#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */
struct clk_hw;
@@ -143,7 +144,7 @@ struct clk_init_data {
*/
struct clk_hw {
struct clk *clk;
- struct clk_init_data *init;
+ const struct clk_init_data *init;
};
/*
@@ -171,6 +172,8 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
+void of_fixed_clk_setup(struct device_node *np);
+
/**
* struct clk_gate - gating clock
*
@@ -203,6 +206,11 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
+struct clk_div_table {
+ unsigned int val;
+ unsigned int div;
+};
+
/**
* struct clk_divider - adjustable divider clock
*
@@ -210,6 +218,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
* @reg: register containing the divider
* @shift: shift to the divider bit field
* @width: width of the divider bit field
+ * @table: array of value/divider pairs, last entry should have div = 0
* @lock: register lock
*
* Clock with an adjustable divider affecting its output frequency. Implements
@@ -229,6 +238,7 @@ struct clk_divider {
u8 shift;
u8 width;
u8 flags;
+ const struct clk_div_table *table;
spinlock_t *lock;
};
@@ -240,6 +250,11 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, spinlock_t *lock);
+struct clk *clk_register_divider_table(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width,
+ u8 clk_divider_flags, const struct clk_div_table *table,
+ spinlock_t *lock);
/**
* struct clk_mux - multiplexer clock
@@ -334,5 +349,19 @@ void __clk_unprepare(struct clk *clk);
void __clk_reparent(struct clk *clk, struct clk *new_parent);
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
+struct of_device_id;
+
+typedef void (*of_clk_init_cb_t)(struct device_node *);
+
+int of_clk_add_provider(struct device_node *np,
+ struct clk *(*clk_src_get)(struct of_phandle_args *args,
+ void *data),
+ void *data);
+void of_clk_del_provider(struct device_node *np);
+struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
+ void *data);
+const char *of_clk_get_parent_name(struct device_node *np, int index);
+void of_clk_init(const struct of_device_id *matches);
+
#endif /* CONFIG_COMMON_CLK */
#endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index ad5c43e8ae8a..2fd6a4234531 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -12,6 +12,7 @@
#ifndef __LINUX_CLK_H
#define __LINUX_CLK_H
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
@@ -86,7 +87,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
/**
* clk_get - lookup and obtain a reference to a clock producer.
* @dev: device for clock "consumer"
- * @id: clock comsumer ID
+ * @id: clock consumer ID
*
* Returns a struct clk corresponding to the clock producer, or
* valid IS_ERR() condition containing errno. The implementation
@@ -103,7 +104,7 @@ struct clk *clk_get(struct device *dev, const char *id);
/**
* devm_clk_get - lookup and obtain a managed reference to a clock producer.
* @dev: device for clock "consumer"
- * @id: clock comsumer ID
+ * @id: clock consumer ID
*
* Returns a struct clk corresponding to the clock producer, or
* valid IS_ERR() condition containing errno. The implementation
@@ -310,4 +311,23 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id);
int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
struct device *dev);
+struct device_node;
+struct of_phandle_args;
+
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+struct clk *of_clk_get(struct device_node *np, int index);
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name);
+struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec);
+#else
+static inline struct clk *of_clk_get(struct device_node *np, int index)
+{
+ return ERR_PTR(-ENOENT);
+}
+static inline struct clk *of_clk_get_by_name(struct device_node *np,
+ const char *name)
+{
+ return ERR_PTR(-ENOENT);
+}
+#endif
+
#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 2e9b9ebbeb78..ce7a074f2519 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -73,8 +73,9 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
- /* prepare workqueues for other notifiers */
- CPU_PRI_WORKQUEUE = 5,
+ /* bring up workqueues before normal notifiers and down after */
+ CPU_PRI_WORKQUEUE_UP = 5,
+ CPU_PRI_WORKQUEUE_DOWN = -5,
};
#define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
diff --git a/include/linux/cpu_rmap.h b/include/linux/cpu_rmap.h
index 473771a528c0..ac3bbb5b9502 100644
--- a/include/linux/cpu_rmap.h
+++ b/include/linux/cpu_rmap.h
@@ -1,3 +1,6 @@
+#ifndef __LINUX_CPU_RMAP_H
+#define __LINUX_CPU_RMAP_H
+
/*
* cpu_rmap.c: CPU affinity reverse-map support
* Copyright 2011 Solarflare Communications Inc.
@@ -71,3 +74,4 @@ extern void free_irq_cpu_rmap(struct cpu_rmap *rmap);
extern int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq);
#endif
+#endif /* __LINUX_CPU_RMAP_H */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 6c26a3da0e03..89dcd30ac8ea 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -34,6 +34,7 @@ struct cpuidle_driver;
struct cpuidle_state_usage {
void *driver_data;
+ unsigned long long disable;
unsigned long long usage;
unsigned long long time; /* in US */
};
@@ -46,7 +47,7 @@ struct cpuidle_state {
unsigned int exit_latency; /* in US */
int power_usage; /* in mW */
unsigned int target_residency; /* in US */
- unsigned int disable;
+ bool disabled; /* disabled on all CPUs */
int (*enter) (struct cpuidle_device *dev,
struct cpuidle_driver *drv,
@@ -136,13 +137,17 @@ struct cpuidle_driver {
extern void disable_cpuidle(void);
extern int cpuidle_idle_call(void);
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
-struct cpuidle_driver *cpuidle_get_driver(void);
+extern struct cpuidle_driver *cpuidle_get_driver(void);
+extern struct cpuidle_driver *cpuidle_driver_ref(void);
+extern void cpuidle_driver_unref(void);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
extern void cpuidle_pause_and_lock(void);
extern void cpuidle_resume_and_unlock(void);
+extern void cpuidle_pause(void);
+extern void cpuidle_resume(void);
extern int cpuidle_enable_device(struct cpuidle_device *dev);
extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_wrap_enter(struct cpuidle_device *dev,
@@ -157,6 +162,8 @@ static inline int cpuidle_idle_call(void) { return -ENODEV; }
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
+static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
+static inline void cpuidle_driver_unref(void) {}
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
@@ -164,6 +171,8 @@ static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
static inline void cpuidle_pause_and_lock(void) { }
static inline void cpuidle_resume_and_unlock(void) { }
+static inline void cpuidle_pause(void) { }
+static inline void cpuidle_resume(void) { }
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
{return -ENODEV; }
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
@@ -202,14 +211,7 @@ struct cpuidle_governor {
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
-#ifdef CONFIG_INTEL_IDLE
-extern int intel_idle_cpu_init(int cpu);
#else
-static inline int intel_idle_cpu_init(int cpu) { return -1; }
-#endif
-
-#else
-static inline int intel_idle_cpu_init(int cpu) { return -1; }
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 094789ff3e9f..caa34e50537e 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -128,7 +128,7 @@ struct dentry {
struct rcu_head d_rcu;
} d_u;
struct list_head d_subdirs; /* our children */
- struct list_head d_alias; /* inode alias list */
+ struct hlist_node d_alias; /* inode alias list */
};
/*
@@ -144,7 +144,7 @@ enum dentry_d_lock_class
};
struct dentry_operations {
- int (*d_revalidate)(struct dentry *, struct nameidata *);
+ int (*d_revalidate)(struct dentry *, unsigned int);
int (*d_hash)(const struct dentry *, const struct inode *,
struct qstr *);
int (*d_compare)(const struct dentry *, const struct inode *,
diff --git a/include/linux/device.h b/include/linux/device.h
index 161d96241b1b..5083bccae967 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -36,6 +36,7 @@ struct subsys_private;
struct bus_type;
struct device_node;
struct iommu_ops;
+struct iommu_group;
struct bus_attribute {
struct attribute attr;
@@ -687,6 +688,7 @@ struct device {
const struct attribute_group **groups; /* optional groups */
void (*release)(struct device *dev);
+ struct iommu_group *iommu_group;
};
/* Get the wakeup routines, which depend on struct device */
@@ -865,8 +867,6 @@ extern int (*platform_notify_remove)(struct device *dev);
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);
-extern void wait_for_device_probe(void);
-
#ifdef CONFIG_DEVTMPFS
extern int devtmpfs_create_node(struct device *dev);
extern int devtmpfs_delete_node(struct device *dev);
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 56377df39124..9c02a4508b25 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -338,6 +338,9 @@ enum dma_slave_buswidth {
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
* with 'true' if peripheral should be flow controller. Direction will be
* selected at Runtime.
+ * @slave_id: Slave requester id. Only valid for slave channels. The dma
+ * slave peripheral will have unique id as dma requester which need to be
+ * pass as slave config.
*
* This struct is passed in as configuration data to a DMA engine
* in order to set up a certain channel for DMA transport at runtime.
@@ -365,6 +368,7 @@ struct dma_slave_config {
u32 src_maxburst;
u32 dst_maxburst;
bool device_fc;
+ unsigned int slave_id;
};
static inline const char *dma_chan_name(struct dma_chan *chan)
@@ -670,6 +674,12 @@ static inline int dmaengine_resume(struct dma_chan *chan)
return dmaengine_device_control(chan, DMA_RESUME, 0);
}
+static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *state)
+{
+ return chan->device->device_tx_status(chan, cookie, state);
+}
+
static inline dma_cookie_t dmaengine_submit(struct dma_async_tx_descriptor *desc)
{
return desc->tx_submit(desc);
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 07261d52a6df..1148575fd134 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -53,4 +53,5 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs);
cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs);
void dw_apb_clocksource_unregister(struct dw_apb_clocksource *dw_cs);
+extern struct sys_timer dw_apb_timer;
#endif /* __DW_APB_TIMER_H__ */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 3d406e0ede6d..d426336d92d9 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -124,17 +124,30 @@ static inline bool is_valid_ether_addr(const u8 *addr)
}
/**
- * random_ether_addr - Generate software assigned random Ethernet address
+ * eth_random_addr - Generate software assigned random Ethernet address
* @addr: Pointer to a six-byte array containing the Ethernet address
*
* Generate a random Ethernet address (MAC) that is not multicast
* and has the local assigned bit set.
*/
-static inline void random_ether_addr(u8 *addr)
+static inline void eth_random_addr(u8 *addr)
{
- get_random_bytes (addr, ETH_ALEN);
- addr [0] &= 0xfe; /* clear multicast bit */
- addr [0] |= 0x02; /* set local assignment bit (IEEE802) */
+ get_random_bytes(addr, ETH_ALEN);
+ addr[0] &= 0xfe; /* clear multicast bit */
+ addr[0] |= 0x02; /* set local assignment bit (IEEE802) */
+}
+
+#define random_ether_addr(addr) eth_random_addr(addr)
+
+/**
+ * eth_broadcast_addr - Assign broadcast address
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Assign the broadcast address to the given address array.
+ */
+static inline void eth_broadcast_addr(u8 *addr)
+{
+ memset(addr, 0xff, ETH_ALEN);
}
/**
@@ -149,7 +162,7 @@ static inline void random_ether_addr(u8 *addr)
static inline void eth_hw_addr_random(struct net_device *dev)
{
dev->addr_assign_type |= NET_ADDR_RANDOM;
- random_ether_addr(dev->dev_addr);
+ eth_random_addr(dev->dev_addr);
}
/**
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e17fa7140588..21eff418091b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -137,6 +137,35 @@ struct ethtool_eeprom {
};
/**
+ * struct ethtool_eee - Energy Efficient Ethernet information
+ * @cmd: ETHTOOL_{G,S}EEE
+ * @supported: Mask of %SUPPORTED_* flags for the speed/duplex combinations
+ * for which there is EEE support.
+ * @advertised: Mask of %ADVERTISED_* flags for the speed/duplex combinations
+ * advertised as eee capable.
+ * @lp_advertised: Mask of %ADVERTISED_* flags for the speed/duplex
+ * combinations advertised by the link partner as eee capable.
+ * @eee_active: Result of the eee auto negotiation.
+ * @eee_enabled: EEE configured mode (enabled/disabled).
+ * @tx_lpi_enabled: Whether the interface should assert its tx lpi, given
+ * that eee was negotiated.
+ * @tx_lpi_timer: Time in microseconds the interface delays prior to asserting
+ * its tx lpi (after reaching 'idle' state). Effective only when eee
+ * was negotiated and tx_lpi_enabled was set.
+ */
+struct ethtool_eee {
+ __u32 cmd;
+ __u32 supported;
+ __u32 advertised;
+ __u32 lp_advertised;
+ __u32 eee_active;
+ __u32 eee_enabled;
+ __u32 tx_lpi_enabled;
+ __u32 tx_lpi_timer;
+ __u32 reserved[2];
+};
+
+/**
* struct ethtool_modinfo - plugin module eeprom information
* @cmd: %ETHTOOL_GMODULEINFO
* @type: Standard the module information conforms to %ETH_MODULE_SFF_xxxx
@@ -945,6 +974,8 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
* @get_module_info: Get the size and type of the eeprom contained within
* a plug-in module.
* @get_module_eeprom: Get the eeprom information from the plug-in module
+ * @get_eee: Get Energy-Efficient (EEE) supported and status.
+ * @set_eee: Set EEE status (enable/disable) as well as LPI timers.
*
* All operations are optional (i.e. the function pointer may be set
* to %NULL) and callers must take this into account. Callers must
@@ -1011,6 +1042,8 @@ struct ethtool_ops {
struct ethtool_modinfo *);
int (*get_module_eeprom)(struct net_device *,
struct ethtool_eeprom *, u8 *);
+ int (*get_eee)(struct net_device *, struct ethtool_eee *);
+ int (*set_eee)(struct net_device *, struct ethtool_eee *);
};
@@ -1089,6 +1122,8 @@ struct ethtool_ops {
#define ETHTOOL_GET_TS_INFO 0x00000041 /* Get time stamping and PHC info */
#define ETHTOOL_GMODULEINFO 0x00000042 /* Get plug-in module information */
#define ETHTOOL_GMODULEEEPROM 0x00000043 /* Get plug-in module eeprom */
+#define ETHTOOL_GEEE 0x00000044 /* Get EEE settings */
+#define ETHTOOL_SEEE 0x00000045 /* Set EEE settings */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -1118,6 +1153,10 @@ struct ethtool_ops {
#define SUPPORTED_10000baseR_FEC (1 << 20)
#define SUPPORTED_20000baseMLD2_Full (1 << 21)
#define SUPPORTED_20000baseKR2_Full (1 << 22)
+#define SUPPORTED_40000baseKR4_Full (1 << 23)
+#define SUPPORTED_40000baseCR4_Full (1 << 24)
+#define SUPPORTED_40000baseSR4_Full (1 << 25)
+#define SUPPORTED_40000baseLR4_Full (1 << 26)
/* Indicates what features are advertised by the interface. */
#define ADVERTISED_10baseT_Half (1 << 0)
@@ -1143,6 +1182,10 @@ struct ethtool_ops {
#define ADVERTISED_10000baseR_FEC (1 << 20)
#define ADVERTISED_20000baseMLD2_Full (1 << 21)
#define ADVERTISED_20000baseKR2_Full (1 << 22)
+#define ADVERTISED_40000baseKR4_Full (1 << 23)
+#define ADVERTISED_40000baseCR4_Full (1 << 24)
+#define ADVERTISED_40000baseSR4_Full (1 << 25)
+#define ADVERTISED_40000baseLR4_Full (1 << 26)
/* The following are all involved in forcing a particular link
* mode for the device for setting things. When getting the
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index 6f8be328770a..f4bb378ccf6a 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -34,7 +34,7 @@
* re-allowed until epoll_wait is called again after consuming the wakeup
* event(s).
*
- * Requires CAP_EPOLLWAKEUP
+ * Requires CAP_BLOCK_SUSPEND
*/
#define EPOLLWAKEUP (1 << 29)
diff --git a/include/linux/file.h b/include/linux/file.h
index 58bf158c53d9..a22408bac0d0 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -39,4 +39,7 @@ extern void put_unused_fd(unsigned int fd);
extern void fd_install(unsigned int fd, struct file *file);
+extern void flush_delayed_fput(void);
+extern void __fput_sync(struct file *);
+
#endif /* __LINUX_FILE_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 17fd887c798f..8fabb037a48d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -826,7 +826,7 @@ struct inode {
struct list_head i_lru; /* inode LRU list */
struct list_head i_sb_list;
union {
- struct list_head i_dentry;
+ struct hlist_head i_dentry;
struct rcu_head i_rcu;
};
u64 i_version;
@@ -1571,7 +1571,7 @@ extern void unlock_super(struct super_block *);
/*
* VFS helper functions..
*/
-extern int vfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *);
+extern int vfs_create(struct inode *, struct dentry *, umode_t, bool);
extern int vfs_mkdir(struct inode *, struct dentry *, umode_t);
extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t);
extern int vfs_symlink(struct inode *, struct dentry *, const char *);
@@ -1666,7 +1666,7 @@ struct file_operations {
};
struct inode_operations {
- struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *);
+ struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
void * (*follow_link) (struct dentry *, struct nameidata *);
int (*permission) (struct inode *, int);
struct posix_acl * (*get_acl)(struct inode *, int);
@@ -1674,7 +1674,7 @@ struct inode_operations {
int (*readlink) (struct dentry *, char __user *,int);
void (*put_link) (struct dentry *, struct nameidata *, void *);
- int (*create) (struct inode *,struct dentry *,umode_t,struct nameidata *);
+ int (*create) (struct inode *,struct dentry *, umode_t, bool);
int (*link) (struct dentry *,struct inode *,struct dentry *);
int (*unlink) (struct inode *,struct dentry *);
int (*symlink) (struct inode *,struct dentry *,const char *);
@@ -1693,6 +1693,9 @@ struct inode_operations {
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
u64 len);
int (*update_time)(struct inode *, struct timespec *, int);
+ int (*atomic_open)(struct inode *, struct dentry *,
+ struct file *, unsigned open_flag,
+ umode_t create_mode, int *opened);
} ____cacheline_aligned;
struct seq_file;
@@ -1911,7 +1914,7 @@ void free_anon_bdev(dev_t);
struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
- void *data);
+ int flags, void *data);
extern struct dentry *mount_pseudo(struct file_system_type *, char *,
const struct super_operations *ops,
const struct dentry_operations *dops,
@@ -2057,10 +2060,17 @@ extern long do_sys_open(int dfd, const char __user *filename, int flags,
extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int);
-extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
- const struct cred *);
+extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
extern char * getname(const char __user *);
+enum {
+ FILE_CREATED = 1,
+ FILE_OPENED = 2
+};
+extern int finish_open(struct file *file, struct dentry *dentry,
+ int (*open)(struct inode *, struct file *),
+ int *opened);
+extern int finish_no_open(struct file *file, struct dentry *dentry);
/* fs/ioctl.c */
@@ -2091,6 +2101,7 @@ extern sector_t blkdev_max_block(struct block_device *bdev);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern void invalidate_bdev(struct block_device *);
+extern void iterate_bdevs(void (*)(struct block_device *, void *), void *);
extern int sync_blockdev(struct block_device *bdev);
extern void kill_bdev(struct block_device *);
extern struct super_block *freeze_bdev(struct block_device *);
@@ -2112,6 +2123,10 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
return 0;
}
+
+static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg)
+{
+}
#endif
extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
@@ -2438,7 +2453,7 @@ extern loff_t noop_llseek(struct file *file, loff_t offset, int origin);
extern loff_t no_llseek(struct file *file, loff_t offset, int origin);
extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin);
extern loff_t generic_file_llseek_size(struct file *file, loff_t offset,
- int origin, loff_t maxsize);
+ int origin, loff_t maxsize, loff_t eof);
extern int generic_file_open(struct inode * inode, struct file * filp);
extern int nonseekable_open(struct inode * inode, struct file * filp);
@@ -2560,7 +2575,7 @@ extern int simple_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata);
-extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *);
+extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
extern ssize_t generic_read_dir(struct file *, char __user *, size_t, loff_t *);
extern const struct file_operations simple_dir_operations;
extern const struct inode_operations simple_dir_inode_operations;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 176a939d1547..af961d6f7ab1 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -65,7 +65,7 @@ struct trace_iterator {
void *private;
int cpu_file;
struct mutex mutex;
- struct ring_buffer_iter *buffer_iter[NR_CPUS];
+ struct ring_buffer_iter **buffer_iter;
unsigned long iter_flags;
/* trace_seq for __print_flags() and __print_symbolic() etc. */
@@ -207,6 +207,9 @@ struct ftrace_event_call {
* bit 1: enabled
* bit 2: filter_active
* bit 3: enabled cmd record
+ * bit 4: allow trace by non root (cap any)
+ * bit 5: failed to apply filter
+ * bit 6: ftrace internal event (do not enable)
*
* Changes to flags must hold the event_mutex.
*
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 7a114016ac7d..5ab61c1eb6bf 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -85,7 +85,7 @@ enum {
/* All generic netlink requests are serialized by a global lock. */
extern void genl_lock(void);
extern void genl_unlock(void);
-#ifdef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_LOCKDEP
extern int lockdep_genl_is_held(void);
#endif
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index fa98bdb073b9..b2de1f9a88d6 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -170,6 +170,16 @@ struct gfs2_rindex {
#define GFS2_RGF_NOALLOC 0x00000008
#define GFS2_RGF_TRIMMED 0x00000010
+struct gfs2_rgrp_lvb {
+ __be32 rl_magic;
+ __be32 rl_flags;
+ __be32 rl_free;
+ __be32 rl_dinodes;
+ __be64 rl_igeneration;
+ __be32 rl_unlinked;
+ __be32 __pad;
+};
+
struct gfs2_rgrp {
struct gfs2_meta_header rg_header;
@@ -214,6 +224,7 @@ enum {
gfs2fl_NoAtime = 7,
gfs2fl_Sync = 8,
gfs2fl_System = 9,
+ gfs2fl_TopLevel = 10,
gfs2fl_TruncInProg = 29,
gfs2fl_InheritDirectio = 30,
gfs2fl_InheritJdata = 31,
@@ -230,8 +241,9 @@ enum {
#define GFS2_DIF_NOATIME 0x00000080
#define GFS2_DIF_SYNC 0x00000100
#define GFS2_DIF_SYSTEM 0x00000200 /* New in gfs2 */
+#define GFS2_DIF_TOPDIR 0x00000400 /* New in gfs2 */
#define GFS2_DIF_TRUNC_IN_PROG 0x20000000 /* New in gfs2 */
-#define GFS2_DIF_INHERIT_DIRECTIO 0x40000000
+#define GFS2_DIF_INHERIT_DIRECTIO 0x40000000 /* only in gfs1 */
#define GFS2_DIF_INHERIT_JDATA 0x80000000
struct gfs2_dinode {
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index f07fc2d08159..2e31e8b3a190 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -22,8 +22,8 @@
/* Gpio pin is open source */
#define GPIOF_OPEN_SOURCE (1 << 3)
-#define GPIOF_EXPORT (1 << 2)
-#define GPIOF_EXPORT_CHANGEABLE (1 << 3)
+#define GPIOF_EXPORT (1 << 4)
+#define GPIOF_EXPORT_CHANGEABLE (1 << 5)
#define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT)
#define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE)
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 449fa385703d..42970de1b40c 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -200,6 +200,7 @@ struct hid_item {
#define HID_UP_DIGITIZER 0x000d0000
#define HID_UP_PID 0x000f0000
#define HID_UP_HPVENDOR 0xff7f0000
+#define HID_UP_HPVENDOR2 0xff010000
#define HID_UP_MSVENDOR 0xff000000
#define HID_UP_CUSTOM 0x00ff0000
#define HID_UP_LOGIVENDOR 0xffbc0000
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index fd0dc30c9f15..cc07d2777bbe 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -165,6 +165,7 @@ enum hrtimer_base_type {
* @lock: lock protecting the base and associated clock bases
* and timers
* @active_bases: Bitfield to mark bases with active timers
+ * @clock_was_set: Indicates that clock was set from irq context.
* @expires_next: absolute time of the next event which was scheduled
* via clock_set_next_event()
* @hres_active: State of high resolution mode
@@ -177,7 +178,8 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
- unsigned long active_bases;
+ unsigned int active_bases;
+ unsigned int clock_was_set;
#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires_next;
int hres_active;
@@ -286,6 +288,8 @@ extern void hrtimer_peek_ahead_timers(void);
# define MONOTONIC_RES_NSEC HIGH_RES_NSEC
# define KTIME_MONOTONIC_RES KTIME_HIGH_RES
+extern void clock_was_set_delayed(void);
+
#else
# define MONOTONIC_RES_NSEC LOW_RES_NSEC
@@ -306,6 +310,9 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return 0;
}
+
+static inline void clock_was_set_delayed(void) { }
+
#endif
extern void clock_was_set(void);
@@ -320,6 +327,7 @@ extern ktime_t ktime_get(void);
extern ktime_t ktime_get_real(void);
extern ktime_t ktime_get_boottime(void);
extern ktime_t ktime_get_monotonic_offset(void);
+extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot);
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index ddfa04108baf..1d0fe4877b1f 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -425,6 +425,8 @@ void i2c_unlock_adapter(struct i2c_adapter *);
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
+#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
+ /* Must match I2C_M_STOP|IGNORE_NAK */
/* i2c adapter classes (bitmask) */
#define I2C_CLASS_HWMON (1<<0) /* lm_sensors, ... */
@@ -541,6 +543,7 @@ struct i2c_msg {
__u16 flags;
#define I2C_M_TEN 0x0010 /* this is a ten bit chip address */
#define I2C_M_RD 0x0001 /* read data, from slave to master */
+#define I2C_M_STOP 0x8000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_NOSTART 0x4000 /* if I2C_FUNC_NOSTART */
#define I2C_M_REV_DIR_ADDR 0x2000 /* if I2C_FUNC_PROTOCOL_MANGLING */
#define I2C_M_IGNORE_NAK 0x1000 /* if I2C_FUNC_PROTOCOL_MANGLING */
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 3993477103a5..555382660bc4 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -683,7 +683,6 @@ struct twl4030_audio_data {
};
struct twl4030_platform_data {
- unsigned irq_base, irq_end;
struct twl4030_clock_init_data *clock;
struct twl4030_bci_platform_data *bci;
struct twl4030_gpio_platform_data *gpio;
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index ce9af8918514..e02fc682bb68 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -47,6 +47,7 @@
#define IEEE80211_FCTL_MOREDATA 0x2000
#define IEEE80211_FCTL_PROTECTED 0x4000
#define IEEE80211_FCTL_ORDER 0x8000
+#define IEEE80211_FCTL_CTL_EXT 0x0f00
#define IEEE80211_SCTL_FRAG 0x000F
#define IEEE80211_SCTL_SEQ 0xFFF0
@@ -54,6 +55,7 @@
#define IEEE80211_FTYPE_MGMT 0x0000
#define IEEE80211_FTYPE_CTL 0x0004
#define IEEE80211_FTYPE_DATA 0x0008
+#define IEEE80211_FTYPE_EXT 0x000c
/* management */
#define IEEE80211_STYPE_ASSOC_REQ 0x0000
@@ -70,6 +72,7 @@
#define IEEE80211_STYPE_ACTION 0x00D0
/* control */
+#define IEEE80211_STYPE_CTL_EXT 0x0060
#define IEEE80211_STYPE_BACK_REQ 0x0080
#define IEEE80211_STYPE_BACK 0x0090
#define IEEE80211_STYPE_PSPOLL 0x00A0
@@ -97,6 +100,18 @@
#define IEEE80211_STYPE_QOS_CFPOLL 0x00E0
#define IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0
+/* extension, added by 802.11ad */
+#define IEEE80211_STYPE_DMG_BEACON 0x0000
+
+/* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
+#define IEEE80211_CTL_EXT_POLL 0x2000
+#define IEEE80211_CTL_EXT_SPR 0x3000
+#define IEEE80211_CTL_EXT_GRANT 0x4000
+#define IEEE80211_CTL_EXT_DMG_CTS 0x5000
+#define IEEE80211_CTL_EXT_DMG_DTS 0x6000
+#define IEEE80211_CTL_EXT_SSW 0x8000
+#define IEEE80211_CTL_EXT_SSW_FBACK 0x9000
+#define IEEE80211_CTL_EXT_SSW_ACK 0xa000
/* miscellaneous IEEE 802.11 constants */
#define IEEE80211_MAX_FRAG_THRESHOLD 2352
@@ -568,6 +583,26 @@ struct ieee80211s_hdr {
#define MESH_FLAGS_PS_DEEP 0x4
/**
+ * enum ieee80211_preq_flags - mesh PREQ element flags
+ *
+ * @IEEE80211_PREQ_PROACTIVE_PREP_FLAG: proactive PREP subfield
+ */
+enum ieee80211_preq_flags {
+ IEEE80211_PREQ_PROACTIVE_PREP_FLAG = 1<<2,
+};
+
+/**
+ * enum ieee80211_preq_target_flags - mesh PREQ element per target flags
+ *
+ * @IEEE80211_PREQ_TO_FLAG: target only subfield
+ * @IEEE80211_PREQ_USN_FLAG: unknown target HWMP sequence number subfield
+ */
+enum ieee80211_preq_target_flags {
+ IEEE80211_PREQ_TO_FLAG = 1<<0,
+ IEEE80211_PREQ_USN_FLAG = 1<<2,
+};
+
+/**
* struct ieee80211_quiet_ie
*
* This structure refers to "Quiet information element"
@@ -1072,6 +1107,73 @@ struct ieee80211_ht_operation {
#define WLAN_HT_SMPS_CONTROL_STATIC 1
#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
+#define VHT_MCS_SUPPORTED_SET_SIZE 8
+
+struct ieee80211_vht_capabilities {
+ __le32 vht_capabilities_info;
+ u8 vht_supported_mcs_set[VHT_MCS_SUPPORTED_SET_SIZE];
+} __packed;
+
+struct ieee80211_vht_operation {
+ u8 vht_op_info_chwidth;
+ u8 vht_op_info_chan_center_freq_seg1_idx;
+ u8 vht_op_info_chan_center_freq_seg2_idx;
+ __le16 vht_basic_mcs_set;
+} __packed;
+
+/**
+ * struct ieee80211_vht_mcs_info - VHT MCS information
+ * @rx_mcs_map: RX MCS map 2 bits for each stream, total 8 streams
+ * @rx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can receive. Rate expressed in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest RX data rate supported.
+ * @tx_mcs_map: TX MCS map 2 bits for each stream, total 8 streams
+ * @tx_highest: Indicates highest long GI VHT PPDU data rate
+ * STA can transmit. Rate expressed in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest TX data rate supported.
+ */
+struct ieee80211_vht_mcs_info {
+ __le16 rx_mcs_map;
+ __le16 rx_highest;
+ __le16 tx_mcs_map;
+ __le16 tx_highest;
+} __packed;
+
+#define IEEE80211_VHT_MCS_ZERO_TO_SEVEN_SUPPORT 0
+#define IEEE80211_VHT_MCS_ZERO_TO_EIGHT_SUPPORT 1
+#define IEEE80211_VHT_MCS_ZERO_TO_NINE_SUPPORT 2
+#define IEEE80211_VHT_MCS_NOT_SUPPORTED 3
+
+/* 802.11ac VHT Capabilities */
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
+#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 0x00000002
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ 0x00000004
+#define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008
+#define IEEE80211_VHT_CAP_RXLDPC 0x00000010
+#define IEEE80211_VHT_CAP_SHORT_GI_80 0x00000020
+#define IEEE80211_VHT_CAP_SHORT_GI_160 0x00000040
+#define IEEE80211_VHT_CAP_TXSTBC 0x00000080
+#define IEEE80211_VHT_CAP_RXSTBC_1 0x00000100
+#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
+#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
+#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
+#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
+#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
+#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX 0x00006000
+#define IEEE80211_VHT_CAP_SOUNDING_DIMENTION_MAX 0x00030000
+#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
+#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
+#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
+#define IEEE80211_VHT_CAP_HTC_VHT 0x00400000
+#define IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT 0x00800000
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB 0x08000000
+#define IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB 0x0c000000
+#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
+#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
@@ -1104,6 +1206,21 @@ struct ieee80211_ht_operation {
#define WLAN_CAPABILITY_QOS (1<<9)
#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10)
#define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
+
+/* DMG (60gHz) 802.11ad */
+/* type - bits 0..1 */
+#define WLAN_CAPABILITY_DMG_TYPE_IBSS (1<<0) /* Tx by: STA */
+#define WLAN_CAPABILITY_DMG_TYPE_PBSS (2<<0) /* Tx by: PCP */
+#define WLAN_CAPABILITY_DMG_TYPE_AP (3<<0) /* Tx by: AP */
+
+#define WLAN_CAPABILITY_DMG_CBAP_ONLY (1<<2)
+#define WLAN_CAPABILITY_DMG_CBAP_SOURCE (1<<3)
+#define WLAN_CAPABILITY_DMG_PRIVACY (1<<4)
+#define WLAN_CAPABILITY_DMG_ECPAC (1<<5)
+
+#define WLAN_CAPABILITY_DMG_SPECTRUM_MGMT (1<<8)
+#define WLAN_CAPABILITY_DMG_RADIO_MEASURE (1<<12)
+
/* measurement */
#define IEEE80211_SPCT_MSR_RPRT_MODE_LATE (1<<0)
#define IEEE80211_SPCT_MSR_RPRT_MODE_INCAPABLE (1<<1)
@@ -1113,7 +1230,6 @@ struct ieee80211_ht_operation {
#define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1
#define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2
-
/* 802.11g ERP information element */
#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
#define WLAN_ERP_USE_PROTECTION (1<<1)
@@ -1125,6 +1241,16 @@ enum {
WLAN_ERP_PREAMBLE_LONG = 1,
};
+/* Band ID, 802.11ad #8.4.1.45 */
+enum {
+ IEEE80211_BANDID_TV_WS = 0, /* TV white spaces */
+ IEEE80211_BANDID_SUB1 = 1, /* Sub-1 GHz (excluding TV white spaces) */
+ IEEE80211_BANDID_2G = 2, /* 2.4 GHz */
+ IEEE80211_BANDID_3G = 3, /* 3.6 GHz */
+ IEEE80211_BANDID_5G = 4, /* 4.9 and 5 GHz */
+ IEEE80211_BANDID_60G = 5, /* 60 GHz */
+};
+
/* Status codes */
enum ieee80211_statuscode {
WLAN_STATUS_SUCCESS = 0,
@@ -1176,6 +1302,17 @@ enum ieee80211_statuscode {
WLAN_STATUS_ANTI_CLOG_REQUIRED = 76,
WLAN_STATUS_FCG_NOT_SUPP = 78,
WLAN_STATUS_STA_NO_TBTT = 78,
+ /* 802.11ad */
+ WLAN_STATUS_REJECTED_WITH_SUGGESTED_CHANGES = 39,
+ WLAN_STATUS_REJECTED_FOR_DELAY_PERIOD = 47,
+ WLAN_STATUS_REJECT_WITH_SCHEDULE = 83,
+ WLAN_STATUS_PENDING_ADMITTING_FST_SESSION = 86,
+ WLAN_STATUS_PERFORMING_FST_NOW = 87,
+ WLAN_STATUS_PENDING_GAP_IN_BA_WINDOW = 88,
+ WLAN_STATUS_REJECT_U_PID_SETTING = 89,
+ WLAN_STATUS_REJECT_DSE_BAND = 96,
+ WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
+ WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
};
@@ -1332,6 +1469,43 @@ enum ieee80211_eid {
WLAN_EID_DSE_REGISTERED_LOCATION = 58,
WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
WLAN_EID_EXT_CHANSWITCH_ANN = 60,
+
+ WLAN_EID_VHT_CAPABILITY = 191,
+ WLAN_EID_VHT_OPERATION = 192,
+
+ /* 802.11ad */
+ WLAN_EID_NON_TX_BSSID_CAP = 83,
+ WLAN_EID_WAKEUP_SCHEDULE = 143,
+ WLAN_EID_EXT_SCHEDULE = 144,
+ WLAN_EID_STA_AVAILABILITY = 145,
+ WLAN_EID_DMG_TSPEC = 146,
+ WLAN_EID_DMG_AT = 147,
+ WLAN_EID_DMG_CAP = 148,
+ WLAN_EID_DMG_OPERATION = 151,
+ WLAN_EID_DMG_BSS_PARAM_CHANGE = 152,
+ WLAN_EID_DMG_BEAM_REFINEMENT = 153,
+ WLAN_EID_CHANNEL_MEASURE_FEEDBACK = 154,
+ WLAN_EID_AWAKE_WINDOW = 157,
+ WLAN_EID_MULTI_BAND = 158,
+ WLAN_EID_ADDBA_EXT = 159,
+ WLAN_EID_NEXT_PCP_LIST = 160,
+ WLAN_EID_PCP_HANDOVER = 161,
+ WLAN_EID_DMG_LINK_MARGIN = 162,
+ WLAN_EID_SWITCHING_STREAM = 163,
+ WLAN_EID_SESSION_TRANSITION = 164,
+ WLAN_EID_DYN_TONE_PAIRING_REPORT = 165,
+ WLAN_EID_CLUSTER_REPORT = 166,
+ WLAN_EID_RELAY_CAP = 167,
+ WLAN_EID_RELAY_XFER_PARAM_SET = 168,
+ WLAN_EID_BEAM_LINK_MAINT = 169,
+ WLAN_EID_MULTIPLE_MAC_ADDR = 170,
+ WLAN_EID_U_PID = 171,
+ WLAN_EID_DMG_LINK_ADAPT_ACK = 172,
+ WLAN_EID_QUIET_PERIOD_REQ = 175,
+ WLAN_EID_QUIET_PERIOD_RESP = 177,
+ WLAN_EID_EPAC_POLICY = 182,
+ WLAN_EID_CLISTER_TIME_OFF = 183,
+ WLAN_EID_ANTENNA_SECTOR_ID_PATTERN = 190,
};
/* Action category code */
@@ -1348,7 +1522,10 @@ enum ieee80211_category {
WLAN_CATEGORY_MESH_ACTION = 13,
WLAN_CATEGORY_MULTIHOP_ACTION = 14,
WLAN_CATEGORY_SELF_PROTECTED = 15,
+ WLAN_CATEGORY_DMG = 16,
WLAN_CATEGORY_WMM = 17,
+ WLAN_CATEGORY_FST = 18,
+ WLAN_CATEGORY_UNPROT_DMG = 20,
WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
};
@@ -1443,7 +1620,7 @@ enum ieee80211_tdls_actioncode {
*
* @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
* @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
- * that will be specified in a vendor specific information element
+ * that will be specified in a vendor specific information element
*/
enum {
IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
@@ -1455,7 +1632,7 @@ enum {
*
* @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
* @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
- * be specified in a vendor specific information element
+ * be specified in a vendor specific information element
*/
enum {
IEEE80211_PATH_PROTOCOL_HWMP = 1,
@@ -1467,13 +1644,35 @@ enum {
*
* @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
* @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
- * specified in a vendor specific information element
+ * specified in a vendor specific information element
*/
enum {
IEEE80211_PATH_METRIC_AIRTIME = 1,
IEEE80211_PATH_METRIC_VENDOR = 255,
};
+/**
+ * enum ieee80211_root_mode_identifier - root mesh STA mode identifier
+ *
+ * These attribute are used by dot11MeshHWMPRootMode to set root mesh STA mode
+ *
+ * @IEEE80211_ROOTMODE_NO_ROOT: the mesh STA is not a root mesh STA (default)
+ * @IEEE80211_ROOTMODE_ROOT: the mesh STA is a root mesh STA if greater than
+ * this value
+ * @IEEE80211_PROACTIVE_PREQ_NO_PREP: the mesh STA is a root mesh STA supports
+ * the proactive PREQ with proactive PREP subfield set to 0
+ * @IEEE80211_PROACTIVE_PREQ_WITH_PREP: the mesh STA is a root mesh STA
+ * supports the proactive PREQ with proactive PREP subfield set to 1
+ * @IEEE80211_PROACTIVE_RANN: the mesh STA is a root mesh STA supports
+ * the proactive RANN
+ */
+enum ieee80211_root_mode_identifier {
+ IEEE80211_ROOTMODE_NO_ROOT = 0,
+ IEEE80211_ROOTMODE_ROOT = 1,
+ IEEE80211_PROACTIVE_PREQ_NO_PREP = 2,
+ IEEE80211_PROACTIVE_PREQ_WITH_PREP = 3,
+ IEEE80211_PROACTIVE_RANN = 4,
+};
/*
* IEEE 802.11-2007 7.3.2.9 Country information element
@@ -1574,6 +1773,7 @@ enum ieee80211_sa_query_action {
#define WLAN_CIPHER_SUITE_CCMP 0x000FAC04
#define WLAN_CIPHER_SUITE_WEP104 0x000FAC05
#define WLAN_CIPHER_SUITE_AES_CMAC 0x000FAC06
+#define WLAN_CIPHER_SUITE_GCMP 0x000FAC08
#define WLAN_CIPHER_SUITE_SMS4 0x00147201
@@ -1589,6 +1789,10 @@ enum ieee80211_sa_query_action {
#define WLAN_OUI_WFA 0x506f9a
#define WLAN_OUI_TYPE_WFA_P2P 9
+#define WLAN_OUI_MICROSOFT 0x0050f2
+#define WLAN_OUI_TYPE_MICROSOFT_WPA 1
+#define WLAN_OUI_TYPE_MICROSOFT_WMM 2
+#define WLAN_OUI_TYPE_MICROSOFT_WPS 4
/*
* WMM/802.11e Tspec Element
diff --git a/include/linux/if.h b/include/linux/if.h
index f995c663c493..1ec407b01e46 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -81,6 +81,8 @@
#define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */
#define IFF_TEAM_PORT 0x40000 /* device used as team port */
#define IFF_SUPP_NOFCS 0x80000 /* device supports sending custom FCS */
+#define IFF_LIVE_ADDR_CHANGE 0x100000 /* device supports hardware address
+ * change when it's running */
#define IF_GET_IFACE 0x0001 /* for querying only */
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 56d907a2c804..167ce5b363d2 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -105,7 +105,8 @@
#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/
#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */
#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
-#define ETH_P_CAN 0x000C /* Controller Area Network */
+#define ETH_P_CAN 0x000C /* CAN: Controller Area Network */
+#define ETH_P_CANFD 0x000D /* CANFD: CAN flexible data rate*/
#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index f715750d0b87..ac173bd2ab65 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -140,6 +140,8 @@ enum {
IFLA_EXT_MASK, /* Extended info mask, VFs, etc */
IFLA_PROMISCUITY, /* Promiscuity count: > 0 means acts PROMISC */
#define IFLA_PROMISCUITY IFLA_PROMISCUITY
+ IFLA_NUM_TX_QUEUES,
+ IFLA_NUM_RX_QUEUES,
__IFLA_MAX
};
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 8185f57a9c7f..6960fc1841a7 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -13,6 +13,9 @@
#ifdef __KERNEL__
+#include <linux/netpoll.h>
+#include <net/sch_generic.h>
+
struct team_pcpu_stats {
u64 rx_packets;
u64 rx_bytes;
@@ -60,9 +63,54 @@ struct team_port {
unsigned int mtu;
} orig;
- struct rcu_head rcu;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ struct netpoll *np;
+#endif
+
+ long mode_priv[0];
};
+static inline bool team_port_enabled(struct team_port *port)
+{
+ return port->index != -1;
+}
+
+static inline bool team_port_txable(struct team_port *port)
+{
+ return port->linkup && team_port_enabled(port);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static inline void team_netpoll_send_skb(struct team_port *port,
+ struct sk_buff *skb)
+{
+ struct netpoll *np = port->np;
+
+ if (np)
+ netpoll_send_skb(np, skb);
+}
+#else
+static inline void team_netpoll_send_skb(struct team_port *port,
+ struct sk_buff *skb)
+{
+}
+#endif
+
+static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
+ struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+ sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+ skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+
+ skb->dev = port->dev;
+ if (unlikely(netpoll_tx_running(port->dev))) {
+ team_netpoll_send_skb(port, skb);
+ return 0;
+ }
+ return dev_queue_xmit(skb);
+}
+
struct team_mode_ops {
int (*init)(struct team *team);
void (*exit)(struct team *team);
@@ -73,6 +121,8 @@ struct team_mode_ops {
int (*port_enter)(struct team *team, struct team_port *port);
void (*port_leave)(struct team *team, struct team_port *port);
void (*port_change_mac)(struct team *team, struct team_port *port);
+ void (*port_enabled)(struct team *team, struct team_port *port);
+ void (*port_disabled)(struct team *team, struct team_port *port);
};
enum team_option_type {
@@ -82,6 +132,11 @@ enum team_option_type {
TEAM_OPTION_TYPE_BOOL,
};
+struct team_option_inst_info {
+ u32 array_index;
+ struct team_port *port; /* != NULL if per-port */
+};
+
struct team_gsetter_ctx {
union {
u32 u32_val;
@@ -92,23 +147,28 @@ struct team_gsetter_ctx {
} bin_val;
bool bool_val;
} data;
- struct team_port *port;
+ struct team_option_inst_info *info;
};
struct team_option {
struct list_head list;
const char *name;
bool per_port;
+ unsigned int array_size; /* != 0 means the option is array */
enum team_option_type type;
+ int (*init)(struct team *team, struct team_option_inst_info *info);
int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
+extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
+extern void team_options_change_check(struct team *team);
+
struct team_mode {
- struct list_head list;
const char *kind;
struct module *owner;
size_t priv_size;
+ size_t port_priv_size;
const struct team_mode_ops *ops;
};
@@ -178,8 +238,11 @@ extern int team_options_register(struct team *team,
extern void team_options_unregister(struct team *team,
const struct team_option *option,
size_t option_count);
-extern int team_mode_register(struct team_mode *mode);
-extern int team_mode_unregister(struct team_mode *mode);
+extern int team_mode_register(const struct team_mode *mode);
+extern void team_mode_unregister(const struct team_mode *mode);
+
+#define TEAM_DEFAULT_NUM_TX_QUEUES 16
+#define TEAM_DEFAULT_NUM_RX_QUEUES 16
#endif /* __KERNEL__ */
@@ -241,6 +304,7 @@ enum {
TEAM_ATTR_OPTION_DATA, /* dynamic */
TEAM_ATTR_OPTION_REMOVED, /* flag */
TEAM_ATTR_OPTION_PORT_IFINDEX, /* u32 */ /* for per-port options */
+ TEAM_ATTR_OPTION_ARRAY_INDEX, /* u32 */ /* for array options */
__TEAM_ATTR_OPTION_MAX,
TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
diff --git a/include/linux/if_tunnel.h b/include/linux/if_tunnel.h
index 16b92d008bed..5efff60b6f56 100644
--- a/include/linux/if_tunnel.h
+++ b/include/linux/if_tunnel.h
@@ -80,4 +80,18 @@ enum {
#define IFLA_GRE_MAX (__IFLA_GRE_MAX - 1)
+/* VTI-mode i_flags */
+#define VTI_ISVTI 0x0001
+
+enum {
+ IFLA_VTI_UNSPEC,
+ IFLA_VTI_LINK,
+ IFLA_VTI_IKEY,
+ IFLA_VTI_OKEY,
+ IFLA_VTI_LOCAL,
+ IFLA_VTI_REMOTE,
+ __IFLA_VTI_MAX,
+};
+
+#define IFLA_VTI_MAX (__IFLA_VTI_MAX - 1)
#endif /* _IF_TUNNEL_H_ */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 597f4a9f3240..67f9ddacb70c 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -38,6 +38,7 @@ enum
IPV4_DEVCONF_ACCEPT_LOCAL,
IPV4_DEVCONF_SRC_VMARK,
IPV4_DEVCONF_PROXY_ARP_PVLAN,
+ IPV4_DEVCONF_ROUTE_LOCALNET,
__IPV4_DEVCONF_MAX
};
@@ -131,6 +132,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_PROMOTE_SECONDARIES(in_dev) \
IN_DEV_ORCONF((in_dev), \
PROMOTE_SECONDARIES)
+#define IN_DEV_ROUTE_LOCALNET(in_dev) IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET)
#define IN_DEV_RX_REDIRECTS(in_dev) \
((IN_DEV_FORWARD(in_dev) && \
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 9e65eff6af3b..8a7476186990 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -168,8 +168,8 @@ extern struct cred init_cred;
.children = LIST_HEAD_INIT(tsk.children), \
.sibling = LIST_HEAD_INIT(tsk.sibling), \
.group_leader = &tsk, \
- RCU_INIT_POINTER(.real_cred, &init_cred), \
- RCU_INIT_POINTER(.cred, &init_cred), \
+ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \
+ RCU_POINTER_INITIALIZER(cred, &init_cred), \
.comm = INIT_TASK_COMM, \
.thread = INIT_THREAD, \
.fs = &init_fs, \
diff --git a/include/linux/input.h b/include/linux/input.h
index a81671453575..2740d080ec6b 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -116,6 +116,7 @@ struct input_keymap_entry {
/**
* EVIOCGMTSLOTS(len) - get MT slot values
+ * @len: size of the data buffer in bytes
*
* The ioctl buffer argument should be binary equivalent to
*
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index e6ca56de9936..78e2ada50cd5 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -308,6 +308,8 @@ enum {
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
+ u64 reg_phys; /* physical address of hw register set */
+ u64 reg_size; /* size of hw register set */
u64 cap;
u64 ecap;
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
diff --git a/include/linux/io.h b/include/linux/io.h
index 7fd2d2138bf3..069e4075f872 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -67,4 +67,13 @@ int check_signature(const volatile void __iomem *io_addr,
const unsigned char *signature, int length);
void devm_ioremap_release(struct device *dev, void *res);
+/*
+ * Some systems do not have legacy ISA devices.
+ * /dev/port is not a valid interface on these systems.
+ * So for those archs, <asm/io.h> should define the following symbol.
+ */
+#ifndef arch_has_dev_port
+#define arch_has_dev_port() (1)
+#endif
+
#endif /* _LINUX_IO_H */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 450293f6d68b..54d6d690073c 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -26,6 +26,7 @@
#define IOMMU_CACHE (4) /* DMA cache coherency */
struct iommu_ops;
+struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
@@ -37,16 +38,28 @@ struct iommu_domain;
typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
struct device *, unsigned long, int, void *);
+struct iommu_domain_geometry {
+ dma_addr_t aperture_start; /* First address that can be mapped */
+ dma_addr_t aperture_end; /* Last address that can be mapped */
+ bool force_aperture; /* DMA only allowed in mappable range? */
+};
+
struct iommu_domain {
struct iommu_ops *ops;
void *priv;
iommu_fault_handler_t handler;
void *handler_token;
+ struct iommu_domain_geometry geometry;
};
#define IOMMU_CAP_CACHE_COHERENCY 0x1
#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */
+enum iommu_attr {
+ DOMAIN_ATTR_MAX,
+ DOMAIN_ATTR_GEOMETRY,
+};
+
#ifdef CONFIG_IOMMU_API
/**
@@ -59,7 +72,10 @@ struct iommu_domain {
* @unmap: unmap a physically contiguous memory region from an iommu domain
* @iova_to_phys: translate iova to physical address
* @domain_has_cap: domain capabilities query
- * @commit: commit iommu domain
+ * @add_device: add device to iommu grouping
+ * @remove_device: remove device from iommu grouping
+ * @domain_get_attr: Query domain attributes
+ * @domain_set_attr: Change domain attributes
* @pgsize_bitmap: bitmap of supported page sizes
*/
struct iommu_ops {
@@ -75,10 +91,23 @@ struct iommu_ops {
unsigned long iova);
int (*domain_has_cap)(struct iommu_domain *domain,
unsigned long cap);
+ int (*add_device)(struct device *dev);
+ void (*remove_device)(struct device *dev);
int (*device_group)(struct device *dev, unsigned int *groupid);
+ int (*domain_get_attr)(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data);
+ int (*domain_set_attr)(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data);
unsigned long pgsize_bitmap;
};
+#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
+#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
+#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
+#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER 4 /* Post Driver bind */
+#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */
+#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */
+
extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
@@ -97,7 +126,34 @@ extern int iommu_domain_has_cap(struct iommu_domain *domain,
unsigned long cap);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
-extern int iommu_device_group(struct device *dev, unsigned int *groupid);
+
+extern int iommu_attach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+extern void iommu_detach_group(struct iommu_domain *domain,
+ struct iommu_group *group);
+extern struct iommu_group *iommu_group_alloc(void);
+extern void *iommu_group_get_iommudata(struct iommu_group *group);
+extern void iommu_group_set_iommudata(struct iommu_group *group,
+ void *iommu_data,
+ void (*release)(void *iommu_data));
+extern int iommu_group_set_name(struct iommu_group *group, const char *name);
+extern int iommu_group_add_device(struct iommu_group *group,
+ struct device *dev);
+extern void iommu_group_remove_device(struct device *dev);
+extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+ int (*fn)(struct device *, void *));
+extern struct iommu_group *iommu_group_get(struct device *dev);
+extern void iommu_group_put(struct iommu_group *group);
+extern int iommu_group_register_notifier(struct iommu_group *group,
+ struct notifier_block *nb);
+extern int iommu_group_unregister_notifier(struct iommu_group *group,
+ struct notifier_block *nb);
+extern int iommu_group_id(struct iommu_group *group);
+
+extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
+ void *data);
+extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
+ void *data);
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
@@ -142,6 +198,7 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
+struct iommu_group {};
static inline bool iommu_present(struct bus_type *bus)
{
@@ -197,11 +254,88 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}
-static inline int iommu_device_group(struct device *dev, unsigned int *groupid)
+int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
+{
+ return -ENODEV;
+}
+
+void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
+{
+}
+
+struct iommu_group *iommu_group_alloc(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+void *iommu_group_get_iommudata(struct iommu_group *group)
+{
+ return NULL;
+}
+
+void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
+ void (*release)(void *iommu_data))
+{
+}
+
+int iommu_group_set_name(struct iommu_group *group, const char *name)
+{
+ return -ENODEV;
+}
+
+int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+{
+ return -ENODEV;
+}
+
+void iommu_group_remove_device(struct device *dev)
+{
+}
+
+int iommu_group_for_each_dev(struct iommu_group *group, void *data,
+ int (*fn)(struct device *, void *))
+{
+ return -ENODEV;
+}
+
+struct iommu_group *iommu_group_get(struct device *dev)
+{
+ return NULL;
+}
+
+void iommu_group_put(struct iommu_group *group)
+{
+}
+
+int iommu_group_register_notifier(struct iommu_group *group,
+ struct notifier_block *nb)
{
return -ENODEV;
}
+int iommu_group_unregister_notifier(struct iommu_group *group,
+ struct notifier_block *nb)
+{
+ return 0;
+}
+
+int iommu_group_id(struct iommu_group *group)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ return -EINVAL;
+}
+
+static inline int iommu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ return -EINVAL;
+}
+
#endif /* CONFIG_IOMMU_API */
#endif /* __LINUX_IOMMU_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 8260ef779762..379e433e15e0 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -299,9 +299,9 @@ struct ipv6_pinfo {
struct in6_addr rcv_saddr;
struct in6_addr daddr;
struct in6_pktinfo sticky_pktinfo;
- struct in6_addr *daddr_cache;
+ const struct in6_addr *daddr_cache;
#ifdef CONFIG_IPV6_SUBTREES
- struct in6_addr *saddr_cache;
+ const struct in6_addr *saddr_cache;
#endif
__be32 flow_label;
@@ -410,6 +410,22 @@ struct tcp6_sock {
extern int inet6_sk_rebuild_header(struct sock *sk);
+struct inet6_timewait_sock {
+ struct in6_addr tw_v6_daddr;
+ struct in6_addr tw_v6_rcv_saddr;
+};
+
+struct tcp6_timewait_sock {
+ struct tcp_timewait_sock tcp6tw_tcp;
+ struct inet6_timewait_sock tcp6tw_inet6;
+};
+
+static inline struct inet6_timewait_sock *inet6_twsk(const struct sock *sk)
+{
+ return (struct inet6_timewait_sock *)(((u8 *)sk) +
+ inet_twsk(sk)->tw_ipv6_offset);
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
{
@@ -459,28 +475,12 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only)
#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
-struct inet6_timewait_sock {
- struct in6_addr tw_v6_daddr;
- struct in6_addr tw_v6_rcv_saddr;
-};
-
-struct tcp6_timewait_sock {
- struct tcp_timewait_sock tcp6tw_tcp;
- struct inet6_timewait_sock tcp6tw_inet6;
-};
-
static inline u16 inet6_tw_offset(const struct proto *prot)
{
return prot->twsk_prot->twsk_obj_size -
sizeof(struct inet6_timewait_sock);
}
-static inline struct inet6_timewait_sock *inet6_twsk(const struct sock *sk)
-{
- return (struct inet6_timewait_sock *)(((u8 *)sk) +
- inet_twsk(sk)->tw_ipv6_offset);
-}
-
static inline struct in6_addr *__inet6_rcv_saddr(const struct sock *sk)
{
return likely(sk->sk_state != TCP_TIME_WAIT) ?
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 61f5cec031e0..553fb66da130 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -150,9 +150,7 @@ struct irq_data {
void *handler_data;
void *chip_data;
struct msi_desc *msi_desc;
-#ifdef CONFIG_SMP
cpumask_var_t affinity;
-#endif
};
/*
@@ -301,8 +299,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
* @irq_pm_shutdown: function called from core code on shutdown once per chip
* @irq_print_chip: optional to print special chip info in show_interrupts
* @flags: chip specific flags
- *
- * @release: release function solely used by UML
*/
struct irq_chip {
const char *name;
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index c513a40510f5..0976fc46d1e0 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -42,8 +42,7 @@
* allowed.
*
* Not initializing the key (static data is initialized to 0s anyway) is the
- * same as using STATIC_KEY_INIT_FALSE and static_key_false() is
- * equivalent with static_branch().
+ * same as using STATIC_KEY_INIT_FALSE.
*
*/
@@ -107,12 +106,6 @@ static __always_inline bool static_key_true(struct static_key *key)
return !static_key_false(key);
}
-/* Deprecated. Please use 'static_key_false() instead. */
-static __always_inline bool static_branch(struct static_key *key)
-{
- return arch_static_branch(key);
-}
-
extern struct jump_entry __start___jump_table[];
extern struct jump_entry __stop___jump_table[];
@@ -166,14 +159,6 @@ static __always_inline bool static_key_true(struct static_key *key)
return false;
}
-/* Deprecated. Please use 'static_key_false() instead. */
-static __always_inline bool static_branch(struct static_key *key)
-{
- if (unlikely(atomic_read(&key->enabled)) > 0)
- return true;
- return false;
-}
-
static inline void static_key_slow_inc(struct static_key *key)
{
atomic_inc(&key->enabled);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e07f5e0c5df4..604382143bcf 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -377,7 +377,6 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
- SYSTEM_SUSPEND_DISK,
} system_state;
#define TAINT_PROPRIETARY_MODULE 0
diff --git a/include/linux/key.h b/include/linux/key.h
index 4cd22ed627ef..cef3b315ba7c 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -303,7 +303,9 @@ static inline bool key_is_instantiated(const struct key *key)
rwsem_is_locked(&((struct key *)(KEY))->sem)))
#define rcu_assign_keypointer(KEY, PAYLOAD) \
- (rcu_assign_pointer((KEY)->payload.rcudata, PAYLOAD))
+do { \
+ rcu_assign_pointer((KEY)->payload.rcudata, (PAYLOAD)); \
+} while (0)
#ifdef CONFIG_SYSCTL
extern ctl_table key_sysctls[];
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index d6bd50110ec2..2e7a1e032c71 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -55,12 +55,17 @@ struct kmsg_dumper {
#ifdef CONFIG_PRINTK
void kmsg_dump(enum kmsg_dump_reason reason);
+bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+ char *line, size_t size, size_t *len);
+
bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
char *line, size_t size, size_t *len);
bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
char *buf, size_t size, size_t *len);
+void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper);
+
void kmsg_dump_rewind(struct kmsg_dumper *dumper);
int kmsg_dump_register(struct kmsg_dumper *dumper);
@@ -71,6 +76,13 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason)
{
}
+static inline bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper,
+ bool syslog, const char *line,
+ size_t size, size_t *len)
+{
+ return false;
+}
+
static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
const char *line, size_t size, size_t *len)
{
@@ -83,6 +95,10 @@ static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
return false;
}
+static inline void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+{
+}
+
static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
{
}
diff --git a/include/linux/ks8851_mll.h b/include/linux/ks8851_mll.h
new file mode 100644
index 000000000000..e9ccfb59ed30
--- /dev/null
+++ b/include/linux/ks8851_mll.h
@@ -0,0 +1,33 @@
+/*
+ * ks8861_mll platform data struct definition
+ * Copyright (c) 2012 BTicino S.p.A.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _LINUX_KS8851_MLL_H
+#define _LINUX_KS8851_MLL_H
+
+#include <linux/if_ether.h>
+
+/**
+ * struct ks8851_mll_platform_data - Platform data of the KS8851_MLL network driver
+ * @macaddr: The MAC address of the device, set to all 0:s to use the on in
+ * the chip.
+ */
+struct ks8851_mll_platform_data {
+ u8 mac_addr[ETH_ALEN];
+};
+
+#endif
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 0714b24c0e45..22ccf9dee177 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -49,8 +49,6 @@ extern int tsk_fork_get_node(struct task_struct *tsk);
* can be queued and flushed using queue/flush_kthread_work()
* respectively. Queued kthread_works are processed by a kthread
* running kthread_worker_fn().
- *
- * A kthread_work can't be freed while it is executing.
*/
struct kthread_work;
typedef void (*kthread_work_func_t)(struct kthread_work *work);
@@ -59,15 +57,14 @@ struct kthread_worker {
spinlock_t lock;
struct list_head work_list;
struct task_struct *task;
+ struct kthread_work *current_work;
};
struct kthread_work {
struct list_head node;
kthread_work_func_t func;
wait_queue_head_t done;
- atomic_t flushing;
- int queue_seq;
- int done_seq;
+ struct kthread_worker *worker;
};
#define KTHREAD_WORKER_INIT(worker) { \
@@ -79,7 +76,6 @@ struct kthread_work {
.node = LIST_HEAD_INIT((work).node), \
.func = (fn), \
.done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \
- .flushing = ATOMIC_INIT(0), \
}
#define DEFINE_KTHREAD_WORKER(worker) \
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 09f2b3aa2da7..2ce09aa7d3b3 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -617,6 +617,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_SIGNAL_MSI 77
#define KVM_CAP_PPC_GET_SMMU_INFO 78
#define KVM_CAP_S390_COW 79
+#define KVM_CAP_PPC_ALLOC_HTAB 80
#ifdef KVM_CAP_IRQ_ROUTING
@@ -828,6 +829,8 @@ struct kvm_s390_ucas_mapping {
#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
#define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info)
+/* Available with KVM_CAP_PPC_ALLOC_HTAB */
+#define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32)
/*
* ioctls for vcpu fds
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c4464356b35b..b70b48b01098 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -306,7 +306,7 @@ struct kvm {
struct hlist_head irq_ack_notifier_list;
#endif
-#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
struct mmu_notifier mmu_notifier;
unsigned long mmu_notifier_seq;
long mmu_notifier_count;
@@ -314,13 +314,19 @@ struct kvm {
long tlbs_dirty;
};
-/* The guest did something we don't support. */
-#define pr_unimpl(vcpu, fmt, ...) \
- pr_err_ratelimited("kvm: %i: cpu%i " fmt, \
- current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__)
+#define kvm_err(fmt, ...) \
+ pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
+#define kvm_info(fmt, ...) \
+ pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
+#define kvm_debug(fmt, ...) \
+ pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
+#define kvm_pr_unimpl(fmt, ...) \
+ pr_err_ratelimited("kvm [%i]: " fmt, \
+ task_tgid_nr(current), ## __VA_ARGS__)
-#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
-#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
+/* The guest did something we don't support. */
+#define vcpu_unimpl(vcpu, fmt, ...) \
+ kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
@@ -535,6 +541,9 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);
+void *kvm_kvzalloc(unsigned long size);
+void kvm_kvfree(const void *addr);
+
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
@@ -771,7 +780,7 @@ struct kvm_stats_debugfs_item {
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
-#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
if (unlikely(vcpu->kvm->mmu_notifier_count))
@@ -793,7 +802,7 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
}
#endif
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
+#ifdef KVM_CAP_IRQ_ROUTING
#define KVM_MAX_IRQ_ROUTES 1024
@@ -815,7 +824,7 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
#ifdef CONFIG_HAVE_KVM_EVENTFD
void kvm_eventfd_init(struct kvm *kvm);
-int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
+int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
@@ -824,7 +833,7 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
static inline void kvm_eventfd_init(struct kvm *kvm) {}
-static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
return -EINVAL;
}
diff --git a/include/linux/libata.h b/include/linux/libata.h
index cc22b943db83..64f90e17e51d 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -843,6 +843,8 @@ struct ata_port_operations {
void (*error_handler)(struct ata_port *ap);
void (*lost_interrupt)(struct ata_port *ap);
void (*post_internal_cmd)(struct ata_queued_cmd *qc);
+ void (*sched_eh)(struct ata_port *ap);
+ void (*end_eh)(struct ata_port *ap);
/*
* Optional features
@@ -1166,6 +1168,8 @@ extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
ata_postreset_fn_t postreset);
extern void ata_std_error_handler(struct ata_port *ap);
+extern void ata_std_sched_eh(struct ata_port *ap);
+extern void ata_std_end_eh(struct ata_port *ap);
extern int ata_link_nr_enabled(struct ata_link *link);
/*
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index dfb947959ec9..7cccafe50e7b 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -43,7 +43,11 @@
#define MDIO_PKGID2 15
#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
+#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */
+#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */
#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
+#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
+#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */
/* Media-dependent registers. */
#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
@@ -56,7 +60,6 @@
#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
-#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
@@ -82,6 +85,7 @@
#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART
#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE
#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */
+#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */
/* 10 Gb/s */
#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00)
@@ -237,9 +241,25 @@
#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */
#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */
-/* AN EEE Advertisement register. */
-#define MDIO_AN_EEE_ADV_100TX 0x0002 /* Advertise 100TX EEE cap */
-#define MDIO_AN_EEE_ADV_1000T 0x0004 /* Advertise 1000T EEE cap */
+/* EEE Supported/Advertisement/LP Advertisement registers.
+ *
+ * EEE capability Register (3.20), Advertisement (7.60) and
+ * Link partner ability (7.61) registers have and can use the same identical
+ * bit masks.
+ */
+#define MDIO_AN_EEE_ADV_100TX 0x0002 /* Advertise 100TX EEE cap */
+#define MDIO_AN_EEE_ADV_1000T 0x0004 /* Advertise 1000T EEE cap */
+/* Note: the two defines above can be potentially used by the user-land
+ * and cannot remove them now.
+ * So, we define the new generic MDIO_EEE_100TX and MDIO_EEE_1000T macros
+ * using the previous ones (that can be considered obsolete).
+ */
+#define MDIO_EEE_100TX MDIO_AN_EEE_ADV_100TX /* 100TX EEE cap */
+#define MDIO_EEE_1000T MDIO_AN_EEE_ADV_1000T /* 1000T EEE cap */
+#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */
+#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */
+#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */
+#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */
/* LASI RX_ALARM control/status registers. */
#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index a6bb10235148..19dc455b4f3d 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -50,9 +50,7 @@ phys_addr_t memblock_find_in_range_node(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align, int nid);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
-int memblock_free_reserved_regions(void);
-int memblock_reserve_reserved_regions(void);
-
+phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
void memblock_allow_resize(void);
int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid);
int memblock_add(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/mfd/abx500/ab8500-codec.h b/include/linux/mfd/abx500/ab8500-codec.h
new file mode 100644
index 000000000000..dc6529202cdd
--- /dev/null
+++ b/include/linux/mfd/abx500/ab8500-codec.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef AB8500_CORE_CODEC_H
+#define AB8500_CORE_CODEC_H
+
+/* Mic-types */
+enum amic_type {
+ AMIC_TYPE_SINGLE_ENDED,
+ AMIC_TYPE_DIFFERENTIAL
+};
+
+/* Mic-biases */
+enum amic_micbias {
+ AMIC_MICBIAS_VAMIC1,
+ AMIC_MICBIAS_VAMIC2
+};
+
+/* Bias-voltage */
+enum ear_cm_voltage {
+ EAR_CMV_0_95V,
+ EAR_CMV_1_10V,
+ EAR_CMV_1_27V,
+ EAR_CMV_1_58V
+};
+
+/* Analog microphone settings */
+struct amic_settings {
+ enum amic_type mic1_type;
+ enum amic_type mic2_type;
+ enum amic_micbias mic1a_micbias;
+ enum amic_micbias mic1b_micbias;
+ enum amic_micbias mic2_micbias;
+};
+
+/* Platform data structure for the audio-parts of the AB8500 */
+struct ab8500_codec_platform_data {
+ struct amic_settings amics;
+ enum ear_cm_voltage ear_cmv;
+};
+
+#endif
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 91dd3ef63e99..bc9b84b60ec6 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -266,6 +266,7 @@ struct ab8500 {
struct regulator_reg_init;
struct regulator_init_data;
struct ab8500_gpio_platform_data;
+struct ab8500_codec_platform_data;
/**
* struct ab8500_platform_data - AB8500 platform data
@@ -284,6 +285,7 @@ struct ab8500_platform_data {
int num_regulator;
struct regulator_init_data *regulator;
struct ab8500_gpio_platform_data *gpio;
+ struct ab8500_codec_platform_data *codec;
};
extern int __devinit ab8500_init(struct ab8500 *ab8500,
diff --git a/include/linux/mfd/s5m87xx/s5m-core.h b/include/linux/mfd/s5m87xx/s5m-core.h
index 21603b42f22f..0b2e0ed309f5 100644
--- a/include/linux/mfd/s5m87xx/s5m-core.h
+++ b/include/linux/mfd/s5m87xx/s5m-core.h
@@ -347,6 +347,7 @@ struct s5m_platform_data {
bool buck_voltage_lock;
int buck_gpios[3];
+ int buck_ds[3];
int buck2_voltage[8];
bool buck2_gpiodvs;
int buck3_voltage[8];
@@ -369,6 +370,10 @@ struct s5m_platform_data {
bool buck2_ramp_enable;
bool buck3_ramp_enable;
bool buck4_ramp_enable;
+
+ int buck2_init;
+ int buck3_init;
+ int buck4_init;
};
#endif /* __LINUX_MFD_S5M_CORE_H */
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index f5171dbf8850..d83af39815ab 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -101,6 +101,7 @@ struct tmio_mmc_host;
struct tmio_mmc_data {
unsigned int hclk;
unsigned long capabilities;
+ unsigned long capabilities2;
unsigned long flags;
u32 ocr_mask; /* available voltages */
struct tmio_mmc_dma *dma;
@@ -110,6 +111,9 @@ struct tmio_mmc_data {
void (*set_clk_div)(struct platform_device *host, int state);
int (*get_cd)(struct platform_device *host);
int (*write16_hook)(struct tmio_mmc_host *host, int addr);
+ /* clock management callbacks */
+ int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
+ void (*clk_disable)(struct platform_device *pdev);
};
/*
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index e030ef9a64ee..12c06870829a 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -217,7 +217,8 @@ enum tps65217_regulator_id {
* Board data may be used to initialize regulator.
*/
struct tps65217_board {
- struct regulator_init_data *tps65217_init_data;
+ struct regulator_init_data *tps65217_init_data[TPS65217_NUM_REGULATOR];
+ struct device_node *of_node[TPS65217_NUM_REGULATOR];
};
/**
@@ -227,11 +228,6 @@ struct tps65217_board {
* @max_uV: minimum micro volts
* @vsel_to_uv: Function pointer to get voltage from selector
* @uv_to_vsel: Function pointer to get selector from voltage
- * @table: Table for non-uniform voltage step-size
- * @table_len: Length of the voltage table
- * @enable_mask: Regulator enable mask bits
- * @set_vout_reg: Regulator output voltage set register
- * @set_vout_mask: Regulator output voltage set mask
*
* This data is used to check the regualtor voltage limits while setting.
*/
@@ -241,11 +237,6 @@ struct tps_info {
int max_uV;
int (*vsel_to_uv)(unsigned int vsel);
int (*uv_to_vsel)(int uV, unsigned int *vsel);
- const int *table;
- unsigned int table_len;
- unsigned int enable_mask;
- unsigned int set_vout_reg;
- unsigned int set_vout_mask;
};
/**
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index dd8dc0a6c462..6c4c478e21a4 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -880,4 +880,10 @@ static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
return regmap_update_bits(tps65910->regmap, reg, mask, 0);
}
+static inline int tps65910_reg_update_bits(struct tps65910 *tps65910, u8 reg,
+ u8 mask, u8 val)
+{
+ return regmap_update_bits(tps65910->regmap, reg, mask, val);
+}
+
#endif /* __LINUX_MFD_TPS65910_H */
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 2783eca629a0..8ef3a7a11592 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -21,6 +21,8 @@
#define MII_EXPANSION 0x06 /* Expansion register */
#define MII_CTRL1000 0x09 /* 1000BASE-T control */
#define MII_STAT1000 0x0a /* 1000BASE-T status */
+#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */
+#define MII_MMD_DATA 0x0e /* MMD Access Data Register */
#define MII_ESTATUS 0x0f /* Extended Status */
#define MII_DCOUNTER 0x12 /* Disconnect counter */
#define MII_FCSCOUNTER 0x13 /* False carrier counter */
@@ -141,6 +143,13 @@
#define FLOW_CTRL_TX 0x01
#define FLOW_CTRL_RX 0x02
+/* MMD Access Control register fields */
+#define MII_MMD_CTRL_DEVAD_MASK 0x1f /* Mask MMD DEVAD*/
+#define MII_MMD_CTRL_ADDR 0x0000 /* Address */
+#define MII_MMD_CTRL_NOINCR 0x4000 /* no post increment */
+#define MII_MMD_CTRL_INCR_RDWT 0x8000 /* post increment on reads & writes */
+#define MII_MMD_CTRL_INCR_ON_WT 0xC000 /* post increment on writes only */
+
/* This structure is used in all SIOCxMIIxxx ioctl calls */
struct mii_ioctl_data {
__u16 phy_id;
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 0549d2115507..e0deeb2cc939 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -35,6 +35,7 @@
#define MPT_MINOR 220
#define MPT2SAS_MINOR 221
#define UINPUT_MINOR 223
+#define MISC_MCELOG_MINOR 227
#define HPET_MINOR 228
#define FUSE_MINOR 229
#define KVM_MINOR 232
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 1f3860a8a109..260695186256 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -154,6 +154,10 @@ enum {
/* set port opcode modifiers */
MLX4_SET_PORT_PRIO2TC = 0x8,
MLX4_SET_PORT_SCHEDULER = 0x9,
+
+ /* register/delete flow steering network rules */
+ MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
+ MLX4_QP_FLOW_STEERING_DETACH = 0x66,
};
enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6a8f002b8ed3..bd6c9fcdf2dd 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>
+#include <linux/cpu_rmap.h>
#include <linux/atomic.h>
@@ -56,6 +57,13 @@ enum {
MLX4_MAX_PORTS = 2
};
+/* base qkey for use in sriov tunnel-qp/proxy-qp communication.
+ * These qkeys must not be allowed for general use. This is a 64k range,
+ * and to test for violation, we use the mask (protect against future chg).
+ */
+#define MLX4_RESERVED_QKEY_BASE (0xFFFF0000)
+#define MLX4_RESERVED_QKEY_MASK (0xFFFF0000)
+
enum {
MLX4_BOARD_ID_LEN = 64
};
@@ -70,6 +78,36 @@ enum {
MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
};
+/* Driver supports 3 diffrent device methods to manage traffic steering:
+ * -device managed - High level API for ib and eth flow steering. FW is
+ * managing flow steering tables.
+ * - B0 steering mode - Common low level API for ib and (if supported) eth.
+ * - A0 steering mode - Limited low level API for eth. In case of IB,
+ * B0 mode is in use.
+ */
+enum {
+ MLX4_STEERING_MODE_A0,
+ MLX4_STEERING_MODE_B0,
+ MLX4_STEERING_MODE_DEVICE_MANAGED
+};
+
+static inline const char *mlx4_steering_mode_str(int steering_mode)
+{
+ switch (steering_mode) {
+ case MLX4_STEERING_MODE_A0:
+ return "A0 steering";
+
+ case MLX4_STEERING_MODE_B0:
+ return "B0 steering";
+
+ case MLX4_STEERING_MODE_DEVICE_MANAGED:
+ return "Device managed flow steering";
+
+ default:
+ return "Unrecognize steering mode";
+ }
+}
+
enum {
MLX4_DEV_CAP_FLAG_RC = 1LL << 0,
MLX4_DEV_CAP_FLAG_UC = 1LL << 1,
@@ -96,13 +134,15 @@ enum {
MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
- MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55
+ MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
+ MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
};
enum {
MLX4_DEV_CAP_FLAG2_RSS = 1LL << 0,
MLX4_DEV_CAP_FLAG2_RSS_TOP = 1LL << 1,
- MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2
+ MLX4_DEV_CAP_FLAG2_RSS_XOR = 1LL << 2,
+ MLX4_DEV_CAP_FLAG2_FS_EN = 1LL << 3
};
#define MLX4_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)
@@ -138,6 +178,7 @@ enum mlx4_event {
MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18,
MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
+ MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
MLX4_EVENT_TYPE_NONE = 0xff,
};
@@ -235,12 +276,32 @@ enum {
MLX4_MAX_FAST_REG_PAGES = 511,
};
+enum {
+ MLX4_DEV_PMC_SUBTYPE_GUID_INFO = 0x14,
+ MLX4_DEV_PMC_SUBTYPE_PORT_INFO = 0x15,
+ MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE = 0x16,
+};
+
+/* Port mgmt change event handling */
+enum {
+ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK = 1 << 0,
+ MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK = 1 << 1,
+ MLX4_EQ_PORT_INFO_LID_CHANGE_MASK = 1 << 2,
+ MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK = 1 << 3,
+ MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK = 1 << 4,
+};
+
+#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
+ MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
+
static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
{
return (major << 32) | (minor << 16) | subminor;
}
struct mlx4_phys_caps {
+ u32 gid_phys_table_len[MLX4_MAX_PORTS + 1];
+ u32 pkey_phys_table_len[MLX4_MAX_PORTS + 1];
u32 num_phys_eqs;
};
@@ -273,6 +334,8 @@ struct mlx4_caps {
int max_qp_init_rdma;
int max_qp_dest_rdma;
int sqp_start;
+ u32 base_sqpn;
+ u32 base_tunnel_sqpn;
int num_srqs;
int max_srq_wqes;
int max_srq_sge;
@@ -295,6 +358,8 @@ struct mlx4_caps {
int num_amgms;
int reserved_mcgs;
int num_qp_per_mgm;
+ int steering_mode;
+ int fs_log_max_ucast_qp_range_size;
int num_pds;
int reserved_pds;
int max_xrcds;
@@ -509,8 +574,85 @@ struct mlx4_dev {
u8 rev_id;
char board_id[MLX4_BOARD_ID_LEN];
int num_vfs;
+ u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
+ u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
};
+struct mlx4_eqe {
+ u8 reserved1;
+ u8 type;
+ u8 reserved2;
+ u8 subtype;
+ union {
+ u32 raw[6];
+ struct {
+ __be32 cqn;
+ } __packed comp;
+ struct {
+ u16 reserved1;
+ __be16 token;
+ u32 reserved2;
+ u8 reserved3[3];
+ u8 status;
+ __be64 out_param;
+ } __packed cmd;
+ struct {
+ __be32 qpn;
+ } __packed qp;
+ struct {
+ __be32 srqn;
+ } __packed srq;
+ struct {
+ __be32 cqn;
+ u32 reserved1;
+ u8 reserved2[3];
+ u8 syndrome;
+ } __packed cq_err;
+ struct {
+ u32 reserved1[2];
+ __be32 port;
+ } __packed port_change;
+ struct {
+ #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
+ u32 reserved;
+ u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+ } __packed comm_channel_arm;
+ struct {
+ u8 port;
+ u8 reserved[3];
+ __be64 mac;
+ } __packed mac_update;
+ struct {
+ __be32 slave_id;
+ } __packed flr_event;
+ struct {
+ __be16 current_temperature;
+ __be16 warning_threshold;
+ } __packed warming;
+ struct {
+ u8 reserved[3];
+ u8 port;
+ union {
+ struct {
+ __be16 mstr_sm_lid;
+ __be16 port_lid;
+ __be32 changed_attr;
+ u8 reserved[3];
+ u8 mstr_sm_sl;
+ __be64 gid_prefix;
+ } __packed port_info;
+ struct {
+ __be32 block_ptr;
+ __be32 tbl_entries_mask;
+ } __packed tbl_change_info;
+ } params;
+ } __packed port_mgmt_change;
+ } event;
+ u8 slave_id;
+ u8 reserved3[2];
+ u8 owner;
+} __packed;
+
struct mlx4_init_port_param {
int set_guid0;
int set_node_guid;
@@ -534,6 +676,15 @@ struct mlx4_init_port_param {
if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+#define MLX4_INVALID_SLAVE_ID 0xFF
+
+void handle_port_mgmt_change_event(struct work_struct *work);
+
+static inline int mlx4_master_func_num(struct mlx4_dev *dev)
+{
+ return dev->caps.function;
+}
+
static inline int mlx4_is_master(struct mlx4_dev *dev)
{
return dev->flags & MLX4_FLAG_MASTER;
@@ -623,9 +774,99 @@ int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot);
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- int block_mcast_loopback, enum mlx4_protocol protocol);
+ u8 port, int block_mcast_loopback,
+ enum mlx4_protocol protocol, u64 *reg_id);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
- enum mlx4_protocol protocol);
+ enum mlx4_protocol protocol, u64 reg_id);
+
+enum {
+ MLX4_DOMAIN_UVERBS = 0x1000,
+ MLX4_DOMAIN_ETHTOOL = 0x2000,
+ MLX4_DOMAIN_RFS = 0x3000,
+ MLX4_DOMAIN_NIC = 0x5000,
+};
+
+enum mlx4_net_trans_rule_id {
+ MLX4_NET_TRANS_RULE_ID_ETH = 0,
+ MLX4_NET_TRANS_RULE_ID_IB,
+ MLX4_NET_TRANS_RULE_ID_IPV6,
+ MLX4_NET_TRANS_RULE_ID_IPV4,
+ MLX4_NET_TRANS_RULE_ID_TCP,
+ MLX4_NET_TRANS_RULE_ID_UDP,
+ MLX4_NET_TRANS_RULE_NUM, /* should be last */
+};
+
+enum mlx4_net_trans_promisc_mode {
+ MLX4_FS_PROMISC_NONE = 0,
+ MLX4_FS_PROMISC_UPLINK,
+ /* For future use. Not implemented yet */
+ MLX4_FS_PROMISC_FUNCTION_PORT,
+ MLX4_FS_PROMISC_ALL_MULTI,
+};
+
+struct mlx4_spec_eth {
+ u8 dst_mac[6];
+ u8 dst_mac_msk[6];
+ u8 src_mac[6];
+ u8 src_mac_msk[6];
+ u8 ether_type_enable;
+ __be16 ether_type;
+ __be16 vlan_id_msk;
+ __be16 vlan_id;
+};
+
+struct mlx4_spec_tcp_udp {
+ __be16 dst_port;
+ __be16 dst_port_msk;
+ __be16 src_port;
+ __be16 src_port_msk;
+};
+
+struct mlx4_spec_ipv4 {
+ __be32 dst_ip;
+ __be32 dst_ip_msk;
+ __be32 src_ip;
+ __be32 src_ip_msk;
+};
+
+struct mlx4_spec_ib {
+ __be32 r_qpn;
+ __be32 qpn_msk;
+ u8 dst_gid[16];
+ u8 dst_gid_msk[16];
+};
+
+struct mlx4_spec_list {
+ struct list_head list;
+ enum mlx4_net_trans_rule_id id;
+ union {
+ struct mlx4_spec_eth eth;
+ struct mlx4_spec_ib ib;
+ struct mlx4_spec_ipv4 ipv4;
+ struct mlx4_spec_tcp_udp tcp_udp;
+ };
+};
+
+enum mlx4_net_trans_hw_rule_queue {
+ MLX4_NET_TRANS_Q_FIFO,
+ MLX4_NET_TRANS_Q_LIFO,
+};
+
+struct mlx4_net_trans_rule {
+ struct list_head list;
+ enum mlx4_net_trans_hw_rule_queue queue_mode;
+ bool exclusive;
+ bool allow_loopback;
+ enum mlx4_net_trans_promisc_mode promisc_mode;
+ u8 port;
+ u16 priority;
+ u32 qpn;
+};
+
+int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
+ enum mlx4_net_trans_promisc_mode mode);
+int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
+ enum mlx4_net_trans_promisc_mode mode);
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port);
@@ -659,7 +900,8 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
int mlx4_SYNC_TPT(struct mlx4_dev *dev);
int mlx4_test_interrupts(struct mlx4_dev *dev);
-int mlx4_assign_eq(struct mlx4_dev *dev, char* name , int* vector);
+int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
+ int *vector);
void mlx4_release_eq(struct mlx4_dev *dev, int vec);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
@@ -668,4 +910,10 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+int mlx4_flow_attach(struct mlx4_dev *dev,
+ struct mlx4_net_trans_rule *rule, u64 *reg_id);
+int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
+
+int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey);
+
#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index 5f1298b1b5ef..d813704b963b 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -37,18 +37,21 @@
struct mlx4_dev;
+#define MLX4_MAC_MASK 0xffffffffffffULL
+
enum mlx4_dev_event {
MLX4_DEV_EVENT_CATASTROPHIC_ERROR,
MLX4_DEV_EVENT_PORT_UP,
MLX4_DEV_EVENT_PORT_DOWN,
MLX4_DEV_EVENT_PORT_REINIT,
+ MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
};
struct mlx4_interface {
void * (*add) (struct mlx4_dev *dev);
void (*remove)(struct mlx4_dev *dev, void *context);
void (*event) (struct mlx4_dev *dev, void *context,
- enum mlx4_dev_event event, int port);
+ enum mlx4_dev_event event, unsigned long param);
void * (*get_dev)(struct mlx4_dev *dev, void *context, u8 port);
struct list_head list;
enum mlx4_protocol protocol;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b36d08ce5c57..f9f279cf5b1b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1591,6 +1591,7 @@ void vmemmap_populate_print_last(void);
enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
MF_ACTION_REQUIRED = 1 << 1,
+ MF_MUST_KILL = 1 << 2,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d76513b5b263..111aca5e97f3 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -149,6 +149,7 @@ struct sd_switch_caps {
#define SD_SET_CURRENT_LIMIT_400 1
#define SD_SET_CURRENT_LIMIT_600 2
#define SD_SET_CURRENT_LIMIT_800 3
+#define SD_SET_CURRENT_NO_CHANGE (-1)
#define SD_MAX_CURRENT_200 (1 << SD_SET_CURRENT_LIMIT_200)
#define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400)
diff --git a/include/linux/mmc/cd-gpio.h b/include/linux/mmc/cd-gpio.h
deleted file mode 100644
index cefaba038ccb..000000000000
--- a/include/linux/mmc/cd-gpio.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Generic GPIO card-detect helper header
- *
- * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef MMC_CD_GPIO_H
-#define MMC_CD_GPIO_H
-
-struct mmc_host;
-int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio);
-void mmc_cd_gpio_free(struct mmc_host *host);
-
-#endif
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 0707d228d7f1..f578a71d82a6 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -11,6 +11,7 @@
#define LINUX_MMC_HOST_H
#include <linux/leds.h>
+#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/fault-inject.h>
@@ -150,11 +151,31 @@ struct mmc_async_req {
int (*err_check) (struct mmc_card *, struct mmc_async_req *);
};
-struct mmc_hotplug {
- unsigned int irq;
+/**
+ * struct mmc_slot - MMC slot functions
+ *
+ * @cd_irq: MMC/SD-card slot hotplug detection IRQ or -EINVAL
+ * @lock: protect the @handler_priv pointer
+ * @handler_priv: MMC/SD-card slot context
+ *
+ * Some MMC/SD host controllers implement slot-functions like card and
+ * write-protect detection natively. However, a large number of controllers
+ * leave these functions to the CPU. This struct provides a hook to attach
+ * such slot-function drivers.
+ */
+struct mmc_slot {
+ int cd_irq;
+ struct mutex lock;
void *handler_priv;
};
+struct regulator;
+
+struct mmc_supply {
+ struct regulator *vmmc; /* Card power supply */
+ struct regulator *vqmmc; /* Optional Vccq supply */
+};
+
struct mmc_host {
struct device *parent;
struct device class_dev;
@@ -168,6 +189,9 @@ struct mmc_host {
u32 ocr_avail_sd; /* SD-specific OCR */
u32 ocr_avail_mmc; /* MMC-specific OCR */
struct notifier_block pm_notify;
+ u32 max_current_330;
+ u32 max_current_300;
+ u32 max_current_180;
#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */
#define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */
@@ -211,16 +235,9 @@ struct mmc_host {
#define MMC_CAP_UHS_SDR50 (1 << 17) /* Host supports UHS SDR50 mode */
#define MMC_CAP_UHS_SDR104 (1 << 18) /* Host supports UHS SDR104 mode */
#define MMC_CAP_UHS_DDR50 (1 << 19) /* Host supports UHS DDR50 mode */
-#define MMC_CAP_SET_XPC_330 (1 << 20) /* Host supports >150mA current at 3.3V */
-#define MMC_CAP_SET_XPC_300 (1 << 21) /* Host supports >150mA current at 3.0V */
-#define MMC_CAP_SET_XPC_180 (1 << 22) /* Host supports >150mA current at 1.8V */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
-#define MMC_CAP_MAX_CURRENT_200 (1 << 26) /* Host max current limit is 200mA */
-#define MMC_CAP_MAX_CURRENT_400 (1 << 27) /* Host max current limit is 400mA */
-#define MMC_CAP_MAX_CURRENT_600 (1 << 28) /* Host max current limit is 600mA */
-#define MMC_CAP_MAX_CURRENT_800 (1 << 29) /* Host max current limit is 800mA */
#define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */
#define MMC_CAP_HW_RESET (1 << 31) /* Hardware reset */
@@ -238,6 +255,8 @@ struct mmc_host {
#define MMC_CAP2_BROKEN_VOLTAGE (1 << 7) /* Use the broken voltage */
#define MMC_CAP2_DETECT_ON_ERR (1 << 8) /* On I/O err check card removal */
#define MMC_CAP2_HC_ERASE_SZ (1 << 9) /* High-capacity erase size */
+#define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */
+#define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */
mmc_pm_flag_t pm_caps; /* supported pm features */
unsigned int power_notify_type;
@@ -290,7 +309,7 @@ struct mmc_host {
struct delayed_work detect;
int detect_change; /* card detect flag */
- struct mmc_hotplug hotplug;
+ struct mmc_slot slot;
const struct mmc_bus_ops *bus_ops; /* current bus driver */
unsigned int bus_refs; /* reference counter */
@@ -309,6 +328,7 @@ struct mmc_host {
#ifdef CONFIG_REGULATOR
bool regulator_enabled; /* regulator state */
#endif
+ struct mmc_supply supply;
struct dentry *debugfs_root;
@@ -357,13 +377,12 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
wake_up_process(host->sdio_irq_thread);
}
-struct regulator;
-
#ifdef CONFIG_REGULATOR
int mmc_regulator_get_ocrmask(struct regulator *supply);
int mmc_regulator_set_ocr(struct mmc_host *mmc,
struct regulator *supply,
unsigned short vdd_bit);
+int mmc_regulator_get_supply(struct mmc_host *mmc);
#else
static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
{
@@ -376,6 +395,11 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
{
return 0;
}
+
+static inline int mmc_regulator_get_supply(struct mmc_host *mmc)
+{
+ return 0;
+}
#endif
int mmc_card_awake(struct mmc_host *host);
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index e9051e1cb1ce..ac83b105bedd 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -122,6 +122,7 @@ struct sdhci_host {
#define SDHCI_PV_ENABLED (1<<8) /* Preset value enabled */
#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
#define SDHCI_HS200_NEEDS_TUNING (1<<10) /* HS200 needs tuning */
+#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
unsigned int version; /* SDHCI spec. version */
@@ -155,7 +156,8 @@ struct sdhci_host {
struct timer_list timer; /* Timer for timeouts */
- unsigned int caps; /* Alternative capabilities */
+ unsigned int caps; /* Alternative CAPABILITY_0 */
+ unsigned int caps1; /* Alternative CAPABILITY_1 */
unsigned int ocr_avail_sdio; /* OCR bit masks */
unsigned int ocr_avail_sd;
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index 05f0e3db1c12..e7d5dd67bb74 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -32,18 +32,14 @@
* 1111 : Peripheral clock (sup_pclk set '1')
*/
-struct sh_mmcif_dma {
- struct sh_dmae_slave chan_priv_tx;
- struct sh_dmae_slave chan_priv_rx;
-};
-
struct sh_mmcif_plat_data {
void (*set_pwr)(struct platform_device *pdev, int state);
void (*down_pwr)(struct platform_device *pdev);
int (*get_cd)(struct platform_device *pdef);
- struct sh_mmcif_dma *dma; /* Deprecated. Instead */
- unsigned int slave_id_tx; /* use embedded slave_id_[tr]x */
+ unsigned int slave_id_tx; /* embedded slave_id_[tr]x */
unsigned int slave_id_rx;
+ bool use_cd_gpio : 1;
+ unsigned int cd_gpio;
u8 sup_pclk; /* 1 :SH7757, 0: SH7724/SH7372 */
unsigned long caps;
u32 ocr;
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index e94e620aeddc..b65679ffa880 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -23,6 +23,7 @@ struct sh_mobile_sdhi_info {
int dma_slave_rx;
unsigned long tmio_flags;
unsigned long tmio_caps;
+ unsigned long tmio_caps2;
u32 tmio_ocr_mask; /* available MMC voltages */
unsigned int cd_gpio;
struct tmio_mmc_data *pdata;
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
new file mode 100644
index 000000000000..7d88d27bfafa
--- /dev/null
+++ b/include/linux/mmc/slot-gpio.h
@@ -0,0 +1,24 @@
+/*
+ * Generic GPIO card-detect helper header
+ *
+ * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef MMC_SLOT_GPIO_H
+#define MMC_SLOT_GPIO_H
+
+struct mmc_host;
+
+int mmc_gpio_get_ro(struct mmc_host *host);
+int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio);
+void mmc_gpio_free_ro(struct mmc_host *host);
+
+int mmc_gpio_get_cd(struct mmc_host *host);
+int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio);
+void mmc_gpio_free_cd(struct mmc_host *host);
+
+#endif
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2427706f78b4..458988bd55a1 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -188,7 +188,7 @@ static inline int is_unevictable_lru(enum lru_list lru)
struct zone_reclaim_stat {
/*
* The pageout code in vmscan.c keeps track of how many of the
- * mem/swap backed and file backed pages are refeferenced.
+ * mem/swap backed and file backed pages are referenced.
* The higher the rotated/scanned ratio, the more valuable
* that cache is.
*
@@ -694,7 +694,7 @@ typedef struct pglist_data {
range, including holes */
int node_id;
wait_queue_head_t kswapd_wait;
- struct task_struct *kswapd;
+ struct task_struct *kswapd; /* Protected by lock_memory_hotplug() */
int kswapd_max_order;
enum zone_type classzone_idx;
} pg_data_t;
diff --git a/arch/arm/mach-mxs/include/mach/mxsfb.h b/include/linux/mxsfb.h
index e4d79791515e..f14943d55315 100644
--- a/arch/arm/mach-mxs/include/mach/mxsfb.h
+++ b/include/linux/mxsfb.h
@@ -14,8 +14,8 @@
* MA 02110-1301, USA.
*/
-#ifndef __MACH_FB_H
-#define __MACH_FB_H
+#ifndef __LINUX_MXSFB_H
+#define __LINUX_MXSFB_H
#include <linux/fb.h>
@@ -46,4 +46,4 @@ struct mxsfb_platform_data {
*/
};
-#endif /* __MACH_FB_H */
+#endif /* __LINUX_MXSFB_H */
diff --git a/include/linux/namei.h b/include/linux/namei.h
index ffc02135c483..d2ef8b34b967 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -7,12 +7,6 @@
struct vfsmount;
-struct open_intent {
- int flags;
- int create_mode;
- struct file *file;
-};
-
enum { MAX_NESTED_LINKS = 8 };
struct nameidata {
@@ -25,11 +19,6 @@ struct nameidata {
int last_type;
unsigned depth;
char *saved_names[MAX_NESTED_LINKS + 1];
-
- /* Intent data */
- union {
- struct open_intent open;
- } intent;
};
/*
@@ -78,13 +67,10 @@ extern int kern_path(const char *, unsigned, struct path *);
extern struct dentry *kern_path_create(int, const char *, struct path *, int);
extern struct dentry *user_path_create(int, const char __user *, struct path *, int);
-extern int kern_path_parent(const char *, struct nameidata *);
+extern struct dentry *kern_path_locked(const char *, struct path *);
extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
const char *, unsigned int, struct path *);
-extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
- int (*open)(struct inode *, struct file *));
-
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern int follow_down_one(struct path *);
@@ -94,6 +80,8 @@ extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
+extern void nd_jump_link(struct nameidata *nd, struct path *path);
+
static inline void nd_set_link(struct nameidata *nd, char *path)
{
nd->saved_names[nd->depth] = path;
diff --git a/include/linux/net.h b/include/linux/net.h
index e9ac2df079ba..99276c3dc89a 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -72,6 +72,7 @@ struct net;
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
+#define SOCK_EXTERNALLY_ALLOCATED 5
#ifndef ARCH_HAS_SOCKET_TYPES
/**
@@ -247,6 +248,7 @@ extern int sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags);
extern int sock_map_fd(struct socket *sock, int flags);
extern struct socket *sockfd_lookup(int fd, int *err);
+extern struct socket *sock_from_file(struct file *file, int *err);
#define sockfd_put(sock) fput(sock->file)
extern int net_ratelimit(void);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d94cb1431519..eb06e58bed0b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1046,10 +1046,9 @@ struct net_device {
*/
char name[IFNAMSIZ];
- struct pm_qos_request pm_qos_req;
-
- /* device name hash chain */
+ /* device name hash chain, please keep it close to name[] */
struct hlist_node name_hlist;
+
/* snmp alias */
char *ifalias;
@@ -1322,6 +1321,8 @@ struct net_device {
/* group the device belongs to */
int group;
+
+ struct pm_qos_request pm_qos_req;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -1626,6 +1627,7 @@ extern int dev_alloc_name(struct net_device *dev, const char *name);
extern int dev_open(struct net_device *dev);
extern int dev_close(struct net_device *dev);
extern void dev_disable_lro(struct net_device *dev);
+extern int dev_loopback_xmit(struct sk_buff *newskb);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
extern void unregister_netdevice_queue(struct net_device *dev,
@@ -2108,7 +2110,12 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
static inline int netif_copy_real_num_queues(struct net_device *to_dev,
const struct net_device *from_dev)
{
- netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
+ int err;
+
+ err = netif_set_real_num_tx_queues(to_dev,
+ from_dev->real_num_tx_queues);
+ if (err)
+ return err;
#ifdef CONFIG_RPS
return netif_set_real_num_rx_queues(to_dev,
from_dev->real_num_rx_queues);
@@ -2117,6 +2124,9 @@ static inline int netif_copy_real_num_queues(struct net_device *to_dev,
#endif
}
+#define DEFAULT_MAX_NUM_RSS_QUEUES (8)
+extern int netif_get_num_default_rss_queues(void);
+
/* Use this variant when it is known for sure that it
* is executing from hardware interrupt context or with hardware interrupts
* disabled.
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index ff9c84c29b28..c613cf0d7884 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -94,6 +94,16 @@ static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
a1->all[3] == a2->all[3];
}
+static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
+ union nf_inet_addr *result,
+ const union nf_inet_addr *mask)
+{
+ result->all[0] = a1->all[0] & mask->all[0];
+ result->all[1] = a1->all[1] & mask->all[1];
+ result->all[2] = a1->all[2] & mask->all[2];
+ result->all[3] = a1->all[3] & mask->all[3];
+}
+
extern void netfilter_init(void);
/* Largest hook number + 1 */
@@ -383,6 +393,22 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
+
+struct nf_conn;
+struct nlattr;
+
+struct nfq_ct_hook {
+ size_t (*build_size)(const struct nf_conn *ct);
+ int (*build)(struct sk_buff *skb, struct nf_conn *ct);
+ int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
+};
+extern struct nfq_ct_hook __rcu *nfq_ct_hook;
+
+struct nfq_ct_nat_hook {
+ void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
+ u32 ctinfo, int off);
+};
+extern struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 1697036336b6..874ae8f2706b 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -10,6 +10,7 @@ header-y += nfnetlink.h
header-y += nfnetlink_acct.h
header-y += nfnetlink_compat.h
header-y += nfnetlink_conntrack.h
+header-y += nfnetlink_cthelper.h
header-y += nfnetlink_cttimeout.h
header-y += nfnetlink_log.h
header-y += nfnetlink_queue.h
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h
index 0ce91d56a5f2..0dfc8b7210a3 100644
--- a/include/linux/netfilter/nf_conntrack_sip.h
+++ b/include/linux/netfilter/nf_conntrack_sip.h
@@ -2,6 +2,8 @@
#define __NF_CONNTRACK_SIP_H__
#ifdef __KERNEL__
+#include <net/netfilter/nf_conntrack_expect.h>
+
#define SIP_PORT 5060
#define SIP_TIMEOUT 3600
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index a1048c1587d1..18341cdb2443 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -50,7 +50,8 @@ struct nfgenmsg {
#define NFNL_SUBSYS_IPSET 6
#define NFNL_SUBSYS_ACCT 7
#define NFNL_SUBSYS_CTNETLINK_TIMEOUT 8
-#define NFNL_SUBSYS_COUNT 9
+#define NFNL_SUBSYS_CTHELPER 9
+#define NFNL_SUBSYS_COUNT 10
#ifdef __KERNEL__
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index e58e4b93c108..f649f7423ca2 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -7,6 +7,8 @@ enum cntl_msg_types {
IPCTNL_MSG_CT_GET,
IPCTNL_MSG_CT_DELETE,
IPCTNL_MSG_CT_GET_CTRZERO,
+ IPCTNL_MSG_CT_GET_STATS_CPU,
+ IPCTNL_MSG_CT_GET_STATS,
IPCTNL_MSG_MAX
};
@@ -15,6 +17,7 @@ enum ctnl_exp_msg_types {
IPCTNL_MSG_EXP_NEW,
IPCTNL_MSG_EXP_GET,
IPCTNL_MSG_EXP_DELETE,
+ IPCTNL_MSG_EXP_GET_STATS_CPU,
IPCTNL_MSG_EXP_MAX
};
@@ -191,6 +194,7 @@ enum ctattr_expect_nat {
enum ctattr_help {
CTA_HELP_UNSPEC,
CTA_HELP_NAME,
+ CTA_HELP_INFO,
__CTA_HELP_MAX
};
#define CTA_HELP_MAX (__CTA_HELP_MAX - 1)
@@ -202,4 +206,39 @@ enum ctattr_secctx {
};
#define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1)
+enum ctattr_stats_cpu {
+ CTA_STATS_UNSPEC,
+ CTA_STATS_SEARCHED,
+ CTA_STATS_FOUND,
+ CTA_STATS_NEW,
+ CTA_STATS_INVALID,
+ CTA_STATS_IGNORE,
+ CTA_STATS_DELETE,
+ CTA_STATS_DELETE_LIST,
+ CTA_STATS_INSERT,
+ CTA_STATS_INSERT_FAILED,
+ CTA_STATS_DROP,
+ CTA_STATS_EARLY_DROP,
+ CTA_STATS_ERROR,
+ CTA_STATS_SEARCH_RESTART,
+ __CTA_STATS_MAX,
+};
+#define CTA_STATS_MAX (__CTA_STATS_MAX - 1)
+
+enum ctattr_stats_global {
+ CTA_STATS_GLOBAL_UNSPEC,
+ CTA_STATS_GLOBAL_ENTRIES,
+ __CTA_STATS_GLOBAL_MAX,
+};
+#define CTA_STATS_GLOBAL_MAX (__CTA_STATS_GLOBAL_MAX - 1)
+
+enum ctattr_expect_stats {
+ CTA_STATS_EXP_UNSPEC,
+ CTA_STATS_EXP_NEW,
+ CTA_STATS_EXP_CREATE,
+ CTA_STATS_EXP_DELETE,
+ __CTA_STATS_EXP_MAX,
+};
+#define CTA_STATS_EXP_MAX (__CTA_STATS_EXP_MAX - 1)
+
#endif /* _IPCONNTRACK_NETLINK_H */
diff --git a/include/linux/netfilter/nfnetlink_cthelper.h b/include/linux/netfilter/nfnetlink_cthelper.h
new file mode 100644
index 000000000000..33659f6fad3e
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_cthelper.h
@@ -0,0 +1,55 @@
+#ifndef _NFNL_CTHELPER_H_
+#define _NFNL_CTHELPER_H_
+
+#define NFCT_HELPER_STATUS_DISABLED 0
+#define NFCT_HELPER_STATUS_ENABLED 1
+
+enum nfnl_acct_msg_types {
+ NFNL_MSG_CTHELPER_NEW,
+ NFNL_MSG_CTHELPER_GET,
+ NFNL_MSG_CTHELPER_DEL,
+ NFNL_MSG_CTHELPER_MAX
+};
+
+enum nfnl_cthelper_type {
+ NFCTH_UNSPEC,
+ NFCTH_NAME,
+ NFCTH_TUPLE,
+ NFCTH_QUEUE_NUM,
+ NFCTH_POLICY,
+ NFCTH_PRIV_DATA_LEN,
+ NFCTH_STATUS,
+ __NFCTH_MAX
+};
+#define NFCTH_MAX (__NFCTH_MAX - 1)
+
+enum nfnl_cthelper_policy_type {
+ NFCTH_POLICY_SET_UNSPEC,
+ NFCTH_POLICY_SET_NUM,
+ NFCTH_POLICY_SET,
+ NFCTH_POLICY_SET1 = NFCTH_POLICY_SET,
+ NFCTH_POLICY_SET2,
+ NFCTH_POLICY_SET3,
+ NFCTH_POLICY_SET4,
+ __NFCTH_POLICY_SET_MAX
+};
+#define NFCTH_POLICY_SET_MAX (__NFCTH_POLICY_SET_MAX - 1)
+
+enum nfnl_cthelper_pol_type {
+ NFCTH_POLICY_UNSPEC,
+ NFCTH_POLICY_NAME,
+ NFCTH_POLICY_EXPECT_MAX,
+ NFCTH_POLICY_EXPECT_TIMEOUT,
+ __NFCTH_POLICY_MAX
+};
+#define NFCTH_POLICY_MAX (__NFCTH_POLICY_MAX - 1)
+
+enum nfnl_cthelper_tuple_type {
+ NFCTH_TUPLE_UNSPEC,
+ NFCTH_TUPLE_L3PROTONUM,
+ NFCTH_TUPLE_L4PROTONUM,
+ __NFCTH_TUPLE_MAX,
+};
+#define NFCTH_TUPLE_MAX (__NFCTH_TUPLE_MAX - 1)
+
+#endif /* _NFNL_CTHELPER_H */
diff --git a/include/linux/netfilter/nfnetlink_queue.h b/include/linux/netfilter/nfnetlink_queue.h
index 24b32e6c009e..3b1c1360aedf 100644
--- a/include/linux/netfilter/nfnetlink_queue.h
+++ b/include/linux/netfilter/nfnetlink_queue.h
@@ -42,6 +42,8 @@ enum nfqnl_attr_type {
NFQA_IFINDEX_PHYSOUTDEV, /* __u32 ifindex */
NFQA_HWADDR, /* nfqnl_msg_packet_hw */
NFQA_PAYLOAD, /* opaque data payload */
+ NFQA_CT, /* nf_conntrack_netlink.h */
+ NFQA_CT_INFO, /* enum ip_conntrack_info */
__NFQA_MAX
};
@@ -84,8 +86,15 @@ enum nfqnl_attr_config {
NFQA_CFG_CMD, /* nfqnl_msg_config_cmd */
NFQA_CFG_PARAMS, /* nfqnl_msg_config_params */
NFQA_CFG_QUEUE_MAXLEN, /* __u32 */
+ NFQA_CFG_MASK, /* identify which flags to change */
+ NFQA_CFG_FLAGS, /* value of these flags (__u32) */
__NFQA_CFG_MAX
};
#define NFQA_CFG_MAX (__NFQA_CFG_MAX-1)
+/* Flags for NFQA_CFG_FLAGS */
+#define NFQA_CFG_F_FAIL_OPEN (1 << 0)
+#define NFQA_CFG_F_CONNTRACK (1 << 1)
+#define NFQA_CFG_F_MAX (1 << 2)
+
#endif /* _NFNETLINK_QUEUE_H */
diff --git a/include/linux/netfilter/xt_connlimit.h b/include/linux/netfilter/xt_connlimit.h
index d1366f05d1b2..f1656096121e 100644
--- a/include/linux/netfilter/xt_connlimit.h
+++ b/include/linux/netfilter/xt_connlimit.h
@@ -22,13 +22,8 @@ struct xt_connlimit_info {
#endif
};
unsigned int limit;
- union {
- /* revision 0 */
- unsigned int inverse;
-
- /* revision 1 */
- __u32 flags;
- };
+ /* revision 1 */
+ __u32 flags;
/* Used internally by the kernel */
struct xt_connlimit_data *data __attribute__((aligned(8)));
diff --git a/include/linux/netfilter/xt_recent.h b/include/linux/netfilter/xt_recent.h
index 83318e01425e..6ef36c113e89 100644
--- a/include/linux/netfilter/xt_recent.h
+++ b/include/linux/netfilter/xt_recent.h
@@ -32,4 +32,14 @@ struct xt_recent_mtinfo {
__u8 side;
};
+struct xt_recent_mtinfo_v1 {
+ __u32 seconds;
+ __u32 hit_count;
+ __u8 check_set;
+ __u8 invert;
+ char name[XT_RECENT_NAME_LEN];
+ __u8 side;
+ union nf_inet_addr mask;
+};
+
#endif /* _LINUX_NETFILTER_XT_RECENT_H */
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index fa0946c549d3..e2b12801378d 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -66,6 +66,7 @@ enum nf_ip_hook_priorities {
NF_IP_PRI_SECURITY = 50,
NF_IP_PRI_NAT_SRC = 100,
NF_IP_PRI_SELINUX_LAST = 225,
+ NF_IP_PRI_CONNTRACK_HELPER = 300,
NF_IP_PRI_CONNTRACK_CONFIRM = INT_MAX,
NF_IP_PRI_LAST = INT_MAX,
};
diff --git a/include/linux/netfilter_ipv4/Kbuild b/include/linux/netfilter_ipv4/Kbuild
index c61b8fb1a9ef..8ba0c5b72ea9 100644
--- a/include/linux/netfilter_ipv4/Kbuild
+++ b/include/linux/netfilter_ipv4/Kbuild
@@ -5,7 +5,6 @@ header-y += ipt_LOG.h
header-y += ipt_REJECT.h
header-y += ipt_TTL.h
header-y += ipt_ULOG.h
-header-y += ipt_addrtype.h
header-y += ipt_ah.h
header-y += ipt_ecn.h
header-y += ipt_ttl.h
diff --git a/include/linux/netfilter_ipv4/ipt_addrtype.h b/include/linux/netfilter_ipv4/ipt_addrtype.h
deleted file mode 100644
index 0da42237c8da..000000000000
--- a/include/linux/netfilter_ipv4/ipt_addrtype.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef _IPT_ADDRTYPE_H
-#define _IPT_ADDRTYPE_H
-
-#include <linux/types.h>
-
-enum {
- IPT_ADDRTYPE_INVERT_SOURCE = 0x0001,
- IPT_ADDRTYPE_INVERT_DEST = 0x0002,
- IPT_ADDRTYPE_LIMIT_IFACE_IN = 0x0004,
- IPT_ADDRTYPE_LIMIT_IFACE_OUT = 0x0008,
-};
-
-struct ipt_addrtype_info_v1 {
- __u16 source; /* source-type mask */
- __u16 dest; /* dest-type mask */
- __u32 flags;
-};
-
-/* revision 0 */
-struct ipt_addrtype_info {
- __u16 source; /* source-type mask */
- __u16 dest; /* dest-type mask */
- __u32 invert_source;
- __u32 invert_dest;
-};
-
-#endif
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 57c025127f1d..7c8a513ce7a3 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -71,6 +71,7 @@ enum nf_ip6_hook_priorities {
NF_IP6_PRI_SECURITY = 50,
NF_IP6_PRI_NAT_SRC = 100,
NF_IP6_PRI_SELINUX_LAST = 225,
+ NF_IP6_PRI_CONNTRACK_HELPER = 300,
NF_IP6_PRI_LAST = INT_MAX,
};
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 0f628ffa420c..f74dd133788f 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -174,11 +174,17 @@ struct netlink_skb_parms {
extern void netlink_table_grab(void);
extern void netlink_table_ungrab(void);
-extern struct sock *netlink_kernel_create(struct net *net,
- int unit,unsigned int groups,
- void (*input)(struct sk_buff *skb),
- struct mutex *cb_mutex,
- struct module *module);
+/* optional Netlink kernel configuration parameters */
+struct netlink_kernel_cfg {
+ unsigned int groups;
+ void (*input)(struct sk_buff *skb);
+ struct mutex *cb_mutex;
+ void (*bind)(int group);
+};
+
+extern struct sock *netlink_kernel_create(struct net *net, int unit,
+ struct module *module,
+ struct netlink_kernel_cfg *cfg);
extern void netlink_kernel_release(struct sock *sk);
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
@@ -241,14 +247,6 @@ struct netlink_notify {
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags);
-#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
-({ if (unlikely(skb_tailroom(skb) < (int)NLMSG_SPACE(len))) \
- goto nlmsg_failure; \
- __nlmsg_put(skb, pid, seq, type, len, flags); })
-
-#define NLMSG_PUT(skb, pid, seq, type, len) \
- NLMSG_NEW(skb, pid, seq, type, len, 0)
-
struct netlink_dump_control {
int (*dump)(struct sk_buff *skb, struct netlink_callback *);
int (*done)(struct netlink_callback*);
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 5dfa091c3347..28f5389c924b 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -43,7 +43,7 @@ struct netpoll_info {
void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
void netpoll_print_options(struct netpoll *np);
int netpoll_parse_options(struct netpoll *np, char *opt);
-int __netpoll_setup(struct netpoll *np);
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
int netpoll_setup(struct netpoll *np);
int netpoll_trap(void);
void netpoll_set_trap(int trap);
diff --git a/include/linux/nfc.h b/include/linux/nfc.h
index 0ae9b5857c83..6189f27e305b 100644
--- a/include/linux/nfc.h
+++ b/include/linux/nfc.h
@@ -56,6 +56,10 @@
* %NFC_ATTR_PROTOCOLS)
* @NFC_EVENT_DEVICE_REMOVED: event emitted when a device is removed
* (it sends %NFC_ATTR_DEVICE_INDEX)
+ * @NFC_EVENT_TM_ACTIVATED: event emitted when the adapter is activated in
+ * target mode.
+ * @NFC_EVENT_DEVICE_DEACTIVATED: event emitted when the adapter is deactivated
+ * from target mode.
*/
enum nfc_commands {
NFC_CMD_UNSPEC,
@@ -71,6 +75,8 @@ enum nfc_commands {
NFC_EVENT_DEVICE_ADDED,
NFC_EVENT_DEVICE_REMOVED,
NFC_EVENT_TARGET_LOST,
+ NFC_EVENT_TM_ACTIVATED,
+ NFC_EVENT_TM_DEACTIVATED,
/* private: internal use only */
__NFC_CMD_AFTER_LAST
};
@@ -94,6 +100,8 @@ enum nfc_commands {
* @NFC_ATTR_TARGET_SENSF_RES: NFC-F targets extra information, max 18 bytes
* @NFC_ATTR_COMM_MODE: Passive or active mode
* @NFC_ATTR_RF_MODE: Initiator or target
+ * @NFC_ATTR_IM_PROTOCOLS: Initiator mode protocols to poll for
+ * @NFC_ATTR_TM_PROTOCOLS: Target mode protocols to listen for
*/
enum nfc_attrs {
NFC_ATTR_UNSPEC,
@@ -109,6 +117,8 @@ enum nfc_attrs {
NFC_ATTR_COMM_MODE,
NFC_ATTR_RF_MODE,
NFC_ATTR_DEVICE_POWERED,
+ NFC_ATTR_IM_PROTOCOLS,
+ NFC_ATTR_TM_PROTOCOLS,
/* private: internal use only */
__NFC_ATTR_AFTER_LAST
};
@@ -118,6 +128,7 @@ enum nfc_attrs {
#define NFC_NFCID1_MAXSIZE 10
#define NFC_SENSB_RES_MAXSIZE 12
#define NFC_SENSF_RES_MAXSIZE 18
+#define NFC_GB_MAXSIZE 48
/* NFC protocols */
#define NFC_PROTO_JEWEL 1
@@ -125,8 +136,9 @@ enum nfc_attrs {
#define NFC_PROTO_FELICA 3
#define NFC_PROTO_ISO14443 4
#define NFC_PROTO_NFC_DEP 5
+#define NFC_PROTO_ISO14443_B 6
-#define NFC_PROTO_MAX 6
+#define NFC_PROTO_MAX 7
/* NFC communication modes */
#define NFC_COMM_ACTIVE 0
@@ -135,13 +147,15 @@ enum nfc_attrs {
/* NFC RF modes */
#define NFC_RF_INITIATOR 0
#define NFC_RF_TARGET 1
+#define NFC_RF_NONE 2
/* NFC protocols masks used in bitsets */
-#define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL)
-#define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE)
-#define NFC_PROTO_FELICA_MASK (1 << NFC_PROTO_FELICA)
-#define NFC_PROTO_ISO14443_MASK (1 << NFC_PROTO_ISO14443)
-#define NFC_PROTO_NFC_DEP_MASK (1 << NFC_PROTO_NFC_DEP)
+#define NFC_PROTO_JEWEL_MASK (1 << NFC_PROTO_JEWEL)
+#define NFC_PROTO_MIFARE_MASK (1 << NFC_PROTO_MIFARE)
+#define NFC_PROTO_FELICA_MASK (1 << NFC_PROTO_FELICA)
+#define NFC_PROTO_ISO14443_MASK (1 << NFC_PROTO_ISO14443)
+#define NFC_PROTO_NFC_DEP_MASK (1 << NFC_PROTO_NFC_DEP)
+#define NFC_PROTO_ISO14443_B_MASK (1 << NFC_PROTO_ISO14443_B)
struct sockaddr_nfc {
sa_family_t sa_family;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 8aadd90b808a..d3b7c18b18f4 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1374,7 +1374,7 @@ struct nfs_rpc_ops {
int (*readlink)(struct inode *, struct page *, unsigned int,
unsigned int);
int (*create) (struct inode *, struct dentry *,
- struct iattr *, int, struct nfs_open_context *);
+ struct iattr *, int);
int (*remove) (struct inode *, struct qstr *);
void (*unlink_setup) (struct rpc_message *, struct inode *dir);
void (*unlink_rpc_prepare) (struct rpc_task *, struct nfs_unlinkdata *);
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index a6959f72745e..2f3878806403 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -170,6 +170,8 @@
* %NL80211_ATTR_CIPHER_GROUP, %NL80211_ATTR_WPA_VERSIONS,
* %NL80211_ATTR_AKM_SUITES, %NL80211_ATTR_PRIVACY,
* %NL80211_ATTR_AUTH_TYPE and %NL80211_ATTR_INACTIVITY_TIMEOUT.
+ * The channel to use can be set on the interface or be given using the
+ * %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_WIPHY_CHANNEL_TYPE attrs.
* @NL80211_CMD_NEW_BEACON: old alias for %NL80211_CMD_START_AP
* @NL80211_CMD_STOP_AP: Stop AP operation on the given interface
* @NL80211_CMD_DEL_BEACON: old alias for %NL80211_CMD_STOP_AP
@@ -275,6 +277,12 @@
* @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
* NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
*
+ * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry, using %NL80211_ATTR_MAC
+ * (for the BSSID) and %NL80211_ATTR_PMKID.
+ * @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
+ * (for the BSSID) and %NL80211_ATTR_PMKID.
+ * @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
+ *
* @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
* has been changed and provides details of the request information
* that caused the change such as who initiated the regulatory request
@@ -454,6 +462,10 @@
* the frame.
* @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for
* backward compatibility.
+ *
+ * @NL80211_CMD_SET_POWER_SAVE: Set powersave, using %NL80211_ATTR_PS_STATE
+ * @NL80211_CMD_GET_POWER_SAVE: Get powersave status in %NL80211_ATTR_PS_STATE
+ *
* @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command
* is used to configure connection quality monitoring notification trigger
* levels.
@@ -759,6 +771,9 @@ enum nl80211_commands {
* @NL80211_ATTR_IFNAME: network interface name
* @NL80211_ATTR_IFTYPE: type of virtual interface, see &enum nl80211_iftype
*
+ * @NL80211_ATTR_WDEV: wireless device identifier, used for pseudo-devices
+ * that don't have a netdev (u64)
+ *
* @NL80211_ATTR_MAC: MAC address (various uses)
*
* @NL80211_ATTR_KEY_DATA: (temporal) key data; for TKIP this consists of
@@ -769,6 +784,13 @@ enum nl80211_commands {
* section 7.3.2.25.1, e.g. 0x000FAC04)
* @NL80211_ATTR_KEY_SEQ: transmit key sequence number (IV/PN) for TKIP and
* CCMP keys, each six bytes in little endian
+ * @NL80211_ATTR_KEY_DEFAULT: Flag attribute indicating the key is default key
+ * @NL80211_ATTR_KEY_DEFAULT_MGMT: Flag attribute indicating the key is the
+ * default management key
+ * @NL80211_ATTR_CIPHER_SUITES_PAIRWISE: For crypto settings for connect or
+ * other commands, indicates which pairwise cipher suites are used
+ * @NL80211_ATTR_CIPHER_SUITE_GROUP: For crypto settings for connect or
+ * other commands, indicates which group cipher suite is used
*
* @NL80211_ATTR_BEACON_INTERVAL: beacon interval in TU
* @NL80211_ATTR_DTIM_PERIOD: DTIM period for beaconing
@@ -1004,6 +1026,8 @@ enum nl80211_commands {
* @NL80211_ATTR_ACK: Flag attribute indicating that the frame was
* acknowledged by the recipient.
*
+ * @NL80211_ATTR_PS_STATE: powersave state, using &enum nl80211_ps_state values.
+ *
* @NL80211_ATTR_CQM: connection quality monitor configuration in a
* nested attribute with %NL80211_ATTR_CQM_* sub-attributes.
*
@@ -1061,7 +1085,7 @@ enum nl80211_commands {
* flag isn't set, the frame will be rejected. This is also used as an
* nl80211 capability flag.
*
- * @NL80211_ATTR_BSS_HTOPMODE: HT operation mode (u16)
+ * @NL80211_ATTR_BSS_HT_OPMODE: HT operation mode (u16)
*
* @NL80211_ATTR_KEY_DEFAULT_TYPES: A nested attribute containing flags
* attributes, specifying what a key should be set as default as.
@@ -1085,10 +1109,10 @@ enum nl80211_commands {
* indicate which WoW triggers should be enabled. This is also
* used by %NL80211_CMD_GET_WOWLAN to get the currently enabled WoWLAN
* triggers.
-
+ *
* @NL80211_ATTR_SCHED_SCAN_INTERVAL: Interval between scheduled scan
* cycles, in msecs.
-
+ *
* @NL80211_ATTR_SCHED_SCAN_MATCH: Nested attribute with one or more
* sets of attributes to match during scheduled scans. Only BSSs
* that match any of the sets will be reported. These are
@@ -1115,7 +1139,7 @@ enum nl80211_commands {
* are managed in software: interfaces of these types aren't subject to
* any restrictions in their number or combinations.
*
- * @%NL80211_ATTR_REKEY_DATA: nested attribute containing the information
+ * @NL80211_ATTR_REKEY_DATA: nested attribute containing the information
* necessary for GTK rekeying in the device, see &enum nl80211_rekey_data.
*
* @NL80211_ATTR_SCAN_SUPP_RATES: rates per to be advertised as supported in scan,
@@ -1182,7 +1206,6 @@ enum nl80211_commands {
* @NL80211_ATTR_FEATURE_FLAGS: This u32 attribute contains flags from
* &enum nl80211_feature_flags and is advertised in wiphy information.
* @NL80211_ATTR_PROBE_RESP_OFFLOAD: Indicates that the HW responds to probe
- *
* requests while operating in AP-mode.
* This attribute holds a bitmap of the supported protocols for
* offloading (see &enum nl80211_probe_resp_offload_support_attr).
@@ -1222,6 +1245,12 @@ enum nl80211_commands {
* @NL80211_ATTR_BG_SCAN_PERIOD: Background scan period in seconds
* or 0 to disable background scan.
*
+ * @NL80211_ATTR_USER_REG_HINT_TYPE: type of regulatory hint passed from
+ * userspace. If unset it is assumed the hint comes directly from
+ * a user. If set code could specify exactly what type of source
+ * was used to provide the hint. For the different types of
+ * allowed user regulatory hints see nl80211_user_reg_hint_type.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1473,6 +1502,10 @@ enum nl80211_attrs {
NL80211_ATTR_BG_SCAN_PERIOD,
+ NL80211_ATTR_WDEV,
+
+ NL80211_ATTR_USER_REG_HINT_TYPE,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -1520,6 +1553,13 @@ enum nl80211_attrs {
#define NL80211_MAX_NR_CIPHER_SUITES 5
#define NL80211_MAX_NR_AKM_SUITES 2
+#define NL80211_MIN_REMAIN_ON_CHANNEL_TIME 10
+
+/* default RSSI threshold for scan results if none specified. */
+#define NL80211_SCAN_RSSI_THOLD_OFF -300
+
+#define NL80211_CQM_TXE_MAX_INTVL 1800
+
/**
* enum nl80211_iftype - (virtual) interface types
*
@@ -1613,12 +1653,20 @@ struct nl80211_sta_flag_update {
*
* These attribute types are used with %NL80211_STA_INFO_TXRATE
* when getting information about the bitrate of a station.
+ * There are 2 attributes for bitrate, a legacy one that represents
+ * a 16-bit value, and new one that represents a 32-bit value.
+ * If the rate value fits into 16 bit, both attributes are reported
+ * with the same value. If the rate is too high to fit into 16 bits
+ * (>6.5535Gbps) only 32-bit attribute is included.
+ * User space tools encouraged to use the 32-bit attribute and fall
+ * back to the 16-bit one for compatibility with older kernels.
*
* @__NL80211_RATE_INFO_INVALID: attribute number 0 is reserved
* @NL80211_RATE_INFO_BITRATE: total bitrate (u16, 100kbit/s)
* @NL80211_RATE_INFO_MCS: mcs index for 802.11n (u8)
* @NL80211_RATE_INFO_40_MHZ_WIDTH: 40 Mhz dualchannel bitrate
* @NL80211_RATE_INFO_SHORT_GI: 400ns guard interval
+ * @NL80211_RATE_INFO_BITRATE32: total bitrate (u32, 100kbit/s)
* @NL80211_RATE_INFO_MAX: highest rate_info number currently defined
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
@@ -1628,6 +1676,7 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_MCS,
NL80211_RATE_INFO_40_MHZ_WIDTH,
NL80211_RATE_INFO_SHORT_GI,
+ NL80211_RATE_INFO_BITRATE32,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -1788,6 +1837,9 @@ enum nl80211_mpath_info {
* @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE
* @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n
* @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n
+ * @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as
+ * defined in 802.11ac
+ * @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE
* @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
* @__NL80211_BAND_ATTR_AFTER_LAST: internal use
*/
@@ -1801,6 +1853,9 @@ enum nl80211_band_attr {
NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
+ NL80211_BAND_ATTR_VHT_MCS_SET,
+ NL80211_BAND_ATTR_VHT_CAPA,
+
/* keep last */
__NL80211_BAND_ATTR_AFTER_LAST,
NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1
@@ -1952,6 +2007,8 @@ enum nl80211_reg_rule_attr {
* @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
* @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
* only report BSS with matching SSID.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a
+ * BSS in scan results. Filtering is turned off if not specified.
* @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
* attribute number currently defined
* @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -1959,7 +2016,8 @@ enum nl80211_reg_rule_attr {
enum nl80211_sched_scan_match_attr {
__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID,
- NL80211_ATTR_SCHED_SCAN_MATCH_SSID,
+ NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
+ NL80211_SCHED_SCAN_MATCH_ATTR_RSSI,
/* keep last */
__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -1967,6 +2025,9 @@ enum nl80211_sched_scan_match_attr {
__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST - 1
};
+/* only for backward compatibility */
+#define NL80211_ATTR_SCHED_SCAN_MATCH_SSID NL80211_SCHED_SCAN_MATCH_ATTR_SSID
+
/**
* enum nl80211_reg_rule_flags - regulatory rule flags
*
@@ -2008,6 +2069,26 @@ enum nl80211_dfs_regions {
};
/**
+ * enum nl80211_user_reg_hint_type - type of user regulatory hint
+ *
+ * @NL80211_USER_REG_HINT_USER: a user sent the hint. This is always
+ * assumed if the attribute is not set.
+ * @NL80211_USER_REG_HINT_CELL_BASE: the hint comes from a cellular
+ * base station. Device drivers that have been tested to work
+ * properly to support this type of hint can enable these hints
+ * by setting the NL80211_FEATURE_CELL_BASE_REG_HINTS feature
+ * capability on the struct wiphy. The wireless core will
+ * ignore all cell base station hints until at least one device
+ * present has been registered with the wireless core that
+ * has listed NL80211_FEATURE_CELL_BASE_REG_HINTS as a
+ * supported feature.
+ */
+enum nl80211_user_reg_hint_type {
+ NL80211_USER_REG_HINT_USER = 0,
+ NL80211_USER_REG_HINT_CELL_BASE = 1,
+};
+
+/**
* enum nl80211_survey_info - survey information
*
* These attribute types are used with %NL80211_ATTR_SURVEY_INFO
@@ -2086,78 +2167,91 @@ enum nl80211_mntr_flags {
* @__NL80211_MESHCONF_INVALID: internal use
*
* @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in
- * millisecond units, used by the Peer Link Open message
+ * millisecond units, used by the Peer Link Open message
*
* @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in
- * millisecond units, used by the peer link management to close a peer link
+ * millisecond units, used by the peer link management to close a peer link
*
* @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in
- * millisecond units
+ * millisecond units
*
* @NL80211_MESHCONF_MAX_PEER_LINKS: maximum number of peer links allowed
- * on this mesh interface
+ * on this mesh interface
*
* @NL80211_MESHCONF_MAX_RETRIES: specifies the maximum number of peer link
- * open retries that can be sent to establish a new peer link instance in a
- * mesh
+ * open retries that can be sent to establish a new peer link instance in a
+ * mesh
*
* @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh
- * point.
+ * point.
*
* @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically
- * open peer links when we detect compatible mesh peers.
+ * open peer links when we detect compatible mesh peers.
*
* @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames
- * containing a PREQ that an MP can send to a particular destination (path
- * target)
+ * containing a PREQ that an MP can send to a particular destination (path
+ * target)
*
* @NL80211_MESHCONF_PATH_REFRESH_TIME: how frequently to refresh mesh paths
- * (in milliseconds)
+ * (in milliseconds)
*
* @NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT: minimum length of time to wait
- * until giving up on a path discovery (in milliseconds)
+ * until giving up on a path discovery (in milliseconds)
*
* @NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT: The time (in TUs) for which mesh
- * points receiving a PREQ shall consider the forwarding information from the
- * root to be valid. (TU = time unit)
+ * points receiving a PREQ shall consider the forwarding information from
+ * the root to be valid. (TU = time unit)
*
* @NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL: The minimum interval of time (in
- * TUs) during which an MP can send only one action frame containing a PREQ
- * reference element
+ * TUs) during which an MP can send only one action frame containing a PREQ
+ * reference element
*
* @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs)
- * that it takes for an HWMP information element to propagate across the mesh
+ * that it takes for an HWMP information element to propagate across the
+ * mesh
*
* @NL80211_MESHCONF_HWMP_ROOTMODE: whether root mode is enabled or not
*
* @NL80211_MESHCONF_ELEMENT_TTL: specifies the value of TTL field set at a
- * source mesh point for path selection elements.
+ * source mesh point for path selection elements.
*
* @NL80211_MESHCONF_HWMP_RANN_INTERVAL: The interval of time (in TUs) between
- * root announcements are transmitted.
+ * root announcements are transmitted.
*
* @NL80211_MESHCONF_GATE_ANNOUNCEMENTS: Advertise that this mesh station has
- * access to a broader network beyond the MBSS. This is done via Root
- * Announcement frames.
+ * access to a broader network beyond the MBSS. This is done via Root
+ * Announcement frames.
*
* @NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL: The minimum interval of time (in
- * TUs) during which a mesh STA can send only one Action frame containing a
- * PERR element.
+ * TUs) during which a mesh STA can send only one Action frame containing a
+ * PERR element.
*
* @NL80211_MESHCONF_FORWARDING: set Mesh STA as forwarding or non-forwarding
- * or forwarding entity (default is TRUE - forwarding entity)
+ * or forwarding entity (default is TRUE - forwarding entity)
*
* @NL80211_MESHCONF_RSSI_THRESHOLD: RSSI threshold in dBm. This specifies the
- * threshold for average signal strength of candidate station to establish
- * a peer link.
- *
- * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
+ * threshold for average signal strength of candidate station to establish
+ * a peer link.
*
* @NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR: maximum number of neighbors
- * to synchronize to for 11s default synchronization method (see 11C.12.2.2)
+ * to synchronize to for 11s default synchronization method
+ * (see 11C.12.2.2)
*
* @NL80211_MESHCONF_HT_OPMODE: set mesh HT protection mode.
*
+ * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
+ *
+ * @NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT: The time (in TUs) for
+ * which mesh STAs receiving a proactive PREQ shall consider the forwarding
+ * information to the root mesh STA to be valid.
+ *
+ * @NL80211_MESHCONF_HWMP_ROOT_INTERVAL: The interval of time (in TUs) between
+ * proactive PREQs are transmitted.
+ *
+ * @NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL: The minimum interval of time
+ * (in TUs) during which a mesh STA can send only one Action frame
+ * containing a PREQ element for root path confirmation.
+ *
* @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
*/
enum nl80211_meshconf_params {
@@ -2184,6 +2278,9 @@ enum nl80211_meshconf_params {
NL80211_MESHCONF_RSSI_THRESHOLD,
NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
NL80211_MESHCONF_HT_OPMODE,
+ NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
+ NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
+ NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
/* keep last */
__NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2199,34 +2296,36 @@ enum nl80211_meshconf_params {
* @__NL80211_MESH_SETUP_INVALID: Internal use
*
* @NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL: Enable this option to use a
- * vendor specific path selection algorithm or disable it to use the default
- * HWMP.
+ * vendor specific path selection algorithm or disable it to use the
+ * default HWMP.
*
* @NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC: Enable this option to use a
- * vendor specific path metric or disable it to use the default Airtime
- * metric.
+ * vendor specific path metric or disable it to use the default Airtime
+ * metric.
*
* @NL80211_MESH_SETUP_IE: Information elements for this mesh, for instance, a
- * robust security network ie, or a vendor specific information element that
- * vendors will use to identify the path selection methods and metrics in use.
+ * robust security network ie, or a vendor specific information element
+ * that vendors will use to identify the path selection methods and
+ * metrics in use.
*
* @NL80211_MESH_SETUP_USERSPACE_AUTH: Enable this option if an authentication
- * daemon will be authenticating mesh candidates.
+ * daemon will be authenticating mesh candidates.
*
* @NL80211_MESH_SETUP_USERSPACE_AMPE: Enable this option if an authentication
- * daemon will be securing peer link frames. AMPE is a secured version of Mesh
- * Peering Management (MPM) and is implemented with the assistance of a
- * userspace daemon. When this flag is set, the kernel will send peer
- * management frames to a userspace daemon that will implement AMPE
- * functionality (security capabilities selection, key confirmation, and key
- * management). When the flag is unset (default), the kernel can autonomously
- * complete (unsecured) mesh peering without the need of a userspace daemon.
- *
- * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
+ * daemon will be securing peer link frames. AMPE is a secured version of
+ * Mesh Peering Management (MPM) and is implemented with the assistance of
+ * a userspace daemon. When this flag is set, the kernel will send peer
+ * management frames to a userspace daemon that will implement AMPE
+ * functionality (security capabilities selection, key confirmation, and
+ * key management). When the flag is unset (default), the kernel can
+ * autonomously complete (unsecured) mesh peering without the need of a
+ * userspace daemon.
*
* @NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC: Enable this option to use a
- * vendor specific synchronization method or disable it to use the default
- * neighbor offset synchronization
+ * vendor specific synchronization method or disable it to use the default
+ * neighbor offset synchronization
+ *
+ * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
*
* @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
*/
@@ -2490,12 +2589,19 @@ enum nl80211_tx_rate_attributes {
* enum nl80211_band - Frequency band
* @NL80211_BAND_2GHZ: 2.4 GHz ISM band
* @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz)
+ * @NL80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
*/
enum nl80211_band {
NL80211_BAND_2GHZ,
NL80211_BAND_5GHZ,
+ NL80211_BAND_60GHZ,
};
+/**
+ * enum nl80211_ps_state - powersave state
+ * @NL80211_PS_DISABLED: powersave is disabled
+ * @NL80211_PS_ENABLED: powersave is enabled
+ */
enum nl80211_ps_state {
NL80211_PS_DISABLED,
NL80211_PS_ENABLED,
@@ -2513,6 +2619,17 @@ enum nl80211_ps_state {
* @NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT: RSSI threshold event
* @NL80211_ATTR_CQM_PKT_LOSS_EVENT: a u32 value indicating that this many
* consecutive packets were not acknowledged by the peer
+ * @NL80211_ATTR_CQM_TXE_RATE: TX error rate in %. Minimum % of TX failures
+ * during the given %NL80211_ATTR_CQM_TXE_INTVL before an
+ * %NL80211_CMD_NOTIFY_CQM with reported %NL80211_ATTR_CQM_TXE_RATE and
+ * %NL80211_ATTR_CQM_TXE_PKTS is generated.
+ * @NL80211_ATTR_CQM_TXE_PKTS: number of attempted packets in a given
+ * %NL80211_ATTR_CQM_TXE_INTVL before %NL80211_ATTR_CQM_TXE_RATE is
+ * checked.
+ * @NL80211_ATTR_CQM_TXE_INTVL: interval in seconds. Specifies the periodic
+ * interval in which %NL80211_ATTR_CQM_TXE_PKTS and
+ * %NL80211_ATTR_CQM_TXE_RATE must be satisfied before generating an
+ * %NL80211_CMD_NOTIFY_CQM. Set to 0 to turn off TX error reporting.
* @__NL80211_ATTR_CQM_AFTER_LAST: internal
* @NL80211_ATTR_CQM_MAX: highest key attribute
*/
@@ -2522,6 +2639,9 @@ enum nl80211_attr_cqm {
NL80211_ATTR_CQM_RSSI_HYST,
NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
NL80211_ATTR_CQM_PKT_LOSS_EVENT,
+ NL80211_ATTR_CQM_TXE_RATE,
+ NL80211_ATTR_CQM_TXE_PKTS,
+ NL80211_ATTR_CQM_TXE_INTVL,
/* keep last */
__NL80211_ATTR_CQM_AFTER_LAST,
@@ -2534,10 +2654,14 @@ enum nl80211_attr_cqm {
* configured threshold
* @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the
* configured threshold
+ * @NL80211_CQM_RSSI_BEACON_LOSS_EVENT: The device experienced beacon loss.
+ * (Note that deauth/disassoc will still follow if the AP is not
+ * available. This event might get used as roaming event, etc.)
*/
enum nl80211_cqm_rssi_threshold_event {
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ NL80211_CQM_RSSI_BEACON_LOSS_EVENT,
};
@@ -2867,11 +2991,15 @@ enum nl80211_ap_sme_features {
* @NL80211_FEATURE_HT_IBSS: This driver supports IBSS with HT datarates.
* @NL80211_FEATURE_INACTIVITY_TIMER: This driver takes care of freeing up
* the connected inactive stations in AP mode.
+ * @NL80211_FEATURE_CELL_BASE_REG_HINTS: This driver has been tested
+ * to work properly to suppport receiving regulatory hints from
+ * cellular base stations.
*/
enum nl80211_feature_flags {
NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
NL80211_FEATURE_HT_IBSS = 1 << 1,
NL80211_FEATURE_INACTIVITY_TIMER = 1 << 2,
+ NL80211_FEATURE_CELL_BASE_REG_HINTS = 1 << 3,
};
/**
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 5a3db3aa5f17..fd4f2d1cdf6c 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -130,18 +130,8 @@ enum {
enum {
__IEEE802154_DEV_INVALID = -1,
- /* TODO:
- * Nowadays three device types supported by this stack at linux-zigbee
- * project: WPAN = 0, MONITOR = 1 and SMAC = 2.
- *
- * Since this stack implementation exists many years, it's definitely
- * bad idea to change the assigned values due to they are already used
- * by third-party userspace software like: iz-tools, wireshark...
- *
- * Currently only monitor device is added and initialized by '1' for
- * compatibility.
- */
- IEEE802154_DEV_MONITOR = 1,
+ IEEE802154_DEV_WPAN,
+ IEEE802154_DEV_MONITOR,
__IEEE802154_DEV_MAX,
};
diff --git a/include/linux/of.h b/include/linux/of.h
index 2ec1083af7ff..0e9cf9eec085 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -163,6 +163,11 @@ static inline int of_node_to_nid(struct device_node *np) { return -1; }
#define of_node_to_nid of_node_to_nid
#endif
+static inline const char* of_node_full_name(struct device_node *np)
+{
+ return np ? np->full_name : "<no-node>";
+}
+
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
#define for_each_node_by_name(dn, name) \
@@ -260,8 +265,7 @@ extern int of_machine_is_compatible(const char *compat);
extern int prom_add_property(struct device_node* np, struct property* prop);
extern int prom_remove_property(struct device_node *np, struct property *prop);
extern int prom_update_property(struct device_node *np,
- struct property *newprop,
- struct property *oldprop);
+ struct property *newprop);
#if defined(CONFIG_OF_DYNAMIC)
/* For updating the device tree at runtime */
@@ -303,6 +307,11 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
#else /* CONFIG_OF */
+static inline const char* of_node_full_name(struct device_node *np)
+{
+ return "<no-node>";
+}
+
static inline bool of_have_populated_dt(void)
{
return false;
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
new file mode 100644
index 000000000000..51a560f34bca
--- /dev/null
+++ b/include/linux/of_iommu.h
@@ -0,0 +1,21 @@
+#ifndef __OF_IOMMU_H
+#define __OF_IOMMU_H
+
+#ifdef CONFIG_OF_IOMMU
+
+extern int of_get_dma_window(struct device_node *dn, const char *prefix,
+ int index, unsigned long *busno, dma_addr_t *addr,
+ size_t *size);
+
+#else
+
+static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
+ int index, unsigned long *busno, dma_addr_t *addr,
+ size_t *size)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_OF_IOMMU */
+
+#endif /* __OF_IOMMU_H */
diff --git a/include/linux/of_mtd.h b/include/linux/of_mtd.h
index bae1b6094c63..ed7f267e6389 100644
--- a/include/linux/of_mtd.h
+++ b/include/linux/of_mtd.h
@@ -11,7 +11,7 @@
#ifdef CONFIG_OF_MTD
#include <linux/of.h>
-extern const int of_get_nand_ecc_mode(struct device_node *np);
+int of_get_nand_ecc_mode(struct device_node *np);
int of_get_nand_bus_width(struct device_node *np);
bool of_get_nand_on_flash_bbt(struct device_node *np);
#endif
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 44623500f419..248fba2af98a 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -17,6 +17,7 @@ extern acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev);
extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
struct pci_dev *pci_dev);
extern acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev);
+extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle);
static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
{
diff --git a/include/linux/pci.h b/include/linux/pci.h
index fefb4e19bf6a..5faa8310eec9 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -132,9 +132,10 @@ static inline const char *pci_power_name(pci_power_t state)
return pci_power_names[1 + (int) state];
}
-#define PCI_PM_D2_DELAY 200
-#define PCI_PM_D3_WAIT 10
-#define PCI_PM_BUS_WAIT 50
+#define PCI_PM_D2_DELAY 200
+#define PCI_PM_D3_WAIT 10
+#define PCI_PM_D3COLD_WAIT 100
+#define PCI_PM_BUS_WAIT 50
/** The pci_channel state describes connectivity between the CPU and
* the pci device. If some PCI bus between here and the pci device
@@ -176,8 +177,6 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
/* Provide indication device is assigned by a Virtual Machine Manager */
PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
- /* Device causes system crash if in D3 during S3 sleep */
- PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
};
enum pci_irq_reroute_variant {
@@ -280,11 +279,18 @@ struct pci_dev {
unsigned int pme_poll:1; /* Poll device's PME status bit */
unsigned int d1_support:1; /* Low power state D1 is supported */
unsigned int d2_support:1; /* Low power state D2 is supported */
- unsigned int no_d1d2:1; /* Only allow D0 and D3 */
+ unsigned int no_d1d2:1; /* D1 and D2 are forbidden */
+ unsigned int no_d3cold:1; /* D3cold is forbidden */
+ unsigned int d3cold_allowed:1; /* D3cold is allowed by user */
unsigned int mmio_always_on:1; /* disallow turning off io/mem
decoding during bar sizing */
unsigned int wakeup_prepared:1;
+ unsigned int runtime_d3cold:1; /* whether go through runtime
+ D3cold, not set for devices
+ powered on/off by the
+ corresponding bridge */
unsigned int d3_delay; /* D3->D0 transition time in ms */
+ unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
@@ -326,6 +332,8 @@ struct pci_dev {
unsigned int is_hotplug_bridge:1;
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
+ unsigned int broken_intx_masking:1;
+ unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -370,6 +378,8 @@ static inline int pci_channel_offline(struct pci_dev *pdev)
return (pdev->error_state != pci_channel_io_normal);
}
+extern struct resource busn_resource;
+
struct pci_host_bridge_window {
struct list_head list;
struct resource *res; /* host bridge aperture (CPU address) */
@@ -421,6 +431,7 @@ struct pci_bus {
struct list_head slots; /* list of slots on this bus */
struct resource *resource[PCI_BRIDGE_RESOURCE_NUM];
struct list_head resources; /* address space routed to this bus */
+ struct resource busn_res; /* bus numbers routed to this bus */
struct pci_ops *ops; /* configuration access functions */
void *sysdata; /* hook for sys-specific extension */
@@ -428,8 +439,6 @@ struct pci_bus {
unsigned char number; /* bus number */
unsigned char primary; /* number of primary bridge */
- unsigned char secondary; /* number of secondary bridge */
- unsigned char subordinate; /* max number of subordinate buses */
unsigned char max_bus_speed; /* enum pci_bus_speed */
unsigned char cur_bus_speed; /* enum pci_bus_speed */
@@ -476,6 +485,32 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false;
#define PCIBIOS_SET_FAILED 0x88
#define PCIBIOS_BUFFER_TOO_SMALL 0x89
+/*
+ * Translate above to generic errno for passing back through non-pci.
+ */
+static inline int pcibios_err_to_errno(int err)
+{
+ if (err <= PCIBIOS_SUCCESSFUL)
+ return err; /* Assume already errno */
+
+ switch (err) {
+ case PCIBIOS_FUNC_NOT_SUPPORTED:
+ return -ENOENT;
+ case PCIBIOS_BAD_VENDOR_ID:
+ return -EINVAL;
+ case PCIBIOS_DEVICE_NOT_FOUND:
+ return -ENODEV;
+ case PCIBIOS_BAD_REGISTER_NUMBER:
+ return -EFAULT;
+ case PCIBIOS_SET_FAILED:
+ return -EIO;
+ case PCIBIOS_BUFFER_TOO_SMALL:
+ return -ENOSPC;
+ }
+
+ return -ENOTTY;
+}
+
/* Low-level architecture-dependent routines */
struct pci_ops {
@@ -644,6 +679,7 @@ extern int no_pci_devices(void);
void pcibios_fixup_bus(struct pci_bus *);
int __must_check pcibios_enable_device(struct pci_dev *, int mask);
+/* Architecture specific versions may override this (weak) */
char *pcibios_setup(char *str);
/* Used only when drivers/pci/setup.c is used */
@@ -670,6 +706,9 @@ struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
+int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax);
+int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax);
+void pci_bus_release_busn_res(struct pci_bus *b);
struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata,
struct list_head *resources);
@@ -716,8 +755,6 @@ enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
int pci_find_capability(struct pci_dev *dev, int cap);
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
int pci_find_ext_capability(struct pci_dev *dev, int cap);
-int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
- int cap);
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap);
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap);
struct pci_bus *pci_find_next_bus(const struct pci_bus *from);
@@ -779,6 +816,14 @@ static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
}
+/* user-space driven config access */
+int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val);
+int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val);
+int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val);
+int pci_user_write_config_byte(struct pci_dev *dev, int where, u8 val);
+int pci_user_write_config_word(struct pci_dev *dev, int where, u16 val);
+int pci_user_write_config_dword(struct pci_dev *dev, int where, u32 val);
+
int __must_check pci_enable_device(struct pci_dev *dev);
int __must_check pci_enable_device_io(struct pci_dev *dev);
int __must_check pci_enable_device_mem(struct pci_dev *dev);
@@ -877,7 +922,6 @@ enum pci_obff_signal_type {
int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type);
void pci_disable_obff(struct pci_dev *dev);
-bool pci_ltr_supported(struct pci_dev *dev);
int pci_enable_ltr(struct pci_dev *dev);
void pci_disable_ltr(struct pci_dev *dev);
int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns);
@@ -1334,6 +1378,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
static inline int pci_domain_nr(struct pci_bus *bus)
{ return 0; }
+static inline struct pci_dev *pci_dev_get(struct pci_dev *dev)
+{ return NULL; }
+
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
#define dev_num_vf(d) (0)
@@ -1488,9 +1535,20 @@ enum pci_fixup_pass {
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
+struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
+int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
#else
static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) {}
+static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
+{
+ return pci_dev_get(dev);
+}
+static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
+ u16 acs_flags)
+{
+ return -ENOTTY;
+}
#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
@@ -1593,7 +1651,9 @@ static inline bool pci_is_pcie(struct pci_dev *dev)
}
void pci_request_acs(void);
-
+bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
+bool pci_acs_path_enabled(struct pci_dev *start,
+ struct pci_dev *end, u16 acs_flags);
#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index ab741b0d0074..fc3526077348 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -517,6 +517,7 @@
#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
+#define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403
#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
@@ -2755,6 +2756,17 @@
#define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27
#define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e
#define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f
+#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46
+#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0
+#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1
+#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4
+#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5
+#define PCI_DEVICE_ID_INTEL_UNC_QPI0 0x3c41
+#define PCI_DEVICE_ID_INTEL_UNC_QPI1 0x3c42
+#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
+#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
+#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 4b608f543412..53274bff5773 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -26,6 +26,7 @@
* Under PCI, each device has 256 bytes of configuration address space,
* of which the first 64 bytes are standardized as follows:
*/
+#define PCI_STD_HEADER_SIZEOF 64
#define PCI_VENDOR_ID 0x00 /* 16 bits */
#define PCI_DEVICE_ID 0x02 /* 16 bits */
#define PCI_COMMAND 0x04 /* 16 bits */
@@ -125,7 +126,8 @@
#define PCI_IO_RANGE_TYPE_MASK 0x0fUL /* I/O bridging type */
#define PCI_IO_RANGE_TYPE_16 0x00
#define PCI_IO_RANGE_TYPE_32 0x01
-#define PCI_IO_RANGE_MASK (~0x0fUL)
+#define PCI_IO_RANGE_MASK (~0x0fUL) /* Standard 4K I/O windows */
+#define PCI_IO_1K_RANGE_MASK (~0x03UL) /* Intel 1K I/O windows */
#define PCI_SEC_STATUS 0x1e /* Secondary status register, only bit 14 used */
#define PCI_MEMORY_BASE 0x20 /* Memory range behind */
#define PCI_MEMORY_LIMIT 0x22
@@ -209,9 +211,12 @@
#define PCI_CAP_ID_SHPC 0x0C /* PCI Standard Hot-Plug Controller */
#define PCI_CAP_ID_SSVID 0x0D /* Bridge subsystem vendor/device ID */
#define PCI_CAP_ID_AGP3 0x0E /* AGP Target PCI-PCI bridge */
+#define PCI_CAP_ID_SECDEV 0x0F /* Secure Device */
#define PCI_CAP_ID_EXP 0x10 /* PCI Express */
#define PCI_CAP_ID_MSIX 0x11 /* MSI-X */
+#define PCI_CAP_ID_SATA 0x12 /* SATA Data/Index Conf. */
#define PCI_CAP_ID_AF 0x13 /* PCI Advanced Features */
+#define PCI_CAP_ID_MAX PCI_CAP_ID_AF
#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */
#define PCI_CAP_FLAGS 2 /* Capability defined flags (16 bits) */
#define PCI_CAP_SIZEOF 4
@@ -276,6 +281,7 @@
#define PCI_VPD_ADDR_MASK 0x7fff /* Address mask */
#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */
#define PCI_VPD_DATA 4 /* 32-bits of data returned here */
+#define PCI_CAP_VPD_SIZEOF 8
/* Slot Identification */
@@ -297,8 +303,10 @@
#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
+#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */
#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
+#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
/* MSI-X registers */
#define PCI_MSIX_FLAGS 2
@@ -308,6 +316,7 @@
#define PCI_MSIX_TABLE 4
#define PCI_MSIX_PBA 8
#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
+#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
/* MSI-X entry's format */
#define PCI_MSIX_ENTRY_SIZE 16
@@ -338,6 +347,7 @@
#define PCI_AF_CTRL_FLR 0x01
#define PCI_AF_STATUS 5
#define PCI_AF_STATUS_TP 0x01
+#define PCI_CAP_AF_SIZEOF 6 /* size of AF registers */
/* PCI-X registers */
@@ -374,6 +384,10 @@
#define PCI_X_STATUS_SPL_ERR 0x20000000 /* Rcvd Split Completion Error Msg */
#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */
#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */
+#define PCI_X_ECC_CSR 8 /* ECC control and status */
+#define PCI_CAP_PCIX_SIZEOF_V0 8 /* size of registers for Version 0 */
+#define PCI_CAP_PCIX_SIZEOF_V1 24 /* size for Version 1 */
+#define PCI_CAP_PCIX_SIZEOF_V2 PCI_CAP_PCIX_SIZEOF_V1 /* Same for v2 */
/* PCI Bridge Subsystem ID registers */
@@ -462,6 +476,7 @@
#define PCI_EXP_LNKSTA_DLLLA 0x2000 /* Data Link Layer Link Active */
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
+#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints end here */
#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
@@ -507,6 +522,12 @@
#define PCI_EXP_RTSTA 32 /* Root Status */
#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
+/*
+ * Note that the following PCI Express 'Capability Structure' registers
+ * were introduced with 'Capability Version' 0x2 (v2). These registers
+ * do not exist on devices with Capability Version 1. Use pci_pcie_cap2()
+ * to use these fields safely.
+ */
#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_LTR 0x800 /* Latency tolerance reporting */
@@ -521,6 +542,7 @@
#define PCI_EXP_OBFF_MSGA_EN 0x2000 /* OBFF enable with Message type A */
#define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */
#define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
+#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints end here */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
@@ -529,23 +551,43 @@
#define PCI_EXT_CAP_VER(header) ((header >> 16) & 0xf)
#define PCI_EXT_CAP_NEXT(header) ((header >> 20) & 0xffc)
-#define PCI_EXT_CAP_ID_ERR 1
-#define PCI_EXT_CAP_ID_VC 2
-#define PCI_EXT_CAP_ID_DSN 3
-#define PCI_EXT_CAP_ID_PWR 4
-#define PCI_EXT_CAP_ID_VNDR 11
-#define PCI_EXT_CAP_ID_ACS 13
-#define PCI_EXT_CAP_ID_ARI 14
-#define PCI_EXT_CAP_ID_ATS 15
-#define PCI_EXT_CAP_ID_SRIOV 16
-#define PCI_EXT_CAP_ID_PRI 19
-#define PCI_EXT_CAP_ID_LTR 24
-#define PCI_EXT_CAP_ID_PASID 27
+#define PCI_EXT_CAP_ID_ERR 0x01 /* Advanced Error Reporting */
+#define PCI_EXT_CAP_ID_VC 0x02 /* Virtual Channel Capability */
+#define PCI_EXT_CAP_ID_DSN 0x03 /* Device Serial Number */
+#define PCI_EXT_CAP_ID_PWR 0x04 /* Power Budgeting */
+#define PCI_EXT_CAP_ID_RCLD 0x05 /* Root Complex Link Declaration */
+#define PCI_EXT_CAP_ID_RCILC 0x06 /* Root Complex Internal Link Control */
+#define PCI_EXT_CAP_ID_RCEC 0x07 /* Root Complex Event Collector */
+#define PCI_EXT_CAP_ID_MFVC 0x08 /* Multi-Function VC Capability */
+#define PCI_EXT_CAP_ID_VC9 0x09 /* same as _VC */
+#define PCI_EXT_CAP_ID_RCRB 0x0A /* Root Complex RB? */
+#define PCI_EXT_CAP_ID_VNDR 0x0B /* Vendor Specific */
+#define PCI_EXT_CAP_ID_CAC 0x0C /* Config Access - obsolete */
+#define PCI_EXT_CAP_ID_ACS 0x0D /* Access Control Services */
+#define PCI_EXT_CAP_ID_ARI 0x0E /* Alternate Routing ID */
+#define PCI_EXT_CAP_ID_ATS 0x0F /* Address Translation Services */
+#define PCI_EXT_CAP_ID_SRIOV 0x10 /* Single Root I/O Virtualization */
+#define PCI_EXT_CAP_ID_MRIOV 0x11 /* Multi Root I/O Virtualization */
+#define PCI_EXT_CAP_ID_MCAST 0x12 /* Multicast */
+#define PCI_EXT_CAP_ID_PRI 0x13 /* Page Request Interface */
+#define PCI_EXT_CAP_ID_AMD_XXX 0x14 /* reserved for AMD */
+#define PCI_EXT_CAP_ID_REBAR 0x15 /* resizable BAR */
+#define PCI_EXT_CAP_ID_DPA 0x16 /* dynamic power alloc */
+#define PCI_EXT_CAP_ID_TPH 0x17 /* TPH request */
+#define PCI_EXT_CAP_ID_LTR 0x18 /* latency tolerance reporting */
+#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe */
+#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
+#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID
+
+#define PCI_EXT_CAP_DSN_SIZEOF 12
+#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
/* Advanced Error Reporting */
#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
#define PCI_ERR_UNC_TRAIN 0x00000001 /* Training */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
+#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
#define PCI_ERR_UNC_POISON_TLP 0x00001000 /* Poisoned TLP */
#define PCI_ERR_UNC_FCP 0x00002000 /* Flow Control Protocol */
#define PCI_ERR_UNC_COMP_TIME 0x00004000 /* Completion Timeout */
@@ -555,6 +597,11 @@
#define PCI_ERR_UNC_MALF_TLP 0x00040000 /* Malformed TLP */
#define PCI_ERR_UNC_ECRC 0x00080000 /* ECRC Error Status */
#define PCI_ERR_UNC_UNSUP 0x00100000 /* Unsupported Request */
+#define PCI_ERR_UNC_ACSV 0x00200000 /* ACS Violation */
+#define PCI_ERR_UNC_INTN 0x00400000 /* internal error */
+#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
+#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
+#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
/* Same bits as above */
#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
@@ -565,6 +612,9 @@
#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
#define PCI_ERR_COR_REP_ROLL 0x00000100 /* REPLAY_NUM Rollover */
#define PCI_ERR_COR_REP_TIMER 0x00001000 /* Replay Timer Timeout */
+#define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */
+#define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */
+#define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */
#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
/* Same bits as above */
#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
@@ -596,12 +646,18 @@
/* Virtual Channel */
#define PCI_VC_PORT_REG1 4
+#define PCI_VC_REG1_EVCC 0x7 /* extended vc count */
#define PCI_VC_PORT_REG2 8
+#define PCI_VC_REG2_32_PHASE 0x2
+#define PCI_VC_REG2_64_PHASE 0x4
+#define PCI_VC_REG2_128_PHASE 0x8
#define PCI_VC_PORT_CTRL 12
#define PCI_VC_PORT_STATUS 14
#define PCI_VC_RES_CAP 16
#define PCI_VC_RES_CTRL 20
#define PCI_VC_RES_STATUS 26
+#define PCI_CAP_VC_BASE_SIZEOF 0x10
+#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
/* Power Budgeting */
#define PCI_PWR_DSR 4 /* Data Select Register */
@@ -614,6 +670,7 @@
#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
#define PCI_PWR_CAP 12 /* Capability */
#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
+#define PCI_EXT_CAP_PWR_SIZEOF 16
/*
* Hypertransport sub capability types
@@ -646,6 +703,8 @@
#define HT_CAPTYPE_ERROR_RETRY 0xC0 /* Retry on error configuration */
#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */
#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */
+#define HT_CAP_SIZEOF_LONG 28 /* slave & primary */
+#define HT_CAP_SIZEOF_SHORT 24 /* host & secondary */
/* Alternative Routing-ID Interpretation */
#define PCI_ARI_CAP 0x04 /* ARI Capability Register */
@@ -656,6 +715,7 @@
#define PCI_ARI_CTRL_MFVC 0x0001 /* MFVC Function Groups Enable */
#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
+#define PCI_EXT_CAP_ARI_SIZEOF 8
/* Address Translation Service */
#define PCI_ATS_CAP 0x04 /* ATS Capability Register */
@@ -665,6 +725,7 @@
#define PCI_ATS_CTRL_ENABLE 0x8000 /* ATS Enable */
#define PCI_ATS_CTRL_STU(x) ((x) & 0x1f) /* Smallest Translation Unit */
#define PCI_ATS_MIN_STU 12 /* shift of minimum STU block */
+#define PCI_EXT_CAP_ATS_SIZEOF 8
/* Page Request Interface */
#define PCI_PRI_CTRL 0x04 /* PRI control register */
@@ -676,6 +737,7 @@
#define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */
#define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
#define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
+#define PCI_EXT_CAP_PRI_SIZEOF 16
/* PASID capability */
#define PCI_PASID_CAP 0x04 /* PASID feature register */
@@ -685,6 +747,7 @@
#define PCI_PASID_CTRL_ENABLE 0x01 /* Enable bit */
#define PCI_PASID_CTRL_EXEC 0x02 /* Exec permissions Enable */
#define PCI_PASID_CTRL_PRIV 0x04 /* Priviledge Mode Enable */
+#define PCI_EXT_CAP_PASID_SIZEOF 8
/* Single Root I/O Virtualization */
#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
@@ -716,12 +779,14 @@
#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
+#define PCI_EXT_CAP_SRIOV_SIZEOF 64
#define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6
#define PCI_LTR_VALUE_MASK 0x000003ff
#define PCI_LTR_SCALE_MASK 0x00001c00
#define PCI_LTR_SCALE_SHIFT 10
+#define PCI_EXT_CAP_LTR_SIZEOF 8
/* Access Control Service */
#define PCI_ACS_CAP 0x04 /* ACS Capability Register */
@@ -732,7 +797,38 @@
#define PCI_ACS_UF 0x10 /* Upstream Forwarding */
#define PCI_ACS_EC 0x20 /* P2P Egress Control */
#define PCI_ACS_DT 0x40 /* Direct Translated P2P */
+#define PCI_ACS_EGRESS_BITS 0x05 /* ACS Egress Control Vector Size */
#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
+#define PCI_VSEC_HDR 4 /* extended cap - vendor specific */
+#define PCI_VSEC_HDR_LEN_SHIFT 20 /* shift for length field */
+
+/* sata capability */
+#define PCI_SATA_REGS 4 /* SATA REGs specifier */
+#define PCI_SATA_REGS_MASK 0xF /* location - BAR#/inline */
+#define PCI_SATA_REGS_INLINE 0xF /* REGS in config space */
+#define PCI_SATA_SIZEOF_SHORT 8
+#define PCI_SATA_SIZEOF_LONG 16
+
+/* resizable BARs */
+#define PCI_REBAR_CTRL 8 /* control register */
+#define PCI_REBAR_CTRL_NBAR_MASK (7 << 5) /* mask for # bars */
+#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # bars */
+
+/* dynamic power allocation */
+#define PCI_DPA_CAP 4 /* capability register */
+#define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */
+#define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */
+
+/* TPH Requester */
+#define PCI_TPH_CAP 4 /* capability register */
+#define PCI_TPH_CAP_LOC_MASK 0x600 /* location mask */
+#define PCI_TPH_LOC_NONE 0x000 /* no location */
+#define PCI_TPH_LOC_CAP 0x200 /* in capability */
+#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
+#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */
+#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
+#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
+
#endif /* LINUX_PCI_REGS_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 45db49f64bb4..76c5c8b724a7 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -677,6 +677,7 @@ struct hw_perf_event {
u64 last_tag;
unsigned long config_base;
unsigned long event_base;
+ int event_base_rdpmc;
int idx;
int last_cpu;
@@ -1106,6 +1107,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr,
struct task_struct *task,
perf_overflow_handler_t callback,
void *context);
+extern void perf_pmu_migrate_context(struct pmu *pmu,
+ int src_cpu, int dst_cpu);
extern u64 perf_event_read_value(struct perf_event *event,
u64 *enabled, u64 *running);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index c291cae8ce32..93b3cf77f564 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -243,6 +243,15 @@ enum phy_state {
PHY_RESUMING
};
+/**
+ * struct phy_c45_device_ids - 802.3-c45 Device Identifiers
+ * @devices_in_package: Bit vector of devices present.
+ * @device_ids: The device identifer for each present device.
+ */
+struct phy_c45_device_ids {
+ u32 devices_in_package;
+ u32 device_ids[8];
+};
/* phy_device: An instance of a PHY
*
@@ -250,6 +259,8 @@ enum phy_state {
* bus: Pointer to the bus this PHY is on
* dev: driver model device structure for this PHY
* phy_id: UID for this device found during discovery
+ * c45_ids: 802.3-c45 Device Identifers if is_c45.
+ * is_c45: Set to true if this phy uses clause 45 addressing.
* state: state of the PHY for management purposes
* dev_flags: Device-specific flags used by the PHY driver.
* addr: Bus address of PHY
@@ -285,6 +296,9 @@ struct phy_device {
u32 phy_id;
+ struct phy_c45_device_ids c45_ids;
+ bool is_c45;
+
enum phy_state state;
u32 dev_flags;
@@ -412,6 +426,12 @@ struct phy_driver {
/* Clears up any memory if needed */
void (*remove)(struct phy_device *phydev);
+ /* Returns true if this is a suitable driver for the given
+ * phydev. If NULL, matching is based on phy_id and
+ * phy_id_mask.
+ */
+ int (*match_phy_device)(struct phy_device *phydev);
+
/* Handles ethtool queries for hardware time stamping. */
int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
@@ -480,7 +500,9 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
}
-struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+ bool is_c45, struct phy_c45_device_ids *c45_ids);
+struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
int phy_init_hw(struct phy_device *phydev);
struct phy_device * phy_attach(struct net_device *dev,
@@ -511,7 +533,9 @@ int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
void phy_driver_unregister(struct phy_driver *drv);
+void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver);
+int phy_drivers_register(struct phy_driver *new_driver, int n);
void phy_state_machine(struct work_struct *work);
void phy_start_machine(struct phy_device *phydev,
void (*handler)(struct net_device *));
@@ -532,6 +556,11 @@ int phy_register_fixup_for_uid(u32 phy_uid, u32 phy_uid_mask,
int (*run)(struct phy_device *));
int phy_scan_fixups(struct phy_device *phydev);
+int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
+int phy_get_eee_err(struct phy_device *phydev);
+int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
+int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
+
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h
index 3b894a668d32..69393a662532 100644
--- a/include/linux/pinctrl/pinctrl.h
+++ b/include/linux/pinctrl/pinctrl.h
@@ -131,8 +131,9 @@ extern void pinctrl_unregister(struct pinctrl_dev *pctldev);
extern bool pin_is_valid(struct pinctrl_dev *pctldev, int pin);
extern void pinctrl_add_gpio_range(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range);
-extern void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range);
+extern void pinctrl_add_gpio_ranges(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *ranges,
+ unsigned nranges);
extern const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev);
extern void *pinctrl_dev_get_drvdata(struct pinctrl_dev *pctldev);
#else
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index defbde203d07..082eafaf026b 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -451,8 +451,10 @@ enum {
#define TCF_EM_U32 3
#define TCF_EM_META 4
#define TCF_EM_TEXT 5
-#define TCF_EM_VLAN 6
-#define TCF_EM_MAX 6
+#define TCF_EM_VLAN 6
+#define TCF_EM_CANID 7
+#define TCF_EM_IPSET 8
+#define TCF_EM_MAX 8
enum {
TCF_EM_PROG_TC
diff --git a/include/linux/platform_data/clk-integrator.h b/include/linux/platform_data/clk-integrator.h
new file mode 100644
index 000000000000..83fe9c283bb8
--- /dev/null
+++ b/include/linux/platform_data/clk-integrator.h
@@ -0,0 +1 @@
+void integrator_clk_init(bool is_cp);
diff --git a/include/linux/platform_data/clk-nomadik.h b/include/linux/platform_data/clk-nomadik.h
new file mode 100644
index 000000000000..5713c87b2477
--- /dev/null
+++ b/include/linux/platform_data/clk-nomadik.h
@@ -0,0 +1,2 @@
+/* Minimal platform data header */
+void nomadik_clk_init(void);
diff --git a/include/linux/platform_data/clk-u300.h b/include/linux/platform_data/clk-u300.h
new file mode 100644
index 000000000000..8429e73911a1
--- /dev/null
+++ b/include/linux/platform_data/clk-u300.h
@@ -0,0 +1 @@
+void __init u300_clk_init(void __iomem *base);
diff --git a/include/linux/platform_data/mmp_audio.h b/include/linux/platform_data/mmp_audio.h
new file mode 100644
index 000000000000..0f25d165abd6
--- /dev/null
+++ b/include/linux/platform_data/mmp_audio.h
@@ -0,0 +1,22 @@
+/*
+ * MMP Platform AUDIO Management
+ *
+ * Copyright (c) 2011 Marvell Semiconductors Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef MMP_AUDIO_H
+#define MMP_AUDIO_H
+
+struct mmp_audio_platdata {
+ u32 period_max_capture;
+ u32 buffer_max_capture;
+ u32 period_max_playback;
+ u32 buffer_max_playback;
+};
+
+#endif /* MMP_AUDIO_H */
diff --git a/drivers/staging/omapdrm/omap_priv.h b/include/linux/platform_data/omap_drm.h
index ef6441447147..3da73bdc2031 100644
--- a/drivers/staging/omapdrm/omap_priv.h
+++ b/include/linux/platform_data/omap_drm.h
@@ -1,8 +1,8 @@
/*
- * include/drm/omap_priv.h
+ * DRM/KMS platform data for TI OMAP platforms
*
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <rob@ti.com>
+ * Copyright (C) 2012 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -17,13 +17,11 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#ifndef __OMAP_PRIV_H__
-#define __OMAP_PRIV_H__
+#ifndef __PLATFORM_DATA_OMAP_DRM_H__
+#define __PLATFORM_DATA_OMAP_DRM_H__
-/* Non-userspace facing APIs
- */
-
-/* optional platform data to configure the default configuration of which
+/*
+ * Optional platform data to configure the default configuration of which
* pipes/overlays/CRTCs are used.. if this is not provided, then instead the
* first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
* one manager, with priority given to managers that are connected to
@@ -49,7 +47,6 @@ struct omap_kms_platform_data {
struct omap_drm_platform_data {
struct omap_kms_platform_data *kms_pdata;
- struct omap_dmm_platform_data *dmm_pdata;
};
-#endif /* __OMAP_DRM_H__ */
+#endif /* __PLATFORM_DATA_OMAP_DRM_H__ */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 30f794eb3826..a7d6172922d4 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/notifier.h>
+#include <linux/cpuidle.h>
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
@@ -45,6 +46,11 @@ struct gpd_dev_ops {
bool (*active_wakeup)(struct device *dev);
};
+struct gpd_cpu_data {
+ unsigned int saved_exit_latency;
+ struct cpuidle_state *idle_state;
+};
+
struct generic_pm_domain {
struct dev_pm_domain domain; /* PM domain operations */
struct list_head gpd_list_node; /* Node in the global PM domains list */
@@ -75,6 +81,7 @@ struct generic_pm_domain {
bool max_off_time_changed;
bool cached_power_down_ok;
struct device_node *of_node; /* Node in device tree */
+ struct gpd_cpu_data *cpu_data;
};
static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -105,6 +112,7 @@ struct generic_pm_domain_data {
struct gpd_timing_data td;
struct notifier_block nb;
struct mutex lock;
+ unsigned int refcount;
bool need_restore;
bool always_on;
};
@@ -155,6 +163,8 @@ extern int pm_genpd_add_callbacks(struct device *dev,
struct gpd_dev_ops *ops,
struct gpd_timing_data *td);
extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
+extern int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
+extern int genpd_detach_cpuidle(struct generic_pm_domain *genpd);
extern void pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off);
@@ -211,6 +221,14 @@ static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
{
return -ENOSYS;
}
+static inline int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
+{
+ return -ENOSYS;
+}
+static inline int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+ return -ENOSYS;
+}
static inline void pm_genpd_init(struct generic_pm_domain *genpd,
struct dev_power_governor *gov, bool is_off)
{
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 233149cb19f4..9924ea1f22e0 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -66,7 +66,7 @@ enum pm_qos_req_action {
static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
- return req->dev != 0;
+ return req->dev != NULL;
}
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
diff --git a/arch/arm/mach-omap2/smartreflex.h b/include/linux/power/smartreflex.h
index 5809141171f8..3101e62a1213 100644
--- a/arch/arm/mach-omap2/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -17,12 +17,13 @@
* published by the Free Software Foundation.
*/
-#ifndef __ASM_ARM_OMAP_SMARTREFLEX_H
-#define __ASM_ARM_OMAP_SMARTREFLEX_H
+#ifndef __POWER_SMARTREFLEX_H
+#define __POWER_SMARTREFLEX_H
+#include <linux/types.h>
#include <linux/platform_device.h>
-
-#include "voltage.h"
+#include <linux/delay.h>
+#include <plat/voltage.h>
/*
* Different Smartreflex IPs version. The v1 is the 65nm version used in
@@ -142,6 +143,51 @@
#define OMAP3430_SR_ERRWEIGHT 0x04
#define OMAP3430_SR_ERRMAXLIMIT 0x02
+struct omap_sr {
+ char *name;
+ struct list_head node;
+ struct platform_device *pdev;
+ struct omap_sr_nvalue_table *nvalue_table;
+ struct voltagedomain *voltdm;
+ struct dentry *dbg_dir;
+ unsigned int irq;
+ int srid;
+ int ip_type;
+ int nvalue_count;
+ bool autocomp_active;
+ u32 clk_length;
+ u32 err_weight;
+ u32 err_minlimit;
+ u32 err_maxlimit;
+ u32 accum_data;
+ u32 senn_avgweight;
+ u32 senp_avgweight;
+ u32 senp_mod;
+ u32 senn_mod;
+ void __iomem *base;
+};
+
+/**
+ * test_cond_timeout - busy-loop, testing a condition
+ * @cond: condition to test until it evaluates to true
+ * @timeout: maximum number of microseconds in the timeout
+ * @index: loop index (integer)
+ *
+ * Loop waiting for @cond to become true or until at least @timeout
+ * microseconds have passed. To use, define some integer @index in the
+ * calling code. After running, if @index == @timeout, then the loop has
+ * timed out.
+ *
+ * Copied from omap_test_timeout */
+#define sr_test_cond_timeout(cond, timeout, index) \
+({ \
+ for (index = 0; index < timeout; index++) { \
+ if (cond) \
+ break; \
+ udelay(1); \
+ } \
+})
+
/**
* struct omap_sr_pmic_data - Strucutre to be populated by pmic code to pass
* pmic specific info to smartreflex driver
@@ -161,7 +207,7 @@ struct omap_smartreflex_dev_attr {
const char *sensor_voltdm_name;
};
-#ifdef CONFIG_OMAP_SMARTREFLEX
+#ifdef CONFIG_POWER_AVS_OMAP
/*
* The smart reflex driver supports CLASS1 CLASS2 and CLASS3 SR.
* The smartreflex class driver should pass the class type.
@@ -186,10 +232,10 @@ struct omap_smartreflex_dev_attr {
* based decisions.
*/
struct omap_sr_class_data {
- int (*enable)(struct voltagedomain *voltdm);
- int (*disable)(struct voltagedomain *voltdm, int is_volt_reset);
- int (*configure)(struct voltagedomain *voltdm);
- int (*notify)(struct voltagedomain *voltdm, u32 status);
+ int (*enable)(struct omap_sr *sr);
+ int (*disable)(struct omap_sr *sr, int is_volt_reset);
+ int (*configure)(struct omap_sr *sr);
+ int (*notify)(struct omap_sr *sr, u32 status);
u8 notify_flags;
u8 class_type;
};
@@ -197,17 +243,22 @@ struct omap_sr_class_data {
/**
* struct omap_sr_nvalue_table - Smartreflex n-target value info
*
- * @efuse_offs: The offset of the efuse where n-target values are stored.
- * @nvalue: The n-target value.
+ * @efuse_offs: The offset of the efuse where n-target values are stored.
+ * @nvalue: The n-target value.
+ * @errminlimit: The value of the ERRMINLIMIT bitfield for this n-target
+ * @volt_nominal: microvolts DC that the VDD is initially programmed to
*/
struct omap_sr_nvalue_table {
u32 efuse_offs;
u32 nvalue;
+ u32 errminlimit;
+ unsigned long volt_nominal;
};
/**
* struct omap_sr_data - Smartreflex platform data.
*
+ * @name: instance name
* @ip_type: Smartreflex IP type.
* @senp_mod: SENPENABLE value for the sr
* @senn_mod: SENNENABLE value for sr
@@ -219,6 +270,7 @@ struct omap_sr_nvalue_table {
* @voltdm: Pointer to the voltage domain associated with the SR
*/
struct omap_sr_data {
+ const char *name;
int ip_type;
u32 senp_mod;
u32 senn_mod;
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 3988012255dc..289760f424aa 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -141,6 +141,8 @@
* Changing LSM security domain is considered a new privilege. So, for example,
* asking selinux for a specific new context (e.g. with runcon) will result
* in execve returning -EPERM.
+ *
+ * See Documentation/prctl/no_new_privs.txt for more details.
*/
#define PR_SET_NO_NEW_PRIVS 38
#define PR_GET_NO_NEW_PRIVS 39
diff --git a/include/linux/quota.h b/include/linux/quota.h
index c09fa042b5ea..524ede8a160a 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -333,7 +333,7 @@ struct quotactl_ops {
int (*quota_on)(struct super_block *, int, int, struct path *);
int (*quota_on_meta)(struct super_block *, int, int);
int (*quota_off)(struct super_block *, int);
- int (*quota_sync)(struct super_block *, int, int);
+ int (*quota_sync)(struct super_block *, int);
int (*get_info)(struct super_block *, int, struct if_dqinfo *);
int (*set_info)(struct super_block *, int, struct if_dqinfo *);
int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *);
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 17b977304a09..ec6b65feaaba 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -83,7 +83,8 @@ int dquot_quota_on(struct super_block *sb, int type, int format_id,
int dquot_quota_on_mount(struct super_block *sb, char *qf_name,
int format_id, int type);
int dquot_quota_off(struct super_block *sb, int type);
-int dquot_quota_sync(struct super_block *sb, int type, int wait);
+int dquot_writeback_dquots(struct super_block *sb, int type);
+int dquot_quota_sync(struct super_block *sb, int type);
int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
int dquot_get_dqblk(struct super_block *sb, int type, qid_t id,
@@ -255,6 +256,11 @@ static inline int dquot_resume(struct super_block *sb, int type)
#define dquot_file_open generic_file_open
+static inline int dquot_writeback_dquots(struct super_block *sb, int type)
+{
+ return 0;
+}
+
#endif /* CONFIG_QUOTA */
static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 26d1a47591f1..115ead2b5155 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -147,6 +147,7 @@ extern void synchronize_sched(void);
extern void __rcu_read_lock(void);
extern void __rcu_read_unlock(void);
+extern void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);
/*
@@ -184,7 +185,6 @@ static inline int rcu_preempt_depth(void)
/* Internal to kernel */
extern void rcu_sched_qs(int cpu);
extern void rcu_bh_qs(int cpu);
-extern void rcu_preempt_note_context_switch(void);
extern void rcu_check_callbacks(int cpu, int user);
struct notifier_block;
extern void rcu_idle_enter(void);
@@ -256,6 +256,10 @@ static inline void destroy_rcu_head_on_stack(struct rcu_head *head)
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP)
+extern int rcu_is_cpu_idle(void);
+#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SMP) */
+
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU)
bool rcu_lockdep_current_cpu_online(void);
#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
@@ -267,15 +271,6 @@ static inline bool rcu_lockdep_current_cpu_online(void)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#ifdef CONFIG_PROVE_RCU
-extern int rcu_is_cpu_idle(void);
-#else /* !CONFIG_PROVE_RCU */
-static inline int rcu_is_cpu_idle(void)
-{
- return 0;
-}
-#endif /* else !CONFIG_PROVE_RCU */
-
static inline void rcu_lock_acquire(struct lockdep_map *map)
{
lock_acquire(map, 0, 0, 2, 1, NULL, _THIS_IP_);
@@ -432,8 +427,7 @@ extern int rcu_my_thread_group_empty(void);
static inline void rcu_preempt_sleep_check(void)
{
rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
- "Illegal context switch in RCU read-side "
- "critical section");
+ "Illegal context switch in RCU read-side critical section");
}
#else /* #ifdef CONFIG_PROVE_RCU */
static inline void rcu_preempt_sleep_check(void)
@@ -514,10 +508,10 @@ static inline void rcu_preempt_sleep_check(void)
(_________p1); \
})
#define __rcu_assign_pointer(p, v, space) \
- ({ \
+ do { \
smp_wmb(); \
(p) = (typeof(*v) __force space *)(v); \
- })
+ } while (0)
/**
@@ -852,7 +846,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
*
* Assigns the specified value to the specified RCU-protected
* pointer, ensuring that any concurrent RCU readers will see
- * any prior initialization. Returns the value assigned.
+ * any prior initialization.
*
* Inserts memory barriers on architectures that require them
* (which is most of them), and also prevents the compiler from
@@ -904,25 +898,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
* the reader-accessible portions of the linked structure.
*/
#define RCU_INIT_POINTER(p, v) \
- p = (typeof(*v) __force __rcu *)(v)
-
-static __always_inline bool __is_kfree_rcu_offset(unsigned long offset)
-{
- return offset < 4096;
-}
-
-static __always_inline
-void __kfree_rcu(struct rcu_head *head, unsigned long offset)
-{
- typedef void (*rcu_callback)(struct rcu_head *);
-
- BUILD_BUG_ON(!__builtin_constant_p(offset));
-
- /* See the kfree_rcu() header comment. */
- BUILD_BUG_ON(!__is_kfree_rcu_offset(offset));
+ do { \
+ p = (typeof(*v) __force __rcu *)(v); \
+ } while (0)
- kfree_call_rcu(head, (rcu_callback)offset);
-}
+/**
+ * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer
+ *
+ * GCC-style initialization for an RCU-protected pointer in a structure field.
+ */
+#define RCU_POINTER_INITIALIZER(p, v) \
+ .p = (typeof(*v) __force __rcu *)(v)
/*
* Does the specified offset indicate that the corresponding rcu_head
@@ -936,7 +922,7 @@ void __kfree_rcu(struct rcu_head *head, unsigned long offset)
#define __kfree_rcu(head, offset) \
do { \
BUILD_BUG_ON(!__is_kfree_rcu_offset(offset)); \
- call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
+ kfree_call_rcu(head, (void (*)(struct rcu_head *))(unsigned long)(offset)); \
} while (0)
/**
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 854dc4c5c271..4e56a9c69a35 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -87,6 +87,10 @@ static inline void kfree_call_rcu(struct rcu_head *head,
#ifdef CONFIG_TINY_RCU
+static inline void rcu_preempt_note_context_switch(void)
+{
+}
+
static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
{
*delta_jiffies = ULONG_MAX;
@@ -95,6 +99,7 @@ static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
#else /* #ifdef CONFIG_TINY_RCU */
+void rcu_preempt_note_context_switch(void);
int rcu_preempt_needs_cpu(void);
static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
@@ -108,6 +113,7 @@ static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
static inline void rcu_note_context_switch(int cpu)
{
rcu_sched_qs(cpu);
+ rcu_preempt_note_context_switch();
}
/*
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 56af22ec9aba..7f7e00df3adf 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -14,12 +14,14 @@
*/
#include <linux/list.h>
+#include <linux/rbtree.h>
struct module;
struct device;
struct i2c_client;
struct spi_device;
struct regmap;
+struct regmap_range_cfg;
/* An enum of all the supported cache types */
enum regcache_type {
@@ -43,6 +45,14 @@ struct reg_default {
#ifdef CONFIG_REGMAP
+enum regmap_endian {
+ /* Unspecified -> 0 -> Backwards compatible default */
+ REGMAP_ENDIAN_DEFAULT = 0,
+ REGMAP_ENDIAN_BIG,
+ REGMAP_ENDIAN_LITTLE,
+ REGMAP_ENDIAN_NATIVE,
+};
+
/**
* Configuration for the register map of a device.
*
@@ -84,6 +94,15 @@ struct reg_default {
* @reg_defaults_raw: Power on reset values for registers (for use with
* register cache support).
* @num_reg_defaults_raw: Number of elements in reg_defaults_raw.
+ * @reg_format_endian: Endianness for formatted register addresses. If this is
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
+ * @val_format_endian: Endianness for formatted register values. If this is
+ * DEFAULT, the @reg_format_endian_default value from the
+ * regmap bus is used.
+ *
+ * @ranges: Array of configuration entries for virtual address ranges.
+ * @num_ranges: Number of range configuration entries.
*/
struct regmap_config {
const char *name;
@@ -109,6 +128,43 @@ struct regmap_config {
u8 write_flag_mask;
bool use_single_rw;
+
+ enum regmap_endian reg_format_endian;
+ enum regmap_endian val_format_endian;
+
+ const struct regmap_range_cfg *ranges;
+ unsigned int n_ranges;
+};
+
+/**
+ * Configuration for indirectly accessed or paged registers.
+ * Registers, mapped to this virtual range, are accessed in two steps:
+ * 1. page selector register update;
+ * 2. access through data window registers.
+ *
+ * @range_min: Address of the lowest register address in virtual range.
+ * @range_max: Address of the highest register in virtual range.
+ *
+ * @page_sel_reg: Register with selector field.
+ * @page_sel_mask: Bit shift for selector value.
+ * @page_sel_shift: Bit mask for selector value.
+ *
+ * @window_start: Address of first (lowest) register in data window.
+ * @window_len: Number of registers in data window.
+ */
+struct regmap_range_cfg {
+ /* Registers of virtual address range */
+ unsigned int range_min;
+ unsigned int range_max;
+
+ /* Page selector for indirect addressing */
+ unsigned int selector_reg;
+ unsigned int selector_mask;
+ int selector_shift;
+
+ /* Data window (per each page) */
+ unsigned int window_start;
+ unsigned int window_len;
};
typedef int (*regmap_hw_write)(void *context, const void *data,
@@ -133,6 +189,12 @@ typedef void (*regmap_hw_free_context)(void *context);
* data.
* @read_flag_mask: Mask to be set in the top byte of the register when doing
* a read.
+ * @reg_format_endian_default: Default endianness for formatted register
+ * addresses. Used when the regmap_config specifies DEFAULT. If this is
+ * DEFAULT, BIG is assumed.
+ * @val_format_endian_default: Default endianness for formatted register
+ * values. Used when the regmap_config specifies DEFAULT. If this is
+ * DEFAULT, BIG is assumed.
*/
struct regmap_bus {
bool fast_io;
@@ -141,6 +203,8 @@ struct regmap_bus {
regmap_hw_read read;
regmap_hw_free_context free_context;
u8 read_flag_mask;
+ enum regmap_endian reg_format_endian_default;
+ enum regmap_endian val_format_endian_default;
};
struct regmap *regmap_init(struct device *dev,
@@ -219,6 +283,7 @@ struct regmap_irq {
* @status_base: Base status register address.
* @mask_base: Base mask register address.
* @ack_base: Base ack address. If zero then the chip is clear on read.
+ * @wake_base: Base address for wake enables. If zero unsupported.
* @irq_reg_stride: Stride to use for chips where registers are not contiguous.
*
* @num_regs: Number of registers in each control bank.
@@ -232,6 +297,7 @@ struct regmap_irq_chip {
unsigned int status_base;
unsigned int mask_base;
unsigned int ack_base;
+ unsigned int wake_base;
unsigned int irq_reg_stride;
int num_regs;
@@ -243,7 +309,7 @@ struct regmap_irq_chip {
struct regmap_irq_chip_data;
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
- int irq_base, struct regmap_irq_chip *chip,
+ int irq_base, const struct regmap_irq_chip *chip,
struct regmap_irq_chip_data **data);
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *data);
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data);
@@ -361,7 +427,6 @@ static inline int regmap_register_patch(struct regmap *map,
static inline struct regmap *dev_get_regmap(struct device *dev,
const char *name)
{
- WARN_ONCE(1, "regmap API is disabled");
return NULL;
}
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 4ed1b30ac5fc..da339fd8c755 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -291,6 +291,12 @@ static inline int regulator_set_voltage(struct regulator *regulator,
static inline int regulator_get_voltage(struct regulator *regulator)
{
+ return -EINVAL;
+}
+
+static inline int regulator_is_supported_voltage(struct regulator *regulator,
+ int min_uV, int max_uV)
+{
return 0;
}
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index b0432cc2b169..bac4c871f3bd 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -32,6 +32,8 @@ enum regulator_status {
REGULATOR_STATUS_NORMAL,
REGULATOR_STATUS_IDLE,
REGULATOR_STATUS_STANDBY,
+ /* in case that any other status doesn't apply */
+ REGULATOR_STATUS_UNDEFINED,
};
/**
@@ -67,6 +69,8 @@ enum regulator_status {
*
* @enable_time: Time taken for the regulator voltage output voltage to
* stabilise after being enabled, in microseconds.
+ * @set_ramp_delay: Set the ramp delay for the regulator. The driver should
+ * select ramp delay equal to or less than(closest) ramp_delay.
* @set_voltage_time_sel: Time taken for the regulator voltage output voltage
* to stabilise after being set to a new value, in microseconds.
* The function provides the from and to voltage selector, the
@@ -113,6 +117,7 @@ struct regulator_ops {
/* Time taken to enable or set voltage on the regulator */
int (*enable_time) (struct regulator_dev *);
+ int (*set_ramp_delay) (struct regulator_dev *, int ramp_delay);
int (*set_voltage_time_sel) (struct regulator_dev *,
unsigned int old_selector,
unsigned int new_selector);
@@ -170,11 +175,15 @@ enum regulator_type {
*
* @min_uV: Voltage given by the lowest selector (if linear mapping)
* @uV_step: Voltage increase with each selector (if linear mapping)
+ * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @volt_table: Voltage mapping table (if table based mapping)
*
* @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
* @vsel_mask: Mask for register bitfield used for selector
* @enable_reg: Register for control when using regmap enable/disable ops
* @enable_mask: Mask for control when using regmap enable/disable ops
+ *
+ * @enable_time: Time taken for initial enable of regulator (in uS).
*/
struct regulator_desc {
const char *name;
@@ -188,11 +197,16 @@ struct regulator_desc {
unsigned int min_uV;
unsigned int uV_step;
+ unsigned int ramp_delay;
+
+ const unsigned int *volt_table;
unsigned int vsel_reg;
unsigned int vsel_mask;
unsigned int enable_reg;
unsigned int enable_mask;
+
+ unsigned int enable_time;
};
/**
@@ -208,6 +222,9 @@ struct regulator_desc {
* @of_node: OpenFirmware node to parse for device tree bindings (may be
* NULL).
* @regmap: regmap to use for core regmap helpers
+ * @ena_gpio: GPIO controlling regulator enable.
+ * @ena_gpio_invert: Sense for GPIO enable control.
+ * @ena_gpio_flags: Flags to use when calling gpio_request_one()
*/
struct regulator_config {
struct device *dev;
@@ -215,6 +232,10 @@ struct regulator_config {
void *driver_data;
struct device_node *of_node;
struct regmap *regmap;
+
+ int ena_gpio;
+ unsigned int ena_gpio_invert:1;
+ unsigned int ena_gpio_flags;
};
/*
@@ -253,6 +274,10 @@ struct regulator_dev {
void *reg_data; /* regulator_dev data */
struct dentry *debugfs;
+
+ int ena_gpio;
+ unsigned int ena_gpio_invert:1;
+ unsigned int ena_gpio_state:1;
};
struct regulator_dev *
@@ -271,6 +296,8 @@ int regulator_mode_to_status(unsigned int);
int regulator_list_voltage_linear(struct regulator_dev *rdev,
unsigned int selector);
+int regulator_list_voltage_table(struct regulator_dev *rdev,
+ unsigned int selector);
int regulator_map_voltage_linear(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_iterate(struct regulator_dev *rdev,
@@ -280,6 +307,9 @@ int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel);
int regulator_is_enabled_regmap(struct regulator_dev *rdev);
int regulator_enable_regmap(struct regulator_dev *rdev);
int regulator_disable_regmap(struct regulator_dev *rdev);
+int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector);
void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data);
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index f83f7440b488..48918be649d4 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -22,6 +22,7 @@ struct regulator_init_data;
/**
* struct fixed_voltage_config - fixed_voltage_config structure
* @supply_name: Name of the regulator supply
+ * @input_supply: Name of the input regulator supply
* @microvolts: Output voltage of regulator
* @gpio: GPIO to use for enable control
* set to -EINVAL if not used
@@ -46,6 +47,7 @@ struct regulator_init_data;
*/
struct fixed_voltage_config {
const char *supply_name;
+ const char *input_supply;
int microvolts;
int gpio;
unsigned startup_delay;
@@ -58,14 +60,17 @@ struct fixed_voltage_config {
struct regulator_consumer_supply;
#if IS_ENABLED(CONFIG_REGULATOR)
-struct platform_device *regulator_register_fixed(int id,
- struct regulator_consumer_supply *supplies, int num_supplies);
+struct platform_device *regulator_register_always_on(int id, const char *name,
+ struct regulator_consumer_supply *supplies, int num_supplies, int uv);
#else
-static inline struct platform_device *regulator_register_fixed(int id,
- struct regulator_consumer_supply *supplies, int num_supplies)
+static inline struct platform_device *regulator_register_always_on(int id, const char *name,
+ struct regulator_consumer_supply *supplies, int num_supplies, int uv)
{
return NULL;
}
#endif
+#define regulator_register_fixed(id, s, ns) regulator_register_always_on(id, \
+ "fixed-dummy", s, ns, 0)
+
#endif
diff --git a/include/linux/regulator/lp872x.h b/include/linux/regulator/lp872x.h
new file mode 100644
index 000000000000..132e05c46661
--- /dev/null
+++ b/include/linux/regulator/lp872x.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Texas Instruments
+ *
+ * Author: Milo(Woogyom) Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __LP872X_REGULATOR_H__
+#define __LP872X_REGULATOR_H__
+
+#include <linux/regulator/machine.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+
+#define LP872X_MAX_REGULATORS 9
+
+enum lp872x_regulator_id {
+ LP8720_ID_BASE,
+ LP8720_ID_LDO1 = LP8720_ID_BASE,
+ LP8720_ID_LDO2,
+ LP8720_ID_LDO3,
+ LP8720_ID_LDO4,
+ LP8720_ID_LDO5,
+ LP8720_ID_BUCK,
+
+ LP8725_ID_BASE,
+ LP8725_ID_LDO1 = LP8725_ID_BASE,
+ LP8725_ID_LDO2,
+ LP8725_ID_LDO3,
+ LP8725_ID_LDO4,
+ LP8725_ID_LDO5,
+ LP8725_ID_LILO1,
+ LP8725_ID_LILO2,
+ LP8725_ID_BUCK1,
+ LP8725_ID_BUCK2,
+
+ LP872X_ID_MAX,
+};
+
+enum lp872x_dvs_state {
+ DVS_LOW = GPIOF_OUT_INIT_LOW,
+ DVS_HIGH = GPIOF_OUT_INIT_HIGH,
+};
+
+enum lp872x_dvs_sel {
+ SEL_V1,
+ SEL_V2,
+};
+
+/**
+ * lp872x_dvs
+ * @gpio : gpio pin number for dvs control
+ * @vsel : dvs selector for buck v1 or buck v2 register
+ * @init_state : initial dvs pin state
+ */
+struct lp872x_dvs {
+ int gpio;
+ enum lp872x_dvs_sel vsel;
+ enum lp872x_dvs_state init_state;
+};
+
+/**
+ * lp872x_regdata
+ * @id : regulator id
+ * @init_data : init data for each regulator
+ */
+struct lp872x_regulator_data {
+ enum lp872x_regulator_id id;
+ struct regulator_init_data *init_data;
+};
+
+/**
+ * lp872x_platform_data
+ * @general_config : the value of LP872X_GENERAL_CFG register
+ * @update_config : if LP872X_GENERAL_CFG register is updated, set true
+ * @regulator_data : platform regulator id and init data
+ * @dvs : dvs data for buck voltage control
+ */
+struct lp872x_platform_data {
+ u8 general_config;
+ bool update_config;
+ struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
+ struct lp872x_dvs *dvs;
+};
+
+#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index b02108446be7..40dd0a394cfa 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -92,6 +92,7 @@ struct regulator_state {
* mode.
* @initial_state: Suspend state to set by default.
* @initial_mode: Mode to set at startup.
+ * @ramp_delay: Time to settle down after voltage change (unit: uV/us)
*/
struct regulation_constraints {
@@ -125,6 +126,8 @@ struct regulation_constraints {
/* mode to set on startup */
unsigned int initial_mode;
+ unsigned int ramp_delay;
+
/* constraint flags */
unsigned always_on:1; /* regulator never off when system is on */
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index a8e50e44203c..82a673905edb 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -38,6 +38,8 @@
#include <linux/types.h>
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
/* The feature bitmap for virtio rpmsg */
#define VIRTIO_RPMSG_F_NS 0 /* RP supports name service notifications */
@@ -120,7 +122,9 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
* @rpdev: rpmsg channel device
+ * @refcount: when this drops to zero, the ept is deallocated
* @cb: rx callback handler
+ * @cb_lock: must be taken before accessing/changing @cb
* @addr: local rpmsg address
* @priv: private data for the driver's use
*
@@ -140,7 +144,9 @@ typedef void (*rpmsg_rx_cb_t)(struct rpmsg_channel *, void *, int, void *, u32);
*/
struct rpmsg_endpoint {
struct rpmsg_channel *rpdev;
+ struct kref refcount;
rpmsg_rx_cb_t cb;
+ struct mutex cb_lock;
u32 addr;
void *priv;
};
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2c1de8982c85..db71c4ad8624 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -612,12 +612,6 @@ struct tcamsg {
#include <linux/mutex.h>
#include <linux/netdevice.h>
-static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
-{
- int len = strlen(str) + 1;
- return len > rta->rta_len || memcmp(RTA_DATA(rta), str, len);
-}
-
extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo);
extern int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid);
extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
@@ -625,124 +619,7 @@ extern void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid,
extern void rtnl_set_sk_err(struct net *net, u32 group, int error);
extern int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics);
extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
- u32 id, u32 ts, u32 tsage, long expires,
- u32 error);
-
-extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
-
-#define RTA_PUT(skb, attrtype, attrlen, data) \
-({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \
- goto rtattr_failure; \
- __rta_fill(skb, attrtype, attrlen, data); })
-
-#define RTA_APPEND(skb, attrlen, data) \
-({ if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \
- goto rtattr_failure; \
- memcpy(skb_put(skb, attrlen), data, attrlen); })
-
-#define RTA_PUT_NOHDR(skb, attrlen, data) \
-({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \
- memset(skb_tail_pointer(skb) - (RTA_ALIGN(attrlen) - attrlen), 0, \
- RTA_ALIGN(attrlen) - attrlen); })
-
-#define RTA_PUT_U8(skb, attrtype, value) \
-({ u8 _tmp = (value); \
- RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); })
-
-#define RTA_PUT_U16(skb, attrtype, value) \
-({ u16 _tmp = (value); \
- RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); })
-
-#define RTA_PUT_U32(skb, attrtype, value) \
-({ u32 _tmp = (value); \
- RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); })
-
-#define RTA_PUT_U64(skb, attrtype, value) \
-({ u64 _tmp = (value); \
- RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); })
-
-#define RTA_PUT_SECS(skb, attrtype, value) \
- RTA_PUT_U64(skb, attrtype, (value) / HZ)
-
-#define RTA_PUT_MSECS(skb, attrtype, value) \
- RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value))
-
-#define RTA_PUT_STRING(skb, attrtype, value) \
- RTA_PUT(skb, attrtype, strlen(value) + 1, value)
-
-#define RTA_PUT_FLAG(skb, attrtype) \
- RTA_PUT(skb, attrtype, 0, NULL);
-
-#define RTA_NEST(skb, type) \
-({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \
- RTA_PUT(skb, type, 0, NULL); \
- __start; })
-
-#define RTA_NEST_END(skb, start) \
-({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
- (skb)->len; })
-
-#define RTA_NEST_COMPAT(skb, type, attrlen, data) \
-({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \
- RTA_PUT(skb, type, attrlen, data); \
- RTA_NEST(skb, type); \
- __start; })
-
-#define RTA_NEST_COMPAT_END(skb, start) \
-({ struct rtattr *__nest = (void *)(start) + NLMSG_ALIGN((start)->rta_len); \
- (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \
- RTA_NEST_END(skb, __nest); \
- (skb)->len; })
-
-#define RTA_NEST_CANCEL(skb, start) \
-({ if (start) \
- skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
- -1; })
-
-#define RTA_GET_U8(rta) \
-({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \
- goto rtattr_failure; \
- *(u8 *) RTA_DATA(rta); })
-
-#define RTA_GET_U16(rta) \
-({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \
- goto rtattr_failure; \
- *(u16 *) RTA_DATA(rta); })
-
-#define RTA_GET_U32(rta) \
-({ if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \
- goto rtattr_failure; \
- *(u32 *) RTA_DATA(rta); })
-
-#define RTA_GET_U64(rta) \
-({ u64 _tmp; \
- if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \
- goto rtattr_failure; \
- memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \
- _tmp; })
-
-#define RTA_GET_FLAG(rta) (!!(rta))
-
-#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ)
-#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta)))
-
-static inline struct rtattr *
-__rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
-{
- struct rtattr *rta;
- int size = RTA_LENGTH(attrlen);
-
- rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
- rta->rta_type = attrtype;
- rta->rta_len = size;
- memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
- return rta;
-}
-
-#define __RTA_PUT(skb, attrtype, attrlen) \
-({ if (unlikely(skb_tailroom(skb) < (int)RTA_SPACE(attrlen))) \
- goto rtattr_failure; \
- __rta_reserve(skb, attrtype, attrlen); })
+ u32 id, long expires, u32 error);
extern void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change);
@@ -794,13 +671,6 @@ extern void __rtnl_unlock(void);
} \
} while(0)
-static inline u32 rtm_get_table(struct rtattr **rta, u8 table)
-{
- return RTA_GET_U32(rta[RTA_TABLE-1]);
-rtattr_failure:
- return table;
-}
-
extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4059c0f33f07..1a2ebd39b800 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1405,7 +1405,7 @@ struct task_struct {
int (*notifier)(void *priv);
void *notifier_data;
sigset_t *notifier_mask;
- struct hlist_head task_works;
+ struct callback_head *task_works;
struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
@@ -1546,7 +1546,6 @@ struct task_struct {
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
- struct list_head *scm_work_list;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
@@ -1581,7 +1580,6 @@ struct task_struct {
#endif
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
- int uprobe_srcu_id;
#endif
};
@@ -1871,22 +1869,12 @@ static inline void rcu_copy_process(struct task_struct *p)
INIT_LIST_HEAD(&p->rcu_node_entry);
}
-static inline void rcu_switch_from(struct task_struct *prev)
-{
- if (prev->rcu_read_lock_nesting != 0)
- rcu_preempt_note_context_switch();
-}
-
#else
static inline void rcu_copy_process(struct task_struct *p)
{
}
-static inline void rcu_switch_from(struct task_struct *prev)
-{
-}
-
#endif
#ifdef CONFIG_SMP
@@ -1909,6 +1897,14 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p,
}
#endif
+#ifdef CONFIG_NO_HZ
+void calc_load_enter_idle(void);
+void calc_load_exit_idle(void);
+#else
+static inline void calc_load_enter_idle(void) { }
+static inline void calc_load_exit_idle(void) { }
+#endif /* CONFIG_NO_HZ */
+
#ifndef CONFIG_CPUMASK_OFFSTACK
static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
{
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index fc61854f6224..83c44eefe698 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -86,6 +86,7 @@ int seq_puts(struct seq_file *m, const char *s);
int seq_write(struct seq_file *seq, const void *data, size_t len);
__printf(2, 3) int seq_printf(struct seq_file *, const char *, ...);
+__printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args);
int seq_path(struct seq_file *, const struct path *, const char *);
int seq_dentry(struct seq_file *, struct dentry *, const char *);
diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h
index c4a5a8cd4469..631af63af42d 100644
--- a/include/linux/sfi_acpi.h
+++ b/include/linux/sfi_acpi.h
@@ -66,7 +66,7 @@ extern int sfi_acpi_table_parse(char *signature, char *oem_id,
char *oem_table_id,
int (*handler)(struct acpi_table_header *));
-static inline int acpi_sfi_table_parse(char *signature,
+static inline int __init acpi_sfi_table_parse(char *signature,
int (*handler)(struct acpi_table_header *))
{
if (!acpi_table_parse(signature, handler))
@@ -83,7 +83,7 @@ static inline int sfi_acpi_table_parse(char *signature, char *oem_id,
return -1;
}
-static inline int acpi_sfi_table_parse(char *signature,
+static inline int __init acpi_sfi_table_parse(char *signature,
int (*handler)(struct acpi_table_header *))
{
return acpi_table_parse(signature, handler);
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
index c513b73cd7cb..50910913b268 100644
--- a/include/linux/sh_clk.h
+++ b/include/linux/sh_clk.h
@@ -18,7 +18,6 @@ struct clk_mapping {
struct kref ref;
};
-
struct sh_clk_ops {
#ifdef CONFIG_SH_CLK_CPG_LEGACY
void (*init)(struct clk *clk);
@@ -31,6 +30,10 @@ struct sh_clk_ops {
long (*round_rate)(struct clk *clk, unsigned long rate);
};
+#define SH_CLK_DIV_MSK(div) ((1 << (div)) - 1)
+#define SH_CLK_DIV4_MSK SH_CLK_DIV_MSK(4)
+#define SH_CLK_DIV6_MSK SH_CLK_DIV_MSK(6)
+
struct clk {
struct list_head node;
struct clk *parent;
@@ -52,6 +55,7 @@ struct clk {
unsigned int enable_bit;
void __iomem *mapped_reg;
+ unsigned int div_mask;
unsigned long arch_flags;
void *priv;
struct clk_mapping *mapping;
@@ -65,6 +69,8 @@ struct clk {
#define CLK_ENABLE_REG_16BIT BIT(2)
#define CLK_ENABLE_REG_8BIT BIT(3)
+#define CLK_MASK_DIV_ON_DISABLE BIT(4)
+
#define CLK_ENABLE_REG_MASK (CLK_ENABLE_REG_32BIT | \
CLK_ENABLE_REG_16BIT | \
CLK_ENABLE_REG_8BIT)
@@ -146,14 +152,17 @@ static inline int __deprecated sh_clk_mstp32_register(struct clk *clks, int nr)
.enable_reg = (void __iomem *)_reg, \
.enable_bit = _shift, \
.arch_flags = _div_bitmap, \
+ .div_mask = SH_CLK_DIV4_MSK, \
.flags = _flags, \
}
-struct clk_div4_table {
+struct clk_div_table {
struct clk_div_mult_table *div_mult_table;
void (*kick)(struct clk *clk);
};
+#define clk_div4_table clk_div_table
+
int sh_clk_div4_register(struct clk *clks, int nr,
struct clk_div4_table *table);
int sh_clk_div4_enable_register(struct clk *clks, int nr,
@@ -165,7 +174,9 @@ int sh_clk_div4_reparent_register(struct clk *clks, int nr,
_num_parents, _src_shift, _src_width) \
{ \
.enable_reg = (void __iomem *)_reg, \
- .flags = _flags, \
+ .enable_bit = 0, /* unused */ \
+ .flags = _flags | CLK_MASK_DIV_ON_DISABLE, \
+ .div_mask = SH_CLK_DIV6_MSK, \
.parent_table = _parents, \
.parent_num = _num_parents, \
.src_shift = _src_shift, \
@@ -176,7 +187,9 @@ int sh_clk_div4_reparent_register(struct clk *clks, int nr,
{ \
.parent = _parent, \
.enable_reg = (void __iomem *)_reg, \
- .flags = _flags, \
+ .enable_bit = 0, /* unused */ \
+ .div_mask = SH_CLK_DIV6_MSK, \
+ .flags = _flags | CLK_MASK_DIV_ON_DISABLE, \
}
int sh_clk_div6_register(struct clk *clks, int nr);
diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h
index 425450b980b8..b64d6bec6f90 100644
--- a/include/linux/sh_dma.h
+++ b/include/linux/sh_dma.h
@@ -10,38 +10,27 @@
#ifndef SH_DMA_H
#define SH_DMA_H
-#include <linux/list.h>
#include <linux/dmaengine.h>
+#include <linux/list.h>
+#include <linux/shdma-base.h>
+#include <linux/types.h>
+
+struct device;
/* Used by slave DMA clients to request DMA to/from a specific peripheral */
struct sh_dmae_slave {
- unsigned int slave_id; /* Set by the platform */
- struct device *dma_dev; /* Set by the platform */
- const struct sh_dmae_slave_config *config; /* Set by the driver */
-};
-
-struct sh_dmae_regs {
- u32 sar; /* SAR / source address */
- u32 dar; /* DAR / destination address */
- u32 tcr; /* TCR / transfer count */
-};
-
-struct sh_desc {
- struct sh_dmae_regs hw;
- struct list_head node;
- struct dma_async_tx_descriptor async_tx;
- enum dma_transfer_direction direction;
- dma_cookie_t cookie;
- size_t partial;
- int chunks;
- int mark;
+ struct shdma_slave shdma_slave; /* Set by the platform */
};
+/*
+ * Supplied by platforms to specify, how a DMA channel has to be configured for
+ * a certain peripheral
+ */
struct sh_dmae_slave_config {
- unsigned int slave_id;
- dma_addr_t addr;
- u32 chcr;
- char mid_rid;
+ int slave_id;
+ dma_addr_t addr;
+ u32 chcr;
+ char mid_rid;
};
struct sh_dmae_channel {
@@ -110,4 +99,6 @@ struct sh_dmae_pdata {
#define CHCR_TE 0x00000002
#define CHCR_IE 0x00000004
+bool shdma_chan_filter(struct dma_chan *chan, void *arg);
+
#endif
diff --git a/include/linux/sh_pfc.h b/include/linux/sh_pfc.h
index 5c15aed9c4b2..c19a0925829a 100644
--- a/include/linux/sh_pfc.h
+++ b/include/linux/sh_pfc.h
@@ -11,22 +11,24 @@
#ifndef __SH_PFC_H
#define __SH_PFC_H
+#include <linux/stringify.h>
#include <asm-generic/gpio.h>
typedef unsigned short pinmux_enum_t;
typedef unsigned short pinmux_flag_t;
-#define PINMUX_TYPE_NONE 0
-#define PINMUX_TYPE_FUNCTION 1
-#define PINMUX_TYPE_GPIO 2
-#define PINMUX_TYPE_OUTPUT 3
-#define PINMUX_TYPE_INPUT 4
-#define PINMUX_TYPE_INPUT_PULLUP 5
-#define PINMUX_TYPE_INPUT_PULLDOWN 6
+enum {
+ PINMUX_TYPE_NONE,
-#define PINMUX_FLAG_TYPE (0x7)
-#define PINMUX_FLAG_WANT_PULLUP (1 << 3)
-#define PINMUX_FLAG_WANT_PULLDOWN (1 << 4)
+ PINMUX_TYPE_FUNCTION,
+ PINMUX_TYPE_GPIO,
+ PINMUX_TYPE_OUTPUT,
+ PINMUX_TYPE_INPUT,
+ PINMUX_TYPE_INPUT_PULLUP,
+ PINMUX_TYPE_INPUT_PULLDOWN,
+
+ PINMUX_FLAG_TYPE, /* must be last */
+};
#define PINMUX_FLAG_DBIT_SHIFT 5
#define PINMUX_FLAG_DBIT (0x1f << PINMUX_FLAG_DBIT_SHIFT)
@@ -36,9 +38,12 @@ typedef unsigned short pinmux_flag_t;
struct pinmux_gpio {
pinmux_enum_t enum_id;
pinmux_flag_t flags;
+ const char *name;
};
-#define PINMUX_GPIO(gpio, data_or_mark) [gpio] = { data_or_mark }
+#define PINMUX_GPIO(gpio, data_or_mark) \
+ [gpio] = { .name = __stringify(gpio), .enum_id = data_or_mark, .flags = PINMUX_TYPE_NONE }
+
#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
struct pinmux_cfg_reg {
@@ -89,7 +94,7 @@ struct pfc_window {
unsigned long size;
};
-struct pinmux_info {
+struct sh_pfc {
char *name;
pinmux_enum_t reserved_id;
struct pinmux_range data;
@@ -112,17 +117,45 @@ struct pinmux_info {
struct pinmux_irq *gpio_irq;
unsigned int gpio_irq_size;
+ spinlock_t lock;
+
struct resource *resource;
unsigned int num_resources;
struct pfc_window *window;
unsigned long unlock_reg;
-
- struct gpio_chip chip;
};
-int register_pinmux(struct pinmux_info *pip);
-int unregister_pinmux(struct pinmux_info *pip);
+/* XXX compat for now */
+#define pinmux_info sh_pfc
+
+/* drivers/sh/pfc/gpio.c */
+int sh_pfc_register_gpiochip(struct sh_pfc *pfc);
+
+/* drivers/sh/pfc/pinctrl.c */
+int sh_pfc_register_pinctrl(struct sh_pfc *pfc);
+
+/* drivers/sh/pfc/core.c */
+int register_sh_pfc(struct sh_pfc *pfc);
+
+int sh_pfc_read_bit(struct pinmux_data_reg *dr, unsigned long in_pos);
+void sh_pfc_write_bit(struct pinmux_data_reg *dr, unsigned long in_pos,
+ unsigned long value);
+int sh_pfc_get_data_reg(struct sh_pfc *pfc, unsigned gpio,
+ struct pinmux_data_reg **drp, int *bitp);
+int sh_pfc_gpio_to_enum(struct sh_pfc *pfc, unsigned gpio, int pos,
+ pinmux_enum_t *enum_idp);
+int sh_pfc_config_gpio(struct sh_pfc *pfc, unsigned gpio, int pinmux_type,
+ int cfg_mode);
+
+/* xxx */
+static inline int register_pinmux(struct pinmux_info *pip)
+{
+ struct sh_pfc *pfc = pip;
+ return register_sh_pfc(pfc);
+}
+
+enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
/* helper macro for port */
#define PORT_1(fn, pfx, sfx) fn(pfx, sfx)
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
new file mode 100644
index 000000000000..93f9821554b6
--- /dev/null
+++ b/include/linux/shdma-base.h
@@ -0,0 +1,124 @@
+/*
+ * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
+ *
+ * extracted from shdma.c and headers
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SHDMA_BASE_H
+#define SHDMA_BASE_H
+
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+/**
+ * shdma_pm_state - DMA channel PM state
+ * SHDMA_PM_ESTABLISHED: either idle or during data transfer
+ * SHDMA_PM_BUSY: during the transfer preparation, when we have to
+ * drop the lock temporarily
+ * SHDMA_PM_PENDING: transfers pending
+ */
+enum shdma_pm_state {
+ SHDMA_PM_ESTABLISHED,
+ SHDMA_PM_BUSY,
+ SHDMA_PM_PENDING,
+};
+
+struct device;
+
+/*
+ * Drivers, using this library are expected to embed struct shdma_dev,
+ * struct shdma_chan, struct shdma_desc, and struct shdma_slave
+ * in their respective device, channel, descriptor and slave objects.
+ */
+
+struct shdma_slave {
+ int slave_id;
+};
+
+struct shdma_desc {
+ struct list_head node;
+ struct dma_async_tx_descriptor async_tx;
+ enum dma_transfer_direction direction;
+ dma_cookie_t cookie;
+ int chunks;
+ int mark;
+};
+
+struct shdma_chan {
+ spinlock_t chan_lock; /* Channel operation lock */
+ struct list_head ld_queue; /* Link descriptors queue */
+ struct list_head ld_free; /* Free link descriptors */
+ struct dma_chan dma_chan; /* DMA channel */
+ struct device *dev; /* Channel device */
+ void *desc; /* buffer for descriptor array */
+ int desc_num; /* desc count */
+ size_t max_xfer_len; /* max transfer length */
+ int id; /* Raw id of this channel */
+ int irq; /* Channel IRQ */
+ int slave_id; /* Client ID for slave DMA */
+ enum shdma_pm_state pm_state;
+};
+
+/**
+ * struct shdma_ops - simple DMA driver operations
+ * desc_completed: return true, if this is the descriptor, that just has
+ * completed (atomic)
+ * halt_channel: stop DMA channel operation (atomic)
+ * channel_busy: return true, if the channel is busy (atomic)
+ * slave_addr: return slave DMA address
+ * desc_setup: set up the hardware specific descriptor portion (atomic)
+ * set_slave: bind channel to a slave
+ * setup_xfer: configure channel hardware for operation (atomic)
+ * start_xfer: start the DMA transfer (atomic)
+ * embedded_desc: return Nth struct shdma_desc pointer from the
+ * descriptor array
+ * chan_irq: process channel IRQ, return true if a transfer has
+ * completed (atomic)
+ */
+struct shdma_ops {
+ bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *);
+ void (*halt_channel)(struct shdma_chan *);
+ bool (*channel_busy)(struct shdma_chan *);
+ dma_addr_t (*slave_addr)(struct shdma_chan *);
+ int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
+ dma_addr_t, dma_addr_t, size_t *);
+ int (*set_slave)(struct shdma_chan *, int, bool);
+ void (*setup_xfer)(struct shdma_chan *, int);
+ void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
+ struct shdma_desc *(*embedded_desc)(void *, int);
+ bool (*chan_irq)(struct shdma_chan *, int);
+};
+
+struct shdma_dev {
+ struct dma_device dma_dev;
+ struct shdma_chan **schan;
+ const struct shdma_ops *ops;
+ size_t desc_size;
+};
+
+#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
+ i < (d)->dma_dev.chancnt; c = (d)->schan[++i])
+
+int shdma_request_irq(struct shdma_chan *, int,
+ unsigned long, const char *);
+void shdma_free_irq(struct shdma_chan *);
+bool shdma_reset(struct shdma_dev *sdev);
+void shdma_chan_probe(struct shdma_dev *sdev,
+ struct shdma_chan *schan, int id);
+void shdma_chan_remove(struct shdma_chan *schan);
+int shdma_init(struct device *dev, struct shdma_dev *sdev,
+ int chan_num);
+void shdma_cleanup(struct shdma_dev *sdev);
+
+#endif
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 642cb7355df3..d205c4be7f5b 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1667,6 +1667,22 @@ static inline void skb_orphan(struct sk_buff *skb)
}
/**
+ * skb_orphan_frags - orphan the frags contained in a buffer
+ * @skb: buffer to orphan frags from
+ * @gfp_mask: allocation mask for replacement pages
+ *
+ * For each frag in the SKB which needs a destructor (i.e. has an
+ * owner) create a copy of that frag and release the original
+ * page by calling the destructor.
+ */
+static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
+{
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
+ return 0;
+ return skb_copy_ubufs(skb, gfp_mask);
+}
+
+/**
* __skb_queue_purge - empty a list
* @list: list to empty
*
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 717fb746c9a8..dd6f06be3c9f 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -90,10 +90,6 @@ void kick_all_cpus_sync(void);
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
void generic_smp_call_function_interrupt(void);
-void ipi_call_lock(void);
-void ipi_call_unlock(void);
-void ipi_call_lock_irq(void);
-void ipi_call_unlock_irq(void);
#else
static inline void call_function_init(void) { }
#endif
@@ -181,7 +177,6 @@ static inline int up_smp_call_function(smp_call_func_t func, void *info)
} while (0)
static inline void smp_send_reschedule(int cpu) { }
-#define num_booting_cpus() 1
#define smp_prepare_boot_cpu() do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
(up_smp_call_function(func, info))
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 2e68f5ba0389..00bc189cb395 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -208,7 +208,6 @@ enum
LINUX_MIB_TCPDSACKOFOSENT, /* TCPDSACKOfoSent */
LINUX_MIB_TCPDSACKRECV, /* TCPDSACKRecv */
LINUX_MIB_TCPDSACKOFORECV, /* TCPDSACKOfoRecv */
- LINUX_MIB_TCPABORTONSYN, /* TCPAbortOnSyn */
LINUX_MIB_TCPABORTONDATA, /* TCPAbortOnData */
LINUX_MIB_TCPABORTONCLOSE, /* TCPAbortOnClose */
LINUX_MIB_TCPABORTONMEMORY, /* TCPAbortOnMemory */
@@ -233,7 +232,13 @@ enum
LINUX_MIB_TCPREQQFULLDOCOOKIES, /* TCPReqQFullDoCookies */
LINUX_MIB_TCPREQQFULLDROP, /* TCPReqQFullDrop */
LINUX_MIB_TCPRETRANSFAIL, /* TCPRetransFail */
- LINUX_MIB_TCPRCVCOALESCE, /* TCPRcvCoalesce */
+ LINUX_MIB_TCPRCVCOALESCE, /* TCPRcvCoalesce */
+ LINUX_MIB_TCPOFOQUEUE, /* TCPOFOQueue */
+ LINUX_MIB_TCPOFODROP, /* TCPOFODrop */
+ LINUX_MIB_TCPOFOMERGE, /* TCPOFOMerge */
+ LINUX_MIB_TCPCHALLENGEACK, /* TCPChallengeACK */
+ LINUX_MIB_TCPSYNCHALLENGE, /* TCPSYNChallenge */
+ LINUX_MIB_TCPFASTOPENACTIVE, /* TCPFastOpenActive */
__LINUX_MIB_MAX
};
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index db4bae78bda9..e3e395acc2fd 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -18,6 +18,7 @@ enum {
SK_MEMINFO_FWD_ALLOC,
SK_MEMINFO_WMEM_QUEUED,
SK_MEMINFO_OPTMEM,
+ SK_MEMINFO_BACKLOG,
SK_MEMINFO_VARS,
};
@@ -43,6 +44,5 @@ void sock_diag_save_cookie(void *sk, __u32 *cookie);
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
-extern struct sock *sock_diag_nlsk;
#endif /* KERNEL */
#endif
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 25d6322fb635..ba7b2e817cfa 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -268,6 +268,7 @@ struct ucred {
#define MSG_SENDPAGE_NOTLAST 0x20000 /* sendpage() internal : not the last page */
#define MSG_EOF MSG_FIN
+#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file
descriptor received through
SCM_RIGHTS */
diff --git a/include/linux/spi/at86rf230.h b/include/linux/spi/at86rf230.h
new file mode 100644
index 000000000000..b2b1afbb3202
--- /dev/null
+++ b/include/linux/spi/at86rf230.h
@@ -0,0 +1,31 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ */
+#ifndef AT86RF230_H
+#define AT86RF230_H
+
+struct at86rf230_platform_data {
+ int rstn;
+ int slp_tr;
+ int dig2;
+};
+
+#endif
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 26e5b613deda..09a545a7dfa3 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -51,7 +51,8 @@ struct partial_page {
struct splice_pipe_desc {
struct page **pages; /* page map */
struct partial_page *partial; /* pages[] may not be contig */
- int nr_pages; /* number of pages in map */
+ int nr_pages; /* number of populated pages in map */
+ unsigned int nr_pages_max; /* pages[] & partial[] arrays size */
unsigned int flags; /* splice flags */
const struct pipe_buf_operations *ops;/* ops associated with output pipe */
void (*spd_release)(struct splice_pipe_desc *, unsigned int);
@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
/*
* for dynamic pipe sizing
*/
-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
-extern void splice_shrink_spd(struct pipe_inode_info *,
- struct splice_pipe_desc *);
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index bc14bd738ade..bb674c02f306 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -243,6 +243,7 @@ struct ssb_bus_ops {
#define SSB_DEV_MINI_MACPHY 0x823
#define SSB_DEV_ARM_1176 0x824
#define SSB_DEV_ARM_7TDMI 0x825
+#define SSB_DEV_ARM_CM3 0x82A
/* Vendor-ID values */
#define SSB_VENDOR_BROADCOM 0x4243
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index cd83059fb592..0c808d7fa579 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -408,6 +408,12 @@ static inline void unlock_system_sleep(void) {}
#endif /* !CONFIG_PM_SLEEP */
+#ifdef CONFIG_PM_SLEEP_DEBUG
+extern bool pm_print_times_enabled;
+#else
+#define pm_print_times_enabled (false)
+#endif
+
#ifdef CONFIG_PM_AUTOSLEEP
/* kernel/power/autosleep.c */
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
index 294d5d5e90b1..fb46b03b1852 100644
--- a/include/linux/task_work.h
+++ b/include/linux/task_work.h
@@ -4,29 +4,21 @@
#include <linux/list.h>
#include <linux/sched.h>
-struct task_work;
-typedef void (*task_work_func_t)(struct task_work *);
-
-struct task_work {
- struct hlist_node hlist;
- task_work_func_t func;
- void *data;
-};
+typedef void (*task_work_func_t)(struct callback_head *);
static inline void
-init_task_work(struct task_work *twork, task_work_func_t func, void *data)
+init_task_work(struct callback_head *twork, task_work_func_t func)
{
twork->func = func;
- twork->data = data;
}
-int task_work_add(struct task_struct *task, struct task_work *twork, bool);
-struct task_work *task_work_cancel(struct task_struct *, task_work_func_t);
+int task_work_add(struct task_struct *task, struct callback_head *twork, bool);
+struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
void task_work_run(void);
static inline void exit_task_work(struct task_struct *task)
{
- if (unlikely(!hlist_empty(&task->task_works)))
+ if (unlikely(task->task_works))
task_work_run();
}
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 5f359dbfcdce..eb125a4c30b3 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -243,6 +243,16 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
return (tcp_hdr(skb)->doff - 5) * 4;
}
+/* TCP Fast Open */
+#define TCP_FASTOPEN_COOKIE_MIN 4 /* Min Fast Open Cookie size in bytes */
+#define TCP_FASTOPEN_COOKIE_MAX 16 /* Max Fast Open Cookie size in bytes */
+
+/* TCP Fast Open Cookie as stored in memory */
+struct tcp_fastopen_cookie {
+ s8 len;
+ u8 val[TCP_FASTOPEN_COOKIE_MAX];
+};
+
/* This defines a selective acknowledgement block. */
struct tcp_sack_block_wire {
__be32 start_seq;
@@ -339,6 +349,9 @@ struct tcp_sock {
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
+ struct list_head tsq_node; /* anchor in tsq_tasklet.head list */
+ unsigned long tsq_flags;
+
/* Data for direct copy to user */
struct {
struct sk_buff_head prequeue;
@@ -373,7 +386,9 @@ struct tcp_sock {
unused : 1;
u8 repair_queue;
u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
- early_retrans_delayed:1; /* Delayed ER timer installed */
+ early_retrans_delayed:1, /* Delayed ER timer installed */
+ syn_data:1, /* SYN includes data */
+ syn_fastopen:1; /* SYN includes Fast Open option */
/* RTT measurement */
u32 srtt; /* smoothed round trip time << 3 */
@@ -478,6 +493,9 @@ struct tcp_sock {
u32 probe_seq_start;
u32 probe_seq_end;
} mtu_probe;
+ u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
+ * while socket was owned by user.
+ */
#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
@@ -487,6 +505,9 @@ struct tcp_sock {
struct tcp_md5sig_info __rcu *md5sig_info;
#endif
+/* TCP fastopen related information */
+ struct tcp_fastopen_request *fastopen_req;
+
/* When the cookie options are generated and exchanged, then this
* object holds a reference to them (cookie_values->kref). Also
* contains related tcp_cookie_transactions fields.
@@ -494,6 +515,17 @@ struct tcp_sock {
struct tcp_cookie_values *cookie_values;
};
+enum tsq_flags {
+ TSQ_THROTTLED,
+ TSQ_QUEUED,
+ TCP_TSQ_DEFERRED, /* tcp_tasklet_func() found socket was owned */
+ TCP_WRITE_TIMER_DEFERRED, /* tcp_write_timer() found socket was owned */
+ TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */
+ TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call
+ * tcp_v{4|6}_mtu_reduced()
+ */
+};
+
static inline struct tcp_sock *tcp_sk(const struct sock *sk)
{
return (struct tcp_sock *)sk;
@@ -507,7 +539,7 @@ struct tcp_timewait_sock {
u32 tw_ts_recent;
long tw_ts_recent_stamp;
#ifdef CONFIG_TCP_MD5SIG
- struct tcp_md5sig_key *tw_md5_key;
+ struct tcp_md5sig_key *tw_md5_key;
#endif
/* Few sockets in timewait have cookies; in that case, then this
* object holds a reference to them (tw_cookie_values->kref).
diff --git a/include/linux/tick.h b/include/linux/tick.h
index ab8be90b5cc9..f37fceb69b73 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -31,10 +31,10 @@ enum tick_nohz_mode {
* struct tick_sched - sched tick emulation and no idle tick control/stats
* @sched_timer: hrtimer to schedule the periodic tick in high
* resolution mode
- * @idle_tick: Store the last idle tick expiry time when the tick
- * timer is modified for idle sleeps. This is necessary
+ * @last_tick: Store the last tick expiry time when the tick
+ * timer is modified for nohz sleeps. This is necessary
* to resume the tick timer operation in the timeline
- * when the CPU returns from idle
+ * when the CPU returns from nohz sleep.
* @tick_stopped: Indicator that the idle tick has been stopped
* @idle_jiffies: jiffies at the entry to idle for idle time accounting
* @idle_calls: Total number of idle calls
@@ -51,7 +51,7 @@ struct tick_sched {
struct hrtimer sched_timer;
unsigned long check_clocks;
enum tick_nohz_mode nohz_mode;
- ktime_t idle_tick;
+ ktime_t last_tick;
int inidle;
int tick_stopped;
unsigned long idle_jiffies;
diff --git a/include/linux/time-armada-370-xp.h b/include/linux/time-armada-370-xp.h
new file mode 100644
index 000000000000..dfdfdc03115b
--- /dev/null
+++ b/include/linux/time-armada-370-xp.h
@@ -0,0 +1,18 @@
+/*
+ * Marvell Armada 370/XP SoC timer handling.
+ *
+ * Copyright (C) 2012 Marvell
+ *
+ * Lior Amsalem <alior@marvell.com>
+ * Gregory CLEMENT <gregory.clement@free-electrons.com>
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ *
+ */
+#ifndef __TIME_ARMADA_370_XPPRCMU_H
+#define __TIME_ARMADA_370_XPPRCMU_H
+
+#include <linux/init.h>
+
+void __init armada_370_xp_timer_init(void);
+
+#endif
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
index 9730b0e51e46..c98928420100 100644
--- a/include/linux/tipc_config.h
+++ b/include/linux/tipc_config.h
@@ -102,8 +102,8 @@
#define TIPC_CMD_SET_LINK_TOL 0x4107 /* tx link_config, rx none */
#define TIPC_CMD_SET_LINK_PRI 0x4108 /* tx link_config, rx none */
#define TIPC_CMD_SET_LINK_WINDOW 0x4109 /* tx link_config, rx none */
-#define TIPC_CMD_SET_LOG_SIZE 0x410A /* tx unsigned, rx none */
-#define TIPC_CMD_DUMP_LOG 0x410B /* tx none, rx ultra_string */
+#define TIPC_CMD_SET_LOG_SIZE 0x410A /* obsoleted */
+#define TIPC_CMD_DUMP_LOG 0x410B /* obsoleted */
#define TIPC_CMD_RESET_LINK_STATS 0x410C /* tx link_name, rx none */
/*
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 6a4d82bedb03..1e98b5530425 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -192,7 +192,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
* hlist_add_head(task->task_works);
*/
smp_mb__after_clear_bit();
- if (unlikely(!hlist_empty(&current->task_works)))
+ if (unlikely(current->task_works))
task_work_run();
}
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index bd96ecd0e05c..802de56c41e8 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -153,7 +153,7 @@ static inline void tracepoint_synchronize_unregister(void)
} \
static inline void trace_##name##_rcuidle(proto) \
{ \
- if (static_branch(&__tracepoint_##name.key)) \
+ if (static_key_false(&__tracepoint_##name.key)) \
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
diff --git a/include/linux/types.h b/include/linux/types.h
index 9c1bd539ea70..bf0dd7524b2a 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -246,14 +246,15 @@ struct ustat {
};
/**
- * struct rcu_head - callback structure for use with RCU
+ * struct callback_head - callback structure for use with RCU and task_work
* @next: next update requests in a list
* @func: actual update function to call after the grace period.
*/
-struct rcu_head {
- struct rcu_head *next;
- void (*func)(struct rcu_head *head);
+struct callback_head {
+ struct callback_head *next;
+ void (*func)(struct callback_head *head);
};
+#define rcu_head callback_head
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
diff --git a/include/linux/uhid.h b/include/linux/uhid.h
new file mode 100644
index 000000000000..9c6974f16966
--- /dev/null
+++ b/include/linux/uhid.h
@@ -0,0 +1,104 @@
+#ifndef __UHID_H_
+#define __UHID_H_
+
+/*
+ * User-space I/O driver support for HID subsystem
+ * Copyright (c) 2012 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Public header for user-space communication. We try to keep every structure
+ * aligned but to be safe we also use __attribute__((__packed__)). Therefore,
+ * the communication should be ABI compatible even between architectures.
+ */
+
+#include <linux/input.h>
+#include <linux/types.h>
+
+enum uhid_event_type {
+ UHID_CREATE,
+ UHID_DESTROY,
+ UHID_START,
+ UHID_STOP,
+ UHID_OPEN,
+ UHID_CLOSE,
+ UHID_OUTPUT,
+ UHID_OUTPUT_EV,
+ UHID_INPUT,
+ UHID_FEATURE,
+ UHID_FEATURE_ANSWER,
+};
+
+struct uhid_create_req {
+ __u8 name[128];
+ __u8 phys[64];
+ __u8 uniq[64];
+ __u8 __user *rd_data;
+ __u16 rd_size;
+
+ __u16 bus;
+ __u32 vendor;
+ __u32 product;
+ __u32 version;
+ __u32 country;
+} __attribute__((__packed__));
+
+#define UHID_DATA_MAX 4096
+
+enum uhid_report_type {
+ UHID_FEATURE_REPORT,
+ UHID_OUTPUT_REPORT,
+ UHID_INPUT_REPORT,
+};
+
+struct uhid_input_req {
+ __u8 data[UHID_DATA_MAX];
+ __u16 size;
+} __attribute__((__packed__));
+
+struct uhid_output_req {
+ __u8 data[UHID_DATA_MAX];
+ __u16 size;
+ __u8 rtype;
+} __attribute__((__packed__));
+
+struct uhid_output_ev_req {
+ __u16 type;
+ __u16 code;
+ __s32 value;
+} __attribute__((__packed__));
+
+struct uhid_feature_req {
+ __u32 id;
+ __u8 rnum;
+ __u8 rtype;
+} __attribute__((__packed__));
+
+struct uhid_feature_answer_req {
+ __u32 id;
+ __u16 err;
+ __u16 size;
+ __u8 data[UHID_DATA_MAX];
+};
+
+struct uhid_event {
+ __u32 type;
+
+ union {
+ struct uhid_create_req create;
+ struct uhid_input_req input;
+ struct uhid_output_req output;
+ struct uhid_output_ev_req output_ev;
+ struct uhid_feature_req feature;
+ struct uhid_feature_answer_req feature_answer;
+ } u;
+} __attribute__((__packed__));
+
+#endif /* __UHID_H_ */
diff --git a/include/linux/usb/tilegx.h b/include/linux/usb/tilegx.h
new file mode 100644
index 000000000000..2d65e3435680
--- /dev/null
+++ b/include/linux/usb/tilegx.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012 Tilera Corporation. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for
+ * more details.
+ *
+ * Structure to contain platform-specific data related to Tile-Gx USB
+ * controllers.
+ */
+
+#ifndef _LINUX_USB_TILEGX_H
+#define _LINUX_USB_TILEGX_H
+
+#include <gxio/usb_host.h>
+
+struct tilegx_usb_platform_data {
+ /* GXIO device index. */
+ int dev_index;
+
+ /* GXIO device context. */
+ gxio_usb_host_context_t usb_ctx;
+
+ /* Device IRQ. */
+ unsigned int irq;
+};
+
+#endif /* _LINUX_USB_TILEGX_H */
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 76f439647c4b..f87cf622317f 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -66,9 +66,8 @@ struct usbnet {
# define EVENT_STS_SPLIT 3
# define EVENT_LINK_RESET 4
# define EVENT_RX_PAUSED 5
-# define EVENT_DEV_WAKING 6
-# define EVENT_DEV_ASLEEP 7
-# define EVENT_DEV_OPEN 8
+# define EVENT_DEV_ASLEEP 6
+# define EVENT_DEV_OPEN 7
};
static inline struct usb_driver *driver_of(struct usb_interface *intf)
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 17df3600bcef..e84e769aaddc 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -64,7 +64,9 @@
US_FLAG(NO_READ_CAPACITY_16, 0x00080000) \
/* cannot handle READ_CAPACITY_16 */ \
US_FLAG(INITIAL_READ10, 0x00100000) \
- /* Initial READ(10) (and others) must be retried */
+ /* Initial READ(10) (and others) must be retried */ \
+ US_FLAG(WRITE_CACHE, 0x00200000) \
+ /* Write Cache status is not available */
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 8efd28ae5597..a1ba8bbd9fbe 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -92,6 +92,7 @@ struct virtio_driver {
const unsigned int *feature_table;
unsigned int feature_table_size;
int (*probe)(struct virtio_device *dev);
+ void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);
void (*config_changed)(struct virtio_device *dev);
#ifdef CONFIG_PM
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
index 8ddeafdc0546..dc8d305b0e05 100644
--- a/include/linux/virtio_scsi.h
+++ b/include/linux/virtio_scsi.h
@@ -69,6 +69,10 @@ struct virtio_scsi_config {
u32 max_lun;
} __packed;
+/* Feature Bits */
+#define VIRTIO_SCSI_F_INOUT 0
+#define VIRTIO_SCSI_F_HOTPLUG 1
+
/* Response codes */
#define VIRTIO_SCSI_S_OK 0
#define VIRTIO_SCSI_S_OVERRUN 1
@@ -105,6 +109,11 @@ struct virtio_scsi_config {
#define VIRTIO_SCSI_T_TRANSPORT_RESET 1
#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2
+/* Reasons of transport reset event */
+#define VIRTIO_SCSI_EVT_RESET_HARD 0
+#define VIRTIO_SCSI_EVT_RESET_RESCAN 1
+#define VIRTIO_SCSI_EVT_RESET_REMOVED 2
+
#define VIRTIO_SCSI_S_SIMPLE 0
#define VIRTIO_SCSI_S_ORDERED 1
#define VIRTIO_SCSI_S_HEAD 2
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index f2b801c4b555..089a09d001d1 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -46,7 +46,8 @@ struct prefix_info {
#include <net/if_inet6.h>
#include <net/ipv6.h>
-#define IN6_ADDR_HSIZE 16
+#define IN6_ADDR_HSIZE_SHIFT 4
+#define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
extern int addrconf_init(void);
extern void addrconf_cleanup(void);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 2ee33da36a7a..b5f8988e4283 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -14,10 +14,11 @@ extern struct sock *unix_get_socket(struct file *filp);
extern struct sock *unix_peer_get(struct sock *);
#define UNIX_HASH_SIZE 256
+#define UNIX_HASH_BITS 8
extern unsigned int unix_tot_inflight;
extern spinlock_t unix_table_lock;
-extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
struct unix_address {
atomic_t refcnt;
diff --git a/include/net/arp.h b/include/net/arp.h
index 4a1f3fb562eb..7f7df93f37cd 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -15,24 +15,31 @@ static inline u32 arp_hashfn(u32 key, const struct net_device *dev, u32 hash_rnd
return val * hash_rnd;
}
-static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
+static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
{
- struct neigh_hash_table *nht;
+ struct neigh_hash_table *nht = rcu_dereference_bh(arp_tbl.nht);
struct neighbour *n;
u32 hash_val;
- rcu_read_lock_bh();
- nht = rcu_dereference_bh(arp_tbl.nht);
hash_val = arp_hashfn(key, dev, nht->hash_rnd[0]) >> (32 - nht->hash_shift);
for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
n != NULL;
n = rcu_dereference_bh(n->next)) {
- if (n->dev == dev && *(u32 *)n->primary_key == key) {
- if (!atomic_inc_not_zero(&n->refcnt))
- n = NULL;
- break;
- }
+ if (n->dev == dev && *(u32 *)n->primary_key == key)
+ return n;
}
+
+ return NULL;
+}
+
+static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32 key)
+{
+ struct neighbour *n;
+
+ rcu_read_lock_bh();
+ n = __ipv4_neigh_lookup_noref(dev, key);
+ if (n && !atomic_inc_not_zero(&n->refcnt))
+ n = NULL;
rcu_read_unlock_bh();
return n;
diff --git a/include/net/bluetooth/a2mp.h b/include/net/bluetooth/a2mp.h
new file mode 100644
index 000000000000..6a76e0a0705e
--- /dev/null
+++ b/include/net/bluetooth/a2mp.h
@@ -0,0 +1,126 @@
+/*
+ Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+ Copyright (c) 2011,2012 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#ifndef __A2MP_H
+#define __A2MP_H
+
+#include <net/bluetooth/l2cap.h>
+
+#define A2MP_FEAT_EXT 0x8000
+
+struct amp_mgr {
+ struct l2cap_conn *l2cap_conn;
+ struct l2cap_chan *a2mp_chan;
+ struct kref kref;
+ __u8 ident;
+ __u8 handle;
+ unsigned long flags;
+};
+
+struct a2mp_cmd {
+ __u8 code;
+ __u8 ident;
+ __le16 len;
+ __u8 data[0];
+} __packed;
+
+/* A2MP command codes */
+#define A2MP_COMMAND_REJ 0x01
+struct a2mp_cmd_rej {
+ __le16 reason;
+ __u8 data[0];
+} __packed;
+
+#define A2MP_DISCOVER_REQ 0x02
+struct a2mp_discov_req {
+ __le16 mtu;
+ __le16 ext_feat;
+} __packed;
+
+struct a2mp_cl {
+ __u8 id;
+ __u8 type;
+ __u8 status;
+} __packed;
+
+#define A2MP_DISCOVER_RSP 0x03
+struct a2mp_discov_rsp {
+ __le16 mtu;
+ __le16 ext_feat;
+ struct a2mp_cl cl[0];
+} __packed;
+
+#define A2MP_CHANGE_NOTIFY 0x04
+#define A2MP_CHANGE_RSP 0x05
+
+#define A2MP_GETINFO_REQ 0x06
+struct a2mp_info_req {
+ __u8 id;
+} __packed;
+
+#define A2MP_GETINFO_RSP 0x07
+struct a2mp_info_rsp {
+ __u8 id;
+ __u8 status;
+ __le32 total_bw;
+ __le32 max_bw;
+ __le32 min_latency;
+ __le16 pal_cap;
+ __le16 assoc_size;
+} __packed;
+
+#define A2MP_GETAMPASSOC_REQ 0x08
+struct a2mp_amp_assoc_req {
+ __u8 id;
+} __packed;
+
+#define A2MP_GETAMPASSOC_RSP 0x09
+struct a2mp_amp_assoc_rsp {
+ __u8 id;
+ __u8 status;
+ __u8 amp_assoc[0];
+} __packed;
+
+#define A2MP_CREATEPHYSLINK_REQ 0x0A
+#define A2MP_DISCONNPHYSLINK_REQ 0x0C
+struct a2mp_physlink_req {
+ __u8 local_id;
+ __u8 remote_id;
+ __u8 amp_assoc[0];
+} __packed;
+
+#define A2MP_CREATEPHYSLINK_RSP 0x0B
+#define A2MP_DISCONNPHYSLINK_RSP 0x0D
+struct a2mp_physlink_rsp {
+ __u8 local_id;
+ __u8 remote_id;
+ __u8 status;
+} __packed;
+
+/* A2MP response status */
+#define A2MP_STATUS_SUCCESS 0x00
+#define A2MP_STATUS_INVALID_CTRL_ID 0x01
+#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02
+#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02
+#define A2MP_STATUS_COLLISION_OCCURED 0x03
+#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04
+#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05
+#define A2MP_STATUS_SECURITY_VIOLATION 0x06
+
+void amp_mgr_get(struct amp_mgr *mgr);
+int amp_mgr_put(struct amp_mgr *mgr);
+struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+ struct sk_buff *skb);
+
+#endif /* __A2MP_H */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 961669b648fd..565d4bee1e49 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -1,4 +1,4 @@
-/*
+/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
@@ -12,22 +12,19 @@
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
- CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
- WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
- COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#ifndef __BLUETOOTH_H
#define __BLUETOOTH_H
-#include <asm/types.h>
-#include <asm/byteorder.h>
-#include <linux/list.h>
#include <linux/poll.h>
#include <net/sock.h>
@@ -168,8 +165,8 @@ typedef struct {
#define BDADDR_LE_PUBLIC 0x01
#define BDADDR_LE_RANDOM 0x02
-#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}})
-#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}})
+#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} })
+#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} })
/* Copy, swap, convert BD Address */
static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2)
@@ -215,7 +212,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags);
int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags);
-uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait);
+uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
@@ -225,12 +222,12 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
/* Skb helpers */
struct l2cap_ctrl {
- unsigned int sframe : 1,
- poll : 1,
- final : 1,
- fcs : 1,
- sar : 2,
- super : 2;
+ unsigned int sframe:1,
+ poll:1,
+ final:1,
+ fcs:1,
+ sar:2,
+ super:2;
__u16 reqseq;
__u16 txseq;
__u8 retries;
@@ -249,7 +246,8 @@ static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
{
struct sk_buff *skb;
- if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) {
+ skb = alloc_skb(len + BT_SKB_RESERVE, how);
+ if (skb) {
skb_reserve(skb, BT_SKB_RESERVE);
bt_cb(skb)->incoming = 0;
}
@@ -261,7 +259,8 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk,
{
struct sk_buff *skb;
- if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) {
+ skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err);
+ if (skb) {
skb_reserve(skb, BT_SKB_RESERVE);
bt_cb(skb)->incoming = 0;
}
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 3def64ba77fa..ccd723e0f783 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -30,6 +30,9 @@
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
+#define HCI_LINK_KEY_SIZE 16
+#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE)
+
/* HCI dev events */
#define HCI_DEV_REG 1
#define HCI_DEV_UNREG 2
@@ -56,9 +59,12 @@
#define HCI_BREDR 0x00
#define HCI_AMP 0x01
+/* First BR/EDR Controller shall have ID = 0 */
+#define HCI_BREDR_ID 0
+
/* HCI device quirks */
enum {
- HCI_QUIRK_NO_RESET,
+ HCI_QUIRK_RESET_ON_CLOSE,
HCI_QUIRK_RAW_DEVICE,
HCI_QUIRK_FIXUP_BUFFER_SIZE
};
@@ -133,13 +139,12 @@ enum {
#define HCIINQUIRY _IOR('H', 240, int)
/* HCI timeouts */
-#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */
-#define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */
-#define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */
-#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */
-#define HCI_INIT_TIMEOUT (10000) /* 10 seconds */
-#define HCI_CMD_TIMEOUT (1000) /* 1 seconds */
-#define HCI_ACL_TX_TIMEOUT (45000) /* 45 seconds */
+#define HCI_DISCONN_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+#define HCI_PAIRING_TIMEOUT msecs_to_jiffies(60000) /* 60 seconds */
+#define HCI_INIT_TIMEOUT msecs_to_jiffies(10000) /* 10 seconds */
+#define HCI_CMD_TIMEOUT msecs_to_jiffies(1000) /* 1 second */
+#define HCI_ACL_TX_TIMEOUT msecs_to_jiffies(45000) /* 45 seconds */
+#define HCI_AUTO_OFF_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
/* HCI data types */
#define HCI_COMMAND_PKT 0x01
@@ -371,7 +376,7 @@ struct hci_cp_reject_conn_req {
#define HCI_OP_LINK_KEY_REPLY 0x040b
struct hci_cp_link_key_reply {
bdaddr_t bdaddr;
- __u8 link_key[16];
+ __u8 link_key[HCI_LINK_KEY_SIZE];
} __packed;
#define HCI_OP_LINK_KEY_NEG_REPLY 0x040c
@@ -523,6 +528,28 @@ struct hci_cp_io_capability_neg_reply {
__u8 reason;
} __packed;
+#define HCI_OP_CREATE_PHY_LINK 0x0435
+struct hci_cp_create_phy_link {
+ __u8 phy_handle;
+ __u8 key_len;
+ __u8 key_type;
+ __u8 key[HCI_AMP_LINK_KEY_SIZE];
+} __packed;
+
+#define HCI_OP_ACCEPT_PHY_LINK 0x0436
+struct hci_cp_accept_phy_link {
+ __u8 phy_handle;
+ __u8 key_len;
+ __u8 key_type;
+ __u8 key[HCI_AMP_LINK_KEY_SIZE];
+} __packed;
+
+#define HCI_OP_DISCONN_PHY_LINK 0x0437
+struct hci_cp_disconn_phy_link {
+ __u8 phy_handle;
+ __u8 reason;
+} __packed;
+
#define HCI_OP_SNIFF_MODE 0x0803
struct hci_cp_sniff_mode {
__le16 handle;
@@ -818,6 +845,31 @@ struct hci_rp_read_local_amp_info {
__le32 be_flush_to;
} __packed;
+#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a
+struct hci_cp_read_local_amp_assoc {
+ __u8 phy_handle;
+ __le16 len_so_far;
+ __le16 max_len;
+} __packed;
+struct hci_rp_read_local_amp_assoc {
+ __u8 status;
+ __u8 phy_handle;
+ __le16 rem_len;
+ __u8 frag[0];
+} __packed;
+
+#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
+struct hci_cp_write_remote_amp_assoc {
+ __u8 phy_handle;
+ __le16 len_so_far;
+ __le16 rem_len;
+ __u8 frag[0];
+} __packed;
+struct hci_rp_write_remote_amp_assoc {
+ __u8 status;
+ __u8 phy_handle;
+} __packed;
+
#define HCI_OP_LE_SET_EVENT_MASK 0x2001
struct hci_cp_le_set_event_mask {
__u8 mask[8];
@@ -1048,7 +1100,7 @@ struct hci_ev_link_key_req {
#define HCI_EV_LINK_KEY_NOTIFY 0x18
struct hci_ev_link_key_notify {
bdaddr_t bdaddr;
- __u8 link_key[16];
+ __u8 link_key[HCI_LINK_KEY_SIZE];
__u8 key_type;
} __packed;
@@ -1196,6 +1248,39 @@ struct hci_ev_le_meta {
__u8 subevent;
} __packed;
+#define HCI_EV_PHY_LINK_COMPLETE 0x40
+struct hci_ev_phy_link_complete {
+ __u8 status;
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_EV_CHANNEL_SELECTED 0x41
+struct hci_ev_channel_selected {
+ __u8 phy_handle;
+} __packed;
+
+#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42
+struct hci_ev_disconn_phy_link_complete {
+ __u8 status;
+ __u8 phy_handle;
+ __u8 reason;
+} __packed;
+
+#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45
+struct hci_ev_logical_link_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 phy_handle;
+ __u8 flow_spec_id;
+} __packed;
+
+#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46
+struct hci_ev_disconn_logical_link_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
#define HCI_EV_NUM_COMP_BLOCKS 0x48
struct hci_comp_blocks_info {
__le16 handle;
@@ -1296,7 +1381,6 @@ struct hci_sco_hdr {
__u8 dlen;
} __packed;
-#include <linux/skbuff.h>
static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
{
return (struct hci_event_hdr *) skb->data;
@@ -1313,12 +1397,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
}
/* Command opcode pack/unpack */
-#define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10))
+#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10)))
#define hci_opcode_ogf(op) (op >> 10)
#define hci_opcode_ocf(op) (op & 0x03ff)
/* ACL handle and flags pack/unpack */
-#define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12))
+#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12)))
#define hci_handle(h) (h & 0x0fff)
#define hci_flags(h) (h >> 12)
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 9fc7728f94e4..475b8c04ba52 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -25,7 +25,6 @@
#ifndef __HCI_CORE_H
#define __HCI_CORE_H
-#include <linux/interrupt.h>
#include <net/bluetooth/hci.h>
/* HCI priority */
@@ -65,7 +64,7 @@ struct discovery_state {
DISCOVERY_RESOLVING,
DISCOVERY_STOPPING,
} state;
- struct list_head all; /* All devices found during inquiry */
+ struct list_head all; /* All devices found during inquiry */
struct list_head unknown; /* Name state not known */
struct list_head resolve; /* Name needs to be resolved */
__u32 timestamp;
@@ -105,7 +104,7 @@ struct link_key {
struct list_head list;
bdaddr_t bdaddr;
u8 type;
- u8 val[16];
+ u8 val[HCI_LINK_KEY_SIZE];
u8 pin_len;
};
@@ -333,6 +332,7 @@ struct hci_conn {
void *l2cap_data;
void *sco_data;
void *smp_conn;
+ struct amp_mgr *amp_mgr;
struct hci_conn *link;
@@ -360,7 +360,8 @@ extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status);
extern int l2cap_disconn_ind(struct hci_conn *hcon);
extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason);
extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt);
-extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags);
+extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb,
+ u16 flags);
extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr);
extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status);
@@ -429,8 +430,8 @@ enum {
static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- return (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
- test_bit(HCI_CONN_SSP_ENABLED, &conn->flags));
+ return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
+ test_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
}
static inline void hci_conn_hash_init(struct hci_dev *hdev)
@@ -586,18 +587,24 @@ void hci_conn_put_device(struct hci_conn *conn);
static inline void hci_conn_hold(struct hci_conn *conn)
{
+ BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt),
+ atomic_read(&conn->refcnt) + 1);
+
atomic_inc(&conn->refcnt);
cancel_delayed_work(&conn->disc_work);
}
static inline void hci_conn_put(struct hci_conn *conn)
{
+ BT_DBG("hcon %p refcnt %d -> %d", conn, atomic_read(&conn->refcnt),
+ atomic_read(&conn->refcnt) - 1);
+
if (atomic_dec_and_test(&conn->refcnt)) {
unsigned long timeo;
if (conn->type == ACL_LINK || conn->type == LE_LINK) {
del_timer(&conn->idle_timer);
if (conn->state == BT_CONNECTED) {
- timeo = msecs_to_jiffies(conn->disc_timeout);
+ timeo = conn->disc_timeout;
if (!conn->out)
timeo *= 2;
} else {
@@ -640,6 +647,19 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data)
dev_set_drvdata(&hdev->dev, data);
}
+/* hci_dev_list shall be locked */
+static inline uint8_t __hci_num_ctrl(void)
+{
+ uint8_t count = 0;
+ struct list_head *p;
+
+ list_for_each(p, &hci_dev_list) {
+ count++;
+ }
+
+ return count;
+}
+
struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
@@ -661,7 +681,8 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
+struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr);
int hci_blacklist_clear(struct hci_dev *hdev);
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 1c7d1cd5e679..a7679f8913d2 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -40,11 +40,11 @@
#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */
#define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */
#define L2CAP_DEFAULT_ACK_TO 200
-#define L2CAP_LE_DEFAULT_MTU 23
#define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF
#define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF
#define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF
#define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */
+#define L2CAP_LE_MIN_MTU 23
#define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100)
#define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000)
@@ -52,6 +52,8 @@
#define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000)
#define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000)
+#define L2CAP_A2MP_DEFAULT_MTU 670
+
/* L2CAP socket address */
struct sockaddr_l2 {
sa_family_t l2_family;
@@ -229,9 +231,14 @@ struct l2cap_conn_rsp {
__le16 status;
} __packed;
+/* protocol/service multiplexer (PSM) */
+#define L2CAP_PSM_SDP 0x0001
+#define L2CAP_PSM_RFCOMM 0x0003
+
/* channel indentifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
+#define L2CAP_CID_A2MP 0x0003
#define L2CAP_CID_LE_DATA 0x0004
#define L2CAP_CID_LE_SIGNALING 0x0005
#define L2CAP_CID_SMP 0x0006
@@ -271,6 +278,9 @@ struct l2cap_conf_rsp {
#define L2CAP_CONF_PENDING 0x0004
#define L2CAP_CONF_EFS_REJECT 0x0005
+/* configuration req/rsp continuation flag */
+#define L2CAP_CONF_FLAG_CONTINUATION 0x0001
+
struct l2cap_conf_opt {
__u8 type;
__u8 len;
@@ -419,11 +429,6 @@ struct l2cap_seq_list {
#define L2CAP_SEQ_LIST_CLEAR 0xFFFF
#define L2CAP_SEQ_LIST_TAIL 0x8000
-struct srej_list {
- __u16 tx_seq;
- struct list_head list;
-};
-
struct l2cap_chan {
struct sock *sk;
@@ -459,6 +464,7 @@ struct l2cap_chan {
__u16 tx_win;
__u16 tx_win_max;
+ __u16 ack_win;
__u8 max_tx;
__u16 retrans_timeout;
__u16 monitor_timeout;
@@ -475,14 +481,12 @@ struct l2cap_chan {
__u16 expected_ack_seq;
__u16 expected_tx_seq;
__u16 buffer_seq;
- __u16 buffer_seq_srej;
__u16 srej_save_reqseq;
__u16 last_acked_seq;
__u16 frames_sent;
__u16 unacked_frames;
__u8 retry_count;
__u16 srej_queue_next;
- __u8 num_acked;
__u16 sdu_len;
struct sk_buff *sdu;
struct sk_buff *sdu_last_frag;
@@ -515,7 +519,6 @@ struct l2cap_chan {
struct sk_buff_head srej_q;
struct l2cap_seq_list srej_list;
struct l2cap_seq_list retrans_list;
- struct list_head srej_l;
struct list_head list;
struct list_head global_l;
@@ -528,10 +531,14 @@ struct l2cap_chan {
struct l2cap_ops {
char *name;
- struct l2cap_chan *(*new_connection) (void *data);
- int (*recv) (void *data, struct sk_buff *skb);
- void (*close) (void *data);
- void (*state_change) (void *data, int state);
+ struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan);
+ int (*recv) (struct l2cap_chan * chan,
+ struct sk_buff *skb);
+ void (*teardown) (struct l2cap_chan *chan, int err);
+ void (*close) (struct l2cap_chan *chan);
+ void (*state_change) (struct l2cap_chan *chan,
+ int state);
+ void (*ready) (struct l2cap_chan *chan);
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long len, int nb);
};
@@ -575,6 +582,7 @@ struct l2cap_conn {
#define L2CAP_CHAN_RAW 1
#define L2CAP_CHAN_CONN_LESS 2
#define L2CAP_CHAN_CONN_ORIENTED 3
+#define L2CAP_CHAN_CONN_FIX_A2MP 4
/* ----- L2CAP socket info ----- */
#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
@@ -597,6 +605,7 @@ enum {
CONF_EWS_RECV,
CONF_LOC_CONF_PEND,
CONF_REM_CONF_PEND,
+ CONF_NOT_COMPLETE,
};
#define L2CAP_CONF_MAX_CONF_REQ 2
@@ -664,11 +673,15 @@ enum {
static inline void l2cap_chan_hold(struct l2cap_chan *c)
{
+ BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
+
atomic_inc(&c->refcnt);
}
static inline void l2cap_chan_put(struct l2cap_chan *c)
{
+ BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->refcnt));
+
if (atomic_dec_and_test(&c->refcnt))
kfree(c);
}
@@ -713,11 +726,7 @@ static inline bool l2cap_clear_timer(struct l2cap_chan *chan,
#define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t))
#define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer)
-#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \
- msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO));
#define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer)
-#define __set_monitor_timer(c) l2cap_set_timer(c, &c->monitor_timer, \
- msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO));
#define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer)
#define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \
msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
@@ -736,173 +745,17 @@ static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq)
return (seq + 1) % (chan->tx_win_max + 1);
}
-static inline int l2cap_tx_window_full(struct l2cap_chan *ch)
-{
- int sub;
-
- sub = (ch->next_tx_seq - ch->expected_ack_seq) % 64;
-
- if (sub < 0)
- sub += 64;
-
- return sub == ch->remote_tx_win;
-}
-
-static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >>
- L2CAP_EXT_CTRL_REQSEQ_SHIFT;
- else
- return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
-}
-
-static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
- L2CAP_EXT_CTRL_REQSEQ;
- else
- return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ;
-}
-
-static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >>
- L2CAP_EXT_CTRL_TXSEQ_SHIFT;
- else
- return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
-}
-
-static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
- L2CAP_EXT_CTRL_TXSEQ;
- else
- return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ;
-}
-
-static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE;
- else
- return ctrl & L2CAP_CTRL_FRAME_TYPE;
-}
-
-static inline __u32 __set_sframe(struct l2cap_chan *chan)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return L2CAP_EXT_CTRL_FRAME_TYPE;
- else
- return L2CAP_CTRL_FRAME_TYPE;
-}
-
-static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl)
+static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan)
{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
- else
- return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
+ return NULL;
}
-static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar)
+static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err)
{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR;
- else
- return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR;
}
-static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl)
+static inline void l2cap_chan_no_ready(struct l2cap_chan *chan)
{
- return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START;
-}
-
-static inline __u32 __get_sar_mask(struct l2cap_chan *chan)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return L2CAP_EXT_CTRL_SAR;
- else
- return L2CAP_CTRL_SAR;
-}
-
-static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >>
- L2CAP_EXT_CTRL_SUPER_SHIFT;
- else
- return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
-}
-
-static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) &
- L2CAP_EXT_CTRL_SUPERVISE;
- else
- return (super << L2CAP_CTRL_SUPER_SHIFT) &
- L2CAP_CTRL_SUPERVISE;
-}
-
-static inline __u32 __set_ctrl_final(struct l2cap_chan *chan)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return L2CAP_EXT_CTRL_FINAL;
- else
- return L2CAP_CTRL_FINAL;
-}
-
-static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return ctrl & L2CAP_EXT_CTRL_FINAL;
- else
- return ctrl & L2CAP_CTRL_FINAL;
-}
-
-static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return L2CAP_EXT_CTRL_POLL;
- else
- return L2CAP_CTRL_POLL;
-}
-
-static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return ctrl & L2CAP_EXT_CTRL_POLL;
- else
- return ctrl & L2CAP_CTRL_POLL;
-}
-
-static inline __u32 __get_control(struct l2cap_chan *chan, void *p)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return get_unaligned_le32(p);
- else
- return get_unaligned_le16(p);
-}
-
-static inline void __put_control(struct l2cap_chan *chan, __u32 control,
- void *p)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return put_unaligned_le32(control, p);
- else
- return put_unaligned_le16(control, p);
-}
-
-static inline __u8 __ctrl_size(struct l2cap_chan *chan)
-{
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE;
- else
- return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE;
}
extern bool disable_ertm;
@@ -926,5 +779,8 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
int l2cap_chan_check_security(struct l2cap_chan *chan);
void l2cap_chan_set_defaults(struct l2cap_chan *chan);
+int l2cap_ertm_init(struct l2cap_chan *chan);
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan);
+void l2cap_chan_del(struct l2cap_chan *chan, int err);
#endif /* __L2CAP_H */
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 23fd0546fccb..4348ee8bda69 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -444,7 +444,7 @@ struct mgmt_ev_auth_failed {
struct mgmt_ev_device_found {
struct mgmt_addr_info addr;
__s8 rssi;
- __u8 flags[4];
+ __le32 flags;
__le16 eir_len;
__u8 eir[0];
} __packed;
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index 439dadc8102f..bcb9cc3ce98b 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -93,25 +93,25 @@ struct cfhsi_desc {
#endif
/* Structure implemented by the CAIF HSI driver. */
-struct cfhsi_drv {
- void (*tx_done_cb) (struct cfhsi_drv *drv);
- void (*rx_done_cb) (struct cfhsi_drv *drv);
- void (*wake_up_cb) (struct cfhsi_drv *drv);
- void (*wake_down_cb) (struct cfhsi_drv *drv);
+struct cfhsi_cb_ops {
+ void (*tx_done_cb) (struct cfhsi_cb_ops *drv);
+ void (*rx_done_cb) (struct cfhsi_cb_ops *drv);
+ void (*wake_up_cb) (struct cfhsi_cb_ops *drv);
+ void (*wake_down_cb) (struct cfhsi_cb_ops *drv);
};
/* Structure implemented by HSI device. */
-struct cfhsi_dev {
- int (*cfhsi_up) (struct cfhsi_dev *dev);
- int (*cfhsi_down) (struct cfhsi_dev *dev);
- int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_dev *dev);
- int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_dev *dev);
- int (*cfhsi_wake_up) (struct cfhsi_dev *dev);
- int (*cfhsi_wake_down) (struct cfhsi_dev *dev);
- int (*cfhsi_get_peer_wake) (struct cfhsi_dev *dev, bool *status);
- int (*cfhsi_fifo_occupancy)(struct cfhsi_dev *dev, size_t *occupancy);
- int (*cfhsi_rx_cancel)(struct cfhsi_dev *dev);
- struct cfhsi_drv *drv;
+struct cfhsi_ops {
+ int (*cfhsi_up) (struct cfhsi_ops *dev);
+ int (*cfhsi_down) (struct cfhsi_ops *dev);
+ int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev);
+ int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev);
+ int (*cfhsi_wake_up) (struct cfhsi_ops *dev);
+ int (*cfhsi_wake_down) (struct cfhsi_ops *dev);
+ int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status);
+ int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy);
+ int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev);
+ struct cfhsi_cb_ops *cb_ops;
};
/* Structure holds status of received CAIF frames processing */
@@ -132,17 +132,26 @@ enum {
CFHSI_PRIO_LAST,
};
+struct cfhsi_config {
+ u32 inactivity_timeout;
+ u32 aggregation_timeout;
+ u32 head_align;
+ u32 tail_align;
+ u32 q_high_mark;
+ u32 q_low_mark;
+};
+
/* Structure implemented by CAIF HSI drivers. */
struct cfhsi {
struct caif_dev_common cfdev;
struct net_device *ndev;
struct platform_device *pdev;
struct sk_buff_head qhead[CFHSI_PRIO_LAST];
- struct cfhsi_drv drv;
- struct cfhsi_dev *dev;
+ struct cfhsi_cb_ops cb_ops;
+ struct cfhsi_ops *ops;
int tx_state;
struct cfhsi_rx_state rx_state;
- unsigned long inactivity_timeout;
+ struct cfhsi_config cfg;
int rx_len;
u8 *rx_ptr;
u8 *tx_buf;
@@ -150,8 +159,6 @@ struct cfhsi {
u8 *rx_flip_buf;
spinlock_t lock;
int flow_off_sent;
- u32 q_low_mark;
- u32 q_high_mark;
struct list_head list;
struct work_struct wake_up_work;
struct work_struct wake_down_work;
@@ -164,13 +171,31 @@ struct cfhsi {
struct timer_list rx_slowpath_timer;
/* TX aggregation */
- unsigned long aggregation_timeout;
int aggregation_len;
struct timer_list aggregation_timer;
unsigned long bits;
};
-
extern struct platform_driver cfhsi_driver;
+/**
+ * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters.
+ * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before
+ * taking the HSI wakeline down, in milliseconds.
+ * When using RT Netlink to create, destroy or configure a CAIF HSI interface,
+ * enum ifla_caif_hsi is used to specify the configuration attributes.
+ */
+enum ifla_caif_hsi {
+ __IFLA_CAIF_HSI_UNSPEC,
+ __IFLA_CAIF_HSI_INACTIVITY_TOUT,
+ __IFLA_CAIF_HSI_AGGREGATION_TOUT,
+ __IFLA_CAIF_HSI_HEAD_ALIGN,
+ __IFLA_CAIF_HSI_TAIL_ALIGN,
+ __IFLA_CAIF_HSI_QHIGH_WATERMARK,
+ __IFLA_CAIF_HSI_QLOW_WATERMARK,
+ __IFLA_CAIF_HSI_MAX
+};
+
+extern struct cfhsi_ops *cfhsi_get_ops(void);
+
#endif /* CAIF_HSI_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0289d4ce7070..493fa0c79005 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -70,11 +70,13 @@
*
* @IEEE80211_BAND_2GHZ: 2.4GHz ISM band
* @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7)
+ * @IEEE80211_BAND_60GHZ: around 60 GHz band (58.32 - 64.80 GHz)
* @IEEE80211_NUM_BANDS: number of defined bands
*/
enum ieee80211_band {
IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ,
+ IEEE80211_BAND_60GHZ = NL80211_BAND_60GHZ,
/* keep last */
IEEE80211_NUM_BANDS
@@ -211,6 +213,22 @@ struct ieee80211_sta_ht_cap {
};
/**
+ * struct ieee80211_sta_vht_cap - STA's VHT capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ac VHT capabilities for an STA.
+ *
+ * @vht_supported: is VHT supported by the STA
+ * @cap: VHT capabilities map as described in 802.11ac spec
+ * @vht_mcs: Supported VHT MCS rates
+ */
+struct ieee80211_sta_vht_cap {
+ bool vht_supported;
+ u32 cap; /* use IEEE80211_VHT_CAP_ */
+ struct ieee80211_vht_mcs_info vht_mcs;
+};
+
+/**
* struct ieee80211_supported_band - frequency band definition
*
* This structure describes a frequency band a wiphy
@@ -233,6 +251,7 @@ struct ieee80211_supported_band {
int n_channels;
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
};
/*
@@ -404,6 +423,8 @@ struct cfg80211_beacon_data {
*
* Used to configure an AP interface.
*
+ * @channel: the channel to start the AP on
+ * @channel_type: the channel type to use
* @beacon: beacon data
* @beacon_interval: beacon interval
* @dtim_period: DTIM period
@@ -417,6 +438,9 @@ struct cfg80211_beacon_data {
* @inactivity_timeout: time in seconds to determine station's inactivity.
*/
struct cfg80211_ap_settings {
+ struct ieee80211_channel *channel;
+ enum nl80211_channel_type channel_type;
+
struct cfg80211_beacon_data beacon;
int beacon_interval, dtim_period;
@@ -556,11 +580,13 @@ enum station_info_flags {
* @RATE_INFO_FLAGS_MCS: @tx_bitrate_mcs filled
* @RATE_INFO_FLAGS_40_MHZ_WIDTH: 40 Mhz width transmission
* @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
+ * @RATE_INFO_FLAGS_60G: 60gHz MCS
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = 1<<0,
RATE_INFO_FLAGS_40_MHZ_WIDTH = 1<<1,
RATE_INFO_FLAGS_SHORT_GI = 1<<2,
+ RATE_INFO_FLAGS_60G = 1<<3,
};
/**
@@ -622,10 +648,10 @@ struct sta_bss_parameters {
* @llid: mesh local link id
* @plid: mesh peer link id
* @plink_state: mesh peer link state
- * @signal: the signal strength, type depends on the wiphy's signal_type
- NOTE: For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
- * @signal_avg: avg signal strength, type depends on the wiphy's signal_type
- NOTE: For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @signal: The signal strength, type depends on the wiphy's signal_type.
+ * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
+ * @signal_avg: Average signal strength, type depends on the wiphy's signal_type.
+ * For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_.
* @txrate: current unicast bitrate from this station
* @rxrate: current unicast bitrate to this station
* @rx_packets: packets received from this station
@@ -785,47 +811,101 @@ struct bss_parameters {
int ht_opmode;
};
-/*
+/**
* struct mesh_config - 802.11s mesh configuration
*
* These parameters can be changed while the mesh is active.
+ *
+ * @dot11MeshRetryTimeout: the initial retry timeout in millisecond units used
+ * by the Mesh Peering Open message
+ * @dot11MeshConfirmTimeout: the initial retry timeout in millisecond units
+ * used by the Mesh Peering Open message
+ * @dot11MeshHoldingTimeout: the confirm timeout in millisecond units used by
+ * the mesh peering management to close a mesh peering
+ * @dot11MeshMaxPeerLinks: the maximum number of peer links allowed on this
+ * mesh interface
+ * @dot11MeshMaxRetries: the maximum number of peer link open retries that can
+ * be sent to establish a new peer link instance in a mesh
+ * @dot11MeshTTL: the value of TTL field set at a source mesh STA
+ * @element_ttl: the value of TTL field set at a mesh STA for path selection
+ * elements
+ * @auto_open_plinks: whether we should automatically open peer links when we
+ * detect compatible mesh peers
+ * @dot11MeshNbrOffsetMaxNeighbor: the maximum number of neighbors to
+ * synchronize to for 11s default synchronization method
+ * @dot11MeshHWMPmaxPREQretries: the number of action frames containing a PREQ
+ * that an originator mesh STA can send to a particular path target
+ * @path_refresh_time: how frequently to refresh mesh paths in milliseconds
+ * @min_discovery_timeout: the minimum length of time to wait until giving up on
+ * a path discovery in milliseconds
+ * @dot11MeshHWMPactivePathTimeout: the time (in TUs) for which mesh STAs
+ * receiving a PREQ shall consider the forwarding information from the
+ * root to be valid. (TU = time unit)
+ * @dot11MeshHWMPpreqMinInterval: the minimum interval of time (in TUs) during
+ * which a mesh STA can send only one action frame containing a PREQ
+ * element
+ * @dot11MeshHWMPperrMinInterval: the minimum interval of time (in TUs) during
+ * which a mesh STA can send only one Action frame containing a PERR
+ * element
+ * @dot11MeshHWMPnetDiameterTraversalTime: the interval of time (in TUs) that
+ * it takes for an HWMP information element to propagate across the mesh
+ * @dot11MeshHWMPRootMode: the configuration of a mesh STA as root mesh STA
+ * @dot11MeshHWMPRannInterval: the interval of time (in TUs) between root
+ * announcements are transmitted
+ * @dot11MeshGateAnnouncementProtocol: whether to advertise that this mesh
+ * station has access to a broader network beyond the MBSS. (This is
+ * missnamed in draft 12.0: dot11MeshGateAnnouncementProtocol set to true
+ * only means that the station will announce others it's a mesh gate, but
+ * not necessarily using the gate announcement protocol. Still keeping the
+ * same nomenclature to be in sync with the spec)
+ * @dot11MeshForwarding: whether the Mesh STA is forwarding or non-forwarding
+ * entity (default is TRUE - forwarding entity)
+ * @rssi_threshold: the threshold for average signal strength of candidate
+ * station to establish a peer link
+ * @ht_opmode: mesh HT protection mode
+ *
+ * @dot11MeshHWMPactivePathToRootTimeout: The time (in TUs) for which mesh STAs
+ * receiving a proactive PREQ shall consider the forwarding information to
+ * the root mesh STA to be valid.
+ *
+ * @dot11MeshHWMProotInterval: The interval of time (in TUs) between proactive
+ * PREQs are transmitted.
+ * @dot11MeshHWMPconfirmationInterval: The minimum interval of time (in TUs)
+ * during which a mesh STA can send only one Action frame containing
+ * a PREQ element for root path confirmation.
*/
struct mesh_config {
- /* Timeouts in ms */
- /* Mesh plink management parameters */
u16 dot11MeshRetryTimeout;
u16 dot11MeshConfirmTimeout;
u16 dot11MeshHoldingTimeout;
u16 dot11MeshMaxPeerLinks;
- u8 dot11MeshMaxRetries;
- u8 dot11MeshTTL;
- /* ttl used in path selection information elements */
- u8 element_ttl;
+ u8 dot11MeshMaxRetries;
+ u8 dot11MeshTTL;
+ u8 element_ttl;
bool auto_open_plinks;
- /* neighbor offset synchronization */
u32 dot11MeshNbrOffsetMaxNeighbor;
- /* HWMP parameters */
- u8 dot11MeshHWMPmaxPREQretries;
+ u8 dot11MeshHWMPmaxPREQretries;
u32 path_refresh_time;
u16 min_discovery_timeout;
u32 dot11MeshHWMPactivePathTimeout;
u16 dot11MeshHWMPpreqMinInterval;
u16 dot11MeshHWMPperrMinInterval;
u16 dot11MeshHWMPnetDiameterTraversalTime;
- u8 dot11MeshHWMPRootMode;
+ u8 dot11MeshHWMPRootMode;
u16 dot11MeshHWMPRannInterval;
- /* This is missnamed in draft 12.0: dot11MeshGateAnnouncementProtocol
- * set to true only means that the station will announce others it's a
- * mesh gate, but not necessarily using the gate announcement protocol.
- * Still keeping the same nomenclature to be in sync with the spec. */
- bool dot11MeshGateAnnouncementProtocol;
+ bool dot11MeshGateAnnouncementProtocol;
bool dot11MeshForwarding;
s32 rssi_threshold;
u16 ht_opmode;
+ u32 dot11MeshHWMPactivePathToRootTimeout;
+ u16 dot11MeshHWMProotInterval;
+ u16 dot11MeshHWMPconfirmationInterval;
};
/**
* struct mesh_setup - 802.11s mesh setup configuration
+ * @channel: the channel to start the mesh network on
+ * @channel_type: the channel type to use
* @mesh_id: the mesh ID
* @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes
* @sync_method: which synchronization method to use
@@ -840,6 +920,8 @@ struct mesh_config {
* These parameters are fixed when the mesh is created.
*/
struct mesh_setup {
+ struct ieee80211_channel *channel;
+ enum nl80211_channel_type channel_type;
const u8 *mesh_id;
u8 mesh_id_len;
u8 sync_method;
@@ -917,7 +999,7 @@ struct cfg80211_ssid {
* @ie_len: length of ie in octets
* @rates: bitmap of rates to advertise for each band
* @wiphy: the wiphy this was for
- * @dev: the interface
+ * @wdev: the wireless device to scan for
* @aborted: (internal) scan request was notified as aborted
* @no_cck: used to send probe requests at non CCK rate in 2GHz band
*/
@@ -930,9 +1012,10 @@ struct cfg80211_scan_request {
u32 rates[IEEE80211_NUM_BANDS];
+ struct wireless_dev *wdev;
+
/* internal */
struct wiphy *wiphy;
- struct net_device *dev;
bool aborted;
bool no_cck;
@@ -966,6 +1049,7 @@ struct cfg80211_match_set {
* @wiphy: the wiphy this was for
* @dev: the interface
* @channels: channels to scan
+ * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
*/
struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
@@ -976,6 +1060,7 @@ struct cfg80211_sched_scan_request {
size_t ie_len;
struct cfg80211_match_set *match_sets;
int n_match_sets;
+ s32 rssi_thold;
/* internal */
struct wiphy *wiphy;
@@ -1351,10 +1436,10 @@ struct cfg80211_gtk_rekey_data {
*
* @add_virtual_intf: create a new virtual interface with the given name,
* must set the struct wireless_dev's iftype. Beware: You must create
- * the new netdev in the wiphy's network namespace! Returns the netdev,
- * or an ERR_PTR.
+ * the new netdev in the wiphy's network namespace! Returns the struct
+ * wireless_dev, or an ERR_PTR.
*
- * @del_virtual_intf: remove the virtual interface determined by ifindex.
+ * @del_virtual_intf: remove the virtual interface
*
* @change_virtual_intf: change type/configuration of virtual interface,
* keep the struct wireless_dev's iftype updated.
@@ -1411,14 +1496,14 @@ struct cfg80211_gtk_rekey_data {
*
* @set_txq_params: Set TX queue parameters
*
- * @set_channel: Set channel for a given wireless interface. Some devices
- * may support multi-channel operation (by channel hopping) so cfg80211
- * doesn't verify much. Note, however, that the passed netdev may be
- * %NULL as well if the user requested changing the channel for the
- * device itself, or for a monitor interface.
- * @get_channel: Get the current operating channel, should return %NULL if
- * there's no single defined operating channel if for example the
- * device implements channel hopping for multi-channel virtual interfaces.
+ * @libertas_set_mesh_channel: Only for backward compatibility for libertas,
+ * as it doesn't implement join_mesh and needs to set the channel to
+ * join the mesh instead.
+ *
+ * @set_monitor_channel: Set the monitor mode channel for the device. If other
+ * interfaces are active this callback should reject the configuration.
+ * If no interfaces are active or the device is down, the channel should
+ * be stored for when a monitor interface becomes active.
*
* @scan: Request to do a scan. If returning zero, the scan request is given
* the driver, and will be valid until passed to cfg80211_scan_done().
@@ -1488,6 +1573,8 @@ struct cfg80211_gtk_rekey_data {
* @set_power_mgmt: Configure WLAN power management. A timeout value of -1
* allows the driver to adjust the dynamic ps timeout value.
* @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold.
+ * @set_cqm_txe_config: Configure connection quality monitor TX error
+ * thresholds.
* @sched_scan_start: Tell the driver to start a scheduled scan.
* @sched_scan_stop: Tell the driver to stop an ongoing scheduled
* scan. The driver_initiated flag specifies whether the driver
@@ -1525,18 +1612,23 @@ struct cfg80211_gtk_rekey_data {
* @get_et_strings: Ethtool API to get a set of strings to describe stats
* and perhaps other supported types of ethtool data-sets.
* See @ethtool_ops.get_strings
+ *
+ * @get_channel: Get the current operating channel for the virtual interface.
+ * For monitor interfaces, it should return %NULL unless there's a single
+ * current monitoring channel.
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
int (*resume)(struct wiphy *wiphy);
void (*set_wakeup)(struct wiphy *wiphy, bool enabled);
- struct net_device * (*add_virtual_intf)(struct wiphy *wiphy,
- char *name,
- enum nl80211_iftype type,
- u32 *flags,
- struct vif_params *params);
- int (*del_virtual_intf)(struct wiphy *wiphy, struct net_device *dev);
+ struct wireless_dev * (*add_virtual_intf)(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params);
+ int (*del_virtual_intf)(struct wiphy *wiphy,
+ struct wireless_dev *wdev);
int (*change_virtual_intf)(struct wiphy *wiphy,
struct net_device *dev,
enum nl80211_iftype type, u32 *flags,
@@ -1605,11 +1697,15 @@ struct cfg80211_ops {
int (*set_txq_params)(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_txq_params *params);
- int (*set_channel)(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type);
+ int (*libertas_set_mesh_channel)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct ieee80211_channel *chan);
+
+ int (*set_monitor_channel)(struct wiphy *wiphy,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type);
- int (*scan)(struct wiphy *wiphy, struct net_device *dev,
+ int (*scan)(struct wiphy *wiphy,
struct cfg80211_scan_request *request);
int (*auth)(struct wiphy *wiphy, struct net_device *dev,
@@ -1663,23 +1759,23 @@ struct cfg80211_ops {
int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev);
int (*remain_on_channel)(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration,
u64 *cookie);
int (*cancel_remain_on_channel)(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u64 cookie);
- int (*mgmt_tx)(struct wiphy *wiphy, struct net_device *dev,
+ int (*mgmt_tx)(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
bool dont_wait_for_ack, u64 *cookie);
int (*mgmt_tx_cancel_wait)(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u64 cookie);
int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev,
@@ -1689,8 +1785,12 @@ struct cfg80211_ops {
struct net_device *dev,
s32 rssi_thold, u32 rssi_hyst);
+ int (*set_cqm_txe_config)(struct wiphy *wiphy,
+ struct net_device *dev,
+ u32 rate, u32 pkts, u32 intvl);
+
void (*mgmt_frame_register)(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u16 frame_type, bool reg);
int (*set_antenna)(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant);
@@ -1721,15 +1821,17 @@ struct cfg80211_ops {
struct net_device *dev,
u16 noack_map);
- struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy,
- enum nl80211_channel_type *type);
-
int (*get_et_sset_count)(struct wiphy *wiphy,
struct net_device *dev, int sset);
void (*get_et_stats)(struct wiphy *wiphy, struct net_device *dev,
struct ethtool_stats *stats, u64 *data);
void (*get_et_strings)(struct wiphy *wiphy, struct net_device *dev,
u32 sset, u8 *data);
+
+ struct ieee80211_channel *
+ (*get_channel)(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ enum nl80211_channel_type *type);
};
/*
@@ -2083,7 +2185,9 @@ struct wiphy {
char fw_version[ETHTOOL_BUSINFO_LEN];
u32 hw_version;
+#ifdef CONFIG_PM
struct wiphy_wowlan_support wowlan;
+#endif
u16 max_remain_on_channel_duration;
@@ -2250,20 +2354,31 @@ struct cfg80211_internal_bss;
struct cfg80211_cached_keys;
/**
- * struct wireless_dev - wireless per-netdev state
+ * struct wireless_dev - wireless device state
+ *
+ * For netdevs, this structure must be allocated by the driver
+ * that uses the ieee80211_ptr field in struct net_device (this
+ * is intentional so it can be allocated along with the netdev.)
+ * It need not be registered then as netdev registration will
+ * be intercepted by cfg80211 to see the new wireless device.
*
- * This structure must be allocated by the driver/stack
- * that uses the ieee80211_ptr field in struct net_device
- * (this is intentional so it can be allocated along with
- * the netdev.)
+ * For non-netdev uses, it must also be allocated by the driver
+ * in response to the cfg80211 callbacks that require it, as
+ * there's no netdev registration in that case it may not be
+ * allocated outside of callback operations that return it.
*
* @wiphy: pointer to hardware description
* @iftype: interface type
* @list: (private) Used to collect the interfaces
- * @netdev: (private) Used to reference back to the netdev
+ * @netdev: (private) Used to reference back to the netdev, may be %NULL
+ * @identifier: (private) Identifier used in nl80211 to identify this
+ * wireless device if it has no netdev
* @current_bss: (private) Used by the internal configuration code
* @channel: (private) Used by the internal configuration code to track
- * user-set AP, monitor and WDS channels for wireless extensions
+ * the user-set AP, monitor and WDS channel
+ * @preset_chan: (private) Used by the internal configuration code to
+ * track the channel to be used for AP later
+ * @preset_chantype: (private) the corresponding channel type
* @bssid: (private) Used by the internal configuration code
* @ssid: (private) Used by the internal configuration code
* @ssid_len: (private) Used by the internal configuration code
@@ -2289,6 +2404,8 @@ struct wireless_dev {
struct list_head list;
struct net_device *netdev;
+ u32 identifier;
+
struct list_head mgmt_registrations;
spinlock_t mgmt_registrations_lock;
@@ -2313,8 +2430,14 @@ struct wireless_dev {
spinlock_t event_lock;
struct cfg80211_internal_bss *current_bss; /* associated / joined */
+ struct ieee80211_channel *preset_chan;
+ enum nl80211_channel_type preset_chantype;
+
+ /* for AP and mesh channel tracking */
struct ieee80211_channel *channel;
+ bool ibss_fixed;
+
bool ps;
int ps_timeout;
@@ -3169,7 +3292,7 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason,
/**
* cfg80211_ready_on_channel - notification of remain_on_channel start
- * @dev: network device
+ * @wdev: wireless device
* @cookie: the request cookie
* @chan: The current channel (from remain_on_channel request)
* @channel_type: Channel type
@@ -3177,21 +3300,20 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason,
* channel
* @gfp: allocation flags
*/
-void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
+void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration, gfp_t gfp);
/**
* cfg80211_remain_on_channel_expired - remain_on_channel duration expired
- * @dev: network device
+ * @wdev: wireless device
* @cookie: the request cookie
* @chan: The current channel (from remain_on_channel request)
* @channel_type: Channel type
* @gfp: allocation flags
*/
-void cfg80211_remain_on_channel_expired(struct net_device *dev,
- u64 cookie,
+void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
gfp_t gfp);
@@ -3219,7 +3341,7 @@ void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp);
/**
* cfg80211_rx_mgmt - notification of received, unprocessed management frame
- * @dev: network device
+ * @wdev: wireless device receiving the frame
* @freq: Frequency on which the frame was received in MHz
* @sig_dbm: signal strength in mBm, or 0 if unknown
* @buf: Management frame (header + body)
@@ -3234,12 +3356,12 @@ void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp);
* This function is called whenever an Action frame is received for a station
* mode interface, but is not processed in kernel.
*/
-bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_dbm,
+bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
const u8 *buf, size_t len, gfp_t gfp);
/**
* cfg80211_mgmt_tx_status - notification of TX status for management frame
- * @dev: network device
+ * @wdev: wireless device receiving the frame
* @cookie: Cookie returned by cfg80211_ops::mgmt_tx()
* @buf: Management frame (header + body)
* @len: length of the frame data
@@ -3250,7 +3372,7 @@ bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_dbm,
* transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the
* transmission attempt.
*/
-void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie,
+void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
const u8 *buf, size_t len, bool ack, gfp_t gfp);
@@ -3280,6 +3402,21 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
const u8 *peer, u32 num_packets, gfp_t gfp);
/**
+ * cfg80211_cqm_txe_notify - TX error rate event
+ * @dev: network device
+ * @peer: peer's MAC address
+ * @num_packets: how many packets were lost
+ * @rate: % of packets which failed transmission
+ * @intvl: interval (in s) over which the TX failure threshold was breached.
+ * @gfp: context flags
+ *
+ * Notify userspace when configured % TX failures over number of packets in a
+ * given interval is exceeded.
+ */
+void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer,
+ u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
+
+/**
* cfg80211_gtk_rekey_notify - notify userspace about driver rekeying
* @dev: network device
* @bssid: BSSID of AP (to avoid races)
@@ -3359,11 +3496,14 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
const u8 *frame, size_t len,
int freq, int sig_dbm, gfp_t gfp);
-/*
+/**
* cfg80211_can_beacon_sec_chan - test if ht40 on extension channel can be used
* @wiphy: the wiphy
* @chan: main channel
* @channel_type: HT mode
+ *
+ * This function returns true if there is no secondary channel or the secondary
+ * channel can be used for beaconing (i.e. is not a radar channel etc.)
*/
bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
struct ieee80211_channel *chan,
@@ -3386,7 +3526,7 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
*
* return 0 if MCS index >= 32
*/
-u16 cfg80211_calculate_bitrate(struct rate_info *rate);
+u32 cfg80211_calculate_bitrate(struct rate_info *rate);
/* Logging, debugging and troubleshooting/diagnostic helpers. */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index c507e05d172f..4f7d6a182381 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -67,6 +67,8 @@ extern void dn_rt_cache_flush(int delay);
struct dn_route {
struct dst_entry dst;
+ struct neighbour *n;
+
struct flowidn fld;
__le16 rt_saddr;
diff --git a/include/net/dst.h b/include/net/dst.h
index 8197eadca819..baf597890064 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -42,16 +42,16 @@ struct dst_entry {
struct dst_entry *from;
};
struct dst_entry *path;
- struct neighbour __rcu *_neighbour;
+ void *__pad0;
#ifdef CONFIG_XFRM
struct xfrm_state *xfrm;
#else
void *__pad1;
#endif
- int (*input)(struct sk_buff*);
- int (*output)(struct sk_buff*);
+ int (*input)(struct sk_buff *);
+ int (*output)(struct sk_buff *);
- int flags;
+ unsigned short flags;
#define DST_HOST 0x0001
#define DST_NOXFRM 0x0002
#define DST_NOPOLICY 0x0004
@@ -62,8 +62,23 @@ struct dst_entry {
#define DST_FAKE_RTABLE 0x0080
#define DST_XFRM_TUNNEL 0x0100
+ unsigned short pending_confirm;
+
short error;
+
+ /* A non-zero value of dst->obsolete forces by-hand validation
+ * of the route entry. Positive values are set by the generic
+ * dst layer to indicate that the entry has been forcefully
+ * destroyed.
+ *
+ * Negative values are used by the implementation layer code to
+ * force invocation of the dst_ops->check() method.
+ */
short obsolete;
+#define DST_OBSOLETE_NONE 0
+#define DST_OBSOLETE_DEAD 2
+#define DST_OBSOLETE_FORCE_CHK -1
+#define DST_OBSOLETE_KILL -2
unsigned short header_len; /* more space at head required */
unsigned short trailer_len; /* space to reserve at tail */
#ifdef CONFIG_IP_ROUTE_CLASSID
@@ -94,21 +109,6 @@ struct dst_entry {
};
};
-static inline struct neighbour *dst_get_neighbour_noref(struct dst_entry *dst)
-{
- return rcu_dereference(dst->_neighbour);
-}
-
-static inline struct neighbour *dst_get_neighbour_noref_raw(struct dst_entry *dst)
-{
- return rcu_dereference_raw(dst->_neighbour);
-}
-
-static inline void dst_set_neighbour(struct dst_entry *dst, struct neighbour *neigh)
-{
- rcu_assign_pointer(dst->_neighbour, neigh);
-}
-
extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
extern const u32 dst_default_metrics[RTAX_MAX];
@@ -222,12 +222,6 @@ static inline unsigned long dst_metric_rtt(const struct dst_entry *dst, int metr
return msecs_to_jiffies(dst_metric(dst, metric));
}
-static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
- unsigned long rtt)
-{
- dst_metric_set(dst, metric, jiffies_to_msecs(rtt));
-}
-
static inline u32
dst_allfrag(const struct dst_entry *dst)
{
@@ -241,7 +235,7 @@ dst_metric_locked(const struct dst_entry *dst, int metric)
return dst_metric(dst, RTAX_LOCK) & (1<<metric);
}
-static inline void dst_hold(struct dst_entry * dst)
+static inline void dst_hold(struct dst_entry *dst)
{
/*
* If your kernel compilation stops here, please check
@@ -264,8 +258,7 @@ static inline void dst_use_noref(struct dst_entry *dst, unsigned long time)
dst->lastuse = time;
}
-static inline
-struct dst_entry * dst_clone(struct dst_entry * dst)
+static inline struct dst_entry *dst_clone(struct dst_entry *dst)
{
if (dst)
atomic_inc(&dst->__refcnt);
@@ -371,14 +364,15 @@ static inline struct dst_entry *skb_dst_pop(struct sk_buff *skb)
}
extern int dst_discard(struct sk_buff *skb);
-extern void *dst_alloc(struct dst_ops * ops, struct net_device *dev,
- int initial_ref, int initial_obsolete, int flags);
-extern void __dst_free(struct dst_entry * dst);
-extern struct dst_entry *dst_destroy(struct dst_entry * dst);
+extern void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+ int initial_ref, int initial_obsolete,
+ unsigned short flags);
+extern void __dst_free(struct dst_entry *dst);
+extern struct dst_entry *dst_destroy(struct dst_entry *dst);
-static inline void dst_free(struct dst_entry * dst)
+static inline void dst_free(struct dst_entry *dst)
{
- if (dst->obsolete > 1)
+ if (dst->obsolete > 0)
return;
if (!atomic_read(&dst->__refcnt)) {
dst = dst_destroy(dst);
@@ -396,19 +390,35 @@ static inline void dst_rcu_free(struct rcu_head *head)
static inline void dst_confirm(struct dst_entry *dst)
{
- if (dst) {
- struct neighbour *n;
+ dst->pending_confirm = 1;
+}
- rcu_read_lock();
- n = dst_get_neighbour_noref(dst);
- neigh_confirm(n);
- rcu_read_unlock();
+static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
+ struct sk_buff *skb)
+{
+ struct hh_cache *hh;
+
+ if (unlikely(dst->pending_confirm)) {
+ n->confirmed = jiffies;
+ dst->pending_confirm = 0;
}
+
+ hh = &n->hh;
+ if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
+ return neigh_hh_output(hh, skb);
+ else
+ return n->output(n, skb);
}
static inline struct neighbour *dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
- return dst->ops->neigh_lookup(dst, daddr);
+ return dst->ops->neigh_lookup(dst, NULL, daddr);
+}
+
+static inline struct neighbour *dst_neigh_lookup_skb(const struct dst_entry *dst,
+ struct sk_buff *skb)
+{
+ return dst->ops->neigh_lookup(dst, skb, NULL);
}
static inline void dst_link_failure(struct sk_buff *skb)
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 3682a0a076c1..2f26dfb8450e 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -8,6 +8,7 @@ struct dst_entry;
struct kmem_cachep;
struct net_device;
struct sk_buff;
+struct sock;
struct dst_ops {
unsigned short family;
@@ -24,9 +25,14 @@ struct dst_ops {
struct net_device *dev, int how);
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
- void (*update_pmtu)(struct dst_entry *dst, u32 mtu);
+ void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu);
+ void (*redirect)(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
int (*local_out)(struct sk_buff *skb);
- struct neighbour * (*neigh_lookup)(const struct dst_entry *dst, const void *daddr);
+ struct neighbour * (*neigh_lookup)(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
struct kmem_cache *kmem_cachep;
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 075f1e3a0fed..e361f4882426 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -52,6 +52,7 @@ struct fib_rules_ops {
struct sk_buff *,
struct fib_rule_hdr *,
struct nlattr **);
+ void (*delete)(struct fib_rule *);
int (*compare)(struct fib_rule *,
struct fib_rule_hdr *,
struct nlattr **);
diff --git a/include/net/flow.h b/include/net/flow.h
index 6c469dbdb917..e1dd5082ec7e 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -20,8 +20,7 @@ struct flowi_common {
__u8 flowic_proto;
__u8 flowic_flags;
#define FLOWI_FLAG_ANYSRC 0x01
-#define FLOWI_FLAG_PRECOW_METRICS 0x02
-#define FLOWI_FLAG_CAN_SLEEP 0x04
+#define FLOWI_FLAG_CAN_SLEEP 0x02
__u32 flowic_secid;
};
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index ccb68880abf5..48905cd3884c 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -5,6 +5,8 @@
#include <net/netlink.h>
#include <net/net_namespace.h>
+#define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN)
+
/**
* struct genl_multicast_group - generic netlink multicast group
* @name: name of the multicast group, names are per-family
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 1866a676c810..04642c920431 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -26,6 +26,7 @@ extern int inet6_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb, bool relax);
extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
+ struct flowi6 *fl6,
const struct request_sock *req);
extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
@@ -42,4 +43,6 @@ extern void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
extern int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl);
+
+extern struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu);
#endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 22fac9892b16..234008782c8c 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -14,9 +14,11 @@ struct sockaddr;
struct socket;
extern int inet_release(struct socket *sock);
-extern int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
+extern int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
-extern int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
+extern int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags);
+extern int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
extern int inet_accept(struct socket *sock, struct socket *newsock, int flags);
extern int inet_sendmsg(struct kiocb *iocb, struct socket *sock,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 7d83f90f203f..5ee66f517b4f 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -43,7 +43,6 @@ struct inet_connection_sock_af_ops {
struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst);
- struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it);
u16 net_header_len;
u16 net_frag_header_len;
u16 sockaddr_len;
@@ -337,4 +336,6 @@ extern int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
extern int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen);
+
+extern struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu);
#endif /* _INET_CONNECTION_SOCK_H */
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 808fc5f76b03..54be0287eb98 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -379,10 +379,10 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo,
const __be16 sport,
const __be16 dport)
{
- struct sock *sk;
+ struct sock *sk = skb_steal_sock(skb);
const struct iphdr *iph = ip_hdr(skb);
- if (unlikely(sk = skb_steal_sock(skb)))
+ if (sk)
return sk;
else
return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index ae17e1352d7e..613cfa401672 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -172,6 +172,7 @@ struct inet_sock {
int uc_index;
int mc_index;
__be32 mc_addr;
+ int rx_dst_ifindex;
struct ip_mc_socklist __rcu *mc_list;
struct inet_cork_full cork;
};
@@ -245,8 +246,6 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
if (inet_sk(sk)->transparent || inet_sk(sk)->hdrincl)
flags |= FLOWI_FLAG_ANYSRC;
- if (sk->sk_protocol == IPPROTO_TCP)
- flags |= FLOWI_FLAG_PRECOW_METRICS;
return flags;
}
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 2040bff945d4..53f464d7cddc 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -36,25 +36,19 @@ struct inet_peer {
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
unsigned long rate_last;
- unsigned long pmtu_expires;
- u32 pmtu_orig;
- u32 pmtu_learned;
- struct inetpeer_addr_base redirect_learned;
union {
struct list_head gc_list;
struct rcu_head gc_rcu;
};
/*
* Once inet_peer is queued for deletion (refcnt == -1), following fields
- * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
+ * are not available: rid, ip_id_count
* We can share memory with rcu_head to help keep inet_peer small.
*/
union {
struct {
atomic_t rid; /* Frag reception counter */
atomic_t ip_id_count; /* IP ID for the next packet */
- __u32 tcp_ts;
- __u32 tcp_ts_stamp;
};
struct rcu_head rcu;
struct inet_peer *gc_next;
@@ -65,6 +59,69 @@ struct inet_peer {
atomic_t refcnt;
};
+struct inet_peer_base {
+ struct inet_peer __rcu *root;
+ seqlock_t lock;
+ u32 flush_seq;
+ int total;
+};
+
+#define INETPEER_BASE_BIT 0x1UL
+
+static inline struct inet_peer *inetpeer_ptr(unsigned long val)
+{
+ BUG_ON(val & INETPEER_BASE_BIT);
+ return (struct inet_peer *) val;
+}
+
+static inline struct inet_peer_base *inetpeer_base_ptr(unsigned long val)
+{
+ if (!(val & INETPEER_BASE_BIT))
+ return NULL;
+ val &= ~INETPEER_BASE_BIT;
+ return (struct inet_peer_base *) val;
+}
+
+static inline bool inetpeer_ptr_is_peer(unsigned long val)
+{
+ return !(val & INETPEER_BASE_BIT);
+}
+
+static inline void __inetpeer_ptr_set_peer(unsigned long *val, struct inet_peer *peer)
+{
+ /* This implicitly clears INETPEER_BASE_BIT */
+ *val = (unsigned long) peer;
+}
+
+static inline bool inetpeer_ptr_set_peer(unsigned long *ptr, struct inet_peer *peer)
+{
+ unsigned long val = (unsigned long) peer;
+ unsigned long orig = *ptr;
+
+ if (!(orig & INETPEER_BASE_BIT) ||
+ cmpxchg(ptr, orig, val) != orig)
+ return false;
+ return true;
+}
+
+static inline void inetpeer_init_ptr(unsigned long *ptr, struct inet_peer_base *base)
+{
+ *ptr = (unsigned long) base | INETPEER_BASE_BIT;
+}
+
+static inline void inetpeer_transfer_peer(unsigned long *to, unsigned long *from)
+{
+ unsigned long val = *from;
+
+ *to = val;
+ if (inetpeer_ptr_is_peer(val)) {
+ struct inet_peer *peer = inetpeer_ptr(val);
+ atomic_inc(&peer->refcnt);
+ }
+}
+
+extern void inet_peer_base_init(struct inet_peer_base *);
+
void inet_initpeers(void) __init;
#define INETPEER_METRICS_NEW (~(u32) 0)
@@ -75,31 +132,38 @@ static inline bool inet_metrics_new(const struct inet_peer *p)
}
/* can be called with or without local BH being disabled */
-struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create);
+struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+ const struct inetpeer_addr *daddr,
+ int create);
-static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
+static inline struct inet_peer *inet_getpeer_v4(struct inet_peer_base *base,
+ __be32 v4daddr,
+ int create)
{
struct inetpeer_addr daddr;
daddr.addr.a4 = v4daddr;
daddr.family = AF_INET;
- return inet_getpeer(&daddr, create);
+ return inet_getpeer(base, &daddr, create);
}
-static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr, int create)
+static inline struct inet_peer *inet_getpeer_v6(struct inet_peer_base *base,
+ const struct in6_addr *v6daddr,
+ int create)
{
struct inetpeer_addr daddr;
*(struct in6_addr *)daddr.addr.a6 = *v6daddr;
daddr.family = AF_INET6;
- return inet_getpeer(&daddr, create);
+ return inet_getpeer(base, &daddr, create);
}
/* can be called from BH context or outside */
extern void inet_putpeer(struct inet_peer *p);
extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
-extern void inetpeer_invalidate_tree(int family);
+extern void inetpeer_invalidate_tree(struct inet_peer_base *);
+extern void inetpeer_invalidate_family(int family);
/*
* temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
diff --git a/include/net/ip.h b/include/net/ip.h
index 83e0619f59d0..bd5e444a19ce 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -158,8 +158,9 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
-void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
- const struct ip_reply_arg *arg, unsigned int len);
+void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ __be32 saddr, const struct ip_reply_arg *arg,
+ unsigned int len);
struct ipv4_config {
int log_martians;
@@ -210,6 +211,9 @@ extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;
+/* From ip_input.c */
+extern int sysctl_ip_early_demux;
+
/* From ip_output.c */
extern int sysctl_ip_dynaddr;
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 0ae759a6c76e..0fedbd8d747a 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -86,6 +86,8 @@ struct fib6_table;
struct rt6_info {
struct dst_entry dst;
+ struct neighbour *n;
+
/*
* Tail elements of dst_entry (__refcnt etc.)
* and these elements (rarely used in hot path) are in
@@ -107,7 +109,7 @@ struct rt6_info {
u32 rt6i_peer_genid;
struct inet6_dev *rt6i_idev;
- struct inet_peer *rt6i_peer;
+ unsigned long _rt6i_peer;
#ifdef CONFIG_XFRM
u32 rt6i_flow_cache_genid;
@@ -118,6 +120,36 @@ struct rt6_info {
u8 rt6i_protocol;
};
+static inline struct inet_peer *rt6_peer_ptr(struct rt6_info *rt)
+{
+ return inetpeer_ptr(rt->_rt6i_peer);
+}
+
+static inline bool rt6_has_peer(struct rt6_info *rt)
+{
+ return inetpeer_ptr_is_peer(rt->_rt6i_peer);
+}
+
+static inline void __rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
+{
+ __inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
+}
+
+static inline bool rt6_set_peer(struct rt6_info *rt, struct inet_peer *peer)
+{
+ return inetpeer_ptr_set_peer(&rt->_rt6i_peer, peer);
+}
+
+static inline void rt6_init_peer(struct rt6_info *rt, struct inet_peer_base *base)
+{
+ inetpeer_init_ptr(&rt->_rt6i_peer, base);
+}
+
+static inline void rt6_transfer_peer(struct rt6_info *rt, struct rt6_info *ort)
+{
+ inetpeer_transfer_peer(&rt->_rt6i_peer, &ort->_rt6i_peer);
+}
+
static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
{
return ((struct rt6_info *)dst)->rt6i_idev;
@@ -207,6 +239,7 @@ struct fib6_table {
u32 tb6_id;
rwlock_t tb6_lock;
struct fib6_node tb6_root;
+ struct inet_peer_base tb6_peers;
};
#define RT6_TABLE_UNSPEC RT_TABLE_UNSPEC
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 37c1a1ed82c1..5fa2af00634a 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -53,16 +53,25 @@ static inline unsigned int rt6_flags2srcprefs(int flags)
return (flags >> 3) & 7;
}
-extern void rt6_bind_peer(struct rt6_info *rt,
- int create);
+extern void rt6_bind_peer(struct rt6_info *rt, int create);
+
+static inline struct inet_peer *__rt6_get_peer(struct rt6_info *rt, int create)
+{
+ if (rt6_has_peer(rt))
+ return rt6_peer_ptr(rt);
+
+ rt6_bind_peer(rt, create);
+ return (rt6_has_peer(rt) ? rt6_peer_ptr(rt) : NULL);
+}
static inline struct inet_peer *rt6_get_peer(struct rt6_info *rt)
{
- if (rt->rt6i_peer)
- return rt->rt6i_peer;
+ return __rt6_get_peer(rt, 0);
+}
- rt6_bind_peer(rt, 0);
- return rt->rt6i_peer;
+static inline struct inet_peer *rt6_get_peer_create(struct rt6_info *rt)
+{
+ return __rt6_get_peer(rt, 1);
}
extern void ip6_route_input(struct sk_buff *skb);
@@ -124,17 +133,12 @@ extern int rt6_route_rcv(struct net_device *dev,
u8 *opt, int len,
const struct in6_addr *gwaddr);
-extern void rt6_redirect(const struct in6_addr *dest,
- const struct in6_addr *src,
- const struct in6_addr *saddr,
- struct neighbour *neigh,
- u8 *lladdr,
- int on_link);
-
-extern void rt6_pmtu_discovery(const struct in6_addr *daddr,
- const struct in6_addr *saddr,
- struct net_device *dev,
- u32 pmtu);
+extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
+ int oif, u32 mark);
+extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
+ __be32 mtu);
+extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
struct netlink_callback;
@@ -154,7 +158,8 @@ extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
* Store a destination cache entry in a socket
*/
static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
- struct in6_addr *daddr, struct in6_addr *saddr)
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct rt6_info *rt = (struct rt6_info *) dst;
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index fc73e667b50e..358fb86f57eb 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -9,6 +9,8 @@
#define IP6_TNL_F_CAP_XMIT 0x10000
/* capable of receiving packets */
#define IP6_TNL_F_CAP_RCV 0x20000
+/* determine capability on a per-packet basis */
+#define IP6_TNL_F_CAP_PER_PACKET 0x40000
/* IPv6 tunnel */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 78df0866cc38..e69c3a47153d 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -18,7 +18,9 @@
#include <net/flow.h>
#include <linux/seq_file.h>
+#include <linux/rcupdate.h>
#include <net/fib_rules.h>
+#include <net/inetpeer.h>
struct fib_config {
u8 fc_dst_len;
@@ -44,6 +46,23 @@ struct fib_config {
};
struct fib_info;
+struct rtable;
+
+struct fib_nh_exception {
+ struct fib_nh_exception __rcu *fnhe_next;
+ __be32 fnhe_daddr;
+ u32 fnhe_pmtu;
+ __be32 fnhe_gw;
+ unsigned long fnhe_expires;
+ unsigned long fnhe_stamp;
+};
+
+struct fnhe_hash_bucket {
+ struct fib_nh_exception __rcu *chain;
+};
+
+#define FNHE_HASH_SIZE 2048
+#define FNHE_RECLAIM_DEPTH 5
struct fib_nh {
struct net_device *nh_dev;
@@ -62,6 +81,9 @@ struct fib_nh {
__be32 nh_gw;
__be32 nh_saddr;
int nh_saddr_genid;
+ struct rtable *nh_rth_output;
+ struct rtable *nh_rth_input;
+ struct fnhe_hash_bucket *nh_exceptions;
};
/*
@@ -105,12 +127,10 @@ struct fib_result {
unsigned char nh_sel;
unsigned char type;
unsigned char scope;
+ u32 tclassid;
struct fib_info *fi;
struct fib_table *table;
struct list_head *fa_head;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- struct fib_rule *r;
-#endif
};
struct fib_result_nl {
@@ -157,11 +177,11 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
FIB_RES_SADDR(net, res))
struct fib_table {
- struct hlist_node tb_hlist;
- u32 tb_id;
- int tb_default;
- int tb_num_default;
- unsigned long tb_data[0];
+ struct hlist_node tb_hlist;
+ u32 tb_id;
+ int tb_default;
+ int tb_num_default;
+ unsigned long tb_data[0];
};
extern int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
@@ -214,24 +234,55 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
extern int __net_init fib4_rules_init(struct net *net);
extern void __net_exit fib4_rules_exit(struct net *net);
-#ifdef CONFIG_IP_ROUTE_CLASSID
-extern u32 fib_rules_tclass(const struct fib_result *res);
-#endif
-
-extern int fib_lookup(struct net *n, struct flowi4 *flp, struct fib_result *res);
-
extern struct fib_table *fib_new_table(struct net *net, u32 id);
extern struct fib_table *fib_get_table(struct net *net, u32 id);
+extern int __fib_lookup(struct net *net, struct flowi4 *flp,
+ struct fib_result *res);
+
+static inline int fib_lookup(struct net *net, struct flowi4 *flp,
+ struct fib_result *res)
+{
+ if (!net->ipv4.fib_has_custom_rules) {
+ res->tclassid = 0;
+ if (net->ipv4.fib_local &&
+ !fib_table_lookup(net->ipv4.fib_local, flp, res,
+ FIB_LOOKUP_NOREF))
+ return 0;
+ if (net->ipv4.fib_main &&
+ !fib_table_lookup(net->ipv4.fib_main, flp, res,
+ FIB_LOOKUP_NOREF))
+ return 0;
+ if (net->ipv4.fib_default &&
+ !fib_table_lookup(net->ipv4.fib_default, flp, res,
+ FIB_LOOKUP_NOREF))
+ return 0;
+ return -ENETUNREACH;
+ }
+ return __fib_lookup(net, flp, res);
+}
+
#endif /* CONFIG_IP_MULTIPLE_TABLES */
/* Exported by fib_frontend.c */
extern const struct nla_policy rtm_ipv4_policy[];
extern void ip_fib_init(void);
+extern __be32 fib_compute_spec_dst(struct sk_buff *skb);
extern int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
- __be32 *spec_dst, u32 *itag);
+ struct in_device *idev, u32 *itag);
extern void fib_select_default(struct fib_result *res);
+#ifdef CONFIG_IP_ROUTE_CLASSID
+static inline int fib_num_tclassid_users(struct net *net)
+{
+ return net->ipv4.fib_num_tclassid_users;
+}
+#else
+static inline int fib_num_tclassid_users(struct net *net)
+{
+ return 0;
+}
+#endif
/* Exported by fib_semantics.c */
extern int ip_fib_check_default(__be32 gw, struct net_device *dev);
@@ -253,7 +304,7 @@ static inline void fib_combine_itag(u32 *itag, const struct fib_result *res)
#endif
*itag = FIB_RES_NH(*res).nh_tclassid<<16;
#ifdef CONFIG_IP_MULTIPLE_TABLES
- rtag = fib_rules_tclass(res);
+ rtag = res->tclassid;
if (*itag == 0)
*itag = (rtag<<16);
*itag |= (rtag>>16);
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index d6146b4811c2..95374d1696a1 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1425,7 +1425,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
if (!ct || !nf_ct_is_untracked(ct)) {
- nf_reset(skb);
+ nf_conntrack_put(skb->nfct);
skb->nfct = &nf_ct_untracked_get()->ct_general;
skb->nfctinfo = IP_CT_NEW;
nf_conntrack_get(skb->nfct);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index aecf88436abf..01c34b363a34 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -251,6 +251,8 @@ static inline void fl6_sock_release(struct ip6_flowlabel *fl)
atomic_dec(&fl->users);
}
+extern void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);
+
extern int ip6_ra_control(struct sock *sk, int sel);
extern int ipv6_parse_hopopts(struct sk_buff *skb);
@@ -298,14 +300,23 @@ static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr
return memcmp(a1, a2, sizeof(struct in6_addr));
}
-static inline int
+static inline bool
ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
const struct in6_addr *a2)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul1 = (const unsigned long *)a1;
+ const unsigned long *ulm = (const unsigned long *)m;
+ const unsigned long *ul2 = (const unsigned long *)a2;
+
+ return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
+ ((ul1[1] ^ ul2[1]) & ulm[1]));
+#else
return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
+#endif
}
static inline void ipv6_addr_prefix(struct in6_addr *pfx,
@@ -335,10 +346,17 @@ static inline void ipv6_addr_set(struct in6_addr *addr,
static inline bool ipv6_addr_equal(const struct in6_addr *a1,
const struct in6_addr *a2)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul1 = (const unsigned long *)a1;
+ const unsigned long *ul2 = (const unsigned long *)a2;
+
+ return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
+#else
return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
(a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
(a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
(a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
+#endif
}
static inline bool __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2,
@@ -391,8 +409,27 @@ bool ip6_frag_match(struct inet_frag_queue *q, void *a);
static inline bool ipv6_addr_any(const struct in6_addr *a)
{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul = (const unsigned long *)a;
+
+ return (ul[0] | ul[1]) == 0UL;
+#else
return (a->s6_addr32[0] | a->s6_addr32[1] |
a->s6_addr32[2] | a->s6_addr32[3]) == 0;
+#endif
+}
+
+static inline u32 ipv6_addr_hash(const struct in6_addr *a)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
+ const unsigned long *ul = (const unsigned long *)a;
+ unsigned long x = ul[0] ^ ul[1];
+
+ return (u32)(x ^ (x >> 32));
+#else
+ return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
+ a->s6_addr32[2] ^ a->s6_addr32[3]);
+#endif
}
static inline bool ipv6_addr_loopback(const struct in6_addr *a)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 95e39b6a02ec..bb86aa6f98dd 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -233,8 +233,10 @@ enum ieee80211_rssi_event {
* valid in station mode only while @assoc is true and if also
* requested by %IEEE80211_HW_NEED_DTIM_PERIOD (cf. also hw conf
* @ps_dtim_period)
- * @last_tsf: last beacon's/probe response's TSF timestamp (could be old
+ * @sync_tsf: last beacon's/probe response's TSF timestamp (could be old
* as it may have been received during scanning long ago)
+ * @sync_device_ts: the device timestamp corresponding to the sync_tsf,
+ * the driver/device can use this to calculate synchronisation
* @beacon_int: beacon interval
* @assoc_capability: capabilities taken from assoc resp
* @basic_rates: bitmap of basic rates, each bit stands for an
@@ -281,7 +283,8 @@ struct ieee80211_bss_conf {
u8 dtim_period;
u16 beacon_int;
u16 assoc_capability;
- u64 last_tsf;
+ u64 sync_tsf;
+ u32 sync_device_ts;
u32 basic_rates;
int mcast_rate[IEEE80211_NUM_BANDS];
u16 ht_operation_mode;
@@ -475,7 +478,7 @@ enum mac80211_rate_control_flags {
#define IEEE80211_TX_INFO_RATE_DRIVER_DATA_SIZE 24
/* maximum number of rate stages */
-#define IEEE80211_TX_MAX_RATES 5
+#define IEEE80211_TX_MAX_RATES 4
/**
* struct ieee80211_tx_rate - rate selection/status
@@ -563,11 +566,11 @@ struct ieee80211_tx_info {
} control;
struct {
struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
- u8 ampdu_ack_len;
int ack_signal;
+ u8 ampdu_ack_len;
u8 ampdu_len;
u8 antenna;
- /* 14 bytes free */
+ /* 21 bytes free */
} status;
struct {
struct ieee80211_tx_rate driver_rates[
@@ -634,7 +637,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
info->status.rates[i].count = 0;
BUILD_BUG_ON(
- offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);
+ offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
memset(&info->status.ampdu_ack_len, 0,
sizeof(struct ieee80211_tx_info) -
offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
@@ -696,6 +699,8 @@ enum mac80211_rx_flags {
*
* @mactime: value in microseconds of the 64-bit Time Synchronization Function
* (TSF) timer when the first data symbol (MPDU) arrived at the hardware.
+ * @device_timestamp: arbitrary timestamp for the device, mac80211 doesn't use
+ * it but can store it and pass it back to the driver for synchronisation
* @band: the active band when this frame was received
* @freq: frequency the radio was tuned to when receiving this frame, in MHz
* @signal: signal strength when receiving this frame, either in dBm, in dB or
@@ -709,13 +714,14 @@ enum mac80211_rx_flags {
*/
struct ieee80211_rx_status {
u64 mactime;
- enum ieee80211_band band;
- int freq;
- int signal;
- int antenna;
- int rate_idx;
- int flag;
- unsigned int rx_flags;
+ u32 device_timestamp;
+ u16 flag;
+ u16 freq;
+ u8 rate_idx;
+ u8 rx_flags;
+ u8 band;
+ u8 antenna;
+ s8 signal;
};
/**
@@ -1297,6 +1303,10 @@ enum ieee80211_hw_flags {
* reports, by default it is set to _MCS, _GI and _BW but doesn't
* include _FMT. Use %IEEE80211_RADIOTAP_MCS_HAVE_* values, only
* adding _BW is supported today.
+ *
+ * @netdev_features: netdev features to be set in each netdev created
+ * from this HW. Note only HW checksum features are currently
+ * compatible with mac80211. Other feature bits will be rejected.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -1319,6 +1329,7 @@ struct ieee80211_hw {
u8 max_tx_aggregation_subframes;
u8 offchannel_tx_hw_queue;
u8 radiotap_mcs_details;
+ netdev_features_t netdev_features;
};
/**
@@ -1891,19 +1902,6 @@ enum ieee80211_rate_control_changed {
* The low-level driver should send the frame out based on
* configuration in the TX control data. This handler should,
* preferably, never fail and stop queues appropriately.
- * This must be implemented if @tx_frags is not.
- * Must be atomic.
- *
- * @tx_frags: Called to transmit multiple fragments of a single MSDU.
- * This handler must consume all fragments, sending out some of
- * them only is useless and it can't ask for some of them to be
- * queued again. If the frame is not fragmented the queue has a
- * single SKB only. To avoid issues with the networking stack
- * when TX status is reported the frames should be removed from
- * the skb queue.
- * If this is used, the tx_info @vif and @sta pointers will be
- * invalid -- you must not use them in that case.
- * This must be implemented if @tx isn't.
* Must be atomic.
*
* @start: Called before the first netdevice attached to the hardware
@@ -2183,7 +2181,10 @@ enum ieee80211_rate_control_changed {
* offload. Frames to transmit on the off-channel channel are transmitted
* normally except for the %IEEE80211_TX_CTL_TX_OFFCHAN flag. When the
* duration (which will always be non-zero) expires, the driver must call
- * ieee80211_remain_on_channel_expired(). This callback may sleep.
+ * ieee80211_remain_on_channel_expired().
+ * Note that this callback may be called while the device is in IDLE and
+ * must be accepted in this case.
+ * This callback may sleep.
* @cancel_remain_on_channel: Requests that an ongoing off-channel period is
* aborted before it expires. This callback may sleep.
*
@@ -2246,11 +2247,24 @@ enum ieee80211_rate_control_changed {
* @get_et_strings: Ethtool API to get a set of strings to describe stats
* and perhaps other supported types of ethtool data-sets.
*
+ * @get_rssi: Get current signal strength in dBm, the function is optional
+ * and can sleep.
+ *
+ * @mgd_prepare_tx: Prepare for transmitting a management frame for association
+ * before associated. In multi-channel scenarios, a virtual interface is
+ * bound to a channel before it is associated, but as it isn't associated
+ * yet it need not necessarily be given airtime, in particular since any
+ * transmission to a P2P GO needs to be synchronized against the GO's
+ * powersave state. mac80211 will call this function before transmitting a
+ * management frame prior to having successfully associated to allow the
+ * driver to give it channel time for the transmission, to get a response
+ * and to be able to synchronize with the GO.
+ * The callback will be called before each transmission and upon return
+ * mac80211 will transmit the frame right away.
+ * The callback is optional and can (should!) sleep.
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
- void (*tx_frags)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct sk_buff_head *skbs);
int (*start)(struct ieee80211_hw *hw);
void (*stop)(struct ieee80211_hw *hw);
#ifdef CONFIG_PM
@@ -2385,6 +2399,11 @@ struct ieee80211_ops {
void (*get_et_strings)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u32 sset, u8 *data);
+ int (*get_rssi)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, s8 *rssi_dbm);
+
+ void (*mgd_prepare_tx)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
};
/**
@@ -3557,16 +3576,6 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
gfp_t gfp);
/**
- * ieee80211_get_operstate - get the operstate of the vif
- *
- * @vif: &struct ieee80211_vif pointer from the add_interface callback.
- *
- * The driver might need to know the operstate of the net_device
- * (specifically, whether the link is IF_OPER_UP after resume)
- */
-unsigned char ieee80211_get_operstate(struct ieee80211_vif *vif);
-
-/**
* ieee80211_chswitch_done - Complete channel switch process
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @success: make the channel switch successful or not
@@ -3589,22 +3598,6 @@ void ieee80211_request_smps(struct ieee80211_vif *vif,
enum ieee80211_smps_mode smps_mode);
/**
- * ieee80211_key_removed - disable hw acceleration for key
- * @key_conf: The key hw acceleration should be disabled for
- *
- * This allows drivers to indicate that the given key has been
- * removed from hardware acceleration, due to a new key that
- * was added. Don't use this if the key can continue to be used
- * for TX, if the key restriction is on RX only it is permitted
- * to keep the key for TX only and not call this function.
- *
- * Due to locking constraints, it may only be called during
- * @set_key. This function must be allowed to sleep, and the
- * key it tries to disable may still be used until it returns.
- */
-void ieee80211_key_removed(struct ieee80211_key_conf *key_conf);
-
-/**
* ieee80211_ready_on_channel - notification of remain-on-channel start
* @hw: pointer as obtained from ieee80211_alloc_hw()
*/
@@ -3829,12 +3822,6 @@ void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
-int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
- struct sk_buff *skb, bool need_basic);
-
-int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif,
- struct sk_buff *skb, bool need_basic);
-
/**
* ieee80211_ave_rssi - report the average rssi for the specified interface
*
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index c9f8ab5cc687..d0d11df9cba1 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -21,6 +21,14 @@
#include <net/af_ieee802154.h>
+/* General MAC frame format:
+ * 2 bytes: Frame Control
+ * 1 byte: Sequence Number
+ * 20 bytes: Addressing fields
+ * 14 bytes: Auxiliary Security Header
+ */
+#define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14)
+
/* The following flags are used to indicate changed address settings from
* the stack to the hardware.
*/
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index c02b6ad3f6c5..96a3b5c03e37 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -47,6 +47,8 @@ enum {
#include <linux/icmpv6.h>
#include <linux/in6.h>
#include <linux/types.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
#include <net/neighbour.h>
@@ -80,6 +82,54 @@ struct nd_opt_hdr {
__u8 nd_opt_len;
} __packed;
+/* ND options */
+struct ndisc_options {
+ struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];
+#ifdef CONFIG_IPV6_ROUTE_INFO
+ struct nd_opt_hdr *nd_opts_ri;
+ struct nd_opt_hdr *nd_opts_ri_end;
+#endif
+ struct nd_opt_hdr *nd_useropts;
+ struct nd_opt_hdr *nd_useropts_end;
+};
+
+#define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR]
+#define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR]
+#define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO]
+#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END]
+#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR]
+#define nd_opts_mtu nd_opt_array[ND_OPT_MTU]
+
+#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
+
+extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
+ struct ndisc_options *ndopts);
+
+/*
+ * Return the padding between the option length and the start of the
+ * link addr. Currently only IP-over-InfiniBand needs this, although
+ * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
+ * also need a pad of 2.
+ */
+static int ndisc_addr_option_pad(unsigned short type)
+{
+ switch (type) {
+ case ARPHRD_INFINIBAND: return 2;
+ default: return 0;
+ }
+}
+
+static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
+ struct net_device *dev)
+{
+ u8 *lladdr = (u8 *)(p + 1);
+ int lladdrlen = p->nd_opt_len << 3;
+ int prepad = ndisc_addr_option_pad(dev->type);
+ if (lladdrlen != NDISC_OPT_SPACE(dev->addr_len + prepad))
+ return NULL;
+ return lladdr + prepad;
+}
+
static inline u32 ndisc_hashfn(const void *pkey, const struct net_device *dev, __u32 *hash_rnd)
{
const u32 *p32 = pkey;
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 6cdfeedb650b..344d8988842a 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -202,9 +202,16 @@ extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl,
struct net *net,
const void *pkey);
-extern struct neighbour * neigh_create(struct neigh_table *tbl,
+extern struct neighbour * __neigh_create(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *dev,
+ bool want_ref);
+static inline struct neighbour *neigh_create(struct neigh_table *tbl,
const void *pkey,
- struct net_device *dev);
+ struct net_device *dev)
+{
+ return __neigh_create(tbl, pkey, dev, true);
+}
extern void neigh_destroy(struct neighbour *neigh);
extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
@@ -302,12 +309,6 @@ static inline struct neighbour * neigh_clone(struct neighbour *neigh)
#define neigh_hold(n) atomic_inc(&(n)->refcnt)
-static inline void neigh_confirm(struct neighbour *neigh)
-{
- if (neigh)
- neigh->confirmed = jiffies;
-}
-
static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{
unsigned long now = jiffies;
@@ -351,15 +352,6 @@ static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
return dev_queue_xmit(skb);
}
-static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
-{
- struct hh_cache *hh = &n->hh;
- if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
- return neigh_hh_output(hh, skb);
- else
- return n->output(n, skb);
-}
-
static inline struct neighbour *
__neigh_lookup(struct neigh_table *tbl, const void *pkey, struct net_device *dev, int creat)
{
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index ac9195e6a062..ae1cd6c9ba52 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -101,6 +101,7 @@ struct net {
struct netns_xfrm xfrm;
#endif
struct netns_ipvs *ipvs;
+ struct sock *diag_nlsk;
};
diff --git a/include/net/netevent.h b/include/net/netevent.h
index 086f8a5b59dc..3ce4988c9c08 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -12,10 +12,14 @@
*/
struct dst_entry;
+struct neighbour;
struct netevent_redirect {
struct dst_entry *old;
+ struct neighbour *old_neigh;
struct dst_entry *new;
+ struct neighbour *new_neigh;
+ const void *daddr;
};
enum netevent_notif_type {
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index cce7f6a798bf..f1494feba79f 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -39,36 +39,6 @@ union nf_conntrack_expect_proto {
/* insert expect proto private data here */
};
-/* Add protocol helper include file here */
-#include <linux/netfilter/nf_conntrack_ftp.h>
-#include <linux/netfilter/nf_conntrack_pptp.h>
-#include <linux/netfilter/nf_conntrack_h323.h>
-#include <linux/netfilter/nf_conntrack_sane.h>
-#include <linux/netfilter/nf_conntrack_sip.h>
-
-/* per conntrack: application helper private data */
-union nf_conntrack_help {
- /* insert conntrack helper private data (master) here */
-#if defined(CONFIG_NF_CONNTRACK_FTP) || defined(CONFIG_NF_CONNTRACK_FTP_MODULE)
- struct nf_ct_ftp_master ct_ftp_info;
-#endif
-#if defined(CONFIG_NF_CONNTRACK_PPTP) || \
- defined(CONFIG_NF_CONNTRACK_PPTP_MODULE)
- struct nf_ct_pptp_master ct_pptp_info;
-#endif
-#if defined(CONFIG_NF_CONNTRACK_H323) || \
- defined(CONFIG_NF_CONNTRACK_H323_MODULE)
- struct nf_ct_h323_master ct_h323_info;
-#endif
-#if defined(CONFIG_NF_CONNTRACK_SANE) || \
- defined(CONFIG_NF_CONNTRACK_SANE_MODULE)
- struct nf_ct_sane_master ct_sane_info;
-#endif
-#if defined(CONFIG_NF_CONNTRACK_SIP) || defined(CONFIG_NF_CONNTRACK_SIP_MODULE)
- struct nf_ct_sip_master ct_sip_info;
-#endif
-};
-
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
@@ -89,12 +59,13 @@ struct nf_conn_help {
/* Helper. if any */
struct nf_conntrack_helper __rcu *helper;
- union nf_conntrack_help help;
-
struct hlist_head expectations;
/* Current number of expected connections */
u8 expecting[NF_CT_MAX_EXPECT_CLASSES];
+
+ /* private helper information. */
+ char data[];
};
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index aced085132e7..d8f5b9f52169 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -28,8 +28,8 @@ extern unsigned int nf_conntrack_in(struct net *net,
extern int nf_conntrack_init(struct net *net);
extern void nf_conntrack_cleanup(struct net *net);
-extern int nf_conntrack_proto_init(void);
-extern void nf_conntrack_proto_fini(void);
+extern int nf_conntrack_proto_init(struct net *net);
+extern void nf_conntrack_proto_fini(struct net *net);
extern bool
nf_ct_get_tuple(const struct sk_buff *skb,
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index a88fb6939387..e1ce1048fe5f 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -78,7 +78,7 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
- if (net->ct.nf_conntrack_event_cb == NULL)
+ if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return;
e = nf_ct_ecache_find(ct);
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 4619caadd9d1..983f00263243 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -59,10 +59,12 @@ static inline struct net *nf_ct_exp_net(struct nf_conntrack_expect *exp)
return nf_ct_net(exp->master);
}
+#define NF_CT_EXP_POLICY_NAME_LEN 16
+
struct nf_conntrack_expect_policy {
unsigned int max_expected;
unsigned int timeout;
- const char *name;
+ char name[NF_CT_EXP_POLICY_NAME_LEN];
};
#define NF_CT_EXPECT_CLASS_DEFAULT 0
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 96755c3798a5..8b4d1fc29096 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -80,10 +80,13 @@ static inline void nf_ct_ext_free(struct nf_conn *ct)
}
/* Add this type, returns pointer to data or NULL. */
-void *
-__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
+void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
+ size_t var_alloc_len, gfp_t gfp);
+
#define nf_ct_ext_add(ct, id, gfp) \
- ((id##_TYPE *)__nf_ct_ext_add((ct), (id), (gfp)))
+ ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), 0, (gfp)))
+#define nf_ct_ext_add_length(ct, id, len, gfp) \
+ ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), (len), (gfp)))
#define NF_CT_EXT_F_PREALLOC 0x0001
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 1d1889409b9e..9aad956d1008 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -11,18 +11,27 @@
#define _NF_CONNTRACK_HELPER_H
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_expect.h>
struct module;
+enum nf_ct_helper_flags {
+ NF_CT_HELPER_F_USERSPACE = (1 << 0),
+ NF_CT_HELPER_F_CONFIGURED = (1 << 1),
+};
+
#define NF_CT_HELPER_NAME_LEN 16
struct nf_conntrack_helper {
struct hlist_node hnode; /* Internal use. */
- const char *name; /* name of the module */
+ char name[NF_CT_HELPER_NAME_LEN]; /* name of the module */
struct module *me; /* pointer to self */
const struct nf_conntrack_expect_policy *expect_policy;
+ /* length of internal data, ie. sizeof(struct nf_ct_*_master) */
+ size_t data_len;
+
/* Tuple of things we will help (compared against server response) */
struct nf_conntrack_tuple tuple;
@@ -35,8 +44,12 @@ struct nf_conntrack_helper {
void (*destroy)(struct nf_conn *ct);
+ int (*from_nlattr)(struct nlattr *attr, struct nf_conn *ct);
int (*to_nlattr)(struct sk_buff *skb, const struct nf_conn *ct);
unsigned int expect_class_max;
+
+ unsigned int flags;
+ unsigned int queue_num; /* For user-space helpers. */
};
extern struct nf_conntrack_helper *
@@ -48,7 +61,7 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum);
extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
-extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
+extern struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, struct nf_conntrack_helper *helper, gfp_t gfp);
extern int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
gfp_t flags);
@@ -60,6 +73,15 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct)
return nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
}
+static inline void *nfct_help_data(const struct nf_conn *ct)
+{
+ struct nf_conn_help *help;
+
+ help = nf_ct_ext_find(ct, NF_CT_EXT_HELPER);
+
+ return (void *)help->data;
+}
+
extern int nf_conntrack_helper_init(struct net *net);
extern void nf_conntrack_helper_fini(struct net *net);
@@ -82,4 +104,7 @@ nf_ct_helper_expectfn_find_by_name(const char *name);
struct nf_ct_helper_expectfn *
nf_ct_helper_expectfn_find_by_symbol(const void *symbol);
+extern struct hlist_head *nf_ct_helper_hash;
+extern unsigned int nf_ct_helper_hsize;
+
#endif /*_NF_CONNTRACK_HELPER_H*/
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index 9699c028b74b..6f7c13f4ac03 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -64,11 +64,12 @@ struct nf_conntrack_l3proto {
size_t nla_size;
#ifdef CONFIG_SYSCTL
- struct ctl_table_header *ctl_table_header;
const char *ctl_table_path;
- struct ctl_table *ctl_table;
#endif /* CONFIG_SYSCTL */
+ /* Init l3proto pernet data */
+ int (*init_net)(struct net *net);
+
/* Module (if any) which this is connected to. */
struct module *me;
};
@@ -76,8 +77,10 @@ struct nf_conntrack_l3proto {
extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX];
/* Protocol registration. */
-extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
-extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
+extern int nf_conntrack_l3proto_register(struct net *net,
+ struct nf_conntrack_l3proto *proto);
+extern void nf_conntrack_l3proto_unregister(struct net *net,
+ struct nf_conntrack_l3proto *proto);
extern struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
extern void nf_ct_l3proto_put(struct nf_conntrack_l3proto *p);
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 3b572bb20aa2..c3be4aef6bf7 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -12,6 +12,7 @@
#include <linux/netlink.h>
#include <net/netlink.h>
#include <net/netfilter/nf_conntrack.h>
+#include <net/netns/generic.h>
struct seq_file;
@@ -86,23 +87,21 @@ struct nf_conntrack_l4proto {
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
struct {
size_t obj_size;
- int (*nlattr_to_obj)(struct nlattr *tb[], void *data);
+ int (*nlattr_to_obj)(struct nlattr *tb[],
+ struct net *net, void *data);
int (*obj_to_nlattr)(struct sk_buff *skb, const void *data);
unsigned int nlattr_max;
const struct nla_policy *nla_policy;
} ctnl_timeout;
#endif
+ int *net_id;
+ /* Init l4proto pernet data */
+ int (*init_net)(struct net *net, u_int16_t proto);
+
+ /* Return the per-net protocol part. */
+ struct nf_proto_net *(*get_net_proto)(struct net *net);
-#ifdef CONFIG_SYSCTL
- struct ctl_table_header **ctl_table_header;
- struct ctl_table *ctl_table;
- unsigned int *ctl_table_users;
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- struct ctl_table_header *ctl_compat_table_header;
- struct ctl_table *ctl_compat_table;
-#endif
-#endif
/* Protocol name */
const char *name;
@@ -123,8 +122,18 @@ nf_ct_l4proto_find_get(u_int16_t l3proto, u_int8_t l4proto);
extern void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p);
/* Protocol registration. */
-extern int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *proto);
-extern void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *proto);
+extern int nf_conntrack_l4proto_register(struct net *net,
+ struct nf_conntrack_l4proto *proto);
+extern void nf_conntrack_l4proto_unregister(struct net *net,
+ struct nf_conntrack_l4proto *proto);
+
+static inline void nf_ct_kfree_compat_sysctl_table(struct nf_proto_net *pn)
+{
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ kfree(pn->ctl_compat_table);
+ pn->ctl_compat_table = NULL;
+#endif
+}
/* Generic netlink helpers */
extern int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index 02bb6c29dc3d..7d8fb7b46c44 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -54,4 +54,8 @@ extern void nf_nat_follow_master(struct nf_conn *ct,
extern s16 nf_nat_get_offset(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
+
+extern void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ u32 dir, int off);
+
#endif
diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h
new file mode 100644
index 000000000000..86267a529514
--- /dev/null
+++ b/include/net/netfilter/nfnetlink_queue.h
@@ -0,0 +1,43 @@
+#ifndef _NET_NFNL_QUEUE_H_
+#define _NET_NFNL_QUEUE_H_
+
+#include <linux/netfilter/nf_conntrack_common.h>
+
+struct nf_conn;
+
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
+ enum ip_conntrack_info *ctinfo);
+struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
+ const struct nlattr *attr,
+ enum ip_conntrack_info *ctinfo);
+int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo);
+void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, int diff);
+#else
+inline struct nf_conn *
+nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo)
+{
+ return NULL;
+}
+
+inline struct nf_conn *nfqnl_ct_parse(const struct sk_buff *skb,
+ const struct nlattr *attr,
+ enum ip_conntrack_info *ctinfo)
+{
+ return NULL;
+}
+
+inline int
+nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ return 0;
+}
+
+inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, int diff)
+{
+}
+#endif /* NF_CONNTRACK */
+#endif
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index a053a19870cf..3aecdc7a84fb 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -4,10 +4,64 @@
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/atomic.h>
+#include <linux/netfilter/nf_conntrack_tcp.h>
struct ctl_table_header;
struct nf_conntrack_ecache;
+struct nf_proto_net {
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *ctl_table_header;
+ struct ctl_table *ctl_table;
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ struct ctl_table_header *ctl_compat_header;
+ struct ctl_table *ctl_compat_table;
+#endif
+#endif
+ unsigned int users;
+};
+
+struct nf_generic_net {
+ struct nf_proto_net pn;
+ unsigned int timeout;
+};
+
+struct nf_tcp_net {
+ struct nf_proto_net pn;
+ unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
+ unsigned int tcp_loose;
+ unsigned int tcp_be_liberal;
+ unsigned int tcp_max_retrans;
+};
+
+enum udp_conntrack {
+ UDP_CT_UNREPLIED,
+ UDP_CT_REPLIED,
+ UDP_CT_MAX
+};
+
+struct nf_udp_net {
+ struct nf_proto_net pn;
+ unsigned int timeouts[UDP_CT_MAX];
+};
+
+struct nf_icmp_net {
+ struct nf_proto_net pn;
+ unsigned int timeout;
+};
+
+struct nf_ip_net {
+ struct nf_generic_net generic;
+ struct nf_tcp_net tcp;
+ struct nf_udp_net udp;
+ struct nf_icmp_net icmp;
+ struct nf_icmp_net icmpv6;
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ struct ctl_table_header *ctl_table_header;
+ struct ctl_table *ctl_table;
+#endif
+};
+
struct netns_ct {
atomic_t count;
unsigned int expect_count;
@@ -28,6 +82,7 @@ struct netns_ct {
unsigned int sysctl_log_invalid; /* Log invalid packets */
int sysctl_auto_assign_helper;
bool auto_assign_helper_warned;
+ struct nf_ip_net nf_ct_proto;
#ifdef CONFIG_SYSCTL
struct ctl_table_header *sysctl_header;
struct ctl_table_header *acct_sysctl_header;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index bbd023a1c9b9..0ffb8e31f3cd 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -7,10 +7,12 @@
#include <net/inet_frag.h>
+struct tcpm_hash_bucket;
struct ctl_table_header;
struct ipv4_devconf;
struct fib_rules_ops;
struct hlist_head;
+struct fib_table;
struct sock;
struct netns_ipv4 {
@@ -24,13 +26,21 @@ struct netns_ipv4 {
struct ipv4_devconf *devconf_dflt;
#ifdef CONFIG_IP_MULTIPLE_TABLES
struct fib_rules_ops *rules_ops;
+ bool fib_has_custom_rules;
+ struct fib_table *fib_local;
+ struct fib_table *fib_main;
+ struct fib_table *fib_default;
+#endif
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ int fib_num_tclassid_users;
#endif
struct hlist_head *fib_table_hash;
struct sock *fibnl;
struct sock **icmp_sk;
- struct sock *tcp_sock;
-
+ struct inet_peer_base *peers;
+ struct tcpm_hash_bucket *tcp_metrics_hash;
+ unsigned int tcp_metrics_hash_log;
struct netns_frags frags;
#ifdef CONFIG_NETFILTER
struct xt_table *iptable_filter;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index b42be53587ba..df0a5456a3fd 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -33,6 +33,7 @@ struct netns_ipv6 {
struct netns_sysctl_ipv6 sysctl;
struct ipv6_devconf *devconf_all;
struct ipv6_devconf *devconf_dflt;
+ struct inet_peer_base *peers;
struct netns_frags frags;
#ifdef CONFIG_NETFILTER
struct xt_table *ip6table_filter;
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
index d58fdec47597..2719dec6b5a8 100644
--- a/include/net/netprio_cgroup.h
+++ b/include/net/netprio_cgroup.h
@@ -35,7 +35,7 @@ struct cgroup_netprio_state {
extern int net_prio_subsys_id;
#endif
-extern void sock_update_netprioidx(struct sock *sk);
+extern void sock_update_netprioidx(struct sock *sk, struct task_struct *task);
#if IS_BUILTIN(CONFIG_NETPRIO_CGROUP)
@@ -82,7 +82,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
#endif /* CONFIG_NETPRIO_CGROUP */
#else
-#define sock_update_netprioidx(sk)
+#define sock_update_netprioidx(sk, task)
#endif
#endif /* _NET_CLS_CGROUP_H */
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 4467c9460857..f5169b04f082 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -31,7 +31,8 @@ struct nfc_hci_ops {
void (*close) (struct nfc_hci_dev *hdev);
int (*hci_ready) (struct nfc_hci_dev *hdev);
int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
- int (*start_poll) (struct nfc_hci_dev *hdev, u32 protocols);
+ int (*start_poll) (struct nfc_hci_dev *hdev,
+ u32 im_protocols, u32 tm_protocols);
int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate,
struct nfc_target *target);
int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
@@ -43,10 +44,20 @@ struct nfc_hci_ops {
struct nfc_target *target);
};
-#define NFC_HCI_MAX_CUSTOM_GATES 15
+/* Pipes */
+#define NFC_HCI_INVALID_PIPE 0x80
+#define NFC_HCI_LINK_MGMT_PIPE 0x00
+#define NFC_HCI_ADMIN_PIPE 0x01
+
+struct nfc_hci_gate {
+ u8 gate;
+ u8 pipe;
+};
+
+#define NFC_HCI_MAX_CUSTOM_GATES 50
struct nfc_hci_init_data {
u8 gate_count;
- u8 gates[NFC_HCI_MAX_CUSTOM_GATES];
+ struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
char session_id[9];
};
@@ -111,6 +122,8 @@ void nfc_hci_unregister_device(struct nfc_hci_dev *hdev);
void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
+void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err);
+
/* Host IDs */
#define NFC_HCI_HOST_CONTROLLER_ID 0x00
#define NFC_HCI_TERMINAL_HOST_ID 0x01
@@ -179,7 +192,8 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb);
/* connecting to gates and sending hci instructions */
-int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate);
+int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
+ u8 pipe);
int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate);
int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev);
int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index b7ca4a2a1d72..6431f5e39022 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -53,7 +53,8 @@ struct nfc_target;
struct nfc_ops {
int (*dev_up)(struct nfc_dev *dev);
int (*dev_down)(struct nfc_dev *dev);
- int (*start_poll)(struct nfc_dev *dev, u32 protocols);
+ int (*start_poll)(struct nfc_dev *dev,
+ u32 im_protocols, u32 tm_protocols);
void (*stop_poll)(struct nfc_dev *dev);
int (*dep_link_up)(struct nfc_dev *dev, struct nfc_target *target,
u8 comm_mode, u8 *gb, size_t gb_len);
@@ -62,9 +63,10 @@ struct nfc_ops {
u32 protocol);
void (*deactivate_target)(struct nfc_dev *dev,
struct nfc_target *target);
- int (*data_exchange)(struct nfc_dev *dev, struct nfc_target *target,
+ int (*im_transceive)(struct nfc_dev *dev, struct nfc_target *target,
struct sk_buff *skb, data_exchange_cb_t cb,
void *cb_context);
+ int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb);
int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target);
};
@@ -99,10 +101,10 @@ struct nfc_dev {
int targets_generation;
struct device dev;
bool dev_up;
+ u8 rf_mode;
bool polling;
struct nfc_target *active_target;
bool dep_link_up;
- u32 dep_rf_mode;
struct nfc_genl_data genl_data;
u32 supported_protocols;
@@ -188,6 +190,7 @@ struct sk_buff *nfc_alloc_recv_skb(unsigned int size, gfp_t gfp);
int nfc_set_remote_general_bytes(struct nfc_dev *dev,
u8 *gt, u8 gt_len);
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len);
int nfc_targets_found(struct nfc_dev *dev,
struct nfc_target *targets, int ntargets);
@@ -196,4 +199,11 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx);
int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode);
+int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
+ u8 *gb, size_t gb_len);
+int nfc_tm_deactivated(struct nfc_dev *dev);
+int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb);
+
+void nfc_driver_failure(struct nfc_dev *dev, int err);
+
#endif /* __NET_NFC_H */
diff --git a/include/net/nfc/shdlc.h b/include/net/nfc/shdlc.h
index ab06afd462da..35e930d2f638 100644
--- a/include/net/nfc/shdlc.h
+++ b/include/net/nfc/shdlc.h
@@ -27,7 +27,8 @@ struct nfc_shdlc_ops {
void (*close) (struct nfc_shdlc *shdlc);
int (*hci_ready) (struct nfc_shdlc *shdlc);
int (*xmit) (struct nfc_shdlc *shdlc, struct sk_buff *skb);
- int (*start_poll) (struct nfc_shdlc *shdlc, u32 protocols);
+ int (*start_poll) (struct nfc_shdlc *shdlc,
+ u32 im_protocols, u32 tm_protocols);
int (*target_from_gate) (struct nfc_shdlc *shdlc, u8 gate,
struct nfc_target *target);
int (*complete_target_discovered) (struct nfc_shdlc *shdlc, u8 gate,
diff --git a/include/net/protocol.h b/include/net/protocol.h
index 875f4895b033..057f2d315567 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -29,11 +29,15 @@
#include <linux/ipv6.h>
#endif
-#define MAX_INET_PROTOS 256 /* Must be a power of 2 */
-
+/* This is one larger than the largest protocol value that can be
+ * found in an ipv4 or ipv6 header. Since in both cases the protocol
+ * value is presented in a __u8, this is defined to be 256.
+ */
+#define MAX_INET_PROTOS 256
/* This is used to register protocols. */
struct net_protocol {
+ void (*early_demux)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, u32 info);
int (*gso_send_check)(struct sk_buff *skb);
diff --git a/include/net/regulatory.h b/include/net/regulatory.h
index a5f79933e211..7dcaa2794fde 100644
--- a/include/net/regulatory.h
+++ b/include/net/regulatory.h
@@ -52,6 +52,10 @@ enum environment_cap {
* DFS master operation on a known DFS region (NL80211_DFS_*),
* dfs_region represents that region. Drivers can use this and the
* @alpha2 to adjust their device's DFS parameters as required.
+ * @user_reg_hint_type: if the @initiator was of type
+ * %NL80211_REGDOM_SET_BY_USER, this classifies the type
+ * of hint passed. This could be any of the %NL80211_USER_REG_HINT_*
+ * types.
* @intersect: indicates whether the wireless core should intersect
* the requested regulatory domain with the presently set regulatory
* domain.
@@ -70,6 +74,7 @@ enum environment_cap {
struct regulatory_request {
int wiphy_idx;
enum nl80211_reg_initiator initiator;
+ enum nl80211_user_reg_hint_type user_reg_hint_type;
char alpha2[2];
u8 dfs_region;
bool intersect;
diff --git a/include/net/route.h b/include/net/route.h
index 98705468ac03..c29ef2733f2d 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -40,45 +40,39 @@
#define RT_CONN_FLAGS(sk) (RT_TOS(inet_sk(sk)->tos) | sock_flag(sk, SOCK_LOCALROUTE))
struct fib_nh;
-struct inet_peer;
struct fib_info;
struct rtable {
struct dst_entry dst;
- /* Lookup key. */
- __be32 rt_key_dst;
- __be32 rt_key_src;
-
int rt_genid;
unsigned int rt_flags;
__u16 rt_type;
- __u8 rt_key_tos;
+ __u16 rt_is_input;
- __be32 rt_dst; /* Path destination */
- __be32 rt_src; /* Path source */
- int rt_route_iif;
int rt_iif;
- int rt_oif;
- __u32 rt_mark;
/* Info on neighbour */
__be32 rt_gateway;
/* Miscellaneous cached information */
- __be32 rt_spec_dst; /* RFC1122 specific destination */
- u32 rt_peer_genid;
- struct inet_peer *peer; /* long-living peer info */
- struct fib_info *fi; /* for client ref to shared metrics */
+ u32 rt_pmtu;
};
static inline bool rt_is_input_route(const struct rtable *rt)
{
- return rt->rt_route_iif != 0;
+ return rt->rt_is_input != 0;
}
static inline bool rt_is_output_route(const struct rtable *rt)
{
- return rt->rt_route_iif == 0;
+ return rt->rt_is_input == 0;
+}
+
+static inline __be32 rt_nexthop(const struct rtable *rt, __be32 daddr)
+{
+ if (rt->rt_gateway)
+ return rt->rt_gateway;
+ return daddr;
}
struct ip_rt_acct {
@@ -111,10 +105,7 @@ extern struct ip_rt_acct __percpu *ip_rt_acct;
struct in_device;
extern int ip_rt_init(void);
-extern void ip_rt_redirect(__be32 old_gw, __be32 dst, __be32 new_gw,
- __be32 src, struct net_device *dev);
extern void rt_cache_flush(struct net *net, int how);
-extern void rt_cache_flush_batch(struct net *net);
extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp);
extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
struct sock *sk);
@@ -166,24 +157,16 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4
return ip_route_output_key(net, fl4);
}
-extern int ip_route_input_common(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin, bool noref);
-
-static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin)
-{
- return ip_route_input_common(skb, dst, src, tos, devin, false);
-}
-
-static inline int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src,
- u8 tos, struct net_device *devin)
-{
- return ip_route_input_common(skb, dst, src, tos, devin, true);
-}
+extern int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin);
-extern unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
- unsigned short new_mtu, struct net_device *dev);
-extern void ip_rt_send_redirect(struct sk_buff *skb);
+extern void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
+ int oif, u32 mark, u8 protocol, int flow_flags);
+extern void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu);
+extern void ipv4_redirect(struct sk_buff *skb, struct net *net,
+ int oif, u32 mark, u8 protocol, int flow_flags);
+extern void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk);
+extern void ip_rt_send_redirect(struct sk_buff *skb);
extern unsigned int inet_addr_type(struct net *net, __be32 addr);
extern unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
@@ -244,8 +227,6 @@ static inline void ip_route_connect_init(struct flowi4 *fl4, __be32 dst, __be32
if (inet_sk(sk)->transparent)
flow_flags |= FLOWI_FLAG_ANYSRC;
- if (protocol == IPPROTO_TCP)
- flow_flags |= FLOWI_FLAG_PRECOW_METRICS;
if (can_sleep)
flow_flags |= FLOWI_FLAG_CAN_SLEEP;
@@ -294,20 +275,13 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable
return rt;
}
-extern void rt_bind_peer(struct rtable *rt, __be32 daddr, int create);
-
-static inline struct inet_peer *rt_get_peer(struct rtable *rt, __be32 daddr)
-{
- if (rt->peer)
- return rt->peer;
-
- rt_bind_peer(rt, daddr, 0);
- return rt->peer;
-}
-
static inline int inet_iif(const struct sk_buff *skb)
{
- return skb_rtable(skb)->rt_iif;
+ int iif = skb_rtable(skb)->rt_iif;
+
+ if (iif)
+ return iif;
+ return skb->skb_iif;
}
extern int sysctl_ip_default_ttl;
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index bbcfd0993432..6b00c4fc4291 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -44,8 +44,10 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
* @get_xstats_size: Function to calculate required room for dumping device
* specific statistics
* @fill_xstats: Function to dump device specific statistics
- * @get_tx_queues: Function to determine number of transmit queues to create when
- * creating a new device.
+ * @get_num_tx_queues: Function to determine number of transmit queues
+ * to create when creating a new device.
+ * @get_num_rx_queues: Function to determine number of receive queues
+ * to create when creating a new device.
*/
struct rtnl_link_ops {
struct list_head list;
@@ -77,8 +79,8 @@ struct rtnl_link_ops {
size_t (*get_xstats_size)(const struct net_device *dev);
int (*fill_xstats)(struct sk_buff *skb,
const struct net_device *dev);
- int (*get_tx_queues)(struct net *net,
- struct nlattr *tb[]);
+ unsigned int (*get_num_tx_queues)(void);
+ unsigned int (*get_num_rx_queues)(void);
};
extern int __rtnl_link_register(struct rtnl_link_ops *ops);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 9d7d54a00e63..d9611e032418 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -220,7 +220,7 @@ struct tcf_proto {
struct qdisc_skb_cb {
unsigned int pkt_len;
- u16 bond_queue_mapping;
+ u16 slave_dev_queue_mapping;
u16 _pad;
unsigned char data[20];
};
diff --git a/include/net/scm.h b/include/net/scm.h
index d456f4c71a32..079d7887dac1 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -13,7 +13,6 @@
#define SCM_MAX_FD 253
struct scm_fp_list {
- struct list_head list;
short count;
short max;
struct file *fp[SCM_MAX_FD];
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 942b864f6135..d053d2e99876 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -334,6 +334,7 @@ typedef enum {
typedef enum {
SCTP_TRANSPORT_UP,
SCTP_TRANSPORT_DOWN,
+ SCTP_TRANSPORT_PF,
} sctp_transport_cmd_t;
/* These are the address scopes defined mainly for IPv4 addresses
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a2ef81466b00..ff499640528b 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -162,6 +162,8 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *,
void sctp_err_finish(struct sock *, struct sctp_association *);
void sctp_icmp_frag_needed(struct sock *, struct sctp_association *,
struct sctp_transport *t, __u32 pmtu);
+void sctp_icmp_redirect(struct sock *, struct sctp_transport *,
+ struct sk_buff *);
void sctp_icmp_proto_unreachable(struct sock *sk,
struct sctp_association *asoc,
struct sctp_transport *t);
@@ -517,10 +519,10 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
return frag;
}
-static inline void sctp_assoc_pending_pmtu(struct sctp_association *asoc)
+static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_association *asoc)
{
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sk, asoc);
asoc->pmtu_pending = 0;
}
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e4652fe58958..fc5e60016e37 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -161,6 +161,12 @@ extern struct sctp_globals {
int max_retrans_path;
int max_retrans_init;
+ /* Potentially-Failed.Max.Retrans sysctl value
+ * taken from:
+ * http://tools.ietf.org/html/draft-nishida-tsvwg-sctp-failover-05
+ */
+ int pf_retrans;
+
/*
* Policy for preforming sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_sndbuf
@@ -258,6 +264,7 @@ extern struct sctp_globals {
#define sctp_sndbuf_policy (sctp_globals.sndbuf_policy)
#define sctp_rcvbuf_policy (sctp_globals.rcvbuf_policy)
#define sctp_max_retrans_path (sctp_globals.max_retrans_path)
+#define sctp_pf_retrans (sctp_globals.pf_retrans)
#define sctp_max_retrans_init (sctp_globals.max_retrans_init)
#define sctp_sack_timeout (sctp_globals.sack_timeout)
#define sctp_hb_interval (sctp_globals.hb_interval)
@@ -912,6 +919,9 @@ struct sctp_transport {
/* Is this structure kfree()able? */
malloced:1;
+ /* Has this transport moved the ctsn since we last sacked */
+ __u32 sack_generation;
+
struct flowi fl;
/* This is the peer's IP address and port. */
@@ -987,10 +997,15 @@ struct sctp_transport {
/* This is the max_retrans value for the transport and will
* be initialized from the assocs value. This can be changed
- * using SCTP_SET_PEER_ADDR_PARAMS socket option.
+ * using the SCTP_SET_PEER_ADDR_PARAMS socket option.
*/
__u16 pathmaxrxt;
+ /* This is the partially failed retrans value for the transport
+ * and will be initialized from the assocs value. This can be changed
+ * using the SCTP_PEER_ADDR_THLDS socket option
+ */
+ int pf_retrans;
/* PMTU : The current known path MTU. */
__u32 pathmtu;
@@ -1088,7 +1103,7 @@ void sctp_transport_burst_limited(struct sctp_transport *);
void sctp_transport_burst_reset(struct sctp_transport *);
unsigned long sctp_transport_timeout(struct sctp_transport *);
void sctp_transport_reset(struct sctp_transport *);
-void sctp_transport_update_pmtu(struct sctp_transport *, u32);
+void sctp_transport_update_pmtu(struct sock *, struct sctp_transport *, u32);
void sctp_transport_immediate_rtx(struct sctp_transport *);
@@ -1584,6 +1599,7 @@ struct sctp_association {
*/
__u8 sack_needed; /* Do we need to sack the peer? */
__u32 sack_cnt;
+ __u32 sack_generation;
/* These are capabilities which our peer advertised. */
__u8 ecn_capable:1, /* Can peer do ECN? */
@@ -1660,6 +1676,12 @@ struct sctp_association {
*/
int max_retrans;
+ /* This is the partially failed retrans value for the transport
+ * and will be initialized from the assocs value. This can be
+ * changed using the SCTP_PEER_ADDR_THLDS socket option
+ */
+ int pf_retrans;
+
/* Maximum number of times the endpoint will retransmit INIT */
__u16 max_init_attempts;
@@ -1999,7 +2021,7 @@ void sctp_assoc_update(struct sctp_association *old,
__u32 sctp_association_get_next_tsn(struct sctp_association *);
-void sctp_assoc_sync_pmtu(struct sctp_association *);
+void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
void sctp_assoc_set_primary(struct sctp_association *,
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
index e7728bc14ccf..2c5d2b4d5d1e 100644
--- a/include/net/sctp/tsnmap.h
+++ b/include/net/sctp/tsnmap.h
@@ -117,7 +117,8 @@ void sctp_tsnmap_free(struct sctp_tsnmap *map);
int sctp_tsnmap_check(const struct sctp_tsnmap *, __u32 tsn);
/* Mark this TSN as seen. */
-int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn);
+int sctp_tsnmap_mark(struct sctp_tsnmap *, __u32 tsn,
+ struct sctp_transport *trans);
/* Mark this TSN and all lower as seen. */
void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn);
diff --git a/include/net/sctp/user.h b/include/net/sctp/user.h
index 0842ef00b2fe..1b02d7ad453b 100644
--- a/include/net/sctp/user.h
+++ b/include/net/sctp/user.h
@@ -93,6 +93,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_GET_ASSOC_NUMBER 28 /* Read only */
#define SCTP_GET_ASSOC_ID_LIST 29 /* Read only */
#define SCTP_AUTO_ASCONF 30
+#define SCTP_PEER_ADDR_THLDS 31
/* Internal Socket Options. Some of the sctp library functions are
* implemented using these socket options.
@@ -649,6 +650,7 @@ struct sctp_paddrinfo {
*/
enum sctp_spinfo_state {
SCTP_INACTIVE,
+ SCTP_PF,
SCTP_ACTIVE,
SCTP_UNCONFIRMED,
SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */
@@ -741,4 +743,13 @@ typedef struct {
int sd;
} sctp_peeloff_arg_t;
+/*
+ * Peer Address Thresholds socket option
+ */
+struct sctp_paddrthlds {
+ sctp_assoc_t spt_assoc_id;
+ struct sockaddr_storage spt_address;
+ __u16 spt_pathmaxrxt;
+ __u16 spt_pathpfthld;
+};
#endif /* __net_sctp_user_h__ */
diff --git a/include/net/sock.h b/include/net/sock.h
index 4a4521699563..e067f8c18f88 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -198,6 +198,7 @@ struct cg_proto;
* @sk_lock: synchronizer
* @sk_rcvbuf: size of receive buffer in bytes
* @sk_wq: sock wait queue and async head
+ * @sk_rx_dst: receive input route used by early tcp demux
* @sk_dst_cache: destination cache
* @sk_dst_lock: destination cache lock
* @sk_policy: flow policy
@@ -317,6 +318,7 @@ struct sock {
struct xfrm_policy *sk_policy[2];
#endif
unsigned long sk_flags;
+ struct dst_entry *sk_rx_dst;
struct dst_entry *sk_dst_cache;
spinlock_t sk_dst_lock;
atomic_t sk_wmem_alloc;
@@ -856,6 +858,9 @@ struct proto {
int (*backlog_rcv) (struct sock *sk,
struct sk_buff *skb);
+ void (*release_cb)(struct sock *sk);
+ void (*mtu_reduced)(struct sock *sk);
+
/* Keeping track of sk's, looking them up, and port selection methods. */
void (*hash)(struct sock *sk);
void (*unhash)(struct sock *sk);
@@ -1426,6 +1431,7 @@ extern struct sk_buff *sock_rmalloc(struct sock *sk,
gfp_t priority);
extern void sock_wfree(struct sk_buff *skb);
extern void sock_rfree(struct sk_buff *skb);
+extern void sock_edemux(struct sk_buff *skb);
extern int sock_setsockopt(struct socket *sock, int level,
int op, char __user *optval,
@@ -2152,7 +2158,7 @@ static inline void sk_change_net(struct sock *sk, struct net *net)
static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
- if (unlikely(skb->sk)) {
+ if (skb->sk) {
struct sock *sk = skb->sk;
skb->destructor = NULL;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e79aa48d9fc1..e19124b84cd2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -170,6 +170,11 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
#define TCPOPT_COOKIE 253 /* Cookie extension (experimental) */
+#define TCPOPT_EXP 254 /* Experimental */
+/* Magic number to be after the option value for sharing TCP
+ * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
+ */
+#define TCPOPT_FASTOPEN_MAGIC 0xF989
/*
* TCP option lengths
@@ -180,6 +185,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_SACK_PERM 2
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
+#define TCPOLEN_EXP_FASTOPEN_BASE 4
#define TCPOLEN_COOKIE_BASE 2 /* Cookie-less header extension */
#define TCPOLEN_COOKIE_PAIR 3 /* Cookie pair header extension */
#define TCPOLEN_COOKIE_MIN (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
@@ -206,6 +212,10 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
#define TCP_INIT_CWND 10
+/* Bit Flags for sysctl_tcp_fastopen */
+#define TFO_CLIENT_ENABLE 1
+#define TFO_CLIENT_NO_COOKIE 4 /* Data in SYN w/o cookie option */
+
extern struct inet_timewait_death_row tcp_death_row;
/* sysctl variables for tcp */
@@ -222,6 +232,7 @@ extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
+extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
@@ -253,6 +264,8 @@ extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
+extern int sysctl_tcp_limit_output_bytes;
+extern int sysctl_tcp_challenge_ack_limit;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -321,19 +334,24 @@ extern struct proto tcp_prot;
extern void tcp_init_mem(struct net *net);
+extern void tcp_tasklet_init(void);
+
extern void tcp_v4_err(struct sk_buff *skb, u32);
extern void tcp_shutdown (struct sock *sk, int how);
+extern void tcp_v4_early_demux(struct sk_buff *skb);
extern int tcp_v4_rcv(struct sk_buff *skb);
-extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
-extern void *tcp_v4_tw_get_peer(struct sock *sk);
+extern struct inet_peer *tcp_v4_get_peer(struct sock *sk);
extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size);
extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
+extern void tcp_release_cb(struct sock *sk);
+extern void tcp_write_timer_handler(struct sock *sk);
+extern void tcp_delack_timer_handler(struct sock *sk);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len);
@@ -388,6 +406,19 @@ extern void tcp_enter_frto(struct sock *sk);
extern void tcp_enter_loss(struct sock *sk, int how);
extern void tcp_clear_retrans(struct tcp_sock *tp);
extern void tcp_update_metrics(struct sock *sk);
+extern void tcp_init_metrics(struct sock *sk);
+extern void tcp_metrics_init(void);
+extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
+extern bool tcp_remember_stamp(struct sock *sk);
+extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
+extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+ struct tcp_fastopen_cookie *cookie,
+ int *syn_loss, unsigned long *last_syn_loss);
+extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+ struct tcp_fastopen_cookie *cookie,
+ bool syn_lost);
+extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
+extern void tcp_disable_fack(struct tcp_sock *tp);
extern void tcp_close(struct sock *sk, long timeout);
extern void tcp_init_sock(struct sock *sk);
extern unsigned int tcp_poll(struct file * file, struct socket *sock,
@@ -406,7 +437,7 @@ extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t len, int nonblock, int flags, int *addr_len);
extern void tcp_parse_options(const struct sk_buff *skb,
struct tcp_options_received *opt_rx, const u8 **hvpp,
- int estab);
+ int estab, struct tcp_fastopen_cookie *foc);
extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
/*
@@ -556,6 +587,8 @@ static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
return (tp->srtt >> 3) + tp->rttvar;
}
+extern void tcp_set_rto(struct sock *sk);
+
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
tp->pred_flags = htonl((tp->tcp_header_len << 26) |
@@ -1264,6 +1297,15 @@ extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff
extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
const struct tcp_md5sig_key *key);
+struct tcp_fastopen_request {
+ /* Fast Open cookie. Size 0 means a cookie request */
+ struct tcp_fastopen_cookie cookie;
+ struct msghdr *data; /* data in MSG_FASTOPEN */
+ u16 copied; /* queued in tcp_connect() */
+};
+
+void tcp_free_fastopen_req(struct tcp_sock *tp);
+
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 8d6689cb2c66..68f0ecad6c6e 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -22,7 +22,6 @@ struct timewait_sock_ops {
int (*twsk_unique)(struct sock *sk,
struct sock *sktw, void *twp);
void (*twsk_destructor)(struct sock *sk);
- void *(*twsk_getpeer)(struct sock *sk);
};
static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -41,11 +40,4 @@ static inline void twsk_destructor(struct sock *sk)
sk->sk_prot->twsk_prot->twsk_destructor(sk);
}
-static inline void *twsk_getpeer(struct sock *sk)
-{
- if (sk->sk_prot->twsk_prot->twsk_getpeer)
- return sk->sk_prot->twsk_prot->twsk_getpeer(sk);
- return NULL;
-}
-
#endif /* _TIMEWAIT_SOCK_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index e0a55df5bde8..d9509eb29b80 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1475,6 +1475,8 @@ extern int xfrm4_output(struct sk_buff *skb);
extern int xfrm4_output_finish(struct sk_buff *skb);
extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
+extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
+extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
extern int xfrm6_extract_header(struct sk_buff *skb);
extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1682,13 +1684,11 @@ static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
{
- if ((m->m | m->v) &&
- nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m))
- goto nla_put_failure;
- return 0;
+ int ret = 0;
-nla_put_failure:
- return -1;
+ if (m->m | m->v)
+ ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
+ return ret;
}
#endif /* _NET_XFRM_H */
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 83f77ac33957..0e3ff30647d5 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -262,6 +262,18 @@ struct ib_cm_event {
void *private_data;
};
+#define CM_REQ_ATTR_ID cpu_to_be16(0x0010)
+#define CM_MRA_ATTR_ID cpu_to_be16(0x0011)
+#define CM_REJ_ATTR_ID cpu_to_be16(0x0012)
+#define CM_REP_ATTR_ID cpu_to_be16(0x0013)
+#define CM_RTU_ATTR_ID cpu_to_be16(0x0014)
+#define CM_DREQ_ATTR_ID cpu_to_be16(0x0015)
+#define CM_DREP_ATTR_ID cpu_to_be16(0x0016)
+#define CM_SIDR_REQ_ATTR_ID cpu_to_be16(0x0017)
+#define CM_SIDR_REP_ATTR_ID cpu_to_be16(0x0018)
+#define CM_LAP_ATTR_ID cpu_to_be16(0x0019)
+#define CM_APR_ATTR_ID cpu_to_be16(0x001A)
+
/**
* ib_cm_handler - User-defined callback to process communication events.
* @cm_id: Communication identifier associated with the reported event.
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index d44a56388a3e..8275e539bace 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -251,6 +251,28 @@ struct ib_sa_service_rec {
u64 data64[2];
};
+#define IB_SA_GUIDINFO_REC_LID IB_SA_COMP_MASK(0)
+#define IB_SA_GUIDINFO_REC_BLOCK_NUM IB_SA_COMP_MASK(1)
+#define IB_SA_GUIDINFO_REC_RES1 IB_SA_COMP_MASK(2)
+#define IB_SA_GUIDINFO_REC_RES2 IB_SA_COMP_MASK(3)
+#define IB_SA_GUIDINFO_REC_GID0 IB_SA_COMP_MASK(4)
+#define IB_SA_GUIDINFO_REC_GID1 IB_SA_COMP_MASK(5)
+#define IB_SA_GUIDINFO_REC_GID2 IB_SA_COMP_MASK(6)
+#define IB_SA_GUIDINFO_REC_GID3 IB_SA_COMP_MASK(7)
+#define IB_SA_GUIDINFO_REC_GID4 IB_SA_COMP_MASK(8)
+#define IB_SA_GUIDINFO_REC_GID5 IB_SA_COMP_MASK(9)
+#define IB_SA_GUIDINFO_REC_GID6 IB_SA_COMP_MASK(10)
+#define IB_SA_GUIDINFO_REC_GID7 IB_SA_COMP_MASK(11)
+
+struct ib_sa_guidinfo_rec {
+ __be16 lid;
+ u8 block_num;
+ /* reserved */
+ u8 res1;
+ __be32 res2;
+ u8 guid_info_list[64];
+};
+
struct ib_sa_client {
atomic_t users;
struct completion comp;
@@ -385,4 +407,15 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
*/
void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec);
+/* Support GuidInfoRecord */
+int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
+ struct ib_device *device, u8 port_num,
+ struct ib_sa_guidinfo_rec *rec,
+ ib_sa_comp_mask comp_mask, u8 method,
+ int timeout_ms, gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_sa_guidinfo_rec *resp,
+ void *context),
+ void *context,
+ struct ib_sa_query **sa_query);
#endif /* IB_SA_H */
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 51988f808181..ad3a3142383a 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -357,4 +357,14 @@ void rdma_set_service_type(struct rdma_cm_id *id, int tos);
*/
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse);
+/**
+ * rdma_set_afonly - Specify that listens are restricted to the
+ * bound address family only.
+ * @id: Communication identifer to configure.
+ * @afonly: Value indicating if listens are restricted.
+ *
+ * Must be set before identifier is in the listening state.
+ */
+int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
+
#endif /* RDMA_CM_H */
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
index 5348a000c8f3..1ee9239ff8c2 100644
--- a/include/rdma/rdma_user_cm.h
+++ b/include/rdma/rdma_user_cm.h
@@ -224,6 +224,7 @@ enum {
enum {
RDMA_OPTION_ID_TOS = 0,
RDMA_OPTION_ID_REUSEADDR = 1,
+ RDMA_OPTION_ID_AFONLY = 2,
RDMA_OPTION_IB_PATH = 1
};
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 8f9dfba3fcf0..399162b50a8d 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -224,7 +224,7 @@ struct fc_rport_priv {
};
/**
- * struct fcoe_dev_stats - fcoe stats structure
+ * struct fc_stats - fc stats structure
* @SecondsSinceLastReset: Seconds since the last reset
* @TxFrames: Number of transmitted frames
* @TxWords: Number of transmitted words
@@ -232,6 +232,9 @@ struct fc_rport_priv {
* @RxWords: Number of received words
* @ErrorFrames: Number of received error frames
* @DumpedFrames: Number of dumped frames
+ * @FcpPktAllocFails: Number of fcp packet allocation failures
+ * @FcpPktAborts: Number of fcp packet aborts
+ * @FcpFrameAllocFails: Number of fcp frame allocation failures
* @LinkFailureCount: Number of link failures
* @LossOfSignalCount: Number for signal losses
* @InvalidTxWordCount: Number of invalid transmitted words
@@ -244,7 +247,7 @@ struct fc_rport_priv {
* @VLinkFailureCount: Number of virtual link failures
* @MissDiscAdvCount: Number of missing FIP discovery advertisement
*/
-struct fcoe_dev_stats {
+struct fc_stats {
u64 SecondsSinceLastReset;
u64 TxFrames;
u64 TxWords;
@@ -252,6 +255,9 @@ struct fcoe_dev_stats {
u64 RxWords;
u64 ErrorFrames;
u64 DumpedFrames;
+ u64 FcpPktAllocFails;
+ u64 FcpPktAborts;
+ u64 FcpFrameAllocFails;
u64 LinkFailureCount;
u64 LossOfSignalCount;
u64 InvalidTxWordCount;
@@ -510,7 +516,7 @@ struct libfc_function_template {
int (*ddp_done)(struct fc_lport *, u16);
/*
* Sets up the DDP context for a given exchange id on the given
- * scatterlist if LLD supports DDP for FCoE target.
+ * scatterlist if LLD supports DDP for target.
*
* STATUS: OPTIONAL
*/
@@ -817,8 +823,7 @@ enum fc_lport_event {
* @state: Identifies the state
* @boot_time: Timestamp indicating when the local port came online
* @host_stats: SCSI host statistics
- * @dev_stats: FCoE device stats (TODO: libfc should not be
- * FCoE aware)
+ * @stats: FC local port stats (TODO separate libfc LLD stats)
* @retry_count: Number of retries in the current state
* @port_id: FC Port ID
* @wwpn: World Wide Port Name
@@ -867,7 +872,7 @@ struct fc_lport {
enum fc_lport_state state;
unsigned long boot_time;
struct fc_host_statistics host_stats;
- struct fcoe_dev_stats __percpu *dev_stats;
+ struct fc_stats __percpu *stats;
u8 retry_count;
/* Fabric information */
@@ -980,8 +985,8 @@ static inline void fc_lport_state_enter(struct fc_lport *lport,
*/
static inline int fc_lport_init_stats(struct fc_lport *lport)
{
- lport->dev_stats = alloc_percpu(struct fcoe_dev_stats);
- if (!lport->dev_stats)
+ lport->stats = alloc_percpu(struct fc_stats);
+ if (!lport->stats)
return -ENOMEM;
return 0;
}
@@ -992,7 +997,7 @@ static inline int fc_lport_init_stats(struct fc_lport *lport)
*/
static inline void fc_lport_free_stats(struct fc_lport *lport)
{
- free_percpu(lport->dev_stats);
+ free_percpu(lport->stats);
}
/**
@@ -1116,6 +1121,7 @@ void fc_fill_hdr(struct fc_frame *, const struct fc_frame *,
* EXCHANGE MANAGER LAYER
*****************************/
int fc_exch_init(struct fc_lport *);
+void fc_exch_update_stats(struct fc_lport *lport);
struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *,
struct fc_exch_mgr *,
bool (*match)(struct fc_frame *));
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index f4f1c96dca72..ae33706afeb0 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -163,20 +163,29 @@ enum ata_command_set {
ATAPI_COMMAND_SET = 1,
};
+#define ATA_RESP_FIS_SIZE 24
+
struct sata_device {
enum ata_command_set command_set;
struct smp_resp rps_resp; /* report_phy_sata_resp */
u8 port_no; /* port number, if this is a PM (Port) */
- struct list_head children; /* PM Ports if this is a PM */
struct ata_port *ap;
struct ata_host ata_host;
- struct ata_taskfile tf;
+ u8 fis[ATA_RESP_FIS_SIZE];
+};
+
+struct ssp_device {
+ struct list_head eh_list_node; /* pending a user requested eh action */
+ struct scsi_lun reset_lun;
};
enum {
SAS_DEV_GONE,
SAS_DEV_DESTROY,
+ SAS_DEV_EH_PENDING,
+ SAS_DEV_LU_RESET,
+ SAS_DEV_RESET,
};
struct domain_device {
@@ -210,6 +219,7 @@ struct domain_device {
union {
struct expander_device ex_dev;
struct sata_device sata_dev; /* STP & directly attached */
+ struct ssp_device ssp_dev;
};
void *lldd_dev;
@@ -384,7 +394,10 @@ struct sas_ha_struct {
struct list_head defer_q; /* work queued while draining */
struct mutex drain_mutex;
unsigned long state;
- spinlock_t state_lock;
+ spinlock_t lock;
+ int eh_active;
+ wait_queue_head_t eh_wait_q;
+ struct list_head eh_dev_q;
struct mutex disco_mutex;
@@ -537,7 +550,7 @@ enum exec_status {
*/
struct ata_task_resp {
u16 frame_len;
- u8 ending_fis[24]; /* dev to host or data-in */
+ u8 ending_fis[ATA_RESP_FIS_SIZE]; /* dev to host or data-in */
};
#define SAS_STATUS_BUF_SIZE 96
@@ -600,10 +613,6 @@ struct sas_task {
enum sas_protocol task_proto;
- /* Used by the discovery code. */
- struct timer_list timer;
- struct completion completion;
-
union {
struct sas_ata_task ata_task;
struct sas_smp_task smp_task;
@@ -620,8 +629,15 @@ struct sas_task {
void *lldd_task; /* for use by LLDDs */
void *uldd_task;
+ struct sas_task_slow *slow_task;
+};
- struct work_struct abort_work;
+struct sas_task_slow {
+ /* standard/extra infrastructure for slow path commands (SMP and
+ * internal lldd commands
+ */
+ struct timer_list timer;
+ struct completion completion;
};
#define SAS_TASK_STATE_PENDING 1
@@ -631,6 +647,7 @@ struct sas_task {
#define SAS_TASK_AT_INITIATOR 16
extern struct sas_task *sas_alloc_task(gfp_t flags);
+extern struct sas_task *sas_alloc_slow_task(gfp_t flags);
extern void sas_free_task(struct sas_task *task);
struct sas_domain_function_template {
@@ -706,6 +723,7 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *);
void sas_init_dev(struct domain_device *);
void sas_task_abort(struct sas_task *);
+int sas_eh_abort_handler(struct scsi_cmnd *cmd);
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd);
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd);
diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h
index 77670e823ed8..2dfbdaa0b34a 100644
--- a/include/scsi/sas_ata.h
+++ b/include/scsi/sas_ata.h
@@ -45,6 +45,7 @@ void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q,
void sas_ata_schedule_reset(struct domain_device *dev);
void sas_ata_wait_eh(struct domain_device *dev);
void sas_probe_sata(struct asd_sas_port *port);
+void sas_ata_end_eh(struct ata_port *ap);
#else
@@ -85,6 +86,10 @@ static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy
{
return 0;
}
+
+static inline void sas_ata_end_eh(struct ata_port *ap)
+{
+}
#endif
#endif /* _SAS_ATA_H_ */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 4527b3a13321..66216c1acb48 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -161,6 +161,8 @@ struct scsi_cmnd;
#define MI_REPORT_PRIORITY 0x0e
#define MI_REPORT_TIMESTAMP 0x0f
#define MI_MANAGEMENT_PROTOCOL_IN 0x10
+/* value for MI_REPORT_TARGET_PGS ext header */
+#define MI_EXT_HDR_PARAM_FMT 0x20
/* values for maintenance out */
#define MO_SET_IDENTIFYING_INFORMATION 0x06
#define MO_SET_TARGET_PGS 0x0a
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 1e1198546c72..ac06cc595890 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -134,10 +134,16 @@ struct scsi_cmnd {
static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
{
+ struct scsi_driver **sdp;
+
if (!cmd->request->rq_disk)
return NULL;
- return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
+ sdp = (struct scsi_driver **)cmd->request->rq_disk->private_data;
+ if (!sdp)
+ return NULL;
+
+ return *sdp;
}
extern struct scsi_cmnd *scsi_get_command(struct scsi_device *, gfp_t);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index aff7525de194..9895f69294fc 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -42,6 +42,7 @@ enum scsi_device_state {
* originate in the mid-layer) */
SDEV_OFFLINE, /* Device offlined (by error handling or
* user request */
+ SDEV_TRANSPORT_OFFLINE, /* Offlined by transport class error handler */
SDEV_BLOCK, /* Device blocked by scsi lld. No
* scsi commands from user or midlayer
* should be issued to the scsi
@@ -154,6 +155,7 @@ struct scsi_device {
unsigned try_rc_10_first:1; /* Try READ_CAPACACITY_10 first */
unsigned is_visible:1; /* is the device visible in sysfs */
unsigned can_power_off:1; /* Device supports runtime power off */
+ unsigned wce_default_on:1; /* Cache is ON by default */
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
struct list_head event_list; /* asserted events */
@@ -374,7 +376,7 @@ extern void scsi_scan_target(struct device *parent, unsigned int channel,
unsigned int id, unsigned int lun, int rescan);
extern void scsi_target_reap(struct scsi_target *);
extern void scsi_target_block(struct device *);
-extern void scsi_target_unblock(struct device *);
+extern void scsi_target_unblock(struct device *, enum scsi_device_state);
extern void scsi_remove_target(struct device *);
extern void int_to_scsilun(unsigned int, struct scsi_lun *);
extern int scsilun_to_int(struct scsi_lun *);
@@ -422,6 +424,7 @@ static inline unsigned int sdev_id(struct scsi_device *sdev)
static inline int scsi_device_online(struct scsi_device *sdev)
{
return (sdev->sdev_state != SDEV_OFFLINE &&
+ sdev->sdev_state != SDEV_TRANSPORT_OFFLINE &&
sdev->sdev_state != SDEV_DEL);
}
static inline int scsi_device_blocked(struct scsi_device *sdev)
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index e3f2db212ddc..620c723ee8ed 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -60,6 +60,7 @@ extern int scsi_dh_activate(struct request_queue *, activate_complete, void *);
extern int scsi_dh_handler_exist(const char *);
extern int scsi_dh_attach(struct request_queue *, const char *);
extern void scsi_dh_detach(struct request_queue *);
+extern const char *scsi_dh_attached_handler_name(struct request_queue *, gfp_t);
extern int scsi_dh_set_params(struct request_queue *, const char *);
#else
static inline int scsi_dh_activate(struct request_queue *req,
@@ -80,6 +81,11 @@ static inline void scsi_dh_detach(struct request_queue *q)
{
return;
}
+static inline const char *scsi_dh_attached_handler_name(struct request_queue *q,
+ gfp_t gfp)
+{
+ return NULL;
+}
static inline int scsi_dh_set_params(struct request_queue *req, const char *params)
{
return -SCSI_DH_NOSYS;
diff --git a/include/scsi/scsi_scan.h b/include/scsi/scsi_scan.h
deleted file mode 100644
index 78898889243d..000000000000
--- a/include/scsi/scsi_scan.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _SCSI_SCSI_SCAN_H
-#define _SCSI_SCSI_SCAN_H
-
-#ifdef CONFIG_SCSI
-/* drivers/scsi/scsi_scan.c */
-extern int scsi_complete_async_scans(void);
-#else
-static inline int scsi_complete_async_scans(void) { return 0; }
-#endif
-
-#endif /* _SCSI_SCSI_SCAN_H */
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 719faf1863ad..b797e8fad669 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -426,6 +426,18 @@ struct fc_host_statistics {
u64 fcp_control_requests;
u64 fcp_input_megabytes;
u64 fcp_output_megabytes;
+ u64 fcp_packet_alloc_failures; /* fcp packet allocation failures */
+ u64 fcp_packet_aborts; /* fcp packet aborted */
+ u64 fcp_frame_alloc_failures; /* fcp frame allocation failures */
+
+ /* fc exches statistics */
+ u64 fc_no_free_exch; /* no free exch memory */
+ u64 fc_no_free_exch_xid; /* no free exch id */
+ u64 fc_xid_not_found; /* exch not found for a response */
+ u64 fc_xid_busy; /* exch exist for new a request */
+ u64 fc_seq_not_found; /* seq is not found for exchange */
+ u64 fc_non_bls_resp; /* a non BLS response frame with
+ a sequence responder in new exch */
};
diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h
new file mode 100644
index 000000000000..26f406e0f673
--- /dev/null
+++ b/include/sound/designware_i2s.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __SOUND_DESIGNWARE_I2S_H
+#define __SOUND_DESIGNWARE_I2S_H
+
+#include <linux/dmaengine.h>
+#include <linux/types.h>
+
+/*
+ * struct i2s_clk_config_data - represent i2s clk configuration data
+ * @chan_nr: number of channel
+ * @data_width: number of bits per sample (8/16/24/32 bit)
+ * @sample_rate: sampling frequency (8Khz, 16Khz, 32Khz, 44Khz, 48Khz)
+ */
+struct i2s_clk_config_data {
+ int chan_nr;
+ u32 data_width;
+ u32 sample_rate;
+};
+
+struct i2s_platform_data {
+ #define DWC_I2S_PLAY (1 << 0)
+ #define DWC_I2S_RECORD (1 << 1)
+ unsigned int cap;
+ int channel;
+ u32 snd_fmts;
+ u32 snd_rates;
+
+ void *play_dma_data;
+ void *capture_dma_data;
+ bool (*filter)(struct dma_chan *chan, void *slave);
+ int (*i2s_clk_cfg)(struct i2s_clk_config_data *config);
+};
+
+struct i2s_dma_data {
+ void *data;
+ dma_addr_t addr;
+ u32 max_burst;
+ enum dma_slave_buswidth addr_width;
+ bool (*filter)(struct dma_chan *chan, void *slave);
+};
+
+/* I2S DMA registers */
+#define I2S_RXDMA 0x01C0
+#define I2S_TXDMA 0x01C8
+
+#define TWO_CHANNEL_SUPPORT 2 /* up to 2.0 */
+#define FOUR_CHANNEL_SUPPORT 4 /* up to 3.1 */
+#define SIX_CHANNEL_SUPPORT 6 /* up to 5.1 */
+#define EIGHT_CHANNEL_SUPPORT 8 /* up to 7.1 */
+
+#endif /* __SOUND_DESIGNWARE_I2S_H */
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index a8fcaa6d531f..b877334bbb0f 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -39,6 +39,7 @@ int snd_hwparams_to_dma_slave_config(const struct snd_pcm_substream *substream,
const struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config);
int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream);
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream);
int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
dma_filter_fn filter_fn, void *filter_data);
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 0d1112815be3..c75c0d1a85e2 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -810,7 +810,7 @@ int snd_pcm_hw_constraint_integer(struct snd_pcm_runtime *runtime, snd_pcm_hw_pa
int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
- struct snd_pcm_hw_constraint_list *l);
+ const struct snd_pcm_hw_constraint_list *l);
int snd_pcm_hw_constraint_ratnums(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
@@ -893,6 +893,7 @@ extern const struct snd_pcm_hw_constraint_list snd_pcm_known_rates;
int snd_pcm_limit_hw_rates(struct snd_pcm_runtime *runtime);
unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate);
+unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit);
static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substream,
struct snd_dma_buffer *bufp)
@@ -1073,4 +1074,15 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
const char *snd_pcm_format_name(snd_pcm_format_t format);
+/**
+ * Get a string naming the direction of a stream
+ */
+static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return "Playback";
+ else
+ return "Capture";
+}
+
#endif /* __SOUND_PCM_H */
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index f494f1e3c900..37ae12e0ab06 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -22,6 +22,8 @@
*
*/
+#include <sound/pcm.h>
+
int snd_pcm_hw_param_first(struct snd_pcm_substream *pcm,
struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, int *dir);
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index e3833d9f1914..abe373d57adc 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -229,6 +229,10 @@ struct device;
{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \
.shift = wshift, .invert = winvert, \
.event = wevent, .event_flags = wflags}
+#define SND_SOC_DAPM_CLOCK_SUPPLY(wname) \
+{ .id = snd_soc_dapm_clock_supply, .name = wname, \
+ .reg = SND_SOC_NOPM, .event = dapm_clock_event, \
+ .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }
/* generic widgets */
#define SND_SOC_DAPM_REG(wid, wname, wreg, wshift, wmask, won_val, woff_val) \
@@ -245,6 +249,7 @@ struct device;
.reg = SND_SOC_NOPM, .shift = wdelay, .event = dapm_regulator_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }
+
/* dapm kcontrol types */
#define SOC_DAPM_SINGLE(xname, reg, shift, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -327,6 +332,8 @@ int dapm_reg_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
int dapm_regulator_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
+int dapm_clock_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event);
/* dapm controls */
int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
@@ -367,6 +374,8 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm);
void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm);
int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
+int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_route *route, int num);
int snd_soc_dapm_weak_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
@@ -432,6 +441,7 @@ enum snd_soc_dapm_type {
snd_soc_dapm_post, /* machine specific post widget - exec last */
snd_soc_dapm_supply, /* power/clock supply */
snd_soc_dapm_regulator_supply, /* external regulator */
+ snd_soc_dapm_clock_supply, /* external clock */
snd_soc_dapm_aif_in, /* audio interface input */
snd_soc_dapm_aif_out, /* audio interface output */
snd_soc_dapm_siggen, /* signal generator */
@@ -537,6 +547,8 @@ struct snd_soc_dapm_widget {
struct list_head dirty;
int inputs;
int outputs;
+
+ struct clk *clk;
};
struct snd_soc_dapm_update {
diff --git a/include/sound/soc.h b/include/sound/soc.h
index c703871f5f65..e063380f63a2 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -42,11 +42,22 @@
((unsigned long)&(struct soc_mixer_control) \
{.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
.max = xmax, .platform_max = xmax, .invert = xinvert})
+#define SOC_DOUBLE_R_RANGE_VALUE(xlreg, xrreg, xshift, xmin, xmax, xinvert) \
+ ((unsigned long)&(struct soc_mixer_control) \
+ {.reg = xlreg, .rreg = xrreg, .shift = xshift, .rshift = xshift, \
+ .min = xmin, .max = xmax, .platform_max = xmax, .invert = xinvert})
#define SOC_SINGLE(xname, reg, shift, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
.put = snd_soc_put_volsw, \
.private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) }
+#define SOC_SINGLE_RANGE(xname, xreg, xshift, xmin, xmax, xinvert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+ .info = snd_soc_info_volsw_range, .get = snd_soc_get_volsw_range, \
+ .put = snd_soc_put_volsw_range, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .shift = xshift, .min = xmin,\
+ .max = xmax, .platform_max = xmax, .invert = xinvert} }
#define SOC_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -67,6 +78,16 @@
{.reg = xreg, .rreg = xreg, \
.shift = xshift, .rshift = xshift, \
.max = xmax, .min = xmin} }
+#define SOC_SINGLE_RANGE_TLV(xname, xreg, xshift, xmin, xmax, xinvert, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
+ SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw_range, \
+ .get = snd_soc_get_volsw_range, .put = snd_soc_put_volsw_range, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .shift = xshift, .min = xmin,\
+ .max = xmax, .platform_max = xmax, .invert = xinvert} }
#define SOC_DOUBLE(xname, reg, shift_left, shift_right, max, invert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
@@ -79,6 +100,13 @@
.get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
xmax, xinvert) }
+#define SOC_DOUBLE_R_RANGE(xname, reg_left, reg_right, xshift, xmin, \
+ xmax, xinvert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+ .info = snd_soc_info_volsw_range, \
+ .get = snd_soc_get_volsw_range, .put = snd_soc_put_volsw_range, \
+ .private_value = SOC_DOUBLE_R_RANGE_VALUE(reg_left, reg_right, \
+ xshift, xmin, xmax, xinvert) }
#define SOC_DOUBLE_TLV(xname, reg, shift_left, shift_right, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -97,6 +125,16 @@
.get = snd_soc_get_volsw, .put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
xmax, xinvert) }
+#define SOC_DOUBLE_R_RANGE_TLV(xname, reg_left, reg_right, xshift, xmin, \
+ xmax, xinvert, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
+ SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+ .tlv.p = (tlv_array), \
+ .info = snd_soc_info_volsw_range, \
+ .get = snd_soc_get_volsw_range, .put = snd_soc_put_volsw_range, \
+ .private_value = SOC_DOUBLE_R_RANGE_VALUE(reg_left, reg_right, \
+ xshift, xmin, xmax, xinvert) }
#define SOC_DOUBLE_R_SX_TLV(xname, xreg, xrreg, xshift, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
@@ -460,6 +498,12 @@ int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
+int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo);
+int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
+int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
int snd_soc_limit_volume(struct snd_soc_codec *codec,
const char *name, int max);
int snd_soc_bytes_info(struct snd_kcontrol *kcontrol,
@@ -785,13 +829,36 @@ struct snd_soc_dai_link {
/* config - must be set by machine driver */
const char *name; /* Codec name */
const char *stream_name; /* Stream name */
- const char *codec_name; /* for multi-codec */
- const struct device_node *codec_of_node;
- const char *platform_name; /* for multi-platform */
- const struct device_node *platform_of_node;
+ /*
+ * You MAY specify the link's CPU-side device, either by device name,
+ * or by DT/OF node, but not both. If this information is omitted,
+ * the CPU-side DAI is matched using .cpu_dai_name only, which hence
+ * must be globally unique. These fields are currently typically used
+ * only for codec to codec links, or systems using device tree.
+ */
+ const char *cpu_name;
+ const struct device_node *cpu_of_node;
+ /*
+ * You MAY specify the DAI name of the CPU DAI. If this information is
+ * omitted, the CPU-side DAI is matched using .cpu_name/.cpu_of_node
+ * only, which only works well when that device exposes a single DAI.
+ */
const char *cpu_dai_name;
- const struct device_node *cpu_dai_of_node;
+ /*
+ * You MUST specify the link's codec, either by device name, or by
+ * DT/OF node, but not both.
+ */
+ const char *codec_name;
+ const struct device_node *codec_of_node;
+ /* You MUST specify the DAI name within the codec */
const char *codec_dai_name;
+ /*
+ * You MAY specify the link's platform/PCM/DMA driver, either by
+ * device name, or by DT/OF node, but not both. Some forms of link
+ * do not need a platform.
+ */
+ const char *platform_name;
+ const struct device_node *platform_of_node;
int be_id; /* optional ID for machine driver BE identification */
const struct snd_soc_pcm_stream *params;
diff --git a/include/sound/spear_dma.h b/include/sound/spear_dma.h
new file mode 100644
index 000000000000..1b365bfdfb37
--- /dev/null
+++ b/include/sound/spear_dma.h
@@ -0,0 +1,35 @@
+/*
+* linux/spear_dma.h
+*
+* Copyright (ST) 2012 Rajeev Kumar (rajeev-dlh.kumar@st.com)
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*
+*/
+
+#ifndef SPEAR_DMA_H
+#define SPEAR_DMA_H
+
+#include <linux/dmaengine.h>
+
+struct spear_dma_data {
+ void *data;
+ dma_addr_t addr;
+ u32 max_burst;
+ enum dma_slave_buswidth addr_width;
+ bool (*filter)(struct dma_chan *chan, void *slave);
+};
+
+#endif /* SPEAR_DMA_H */
diff --git a/include/sound/spear_spdif.h b/include/sound/spear_spdif.h
new file mode 100644
index 000000000000..a12f39695610
--- /dev/null
+++ b/include/sound/spear_spdif.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (ST) 2012 Vipin Kumar (vipin.kumar@st.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __SOUND_SPDIF_H
+#define __SOUND_SPDIF_H
+
+struct spear_spdif_platform_data {
+ /* DMA params */
+ void *dma_params;
+ bool (*filter)(struct dma_chan *chan, void *slave);
+ void (*reset_perip)(void);
+};
+
+#endif /* SOUND_SPDIF_H */
diff --git a/include/sound/tlv.h b/include/sound/tlv.h
index 7067e2dfb0b9..a64d8fe3f855 100644
--- a/include/sound/tlv.h
+++ b/include/sound/tlv.h
@@ -38,21 +38,31 @@
#define SNDRV_CTL_TLVT_DB_MINMAX 4 /* dB scale with min/max */
#define SNDRV_CTL_TLVT_DB_MINMAX_MUTE 5 /* dB scale with min/max with mute */
+#define TLV_ITEM(type, ...) \
+ (type), TLV_LENGTH(__VA_ARGS__), __VA_ARGS__
+#define TLV_LENGTH(...) \
+ ((unsigned int)sizeof((const unsigned int[]) { __VA_ARGS__ }))
+
+#define TLV_CONTAINER_ITEM(...) \
+ TLV_ITEM(SNDRV_CTL_TLVT_CONTAINER, __VA_ARGS__)
+#define DECLARE_TLV_CONTAINER(name, ...) \
+ unsigned int name[] = { TLV_CONTAINER_ITEM(__VA_ARGS__) }
+
#define TLV_DB_SCALE_MASK 0xffff
#define TLV_DB_SCALE_MUTE 0x10000
#define TLV_DB_SCALE_ITEM(min, step, mute) \
- SNDRV_CTL_TLVT_DB_SCALE, 2 * sizeof(unsigned int), \
- (min), ((step) & TLV_DB_SCALE_MASK) | ((mute) ? TLV_DB_SCALE_MUTE : 0)
+ TLV_ITEM(SNDRV_CTL_TLVT_DB_SCALE, \
+ (min), \
+ ((step) & TLV_DB_SCALE_MASK) | \
+ ((mute) ? TLV_DB_SCALE_MUTE : 0))
#define DECLARE_TLV_DB_SCALE(name, min, step, mute) \
unsigned int name[] = { TLV_DB_SCALE_ITEM(min, step, mute) }
/* dB scale specified with min/max values instead of step */
#define TLV_DB_MINMAX_ITEM(min_dB, max_dB) \
- SNDRV_CTL_TLVT_DB_MINMAX, 2 * sizeof(unsigned int), \
- (min_dB), (max_dB)
+ TLV_ITEM(SNDRV_CTL_TLVT_DB_MINMAX, (min_dB), (max_dB))
#define TLV_DB_MINMAX_MUTE_ITEM(min_dB, max_dB) \
- SNDRV_CTL_TLVT_DB_MINMAX_MUTE, 2 * sizeof(unsigned int), \
- (min_dB), (max_dB)
+ TLV_ITEM(SNDRV_CTL_TLVT_DB_MINMAX_MUTE, (min_dB), (max_dB))
#define DECLARE_TLV_DB_MINMAX(name, min_dB, max_dB) \
unsigned int name[] = { TLV_DB_MINMAX_ITEM(min_dB, max_dB) }
#define DECLARE_TLV_DB_MINMAX_MUTE(name, min_dB, max_dB) \
@@ -60,13 +70,16 @@
/* linear volume between min_dB and max_dB (.01dB unit) */
#define TLV_DB_LINEAR_ITEM(min_dB, max_dB) \
- SNDRV_CTL_TLVT_DB_LINEAR, 2 * sizeof(unsigned int), \
- (min_dB), (max_dB)
+ TLV_ITEM(SNDRV_CTL_TLVT_DB_LINEAR, (min_dB), (max_dB))
#define DECLARE_TLV_DB_LINEAR(name, min_dB, max_dB) \
unsigned int name[] = { TLV_DB_LINEAR_ITEM(min_dB, max_dB) }
/* dB range container */
/* Each item is: <min> <max> <TLV> */
+#define TLV_DB_RANGE_ITEM(...) \
+ TLV_ITEM(SNDRV_CTL_TLVT_DB_RANGE, __VA_ARGS__)
+#define DECLARE_TLV_DB_RANGE(name, ...) \
+ unsigned int name[] = { TLV_DB_RANGE_ITEM(__VA_ARGS__) }
/* The below assumes that each item TLV is 4 words like DB_SCALE or LINEAR */
#define TLV_DB_RANGE_HEAD(num) \
SNDRV_CTL_TLVT_DB_RANGE, 6 * (num) * sizeof(unsigned int)
diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h
index 5456343ebe4c..4f67c762cd74 100644
--- a/include/sound/vx_core.h
+++ b/include/sound/vx_core.h
@@ -341,7 +341,7 @@ int vx_change_frequency(struct vx_core *chip);
/*
* PM
*/
-int snd_vx_suspend(struct vx_core *card, pm_message_t state);
+int snd_vx_suspend(struct vx_core *card);
int snd_vx_resume(struct vx_core *card);
/*
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 2d7db85e93ae..f1405d335a96 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -24,10 +24,8 @@ struct se_subsystem_api {
struct se_subsystem_dev *, void *);
void (*free_device)(void *);
int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *);
- int (*execute_cmd)(struct se_cmd *, struct scatterlist *, u32,
- enum dma_data_direction);
- int (*do_discard)(struct se_device *, sector_t, u32);
- void (*do_sync_cache)(struct se_cmd *);
+
+ int (*parse_cdb)(struct se_cmd *cmd);
ssize_t (*check_configfs_dev_params)(struct se_hba *,
struct se_subsystem_dev *);
ssize_t (*set_configfs_dev_params)(struct se_hba *,
@@ -40,6 +38,13 @@ struct se_subsystem_api {
unsigned char *(*get_sense_buffer)(struct se_cmd *);
};
+struct spc_ops {
+ int (*execute_rw)(struct se_cmd *cmd);
+ int (*execute_sync_cache)(struct se_cmd *cmd);
+ int (*execute_write_same)(struct se_cmd *cmd);
+ int (*execute_unmap)(struct se_cmd *cmd);
+};
+
int transport_subsystem_register(struct se_subsystem_api *);
void transport_subsystem_release(struct se_subsystem_api *);
@@ -49,6 +54,10 @@ struct se_device *transport_add_device_to_core_hba(struct se_hba *,
void target_complete_cmd(struct se_cmd *, u8);
+int sbc_parse_cdb(struct se_cmd *cmd, struct spc_ops *ops);
+int spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
+int spc_get_write_same_sectors(struct se_cmd *cmd);
+
void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index dc35d8660aa6..128ce46fa48a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -145,12 +145,9 @@ enum transport_state_table {
TRANSPORT_NO_STATE = 0,
TRANSPORT_NEW_CMD = 1,
TRANSPORT_WRITE_PENDING = 3,
- TRANSPORT_PROCESS_WRITE = 4,
TRANSPORT_PROCESSING = 5,
TRANSPORT_COMPLETE = 6,
- TRANSPORT_PROCESS_TMR = 9,
TRANSPORT_ISTATE_PROCESSING = 11,
- TRANSPORT_NEW_CMD_MAP = 16,
TRANSPORT_COMPLETE_QF_WP = 18,
TRANSPORT_COMPLETE_QF_OK = 19,
};
@@ -160,25 +157,20 @@ enum se_cmd_flags_table {
SCF_SUPPORTED_SAM_OPCODE = 0x00000001,
SCF_TRANSPORT_TASK_SENSE = 0x00000002,
SCF_EMULATED_TASK_SENSE = 0x00000004,
- SCF_SCSI_DATA_SG_IO_CDB = 0x00000008,
- SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010,
- SCF_SCSI_NON_DATA_CDB = 0x00000020,
- SCF_SCSI_TMR_CDB = 0x00000040,
- SCF_SCSI_CDB_EXCEPTION = 0x00000080,
- SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
- SCF_FUA = 0x00000200,
- SCF_SE_LUN_CMD = 0x00000800,
- SCF_SE_ALLOW_EOO = 0x00001000,
- SCF_BIDI = 0x00002000,
- SCF_SENT_CHECK_CONDITION = 0x00004000,
- SCF_OVERFLOW_BIT = 0x00008000,
- SCF_UNDERFLOW_BIT = 0x00010000,
- SCF_SENT_DELAYED_TAS = 0x00020000,
- SCF_ALUA_NON_OPTIMIZED = 0x00040000,
- SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
- SCF_UNUSED = 0x00100000,
- SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00200000,
- SCF_ACK_KREF = 0x00400000,
+ SCF_SCSI_DATA_CDB = 0x00000008,
+ SCF_SCSI_TMR_CDB = 0x00000010,
+ SCF_SCSI_CDB_EXCEPTION = 0x00000020,
+ SCF_SCSI_RESERVATION_CONFLICT = 0x00000040,
+ SCF_FUA = 0x00000080,
+ SCF_SE_LUN_CMD = 0x00000100,
+ SCF_BIDI = 0x00000400,
+ SCF_SENT_CHECK_CONDITION = 0x00000800,
+ SCF_OVERFLOW_BIT = 0x00001000,
+ SCF_UNDERFLOW_BIT = 0x00002000,
+ SCF_SENT_DELAYED_TAS = 0x00004000,
+ SCF_ALUA_NON_OPTIMIZED = 0x00008000,
+ SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
+ SCF_ACK_KREF = 0x00040000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -220,6 +212,7 @@ enum tcm_sense_reason_table {
TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
TCM_CHECK_CONDITION_NOT_READY = 0x0f,
TCM_RESERVATION_CONFLICT = 0x10,
+ TCM_ADDRESS_OUT_OF_RANGE = 0x11,
};
enum target_sc_flags_table {
@@ -471,13 +464,6 @@ struct t10_reservation {
struct t10_reservation_ops pr_ops;
};
-struct se_queue_obj {
- atomic_t queue_cnt;
- spinlock_t cmd_queue_lock;
- struct list_head qobj_list;
- wait_queue_head_t thread_wq;
-};
-
struct se_tmr_req {
/* Task Management function to be performed */
u8 function;
@@ -486,11 +472,8 @@ struct se_tmr_req {
int call_transport;
/* Reference to ITT that Task Mgmt should be performed */
u32 ref_task_tag;
- /* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
- u64 ref_task_lun;
void *fabric_tmr_ptr;
struct se_cmd *task_cmd;
- struct se_cmd *ref_cmd;
struct se_device *tmr_dev;
struct se_lun *tmr_lun;
struct list_head tmr_list;
@@ -537,7 +520,6 @@ struct se_cmd {
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
- struct list_head se_queue_node;
struct list_head se_cmd_list;
struct completion cmd_wait_comp;
struct kref cmd_kref;
@@ -575,7 +557,6 @@ struct se_cmd {
struct scatterlist *t_bidi_data_sg;
unsigned int t_bidi_data_nents;
- struct list_head execute_list;
struct list_head state_list;
bool state_active;
@@ -633,7 +614,6 @@ struct se_session {
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
- struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
struct kref sess_kref;
};
@@ -780,13 +760,11 @@ struct se_device {
/* Active commands on this virtual SE device */
atomic_t simple_cmds;
atomic_t dev_ordered_id;
- atomic_t execute_tasks;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
struct se_obj dev_obj;
struct se_obj dev_access_obj;
struct se_obj dev_export_obj;
- struct se_queue_obj dev_queue_obj;
spinlock_t delayed_cmd_lock;
spinlock_t execute_task_lock;
spinlock_t dev_reservation_lock;
@@ -802,11 +780,9 @@ struct se_device {
struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
- /* Pointer to descriptor for processing thread */
- struct task_struct *process_thread;
+ struct workqueue_struct *tmr_wq;
struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
- struct list_head execute_list;
struct list_head state_list;
struct list_head qf_cmd_list;
/* Pointer to associated SE HBA */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index c78a23333c4f..69fb3cfd02d7 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -33,12 +33,6 @@ struct target_core_fabric_ops {
struct se_node_acl *);
u32 (*tpg_get_inst_index)(struct se_portal_group *);
/*
- * Optional function pointer for TCM to perform command map
- * from TCM processing thread context, for those struct se_cmd
- * initially allocated in interrupt context.
- */
- int (*new_cmd_map)(struct se_cmd *);
- /*
* Optional to release struct se_cmd and fabric dependent allocated
* I/O descriptor in transport_cmd_check_stop().
*
@@ -108,20 +102,18 @@ void transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
struct se_session *, u32, int, int, unsigned char *);
int transport_lookup_cmd_lun(struct se_cmd *, u32);
int target_setup_cmd_from_cdb(struct se_cmd *, unsigned char *);
-void target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
+int target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
unsigned char *, u32, u32, int, int, int);
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
unsigned char *sense, u32 unpacked_lun,
void *fabric_tmr_ptr, unsigned char tm_type,
gfp_t, unsigned int, int);
int transport_handle_cdb_direct(struct se_cmd *);
-int transport_generic_handle_cdb_map(struct se_cmd *);
-int transport_generic_handle_data(struct se_cmd *);
int transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
struct scatterlist *, u32, struct scatterlist *, u32);
int transport_generic_new_cmd(struct se_cmd *);
-void transport_generic_process_write(struct se_cmd *);
+void target_execute_cmd(struct se_cmd *cmd);
void transport_generic_free_cmd(struct se_cmd *, int);
@@ -129,9 +121,8 @@ bool transport_wait_for_tasks(struct se_cmd *);
int transport_check_aborted_status(struct se_cmd *, int);
int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
-void target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
int target_put_sess_cmd(struct se_session *, struct se_cmd *);
-void target_splice_sess_cmd_list(struct se_session *);
+void target_sess_cmd_list_set_waiting(struct se_session *);
void target_wait_for_sess_cmds(struct se_session *, int);
int core_alua_check_nonop_delay(struct se_cmd *);
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 46e3cd8e197a..7ef9e759f499 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -13,7 +13,8 @@
ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
- ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
+ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL), \
+ ERSN(S390_UCONTROL)
TRACE_EVENT(kvm_userspace_exit,
TP_PROTO(__u32 reason, int errno),
@@ -36,7 +37,7 @@ TRACE_EVENT(kvm_userspace_exit,
__entry->errno < 0 ? -__entry->errno : __entry->reason)
);
-#if defined(__KVM_HAVE_IOAPIC)
+#if defined(__KVM_HAVE_IRQ_LINE)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
TP_ARGS(gsi, level, irq_source_id),
@@ -56,7 +57,9 @@ TRACE_EVENT(kvm_set_irq,
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
+#endif
+#if defined(__KVM_HAVE_IOAPIC)
#define kvm_deliver_mode \
{0x0, "Fixed"}, \
{0x1, "LowPrio"}, \
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index d274734b2aa4..5bde94d8585b 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -541,6 +541,50 @@ TRACE_EVENT(rcu_torture_read,
__entry->rcutorturename, __entry->rhp)
);
+/*
+ * Tracepoint for _rcu_barrier() execution. The string "s" describes
+ * the _rcu_barrier phase:
+ * "Begin": rcu_barrier_callback() started.
+ * "Check": rcu_barrier_callback() checking for piggybacking.
+ * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit.
+ * "Inc1": rcu_barrier_callback() piggyback check counter incremented.
+ * "Offline": rcu_barrier_callback() found offline CPU
+ * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks.
+ * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks.
+ * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
+ * "CB": An rcu_barrier_callback() invoked a callback, not the last.
+ * "LastCB": An rcu_barrier_callback() invoked the last callback.
+ * "Inc2": rcu_barrier_callback() piggyback check counter incremented.
+ * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
+ * is the count of remaining callbacks, and "done" is the piggybacking count.
+ */
+TRACE_EVENT(rcu_barrier,
+
+ TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
+
+ TP_ARGS(rcuname, s, cpu, cnt, done),
+
+ TP_STRUCT__entry(
+ __field(char *, rcuname)
+ __field(char *, s)
+ __field(int, cpu)
+ __field(int, cnt)
+ __field(unsigned long, done)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->s = s;
+ __entry->cpu = cpu;
+ __entry->cnt = cnt;
+ __entry->done = done;
+ ),
+
+ TP_printk("%s %s cpu %d remaining %d # %lu",
+ __entry->rcuname, __entry->s, __entry->cpu, __entry->cnt,
+ __entry->done)
+);
+
#else /* #ifdef CONFIG_RCU_TRACE */
#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
@@ -564,6 +608,7 @@ TRACE_EVENT(rcu_torture_read,
#define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
do { } while (0)
#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
+#define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index 4018f5058f27..f28d1b65f178 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -54,7 +54,7 @@ TRACE_EVENT(workqueue_queue_work,
__entry->function = work->func;
__entry->workqueue = cwq->wq;
__entry->req_cpu = req_cpu;
- __entry->cpu = cwq->gcwq->cpu;
+ __entry->cpu = cwq->pool->gcwq->cpu;
),
TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 769724944fc6..c6bc2faaf261 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -571,6 +571,7 @@ static inline void ftrace_test_probe_##call(void) \
#undef __print_flags
#undef __print_symbolic
+#undef __print_hex
#undef __get_dynamic_array
#undef __get_str
diff --git a/include/xen/events.h b/include/xen/events.h
index 04399b28e821..9c641deb65d2 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -58,6 +58,8 @@ void notify_remote_via_irq(int irq);
void xen_irq_resume(void);
+void xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn);
+
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq);
void xen_set_irq_pending(int irq);
diff --git a/include/xen/interface/io/xs_wire.h b/include/xen/interface/io/xs_wire.h
index 7cdfca24eafb..794deb07eb53 100644
--- a/include/xen/interface/io/xs_wire.h
+++ b/include/xen/interface/io/xs_wire.h
@@ -29,7 +29,8 @@ enum xsd_sockmsg_type
XS_IS_DOMAIN_INTRODUCED,
XS_RESUME,
XS_SET_TARGET,
- XS_RESTRICT
+ XS_RESTRICT,
+ XS_RESET_WATCHES,
};
#define XS_WRITE_NONE "NONE"
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index 486653f0dd8f..61fa66160983 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -314,6 +314,13 @@ struct xenpf_pcpuinfo {
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_pcpuinfo);
+#define XENPF_cpu_online 56
+#define XENPF_cpu_offline 57
+struct xenpf_cpu_ol {
+ uint32_t cpuid;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol);
+
struct xen_platform_op {
uint32_t cmd;
uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
@@ -330,6 +337,7 @@ struct xen_platform_op {
struct xenpf_getidletime getidletime;
struct xenpf_set_processor_pminfo set_pminfo;
struct xenpf_pcpuinfo pcpu_info;
+ struct xenpf_cpu_ol cpu_ol;
uint8_t pad[128];
} u;
};
diff --git a/include/xen/interface/xen-mca.h b/include/xen/interface/xen-mca.h
new file mode 100644
index 000000000000..73a4ea714d93
--- /dev/null
+++ b/include/xen/interface/xen-mca.h
@@ -0,0 +1,385 @@
+/******************************************************************************
+ * arch-x86/mca.h
+ * Guest OS machine check interface to x86 Xen.
+ *
+ * Contributed by Advanced Micro Devices, Inc.
+ * Author: Christoph Egger <Christoph.Egger@amd.com>
+ *
+ * Updated by Intel Corporation
+ * Author: Liu, Jinsong <jinsong.liu@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__
+#define __XEN_PUBLIC_ARCH_X86_MCA_H__
+
+/* Hypercall */
+#define __HYPERVISOR_mca __HYPERVISOR_arch_0
+
+#define XEN_MCA_INTERFACE_VERSION 0x01ecc003
+
+/* IN: Dom0 calls hypercall to retrieve nonurgent error log entry */
+#define XEN_MC_NONURGENT 0x1
+/* IN: Dom0 calls hypercall to retrieve urgent error log entry */
+#define XEN_MC_URGENT 0x2
+/* IN: Dom0 acknowledges previosly-fetched error log entry */
+#define XEN_MC_ACK 0x4
+
+/* OUT: All is ok */
+#define XEN_MC_OK 0x0
+/* OUT: Domain could not fetch data. */
+#define XEN_MC_FETCHFAILED 0x1
+/* OUT: There was no machine check data to fetch. */
+#define XEN_MC_NODATA 0x2
+
+#ifndef __ASSEMBLY__
+/* vIRQ injected to Dom0 */
+#define VIRQ_MCA VIRQ_ARCH_0
+
+/*
+ * mc_info entry types
+ * mca machine check info are recorded in mc_info entries.
+ * when fetch mca info, it can use MC_TYPE_... to distinguish
+ * different mca info.
+ */
+#define MC_TYPE_GLOBAL 0
+#define MC_TYPE_BANK 1
+#define MC_TYPE_EXTENDED 2
+#define MC_TYPE_RECOVERY 3
+
+struct mcinfo_common {
+ uint16_t type; /* structure type */
+ uint16_t size; /* size of this struct in bytes */
+};
+
+#define MC_FLAG_CORRECTABLE (1 << 0)
+#define MC_FLAG_UNCORRECTABLE (1 << 1)
+#define MC_FLAG_RECOVERABLE (1 << 2)
+#define MC_FLAG_POLLED (1 << 3)
+#define MC_FLAG_RESET (1 << 4)
+#define MC_FLAG_CMCI (1 << 5)
+#define MC_FLAG_MCE (1 << 6)
+
+/* contains x86 global mc information */
+struct mcinfo_global {
+ struct mcinfo_common common;
+
+ uint16_t mc_domid; /* running domain at the time in error */
+ uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */
+ uint32_t mc_socketid; /* physical socket of the physical core */
+ uint16_t mc_coreid; /* physical impacted core */
+ uint16_t mc_core_threadid; /* core thread of physical core */
+ uint32_t mc_apicid;
+ uint32_t mc_flags;
+ uint64_t mc_gstatus; /* global status */
+};
+
+/* contains x86 bank mc information */
+struct mcinfo_bank {
+ struct mcinfo_common common;
+
+ uint16_t mc_bank; /* bank nr */
+ uint16_t mc_domid; /* domain referenced by mc_addr if valid */
+ uint64_t mc_status; /* bank status */
+ uint64_t mc_addr; /* bank address */
+ uint64_t mc_misc;
+ uint64_t mc_ctrl2;
+ uint64_t mc_tsc;
+};
+
+struct mcinfo_msr {
+ uint64_t reg; /* MSR */
+ uint64_t value; /* MSR value */
+};
+
+/* contains mc information from other or additional mc MSRs */
+struct mcinfo_extended {
+ struct mcinfo_common common;
+ uint32_t mc_msrs; /* Number of msr with valid values. */
+ /*
+ * Currently Intel extended MSR (32/64) include all gp registers
+ * and E(R)FLAGS, E(R)IP, E(R)MISC, up to 11/19 of them might be
+ * useful at present. So expand this array to 16/32 to leave room.
+ */
+ struct mcinfo_msr mc_msr[sizeof(void *) * 4];
+};
+
+/* Recovery Action flags. Giving recovery result information to DOM0 */
+
+/* Xen takes successful recovery action, the error is recovered */
+#define REC_ACTION_RECOVERED (0x1 << 0)
+/* No action is performed by XEN */
+#define REC_ACTION_NONE (0x1 << 1)
+/* It's possible DOM0 might take action ownership in some case */
+#define REC_ACTION_NEED_RESET (0x1 << 2)
+
+/*
+ * Different Recovery Action types, if the action is performed successfully,
+ * REC_ACTION_RECOVERED flag will be returned.
+ */
+
+/* Page Offline Action */
+#define MC_ACTION_PAGE_OFFLINE (0x1 << 0)
+/* CPU offline Action */
+#define MC_ACTION_CPU_OFFLINE (0x1 << 1)
+/* L3 cache disable Action */
+#define MC_ACTION_CACHE_SHRINK (0x1 << 2)
+
+/*
+ * Below interface used between XEN/DOM0 for passing XEN's recovery action
+ * information to DOM0.
+ */
+struct page_offline_action {
+ /* Params for passing the offlined page number to DOM0 */
+ uint64_t mfn;
+ uint64_t status;
+};
+
+struct cpu_offline_action {
+ /* Params for passing the identity of the offlined CPU to DOM0 */
+ uint32_t mc_socketid;
+ uint16_t mc_coreid;
+ uint16_t mc_core_threadid;
+};
+
+#define MAX_UNION_SIZE 16
+struct mcinfo_recovery {
+ struct mcinfo_common common;
+ uint16_t mc_bank; /* bank nr */
+ uint8_t action_flags;
+ uint8_t action_types;
+ union {
+ struct page_offline_action page_retire;
+ struct cpu_offline_action cpu_offline;
+ uint8_t pad[MAX_UNION_SIZE];
+ } action_info;
+};
+
+
+#define MCINFO_MAXSIZE 768
+struct mc_info {
+ /* Number of mcinfo_* entries in mi_data */
+ uint32_t mi_nentries;
+ uint32_t flags;
+ uint64_t mi_data[(MCINFO_MAXSIZE - 1) / 8];
+};
+DEFINE_GUEST_HANDLE_STRUCT(mc_info);
+
+#define __MC_MSR_ARRAYSIZE 8
+#define __MC_MSR_MCGCAP 0
+#define __MC_NMSRS 1
+#define MC_NCAPS 7
+struct mcinfo_logical_cpu {
+ uint32_t mc_cpunr;
+ uint32_t mc_chipid;
+ uint16_t mc_coreid;
+ uint16_t mc_threadid;
+ uint32_t mc_apicid;
+ uint32_t mc_clusterid;
+ uint32_t mc_ncores;
+ uint32_t mc_ncores_active;
+ uint32_t mc_nthreads;
+ uint32_t mc_cpuid_level;
+ uint32_t mc_family;
+ uint32_t mc_vendor;
+ uint32_t mc_model;
+ uint32_t mc_step;
+ char mc_vendorid[16];
+ char mc_brandid[64];
+ uint32_t mc_cpu_caps[MC_NCAPS];
+ uint32_t mc_cache_size;
+ uint32_t mc_cache_alignment;
+ uint32_t mc_nmsrvals;
+ struct mcinfo_msr mc_msrvalues[__MC_MSR_ARRAYSIZE];
+};
+DEFINE_GUEST_HANDLE_STRUCT(mcinfo_logical_cpu);
+
+/*
+ * Prototype:
+ * uint32_t x86_mcinfo_nentries(struct mc_info *mi);
+ */
+#define x86_mcinfo_nentries(_mi) \
+ ((_mi)->mi_nentries)
+/*
+ * Prototype:
+ * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi);
+ */
+#define x86_mcinfo_first(_mi) \
+ ((struct mcinfo_common *)(_mi)->mi_data)
+/*
+ * Prototype:
+ * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic);
+ */
+#define x86_mcinfo_next(_mic) \
+ ((struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size))
+
+/*
+ * Prototype:
+ * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type);
+ */
+static inline void x86_mcinfo_lookup(struct mcinfo_common **ret,
+ struct mc_info *mi, uint16_t type)
+{
+ uint32_t i;
+ struct mcinfo_common *mic;
+ bool found = 0;
+
+ if (!ret || !mi)
+ return;
+
+ mic = x86_mcinfo_first(mi);
+ for (i = 0; i < x86_mcinfo_nentries(mi); i++) {
+ if (mic->type == type) {
+ found = 1;
+ break;
+ }
+ mic = x86_mcinfo_next(mic);
+ }
+
+ *ret = found ? mic : NULL;
+}
+
+/*
+ * Fetch machine check data from hypervisor.
+ */
+#define XEN_MC_fetch 1
+struct xen_mc_fetch {
+ /*
+ * IN: XEN_MC_NONURGENT, XEN_MC_URGENT,
+ * XEN_MC_ACK if ack'king an earlier fetch
+ * OUT: XEN_MC_OK, XEN_MC_FETCHAILED, XEN_MC_NODATA
+ */
+ uint32_t flags;
+ uint32_t _pad0;
+ /* OUT: id for ack, IN: id we are ack'ing */
+ uint64_t fetch_id;
+
+ /* OUT variables. */
+ GUEST_HANDLE(mc_info) data;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_mc_fetch);
+
+
+/*
+ * This tells the hypervisor to notify a DomU about the machine check error
+ */
+#define XEN_MC_notifydomain 2
+struct xen_mc_notifydomain {
+ /* IN variables */
+ uint16_t mc_domid; /* The unprivileged domain to notify */
+ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify */
+
+ /* IN/OUT variables */
+ uint32_t flags;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_mc_notifydomain);
+
+#define XEN_MC_physcpuinfo 3
+struct xen_mc_physcpuinfo {
+ /* IN/OUT */
+ uint32_t ncpus;
+ uint32_t _pad0;
+ /* OUT */
+ GUEST_HANDLE(mcinfo_logical_cpu) info;
+};
+
+#define XEN_MC_msrinject 4
+#define MC_MSRINJ_MAXMSRS 8
+struct xen_mc_msrinject {
+ /* IN */
+ uint32_t mcinj_cpunr; /* target processor id */
+ uint32_t mcinj_flags; /* see MC_MSRINJ_F_* below */
+ uint32_t mcinj_count; /* 0 .. count-1 in array are valid */
+ uint32_t _pad0;
+ struct mcinfo_msr mcinj_msr[MC_MSRINJ_MAXMSRS];
+};
+
+/* Flags for mcinj_flags above; bits 16-31 are reserved */
+#define MC_MSRINJ_F_INTERPOSE 0x1
+
+#define XEN_MC_mceinject 5
+struct xen_mc_mceinject {
+ unsigned int mceinj_cpunr; /* target processor id */
+};
+
+struct xen_mc {
+ uint32_t cmd;
+ uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */
+ union {
+ struct xen_mc_fetch mc_fetch;
+ struct xen_mc_notifydomain mc_notifydomain;
+ struct xen_mc_physcpuinfo mc_physcpuinfo;
+ struct xen_mc_msrinject mc_msrinject;
+ struct xen_mc_mceinject mc_mceinject;
+ } u;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xen_mc);
+
+/* Fields are zero when not available */
+struct xen_mce {
+ __u64 status;
+ __u64 misc;
+ __u64 addr;
+ __u64 mcgstatus;
+ __u64 ip;
+ __u64 tsc; /* cpu time stamp counter */
+ __u64 time; /* wall time_t when error was detected */
+ __u8 cpuvendor; /* cpu vendor as encoded in system.h */
+ __u8 inject_flags; /* software inject flags */
+ __u16 pad;
+ __u32 cpuid; /* CPUID 1 EAX */
+ __u8 cs; /* code segment */
+ __u8 bank; /* machine check bank */
+ __u8 cpu; /* cpu number; obsolete; use extcpu now */
+ __u8 finished; /* entry is valid */
+ __u32 extcpu; /* linux cpu number that detected the error */
+ __u32 socketid; /* CPU socket ID */
+ __u32 apicid; /* CPU initial apic ID */
+ __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
+};
+
+/*
+ * This structure contains all data related to the MCE log. Also
+ * carries a signature to make it easier to find from external
+ * debugging tools. Each entry is only valid when its finished flag
+ * is set.
+ */
+
+#define XEN_MCE_LOG_LEN 32
+
+struct xen_mce_log {
+ char signature[12]; /* "MACHINECHECK" */
+ unsigned len; /* = XEN_MCE_LOG_LEN */
+ unsigned next;
+ unsigned flags;
+ unsigned recordlen; /* length of struct xen_mce */
+ struct xen_mce entry[XEN_MCE_LOG_LEN];
+};
+
+#define XEN_MCE_OVERFLOW 0 /* bit 0 in flags means overflow */
+
+#define XEN_MCE_LOG_SIGNATURE "MACHINECHECK"
+
+#define MCE_GET_RECORD_LEN _IOR('M', 1, int)
+#define MCE_GET_LOG_LEN _IOR('M', 2, int)
+#define MCE_GETCLEAR_FLAGS _IOR('M', 3, int)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index a890804945e3..0801468f9abe 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -80,6 +80,7 @@
#define VIRQ_CONSOLE 2 /* (DOM0) Bytes received on emergency console. */
#define VIRQ_DOM_EXC 3 /* (DOM0) Exceptional event for some domain. */
#define VIRQ_DEBUGGER 6 /* (DOM0) A domain has paused for debugging. */
+#define VIRQ_PCPU_STATE 9 /* (DOM0) PCPU state changed */
/* Architecture-specific VIRQ definitions. */
#define VIRQ_ARCH_0 16
diff --git a/init/main.c b/init/main.c
index b5cc0a7c4708..3f151f6c6da7 100644
--- a/init/main.c
+++ b/init/main.c
@@ -68,6 +68,7 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
+#include <linux/file.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -804,8 +805,8 @@ static noinline int init_post(void)
system_state = SYSTEM_RUNNING;
numa_default_policy();
-
current->signal->flags |= SIGNAL_UNKILLABLE;
+ flush_delayed_fput();
if (ramdisk_execute_command) {
run_init_process(ramdisk_execute_command);
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 8ce57691e7b6..f8e54f5b9080 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -413,7 +413,7 @@ static void mqueue_evict_inode(struct inode *inode)
}
static int mqueue_create(struct inode *dir, struct dentry *dentry,
- umode_t mode, struct nameidata *nd)
+ umode_t mode, bool excl)
{
struct inode *inode;
struct mq_attr *attr = dentry->d_fsdata;
@@ -721,8 +721,8 @@ static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
/*
* Invoked when creating a new queue via sys_mq_open
*/
-static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
- struct dentry *dentry, int oflag, umode_t mode,
+static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
+ struct path *path, int oflag, umode_t mode,
struct mq_attr *attr)
{
const struct cred *cred = current_cred();
@@ -732,9 +732,9 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
if (attr) {
ret = mq_attr_ok(ipc_ns, attr);
if (ret)
- goto out;
+ return ERR_PTR(ret);
/* store for use during create */
- dentry->d_fsdata = attr;
+ path->dentry->d_fsdata = attr;
} else {
struct mq_attr def_attr;
@@ -744,71 +744,51 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
ipc_ns->mq_msgsize_default);
ret = mq_attr_ok(ipc_ns, &def_attr);
if (ret)
- goto out;
+ return ERR_PTR(ret);
}
mode &= ~current_umask();
- ret = mnt_want_write(ipc_ns->mq_mnt);
+ ret = mnt_want_write(path->mnt);
if (ret)
- goto out;
- ret = vfs_create(dir->d_inode, dentry, mode, NULL);
- dentry->d_fsdata = NULL;
- if (ret)
- goto out_drop_write;
-
- result = dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
+ return ERR_PTR(ret);
+ ret = vfs_create(dir, path->dentry, mode, true);
+ path->dentry->d_fsdata = NULL;
+ if (!ret)
+ result = dentry_open(path, oflag, cred);
+ else
+ result = ERR_PTR(ret);
/*
* dentry_open() took a persistent mnt_want_write(),
* so we can now drop this one.
*/
- mnt_drop_write(ipc_ns->mq_mnt);
+ mnt_drop_write(path->mnt);
return result;
-
-out_drop_write:
- mnt_drop_write(ipc_ns->mq_mnt);
-out:
- dput(dentry);
- mntput(ipc_ns->mq_mnt);
- return ERR_PTR(ret);
}
/* Opens existing queue */
-static struct file *do_open(struct ipc_namespace *ipc_ns,
- struct dentry *dentry, int oflag)
+static struct file *do_open(struct path *path, int oflag)
{
- int ret;
- const struct cred *cred = current_cred();
-
static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
MAY_READ | MAY_WRITE };
-
- if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
- ret = -EINVAL;
- goto err;
- }
-
- if (inode_permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE])) {
- ret = -EACCES;
- goto err;
- }
-
- return dentry_open(dentry, ipc_ns->mq_mnt, oflag, cred);
-
-err:
- dput(dentry);
- mntput(ipc_ns->mq_mnt);
- return ERR_PTR(ret);
+ int acc;
+ if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
+ return ERR_PTR(-EINVAL);
+ acc = oflag2acc[oflag & O_ACCMODE];
+ if (inode_permission(path->dentry->d_inode, acc))
+ return ERR_PTR(-EACCES);
+ return dentry_open(path, oflag, current_cred());
}
SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
struct mq_attr __user *, u_attr)
{
- struct dentry *dentry;
+ struct path path;
struct file *filp;
char *name;
struct mq_attr attr;
int fd, error;
struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
+ struct dentry *root = ipc_ns->mq_mnt->mnt_root;
if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
return -EFAULT;
@@ -822,52 +802,49 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
if (fd < 0)
goto out_putname;
- mutex_lock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
- dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name));
- if (IS_ERR(dentry)) {
- error = PTR_ERR(dentry);
+ error = 0;
+ mutex_lock(&root->d_inode->i_mutex);
+ path.dentry = lookup_one_len(name, root, strlen(name));
+ if (IS_ERR(path.dentry)) {
+ error = PTR_ERR(path.dentry);
goto out_putfd;
}
- mntget(ipc_ns->mq_mnt);
+ path.mnt = mntget(ipc_ns->mq_mnt);
if (oflag & O_CREAT) {
- if (dentry->d_inode) { /* entry already exists */
- audit_inode(name, dentry);
+ if (path.dentry->d_inode) { /* entry already exists */
+ audit_inode(name, path.dentry);
if (oflag & O_EXCL) {
error = -EEXIST;
goto out;
}
- filp = do_open(ipc_ns, dentry, oflag);
+ filp = do_open(&path, oflag);
} else {
- filp = do_create(ipc_ns, ipc_ns->mq_mnt->mnt_root,
- dentry, oflag, mode,
+ filp = do_create(ipc_ns, root->d_inode,
+ &path, oflag, mode,
u_attr ? &attr : NULL);
}
} else {
- if (!dentry->d_inode) {
+ if (!path.dentry->d_inode) {
error = -ENOENT;
goto out;
}
- audit_inode(name, dentry);
- filp = do_open(ipc_ns, dentry, oflag);
+ audit_inode(name, path.dentry);
+ filp = do_open(&path, oflag);
}
- if (IS_ERR(filp)) {
+ if (!IS_ERR(filp))
+ fd_install(fd, filp);
+ else
error = PTR_ERR(filp);
- goto out_putfd;
- }
-
- fd_install(fd, filp);
- goto out_upsem;
-
out:
- dput(dentry);
- mntput(ipc_ns->mq_mnt);
+ path_put(&path);
out_putfd:
- put_unused_fd(fd);
- fd = error;
-out_upsem:
- mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex);
+ if (error) {
+ put_unused_fd(fd);
+ fd = error;
+ }
+ mutex_unlock(&root->d_inode->i_mutex);
out_putname:
putname(name);
return fd;
diff --git a/kernel/async.c b/kernel/async.c
index bd0c168a3bbe..9d3118384858 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -62,8 +62,10 @@ static async_cookie_t next_cookie = 1;
#define MAX_WORK 32768
static LIST_HEAD(async_pending);
-static LIST_HEAD(async_running);
+static ASYNC_DOMAIN(async_running);
+static LIST_HEAD(async_domains);
static DEFINE_SPINLOCK(async_lock);
+static DEFINE_MUTEX(async_register_mutex);
struct async_entry {
struct list_head list;
@@ -71,7 +73,7 @@ struct async_entry {
async_cookie_t cookie;
async_func_ptr *func;
void *data;
- struct list_head *running;
+ struct async_domain *running;
};
static DECLARE_WAIT_QUEUE_HEAD(async_done);
@@ -82,13 +84,12 @@ static atomic_t entry_count;
/*
* MUST be called with the lock held!
*/
-static async_cookie_t __lowest_in_progress(struct list_head *running)
+static async_cookie_t __lowest_in_progress(struct async_domain *running)
{
struct async_entry *entry;
- if (!list_empty(running)) {
- entry = list_first_entry(running,
- struct async_entry, list);
+ if (!list_empty(&running->domain)) {
+ entry = list_first_entry(&running->domain, typeof(*entry), list);
return entry->cookie;
}
@@ -99,7 +100,7 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
return next_cookie; /* "infinity" value */
}
-static async_cookie_t lowest_in_progress(struct list_head *running)
+static async_cookie_t lowest_in_progress(struct async_domain *running)
{
unsigned long flags;
async_cookie_t ret;
@@ -119,10 +120,11 @@ static void async_run_entry_fn(struct work_struct *work)
container_of(work, struct async_entry, work);
unsigned long flags;
ktime_t uninitialized_var(calltime), delta, rettime;
+ struct async_domain *running = entry->running;
/* 1) move self to the running queue */
spin_lock_irqsave(&async_lock, flags);
- list_move_tail(&entry->list, entry->running);
+ list_move_tail(&entry->list, &running->domain);
spin_unlock_irqrestore(&async_lock, flags);
/* 2) run (and print duration) */
@@ -145,6 +147,8 @@ static void async_run_entry_fn(struct work_struct *work)
/* 3) remove self from the running queue */
spin_lock_irqsave(&async_lock, flags);
list_del(&entry->list);
+ if (running->registered && --running->count == 0)
+ list_del_init(&running->node);
/* 4) free the entry */
kfree(entry);
@@ -156,7 +160,7 @@ static void async_run_entry_fn(struct work_struct *work)
wake_up(&async_done);
}
-static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
+static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
{
struct async_entry *entry;
unsigned long flags;
@@ -187,6 +191,8 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
spin_lock_irqsave(&async_lock, flags);
newcookie = entry->cookie = next_cookie++;
list_add_tail(&entry->list, &async_pending);
+ if (running->registered && running->count++ == 0)
+ list_add_tail(&running->node, &async_domains);
atomic_inc(&entry_count);
spin_unlock_irqrestore(&async_lock, flags);
@@ -223,7 +229,7 @@ EXPORT_SYMBOL_GPL(async_schedule);
* Note: This function may be called from atomic or non-atomic contexts.
*/
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
- struct list_head *running)
+ struct async_domain *running)
{
return __async_schedule(ptr, data, running);
}
@@ -236,22 +242,52 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
*/
void async_synchronize_full(void)
{
+ mutex_lock(&async_register_mutex);
do {
- async_synchronize_cookie(next_cookie);
- } while (!list_empty(&async_running) || !list_empty(&async_pending));
+ struct async_domain *domain = NULL;
+
+ spin_lock_irq(&async_lock);
+ if (!list_empty(&async_domains))
+ domain = list_first_entry(&async_domains, typeof(*domain), node);
+ spin_unlock_irq(&async_lock);
+
+ async_synchronize_cookie_domain(next_cookie, domain);
+ } while (!list_empty(&async_domains));
+ mutex_unlock(&async_register_mutex);
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
/**
+ * async_unregister_domain - ensure no more anonymous waiters on this domain
+ * @domain: idle domain to flush out of any async_synchronize_full instances
+ *
+ * async_synchronize_{cookie|full}_domain() are not flushed since callers
+ * of these routines should know the lifetime of @domain
+ *
+ * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
+ */
+void async_unregister_domain(struct async_domain *domain)
+{
+ mutex_lock(&async_register_mutex);
+ spin_lock_irq(&async_lock);
+ WARN_ON(!domain->registered || !list_empty(&domain->node) ||
+ !list_empty(&domain->domain));
+ domain->registered = 0;
+ spin_unlock_irq(&async_lock);
+ mutex_unlock(&async_register_mutex);
+}
+EXPORT_SYMBOL_GPL(async_unregister_domain);
+
+/**
* async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
- * @list: running list to synchronize on
+ * @domain: running list to synchronize on
*
* This function waits until all asynchronous function calls for the
- * synchronization domain specified by the running list @list have been done.
+ * synchronization domain specified by the running list @domain have been done.
*/
-void async_synchronize_full_domain(struct list_head *list)
+void async_synchronize_full_domain(struct async_domain *domain)
{
- async_synchronize_cookie_domain(next_cookie, list);
+ async_synchronize_cookie_domain(next_cookie, domain);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
@@ -261,14 +297,16 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
* @running: running list to synchronize on
*
* This function waits until all asynchronous function calls for the
- * synchronization domain specified by the running list @list submitted
+ * synchronization domain specified by running list @running submitted
* prior to @cookie have been done.
*/
-void async_synchronize_cookie_domain(async_cookie_t cookie,
- struct list_head *running)
+void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
{
ktime_t uninitialized_var(starttime), delta, endtime;
+ if (!running)
+ return;
+
if (initcall_debug && system_state == SYSTEM_BOOTING) {
printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
starttime = ktime_get();
diff --git a/kernel/audit.c b/kernel/audit.c
index 1c7f2c61416b..4a3f28d2ca65 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -384,7 +384,7 @@ static void audit_hold_skb(struct sk_buff *skb)
static void audit_printk_skb(struct sk_buff *skb)
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
- char *data = NLMSG_DATA(nlh);
+ char *data = nlmsg_data(nlh);
if (nlh->nlmsg_type != AUDIT_EOE) {
if (printk_ratelimit())
@@ -516,14 +516,15 @@ struct sk_buff *audit_make_reply(int pid, int seq, int type, int done,
if (!skb)
return NULL;
- nlh = NLMSG_NEW(skb, pid, seq, t, size, flags);
- data = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, t, size, flags);
+ if (!nlh)
+ goto out_kfree_skb;
+ data = nlmsg_data(nlh);
memcpy(data, payload, size);
return skb;
-nlmsg_failure: /* Used by NLMSG_NEW */
- if (skb)
- kfree_skb(skb);
+out_kfree_skb:
+ kfree_skb(skb);
return NULL;
}
@@ -680,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
sessionid = audit_get_sessionid(current);
security_task_getsecid(current, &sid);
seq = nlh->nlmsg_seq;
- data = NLMSG_DATA(nlh);
+ data = nlmsg_data(nlh);
switch (msg_type) {
case AUDIT_GET:
@@ -961,14 +962,17 @@ static void audit_receive(struct sk_buff *skb)
static int __init audit_init(void)
{
int i;
+ struct netlink_kernel_cfg cfg = {
+ .input = audit_receive,
+ };
if (audit_initialized == AUDIT_DISABLED)
return 0;
printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
audit_default ? "enabled" : "disabled");
- audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0,
- audit_receive, NULL, THIS_MODULE);
+ audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT,
+ THIS_MODULE, &cfg);
if (!audit_sock)
audit_panic("cannot initialize netlink socket");
else
@@ -1060,13 +1064,15 @@ static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx,
ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask);
if (!ab->skb)
- goto nlmsg_failure;
+ goto err;
- nlh = NLMSG_NEW(ab->skb, 0, 0, type, 0, 0);
+ nlh = nlmsg_put(ab->skb, 0, 0, type, 0, 0);
+ if (!nlh)
+ goto out_kfree_skb;
return ab;
-nlmsg_failure: /* Used by NLMSG_NEW */
+out_kfree_skb:
kfree_skb(ab->skb);
ab->skb = NULL;
err:
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 5bf0790497e7..3a5ca582ba1e 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -595,7 +595,7 @@ void audit_trim_trees(void)
root_mnt = collect_mounts(&path);
path_put(&path);
- if (!root_mnt)
+ if (IS_ERR(root_mnt))
goto skip_it;
spin_lock(&hash_lock);
@@ -669,8 +669,8 @@ int audit_add_tree_rule(struct audit_krule *rule)
goto Err;
mnt = collect_mounts(&path);
path_put(&path);
- if (!mnt) {
- err = -ENOMEM;
+ if (IS_ERR(mnt)) {
+ err = PTR_ERR(mnt);
goto Err;
}
@@ -719,8 +719,8 @@ int audit_tag_tree(char *old, char *new)
return err;
tagged = collect_mounts(&path2);
path_put(&path2);
- if (!tagged)
- return -ENOMEM;
+ if (IS_ERR(tagged))
+ return PTR_ERR(tagged);
err = kern_path(old, 0, &path1);
if (err) {
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index e683869365d9..3823281401b5 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -355,34 +355,15 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
/* Get path information necessary for adding watches. */
static int audit_get_nd(struct audit_watch *watch, struct path *parent)
{
- struct nameidata nd;
- struct dentry *d;
- int err;
-
- err = kern_path_parent(watch->path, &nd);
- if (err)
- return err;
-
- if (nd.last_type != LAST_NORM) {
- path_put(&nd.path);
- return -EINVAL;
- }
-
- mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
- d = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
- if (IS_ERR(d)) {
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
- path_put(&nd.path);
+ struct dentry *d = kern_path_locked(watch->path, parent);
+ if (IS_ERR(d))
return PTR_ERR(d);
- }
+ mutex_unlock(&parent->dentry->d_inode->i_mutex);
if (d->d_inode) {
/* update watch filter fields */
watch->dev = d->d_inode->i_sb->s_dev;
watch->ino = d->d_inode->i_ino;
}
- mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
-
- *parent = nd.path;
dput(d);
return 0;
}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 2097684cf194..79818507e444 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -822,7 +822,7 @@ EXPORT_SYMBOL_GPL(cgroup_unlock);
*/
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
-static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
+static struct dentry *cgroup_lookup(struct inode *, struct dentry *, unsigned int);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp);
static const struct inode_operations cgroup_dir_inode_operations;
@@ -901,13 +901,10 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
mutex_unlock(&cgroup_mutex);
/*
- * We want to drop the active superblock reference from the
- * cgroup creation after all the dentry refs are gone -
- * kill_sb gets mighty unhappy otherwise. Mark
- * dentry->d_fsdata with cgroup_diput() to tell
- * cgroup_d_release() to call deactivate_super().
+ * Drop the active superblock reference that we took when we
+ * created the cgroup
*/
- dentry->d_fsdata = cgroup_diput;
+ deactivate_super(cgrp->root->sb);
/*
* if we're getting rid of the cgroup, refcount should ensure
@@ -933,13 +930,6 @@ static int cgroup_delete(const struct dentry *d)
return 1;
}
-static void cgroup_d_release(struct dentry *dentry)
-{
- /* did cgroup_diput() tell me to deactivate super? */
- if (dentry->d_fsdata == cgroup_diput)
- deactivate_super(dentry->d_sb);
-}
-
static void remove_dir(struct dentry *d)
{
struct dentry *parent = dget(d->d_parent);
@@ -964,7 +954,7 @@ static int cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
dget(d);
d_delete(d);
- simple_unlink(d->d_inode, d);
+ simple_unlink(cgrp->dentry->d_inode, d);
list_del_init(&cfe->node);
dput(d);
@@ -1078,28 +1068,24 @@ static int rebind_subsystems(struct cgroupfs_root *root,
BUG_ON(cgrp->subsys[i]);
BUG_ON(!dummytop->subsys[i]);
BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
- mutex_lock(&ss->hierarchy_mutex);
cgrp->subsys[i] = dummytop->subsys[i];
cgrp->subsys[i]->cgroup = cgrp;
list_move(&ss->sibling, &root->subsys_list);
ss->root = root;
if (ss->bind)
ss->bind(cgrp);
- mutex_unlock(&ss->hierarchy_mutex);
/* refcount was already taken, and we're keeping it */
} else if (bit & removed_bits) {
/* We're removing this subsystem */
BUG_ON(ss == NULL);
BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
- mutex_lock(&ss->hierarchy_mutex);
if (ss->bind)
ss->bind(dummytop);
dummytop->subsys[i]->cgroup = dummytop;
cgrp->subsys[i] = NULL;
subsys[i]->root = &rootnode;
list_move(&ss->sibling, &rootnode.subsys_list);
- mutex_unlock(&ss->hierarchy_mutex);
/* subsystem is now free - drop reference on module */
module_put(ss->module);
} else if (bit & final_bits) {
@@ -1547,7 +1533,6 @@ static int cgroup_get_rootdir(struct super_block *sb)
static const struct dentry_operations cgroup_dops = {
.d_iput = cgroup_diput,
.d_delete = cgroup_delete,
- .d_release = cgroup_d_release,
};
struct inode *inode =
@@ -1598,7 +1583,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
opts.new_root = new_root;
/* Locate an existing or new sb for this hierarchy */
- sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts);
+ sb = sget(fs_type, cgroup_test_super, cgroup_set_super, 0, &opts);
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
cgroup_drop_root(opts.new_root);
@@ -2581,7 +2566,7 @@ static const struct inode_operations cgroup_dir_inode_operations = {
.rename = cgroup_rename,
};
-static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
+static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
@@ -3894,8 +3879,12 @@ static void css_dput_fn(struct work_struct *work)
{
struct cgroup_subsys_state *css =
container_of(work, struct cgroup_subsys_state, dput_work);
+ struct dentry *dentry = css->cgroup->dentry;
+ struct super_block *sb = dentry->d_sb;
- dput(css->cgroup->dentry);
+ atomic_inc(&sb->s_active);
+ dput(dentry);
+ deactivate_super(sb);
}
static void init_cgroup_css(struct cgroup_subsys_state *css,
@@ -3922,37 +3911,6 @@ static void init_cgroup_css(struct cgroup_subsys_state *css,
set_bit(CSS_CLEAR_CSS_REFS, &css->flags);
}
-static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
-{
- /* We need to take each hierarchy_mutex in a consistent order */
- int i;
-
- /*
- * No worry about a race with rebind_subsystems that might mess up the
- * locking order, since both parties are under cgroup_mutex.
- */
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (ss == NULL)
- continue;
- if (ss->root == root)
- mutex_lock(&ss->hierarchy_mutex);
- }
-}
-
-static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
-{
- int i;
-
- for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
- struct cgroup_subsys *ss = subsys[i];
- if (ss == NULL)
- continue;
- if (ss->root == root)
- mutex_unlock(&ss->hierarchy_mutex);
- }
-}
-
/*
* cgroup_create - create a cgroup
* @parent: cgroup that will be parent of the new cgroup
@@ -4013,9 +3971,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
ss->post_clone(cgrp);
}
- cgroup_lock_hierarchy(root);
list_add(&cgrp->sibling, &cgrp->parent->children);
- cgroup_unlock_hierarchy(root);
root->number_of_cgroups++;
err = cgroup_create_dir(cgrp, dentry, mode);
@@ -4042,9 +3998,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
err_remove:
- cgroup_lock_hierarchy(root);
list_del(&cgrp->sibling);
- cgroup_unlock_hierarchy(root);
root->number_of_cgroups--;
err_destroy:
@@ -4252,10 +4206,8 @@ again:
list_del_init(&cgrp->release_list);
raw_spin_unlock(&release_list_lock);
- cgroup_lock_hierarchy(cgrp->root);
/* delete this cgroup from parent->children */
list_del_init(&cgrp->sibling);
- cgroup_unlock_hierarchy(cgrp->root);
list_del_init(&cgrp->allcg_node);
@@ -4329,8 +4281,6 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
* need to invoke fork callbacks here. */
BUG_ON(!list_empty(&init_task.tasks));
- mutex_init(&ss->hierarchy_mutex);
- lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
ss->active = 1;
/* this function shouldn't be used with modular subsystems, since they
@@ -4457,8 +4407,6 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
}
write_unlock(&css_set_lock);
- mutex_init(&ss->hierarchy_mutex);
- lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
ss->active = 1;
/* success! */
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 67b847dfa2bb..1f91413edb87 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -14,6 +14,7 @@
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/kmsg_dump.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
@@ -2040,8 +2041,15 @@ static int kdb_env(int argc, const char **argv)
*/
static int kdb_dmesg(int argc, const char **argv)
{
- char *syslog_data[4], *start, *end, c = '\0', *p;
- int diag, logging, logsize, lines = 0, adjust = 0, n;
+ int diag;
+ int logging;
+ int lines = 0;
+ int adjust = 0;
+ int n = 0;
+ int skip = 0;
+ struct kmsg_dumper dumper = { .active = 1 };
+ size_t len;
+ char buf[201];
if (argc > 2)
return KDB_ARGCOUNT;
@@ -2064,22 +2072,10 @@ static int kdb_dmesg(int argc, const char **argv)
kdb_set(2, setargs);
}
- /* syslog_data[0,1] physical start, end+1. syslog_data[2,3]
- * logical start, end+1. */
- kdb_syslog_data(syslog_data);
- if (syslog_data[2] == syslog_data[3])
- return 0;
- logsize = syslog_data[1] - syslog_data[0];
- start = syslog_data[2];
- end = syslog_data[3];
-#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
- for (n = 0, p = start; p < end; ++p) {
- c = *KDB_WRAP(p);
- if (c == '\n')
- ++n;
- }
- if (c != '\n')
- ++n;
+ kmsg_dump_rewind_nolock(&dumper);
+ while (kmsg_dump_get_line_nolock(&dumper, 1, NULL, 0, NULL))
+ n++;
+
if (lines < 0) {
if (adjust >= n)
kdb_printf("buffer only contains %d lines, nothing "
@@ -2087,21 +2083,11 @@ static int kdb_dmesg(int argc, const char **argv)
else if (adjust - lines >= n)
kdb_printf("buffer only contains %d lines, last %d "
"lines printed\n", n, n - adjust);
- if (adjust) {
- for (; start < end && adjust; ++start) {
- if (*KDB_WRAP(start) == '\n')
- --adjust;
- }
- if (start < end)
- ++start;
- }
- for (p = start; p < end && lines; ++p) {
- if (*KDB_WRAP(p) == '\n')
- ++lines;
- }
- end = p;
+ skip = adjust;
+ lines = abs(lines);
} else if (lines > 0) {
- int skip = n - (adjust + lines);
+ skip = n - lines - adjust;
+ lines = abs(lines);
if (adjust >= n) {
kdb_printf("buffer only contains %d lines, "
"nothing printed\n", n);
@@ -2112,35 +2098,24 @@ static int kdb_dmesg(int argc, const char **argv)
kdb_printf("buffer only contains %d lines, first "
"%d lines printed\n", n, lines);
}
- for (; start < end && skip; ++start) {
- if (*KDB_WRAP(start) == '\n')
- --skip;
- }
- for (p = start; p < end && lines; ++p) {
- if (*KDB_WRAP(p) == '\n')
- --lines;
- }
- end = p;
+ } else {
+ lines = n;
}
- /* Do a line at a time (max 200 chars) to reduce protocol overhead */
- c = '\n';
- while (start != end) {
- char buf[201];
- p = buf;
- if (KDB_FLAG(CMD_INTERRUPT))
- return 0;
- while (start < end && (c = *KDB_WRAP(start)) &&
- (p - buf) < sizeof(buf)-1) {
- ++start;
- *p++ = c;
- if (c == '\n')
- break;
+
+ if (skip >= n || skip < 0)
+ return 0;
+
+ kmsg_dump_rewind_nolock(&dumper);
+ while (kmsg_dump_get_line_nolock(&dumper, 1, buf, sizeof(buf), &len)) {
+ if (skip) {
+ skip--;
+ continue;
}
- *p = '\0';
- kdb_printf("%s", buf);
+ if (!lines--)
+ break;
+
+ kdb_printf("%.*s\n", (int)len - 1, buf);
}
- if (c != '\n')
- kdb_printf("\n");
return 0;
}
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 47c4e56e513b..392ec6a25844 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -205,7 +205,6 @@ extern char kdb_grep_string[];
extern int kdb_grep_leading;
extern int kdb_grep_trailing;
extern char *kdb_cmds[];
-extern void kdb_syslog_data(char *syslog_data[]);
extern unsigned long kdb_task_state_string(const char *);
extern char kdb_task_state_char (const struct task_struct *);
extern unsigned long kdb_task_state(const struct task_struct *p,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d7d71d6ec972..f1cf0edeb39a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1645,6 +1645,8 @@ perf_install_in_context(struct perf_event_context *ctx,
lockdep_assert_held(&ctx->mutex);
event->ctx = ctx;
+ if (event->cpu != -1)
+ event->cpu = cpu;
if (!task) {
/*
@@ -6252,6 +6254,8 @@ SYSCALL_DEFINE5(perf_event_open,
}
}
+ get_online_cpus();
+
event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
NULL, NULL);
if (IS_ERR(event)) {
@@ -6304,7 +6308,7 @@ SYSCALL_DEFINE5(perf_event_open,
/*
* Get the target context (task or percpu):
*/
- ctx = find_get_context(pmu, task, cpu);
+ ctx = find_get_context(pmu, task, event->cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err_alloc;
@@ -6377,20 +6381,23 @@ SYSCALL_DEFINE5(perf_event_open,
mutex_lock(&ctx->mutex);
if (move_group) {
- perf_install_in_context(ctx, group_leader, cpu);
+ synchronize_rcu();
+ perf_install_in_context(ctx, group_leader, event->cpu);
get_ctx(ctx);
list_for_each_entry(sibling, &group_leader->sibling_list,
group_entry) {
- perf_install_in_context(ctx, sibling, cpu);
+ perf_install_in_context(ctx, sibling, event->cpu);
get_ctx(ctx);
}
}
- perf_install_in_context(ctx, event, cpu);
+ perf_install_in_context(ctx, event, event->cpu);
++ctx->generation;
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
+ put_online_cpus();
+
event->owner = current;
mutex_lock(&current->perf_event_mutex);
@@ -6419,6 +6426,7 @@ err_context:
err_alloc:
free_event(event);
err_task:
+ put_online_cpus();
if (task)
put_task_struct(task);
err_group_fd:
@@ -6479,6 +6487,39 @@ err:
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
+void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
+{
+ struct perf_event_context *src_ctx;
+ struct perf_event_context *dst_ctx;
+ struct perf_event *event, *tmp;
+ LIST_HEAD(events);
+
+ src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
+ dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
+
+ mutex_lock(&src_ctx->mutex);
+ list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
+ event_entry) {
+ perf_remove_from_context(event);
+ put_ctx(src_ctx);
+ list_add(&event->event_entry, &events);
+ }
+ mutex_unlock(&src_ctx->mutex);
+
+ synchronize_rcu();
+
+ mutex_lock(&dst_ctx->mutex);
+ list_for_each_entry_safe(event, tmp, &events, event_entry) {
+ list_del(&event->event_entry);
+ if (event->state >= PERF_EVENT_STATE_OFF)
+ event->state = PERF_EVENT_STATE_INACTIVE;
+ perf_install_in_context(dst_ctx, event, dst_cpu);
+ get_ctx(dst_ctx);
+ }
+ mutex_unlock(&dst_ctx->mutex);
+}
+EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
+
static void sync_child_event(struct perf_event *child_event,
struct task_struct *child)
{
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 985be4d80fe8..f93532748bca 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -38,13 +38,29 @@
#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
-static struct srcu_struct uprobes_srcu;
static struct rb_root uprobes_tree = RB_ROOT;
static DEFINE_SPINLOCK(uprobes_treelock); /* serialize rbtree access */
#define UPROBES_HASH_SZ 13
+/*
+ * We need separate register/unregister and mmap/munmap lock hashes because
+ * of mmap_sem nesting.
+ *
+ * uprobe_register() needs to install probes on (potentially) all processes
+ * and thus needs to acquire multiple mmap_sems (consequtively, not
+ * concurrently), whereas uprobe_mmap() is called while holding mmap_sem
+ * for the particular process doing the mmap.
+ *
+ * uprobe_register()->register_for_each_vma() needs to drop/acquire mmap_sem
+ * because of lock order against i_mmap_mutex. This means there's a hole in
+ * the register vma iteration where a mmap() can happen.
+ *
+ * Thus uprobe_register() can race with uprobe_mmap() and we can try and
+ * install a probe where one is already installed.
+ */
+
/* serialize (un)register */
static struct mutex uprobes_mutex[UPROBES_HASH_SZ];
@@ -61,17 +77,6 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
*/
static atomic_t uprobe_events = ATOMIC_INIT(0);
-/*
- * Maintain a temporary per vma info that can be used to search if a vma
- * has already been handled. This structure is introduced since extending
- * vm_area_struct wasnt recommended.
- */
-struct vma_info {
- struct list_head probe_list;
- struct mm_struct *mm;
- loff_t vaddr;
-};
-
struct uprobe {
struct rb_node rb_node; /* node in the rb tree */
atomic_t ref;
@@ -100,7 +105,8 @@ static bool valid_vma(struct vm_area_struct *vma, bool is_register)
if (!is_register)
return true;
- if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
+ if ((vma->vm_flags & (VM_HUGETLB|VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
+ == (VM_READ|VM_EXEC))
return true;
return false;
@@ -129,33 +135,17 @@ static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
{
struct mm_struct *mm = vma->vm_mm;
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep;
- spinlock_t *ptl;
unsigned long addr;
- int err = -EFAULT;
+ spinlock_t *ptl;
+ pte_t *ptep;
addr = page_address_in_vma(page, vma);
if (addr == -EFAULT)
- goto out;
-
- pgd = pgd_offset(mm, addr);
- if (!pgd_present(*pgd))
- goto out;
-
- pud = pud_offset(pgd, addr);
- if (!pud_present(*pud))
- goto out;
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd))
- goto out;
+ return -EFAULT;
- ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
+ ptep = page_check_address(page, mm, addr, &ptl, 0);
if (!ptep)
- goto out;
+ return -EAGAIN;
get_page(kpage);
page_add_new_anon_rmap(kpage, vma, addr);
@@ -174,10 +164,8 @@ static int __replace_page(struct vm_area_struct *vma, struct page *page, struct
try_to_free_swap(page);
put_page(page);
pte_unmap_unlock(ptep, ptl);
- err = 0;
-out:
- return err;
+ return 0;
}
/**
@@ -222,9 +210,8 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
void *vaddr_old, *vaddr_new;
struct vm_area_struct *vma;
struct uprobe *uprobe;
- loff_t addr;
int ret;
-
+retry:
/* Read the page with vaddr into memory */
ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
if (ret <= 0)
@@ -246,10 +233,6 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
if (mapping != vma->vm_file->f_mapping)
goto put_out;
- addr = vma_address(vma, uprobe->offset);
- if (vaddr != (unsigned long)addr)
- goto put_out;
-
ret = -ENOMEM;
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
if (!new_page)
@@ -267,11 +250,7 @@ static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
vaddr_new = kmap_atomic(new_page);
memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
-
- /* poke the new insn in, ASSUMES we don't cross page boundary */
- vaddr &= ~PAGE_MASK;
- BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
- memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
+ memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);
kunmap_atomic(vaddr_new);
kunmap_atomic(vaddr_old);
@@ -291,6 +270,8 @@ unlock_out:
put_out:
put_page(old_page);
+ if (unlikely(ret == -EAGAIN))
+ goto retry;
return ret;
}
@@ -312,7 +293,7 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_
void *vaddr_new;
int ret;
- ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
+ ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
if (ret <= 0)
return ret;
@@ -333,10 +314,20 @@ static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
uprobe_opcode_t opcode;
int result;
+ if (current->mm == mm) {
+ pagefault_disable();
+ result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
+ sizeof(opcode));
+ pagefault_enable();
+
+ if (likely(result == 0))
+ goto out;
+ }
+
result = read_opcode(mm, vaddr, &opcode);
if (result)
return result;
-
+out:
if (is_swbp_insn(&opcode))
return 1;
@@ -355,7 +346,9 @@ static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
int result;
-
+ /*
+ * See the comment near uprobes_hash().
+ */
result = is_swbp_at_addr(mm, vaddr);
if (result == 1)
return -EEXIST;
@@ -520,7 +513,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
uprobe->inode = igrab(inode);
uprobe->offset = offset;
init_rwsem(&uprobe->consumer_rwsem);
- INIT_LIST_HEAD(&uprobe->pending_list);
/* add to uprobes_tree, sorted on inode:offset */
cur_uprobe = insert_uprobe(uprobe);
@@ -588,20 +580,22 @@ static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
}
static int
-__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
- unsigned long nbytes, unsigned long offset)
+__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
+ unsigned long nbytes, loff_t offset)
{
- struct file *filp = vma->vm_file;
struct page *page;
void *vaddr;
- unsigned long off1;
- unsigned long idx;
+ unsigned long off;
+ pgoff_t idx;
if (!filp)
return -EINVAL;
- idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
- off1 = offset &= ~PAGE_MASK;
+ if (!mapping->a_ops->readpage)
+ return -EIO;
+
+ idx = offset >> PAGE_CACHE_SHIFT;
+ off = offset & ~PAGE_MASK;
/*
* Ensure that the page that has the original instruction is
@@ -612,22 +606,20 @@ __copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *ins
return PTR_ERR(page);
vaddr = kmap_atomic(page);
- memcpy(insn, vaddr + off1, nbytes);
+ memcpy(insn, vaddr + off, nbytes);
kunmap_atomic(vaddr);
page_cache_release(page);
return 0;
}
-static int
-copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
+static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
struct address_space *mapping;
unsigned long nbytes;
int bytes;
- addr &= ~PAGE_MASK;
- nbytes = PAGE_SIZE - addr;
+ nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
mapping = uprobe->inode->i_mapping;
/* Instruction at end of binary; copy only available bytes */
@@ -638,13 +630,13 @@ copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
/* Instruction at the page-boundary; copy bytes in second page */
if (nbytes < bytes) {
- if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
- bytes - nbytes, uprobe->offset + nbytes))
- return -ENOMEM;
-
+ int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
+ bytes - nbytes, uprobe->offset + nbytes);
+ if (err)
+ return err;
bytes = nbytes;
}
- return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
+ return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
}
/*
@@ -672,9 +664,8 @@ copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
*/
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
- struct vm_area_struct *vma, loff_t vaddr)
+ struct vm_area_struct *vma, unsigned long vaddr)
{
- unsigned long addr;
int ret;
/*
@@ -687,20 +678,22 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
if (!uprobe->consumers)
return -EEXIST;
- addr = (unsigned long)vaddr;
-
if (!(uprobe->flags & UPROBE_COPY_INSN)) {
- ret = copy_insn(uprobe, vma, addr);
+ ret = copy_insn(uprobe, vma->vm_file);
if (ret)
return ret;
if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
- return -EEXIST;
+ return -ENOTSUPP;
- ret = arch_uprobe_analyze_insn(&uprobe->arch, mm);
+ ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
if (ret)
return ret;
+ /* write_opcode() assumes we don't cross page boundary */
+ BUG_ON((uprobe->offset & ~PAGE_MASK) +
+ UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
+
uprobe->flags |= UPROBE_COPY_INSN;
}
@@ -713,7 +706,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
* Hence increment before and decrement on failure.
*/
atomic_inc(&mm->uprobes_state.count);
- ret = set_swbp(&uprobe->arch, mm, addr);
+ ret = set_swbp(&uprobe->arch, mm, vaddr);
if (ret)
atomic_dec(&mm->uprobes_state.count);
@@ -721,27 +714,21 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
}
static void
-remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
+remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
- if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true))
+ if (!set_orig_insn(&uprobe->arch, mm, vaddr, true))
atomic_dec(&mm->uprobes_state.count);
}
/*
- * There could be threads that have hit the breakpoint and are entering the
- * notifier code and trying to acquire the uprobes_treelock. The thread
- * calling delete_uprobe() that is removing the uprobe from the rb_tree can
- * race with these threads and might acquire the uprobes_treelock compared
- * to some of the breakpoint hit threads. In such a case, the breakpoint
- * hit threads will not find the uprobe. The current unregistering thread
- * waits till all other threads have hit a breakpoint, to acquire the
- * uprobes_treelock before the uprobe is removed from the rbtree.
+ * There could be threads that have already hit the breakpoint. They
+ * will recheck the current insn and restart if find_uprobe() fails.
+ * See find_active_uprobe().
*/
static void delete_uprobe(struct uprobe *uprobe)
{
unsigned long flags;
- synchronize_srcu(&uprobes_srcu);
spin_lock_irqsave(&uprobes_treelock, flags);
rb_erase(&uprobe->rb_node, &uprobes_tree);
spin_unlock_irqrestore(&uprobes_treelock, flags);
@@ -750,139 +737,135 @@ static void delete_uprobe(struct uprobe *uprobe)
atomic_dec(&uprobe_events);
}
-static struct vma_info *
-__find_next_vma_info(struct address_space *mapping, struct list_head *head,
- struct vma_info *vi, loff_t offset, bool is_register)
+struct map_info {
+ struct map_info *next;
+ struct mm_struct *mm;
+ unsigned long vaddr;
+};
+
+static inline struct map_info *free_map_info(struct map_info *info)
+{
+ struct map_info *next = info->next;
+ kfree(info);
+ return next;
+}
+
+static struct map_info *
+build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
+ unsigned long pgoff = offset >> PAGE_SHIFT;
struct prio_tree_iter iter;
struct vm_area_struct *vma;
- struct vma_info *tmpvi;
- unsigned long pgoff;
- int existing_vma;
- loff_t vaddr;
-
- pgoff = offset >> PAGE_SHIFT;
+ struct map_info *curr = NULL;
+ struct map_info *prev = NULL;
+ struct map_info *info;
+ int more = 0;
+ again:
+ mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
if (!valid_vma(vma, is_register))
continue;
- existing_vma = 0;
- vaddr = vma_address(vma, offset);
-
- list_for_each_entry(tmpvi, head, probe_list) {
- if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
- existing_vma = 1;
- break;
- }
+ if (!prev && !more) {
+ /*
+ * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
+ * reclaim. This is optimistic, no harm done if it fails.
+ */
+ prev = kmalloc(sizeof(struct map_info),
+ GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (prev)
+ prev->next = NULL;
}
-
- /*
- * Another vma needs a probe to be installed. However skip
- * installing the probe if the vma is about to be unlinked.
- */
- if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
- vi->mm = vma->vm_mm;
- vi->vaddr = vaddr;
- list_add(&vi->probe_list, head);
-
- return vi;
+ if (!prev) {
+ more++;
+ continue;
}
- }
- return NULL;
-}
-
-/*
- * Iterate in the rmap prio tree and find a vma where a probe has not
- * yet been inserted.
- */
-static struct vma_info *
-find_next_vma_info(struct address_space *mapping, struct list_head *head,
- loff_t offset, bool is_register)
-{
- struct vma_info *vi, *retvi;
+ if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
+ continue;
- vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
- if (!vi)
- return ERR_PTR(-ENOMEM);
+ info = prev;
+ prev = prev->next;
+ info->next = curr;
+ curr = info;
- mutex_lock(&mapping->i_mmap_mutex);
- retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
+ info->mm = vma->vm_mm;
+ info->vaddr = vma_address(vma, offset);
+ }
mutex_unlock(&mapping->i_mmap_mutex);
- if (!retvi)
- kfree(vi);
+ if (!more)
+ goto out;
+
+ prev = curr;
+ while (curr) {
+ mmput(curr->mm);
+ curr = curr->next;
+ }
- return retvi;
+ do {
+ info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
+ if (!info) {
+ curr = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ info->next = prev;
+ prev = info;
+ } while (--more);
+
+ goto again;
+ out:
+ while (prev)
+ prev = free_map_info(prev);
+ return curr;
}
static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
- struct list_head try_list;
- struct vm_area_struct *vma;
- struct address_space *mapping;
- struct vma_info *vi, *tmpvi;
- struct mm_struct *mm;
- loff_t vaddr;
- int ret;
+ struct map_info *info;
+ int err = 0;
- mapping = uprobe->inode->i_mapping;
- INIT_LIST_HEAD(&try_list);
+ info = build_map_info(uprobe->inode->i_mapping,
+ uprobe->offset, is_register);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
- ret = 0;
+ while (info) {
+ struct mm_struct *mm = info->mm;
+ struct vm_area_struct *vma;
- for (;;) {
- vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
- if (!vi)
- break;
+ if (err)
+ goto free;
- if (IS_ERR(vi)) {
- ret = PTR_ERR(vi);
- break;
- }
+ down_write(&mm->mmap_sem);
+ vma = find_vma(mm, (unsigned long)info->vaddr);
+ if (!vma || !valid_vma(vma, is_register))
+ goto unlock;
- mm = vi->mm;
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, (unsigned long)vi->vaddr);
- if (!vma || !valid_vma(vma, is_register)) {
- list_del(&vi->probe_list);
- kfree(vi);
- up_read(&mm->mmap_sem);
- mmput(mm);
- continue;
- }
- vaddr = vma_address(vma, uprobe->offset);
if (vma->vm_file->f_mapping->host != uprobe->inode ||
- vaddr != vi->vaddr) {
- list_del(&vi->probe_list);
- kfree(vi);
- up_read(&mm->mmap_sem);
- mmput(mm);
- continue;
- }
-
- if (is_register)
- ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
- else
- remove_breakpoint(uprobe, mm, vi->vaddr);
+ vma_address(vma, uprobe->offset) != info->vaddr)
+ goto unlock;
- up_read(&mm->mmap_sem);
- mmput(mm);
if (is_register) {
- if (ret && ret == -EEXIST)
- ret = 0;
- if (ret)
- break;
+ err = install_breakpoint(uprobe, mm, vma, info->vaddr);
+ /*
+ * We can race against uprobe_mmap(), see the
+ * comment near uprobe_hash().
+ */
+ if (err == -EEXIST)
+ err = 0;
+ } else {
+ remove_breakpoint(uprobe, mm, info->vaddr);
}
+ unlock:
+ up_write(&mm->mmap_sem);
+ free:
+ mmput(mm);
+ info = free_map_info(info);
}
- list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
- list_del(&vi->probe_list);
- kfree(vi);
- }
-
- return ret;
+ return err;
}
static int __uprobe_register(struct uprobe *uprobe)
@@ -1048,7 +1031,7 @@ static void build_probe_list(struct inode *inode, struct list_head *head)
int uprobe_mmap(struct vm_area_struct *vma)
{
struct list_head tmp_list;
- struct uprobe *uprobe, *u;
+ struct uprobe *uprobe;
struct inode *inode;
int ret, count;
@@ -1066,12 +1049,9 @@ int uprobe_mmap(struct vm_area_struct *vma)
ret = 0;
count = 0;
- list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
- loff_t vaddr;
-
- list_del(&uprobe->pending_list);
+ list_for_each_entry(uprobe, &tmp_list, pending_list) {
if (!ret) {
- vaddr = vma_address(vma, uprobe->offset);
+ loff_t vaddr = vma_address(vma, uprobe->offset);
if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
put_uprobe(uprobe);
@@ -1079,8 +1059,10 @@ int uprobe_mmap(struct vm_area_struct *vma)
}
ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
-
- /* Ignore double add: */
+ /*
+ * We can race against uprobe_register(), see the
+ * comment near uprobe_hash().
+ */
if (ret == -EEXIST) {
ret = 0;
@@ -1115,7 +1097,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
struct list_head tmp_list;
- struct uprobe *uprobe, *u;
+ struct uprobe *uprobe;
struct inode *inode;
if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
@@ -1132,11 +1114,8 @@ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned lon
mutex_lock(uprobes_mmap_hash(inode));
build_probe_list(inode, &tmp_list);
- list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
- loff_t vaddr;
-
- list_del(&uprobe->pending_list);
- vaddr = vma_address(vma, uprobe->offset);
+ list_for_each_entry(uprobe, &tmp_list, pending_list) {
+ loff_t vaddr = vma_address(vma, uprobe->offset);
if (vaddr >= start && vaddr < end) {
/*
@@ -1378,9 +1357,6 @@ void uprobe_free_utask(struct task_struct *t)
{
struct uprobe_task *utask = t->utask;
- if (t->uprobe_srcu_id != -1)
- srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);
-
if (!utask)
return;
@@ -1398,7 +1374,6 @@ void uprobe_free_utask(struct task_struct *t)
void uprobe_copy_process(struct task_struct *t)
{
t->utask = NULL;
- t->uprobe_srcu_id = -1;
}
/*
@@ -1417,7 +1392,6 @@ static struct uprobe_task *add_utask(void)
if (unlikely(!utask))
return NULL;
- utask->active_uprobe = NULL;
current->utask = utask;
return utask;
}
@@ -1479,41 +1453,64 @@ static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
return false;
}
+static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
+{
+ struct mm_struct *mm = current->mm;
+ struct uprobe *uprobe = NULL;
+ struct vm_area_struct *vma;
+
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, bp_vaddr);
+ if (vma && vma->vm_start <= bp_vaddr) {
+ if (valid_vma(vma, false)) {
+ struct inode *inode;
+ loff_t offset;
+
+ inode = vma->vm_file->f_mapping->host;
+ offset = bp_vaddr - vma->vm_start;
+ offset += (vma->vm_pgoff << PAGE_SHIFT);
+ uprobe = find_uprobe(inode, offset);
+ }
+
+ if (!uprobe)
+ *is_swbp = is_swbp_at_addr(mm, bp_vaddr);
+ } else {
+ *is_swbp = -EFAULT;
+ }
+ up_read(&mm->mmap_sem);
+
+ return uprobe;
+}
+
/*
* Run handler and ask thread to singlestep.
* Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
*/
static void handle_swbp(struct pt_regs *regs)
{
- struct vm_area_struct *vma;
struct uprobe_task *utask;
struct uprobe *uprobe;
- struct mm_struct *mm;
unsigned long bp_vaddr;
+ int uninitialized_var(is_swbp);
- uprobe = NULL;
bp_vaddr = uprobe_get_swbp_addr(regs);
- mm = current->mm;
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, bp_vaddr);
-
- if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
- struct inode *inode;
- loff_t offset;
-
- inode = vma->vm_file->f_mapping->host;
- offset = bp_vaddr - vma->vm_start;
- offset += (vma->vm_pgoff << PAGE_SHIFT);
- uprobe = find_uprobe(inode, offset);
- }
-
- srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
- current->uprobe_srcu_id = -1;
- up_read(&mm->mmap_sem);
+ uprobe = find_active_uprobe(bp_vaddr, &is_swbp);
if (!uprobe) {
- /* No matching uprobe; signal SIGTRAP. */
- send_sig(SIGTRAP, current, 0);
+ if (is_swbp > 0) {
+ /* No matching uprobe; signal SIGTRAP. */
+ send_sig(SIGTRAP, current, 0);
+ } else {
+ /*
+ * Either we raced with uprobe_unregister() or we can't
+ * access this memory. The latter is only possible if
+ * another thread plays with our ->mm. In both cases
+ * we can simply restart. If this vma was unmapped we
+ * can pretend this insn was not executed yet and get
+ * the (correct) SIGSEGV after restart.
+ */
+ instruction_pointer_set(regs, bp_vaddr);
+ }
return;
}
@@ -1620,7 +1617,6 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs)
utask->state = UTASK_BP_HIT;
set_thread_flag(TIF_UPROBE);
- current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);
return 1;
}
@@ -1655,7 +1651,6 @@ static int __init init_uprobes(void)
mutex_init(&uprobes_mutex[i]);
mutex_init(&uprobes_mmap_mutex[i]);
}
- init_srcu_struct(&uprobes_srcu);
return register_die_notifier(&uprobe_exception_nb);
}
diff --git a/kernel/exit.c b/kernel/exit.c
index 2f59cc334516..d17f6c4ddfa9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -953,14 +953,11 @@ void do_exit(long code)
exit_signals(tsk); /* sets PF_EXITING */
/*
* tsk->flags are checked in the futex code to protect against
- * an exiting task cleaning up the robust pi futexes, and in
- * task_work_add() to avoid the race with exit_task_work().
+ * an exiting task cleaning up the robust pi futexes.
*/
smp_mb();
raw_spin_unlock_wait(&tsk->pi_lock);
- exit_task_work(tsk);
-
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
current->comm, task_pid_nr(current),
@@ -995,6 +992,7 @@ void do_exit(long code)
exit_shm(tsk);
exit_files(tsk);
exit_fs(tsk);
+ exit_task_work(tsk);
check_stack_usage();
exit_thread();
diff --git a/kernel/fork.c b/kernel/fork.c
index ab5211b9e622..ff1cad3b7bdc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -304,12 +304,17 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
}
err = arch_dup_task_struct(tsk, orig);
- if (err)
- goto out;
+ /*
+ * We defer looking at err, because we will need this setup
+ * for the clean up path to work correctly.
+ */
tsk->stack = ti;
-
setup_thread_stack(tsk, orig);
+
+ if (err)
+ goto out;
+
clear_user_return_notifier(tsk);
clear_tsk_need_resched(tsk);
stackend = end_of_stack(tsk);
@@ -1415,7 +1420,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
*/
p->group_leader = p;
INIT_LIST_HEAD(&p->thread_group);
- INIT_HLIST_HEAD(&p->task_works);
+ p->task_works = NULL;
/* Now that the task is set up, run cgroup callbacks if
* necessary. We need to run them before the task is visible
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ae34bf51682b..6db7a5ed52b5 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -657,6 +657,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
return 0;
}
+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+ ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+ ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+
+ return ktime_get_update_offsets(offs_real, offs_boot);
+}
+
/*
* Retrigger next event is called after clock was set
*
@@ -665,22 +673,12 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
static void retrigger_next_event(void *arg)
{
struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
- struct timespec realtime_offset, xtim, wtm, sleep;
if (!hrtimer_hres_active())
return;
- /* Optimized out for !HIGH_RES */
- get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
- set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
- /* Adjust CLOCK_REALTIME offset */
raw_spin_lock(&base->lock);
- base->clock_base[HRTIMER_BASE_REALTIME].offset =
- timespec_to_ktime(realtime_offset);
- base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
- timespec_to_ktime(sleep);
-
+ hrtimer_update_base(base);
hrtimer_force_reprogram(base, 0);
raw_spin_unlock(&base->lock);
}
@@ -710,13 +708,25 @@ static int hrtimer_switch_to_hres(void)
base->clock_base[i].resolution = KTIME_HIGH_RES;
tick_setup_sched_timer();
-
/* "Retrigger" the interrupt to get things going */
retrigger_next_event(NULL);
local_irq_restore(flags);
return 1;
}
+/*
+ * Called from timekeeping code to reprogramm the hrtimer interrupt
+ * device. If called from the timer interrupt context we defer it to
+ * softirq context.
+ */
+void clock_was_set_delayed(void)
+{
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+ cpu_base->clock_was_set = 1;
+ __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+}
+
#else
static inline int hrtimer_hres_active(void) { return 0; }
@@ -1250,11 +1260,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
cpu_base->nr_events++;
dev->next_event.tv64 = KTIME_MAX;
- entry_time = now = ktime_get();
+ raw_spin_lock(&cpu_base->lock);
+ entry_time = now = hrtimer_update_base(cpu_base);
retry:
expires_next.tv64 = KTIME_MAX;
-
- raw_spin_lock(&cpu_base->lock);
/*
* We set expires_next to KTIME_MAX here with cpu_base->lock
* held to prevent that a timer is enqueued in our queue via
@@ -1330,8 +1339,12 @@ retry:
* We need to prevent that we loop forever in the hrtimer
* interrupt routine. We give it 3 attempts to avoid
* overreacting on some spurious event.
+ *
+ * Acquire base lock for updating the offsets and retrieving
+ * the current time.
*/
- now = ktime_get();
+ raw_spin_lock(&cpu_base->lock);
+ now = hrtimer_update_base(cpu_base);
cpu_base->nr_retries++;
if (++retries < 3)
goto retry;
@@ -1343,6 +1356,7 @@ retry:
*/
cpu_base->nr_hangs++;
cpu_base->hang_detected = 1;
+ raw_spin_unlock(&cpu_base->lock);
delta = ktime_sub(now, entry_time);
if (delta.tv64 > cpu_base->max_hang_time.tv64)
cpu_base->max_hang_time = delta;
@@ -1395,6 +1409,13 @@ void hrtimer_peek_ahead_timers(void)
static void run_hrtimer_softirq(struct softirq_action *h)
{
+ struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+
+ if (cpu_base->clock_was_set) {
+ cpu_base->clock_was_set = 0;
+ clock_was_set();
+ }
+
hrtimer_peek_ahead_timers();
}
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 41c1564103f1..38c5eb839c92 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -448,7 +448,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
}
pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
- hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);
+ hwirq, of_node_full_name(domain->of_node), virq);
return virq;
}
@@ -477,7 +477,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
return intspec[0];
#endif
pr_warning("no irq domain found for %s !\n",
- controller->full_name);
+ of_node_full_name(controller));
return 0;
}
@@ -725,8 +725,8 @@ static int virq_debug_show(struct seq_file *m, void *private)
data = irq_desc_get_chip_data(desc);
seq_printf(m, data ? "0x%p " : " %p ", data);
- if (desc->irq_data.domain && desc->irq_data.domain->of_node)
- p = desc->irq_data.domain->of_node->full_name;
+ if (desc->irq_data.domain)
+ p = of_node_full_name(desc->irq_data.domain->of_node);
else
p = none;
seq_printf(m, "%s\n", p);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8c548232ba39..814c9ef6bba1 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -781,7 +781,7 @@ static void wake_threads_waitq(struct irq_desc *desc)
wake_up(&desc->wait_for_threads);
}
-static void irq_thread_dtor(struct task_work *unused)
+static void irq_thread_dtor(struct callback_head *unused)
{
struct task_struct *tsk = current;
struct irq_desc *desc;
@@ -813,7 +813,7 @@ static void irq_thread_dtor(struct task_work *unused)
*/
static int irq_thread(void *data)
{
- struct task_work on_exit_work;
+ struct callback_head on_exit_work;
static const struct sched_param param = {
.sched_priority = MAX_USER_RT_PRIO/2,
};
@@ -830,7 +830,7 @@ static int irq_thread(void *data)
sched_setscheduler(current, SCHED_FIFO, &param);
- init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+ init_task_work(&on_exit_work, irq_thread_dtor);
task_work_add(current, &on_exit_work, false);
while (!irq_wait_for_interrupt(action)) {
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3d3de633702e..b579af57ea10 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -360,16 +360,12 @@ repeat:
struct kthread_work, node);
list_del_init(&work->node);
}
+ worker->current_work = work;
spin_unlock_irq(&worker->lock);
if (work) {
__set_current_state(TASK_RUNNING);
work->func(work);
- smp_wmb(); /* wmb worker-b0 paired with flush-b1 */
- work->done_seq = work->queue_seq;
- smp_mb(); /* mb worker-b1 paired with flush-b0 */
- if (atomic_read(&work->flushing))
- wake_up_all(&work->done);
} else if (!freezing(current))
schedule();
@@ -378,6 +374,19 @@ repeat:
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
+/* insert @work before @pos in @worker */
+static void insert_kthread_work(struct kthread_worker *worker,
+ struct kthread_work *work,
+ struct list_head *pos)
+{
+ lockdep_assert_held(&worker->lock);
+
+ list_add_tail(&work->node, pos);
+ work->worker = worker;
+ if (likely(worker->task))
+ wake_up_process(worker->task);
+}
+
/**
* queue_kthread_work - queue a kthread_work
* @worker: target kthread_worker
@@ -395,10 +404,7 @@ bool queue_kthread_work(struct kthread_worker *worker,
spin_lock_irqsave(&worker->lock, flags);
if (list_empty(&work->node)) {
- list_add_tail(&work->node, &worker->work_list);
- work->queue_seq++;
- if (likely(worker->task))
- wake_up_process(worker->task);
+ insert_kthread_work(worker, work, &worker->work_list);
ret = true;
}
spin_unlock_irqrestore(&worker->lock, flags);
@@ -406,6 +412,18 @@ bool queue_kthread_work(struct kthread_worker *worker,
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
+struct kthread_flush_work {
+ struct kthread_work work;
+ struct completion done;
+};
+
+static void kthread_flush_work_fn(struct kthread_work *work)
+{
+ struct kthread_flush_work *fwork =
+ container_of(work, struct kthread_flush_work, work);
+ complete(&fwork->done);
+}
+
/**
* flush_kthread_work - flush a kthread_work
* @work: work to flush
@@ -414,39 +432,37 @@ EXPORT_SYMBOL_GPL(queue_kthread_work);
*/
void flush_kthread_work(struct kthread_work *work)
{
- int seq = work->queue_seq;
-
- atomic_inc(&work->flushing);
+ struct kthread_flush_work fwork = {
+ KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
+ COMPLETION_INITIALIZER_ONSTACK(fwork.done),
+ };
+ struct kthread_worker *worker;
+ bool noop = false;
- /*
- * mb flush-b0 paired with worker-b1, to make sure either
- * worker sees the above increment or we see done_seq update.
- */
- smp_mb__after_atomic_inc();
+retry:
+ worker = work->worker;
+ if (!worker)
+ return;
- /* A - B <= 0 tests whether B is in front of A regardless of overflow */
- wait_event(work->done, seq - work->done_seq <= 0);
- atomic_dec(&work->flushing);
+ spin_lock_irq(&worker->lock);
+ if (work->worker != worker) {
+ spin_unlock_irq(&worker->lock);
+ goto retry;
+ }
- /*
- * rmb flush-b1 paired with worker-b0, to make sure our caller
- * sees every change made by work->func().
- */
- smp_mb__after_atomic_dec();
-}
-EXPORT_SYMBOL_GPL(flush_kthread_work);
+ if (!list_empty(&work->node))
+ insert_kthread_work(worker, &fwork.work, work->node.next);
+ else if (worker->current_work == work)
+ insert_kthread_work(worker, &fwork.work, worker->work_list.next);
+ else
+ noop = true;
-struct kthread_flush_work {
- struct kthread_work work;
- struct completion done;
-};
+ spin_unlock_irq(&worker->lock);
-static void kthread_flush_work_fn(struct kthread_work *work)
-{
- struct kthread_flush_work *fwork =
- container_of(work, struct kthread_flush_work, work);
- complete(&fwork->done);
+ if (!noop)
+ wait_for_completion(&fwork.done);
}
+EXPORT_SYMBOL_GPL(flush_kthread_work);
/**
* flush_kthread_worker - flush all current works on a kthread_worker
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 8f9b4eb974e0..a70518c9d82f 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -175,7 +175,7 @@ config PM_TEST_SUSPEND
You probably want to have your system's RTC driver statically
linked, ensuring that it's available when this test runs.
-config CAN_PM_TRACE
+config PM_SLEEP_DEBUG
def_bool y
depends on PM_DEBUG && PM_SLEEP
@@ -196,7 +196,7 @@ config PM_TRACE
config PM_TRACE_RTC
bool "Suspend/resume event tracing"
- depends on CAN_PM_TRACE
+ depends on PM_SLEEP_DEBUG
depends on X86
select PM_TRACE
---help---
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 8b53db38a279..b26f5f1e773e 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -5,6 +5,7 @@
* Copyright (c) 2003 Open Source Development Lab
* Copyright (c) 2004 Pavel Machek <pavel@ucw.cz>
* Copyright (c) 2009 Rafael J. Wysocki, Novell Inc.
+ * Copyright (C) 2012 Bojan Smojver <bojan@rexursive.com>
*
* This file is released under the GPLv2.
*/
@@ -27,7 +28,6 @@
#include <linux/syscore_ops.h>
#include <linux/ctype.h>
#include <linux/genhd.h>
-#include <scsi/scsi_scan.h>
#include "power.h"
@@ -46,6 +46,9 @@ enum {
HIBERNATION_PLATFORM,
HIBERNATION_SHUTDOWN,
HIBERNATION_REBOOT,
+#ifdef CONFIG_SUSPEND
+ HIBERNATION_SUSPEND,
+#endif
/* keep last */
__HIBERNATION_AFTER_LAST
};
@@ -354,6 +357,7 @@ int hibernation_snapshot(int platform_mode)
}
suspend_console();
+ ftrace_stop();
pm_restrict_gfp_mask();
error = dpm_suspend(PMSG_FREEZE);
@@ -379,6 +383,7 @@ int hibernation_snapshot(int platform_mode)
if (error || !in_suspend)
pm_restore_gfp_mask();
+ ftrace_start();
resume_console();
dpm_complete(msg);
@@ -481,6 +486,7 @@ int hibernation_restore(int platform_mode)
pm_prepare_console();
suspend_console();
+ ftrace_stop();
pm_restrict_gfp_mask();
error = dpm_suspend_start(PMSG_QUIESCE);
if (!error) {
@@ -488,6 +494,7 @@ int hibernation_restore(int platform_mode)
dpm_resume_end(PMSG_RECOVER);
}
pm_restore_gfp_mask();
+ ftrace_start();
resume_console();
pm_restore_console();
return error;
@@ -514,6 +521,7 @@ int hibernation_platform_enter(void)
entering_platform_hibernation = true;
suspend_console();
+ ftrace_stop();
error = dpm_suspend_start(PMSG_HIBERNATE);
if (error) {
if (hibernation_ops->recover)
@@ -557,6 +565,7 @@ int hibernation_platform_enter(void)
Resume_devices:
entering_platform_hibernation = false;
dpm_resume_end(PMSG_RESTORE);
+ ftrace_start();
resume_console();
Close:
@@ -574,6 +583,10 @@ int hibernation_platform_enter(void)
*/
static void power_down(void)
{
+#ifdef CONFIG_SUSPEND
+ int error;
+#endif
+
switch (hibernation_mode) {
case HIBERNATION_REBOOT:
kernel_restart(NULL);
@@ -583,6 +596,25 @@ static void power_down(void)
case HIBERNATION_SHUTDOWN:
kernel_power_off();
break;
+#ifdef CONFIG_SUSPEND
+ case HIBERNATION_SUSPEND:
+ error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+ if (error) {
+ if (hibernation_ops)
+ hibernation_mode = HIBERNATION_PLATFORM;
+ else
+ hibernation_mode = HIBERNATION_SHUTDOWN;
+ power_down();
+ }
+ /*
+ * Restore swap signature.
+ */
+ error = swsusp_unmark();
+ if (error)
+ printk(KERN_ERR "PM: Swap will be unusable! "
+ "Try swapon -a.\n");
+ return;
+#endif
}
kernel_halt();
/*
@@ -748,13 +780,6 @@ static int software_resume(void)
async_synchronize_full();
}
- /*
- * We can't depend on SCSI devices being available after loading
- * one of their modules until scsi_complete_async_scans() is
- * called and the resume device usually is a SCSI one.
- */
- scsi_complete_async_scans();
-
swsusp_resume_device = name_to_dev_t(resume_file);
if (!swsusp_resume_device) {
error = -ENODEV;
@@ -827,6 +852,9 @@ static const char * const hibernation_modes[] = {
[HIBERNATION_PLATFORM] = "platform",
[HIBERNATION_SHUTDOWN] = "shutdown",
[HIBERNATION_REBOOT] = "reboot",
+#ifdef CONFIG_SUSPEND
+ [HIBERNATION_SUSPEND] = "suspend",
+#endif
};
/*
@@ -867,6 +895,9 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
switch (i) {
case HIBERNATION_SHUTDOWN:
case HIBERNATION_REBOOT:
+#ifdef CONFIG_SUSPEND
+ case HIBERNATION_SUSPEND:
+#endif
break;
case HIBERNATION_PLATFORM:
if (hibernation_ops)
@@ -907,6 +938,9 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
switch (mode) {
case HIBERNATION_SHUTDOWN:
case HIBERNATION_REBOOT:
+#ifdef CONFIG_SUSPEND
+ case HIBERNATION_SUSPEND:
+#endif
hibernation_mode = mode;
break;
case HIBERNATION_PLATFORM:
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 428f8a034e96..f458238109cc 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -235,6 +235,47 @@ late_initcall(pm_debugfs_init);
#endif /* CONFIG_PM_SLEEP */
+#ifdef CONFIG_PM_SLEEP_DEBUG
+/*
+ * pm_print_times: print time taken by devices to suspend and resume.
+ *
+ * show() returns whether printing of suspend and resume times is enabled.
+ * store() accepts 0 or 1. 0 disables printing and 1 enables it.
+ */
+bool pm_print_times_enabled;
+
+static ssize_t pm_print_times_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", pm_print_times_enabled);
+}
+
+static ssize_t pm_print_times_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val > 1)
+ return -EINVAL;
+
+ pm_print_times_enabled = !!val;
+ return n;
+}
+
+power_attr(pm_print_times);
+
+static inline void pm_print_times_init(void)
+{
+ pm_print_times_enabled = !!initcall_debug;
+}
+#else /* !CONFIG_PP_SLEEP_DEBUG */
+static inline void pm_print_times_init(void) {}
+#endif /* CONFIG_PM_SLEEP_DEBUG */
+
struct kobject *power_kobj;
/**
@@ -531,6 +572,9 @@ static struct attribute * g[] = {
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
+#ifdef CONFIG_PM_SLEEP_DEBUG
+ &pm_print_times_attr.attr,
+#endif
#endif
NULL,
};
@@ -566,6 +610,7 @@ static int __init pm_init(void)
error = sysfs_create_group(power_kobj, &attr_group);
if (error)
return error;
+ pm_print_times_init();
return pm_autosleep_init();
}
diff --git a/kernel/power/power.h b/kernel/power/power.h
index b0bd4beaebfe..7d4b7ffb3c1d 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -156,6 +156,9 @@ extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);
+#ifdef CONFIG_SUSPEND
+extern int swsusp_unmark(void);
+#endif
/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 396d262b8fd0..c8b7446b27df 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -24,6 +24,7 @@
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
+#include <linux/ftrace.h>
#include <trace/events/power.h>
#include "power.h"
@@ -212,6 +213,7 @@ int suspend_devices_and_enter(suspend_state_t state)
goto Close;
}
suspend_console();
+ ftrace_stop();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
@@ -231,6 +233,7 @@ int suspend_devices_and_enter(suspend_state_t state)
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
+ ftrace_start();
resume_console();
Close:
if (suspend_ops->end)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 11e22c068e8b..3c9d764eb0d8 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -448,9 +448,9 @@ static int save_image(struct swap_map_handle *handle,
struct timeval start;
struct timeval stop;
- printk(KERN_INFO "PM: Saving image data pages (%u pages) ... ",
+ printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
nr_to_write);
- m = nr_to_write / 100;
+ m = nr_to_write / 10;
if (!m)
m = 1;
nr_pages = 0;
@@ -464,7 +464,8 @@ static int save_image(struct swap_map_handle *handle,
if (ret)
break;
if (!(nr_pages % m))
- printk(KERN_CONT "\b\b\b\b%3d%%", nr_pages / m);
+ printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
+ nr_pages / m * 10);
nr_pages++;
}
err2 = hib_wait_on_bio_chain(&bio);
@@ -472,9 +473,7 @@ static int save_image(struct swap_map_handle *handle,
if (!ret)
ret = err2;
if (!ret)
- printk(KERN_CONT "\b\b\b\bdone\n");
- else
- printk(KERN_CONT "\n");
+ printk(KERN_INFO "PM: Image saving done.\n");
swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
return ret;
}
@@ -668,9 +667,9 @@ static int save_image_lzo(struct swap_map_handle *handle,
printk(KERN_INFO
"PM: Using %u thread(s) for compression.\n"
- "PM: Compressing and saving image data (%u pages) ... ",
+ "PM: Compressing and saving image data (%u pages)...\n",
nr_threads, nr_to_write);
- m = nr_to_write / 100;
+ m = nr_to_write / 10;
if (!m)
m = 1;
nr_pages = 0;
@@ -690,8 +689,10 @@ static int save_image_lzo(struct swap_map_handle *handle,
data_of(*snapshot), PAGE_SIZE);
if (!(nr_pages % m))
- printk(KERN_CONT "\b\b\b\b%3d%%",
- nr_pages / m);
+ printk(KERN_INFO
+ "PM: Image saving progress: "
+ "%3d%%\n",
+ nr_pages / m * 10);
nr_pages++;
}
if (!off)
@@ -761,11 +762,8 @@ out_finish:
do_gettimeofday(&stop);
if (!ret)
ret = err2;
- if (!ret) {
- printk(KERN_CONT "\b\b\b\bdone\n");
- } else {
- printk(KERN_CONT "\n");
- }
+ if (!ret)
+ printk(KERN_INFO "PM: Image saving done.\n");
swsusp_show_speed(&start, &stop, nr_to_write, "Wrote");
out_clean:
if (crc) {
@@ -973,9 +971,9 @@ static int load_image(struct swap_map_handle *handle,
int err2;
unsigned nr_pages;
- printk(KERN_INFO "PM: Loading image data pages (%u pages) ... ",
+ printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
nr_to_read);
- m = nr_to_read / 100;
+ m = nr_to_read / 10;
if (!m)
m = 1;
nr_pages = 0;
@@ -993,7 +991,8 @@ static int load_image(struct swap_map_handle *handle,
if (ret)
break;
if (!(nr_pages % m))
- printk("\b\b\b\b%3d%%", nr_pages / m);
+ printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
+ nr_pages / m * 10);
nr_pages++;
}
err2 = hib_wait_on_bio_chain(&bio);
@@ -1001,12 +1000,11 @@ static int load_image(struct swap_map_handle *handle,
if (!ret)
ret = err2;
if (!ret) {
- printk("\b\b\b\bdone\n");
+ printk(KERN_INFO "PM: Image loading done.\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
ret = -ENODATA;
- } else
- printk("\n");
+ }
swsusp_show_speed(&start, &stop, nr_to_read, "Read");
return ret;
}
@@ -1185,9 +1183,9 @@ static int load_image_lzo(struct swap_map_handle *handle,
printk(KERN_INFO
"PM: Using %u thread(s) for decompression.\n"
- "PM: Loading and decompressing image data (%u pages) ... ",
+ "PM: Loading and decompressing image data (%u pages)...\n",
nr_threads, nr_to_read);
- m = nr_to_read / 100;
+ m = nr_to_read / 10;
if (!m)
m = 1;
nr_pages = 0;
@@ -1319,7 +1317,10 @@ static int load_image_lzo(struct swap_map_handle *handle,
data[thr].unc + off, PAGE_SIZE);
if (!(nr_pages % m))
- printk("\b\b\b\b%3d%%", nr_pages / m);
+ printk(KERN_INFO
+ "PM: Image loading progress: "
+ "%3d%%\n",
+ nr_pages / m * 10);
nr_pages++;
ret = snapshot_write_next(snapshot);
@@ -1344,7 +1345,7 @@ out_finish:
}
do_gettimeofday(&stop);
if (!ret) {
- printk("\b\b\b\bdone\n");
+ printk(KERN_INFO "PM: Image loading done.\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
ret = -ENODATA;
@@ -1357,8 +1358,7 @@ out_finish:
}
}
}
- } else
- printk("\n");
+ }
swsusp_show_speed(&start, &stop, nr_to_read, "Read");
out_clean:
for (i = 0; i < ring_size; i++)
@@ -1472,6 +1472,34 @@ void swsusp_close(fmode_t mode)
blkdev_put(hib_resume_bdev, mode);
}
+/**
+ * swsusp_unmark - Unmark swsusp signature in the resume device
+ */
+
+#ifdef CONFIG_SUSPEND
+int swsusp_unmark(void)
+{
+ int error;
+
+ hib_bio_read_page(swsusp_resume_block, swsusp_header, NULL);
+ if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) {
+ memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10);
+ error = hib_bio_write_page(swsusp_resume_block,
+ swsusp_header, NULL);
+ } else {
+ printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
+ error = -ENODEV;
+ }
+
+ /*
+ * We just returned from suspend, we don't need the image any more.
+ */
+ free_all_swap_pages(root_swap);
+
+ return error;
+}
+#endif
+
static int swsusp_header_init(void)
{
swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 91b0fd021a95..4ed81e74f86f 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -24,7 +24,6 @@
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
-#include <scsi/scsi_scan.h>
#include <asm/uaccess.h>
@@ -84,7 +83,6 @@ static int snapshot_open(struct inode *inode, struct file *filp)
* appear.
*/
wait_for_device_probe();
- scsi_complete_async_scans();
data->swap = -1;
data->mode = O_WRONLY;
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index c8fba3380076..8f50de394d22 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -9,6 +9,7 @@
* manipulate wakelocks on Android.
*/
+#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -188,6 +189,9 @@ int pm_wake_lock(const char *buf)
size_t len;
int ret = 0;
+ if (!capable(CAP_BLOCK_SUSPEND))
+ return -EPERM;
+
while (*str && !isspace(*str))
str++;
@@ -231,6 +235,9 @@ int pm_wake_unlock(const char *buf)
size_t len;
int ret = 0;
+ if (!capable(CAP_BLOCK_SUSPEND))
+ return -EPERM;
+
len = strlen(buf);
if (!len)
return -EINVAL;
diff --git a/kernel/printk.c b/kernel/printk.c
index a2276b916769..ac4bc9e79465 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -193,12 +193,21 @@ static int console_may_schedule;
* separated by ',', and find the message after the ';' character.
*/
+enum log_flags {
+ LOG_NOCONS = 1, /* already flushed, do not print to console */
+ LOG_NEWLINE = 2, /* text ended with a newline */
+ LOG_PREFIX = 4, /* text started with a prefix */
+ LOG_CONT = 8, /* text is a fragment of a continuation line */
+};
+
struct log {
u64 ts_nsec; /* timestamp in nanoseconds */
u16 len; /* length of entire record */
u16 text_len; /* length of text buffer */
u16 dict_len; /* length of dictionary buffer */
- u16 level; /* syslog level + facility */
+ u8 facility; /* syslog facility */
+ u8 flags:5; /* internal record flags */
+ u8 level:3; /* syslog level */
};
/*
@@ -210,6 +219,8 @@ static DEFINE_RAW_SPINLOCK(logbuf_lock);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static u32 syslog_idx;
+static enum log_flags syslog_prev;
+static size_t syslog_partial;
/* index and sequence number of the first record stored in the buffer */
static u64 log_first_seq;
@@ -286,6 +297,7 @@ static u32 log_next(u32 idx)
/* insert record into the buffer, discard old ones, update heads */
static void log_store(int facility, int level,
+ enum log_flags flags, u64 ts_nsec,
const char *dict, u16 dict_len,
const char *text, u16 text_len)
{
@@ -329,8 +341,13 @@ static void log_store(int facility, int level,
msg->text_len = text_len;
memcpy(log_dict(msg), dict, dict_len);
msg->dict_len = dict_len;
- msg->level = (facility << 3) | (level & 7);
- msg->ts_nsec = local_clock();
+ msg->facility = facility;
+ msg->level = level & 7;
+ msg->flags = flags & 0x1f;
+ if (ts_nsec > 0)
+ msg->ts_nsec = ts_nsec;
+ else
+ msg->ts_nsec = local_clock();
memset(log_dict(msg) + dict_len, 0, pad_len);
msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
@@ -417,20 +434,20 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
ret = mutex_lock_interruptible(&user->lock);
if (ret)
return ret;
- raw_spin_lock(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
while (user->seq == log_next_seq) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
goto out;
}
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
ret = wait_event_interruptible(log_wait,
user->seq != log_next_seq);
if (ret)
goto out;
- raw_spin_lock(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
}
if (user->seq < log_first_seq) {
@@ -438,7 +455,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
user->idx = log_first_idx;
user->seq = log_first_seq;
ret = -EPIPE;
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
goto out;
}
@@ -446,13 +463,13 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
ts_usec = msg->ts_nsec;
do_div(ts_usec, 1000);
len = sprintf(user->buf, "%u,%llu,%llu;",
- msg->level, user->seq, ts_usec);
+ (msg->facility << 3) | msg->level, user->seq, ts_usec);
/* escape non-printable characters */
for (i = 0; i < msg->text_len; i++) {
unsigned char c = log_text(msg)[i];
- if (c < ' ' || c >= 128)
+ if (c < ' ' || c >= 127 || c == '\\')
len += sprintf(user->buf + len, "\\x%02x", c);
else
user->buf[len++] = c;
@@ -476,7 +493,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
continue;
}
- if (c < ' ' || c >= 128) {
+ if (c < ' ' || c >= 127 || c == '\\') {
len += sprintf(user->buf + len, "\\x%02x", c);
continue;
}
@@ -488,7 +505,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
user->idx = log_next(user->idx);
user->seq++;
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
if (len > count) {
ret = -EINVAL;
@@ -515,7 +532,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
if (offset)
return -ESPIPE;
- raw_spin_lock(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
switch (whence) {
case SEEK_SET:
/* the first record */
@@ -539,7 +556,7 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
default:
ret = -EINVAL;
}
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
return ret;
}
@@ -553,14 +570,14 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
poll_wait(file, &log_wait, wait);
- raw_spin_lock(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
if (user->seq < log_next_seq) {
/* return error when data has vanished underneath us */
if (user->seq < log_first_seq)
ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
ret = POLLIN|POLLRDNORM;
}
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
return ret;
}
@@ -584,10 +601,10 @@ static int devkmsg_open(struct inode *inode, struct file *file)
mutex_init(&user->lock);
- raw_spin_lock(&logbuf_lock);
+ raw_spin_lock_irq(&logbuf_lock);
user->idx = log_first_idx;
user->seq = log_first_seq;
- raw_spin_unlock(&logbuf_lock);
+ raw_spin_unlock_irq(&logbuf_lock);
file->private_data = user;
return 0;
@@ -787,44 +804,64 @@ static bool printk_time;
#endif
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
+static size_t print_time(u64 ts, char *buf)
+{
+ unsigned long rem_nsec;
+
+ if (!printk_time)
+ return 0;
+
+ if (!buf)
+ return 15;
+
+ rem_nsec = do_div(ts, 1000000000);
+ return sprintf(buf, "[%5lu.%06lu] ",
+ (unsigned long)ts, rem_nsec / 1000);
+}
+
static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
{
size_t len = 0;
+ unsigned int prefix = (msg->facility << 3) | msg->level;
if (syslog) {
if (buf) {
- len += sprintf(buf, "<%u>", msg->level);
+ len += sprintf(buf, "<%u>", prefix);
} else {
len += 3;
- if (msg->level > 9)
+ if (prefix > 999)
+ len += 3;
+ else if (prefix > 99)
+ len += 2;
+ else if (prefix > 9)
len++;
- if (msg->level > 99)
- len++;
- }
- }
-
- if (printk_time) {
- if (buf) {
- unsigned long long ts = msg->ts_nsec;
- unsigned long rem_nsec = do_div(ts, 1000000000);
-
- len += sprintf(buf + len, "[%5lu.%06lu] ",
- (unsigned long) ts, rem_nsec / 1000);
- } else {
- len += 15;
}
}
+ len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
return len;
}
-static size_t msg_print_text(const struct log *msg, bool syslog,
- char *buf, size_t size)
+static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+ bool syslog, char *buf, size_t size)
{
const char *text = log_text(msg);
size_t text_size = msg->text_len;
+ bool prefix = true;
+ bool newline = true;
size_t len = 0;
+ if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
+ prefix = false;
+
+ if (msg->flags & LOG_CONT) {
+ if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
+ prefix = false;
+
+ if (!(msg->flags & LOG_NEWLINE))
+ newline = false;
+ }
+
do {
const char *next = memchr(text, '\n', text_size);
size_t text_len;
@@ -842,16 +879,22 @@ static size_t msg_print_text(const struct log *msg, bool syslog,
text_len + 1>= size - len)
break;
- len += print_prefix(msg, syslog, buf + len);
+ if (prefix)
+ len += print_prefix(msg, syslog, buf + len);
memcpy(buf + len, text, text_len);
len += text_len;
- buf[len++] = '\n';
+ if (next || newline)
+ buf[len++] = '\n';
} else {
/* SYSLOG_ACTION_* buffer size only calculation */
- len += print_prefix(msg, syslog, NULL);
- len += text_len + 1;
+ if (prefix)
+ len += print_prefix(msg, syslog, NULL);
+ len += text_len;
+ if (next || newline)
+ len++;
}
+ prefix = true;
text = next;
} while (text);
@@ -862,28 +905,60 @@ static int syslog_print(char __user *buf, int size)
{
char *text;
struct log *msg;
- int len;
+ int len = 0;
text = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
if (!text)
return -ENOMEM;
- raw_spin_lock_irq(&logbuf_lock);
- if (syslog_seq < log_first_seq) {
- /* messages are gone, move to first one */
- syslog_seq = log_first_seq;
- syslog_idx = log_first_idx;
- }
- msg = log_from_idx(syslog_idx);
- len = msg_print_text(msg, true, text, LOG_LINE_MAX);
- syslog_idx = log_next(syslog_idx);
- syslog_seq++;
- raw_spin_unlock_irq(&logbuf_lock);
+ while (size > 0) {
+ size_t n;
+ size_t skip;
- if (len > size)
- len = -EINVAL;
- else if (len > 0 && copy_to_user(buf, text, len))
- len = -EFAULT;
+ raw_spin_lock_irq(&logbuf_lock);
+ if (syslog_seq < log_first_seq) {
+ /* messages are gone, move to first one */
+ syslog_seq = log_first_seq;
+ syslog_idx = log_first_idx;
+ syslog_prev = 0;
+ syslog_partial = 0;
+ }
+ if (syslog_seq == log_next_seq) {
+ raw_spin_unlock_irq(&logbuf_lock);
+ break;
+ }
+
+ skip = syslog_partial;
+ msg = log_from_idx(syslog_idx);
+ n = msg_print_text(msg, syslog_prev, true, text, LOG_LINE_MAX);
+ if (n - syslog_partial <= size) {
+ /* message fits into buffer, move forward */
+ syslog_idx = log_next(syslog_idx);
+ syslog_seq++;
+ syslog_prev = msg->flags;
+ n -= syslog_partial;
+ syslog_partial = 0;
+ } else if (!len){
+ /* partial read(), remember position */
+ n = size;
+ syslog_partial += n;
+ } else
+ n = 0;
+ raw_spin_unlock_irq(&logbuf_lock);
+
+ if (!n)
+ break;
+
+ if (copy_to_user(buf, text + skip, n)) {
+ if (!len)
+ len = -EFAULT;
+ break;
+ }
+
+ len += n;
+ size -= n;
+ buf += n;
+ }
kfree(text);
return len;
@@ -903,6 +978,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
u64 next_seq;
u64 seq;
u32 idx;
+ enum log_flags prev;
if (clear_seq < log_first_seq) {
/* messages are gone, move to first available one */
@@ -916,10 +992,11 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
*/
seq = clear_seq;
idx = clear_idx;
+ prev = 0;
while (seq < log_next_seq) {
struct log *msg = log_from_idx(idx);
- len += msg_print_text(msg, true, NULL, 0);
+ len += msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
seq++;
}
@@ -927,10 +1004,11 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
/* move first record forward until length fits into the buffer */
seq = clear_seq;
idx = clear_idx;
+ prev = 0;
while (len > size && seq < log_next_seq) {
struct log *msg = log_from_idx(idx);
- len -= msg_print_text(msg, true, NULL, 0);
+ len -= msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
seq++;
}
@@ -939,17 +1017,19 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
next_seq = log_next_seq;
len = 0;
+ prev = 0;
while (len >= 0 && seq < next_seq) {
struct log *msg = log_from_idx(idx);
int textlen;
- textlen = msg_print_text(msg, true, text, LOG_LINE_MAX);
+ textlen = msg_print_text(msg, prev, true, text, LOG_LINE_MAX);
if (textlen < 0) {
len = textlen;
break;
}
idx = log_next(idx);
seq++;
+ prev = msg->flags;
raw_spin_unlock_irq(&logbuf_lock);
if (copy_to_user(buf + len, text, textlen))
@@ -962,6 +1042,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
/* messages are gone, move to next one */
seq = log_first_seq;
idx = log_first_idx;
+ prev = 0;
}
}
}
@@ -980,7 +1061,6 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
{
bool clear = false;
static int saved_console_loglevel = -1;
- static DEFINE_MUTEX(syslog_mutex);
int error;
error = check_syslog_permissions(type, from_file);
@@ -1007,17 +1087,11 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
error = -EFAULT;
goto out;
}
- error = mutex_lock_interruptible(&syslog_mutex);
- if (error)
- goto out;
error = wait_event_interruptible(log_wait,
syslog_seq != log_next_seq);
- if (error) {
- mutex_unlock(&syslog_mutex);
+ if (error)
goto out;
- }
error = syslog_print(buf, len);
- mutex_unlock(&syslog_mutex);
break;
/* Read/clear last kernel messages */
case SYSLOG_ACTION_READ_CLEAR:
@@ -1040,6 +1114,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
/* Clear ring buffer */
case SYSLOG_ACTION_CLEAR:
syslog_print_all(NULL, 0, true);
+ break;
/* Disable logging to console */
case SYSLOG_ACTION_CONSOLE_OFF:
if (saved_console_loglevel == -1)
@@ -1072,6 +1147,8 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
/* messages are gone, move to first one */
syslog_seq = log_first_seq;
syslog_idx = log_first_idx;
+ syslog_prev = 0;
+ syslog_partial = 0;
}
if (from_file) {
/*
@@ -1081,19 +1158,20 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
*/
error = log_next_idx - syslog_idx;
} else {
- u64 seq;
- u32 idx;
+ u64 seq = syslog_seq;
+ u32 idx = syslog_idx;
+ enum log_flags prev = syslog_prev;
error = 0;
- seq = syslog_seq;
- idx = syslog_idx;
while (seq < log_next_seq) {
struct log *msg = log_from_idx(idx);
- error += msg_print_text(msg, true, NULL, 0);
+ error += msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
seq++;
+ prev = msg->flags;
}
+ error -= syslog_partial;
}
raw_spin_unlock_irq(&logbuf_lock);
break;
@@ -1114,21 +1192,6 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
}
-#ifdef CONFIG_KGDB_KDB
-/* kdb dmesg command needs access to the syslog buffer. do_syslog()
- * uses locks so it cannot be used during debugging. Just tell kdb
- * where the start and end of the physical and logical logs are. This
- * is equivalent to do_syslog(3).
- */
-void kdb_syslog_data(char *syslog_data[4])
-{
- syslog_data[0] = log_buf;
- syslog_data[1] = log_buf + log_buf_len;
- syslog_data[2] = log_buf + log_first_idx;
- syslog_data[3] = log_buf + log_next_idx;
-}
-#endif /* CONFIG_KGDB_KDB */
-
static bool __read_mostly ignore_loglevel;
static int __init ignore_loglevel_setup(char *str)
@@ -1272,22 +1335,98 @@ static inline void printk_delay(void)
}
}
+/*
+ * Continuation lines are buffered, and not committed to the record buffer
+ * until the line is complete, or a race forces it. The line fragments
+ * though, are printed immediately to the consoles to ensure everything has
+ * reached the console in case of a kernel crash.
+ */
+static struct cont {
+ char buf[LOG_LINE_MAX];
+ size_t len; /* length == 0 means unused buffer */
+ size_t cons; /* bytes written to console */
+ struct task_struct *owner; /* task of first print*/
+ u64 ts_nsec; /* time of first print */
+ u8 level; /* log level of first message */
+ u8 facility; /* log level of first message */
+ bool flushed:1; /* buffer sealed and committed */
+} cont;
+
+static void cont_flush(void)
+{
+ if (cont.flushed)
+ return;
+ if (cont.len == 0)
+ return;
+
+ log_store(cont.facility, cont.level, LOG_NOCONS, cont.ts_nsec,
+ NULL, 0, cont.buf, cont.len);
+
+ cont.flushed = true;
+}
+
+static bool cont_add(int facility, int level, const char *text, size_t len)
+{
+ if (cont.len && cont.flushed)
+ return false;
+
+ if (cont.len + len > sizeof(cont.buf)) {
+ cont_flush();
+ return false;
+ }
+
+ if (!cont.len) {
+ cont.facility = facility;
+ cont.level = level;
+ cont.owner = current;
+ cont.ts_nsec = local_clock();
+ cont.cons = 0;
+ cont.flushed = false;
+ }
+
+ memcpy(cont.buf + cont.len, text, len);
+ cont.len += len;
+ return true;
+}
+
+static size_t cont_print_text(char *text, size_t size)
+{
+ size_t textlen = 0;
+ size_t len;
+
+ if (cont.cons == 0) {
+ textlen += print_time(cont.ts_nsec, text);
+ size -= textlen;
+ }
+
+ len = cont.len - cont.cons;
+ if (len > 0) {
+ if (len+1 > size)
+ len = size-1;
+ memcpy(text + textlen, cont.buf + cont.cons, len);
+ textlen += len;
+ cont.cons = cont.len;
+ }
+
+ if (cont.flushed) {
+ text[textlen++] = '\n';
+ /* got everything, release buffer */
+ cont.len = 0;
+ }
+ return textlen;
+}
+
asmlinkage int vprintk_emit(int facility, int level,
const char *dict, size_t dictlen,
const char *fmt, va_list args)
{
static int recursion_bug;
- static char cont_buf[LOG_LINE_MAX];
- static size_t cont_len;
- static int cont_level;
- static struct task_struct *cont_task;
static char textbuf[LOG_LINE_MAX];
char *text = textbuf;
size_t text_len;
+ enum log_flags lflags = 0;
unsigned long flags;
int this_cpu;
- bool newline = false;
- bool prefix = false;
int printed_len = 0;
boot_delay_msec();
@@ -1326,7 +1465,8 @@ asmlinkage int vprintk_emit(int facility, int level,
recursion_bug = 0;
printed_len += strlen(recursion_msg);
/* emit KERN_CRIT message */
- log_store(0, 2, NULL, 0, recursion_msg, printed_len);
+ log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
+ NULL, 0, recursion_msg, printed_len);
}
/*
@@ -1338,7 +1478,7 @@ asmlinkage int vprintk_emit(int facility, int level,
/* mark and strip a trailing newline */
if (text_len && text[text_len-1] == '\n') {
text_len--;
- newline = true;
+ lflags |= LOG_NEWLINE;
}
/* strip syslog prefix and extract log level or control flags */
@@ -1348,7 +1488,7 @@ asmlinkage int vprintk_emit(int facility, int level,
if (level == -1)
level = text[1] - '0';
case 'd': /* KERN_DEFAULT */
- prefix = true;
+ lflags |= LOG_PREFIX;
case 'c': /* KERN_CONT */
text += 3;
text_len -= 3;
@@ -1358,61 +1498,41 @@ asmlinkage int vprintk_emit(int facility, int level,
if (level == -1)
level = default_message_loglevel;
- if (dict) {
- prefix = true;
- newline = true;
- }
+ if (dict)
+ lflags |= LOG_PREFIX|LOG_NEWLINE;
- if (!newline) {
- if (cont_len && (prefix || cont_task != current)) {
- /*
- * Flush earlier buffer, which is either from a
- * different thread, or when we got a new prefix.
- */
- log_store(facility, cont_level, NULL, 0, cont_buf, cont_len);
- cont_len = 0;
- }
-
- if (!cont_len) {
- cont_level = level;
- cont_task = current;
- }
+ if (!(lflags & LOG_NEWLINE)) {
+ /*
+ * Flush the conflicting buffer. An earlier newline was missing,
+ * or another task also prints continuation lines.
+ */
+ if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
+ cont_flush();
- /* buffer or append to earlier buffer from the same thread */
- if (cont_len + text_len > sizeof(cont_buf))
- text_len = sizeof(cont_buf) - cont_len;
- memcpy(cont_buf + cont_len, text, text_len);
- cont_len += text_len;
+ /* buffer line if possible, otherwise store it right away */
+ if (!cont_add(facility, level, text, text_len))
+ log_store(facility, level, lflags | LOG_CONT, 0,
+ dict, dictlen, text, text_len);
} else {
- if (cont_len && cont_task == current) {
- if (prefix) {
- /*
- * New prefix from the same thread; flush. We
- * either got no earlier newline, or we race
- * with an interrupt.
- */
- log_store(facility, cont_level,
- NULL, 0, cont_buf, cont_len);
- cont_len = 0;
- }
+ bool stored = false;
- /* append to the earlier buffer and flush */
- if (cont_len + text_len > sizeof(cont_buf))
- text_len = sizeof(cont_buf) - cont_len;
- memcpy(cont_buf + cont_len, text, text_len);
- cont_len += text_len;
- log_store(facility, cont_level,
- NULL, 0, cont_buf, cont_len);
- cont_len = 0;
- cont_task = NULL;
- printed_len = cont_len;
- } else {
- /* ordinary single and terminated line */
- log_store(facility, level,
- dict, dictlen, text, text_len);
- printed_len = text_len;
+ /*
+ * If an earlier newline was missing and it was the same task,
+ * either merge it with the current buffer and flush, or if
+ * there was a race with interrupts (prefix == true) then just
+ * flush it out and store this line separately.
+ */
+ if (cont.len && cont.owner == current) {
+ if (!(lflags & LOG_PREFIX))
+ stored = cont_add(facility, level, text, text_len);
+ cont_flush();
}
+
+ if (!stored)
+ log_store(facility, level, lflags, 0,
+ dict, dictlen, text, text_len);
}
+ printed_len += text_len;
/*
* Try to acquire and then immediately release the console semaphore.
@@ -1499,11 +1619,18 @@ EXPORT_SYMBOL(printk);
#else
#define LOG_LINE_MAX 0
+static struct cont {
+ size_t len;
+ size_t cons;
+ u8 level;
+ bool flushed:1;
+} cont;
static struct log *log_from_idx(u32 idx) { return NULL; }
static u32 log_next(u32 idx) { return 0; }
static void call_console_drivers(int level, const char *text, size_t len) {}
-static size_t msg_print_text(const struct log *msg, bool syslog,
- char *buf, size_t size) { return 0; }
+static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+ bool syslog, char *buf, size_t size) { return 0; }
+static size_t cont_print_text(char *text, size_t size) { return 0; }
#endif /* CONFIG_PRINTK */
@@ -1778,6 +1905,7 @@ void wake_up_klogd(void)
/* the next printk record to write to the console */
static u64 console_seq;
static u32 console_idx;
+static enum log_flags console_prev;
/**
* console_unlock - unlock the console system
@@ -1795,6 +1923,7 @@ static u32 console_idx;
*/
void console_unlock(void)
{
+ static char text[LOG_LINE_MAX];
static u64 seen_seq;
unsigned long flags;
bool wake_klogd = false;
@@ -1807,10 +1936,23 @@ void console_unlock(void)
console_may_schedule = 0;
+ /* flush buffered message fragment immediately to console */
+ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ if (cont.len && (cont.cons < cont.len || cont.flushed)) {
+ size_t len;
+
+ len = cont_print_text(text, sizeof(text));
+ raw_spin_unlock(&logbuf_lock);
+ stop_critical_timings();
+ call_console_drivers(cont.level, text, len);
+ start_critical_timings();
+ local_irq_restore(flags);
+ } else
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
again:
for (;;) {
struct log *msg;
- static char text[LOG_LINE_MAX];
size_t len;
int level;
@@ -1824,18 +1966,35 @@ again:
/* messages are gone, move to first one */
console_seq = log_first_seq;
console_idx = log_first_idx;
+ console_prev = 0;
}
-
+skip:
if (console_seq == log_next_seq)
break;
msg = log_from_idx(console_idx);
- level = msg->level & 7;
-
- len = msg_print_text(msg, false, text, sizeof(text));
+ if (msg->flags & LOG_NOCONS) {
+ /*
+ * Skip record we have buffered and already printed
+ * directly to the console when we received it.
+ */
+ console_idx = log_next(console_idx);
+ console_seq++;
+ /*
+ * We will get here again when we register a new
+ * CON_PRINTBUFFER console. Clear the flag so we
+ * will properly dump everything later.
+ */
+ msg->flags &= ~LOG_NOCONS;
+ goto skip;
+ }
+ level = msg->level;
+ len = msg_print_text(msg, console_prev, false,
+ text, sizeof(text));
console_idx = log_next(console_idx);
console_seq++;
+ console_prev = msg->flags;
raw_spin_unlock(&logbuf_lock);
stop_critical_timings(); /* don't trace print latency */
@@ -2098,6 +2257,7 @@ void register_console(struct console *newcon)
raw_spin_lock_irqsave(&logbuf_lock, flags);
console_seq = syslog_seq;
console_idx = syslog_idx;
+ console_prev = syslog_prev;
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
/*
* We're about to replay the log buffer. Only do this to the
@@ -2350,7 +2510,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
}
/**
- * kmsg_dump_get_line - retrieve one kmsg log line
+ * kmsg_dump_get_line_nolock - retrieve one kmsg log line (unlocked version)
* @dumper: registered kmsg dumper
* @syslog: include the "<4>" prefixes
* @line: buffer to copy the line to
@@ -2365,11 +2525,12 @@ void kmsg_dump(enum kmsg_dump_reason reason)
*
* A return value of FALSE indicates that there are no more records to
* read.
+ *
+ * The function is similar to kmsg_dump_get_line(), but grabs no locks.
*/
-bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
- char *line, size_t size, size_t *len)
+bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
+ char *line, size_t size, size_t *len)
{
- unsigned long flags;
struct log *msg;
size_t l = 0;
bool ret = false;
@@ -2377,7 +2538,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
if (!dumper->active)
goto out;
- raw_spin_lock_irqsave(&logbuf_lock, flags);
if (dumper->cur_seq < log_first_seq) {
/* messages are gone, move to first available one */
dumper->cur_seq = log_first_seq;
@@ -2385,31 +2545,57 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
}
/* last entry */
- if (dumper->cur_seq >= log_next_seq) {
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+ if (dumper->cur_seq >= log_next_seq)
goto out;
- }
msg = log_from_idx(dumper->cur_idx);
- l = msg_print_text(msg, syslog,
- line, size);
+ l = msg_print_text(msg, 0, syslog, line, size);
dumper->cur_idx = log_next(dumper->cur_idx);
dumper->cur_seq++;
ret = true;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
out:
if (len)
*len = l;
return ret;
}
+
+/**
+ * kmsg_dump_get_line - retrieve one kmsg log line
+ * @dumper: registered kmsg dumper
+ * @syslog: include the "<4>" prefixes
+ * @line: buffer to copy the line to
+ * @size: maximum size of the buffer
+ * @len: length of line placed into buffer
+ *
+ * Start at the beginning of the kmsg buffer, with the oldest kmsg
+ * record, and copy one record into the provided buffer.
+ *
+ * Consecutive calls will return the next available record moving
+ * towards the end of the buffer with the youngest messages.
+ *
+ * A return value of FALSE indicates that there are no more records to
+ * read.
+ */
+bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
+ char *line, size_t size, size_t *len)
+{
+ unsigned long flags;
+ bool ret;
+
+ raw_spin_lock_irqsave(&logbuf_lock, flags);
+ ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
/**
* kmsg_dump_get_buffer - copy kmsg log lines
* @dumper: registered kmsg dumper
* @syslog: include the "<4>" prefixes
- * @line: buffer to copy the line to
+ * @buf: buffer to copy the line to
* @size: maximum size of the buffer
* @len: length of line placed into buffer
*
@@ -2432,6 +2618,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
u32 idx;
u64 next_seq;
u32 next_idx;
+ enum log_flags prev;
size_t l = 0;
bool ret = false;
@@ -2454,23 +2641,27 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
/* calculate length of entire buffer */
seq = dumper->cur_seq;
idx = dumper->cur_idx;
+ prev = 0;
while (seq < dumper->next_seq) {
struct log *msg = log_from_idx(idx);
- l += msg_print_text(msg, true, NULL, 0);
+ l += msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
seq++;
+ prev = msg->flags;
}
/* move first record forward until length fits into the buffer */
seq = dumper->cur_seq;
idx = dumper->cur_idx;
+ prev = 0;
while (l > size && seq < dumper->next_seq) {
struct log *msg = log_from_idx(idx);
- l -= msg_print_text(msg, true, NULL, 0);
+ l -= msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
seq++;
+ prev = msg->flags;
}
/* last message in next interation */
@@ -2478,14 +2669,14 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
next_idx = idx;
l = 0;
+ prev = 0;
while (seq < dumper->next_seq) {
struct log *msg = log_from_idx(idx);
- l += msg_print_text(msg, syslog,
- buf + l, size - l);
-
+ l += msg_print_text(msg, prev, syslog, buf + l, size - l);
idx = log_next(idx);
seq++;
+ prev = msg->flags;
}
dumper->next_seq = next_seq;
@@ -2500,6 +2691,24 @@ out:
EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
/**
+ * kmsg_dump_rewind_nolock - reset the interator (unlocked version)
+ * @dumper: registered kmsg dumper
+ *
+ * Reset the dumper's iterator so that kmsg_dump_get_line() and
+ * kmsg_dump_get_buffer() can be called again and used multiple
+ * times within the same dumper.dump() callback.
+ *
+ * The function is similar to kmsg_dump_rewind(), but grabs no locks.
+ */
+void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
+{
+ dumper->cur_seq = clear_seq;
+ dumper->cur_idx = clear_idx;
+ dumper->next_seq = log_next_seq;
+ dumper->next_idx = log_next_idx;
+}
+
+/**
* kmsg_dump_rewind - reset the interator
* @dumper: registered kmsg dumper
*
@@ -2512,10 +2721,7 @@ void kmsg_dump_rewind(struct kmsg_dumper *dumper)
unsigned long flags;
raw_spin_lock_irqsave(&logbuf_lock, flags);
- dumper->cur_seq = clear_seq;
- dumper->cur_idx = clear_idx;
- dumper->next_seq = log_next_seq;
- dumper->next_idx = log_next_idx;
+ kmsg_dump_rewind_nolock(dumper);
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
}
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 95cba41ce1e9..4e6a61b15e86 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -54,6 +54,50 @@
#ifdef CONFIG_PREEMPT_RCU
/*
+ * Preemptible RCU implementation for rcu_read_lock().
+ * Just increment ->rcu_read_lock_nesting, shared state will be updated
+ * if we block.
+ */
+void __rcu_read_lock(void)
+{
+ current->rcu_read_lock_nesting++;
+ barrier(); /* critical section after entry code. */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_lock);
+
+/*
+ * Preemptible RCU implementation for rcu_read_unlock().
+ * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
+ * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
+ * invoke rcu_read_unlock_special() to clean up after a context switch
+ * in an RCU read-side critical section and other special cases.
+ */
+void __rcu_read_unlock(void)
+{
+ struct task_struct *t = current;
+
+ if (t->rcu_read_lock_nesting != 1) {
+ --t->rcu_read_lock_nesting;
+ } else {
+ barrier(); /* critical section before exit code. */
+ t->rcu_read_lock_nesting = INT_MIN;
+ barrier(); /* assign before ->rcu_read_unlock_special load */
+ if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+ rcu_read_unlock_special(t);
+ barrier(); /* ->rcu_read_unlock_special load before assign */
+ t->rcu_read_lock_nesting = 0;
+ }
+#ifdef CONFIG_PROVE_LOCKING
+ {
+ int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+ WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+ }
+#endif /* #ifdef CONFIG_PROVE_LOCKING */
+}
+EXPORT_SYMBOL_GPL(__rcu_read_unlock);
+
+/*
* Check for a task exiting while in a preemptible-RCU read-side
* critical section, clean up if so. No need to issue warnings,
* as debug_check_no_locks_held() already does this if lockdep
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 37a5444204d2..547b1fe5b052 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -172,7 +172,7 @@ void rcu_irq_enter(void)
local_irq_restore(flags);
}
-#ifdef CONFIG_PROVE_RCU
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
* Test whether RCU thinks that the current CPU is idle.
@@ -183,7 +183,7 @@ int rcu_is_cpu_idle(void)
}
EXPORT_SYMBOL(rcu_is_cpu_idle);
-#endif /* #ifdef CONFIG_PROVE_RCU */
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
/*
* Test whether the current CPU was interrupted from idle. Nested
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index fc31a2d65100..918fd1e8509c 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -132,7 +132,6 @@ static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
RCU_TRACE(.rcb.name = "rcu_preempt")
};
-static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);
@@ -351,8 +350,9 @@ static int rcu_initiate_boost(void)
rcu_preempt_ctrlblk.boost_tasks =
rcu_preempt_ctrlblk.gp_tasks;
invoke_rcu_callbacks();
- } else
+ } else {
RCU_TRACE(rcu_initiate_boost_trace());
+ }
return 1;
}
@@ -527,23 +527,11 @@ void rcu_preempt_note_context_switch(void)
}
/*
- * Tiny-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
- current->rcu_read_lock_nesting++;
- barrier(); /* needed if we ever invoke rcu_read_lock in rcutiny.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
* Handle special cases during rcu_read_unlock(), such as needing to
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
int empty_exp;
@@ -627,38 +615,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
}
/*
- * Tiny-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
- struct task_struct *t = current;
-
- barrier(); /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
- if (t->rcu_read_lock_nesting != 1)
- --t->rcu_read_lock_nesting;
- else {
- t->rcu_read_lock_nesting = INT_MIN;
- barrier(); /* assign before ->rcu_read_unlock_special load */
- if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
- rcu_read_unlock_special(t);
- barrier(); /* ->rcu_read_unlock_special load before assign */
- t->rcu_read_lock_nesting = 0;
- }
-#ifdef CONFIG_PROVE_LOCKING
- {
- int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
- WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
- }
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
-/*
* Check for a quiescent state from the current CPU. When a task blocks,
* the task is recorded in the rcu_preempt_ctrlblk structure, which is
* checked elsewhere. This is called from the scheduling-clock interrupt.
@@ -823,9 +779,9 @@ void synchronize_rcu_expedited(void)
rpcp->exp_tasks = NULL;
/* Wait for tail of ->blkd_tasks list to drain. */
- if (!rcu_preempted_readers_exp())
+ if (!rcu_preempted_readers_exp()) {
local_irq_restore(flags);
- else {
+ } else {
rcu_initiate_boost();
local_irq_restore(flags);
wait_event(sync_rcu_preempt_exp_wq,
@@ -846,8 +802,6 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
*/
int rcu_preempt_needs_cpu(void)
{
- if (!rcu_preempt_running_reader())
- rcu_preempt_cpu_qs();
return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index e66b34ab7555..25b15033c61f 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -49,8 +49,7 @@
#include <asm/byteorder.h>
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
- "Josh Triplett <josh@freedesktop.org>");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4; /* # fake writer threads */
@@ -206,6 +205,7 @@ static unsigned long boost_starttime; /* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
/* and boost task create/destroy. */
static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
+static bool barrier_phase; /* Test phase. */
static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
@@ -407,8 +407,9 @@ rcu_torture_cb(struct rcu_head *p)
if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
rp->rtort_mbtest = 0;
rcu_torture_free(rp);
- } else
+ } else {
cur_ops->deferred_free(rp);
+ }
}
static int rcu_no_completed(void)
@@ -635,6 +636,17 @@ static void srcu_torture_synchronize(void)
synchronize_srcu(&srcu_ctl);
}
+static void srcu_torture_call(struct rcu_head *head,
+ void (*func)(struct rcu_head *head))
+{
+ call_srcu(&srcu_ctl, head, func);
+}
+
+static void srcu_torture_barrier(void)
+{
+ srcu_barrier(&srcu_ctl);
+}
+
static int srcu_torture_stats(char *page)
{
int cnt = 0;
@@ -661,8 +673,8 @@ static struct rcu_torture_ops srcu_ops = {
.completed = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
- .call = NULL,
- .cb_barrier = NULL,
+ .call = srcu_torture_call,
+ .cb_barrier = srcu_torture_barrier,
.stats = srcu_torture_stats,
.name = "srcu"
};
@@ -1013,7 +1025,11 @@ rcu_torture_fakewriter(void *arg)
do {
schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
udelay(rcu_random(&rand) & 0x3ff);
- cur_ops->sync();
+ if (cur_ops->cb_barrier != NULL &&
+ rcu_random(&rand) % (nfakewriters * 8) == 0)
+ cur_ops->cb_barrier();
+ else
+ cur_ops->sync();
rcu_stutter_wait("rcu_torture_fakewriter");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
@@ -1183,27 +1199,27 @@ rcu_torture_printk(char *page)
}
cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt],
- "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
- "rtmbe: %d rtbke: %ld rtbre: %ld "
- "rtbf: %ld rtb: %ld nt: %ld "
- "onoff: %ld/%ld:%ld/%ld "
- "barrier: %ld/%ld:%ld",
+ "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
rcu_torture_current,
rcu_torture_current_version,
list_empty(&rcu_torture_freelist),
atomic_read(&n_rcu_torture_alloc),
atomic_read(&n_rcu_torture_alloc_fail),
- atomic_read(&n_rcu_torture_free),
+ atomic_read(&n_rcu_torture_free));
+ cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
atomic_read(&n_rcu_torture_mberror),
n_rcu_torture_boost_ktrerror,
- n_rcu_torture_boost_rterror,
+ n_rcu_torture_boost_rterror);
+ cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
n_rcu_torture_boost_failure,
n_rcu_torture_boosts,
- n_rcu_torture_timers,
+ n_rcu_torture_timers);
+ cnt += sprintf(&page[cnt], "onoff: %ld/%ld:%ld/%ld ",
n_online_successes,
n_online_attempts,
n_offline_successes,
- n_offline_attempts,
+ n_offline_attempts);
+ cnt += sprintf(&page[cnt], "barrier: %ld/%ld:%ld",
n_barrier_successes,
n_barrier_attempts,
n_rcu_torture_barrier_error);
@@ -1445,8 +1461,7 @@ rcu_torture_shutdown(void *arg)
delta = shutdown_time - jiffies_snap;
if (verbose)
printk(KERN_ALERT "%s" TORTURE_FLAG
- "rcu_torture_shutdown task: %lu "
- "jiffies remaining\n",
+ "rcu_torture_shutdown task: %lu jiffies remaining\n",
torture_type, delta);
schedule_timeout_interruptible(delta);
jiffies_snap = ACCESS_ONCE(jiffies);
@@ -1498,8 +1513,7 @@ rcu_torture_onoff(void *arg)
if (cpu_down(cpu) == 0) {
if (verbose)
printk(KERN_ALERT "%s" TORTURE_FLAG
- "rcu_torture_onoff task: "
- "offlined %d\n",
+ "rcu_torture_onoff task: offlined %d\n",
torture_type, cpu);
n_offline_successes++;
}
@@ -1512,8 +1526,7 @@ rcu_torture_onoff(void *arg)
if (cpu_up(cpu) == 0) {
if (verbose)
printk(KERN_ALERT "%s" TORTURE_FLAG
- "rcu_torture_onoff task: "
- "onlined %d\n",
+ "rcu_torture_onoff task: onlined %d\n",
torture_type, cpu);
n_online_successes++;
}
@@ -1631,6 +1644,7 @@ void rcu_torture_barrier_cbf(struct rcu_head *rcu)
static int rcu_torture_barrier_cbs(void *arg)
{
long myid = (long)arg;
+ bool lastphase = 0;
struct rcu_head rcu;
init_rcu_head_on_stack(&rcu);
@@ -1638,9 +1652,11 @@ static int rcu_torture_barrier_cbs(void *arg)
set_user_nice(current, 19);
do {
wait_event(barrier_cbs_wq[myid],
- atomic_read(&barrier_cbs_count) == n_barrier_cbs ||
+ barrier_phase != lastphase ||
kthread_should_stop() ||
fullstop != FULLSTOP_DONTSTOP);
+ lastphase = barrier_phase;
+ smp_mb(); /* ensure barrier_phase load before ->call(). */
if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
break;
cur_ops->call(&rcu, rcu_torture_barrier_cbf);
@@ -1665,7 +1681,8 @@ static int rcu_torture_barrier(void *arg)
do {
atomic_set(&barrier_cbs_invoked, 0);
atomic_set(&barrier_cbs_count, n_barrier_cbs);
- /* wake_up() path contains the required barriers. */
+ smp_mb(); /* Ensure barrier_phase after prior assignments. */
+ barrier_phase = !barrier_phase;
for (i = 0; i < n_barrier_cbs; i++)
wake_up(&barrier_cbs_wq[i]);
wait_event(barrier_wq,
@@ -1684,7 +1701,7 @@ static int rcu_torture_barrier(void *arg)
schedule_timeout_interruptible(HZ / 10);
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_barrier task stopping");
- rcutorture_shutdown_absorb("rcu_torture_barrier_cbs");
+ rcutorture_shutdown_absorb("rcu_torture_barrier");
while (!kthread_should_stop())
schedule_timeout_interruptible(1);
return 0;
@@ -1908,8 +1925,8 @@ rcu_torture_init(void)
static struct rcu_torture_ops *torture_ops[] =
{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
&rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
- &srcu_ops, &srcu_sync_ops, &srcu_raw_ops,
- &srcu_raw_sync_ops, &srcu_expedited_ops,
+ &srcu_ops, &srcu_sync_ops, &srcu_expedited_ops,
+ &srcu_raw_ops, &srcu_raw_sync_ops,
&sched_ops, &sched_sync_ops, &sched_expedited_ops, };
mutex_lock(&fullstop_mutex);
@@ -1931,8 +1948,7 @@ rcu_torture_init(void)
return -EINVAL;
}
if (cur_ops->fqs == NULL && fqs_duration != 0) {
- printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
- "fqs_duration, fqs disabled.\n");
+ printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
fqs_duration = 0;
}
if (cur_ops->init)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 3b0f1337f75b..f280e542e3e9 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -60,36 +60,44 @@
/* Data structures. */
-static struct lock_class_key rcu_node_class[NUM_RCU_LVLS];
-
-#define RCU_STATE_INITIALIZER(structname) { \
- .level = { &structname##_state.node[0] }, \
- .levelcnt = { \
- NUM_RCU_LVL_0, /* root of hierarchy. */ \
- NUM_RCU_LVL_1, \
- NUM_RCU_LVL_2, \
- NUM_RCU_LVL_3, \
- NUM_RCU_LVL_4, /* == MAX_RCU_LVLS */ \
- }, \
+static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
+
+#define RCU_STATE_INITIALIZER(sname, cr) { \
+ .level = { &sname##_state.node[0] }, \
+ .call = cr, \
.fqs_state = RCU_GP_IDLE, \
.gpnum = -300, \
.completed = -300, \
- .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.onofflock), \
- .orphan_nxttail = &structname##_state.orphan_nxtlist, \
- .orphan_donetail = &structname##_state.orphan_donelist, \
- .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&structname##_state.fqslock), \
- .n_force_qs = 0, \
- .n_force_qs_ngp = 0, \
- .name = #structname, \
+ .onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
+ .orphan_nxttail = &sname##_state.orphan_nxtlist, \
+ .orphan_donetail = &sname##_state.orphan_donelist, \
+ .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
+ .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \
+ .name = #sname, \
}
-struct rcu_state rcu_sched_state = RCU_STATE_INITIALIZER(rcu_sched);
+struct rcu_state rcu_sched_state =
+ RCU_STATE_INITIALIZER(rcu_sched, call_rcu_sched);
DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
-struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh);
+struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
static struct rcu_state *rcu_state;
+LIST_HEAD(rcu_struct_flavors);
+
+/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
+static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
+module_param(rcu_fanout_leaf, int, 0);
+int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
+static int num_rcu_lvl[] = { /* Number of rcu_nodes at specified level. */
+ NUM_RCU_LVL_0,
+ NUM_RCU_LVL_1,
+ NUM_RCU_LVL_2,
+ NUM_RCU_LVL_3,
+ NUM_RCU_LVL_4,
+};
+int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/*
* The rcu_scheduler_active variable transitions from zero to one just
@@ -147,13 +155,6 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;
-/* State information for rcu_barrier() and friends. */
-
-static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
-static atomic_t rcu_barrier_cpu_count;
-static DEFINE_MUTEX(rcu_barrier_mutex);
-static struct completion rcu_barrier_completion;
-
/*
* Return true if an RCU grace period is in progress. The ACCESS_ONCE()s
* permit this function to be invoked without holding the root rcu_node
@@ -201,6 +202,7 @@ void rcu_note_context_switch(int cpu)
{
trace_rcu_utilization("Start context switch");
rcu_sched_qs(cpu);
+ rcu_preempt_note_context_switch(cpu);
trace_rcu_utilization("End context switch");
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
@@ -357,7 +359,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval)
struct task_struct *idle = idle_task(smp_processor_id());
trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
- ftrace_dump(DUMP_ALL);
+ ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm,
idle->pid, idle->comm); /* must be idle task! */
@@ -467,7 +469,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval)
trace_rcu_dyntick("Error on exit: not idle task",
oldval, rdtp->dynticks_nesting);
- ftrace_dump(DUMP_ALL);
+ ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm,
idle->pid, idle->comm); /* must be idle task! */
@@ -584,8 +586,6 @@ void rcu_nmi_exit(void)
WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}
-#ifdef CONFIG_PROVE_RCU
-
/**
* rcu_is_cpu_idle - see if RCU thinks that the current CPU is idle
*
@@ -603,7 +603,7 @@ int rcu_is_cpu_idle(void)
}
EXPORT_SYMBOL(rcu_is_cpu_idle);
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
/*
* Is the current CPU online? Disable preemption to avoid false positives
@@ -644,9 +644,7 @@ bool rcu_lockdep_current_cpu_online(void)
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
-#endif /* #ifdef CONFIG_PROVE_RCU */
+#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
/**
* rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
@@ -732,7 +730,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
int cpu;
long delta;
unsigned long flags;
- int ndetected;
+ int ndetected = 0;
struct rcu_node *rnp = rcu_get_root(rsp);
/* Only let one CPU complain about others per time interval. */
@@ -773,7 +771,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
*/
rnp = rcu_get_root(rsp);
raw_spin_lock_irqsave(&rnp->lock, flags);
- ndetected = rcu_print_task_stall(rnp);
+ ndetected += rcu_print_task_stall(rnp);
raw_spin_unlock_irqrestore(&rnp->lock, flags);
print_cpu_stall_info_end();
@@ -859,9 +857,10 @@ static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
*/
void rcu_cpu_stall_reset(void)
{
- rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2;
- rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
- rcu_preempt_stall_reset();
+ struct rcu_state *rsp;
+
+ for_each_rcu_flavor(rsp)
+ rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
}
static struct notifier_block rcu_panic_block = {
@@ -893,8 +892,9 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct
if (rnp->qsmask & rdp->grpmask) {
rdp->qs_pending = 1;
rdp->passed_quiesce = 0;
- } else
+ } else {
rdp->qs_pending = 0;
+ }
zero_cpu_stall_ticks(rdp);
}
}
@@ -936,6 +936,18 @@ check_for_new_grace_period(struct rcu_state *rsp, struct rcu_data *rdp)
}
/*
+ * Initialize the specified rcu_data structure's callback list to empty.
+ */
+static void init_callback_list(struct rcu_data *rdp)
+{
+ int i;
+
+ rdp->nxtlist = NULL;
+ for (i = 0; i < RCU_NEXT_SIZE; i++)
+ rdp->nxttail[i] = &rdp->nxtlist;
+}
+
+/*
* Advance this CPU's callbacks, but only if the current grace period
* has ended. This may be called only from the CPU to whom the rdp
* belongs. In addition, the corresponding leaf rcu_node structure's
@@ -1327,8 +1339,6 @@ static void
rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
struct rcu_node *rnp, struct rcu_data *rdp)
{
- int i;
-
/*
* Orphan the callbacks. First adjust the counts. This is safe
* because ->onofflock excludes _rcu_barrier()'s adoption of
@@ -1339,7 +1349,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
rsp->qlen += rdp->qlen;
rdp->n_cbs_orphaned += rdp->qlen;
rdp->qlen_lazy = 0;
- rdp->qlen = 0;
+ ACCESS_ONCE(rdp->qlen) = 0;
}
/*
@@ -1368,9 +1378,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
}
/* Finally, initialize the rcu_data structure's list to empty. */
- rdp->nxtlist = NULL;
- for (i = 0; i < RCU_NEXT_SIZE; i++)
- rdp->nxttail[i] = &rdp->nxtlist;
+ init_callback_list(rdp);
}
/*
@@ -1504,6 +1512,9 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
raw_spin_unlock_irqrestore(&rnp->lock, flags);
if (need_report & RCU_OFL_TASKS_EXP_GP)
rcu_report_exp_rnp(rsp, rnp, true);
+ WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
+ "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
+ cpu, rdp->qlen, rdp->nxtlist);
}
#else /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -1530,7 +1541,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
{
unsigned long flags;
struct rcu_head *next, *list, **tail;
- int bl, count, count_lazy;
+ int bl, count, count_lazy, i;
/* If no callbacks are ready, just return.*/
if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
@@ -1553,9 +1564,9 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
*rdp->nxttail[RCU_DONE_TAIL] = NULL;
tail = rdp->nxttail[RCU_DONE_TAIL];
- for (count = RCU_NEXT_SIZE - 1; count >= 0; count--)
- if (rdp->nxttail[count] == rdp->nxttail[RCU_DONE_TAIL])
- rdp->nxttail[count] = &rdp->nxtlist;
+ for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
+ if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
+ rdp->nxttail[i] = &rdp->nxtlist;
local_irq_restore(flags);
/* Invoke callbacks. */
@@ -1583,15 +1594,15 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
if (list != NULL) {
*tail = rdp->nxtlist;
rdp->nxtlist = list;
- for (count = 0; count < RCU_NEXT_SIZE; count++)
- if (&rdp->nxtlist == rdp->nxttail[count])
- rdp->nxttail[count] = tail;
+ for (i = 0; i < RCU_NEXT_SIZE; i++)
+ if (&rdp->nxtlist == rdp->nxttail[i])
+ rdp->nxttail[i] = tail;
else
break;
}
smp_mb(); /* List handling before counting for rcu_barrier(). */
rdp->qlen_lazy -= count_lazy;
- rdp->qlen -= count;
+ ACCESS_ONCE(rdp->qlen) -= count;
rdp->n_cbs_invoked += count;
/* Reinstate batch limit if we have worked down the excess. */
@@ -1604,6 +1615,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
rdp->n_force_qs_snap = rsp->n_force_qs;
} else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
rdp->qlen_last_fqs_check = rdp->qlen;
+ WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
local_irq_restore(flags);
@@ -1744,8 +1756,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
break; /* grace period idle or initializing, ignore. */
case RCU_SAVE_DYNTICK:
- if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
- break; /* So gcc recognizes the dead code. */
raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
@@ -1787,9 +1797,10 @@ unlock_fqs_ret:
* whom the rdp belongs.
*/
static void
-__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+__rcu_process_callbacks(struct rcu_state *rsp)
{
unsigned long flags;
+ struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
WARN_ON_ONCE(rdp->beenonline == 0);
@@ -1825,11 +1836,11 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
*/
static void rcu_process_callbacks(struct softirq_action *unused)
{
+ struct rcu_state *rsp;
+
trace_rcu_utilization("Start RCU core");
- __rcu_process_callbacks(&rcu_sched_state,
- &__get_cpu_var(rcu_sched_data));
- __rcu_process_callbacks(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
- rcu_preempt_process_callbacks();
+ for_each_rcu_flavor(rsp)
+ __rcu_process_callbacks(rsp);
trace_rcu_utilization("End RCU core");
}
@@ -1856,6 +1867,56 @@ static void invoke_rcu_core(void)
raise_softirq(RCU_SOFTIRQ);
}
+/*
+ * Handle any core-RCU processing required by a call_rcu() invocation.
+ */
+static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
+ struct rcu_head *head, unsigned long flags)
+{
+ /*
+ * If called from an extended quiescent state, invoke the RCU
+ * core in order to force a re-evaluation of RCU's idleness.
+ */
+ if (rcu_is_cpu_idle() && cpu_online(smp_processor_id()))
+ invoke_rcu_core();
+
+ /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
+ if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
+ return;
+
+ /*
+ * Force the grace period if too many callbacks or too long waiting.
+ * Enforce hysteresis, and don't invoke force_quiescent_state()
+ * if some other CPU has recently done so. Also, don't bother
+ * invoking force_quiescent_state() if the newly enqueued callback
+ * is the only one waiting for a grace period to complete.
+ */
+ if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
+
+ /* Are we ignoring a completed grace period? */
+ rcu_process_gp_end(rsp, rdp);
+ check_for_new_grace_period(rsp, rdp);
+
+ /* Start a new grace period if one not already started. */
+ if (!rcu_gp_in_progress(rsp)) {
+ unsigned long nestflag;
+ struct rcu_node *rnp_root = rcu_get_root(rsp);
+
+ raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
+ rcu_start_gp(rsp, nestflag); /* rlses rnp_root->lock */
+ } else {
+ /* Give the grace period a kick. */
+ rdp->blimit = LONG_MAX;
+ if (rsp->n_force_qs == rdp->n_force_qs_snap &&
+ *rdp->nxttail[RCU_DONE_TAIL] != head)
+ force_quiescent_state(rsp, 0);
+ rdp->n_force_qs_snap = rsp->n_force_qs;
+ rdp->qlen_last_fqs_check = rdp->qlen;
+ }
+ } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
+ force_quiescent_state(rsp, 1);
+}
+
static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
struct rcu_state *rsp, bool lazy)
@@ -1880,7 +1941,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
rdp = this_cpu_ptr(rsp->rda);
/* Add the callback to our list. */
- rdp->qlen++;
+ ACCESS_ONCE(rdp->qlen)++;
if (lazy)
rdp->qlen_lazy++;
else
@@ -1895,43 +1956,8 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
else
trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);
- /* If interrupts were disabled, don't dive into RCU core. */
- if (irqs_disabled_flags(flags)) {
- local_irq_restore(flags);
- return;
- }
-
- /*
- * Force the grace period if too many callbacks or too long waiting.
- * Enforce hysteresis, and don't invoke force_quiescent_state()
- * if some other CPU has recently done so. Also, don't bother
- * invoking force_quiescent_state() if the newly enqueued callback
- * is the only one waiting for a grace period to complete.
- */
- if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {
-
- /* Are we ignoring a completed grace period? */
- rcu_process_gp_end(rsp, rdp);
- check_for_new_grace_period(rsp, rdp);
-
- /* Start a new grace period if one not already started. */
- if (!rcu_gp_in_progress(rsp)) {
- unsigned long nestflag;
- struct rcu_node *rnp_root = rcu_get_root(rsp);
-
- raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
- rcu_start_gp(rsp, nestflag); /* rlses rnp_root->lock */
- } else {
- /* Give the grace period a kick. */
- rdp->blimit = LONG_MAX;
- if (rsp->n_force_qs == rdp->n_force_qs_snap &&
- *rdp->nxttail[RCU_DONE_TAIL] != head)
- force_quiescent_state(rsp, 0);
- rdp->n_force_qs_snap = rsp->n_force_qs;
- rdp->qlen_last_fqs_check = rdp->qlen;
- }
- } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies))
- force_quiescent_state(rsp, 1);
+ /* Go handle any RCU core processing required. */
+ __call_rcu_core(rsp, rdp, head, flags);
local_irq_restore(flags);
}
@@ -1961,28 +1987,16 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
* occasionally incorrectly indicate that there are multiple CPUs online
* when there was in fact only one the whole time, as this just adds
* some overhead: RCU still operates correctly.
- *
- * Of course, sampling num_online_cpus() with preemption enabled can
- * give erroneous results if there are concurrent CPU-hotplug operations.
- * For example, given a demonic sequence of preemptions in num_online_cpus()
- * and CPU-hotplug operations, there could be two or more CPUs online at
- * all times, but num_online_cpus() might well return one (or even zero).
- *
- * However, all such demonic sequences require at least one CPU-offline
- * operation. Furthermore, rcu_blocking_is_gp() giving the wrong answer
- * is only a problem if there is an RCU read-side critical section executing
- * throughout. But RCU-sched and RCU-bh read-side critical sections
- * disable either preemption or bh, which prevents a CPU from going offline.
- * Therefore, the only way that rcu_blocking_is_gp() can incorrectly return
- * that there is only one CPU when in fact there was more than one throughout
- * is when there were no RCU readers in the system. If there are no
- * RCU readers, the grace period by definition can be of zero length,
- * regardless of the number of online CPUs.
*/
static inline int rcu_blocking_is_gp(void)
{
+ int ret;
+
might_sleep(); /* Check for RCU read-side critical section. */
- return num_online_cpus() <= 1;
+ preempt_disable();
+ ret = num_online_cpus() <= 1;
+ preempt_enable();
+ return ret;
}
/**
@@ -2117,9 +2131,9 @@ void synchronize_sched_expedited(void)
put_online_cpus();
/* No joy, try again later. Or just synchronize_sched(). */
- if (trycount++ < 10)
+ if (trycount++ < 10) {
udelay(trycount * num_online_cpus());
- else {
+ } else {
synchronize_sched();
return;
}
@@ -2240,9 +2254,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
*/
static int rcu_pending(int cpu)
{
- return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) ||
- __rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
- rcu_preempt_pending(cpu);
+ struct rcu_state *rsp;
+
+ for_each_rcu_flavor(rsp)
+ if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
+ return 1;
+ return 0;
}
/*
@@ -2252,20 +2269,41 @@ static int rcu_pending(int cpu)
*/
static int rcu_cpu_has_callbacks(int cpu)
{
+ struct rcu_state *rsp;
+
/* RCU callbacks either ready or pending? */
- return per_cpu(rcu_sched_data, cpu).nxtlist ||
- per_cpu(rcu_bh_data, cpu).nxtlist ||
- rcu_preempt_cpu_has_callbacks(cpu);
+ for_each_rcu_flavor(rsp)
+ if (per_cpu_ptr(rsp->rda, cpu)->nxtlist)
+ return 1;
+ return 0;
+}
+
+/*
+ * Helper function for _rcu_barrier() tracing. If tracing is disabled,
+ * the compiler is expected to optimize this away.
+ */
+static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
+ int cpu, unsigned long done)
+{
+ trace_rcu_barrier(rsp->name, s, cpu,
+ atomic_read(&rsp->barrier_cpu_count), done);
}
/*
* RCU callback function for _rcu_barrier(). If we are last, wake
* up the task executing _rcu_barrier().
*/
-static void rcu_barrier_callback(struct rcu_head *notused)
+static void rcu_barrier_callback(struct rcu_head *rhp)
{
- if (atomic_dec_and_test(&rcu_barrier_cpu_count))
- complete(&rcu_barrier_completion);
+ struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
+ struct rcu_state *rsp = rdp->rsp;
+
+ if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
+ _rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
+ complete(&rsp->barrier_completion);
+ } else {
+ _rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
+ }
}
/*
@@ -2273,35 +2311,63 @@ static void rcu_barrier_callback(struct rcu_head *notused)
*/
static void rcu_barrier_func(void *type)
{
- int cpu = smp_processor_id();
- struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);
- void (*call_rcu_func)(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
+ struct rcu_state *rsp = type;
+ struct rcu_data *rdp = __this_cpu_ptr(rsp->rda);
- atomic_inc(&rcu_barrier_cpu_count);
- call_rcu_func = type;
- call_rcu_func(head, rcu_barrier_callback);
+ _rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
+ atomic_inc(&rsp->barrier_cpu_count);
+ rsp->call(&rdp->barrier_head, rcu_barrier_callback);
}
/*
* Orchestrate the specified type of RCU barrier, waiting for all
* RCU callbacks of the specified type to complete.
*/
-static void _rcu_barrier(struct rcu_state *rsp,
- void (*call_rcu_func)(struct rcu_head *head,
- void (*func)(struct rcu_head *head)))
+static void _rcu_barrier(struct rcu_state *rsp)
{
int cpu;
unsigned long flags;
struct rcu_data *rdp;
- struct rcu_head rh;
+ struct rcu_data rd;
+ unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
+ unsigned long snap_done;
- init_rcu_head_on_stack(&rh);
+ init_rcu_head_on_stack(&rd.barrier_head);
+ _rcu_barrier_trace(rsp, "Begin", -1, snap);
/* Take mutex to serialize concurrent rcu_barrier() requests. */
- mutex_lock(&rcu_barrier_mutex);
+ mutex_lock(&rsp->barrier_mutex);
+
+ /*
+ * Ensure that all prior references, including to ->n_barrier_done,
+ * are ordered before the _rcu_barrier() machinery.
+ */
+ smp_mb(); /* See above block comment. */
+
+ /*
+ * Recheck ->n_barrier_done to see if others did our work for us.
+ * This means checking ->n_barrier_done for an even-to-odd-to-even
+ * transition. The "if" expression below therefore rounds the old
+ * value up to the next even number and adds two before comparing.
+ */
+ snap_done = ACCESS_ONCE(rsp->n_barrier_done);
+ _rcu_barrier_trace(rsp, "Check", -1, snap_done);
+ if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
+ _rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
+ smp_mb(); /* caller's subsequent code after above check. */
+ mutex_unlock(&rsp->barrier_mutex);
+ return;
+ }
- smp_mb(); /* Prevent any prior operations from leaking in. */
+ /*
+ * Increment ->n_barrier_done to avoid duplicate work. Use
+ * ACCESS_ONCE() to prevent the compiler from speculating
+ * the increment to precede the early-exit check.
+ */
+ ACCESS_ONCE(rsp->n_barrier_done)++;
+ WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
+ _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
+ smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
/*
* Initialize the count to one rather than to zero in order to
@@ -2320,8 +2386,8 @@ static void _rcu_barrier(struct rcu_state *rsp,
* 6. Both rcu_barrier_callback() callbacks are invoked, awakening
* us -- but before CPU 1's orphaned callbacks are invoked!!!
*/
- init_completion(&rcu_barrier_completion);
- atomic_set(&rcu_barrier_cpu_count, 1);
+ init_completion(&rsp->barrier_completion);
+ atomic_set(&rsp->barrier_cpu_count, 1);
raw_spin_lock_irqsave(&rsp->onofflock, flags);
rsp->rcu_barrier_in_progress = current;
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
@@ -2337,14 +2403,19 @@ static void _rcu_barrier(struct rcu_state *rsp,
preempt_disable();
rdp = per_cpu_ptr(rsp->rda, cpu);
if (cpu_is_offline(cpu)) {
+ _rcu_barrier_trace(rsp, "Offline", cpu,
+ rsp->n_barrier_done);
preempt_enable();
while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen))
schedule_timeout_interruptible(1);
} else if (ACCESS_ONCE(rdp->qlen)) {
- smp_call_function_single(cpu, rcu_barrier_func,
- (void *)call_rcu_func, 1);
+ _rcu_barrier_trace(rsp, "OnlineQ", cpu,
+ rsp->n_barrier_done);
+ smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
preempt_enable();
} else {
+ _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
+ rsp->n_barrier_done);
preempt_enable();
}
}
@@ -2361,24 +2432,32 @@ static void _rcu_barrier(struct rcu_state *rsp,
rcu_adopt_orphan_cbs(rsp);
rsp->rcu_barrier_in_progress = NULL;
raw_spin_unlock_irqrestore(&rsp->onofflock, flags);
- atomic_inc(&rcu_barrier_cpu_count);
+ atomic_inc(&rsp->barrier_cpu_count);
smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */
- call_rcu_func(&rh, rcu_barrier_callback);
+ rd.rsp = rsp;
+ rsp->call(&rd.barrier_head, rcu_barrier_callback);
/*
* Now that we have an rcu_barrier_callback() callback on each
* CPU, and thus each counted, remove the initial count.
*/
- if (atomic_dec_and_test(&rcu_barrier_cpu_count))
- complete(&rcu_barrier_completion);
+ if (atomic_dec_and_test(&rsp->barrier_cpu_count))
+ complete(&rsp->barrier_completion);
+
+ /* Increment ->n_barrier_done to prevent duplicate work. */
+ smp_mb(); /* Keep increment after above mechanism. */
+ ACCESS_ONCE(rsp->n_barrier_done)++;
+ WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
+ _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
+ smp_mb(); /* Keep increment before caller's subsequent code. */
/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
- wait_for_completion(&rcu_barrier_completion);
+ wait_for_completion(&rsp->barrier_completion);
/* Other rcu_barrier() invocations can now safely proceed. */
- mutex_unlock(&rcu_barrier_mutex);
+ mutex_unlock(&rsp->barrier_mutex);
- destroy_rcu_head_on_stack(&rh);
+ destroy_rcu_head_on_stack(&rd.barrier_head);
}
/**
@@ -2386,7 +2465,7 @@ static void _rcu_barrier(struct rcu_state *rsp,
*/
void rcu_barrier_bh(void)
{
- _rcu_barrier(&rcu_bh_state, call_rcu_bh);
+ _rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -2395,7 +2474,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
*/
void rcu_barrier_sched(void)
{
- _rcu_barrier(&rcu_sched_state, call_rcu_sched);
+ _rcu_barrier(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
@@ -2406,18 +2485,15 @@ static void __init
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
{
unsigned long flags;
- int i;
struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
struct rcu_node *rnp = rcu_get_root(rsp);
/* Set up local state, ensuring consistent view of global state. */
raw_spin_lock_irqsave(&rnp->lock, flags);
rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
- rdp->nxtlist = NULL;
- for (i = 0; i < RCU_NEXT_SIZE; i++)
- rdp->nxttail[i] = &rdp->nxtlist;
+ init_callback_list(rdp);
rdp->qlen_lazy = 0;
- rdp->qlen = 0;
+ ACCESS_ONCE(rdp->qlen) = 0;
rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -2491,9 +2567,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
static void __cpuinit rcu_prepare_cpu(int cpu)
{
- rcu_init_percpu_data(cpu, &rcu_sched_state, 0);
- rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
- rcu_preempt_init_percpu_data(cpu);
+ struct rcu_state *rsp;
+
+ for_each_rcu_flavor(rsp)
+ rcu_init_percpu_data(cpu, rsp,
+ strcmp(rsp->name, "rcu_preempt") == 0);
}
/*
@@ -2505,6 +2583,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
long cpu = (long)hcpu;
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
struct rcu_node *rnp = rdp->mynode;
+ struct rcu_state *rsp;
trace_rcu_utilization("Start CPU hotplug");
switch (action) {
@@ -2529,18 +2608,16 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
* touch any data without introducing corruption. We send the
* dying CPU's callbacks to an arbitrarily chosen online CPU.
*/
- rcu_cleanup_dying_cpu(&rcu_bh_state);
- rcu_cleanup_dying_cpu(&rcu_sched_state);
- rcu_preempt_cleanup_dying_cpu();
+ for_each_rcu_flavor(rsp)
+ rcu_cleanup_dying_cpu(rsp);
rcu_cleanup_after_idle(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
- rcu_cleanup_dead_cpu(cpu, &rcu_bh_state);
- rcu_cleanup_dead_cpu(cpu, &rcu_sched_state);
- rcu_preempt_cleanup_dead_cpu(cpu);
+ for_each_rcu_flavor(rsp)
+ rcu_cleanup_dead_cpu(cpu, rsp);
break;
default:
break;
@@ -2573,9 +2650,9 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
int i;
- for (i = NUM_RCU_LVLS - 1; i > 0; i--)
+ for (i = rcu_num_lvls - 1; i > 0; i--)
rsp->levelspread[i] = CONFIG_RCU_FANOUT;
- rsp->levelspread[0] = CONFIG_RCU_FANOUT_LEAF;
+ rsp->levelspread[0] = rcu_fanout_leaf;
}
#else /* #ifdef CONFIG_RCU_FANOUT_EXACT */
static void __init rcu_init_levelspread(struct rcu_state *rsp)
@@ -2585,7 +2662,7 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
int i;
cprv = NR_CPUS;
- for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+ for (i = rcu_num_lvls - 1; i >= 0; i--) {
ccur = rsp->levelcnt[i];
rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
cprv = ccur;
@@ -2612,13 +2689,15 @@ static void __init rcu_init_one(struct rcu_state *rsp,
/* Initialize the level-tracking arrays. */
- for (i = 1; i < NUM_RCU_LVLS; i++)
+ for (i = 0; i < rcu_num_lvls; i++)
+ rsp->levelcnt[i] = num_rcu_lvl[i];
+ for (i = 1; i < rcu_num_lvls; i++)
rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
rcu_init_levelspread(rsp);
/* Initialize the elements themselves, starting from the leaves. */
- for (i = NUM_RCU_LVLS - 1; i >= 0; i--) {
+ for (i = rcu_num_lvls - 1; i >= 0; i--) {
cpustride *= rsp->levelspread[i];
rnp = rsp->level[i];
for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
@@ -2648,13 +2727,74 @@ static void __init rcu_init_one(struct rcu_state *rsp,
}
rsp->rda = rda;
- rnp = rsp->level[NUM_RCU_LVLS - 1];
+ rnp = rsp->level[rcu_num_lvls - 1];
for_each_possible_cpu(i) {
while (i > rnp->grphi)
rnp++;
per_cpu_ptr(rsp->rda, i)->mynode = rnp;
rcu_boot_init_percpu_data(i, rsp);
}
+ list_add(&rsp->flavors, &rcu_struct_flavors);
+}
+
+/*
+ * Compute the rcu_node tree geometry from kernel parameters. This cannot
+ * replace the definitions in rcutree.h because those are needed to size
+ * the ->node array in the rcu_state structure.
+ */
+static void __init rcu_init_geometry(void)
+{
+ int i;
+ int j;
+ int n = nr_cpu_ids;
+ int rcu_capacity[MAX_RCU_LVLS + 1];
+
+ /* If the compile-time values are accurate, just leave. */
+ if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF)
+ return;
+
+ /*
+ * Compute number of nodes that can be handled an rcu_node tree
+ * with the given number of levels. Setting rcu_capacity[0] makes
+ * some of the arithmetic easier.
+ */
+ rcu_capacity[0] = 1;
+ rcu_capacity[1] = rcu_fanout_leaf;
+ for (i = 2; i <= MAX_RCU_LVLS; i++)
+ rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
+
+ /*
+ * The boot-time rcu_fanout_leaf parameter is only permitted
+ * to increase the leaf-level fanout, not decrease it. Of course,
+ * the leaf-level fanout cannot exceed the number of bits in
+ * the rcu_node masks. Finally, the tree must be able to accommodate
+ * the configured number of CPUs. Complain and fall back to the
+ * compile-time values if these limits are exceeded.
+ */
+ if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
+ rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
+ n > rcu_capacity[MAX_RCU_LVLS]) {
+ WARN_ON(1);
+ return;
+ }
+
+ /* Calculate the number of rcu_nodes at each level of the tree. */
+ for (i = 1; i <= MAX_RCU_LVLS; i++)
+ if (n <= rcu_capacity[i]) {
+ for (j = 0; j <= i; j++)
+ num_rcu_lvl[j] =
+ DIV_ROUND_UP(n, rcu_capacity[i - j]);
+ rcu_num_lvls = i;
+ for (j = i + 1; j <= MAX_RCU_LVLS; j++)
+ num_rcu_lvl[j] = 0;
+ break;
+ }
+
+ /* Calculate the total number of rcu_node structures. */
+ rcu_num_nodes = 0;
+ for (i = 0; i <= MAX_RCU_LVLS; i++)
+ rcu_num_nodes += num_rcu_lvl[i];
+ rcu_num_nodes -= n;
}
void __init rcu_init(void)
@@ -2662,6 +2802,7 @@ void __init rcu_init(void)
int cpu;
rcu_bootup_announce();
+ rcu_init_geometry();
rcu_init_one(&rcu_sched_state, &rcu_sched_data);
rcu_init_one(&rcu_bh_state, &rcu_bh_data);
__rcu_init_preempt();
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index ea056495783e..4d29169f2124 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -42,28 +42,28 @@
#define RCU_FANOUT_4 (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)
#if NR_CPUS <= RCU_FANOUT_1
-# define NUM_RCU_LVLS 1
+# define RCU_NUM_LVLS 1
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 (NR_CPUS)
# define NUM_RCU_LVL_2 0
# define NUM_RCU_LVL_3 0
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_2
-# define NUM_RCU_LVLS 2
+# define RCU_NUM_LVLS 2
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_2 (NR_CPUS)
# define NUM_RCU_LVL_3 0
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_3
-# define NUM_RCU_LVLS 3
+# define RCU_NUM_LVLS 3
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
# define NUM_RCU_LVL_3 (NR_CPUS)
# define NUM_RCU_LVL_4 0
#elif NR_CPUS <= RCU_FANOUT_4
-# define NUM_RCU_LVLS 4
+# define RCU_NUM_LVLS 4
# define NUM_RCU_LVL_0 1
# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
@@ -76,6 +76,9 @@
#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3 + NUM_RCU_LVL_4)
#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
+extern int rcu_num_lvls;
+extern int rcu_num_nodes;
+
/*
* Dynticks per-CPU state.
*/
@@ -97,6 +100,7 @@ struct rcu_dynticks {
/* # times non-lazy CBs posted to CPU. */
unsigned long nonlazy_posted_snap;
/* idle-period nonlazy_posted snapshot. */
+ int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */
#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
};
@@ -206,7 +210,7 @@ struct rcu_node {
*/
#define rcu_for_each_node_breadth_first(rsp, rnp) \
for ((rnp) = &(rsp)->node[0]; \
- (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+ (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
/*
* Do a breadth-first scan of the non-leaf rcu_node structures for the
@@ -215,7 +219,7 @@ struct rcu_node {
*/
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
for ((rnp) = &(rsp)->node[0]; \
- (rnp) < (rsp)->level[NUM_RCU_LVLS - 1]; (rnp)++)
+ (rnp) < (rsp)->level[rcu_num_lvls - 1]; (rnp)++)
/*
* Scan the leaves of the rcu_node hierarchy for the specified rcu_state
@@ -224,8 +228,8 @@ struct rcu_node {
* It is still a leaf node, even if it is also the root node.
*/
#define rcu_for_each_leaf_node(rsp, rnp) \
- for ((rnp) = (rsp)->level[NUM_RCU_LVLS - 1]; \
- (rnp) < &(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+ for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
+ (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
/* Index values for nxttail array in struct rcu_data. */
#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
@@ -311,6 +315,9 @@ struct rcu_data {
unsigned long n_rp_need_fqs;
unsigned long n_rp_need_nothing;
+ /* 6) _rcu_barrier() callback. */
+ struct rcu_head barrier_head;
+
int cpu;
struct rcu_state *rsp;
};
@@ -357,10 +364,12 @@ do { \
*/
struct rcu_state {
struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
- struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
+ struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */
u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
- u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
+ u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */
+ void (*call)(struct rcu_head *head, /* call_rcu() flavor. */
+ void (*func)(struct rcu_head *head));
/* The following fields are guarded by the root rcu_node's lock. */
@@ -392,6 +401,11 @@ struct rcu_state {
struct task_struct *rcu_barrier_in_progress;
/* Task doing rcu_barrier(), */
/* or NULL if no barrier. */
+ struct mutex barrier_mutex; /* Guards barrier fields. */
+ atomic_t barrier_cpu_count; /* # CPUs waiting on. */
+ struct completion barrier_completion; /* Wake at barrier end. */
+ unsigned long n_barrier_done; /* ++ at start and end of */
+ /* _rcu_barrier(). */
raw_spinlock_t fqslock; /* Only one task forcing */
/* quiescent states. */
unsigned long jiffies_force_qs; /* Time at which to invoke */
@@ -409,8 +423,13 @@ struct rcu_state {
unsigned long gp_max; /* Maximum GP duration in */
/* jiffies. */
char *name; /* Name of structure. */
+ struct list_head flavors; /* List of RCU flavors. */
};
+extern struct list_head rcu_struct_flavors;
+#define for_each_rcu_flavor(rsp) \
+ list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
+
/* Return values for rcu_preempt_offline_tasks(). */
#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
@@ -444,6 +463,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
/* Forward declarations for rcutree_plugin.h */
static void rcu_bootup_announce(void);
long rcu_batches_completed(void);
+static void rcu_preempt_note_context_switch(int cpu);
static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
@@ -452,25 +472,18 @@ static void rcu_stop_cpu_kthread(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp);
-static void rcu_preempt_stall_reset(void);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
struct rcu_node *rnp,
struct rcu_data *rdp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-static void rcu_preempt_cleanup_dead_cpu(int cpu);
static void rcu_preempt_check_callbacks(int cpu);
-static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
bool wake);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
-static int rcu_preempt_pending(int cpu);
-static int rcu_preempt_cpu_has_callbacks(int cpu);
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
-static void rcu_preempt_cleanup_dying_cpu(void);
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 5271a020887e..7f3244c0df01 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -68,17 +68,21 @@ static void __init rcu_bootup_announce_oddness(void)
printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
#endif
#if NUM_RCU_LVL_4 != 0
- printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
+ printk(KERN_INFO "\tFour-level hierarchy is enabled.\n");
#endif
+ if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
+ printk(KERN_INFO "\tExperimental boot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
+ if (nr_cpu_ids != NR_CPUS)
+ printk(KERN_INFO "\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
}
#ifdef CONFIG_TREE_PREEMPT_RCU
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
+struct rcu_state rcu_preempt_state =
+ RCU_STATE_INITIALIZER(rcu_preempt, call_rcu);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;
-static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
/*
@@ -153,7 +157,7 @@ static void rcu_preempt_qs(int cpu)
*
* Caller must disable preemption.
*/
-void rcu_preempt_note_context_switch(void)
+static void rcu_preempt_note_context_switch(int cpu)
{
struct task_struct *t = current;
unsigned long flags;
@@ -164,7 +168,7 @@ void rcu_preempt_note_context_switch(void)
(t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
/* Possibly blocking in an RCU read-side critical section. */
- rdp = __this_cpu_ptr(rcu_preempt_state.rda);
+ rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
rnp = rdp->mynode;
raw_spin_lock_irqsave(&rnp->lock, flags);
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
@@ -228,23 +232,11 @@ void rcu_preempt_note_context_switch(void)
* means that we continue to block the current grace period.
*/
local_irq_save(flags);
- rcu_preempt_qs(smp_processor_id());
+ rcu_preempt_qs(cpu);
local_irq_restore(flags);
}
/*
- * Tree-preemptible RCU implementation for rcu_read_lock().
- * Just increment ->rcu_read_lock_nesting, shared state will be updated
- * if we block.
- */
-void __rcu_read_lock(void)
-{
- current->rcu_read_lock_nesting++;
- barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
-/*
* Check for preempted RCU readers blocking the current grace period
* for the specified rcu_node structure. If the caller needs a reliable
* answer, it must hold the rcu_node's ->lock.
@@ -310,7 +302,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
-static noinline void rcu_read_unlock_special(struct task_struct *t)
+void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
int empty_exp;
@@ -398,8 +390,9 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
rnp->grphi,
!!rnp->gp_tasks);
rcu_report_unblock_qs_rnp(rnp, flags);
- } else
+ } else {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ }
#ifdef CONFIG_RCU_BOOST
/* Unboost if we were boosted. */
@@ -418,38 +411,6 @@ static noinline void rcu_read_unlock_special(struct task_struct *t)
}
}
-/*
- * Tree-preemptible RCU implementation for rcu_read_unlock().
- * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
- * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
- * invoke rcu_read_unlock_special() to clean up after a context switch
- * in an RCU read-side critical section and other special cases.
- */
-void __rcu_read_unlock(void)
-{
- struct task_struct *t = current;
-
- if (t->rcu_read_lock_nesting != 1)
- --t->rcu_read_lock_nesting;
- else {
- barrier(); /* critical section before exit code. */
- t->rcu_read_lock_nesting = INT_MIN;
- barrier(); /* assign before ->rcu_read_unlock_special load */
- if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
- rcu_read_unlock_special(t);
- barrier(); /* ->rcu_read_unlock_special load before assign */
- t->rcu_read_lock_nesting = 0;
- }
-#ifdef CONFIG_PROVE_LOCKING
- {
- int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
-
- WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
- }
-#endif /* #ifdef CONFIG_PROVE_LOCKING */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_unlock);
-
#ifdef CONFIG_RCU_CPU_STALL_VERBOSE
/*
@@ -540,16 +501,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
}
/*
- * Suppress preemptible RCU's CPU stall warnings by pushing the
- * time of the next stall-warning message comfortably far into the
- * future.
- */
-static void rcu_preempt_stall_reset(void)
-{
- rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
-}
-
-/*
* Check that the list of blocked tasks for the newly completed grace
* period is in fact empty. It is a serious bug to complete a grace
* period that still has RCU readers blocked! This function must be
@@ -650,14 +601,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
- * Do CPU-offline processing for preemptible RCU.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
- rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
-}
-
-/*
* Check for a quiescent state from the current CPU. When a task blocks,
* the task is recorded in the corresponding CPU's rcu_node structure,
* which is checked elsewhere.
@@ -677,15 +620,6 @@ static void rcu_preempt_check_callbacks(int cpu)
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
-/*
- * Process callbacks for preemptible RCU.
- */
-static void rcu_preempt_process_callbacks(void)
-{
- __rcu_process_callbacks(&rcu_preempt_state,
- &__get_cpu_var(rcu_preempt_data));
-}
-
#ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void)
@@ -824,9 +758,9 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
int must_wait = 0;
raw_spin_lock_irqsave(&rnp->lock, flags);
- if (list_empty(&rnp->blkd_tasks))
+ if (list_empty(&rnp->blkd_tasks)) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
- else {
+ } else {
rnp->exp_tasks = rnp->blkd_tasks.next;
rcu_initiate_boost(rnp, flags); /* releases rnp->lock */
must_wait = 1;
@@ -870,9 +804,9 @@ void synchronize_rcu_expedited(void)
* expedited grace period for us, just leave.
*/
while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
- if (trycount++ < 10)
+ if (trycount++ < 10) {
udelay(trycount * num_online_cpus());
- else {
+ } else {
synchronize_rcu();
return;
}
@@ -917,51 +851,16 @@ mb_ret:
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
-/*
- * Check to see if there is any immediate preemptible-RCU-related work
- * to be done.
- */
-static int rcu_preempt_pending(int cpu)
-{
- return __rcu_pending(&rcu_preempt_state,
- &per_cpu(rcu_preempt_data, cpu));
-}
-
-/*
- * Does preemptible RCU have callbacks on this CPU?
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
- return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
-}
-
/**
* rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
*/
void rcu_barrier(void)
{
- _rcu_barrier(&rcu_preempt_state, call_rcu);
+ _rcu_barrier(&rcu_preempt_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
/*
- * Initialize preemptible RCU's per-CPU data.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
- rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
-}
-
-/*
- * Move preemptible RCU's callbacks from dying CPU to other online CPU
- * and record a quiescent state.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
- rcu_cleanup_dying_cpu(&rcu_preempt_state);
-}
-
-/*
* Initialize preemptible RCU's state structures.
*/
static void __init __rcu_init_preempt(void)
@@ -1002,6 +901,14 @@ void rcu_force_quiescent_state(void)
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
/*
+ * Because preemptible RCU does not exist, we never have to check for
+ * CPUs being in quiescent states.
+ */
+static void rcu_preempt_note_context_switch(int cpu)
+{
+}
+
+/*
* Because preemptible RCU does not exist, there are never any preempted
* RCU readers.
*/
@@ -1038,14 +945,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
}
/*
- * Because preemptible RCU does not exist, there is no need to suppress
- * its CPU stall warnings.
- */
-static void rcu_preempt_stall_reset(void)
-{
-}
-
-/*
* Because there is no preemptible RCU, there can be no readers blocked,
* so there is no need to check for blocked tasks. So check only for
* bogus qsmask values.
@@ -1073,14 +972,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
- * Because preemptible RCU does not exist, it never needs CPU-offline
- * processing.
- */
-static void rcu_preempt_cleanup_dead_cpu(int cpu)
-{
-}
-
-/*
* Because preemptible RCU does not exist, it never has any callbacks
* to check.
*/
@@ -1089,14 +980,6 @@ static void rcu_preempt_check_callbacks(int cpu)
}
/*
- * Because preemptible RCU does not exist, it never has any callbacks
- * to process.
- */
-static void rcu_preempt_process_callbacks(void)
-{
-}
-
-/*
* Queue an RCU callback for lazy invocation after a grace period.
* This will likely be later named something like "call_rcu_lazy()",
* but this change will require some way of tagging the lazy RCU
@@ -1137,22 +1020,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
- * Because preemptible RCU does not exist, it never has any work to do.
- */
-static int rcu_preempt_pending(int cpu)
-{
- return 0;
-}
-
-/*
- * Because preemptible RCU does not exist, it never has callbacks
- */
-static int rcu_preempt_cpu_has_callbacks(int cpu)
-{
- return 0;
-}
-
-/*
* Because preemptible RCU does not exist, rcu_barrier() is just
* another name for rcu_barrier_sched().
*/
@@ -1163,21 +1030,6 @@ void rcu_barrier(void)
EXPORT_SYMBOL_GPL(rcu_barrier);
/*
- * Because preemptible RCU does not exist, there is no per-CPU
- * data to initialize.
- */
-static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
-{
-}
-
-/*
- * Because there is no preemptible RCU, there is no cleanup to do.
- */
-static void rcu_preempt_cleanup_dying_cpu(void)
-{
-}
-
-/*
* Because preemptible RCU does not exist, it need not be initialized.
*/
static void __init __rcu_init_preempt(void)
@@ -1960,9 +1812,11 @@ static void rcu_idle_count_callbacks_posted(void)
*/
#define RCU_IDLE_FLUSHES 5 /* Number of dyntick-idle tries. */
#define RCU_IDLE_OPT_FLUSHES 3 /* Optional dyntick-idle tries. */
-#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
+#define RCU_IDLE_GP_DELAY 4 /* Roughly one grace period. */
#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
+extern int tick_nohz_enabled;
+
/*
* Does the specified flavor of RCU have non-lazy callbacks pending on
* the specified CPU? Both RCU flavor and CPU are specified by the
@@ -2039,10 +1893,13 @@ int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
return 1;
}
/* Set up for the possibility that RCU will post a timer. */
- if (rcu_cpu_has_nonlazy_callbacks(cpu))
- *delta_jiffies = RCU_IDLE_GP_DELAY;
- else
- *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
+ if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
+ *delta_jiffies = round_up(RCU_IDLE_GP_DELAY + jiffies,
+ RCU_IDLE_GP_DELAY) - jiffies;
+ } else {
+ *delta_jiffies = jiffies + RCU_IDLE_LAZY_GP_DELAY;
+ *delta_jiffies = round_jiffies(*delta_jiffies) - jiffies;
+ }
return 0;
}
@@ -2101,6 +1958,7 @@ static void rcu_cleanup_after_idle(int cpu)
del_timer(&rdtp->idle_gp_timer);
trace_rcu_prep_idle("Cleanup after idle");
+ rdtp->tick_nohz_enabled_snap = ACCESS_ONCE(tick_nohz_enabled);
}
/*
@@ -2126,6 +1984,18 @@ static void rcu_prepare_for_idle(int cpu)
{
struct timer_list *tp;
struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+ int tne;
+
+ /* Handle nohz enablement switches conservatively. */
+ tne = ACCESS_ONCE(tick_nohz_enabled);
+ if (tne != rdtp->tick_nohz_enabled_snap) {
+ if (rcu_cpu_has_callbacks(cpu))
+ invoke_rcu_core(); /* force nohz to see update. */
+ rdtp->tick_nohz_enabled_snap = tne;
+ return;
+ }
+ if (!tne)
+ return;
/*
* If this is an idle re-entry, for example, due to use of
@@ -2179,10 +2049,11 @@ static void rcu_prepare_for_idle(int cpu)
if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
trace_rcu_prep_idle("Dyntick with callbacks");
rdtp->idle_gp_timer_expires =
- jiffies + RCU_IDLE_GP_DELAY;
+ round_up(jiffies + RCU_IDLE_GP_DELAY,
+ RCU_IDLE_GP_DELAY);
} else {
rdtp->idle_gp_timer_expires =
- jiffies + RCU_IDLE_LAZY_GP_DELAY;
+ round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY);
trace_rcu_prep_idle("Dyntick with lazy callbacks");
}
tp = &rdtp->idle_gp_timer;
@@ -2223,8 +2094,9 @@ static void rcu_prepare_for_idle(int cpu)
if (rcu_cpu_has_callbacks(cpu)) {
trace_rcu_prep_idle("More callbacks");
invoke_rcu_core();
- } else
+ } else {
trace_rcu_prep_idle("Callbacks drained");
+ }
}
/*
@@ -2261,6 +2133,7 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
+ *cp = '\0';
}
#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index d4bc16ddd1d4..abffb486e94e 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,6 +46,31 @@
#define RCU_TREE_NONCORE
#include "rcutree.h"
+static int show_rcubarrier(struct seq_file *m, void *unused)
+{
+ struct rcu_state *rsp;
+
+ for_each_rcu_flavor(rsp)
+ seq_printf(m, "%s: %c bcc: %d nbd: %lu\n",
+ rsp->name, rsp->rcu_barrier_in_progress ? 'B' : '.',
+ atomic_read(&rsp->barrier_cpu_count),
+ rsp->n_barrier_done);
+ return 0;
+}
+
+static int rcubarrier_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_rcubarrier, NULL);
+}
+
+static const struct file_operations rcubarrier_fops = {
+ .owner = THIS_MODULE,
+ .open = rcubarrier_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
#ifdef CONFIG_RCU_BOOST
static char convert_kthread_status(unsigned int kthread_status)
@@ -95,24 +120,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
}
-#define PRINT_RCU_DATA(name, func, m) \
- do { \
- int _p_r_d_i; \
- \
- for_each_possible_cpu(_p_r_d_i) \
- func(m, &per_cpu(name, _p_r_d_i)); \
- } while (0)
-
static int show_rcudata(struct seq_file *m, void *unused)
{
-#ifdef CONFIG_TREE_PREEMPT_RCU
- seq_puts(m, "rcu_preempt:\n");
- PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data, m);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
- seq_puts(m, "rcu_sched:\n");
- PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data, m);
- seq_puts(m, "rcu_bh:\n");
- PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data, m);
+ int cpu;
+ struct rcu_state *rsp;
+
+ for_each_rcu_flavor(rsp) {
+ seq_printf(m, "%s:\n", rsp->name);
+ for_each_possible_cpu(cpu)
+ print_one_rcu_data(m, per_cpu_ptr(rsp->rda, cpu));
+ }
return 0;
}
@@ -166,6 +183,9 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
static int show_rcudata_csv(struct seq_file *m, void *unused)
{
+ int cpu;
+ struct rcu_state *rsp;
+
seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\",");
seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\"");
@@ -173,14 +193,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
seq_puts(m, "\"kt\",\"ktl\"");
#endif /* #ifdef CONFIG_RCU_BOOST */
seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
-#ifdef CONFIG_TREE_PREEMPT_RCU
- seq_puts(m, "\"rcu_preempt:\"\n");
- PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
- seq_puts(m, "\"rcu_sched:\"\n");
- PRINT_RCU_DATA(rcu_sched_data, print_one_rcu_data_csv, m);
- seq_puts(m, "\"rcu_bh:\"\n");
- PRINT_RCU_DATA(rcu_bh_data, print_one_rcu_data_csv, m);
+ for_each_rcu_flavor(rsp) {
+ seq_printf(m, "\"%s:\"\n", rsp->name);
+ for_each_possible_cpu(cpu)
+ print_one_rcu_data_csv(m, per_cpu_ptr(rsp->rda, cpu));
+ }
return 0;
}
@@ -201,8 +218,7 @@ static const struct file_operations rcudata_csv_fops = {
static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp)
{
- seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu "
- "j=%04x bt=%04x\n",
+ seq_printf(m, "%d:%d tasks=%c%c%c%c kt=%c ntb=%lu neb=%lu nnb=%lu ",
rnp->grplo, rnp->grphi,
"T."[list_empty(&rnp->blkd_tasks)],
"N."[!rnp->gp_tasks],
@@ -210,11 +226,11 @@ static void print_one_rcu_node_boost(struct seq_file *m, struct rcu_node *rnp)
"B."[!rnp->boost_tasks],
convert_kthread_status(rnp->boost_kthread_status),
rnp->n_tasks_boosted, rnp->n_exp_boosts,
- rnp->n_normal_boosts,
+ rnp->n_normal_boosts);
+ seq_printf(m, "j=%04x bt=%04x\n",
(int)(jiffies & 0xffff),
(int)(rnp->boost_time & 0xffff));
- seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n",
- " balk",
+ seq_printf(m, " balk: nt=%lu egt=%lu bt=%lu nb=%lu ny=%lu nos=%lu\n",
rnp->n_balk_blkd_tasks,
rnp->n_balk_exp_gp_tasks,
rnp->n_balk_boost_tasks,
@@ -270,15 +286,15 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
struct rcu_node *rnp;
gpnum = rsp->gpnum;
- seq_printf(m, "c=%lu g=%lu s=%d jfq=%ld j=%x "
- "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
- rsp->completed, gpnum, rsp->fqs_state,
+ seq_printf(m, "%s: c=%lu g=%lu s=%d jfq=%ld j=%x ",
+ rsp->name, rsp->completed, gpnum, rsp->fqs_state,
(long)(rsp->jiffies_force_qs - jiffies),
- (int)(jiffies & 0xffff),
+ (int)(jiffies & 0xffff));
+ seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
rsp->n_force_qs, rsp->n_force_qs_ngp,
rsp->n_force_qs - rsp->n_force_qs_ngp,
rsp->n_force_qs_lh, rsp->qlen_lazy, rsp->qlen);
- for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < NUM_RCU_NODES; rnp++) {
+ for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
if (rnp->level != level) {
seq_puts(m, "\n");
level = rnp->level;
@@ -295,14 +311,10 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
static int show_rcuhier(struct seq_file *m, void *unused)
{
-#ifdef CONFIG_TREE_PREEMPT_RCU
- seq_puts(m, "rcu_preempt:\n");
- print_one_rcu_state(m, &rcu_preempt_state);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
- seq_puts(m, "rcu_sched:\n");
- print_one_rcu_state(m, &rcu_sched_state);
- seq_puts(m, "rcu_bh:\n");
- print_one_rcu_state(m, &rcu_bh_state);
+ struct rcu_state *rsp;
+
+ for_each_rcu_flavor(rsp)
+ print_one_rcu_state(m, rsp);
return 0;
}
@@ -343,11 +355,10 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
static int show_rcugp(struct seq_file *m, void *unused)
{
-#ifdef CONFIG_TREE_PREEMPT_RCU
- show_one_rcugp(m, &rcu_preempt_state);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
- show_one_rcugp(m, &rcu_sched_state);
- show_one_rcugp(m, &rcu_bh_state);
+ struct rcu_state *rsp;
+
+ for_each_rcu_flavor(rsp)
+ show_one_rcugp(m, rsp);
return 0;
}
@@ -366,44 +377,36 @@ static const struct file_operations rcugp_fops = {
static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
{
- seq_printf(m, "%3d%cnp=%ld "
- "qsp=%ld rpq=%ld cbr=%ld cng=%ld "
- "gpc=%ld gps=%ld nf=%ld nn=%ld\n",
+ seq_printf(m, "%3d%cnp=%ld ",
rdp->cpu,
cpu_is_offline(rdp->cpu) ? '!' : ' ',
- rdp->n_rcu_pending,
+ rdp->n_rcu_pending);
+ seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ",
rdp->n_rp_qs_pending,
rdp->n_rp_report_qs,
rdp->n_rp_cb_ready,
- rdp->n_rp_cpu_needs_gp,
+ rdp->n_rp_cpu_needs_gp);
+ seq_printf(m, "gpc=%ld gps=%ld nf=%ld nn=%ld\n",
rdp->n_rp_gp_completed,
rdp->n_rp_gp_started,
rdp->n_rp_need_fqs,
rdp->n_rp_need_nothing);
}
-static void print_rcu_pendings(struct seq_file *m, struct rcu_state *rsp)
+static int show_rcu_pending(struct seq_file *m, void *unused)
{
int cpu;
struct rcu_data *rdp;
-
- for_each_possible_cpu(cpu) {
- rdp = per_cpu_ptr(rsp->rda, cpu);
- if (rdp->beenonline)
- print_one_rcu_pending(m, rdp);
+ struct rcu_state *rsp;
+
+ for_each_rcu_flavor(rsp) {
+ seq_printf(m, "%s:\n", rsp->name);
+ for_each_possible_cpu(cpu) {
+ rdp = per_cpu_ptr(rsp->rda, cpu);
+ if (rdp->beenonline)
+ print_one_rcu_pending(m, rdp);
+ }
}
-}
-
-static int show_rcu_pending(struct seq_file *m, void *unused)
-{
-#ifdef CONFIG_TREE_PREEMPT_RCU
- seq_puts(m, "rcu_preempt:\n");
- print_rcu_pendings(m, &rcu_preempt_state);
-#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
- seq_puts(m, "rcu_sched:\n");
- print_rcu_pendings(m, &rcu_sched_state);
- seq_puts(m, "rcu_bh:\n");
- print_rcu_pendings(m, &rcu_bh_state);
return 0;
}
@@ -453,6 +456,11 @@ static int __init rcutree_trace_init(void)
if (!rcudir)
goto free_out;
+ retval = debugfs_create_file("rcubarrier", 0444, rcudir,
+ NULL, &rcubarrier_fops);
+ if (!retval)
+ goto free_out;
+
retval = debugfs_create_file("rcudata", 0444, rcudir,
NULL, &rcudata_fops);
if (!retval)
diff --git a/kernel/relay.c b/kernel/relay.c
index ab56a1764d4d..e8cd2027abbd 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
struct splice_pipe_desc spd = {
.pages = pages,
.nr_pages = 0,
+ .nr_pages_max = PIPE_DEF_BUFFERS,
.partial = partial,
.flags = flags,
.ops = &relay_pipe_buf_ops,
@@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in,
ret += padding;
out:
- splice_shrink_spd(pipe, &spd);
- return ret;
+ splice_shrink_spd(&spd);
+ return ret;
}
static ssize_t relay_file_splice_read(struct file *in,
diff --git a/kernel/resource.c b/kernel/resource.c
index e1d2b8ee76d5..dc8b47764443 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -722,14 +722,12 @@ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t
write_lock(&resource_lock);
+ if (!parent)
+ goto skip;
+
if ((start < parent->start) || (end > parent->end))
goto out;
- for (tmp = res->child; tmp; tmp = tmp->sibling) {
- if ((tmp->start < start) || (tmp->end > end))
- goto out;
- }
-
if (res->sibling && (res->sibling->start <= end))
goto out;
@@ -741,6 +739,11 @@ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t
goto out;
}
+skip:
+ for (tmp = res->child; tmp; tmp = tmp->sibling)
+ if ((tmp->start < start) || (tmp->end > end))
+ goto out;
+
res->start = start;
res->end = end;
result = 0;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d5594a4268d4..468bdd44c1ba 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2081,7 +2081,6 @@ context_switch(struct rq *rq, struct task_struct *prev,
#endif
/* Here we just switch the register state and the stack. */
- rcu_switch_from(prev);
switch_to(prev, next, prev);
barrier();
@@ -2161,11 +2160,73 @@ unsigned long this_cpu_load(void)
}
+/*
+ * Global load-average calculations
+ *
+ * We take a distributed and async approach to calculating the global load-avg
+ * in order to minimize overhead.
+ *
+ * The global load average is an exponentially decaying average of nr_running +
+ * nr_uninterruptible.
+ *
+ * Once every LOAD_FREQ:
+ *
+ * nr_active = 0;
+ * for_each_possible_cpu(cpu)
+ * nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
+ *
+ * avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
+ *
+ * Due to a number of reasons the above turns in the mess below:
+ *
+ * - for_each_possible_cpu() is prohibitively expensive on machines with
+ * serious number of cpus, therefore we need to take a distributed approach
+ * to calculating nr_active.
+ *
+ * \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
+ * = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
+ *
+ * So assuming nr_active := 0 when we start out -- true per definition, we
+ * can simply take per-cpu deltas and fold those into a global accumulate
+ * to obtain the same result. See calc_load_fold_active().
+ *
+ * Furthermore, in order to avoid synchronizing all per-cpu delta folding
+ * across the machine, we assume 10 ticks is sufficient time for every
+ * cpu to have completed this task.
+ *
+ * This places an upper-bound on the IRQ-off latency of the machine. Then
+ * again, being late doesn't loose the delta, just wrecks the sample.
+ *
+ * - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
+ * this would add another cross-cpu cacheline miss and atomic operation
+ * to the wakeup path. Instead we increment on whatever cpu the task ran
+ * when it went into uninterruptible state and decrement on whatever cpu
+ * did the wakeup. This means that only the sum of nr_uninterruptible over
+ * all cpus yields the correct result.
+ *
+ * This covers the NO_HZ=n code, for extra head-aches, see the comment below.
+ */
+
/* Variables and functions for calc_load */
static atomic_long_t calc_load_tasks;
static unsigned long calc_load_update;
unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun);
+EXPORT_SYMBOL(avenrun); /* should be removed */
+
+/**
+ * get_avenrun - get the load average array
+ * @loads: pointer to dest load array
+ * @offset: offset to add
+ * @shift: shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+ loads[0] = (avenrun[0] + offset) << shift;
+ loads[1] = (avenrun[1] + offset) << shift;
+ loads[2] = (avenrun[2] + offset) << shift;
+}
static long calc_load_fold_active(struct rq *this_rq)
{
@@ -2182,6 +2243,9 @@ static long calc_load_fold_active(struct rq *this_rq)
return delta;
}
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
@@ -2193,30 +2257,118 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
#ifdef CONFIG_NO_HZ
/*
- * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
+ * Handle NO_HZ for the global load-average.
+ *
+ * Since the above described distributed algorithm to compute the global
+ * load-average relies on per-cpu sampling from the tick, it is affected by
+ * NO_HZ.
+ *
+ * The basic idea is to fold the nr_active delta into a global idle-delta upon
+ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
+ * when we read the global state.
+ *
+ * Obviously reality has to ruin such a delightfully simple scheme:
+ *
+ * - When we go NO_HZ idle during the window, we can negate our sample
+ * contribution, causing under-accounting.
+ *
+ * We avoid this by keeping two idle-delta counters and flipping them
+ * when the window starts, thus separating old and new NO_HZ load.
+ *
+ * The only trick is the slight shift in index flip for read vs write.
+ *
+ * 0s 5s 10s 15s
+ * +10 +10 +10 +10
+ * |-|-----------|-|-----------|-|-----------|-|
+ * r:0 0 1 1 0 0 1 1 0
+ * w:0 1 1 0 0 1 1 0 0
+ *
+ * This ensures we'll fold the old idle contribution in this window while
+ * accumlating the new one.
+ *
+ * - When we wake up from NO_HZ idle during the window, we push up our
+ * contribution, since we effectively move our sample point to a known
+ * busy state.
+ *
+ * This is solved by pushing the window forward, and thus skipping the
+ * sample, for this cpu (effectively using the idle-delta for this cpu which
+ * was in effect at the time the window opened). This also solves the issue
+ * of having to deal with a cpu having been in NOHZ idle for multiple
+ * LOAD_FREQ intervals.
*
* When making the ILB scale, we should try to pull this in as well.
*/
-static atomic_long_t calc_load_tasks_idle;
+static atomic_long_t calc_load_idle[2];
+static int calc_load_idx;
-void calc_load_account_idle(struct rq *this_rq)
+static inline int calc_load_write_idx(void)
{
+ int idx = calc_load_idx;
+
+ /*
+ * See calc_global_nohz(), if we observe the new index, we also
+ * need to observe the new update time.
+ */
+ smp_rmb();
+
+ /*
+ * If the folding window started, make sure we start writing in the
+ * next idle-delta.
+ */
+ if (!time_before(jiffies, calc_load_update))
+ idx++;
+
+ return idx & 1;
+}
+
+static inline int calc_load_read_idx(void)
+{
+ return calc_load_idx & 1;
+}
+
+void calc_load_enter_idle(void)
+{
+ struct rq *this_rq = this_rq();
long delta;
+ /*
+ * We're going into NOHZ mode, if there's any pending delta, fold it
+ * into the pending idle delta.
+ */
delta = calc_load_fold_active(this_rq);
- if (delta)
- atomic_long_add(delta, &calc_load_tasks_idle);
+ if (delta) {
+ int idx = calc_load_write_idx();
+ atomic_long_add(delta, &calc_load_idle[idx]);
+ }
}
-static long calc_load_fold_idle(void)
+void calc_load_exit_idle(void)
{
- long delta = 0;
+ struct rq *this_rq = this_rq();
+
+ /*
+ * If we're still before the sample window, we're done.
+ */
+ if (time_before(jiffies, this_rq->calc_load_update))
+ return;
/*
- * Its got a race, we don't care...
+ * We woke inside or after the sample window, this means we're already
+ * accounted through the nohz accounting, so skip the entire deal and
+ * sync up for the next window.
*/
- if (atomic_long_read(&calc_load_tasks_idle))
- delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
+ this_rq->calc_load_update = calc_load_update;
+ if (time_before(jiffies, this_rq->calc_load_update + 10))
+ this_rq->calc_load_update += LOAD_FREQ;
+}
+
+static long calc_load_fold_idle(void)
+{
+ int idx = calc_load_read_idx();
+ long delta = 0;
+
+ if (atomic_long_read(&calc_load_idle[idx]))
+ delta = atomic_long_xchg(&calc_load_idle[idx], 0);
return delta;
}
@@ -2302,66 +2454,39 @@ static void calc_global_nohz(void)
{
long delta, active, n;
- /*
- * If we crossed a calc_load_update boundary, make sure to fold
- * any pending idle changes, the respective CPUs might have
- * missed the tick driven calc_load_account_active() update
- * due to NO_HZ.
- */
- delta = calc_load_fold_idle();
- if (delta)
- atomic_long_add(delta, &calc_load_tasks);
-
- /*
- * It could be the one fold was all it took, we done!
- */
- if (time_before(jiffies, calc_load_update + 10))
- return;
-
- /*
- * Catch-up, fold however many we are behind still
- */
- delta = jiffies - calc_load_update - 10;
- n = 1 + (delta / LOAD_FREQ);
+ if (!time_before(jiffies, calc_load_update + 10)) {
+ /*
+ * Catch-up, fold however many we are behind still
+ */
+ delta = jiffies - calc_load_update - 10;
+ n = 1 + (delta / LOAD_FREQ);
- active = atomic_long_read(&calc_load_tasks);
- active = active > 0 ? active * FIXED_1 : 0;
+ active = atomic_long_read(&calc_load_tasks);
+ active = active > 0 ? active * FIXED_1 : 0;
- avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
- avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
- avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+ avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+ avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+ avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
- calc_load_update += n * LOAD_FREQ;
-}
-#else
-void calc_load_account_idle(struct rq *this_rq)
-{
-}
+ calc_load_update += n * LOAD_FREQ;
+ }
-static inline long calc_load_fold_idle(void)
-{
- return 0;
+ /*
+ * Flip the idle index...
+ *
+ * Make sure we first write the new time then flip the index, so that
+ * calc_load_write_idx() will see the new time when it reads the new
+ * index, this avoids a double flip messing things up.
+ */
+ smp_wmb();
+ calc_load_idx++;
}
+#else /* !CONFIG_NO_HZ */
-static void calc_global_nohz(void)
-{
-}
-#endif
+static inline long calc_load_fold_idle(void) { return 0; }
+static inline void calc_global_nohz(void) { }
-/**
- * get_avenrun - get the load average array
- * @loads: pointer to dest load array
- * @offset: offset to add
- * @shift: shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
- loads[0] = (avenrun[0] + offset) << shift;
- loads[1] = (avenrun[1] + offset) << shift;
- loads[2] = (avenrun[2] + offset) << shift;
-}
+#endif /* CONFIG_NO_HZ */
/*
* calc_load - update the avenrun load estimates 10 ticks after the
@@ -2369,11 +2494,18 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
*/
void calc_global_load(unsigned long ticks)
{
- long active;
+ long active, delta;
if (time_before(jiffies, calc_load_update + 10))
return;
+ /*
+ * Fold the 'old' idle-delta to include all NO_HZ cpus.
+ */
+ delta = calc_load_fold_idle();
+ if (delta)
+ atomic_long_add(delta, &calc_load_tasks);
+
active = atomic_long_read(&calc_load_tasks);
active = active > 0 ? active * FIXED_1 : 0;
@@ -2384,12 +2516,7 @@ void calc_global_load(unsigned long ticks)
calc_load_update += LOAD_FREQ;
/*
- * Account one period with whatever state we found before
- * folding in the nohz state and ageing the entire idle period.
- *
- * This avoids loosing a sample when we go idle between
- * calc_load_account_active() (10 ticks ago) and now and thus
- * under-accounting.
+ * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
*/
calc_global_nohz();
}
@@ -2406,7 +2533,6 @@ static void calc_load_account_active(struct rq *this_rq)
return;
delta = calc_load_fold_active(this_rq);
- delta += calc_load_fold_idle();
if (delta)
atomic_long_add(delta, &calc_load_tasks);
@@ -2414,6 +2540,10 @@ static void calc_load_account_active(struct rq *this_rq)
}
/*
+ * End of global load-average stuff
+ */
+
+/*
* The exact cpuload at various idx values, calculated at every tick would be
* load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
*
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index b44d604b35d1..b6baf370cae9 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -25,7 +25,6 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
static struct task_struct *pick_next_task_idle(struct rq *rq)
{
schedstat_inc(rq, sched_goidle);
- calc_load_account_idle(rq);
return rq->idle;
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6d52cea7f33d..55844f24435a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -942,8 +942,6 @@ static inline u64 sched_avg_period(void)
return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
}
-void calc_load_account_idle(struct rq *this_rq);
-
#ifdef CONFIG_SCHED_HRTICK
/*
diff --git a/kernel/signal.c b/kernel/signal.c
index 677102789cf2..be4f856d52f8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1971,6 +1971,13 @@ static void ptrace_do_notify(int signr, int exit_code, int why)
void ptrace_notify(int exit_code)
{
BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
+ if (unlikely(current->task_works)) {
+ if (test_and_clear_ti_thread_flag(current_thread_info(),
+ TIF_NOTIFY_RESUME)) {
+ smp_mb__after_clear_bit();
+ task_work_run();
+ }
+ }
spin_lock_irq(&current->sighand->siglock);
ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
@@ -2191,6 +2198,14 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
struct signal_struct *signal = current->signal;
int signr;
+ if (unlikely(current->task_works)) {
+ if (test_and_clear_ti_thread_flag(current_thread_info(),
+ TIF_NOTIFY_RESUME)) {
+ smp_mb__after_clear_bit();
+ task_work_run();
+ }
+ }
+
if (unlikely(uprobe_deny_signal()))
return 0;
diff --git a/kernel/smp.c b/kernel/smp.c
index d0ae5b24875e..29dd40a9f2f4 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -581,26 +581,6 @@ int smp_call_function(smp_call_func_t func, void *info, int wait)
return 0;
}
EXPORT_SYMBOL(smp_call_function);
-
-void ipi_call_lock(void)
-{
- raw_spin_lock(&call_function.lock);
-}
-
-void ipi_call_unlock(void)
-{
- raw_spin_unlock(&call_function.lock);
-}
-
-void ipi_call_lock_irq(void)
-{
- raw_spin_lock_irq(&call_function.lock);
-}
-
-void ipi_call_unlock_irq(void)
-{
- raw_spin_unlock_irq(&call_function.lock);
-}
#endif /* USE_GENERIC_SMP_HELPERS */
/* Setup configured maximum number of CPUs to activate */
diff --git a/kernel/smpboot.h b/kernel/smpboot.h
index 80c0acfb8472..6ef9433e1c70 100644
--- a/kernel/smpboot.h
+++ b/kernel/smpboot.h
@@ -3,8 +3,6 @@
struct task_struct;
-int smpboot_prepare(unsigned int cpu);
-
#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
struct task_struct *idle_thread_get(unsigned int cpu);
void idle_thread_set_boot_cpu(void);
diff --git a/kernel/sys.c b/kernel/sys.c
index e0c8ffc50d7f..2d39a84cd857 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1788,7 +1788,6 @@ SYSCALL_DEFINE1(umask, int, mask)
#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
- struct vm_area_struct *vma;
struct file *exe_file;
struct dentry *dentry;
int err;
@@ -1816,13 +1815,17 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
down_write(&mm->mmap_sem);
/*
- * Forbid mm->exe_file change if there are mapped other files.
+ * Forbid mm->exe_file change if old file still mapped.
*/
err = -EBUSY;
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (vma->vm_file && !path_equal(&vma->vm_file->f_path,
- &exe_file->f_path))
- goto exit_unlock;
+ if (mm->exe_file) {
+ struct vm_area_struct *vma;
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next)
+ if (vma->vm_file &&
+ path_equal(&vma->vm_file->f_path,
+ &mm->exe_file->f_path))
+ goto exit_unlock;
}
/*
@@ -1835,6 +1838,7 @@ static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
goto exit_unlock;
+ err = 0;
set_mm_exe_file(mm, exe_file);
exit_unlock:
up_write(&mm->mmap_sem);
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 82d1c794066d..91d4e1742a0c 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -3,82 +3,78 @@
#include <linux/tracehook.h>
int
-task_work_add(struct task_struct *task, struct task_work *twork, bool notify)
+task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
{
+ struct callback_head *last, *first;
unsigned long flags;
- int err = -ESRCH;
-#ifndef TIF_NOTIFY_RESUME
- if (notify)
- return -ENOTSUPP;
-#endif
/*
- * We must not insert the new work if the task has already passed
- * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
- * and check PF_EXITING under pi_lock.
+ * Not inserting the new work if the task has already passed
+ * exit_task_work() is the responisbility of callers.
*/
raw_spin_lock_irqsave(&task->pi_lock, flags);
- if (likely(!(task->flags & PF_EXITING))) {
- hlist_add_head(&twork->hlist, &task->task_works);
- err = 0;
- }
+ last = task->task_works;
+ first = last ? last->next : twork;
+ twork->next = first;
+ if (last)
+ last->next = twork;
+ task->task_works = twork;
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
- if (likely(!err) && notify)
+ if (notify)
set_notify_resume(task);
- return err;
+ return 0;
}
-struct task_work *
+struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
unsigned long flags;
- struct task_work *twork;
- struct hlist_node *pos;
+ struct callback_head *last, *res = NULL;
raw_spin_lock_irqsave(&task->pi_lock, flags);
- hlist_for_each_entry(twork, pos, &task->task_works, hlist) {
- if (twork->func == func) {
- hlist_del(&twork->hlist);
- goto found;
+ last = task->task_works;
+ if (last) {
+ struct callback_head *q = last, *p = q->next;
+ while (1) {
+ if (p->func == func) {
+ q->next = p->next;
+ if (p == last)
+ task->task_works = q == p ? NULL : q;
+ res = p;
+ break;
+ }
+ if (p == last)
+ break;
+ q = p;
+ p = q->next;
}
}
- twork = NULL;
- found:
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
- return twork;
+ return res;
}
void task_work_run(void)
{
struct task_struct *task = current;
- struct hlist_head task_works;
- struct hlist_node *pos;
+ struct callback_head *p, *q;
- raw_spin_lock_irq(&task->pi_lock);
- hlist_move_list(&task->task_works, &task_works);
- raw_spin_unlock_irq(&task->pi_lock);
+ while (1) {
+ raw_spin_lock_irq(&task->pi_lock);
+ p = task->task_works;
+ task->task_works = NULL;
+ raw_spin_unlock_irq(&task->pi_lock);
- if (unlikely(hlist_empty(&task_works)))
- return;
- /*
- * We use hlist to save the space in task_struct, but we want fifo.
- * Find the last entry, the list should be short, then process them
- * in reverse order.
- */
- for (pos = task_works.first; pos->next; pos = pos->next)
- ;
+ if (unlikely(!p))
+ return;
- for (;;) {
- struct hlist_node **pprev = pos->pprev;
- struct task_work *twork = container_of(pos, struct task_work,
- hlist);
- twork->func(twork);
-
- if (pprev == &task_works.first)
- break;
- pos = container_of(pprev, struct hlist_node, next);
+ q = p->next; /* head */
+ p->next = NULL; /* cut it */
+ while (q) {
+ p = q->next;
+ q->func(q);
+ q = p;
+ }
}
}
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 70b33abcc7bb..b7fbadc5c973 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -409,7 +409,9 @@ int second_overflow(unsigned long secs)
time_state = TIME_DEL;
break;
case TIME_INS:
- if (secs % 86400 == 0) {
+ if (!(time_status & STA_INS))
+ time_state = TIME_OK;
+ else if (secs % 86400 == 0) {
leap = -1;
time_state = TIME_OOP;
time_tai++;
@@ -418,7 +420,9 @@ int second_overflow(unsigned long secs)
}
break;
case TIME_DEL:
- if ((secs + 1) % 86400 == 0) {
+ if (!(time_status & STA_DEL))
+ time_state = TIME_OK;
+ else if ((secs + 1) % 86400 == 0) {
leap = 1;
time_tai--;
time_state = TIME_WAIT;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 869997833928..024540f97f74 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -105,7 +105,7 @@ static ktime_t tick_init_jiffy_update(void)
/*
* NO HZ enabled ?
*/
-static int tick_nohz_enabled __read_mostly = 1;
+int tick_nohz_enabled __read_mostly = 1;
/*
* Enable / Disable tickless mode
@@ -271,50 +271,15 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
}
EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
-static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
+static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
+ ktime_t now, int cpu)
{
unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
+ ktime_t last_update, expires, ret = { .tv64 = 0 };
unsigned long rcu_delta_jiffies;
- ktime_t last_update, expires, now;
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
u64 time_delta;
- int cpu;
-
- cpu = smp_processor_id();
- ts = &per_cpu(tick_cpu_sched, cpu);
-
- now = tick_nohz_start_idle(cpu, ts);
-
- /*
- * If this cpu is offline and it is the one which updates
- * jiffies, then give up the assignment and let it be taken by
- * the cpu which runs the tick timer next. If we don't drop
- * this here the jiffies might be stale and do_timer() never
- * invoked.
- */
- if (unlikely(!cpu_online(cpu))) {
- if (cpu == tick_do_timer_cpu)
- tick_do_timer_cpu = TICK_DO_TIMER_NONE;
- }
-
- if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
- return;
- if (need_resched())
- return;
-
- if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
- static int ratelimit;
-
- if (ratelimit < 10) {
- printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
- (unsigned int) local_softirq_pending());
- ratelimit++;
- }
- return;
- }
-
- ts->idle_calls++;
/* Read jiffies and the time when jiffies were updated last */
do {
seq = read_seqbegin(&xtime_lock);
@@ -397,6 +362,8 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
goto out;
+ ret = expires;
+
/*
* nohz_stop_sched_tick can be called several times before
* the nohz_restart_sched_tick is called. This happens when
@@ -406,17 +373,12 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
*/
if (!ts->tick_stopped) {
select_nohz_load_balancer(1);
+ calc_load_enter_idle();
- ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
+ ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
ts->tick_stopped = 1;
- ts->idle_jiffies = last_jiffies;
}
- ts->idle_sleeps++;
-
- /* Mark expires */
- ts->idle_expires = expires;
-
/*
* If the expiration time == KTIME_MAX, then
* in this case we simply stop the tick timer.
@@ -447,6 +409,65 @@ out:
ts->next_jiffies = next_jiffies;
ts->last_jiffies = last_jiffies;
ts->sleep_length = ktime_sub(dev->next_event, now);
+
+ return ret;
+}
+
+static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
+{
+ /*
+ * If this cpu is offline and it is the one which updates
+ * jiffies, then give up the assignment and let it be taken by
+ * the cpu which runs the tick timer next. If we don't drop
+ * this here the jiffies might be stale and do_timer() never
+ * invoked.
+ */
+ if (unlikely(!cpu_online(cpu))) {
+ if (cpu == tick_do_timer_cpu)
+ tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+ }
+
+ if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+ return false;
+
+ if (need_resched())
+ return false;
+
+ if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+ static int ratelimit;
+
+ if (ratelimit < 10) {
+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n",
+ (unsigned int) local_softirq_pending());
+ ratelimit++;
+ }
+ return false;
+ }
+
+ return true;
+}
+
+static void __tick_nohz_idle_enter(struct tick_sched *ts)
+{
+ ktime_t now, expires;
+ int cpu = smp_processor_id();
+
+ now = tick_nohz_start_idle(cpu, ts);
+
+ if (can_stop_idle_tick(cpu, ts)) {
+ int was_stopped = ts->tick_stopped;
+
+ ts->idle_calls++;
+
+ expires = tick_nohz_stop_sched_tick(ts, now, cpu);
+ if (expires.tv64 > 0LL) {
+ ts->idle_sleeps++;
+ ts->idle_expires = expires;
+ }
+
+ if (!was_stopped && ts->tick_stopped)
+ ts->idle_jiffies = ts->last_jiffies;
+ }
}
/**
@@ -484,7 +505,7 @@ void tick_nohz_idle_enter(void)
* update of the idle time accounting in tick_nohz_start_idle().
*/
ts->inidle = 1;
- tick_nohz_stop_sched_tick(ts);
+ __tick_nohz_idle_enter(ts);
local_irq_enable();
}
@@ -504,7 +525,7 @@ void tick_nohz_irq_exit(void)
if (!ts->inidle)
return;
- tick_nohz_stop_sched_tick(ts);
+ __tick_nohz_idle_enter(ts);
}
/**
@@ -522,7 +543,7 @@ ktime_t tick_nohz_get_sleep_length(void)
static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
{
hrtimer_cancel(&ts->sched_timer);
- hrtimer_set_expires(&ts->sched_timer, ts->idle_tick);
+ hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
while (1) {
/* Forward the time to expire in the future */
@@ -545,6 +566,41 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
}
}
+static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
+{
+ /* Update jiffies first */
+ select_nohz_load_balancer(0);
+ tick_do_update_jiffies64(now);
+ update_cpu_load_nohz();
+
+ touch_softlockup_watchdog();
+ /*
+ * Cancel the scheduled timer and restore the tick
+ */
+ ts->tick_stopped = 0;
+ ts->idle_exittime = now;
+
+ tick_nohz_restart(ts, now);
+}
+
+static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
+{
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+ unsigned long ticks;
+ /*
+ * We stopped the tick in idle. Update process times would miss the
+ * time we slept as update_process_times does only a 1 tick
+ * accounting. Enforce that this is accounted to idle !
+ */
+ ticks = jiffies - ts->idle_jiffies;
+ /*
+ * We might be one off. Do not randomly account a huge number of ticks!
+ */
+ if (ticks && ticks < LONG_MAX)
+ account_idle_ticks(ticks);
+#endif
+}
+
/**
* tick_nohz_idle_exit - restart the idle tick from the idle task
*
@@ -556,9 +612,6 @@ void tick_nohz_idle_exit(void)
{
int cpu = smp_processor_id();
struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
- unsigned long ticks;
-#endif
ktime_t now;
local_irq_disable();
@@ -573,39 +626,11 @@ void tick_nohz_idle_exit(void)
if (ts->idle_active)
tick_nohz_stop_idle(cpu, now);
- if (!ts->tick_stopped) {
- local_irq_enable();
- return;
+ if (ts->tick_stopped) {
+ tick_nohz_restart_sched_tick(ts, now);
+ tick_nohz_account_idle_ticks(ts);
}
- /* Update jiffies first */
- select_nohz_load_balancer(0);
- tick_do_update_jiffies64(now);
- update_cpu_load_nohz();
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
- /*
- * We stopped the tick in idle. Update process times would miss the
- * time we slept as update_process_times does only a 1 tick
- * accounting. Enforce that this is accounted to idle !
- */
- ticks = jiffies - ts->idle_jiffies;
- /*
- * We might be one off. Do not randomly account a huge number of ticks!
- */
- if (ticks && ticks < LONG_MAX)
- account_idle_ticks(ticks);
-#endif
-
- touch_softlockup_watchdog();
- /*
- * Cancel the scheduled timer and restore the tick
- */
- ts->tick_stopped = 0;
- ts->idle_exittime = now;
-
- tick_nohz_restart(ts, now);
-
local_irq_enable();
}
@@ -809,7 +834,8 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
*/
if (ts->tick_stopped) {
touch_softlockup_watchdog();
- ts->idle_jiffies++;
+ if (idle_cpu(cpu))
+ ts->idle_jiffies++;
}
update_process_times(user_mode(regs));
profile_tick(CPU_PROFILING);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6f46a00a1e8a..f045cc50832d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -24,32 +24,32 @@
/* Structure holding internal timekeeping values. */
struct timekeeper {
/* Current clocksource used for timekeeping. */
- struct clocksource *clock;
+ struct clocksource *clock;
/* NTP adjusted clock multiplier */
- u32 mult;
+ u32 mult;
/* The shift value of the current clocksource. */
- int shift;
-
+ u32 shift;
/* Number of clock cycles in one NTP interval. */
- cycle_t cycle_interval;
+ cycle_t cycle_interval;
/* Number of clock shifted nano seconds in one NTP interval. */
- u64 xtime_interval;
+ u64 xtime_interval;
/* shifted nano seconds left over when rounding cycle_interval */
- s64 xtime_remainder;
+ s64 xtime_remainder;
/* Raw nano seconds accumulated per NTP interval. */
- u32 raw_interval;
+ u32 raw_interval;
+
+ /* Current CLOCK_REALTIME time in seconds */
+ u64 xtime_sec;
+ /* Clock shifted nano seconds */
+ u64 xtime_nsec;
- /* Clock shifted nano seconds remainder not stored in xtime.tv_nsec. */
- u64 xtime_nsec;
/* Difference between accumulated time and NTP time in ntp
* shifted nano seconds. */
- s64 ntp_error;
+ s64 ntp_error;
/* Shift conversion between clock shifted nano seconds and
* ntp shifted nano seconds. */
- int ntp_error_shift;
+ u32 ntp_error_shift;
- /* The current time */
- struct timespec xtime;
/*
* wall_to_monotonic is what we need to add to xtime (or xtime corrected
* for sub jiffie times) to get to monotonic time. Monotonic is pegged
@@ -64,14 +64,17 @@ struct timekeeper {
* - wall_to_monotonic is no longer the boot time, getboottime must be
* used instead.
*/
- struct timespec wall_to_monotonic;
+ struct timespec wall_to_monotonic;
/* time spent in suspend */
- struct timespec total_sleep_time;
+ struct timespec total_sleep_time;
/* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
- struct timespec raw_time;
-
+ struct timespec raw_time;
+ /* Offset clock monotonic -> clock realtime */
+ ktime_t offs_real;
+ /* Offset clock monotonic -> clock boottime */
+ ktime_t offs_boot;
/* Seqlock for all timekeeper values */
- seqlock_t lock;
+ seqlock_t lock;
};
static struct timekeeper timekeeper;
@@ -82,11 +85,37 @@ static struct timekeeper timekeeper;
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
+static inline void tk_normalize_xtime(struct timekeeper *tk)
+{
+ while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
+ tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
+ tk->xtime_sec++;
+ }
+}
+
+static struct timespec tk_xtime(struct timekeeper *tk)
+{
+ struct timespec ts;
+
+ ts.tv_sec = tk->xtime_sec;
+ ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
+ return ts;
+}
+
+static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
+{
+ tk->xtime_sec = ts->tv_sec;
+ tk->xtime_nsec = ts->tv_nsec << tk->shift;
+}
+static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
+{
+ tk->xtime_sec += ts->tv_sec;
+ tk->xtime_nsec += ts->tv_nsec << tk->shift;
+}
/**
* timekeeper_setup_internals - Set up internals to use clocksource clock.
@@ -98,12 +127,14 @@ int __read_mostly timekeeping_suspended;
*
* Unless you're the timekeeping code, you should not be using this!
*/
-static void timekeeper_setup_internals(struct clocksource *clock)
+static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
cycle_t interval;
u64 tmp, ntpinterval;
+ struct clocksource *old_clock;
- timekeeper.clock = clock;
+ old_clock = tk->clock;
+ tk->clock = clock;
clock->cycle_last = clock->read(clock);
/* Do the ns -> cycle conversion first, using original mult */
@@ -116,71 +147,96 @@ static void timekeeper_setup_internals(struct clocksource *clock)
tmp = 1;
interval = (cycle_t) tmp;
- timekeeper.cycle_interval = interval;
+ tk->cycle_interval = interval;
/* Go back from cycles -> shifted ns */
- timekeeper.xtime_interval = (u64) interval * clock->mult;
- timekeeper.xtime_remainder = ntpinterval - timekeeper.xtime_interval;
- timekeeper.raw_interval =
+ tk->xtime_interval = (u64) interval * clock->mult;
+ tk->xtime_remainder = ntpinterval - tk->xtime_interval;
+ tk->raw_interval =
((u64) interval * clock->mult) >> clock->shift;
- timekeeper.xtime_nsec = 0;
- timekeeper.shift = clock->shift;
+ /* if changing clocks, convert xtime_nsec shift units */
+ if (old_clock) {
+ int shift_change = clock->shift - old_clock->shift;
+ if (shift_change < 0)
+ tk->xtime_nsec >>= -shift_change;
+ else
+ tk->xtime_nsec <<= shift_change;
+ }
+ tk->shift = clock->shift;
- timekeeper.ntp_error = 0;
- timekeeper.ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
+ tk->ntp_error = 0;
+ tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
/*
* The timekeeper keeps its own mult values for the currently
* active clocksource. These value will be adjusted via NTP
* to counteract clock drifting.
*/
- timekeeper.mult = clock->mult;
+ tk->mult = clock->mult;
}
/* Timekeeper helper functions. */
-static inline s64 timekeeping_get_ns(void)
+static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
cycle_t cycle_now, cycle_delta;
struct clocksource *clock;
+ s64 nsec;
/* read clocksource: */
- clock = timekeeper.clock;
+ clock = tk->clock;
cycle_now = clock->read(clock);
/* calculate the delta since the last update_wall_time: */
cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
- /* return delta convert to nanoseconds using ntp adjusted mult. */
- return clocksource_cyc2ns(cycle_delta, timekeeper.mult,
- timekeeper.shift);
+ nsec = cycle_delta * tk->mult + tk->xtime_nsec;
+ nsec >>= tk->shift;
+
+ /* If arch requires, add in gettimeoffset() */
+ return nsec + arch_gettimeoffset();
}
-static inline s64 timekeeping_get_ns_raw(void)
+static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
cycle_t cycle_now, cycle_delta;
struct clocksource *clock;
+ s64 nsec;
/* read clocksource: */
- clock = timekeeper.clock;
+ clock = tk->clock;
cycle_now = clock->read(clock);
/* calculate the delta since the last update_wall_time: */
cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
- /* return delta convert to nanoseconds. */
- return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
+ /* convert delta to nanoseconds. */
+ nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
+
+ /* If arch requires, add in gettimeoffset() */
+ return nsec + arch_gettimeoffset();
+}
+
+static void update_rt_offset(struct timekeeper *tk)
+{
+ struct timespec tmp, *wtm = &tk->wall_to_monotonic;
+
+ set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec);
+ tk->offs_real = timespec_to_ktime(tmp);
}
/* must hold write on timekeeper.lock */
-static void timekeeping_update(bool clearntp)
+static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
+ struct timespec xt;
+
if (clearntp) {
- timekeeper.ntp_error = 0;
+ tk->ntp_error = 0;
ntp_clear();
}
- update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
- timekeeper.clock, timekeeper.mult);
+ update_rt_offset(tk);
+ xt = tk_xtime(tk);
+ update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
}
@@ -191,27 +247,26 @@ static void timekeeping_update(bool clearntp)
* update_wall_time(). This is useful before significant clock changes,
* as it avoids having to deal with this time offset explicitly.
*/
-static void timekeeping_forward_now(void)
+static void timekeeping_forward_now(struct timekeeper *tk)
{
cycle_t cycle_now, cycle_delta;
struct clocksource *clock;
s64 nsec;
- clock = timekeeper.clock;
+ clock = tk->clock;
cycle_now = clock->read(clock);
cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
clock->cycle_last = cycle_now;
- nsec = clocksource_cyc2ns(cycle_delta, timekeeper.mult,
- timekeeper.shift);
+ tk->xtime_nsec += cycle_delta * tk->mult;
/* If arch requires, add in gettimeoffset() */
- nsec += arch_gettimeoffset();
+ tk->xtime_nsec += arch_gettimeoffset() << tk->shift;
- timespec_add_ns(&timekeeper.xtime, nsec);
+ tk_normalize_xtime(tk);
nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
- timespec_add_ns(&timekeeper.raw_time, nsec);
+ timespec_add_ns(&tk->raw_time, nsec);
}
/**
@@ -223,18 +278,15 @@ static void timekeeping_forward_now(void)
void getnstimeofday(struct timespec *ts)
{
unsigned long seq;
- s64 nsecs;
+ s64 nsecs = 0;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqbegin(&timekeeper.lock);
- *ts = timekeeper.xtime;
- nsecs = timekeeping_get_ns();
-
- /* If arch requires, add in gettimeoffset() */
- nsecs += arch_gettimeoffset();
+ ts->tv_sec = timekeeper.xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(&timekeeper);
} while (read_seqretry(&timekeeper.lock, seq));
@@ -251,13 +303,10 @@ ktime_t ktime_get(void)
do {
seq = read_seqbegin(&timekeeper.lock);
- secs = timekeeper.xtime.tv_sec +
+ secs = timekeeper.xtime_sec +
timekeeper.wall_to_monotonic.tv_sec;
- nsecs = timekeeper.xtime.tv_nsec +
+ nsecs = timekeeping_get_ns(&timekeeper) +
timekeeper.wall_to_monotonic.tv_nsec;
- nsecs += timekeeping_get_ns();
- /* If arch requires, add in gettimeoffset() */
- nsecs += arch_gettimeoffset();
} while (read_seqretry(&timekeeper.lock, seq));
/*
@@ -280,22 +329,19 @@ void ktime_get_ts(struct timespec *ts)
{
struct timespec tomono;
unsigned int seq;
- s64 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqbegin(&timekeeper.lock);
- *ts = timekeeper.xtime;
+ ts->tv_sec = timekeeper.xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(&timekeeper);
tomono = timekeeper.wall_to_monotonic;
- nsecs = timekeeping_get_ns();
- /* If arch requires, add in gettimeoffset() */
- nsecs += arch_gettimeoffset();
} while (read_seqretry(&timekeeper.lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
- ts->tv_nsec + tomono.tv_nsec + nsecs);
+ ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
@@ -318,20 +364,14 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
WARN_ON_ONCE(timekeeping_suspended);
do {
- u32 arch_offset;
-
seq = read_seqbegin(&timekeeper.lock);
*ts_raw = timekeeper.raw_time;
- *ts_real = timekeeper.xtime;
-
- nsecs_raw = timekeeping_get_ns_raw();
- nsecs_real = timekeeping_get_ns();
+ ts_real->tv_sec = timekeeper.xtime_sec;
+ ts_real->tv_nsec = 0;
- /* If arch requires, add in gettimeoffset() */
- arch_offset = arch_gettimeoffset();
- nsecs_raw += arch_offset;
- nsecs_real += arch_offset;
+ nsecs_raw = timekeeping_get_ns_raw(&timekeeper);
+ nsecs_real = timekeeping_get_ns(&timekeeper);
} while (read_seqretry(&timekeeper.lock, seq));
@@ -366,7 +406,7 @@ EXPORT_SYMBOL(do_gettimeofday);
*/
int do_settimeofday(const struct timespec *tv)
{
- struct timespec ts_delta;
+ struct timespec ts_delta, xt;
unsigned long flags;
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
@@ -374,15 +414,18 @@ int do_settimeofday(const struct timespec *tv)
write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now();
+ timekeeping_forward_now(&timekeeper);
+
+ xt = tk_xtime(&timekeeper);
+ ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
+ ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
- ts_delta.tv_sec = tv->tv_sec - timekeeper.xtime.tv_sec;
- ts_delta.tv_nsec = tv->tv_nsec - timekeeper.xtime.tv_nsec;
timekeeper.wall_to_monotonic =
timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
- timekeeper.xtime = *tv;
- timekeeping_update(true);
+ tk_set_xtime(&timekeeper, tv);
+
+ timekeeping_update(&timekeeper, true);
write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -409,13 +452,14 @@ int timekeeping_inject_offset(struct timespec *ts)
write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now();
+ timekeeping_forward_now(&timekeeper);
- timekeeper.xtime = timespec_add(timekeeper.xtime, *ts);
+
+ tk_xtime_add(&timekeeper, ts);
timekeeper.wall_to_monotonic =
timespec_sub(timekeeper.wall_to_monotonic, *ts);
- timekeeping_update(true);
+ timekeeping_update(&timekeeper, true);
write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -440,14 +484,14 @@ static int change_clocksource(void *data)
write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now();
+ timekeeping_forward_now(&timekeeper);
if (!new->enable || new->enable(new) == 0) {
old = timekeeper.clock;
- timekeeper_setup_internals(new);
+ tk_setup_internals(&timekeeper, new);
if (old->disable)
old->disable(old);
}
- timekeeping_update(true);
+ timekeeping_update(&timekeeper, true);
write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -497,7 +541,7 @@ void getrawmonotonic(struct timespec *ts)
do {
seq = read_seqbegin(&timekeeper.lock);
- nsecs = timekeeping_get_ns_raw();
+ nsecs = timekeeping_get_ns_raw(&timekeeper);
*ts = timekeeper.raw_time;
} while (read_seqretry(&timekeeper.lock, seq));
@@ -532,6 +576,7 @@ u64 timekeeping_max_deferment(void)
{
unsigned long seq;
u64 ret;
+
do {
seq = read_seqbegin(&timekeeper.lock);
@@ -592,18 +637,17 @@ void __init timekeeping_init(void)
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
- timekeeper_setup_internals(clock);
+ tk_setup_internals(&timekeeper, clock);
- timekeeper.xtime.tv_sec = now.tv_sec;
- timekeeper.xtime.tv_nsec = now.tv_nsec;
+ tk_set_xtime(&timekeeper, &now);
timekeeper.raw_time.tv_sec = 0;
timekeeper.raw_time.tv_nsec = 0;
- if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
- boot.tv_sec = timekeeper.xtime.tv_sec;
- boot.tv_nsec = timekeeper.xtime.tv_nsec;
- }
+ if (boot.tv_sec == 0 && boot.tv_nsec == 0)
+ boot = tk_xtime(&timekeeper);
+
set_normalized_timespec(&timekeeper.wall_to_monotonic,
-boot.tv_sec, -boot.tv_nsec);
+ update_rt_offset(&timekeeper);
timekeeper.total_sleep_time.tv_sec = 0;
timekeeper.total_sleep_time.tv_nsec = 0;
write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -612,6 +656,12 @@ void __init timekeeping_init(void)
/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;
+static void update_sleep_time(struct timespec t)
+{
+ timekeeper.total_sleep_time = t;
+ timekeeper.offs_boot = timespec_to_ktime(t);
+}
+
/**
* __timekeeping_inject_sleeptime - Internal function to add sleep interval
* @delta: pointer to a timespec delta value
@@ -619,7 +669,8 @@ static struct timespec timekeeping_suspend_time;
* Takes a timespec offset measuring a suspend interval and properly
* adds the sleep offset to the timekeeping variables.
*/
-static void __timekeeping_inject_sleeptime(struct timespec *delta)
+static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
+ struct timespec *delta)
{
if (!timespec_valid(delta)) {
printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
@@ -627,11 +678,9 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
return;
}
- timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
- timekeeper.wall_to_monotonic =
- timespec_sub(timekeeper.wall_to_monotonic, *delta);
- timekeeper.total_sleep_time = timespec_add(
- timekeeper.total_sleep_time, *delta);
+ tk_xtime_add(tk, delta);
+ tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta);
+ update_sleep_time(timespec_add(tk->total_sleep_time, *delta));
}
@@ -657,11 +706,11 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now();
+ timekeeping_forward_now(&timekeeper);
- __timekeeping_inject_sleeptime(delta);
+ __timekeeping_inject_sleeptime(&timekeeper, delta);
- timekeeping_update(true);
+ timekeeping_update(&timekeeper, true);
write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -690,12 +739,13 @@ static void timekeeping_resume(void)
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
- __timekeeping_inject_sleeptime(&ts);
+ __timekeeping_inject_sleeptime(&timekeeper, &ts);
}
/* re-base the last cycle value */
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
+ timekeeping_update(&timekeeper, false);
write_sequnlock_irqrestore(&timekeeper.lock, flags);
touch_softlockup_watchdog();
@@ -715,7 +765,7 @@ static int timekeeping_suspend(void)
read_persistent_clock(&timekeeping_suspend_time);
write_seqlock_irqsave(&timekeeper.lock, flags);
- timekeeping_forward_now();
+ timekeeping_forward_now(&timekeeper);
timekeeping_suspended = 1;
/*
@@ -724,7 +774,7 @@ static int timekeeping_suspend(void)
* try to compensate so the difference in system time
* and persistent_clock time stays close to constant.
*/
- delta = timespec_sub(timekeeper.xtime, timekeeping_suspend_time);
+ delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time);
delta_delta = timespec_sub(delta, old_delta);
if (abs(delta_delta.tv_sec) >= 2) {
/*
@@ -763,7 +813,8 @@ device_initcall(timekeeping_init_ops);
* If the error is already larger, we look ahead even further
* to compensate for late or lost adjustments.
*/
-static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
+static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
+ s64 error, s64 *interval,
s64 *offset)
{
s64 tick_error, i;
@@ -779,7 +830,7 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
* here. This is tuned so that an error of about 1 msec is adjusted
* within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
*/
- error2 = timekeeper.ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
+ error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
error2 = abs(error2);
for (look_ahead = 0; error2 > 0; look_ahead++)
error2 >>= 2;
@@ -788,8 +839,8 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
* Now calculate the error in (1 << look_ahead) ticks, but first
* remove the single look ahead already included in the error.
*/
- tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1);
- tick_error -= timekeeper.xtime_interval >> 1;
+ tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
+ tick_error -= tk->xtime_interval >> 1;
error = ((error - tick_error) >> look_ahead) + tick_error;
/* Finally calculate the adjustment shift value. */
@@ -814,9 +865,9 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
* this is optimized for the most common adjustments of -1,0,1,
* for other values we can do a bit more work.
*/
-static void timekeeping_adjust(s64 offset)
+static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
- s64 error, interval = timekeeper.cycle_interval;
+ s64 error, interval = tk->cycle_interval;
int adj;
/*
@@ -832,7 +883,7 @@ static void timekeeping_adjust(s64 offset)
*
* Note: It does not "save" on aggravation when reading the code.
*/
- error = timekeeper.ntp_error >> (timekeeper.ntp_error_shift - 1);
+ error = tk->ntp_error >> (tk->ntp_error_shift - 1);
if (error > interval) {
/*
* We now divide error by 4(via shift), which checks if
@@ -854,7 +905,8 @@ static void timekeeping_adjust(s64 offset)
if (likely(error <= interval))
adj = 1;
else
- adj = timekeeping_bigadjust(error, &interval, &offset);
+ adj = timekeeping_bigadjust(tk, error, &interval,
+ &offset);
} else if (error < -interval) {
/* See comment above, this is just switched for the negative */
error >>= 2;
@@ -863,18 +915,17 @@ static void timekeeping_adjust(s64 offset)
interval = -interval;
offset = -offset;
} else
- adj = timekeeping_bigadjust(error, &interval, &offset);
- } else /* No adjustment needed */
+ adj = timekeeping_bigadjust(tk, error, &interval,
+ &offset);
+ } else
return;
- if (unlikely(timekeeper.clock->maxadj &&
- (timekeeper.mult + adj >
- timekeeper.clock->mult + timekeeper.clock->maxadj))) {
+ if (unlikely(tk->clock->maxadj &&
+ (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
printk_once(KERN_WARNING
"Adjusting %s more than 11%% (%ld vs %ld)\n",
- timekeeper.clock->name, (long)timekeeper.mult + adj,
- (long)timekeeper.clock->mult +
- timekeeper.clock->maxadj);
+ tk->clock->name, (long)tk->mult + adj,
+ (long)tk->clock->mult + tk->clock->maxadj);
}
/*
* So the following can be confusing.
@@ -925,11 +976,60 @@ static void timekeeping_adjust(s64 offset)
*
* XXX - TODO: Doc ntp_error calculation.
*/
- timekeeper.mult += adj;
- timekeeper.xtime_interval += interval;
- timekeeper.xtime_nsec -= offset;
- timekeeper.ntp_error -= (interval - offset) <<
- timekeeper.ntp_error_shift;
+ tk->mult += adj;
+ tk->xtime_interval += interval;
+ tk->xtime_nsec -= offset;
+ tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
+
+ /*
+ * It may be possible that when we entered this function, xtime_nsec
+ * was very small. Further, if we're slightly speeding the clocksource
+ * in the code above, its possible the required corrective factor to
+ * xtime_nsec could cause it to underflow.
+ *
+ * Now, since we already accumulated the second, cannot simply roll
+ * the accumulated second back, since the NTP subsystem has been
+ * notified via second_overflow. So instead we push xtime_nsec forward
+ * by the amount we underflowed, and add that amount into the error.
+ *
+ * We'll correct this error next time through this function, when
+ * xtime_nsec is not as small.
+ */
+ if (unlikely((s64)tk->xtime_nsec < 0)) {
+ s64 neg = -(s64)tk->xtime_nsec;
+ tk->xtime_nsec = 0;
+ tk->ntp_error += neg << tk->ntp_error_shift;
+ }
+
+}
+
+
+/**
+ * accumulate_nsecs_to_secs - Accumulates nsecs into secs
+ *
+ * Helper function that accumulates a the nsecs greater then a second
+ * from the xtime_nsec field to the xtime_secs field.
+ * It also calls into the NTP code to handle leapsecond processing.
+ *
+ */
+static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
+{
+ u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
+
+ while (tk->xtime_nsec >= nsecps) {
+ int leap;
+
+ tk->xtime_nsec -= nsecps;
+ tk->xtime_sec++;
+
+ /* Figure out if its a leap sec and apply if needed */
+ leap = second_overflow(tk->xtime_sec);
+ tk->xtime_sec += leap;
+ tk->wall_to_monotonic.tv_sec -= leap;
+ if (leap)
+ clock_was_set_delayed();
+
+ }
}
@@ -942,44 +1042,36 @@ static void timekeeping_adjust(s64 offset)
*
* Returns the unconsumed cycles.
*/
-static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
+static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
+ u32 shift)
{
- u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
u64 raw_nsecs;
- /* If the offset is smaller than a shifted interval, do nothing */
- if (offset < timekeeper.cycle_interval<<shift)
+ /* If the offset is smaller then a shifted interval, do nothing */
+ if (offset < tk->cycle_interval<<shift)
return offset;
/* Accumulate one shifted interval */
- offset -= timekeeper.cycle_interval << shift;
- timekeeper.clock->cycle_last += timekeeper.cycle_interval << shift;
+ offset -= tk->cycle_interval << shift;
+ tk->clock->cycle_last += tk->cycle_interval << shift;
- timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
- while (timekeeper.xtime_nsec >= nsecps) {
- int leap;
- timekeeper.xtime_nsec -= nsecps;
- timekeeper.xtime.tv_sec++;
- leap = second_overflow(timekeeper.xtime.tv_sec);
- timekeeper.xtime.tv_sec += leap;
- timekeeper.wall_to_monotonic.tv_sec -= leap;
- }
+ tk->xtime_nsec += tk->xtime_interval << shift;
+ accumulate_nsecs_to_secs(tk);
/* Accumulate raw time */
- raw_nsecs = timekeeper.raw_interval << shift;
- raw_nsecs += timekeeper.raw_time.tv_nsec;
+ raw_nsecs = tk->raw_interval << shift;
+ raw_nsecs += tk->raw_time.tv_nsec;
if (raw_nsecs >= NSEC_PER_SEC) {
u64 raw_secs = raw_nsecs;
raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
- timekeeper.raw_time.tv_sec += raw_secs;
+ tk->raw_time.tv_sec += raw_secs;
}
- timekeeper.raw_time.tv_nsec = raw_nsecs;
+ tk->raw_time.tv_nsec = raw_nsecs;
/* Accumulate error between NTP and clock interval */
- timekeeper.ntp_error += ntp_tick_length() << shift;
- timekeeper.ntp_error -=
- (timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
- (timekeeper.ntp_error_shift + shift);
+ tk->ntp_error += ntp_tick_length() << shift;
+ tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
+ (tk->ntp_error_shift + shift);
return offset;
}
@@ -995,6 +1087,7 @@ static void update_wall_time(void)
cycle_t offset;
int shift = 0, maxshift;
unsigned long flags;
+ s64 remainder;
write_seqlock_irqsave(&timekeeper.lock, flags);
@@ -1009,8 +1102,6 @@ static void update_wall_time(void)
#else
offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
- timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec <<
- timekeeper.shift;
/*
* With NO_HZ we may have to accumulate many cycle_intervals
@@ -1026,62 +1117,36 @@ static void update_wall_time(void)
maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
shift = min(shift, maxshift);
while (offset >= timekeeper.cycle_interval) {
- offset = logarithmic_accumulation(offset, shift);
+ offset = logarithmic_accumulation(&timekeeper, offset, shift);
if(offset < timekeeper.cycle_interval<<shift)
shift--;
}
/* correct the clock when NTP error is too big */
- timekeeping_adjust(offset);
-
- /*
- * Since in the loop above, we accumulate any amount of time
- * in xtime_nsec over a second into xtime.tv_sec, its possible for
- * xtime_nsec to be fairly small after the loop. Further, if we're
- * slightly speeding the clocksource up in timekeeping_adjust(),
- * its possible the required corrective factor to xtime_nsec could
- * cause it to underflow.
- *
- * Now, we cannot simply roll the accumulated second back, since
- * the NTP subsystem has been notified via second_overflow. So
- * instead we push xtime_nsec forward by the amount we underflowed,
- * and add that amount into the error.
- *
- * We'll correct this error next time through this function, when
- * xtime_nsec is not as small.
- */
- if (unlikely((s64)timekeeper.xtime_nsec < 0)) {
- s64 neg = -(s64)timekeeper.xtime_nsec;
- timekeeper.xtime_nsec = 0;
- timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
- }
+ timekeeping_adjust(&timekeeper, offset);
/*
- * Store full nanoseconds into xtime after rounding it up and
- * add the remainder to the error difference.
- */
- timekeeper.xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >>
- timekeeper.shift) + 1;
- timekeeper.xtime_nsec -= (s64)timekeeper.xtime.tv_nsec <<
- timekeeper.shift;
- timekeeper.ntp_error += timekeeper.xtime_nsec <<
- timekeeper.ntp_error_shift;
+ * Store only full nanoseconds into xtime_nsec after rounding
+ * it up and add the remainder to the error difference.
+ * XXX - This is necessary to avoid small 1ns inconsistnecies caused
+ * by truncating the remainder in vsyscalls. However, it causes
+ * additional work to be done in timekeeping_adjust(). Once
+ * the vsyscall implementations are converted to use xtime_nsec
+ * (shifted nanoseconds), this can be killed.
+ */
+ remainder = timekeeper.xtime_nsec & ((1 << timekeeper.shift) - 1);
+ timekeeper.xtime_nsec -= remainder;
+ timekeeper.xtime_nsec += 1 << timekeeper.shift;
+ timekeeper.ntp_error += remainder << timekeeper.ntp_error_shift;
/*
* Finally, make sure that after the rounding
- * xtime.tv_nsec isn't larger than NSEC_PER_SEC
+ * xtime_nsec isn't larger than NSEC_PER_SEC
*/
- if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
- int leap;
- timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
- timekeeper.xtime.tv_sec++;
- leap = second_overflow(timekeeper.xtime.tv_sec);
- timekeeper.xtime.tv_sec += leap;
- timekeeper.wall_to_monotonic.tv_sec -= leap;
- }
+ accumulate_nsecs_to_secs(&timekeeper);
- timekeeping_update(false);
+ timekeeping_update(&timekeeper, false);
out:
write_sequnlock_irqrestore(&timekeeper.lock, flags);
@@ -1126,21 +1191,20 @@ void get_monotonic_boottime(struct timespec *ts)
{
struct timespec tomono, sleep;
unsigned int seq;
- s64 nsecs;
WARN_ON(timekeeping_suspended);
do {
seq = read_seqbegin(&timekeeper.lock);
- *ts = timekeeper.xtime;
+ ts->tv_sec = timekeeper.xtime_sec;
+ ts->tv_nsec = timekeeping_get_ns(&timekeeper);
tomono = timekeeper.wall_to_monotonic;
sleep = timekeeper.total_sleep_time;
- nsecs = timekeeping_get_ns();
} while (read_seqretry(&timekeeper.lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
- ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
+ ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);
@@ -1173,13 +1237,13 @@ EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
- return timekeeper.xtime.tv_sec;
+ return timekeeper.xtime_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
- return timekeeper.xtime;
+ return tk_xtime(&timekeeper);
}
struct timespec current_kernel_time(void)
@@ -1190,7 +1254,7 @@ struct timespec current_kernel_time(void)
do {
seq = read_seqbegin(&timekeeper.lock);
- now = timekeeper.xtime;
+ now = tk_xtime(&timekeeper);
} while (read_seqretry(&timekeeper.lock, seq));
return now;
@@ -1205,7 +1269,7 @@ struct timespec get_monotonic_coarse(void)
do {
seq = read_seqbegin(&timekeeper.lock);
- now = timekeeper.xtime;
+ now = tk_xtime(&timekeeper);
mono = timekeeper.wall_to_monotonic;
} while (read_seqretry(&timekeeper.lock, seq));
@@ -1240,12 +1304,43 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
do {
seq = read_seqbegin(&timekeeper.lock);
- *xtim = timekeeper.xtime;
+ *xtim = tk_xtime(&timekeeper);
*wtom = timekeeper.wall_to_monotonic;
*sleep = timekeeper.total_sleep_time;
} while (read_seqretry(&timekeeper.lock, seq));
}
+#ifdef CONFIG_HIGH_RES_TIMERS
+/**
+ * ktime_get_update_offsets - hrtimer helper
+ * @offs_real: pointer to storage for monotonic -> realtime offset
+ * @offs_boot: pointer to storage for monotonic -> boottime offset
+ *
+ * Returns current monotonic time and updates the offsets
+ * Called from hrtimer_interupt() or retrigger_next_event()
+ */
+ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
+{
+ ktime_t now;
+ unsigned int seq;
+ u64 secs, nsecs;
+
+ do {
+ seq = read_seqbegin(&timekeeper.lock);
+
+ secs = timekeeper.xtime_sec;
+ nsecs = timekeeping_get_ns(&timekeeper);
+
+ *offs_real = timekeeper.offs_real;
+ *offs_boot = timekeeper.offs_boot;
+ } while (read_seqretry(&timekeeper.lock, seq));
+
+ now = ktime_add_ns(ktime_set(secs, 0), nsecs);
+ now = ktime_sub(now, *offs_real);
+ return now;
+}
+#endif
+
/**
* ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
*/
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 3258455549f4..af5a7e9f164b 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -167,7 +167,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
{
struct tick_sched *ts = tick_get_tick_sched(cpu);
P(nohz_mode);
- P_ns(idle_tick);
+ P_ns(last_tick);
P(tick_stopped);
P(idle_jiffies);
P(idle_calls);
@@ -259,7 +259,7 @@ static int timer_list_show(struct seq_file *m, void *v)
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Timer List Version: v0.6\n");
+ SEQ_printf(m, "Timer List Version: v0.7\n");
SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
diff --git a/kernel/timer.c b/kernel/timer.c
index 6ec7e7e0db43..a61c09374eba 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -77,6 +77,7 @@ struct tvec_base {
struct timer_list *running_timer;
unsigned long timer_jiffies;
unsigned long next_timer;
+ unsigned long active_timers;
struct tvec_root tv1;
struct tvec tv2;
struct tvec tv3;
@@ -330,7 +331,8 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
}
EXPORT_SYMBOL_GPL(set_timer_slack);
-static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static void
+__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
unsigned long expires = timer->expires;
unsigned long idx = expires - base->timer_jiffies;
@@ -372,6 +374,19 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
list_add_tail(&timer->entry, vec);
}
+static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+{
+ __internal_add_timer(base, timer);
+ /*
+ * Update base->active_timers and base->next_timer
+ */
+ if (!tbase_get_deferrable(timer->base)) {
+ if (time_before(timer->expires, base->next_timer))
+ base->next_timer = timer->expires;
+ base->active_timers++;
+ }
+}
+
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
@@ -654,8 +669,7 @@ void init_timer_deferrable_key(struct timer_list *timer,
}
EXPORT_SYMBOL(init_timer_deferrable_key);
-static inline void detach_timer(struct timer_list *timer,
- int clear_pending)
+static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
struct list_head *entry = &timer->entry;
@@ -667,6 +681,29 @@ static inline void detach_timer(struct timer_list *timer,
entry->prev = LIST_POISON2;
}
+static inline void
+detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
+{
+ detach_timer(timer, true);
+ if (!tbase_get_deferrable(timer->base))
+ timer->base->active_timers--;
+}
+
+static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
+ bool clear_pending)
+{
+ if (!timer_pending(timer))
+ return 0;
+
+ detach_timer(timer, clear_pending);
+ if (!tbase_get_deferrable(timer->base)) {
+ timer->base->active_timers--;
+ if (timer->expires == base->next_timer)
+ base->next_timer = base->timer_jiffies;
+ }
+ return 1;
+}
+
/*
* We are using hashed locking: holding per_cpu(tvec_bases).lock
* means that all timers which are tied to this base via timer->base are
@@ -712,16 +749,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
base = lock_timer_base(timer, &flags);
- if (timer_pending(timer)) {
- detach_timer(timer, 0);
- if (timer->expires == base->next_timer &&
- !tbase_get_deferrable(timer->base))
- base->next_timer = base->timer_jiffies;
- ret = 1;
- } else {
- if (pending_only)
- goto out_unlock;
- }
+ ret = detach_if_pending(timer, base, false);
+ if (!ret && pending_only)
+ goto out_unlock;
debug_activate(timer, expires);
@@ -752,9 +782,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
}
timer->expires = expires;
- if (time_before(timer->expires, base->next_timer) &&
- !tbase_get_deferrable(timer->base))
- base->next_timer = timer->expires;
internal_add_timer(base, timer);
out_unlock:
@@ -920,9 +947,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
spin_lock_irqsave(&base->lock, flags);
timer_set_base(timer, base);
debug_activate(timer, timer->expires);
- if (time_before(timer->expires, base->next_timer) &&
- !tbase_get_deferrable(timer->base))
- base->next_timer = timer->expires;
internal_add_timer(base, timer);
/*
* Check whether the other CPU is idle and needs to be
@@ -959,13 +983,7 @@ int del_timer(struct timer_list *timer)
timer_stats_timer_clear_start_info(timer);
if (timer_pending(timer)) {
base = lock_timer_base(timer, &flags);
- if (timer_pending(timer)) {
- detach_timer(timer, 1);
- if (timer->expires == base->next_timer &&
- !tbase_get_deferrable(timer->base))
- base->next_timer = base->timer_jiffies;
- ret = 1;
- }
+ ret = detach_if_pending(timer, base, true);
spin_unlock_irqrestore(&base->lock, flags);
}
@@ -990,19 +1008,10 @@ int try_to_del_timer_sync(struct timer_list *timer)
base = lock_timer_base(timer, &flags);
- if (base->running_timer == timer)
- goto out;
-
- timer_stats_timer_clear_start_info(timer);
- ret = 0;
- if (timer_pending(timer)) {
- detach_timer(timer, 1);
- if (timer->expires == base->next_timer &&
- !tbase_get_deferrable(timer->base))
- base->next_timer = base->timer_jiffies;
- ret = 1;
+ if (base->running_timer != timer) {
+ timer_stats_timer_clear_start_info(timer);
+ ret = detach_if_pending(timer, base, true);
}
-out:
spin_unlock_irqrestore(&base->lock, flags);
return ret;
@@ -1089,7 +1098,8 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index)
*/
list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
BUG_ON(tbase_get_base(timer->base) != base);
- internal_add_timer(base, timer);
+ /* No accounting, while moving them */
+ __internal_add_timer(base, timer);
}
return index;
@@ -1178,7 +1188,7 @@ static inline void __run_timers(struct tvec_base *base)
timer_stats_account_timer(timer);
base->running_timer = timer;
- detach_timer(timer, 1);
+ detach_expired_timer(timer, base);
spin_unlock_irq(&base->lock);
call_timer_fn(timer, fn, data);
@@ -1316,18 +1326,21 @@ static unsigned long cmp_next_hrtimer_event(unsigned long now,
unsigned long get_next_timer_interrupt(unsigned long now)
{
struct tvec_base *base = __this_cpu_read(tvec_bases);
- unsigned long expires;
+ unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
/*
* Pretend that there is no timer pending if the cpu is offline.
* Possible pending timers will be migrated later to an active cpu.
*/
if (cpu_is_offline(smp_processor_id()))
- return now + NEXT_TIMER_MAX_DELTA;
+ return expires;
+
spin_lock(&base->lock);
- if (time_before_eq(base->next_timer, base->timer_jiffies))
- base->next_timer = __next_timer_interrupt(base);
- expires = base->next_timer;
+ if (base->active_timers) {
+ if (time_before_eq(base->next_timer, base->timer_jiffies))
+ base->next_timer = __next_timer_interrupt(base);
+ expires = base->next_timer;
+ }
spin_unlock(&base->lock);
if (time_before_eq(expires, now))
@@ -1704,6 +1717,7 @@ static int __cpuinit init_timers_cpu(int cpu)
base->timer_jiffies = jiffies;
base->next_timer = base->timer_jiffies;
+ base->active_timers = 0;
return 0;
}
@@ -1714,11 +1728,9 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea
while (!list_empty(head)) {
timer = list_first_entry(head, struct timer_list, entry);
- detach_timer(timer, 0);
+ /* We ignore the accounting on the dying cpu */
+ detach_timer(timer, false);
timer_set_base(timer, new_base);
- if (time_before(timer->expires, new_base->next_timer) &&
- !tbase_get_deferrable(timer->base))
- new_base->next_timer = timer->expires;
internal_add_timer(new_base, timer);
}
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a008663d86c8..b4f20fba09fc 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -312,7 +312,7 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
static int __register_ftrace_function(struct ftrace_ops *ops)
{
- if (ftrace_disabled)
+ if (unlikely(ftrace_disabled))
return -ENODEV;
if (FTRACE_WARN_ON(ops == &global_ops))
@@ -4299,16 +4299,12 @@ int register_ftrace_function(struct ftrace_ops *ops)
mutex_lock(&ftrace_lock);
- if (unlikely(ftrace_disabled))
- goto out_unlock;
-
ret = __register_ftrace_function(ops);
if (!ret)
ret = ftrace_startup(ops, 0);
-
- out_unlock:
mutex_unlock(&ftrace_lock);
+
return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_function);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 1d0f6a8a0e5e..49491fa7daa2 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1075,6 +1075,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
rb_init_page(bpage->page);
INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
+ INIT_LIST_HEAD(&cpu_buffer->new_pages);
ret = rb_allocate_pages(cpu_buffer, nr_pages);
if (ret < 0)
@@ -1346,10 +1347,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
* If something was added to this page, it was full
* since it is not the tail page. So we deduct the
* bytes consumed in ring buffer from here.
- * No need to update overruns, since this page is
- * deleted from ring buffer and its entries are
- * already accounted for.
+ * Increment overrun to account for the lost events.
*/
+ local_add(page_entries, &cpu_buffer->overrun);
local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
}
@@ -3239,6 +3239,10 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
if (cpu_buffer->commit_page == cpu_buffer->reader_page)
goto out;
+ /* Don't bother swapping if the ring buffer is empty */
+ if (rb_num_of_entries(cpu_buffer) == 0)
+ goto out;
+
/*
* Reset the reader page to size zero.
*/
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 49249c28690d..a120f98c4112 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -830,6 +830,8 @@ int register_tracer(struct tracer *type)
current_trace = saved_tracer;
if (ret) {
printk(KERN_CONT "FAILED!\n");
+ /* Add the warning after printing 'FAILED' */
+ WARN_ON(1);
goto out;
}
/* Only reset on passing, to avoid touching corrupted buffers */
@@ -1708,9 +1710,11 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
static void trace_iterator_increment(struct trace_iterator *iter)
{
+ struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
+
iter->idx++;
- if (iter->buffer_iter[iter->cpu])
- ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
+ if (buf_iter)
+ ring_buffer_read(buf_iter, NULL);
}
static struct trace_entry *
@@ -1718,7 +1722,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
unsigned long *lost_events)
{
struct ring_buffer_event *event;
- struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
+ struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
if (buf_iter)
event = ring_buffer_iter_peek(buf_iter, ts);
@@ -1856,10 +1860,10 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
tr->data[cpu]->skipped_entries = 0;
- if (!iter->buffer_iter[cpu])
+ buf_iter = trace_buffer_iter(iter, cpu);
+ if (!buf_iter)
return;
- buf_iter = iter->buffer_iter[cpu];
ring_buffer_iter_reset(buf_iter);
/*
@@ -2205,13 +2209,15 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
int trace_empty(struct trace_iterator *iter)
{
+ struct ring_buffer_iter *buf_iter;
int cpu;
/* If we are looking at one CPU buffer, only check that one */
if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
cpu = iter->cpu_file;
- if (iter->buffer_iter[cpu]) {
- if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+ buf_iter = trace_buffer_iter(iter, cpu);
+ if (buf_iter) {
+ if (!ring_buffer_iter_empty(buf_iter))
return 0;
} else {
if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@@ -2221,8 +2227,9 @@ int trace_empty(struct trace_iterator *iter)
}
for_each_tracing_cpu(cpu) {
- if (iter->buffer_iter[cpu]) {
- if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+ buf_iter = trace_buffer_iter(iter, cpu);
+ if (buf_iter) {
+ if (!ring_buffer_iter_empty(buf_iter))
return 0;
} else {
if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@@ -2381,6 +2388,11 @@ __tracing_open(struct inode *inode, struct file *file)
if (!iter)
return ERR_PTR(-ENOMEM);
+ iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
+ GFP_KERNEL);
+ if (!iter->buffer_iter)
+ goto release;
+
/*
* We make a copy of the current tracer to avoid concurrent
* changes on it while we are reading.
@@ -2441,6 +2453,8 @@ __tracing_open(struct inode *inode, struct file *file)
fail:
mutex_unlock(&trace_types_lock);
kfree(iter->trace);
+ kfree(iter->buffer_iter);
+release:
seq_release_private(inode, file);
return ERR_PTR(-ENOMEM);
}
@@ -2481,6 +2495,7 @@ static int tracing_release(struct inode *inode, struct file *file)
mutex_destroy(&iter->mutex);
free_cpumask_var(iter->started);
kfree(iter->trace);
+ kfree(iter->buffer_iter);
seq_release_private(inode, file);
return 0;
}
@@ -3609,6 +3624,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
.pages = pages_def,
.partial = partial_def,
.nr_pages = 0, /* This gets updated below. */
+ .nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &tracing_pipe_buf_ops,
.spd_release = tracing_spd_release_pipe,
@@ -3680,7 +3696,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
ret = splice_to_pipe(pipe, &spd);
out:
- splice_shrink_spd(pipe, &spd);
+ splice_shrink_spd(&spd);
return ret;
out_err:
@@ -4231,6 +4247,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
struct splice_pipe_desc spd = {
.pages = pages_def,
.partial = partial_def,
+ .nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &buffer_pipe_buf_ops,
.spd_release = buffer_spd_release,
@@ -4318,7 +4335,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
}
ret = splice_to_pipe(pipe, &spd);
- splice_shrink_spd(pipe, &spd);
+ splice_shrink_spd(&spd);
out:
return ret;
}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5aec220d2de0..55e1f7f0db12 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -317,6 +317,14 @@ struct tracer {
#define TRACE_PIPE_ALL_CPU -1
+static inline struct ring_buffer_iter *
+trace_buffer_iter(struct trace_iterator *iter, int cpu)
+{
+ if (iter->buffer_iter && iter->buffer_iter[cpu])
+ return iter->buffer_iter[cpu];
+ return NULL;
+}
+
int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void);
void trace_wake_up(void);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a7d2a4c653d8..ce27c8ba8d31 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -538,7 +538,7 @@ get_return_for_leaf(struct trace_iterator *iter,
next = &data->ret;
} else {
- ring_iter = iter->buffer_iter[iter->cpu];
+ ring_iter = trace_buffer_iter(iter, iter->cpu);
/* First peek to compare current entry and the next one */
if (ring_iter)
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index df611a0e76c5..123b189c732c 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1325,4 +1325,4 @@ __init static int init_events(void)
return 0;
}
-device_initcall(init_events);
+early_initcall(init_events);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9a3128dc67df..692d97628a10 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -45,32 +45,41 @@
#include "workqueue_sched.h"
enum {
- /* global_cwq flags */
- GCWQ_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
- GCWQ_MANAGING_WORKERS = 1 << 1, /* managing workers */
- GCWQ_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */
- GCWQ_FREEZING = 1 << 3, /* freeze in progress */
- GCWQ_HIGHPRI_PENDING = 1 << 4, /* highpri works on queue */
+ /*
+ * global_cwq flags
+ *
+ * A bound gcwq is either associated or disassociated with its CPU.
+ * While associated (!DISASSOCIATED), all workers are bound to the
+ * CPU and none has %WORKER_UNBOUND set and concurrency management
+ * is in effect.
+ *
+ * While DISASSOCIATED, the cpu may be offline and all workers have
+ * %WORKER_UNBOUND set and concurrency management disabled, and may
+ * be executing on any CPU. The gcwq behaves as an unbound one.
+ *
+ * Note that DISASSOCIATED can be flipped only while holding
+ * managership of all pools on the gcwq to avoid changing binding
+ * state while create_worker() is in progress.
+ */
+ GCWQ_DISASSOCIATED = 1 << 0, /* cpu can't serve workers */
+ GCWQ_FREEZING = 1 << 1, /* freeze in progress */
+
+ /* pool flags */
+ POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */
/* worker flags */
WORKER_STARTED = 1 << 0, /* started */
WORKER_DIE = 1 << 1, /* die die die */
WORKER_IDLE = 1 << 2, /* is idle */
WORKER_PREP = 1 << 3, /* preparing to run works */
- WORKER_ROGUE = 1 << 4, /* not bound to any cpu */
WORKER_REBIND = 1 << 5, /* mom is home, come back */
WORKER_CPU_INTENSIVE = 1 << 6, /* cpu intensive */
WORKER_UNBOUND = 1 << 7, /* worker is unbound */
- WORKER_NOT_RUNNING = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
- WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
+ WORKER_NOT_RUNNING = WORKER_PREP | WORKER_REBIND | WORKER_UNBOUND |
+ WORKER_CPU_INTENSIVE,
- /* gcwq->trustee_state */
- TRUSTEE_START = 0, /* start */
- TRUSTEE_IN_CHARGE = 1, /* trustee in charge of gcwq */
- TRUSTEE_BUTCHER = 2, /* butcher workers */
- TRUSTEE_RELEASE = 3, /* release workers */
- TRUSTEE_DONE = 4, /* trustee is done */
+ NR_WORKER_POOLS = 2, /* # worker pools per gcwq */
BUSY_WORKER_HASH_ORDER = 6, /* 64 pointers */
BUSY_WORKER_HASH_SIZE = 1 << BUSY_WORKER_HASH_ORDER,
@@ -84,13 +93,13 @@ enum {
(min two ticks) */
MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
CREATE_COOLDOWN = HZ, /* time to breath after fail */
- TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
/*
* Rescue workers are used only on emergencies and shared by
* all cpus. Give -20.
*/
RESCUER_NICE_LEVEL = -20,
+ HIGHPRI_NICE_LEVEL = -20,
};
/*
@@ -115,6 +124,8 @@ enum {
*/
struct global_cwq;
+struct worker_pool;
+struct idle_rebind;
/*
* The poor guys doing the actual heavy lifting. All on-duty workers
@@ -131,12 +142,31 @@ struct worker {
struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
struct list_head scheduled; /* L: scheduled works */
struct task_struct *task; /* I: worker task */
- struct global_cwq *gcwq; /* I: the associated gcwq */
+ struct worker_pool *pool; /* I: the associated pool */
/* 64 bytes boundary on 64bit, 32 on 32bit */
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
- struct work_struct rebind_work; /* L: rebind worker to cpu */
+
+ /* for rebinding worker to CPU */
+ struct idle_rebind *idle_rebind; /* L: for idle worker */
+ struct work_struct rebind_work; /* L: for busy worker */
+};
+
+struct worker_pool {
+ struct global_cwq *gcwq; /* I: the owning gcwq */
+ unsigned int flags; /* X: flags */
+
+ struct list_head worklist; /* L: list of pending works */
+ int nr_workers; /* L: total number of workers */
+ int nr_idle; /* L: currently idle ones */
+
+ struct list_head idle_list; /* X: list of idle workers */
+ struct timer_list idle_timer; /* L: worker idle timeout */
+ struct timer_list mayday_timer; /* L: SOS timer for workers */
+
+ struct mutex manager_mutex; /* mutex manager should hold */
+ struct ida worker_ida; /* L: for worker IDs */
};
/*
@@ -146,27 +176,16 @@ struct worker {
*/
struct global_cwq {
spinlock_t lock; /* the gcwq lock */
- struct list_head worklist; /* L: list of pending works */
unsigned int cpu; /* I: the associated cpu */
unsigned int flags; /* L: GCWQ_* flags */
- int nr_workers; /* L: total number of workers */
- int nr_idle; /* L: currently idle ones */
-
- /* workers are chained either in the idle_list or busy_hash */
- struct list_head idle_list; /* X: list of idle workers */
+ /* workers are chained either in busy_hash or pool idle_list */
struct hlist_head busy_hash[BUSY_WORKER_HASH_SIZE];
/* L: hash of busy workers */
- struct timer_list idle_timer; /* L: worker idle timeout */
- struct timer_list mayday_timer; /* L: SOS timer for dworkers */
-
- struct ida worker_ida; /* L: for worker IDs */
+ struct worker_pool pools[2]; /* normal and highpri pools */
- struct task_struct *trustee; /* L: for gcwq shutdown */
- unsigned int trustee_state; /* L: trustee state */
- wait_queue_head_t trustee_wait; /* trustee wait */
- struct worker *first_idle; /* L: first idle worker */
+ wait_queue_head_t rebind_hold; /* rebind hold wait */
} ____cacheline_aligned_in_smp;
/*
@@ -175,7 +194,7 @@ struct global_cwq {
* aligned at two's power of the number of flag bits.
*/
struct cpu_workqueue_struct {
- struct global_cwq *gcwq; /* I: the associated gcwq */
+ struct worker_pool *pool; /* I: the associated pool */
struct workqueue_struct *wq; /* I: the owning workqueue */
int work_color; /* L: current color */
int flush_color; /* L: flushing color */
@@ -264,6 +283,10 @@ EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
+#define for_each_worker_pool(pool, gcwq) \
+ for ((pool) = &(gcwq)->pools[0]; \
+ (pool) < &(gcwq)->pools[NR_WORKER_POOLS]; (pool)++)
+
#define for_each_busy_worker(worker, i, pos, gcwq) \
for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) \
hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
@@ -444,7 +467,7 @@ static bool workqueue_freezing; /* W: have wqs started freezing? */
* try_to_wake_up(). Put it in a separate cacheline.
*/
static DEFINE_PER_CPU(struct global_cwq, global_cwq);
-static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
+static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
/*
* Global cpu workqueue and nr_running counter for unbound gcwq. The
@@ -452,10 +475,17 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
* workers have WORKER_UNBOUND set.
*/
static struct global_cwq unbound_global_cwq;
-static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0); /* always 0 */
+static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
+ [0 ... NR_WORKER_POOLS - 1] = ATOMIC_INIT(0), /* always 0 */
+};
static int worker_thread(void *__worker);
+static int worker_pool_pri(struct worker_pool *pool)
+{
+ return pool - pool->gcwq->pools;
+}
+
static struct global_cwq *get_gcwq(unsigned int cpu)
{
if (cpu != WORK_CPU_UNBOUND)
@@ -464,12 +494,15 @@ static struct global_cwq *get_gcwq(unsigned int cpu)
return &unbound_global_cwq;
}
-static atomic_t *get_gcwq_nr_running(unsigned int cpu)
+static atomic_t *get_pool_nr_running(struct worker_pool *pool)
{
+ int cpu = pool->gcwq->cpu;
+ int idx = worker_pool_pri(pool);
+
if (cpu != WORK_CPU_UNBOUND)
- return &per_cpu(gcwq_nr_running, cpu);
+ return &per_cpu(pool_nr_running, cpu)[idx];
else
- return &unbound_gcwq_nr_running;
+ return &unbound_pool_nr_running[idx];
}
static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
@@ -555,7 +588,7 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
if (data & WORK_STRUCT_CWQ)
return ((struct cpu_workqueue_struct *)
- (data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
+ (data & WORK_STRUCT_WQ_DATA_MASK))->pool->gcwq;
cpu = data >> WORK_STRUCT_FLAG_BITS;
if (cpu == WORK_CPU_NONE)
@@ -566,60 +599,62 @@ static struct global_cwq *get_work_gcwq(struct work_struct *work)
}
/*
- * Policy functions. These define the policies on how the global
- * worker pool is managed. Unless noted otherwise, these functions
- * assume that they're being called with gcwq->lock held.
+ * Policy functions. These define the policies on how the global worker
+ * pools are managed. Unless noted otherwise, these functions assume that
+ * they're being called with gcwq->lock held.
*/
-static bool __need_more_worker(struct global_cwq *gcwq)
+static bool __need_more_worker(struct worker_pool *pool)
{
- return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
- gcwq->flags & GCWQ_HIGHPRI_PENDING;
+ return !atomic_read(get_pool_nr_running(pool));
}
/*
* Need to wake up a worker? Called from anything but currently
* running workers.
+ *
+ * Note that, because unbound workers never contribute to nr_running, this
+ * function will always return %true for unbound gcwq as long as the
+ * worklist isn't empty.
*/
-static bool need_more_worker(struct global_cwq *gcwq)
+static bool need_more_worker(struct worker_pool *pool)
{
- return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
+ return !list_empty(&pool->worklist) && __need_more_worker(pool);
}
/* Can I start working? Called from busy but !running workers. */
-static bool may_start_working(struct global_cwq *gcwq)
+static bool may_start_working(struct worker_pool *pool)
{
- return gcwq->nr_idle;
+ return pool->nr_idle;
}
/* Do I need to keep working? Called from currently running workers. */
-static bool keep_working(struct global_cwq *gcwq)
+static bool keep_working(struct worker_pool *pool)
{
- atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
+ atomic_t *nr_running = get_pool_nr_running(pool);
- return !list_empty(&gcwq->worklist) &&
- (atomic_read(nr_running) <= 1 ||
- gcwq->flags & GCWQ_HIGHPRI_PENDING);
+ return !list_empty(&pool->worklist) && atomic_read(nr_running) <= 1;
}
/* Do we need a new worker? Called from manager. */
-static bool need_to_create_worker(struct global_cwq *gcwq)
+static bool need_to_create_worker(struct worker_pool *pool)
{
- return need_more_worker(gcwq) && !may_start_working(gcwq);
+ return need_more_worker(pool) && !may_start_working(pool);
}
/* Do I need to be the manager? */
-static bool need_to_manage_workers(struct global_cwq *gcwq)
+static bool need_to_manage_workers(struct worker_pool *pool)
{
- return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
+ return need_to_create_worker(pool) ||
+ (pool->flags & POOL_MANAGE_WORKERS);
}
/* Do we have too many workers and should some go away? */
-static bool too_many_workers(struct global_cwq *gcwq)
+static bool too_many_workers(struct worker_pool *pool)
{
- bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
- int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
- int nr_busy = gcwq->nr_workers - nr_idle;
+ bool managing = mutex_is_locked(&pool->manager_mutex);
+ int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
+ int nr_busy = pool->nr_workers - nr_idle;
return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
@@ -629,26 +664,26 @@ static bool too_many_workers(struct global_cwq *gcwq)
*/
/* Return the first worker. Safe with preemption disabled */
-static struct worker *first_worker(struct global_cwq *gcwq)
+static struct worker *first_worker(struct worker_pool *pool)
{
- if (unlikely(list_empty(&gcwq->idle_list)))
+ if (unlikely(list_empty(&pool->idle_list)))
return NULL;
- return list_first_entry(&gcwq->idle_list, struct worker, entry);
+ return list_first_entry(&pool->idle_list, struct worker, entry);
}
/**
* wake_up_worker - wake up an idle worker
- * @gcwq: gcwq to wake worker for
+ * @pool: worker pool to wake worker from
*
- * Wake up the first idle worker of @gcwq.
+ * Wake up the first idle worker of @pool.
*
* CONTEXT:
* spin_lock_irq(gcwq->lock).
*/
-static void wake_up_worker(struct global_cwq *gcwq)
+static void wake_up_worker(struct worker_pool *pool)
{
- struct worker *worker = first_worker(gcwq);
+ struct worker *worker = first_worker(pool);
if (likely(worker))
wake_up_process(worker->task);
@@ -670,7 +705,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
struct worker *worker = kthread_data(task);
if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(get_gcwq_nr_running(cpu));
+ atomic_inc(get_pool_nr_running(worker->pool));
}
/**
@@ -692,8 +727,8 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
unsigned int cpu)
{
struct worker *worker = kthread_data(task), *to_wakeup = NULL;
- struct global_cwq *gcwq = get_gcwq(cpu);
- atomic_t *nr_running = get_gcwq_nr_running(cpu);
+ struct worker_pool *pool = worker->pool;
+ atomic_t *nr_running = get_pool_nr_running(pool);
if (worker->flags & WORKER_NOT_RUNNING)
return NULL;
@@ -706,14 +741,14 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
* worklist not empty test sequence is in insert_work().
* Please read comment there.
*
- * NOT_RUNNING is clear. This means that trustee is not in
- * charge and we're running on the local cpu w/ rq lock held
- * and preemption disabled, which in turn means that none else
- * could be manipulating idle_list, so dereferencing idle_list
- * without gcwq lock is safe.
+ * NOT_RUNNING is clear. This means that we're bound to and
+ * running on the local cpu w/ rq lock held and preemption
+ * disabled, which in turn means that none else could be
+ * manipulating idle_list, so dereferencing idle_list without gcwq
+ * lock is safe.
*/
- if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
- to_wakeup = first_worker(gcwq);
+ if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist))
+ to_wakeup = first_worker(pool);
return to_wakeup ? to_wakeup->task : NULL;
}
@@ -733,7 +768,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
static inline void worker_set_flags(struct worker *worker, unsigned int flags,
bool wakeup)
{
- struct global_cwq *gcwq = worker->gcwq;
+ struct worker_pool *pool = worker->pool;
WARN_ON_ONCE(worker->task != current);
@@ -744,12 +779,12 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
*/
if ((flags & WORKER_NOT_RUNNING) &&
!(worker->flags & WORKER_NOT_RUNNING)) {
- atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
+ atomic_t *nr_running = get_pool_nr_running(pool);
if (wakeup) {
if (atomic_dec_and_test(nr_running) &&
- !list_empty(&gcwq->worklist))
- wake_up_worker(gcwq);
+ !list_empty(&pool->worklist))
+ wake_up_worker(pool);
} else
atomic_dec(nr_running);
}
@@ -769,7 +804,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
*/
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
- struct global_cwq *gcwq = worker->gcwq;
+ struct worker_pool *pool = worker->pool;
unsigned int oflags = worker->flags;
WARN_ON_ONCE(worker->task != current);
@@ -783,7 +818,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
*/
if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
if (!(worker->flags & WORKER_NOT_RUNNING))
- atomic_inc(get_gcwq_nr_running(gcwq->cpu));
+ atomic_inc(get_pool_nr_running(pool));
}
/**
@@ -867,43 +902,6 @@ static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
}
/**
- * gcwq_determine_ins_pos - find insertion position
- * @gcwq: gcwq of interest
- * @cwq: cwq a work is being queued for
- *
- * A work for @cwq is about to be queued on @gcwq, determine insertion
- * position for the work. If @cwq is for HIGHPRI wq, the work is
- * queued at the head of the queue but in FIFO order with respect to
- * other HIGHPRI works; otherwise, at the end of the queue. This
- * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
- * there are HIGHPRI works pending.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
- *
- * RETURNS:
- * Pointer to inserstion position.
- */
-static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
- struct cpu_workqueue_struct *cwq)
-{
- struct work_struct *twork;
-
- if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
- return &gcwq->worklist;
-
- list_for_each_entry(twork, &gcwq->worklist, entry) {
- struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
-
- if (!(tcwq->wq->flags & WQ_HIGHPRI))
- break;
- }
-
- gcwq->flags |= GCWQ_HIGHPRI_PENDING;
- return &twork->entry;
-}
-
-/**
* insert_work - insert a work into gcwq
* @cwq: cwq @work belongs to
* @work: work to insert
@@ -920,7 +918,7 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
struct work_struct *work, struct list_head *head,
unsigned int extra_flags)
{
- struct global_cwq *gcwq = cwq->gcwq;
+ struct worker_pool *pool = cwq->pool;
/* we own @work, set data and link */
set_work_cwq(work, cwq, extra_flags);
@@ -940,8 +938,8 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
*/
smp_mb();
- if (__need_more_worker(gcwq))
- wake_up_worker(gcwq);
+ if (__need_more_worker(pool))
+ wake_up_worker(pool);
}
/*
@@ -1043,7 +1041,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
if (likely(cwq->nr_active < cwq->max_active)) {
trace_workqueue_activate_work(work);
cwq->nr_active++;
- worklist = gcwq_determine_ins_pos(gcwq, cwq);
+ worklist = &cwq->pool->worklist;
} else {
work_flags |= WORK_STRUCT_DELAYED;
worklist = &cwq->delayed_works;
@@ -1192,7 +1190,8 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
*/
static void worker_enter_idle(struct worker *worker)
{
- struct global_cwq *gcwq = worker->gcwq;
+ struct worker_pool *pool = worker->pool;
+ struct global_cwq *gcwq = pool->gcwq;
BUG_ON(worker->flags & WORKER_IDLE);
BUG_ON(!list_empty(&worker->entry) &&
@@ -1200,27 +1199,24 @@ static void worker_enter_idle(struct worker *worker)
/* can't use worker_set_flags(), also called from start_worker() */
worker->flags |= WORKER_IDLE;
- gcwq->nr_idle++;
+ pool->nr_idle++;
worker->last_active = jiffies;
/* idle_list is LIFO */
- list_add(&worker->entry, &gcwq->idle_list);
+ list_add(&worker->entry, &pool->idle_list);
- if (likely(!(worker->flags & WORKER_ROGUE))) {
- if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
- mod_timer(&gcwq->idle_timer,
- jiffies + IDLE_WORKER_TIMEOUT);
- } else
- wake_up_all(&gcwq->trustee_wait);
+ if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
+ mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
/*
- * Sanity check nr_running. Because trustee releases gcwq->lock
- * between setting %WORKER_ROGUE and zapping nr_running, the
- * warning may trigger spuriously. Check iff trustee is idle.
+ * Sanity check nr_running. Because gcwq_unbind_fn() releases
+ * gcwq->lock between setting %WORKER_UNBOUND and zapping
+ * nr_running, the warning may trigger spuriously. Check iff
+ * unbind is not in progress.
*/
- WARN_ON_ONCE(gcwq->trustee_state == TRUSTEE_DONE &&
- gcwq->nr_workers == gcwq->nr_idle &&
- atomic_read(get_gcwq_nr_running(gcwq->cpu)));
+ WARN_ON_ONCE(!(gcwq->flags & GCWQ_DISASSOCIATED) &&
+ pool->nr_workers == pool->nr_idle &&
+ atomic_read(get_pool_nr_running(pool)));
}
/**
@@ -1234,11 +1230,11 @@ static void worker_enter_idle(struct worker *worker)
*/
static void worker_leave_idle(struct worker *worker)
{
- struct global_cwq *gcwq = worker->gcwq;
+ struct worker_pool *pool = worker->pool;
BUG_ON(!(worker->flags & WORKER_IDLE));
worker_clr_flags(worker, WORKER_IDLE);
- gcwq->nr_idle--;
+ pool->nr_idle--;
list_del_init(&worker->entry);
}
@@ -1258,11 +1254,11 @@ static void worker_leave_idle(struct worker *worker)
* verbatim as it's best effort and blocking and gcwq may be
* [dis]associated in the meantime.
*
- * This function tries set_cpus_allowed() and locks gcwq and verifies
- * the binding against GCWQ_DISASSOCIATED which is set during
- * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
- * idle state or fetches works without dropping lock, it can guarantee
- * the scheduling requirement described in the first paragraph.
+ * This function tries set_cpus_allowed() and locks gcwq and verifies the
+ * binding against %GCWQ_DISASSOCIATED which is set during
+ * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
+ * enters idle state or fetches works without dropping lock, it can
+ * guarantee the scheduling requirement described in the first paragraph.
*
* CONTEXT:
* Might sleep. Called without any lock but returns with gcwq->lock
@@ -1275,7 +1271,7 @@ static void worker_leave_idle(struct worker *worker)
static bool worker_maybe_bind_and_lock(struct worker *worker)
__acquires(&gcwq->lock)
{
- struct global_cwq *gcwq = worker->gcwq;
+ struct global_cwq *gcwq = worker->pool->gcwq;
struct task_struct *task = worker->task;
while (true) {
@@ -1308,16 +1304,40 @@ __acquires(&gcwq->lock)
}
}
+struct idle_rebind {
+ int cnt; /* # workers to be rebound */
+ struct completion done; /* all workers rebound */
+};
+
+/*
+ * Rebind an idle @worker to its CPU. During CPU onlining, this has to
+ * happen synchronously for idle workers. worker_thread() will test
+ * %WORKER_REBIND before leaving idle and call this function.
+ */
+static void idle_worker_rebind(struct worker *worker)
+{
+ struct global_cwq *gcwq = worker->pool->gcwq;
+
+ /* CPU must be online at this point */
+ WARN_ON(!worker_maybe_bind_and_lock(worker));
+ if (!--worker->idle_rebind->cnt)
+ complete(&worker->idle_rebind->done);
+ spin_unlock_irq(&worker->pool->gcwq->lock);
+
+ /* we did our part, wait for rebind_workers() to finish up */
+ wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND));
+}
+
/*
- * Function for worker->rebind_work used to rebind rogue busy workers
- * to the associated cpu which is coming back online. This is
- * scheduled by cpu up but can race with other cpu hotplug operations
- * and may be executed twice without intervening cpu down.
+ * Function for @worker->rebind.work used to rebind unbound busy workers to
+ * the associated cpu which is coming back online. This is scheduled by
+ * cpu up but can race with other cpu hotplug operations and may be
+ * executed twice without intervening cpu down.
*/
-static void worker_rebind_fn(struct work_struct *work)
+static void busy_worker_rebind_fn(struct work_struct *work)
{
struct worker *worker = container_of(work, struct worker, rebind_work);
- struct global_cwq *gcwq = worker->gcwq;
+ struct global_cwq *gcwq = worker->pool->gcwq;
if (worker_maybe_bind_and_lock(worker))
worker_clr_flags(worker, WORKER_REBIND);
@@ -1325,6 +1345,112 @@ static void worker_rebind_fn(struct work_struct *work)
spin_unlock_irq(&gcwq->lock);
}
+/**
+ * rebind_workers - rebind all workers of a gcwq to the associated CPU
+ * @gcwq: gcwq of interest
+ *
+ * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding
+ * is different for idle and busy ones.
+ *
+ * The idle ones should be rebound synchronously and idle rebinding should
+ * be complete before any worker starts executing work items with
+ * concurrency management enabled; otherwise, scheduler may oops trying to
+ * wake up non-local idle worker from wq_worker_sleeping().
+ *
+ * This is achieved by repeatedly requesting rebinding until all idle
+ * workers are known to have been rebound under @gcwq->lock and holding all
+ * idle workers from becoming busy until idle rebinding is complete.
+ *
+ * Once idle workers are rebound, busy workers can be rebound as they
+ * finish executing their current work items. Queueing the rebind work at
+ * the head of their scheduled lists is enough. Note that nr_running will
+ * be properbly bumped as busy workers rebind.
+ *
+ * On return, all workers are guaranteed to either be bound or have rebind
+ * work item scheduled.
+ */
+static void rebind_workers(struct global_cwq *gcwq)
+ __releases(&gcwq->lock) __acquires(&gcwq->lock)
+{
+ struct idle_rebind idle_rebind;
+ struct worker_pool *pool;
+ struct worker *worker;
+ struct hlist_node *pos;
+ int i;
+
+ lockdep_assert_held(&gcwq->lock);
+
+ for_each_worker_pool(pool, gcwq)
+ lockdep_assert_held(&pool->manager_mutex);
+
+ /*
+ * Rebind idle workers. Interlocked both ways. We wait for
+ * workers to rebind via @idle_rebind.done. Workers will wait for
+ * us to finish up by watching %WORKER_REBIND.
+ */
+ init_completion(&idle_rebind.done);
+retry:
+ idle_rebind.cnt = 1;
+ INIT_COMPLETION(idle_rebind.done);
+
+ /* set REBIND and kick idle ones, we'll wait for these later */
+ for_each_worker_pool(pool, gcwq) {
+ list_for_each_entry(worker, &pool->idle_list, entry) {
+ if (worker->flags & WORKER_REBIND)
+ continue;
+
+ /* morph UNBOUND to REBIND */
+ worker->flags &= ~WORKER_UNBOUND;
+ worker->flags |= WORKER_REBIND;
+
+ idle_rebind.cnt++;
+ worker->idle_rebind = &idle_rebind;
+
+ /* worker_thread() will call idle_worker_rebind() */
+ wake_up_process(worker->task);
+ }
+ }
+
+ if (--idle_rebind.cnt) {
+ spin_unlock_irq(&gcwq->lock);
+ wait_for_completion(&idle_rebind.done);
+ spin_lock_irq(&gcwq->lock);
+ /* busy ones might have become idle while waiting, retry */
+ goto retry;
+ }
+
+ /*
+ * All idle workers are rebound and waiting for %WORKER_REBIND to
+ * be cleared inside idle_worker_rebind(). Clear and release.
+ * Clearing %WORKER_REBIND from this foreign context is safe
+ * because these workers are still guaranteed to be idle.
+ */
+ for_each_worker_pool(pool, gcwq)
+ list_for_each_entry(worker, &pool->idle_list, entry)
+ worker->flags &= ~WORKER_REBIND;
+
+ wake_up_all(&gcwq->rebind_hold);
+
+ /* rebind busy workers */
+ for_each_busy_worker(worker, i, pos, gcwq) {
+ struct work_struct *rebind_work = &worker->rebind_work;
+
+ /* morph UNBOUND to REBIND */
+ worker->flags &= ~WORKER_UNBOUND;
+ worker->flags |= WORKER_REBIND;
+
+ if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
+ work_data_bits(rebind_work)))
+ continue;
+
+ /* wq doesn't matter, use the default one */
+ debug_work_activate(rebind_work);
+ insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
+ worker->scheduled.next,
+ work_color_to_flags(WORK_NO_COLOR));
+ }
+}
+
static struct worker *alloc_worker(void)
{
struct worker *worker;
@@ -1333,7 +1459,7 @@ static struct worker *alloc_worker(void)
if (worker) {
INIT_LIST_HEAD(&worker->entry);
INIT_LIST_HEAD(&worker->scheduled);
- INIT_WORK(&worker->rebind_work, worker_rebind_fn);
+ INIT_WORK(&worker->rebind_work, busy_worker_rebind_fn);
/* on creation a worker is in !idle && prep state */
worker->flags = WORKER_PREP;
}
@@ -1342,10 +1468,9 @@ static struct worker *alloc_worker(void)
/**
* create_worker - create a new workqueue worker
- * @gcwq: gcwq the new worker will belong to
- * @bind: whether to set affinity to @cpu or not
+ * @pool: pool the new worker will belong to
*
- * Create a new worker which is bound to @gcwq. The returned worker
+ * Create a new worker which is bound to @pool. The returned worker
* can be started by calling start_worker() or destroyed using
* destroy_worker().
*
@@ -1355,16 +1480,17 @@ static struct worker *alloc_worker(void)
* RETURNS:
* Pointer to the newly created worker.
*/
-static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
+static struct worker *create_worker(struct worker_pool *pool)
{
- bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
+ struct global_cwq *gcwq = pool->gcwq;
+ const char *pri = worker_pool_pri(pool) ? "H" : "";
struct worker *worker = NULL;
int id = -1;
spin_lock_irq(&gcwq->lock);
- while (ida_get_new(&gcwq->worker_ida, &id)) {
+ while (ida_get_new(&pool->worker_ida, &id)) {
spin_unlock_irq(&gcwq->lock);
- if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
+ if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL))
goto fail;
spin_lock_irq(&gcwq->lock);
}
@@ -1374,38 +1500,43 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
if (!worker)
goto fail;
- worker->gcwq = gcwq;
+ worker->pool = pool;
worker->id = id;
- if (!on_unbound_cpu)
+ if (gcwq->cpu != WORK_CPU_UNBOUND)
worker->task = kthread_create_on_node(worker_thread,
- worker,
- cpu_to_node(gcwq->cpu),
- "kworker/%u:%d", gcwq->cpu, id);
+ worker, cpu_to_node(gcwq->cpu),
+ "kworker/%u:%d%s", gcwq->cpu, id, pri);
else
worker->task = kthread_create(worker_thread, worker,
- "kworker/u:%d", id);
+ "kworker/u:%d%s", id, pri);
if (IS_ERR(worker->task))
goto fail;
+ if (worker_pool_pri(pool))
+ set_user_nice(worker->task, HIGHPRI_NICE_LEVEL);
+
/*
- * A rogue worker will become a regular one if CPU comes
- * online later on. Make sure every worker has
- * PF_THREAD_BOUND set.
+ * Determine CPU binding of the new worker depending on
+ * %GCWQ_DISASSOCIATED. The caller is responsible for ensuring the
+ * flag remains stable across this function. See the comments
+ * above the flag definition for details.
+ *
+ * As an unbound worker may later become a regular one if CPU comes
+ * online, make sure every worker has %PF_THREAD_BOUND set.
*/
- if (bind && !on_unbound_cpu)
+ if (!(gcwq->flags & GCWQ_DISASSOCIATED)) {
kthread_bind(worker->task, gcwq->cpu);
- else {
+ } else {
worker->task->flags |= PF_THREAD_BOUND;
- if (on_unbound_cpu)
- worker->flags |= WORKER_UNBOUND;
+ worker->flags |= WORKER_UNBOUND;
}
return worker;
fail:
if (id >= 0) {
spin_lock_irq(&gcwq->lock);
- ida_remove(&gcwq->worker_ida, id);
+ ida_remove(&pool->worker_ida, id);
spin_unlock_irq(&gcwq->lock);
}
kfree(worker);
@@ -1424,7 +1555,7 @@ fail:
static void start_worker(struct worker *worker)
{
worker->flags |= WORKER_STARTED;
- worker->gcwq->nr_workers++;
+ worker->pool->nr_workers++;
worker_enter_idle(worker);
wake_up_process(worker->task);
}
@@ -1440,7 +1571,8 @@ static void start_worker(struct worker *worker)
*/
static void destroy_worker(struct worker *worker)
{
- struct global_cwq *gcwq = worker->gcwq;
+ struct worker_pool *pool = worker->pool;
+ struct global_cwq *gcwq = pool->gcwq;
int id = worker->id;
/* sanity check frenzy */
@@ -1448,9 +1580,9 @@ static void destroy_worker(struct worker *worker)
BUG_ON(!list_empty(&worker->scheduled));
if (worker->flags & WORKER_STARTED)
- gcwq->nr_workers--;
+ pool->nr_workers--;
if (worker->flags & WORKER_IDLE)
- gcwq->nr_idle--;
+ pool->nr_idle--;
list_del_init(&worker->entry);
worker->flags |= WORKER_DIE;
@@ -1461,29 +1593,30 @@ static void destroy_worker(struct worker *worker)
kfree(worker);
spin_lock_irq(&gcwq->lock);
- ida_remove(&gcwq->worker_ida, id);
+ ida_remove(&pool->worker_ida, id);
}
-static void idle_worker_timeout(unsigned long __gcwq)
+static void idle_worker_timeout(unsigned long __pool)
{
- struct global_cwq *gcwq = (void *)__gcwq;
+ struct worker_pool *pool = (void *)__pool;
+ struct global_cwq *gcwq = pool->gcwq;
spin_lock_irq(&gcwq->lock);
- if (too_many_workers(gcwq)) {
+ if (too_many_workers(pool)) {
struct worker *worker;
unsigned long expires;
/* idle_list is kept in LIFO order, check the last one */
- worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
+ worker = list_entry(pool->idle_list.prev, struct worker, entry);
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
if (time_before(jiffies, expires))
- mod_timer(&gcwq->idle_timer, expires);
+ mod_timer(&pool->idle_timer, expires);
else {
/* it's been idle for too long, wake up manager */
- gcwq->flags |= GCWQ_MANAGE_WORKERS;
- wake_up_worker(gcwq);
+ pool->flags |= POOL_MANAGE_WORKERS;
+ wake_up_worker(pool);
}
}
@@ -1500,7 +1633,7 @@ static bool send_mayday(struct work_struct *work)
return false;
/* mayday mayday mayday */
- cpu = cwq->gcwq->cpu;
+ cpu = cwq->pool->gcwq->cpu;
/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
if (cpu == WORK_CPU_UNBOUND)
cpu = 0;
@@ -1509,37 +1642,38 @@ static bool send_mayday(struct work_struct *work)
return true;
}
-static void gcwq_mayday_timeout(unsigned long __gcwq)
+static void gcwq_mayday_timeout(unsigned long __pool)
{
- struct global_cwq *gcwq = (void *)__gcwq;
+ struct worker_pool *pool = (void *)__pool;
+ struct global_cwq *gcwq = pool->gcwq;
struct work_struct *work;
spin_lock_irq(&gcwq->lock);
- if (need_to_create_worker(gcwq)) {
+ if (need_to_create_worker(pool)) {
/*
* We've been trying to create a new worker but
* haven't been successful. We might be hitting an
* allocation deadlock. Send distress signals to
* rescuers.
*/
- list_for_each_entry(work, &gcwq->worklist, entry)
+ list_for_each_entry(work, &pool->worklist, entry)
send_mayday(work);
}
spin_unlock_irq(&gcwq->lock);
- mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
+ mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
}
/**
* maybe_create_worker - create a new worker if necessary
- * @gcwq: gcwq to create a new worker for
+ * @pool: pool to create a new worker for
*
- * Create a new worker for @gcwq if necessary. @gcwq is guaranteed to
+ * Create a new worker for @pool if necessary. @pool is guaranteed to
* have at least one idle worker on return from this function. If
* creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
- * sent to all rescuers with works scheduled on @gcwq to resolve
+ * sent to all rescuers with works scheduled on @pool to resolve
* possible allocation deadlock.
*
* On return, need_to_create_worker() is guaranteed to be false and
@@ -1554,52 +1688,54 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
* false if no action was taken and gcwq->lock stayed locked, true
* otherwise.
*/
-static bool maybe_create_worker(struct global_cwq *gcwq)
+static bool maybe_create_worker(struct worker_pool *pool)
__releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
- if (!need_to_create_worker(gcwq))
+ struct global_cwq *gcwq = pool->gcwq;
+
+ if (!need_to_create_worker(pool))
return false;
restart:
spin_unlock_irq(&gcwq->lock);
/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
- mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
+ mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
while (true) {
struct worker *worker;
- worker = create_worker(gcwq, true);
+ worker = create_worker(pool);
if (worker) {
- del_timer_sync(&gcwq->mayday_timer);
+ del_timer_sync(&pool->mayday_timer);
spin_lock_irq(&gcwq->lock);
start_worker(worker);
- BUG_ON(need_to_create_worker(gcwq));
+ BUG_ON(need_to_create_worker(pool));
return true;
}
- if (!need_to_create_worker(gcwq))
+ if (!need_to_create_worker(pool))
break;
__set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(CREATE_COOLDOWN);
- if (!need_to_create_worker(gcwq))
+ if (!need_to_create_worker(pool))
break;
}
- del_timer_sync(&gcwq->mayday_timer);
+ del_timer_sync(&pool->mayday_timer);
spin_lock_irq(&gcwq->lock);
- if (need_to_create_worker(gcwq))
+ if (need_to_create_worker(pool))
goto restart;
return true;
}
/**
* maybe_destroy_worker - destroy workers which have been idle for a while
- * @gcwq: gcwq to destroy workers for
+ * @pool: pool to destroy workers for
*
- * Destroy @gcwq workers which have been idle for longer than
+ * Destroy @pool workers which have been idle for longer than
* IDLE_WORKER_TIMEOUT.
*
* LOCKING:
@@ -1610,19 +1746,19 @@ restart:
* false if no action was taken and gcwq->lock stayed locked, true
* otherwise.
*/
-static bool maybe_destroy_workers(struct global_cwq *gcwq)
+static bool maybe_destroy_workers(struct worker_pool *pool)
{
bool ret = false;
- while (too_many_workers(gcwq)) {
+ while (too_many_workers(pool)) {
struct worker *worker;
unsigned long expires;
- worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
+ worker = list_entry(pool->idle_list.prev, struct worker, entry);
expires = worker->last_active + IDLE_WORKER_TIMEOUT;
if (time_before(jiffies, expires)) {
- mod_timer(&gcwq->idle_timer, expires);
+ mod_timer(&pool->idle_timer, expires);
break;
}
@@ -1655,31 +1791,22 @@ static bool maybe_destroy_workers(struct global_cwq *gcwq)
*/
static bool manage_workers(struct worker *worker)
{
- struct global_cwq *gcwq = worker->gcwq;
+ struct worker_pool *pool = worker->pool;
bool ret = false;
- if (gcwq->flags & GCWQ_MANAGING_WORKERS)
+ if (!mutex_trylock(&pool->manager_mutex))
return ret;
- gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
- gcwq->flags |= GCWQ_MANAGING_WORKERS;
+ pool->flags &= ~POOL_MANAGE_WORKERS;
/*
* Destroy and then create so that may_start_working() is true
* on return.
*/
- ret |= maybe_destroy_workers(gcwq);
- ret |= maybe_create_worker(gcwq);
-
- gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
-
- /*
- * The trustee might be waiting to take over the manager
- * position, tell it we're done.
- */
- if (unlikely(gcwq->trustee))
- wake_up_all(&gcwq->trustee_wait);
+ ret |= maybe_destroy_workers(pool);
+ ret |= maybe_create_worker(pool);
+ mutex_unlock(&pool->manager_mutex);
return ret;
}
@@ -1728,10 +1855,9 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
struct work_struct *work = list_first_entry(&cwq->delayed_works,
struct work_struct, entry);
- struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
trace_workqueue_activate_work(work);
- move_linked_works(work, pos, NULL);
+ move_linked_works(work, &cwq->pool->worklist, NULL);
__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
cwq->nr_active++;
}
@@ -1804,7 +1930,8 @@ __releases(&gcwq->lock)
__acquires(&gcwq->lock)
{
struct cpu_workqueue_struct *cwq = get_work_cwq(work);
- struct global_cwq *gcwq = cwq->gcwq;
+ struct worker_pool *pool = worker->pool;
+ struct global_cwq *gcwq = pool->gcwq;
struct hlist_head *bwh = busy_worker_head(gcwq, work);
bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
work_func_t f = work->func;
@@ -1823,6 +1950,15 @@ __acquires(&gcwq->lock)
lockdep_copy_map(&lockdep_map, &work->lockdep_map);
#endif
/*
+ * Ensure we're on the correct CPU. DISASSOCIATED test is
+ * necessary to avoid spurious warnings from rescuers servicing the
+ * unbound or a disassociated gcwq.
+ */
+ WARN_ON_ONCE(!(worker->flags & (WORKER_UNBOUND | WORKER_REBIND)) &&
+ !(gcwq->flags & GCWQ_DISASSOCIATED) &&
+ raw_smp_processor_id() != gcwq->cpu);
+
+ /*
* A single work shouldn't be executed concurrently by
* multiple workers on a single cpu. Check whether anyone is
* already processing the work. If so, defer the work to the
@@ -1846,27 +1982,19 @@ __acquires(&gcwq->lock)
list_del_init(&work->entry);
/*
- * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
- * wake up another worker; otherwise, clear HIGHPRI_PENDING.
- */
- if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
- struct work_struct *nwork = list_first_entry(&gcwq->worklist,
- struct work_struct, entry);
-
- if (!list_empty(&gcwq->worklist) &&
- get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
- wake_up_worker(gcwq);
- else
- gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
- }
-
- /*
* CPU intensive works don't participate in concurrency
* management. They're the scheduler's responsibility.
*/
if (unlikely(cpu_intensive))
worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+ /*
+ * Unbound gcwq isn't concurrency managed and work items should be
+ * executed ASAP. Wake up another worker if necessary.
+ */
+ if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
+ wake_up_worker(pool);
+
spin_unlock_irq(&gcwq->lock);
work_clear_pending(work);
@@ -1939,28 +2067,38 @@ static void process_scheduled_works(struct worker *worker)
static int worker_thread(void *__worker)
{
struct worker *worker = __worker;
- struct global_cwq *gcwq = worker->gcwq;
+ struct worker_pool *pool = worker->pool;
+ struct global_cwq *gcwq = pool->gcwq;
/* tell the scheduler that this is a workqueue worker */
worker->task->flags |= PF_WQ_WORKER;
woke_up:
spin_lock_irq(&gcwq->lock);
- /* DIE can be set only while we're idle, checking here is enough */
- if (worker->flags & WORKER_DIE) {
+ /*
+ * DIE can be set only while idle and REBIND set while busy has
+ * @worker->rebind_work scheduled. Checking here is enough.
+ */
+ if (unlikely(worker->flags & (WORKER_REBIND | WORKER_DIE))) {
spin_unlock_irq(&gcwq->lock);
- worker->task->flags &= ~PF_WQ_WORKER;
- return 0;
+
+ if (worker->flags & WORKER_DIE) {
+ worker->task->flags &= ~PF_WQ_WORKER;
+ return 0;
+ }
+
+ idle_worker_rebind(worker);
+ goto woke_up;
}
worker_leave_idle(worker);
recheck:
/* no more worker necessary? */
- if (!need_more_worker(gcwq))
+ if (!need_more_worker(pool))
goto sleep;
/* do we need to manage? */
- if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
+ if (unlikely(!may_start_working(pool)) && manage_workers(worker))
goto recheck;
/*
@@ -1979,7 +2117,7 @@ recheck:
do {
struct work_struct *work =
- list_first_entry(&gcwq->worklist,
+ list_first_entry(&pool->worklist,
struct work_struct, entry);
if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
@@ -1991,11 +2129,11 @@ recheck:
move_linked_works(work, &worker->scheduled, NULL);
process_scheduled_works(worker);
}
- } while (keep_working(gcwq));
+ } while (keep_working(pool));
worker_set_flags(worker, WORKER_PREP, false);
sleep:
- if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
+ if (unlikely(need_to_manage_workers(pool)) && manage_workers(worker))
goto recheck;
/*
@@ -2053,14 +2191,15 @@ repeat:
for_each_mayday_cpu(cpu, wq->mayday_mask) {
unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
- struct global_cwq *gcwq = cwq->gcwq;
+ struct worker_pool *pool = cwq->pool;
+ struct global_cwq *gcwq = pool->gcwq;
struct work_struct *work, *n;
__set_current_state(TASK_RUNNING);
mayday_clear_cpu(cpu, wq->mayday_mask);
/* migrate to the target cpu if possible */
- rescuer->gcwq = gcwq;
+ rescuer->pool = pool;
worker_maybe_bind_and_lock(rescuer);
/*
@@ -2068,7 +2207,7 @@ repeat:
* process'em.
*/
BUG_ON(!list_empty(&rescuer->scheduled));
- list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
+ list_for_each_entry_safe(work, n, &pool->worklist, entry)
if (get_work_cwq(work) == cwq)
move_linked_works(work, scheduled, &n);
@@ -2079,8 +2218,8 @@ repeat:
* regular worker; otherwise, we end up with 0 concurrency
* and stalling the execution.
*/
- if (keep_working(gcwq))
- wake_up_worker(gcwq);
+ if (keep_working(pool))
+ wake_up_worker(pool);
spin_unlock_irq(&gcwq->lock);
}
@@ -2205,7 +2344,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
for_each_cwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
- struct global_cwq *gcwq = cwq->gcwq;
+ struct global_cwq *gcwq = cwq->pool->gcwq;
spin_lock_irq(&gcwq->lock);
@@ -2421,9 +2560,9 @@ reflush:
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
bool drained;
- spin_lock_irq(&cwq->gcwq->lock);
+ spin_lock_irq(&cwq->pool->gcwq->lock);
drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
- spin_unlock_irq(&cwq->gcwq->lock);
+ spin_unlock_irq(&cwq->pool->gcwq->lock);
if (drained)
continue;
@@ -2463,7 +2602,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
*/
smp_rmb();
cwq = get_work_cwq(work);
- if (unlikely(!cwq || gcwq != cwq->gcwq))
+ if (unlikely(!cwq || gcwq != cwq->pool->gcwq))
goto already_gone;
} else if (wait_executing) {
worker = find_worker_executing_work(gcwq, work);
@@ -2984,13 +3123,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
if (flags & WQ_MEM_RECLAIM)
flags |= WQ_RESCUER;
- /*
- * Unbound workqueues aren't concurrency managed and should be
- * dispatched to workers immediately.
- */
- if (flags & WQ_UNBOUND)
- flags |= WQ_HIGHPRI;
-
max_active = max_active ?: WQ_DFL_ACTIVE;
max_active = wq_clamp_max_active(max_active, flags, wq->name);
@@ -3011,9 +3143,10 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
for_each_cwq_cpu(cpu, wq) {
struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
struct global_cwq *gcwq = get_gcwq(cpu);
+ int pool_idx = (bool)(flags & WQ_HIGHPRI);
BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
- cwq->gcwq = gcwq;
+ cwq->pool = &gcwq->pools[pool_idx];
cwq->wq = wq;
cwq->flush_color = -1;
cwq->max_active = max_active;
@@ -3225,369 +3358,143 @@ EXPORT_SYMBOL_GPL(work_busy);
* gcwqs serve mix of short, long and very long running works making
* blocked draining impractical.
*
- * This is solved by allowing a gcwq to be detached from CPU, running
- * it with unbound (rogue) workers and allowing it to be reattached
- * later if the cpu comes back online. A separate thread is created
- * to govern a gcwq in such state and is called the trustee of the
- * gcwq.
- *
- * Trustee states and their descriptions.
- *
- * START Command state used on startup. On CPU_DOWN_PREPARE, a
- * new trustee is started with this state.
- *
- * IN_CHARGE Once started, trustee will enter this state after
- * assuming the manager role and making all existing
- * workers rogue. DOWN_PREPARE waits for trustee to
- * enter this state. After reaching IN_CHARGE, trustee
- * tries to execute the pending worklist until it's empty
- * and the state is set to BUTCHER, or the state is set
- * to RELEASE.
- *
- * BUTCHER Command state which is set by the cpu callback after
- * the cpu has went down. Once this state is set trustee
- * knows that there will be no new works on the worklist
- * and once the worklist is empty it can proceed to
- * killing idle workers.
- *
- * RELEASE Command state which is set by the cpu callback if the
- * cpu down has been canceled or it has come online
- * again. After recognizing this state, trustee stops
- * trying to drain or butcher and clears ROGUE, rebinds
- * all remaining workers back to the cpu and releases
- * manager role.
- *
- * DONE Trustee will enter this state after BUTCHER or RELEASE
- * is complete.
- *
- * trustee CPU draining
- * took over down complete
- * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
- * | | ^
- * | CPU is back online v return workers |
- * ----------------> RELEASE --------------
+ * This is solved by allowing a gcwq to be disassociated from the CPU
+ * running as an unbound one and allowing it to be reattached later if the
+ * cpu comes back online.
*/
-/**
- * trustee_wait_event_timeout - timed event wait for trustee
- * @cond: condition to wait for
- * @timeout: timeout in jiffies
- *
- * wait_event_timeout() for trustee to use. Handles locking and
- * checks for RELEASE request.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by trustee.
- *
- * RETURNS:
- * Positive indicating left time if @cond is satisfied, 0 if timed
- * out, -1 if canceled.
- */
-#define trustee_wait_event_timeout(cond, timeout) ({ \
- long __ret = (timeout); \
- while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
- __ret) { \
- spin_unlock_irq(&gcwq->lock); \
- __wait_event_timeout(gcwq->trustee_wait, (cond) || \
- (gcwq->trustee_state == TRUSTEE_RELEASE), \
- __ret); \
- spin_lock_irq(&gcwq->lock); \
- } \
- gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret); \
-})
-
-/**
- * trustee_wait_event - event wait for trustee
- * @cond: condition to wait for
- *
- * wait_event() for trustee to use. Automatically handles locking and
- * checks for CANCEL request.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by trustee.
- *
- * RETURNS:
- * 0 if @cond is satisfied, -1 if canceled.
- */
-#define trustee_wait_event(cond) ({ \
- long __ret1; \
- __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
- __ret1 < 0 ? -1 : 0; \
-})
-
-static int __cpuinit trustee_thread(void *__gcwq)
+/* claim manager positions of all pools */
+static void gcwq_claim_management_and_lock(struct global_cwq *gcwq)
{
- struct global_cwq *gcwq = __gcwq;
- struct worker *worker;
- struct work_struct *work;
- struct hlist_node *pos;
- long rc;
- int i;
-
- BUG_ON(gcwq->cpu != smp_processor_id());
+ struct worker_pool *pool;
+ for_each_worker_pool(pool, gcwq)
+ mutex_lock_nested(&pool->manager_mutex, pool - gcwq->pools);
spin_lock_irq(&gcwq->lock);
- /*
- * Claim the manager position and make all workers rogue.
- * Trustee must be bound to the target cpu and can't be
- * cancelled.
- */
- BUG_ON(gcwq->cpu != smp_processor_id());
- rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
- BUG_ON(rc < 0);
-
- gcwq->flags |= GCWQ_MANAGING_WORKERS;
-
- list_for_each_entry(worker, &gcwq->idle_list, entry)
- worker->flags |= WORKER_ROGUE;
+}
- for_each_busy_worker(worker, i, pos, gcwq)
- worker->flags |= WORKER_ROGUE;
+/* release manager positions */
+static void gcwq_release_management_and_unlock(struct global_cwq *gcwq)
+{
+ struct worker_pool *pool;
- /*
- * Call schedule() so that we cross rq->lock and thus can
- * guarantee sched callbacks see the rogue flag. This is
- * necessary as scheduler callbacks may be invoked from other
- * cpus.
- */
spin_unlock_irq(&gcwq->lock);
- schedule();
- spin_lock_irq(&gcwq->lock);
+ for_each_worker_pool(pool, gcwq)
+ mutex_unlock(&pool->manager_mutex);
+}
- /*
- * Sched callbacks are disabled now. Zap nr_running. After
- * this, nr_running stays zero and need_more_worker() and
- * keep_working() are always true as long as the worklist is
- * not empty.
- */
- atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
+static void gcwq_unbind_fn(struct work_struct *work)
+{
+ struct global_cwq *gcwq = get_gcwq(smp_processor_id());
+ struct worker_pool *pool;
+ struct worker *worker;
+ struct hlist_node *pos;
+ int i;
- spin_unlock_irq(&gcwq->lock);
- del_timer_sync(&gcwq->idle_timer);
- spin_lock_irq(&gcwq->lock);
+ BUG_ON(gcwq->cpu != smp_processor_id());
- /*
- * We're now in charge. Notify and proceed to drain. We need
- * to keep the gcwq running during the whole CPU down
- * procedure as other cpu hotunplug callbacks may need to
- * flush currently running tasks.
- */
- gcwq->trustee_state = TRUSTEE_IN_CHARGE;
- wake_up_all(&gcwq->trustee_wait);
+ gcwq_claim_management_and_lock(gcwq);
/*
- * The original cpu is in the process of dying and may go away
- * anytime now. When that happens, we and all workers would
- * be migrated to other cpus. Try draining any left work. We
- * want to get it over with ASAP - spam rescuers, wake up as
- * many idlers as necessary and create new ones till the
- * worklist is empty. Note that if the gcwq is frozen, there
- * may be frozen works in freezable cwqs. Don't declare
- * completion while frozen.
+ * We've claimed all manager positions. Make all workers unbound
+ * and set DISASSOCIATED. Before this, all workers except for the
+ * ones which are still executing works from before the last CPU
+ * down must be on the cpu. After this, they may become diasporas.
*/
- while (gcwq->nr_workers != gcwq->nr_idle ||
- gcwq->flags & GCWQ_FREEZING ||
- gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
- int nr_works = 0;
-
- list_for_each_entry(work, &gcwq->worklist, entry) {
- send_mayday(work);
- nr_works++;
- }
+ for_each_worker_pool(pool, gcwq)
+ list_for_each_entry(worker, &pool->idle_list, entry)
+ worker->flags |= WORKER_UNBOUND;
- list_for_each_entry(worker, &gcwq->idle_list, entry) {
- if (!nr_works--)
- break;
- wake_up_process(worker->task);
- }
+ for_each_busy_worker(worker, i, pos, gcwq)
+ worker->flags |= WORKER_UNBOUND;
- if (need_to_create_worker(gcwq)) {
- spin_unlock_irq(&gcwq->lock);
- worker = create_worker(gcwq, false);
- spin_lock_irq(&gcwq->lock);
- if (worker) {
- worker->flags |= WORKER_ROGUE;
- start_worker(worker);
- }
- }
+ gcwq->flags |= GCWQ_DISASSOCIATED;
- /* give a breather */
- if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
- break;
- }
+ gcwq_release_management_and_unlock(gcwq);
/*
- * Either all works have been scheduled and cpu is down, or
- * cpu down has already been canceled. Wait for and butcher
- * all workers till we're canceled.
+ * Call schedule() so that we cross rq->lock and thus can guarantee
+ * sched callbacks see the %WORKER_UNBOUND flag. This is necessary
+ * as scheduler callbacks may be invoked from other cpus.
*/
- do {
- rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
- while (!list_empty(&gcwq->idle_list))
- destroy_worker(list_first_entry(&gcwq->idle_list,
- struct worker, entry));
- } while (gcwq->nr_workers && rc >= 0);
+ schedule();
/*
- * At this point, either draining has completed and no worker
- * is left, or cpu down has been canceled or the cpu is being
- * brought back up. There shouldn't be any idle one left.
- * Tell the remaining busy ones to rebind once it finishes the
- * currently scheduled works by scheduling the rebind_work.
+ * Sched callbacks are disabled now. Zap nr_running. After this,
+ * nr_running stays zero and need_more_worker() and keep_working()
+ * are always true as long as the worklist is not empty. @gcwq now
+ * behaves as unbound (in terms of concurrency management) gcwq
+ * which is served by workers tied to the CPU.
+ *
+ * On return from this function, the current worker would trigger
+ * unbound chain execution of pending work items if other workers
+ * didn't already.
*/
- WARN_ON(!list_empty(&gcwq->idle_list));
-
- for_each_busy_worker(worker, i, pos, gcwq) {
- struct work_struct *rebind_work = &worker->rebind_work;
-
- /*
- * Rebind_work may race with future cpu hotplug
- * operations. Use a separate flag to mark that
- * rebinding is scheduled.
- */
- worker->flags |= WORKER_REBIND;
- worker->flags &= ~WORKER_ROGUE;
-
- /* queue rebind_work, wq doesn't matter, use the default one */
- if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
- work_data_bits(rebind_work)))
- continue;
-
- debug_work_activate(rebind_work);
- insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
- worker->scheduled.next,
- work_color_to_flags(WORK_NO_COLOR));
- }
-
- /* relinquish manager role */
- gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
-
- /* notify completion */
- gcwq->trustee = NULL;
- gcwq->trustee_state = TRUSTEE_DONE;
- wake_up_all(&gcwq->trustee_wait);
- spin_unlock_irq(&gcwq->lock);
- return 0;
+ for_each_worker_pool(pool, gcwq)
+ atomic_set(get_pool_nr_running(pool), 0);
}
-/**
- * wait_trustee_state - wait for trustee to enter the specified state
- * @gcwq: gcwq the trustee of interest belongs to
- * @state: target state to wait for
- *
- * Wait for the trustee to reach @state. DONE is already matched.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock) which may be released and regrabbed
- * multiple times. To be used by cpu_callback.
+/*
+ * Workqueues should be brought up before normal priority CPU notifiers.
+ * This will be registered high priority CPU notifier.
*/
-static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
-__releases(&gcwq->lock)
-__acquires(&gcwq->lock)
-{
- if (!(gcwq->trustee_state == state ||
- gcwq->trustee_state == TRUSTEE_DONE)) {
- spin_unlock_irq(&gcwq->lock);
- __wait_event(gcwq->trustee_wait,
- gcwq->trustee_state == state ||
- gcwq->trustee_state == TRUSTEE_DONE);
- spin_lock_irq(&gcwq->lock);
- }
-}
-
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct global_cwq *gcwq = get_gcwq(cpu);
- struct task_struct *new_trustee = NULL;
- struct worker *uninitialized_var(new_worker);
- unsigned long flags;
-
- action &= ~CPU_TASKS_FROZEN;
+ struct worker_pool *pool;
- switch (action) {
- case CPU_DOWN_PREPARE:
- new_trustee = kthread_create(trustee_thread, gcwq,
- "workqueue_trustee/%d\n", cpu);
- if (IS_ERR(new_trustee))
- return notifier_from_errno(PTR_ERR(new_trustee));
- kthread_bind(new_trustee, cpu);
- /* fall through */
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- BUG_ON(gcwq->first_idle);
- new_worker = create_worker(gcwq, false);
- if (!new_worker) {
- if (new_trustee)
- kthread_stop(new_trustee);
- return NOTIFY_BAD;
- }
- }
-
- /* some are called w/ irq disabled, don't disturb irq status */
- spin_lock_irqsave(&gcwq->lock, flags);
+ for_each_worker_pool(pool, gcwq) {
+ struct worker *worker;
- switch (action) {
- case CPU_DOWN_PREPARE:
- /* initialize trustee and tell it to acquire the gcwq */
- BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
- gcwq->trustee = new_trustee;
- gcwq->trustee_state = TRUSTEE_START;
- wake_up_process(gcwq->trustee);
- wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
- /* fall through */
- case CPU_UP_PREPARE:
- BUG_ON(gcwq->first_idle);
- gcwq->first_idle = new_worker;
- break;
+ if (pool->nr_workers)
+ continue;
- case CPU_DYING:
- /*
- * Before this, the trustee and all workers except for
- * the ones which are still executing works from
- * before the last CPU down must be on the cpu. After
- * this, they'll all be diasporas.
- */
- gcwq->flags |= GCWQ_DISASSOCIATED;
- break;
+ worker = create_worker(pool);
+ if (!worker)
+ return NOTIFY_BAD;
- case CPU_POST_DEAD:
- gcwq->trustee_state = TRUSTEE_BUTCHER;
- /* fall through */
- case CPU_UP_CANCELED:
- destroy_worker(gcwq->first_idle);
- gcwq->first_idle = NULL;
+ spin_lock_irq(&gcwq->lock);
+ start_worker(worker);
+ spin_unlock_irq(&gcwq->lock);
+ }
break;
case CPU_DOWN_FAILED:
case CPU_ONLINE:
+ gcwq_claim_management_and_lock(gcwq);
gcwq->flags &= ~GCWQ_DISASSOCIATED;
- if (gcwq->trustee_state != TRUSTEE_DONE) {
- gcwq->trustee_state = TRUSTEE_RELEASE;
- wake_up_process(gcwq->trustee);
- wait_trustee_state(gcwq, TRUSTEE_DONE);
- }
-
- /*
- * Trustee is done and there might be no worker left.
- * Put the first_idle in and request a real manager to
- * take a look.
- */
- spin_unlock_irq(&gcwq->lock);
- kthread_bind(gcwq->first_idle->task, cpu);
- spin_lock_irq(&gcwq->lock);
- gcwq->flags |= GCWQ_MANAGE_WORKERS;
- start_worker(gcwq->first_idle);
- gcwq->first_idle = NULL;
+ rebind_workers(gcwq);
+ gcwq_release_management_and_unlock(gcwq);
break;
}
+ return NOTIFY_OK;
+}
- spin_unlock_irqrestore(&gcwq->lock, flags);
+/*
+ * Workqueues should be brought down after normal priority CPU notifiers.
+ * This will be registered as low priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct work_struct unbind_work;
- return notifier_from_errno(0);
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DOWN_PREPARE:
+ /* unbinding should happen on the local CPU */
+ INIT_WORK_ONSTACK(&unbind_work, gcwq_unbind_fn);
+ schedule_work_on(cpu, &unbind_work);
+ flush_work(&unbind_work);
+ break;
+ }
+ return NOTIFY_OK;
}
#ifdef CONFIG_SMP
@@ -3746,6 +3653,7 @@ void thaw_workqueues(void)
for_each_gcwq_cpu(cpu) {
struct global_cwq *gcwq = get_gcwq(cpu);
+ struct worker_pool *pool;
struct workqueue_struct *wq;
spin_lock_irq(&gcwq->lock);
@@ -3767,7 +3675,8 @@ void thaw_workqueues(void)
cwq_activate_first_delayed(cwq);
}
- wake_up_worker(gcwq);
+ for_each_worker_pool(pool, gcwq)
+ wake_up_worker(pool);
spin_unlock_irq(&gcwq->lock);
}
@@ -3783,46 +3692,57 @@ static int __init init_workqueues(void)
unsigned int cpu;
int i;
- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
+ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
+ cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
/* initialize gcwqs */
for_each_gcwq_cpu(cpu) {
struct global_cwq *gcwq = get_gcwq(cpu);
+ struct worker_pool *pool;
spin_lock_init(&gcwq->lock);
- INIT_LIST_HEAD(&gcwq->worklist);
gcwq->cpu = cpu;
gcwq->flags |= GCWQ_DISASSOCIATED;
- INIT_LIST_HEAD(&gcwq->idle_list);
for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
- init_timer_deferrable(&gcwq->idle_timer);
- gcwq->idle_timer.function = idle_worker_timeout;
- gcwq->idle_timer.data = (unsigned long)gcwq;
+ for_each_worker_pool(pool, gcwq) {
+ pool->gcwq = gcwq;
+ INIT_LIST_HEAD(&pool->worklist);
+ INIT_LIST_HEAD(&pool->idle_list);
+
+ init_timer_deferrable(&pool->idle_timer);
+ pool->idle_timer.function = idle_worker_timeout;
+ pool->idle_timer.data = (unsigned long)pool;
- setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
- (unsigned long)gcwq);
+ setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
+ (unsigned long)pool);
- ida_init(&gcwq->worker_ida);
+ mutex_init(&pool->manager_mutex);
+ ida_init(&pool->worker_ida);
+ }
- gcwq->trustee_state = TRUSTEE_DONE;
- init_waitqueue_head(&gcwq->trustee_wait);
+ init_waitqueue_head(&gcwq->rebind_hold);
}
/* create the initial worker */
for_each_online_gcwq_cpu(cpu) {
struct global_cwq *gcwq = get_gcwq(cpu);
- struct worker *worker;
+ struct worker_pool *pool;
if (cpu != WORK_CPU_UNBOUND)
gcwq->flags &= ~GCWQ_DISASSOCIATED;
- worker = create_worker(gcwq, true);
- BUG_ON(!worker);
- spin_lock_irq(&gcwq->lock);
- start_worker(worker);
- spin_unlock_irq(&gcwq->lock);
+
+ for_each_worker_pool(pool, gcwq) {
+ struct worker *worker;
+
+ worker = create_worker(pool);
+ BUG_ON(!worker);
+ spin_lock_irq(&gcwq->lock);
+ start_worker(worker);
+ spin_unlock_irq(&gcwq->lock);
+ }
}
system_wq = alloc_workqueue("events", 0, 0);
diff --git a/lib/Kconfig b/lib/Kconfig
index a9e15403434e..8269d56dcdaa 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -378,14 +378,6 @@ config MPILIB
It is used to implement RSA digital signature verification,
which is used by IMA/EVM digital signature extension.
-config MPILIB_EXTRA
- bool
- depends on MPILIB
- help
- Additional sources of multiprecision maths library from GnuPG.
- This code is unnecessary for RSA digital signature verification,
- but can be compiled if needed.
-
config SIGNATURE
tristate
depends on KEYS && CRYPTO
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ff5bdee4716d..4a186508bf8b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -714,7 +714,7 @@ config STACKTRACE
config DEBUG_STACK_USAGE
bool "Stack utilization instrumentation"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !IA64 && !PARISC
help
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T and sysrq-P debug output.
diff --git a/lib/div64.c b/lib/div64.c
index 3ea24907d52e..a163b6caef73 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -87,7 +87,7 @@ EXPORT_SYMBOL(div_s64_rem);
* by the book 'Hacker's Delight'. The original source and full proof
* can be found here and is available for use without restriction.
*
- * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c'
+ * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
*/
#ifndef div64_u64
u64 div64_u64(u64 dividend, u64 divisor)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 518aea714d21..66ce41489133 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -78,7 +78,7 @@ static LIST_HEAD(free_entries);
static DEFINE_SPINLOCK(free_entries_lock);
/* Global disable flag - will be set in case of an error */
-static bool global_disable __read_mostly;
+static u32 global_disable __read_mostly;
/* Global error count */
static u32 error_count;
@@ -657,7 +657,7 @@ static int dma_debug_fs_init(void)
global_disable_dent = debugfs_create_bool("disabled", 0444,
dma_debug_dent,
- (u32 *)&global_disable);
+ &global_disable);
if (!global_disable_dent)
goto out_err;
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 1a91efa6d121..0401d2916d9f 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -373,13 +373,16 @@ EXPORT_SYMBOL_GPL(add_uevent_var);
static int uevent_net_init(struct net *net)
{
struct uevent_sock *ue_sk;
+ struct netlink_kernel_cfg cfg = {
+ .groups = 1,
+ };
ue_sk = kzalloc(sizeof(*ue_sk), GFP_KERNEL);
if (!ue_sk)
return -ENOMEM;
ue_sk->sk = netlink_kernel_create(net, NETLINK_KOBJECT_UEVENT,
- 1, NULL, NULL, THIS_MODULE);
+ THIS_MODULE, &cfg);
if (!ue_sk->sk) {
printk(KERN_ERR
"kobject_uevent: unable to create netlink socket!\n");
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 23a5e031cd8b..c24c2f7e296f 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -87,12 +87,10 @@ void __list_add_rcu(struct list_head *new,
struct list_head *prev, struct list_head *next)
{
WARN(next->prev != prev,
- "list_add_rcu corruption. next->prev should be "
- "prev (%p), but was %p. (next=%p).\n",
+ "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
prev, next->prev, next);
WARN(prev->next != next,
- "list_add_rcu corruption. prev->next should be "
- "next (%p), but was %p. (prev=%p).\n",
+ "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
next, prev->next, prev);
new->next = next;
new->prev = prev;
diff --git a/lib/mpi/Makefile b/lib/mpi/Makefile
index 567d52e74d77..45ca90a8639c 100644
--- a/lib/mpi/Makefile
+++ b/lib/mpi/Makefile
@@ -19,14 +19,3 @@ mpi-y = \
mpih-mul.o \
mpi-pow.o \
mpiutil.o
-
-mpi-$(CONFIG_MPILIB_EXTRA) += \
- mpi-add.o \
- mpi-div.o \
- mpi-cmp.o \
- mpi-gcd.o \
- mpi-inline.o \
- mpi-inv.o \
- mpi-mpow.o \
- mpi-mul.o \
- mpi-scan.o
diff --git a/lib/mpi/generic_mpi-asm-defs.h b/lib/mpi/generic_mpi-asm-defs.h
deleted file mode 100644
index 047d1f5a7249..000000000000
--- a/lib/mpi/generic_mpi-asm-defs.h
+++ /dev/null
@@ -1,4 +0,0 @@
-/* This file defines some basic constants for the MPI machinery. We
- * need to define the types on a per-CPU basis, so it is done with
- * this file here. */
-#define BYTES_PER_MPI_LIMB (SIZEOF_UNSIGNED_LONG)
diff --git a/lib/mpi/mpi-add.c b/lib/mpi/mpi-add.c
deleted file mode 100644
index f56b9ba295e6..000000000000
--- a/lib/mpi/mpi-add.c
+++ /dev/null
@@ -1,234 +0,0 @@
-/* mpi-add.c - MPI functions
- * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
- * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- *
- * Note: This code is heavily based on the GNU MP Library.
- * Actually it's the same code with only minor changes in the
- * way the data is stored; this is to support the abstraction
- * of an optional secure memory allocation which may be used
- * to avoid revealing of sensitive data due to paging etc.
- * The GNU MP Library itself is published under the LGPL;
- * however I decided to publish this code under the plain GPL.
- */
-
-#include "mpi-internal.h"
-
-/****************
- * Add the unsigned integer V to the mpi-integer U and store the
- * result in W. U and V may be the same.
- */
-int mpi_add_ui(MPI w, const MPI u, unsigned long v)
-{
- mpi_ptr_t wp, up;
- mpi_size_t usize, wsize;
- int usign, wsign;
-
- usize = u->nlimbs;
- usign = u->sign;
- wsign = 0;
-
- /* If not space for W (and possible carry), increase space. */
- wsize = usize + 1;
- if (w->alloced < wsize)
- if (mpi_resize(w, wsize) < 0)
- return -ENOMEM;
-
- /* These must be after realloc (U may be the same as W). */
- up = u->d;
- wp = w->d;
-
- if (!usize) { /* simple */
- wp[0] = v;
- wsize = v ? 1 : 0;
- } else if (!usign) { /* mpi is not negative */
- mpi_limb_t cy;
- cy = mpihelp_add_1(wp, up, usize, v);
- wp[usize] = cy;
- wsize = usize + cy;
- } else { /* The signs are different. Need exact comparison to determine
- * which operand to subtract from which. */
- if (usize == 1 && up[0] < v) {
- wp[0] = v - up[0];
- wsize = 1;
- } else {
- mpihelp_sub_1(wp, up, usize, v);
- /* Size can decrease with at most one limb. */
- wsize = usize - (wp[usize - 1] == 0);
- wsign = 1;
- }
- }
-
- w->nlimbs = wsize;
- w->sign = wsign;
- return 0;
-}
-
-int mpi_add(MPI w, MPI u, MPI v)
-{
- mpi_ptr_t wp, up, vp;
- mpi_size_t usize, vsize, wsize;
- int usign, vsign, wsign;
-
- if (u->nlimbs < v->nlimbs) { /* Swap U and V. */
- usize = v->nlimbs;
- usign = v->sign;
- vsize = u->nlimbs;
- vsign = u->sign;
- wsize = usize + 1;
- if (RESIZE_IF_NEEDED(w, wsize) < 0)
- return -ENOMEM;
- /* These must be after realloc (u or v may be the same as w). */
- up = v->d;
- vp = u->d;
- } else {
- usize = u->nlimbs;
- usign = u->sign;
- vsize = v->nlimbs;
- vsign = v->sign;
- wsize = usize + 1;
- if (RESIZE_IF_NEEDED(w, wsize) < 0)
- return -ENOMEM;
- /* These must be after realloc (u or v may be the same as w). */
- up = u->d;
- vp = v->d;
- }
- wp = w->d;
- wsign = 0;
-
- if (!vsize) { /* simple */
- MPN_COPY(wp, up, usize);
- wsize = usize;
- wsign = usign;
- } else if (usign != vsign) { /* different sign */
- /* This test is right since USIZE >= VSIZE */
- if (usize != vsize) {
- mpihelp_sub(wp, up, usize, vp, vsize);
- wsize = usize;
- MPN_NORMALIZE(wp, wsize);
- wsign = usign;
- } else if (mpihelp_cmp(up, vp, usize) < 0) {
- mpihelp_sub_n(wp, vp, up, usize);
- wsize = usize;
- MPN_NORMALIZE(wp, wsize);
- if (!usign)
- wsign = 1;
- } else {
- mpihelp_sub_n(wp, up, vp, usize);
- wsize = usize;
- MPN_NORMALIZE(wp, wsize);
- if (usign)
- wsign = 1;
- }
- } else { /* U and V have same sign. Add them. */
- mpi_limb_t cy = mpihelp_add(wp, up, usize, vp, vsize);
- wp[usize] = cy;
- wsize = usize + cy;
- if (usign)
- wsign = 1;
- }
-
- w->nlimbs = wsize;
- w->sign = wsign;
- return 0;
-}
-
-/****************
- * Subtract the unsigned integer V from the mpi-integer U and store the
- * result in W.
- */
-int mpi_sub_ui(MPI w, MPI u, unsigned long v)
-{
- mpi_ptr_t wp, up;
- mpi_size_t usize, wsize;
- int usign, wsign;
-
- usize = u->nlimbs;
- usign = u->sign;
- wsign = 0;
-
- /* If not space for W (and possible carry), increase space. */
- wsize = usize + 1;
- if (w->alloced < wsize)
- if (mpi_resize(w, wsize) < 0)
- return -ENOMEM;
-
- /* These must be after realloc (U may be the same as W). */
- up = u->d;
- wp = w->d;
-
- if (!usize) { /* simple */
- wp[0] = v;
- wsize = v ? 1 : 0;
- wsign = 1;
- } else if (usign) { /* mpi and v are negative */
- mpi_limb_t cy;
- cy = mpihelp_add_1(wp, up, usize, v);
- wp[usize] = cy;
- wsize = usize + cy;
- } else { /* The signs are different. Need exact comparison to determine
- * which operand to subtract from which. */
- if (usize == 1 && up[0] < v) {
- wp[0] = v - up[0];
- wsize = 1;
- wsign = 1;
- } else {
- mpihelp_sub_1(wp, up, usize, v);
- /* Size can decrease with at most one limb. */
- wsize = usize - (wp[usize - 1] == 0);
- }
- }
-
- w->nlimbs = wsize;
- w->sign = wsign;
- return 0;
-}
-
-int mpi_sub(MPI w, MPI u, MPI v)
-{
- int rc;
-
- if (w == v) {
- MPI vv;
- if (mpi_copy(&vv, v) < 0)
- return -ENOMEM;
- vv->sign = !vv->sign;
- rc = mpi_add(w, u, vv);
- mpi_free(vv);
- } else {
- /* fixme: this is not thread-save (we temp. modify v) */
- v->sign = !v->sign;
- rc = mpi_add(w, u, v);
- v->sign = !v->sign;
- }
- return rc;
-}
-
-int mpi_addm(MPI w, MPI u, MPI v, MPI m)
-{
- if (mpi_add(w, u, v) < 0 || mpi_fdiv_r(w, w, m) < 0)
- return -ENOMEM;
- return 0;
-}
-
-int mpi_subm(MPI w, MPI u, MPI v, MPI m)
-{
- if (mpi_sub(w, u, v) < 0 || mpi_fdiv_r(w, w, m) < 0)
- return -ENOMEM;
- return 0;
-}
diff --git a/lib/mpi/mpi-bit.c b/lib/mpi/mpi-bit.c
index 0c505361da19..568724804f29 100644
--- a/lib/mpi/mpi-bit.c
+++ b/lib/mpi/mpi-bit.c
@@ -54,165 +54,3 @@ unsigned mpi_get_nbits(MPI a)
return n;
}
EXPORT_SYMBOL_GPL(mpi_get_nbits);
-
-/****************
- * Test whether bit N is set.
- */
-int mpi_test_bit(MPI a, unsigned n)
-{
- unsigned limbno, bitno;
- mpi_limb_t limb;
-
- limbno = n / BITS_PER_MPI_LIMB;
- bitno = n % BITS_PER_MPI_LIMB;
-
- if (limbno >= a->nlimbs)
- return 0; /* too far left: this is a 0 */
- limb = a->d[limbno];
- return (limb & (A_LIMB_1 << bitno)) ? 1 : 0;
-}
-
-/****************
- * Set bit N of A.
- */
-int mpi_set_bit(MPI a, unsigned n)
-{
- unsigned limbno, bitno;
-
- limbno = n / BITS_PER_MPI_LIMB;
- bitno = n % BITS_PER_MPI_LIMB;
-
- if (limbno >= a->nlimbs) { /* resize */
- if (a->alloced >= limbno)
- if (mpi_resize(a, limbno + 1) < 0)
- return -ENOMEM;
- a->nlimbs = limbno + 1;
- }
- a->d[limbno] |= (A_LIMB_1 << bitno);
- return 0;
-}
-
-/****************
- * Set bit N of A. and clear all bits above
- */
-int mpi_set_highbit(MPI a, unsigned n)
-{
- unsigned limbno, bitno;
-
- limbno = n / BITS_PER_MPI_LIMB;
- bitno = n % BITS_PER_MPI_LIMB;
-
- if (limbno >= a->nlimbs) { /* resize */
- if (a->alloced >= limbno)
- if (mpi_resize(a, limbno + 1) < 0)
- return -ENOMEM;
- a->nlimbs = limbno + 1;
- }
- a->d[limbno] |= (A_LIMB_1 << bitno);
- for (bitno++; bitno < BITS_PER_MPI_LIMB; bitno++)
- a->d[limbno] &= ~(A_LIMB_1 << bitno);
- a->nlimbs = limbno + 1;
- return 0;
-}
-
-/****************
- * clear bit N of A and all bits above
- */
-void mpi_clear_highbit(MPI a, unsigned n)
-{
- unsigned limbno, bitno;
-
- limbno = n / BITS_PER_MPI_LIMB;
- bitno = n % BITS_PER_MPI_LIMB;
-
- if (limbno >= a->nlimbs)
- return; /* not allocated, so need to clear bits :-) */
-
- for (; bitno < BITS_PER_MPI_LIMB; bitno++)
- a->d[limbno] &= ~(A_LIMB_1 << bitno);
- a->nlimbs = limbno + 1;
-}
-
-/****************
- * Clear bit N of A.
- */
-void mpi_clear_bit(MPI a, unsigned n)
-{
- unsigned limbno, bitno;
-
- limbno = n / BITS_PER_MPI_LIMB;
- bitno = n % BITS_PER_MPI_LIMB;
-
- if (limbno >= a->nlimbs)
- return; /* don't need to clear this bit, it's to far to left */
- a->d[limbno] &= ~(A_LIMB_1 << bitno);
-}
-
-/****************
- * Shift A by N bits to the right
- * FIXME: should use alloc_limb if X and A are same.
- */
-int mpi_rshift(MPI x, MPI a, unsigned n)
-{
- mpi_ptr_t xp;
- mpi_size_t xsize;
-
- xsize = a->nlimbs;
- x->sign = a->sign;
- if (RESIZE_IF_NEEDED(x, (size_t) xsize) < 0)
- return -ENOMEM;
- xp = x->d;
-
- if (xsize) {
- mpihelp_rshift(xp, a->d, xsize, n);
- MPN_NORMALIZE(xp, xsize);
- }
- x->nlimbs = xsize;
- return 0;
-}
-
-/****************
- * Shift A by COUNT limbs to the left
- * This is used only within the MPI library
- */
-int mpi_lshift_limbs(MPI a, unsigned int count)
-{
- const int n = a->nlimbs;
- mpi_ptr_t ap;
- int i;
-
- if (!count || !n)
- return 0;
-
- if (RESIZE_IF_NEEDED(a, n + count) < 0)
- return -ENOMEM;
-
- ap = a->d;
- for (i = n - 1; i >= 0; i--)
- ap[i + count] = ap[i];
- for (i = 0; i < count; i++)
- ap[i] = 0;
- a->nlimbs += count;
- return 0;
-}
-
-/****************
- * Shift A by COUNT limbs to the right
- * This is used only within the MPI library
- */
-void mpi_rshift_limbs(MPI a, unsigned int count)
-{
- mpi_ptr_t ap = a->d;
- mpi_size_t n = a->nlimbs;
- unsigned int i;
-
- if (count >= n) {
- a->nlimbs = 0;
- return;
- }
-
- for (i = 0; i < n - count; i++)
- ap[i] = ap[i + count];
- ap[i] = 0;
- a->nlimbs -= count;
-}
diff --git a/lib/mpi/mpi-cmp.c b/lib/mpi/mpi-cmp.c
deleted file mode 100644
index 914bc42a8a80..000000000000
--- a/lib/mpi/mpi-cmp.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/* mpi-cmp.c - MPI functions
- * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- */
-
-#include "mpi-internal.h"
-
-int mpi_cmp_ui(MPI u, unsigned long v)
-{
- mpi_limb_t limb = v;
-
- mpi_normalize(u);
- if (!u->nlimbs && !limb)
- return 0;
- if (u->sign)
- return -1;
- if (u->nlimbs > 1)
- return 1;
-
- if (u->d[0] == limb)
- return 0;
- else if (u->d[0] > limb)
- return 1;
- else
- return -1;
-}
-
-int mpi_cmp(MPI u, MPI v)
-{
- mpi_size_t usize, vsize;
- int cmp;
-
- mpi_normalize(u);
- mpi_normalize(v);
- usize = u->nlimbs;
- vsize = v->nlimbs;
- if (!u->sign && v->sign)
- return 1;
- if (u->sign && !v->sign)
- return -1;
- if (usize != vsize && !u->sign && !v->sign)
- return usize - vsize;
- if (usize != vsize && u->sign && v->sign)
- return vsize + usize;
- if (!usize)
- return 0;
- cmp = mpihelp_cmp(u->d, v->d, usize);
- if (!cmp)
- return 0;
- if ((cmp < 0 ? 1 : 0) == (u->sign ? 1 : 0))
- return 1;
- return -1;
-}
diff --git a/lib/mpi/mpi-div.c b/lib/mpi/mpi-div.c
deleted file mode 100644
index f68cbbb4d4a4..000000000000
--- a/lib/mpi/mpi-div.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/* mpi-div.c - MPI functions
- * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
- * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- *
- * Note: This code is heavily based on the GNU MP Library.
- * Actually it's the same code with only minor changes in the
- * way the data is stored; this is to support the abstraction
- * of an optional secure memory allocation which may be used
- * to avoid revealing of sensitive data due to paging etc.
- * The GNU MP Library itself is published under the LGPL;
- * however I decided to publish this code under the plain GPL.
- */
-
-#include <linux/string.h>
-#include "mpi-internal.h"
-#include "longlong.h"
-
-int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor)
-{
- int rc = -ENOMEM;
- int divisor_sign = divisor->sign;
- MPI temp_divisor = NULL;
-
- /* We need the original value of the divisor after the remainder has been
- * preliminary calculated. We have to copy it to temporary space if it's
- * the same variable as REM. */
- if (rem == divisor) {
- if (mpi_copy(&temp_divisor, divisor) < 0)
- goto nomem;
- divisor = temp_divisor;
- }
-
- if (mpi_tdiv_qr(NULL, rem, dividend, divisor) < 0)
- goto nomem;
- if (((divisor_sign ? 1 : 0) ^ (dividend->sign ? 1 : 0)) && rem->nlimbs)
- if (mpi_add(rem, rem, divisor) < 0)
- goto nomem;
-
- rc = 0;
-
-nomem:
- if (temp_divisor)
- mpi_free(temp_divisor);
- return rc;
-}
-
-/****************
- * Division rounding the quotient towards -infinity.
- * The remainder gets the same sign as the denominator.
- * rem is optional
- */
-
-ulong mpi_fdiv_r_ui(MPI rem, MPI dividend, ulong divisor)
-{
- mpi_limb_t rlimb;
-
- rlimb = mpihelp_mod_1(dividend->d, dividend->nlimbs, divisor);
- if (rlimb && dividend->sign)
- rlimb = divisor - rlimb;
-
- if (rem) {
- rem->d[0] = rlimb;
- rem->nlimbs = rlimb ? 1 : 0;
- }
- return rlimb;
-}
-
-int mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor)
-{
- MPI tmp = mpi_alloc(mpi_get_nlimbs(quot));
- if (!tmp)
- return -ENOMEM;
- mpi_fdiv_qr(quot, tmp, dividend, divisor);
- mpi_free(tmp);
- return 0;
-}
-
-int mpi_fdiv_qr(MPI quot, MPI rem, MPI dividend, MPI divisor)
-{
- int divisor_sign = divisor->sign;
- MPI temp_divisor = NULL;
-
- if (quot == divisor || rem == divisor) {
- if (mpi_copy(&temp_divisor, divisor) < 0)
- return -ENOMEM;
- divisor = temp_divisor;
- }
-
- if (mpi_tdiv_qr(quot, rem, dividend, divisor) < 0)
- goto nomem;
-
- if ((divisor_sign ^ dividend->sign) && rem->nlimbs) {
- if (mpi_sub_ui(quot, quot, 1) < 0)
- goto nomem;
- if (mpi_add(rem, rem, divisor) < 0)
- goto nomem;
- }
-
- if (temp_divisor)
- mpi_free(temp_divisor);
-
- return 0;
-
-nomem:
- mpi_free(temp_divisor);
- return -ENOMEM;
-}
-
-/* If den == quot, den needs temporary storage.
- * If den == rem, den needs temporary storage.
- * If num == quot, num needs temporary storage.
- * If den has temporary storage, it can be normalized while being copied,
- * i.e no extra storage should be allocated.
- */
-
-int mpi_tdiv_r(MPI rem, MPI num, MPI den)
-{
- return mpi_tdiv_qr(NULL, rem, num, den);
-}
-
-int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
-{
- int rc = -ENOMEM;
- mpi_ptr_t np, dp;
- mpi_ptr_t qp, rp;
- mpi_size_t nsize = num->nlimbs;
- mpi_size_t dsize = den->nlimbs;
- mpi_size_t qsize, rsize;
- mpi_size_t sign_remainder = num->sign;
- mpi_size_t sign_quotient = num->sign ^ den->sign;
- unsigned normalization_steps;
- mpi_limb_t q_limb;
- mpi_ptr_t marker[5];
- int markidx = 0;
-
- if (!dsize)
- return -EINVAL;
-
- memset(marker, 0, sizeof(marker));
-
- /* Ensure space is enough for quotient and remainder.
- * We need space for an extra limb in the remainder, because it's
- * up-shifted (normalized) below. */
- rsize = nsize + 1;
- if (mpi_resize(rem, rsize) < 0)
- goto nomem;
-
- qsize = rsize - dsize; /* qsize cannot be bigger than this. */
- if (qsize <= 0) {
- if (num != rem) {
- rem->nlimbs = num->nlimbs;
- rem->sign = num->sign;
- MPN_COPY(rem->d, num->d, nsize);
- }
- if (quot) {
- /* This needs to follow the assignment to rem, in case the
- * numerator and quotient are the same. */
- quot->nlimbs = 0;
- quot->sign = 0;
- }
- return 0;
- }
-
- if (quot)
- if (mpi_resize(quot, qsize) < 0)
- goto nomem;
-
- /* Read pointers here, when reallocation is finished. */
- np = num->d;
- dp = den->d;
- rp = rem->d;
-
- /* Optimize division by a single-limb divisor. */
- if (dsize == 1) {
- mpi_limb_t rlimb;
- if (quot) {
- qp = quot->d;
- rlimb = mpihelp_divmod_1(qp, np, nsize, dp[0]);
- qsize -= qp[qsize - 1] == 0;
- quot->nlimbs = qsize;
- quot->sign = sign_quotient;
- } else
- rlimb = mpihelp_mod_1(np, nsize, dp[0]);
- rp[0] = rlimb;
- rsize = rlimb != 0 ? 1 : 0;
- rem->nlimbs = rsize;
- rem->sign = sign_remainder;
- return 0;
- }
-
- if (quot) {
- qp = quot->d;
- /* Make sure QP and NP point to different objects. Otherwise the
- * numerator would be gradually overwritten by the quotient limbs. */
- if (qp == np) { /* Copy NP object to temporary space. */
- np = marker[markidx++] = mpi_alloc_limb_space(nsize);
- if (!np)
- goto nomem;
- MPN_COPY(np, qp, nsize);
- }
- } else /* Put quotient at top of remainder. */
- qp = rp + dsize;
-
- count_leading_zeros(normalization_steps, dp[dsize - 1]);
-
- /* Normalize the denominator, i.e. make its most significant bit set by
- * shifting it NORMALIZATION_STEPS bits to the left. Also shift the
- * numerator the same number of steps (to keep the quotient the same!).
- */
- if (normalization_steps) {
- mpi_ptr_t tp;
- mpi_limb_t nlimb;
-
- /* Shift up the denominator setting the most significant bit of
- * the most significant word. Use temporary storage not to clobber
- * the original contents of the denominator. */
- tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
- if (!tp)
- goto nomem;
- mpihelp_lshift(tp, dp, dsize, normalization_steps);
- dp = tp;
-
- /* Shift up the numerator, possibly introducing a new most
- * significant word. Move the shifted numerator in the remainder
- * meanwhile. */
- nlimb = mpihelp_lshift(rp, np, nsize, normalization_steps);
- if (nlimb) {
- rp[nsize] = nlimb;
- rsize = nsize + 1;
- } else
- rsize = nsize;
- } else {
- /* The denominator is already normalized, as required. Copy it to
- * temporary space if it overlaps with the quotient or remainder. */
- if (dp == rp || (quot && (dp == qp))) {
- mpi_ptr_t tp;
-
- tp = marker[markidx++] = mpi_alloc_limb_space(dsize);
- if (!tp)
- goto nomem;
- MPN_COPY(tp, dp, dsize);
- dp = tp;
- }
-
- /* Move the numerator to the remainder. */
- if (rp != np)
- MPN_COPY(rp, np, nsize);
-
- rsize = nsize;
- }
-
- q_limb = mpihelp_divrem(qp, 0, rp, rsize, dp, dsize);
-
- if (quot) {
- qsize = rsize - dsize;
- if (q_limb) {
- qp[qsize] = q_limb;
- qsize += 1;
- }
-
- quot->nlimbs = qsize;
- quot->sign = sign_quotient;
- }
-
- rsize = dsize;
- MPN_NORMALIZE(rp, rsize);
-
- if (normalization_steps && rsize) {
- mpihelp_rshift(rp, rp, rsize, normalization_steps);
- rsize -= rp[rsize - 1] == 0 ? 1 : 0;
- }
-
- rem->nlimbs = rsize;
- rem->sign = sign_remainder;
-
- rc = 0;
-nomem:
- while (markidx)
- mpi_free_limb_space(marker[--markidx]);
- return rc;
-}
-
-int mpi_tdiv_q_2exp(MPI w, MPI u, unsigned count)
-{
- mpi_size_t usize, wsize;
- mpi_size_t limb_cnt;
-
- usize = u->nlimbs;
- limb_cnt = count / BITS_PER_MPI_LIMB;
- wsize = usize - limb_cnt;
- if (limb_cnt >= usize)
- w->nlimbs = 0;
- else {
- mpi_ptr_t wp;
- mpi_ptr_t up;
-
- if (RESIZE_IF_NEEDED(w, wsize) < 0)
- return -ENOMEM;
- wp = w->d;
- up = u->d;
-
- count %= BITS_PER_MPI_LIMB;
- if (count) {
- mpihelp_rshift(wp, up + limb_cnt, wsize, count);
- wsize -= !wp[wsize - 1];
- } else {
- MPN_COPY_INCR(wp, up + limb_cnt, wsize);
- }
-
- w->nlimbs = wsize;
- }
- return 0;
-}
-
-/****************
- * Check whether dividend is divisible by divisor
- * (note: divisor must fit into a limb)
- */
-int mpi_divisible_ui(MPI dividend, ulong divisor)
-{
- return !mpihelp_mod_1(dividend->d, dividend->nlimbs, divisor);
-}
diff --git a/lib/mpi/mpi-gcd.c b/lib/mpi/mpi-gcd.c
deleted file mode 100644
index 13c48aef9c4e..000000000000
--- a/lib/mpi/mpi-gcd.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/* mpi-gcd.c - MPI functions
- * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- */
-
-#include "mpi-internal.h"
-
-/****************
- * Find the greatest common divisor G of A and B.
- * Return: true if this 1, false in all other cases
- */
-int mpi_gcd(MPI g, const MPI xa, const MPI xb)
-{
- MPI a = NULL, b = NULL;
-
- if (mpi_copy(&a, xa) < 0)
- goto nomem;
-
- if (mpi_copy(&b, xb) < 0)
- goto nomem;
-
- /* TAOCP Vol II, 4.5.2, Algorithm A */
- a->sign = 0;
- b->sign = 0;
- while (mpi_cmp_ui(b, 0)) {
- if (mpi_fdiv_r(g, a, b) < 0) /* g used as temorary variable */
- goto nomem;
- if (mpi_set(a, b) < 0)
- goto nomem;
- if (mpi_set(b, g) < 0)
- goto nomem;
- }
- if (mpi_set(g, a) < 0)
- goto nomem;
-
- mpi_free(a);
- mpi_free(b);
- return !mpi_cmp_ui(g, 1);
-
-nomem:
- mpi_free(a);
- mpi_free(b);
- return -ENOMEM;
-}
diff --git a/lib/mpi/mpi-inline.c b/lib/mpi/mpi-inline.c
deleted file mode 100644
index 654f68aeed8b..000000000000
--- a/lib/mpi/mpi-inline.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/* mpi-inline.c
- * Copyright (C) 1999, 2000, 2001 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- */
-
-/* put the inline functions as real functions into the lib */
-#define G10_MPI_INLINE_DECL
-
-#include "mpi-internal.h"
-
-/* always include the header becuase it is only
- * included by mpi-internal if __GCC__ is defined but we
- * need it here in all cases and the above definition of
- * of the macro allows us to do so
- */
-#include "mpi-inline.h"
diff --git a/lib/mpi/mpi-inv.c b/lib/mpi/mpi-inv.c
deleted file mode 100644
index 0951f9847745..000000000000
--- a/lib/mpi/mpi-inv.c
+++ /dev/null
@@ -1,187 +0,0 @@
-/* mpi-inv.c - MPI functions
- * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- */
-
-#include "mpi-internal.h"
-
-/****************
- * Calculate the multiplicative inverse X of A mod N
- * That is: Find the solution x for
- * 1 = (a*x) mod n
- */
-int mpi_invm(MPI x, const MPI a, const MPI n)
-{
- /* Extended Euclid's algorithm (See TAOPC Vol II, 4.5.2, Alg X)
- * modified according to Michael Penk's solution for Exercice 35
- * with further enhancement */
- MPI u = NULL, v = NULL;
- MPI u1 = NULL, u2 = NULL, u3 = NULL;
- MPI v1 = NULL, v2 = NULL, v3 = NULL;
- MPI t1 = NULL, t2 = NULL, t3 = NULL;
- unsigned k;
- int sign;
- int odd = 0;
- int rc = -ENOMEM;
-
- if (mpi_copy(&u, a) < 0)
- goto cleanup;
- if (mpi_copy(&v, n) < 0)
- goto cleanup;
-
- for (k = 0; !mpi_test_bit(u, 0) && !mpi_test_bit(v, 0); k++) {
- if (mpi_rshift(u, u, 1) < 0)
- goto cleanup;
- if (mpi_rshift(v, v, 1) < 0)
- goto cleanup;
- }
- odd = mpi_test_bit(v, 0);
-
- u1 = mpi_alloc_set_ui(1);
- if (!u1)
- goto cleanup;
- if (!odd) {
- u2 = mpi_alloc_set_ui(0);
- if (!u2)
- goto cleanup;
- }
- if (mpi_copy(&u3, u) < 0)
- goto cleanup;
- if (mpi_copy(&v1, v) < 0)
- goto cleanup;
- if (!odd) {
- v2 = mpi_alloc(mpi_get_nlimbs(u));
- if (!v2)
- goto cleanup;
- if (mpi_sub(v2, u1, u) < 0)
- goto cleanup; /* U is used as const 1 */
- }
- if (mpi_copy(&v3, v) < 0)
- goto cleanup;
- if (mpi_test_bit(u, 0)) { /* u is odd */
- t1 = mpi_alloc_set_ui(0);
- if (!t1)
- goto cleanup;
- if (!odd) {
- t2 = mpi_alloc_set_ui(1);
- if (!t2)
- goto cleanup;
- t2->sign = 1;
- }
- if (mpi_copy(&t3, v) < 0)
- goto cleanup;
- t3->sign = !t3->sign;
- goto Y4;
- } else {
- t1 = mpi_alloc_set_ui(1);
- if (!t1)
- goto cleanup;
- if (!odd) {
- t2 = mpi_alloc_set_ui(0);
- if (!t2)
- goto cleanup;
- }
- if (mpi_copy(&t3, u) < 0)
- goto cleanup;
- }
- do {
- do {
- if (!odd) {
- if (mpi_test_bit(t1, 0) || mpi_test_bit(t2, 0)) { /* one is odd */
- if (mpi_add(t1, t1, v) < 0)
- goto cleanup;
- if (mpi_sub(t2, t2, u) < 0)
- goto cleanup;
- }
- if (mpi_rshift(t1, t1, 1) < 0)
- goto cleanup;
- if (mpi_rshift(t2, t2, 1) < 0)
- goto cleanup;
- if (mpi_rshift(t3, t3, 1) < 0)
- goto cleanup;
- } else {
- if (mpi_test_bit(t1, 0))
- if (mpi_add(t1, t1, v) < 0)
- goto cleanup;
- if (mpi_rshift(t1, t1, 1) < 0)
- goto cleanup;
- if (mpi_rshift(t3, t3, 1) < 0)
- goto cleanup;
- }
-Y4:
- ;
- } while (!mpi_test_bit(t3, 0)); /* while t3 is even */
-
- if (!t3->sign) {
- if (mpi_set(u1, t1) < 0)
- goto cleanup;
- if (!odd)
- if (mpi_set(u2, t2) < 0)
- goto cleanup;
- if (mpi_set(u3, t3) < 0)
- goto cleanup;
- } else {
- if (mpi_sub(v1, v, t1) < 0)
- goto cleanup;
- sign = u->sign;
- u->sign = !u->sign;
- if (!odd)
- if (mpi_sub(v2, u, t2) < 0)
- goto cleanup;
- u->sign = sign;
- sign = t3->sign;
- t3->sign = !t3->sign;
- if (mpi_set(v3, t3) < 0)
- goto cleanup;
- t3->sign = sign;
- }
- if (mpi_sub(t1, u1, v1) < 0)
- goto cleanup;
- if (!odd)
- if (mpi_sub(t2, u2, v2) < 0)
- goto cleanup;
- if (mpi_sub(t3, u3, v3) < 0)
- goto cleanup;
- if (t1->sign) {
- if (mpi_add(t1, t1, v) < 0)
- goto cleanup;
- if (!odd)
- if (mpi_sub(t2, t2, u) < 0)
- goto cleanup;
- }
- } while (mpi_cmp_ui(t3, 0)); /* while t3 != 0 */
- /* mpi_lshift( u3, k ); */
- rc = mpi_set(x, u1);
-
-cleanup:
- mpi_free(u1);
- mpi_free(v1);
- mpi_free(t1);
- if (!odd) {
- mpi_free(u2);
- mpi_free(v2);
- mpi_free(t2);
- }
- mpi_free(u3);
- mpi_free(v3);
- mpi_free(t3);
-
- mpi_free(u);
- mpi_free(v);
- return rc;
-}
diff --git a/lib/mpi/mpi-mpow.c b/lib/mpi/mpi-mpow.c
deleted file mode 100644
index 7328d0d6c748..000000000000
--- a/lib/mpi/mpi-mpow.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/* mpi-mpow.c - MPI functions
- * Copyright (C) 1998, 1999, 2000 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- */
-
-#include "mpi-internal.h"
-#include "longlong.h"
-
-static int build_index(const MPI *exparray, int k, int i, int t)
-{
- int j, bitno;
- int index = 0;
-
- bitno = t - i;
- for (j = k - 1; j >= 0; j--) {
- index <<= 1;
- if (mpi_test_bit(exparray[j], bitno))
- index |= 1;
- }
- return index;
-}
-
-/****************
- * RES = (BASE[0] ^ EXP[0]) * (BASE[1] ^ EXP[1]) * ... * mod M
- */
-int mpi_mulpowm(MPI res, MPI *basearray, MPI *exparray, MPI m)
-{
- int rc = -ENOMEM;
- int k; /* number of elements */
- int t; /* bit size of largest exponent */
- int i, j, idx;
- MPI *G = NULL; /* table with precomputed values of size 2^k */
- MPI tmp = NULL;
-
- for (k = 0; basearray[k]; k++)
- ;
- if (!k) {
- pr_emerg("mpi_mulpowm: assert(k) failed\n");
- BUG();
- }
- for (t = 0, i = 0; (tmp = exparray[i]); i++) {
- j = mpi_get_nbits(tmp);
- if (j > t)
- t = j;
- }
- if (i != k) {
- pr_emerg("mpi_mulpowm: assert(i==k) failed\n");
- BUG();
- }
- if (!t) {
- pr_emerg("mpi_mulpowm: assert(t) failed\n");
- BUG();
- }
- if (k >= 10) {
- pr_emerg("mpi_mulpowm: assert(k<10) failed\n");
- BUG();
- }
-
- G = kzalloc((1 << k) * sizeof *G, GFP_KERNEL);
- if (!G)
- goto err_out;
-
- /* and calculate */
- tmp = mpi_alloc(mpi_get_nlimbs(m) + 1);
- if (!tmp)
- goto nomem;
- if (mpi_set_ui(res, 1) < 0)
- goto nomem;
- for (i = 1; i <= t; i++) {
- if (mpi_mulm(tmp, res, res, m) < 0)
- goto nomem;
- idx = build_index(exparray, k, i, t);
- if (!(idx >= 0 && idx < (1 << k))) {
- pr_emerg("mpi_mulpowm: assert(idx >= 0 && idx < (1<<k)) failed\n");
- BUG();
- }
- if (!G[idx]) {
- if (!idx) {
- G[0] = mpi_alloc_set_ui(1);
- if (!G[0])
- goto nomem;
- } else {
- for (j = 0; j < k; j++) {
- if ((idx & (1 << j))) {
- if (!G[idx]) {
- if (mpi_copy
- (&G[idx],
- basearray[j]) < 0)
- goto nomem;
- } else {
- if (mpi_mulm
- (G[idx], G[idx],
- basearray[j],
- m) < 0)
- goto nomem;
- }
- }
- }
- if (!G[idx]) {
- G[idx] = mpi_alloc(0);
- if (!G[idx])
- goto nomem;
- }
- }
- }
- if (mpi_mulm(res, tmp, G[idx], m) < 0)
- goto nomem;
- }
-
- rc = 0;
-nomem:
- /* cleanup */
- mpi_free(tmp);
- for (i = 0; i < (1 << k); i++)
- mpi_free(G[i]);
- kfree(G);
-err_out:
- return rc;
-}
diff --git a/lib/mpi/mpi-mul.c b/lib/mpi/mpi-mul.c
deleted file mode 100644
index 1f3219e27292..000000000000
--- a/lib/mpi/mpi-mul.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/* mpi-mul.c - MPI functions
- * Copyright (C) 1994, 1996 Free Software Foundation, Inc.
- * Copyright (C) 1998, 2001 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- *
- * Note: This code is heavily based on the GNU MP Library.
- * Actually it's the same code with only minor changes in the
- * way the data is stored; this is to support the abstraction
- * of an optional secure memory allocation which may be used
- * to avoid revealing of sensitive data due to paging etc.
- * The GNU MP Library itself is published under the LGPL;
- * however I decided to publish this code under the plain GPL.
- */
-
-#include "mpi-internal.h"
-
-int mpi_mul_ui(MPI prod, MPI mult, unsigned long small_mult)
-{
- mpi_size_t size, prod_size;
- mpi_ptr_t prod_ptr;
- mpi_limb_t cy;
- int sign;
-
- size = mult->nlimbs;
- sign = mult->sign;
-
- if (!size || !small_mult) {
- prod->nlimbs = 0;
- prod->sign = 0;
- return 0;
- }
-
- prod_size = size + 1;
- if (prod->alloced < prod_size)
- if (mpi_resize(prod, prod_size) < 0)
- return -ENOMEM;
- prod_ptr = prod->d;
-
- cy = mpihelp_mul_1(prod_ptr, mult->d, size, (mpi_limb_t) small_mult);
- if (cy)
- prod_ptr[size++] = cy;
- prod->nlimbs = size;
- prod->sign = sign;
- return 0;
-}
-
-int mpi_mul_2exp(MPI w, MPI u, unsigned long cnt)
-{
- mpi_size_t usize, wsize, limb_cnt;
- mpi_ptr_t wp;
- mpi_limb_t wlimb;
- int usign, wsign;
-
- usize = u->nlimbs;
- usign = u->sign;
-
- if (!usize) {
- w->nlimbs = 0;
- w->sign = 0;
- return 0;
- }
-
- limb_cnt = cnt / BITS_PER_MPI_LIMB;
- wsize = usize + limb_cnt + 1;
- if (w->alloced < wsize)
- if (mpi_resize(w, wsize) < 0)
- return -ENOMEM;
- wp = w->d;
- wsize = usize + limb_cnt;
- wsign = usign;
-
- cnt %= BITS_PER_MPI_LIMB;
- if (cnt) {
- wlimb = mpihelp_lshift(wp + limb_cnt, u->d, usize, cnt);
- if (wlimb) {
- wp[wsize] = wlimb;
- wsize++;
- }
- } else {
- MPN_COPY_DECR(wp + limb_cnt, u->d, usize);
- }
-
- /* Zero all whole limbs at low end. Do it here and not before calling
- * mpn_lshift, not to lose for U == W. */
- MPN_ZERO(wp, limb_cnt);
-
- w->nlimbs = wsize;
- w->sign = wsign;
- return 0;
-}
-
-int mpi_mul(MPI w, MPI u, MPI v)
-{
- int rc = -ENOMEM;
- mpi_size_t usize, vsize, wsize;
- mpi_ptr_t up, vp, wp;
- mpi_limb_t cy;
- int usign, vsign, sign_product;
- int assign_wp = 0;
- mpi_ptr_t tmp_limb = NULL;
-
- if (u->nlimbs < v->nlimbs) { /* Swap U and V. */
- usize = v->nlimbs;
- usign = v->sign;
- up = v->d;
- vsize = u->nlimbs;
- vsign = u->sign;
- vp = u->d;
- } else {
- usize = u->nlimbs;
- usign = u->sign;
- up = u->d;
- vsize = v->nlimbs;
- vsign = v->sign;
- vp = v->d;
- }
- sign_product = usign ^ vsign;
- wp = w->d;
-
- /* Ensure W has space enough to store the result. */
- wsize = usize + vsize;
- if (w->alloced < (size_t) wsize) {
- if (wp == up || wp == vp) {
- wp = mpi_alloc_limb_space(wsize);
- if (!wp)
- goto nomem;
- assign_wp = 1;
- } else {
- if (mpi_resize(w, wsize) < 0)
- goto nomem;
- wp = w->d;
- }
- } else { /* Make U and V not overlap with W. */
- if (wp == up) {
- /* W and U are identical. Allocate temporary space for U. */
- up = tmp_limb = mpi_alloc_limb_space(usize);
- if (!up)
- goto nomem;
- /* Is V identical too? Keep it identical with U. */
- if (wp == vp)
- vp = up;
- /* Copy to the temporary space. */
- MPN_COPY(up, wp, usize);
- } else if (wp == vp) {
- /* W and V are identical. Allocate temporary space for V. */
- vp = tmp_limb = mpi_alloc_limb_space(vsize);
- if (!vp)
- goto nomem;
- /* Copy to the temporary space. */
- MPN_COPY(vp, wp, vsize);
- }
- }
-
- if (!vsize)
- wsize = 0;
- else {
- if (mpihelp_mul(wp, up, usize, vp, vsize, &cy) < 0)
- goto nomem;
- wsize -= cy ? 0 : 1;
- }
-
- if (assign_wp)
- mpi_assign_limb_space(w, wp, wsize);
-
- w->nlimbs = wsize;
- w->sign = sign_product;
- rc = 0;
-nomem:
- if (tmp_limb)
- mpi_free_limb_space(tmp_limb);
- return rc;
-}
-
-int mpi_mulm(MPI w, MPI u, MPI v, MPI m)
-{
- if (mpi_mul(w, u, v) < 0)
- return -ENOMEM;
- return mpi_fdiv_r(w, w, m);
-}
diff --git a/lib/mpi/mpi-scan.c b/lib/mpi/mpi-scan.c
deleted file mode 100644
index b2da5ad96199..000000000000
--- a/lib/mpi/mpi-scan.c
+++ /dev/null
@@ -1,136 +0,0 @@
-/* mpi-scan.c - MPI functions
- * Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
- *
- * This file is part of GnuPG.
- *
- * GnuPG is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * GnuPG is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
- */
-
-#include "mpi-internal.h"
-#include "longlong.h"
-
-/****************
- * Scan through an mpi and return byte for byte. a -1 is returned to indicate
- * the end of the mpi. Scanning is done from the lsb to the msb, returned
- * values are in the range of 0 .. 255.
- *
- * FIXME: This code is VERY ugly!
- */
-int mpi_getbyte(const MPI a, unsigned idx)
-{
- int i, j;
- unsigned n;
- mpi_ptr_t ap;
- mpi_limb_t limb;
-
- ap = a->d;
- for (n = 0, i = 0; i < a->nlimbs; i++) {
- limb = ap[i];
- for (j = 0; j < BYTES_PER_MPI_LIMB; j++, n++)
- if (n == idx)
- return (limb >> j * 8) & 0xff;
- }
- return -1;
-}
-
-/****************
- * Put a value at position IDX into A. idx counts from lsb to msb
- */
-void mpi_putbyte(MPI a, unsigned idx, int xc)
-{
- int i, j;
- unsigned n;
- mpi_ptr_t ap;
- mpi_limb_t limb, c;
-
- c = xc & 0xff;
- ap = a->d;
- for (n = 0, i = 0; i < a->alloced; i++) {
- limb = ap[i];
- for (j = 0; j < BYTES_PER_MPI_LIMB; j++, n++)
- if (n == idx) {
-#if BYTES_PER_MPI_LIMB == 4
- if (j == 0)
- limb = (limb & 0xffffff00) | c;
- else if (j == 1)
- limb = (limb & 0xffff00ff) | (c << 8);
- else if (j == 2)
- limb = (limb & 0xff00ffff) | (c << 16);
- else
- limb = (limb & 0x00ffffff) | (c << 24);
-#elif BYTES_PER_MPI_LIMB == 8
- if (j == 0)
- limb = (limb & 0xffffffffffffff00) | c;
- else if (j == 1)
- limb =
- (limb & 0xffffffffffff00ff) | (c <<
- 8);
- else if (j == 2)
- limb =
- (limb & 0xffffffffff00ffff) | (c <<
- 16);
- else if (j == 3)
- limb =
- (limb & 0xffffffff00ffffff) | (c <<
- 24);
- else if (j == 4)
- limb =
- (limb & 0xffffff00ffffffff) | (c <<
- 32);
- else if (j == 5)
- limb =
- (limb & 0xffff00ffffffffff) | (c <<
- 40);
- else if (j == 6)
- limb =
- (limb & 0xff00ffffffffffff) | (c <<
- 48);
- else
- limb =
- (limb & 0x00ffffffffffffff) | (c <<
- 56);
-#else
-#error please enhance this function, its ugly - i know.
-#endif
- if (a->nlimbs <= i)
- a->nlimbs = i + 1;
- ap[i] = limb;
- return;
- }
- }
- log_bug("index out of range\n");
-}
-
-/****************
- * Count the number of zerobits at the low end of A
- */
-unsigned mpi_trailing_zeros(const MPI a)
-{
- unsigned n, count = 0;
-
- for (n = 0; n < a->nlimbs; n++) {
- if (a->d[n]) {
- unsigned nn;
- mpi_limb_t alimb = a->d[n];
-
- count_trailing_zeros(nn, alimb);
- count += nn;
- break;
- }
- count += BITS_PER_MPI_LIMB;
- }
- return count;
-
-}
diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
index f26b41fcb48c..f0fa65995800 100644
--- a/lib/mpi/mpicoder.c
+++ b/lib/mpi/mpicoder.c
@@ -74,81 +74,6 @@ leave:
EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
/****************
- * Make an mpi from a character string.
- */
-int mpi_fromstr(MPI val, const char *str)
-{
- int hexmode = 0, sign = 0, prepend_zero = 0, i, j, c, c1, c2;
- unsigned nbits, nbytes, nlimbs;
- mpi_limb_t a;
-
- if (*str == '-') {
- sign = 1;
- str++;
- }
- if (*str == '0' && str[1] == 'x')
- hexmode = 1;
- else
- return -EINVAL; /* other bases are not yet supported */
- str += 2;
-
- nbits = strlen(str) * 4;
- if (nbits % 8)
- prepend_zero = 1;
- nbytes = (nbits + 7) / 8;
- nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
- if (val->alloced < nlimbs)
- if (!mpi_resize(val, nlimbs))
- return -ENOMEM;
- i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
- i %= BYTES_PER_MPI_LIMB;
- j = val->nlimbs = nlimbs;
- val->sign = sign;
- for (; j > 0; j--) {
- a = 0;
- for (; i < BYTES_PER_MPI_LIMB; i++) {
- if (prepend_zero) {
- c1 = '0';
- prepend_zero = 0;
- } else
- c1 = *str++;
- assert(c1);
- c2 = *str++;
- assert(c2);
- if (c1 >= '0' && c1 <= '9')
- c = c1 - '0';
- else if (c1 >= 'a' && c1 <= 'f')
- c = c1 - 'a' + 10;
- else if (c1 >= 'A' && c1 <= 'F')
- c = c1 - 'A' + 10;
- else {
- mpi_clear(val);
- return 1;
- }
- c <<= 4;
- if (c2 >= '0' && c2 <= '9')
- c |= c2 - '0';
- else if (c2 >= 'a' && c2 <= 'f')
- c |= c2 - 'a' + 10;
- else if (c2 >= 'A' && c2 <= 'F')
- c |= c2 - 'A' + 10;
- else {
- mpi_clear(val);
- return 1;
- }
- a <<= 8;
- a |= c;
- }
- i = 0;
-
- val->d[j - 1] = a;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mpi_fromstr);
-
-/****************
* Return an allocated buffer with the MPI (msb first).
* NBYTES receives the length of this buffer. Caller must free the
* return string (This function does return a 0 byte buffer with NBYTES
diff --git a/lib/mpi/mpih-div.c b/lib/mpi/mpih-div.c
index cde1aaec18da..c57d1d46295e 100644
--- a/lib/mpi/mpih-div.c
+++ b/lib/mpi/mpih-div.c
@@ -37,159 +37,6 @@
#define UDIV_TIME UMUL_TIME
#endif
-/* FIXME: We should be using invert_limb (or invert_normalized_limb)
- * here (not udiv_qrnnd).
- */
-
-mpi_limb_t
-mpihelp_mod_1(mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
- mpi_limb_t divisor_limb)
-{
- mpi_size_t i;
- mpi_limb_t n1, n0, r;
- int dummy;
-
- /* Botch: Should this be handled at all? Rely on callers? */
- if (!dividend_size)
- return 0;
-
- /* If multiplication is much faster than division, and the
- * dividend is large, pre-invert the divisor, and use
- * only multiplications in the inner loop.
- *
- * This test should be read:
- * Does it ever help to use udiv_qrnnd_preinv?
- * && Does what we save compensate for the inversion overhead?
- */
- if (UDIV_TIME > (2 * UMUL_TIME + 6)
- && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
- int normalization_steps;
-
- count_leading_zeros(normalization_steps, divisor_limb);
- if (normalization_steps) {
- mpi_limb_t divisor_limb_inverted;
-
- divisor_limb <<= normalization_steps;
-
- /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
- * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
- * most significant bit (with weight 2**N) implicit.
- *
- * Special case for DIVISOR_LIMB == 100...000.
- */
- if (!(divisor_limb << 1))
- divisor_limb_inverted = ~(mpi_limb_t) 0;
- else
- udiv_qrnnd(divisor_limb_inverted, dummy,
- -divisor_limb, 0, divisor_limb);
-
- n1 = dividend_ptr[dividend_size - 1];
- r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
-
- /* Possible optimization:
- * if (r == 0
- * && divisor_limb > ((n1 << normalization_steps)
- * | (dividend_ptr[dividend_size - 2] >> ...)))
- * ...one division less...
- */
- for (i = dividend_size - 2; i >= 0; i--) {
- n0 = dividend_ptr[i];
- UDIV_QRNND_PREINV(dummy, r, r,
- ((n1 << normalization_steps)
- | (n0 >>
- (BITS_PER_MPI_LIMB -
- normalization_steps))),
- divisor_limb,
- divisor_limb_inverted);
- n1 = n0;
- }
- UDIV_QRNND_PREINV(dummy, r, r,
- n1 << normalization_steps,
- divisor_limb, divisor_limb_inverted);
- return r >> normalization_steps;
- } else {
- mpi_limb_t divisor_limb_inverted;
-
- /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
- * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
- * most significant bit (with weight 2**N) implicit.
- *
- * Special case for DIVISOR_LIMB == 100...000.
- */
- if (!(divisor_limb << 1))
- divisor_limb_inverted = ~(mpi_limb_t) 0;
- else
- udiv_qrnnd(divisor_limb_inverted, dummy,
- -divisor_limb, 0, divisor_limb);
-
- i = dividend_size - 1;
- r = dividend_ptr[i];
-
- if (r >= divisor_limb)
- r = 0;
- else
- i--;
-
- for (; i >= 0; i--) {
- n0 = dividend_ptr[i];
- UDIV_QRNND_PREINV(dummy, r, r,
- n0, divisor_limb,
- divisor_limb_inverted);
- }
- return r;
- }
- } else {
- if (UDIV_NEEDS_NORMALIZATION) {
- int normalization_steps;
-
- count_leading_zeros(normalization_steps, divisor_limb);
- if (normalization_steps) {
- divisor_limb <<= normalization_steps;
-
- n1 = dividend_ptr[dividend_size - 1];
- r = n1 >> (BITS_PER_MPI_LIMB -
- normalization_steps);
-
- /* Possible optimization:
- * if (r == 0
- * && divisor_limb > ((n1 << normalization_steps)
- * | (dividend_ptr[dividend_size - 2] >> ...)))
- * ...one division less...
- */
- for (i = dividend_size - 2; i >= 0; i--) {
- n0 = dividend_ptr[i];
- udiv_qrnnd(dummy, r, r,
- ((n1 << normalization_steps)
- | (n0 >>
- (BITS_PER_MPI_LIMB -
- normalization_steps))),
- divisor_limb);
- n1 = n0;
- }
- udiv_qrnnd(dummy, r, r,
- n1 << normalization_steps,
- divisor_limb);
- return r >> normalization_steps;
- }
- }
- /* No normalization needed, either because udiv_qrnnd doesn't require
- * it, or because DIVISOR_LIMB is already normalized. */
- i = dividend_size - 1;
- r = dividend_ptr[i];
-
- if (r >= divisor_limb)
- r = 0;
- else
- i--;
-
- for (; i >= 0; i--) {
- n0 = dividend_ptr[i];
- udiv_qrnnd(dummy, r, r, n0, divisor_limb);
- }
- return r;
- }
-}
-
/* Divide num (NP/NSIZE) by den (DP/DSIZE) and write
* the NSIZE-DSIZE least significant quotient limbs at QP
* and the DSIZE long remainder at NP. If QEXTRA_LIMBS is
@@ -387,159 +234,3 @@ q_test:
return most_significant_q_limb;
}
-
-/****************
- * Divide (DIVIDEND_PTR,,DIVIDEND_SIZE) by DIVISOR_LIMB.
- * Write DIVIDEND_SIZE limbs of quotient at QUOT_PTR.
- * Return the single-limb remainder.
- * There are no constraints on the value of the divisor.
- *
- * QUOT_PTR and DIVIDEND_PTR might point to the same limb.
- */
-
-mpi_limb_t
-mpihelp_divmod_1(mpi_ptr_t quot_ptr,
- mpi_ptr_t dividend_ptr, mpi_size_t dividend_size,
- mpi_limb_t divisor_limb)
-{
- mpi_size_t i;
- mpi_limb_t n1, n0, r;
- int dummy;
-
- if (!dividend_size)
- return 0;
-
- /* If multiplication is much faster than division, and the
- * dividend is large, pre-invert the divisor, and use
- * only multiplications in the inner loop.
- *
- * This test should be read:
- * Does it ever help to use udiv_qrnnd_preinv?
- * && Does what we save compensate for the inversion overhead?
- */
- if (UDIV_TIME > (2 * UMUL_TIME + 6)
- && (UDIV_TIME - (2 * UMUL_TIME + 6)) * dividend_size > UDIV_TIME) {
- int normalization_steps;
-
- count_leading_zeros(normalization_steps, divisor_limb);
- if (normalization_steps) {
- mpi_limb_t divisor_limb_inverted;
-
- divisor_limb <<= normalization_steps;
-
- /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
- * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
- * most significant bit (with weight 2**N) implicit.
- */
- /* Special case for DIVISOR_LIMB == 100...000. */
- if (!(divisor_limb << 1))
- divisor_limb_inverted = ~(mpi_limb_t) 0;
- else
- udiv_qrnnd(divisor_limb_inverted, dummy,
- -divisor_limb, 0, divisor_limb);
-
- n1 = dividend_ptr[dividend_size - 1];
- r = n1 >> (BITS_PER_MPI_LIMB - normalization_steps);
-
- /* Possible optimization:
- * if (r == 0
- * && divisor_limb > ((n1 << normalization_steps)
- * | (dividend_ptr[dividend_size - 2] >> ...)))
- * ...one division less...
- */
- for (i = dividend_size - 2; i >= 0; i--) {
- n0 = dividend_ptr[i];
- UDIV_QRNND_PREINV(quot_ptr[i + 1], r, r,
- ((n1 << normalization_steps)
- | (n0 >>
- (BITS_PER_MPI_LIMB -
- normalization_steps))),
- divisor_limb,
- divisor_limb_inverted);
- n1 = n0;
- }
- UDIV_QRNND_PREINV(quot_ptr[0], r, r,
- n1 << normalization_steps,
- divisor_limb, divisor_limb_inverted);
- return r >> normalization_steps;
- } else {
- mpi_limb_t divisor_limb_inverted;
-
- /* Compute (2**2N - 2**N * DIVISOR_LIMB) / DIVISOR_LIMB. The
- * result is a (N+1)-bit approximation to 1/DIVISOR_LIMB, with the
- * most significant bit (with weight 2**N) implicit.
- */
- /* Special case for DIVISOR_LIMB == 100...000. */
- if (!(divisor_limb << 1))
- divisor_limb_inverted = ~(mpi_limb_t) 0;
- else
- udiv_qrnnd(divisor_limb_inverted, dummy,
- -divisor_limb, 0, divisor_limb);
-
- i = dividend_size - 1;
- r = dividend_ptr[i];
-
- if (r >= divisor_limb)
- r = 0;
- else
- quot_ptr[i--] = 0;
-
- for (; i >= 0; i--) {
- n0 = dividend_ptr[i];
- UDIV_QRNND_PREINV(quot_ptr[i], r, r,
- n0, divisor_limb,
- divisor_limb_inverted);
- }
- return r;
- }
- } else {
- if (UDIV_NEEDS_NORMALIZATION) {
- int normalization_steps;
-
- count_leading_zeros(normalization_steps, divisor_limb);
- if (normalization_steps) {
- divisor_limb <<= normalization_steps;
-
- n1 = dividend_ptr[dividend_size - 1];
- r = n1 >> (BITS_PER_MPI_LIMB -
- normalization_steps);
-
- /* Possible optimization:
- * if (r == 0
- * && divisor_limb > ((n1 << normalization_steps)
- * | (dividend_ptr[dividend_size - 2] >> ...)))
- * ...one division less...
- */
- for (i = dividend_size - 2; i >= 0; i--) {
- n0 = dividend_ptr[i];
- udiv_qrnnd(quot_ptr[i + 1], r, r,
- ((n1 << normalization_steps)
- | (n0 >>
- (BITS_PER_MPI_LIMB -
- normalization_steps))),
- divisor_limb);
- n1 = n0;
- }
- udiv_qrnnd(quot_ptr[0], r, r,
- n1 << normalization_steps,
- divisor_limb);
- return r >> normalization_steps;
- }
- }
- /* No normalization needed, either because udiv_qrnnd doesn't require
- * it, or because DIVISOR_LIMB is already normalized. */
- i = dividend_size - 1;
- r = dividend_ptr[i];
-
- if (r >= divisor_limb)
- r = 0;
- else
- quot_ptr[i--] = 0;
-
- for (; i >= 0; i--) {
- n0 = dividend_ptr[i];
- udiv_qrnnd(quot_ptr[i], r, r, n0, divisor_limb);
- }
- return r;
- }
-}
diff --git a/lib/mpi/mpih-mul.c b/lib/mpi/mpih-mul.c
index c69c5eef233b..7c841719fdfb 100644
--- a/lib/mpi/mpih-mul.c
+++ b/lib/mpi/mpih-mul.c
@@ -330,36 +330,6 @@ mpih_sqr_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t size, mpi_ptr_t tspace)
}
}
-/* This should be made into an inline function in gmp.h. */
-int mpihelp_mul_n(mpi_ptr_t prodp, mpi_ptr_t up, mpi_ptr_t vp, mpi_size_t size)
-{
- if (up == vp) {
- if (size < KARATSUBA_THRESHOLD)
- mpih_sqr_n_basecase(prodp, up, size);
- else {
- mpi_ptr_t tspace;
- tspace = mpi_alloc_limb_space(2 * size);
- if (!tspace)
- return -ENOMEM;
- mpih_sqr_n(prodp, up, size, tspace);
- mpi_free_limb_space(tspace);
- }
- } else {
- if (size < KARATSUBA_THRESHOLD)
- mul_n_basecase(prodp, up, vp, size);
- else {
- mpi_ptr_t tspace;
- tspace = mpi_alloc_limb_space(2 * size);
- if (!tspace)
- return -ENOMEM;
- mul_n(prodp, up, vp, size, tspace);
- mpi_free_limb_space(tspace);
- }
- }
-
- return 0;
-}
-
int
mpihelp_mul_karatsuba_case(mpi_ptr_t prodp,
mpi_ptr_t up, mpi_size_t usize,
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index 26e4ed31e256..657979f71bef 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -106,13 +106,6 @@ int mpi_resize(MPI a, unsigned nlimbs)
return 0;
}
-void mpi_clear(MPI a)
-{
- a->nlimbs = 0;
- a->nbits = 0;
- a->flags = 0;
-}
-
void mpi_free(MPI a)
{
if (!a)
@@ -128,84 +121,3 @@ void mpi_free(MPI a)
kfree(a);
}
EXPORT_SYMBOL_GPL(mpi_free);
-
-/****************
- * Note: This copy function should not interpret the MPI
- * but copy it transparently.
- */
-int mpi_copy(MPI *copied, const MPI a)
-{
- size_t i;
- MPI b;
-
- *copied = NULL;
-
- if (a) {
- b = mpi_alloc(a->nlimbs);
- if (!b)
- return -ENOMEM;
-
- b->nlimbs = a->nlimbs;
- b->sign = a->sign;
- b->flags = a->flags;
- b->nbits = a->nbits;
-
- for (i = 0; i < b->nlimbs; i++)
- b->d[i] = a->d[i];
-
- *copied = b;
- }
-
- return 0;
-}
-
-int mpi_set(MPI w, const MPI u)
-{
- mpi_ptr_t wp, up;
- mpi_size_t usize = u->nlimbs;
- int usign = u->sign;
-
- if (RESIZE_IF_NEEDED(w, (size_t) usize) < 0)
- return -ENOMEM;
-
- wp = w->d;
- up = u->d;
- MPN_COPY(wp, up, usize);
- w->nlimbs = usize;
- w->nbits = u->nbits;
- w->flags = u->flags;
- w->sign = usign;
- return 0;
-}
-
-int mpi_set_ui(MPI w, unsigned long u)
-{
- if (RESIZE_IF_NEEDED(w, 1) < 0)
- return -ENOMEM;
- w->d[0] = u;
- w->nlimbs = u ? 1 : 0;
- w->sign = 0;
- w->nbits = 0;
- w->flags = 0;
- return 0;
-}
-
-MPI mpi_alloc_set_ui(unsigned long u)
-{
- MPI w = mpi_alloc(1);
- if (!w)
- return w;
- w->d[0] = u;
- w->nlimbs = u ? 1 : 0;
- w->sign = 0;
- return w;
-}
-
-void mpi_swap(MPI a, MPI b)
-{
- struct gcry_mpi tmp;
-
- tmp = *a;
- *a = *b;
- *b = tmp;
-}
diff --git a/mm/bootmem.c b/mm/bootmem.c
index ec4fcb7a56c8..bcb63ac48cc5 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -698,7 +698,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
return ___alloc_bootmem(size, align, goal, limit);
}
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size, unsigned long align,
unsigned long goal, unsigned long limit)
{
@@ -710,6 +710,10 @@ again:
if (ptr)
return ptr;
+ /* do not panic in alloc_bootmem_bdata() */
+ if (limit && goal + size > limit)
+ limit = 0;
+
ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
if (ptr)
return ptr;
diff --git a/mm/bounce.c b/mm/bounce.c
index d1be02ca1889..042086775561 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -24,23 +24,25 @@
static mempool_t *page_pool, *isa_page_pool;
-#ifdef CONFIG_HIGHMEM
+#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
-#ifndef CONFIG_MEMORY_HOTPLUG
+#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
if (max_pfn <= max_low_pfn)
return 0;
#endif
page_pool = mempool_create_page_pool(POOL_SIZE, 0);
BUG_ON(!page_pool);
- printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
+ printk("bounce pool size: %d pages\n", POOL_SIZE);
return 0;
}
__initcall(init_emergency_pool);
+#endif
+#ifdef CONFIG_HIGHMEM
/*
* highmem version, map in to vec
*/
diff --git a/mm/compaction.c b/mm/compaction.c
index 7ea259d82a99..2f42d9528539 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -701,8 +701,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
if (err) {
putback_lru_pages(&cc->migratepages);
cc->nr_migratepages = 0;
+ if (err == -ENOMEM) {
+ ret = COMPACT_PARTIAL;
+ goto out;
+ }
}
-
}
out:
diff --git a/mm/frontswap.c b/mm/frontswap.c
index e25025574a02..6b3e71a2cd48 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -11,15 +11,11 @@
* This work is licensed under the terms of the GNU GPL, version 2.
*/
-#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
-#include <linux/proc_fs.h>
#include <linux/security.h>
-#include <linux/capability.h>
#include <linux/module.h>
-#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
@@ -110,16 +106,21 @@ void __frontswap_init(unsigned type)
BUG_ON(sis == NULL);
if (sis->frontswap_map == NULL)
return;
- if (frontswap_enabled)
- (*frontswap_ops.init)(type);
+ frontswap_ops.init(type);
}
EXPORT_SYMBOL(__frontswap_init);
+static inline void __frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
+{
+ frontswap_clear(sis, offset);
+ atomic_dec(&sis->frontswap_pages);
+}
+
/*
* "Store" data from a page to frontswap and associate it with the page's
* swaptype and offset. Page must be locked and in the swap cache.
* If frontswap already contains a page with matching swaptype and
- * offset, the frontswap implmentation may either overwrite the data and
+ * offset, the frontswap implementation may either overwrite the data and
* return success or invalidate the page from frontswap and return failure.
*/
int __frontswap_store(struct page *page)
@@ -134,22 +135,21 @@ int __frontswap_store(struct page *page)
BUG_ON(sis == NULL);
if (frontswap_test(sis, offset))
dup = 1;
- ret = (*frontswap_ops.store)(type, offset, page);
+ ret = frontswap_ops.store(type, offset, page);
if (ret == 0) {
frontswap_set(sis, offset);
inc_frontswap_succ_stores();
if (!dup)
atomic_inc(&sis->frontswap_pages);
- } else if (dup) {
+ } else {
/*
failed dup always results in automatic invalidate of
the (older) page from frontswap
*/
- frontswap_clear(sis, offset);
- atomic_dec(&sis->frontswap_pages);
- inc_frontswap_failed_stores();
- } else
inc_frontswap_failed_stores();
+ if (dup)
+ __frontswap_clear(sis, offset);
+ }
if (frontswap_writethrough_enabled)
/* report failure so swap also writes to swap device */
ret = -1;
@@ -173,7 +173,7 @@ int __frontswap_load(struct page *page)
BUG_ON(!PageLocked(page));
BUG_ON(sis == NULL);
if (frontswap_test(sis, offset))
- ret = (*frontswap_ops.load)(type, offset, page);
+ ret = frontswap_ops.load(type, offset, page);
if (ret == 0)
inc_frontswap_loads();
return ret;
@@ -190,9 +190,8 @@ void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
BUG_ON(sis == NULL);
if (frontswap_test(sis, offset)) {
- (*frontswap_ops.invalidate_page)(type, offset);
- atomic_dec(&sis->frontswap_pages);
- frontswap_clear(sis, offset);
+ frontswap_ops.invalidate_page(type, offset);
+ __frontswap_clear(sis, offset);
inc_frontswap_invalidates();
}
}
@@ -209,67 +208,102 @@ void __frontswap_invalidate_area(unsigned type)
BUG_ON(sis == NULL);
if (sis->frontswap_map == NULL)
return;
- (*frontswap_ops.invalidate_area)(type);
+ frontswap_ops.invalidate_area(type);
atomic_set(&sis->frontswap_pages, 0);
memset(sis->frontswap_map, 0, sis->max / sizeof(long));
}
EXPORT_SYMBOL(__frontswap_invalidate_area);
-/*
- * Frontswap, like a true swap device, may unnecessarily retain pages
- * under certain circumstances; "shrink" frontswap is essentially a
- * "partial swapoff" and works by calling try_to_unuse to attempt to
- * unuse enough frontswap pages to attempt to -- subject to memory
- * constraints -- reduce the number of pages in frontswap to the
- * number given in the parameter target_pages.
- */
-void frontswap_shrink(unsigned long target_pages)
+static unsigned long __frontswap_curr_pages(void)
{
- struct swap_info_struct *si = NULL;
- int si_frontswap_pages;
- unsigned long total_pages = 0, total_pages_to_unuse;
- unsigned long pages = 0, pages_to_unuse = 0;
int type;
- bool locked = false;
+ unsigned long totalpages = 0;
+ struct swap_info_struct *si = NULL;
- /*
- * we don't want to hold swap_lock while doing a very
- * lengthy try_to_unuse, but swap_list may change
- * so restart scan from swap_list.head each time
- */
- spin_lock(&swap_lock);
- locked = true;
- total_pages = 0;
+ assert_spin_locked(&swap_lock);
for (type = swap_list.head; type >= 0; type = si->next) {
si = swap_info[type];
- total_pages += atomic_read(&si->frontswap_pages);
+ totalpages += atomic_read(&si->frontswap_pages);
}
- if (total_pages <= target_pages)
- goto out;
- total_pages_to_unuse = total_pages - target_pages;
+ return totalpages;
+}
+
+static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
+ int *swapid)
+{
+ int ret = -EINVAL;
+ struct swap_info_struct *si = NULL;
+ int si_frontswap_pages;
+ unsigned long total_pages_to_unuse = total;
+ unsigned long pages = 0, pages_to_unuse = 0;
+ int type;
+
+ assert_spin_locked(&swap_lock);
for (type = swap_list.head; type >= 0; type = si->next) {
si = swap_info[type];
si_frontswap_pages = atomic_read(&si->frontswap_pages);
- if (total_pages_to_unuse < si_frontswap_pages)
+ if (total_pages_to_unuse < si_frontswap_pages) {
pages = pages_to_unuse = total_pages_to_unuse;
- else {
+ } else {
pages = si_frontswap_pages;
pages_to_unuse = 0; /* unuse all */
}
/* ensure there is enough RAM to fetch pages from frontswap */
- if (security_vm_enough_memory_mm(current->mm, pages))
+ if (security_vm_enough_memory_mm(current->mm, pages)) {
+ ret = -ENOMEM;
continue;
+ }
vm_unacct_memory(pages);
+ *unused = pages_to_unuse;
+ *swapid = type;
+ ret = 0;
break;
}
- if (type < 0)
- goto out;
- locked = false;
+
+ return ret;
+}
+
+static int __frontswap_shrink(unsigned long target_pages,
+ unsigned long *pages_to_unuse,
+ int *type)
+{
+ unsigned long total_pages = 0, total_pages_to_unuse;
+
+ assert_spin_locked(&swap_lock);
+
+ total_pages = __frontswap_curr_pages();
+ if (total_pages <= target_pages) {
+ /* Nothing to do */
+ *pages_to_unuse = 0;
+ return 0;
+ }
+ total_pages_to_unuse = total_pages - target_pages;
+ return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
+}
+
+/*
+ * Frontswap, like a true swap device, may unnecessarily retain pages
+ * under certain circumstances; "shrink" frontswap is essentially a
+ * "partial swapoff" and works by calling try_to_unuse to attempt to
+ * unuse enough frontswap pages to attempt to -- subject to memory
+ * constraints -- reduce the number of pages in frontswap to the
+ * number given in the parameter target_pages.
+ */
+void frontswap_shrink(unsigned long target_pages)
+{
+ unsigned long pages_to_unuse = 0;
+ int type, ret;
+
+ /*
+ * we don't want to hold swap_lock while doing a very
+ * lengthy try_to_unuse, but swap_list may change
+ * so restart scan from swap_list.head each time
+ */
+ spin_lock(&swap_lock);
+ ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
spin_unlock(&swap_lock);
- try_to_unuse(type, true, pages_to_unuse);
-out:
- if (locked)
- spin_unlock(&swap_lock);
+ if (ret == 0 && pages_to_unuse)
+ try_to_unuse(type, true, pages_to_unuse);
return;
}
EXPORT_SYMBOL(frontswap_shrink);
@@ -281,16 +315,12 @@ EXPORT_SYMBOL(frontswap_shrink);
*/
unsigned long frontswap_curr_pages(void)
{
- int type;
unsigned long totalpages = 0;
- struct swap_info_struct *si = NULL;
spin_lock(&swap_lock);
- for (type = swap_list.head; type >= 0; type = si->next) {
- si = swap_info[type];
- totalpages += atomic_read(&si->frontswap_pages);
- }
+ totalpages = __frontswap_curr_pages();
spin_unlock(&swap_lock);
+
return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);
diff --git a/mm/madvise.c b/mm/madvise.c
index deff1b64a08c..14d260fa0d17 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -15,6 +15,7 @@
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
+#include <linux/file.h>
/*
* Any behaviour which results in changes to the vma->vm_flags needs to
@@ -204,14 +205,16 @@ static long madvise_remove(struct vm_area_struct *vma,
{
loff_t offset;
int error;
+ struct file *f;
*prev = NULL; /* tell sys_madvise we drop mmap_sem */
if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
return -EINVAL;
- if (!vma->vm_file || !vma->vm_file->f_mapping
- || !vma->vm_file->f_mapping->host) {
+ f = vma->vm_file;
+
+ if (!f || !f->f_mapping || !f->f_mapping->host) {
return -EINVAL;
}
@@ -221,11 +224,18 @@ static long madvise_remove(struct vm_area_struct *vma,
offset = (loff_t)(start - vma->vm_start)
+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
- /* filesystem's fallocate may need to take i_mutex */
+ /*
+ * Filesystem's fallocate may need to take i_mutex. We need to
+ * explicitly grab a reference because the vma (and hence the
+ * vma's reference to the file) can go away as soon as we drop
+ * mmap_sem.
+ */
+ get_file(f);
up_read(&current->mm->mmap_sem);
- error = do_fallocate(vma->vm_file,
+ error = do_fallocate(f,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
offset, end - start);
+ fput(f);
down_read(&current->mm->mmap_sem);
return error;
}
diff --git a/mm/memblock.c b/mm/memblock.c
index d4382095f8bd..5cc6731b00cc 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -143,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
MAX_NUMNODES);
}
-/*
- * Free memblock.reserved.regions
- */
-int __init_memblock memblock_free_reserved_regions(void)
-{
- if (memblock.reserved.regions == memblock_reserved_init_regions)
- return 0;
-
- return memblock_free(__pa(memblock.reserved.regions),
- sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
-/*
- * Reserve memblock.reserved.regions
- */
-int __init_memblock memblock_reserve_reserved_regions(void)
-{
- if (memblock.reserved.regions == memblock_reserved_init_regions)
- return 0;
-
- return memblock_reserve(__pa(memblock.reserved.regions),
- sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
type->total_size -= type->regions[r].size;
@@ -184,6 +160,18 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
}
}
+phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+ phys_addr_t *addr)
+{
+ if (memblock.reserved.regions == memblock_reserved_init_regions)
+ return 0;
+
+ *addr = __pa(memblock.reserved.regions);
+
+ return PAGE_ALIGN(sizeof(struct memblock_region) *
+ memblock.reserved.max);
+}
+
/**
* memblock_double_array - double the size of the memblock regions array
* @type: memblock type of the regions array being doubled
@@ -204,6 +192,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
phys_addr_t new_area_size)
{
struct memblock_region *new_array, *old_array;
+ phys_addr_t old_alloc_size, new_alloc_size;
phys_addr_t old_size, new_size, addr;
int use_slab = slab_is_available();
int *in_slab;
@@ -217,6 +206,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
/* Calculate new doubled size */
old_size = type->max * sizeof(struct memblock_region);
new_size = old_size << 1;
+ /*
+ * We need to allocated new one align to PAGE_SIZE,
+ * so we can free them completely later.
+ */
+ old_alloc_size = PAGE_ALIGN(old_size);
+ new_alloc_size = PAGE_ALIGN(new_size);
/* Retrieve the slab flag */
if (type == &memblock.memory)
@@ -245,11 +240,11 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
addr = memblock_find_in_range(new_area_start + new_area_size,
memblock.current_limit,
- new_size, sizeof(phys_addr_t));
+ new_alloc_size, PAGE_SIZE);
if (!addr && new_area_size)
addr = memblock_find_in_range(0,
min(new_area_start, memblock.current_limit),
- new_size, sizeof(phys_addr_t));
+ new_alloc_size, PAGE_SIZE);
new_array = addr ? __va(addr) : 0;
}
@@ -279,13 +274,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
kfree(old_array);
else if (old_array != memblock_memory_init_regions &&
old_array != memblock_reserved_init_regions)
- memblock_free(__pa(old_array), old_size);
+ memblock_free(__pa(old_array), old_alloc_size);
/* Reserve the new array if that comes from the memblock.
* Otherwise, we needn't do it
*/
if (!use_slab)
- BUG_ON(memblock_reserve(addr, new_size));
+ BUG_ON(memblock_reserve(addr, new_alloc_size));
/* Update slab flag */
*in_slab = use_slab;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ab1e7145e290..de4ce7058450 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -345,14 +345,14 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
* Also when FAIL is set do a force kill because something went
* wrong earlier.
*/
-static void kill_procs(struct list_head *to_kill, int doit, int trapno,
+static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
int fail, struct page *page, unsigned long pfn,
int flags)
{
struct to_kill *tk, *next;
list_for_each_entry_safe (tk, next, to_kill, nd) {
- if (doit) {
+ if (forcekill) {
/*
* In case something went wrong with munmapping
* make sure the process doesn't catch the
@@ -858,7 +858,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
struct address_space *mapping;
LIST_HEAD(tokill);
int ret;
- int kill = 1;
+ int kill = 1, forcekill;
struct page *hpage = compound_head(p);
struct page *ppage;
@@ -888,7 +888,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
* be called inside page lock (it's recommended but not enforced).
*/
mapping = page_mapping(hpage);
- if (!PageDirty(hpage) && mapping &&
+ if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
mapping_cap_writeback_dirty(mapping)) {
if (page_mkclean(hpage)) {
SetPageDirty(hpage);
@@ -965,12 +965,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
* Now that the dirty bit has been propagated to the
* struct page and all unmaps done we can decide if
* killing is needed or not. Only kill when the page
- * was dirty, otherwise the tokill list is merely
+ * was dirty or the process is not restartable,
+ * otherwise the tokill list is merely
* freed. When there was a problem unmapping earlier
* use a more force-full uncatchable kill to prevent
* any accesses to the poisoned memory.
*/
- kill_procs(&tokill, !!PageDirty(ppage), trapno,
+ forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL);
+ kill_procs(&tokill, forcekill, trapno,
ret != SWAP_SUCCESS, p, pfn, flags);
return ret;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0d7e3ec8e0f3..427bb291dd0f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -618,7 +618,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
pgdat = hotadd_new_pgdat(nid, start);
ret = -ENOMEM;
if (!pgdat)
- goto out;
+ goto error;
new_pgdat = 1;
}
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index d23415c001bc..405573010f99 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -105,27 +105,35 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
__free_pages_bootmem(pfn_to_page(i), 0);
}
+static unsigned long __init __free_memory_core(phys_addr_t start,
+ phys_addr_t end)
+{
+ unsigned long start_pfn = PFN_UP(start);
+ unsigned long end_pfn = min_t(unsigned long,
+ PFN_DOWN(end), max_low_pfn);
+
+ if (start_pfn > end_pfn)
+ return 0;
+
+ __free_pages_memory(start_pfn, end_pfn);
+
+ return end_pfn - start_pfn;
+}
+
unsigned long __init free_low_memory_core_early(int nodeid)
{
unsigned long count = 0;
- phys_addr_t start, end;
+ phys_addr_t start, end, size;
u64 i;
- /* free reserved array temporarily so that it's treated as free area */
- memblock_free_reserved_regions();
-
- for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
- unsigned long start_pfn = PFN_UP(start);
- unsigned long end_pfn = min_t(unsigned long,
- PFN_DOWN(end), max_low_pfn);
- if (start_pfn < end_pfn) {
- __free_pages_memory(start_pfn, end_pfn);
- count += end_pfn - start_pfn;
- }
- }
+ for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL)
+ count += __free_memory_core(start, end);
+
+ /* free range that is used for reserved array if we allocate it */
+ size = get_allocated_memblock_reserved_regions_info(&start);
+ if (size)
+ count += __free_memory_core(start, start + size);
- /* put region array back? */
- memblock_reserve_reserved_regions();
return count;
}
@@ -274,7 +282,7 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
return ___alloc_bootmem(size, align, goal, limit);
}
-static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
+void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
unsigned long size,
unsigned long align,
unsigned long goal,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 44030096da63..4a4f9219683f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5635,7 +5635,12 @@ static struct page *
__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
int **resultp)
{
- return alloc_page(GFP_HIGHUSER_MOVABLE);
+ gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
+
+ if (PageHighMem(page))
+ gfp_mask |= __GFP_HIGHMEM;
+
+ return alloc_page(gfp_mask);
}
/* [start, end) must belong to a single zone. */
diff --git a/mm/shmem.c b/mm/shmem.c
index a15a466d0d1d..c15b998e5a86 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -264,46 +264,55 @@ static int shmem_radix_tree_replace(struct address_space *mapping,
}
/*
+ * Sometimes, before we decide whether to proceed or to fail, we must check
+ * that an entry was not already brought back from swap by a racing thread.
+ *
+ * Checking page is not enough: by the time a SwapCache page is locked, it
+ * might be reused, and again be SwapCache, using the same swap as before.
+ */
+static bool shmem_confirm_swap(struct address_space *mapping,
+ pgoff_t index, swp_entry_t swap)
+{
+ void *item;
+
+ rcu_read_lock();
+ item = radix_tree_lookup(&mapping->page_tree, index);
+ rcu_read_unlock();
+ return item == swp_to_radix_entry(swap);
+}
+
+/*
* Like add_to_page_cache_locked, but error if expected item has gone.
*/
static int shmem_add_to_page_cache(struct page *page,
struct address_space *mapping,
pgoff_t index, gfp_t gfp, void *expected)
{
- int error = 0;
+ int error;
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageSwapBacked(page));
+ page_cache_get(page);
+ page->mapping = mapping;
+ page->index = index;
+
+ spin_lock_irq(&mapping->tree_lock);
if (!expected)
- error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+ error = radix_tree_insert(&mapping->page_tree, index, page);
+ else
+ error = shmem_radix_tree_replace(mapping, index, expected,
+ page);
if (!error) {
- page_cache_get(page);
- page->mapping = mapping;
- page->index = index;
-
- spin_lock_irq(&mapping->tree_lock);
- if (!expected)
- error = radix_tree_insert(&mapping->page_tree,
- index, page);
- else
- error = shmem_radix_tree_replace(mapping, index,
- expected, page);
- if (!error) {
- mapping->nrpages++;
- __inc_zone_page_state(page, NR_FILE_PAGES);
- __inc_zone_page_state(page, NR_SHMEM);
- spin_unlock_irq(&mapping->tree_lock);
- } else {
- page->mapping = NULL;
- spin_unlock_irq(&mapping->tree_lock);
- page_cache_release(page);
- }
- if (!expected)
- radix_tree_preload_end();
+ mapping->nrpages++;
+ __inc_zone_page_state(page, NR_FILE_PAGES);
+ __inc_zone_page_state(page, NR_SHMEM);
+ spin_unlock_irq(&mapping->tree_lock);
+ } else {
+ page->mapping = NULL;
+ spin_unlock_irq(&mapping->tree_lock);
+ page_cache_release(page);
}
- if (error)
- mem_cgroup_uncharge_cache_page(page);
return error;
}
@@ -1124,9 +1133,9 @@ repeat:
/* We have to do this with page locked to prevent races */
lock_page(page);
if (!PageSwapCache(page) || page_private(page) != swap.val ||
- page->mapping) {
+ !shmem_confirm_swap(mapping, index, swap)) {
error = -EEXIST; /* try again */
- goto failed;
+ goto unlock;
}
if (!PageUptodate(page)) {
error = -EIO;
@@ -1142,9 +1151,12 @@ repeat:
error = mem_cgroup_cache_charge(page, current->mm,
gfp & GFP_RECLAIM_MASK);
- if (!error)
+ if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
gfp, swp_to_radix_entry(swap));
+ /* We already confirmed swap, and make no allocation */
+ VM_BUG_ON(error);
+ }
if (error)
goto failed;
@@ -1181,11 +1193,18 @@ repeat:
__set_page_locked(page);
error = mem_cgroup_cache_charge(page, current->mm,
gfp & GFP_RECLAIM_MASK);
- if (!error)
- error = shmem_add_to_page_cache(page, mapping, index,
- gfp, NULL);
if (error)
goto decused;
+ error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
+ if (!error) {
+ error = shmem_add_to_page_cache(page, mapping, index,
+ gfp, NULL);
+ radix_tree_preload_end();
+ }
+ if (error) {
+ mem_cgroup_uncharge_cache_page(page);
+ goto decused;
+ }
lru_cache_add_anon(page);
spin_lock(&info->lock);
@@ -1245,14 +1264,10 @@ decused:
unacct:
shmem_unacct_blocks(info->flags, 1);
failed:
- if (swap.val && error != -EINVAL) {
- struct page *test = find_get_page(mapping, index);
- if (test && !radix_tree_exceptional_entry(test))
- page_cache_release(test);
- /* Have another try if the entry has changed */
- if (test != swp_to_radix_entry(swap))
- error = -EEXIST;
- }
+ if (swap.val && error != -EINVAL &&
+ !shmem_confirm_swap(mapping, index, swap))
+ error = -EEXIST;
+unlock:
if (page) {
unlock_page(page);
page_cache_release(page);
@@ -1264,7 +1279,7 @@ failed:
spin_unlock(&info->lock);
goto repeat;
}
- if (error == -EEXIST)
+ if (error == -EEXIST) /* from above or from radix_tree_insert */
goto repeat;
return error;
}
@@ -1594,6 +1609,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
+ .nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &page_cache_pipe_buf_ops,
.spd_release = spd_release_page,
@@ -1682,7 +1698,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
if (spd.nr_pages)
error = splice_to_pipe(pipe, &spd);
- splice_shrink_spd(pipe, &spd);
+ splice_shrink_spd(&spd);
if (error > 0) {
*ppos += error;
@@ -1691,98 +1707,6 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
return error;
}
-/*
- * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
- */
-static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
- pgoff_t index, pgoff_t end, int origin)
-{
- struct page *page;
- struct pagevec pvec;
- pgoff_t indices[PAGEVEC_SIZE];
- bool done = false;
- int i;
-
- pagevec_init(&pvec, 0);
- pvec.nr = 1; /* start small: we may be there already */
- while (!done) {
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- pvec.nr, pvec.pages, indices);
- if (!pvec.nr) {
- if (origin == SEEK_DATA)
- index = end;
- break;
- }
- for (i = 0; i < pvec.nr; i++, index++) {
- if (index < indices[i]) {
- if (origin == SEEK_HOLE) {
- done = true;
- break;
- }
- index = indices[i];
- }
- page = pvec.pages[i];
- if (page && !radix_tree_exceptional_entry(page)) {
- if (!PageUptodate(page))
- page = NULL;
- }
- if (index >= end ||
- (page && origin == SEEK_DATA) ||
- (!page && origin == SEEK_HOLE)) {
- done = true;
- break;
- }
- }
- shmem_deswap_pagevec(&pvec);
- pagevec_release(&pvec);
- pvec.nr = PAGEVEC_SIZE;
- cond_resched();
- }
- return index;
-}
-
-static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
-{
- struct address_space *mapping;
- struct inode *inode;
- pgoff_t start, end;
- loff_t new_offset;
-
- if (origin != SEEK_DATA && origin != SEEK_HOLE)
- return generic_file_llseek_size(file, offset, origin,
- MAX_LFS_FILESIZE);
- mapping = file->f_mapping;
- inode = mapping->host;
- mutex_lock(&inode->i_mutex);
- /* We're holding i_mutex so we can access i_size directly */
-
- if (offset < 0)
- offset = -EINVAL;
- else if (offset >= inode->i_size)
- offset = -ENXIO;
- else {
- start = offset >> PAGE_CACHE_SHIFT;
- end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- new_offset = shmem_seek_hole_data(mapping, start, end, origin);
- new_offset <<= PAGE_CACHE_SHIFT;
- if (new_offset > offset) {
- if (new_offset < inode->i_size)
- offset = new_offset;
- else if (origin == SEEK_DATA)
- offset = -ENXIO;
- else
- offset = inode->i_size;
- }
- }
-
- if (offset >= 0 && offset != file->f_pos) {
- file->f_pos = offset;
- file->f_version = 0;
- }
- mutex_unlock(&inode->i_mutex);
- return offset;
-}
-
static long shmem_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
@@ -1953,7 +1877,7 @@ static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
}
static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
- struct nameidata *nd)
+ bool excl)
{
return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
@@ -2786,7 +2710,7 @@ static const struct address_space_operations shmem_aops = {
static const struct file_operations shmem_file_operations = {
.mmap = shmem_mmap,
#ifdef CONFIG_TMPFS
- .llseek = shmem_file_llseek,
+ .llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = shmem_file_aio_read,
diff --git a/mm/sparse.c b/mm/sparse.c
index 6a4bf9160e85..c7bb952400c8 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -275,8 +275,9 @@ static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
unsigned long size)
{
- pg_data_t *host_pgdat;
- unsigned long goal;
+ unsigned long goal, limit;
+ unsigned long *p;
+ int nid;
/*
* A page may contain usemaps for other sections preventing the
* page being freed and making a section unremovable while
@@ -287,10 +288,17 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
* from the same section as the pgdat where possible to avoid
* this problem.
*/
- goal = __pa(pgdat) & PAGE_SECTION_MASK;
- host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
- return __alloc_bootmem_node_nopanic(host_pgdat, size,
- SMP_CACHE_BYTES, goal);
+ goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
+ limit = goal + (1UL << PA_SECTION_SHIFT);
+ nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
+again:
+ p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
+ SMP_CACHE_BYTES, goal, limit);
+ if (!p && limit) {
+ limit = 0;
+ goto again;
+ }
+ return p;
}
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index eeb3bc9d1d36..347b3ff2a478 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1567,7 +1567,8 @@ static int vmscan_swappiness(struct scan_control *sc)
* by looking at the fraction of the pages scanned we did rotate back
* onto the active list instead of evict.
*
- * nr[0] = anon pages to scan; nr[1] = file pages to scan
+ * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
+ * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
*/
static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
unsigned long *nr)
@@ -2537,7 +2538,7 @@ loop_again:
* consider it to be no longer congested. It's
* possible there are dirty pages backed by
* congested BDIs but as pressure is relieved,
- * spectulatively avoid congestion waits
+ * speculatively avoid congestion waits
*/
zone_clear_flag(zone, ZONE_CONGESTED);
if (i <= *classzone_idx)
@@ -2688,7 +2689,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
* them before going back to sleep.
*/
set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
- schedule();
+
+ if (!kthread_should_stop())
+ schedule();
+
set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
} else {
if (remaining)
@@ -2955,14 +2959,17 @@ int kswapd_run(int nid)
}
/*
- * Called by memory hotplug when all memory in a node is offlined.
+ * Called by memory hotplug when all memory in a node is offlined. Caller must
+ * hold lock_memory_hotplug().
*/
void kswapd_stop(int nid)
{
struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
- if (kswapd)
+ if (kswapd) {
kthread_stop(kswapd);
+ NODE_DATA(nid)->kswapd = NULL;
+ }
}
static int __init kswapd_init(void)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 6089f0cf23b4..9096bcb08132 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -403,6 +403,9 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
break;
case NETDEV_DOWN:
+ if (dev->features & NETIF_F_HW_VLAN_FILTER)
+ vlan_vid_del(dev, 0);
+
/* Put all VLANs for this dev in the down state too. */
for (i = 0; i < VLAN_N_VID; i++) {
vlandev = vlan_group_get_device(grp, i);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index da1bc9c3cf38..73a2a83ee2da 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -681,10 +681,7 @@ static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *n
if (!netpoll)
goto out;
- netpoll->dev = real_dev;
- strlcpy(netpoll->dev_name, real_dev->name, IFNAMSIZ);
-
- err = __netpoll_setup(netpoll);
+ err = __netpoll_setup(netpoll, real_dev);
if (err) {
kfree(netpoll);
goto out;
diff --git a/net/9p/client.c b/net/9p/client.c
index a170893d70e0..8260f132b32e 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1548,7 +1548,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
kernel_buf = 1;
indata = data;
} else
- indata = (char *)udata;
+ indata = (__force char *)udata;
/*
* response header len is 11
* PDU Header(7) + IO Size (4)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2a167658bb95..35b8911b1c8e 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -212,7 +212,7 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
* this takes a list of pages.
* @sg: scatter/gather list to pack into
* @start: which segment of the sg_list to start at
- * @**pdata: a list of pages to add into sg.
+ * @pdata: a list of pages to add into sg.
* @nr_pages: number of pages to pack into the scatter/gather list
* @data: data to pack into scatter/gather list
* @count: amount of data to pack into the scatter/gather list
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 86852963b7f7..33475291c9c1 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -129,8 +129,8 @@ found:
/**
* atalk_find_or_insert_socket - Try to find a socket matching ADDR
- * @sk - socket to insert in the list if it is not there already
- * @sat - address to search for
+ * @sk: socket to insert in the list if it is not there already
+ * @sat: address to search for
*
* Try to find a socket matching ADDR in the socket list, if found then return
* it. If not, insert SK into the socket list.
@@ -1066,8 +1066,8 @@ static int atalk_release(struct socket *sock)
/**
* atalk_pick_and_bind_port - Pick a source port when one is not given
- * @sk - socket to insert into the tables
- * @sat - address to search for
+ * @sk: socket to insert into the tables
+ * @sat: address to search for
*
* Pick a source port when one is not given. If we can find a suitable free
* one, we insert the socket into the tables using it.
diff --git a/net/atm/lec.c b/net/atm/lec.c
index a7d172105c99..2e3d942e77f1 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -231,9 +231,11 @@ static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
if (skb_headroom(skb) < 2) {
pr_debug("reallocating skb\n");
skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
- kfree_skb(skb);
- if (skb2 == NULL)
+ if (unlikely(!skb2)) {
+ kfree_skb(skb);
return NETDEV_TX_OK;
+ }
+ consume_skb(skb);
skb = skb2;
}
skb_push(skb, 2);
@@ -1602,7 +1604,7 @@ static void lec_arp_expire_vcc(unsigned long data)
{
unsigned long flags;
struct lec_arp_table *to_remove = (struct lec_arp_table *)data;
- struct lec_priv *priv = (struct lec_priv *)to_remove->priv;
+ struct lec_priv *priv = to_remove->priv;
del_timer(&to_remove->timer);
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index ce1e59fdae7b..226dca989448 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -283,7 +283,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
kfree_skb(n);
goto nospace;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = n;
if (skb == NULL)
return DROP_PACKET;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 051f7abae66d..779095ded689 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -842,6 +842,7 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
case AX25_P_NETROM:
if (ax25_protocol_is_registered(AX25_P_NETROM))
return -ESOCKTNOSUPPORT;
+ break;
#endif
#ifdef CONFIG_ROSE_MODULE
case AX25_P_ROSE:
diff --git a/net/ax25/ax25_addr.c b/net/ax25/ax25_addr.c
index 9162409559cf..e7c9b0ea17a1 100644
--- a/net/ax25/ax25_addr.c
+++ b/net/ax25/ax25_addr.c
@@ -189,8 +189,10 @@ const unsigned char *ax25_addr_parse(const unsigned char *buf, int len,
digi->ndigi = 0;
while (!(buf[-1] & AX25_EBIT)) {
- if (d >= AX25_MAX_DIGIS) return NULL; /* Max of 6 digis */
- if (len < 7) return NULL; /* Short packet */
+ if (d >= AX25_MAX_DIGIS)
+ return NULL;
+ if (len < AX25_ADDR_LEN)
+ return NULL;
memcpy(&digi->calls[d], buf, AX25_ADDR_LEN);
digi->ndigi = d + 1;
diff --git a/net/ax25/ax25_out.c b/net/ax25/ax25_out.c
index be8a25e0db65..be2acab9be9d 100644
--- a/net/ax25/ax25_out.c
+++ b/net/ax25/ax25_out.c
@@ -350,7 +350,7 @@ void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
- kfree_skb(skb);
+ consume_skb(skb);
skb = skbn;
}
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a65588040b9e..d39097737e38 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -474,7 +474,7 @@ struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
if (skb->sk != NULL)
skb_set_owner_w(skbn, skb->sk);
- kfree_skb(skb);
+ consume_skb(skb);
skb = skbn;
}
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 6d5c1940667d..8676d2b1d574 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -19,11 +19,10 @@
#
obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
-batman-adv-y += bat_debugfs.o
batman-adv-y += bat_iv_ogm.o
-batman-adv-y += bat_sysfs.o
batman-adv-y += bitarray.o
batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
+batman-adv-y += debugfs.o
batman-adv-y += gateway_client.o
batman-adv-y += gateway_common.o
batman-adv-y += hard-interface.o
@@ -35,6 +34,7 @@ batman-adv-y += ring_buffer.o
batman-adv-y += routing.o
batman-adv-y += send.o
batman-adv-y += soft-interface.o
+batman-adv-y += sysfs.o
batman-adv-y += translation-table.o
batman-adv-y += unicast.o
batman-adv-y += vis.o
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 9852a688ba43..a0ba3bff9b36 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,12 +15,11 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_BAT_ALGO_H_
#define _NET_BATMAN_ADV_BAT_ALGO_H_
-int bat_iv_init(void);
+int batadv_iv_init(void);
#endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
deleted file mode 100644
index 3b588f86d770..000000000000
--- a/net/batman-adv/bat_debugfs.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-
-#include <linux/debugfs.h>
-
-#include "bat_debugfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "soft-interface.h"
-#include "vis.h"
-#include "icmp_socket.h"
-#include "bridge_loop_avoidance.h"
-
-static struct dentry *bat_debugfs;
-
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-#define LOG_BUFF_MASK (log_buff_len-1)
-#define LOG_BUFF(idx) (debug_log->log_buff[(idx) & LOG_BUFF_MASK])
-
-static int log_buff_len = LOG_BUF_LEN;
-
-static void emit_log_char(struct debug_log *debug_log, char c)
-{
- LOG_BUFF(debug_log->log_end) = c;
- debug_log->log_end++;
-
- if (debug_log->log_end - debug_log->log_start > log_buff_len)
- debug_log->log_start = debug_log->log_end - log_buff_len;
-}
-
-__printf(2, 3)
-static int fdebug_log(struct debug_log *debug_log, const char *fmt, ...)
-{
- va_list args;
- static char debug_log_buf[256];
- char *p;
-
- if (!debug_log)
- return 0;
-
- spin_lock_bh(&debug_log->lock);
- va_start(args, fmt);
- vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
- va_end(args);
-
- for (p = debug_log_buf; *p != 0; p++)
- emit_log_char(debug_log, *p);
-
- spin_unlock_bh(&debug_log->lock);
-
- wake_up(&debug_log->queue_wait);
-
- return 0;
-}
-
-int debug_log(struct bat_priv *bat_priv, const char *fmt, ...)
-{
- va_list args;
- char tmp_log_buf[256];
-
- va_start(args, fmt);
- vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
- fdebug_log(bat_priv->debug_log, "[%10u] %s",
- jiffies_to_msecs(jiffies), tmp_log_buf);
- va_end(args);
-
- return 0;
-}
-
-static int log_open(struct inode *inode, struct file *file)
-{
- nonseekable_open(inode, file);
- file->private_data = inode->i_private;
- inc_module_count();
- return 0;
-}
-
-static int log_release(struct inode *inode, struct file *file)
-{
- dec_module_count();
- return 0;
-}
-
-static ssize_t log_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct bat_priv *bat_priv = file->private_data;
- struct debug_log *debug_log = bat_priv->debug_log;
- int error, i = 0;
- char c;
-
- if ((file->f_flags & O_NONBLOCK) &&
- !(debug_log->log_end - debug_log->log_start))
- return -EAGAIN;
-
- if (!buf)
- return -EINVAL;
-
- if (count == 0)
- return 0;
-
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
-
- error = wait_event_interruptible(debug_log->queue_wait,
- (debug_log->log_start - debug_log->log_end));
-
- if (error)
- return error;
-
- spin_lock_bh(&debug_log->lock);
-
- while ((!error) && (i < count) &&
- (debug_log->log_start != debug_log->log_end)) {
- c = LOG_BUFF(debug_log->log_start);
-
- debug_log->log_start++;
-
- spin_unlock_bh(&debug_log->lock);
-
- error = __put_user(c, buf);
-
- spin_lock_bh(&debug_log->lock);
-
- buf++;
- i++;
-
- }
-
- spin_unlock_bh(&debug_log->lock);
-
- if (!error)
- return i;
-
- return error;
-}
-
-static unsigned int log_poll(struct file *file, poll_table *wait)
-{
- struct bat_priv *bat_priv = file->private_data;
- struct debug_log *debug_log = bat_priv->debug_log;
-
- poll_wait(file, &debug_log->queue_wait, wait);
-
- if (debug_log->log_end - debug_log->log_start)
- return POLLIN | POLLRDNORM;
-
- return 0;
-}
-
-static const struct file_operations log_fops = {
- .open = log_open,
- .release = log_release,
- .read = log_read,
- .poll = log_poll,
- .llseek = no_llseek,
-};
-
-static int debug_log_setup(struct bat_priv *bat_priv)
-{
- struct dentry *d;
-
- if (!bat_priv->debug_dir)
- goto err;
-
- bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
- if (!bat_priv->debug_log)
- goto err;
-
- spin_lock_init(&bat_priv->debug_log->lock);
- init_waitqueue_head(&bat_priv->debug_log->queue_wait);
-
- d = debugfs_create_file("log", S_IFREG | S_IRUSR,
- bat_priv->debug_dir, bat_priv, &log_fops);
- if (d)
- goto err;
-
- return 0;
-
-err:
- return 1;
-}
-
-static void debug_log_cleanup(struct bat_priv *bat_priv)
-{
- kfree(bat_priv->debug_log);
- bat_priv->debug_log = NULL;
-}
-#else /* CONFIG_BATMAN_ADV_DEBUG */
-static int debug_log_setup(struct bat_priv *bat_priv)
-{
- bat_priv->debug_log = NULL;
- return 0;
-}
-
-static void debug_log_cleanup(struct bat_priv *bat_priv)
-{
- return;
-}
-#endif
-
-static int bat_algorithms_open(struct inode *inode, struct file *file)
-{
- return single_open(file, bat_algo_seq_print_text, NULL);
-}
-
-static int originators_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, orig_seq_print_text, net_dev);
-}
-
-static int gateways_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, gw_client_seq_print_text, net_dev);
-}
-
-static int transtable_global_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, tt_global_seq_print_text, net_dev);
-}
-
-#ifdef CONFIG_BATMAN_ADV_BLA
-static int bla_claim_table_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, bla_claim_table_seq_print_text, net_dev);
-}
-#endif
-
-static int transtable_local_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, tt_local_seq_print_text, net_dev);
-}
-
-static int vis_data_open(struct inode *inode, struct file *file)
-{
- struct net_device *net_dev = (struct net_device *)inode->i_private;
- return single_open(file, vis_seq_print_text, net_dev);
-}
-
-struct bat_debuginfo {
- struct attribute attr;
- const struct file_operations fops;
-};
-
-#define BAT_DEBUGINFO(_name, _mode, _open) \
-struct bat_debuginfo bat_debuginfo_##_name = { \
- .attr = { .name = __stringify(_name), \
- .mode = _mode, }, \
- .fops = { .owner = THIS_MODULE, \
- .open = _open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
- } \
-};
-
-static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
-static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
-static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
-static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
-#ifdef CONFIG_BATMAN_ADV_BLA
-static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
-#endif
-static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
-static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
-
-static struct bat_debuginfo *mesh_debuginfos[] = {
- &bat_debuginfo_originators,
- &bat_debuginfo_gateways,
- &bat_debuginfo_transtable_global,
-#ifdef CONFIG_BATMAN_ADV_BLA
- &bat_debuginfo_bla_claim_table,
-#endif
- &bat_debuginfo_transtable_local,
- &bat_debuginfo_vis_data,
- NULL,
-};
-
-void debugfs_init(void)
-{
- struct bat_debuginfo *bat_debug;
- struct dentry *file;
-
- bat_debugfs = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
- if (bat_debugfs == ERR_PTR(-ENODEV))
- bat_debugfs = NULL;
-
- if (!bat_debugfs)
- goto out;
-
- bat_debug = &bat_debuginfo_routing_algos;
- file = debugfs_create_file(bat_debug->attr.name,
- S_IFREG | bat_debug->attr.mode,
- bat_debugfs, NULL, &bat_debug->fops);
- if (!file)
- pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
-
-out:
- return;
-}
-
-void debugfs_destroy(void)
-{
- if (bat_debugfs) {
- debugfs_remove_recursive(bat_debugfs);
- bat_debugfs = NULL;
- }
-}
-
-int debugfs_add_meshif(struct net_device *dev)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct bat_debuginfo **bat_debug;
- struct dentry *file;
-
- if (!bat_debugfs)
- goto out;
-
- bat_priv->debug_dir = debugfs_create_dir(dev->name, bat_debugfs);
- if (!bat_priv->debug_dir)
- goto out;
-
- bat_socket_setup(bat_priv);
- debug_log_setup(bat_priv);
-
- for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) {
- file = debugfs_create_file(((*bat_debug)->attr).name,
- S_IFREG | ((*bat_debug)->attr).mode,
- bat_priv->debug_dir,
- dev, &(*bat_debug)->fops);
- if (!file) {
- bat_err(dev, "Can't add debugfs file: %s/%s\n",
- dev->name, ((*bat_debug)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-rem_attr:
- debugfs_remove_recursive(bat_priv->debug_dir);
- bat_priv->debug_dir = NULL;
-out:
-#ifdef CONFIG_DEBUG_FS
- return -ENOMEM;
-#else
- return 0;
-#endif /* CONFIG_DEBUG_FS */
-}
-
-void debugfs_del_meshif(struct net_device *dev)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
-
- debug_log_cleanup(bat_priv);
-
- if (bat_debugfs) {
- debugfs_remove_recursive(bat_priv->debug_dir);
- bat_priv->debug_dir = NULL;
- }
-}
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index dc53798ebb47..e877af8bdd1e 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -30,15 +28,16 @@
#include "send.h"
#include "bat_algo.h"
-static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface,
- const uint8_t *neigh_addr,
- struct orig_node *orig_node,
- struct orig_node *orig_neigh,
- uint32_t seqno)
+static struct batadv_neigh_node *
+batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
+ const uint8_t *neigh_addr,
+ struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh, __be32 seqno)
{
- struct neigh_node *neigh_node;
+ struct batadv_neigh_node *neigh_node;
- neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
+ neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr,
+ ntohl(seqno));
if (!neigh_node)
goto out;
@@ -55,30 +54,30 @@ out:
return neigh_node;
}
-static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
+static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
{
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
uint32_t random_seqno;
- int res = -1;
+ int res = -ENOMEM;
/* randomize initial seqno to avoid collision */
get_random_bytes(&random_seqno, sizeof(random_seqno));
atomic_set(&hard_iface->seqno, random_seqno);
- hard_iface->packet_len = BATMAN_OGM_HLEN;
+ hard_iface->packet_len = BATADV_OGM_HLEN;
hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
if (!hard_iface->packet_buff)
goto out;
- batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- batman_ogm_packet->header.packet_type = BAT_IV_OGM;
- batman_ogm_packet->header.version = COMPAT_VERSION;
- batman_ogm_packet->header.ttl = 2;
- batman_ogm_packet->flags = NO_FLAGS;
- batman_ogm_packet->tq = TQ_MAX_VALUE;
- batman_ogm_packet->tt_num_changes = 0;
- batman_ogm_packet->ttvn = 0;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+ batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
+ batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
+ batadv_ogm_packet->header.ttl = 2;
+ batadv_ogm_packet->flags = BATADV_NO_FLAGS;
+ batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
+ batadv_ogm_packet->tt_num_changes = 0;
+ batadv_ogm_packet->ttvn = 0;
res = 0;
@@ -86,133 +85,152 @@ out:
return res;
}
-static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
+static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
{
kfree(hard_iface->packet_buff);
hard_iface->packet_buff = NULL;
}
-static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface)
+static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
{
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
- batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- memcpy(batman_ogm_packet->orig,
+ batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+ memcpy(batadv_ogm_packet->orig,
hard_iface->net_dev->dev_addr, ETH_ALEN);
- memcpy(batman_ogm_packet->prev_sender,
+ memcpy(batadv_ogm_packet->prev_sender,
hard_iface->net_dev->dev_addr, ETH_ALEN);
}
-static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
+static void
+batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
{
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
- batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
- batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
- batman_ogm_packet->header.ttl = TTL;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
+ batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
+ batadv_ogm_packet->header.ttl = BATADV_TTL;
}
/* when do we schedule our own ogm to be sent */
-static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
+static unsigned long
+batadv_iv_ogm_emit_send_time(const struct batadv_priv *bat_priv)
{
- return jiffies + msecs_to_jiffies(
- atomic_read(&bat_priv->orig_interval) -
- JITTER + (random32() % 2*JITTER));
+ unsigned int msecs;
+
+ msecs = atomic_read(&bat_priv->orig_interval) - BATADV_JITTER;
+ msecs += (random32() % 2 * BATADV_JITTER);
+
+ return jiffies + msecs_to_jiffies(msecs);
}
/* when do we schedule a ogm packet to be sent */
-static unsigned long bat_iv_ogm_fwd_send_time(void)
+static unsigned long batadv_iv_ogm_fwd_send_time(void)
{
- return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
+ return jiffies + msecs_to_jiffies(random32() % (BATADV_JITTER / 2));
}
/* apply hop penalty for a normal link */
-static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
+static uint8_t batadv_hop_penalty(uint8_t tq,
+ const struct batadv_priv *bat_priv)
{
int hop_penalty = atomic_read(&bat_priv->hop_penalty);
- return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
+ int new_tq;
+
+ new_tq = tq * (BATADV_TQ_MAX_VALUE - hop_penalty);
+ new_tq /= BATADV_TQ_MAX_VALUE;
+
+ return new_tq;
}
/* is there another aggregated packet here? */
-static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
- int tt_num_changes)
+static int batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
+ int tt_num_changes)
{
- int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);
+ int next_buff_pos = 0;
+
+ next_buff_pos += buff_pos + BATADV_OGM_HLEN;
+ next_buff_pos += batadv_tt_len(tt_num_changes);
return (next_buff_pos <= packet_len) &&
- (next_buff_pos <= MAX_AGGREGATION_BYTES);
+ (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
}
/* send a batman ogm to a given interface */
-static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
- struct hard_iface *hard_iface)
+static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
+ struct batadv_hard_iface *hard_iface)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
char *fwd_str;
uint8_t packet_num;
int16_t buff_pos;
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
struct sk_buff *skb;
- if (hard_iface->if_status != IF_ACTIVE)
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
return;
packet_num = 0;
buff_pos = 0;
- batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
/* adjust all flags and log packets */
- while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
- batman_ogm_packet->tt_num_changes)) {
+ while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
+ batadv_ogm_packet->tt_num_changes)) {
/* we might have aggregated direct link packets with an
- * ordinary base packet */
+ * ordinary base packet
+ */
if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
(forw_packet->if_incoming == hard_iface))
- batman_ogm_packet->flags |= DIRECTLINK;
+ batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
else
- batman_ogm_packet->flags &= ~DIRECTLINK;
+ batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
"Sending own" :
"Forwarding"));
- bat_dbg(DBG_BATMAN, bat_priv,
- "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
- fwd_str, (packet_num > 0 ? "aggregated " : ""),
- batman_ogm_packet->orig,
- ntohl(batman_ogm_packet->seqno),
- batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
- (batman_ogm_packet->flags & DIRECTLINK ?
- "on" : "off"),
- batman_ogm_packet->ttvn, hard_iface->net_dev->name,
- hard_iface->net_dev->dev_addr);
-
- buff_pos += BATMAN_OGM_HLEN +
- tt_len(batman_ogm_packet->tt_num_changes);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
+ fwd_str, (packet_num > 0 ? "aggregated " : ""),
+ batadv_ogm_packet->orig,
+ ntohl(batadv_ogm_packet->seqno),
+ batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
+ (batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
+ "on" : "off"),
+ batadv_ogm_packet->ttvn, hard_iface->net_dev->name,
+ hard_iface->net_dev->dev_addr);
+
+ buff_pos += BATADV_OGM_HLEN;
+ buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
packet_num++;
- batman_ogm_packet = (struct batman_ogm_packet *)
+ batadv_ogm_packet = (struct batadv_ogm_packet *)
(forw_packet->skb->data + buff_pos);
}
/* create clone because function is called more than once */
skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
- if (skb)
- send_skb_packet(skb, hard_iface, broadcast_addr);
+ if (skb) {
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES,
+ skb->len + ETH_HLEN);
+ batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr);
+ }
}
/* send a batman ogm packet */
-static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
+static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
struct net_device *soft_iface;
- struct bat_priv *bat_priv;
- struct hard_iface *primary_if = NULL;
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_ogm_packet *batadv_ogm_packet;
unsigned char directlink;
- batman_ogm_packet = (struct batman_ogm_packet *)
+ batadv_ogm_packet = (struct batadv_ogm_packet *)
(forw_packet->skb->data);
- directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+ directlink = (batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0);
if (!forw_packet->if_incoming) {
pr_err("Error - can't forward packet: incoming iface not specified\n");
@@ -222,31 +240,33 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
soft_iface = forw_packet->if_incoming->soft_iface;
bat_priv = netdev_priv(soft_iface);
- if (forw_packet->if_incoming->if_status != IF_ACTIVE)
+ if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
goto out;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- /* multihomed peer assumed */
- /* non-primary OGMs are only broadcasted on their interface */
- if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
+ /* multihomed peer assumed
+ * non-primary OGMs are only broadcasted on their interface
+ */
+ if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
(forw_packet->own && (forw_packet->if_incoming != primary_if))) {
/* FIXME: what about aggregated packets ? */
- bat_dbg(DBG_BATMAN, bat_priv,
- "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
- (forw_packet->own ? "Sending own" : "Forwarding"),
- batman_ogm_packet->orig,
- ntohl(batman_ogm_packet->seqno),
- batman_ogm_packet->header.ttl,
- forw_packet->if_incoming->net_dev->name,
- forw_packet->if_incoming->net_dev->dev_addr);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
+ (forw_packet->own ? "Sending own" : "Forwarding"),
+ batadv_ogm_packet->orig,
+ ntohl(batadv_ogm_packet->seqno),
+ batadv_ogm_packet->header.ttl,
+ forw_packet->if_incoming->net_dev->name,
+ forw_packet->if_incoming->net_dev->dev_addr);
/* skb is only used once and than forw_packet is free'd */
- send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
- broadcast_addr);
+ batadv_send_skb_packet(forw_packet->skb,
+ forw_packet->if_incoming,
+ batadv_broadcast_addr);
forw_packet->skb = NULL;
goto out;
@@ -254,70 +274,70 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
/* broadcast on every interface */
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;
- bat_iv_ogm_send_to_if(forw_packet, hard_iface);
+ batadv_iv_ogm_send_to_if(forw_packet, hard_iface);
}
rcu_read_unlock();
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
/* return true if new_packet can be aggregated with forw_packet */
-static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
- *new_batman_ogm_packet,
- struct bat_priv *bat_priv,
- int packet_len, unsigned long send_time,
- bool directlink,
- const struct hard_iface *if_incoming,
- const struct forw_packet *forw_packet)
+static bool
+batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
+ struct batadv_priv *bat_priv,
+ int packet_len, unsigned long send_time,
+ bool directlink,
+ const struct batadv_hard_iface *if_incoming,
+ const struct batadv_forw_packet *forw_packet)
{
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
int aggregated_bytes = forw_packet->packet_len + packet_len;
- struct hard_iface *primary_if = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
bool res = false;
+ unsigned long aggregation_end_time;
- batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)forw_packet->skb->data;
+ aggregation_end_time = send_time;
+ aggregation_end_time += msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
- /**
- * we can aggregate the current packet to this aggregated packet
+ /* we can aggregate the current packet to this aggregated packet
* if:
*
* - the send time is within our MAX_AGGREGATION_MS time
* - the resulting packet wont be bigger than
* MAX_AGGREGATION_BYTES
*/
-
if (time_before(send_time, forw_packet->send_time) &&
- time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
- forw_packet->send_time) &&
- (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {
+ time_after_eq(aggregation_end_time, forw_packet->send_time) &&
+ (aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) {
- /**
- * check aggregation compatibility
+ /* check aggregation compatibility
* -> direct link packets are broadcasted on
* their interface only
* -> aggregate packet if the current packet is
* a "global" packet as well as the base
* packet
*/
-
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* packets without direct link flag and high TTL
- * are flooded through the net */
+ * are flooded through the net
+ */
if ((!directlink) &&
- (!(batman_ogm_packet->flags & DIRECTLINK)) &&
- (batman_ogm_packet->header.ttl != 1) &&
+ (!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
+ (batadv_ogm_packet->header.ttl != 1) &&
/* own packets originating non-primary
- * interfaces leave only that interface */
+ * interfaces leave only that interface
+ */
((!forw_packet->own) ||
(forw_packet->if_incoming == primary_if))) {
res = true;
@@ -325,15 +345,17 @@ static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
}
/* if the incoming packet is sent via this one
- * interface only - we still can aggregate */
+ * interface only - we still can aggregate
+ */
if ((directlink) &&
- (new_batman_ogm_packet->header.ttl == 1) &&
+ (new_bat_ogm_packet->header.ttl == 1) &&
(forw_packet->if_incoming == if_incoming) &&
/* packets from direct neighbors or
* own secondary interface packets
- * (= secondary interface packets in general) */
- (batman_ogm_packet->flags & DIRECTLINK ||
+ * (= secondary interface packets in general)
+ */
+ (batadv_ogm_packet->flags & BATADV_DIRECTLINK ||
(forw_packet->own &&
forw_packet->if_incoming != primary_if))) {
res = true;
@@ -343,29 +365,30 @@ static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return res;
}
/* create a new aggregated packet and add this packet to it */
-static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
- int packet_len, unsigned long send_time,
- bool direct_link,
- struct hard_iface *if_incoming,
- int own_packet)
+static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
+ int packet_len, unsigned long send_time,
+ bool direct_link,
+ struct batadv_hard_iface *if_incoming,
+ int own_packet)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct forw_packet *forw_packet_aggr;
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_forw_packet *forw_packet_aggr;
unsigned char *skb_buff;
+ unsigned int skb_size;
if (!atomic_inc_not_zero(&if_incoming->refcount))
return;
/* own packet should always be scheduled */
if (!own_packet) {
- if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "batman packet queue full\n");
+ if (!batadv_atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "batman packet queue full\n");
goto out;
}
}
@@ -378,12 +401,12 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
}
if ((atomic_read(&bat_priv->aggregated_ogms)) &&
- (packet_len < MAX_AGGREGATION_BYTES))
- forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
- ETH_HLEN);
+ (packet_len < BATADV_MAX_AGGREGATION_BYTES))
+ skb_size = BATADV_MAX_AGGREGATION_BYTES + ETH_HLEN;
else
- forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN);
+ skb_size = packet_len + ETH_HLEN;
+ forw_packet_aggr->skb = dev_alloc_skb(skb_size);
if (!forw_packet_aggr->skb) {
if (!own_packet)
atomic_inc(&bat_priv->batman_queue_left);
@@ -401,7 +424,7 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
forw_packet_aggr->own = own_packet;
forw_packet_aggr->if_incoming = if_incoming;
forw_packet_aggr->num_packets = 0;
- forw_packet_aggr->direct_link_flags = NO_FLAGS;
+ forw_packet_aggr->direct_link_flags = BATADV_NO_FLAGS;
forw_packet_aggr->send_time = send_time;
/* save packet direct link flag status */
@@ -415,20 +438,20 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
- send_outstanding_bat_ogm_packet);
- queue_delayed_work(bat_event_workqueue,
+ batadv_send_outstanding_bat_ogm_packet);
+ queue_delayed_work(batadv_event_workqueue,
&forw_packet_aggr->delayed_work,
send_time - jiffies);
return;
out:
- hardif_free_ref(if_incoming);
+ batadv_hardif_free_ref(if_incoming);
}
/* aggregate a new packet into the existing ogm packet */
-static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
- const unsigned char *packet_buff,
- int packet_len, bool direct_link)
+static void batadv_iv_ogm_aggregate(struct batadv_forw_packet *forw_packet_aggr,
+ const unsigned char *packet_buff,
+ int packet_len, bool direct_link)
{
unsigned char *skb_buff;
@@ -443,22 +466,25 @@ static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
(1 << forw_packet_aggr->num_packets);
}
-static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
- unsigned char *packet_buff,
- int packet_len, struct hard_iface *if_incoming,
- int own_packet, unsigned long send_time)
+static void batadv_iv_ogm_queue_add(struct batadv_priv *bat_priv,
+ unsigned char *packet_buff,
+ int packet_len,
+ struct batadv_hard_iface *if_incoming,
+ int own_packet, unsigned long send_time)
{
- /**
- * _aggr -> pointer to the packet we want to aggregate with
+ /* _aggr -> pointer to the packet we want to aggregate with
* _pos -> pointer to the position in the queue
*/
- struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
+ struct batadv_forw_packet *forw_packet_aggr = NULL;
+ struct batadv_forw_packet *forw_packet_pos = NULL;
struct hlist_node *tmp_node;
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_ogm_packet *batadv_ogm_packet;
bool direct_link;
+ unsigned long max_aggregation_jiffies;
- batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
- direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
+ direct_link = batadv_ogm_packet->flags & BATADV_DIRECTLINK ? 1 : 0;
+ max_aggregation_jiffies = msecs_to_jiffies(BATADV_MAX_AGGREGATION_MS);
/* find position for the packet in the forward queue */
spin_lock_bh(&bat_priv->forw_bat_list_lock);
@@ -466,11 +492,11 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
hlist_for_each_entry(forw_packet_pos, tmp_node,
&bat_priv->forw_bat_list, list) {
- if (bat_iv_ogm_can_aggregate(batman_ogm_packet,
- bat_priv, packet_len,
- send_time, direct_link,
- if_incoming,
- forw_packet_pos)) {
+ if (batadv_iv_ogm_can_aggregate(batadv_ogm_packet,
+ bat_priv, packet_len,
+ send_time, direct_link,
+ if_incoming,
+ forw_packet_pos)) {
forw_packet_aggr = forw_packet_pos;
break;
}
@@ -478,42 +504,41 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
}
/* nothing to aggregate with - either aggregation disabled or no
- * suitable aggregation packet found */
+ * suitable aggregation packet found
+ */
if (!forw_packet_aggr) {
/* the following section can run without the lock */
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
- /**
- * if we could not aggregate this packet with one of the others
+ /* if we could not aggregate this packet with one of the others
* we hold it back for a while, so that it might be aggregated
* later on
*/
- if ((!own_packet) &&
- (atomic_read(&bat_priv->aggregated_ogms)))
- send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);
+ if (!own_packet && atomic_read(&bat_priv->aggregated_ogms))
+ send_time += max_aggregation_jiffies;
- bat_iv_ogm_aggregate_new(packet_buff, packet_len,
- send_time, direct_link,
- if_incoming, own_packet);
+ batadv_iv_ogm_aggregate_new(packet_buff, packet_len,
+ send_time, direct_link,
+ if_incoming, own_packet);
} else {
- bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
- packet_len, direct_link);
+ batadv_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
+ packet_len, direct_link);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}
}
-static void bat_iv_ogm_forward(struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- struct batman_ogm_packet *batman_ogm_packet,
- bool is_single_hop_neigh,
- bool is_from_best_next_hop,
- struct hard_iface *if_incoming)
+static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ bool is_single_hop_neigh,
+ bool is_from_best_next_hop,
+ struct batadv_hard_iface *if_incoming)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
uint8_t tt_num_changes;
- if (batman_ogm_packet->header.ttl <= 1) {
- bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
+ if (batadv_ogm_packet->header.ttl <= 1) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
return;
}
@@ -525,110 +550,113 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node,
* simply drop the ogm.
*/
if (is_single_hop_neigh)
- batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP;
+ batadv_ogm_packet->flags |= BATADV_NOT_BEST_NEXT_HOP;
else
return;
}
- tt_num_changes = batman_ogm_packet->tt_num_changes;
+ tt_num_changes = batadv_ogm_packet->tt_num_changes;
- batman_ogm_packet->header.ttl--;
- memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
+ batadv_ogm_packet->header.ttl--;
+ memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
/* apply hop penalty */
- batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);
+ batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq,
+ bat_priv);
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: tq: %i, ttl: %i\n",
- batman_ogm_packet->tq, batman_ogm_packet->header.ttl);
-
- batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
- batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding packet: tq: %i, ttl: %i\n",
+ batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl);
/* switch of primaries first hop flag when forwarding */
- batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
+ batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
if (is_single_hop_neigh)
- batman_ogm_packet->flags |= DIRECTLINK;
+ batadv_ogm_packet->flags |= BATADV_DIRECTLINK;
else
- batman_ogm_packet->flags &= ~DIRECTLINK;
+ batadv_ogm_packet->flags &= ~BATADV_DIRECTLINK;
- bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
- BATMAN_OGM_HLEN + tt_len(tt_num_changes),
- if_incoming, 0, bat_iv_ogm_fwd_send_time());
+ batadv_iv_ogm_queue_add(bat_priv, (unsigned char *)batadv_ogm_packet,
+ BATADV_OGM_HLEN + batadv_tt_len(tt_num_changes),
+ if_incoming, 0, batadv_iv_ogm_fwd_send_time());
}
-static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
- int tt_num_changes)
+static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct batman_ogm_packet *batman_ogm_packet;
- struct hard_iface *primary_if;
- int vis_server;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ struct batadv_hard_iface *primary_if;
+ int vis_server, tt_num_changes = 0;
vis_server = atomic_read(&bat_priv->vis_mode);
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+
+ if (hard_iface == primary_if)
+ tt_num_changes = batadv_tt_append_diff(bat_priv,
+ &hard_iface->packet_buff,
+ &hard_iface->packet_len,
+ BATADV_OGM_HLEN);
- batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)hard_iface->packet_buff;
/* change sequence number to network order */
- batman_ogm_packet->seqno =
+ batadv_ogm_packet->seqno =
htonl((uint32_t)atomic_read(&hard_iface->seqno));
+ atomic_inc(&hard_iface->seqno);
- batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
- batman_ogm_packet->tt_crc = htons((uint16_t)
- atomic_read(&bat_priv->tt_crc));
+ batadv_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
+ batadv_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
if (tt_num_changes >= 0)
- batman_ogm_packet->tt_num_changes = tt_num_changes;
+ batadv_ogm_packet->tt_num_changes = tt_num_changes;
- if (vis_server == VIS_TYPE_SERVER_SYNC)
- batman_ogm_packet->flags |= VIS_SERVER;
+ if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC)
+ batadv_ogm_packet->flags |= BATADV_VIS_SERVER;
else
- batman_ogm_packet->flags &= ~VIS_SERVER;
+ batadv_ogm_packet->flags &= ~BATADV_VIS_SERVER;
if ((hard_iface == primary_if) &&
- (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
- batman_ogm_packet->gw_flags =
+ (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_SERVER))
+ batadv_ogm_packet->gw_flags =
(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
else
- batman_ogm_packet->gw_flags = NO_FLAGS;
-
- atomic_inc(&hard_iface->seqno);
+ batadv_ogm_packet->gw_flags = BATADV_NO_FLAGS;
- slide_own_bcast_window(hard_iface);
- bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
- hard_iface->packet_len, hard_iface, 1,
- bat_iv_ogm_emit_send_time(bat_priv));
+ batadv_slide_own_bcast_window(hard_iface);
+ batadv_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
+ hard_iface->packet_len, hard_iface, 1,
+ batadv_iv_ogm_emit_send_time(bat_priv));
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
-static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const struct ethhdr *ethhdr,
- const struct batman_ogm_packet
- *batman_ogm_packet,
- struct hard_iface *if_incoming,
- const unsigned char *tt_buff,
- int is_duplicate)
+static void
+batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const struct ethhdr *ethhdr,
+ const struct batadv_ogm_packet *batadv_ogm_packet,
+ struct batadv_hard_iface *if_incoming,
+ const unsigned char *tt_buff,
+ int is_duplicate)
{
- struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
- struct neigh_node *router = NULL;
- struct orig_node *orig_node_tmp;
+ struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_orig_node *orig_node_tmp;
struct hlist_node *node;
uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
+ uint8_t *neigh_addr;
- bat_dbg(DBG_BATMAN, bat_priv,
- "update_originator(): Searching and updating originator entry of received packet\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "update_originator(): Searching and updating originator entry of received packet\n");
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_node->neigh_list, list) {
- if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
- (tmp_neigh_node->if_incoming == if_incoming) &&
- atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
+ neigh_addr = tmp_neigh_node->addr;
+ if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
+ tmp_neigh_node->if_incoming == if_incoming &&
+ atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
neigh_node = tmp_neigh_node;
continue;
}
@@ -637,53 +665,55 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
continue;
spin_lock_bh(&tmp_neigh_node->lq_update_lock);
- ring_buffer_set(tmp_neigh_node->tq_recv,
- &tmp_neigh_node->tq_index, 0);
+ batadv_ring_buffer_set(tmp_neigh_node->tq_recv,
+ &tmp_neigh_node->tq_index, 0);
tmp_neigh_node->tq_avg =
- ring_buffer_avg(tmp_neigh_node->tq_recv);
+ batadv_ring_buffer_avg(tmp_neigh_node->tq_recv);
spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
}
if (!neigh_node) {
- struct orig_node *orig_tmp;
+ struct batadv_orig_node *orig_tmp;
- orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
+ orig_tmp = batadv_get_orig_node(bat_priv, ethhdr->h_source);
if (!orig_tmp)
goto unlock;
- neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source,
- orig_node, orig_tmp,
- batman_ogm_packet->seqno);
+ neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
+ ethhdr->h_source,
+ orig_node, orig_tmp,
+ batadv_ogm_packet->seqno);
- orig_node_free_ref(orig_tmp);
+ batadv_orig_node_free_ref(orig_tmp);
if (!neigh_node)
goto unlock;
} else
- bat_dbg(DBG_BATMAN, bat_priv,
- "Updating existing last-hop neighbor of originator\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Updating existing last-hop neighbor of originator\n");
rcu_read_unlock();
- orig_node->flags = batman_ogm_packet->flags;
+ orig_node->flags = batadv_ogm_packet->flags;
neigh_node->last_seen = jiffies;
spin_lock_bh(&neigh_node->lq_update_lock);
- ring_buffer_set(neigh_node->tq_recv,
- &neigh_node->tq_index,
- batman_ogm_packet->tq);
- neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
+ batadv_ring_buffer_set(neigh_node->tq_recv,
+ &neigh_node->tq_index,
+ batadv_ogm_packet->tq);
+ neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
spin_unlock_bh(&neigh_node->lq_update_lock);
if (!is_duplicate) {
- orig_node->last_ttl = batman_ogm_packet->header.ttl;
- neigh_node->last_ttl = batman_ogm_packet->header.ttl;
+ orig_node->last_ttl = batadv_ogm_packet->header.ttl;
+ neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
}
- bonding_candidate_add(orig_node, neigh_node);
+ batadv_bonding_candidate_add(orig_node, neigh_node);
/* if this neighbor already is our next hop there is nothing
- * to change */
- router = orig_node_get_router(orig_node);
+ * to change
+ */
+ router = batadv_orig_node_get_router(orig_node);
if (router == neigh_node)
goto update_tt;
@@ -692,7 +722,8 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
goto update_tt;
/* if the TQ is the same and the link not more symmetric we
- * won't consider it either */
+ * won't consider it either
+ */
if (router && (neigh_node->tq_avg == router->tq_avg)) {
orig_node_tmp = router->orig_node;
spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
@@ -710,30 +741,31 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
goto update_tt;
}
- update_route(bat_priv, orig_node, neigh_node);
+ batadv_update_route(bat_priv, orig_node, neigh_node);
update_tt:
/* I have to check for transtable changes only if the OGM has been
- * sent through a primary interface */
- if (((batman_ogm_packet->orig != ethhdr->h_source) &&
- (batman_ogm_packet->header.ttl > 2)) ||
- (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
- tt_update_orig(bat_priv, orig_node, tt_buff,
- batman_ogm_packet->tt_num_changes,
- batman_ogm_packet->ttvn,
- batman_ogm_packet->tt_crc);
+ * sent through a primary interface
+ */
+ if (((batadv_ogm_packet->orig != ethhdr->h_source) &&
+ (batadv_ogm_packet->header.ttl > 2)) ||
+ (batadv_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
+ batadv_tt_update_orig(bat_priv, orig_node, tt_buff,
+ batadv_ogm_packet->tt_num_changes,
+ batadv_ogm_packet->ttvn,
+ ntohs(batadv_ogm_packet->tt_crc));
- if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
- gw_node_update(bat_priv, orig_node,
- batman_ogm_packet->gw_flags);
+ if (orig_node->gw_flags != batadv_ogm_packet->gw_flags)
+ batadv_gw_node_update(bat_priv, orig_node,
+ batadv_ogm_packet->gw_flags);
- orig_node->gw_flags = batman_ogm_packet->gw_flags;
+ orig_node->gw_flags = batadv_ogm_packet->gw_flags;
/* restart gateway selection if fast or late switching was enabled */
if ((orig_node->gw_flags) &&
- (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
+ (atomic_read(&bat_priv->gw_mode) == BATADV_GW_MODE_CLIENT) &&
(atomic_read(&bat_priv->gw_sel_class) > 2))
- gw_check_election(bat_priv, orig_node);
+ batadv_gw_check_election(bat_priv, orig_node);
goto out;
@@ -741,29 +773,32 @@ unlock:
rcu_read_unlock();
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
-static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- struct batman_ogm_packet *batman_ogm_packet,
- struct hard_iface *if_incoming)
+static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ struct batadv_hard_iface *if_incoming)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node;
struct hlist_node *node;
uint8_t total_count;
- uint8_t orig_eq_count, neigh_rq_count, tq_own;
- int tq_asym_penalty, ret = 0;
+ uint8_t orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
+ unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
+ int tq_asym_penalty, inv_asym_penalty, ret = 0;
+ unsigned int combined_tq;
/* find corresponding one hop neighbor */
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_neigh_node->neigh_list, list) {
- if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
+ if (!batadv_compare_eth(tmp_neigh_node->addr,
+ orig_neigh_node->orig))
continue;
if (tmp_neigh_node->if_incoming != if_incoming)
@@ -778,11 +813,11 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
rcu_read_unlock();
if (!neigh_node)
- neigh_node = bat_iv_ogm_neigh_new(if_incoming,
- orig_neigh_node->orig,
- orig_neigh_node,
- orig_neigh_node,
- batman_ogm_packet->seqno);
+ neigh_node = batadv_iv_ogm_neigh_new(if_incoming,
+ orig_neigh_node->orig,
+ orig_neigh_node,
+ orig_neigh_node,
+ batadv_ogm_packet->seqno);
if (!neigh_node)
goto out;
@@ -803,47 +838,52 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
total_count = (orig_eq_count > neigh_rq_count ?
neigh_rq_count : orig_eq_count);
- /* if we have too few packets (too less data) we set tq_own to zero */
- /* if we receive too few packets it is not considered bidirectional */
- if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
- (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
+ /* if we have too few packets (too less data) we set tq_own to zero
+ * if we receive too few packets it is not considered bidirectional
+ */
+ if (total_count < BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM ||
+ neigh_rq_count < BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM)
tq_own = 0;
else
/* neigh_node->real_packet_count is never zero as we
* only purge old information when getting new
- * information */
- tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
+ * information
+ */
+ tq_own = (BATADV_TQ_MAX_VALUE * total_count) / neigh_rq_count;
/* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
* affect the nearly-symmetric links only a little, but
* punishes asymmetric links more. This will give a value
* between 0 and TQ_MAX_VALUE
*/
- tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
- (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
- (TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE);
-
- batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own
- * tq_asym_penalty) /
- (TQ_MAX_VALUE * TQ_MAX_VALUE));
-
- bat_dbg(DBG_BATMAN, bat_priv,
- "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
- orig_node->orig, orig_neigh_node->orig, total_count,
- neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);
+ neigh_rq_inv = BATADV_TQ_LOCAL_WINDOW_SIZE - neigh_rq_count;
+ neigh_rq_inv_cube = neigh_rq_inv * neigh_rq_inv * neigh_rq_inv;
+ neigh_rq_max_cube = BATADV_TQ_LOCAL_WINDOW_SIZE *
+ BATADV_TQ_LOCAL_WINDOW_SIZE *
+ BATADV_TQ_LOCAL_WINDOW_SIZE;
+ inv_asym_penalty = BATADV_TQ_MAX_VALUE * neigh_rq_inv_cube;
+ inv_asym_penalty /= neigh_rq_max_cube;
+ tq_asym_penalty = BATADV_TQ_MAX_VALUE - inv_asym_penalty;
+
+ combined_tq = batadv_ogm_packet->tq * tq_own * tq_asym_penalty;
+ combined_tq /= BATADV_TQ_MAX_VALUE * BATADV_TQ_MAX_VALUE;
+ batadv_ogm_packet->tq = combined_tq;
+
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
+ orig_node->orig, orig_neigh_node->orig, total_count,
+ neigh_rq_count, tq_own,
+ tq_asym_penalty, batadv_ogm_packet->tq);
/* if link has the minimum required transmission quality
- * consider it bidirectional */
- if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
+ * consider it bidirectional
+ */
+ if (batadv_ogm_packet->tq >= BATADV_TQ_TOTAL_BIDRECT_LIMIT)
ret = 1;
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
return ret;
}
@@ -855,90 +895,94 @@ out:
* -1 the packet is old and has been received while the seqno window
* was protected. Caller should drop it.
*/
-static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
- const struct batman_ogm_packet
- *batman_ogm_packet,
- const struct hard_iface *if_incoming)
+static int
+batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
+ const struct batadv_ogm_packet *batadv_ogm_packet,
+ const struct batadv_hard_iface *if_incoming)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct orig_node *orig_node;
- struct neigh_node *tmp_neigh_node;
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *tmp_neigh_node;
struct hlist_node *node;
int is_duplicate = 0;
int32_t seq_diff;
int need_update = 0;
int set_mark, ret = -1;
+ uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
+ uint8_t *neigh_addr;
- orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
+ orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
if (!orig_node)
return 0;
spin_lock_bh(&orig_node->ogm_cnt_lock);
- seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
+ seq_diff = seqno - orig_node->last_real_seqno;
/* signalize caller that the packet is to be dropped. */
if (!hlist_empty(&orig_node->neigh_list) &&
- window_protected(bat_priv, seq_diff,
- &orig_node->batman_seqno_reset))
+ batadv_window_protected(bat_priv, seq_diff,
+ &orig_node->batman_seqno_reset))
goto out;
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_node->neigh_list, list) {
- is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
- orig_node->last_real_seqno,
- batman_ogm_packet->seqno);
+ is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
+ orig_node->last_real_seqno,
+ seqno);
- if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
- (tmp_neigh_node->if_incoming == if_incoming))
+ neigh_addr = tmp_neigh_node->addr;
+ if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
+ tmp_neigh_node->if_incoming == if_incoming)
set_mark = 1;
else
set_mark = 0;
/* if the window moved, set the update flag. */
- need_update |= bit_get_packet(bat_priv,
- tmp_neigh_node->real_bits,
- seq_diff, set_mark);
+ need_update |= batadv_bit_get_packet(bat_priv,
+ tmp_neigh_node->real_bits,
+ seq_diff, set_mark);
tmp_neigh_node->real_packet_count =
bitmap_weight(tmp_neigh_node->real_bits,
- TQ_LOCAL_WINDOW_SIZE);
+ BATADV_TQ_LOCAL_WINDOW_SIZE);
}
rcu_read_unlock();
if (need_update) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "updating last_seqno: old %u, new %u\n",
- orig_node->last_real_seqno, batman_ogm_packet->seqno);
- orig_node->last_real_seqno = batman_ogm_packet->seqno;
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "updating last_seqno: old %u, new %u\n",
+ orig_node->last_real_seqno, seqno);
+ orig_node->last_real_seqno = seqno;
}
ret = is_duplicate;
out:
spin_unlock_bh(&orig_node->ogm_cnt_lock);
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
-static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
- struct batman_ogm_packet *batman_ogm_packet,
- const unsigned char *tt_buff,
- struct hard_iface *if_incoming)
+static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
+ struct batadv_ogm_packet *batadv_ogm_packet,
+ const unsigned char *tt_buff,
+ struct batadv_hard_iface *if_incoming)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct hard_iface *hard_iface;
- struct orig_node *orig_neigh_node, *orig_node;
- struct neigh_node *router = NULL, *router_router = NULL;
- struct neigh_node *orig_neigh_router = NULL;
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_orig_node *orig_neigh_node, *orig_node;
+ struct batadv_neigh_node *router = NULL, *router_router = NULL;
+ struct batadv_neigh_node *orig_neigh_router = NULL;
int has_directlink_flag;
int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
- int is_broadcast = 0, is_bidirectional;
+ int is_broadcast = 0, is_bidirect;
bool is_single_hop_neigh = false;
bool is_from_best_next_hop = false;
- int is_duplicate;
+ int is_duplicate, sameseq, simlar_ttl;
uint32_t if_incoming_seqno;
+ uint8_t *prev_sender;
/* Silently drop when the batman packet is actually not a
* correct packet.
@@ -948,49 +992,53 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
* it as an additional length.
*
* TODO: A more sane solution would be to have a bit in the
- * batman_ogm_packet to detect whether the packet is the last
+ * batadv_ogm_packet to detect whether the packet is the last
* packet in an aggregation. Here we expect that the padding
* is always zero (or not 0x01)
*/
- if (batman_ogm_packet->header.packet_type != BAT_IV_OGM)
+ if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM)
return;
/* could be changed by schedule_own_packet() */
if_incoming_seqno = atomic_read(&if_incoming->seqno);
- has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);
+ if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
+ has_directlink_flag = 1;
+ else
+ has_directlink_flag = 0;
- if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig))
+ if (batadv_compare_eth(ethhdr->h_source, batadv_ogm_packet->orig))
is_single_hop_neigh = true;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
- ethhdr->h_source, if_incoming->net_dev->name,
- if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
- batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
- batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
- batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
- batman_ogm_packet->header.ttl,
- batman_ogm_packet->header.version, has_directlink_flag);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
+ ethhdr->h_source, if_incoming->net_dev->name,
+ if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
+ batadv_ogm_packet->prev_sender,
+ ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->ttvn,
+ ntohs(batadv_ogm_packet->tt_crc),
+ batadv_ogm_packet->tt_num_changes, batadv_ogm_packet->tq,
+ batadv_ogm_packet->header.ttl,
+ batadv_ogm_packet->header.version, has_directlink_flag);
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if (hard_iface->if_status != IF_ACTIVE)
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
if (hard_iface->soft_iface != if_incoming->soft_iface)
continue;
- if (compare_eth(ethhdr->h_source,
- hard_iface->net_dev->dev_addr))
+ if (batadv_compare_eth(ethhdr->h_source,
+ hard_iface->net_dev->dev_addr))
is_my_addr = 1;
- if (compare_eth(batman_ogm_packet->orig,
- hard_iface->net_dev->dev_addr))
+ if (batadv_compare_eth(batadv_ogm_packet->orig,
+ hard_iface->net_dev->dev_addr))
is_my_orig = 1;
- if (compare_eth(batman_ogm_packet->prev_sender,
- hard_iface->net_dev->dev_addr))
+ if (batadv_compare_eth(batadv_ogm_packet->prev_sender,
+ hard_iface->net_dev->dev_addr))
is_my_oldorig = 1;
if (is_broadcast_ether_addr(ethhdr->h_source))
@@ -998,268 +1046,278 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
}
rcu_read_unlock();
- if (batman_ogm_packet->header.version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: incompatible batman version (%i)\n",
- batman_ogm_packet->header.version);
+ if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: incompatible batman version (%i)\n",
+ batadv_ogm_packet->header.version);
return;
}
if (is_my_addr) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: received my own broadcast (sender: %pM)\n",
- ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: received my own broadcast (sender: %pM)\n",
+ ethhdr->h_source);
return;
}
if (is_broadcast) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
- ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
+ ethhdr->h_source);
return;
}
if (is_my_orig) {
unsigned long *word;
int offset;
+ int32_t bit_pos;
+ int16_t if_num;
+ uint8_t *weight;
- orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
+ orig_neigh_node = batadv_get_orig_node(bat_priv,
+ ethhdr->h_source);
if (!orig_neigh_node)
return;
/* neighbor has to indicate direct link and it has to
- * come via the corresponding interface */
- /* save packet seqno for bidirectional check */
+ * come via the corresponding interface
+ * save packet seqno for bidirectional check
+ */
if (has_directlink_flag &&
- compare_eth(if_incoming->net_dev->dev_addr,
- batman_ogm_packet->orig)) {
- offset = if_incoming->if_num * NUM_WORDS;
+ batadv_compare_eth(if_incoming->net_dev->dev_addr,
+ batadv_ogm_packet->orig)) {
+ if_num = if_incoming->if_num;
+ offset = if_num * BATADV_NUM_WORDS;
spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
word = &(orig_neigh_node->bcast_own[offset]);
- bat_set_bit(word,
- if_incoming_seqno -
- batman_ogm_packet->seqno - 2);
- orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
- bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
+ bit_pos = if_incoming_seqno - 2;
+ bit_pos -= ntohl(batadv_ogm_packet->seqno);
+ batadv_set_bit(word, bit_pos);
+ weight = &orig_neigh_node->bcast_own_sum[if_num];
+ *weight = bitmap_weight(word,
+ BATADV_TQ_LOCAL_WINDOW_SIZE);
spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
}
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: originator packet from myself (via neighbor)\n");
- orig_node_free_ref(orig_neigh_node);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from myself (via neighbor)\n");
+ batadv_orig_node_free_ref(orig_neigh_node);
return;
}
if (is_my_oldorig) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
- ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
+ ethhdr->h_source);
return;
}
- if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
- ethhdr->h_source);
+ if (batadv_ogm_packet->flags & BATADV_NOT_BEST_NEXT_HOP) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
+ ethhdr->h_source);
return;
}
- orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
+ orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
if (!orig_node)
return;
- is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet,
- if_incoming);
+ is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
+ if_incoming);
if (is_duplicate == -1) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: packet within seqno protection time (sender: %pM)\n",
- ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: packet within seqno protection time (sender: %pM)\n",
+ ethhdr->h_source);
goto out;
}
- if (batman_ogm_packet->tq == 0) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: originator packet with tq equal 0\n");
+ if (batadv_ogm_packet->tq == 0) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet with tq equal 0\n");
goto out;
}
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (router)
- router_router = orig_node_get_router(router->orig_node);
+ router_router = batadv_orig_node_get_router(router->orig_node);
if ((router && router->tq_avg != 0) &&
- (compare_eth(router->addr, ethhdr->h_source)))
+ (batadv_compare_eth(router->addr, ethhdr->h_source)))
is_from_best_next_hop = true;
+ prev_sender = batadv_ogm_packet->prev_sender;
/* avoid temporary routing loops */
if (router && router_router &&
- (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
- !(compare_eth(batman_ogm_packet->orig,
- batman_ogm_packet->prev_sender)) &&
- (compare_eth(router->addr, router_router->addr))) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
- ethhdr->h_source);
+ (batadv_compare_eth(router->addr, prev_sender)) &&
+ !(batadv_compare_eth(batadv_ogm_packet->orig, prev_sender)) &&
+ (batadv_compare_eth(router->addr, router_router->addr))) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
+ ethhdr->h_source);
goto out;
}
/* if sender is a direct neighbor the sender mac equals
- * originator mac */
+ * originator mac
+ */
orig_neigh_node = (is_single_hop_neigh ?
orig_node :
- get_orig_node(bat_priv, ethhdr->h_source));
+ batadv_get_orig_node(bat_priv, ethhdr->h_source));
if (!orig_neigh_node)
goto out;
- orig_neigh_router = orig_node_get_router(orig_neigh_node);
+ orig_neigh_router = batadv_orig_node_get_router(orig_neigh_node);
/* drop packet if sender is not a direct neighbor and if we
- * don't route towards it */
+ * don't route towards it
+ */
if (!is_single_hop_neigh && (!orig_neigh_router)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: OGM via unknown neighbor!\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: OGM via unknown neighbor!\n");
goto out_neigh;
}
- is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node,
- batman_ogm_packet, if_incoming);
+ is_bidirect = batadv_iv_ogm_calc_tq(orig_node, orig_neigh_node,
+ batadv_ogm_packet, if_incoming);
- bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);
+ batadv_bonding_save_primary(orig_node, orig_neigh_node,
+ batadv_ogm_packet);
/* update ranking if it is not a duplicate or has the same
- * seqno and similar ttl as the non-duplicate */
- if (is_bidirectional &&
- (!is_duplicate ||
- ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
- (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
- bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
- batman_ogm_packet, if_incoming,
- tt_buff, is_duplicate);
+ * seqno and similar ttl as the non-duplicate
+ */
+ sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
+ simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+ if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl)))
+ batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
+ batadv_ogm_packet, if_incoming,
+ tt_buff, is_duplicate);
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
/* mark direct link on incoming interface */
- bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
- is_single_hop_neigh, is_from_best_next_hop,
- if_incoming);
+ batadv_iv_ogm_forward(orig_node, ethhdr, batadv_ogm_packet,
+ is_single_hop_neigh,
+ is_from_best_next_hop, if_incoming);
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
goto out_neigh;
}
/* multihop originator */
- if (!is_bidirectional) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: not received via bidirectional link\n");
+ if (!is_bidirect) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: not received via bidirectional link\n");
goto out_neigh;
}
if (is_duplicate) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: duplicate packet received\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: duplicate packet received\n");
goto out_neigh;
}
- bat_dbg(DBG_BATMAN, bat_priv,
- "Forwarding packet: rebroadcast originator packet\n");
- bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
- is_single_hop_neigh, is_from_best_next_hop,
- if_incoming);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Forwarding packet: rebroadcast originator packet\n");
+ batadv_iv_ogm_forward(orig_node, ethhdr, batadv_ogm_packet,
+ is_single_hop_neigh, is_from_best_next_hop,
+ if_incoming);
out_neigh:
if ((orig_neigh_node) && (!is_single_hop_neigh))
- orig_node_free_ref(orig_neigh_node);
+ batadv_orig_node_free_ref(orig_neigh_node);
out:
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (router_router)
- neigh_node_free_ref(router_router);
+ batadv_neigh_node_free_ref(router_router);
if (orig_neigh_router)
- neigh_node_free_ref(orig_neigh_router);
+ batadv_neigh_node_free_ref(orig_neigh_router);
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
-static int bat_iv_ogm_receive(struct sk_buff *skb,
- struct hard_iface *if_incoming)
+static int batadv_iv_ogm_receive(struct sk_buff *skb,
+ struct batadv_hard_iface *if_incoming)
{
- struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
- struct batman_ogm_packet *batman_ogm_packet;
+ struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+ struct batadv_ogm_packet *batadv_ogm_packet;
struct ethhdr *ethhdr;
int buff_pos = 0, packet_len;
unsigned char *tt_buff, *packet_buff;
bool ret;
- ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN);
+ ret = batadv_check_management_packet(skb, if_incoming, BATADV_OGM_HLEN);
if (!ret)
return NET_RX_DROP;
/* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
* that does not have B.A.T.M.A.N. IV enabled ?
*/
- if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit)
+ if (bat_priv->bat_algo_ops->bat_ogm_emit != batadv_iv_ogm_emit)
return NET_RX_DROP;
+ batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
+ batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
+ skb->len + ETH_HLEN);
+
packet_len = skb_headlen(skb);
ethhdr = (struct ethhdr *)skb_mac_header(skb);
packet_buff = skb->data;
- batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;
/* unpack the aggregated packets and process them one by one */
do {
- /* network to host order for our 32bit seqno and the
- orig_interval */
- batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
- batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
-
- tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;
+ tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;
- bat_iv_ogm_process(ethhdr, batman_ogm_packet,
- tt_buff, if_incoming);
+ batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff,
+ if_incoming);
- buff_pos += BATMAN_OGM_HLEN +
- tt_len(batman_ogm_packet->tt_num_changes);
+ buff_pos += BATADV_OGM_HLEN;
+ buff_pos += batadv_tt_len(batadv_ogm_packet->tt_num_changes);
- batman_ogm_packet = (struct batman_ogm_packet *)
+ batadv_ogm_packet = (struct batadv_ogm_packet *)
(packet_buff + buff_pos);
- } while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
- batman_ogm_packet->tt_num_changes));
+ } while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len,
+ batadv_ogm_packet->tt_num_changes));
kfree_skb(skb);
return NET_RX_SUCCESS;
}
-static struct bat_algo_ops batman_iv __read_mostly = {
- .name = "BATMAN IV",
- .bat_iface_enable = bat_iv_ogm_iface_enable,
- .bat_iface_disable = bat_iv_ogm_iface_disable,
- .bat_iface_update_mac = bat_iv_ogm_iface_update_mac,
- .bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
- .bat_ogm_schedule = bat_iv_ogm_schedule,
- .bat_ogm_emit = bat_iv_ogm_emit,
+static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
+ .name = "BATMAN_IV",
+ .bat_iface_enable = batadv_iv_ogm_iface_enable,
+ .bat_iface_disable = batadv_iv_ogm_iface_disable,
+ .bat_iface_update_mac = batadv_iv_ogm_iface_update_mac,
+ .bat_primary_iface_set = batadv_iv_ogm_primary_iface_set,
+ .bat_ogm_schedule = batadv_iv_ogm_schedule,
+ .bat_ogm_emit = batadv_iv_ogm_emit,
};
-int __init bat_iv_init(void)
+int __init batadv_iv_init(void)
{
int ret;
/* batman originator packet */
- ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive);
+ ret = batadv_recv_handler_register(BATADV_IV_OGM,
+ batadv_iv_ogm_receive);
if (ret < 0)
goto out;
- ret = bat_algo_register(&batman_iv);
+ ret = batadv_algo_register(&batadv_batman_iv);
if (ret < 0)
goto handler_unregister;
goto out;
handler_unregister:
- recv_handler_unregister(BAT_IV_OGM);
+ batadv_recv_handler_unregister(BATADV_IV_OGM);
out:
return ret;
}
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
deleted file mode 100644
index 5bc7b66d32dc..000000000000
--- a/net/batman-adv/bat_sysfs.c
+++ /dev/null
@@ -1,735 +0,0 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
- *
- * Marek Lindner
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA
- *
- */
-
-#include "main.h"
-#include "bat_sysfs.h"
-#include "translation-table.h"
-#include "originator.h"
-#include "hard-interface.h"
-#include "gateway_common.h"
-#include "gateway_client.h"
-#include "vis.h"
-
-static struct net_device *kobj_to_netdev(struct kobject *obj)
-{
- struct device *dev = container_of(obj->parent, struct device, kobj);
- return to_net_dev(dev);
-}
-
-static struct bat_priv *kobj_to_batpriv(struct kobject *obj)
-{
- struct net_device *net_dev = kobj_to_netdev(obj);
- return netdev_priv(net_dev);
-}
-
-#define UEV_TYPE_VAR "BATTYPE="
-#define UEV_ACTION_VAR "BATACTION="
-#define UEV_DATA_VAR "BATDATA="
-
-static char *uev_action_str[] = {
- "add",
- "del",
- "change"
-};
-
-static char *uev_type_str[] = {
- "gw"
-};
-
-/* Use this, if you have customized show and store functions */
-#define BAT_ATTR(_name, _mode, _show, _store) \
-struct bat_attribute bat_attr_##_name = { \
- .attr = {.name = __stringify(_name), \
- .mode = _mode }, \
- .show = _show, \
- .store = _store, \
-};
-
-#define BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
-ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff, size_t count) \
-{ \
- struct net_device *net_dev = kobj_to_netdev(kobj); \
- struct bat_priv *bat_priv = netdev_priv(net_dev); \
- return __store_bool_attr(buff, count, _post_func, attr, \
- &bat_priv->_name, net_dev); \
-}
-
-#define BAT_ATTR_SIF_SHOW_BOOL(_name) \
-ssize_t show_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff) \
-{ \
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
- return sprintf(buff, "%s\n", \
- atomic_read(&bat_priv->_name) == 0 ? \
- "disabled" : "enabled"); \
-} \
-
-/* Use this, if you are going to turn a [name] in the soft-interface
- * (bat_priv) on or off */
-#define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \
- static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \
- static BAT_ATTR_SIF_SHOW_BOOL(_name) \
- static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
-
-
-#define BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
-ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff, size_t count) \
-{ \
- struct net_device *net_dev = kobj_to_netdev(kobj); \
- struct bat_priv *bat_priv = netdev_priv(net_dev); \
- return __store_uint_attr(buff, count, _min, _max, _post_func, \
- attr, &bat_priv->_name, net_dev); \
-}
-
-#define BAT_ATTR_SIF_SHOW_UINT(_name) \
-ssize_t show_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff) \
-{ \
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj); \
- return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
-} \
-
-/* Use this, if you are going to set [name] in the soft-interface
- * (bat_priv) to an unsigned integer value */
-#define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
- static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
- static BAT_ATTR_SIF_SHOW_UINT(_name) \
- static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
-
-
-#define BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
-ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \
- char *buff, size_t count) \
-{ \
- struct net_device *net_dev = kobj_to_netdev(kobj); \
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
- ssize_t length; \
- \
- if (!hard_iface) \
- return 0; \
- \
- length = __store_uint_attr(buff, count, _min, _max, _post_func, \
- attr, &hard_iface->_name, net_dev); \
- \
- hardif_free_ref(hard_iface); \
- return length; \
-}
-
-#define BAT_ATTR_HIF_SHOW_UINT(_name) \
-ssize_t show_##_name(struct kobject *kobj, \
- struct attribute *attr, char *buff) \
-{ \
- struct net_device *net_dev = kobj_to_netdev(kobj); \
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \
- ssize_t length; \
- \
- if (!hard_iface) \
- return 0; \
- \
- length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
- \
- hardif_free_ref(hard_iface); \
- return length; \
-}
-
-/* Use this, if you are going to set [name] in hard_iface to an
- * unsigned integer value*/
-#define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
- static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
- static BAT_ATTR_HIF_SHOW_UINT(_name) \
- static BAT_ATTR(_name, _mode, show_##_name, store_##_name)
-
-
-static int store_bool_attr(char *buff, size_t count,
- struct net_device *net_dev,
- const char *attr_name, atomic_t *attr)
-{
- int enabled = -1;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if ((strncmp(buff, "1", 2) == 0) ||
- (strncmp(buff, "enable", 7) == 0) ||
- (strncmp(buff, "enabled", 8) == 0))
- enabled = 1;
-
- if ((strncmp(buff, "0", 2) == 0) ||
- (strncmp(buff, "disable", 8) == 0) ||
- (strncmp(buff, "disabled", 9) == 0))
- enabled = 0;
-
- if (enabled < 0) {
- bat_info(net_dev,
- "%s: Invalid parameter received: %s\n",
- attr_name, buff);
- return -EINVAL;
- }
-
- if (atomic_read(attr) == enabled)
- return count;
-
- bat_info(net_dev, "%s: Changing from: %s to: %s\n", attr_name,
- atomic_read(attr) == 1 ? "enabled" : "disabled",
- enabled == 1 ? "enabled" : "disabled");
-
- atomic_set(attr, (unsigned int)enabled);
- return count;
-}
-
-static inline ssize_t __store_bool_attr(char *buff, size_t count,
- void (*post_func)(struct net_device *),
- struct attribute *attr,
- atomic_t *attr_store, struct net_device *net_dev)
-{
- int ret;
-
- ret = store_bool_attr(buff, count, net_dev, attr->name, attr_store);
- if (post_func && ret)
- post_func(net_dev);
-
- return ret;
-}
-
-static int store_uint_attr(const char *buff, size_t count,
- struct net_device *net_dev, const char *attr_name,
- unsigned int min, unsigned int max, atomic_t *attr)
-{
- unsigned long uint_val;
- int ret;
-
- ret = kstrtoul(buff, 10, &uint_val);
- if (ret) {
- bat_info(net_dev,
- "%s: Invalid parameter received: %s\n",
- attr_name, buff);
- return -EINVAL;
- }
-
- if (uint_val < min) {
- bat_info(net_dev, "%s: Value is too small: %lu min: %u\n",
- attr_name, uint_val, min);
- return -EINVAL;
- }
-
- if (uint_val > max) {
- bat_info(net_dev, "%s: Value is too big: %lu max: %u\n",
- attr_name, uint_val, max);
- return -EINVAL;
- }
-
- if (atomic_read(attr) == uint_val)
- return count;
-
- bat_info(net_dev, "%s: Changing from: %i to: %lu\n",
- attr_name, atomic_read(attr), uint_val);
-
- atomic_set(attr, uint_val);
- return count;
-}
-
-static inline ssize_t __store_uint_attr(const char *buff, size_t count,
- int min, int max,
- void (*post_func)(struct net_device *),
- const struct attribute *attr,
- atomic_t *attr_store, struct net_device *net_dev)
-{
- int ret;
-
- ret = store_uint_attr(buff, count, net_dev, attr->name,
- min, max, attr_store);
- if (post_func && ret)
- post_func(net_dev);
-
- return ret;
-}
-
-static ssize_t show_vis_mode(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
- int vis_mode = atomic_read(&bat_priv->vis_mode);
-
- return sprintf(buff, "%s\n",
- vis_mode == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server");
-}
-
-static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- unsigned long val;
- int ret, vis_mode_tmp = -1;
-
- ret = kstrtoul(buff, 10, &val);
-
- if (((count == 2) && (!ret) && (val == VIS_TYPE_CLIENT_UPDATE)) ||
- (strncmp(buff, "client", 6) == 0) ||
- (strncmp(buff, "off", 3) == 0))
- vis_mode_tmp = VIS_TYPE_CLIENT_UPDATE;
-
- if (((count == 2) && (!ret) && (val == VIS_TYPE_SERVER_SYNC)) ||
- (strncmp(buff, "server", 6) == 0))
- vis_mode_tmp = VIS_TYPE_SERVER_SYNC;
-
- if (vis_mode_tmp < 0) {
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- bat_info(net_dev,
- "Invalid parameter for 'vis mode' setting received: %s\n",
- buff);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
- return count;
-
- bat_info(net_dev, "Changing vis mode from: %s to: %s\n",
- atomic_read(&bat_priv->vis_mode) == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
- "client" : "server");
-
- atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
- return count;
-}
-
-static ssize_t show_bat_algo(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
- return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
-}
-
-static void post_gw_deselect(struct net_device *net_dev)
-{
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- gw_deselect(bat_priv);
-}
-
-static ssize_t show_gw_mode(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
- int bytes_written;
-
- switch (atomic_read(&bat_priv->gw_mode)) {
- case GW_MODE_CLIENT:
- bytes_written = sprintf(buff, "%s\n", GW_MODE_CLIENT_NAME);
- break;
- case GW_MODE_SERVER:
- bytes_written = sprintf(buff, "%s\n", GW_MODE_SERVER_NAME);
- break;
- default:
- bytes_written = sprintf(buff, "%s\n", GW_MODE_OFF_NAME);
- break;
- }
-
- return bytes_written;
-}
-
-static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- char *curr_gw_mode_str;
- int gw_mode_tmp = -1;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if (strncmp(buff, GW_MODE_OFF_NAME, strlen(GW_MODE_OFF_NAME)) == 0)
- gw_mode_tmp = GW_MODE_OFF;
-
- if (strncmp(buff, GW_MODE_CLIENT_NAME,
- strlen(GW_MODE_CLIENT_NAME)) == 0)
- gw_mode_tmp = GW_MODE_CLIENT;
-
- if (strncmp(buff, GW_MODE_SERVER_NAME,
- strlen(GW_MODE_SERVER_NAME)) == 0)
- gw_mode_tmp = GW_MODE_SERVER;
-
- if (gw_mode_tmp < 0) {
- bat_info(net_dev,
- "Invalid parameter for 'gw mode' setting received: %s\n",
- buff);
- return -EINVAL;
- }
-
- if (atomic_read(&bat_priv->gw_mode) == gw_mode_tmp)
- return count;
-
- switch (atomic_read(&bat_priv->gw_mode)) {
- case GW_MODE_CLIENT:
- curr_gw_mode_str = GW_MODE_CLIENT_NAME;
- break;
- case GW_MODE_SERVER:
- curr_gw_mode_str = GW_MODE_SERVER_NAME;
- break;
- default:
- curr_gw_mode_str = GW_MODE_OFF_NAME;
- break;
- }
-
- bat_info(net_dev, "Changing gw mode from: %s to: %s\n",
- curr_gw_mode_str, buff);
-
- gw_deselect(bat_priv);
- atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
- return count;
-}
-
-static ssize_t show_gw_bwidth(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct bat_priv *bat_priv = kobj_to_batpriv(kobj);
- int down, up;
- int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
-
- gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
- return sprintf(buff, "%i%s/%i%s\n",
- (down > 2048 ? down / 1024 : down),
- (down > 2048 ? "MBit" : "KBit"),
- (up > 2048 ? up / 1024 : up),
- (up > 2048 ? "MBit" : "KBit"));
-}
-
-static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- return gw_bandwidth_set(net_dev, buff, count);
-}
-
-BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
-BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
-#ifdef CONFIG_BATMAN_ADV_BLA
-BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
-#endif
-BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
-BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
-static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
-static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL);
-static BAT_ATTR(gw_mode, S_IRUGO | S_IWUSR, show_gw_mode, store_gw_mode);
-BAT_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * JITTER, INT_MAX, NULL);
-BAT_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, TQ_MAX_VALUE, NULL);
-BAT_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
- post_gw_deselect);
-static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
- store_gw_bwidth);
-#ifdef CONFIG_BATMAN_ADV_DEBUG
-BAT_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
-#endif
-
-static struct bat_attribute *mesh_attrs[] = {
- &bat_attr_aggregated_ogms,
- &bat_attr_bonding,
-#ifdef CONFIG_BATMAN_ADV_BLA
- &bat_attr_bridge_loop_avoidance,
-#endif
- &bat_attr_fragmentation,
- &bat_attr_ap_isolation,
- &bat_attr_vis_mode,
- &bat_attr_routing_algo,
- &bat_attr_gw_mode,
- &bat_attr_orig_interval,
- &bat_attr_hop_penalty,
- &bat_attr_gw_sel_class,
- &bat_attr_gw_bandwidth,
-#ifdef CONFIG_BATMAN_ADV_DEBUG
- &bat_attr_log_level,
-#endif
- NULL,
-};
-
-int sysfs_add_meshif(struct net_device *dev)
-{
- struct kobject *batif_kobject = &dev->dev.kobj;
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct bat_attribute **bat_attr;
- int err;
-
- bat_priv->mesh_obj = kobject_create_and_add(SYSFS_IF_MESH_SUBDIR,
- batif_kobject);
- if (!bat_priv->mesh_obj) {
- bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
- SYSFS_IF_MESH_SUBDIR);
- goto out;
- }
-
- for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr) {
- err = sysfs_create_file(bat_priv->mesh_obj,
- &((*bat_attr)->attr));
- if (err) {
- bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
- dev->name, SYSFS_IF_MESH_SUBDIR,
- ((*bat_attr)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-
-rem_attr:
- for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
-
- kobject_put(bat_priv->mesh_obj);
- bat_priv->mesh_obj = NULL;
-out:
- return -ENOMEM;
-}
-
-void sysfs_del_meshif(struct net_device *dev)
-{
- struct bat_priv *bat_priv = netdev_priv(dev);
- struct bat_attribute **bat_attr;
-
- for (bat_attr = mesh_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
-
- kobject_put(bat_priv->mesh_obj);
- bat_priv->mesh_obj = NULL;
-}
-
-static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
- ssize_t length;
-
- if (!hard_iface)
- return 0;
-
- length = sprintf(buff, "%s\n", hard_iface->if_status == IF_NOT_IN_USE ?
- "none" : hard_iface->soft_iface->name);
-
- hardif_free_ref(hard_iface);
-
- return length;
-}
-
-static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
- char *buff, size_t count)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
- int status_tmp = -1;
- int ret = count;
-
- if (!hard_iface)
- return count;
-
- if (buff[count - 1] == '\n')
- buff[count - 1] = '\0';
-
- if (strlen(buff) >= IFNAMSIZ) {
- pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
- buff);
- hardif_free_ref(hard_iface);
- return -EINVAL;
- }
-
- if (strncmp(buff, "none", 4) == 0)
- status_tmp = IF_NOT_IN_USE;
- else
- status_tmp = IF_I_WANT_YOU;
-
- if (hard_iface->if_status == status_tmp)
- goto out;
-
- if ((hard_iface->soft_iface) &&
- (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
- goto out;
-
- if (!rtnl_trylock()) {
- ret = -ERESTARTSYS;
- goto out;
- }
-
- if (status_tmp == IF_NOT_IN_USE) {
- hardif_disable_interface(hard_iface);
- goto unlock;
- }
-
- /* if the interface already is in use */
- if (hard_iface->if_status != IF_NOT_IN_USE)
- hardif_disable_interface(hard_iface);
-
- ret = hardif_enable_interface(hard_iface, buff);
-
-unlock:
- rtnl_unlock();
-out:
- hardif_free_ref(hard_iface);
- return ret;
-}
-
-static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr,
- char *buff)
-{
- struct net_device *net_dev = kobj_to_netdev(kobj);
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
- ssize_t length;
-
- if (!hard_iface)
- return 0;
-
- switch (hard_iface->if_status) {
- case IF_TO_BE_REMOVED:
- length = sprintf(buff, "disabling\n");
- break;
- case IF_INACTIVE:
- length = sprintf(buff, "inactive\n");
- break;
- case IF_ACTIVE:
- length = sprintf(buff, "active\n");
- break;
- case IF_TO_BE_ACTIVATED:
- length = sprintf(buff, "enabling\n");
- break;
- case IF_NOT_IN_USE:
- default:
- length = sprintf(buff, "not in use\n");
- break;
- }
-
- hardif_free_ref(hard_iface);
-
- return length;
-}
-
-static BAT_ATTR(mesh_iface, S_IRUGO | S_IWUSR,
- show_mesh_iface, store_mesh_iface);
-static BAT_ATTR(iface_status, S_IRUGO, show_iface_status, NULL);
-
-static struct bat_attribute *batman_attrs[] = {
- &bat_attr_mesh_iface,
- &bat_attr_iface_status,
- NULL,
-};
-
-int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
-{
- struct kobject *hardif_kobject = &dev->dev.kobj;
- struct bat_attribute **bat_attr;
- int err;
-
- *hardif_obj = kobject_create_and_add(SYSFS_IF_BAT_SUBDIR,
- hardif_kobject);
-
- if (!*hardif_obj) {
- bat_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
- SYSFS_IF_BAT_SUBDIR);
- goto out;
- }
-
- for (bat_attr = batman_attrs; *bat_attr; ++bat_attr) {
- err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
- if (err) {
- bat_err(dev, "Can't add sysfs file: %s/%s/%s\n",
- dev->name, SYSFS_IF_BAT_SUBDIR,
- ((*bat_attr)->attr).name);
- goto rem_attr;
- }
- }
-
- return 0;
-
-rem_attr:
- for (bat_attr = batman_attrs; *bat_attr; ++bat_attr)
- sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
-out:
- return -ENOMEM;
-}
-
-void sysfs_del_hardif(struct kobject **hardif_obj)
-{
- kobject_put(*hardif_obj);
- *hardif_obj = NULL;
-}
-
-int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
- enum uev_action action, const char *data)
-{
- int ret = -1;
- struct hard_iface *primary_if = NULL;
- struct kobject *bat_kobj;
- char *uevent_env[4] = { NULL, NULL, NULL, NULL };
-
- primary_if = primary_if_get_selected(bat_priv);
- if (!primary_if)
- goto out;
-
- bat_kobj = &primary_if->soft_iface->dev.kobj;
-
- uevent_env[0] = kmalloc(strlen(UEV_TYPE_VAR) +
- strlen(uev_type_str[type]) + 1,
- GFP_ATOMIC);
- if (!uevent_env[0])
- goto out;
-
- sprintf(uevent_env[0], "%s%s", UEV_TYPE_VAR, uev_type_str[type]);
-
- uevent_env[1] = kmalloc(strlen(UEV_ACTION_VAR) +
- strlen(uev_action_str[action]) + 1,
- GFP_ATOMIC);
- if (!uevent_env[1])
- goto out;
-
- sprintf(uevent_env[1], "%s%s", UEV_ACTION_VAR, uev_action_str[action]);
-
- /* If the event is DEL, ignore the data field */
- if (action != UEV_DEL) {
- uevent_env[2] = kmalloc(strlen(UEV_DATA_VAR) +
- strlen(data) + 1, GFP_ATOMIC);
- if (!uevent_env[2])
- goto out;
-
- sprintf(uevent_env[2], "%s%s", UEV_DATA_VAR, data);
- }
-
- ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
-out:
- kfree(uevent_env[0]);
- kfree(uevent_env[1]);
- kfree(uevent_env[2]);
-
- if (primary_if)
- hardif_free_ref(primary_if);
-
- if (ret)
- bat_dbg(DBG_BATMAN, bat_priv,
- "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
- uev_type_str[type], uev_action_str[action],
- (action == UEV_DEL ? "NULL" : data), ret);
- return ret;
-}
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 07ae6e1b8aca..aea174cdbfbd 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -25,12 +23,12 @@
#include <linux/bitops.h>
/* shift the packet array by n places. */
-static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
+static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
{
- if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+ if (n <= 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
return;
- bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE);
+ bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
}
@@ -40,58 +38,57 @@ static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
* 1 if the window was moved (either new or very old)
* 0 if the window was not moved/shifted.
*/
-int bit_get_packet(void *priv, unsigned long *seq_bits,
- int32_t seq_num_diff, int set_mark)
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+ int32_t seq_num_diff, int set_mark)
{
- struct bat_priv *bat_priv = priv;
+ struct batadv_priv *bat_priv = priv;
/* sequence number is slightly older. We already got a sequence number
- * higher than this one, so we just mark it. */
-
- if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
+ * higher than this one, so we just mark it.
+ */
+ if (seq_num_diff <= 0 && seq_num_diff > -BATADV_TQ_LOCAL_WINDOW_SIZE) {
if (set_mark)
- bat_set_bit(seq_bits, -seq_num_diff);
+ batadv_set_bit(seq_bits, -seq_num_diff);
return 0;
}
/* sequence number is slightly newer, so we shift the window and
- * set the mark if required */
-
- if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
- bat_bitmap_shift_left(seq_bits, seq_num_diff);
+ * set the mark if required
+ */
+ if (seq_num_diff > 0 && seq_num_diff < BATADV_TQ_LOCAL_WINDOW_SIZE) {
+ batadv_bitmap_shift_left(seq_bits, seq_num_diff);
if (set_mark)
- bat_set_bit(seq_bits, 0);
+ batadv_set_bit(seq_bits, 0);
return 1;
}
/* sequence number is much newer, probably missed a lot of packets */
-
- if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) &&
- (seq_num_diff < EXPECTED_SEQNO_RANGE)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "We missed a lot of packets (%i) !\n",
- seq_num_diff - 1);
- bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
+ if (seq_num_diff >= BATADV_TQ_LOCAL_WINDOW_SIZE &&
+ seq_num_diff < BATADV_EXPECTED_SEQNO_RANGE) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "We missed a lot of packets (%i) !\n",
+ seq_num_diff - 1);
+ bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
if (set_mark)
- bat_set_bit(seq_bits, 0);
+ batadv_set_bit(seq_bits, 0);
return 1;
}
/* received a much older packet. The other host either restarted
* or the old packet got delayed somewhere in the network. The
* packet should be dropped without calling this function if the
- * seqno window is protected. */
-
- if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
- (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
+ * seqno window is protected.
+ */
+ if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
+ seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Other host probably restarted!\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Other host probably restarted!\n");
- bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
+ bitmap_zero(seq_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
if (set_mark)
- bat_set_bit(seq_bits, 0);
+ batadv_set_bit(seq_bits, 0);
return 1;
}
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 1835c15cda41..a081ce1c0514 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -16,39 +15,40 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_BITARRAY_H_
#define _NET_BATMAN_ADV_BITARRAY_H_
/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
-static inline int bat_test_bit(const unsigned long *seq_bits,
- uint32_t last_seqno, uint32_t curr_seqno)
+ * and curr_seqno is within range of last_seqno
+ */
+static inline int batadv_test_bit(const unsigned long *seq_bits,
+ uint32_t last_seqno, uint32_t curr_seqno)
{
int32_t diff;
diff = last_seqno - curr_seqno;
- if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE)
+ if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE)
return 0;
else
return test_bit(diff, seq_bits);
}
/* turn corresponding bit on, so we can remember that we got the packet */
-static inline void bat_set_bit(unsigned long *seq_bits, int32_t n)
+static inline void batadv_set_bit(unsigned long *seq_bits, int32_t n)
{
/* if too old, just drop it */
- if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
+ if (n < 0 || n >= BATADV_TQ_LOCAL_WINDOW_SIZE)
return;
set_bit(n, seq_bits); /* turn the position on */
}
/* receive and process one packet, returns 1 if received seq_num is considered
- * new, 0 if old */
-int bit_get_packet(void *priv, unsigned long *seq_bits,
- int32_t seq_num_diff, int set_mark);
+ * new, 0 if old
+ */
+int batadv_bit_get_packet(void *priv, unsigned long *seq_bits,
+ int32_t seq_num_diff, int set_mark);
#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 8bf97515a77d..6705d35b17ce 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -33,14 +31,14 @@
#include <net/arp.h>
#include <linux/if_vlan.h>
-static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
+static const uint8_t batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
-static void bla_periodic_work(struct work_struct *work);
-static void bla_send_announce(struct bat_priv *bat_priv,
- struct backbone_gw *backbone_gw);
+static void batadv_bla_periodic_work(struct work_struct *work);
+static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
+ struct batadv_backbone_gw *backbone_gw);
/* return the index of the claim */
-static inline uint32_t choose_claim(const void *data, uint32_t size)
+static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
{
const unsigned char *key = data;
uint32_t hash = 0;
@@ -60,7 +58,8 @@ static inline uint32_t choose_claim(const void *data, uint32_t size)
}
/* return the index of the backbone gateway */
-static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
+static inline uint32_t batadv_choose_backbone_gw(const void *data,
+ uint32_t size)
{
const unsigned char *key = data;
uint32_t hash = 0;
@@ -81,74 +80,75 @@ static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
/* compares address and vid of two backbone gws */
-static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
+static int batadv_compare_backbone_gw(const struct hlist_node *node,
+ const void *data2)
{
- const void *data1 = container_of(node, struct backbone_gw,
+ const void *data1 = container_of(node, struct batadv_backbone_gw,
hash_entry);
return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
}
/* compares address and vid of two claims */
-static int compare_claim(const struct hlist_node *node, const void *data2)
+static int batadv_compare_claim(const struct hlist_node *node,
+ const void *data2)
{
- const void *data1 = container_of(node, struct claim,
+ const void *data1 = container_of(node, struct batadv_claim,
hash_entry);
return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
}
/* free a backbone gw */
-static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
+static void batadv_backbone_gw_free_ref(struct batadv_backbone_gw *backbone_gw)
{
if (atomic_dec_and_test(&backbone_gw->refcount))
kfree_rcu(backbone_gw, rcu);
}
/* finally deinitialize the claim */
-static void claim_free_rcu(struct rcu_head *rcu)
+static void batadv_claim_free_rcu(struct rcu_head *rcu)
{
- struct claim *claim;
+ struct batadv_claim *claim;
- claim = container_of(rcu, struct claim, rcu);
+ claim = container_of(rcu, struct batadv_claim, rcu);
- backbone_gw_free_ref(claim->backbone_gw);
+ batadv_backbone_gw_free_ref(claim->backbone_gw);
kfree(claim);
}
/* free a claim, call claim_free_rcu if its the last reference */
-static void claim_free_ref(struct claim *claim)
+static void batadv_claim_free_ref(struct batadv_claim *claim)
{
if (atomic_dec_and_test(&claim->refcount))
- call_rcu(&claim->rcu, claim_free_rcu);
+ call_rcu(&claim->rcu, batadv_claim_free_rcu);
}
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
* @data: search data (may be local/static data)
*
* looks for a claim in the hash, and returns it if found
* or NULL otherwise.
*/
-static struct claim *claim_hash_find(struct bat_priv *bat_priv,
- struct claim *data)
+static struct batadv_claim *batadv_claim_hash_find(struct batadv_priv *bat_priv,
+ struct batadv_claim *data)
{
- struct hashtable_t *hash = bat_priv->claim_hash;
+ struct batadv_hashtable *hash = bat_priv->claim_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct claim *claim;
- struct claim *claim_tmp = NULL;
+ struct batadv_claim *claim;
+ struct batadv_claim *claim_tmp = NULL;
int index;
if (!hash)
return NULL;
- index = choose_claim(data, hash->size);
+ index = batadv_choose_claim(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
- if (!compare_claim(&claim->hash_entry, data))
+ if (!batadv_compare_claim(&claim->hash_entry, data))
continue;
if (!atomic_inc_not_zero(&claim->refcount))
@@ -163,21 +163,22 @@ static struct claim *claim_hash_find(struct bat_priv *bat_priv,
}
/**
+ * batadv_backbone_hash_find - looks for a claim in the hash
* @bat_priv: the bat priv with all the soft interface information
* @addr: the address of the originator
* @vid: the VLAN ID
*
- * looks for a claim in the hash, and returns it if found
- * or NULL otherwise.
+ * Returns claim if found or NULL otherwise.
*/
-static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
- uint8_t *addr, short vid)
+static struct batadv_backbone_gw *
+batadv_backbone_hash_find(struct batadv_priv *bat_priv,
+ uint8_t *addr, short vid)
{
- struct hashtable_t *hash = bat_priv->backbone_hash;
+ struct batadv_hashtable *hash = bat_priv->backbone_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct backbone_gw search_entry, *backbone_gw;
- struct backbone_gw *backbone_gw_tmp = NULL;
+ struct batadv_backbone_gw search_entry, *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw_tmp = NULL;
int index;
if (!hash)
@@ -186,13 +187,13 @@ static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
memcpy(search_entry.orig, addr, ETH_ALEN);
search_entry.vid = vid;
- index = choose_backbone_gw(&search_entry, hash->size);
+ index = batadv_choose_backbone_gw(&search_entry, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
- if (!compare_backbone_gw(&backbone_gw->hash_entry,
- &search_entry))
+ if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
+ &search_entry))
continue;
if (!atomic_inc_not_zero(&backbone_gw->refcount))
@@ -207,12 +208,13 @@ static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
}
/* delete all claims for a backbone */
-static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
+static void
+batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
{
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- struct claim *claim;
+ struct batadv_claim *claim;
int i;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -231,36 +233,35 @@ static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
if (claim->backbone_gw != backbone_gw)
continue;
- claim_free_ref(claim);
+ batadv_claim_free_ref(claim);
hlist_del_rcu(node);
}
spin_unlock_bh(list_lock);
}
/* all claims gone, intialize CRC */
- backbone_gw->crc = BLA_CRC_INIT;
+ backbone_gw->crc = BATADV_BLA_CRC_INIT;
}
/**
+ * batadv_bla_send_claim - sends a claim frame according to the provided info
* @bat_priv: the bat priv with all the soft interface information
* @orig: the mac address to be announced within the claim
* @vid: the VLAN ID
* @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
- *
- * sends a claim frame according to the provided info.
*/
-static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
- short vid, int claimtype)
+static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
+ short vid, int claimtype)
{
struct sk_buff *skb;
struct ethhdr *ethhdr;
- struct hard_iface *primary_if;
+ struct batadv_hard_iface *primary_if;
struct net_device *soft_iface;
uint8_t *hw_src;
- struct bla_claim_dst local_claim_dest;
- uint32_t zeroip = 0;
+ struct batadv_bla_claim_dst local_claim_dest;
+ __be32 zeroip = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
return;
@@ -294,40 +295,41 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
/* now we pretend that the client would have sent this ... */
switch (claimtype) {
- case CLAIM_TYPE_ADD:
+ case BATADV_CLAIM_TYPE_ADD:
/* normal claim frame
* set Ethernet SRC to the clients mac
*/
memcpy(ethhdr->h_source, mac, ETH_ALEN);
- bat_dbg(DBG_BLA, bat_priv,
- "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
break;
- case CLAIM_TYPE_DEL:
+ case BATADV_CLAIM_TYPE_DEL:
/* unclaim frame
* set HW SRC to the clients mac
*/
memcpy(hw_src, mac, ETH_ALEN);
- bat_dbg(DBG_BLA, bat_priv,
- "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac,
+ vid);
break;
- case CLAIM_TYPE_ANNOUNCE:
+ case BATADV_CLAIM_TYPE_ANNOUNCE:
/* announcement frame
* set HW SRC to the special mac containg the crc
*/
memcpy(hw_src, mac, ETH_ALEN);
- bat_dbg(DBG_BLA, bat_priv,
- "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
- ethhdr->h_source, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
+ ethhdr->h_source, vid);
break;
- case CLAIM_TYPE_REQUEST:
+ case BATADV_CLAIM_TYPE_REQUEST:
/* request frame
* set HW SRC to the special mac containg the crc
*/
memcpy(hw_src, mac, ETH_ALEN);
memcpy(ethhdr->h_dest, mac, ETH_ALEN);
- bat_dbg(DBG_BLA, bat_priv,
- "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
- ethhdr->h_source, ethhdr->h_dest, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
+ ethhdr->h_source, ethhdr->h_dest, vid);
break;
}
@@ -344,10 +346,11 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
netif_rx(skb);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
/**
+ * batadv_bla_get_backbone_gw
* @bat_priv: the bat priv with all the soft interface information
* @orig: the mac address of the originator
* @vid: the VLAN ID
@@ -355,21 +358,22 @@ out:
* searches for the backbone gw or creates a new one if it could not
* be found.
*/
-static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
- uint8_t *orig, short vid)
+static struct batadv_backbone_gw *
+batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, uint8_t *orig,
+ short vid)
{
- struct backbone_gw *entry;
- struct orig_node *orig_node;
+ struct batadv_backbone_gw *entry;
+ struct batadv_orig_node *orig_node;
int hash_added;
- entry = backbone_hash_find(bat_priv, orig, vid);
+ entry = batadv_backbone_hash_find(bat_priv, orig, vid);
if (entry)
return entry;
- bat_dbg(DBG_BLA, bat_priv,
- "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
- orig, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
+ orig, vid);
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
@@ -377,7 +381,7 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
entry->vid = vid;
entry->lasttime = jiffies;
- entry->crc = BLA_CRC_INIT;
+ entry->crc = BATADV_BLA_CRC_INIT;
entry->bat_priv = bat_priv;
atomic_set(&entry->request_sent, 0);
memcpy(entry->orig, orig, ETH_ALEN);
@@ -385,8 +389,10 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
/* one for the hash, one for returning */
atomic_set(&entry->refcount, 2);
- hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
- choose_backbone_gw, entry, &entry->hash_entry);
+ hash_added = batadv_hash_add(bat_priv->backbone_hash,
+ batadv_compare_backbone_gw,
+ batadv_choose_backbone_gw, entry,
+ &entry->hash_entry);
if (unlikely(hash_added != 0)) {
/* hash failed, free the structure */
@@ -395,11 +401,11 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
}
/* this is a gateway now, remove any tt entries */
- orig_node = orig_hash_find(bat_priv, orig);
+ orig_node = batadv_orig_hash_find(bat_priv, orig);
if (orig_node) {
- tt_global_del_orig(bat_priv, orig_node,
- "became a backbone gateway");
- orig_node_free_ref(orig_node);
+ batadv_tt_global_del_orig(bat_priv, orig_node,
+ "became a backbone gateway");
+ batadv_orig_node_free_ref(orig_node);
}
return entry;
}
@@ -407,43 +413,46 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
/* update or add the own backbone gw to make sure we announce
* where we receive other backbone gws
*/
-static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- short vid)
+static void
+batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ short vid)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
- backbone_gw = bla_get_backbone_gw(bat_priv,
- primary_if->net_dev->dev_addr, vid);
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
+ primary_if->net_dev->dev_addr,
+ vid);
if (unlikely(!backbone_gw))
return;
backbone_gw->lasttime = jiffies;
- backbone_gw_free_ref(backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
}
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
* @vid: the vid where the request came on
*
* Repeat all of our own claims, and finally send an ANNOUNCE frame
* to allow the requester another check if the CRC is correct now.
*/
-static void bla_answer_request(struct bat_priv *bat_priv,
- struct hard_iface *primary_if, short vid)
+static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ short vid)
{
struct hlist_node *node;
struct hlist_head *head;
- struct hashtable_t *hash;
- struct claim *claim;
- struct backbone_gw *backbone_gw;
+ struct batadv_hashtable *hash;
+ struct batadv_claim *claim;
+ struct batadv_backbone_gw *backbone_gw;
int i;
- bat_dbg(DBG_BLA, bat_priv,
- "bla_answer_request(): received a claim request, send all of our own claims again\n");
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_answer_request(): received a claim request, send all of our own claims again\n");
- backbone_gw = backbone_hash_find(bat_priv,
- primary_if->net_dev->dev_addr, vid);
+ backbone_gw = batadv_backbone_hash_find(bat_priv,
+ primary_if->net_dev->dev_addr,
+ vid);
if (!backbone_gw)
return;
@@ -457,36 +466,34 @@ static void bla_answer_request(struct bat_priv *bat_priv,
if (claim->backbone_gw != backbone_gw)
continue;
- bla_send_claim(bat_priv, claim->addr, claim->vid,
- CLAIM_TYPE_ADD);
+ batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
+ BATADV_CLAIM_TYPE_ADD);
}
rcu_read_unlock();
}
/* finally, send an announcement frame */
- bla_send_announce(bat_priv, backbone_gw);
- backbone_gw_free_ref(backbone_gw);
+ batadv_bla_send_announce(bat_priv, backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
}
-/**
- * @backbone_gw: the backbone gateway from whom we are out of sync
+/* @backbone_gw: the backbone gateway from whom we are out of sync
*
* When the crc is wrong, ask the backbone gateway for a full table update.
* After the request, it will repeat all of his own claims and finally
* send an announcement claim with which we can check again.
*/
-static void bla_send_request(struct backbone_gw *backbone_gw)
+static void batadv_bla_send_request(struct batadv_backbone_gw *backbone_gw)
{
/* first, remove all old entries */
- bla_del_backbone_claims(backbone_gw);
+ batadv_bla_del_backbone_claims(backbone_gw);
- bat_dbg(DBG_BLA, backbone_gw->bat_priv,
- "Sending REQUEST to %pM\n",
- backbone_gw->orig);
+ batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+ "Sending REQUEST to %pM\n", backbone_gw->orig);
/* send request */
- bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
- backbone_gw->vid, CLAIM_TYPE_REQUEST);
+ batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
+ backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
/* no local broadcasts should be sent or received, for now. */
if (!atomic_read(&backbone_gw->request_sent)) {
@@ -495,45 +502,45 @@ static void bla_send_request(struct backbone_gw *backbone_gw)
}
}
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
* @backbone_gw: our backbone gateway which should be announced
*
* This function sends an announcement. It is called from multiple
* places.
*/
-static void bla_send_announce(struct bat_priv *bat_priv,
- struct backbone_gw *backbone_gw)
+static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
+ struct batadv_backbone_gw *backbone_gw)
{
uint8_t mac[ETH_ALEN];
- uint16_t crc;
+ __be16 crc;
- memcpy(mac, announce_mac, 4);
+ memcpy(mac, batadv_announce_mac, 4);
crc = htons(backbone_gw->crc);
- memcpy(&mac[4], (uint8_t *)&crc, 2);
+ memcpy(&mac[4], &crc, 2);
- bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);
+ batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
+ BATADV_CLAIM_TYPE_ANNOUNCE);
}
/**
+ * batadv_bla_add_claim - Adds a claim in the claim hash
* @bat_priv: the bat priv with all the soft interface information
* @mac: the mac address of the claim
* @vid: the VLAN ID of the frame
* @backbone_gw: the backbone gateway which claims it
- *
- * Adds a claim in the claim hash.
*/
-static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
- const short vid, struct backbone_gw *backbone_gw)
+static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
+ const uint8_t *mac, const short vid,
+ struct batadv_backbone_gw *backbone_gw)
{
- struct claim *claim;
- struct claim search_claim;
+ struct batadv_claim *claim;
+ struct batadv_claim search_claim;
int hash_added;
memcpy(search_claim.addr, mac, ETH_ALEN);
search_claim.vid = vid;
- claim = claim_hash_find(bat_priv, &search_claim);
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
/* create a new claim entry if it does not exist yet. */
if (!claim) {
@@ -547,11 +554,13 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
claim->backbone_gw = backbone_gw;
atomic_set(&claim->refcount, 2);
- bat_dbg(DBG_BLA, bat_priv,
- "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
- mac, vid);
- hash_added = hash_add(bat_priv->claim_hash, compare_claim,
- choose_claim, claim, &claim->hash_entry);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
+ mac, vid);
+ hash_added = batadv_hash_add(bat_priv->claim_hash,
+ batadv_compare_claim,
+ batadv_choose_claim, claim,
+ &claim->hash_entry);
if (unlikely(hash_added != 0)) {
/* only local changes happened. */
@@ -564,13 +573,13 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
/* no need to register a new backbone */
goto claim_free_ref;
- bat_dbg(DBG_BLA, bat_priv,
- "bla_add_claim(): changing ownership for %pM, vid %d\n",
- mac, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_add_claim(): changing ownership for %pM, vid %d\n",
+ mac, vid);
claim->backbone_gw->crc ^=
crc16(0, claim->addr, ETH_ALEN);
- backbone_gw_free_ref(claim->backbone_gw);
+ batadv_backbone_gw_free_ref(claim->backbone_gw);
}
/* set (new) backbone gw */
@@ -581,45 +590,48 @@ static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
backbone_gw->lasttime = jiffies;
claim_free_ref:
- claim_free_ref(claim);
+ batadv_claim_free_ref(claim);
}
/* Delete a claim from the claim hash which has the
* given mac address and vid.
*/
-static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
- const short vid)
+static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
+ const uint8_t *mac, const short vid)
{
- struct claim search_claim, *claim;
+ struct batadv_claim search_claim, *claim;
memcpy(search_claim.addr, mac, ETH_ALEN);
search_claim.vid = vid;
- claim = claim_hash_find(bat_priv, &search_claim);
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
if (!claim)
return;
- bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n",
+ mac, vid);
- hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
- claim_free_ref(claim); /* reference from the hash is gone */
+ batadv_hash_remove(bat_priv->claim_hash, batadv_compare_claim,
+ batadv_choose_claim, claim);
+ batadv_claim_free_ref(claim); /* reference from the hash is gone */
claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
/* don't need the reference from hash_find() anymore */
- claim_free_ref(claim);
+ batadv_claim_free_ref(claim);
}
/* check for ANNOUNCE frame, return 1 if handled */
-static int handle_announce(struct bat_priv *bat_priv,
- uint8_t *an_addr, uint8_t *backbone_addr, short vid)
+static int batadv_handle_announce(struct batadv_priv *bat_priv,
+ uint8_t *an_addr, uint8_t *backbone_addr,
+ short vid)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
uint16_t crc;
- if (memcmp(an_addr, announce_mac, 4) != 0)
+ if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
return 0;
- backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
if (unlikely(!backbone_gw))
return 1;
@@ -627,19 +639,19 @@ static int handle_announce(struct bat_priv *bat_priv,
/* handle as ANNOUNCE frame */
backbone_gw->lasttime = jiffies;
- crc = ntohs(*((uint16_t *)(&an_addr[4])));
+ crc = ntohs(*((__be16 *)(&an_addr[4])));
- bat_dbg(DBG_BLA, bat_priv,
- "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
- vid, backbone_gw->orig, crc);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
+ vid, backbone_gw->orig, crc);
if (backbone_gw->crc != crc) {
- bat_dbg(DBG_BLA, backbone_gw->bat_priv,
- "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
- backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
- crc);
+ batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+ "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
+ backbone_gw->orig, backbone_gw->vid,
+ backbone_gw->crc, crc);
- bla_send_request(backbone_gw);
+ batadv_bla_send_request(backbone_gw);
} else {
/* if we have sent a request and the crc was OK,
* we can allow traffic again.
@@ -650,88 +662,92 @@ static int handle_announce(struct bat_priv *bat_priv,
}
}
- backbone_gw_free_ref(backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
return 1;
}
/* check for REQUEST frame, return 1 if handled */
-static int handle_request(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- uint8_t *backbone_addr,
- struct ethhdr *ethhdr, short vid)
+static int batadv_handle_request(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ uint8_t *backbone_addr,
+ struct ethhdr *ethhdr, short vid)
{
/* check for REQUEST frame */
- if (!compare_eth(backbone_addr, ethhdr->h_dest))
+ if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
return 0;
/* sanity check, this should not happen on a normal switch,
* we ignore it in this case.
*/
- if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
+ if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
return 1;
- bat_dbg(DBG_BLA, bat_priv,
- "handle_request(): REQUEST vid %d (sent by %pM)...\n",
- vid, ethhdr->h_source);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "handle_request(): REQUEST vid %d (sent by %pM)...\n",
+ vid, ethhdr->h_source);
- bla_answer_request(bat_priv, primary_if, vid);
+ batadv_bla_answer_request(bat_priv, primary_if, vid);
return 1;
}
/* check for UNCLAIM frame, return 1 if handled */
-static int handle_unclaim(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- uint8_t *backbone_addr,
- uint8_t *claim_addr, short vid)
+static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ uint8_t *backbone_addr,
+ uint8_t *claim_addr, short vid)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
/* unclaim in any case if it is our own */
- if (primary_if && compare_eth(backbone_addr,
- primary_if->net_dev->dev_addr))
- bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);
+ if (primary_if && batadv_compare_eth(backbone_addr,
+ primary_if->net_dev->dev_addr))
+ batadv_bla_send_claim(bat_priv, claim_addr, vid,
+ BATADV_CLAIM_TYPE_DEL);
- backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);
+ backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
if (!backbone_gw)
return 1;
/* this must be an UNCLAIM frame */
- bat_dbg(DBG_BLA, bat_priv,
- "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
- claim_addr, vid, backbone_gw->orig);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
+ claim_addr, vid, backbone_gw->orig);
- bla_del_claim(bat_priv, claim_addr, vid);
- backbone_gw_free_ref(backbone_gw);
+ batadv_bla_del_claim(bat_priv, claim_addr, vid);
+ batadv_backbone_gw_free_ref(backbone_gw);
return 1;
}
/* check for CLAIM frame, return 1 if handled */
-static int handle_claim(struct bat_priv *bat_priv,
- struct hard_iface *primary_if, uint8_t *backbone_addr,
- uint8_t *claim_addr, short vid)
+static int batadv_handle_claim(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ uint8_t *backbone_addr, uint8_t *claim_addr,
+ short vid)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
/* register the gateway if not yet available, and add the claim. */
- backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);
+ backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid);
if (unlikely(!backbone_gw))
return 1;
/* this must be a CLAIM frame */
- bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
- if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
- bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);
+ batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
+ if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+ batadv_bla_send_claim(bat_priv, claim_addr, vid,
+ BATADV_CLAIM_TYPE_ADD);
/* TODO: we could call something like tt_local_del() here. */
- backbone_gw_free_ref(backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
return 1;
}
/**
+ * batadv_check_claim_group
* @bat_priv: the bat priv with all the soft interface information
* @hw_src: the Hardware source in the ARP Header
* @hw_dst: the Hardware destination in the ARP Header
@@ -746,16 +762,16 @@ static int handle_claim(struct bat_priv *bat_priv,
* 1 - if is a claim packet from another group
* 0 - if it is not a claim packet
*/
-static int check_claim_group(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- uint8_t *hw_src, uint8_t *hw_dst,
- struct ethhdr *ethhdr)
+static int batadv_check_claim_group(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ uint8_t *hw_src, uint8_t *hw_dst,
+ struct ethhdr *ethhdr)
{
uint8_t *backbone_addr;
- struct orig_node *orig_node;
- struct bla_claim_dst *bla_dst, *bla_dst_own;
+ struct batadv_orig_node *orig_node;
+ struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
- bla_dst = (struct bla_claim_dst *)hw_dst;
+ bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
bla_dst_own = &bat_priv->claim_dest;
/* check if it is a claim packet in general */
@@ -767,12 +783,12 @@ static int check_claim_group(struct bat_priv *bat_priv,
* otherwise assume it is in the hw_src
*/
switch (bla_dst->type) {
- case CLAIM_TYPE_ADD:
+ case BATADV_CLAIM_TYPE_ADD:
backbone_addr = hw_src;
break;
- case CLAIM_TYPE_REQUEST:
- case CLAIM_TYPE_ANNOUNCE:
- case CLAIM_TYPE_DEL:
+ case BATADV_CLAIM_TYPE_REQUEST:
+ case BATADV_CLAIM_TYPE_ANNOUNCE:
+ case BATADV_CLAIM_TYPE_DEL:
backbone_addr = ethhdr->h_source;
break;
default:
@@ -780,7 +796,7 @@ static int check_claim_group(struct bat_priv *bat_priv,
}
/* don't accept claim frames from ourselves */
- if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
+ if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
return 0;
/* if its already the same group, it is fine. */
@@ -788,7 +804,7 @@ static int check_claim_group(struct bat_priv *bat_priv,
return 2;
/* lets see if this originator is in our mesh */
- orig_node = orig_hash_find(bat_priv, backbone_addr);
+ orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
/* dont accept claims from gateways which are not in
* the same mesh or group.
@@ -798,20 +814,19 @@ static int check_claim_group(struct bat_priv *bat_priv,
/* if our mesh friends mac is bigger, use it for ourselves. */
if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
- bat_dbg(DBG_BLA, bat_priv,
- "taking other backbones claim group: %04x\n",
- ntohs(bla_dst->group));
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "taking other backbones claim group: %04x\n",
+ ntohs(bla_dst->group));
bla_dst_own->group = bla_dst->group;
}
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return 2;
}
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
*
* Check if this is a claim frame, and process it accordingly.
@@ -819,15 +834,15 @@ static int check_claim_group(struct bat_priv *bat_priv,
* returns 1 if it was a claim frame, otherwise return 0 to
* tell the callee that it can use the frame on its own.
*/
-static int bla_process_claim(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- struct sk_buff *skb)
+static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct sk_buff *skb)
{
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
struct arphdr *arphdr;
uint8_t *hw_src, *hw_dst;
- struct bla_claim_dst *bla_dst;
+ struct batadv_bla_claim_dst *bla_dst;
uint16_t proto;
int headlen;
short vid = -1;
@@ -860,7 +875,6 @@ static int bla_process_claim(struct bat_priv *bat_priv,
/* Check whether the ARP frame carries a valid
* IP information
*/
-
if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
return 0;
if (arphdr->ar_pro != htons(ETH_P_IP))
@@ -872,59 +886,62 @@ static int bla_process_claim(struct bat_priv *bat_priv,
hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
hw_dst = hw_src + ETH_ALEN + 4;
- bla_dst = (struct bla_claim_dst *)hw_dst;
+ bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
/* check if it is a claim frame. */
- ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
+ ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
+ ethhdr);
if (ret == 1)
- bat_dbg(DBG_BLA, bat_priv,
- "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
- ethhdr->h_source, vid, hw_src, hw_dst);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ ethhdr->h_source, vid, hw_src, hw_dst);
if (ret < 2)
return ret;
/* become a backbone gw ourselves on this vlan if not happened yet */
- bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
/* check for the different types of claim frames ... */
switch (bla_dst->type) {
- case CLAIM_TYPE_ADD:
- if (handle_claim(bat_priv, primary_if, hw_src,
- ethhdr->h_source, vid))
+ case BATADV_CLAIM_TYPE_ADD:
+ if (batadv_handle_claim(bat_priv, primary_if, hw_src,
+ ethhdr->h_source, vid))
return 1;
break;
- case CLAIM_TYPE_DEL:
- if (handle_unclaim(bat_priv, primary_if,
- ethhdr->h_source, hw_src, vid))
+ case BATADV_CLAIM_TYPE_DEL:
+ if (batadv_handle_unclaim(bat_priv, primary_if,
+ ethhdr->h_source, hw_src, vid))
return 1;
break;
- case CLAIM_TYPE_ANNOUNCE:
- if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
+ case BATADV_CLAIM_TYPE_ANNOUNCE:
+ if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
+ vid))
return 1;
break;
- case CLAIM_TYPE_REQUEST:
- if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
+ case BATADV_CLAIM_TYPE_REQUEST:
+ if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
+ vid))
return 1;
break;
}
- bat_dbg(DBG_BLA, bat_priv,
- "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
- ethhdr->h_source, vid, hw_src, hw_dst);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
+ ethhdr->h_source, vid, hw_src, hw_dst);
return 1;
}
/* Check when we last heard from other nodes, and remove them in case of
* a time out, or clean all backbone gws if now is set.
*/
-static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
+static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
int i;
@@ -941,29 +958,30 @@ static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
head, hash_entry) {
if (now)
goto purge_now;
- if (!has_timed_out(backbone_gw->lasttime,
- BLA_BACKBONE_TIMEOUT))
+ if (!batadv_has_timed_out(backbone_gw->lasttime,
+ BATADV_BLA_BACKBONE_TIMEOUT))
continue;
- bat_dbg(DBG_BLA, backbone_gw->bat_priv,
- "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
- backbone_gw->orig);
+ batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
+ "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
+ backbone_gw->orig);
purge_now:
/* don't wait for the pending request anymore */
if (atomic_read(&backbone_gw->request_sent))
atomic_dec(&bat_priv->bla_num_requests);
- bla_del_backbone_claims(backbone_gw);
+ batadv_bla_del_backbone_claims(backbone_gw);
hlist_del_rcu(node);
- backbone_gw_free_ref(backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
}
spin_unlock_bh(list_lock);
}
}
/**
+ * batadv_bla_purge_claims
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the selected primary interface, may be NULL if now is set
* @now: whether the whole hash shall be wiped now
@@ -971,13 +989,14 @@ purge_now:
* Check when we heard last time from our own claims, and remove them in case of
* a time out, or clean all claims if now is set
*/
-static void bla_purge_claims(struct bat_priv *bat_priv,
- struct hard_iface *primary_if, int now)
+static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ int now)
{
- struct claim *claim;
+ struct batadv_claim *claim;
struct hlist_node *node;
struct hlist_head *head;
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
int i;
hash = bat_priv->claim_hash;
@@ -991,42 +1010,42 @@ static void bla_purge_claims(struct bat_priv *bat_priv,
hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
if (now)
goto purge_now;
- if (!compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr))
+ if (!batadv_compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
continue;
- if (!has_timed_out(claim->lasttime,
- BLA_CLAIM_TIMEOUT))
+ if (!batadv_has_timed_out(claim->lasttime,
+ BATADV_BLA_CLAIM_TIMEOUT))
continue;
- bat_dbg(DBG_BLA, bat_priv,
- "bla_purge_claims(): %pM, vid %d, time out\n",
- claim->addr, claim->vid);
+ batadv_dbg(BATADV_DBG_BLA, bat_priv,
+ "bla_purge_claims(): %pM, vid %d, time out\n",
+ claim->addr, claim->vid);
purge_now:
- handle_unclaim(bat_priv, primary_if,
- claim->backbone_gw->orig,
- claim->addr, claim->vid);
+ batadv_handle_unclaim(bat_priv, primary_if,
+ claim->backbone_gw->orig,
+ claim->addr, claim->vid);
}
rcu_read_unlock();
}
}
/**
+ * batadv_bla_update_orig_address
* @bat_priv: the bat priv with all the soft interface information
* @primary_if: the new selected primary_if
* @oldif: the old primary interface, may be NULL
*
* Update the backbone gateways when the own orig address changes.
- *
*/
-void bla_update_orig_address(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- struct hard_iface *oldif)
+void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hard_iface *oldif)
{
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
struct hlist_node *node;
struct hlist_head *head;
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
int i;
/* reset bridge loop avoidance group id */
@@ -1034,8 +1053,8 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
if (!oldif) {
- bla_purge_claims(bat_priv, NULL, 1);
- bla_purge_backbone_gw(bat_priv, 1);
+ batadv_bla_purge_claims(bat_priv, NULL, 1);
+ batadv_bla_purge_backbone_gw(bat_priv, 1);
return;
}
@@ -1049,8 +1068,8 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
/* own orig still holds the old value. */
- if (!compare_eth(backbone_gw->orig,
- oldif->net_dev->dev_addr))
+ if (!batadv_compare_eth(backbone_gw->orig,
+ oldif->net_dev->dev_addr))
continue;
memcpy(backbone_gw->orig,
@@ -1058,7 +1077,7 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
/* send an announce frame so others will ask for our
* claims and update their tables.
*/
- bla_send_announce(bat_priv, backbone_gw);
+ batadv_bla_send_announce(bat_priv, backbone_gw);
}
rcu_read_unlock();
}
@@ -1067,36 +1086,36 @@ void bla_update_orig_address(struct bat_priv *bat_priv,
/* (re)start the timer */
-static void bla_start_timer(struct bat_priv *bat_priv)
+static void batadv_bla_start_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
- queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
- msecs_to_jiffies(BLA_PERIOD_LENGTH));
+ INIT_DELAYED_WORK(&bat_priv->bla_work, batadv_bla_periodic_work);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work,
+ msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
}
/* periodic work to do:
* * purge structures when they are too old
* * send announcements
*/
-static void bla_periodic_work(struct work_struct *work)
+static void batadv_bla_periodic_work(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, bla_work);
+ struct batadv_priv *bat_priv;
struct hlist_node *node;
struct hlist_head *head;
- struct backbone_gw *backbone_gw;
- struct hashtable_t *hash;
- struct hard_iface *primary_if;
+ struct batadv_backbone_gw *backbone_gw;
+ struct batadv_hashtable *hash;
+ struct batadv_hard_iface *primary_if;
int i;
- primary_if = primary_if_get_selected(bat_priv);
+ bat_priv = container_of(delayed_work, struct batadv_priv, bla_work);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- bla_purge_claims(bat_priv, primary_if, 0);
- bla_purge_backbone_gw(bat_priv, 0);
+ batadv_bla_purge_claims(bat_priv, primary_if, 0);
+ batadv_bla_purge_backbone_gw(bat_priv, 0);
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
goto out;
@@ -1110,67 +1129,81 @@ static void bla_periodic_work(struct work_struct *work)
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
- if (!compare_eth(backbone_gw->orig,
- primary_if->net_dev->dev_addr))
+ if (!batadv_compare_eth(backbone_gw->orig,
+ primary_if->net_dev->dev_addr))
continue;
backbone_gw->lasttime = jiffies;
- bla_send_announce(bat_priv, backbone_gw);
+ batadv_bla_send_announce(bat_priv, backbone_gw);
}
rcu_read_unlock();
}
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
- bla_start_timer(bat_priv);
+ batadv_bla_start_timer(bat_priv);
}
+/* The hash for claim and backbone hash receive the same key because they
+ * are getting initialized by hash_new with the same key. Reinitializing
+ * them with to different keys to allow nested locking without generating
+ * lockdep warnings
+ */
+static struct lock_class_key batadv_claim_hash_lock_class_key;
+static struct lock_class_key batadv_backbone_hash_lock_class_key;
+
/* initialize all bla structures */
-int bla_init(struct bat_priv *bat_priv)
+int batadv_bla_init(struct batadv_priv *bat_priv)
{
int i;
uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
- struct hard_iface *primary_if;
+ struct batadv_hard_iface *primary_if;
- bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
/* setting claim destination address */
memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
bat_priv->claim_dest.type = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (primary_if) {
bat_priv->claim_dest.group =
htons(crc16(0, primary_if->net_dev->dev_addr,
ETH_ALEN));
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
} else {
bat_priv->claim_dest.group = 0; /* will be set later */
}
/* initialize the duplicate list */
- for (i = 0; i < DUPLIST_SIZE; i++)
+ for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
bat_priv->bcast_duplist[i].entrytime =
- jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
+ jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
bat_priv->bcast_duplist_curr = 0;
if (bat_priv->claim_hash)
- return 1;
+ return 0;
- bat_priv->claim_hash = hash_new(128);
- bat_priv->backbone_hash = hash_new(32);
+ bat_priv->claim_hash = batadv_hash_new(128);
+ bat_priv->backbone_hash = batadv_hash_new(32);
if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
- return -1;
+ return -ENOMEM;
- bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
+ batadv_hash_set_lock_class(bat_priv->claim_hash,
+ &batadv_claim_hash_lock_class_key);
+ batadv_hash_set_lock_class(bat_priv->backbone_hash,
+ &batadv_backbone_hash_lock_class_key);
- bla_start_timer(bat_priv);
- return 1;
+ batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
+
+ batadv_bla_start_timer(bat_priv);
+ return 0;
}
/**
+ * batadv_bla_check_bcast_duplist
* @bat_priv: the bat priv with all the soft interface information
* @bcast_packet: originator mac address
* @hdr_size: maximum length of the frame
@@ -1183,17 +1216,15 @@ int bla_init(struct bat_priv *bat_priv)
* with a good chance that it is the same packet. If it is furthermore
* sent by another host, drop it. We allow equal packets from
* the same host however as this might be intended.
- *
- **/
-
-int bla_check_bcast_duplist(struct bat_priv *bat_priv,
- struct bcast_packet *bcast_packet,
- int hdr_size)
+ */
+int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct batadv_bcast_packet *bcast_packet,
+ int hdr_size)
{
int i, length, curr;
uint8_t *content;
uint16_t crc;
- struct bcast_duplist_entry *entry;
+ struct batadv_bcast_duplist_entry *entry;
length = hdr_size - sizeof(*bcast_packet);
content = (uint8_t *)bcast_packet;
@@ -1202,20 +1233,21 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
/* calculate the crc ... */
crc = crc16(0, content, length);
- for (i = 0 ; i < DUPLIST_SIZE; i++) {
- curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
+ for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
+ curr = (bat_priv->bcast_duplist_curr + i) % BATADV_DUPLIST_SIZE;
entry = &bat_priv->bcast_duplist[curr];
/* we can stop searching if the entry is too old ;
* later entries will be even older
*/
- if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
+ if (batadv_has_timed_out(entry->entrytime,
+ BATADV_DUPLIST_TIMEOUT))
break;
if (entry->crc != crc)
continue;
- if (compare_eth(entry->orig, bcast_packet->orig))
+ if (batadv_compare_eth(entry->orig, bcast_packet->orig))
continue;
/* this entry seems to match: same crc, not too old,
@@ -1224,7 +1256,8 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
return 1;
}
/* not found, add a new entry (overwrite the oldest entry) */
- curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
+ curr = (bat_priv->bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
+ curr %= BATADV_DUPLIST_SIZE;
entry = &bat_priv->bcast_duplist[curr];
entry->crc = crc;
entry->entrytime = jiffies;
@@ -1237,22 +1270,19 @@ int bla_check_bcast_duplist(struct bat_priv *bat_priv,
-/**
- * @bat_priv: the bat priv with all the soft interface information
+/* @bat_priv: the bat priv with all the soft interface information
* @orig: originator mac address
*
* check if the originator is a gateway for any VLAN ID.
*
* returns 1 if it is found, 0 otherwise
- *
*/
-
-int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
+int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig)
{
- struct hashtable_t *hash = bat_priv->backbone_hash;
+ struct batadv_hashtable *hash = bat_priv->backbone_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
int i;
if (!atomic_read(&bat_priv->bridge_loop_avoidance))
@@ -1266,7 +1296,7 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
rcu_read_lock();
hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
- if (compare_eth(backbone_gw->orig, orig)) {
+ if (batadv_compare_eth(backbone_gw->orig, orig)) {
rcu_read_unlock();
return 1;
}
@@ -1279,6 +1309,7 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
/**
+ * batadv_bla_is_backbone_gw
* @skb: the frame to be checked
* @orig_node: the orig_node of the frame
* @hdr_size: maximum length of the frame
@@ -1286,14 +1317,13 @@ int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
* bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
* if the orig_node is also a gateway on the soft interface, otherwise it
* returns 0.
- *
*/
-int bla_is_backbone_gw(struct sk_buff *skb,
- struct orig_node *orig_node, int hdr_size)
+int batadv_bla_is_backbone_gw(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node, int hdr_size)
{
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
short vid = -1;
if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
@@ -1315,42 +1345,43 @@ int bla_is_backbone_gw(struct sk_buff *skb,
}
/* see if this originator is a backbone gw for this VLAN */
-
- backbone_gw = backbone_hash_find(orig_node->bat_priv,
- orig_node->orig, vid);
+ backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
+ orig_node->orig, vid);
if (!backbone_gw)
return 0;
- backbone_gw_free_ref(backbone_gw);
+ batadv_backbone_gw_free_ref(backbone_gw);
return 1;
}
/* free all bla structures (for softinterface free or module unload) */
-void bla_free(struct bat_priv *bat_priv)
+void batadv_bla_free(struct batadv_priv *bat_priv)
{
- struct hard_iface *primary_if;
+ struct batadv_hard_iface *primary_if;
cancel_delayed_work_sync(&bat_priv->bla_work);
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (bat_priv->claim_hash) {
- bla_purge_claims(bat_priv, primary_if, 1);
- hash_destroy(bat_priv->claim_hash);
+ batadv_bla_purge_claims(bat_priv, primary_if, 1);
+ batadv_hash_destroy(bat_priv->claim_hash);
bat_priv->claim_hash = NULL;
}
if (bat_priv->backbone_hash) {
- bla_purge_backbone_gw(bat_priv, 1);
- hash_destroy(bat_priv->backbone_hash);
+ batadv_bla_purge_backbone_gw(bat_priv, 1);
+ batadv_hash_destroy(bat_priv->backbone_hash);
bat_priv->backbone_hash = NULL;
}
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
/**
+ * batadv_bla_rx
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
+ * @is_bcast: the packet came in a broadcast packet type.
*
* bla_rx avoidance checks if:
* * we have to race for a claim
@@ -1359,18 +1390,18 @@ void bla_free(struct bat_priv *bat_priv)
* in these cases, the skb is further handled by this function and
* returns 1, otherwise it returns 0 and the caller shall further
* process the skb.
- *
*/
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+ bool is_bcast)
{
struct ethhdr *ethhdr;
- struct claim search_claim, *claim = NULL;
- struct hard_iface *primary_if;
+ struct batadv_claim search_claim, *claim = NULL;
+ struct batadv_hard_iface *primary_if;
int ret;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto handled;
@@ -1380,47 +1411,52 @@ int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
/* don't allow broadcasts while requests are in flight */
- if (is_multicast_ether_addr(ethhdr->h_dest))
+ if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
goto handled;
memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
search_claim.vid = vid;
- claim = claim_hash_find(bat_priv, &search_claim);
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
if (!claim) {
/* possible optimization: race for a claim */
/* No claim exists yet, claim it for us!
*/
- handle_claim(bat_priv, primary_if,
- primary_if->net_dev->dev_addr,
- ethhdr->h_source, vid);
+ batadv_handle_claim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
goto allow;
}
/* if it is our own claim ... */
- if (compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ if (batadv_compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr)) {
/* ... allow it in any case */
claim->lasttime = jiffies;
goto allow;
}
/* if it is a broadcast ... */
- if (is_multicast_ether_addr(ethhdr->h_dest)) {
- /* ... drop it. the responsible gateway is in charge. */
+ if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
+ /* ... drop it. the responsible gateway is in charge.
+ *
+ * We need to check is_bcast because with the gateway
+ * feature, broadcasts (like DHCP requests) may be sent
+ * using a unicast packet type.
+ */
goto handled;
} else {
/* seems the client considers us as its best gateway.
* send a claim and update the claim table
* immediately.
*/
- handle_claim(bat_priv, primary_if,
- primary_if->net_dev->dev_addr,
- ethhdr->h_source, vid);
+ batadv_handle_claim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
goto allow;
}
allow:
- bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
ret = 0;
goto out;
@@ -1430,13 +1466,14 @@ handled:
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (claim)
- claim_free_ref(claim);
+ batadv_claim_free_ref(claim);
return ret;
}
/**
+ * batadv_bla_tx
* @bat_priv: the bat priv with all the soft interface information
* @skb: the frame to be checked
* @vid: the VLAN ID of the frame
@@ -1448,16 +1485,15 @@ out:
* in these cases, the skb is further handled by this function and
* returns 1, otherwise it returns 0 and the caller shall further
* process the skb.
- *
*/
-int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
+int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid)
{
struct ethhdr *ethhdr;
- struct claim search_claim, *claim = NULL;
- struct hard_iface *primary_if;
+ struct batadv_claim search_claim, *claim = NULL;
+ struct batadv_hard_iface *primary_if;
int ret = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
@@ -1467,7 +1503,7 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
/* in VLAN case, the mac header might not be set. */
skb_reset_mac_header(skb);
- if (bla_process_claim(bat_priv, primary_if, skb))
+ if (batadv_bla_process_claim(bat_priv, primary_if, skb))
goto handled;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1480,21 +1516,21 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
search_claim.vid = vid;
- claim = claim_hash_find(bat_priv, &search_claim);
+ claim = batadv_claim_hash_find(bat_priv, &search_claim);
/* if no claim exists, allow it. */
if (!claim)
goto allow;
/* check if we are responsible. */
- if (compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr)) {
+ if (batadv_compare_eth(claim->backbone_gw->orig,
+ primary_if->net_dev->dev_addr)) {
/* if yes, the client has roamed and we have
* to unclaim it.
*/
- handle_unclaim(bat_priv, primary_if,
- primary_if->net_dev->dev_addr,
- ethhdr->h_source, vid);
+ batadv_handle_unclaim(bat_priv, primary_if,
+ primary_if->net_dev->dev_addr,
+ ethhdr->h_source, vid);
goto allow;
}
@@ -1511,33 +1547,34 @@ int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
goto allow;
}
allow:
- bla_update_own_backbone_gw(bat_priv, primary_if, vid);
+ batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
ret = 0;
goto out;
handled:
ret = 1;
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (claim)
- claim_free_ref(claim);
+ batadv_claim_free_ref(claim);
return ret;
}
-int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hashtable_t *hash = bat_priv->claim_hash;
- struct claim *claim;
- struct hard_iface *primary_if;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->claim_hash;
+ struct batadv_claim *claim;
+ struct batadv_hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
bool is_own;
int ret = 0;
+ uint8_t *primary_addr;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -1545,16 +1582,17 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
goto out;
}
- if (primary_if->if_status != IF_ACTIVE) {
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
goto out;
}
+ primary_addr = primary_if->net_dev->dev_addr;
seq_printf(seq,
"Claims announced for the mesh %s (orig %pM, group id %04x)\n",
- net_dev->name, primary_if->net_dev->dev_addr,
+ net_dev->name, primary_addr,
ntohs(bat_priv->claim_dest.group));
seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
"Client", "VID", "Originator", "CRC");
@@ -1563,8 +1601,8 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
- is_own = compare_eth(claim->backbone_gw->orig,
- primary_if->net_dev->dev_addr);
+ is_own = batadv_compare_eth(claim->backbone_gw->orig,
+ primary_addr);
seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
claim->addr, claim->vid,
claim->backbone_gw->orig,
@@ -1575,6 +1613,6 @@ int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
}
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index e39f93acc28f..563cfbf94a7f 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -16,80 +15,84 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_BLA_H_
#define _NET_BATMAN_ADV_BLA_H_
#ifdef CONFIG_BATMAN_ADV_BLA
-int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
-int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
-int bla_is_backbone_gw(struct sk_buff *skb,
- struct orig_node *orig_node, int hdr_size);
-int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
-int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig);
-int bla_check_bcast_duplist(struct bat_priv *bat_priv,
- struct bcast_packet *bcast_packet, int hdr_size);
-void bla_update_orig_address(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- struct hard_iface *oldif);
-int bla_init(struct bat_priv *bat_priv);
-void bla_free(struct bat_priv *bat_priv);
+int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid,
+ bool is_bcast);
+int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, short vid);
+int batadv_bla_is_backbone_gw(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node, int hdr_size);
+int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, uint8_t *orig);
+int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct batadv_bcast_packet *bcast_packet,
+ int hdr_size);
+void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hard_iface *oldif);
+int batadv_bla_init(struct batadv_priv *bat_priv);
+void batadv_bla_free(struct batadv_priv *bat_priv);
-#define BLA_CRC_INIT 0
+#define BATADV_BLA_CRC_INIT 0
#else /* ifdef CONFIG_BATMAN_ADV_BLA */
-static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
- short vid)
+static inline int batadv_bla_rx(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, short vid,
+ bool is_bcast)
{
return 0;
}
-static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb,
- short vid)
+static inline int batadv_bla_tx(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, short vid)
{
return 0;
}
-static inline int bla_is_backbone_gw(struct sk_buff *skb,
- struct orig_node *orig_node,
- int hdr_size)
+static inline int batadv_bla_is_backbone_gw(struct sk_buff *skb,
+ struct batadv_orig_node *orig_node,
+ int hdr_size)
{
return 0;
}
-static inline int bla_claim_table_seq_print_text(struct seq_file *seq,
- void *offset)
+static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq,
+ void *offset)
{
return 0;
}
-static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv,
- uint8_t *orig)
+static inline int batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv,
+ uint8_t *orig)
{
return 0;
}
-static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv,
- struct bcast_packet *bcast_packet,
- int hdr_size)
+static inline int
+batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
+ struct batadv_bcast_packet *bcast_packet,
+ int hdr_size)
{
return 0;
}
-static inline void bla_update_orig_address(struct bat_priv *bat_priv,
- struct hard_iface *primary_if,
- struct hard_iface *oldif)
+static inline void
+batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *primary_if,
+ struct batadv_hard_iface *oldif)
{
}
-static inline int bla_init(struct bat_priv *bat_priv)
+static inline int batadv_bla_init(struct batadv_priv *bat_priv)
{
return 1;
}
-static inline void bla_free(struct bat_priv *bat_priv)
+static inline void batadv_bla_free(struct batadv_priv *bat_priv)
{
}
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
new file mode 100644
index 000000000000..34fbb1667bcd
--- /dev/null
+++ b/net/batman-adv/debugfs.c
@@ -0,0 +1,409 @@
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "main.h"
+
+#include <linux/debugfs.h>
+
+#include "debugfs.h"
+#include "translation-table.h"
+#include "originator.h"
+#include "hard-interface.h"
+#include "gateway_common.h"
+#include "gateway_client.h"
+#include "soft-interface.h"
+#include "vis.h"
+#include "icmp_socket.h"
+#include "bridge_loop_avoidance.h"
+
+static struct dentry *batadv_debugfs;
+
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+#define BATADV_LOG_BUFF_MASK (batadv_log_buff_len - 1)
+
+static const int batadv_log_buff_len = BATADV_LOG_BUF_LEN;
+
+static char *batadv_log_char_addr(struct batadv_debug_log *debug_log,
+ size_t idx)
+{
+ return &debug_log->log_buff[idx & BATADV_LOG_BUFF_MASK];
+}
+
+static void batadv_emit_log_char(struct batadv_debug_log *debug_log, char c)
+{
+ char *char_addr;
+
+ char_addr = batadv_log_char_addr(debug_log, debug_log->log_end);
+ *char_addr = c;
+ debug_log->log_end++;
+
+ if (debug_log->log_end - debug_log->log_start > batadv_log_buff_len)
+ debug_log->log_start = debug_log->log_end - batadv_log_buff_len;
+}
+
+__printf(2, 3)
+static int batadv_fdebug_log(struct batadv_debug_log *debug_log,
+ const char *fmt, ...)
+{
+ va_list args;
+ static char debug_log_buf[256];
+ char *p;
+
+ if (!debug_log)
+ return 0;
+
+ spin_lock_bh(&debug_log->lock);
+ va_start(args, fmt);
+ vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
+ va_end(args);
+
+ for (p = debug_log_buf; *p != 0; p++)
+ batadv_emit_log_char(debug_log, *p);
+
+ spin_unlock_bh(&debug_log->lock);
+
+ wake_up(&debug_log->queue_wait);
+
+ return 0;
+}
+
+int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
+{
+ va_list args;
+ char tmp_log_buf[256];
+
+ va_start(args, fmt);
+ vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
+ batadv_fdebug_log(bat_priv->debug_log, "[%10u] %s",
+ jiffies_to_msecs(jiffies), tmp_log_buf);
+ va_end(args);
+
+ return 0;
+}
+
+static int batadv_log_open(struct inode *inode, struct file *file)
+{
+ nonseekable_open(inode, file);
+ file->private_data = inode->i_private;
+ batadv_inc_module_count();
+ return 0;
+}
+
+static int batadv_log_release(struct inode *inode, struct file *file)
+{
+ batadv_dec_module_count();
+ return 0;
+}
+
+static int batadv_log_empty(struct batadv_debug_log *debug_log)
+{
+ return !(debug_log->log_start - debug_log->log_end);
+}
+
+static ssize_t batadv_log_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct batadv_priv *bat_priv = file->private_data;
+ struct batadv_debug_log *debug_log = bat_priv->debug_log;
+ int error, i = 0;
+ char *char_addr;
+ char c;
+
+ if ((file->f_flags & O_NONBLOCK) && batadv_log_empty(debug_log))
+ return -EAGAIN;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (count == 0)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE, buf, count))
+ return -EFAULT;
+
+ error = wait_event_interruptible(debug_log->queue_wait,
+ (!batadv_log_empty(debug_log)));
+
+ if (error)
+ return error;
+
+ spin_lock_bh(&debug_log->lock);
+
+ while ((!error) && (i < count) &&
+ (debug_log->log_start != debug_log->log_end)) {
+ char_addr = batadv_log_char_addr(debug_log,
+ debug_log->log_start);
+ c = *char_addr;
+
+ debug_log->log_start++;
+
+ spin_unlock_bh(&debug_log->lock);
+
+ error = __put_user(c, buf);
+
+ spin_lock_bh(&debug_log->lock);
+
+ buf++;
+ i++;
+
+ }
+
+ spin_unlock_bh(&debug_log->lock);
+
+ if (!error)
+ return i;
+
+ return error;
+}
+
+static unsigned int batadv_log_poll(struct file *file, poll_table *wait)
+{
+ struct batadv_priv *bat_priv = file->private_data;
+ struct batadv_debug_log *debug_log = bat_priv->debug_log;
+
+ poll_wait(file, &debug_log->queue_wait, wait);
+
+ if (!batadv_log_empty(debug_log))
+ return POLLIN | POLLRDNORM;
+
+ return 0;
+}
+
+static const struct file_operations batadv_log_fops = {
+ .open = batadv_log_open,
+ .release = batadv_log_release,
+ .read = batadv_log_read,
+ .poll = batadv_log_poll,
+ .llseek = no_llseek,
+};
+
+static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
+{
+ struct dentry *d;
+
+ if (!bat_priv->debug_dir)
+ goto err;
+
+ bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
+ if (!bat_priv->debug_log)
+ goto err;
+
+ spin_lock_init(&bat_priv->debug_log->lock);
+ init_waitqueue_head(&bat_priv->debug_log->queue_wait);
+
+ d = debugfs_create_file("log", S_IFREG | S_IRUSR,
+ bat_priv->debug_dir, bat_priv,
+ &batadv_log_fops);
+ if (!d)
+ goto err;
+
+ return 0;
+
+err:
+ return -ENOMEM;
+}
+
+static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
+{
+ kfree(bat_priv->debug_log);
+ bat_priv->debug_log = NULL;
+}
+#else /* CONFIG_BATMAN_ADV_DEBUG */
+static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
+{
+ bat_priv->debug_log = NULL;
+ return 0;
+}
+
+static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
+{
+ return;
+}
+#endif
+
+static int batadv_algorithms_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, batadv_algo_seq_print_text, NULL);
+}
+
+static int batadv_originators_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_orig_seq_print_text, net_dev);
+}
+
+static int batadv_gateways_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_gw_client_seq_print_text, net_dev);
+}
+
+static int batadv_transtable_global_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_tt_global_seq_print_text, net_dev);
+}
+
+#ifdef CONFIG_BATMAN_ADV_BLA
+static int batadv_bla_claim_table_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_bla_claim_table_seq_print_text,
+ net_dev);
+}
+#endif
+
+static int batadv_transtable_local_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_tt_local_seq_print_text, net_dev);
+}
+
+static int batadv_vis_data_open(struct inode *inode, struct file *file)
+{
+ struct net_device *net_dev = (struct net_device *)inode->i_private;
+ return single_open(file, batadv_vis_seq_print_text, net_dev);
+}
+
+struct batadv_debuginfo {
+ struct attribute attr;
+ const struct file_operations fops;
+};
+
+#define BATADV_DEBUGINFO(_name, _mode, _open) \
+struct batadv_debuginfo batadv_debuginfo_##_name = { \
+ .attr = { .name = __stringify(_name), \
+ .mode = _mode, }, \
+ .fops = { .owner = THIS_MODULE, \
+ .open = _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ } \
+};
+
+static BATADV_DEBUGINFO(routing_algos, S_IRUGO, batadv_algorithms_open);
+static BATADV_DEBUGINFO(originators, S_IRUGO, batadv_originators_open);
+static BATADV_DEBUGINFO(gateways, S_IRUGO, batadv_gateways_open);
+static BATADV_DEBUGINFO(transtable_global, S_IRUGO,
+ batadv_transtable_global_open);
+#ifdef CONFIG_BATMAN_ADV_BLA
+static BATADV_DEBUGINFO(bla_claim_table, S_IRUGO, batadv_bla_claim_table_open);
+#endif
+static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
+ batadv_transtable_local_open);
+static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
+
+static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
+ &batadv_debuginfo_originators,
+ &batadv_debuginfo_gateways,
+ &batadv_debuginfo_transtable_global,
+#ifdef CONFIG_BATMAN_ADV_BLA
+ &batadv_debuginfo_bla_claim_table,
+#endif
+ &batadv_debuginfo_transtable_local,
+ &batadv_debuginfo_vis_data,
+ NULL,
+};
+
+void batadv_debugfs_init(void)
+{
+ struct batadv_debuginfo *bat_debug;
+ struct dentry *file;
+
+ batadv_debugfs = debugfs_create_dir(BATADV_DEBUGFS_SUBDIR, NULL);
+ if (batadv_debugfs == ERR_PTR(-ENODEV))
+ batadv_debugfs = NULL;
+
+ if (!batadv_debugfs)
+ goto out;
+
+ bat_debug = &batadv_debuginfo_routing_algos;
+ file = debugfs_create_file(bat_debug->attr.name,
+ S_IFREG | bat_debug->attr.mode,
+ batadv_debugfs, NULL, &bat_debug->fops);
+ if (!file)
+ pr_err("Can't add debugfs file: %s\n", bat_debug->attr.name);
+
+out:
+ return;
+}
+
+void batadv_debugfs_destroy(void)
+{
+ if (batadv_debugfs) {
+ debugfs_remove_recursive(batadv_debugfs);
+ batadv_debugfs = NULL;
+ }
+}
+
+int batadv_debugfs_add_meshif(struct net_device *dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_debuginfo **bat_debug;
+ struct dentry *file;
+
+ if (!batadv_debugfs)
+ goto out;
+
+ bat_priv->debug_dir = debugfs_create_dir(dev->name, batadv_debugfs);
+ if (!bat_priv->debug_dir)
+ goto out;
+
+ if (batadv_socket_setup(bat_priv) < 0)
+ goto rem_attr;
+
+ if (batadv_debug_log_setup(bat_priv) < 0)
+ goto rem_attr;
+
+ for (bat_debug = batadv_mesh_debuginfos; *bat_debug; ++bat_debug) {
+ file = debugfs_create_file(((*bat_debug)->attr).name,
+ S_IFREG | ((*bat_debug)->attr).mode,
+ bat_priv->debug_dir,
+ dev, &(*bat_debug)->fops);
+ if (!file) {
+ batadv_err(dev, "Can't add debugfs file: %s/%s\n",
+ dev->name, ((*bat_debug)->attr).name);
+ goto rem_attr;
+ }
+ }
+
+ return 0;
+rem_attr:
+ debugfs_remove_recursive(bat_priv->debug_dir);
+ bat_priv->debug_dir = NULL;
+out:
+#ifdef CONFIG_DEBUG_FS
+ return -ENOMEM;
+#else
+ return 0;
+#endif /* CONFIG_DEBUG_FS */
+}
+
+void batadv_debugfs_del_meshif(struct net_device *dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+
+ batadv_debug_log_cleanup(bat_priv);
+
+ if (batadv_debugfs) {
+ debugfs_remove_recursive(bat_priv->debug_dir);
+ bat_priv->debug_dir = NULL;
+ }
+}
diff --git a/net/batman-adv/bat_debugfs.h b/net/batman-adv/debugfs.h
index d605c6746428..3319e1f21f55 100644
--- a/net/batman-adv/bat_debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,18 +15,16 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
-
#ifndef _NET_BATMAN_ADV_DEBUGFS_H_
#define _NET_BATMAN_ADV_DEBUGFS_H_
-#define DEBUGFS_BAT_SUBDIR "batman_adv"
+#define BATADV_DEBUGFS_SUBDIR "batman_adv"
-void debugfs_init(void);
-void debugfs_destroy(void);
-int debugfs_add_meshif(struct net_device *dev);
-void debugfs_del_meshif(struct net_device *dev);
+void batadv_debugfs_init(void);
+void batadv_debugfs_destroy(void);
+int batadv_debugfs_add_meshif(struct net_device *dev);
+void batadv_debugfs_del_meshif(struct net_device *dev);
#endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 47f7186dcefc..b421cc49d2cd 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,11 +15,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
-#include "bat_sysfs.h"
+#include "sysfs.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
@@ -33,19 +31,21 @@
#include <linux/if_vlan.h>
/* This is the offset of the options field in a dhcp packet starting at
- * the beginning of the dhcp header */
-#define DHCP_OPTIONS_OFFSET 240
-#define DHCP_REQUEST 3
+ * the beginning of the dhcp header
+ */
+#define BATADV_DHCP_OPTIONS_OFFSET 240
+#define BATADV_DHCP_REQUEST 3
-static void gw_node_free_ref(struct gw_node *gw_node)
+static void batadv_gw_node_free_ref(struct batadv_gw_node *gw_node)
{
if (atomic_dec_and_test(&gw_node->refcount))
kfree_rcu(gw_node, rcu);
}
-static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv)
+static struct batadv_gw_node *
+batadv_gw_get_selected_gw_node(struct batadv_priv *bat_priv)
{
- struct gw_node *gw_node;
+ struct batadv_gw_node *gw_node;
rcu_read_lock();
gw_node = rcu_dereference(bat_priv->curr_gw);
@@ -60,12 +60,13 @@ out:
return gw_node;
}
-struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv)
+struct batadv_orig_node *
+batadv_gw_get_selected_orig(struct batadv_priv *bat_priv)
{
- struct gw_node *gw_node;
- struct orig_node *orig_node = NULL;
+ struct batadv_gw_node *gw_node;
+ struct batadv_orig_node *orig_node = NULL;
- gw_node = gw_get_selected_gw_node(bat_priv);
+ gw_node = batadv_gw_get_selected_gw_node(bat_priv);
if (!gw_node)
goto out;
@@ -81,13 +82,14 @@ unlock:
rcu_read_unlock();
out:
if (gw_node)
- gw_node_free_ref(gw_node);
+ batadv_gw_node_free_ref(gw_node);
return orig_node;
}
-static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
+static void batadv_gw_select(struct batadv_priv *bat_priv,
+ struct batadv_gw_node *new_gw_node)
{
- struct gw_node *curr_gw_node;
+ struct batadv_gw_node *curr_gw_node;
spin_lock_bh(&bat_priv->gw_list_lock);
@@ -98,31 +100,34 @@ static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
if (curr_gw_node)
- gw_node_free_ref(curr_gw_node);
+ batadv_gw_node_free_ref(curr_gw_node);
spin_unlock_bh(&bat_priv->gw_list_lock);
}
-void gw_deselect(struct bat_priv *bat_priv)
+void batadv_gw_deselect(struct batadv_priv *bat_priv)
{
atomic_set(&bat_priv->gw_reselect, 1);
}
-static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
+static struct batadv_gw_node *
+batadv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
{
- struct neigh_node *router;
+ struct batadv_neigh_node *router;
struct hlist_node *node;
- struct gw_node *gw_node, *curr_gw = NULL;
+ struct batadv_gw_node *gw_node, *curr_gw = NULL;
uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
uint8_t max_tq = 0;
int down, up;
+ struct batadv_orig_node *orig_node;
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
if (gw_node->deleted)
continue;
- router = orig_node_get_router(gw_node->orig_node);
+ orig_node = gw_node->orig_node;
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
continue;
@@ -131,35 +136,34 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
switch (atomic_read(&bat_priv->gw_sel_class)) {
case 1: /* fast connection */
- gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
- &down, &up);
+ batadv_gw_bandwidth_to_kbit(orig_node->gw_flags,
+ &down, &up);
tmp_gw_factor = (router->tq_avg * router->tq_avg *
down * 100 * 100) /
- (TQ_LOCAL_WINDOW_SIZE *
- TQ_LOCAL_WINDOW_SIZE * 64);
+ (BATADV_TQ_LOCAL_WINDOW_SIZE *
+ BATADV_TQ_LOCAL_WINDOW_SIZE * 64);
if ((tmp_gw_factor > max_gw_factor) ||
((tmp_gw_factor == max_gw_factor) &&
(router->tq_avg > max_tq))) {
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
curr_gw = gw_node;
atomic_inc(&curr_gw->refcount);
}
break;
- default: /**
- * 2: stable connection (use best statistic)
+ default: /* 2: stable connection (use best statistic)
* 3: fast-switch (use best statistic but change as
* soon as a better gateway appears)
* XX: late-switch (use best statistic but change as
* soon as a better gateway appears which has
* $routing_class more tq points)
- **/
+ */
if (router->tq_avg > max_tq) {
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
curr_gw = gw_node;
atomic_inc(&curr_gw->refcount);
}
@@ -172,37 +176,36 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
if (tmp_gw_factor > max_gw_factor)
max_gw_factor = tmp_gw_factor;
- gw_node_free_ref(gw_node);
+ batadv_gw_node_free_ref(gw_node);
next:
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
rcu_read_unlock();
return curr_gw;
}
-void gw_election(struct bat_priv *bat_priv)
+void batadv_gw_election(struct batadv_priv *bat_priv)
{
- struct gw_node *curr_gw = NULL, *next_gw = NULL;
- struct neigh_node *router = NULL;
+ struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
+ struct batadv_neigh_node *router = NULL;
char gw_addr[18] = { '\0' };
- /**
- * The batman daemon checks here if we already passed a full originator
+ /* The batman daemon checks here if we already passed a full originator
* cycle in order to make sure we don't choose the first gateway we
* hear about. This check is based on the daemon's uptime which we
* don't have.
- **/
- if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
+ */
+ if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
goto out;
- if (!atomic_dec_not_zero(&bat_priv->gw_reselect))
+ if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect))
goto out;
- curr_gw = gw_get_selected_gw_node(bat_priv);
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
- next_gw = gw_get_best_gw_node(bat_priv);
+ next_gw = batadv_gw_get_best_gw_node(bat_priv);
if (curr_gw == next_gw)
goto out;
@@ -210,53 +213,57 @@ void gw_election(struct bat_priv *bat_priv)
if (next_gw) {
sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
- router = orig_node_get_router(next_gw->orig_node);
+ router = batadv_orig_node_get_router(next_gw->orig_node);
if (!router) {
- gw_deselect(bat_priv);
+ batadv_gw_deselect(bat_priv);
goto out;
}
}
if ((curr_gw) && (!next_gw)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Removing selected gateway - no gateway in range\n");
- throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Removing selected gateway - no gateway in range\n");
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL,
+ NULL);
} else if ((!curr_gw) && (next_gw)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
- next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
- router->tq_avg);
- throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
+ next_gw->orig_node->orig,
+ next_gw->orig_node->gw_flags, router->tq_avg);
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
+ gw_addr);
} else {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
- next_gw->orig_node->orig, next_gw->orig_node->gw_flags,
- router->tq_avg);
- throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n",
+ next_gw->orig_node->orig,
+ next_gw->orig_node->gw_flags, router->tq_avg);
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_CHANGE,
+ gw_addr);
}
- gw_select(bat_priv, next_gw);
+ batadv_gw_select(bat_priv, next_gw);
out:
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
if (next_gw)
- gw_node_free_ref(next_gw);
+ batadv_gw_node_free_ref(next_gw);
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
-void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
+void batadv_gw_check_election(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
- struct orig_node *curr_gw_orig;
- struct neigh_node *router_gw = NULL, *router_orig = NULL;
+ struct batadv_orig_node *curr_gw_orig;
+ struct batadv_neigh_node *router_gw = NULL, *router_orig = NULL;
uint8_t gw_tq_avg, orig_tq_avg;
- curr_gw_orig = gw_get_selected_orig(bat_priv);
+ curr_gw_orig = batadv_gw_get_selected_orig(bat_priv);
if (!curr_gw_orig)
goto deselect;
- router_gw = orig_node_get_router(curr_gw_orig);
+ router_gw = batadv_orig_node_get_router(curr_gw_orig);
if (!router_gw)
goto deselect;
@@ -264,7 +271,7 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
if (curr_gw_orig == orig_node)
goto out;
- router_orig = orig_node_get_router(orig_node);
+ router_orig = batadv_orig_node_get_router(orig_node);
if (!router_orig)
goto out;
@@ -275,35 +282,35 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
if (orig_tq_avg < gw_tq_avg)
goto out;
- /**
- * if the routing class is greater than 3 the value tells us how much
+ /* if the routing class is greater than 3 the value tells us how much
* greater the TQ value of the new gateway must be
- **/
+ */
if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
(orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
goto out;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
- gw_tq_avg, orig_tq_avg);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Restarting gateway selection: better gateway found (tq curr: %i, tq new: %i)\n",
+ gw_tq_avg, orig_tq_avg);
deselect:
- gw_deselect(bat_priv);
+ batadv_gw_deselect(bat_priv);
out:
if (curr_gw_orig)
- orig_node_free_ref(curr_gw_orig);
+ batadv_orig_node_free_ref(curr_gw_orig);
if (router_gw)
- neigh_node_free_ref(router_gw);
+ batadv_neigh_node_free_ref(router_gw);
if (router_orig)
- neigh_node_free_ref(router_orig);
+ batadv_neigh_node_free_ref(router_orig);
return;
}
-static void gw_node_add(struct bat_priv *bat_priv,
- struct orig_node *orig_node, uint8_t new_gwflags)
+static void batadv_gw_node_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ uint8_t new_gwflags)
{
- struct gw_node *gw_node;
+ struct batadv_gw_node *gw_node;
int down, up;
gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
@@ -318,47 +325,47 @@ static void gw_node_add(struct bat_priv *bat_priv,
hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
spin_unlock_bh(&bat_priv->gw_list_lock);
- gw_bandwidth_to_kbit(new_gwflags, &down, &up);
- bat_dbg(DBG_BATMAN, bat_priv,
- "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
- orig_node->orig, new_gwflags,
- (down > 2048 ? down / 1024 : down),
- (down > 2048 ? "MBit" : "KBit"),
- (up > 2048 ? up / 1024 : up),
- (up > 2048 ? "MBit" : "KBit"));
+ batadv_gw_bandwidth_to_kbit(new_gwflags, &down, &up);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
+ orig_node->orig, new_gwflags,
+ (down > 2048 ? down / 1024 : down),
+ (down > 2048 ? "MBit" : "KBit"),
+ (up > 2048 ? up / 1024 : up),
+ (up > 2048 ? "MBit" : "KBit"));
}
-void gw_node_update(struct bat_priv *bat_priv,
- struct orig_node *orig_node, uint8_t new_gwflags)
+void batadv_gw_node_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ uint8_t new_gwflags)
{
struct hlist_node *node;
- struct gw_node *gw_node, *curr_gw;
+ struct batadv_gw_node *gw_node, *curr_gw;
- /**
- * Note: We don't need a NULL check here, since curr_gw never gets
+ /* Note: We don't need a NULL check here, since curr_gw never gets
* dereferenced. If curr_gw is NULL we also should not exit as we may
* have this gateway in our list (duplication check!) even though we
* have no currently selected gateway.
*/
- curr_gw = gw_get_selected_gw_node(bat_priv);
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
rcu_read_lock();
hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
if (gw_node->orig_node != orig_node)
continue;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Gateway class of originator %pM changed from %i to %i\n",
- orig_node->orig, gw_node->orig_node->gw_flags,
- new_gwflags);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Gateway class of originator %pM changed from %i to %i\n",
+ orig_node->orig, gw_node->orig_node->gw_flags,
+ new_gwflags);
gw_node->deleted = 0;
- if (new_gwflags == NO_FLAGS) {
+ if (new_gwflags == BATADV_NO_FLAGS) {
gw_node->deleted = jiffies;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Gateway %pM removed from gateway list\n",
- orig_node->orig);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Gateway %pM removed from gateway list\n",
+ orig_node->orig);
if (gw_node == curr_gw)
goto deselect;
@@ -367,34 +374,35 @@ void gw_node_update(struct bat_priv *bat_priv,
goto unlock;
}
- if (new_gwflags == NO_FLAGS)
+ if (new_gwflags == BATADV_NO_FLAGS)
goto unlock;
- gw_node_add(bat_priv, orig_node, new_gwflags);
+ batadv_gw_node_add(bat_priv, orig_node, new_gwflags);
goto unlock;
deselect:
- gw_deselect(bat_priv);
+ batadv_gw_deselect(bat_priv);
unlock:
rcu_read_unlock();
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
}
-void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
+void batadv_gw_node_delete(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
- gw_node_update(bat_priv, orig_node, 0);
+ batadv_gw_node_update(bat_priv, orig_node, 0);
}
-void gw_node_purge(struct bat_priv *bat_priv)
+void batadv_gw_node_purge(struct batadv_priv *bat_priv)
{
- struct gw_node *gw_node, *curr_gw;
+ struct batadv_gw_node *gw_node, *curr_gw;
struct hlist_node *node, *node_tmp;
- unsigned long timeout = msecs_to_jiffies(2 * PURGE_TIMEOUT);
+ unsigned long timeout = msecs_to_jiffies(2 * BATADV_PURGE_TIMEOUT);
int do_deselect = 0;
- curr_gw = gw_get_selected_gw_node(bat_priv);
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
spin_lock_bh(&bat_priv->gw_list_lock);
@@ -402,43 +410,42 @@ void gw_node_purge(struct bat_priv *bat_priv)
&bat_priv->gw_list, list) {
if (((!gw_node->deleted) ||
(time_before(jiffies, gw_node->deleted + timeout))) &&
- atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
+ atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE)
continue;
if (curr_gw == gw_node)
do_deselect = 1;
hlist_del_rcu(&gw_node->list);
- gw_node_free_ref(gw_node);
+ batadv_gw_node_free_ref(gw_node);
}
spin_unlock_bh(&bat_priv->gw_list_lock);
/* gw_deselect() needs to acquire the gw_list_lock */
if (do_deselect)
- gw_deselect(bat_priv);
+ batadv_gw_deselect(bat_priv);
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
}
-/**
- * fails if orig_node has no router
- */
-static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
- const struct gw_node *gw_node)
+/* fails if orig_node has no router */
+static int batadv_write_buffer_text(struct batadv_priv *bat_priv,
+ struct seq_file *seq,
+ const struct batadv_gw_node *gw_node)
{
- struct gw_node *curr_gw;
- struct neigh_node *router;
+ struct batadv_gw_node *curr_gw;
+ struct batadv_neigh_node *router;
int down, up, ret = -1;
- gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
+ batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
- router = orig_node_get_router(gw_node->orig_node);
+ router = batadv_orig_node_get_router(gw_node->orig_node);
if (!router)
goto out;
- curr_gw = gw_get_selected_gw_node(bat_priv);
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
(curr_gw == gw_node ? "=>" : " "),
@@ -451,23 +458,23 @@ static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
(up > 2048 ? up / 1024 : up),
(up > 2048 ? "MBit" : "KBit"));
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
out:
return ret;
}
-int gw_client_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hard_iface *primary_if;
- struct gw_node *gw_node;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hard_iface *primary_if;
+ struct batadv_gw_node *gw_node;
struct hlist_node *node;
int gw_count = 0, ret = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -475,7 +482,7 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
goto out;
}
- if (primary_if->if_status != IF_ACTIVE) {
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
@@ -484,8 +491,8 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq,
" %-12s (%s/%i) %17s [%10s]: gw_class ... [B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
- "Gateway", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF",
- SOURCE_VERSION, primary_if->net_dev->name,
+ "Gateway", "#", BATADV_TQ_MAX_VALUE, "Nexthop", "outgoingIF",
+ BATADV_SOURCE_VERSION, primary_if->net_dev->name,
primary_if->net_dev->dev_addr, net_dev->name);
rcu_read_lock();
@@ -494,7 +501,7 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
continue;
/* fails if orig_node has no router */
- if (_write_buffer_text(bat_priv, seq, gw_node) < 0)
+ if (batadv_write_buffer_text(bat_priv, seq, gw_node) < 0)
continue;
gw_count++;
@@ -506,11 +513,11 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
-static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
+static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
{
int ret = false;
unsigned char *p;
@@ -521,27 +528,29 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
pkt_len = skb_headlen(skb);
- if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1)
+ if (pkt_len < header_len + BATADV_DHCP_OPTIONS_OFFSET + 1)
goto out;
- p = skb->data + header_len + DHCP_OPTIONS_OFFSET;
- pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;
+ p = skb->data + header_len + BATADV_DHCP_OPTIONS_OFFSET;
+ pkt_len -= header_len + BATADV_DHCP_OPTIONS_OFFSET + 1;
/* Access the dhcp option lists. Each entry is made up by:
* - octet 1: option type
* - octet 2: option data len (only if type != 255 and 0)
- * - octet 3: option data */
+ * - octet 3: option data
+ */
while (*p != 255 && !ret) {
/* p now points to the first octet: option type */
if (*p == 53) {
/* type 53 is the message type option.
- * Jump the len octet and go to the data octet */
+ * Jump the len octet and go to the data octet
+ */
if (pkt_len < 2)
goto out;
p += 2;
/* check if the message type is what we need */
- if (*p == DHCP_REQUEST)
+ if (*p == BATADV_DHCP_REQUEST)
ret = true;
break;
} else if (*p == 0) {
@@ -568,7 +577,7 @@ out:
return ret;
}
-bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
+bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
{
struct ethhdr *ethhdr;
struct iphdr *iphdr;
@@ -634,40 +643,41 @@ bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
return true;
}
-bool gw_out_of_range(struct bat_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr)
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, struct ethhdr *ethhdr)
{
- struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
- struct orig_node *orig_dst_node = NULL;
- struct gw_node *curr_gw = NULL;
+ struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
+ struct batadv_orig_node *orig_dst_node = NULL;
+ struct batadv_gw_node *curr_gw = NULL;
bool ret, out_of_range = false;
unsigned int header_len = 0;
uint8_t curr_tq_avg;
- ret = gw_is_dhcp_target(skb, &header_len);
+ ret = batadv_gw_is_dhcp_target(skb, &header_len);
if (!ret)
goto out;
- orig_dst_node = transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest);
+ orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
if (!orig_dst_node)
goto out;
if (!orig_dst_node->gw_flags)
goto out;
- ret = is_type_dhcprequest(skb, header_len);
+ ret = batadv_is_type_dhcprequest(skb, header_len);
if (!ret)
goto out;
switch (atomic_read(&bat_priv->gw_mode)) {
- case GW_MODE_SERVER:
+ case BATADV_GW_MODE_SERVER:
/* If we are a GW then we are our best GW. We can artificially
- * set the tq towards ourself as the maximum value */
- curr_tq_avg = TQ_MAX_VALUE;
+ * set the tq towards ourself as the maximum value
+ */
+ curr_tq_avg = BATADV_TQ_MAX_VALUE;
break;
- case GW_MODE_CLIENT:
- curr_gw = gw_get_selected_gw_node(bat_priv);
+ case BATADV_GW_MODE_CLIENT:
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
if (!curr_gw)
goto out;
@@ -677,33 +687,35 @@ bool gw_out_of_range(struct bat_priv *bat_priv,
/* If the dhcp packet has been sent to a different gw,
* we have to evaluate whether the old gw is still
- * reliable enough */
- neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL);
+ * reliable enough
+ */
+ neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node,
+ NULL);
if (!neigh_curr)
goto out;
curr_tq_avg = neigh_curr->tq_avg;
break;
- case GW_MODE_OFF:
+ case BATADV_GW_MODE_OFF:
default:
goto out;
}
- neigh_old = find_router(bat_priv, orig_dst_node, NULL);
+ neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL);
if (!neigh_old)
goto out;
- if (curr_tq_avg - neigh_old->tq_avg > GW_THRESHOLD)
+ if (curr_tq_avg - neigh_old->tq_avg > BATADV_GW_THRESHOLD)
out_of_range = true;
out:
if (orig_dst_node)
- orig_node_free_ref(orig_dst_node);
+ batadv_orig_node_free_ref(orig_dst_node);
if (curr_gw)
- gw_node_free_ref(curr_gw);
+ batadv_gw_node_free_ref(curr_gw);
if (neigh_old)
- neigh_node_free_ref(neigh_old);
+ batadv_neigh_node_free_ref(neigh_old);
if (neigh_curr)
- neigh_node_free_ref(neigh_curr);
+ batadv_neigh_node_free_ref(neigh_curr);
return out_of_range;
}
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index bf56a5aea10b..f0d129e323c8 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,23 +15,26 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
-void gw_deselect(struct bat_priv *bat_priv);
-void gw_election(struct bat_priv *bat_priv);
-struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv);
-void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node);
-void gw_node_update(struct bat_priv *bat_priv,
- struct orig_node *orig_node, uint8_t new_gwflags);
-void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
-void gw_node_purge(struct bat_priv *bat_priv);
-int gw_client_seq_print_text(struct seq_file *seq, void *offset);
-bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool gw_out_of_range(struct bat_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr);
+void batadv_gw_deselect(struct batadv_priv *bat_priv);
+void batadv_gw_election(struct batadv_priv *bat_priv);
+struct batadv_orig_node *
+batadv_gw_get_selected_orig(struct batadv_priv *bat_priv);
+void batadv_gw_check_election(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node);
+void batadv_gw_node_update(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ uint8_t new_gwflags);
+void batadv_gw_node_delete(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node);
+void batadv_gw_node_purge(struct batadv_priv *bat_priv);
+int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
+bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, struct ethhdr *ethhdr);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index ca57ac7d73b2..9001208d1752 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -24,7 +22,7 @@
#include "gateway_client.h"
/* calculates the gateway class from kbit */
-static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
+static void batadv_kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
{
int mdown = 0, tdown, tup, difference;
uint8_t sbit, part;
@@ -59,7 +57,7 @@ static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
}
/* returns the up and downspeeds in kbit, calculated from the class */
-void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
+void batadv_gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
{
int sbit = (gw_srv_class & 0x80) >> 7;
int dpart = (gw_srv_class & 0x78) >> 3;
@@ -75,8 +73,8 @@ void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
*up = ((upart + 1) * (*down)) / 8;
}
-static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
- int *up, int *down)
+static bool batadv_parse_gw_bandwidth(struct net_device *net_dev, char *buff,
+ int *up, int *down)
{
int ret, multi = 1;
char *slash_ptr, *tmp_ptr;
@@ -99,9 +97,9 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
ret = kstrtol(buff, 10, &ldown);
if (ret) {
- bat_err(net_dev,
- "Download speed of gateway mode invalid: %s\n",
- buff);
+ batadv_err(net_dev,
+ "Download speed of gateway mode invalid: %s\n",
+ buff);
return false;
}
@@ -124,9 +122,9 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
ret = kstrtol(slash_ptr + 1, 10, &lup);
if (ret) {
- bat_err(net_dev,
- "Upload speed of gateway mode invalid: %s\n",
- slash_ptr + 1);
+ batadv_err(net_dev,
+ "Upload speed of gateway mode invalid: %s\n",
+ slash_ptr + 1);
return false;
}
@@ -136,14 +134,15 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
return true;
}
-ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
+ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
+ size_t count)
{
- struct bat_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
long gw_bandwidth_tmp = 0;
int up = 0, down = 0;
bool ret;
- ret = parse_gw_bandwidth(net_dev, buff, &up, &down);
+ ret = batadv_parse_gw_bandwidth(net_dev, buff, &up, &down);
if (!ret)
goto end;
@@ -153,23 +152,25 @@ ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
if (!up)
up = down / 5;
- kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);
+ batadv_kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp);
- /**
- * the gw bandwidth we guessed above might not match the given
+ /* the gw bandwidth we guessed above might not match the given
* speeds, hence we need to calculate it back to show the number
* that is going to be propagated
- **/
- gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
-
- gw_deselect(bat_priv);
- bat_info(net_dev,
- "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
- atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
- (down > 2048 ? down / 1024 : down),
- (down > 2048 ? "MBit" : "KBit"),
- (up > 2048 ? up / 1024 : up),
- (up > 2048 ? "MBit" : "KBit"));
+ */
+ batadv_gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
+
+ if (atomic_read(&bat_priv->gw_bandwidth) == gw_bandwidth_tmp)
+ return count;
+
+ batadv_gw_deselect(bat_priv);
+ batadv_info(net_dev,
+ "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n",
+ atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
+ (down > 2048 ? down / 1024 : down),
+ (down > 2048 ? "MBit" : "KBit"),
+ (up > 2048 ? up / 1024 : up),
+ (up > 2048 ? "MBit" : "KBit"));
atomic_set(&bat_priv->gw_bandwidth, gw_bandwidth_tmp);
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index b8fb11c4f927..13697f6e7113 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,23 +15,23 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_
#define _NET_BATMAN_ADV_GATEWAY_COMMON_H_
-enum gw_modes {
- GW_MODE_OFF,
- GW_MODE_CLIENT,
- GW_MODE_SERVER,
+enum batadv_gw_modes {
+ BATADV_GW_MODE_OFF,
+ BATADV_GW_MODE_CLIENT,
+ BATADV_GW_MODE_SERVER,
};
-#define GW_MODE_OFF_NAME "off"
-#define GW_MODE_CLIENT_NAME "client"
-#define GW_MODE_SERVER_NAME "server"
+#define BATADV_GW_MODE_OFF_NAME "off"
+#define BATADV_GW_MODE_CLIENT_NAME "client"
+#define BATADV_GW_MODE_SERVER_NAME "server"
-void gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up);
-ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count);
+void batadv_gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up);
+ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff,
+ size_t count);
#endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index dc334fa89847..282bf6e9353e 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -25,28 +23,29 @@
#include "send.h"
#include "translation-table.h"
#include "routing.h"
-#include "bat_sysfs.h"
+#include "sysfs.h"
#include "originator.h"
#include "hash.h"
#include "bridge_loop_avoidance.h"
#include <linux/if_arp.h>
-void hardif_free_rcu(struct rcu_head *rcu)
+void batadv_hardif_free_rcu(struct rcu_head *rcu)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
- hard_iface = container_of(rcu, struct hard_iface, rcu);
+ hard_iface = container_of(rcu, struct batadv_hard_iface, rcu);
dev_put(hard_iface->net_dev);
kfree(hard_iface);
}
-struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev)
+struct batadv_hard_iface *
+batadv_hardif_get_by_netdev(const struct net_device *net_dev)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->net_dev == net_dev &&
atomic_inc_not_zero(&hard_iface->refcount))
goto out;
@@ -59,7 +58,7 @@ out:
return hard_iface;
}
-static int is_valid_iface(const struct net_device *net_dev)
+static int batadv_is_valid_iface(const struct net_device *net_dev)
{
if (net_dev->flags & IFF_LOOPBACK)
return 0;
@@ -71,26 +70,23 @@ static int is_valid_iface(const struct net_device *net_dev)
return 0;
/* no batman over batman */
- if (softif_is_valid(net_dev))
+ if (batadv_softif_is_valid(net_dev))
return 0;
- /* Device is being bridged */
- /* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
- return 0; */
-
return 1;
}
-static struct hard_iface *hardif_get_active(const struct net_device *soft_iface)
+static struct batadv_hard_iface *
+batadv_hardif_get_active(const struct net_device *soft_iface)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;
- if (hard_iface->if_status == IF_ACTIVE &&
+ if (hard_iface->if_status == BATADV_IF_ACTIVE &&
atomic_inc_not_zero(&hard_iface->refcount))
goto out;
}
@@ -102,32 +98,32 @@ out:
return hard_iface;
}
-static void primary_if_update_addr(struct bat_priv *bat_priv,
- struct hard_iface *oldif)
+static void batadv_primary_if_update_addr(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *oldif)
{
- struct vis_packet *vis_packet;
- struct hard_iface *primary_if;
+ struct batadv_vis_packet *vis_packet;
+ struct batadv_hard_iface *primary_if;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- vis_packet = (struct vis_packet *)
+ vis_packet = (struct batadv_vis_packet *)
bat_priv->my_vis_info->skb_packet->data;
memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(vis_packet->sender_orig,
primary_if->net_dev->dev_addr, ETH_ALEN);
- bla_update_orig_address(bat_priv, primary_if, oldif);
+ batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
-static void primary_if_select(struct bat_priv *bat_priv,
- struct hard_iface *new_hard_iface)
+static void batadv_primary_if_select(struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *new_hard_iface)
{
- struct hard_iface *curr_hard_iface;
+ struct batadv_hard_iface *curr_hard_iface;
ASSERT_RTNL();
@@ -141,14 +137,15 @@ static void primary_if_select(struct bat_priv *bat_priv,
goto out;
bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
- primary_if_update_addr(bat_priv, curr_hard_iface);
+ batadv_primary_if_update_addr(bat_priv, curr_hard_iface);
out:
if (curr_hard_iface)
- hardif_free_ref(curr_hard_iface);
+ batadv_hardif_free_ref(curr_hard_iface);
}
-static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
+static bool
+batadv_hardif_is_iface_up(const struct batadv_hard_iface *hard_iface)
{
if (hard_iface->net_dev->flags & IFF_UP)
return true;
@@ -156,21 +153,21 @@ static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
return false;
}
-static void check_known_mac_addr(const struct net_device *net_dev)
+static void batadv_check_known_mac_addr(const struct net_device *net_dev)
{
- const struct hard_iface *hard_iface;
+ const struct batadv_hard_iface *hard_iface;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if ((hard_iface->if_status != IF_ACTIVE) &&
- (hard_iface->if_status != IF_TO_BE_ACTIVATED))
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
+ (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
continue;
if (hard_iface->net_dev == net_dev)
continue;
- if (!compare_eth(hard_iface->net_dev->dev_addr,
- net_dev->dev_addr))
+ if (!batadv_compare_eth(hard_iface->net_dev->dev_addr,
+ net_dev->dev_addr))
continue;
pr_warn("The newly added mac address (%pM) already exists on: %s\n",
@@ -180,27 +177,29 @@ static void check_known_mac_addr(const struct net_device *net_dev)
rcu_read_unlock();
}
-int hardif_min_mtu(struct net_device *soft_iface)
+int batadv_hardif_min_mtu(struct net_device *soft_iface)
{
- const struct bat_priv *bat_priv = netdev_priv(soft_iface);
- const struct hard_iface *hard_iface;
+ const struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ const struct batadv_hard_iface *hard_iface;
/* allow big frames if all devices are capable to do so
- * (have MTU > 1500 + BAT_HEADER_LEN) */
+ * (have MTU > 1500 + BAT_HEADER_LEN)
+ */
int min_mtu = ETH_DATA_LEN;
if (atomic_read(&bat_priv->fragmentation))
goto out;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if ((hard_iface->if_status != IF_ACTIVE) &&
- (hard_iface->if_status != IF_TO_BE_ACTIVATED))
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
+ (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
continue;
if (hard_iface->soft_iface != soft_iface)
continue;
- min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN,
+ min_mtu = min_t(int,
+ hard_iface->net_dev->mtu - BATADV_HEADER_LEN,
min_mtu);
}
rcu_read_unlock();
@@ -209,68 +208,70 @@ out:
}
/* adjusts the MTU if a new interface with a smaller MTU appeared. */
-void update_min_mtu(struct net_device *soft_iface)
+void batadv_update_min_mtu(struct net_device *soft_iface)
{
int min_mtu;
- min_mtu = hardif_min_mtu(soft_iface);
+ min_mtu = batadv_hardif_min_mtu(soft_iface);
if (soft_iface->mtu != min_mtu)
soft_iface->mtu = min_mtu;
}
-static void hardif_activate_interface(struct hard_iface *hard_iface)
+static void
+batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
{
- struct bat_priv *bat_priv;
- struct hard_iface *primary_if = NULL;
+ struct batadv_priv *bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
- if (hard_iface->if_status != IF_INACTIVE)
+ if (hard_iface->if_status != BATADV_IF_INACTIVE)
goto out;
bat_priv = netdev_priv(hard_iface->soft_iface);
bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
- hard_iface->if_status = IF_TO_BE_ACTIVATED;
+ hard_iface->if_status = BATADV_IF_TO_BE_ACTIVATED;
- /**
- * the first active interface becomes our primary interface or
+ /* the first active interface becomes our primary interface or
* the next active interface after the old primary interface was removed
*/
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
- primary_if_select(bat_priv, hard_iface);
+ batadv_primary_if_select(bat_priv, hard_iface);
- bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
- hard_iface->net_dev->name);
+ batadv_info(hard_iface->soft_iface, "Interface activated: %s\n",
+ hard_iface->net_dev->name);
- update_min_mtu(hard_iface->soft_iface);
+ batadv_update_min_mtu(hard_iface->soft_iface);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
-static void hardif_deactivate_interface(struct hard_iface *hard_iface)
+static void
+batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
{
- if ((hard_iface->if_status != IF_ACTIVE) &&
- (hard_iface->if_status != IF_TO_BE_ACTIVATED))
+ if ((hard_iface->if_status != BATADV_IF_ACTIVE) &&
+ (hard_iface->if_status != BATADV_IF_TO_BE_ACTIVATED))
return;
- hard_iface->if_status = IF_INACTIVE;
+ hard_iface->if_status = BATADV_IF_INACTIVE;
- bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
- hard_iface->net_dev->name);
+ batadv_info(hard_iface->soft_iface, "Interface deactivated: %s\n",
+ hard_iface->net_dev->name);
- update_min_mtu(hard_iface->soft_iface);
+ batadv_update_min_mtu(hard_iface->soft_iface);
}
-int hardif_enable_interface(struct hard_iface *hard_iface,
- const char *iface_name)
+int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
+ const char *iface_name)
{
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
struct net_device *soft_iface;
+ __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
int ret;
- if (hard_iface->if_status != IF_NOT_IN_USE)
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
goto out;
if (!atomic_inc_not_zero(&hard_iface->refcount))
@@ -284,7 +285,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
soft_iface = dev_get_by_name(&init_net, iface_name);
if (!soft_iface) {
- soft_iface = softif_create(iface_name);
+ soft_iface = batadv_softif_create(iface_name);
if (!soft_iface) {
ret = -ENOMEM;
@@ -295,7 +296,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
dev_hold(soft_iface);
}
- if (!softif_is_valid(soft_iface)) {
+ if (!batadv_softif_is_valid(soft_iface)) {
pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
soft_iface->name);
ret = -EINVAL;
@@ -306,48 +307,46 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
bat_priv = netdev_priv(hard_iface->soft_iface);
ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
- if (ret < 0) {
- ret = -ENOMEM;
+ if (ret < 0)
goto err_dev;
- }
hard_iface->if_num = bat_priv->num_ifaces;
bat_priv->num_ifaces++;
- hard_iface->if_status = IF_INACTIVE;
- orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
+ hard_iface->if_status = BATADV_IF_INACTIVE;
+ batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces);
- hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
- hard_iface->batman_adv_ptype.func = batman_skb_recv;
+ hard_iface->batman_adv_ptype.type = ethertype;
+ hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv;
hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
dev_add_pack(&hard_iface->batman_adv_ptype);
atomic_set(&hard_iface->frag_seqno, 1);
- bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
- hard_iface->net_dev->name);
-
- if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
- ETH_DATA_LEN + BAT_HEADER_LEN)
- bat_info(hard_iface->soft_iface,
- "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
- hard_iface->net_dev->name, hard_iface->net_dev->mtu,
- ETH_DATA_LEN + BAT_HEADER_LEN);
-
- if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu <
- ETH_DATA_LEN + BAT_HEADER_LEN)
- bat_info(hard_iface->soft_iface,
- "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
- hard_iface->net_dev->name, hard_iface->net_dev->mtu,
- ETH_DATA_LEN + BAT_HEADER_LEN);
-
- if (hardif_is_iface_up(hard_iface))
- hardif_activate_interface(hard_iface);
+ batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
+ hard_iface->net_dev->name);
+
+ if (atomic_read(&bat_priv->fragmentation) &&
+ hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+ batadv_info(hard_iface->soft_iface,
+ "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n",
+ hard_iface->net_dev->name, hard_iface->net_dev->mtu,
+ ETH_DATA_LEN + BATADV_HEADER_LEN);
+
+ if (!atomic_read(&bat_priv->fragmentation) &&
+ hard_iface->net_dev->mtu < ETH_DATA_LEN + BATADV_HEADER_LEN)
+ batadv_info(hard_iface->soft_iface,
+ "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n",
+ hard_iface->net_dev->name, hard_iface->net_dev->mtu,
+ ETH_DATA_LEN + BATADV_HEADER_LEN);
+
+ if (batadv_hardif_is_iface_up(hard_iface))
+ batadv_hardif_activate_interface(hard_iface);
else
- bat_err(hard_iface->soft_iface,
- "Not using interface %s (retrying later): interface not active\n",
- hard_iface->net_dev->name);
+ batadv_err(hard_iface->soft_iface,
+ "Not using interface %s (retrying later): interface not active\n",
+ hard_iface->net_dev->name);
/* begin scheduling originator messages on that interface */
- schedule_bat_ogm(hard_iface);
+ batadv_schedule_bat_ogm(hard_iface);
out:
return 0;
@@ -355,67 +354,68 @@ out:
err_dev:
dev_put(soft_iface);
err:
- hardif_free_ref(hard_iface);
+ batadv_hardif_free_ref(hard_iface);
return ret;
}
-void hardif_disable_interface(struct hard_iface *hard_iface)
+void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct hard_iface *primary_if = NULL;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hard_iface *primary_if = NULL;
- if (hard_iface->if_status == IF_ACTIVE)
- hardif_deactivate_interface(hard_iface);
+ if (hard_iface->if_status == BATADV_IF_ACTIVE)
+ batadv_hardif_deactivate_interface(hard_iface);
- if (hard_iface->if_status != IF_INACTIVE)
+ if (hard_iface->if_status != BATADV_IF_INACTIVE)
goto out;
- bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
- hard_iface->net_dev->name);
+ batadv_info(hard_iface->soft_iface, "Removing interface: %s\n",
+ hard_iface->net_dev->name);
dev_remove_pack(&hard_iface->batman_adv_ptype);
bat_priv->num_ifaces--;
- orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
+ batadv_orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (hard_iface == primary_if) {
- struct hard_iface *new_if;
+ struct batadv_hard_iface *new_if;
- new_if = hardif_get_active(hard_iface->soft_iface);
- primary_if_select(bat_priv, new_if);
+ new_if = batadv_hardif_get_active(hard_iface->soft_iface);
+ batadv_primary_if_select(bat_priv, new_if);
if (new_if)
- hardif_free_ref(new_if);
+ batadv_hardif_free_ref(new_if);
}
bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
- hard_iface->if_status = IF_NOT_IN_USE;
+ hard_iface->if_status = BATADV_IF_NOT_IN_USE;
/* delete all references to this hard_iface */
- purge_orig_ref(bat_priv);
- purge_outstanding_packets(bat_priv, hard_iface);
+ batadv_purge_orig_ref(bat_priv);
+ batadv_purge_outstanding_packets(bat_priv, hard_iface);
dev_put(hard_iface->soft_iface);
/* nobody uses this interface anymore */
if (!bat_priv->num_ifaces)
- softif_destroy(hard_iface->soft_iface);
+ batadv_softif_destroy(hard_iface->soft_iface);
hard_iface->soft_iface = NULL;
- hardif_free_ref(hard_iface);
+ batadv_hardif_free_ref(hard_iface);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
-static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
+static struct batadv_hard_iface *
+batadv_hardif_add_interface(struct net_device *net_dev)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
int ret;
ASSERT_RTNL();
- ret = is_valid_iface(net_dev);
+ ret = batadv_is_valid_iface(net_dev);
if (ret != 1)
goto out;
@@ -425,23 +425,22 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
if (!hard_iface)
goto release_dev;
- ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
+ ret = batadv_sysfs_add_hardif(&hard_iface->hardif_obj, net_dev);
if (ret)
goto free_if;
hard_iface->if_num = -1;
hard_iface->net_dev = net_dev;
hard_iface->soft_iface = NULL;
- hard_iface->if_status = IF_NOT_IN_USE;
+ hard_iface->if_status = BATADV_IF_NOT_IN_USE;
INIT_LIST_HEAD(&hard_iface->list);
/* extra reference for return */
atomic_set(&hard_iface->refcount, 2);
- check_known_mac_addr(hard_iface->net_dev);
- list_add_tail_rcu(&hard_iface->list, &hardif_list);
+ batadv_check_known_mac_addr(hard_iface->net_dev);
+ list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list);
- /**
- * This can't be called via a bat_priv callback because
+ /* This can't be called via a bat_priv callback because
* we have no bat_priv yet.
*/
atomic_set(&hard_iface->seqno, 1);
@@ -457,102 +456,104 @@ out:
return NULL;
}
-static void hardif_remove_interface(struct hard_iface *hard_iface)
+static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
{
ASSERT_RTNL();
/* first deactivate interface */
- if (hard_iface->if_status != IF_NOT_IN_USE)
- hardif_disable_interface(hard_iface);
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
+ batadv_hardif_disable_interface(hard_iface);
- if (hard_iface->if_status != IF_NOT_IN_USE)
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
return;
- hard_iface->if_status = IF_TO_BE_REMOVED;
- sysfs_del_hardif(&hard_iface->hardif_obj);
- hardif_free_ref(hard_iface);
+ hard_iface->if_status = BATADV_IF_TO_BE_REMOVED;
+ batadv_sysfs_del_hardif(&hard_iface->hardif_obj);
+ batadv_hardif_free_ref(hard_iface);
}
-void hardif_remove_interfaces(void)
+void batadv_hardif_remove_interfaces(void)
{
- struct hard_iface *hard_iface, *hard_iface_tmp;
+ struct batadv_hard_iface *hard_iface, *hard_iface_tmp;
rtnl_lock();
list_for_each_entry_safe(hard_iface, hard_iface_tmp,
- &hardif_list, list) {
+ &batadv_hardif_list, list) {
list_del_rcu(&hard_iface->list);
- hardif_remove_interface(hard_iface);
+ batadv_hardif_remove_interface(hard_iface);
}
rtnl_unlock();
}
-static int hard_if_event(struct notifier_block *this,
- unsigned long event, void *ptr)
+static int batadv_hard_if_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
{
struct net_device *net_dev = ptr;
- struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
- struct hard_iface *primary_if = NULL;
- struct bat_priv *bat_priv;
+ struct batadv_hard_iface *hard_iface;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_priv *bat_priv;
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
if (!hard_iface && event == NETDEV_REGISTER)
- hard_iface = hardif_add_interface(net_dev);
+ hard_iface = batadv_hardif_add_interface(net_dev);
if (!hard_iface)
goto out;
switch (event) {
case NETDEV_UP:
- hardif_activate_interface(hard_iface);
+ batadv_hardif_activate_interface(hard_iface);
break;
case NETDEV_GOING_DOWN:
case NETDEV_DOWN:
- hardif_deactivate_interface(hard_iface);
+ batadv_hardif_deactivate_interface(hard_iface);
break;
case NETDEV_UNREGISTER:
list_del_rcu(&hard_iface->list);
- hardif_remove_interface(hard_iface);
+ batadv_hardif_remove_interface(hard_iface);
break;
case NETDEV_CHANGEMTU:
if (hard_iface->soft_iface)
- update_min_mtu(hard_iface->soft_iface);
+ batadv_update_min_mtu(hard_iface->soft_iface);
break;
case NETDEV_CHANGEADDR:
- if (hard_iface->if_status == IF_NOT_IN_USE)
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
goto hardif_put;
- check_known_mac_addr(hard_iface->net_dev);
+ batadv_check_known_mac_addr(hard_iface->net_dev);
bat_priv = netdev_priv(hard_iface->soft_iface);
bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface);
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto hardif_put;
if (hard_iface == primary_if)
- primary_if_update_addr(bat_priv, NULL);
+ batadv_primary_if_update_addr(bat_priv, NULL);
break;
default:
break;
}
hardif_put:
- hardif_free_ref(hard_iface);
+ batadv_hardif_free_ref(hard_iface);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return NOTIFY_DONE;
}
/* This function returns true if the interface represented by ifindex is a
- * 802.11 wireless device */
-bool is_wifi_iface(int ifindex)
+ * 802.11 wireless device
+ */
+bool batadv_is_wifi_iface(int ifindex)
{
struct net_device *net_device = NULL;
bool ret = false;
- if (ifindex == NULL_IFINDEX)
+ if (ifindex == BATADV_NULL_IFINDEX)
goto out;
net_device = dev_get_by_index(&init_net, ifindex);
@@ -561,7 +562,8 @@ bool is_wifi_iface(int ifindex)
#ifdef CONFIG_WIRELESS_EXT
/* pre-cfg80211 drivers have to implement WEXT, so it is possible to
- * check for wireless_handlers != NULL */
+ * check for wireless_handlers != NULL
+ */
if (net_device->wireless_handlers)
ret = true;
else
@@ -575,6 +577,6 @@ out:
return ret;
}
-struct notifier_block hard_if_notifier = {
- .notifier_call = hard_if_event,
+struct notifier_block batadv_hard_if_notifier = {
+ .notifier_call = batadv_hard_if_event,
};
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index e68c5655e616..3732366e7445 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,44 +15,44 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
#define _NET_BATMAN_ADV_HARD_INTERFACE_H_
-enum hard_if_state {
- IF_NOT_IN_USE,
- IF_TO_BE_REMOVED,
- IF_INACTIVE,
- IF_ACTIVE,
- IF_TO_BE_ACTIVATED,
- IF_I_WANT_YOU
+enum batadv_hard_if_state {
+ BATADV_IF_NOT_IN_USE,
+ BATADV_IF_TO_BE_REMOVED,
+ BATADV_IF_INACTIVE,
+ BATADV_IF_ACTIVE,
+ BATADV_IF_TO_BE_ACTIVATED,
+ BATADV_IF_I_WANT_YOU,
};
-extern struct notifier_block hard_if_notifier;
+extern struct notifier_block batadv_hard_if_notifier;
-struct hard_iface*
-hardif_get_by_netdev(const struct net_device *net_dev);
-int hardif_enable_interface(struct hard_iface *hard_iface,
- const char *iface_name);
-void hardif_disable_interface(struct hard_iface *hard_iface);
-void hardif_remove_interfaces(void);
-int hardif_min_mtu(struct net_device *soft_iface);
-void update_min_mtu(struct net_device *soft_iface);
-void hardif_free_rcu(struct rcu_head *rcu);
-bool is_wifi_iface(int ifindex);
+struct batadv_hard_iface*
+batadv_hardif_get_by_netdev(const struct net_device *net_dev);
+int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
+ const char *iface_name);
+void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface);
+void batadv_hardif_remove_interfaces(void);
+int batadv_hardif_min_mtu(struct net_device *soft_iface);
+void batadv_update_min_mtu(struct net_device *soft_iface);
+void batadv_hardif_free_rcu(struct rcu_head *rcu);
+bool batadv_is_wifi_iface(int ifindex);
-static inline void hardif_free_ref(struct hard_iface *hard_iface)
+static inline void
+batadv_hardif_free_ref(struct batadv_hard_iface *hard_iface)
{
if (atomic_dec_and_test(&hard_iface->refcount))
- call_rcu(&hard_iface->rcu, hardif_free_rcu);
+ call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu);
}
-static inline struct hard_iface *primary_if_get_selected(
- struct bat_priv *bat_priv)
+static inline struct batadv_hard_iface *
+batadv_primary_if_get_selected(struct batadv_priv *bat_priv)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
rcu_read_lock();
hard_iface = rcu_dereference(bat_priv->primary_if);
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index 117687bedf25..15a849c2d414 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -16,25 +15,24 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
#include "hash.h"
/* clears the hash */
-static void hash_init(struct hashtable_t *hash)
+static void batadv_hash_init(struct batadv_hashtable *hash)
{
uint32_t i;
- for (i = 0 ; i < hash->size; i++) {
+ for (i = 0; i < hash->size; i++) {
INIT_HLIST_HEAD(&hash->table[i]);
spin_lock_init(&hash->list_locks[i]);
}
}
/* free only the hashtable and the hash itself. */
-void hash_destroy(struct hashtable_t *hash)
+void batadv_hash_destroy(struct batadv_hashtable *hash)
{
kfree(hash->list_locks);
kfree(hash->table);
@@ -42,9 +40,9 @@ void hash_destroy(struct hashtable_t *hash)
}
/* allocates and clears the hash */
-struct hashtable_t *hash_new(uint32_t size)
+struct batadv_hashtable *batadv_hash_new(uint32_t size)
{
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
if (!hash)
@@ -60,7 +58,7 @@ struct hashtable_t *hash_new(uint32_t size)
goto free_table;
hash->size = size;
- hash_init(hash);
+ batadv_hash_init(hash);
return hash;
free_table:
@@ -69,3 +67,12 @@ free_hash:
kfree(hash);
return NULL;
}
+
+void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
+ struct lock_class_key *key)
+{
+ uint32_t i;
+
+ for (i = 0; i < hash->size; i++)
+ lockdep_set_class(&hash->list_locks[i], key);
+}
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index d4bd7862719b..977de9c75fc2 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_HASH_H_
@@ -24,35 +22,42 @@
#include <linux/list.h>
-/* callback to a compare function. should
- * compare 2 element datas for their keys,
- * return 0 if same and not 0 if not
- * same */
-typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *);
+/* callback to a compare function. should compare 2 element datas for their
+ * keys, return 0 if same and not 0 if not same
+ */
+typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *,
+ const void *);
/* the hashfunction, should return an index
* based on the key in the data of the first
- * argument and the size the second */
-typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t);
-typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
+ * argument and the size the second
+ */
+typedef uint32_t (*batadv_hashdata_choose_cb)(const void *, uint32_t);
+typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
-struct hashtable_t {
+struct batadv_hashtable {
struct hlist_head *table; /* the hashtable itself with the buckets */
spinlock_t *list_locks; /* spinlock for each hash list entry */
uint32_t size; /* size of hashtable */
};
/* allocates and clears the hash */
-struct hashtable_t *hash_new(uint32_t size);
+struct batadv_hashtable *batadv_hash_new(uint32_t size);
+
+/* set class key for all locks */
+void batadv_hash_set_lock_class(struct batadv_hashtable *hash,
+ struct lock_class_key *key);
/* free only the hashtable and the hash itself. */
-void hash_destroy(struct hashtable_t *hash);
+void batadv_hash_destroy(struct batadv_hashtable *hash);
/* remove the hash structure. if hashdata_free_cb != NULL, this function will be
* called to remove the elements inside of the hash. if you don't remove the
- * elements, memory might be leaked. */
-static inline void hash_delete(struct hashtable_t *hash,
- hashdata_free_cb free_cb, void *arg)
+ * elements, memory might be leaked.
+ */
+static inline void batadv_hash_delete(struct batadv_hashtable *hash,
+ batadv_hashdata_free_cb free_cb,
+ void *arg)
{
struct hlist_head *head;
struct hlist_node *node, *node_tmp;
@@ -73,11 +78,11 @@ static inline void hash_delete(struct hashtable_t *hash,
spin_unlock_bh(list_lock);
}
- hash_destroy(hash);
+ batadv_hash_destroy(hash);
}
/**
- * hash_add - adds data to the hashtable
+ * batadv_hash_add - adds data to the hashtable
* @hash: storage hash table
* @compare: callback to determine if 2 hash elements are identical
* @choose: callback calculating the hash index
@@ -87,11 +92,11 @@ static inline void hash_delete(struct hashtable_t *hash,
* Returns 0 on success, 1 if the element already is in the hash
* and -1 on error.
*/
-
-static inline int hash_add(struct hashtable_t *hash,
- hashdata_compare_cb compare,
- hashdata_choose_cb choose,
- const void *data, struct hlist_node *data_node)
+static inline int batadv_hash_add(struct batadv_hashtable *hash,
+ batadv_hashdata_compare_cb compare,
+ batadv_hashdata_choose_cb choose,
+ const void *data,
+ struct hlist_node *data_node)
{
uint32_t index;
int ret = -1;
@@ -106,26 +111,23 @@ static inline int hash_add(struct hashtable_t *hash,
head = &hash->table[index];
list_lock = &hash->list_locks[index];
- rcu_read_lock();
- __hlist_for_each_rcu(node, head) {
+ spin_lock_bh(list_lock);
+
+ hlist_for_each(node, head) {
if (!compare(node, data))
continue;
ret = 1;
- goto err_unlock;
+ goto unlock;
}
- rcu_read_unlock();
/* no duplicate found in list, add new element */
- spin_lock_bh(list_lock);
hlist_add_head_rcu(data_node, head);
- spin_unlock_bh(list_lock);
ret = 0;
- goto out;
-err_unlock:
- rcu_read_unlock();
+unlock:
+ spin_unlock_bh(list_lock);
out:
return ret;
}
@@ -133,10 +135,12 @@ out:
/* removes data from hash, if found. returns pointer do data on success, so you
* can remove the used structure yourself, or NULL on error . data could be the
* structure you use with just the key filled, we just need the key for
- * comparing. */
-static inline void *hash_remove(struct hashtable_t *hash,
- hashdata_compare_cb compare,
- hashdata_choose_cb choose, void *data)
+ * comparing.
+ */
+static inline void *batadv_hash_remove(struct batadv_hashtable *hash,
+ batadv_hashdata_compare_cb compare,
+ batadv_hashdata_choose_cb choose,
+ void *data)
{
uint32_t index;
struct hlist_node *node;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 2e98a57f3407..bde3cf747507 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -28,21 +26,21 @@
#include "originator.h"
#include "hard-interface.h"
-static struct socket_client *socket_client_hash[256];
+static struct batadv_socket_client *batadv_socket_client_hash[256];
-static void bat_socket_add_packet(struct socket_client *socket_client,
- struct icmp_packet_rr *icmp_packet,
- size_t icmp_len);
+static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
+ struct batadv_icmp_packet_rr *icmp_packet,
+ size_t icmp_len);
-void bat_socket_init(void)
+void batadv_socket_init(void)
{
- memset(socket_client_hash, 0, sizeof(socket_client_hash));
+ memset(batadv_socket_client_hash, 0, sizeof(batadv_socket_client_hash));
}
-static int bat_socket_open(struct inode *inode, struct file *file)
+static int batadv_socket_open(struct inode *inode, struct file *file)
{
unsigned int i;
- struct socket_client *socket_client;
+ struct batadv_socket_client *socket_client;
nonseekable_open(inode, file);
@@ -51,14 +49,14 @@ static int bat_socket_open(struct inode *inode, struct file *file)
if (!socket_client)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(socket_client_hash); i++) {
- if (!socket_client_hash[i]) {
- socket_client_hash[i] = socket_client;
+ for (i = 0; i < ARRAY_SIZE(batadv_socket_client_hash); i++) {
+ if (!batadv_socket_client_hash[i]) {
+ batadv_socket_client_hash[i] = socket_client;
break;
}
}
- if (i == ARRAY_SIZE(socket_client_hash)) {
+ if (i == ARRAY_SIZE(batadv_socket_client_hash)) {
pr_err("Error - can't add another packet client: maximum number of clients reached\n");
kfree(socket_client);
return -EXFULL;
@@ -73,14 +71,14 @@ static int bat_socket_open(struct inode *inode, struct file *file)
file->private_data = socket_client;
- inc_module_count();
+ batadv_inc_module_count();
return 0;
}
-static int bat_socket_release(struct inode *inode, struct file *file)
+static int batadv_socket_release(struct inode *inode, struct file *file)
{
- struct socket_client *socket_client = file->private_data;
- struct socket_packet *socket_packet;
+ struct batadv_socket_client *socket_client = file->private_data;
+ struct batadv_socket_packet *socket_packet;
struct list_head *list_pos, *list_pos_tmp;
spin_lock_bh(&socket_client->lock);
@@ -88,33 +86,33 @@ static int bat_socket_release(struct inode *inode, struct file *file)
/* for all packets in the queue ... */
list_for_each_safe(list_pos, list_pos_tmp, &socket_client->queue_list) {
socket_packet = list_entry(list_pos,
- struct socket_packet, list);
+ struct batadv_socket_packet, list);
list_del(list_pos);
kfree(socket_packet);
}
- socket_client_hash[socket_client->index] = NULL;
+ batadv_socket_client_hash[socket_client->index] = NULL;
spin_unlock_bh(&socket_client->lock);
kfree(socket_client);
- dec_module_count();
+ batadv_dec_module_count();
return 0;
}
-static ssize_t bat_socket_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t batadv_socket_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
{
- struct socket_client *socket_client = file->private_data;
- struct socket_packet *socket_packet;
+ struct batadv_socket_client *socket_client = file->private_data;
+ struct batadv_socket_packet *socket_packet;
size_t packet_len;
int error;
if ((file->f_flags & O_NONBLOCK) && (socket_client->queue_len == 0))
return -EAGAIN;
- if ((!buf) || (count < sizeof(struct icmp_packet)))
+ if ((!buf) || (count < sizeof(struct batadv_icmp_packet)))
return -EINVAL;
if (!access_ok(VERIFY_WRITE, buf, count))
@@ -129,7 +127,7 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
spin_lock_bh(&socket_client->lock);
socket_packet = list_first_entry(&socket_client->queue_list,
- struct socket_packet, list);
+ struct batadv_socket_packet, list);
list_del(&socket_packet->list);
socket_client->queue_len--;
@@ -146,34 +144,34 @@ static ssize_t bat_socket_read(struct file *file, char __user *buf,
return packet_len;
}
-static ssize_t bat_socket_write(struct file *file, const char __user *buff,
- size_t len, loff_t *off)
+static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
+ size_t len, loff_t *off)
{
- struct socket_client *socket_client = file->private_data;
- struct bat_priv *bat_priv = socket_client->bat_priv;
- struct hard_iface *primary_if = NULL;
+ struct batadv_socket_client *socket_client = file->private_data;
+ struct batadv_priv *bat_priv = socket_client->bat_priv;
+ struct batadv_hard_iface *primary_if = NULL;
struct sk_buff *skb;
- struct icmp_packet_rr *icmp_packet;
+ struct batadv_icmp_packet_rr *icmp_packet;
- struct orig_node *orig_node = NULL;
- struct neigh_node *neigh_node = NULL;
- size_t packet_len = sizeof(struct icmp_packet);
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ size_t packet_len = sizeof(struct batadv_icmp_packet);
- if (len < sizeof(struct icmp_packet)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: invalid packet size\n");
+ if (len < sizeof(struct batadv_icmp_packet)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Error - can't send packet from char device: invalid packet size\n");
return -EINVAL;
}
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
len = -EFAULT;
goto out;
}
- if (len >= sizeof(struct icmp_packet_rr))
- packet_len = sizeof(struct icmp_packet_rr);
+ if (len >= sizeof(struct batadv_icmp_packet_rr))
+ packet_len = sizeof(struct batadv_icmp_packet_rr);
skb = dev_alloc_skb(packet_len + ETH_HLEN);
if (!skb) {
@@ -182,81 +180,82 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
}
skb_reserve(skb, ETH_HLEN);
- icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
+ icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
if (copy_from_user(icmp_packet, buff, packet_len)) {
len = -EFAULT;
goto free_skb;
}
- if (icmp_packet->header.packet_type != BAT_ICMP) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
+ if (icmp_packet->header.packet_type != BATADV_ICMP) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
len = -EINVAL;
goto free_skb;
}
- if (icmp_packet->msg_type != ECHO_REQUEST) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
+ if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Error - can't send packet from char device: got bogus message type (expected: ECHO_REQUEST)\n");
len = -EINVAL;
goto free_skb;
}
icmp_packet->uid = socket_client->index;
- if (icmp_packet->header.version != COMPAT_VERSION) {
- icmp_packet->msg_type = PARAMETER_PROBLEM;
- icmp_packet->header.version = COMPAT_VERSION;
- bat_socket_add_packet(socket_client, icmp_packet, packet_len);
+ if (icmp_packet->header.version != BATADV_COMPAT_VERSION) {
+ icmp_packet->msg_type = BATADV_PARAMETER_PROBLEM;
+ icmp_packet->header.version = BATADV_COMPAT_VERSION;
+ batadv_socket_add_packet(socket_client, icmp_packet,
+ packet_len);
goto free_skb;
}
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto dst_unreach;
- orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
if (!orig_node)
goto dst_unreach;
- neigh_node = orig_node_get_router(orig_node);
+ neigh_node = batadv_orig_node_get_router(orig_node);
if (!neigh_node)
goto dst_unreach;
if (!neigh_node->if_incoming)
goto dst_unreach;
- if (neigh_node->if_incoming->if_status != IF_ACTIVE)
+ if (neigh_node->if_incoming->if_status != BATADV_IF_ACTIVE)
goto dst_unreach;
memcpy(icmp_packet->orig,
primary_if->net_dev->dev_addr, ETH_ALEN);
- if (packet_len == sizeof(struct icmp_packet_rr))
+ if (packet_len == sizeof(struct batadv_icmp_packet_rr))
memcpy(icmp_packet->rr,
neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
goto out;
dst_unreach:
- icmp_packet->msg_type = DESTINATION_UNREACHABLE;
- bat_socket_add_packet(socket_client, icmp_packet, packet_len);
+ icmp_packet->msg_type = BATADV_DESTINATION_UNREACHABLE;
+ batadv_socket_add_packet(socket_client, icmp_packet, packet_len);
free_skb:
kfree_skb(skb);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return len;
}
-static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
+static unsigned int batadv_socket_poll(struct file *file, poll_table *wait)
{
- struct socket_client *socket_client = file->private_data;
+ struct batadv_socket_client *socket_client = file->private_data;
poll_wait(file, &socket_client->queue_wait, wait);
@@ -266,39 +265,39 @@ static unsigned int bat_socket_poll(struct file *file, poll_table *wait)
return 0;
}
-static const struct file_operations fops = {
+static const struct file_operations batadv_fops = {
.owner = THIS_MODULE,
- .open = bat_socket_open,
- .release = bat_socket_release,
- .read = bat_socket_read,
- .write = bat_socket_write,
- .poll = bat_socket_poll,
+ .open = batadv_socket_open,
+ .release = batadv_socket_release,
+ .read = batadv_socket_read,
+ .write = batadv_socket_write,
+ .poll = batadv_socket_poll,
.llseek = no_llseek,
};
-int bat_socket_setup(struct bat_priv *bat_priv)
+int batadv_socket_setup(struct batadv_priv *bat_priv)
{
struct dentry *d;
if (!bat_priv->debug_dir)
goto err;
- d = debugfs_create_file(ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
- bat_priv->debug_dir, bat_priv, &fops);
- if (d)
+ d = debugfs_create_file(BATADV_ICMP_SOCKET, S_IFREG | S_IWUSR | S_IRUSR,
+ bat_priv->debug_dir, bat_priv, &batadv_fops);
+ if (!d)
goto err;
return 0;
err:
- return 1;
+ return -ENOMEM;
}
-static void bat_socket_add_packet(struct socket_client *socket_client,
- struct icmp_packet_rr *icmp_packet,
- size_t icmp_len)
+static void batadv_socket_add_packet(struct batadv_socket_client *socket_client,
+ struct batadv_icmp_packet_rr *icmp_packet,
+ size_t icmp_len)
{
- struct socket_packet *socket_packet;
+ struct batadv_socket_packet *socket_packet;
socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
@@ -312,8 +311,9 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
spin_lock_bh(&socket_client->lock);
/* while waiting for the lock the socket_client could have been
- * deleted */
- if (!socket_client_hash[icmp_packet->uid]) {
+ * deleted
+ */
+ if (!batadv_socket_client_hash[icmp_packet->uid]) {
spin_unlock_bh(&socket_client->lock);
kfree(socket_packet);
return;
@@ -324,7 +324,8 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
if (socket_client->queue_len > 100) {
socket_packet = list_first_entry(&socket_client->queue_list,
- struct socket_packet, list);
+ struct batadv_socket_packet,
+ list);
list_del(&socket_packet->list);
kfree(socket_packet);
@@ -336,11 +337,12 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
wake_up(&socket_client->queue_wait);
}
-void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
- size_t icmp_len)
+void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+ size_t icmp_len)
{
- struct socket_client *hash = socket_client_hash[icmp_packet->uid];
+ struct batadv_socket_client *hash;
+ hash = batadv_socket_client_hash[icmp_packet->uid];
if (hash)
- bat_socket_add_packet(hash, icmp_packet, icmp_len);
+ batadv_socket_add_packet(hash, icmp_packet, icmp_len);
}
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index 380ed4c2443a..29443a1dbb5c 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,17 +15,16 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_
#define _NET_BATMAN_ADV_ICMP_SOCKET_H_
-#define ICMP_SOCKET "socket"
+#define BATADV_ICMP_SOCKET "socket"
-void bat_socket_init(void);
-int bat_socket_setup(struct bat_priv *bat_priv);
-void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet,
- size_t icmp_len);
+void batadv_socket_init(void);
+int batadv_socket_setup(struct batadv_priv *bat_priv);
+void batadv_socket_receive_packet(struct batadv_icmp_packet_rr *icmp_packet,
+ size_t icmp_len);
#endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 083a2993efe4..13c88b25ab31 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,12 +15,11 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
-#include "bat_sysfs.h"
-#include "bat_debugfs.h"
+#include "sysfs.h"
+#include "debugfs.h"
#include "routing.h"
#include "send.h"
#include "originator.h"
@@ -37,61 +35,65 @@
/* List manipulations on hardif_list have to be rtnl_lock()'ed,
- * list traversals just rcu-locked */
-struct list_head hardif_list;
-static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *);
-char bat_routing_algo[20] = "BATMAN IV";
-static struct hlist_head bat_algo_list;
+ * list traversals just rcu-locked
+ */
+struct list_head batadv_hardif_list;
+static int (*batadv_rx_handler[256])(struct sk_buff *,
+ struct batadv_hard_iface *);
+char batadv_routing_algo[20] = "BATMAN_IV";
+static struct hlist_head batadv_algo_list;
-unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-struct workqueue_struct *bat_event_workqueue;
+struct workqueue_struct *batadv_event_workqueue;
-static void recv_handler_init(void);
+static void batadv_recv_handler_init(void);
-static int __init batman_init(void)
+static int __init batadv_init(void)
{
- INIT_LIST_HEAD(&hardif_list);
- INIT_HLIST_HEAD(&bat_algo_list);
+ INIT_LIST_HEAD(&batadv_hardif_list);
+ INIT_HLIST_HEAD(&batadv_algo_list);
- recv_handler_init();
+ batadv_recv_handler_init();
- bat_iv_init();
+ batadv_iv_init();
/* the name should not be longer than 10 chars - see
- * http://lwn.net/Articles/23634/ */
- bat_event_workqueue = create_singlethread_workqueue("bat_events");
+ * http://lwn.net/Articles/23634/
+ */
+ batadv_event_workqueue = create_singlethread_workqueue("bat_events");
- if (!bat_event_workqueue)
+ if (!batadv_event_workqueue)
return -ENOMEM;
- bat_socket_init();
- debugfs_init();
+ batadv_socket_init();
+ batadv_debugfs_init();
- register_netdevice_notifier(&hard_if_notifier);
+ register_netdevice_notifier(&batadv_hard_if_notifier);
pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
- SOURCE_VERSION, COMPAT_VERSION);
+ BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
return 0;
}
-static void __exit batman_exit(void)
+static void __exit batadv_exit(void)
{
- debugfs_destroy();
- unregister_netdevice_notifier(&hard_if_notifier);
- hardif_remove_interfaces();
+ batadv_debugfs_destroy();
+ unregister_netdevice_notifier(&batadv_hard_if_notifier);
+ batadv_hardif_remove_interfaces();
- flush_workqueue(bat_event_workqueue);
- destroy_workqueue(bat_event_workqueue);
- bat_event_workqueue = NULL;
+ flush_workqueue(batadv_event_workqueue);
+ destroy_workqueue(batadv_event_workqueue);
+ batadv_event_workqueue = NULL;
rcu_barrier();
}
-int mesh_init(struct net_device *soft_iface)
+int batadv_mesh_init(struct net_device *soft_iface)
{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ int ret;
spin_lock_init(&bat_priv->forw_bat_list_lock);
spin_lock_init(&bat_priv->forw_bcast_list_lock);
@@ -110,72 +112,77 @@ int mesh_init(struct net_device *soft_iface)
INIT_LIST_HEAD(&bat_priv->tt_req_list);
INIT_LIST_HEAD(&bat_priv->tt_roam_list);
- if (originator_init(bat_priv) < 1)
+ ret = batadv_originator_init(bat_priv);
+ if (ret < 0)
goto err;
- if (tt_init(bat_priv) < 1)
+ ret = batadv_tt_init(bat_priv);
+ if (ret < 0)
goto err;
- tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX);
+ batadv_tt_local_add(soft_iface, soft_iface->dev_addr,
+ BATADV_NULL_IFINDEX);
- if (vis_init(bat_priv) < 1)
+ ret = batadv_vis_init(bat_priv);
+ if (ret < 0)
goto err;
- if (bla_init(bat_priv) < 1)
+ ret = batadv_bla_init(bat_priv);
+ if (ret < 0)
goto err;
atomic_set(&bat_priv->gw_reselect, 0);
- atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
- goto end;
-
-err:
- mesh_free(soft_iface);
- return -1;
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
-end:
return 0;
+
+err:
+ batadv_mesh_free(soft_iface);
+ return ret;
}
-void mesh_free(struct net_device *soft_iface)
+void batadv_mesh_free(struct net_device *soft_iface)
{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
- atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING);
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
- purge_outstanding_packets(bat_priv, NULL);
+ batadv_purge_outstanding_packets(bat_priv, NULL);
- vis_quit(bat_priv);
+ batadv_vis_quit(bat_priv);
- gw_node_purge(bat_priv);
- originator_free(bat_priv);
+ batadv_gw_node_purge(bat_priv);
+ batadv_originator_free(bat_priv);
- tt_free(bat_priv);
+ batadv_tt_free(bat_priv);
- bla_free(bat_priv);
+ batadv_bla_free(bat_priv);
- atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
+ free_percpu(bat_priv->bat_counters);
+
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}
-void inc_module_count(void)
+void batadv_inc_module_count(void)
{
try_module_get(THIS_MODULE);
}
-void dec_module_count(void)
+void batadv_dec_module_count(void)
{
module_put(THIS_MODULE);
}
-int is_my_mac(const uint8_t *addr)
+int batadv_is_my_mac(const uint8_t *addr)
{
- const struct hard_iface *hard_iface;
+ const struct batadv_hard_iface *hard_iface;
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
- if (hard_iface->if_status != IF_ACTIVE)
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
continue;
- if (compare_eth(hard_iface->net_dev->dev_addr, addr)) {
+ if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
rcu_read_unlock();
return 1;
}
@@ -184,8 +191,8 @@ int is_my_mac(const uint8_t *addr)
return 0;
}
-static int recv_unhandled_packet(struct sk_buff *skb,
- struct hard_iface *recv_if)
+static int batadv_recv_unhandled_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
return NET_RX_DROP;
}
@@ -193,16 +200,18 @@ static int recv_unhandled_packet(struct sk_buff *skb,
/* incoming packets with the batman ethertype received on any active hard
* interface
*/
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype, struct net_device *orig_dev)
+int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev)
{
- struct bat_priv *bat_priv;
- struct batman_ogm_packet *batman_ogm_packet;
- struct hard_iface *hard_iface;
+ struct batadv_priv *bat_priv;
+ struct batadv_ogm_packet *batadv_ogm_packet;
+ struct batadv_hard_iface *hard_iface;
uint8_t idx;
int ret;
- hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype);
+ hard_iface = container_of(ptype, struct batadv_hard_iface,
+ batman_adv_ptype);
skb = skb_share_check(skb, GFP_ATOMIC);
/* skb was released by skb_share_check() */
@@ -222,27 +231,27 @@ int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
bat_priv = netdev_priv(hard_iface->soft_iface);
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto err_free;
/* discard frames on not active interfaces */
- if (hard_iface->if_status != IF_ACTIVE)
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
goto err_free;
- batman_ogm_packet = (struct batman_ogm_packet *)skb->data;
+ batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
- if (batman_ogm_packet->header.version != COMPAT_VERSION) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Drop packet: incompatible batman version (%i)\n",
- batman_ogm_packet->header.version);
+ if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: incompatible batman version (%i)\n",
+ batadv_ogm_packet->header.version);
goto err_free;
}
/* all receive handlers return whether they received or reused
* the supplied skb. if not, we have to free the skb.
*/
- idx = batman_ogm_packet->header.packet_type;
- ret = (*recv_packet_handler[idx])(skb, hard_iface);
+ idx = batadv_ogm_packet->header.packet_type;
+ ret = (*batadv_rx_handler[idx])(skb, hard_iface);
if (ret == NET_RX_DROP)
kfree_skb(skb);
@@ -259,51 +268,52 @@ err_out:
return NET_RX_DROP;
}
-static void recv_handler_init(void)
+static void batadv_recv_handler_init(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(recv_packet_handler); i++)
- recv_packet_handler[i] = recv_unhandled_packet;
+ for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
+ batadv_rx_handler[i] = batadv_recv_unhandled_packet;
/* batman icmp packet */
- recv_packet_handler[BAT_ICMP] = recv_icmp_packet;
+ batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
/* unicast packet */
- recv_packet_handler[BAT_UNICAST] = recv_unicast_packet;
+ batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
/* fragmented unicast packet */
- recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet;
+ batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_ucast_frag_packet;
/* broadcast packet */
- recv_packet_handler[BAT_BCAST] = recv_bcast_packet;
+ batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
/* vis packet */
- recv_packet_handler[BAT_VIS] = recv_vis_packet;
+ batadv_rx_handler[BATADV_VIS] = batadv_recv_vis_packet;
/* Translation table query (request or response) */
- recv_packet_handler[BAT_TT_QUERY] = recv_tt_query;
+ batadv_rx_handler[BATADV_TT_QUERY] = batadv_recv_tt_query;
/* Roaming advertisement */
- recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv;
+ batadv_rx_handler[BATADV_ROAM_ADV] = batadv_recv_roam_adv;
}
-int recv_handler_register(uint8_t packet_type,
- int (*recv_handler)(struct sk_buff *,
- struct hard_iface *))
+int
+batadv_recv_handler_register(uint8_t packet_type,
+ int (*recv_handler)(struct sk_buff *,
+ struct batadv_hard_iface *))
{
- if (recv_packet_handler[packet_type] != &recv_unhandled_packet)
+ if (batadv_rx_handler[packet_type] != &batadv_recv_unhandled_packet)
return -EBUSY;
- recv_packet_handler[packet_type] = recv_handler;
+ batadv_rx_handler[packet_type] = recv_handler;
return 0;
}
-void recv_handler_unregister(uint8_t packet_type)
+void batadv_recv_handler_unregister(uint8_t packet_type)
{
- recv_packet_handler[packet_type] = recv_unhandled_packet;
+ batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}
-static struct bat_algo_ops *bat_algo_get(char *name)
+static struct batadv_algo_ops *batadv_algo_get(char *name)
{
- struct bat_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
+ struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;
struct hlist_node *node;
- hlist_for_each_entry(bat_algo_ops_tmp, node, &bat_algo_list, list) {
+ hlist_for_each_entry(bat_algo_ops_tmp, node, &batadv_algo_list, list) {
if (strcmp(bat_algo_ops_tmp->name, name) != 0)
continue;
@@ -314,15 +324,16 @@ static struct bat_algo_ops *bat_algo_get(char *name)
return bat_algo_ops;
}
-int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
+int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
- struct bat_algo_ops *bat_algo_ops_tmp;
- int ret = -1;
+ struct batadv_algo_ops *bat_algo_ops_tmp;
+ int ret;
- bat_algo_ops_tmp = bat_algo_get(bat_algo_ops->name);
+ bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
if (bat_algo_ops_tmp) {
pr_info("Trying to register already registered routing algorithm: %s\n",
bat_algo_ops->name);
+ ret = -EEXIST;
goto out;
}
@@ -335,23 +346,24 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
!bat_algo_ops->bat_ogm_emit) {
pr_info("Routing algo '%s' does not implement required ops\n",
bat_algo_ops->name);
+ ret = -EINVAL;
goto out;
}
INIT_HLIST_NODE(&bat_algo_ops->list);
- hlist_add_head(&bat_algo_ops->list, &bat_algo_list);
+ hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);
ret = 0;
out:
return ret;
}
-int bat_algo_select(struct bat_priv *bat_priv, char *name)
+int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
- struct bat_algo_ops *bat_algo_ops;
- int ret = -1;
+ struct batadv_algo_ops *bat_algo_ops;
+ int ret = -EINVAL;
- bat_algo_ops = bat_algo_get(name);
+ bat_algo_ops = batadv_algo_get(name);
if (!bat_algo_ops)
goto out;
@@ -362,50 +374,56 @@ out:
return ret;
}
-int bat_algo_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
- struct bat_algo_ops *bat_algo_ops;
+ struct batadv_algo_ops *bat_algo_ops;
struct hlist_node *node;
seq_printf(seq, "Available routing algorithms:\n");
- hlist_for_each_entry(bat_algo_ops, node, &bat_algo_list, list) {
+ hlist_for_each_entry(bat_algo_ops, node, &batadv_algo_list, list) {
seq_printf(seq, "%s\n", bat_algo_ops->name);
}
return 0;
}
-static int param_set_ra(const char *val, const struct kernel_param *kp)
+static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
- struct bat_algo_ops *bat_algo_ops;
+ struct batadv_algo_ops *bat_algo_ops;
+ char *algo_name = (char *)val;
+ size_t name_len = strlen(algo_name);
+
+ if (algo_name[name_len - 1] == '\n')
+ algo_name[name_len - 1] = '\0';
- bat_algo_ops = bat_algo_get((char *)val);
+ bat_algo_ops = batadv_algo_get(algo_name);
if (!bat_algo_ops) {
- pr_err("Routing algorithm '%s' is not supported\n", val);
+ pr_err("Routing algorithm '%s' is not supported\n", algo_name);
return -EINVAL;
}
- return param_set_copystring(val, kp);
+ return param_set_copystring(algo_name, kp);
}
-static const struct kernel_param_ops param_ops_ra = {
- .set = param_set_ra,
+static const struct kernel_param_ops batadv_param_ops_ra = {
+ .set = batadv_param_set_ra,
.get = param_get_string,
};
-static struct kparam_string __param_string_ra = {
- .maxlen = sizeof(bat_routing_algo),
- .string = bat_routing_algo,
+static struct kparam_string batadv_param_string_ra = {
+ .maxlen = sizeof(batadv_routing_algo),
+ .string = batadv_routing_algo,
};
-module_param_cb(routing_algo, &param_ops_ra, &__param_string_ra, 0644);
-module_init(batman_init);
-module_exit(batman_exit);
+module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
+ 0644);
+module_init(batadv_init);
+module_exit(batadv_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
-MODULE_VERSION(SOURCE_VERSION);
+MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
+MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
+MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
+MODULE_VERSION(BATADV_SOURCE_VERSION);
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index f4a3ec003479..5d8fa0757947 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,100 +15,106 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_MAIN_H_
#define _NET_BATMAN_ADV_MAIN_H_
-#define DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
- "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
-#define DRIVER_DESC "B.A.T.M.A.N. advanced"
-#define DRIVER_DEVICE "batman-adv"
+#define BATADV_DRIVER_AUTHOR "Marek Lindner <lindner_marek@yahoo.de>, " \
+ "Simon Wunderlich <siwu@hrz.tu-chemnitz.de>"
+#define BATADV_DRIVER_DESC "B.A.T.M.A.N. advanced"
+#define BATADV_DRIVER_DEVICE "batman-adv"
-#ifndef SOURCE_VERSION
-#define SOURCE_VERSION "2012.2.0"
+#ifndef BATADV_SOURCE_VERSION
+#define BATADV_SOURCE_VERSION "2012.3.0"
#endif
/* B.A.T.M.A.N. parameters */
-#define TQ_MAX_VALUE 255
-#define JITTER 20
+#define BATADV_TQ_MAX_VALUE 255
+#define BATADV_JITTER 20
- /* Time To Live of broadcast messages */
-#define TTL 50
+/* Time To Live of broadcast messages */
+#define BATADV_TTL 50
/* purge originators after time in seconds if no valid packet comes in
- * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
-#define PURGE_TIMEOUT 200000 /* 200 seconds */
-#define TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
-#define TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
+ * -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
+ */
+#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
+#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */
+#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */
/* sliding packet range of received originator messages in sequence numbers
- * (should be a multiple of our word size) */
-#define TQ_LOCAL_WINDOW_SIZE 64
-#define TT_REQUEST_TIMEOUT 3000 /* miliseconds we have to keep
- * pending tt_req */
+ * (should be a multiple of our word size)
+ */
+#define BATADV_TQ_LOCAL_WINDOW_SIZE 64
+/* miliseconds we have to keep pending tt_req */
+#define BATADV_TT_REQUEST_TIMEOUT 3000
-#define TQ_GLOBAL_WINDOW_SIZE 5
-#define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
-#define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
-#define TQ_TOTAL_BIDRECT_LIMIT 1
+#define BATADV_TQ_GLOBAL_WINDOW_SIZE 5
+#define BATADV_TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
+#define BATADV_TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
+#define BATADV_TQ_TOTAL_BIDRECT_LIMIT 1
-#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */
+/* number of OGMs sent with the last tt diff */
+#define BATADV_TT_OGM_APPEND_MAX 3
-#define ROAMING_MAX_TIME 20000 /* Time in which a client can roam at most
- * ROAMING_MAX_COUNT times in miliseconds*/
-#define ROAMING_MAX_COUNT 5
+/* Time in which a client can roam at most ROAMING_MAX_COUNT times in
+ * miliseconds
+ */
+#define BATADV_ROAMING_MAX_TIME 20000
+#define BATADV_ROAMING_MAX_COUNT 5
-#define NO_FLAGS 0
+#define BATADV_NO_FLAGS 0
-#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
+#define BATADV_NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
-#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE)
+#define BATADV_NUM_WORDS BITS_TO_LONGS(BATADV_TQ_LOCAL_WINDOW_SIZE)
-#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
+#define BATADV_LOG_BUF_LEN 8192 /* has to be a power of 2 */
-#define VIS_INTERVAL 5000 /* 5 seconds */
+#define BATADV_VIS_INTERVAL 5000 /* 5 seconds */
/* how much worse secondary interfaces may be to be considered as bonding
- * candidates */
-#define BONDING_TQ_THRESHOLD 50
+ * candidates
+ */
+#define BATADV_BONDING_TQ_THRESHOLD 50
/* should not be bigger than 512 bytes or change the size of
- * forw_packet->direct_link_flags */
-#define MAX_AGGREGATION_BYTES 512
-#define MAX_AGGREGATION_MS 100
+ * forw_packet->direct_link_flags
+ */
+#define BATADV_MAX_AGGREGATION_BYTES 512
+#define BATADV_MAX_AGGREGATION_MS 100
-#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */
-#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3)
-#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10)
+#define BATADV_BLA_PERIOD_LENGTH 10000 /* 10 seconds */
+#define BATADV_BLA_BACKBONE_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 3)
+#define BATADV_BLA_CLAIM_TIMEOUT (BATADV_BLA_PERIOD_LENGTH * 10)
-#define DUPLIST_SIZE 16
-#define DUPLIST_TIMEOUT 500 /* 500 ms */
+#define BATADV_DUPLIST_SIZE 16
+#define BATADV_DUPLIST_TIMEOUT 500 /* 500 ms */
/* don't reset again within 30 seconds */
-#define RESET_PROTECTION_MS 30000
-#define EXPECTED_SEQNO_RANGE 65536
+#define BATADV_RESET_PROTECTION_MS 30000
+#define BATADV_EXPECTED_SEQNO_RANGE 65536
-enum mesh_state {
- MESH_INACTIVE,
- MESH_ACTIVE,
- MESH_DEACTIVATING
+enum batadv_mesh_state {
+ BATADV_MESH_INACTIVE,
+ BATADV_MESH_ACTIVE,
+ BATADV_MESH_DEACTIVATING,
};
-#define BCAST_QUEUE_LEN 256
-#define BATMAN_QUEUE_LEN 256
+#define BATADV_BCAST_QUEUE_LEN 256
+#define BATADV_BATMAN_QUEUE_LEN 256
-enum uev_action {
- UEV_ADD = 0,
- UEV_DEL,
- UEV_CHANGE
+enum batadv_uev_action {
+ BATADV_UEV_ADD = 0,
+ BATADV_UEV_DEL,
+ BATADV_UEV_CHANGE,
};
-enum uev_type {
- UEV_GW = 0
+enum batadv_uev_type {
+ BATADV_UEV_GW = 0,
};
-#define GW_THRESHOLD 50
+#define BATADV_GW_THRESHOLD 50
/* Debug Messages */
#ifdef pr_fmt
@@ -119,12 +124,12 @@ enum uev_type {
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* all messages related to routing / flooding / broadcasting / etc */
-enum dbg_level {
- DBG_BATMAN = 1 << 0,
- DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
- DBG_TT = 1 << 2, /* translation table operations */
- DBG_BLA = 1 << 3, /* bridge loop avoidance */
- DBG_ALL = 15
+enum batadv_dbg_level {
+ BATADV_DBG_BATMAN = 1 << 0,
+ BATADV_DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
+ BATADV_DBG_TT = 1 << 2, /* translation table operations */
+ BATADV_DBG_BLA = 1 << 3, /* bridge loop avoidance */
+ BATADV_DBG_ALL = 15,
};
/* Kernel headers */
@@ -138,73 +143,75 @@ enum dbg_level {
#include <linux/kthread.h> /* kernel threads */
#include <linux/pkt_sched.h> /* schedule types */
#include <linux/workqueue.h> /* workqueue */
+#include <linux/percpu.h>
#include <linux/slab.h>
#include <net/sock.h> /* struct sock */
#include <linux/jiffies.h>
#include <linux/seq_file.h>
#include "types.h"
-extern char bat_routing_algo[];
-extern struct list_head hardif_list;
-
-extern unsigned char broadcast_addr[];
-extern struct workqueue_struct *bat_event_workqueue;
-
-int mesh_init(struct net_device *soft_iface);
-void mesh_free(struct net_device *soft_iface);
-void inc_module_count(void);
-void dec_module_count(void);
-int is_my_mac(const uint8_t *addr);
-int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *ptype, struct net_device *orig_dev);
-int recv_handler_register(uint8_t packet_type,
- int (*recv_handler)(struct sk_buff *,
- struct hard_iface *));
-void recv_handler_unregister(uint8_t packet_type);
-int bat_algo_register(struct bat_algo_ops *bat_algo_ops);
-int bat_algo_select(struct bat_priv *bat_priv, char *name);
-int bat_algo_seq_print_text(struct seq_file *seq, void *offset);
+extern char batadv_routing_algo[];
+extern struct list_head batadv_hardif_list;
+
+extern unsigned char batadv_broadcast_addr[];
+extern struct workqueue_struct *batadv_event_workqueue;
+
+int batadv_mesh_init(struct net_device *soft_iface);
+void batadv_mesh_free(struct net_device *soft_iface);
+void batadv_inc_module_count(void);
+void batadv_dec_module_count(void);
+int batadv_is_my_mac(const uint8_t *addr);
+int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype,
+ struct net_device *orig_dev);
+int
+batadv_recv_handler_register(uint8_t packet_type,
+ int (*recv_handler)(struct sk_buff *,
+ struct batadv_hard_iface *));
+void batadv_recv_handler_unregister(uint8_t packet_type);
+int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops);
+int batadv_algo_select(struct batadv_priv *bat_priv, char *name);
+int batadv_algo_seq_print_text(struct seq_file *seq, void *offset);
#ifdef CONFIG_BATMAN_ADV_DEBUG
-int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
+int batadv_debug_log(struct batadv_priv *bat_priv, const char *fmt, ...)
+__printf(2, 3);
-#define bat_dbg(type, bat_priv, fmt, arg...) \
+#define batadv_dbg(type, bat_priv, fmt, arg...) \
do { \
if (atomic_read(&bat_priv->log_level) & type) \
- debug_log(bat_priv, fmt, ## arg); \
+ batadv_debug_log(bat_priv, fmt, ## arg);\
} \
while (0)
#else /* !CONFIG_BATMAN_ADV_DEBUG */
__printf(3, 4)
-static inline void bat_dbg(int type __always_unused,
- struct bat_priv *bat_priv __always_unused,
- const char *fmt __always_unused, ...)
+static inline void batadv_dbg(int type __always_unused,
+ struct batadv_priv *bat_priv __always_unused,
+ const char *fmt __always_unused, ...)
{
}
#endif
-#define bat_info(net_dev, fmt, arg...) \
+#define batadv_info(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
- struct bat_priv *_batpriv = netdev_priv(_netdev); \
- bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
+ struct batadv_priv *_batpriv = netdev_priv(_netdev); \
+ batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
pr_info("%s: " fmt, _netdev->name, ## arg); \
} while (0)
-#define bat_err(net_dev, fmt, arg...) \
+#define batadv_err(net_dev, fmt, arg...) \
do { \
struct net_device *_netdev = (net_dev); \
- struct bat_priv *_batpriv = netdev_priv(_netdev); \
- bat_dbg(DBG_ALL, _batpriv, fmt, ## arg); \
+ struct batadv_priv *_batpriv = netdev_priv(_netdev); \
+ batadv_dbg(BATADV_DBG_ALL, _batpriv, fmt, ## arg); \
pr_err("%s: " fmt, _netdev->name, ## arg); \
} while (0)
-/**
- * returns 1 if they are the same ethernet addr
+/* returns 1 if they are the same ethernet addr
*
* note: can't use compare_ether_addr() as it requires aligned memory
*/
-
-static inline int compare_eth(const void *data1, const void *data2)
+static inline int batadv_compare_eth(const void *data1, const void *data2)
{
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
@@ -216,15 +223,16 @@ static inline int compare_eth(const void *data1, const void *data2)
*
* Returns true if current time is after timestamp + timeout
*/
-static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
+static inline bool batadv_has_timed_out(unsigned long timestamp,
+ unsigned int timeout)
{
return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
}
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+#define batadv_atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
/* Returns the smallest signed integer in two's complement with the sizeof x */
-#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
+#define batadv_smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
/* Checks if a sequence number x is a predecessor/successor of y.
* they handle overflows/underflows and can correctly check for a
@@ -234,12 +242,39 @@ static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout)
* - when adding nothing - it is neither a predecessor nor a successor
* - before adding more than 127 to the starting value - it is a predecessor,
* - when adding 128 - it is neither a predecessor nor a successor,
- * - after adding more than 127 to the starting value - it is a successor */
-#define seq_before(x, y) ({typeof(x) _d1 = (x); \
- typeof(y) _d2 = (y); \
- typeof(x) _dummy = (_d1 - _d2); \
- (void) (&_d1 == &_d2); \
- _dummy > smallest_signed_int(_dummy); })
-#define seq_after(x, y) seq_before(y, x)
+ * - after adding more than 127 to the starting value - it is a successor
+ */
+#define batadv_seq_before(x, y) ({typeof(x) _d1 = (x); \
+ typeof(y) _d2 = (y); \
+ typeof(x) _dummy = (_d1 - _d2); \
+ (void) (&_d1 == &_d2); \
+ _dummy > batadv_smallest_signed_int(_dummy); })
+#define batadv_seq_after(x, y) batadv_seq_before(y, x)
+
+/* Stop preemption on local cpu while incrementing the counter */
+static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
+ size_t count)
+{
+ int cpu = get_cpu();
+ per_cpu_ptr(bat_priv->bat_counters, cpu)[idx] += count;
+ put_cpu();
+}
+
+#define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
+
+/* Sum and return the cpu-local counters for index 'idx' */
+static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
+ size_t idx)
+{
+ uint64_t *counters, sum = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
+ sum += counters[idx];
+ }
+
+ return sum;
+}
#endif /* _NET_BATMAN_ADV_MAIN_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 41147942ba53..ac9bdf8f80a6 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -30,50 +28,52 @@
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
-static void purge_orig(struct work_struct *work);
+static void batadv_purge_orig(struct work_struct *work);
-static void start_purge_timer(struct bat_priv *bat_priv)
+static void batadv_start_purge_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
- queue_delayed_work(bat_event_workqueue,
+ INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
+ queue_delayed_work(batadv_event_workqueue,
&bat_priv->orig_work, msecs_to_jiffies(1000));
}
/* returns 1 if they are the same originator */
-static int compare_orig(const struct hlist_node *node, const void *data2)
+static int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
- const void *data1 = container_of(node, struct orig_node, hash_entry);
+ const void *data1 = container_of(node, struct batadv_orig_node,
+ hash_entry);
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
-int originator_init(struct bat_priv *bat_priv)
+int batadv_originator_init(struct batadv_priv *bat_priv)
{
if (bat_priv->orig_hash)
- return 1;
+ return 0;
- bat_priv->orig_hash = hash_new(1024);
+ bat_priv->orig_hash = batadv_hash_new(1024);
if (!bat_priv->orig_hash)
goto err;
- start_purge_timer(bat_priv);
- return 1;
+ batadv_start_purge_timer(bat_priv);
+ return 0;
err:
- return 0;
+ return -ENOMEM;
}
-void neigh_node_free_ref(struct neigh_node *neigh_node)
+void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
if (atomic_dec_and_test(&neigh_node->refcount))
kfree_rcu(neigh_node, rcu);
}
/* increases the refcounter of a found router */
-struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
+struct batadv_neigh_node *
+batadv_orig_node_get_router(struct batadv_orig_node *orig_node)
{
- struct neigh_node *router;
+ struct batadv_neigh_node *router;
rcu_read_lock();
router = rcu_dereference(orig_node->router);
@@ -85,12 +85,12 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
return router;
}
-struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
- const uint8_t *neigh_addr,
- uint32_t seqno)
+struct batadv_neigh_node *
+batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
+ const uint8_t *neigh_addr, uint32_t seqno)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct neigh_node *neigh_node;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_neigh_node *neigh_node;
neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
if (!neigh_node)
@@ -104,21 +104,21 @@ struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
/* extra reference for return */
atomic_set(&neigh_node->refcount, 2);
- bat_dbg(DBG_BATMAN, bat_priv,
- "Creating new neighbor %pM, initial seqno %d\n",
- neigh_addr, seqno);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Creating new neighbor %pM, initial seqno %d\n",
+ neigh_addr, seqno);
out:
return neigh_node;
}
-static void orig_node_free_rcu(struct rcu_head *rcu)
+static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
struct hlist_node *node, *node_tmp;
- struct neigh_node *neigh_node, *tmp_neigh_node;
- struct orig_node *orig_node;
+ struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
+ struct batadv_orig_node *orig_node;
- orig_node = container_of(rcu, struct orig_node, rcu);
+ orig_node = container_of(rcu, struct batadv_orig_node, rcu);
spin_lock_bh(&orig_node->neigh_list_lock);
@@ -126,21 +126,21 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
list_for_each_entry_safe(neigh_node, tmp_neigh_node,
&orig_node->bond_list, bonding_list) {
list_del_rcu(&neigh_node->bonding_list);
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
}
/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe(neigh_node, node, node_tmp,
&orig_node->neigh_list, list) {
hlist_del_rcu(&neigh_node->list);
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
}
spin_unlock_bh(&orig_node->neigh_list_lock);
- frag_list_free(&orig_node->frag_list);
- tt_global_del_orig(orig_node->bat_priv, orig_node,
- "originator timed out");
+ batadv_frag_list_free(&orig_node->frag_list);
+ batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
+ "originator timed out");
kfree(orig_node->tt_buff);
kfree(orig_node->bcast_own);
@@ -148,19 +148,19 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
kfree(orig_node);
}
-void orig_node_free_ref(struct orig_node *orig_node)
+void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
if (atomic_dec_and_test(&orig_node->refcount))
- call_rcu(&orig_node->rcu, orig_node_free_rcu);
+ call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}
-void originator_free(struct bat_priv *bat_priv)
+void batadv_originator_free(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
uint32_t i;
if (!hash)
@@ -179,28 +179,31 @@ void originator_free(struct bat_priv *bat_priv)
head, hash_entry) {
hlist_del_rcu(node);
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
spin_unlock_bh(list_lock);
}
- hash_destroy(hash);
+ batadv_hash_destroy(hash);
}
/* this function finds or creates an originator entry for the given
- * address if it does not exits */
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
+ * address if it does not exits
+ */
+struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+ const uint8_t *addr)
{
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
int size;
int hash_added;
+ unsigned long reset_time;
- orig_node = orig_hash_find(bat_priv, addr);
+ orig_node = batadv_orig_hash_find(bat_priv, addr);
if (orig_node)
return orig_node;
- bat_dbg(DBG_BATMAN, bat_priv,
- "Creating new originator: %pM\n", addr);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Creating new originator: %pM\n", addr);
orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
if (!orig_node)
@@ -226,14 +229,13 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
orig_node->tt_buff = NULL;
orig_node->tt_buff_len = 0;
atomic_set(&orig_node->tt_size, 0);
- orig_node->bcast_seqno_reset = jiffies - 1
- - msecs_to_jiffies(RESET_PROTECTION_MS);
- orig_node->batman_seqno_reset = jiffies - 1
- - msecs_to_jiffies(RESET_PROTECTION_MS);
+ reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
+ orig_node->bcast_seqno_reset = reset_time;
+ orig_node->batman_seqno_reset = reset_time;
atomic_set(&orig_node->bond_candidates, 0);
- size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;
+ size = bat_priv->num_ifaces * sizeof(unsigned long) * BATADV_NUM_WORDS;
orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
if (!orig_node->bcast_own)
@@ -248,8 +250,9 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
if (!orig_node->bcast_own_sum)
goto free_bcast_own;
- hash_added = hash_add(bat_priv->orig_hash, compare_orig,
- choose_orig, orig_node, &orig_node->hash_entry);
+ hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
+ batadv_choose_orig, orig_node,
+ &orig_node->hash_entry);
if (hash_added != 0)
goto free_bcast_own_sum;
@@ -263,14 +266,16 @@ free_orig_node:
return NULL;
}
-static bool purge_orig_neighbors(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct neigh_node **best_neigh_node)
+static bool
+batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node **best_neigh_node)
{
struct hlist_node *node, *node_tmp;
- struct neigh_node *neigh_node;
+ struct batadv_neigh_node *neigh_node;
bool neigh_purged = false;
unsigned long last_seen;
+ struct batadv_hard_iface *if_incoming;
*best_neigh_node = NULL;
@@ -280,34 +285,32 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
hlist_for_each_entry_safe(neigh_node, node, node_tmp,
&orig_node->neigh_list, list) {
- if ((has_timed_out(neigh_node->last_seen, PURGE_TIMEOUT)) ||
- (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
- (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
- (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {
-
- last_seen = neigh_node->last_seen;
-
- if ((neigh_node->if_incoming->if_status ==
- IF_INACTIVE) ||
- (neigh_node->if_incoming->if_status ==
- IF_NOT_IN_USE) ||
- (neigh_node->if_incoming->if_status ==
- IF_TO_BE_REMOVED))
- bat_dbg(DBG_BATMAN, bat_priv,
- "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
- orig_node->orig, neigh_node->addr,
- neigh_node->if_incoming->net_dev->name);
+ last_seen = neigh_node->last_seen;
+ if_incoming = neigh_node->if_incoming;
+
+ if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
+ (if_incoming->if_status == BATADV_IF_INACTIVE) ||
+ (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
+ (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
+
+ if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
+ (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
+ (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
+ orig_node->orig, neigh_node->addr,
+ if_incoming->net_dev->name);
else
- bat_dbg(DBG_BATMAN, bat_priv,
- "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
- orig_node->orig, neigh_node->addr,
- jiffies_to_msecs(last_seen));
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
+ orig_node->orig, neigh_node->addr,
+ jiffies_to_msecs(last_seen));
neigh_purged = true;
hlist_del_rcu(&neigh_node->list);
- bonding_candidate_del(orig_node, neigh_node);
- neigh_node_free_ref(neigh_node);
+ batadv_bonding_candidate_del(orig_node, neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
} else {
if ((!*best_neigh_node) ||
(neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
@@ -319,33 +322,35 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv,
return neigh_purged;
}
-static bool purge_orig_node(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
+static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
- struct neigh_node *best_neigh_node;
-
- if (has_timed_out(orig_node->last_seen, 2 * PURGE_TIMEOUT)) {
- bat_dbg(DBG_BATMAN, bat_priv,
- "Originator timeout: originator %pM, last_seen %u\n",
- orig_node->orig,
- jiffies_to_msecs(orig_node->last_seen));
+ struct batadv_neigh_node *best_neigh_node;
+
+ if (batadv_has_timed_out(orig_node->last_seen,
+ 2 * BATADV_PURGE_TIMEOUT)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Originator timeout: originator %pM, last_seen %u\n",
+ orig_node->orig,
+ jiffies_to_msecs(orig_node->last_seen));
return true;
} else {
- if (purge_orig_neighbors(bat_priv, orig_node,
- &best_neigh_node))
- update_route(bat_priv, orig_node, best_neigh_node);
+ if (batadv_purge_orig_neighbors(bat_priv, orig_node,
+ &best_neigh_node))
+ batadv_update_route(bat_priv, orig_node,
+ best_neigh_node);
}
return false;
}
-static void _purge_orig(struct bat_priv *bat_priv)
+static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* spinlock to protect write access */
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
uint32_t i;
if (!hash)
@@ -359,58 +364,60 @@ static void _purge_orig(struct bat_priv *bat_priv)
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(orig_node, node, node_tmp,
head, hash_entry) {
- if (purge_orig_node(bat_priv, orig_node)) {
+ if (batadv_purge_orig_node(bat_priv, orig_node)) {
if (orig_node->gw_flags)
- gw_node_delete(bat_priv, orig_node);
+ batadv_gw_node_delete(bat_priv,
+ orig_node);
hlist_del_rcu(node);
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
continue;
}
- if (has_timed_out(orig_node->last_frag_packet,
- FRAG_TIMEOUT))
- frag_list_free(&orig_node->frag_list);
+ if (batadv_has_timed_out(orig_node->last_frag_packet,
+ BATADV_FRAG_TIMEOUT))
+ batadv_frag_list_free(&orig_node->frag_list);
}
spin_unlock_bh(list_lock);
}
- gw_node_purge(bat_priv);
- gw_election(bat_priv);
+ batadv_gw_node_purge(bat_priv);
+ batadv_gw_election(bat_priv);
}
-static void purge_orig(struct work_struct *work)
+static void batadv_purge_orig(struct work_struct *work)
{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, orig_work);
+ struct delayed_work *delayed_work;
+ struct batadv_priv *bat_priv;
- _purge_orig(bat_priv);
- start_purge_timer(bat_priv);
+ delayed_work = container_of(work, struct delayed_work, work);
+ bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
+ _batadv_purge_orig(bat_priv);
+ batadv_start_purge_timer(bat_priv);
}
-void purge_orig_ref(struct bat_priv *bat_priv)
+void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
- _purge_orig(bat_priv);
+ _batadv_purge_orig(bat_priv);
}
-int orig_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- struct hard_iface *primary_if;
- struct orig_node *orig_node;
- struct neigh_node *neigh_node, *neigh_node_tmp;
+ struct batadv_hard_iface *primary_if;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *neigh_node, *neigh_node_tmp;
int batman_count = 0;
int last_seen_secs;
int last_seen_msecs;
+ unsigned long last_seen_jiffies;
uint32_t i;
int ret = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = seq_printf(seq,
@@ -419,7 +426,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
goto out;
}
- if (primary_if->if_status != IF_ACTIVE) {
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
@@ -427,28 +434,28 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
}
seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
- SOURCE_VERSION, primary_if->net_dev->name,
+ BATADV_SOURCE_VERSION, primary_if->net_dev->name,
primary_if->net_dev->dev_addr, net_dev->name);
seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
- "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
- "outgoingIF", "Potential nexthops");
+ "Originator", "last-seen", "#", BATADV_TQ_MAX_VALUE,
+ "Nexthop", "outgoingIF", "Potential nexthops");
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
- neigh_node = orig_node_get_router(orig_node);
+ neigh_node = batadv_orig_node_get_router(orig_node);
if (!neigh_node)
continue;
if (neigh_node->tq_avg == 0)
goto next;
- last_seen_secs = jiffies_to_msecs(jiffies -
- orig_node->last_seen) / 1000;
- last_seen_msecs = jiffies_to_msecs(jiffies -
- orig_node->last_seen) % 1000;
+ last_seen_jiffies = jiffies - orig_node->last_seen;
+ last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
+ last_seen_secs = last_seen_msecs / 1000;
+ last_seen_msecs = last_seen_msecs % 1000;
seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
orig_node->orig, last_seen_secs,
@@ -467,7 +474,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
batman_count++;
next:
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
}
rcu_read_unlock();
}
@@ -477,27 +484,29 @@ next:
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
-static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
+static int batadv_orig_node_add_if(struct batadv_orig_node *orig_node,
+ int max_if_num)
{
void *data_ptr;
+ size_t data_size, old_size;
- data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
- GFP_ATOMIC);
+ data_size = max_if_num * sizeof(unsigned long) * BATADV_NUM_WORDS;
+ old_size = (max_if_num - 1) * sizeof(unsigned long) * BATADV_NUM_WORDS;
+ data_ptr = kmalloc(data_size, GFP_ATOMIC);
if (!data_ptr)
- return -1;
+ return -ENOMEM;
- memcpy(data_ptr, orig_node->bcast_own,
- (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
+ memcpy(data_ptr, orig_node->bcast_own, old_size);
kfree(orig_node->bcast_own);
orig_node->bcast_own = data_ptr;
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
if (!data_ptr)
- return -1;
+ return -ENOMEM;
memcpy(data_ptr, orig_node->bcast_own_sum,
(max_if_num - 1) * sizeof(uint8_t));
@@ -507,28 +516,30 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
return 0;
}
-int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num)
+int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
+ int max_if_num)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
uint32_t i;
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
- * if_num */
+ * if_num
+ */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
- ret = orig_node_add_if(orig_node, max_if_num);
+ ret = batadv_orig_node_add_if(orig_node, max_if_num);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
- if (ret == -1)
+ if (ret == -ENOMEM)
goto err;
}
rcu_read_unlock();
@@ -541,8 +552,8 @@ err:
return -ENOMEM;
}
-static int orig_node_del_if(struct orig_node *orig_node,
- int max_if_num, int del_if_num)
+static int batadv_orig_node_del_if(struct batadv_orig_node *orig_node,
+ int max_if_num, int del_if_num)
{
void *data_ptr = NULL;
int chunk_size;
@@ -551,10 +562,10 @@ static int orig_node_del_if(struct orig_node *orig_node,
if (max_if_num == 0)
goto free_bcast_own;
- chunk_size = sizeof(unsigned long) * NUM_WORDS;
+ chunk_size = sizeof(unsigned long) * BATADV_NUM_WORDS;
data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
if (!data_ptr)
- return -1;
+ return -ENOMEM;
/* copy first part */
memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
@@ -573,7 +584,7 @@ free_bcast_own:
data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
if (!data_ptr)
- return -1;
+ return -ENOMEM;
memcpy(data_ptr, orig_node->bcast_own_sum,
del_if_num * sizeof(uint8_t));
@@ -589,30 +600,32 @@ free_own_sum:
return 0;
}
-int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
+int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
+ int max_if_num)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
- struct hard_iface *hard_iface_tmp;
- struct orig_node *orig_node;
+ struct batadv_hard_iface *hard_iface_tmp;
+ struct batadv_orig_node *orig_node;
uint32_t i;
int ret;
/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
- * if_num */
+ * if_num
+ */
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
- ret = orig_node_del_if(orig_node, max_if_num,
- hard_iface->if_num);
+ ret = batadv_orig_node_del_if(orig_node, max_if_num,
+ hard_iface->if_num);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
- if (ret == -1)
+ if (ret == -ENOMEM)
goto err;
}
rcu_read_unlock();
@@ -620,8 +633,8 @@ int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num)
/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) {
- if (hard_iface_tmp->if_status == IF_NOT_IN_USE)
+ list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
+ if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
continue;
if (hard_iface == hard_iface_tmp)
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index f74d0d693359..9778e656dec7 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_ORIGINATOR_H_
@@ -24,24 +22,29 @@
#include "hash.h"
-int originator_init(struct bat_priv *bat_priv);
-void originator_free(struct bat_priv *bat_priv);
-void purge_orig_ref(struct bat_priv *bat_priv);
-void orig_node_free_ref(struct orig_node *orig_node);
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr);
-struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface,
- const uint8_t *neigh_addr,
- uint32_t seqno);
-void neigh_node_free_ref(struct neigh_node *neigh_node);
-struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
-int orig_seq_print_text(struct seq_file *seq, void *offset);
-int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
-int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
-
-
-/* hashfunction to choose an entry in a hash table of given size */
-/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static inline uint32_t choose_orig(const void *data, uint32_t size)
+int batadv_originator_init(struct batadv_priv *bat_priv);
+void batadv_originator_free(struct batadv_priv *bat_priv);
+void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
+void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
+struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
+ const uint8_t *addr);
+struct batadv_neigh_node *
+batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
+ const uint8_t *neigh_addr, uint32_t seqno);
+void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node);
+struct batadv_neigh_node *
+batadv_orig_node_get_router(struct batadv_orig_node *orig_node);
+int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
+int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
+ int max_if_num);
+int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
+ int max_if_num);
+
+
+/* hashfunction to choose an entry in a hash table of given size
+ * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
+ */
+static inline uint32_t batadv_choose_orig(const void *data, uint32_t size)
{
const unsigned char *key = data;
uint32_t hash = 0;
@@ -60,24 +63,24 @@ static inline uint32_t choose_orig(const void *data, uint32_t size)
return hash % size;
}
-static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv,
- const void *data)
+static inline struct batadv_orig_node *
+batadv_orig_hash_find(struct batadv_priv *bat_priv, const void *data)
{
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct orig_node *orig_node, *orig_node_tmp = NULL;
+ struct batadv_orig_node *orig_node, *orig_node_tmp = NULL;
int index;
if (!hash)
return NULL;
- index = choose_orig(data, hash->size);
+ index = batadv_choose_orig(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
- if (!compare_eth(orig_node, data))
+ if (!batadv_compare_eth(orig_node, data))
continue;
if (!atomic_inc_not_zero(&orig_node->refcount))
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 0ee1af770798..8d3e55a96adc 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,171 +15,172 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_PACKET_H_
#define _NET_BATMAN_ADV_PACKET_H_
-#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
-
-enum bat_packettype {
- BAT_IV_OGM = 0x01,
- BAT_ICMP = 0x02,
- BAT_UNICAST = 0x03,
- BAT_BCAST = 0x04,
- BAT_VIS = 0x05,
- BAT_UNICAST_FRAG = 0x06,
- BAT_TT_QUERY = 0x07,
- BAT_ROAM_ADV = 0x08
+#define BATADV_ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
+
+enum batadv_packettype {
+ BATADV_IV_OGM = 0x01,
+ BATADV_ICMP = 0x02,
+ BATADV_UNICAST = 0x03,
+ BATADV_BCAST = 0x04,
+ BATADV_VIS = 0x05,
+ BATADV_UNICAST_FRAG = 0x06,
+ BATADV_TT_QUERY = 0x07,
+ BATADV_ROAM_ADV = 0x08,
};
/* this file is included by batctl which needs these defines */
-#define COMPAT_VERSION 14
+#define BATADV_COMPAT_VERSION 14
-enum batman_iv_flags {
- NOT_BEST_NEXT_HOP = 1 << 3,
- PRIMARIES_FIRST_HOP = 1 << 4,
- VIS_SERVER = 1 << 5,
- DIRECTLINK = 1 << 6
+enum batadv_iv_flags {
+ BATADV_NOT_BEST_NEXT_HOP = 1 << 3,
+ BATADV_PRIMARIES_FIRST_HOP = 1 << 4,
+ BATADV_VIS_SERVER = 1 << 5,
+ BATADV_DIRECTLINK = 1 << 6,
};
/* ICMP message types */
-enum icmp_packettype {
- ECHO_REPLY = 0,
- DESTINATION_UNREACHABLE = 3,
- ECHO_REQUEST = 8,
- TTL_EXCEEDED = 11,
- PARAMETER_PROBLEM = 12
+enum batadv_icmp_packettype {
+ BATADV_ECHO_REPLY = 0,
+ BATADV_DESTINATION_UNREACHABLE = 3,
+ BATADV_ECHO_REQUEST = 8,
+ BATADV_TTL_EXCEEDED = 11,
+ BATADV_PARAMETER_PROBLEM = 12,
};
/* vis defines */
-enum vis_packettype {
- VIS_TYPE_SERVER_SYNC = 0,
- VIS_TYPE_CLIENT_UPDATE = 1
+enum batadv_vis_packettype {
+ BATADV_VIS_TYPE_SERVER_SYNC = 0,
+ BATADV_VIS_TYPE_CLIENT_UPDATE = 1,
};
/* fragmentation defines */
-enum unicast_frag_flags {
- UNI_FRAG_HEAD = 1 << 0,
- UNI_FRAG_LARGETAIL = 1 << 1
+enum batadv_unicast_frag_flags {
+ BATADV_UNI_FRAG_HEAD = 1 << 0,
+ BATADV_UNI_FRAG_LARGETAIL = 1 << 1,
};
/* TT_QUERY subtypes */
-#define TT_QUERY_TYPE_MASK 0x3
+#define BATADV_TT_QUERY_TYPE_MASK 0x3
-enum tt_query_packettype {
- TT_REQUEST = 0,
- TT_RESPONSE = 1
+enum batadv_tt_query_packettype {
+ BATADV_TT_REQUEST = 0,
+ BATADV_TT_RESPONSE = 1,
};
/* TT_QUERY flags */
-enum tt_query_flags {
- TT_FULL_TABLE = 1 << 2
+enum batadv_tt_query_flags {
+ BATADV_TT_FULL_TABLE = 1 << 2,
};
-/* TT_CLIENT flags.
+/* BATADV_TT_CLIENT flags.
* Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
- * 1 << 15 are used for local computation only */
-enum tt_client_flags {
- TT_CLIENT_DEL = 1 << 0,
- TT_CLIENT_ROAM = 1 << 1,
- TT_CLIENT_WIFI = 1 << 2,
- TT_CLIENT_NOPURGE = 1 << 8,
- TT_CLIENT_NEW = 1 << 9,
- TT_CLIENT_PENDING = 1 << 10
+ * 1 << 15 are used for local computation only
+ */
+enum batadv_tt_client_flags {
+ BATADV_TT_CLIENT_DEL = 1 << 0,
+ BATADV_TT_CLIENT_ROAM = 1 << 1,
+ BATADV_TT_CLIENT_WIFI = 1 << 2,
+ BATADV_TT_CLIENT_NOPURGE = 1 << 8,
+ BATADV_TT_CLIENT_NEW = 1 << 9,
+ BATADV_TT_CLIENT_PENDING = 1 << 10,
};
/* claim frame types for the bridge loop avoidance */
-enum bla_claimframe {
- CLAIM_TYPE_ADD = 0x00,
- CLAIM_TYPE_DEL = 0x01,
- CLAIM_TYPE_ANNOUNCE = 0x02,
- CLAIM_TYPE_REQUEST = 0x03
+enum batadv_bla_claimframe {
+ BATADV_CLAIM_TYPE_ADD = 0x00,
+ BATADV_CLAIM_TYPE_DEL = 0x01,
+ BATADV_CLAIM_TYPE_ANNOUNCE = 0x02,
+ BATADV_CLAIM_TYPE_REQUEST = 0x03,
};
/* the destination hardware field in the ARP frame is used to
* transport the claim type and the group id
*/
-struct bla_claim_dst {
+struct batadv_bla_claim_dst {
uint8_t magic[3]; /* FF:43:05 */
uint8_t type; /* bla_claimframe */
- uint16_t group; /* group id */
+ __be16 group; /* group id */
} __packed;
-struct batman_header {
+struct batadv_header {
uint8_t packet_type;
uint8_t version; /* batman version field */
uint8_t ttl;
} __packed;
-struct batman_ogm_packet {
- struct batman_header header;
+struct batadv_ogm_packet {
+ struct batadv_header header;
uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
- uint32_t seqno;
+ __be32 seqno;
uint8_t orig[ETH_ALEN];
uint8_t prev_sender[ETH_ALEN];
uint8_t gw_flags; /* flags related to gateway class */
uint8_t tq;
uint8_t tt_num_changes;
uint8_t ttvn; /* translation table version number */
- uint16_t tt_crc;
+ __be16 tt_crc;
} __packed;
-#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet)
+#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
-struct icmp_packet {
- struct batman_header header;
+struct batadv_icmp_packet {
+ struct batadv_header header;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
- uint16_t seqno;
+ __be16 seqno;
uint8_t uid;
uint8_t reserved;
} __packed;
-#define BAT_RR_LEN 16
+#define BATADV_RR_LEN 16
/* icmp_packet_rr must start with all fields from imcp_packet
- * as this is assumed by code that handles ICMP packets */
-struct icmp_packet_rr {
- struct batman_header header;
+ * as this is assumed by code that handles ICMP packets
+ */
+struct batadv_icmp_packet_rr {
+ struct batadv_header header;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
- uint16_t seqno;
+ __be16 seqno;
uint8_t uid;
uint8_t rr_cur;
- uint8_t rr[BAT_RR_LEN][ETH_ALEN];
+ uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
} __packed;
-struct unicast_packet {
- struct batman_header header;
+struct batadv_unicast_packet {
+ struct batadv_header header;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[ETH_ALEN];
} __packed;
-struct unicast_frag_packet {
- struct batman_header header;
+struct batadv_unicast_frag_packet {
+ struct batadv_header header;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[ETH_ALEN];
uint8_t flags;
uint8_t align;
uint8_t orig[ETH_ALEN];
- uint16_t seqno;
+ __be16 seqno;
} __packed;
-struct bcast_packet {
- struct batman_header header;
+struct batadv_bcast_packet {
+ struct batadv_header header;
uint8_t reserved;
- uint32_t seqno;
+ __be32 seqno;
uint8_t orig[ETH_ALEN];
} __packed;
-struct vis_packet {
- struct batman_header header;
+struct batadv_vis_packet {
+ struct batadv_header header;
uint8_t vis_type; /* which type of vis-participant sent this? */
- uint32_t seqno; /* sequence number */
+ __be32 seqno; /* sequence number */
uint8_t entries; /* number of entries behind this struct */
uint8_t reserved;
uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */
@@ -188,11 +188,12 @@ struct vis_packet {
uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
} __packed;
-struct tt_query_packet {
- struct batman_header header;
+struct batadv_tt_query_packet {
+ struct batadv_header header;
/* the flag field is a combination of:
* - TT_REQUEST or TT_RESPONSE
- * - TT_FULL_TABLE */
+ * - TT_FULL_TABLE
+ */
uint8_t flags;
uint8_t dst[ETH_ALEN];
uint8_t src[ETH_ALEN];
@@ -200,24 +201,26 @@ struct tt_query_packet {
* if TT_REQUEST: ttvn that triggered the
* request
* if TT_RESPONSE: new ttvn for the src
- * orig_node */
+ * orig_node
+ */
uint8_t ttvn;
/* tt_data field is:
* if TT_REQUEST: crc associated with the
* ttvn
- * if TT_RESPONSE: table_size */
- uint16_t tt_data;
+ * if TT_RESPONSE: table_size
+ */
+ __be16 tt_data;
} __packed;
-struct roam_adv_packet {
- struct batman_header header;
+struct batadv_roam_adv_packet {
+ struct batadv_header header;
uint8_t reserved;
uint8_t dst[ETH_ALEN];
uint8_t src[ETH_ALEN];
uint8_t client[ETH_ALEN];
} __packed;
-struct tt_change {
+struct batadv_tt_change {
uint8_t flags;
uint8_t addr[ETH_ALEN];
} __packed;
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index fd63951d118d..c8f61e395b74 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,26 +15,26 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
#include "ring_buffer.h"
-void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value)
+void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
+ uint8_t value)
{
lq_recv[*lq_index] = value;
- *lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE;
+ *lq_index = (*lq_index + 1) % BATADV_TQ_GLOBAL_WINDOW_SIZE;
}
-uint8_t ring_buffer_avg(const uint8_t lq_recv[])
+uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
{
const uint8_t *ptr;
uint16_t count = 0, i = 0, sum = 0;
ptr = lq_recv;
- while (i < TQ_GLOBAL_WINDOW_SIZE) {
+ while (i < BATADV_TQ_GLOBAL_WINDOW_SIZE) {
if (*ptr != 0) {
count++;
sum += *ptr;
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 8b58bd82767d..fda8c17df273 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,13 +15,13 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_RING_BUFFER_H_
#define _NET_BATMAN_ADV_RING_BUFFER_H_
-void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value);
-uint8_t ring_buffer_avg(const uint8_t lq_recv[]);
+void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
+ uint8_t value);
+uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]);
#endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 015471d801b4..bc2b88bbea1f 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -31,19 +29,20 @@
#include "unicast.h"
#include "bridge_loop_avoidance.h"
-static int route_unicast_packet(struct sk_buff *skb,
- struct hard_iface *recv_if);
+static int batadv_route_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
-void slide_own_bcast_window(struct hard_iface *hard_iface)
+void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
unsigned long *word;
uint32_t i;
size_t word_index;
+ uint8_t *w;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -51,49 +50,49 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
spin_lock_bh(&orig_node->ogm_cnt_lock);
- word_index = hard_iface->if_num * NUM_WORDS;
+ word_index = hard_iface->if_num * BATADV_NUM_WORDS;
word = &(orig_node->bcast_own[word_index]);
- bit_get_packet(bat_priv, word, 1, 0);
- orig_node->bcast_own_sum[hard_iface->if_num] =
- bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
+ batadv_bit_get_packet(bat_priv, word, 1, 0);
+ w = &orig_node->bcast_own_sum[hard_iface->if_num];
+ *w = bitmap_weight(word, BATADV_TQ_LOCAL_WINDOW_SIZE);
spin_unlock_bh(&orig_node->ogm_cnt_lock);
}
rcu_read_unlock();
}
}
-static void _update_route(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+static void _batadv_update_route(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node)
{
- struct neigh_node *curr_router;
+ struct batadv_neigh_node *curr_router;
- curr_router = orig_node_get_router(orig_node);
+ curr_router = batadv_orig_node_get_router(orig_node);
/* route deleted */
if ((curr_router) && (!neigh_node)) {
- bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
- orig_node->orig);
- tt_global_del_orig(bat_priv, orig_node,
- "Deleted route towards originator");
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "Deleting route towards: %pM\n", orig_node->orig);
+ batadv_tt_global_del_orig(bat_priv, orig_node,
+ "Deleted route towards originator");
/* route added */
} else if ((!curr_router) && (neigh_node)) {
- bat_dbg(DBG_ROUTES, bat_priv,
- "Adding route towards: %pM (via %pM)\n",
- orig_node->orig, neigh_node->addr);
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "Adding route towards: %pM (via %pM)\n",
+ orig_node->orig, neigh_node->addr);
/* route changed */
} else if (neigh_node && curr_router) {
- bat_dbg(DBG_ROUTES, bat_priv,
- "Changing route towards: %pM (now via %pM - was via %pM)\n",
- orig_node->orig, neigh_node->addr,
- curr_router->addr);
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "Changing route towards: %pM (now via %pM - was via %pM)\n",
+ orig_node->orig, neigh_node->addr,
+ curr_router->addr);
}
if (curr_router)
- neigh_node_free_ref(curr_router);
+ batadv_neigh_node_free_ref(curr_router);
/* increase refcount of new best neighbor */
if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
@@ -105,30 +104,31 @@ static void _update_route(struct bat_priv *bat_priv,
/* decrease refcount of previous best neighbor */
if (curr_router)
- neigh_node_free_ref(curr_router);
+ batadv_neigh_node_free_ref(curr_router);
}
-void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+void batadv_update_route(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node)
{
- struct neigh_node *router = NULL;
+ struct batadv_neigh_node *router = NULL;
if (!orig_node)
goto out;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (router != neigh_node)
- _update_route(bat_priv, orig_node, neigh_node);
+ _batadv_update_route(bat_priv, orig_node, neigh_node);
out:
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
/* caller must hold the neigh_list_lock */
-void bonding_candidate_del(struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node)
{
/* this neighbor is not part of our candidate list */
if (list_empty(&neigh_node->bonding_list))
@@ -136,37 +136,36 @@ void bonding_candidate_del(struct orig_node *orig_node,
list_del_rcu(&neigh_node->bonding_list);
INIT_LIST_HEAD(&neigh_node->bonding_list);
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
atomic_dec(&orig_node->bond_candidates);
out:
return;
}
-void bonding_candidate_add(struct orig_node *orig_node,
- struct neigh_node *neigh_node)
+void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node)
{
struct hlist_node *node;
- struct neigh_node *tmp_neigh_node, *router = NULL;
+ struct batadv_neigh_node *tmp_neigh_node, *router = NULL;
uint8_t interference_candidate = 0;
spin_lock_bh(&orig_node->neigh_list_lock);
/* only consider if it has the same primary address ... */
- if (!compare_eth(orig_node->orig,
- neigh_node->orig_node->primary_addr))
+ if (!batadv_compare_eth(orig_node->orig,
+ neigh_node->orig_node->primary_addr))
goto candidate_del;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto candidate_del;
/* ... and is good enough to be considered */
- if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
+ if (neigh_node->tq_avg < router->tq_avg - BATADV_BONDING_TQ_THRESHOLD)
goto candidate_del;
- /**
- * check if we have another candidate with the same mac address or
+ /* check if we have another candidate with the same mac address or
* interface. If we do, we won't select this candidate because of
* possible interference.
*/
@@ -177,12 +176,14 @@ void bonding_candidate_add(struct orig_node *orig_node,
continue;
/* we only care if the other candidate is even
- * considered as candidate. */
+ * considered as candidate.
+ */
if (list_empty(&tmp_neigh_node->bonding_list))
continue;
if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
- (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
+ (batadv_compare_eth(neigh_node->addr,
+ tmp_neigh_node->addr))) {
interference_candidate = 1;
break;
}
@@ -204,21 +205,22 @@ void bonding_candidate_add(struct orig_node *orig_node,
goto out;
candidate_del:
- bonding_candidate_del(orig_node, neigh_node);
+ batadv_bonding_candidate_del(orig_node, neigh_node);
out:
spin_unlock_bh(&orig_node->neigh_list_lock);
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
/* copy primary address for bonding */
-void bonding_save_primary(const struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- const struct batman_ogm_packet *batman_ogm_packet)
+void
+batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ const struct batadv_ogm_packet *batman_ogm_packet)
{
- if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
+ if (!(batman_ogm_packet->flags & BATADV_PRIMARIES_FIRST_HOP))
return;
memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
@@ -229,25 +231,26 @@ void bonding_save_primary(const struct orig_node *orig_node,
* 0 if the packet is to be accepted
* 1 if the packet is to be ignored.
*/
-int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
- unsigned long *last_reset)
+int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
+ unsigned long *last_reset)
{
- if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
- (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
- if (!has_timed_out(*last_reset, RESET_PROTECTION_MS))
+ if (seq_num_diff <= -BATADV_TQ_LOCAL_WINDOW_SIZE ||
+ seq_num_diff >= BATADV_EXPECTED_SEQNO_RANGE) {
+ if (!batadv_has_timed_out(*last_reset,
+ BATADV_RESET_PROTECTION_MS))
return 1;
*last_reset = jiffies;
- bat_dbg(DBG_BATMAN, bat_priv,
- "old packet received, start protection\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "old packet received, start protection\n");
}
return 0;
}
-bool check_management_packet(struct sk_buff *skb,
- struct hard_iface *hard_iface,
- int header_len)
+bool batadv_check_management_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ int header_len)
{
struct ethhdr *ethhdr;
@@ -276,34 +279,34 @@ bool check_management_packet(struct sk_buff *skb,
return true;
}
-static int recv_my_icmp_packet(struct bat_priv *bat_priv,
- struct sk_buff *skb, size_t icmp_len)
+static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
+ struct sk_buff *skb, size_t icmp_len)
{
- struct hard_iface *primary_if = NULL;
- struct orig_node *orig_node = NULL;
- struct neigh_node *router = NULL;
- struct icmp_packet_rr *icmp_packet;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_icmp_packet_rr *icmp_packet;
int ret = NET_RX_DROP;
- icmp_packet = (struct icmp_packet_rr *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
/* add data to device queue */
- if (icmp_packet->msg_type != ECHO_REQUEST) {
- bat_socket_receive_packet(icmp_packet, icmp_len);
+ if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
+ batadv_socket_receive_packet(icmp_packet, icmp_len);
goto out;
}
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* answer echo request (ping) */
/* get routing information */
- orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
if (!orig_node)
goto out;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto out;
@@ -311,54 +314,54 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
- icmp_packet = (struct icmp_packet_rr *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- icmp_packet->msg_type = ECHO_REPLY;
- icmp_packet->header.ttl = TTL;
+ icmp_packet->msg_type = BATADV_ECHO_REPLY;
+ icmp_packet->header.ttl = BATADV_TTL;
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
-static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
- struct sk_buff *skb)
+static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
+ struct sk_buff *skb)
{
- struct hard_iface *primary_if = NULL;
- struct orig_node *orig_node = NULL;
- struct neigh_node *router = NULL;
- struct icmp_packet *icmp_packet;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *router = NULL;
+ struct batadv_icmp_packet *icmp_packet;
int ret = NET_RX_DROP;
- icmp_packet = (struct icmp_packet *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet *)skb->data;
/* send TTL exceeded if packet is an echo request (traceroute) */
- if (icmp_packet->msg_type != ECHO_REQUEST) {
+ if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
icmp_packet->orig, icmp_packet->dst);
goto out;
}
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* get routing information */
- orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
if (!orig_node)
goto out;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto out;
@@ -366,42 +369,41 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
- icmp_packet = (struct icmp_packet *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet *)skb->data;
memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
- icmp_packet->msg_type = TTL_EXCEEDED;
- icmp_packet->header.ttl = TTL;
+ icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
+ icmp_packet->header.ttl = BATADV_TTL;
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
-int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_icmp_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct icmp_packet_rr *icmp_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_icmp_packet_rr *icmp_packet;
struct ethhdr *ethhdr;
- struct orig_node *orig_node = NULL;
- struct neigh_node *router = NULL;
- int hdr_size = sizeof(struct icmp_packet);
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *router = NULL;
+ int hdr_size = sizeof(struct batadv_icmp_packet);
int ret = NET_RX_DROP;
- /**
- * we truncate all incoming icmp packets if they don't match our size
- */
- if (skb->len >= sizeof(struct icmp_packet_rr))
- hdr_size = sizeof(struct icmp_packet_rr);
+ /* we truncate all incoming icmp packets if they don't match our size */
+ if (skb->len >= sizeof(struct batadv_icmp_packet_rr))
+ hdr_size = sizeof(struct batadv_icmp_packet_rr);
/* drop packet if it has not necessary minimum size */
if (unlikely(!pskb_may_pull(skb, hdr_size)))
@@ -418,33 +420,33 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
goto out;
/* not for me */
- if (!is_my_mac(ethhdr->h_dest))
+ if (!batadv_is_my_mac(ethhdr->h_dest))
goto out;
- icmp_packet = (struct icmp_packet_rr *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
/* add record route information if not full */
- if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
- (icmp_packet->rr_cur < BAT_RR_LEN)) {
+ if ((hdr_size == sizeof(struct batadv_icmp_packet_rr)) &&
+ (icmp_packet->rr_cur < BATADV_RR_LEN)) {
memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
ethhdr->h_dest, ETH_ALEN);
icmp_packet->rr_cur++;
}
/* packet for me */
- if (is_my_mac(icmp_packet->dst))
- return recv_my_icmp_packet(bat_priv, skb, hdr_size);
+ if (batadv_is_my_mac(icmp_packet->dst))
+ return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);
/* TTL exceeded */
if (icmp_packet->header.ttl < 2)
- return recv_icmp_ttl_exceeded(bat_priv, skb);
+ return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
/* get routing information */
- orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->dst);
if (!orig_node)
goto out;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto out;
@@ -452,20 +454,20 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
- icmp_packet = (struct icmp_packet_rr *)skb->data;
+ icmp_packet = (struct batadv_icmp_packet_rr *)skb->data;
/* decrement ttl */
icmp_packet->header.ttl--;
/* route it */
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
ret = NET_RX_SUCCESS;
out:
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
@@ -473,12 +475,14 @@ out:
* robin fashion over the remaining interfaces.
*
* This method rotates the bonding list and increases the
- * returned router's refcount. */
-static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
- const struct hard_iface *recv_if)
+ * returned router's refcount.
+ */
+static struct batadv_neigh_node *
+batadv_find_bond_router(struct batadv_orig_node *primary_orig,
+ const struct batadv_hard_iface *recv_if)
{
- struct neigh_node *tmp_neigh_node;
- struct neigh_node *router = NULL, *first_candidate = NULL;
+ struct batadv_neigh_node *tmp_neigh_node;
+ struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -506,10 +510,12 @@ static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
goto out;
/* selected should point to the next element
- * after the current router */
+ * after the current router
+ */
spin_lock_bh(&primary_orig->neigh_list_lock);
/* this is a list_move(), which unfortunately
- * does not exist as rcu version */
+ * does not exist as rcu version
+ */
list_del_rcu(&primary_orig->bond_list);
list_add_rcu(&primary_orig->bond_list,
&router->bonding_list);
@@ -524,12 +530,14 @@ out:
* remaining candidates which are not using
* this interface.
*
- * Increases the returned router's refcount */
-static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
- const struct hard_iface *recv_if)
+ * Increases the returned router's refcount
+ */
+static struct batadv_neigh_node *
+batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
+ const struct batadv_hard_iface *recv_if)
{
- struct neigh_node *tmp_neigh_node;
- struct neigh_node *router = NULL, *first_candidate = NULL;
+ struct batadv_neigh_node *tmp_neigh_node;
+ struct batadv_neigh_node *router = NULL, *first_candidate = NULL;
rcu_read_lock();
list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
@@ -545,19 +553,21 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
continue;
/* if we don't have a router yet
- * or this one is better, choose it. */
+ * or this one is better, choose it.
+ */
if ((!router) ||
(tmp_neigh_node->tq_avg > router->tq_avg)) {
/* decrement refcount of
- * previously selected router */
+ * previously selected router
+ */
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
router = tmp_neigh_node;
atomic_inc_not_zero(&router->refcount);
}
- neigh_node_free_ref(tmp_neigh_node);
+ batadv_neigh_node_free_ref(tmp_neigh_node);
}
/* use the first candidate if nothing was found. */
@@ -569,19 +579,22 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
return router;
}
-int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct tt_query_packet *tt_query;
- uint16_t tt_len;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_tt_query_packet *tt_query;
+ uint16_t tt_size;
struct ethhdr *ethhdr;
+ char tt_flag;
+ size_t packet_size;
/* drop packet if it has not necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
+ if (unlikely(!pskb_may_pull(skb,
+ sizeof(struct batadv_tt_query_packet))))
goto out;
/* I could need to modify it */
- if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
+ if (skb_cow(skb, sizeof(struct batadv_tt_query_packet)) < 0)
goto out;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -594,47 +607,59 @@ int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
if (is_broadcast_ether_addr(ethhdr->h_source))
goto out;
- tt_query = (struct tt_query_packet *)skb->data;
+ tt_query = (struct batadv_tt_query_packet *)skb->data;
- tt_query->tt_data = ntohs(tt_query->tt_data);
+ switch (tt_query->flags & BATADV_TT_QUERY_TYPE_MASK) {
+ case BATADV_TT_REQUEST:
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_RX);
- switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
- case TT_REQUEST:
/* If we cannot provide an answer the tt_request is
- * forwarded */
- if (!send_tt_response(bat_priv, tt_query)) {
- bat_dbg(DBG_TT, bat_priv,
- "Routing TT_REQUEST to %pM [%c]\n",
- tt_query->dst,
- (tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
- tt_query->tt_data = htons(tt_query->tt_data);
- return route_unicast_packet(skb, recv_if);
+ * forwarded
+ */
+ if (!batadv_send_tt_response(bat_priv, tt_query)) {
+ if (tt_query->flags & BATADV_TT_FULL_TABLE)
+ tt_flag = 'F';
+ else
+ tt_flag = '.';
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Routing TT_REQUEST to %pM [%c]\n",
+ tt_query->dst,
+ tt_flag);
+ return batadv_route_unicast_packet(skb, recv_if);
}
break;
- case TT_RESPONSE:
- if (is_my_mac(tt_query->dst)) {
+ case BATADV_TT_RESPONSE:
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX);
+
+ if (batadv_is_my_mac(tt_query->dst)) {
/* packet needs to be linearized to access the TT
- * changes */
+ * changes
+ */
if (skb_linearize(skb) < 0)
goto out;
/* skb_linearize() possibly changed skb->data */
- tt_query = (struct tt_query_packet *)skb->data;
+ tt_query = (struct batadv_tt_query_packet *)skb->data;
- tt_len = tt_query->tt_data * sizeof(struct tt_change);
+ tt_size = batadv_tt_len(ntohs(tt_query->tt_data));
/* Ensure we have all the claimed data */
- if (unlikely(skb_headlen(skb) <
- sizeof(struct tt_query_packet) + tt_len))
+ packet_size = sizeof(struct batadv_tt_query_packet);
+ packet_size += tt_size;
+ if (unlikely(skb_headlen(skb) < packet_size))
goto out;
- handle_tt_response(bat_priv, tt_query);
+ batadv_handle_tt_response(bat_priv, tt_query);
} else {
- bat_dbg(DBG_TT, bat_priv,
- "Routing TT_RESPONSE to %pM [%c]\n",
- tt_query->dst,
- (tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
- tt_query->tt_data = htons(tt_query->tt_data);
- return route_unicast_packet(skb, recv_if);
+ if (tt_query->flags & BATADV_TT_FULL_TABLE)
+ tt_flag = 'F';
+ else
+ tt_flag = '.';
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Routing TT_RESPONSE to %pM [%c]\n",
+ tt_query->dst,
+ tt_flag);
+ return batadv_route_unicast_packet(skb, recv_if);
}
break;
}
@@ -644,15 +669,16 @@ out:
return NET_RX_DROP;
}
-int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct roam_adv_packet *roam_adv_packet;
- struct orig_node *orig_node;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_roam_adv_packet *roam_adv_packet;
+ struct batadv_orig_node *orig_node;
struct ethhdr *ethhdr;
/* drop packet if it has not necessary minimum size */
- if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
+ if (unlikely(!pskb_may_pull(skb,
+ sizeof(struct batadv_roam_adv_packet))))
goto out;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -665,35 +691,39 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
if (is_broadcast_ether_addr(ethhdr->h_source))
goto out;
- roam_adv_packet = (struct roam_adv_packet *)skb->data;
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);
+
+ roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data;
- if (!is_my_mac(roam_adv_packet->dst))
- return route_unicast_packet(skb, recv_if);
+ if (!batadv_is_my_mac(roam_adv_packet->dst))
+ return batadv_route_unicast_packet(skb, recv_if);
/* check if it is a backbone gateway. we don't accept
* roaming advertisement from it, as it has the same
* entries as we have.
*/
- if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
goto out;
- orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
+ orig_node = batadv_orig_hash_find(bat_priv, roam_adv_packet->src);
if (!orig_node)
goto out;
- bat_dbg(DBG_TT, bat_priv,
- "Received ROAMING_ADV from %pM (client %pM)\n",
- roam_adv_packet->src, roam_adv_packet->client);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received ROAMING_ADV from %pM (client %pM)\n",
+ roam_adv_packet->src, roam_adv_packet->client);
- tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
- atomic_read(&orig_node->last_ttvn) + 1, true, false);
+ batadv_tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
+ BATADV_TT_CLIENT_ROAM,
+ atomic_read(&orig_node->last_ttvn) + 1);
/* Roaming phase starts: I have new information but the ttvn has not
* been incremented yet. This flag will make me check all the incoming
- * packets for the correct destination. */
+ * packets for the correct destination.
+ */
bat_priv->tt_poss_change = true;
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
out:
/* returning NET_RX_DROP will make the caller function kfree the skb */
return NET_RX_DROP;
@@ -701,26 +731,30 @@ out:
/* find a suitable router for this originator, and use
* bonding if possible. increases the found neighbors
- * refcount.*/
-struct neigh_node *find_router(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const struct hard_iface *recv_if)
+ * refcount.
+ */
+struct batadv_neigh_node *
+batadv_find_router(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *recv_if)
{
- struct orig_node *primary_orig_node;
- struct orig_node *router_orig;
- struct neigh_node *router;
+ struct batadv_orig_node *primary_orig_node;
+ struct batadv_orig_node *router_orig;
+ struct batadv_neigh_node *router;
static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
int bonding_enabled;
+ uint8_t *primary_addr;
if (!orig_node)
return NULL;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto err;
/* without bonding, the first node should
- * always choose the default router. */
+ * always choose the default router.
+ */
bonding_enabled = atomic_read(&bat_priv->bonding);
rcu_read_lock();
@@ -732,43 +766,47 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
if ((!recv_if) && (!bonding_enabled))
goto return_router;
+ primary_addr = router_orig->primary_addr;
+
/* if we have something in the primary_addr, we can search
- * for a potential bonding candidate. */
- if (compare_eth(router_orig->primary_addr, zero_mac))
+ * for a potential bonding candidate.
+ */
+ if (batadv_compare_eth(primary_addr, zero_mac))
goto return_router;
/* find the orig_node which has the primary interface. might
- * even be the same as our router_orig in many cases */
-
- if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
+ * even be the same as our router_orig in many cases
+ */
+ if (batadv_compare_eth(primary_addr, router_orig->orig)) {
primary_orig_node = router_orig;
} else {
- primary_orig_node = orig_hash_find(bat_priv,
- router_orig->primary_addr);
+ primary_orig_node = batadv_orig_hash_find(bat_priv,
+ primary_addr);
if (!primary_orig_node)
goto return_router;
- orig_node_free_ref(primary_orig_node);
+ batadv_orig_node_free_ref(primary_orig_node);
}
/* with less than 2 candidates, we can't do any
- * bonding and prefer the original router. */
+ * bonding and prefer the original router.
+ */
if (atomic_read(&primary_orig_node->bond_candidates) < 2)
goto return_router;
/* all nodes between should choose a candidate which
* is is not on the interface where the packet came
- * in. */
-
- neigh_node_free_ref(router);
+ * in.
+ */
+ batadv_neigh_node_free_ref(router);
if (bonding_enabled)
- router = find_bond_router(primary_orig_node, recv_if);
+ router = batadv_find_bond_router(primary_orig_node, recv_if);
else
- router = find_ifalter_router(primary_orig_node, recv_if);
+ router = batadv_find_ifalter_router(primary_orig_node, recv_if);
return_router:
- if (router && router->if_incoming->if_status != IF_ACTIVE)
+ if (router && router->if_incoming->if_status != BATADV_IF_ACTIVE)
goto err_unlock;
rcu_read_unlock();
@@ -777,11 +815,11 @@ err_unlock:
rcu_read_unlock();
err:
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
return NULL;
}
-static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
+static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
struct ethhdr *ethhdr;
@@ -800,23 +838,24 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
return -1;
/* not for me */
- if (!is_my_mac(ethhdr->h_dest))
+ if (!batadv_is_my_mac(ethhdr->h_dest))
return -1;
return 0;
}
-static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+static int batadv_route_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct orig_node *orig_node = NULL;
- struct neigh_node *neigh_node = NULL;
- struct unicast_packet *unicast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_unicast_packet *unicast_packet;
struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
int ret = NET_RX_DROP;
struct sk_buff *new_skb;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* TTL exceeded */
if (unicast_packet->header.ttl < 2) {
@@ -826,13 +865,13 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
}
/* get routing information */
- orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
+ orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->dest);
if (!orig_node)
goto out;
/* find_router() increases neigh_nodes refcount if found. */
- neigh_node = find_router(bat_priv, orig_node, recv_if);
+ neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
if (!neigh_node)
goto out;
@@ -841,20 +880,22 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
if (skb_cow(skb, ETH_HLEN) < 0)
goto out;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
- if (unicast_packet->header.packet_type == BAT_UNICAST &&
+ if (unicast_packet->header.packet_type == BATADV_UNICAST &&
atomic_read(&bat_priv->fragmentation) &&
skb->len > neigh_node->if_incoming->net_dev->mtu) {
- ret = frag_send_skb(skb, bat_priv,
- neigh_node->if_incoming, neigh_node->addr);
+ ret = batadv_frag_send_skb(skb, bat_priv,
+ neigh_node->if_incoming,
+ neigh_node->addr);
goto out;
}
- if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
- frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
+ if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
+ batadv_frag_can_reassemble(skb,
+ neigh_node->if_incoming->net_dev->mtu)) {
- ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
+ ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
goto out;
@@ -866,141 +907,153 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
}
skb = new_skb;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
}
/* decrement ttl */
unicast_packet->header.ttl--;
+ /* Update stats counter */
+ batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
+ batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
+ skb->len + ETH_HLEN);
+
/* route it */
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = NET_RX_SUCCESS;
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
-static int check_unicast_ttvn(struct bat_priv *bat_priv,
- struct sk_buff *skb) {
+static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
+ struct sk_buff *skb) {
uint8_t curr_ttvn;
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
struct ethhdr *ethhdr;
- struct hard_iface *primary_if;
- struct unicast_packet *unicast_packet;
+ struct batadv_hard_iface *primary_if;
+ struct batadv_unicast_packet *unicast_packet;
bool tt_poss_change;
+ int is_old_ttvn;
/* I could need to modify it */
- if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
+ if (skb_cow(skb, sizeof(struct batadv_unicast_packet)) < 0)
return 0;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
- if (is_my_mac(unicast_packet->dest)) {
+ if (batadv_is_my_mac(unicast_packet->dest)) {
tt_poss_change = bat_priv->tt_poss_change;
curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
} else {
- orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
+ orig_node = batadv_orig_hash_find(bat_priv,
+ unicast_packet->dest);
if (!orig_node)
return 0;
curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
tt_poss_change = orig_node->tt_poss_change;
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
/* Check whether I have to reroute the packet */
- if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
+ is_old_ttvn = batadv_seq_before(unicast_packet->ttvn, curr_ttvn);
+ if (is_old_ttvn || tt_poss_change) {
/* check if there is enough data before accessing it */
- if (pskb_may_pull(skb, sizeof(struct unicast_packet) +
+ if (pskb_may_pull(skb, sizeof(struct batadv_unicast_packet) +
ETH_HLEN) < 0)
return 0;
- ethhdr = (struct ethhdr *)(skb->data +
- sizeof(struct unicast_packet));
+ ethhdr = (struct ethhdr *)(skb->data + sizeof(*unicast_packet));
/* we don't have an updated route for this client, so we should
* not try to reroute the packet!!
*/
- if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
+ if (batadv_tt_global_client_is_roaming(bat_priv,
+ ethhdr->h_dest))
return 1;
- orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);
+ orig_node = batadv_transtable_search(bat_priv, NULL,
+ ethhdr->h_dest);
if (!orig_node) {
- if (!is_my_client(bat_priv, ethhdr->h_dest))
+ if (!batadv_is_my_client(bat_priv, ethhdr->h_dest))
return 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
return 0;
memcpy(unicast_packet->dest,
primary_if->net_dev->dev_addr, ETH_ALEN);
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
} else {
memcpy(unicast_packet->dest, orig_node->orig,
ETH_ALEN);
curr_ttvn = (uint8_t)
atomic_read(&orig_node->last_ttvn);
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
- bat_dbg(DBG_ROUTES, bat_priv,
- "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
- unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
- unicast_packet->dest);
+ batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+ "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
+ unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
+ unicast_packet->dest);
unicast_packet->ttvn = curr_ttvn;
}
return 1;
}
-int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct unicast_packet *unicast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_unicast_packet *unicast_packet;
int hdr_size = sizeof(*unicast_packet);
- if (check_unicast_packet(skb, hdr_size) < 0)
+ if (batadv_check_unicast_packet(skb, hdr_size) < 0)
return NET_RX_DROP;
- if (!check_unicast_ttvn(bat_priv, skb))
+ if (!batadv_check_unicast_ttvn(bat_priv, skb))
return NET_RX_DROP;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* packet for me */
- if (is_my_mac(unicast_packet->dest)) {
- interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+ if (batadv_is_my_mac(unicast_packet->dest)) {
+ batadv_interface_rx(recv_if->soft_iface, skb, recv_if,
+ hdr_size);
return NET_RX_SUCCESS;
}
- return route_unicast_packet(skb, recv_if);
+ return batadv_route_unicast_packet(skb, recv_if);
}
-int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct unicast_frag_packet *unicast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_unicast_frag_packet *unicast_packet;
int hdr_size = sizeof(*unicast_packet);
struct sk_buff *new_skb = NULL;
int ret;
- if (check_unicast_packet(skb, hdr_size) < 0)
+ if (batadv_check_unicast_packet(skb, hdr_size) < 0)
return NET_RX_DROP;
- if (!check_unicast_ttvn(bat_priv, skb))
+ if (!batadv_check_unicast_ttvn(bat_priv, skb))
return NET_RX_DROP;
- unicast_packet = (struct unicast_frag_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
/* packet for me */
- if (is_my_mac(unicast_packet->dest)) {
+ if (batadv_is_my_mac(unicast_packet->dest)) {
- ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
+ ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
return NET_RX_DROP;
@@ -1009,20 +1062,21 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
if (!new_skb)
return NET_RX_SUCCESS;
- interface_rx(recv_if->soft_iface, new_skb, recv_if,
- sizeof(struct unicast_packet));
+ batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if,
+ sizeof(struct batadv_unicast_packet));
return NET_RX_SUCCESS;
}
- return route_unicast_packet(skb, recv_if);
+ return batadv_route_unicast_packet(skb, recv_if);
}
-int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_bcast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
- struct orig_node *orig_node = NULL;
- struct bcast_packet *bcast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_bcast_packet *bcast_packet;
struct ethhdr *ethhdr;
int hdr_size = sizeof(*bcast_packet);
int ret = NET_RX_DROP;
@@ -1043,19 +1097,19 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
goto out;
/* ignore broadcasts sent by myself */
- if (is_my_mac(ethhdr->h_source))
+ if (batadv_is_my_mac(ethhdr->h_source))
goto out;
- bcast_packet = (struct bcast_packet *)skb->data;
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
/* ignore broadcasts originated by myself */
- if (is_my_mac(bcast_packet->orig))
+ if (batadv_is_my_mac(bcast_packet->orig))
goto out;
if (bcast_packet->header.ttl < 2)
goto out;
- orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
+ orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
if (!orig_node)
goto out;
@@ -1063,39 +1117,40 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
spin_lock_bh(&orig_node->bcast_seqno_lock);
/* check whether the packet is a duplicate */
- if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
- ntohl(bcast_packet->seqno)))
+ if (batadv_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
+ ntohl(bcast_packet->seqno)))
goto spin_unlock;
seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
/* check whether the packet is old and the host just restarted. */
- if (window_protected(bat_priv, seq_diff,
- &orig_node->bcast_seqno_reset))
+ if (batadv_window_protected(bat_priv, seq_diff,
+ &orig_node->bcast_seqno_reset))
goto spin_unlock;
/* mark broadcast in flood history, update window position
- * if required. */
- if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
+ * if required.
+ */
+ if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
spin_unlock_bh(&orig_node->bcast_seqno_lock);
/* check whether this has been sent by another originator before */
- if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
+ if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
goto out;
/* rebroadcast packet */
- add_bcast_packet_to_list(bat_priv, skb, 1);
+ batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
/* don't hand the broadcast up if it is from an originator
* from the same backbone.
*/
- if (bla_is_backbone_gw(skb, orig_node, hdr_size))
+ if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
goto out;
/* broadcast for me */
- interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
+ batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
ret = NET_RX_SUCCESS;
goto out;
@@ -1103,15 +1158,16 @@ spin_unlock:
spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
-int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
+int batadv_recv_vis_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if)
{
- struct vis_packet *vis_packet;
+ struct batadv_vis_packet *vis_packet;
struct ethhdr *ethhdr;
- struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
int hdr_size = sizeof(*vis_packet);
/* keep skb linear */
@@ -1121,29 +1177,29 @@ int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
if (unlikely(!pskb_may_pull(skb, hdr_size)))
return NET_RX_DROP;
- vis_packet = (struct vis_packet *)skb->data;
+ vis_packet = (struct batadv_vis_packet *)skb->data;
ethhdr = (struct ethhdr *)skb_mac_header(skb);
/* not for me */
- if (!is_my_mac(ethhdr->h_dest))
+ if (!batadv_is_my_mac(ethhdr->h_dest))
return NET_RX_DROP;
/* ignore own packets */
- if (is_my_mac(vis_packet->vis_orig))
+ if (batadv_is_my_mac(vis_packet->vis_orig))
return NET_RX_DROP;
- if (is_my_mac(vis_packet->sender_orig))
+ if (batadv_is_my_mac(vis_packet->sender_orig))
return NET_RX_DROP;
switch (vis_packet->vis_type) {
- case VIS_TYPE_SERVER_SYNC:
- receive_server_sync_packet(bat_priv, vis_packet,
- skb_headlen(skb));
+ case BATADV_VIS_TYPE_SERVER_SYNC:
+ batadv_receive_server_sync_packet(bat_priv, vis_packet,
+ skb_headlen(skb));
break;
- case VIS_TYPE_CLIENT_UPDATE:
- receive_client_update_packet(bat_priv, vis_packet,
- skb_headlen(skb));
+ case BATADV_VIS_TYPE_CLIENT_UPDATE:
+ batadv_receive_client_update_packet(bat_priv, vis_packet,
+ skb_headlen(skb));
break;
default: /* ignore unknown packet */
@@ -1151,6 +1207,7 @@ int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
}
/* We take a copy of the data in the packet, so we should
- always free the skbuf. */
+ * always free the skbuf.
+ */
return NET_RX_DROP;
}
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index d6bbbebb6567..9262279ea667 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,36 +15,45 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_ROUTING_H_
#define _NET_BATMAN_ADV_ROUTING_H_
-void slide_own_bcast_window(struct hard_iface *hard_iface);
-bool check_management_packet(struct sk_buff *skb,
- struct hard_iface *hard_iface,
- int header_len);
-void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
- struct neigh_node *neigh_node);
-int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
-int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
-struct neigh_node *find_router(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const struct hard_iface *recv_if);
-void bonding_candidate_del(struct orig_node *orig_node,
- struct neigh_node *neigh_node);
-void bonding_candidate_add(struct orig_node *orig_node,
- struct neigh_node *neigh_node);
-void bonding_save_primary(const struct orig_node *orig_node,
- struct orig_node *orig_neigh_node,
- const struct batman_ogm_packet *batman_ogm_packet);
-int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
- unsigned long *last_reset);
+void batadv_slide_own_bcast_window(struct batadv_hard_iface *hard_iface);
+bool batadv_check_management_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ int header_len);
+void batadv_update_route(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node);
+int batadv_recv_icmp_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_unicast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_bcast_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_vis_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_tt_query(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+int batadv_recv_roam_adv(struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if);
+struct batadv_neigh_node *
+batadv_find_router(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *recv_if);
+void batadv_bonding_candidate_del(struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node);
+void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
+ struct batadv_neigh_node *neigh_node);
+void batadv_bonding_save_primary(const struct batadv_orig_node *orig_node,
+ struct batadv_orig_node *orig_neigh_node,
+ const struct batadv_ogm_packet
+ *batman_ogm_packet);
+int batadv_window_protected(struct batadv_priv *bat_priv, int32_t seq_num_diff,
+ unsigned long *last_reset);
#endif /* _NET_BATMAN_ADV_ROUTING_H_ */
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index f47299f22c68..3b4b2daa3b3e 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -29,16 +27,18 @@
#include "gateway_common.h"
#include "originator.h"
-static void send_outstanding_bcast_packet(struct work_struct *work);
+static void batadv_send_outstanding_bcast_packet(struct work_struct *work);
/* send out an already prepared packet to the given address via the
- * specified batman interface */
-int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
- const uint8_t *dst_addr)
+ * specified batman interface
+ */
+int batadv_send_skb_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ const uint8_t *dst_addr)
{
struct ethhdr *ethhdr;
- if (hard_iface->if_status != IF_ACTIVE)
+ if (hard_iface->if_status != BATADV_IF_ACTIVE)
goto send_skb_err;
if (unlikely(!hard_iface->net_dev))
@@ -51,7 +51,7 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
}
/* push to the ethernet header. */
- if (my_skb_head_push(skb, ETH_HLEN) < 0)
+ if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
goto send_skb_err;
skb_reset_mac_header(skb);
@@ -59,129 +59,57 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
ethhdr = (struct ethhdr *)skb_mac_header(skb);
memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
- ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
+ ethhdr->h_proto = __constant_htons(BATADV_ETH_P_BATMAN);
skb_set_network_header(skb, ETH_HLEN);
skb->priority = TC_PRIO_CONTROL;
- skb->protocol = __constant_htons(ETH_P_BATMAN);
+ skb->protocol = __constant_htons(BATADV_ETH_P_BATMAN);
skb->dev = hard_iface->net_dev;
/* dev_queue_xmit() returns a negative result on error. However on
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
- * (which is > 0). This will not be treated as an error. */
-
+ * (which is > 0). This will not be treated as an error.
+ */
return dev_queue_xmit(skb);
send_skb_err:
kfree_skb(skb);
return NET_XMIT_DROP;
}
-static void realloc_packet_buffer(struct hard_iface *hard_iface,
- int new_len)
+void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
- unsigned char *new_buff;
-
- new_buff = kmalloc(new_len, GFP_ATOMIC);
-
- /* keep old buffer if kmalloc should fail */
- if (new_buff) {
- memcpy(new_buff, hard_iface->packet_buff,
- BATMAN_OGM_HLEN);
-
- kfree(hard_iface->packet_buff);
- hard_iface->packet_buff = new_buff;
- hard_iface->packet_len = new_len;
- }
-}
+ struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
-/* when calling this function (hard_iface == primary_if) has to be true */
-static int prepare_packet_buffer(struct bat_priv *bat_priv,
- struct hard_iface *hard_iface)
-{
- int new_len;
-
- new_len = BATMAN_OGM_HLEN +
- tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
-
- /* if we have too many changes for one packet don't send any
- * and wait for the tt table request which will be fragmented */
- if (new_len > hard_iface->soft_iface->mtu)
- new_len = BATMAN_OGM_HLEN;
-
- realloc_packet_buffer(hard_iface, new_len);
-
- atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
-
- /* reset the sending counter */
- atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
-
- return tt_changes_fill_buffer(bat_priv,
- hard_iface->packet_buff + BATMAN_OGM_HLEN,
- hard_iface->packet_len - BATMAN_OGM_HLEN);
-}
-
-static int reset_packet_buffer(struct bat_priv *bat_priv,
- struct hard_iface *hard_iface)
-{
- realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
- return 0;
-}
-
-void schedule_bat_ogm(struct hard_iface *hard_iface)
-{
- struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct hard_iface *primary_if;
- int tt_num_changes = -1;
-
- if ((hard_iface->if_status == IF_NOT_IN_USE) ||
- (hard_iface->if_status == IF_TO_BE_REMOVED))
+ if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
+ (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
return;
- /**
- * the interface gets activated here to avoid race conditions between
+ /* the interface gets activated here to avoid race conditions between
* the moment of activating the interface in
* hardif_activate_interface() where the originator mac is set and
* outdated packets (especially uninitialized mac addresses) in the
* packet queue
*/
- if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
- hard_iface->if_status = IF_ACTIVE;
-
- primary_if = primary_if_get_selected(bat_priv);
-
- if (hard_iface == primary_if) {
- /* if at least one change happened */
- if (atomic_read(&bat_priv->tt_local_changes) > 0) {
- tt_commit_changes(bat_priv);
- tt_num_changes = prepare_packet_buffer(bat_priv,
- hard_iface);
- }
-
- /* if the changes have been sent often enough */
- if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
- tt_num_changes = reset_packet_buffer(bat_priv,
- hard_iface);
- }
-
- if (primary_if)
- hardif_free_ref(primary_if);
+ if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+ hard_iface->if_status = BATADV_IF_ACTIVE;
- bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface, tt_num_changes);
+ bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}
-static void forw_packet_free(struct forw_packet *forw_packet)
+static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
if (forw_packet->skb)
kfree_skb(forw_packet->skb);
if (forw_packet->if_incoming)
- hardif_free_ref(forw_packet->if_incoming);
+ batadv_hardif_free_ref(forw_packet->if_incoming);
kfree(forw_packet);
}
-static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
- struct forw_packet *forw_packet,
- unsigned long send_time)
+static void
+_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
+ struct batadv_forw_packet *forw_packet,
+ unsigned long send_time)
{
INIT_HLIST_NODE(&forw_packet->list);
@@ -192,8 +120,8 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
/* start timer for this packet */
INIT_DELAYED_WORK(&forw_packet->delayed_work,
- send_outstanding_bcast_packet);
- queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
+ batadv_send_outstanding_bcast_packet);
+ queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
send_time);
}
@@ -204,21 +132,24 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
* errors.
*
* The skb is not consumed, so the caller should make sure that the
- * skb is freed. */
-int add_bcast_packet_to_list(struct bat_priv *bat_priv,
- const struct sk_buff *skb, unsigned long delay)
+ * skb is freed.
+ */
+int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
+ const struct sk_buff *skb,
+ unsigned long delay)
{
- struct hard_iface *primary_if = NULL;
- struct forw_packet *forw_packet;
- struct bcast_packet *bcast_packet;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_forw_packet *forw_packet;
+ struct batadv_bcast_packet *bcast_packet;
struct sk_buff *newskb;
- if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
- bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
+ if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "bcast packet queue full\n");
goto out;
}
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out_and_inc;
@@ -232,7 +163,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
goto packet_free;
/* as we have a copy now, it is safe to decrease the TTL */
- bcast_packet = (struct bcast_packet *)newskb->data;
+ bcast_packet = (struct batadv_bcast_packet *)newskb->data;
bcast_packet->header.ttl--;
skb_reset_mac_header(newskb);
@@ -243,7 +174,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv,
/* how often did we send the bcast packet ? */
forw_packet->num_packets = 0;
- _add_bcast_packet_to_list(bat_priv, forw_packet, delay);
+ _batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
return NETDEV_TX_OK;
packet_free:
@@ -252,38 +183,43 @@ out_and_inc:
atomic_inc(&bat_priv->bcast_queue_left);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return NETDEV_TX_BUSY;
}
-static void send_outstanding_bcast_packet(struct work_struct *work)
+static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
- struct forw_packet *forw_packet =
- container_of(delayed_work, struct forw_packet, delayed_work);
+ struct batadv_forw_packet *forw_packet;
struct sk_buff *skb1;
- struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ struct net_device *soft_iface;
+ struct batadv_priv *bat_priv;
+
+ forw_packet = container_of(delayed_work, struct batadv_forw_packet,
+ delayed_work);
+ soft_iface = forw_packet->if_incoming->soft_iface;
+ bat_priv = netdev_priv(soft_iface);
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
hlist_del(&forw_packet->list);
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
- if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
goto out;
/* rebroadcast packet */
rcu_read_lock();
- list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
+ list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
if (hard_iface->soft_iface != soft_iface)
continue;
/* send a copy of the saved skb */
skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
if (skb1)
- send_skb_packet(skb1, hard_iface, broadcast_addr);
+ batadv_send_skb_packet(skb1, hard_iface,
+ batadv_broadcast_addr);
}
rcu_read_unlock();
@@ -291,72 +227,72 @@ static void send_outstanding_bcast_packet(struct work_struct *work)
/* if we still have some more bcasts to send */
if (forw_packet->num_packets < 3) {
- _add_bcast_packet_to_list(bat_priv, forw_packet,
- msecs_to_jiffies(5));
+ _batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
+ msecs_to_jiffies(5));
return;
}
out:
- forw_packet_free(forw_packet);
+ batadv_forw_packet_free(forw_packet);
atomic_inc(&bat_priv->bcast_queue_left);
}
-void send_outstanding_bat_ogm_packet(struct work_struct *work)
+void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
- struct forw_packet *forw_packet =
- container_of(delayed_work, struct forw_packet, delayed_work);
- struct bat_priv *bat_priv;
+ struct batadv_forw_packet *forw_packet;
+ struct batadv_priv *bat_priv;
+ forw_packet = container_of(delayed_work, struct batadv_forw_packet,
+ delayed_work);
bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
spin_lock_bh(&bat_priv->forw_bat_list_lock);
hlist_del(&forw_packet->list);
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
- if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
goto out;
bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);
- /**
- * we have to have at least one packet in the queue
+ /* we have to have at least one packet in the queue
* to determine the queues wake up time unless we are
* shutting down
*/
if (forw_packet->own)
- schedule_bat_ogm(forw_packet->if_incoming);
+ batadv_schedule_bat_ogm(forw_packet->if_incoming);
out:
/* don't count own packet */
if (!forw_packet->own)
atomic_inc(&bat_priv->batman_queue_left);
- forw_packet_free(forw_packet);
+ batadv_forw_packet_free(forw_packet);
}
-void purge_outstanding_packets(struct bat_priv *bat_priv,
- const struct hard_iface *hard_iface)
+void
+batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ const struct batadv_hard_iface *hard_iface)
{
- struct forw_packet *forw_packet;
+ struct batadv_forw_packet *forw_packet;
struct hlist_node *tmp_node, *safe_tmp_node;
bool pending;
if (hard_iface)
- bat_dbg(DBG_BATMAN, bat_priv,
- "purge_outstanding_packets(): %s\n",
- hard_iface->net_dev->name);
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "purge_outstanding_packets(): %s\n",
+ hard_iface->net_dev->name);
else
- bat_dbg(DBG_BATMAN, bat_priv,
- "purge_outstanding_packets()\n");
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "purge_outstanding_packets()\n");
/* free bcast list */
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&bat_priv->forw_bcast_list, list) {
- /**
- * if purge_outstanding_packets() was called with an argument
+ /* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if ((hard_iface) &&
@@ -365,8 +301,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
- /**
- * send_outstanding_bcast_packet() will lock the list to
+ /* batadv_send_outstanding_bcast_packet() will lock the list to
* delete the item from the list
*/
pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -374,7 +309,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
- forw_packet_free(forw_packet);
+ batadv_forw_packet_free(forw_packet);
}
}
spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
@@ -384,8 +319,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&bat_priv->forw_bat_list, list) {
- /**
- * if purge_outstanding_packets() was called with an argument
+ /* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
if ((hard_iface) &&
@@ -394,8 +328,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
- /**
- * send_outstanding_bat_packet() will lock the list to
+ /* send_outstanding_bat_packet() will lock the list to
* delete the item from the list
*/
pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
@@ -403,7 +336,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
if (pending) {
hlist_del(&forw_packet->list);
- forw_packet_free(forw_packet);
+ batadv_forw_packet_free(forw_packet);
}
}
spin_unlock_bh(&bat_priv->forw_bat_list_lock);
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 824ef06f9b01..643329b787ed 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,19 +15,21 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_SEND_H_
#define _NET_BATMAN_ADV_SEND_H_
-int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
- const uint8_t *dst_addr);
-void schedule_bat_ogm(struct hard_iface *hard_iface);
-int add_bcast_packet_to_list(struct bat_priv *bat_priv,
- const struct sk_buff *skb, unsigned long delay);
-void send_outstanding_bat_ogm_packet(struct work_struct *work);
-void purge_outstanding_packets(struct bat_priv *bat_priv,
- const struct hard_iface *hard_iface);
+int batadv_send_skb_packet(struct sk_buff *skb,
+ struct batadv_hard_iface *hard_iface,
+ const uint8_t *dst_addr);
+void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface);
+int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
+ const struct sk_buff *skb,
+ unsigned long delay);
+void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work);
+void
+batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+ const struct batadv_hard_iface *hard_iface);
#endif /* _NET_BATMAN_ADV_SEND_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 6e2530b02043..109ea2aae96c 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -24,12 +22,12 @@
#include "hard-interface.h"
#include "routing.h"
#include "send.h"
-#include "bat_debugfs.h"
+#include "debugfs.h"
#include "translation-table.h"
#include "hash.h"
#include "gateway_common.h"
#include "gateway_client.h"
-#include "bat_sysfs.h"
+#include "sysfs.h"
#include "originator.h"
#include <linux/slab.h>
#include <linux/ethtool.h>
@@ -39,27 +37,33 @@
#include "bridge_loop_avoidance.h"
-static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
-static void bat_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info);
-static u32 bat_get_msglevel(struct net_device *dev);
-static void bat_set_msglevel(struct net_device *dev, u32 value);
-static u32 bat_get_link(struct net_device *dev);
-
-static const struct ethtool_ops bat_ethtool_ops = {
- .get_settings = bat_get_settings,
- .get_drvinfo = bat_get_drvinfo,
- .get_msglevel = bat_get_msglevel,
- .set_msglevel = bat_set_msglevel,
- .get_link = bat_get_link,
+static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static void batadv_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info);
+static u32 batadv_get_msglevel(struct net_device *dev);
+static void batadv_set_msglevel(struct net_device *dev, u32 value);
+static u32 batadv_get_link(struct net_device *dev);
+static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data);
+static void batadv_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data);
+static int batadv_get_sset_count(struct net_device *dev, int stringset);
+
+static const struct ethtool_ops batadv_ethtool_ops = {
+ .get_settings = batadv_get_settings,
+ .get_drvinfo = batadv_get_drvinfo,
+ .get_msglevel = batadv_get_msglevel,
+ .set_msglevel = batadv_set_msglevel,
+ .get_link = batadv_get_link,
+ .get_strings = batadv_get_strings,
+ .get_ethtool_stats = batadv_get_ethtool_stats,
+ .get_sset_count = batadv_get_sset_count,
};
-int my_skb_head_push(struct sk_buff *skb, unsigned int len)
+int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
{
int result;
- /**
- * TODO: We must check if we can release all references to non-payload
+ /* TODO: We must check if we can release all references to non-payload
* data using skb_header_release in our skbs to allow skb_cow_header to
* work optimally. This means that those skbs are not allowed to read
* or write any data which is before the current position of skb->data
@@ -74,37 +78,37 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
return 0;
}
-static int interface_open(struct net_device *dev)
+static int batadv_interface_open(struct net_device *dev)
{
netif_start_queue(dev);
return 0;
}
-static int interface_release(struct net_device *dev)
+static int batadv_interface_release(struct net_device *dev)
{
netif_stop_queue(dev);
return 0;
}
-static struct net_device_stats *interface_stats(struct net_device *dev)
+static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
{
- struct bat_priv *bat_priv = netdev_priv(dev);
+ struct batadv_priv *bat_priv = netdev_priv(dev);
return &bat_priv->stats;
}
-static int interface_set_mac_addr(struct net_device *dev, void *p)
+static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
{
- struct bat_priv *bat_priv = netdev_priv(dev);
+ struct batadv_priv *bat_priv = netdev_priv(dev);
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
/* only modify transtable if it has been initialized before */
- if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
- tt_local_remove(bat_priv, dev->dev_addr,
- "mac address changed", false);
- tt_local_add(dev, addr->sa_data, NULL_IFINDEX);
+ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
+ batadv_tt_local_remove(bat_priv, dev->dev_addr,
+ "mac address changed", false);
+ batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
}
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -112,10 +116,10 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static int interface_change_mtu(struct net_device *dev, int new_mtu)
+static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
{
/* check ranges */
- if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev)))
+ if ((new_mtu < 68) || (new_mtu > batadv_hardif_min_mtu(dev)))
return -EINVAL;
dev->mtu = new_mtu;
@@ -123,13 +127,15 @@ static int interface_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
+static int batadv_interface_tx(struct sk_buff *skb,
+ struct net_device *soft_iface)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
- struct hard_iface *primary_if = NULL;
- struct bcast_packet *bcast_packet;
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_hard_iface *primary_if = NULL;
+ struct batadv_bcast_packet *bcast_packet;
struct vlan_ethhdr *vhdr;
+ __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
0x00};
unsigned int header_len = 0;
@@ -137,7 +143,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
short vid __maybe_unused = -1;
bool do_bcast = false;
- if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
+ if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
goto dropped;
soft_iface->trans_start = jiffies;
@@ -147,45 +153,47 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
vhdr = (struct vlan_ethhdr *)skb->data;
vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
- if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN)
+ if (vhdr->h_vlan_encapsulated_proto != ethertype)
break;
/* fall through */
- case ETH_P_BATMAN:
+ case BATADV_ETH_P_BATMAN:
goto dropped;
}
- if (bla_tx(bat_priv, skb, vid))
+ if (batadv_bla_tx(bat_priv, skb, vid))
goto dropped;
/* Register the client MAC in the transtable */
- tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+ batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
/* don't accept stp packets. STP does not help in meshes.
* better use the bridge loop avoidance ...
*/
- if (compare_eth(ethhdr->h_dest, stp_addr))
+ if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
goto dropped;
if (is_multicast_ether_addr(ethhdr->h_dest)) {
do_bcast = true;
switch (atomic_read(&bat_priv->gw_mode)) {
- case GW_MODE_SERVER:
+ case BATADV_GW_MODE_SERVER:
/* gateway servers should not send dhcp
- * requests into the mesh */
- ret = gw_is_dhcp_target(skb, &header_len);
+ * requests into the mesh
+ */
+ ret = batadv_gw_is_dhcp_target(skb, &header_len);
if (ret)
goto dropped;
break;
- case GW_MODE_CLIENT:
+ case BATADV_GW_MODE_CLIENT:
/* gateway clients should send dhcp requests
- * via unicast to their gateway */
- ret = gw_is_dhcp_target(skb, &header_len);
+ * via unicast to their gateway
+ */
+ ret = batadv_gw_is_dhcp_target(skb, &header_len);
if (ret)
do_bcast = false;
break;
- case GW_MODE_OFF:
+ case BATADV_GW_MODE_OFF:
default:
break;
}
@@ -193,22 +201,24 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
/* ethernet packet should be broadcasted */
if (do_bcast) {
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto dropped;
- if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
+ if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
goto dropped;
- bcast_packet = (struct bcast_packet *)skb->data;
- bcast_packet->header.version = COMPAT_VERSION;
- bcast_packet->header.ttl = TTL;
+ bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ bcast_packet->header.version = BATADV_COMPAT_VERSION;
+ bcast_packet->header.ttl = BATADV_TTL;
/* batman packet type: broadcast */
- bcast_packet->header.packet_type = BAT_BCAST;
+ bcast_packet->header.packet_type = BATADV_BCAST;
+ bcast_packet->reserved = 0;
/* hw address of first interface is the orig mac because only
- * this mac is known throughout the mesh */
+ * this mac is known throughout the mesh
+ */
memcpy(bcast_packet->orig,
primary_if->net_dev->dev_addr, ETH_ALEN);
@@ -216,21 +226,22 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
bcast_packet->seqno =
htonl(atomic_inc_return(&bat_priv->bcast_seqno));
- add_bcast_packet_to_list(bat_priv, skb, 1);
+ batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
/* a copy is stored in the bcast list, therefore removing
- * the original skb. */
+ * the original skb.
+ */
kfree_skb(skb);
/* unicast packet */
} else {
- if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) {
- ret = gw_out_of_range(bat_priv, skb, ethhdr);
+ if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
+ ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
if (ret)
goto dropped;
}
- ret = unicast_send_skb(skb, bat_priv);
+ ret = batadv_unicast_send_skb(skb, bat_priv);
if (ret != 0)
goto dropped_freed;
}
@@ -245,18 +256,23 @@ dropped_freed:
bat_priv->stats.tx_dropped++;
end:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return NETDEV_TX_OK;
}
-void interface_rx(struct net_device *soft_iface,
- struct sk_buff *skb, struct hard_iface *recv_if,
- int hdr_size)
+void batadv_interface_rx(struct net_device *soft_iface,
+ struct sk_buff *skb, struct batadv_hard_iface *recv_if,
+ int hdr_size)
{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
struct ethhdr *ethhdr;
struct vlan_ethhdr *vhdr;
+ struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
short vid __maybe_unused = -1;
+ __be16 ethertype = __constant_htons(BATADV_ETH_P_BATMAN);
+ bool is_bcast;
+
+ is_bcast = (batadv_header->packet_type == BATADV_BCAST);
/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size))
@@ -272,11 +288,11 @@ void interface_rx(struct net_device *soft_iface,
vhdr = (struct vlan_ethhdr *)skb->data;
vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
- if (ntohs(vhdr->h_vlan_encapsulated_proto) != ETH_P_BATMAN)
+ if (vhdr->h_vlan_encapsulated_proto != ethertype)
break;
/* fall through */
- case ETH_P_BATMAN:
+ case BATADV_ETH_P_BATMAN:
goto dropped;
}
@@ -287,22 +303,23 @@ void interface_rx(struct net_device *soft_iface,
/* should not be necessary anymore as we use skb_pull_rcsum()
* TODO: please verify this and remove this TODO
- * -- Dec 21st 2009, Simon Wunderlich */
+ * -- Dec 21st 2009, Simon Wunderlich
+ */
-/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/
+ /* skb->ip_summed = CHECKSUM_UNNECESSARY; */
bat_priv->stats.rx_packets++;
bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
soft_iface->last_rx = jiffies;
- if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
+ if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
goto dropped;
/* Let the bridge loop avoidance check the packet. If will
* not handle it, we can safely push it up.
*/
- if (bla_rx(bat_priv, skb, vid))
+ if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
goto out;
netif_rx(skb);
@@ -314,49 +331,50 @@ out:
return;
}
-static const struct net_device_ops bat_netdev_ops = {
- .ndo_open = interface_open,
- .ndo_stop = interface_release,
- .ndo_get_stats = interface_stats,
- .ndo_set_mac_address = interface_set_mac_addr,
- .ndo_change_mtu = interface_change_mtu,
- .ndo_start_xmit = interface_tx,
+static const struct net_device_ops batadv_netdev_ops = {
+ .ndo_open = batadv_interface_open,
+ .ndo_stop = batadv_interface_release,
+ .ndo_get_stats = batadv_interface_stats,
+ .ndo_set_mac_address = batadv_interface_set_mac_addr,
+ .ndo_change_mtu = batadv_interface_change_mtu,
+ .ndo_start_xmit = batadv_interface_tx,
.ndo_validate_addr = eth_validate_addr
};
-static void interface_setup(struct net_device *dev)
+static void batadv_interface_setup(struct net_device *dev)
{
- struct bat_priv *priv = netdev_priv(dev);
+ struct batadv_priv *priv = netdev_priv(dev);
ether_setup(dev);
- dev->netdev_ops = &bat_netdev_ops;
+ dev->netdev_ops = &batadv_netdev_ops;
dev->destructor = free_netdev;
dev->tx_queue_len = 0;
- /**
- * can't call min_mtu, because the needed variables
+ /* can't call min_mtu, because the needed variables
* have not been initialized yet
*/
dev->mtu = ETH_DATA_LEN;
/* reserve more space in the skbuff for our header */
- dev->hard_header_len = BAT_HEADER_LEN;
+ dev->hard_header_len = BATADV_HEADER_LEN;
/* generate random address */
eth_hw_addr_random(dev);
- SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);
+ SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
memset(priv, 0, sizeof(*priv));
}
-struct net_device *softif_create(const char *name)
+struct net_device *batadv_softif_create(const char *name)
{
struct net_device *soft_iface;
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
int ret;
+ size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
- soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);
+ soft_iface = alloc_netdev(sizeof(*bat_priv), name,
+ batadv_interface_setup);
if (!soft_iface)
goto out;
@@ -374,18 +392,18 @@ struct net_device *softif_create(const char *name)
atomic_set(&bat_priv->bonding, 0);
atomic_set(&bat_priv->bridge_loop_avoidance, 0);
atomic_set(&bat_priv->ap_isolation, 0);
- atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
- atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
+ atomic_set(&bat_priv->vis_mode, BATADV_VIS_TYPE_CLIENT_UPDATE);
+ atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
atomic_set(&bat_priv->gw_sel_class, 20);
atomic_set(&bat_priv->gw_bandwidth, 41);
atomic_set(&bat_priv->orig_interval, 1000);
atomic_set(&bat_priv->hop_penalty, 30);
atomic_set(&bat_priv->log_level, 0);
atomic_set(&bat_priv->fragmentation, 1);
- atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
- atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
+ atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
+ atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
- atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
atomic_set(&bat_priv->bcast_seqno, 1);
atomic_set(&bat_priv->ttvn, 0);
atomic_set(&bat_priv->tt_local_changes, 0);
@@ -399,28 +417,34 @@ struct net_device *softif_create(const char *name)
bat_priv->primary_if = NULL;
bat_priv->num_ifaces = 0;
- ret = bat_algo_select(bat_priv, bat_routing_algo);
- if (ret < 0)
+ bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
+ if (!bat_priv->bat_counters)
goto unreg_soft_iface;
- ret = sysfs_add_meshif(soft_iface);
+ ret = batadv_algo_select(bat_priv, batadv_routing_algo);
if (ret < 0)
- goto unreg_soft_iface;
+ goto free_bat_counters;
- ret = debugfs_add_meshif(soft_iface);
+ ret = batadv_sysfs_add_meshif(soft_iface);
+ if (ret < 0)
+ goto free_bat_counters;
+
+ ret = batadv_debugfs_add_meshif(soft_iface);
if (ret < 0)
goto unreg_sysfs;
- ret = mesh_init(soft_iface);
+ ret = batadv_mesh_init(soft_iface);
if (ret < 0)
goto unreg_debugfs;
return soft_iface;
unreg_debugfs:
- debugfs_del_meshif(soft_iface);
+ batadv_debugfs_del_meshif(soft_iface);
unreg_sysfs:
- sysfs_del_meshif(soft_iface);
+ batadv_sysfs_del_meshif(soft_iface);
+free_bat_counters:
+ free_percpu(bat_priv->bat_counters);
unreg_soft_iface:
unregister_netdevice(soft_iface);
return NULL;
@@ -431,24 +455,24 @@ out:
return NULL;
}
-void softif_destroy(struct net_device *soft_iface)
+void batadv_softif_destroy(struct net_device *soft_iface)
{
- debugfs_del_meshif(soft_iface);
- sysfs_del_meshif(soft_iface);
- mesh_free(soft_iface);
+ batadv_debugfs_del_meshif(soft_iface);
+ batadv_sysfs_del_meshif(soft_iface);
+ batadv_mesh_free(soft_iface);
unregister_netdevice(soft_iface);
}
-int softif_is_valid(const struct net_device *net_dev)
+int batadv_softif_is_valid(const struct net_device *net_dev)
{
- if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
+ if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
return 1;
return 0;
}
/* ethtool */
-static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = 0;
cmd->advertising = 0;
@@ -464,25 +488,73 @@ static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
}
-static void bat_get_drvinfo(struct net_device *dev,
- struct ethtool_drvinfo *info)
+static void batadv_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
strcpy(info->driver, "B.A.T.M.A.N. advanced");
- strcpy(info->version, SOURCE_VERSION);
+ strcpy(info->version, BATADV_SOURCE_VERSION);
strcpy(info->fw_version, "N/A");
strcpy(info->bus_info, "batman");
}
-static u32 bat_get_msglevel(struct net_device *dev)
+static u32 batadv_get_msglevel(struct net_device *dev)
{
return -EOPNOTSUPP;
}
-static void bat_set_msglevel(struct net_device *dev, u32 value)
+static void batadv_set_msglevel(struct net_device *dev, u32 value)
{
}
-static u32 bat_get_link(struct net_device *dev)
+static u32 batadv_get_link(struct net_device *dev)
{
return 1;
}
+
+/* Inspired by drivers/net/ethernet/dlink/sundance.c:1702
+ * Declare each description string in struct.name[] to get fixed sized buffer
+ * and compile time checking for strings longer than ETH_GSTRING_LEN.
+ */
+static const struct {
+ const char name[ETH_GSTRING_LEN];
+} batadv_counters_strings[] = {
+ { "forward" },
+ { "forward_bytes" },
+ { "mgmt_tx" },
+ { "mgmt_tx_bytes" },
+ { "mgmt_rx" },
+ { "mgmt_rx_bytes" },
+ { "tt_request_tx" },
+ { "tt_request_rx" },
+ { "tt_response_tx" },
+ { "tt_response_rx" },
+ { "tt_roam_adv_tx" },
+ { "tt_roam_adv_rx" },
+};
+
+static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
+ uint8_t *data)
+{
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, batadv_counters_strings,
+ sizeof(batadv_counters_strings));
+}
+
+static void batadv_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < BATADV_CNT_NUM; i++)
+ data[i] = batadv_sum_counter(bat_priv, i);
+}
+
+static int batadv_get_sset_count(struct net_device *dev, int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return BATADV_CNT_NUM;
+
+ return -EOPNOTSUPP;
+}
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 020300673884..852c683b06a1 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,18 +15,16 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_
#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
-int my_skb_head_push(struct sk_buff *skb, unsigned int len);
-void interface_rx(struct net_device *soft_iface,
- struct sk_buff *skb, struct hard_iface *recv_if,
- int hdr_size);
-struct net_device *softif_create(const char *name);
-void softif_destroy(struct net_device *soft_iface);
-int softif_is_valid(const struct net_device *net_dev);
+int batadv_skb_head_push(struct sk_buff *skb, unsigned int len);
+void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb,
+ struct batadv_hard_iface *recv_if, int hdr_size);
+struct net_device *batadv_softif_create(const char *name);
+void batadv_softif_destroy(struct net_device *soft_iface);
+int batadv_softif_is_valid(const struct net_device *net_dev);
#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
new file mode 100644
index 000000000000..66518c75c217
--- /dev/null
+++ b/net/batman-adv/sysfs.c
@@ -0,0 +1,787 @@
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+ *
+ * Marek Lindner
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#include "main.h"
+#include "sysfs.h"
+#include "translation-table.h"
+#include "originator.h"
+#include "hard-interface.h"
+#include "gateway_common.h"
+#include "gateway_client.h"
+#include "vis.h"
+
+static struct net_device *batadv_kobj_to_netdev(struct kobject *obj)
+{
+ struct device *dev = container_of(obj->parent, struct device, kobj);
+ return to_net_dev(dev);
+}
+
+static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(obj);
+ return netdev_priv(net_dev);
+}
+
+#define BATADV_UEV_TYPE_VAR "BATTYPE="
+#define BATADV_UEV_ACTION_VAR "BATACTION="
+#define BATADV_UEV_DATA_VAR "BATDATA="
+
+static char *batadv_uev_action_str[] = {
+ "add",
+ "del",
+ "change"
+};
+
+static char *batadv_uev_type_str[] = {
+ "gw"
+};
+
+/* Use this, if you have customized show and store functions */
+#define BATADV_ATTR(_name, _mode, _show, _store) \
+struct batadv_attribute batadv_attr_##_name = { \
+ .attr = {.name = __stringify(_name), \
+ .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+};
+
+#define BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
+ssize_t batadv_store_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff, \
+ size_t count) \
+{ \
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
+ struct batadv_priv *bat_priv = netdev_priv(net_dev); \
+ return __batadv_store_bool_attr(buff, count, _post_func, attr, \
+ &bat_priv->_name, net_dev); \
+}
+
+#define BATADV_ATTR_SIF_SHOW_BOOL(_name) \
+ssize_t batadv_show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+{ \
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
+ return sprintf(buff, "%s\n", \
+ atomic_read(&bat_priv->_name) == 0 ? \
+ "disabled" : "enabled"); \
+} \
+
+/* Use this, if you are going to turn a [name] in the soft-interface
+ * (bat_priv) on or off
+ */
+#define BATADV_ATTR_SIF_BOOL(_name, _mode, _post_func) \
+ static BATADV_ATTR_SIF_STORE_BOOL(_name, _post_func) \
+ static BATADV_ATTR_SIF_SHOW_BOOL(_name) \
+ static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
+ batadv_store_##_name)
+
+
+#define BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \
+ssize_t batadv_store_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff, \
+ size_t count) \
+{ \
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
+ struct batadv_priv *bat_priv = netdev_priv(net_dev); \
+ return __batadv_store_uint_attr(buff, count, _min, _max, \
+ _post_func, attr, \
+ &bat_priv->_name, net_dev); \
+}
+
+#define BATADV_ATTR_SIF_SHOW_UINT(_name) \
+ssize_t batadv_show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+{ \
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj); \
+ return sprintf(buff, "%i\n", atomic_read(&bat_priv->_name)); \
+} \
+
+/* Use this, if you are going to set [name] in the soft-interface
+ * (bat_priv) to an unsigned integer value
+ */
+#define BATADV_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \
+ static BATADV_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func)\
+ static BATADV_ATTR_SIF_SHOW_UINT(_name) \
+ static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
+ batadv_store_##_name)
+
+
+#define BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \
+ssize_t batadv_store_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff, \
+ size_t count) \
+{ \
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
+ struct batadv_hard_iface *hard_iface; \
+ ssize_t length; \
+ \
+ hard_iface = batadv_hardif_get_by_netdev(net_dev); \
+ if (!hard_iface) \
+ return 0; \
+ \
+ length = __batadv_store_uint_attr(buff, count, _min, _max, \
+ _post_func, attr, \
+ &hard_iface->_name, net_dev); \
+ \
+ batadv_hardif_free_ref(hard_iface); \
+ return length; \
+}
+
+#define BATADV_ATTR_HIF_SHOW_UINT(_name) \
+ssize_t batadv_show_##_name(struct kobject *kobj, \
+ struct attribute *attr, char *buff) \
+{ \
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj); \
+ struct batadv_hard_iface *hard_iface; \
+ ssize_t length; \
+ \
+ hard_iface = batadv_hardif_get_by_netdev(net_dev); \
+ if (!hard_iface) \
+ return 0; \
+ \
+ length = sprintf(buff, "%i\n", atomic_read(&hard_iface->_name));\
+ \
+ batadv_hardif_free_ref(hard_iface); \
+ return length; \
+}
+
+/* Use this, if you are going to set [name] in hard_iface to an
+ * unsigned integer value
+ */
+#define BATADV_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \
+ static BATADV_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func)\
+ static BATADV_ATTR_HIF_SHOW_UINT(_name) \
+ static BATADV_ATTR(_name, _mode, batadv_show_##_name, \
+ batadv_store_##_name)
+
+
+static int batadv_store_bool_attr(char *buff, size_t count,
+ struct net_device *net_dev,
+ const char *attr_name, atomic_t *attr)
+{
+ int enabled = -1;
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ if ((strncmp(buff, "1", 2) == 0) ||
+ (strncmp(buff, "enable", 7) == 0) ||
+ (strncmp(buff, "enabled", 8) == 0))
+ enabled = 1;
+
+ if ((strncmp(buff, "0", 2) == 0) ||
+ (strncmp(buff, "disable", 8) == 0) ||
+ (strncmp(buff, "disabled", 9) == 0))
+ enabled = 0;
+
+ if (enabled < 0) {
+ batadv_info(net_dev, "%s: Invalid parameter received: %s\n",
+ attr_name, buff);
+ return -EINVAL;
+ }
+
+ if (atomic_read(attr) == enabled)
+ return count;
+
+ batadv_info(net_dev, "%s: Changing from: %s to: %s\n", attr_name,
+ atomic_read(attr) == 1 ? "enabled" : "disabled",
+ enabled == 1 ? "enabled" : "disabled");
+
+ atomic_set(attr, (unsigned int)enabled);
+ return count;
+}
+
+static inline ssize_t
+__batadv_store_bool_attr(char *buff, size_t count,
+ void (*post_func)(struct net_device *),
+ struct attribute *attr,
+ atomic_t *attr_store, struct net_device *net_dev)
+{
+ int ret;
+
+ ret = batadv_store_bool_attr(buff, count, net_dev, attr->name,
+ attr_store);
+ if (post_func && ret)
+ post_func(net_dev);
+
+ return ret;
+}
+
+static int batadv_store_uint_attr(const char *buff, size_t count,
+ struct net_device *net_dev,
+ const char *attr_name,
+ unsigned int min, unsigned int max,
+ atomic_t *attr)
+{
+ unsigned long uint_val;
+ int ret;
+
+ ret = kstrtoul(buff, 10, &uint_val);
+ if (ret) {
+ batadv_info(net_dev, "%s: Invalid parameter received: %s\n",
+ attr_name, buff);
+ return -EINVAL;
+ }
+
+ if (uint_val < min) {
+ batadv_info(net_dev, "%s: Value is too small: %lu min: %u\n",
+ attr_name, uint_val, min);
+ return -EINVAL;
+ }
+
+ if (uint_val > max) {
+ batadv_info(net_dev, "%s: Value is too big: %lu max: %u\n",
+ attr_name, uint_val, max);
+ return -EINVAL;
+ }
+
+ if (atomic_read(attr) == uint_val)
+ return count;
+
+ batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
+ attr_name, atomic_read(attr), uint_val);
+
+ atomic_set(attr, uint_val);
+ return count;
+}
+
+static inline ssize_t
+__batadv_store_uint_attr(const char *buff, size_t count,
+ int min, int max,
+ void (*post_func)(struct net_device *),
+ const struct attribute *attr,
+ atomic_t *attr_store, struct net_device *net_dev)
+{
+ int ret;
+
+ ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
+ attr_store);
+ if (post_func && ret)
+ post_func(net_dev);
+
+ return ret;
+}
+
+static ssize_t batadv_show_vis_mode(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+ int vis_mode = atomic_read(&bat_priv->vis_mode);
+ const char *mode;
+
+ if (vis_mode == BATADV_VIS_TYPE_CLIENT_UPDATE)
+ mode = "client";
+ else
+ mode = "server";
+
+ return sprintf(buff, "%s\n", mode);
+}
+
+static ssize_t batadv_store_vis_mode(struct kobject *kobj,
+ struct attribute *attr, char *buff,
+ size_t count)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ unsigned long val;
+ int ret, vis_mode_tmp = -1;
+ const char *old_mode, *new_mode;
+
+ ret = kstrtoul(buff, 10, &val);
+
+ if (((count == 2) && (!ret) &&
+ (val == BATADV_VIS_TYPE_CLIENT_UPDATE)) ||
+ (strncmp(buff, "client", 6) == 0) ||
+ (strncmp(buff, "off", 3) == 0))
+ vis_mode_tmp = BATADV_VIS_TYPE_CLIENT_UPDATE;
+
+ if (((count == 2) && (!ret) &&
+ (val == BATADV_VIS_TYPE_SERVER_SYNC)) ||
+ (strncmp(buff, "server", 6) == 0))
+ vis_mode_tmp = BATADV_VIS_TYPE_SERVER_SYNC;
+
+ if (vis_mode_tmp < 0) {
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ batadv_info(net_dev,
+ "Invalid parameter for 'vis mode' setting received: %s\n",
+ buff);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&bat_priv->vis_mode) == vis_mode_tmp)
+ return count;
+
+ if (atomic_read(&bat_priv->vis_mode) == BATADV_VIS_TYPE_CLIENT_UPDATE)
+ old_mode = "client";
+ else
+ old_mode = "server";
+
+ if (vis_mode_tmp == BATADV_VIS_TYPE_CLIENT_UPDATE)
+ new_mode = "client";
+ else
+ new_mode = "server";
+
+ batadv_info(net_dev, "Changing vis mode from: %s to: %s\n", old_mode,
+ new_mode);
+
+ atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
+ return count;
+}
+
+static ssize_t batadv_show_bat_algo(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+ return sprintf(buff, "%s\n", bat_priv->bat_algo_ops->name);
+}
+
+static void batadv_post_gw_deselect(struct net_device *net_dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ batadv_gw_deselect(bat_priv);
+}
+
+static ssize_t batadv_show_gw_mode(struct kobject *kobj, struct attribute *attr,
+ char *buff)
+{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+ int bytes_written;
+
+ switch (atomic_read(&bat_priv->gw_mode)) {
+ case BATADV_GW_MODE_CLIENT:
+ bytes_written = sprintf(buff, "%s\n",
+ BATADV_GW_MODE_CLIENT_NAME);
+ break;
+ case BATADV_GW_MODE_SERVER:
+ bytes_written = sprintf(buff, "%s\n",
+ BATADV_GW_MODE_SERVER_NAME);
+ break;
+ default:
+ bytes_written = sprintf(buff, "%s\n",
+ BATADV_GW_MODE_OFF_NAME);
+ break;
+ }
+
+ return bytes_written;
+}
+
+static ssize_t batadv_store_gw_mode(struct kobject *kobj,
+ struct attribute *attr, char *buff,
+ size_t count)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ char *curr_gw_mode_str;
+ int gw_mode_tmp = -1;
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ if (strncmp(buff, BATADV_GW_MODE_OFF_NAME,
+ strlen(BATADV_GW_MODE_OFF_NAME)) == 0)
+ gw_mode_tmp = BATADV_GW_MODE_OFF;
+
+ if (strncmp(buff, BATADV_GW_MODE_CLIENT_NAME,
+ strlen(BATADV_GW_MODE_CLIENT_NAME)) == 0)
+ gw_mode_tmp = BATADV_GW_MODE_CLIENT;
+
+ if (strncmp(buff, BATADV_GW_MODE_SERVER_NAME,
+ strlen(BATADV_GW_MODE_SERVER_NAME)) == 0)
+ gw_mode_tmp = BATADV_GW_MODE_SERVER;
+
+ if (gw_mode_tmp < 0) {
+ batadv_info(net_dev,
+ "Invalid parameter for 'gw mode' setting received: %s\n",
+ buff);
+ return -EINVAL;
+ }
+
+ if (atomic_read(&bat_priv->gw_mode) == gw_mode_tmp)
+ return count;
+
+ switch (atomic_read(&bat_priv->gw_mode)) {
+ case BATADV_GW_MODE_CLIENT:
+ curr_gw_mode_str = BATADV_GW_MODE_CLIENT_NAME;
+ break;
+ case BATADV_GW_MODE_SERVER:
+ curr_gw_mode_str = BATADV_GW_MODE_SERVER_NAME;
+ break;
+ default:
+ curr_gw_mode_str = BATADV_GW_MODE_OFF_NAME;
+ break;
+ }
+
+ batadv_info(net_dev, "Changing gw mode from: %s to: %s\n",
+ curr_gw_mode_str, buff);
+
+ batadv_gw_deselect(bat_priv);
+ atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
+ return count;
+}
+
+static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
+ int down, up;
+ int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth);
+
+ batadv_gw_bandwidth_to_kbit(gw_bandwidth, &down, &up);
+ return sprintf(buff, "%i%s/%i%s\n",
+ (down > 2048 ? down / 1024 : down),
+ (down > 2048 ? "MBit" : "KBit"),
+ (up > 2048 ? up / 1024 : up),
+ (up > 2048 ? "MBit" : "KBit"));
+}
+
+static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
+ struct attribute *attr, char *buff,
+ size_t count)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ return batadv_gw_bandwidth_set(net_dev, buff, count);
+}
+
+BATADV_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
+BATADV_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
+#ifdef CONFIG_BATMAN_ADV_BLA
+BATADV_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
+#endif
+BATADV_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu);
+BATADV_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
+static BATADV_ATTR(vis_mode, S_IRUGO | S_IWUSR, batadv_show_vis_mode,
+ batadv_store_vis_mode);
+static BATADV_ATTR(routing_algo, S_IRUGO, batadv_show_bat_algo, NULL);
+static BATADV_ATTR(gw_mode, S_IRUGO | S_IWUSR, batadv_show_gw_mode,
+ batadv_store_gw_mode);
+BATADV_ATTR_SIF_UINT(orig_interval, S_IRUGO | S_IWUSR, 2 * BATADV_JITTER,
+ INT_MAX, NULL);
+BATADV_ATTR_SIF_UINT(hop_penalty, S_IRUGO | S_IWUSR, 0, BATADV_TQ_MAX_VALUE,
+ NULL);
+BATADV_ATTR_SIF_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, BATADV_TQ_MAX_VALUE,
+ batadv_post_gw_deselect);
+static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
+ batadv_store_gw_bwidth);
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
+#endif
+
+static struct batadv_attribute *batadv_mesh_attrs[] = {
+ &batadv_attr_aggregated_ogms,
+ &batadv_attr_bonding,
+#ifdef CONFIG_BATMAN_ADV_BLA
+ &batadv_attr_bridge_loop_avoidance,
+#endif
+ &batadv_attr_fragmentation,
+ &batadv_attr_ap_isolation,
+ &batadv_attr_vis_mode,
+ &batadv_attr_routing_algo,
+ &batadv_attr_gw_mode,
+ &batadv_attr_orig_interval,
+ &batadv_attr_hop_penalty,
+ &batadv_attr_gw_sel_class,
+ &batadv_attr_gw_bandwidth,
+#ifdef CONFIG_BATMAN_ADV_DEBUG
+ &batadv_attr_log_level,
+#endif
+ NULL,
+};
+
+int batadv_sysfs_add_meshif(struct net_device *dev)
+{
+ struct kobject *batif_kobject = &dev->dev.kobj;
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_attribute **bat_attr;
+ int err;
+
+ bat_priv->mesh_obj = kobject_create_and_add(BATADV_SYSFS_IF_MESH_SUBDIR,
+ batif_kobject);
+ if (!bat_priv->mesh_obj) {
+ batadv_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
+ BATADV_SYSFS_IF_MESH_SUBDIR);
+ goto out;
+ }
+
+ for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr) {
+ err = sysfs_create_file(bat_priv->mesh_obj,
+ &((*bat_attr)->attr));
+ if (err) {
+ batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
+ dev->name, BATADV_SYSFS_IF_MESH_SUBDIR,
+ ((*bat_attr)->attr).name);
+ goto rem_attr;
+ }
+ }
+
+ return 0;
+
+rem_attr:
+ for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
+
+ kobject_put(bat_priv->mesh_obj);
+ bat_priv->mesh_obj = NULL;
+out:
+ return -ENOMEM;
+}
+
+void batadv_sysfs_del_meshif(struct net_device *dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ struct batadv_attribute **bat_attr;
+
+ for (bat_attr = batadv_mesh_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(bat_priv->mesh_obj, &((*bat_attr)->attr));
+
+ kobject_put(bat_priv->mesh_obj);
+ bat_priv->mesh_obj = NULL;
+}
+
+static ssize_t batadv_show_mesh_iface(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_hard_iface *hard_iface;
+ ssize_t length;
+ const char *ifname;
+
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
+ if (!hard_iface)
+ return 0;
+
+ if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
+ ifname = "none";
+ else
+ ifname = hard_iface->soft_iface->name;
+
+ length = sprintf(buff, "%s\n", ifname);
+
+ batadv_hardif_free_ref(hard_iface);
+
+ return length;
+}
+
+static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
+ struct attribute *attr, char *buff,
+ size_t count)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_hard_iface *hard_iface;
+ int status_tmp = -1;
+ int ret = count;
+
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
+ if (!hard_iface)
+ return count;
+
+ if (buff[count - 1] == '\n')
+ buff[count - 1] = '\0';
+
+ if (strlen(buff) >= IFNAMSIZ) {
+ pr_err("Invalid parameter for 'mesh_iface' setting received: interface name too long '%s'\n",
+ buff);
+ batadv_hardif_free_ref(hard_iface);
+ return -EINVAL;
+ }
+
+ if (strncmp(buff, "none", 4) == 0)
+ status_tmp = BATADV_IF_NOT_IN_USE;
+ else
+ status_tmp = BATADV_IF_I_WANT_YOU;
+
+ if (hard_iface->if_status == status_tmp)
+ goto out;
+
+ if ((hard_iface->soft_iface) &&
+ (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
+ goto out;
+
+ if (!rtnl_trylock()) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ if (status_tmp == BATADV_IF_NOT_IN_USE) {
+ batadv_hardif_disable_interface(hard_iface);
+ goto unlock;
+ }
+
+ /* if the interface already is in use */
+ if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
+ batadv_hardif_disable_interface(hard_iface);
+
+ ret = batadv_hardif_enable_interface(hard_iface, buff);
+
+unlock:
+ rtnl_unlock();
+out:
+ batadv_hardif_free_ref(hard_iface);
+ return ret;
+}
+
+static ssize_t batadv_show_iface_status(struct kobject *kobj,
+ struct attribute *attr, char *buff)
+{
+ struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
+ struct batadv_hard_iface *hard_iface;
+ ssize_t length;
+
+ hard_iface = batadv_hardif_get_by_netdev(net_dev);
+ if (!hard_iface)
+ return 0;
+
+ switch (hard_iface->if_status) {
+ case BATADV_IF_TO_BE_REMOVED:
+ length = sprintf(buff, "disabling\n");
+ break;
+ case BATADV_IF_INACTIVE:
+ length = sprintf(buff, "inactive\n");
+ break;
+ case BATADV_IF_ACTIVE:
+ length = sprintf(buff, "active\n");
+ break;
+ case BATADV_IF_TO_BE_ACTIVATED:
+ length = sprintf(buff, "enabling\n");
+ break;
+ case BATADV_IF_NOT_IN_USE:
+ default:
+ length = sprintf(buff, "not in use\n");
+ break;
+ }
+
+ batadv_hardif_free_ref(hard_iface);
+
+ return length;
+}
+
+static BATADV_ATTR(mesh_iface, S_IRUGO | S_IWUSR, batadv_show_mesh_iface,
+ batadv_store_mesh_iface);
+static BATADV_ATTR(iface_status, S_IRUGO, batadv_show_iface_status, NULL);
+
+static struct batadv_attribute *batadv_batman_attrs[] = {
+ &batadv_attr_mesh_iface,
+ &batadv_attr_iface_status,
+ NULL,
+};
+
+int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev)
+{
+ struct kobject *hardif_kobject = &dev->dev.kobj;
+ struct batadv_attribute **bat_attr;
+ int err;
+
+ *hardif_obj = kobject_create_and_add(BATADV_SYSFS_IF_BAT_SUBDIR,
+ hardif_kobject);
+
+ if (!*hardif_obj) {
+ batadv_err(dev, "Can't add sysfs directory: %s/%s\n", dev->name,
+ BATADV_SYSFS_IF_BAT_SUBDIR);
+ goto out;
+ }
+
+ for (bat_attr = batadv_batman_attrs; *bat_attr; ++bat_attr) {
+ err = sysfs_create_file(*hardif_obj, &((*bat_attr)->attr));
+ if (err) {
+ batadv_err(dev, "Can't add sysfs file: %s/%s/%s\n",
+ dev->name, BATADV_SYSFS_IF_BAT_SUBDIR,
+ ((*bat_attr)->attr).name);
+ goto rem_attr;
+ }
+ }
+
+ return 0;
+
+rem_attr:
+ for (bat_attr = batadv_batman_attrs; *bat_attr; ++bat_attr)
+ sysfs_remove_file(*hardif_obj, &((*bat_attr)->attr));
+out:
+ return -ENOMEM;
+}
+
+void batadv_sysfs_del_hardif(struct kobject **hardif_obj)
+{
+ kobject_put(*hardif_obj);
+ *hardif_obj = NULL;
+}
+
+int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
+ enum batadv_uev_action action, const char *data)
+{
+ int ret = -ENOMEM;
+ struct batadv_hard_iface *primary_if = NULL;
+ struct kobject *bat_kobj;
+ char *uevent_env[4] = { NULL, NULL, NULL, NULL };
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+ if (!primary_if)
+ goto out;
+
+ bat_kobj = &primary_if->soft_iface->dev.kobj;
+
+ uevent_env[0] = kmalloc(strlen(BATADV_UEV_TYPE_VAR) +
+ strlen(batadv_uev_type_str[type]) + 1,
+ GFP_ATOMIC);
+ if (!uevent_env[0])
+ goto out;
+
+ sprintf(uevent_env[0], "%s%s", BATADV_UEV_TYPE_VAR,
+ batadv_uev_type_str[type]);
+
+ uevent_env[1] = kmalloc(strlen(BATADV_UEV_ACTION_VAR) +
+ strlen(batadv_uev_action_str[action]) + 1,
+ GFP_ATOMIC);
+ if (!uevent_env[1])
+ goto out;
+
+ sprintf(uevent_env[1], "%s%s", BATADV_UEV_ACTION_VAR,
+ batadv_uev_action_str[action]);
+
+ /* If the event is DEL, ignore the data field */
+ if (action != BATADV_UEV_DEL) {
+ uevent_env[2] = kmalloc(strlen(BATADV_UEV_DATA_VAR) +
+ strlen(data) + 1, GFP_ATOMIC);
+ if (!uevent_env[2])
+ goto out;
+
+ sprintf(uevent_env[2], "%s%s", BATADV_UEV_DATA_VAR, data);
+ }
+
+ ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
+out:
+ kfree(uevent_env[0]);
+ kfree(uevent_env[1]);
+ kfree(uevent_env[2]);
+
+ if (primary_if)
+ batadv_hardif_free_ref(primary_if);
+
+ if (ret)
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
+ batadv_uev_type_str[type],
+ batadv_uev_action_str[action],
+ (action == BATADV_UEV_DEL ? "NULL" : data), ret);
+ return ret;
+}
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/sysfs.h
index fece77ae586e..3fd1412b0620 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -16,17 +15,15 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
-
#ifndef _NET_BATMAN_ADV_SYSFS_H_
#define _NET_BATMAN_ADV_SYSFS_H_
-#define SYSFS_IF_MESH_SUBDIR "mesh"
-#define SYSFS_IF_BAT_SUBDIR "batman_adv"
+#define BATADV_SYSFS_IF_MESH_SUBDIR "mesh"
+#define BATADV_SYSFS_IF_BAT_SUBDIR "batman_adv"
-struct bat_attribute {
+struct batadv_attribute {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
char *buf);
@@ -34,11 +31,12 @@ struct bat_attribute {
char *buf, size_t count);
};
-int sysfs_add_meshif(struct net_device *dev);
-void sysfs_del_meshif(struct net_device *dev);
-int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
-void sysfs_del_hardif(struct kobject **hardif_obj);
-int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
- enum uev_action action, const char *data);
+int batadv_sysfs_add_meshif(struct net_device *dev);
+void batadv_sysfs_del_meshif(struct net_device *dev);
+int batadv_sysfs_add_hardif(struct kobject **hardif_obj,
+ struct net_device *dev);
+void batadv_sysfs_del_hardif(struct kobject **hardif_obj);
+int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
+ enum batadv_uev_action action, const char *data);
#endif /* _NET_BATMAN_ADV_SYSFS_H_ */
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 2ab83d7fb1f8..a438f4b582fc 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -31,44 +29,46 @@
#include <linux/crc16.h>
-static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
- struct orig_node *orig_node);
-static void tt_purge(struct work_struct *work);
-static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+ struct batadv_orig_node *orig_node);
+static void batadv_tt_purge(struct work_struct *work);
+static void
+batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
/* returns 1 if they are the same mac addr */
-static int compare_tt(const struct hlist_node *node, const void *data2)
+static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
{
- const void *data1 = container_of(node, struct tt_common_entry,
+ const void *data1 = container_of(node, struct batadv_tt_common_entry,
hash_entry);
return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}
-static void tt_start_timer(struct bat_priv *bat_priv)
+static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
- queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
+ INIT_DELAYED_WORK(&bat_priv->tt_work, batadv_tt_purge);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work,
msecs_to_jiffies(5000));
}
-static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
- const void *data)
+static struct batadv_tt_common_entry *
+batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
{
struct hlist_head *head;
struct hlist_node *node;
- struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
uint32_t index;
if (!hash)
return NULL;
- index = choose_orig(data, hash->size);
+ index = batadv_choose_orig(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
- if (!compare_eth(tt_common_entry, data))
+ if (!batadv_compare_eth(tt_common_entry, data))
continue;
if (!atomic_inc_not_zero(&tt_common_entry->refcount))
@@ -82,80 +82,87 @@ static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
return tt_common_entry_tmp;
}
-static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
- const void *data)
+static struct batadv_tt_local_entry *
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
{
- struct tt_common_entry *tt_common_entry;
- struct tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
- tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
+ tt_common_entry = batadv_tt_hash_find(bat_priv->tt_local_hash, data);
if (tt_common_entry)
tt_local_entry = container_of(tt_common_entry,
- struct tt_local_entry, common);
+ struct batadv_tt_local_entry,
+ common);
return tt_local_entry;
}
-static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
- const void *data)
+static struct batadv_tt_global_entry *
+batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
{
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry = NULL;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
- tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
+ tt_common_entry = batadv_tt_hash_find(bat_priv->tt_global_hash, data);
if (tt_common_entry)
tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry, common);
+ struct batadv_tt_global_entry,
+ common);
return tt_global_entry;
}
-static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
+static void
+batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
{
if (atomic_dec_and_test(&tt_local_entry->common.refcount))
kfree_rcu(tt_local_entry, common.rcu);
}
-static void tt_global_entry_free_rcu(struct rcu_head *rcu)
+static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
{
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global_entry;
- tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
- tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
- common);
+ tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
+ tt_global_entry = container_of(tt_common_entry,
+ struct batadv_tt_global_entry, common);
kfree(tt_global_entry);
}
-static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
+static void
+batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
{
if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
- tt_global_del_orig_list(tt_global_entry);
+ batadv_tt_global_del_orig_list(tt_global_entry);
call_rcu(&tt_global_entry->common.rcu,
- tt_global_entry_free_rcu);
+ batadv_tt_global_entry_free_rcu);
}
}
-static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
- orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
- orig_node_free_ref(orig_entry->orig_node);
+ orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
+ batadv_orig_node_free_ref(orig_entry->orig_node);
kfree(orig_entry);
}
-static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
+static void
+batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
{
/* to avoid race conditions, immediately decrease the tt counter */
atomic_dec(&orig_entry->orig_node->tt_size);
- call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
+ call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
-static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
- uint8_t flags)
+static void batadv_tt_local_event(struct batadv_priv *bat_priv,
+ const uint8_t *addr, uint8_t flags)
{
- struct tt_change_node *tt_change_node;
+ struct batadv_tt_change_node *tt_change_node, *entry, *safe;
+ bool event_removed = false;
+ bool del_op_requested, del_op_entry;
tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
@@ -165,50 +172,82 @@ static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
tt_change_node->change.flags = flags;
memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
+ del_op_requested = flags & BATADV_TT_CLIENT_DEL;
+
+ /* check for ADD+DEL or DEL+ADD events */
spin_lock_bh(&bat_priv->tt_changes_list_lock);
+ list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
+ list) {
+ if (!batadv_compare_eth(entry->change.addr, addr))
+ continue;
+
+ /* DEL+ADD in the same orig interval have no effect and can be
+ * removed to avoid silly behaviour on the receiver side. The
+ * other way around (ADD+DEL) can happen in case of roaming of
+ * a client still in the NEW state. Roaming of NEW clients is
+ * now possible due to automatically recognition of "temporary"
+ * clients
+ */
+ del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
+ if (!del_op_requested && del_op_entry)
+ goto del;
+ if (del_op_requested && !del_op_entry)
+ goto del;
+ continue;
+del:
+ list_del(&entry->list);
+ kfree(entry);
+ event_removed = true;
+ goto unlock;
+ }
+
/* track the change in the OGMinterval list */
list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
- atomic_inc(&bat_priv->tt_local_changes);
+
+unlock:
spin_unlock_bh(&bat_priv->tt_changes_list_lock);
- atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+ if (event_removed)
+ atomic_dec(&bat_priv->tt_local_changes);
+ else
+ atomic_inc(&bat_priv->tt_local_changes);
}
-int tt_len(int changes_num)
+int batadv_tt_len(int changes_num)
{
- return changes_num * sizeof(struct tt_change);
+ return changes_num * sizeof(struct batadv_tt_change);
}
-static int tt_local_init(struct bat_priv *bat_priv)
+static int batadv_tt_local_init(struct batadv_priv *bat_priv)
{
if (bat_priv->tt_local_hash)
- return 1;
+ return 0;
- bat_priv->tt_local_hash = hash_new(1024);
+ bat_priv->tt_local_hash = batadv_hash_new(1024);
if (!bat_priv->tt_local_hash)
- return 0;
+ return -ENOMEM;
- return 1;
+ return 0;
}
-void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
- int ifindex)
+void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ int ifindex)
{
- struct bat_priv *bat_priv = netdev_priv(soft_iface);
- struct tt_local_entry *tt_local_entry = NULL;
- struct tt_global_entry *tt_global_entry = NULL;
+ struct batadv_priv *bat_priv = netdev_priv(soft_iface);
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
int hash_added;
- tt_local_entry = tt_local_hash_find(bat_priv, addr);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
if (tt_local_entry) {
tt_local_entry->last_seen = jiffies;
- /* possibly unset the TT_CLIENT_PENDING flag */
- tt_local_entry->common.flags &= ~TT_CLIENT_PENDING;
+ /* possibly unset the BATADV_TT_CLIENT_PENDING flag */
+ tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
goto out;
}
@@ -216,40 +255,42 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
if (!tt_local_entry)
goto out;
- bat_dbg(DBG_TT, bat_priv,
- "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
- (uint8_t)atomic_read(&bat_priv->ttvn));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
+ (uint8_t)atomic_read(&bat_priv->ttvn));
memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
- tt_local_entry->common.flags = NO_FLAGS;
- if (is_wifi_iface(ifindex))
- tt_local_entry->common.flags |= TT_CLIENT_WIFI;
+ tt_local_entry->common.flags = BATADV_NO_FLAGS;
+ if (batadv_is_wifi_iface(ifindex))
+ tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
atomic_set(&tt_local_entry->common.refcount, 2);
tt_local_entry->last_seen = jiffies;
/* the batman interface mac address should never be purged */
- if (compare_eth(addr, soft_iface->dev_addr))
- tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;
+ if (batadv_compare_eth(addr, soft_iface->dev_addr))
+ tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;
/* The local entry has to be marked as NEW to avoid to send it in
* a full table response going out before the next ttvn increment
- * (consistency check) */
- tt_local_entry->common.flags |= TT_CLIENT_NEW;
+ * (consistency check)
+ */
+ tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
- hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
- &tt_local_entry->common,
- &tt_local_entry->common.hash_entry);
+ hash_added = batadv_hash_add(bat_priv->tt_local_hash, batadv_compare_tt,
+ batadv_choose_orig,
+ &tt_local_entry->common,
+ &tt_local_entry->common.hash_entry);
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
goto out;
}
- tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
+ batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
/* remove address from global hash if present */
- tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
/* Check whether it is a roaming! */
if (tt_global_entry) {
@@ -259,31 +300,85 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
hlist_for_each_entry_rcu(orig_entry, node, head, list) {
orig_entry->orig_node->tt_poss_change = true;
- send_roam_adv(bat_priv, tt_global_entry->common.addr,
- orig_entry->orig_node);
+ batadv_send_roam_adv(bat_priv,
+ tt_global_entry->common.addr,
+ orig_entry->orig_node);
}
rcu_read_unlock();
/* The global entry has to be marked as ROAMING and
* has to be kept for consistency purpose
*/
- tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+ tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
tt_global_entry->roam_at = jiffies;
}
out:
if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
+}
+
+static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
+ int *packet_buff_len,
+ int min_packet_len,
+ int new_packet_len)
+{
+ unsigned char *new_buff;
+
+ new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
+
+ /* keep old buffer if kmalloc should fail */
+ if (new_buff) {
+ memcpy(new_buff, *packet_buff, min_packet_len);
+ kfree(*packet_buff);
+ *packet_buff = new_buff;
+ *packet_buff_len = new_packet_len;
+ }
+}
+
+static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len,
+ int min_packet_len)
+{
+ struct batadv_hard_iface *primary_if;
+ int req_len;
+
+ primary_if = batadv_primary_if_get_selected(bat_priv);
+
+ req_len = min_packet_len;
+ req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes));
+
+ /* if we have too many changes for one packet don't send any
+ * and wait for the tt table request which will be fragmented
+ */
+ if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
+ req_len = min_packet_len;
+
+ batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
+ min_packet_len, req_len);
+
+ if (primary_if)
+ batadv_hardif_free_ref(primary_if);
}
-int tt_changes_fill_buffer(struct bat_priv *bat_priv,
- unsigned char *buff, int buff_len)
+static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len,
+ int min_packet_len)
{
- int count = 0, tot_changes = 0;
- struct tt_change_node *entry, *safe;
+ struct batadv_tt_change_node *entry, *safe;
+ int count = 0, tot_changes = 0, new_len;
+ unsigned char *tt_buff;
- if (buff_len > 0)
- tot_changes = buff_len / tt_len(1);
+ batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
+ packet_buff_len, min_packet_len);
+
+ new_len = *packet_buff_len - min_packet_len;
+ tt_buff = *packet_buff + min_packet_len;
+
+ if (new_len > 0)
+ tot_changes = new_len / batadv_tt_len(1);
spin_lock_bh(&bat_priv->tt_changes_list_lock);
atomic_set(&bat_priv->tt_local_changes, 0);
@@ -291,8 +386,8 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
list) {
if (count < tot_changes) {
- memcpy(buff + tt_len(count),
- &entry->change, sizeof(struct tt_change));
+ memcpy(tt_buff + batadv_tt_len(count),
+ &entry->change, sizeof(struct batadv_tt_change));
count++;
}
list_del(&entry->list);
@@ -305,37 +400,35 @@ int tt_changes_fill_buffer(struct bat_priv *bat_priv,
kfree(bat_priv->tt_buff);
bat_priv->tt_buff_len = 0;
bat_priv->tt_buff = NULL;
- /* We check whether this new OGM has no changes due to size
- * problems */
- if (buff_len > 0) {
- /**
- * if kmalloc() fails we will reply with the full table
+ /* check whether this new OGM has no changes due to size problems */
+ if (new_len > 0) {
+ /* if kmalloc() fails we will reply with the full table
* instead of providing the diff
*/
- bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
+ bat_priv->tt_buff = kmalloc(new_len, GFP_ATOMIC);
if (bat_priv->tt_buff) {
- memcpy(bat_priv->tt_buff, buff, buff_len);
- bat_priv->tt_buff_len = buff_len;
+ memcpy(bat_priv->tt_buff, tt_buff, new_len);
+ bat_priv->tt_buff_len = new_len;
}
}
spin_unlock_bh(&bat_priv->tt_buff_lock);
- return tot_changes;
+ return count;
}
-int tt_local_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_common_entry *tt_common_entry;
- struct hard_iface *primary_if;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
int ret = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -343,7 +436,7 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
goto out;
}
- if (primary_if->if_status != IF_ACTIVE) {
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
@@ -363,63 +456,94 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
tt_common_entry->addr,
(tt_common_entry->flags &
- TT_CLIENT_ROAM ? 'R' : '.'),
+ BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
(tt_common_entry->flags &
- TT_CLIENT_NOPURGE ? 'P' : '.'),
+ BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
(tt_common_entry->flags &
- TT_CLIENT_NEW ? 'N' : '.'),
+ BATADV_TT_CLIENT_NEW ? 'N' : '.'),
(tt_common_entry->flags &
- TT_CLIENT_PENDING ? 'X' : '.'),
+ BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
(tt_common_entry->flags &
- TT_CLIENT_WIFI ? 'W' : '.'));
+ BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
}
rcu_read_unlock();
}
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
-static void tt_local_set_pending(struct bat_priv *bat_priv,
- struct tt_local_entry *tt_local_entry,
- uint16_t flags, const char *message)
+static void
+batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
+ struct batadv_tt_local_entry *tt_local_entry,
+ uint16_t flags, const char *message)
{
- tt_local_event(bat_priv, tt_local_entry->common.addr,
- tt_local_entry->common.flags | flags);
+ batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
+ tt_local_entry->common.flags | flags);
/* The local client has to be marked as "pending to be removed" but has
* to be kept in the table in order to send it in a full table
- * response issued before the net ttvn increment (consistency check) */
- tt_local_entry->common.flags |= TT_CLIENT_PENDING;
+ * response issued before the net ttvn increment (consistency check)
+ */
+ tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
- bat_dbg(DBG_TT, bat_priv,
- "Local tt entry (%pM) pending to be removed: %s\n",
- tt_local_entry->common.addr, message);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Local tt entry (%pM) pending to be removed: %s\n",
+ tt_local_entry->common.addr, message);
}
-void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
- const char *message, bool roaming)
+void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr,
+ const char *message, bool roaming)
{
- struct tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
+ uint16_t flags;
- tt_local_entry = tt_local_hash_find(bat_priv, addr);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
if (!tt_local_entry)
goto out;
- tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
- (roaming ? TT_CLIENT_ROAM : NO_FLAGS), message);
+ flags = BATADV_TT_CLIENT_DEL;
+ if (roaming)
+ flags |= BATADV_TT_CLIENT_ROAM;
+
+ batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
out:
if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
}
-static void tt_local_purge(struct bat_priv *bat_priv)
+static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
+ struct hlist_head *head)
{
- struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_local_entry *tt_local_entry;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
struct hlist_node *node, *node_tmp;
+
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
+ hash_entry) {
+ tt_local_entry = container_of(tt_common_entry,
+ struct batadv_tt_local_entry,
+ common);
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
+ continue;
+
+ /* entry already marked for deletion */
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
+ continue;
+
+ if (!batadv_has_timed_out(tt_local_entry->last_seen,
+ BATADV_TT_LOCAL_TIMEOUT))
+ continue;
+
+ batadv_tt_local_set_pending(bat_priv, tt_local_entry,
+ BATADV_TT_CLIENT_DEL, "timed out");
+ }
+}
+
+static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash = bat_priv->tt_local_hash;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
uint32_t i;
@@ -429,36 +553,18 @@ static void tt_local_purge(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
- head, hash_entry) {
- tt_local_entry = container_of(tt_common_entry,
- struct tt_local_entry,
- common);
- if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
- continue;
-
- /* entry already marked for deletion */
- if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
- continue;
-
- if (!has_timed_out(tt_local_entry->last_seen,
- TT_LOCAL_TIMEOUT))
- continue;
-
- tt_local_set_pending(bat_priv, tt_local_entry,
- TT_CLIENT_DEL, "timed out");
- }
+ batadv_tt_local_purge_list(bat_priv, head);
spin_unlock_bh(list_lock);
}
}
-static void tt_local_table_free(struct bat_priv *bat_priv)
+static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
- struct tt_common_entry *tt_common_entry;
- struct tt_local_entry *tt_local_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
uint32_t i;
@@ -476,35 +582,35 @@ static void tt_local_table_free(struct bat_priv *bat_priv)
hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
hlist_del_rcu(node);
- tt_local_entry = container_of(tt_common_entry,
- struct tt_local_entry,
- common);
- tt_local_entry_free_ref(tt_local_entry);
+ tt_local = container_of(tt_common_entry,
+ struct batadv_tt_local_entry,
+ common);
+ batadv_tt_local_entry_free_ref(tt_local);
}
spin_unlock_bh(list_lock);
}
- hash_destroy(hash);
+ batadv_hash_destroy(hash);
bat_priv->tt_local_hash = NULL;
}
-static int tt_global_init(struct bat_priv *bat_priv)
+static int batadv_tt_global_init(struct batadv_priv *bat_priv)
{
if (bat_priv->tt_global_hash)
- return 1;
+ return 0;
- bat_priv->tt_global_hash = hash_new(1024);
+ bat_priv->tt_global_hash = batadv_hash_new(1024);
if (!bat_priv->tt_global_hash)
- return 0;
+ return -ENOMEM;
- return 1;
+ return 0;
}
-static void tt_changes_list_free(struct bat_priv *bat_priv)
+static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
{
- struct tt_change_node *entry, *safe;
+ struct batadv_tt_change_node *entry, *safe;
spin_lock_bh(&bat_priv->tt_changes_list_lock);
@@ -521,10 +627,11 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
/* find out if an orig_node is already in the list of a tt_global_entry.
* returns 1 if found, 0 otherwise
*/
-static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
- const struct orig_node *orig_node)
+static bool
+batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
+ const struct batadv_orig_node *orig_node)
{
- struct tt_orig_list_entry *tmp_orig_entry;
+ struct batadv_tt_orig_list_entry *tmp_orig_entry;
const struct hlist_head *head;
struct hlist_node *node;
bool found = false;
@@ -541,11 +648,11 @@ static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
return found;
}
-static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
- struct orig_node *orig_node,
- int ttvn)
+static void
+batadv_tt_global_add_orig_entry(struct batadv_tt_global_entry *tt_global_entry,
+ struct batadv_orig_node *orig_node, int ttvn)
{
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
if (!orig_entry)
@@ -564,91 +671,95 @@ static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
}
/* caller must hold orig_node refcount */
-int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
- bool wifi)
+int batadv_tt_global_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_addr, uint8_t flags,
+ uint8_t ttvn)
{
- struct tt_global_entry *tt_global_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
int ret = 0;
int hash_added;
+ struct batadv_tt_common_entry *common;
- tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
if (!tt_global_entry) {
- tt_global_entry = kzalloc(sizeof(*tt_global_entry),
- GFP_ATOMIC);
+ tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
if (!tt_global_entry)
goto out;
- memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
+ common = &tt_global_entry->common;
+ memcpy(common->addr, tt_addr, ETH_ALEN);
- tt_global_entry->common.flags = NO_FLAGS;
+ common->flags = flags;
tt_global_entry->roam_at = 0;
- atomic_set(&tt_global_entry->common.refcount, 2);
+ atomic_set(&common->refcount, 2);
INIT_HLIST_HEAD(&tt_global_entry->orig_list);
spin_lock_init(&tt_global_entry->list_lock);
- hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
- choose_orig, &tt_global_entry->common,
- &tt_global_entry->common.hash_entry);
+ hash_added = batadv_hash_add(bat_priv->tt_global_hash,
+ batadv_compare_tt,
+ batadv_choose_orig, common,
+ &common->hash_entry);
if (unlikely(hash_added != 0)) {
/* remove the reference for the hash */
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
goto out_remove;
}
- tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
+ batadv_tt_global_add_orig_entry(tt_global_entry, orig_node,
+ ttvn);
} else {
/* there is already a global entry, use this one. */
- /* If there is the TT_CLIENT_ROAM flag set, there is only one
- * originator left in the list and we previously received a
+ /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
+ * one originator left in the list and we previously received a
* delete + roaming change for this originator.
*
* We should first delete the old originator before adding the
* new one.
*/
- if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
- tt_global_del_orig_list(tt_global_entry);
- tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
+ if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
+ batadv_tt_global_del_orig_list(tt_global_entry);
+ tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
tt_global_entry->roam_at = 0;
}
- if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
- tt_global_add_orig_entry(tt_global_entry, orig_node,
- ttvn);
+ if (!batadv_tt_global_entry_has_orig(tt_global_entry,
+ orig_node))
+ batadv_tt_global_add_orig_entry(tt_global_entry,
+ orig_node, ttvn);
}
- if (wifi)
- tt_global_entry->common.flags |= TT_CLIENT_WIFI;
-
- bat_dbg(DBG_TT, bat_priv,
- "Creating new global tt entry: %pM (via %pM)\n",
- tt_global_entry->common.addr, orig_node->orig);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Creating new global tt entry: %pM (via %pM)\n",
+ tt_global_entry->common.addr, orig_node->orig);
out_remove:
/* remove address from local hash if present */
- tt_local_remove(bat_priv, tt_global_entry->common.addr,
- "global tt received", roaming);
+ batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
+ "global tt received",
+ flags & BATADV_TT_CLIENT_ROAM);
ret = 1;
out:
if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
return ret;
}
/* print all orig nodes who announce the address for this global entry.
* it is assumed that the caller holds rcu_read_lock();
*/
-static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
- struct seq_file *seq)
+static void
+batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
+ struct seq_file *seq)
{
struct hlist_head *head;
struct hlist_node *node;
- struct tt_orig_list_entry *orig_entry;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
uint16_t flags;
uint8_t last_ttvn;
@@ -662,25 +773,25 @@ static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
tt_global_entry->common.addr, orig_entry->ttvn,
orig_entry->orig_node->orig, last_ttvn,
- (flags & TT_CLIENT_ROAM ? 'R' : '.'),
- (flags & TT_CLIENT_WIFI ? 'W' : '.'));
+ (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
+ (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
}
}
-int tt_global_seq_print_text(struct seq_file *seq, void *offset)
+int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hashtable_t *hash = bat_priv->tt_global_hash;
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry;
- struct hard_iface *primary_if;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global;
+ struct batadv_hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
int ret = 0;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - please specify interfaces to enable it\n",
@@ -688,7 +799,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
goto out;
}
- if (primary_if->if_status != IF_ACTIVE) {
+ if (primary_if->if_status != BATADV_IF_ACTIVE) {
ret = seq_printf(seq,
"BATMAN mesh %s disabled - primary interface not active\n",
net_dev->name);
@@ -707,87 +818,91 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
- tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry,
- common);
- tt_global_print_entry(tt_global_entry, seq);
+ tt_global = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+ batadv_tt_global_print_entry(tt_global, seq);
}
rcu_read_unlock();
}
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
/* deletes the orig list of a tt_global_entry */
-static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
+static void
+batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
{
struct hlist_head *head;
struct hlist_node *node, *safe;
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
spin_lock_bh(&tt_global_entry->list_lock);
head = &tt_global_entry->orig_list;
hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
hlist_del_rcu(node);
- tt_orig_list_entry_free_ref(orig_entry);
+ batadv_tt_orig_list_entry_free_ref(orig_entry);
}
spin_unlock_bh(&tt_global_entry->list_lock);
}
-static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
- struct tt_global_entry *tt_global_entry,
- struct orig_node *orig_node,
- const char *message)
+static void
+batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry,
+ struct batadv_orig_node *orig_node,
+ const char *message)
{
struct hlist_head *head;
struct hlist_node *node, *safe;
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
spin_lock_bh(&tt_global_entry->list_lock);
head = &tt_global_entry->orig_list;
hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
if (orig_entry->orig_node == orig_node) {
- bat_dbg(DBG_TT, bat_priv,
- "Deleting %pM from global tt entry %pM: %s\n",
- orig_node->orig, tt_global_entry->common.addr,
- message);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting %pM from global tt entry %pM: %s\n",
+ orig_node->orig,
+ tt_global_entry->common.addr, message);
hlist_del_rcu(node);
- tt_orig_list_entry_free_ref(orig_entry);
+ batadv_tt_orig_list_entry_free_ref(orig_entry);
}
}
spin_unlock_bh(&tt_global_entry->list_lock);
}
-static void tt_global_del_struct(struct bat_priv *bat_priv,
- struct tt_global_entry *tt_global_entry,
- const char *message)
+static void
+batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry,
+ const char *message)
{
- bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry %pM: %s\n",
- tt_global_entry->common.addr, message);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry %pM: %s\n",
+ tt_global_entry->common.addr, message);
- hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
- tt_global_entry->common.addr);
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_hash_remove(bat_priv->tt_global_hash, batadv_compare_tt,
+ batadv_choose_orig, tt_global_entry->common.addr);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
}
/* If the client is to be deleted, we check if it is the last origantor entry
- * within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer,
- * otherwise we simply remove the originator scheduled for deletion.
+ * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
+ * timer, otherwise we simply remove the originator scheduled for deletion.
*/
-static void tt_global_del_roaming(struct bat_priv *bat_priv,
- struct tt_global_entry *tt_global_entry,
- struct orig_node *orig_node,
- const char *message)
+static void
+batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
+ struct batadv_tt_global_entry *tt_global_entry,
+ struct batadv_orig_node *orig_node,
+ const char *message)
{
bool last_entry = true;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
/* no local entry exists, case 1:
* Check if this is the last one or if other entries exist.
@@ -805,37 +920,37 @@ static void tt_global_del_roaming(struct bat_priv *bat_priv,
if (last_entry) {
/* its the last one, mark for roaming. */
- tt_global_entry->common.flags |= TT_CLIENT_ROAM;
+ tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
tt_global_entry->roam_at = jiffies;
} else
/* there is another entry, we can simply delete this
* one and can still use the other one.
*/
- tt_global_del_orig_entry(bat_priv, tt_global_entry,
- orig_node, message);
+ batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
+ orig_node, message);
}
-static void tt_global_del(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const unsigned char *addr,
- const char *message, bool roaming)
+static void batadv_tt_global_del(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr,
+ const char *message, bool roaming)
{
- struct tt_global_entry *tt_global_entry = NULL;
- struct tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
+ struct batadv_tt_local_entry *local_entry = NULL;
- tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
goto out;
if (!roaming) {
- tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
- message);
+ batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
+ orig_node, message);
if (hlist_empty(&tt_global_entry->orig_list))
- tt_global_del_struct(bat_priv, tt_global_entry,
- message);
+ batadv_tt_global_del_struct(bat_priv, tt_global_entry,
+ message);
goto out;
}
@@ -844,41 +959,42 @@ static void tt_global_del(struct bat_priv *bat_priv,
* event, there are two possibilities:
* 1) the client roamed from node A to node B => if there
* is only one originator left for this client, we mark
- * it with TT_CLIENT_ROAM, we start a timer and we
+ * it with BATADV_TT_CLIENT_ROAM, we start a timer and we
* wait for node B to claim it. In case of timeout
* the entry is purged.
*
* If there are other originators left, we directly delete
* the originator.
* 2) the client roamed to us => we can directly delete
- * the global entry, since it is useless now. */
-
- tt_local_entry = tt_local_hash_find(bat_priv,
- tt_global_entry->common.addr);
- if (tt_local_entry) {
+ * the global entry, since it is useless now.
+ */
+ local_entry = batadv_tt_local_hash_find(bat_priv,
+ tt_global_entry->common.addr);
+ if (local_entry) {
/* local entry exists, case 2: client roamed to us. */
- tt_global_del_orig_list(tt_global_entry);
- tt_global_del_struct(bat_priv, tt_global_entry, message);
+ batadv_tt_global_del_orig_list(tt_global_entry);
+ batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
} else
/* no local entry exists, case 1: check for roaming */
- tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
- message);
+ batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
+ orig_node, message);
out:
if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
- if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
+ if (local_entry)
+ batadv_tt_local_entry_free_ref(local_entry);
}
-void tt_global_del_orig(struct bat_priv *bat_priv,
- struct orig_node *orig_node, const char *message)
+void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const char *message)
{
- struct tt_global_entry *tt_global_entry;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global;
+ struct batadv_tt_common_entry *tt_common_entry;
uint32_t i;
- struct hashtable_t *hash = bat_priv->tt_global_hash;
+ struct batadv_hashtable *hash = bat_priv->tt_global_hash;
struct hlist_node *node, *safe;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -893,20 +1009,19 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(tt_common_entry, node, safe,
head, hash_entry) {
- tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry,
- common);
-
- tt_global_del_orig_entry(bat_priv, tt_global_entry,
- orig_node, message);
-
- if (hlist_empty(&tt_global_entry->orig_list)) {
- bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry %pM: %s\n",
- tt_global_entry->common.addr,
- message);
+ tt_global = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+
+ batadv_tt_global_del_orig_entry(bat_priv, tt_global,
+ orig_node, message);
+
+ if (hlist_empty(&tt_global->orig_list)) {
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry %pM: %s\n",
+ tt_global->common.addr, message);
hlist_del_rcu(node);
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global);
}
}
spin_unlock_bh(list_lock);
@@ -914,12 +1029,36 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
orig_node->tt_initialised = false;
}
-static void tt_global_roam_purge(struct bat_priv *bat_priv)
+static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
+ struct hlist_head *head)
{
- struct hashtable_t *hash = bat_priv->tt_global_hash;
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global_entry;
struct hlist_node *node, *node_tmp;
+
+ hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
+ hash_entry) {
+ tt_global_entry = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+ if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
+ continue;
+ if (!batadv_has_timed_out(tt_global_entry->roam_at,
+ BATADV_TT_CLIENT_ROAM_TIMEOUT))
+ continue;
+
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting global tt entry (%pM): Roaming timeout\n",
+ tt_global_entry->common.addr);
+
+ hlist_del_rcu(node);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
+ }
+}
+
+static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
+{
+ struct batadv_hashtable *hash = bat_priv->tt_global_hash;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
uint32_t i;
@@ -929,35 +1068,18 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
- head, hash_entry) {
- tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry,
- common);
- if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
- continue;
- if (!has_timed_out(tt_global_entry->roam_at,
- TT_CLIENT_ROAM_TIMEOUT))
- continue;
-
- bat_dbg(DBG_TT, bat_priv,
- "Deleting global tt entry (%pM): Roaming timeout\n",
- tt_global_entry->common.addr);
-
- hlist_del_rcu(node);
- tt_global_entry_free_ref(tt_global_entry);
- }
+ batadv_tt_global_roam_purge_list(bat_priv, head);
spin_unlock_bh(list_lock);
}
}
-static void tt_global_table_free(struct bat_priv *bat_priv)
+static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash;
+ struct batadv_hashtable *hash;
spinlock_t *list_lock; /* protects write access to the hash lists */
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_global_entry *tt_global;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
uint32_t i;
@@ -975,56 +1097,60 @@ static void tt_global_table_free(struct bat_priv *bat_priv)
hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
head, hash_entry) {
hlist_del_rcu(node);
- tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry,
- common);
- tt_global_entry_free_ref(tt_global_entry);
+ tt_global = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
+ common);
+ batadv_tt_global_entry_free_ref(tt_global);
}
spin_unlock_bh(list_lock);
}
- hash_destroy(hash);
+ batadv_hash_destroy(hash);
bat_priv->tt_global_hash = NULL;
}
-static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
- struct tt_global_entry *tt_global_entry)
+static bool
+_batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
+ struct batadv_tt_global_entry *tt_global_entry)
{
bool ret = false;
- if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
- tt_global_entry->common.flags & TT_CLIENT_WIFI)
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
+ tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
ret = true;
return ret;
}
-struct orig_node *transtable_search(struct bat_priv *bat_priv,
- const uint8_t *src, const uint8_t *addr)
+struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
+ const uint8_t *src,
+ const uint8_t *addr)
{
- struct tt_local_entry *tt_local_entry = NULL;
- struct tt_global_entry *tt_global_entry = NULL;
- struct orig_node *orig_node = NULL;
- struct neigh_node *router = NULL;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *router = NULL;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_orig_list_entry *orig_entry;
+ struct batadv_tt_orig_list_entry *orig_entry;
int best_tq;
if (src && atomic_read(&bat_priv->ap_isolation)) {
- tt_local_entry = tt_local_hash_find(bat_priv, src);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
if (!tt_local_entry)
goto out;
}
- tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
goto out;
/* check whether the clients should not communicate due to AP
- * isolation */
- if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
+ * isolation
+ */
+ if (tt_local_entry &&
+ _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
goto out;
best_tq = 0;
@@ -1032,7 +1158,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
rcu_read_lock();
head = &tt_global_entry->orig_list;
hlist_for_each_entry_rcu(orig_entry, node, head, list) {
- router = orig_node_get_router(orig_entry->orig_node);
+ router = batadv_orig_node_get_router(orig_entry->orig_node);
if (!router)
continue;
@@ -1040,7 +1166,7 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
orig_node = orig_entry->orig_node;
best_tq = router->tq_avg;
}
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
/* found anything? */
if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
@@ -1048,21 +1174,21 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
rcu_read_unlock();
out:
if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
return orig_node;
}
/* Calculates the checksum of the local table of a given orig_node */
-static uint16_t tt_global_crc(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
+static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
uint16_t total = 0, total_one;
- struct hashtable_t *hash = bat_priv->tt_global_hash;
- struct tt_common_entry *tt_common_entry;
- struct tt_global_entry *tt_global_entry;
+ struct batadv_hashtable *hash = bat_priv->tt_global_hash;
+ struct batadv_tt_common_entry *tt_common;
+ struct batadv_tt_global_entry *tt_global;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
@@ -1072,30 +1198,29 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv,
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, node,
- head, hash_entry) {
- tt_global_entry = container_of(tt_common_entry,
- struct tt_global_entry,
- common);
+ hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
+ tt_global = container_of(tt_common,
+ struct batadv_tt_global_entry,
+ common);
/* Roaming clients are in the global table for
* consistency only. They don't have to be
* taken into account while computing the
* global crc
*/
- if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
+ if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
continue;
/* find out if this global entry is announced by this
* originator
*/
- if (!tt_global_entry_has_orig(tt_global_entry,
- orig_node))
+ if (!batadv_tt_global_entry_has_orig(tt_global,
+ orig_node))
continue;
total_one = 0;
for (j = 0; j < ETH_ALEN; j++)
total_one = crc16_byte(total_one,
- tt_global_entry->common.addr[j]);
+ tt_common->addr[j]);
total ^= total_one;
}
rcu_read_unlock();
@@ -1105,11 +1230,11 @@ static uint16_t tt_global_crc(struct bat_priv *bat_priv,
}
/* Calculates the checksum of the local table */
-uint16_t tt_local_crc(struct bat_priv *bat_priv)
+static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
{
uint16_t total = 0, total_one;
- struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+ struct batadv_tt_common_entry *tt_common;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
@@ -1119,16 +1244,16 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
head = &hash->table[i];
rcu_read_lock();
- hlist_for_each_entry_rcu(tt_common_entry, node,
- head, hash_entry) {
+ hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
/* not yet committed clients have not to be taken into
- * account while computing the CRC */
- if (tt_common_entry->flags & TT_CLIENT_NEW)
+ * account while computing the CRC
+ */
+ if (tt_common->flags & BATADV_TT_CLIENT_NEW)
continue;
total_one = 0;
for (j = 0; j < ETH_ALEN; j++)
total_one = crc16_byte(total_one,
- tt_common_entry->addr[j]);
+ tt_common->addr[j]);
total ^= total_one;
}
rcu_read_unlock();
@@ -1137,9 +1262,9 @@ uint16_t tt_local_crc(struct bat_priv *bat_priv)
return total;
}
-static void tt_req_list_free(struct bat_priv *bat_priv)
+static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
{
- struct tt_req_node *node, *safe;
+ struct batadv_tt_req_node *node, *safe;
spin_lock_bh(&bat_priv->tt_req_list_lock);
@@ -1151,15 +1276,16 @@ static void tt_req_list_free(struct bat_priv *bat_priv)
spin_unlock_bh(&bat_priv->tt_req_list_lock);
}
-static void tt_save_orig_buffer(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- const unsigned char *tt_buff,
- uint8_t tt_num_changes)
+static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_buff,
+ uint8_t tt_num_changes)
{
- uint16_t tt_buff_len = tt_len(tt_num_changes);
+ uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
/* Replace the old buffer only if I received something in the
- * last OGM (the OGM could carry no changes) */
+ * last OGM (the OGM could carry no changes)
+ */
spin_lock_bh(&orig_node->tt_buff_lock);
if (tt_buff_len > 0) {
kfree(orig_node->tt_buff);
@@ -1173,13 +1299,14 @@ static void tt_save_orig_buffer(struct bat_priv *bat_priv,
spin_unlock_bh(&orig_node->tt_buff_lock);
}
-static void tt_req_purge(struct bat_priv *bat_priv)
+static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
{
- struct tt_req_node *node, *safe;
+ struct batadv_tt_req_node *node, *safe;
spin_lock_bh(&bat_priv->tt_req_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
- if (has_timed_out(node->issued_at, TT_REQUEST_TIMEOUT)) {
+ if (batadv_has_timed_out(node->issued_at,
+ BATADV_TT_REQUEST_TIMEOUT)) {
list_del(&node->list);
kfree(node);
}
@@ -1188,17 +1315,19 @@ static void tt_req_purge(struct bat_priv *bat_priv)
}
/* returns the pointer to the new tt_req_node struct if no request
- * has already been issued for this orig_node, NULL otherwise */
-static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
- struct orig_node *orig_node)
+ * has already been issued for this orig_node, NULL otherwise
+ */
+static struct batadv_tt_req_node *
+batadv_new_tt_req_node(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node)
{
- struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
+ struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
spin_lock_bh(&bat_priv->tt_req_list_lock);
list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
- if (compare_eth(tt_req_node_tmp, orig_node) &&
- !has_timed_out(tt_req_node_tmp->issued_at,
- TT_REQUEST_TIMEOUT))
+ if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
+ !batadv_has_timed_out(tt_req_node_tmp->issued_at,
+ BATADV_TT_REQUEST_TIMEOUT))
goto unlock;
}
@@ -1216,63 +1345,67 @@ unlock:
}
/* data_ptr is useless here, but has to be kept to respect the prototype */
-static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
+static int batadv_tt_local_valid_entry(const void *entry_ptr,
+ const void *data_ptr)
{
- const struct tt_common_entry *tt_common_entry = entry_ptr;
+ const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
- if (tt_common_entry->flags & TT_CLIENT_NEW)
+ if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
return 0;
return 1;
}
-static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
+static int batadv_tt_global_valid(const void *entry_ptr,
+ const void *data_ptr)
{
- const struct tt_common_entry *tt_common_entry = entry_ptr;
- const struct tt_global_entry *tt_global_entry;
- const struct orig_node *orig_node = data_ptr;
+ const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
+ const struct batadv_tt_global_entry *tt_global_entry;
+ const struct batadv_orig_node *orig_node = data_ptr;
- if (tt_common_entry->flags & TT_CLIENT_ROAM)
+ if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
return 0;
- tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
+ tt_global_entry = container_of(tt_common_entry,
+ struct batadv_tt_global_entry,
common);
- return tt_global_entry_has_orig(tt_global_entry, orig_node);
+ return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
}
-static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
- struct hashtable_t *hash,
- struct hard_iface *primary_if,
- int (*valid_cb)(const void *,
- const void *),
- void *cb_data)
+static struct sk_buff *
+batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
+ struct batadv_hashtable *hash,
+ struct batadv_hard_iface *primary_if,
+ int (*valid_cb)(const void *, const void *),
+ void *cb_data)
{
- struct tt_common_entry *tt_common_entry;
- struct tt_query_packet *tt_response;
- struct tt_change *tt_change;
+ struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_query_packet *tt_response;
+ struct batadv_tt_change *tt_change;
struct hlist_node *node;
struct hlist_head *head;
struct sk_buff *skb = NULL;
uint16_t tt_tot, tt_count;
- ssize_t tt_query_size = sizeof(struct tt_query_packet);
+ ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
uint32_t i;
+ size_t len;
if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
tt_len = primary_if->soft_iface->mtu - tt_query_size;
- tt_len -= tt_len % sizeof(struct tt_change);
+ tt_len -= tt_len % sizeof(struct batadv_tt_change);
}
- tt_tot = tt_len / sizeof(struct tt_change);
+ tt_tot = tt_len / sizeof(struct batadv_tt_change);
- skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
+ len = tt_query_size + tt_len;
+ skb = dev_alloc_skb(len + ETH_HLEN);
if (!skb)
goto out;
skb_reserve(skb, ETH_HLEN);
- tt_response = (struct tt_query_packet *)skb_put(skb,
- tt_query_size + tt_len);
+ tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
tt_response->ttvn = ttvn;
- tt_change = (struct tt_change *)(skb->data + tt_query_size);
+ tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
tt_count = 0;
rcu_read_lock();
@@ -1289,7 +1422,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
memcpy(tt_change->addr, tt_common_entry->addr,
ETH_ALEN);
- tt_change->flags = NO_FLAGS;
+ tt_change->flags = BATADV_NO_FLAGS;
tt_count++;
tt_change++;
@@ -1298,72 +1431,78 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
rcu_read_unlock();
/* store in the message the number of entries we have successfully
- * copied */
+ * copied
+ */
tt_response->tt_data = htons(tt_count);
out:
return skb;
}
-static int send_tt_request(struct bat_priv *bat_priv,
- struct orig_node *dst_orig_node,
- uint8_t ttvn, uint16_t tt_crc, bool full_table)
+static int batadv_send_tt_request(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *dst_orig_node,
+ uint8_t ttvn, uint16_t tt_crc,
+ bool full_table)
{
struct sk_buff *skb = NULL;
- struct tt_query_packet *tt_request;
- struct neigh_node *neigh_node = NULL;
- struct hard_iface *primary_if;
- struct tt_req_node *tt_req_node = NULL;
+ struct batadv_tt_query_packet *tt_request;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_hard_iface *primary_if;
+ struct batadv_tt_req_node *tt_req_node = NULL;
int ret = 1;
+ size_t tt_req_len;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* The new tt_req will be issued only if I'm not waiting for a
- * reply from the same orig_node yet */
- tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
+ * reply from the same orig_node yet
+ */
+ tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
if (!tt_req_node)
goto out;
- skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
+ skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN);
if (!skb)
goto out;
skb_reserve(skb, ETH_HLEN);
- tt_request = (struct tt_query_packet *)skb_put(skb,
- sizeof(struct tt_query_packet));
+ tt_req_len = sizeof(*tt_request);
+ tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
- tt_request->header.packet_type = BAT_TT_QUERY;
- tt_request->header.version = COMPAT_VERSION;
+ tt_request->header.packet_type = BATADV_TT_QUERY;
+ tt_request->header.version = BATADV_COMPAT_VERSION;
memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
- tt_request->header.ttl = TTL;
+ tt_request->header.ttl = BATADV_TTL;
tt_request->ttvn = ttvn;
tt_request->tt_data = htons(tt_crc);
- tt_request->flags = TT_REQUEST;
+ tt_request->flags = BATADV_TT_REQUEST;
if (full_table)
- tt_request->flags |= TT_FULL_TABLE;
+ tt_request->flags |= BATADV_TT_FULL_TABLE;
- neigh_node = orig_node_get_router(dst_orig_node);
+ neigh_node = batadv_orig_node_get_router(dst_orig_node);
if (!neigh_node)
goto out;
- bat_dbg(DBG_TT, bat_priv,
- "Sending TT_REQUEST to %pM via %pM [%c]\n",
- dst_orig_node->orig, neigh_node->addr,
- (full_table ? 'F' : '.'));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending TT_REQUEST to %pM via %pM [%c]\n",
+ dst_orig_node->orig, neigh_node->addr,
+ (full_table ? 'F' : '.'));
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (ret)
kfree_skb(skb);
if (ret && tt_req_node) {
@@ -1375,39 +1514,42 @@ out:
return ret;
}
-static bool send_other_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_request)
+static bool
+batadv_send_other_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_request)
{
- struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
- struct neigh_node *neigh_node = NULL;
- struct hard_iface *primary_if = NULL;
+ struct batadv_orig_node *req_dst_orig_node = NULL;
+ struct batadv_orig_node *res_dst_orig_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
uint8_t orig_ttvn, req_ttvn, ttvn;
int ret = false;
unsigned char *tt_buff;
bool full_table;
uint16_t tt_len, tt_tot;
struct sk_buff *skb = NULL;
- struct tt_query_packet *tt_response;
+ struct batadv_tt_query_packet *tt_response;
+ size_t len;
- bat_dbg(DBG_TT, bat_priv,
- "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
- tt_request->src, tt_request->ttvn, tt_request->dst,
- (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
+ tt_request->src, tt_request->ttvn, tt_request->dst,
+ (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
/* Let's get the orig node of the REAL destination */
- req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
+ req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
if (!req_dst_orig_node)
goto out;
- res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
+ res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
if (!res_dst_orig_node)
goto out;
- neigh_node = orig_node_get_router(res_dst_orig_node);
+ neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
if (!neigh_node)
goto out;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
@@ -1416,71 +1558,75 @@ static bool send_other_tt_response(struct bat_priv *bat_priv,
/* I don't have the requested data */
if (orig_ttvn != req_ttvn ||
- tt_request->tt_data != req_dst_orig_node->tt_crc)
+ tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
goto out;
/* If the full table has been explicitly requested */
- if (tt_request->flags & TT_FULL_TABLE ||
+ if (tt_request->flags & BATADV_TT_FULL_TABLE ||
!req_dst_orig_node->tt_buff)
full_table = true;
else
full_table = false;
/* In this version, fragmentation is not implemented, then
- * I'll send only one packet with as much TT entries as I can */
+ * I'll send only one packet with as much TT entries as I can
+ */
if (!full_table) {
spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
tt_len = req_dst_orig_node->tt_buff_len;
- tt_tot = tt_len / sizeof(struct tt_change);
+ tt_tot = tt_len / sizeof(struct batadv_tt_change);
- skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
- tt_len + ETH_HLEN);
+ len = sizeof(*tt_response) + tt_len;
+ skb = dev_alloc_skb(len + ETH_HLEN);
if (!skb)
goto unlock;
skb_reserve(skb, ETH_HLEN);
- tt_response = (struct tt_query_packet *)skb_put(skb,
- sizeof(struct tt_query_packet) + tt_len);
+ tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
+ len);
tt_response->ttvn = req_ttvn;
tt_response->tt_data = htons(tt_tot);
- tt_buff = skb->data + sizeof(struct tt_query_packet);
+ tt_buff = skb->data + sizeof(*tt_response);
/* Copy the last orig_node's OGM buffer */
memcpy(tt_buff, req_dst_orig_node->tt_buff,
req_dst_orig_node->tt_buff_len);
spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
} else {
- tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
- sizeof(struct tt_change);
+ tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
+ tt_len *= sizeof(struct batadv_tt_change);
ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
- skb = tt_response_fill_table(tt_len, ttvn,
- bat_priv->tt_global_hash,
- primary_if, tt_global_valid_entry,
- req_dst_orig_node);
+ skb = batadv_tt_response_fill_table(tt_len, ttvn,
+ bat_priv->tt_global_hash,
+ primary_if,
+ batadv_tt_global_valid,
+ req_dst_orig_node);
if (!skb)
goto out;
- tt_response = (struct tt_query_packet *)skb->data;
+ tt_response = (struct batadv_tt_query_packet *)skb->data;
}
- tt_response->header.packet_type = BAT_TT_QUERY;
- tt_response->header.version = COMPAT_VERSION;
- tt_response->header.ttl = TTL;
+ tt_response->header.packet_type = BATADV_TT_QUERY;
+ tt_response->header.version = BATADV_COMPAT_VERSION;
+ tt_response->header.ttl = BATADV_TTL;
memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
- tt_response->flags = TT_RESPONSE;
+ tt_response->flags = BATADV_TT_RESPONSE;
if (full_table)
- tt_response->flags |= TT_FULL_TABLE;
+ tt_response->flags |= BATADV_TT_FULL_TABLE;
- bat_dbg(DBG_TT, bat_priv,
- "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
- res_dst_orig_node->orig, neigh_node->addr,
- req_dst_orig_node->orig, req_ttvn);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
+ res_dst_orig_node->orig, neigh_node->addr,
+ req_dst_orig_node->orig, req_ttvn);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
+
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = true;
goto out;
@@ -1489,114 +1635,122 @@ unlock:
out:
if (res_dst_orig_node)
- orig_node_free_ref(res_dst_orig_node);
+ batadv_orig_node_free_ref(res_dst_orig_node);
if (req_dst_orig_node)
- orig_node_free_ref(req_dst_orig_node);
+ batadv_orig_node_free_ref(req_dst_orig_node);
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (!ret)
kfree_skb(skb);
return ret;
}
-static bool send_my_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_request)
+
+static bool
+batadv_send_my_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_request)
{
- struct orig_node *orig_node = NULL;
- struct neigh_node *neigh_node = NULL;
- struct hard_iface *primary_if = NULL;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
+ struct batadv_hard_iface *primary_if = NULL;
uint8_t my_ttvn, req_ttvn, ttvn;
int ret = false;
unsigned char *tt_buff;
bool full_table;
uint16_t tt_len, tt_tot;
struct sk_buff *skb = NULL;
- struct tt_query_packet *tt_response;
+ struct batadv_tt_query_packet *tt_response;
+ size_t len;
- bat_dbg(DBG_TT, bat_priv,
- "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
- tt_request->src, tt_request->ttvn,
- (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
+ tt_request->src, tt_request->ttvn,
+ (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
req_ttvn = tt_request->ttvn;
- orig_node = orig_hash_find(bat_priv, tt_request->src);
+ orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
if (!orig_node)
goto out;
- neigh_node = orig_node_get_router(orig_node);
+ neigh_node = batadv_orig_node_get_router(orig_node);
if (!neigh_node)
goto out;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
/* If the full table has been explicitly requested or the gap
- * is too big send the whole local translation table */
- if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
+ * is too big send the whole local translation table
+ */
+ if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
!bat_priv->tt_buff)
full_table = true;
else
full_table = false;
/* In this version, fragmentation is not implemented, then
- * I'll send only one packet with as much TT entries as I can */
+ * I'll send only one packet with as much TT entries as I can
+ */
if (!full_table) {
spin_lock_bh(&bat_priv->tt_buff_lock);
tt_len = bat_priv->tt_buff_len;
- tt_tot = tt_len / sizeof(struct tt_change);
+ tt_tot = tt_len / sizeof(struct batadv_tt_change);
- skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
- tt_len + ETH_HLEN);
+ len = sizeof(*tt_response) + tt_len;
+ skb = dev_alloc_skb(len + ETH_HLEN);
if (!skb)
goto unlock;
skb_reserve(skb, ETH_HLEN);
- tt_response = (struct tt_query_packet *)skb_put(skb,
- sizeof(struct tt_query_packet) + tt_len);
+ tt_response = (struct batadv_tt_query_packet *)skb_put(skb,
+ len);
tt_response->ttvn = req_ttvn;
tt_response->tt_data = htons(tt_tot);
- tt_buff = skb->data + sizeof(struct tt_query_packet);
+ tt_buff = skb->data + sizeof(*tt_response);
memcpy(tt_buff, bat_priv->tt_buff,
bat_priv->tt_buff_len);
spin_unlock_bh(&bat_priv->tt_buff_lock);
} else {
- tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
- sizeof(struct tt_change);
+ tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt);
+ tt_len *= sizeof(struct batadv_tt_change);
ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
- skb = tt_response_fill_table(tt_len, ttvn,
- bat_priv->tt_local_hash,
- primary_if, tt_local_valid_entry,
- NULL);
+ skb = batadv_tt_response_fill_table(tt_len, ttvn,
+ bat_priv->tt_local_hash,
+ primary_if,
+ batadv_tt_local_valid_entry,
+ NULL);
if (!skb)
goto out;
- tt_response = (struct tt_query_packet *)skb->data;
+ tt_response = (struct batadv_tt_query_packet *)skb->data;
}
- tt_response->header.packet_type = BAT_TT_QUERY;
- tt_response->header.version = COMPAT_VERSION;
- tt_response->header.ttl = TTL;
+ tt_response->header.packet_type = BATADV_TT_QUERY;
+ tt_response->header.version = BATADV_COMPAT_VERSION;
+ tt_response->header.ttl = BATADV_TTL;
memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
- tt_response->flags = TT_RESPONSE;
+ tt_response->flags = BATADV_TT_RESPONSE;
if (full_table)
- tt_response->flags |= TT_FULL_TABLE;
+ tt_response->flags |= BATADV_TT_FULL_TABLE;
- bat_dbg(DBG_TT, bat_priv,
- "Sending TT_RESPONSE to %pM via %pM [%c]\n",
- orig_node->orig, neigh_node->addr,
- (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending TT_RESPONSE to %pM via %pM [%c]\n",
+ orig_node->orig, neigh_node->addr,
+ (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
+
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = true;
goto out;
@@ -1604,49 +1758,50 @@ unlock:
spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
if (!ret)
kfree_skb(skb);
/* This packet was for me, so it doesn't need to be re-routed */
return true;
}
-bool send_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_request)
+bool batadv_send_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_request)
{
- if (is_my_mac(tt_request->dst)) {
+ if (batadv_is_my_mac(tt_request->dst)) {
/* don't answer backbone gws! */
- if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
return true;
- return send_my_tt_response(bat_priv, tt_request);
+ return batadv_send_my_tt_response(bat_priv, tt_request);
} else {
- return send_other_tt_response(bat_priv, tt_request);
+ return batadv_send_other_tt_response(bat_priv, tt_request);
}
}
-static void _tt_update_changes(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- struct tt_change *tt_change,
- uint16_t tt_num_changes, uint8_t ttvn)
+static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ struct batadv_tt_change *tt_change,
+ uint16_t tt_num_changes, uint8_t ttvn)
{
int i;
+ int roams;
for (i = 0; i < tt_num_changes; i++) {
- if ((tt_change + i)->flags & TT_CLIENT_DEL)
- tt_global_del(bat_priv, orig_node,
- (tt_change + i)->addr,
- "tt removed by changes",
- (tt_change + i)->flags & TT_CLIENT_ROAM);
- else
- if (!tt_global_add(bat_priv, orig_node,
- (tt_change + i)->addr, ttvn, false,
- (tt_change + i)->flags &
- TT_CLIENT_WIFI))
+ if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
+ roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
+ batadv_tt_global_del(bat_priv, orig_node,
+ (tt_change + i)->addr,
+ "tt removed by changes",
+ roams);
+ } else {
+ if (!batadv_tt_global_add(bat_priv, orig_node,
+ (tt_change + i)->addr,
+ (tt_change + i)->flags, ttvn))
/* In case of problem while storing a
* global_entry, we stop the updating
* procedure without committing the
@@ -1654,25 +1809,27 @@ static void _tt_update_changes(struct bat_priv *bat_priv,
* corrupted data on tt_request
*/
return;
+ }
}
orig_node->tt_initialised = true;
}
-static void tt_fill_gtable(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_response)
+static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_response)
{
- struct orig_node *orig_node = NULL;
+ struct batadv_orig_node *orig_node = NULL;
- orig_node = orig_hash_find(bat_priv, tt_response->src);
+ orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
if (!orig_node)
goto out;
/* Purge the old table first.. */
- tt_global_del_orig(bat_priv, orig_node, "Received full table");
+ batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
- _tt_update_changes(bat_priv, orig_node,
- (struct tt_change *)(tt_response + 1),
- tt_response->tt_data, tt_response->ttvn);
+ _batadv_tt_update_changes(bat_priv, orig_node,
+ (struct batadv_tt_change *)(tt_response + 1),
+ ntohs(tt_response->tt_data),
+ tt_response->ttvn);
spin_lock_bh(&orig_node->tt_buff_lock);
kfree(orig_node->tt_buff);
@@ -1684,71 +1841,76 @@ static void tt_fill_gtable(struct bat_priv *bat_priv,
out:
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
-static void tt_update_changes(struct bat_priv *bat_priv,
- struct orig_node *orig_node,
- uint16_t tt_num_changes, uint8_t ttvn,
- struct tt_change *tt_change)
+static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ uint16_t tt_num_changes, uint8_t ttvn,
+ struct batadv_tt_change *tt_change)
{
- _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
- ttvn);
+ _batadv_tt_update_changes(bat_priv, orig_node, tt_change,
+ tt_num_changes, ttvn);
- tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
- tt_num_changes);
+ batadv_tt_save_orig_buffer(bat_priv, orig_node,
+ (unsigned char *)tt_change, tt_num_changes);
atomic_set(&orig_node->last_ttvn, ttvn);
}
-bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
{
- struct tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
bool ret = false;
- tt_local_entry = tt_local_hash_find(bat_priv, addr);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
if (!tt_local_entry)
goto out;
/* Check if the client has been logically deleted (but is kept for
- * consistency purpose) */
- if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
+ * consistency purpose)
+ */
+ if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
goto out;
ret = true;
out:
if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
return ret;
}
-void handle_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_response)
+void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_response)
{
- struct tt_req_node *node, *safe;
- struct orig_node *orig_node = NULL;
+ struct batadv_tt_req_node *node, *safe;
+ struct batadv_orig_node *orig_node = NULL;
+ struct batadv_tt_change *tt_change;
- bat_dbg(DBG_TT, bat_priv,
- "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
- tt_response->src, tt_response->ttvn, tt_response->tt_data,
- (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
+ tt_response->src, tt_response->ttvn,
+ ntohs(tt_response->tt_data),
+ (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
/* we should have never asked a backbone gw */
- if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
goto out;
- orig_node = orig_hash_find(bat_priv, tt_response->src);
+ orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
if (!orig_node)
goto out;
- if (tt_response->flags & TT_FULL_TABLE)
- tt_fill_gtable(bat_priv, tt_response);
- else
- tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
- tt_response->ttvn,
- (struct tt_change *)(tt_response + 1));
+ if (tt_response->flags & BATADV_TT_FULL_TABLE) {
+ batadv_tt_fill_gtable(bat_priv, tt_response);
+ } else {
+ tt_change = (struct batadv_tt_change *)(tt_response + 1);
+ batadv_tt_update_changes(bat_priv, orig_node,
+ ntohs(tt_response->tt_data),
+ tt_response->ttvn, tt_change);
+ }
/* Delete the tt_req_node from pending tt_requests list */
spin_lock_bh(&bat_priv->tt_req_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
- if (!compare_eth(node->addr, tt_response->src))
+ if (!batadv_compare_eth(node->addr, tt_response->src))
continue;
list_del(&node->list);
kfree(node);
@@ -1756,31 +1918,36 @@ void handle_tt_response(struct bat_priv *bat_priv,
spin_unlock_bh(&bat_priv->tt_req_list_lock);
/* Recalculate the CRC for this orig_node and store it */
- orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+ orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
/* Roaming phase is over: tables are in sync again. I can
- * unset the flag */
+ * unset the flag
+ */
orig_node->tt_poss_change = false;
out:
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
-int tt_init(struct bat_priv *bat_priv)
+int batadv_tt_init(struct batadv_priv *bat_priv)
{
- if (!tt_local_init(bat_priv))
- return 0;
+ int ret;
- if (!tt_global_init(bat_priv))
- return 0;
+ ret = batadv_tt_local_init(bat_priv);
+ if (ret < 0)
+ return ret;
- tt_start_timer(bat_priv);
+ ret = batadv_tt_global_init(bat_priv);
+ if (ret < 0)
+ return ret;
+
+ batadv_tt_start_timer(bat_priv);
return 1;
}
-static void tt_roam_list_free(struct bat_priv *bat_priv)
+static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
{
- struct tt_roam_node *node, *safe;
+ struct batadv_tt_roam_node *node, *safe;
spin_lock_bh(&bat_priv->tt_roam_list_lock);
@@ -1792,13 +1959,14 @@ static void tt_roam_list_free(struct bat_priv *bat_priv)
spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}
-static void tt_roam_purge(struct bat_priv *bat_priv)
+static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
{
- struct tt_roam_node *node, *safe;
+ struct batadv_tt_roam_node *node, *safe;
spin_lock_bh(&bat_priv->tt_roam_list_lock);
list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
- if (!has_timed_out(node->first_time, ROAMING_MAX_TIME))
+ if (!batadv_has_timed_out(node->first_time,
+ BATADV_ROAMING_MAX_TIME))
continue;
list_del(&node->list);
@@ -1811,24 +1979,27 @@ static void tt_roam_purge(struct bat_priv *bat_priv)
* maximum number of possible roaming phases. In this case the ROAMING_ADV
* will not be sent.
*
- * returns true if the ROAMING_ADV can be sent, false otherwise */
-static bool tt_check_roam_count(struct bat_priv *bat_priv,
- uint8_t *client)
+ * returns true if the ROAMING_ADV can be sent, false otherwise
+ */
+static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
+ uint8_t *client)
{
- struct tt_roam_node *tt_roam_node;
+ struct batadv_tt_roam_node *tt_roam_node;
bool ret = false;
spin_lock_bh(&bat_priv->tt_roam_list_lock);
/* The new tt_req will be issued only if I'm not waiting for a
- * reply from the same orig_node yet */
+ * reply from the same orig_node yet
+ */
list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
- if (!compare_eth(tt_roam_node->addr, client))
+ if (!batadv_compare_eth(tt_roam_node->addr, client))
continue;
- if (has_timed_out(tt_roam_node->first_time, ROAMING_MAX_TIME))
+ if (batadv_has_timed_out(tt_roam_node->first_time,
+ BATADV_ROAMING_MAX_TIME))
continue;
- if (!atomic_dec_not_zero(&tt_roam_node->counter))
+ if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
/* Sorry, you roamed too many times! */
goto unlock;
ret = true;
@@ -1841,7 +2012,8 @@ static bool tt_check_roam_count(struct bat_priv *bat_priv,
goto unlock;
tt_roam_node->first_time = jiffies;
- atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
+ atomic_set(&tt_roam_node->counter,
+ BATADV_ROAMING_MAX_COUNT - 1);
memcpy(tt_roam_node->addr, client, ETH_ALEN);
list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
@@ -1853,97 +2025,103 @@ unlock:
return ret;
}
-static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
- struct orig_node *orig_node)
+static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
+ struct batadv_orig_node *orig_node)
{
- struct neigh_node *neigh_node = NULL;
+ struct batadv_neigh_node *neigh_node = NULL;
struct sk_buff *skb = NULL;
- struct roam_adv_packet *roam_adv_packet;
+ struct batadv_roam_adv_packet *roam_adv_packet;
int ret = 1;
- struct hard_iface *primary_if;
+ struct batadv_hard_iface *primary_if;
+ size_t len = sizeof(*roam_adv_packet);
/* before going on we have to check whether the client has
- * already roamed to us too many times */
- if (!tt_check_roam_count(bat_priv, client))
+ * already roamed to us too many times
+ */
+ if (!batadv_tt_check_roam_count(bat_priv, client))
goto out;
- skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
+ skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN);
if (!skb)
goto out;
skb_reserve(skb, ETH_HLEN);
- roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
- sizeof(struct roam_adv_packet));
+ roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
- roam_adv_packet->header.packet_type = BAT_ROAM_ADV;
- roam_adv_packet->header.version = COMPAT_VERSION;
- roam_adv_packet->header.ttl = TTL;
- primary_if = primary_if_get_selected(bat_priv);
+ roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
+ roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
+ roam_adv_packet->header.ttl = BATADV_TTL;
+ roam_adv_packet->reserved = 0;
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
memcpy(roam_adv_packet->client, client, ETH_ALEN);
- neigh_node = orig_node_get_router(orig_node);
+ neigh_node = batadv_orig_node_get_router(orig_node);
if (!neigh_node)
goto out;
- bat_dbg(DBG_TT, bat_priv,
- "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
- orig_node->orig, client, neigh_node->addr);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
+ orig_node->orig, client, neigh_node->addr);
+
+ batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (ret)
kfree_skb(skb);
return;
}
-static void tt_purge(struct work_struct *work)
+static void batadv_tt_purge(struct work_struct *work)
{
- struct delayed_work *delayed_work =
- container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, tt_work);
+ struct delayed_work *delayed_work;
+ struct batadv_priv *bat_priv;
- tt_local_purge(bat_priv);
- tt_global_roam_purge(bat_priv);
- tt_req_purge(bat_priv);
- tt_roam_purge(bat_priv);
+ delayed_work = container_of(work, struct delayed_work, work);
+ bat_priv = container_of(delayed_work, struct batadv_priv, tt_work);
- tt_start_timer(bat_priv);
+ batadv_tt_local_purge(bat_priv);
+ batadv_tt_global_roam_purge(bat_priv);
+ batadv_tt_req_purge(bat_priv);
+ batadv_tt_roam_purge(bat_priv);
+
+ batadv_tt_start_timer(bat_priv);
}
-void tt_free(struct bat_priv *bat_priv)
+void batadv_tt_free(struct batadv_priv *bat_priv)
{
cancel_delayed_work_sync(&bat_priv->tt_work);
- tt_local_table_free(bat_priv);
- tt_global_table_free(bat_priv);
- tt_req_list_free(bat_priv);
- tt_changes_list_free(bat_priv);
- tt_roam_list_free(bat_priv);
+ batadv_tt_local_table_free(bat_priv);
+ batadv_tt_global_table_free(bat_priv);
+ batadv_tt_req_list_free(bat_priv);
+ batadv_tt_changes_list_free(bat_priv);
+ batadv_tt_roam_list_free(bat_priv);
kfree(bat_priv->tt_buff);
}
/* This function will enable or disable the specified flags for all the entries
- * in the given hash table and returns the number of modified entries */
-static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
- bool enable)
+ * in the given hash table and returns the number of modified entries
+ */
+static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
+ uint16_t flags, bool enable)
{
uint32_t i;
uint16_t changed_num = 0;
struct hlist_head *head;
struct hlist_node *node;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_tt_common_entry *tt_common_entry;
if (!hash)
goto out;
@@ -1971,12 +2149,12 @@ out:
return changed_num;
}
-/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
-static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
+/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
+static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash = bat_priv->tt_local_hash;
- struct tt_common_entry *tt_common_entry;
- struct tt_local_entry *tt_local_entry;
+ struct batadv_hashtable *hash = bat_priv->tt_local_hash;
+ struct batadv_tt_common_entry *tt_common;
+ struct batadv_tt_local_entry *tt_local;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -1990,103 +2168,149 @@ static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
list_lock = &hash->list_locks[i];
spin_lock_bh(list_lock);
- hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
- head, hash_entry) {
- if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
+ hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
+ hash_entry) {
+ if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
continue;
- bat_dbg(DBG_TT, bat_priv,
- "Deleting local tt entry (%pM): pending\n",
- tt_common_entry->addr);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Deleting local tt entry (%pM): pending\n",
+ tt_common->addr);
atomic_dec(&bat_priv->num_local_tt);
hlist_del_rcu(node);
- tt_local_entry = container_of(tt_common_entry,
- struct tt_local_entry,
- common);
- tt_local_entry_free_ref(tt_local_entry);
+ tt_local = container_of(tt_common,
+ struct batadv_tt_local_entry,
+ common);
+ batadv_tt_local_entry_free_ref(tt_local);
}
spin_unlock_bh(list_lock);
}
}
-void tt_commit_changes(struct bat_priv *bat_priv)
+static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff,
+ int *packet_buff_len, int packet_min_len)
{
- uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
- TT_CLIENT_NEW, false);
- /* all the reset entries have now to be effectively counted as local
- * entries */
+ uint16_t changed_num = 0;
+
+ if (atomic_read(&bat_priv->tt_local_changes) < 1)
+ return -ENOENT;
+
+ changed_num = batadv_tt_set_flags(bat_priv->tt_local_hash,
+ BATADV_TT_CLIENT_NEW, false);
+
+ /* all reset entries have to be counted as local entries */
atomic_add(changed_num, &bat_priv->num_local_tt);
- tt_local_purge_pending_clients(bat_priv);
+ batadv_tt_local_purge_pending_clients(bat_priv);
+ bat_priv->tt_crc = batadv_tt_local_crc(bat_priv);
/* Increment the TTVN only once per OGM interval */
atomic_inc(&bat_priv->ttvn);
- bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
- (uint8_t)atomic_read(&bat_priv->ttvn));
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "Local changes committed, updating to ttvn %u\n",
+ (uint8_t)atomic_read(&bat_priv->ttvn));
bat_priv->tt_poss_change = false;
+
+ /* reset the sending counter */
+ atomic_set(&bat_priv->tt_ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
+
+ return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
+ packet_buff_len, packet_min_len);
+}
+
+/* when calling this function (hard_iface == primary_if) has to be true */
+int batadv_tt_append_diff(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff, int *packet_buff_len,
+ int packet_min_len)
+{
+ int tt_num_changes;
+
+ /* if at least one change happened */
+ tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
+ packet_buff_len,
+ packet_min_len);
+
+ /* if the changes have been sent often enough */
+ if ((tt_num_changes < 0) &&
+ (!batadv_atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))) {
+ batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
+ packet_min_len, packet_min_len);
+ tt_num_changes = 0;
+ }
+
+ return tt_num_changes;
}
-bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
+ uint8_t *dst)
{
- struct tt_local_entry *tt_local_entry = NULL;
- struct tt_global_entry *tt_global_entry = NULL;
+ struct batadv_tt_local_entry *tt_local_entry = NULL;
+ struct batadv_tt_global_entry *tt_global_entry = NULL;
bool ret = false;
if (!atomic_read(&bat_priv->ap_isolation))
goto out;
- tt_local_entry = tt_local_hash_find(bat_priv, dst);
+ tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
if (!tt_local_entry)
goto out;
- tt_global_entry = tt_global_hash_find(bat_priv, src);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
if (!tt_global_entry)
goto out;
- if (!_is_ap_isolated(tt_local_entry, tt_global_entry))
+ if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
goto out;
ret = true;
out:
if (tt_global_entry)
- tt_global_entry_free_ref(tt_global_entry);
+ batadv_tt_global_entry_free_ref(tt_global_entry);
if (tt_local_entry)
- tt_local_entry_free_ref(tt_local_entry);
+ batadv_tt_local_entry_free_ref(tt_local_entry);
return ret;
}
-void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_buff, uint8_t tt_num_changes,
- uint8_t ttvn, uint16_t tt_crc)
+void batadv_tt_update_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_buff, uint8_t tt_num_changes,
+ uint8_t ttvn, uint16_t tt_crc)
{
uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
bool full_table = true;
+ struct batadv_tt_change *tt_change;
/* don't care about a backbone gateways updates. */
- if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
+ if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
return;
/* orig table not initialised AND first diff is in the OGM OR the ttvn
- * increased by one -> we can apply the attached changes */
+ * increased by one -> we can apply the attached changes
+ */
if ((!orig_node->tt_initialised && ttvn == 1) ||
ttvn - orig_ttvn == 1) {
/* the OGM could not contain the changes due to their size or
- * because they have already been sent TT_OGM_APPEND_MAX times.
- * In this case send a tt request */
+ * because they have already been sent BATADV_TT_OGM_APPEND_MAX
+ * times.
+ * In this case send a tt request
+ */
if (!tt_num_changes) {
full_table = false;
goto request_table;
}
- tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
- (struct tt_change *)tt_buff);
+ tt_change = (struct batadv_tt_change *)tt_buff;
+ batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
+ ttvn, tt_change);
/* Even if we received the precomputed crc with the OGM, we
* prefer to recompute it to spot any possible inconsistency
- * in the global table */
- orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+ * in the global table
+ */
+ orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
/* The ttvn alone is not enough to guarantee consistency
* because a single value could represent different states
@@ -2095,26 +2319,28 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
* consistent or not. E.g. a node could disconnect while its
* ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
* checking the CRC value is mandatory to detect the
- * inconsistency */
+ * inconsistency
+ */
if (orig_node->tt_crc != tt_crc)
goto request_table;
/* Roaming phase is over: tables are in sync again. I can
- * unset the flag */
+ * unset the flag
+ */
orig_node->tt_poss_change = false;
} else {
/* if we missed more than one change or our tables are not
- * in sync anymore -> request fresh tt data */
-
+ * in sync anymore -> request fresh tt data
+ */
if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
orig_node->tt_crc != tt_crc) {
request_table:
- bat_dbg(DBG_TT, bat_priv,
- "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
- orig_node->orig, ttvn, orig_ttvn, tt_crc,
- orig_node->tt_crc, tt_num_changes);
- send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
- full_table);
+ batadv_dbg(BATADV_DBG_TT, bat_priv,
+ "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
+ orig_node->orig, ttvn, orig_ttvn, tt_crc,
+ orig_node->tt_crc, tt_num_changes);
+ batadv_send_tt_request(bat_priv, orig_node, ttvn,
+ tt_crc, full_table);
return;
}
}
@@ -2124,17 +2350,18 @@ request_table:
* originator to another one. This entry is kept is still kept for consistency
* purposes
*/
-bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr)
+bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
+ uint8_t *addr)
{
- struct tt_global_entry *tt_global_entry;
+ struct batadv_tt_global_entry *tt_global_entry;
bool ret = false;
- tt_global_entry = tt_global_hash_find(bat_priv, addr);
+ tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
goto out;
- ret = tt_global_entry->common.flags & TT_CLIENT_ROAM;
- tt_global_entry_free_ref(tt_global_entry);
+ ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
+ batadv_tt_global_entry_free_ref(tt_global_entry);
out:
return ret;
}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index c43374dc364d..ffa87355096b 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
@@ -16,44 +15,50 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
-int tt_len(int changes_num);
-int tt_changes_fill_buffer(struct bat_priv *bat_priv,
- unsigned char *buff, int buff_len);
-int tt_init(struct bat_priv *bat_priv);
-void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
- int ifindex);
-void tt_local_remove(struct bat_priv *bat_priv,
- const uint8_t *addr, const char *message, bool roaming);
-int tt_local_seq_print_text(struct seq_file *seq, void *offset);
-void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_buff, int tt_buff_len);
-int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *addr, uint8_t ttvn, bool roaming,
- bool wifi);
-int tt_global_seq_print_text(struct seq_file *seq, void *offset);
-void tt_global_del_orig(struct bat_priv *bat_priv,
- struct orig_node *orig_node, const char *message);
-struct orig_node *transtable_search(struct bat_priv *bat_priv,
- const uint8_t *src, const uint8_t *addr);
-uint16_t tt_local_crc(struct bat_priv *bat_priv);
-void tt_free(struct bat_priv *bat_priv);
-bool send_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_request);
-bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
-void handle_tt_response(struct bat_priv *bat_priv,
- struct tt_query_packet *tt_response);
-void tt_commit_changes(struct bat_priv *bat_priv);
-bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
-void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
- const unsigned char *tt_buff, uint8_t tt_num_changes,
- uint8_t ttvn, uint16_t tt_crc);
-bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr);
+int batadv_tt_len(int changes_num);
+int batadv_tt_init(struct batadv_priv *bat_priv);
+void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
+ int ifindex);
+void batadv_tt_local_remove(struct batadv_priv *bat_priv,
+ const uint8_t *addr, const char *message,
+ bool roaming);
+int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset);
+void batadv_tt_global_add_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_buff, int tt_buff_len);
+int batadv_tt_global_add(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *addr, uint8_t flags,
+ uint8_t ttvn);
+int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset);
+void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const char *message);
+struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
+ const uint8_t *src,
+ const uint8_t *addr);
+void batadv_tt_free(struct batadv_priv *bat_priv);
+bool batadv_send_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_request);
+bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr);
+void batadv_handle_tt_response(struct batadv_priv *bat_priv,
+ struct batadv_tt_query_packet *tt_response);
+bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
+ uint8_t *dst);
+void batadv_tt_update_orig(struct batadv_priv *bat_priv,
+ struct batadv_orig_node *orig_node,
+ const unsigned char *tt_buff, uint8_t tt_num_changes,
+ uint8_t ttvn, uint16_t tt_crc);
+int batadv_tt_append_diff(struct batadv_priv *bat_priv,
+ unsigned char **packet_buff, int *packet_buff_len,
+ int packet_min_len);
+bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
+ uint8_t *addr);
#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 61308e8016ff..12635fd2c3d3 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -16,24 +15,20 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
-
-
#ifndef _NET_BATMAN_ADV_TYPES_H_
#define _NET_BATMAN_ADV_TYPES_H_
#include "packet.h"
#include "bitarray.h"
+#include <linux/kernel.h>
-#define BAT_HEADER_LEN (ETH_HLEN + \
- ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
- sizeof(struct unicast_packet) : \
- sizeof(struct bcast_packet))))
-
+#define BATADV_HEADER_LEN \
+ (ETH_HLEN + max(sizeof(struct batadv_unicast_packet), \
+ sizeof(struct batadv_bcast_packet)))
-struct hard_iface {
+struct batadv_hard_iface {
struct list_head list;
int16_t if_num;
char if_status;
@@ -50,7 +45,7 @@ struct hard_iface {
};
/**
- * orig_node - structure for orig_list maintaining nodes of mesh
+ * struct batadv_orig_node - structure for orig_list maintaining nodes of mesh
* @primary_addr: hosts primary interface address
* @last_seen: when last packet from this node was received
* @bcast_seqno_reset: time when the broadcast seqno window was reset
@@ -64,10 +59,10 @@ struct hard_iface {
* @candidates: how many candidates are available
* @selected: next bonding candidate
*/
-struct orig_node {
+struct batadv_orig_node {
uint8_t orig[ETH_ALEN];
uint8_t primary_addr[ETH_ALEN];
- struct neigh_node __rcu *router; /* rcu protected pointer */
+ struct batadv_neigh_node __rcu *router; /* rcu protected pointer */
unsigned long *bcast_own;
uint8_t *bcast_own_sum;
unsigned long last_seen;
@@ -86,11 +81,12 @@ struct orig_node {
* If true, then I sent a Roaming_adv to this orig_node and I have to
* inspect every packet directed to it to check whether it is still
* the true destination or not. This flag will be reset to false as
- * soon as I receive a new TTVN from this orig_node */
+ * soon as I receive a new TTVN from this orig_node
+ */
bool tt_poss_change;
uint32_t last_real_seqno;
uint8_t last_ttl;
- DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE);
+ DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
uint32_t last_bcast_seqno;
struct hlist_head neigh_list;
struct list_head frag_list;
@@ -98,10 +94,11 @@ struct orig_node {
atomic_t refcount;
struct rcu_head rcu;
struct hlist_node hash_entry;
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
unsigned long last_frag_packet;
/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
- * neigh_node->real_bits, neigh_node->real_packet_count */
+ * neigh_node->real_bits, neigh_node->real_packet_count
+ */
spinlock_t ogm_cnt_lock;
/* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
spinlock_t bcast_seqno_lock;
@@ -110,47 +107,63 @@ struct orig_node {
struct list_head bond_list;
};
-struct gw_node {
+struct batadv_gw_node {
struct hlist_node list;
- struct orig_node *orig_node;
+ struct batadv_orig_node *orig_node;
unsigned long deleted;
atomic_t refcount;
struct rcu_head rcu;
};
-/**
- * neigh_node
+/* batadv_neigh_node
* @last_seen: when last packet via this neighbor was received
*/
-struct neigh_node {
+struct batadv_neigh_node {
struct hlist_node list;
uint8_t addr[ETH_ALEN];
uint8_t real_packet_count;
- uint8_t tq_recv[TQ_GLOBAL_WINDOW_SIZE];
+ uint8_t tq_recv[BATADV_TQ_GLOBAL_WINDOW_SIZE];
uint8_t tq_index;
uint8_t tq_avg;
uint8_t last_ttl;
struct list_head bonding_list;
unsigned long last_seen;
- DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
+ DECLARE_BITMAP(real_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
atomic_t refcount;
struct rcu_head rcu;
- struct orig_node *orig_node;
- struct hard_iface *if_incoming;
+ struct batadv_orig_node *orig_node;
+ struct batadv_hard_iface *if_incoming;
spinlock_t lq_update_lock; /* protects: tq_recv, tq_index */
};
#ifdef CONFIG_BATMAN_ADV_BLA
-struct bcast_duplist_entry {
+struct batadv_bcast_duplist_entry {
uint8_t orig[ETH_ALEN];
uint16_t crc;
unsigned long entrytime;
};
#endif
-struct bat_priv {
+enum batadv_counters {
+ BATADV_CNT_FORWARD,
+ BATADV_CNT_FORWARD_BYTES,
+ BATADV_CNT_MGMT_TX,
+ BATADV_CNT_MGMT_TX_BYTES,
+ BATADV_CNT_MGMT_RX,
+ BATADV_CNT_MGMT_RX_BYTES,
+ BATADV_CNT_TT_REQUEST_TX,
+ BATADV_CNT_TT_REQUEST_RX,
+ BATADV_CNT_TT_RESPONSE_TX,
+ BATADV_CNT_TT_RESPONSE_RX,
+ BATADV_CNT_TT_ROAM_ADV_TX,
+ BATADV_CNT_TT_ROAM_ADV_RX,
+ BATADV_CNT_NUM,
+};
+
+struct batadv_priv {
atomic_t mesh_state;
struct net_device_stats stats;
+ uint64_t __percpu *bat_counters; /* Per cpu counters */
atomic_t aggregated_ogms; /* boolean */
atomic_t bonding; /* boolean */
atomic_t fragmentation; /* boolean */
@@ -174,10 +187,11 @@ struct bat_priv {
* If true, then I received a Roaming_adv and I have to inspect every
* packet directed to me to check whether I am still the true
* destination or not. This flag will be reset to false as soon as I
- * increase my TTVN */
+ * increase my TTVN
+ */
bool tt_poss_change;
char num_ifaces;
- struct debug_log *debug_log;
+ struct batadv_debug_log *debug_log;
struct kobject *mesh_obj;
struct dentry *debug_dir;
struct hlist_head forw_bat_list;
@@ -185,20 +199,20 @@ struct bat_priv {
struct hlist_head gw_list;
struct list_head tt_changes_list; /* tracks changes in a OGM int */
struct list_head vis_send_list;
- struct hashtable_t *orig_hash;
- struct hashtable_t *tt_local_hash;
- struct hashtable_t *tt_global_hash;
+ struct batadv_hashtable *orig_hash;
+ struct batadv_hashtable *tt_local_hash;
+ struct batadv_hashtable *tt_global_hash;
#ifdef CONFIG_BATMAN_ADV_BLA
- struct hashtable_t *claim_hash;
- struct hashtable_t *backbone_hash;
+ struct batadv_hashtable *claim_hash;
+ struct batadv_hashtable *backbone_hash;
#endif
struct list_head tt_req_list; /* list of pending tt_requests */
struct list_head tt_roam_list;
- struct hashtable_t *vis_hash;
+ struct batadv_hashtable *vis_hash;
#ifdef CONFIG_BATMAN_ADV_BLA
- struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE];
+ struct batadv_bcast_duplist_entry bcast_duplist[BATADV_DUPLIST_SIZE];
int bcast_duplist_curr;
- struct bla_claim_dst claim_dest;
+ struct batadv_bla_claim_dst claim_dest;
#endif
spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
spinlock_t forw_bcast_list_lock; /* protects */
@@ -210,7 +224,7 @@ struct bat_priv {
spinlock_t vis_list_lock; /* protects vis_info::recv_list */
atomic_t num_local_tt;
/* Checksum of the local table, recomputed before sending a new OGM */
- atomic_t tt_crc;
+ uint16_t tt_crc;
unsigned char *tt_buff;
int16_t tt_buff_len;
spinlock_t tt_buff_lock; /* protects tt_buff */
@@ -218,29 +232,29 @@ struct bat_priv {
struct delayed_work orig_work;
struct delayed_work vis_work;
struct delayed_work bla_work;
- struct gw_node __rcu *curr_gw; /* rcu protected pointer */
+ struct batadv_gw_node __rcu *curr_gw; /* rcu protected pointer */
atomic_t gw_reselect;
- struct hard_iface __rcu *primary_if; /* rcu protected pointer */
- struct vis_info *my_vis_info;
- struct bat_algo_ops *bat_algo_ops;
+ struct batadv_hard_iface __rcu *primary_if; /* rcu protected pointer */
+ struct batadv_vis_info *my_vis_info;
+ struct batadv_algo_ops *bat_algo_ops;
};
-struct socket_client {
+struct batadv_socket_client {
struct list_head queue_list;
unsigned int queue_len;
unsigned char index;
spinlock_t lock; /* protects queue_list, queue_len, index */
wait_queue_head_t queue_wait;
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
};
-struct socket_packet {
+struct batadv_socket_packet {
struct list_head list;
size_t icmp_len;
- struct icmp_packet_rr icmp_packet;
+ struct batadv_icmp_packet_rr icmp_packet;
};
-struct tt_common_entry {
+struct batadv_tt_common_entry {
uint8_t addr[ETH_ALEN];
struct hlist_node hash_entry;
uint16_t flags;
@@ -248,31 +262,31 @@ struct tt_common_entry {
struct rcu_head rcu;
};
-struct tt_local_entry {
- struct tt_common_entry common;
+struct batadv_tt_local_entry {
+ struct batadv_tt_common_entry common;
unsigned long last_seen;
};
-struct tt_global_entry {
- struct tt_common_entry common;
+struct batadv_tt_global_entry {
+ struct batadv_tt_common_entry common;
struct hlist_head orig_list;
spinlock_t list_lock; /* protects the list */
unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
};
-struct tt_orig_list_entry {
- struct orig_node *orig_node;
+struct batadv_tt_orig_list_entry {
+ struct batadv_orig_node *orig_node;
uint8_t ttvn;
struct rcu_head rcu;
struct hlist_node list;
};
#ifdef CONFIG_BATMAN_ADV_BLA
-struct backbone_gw {
+struct batadv_backbone_gw {
uint8_t orig[ETH_ALEN];
short vid; /* used VLAN ID */
struct hlist_node hash_entry;
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
unsigned long lasttime; /* last time we heard of this backbone gw */
atomic_t request_sent;
atomic_t refcount;
@@ -280,10 +294,10 @@ struct backbone_gw {
uint16_t crc; /* crc checksum over all claims */
};
-struct claim {
+struct batadv_claim {
uint8_t addr[ETH_ALEN];
short vid;
- struct backbone_gw *backbone_gw;
+ struct batadv_backbone_gw *backbone_gw;
unsigned long lasttime; /* last time we heard of claim (locals only) */
struct rcu_head rcu;
atomic_t refcount;
@@ -291,29 +305,28 @@ struct claim {
};
#endif
-struct tt_change_node {
+struct batadv_tt_change_node {
struct list_head list;
- struct tt_change change;
+ struct batadv_tt_change change;
};
-struct tt_req_node {
+struct batadv_tt_req_node {
uint8_t addr[ETH_ALEN];
unsigned long issued_at;
struct list_head list;
};
-struct tt_roam_node {
+struct batadv_tt_roam_node {
uint8_t addr[ETH_ALEN];
atomic_t counter;
unsigned long first_time;
struct list_head list;
};
-/**
- * forw_packet - structure for forw_list maintaining packets to be
+/* forw_packet - structure for forw_list maintaining packets to be
* send/forwarded
*/
-struct forw_packet {
+struct batadv_forw_packet {
struct hlist_node list;
unsigned long send_time;
uint8_t own;
@@ -322,76 +335,76 @@ struct forw_packet {
uint32_t direct_link_flags;
uint8_t num_packets;
struct delayed_work delayed_work;
- struct hard_iface *if_incoming;
+ struct batadv_hard_iface *if_incoming;
};
/* While scanning for vis-entries of a particular vis-originator
* this list collects its interfaces to create a subgraph/cluster
* out of them later
*/
-struct if_list_entry {
+struct batadv_if_list_entry {
uint8_t addr[ETH_ALEN];
bool primary;
struct hlist_node list;
};
-struct debug_log {
- char log_buff[LOG_BUF_LEN];
+struct batadv_debug_log {
+ char log_buff[BATADV_LOG_BUF_LEN];
unsigned long log_start;
unsigned long log_end;
spinlock_t lock; /* protects log_buff, log_start and log_end */
wait_queue_head_t queue_wait;
};
-struct frag_packet_list_entry {
+struct batadv_frag_packet_list_entry {
struct list_head list;
uint16_t seqno;
struct sk_buff *skb;
};
-struct vis_info {
+struct batadv_vis_info {
unsigned long first_seen;
/* list of server-neighbors we received a vis-packet
- * from. we should not reply to them. */
+ * from. we should not reply to them.
+ */
struct list_head recv_list;
struct list_head send_list;
struct kref refcount;
struct hlist_node hash_entry;
- struct bat_priv *bat_priv;
+ struct batadv_priv *bat_priv;
/* this packet might be part of the vis send queue. */
struct sk_buff *skb_packet;
- /* vis_info may follow here*/
+ /* vis_info may follow here */
} __packed;
-struct vis_info_entry {
+struct batadv_vis_info_entry {
uint8_t src[ETH_ALEN];
uint8_t dest[ETH_ALEN];
uint8_t quality; /* quality = 0 client */
} __packed;
-struct recvlist_node {
+struct batadv_recvlist_node {
struct list_head list;
uint8_t mac[ETH_ALEN];
};
-struct bat_algo_ops {
+struct batadv_algo_ops {
struct hlist_node list;
char *name;
/* init routing info when hard-interface is enabled */
- int (*bat_iface_enable)(struct hard_iface *hard_iface);
+ int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
/* de-init routing info when hard-interface is disabled */
- void (*bat_iface_disable)(struct hard_iface *hard_iface);
+ void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
/* (re-)init mac addresses of the protocol information
* belonging to this hard-interface
*/
- void (*bat_iface_update_mac)(struct hard_iface *hard_iface);
+ void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
/* called when primary interface is selected / changed */
- void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
+ void (*bat_primary_iface_set)(struct batadv_hard_iface *hard_iface);
/* prepare a new outgoing OGM for the send queue */
- void (*bat_ogm_schedule)(struct hard_iface *hard_iface,
- int tt_num_changes);
+ void (*bat_ogm_schedule)(struct batadv_hard_iface *hard_iface);
/* send scheduled OGM */
- void (*bat_ogm_emit)(struct forw_packet *forw_packet);
+ void (*bat_ogm_emit)(struct batadv_forw_packet *forw_packet);
};
#endif /* _NET_BATMAN_ADV_TYPES_H_ */
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 74175c210858..00164645b3f7 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -31,19 +29,20 @@
#include "hard-interface.h"
-static struct sk_buff *frag_merge_packet(struct list_head *head,
- struct frag_packet_list_entry *tfp,
- struct sk_buff *skb)
+static struct sk_buff *
+batadv_frag_merge_packet(struct list_head *head,
+ struct batadv_frag_packet_list_entry *tfp,
+ struct sk_buff *skb)
{
- struct unicast_frag_packet *up =
- (struct unicast_frag_packet *)skb->data;
+ struct batadv_unicast_frag_packet *up;
struct sk_buff *tmp_skb;
- struct unicast_packet *unicast_packet;
+ struct batadv_unicast_packet *unicast_packet;
int hdr_len = sizeof(*unicast_packet);
int uni_diff = sizeof(*up) - hdr_len;
+ up = (struct batadv_unicast_frag_packet *)skb->data;
/* set skb to the first part and tmp_skb to the second part */
- if (up->flags & UNI_FRAG_HEAD) {
+ if (up->flags & BATADV_UNI_FRAG_HEAD) {
tmp_skb = tfp->skb;
} else {
tmp_skb = skb;
@@ -66,8 +65,9 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
kfree_skb(tmp_skb);
memmove(skb->data + uni_diff, skb->data, hdr_len);
- unicast_packet = (struct unicast_packet *)skb_pull(skb, uni_diff);
- unicast_packet->header.packet_type = BAT_UNICAST;
+ unicast_packet = (struct batadv_unicast_packet *)skb_pull(skb,
+ uni_diff);
+ unicast_packet->header.packet_type = BATADV_UNICAST;
return skb;
@@ -77,11 +77,13 @@ err:
return NULL;
}
-static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
+static void batadv_frag_create_entry(struct list_head *head,
+ struct sk_buff *skb)
{
- struct frag_packet_list_entry *tfp;
- struct unicast_frag_packet *up =
- (struct unicast_frag_packet *)skb->data;
+ struct batadv_frag_packet_list_entry *tfp;
+ struct batadv_unicast_frag_packet *up;
+
+ up = (struct batadv_unicast_frag_packet *)skb->data;
/* free and oldest packets stand at the end */
tfp = list_entry((head)->prev, typeof(*tfp), list);
@@ -93,15 +95,15 @@ static void frag_create_entry(struct list_head *head, struct sk_buff *skb)
return;
}
-static int frag_create_buffer(struct list_head *head)
+static int batadv_frag_create_buffer(struct list_head *head)
{
int i;
- struct frag_packet_list_entry *tfp;
+ struct batadv_frag_packet_list_entry *tfp;
- for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
+ for (i = 0; i < BATADV_FRAG_BUFFER_SIZE; i++) {
tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
if (!tfp) {
- frag_list_free(head);
+ batadv_frag_list_free(head);
return -ENOMEM;
}
tfp->skb = NULL;
@@ -113,14 +115,15 @@ static int frag_create_buffer(struct list_head *head)
return 0;
}
-static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
- const struct unicast_frag_packet *up)
+static struct batadv_frag_packet_list_entry *
+batadv_frag_search_packet(struct list_head *head,
+ const struct batadv_unicast_frag_packet *up)
{
- struct frag_packet_list_entry *tfp;
- struct unicast_frag_packet *tmp_up = NULL;
+ struct batadv_frag_packet_list_entry *tfp;
+ struct batadv_unicast_frag_packet *tmp_up = NULL;
uint16_t search_seqno;
- if (up->flags & UNI_FRAG_HEAD)
+ if (up->flags & BATADV_UNI_FRAG_HEAD)
search_seqno = ntohs(up->seqno)+1;
else
search_seqno = ntohs(up->seqno)-1;
@@ -133,12 +136,12 @@ static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
if (tfp->seqno == ntohs(up->seqno))
goto mov_tail;
- tmp_up = (struct unicast_frag_packet *)tfp->skb->data;
+ tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
if (tfp->seqno == search_seqno) {
- if ((tmp_up->flags & UNI_FRAG_HEAD) !=
- (up->flags & UNI_FRAG_HEAD))
+ if ((tmp_up->flags & BATADV_UNI_FRAG_HEAD) !=
+ (up->flags & BATADV_UNI_FRAG_HEAD))
return tfp;
else
goto mov_tail;
@@ -151,9 +154,9 @@ mov_tail:
return NULL;
}
-void frag_list_free(struct list_head *head)
+void batadv_frag_list_free(struct list_head *head)
{
- struct frag_packet_list_entry *pf, *tmp_pf;
+ struct batadv_frag_packet_list_entry *pf, *tmp_pf;
if (!list_empty(head)) {
@@ -172,64 +175,66 @@ void frag_list_free(struct list_head *head)
* or the skb could be reassembled (skb_new will point to the new packet and
* skb was freed)
*/
-int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct sk_buff **new_skb)
+int batadv_frag_reassemble_skb(struct sk_buff *skb,
+ struct batadv_priv *bat_priv,
+ struct sk_buff **new_skb)
{
- struct orig_node *orig_node;
- struct frag_packet_list_entry *tmp_frag_entry;
+ struct batadv_orig_node *orig_node;
+ struct batadv_frag_packet_list_entry *tmp_frag_entry;
int ret = NET_RX_DROP;
- struct unicast_frag_packet *unicast_packet =
- (struct unicast_frag_packet *)skb->data;
+ struct batadv_unicast_frag_packet *unicast_packet;
+ unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
*new_skb = NULL;
- orig_node = orig_hash_find(bat_priv, unicast_packet->orig);
+ orig_node = batadv_orig_hash_find(bat_priv, unicast_packet->orig);
if (!orig_node)
goto out;
orig_node->last_frag_packet = jiffies;
if (list_empty(&orig_node->frag_list) &&
- frag_create_buffer(&orig_node->frag_list)) {
+ batadv_frag_create_buffer(&orig_node->frag_list)) {
pr_debug("couldn't create frag buffer\n");
goto out;
}
- tmp_frag_entry = frag_search_packet(&orig_node->frag_list,
- unicast_packet);
+ tmp_frag_entry = batadv_frag_search_packet(&orig_node->frag_list,
+ unicast_packet);
if (!tmp_frag_entry) {
- frag_create_entry(&orig_node->frag_list, skb);
+ batadv_frag_create_entry(&orig_node->frag_list, skb);
ret = NET_RX_SUCCESS;
goto out;
}
- *new_skb = frag_merge_packet(&orig_node->frag_list, tmp_frag_entry,
- skb);
+ *new_skb = batadv_frag_merge_packet(&orig_node->frag_list,
+ tmp_frag_entry, skb);
/* if not, merge failed */
if (*new_skb)
ret = NET_RX_SUCCESS;
out:
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
return ret;
}
-int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct hard_iface *hard_iface, const uint8_t dstaddr[])
+int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface,
+ const uint8_t dstaddr[])
{
- struct unicast_packet tmp_uc, *unicast_packet;
- struct hard_iface *primary_if;
+ struct batadv_unicast_packet tmp_uc, *unicast_packet;
+ struct batadv_hard_iface *primary_if;
struct sk_buff *frag_skb;
- struct unicast_frag_packet *frag1, *frag2;
+ struct batadv_unicast_frag_packet *frag1, *frag2;
int uc_hdr_len = sizeof(*unicast_packet);
int ucf_hdr_len = sizeof(*frag1);
int data_len = skb->len - uc_hdr_len;
int large_tail = 0, ret = NET_RX_DROP;
uint16_t seqno;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto dropped;
@@ -238,38 +243,38 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
goto dropped;
skb_reserve(frag_skb, ucf_hdr_len);
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
memcpy(&tmp_uc, unicast_packet, uc_hdr_len);
skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len);
- if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
- my_skb_head_push(frag_skb, ucf_hdr_len) < 0)
+ if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 ||
+ batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0)
goto drop_frag;
- frag1 = (struct unicast_frag_packet *)skb->data;
- frag2 = (struct unicast_frag_packet *)frag_skb->data;
+ frag1 = (struct batadv_unicast_frag_packet *)skb->data;
+ frag2 = (struct batadv_unicast_frag_packet *)frag_skb->data;
memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
frag1->header.ttl--;
- frag1->header.version = COMPAT_VERSION;
- frag1->header.packet_type = BAT_UNICAST_FRAG;
+ frag1->header.version = BATADV_COMPAT_VERSION;
+ frag1->header.packet_type = BATADV_UNICAST_FRAG;
memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
memcpy(frag2, frag1, sizeof(*frag2));
if (data_len & 1)
- large_tail = UNI_FRAG_LARGETAIL;
+ large_tail = BATADV_UNI_FRAG_LARGETAIL;
- frag1->flags = UNI_FRAG_HEAD | large_tail;
+ frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
frag2->flags = large_tail;
seqno = atomic_add_return(2, &hard_iface->frag_seqno);
frag1->seqno = htons(seqno - 1);
frag2->seqno = htons(seqno);
- send_skb_packet(skb, hard_iface, dstaddr);
- send_skb_packet(frag_skb, hard_iface, dstaddr);
+ batadv_send_skb_packet(skb, hard_iface, dstaddr);
+ batadv_send_skb_packet(frag_skb, hard_iface, dstaddr);
ret = NET_RX_SUCCESS;
goto out;
@@ -279,52 +284,53 @@ dropped:
kfree_skb(skb);
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
+int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv)
{
struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
- struct unicast_packet *unicast_packet;
- struct orig_node *orig_node;
- struct neigh_node *neigh_node;
+ struct batadv_unicast_packet *unicast_packet;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *neigh_node;
int data_len = skb->len;
int ret = 1;
+ unsigned int dev_mtu;
/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest)) {
- orig_node = gw_get_selected_orig(bat_priv);
+ orig_node = batadv_gw_get_selected_orig(bat_priv);
if (orig_node)
goto find_router;
}
/* check for tt host - increases orig_node refcount.
- * returns NULL in case of AP isolation */
- orig_node = transtable_search(bat_priv, ethhdr->h_source,
- ethhdr->h_dest);
+ * returns NULL in case of AP isolation
+ */
+ orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
+ ethhdr->h_dest);
find_router:
- /**
- * find_router():
+ /* find_router():
* - if orig_node is NULL it returns NULL
* - increases neigh_nodes refcount if found.
*/
- neigh_node = find_router(bat_priv, orig_node, NULL);
+ neigh_node = batadv_find_router(bat_priv, orig_node, NULL);
if (!neigh_node)
goto out;
- if (my_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
+ if (batadv_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
goto out;
- unicast_packet = (struct unicast_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.version = COMPAT_VERSION;
+ unicast_packet->header.version = BATADV_COMPAT_VERSION;
/* batman packet type: unicast */
- unicast_packet->header.packet_type = BAT_UNICAST;
+ unicast_packet->header.packet_type = BATADV_UNICAST;
/* set unicast ttl */
- unicast_packet->header.ttl = TTL;
+ unicast_packet->header.ttl = BATADV_TTL;
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* set the destination tt version number */
@@ -336,28 +342,29 @@ find_router:
* try to reroute it because the ttvn contained in the header is less
* than the current one
*/
- if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
+ if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
unicast_packet->ttvn = unicast_packet->ttvn - 1;
+ dev_mtu = neigh_node->if_incoming->net_dev->mtu;
if (atomic_read(&bat_priv->fragmentation) &&
- data_len + sizeof(*unicast_packet) >
- neigh_node->if_incoming->net_dev->mtu) {
+ data_len + sizeof(*unicast_packet) > dev_mtu) {
/* send frag skb decreases ttl */
unicast_packet->header.ttl++;
- ret = frag_send_skb(skb, bat_priv,
- neigh_node->if_incoming, neigh_node->addr);
+ ret = batadv_frag_send_skb(skb, bat_priv,
+ neigh_node->if_incoming,
+ neigh_node->addr);
goto out;
}
- send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+ batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
ret = 0;
goto out;
out:
if (neigh_node)
- neigh_node_free_ref(neigh_node);
+ batadv_neigh_node_free_ref(neigh_node);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
if (ret == 1)
kfree_skb(skb);
return ret;
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index a9faf6b1db19..1c46e2eb1ef9 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors:
*
* Andreas Langer
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_UNICAST_H_
@@ -24,33 +22,35 @@
#include "packet.h"
-#define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
-#define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
+#define BATADV_FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */
+#define BATADV_FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */
-int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct sk_buff **new_skb);
-void frag_list_free(struct list_head *head);
-int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
-int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
- struct hard_iface *hard_iface, const uint8_t dstaddr[]);
+int batadv_frag_reassemble_skb(struct sk_buff *skb,
+ struct batadv_priv *bat_priv,
+ struct sk_buff **new_skb);
+void batadv_frag_list_free(struct list_head *head);
+int batadv_unicast_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv);
+int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
+ struct batadv_hard_iface *hard_iface,
+ const uint8_t dstaddr[]);
-static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu)
+static inline int batadv_frag_can_reassemble(const struct sk_buff *skb, int mtu)
{
- const struct unicast_frag_packet *unicast_packet;
+ const struct batadv_unicast_frag_packet *unicast_packet;
int uneven_correction = 0;
unsigned int merged_size;
- unicast_packet = (struct unicast_frag_packet *)skb->data;
+ unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;
- if (unicast_packet->flags & UNI_FRAG_LARGETAIL) {
- if (unicast_packet->flags & UNI_FRAG_HEAD)
+ if (unicast_packet->flags & BATADV_UNI_FRAG_LARGETAIL) {
+ if (unicast_packet->flags & BATADV_UNI_FRAG_HEAD)
uneven_correction = 1;
else
uneven_correction = -1;
}
merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
- merged_size += sizeof(struct unicast_packet) + uneven_correction;
+ merged_size += sizeof(struct batadv_unicast_packet) + uneven_correction;
return merged_size <= mtu;
}
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index cec216fb77c7..2a2ea0681469 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -16,7 +15,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#include "main.h"
@@ -28,16 +26,19 @@
#include "hash.h"
#include "originator.h"
-#define MAX_VIS_PACKET_SIZE 1000
+#define BATADV_MAX_VIS_PACKET_SIZE 1000
-static void start_vis_timer(struct bat_priv *bat_priv);
+static void batadv_start_vis_timer(struct batadv_priv *bat_priv);
/* free the info */
-static void free_info(struct kref *ref)
+static void batadv_free_info(struct kref *ref)
{
- struct vis_info *info = container_of(ref, struct vis_info, refcount);
- struct bat_priv *bat_priv = info->bat_priv;
- struct recvlist_node *entry, *tmp;
+ struct batadv_vis_info *info;
+ struct batadv_priv *bat_priv;
+ struct batadv_recvlist_node *entry, *tmp;
+
+ info = container_of(ref, struct batadv_vis_info, refcount);
+ bat_priv = info->bat_priv;
list_del_init(&info->send_list);
spin_lock_bh(&bat_priv->vis_list_lock);
@@ -52,29 +53,30 @@ static void free_info(struct kref *ref)
}
/* Compare two vis packets, used by the hashing algorithm */
-static int vis_info_cmp(const struct hlist_node *node, const void *data2)
+static int batadv_vis_info_cmp(const struct hlist_node *node, const void *data2)
{
- const struct vis_info *d1, *d2;
- const struct vis_packet *p1, *p2;
+ const struct batadv_vis_info *d1, *d2;
+ const struct batadv_vis_packet *p1, *p2;
- d1 = container_of(node, struct vis_info, hash_entry);
+ d1 = container_of(node, struct batadv_vis_info, hash_entry);
d2 = data2;
- p1 = (struct vis_packet *)d1->skb_packet->data;
- p2 = (struct vis_packet *)d2->skb_packet->data;
- return compare_eth(p1->vis_orig, p2->vis_orig);
+ p1 = (struct batadv_vis_packet *)d1->skb_packet->data;
+ p2 = (struct batadv_vis_packet *)d2->skb_packet->data;
+ return batadv_compare_eth(p1->vis_orig, p2->vis_orig);
}
-/* hash function to choose an entry in a hash table of given size */
-/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static uint32_t vis_info_choose(const void *data, uint32_t size)
+/* hash function to choose an entry in a hash table of given size
+ * hash algorithm from http://en.wikipedia.org/wiki/Hash_table
+ */
+static uint32_t batadv_vis_info_choose(const void *data, uint32_t size)
{
- const struct vis_info *vis_info = data;
- const struct vis_packet *packet;
+ const struct batadv_vis_info *vis_info = data;
+ const struct batadv_vis_packet *packet;
const unsigned char *key;
uint32_t hash = 0;
size_t i;
- packet = (struct vis_packet *)vis_info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)vis_info->skb_packet->data;
key = packet->vis_orig;
for (i = 0; i < ETH_ALEN; i++) {
hash += key[i];
@@ -89,24 +91,24 @@ static uint32_t vis_info_choose(const void *data, uint32_t size)
return hash % size;
}
-static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
- const void *data)
+static struct batadv_vis_info *
+batadv_vis_hash_find(struct batadv_priv *bat_priv, const void *data)
{
- struct hashtable_t *hash = bat_priv->vis_hash;
+ struct batadv_hashtable *hash = bat_priv->vis_hash;
struct hlist_head *head;
struct hlist_node *node;
- struct vis_info *vis_info, *vis_info_tmp = NULL;
+ struct batadv_vis_info *vis_info, *vis_info_tmp = NULL;
uint32_t index;
if (!hash)
return NULL;
- index = vis_info_choose(data, hash->size);
+ index = batadv_vis_info_choose(data, hash->size);
head = &hash->table[index];
rcu_read_lock();
hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
- if (!vis_info_cmp(node, data))
+ if (!batadv_vis_info_cmp(node, data))
continue;
vis_info_tmp = vis_info;
@@ -118,16 +120,17 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
}
/* insert interface to the list of interfaces of one originator, if it
- * does not already exist in the list */
-static void vis_data_insert_interface(const uint8_t *interface,
- struct hlist_head *if_list,
- bool primary)
+ * does not already exist in the list
+ */
+static void batadv_vis_data_insert_interface(const uint8_t *interface,
+ struct hlist_head *if_list,
+ bool primary)
{
- struct if_list_entry *entry;
+ struct batadv_if_list_entry *entry;
struct hlist_node *pos;
hlist_for_each_entry(entry, pos, if_list, list) {
- if (compare_eth(entry->addr, interface))
+ if (batadv_compare_eth(entry->addr, interface))
return;
}
@@ -140,195 +143,145 @@ static void vis_data_insert_interface(const uint8_t *interface,
hlist_add_head(&entry->list, if_list);
}
-static ssize_t vis_data_read_prim_sec(char *buff,
- const struct hlist_head *if_list)
+static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
+ const struct hlist_head *if_list)
{
- struct if_list_entry *entry;
+ struct batadv_if_list_entry *entry;
struct hlist_node *pos;
- size_t len = 0;
hlist_for_each_entry(entry, pos, if_list, list) {
if (entry->primary)
- len += sprintf(buff + len, "PRIMARY, ");
+ seq_printf(seq, "PRIMARY, ");
else
- len += sprintf(buff + len, "SEC %pM, ", entry->addr);
+ seq_printf(seq, "SEC %pM, ", entry->addr);
}
+}
- return len;
+/* read an entry */
+static ssize_t
+batadv_vis_data_read_entry(struct seq_file *seq,
+ const struct batadv_vis_info_entry *entry,
+ const uint8_t *src, bool primary)
+{
+ if (primary && entry->quality == 0)
+ return seq_printf(seq, "TT %pM, ", entry->dest);
+ else if (batadv_compare_eth(entry->src, src))
+ return seq_printf(seq, "TQ %pM %d, ", entry->dest,
+ entry->quality);
+
+ return 0;
}
-static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
+static void
+batadv_vis_data_insert_interfaces(struct hlist_head *list,
+ struct batadv_vis_packet *packet,
+ struct batadv_vis_info_entry *entries)
{
- struct if_list_entry *entry;
- struct hlist_node *pos;
- size_t count = 0;
+ int i;
- hlist_for_each_entry(entry, pos, if_list, list) {
- if (entry->primary)
- count += 9;
- else
- count += 23;
- }
+ for (i = 0; i < packet->entries; i++) {
+ if (entries[i].quality == 0)
+ continue;
- return count;
+ if (batadv_compare_eth(entries[i].src, packet->vis_orig))
+ continue;
+
+ batadv_vis_data_insert_interface(entries[i].src, list, false);
+ }
}
-/* read an entry */
-static ssize_t vis_data_read_entry(char *buff,
- const struct vis_info_entry *entry,
- const uint8_t *src, bool primary)
+static void batadv_vis_data_read_entries(struct seq_file *seq,
+ struct hlist_head *list,
+ struct batadv_vis_packet *packet,
+ struct batadv_vis_info_entry *entries)
{
- /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
- if (primary && entry->quality == 0)
- return sprintf(buff, "TT %pM, ", entry->dest);
- else if (compare_eth(entry->src, src))
- return sprintf(buff, "TQ %pM %d, ", entry->dest,
- entry->quality);
+ int i;
+ struct batadv_if_list_entry *entry;
+ struct hlist_node *pos;
- return 0;
+ hlist_for_each_entry(entry, pos, list, list) {
+ seq_printf(seq, "%pM,", entry->addr);
+
+ for (i = 0; i < packet->entries; i++)
+ batadv_vis_data_read_entry(seq, &entries[i],
+ entry->addr, entry->primary);
+
+ /* add primary/secondary records */
+ if (batadv_compare_eth(entry->addr, packet->vis_orig))
+ batadv_vis_data_read_prim_sec(seq, list);
+
+ seq_printf(seq, "\n");
+ }
}
-int vis_seq_print_text(struct seq_file *seq, void *offset)
+static void batadv_vis_seq_print_text_bucket(struct seq_file *seq,
+ const struct hlist_head *head)
{
- struct hard_iface *primary_if;
struct hlist_node *node;
+ struct batadv_vis_info *info;
+ struct batadv_vis_packet *packet;
+ uint8_t *entries_pos;
+ struct batadv_vis_info_entry *entries;
+ struct batadv_if_list_entry *entry;
+ struct hlist_node *pos, *n;
+
+ HLIST_HEAD(vis_if_list);
+
+ hlist_for_each_entry_rcu(info, node, head, hash_entry) {
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
+ entries_pos = (uint8_t *)packet + sizeof(*packet);
+ entries = (struct batadv_vis_info_entry *)entries_pos;
+
+ batadv_vis_data_insert_interface(packet->vis_orig, &vis_if_list,
+ true);
+ batadv_vis_data_insert_interfaces(&vis_if_list, packet,
+ entries);
+ batadv_vis_data_read_entries(seq, &vis_if_list, packet,
+ entries);
+
+ hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) {
+ hlist_del(&entry->list);
+ kfree(entry);
+ }
+ }
+}
+
+int batadv_vis_seq_print_text(struct seq_file *seq, void *offset)
+{
+ struct batadv_hard_iface *primary_if;
struct hlist_head *head;
- struct vis_info *info;
- struct vis_packet *packet;
- struct vis_info_entry *entries;
struct net_device *net_dev = (struct net_device *)seq->private;
- struct bat_priv *bat_priv = netdev_priv(net_dev);
- struct hashtable_t *hash = bat_priv->vis_hash;
- HLIST_HEAD(vis_if_list);
- struct if_list_entry *entry;
- struct hlist_node *pos, *n;
+ struct batadv_priv *bat_priv = netdev_priv(net_dev);
+ struct batadv_hashtable *hash = bat_priv->vis_hash;
uint32_t i;
- int j, ret = 0;
+ int ret = 0;
int vis_server = atomic_read(&bat_priv->vis_mode);
- size_t buff_pos, buf_size;
- char *buff;
- int compare;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- if (vis_server == VIS_TYPE_CLIENT_UPDATE)
+ if (vis_server == BATADV_VIS_TYPE_CLIENT_UPDATE)
goto out;
- buf_size = 1;
- /* Estimate length */
spin_lock_bh(&bat_priv->vis_hash_lock);
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(info, node, head, hash_entry) {
- packet = (struct vis_packet *)info->skb_packet->data;
- entries = (struct vis_info_entry *)
- ((char *)packet + sizeof(*packet));
-
- for (j = 0; j < packet->entries; j++) {
- if (entries[j].quality == 0)
- continue;
- compare =
- compare_eth(entries[j].src, packet->vis_orig);
- vis_data_insert_interface(entries[j].src,
- &vis_if_list,
- compare);
- }
-
- hlist_for_each_entry(entry, pos, &vis_if_list, list) {
- buf_size += 18 + 26 * packet->entries;
-
- /* add primary/secondary records */
- if (compare_eth(entry->addr, packet->vis_orig))
- buf_size +=
- vis_data_count_prim_sec(&vis_if_list);
-
- buf_size += 1;
- }
-
- hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
- list) {
- hlist_del(&entry->list);
- kfree(entry);
- }
- }
- rcu_read_unlock();
- }
-
- buff = kmalloc(buf_size, GFP_ATOMIC);
- if (!buff) {
- spin_unlock_bh(&bat_priv->vis_hash_lock);
- ret = -ENOMEM;
- goto out;
- }
- buff[0] = '\0';
- buff_pos = 0;
-
- for (i = 0; i < hash->size; i++) {
- head = &hash->table[i];
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(info, node, head, hash_entry) {
- packet = (struct vis_packet *)info->skb_packet->data;
- entries = (struct vis_info_entry *)
- ((char *)packet + sizeof(*packet));
-
- for (j = 0; j < packet->entries; j++) {
- if (entries[j].quality == 0)
- continue;
- compare =
- compare_eth(entries[j].src, packet->vis_orig);
- vis_data_insert_interface(entries[j].src,
- &vis_if_list,
- compare);
- }
-
- hlist_for_each_entry(entry, pos, &vis_if_list, list) {
- buff_pos += sprintf(buff + buff_pos, "%pM,",
- entry->addr);
-
- for (j = 0; j < packet->entries; j++)
- buff_pos += vis_data_read_entry(
- buff + buff_pos,
- &entries[j],
- entry->addr,
- entry->primary);
-
- /* add primary/secondary records */
- if (compare_eth(entry->addr, packet->vis_orig))
- buff_pos +=
- vis_data_read_prim_sec(buff + buff_pos,
- &vis_if_list);
-
- buff_pos += sprintf(buff + buff_pos, "\n");
- }
-
- hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
- list) {
- hlist_del(&entry->list);
- kfree(entry);
- }
- }
- rcu_read_unlock();
+ batadv_vis_seq_print_text_bucket(seq, head);
}
-
spin_unlock_bh(&bat_priv->vis_hash_lock);
- seq_printf(seq, "%s", buff);
- kfree(buff);
-
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
return ret;
}
/* add the info packet to the send list, if it was not
- * already linked in. */
-static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
+ * already linked in.
+ */
+static void batadv_send_list_add(struct batadv_priv *bat_priv,
+ struct batadv_vis_info *info)
{
if (list_empty(&info->send_list)) {
kref_get(&info->refcount);
@@ -337,20 +290,21 @@ static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
}
/* delete the info packet from the send list, if it was
- * linked in. */
-static void send_list_del(struct vis_info *info)
+ * linked in.
+ */
+static void batadv_send_list_del(struct batadv_vis_info *info)
{
if (!list_empty(&info->send_list)) {
list_del_init(&info->send_list);
- kref_put(&info->refcount, free_info);
+ kref_put(&info->refcount, batadv_free_info);
}
}
/* tries to add one entry to the receive list. */
-static void recv_list_add(struct bat_priv *bat_priv,
- struct list_head *recv_list, const char *mac)
+static void batadv_recv_list_add(struct batadv_priv *bat_priv,
+ struct list_head *recv_list, const char *mac)
{
- struct recvlist_node *entry;
+ struct batadv_recvlist_node *entry;
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
@@ -363,14 +317,15 @@ static void recv_list_add(struct bat_priv *bat_priv,
}
/* returns 1 if this mac is in the recv_list */
-static int recv_list_is_in(struct bat_priv *bat_priv,
- const struct list_head *recv_list, const char *mac)
+static int batadv_recv_list_is_in(struct batadv_priv *bat_priv,
+ const struct list_head *recv_list,
+ const char *mac)
{
- const struct recvlist_node *entry;
+ const struct batadv_recvlist_node *entry;
spin_lock_bh(&bat_priv->vis_list_lock);
list_for_each_entry(entry, recv_list, list) {
- if (compare_eth(entry->mac, mac)) {
+ if (batadv_compare_eth(entry->mac, mac)) {
spin_unlock_bh(&bat_priv->vis_list_lock);
return 1;
}
@@ -381,17 +336,21 @@ static int recv_list_is_in(struct bat_priv *bat_priv,
/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
* broken.. ). vis hash must be locked outside. is_new is set when the packet
- * is newer than old entries in the hash. */
-static struct vis_info *add_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len, int *is_new,
- int make_broadcast)
+ * is newer than old entries in the hash.
+ */
+static struct batadv_vis_info *
+batadv_add_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_packet *vis_packet, int vis_info_len,
+ int *is_new, int make_broadcast)
{
- struct vis_info *info, *old_info;
- struct vis_packet *search_packet, *old_packet;
- struct vis_info search_elem;
- struct vis_packet *packet;
+ struct batadv_vis_info *info, *old_info;
+ struct batadv_vis_packet *search_packet, *old_packet;
+ struct batadv_vis_info search_elem;
+ struct batadv_vis_packet *packet;
+ struct sk_buff *tmp_skb;
int hash_added;
+ size_t len;
+ size_t max_entries;
*is_new = 0;
/* sanity check */
@@ -402,20 +361,23 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
if (!search_elem.skb_packet)
return NULL;
- search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
- sizeof(*search_packet));
+ len = sizeof(*search_packet);
+ tmp_skb = search_elem.skb_packet;
+ search_packet = (struct batadv_vis_packet *)skb_put(tmp_skb, len);
memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
- old_info = vis_hash_find(bat_priv, &search_elem);
+ old_info = batadv_vis_hash_find(bat_priv, &search_elem);
kfree_skb(search_elem.skb_packet);
if (old_info) {
- old_packet = (struct vis_packet *)old_info->skb_packet->data;
- if (!seq_after(ntohl(vis_packet->seqno),
- ntohl(old_packet->seqno))) {
+ tmp_skb = old_info->skb_packet;
+ old_packet = (struct batadv_vis_packet *)tmp_skb->data;
+ if (!batadv_seq_after(ntohl(vis_packet->seqno),
+ ntohl(old_packet->seqno))) {
if (old_packet->seqno == vis_packet->seqno) {
- recv_list_add(bat_priv, &old_info->recv_list,
- vis_packet->sender_orig);
+ batadv_recv_list_add(bat_priv,
+ &old_info->recv_list,
+ vis_packet->sender_orig);
return old_info;
} else {
/* newer packet is already in hash. */
@@ -423,52 +385,53 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
}
}
/* remove old entry */
- hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
- old_info);
- send_list_del(old_info);
- kref_put(&old_info->refcount, free_info);
+ batadv_hash_remove(bat_priv->vis_hash, batadv_vis_info_cmp,
+ batadv_vis_info_choose, old_info);
+ batadv_send_list_del(old_info);
+ kref_put(&old_info->refcount, batadv_free_info);
}
info = kmalloc(sizeof(*info), GFP_ATOMIC);
if (!info)
return NULL;
- info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
- ETH_HLEN);
+ len = sizeof(*packet) + vis_info_len;
+ info->skb_packet = dev_alloc_skb(len + ETH_HLEN);
if (!info->skb_packet) {
kfree(info);
return NULL;
}
skb_reserve(info->skb_packet, ETH_HLEN);
- packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
- + vis_info_len);
+ packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
kref_init(&info->refcount);
INIT_LIST_HEAD(&info->send_list);
INIT_LIST_HEAD(&info->recv_list);
info->first_seen = jiffies;
info->bat_priv = bat_priv;
- memcpy(packet, vis_packet, sizeof(*packet) + vis_info_len);
+ memcpy(packet, vis_packet, len);
/* initialize and add new packet. */
*is_new = 1;
/* Make it a broadcast packet, if required */
if (make_broadcast)
- memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
+ memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
/* repair if entries is longer than packet. */
- if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
- packet->entries = vis_info_len / sizeof(struct vis_info_entry);
+ max_entries = vis_info_len / sizeof(struct batadv_vis_info_entry);
+ if (packet->entries > max_entries)
+ packet->entries = max_entries;
- recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
+ batadv_recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);
/* try to add it */
- hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
- info, &info->hash_entry);
+ hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+ batadv_vis_info_choose, info,
+ &info->hash_entry);
if (hash_added != 0) {
/* did not work (for some reason) */
- kref_put(&info->refcount, free_info);
+ kref_put(&info->refcount, batadv_free_info);
info = NULL;
}
@@ -476,37 +439,38 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
}
/* handle the server sync packet, forward if needed. */
-void receive_server_sync_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len)
+void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_packet *vis_packet,
+ int vis_info_len)
{
- struct vis_info *info;
+ struct batadv_vis_info *info;
int is_new, make_broadcast;
int vis_server = atomic_read(&bat_priv->vis_mode);
- make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);
+ make_broadcast = (vis_server == BATADV_VIS_TYPE_SERVER_SYNC);
spin_lock_bh(&bat_priv->vis_hash_lock);
- info = add_packet(bat_priv, vis_packet, vis_info_len,
- &is_new, make_broadcast);
+ info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
+ &is_new, make_broadcast);
if (!info)
goto end;
/* only if we are server ourselves and packet is newer than the one in
- * hash.*/
- if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
- send_list_add(bat_priv, info);
+ * hash.
+ */
+ if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC && is_new)
+ batadv_send_list_add(bat_priv, info);
end:
spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* handle an incoming client update packet and schedule forward if needed. */
-void receive_client_update_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len)
+void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_packet *vis_packet,
+ int vis_info_len)
{
- struct vis_info *info;
- struct vis_packet *packet;
+ struct batadv_vis_info *info;
+ struct batadv_vis_packet *packet;
int is_new;
int vis_server = atomic_read(&bat_priv->vis_mode);
int are_target = 0;
@@ -516,28 +480,28 @@ void receive_client_update_packet(struct bat_priv *bat_priv,
return;
/* Are we the target for this VIS packet? */
- if (vis_server == VIS_TYPE_SERVER_SYNC &&
- is_my_mac(vis_packet->target_orig))
+ if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC &&
+ batadv_is_my_mac(vis_packet->target_orig))
are_target = 1;
spin_lock_bh(&bat_priv->vis_hash_lock);
- info = add_packet(bat_priv, vis_packet, vis_info_len,
- &is_new, are_target);
+ info = batadv_add_packet(bat_priv, vis_packet, vis_info_len,
+ &is_new, are_target);
if (!info)
goto end;
/* note that outdated packets will be dropped at this point. */
- packet = (struct vis_packet *)info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
/* send only if we're the target server or ... */
if (are_target && is_new) {
- packet->vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */
- send_list_add(bat_priv, info);
+ packet->vis_type = BATADV_VIS_TYPE_SERVER_SYNC; /* upgrade! */
+ batadv_send_list_add(bat_priv, info);
/* ... we're not the recipient (and thus need to forward). */
- } else if (!is_my_mac(packet->target_orig)) {
- send_list_add(bat_priv, info);
+ } else if (!batadv_is_my_mac(packet->target_orig)) {
+ batadv_send_list_add(bat_priv, info);
}
end:
@@ -547,37 +511,38 @@ end:
/* Walk the originators and find the VIS server with the best tq. Set the packet
* address to its address and return the best_tq.
*
- * Must be called with the originator hash locked */
-static int find_best_vis_server(struct bat_priv *bat_priv,
- struct vis_info *info)
+ * Must be called with the originator hash locked
+ */
+static int batadv_find_best_vis_server(struct batadv_priv *bat_priv,
+ struct batadv_vis_info *info)
{
- struct hashtable_t *hash = bat_priv->orig_hash;
- struct neigh_node *router;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
+ struct batadv_neigh_node *router;
struct hlist_node *node;
struct hlist_head *head;
- struct orig_node *orig_node;
- struct vis_packet *packet;
+ struct batadv_orig_node *orig_node;
+ struct batadv_vis_packet *packet;
int best_tq = -1;
uint32_t i;
- packet = (struct vis_packet *)info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
continue;
- if ((orig_node->flags & VIS_SERVER) &&
+ if ((orig_node->flags & BATADV_VIS_SERVER) &&
(router->tq_avg > best_tq)) {
best_tq = router->tq_avg;
memcpy(packet->target_orig, orig_node->orig,
ETH_ALEN);
}
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
}
rcu_read_unlock();
}
@@ -586,47 +551,52 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
}
/* Return true if the vis packet is full. */
-static bool vis_packet_full(const struct vis_info *info)
+static bool batadv_vis_packet_full(const struct batadv_vis_info *info)
{
- const struct vis_packet *packet;
- packet = (struct vis_packet *)info->skb_packet->data;
+ const struct batadv_vis_packet *packet;
+ size_t num;
+
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
+ num = BATADV_MAX_VIS_PACKET_SIZE / sizeof(struct batadv_vis_info_entry);
- if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
- < packet->entries + 1)
+ if (num < packet->entries + 1)
return true;
return false;
}
/* generates a packet of own vis data,
- * returns 0 on success, -1 if no packet could be generated */
-static int generate_vis_packet(struct bat_priv *bat_priv)
+ * returns 0 on success, -1 if no packet could be generated
+ */
+static int batadv_generate_vis_packet(struct batadv_priv *bat_priv)
{
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
- struct orig_node *orig_node;
- struct neigh_node *router;
- struct vis_info *info = bat_priv->my_vis_info;
- struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
- struct vis_info_entry *entry;
- struct tt_common_entry *tt_common_entry;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *router;
+ struct batadv_vis_info *info = bat_priv->my_vis_info;
+ struct batadv_vis_packet *packet;
+ struct batadv_vis_info_entry *entry;
+ struct batadv_tt_common_entry *tt_common_entry;
int best_tq = -1;
uint32_t i;
info->first_seen = jiffies;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
packet->vis_type = atomic_read(&bat_priv->vis_mode);
- memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
- packet->header.ttl = TTL;
+ memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN);
+ packet->header.ttl = BATADV_TTL;
packet->seqno = htonl(ntohl(packet->seqno) + 1);
packet->entries = 0;
+ packet->reserved = 0;
skb_trim(info->skb_packet, sizeof(*packet));
- if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
- best_tq = find_best_vis_server(bat_priv, info);
+ if (packet->vis_type == BATADV_VIS_TYPE_CLIENT_UPDATE) {
+ best_tq = batadv_find_best_vis_server(bat_priv, info);
if (best_tq < 0)
- return -1;
+ return best_tq;
}
for (i = 0; i < hash->size; i++) {
@@ -634,21 +604,21 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
continue;
- if (!compare_eth(router->addr, orig_node->orig))
+ if (!batadv_compare_eth(router->addr, orig_node->orig))
goto next;
- if (router->if_incoming->if_status != IF_ACTIVE)
+ if (router->if_incoming->if_status != BATADV_IF_ACTIVE)
goto next;
if (router->tq_avg < 1)
goto next;
/* fill one entry into buffer. */
- entry = (struct vis_info_entry *)
+ entry = (struct batadv_vis_info_entry *)
skb_put(info->skb_packet, sizeof(*entry));
memcpy(entry->src,
router->if_incoming->net_dev->dev_addr,
@@ -658,9 +628,9 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
packet->entries++;
next:
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
- if (vis_packet_full(info))
+ if (batadv_vis_packet_full(info))
goto unlock;
}
rcu_read_unlock();
@@ -674,7 +644,7 @@ next:
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common_entry, node, head,
hash_entry) {
- entry = (struct vis_info_entry *)
+ entry = (struct batadv_vis_info_entry *)
skb_put(info->skb_packet,
sizeof(*entry));
memset(entry->src, 0, ETH_ALEN);
@@ -682,7 +652,7 @@ next:
entry->quality = 0; /* 0 means TT */
packet->entries++;
- if (vis_packet_full(info))
+ if (batadv_vis_packet_full(info))
goto unlock;
}
rcu_read_unlock();
@@ -696,14 +666,15 @@ unlock:
}
/* free old vis packets. Must be called with this vis_hash_lock
- * held */
-static void purge_vis_packets(struct bat_priv *bat_priv)
+ * held
+ */
+static void batadv_purge_vis_packets(struct batadv_priv *bat_priv)
{
uint32_t i;
- struct hashtable_t *hash = bat_priv->vis_hash;
+ struct batadv_hashtable *hash = bat_priv->vis_hash;
struct hlist_node *node, *node_tmp;
struct hlist_head *head;
- struct vis_info *info;
+ struct batadv_vis_info *info;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -714,31 +685,32 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
if (info == bat_priv->my_vis_info)
continue;
- if (has_timed_out(info->first_seen, VIS_TIMEOUT)) {
+ if (batadv_has_timed_out(info->first_seen,
+ BATADV_VIS_TIMEOUT)) {
hlist_del(node);
- send_list_del(info);
- kref_put(&info->refcount, free_info);
+ batadv_send_list_del(info);
+ kref_put(&info->refcount, batadv_free_info);
}
}
}
}
-static void broadcast_vis_packet(struct bat_priv *bat_priv,
- struct vis_info *info)
+static void batadv_broadcast_vis_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_info *info)
{
- struct neigh_node *router;
- struct hashtable_t *hash = bat_priv->orig_hash;
+ struct batadv_neigh_node *router;
+ struct batadv_hashtable *hash = bat_priv->orig_hash;
struct hlist_node *node;
struct hlist_head *head;
- struct orig_node *orig_node;
- struct vis_packet *packet;
+ struct batadv_orig_node *orig_node;
+ struct batadv_vis_packet *packet;
struct sk_buff *skb;
- struct hard_iface *hard_iface;
+ struct batadv_hard_iface *hard_iface;
uint8_t dstaddr[ETH_ALEN];
uint32_t i;
- packet = (struct vis_packet *)info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
/* send to all routers in range. */
for (i = 0; i < hash->size; i++) {
@@ -747,18 +719,19 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
rcu_read_lock();
hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
/* if it's a vis server and reachable, send it. */
- if (!(orig_node->flags & VIS_SERVER))
+ if (!(orig_node->flags & BATADV_VIS_SERVER))
continue;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
continue;
/* don't send it if we already received the packet from
- * this node. */
- if (recv_list_is_in(bat_priv, &info->recv_list,
- orig_node->orig)) {
- neigh_node_free_ref(router);
+ * this node.
+ */
+ if (batadv_recv_list_is_in(bat_priv, &info->recv_list,
+ orig_node->orig)) {
+ batadv_neigh_node_free_ref(router);
continue;
}
@@ -766,57 +739,59 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
hard_iface = router->if_incoming;
memcpy(dstaddr, router->addr, ETH_ALEN);
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
- send_skb_packet(skb, hard_iface, dstaddr);
+ batadv_send_skb_packet(skb, hard_iface,
+ dstaddr);
}
rcu_read_unlock();
}
}
-static void unicast_vis_packet(struct bat_priv *bat_priv,
- struct vis_info *info)
+static void batadv_unicast_vis_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_info *info)
{
- struct orig_node *orig_node;
- struct neigh_node *router = NULL;
+ struct batadv_orig_node *orig_node;
+ struct batadv_neigh_node *router = NULL;
struct sk_buff *skb;
- struct vis_packet *packet;
+ struct batadv_vis_packet *packet;
- packet = (struct vis_packet *)info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
- orig_node = orig_hash_find(bat_priv, packet->target_orig);
+ orig_node = batadv_orig_hash_find(bat_priv, packet->target_orig);
if (!orig_node)
goto out;
- router = orig_node_get_router(orig_node);
+ router = batadv_orig_node_get_router(orig_node);
if (!router)
goto out;
skb = skb_clone(info->skb_packet, GFP_ATOMIC);
if (skb)
- send_skb_packet(skb, router->if_incoming, router->addr);
+ batadv_send_skb_packet(skb, router->if_incoming, router->addr);
out:
if (router)
- neigh_node_free_ref(router);
+ batadv_neigh_node_free_ref(router);
if (orig_node)
- orig_node_free_ref(orig_node);
+ batadv_orig_node_free_ref(orig_node);
}
-/* only send one vis packet. called from send_vis_packets() */
-static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
+/* only send one vis packet. called from batadv_send_vis_packets() */
+static void batadv_send_vis_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_info *info)
{
- struct hard_iface *primary_if;
- struct vis_packet *packet;
+ struct batadv_hard_iface *primary_if;
+ struct batadv_vis_packet *packet;
- primary_if = primary_if_get_selected(bat_priv);
+ primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if)
goto out;
- packet = (struct vis_packet *)info->skb_packet->data;
+ packet = (struct batadv_vis_packet *)info->skb_packet->data;
if (packet->header.ttl < 2) {
pr_debug("Error - can't send vis packet: ttl exceeded\n");
goto out;
@@ -826,31 +801,31 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
packet->header.ttl--;
if (is_broadcast_ether_addr(packet->target_orig))
- broadcast_vis_packet(bat_priv, info);
+ batadv_broadcast_vis_packet(bat_priv, info);
else
- unicast_vis_packet(bat_priv, info);
+ batadv_unicast_vis_packet(bat_priv, info);
packet->header.ttl++; /* restore TTL */
out:
if (primary_if)
- hardif_free_ref(primary_if);
+ batadv_hardif_free_ref(primary_if);
}
/* called from timer; send (and maybe generate) vis packet. */
-static void send_vis_packets(struct work_struct *work)
+static void batadv_send_vis_packets(struct work_struct *work)
{
struct delayed_work *delayed_work =
container_of(work, struct delayed_work, work);
- struct bat_priv *bat_priv =
- container_of(delayed_work, struct bat_priv, vis_work);
- struct vis_info *info;
+ struct batadv_priv *bat_priv;
+ struct batadv_vis_info *info;
+ bat_priv = container_of(delayed_work, struct batadv_priv, vis_work);
spin_lock_bh(&bat_priv->vis_hash_lock);
- purge_vis_packets(bat_priv);
+ batadv_purge_vis_packets(bat_priv);
- if (generate_vis_packet(bat_priv) == 0) {
+ if (batadv_generate_vis_packet(bat_priv) == 0) {
/* schedule if generation was successful */
- send_list_add(bat_priv, bat_priv->my_vis_info);
+ batadv_send_list_add(bat_priv, bat_priv->my_vis_info);
}
while (!list_empty(&bat_priv->vis_send_list)) {
@@ -860,98 +835,103 @@ static void send_vis_packets(struct work_struct *work)
kref_get(&info->refcount);
spin_unlock_bh(&bat_priv->vis_hash_lock);
- send_vis_packet(bat_priv, info);
+ batadv_send_vis_packet(bat_priv, info);
spin_lock_bh(&bat_priv->vis_hash_lock);
- send_list_del(info);
- kref_put(&info->refcount, free_info);
+ batadv_send_list_del(info);
+ kref_put(&info->refcount, batadv_free_info);
}
spin_unlock_bh(&bat_priv->vis_hash_lock);
- start_vis_timer(bat_priv);
+ batadv_start_vis_timer(bat_priv);
}
/* init the vis server. this may only be called when if_list is already
- * initialized (e.g. bat0 is initialized, interfaces have been added) */
-int vis_init(struct bat_priv *bat_priv)
+ * initialized (e.g. bat0 is initialized, interfaces have been added)
+ */
+int batadv_vis_init(struct batadv_priv *bat_priv)
{
- struct vis_packet *packet;
+ struct batadv_vis_packet *packet;
int hash_added;
+ unsigned int len;
+ unsigned long first_seen;
+ struct sk_buff *tmp_skb;
if (bat_priv->vis_hash)
- return 1;
+ return 0;
spin_lock_bh(&bat_priv->vis_hash_lock);
- bat_priv->vis_hash = hash_new(256);
+ bat_priv->vis_hash = batadv_hash_new(256);
if (!bat_priv->vis_hash) {
pr_err("Can't initialize vis_hash\n");
goto err;
}
- bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
+ bat_priv->my_vis_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
if (!bat_priv->my_vis_info)
goto err;
- bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
- MAX_VIS_PACKET_SIZE +
- ETH_HLEN);
+ len = sizeof(*packet) + BATADV_MAX_VIS_PACKET_SIZE + ETH_HLEN;
+ bat_priv->my_vis_info->skb_packet = dev_alloc_skb(len);
if (!bat_priv->my_vis_info->skb_packet)
goto free_info;
skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
- packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
- sizeof(*packet));
+ tmp_skb = bat_priv->my_vis_info->skb_packet;
+ packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
/* prefill the vis info */
- bat_priv->my_vis_info->first_seen = jiffies -
- msecs_to_jiffies(VIS_INTERVAL);
+ first_seen = jiffies - msecs_to_jiffies(BATADV_VIS_INTERVAL);
+ bat_priv->my_vis_info->first_seen = first_seen;
INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
kref_init(&bat_priv->my_vis_info->refcount);
bat_priv->my_vis_info->bat_priv = bat_priv;
- packet->header.version = COMPAT_VERSION;
- packet->header.packet_type = BAT_VIS;
- packet->header.ttl = TTL;
+ packet->header.version = BATADV_COMPAT_VERSION;
+ packet->header.packet_type = BATADV_VIS;
+ packet->header.ttl = BATADV_TTL;
packet->seqno = 0;
+ packet->reserved = 0;
packet->entries = 0;
INIT_LIST_HEAD(&bat_priv->vis_send_list);
- hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
- bat_priv->my_vis_info,
- &bat_priv->my_vis_info->hash_entry);
+ hash_added = batadv_hash_add(bat_priv->vis_hash, batadv_vis_info_cmp,
+ batadv_vis_info_choose,
+ bat_priv->my_vis_info,
+ &bat_priv->my_vis_info->hash_entry);
if (hash_added != 0) {
pr_err("Can't add own vis packet into hash\n");
/* not in hash, need to remove it manually. */
- kref_put(&bat_priv->my_vis_info->refcount, free_info);
+ kref_put(&bat_priv->my_vis_info->refcount, batadv_free_info);
goto err;
}
spin_unlock_bh(&bat_priv->vis_hash_lock);
- start_vis_timer(bat_priv);
- return 1;
+ batadv_start_vis_timer(bat_priv);
+ return 0;
free_info:
kfree(bat_priv->my_vis_info);
bat_priv->my_vis_info = NULL;
err:
spin_unlock_bh(&bat_priv->vis_hash_lock);
- vis_quit(bat_priv);
- return 0;
+ batadv_vis_quit(bat_priv);
+ return -ENOMEM;
}
/* Decrease the reference count on a hash item info */
-static void free_info_ref(struct hlist_node *node, void *arg)
+static void batadv_free_info_ref(struct hlist_node *node, void *arg)
{
- struct vis_info *info;
+ struct batadv_vis_info *info;
- info = container_of(node, struct vis_info, hash_entry);
- send_list_del(info);
- kref_put(&info->refcount, free_info);
+ info = container_of(node, struct batadv_vis_info, hash_entry);
+ batadv_send_list_del(info);
+ kref_put(&info->refcount, batadv_free_info);
}
/* shutdown vis-server */
-void vis_quit(struct bat_priv *bat_priv)
+void batadv_vis_quit(struct batadv_priv *bat_priv)
{
if (!bat_priv->vis_hash)
return;
@@ -960,16 +940,16 @@ void vis_quit(struct bat_priv *bat_priv)
spin_lock_bh(&bat_priv->vis_hash_lock);
/* properly remove, kill timers ... */
- hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
+ batadv_hash_delete(bat_priv->vis_hash, batadv_free_info_ref, NULL);
bat_priv->vis_hash = NULL;
bat_priv->my_vis_info = NULL;
spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* schedule packets for (re)transmission */
-static void start_vis_timer(struct bat_priv *bat_priv)
+static void batadv_start_vis_timer(struct batadv_priv *bat_priv)
{
- INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
- queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
- msecs_to_jiffies(VIS_INTERVAL));
+ INIT_DELAYED_WORK(&bat_priv->vis_work, batadv_send_vis_packets);
+ queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work,
+ msecs_to_jiffies(BATADV_VIS_INTERVAL));
}
diff --git a/net/batman-adv/vis.h b/net/batman-adv/vis.h
index ee2e46e5347b..84e716ed8963 100644
--- a/net/batman-adv/vis.h
+++ b/net/batman-adv/vis.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
@@ -16,23 +15,22 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
- *
*/
#ifndef _NET_BATMAN_ADV_VIS_H_
#define _NET_BATMAN_ADV_VIS_H_
-#define VIS_TIMEOUT 200000 /* timeout of vis packets
- * in miliseconds */
+/* timeout of vis packets in miliseconds */
+#define BATADV_VIS_TIMEOUT 200000
-int vis_seq_print_text(struct seq_file *seq, void *offset);
-void receive_server_sync_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len);
-void receive_client_update_packet(struct bat_priv *bat_priv,
- struct vis_packet *vis_packet,
- int vis_info_len);
-int vis_init(struct bat_priv *bat_priv);
-void vis_quit(struct bat_priv *bat_priv);
+int batadv_vis_seq_print_text(struct seq_file *seq, void *offset);
+void batadv_receive_server_sync_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_packet *vis_packet,
+ int vis_info_len);
+void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,
+ struct batadv_vis_packet *vis_packet,
+ int vis_info_len);
+int batadv_vis_init(struct batadv_priv *bat_priv);
+void batadv_vis_quit(struct batadv_priv *bat_priv);
#endif /* _NET_BATMAN_ADV_VIS_H_ */
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index 2dc5a5700f53..fa6d94a4602a 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
obj-$(CONFIG_BT_HIDP) += hidp/
bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
- hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o
+ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \
+ a2mp.o
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
new file mode 100644
index 000000000000..4ff0bf3ba9a5
--- /dev/null
+++ b/net/bluetooth/a2mp.c
@@ -0,0 +1,568 @@
+/*
+ Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+ Copyright (c) 2011,2012 Intel Corp.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 and
+ only version 2 as published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+*/
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/a2mp.h>
+
+/* A2MP build & send command helper functions */
+static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data)
+{
+ struct a2mp_cmd *cmd;
+ int plen;
+
+ plen = sizeof(*cmd) + len;
+ cmd = kzalloc(plen, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ cmd->code = code;
+ cmd->ident = ident;
+ cmd->len = cpu_to_le16(len);
+
+ memcpy(cmd->data, data, len);
+
+ return cmd;
+}
+
+static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len,
+ void *data)
+{
+ struct l2cap_chan *chan = mgr->a2mp_chan;
+ struct a2mp_cmd *cmd;
+ u16 total_len = len + sizeof(*cmd);
+ struct kvec iv;
+ struct msghdr msg;
+
+ cmd = __a2mp_build(code, ident, len, data);
+ if (!cmd)
+ return;
+
+ iv.iov_base = cmd;
+ iv.iov_len = total_len;
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.msg_iov = (struct iovec *) &iv;
+ msg.msg_iovlen = 1;
+
+ l2cap_chan_send(chan, &msg, total_len, 0);
+
+ kfree(cmd);
+}
+
+static inline void __a2mp_cl_bredr(struct a2mp_cl *cl)
+{
+ cl->id = 0;
+ cl->type = 0;
+ cl->status = 1;
+}
+
+/* hci_dev_list shall be locked */
+static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl)
+{
+ int i = 0;
+ struct hci_dev *hdev;
+
+ __a2mp_cl_bredr(cl);
+
+ list_for_each_entry(hdev, &hci_dev_list, list) {
+ /* Iterate through AMP controllers */
+ if (hdev->id == HCI_BREDR_ID)
+ continue;
+
+ /* Starting from second entry */
+ if (++i >= num_ctrl)
+ return;
+
+ cl[i].id = hdev->id;
+ cl[i].type = hdev->amp_type;
+ cl[i].status = hdev->amp_status;
+ }
+}
+
+/* Processing A2MP messages */
+static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_cmd_rej *rej = (void *) skb->data;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*rej))
+ return -EINVAL;
+
+ BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason));
+
+ skb_pull(skb, sizeof(*rej));
+
+ return 0;
+}
+
+static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_discov_req *req = (void *) skb->data;
+ u16 len = le16_to_cpu(hdr->len);
+ struct a2mp_discov_rsp *rsp;
+ u16 ext_feat;
+ u8 num_ctrl;
+
+ if (len < sizeof(*req))
+ return -EINVAL;
+
+ skb_pull(skb, sizeof(*req));
+
+ ext_feat = le16_to_cpu(req->ext_feat);
+
+ BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat);
+
+ /* check that packet is not broken for now */
+ while (ext_feat & A2MP_FEAT_EXT) {
+ if (len < sizeof(ext_feat))
+ return -EINVAL;
+
+ ext_feat = get_unaligned_le16(skb->data);
+ BT_DBG("efm 0x%4.4x", ext_feat);
+ len -= sizeof(ext_feat);
+ skb_pull(skb, sizeof(ext_feat));
+ }
+
+ read_lock(&hci_dev_list_lock);
+
+ num_ctrl = __hci_num_ctrl();
+ len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp);
+ rsp = kmalloc(len, GFP_ATOMIC);
+ if (!rsp) {
+ read_unlock(&hci_dev_list_lock);
+ return -ENOMEM;
+ }
+
+ rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
+ rsp->ext_feat = 0;
+
+ __a2mp_add_cl(mgr, rsp->cl, num_ctrl);
+
+ read_unlock(&hci_dev_list_lock);
+
+ a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp);
+
+ kfree(rsp);
+ return 0;
+}
+
+static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_cl *cl = (void *) skb->data;
+
+ while (skb->len >= sizeof(*cl)) {
+ BT_DBG("Controller id %d type %d status %d", cl->id, cl->type,
+ cl->status);
+ cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
+ }
+
+ /* TODO send A2MP_CHANGE_RSP */
+
+ return 0;
+}
+
+static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_info_req *req = (void *) skb->data;
+ struct a2mp_info_rsp rsp;
+ struct hci_dev *hdev;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("id %d", req->id);
+
+ rsp.id = req->id;
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+ hdev = hci_dev_get(req->id);
+ if (hdev && hdev->amp_type != HCI_BREDR) {
+ rsp.status = 0;
+ rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
+ rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
+ rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
+ rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
+ rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
+ }
+
+ if (hdev)
+ hci_dev_put(hdev);
+
+ a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_amp_assoc_req *req = (void *) skb->data;
+ struct hci_dev *hdev;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("id %d", req->id);
+
+ hdev = hci_dev_get(req->id);
+ if (!hdev || hdev->amp_type == HCI_BREDR) {
+ struct a2mp_amp_assoc_rsp rsp;
+ rsp.id = req->id;
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+
+ a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp),
+ &rsp);
+ goto clean;
+ }
+
+ /* Placeholder for HCI Read AMP Assoc */
+
+clean:
+ if (hdev)
+ hci_dev_put(hdev);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_physlink_req *req = (void *) skb->data;
+
+ struct a2mp_physlink_rsp rsp;
+ struct hci_dev *hdev;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+
+ rsp.local_id = req->remote_id;
+ rsp.remote_id = req->local_id;
+
+ hdev = hci_dev_get(req->remote_id);
+ if (!hdev || hdev->amp_type != HCI_AMP) {
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ goto send_rsp;
+ }
+
+ /* TODO process physlink create */
+
+ rsp.status = A2MP_STATUS_SUCCESS;
+
+send_rsp:
+ if (hdev)
+ hci_dev_put(hdev);
+
+ a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp),
+ &rsp);
+
+ skb_pull(skb, le16_to_cpu(hdr->len));
+ return 0;
+}
+
+static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ struct a2mp_physlink_req *req = (void *) skb->data;
+ struct a2mp_physlink_rsp rsp;
+ struct hci_dev *hdev;
+
+ if (le16_to_cpu(hdr->len) < sizeof(*req))
+ return -EINVAL;
+
+ BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
+
+ rsp.local_id = req->remote_id;
+ rsp.remote_id = req->local_id;
+ rsp.status = A2MP_STATUS_SUCCESS;
+
+ hdev = hci_dev_get(req->local_id);
+ if (!hdev) {
+ rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
+ goto send_rsp;
+ }
+
+ /* TODO Disconnect Phys Link here */
+
+ hci_dev_put(hdev);
+
+send_rsp:
+ a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp);
+
+ skb_pull(skb, sizeof(*req));
+ return 0;
+}
+
+static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
+ struct a2mp_cmd *hdr)
+{
+ BT_DBG("ident %d code %d", hdr->ident, hdr->code);
+
+ skb_pull(skb, le16_to_cpu(hdr->len));
+ return 0;
+}
+
+/* Handle A2MP signalling */
+static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct a2mp_cmd *hdr = (void *) skb->data;
+ struct amp_mgr *mgr = chan->data;
+ int err = 0;
+
+ amp_mgr_get(mgr);
+
+ while (skb->len >= sizeof(*hdr)) {
+ struct a2mp_cmd *hdr = (void *) skb->data;
+ u16 len = le16_to_cpu(hdr->len);
+
+ BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len);
+
+ skb_pull(skb, sizeof(*hdr));
+
+ if (len > skb->len || !hdr->ident) {
+ err = -EINVAL;
+ break;
+ }
+
+ mgr->ident = hdr->ident;
+
+ switch (hdr->code) {
+ case A2MP_COMMAND_REJ:
+ a2mp_command_rej(mgr, skb, hdr);
+ break;
+
+ case A2MP_DISCOVER_REQ:
+ err = a2mp_discover_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_CHANGE_NOTIFY:
+ err = a2mp_change_notify(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETINFO_REQ:
+ err = a2mp_getinfo_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_GETAMPASSOC_REQ:
+ err = a2mp_getampassoc_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_CREATEPHYSLINK_REQ:
+ err = a2mp_createphyslink_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_DISCONNPHYSLINK_REQ:
+ err = a2mp_discphyslink_req(mgr, skb, hdr);
+ break;
+
+ case A2MP_CHANGE_RSP:
+ case A2MP_DISCOVER_RSP:
+ case A2MP_GETINFO_RSP:
+ case A2MP_GETAMPASSOC_RSP:
+ case A2MP_CREATEPHYSLINK_RSP:
+ case A2MP_DISCONNPHYSLINK_RSP:
+ err = a2mp_cmd_rsp(mgr, skb, hdr);
+ break;
+
+ default:
+ BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ if (err) {
+ struct a2mp_cmd_rej rej;
+ rej.reason = __constant_cpu_to_le16(0);
+
+ BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err);
+
+ a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej),
+ &rej);
+ }
+
+ /* Always free skb and return success error code to prevent
+ from sending L2CAP Disconnect over A2MP channel */
+ kfree_skb(skb);
+
+ amp_mgr_put(mgr);
+
+ return 0;
+}
+
+static void a2mp_chan_close_cb(struct l2cap_chan *chan)
+{
+ l2cap_chan_destroy(chan);
+}
+
+static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state)
+{
+ struct amp_mgr *mgr = chan->data;
+
+ if (!mgr)
+ return;
+
+ BT_DBG("chan %p state %s", chan, state_to_string(state));
+
+ chan->state = state;
+
+ switch (state) {
+ case BT_CLOSED:
+ if (mgr)
+ amp_mgr_put(mgr);
+ break;
+ }
+}
+
+static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan,
+ unsigned long len, int nb)
+{
+ return bt_skb_alloc(len, GFP_KERNEL);
+}
+
+static struct l2cap_ops a2mp_chan_ops = {
+ .name = "L2CAP A2MP channel",
+ .recv = a2mp_chan_recv_cb,
+ .close = a2mp_chan_close_cb,
+ .state_change = a2mp_chan_state_change_cb,
+ .alloc_skb = a2mp_chan_alloc_skb_cb,
+
+ /* Not implemented for A2MP */
+ .new_connection = l2cap_chan_no_new_connection,
+ .teardown = l2cap_chan_no_teardown,
+ .ready = l2cap_chan_no_ready,
+};
+
+static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn)
+{
+ struct l2cap_chan *chan;
+ int err;
+
+ chan = l2cap_chan_create();
+ if (!chan)
+ return NULL;
+
+ BT_DBG("chan %p", chan);
+
+ chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP;
+ chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
+
+ chan->ops = &a2mp_chan_ops;
+
+ l2cap_chan_set_defaults(chan);
+ chan->remote_max_tx = chan->max_tx;
+ chan->remote_tx_win = chan->tx_win;
+
+ chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+
+ skb_queue_head_init(&chan->tx_q);
+
+ chan->mode = L2CAP_MODE_ERTM;
+
+ err = l2cap_ertm_init(chan);
+ if (err < 0) {
+ l2cap_chan_del(chan, 0);
+ return NULL;
+ }
+
+ chan->conf_state = 0;
+
+ l2cap_chan_add(conn, chan);
+
+ chan->remote_mps = chan->omtu;
+ chan->mps = chan->omtu;
+
+ chan->state = BT_CONNECTED;
+
+ return chan;
+}
+
+/* AMP Manager functions */
+void amp_mgr_get(struct amp_mgr *mgr)
+{
+ BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+
+ kref_get(&mgr->kref);
+}
+
+static void amp_mgr_destroy(struct kref *kref)
+{
+ struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref);
+
+ BT_DBG("mgr %p", mgr);
+
+ kfree(mgr);
+}
+
+int amp_mgr_put(struct amp_mgr *mgr)
+{
+ BT_DBG("mgr %p orig refcnt %d", mgr, atomic_read(&mgr->kref.refcount));
+
+ return kref_put(&mgr->kref, &amp_mgr_destroy);
+}
+
+static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn)
+{
+ struct amp_mgr *mgr;
+ struct l2cap_chan *chan;
+
+ mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+ if (!mgr)
+ return NULL;
+
+ BT_DBG("conn %p mgr %p", conn, mgr);
+
+ mgr->l2cap_conn = conn;
+
+ chan = a2mp_chan_open(conn);
+ if (!chan) {
+ kfree(mgr);
+ return NULL;
+ }
+
+ mgr->a2mp_chan = chan;
+ chan->data = mgr;
+
+ conn->hcon->amp_mgr = mgr;
+
+ kref_init(&mgr->kref);
+
+ return mgr;
+}
+
+struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn,
+ struct sk_buff *skb)
+{
+ struct amp_mgr *mgr;
+
+ mgr = amp_mgr_create(conn);
+ if (!mgr) {
+ BT_ERR("Could not create AMP manager");
+ return NULL;
+ }
+
+ BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan);
+
+ return mgr->a2mp_chan;
+}
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 3e18af4dadc4..f7db5792ec64 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -25,18 +25,7 @@
/* Bluetooth address family and sockets. */
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/init.h>
-#include <linux/poll.h>
-#include <net/sock.h>
#include <asm/ioctls.h>
-#include <linux/kmod.h>
#include <net/bluetooth/bluetooth.h>
@@ -418,7 +407,8 @@ static inline unsigned int bt_accept_poll(struct sock *parent)
return 0;
}
-unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait)
+unsigned int bt_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask = 0;
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 031d7d656754..4a6620bc1570 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -26,26 +26,9 @@
*/
#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/freezer.h>
-#include <linux/errno.h>
-#include <linux/net.h>
-#include <linux/slab.h>
#include <linux/kthread.h>
-#include <net/sock.h>
-
-#include <linux/socket.h>
#include <linux/file.h>
-
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -306,7 +289,7 @@ static u8 __bnep_rx_hlen[] = {
ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
};
-static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct net_device *dev = s->dev;
struct sk_buff *nskb;
@@ -404,7 +387,7 @@ static u8 __bnep_tx_types[] = {
BNEP_COMPRESSED
};
-static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
+static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb)
{
struct ethhdr *eh = (void *) skb->data;
struct socket *sock = s->sock;
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index bc4086480d97..98f86f91d47c 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -25,16 +25,8 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include <linux/socket.h>
-#include <linux/netdevice.h>
+#include <linux/export.h>
#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/wait.h>
-
-#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -128,7 +120,7 @@ static void bnep_net_timeout(struct net_device *dev)
}
#ifdef CONFIG_BT_BNEP_MC_FILTER
-static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s)
{
struct ethhdr *eh = (void *) skb->data;
@@ -140,7 +132,7 @@ static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
/* Determine ether protocol. Based on eth_type_trans. */
-static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
+static u16 bnep_net_eth_proto(struct sk_buff *skb)
{
struct ethhdr *eh = (void *) skb->data;
u16 proto = ntohs(eh->h_proto);
@@ -154,7 +146,7 @@ static inline u16 bnep_net_eth_proto(struct sk_buff *skb)
return ETH_P_802_2;
}
-static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
+static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s)
{
u16 proto = bnep_net_eth_proto(skb);
struct bnep_proto_filter *f = s->proto_filter;
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 180bfc45810d..5e5f5b410e0b 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -24,24 +24,8 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <linux/uaccess.h>
-#include <net/sock.h>
-
#include "bnep.h"
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3f18a6ed9731..5ad7da217474 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -24,24 +24,11 @@
/* Bluetooth HCI connection handling. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/a2mp.h>
static void hci_le_connect(struct hci_conn *conn)
{
@@ -54,15 +41,15 @@ static void hci_le_connect(struct hci_conn *conn)
conn->sec_level = BT_SECURITY_LOW;
memset(&cp, 0, sizeof(cp));
- cp.scan_interval = cpu_to_le16(0x0060);
- cp.scan_window = cpu_to_le16(0x0030);
+ cp.scan_interval = __constant_cpu_to_le16(0x0060);
+ cp.scan_window = __constant_cpu_to_le16(0x0030);
bacpy(&cp.peer_addr, &conn->dst);
cp.peer_addr_type = conn->dst_type;
- cp.conn_interval_min = cpu_to_le16(0x0028);
- cp.conn_interval_max = cpu_to_le16(0x0038);
- cp.supervision_timeout = cpu_to_le16(0x002a);
- cp.min_ce_len = cpu_to_le16(0x0000);
- cp.max_ce_len = cpu_to_le16(0x0000);
+ cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
+ cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
+ cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
+ cp.min_ce_len = __constant_cpu_to_le16(0x0000);
+ cp.max_ce_len = __constant_cpu_to_le16(0x0000);
hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}
@@ -99,7 +86,7 @@ void hci_acl_connect(struct hci_conn *conn)
cp.pscan_rep_mode = ie->data.pscan_rep_mode;
cp.pscan_mode = ie->data.pscan_mode;
cp.clock_offset = ie->data.clock_offset |
- cpu_to_le16(0x8000);
+ __constant_cpu_to_le16(0x8000);
}
memcpy(conn->dev_class, ie->data.dev_class, 3);
@@ -120,7 +107,7 @@ static void hci_acl_connect_cancel(struct hci_conn *conn)
{
struct hci_cp_create_conn_cancel cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
return;
@@ -133,7 +120,7 @@ void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
struct hci_cp_disconnect cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_DISCONN;
@@ -147,7 +134,7 @@ void hci_add_sco(struct hci_conn *conn, __u16 handle)
struct hci_dev *hdev = conn->hdev;
struct hci_cp_add_sco cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
conn->out = true;
@@ -165,7 +152,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
struct hci_dev *hdev = conn->hdev;
struct hci_cp_setup_sync_conn cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
conn->state = BT_CONNECT;
conn->out = true;
@@ -175,9 +162,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
cp.handle = cpu_to_le16(handle);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
- cp.tx_bandwidth = cpu_to_le32(0x00001f40);
- cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0xffff);
+ cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.max_latency = __constant_cpu_to_le16(0xffff);
cp.voice_setting = cpu_to_le16(hdev->voice_setting);
cp.retrans_effort = 0xff;
@@ -185,7 +172,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
}
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
- u16 latency, u16 to_multiplier)
+ u16 latency, u16 to_multiplier)
{
struct hci_cp_le_conn_update cp;
struct hci_dev *hdev = conn->hdev;
@@ -197,20 +184,19 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
cp.conn_interval_max = cpu_to_le16(max);
cp.conn_latency = cpu_to_le16(latency);
cp.supervision_timeout = cpu_to_le16(to_multiplier);
- cp.min_ce_len = cpu_to_le16(0x0001);
- cp.max_ce_len = cpu_to_le16(0x0001);
+ cp.min_ce_len = __constant_cpu_to_le16(0x0001);
+ cp.max_ce_len = __constant_cpu_to_le16(0x0001);
hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
-EXPORT_SYMBOL(hci_le_conn_update);
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
- __u8 ltk[16])
+ __u8 ltk[16])
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_le_start_enc cp;
- BT_DBG("%p", conn);
+ BT_DBG("hcon %p", conn);
memset(&cp, 0, sizeof(cp));
@@ -221,18 +207,17 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
-EXPORT_SYMBOL(hci_le_start_enc);
/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
struct hci_conn *sco = conn->link;
- BT_DBG("%p", conn);
-
if (!sco)
return;
+ BT_DBG("hcon %p", conn);
+
if (!status) {
if (lmp_esco_capable(conn->hdev))
hci_setup_sync(sco, conn->handle);
@@ -247,10 +232,10 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status)
static void hci_conn_timeout(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn,
- disc_work.work);
+ disc_work.work);
__u8 reason;
- BT_DBG("conn %p state %s", conn, state_to_string(conn->state));
+ BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
if (atomic_read(&conn->refcnt))
return;
@@ -281,7 +266,7 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
if (test_bit(HCI_RAW, &hdev->flags))
return;
@@ -295,9 +280,9 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
struct hci_cp_sniff_subrate cp;
cp.handle = cpu_to_le16(conn->handle);
- cp.max_latency = cpu_to_le16(0);
- cp.min_remote_timeout = cpu_to_le16(0);
- cp.min_local_timeout = cpu_to_le16(0);
+ cp.max_latency = __constant_cpu_to_le16(0);
+ cp.min_remote_timeout = __constant_cpu_to_le16(0);
+ cp.min_local_timeout = __constant_cpu_to_le16(0);
hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
}
@@ -306,8 +291,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
cp.handle = cpu_to_le16(conn->handle);
cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
- cp.attempt = cpu_to_le16(4);
- cp.timeout = cpu_to_le16(1);
+ cp.attempt = __constant_cpu_to_le16(4);
+ cp.timeout = __constant_cpu_to_le16(1);
hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
}
}
@@ -316,7 +301,7 @@ static void hci_conn_idle(unsigned long arg)
{
struct hci_conn *conn = (void *) arg;
- BT_DBG("conn %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
hci_conn_enter_sniff_mode(conn);
}
@@ -327,7 +312,7 @@ static void hci_conn_auto_accept(unsigned long arg)
struct hci_dev *hdev = conn->hdev;
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
- &conn->dst);
+ &conn->dst);
}
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
@@ -376,7 +361,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
- (unsigned long) conn);
+ (unsigned long) conn);
atomic_set(&conn->refcnt, 0);
@@ -397,7 +382,7 @@ int hci_conn_del(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);
+ BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
del_timer(&conn->idle_timer);
@@ -425,9 +410,11 @@ int hci_conn_del(struct hci_conn *conn)
}
}
-
hci_chan_list_flush(conn);
+ if (conn->amp_mgr)
+ amp_mgr_put(conn->amp_mgr);
+
hci_conn_hash_del(hdev, conn);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);
@@ -454,7 +441,9 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
read_lock(&hci_dev_list_lock);
list_for_each_entry(d, &hci_dev_list, list) {
- if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
+ if (!test_bit(HCI_UP, &d->flags) ||
+ test_bit(HCI_RAW, &d->flags) ||
+ d->dev_type != HCI_BREDR)
continue;
/* Simple routing:
@@ -495,6 +484,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
if (type == LE_LINK) {
le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
if (!le) {
+ le = hci_conn_hash_lookup_state(hdev, LE_LINK,
+ BT_CONNECT);
+ if (le)
+ return ERR_PTR(-EBUSY);
+
le = hci_conn_add(hdev, LE_LINK, dst);
if (!le)
return ERR_PTR(-ENOMEM);
@@ -545,7 +539,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
hci_conn_hold(sco);
if (acl->state == BT_CONNECTED &&
- (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
+ (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
@@ -560,24 +554,22 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
return sco;
}
-EXPORT_SYMBOL(hci_connect);
/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
return 0;
return 1;
}
-EXPORT_SYMBOL(hci_conn_check_link_mode);
/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (conn->pending_sec_level > sec_level)
sec_level = conn->pending_sec_level;
@@ -600,7 +592,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
if (conn->key_type != 0xff)
set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
}
@@ -611,21 +603,21 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
/* Encrypt the the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
struct hci_cp_set_conn_encrypt cp;
cp.handle = cpu_to_le16(conn->handle);
cp.encrypt = 0x01;
hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
+ &cp);
}
}
/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
/* For sdp we don't need the link key. */
if (sec_level == BT_SECURITY_SDP)
@@ -648,8 +640,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
/* An unauthenticated combination key has sufficient security for
security level 1 and 2. */
if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
- (sec_level == BT_SECURITY_MEDIUM ||
- sec_level == BT_SECURITY_LOW))
+ (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
goto encrypt;
/* A combination key has always sufficient security for the security
@@ -657,8 +648,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
is generated using maximum PIN code length (16).
For pre 2.1 units. */
if (conn->key_type == HCI_LK_COMBINATION &&
- (sec_level != BT_SECURITY_HIGH ||
- conn->pin_length == 16))
+ (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
goto encrypt;
auth:
@@ -680,7 +670,7 @@ EXPORT_SYMBOL(hci_conn_security);
/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (sec_level != BT_SECURITY_HIGH)
return 1; /* Accept if non-secure is required */
@@ -695,23 +685,22 @@ EXPORT_SYMBOL(hci_conn_check_secure);
/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
struct hci_cp_change_conn_link_key cp;
cp.handle = cpu_to_le16(conn->handle);
hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
return 0;
}
-EXPORT_SYMBOL(hci_conn_change_link_key);
/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
if (!role && conn->link_mode & HCI_LM_MASTER)
return 1;
@@ -732,7 +721,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
struct hci_dev *hdev = conn->hdev;
- BT_DBG("conn %p mode %d", conn, conn->mode);
+ BT_DBG("hcon %p mode %d", conn, conn->mode);
if (test_bit(HCI_RAW, &hdev->flags))
return;
@@ -752,7 +741,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
timer:
if (hdev->idle_timeout > 0)
mod_timer(&conn->idle_timer,
- jiffies + msecs_to_jiffies(hdev->idle_timeout));
+ jiffies + msecs_to_jiffies(hdev->idle_timeout));
}
/* Drop all connection on the device */
@@ -802,7 +791,7 @@ EXPORT_SYMBOL(hci_conn_put_device);
int hci_get_conn_list(void __user *arg)
{
- register struct hci_conn *c;
+ struct hci_conn *c;
struct hci_conn_list_req req, *cl;
struct hci_conn_info *ci;
struct hci_dev *hdev;
@@ -906,7 +895,7 @@ struct hci_chan *hci_chan_create(struct hci_conn *conn)
struct hci_dev *hdev = conn->hdev;
struct hci_chan *chan;
- BT_DBG("%s conn %p", hdev->name, conn);
+ BT_DBG("%s hcon %p", hdev->name, conn);
chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
if (!chan)
@@ -925,7 +914,7 @@ int hci_chan_del(struct hci_chan *chan)
struct hci_conn *conn = chan->conn;
struct hci_dev *hdev = conn->hdev;
- BT_DBG("%s conn %p chan %p", hdev->name, conn, chan);
+ BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);
list_del_rcu(&chan->list);
@@ -941,7 +930,7 @@ void hci_chan_list_flush(struct hci_conn *conn)
{
struct hci_chan *chan, *n;
- BT_DBG("conn %p", conn);
+ BT_DBG("hcon %p", conn);
list_for_each_entry_safe(chan, n, &conn->chan_list, list)
hci_chan_del(chan);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 411ace8e647b..d4de5db18d5a 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -25,34 +25,14 @@
/* Bluetooth HCI core. */
-#include <linux/jiffies.h>
-#include <linux/module.h>
-#include <linux/kmod.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/rfkill.h>
-#include <linux/timer.h>
-#include <linux/crypto.h>
-#include <net/sock.h>
+#include <linux/export.h>
+#include <linux/idr.h>
-#include <linux/uaccess.h>
-#include <asm/unaligned.h>
+#include <linux/rfkill.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
-#define AUTO_OFF_TIMEOUT 2000
-
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);
@@ -65,6 +45,9 @@ DEFINE_RWLOCK(hci_dev_list_lock);
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
+/* HCI ID Numbering */
+static DEFINE_IDA(hci_index_ida);
+
/* ---- HCI notifications ---- */
static void hci_notify(struct hci_dev *hdev, int event)
@@ -76,7 +59,7 @@ static void hci_notify(struct hci_dev *hdev, int event)
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
- BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
+ BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
/* If this is the init phase check if the completed command matches
* the last init command, and if not just return.
@@ -124,8 +107,9 @@ static void hci_req_cancel(struct hci_dev *hdev, int err)
}
/* Execute request and wait for completion. */
-static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+static int __hci_request(struct hci_dev *hdev,
+ void (*req)(struct hci_dev *hdev, unsigned long opt),
+ unsigned long opt, __u32 timeout)
{
DECLARE_WAITQUEUE(wait, current);
int err = 0;
@@ -166,8 +150,9 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
return err;
}
-static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
- unsigned long opt, __u32 timeout)
+static int hci_request(struct hci_dev *hdev,
+ void (*req)(struct hci_dev *hdev, unsigned long opt),
+ unsigned long opt, __u32 timeout)
{
int ret;
@@ -201,12 +186,6 @@ static void bredr_init(struct hci_dev *hdev)
/* Mandatory initialization */
- /* Reset */
- if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
- set_bit(HCI_RESET, &hdev->flags);
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
- }
-
/* Read Local Supported Features */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
@@ -235,7 +214,7 @@ static void bredr_init(struct hci_dev *hdev)
hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
/* Connection accept timeout ~20 secs */
- param = cpu_to_le16(0x7d00);
+ param = __constant_cpu_to_le16(0x7d00);
hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
bacpy(&cp.bdaddr, BDADDR_ANY);
@@ -247,9 +226,6 @@ static void amp_init(struct hci_dev *hdev)
{
hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
- /* Reset */
- hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
-
/* Read Local Version */
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
@@ -275,6 +251,10 @@ static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
}
skb_queue_purge(&hdev->driver_init);
+ /* Reset */
+ if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
+ hci_reset_req(hdev, 0);
+
switch (hdev->dev_type) {
case HCI_BREDR:
bredr_init(hdev);
@@ -417,7 +397,8 @@ static void inquiry_cache_flush(struct hci_dev *hdev)
INIT_LIST_HEAD(&cache->resolve);
}
-struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
+struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
+ bdaddr_t *bdaddr)
{
struct discovery_state *cache = &hdev->discovery;
struct inquiry_entry *e;
@@ -478,7 +459,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
list_for_each_entry(p, &cache->resolve, list) {
if (p->name_state != NAME_PENDING &&
- abs(p->data.rssi) >= abs(ie->data.rssi))
+ abs(p->data.rssi) >= abs(ie->data.rssi))
break;
pos = &p->list;
}
@@ -503,7 +484,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
*ssp = true;
if (ie->name_state == NAME_NEEDED &&
- data->rssi != ie->data.rssi) {
+ data->rssi != ie->data.rssi) {
ie->data.rssi = data->rssi;
hci_inquiry_cache_update_resolve(hdev, ie);
}
@@ -527,7 +508,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
update:
if (name_known && ie->name_state != NAME_KNOWN &&
- ie->name_state != NAME_PENDING) {
+ ie->name_state != NAME_PENDING) {
ie->name_state = NAME_KNOWN;
list_del(&ie->list);
}
@@ -605,8 +586,7 @@ int hci_inquiry(void __user *arg)
hci_dev_lock(hdev);
if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
- inquiry_cache_empty(hdev) ||
- ir.flags & IREQ_CACHE_FLUSH) {
+ inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
inquiry_cache_flush(hdev);
do_inquiry = 1;
}
@@ -620,7 +600,9 @@ int hci_inquiry(void __user *arg)
goto done;
}
- /* for unlimited number of responses we will use buffer with 255 entries */
+ /* for unlimited number of responses we will use buffer with
+ * 255 entries
+ */
max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
/* cache_dump can't sleep. Therefore we allocate temp buffer and then
@@ -641,7 +623,7 @@ int hci_inquiry(void __user *arg)
if (!copy_to_user(ptr, &ir, sizeof(ir))) {
ptr += sizeof(ir);
if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
- ir.num_rsp))
+ ir.num_rsp))
err = -EFAULT;
} else
err = -EFAULT;
@@ -701,12 +683,11 @@ int hci_dev_open(__u16 dev)
set_bit(HCI_INIT, &hdev->flags);
hdev->init_last_cmd = 0;
- ret = __hci_request(hdev, hci_init_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
if (lmp_host_le_capable(hdev))
ret = __hci_request(hdev, hci_le_init_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
clear_bit(HCI_INIT, &hdev->flags);
}
@@ -791,10 +772,9 @@ static int hci_dev_do_close(struct hci_dev *hdev)
skb_queue_purge(&hdev->cmd_q);
atomic_set(&hdev->cmd_cnt, 1);
if (!test_bit(HCI_RAW, &hdev->flags) &&
- test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
+ test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
set_bit(HCI_INIT, &hdev->flags);
- __hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(250));
+ __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
clear_bit(HCI_INIT, &hdev->flags);
}
@@ -883,8 +863,7 @@ int hci_dev_reset(__u16 dev)
hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
if (!test_bit(HCI_RAW, &hdev->flags))
- ret = __hci_request(hdev, hci_reset_req, 0,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
done:
hci_req_unlock(hdev);
@@ -924,7 +903,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
switch (cmd) {
case HCISETAUTH:
err = hci_request(hdev, hci_auth_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
break;
case HCISETENCRYPT:
@@ -936,23 +915,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg)
if (!test_bit(HCI_AUTH, &hdev->flags)) {
/* Auth must be enabled first */
err = hci_request(hdev, hci_auth_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
if (err)
break;
}
err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
break;
case HCISETSCAN:
err = hci_request(hdev, hci_scan_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
break;
case HCISETLINKPOL:
err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
- msecs_to_jiffies(HCI_INIT_TIMEOUT));
+ HCI_INIT_TIMEOUT);
break;
case HCISETLINKMODE:
@@ -1102,8 +1081,7 @@ static void hci_power_on(struct work_struct *work)
return;
if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
- schedule_delayed_work(&hdev->power_off,
- msecs_to_jiffies(AUTO_OFF_TIMEOUT));
+ schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
mgmt_index_added(hdev);
@@ -1112,7 +1090,7 @@ static void hci_power_on(struct work_struct *work)
static void hci_power_off(struct work_struct *work)
{
struct hci_dev *hdev = container_of(work, struct hci_dev,
- power_off.work);
+ power_off.work);
BT_DBG("%s", hdev->name);
@@ -1193,7 +1171,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
- u8 key_type, u8 old_key_type)
+ u8 key_type, u8 old_key_type)
{
/* Legacy key */
if (key_type < 0x03)
@@ -1234,7 +1212,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
list_for_each_entry(k, &hdev->long_term_keys, list) {
if (k->ediv != ediv ||
- memcmp(rand, k->rand, sizeof(k->rand)))
+ memcmp(rand, k->rand, sizeof(k->rand)))
continue;
return k;
@@ -1242,7 +1220,6 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
return NULL;
}
-EXPORT_SYMBOL(hci_find_ltk);
struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type)
@@ -1251,12 +1228,11 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
list_for_each_entry(k, &hdev->long_term_keys, list)
if (addr_type == k->bdaddr_type &&
- bacmp(bdaddr, &k->bdaddr) == 0)
+ bacmp(bdaddr, &k->bdaddr) == 0)
return k;
return NULL;
}
-EXPORT_SYMBOL(hci_find_ltk_by_addr);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
@@ -1283,15 +1259,14 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
* combination key for legacy pairing even when there's no
* previous key */
if (type == HCI_LK_CHANGED_COMBINATION &&
- (!conn || conn->remote_auth == 0xff) &&
- old_key_type == 0xff) {
+ (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
type = HCI_LK_COMBINATION;
if (conn)
conn->key_type = type;
}
bacpy(&key->bdaddr, bdaddr);
- memcpy(key->val, val, 16);
+ memcpy(key->val, val, HCI_LINK_KEY_SIZE);
key->pin_len = pin_len;
if (type == HCI_LK_CHANGED_COMBINATION)
@@ -1383,11 +1358,19 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
}
/* HCI command timer function */
-static void hci_cmd_timer(unsigned long arg)
+static void hci_cmd_timeout(unsigned long arg)
{
struct hci_dev *hdev = (void *) arg;
- BT_ERR("%s command tx timeout", hdev->name);
+ if (hdev->sent_cmd) {
+ struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
+ u16 opcode = __le16_to_cpu(sent->opcode);
+
+ BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
+ } else {
+ BT_ERR("%s command tx timeout", hdev->name);
+ }
+
atomic_set(&hdev->cmd_cnt, 1);
queue_work(hdev->workqueue, &hdev->cmd_work);
}
@@ -1540,6 +1523,7 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
memset(&cp, 0, sizeof(cp));
cp.enable = 1;
+ cp.filter_dup = 1;
hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
@@ -1684,7 +1668,7 @@ struct hci_dev *hci_alloc_dev(void)
init_waitqueue_head(&hdev->req_wait_q);
- setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
+ setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
hci_init_sysfs(hdev);
discovery_init(hdev);
@@ -1707,41 +1691,39 @@ EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
- struct list_head *head, *p;
int id, error;
if (!hdev->open || !hdev->close)
return -EINVAL;
- write_lock(&hci_dev_list_lock);
-
/* Do not allow HCI_AMP devices to register at index 0,
* so the index can be used as the AMP controller ID.
*/
- id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
- head = &hci_dev_list;
-
- /* Find first available device id */
- list_for_each(p, &hci_dev_list) {
- int nid = list_entry(p, struct hci_dev, list)->id;
- if (nid > id)
- break;
- if (nid == id)
- id++;
- head = p;
+ switch (hdev->dev_type) {
+ case HCI_BREDR:
+ id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
+ break;
+ case HCI_AMP:
+ id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
+ break;
+ default:
+ return -EINVAL;
}
+ if (id < 0)
+ return id;
+
sprintf(hdev->name, "hci%d", id);
hdev->id = id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- list_add(&hdev->list, head);
-
+ write_lock(&hci_dev_list_lock);
+ list_add(&hdev->list, &hci_dev_list);
write_unlock(&hci_dev_list_lock);
hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
- WQ_MEM_RECLAIM, 1);
+ WQ_MEM_RECLAIM, 1);
if (!hdev->workqueue) {
error = -ENOMEM;
goto err;
@@ -1752,7 +1734,8 @@ int hci_register_dev(struct hci_dev *hdev)
goto err_wqueue;
hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
- RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
+ RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
+ hdev);
if (hdev->rfkill) {
if (rfkill_register(hdev->rfkill) < 0) {
rfkill_destroy(hdev->rfkill);
@@ -1760,8 +1743,11 @@ int hci_register_dev(struct hci_dev *hdev)
}
}
- set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
set_bit(HCI_SETUP, &hdev->dev_flags);
+
+ if (hdev->dev_type != HCI_AMP)
+ set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+
schedule_work(&hdev->power_on);
hci_notify(hdev, HCI_DEV_REG);
@@ -1772,6 +1758,7 @@ int hci_register_dev(struct hci_dev *hdev)
err_wqueue:
destroy_workqueue(hdev->workqueue);
err:
+ ida_simple_remove(&hci_index_ida, hdev->id);
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
@@ -1783,12 +1770,14 @@ EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
- int i;
+ int i, id;
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
set_bit(HCI_UNREGISTER, &hdev->dev_flags);
+ id = hdev->id;
+
write_lock(&hci_dev_list_lock);
list_del(&hdev->list);
write_unlock(&hci_dev_list_lock);
@@ -1799,7 +1788,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
kfree_skb(hdev->reassembly[i]);
if (!test_bit(HCI_INIT, &hdev->flags) &&
- !test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ !test_bit(HCI_SETUP, &hdev->dev_flags)) {
hci_dev_lock(hdev);
mgmt_index_removed(hdev);
hci_dev_unlock(hdev);
@@ -1829,6 +1818,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
hci_dev_unlock(hdev);
hci_dev_put(hdev);
+
+ ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
@@ -1853,7 +1844,7 @@ int hci_recv_frame(struct sk_buff *skb)
{
struct hci_dev *hdev = (struct hci_dev *) skb->dev;
if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
- && !test_bit(HCI_INIT, &hdev->flags))) {
+ && !test_bit(HCI_INIT, &hdev->flags))) {
kfree_skb(skb);
return -ENXIO;
}
@@ -1872,7 +1863,7 @@ int hci_recv_frame(struct sk_buff *skb)
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
- int count, __u8 index)
+ int count, __u8 index)
{
int len = 0;
int hlen = 0;
@@ -1881,7 +1872,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
struct bt_skb_cb *scb;
if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
- index >= NUM_REASSEMBLY)
+ index >= NUM_REASSEMBLY)
return -EILSEQ;
skb = hdev->reassembly[index];
@@ -2023,7 +2014,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
type = bt_cb(skb)->pkt_type;
rem = hci_reassembly(hdev, type, data, count,
- STREAM_REASSEMBLY);
+ STREAM_REASSEMBLY);
if (rem < 0)
return rem;
@@ -2096,7 +2087,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
struct hci_command_hdr *hdr;
struct sk_buff *skb;
- BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
+ BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
skb = bt_skb_alloc(len, GFP_ATOMIC);
if (!skb) {
@@ -2138,7 +2129,7 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
if (hdr->opcode != cpu_to_le16(opcode))
return NULL;
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
@@ -2157,7 +2148,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
}
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
- struct sk_buff *skb, __u16 flags)
+ struct sk_buff *skb, __u16 flags)
{
struct hci_dev *hdev = conn->hdev;
struct sk_buff *list;
@@ -2208,7 +2199,7 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
struct hci_conn *conn = chan->conn;
struct hci_dev *hdev = conn->hdev;
- BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
+ BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
skb->dev = (void *) hdev;
@@ -2216,7 +2207,6 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
queue_work(hdev->workqueue, &hdev->tx_work);
}
-EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
@@ -2239,12 +2229,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
skb_queue_tail(&conn->data_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
}
-EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */
/* HCI Connection scheduler */
-static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
+static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *conn = NULL, *c;
@@ -2303,7 +2293,7 @@ static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int
return conn;
}
-static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
+static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_conn *c;
@@ -2316,16 +2306,16 @@ static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
list_for_each_entry_rcu(c, &h->list, list) {
if (c->type == type && c->sent) {
BT_ERR("%s killing stalled connection %s",
- hdev->name, batostr(&c->dst));
- hci_acl_disconn(c, 0x13);
+ hdev->name, batostr(&c->dst));
+ hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
}
}
rcu_read_unlock();
}
-static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
- int *quote)
+static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
+ int *quote)
{
struct hci_conn_hash *h = &hdev->conn_hash;
struct hci_chan *chan = NULL;
@@ -2442,7 +2432,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
skb->priority = HCI_PRIO_MAX - 1;
BT_DBG("chan %p skb %p promoted to %d", chan, skb,
- skb->priority);
+ skb->priority);
}
if (hci_conn_num(hdev, type) == num)
@@ -2459,18 +2449,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
-static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
+static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
if (!test_bit(HCI_RAW, &hdev->flags)) {
/* ACL tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!cnt && time_after(jiffies, hdev->acl_last_tx +
- msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
+ HCI_ACL_TX_TIMEOUT))
hci_link_tx_to(hdev, ACL_LINK);
}
}
-static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
+static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
unsigned int cnt = hdev->acl_cnt;
struct hci_chan *chan;
@@ -2480,11 +2470,11 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
__check_timeout(hdev, cnt);
while (hdev->acl_cnt &&
- (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote-- && (skb = skb_peek(&chan->data_q))) {
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
+ skb->len, skb->priority);
/* Stop if priority has changed */
if (skb->priority < priority)
@@ -2508,7 +2498,7 @@ static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
hci_prio_recalculate(hdev, ACL_LINK);
}
-static inline void hci_sched_acl_blk(struct hci_dev *hdev)
+static void hci_sched_acl_blk(struct hci_dev *hdev)
{
unsigned int cnt = hdev->block_cnt;
struct hci_chan *chan;
@@ -2518,13 +2508,13 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
__check_timeout(hdev, cnt);
while (hdev->block_cnt > 0 &&
- (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
int blocks;
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
+ skb->len, skb->priority);
/* Stop if priority has changed */
if (skb->priority < priority)
@@ -2537,7 +2527,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
return;
hci_conn_enter_active_mode(chan->conn,
- bt_cb(skb)->force_active);
+ bt_cb(skb)->force_active);
hci_send_frame(skb);
hdev->acl_last_tx = jiffies;
@@ -2554,7 +2544,7 @@ static inline void hci_sched_acl_blk(struct hci_dev *hdev)
hci_prio_recalculate(hdev, ACL_LINK);
}
-static inline void hci_sched_acl(struct hci_dev *hdev)
+static void hci_sched_acl(struct hci_dev *hdev)
{
BT_DBG("%s", hdev->name);
@@ -2573,7 +2563,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
}
/* Schedule SCO */
-static inline void hci_sched_sco(struct hci_dev *hdev)
+static void hci_sched_sco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
@@ -2596,7 +2586,7 @@ static inline void hci_sched_sco(struct hci_dev *hdev)
}
}
-static inline void hci_sched_esco(struct hci_dev *hdev)
+static void hci_sched_esco(struct hci_dev *hdev)
{
struct hci_conn *conn;
struct sk_buff *skb;
@@ -2607,7 +2597,8 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
if (!hci_conn_num(hdev, ESCO_LINK))
return;
- while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
+ while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
+ &quote))) {
while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
BT_DBG("skb %p len %d", skb, skb->len);
hci_send_frame(skb);
@@ -2619,7 +2610,7 @@ static inline void hci_sched_esco(struct hci_dev *hdev)
}
}
-static inline void hci_sched_le(struct hci_dev *hdev)
+static void hci_sched_le(struct hci_dev *hdev)
{
struct hci_chan *chan;
struct sk_buff *skb;
@@ -2634,7 +2625,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
/* LE tx timeout must be longer than maximum
* link supervision timeout (40.9 seconds) */
if (!hdev->le_cnt && hdev->le_pkts &&
- time_after(jiffies, hdev->le_last_tx + HZ * 45))
+ time_after(jiffies, hdev->le_last_tx + HZ * 45))
hci_link_tx_to(hdev, LE_LINK);
}
@@ -2644,7 +2635,7 @@ static inline void hci_sched_le(struct hci_dev *hdev)
u32 priority = (skb_peek(&chan->data_q))->priority;
while (quote-- && (skb = skb_peek(&chan->data_q))) {
BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
- skb->len, skb->priority);
+ skb->len, skb->priority);
/* Stop if priority has changed */
if (skb->priority < priority)
@@ -2676,7 +2667,7 @@ static void hci_tx_work(struct work_struct *work)
struct sk_buff *skb;
BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
- hdev->sco_cnt, hdev->le_cnt);
+ hdev->sco_cnt, hdev->le_cnt);
/* Schedule queues and send stuff to HCI driver */
@@ -2696,7 +2687,7 @@ static void hci_tx_work(struct work_struct *work)
/* ----- HCI RX task (incoming data processing) ----- */
/* ACL data packet */
-static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_acl_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
@@ -2708,7 +2699,8 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
flags = hci_flags(handle);
handle = hci_handle(handle);
- BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
+ BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
+ handle, flags);
hdev->stat.acl_rx++;
@@ -2732,14 +2724,14 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
return;
} else {
BT_ERR("%s ACL packet for unknown connection handle %d",
- hdev->name, handle);
+ hdev->name, handle);
}
kfree_skb(skb);
}
/* SCO data packet */
-static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_sco_hdr *hdr = (void *) skb->data;
struct hci_conn *conn;
@@ -2749,7 +2741,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
handle = __le16_to_cpu(hdr->handle);
- BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
+ BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
hdev->stat.sco_rx++;
@@ -2763,7 +2755,7 @@ static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
return;
} else {
BT_ERR("%s SCO packet for unknown connection handle %d",
- hdev->name, handle);
+ hdev->name, handle);
}
kfree_skb(skb);
@@ -2829,7 +2821,8 @@ static void hci_cmd_work(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
struct sk_buff *skb;
- BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
+ BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
+ atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
/* Send queued commands */
if (atomic_read(&hdev->cmd_cnt)) {
@@ -2847,7 +2840,7 @@ static void hci_cmd_work(struct work_struct *work)
del_timer(&hdev->cmd_timer);
else
mod_timer(&hdev->cmd_timer,
- jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
+ jiffies + HCI_CMD_TIMEOUT);
} else {
skb_queue_head(&hdev->cmd_q, skb);
queue_work(hdev->workqueue, &hdev->cmd_work);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 94ad124a4ea3..41ff978a33f9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -24,20 +24,7 @@
/* Bluetooth HCI event handling. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/interrupt.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -49,7 +36,7 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status) {
hci_dev_lock(hdev);
@@ -73,7 +60,7 @@ static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
@@ -85,7 +72,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
@@ -95,7 +82,8 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
hci_conn_check_pending(hdev);
}
-static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
@@ -105,7 +93,7 @@ static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_rp_role_discovery *rp = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -128,7 +116,7 @@ static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_rp_read_link_policy *rp = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -148,7 +136,7 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_conn *conn;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -166,11 +154,12 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
hci_dev_unlock(hdev);
}
-static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -178,12 +167,13 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *sk
hdev->link_policy = __le16_to_cpu(rp->policy);
}
-static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
if (!sent)
@@ -199,7 +189,7 @@ static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
clear_bit(HCI_RESET, &hdev->flags);
@@ -217,7 +207,7 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
if (!sent)
@@ -239,7 +229,7 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_name *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -253,7 +243,7 @@ static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
if (!sent)
@@ -279,7 +269,7 @@ static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
if (!sent)
@@ -303,7 +293,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
int old_pscan, old_iscan;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
if (!sent)
@@ -329,7 +319,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
if (hdev->discov_timeout > 0) {
int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
queue_delayed_work(hdev->workqueue, &hdev->discov_off,
- to);
+ to);
}
} else if (old_iscan)
mgmt_discoverable(hdev, 0);
@@ -350,7 +340,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -358,7 +348,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
memcpy(hdev->dev_class, rp->dev_class, 3);
BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
- hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+ hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
@@ -366,7 +356,7 @@ static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
if (!sent)
@@ -388,7 +378,7 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_rp_read_voice_setting *rp = (void *) skb->data;
__u16 setting;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -400,19 +390,20 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
hdev->voice_setting = setting;
- BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
+ BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
-static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_write_voice_setting(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
__u16 setting;
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status)
return;
@@ -428,7 +419,7 @@ static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb
hdev->voice_setting = setting;
- BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
+ BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
@@ -438,7 +429,7 @@ static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
}
@@ -448,7 +439,7 @@ static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
__u8 status = *((__u8 *) skb->data);
void *sent;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
if (!sent)
@@ -473,7 +464,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
return 1;
if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
- hdev->lmp_subver == 0x0757)
+ hdev->lmp_subver == 0x0757)
return 1;
if (hdev->manufacturer == 15) {
@@ -486,7 +477,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
}
if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
- hdev->lmp_subver == 0x1805)
+ hdev->lmp_subver == 0x1805)
return 1;
return 0;
@@ -566,7 +557,7 @@ static void hci_setup(struct hci_dev *hdev)
if (hdev->hci_ver > BLUETOOTH_VER_1_1)
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
- if (hdev->features[6] & LMP_SIMPLE_PAIR) {
+ if (lmp_ssp_capable(hdev)) {
if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
u8 mode = 0x01;
hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
@@ -606,7 +597,7 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_local_version *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
goto done;
@@ -617,9 +608,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
- BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
- hdev->manufacturer,
- hdev->hci_ver, hdev->hci_rev);
+ BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
+ hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
if (test_bit(HCI_INIT, &hdev->flags))
hci_setup(hdev);
@@ -646,11 +636,12 @@ static void hci_setup_link_policy(struct hci_dev *hdev)
hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
-static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_local_commands(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_local_commands *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
goto done;
@@ -664,11 +655,12 @@ done:
hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
-static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cc_read_local_features(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_rp_read_local_features *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -713,10 +705,10 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
- hdev->features[0], hdev->features[1],
- hdev->features[2], hdev->features[3],
- hdev->features[4], hdev->features[5],
- hdev->features[6], hdev->features[7]);
+ hdev->features[0], hdev->features[1],
+ hdev->features[2], hdev->features[3],
+ hdev->features[4], hdev->features[5],
+ hdev->features[6], hdev->features[7]);
}
static void hci_set_le_support(struct hci_dev *hdev)
@@ -736,11 +728,11 @@ static void hci_set_le_support(struct hci_dev *hdev)
}
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
goto done;
@@ -762,11 +754,11 @@ done:
}
static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -780,7 +772,7 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_buffer_size *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -798,16 +790,15 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
hdev->acl_cnt = hdev->acl_pkts;
hdev->sco_cnt = hdev->sco_pkts;
- BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
- hdev->acl_mtu, hdev->acl_pkts,
- hdev->sco_mtu, hdev->sco_pkts);
+ BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
+ hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_read_bd_addr *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (!rp->status)
bacpy(&hdev->bdaddr, &rp->bdaddr);
@@ -816,11 +807,11 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
}
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_data_block_size *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -832,7 +823,7 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev,
hdev->block_cnt = hdev->num_blocks;
BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
- hdev->block_cnt, hdev->block_len);
+ hdev->block_cnt, hdev->block_len);
hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
}
@@ -841,17 +832,17 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
}
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -871,11 +862,11 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
}
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
}
@@ -884,27 +875,27 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
}
static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
}
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (!rp->status)
hdev->inq_tx_power = rp->tx_power;
@@ -916,7 +907,7 @@ static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
}
@@ -927,7 +918,7 @@ static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_cp_pin_code_reply *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
@@ -953,13 +944,13 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
if (test_bit(HCI_MGMT, &hdev->dev_flags))
mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
- rp->status);
+ rp->status);
hci_dev_unlock(hdev);
}
@@ -969,7 +960,7 @@ static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
{
struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -988,7 +979,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
@@ -1000,11 +991,11 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
}
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
@@ -1019,7 +1010,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
@@ -1031,11 +1022,11 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
}
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
@@ -1047,11 +1038,11 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
}
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
hci_dev_lock(hdev);
mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
@@ -1063,7 +1054,7 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
@@ -1076,12 +1067,12 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
}
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
struct hci_cp_le_set_scan_enable *cp;
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
if (!cp)
@@ -1136,7 +1127,7 @@ static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -1148,7 +1139,7 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
- BT_DBG("%s status 0x%x", hdev->name, rp->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
if (rp->status)
return;
@@ -1156,13 +1147,13 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}
-static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_cp_write_le_host_supported *sent;
__u8 status = *((__u8 *) skb->data);
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
if (!sent)
@@ -1176,15 +1167,15 @@ static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
}
if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
- !test_bit(HCI_INIT, &hdev->flags))
+ !test_bit(HCI_INIT, &hdev->flags))
mgmt_le_enable_complete(hdev, sent->le, status);
hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}
-static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
+static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (status) {
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
@@ -1203,12 +1194,12 @@ static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
hci_dev_unlock(hdev);
}
-static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
+static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
struct hci_cp_create_conn *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
if (!cp)
@@ -1218,7 +1209,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
- BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
+ BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);
if (status) {
if (conn && conn->state == BT_CONNECT) {
@@ -1249,7 +1240,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
struct hci_conn *acl, *sco;
__u16 handle;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1260,7 +1251,7 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
handle = __le16_to_cpu(cp->handle);
- BT_DBG("%s handle %d", hdev->name, handle);
+ BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
hci_dev_lock(hdev);
@@ -1283,7 +1274,7 @@ static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
struct hci_cp_auth_requested *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1310,7 +1301,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
struct hci_cp_set_conn_encrypt *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1333,7 +1324,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
}
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
- struct hci_conn *conn)
+ struct hci_conn *conn)
{
if (conn->state != BT_CONFIG || !conn->out)
return 0;
@@ -1343,15 +1334,14 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev,
/* Only request authentication for SSP connections or non-SSP
* devices with sec_level HIGH or if MITM protection is requested */
- if (!hci_conn_ssp_enabled(conn) &&
- conn->pending_sec_level != BT_SECURITY_HIGH &&
- !(conn->auth_type & 0x01))
+ if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
+ conn->pending_sec_level != BT_SECURITY_HIGH)
return 0;
return 1;
}
-static inline int hci_resolve_name(struct hci_dev *hdev,
+static int hci_resolve_name(struct hci_dev *hdev,
struct inquiry_entry *e)
{
struct hci_cp_remote_name_req cp;
@@ -1423,7 +1413,7 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
struct hci_cp_remote_name_req *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
/* If successful wait for the name req complete event before
* checking for the need to do authentication */
@@ -1462,7 +1452,7 @@ static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
struct hci_cp_read_remote_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1489,7 +1479,7 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
struct hci_cp_read_remote_ext_features *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1517,7 +1507,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
struct hci_conn *acl, *sco;
__u16 handle;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1528,7 +1518,7 @@ static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
handle = __le16_to_cpu(cp->handle);
- BT_DBG("%s handle %d", hdev->name, handle);
+ BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
hci_dev_lock(hdev);
@@ -1551,7 +1541,7 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
struct hci_cp_sniff_mode *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1578,7 +1568,7 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
struct hci_cp_exit_sniff_mode *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
if (!status)
return;
@@ -1627,7 +1617,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
struct hci_cp_le_create_conn *cp;
struct hci_conn *conn;
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
if (!cp)
@@ -1638,7 +1628,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
- conn);
+ conn);
if (status) {
if (conn && conn->state == BT_CONNECT) {
@@ -1665,16 +1655,16 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
- BT_DBG("%s status 0x%x", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
}
-static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
__u8 status = *((__u8 *) skb->data);
struct discovery_state *discov = &hdev->discovery;
struct inquiry_entry *e;
- BT_DBG("%s status %d", hdev->name, status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
hci_req_complete(hdev, HCI_OP_INQUIRY, status);
@@ -1708,7 +1698,7 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct inquiry_data data;
struct inquiry_info *info = (void *) (skb->data + 1);
@@ -1745,7 +1735,7 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
hci_dev_unlock(hdev);
}
-static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -1823,18 +1813,18 @@ unlock:
hci_conn_check_pending(hdev);
}
-static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_request *ev = (void *) skb->data;
int mask = hdev->link_mode;
- BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
- batostr(&ev->bdaddr), ev->link_type);
+ BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
+ ev->link_type);
mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
if ((mask & HCI_LM_ACCEPT) &&
- !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
+ !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
/* Connection accepted */
struct inquiry_entry *ie;
struct hci_conn *conn;
@@ -1845,7 +1835,8 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
if (ie)
memcpy(ie->data.dev_class, ev->dev_class, 3);
- conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
+ conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
+ &ev->bdaddr);
if (!conn) {
conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
@@ -1878,9 +1869,9 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
bacpy(&cp.bdaddr, &ev->bdaddr);
cp.pkt_type = cpu_to_le16(conn->pkt_type);
- cp.tx_bandwidth = cpu_to_le32(0x00001f40);
- cp.rx_bandwidth = cpu_to_le32(0x00001f40);
- cp.max_latency = cpu_to_le16(0xffff);
+ cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
+ cp.max_latency = __constant_cpu_to_le16(0xffff);
cp.content_format = cpu_to_le16(hdev->voice_setting);
cp.retrans_effort = 0xff;
@@ -1897,12 +1888,12 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
}
}
-static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_disconn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1914,10 +1905,10 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
conn->state = BT_CLOSED;
if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
- (conn->type == ACL_LINK || conn->type == LE_LINK)) {
+ (conn->type == ACL_LINK || conn->type == LE_LINK)) {
if (ev->status != 0)
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
- conn->dst_type, ev->status);
+ conn->dst_type, ev->status);
else
mgmt_device_disconnected(hdev, &conn->dst, conn->type,
conn->dst_type);
@@ -1934,12 +1925,12 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_auth_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -1949,7 +1940,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
if (!ev->status) {
if (!hci_conn_ssp_enabled(conn) &&
- test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
+ test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
BT_INFO("re-auth of legacy device is not possible.");
} else {
conn->link_mode |= HCI_LM_AUTH;
@@ -1969,7 +1960,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
cp.handle = ev->handle;
cp.encrypt = 0x01;
hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
+ &cp);
} else {
conn->state = BT_CONNECTED;
hci_proto_connect_cfm(conn, ev->status);
@@ -1989,7 +1980,7 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
cp.handle = ev->handle;
cp.encrypt = 0x01;
hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
- &cp);
+ &cp);
} else {
clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
hci_encrypt_cfm(conn, ev->status, 0x00);
@@ -2000,7 +1991,7 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_remote_name *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -2039,12 +2030,12 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_encrypt_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2082,12 +2073,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2104,12 +2096,13 @@ static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct
hci_dev_unlock(hdev);
}
-static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_features *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2128,7 +2121,7 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
cp.handle = ev->handle;
cp.page = 0x01;
hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
goto unlock;
}
@@ -2153,17 +2146,18 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
-static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
-static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_cmd_complete *ev = (void *) skb->data;
__u16 opcode;
@@ -2370,7 +2364,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
break;
default:
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
break;
}
@@ -2384,7 +2378,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
}
}
-static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_cmd_status *ev = (void *) skb->data;
__u16 opcode;
@@ -2451,7 +2445,7 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
break;
default:
- BT_DBG("%s opcode 0x%x", hdev->name, opcode);
+ BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
break;
}
@@ -2465,12 +2459,12 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
}
}
-static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_role_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2491,7 +2485,7 @@ static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb
hci_dev_unlock(hdev);
}
-static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
int i;
@@ -2502,7 +2496,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
}
if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
- ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
+ ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
@@ -2557,8 +2551,7 @@ static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *s
queue_work(hdev->workqueue, &hdev->tx_work);
}
-static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
int i;
@@ -2569,13 +2562,13 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
}
if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
- ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
+ ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
BT_DBG("%s bad parameters", hdev->name);
return;
}
BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
- ev->num_hndl);
+ ev->num_hndl);
for (i = 0; i < ev->num_hndl; i++) {
struct hci_comp_blocks_info *info = &ev->handles[i];
@@ -2607,12 +2600,12 @@ static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
queue_work(hdev->workqueue, &hdev->tx_work);
}
-static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_mode_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2621,7 +2614,8 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
conn->mode = ev->mode;
conn->interval = __le16_to_cpu(ev->interval);
- if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
+ if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
+ &conn->flags)) {
if (conn->mode == HCI_CM_ACTIVE)
set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
else
@@ -2635,7 +2629,7 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
hci_dev_unlock(hdev);
}
-static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pin_code_req *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -2656,7 +2650,7 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
+ sizeof(ev->bdaddr), &ev->bdaddr);
else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
u8 secure;
@@ -2672,7 +2666,7 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_req *ev = (void *) skb->data;
struct hci_cp_link_key_reply cp;
@@ -2689,15 +2683,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
key = hci_find_link_key(hdev, &ev->bdaddr);
if (!key) {
BT_DBG("%s link key not found for %s", hdev->name,
- batostr(&ev->bdaddr));
+ batostr(&ev->bdaddr));
goto not_found;
}
BT_DBG("%s found key type %u for %s", hdev->name, key->type,
- batostr(&ev->bdaddr));
+ batostr(&ev->bdaddr));
if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
- key->type == HCI_LK_DEBUG_COMBINATION) {
+ key->type == HCI_LK_DEBUG_COMBINATION) {
BT_DBG("%s ignoring debug key", hdev->name);
goto not_found;
}
@@ -2705,16 +2699,15 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
if (conn) {
if (key->type == HCI_LK_UNAUTH_COMBINATION &&
- conn->auth_type != 0xff &&
- (conn->auth_type & 0x01)) {
+ conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
BT_DBG("%s ignoring unauthenticated key", hdev->name);
goto not_found;
}
if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
- conn->pending_sec_level == BT_SECURITY_HIGH) {
- BT_DBG("%s ignoring key unauthenticated for high \
- security", hdev->name);
+ conn->pending_sec_level == BT_SECURITY_HIGH) {
+ BT_DBG("%s ignoring key unauthenticated for high security",
+ hdev->name);
goto not_found;
}
@@ -2723,7 +2716,7 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
}
bacpy(&cp.bdaddr, &ev->bdaddr);
- memcpy(cp.link_key, key->val, 16);
+ memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
@@ -2736,7 +2729,7 @@ not_found:
hci_dev_unlock(hdev);
}
-static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_link_key_notify *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -2760,17 +2753,17 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
- ev->key_type, pin_len);
+ ev->key_type, pin_len);
hci_dev_unlock(hdev);
}
-static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_clock_offset *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2788,12 +2781,12 @@ static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *sk
hci_dev_unlock(hdev);
}
-static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pkt_type_change *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2804,7 +2797,7 @@ static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff
hci_dev_unlock(hdev);
}
-static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
struct inquiry_entry *ie;
@@ -2822,7 +2815,8 @@ static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *
hci_dev_unlock(hdev);
}
-static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct inquiry_data data;
int num_rsp = *((__u8 *) skb->data);
@@ -2881,7 +2875,8 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
hci_dev_unlock(hdev);
}
-static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_ext_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_ext_features *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -2929,12 +2924,13 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
@@ -2984,19 +2980,20 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
BT_DBG("%s", hdev->name);
}
-static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_sniff_subrate *ev = (void *) skb->data;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
}
-static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct inquiry_data data;
struct extended_inquiry_info *info = (void *) (skb->data + 1);
@@ -3049,7 +3046,7 @@ static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %u handle %u", hdev->name, ev->status,
+ BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
__le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
@@ -3087,7 +3084,7 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline u8 hci_get_auth_req(struct hci_conn *conn)
+static u8 hci_get_auth_req(struct hci_conn *conn)
{
/* If remote requests dedicated bonding follow that lead */
if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
@@ -3106,7 +3103,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn)
return conn->auth_type;
}
-static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_request *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -3125,7 +3122,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
goto unlock;
if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
- (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
+ (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
struct hci_cp_io_capability_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -3136,14 +3133,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
conn->auth_type = hci_get_auth_req(conn);
cp.authentication = conn->auth_type;
- if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
- hci_find_remote_oob_data(hdev, &conn->dst))
+ if (hci_find_remote_oob_data(hdev, &conn->dst) &&
+ (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
cp.oob_data = 0x01;
else
cp.oob_data = 0x00;
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
} else {
struct hci_cp_io_capability_neg_reply cp;
@@ -3151,14 +3148,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
- sizeof(cp), &cp);
+ sizeof(cp), &cp);
}
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_io_capa_reply *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -3180,8 +3177,8 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_user_confirm_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_user_confirm_req *ev = (void *) skb->data;
int loc_mitm, rem_mitm, confirm_hint = 0;
@@ -3209,13 +3206,13 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
BT_DBG("Rejecting request: remote device can't provide MITM");
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
+ sizeof(ev->bdaddr), &ev->bdaddr);
goto unlock;
}
/* If no side requires MITM protection; auto-accept */
if ((!loc_mitm || conn->remote_cap == 0x03) &&
- (!rem_mitm || conn->io_capability == 0x03)) {
+ (!rem_mitm || conn->io_capability == 0x03)) {
/* If we're not the initiators request authorization to
* proceed from user space (mgmt_user_confirm with
@@ -3227,7 +3224,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
}
BT_DBG("Auto-accept of user confirmation with %ums delay",
- hdev->auto_accept_delay);
+ hdev->auto_accept_delay);
if (hdev->auto_accept_delay > 0) {
int delay = msecs_to_jiffies(hdev->auto_accept_delay);
@@ -3236,7 +3233,7 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
}
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
- sizeof(ev->bdaddr), &ev->bdaddr);
+ sizeof(ev->bdaddr), &ev->bdaddr);
goto unlock;
}
@@ -3248,8 +3245,8 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_user_passkey_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_user_passkey_req *ev = (void *) skb->data;
@@ -3263,7 +3260,8 @@ static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
-static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
struct hci_conn *conn;
@@ -3291,7 +3289,8 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_remote_host_features_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_host_features *ev = (void *) skb->data;
struct inquiry_entry *ie;
@@ -3307,8 +3306,8 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
hci_dev_unlock(hdev);
}
-static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
+ struct sk_buff *skb)
{
struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
struct oob_data *data;
@@ -3329,28 +3328,41 @@ static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
- &cp);
+ &cp);
} else {
struct hci_cp_remote_oob_data_neg_reply cp;
bacpy(&cp.bdaddr, &ev->bdaddr);
hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
- &cp);
+ &cp);
}
unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_conn_complete *ev = (void *) skb->data;
struct hci_conn *conn;
- BT_DBG("%s status %d", hdev->name, ev->status);
+ BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
hci_dev_lock(hdev);
+ if (ev->status) {
+ conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
+ if (!conn)
+ goto unlock;
+
+ mgmt_connect_failed(hdev, &conn->dst, conn->type,
+ conn->dst_type, ev->status);
+ hci_proto_connect_cfm(conn, ev->status);
+ conn->state = BT_CLOSED;
+ hci_conn_del(conn);
+ goto unlock;
+ }
+
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
if (!conn) {
conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
@@ -3363,15 +3375,6 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
conn->dst_type = ev->bdaddr_type;
}
- if (ev->status) {
- mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
- conn->dst_type, ev->status);
- hci_proto_connect_cfm(conn, ev->status);
- conn->state = BT_CLOSED;
- hci_conn_del(conn);
- goto unlock;
- }
-
if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
conn->dst_type, 0, NULL, 0, NULL);
@@ -3389,8 +3392,7 @@ unlock:
hci_dev_unlock(hdev);
}
-static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 num_reports = skb->data[0];
void *ptr = &skb->data[1];
@@ -3411,8 +3413,7 @@ static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
hci_dev_unlock(hdev);
}
-static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
- struct sk_buff *skb)
+static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_ltk_req *ev = (void *) skb->data;
struct hci_cp_le_ltk_reply cp;
@@ -3420,7 +3421,7 @@ static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
struct hci_conn *conn;
struct smp_ltk *ltk;
- BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
+ BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
hci_dev_lock(hdev);
@@ -3455,7 +3456,7 @@ not_found:
hci_dev_unlock(hdev);
}
-static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -3644,7 +3645,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
break;
default:
- BT_DBG("%s event 0x%x", hdev->name, event);
+ BT_DBG("%s event 0x%2.2x", hdev->name, event);
break;
}
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 5914623f426a..a7f04de03d79 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -24,25 +24,7 @@
/* Bluetooth HCI sockets. */
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/compat.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -113,11 +95,12 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
flt = &hci_pi(sk)->filter;
if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ?
- 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
+ 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS),
+ &flt->type_mask))
continue;
if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) {
- register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
+ int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
if (!hci_test_bit(evt, &flt->event_mask))
continue;
@@ -240,7 +223,8 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
struct hci_mon_hdr *hdr;
/* Create a private copy with headroom */
- skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC);
+ skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE,
+ GFP_ATOMIC);
if (!skb_copy)
continue;
@@ -495,7 +479,8 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
}
/* Ioctls that require bound socket */
-static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
+static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
+ unsigned long arg)
{
struct hci_dev *hdev = hci_pi(sk)->hdev;
@@ -540,7 +525,8 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
}
}
-static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *) arg;
@@ -601,7 +587,8 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long a
}
}
-static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
{
struct sockaddr_hci haddr;
struct sock *sk = sock->sk;
@@ -690,7 +677,8 @@ done:
return err;
}
-static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
+static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *addr_len, int peer)
{
struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
struct sock *sk = sock->sk;
@@ -711,13 +699,15 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *add
return 0;
}
-static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
+ struct sk_buff *skb)
{
__u32 mask = hci_pi(sk)->cmsg_mask;
if (mask & HCI_CMSG_DIR) {
int incoming = bt_cb(skb)->incoming;
- put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming);
+ put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
+ &incoming);
}
if (mask & HCI_CMSG_TSTAMP) {
@@ -747,7 +737,7 @@ static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_
}
static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *msg, size_t len, int flags)
+ struct msghdr *msg, size_t len, int flags)
{
int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
@@ -857,8 +847,9 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
u16 ocf = hci_opcode_ocf(opcode);
if (((ogf > HCI_SFLT_MAX_OGF) ||
- !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) &&
- !capable(CAP_NET_RAW)) {
+ !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
+ &hci_sec_filter.ocf_mask[ogf])) &&
+ !capable(CAP_NET_RAW)) {
err = -EPERM;
goto drop;
}
@@ -891,7 +882,8 @@ drop:
goto done;
}
-static int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len)
+static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, unsigned int len)
{
struct hci_ufilter uf = { .opcode = 0 };
struct sock *sk = sock->sk;
@@ -973,7 +965,8 @@ done:
return err;
}
-static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
+static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
{
struct hci_ufilter uf;
struct sock *sk = sock->sk;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 937f3187eafa..a20e61c3653d 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -1,10 +1,6 @@
/* Bluetooth HCI driver model support. */
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
#include <linux/module.h>
#include <net/bluetooth/bluetooth.h>
@@ -31,27 +27,30 @@ static inline char *link_typetostr(int type)
}
}
-static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "%s\n", link_typetostr(conn->type));
}
-static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_address(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "%s\n", batostr(&conn->dst));
}
-static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_link_features(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_conn *conn = to_hci_conn(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- conn->features[0], conn->features[1],
- conn->features[2], conn->features[3],
- conn->features[4], conn->features[5],
- conn->features[6], conn->features[7]);
+ conn->features[0], conn->features[1],
+ conn->features[2], conn->features[3],
+ conn->features[4], conn->features[5],
+ conn->features[6], conn->features[7]);
}
#define LINK_ATTR(_name, _mode, _show, _store) \
@@ -185,19 +184,22 @@ static inline char *host_typetostr(int type)
}
}
-static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_bus(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_bustostr(hdev->bus));
}
-static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_type(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type));
}
-static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
char name[HCI_MAX_NAME_LENGTH + 1];
@@ -210,55 +212,64 @@ static ssize_t show_name(struct device *dev, struct device_attribute *attr, char
return sprintf(buf, "%s\n", name);
}
-static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_class(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
- return sprintf(buf, "0x%.2x%.2x%.2x\n",
- hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
+ return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
+ hdev->dev_class[1], hdev->dev_class[0]);
}
-static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_address(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
}
-static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_features(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- hdev->features[0], hdev->features[1],
- hdev->features[2], hdev->features[3],
- hdev->features[4], hdev->features[5],
- hdev->features[6], hdev->features[7]);
+ hdev->features[0], hdev->features[1],
+ hdev->features[2], hdev->features[3],
+ hdev->features[4], hdev->features[5],
+ hdev->features[6], hdev->features[7]);
}
-static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_manufacturer(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->manufacturer);
}
-static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_hci_version(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->hci_ver);
}
-static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_hci_revision(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->hci_rev);
}
-static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_idle_timeout(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->idle_timeout);
}
-static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_idle_timeout(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct hci_dev *hdev = to_hci_dev(dev);
unsigned int val;
@@ -276,13 +287,16 @@ static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *a
return count;
}
-static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_sniff_max_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->sniff_max_interval);
}
-static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_sniff_max_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct hci_dev *hdev = to_hci_dev(dev);
u16 val;
@@ -300,13 +314,16 @@ static ssize_t store_sniff_max_interval(struct device *dev, struct device_attrib
return count;
}
-static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_sniff_min_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = to_hci_dev(dev);
return sprintf(buf, "%d\n", hdev->sniff_min_interval);
}
-static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_sniff_min_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct hci_dev *hdev = to_hci_dev(dev);
u16 val;
@@ -335,11 +352,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL);
static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL);
static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR,
- show_idle_timeout, store_idle_timeout);
+ show_idle_timeout, store_idle_timeout);
static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
- show_sniff_max_interval, store_sniff_max_interval);
+ show_sniff_max_interval, store_sniff_max_interval);
static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
- show_sniff_min_interval, store_sniff_min_interval);
+ show_sniff_min_interval, store_sniff_min_interval);
static struct attribute *bt_host_attrs[] = {
&dev_attr_bus.attr,
@@ -455,8 +472,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid)
memcpy(&data5, &uuid[14], 2);
seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n",
- ntohl(data0), ntohs(data1), ntohs(data2),
- ntohs(data3), ntohl(data4), ntohs(data5));
+ ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3),
+ ntohl(data4), ntohs(data5));
}
static int uuids_show(struct seq_file *f, void *p)
@@ -513,7 +530,7 @@ static int auto_accept_delay_get(void *data, u64 *val)
}
DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
- auto_accept_delay_set, "%llu\n");
+ auto_accept_delay_set, "%llu\n");
void hci_init_sysfs(struct hci_dev *hdev)
{
@@ -547,15 +564,15 @@ int hci_add_sysfs(struct hci_dev *hdev)
return 0;
debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
- hdev, &inquiry_cache_fops);
+ hdev, &inquiry_cache_fops);
debugfs_create_file("blacklist", 0444, hdev->debugfs,
- hdev, &blacklist_fops);
+ hdev, &blacklist_fops);
debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
- &auto_accept_delay_fops);
+ &auto_accept_delay_fops);
return 0;
}
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 2c20d765b394..ccd985da6518 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -21,27 +21,8 @@
*/
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/freezer.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
#include <linux/kthread.h>
-#include <net/sock.h>
-
-#include <linux/input.h>
-#include <linux/hid.h>
#include <linux/hidraw.h>
#include <net/bluetooth/bluetooth.h>
@@ -244,7 +225,8 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
}
static int __hidp_send_ctrl_message(struct hidp_session *session,
- unsigned char hdr, unsigned char *data, int size)
+ unsigned char hdr, unsigned char *data,
+ int size)
{
struct sk_buff *skb;
@@ -268,7 +250,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session,
return 0;
}
-static inline int hidp_send_ctrl_message(struct hidp_session *session,
+static int hidp_send_ctrl_message(struct hidp_session *session,
unsigned char hdr, unsigned char *data, int size)
{
int err;
@@ -471,7 +453,7 @@ static void hidp_set_timer(struct hidp_session *session)
mod_timer(&session->timer, jiffies + HZ * session->idle_to);
}
-static inline void hidp_del_timer(struct hidp_session *session)
+static void hidp_del_timer(struct hidp_session *session)
{
if (session->idle_to > 0)
del_timer(&session->timer);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 73a32d705c1f..18b3f6892a36 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -20,22 +20,8 @@
SOFTWARE IS DISCLAIMED.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/skbuff.h>
-#include <linux/socket.h>
-#include <linux/ioctl.h>
+#include <linux/export.h>
#include <linux/file.h>
-#include <linux/init.h>
-#include <linux/compat.h>
-#include <linux/gfp.h>
-#include <net/sock.h>
#include "hidp.h"
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 4554e80d16a3..a8964db04bfb 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -30,32 +30,14 @@
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/capability.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
#include <linux/crc16.h>
-#include <net/sock.h>
-
-#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/smp.h>
+#include <net/bluetooth/a2mp.h>
bool disable_ertm;
@@ -73,6 +55,9 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
struct l2cap_chan *chan, int err);
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event);
+
/* ---- L2CAP channels ---- */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
@@ -196,7 +181,7 @@ static void __l2cap_state_change(struct l2cap_chan *chan, int state)
state_to_string(state));
chan->state = state;
- chan->ops->state_change(chan->data, state);
+ chan->ops->state_change(chan, state);
}
static void l2cap_state_change(struct l2cap_chan *chan, int state)
@@ -224,6 +209,37 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
release_sock(sk);
}
+static void __set_retrans_timer(struct l2cap_chan *chan)
+{
+ if (!delayed_work_pending(&chan->monitor_timer) &&
+ chan->retrans_timeout) {
+ l2cap_set_timer(chan, &chan->retrans_timer,
+ msecs_to_jiffies(chan->retrans_timeout));
+ }
+}
+
+static void __set_monitor_timer(struct l2cap_chan *chan)
+{
+ __clear_retrans_timer(chan);
+ if (chan->monitor_timeout) {
+ l2cap_set_timer(chan, &chan->monitor_timer,
+ msecs_to_jiffies(chan->monitor_timeout));
+ }
+}
+
+static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
+ u16 seq)
+{
+ struct sk_buff *skb;
+
+ skb_queue_walk(head, skb) {
+ if (bt_cb(skb)->control.txseq == seq)
+ return skb;
+ }
+
+ return NULL;
+}
+
/* ---- L2CAP sequence number lists ---- */
/* For ERTM, ordered lists of sequence numbers must be tracked for
@@ -366,7 +382,7 @@ static void l2cap_chan_timeout(struct work_struct *work)
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data);
+ chan->ops->close(chan);
mutex_unlock(&conn->chan_lock);
l2cap_chan_put(chan);
@@ -392,6 +408,9 @@ struct l2cap_chan *l2cap_chan_create(void)
atomic_set(&chan->refcnt, 1);
+ /* This flag is cleared in l2cap_chan_ready() */
+ set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
+
BT_DBG("chan %p", chan);
return chan;
@@ -412,6 +431,7 @@ void l2cap_chan_set_defaults(struct l2cap_chan *chan)
chan->max_tx = L2CAP_DEFAULT_MAX_TX;
chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
+ chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
chan->sec_level = BT_SECURITY_LOW;
set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
@@ -430,7 +450,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
case L2CAP_CHAN_CONN_ORIENTED:
if (conn->hcon->type == LE_LINK) {
/* LE connection */
- chan->omtu = L2CAP_LE_DEFAULT_MTU;
+ chan->omtu = L2CAP_DEFAULT_MTU;
chan->scid = L2CAP_CID_LE_DATA;
chan->dcid = L2CAP_CID_LE_DATA;
} else {
@@ -447,6 +467,13 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
chan->omtu = L2CAP_DEFAULT_MTU;
break;
+ case L2CAP_CHAN_CONN_FIX_A2MP:
+ chan->scid = L2CAP_CID_A2MP;
+ chan->dcid = L2CAP_CID_A2MP;
+ chan->omtu = L2CAP_A2MP_DEFAULT_MTU;
+ chan->imtu = L2CAP_A2MP_DEFAULT_MTU;
+ break;
+
default:
/* Raw socket can send/recv signalling messages only */
chan->scid = L2CAP_CID_SIGNALING;
@@ -466,18 +493,16 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
list_add(&chan->list, &conn->chan_l);
}
-static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
+void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
mutex_lock(&conn->chan_lock);
__l2cap_chan_add(conn, chan);
mutex_unlock(&conn->chan_lock);
}
-static void l2cap_chan_del(struct l2cap_chan *chan, int err)
+void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
- struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
- struct sock *parent = bt_sk(sk)->parent;
__clear_chan_timer(chan);
@@ -490,34 +515,22 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
l2cap_chan_put(chan);
chan->conn = NULL;
- hci_conn_put(conn->hcon);
- }
-
- lock_sock(sk);
-
- __l2cap_state_change(chan, BT_CLOSED);
- sock_set_flag(sk, SOCK_ZAPPED);
-
- if (err)
- __l2cap_chan_set_err(chan, err);
- if (parent) {
- bt_accept_unlink(sk);
- parent->sk_data_ready(parent, 0);
- } else
- sk->sk_state_change(sk);
+ if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP)
+ hci_conn_put(conn->hcon);
+ }
- release_sock(sk);
+ if (chan->ops->teardown)
+ chan->ops->teardown(chan, err);
- if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
- test_bit(CONF_INPUT_DONE, &chan->conf_state)))
+ if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
return;
- skb_queue_purge(&chan->tx_q);
-
- if (chan->mode == L2CAP_MODE_ERTM) {
- struct srej_list *l, *tmp;
+ switch(chan->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
__clear_retrans_timer(chan);
__clear_monitor_timer(chan);
__clear_ack_timer(chan);
@@ -526,30 +539,15 @@ static void l2cap_chan_del(struct l2cap_chan *chan, int err)
l2cap_seq_list_free(&chan->srej_list);
l2cap_seq_list_free(&chan->retrans_list);
- list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
- list_del(&l->list);
- kfree(l);
- }
- }
-}
-static void l2cap_chan_cleanup_listen(struct sock *parent)
-{
- struct sock *sk;
-
- BT_DBG("parent %p", parent);
-
- /* Close not yet accepted channels */
- while ((sk = bt_accept_dequeue(parent, NULL))) {
- struct l2cap_chan *chan = l2cap_pi(sk)->chan;
-
- l2cap_chan_lock(chan);
- __clear_chan_timer(chan);
- l2cap_chan_close(chan, ECONNRESET);
- l2cap_chan_unlock(chan);
+ /* fall through */
- chan->ops->close(chan->data);
+ case L2CAP_MODE_STREAMING:
+ skb_queue_purge(&chan->tx_q);
+ break;
}
+
+ return;
}
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
@@ -562,12 +560,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
switch (chan->state) {
case BT_LISTEN:
- lock_sock(sk);
- l2cap_chan_cleanup_listen(sk);
-
- __l2cap_state_change(chan, BT_CLOSED);
- sock_set_flag(sk, SOCK_ZAPPED);
- release_sock(sk);
+ if (chan->ops->teardown)
+ chan->ops->teardown(chan, 0);
break;
case BT_CONNECTED:
@@ -595,7 +589,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(result);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
sizeof(rsp), &rsp);
}
@@ -609,9 +603,8 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
break;
default:
- lock_sock(sk);
- sock_set_flag(sk, SOCK_ZAPPED);
- release_sock(sk);
+ if (chan->ops->teardown)
+ chan->ops->teardown(chan, 0);
break;
}
}
@@ -627,7 +620,7 @@ static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
default:
return HCI_AT_NO_BONDING;
}
- } else if (chan->psm == cpu_to_le16(0x0001)) {
+ } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) {
if (chan->sec_level == BT_SECURITY_LOW)
chan->sec_level = BT_SECURITY_SDP;
@@ -773,9 +766,11 @@ static inline void __unpack_control(struct l2cap_chan *chan,
if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
__unpack_extended_control(get_unaligned_le32(skb->data),
&bt_cb(skb)->control);
+ skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
} else {
__unpack_enhanced_control(get_unaligned_le16(skb->data),
&bt_cb(skb)->control);
+ skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
}
}
@@ -830,66 +825,102 @@ static inline void __pack_control(struct l2cap_chan *chan,
}
}
-static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
+static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
- struct sk_buff *skb;
- struct l2cap_hdr *lh;
- struct l2cap_conn *conn = chan->conn;
- int count, hlen;
-
- if (chan->state != BT_CONNECTED)
- return;
-
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- hlen = L2CAP_EXT_HDR_SIZE;
+ return L2CAP_EXT_HDR_SIZE;
else
- hlen = L2CAP_ENH_HDR_SIZE;
+ return L2CAP_ENH_HDR_SIZE;
+}
+
+static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
+ u32 control)
+{
+ struct sk_buff *skb;
+ struct l2cap_hdr *lh;
+ int hlen = __ertm_hdr_size(chan);
if (chan->fcs == L2CAP_FCS_CRC16)
hlen += L2CAP_FCS_SIZE;
- BT_DBG("chan %p, control 0x%8.8x", chan, control);
-
- count = min_t(unsigned int, conn->mtu, hlen);
+ skb = bt_skb_alloc(hlen, GFP_KERNEL);
- control |= __set_sframe(chan);
-
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= __set_ctrl_final(chan);
-
- if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
- control |= __set_ctrl_poll(chan);
-
- skb = bt_skb_alloc(count, GFP_ATOMIC);
if (!skb)
- return;
+ return ERR_PTR(-ENOMEM);
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
- __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
+ else
+ put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
if (chan->fcs == L2CAP_FCS_CRC16) {
- u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
+ u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
skb->priority = HCI_PRIO_MAX;
- l2cap_do_send(chan, skb);
+ return skb;
}
-static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
+static void l2cap_send_sframe(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
+ struct sk_buff *skb;
+ u32 control_field;
+
+ BT_DBG("chan %p, control %p", chan, control);
+
+ if (!control->sframe)
+ return;
+
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
+ !control->poll)
+ control->final = 1;
+
+ if (control->super == L2CAP_SUPER_RR)
+ clear_bit(CONN_RNR_SENT, &chan->conn_state);
+ else if (control->super == L2CAP_SUPER_RNR)
set_bit(CONN_RNR_SENT, &chan->conn_state);
- } else
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- control |= __set_reqseq(chan, chan->buffer_seq);
+ if (control->super != L2CAP_SUPER_SREJ) {
+ chan->last_acked_seq = control->reqseq;
+ __clear_ack_timer(chan);
+ }
- l2cap_send_sframe(chan, control);
+ BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
+ control->final, control->poll, control->super);
+
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ control_field = __pack_extended_control(control);
+ else
+ control_field = __pack_enhanced_control(control);
+
+ skb = l2cap_create_sframe_pdu(chan, control_field);
+ if (!IS_ERR(skb))
+ l2cap_do_send(chan, skb);
+}
+
+static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
+{
+ struct l2cap_ctrl control;
+
+ BT_DBG("chan %p, poll %d", chan, poll);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.poll = poll;
+
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
+ control.super = L2CAP_SUPER_RNR;
+ else
+ control.super = L2CAP_SUPER_RR;
+
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
}
static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
@@ -914,25 +945,13 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan)
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
- struct sock *sk = chan->sk;
- struct sock *parent;
-
- lock_sock(sk);
-
- parent = bt_sk(sk)->parent;
-
- BT_DBG("sk %p, parent %p", sk, parent);
-
+ /* This clears all conf flags, including CONF_NOT_COMPLETE */
chan->conf_state = 0;
__clear_chan_timer(chan);
- __l2cap_state_change(chan, BT_CONNECTED);
- sk->sk_state_change(sk);
-
- if (parent)
- parent->sk_data_ready(parent, 0);
+ chan->state = BT_CONNECTED;
- release_sock(sk);
+ chan->ops->ready(chan);
}
static void l2cap_do_start(struct l2cap_chan *chan)
@@ -953,7 +972,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
l2cap_send_conn_req(chan);
} else {
struct l2cap_info_req req;
- req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+ req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
@@ -995,6 +1014,11 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
__clear_ack_timer(chan);
}
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+ __l2cap_state_change(chan, BT_DISCONN);
+ return;
+ }
+
req.dcid = cpu_to_le16(chan->dcid);
req.scid = cpu_to_le16(chan->scid);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
@@ -1053,20 +1077,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
if (test_bit(BT_SK_DEFER_SETUP,
&bt_sk(sk)->flags)) {
struct sock *parent = bt_sk(sk)->parent;
- rsp.result = cpu_to_le16(L2CAP_CR_PEND);
- rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
if (parent)
parent->sk_data_ready(parent, 0);
} else {
__l2cap_state_change(chan, BT_CONFIG);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
}
release_sock(sk);
} else {
- rsp.result = cpu_to_le16(L2CAP_CR_PEND);
- rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
}
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
@@ -1150,13 +1174,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
lock_sock(parent);
- /* Check for backlog size */
- if (sk_acceptq_is_full(parent)) {
- BT_DBG("backlog full %d", parent->sk_ack_backlog);
- goto clean;
- }
-
- chan = pchan->ops->new_connection(pchan->data);
+ chan = pchan->ops->new_connection(pchan);
if (!chan)
goto clean;
@@ -1171,10 +1189,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
l2cap_chan_add(conn, chan);
- __set_chan_timer(chan, sk->sk_sndtimeo);
-
- __l2cap_state_change(chan, BT_CONNECTED);
- parent->sk_data_ready(parent, 0);
+ l2cap_chan_ready(chan);
clean:
release_sock(parent);
@@ -1198,6 +1213,11 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
l2cap_chan_lock(chan);
+ if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) {
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
if (conn->hcon->type == LE_LINK) {
if (smp_conn_security(conn, chan->sec_level))
l2cap_chan_ready(chan);
@@ -1270,7 +1290,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data);
+ chan->ops->close(chan);
l2cap_chan_put(chan);
}
@@ -1444,21 +1464,17 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
goto done;
}
- lock_sock(sk);
-
- switch (sk->sk_state) {
+ switch (chan->state) {
case BT_CONNECT:
case BT_CONNECT2:
case BT_CONFIG:
/* Already connecting */
err = 0;
- release_sock(sk);
goto done;
case BT_CONNECTED:
/* Already connected */
err = -EISCONN;
- release_sock(sk);
goto done;
case BT_OPEN:
@@ -1468,13 +1484,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
default:
err = -EBADFD;
- release_sock(sk);
goto done;
}
/* Set destination address and psm */
+ lock_sock(sk);
bacpy(&bt_sk(sk)->dst, dst);
-
release_sock(sk);
chan->psm = psm;
@@ -1576,23 +1591,20 @@ int __l2cap_wait_ack(struct sock *sk)
static void l2cap_monitor_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
- monitor_timer.work);
+ monitor_timer.work);
BT_DBG("chan %p", chan);
l2cap_chan_lock(chan);
- if (chan->retry_count >= chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
+ if (!chan->conn) {
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
return;
}
- chan->retry_count++;
- __set_monitor_timer(chan);
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
@@ -1600,234 +1612,293 @@ static void l2cap_monitor_timeout(struct work_struct *work)
static void l2cap_retrans_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
- retrans_timer.work);
+ retrans_timer.work);
BT_DBG("chan %p", chan);
l2cap_chan_lock(chan);
- chan->retry_count = 1;
- __set_monitor_timer(chan);
-
- set_bit(CONN_WAIT_F, &chan->conn_state);
-
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
+ if (!chan->conn) {
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ return;
+ }
+ l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
-static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
+static void l2cap_streaming_send(struct l2cap_chan *chan,
+ struct sk_buff_head *skbs)
{
struct sk_buff *skb;
+ struct l2cap_ctrl *control;
- while ((skb = skb_peek(&chan->tx_q)) &&
- chan->unacked_frames) {
- if (bt_cb(skb)->control.txseq == chan->expected_ack_seq)
- break;
+ BT_DBG("chan %p, skbs %p", chan, skbs);
- skb = skb_dequeue(&chan->tx_q);
- kfree_skb(skb);
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
- chan->unacked_frames--;
- }
+ while (!skb_queue_empty(&chan->tx_q)) {
- if (!chan->unacked_frames)
- __clear_retrans_timer(chan);
-}
+ skb = skb_dequeue(&chan->tx_q);
-static void l2cap_streaming_send(struct l2cap_chan *chan)
-{
- struct sk_buff *skb;
- u32 control;
- u16 fcs;
+ bt_cb(skb)->control.retries = 1;
+ control = &bt_cb(skb)->control;
+
+ control->reqseq = 0;
+ control->txseq = chan->next_tx_seq;
- while ((skb = skb_dequeue(&chan->tx_q))) {
- control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
- control |= __set_txseq(chan, chan->next_tx_seq);
- control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
- __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
+ __pack_control(chan, control, skb);
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data,
- skb->len - L2CAP_FCS_SIZE);
- put_unaligned_le16(fcs,
- skb->data + skb->len - L2CAP_FCS_SIZE);
+ u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
l2cap_do_send(chan, skb);
+ BT_DBG("Sent txseq %u", control->txseq);
+
chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+ chan->frames_sent++;
}
}
-static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
+static int l2cap_ertm_send(struct l2cap_chan *chan)
{
struct sk_buff *skb, *tx_skb;
- u16 fcs;
- u32 control;
+ struct l2cap_ctrl *control;
+ int sent = 0;
- skb = skb_peek(&chan->tx_q);
- if (!skb)
- return;
+ BT_DBG("chan %p", chan);
- while (bt_cb(skb)->control.txseq != tx_seq) {
- if (skb_queue_is_last(&chan->tx_q, skb))
- return;
+ if (chan->state != BT_CONNECTED)
+ return -ENOTCONN;
- skb = skb_queue_next(&chan->tx_q, skb);
- }
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
+ return 0;
- if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
- chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- return;
- }
+ while (chan->tx_send_head &&
+ chan->unacked_frames < chan->remote_tx_win &&
+ chan->tx_state == L2CAP_TX_STATE_XMIT) {
+
+ skb = chan->tx_send_head;
- tx_skb = skb_clone(skb, GFP_ATOMIC);
- bt_cb(skb)->control.retries++;
+ bt_cb(skb)->control.retries = 1;
+ control = &bt_cb(skb)->control;
- control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
- control &= __get_sar_mask(chan);
+ if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
+ control->final = 1;
+
+ control->reqseq = chan->buffer_seq;
+ chan->last_acked_seq = chan->buffer_seq;
+ control->txseq = chan->next_tx_seq;
- if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= __set_ctrl_final(chan);
+ __pack_control(chan, control, skb);
- control |= __set_reqseq(chan, chan->buffer_seq);
- control |= __set_txseq(chan, tx_seq);
+ if (chan->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
+ put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
+ }
- __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
+ /* Clone after data has been modified. Data is assumed to be
+ read-only (for locking purposes) on cloned sk_buffs.
+ */
+ tx_skb = skb_clone(skb, GFP_KERNEL);
- if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)tx_skb->data,
- tx_skb->len - L2CAP_FCS_SIZE);
- put_unaligned_le16(fcs,
- tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
+ if (!tx_skb)
+ break;
+
+ __set_retrans_timer(chan);
+
+ chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
+ chan->unacked_frames++;
+ chan->frames_sent++;
+ sent++;
+
+ if (skb_queue_is_last(&chan->tx_q, skb))
+ chan->tx_send_head = NULL;
+ else
+ chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
+
+ l2cap_do_send(chan, tx_skb);
+ BT_DBG("Sent txseq %u", control->txseq);
}
- l2cap_do_send(chan, tx_skb);
+ BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
+ chan->unacked_frames, skb_queue_len(&chan->tx_q));
+
+ return sent;
}
-static int l2cap_ertm_send(struct l2cap_chan *chan)
+static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
- struct sk_buff *skb, *tx_skb;
- u16 fcs;
- u32 control;
- int nsent = 0;
+ struct l2cap_ctrl control;
+ struct sk_buff *skb;
+ struct sk_buff *tx_skb;
+ u16 seq;
- if (chan->state != BT_CONNECTED)
- return -ENOTCONN;
+ BT_DBG("chan %p", chan);
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
- return 0;
+ return;
- while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
+ while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
+ seq = l2cap_seq_list_pop(&chan->retrans_list);
- if (bt_cb(skb)->control.retries == chan->remote_max_tx &&
- chan->remote_max_tx) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
- break;
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
+ if (!skb) {
+ BT_DBG("Error: Can't retransmit seq %d, frame missing",
+ seq);
+ continue;
}
- tx_skb = skb_clone(skb, GFP_ATOMIC);
-
bt_cb(skb)->control.retries++;
+ control = bt_cb(skb)->control;
- control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
- control &= __get_sar_mask(chan);
+ if (chan->max_tx != 0 &&
+ bt_cb(skb)->control.retries > chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
+ }
+ control.reqseq = chan->buffer_seq;
if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
- control |= __set_ctrl_final(chan);
+ control.final = 1;
+ else
+ control.final = 0;
- control |= __set_reqseq(chan, chan->buffer_seq);
- control |= __set_txseq(chan, chan->next_tx_seq);
- control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
+ if (skb_cloned(skb)) {
+ /* Cloned sk_buffs are read-only, so we need a
+ * writeable copy
+ */
+ tx_skb = skb_copy(skb, GFP_ATOMIC);
+ } else {
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ }
- __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
+ if (!tx_skb) {
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
+ }
+
+ /* Update skb contents */
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
+ put_unaligned_le32(__pack_extended_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
+ } else {
+ put_unaligned_le16(__pack_enhanced_control(&control),
+ tx_skb->data + L2CAP_HDR_SIZE);
+ }
if (chan->fcs == L2CAP_FCS_CRC16) {
- fcs = crc16(0, (u8 *)skb->data,
- tx_skb->len - L2CAP_FCS_SIZE);
- put_unaligned_le16(fcs, skb->data +
- tx_skb->len - L2CAP_FCS_SIZE);
+ u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
+ put_unaligned_le16(fcs, skb_put(tx_skb,
+ L2CAP_FCS_SIZE));
}
l2cap_do_send(chan, tx_skb);
- __set_retrans_timer(chan);
-
- bt_cb(skb)->control.txseq = chan->next_tx_seq;
+ BT_DBG("Resent txseq %d", control.txseq);
- chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
-
- if (bt_cb(skb)->control.retries == 1) {
- chan->unacked_frames++;
-
- if (!nsent++)
- __clear_ack_timer(chan);
- }
-
- chan->frames_sent++;
-
- if (skb_queue_is_last(&chan->tx_q, skb))
- chan->tx_send_head = NULL;
- else
- chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
+ chan->last_acked_seq = chan->buffer_seq;
}
-
- return nsent;
}
-static int l2cap_retransmit_frames(struct l2cap_chan *chan)
+static void l2cap_retransmit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- int ret;
+ BT_DBG("chan %p, control %p", chan, control);
- if (!skb_queue_empty(&chan->tx_q))
- chan->tx_send_head = chan->tx_q.next;
-
- chan->next_tx_seq = chan->expected_ack_seq;
- ret = l2cap_ertm_send(chan);
- return ret;
+ l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
+ l2cap_ertm_resend(chan);
}
-static void __l2cap_send_ack(struct l2cap_chan *chan)
+static void l2cap_retransmit_all(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- u32 control = 0;
+ struct sk_buff *skb;
- control |= __set_reqseq(chan, chan->buffer_seq);
+ BT_DBG("chan %p, control %p", chan, control);
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
- set_bit(CONN_RNR_SENT, &chan->conn_state);
- l2cap_send_sframe(chan, control);
- return;
- }
+ if (control->poll)
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+
+ l2cap_seq_list_clear(&chan->retrans_list);
- if (l2cap_ertm_send(chan) > 0)
+ if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
return;
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, control);
+ if (chan->unacked_frames) {
+ skb_queue_walk(&chan->tx_q, skb) {
+ if (bt_cb(skb)->control.txseq == control->reqseq ||
+ skb == chan->tx_send_head)
+ break;
+ }
+
+ skb_queue_walk_from(&chan->tx_q, skb) {
+ if (skb == chan->tx_send_head)
+ break;
+
+ l2cap_seq_list_append(&chan->retrans_list,
+ bt_cb(skb)->control.txseq);
+ }
+
+ l2cap_ertm_resend(chan);
+ }
}
static void l2cap_send_ack(struct l2cap_chan *chan)
{
- __clear_ack_timer(chan);
- __l2cap_send_ack(chan);
-}
+ struct l2cap_ctrl control;
+ u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
+ chan->last_acked_seq);
+ int threshold;
-static void l2cap_send_srejtail(struct l2cap_chan *chan)
-{
- struct srej_list *tail;
- u32 control;
+ BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
+ chan, chan->last_acked_seq, chan->buffer_seq);
- control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
- control |= __set_ctrl_final(chan);
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
- tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
- control |= __set_reqseq(chan, tail->tx_seq);
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
+ chan->rx_state == L2CAP_RX_STATE_RECV) {
+ __clear_ack_timer(chan);
+ control.super = L2CAP_SUPER_RNR;
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
+ } else {
+ if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
+ l2cap_ertm_send(chan);
+ /* If any i-frames were sent, they included an ack */
+ if (chan->buffer_seq == chan->last_acked_seq)
+ frames_to_ack = 0;
+ }
- l2cap_send_sframe(chan, control);
+ /* Ack now if the window is 3/4ths full.
+ * Calculate without mul or div
+ */
+ threshold = chan->ack_win;
+ threshold += threshold << 1;
+ threshold >>= 2;
+
+ BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
+ threshold);
+
+ if (frames_to_ack >= threshold) {
+ __clear_ack_timer(chan);
+ control.super = L2CAP_SUPER_RR;
+ control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &control);
+ frames_to_ack = 0;
+ }
+
+ if (frames_to_ack)
+ __set_ack_timer(chan);
+ }
}
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
@@ -1876,15 +1947,15 @@ static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
}
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
- struct msghdr *msg, size_t len,
- u32 priority)
+ struct msghdr *msg, size_t len,
+ u32 priority)
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
+ BT_DBG("chan %p len %zu priority %u", chan, len, priority);
count = min_t(unsigned int, (conn->mtu - hlen), len);
@@ -1910,15 +1981,15 @@ static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
}
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
- struct msghdr *msg, size_t len,
- u32 priority)
+ struct msghdr *msg, size_t len,
+ u32 priority)
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %d", chan, (int)len);
+ BT_DBG("chan %p len %zu", chan, len);
count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
@@ -1943,23 +2014,20 @@ static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
}
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
- struct msghdr *msg, size_t len,
- u16 sdulen)
+ struct msghdr *msg, size_t len,
+ u16 sdulen)
{
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count, hlen;
struct l2cap_hdr *lh;
- BT_DBG("chan %p len %d", chan, (int)len);
+ BT_DBG("chan %p len %zu", chan, len);
if (!conn)
return ERR_PTR(-ENOTCONN);
- if (test_bit(FLAG_EXT_CTRL, &chan->flags))
- hlen = L2CAP_EXT_HDR_SIZE;
- else
- hlen = L2CAP_ENH_HDR_SIZE;
+ hlen = __ertm_hdr_size(chan);
if (sdulen)
hlen += L2CAP_SDULEN_SIZE;
@@ -1979,7 +2047,11 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
- __put_control(chan, 0, skb_put(skb, __ctrl_size(chan)));
+ /* Control header is populated later */
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
+ else
+ put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
if (sdulen)
put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
@@ -1990,9 +2062,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
return ERR_PTR(err);
}
- if (chan->fcs == L2CAP_FCS_CRC16)
- put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
-
+ bt_cb(skb)->control.fcs = chan->fcs;
bt_cb(skb)->control.retries = 0;
return skb;
}
@@ -2004,10 +2074,9 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
struct sk_buff *skb;
u16 sdu_len;
size_t pdu_len;
- int err = 0;
u8 sar;
- BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);
+ BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
/* It is critical that ERTM PDUs fit in a single HCI fragment,
* so fragmented skbs are not used. The HCI layer's handling
@@ -2020,7 +2089,10 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
/* Adjust for largest possible L2CAP overhead. */
- pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;
+ if (chan->fcs)
+ pdu_len -= L2CAP_FCS_SIZE;
+
+ pdu_len -= __ertm_hdr_size(chan);
/* Remote device may have requested smaller PDUs */
pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
@@ -2060,7 +2132,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan,
}
}
- return err;
+ return 0;
}
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
@@ -2122,17 +2194,12 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
if (err)
break;
- if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL)
- chan->tx_send_head = seg_queue.next;
- skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
-
if (chan->mode == L2CAP_MODE_ERTM)
- err = l2cap_ertm_send(chan);
+ l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
else
- l2cap_streaming_send(chan);
+ l2cap_streaming_send(chan, &seg_queue);
- if (err >= 0)
- err = len;
+ err = len;
/* If the skbs were not queued for sending, they'll still be in
* seg_queue and need to be purged.
@@ -2148,6 +2215,296 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
return err;
}
+static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
+{
+ struct l2cap_ctrl control;
+ u16 seq;
+
+ BT_DBG("chan %p, txseq %u", chan, txseq);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+
+ for (seq = chan->expected_tx_seq; seq != txseq;
+ seq = __next_seq(chan, seq)) {
+ if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
+ control.reqseq = seq;
+ l2cap_send_sframe(chan, &control);
+ l2cap_seq_list_append(&chan->srej_list, seq);
+ }
+ }
+
+ chan->expected_tx_seq = __next_seq(chan, txseq);
+}
+
+static void l2cap_send_srej_tail(struct l2cap_chan *chan)
+{
+ struct l2cap_ctrl control;
+
+ BT_DBG("chan %p", chan);
+
+ if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
+ return;
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+ control.reqseq = chan->srej_list.tail;
+ l2cap_send_sframe(chan, &control);
+}
+
+static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
+{
+ struct l2cap_ctrl control;
+ u16 initial_head;
+ u16 seq;
+
+ BT_DBG("chan %p, txseq %u", chan, txseq);
+
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.super = L2CAP_SUPER_SREJ;
+
+ /* Capture initial list head to allow only one pass through the list. */
+ initial_head = chan->srej_list.head;
+
+ do {
+ seq = l2cap_seq_list_pop(&chan->srej_list);
+ if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
+ break;
+
+ control.reqseq = seq;
+ l2cap_send_sframe(chan, &control);
+ l2cap_seq_list_append(&chan->srej_list, seq);
+ } while (chan->srej_list.head != initial_head);
+}
+
+static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
+{
+ struct sk_buff *acked_skb;
+ u16 ackseq;
+
+ BT_DBG("chan %p, reqseq %u", chan, reqseq);
+
+ if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
+ return;
+
+ BT_DBG("expected_ack_seq %u, unacked_frames %u",
+ chan->expected_ack_seq, chan->unacked_frames);
+
+ for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
+ ackseq = __next_seq(chan, ackseq)) {
+
+ acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
+ if (acked_skb) {
+ skb_unlink(acked_skb, &chan->tx_q);
+ kfree_skb(acked_skb);
+ chan->unacked_frames--;
+ }
+ }
+
+ chan->expected_ack_seq = reqseq;
+
+ if (chan->unacked_frames == 0)
+ __clear_retrans_timer(chan);
+
+ BT_DBG("unacked_frames %u", chan->unacked_frames);
+}
+
+static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
+{
+ BT_DBG("chan %p", chan);
+
+ chan->expected_tx_seq = chan->buffer_seq;
+ l2cap_seq_list_clear(&chan->srej_list);
+ skb_queue_purge(&chan->srej_q);
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+}
+
+static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_DATA_REQUEST:
+ if (chan->tx_send_head == NULL)
+ chan->tx_send_head = skb_peek(skbs);
+
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+ l2cap_ertm_send(chan);
+ break;
+ case L2CAP_EV_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_abort_rx_srej_sent(chan);
+ }
+
+ l2cap_send_ack(chan);
+
+ break;
+ case L2CAP_EV_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
+ struct l2cap_ctrl local_control;
+
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.sframe = 1;
+ local_control.super = L2CAP_SUPER_RR;
+ local_control.poll = 1;
+ local_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &local_control);
+
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
+ l2cap_process_reqseq(chan, control->reqseq);
+ break;
+ case L2CAP_EV_EXPLICIT_POLL:
+ l2cap_send_rr_or_rnr(chan, 1);
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ __clear_ack_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_EV_RETRANS_TO:
+ l2cap_send_rr_or_rnr(chan, 1);
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ break;
+ case L2CAP_EV_RECV_FBIT:
+ /* Nothing to process */
+ break;
+ default:
+ break;
+ }
+}
+
+static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_DATA_REQUEST:
+ if (chan->tx_send_head == NULL)
+ chan->tx_send_head = skb_peek(skbs);
+ /* Queue data, but don't send. */
+ skb_queue_splice_tail_init(skbs, &chan->tx_q);
+ break;
+ case L2CAP_EV_LOCAL_BUSY_DETECTED:
+ BT_DBG("Enter LOCAL_BUSY");
+ set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ /* The SREJ_SENT state must be aborted if we are to
+ * enter the LOCAL_BUSY state.
+ */
+ l2cap_abort_rx_srej_sent(chan);
+ }
+
+ l2cap_send_ack(chan);
+
+ break;
+ case L2CAP_EV_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
+ struct l2cap_ctrl local_control;
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.sframe = 1;
+ local_control.super = L2CAP_SUPER_RR;
+ local_control.poll = 1;
+ local_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &local_control);
+
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
+ l2cap_process_reqseq(chan, control->reqseq);
+
+ /* Fall through */
+
+ case L2CAP_EV_RECV_FBIT:
+ if (control && control->final) {
+ __clear_monitor_timer(chan);
+ if (chan->unacked_frames > 0)
+ __set_retrans_timer(chan);
+ chan->retry_count = 0;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
+ BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
+ }
+ break;
+ case L2CAP_EV_EXPLICIT_POLL:
+ /* Ignore */
+ break;
+ case L2CAP_EV_MONITOR_TO:
+ if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
+ l2cap_send_rr_or_rnr(chan, 1);
+ __set_monitor_timer(chan);
+ chan->retry_count++;
+ } else {
+ l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
+ chan, control, skbs, event, chan->tx_state);
+
+ switch (chan->tx_state) {
+ case L2CAP_TX_STATE_XMIT:
+ l2cap_tx_state_xmit(chan, control, skbs, event);
+ break;
+ case L2CAP_TX_STATE_WAIT_F:
+ l2cap_tx_state_wait_f(chan, control, skbs, event);
+ break;
+ default:
+ /* Ignore event */
+ break;
+ }
+}
+
+static void l2cap_pass_to_tx(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
+}
+
+static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
+}
+
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
@@ -2170,7 +2527,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
if (!nskb)
continue;
- if (chan->ops->recv(chan->data, nskb))
+ if (chan->ops->recv(chan, nskb))
kfree_skb(nskb);
}
@@ -2178,16 +2535,16 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
}
/* ---- L2CAP signalling commands ---- */
-static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
- u8 code, u8 ident, u16 dlen, void *data)
+static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
+ u8 ident, u16 dlen, void *data)
{
struct sk_buff *skb, **frag;
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;
int len, count;
- BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
- conn, code, ident, dlen);
+ BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
+ conn, code, ident, dlen);
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = min_t(unsigned int, conn->mtu, len);
@@ -2200,9 +2557,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
if (conn->hcon->type == LE_LINK)
- lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
+ lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
else
- lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
+ lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->code = code;
@@ -2270,7 +2627,7 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned
break;
}
- BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
+ BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
return len;
}
@@ -2278,7 +2635,7 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
struct l2cap_conf_opt *opt = *ptr;
- BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
+ BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
opt->type = type;
opt->len = len;
@@ -2314,8 +2671,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
efs.stype = chan->local_stype;
efs.msdu = cpu_to_le16(chan->local_msdu);
efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
- efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
- efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
+ efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
+ efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
break;
case L2CAP_MODE_STREAMING:
@@ -2338,20 +2695,24 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
static void l2cap_ack_timeout(struct work_struct *work)
{
struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
- ack_timer.work);
+ ack_timer.work);
+ u16 frames_to_ack;
BT_DBG("chan %p", chan);
l2cap_chan_lock(chan);
- __l2cap_send_ack(chan);
+ frames_to_ack = __seq_offset(chan, chan->buffer_seq,
+ chan->last_acked_seq);
- l2cap_chan_unlock(chan);
+ if (frames_to_ack)
+ l2cap_send_rr_or_rnr(chan, 0);
+ l2cap_chan_unlock(chan);
l2cap_chan_put(chan);
}
-static inline int l2cap_ertm_init(struct l2cap_chan *chan)
+int l2cap_ertm_init(struct l2cap_chan *chan)
{
int err;
@@ -2360,7 +2721,6 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
chan->expected_ack_seq = 0;
chan->unacked_frames = 0;
chan->buffer_seq = 0;
- chan->num_acked = 0;
chan->frames_sent = 0;
chan->last_acked_seq = 0;
chan->sdu = NULL;
@@ -2381,12 +2741,15 @@ static inline int l2cap_ertm_init(struct l2cap_chan *chan)
skb_queue_head_init(&chan->srej_q);
- INIT_LIST_HEAD(&chan->srej_l);
err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
if (err < 0)
return err;
- return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
+ err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
+ if (err < 0)
+ l2cap_seq_list_free(&chan->srej_list);
+
+ return err;
}
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2424,6 +2787,7 @@ static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
L2CAP_DEFAULT_TX_WINDOW);
chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
}
+ chan->ack_win = chan->tx_win;
}
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
@@ -2512,6 +2876,7 @@ done:
break;
case L2CAP_MODE_STREAMING:
+ l2cap_txwin_setup(chan);
rfc.mode = L2CAP_MODE_STREAMING;
rfc.txwin_size = 0;
rfc.max_transmit = 0;
@@ -2542,7 +2907,7 @@ done:
}
req->dcid = cpu_to_le16(chan->dcid);
- req->flags = cpu_to_le16(0);
+ req->flags = __constant_cpu_to_le16(0);
return ptr - data;
}
@@ -2762,7 +3127,7 @@ done:
}
rsp->scid = cpu_to_le16(chan->dcid);
rsp->result = cpu_to_le16(result);
- rsp->flags = cpu_to_le16(0x0000);
+ rsp->flags = __constant_cpu_to_le16(0);
return ptr - data;
}
@@ -2812,10 +3177,9 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
break;
case L2CAP_CONF_EWS:
- chan->tx_win = min_t(u16, val,
- L2CAP_DEFAULT_EXT_WINDOW);
+ chan->ack_win = min_t(u16, val, chan->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win);
break;
case L2CAP_CONF_EFS:
@@ -2844,6 +3208,9 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
+ chan->ack_win = min_t(u16, chan->ack_win,
+ rfc.txwin_size);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
chan->local_msdu = le16_to_cpu(efs.msdu);
@@ -2861,7 +3228,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
}
req->dcid = cpu_to_le16(chan->dcid);
- req->flags = cpu_to_le16(0x0000);
+ req->flags = __constant_cpu_to_le16(0);
return ptr - data;
}
@@ -2888,8 +3255,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
- rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
- rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS);
+ rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(conn, chan->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
@@ -2905,7 +3272,17 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
int type, olen;
unsigned long val;
- struct l2cap_conf_rfc rfc;
+ /* Use sane default values in case a misbehaving remote device
+ * did not send an RFC or extended window size option.
+ */
+ u16 txwin_ext = chan->ack_win;
+ struct l2cap_conf_rfc rfc = {
+ .mode = chan->mode,
+ .retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
+ .monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
+ .max_pdu_size = cpu_to_le16(chan->imtu),
+ .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
+ };
BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
@@ -2915,32 +3292,27 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
- if (type != L2CAP_CONF_RFC)
- continue;
-
- if (olen != sizeof(rfc))
+ switch (type) {
+ case L2CAP_CONF_RFC:
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *)val, olen);
break;
-
- memcpy(&rfc, (void *)val, olen);
- goto done;
+ case L2CAP_CONF_EWS:
+ txwin_ext = val;
+ break;
+ }
}
- /* Use sane default values in case a misbehaving remote device
- * did not send an RFC option.
- */
- rfc.mode = chan->mode;
- rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
- rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
- rfc.max_pdu_size = cpu_to_le16(chan->imtu);
-
- BT_ERR("Expected RFC option was not found, using defaults");
-
-done:
switch (rfc.mode) {
case L2CAP_MODE_ERTM:
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
- chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
+ else
+ chan->ack_win = min_t(u16, chan->ack_win,
+ rfc.txwin_size);
break;
case L2CAP_MODE_STREAMING:
chan->mps = le16_to_cpu(rfc.max_pdu_size);
@@ -2993,7 +3365,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
- if (psm != cpu_to_le16(0x0001) &&
+ if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) &&
!hci_conn_check_link_mode(conn->hcon)) {
conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
result = L2CAP_CR_SEC_BLOCK;
@@ -3002,25 +3374,16 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
result = L2CAP_CR_NO_MEM;
- /* Check for backlog size */
- if (sk_acceptq_is_full(parent)) {
- BT_DBG("backlog full %d", parent->sk_ack_backlog);
+ /* Check if we already have channel with that dcid */
+ if (__l2cap_get_chan_by_dcid(conn, scid))
goto response;
- }
- chan = pchan->ops->new_connection(pchan->data);
+ chan = pchan->ops->new_connection(pchan);
if (!chan)
goto response;
sk = chan->sk;
- /* Check if we already have channel with that dcid */
- if (__l2cap_get_chan_by_dcid(conn, scid)) {
- sock_set_flag(sk, SOCK_ZAPPED);
- chan->ops->close(chan->data);
- goto response;
- }
-
hci_conn_hold(conn->hcon);
bacpy(&bt_sk(sk)->src, conn->src);
@@ -3074,7 +3437,7 @@ sendresp:
if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
struct l2cap_info_req info;
- info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
+ info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
@@ -3196,7 +3559,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
struct l2cap_cmd_rej_cid rej;
- rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
+ rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID);
rej.scid = cpu_to_le16(chan->scid);
rej.dcid = cpu_to_le16(chan->dcid);
@@ -3218,11 +3581,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
memcpy(chan->conf_req + chan->conf_len, req->data, len);
chan->conf_len += len;
- if (flags & 0x0001) {
+ if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(chan, rsp,
- L2CAP_CONF_SUCCESS, 0x0001), rsp);
+ L2CAP_CONF_SUCCESS, flags), rsp);
goto unlock;
}
@@ -3245,8 +3608,6 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
set_default_fcs(chan);
- l2cap_state_change(chan, BT_CONNECTED);
-
if (chan->mode == L2CAP_MODE_ERTM ||
chan->mode == L2CAP_MODE_STREAMING)
err = l2cap_ertm_init(chan);
@@ -3278,7 +3639,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(chan, rsp,
- L2CAP_CONF_SUCCESS, 0x0000), rsp);
+ L2CAP_CONF_SUCCESS, flags), rsp);
}
unlock:
@@ -3369,7 +3730,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
goto done;
}
- if (flags & 0x01)
+ if (flags & L2CAP_CONF_FLAG_CONTINUATION)
goto done;
set_bit(CONF_INPUT_DONE, &chan->conf_state);
@@ -3377,7 +3738,6 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
set_default_fcs(chan);
- l2cap_state_change(chan, BT_CONNECTED);
if (chan->mode == L2CAP_MODE_ERTM ||
chan->mode == L2CAP_MODE_STREAMING)
err = l2cap_ertm_init(chan);
@@ -3431,7 +3791,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data);
+ chan->ops->close(chan);
l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3465,7 +3825,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
l2cap_chan_unlock(chan);
- chan->ops->close(chan->data);
+ chan->ops->close(chan);
l2cap_chan_put(chan);
mutex_unlock(&conn->chan_lock);
@@ -3486,8 +3846,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
u8 buf[8];
u32 feat_mask = l2cap_feat_mask;
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
- rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
- rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
+ rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK);
+ rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
if (!disable_ertm)
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
| L2CAP_FEAT_FCS;
@@ -3507,15 +3867,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
else
l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
- rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
- rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
+ rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+ rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS);
memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else {
struct l2cap_info_rsp rsp;
rsp.type = cpu_to_le16(type);
- rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
+ rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(rsp), &rsp);
}
@@ -3555,7 +3915,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
struct l2cap_info_req req;
- req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
+ req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN);
conn->info_ident = l2cap_get_ident(conn);
@@ -3598,7 +3958,7 @@ static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
psm = le16_to_cpu(req->psm);
scid = le16_to_cpu(req->scid);
- BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
+ BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
/* Placeholder: Always reject */
rsp.dcid = 0;
@@ -3621,11 +3981,11 @@ static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
}
static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid, u16 result)
+ u16 icid, u16 result)
{
struct l2cap_move_chan_rsp rsp;
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
rsp.icid = cpu_to_le16(icid);
rsp.result = cpu_to_le16(result);
@@ -3634,12 +3994,13 @@ static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
}
static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
- struct l2cap_chan *chan, u16 icid, u16 result)
+ struct l2cap_chan *chan,
+ u16 icid, u16 result)
{
struct l2cap_move_chan_cfm cfm;
u8 ident;
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
ident = l2cap_get_ident(conn);
if (chan)
@@ -3652,18 +4013,19 @@ static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
}
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
- u16 icid)
+ u16 icid)
{
struct l2cap_move_chan_cfm_rsp rsp;
- BT_DBG("icid %d", icid);
+ BT_DBG("icid 0x%4.4x", icid);
rsp.icid = cpu_to_le16(icid);
l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
struct l2cap_move_chan_req *req = data;
u16 icid = 0;
@@ -3674,7 +4036,7 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
icid = le16_to_cpu(req->icid);
- BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
+ BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
if (!enable_hs)
return -EINVAL;
@@ -3686,7 +4048,8 @@ static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
}
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
struct l2cap_move_chan_rsp *rsp = data;
u16 icid, result;
@@ -3697,7 +4060,7 @@ static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
icid = le16_to_cpu(rsp->icid);
result = le16_to_cpu(rsp->result);
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
/* Placeholder: Always unconfirmed */
l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
@@ -3706,7 +4069,8 @@ static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
}
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
struct l2cap_move_chan_cfm *cfm = data;
u16 icid, result;
@@ -3717,7 +4081,7 @@ static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
icid = le16_to_cpu(cfm->icid);
result = le16_to_cpu(cfm->result);
- BT_DBG("icid %d, result %d", icid, result);
+ BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
@@ -3725,7 +4089,8 @@ static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
}
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
- struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
+ struct l2cap_cmd_hdr *cmd,
+ u16 cmd_len, void *data)
{
struct l2cap_move_chan_cfm_rsp *rsp = data;
u16 icid;
@@ -3735,7 +4100,7 @@ static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
icid = le16_to_cpu(rsp->icid);
- BT_DBG("icid %d", icid);
+ BT_DBG("icid 0x%4.4x", icid);
return 0;
}
@@ -3790,9 +4155,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
err = l2cap_check_conn_param(min, max, latency, to_multiplier);
if (err)
- rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
else
- rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
+ rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
sizeof(rsp), &rsp);
@@ -3940,7 +4305,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
BT_ERR("Wrong link type (%d)", err);
/* FIXME: Map err to a valid reason */
- rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
+ rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
@@ -3972,65 +4337,38 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
return 0;
}
-static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
+static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
- u32 control = 0;
+ struct l2cap_ctrl control;
- chan->frames_sent = 0;
+ BT_DBG("chan %p", chan);
- control |= __set_reqseq(chan, chan->buffer_seq);
+ memset(&control, 0, sizeof(control));
+ control.sframe = 1;
+ control.final = 1;
+ control.reqseq = chan->buffer_seq;
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
- l2cap_send_sframe(chan, control);
- set_bit(CONN_RNR_SENT, &chan->conn_state);
+ control.super = L2CAP_SUPER_RNR;
+ l2cap_send_sframe(chan, &control);
}
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
- l2cap_retransmit_frames(chan);
+ if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
+ chan->unacked_frames > 0)
+ __set_retrans_timer(chan);
+ /* Send pending iframes */
l2cap_ertm_send(chan);
if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
- chan->frames_sent == 0) {
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, control);
- }
-}
-
-static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
-{
- struct sk_buff *next_skb;
- int tx_seq_offset, next_tx_seq_offset;
-
- bt_cb(skb)->control.txseq = tx_seq;
- bt_cb(skb)->control.sar = sar;
-
- next_skb = skb_peek(&chan->srej_q);
-
- tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
-
- while (next_skb) {
- if (bt_cb(next_skb)->control.txseq == tx_seq)
- return -EINVAL;
-
- next_tx_seq_offset = __seq_offset(chan,
- bt_cb(next_skb)->control.txseq, chan->buffer_seq);
-
- if (next_tx_seq_offset > tx_seq_offset) {
- __skb_queue_before(&chan->srej_q, next_skb, skb);
- return 0;
- }
-
- if (skb_queue_is_last(&chan->srej_q, next_skb))
- next_skb = NULL;
- else
- next_skb = skb_queue_next(&chan->srej_q, next_skb);
+ test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
+ /* F-bit wasn't sent in an s-frame or i-frame yet, so
+ * send it now.
+ */
+ control.super = L2CAP_SUPER_RR;
+ l2cap_send_sframe(chan, &control);
}
-
- __skb_queue_tail(&chan->srej_q, skb);
-
- return 0;
}
static void append_skb_frag(struct sk_buff *skb,
@@ -4052,16 +4390,17 @@ static void append_skb_frag(struct sk_buff *skb,
skb->truesize += new_frag->truesize;
}
-static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
+static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
+ struct l2cap_ctrl *control)
{
int err = -EINVAL;
- switch (__get_ctrl_sar(chan, control)) {
+ switch (control->sar) {
case L2CAP_SAR_UNSEGMENTED:
if (chan->sdu)
break;
- err = chan->ops->recv(chan->data, skb);
+ err = chan->ops->recv(chan, skb);
break;
case L2CAP_SAR_START:
@@ -4111,7 +4450,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
if (chan->sdu->len != chan->sdu_len)
break;
- err = chan->ops->recv(chan->data, chan->sdu);
+ err = chan->ops->recv(chan, chan->sdu);
if (!err) {
/* Reassembly complete */
@@ -4133,448 +4472,609 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u3
return err;
}
-static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
+void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
{
- BT_DBG("chan %p, Enter local busy", chan);
+ u8 event;
- set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- l2cap_seq_list_clear(&chan->srej_list);
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return;
- __set_ack_timer(chan);
+ event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
+ l2cap_tx(chan, NULL, NULL, event);
}
-static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
+static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
- u32 control;
-
- if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
- goto done;
+ int err = 0;
+ /* Pass sequential frames to l2cap_reassemble_sdu()
+ * until a gap is encountered.
+ */
- control = __set_reqseq(chan, chan->buffer_seq);
- control |= __set_ctrl_poll(chan);
- control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, control);
- chan->retry_count = 1;
+ BT_DBG("chan %p", chan);
- __clear_retrans_timer(chan);
- __set_monitor_timer(chan);
+ while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ struct sk_buff *skb;
+ BT_DBG("Searching for skb with txseq %d (queue len %d)",
+ chan->buffer_seq, skb_queue_len(&chan->srej_q));
- set_bit(CONN_WAIT_F, &chan->conn_state);
+ skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
-done:
- clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
- clear_bit(CONN_RNR_SENT, &chan->conn_state);
+ if (!skb)
+ break;
- BT_DBG("chan %p, Exit local busy", chan);
-}
+ skb_unlink(skb, &chan->srej_q);
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
+ err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
+ if (err)
+ break;
+ }
-void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
-{
- if (chan->mode == L2CAP_MODE_ERTM) {
- if (busy)
- l2cap_ertm_enter_local_busy(chan);
- else
- l2cap_ertm_exit_local_busy(chan);
+ if (skb_queue_empty(&chan->srej_q)) {
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ l2cap_send_ack(chan);
}
+
+ return err;
}
-static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
+static void l2cap_handle_srej(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
struct sk_buff *skb;
- u32 control;
- while ((skb = skb_peek(&chan->srej_q)) &&
- !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- int err;
+ BT_DBG("chan %p, control %p", chan, control);
- if (bt_cb(skb)->control.txseq != tx_seq)
- break;
+ if (control->reqseq == chan->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ return;
+ }
- skb = skb_dequeue(&chan->srej_q);
- control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar);
- err = l2cap_reassemble_sdu(chan, skb, control);
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- break;
- }
+ if (skb == NULL) {
+ BT_DBG("Seq %d not available for retransmission",
+ control->reqseq);
+ return;
+ }
- chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
- tx_seq = __next_seq(chan, tx_seq);
+ if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ return;
}
-}
-static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
-{
- struct srej_list *l, *tmp;
- u32 control;
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
- if (l->tx_seq == tx_seq) {
- list_del(&l->list);
- kfree(l);
- return;
+ if (control->poll) {
+ l2cap_pass_to_tx(chan, control);
+
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_retransmit(chan, control);
+ l2cap_ertm_send(chan);
+
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
+ set_bit(CONN_SREJ_ACT, &chan->conn_state);
+ chan->srej_save_reqseq = control->reqseq;
+ }
+ } else {
+ l2cap_pass_to_tx_fbit(chan, control);
+
+ if (control->final) {
+ if (chan->srej_save_reqseq != control->reqseq ||
+ !test_and_clear_bit(CONN_SREJ_ACT,
+ &chan->conn_state))
+ l2cap_retransmit(chan, control);
+ } else {
+ l2cap_retransmit(chan, control);
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
+ set_bit(CONN_SREJ_ACT, &chan->conn_state);
+ chan->srej_save_reqseq = control->reqseq;
+ }
}
- control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
- control |= __set_reqseq(chan, l->tx_seq);
- l2cap_send_sframe(chan, control);
- list_del(&l->list);
- list_add_tail(&l->list, &chan->srej_l);
}
}
-static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
+static void l2cap_handle_rej(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
{
- struct srej_list *new;
- u32 control;
-
- while (tx_seq != chan->expected_tx_seq) {
- control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
- control |= __set_reqseq(chan, chan->expected_tx_seq);
- l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq);
- l2cap_send_sframe(chan, control);
+ struct sk_buff *skb;
- new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
- if (!new)
- return -ENOMEM;
+ BT_DBG("chan %p, control %p", chan, control);
- new->tx_seq = chan->expected_tx_seq;
+ if (control->reqseq == chan->next_tx_seq) {
+ BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ return;
+ }
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+ skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
- list_add_tail(&new->list, &chan->srej_l);
+ if (chan->max_tx && skb &&
+ bt_cb(skb)->control.retries >= chan->max_tx) {
+ BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ return;
}
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+
+ l2cap_pass_to_tx(chan, control);
- return 0;
+ if (control->final) {
+ if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
+ l2cap_retransmit_all(chan, control);
+ } else {
+ l2cap_retransmit_all(chan, control);
+ l2cap_ertm_send(chan);
+ if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
+ set_bit(CONN_REJ_ACT, &chan->conn_state);
+ }
}
-static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
+static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
- u16 tx_seq = __get_txseq(chan, rx_control);
- u16 req_seq = __get_reqseq(chan, rx_control);
- u8 sar = __get_ctrl_sar(chan, rx_control);
- int tx_seq_offset, expected_tx_seq_offset;
- int num_to_ack = (chan->tx_win/6) + 1;
- int err = 0;
+ BT_DBG("chan %p, txseq %d", chan, txseq);
- BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
- tx_seq, rx_control);
+ BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
+ chan->expected_tx_seq);
- if (__is_ctrl_final(chan, rx_control) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- __clear_monitor_timer(chan);
- if (chan->unacked_frames > 0)
- __set_retrans_timer(chan);
- clear_bit(CONN_WAIT_F, &chan->conn_state);
- }
+ if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
+ chan->tx_win) {
+ /* See notes below regarding "double poll" and
+ * invalid packets.
+ */
+ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - after SREJ");
+ return L2CAP_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - in window after SREJ sent");
+ return L2CAP_TXSEQ_INVALID;
+ }
+ }
- chan->expected_ack_seq = req_seq;
- l2cap_drop_acked_frames(chan);
+ if (chan->srej_list.head == txseq) {
+ BT_DBG("Expected SREJ");
+ return L2CAP_TXSEQ_EXPECTED_SREJ;
+ }
- tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
+ if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
+ BT_DBG("Duplicate SREJ - txseq already stored");
+ return L2CAP_TXSEQ_DUPLICATE_SREJ;
+ }
- /* invalid tx_seq */
- if (tx_seq_offset >= chan->tx_win) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
+ if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
+ BT_DBG("Unexpected SREJ - not requested");
+ return L2CAP_TXSEQ_UNEXPECTED_SREJ;
+ }
}
- if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
- if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
- l2cap_send_ack(chan);
- goto drop;
+ if (chan->expected_tx_seq == txseq) {
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
+ chan->tx_win) {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID;
+ } else {
+ BT_DBG("Expected");
+ return L2CAP_TXSEQ_EXPECTED;
+ }
}
- if (tx_seq == chan->expected_tx_seq)
- goto expected;
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) <
+ __seq_offset(chan, chan->expected_tx_seq,
+ chan->last_acked_seq)){
+ BT_DBG("Duplicate - expected_tx_seq later than txseq");
+ return L2CAP_TXSEQ_DUPLICATE;
+ }
+
+ if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
+ /* A source of invalid packets is a "double poll" condition,
+ * where delays cause us to send multiple poll packets. If
+ * the remote stack receives and processes both polls,
+ * sequence numbers can wrap around in such a way that a
+ * resent frame has a sequence number that looks like new data
+ * with a sequence gap. This would trigger an erroneous SREJ
+ * request.
+ *
+ * Fortunately, this is impossible with a tx window that's
+ * less than half of the maximum sequence number, which allows
+ * invalid frames to be safely ignored.
+ *
+ * With tx window sizes greater than half of the tx window
+ * maximum, the frame is invalid and cannot be ignored. This
+ * causes a disconnect.
+ */
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- struct srej_list *first;
+ if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
+ BT_DBG("Invalid/Ignore - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID_IGNORE;
+ } else {
+ BT_DBG("Invalid - txseq outside tx window");
+ return L2CAP_TXSEQ_INVALID;
+ }
+ } else {
+ BT_DBG("Unexpected - txseq indicates missing frames");
+ return L2CAP_TXSEQ_UNEXPECTED;
+ }
+}
- first = list_first_entry(&chan->srej_l,
- struct srej_list, list);
- if (tx_seq == first->tx_seq) {
- l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
- l2cap_check_srej_gap(chan, tx_seq);
+static int l2cap_rx_state_recv(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err = 0;
+ bool skb_in_use = 0;
- list_del(&first->list);
- kfree(first);
+ BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ event);
- if (list_empty(&chan->srej_l)) {
- chan->buffer_seq = chan->buffer_seq_srej;
- clear_bit(CONN_SREJ_SENT, &chan->conn_state);
- l2cap_send_ack(chan);
- BT_DBG("chan %p, Exit SREJ_SENT", chan);
+ switch (event) {
+ case L2CAP_EV_RECV_IFRAME:
+ switch (l2cap_classify_txseq(chan, control->txseq)) {
+ case L2CAP_TXSEQ_EXPECTED:
+ l2cap_pass_to_tx(chan, control);
+
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ BT_DBG("Busy, discarding expected seq %d",
+ control->txseq);
+ break;
}
- } else {
- struct srej_list *l;
- /* duplicated tx_seq */
- if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
- goto drop;
+ chan->expected_tx_seq = __next_seq(chan,
+ control->txseq);
+
+ chan->buffer_seq = chan->expected_tx_seq;
+ skb_in_use = 1;
+
+ err = l2cap_reassemble_sdu(chan, skb, control);
+ if (err)
+ break;
- list_for_each_entry(l, &chan->srej_l, list) {
- if (l->tx_seq == tx_seq) {
- l2cap_resend_srejframe(chan, tx_seq);
- return 0;
+ if (control->final) {
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
+ control->final = 0;
+ l2cap_retransmit_all(chan, control);
+ l2cap_ertm_send(chan);
}
}
- err = l2cap_send_srejframe(chan, tx_seq);
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, -err);
- return err;
+ if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
+ l2cap_send_ack(chan);
+ break;
+ case L2CAP_TXSEQ_UNEXPECTED:
+ l2cap_pass_to_tx(chan, control);
+
+ /* Can't issue SREJ frames in the local busy state.
+ * Drop this frame, it will be seen as missing
+ * when local busy is exited.
+ */
+ if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
+ BT_DBG("Busy, discarding unexpected seq %d",
+ control->txseq);
+ break;
}
- }
- } else {
- expected_tx_seq_offset = __seq_offset(chan,
- chan->expected_tx_seq, chan->buffer_seq);
- /* duplicated tx_seq */
- if (tx_seq_offset < expected_tx_seq_offset)
- goto drop;
-
- set_bit(CONN_SREJ_SENT, &chan->conn_state);
+ /* There was a gap in the sequence, so an SREJ
+ * must be sent for each missing frame. The
+ * current frame is stored for later use.
+ */
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
- BT_DBG("chan %p, Enter SREJ", chan);
+ clear_bit(CONN_SREJ_ACT, &chan->conn_state);
+ l2cap_seq_list_clear(&chan->srej_list);
+ l2cap_send_srej(chan, control->txseq);
- INIT_LIST_HEAD(&chan->srej_l);
- chan->buffer_seq_srej = chan->buffer_seq;
+ chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
+ break;
+ case L2CAP_TXSEQ_DUPLICATE:
+ l2cap_pass_to_tx(chan, control);
+ break;
+ case L2CAP_TXSEQ_INVALID_IGNORE:
+ break;
+ case L2CAP_TXSEQ_INVALID:
+ default:
+ l2cap_send_disconn_req(chan->conn, chan,
+ ECONNRESET);
+ break;
+ }
+ break;
+ case L2CAP_EV_RECV_RR:
+ l2cap_pass_to_tx(chan, control);
+ if (control->final) {
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- __skb_queue_head_init(&chan->srej_q);
- l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
+ control->final = 0;
+ l2cap_retransmit_all(chan, control);
+ }
- /* Set P-bit only if there are some I-frames to ack. */
- if (__clear_ack_timer(chan))
- set_bit(CONN_SEND_PBIT, &chan->conn_state);
+ l2cap_ertm_send(chan);
+ } else if (control->poll) {
+ l2cap_send_i_or_rr_or_rnr(chan);
+ } else {
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames)
+ __set_retrans_timer(chan);
- err = l2cap_send_srejframe(chan, tx_seq);
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, -err);
- return err;
+ l2cap_ertm_send(chan);
}
- }
- return 0;
-
-expected:
- chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
-
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- bt_cb(skb)->control.txseq = tx_seq;
- bt_cb(skb)->control.sar = sar;
- __skb_queue_tail(&chan->srej_q, skb);
- return 0;
+ break;
+ case L2CAP_EV_RECV_RNR:
+ set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+ l2cap_pass_to_tx(chan, control);
+ if (control && control->poll) {
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_send_rr_or_rnr(chan, 0);
+ }
+ __clear_retrans_timer(chan);
+ l2cap_seq_list_clear(&chan->retrans_list);
+ break;
+ case L2CAP_EV_RECV_REJ:
+ l2cap_handle_rej(chan, control);
+ break;
+ case L2CAP_EV_RECV_SREJ:
+ l2cap_handle_srej(chan, control);
+ break;
+ default:
+ break;
}
- err = l2cap_reassemble_sdu(chan, skb, rx_control);
- chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
-
- if (err < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- return err;
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
}
- if (__is_ctrl_final(chan, rx_control)) {
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
- }
+ return err;
+}
+static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err = 0;
+ u16 txseq = control->txseq;
+ bool skb_in_use = 0;
+
+ BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
+ event);
+
+ switch (event) {
+ case L2CAP_EV_RECV_IFRAME:
+ switch (l2cap_classify_txseq(chan, txseq)) {
+ case L2CAP_TXSEQ_EXPECTED:
+ /* Keep frame for reassembly later */
+ l2cap_pass_to_tx(chan, control);
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ chan->expected_tx_seq = __next_seq(chan, txseq);
+ break;
+ case L2CAP_TXSEQ_EXPECTED_SREJ:
+ l2cap_seq_list_pop(&chan->srej_list);
- chan->num_acked = (chan->num_acked + 1) % num_to_ack;
- if (chan->num_acked == num_to_ack - 1)
- l2cap_send_ack(chan);
- else
- __set_ack_timer(chan);
+ l2cap_pass_to_tx(chan, control);
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
- return 0;
+ err = l2cap_rx_queued_iframes(chan);
+ if (err)
+ break;
-drop:
- kfree_skb(skb);
- return 0;
-}
+ break;
+ case L2CAP_TXSEQ_UNEXPECTED:
+ /* Got a frame that can't be reassembled yet.
+ * Save it for later, and send SREJs to cover
+ * the missing frames.
+ */
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ l2cap_pass_to_tx(chan, control);
+ l2cap_send_srej(chan, control->txseq);
+ break;
+ case L2CAP_TXSEQ_UNEXPECTED_SREJ:
+ /* This frame was requested with an SREJ, but
+ * some expected retransmitted frames are
+ * missing. Request retransmission of missing
+ * SREJ'd frames.
+ */
+ skb_queue_tail(&chan->srej_q, skb);
+ skb_in_use = 1;
+ BT_DBG("Queued %p (queue len %d)", skb,
+ skb_queue_len(&chan->srej_q));
+
+ l2cap_pass_to_tx(chan, control);
+ l2cap_send_srej_list(chan, control->txseq);
+ break;
+ case L2CAP_TXSEQ_DUPLICATE_SREJ:
+ /* We've already queued this frame. Drop this copy. */
+ l2cap_pass_to_tx(chan, control);
+ break;
+ case L2CAP_TXSEQ_DUPLICATE:
+ /* Expecting a later sequence number, so this frame
+ * was already received. Ignore it completely.
+ */
+ break;
+ case L2CAP_TXSEQ_INVALID_IGNORE:
+ break;
+ case L2CAP_TXSEQ_INVALID:
+ default:
+ l2cap_send_disconn_req(chan->conn, chan,
+ ECONNRESET);
+ break;
+ }
+ break;
+ case L2CAP_EV_RECV_RR:
+ l2cap_pass_to_tx(chan, control);
+ if (control->final) {
+ clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
-{
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
- __get_reqseq(chan, rx_control), rx_control);
+ if (!test_and_clear_bit(CONN_REJ_ACT,
+ &chan->conn_state)) {
+ control->final = 0;
+ l2cap_retransmit_all(chan, control);
+ }
- chan->expected_ack_seq = __get_reqseq(chan, rx_control);
- l2cap_drop_acked_frames(chan);
+ l2cap_ertm_send(chan);
+ } else if (control->poll) {
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames) {
+ __set_retrans_timer(chan);
+ }
- if (__is_ctrl_poll(chan, rx_control)) {
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- (chan->unacked_frames > 0))
+ set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ l2cap_send_srej_tail(chan);
+ } else {
+ if (test_and_clear_bit(CONN_REMOTE_BUSY,
+ &chan->conn_state) &&
+ chan->unacked_frames)
__set_retrans_timer(chan);
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- l2cap_send_srejtail(chan);
+ l2cap_send_ack(chan);
+ }
+ break;
+ case L2CAP_EV_RECV_RNR:
+ set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
+ l2cap_pass_to_tx(chan, control);
+ if (control->poll) {
+ l2cap_send_srej_tail(chan);
} else {
- l2cap_send_i_or_rr_or_rnr(chan);
+ struct l2cap_ctrl rr_control;
+ memset(&rr_control, 0, sizeof(rr_control));
+ rr_control.sframe = 1;
+ rr_control.super = L2CAP_SUPER_RR;
+ rr_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &rr_control);
}
- } else if (__is_ctrl_final(chan, rx_control)) {
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
-
- } else {
- if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
- (chan->unacked_frames > 0))
- __set_retrans_timer(chan);
+ break;
+ case L2CAP_EV_RECV_REJ:
+ l2cap_handle_rej(chan, control);
+ break;
+ case L2CAP_EV_RECV_SREJ:
+ l2cap_handle_srej(chan, control);
+ break;
+ }
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
- l2cap_send_ack(chan);
- else
- l2cap_ertm_send(chan);
+ if (skb && !skb_in_use) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
}
+
+ return err;
}
-static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
+static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
- u16 tx_seq = __get_reqseq(chan, rx_control);
-
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
+ /* Make sure reqseq is for a packet that has been sent but not acked */
+ u16 unacked;
- if (__is_ctrl_final(chan, rx_control)) {
- if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
- l2cap_retransmit_frames(chan);
- } else {
- l2cap_retransmit_frames(chan);
-
- if (test_bit(CONN_WAIT_F, &chan->conn_state))
- set_bit(CONN_REJ_ACT, &chan->conn_state);
- }
+ unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
+ return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
}
-static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
-{
- u16 tx_seq = __get_reqseq(chan, rx_control);
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
-
- clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
-
- if (__is_ctrl_poll(chan, rx_control)) {
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
-
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
- l2cap_retransmit_one_frame(chan, tx_seq);
+static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff *skb, u8 event)
+{
+ int err = 0;
- l2cap_ertm_send(chan);
+ BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
+ control, skb, event, chan->rx_state);
- if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
- chan->srej_save_reqseq = tx_seq;
- set_bit(CONN_SREJ_ACT, &chan->conn_state);
+ if (__valid_reqseq(chan, control->reqseq)) {
+ switch (chan->rx_state) {
+ case L2CAP_RX_STATE_RECV:
+ err = l2cap_rx_state_recv(chan, control, skb, event);
+ break;
+ case L2CAP_RX_STATE_SREJ_SENT:
+ err = l2cap_rx_state_srej_sent(chan, control, skb,
+ event);
+ break;
+ default:
+ /* shut it down */
+ break;
}
- } else if (__is_ctrl_final(chan, rx_control)) {
- if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
- chan->srej_save_reqseq == tx_seq)
- clear_bit(CONN_SREJ_ACT, &chan->conn_state);
- else
- l2cap_retransmit_one_frame(chan, tx_seq);
} else {
- l2cap_retransmit_one_frame(chan, tx_seq);
- if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
- chan->srej_save_reqseq = tx_seq;
- set_bit(CONN_SREJ_ACT, &chan->conn_state);
- }
+ BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
+ control->reqseq, chan->next_tx_seq,
+ chan->expected_ack_seq);
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
}
+
+ return err;
}
-static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
+static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff *skb)
{
- u16 tx_seq = __get_reqseq(chan, rx_control);
+ int err = 0;
- BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
+ BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
+ chan->rx_state);
- set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
- chan->expected_ack_seq = tx_seq;
- l2cap_drop_acked_frames(chan);
+ if (l2cap_classify_txseq(chan, control->txseq) ==
+ L2CAP_TXSEQ_EXPECTED) {
+ l2cap_pass_to_tx(chan, control);
- if (__is_ctrl_poll(chan, rx_control))
- set_bit(CONN_SEND_FBIT, &chan->conn_state);
+ BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
+ __next_seq(chan, chan->buffer_seq));
- if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
- __clear_retrans_timer(chan);
- if (__is_ctrl_poll(chan, rx_control))
- l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
- return;
- }
+ chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
- if (__is_ctrl_poll(chan, rx_control)) {
- l2cap_send_srejtail(chan);
+ l2cap_reassemble_sdu(chan, skb, control);
} else {
- rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
- l2cap_send_sframe(chan, rx_control);
- }
-}
-
-static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
-{
- BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
+ if (chan->sdu) {
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ }
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
- if (__is_ctrl_final(chan, rx_control) &&
- test_bit(CONN_WAIT_F, &chan->conn_state)) {
- __clear_monitor_timer(chan);
- if (chan->unacked_frames > 0)
- __set_retrans_timer(chan);
- clear_bit(CONN_WAIT_F, &chan->conn_state);
+ if (skb) {
+ BT_DBG("Freeing %p", skb);
+ kfree_skb(skb);
+ }
}
- switch (__get_ctrl_super(chan, rx_control)) {
- case L2CAP_SUPER_RR:
- l2cap_data_channel_rrframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_REJ:
- l2cap_data_channel_rejframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_SREJ:
- l2cap_data_channel_srejframe(chan, rx_control);
- break;
-
- case L2CAP_SUPER_RNR:
- l2cap_data_channel_rnrframe(chan, rx_control);
- break;
- }
+ chan->last_acked_seq = control->txseq;
+ chan->expected_tx_seq = __next_seq(chan, control->txseq);
- kfree_skb(skb);
- return 0;
+ return err;
}
-static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
- u32 control;
- u16 req_seq;
- int len, next_tx_seq_offset, req_seq_offset;
+ struct l2cap_ctrl *control = &bt_cb(skb)->control;
+ u16 len;
+ u8 event;
__unpack_control(chan, skb);
- control = __get_control(chan, skb->data);
- skb_pull(skb, __ctrl_size(chan));
len = skb->len;
/*
* We can just drop the corrupted I-frame here.
* Receiver will miss it and start proper recovery
- * procedures and ask retransmission.
+ * procedures and ask for retransmission.
*/
if (l2cap_check_fcs(chan, skb))
goto drop;
- if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
+ if (!control->sframe && control->sar == L2CAP_SAR_START)
len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
@@ -4585,34 +5085,57 @@ static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
goto drop;
}
- req_seq = __get_reqseq(chan, control);
-
- req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
+ if (!control->sframe) {
+ int err;
- next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
- chan->expected_ack_seq);
+ BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
+ control->sar, control->reqseq, control->final,
+ control->txseq);
- /* check for invalid req-seq */
- if (req_seq_offset > next_tx_seq_offset) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
- goto drop;
- }
-
- if (!__is_sframe(chan, control)) {
- if (len < 0) {
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
+ /* Validate F-bit - F=0 always valid, F=1 only
+ * valid in TX WAIT_F
+ */
+ if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
goto drop;
+
+ if (chan->mode != L2CAP_MODE_STREAMING) {
+ event = L2CAP_EV_RECV_IFRAME;
+ err = l2cap_rx(chan, control, skb, event);
+ } else {
+ err = l2cap_stream_rx(chan, control, skb);
}
- l2cap_data_channel_iframe(chan, control, skb);
+ if (err)
+ l2cap_send_disconn_req(chan->conn, chan,
+ ECONNRESET);
} else {
+ const u8 rx_func_to_event[4] = {
+ L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
+ L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
+ };
+
+ /* Only I-frames are expected in streaming mode */
+ if (chan->mode == L2CAP_MODE_STREAMING)
+ goto drop;
+
+ BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
+ control->reqseq, control->final, control->poll,
+ control->super);
+
if (len != 0) {
BT_ERR("%d", len);
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
goto drop;
}
- l2cap_data_channel_sframe(chan, control, skb);
+ /* Validate F and P bits */
+ if (control->final && (control->poll ||
+ chan->tx_state != L2CAP_TX_STATE_WAIT_F))
+ goto drop;
+
+ event = rx_func_to_event[control->super];
+ if (l2cap_rx(chan, control, skb, event))
+ l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
}
return 0;
@@ -4622,19 +5145,27 @@ drop:
return 0;
}
-static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
+static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
{
struct l2cap_chan *chan;
- u32 control;
- u16 tx_seq;
- int len;
chan = l2cap_get_chan_by_scid(conn, cid);
if (!chan) {
- BT_DBG("unknown cid 0x%4.4x", cid);
- /* Drop packet and return */
- kfree_skb(skb);
- return 0;
+ if (cid == L2CAP_CID_A2MP) {
+ chan = a2mp_channel_create(conn, skb);
+ if (!chan) {
+ kfree_skb(skb);
+ return;
+ }
+
+ l2cap_chan_lock(chan);
+ } else {
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ /* Drop packet and return */
+ kfree_skb(skb);
+ return;
+ }
}
BT_DBG("chan %p, len %d", chan, skb->len);
@@ -4652,49 +5183,13 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
if (chan->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
+ if (!chan->ops->recv(chan, skb))
goto done;
break;
case L2CAP_MODE_ERTM:
- l2cap_ertm_data_rcv(chan, skb);
-
- goto done;
-
case L2CAP_MODE_STREAMING:
- control = __get_control(chan, skb->data);
- skb_pull(skb, __ctrl_size(chan));
- len = skb->len;
-
- if (l2cap_check_fcs(chan, skb))
- goto drop;
-
- if (__is_sar_start(chan, control))
- len -= L2CAP_SDULEN_SIZE;
-
- if (chan->fcs == L2CAP_FCS_CRC16)
- len -= L2CAP_FCS_SIZE;
-
- if (len > chan->mps || len < 0 || __is_sframe(chan, control))
- goto drop;
-
- tx_seq = __get_txseq(chan, control);
-
- if (chan->expected_tx_seq != tx_seq) {
- /* Frame(s) missing - must discard partial SDU */
- kfree_skb(chan->sdu);
- chan->sdu = NULL;
- chan->sdu_last_frag = NULL;
- chan->sdu_len = 0;
-
- /* TODO: Notify userland of missing data */
- }
-
- chan->expected_tx_seq = __next_seq(chan, tx_seq);
-
- if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
- l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
-
+ l2cap_data_rcv(chan, skb);
goto done;
default:
@@ -4707,11 +5202,10 @@ drop:
done:
l2cap_chan_unlock(chan);
-
- return 0;
}
-static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
+static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ struct sk_buff *skb)
{
struct l2cap_chan *chan;
@@ -4727,17 +5221,15 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
if (chan->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
- return 0;
+ if (!chan->ops->recv(chan, skb))
+ return;
drop:
kfree_skb(skb);
-
- return 0;
}
-static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
- struct sk_buff *skb)
+static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
{
struct l2cap_chan *chan;
@@ -4753,13 +5245,11 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid,
if (chan->imtu < skb->len)
goto drop;
- if (!chan->ops->recv(chan->data, skb))
- return 0;
+ if (!chan->ops->recv(chan, skb))
+ return;
drop:
kfree_skb(skb);
-
- return 0;
}
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
@@ -4787,7 +5277,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
case L2CAP_CID_CONN_LESS:
psm = get_unaligned((__le16 *) skb->data);
- skb_pull(skb, 2);
+ skb_pull(skb, L2CAP_PSMLEN_SIZE);
l2cap_conless_channel(conn, psm, skb);
break;
@@ -4898,7 +5388,7 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
if (!conn)
return 0;
- BT_DBG("conn %p", conn);
+ BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
if (hcon->type == LE_LINK) {
if (!status && encrypt)
@@ -4911,7 +5401,8 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
list_for_each_entry(chan, &conn->chan_l, list) {
l2cap_chan_lock(chan);
- BT_DBG("chan->scid %d", chan->scid);
+ BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
+ state_to_string(chan->state));
if (chan->scid == L2CAP_CID_LE_DATA) {
if (!status && encrypt) {
@@ -4981,6 +5472,17 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
rsp.status = cpu_to_le16(stat);
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
sizeof(rsp), &rsp);
+
+ if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
+ res == L2CAP_CR_SUCCESS) {
+ char buf[128];
+ set_bit(CONF_REQ_SENT, &chan->conf_state);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ L2CAP_CONF_REQ,
+ l2cap_build_conf_req(chan, buf),
+ buf);
+ chan->num_conf_req++;
+ }
}
l2cap_chan_unlock(chan);
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 3bb1611b9d48..a4bb27e8427e 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -27,7 +27,6 @@
/* Bluetooth L2CAP sockets. */
-#include <linux/security.h>
#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
@@ -89,8 +88,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
if (err < 0)
goto done;
- if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
- __le16_to_cpu(la.l2_psm) == 0x0003)
+ if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP ||
+ __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM)
chan->sec_level = BT_SECURITY_SDP;
bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
@@ -446,6 +445,22 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
return err;
}
+static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu)
+{
+ switch (chan->scid) {
+ case L2CAP_CID_LE_DATA:
+ if (mtu < L2CAP_LE_MIN_MTU)
+ return false;
+ break;
+
+ default:
+ if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ return false;
+ }
+
+ return true;
+}
+
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
@@ -484,6 +499,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
break;
}
+ if (!l2cap_valid_mtu(chan, opts.imtu)) {
+ err = -EINVAL;
+ break;
+ }
+
chan->mode = opts.mode;
switch (chan->mode) {
case L2CAP_MODE_BASIC:
@@ -873,9 +893,34 @@ static int l2cap_sock_release(struct socket *sock)
return err;
}
-static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
+static void l2cap_sock_cleanup_listen(struct sock *parent)
{
- struct sock *sk, *parent = data;
+ struct sock *sk;
+
+ BT_DBG("parent %p", parent);
+
+ /* Close not yet accepted channels */
+ while ((sk = bt_accept_dequeue(parent, NULL))) {
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+ l2cap_chan_lock(chan);
+ __clear_chan_timer(chan);
+ l2cap_chan_close(chan, ECONNRESET);
+ l2cap_chan_unlock(chan);
+
+ l2cap_sock_kill(sk);
+ }
+}
+
+static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk, *parent = chan->data;
+
+ /* Check for backlog size */
+ if (sk_acceptq_is_full(parent)) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
+ return NULL;
+ }
sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
GFP_ATOMIC);
@@ -889,10 +934,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
return l2cap_pi(sk)->chan;
}
-static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
+static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
{
int err;
- struct sock *sk = data;
+ struct sock *sk = chan->data;
struct l2cap_pinfo *pi = l2cap_pi(sk);
lock_sock(sk);
@@ -925,16 +970,57 @@ done:
return err;
}
-static void l2cap_sock_close_cb(void *data)
+static void l2cap_sock_close_cb(struct l2cap_chan *chan)
{
- struct sock *sk = data;
+ struct sock *sk = chan->data;
l2cap_sock_kill(sk);
}
-static void l2cap_sock_state_change_cb(void *data, int state)
+static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
{
- struct sock *sk = data;
+ struct sock *sk = chan->data;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ sock_set_flag(sk, SOCK_ZAPPED);
+
+ switch (chan->state) {
+ case BT_OPEN:
+ case BT_BOUND:
+ case BT_CLOSED:
+ break;
+ case BT_LISTEN:
+ l2cap_sock_cleanup_listen(sk);
+ sk->sk_state = BT_CLOSED;
+ chan->state = BT_CLOSED;
+
+ break;
+ default:
+ sk->sk_state = BT_CLOSED;
+ chan->state = BT_CLOSED;
+
+ sk->sk_err = err;
+
+ if (parent) {
+ bt_accept_unlink(sk);
+ parent->sk_data_ready(parent, 0);
+ } else {
+ sk->sk_state_change(sk);
+ }
+
+ break;
+ }
+
+ release_sock(sk);
+}
+
+static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state)
+{
+ struct sock *sk = chan->data;
sk->sk_state = state;
}
@@ -955,12 +1041,34 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan,
return skb;
}
+static void l2cap_sock_ready_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+ struct sock *parent;
+
+ lock_sock(sk);
+
+ parent = bt_sk(sk)->parent;
+
+ BT_DBG("sk %p, parent %p", sk, parent);
+
+ sk->sk_state = BT_CONNECTED;
+ sk->sk_state_change(sk);
+
+ if (parent)
+ parent->sk_data_ready(parent, 0);
+
+ release_sock(sk);
+}
+
static struct l2cap_ops l2cap_chan_ops = {
.name = "L2CAP Socket Interface",
.new_connection = l2cap_sock_new_connection_cb,
.recv = l2cap_sock_recv_cb,
.close = l2cap_sock_close_cb,
+ .teardown = l2cap_sock_teardown_cb,
.state_change = l2cap_sock_state_change_cb,
+ .ready = l2cap_sock_ready_cb,
.alloc_skb = l2cap_sock_alloc_skb_cb,
};
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index 506628876f36..e1c97527e16c 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -26,12 +26,7 @@
#define pr_fmt(fmt) "Bluetooth: " fmt
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <asm/errno.h>
+#include <linux/export.h>
#include <net/bluetooth/bluetooth.h>
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 3e5e3362ea00..ad6613d17ca6 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -24,8 +24,6 @@
/* Bluetooth HCI Management interface */
-#include <linux/kernel.h>
-#include <linux/uaccess.h>
#include <linux/module.h>
#include <asm/unaligned.h>
@@ -212,7 +210,7 @@ static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -243,7 +241,7 @@ static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
BT_DBG("sock %p", sk);
- skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -689,14 +687,14 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
{
struct pending_cmd *cmd;
- cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC);
+ cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return NULL;
cmd->opcode = opcode;
cmd->index = hdev->id;
- cmd->param = kmalloc(len, GFP_ATOMIC);
+ cmd->param = kmalloc(len, GFP_KERNEL);
if (!cmd->param) {
kfree(cmd);
return NULL;
@@ -714,7 +712,8 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
}
static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
- void (*cb)(struct pending_cmd *cmd, void *data),
+ void (*cb)(struct pending_cmd *cmd,
+ void *data),
void *data)
{
struct list_head *p, *n;
@@ -813,7 +812,7 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
struct sk_buff *skb;
struct mgmt_hdr *hdr;
- skb = alloc_skb(sizeof(*hdr) + data_len, GFP_ATOMIC);
+ skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -871,7 +870,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
MGMT_STATUS_BUSY);
goto failed;
@@ -978,7 +977,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
- mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
+ mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
MGMT_STATUS_BUSY);
goto failed;
@@ -1001,7 +1000,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
scan = 0;
if (test_bit(HCI_ISCAN, &hdev->flags) &&
- hdev->discov_timeout > 0)
+ hdev->discov_timeout > 0)
cancel_delayed_work(&hdev->discov_off);
}
@@ -1056,7 +1055,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
bool changed = false;
if (!!cp->val != test_bit(HCI_LINK_SECURITY,
- &hdev->dev_flags)) {
+ &hdev->dev_flags)) {
change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
changed = true;
}
@@ -1269,7 +1268,7 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
goto failed;
}
- uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
+ uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
if (!uuid) {
err = -ENOMEM;
goto failed;
@@ -1317,7 +1316,7 @@ static bool enable_service_cache(struct hci_dev *hdev)
}
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+ u16 len)
{
struct mgmt_cp_remove_uuid *cp = data;
struct pending_cmd *cmd;
@@ -1442,7 +1441,7 @@ unlock:
}
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
- u16 len)
+ u16 len)
{
struct mgmt_cp_load_link_keys *cp = data;
u16 key_count, expected_len;
@@ -1454,13 +1453,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
sizeof(struct mgmt_link_key_info);
if (expected_len != len) {
BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
- len, expected_len);
+ len, expected_len);
return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
MGMT_STATUS_INVALID_PARAMS);
}
BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
- key_count);
+ key_count);
hci_dev_lock(hdev);
@@ -1535,10 +1534,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
if (cp->disconnect) {
if (cp->addr.type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
- &cp->addr.bdaddr);
+ &cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
- &cp->addr.bdaddr);
+ &cp->addr.bdaddr);
} else {
conn = NULL;
}
@@ -1594,7 +1593,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
}
if (cp->addr.type == BDADDR_BREDR)
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
+ conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
+ &cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
@@ -1611,7 +1611,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
}
dc.handle = cpu_to_le16(conn->handle);
- dc.reason = 0x13; /* Remote User Terminated Connection */
+ dc.reason = HCI_ERROR_REMOTE_USER_TERM;
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
if (err < 0)
@@ -1667,7 +1667,7 @@ static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
}
rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
- rp = kmalloc(rp_len, GFP_ATOMIC);
+ rp = kmalloc(rp_len, GFP_KERNEL);
if (!rp) {
err = -ENOMEM;
goto unlock;
@@ -1778,29 +1778,6 @@ failed:
return err;
}
-static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
-{
- struct mgmt_cp_pin_code_neg_reply *cp = data;
- int err;
-
- BT_DBG("");
-
- hci_dev_lock(hdev);
-
- if (!hdev_is_powered(hdev)) {
- err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
- MGMT_STATUS_NOT_POWERED);
- goto failed;
- }
-
- err = send_pin_code_neg_reply(sk, hdev, cp);
-
-failed:
- hci_dev_unlock(hdev);
- return err;
-}
-
static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -1813,7 +1790,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
hdev->io_capability = cp->io_capability;
BT_DBG("%s IO capability set to 0x%02x", hdev->name,
- hdev->io_capability);
+ hdev->io_capability);
hci_dev_unlock(hdev);
@@ -1821,7 +1798,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
0);
}
-static inline struct pending_cmd *find_pairing(struct hci_conn *conn)
+static struct pending_cmd *find_pairing(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
struct pending_cmd *cmd;
@@ -1927,8 +1904,15 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
rp.addr.type = cp->addr.type;
if (IS_ERR(conn)) {
+ int status;
+
+ if (PTR_ERR(conn) == -EBUSY)
+ status = MGMT_STATUS_BUSY;
+ else
+ status = MGMT_STATUS_CONNECT_FAILED;
+
err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
- MGMT_STATUS_CONNECT_FAILED, &rp,
+ status, &rp,
sizeof(rp));
goto unlock;
}
@@ -1959,7 +1943,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
cmd->user_data = conn;
if (conn->state == BT_CONNECTED &&
- hci_conn_security(conn, sec_level, auth_type))
+ hci_conn_security(conn, sec_level, auth_type))
pairing_complete(cmd, 0);
err = 0;
@@ -2076,6 +2060,18 @@ done:
return err;
}
+static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_pin_code_neg_reply *cp = data;
+
+ BT_DBG("");
+
+ return user_pairing_resp(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
+ MGMT_OP_PIN_CODE_NEG_REPLY,
+ HCI_OP_PIN_CODE_NEG_REPLY, 0);
+}
+
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
u16 len)
{
@@ -2256,7 +2252,7 @@ unlock:
}
static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
- void *data, u16 len)
+ void *data, u16 len)
{
struct mgmt_cp_remove_remote_oob_data *cp = data;
u8 status;
@@ -2425,7 +2421,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
case DISCOVERY_RESOLVING:
e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
- NAME_PENDING);
+ NAME_PENDING);
if (!e) {
mgmt_pending_remove(cmd);
err = cmd_complete(sk, hdev->id,
@@ -2600,8 +2596,8 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
if (cp->val) {
type = PAGE_SCAN_TYPE_INTERLACED;
- /* 22.5 msec page scan interval */
- acp.interval = __constant_cpu_to_le16(0x0024);
+ /* 160 msec page scan interval */
+ acp.interval = __constant_cpu_to_le16(0x0100);
} else {
type = PAGE_SCAN_TYPE_STANDARD; /* default */
@@ -2647,7 +2643,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
sizeof(struct mgmt_ltk_info);
if (expected_len != len) {
BT_ERR("load_keys: expected %u bytes, got %u bytes",
- len, expected_len);
+ len, expected_len);
return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
EINVAL);
}
@@ -2772,7 +2768,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
- mgmt_handlers[opcode].func == NULL) {
+ mgmt_handlers[opcode].func == NULL) {
BT_DBG("Unknown op %u", opcode);
err = cmd_status(sk, index, opcode,
MGMT_STATUS_UNKNOWN_COMMAND);
@@ -2780,7 +2776,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
}
if ((hdev && opcode < MGMT_OP_READ_INFO) ||
- (!hdev && opcode >= MGMT_OP_READ_INFO)) {
+ (!hdev && opcode >= MGMT_OP_READ_INFO)) {
err = cmd_status(sk, index, opcode,
MGMT_STATUS_INVALID_INDEX);
goto done;
@@ -2789,7 +2785,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
handler = &mgmt_handlers[opcode];
if ((handler->var_len && len < handler->data_len) ||
- (!handler->var_len && len != handler->data_len)) {
+ (!handler->var_len && len != handler->data_len)) {
err = cmd_status(sk, index, opcode,
MGMT_STATUS_INVALID_PARAMS);
goto done;
@@ -2973,7 +2969,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
ev.key.addr.type = BDADDR_BREDR;
ev.key.type = key->type;
- memcpy(ev.key.val, key->val, 16);
+ memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
ev.key.pin_len = key->pin_len;
return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
@@ -3108,7 +3104,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
mgmt_pending_remove(cmd);
mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
- hdev);
+ hdev);
return err;
}
@@ -3198,7 +3194,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type)
+ u8 link_type, u8 addr_type)
{
struct mgmt_ev_user_passkey_request ev;
@@ -3212,8 +3208,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
- u8 link_type, u8 addr_type, u8 status,
- u8 opcode)
+ u8 link_type, u8 addr_type, u8 status,
+ u8 opcode)
{
struct pending_cmd *cmd;
struct mgmt_rp_user_confirm_reply rp;
@@ -3244,7 +3240,8 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status)
{
return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
- status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
+ status,
+ MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3258,7 +3255,8 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 link_type, u8 addr_type, u8 status)
{
return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
- status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
+ status,
+ MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
@@ -3537,9 +3535,9 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
ev->addr.type = link_to_bdaddr(link_type, addr_type);
ev->rssi = rssi;
if (cfm_name)
- ev->flags[0] |= MGMT_DEV_FOUND_CONFIRM_NAME;
+ ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
if (!ssp)
- ev->flags[0] |= MGMT_DEV_FOUND_LEGACY_PAIRING;
+ ev->flags |= cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
if (eir_len > 0)
memcpy(ev->eir, eir, eir_len);
@@ -3549,7 +3547,6 @@ int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
dev_class, 3);
ev->eir_len = cpu_to_le16(eir_len);
-
ev_size = sizeof(*ev) + eir_len;
return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 8a602388f1e7..c75107ef8920 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -26,22 +26,8 @@
*/
#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/net.h>
-#include <linux/mutex.h>
#include <linux/kthread.h>
-#include <linux/slab.h>
-
-#include <net/sock.h>
-#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -115,14 +101,14 @@ static void rfcomm_session_del(struct rfcomm_session *s);
#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
#define __get_rpn_parity(line) (((line) >> 3) & 0x7)
-static inline void rfcomm_schedule(void)
+static void rfcomm_schedule(void)
{
if (!rfcomm_thread)
return;
wake_up_process(rfcomm_thread);
}
-static inline void rfcomm_session_put(struct rfcomm_session *s)
+static void rfcomm_session_put(struct rfcomm_session *s)
{
if (atomic_dec_and_test(&s->refcnt))
rfcomm_session_del(s);
@@ -227,7 +213,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
return err;
}
-static inline int rfcomm_check_security(struct rfcomm_dlc *d)
+static int rfcomm_check_security(struct rfcomm_dlc *d)
{
struct sock *sk = d->session->sock->sk;
struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
@@ -1750,7 +1736,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
/* Send data queued for the DLC.
* Return number of frames left in the queue.
*/
-static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
+static int rfcomm_process_tx(struct rfcomm_dlc *d)
{
struct sk_buff *skb;
int err;
@@ -1798,7 +1784,7 @@ static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
return skb_queue_len(&d->tx_queue);
}
-static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
+static void rfcomm_process_dlcs(struct rfcomm_session *s)
{
struct rfcomm_dlc *d;
struct list_head *p, *n;
@@ -1858,7 +1844,7 @@ static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
}
}
-static inline void rfcomm_process_rx(struct rfcomm_session *s)
+static void rfcomm_process_rx(struct rfcomm_session *s)
{
struct socket *sock = s->sock;
struct sock *sk = sock->sk;
@@ -1883,7 +1869,7 @@ static inline void rfcomm_process_rx(struct rfcomm_session *s)
}
}
-static inline void rfcomm_accept_connection(struct rfcomm_session *s)
+static void rfcomm_accept_connection(struct rfcomm_session *s)
{
struct socket *sock = s->sock, *nsock;
int err;
@@ -1917,7 +1903,7 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
sock_release(nsock);
}
-static inline void rfcomm_check_connection(struct rfcomm_session *s)
+static void rfcomm_check_connection(struct rfcomm_session *s)
{
struct sock *sk = s->sock->sk;
@@ -1941,7 +1927,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
}
}
-static inline void rfcomm_process_sessions(void)
+static void rfcomm_process_sessions(void)
{
struct list_head *p, *n;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index e8707debb864..7e1e59645c05 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -25,27 +25,8 @@
* RFCOMM sockets.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/list.h>
-#include <linux/device.h>
+#include <linux/export.h>
#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/security.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index d1820ff14aee..cb960773c002 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -31,11 +31,6 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
-#include <linux/capability.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/workqueue.h>
-
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/rfcomm.h>
@@ -132,7 +127,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id)
return NULL;
}
-static inline struct rfcomm_dev *rfcomm_dev_get(int id)
+static struct rfcomm_dev *rfcomm_dev_get(int id)
{
struct rfcomm_dev *dev;
@@ -345,7 +340,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
tty_port_put(&dev->port);
}
-static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
+static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
{
tty_port_get(&dev->port);
atomic_add(skb->truesize, &dev->wmem_alloc);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cbdd313659a7..40bbe25dcff7 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -25,26 +25,8 @@
/* Bluetooth SCO sockets. */
#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/fcntl.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/list.h>
-#include <linux/security.h>
-#include <net/sock.h>
-
-#include <linux/uaccess.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -123,7 +105,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
return conn;
}
-static inline struct sock *sco_chan_get(struct sco_conn *conn)
+static struct sock *sco_chan_get(struct sco_conn *conn)
{
struct sock *sk = NULL;
sco_conn_lock(conn);
@@ -157,7 +139,8 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
return 0;
}
-static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
+static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
+ struct sock *parent)
{
int err = 0;
@@ -228,7 +211,7 @@ done:
return err;
}
-static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
+static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
{
struct sco_conn *conn = sco_pi(sk)->conn;
struct sk_buff *skb;
@@ -254,7 +237,7 @@ static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
return len;
}
-static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
+static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
struct sock *sk = sco_chan_get(conn);
@@ -523,7 +506,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
goto done;
err = bt_sock_wait_state(sk, BT_CONNECTED,
- sock_sndtimeo(sk, flags & O_NONBLOCK));
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
release_sock(sk);
@@ -788,7 +771,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
- sk->sk_lingertime);
+ sk->sk_lingertime);
}
release_sock(sk);
return err;
@@ -878,7 +861,7 @@ static void sco_conn_ready(struct sco_conn *conn)
bh_lock_sock(parent);
sk = sco_sock_alloc(sock_net(parent), NULL,
- BTPROTO_SCO, GFP_ATOMIC);
+ BTPROTO_SCO, GFP_ATOMIC);
if (!sk) {
bh_unlock_sock(parent);
goto done;
@@ -907,7 +890,7 @@ done:
/* ----- SCO interface with lower layer (HCI) ----- */
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
- register struct sock *sk;
+ struct sock *sk;
struct hlist_node *node;
int lm = 0;
@@ -920,7 +903,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
continue;
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
- !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+ !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm |= HCI_LM_ACCEPT;
break;
}
@@ -981,7 +964,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
sk_for_each(sk, node, &sco_sk_list.head) {
seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
- batostr(&bt_sk(sk)->dst), sk->sk_state);
+ batostr(&bt_sk(sk)->dst), sk->sk_state);
}
read_unlock(&sco_sk_list.lock);
@@ -1044,8 +1027,8 @@ int __init sco_init(void)
}
if (bt_debugfs) {
- sco_debugfs = debugfs_create_file("sco", 0444,
- bt_debugfs, NULL, &sco_debugfs_fops);
+ sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
+ NULL, &sco_debugfs_fops);
if (!sco_debugfs)
BT_ERR("Failed to create SCO debug file");
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 37df4e9b3896..16ef0dc85a0a 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -20,14 +20,15 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+#include <crypto/b128ops.h>
+
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
#include <net/bluetooth/smp.h>
-#include <linux/crypto.h>
-#include <linux/scatterlist.h>
-#include <crypto/b128ops.h>
#define SMP_TIMEOUT msecs_to_jiffies(30000)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 929e48aed444..333484537600 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -127,9 +127,9 @@ static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
const struct br_cpu_netstats *bstats
= per_cpu_ptr(br->stats, cpu);
do {
- start = u64_stats_fetch_begin(&bstats->syncp);
+ start = u64_stats_fetch_begin_bh(&bstats->syncp);
memcpy(&tmp, bstats, sizeof(tmp));
- } while (u64_stats_fetch_retry(&bstats->syncp, start));
+ } while (u64_stats_fetch_retry_bh(&bstats->syncp, start));
sum.tx_bytes += tmp.tx_bytes;
sum.tx_packets += tmp.tx_packets;
sum.rx_bytes += tmp.rx_bytes;
@@ -246,10 +246,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
if (!np)
goto out;
- np->dev = p->dev;
- strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
-
- err = __netpoll_setup(np);
+ err = __netpoll_setup(np, p->dev);
if (err) {
kfree(np);
goto out;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b66581208cb2..241743417f49 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -540,10 +540,11 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
if (mdb->size >= max) {
max *= 2;
- if (unlikely(max >= br->hash_max)) {
- br_warn(br, "Multicast hash table maximum "
- "reached, disabling snooping: %s, %d\n",
- port ? port->dev->name : br->dev->name, max);
+ if (unlikely(max > br->hash_max)) {
+ br_warn(br, "Multicast hash table maximum of %d "
+ "reached, disabling snooping: %s\n",
+ br->hash_max,
+ port ? port->dev->name : br->dev->name);
err = -E2BIG;
disable:
br->multicast_disabled = 1;
@@ -1160,7 +1161,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
goto out;
}
mld = (struct mld_msg *) icmp6_hdr(skb);
- max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay));
+ max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
if (max_delay)
group = &mld->mld_mca;
} else if (skb->len >= sizeof(*mld2q)) {
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e41456bd3cc6..68e8f364bbf8 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -111,7 +111,13 @@ static inline __be16 pppoe_proto(const struct sk_buff *skb)
pppoe_proto(skb) == htons(PPP_IPV6) && \
brnf_filter_pppoe_tagged)
-static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
+{
+}
+
+static void fake_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
{
}
@@ -120,7 +126,9 @@ static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
return NULL;
}
-static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
return NULL;
}
@@ -134,6 +142,7 @@ static struct dst_ops fake_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.update_pmtu = fake_update_pmtu,
+ .redirect = fake_redirect,
.cow_metrics = fake_cow_metrics,
.neigh_lookup = fake_neigh_lookup,
.mtu = fake_mtu,
@@ -373,19 +382,29 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
if (!skb->dev)
goto free_skb;
dst = skb_dst(skb);
- neigh = dst_get_neighbour_noref(dst);
- if (neigh->hh.hh_len) {
- neigh_hh_bridge(&neigh->hh, skb);
- skb->dev = nf_bridge->physindev;
- return br_handle_frame_finish(skb);
- } else {
- /* the neighbour function below overwrites the complete
- * MAC header, so we save the Ethernet source address and
- * protocol number. */
- skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
- /* tell br_dev_xmit to continue with forwarding */
- nf_bridge->mask |= BRNF_BRIDGED_DNAT;
- return neigh->output(neigh, skb);
+ neigh = dst_neigh_lookup_skb(dst, skb);
+ if (neigh) {
+ int ret;
+
+ if (neigh->hh.hh_len) {
+ neigh_hh_bridge(&neigh->hh, skb);
+ skb->dev = nf_bridge->physindev;
+ ret = br_handle_frame_finish(skb);
+ } else {
+ /* the neighbour function below overwrites the complete
+ * MAC header, so we save the Ethernet source address and
+ * protocol number.
+ */
+ skb_copy_from_linear_data_offset(skb,
+ -(ETH_HLEN-ETH_ALEN),
+ skb->nf_bridge->data,
+ ETH_HLEN-ETH_ALEN);
+ /* tell br_dev_xmit to continue with forwarding */
+ nf_bridge->mask |= BRNF_BRIDGED_DNAT;
+ ret = neigh->output(neigh, skb);
+ }
+ neigh_release(neigh);
+ return ret;
}
free_skb:
kfree_skb(skb);
@@ -764,9 +783,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
return NF_DROP;
if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
- pf = PF_INET;
+ pf = NFPROTO_IPV4;
else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
- pf = PF_INET6;
+ pf = NFPROTO_IPV6;
else
return NF_ACCEPT;
@@ -778,13 +797,13 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
nf_bridge->mask |= BRNF_PKT_TYPE;
}
- if (pf == PF_INET && br_parse_ip_options(skb))
+ if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
return NF_DROP;
/* The physdev module checks on this */
nf_bridge->mask |= BRNF_BRIDGED;
nf_bridge->physoutdev = skb->dev;
- if (pf == PF_INET)
+ if (pf == NFPROTO_IPV4)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
@@ -871,9 +890,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
return NF_DROP;
if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
- pf = PF_INET;
+ pf = NFPROTO_IPV4;
else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
- pf = PF_INET6;
+ pf = NFPROTO_IPV6;
else
return NF_ACCEPT;
@@ -886,7 +905,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
nf_bridge_pull_encap_header(skb);
nf_bridge_save_header(skb);
- if (pf == PF_INET)
+ if (pf == NFPROTO_IPV4)
skb->protocol = htons(ETH_P_IP);
else
skb->protocol = htons(ETH_P_IPV6);
@@ -919,49 +938,49 @@ static struct nf_hook_ops br_nf_ops[] __read_mostly = {
{
.hook = br_nf_pre_routing,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_local_in,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_forward_ip,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF - 1,
},
{
.hook = br_nf_forward_arp,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_BRNF,
},
{
.hook = br_nf_post_routing,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
.priority = NF_BR_PRI_LAST,
},
{
.hook = ip_sabotage_in,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_FIRST,
},
{
.hook = ip_sabotage_in,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_FIRST,
},
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 5449294bdd5e..19063473c71f 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -145,19 +145,24 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
if (!ub->skb) {
if (!(ub->skb = ulog_alloc_skb(size)))
- goto alloc_failure;
+ goto unlock;
} else if (size > skb_tailroom(ub->skb)) {
ulog_send(group);
if (!(ub->skb = ulog_alloc_skb(size)))
- goto alloc_failure;
+ goto unlock;
}
- nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, 0,
- size - NLMSG_ALIGN(sizeof(*nlh)));
+ nlh = nlmsg_put(ub->skb, 0, ub->qlen, 0,
+ size - NLMSG_ALIGN(sizeof(*nlh)), 0);
+ if (!nlh) {
+ kfree_skb(ub->skb);
+ ub->skb = NULL;
+ goto unlock;
+ }
ub->qlen++;
- pm = NLMSG_DATA(nlh);
+ pm = nlmsg_data(nlh);
/* Fill in the ulog data */
pm->version = EBT_ULOG_VERSION;
@@ -209,14 +214,6 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
unlock:
spin_unlock_bh(lock);
-
- return;
-
-nlmsg_failure:
- pr_debug("error during NLMSG_PUT. This should "
- "not happen, please report to author.\n");
-alloc_failure:
- goto unlock;
}
/* this function is registered with the netfilter core */
@@ -285,6 +282,9 @@ static int __init ebt_ulog_init(void)
{
int ret;
int i;
+ struct netlink_kernel_cfg cfg = {
+ .groups = EBT_ULOG_MAXNLGROUPS,
+ };
if (nlbufsiz >= 128*1024) {
pr_warning("Netlink buffer has to be <= 128kB,"
@@ -299,8 +299,7 @@ static int __init ebt_ulog_init(void)
}
ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
- EBT_ULOG_MAXNLGROUPS, NULL, NULL,
- THIS_MODULE);
+ THIS_MODULE, &cfg);
if (!ebtulognl)
ret = -ENOMEM;
else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 554b31289607..1ae1d9cb278d 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -90,11 +90,8 @@ static int caifd_refcnt_read(struct caif_device_entry *e)
/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
- struct caif_device_entry_list *caifdevs;
struct caif_device_entry *caifd;
- caifdevs = caif_device_list(dev_net(dev));
-
caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
if (!caifd)
return NULL;
@@ -131,6 +128,11 @@ void caif_flow_cb(struct sk_buff *skb)
rcu_read_lock();
caifd = caif_get(skb->dev);
+
+ WARN_ON(caifd == NULL);
+ if (caifd == NULL)
+ return;
+
caifd_hold(caifd);
rcu_read_unlock();
@@ -561,9 +563,9 @@ static int __init caif_device_init(void)
static void __exit caif_device_exit(void)
{
- unregister_pernet_subsys(&caif_net_ops);
unregister_netdevice_notifier(&caif_device_notifier);
dev_remove_pack(&caif_packet_type);
+ unregister_pernet_subsys(&caif_net_ops);
}
module_init(caif_device_init);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 047cd0eec022..44f270fc2d06 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -175,15 +175,17 @@ static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
{
+ struct cfpkt *pkt;
struct cfctrl *cfctrl = container_obj(layer);
- struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
struct cflayer *dn = cfctrl->serv.layer.dn;
- if (!pkt)
- return;
+
if (!dn) {
pr_debug("not able to send enum request\n");
return;
}
+ pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt)
+ return;
caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
init_info(cfpkt_info(pkt), cfctrl);
cfpkt_info(pkt)->dev_info->id = physlinkid;
@@ -302,18 +304,17 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
struct cflayer *client)
{
int ret;
+ struct cfpkt *pkt;
struct cfctrl *cfctrl = container_obj(layer);
- struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
struct cflayer *dn = cfctrl->serv.layer.dn;
- if (!pkt)
- return -ENOMEM;
-
if (!dn) {
pr_debug("not able to send link-down request\n");
return -ENODEV;
}
-
+ pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt)
+ return -ENOMEM;
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
cfpkt_addbdy(pkt, channelid);
init_info(cfpkt_info(pkt), cfctrl);
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 0ce2ad0696da..821022a7214f 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -41,6 +41,7 @@
*/
#include <linux/module.h>
+#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
@@ -220,30 +221,46 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
* -ENOBUFS on full driver queue (see net_xmit_errno())
* -ENOMEM when local loopback failed at calling skb_clone()
* -EPERM when trying to send on a non-CAN interface
+ * -EMSGSIZE CAN frame size is bigger than CAN interface MTU
* -EINVAL when the skb->data does not contain a valid CAN frame
*/
int can_send(struct sk_buff *skb, int loop)
{
struct sk_buff *newskb = NULL;
- struct can_frame *cf = (struct can_frame *)skb->data;
- int err;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ int err = -EINVAL;
+
+ if (skb->len == CAN_MTU) {
+ skb->protocol = htons(ETH_P_CAN);
+ if (unlikely(cfd->len > CAN_MAX_DLEN))
+ goto inval_skb;
+ } else if (skb->len == CANFD_MTU) {
+ skb->protocol = htons(ETH_P_CANFD);
+ if (unlikely(cfd->len > CANFD_MAX_DLEN))
+ goto inval_skb;
+ } else
+ goto inval_skb;
- if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
- kfree_skb(skb);
- return -EINVAL;
+ /*
+ * Make sure the CAN frame can pass the selected CAN netdevice.
+ * As structs can_frame and canfd_frame are similar, we can provide
+ * CAN FD frames to legacy CAN drivers as long as the length is <= 8
+ */
+ if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
+ err = -EMSGSIZE;
+ goto inval_skb;
}
- if (skb->dev->type != ARPHRD_CAN) {
- kfree_skb(skb);
- return -EPERM;
+ if (unlikely(skb->dev->type != ARPHRD_CAN)) {
+ err = -EPERM;
+ goto inval_skb;
}
- if (!(skb->dev->flags & IFF_UP)) {
- kfree_skb(skb);
- return -ENETDOWN;
+ if (unlikely(!(skb->dev->flags & IFF_UP))) {
+ err = -ENETDOWN;
+ goto inval_skb;
}
- skb->protocol = htons(ETH_P_CAN);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
@@ -300,6 +317,10 @@ int can_send(struct sk_buff *skb, int loop)
can_stats.tx_frames_delta++;
return 0;
+
+inval_skb:
+ kfree_skb(skb);
+ return err;
}
EXPORT_SYMBOL(can_send);
@@ -334,8 +355,8 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
* relevant bits for the filter.
*
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
- * filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
- * there is a special filterlist and a special rx path filter handling.
+ * filter for error messages (CAN_ERR_FLAG bit set in mask). For error msg
+ * frames there is a special filterlist and a special rx path filter handling.
*
* Return:
* Pointer to optimal filterlist for the given can_id/mask pair.
@@ -347,7 +368,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
{
canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
- /* filter for error frames in extra filterlist */
+ /* filter for error message frames in extra filterlist */
if (*mask & CAN_ERR_FLAG) {
/* clear CAN_ERR_FLAG in filter entry */
*mask &= CAN_ERR_MASK;
@@ -408,7 +429,7 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
* <received_can_id> & mask == can_id & mask
*
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
- * filter for error frames (CAN_ERR_FLAG bit set in mask).
+ * filter for error message frames (CAN_ERR_FLAG bit set in mask).
*
* The provided pointer to the sk_buff is guaranteed to be valid as long as
* the callback function is running. The callback function must *not* free
@@ -578,7 +599,7 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
return 0;
if (can_id & CAN_ERR_FLAG) {
- /* check for error frame entries only */
+ /* check for error message frame entries only */
hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
if (can_id & r->mask) {
deliver(skb, r);
@@ -632,24 +653,11 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
return matches;
}
-static int can_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev)
+static void can_receive(struct sk_buff *skb, struct net_device *dev)
{
struct dev_rcv_lists *d;
- struct can_frame *cf = (struct can_frame *)skb->data;
int matches;
- if (!net_eq(dev_net(dev), &init_net))
- goto drop;
-
- if (WARN_ONCE(dev->type != ARPHRD_CAN ||
- skb->len != sizeof(struct can_frame) ||
- cf->can_dlc > 8,
- "PF_CAN: dropped non conform skbuf: "
- "dev type %d, len %d, can_dlc %d\n",
- dev->type, skb->len, cf->can_dlc))
- goto drop;
-
/* update statistics */
can_stats.rx_frames++;
can_stats.rx_frames_delta++;
@@ -673,7 +681,49 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
can_stats.matches++;
can_stats.matches_delta++;
}
+}
+static int can_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (unlikely(!net_eq(dev_net(dev), &init_net)))
+ goto drop;
+
+ if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+ skb->len != CAN_MTU ||
+ cfd->len > CAN_MAX_DLEN,
+ "PF_CAN: dropped non conform CAN skbuf: "
+ "dev type %d, len %d, datalen %d\n",
+ dev->type, skb->len, cfd->len))
+ goto drop;
+
+ can_receive(skb, dev);
+ return NET_RX_SUCCESS;
+
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (unlikely(!net_eq(dev_net(dev), &init_net)))
+ goto drop;
+
+ if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+ skb->len != CANFD_MTU ||
+ cfd->len > CANFD_MAX_DLEN,
+ "PF_CAN: dropped non conform CAN FD skbuf: "
+ "dev type %d, len %d, datalen %d\n",
+ dev->type, skb->len, cfd->len))
+ goto drop;
+
+ can_receive(skb, dev);
return NET_RX_SUCCESS;
drop:
@@ -807,10 +857,14 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
static struct packet_type can_packet __read_mostly = {
.type = cpu_to_be16(ETH_P_CAN),
- .dev = NULL,
.func = can_rcv,
};
+static struct packet_type canfd_packet __read_mostly = {
+ .type = cpu_to_be16(ETH_P_CANFD),
+ .func = canfd_rcv,
+};
+
static const struct net_proto_family can_family_ops = {
.family = PF_CAN,
.create = can_create,
@@ -824,6 +878,12 @@ static struct notifier_block can_netdev_notifier __read_mostly = {
static __init int can_init(void)
{
+ /* check for correct padding to be able to use the structs similarly */
+ BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
+ offsetof(struct canfd_frame, len) ||
+ offsetof(struct can_frame, data) !=
+ offsetof(struct canfd_frame, data));
+
printk(banner);
memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
@@ -846,6 +906,7 @@ static __init int can_init(void)
sock_register(&can_family_ops);
register_netdevice_notifier(&can_netdev_notifier);
dev_add_pack(&can_packet);
+ dev_add_pack(&canfd_packet);
return 0;
}
@@ -860,6 +921,7 @@ static __exit void can_exit(void)
can_remove_proc();
/* protocol unregister */
+ dev_remove_pack(&canfd_packet);
dev_remove_pack(&can_packet);
unregister_netdevice_notifier(&can_netdev_notifier);
sock_unregister(PF_CAN);
diff --git a/net/can/af_can.h b/net/can/af_can.h
index fd882dbadad3..1dccb4c33894 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -104,6 +104,9 @@ struct s_pstats {
unsigned long rcv_entries_max;
};
+/* receive filters subscribed for 'all' CAN devices */
+extern struct dev_rcv_lists can_rx_alldev_list;
+
/* function prototypes for the CAN networklayer procfs (proc.c) */
extern void can_init_proc(void);
extern void can_remove_proc(void);
diff --git a/net/can/gw.c b/net/can/gw.c
index b41acf25668f..b54d5e695b03 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -444,11 +444,14 @@ static int cgw_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
-static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
+static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
+ u32 pid, u32 seq, int flags)
{
struct cgw_frame_mod mb;
struct rtcanmsg *rtcan;
- struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*rtcan), 0);
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
if (!nlh)
return -EMSGSIZE;
@@ -462,15 +465,11 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
if (gwj->handled_frames) {
if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
}
if (gwj->dropped_frames) {
if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
}
/* check non default settings of attributes */
@@ -480,8 +479,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
mb.modtype = gwj->mod.modtype.and;
if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
}
if (gwj->mod.modtype.or) {
@@ -489,8 +486,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
mb.modtype = gwj->mod.modtype.or;
if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
}
if (gwj->mod.modtype.xor) {
@@ -498,8 +493,6 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
mb.modtype = gwj->mod.modtype.xor;
if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
}
if (gwj->mod.modtype.set) {
@@ -507,26 +500,18 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
mb.modtype = gwj->mod.modtype.set;
if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
}
if (gwj->mod.csumfunc.crc8) {
if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
&gwj->mod.csum.crc8) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + \
- NLA_ALIGN(CGW_CS_CRC8_LEN);
}
if (gwj->mod.csumfunc.xor) {
if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
&gwj->mod.csum.xor) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + \
- NLA_ALIGN(CGW_CS_XOR_LEN);
}
if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
@@ -535,23 +520,16 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
&gwj->ccgw.filter) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN +
- NLA_ALIGN(sizeof(struct can_filter));
}
if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
goto cancel;
- else
- nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
}
- return skb->len;
+ return nlmsg_end(skb, nlh);
cancel:
nlmsg_cancel(skb, nlh);
@@ -571,7 +549,8 @@ static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
if (idx < s_idx)
goto cont;
- if (cgw_put_job(skb, gwj) < 0)
+ if (cgw_put_job(skb, gwj, RTM_NEWROUTE, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
break;
cont:
idx++;
@@ -583,6 +562,18 @@ cont:
return skb->len;
}
+static const struct nla_policy cgw_policy[CGW_MAX+1] = {
+ [CGW_MOD_AND] = { .len = sizeof(struct cgw_frame_mod) },
+ [CGW_MOD_OR] = { .len = sizeof(struct cgw_frame_mod) },
+ [CGW_MOD_XOR] = { .len = sizeof(struct cgw_frame_mod) },
+ [CGW_MOD_SET] = { .len = sizeof(struct cgw_frame_mod) },
+ [CGW_CS_XOR] = { .len = sizeof(struct cgw_csum_xor) },
+ [CGW_CS_CRC8] = { .len = sizeof(struct cgw_csum_crc8) },
+ [CGW_SRC_IF] = { .type = NLA_U32 },
+ [CGW_DST_IF] = { .type = NLA_U32 },
+ [CGW_FILTER] = { .len = sizeof(struct can_filter) },
+};
+
/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
u8 gwtype, void *gwtypeattr)
@@ -595,14 +586,14 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
/* initialize modification & checksum data space */
memset(mod, 0, sizeof(*mod));
- err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, NULL);
+ err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX,
+ cgw_policy);
if (err < 0)
return err;
/* check for AND/OR/XOR/SET modifications */
- if (tb[CGW_MOD_AND] &&
- nla_len(tb[CGW_MOD_AND]) == CGW_MODATTR_LEN) {
+ if (tb[CGW_MOD_AND]) {
nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.and, &mb.cf);
@@ -618,8 +609,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
mod->modfunc[modidx++] = mod_and_data;
}
- if (tb[CGW_MOD_OR] &&
- nla_len(tb[CGW_MOD_OR]) == CGW_MODATTR_LEN) {
+ if (tb[CGW_MOD_OR]) {
nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.or, &mb.cf);
@@ -635,8 +625,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
mod->modfunc[modidx++] = mod_or_data;
}
- if (tb[CGW_MOD_XOR] &&
- nla_len(tb[CGW_MOD_XOR]) == CGW_MODATTR_LEN) {
+ if (tb[CGW_MOD_XOR]) {
nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.xor, &mb.cf);
@@ -652,8 +641,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
mod->modfunc[modidx++] = mod_xor_data;
}
- if (tb[CGW_MOD_SET] &&
- nla_len(tb[CGW_MOD_SET]) == CGW_MODATTR_LEN) {
+ if (tb[CGW_MOD_SET]) {
nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);
canframecpy(&mod->modframe.set, &mb.cf);
@@ -672,11 +660,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
/* check for checksum operations after CAN frame modifications */
if (modidx) {
- if (tb[CGW_CS_CRC8] &&
- nla_len(tb[CGW_CS_CRC8]) == CGW_CS_CRC8_LEN) {
-
- struct cgw_csum_crc8 *c = (struct cgw_csum_crc8 *)\
- nla_data(tb[CGW_CS_CRC8]);
+ if (tb[CGW_CS_CRC8]) {
+ struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);
err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
c->result_idx);
@@ -699,11 +684,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
mod->csumfunc.crc8 = cgw_csum_crc8_neg;
}
- if (tb[CGW_CS_XOR] &&
- nla_len(tb[CGW_CS_XOR]) == CGW_CS_XOR_LEN) {
-
- struct cgw_csum_xor *c = (struct cgw_csum_xor *)\
- nla_data(tb[CGW_CS_XOR]);
+ if (tb[CGW_CS_XOR]) {
+ struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);
err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
c->result_idx);
@@ -735,8 +717,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
memset(ccgw, 0, sizeof(*ccgw));
/* check for can_filter in attributes */
- if (tb[CGW_FILTER] &&
- nla_len(tb[CGW_FILTER]) == sizeof(struct can_filter))
+ if (tb[CGW_FILTER])
nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
sizeof(struct can_filter));
@@ -746,13 +727,8 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
return err;
- if (nla_len(tb[CGW_SRC_IF]) == sizeof(u32))
- nla_memcpy(&ccgw->src_idx, tb[CGW_SRC_IF],
- sizeof(u32));
-
- if (nla_len(tb[CGW_DST_IF]) == sizeof(u32))
- nla_memcpy(&ccgw->dst_idx, tb[CGW_DST_IF],
- sizeof(u32));
+ ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
+ ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);
/* both indices set to 0 for flushing all routing entries */
if (!ccgw->src_idx && !ccgw->dst_idx)
diff --git a/net/can/proc.c b/net/can/proc.c
index ba873c36d2fd..3b6dd3180492 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -83,9 +83,6 @@ static const char rx_list_name[][8] = {
[RX_EFF] = "rx_eff",
};
-/* receive filters subscribed for 'all' CAN devices */
-extern struct dev_rcv_lists can_rx_alldev_list;
-
/*
* af_can statistics stuff
*/
diff --git a/net/can/raw.c b/net/can/raw.c
index 46cca3a91d19..3e9c89356a93 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -82,6 +82,7 @@ struct raw_sock {
struct notifier_block notifier;
int loopback;
int recv_own_msgs;
+ int fd_frames;
int count; /* number of active filters */
struct can_filter dfilter; /* default/single filter */
struct can_filter *filter; /* pointer to filter(s) */
@@ -119,6 +120,14 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
if (!ro->recv_own_msgs && oskb->sk == sk)
return;
+ /* do not pass frames with DLC > 8 to a legacy socket */
+ if (!ro->fd_frames) {
+ struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
+
+ if (unlikely(cfd->len > CAN_MAX_DLEN))
+ return;
+ }
+
/* clone the given skb to be able to enqueue it into the rcv queue */
skb = skb_clone(oskb, GFP_ATOMIC);
if (!skb)
@@ -291,6 +300,7 @@ static int raw_init(struct sock *sk)
/* set default loopback behaviour */
ro->loopback = 1;
ro->recv_own_msgs = 0;
+ ro->fd_frames = 0;
/* set notifier */
ro->notifier.notifier_call = raw_notifier;
@@ -569,6 +579,15 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
break;
+ case CAN_RAW_FD_FRAMES:
+ if (optlen != sizeof(ro->fd_frames))
+ return -EINVAL;
+
+ if (copy_from_user(&ro->fd_frames, optval, optlen))
+ return -EFAULT;
+
+ break;
+
default:
return -ENOPROTOOPT;
}
@@ -627,6 +646,12 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
val = &ro->recv_own_msgs;
break;
+ case CAN_RAW_FD_FRAMES:
+ if (len > sizeof(int))
+ len = sizeof(int);
+ val = &ro->fd_frames;
+ break;
+
default:
return -ENOPROTOOPT;
}
@@ -662,8 +687,13 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
} else
ifindex = ro->ifindex;
- if (size != sizeof(struct can_frame))
- return -EINVAL;
+ if (ro->fd_frames) {
+ if (unlikely(size != CANFD_MTU && size != CAN_MTU))
+ return -EINVAL;
+ } else {
+ if (unlikely(size != CAN_MTU))
+ return -EINVAL;
+ }
dev = dev_get_by_index(&init_net, ifindex);
if (!dev)
@@ -705,7 +735,9 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
+ struct raw_sock *ro = raw_sk(sk);
struct sk_buff *skb;
+ int rxmtu;
int err = 0;
int noblock;
@@ -716,10 +748,20 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!skb)
return err;
- if (size < skb->len)
+ /*
+ * when serving a legacy socket the DLC <= 8 is already checked inside
+ * raw_rcv(). Now check if we need to pass a canfd_frame to a legacy
+ * socket and cut the possible CANFD_MTU/CAN_MTU length to CAN_MTU
+ */
+ if (!ro->fd_frames)
+ rxmtu = CAN_MTU;
+ else
+ rxmtu = skb->len;
+
+ if (size < rxmtu)
msg->msg_flags |= MSG_TRUNC;
else
- size = skb->len;
+ size = rxmtu;
err = memcpy_toiovec(msg->msg_iov, skb->data, size);
if (err < 0) {
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b332c3d76059..10255e81be79 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1423,7 +1423,7 @@ static int process_connect(struct ceph_connection *con)
* dropped messages.
*/
dout("process_connect got RESET peer seq %u\n",
- le32_to_cpu(con->in_connect.connect_seq));
+ le32_to_cpu(con->in_reply.connect_seq));
pr_err("%s%lld %s connection reset\n",
ENTITY_NAME(con->peer_name),
ceph_pr_addr(&con->peer_addr.in_addr));
@@ -1450,10 +1450,10 @@ static int process_connect(struct ceph_connection *con)
* If we sent a smaller connect_seq than the peer has, try
* again with a larger value.
*/
- dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
+ dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
le32_to_cpu(con->out_connect.connect_seq),
- le32_to_cpu(con->in_connect.connect_seq));
- con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
+ le32_to_cpu(con->in_reply.connect_seq));
+ con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
ceph_con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
@@ -1468,9 +1468,9 @@ static int process_connect(struct ceph_connection *con)
*/
dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
con->peer_global_seq,
- le32_to_cpu(con->in_connect.global_seq));
+ le32_to_cpu(con->in_reply.global_seq));
get_global_seq(con->msgr,
- le32_to_cpu(con->in_connect.global_seq));
+ le32_to_cpu(con->in_reply.global_seq));
ceph_con_out_kvec_reset(con);
ret = prepare_write_connect(con);
if (ret < 0)
diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c
index 13cb409a7bba..665cd23020ff 100644
--- a/net/ceph/pagelist.c
+++ b/net/ceph/pagelist.c
@@ -72,8 +72,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
}
EXPORT_SYMBOL(ceph_pagelist_append);
-/**
- * Allocate enough pages for a pagelist to append the given amount
+/* Allocate enough pages for a pagelist to append the given amount
* of data without without allocating.
* Returns: 0 on success, -ENOMEM on error.
*/
@@ -95,9 +94,7 @@ int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
}
EXPORT_SYMBOL(ceph_pagelist_reserve);
-/**
- * Free any pages that have been preallocated.
- */
+/* Free any pages that have been preallocated. */
int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
{
while (!list_empty(&pl->free_list)) {
@@ -112,9 +109,7 @@ int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
}
EXPORT_SYMBOL(ceph_pagelist_free_reserve);
-/**
- * Create a truncation point.
- */
+/* Create a truncation point. */
void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
struct ceph_pagelist_cursor *c)
{
@@ -124,8 +119,7 @@ void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
}
EXPORT_SYMBOL(ceph_pagelist_set_cursor);
-/**
- * Truncate a pagelist to the given point. Move extra pages to reserve.
+/* Truncate a pagelist to the given point. Move extra pages to reserve.
* This won't sleep.
* Returns: 0 on success,
* -EINVAL if the pagelist doesn't match the trunc point pagelist
diff --git a/net/compat.c b/net/compat.c
index 1b96281892de..74ed1d7a84a2 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -221,6 +221,8 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
{
struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
struct compat_cmsghdr cmhdr;
+ struct compat_timeval ctv;
+ struct compat_timespec cts[3];
int cmlen;
if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
@@ -229,8 +231,6 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
}
if (!COMPAT_USE_64BIT_TIME) {
- struct compat_timeval ctv;
- struct compat_timespec cts[3];
if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
struct timeval *tv = (struct timeval *)data;
ctv.tv_sec = tv->tv_sec;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index ae6acf6a3dea..0337e2b76862 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -248,7 +248,6 @@ void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
unlock_sock_fast(sk, slow);
/* skb is now orphaned, can be freed outside of locked section */
- trace_kfree_skb(skb, skb_free_datagram_locked);
__kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);
diff --git a/net/core/dev.c b/net/core/dev.c
index 6df214041a5e..0ebaea16632f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1136,8 +1136,8 @@ void dev_load(struct net *net, const char *name)
no_module = request_module("netdev-%s", name);
if (no_module && capable(CAP_SYS_MODULE)) {
if (!request_module("%s", name))
- pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
- name);
+ pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
+ name);
}
}
EXPORT_SYMBOL(dev_load);
@@ -1632,6 +1632,8 @@ static inline int deliver_skb(struct sk_buff *skb,
struct packet_type *pt_prev,
struct net_device *orig_dev)
{
+ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+ return -ENOMEM;
atomic_inc(&skb->users);
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
@@ -1691,7 +1693,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
rcu_read_unlock();
}
-/* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
+/**
+ * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
* @dev: Network device
* @txq: number of queues available
*
@@ -1793,6 +1796,18 @@ int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
+/**
+ * netif_get_num_default_rss_queues - default number of RSS queues
+ *
+ * This routine should set an upper limit on the number of RSS queues
+ * used by default by multiqueue devices.
+ */
+int netif_get_num_default_rss_queues(void)
+{
+ return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
+}
+EXPORT_SYMBOL(netif_get_num_default_rss_queues);
+
static inline void __netif_reschedule(struct Qdisc *q)
{
struct softnet_data *sd;
@@ -2444,8 +2459,12 @@ static void skb_update_prio(struct sk_buff *skb)
{
struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
- if ((!skb->priority) && (skb->sk) && map)
- skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
+ if (!skb->priority && skb->sk && map) {
+ unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
+
+ if (prioidx < map->priomap_len)
+ skb->priority = map->priomap[prioidx];
+ }
}
#else
#define skb_update_prio(skb)
@@ -2455,6 +2474,23 @@ static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10
/**
+ * dev_loopback_xmit - loop back @skb
+ * @skb: buffer to transmit
+ */
+int dev_loopback_xmit(struct sk_buff *skb)
+{
+ skb_reset_mac_header(skb);
+ __skb_pull(skb, skb_network_offset(skb));
+ skb->pkt_type = PACKET_LOOPBACK;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ WARN_ON(!skb_dst(skb));
+ skb_dst_force(skb);
+ netif_rx_ni(skb);
+ return 0;
+}
+EXPORT_SYMBOL(dev_loopback_xmit);
+
+/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
*
@@ -3137,8 +3173,6 @@ static int __netif_receive_skb(struct sk_buff *skb)
if (netpoll_receive_skb(skb))
return NET_RX_DROP;
- if (!skb->skb_iif)
- skb->skb_iif = skb->dev->ifindex;
orig_dev = skb->dev;
skb_reset_network_header(skb);
@@ -3150,6 +3184,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
rcu_read_lock();
another_round:
+ skb->skb_iif = skb->dev->ifindex;
__this_cpu_inc(softnet_data.processed);
@@ -3228,7 +3263,10 @@ ncls:
}
if (pt_prev) {
- ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+ ret = -ENOMEM;
+ else
+ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
@@ -5642,7 +5680,7 @@ int netdev_refcnt_read(const struct net_device *dev)
}
EXPORT_SYMBOL(netdev_refcnt_read);
-/*
+/**
* netdev_wait_allrefs - wait until all references are gone.
*
* This is called when unregistering network devices.
@@ -6279,7 +6317,8 @@ static struct hlist_head *netdev_create_hash(void)
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
- INIT_LIST_HEAD(&net->dev_base_head);
+ if (net != &init_net)
+ INIT_LIST_HEAD(&net->dev_base_head);
net->dev_name_head = netdev_create_hash();
if (net->dev_name_head == NULL)
diff --git a/net/core/dst.c b/net/core/dst.c
index 43d94cedbf7c..069d51d29414 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -94,7 +94,7 @@ loop:
* But we do not have state "obsoleted, but
* referenced by parent", so it is right.
*/
- if (dst->obsolete > 1)
+ if (dst->obsolete > 0)
continue;
___dst_free(dst);
@@ -152,7 +152,7 @@ EXPORT_SYMBOL(dst_discard);
const u32 dst_default_metrics[RTAX_MAX];
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
- int initial_ref, int initial_obsolete, int flags)
+ int initial_ref, int initial_obsolete, unsigned short flags)
{
struct dst_entry *dst;
@@ -171,7 +171,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst_init_metrics(dst, dst_default_metrics, true);
dst->expires = 0UL;
dst->path = dst;
- RCU_INIT_POINTER(dst->_neighbour, NULL);
#ifdef CONFIG_XFRM
dst->xfrm = NULL;
#endif
@@ -188,6 +187,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
dst->__use = 0;
dst->lastuse = jiffies;
dst->flags = flags;
+ dst->pending_confirm = 0;
dst->next = NULL;
if (!(flags & DST_NOCOUNT))
dst_entries_add(ops, 1);
@@ -202,7 +202,7 @@ static void ___dst_free(struct dst_entry *dst)
*/
if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
dst->input = dst->output = dst_discard;
- dst->obsolete = 2;
+ dst->obsolete = DST_OBSOLETE_DEAD;
}
void __dst_free(struct dst_entry *dst)
@@ -224,19 +224,12 @@ EXPORT_SYMBOL(__dst_free);
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
struct dst_entry *child;
- struct neighbour *neigh;
smp_rmb();
again:
- neigh = rcu_dereference_protected(dst->_neighbour, 1);
child = dst->child;
- if (neigh) {
- RCU_INIT_POINTER(dst->_neighbour, NULL);
- neigh_release(neigh);
- }
-
if (!(dst->flags & DST_NOCOUNT))
dst_entries_add(dst->ops, -1);
@@ -360,19 +353,9 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
if (!unregister) {
dst->input = dst->output = dst_discard;
} else {
- struct neighbour *neigh;
-
dst->dev = dev_net(dst->dev)->loopback_dev;
dev_hold(dst->dev);
dev_put(dev);
- rcu_read_lock();
- neigh = dst_get_neighbour_noref(dst);
- if (neigh && neigh->dev == dev) {
- neigh->dev = dst->dev;
- dev_hold(dst->dev);
- dev_put(dev);
- }
- rcu_read_unlock();
}
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 9c2afb480270..cbf033dcaf1f 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -729,6 +729,40 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
return dev->ethtool_ops->set_wol(dev, &wol);
}
+static int ethtool_get_eee(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_eee edata;
+ int rc;
+
+ if (!dev->ethtool_ops->get_eee)
+ return -EOPNOTSUPP;
+
+ memset(&edata, 0, sizeof(struct ethtool_eee));
+ edata.cmd = ETHTOOL_GEEE;
+ rc = dev->ethtool_ops->get_eee(dev, &edata);
+
+ if (rc)
+ return rc;
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int ethtool_set_eee(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_eee edata;
+
+ if (!dev->ethtool_ops->set_eee)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_eee(dev, &edata);
+}
+
static int ethtool_nway_reset(struct net_device *dev)
{
if (!dev->ethtool_ops->nway_reset)
@@ -1409,6 +1443,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GSET:
case ETHTOOL_GDRVINFO:
case ETHTOOL_GMSGLVL:
+ case ETHTOOL_GLINK:
case ETHTOOL_GCOALESCE:
case ETHTOOL_GRINGPARAM:
case ETHTOOL_GPAUSEPARAM:
@@ -1417,6 +1452,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GSG:
case ETHTOOL_GSSET_INFO:
case ETHTOOL_GSTRINGS:
+ case ETHTOOL_GSTATS:
case ETHTOOL_GTSO:
case ETHTOOL_GPERMADDR:
case ETHTOOL_GUFO:
@@ -1429,8 +1465,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_GRXCLSRLCNT:
case ETHTOOL_GRXCLSRULE:
case ETHTOOL_GRXCLSRLALL:
+ case ETHTOOL_GRXFHINDIR:
case ETHTOOL_GFEATURES:
+ case ETHTOOL_GCHANNELS:
case ETHTOOL_GET_TS_INFO:
+ case ETHTOOL_GEEE:
break;
default:
if (!capable(CAP_NET_ADMIN))
@@ -1471,6 +1510,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
rc = ethtool_set_value_void(dev, useraddr,
dev->ethtool_ops->set_msglevel);
break;
+ case ETHTOOL_GEEE:
+ rc = ethtool_get_eee(dev, useraddr);
+ break;
+ case ETHTOOL_SEEE:
+ rc = ethtool_set_eee(dev, useraddr);
+ break;
case ETHTOOL_NWAY_RST:
rc = ethtool_nway_reset(dev);
break;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 72cceb79d0d4..ab7db83236c9 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -151,6 +151,8 @@ static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
list_del_rcu(&rule->list);
+ if (ops->delete)
+ ops->delete(rule);
fib_rule_put(rule);
}
}
@@ -499,6 +501,8 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
notify_rule_change(RTM_DELRULE, rule, ops, nlh,
NETLINK_CB(skb).pid);
+ if (ops->delete)
+ ops->delete(rule);
fib_rule_put(rule);
flush_route_cache(ops);
rules_ops_put(ops);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index a225089df5b6..466820b6e344 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -4,6 +4,7 @@
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
+#include <net/ipv6.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
@@ -55,8 +56,8 @@ ipv6:
return false;
ip_proto = iph->nexthdr;
- flow->src = iph->saddr.s6_addr32[3];
- flow->dst = iph->daddr.s6_addr32[3];
+ flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
+ flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
nhoff += sizeof(struct ipv6hdr);
break;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index d81d026138f0..117afaf51268 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -474,8 +474,8 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
}
EXPORT_SYMBOL(neigh_lookup_nodev);
-struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
- struct net_device *dev)
+struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev, bool want_ref)
{
u32 hash_val;
int key_len = tbl->key_len;
@@ -535,14 +535,16 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
n1 = rcu_dereference_protected(n1->next,
lockdep_is_held(&tbl->lock))) {
if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
- neigh_hold(n1);
+ if (want_ref)
+ neigh_hold(n1);
rc = n1;
goto out_tbl_unlock;
}
}
n->dead = 0;
- neigh_hold(n);
+ if (want_ref)
+ neigh_hold(n);
rcu_assign_pointer(n->next,
rcu_dereference_protected(nht->hash_buckets[hash_val],
lockdep_is_held(&tbl->lock)));
@@ -558,7 +560,7 @@ out_neigh_release:
neigh_release(n);
goto out;
}
-EXPORT_SYMBOL(neigh_create);
+EXPORT_SYMBOL(__neigh_create);
static u32 pneigh_hash(const void *pkey, int key_len)
{
@@ -1199,10 +1201,23 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
write_unlock_bh(&neigh->lock);
rcu_read_lock();
- /* On shaper/eql skb->dst->neighbour != neigh :( */
- if (dst && (n2 = dst_get_neighbour_noref(dst)) != NULL)
- n1 = n2;
+
+ /* Why not just use 'neigh' as-is? The problem is that
+ * things such as shaper, eql, and sch_teql can end up
+ * using alternative, different, neigh objects to output
+ * the packet in the output path. So what we need to do
+ * here is re-lookup the top-level neigh in the path so
+ * we can reinject the packet there.
+ */
+ n2 = NULL;
+ if (dst) {
+ n2 = dst_neigh_lookup_skb(dst, skb);
+ if (n2)
+ n1 = n2;
+ }
n1->output(n1, skb);
+ if (n2)
+ neigh_release(n2);
rcu_read_unlock();
write_lock_bh(&neigh->lock);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index fdf9e61d0651..72607174ea5a 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -417,72 +417,6 @@ static struct attribute_group netstat_group = {
.name = "statistics",
.attrs = netstat_attrs,
};
-
-#ifdef CONFIG_WIRELESS_EXT_SYSFS
-/* helper function that does all the locking etc for wireless stats */
-static ssize_t wireless_show(struct device *d, char *buf,
- ssize_t (*format)(const struct iw_statistics *,
- char *))
-{
- struct net_device *dev = to_net_dev(d);
- const struct iw_statistics *iw;
- ssize_t ret = -EINVAL;
-
- if (!rtnl_trylock())
- return restart_syscall();
- if (dev_isalive(dev)) {
- iw = get_wireless_stats(dev);
- if (iw)
- ret = (*format)(iw, buf);
- }
- rtnl_unlock();
-
- return ret;
-}
-
-/* show function template for wireless fields */
-#define WIRELESS_SHOW(name, field, format_string) \
-static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
-{ \
- return sprintf(buf, format_string, iw->field); \
-} \
-static ssize_t show_iw_##name(struct device *d, \
- struct device_attribute *attr, char *buf) \
-{ \
- return wireless_show(d, buf, format_iw_##name); \
-} \
-static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
-
-WIRELESS_SHOW(status, status, fmt_hex);
-WIRELESS_SHOW(link, qual.qual, fmt_dec);
-WIRELESS_SHOW(level, qual.level, fmt_dec);
-WIRELESS_SHOW(noise, qual.noise, fmt_dec);
-WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
-WIRELESS_SHOW(crypt, discard.code, fmt_dec);
-WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
-WIRELESS_SHOW(misc, discard.misc, fmt_dec);
-WIRELESS_SHOW(retries, discard.retries, fmt_dec);
-WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
-
-static struct attribute *wireless_attrs[] = {
- &dev_attr_status.attr,
- &dev_attr_link.attr,
- &dev_attr_level.attr,
- &dev_attr_noise.attr,
- &dev_attr_nwid.attr,
- &dev_attr_crypt.attr,
- &dev_attr_fragment.attr,
- &dev_attr_retries.attr,
- &dev_attr_misc.attr,
- &dev_attr_beacon.attr,
- NULL
-};
-
-static struct attribute_group wireless_group = {
- .name = "wireless",
- .attrs = wireless_attrs,
-};
-#endif
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_RPS
@@ -1463,14 +1397,6 @@ int netdev_register_kobject(struct net_device *net)
groups++;
*groups++ = &netstat_group;
-#ifdef CONFIG_WIRELESS_EXT_SYSFS
- if (net->ieee80211_ptr)
- *groups++ = &wireless_group;
-#ifdef CONFIG_WIRELESS_EXT
- else if (net->wireless_handlers)
- *groups++ = &wireless_group;
-#endif
-#endif
#endif /* CONFIG_SYSFS */
error = device_add(dev);
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index dddbacb8f28c..42f1e1c7514f 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -27,7 +27,9 @@ static DEFINE_MUTEX(net_mutex);
LIST_HEAD(net_namespace_list);
EXPORT_SYMBOL_GPL(net_namespace_list);
-struct net init_net;
+struct net init_net = {
+ .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
+};
EXPORT_SYMBOL(init_net);
#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index f9f40b932e4b..b4c90e42b443 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -715,14 +715,16 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
}
EXPORT_SYMBOL(netpoll_parse_options);
-int __netpoll_setup(struct netpoll *np)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
- struct net_device *ndev = np->dev;
struct netpoll_info *npinfo;
const struct net_device_ops *ops;
unsigned long flags;
int err;
+ np->dev = ndev;
+ strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
+
if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
!ndev->netdev_ops->ndo_poll_controller) {
np_err(np, "%s doesn't support polling, aborting\n",
@@ -851,13 +853,11 @@ int netpoll_setup(struct netpoll *np)
np_info(np, "local IP %pI4\n", &np->local_ip);
}
- np->dev = ndev;
-
/* fill up the skb queue */
refill_skbs();
rtnl_lock();
- err = __netpoll_setup(np);
+ err = __netpoll_setup(np, ndev);
rtnl_unlock();
if (err)
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 5b8aa2fae48b..ed0c0431fcd8 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -25,6 +25,8 @@
#include <net/sock.h>
#include <net/netprio_cgroup.h>
+#include <linux/fdtable.h>
+
#define PRIOIDX_SZ 128
static unsigned long prioidx_map[PRIOIDX_SZ];
@@ -49,8 +51,9 @@ static int get_prioidx(u32 *prio)
return -ENOSPC;
}
set_bit(prioidx, prioidx_map);
+ if (atomic_read(&max_prioidx) < prioidx)
+ atomic_set(&max_prioidx, prioidx);
spin_unlock_irqrestore(&prioidx_map_lock, flags);
- atomic_set(&max_prioidx, prioidx);
*prio = prioidx;
return 0;
}
@@ -64,7 +67,7 @@ static void put_prioidx(u32 idx)
spin_unlock_irqrestore(&prioidx_map_lock, flags);
}
-static void extend_netdev_table(struct net_device *dev, u32 new_len)
+static int extend_netdev_table(struct net_device *dev, u32 new_len)
{
size_t new_size = sizeof(struct netprio_map) +
((sizeof(u32) * new_len));
@@ -76,7 +79,7 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
if (!new_priomap) {
pr_warn("Unable to alloc new priomap!\n");
- return;
+ return -ENOMEM;
}
for (i = 0;
@@ -89,46 +92,79 @@ static void extend_netdev_table(struct net_device *dev, u32 new_len)
rcu_assign_pointer(dev->priomap, new_priomap);
if (old_priomap)
kfree_rcu(old_priomap, rcu);
+ return 0;
+}
+
+static int write_update_netdev_table(struct net_device *dev)
+{
+ int ret = 0;
+ u32 max_len;
+ struct netprio_map *map;
+
+ rtnl_lock();
+ max_len = atomic_read(&max_prioidx) + 1;
+ map = rtnl_dereference(dev->priomap);
+ if (!map || map->priomap_len < max_len)
+ ret = extend_netdev_table(dev, max_len);
+ rtnl_unlock();
+
+ return ret;
}
-static void update_netdev_tables(void)
+static int update_netdev_tables(void)
{
+ int ret = 0;
struct net_device *dev;
- u32 max_len = atomic_read(&max_prioidx) + 1;
+ u32 max_len;
struct netprio_map *map;
rtnl_lock();
+ max_len = atomic_read(&max_prioidx) + 1;
for_each_netdev(&init_net, dev) {
map = rtnl_dereference(dev->priomap);
- if ((!map) ||
- (map->priomap_len < max_len))
- extend_netdev_table(dev, max_len);
+ /*
+ * don't allocate priomap if we didn't
+ * change net_prio.ifpriomap (map == NULL),
+ * this will speed up skb_update_prio.
+ */
+ if (map && map->priomap_len < max_len) {
+ ret = extend_netdev_table(dev, max_len);
+ if (ret < 0)
+ break;
+ }
}
rtnl_unlock();
+ return ret;
}
static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
{
struct cgroup_netprio_state *cs;
- int ret;
+ int ret = -EINVAL;
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
if (!cs)
return ERR_PTR(-ENOMEM);
- if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx) {
- kfree(cs);
- return ERR_PTR(-EINVAL);
- }
+ if (cgrp->parent && cgrp_netprio_state(cgrp->parent)->prioidx)
+ goto out;
ret = get_prioidx(&cs->prioidx);
- if (ret != 0) {
+ if (ret < 0) {
pr_warn("No space in priority index array\n");
- kfree(cs);
- return ERR_PTR(ret);
+ goto out;
+ }
+
+ ret = update_netdev_tables();
+ if (ret < 0) {
+ put_prioidx(cs->prioidx);
+ goto out;
}
return &cs->css;
+out:
+ kfree(cs);
+ return ERR_PTR(ret);
}
static void cgrp_destroy(struct cgroup *cgrp)
@@ -141,7 +177,7 @@ static void cgrp_destroy(struct cgroup *cgrp)
rtnl_lock();
for_each_netdev(&init_net, dev) {
map = rtnl_dereference(dev->priomap);
- if (map)
+ if (map && cs->prioidx < map->priomap_len)
map->priomap[cs->prioidx] = 0;
}
rtnl_unlock();
@@ -165,7 +201,7 @@ static int read_priomap(struct cgroup *cont, struct cftype *cft,
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev) {
map = rcu_dereference(dev->priomap);
- priority = map ? map->priomap[prioidx] : 0;
+ priority = (map && prioidx < map->priomap_len) ? map->priomap[prioidx] : 0;
cb->fill(cb, dev->name, priority);
}
rcu_read_unlock();
@@ -198,7 +234,7 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
/*
*Separate the devname from the associated priority
- *and advance the priostr poitner to the priority value
+ *and advance the priostr pointer to the priority value
*/
*priostr = '\0';
priostr++;
@@ -220,13 +256,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
if (!dev)
goto out_free_devname;
- update_netdev_tables();
- ret = 0;
+ ret = write_update_netdev_table(dev);
+ if (ret < 0)
+ goto out_put_dev;
+
rcu_read_lock();
map = rcu_dereference(dev->priomap);
if (map)
map->priomap[prioidx] = priority;
rcu_read_unlock();
+
+out_put_dev:
dev_put(dev);
out_free_devname:
@@ -234,6 +274,56 @@ out_free_devname:
return ret;
}
+void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+ struct task_struct *p;
+ char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
+
+ if (!tmp) {
+ pr_warn("Unable to attach cgrp due to alloc failure!\n");
+ return;
+ }
+
+ cgroup_taskset_for_each(p, cgrp, tset) {
+ unsigned int fd;
+ struct fdtable *fdt;
+ struct files_struct *files;
+
+ task_lock(p);
+ files = p->files;
+ if (!files) {
+ task_unlock(p);
+ continue;
+ }
+
+ rcu_read_lock();
+ fdt = files_fdtable(files);
+ for (fd = 0; fd < fdt->max_fds; fd++) {
+ char *path;
+ struct file *file;
+ struct socket *sock;
+ unsigned long s;
+ int rv, err = 0;
+
+ file = fcheck_files(files, fd);
+ if (!file)
+ continue;
+
+ path = d_path(&file->f_path, tmp, PAGE_SIZE);
+ rv = sscanf(path, "socket:[%lu]", &s);
+ if (rv <= 0)
+ continue;
+
+ sock = sock_from_file(file, &err);
+ if (!err)
+ sock_update_netprioidx(sock->sk, p);
+ }
+ rcu_read_unlock();
+ task_unlock(p);
+ }
+ kfree(tmp);
+}
+
static struct cftype ss_files[] = {
{
.name = "prioidx",
@@ -251,6 +341,7 @@ struct cgroup_subsys net_prio_subsys = {
.name = "net_prio",
.create = cgrp_create,
.destroy = cgrp_destroy,
+ .attach = net_prio_attach,
#ifdef CONFIG_NETPRIO_CGROUP
.subsys_id = net_prio_subsys_id,
#endif
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 21318d15bbc3..334b930e0de3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -541,19 +541,6 @@ static const int rta_max[RTM_NR_FAMILIES] =
[RTM_FAM(RTM_NEWACTION)] = TCAA_MAX,
};
-void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
-{
- struct rtattr *rta;
- int size = RTA_LENGTH(attrlen);
-
- rta = (struct rtattr *)skb_put(skb, RTA_ALIGN(size));
- rta->rta_type = attrtype;
- rta->rta_len = size;
- memcpy(RTA_DATA(rta), data, attrlen);
- memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
-}
-EXPORT_SYMBOL(__rta_fill);
-
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
struct sock *rtnl = net->rtnl;
@@ -628,7 +615,7 @@ nla_put_failure:
EXPORT_SYMBOL(rtnetlink_put_metrics);
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
- u32 ts, u32 tsage, long expires, u32 error)
+ long expires, u32 error)
{
struct rta_cacheinfo ci = {
.rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
@@ -636,8 +623,6 @@ int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
.rta_clntref = atomic_read(&(dst->__refcnt)),
.rta_error = error,
.rta_id = id,
- .rta_ts = ts,
- .rta_tsage = tsage,
};
if (expires)
@@ -786,6 +771,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(4) /* IFLA_MASTER */
+ nla_total_size(4) /* IFLA_PROMISCUITY */
+ + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
+ + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(ext_filter_mask
@@ -904,6 +891,10 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
nla_put_u32(skb, IFLA_GROUP, dev->group) ||
nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
+ nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
+#ifdef CONFIG_RPS
+ nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
+#endif
(dev->ifindex != dev->iflink &&
nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
(dev->master &&
@@ -1121,6 +1112,8 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_AF_SPEC] = { .type = NLA_NESTED },
[IFLA_EXT_MASK] = { .type = NLA_U32 },
[IFLA_PROMISCUITY] = { .type = NLA_U32 },
+ [IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
+ [IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
};
EXPORT_SYMBOL(ifla_policy);
@@ -1639,17 +1632,22 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
{
int err;
struct net_device *dev;
- unsigned int num_queues = 1;
+ unsigned int num_tx_queues = 1;
+ unsigned int num_rx_queues = 1;
- if (ops->get_tx_queues) {
- err = ops->get_tx_queues(src_net, tb);
- if (err < 0)
- goto err;
- num_queues = err;
- }
+ if (tb[IFLA_NUM_TX_QUEUES])
+ num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
+ else if (ops->get_num_tx_queues)
+ num_tx_queues = ops->get_num_tx_queues();
+
+ if (tb[IFLA_NUM_RX_QUEUES])
+ num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
+ else if (ops->get_num_rx_queues)
+ num_rx_queues = ops->get_num_rx_queues();
err = -ENOMEM;
- dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
+ dev = alloc_netdev_mqs(ops->priv_size, ifname, ops->setup,
+ num_tx_queues, num_rx_queues);
if (!dev)
goto err;
@@ -2189,7 +2187,7 @@ skip:
}
/**
- * ndo_dflt_fdb_dump: default netdevice operation to dump an FDB table.
+ * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
* @nlh: netlink message header
* @dev: netdevice
*
@@ -2366,8 +2364,13 @@ static struct notifier_block rtnetlink_dev_notifier = {
static int __net_init rtnetlink_net_init(struct net *net)
{
struct sock *sk;
- sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
- rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
+ struct netlink_kernel_cfg cfg = {
+ .groups = RTNLGRP_MAX,
+ .input = rtnetlink_rcv,
+ .cb_mutex = &rtnl_mutex,
+ };
+
+ sk = netlink_kernel_create(net, NETLINK_ROUTE, THIS_MODULE, &cfg);
if (!sk)
return -ENOMEM;
net->rtnl = sk;
diff --git a/net/core/scm.c b/net/core/scm.c
index 611c5efd4cb0..8f6ccfd68ef4 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -109,25 +109,9 @@ void __scm_destroy(struct scm_cookie *scm)
if (fpl) {
scm->fp = NULL;
- if (current->scm_work_list) {
- list_add_tail(&fpl->list, current->scm_work_list);
- } else {
- LIST_HEAD(work_list);
-
- current->scm_work_list = &work_list;
-
- list_add(&fpl->list, &work_list);
- while (!list_empty(&work_list)) {
- fpl = list_first_entry(&work_list, struct scm_fp_list, list);
-
- list_del(&fpl->list);
- for (i=fpl->count-1; i>=0; i--)
- fput(fpl->fp[i]);
- kfree(fpl);
- }
-
- current->scm_work_list = NULL;
- }
+ for (i=fpl->count-1; i>=0; i--)
+ fput(fpl->fp[i]);
+ kfree(fpl);
}
}
EXPORT_SYMBOL(__scm_destroy);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d78671e9d545..368f65c15e4f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -160,8 +160,8 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
* @node: numa node to allocate memory on
*
* Allocate a new &sk_buff. The returned buffer has no headroom and a
- * tail room of size bytes. The object has a reference count of one.
- * The return is the buffer. On a failure the return is %NULL.
+ * tail room of at least size bytes. The object has a reference count
+ * of one. The return is the buffer. On a failure the return is %NULL.
*
* Buffers may only be allocated from interrupts using a @gfp_mask of
* %GFP_ATOMIC.
@@ -296,9 +296,12 @@ EXPORT_SYMBOL(build_skb);
struct netdev_alloc_cache {
struct page *page;
unsigned int offset;
+ unsigned int pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
+#define NETDEV_PAGECNT_BIAS (PAGE_SIZE / SMP_CACHE_BYTES)
+
/**
* netdev_alloc_frag - allocate a page fragment
* @fragsz: fragment size
@@ -317,17 +320,26 @@ void *netdev_alloc_frag(unsigned int fragsz)
if (unlikely(!nc->page)) {
refill:
nc->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ if (unlikely(!nc->page))
+ goto end;
+recycle:
+ atomic_set(&nc->page->_count, NETDEV_PAGECNT_BIAS);
+ nc->pagecnt_bias = NETDEV_PAGECNT_BIAS;
nc->offset = 0;
}
- if (likely(nc->page)) {
- if (nc->offset + fragsz > PAGE_SIZE) {
- put_page(nc->page);
- goto refill;
- }
- data = page_address(nc->page) + nc->offset;
- nc->offset += fragsz;
- get_page(nc->page);
+
+ if (nc->offset + fragsz > PAGE_SIZE) {
+ /* avoid unnecessary locked operations if possible */
+ if ((atomic_read(&nc->page->_count) == nc->pagecnt_bias) ||
+ atomic_sub_and_test(nc->pagecnt_bias, &nc->page->_count))
+ goto recycle;
+ goto refill;
}
+
+ data = page_address(nc->page) + nc->offset;
+ nc->offset += fragsz;
+ nc->pagecnt_bias--;
+end:
local_irq_restore(flags);
return data;
}
@@ -353,7 +365,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (fragsz <= PAGE_SIZE && !(gfp_mask & __GFP_WAIT)) {
+ if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
void *data = netdev_alloc_frag(fragsz);
if (likely(data)) {
@@ -713,7 +725,8 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
}
EXPORT_SYMBOL_GPL(skb_morph);
-/* skb_copy_ubufs - copy userspace skb frags buffers to kernel
+/**
+ * skb_copy_ubufs - copy userspace skb frags buffers to kernel
* @skb: the skb to modify
* @gfp_mask: allocation priority
*
@@ -738,7 +751,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
u8 *vaddr;
skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- page = alloc_page(GFP_ATOMIC);
+ page = alloc_page(gfp_mask);
if (!page) {
while (head) {
struct page *next = (struct page *)head->private;
@@ -756,22 +769,22 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
}
/* skb frags release userspace buffers */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ for (i = 0; i < num_frags; i++)
skb_frag_unref(skb, i);
uarg->callback(uarg);
/* skb frags point to kernel buffers */
- for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
- __skb_fill_page_desc(skb, i-1, head, 0,
- skb_shinfo(skb)->frags[i - 1].size);
+ for (i = num_frags - 1; i >= 0; i--) {
+ __skb_fill_page_desc(skb, i, head, 0,
+ skb_shinfo(skb)->frags[i].size);
head = (struct page *)head->private;
}
skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
return 0;
}
-
+EXPORT_SYMBOL_GPL(skb_copy_ubufs);
/**
* skb_clone - duplicate an sk_buff
@@ -791,10 +804,8 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *n;
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, gfp_mask))
- return NULL;
- }
+ if (skb_orphan_frags(skb, gfp_mask))
+ return NULL;
n = skb + 1;
if (skb->fclone == SKB_FCLONE_ORIG &&
@@ -914,12 +925,10 @@ struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
if (skb_shinfo(skb)->nr_frags) {
int i;
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, gfp_mask)) {
- kfree_skb(n);
- n = NULL;
- goto out;
- }
+ if (skb_orphan_frags(skb, gfp_mask)) {
+ kfree_skb(n);
+ n = NULL;
+ goto out;
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
@@ -992,10 +1001,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
*/
if (skb_cloned(skb)) {
/* copy this zero copy skb frags */
- if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
- if (skb_copy_ubufs(skb, gfp_mask))
- goto nofrags;
- }
+ if (skb_orphan_frags(skb, gfp_mask))
+ goto nofrags;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
skb_frag_ref(skb, i);
@@ -1755,6 +1762,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
+ .nr_pages_max = MAX_SKB_FRAGS,
.flags = flags,
.ops = &sock_pipe_buf_ops,
.spd_release = sock_spd_release,
@@ -2613,7 +2621,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
EXPORT_SYMBOL(skb_find_text);
/**
- * skb_append_datato_frags: - append the user data to a skb
+ * skb_append_datato_frags - append the user data to a skb
* @sk: sock structure
* @skb: skb structure to be appened with user data.
* @getfrag: call back function to be used for getting the user data
diff --git a/net/core/sock.c b/net/core/sock.c
index 9e5b71fda6ec..2676a88f533e 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1180,12 +1180,12 @@ void sock_update_classid(struct sock *sk)
}
EXPORT_SYMBOL(sock_update_classid);
-void sock_update_netprioidx(struct sock *sk)
+void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
{
if (in_interrupt())
return;
- sk->sk_cgrp_prioidx = task_netprioidx(current);
+ sk->sk_cgrp_prioidx = task_netprioidx(task);
}
EXPORT_SYMBOL_GPL(sock_update_netprioidx);
#endif
@@ -1215,7 +1215,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
atomic_set(&sk->sk_wmem_alloc, 1);
sock_update_classid(sk);
- sock_update_netprioidx(sk);
+ sock_update_netprioidx(sk, current);
}
return sk;
@@ -1465,6 +1465,11 @@ void sock_rfree(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_rfree);
+void sock_edemux(struct sk_buff *skb)
+{
+ sock_put(skb->sk);
+}
+EXPORT_SYMBOL(sock_edemux);
int sock_i_uid(struct sock *sk)
{
@@ -2154,6 +2159,10 @@ void release_sock(struct sock *sk)
spin_lock_bh(&sk->sk_lock.slock);
if (sk->sk_backlog.tail)
__release_sock(sk);
+
+ if (sk->sk_prot->release_cb)
+ sk->sk_prot->release_cb(sk);
+
sk->sk_lock.owned = 0;
if (waitqueue_active(&sk->sk_lock.wq))
wake_up(&sk->sk_lock.wq);
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index 5fd146720f39..9d8755e4a7a5 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -4,7 +4,6 @@
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <linux/module.h>
-#include <linux/rtnetlink.h>
#include <net/sock.h>
#include <linux/inet_diag.h>
@@ -35,9 +34,7 @@ EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
{
- __u32 *mem;
-
- mem = RTA_DATA(__RTA_PUT(skb, attrtype, SK_MEMINFO_VARS * sizeof(__u32)));
+ u32 mem[SK_MEMINFO_VARS];
mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
@@ -46,11 +43,9 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
+ mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
- return 0;
-
-rtattr_failure:
- return -EMSGSIZE;
+ return nla_put(skb, attrtype, sizeof(mem), &mem);
}
EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
@@ -120,7 +115,7 @@ static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int err;
- struct sock_diag_req *req = NLMSG_DATA(nlh);
+ struct sock_diag_req *req = nlmsg_data(nlh);
const struct sock_diag_handler *hndl;
if (nlmsg_len(nlh) < sizeof(*req))
@@ -171,19 +166,36 @@ static void sock_diag_rcv(struct sk_buff *skb)
mutex_unlock(&sock_diag_mutex);
}
-struct sock *sock_diag_nlsk;
-EXPORT_SYMBOL_GPL(sock_diag_nlsk);
+static int __net_init diag_net_init(struct net *net)
+{
+ struct netlink_kernel_cfg cfg = {
+ .input = sock_diag_rcv,
+ };
+
+ net->diag_nlsk = netlink_kernel_create(net, NETLINK_SOCK_DIAG,
+ THIS_MODULE, &cfg);
+ return net->diag_nlsk == NULL ? -ENOMEM : 0;
+}
+
+static void __net_exit diag_net_exit(struct net *net)
+{
+ netlink_kernel_release(net->diag_nlsk);
+ net->diag_nlsk = NULL;
+}
+
+static struct pernet_operations diag_net_ops = {
+ .init = diag_net_init,
+ .exit = diag_net_exit,
+};
static int __init sock_diag_init(void)
{
- sock_diag_nlsk = netlink_kernel_create(&init_net, NETLINK_SOCK_DIAG, 0,
- sock_diag_rcv, NULL, THIS_MODULE);
- return sock_diag_nlsk == NULL ? -ENOMEM : 0;
+ return register_pernet_subsys(&diag_net_ops);
}
static void __exit sock_diag_exit(void)
{
- netlink_kernel_release(sock_diag_nlsk);
+ unregister_pernet_subsys(&diag_net_ops);
}
module_init(sock_diag_init);
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 656c7c75b192..81f2bb62dea3 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -28,8 +28,7 @@
#include <linux/module.h>
#include <net/sock.h>
-/**
- * Data Center Bridging (DCB) is a collection of Ethernet enhancements
+/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
* intended to allow network traffic with differing requirements
* (highly reliable, no drops vs. best effort vs. low latency) to operate
* and co-exist on Ethernet. Current DCB features are:
@@ -196,92 +195,66 @@ static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
-/* standard netlink reply call */
-static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
- u32 seq, u16 flags)
+static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
+ u32 flags, struct nlmsghdr **nlhp)
{
- struct sk_buff *dcbnl_skb;
+ struct sk_buff *skb;
struct dcbmsg *dcb;
struct nlmsghdr *nlh;
- int ret = -EINVAL;
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- return ret;
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return NULL;
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);
+ nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
+ BUG_ON(!nlh);
- dcb = NLMSG_DATA(nlh);
+ dcb = nlmsg_data(nlh);
dcb->dcb_family = AF_UNSPEC;
dcb->cmd = cmd;
dcb->dcb_pad = 0;
- ret = nla_put_u8(dcbnl_skb, attr, value);
- if (ret)
- goto err;
+ if (nlhp)
+ *nlhp = nlh;
- /* end the message, assign the nlmsg_len. */
- nlmsg_end(dcbnl_skb, nlh);
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- return -EINVAL;
-
- return 0;
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
- return ret;
+ return skb;
}
-static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret = -EINVAL;
-
/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
if (!netdev->dcbnl_ops->getstate)
- return ret;
-
- ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
- DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
+ return -EOPNOTSUPP;
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_STATE,
+ netdev->dcbnl_ops->getstate(netdev));
}
-static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
u8 value;
- int ret = -EINVAL;
+ int ret;
int i;
int getall = 0;
- if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
- return ret;
+ if (!tb[DCB_ATTR_PFC_CFG])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->getpfccfg)
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
tb[DCB_ATTR_PFC_CFG],
dcbnl_pfc_up_nest);
if (ret)
- goto err_out;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto err_out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_PFC_GCFG;
+ return ret;
- nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
+ nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
if (!nest)
- goto err;
+ return -EMSGSIZE;
if (data[DCB_PFC_UP_ATTR_ALL])
getall = 1;
@@ -292,103 +265,53 @@ static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
&value);
- ret = nla_put_u8(dcbnl_skb, i, value);
-
+ ret = nla_put_u8(skb, i, value);
if (ret) {
- nla_nest_cancel(dcbnl_skb, nest);
- goto err;
+ nla_nest_cancel(skb, nest);
+ return ret;
}
}
- nla_nest_end(dcbnl_skb, nest);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto err_out;
+ nla_nest_end(skb, nest);
return 0;
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
-err_out:
- return -EINVAL;
}
-static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
u8 perm_addr[MAX_ADDR_LEN];
- int ret = -EINVAL;
if (!netdev->dcbnl_ops->getpermhwaddr)
- return ret;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto err_out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_GPERM_HWADDR;
+ return -EOPNOTSUPP;
netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
- ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
- perm_addr);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto err_out;
-
- return 0;
-
-nlmsg_failure:
- kfree_skb(dcbnl_skb);
-err_out:
- return -EINVAL;
+ return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
}
-static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
u8 value;
- int ret = -EINVAL;
+ int ret;
int i;
int getall = 0;
- if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
- return ret;
+ if (!tb[DCB_ATTR_CAP])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->getcap)
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
dcbnl_cap_nest);
if (ret)
- goto err_out;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto err_out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_GCAP;
+ return ret;
- nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
+ nest = nla_nest_start(skb, DCB_ATTR_CAP);
if (!nest)
- goto err;
+ return -EMSGSIZE;
if (data[DCB_CAP_ATTR_ALL])
getall = 1;
@@ -398,69 +321,41 @@ static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
continue;
if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
- ret = nla_put_u8(dcbnl_skb, i, value);
-
+ ret = nla_put_u8(skb, i, value);
if (ret) {
- nla_nest_cancel(dcbnl_skb, nest);
- goto err;
+ nla_nest_cancel(skb, nest);
+ return ret;
}
}
}
- nla_nest_end(dcbnl_skb, nest);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto err_out;
+ nla_nest_end(skb, nest);
return 0;
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
-err_out:
- return -EINVAL;
}
-static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
u8 value;
- int ret = -EINVAL;
+ int ret;
int i;
int getall = 0;
- if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
- return ret;
+ if (!tb[DCB_ATTR_NUMTCS])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->getnumtcs)
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
dcbnl_numtcs_nest);
- if (ret) {
- ret = -EINVAL;
- goto err_out;
- }
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb) {
- ret = -EINVAL;
- goto err_out;
- }
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_GNUMTCS;
+ if (ret)
+ return ret;
- nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
- if (!nest) {
- ret = -EINVAL;
- goto err;
- }
+ nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
+ if (!nest)
+ return -EMSGSIZE;
if (data[DCB_NUMTCS_ATTR_ALL])
getall = 1;
@@ -471,53 +366,37 @@ static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
if (!ret) {
- ret = nla_put_u8(dcbnl_skb, i, value);
-
+ ret = nla_put_u8(skb, i, value);
if (ret) {
- nla_nest_cancel(dcbnl_skb, nest);
- ret = -EINVAL;
- goto err;
+ nla_nest_cancel(skb, nest);
+ return ret;
}
- } else {
- goto err;
- }
- }
- nla_nest_end(dcbnl_skb, nest);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret) {
- ret = -EINVAL;
- goto err_out;
+ } else
+ return -EINVAL;
}
+ nla_nest_end(skb, nest);
return 0;
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
-err_out:
- return ret;
}
-static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
- int ret = -EINVAL;
+ int ret;
u8 value;
int i;
- if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
- return ret;
+ if (!tb[DCB_ATTR_NUMTCS])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setnumtcs)
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
dcbnl_numtcs_nest);
-
- if (ret) {
- ret = -EINVAL;
- goto err;
- }
+ if (ret)
+ return ret;
for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
if (data[i] == NULL)
@@ -526,84 +405,68 @@ static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
value = nla_get_u8(data[i]);
ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
-
if (ret)
- goto operr;
+ break;
}
-operr:
- ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
- DCB_ATTR_NUMTCS, pid, seq, flags);
-
-err:
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}
-static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret = -EINVAL;
-
if (!netdev->dcbnl_ops->getpfcstate)
- return ret;
-
- ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
- DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
- pid, seq, flags);
+ return -EOPNOTSUPP;
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
+ netdev->dcbnl_ops->getpfcstate(netdev));
}
-static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret = -EINVAL;
u8 value;
- if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
- return ret;
+ if (!tb[DCB_ATTR_PFC_STATE])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setpfcstate)
+ return -EOPNOTSUPP;
value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
netdev->dcbnl_ops->setpfcstate(netdev, value);
- ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
- pid, seq, flags);
-
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
}
-static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *app_nest;
struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
u16 id;
u8 up, idtype;
- int ret = -EINVAL;
+ int ret;
if (!tb[DCB_ATTR_APP])
- goto out;
+ return -EINVAL;
ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
dcbnl_app_nest);
if (ret)
- goto out;
+ return ret;
- ret = -EINVAL;
/* all must be non-null */
if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
(!app_tb[DCB_APP_ATTR_ID]))
- goto out;
+ return -EINVAL;
/* either by eth type or by socket number */
idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
(idtype != DCB_APP_IDTYPE_PORTNUM))
- goto out;
+ return -EINVAL;
id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
@@ -617,138 +480,106 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
up = dcb_getapp(netdev, &app);
}
- /* send this back */
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_GAPP;
-
- app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+ app_nest = nla_nest_start(skb, DCB_ATTR_APP);
if (!app_nest)
- goto out_cancel;
+ return -EMSGSIZE;
- ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
+ ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
if (ret)
goto out_cancel;
- ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
+ ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
if (ret)
goto out_cancel;
- ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
+ ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
if (ret)
goto out_cancel;
- nla_nest_end(dcbnl_skb, app_nest);
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto nlmsg_failure;
+ nla_nest_end(skb, app_nest);
- goto out;
+ return 0;
out_cancel:
- nla_nest_cancel(dcbnl_skb, app_nest);
-nlmsg_failure:
- kfree_skb(dcbnl_skb);
-out:
+ nla_nest_cancel(skb, app_nest);
return ret;
}
-static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int err, ret = -EINVAL;
+ int ret;
u16 id;
u8 up, idtype;
struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
if (!tb[DCB_ATTR_APP])
- goto out;
+ return -EINVAL;
ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
dcbnl_app_nest);
if (ret)
- goto out;
+ return ret;
- ret = -EINVAL;
/* all must be non-null */
if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
(!app_tb[DCB_APP_ATTR_ID]) ||
(!app_tb[DCB_APP_ATTR_PRIORITY]))
- goto out;
+ return -EINVAL;
/* either by eth type or by socket number */
idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
(idtype != DCB_APP_IDTYPE_PORTNUM))
- goto out;
+ return -EINVAL;
id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
if (netdev->dcbnl_ops->setapp) {
- err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
+ ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
} else {
struct dcb_app app;
app.selector = idtype;
app.protocol = id;
app.priority = up;
- err = dcb_setapp(netdev, &app);
+ ret = dcb_setapp(netdev, &app);
}
- ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
- pid, seq, flags);
+ ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
-out:
+
return ret;
}
-static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags, int dir)
+static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ struct nlattr **tb, struct sk_buff *skb, int dir)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *pg_nest, *param_nest, *data;
struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
u8 prio, pgid, tc_pct, up_map;
- int ret = -EINVAL;
+ int ret;
int getall = 0;
int i;
- if (!tb[DCB_ATTR_PG_CFG] ||
- !netdev->dcbnl_ops->getpgtccfgtx ||
+ if (!tb[DCB_ATTR_PG_CFG])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->getpgtccfgtx ||
!netdev->dcbnl_ops->getpgtccfgrx ||
!netdev->dcbnl_ops->getpgbwgcfgtx ||
!netdev->dcbnl_ops->getpgbwgcfgrx)
- return ret;
+ return -EOPNOTSUPP;
ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
-
if (ret)
- goto err_out;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto err_out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
+ return ret;
- pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
+ pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
if (!pg_nest)
- goto err;
+ return -EMSGSIZE;
if (pg_tb[DCB_PG_ATTR_TC_ALL])
getall = 1;
@@ -766,7 +597,7 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
if (ret)
goto err_pg;
- param_nest = nla_nest_start(dcbnl_skb, i);
+ param_nest = nla_nest_start(skb, i);
if (!param_nest)
goto err_pg;
@@ -789,33 +620,33 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
- ret = nla_put_u8(dcbnl_skb,
+ ret = nla_put_u8(skb,
DCB_TC_ATTR_PARAM_PGID, pgid);
if (ret)
goto err_param;
}
if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
- ret = nla_put_u8(dcbnl_skb,
+ ret = nla_put_u8(skb,
DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
if (ret)
goto err_param;
}
if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
- ret = nla_put_u8(dcbnl_skb,
+ ret = nla_put_u8(skb,
DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
if (ret)
goto err_param;
}
if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
param_tb[DCB_TC_ATTR_PARAM_ALL]) {
- ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
+ ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
tc_pct);
if (ret)
goto err_param;
}
- nla_nest_end(dcbnl_skb, param_nest);
+ nla_nest_end(skb, param_nest);
}
if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
@@ -838,80 +669,71 @@ static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
}
- ret = nla_put_u8(dcbnl_skb, i, tc_pct);
-
+ ret = nla_put_u8(skb, i, tc_pct);
if (ret)
goto err_pg;
}
- nla_nest_end(dcbnl_skb, pg_nest);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto err_out;
+ nla_nest_end(skb, pg_nest);
return 0;
err_param:
- nla_nest_cancel(dcbnl_skb, param_nest);
+ nla_nest_cancel(skb, param_nest);
err_pg:
- nla_nest_cancel(dcbnl_skb, pg_nest);
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
-err_out:
- ret = -EINVAL;
- return ret;
+ nla_nest_cancel(skb, pg_nest);
+
+ return -EMSGSIZE;
}
-static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
+ return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}
-static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
+ return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}
-static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret = -EINVAL;
u8 value;
- if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
- return ret;
+ if (!tb[DCB_ATTR_STATE])
+ return -EINVAL;
- value = nla_get_u8(tb[DCB_ATTR_STATE]);
+ if (!netdev->dcbnl_ops->setstate)
+ return -EOPNOTSUPP;
- ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
- RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
- pid, seq, flags);
+ value = nla_get_u8(tb[DCB_ATTR_STATE]);
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_STATE,
+ netdev->dcbnl_ops->setstate(netdev, value));
}
-static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
int i;
- int ret = -EINVAL;
+ int ret;
u8 value;
- if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
- return ret;
+ if (!tb[DCB_ATTR_PFC_CFG])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setpfccfg)
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
tb[DCB_ATTR_PFC_CFG],
dcbnl_pfc_up_nest);
if (ret)
- goto err;
+ return ret;
for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
if (data[i] == NULL)
@@ -921,50 +743,53 @@ static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
}
- ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
- pid, seq, flags);
-err:
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
}
-static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret = -EINVAL;
+ int ret;
- if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
- return ret;
+ if (!tb[DCB_ATTR_SET_ALL])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setall)
+ return -EOPNOTSUPP;
- ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
- DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
+ ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
+ netdev->dcbnl_ops->setall(netdev));
dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
return ret;
}
-static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags, int dir)
+static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb,
+ int dir)
{
struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
- int ret = -EINVAL;
+ int ret;
int i;
u8 pgid;
u8 up_map;
u8 prio;
u8 tc_pct;
- if (!tb[DCB_ATTR_PG_CFG] ||
- !netdev->dcbnl_ops->setpgtccfgtx ||
+ if (!tb[DCB_ATTR_PG_CFG])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setpgtccfgtx ||
!netdev->dcbnl_ops->setpgtccfgrx ||
!netdev->dcbnl_ops->setpgbwgcfgtx ||
!netdev->dcbnl_ops->setpgbwgcfgrx)
- return ret;
+ return -EOPNOTSUPP;
ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
if (ret)
- goto err;
+ return ret;
for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
if (!pg_tb[i])
@@ -973,7 +798,7 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
pg_tb[i], dcbnl_tc_param_nest);
if (ret)
- goto err;
+ return ret;
pgid = DCB_ATTR_VALUE_UNDEFINED;
prio = DCB_ATTR_VALUE_UNDEFINED;
@@ -1026,63 +851,47 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
}
}
- ret = dcbnl_reply(0, RTM_SETDCB,
- (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
- DCB_ATTR_PG_CFG, pid, seq, flags);
-
-err:
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}
-static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
+ return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}
-static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
+ return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
}
-static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *bcn_nest;
struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
u8 value_byte;
u32 value_integer;
- int ret = -EINVAL;
+ int ret;
bool getall = false;
int i;
- if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
+ if (!tb[DCB_ATTR_BCN])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->getbcnrp ||
!netdev->dcbnl_ops->getbcncfg)
- return ret;
+ return -EOPNOTSUPP;
ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
-
if (ret)
- goto err_out;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb)
- goto err_out;
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_BCN_GCFG;
+ return ret;
- bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
+ bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
if (!bcn_nest)
- goto err;
+ return -EMSGSIZE;
if (bcn_tb[DCB_BCN_ATTR_ALL])
getall = true;
@@ -1093,7 +902,7 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
&value_byte);
- ret = nla_put_u8(dcbnl_skb, i, value_byte);
+ ret = nla_put_u8(skb, i, value_byte);
if (ret)
goto err_bcn;
}
@@ -1104,49 +913,41 @@ static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
netdev->dcbnl_ops->getbcncfg(netdev, i,
&value_integer);
- ret = nla_put_u32(dcbnl_skb, i, value_integer);
+ ret = nla_put_u32(skb, i, value_integer);
if (ret)
goto err_bcn;
}
- nla_nest_end(dcbnl_skb, bcn_nest);
-
- nlmsg_end(dcbnl_skb, nlh);
-
- ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
- if (ret)
- goto err_out;
+ nla_nest_end(skb, bcn_nest);
return 0;
err_bcn:
- nla_nest_cancel(dcbnl_skb, bcn_nest);
-nlmsg_failure:
-err:
- kfree_skb(dcbnl_skb);
-err_out:
- ret = -EINVAL;
+ nla_nest_cancel(skb, bcn_nest);
return ret;
}
-static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
int i;
- int ret = -EINVAL;
+ int ret;
u8 value_byte;
u32 value_int;
- if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
+ if (!tb[DCB_ATTR_BCN])
+ return -EINVAL;
+
+ if (!netdev->dcbnl_ops->setbcncfg ||
!netdev->dcbnl_ops->setbcnrp)
- return ret;
+ return -EOPNOTSUPP;
ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
tb[DCB_ATTR_BCN],
dcbnl_pfc_up_nest);
if (ret)
- goto err;
+ return ret;
for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
if (data[i] == NULL)
@@ -1164,10 +965,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
i, value_int);
}
- ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
- pid, seq, flags);
-err:
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_BCN, 0);
}
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
@@ -1233,20 +1031,21 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
struct dcb_app_type *itr;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
int dcbx;
- int err = -EMSGSIZE;
+ int err;
if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
- goto nla_put_failure;
+ return -EMSGSIZE;
+
ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
if (!ieee)
- goto nla_put_failure;
+ return -EMSGSIZE;
if (ops->ieee_getets) {
struct ieee_ets ets;
err = ops->ieee_getets(netdev, &ets);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
- goto nla_put_failure;
+ return -EMSGSIZE;
}
if (ops->ieee_getmaxrate) {
@@ -1256,7 +1055,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
sizeof(maxrate), &maxrate);
if (err)
- goto nla_put_failure;
+ return -EMSGSIZE;
}
}
@@ -1265,12 +1064,12 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
err = ops->ieee_getpfc(netdev, &pfc);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
- goto nla_put_failure;
+ return -EMSGSIZE;
}
app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
if (!app)
- goto nla_put_failure;
+ return -EMSGSIZE;
spin_lock(&dcb_lock);
list_for_each_entry(itr, &dcb_app_list, list) {
@@ -1279,7 +1078,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
&itr->app);
if (err) {
spin_unlock(&dcb_lock);
- goto nla_put_failure;
+ return -EMSGSIZE;
}
}
}
@@ -1298,7 +1097,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
err = ops->ieee_peer_getets(netdev, &ets);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
- goto nla_put_failure;
+ return -EMSGSIZE;
}
if (ops->ieee_peer_getpfc) {
@@ -1306,7 +1105,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
err = ops->ieee_peer_getpfc(netdev, &pfc);
if (!err &&
nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
- goto nla_put_failure;
+ return -EMSGSIZE;
}
if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1315,20 +1114,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
DCB_ATTR_IEEE_APP_UNSPEC,
DCB_ATTR_IEEE_APP);
if (err)
- goto nla_put_failure;
+ return -EMSGSIZE;
}
nla_nest_end(skb, ieee);
if (dcbx >= 0) {
err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
if (err)
- goto nla_put_failure;
+ return -EMSGSIZE;
}
return 0;
-
-nla_put_failure:
- return err;
}
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
@@ -1340,13 +1136,13 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
struct nlattr *pg = nla_nest_start(skb, i);
if (!pg)
- goto nla_put_failure;
+ return -EMSGSIZE;
for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
struct nlattr *tc_nest = nla_nest_start(skb, i);
if (!tc_nest)
- goto nla_put_failure;
+ return -EMSGSIZE;
pgid = DCB_ATTR_VALUE_UNDEFINED;
prio = DCB_ATTR_VALUE_UNDEFINED;
@@ -1364,7 +1160,7 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
- goto nla_put_failure;
+ return -EMSGSIZE;
nla_nest_end(skb, tc_nest);
}
@@ -1378,13 +1174,10 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
&tc_pct);
if (nla_put_u8(skb, i, tc_pct))
- goto nla_put_failure;
+ return -EMSGSIZE;
}
nla_nest_end(skb, pg);
return 0;
-
-nla_put_failure:
- return -EMSGSIZE;
}
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
@@ -1531,27 +1324,16 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
struct net *net = dev_net(dev);
struct sk_buff *skb;
struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
int err;
if (!ops)
return -EOPNOTSUPP;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ skb = dcbnl_newmsg(event, cmd, pid, seq, 0, &nlh);
if (!skb)
return -ENOBUFS;
- nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
- if (nlh == NULL) {
- nlmsg_free(skb);
- return -EMSGSIZE;
- }
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = cmd;
-
if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
err = dcbnl_ieee_fill(skb, dev);
else
@@ -1559,8 +1341,7 @@ static int dcbnl_notify(struct net_device *dev, int event, int cmd,
if (err < 0) {
/* Report error to broadcast listeners */
- nlmsg_cancel(skb, nlh);
- kfree_skb(skb);
+ nlmsg_free(skb);
rtnl_set_sk_err(net, RTNLGRP_DCB, err);
} else {
/* End nlmsg and notify broadcast listeners */
@@ -1590,15 +1371,15 @@ EXPORT_SYMBOL(dcbnl_cee_notify);
* No attempt is made to reconcile the case where only part of the
* cmd can be completed.
*/
-static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
- int err = -EOPNOTSUPP;
+ int err;
if (!ops)
- return err;
+ return -EOPNOTSUPP;
if (!tb[DCB_ATTR_IEEE])
return -EINVAL;
@@ -1649,58 +1430,28 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
}
err:
- dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
- pid, seq, flags);
+ err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
return err;
}
-static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct net *net = dev_net(netdev);
- struct sk_buff *skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
- int err;
if (!ops)
return -EOPNOTSUPP;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOBUFS;
-
- nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
- if (nlh == NULL) {
- nlmsg_free(skb);
- return -EMSGSIZE;
- }
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_IEEE_GET;
-
- err = dcbnl_ieee_fill(skb, netdev);
-
- if (err < 0) {
- nlmsg_cancel(skb, nlh);
- kfree_skb(skb);
- } else {
- nlmsg_end(skb, nlh);
- err = rtnl_unicast(skb, net, pid);
- }
-
- return err;
+ return dcbnl_ieee_fill(skb, netdev);
}
-static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
- int err = -EOPNOTSUPP;
+ int err;
if (!ops)
return -EOPNOTSUPP;
@@ -1733,32 +1484,26 @@ static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
}
err:
- dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
- pid, seq, flags);
+ err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
return err;
}
/* DCBX configuration */
-static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret;
-
if (!netdev->dcbnl_ops->getdcbx)
return -EOPNOTSUPP;
- ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
- DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);
-
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_DCBX,
+ netdev->dcbnl_ops->getdcbx(netdev));
}
-static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- int ret;
u8 value;
if (!netdev->dcbnl_ops->setdcbx)
@@ -1769,19 +1514,13 @@ static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
value = nla_get_u8(tb[DCB_ATTR_DCBX]);
- ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
- RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
- pid, seq, flags);
-
- return ret;
+ return nla_put_u8(skb, DCB_ATTR_DCBX,
+ netdev->dcbnl_ops->setdcbx(netdev, value));
}
-static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct sk_buff *dcbnl_skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
u8 value;
int ret, i;
@@ -1796,25 +1535,11 @@ static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
dcbnl_featcfg_nest);
if (ret)
- goto err_out;
-
- dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!dcbnl_skb) {
- ret = -ENOBUFS;
- goto err_out;
- }
-
- nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_GFEATCFG;
+ return ret;
- nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
- if (!nest) {
- ret = -EMSGSIZE;
- goto nla_put_failure;
- }
+ nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
+ if (!nest)
+ return -EMSGSIZE;
if (data[DCB_FEATCFG_ATTR_ALL])
getall = 1;
@@ -1825,28 +1550,21 @@ static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
if (!ret)
- ret = nla_put_u8(dcbnl_skb, i, value);
+ ret = nla_put_u8(skb, i, value);
if (ret) {
- nla_nest_cancel(dcbnl_skb, nest);
+ nla_nest_cancel(skb, nest);
goto nla_put_failure;
}
}
- nla_nest_end(dcbnl_skb, nest);
+ nla_nest_end(skb, nest);
- nlmsg_end(dcbnl_skb, nlh);
-
- return rtnl_unicast(dcbnl_skb, &init_net, pid);
nla_put_failure:
- nlmsg_cancel(dcbnl_skb, nlh);
-nlmsg_failure:
- kfree_skb(dcbnl_skb);
-err_out:
return ret;
}
-static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
int ret, i;
@@ -1876,60 +1594,73 @@ static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
goto err;
}
err:
- dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
- pid, seq, flags);
+ ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
return ret;
}
/* Handle CEE DCBX GET commands. */
-static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
- u32 pid, u32 seq, u16 flags)
+static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
+ u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
- struct net *net = dev_net(netdev);
- struct sk_buff *skb;
- struct nlmsghdr *nlh;
- struct dcbmsg *dcb;
const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
- int err;
if (!ops)
return -EOPNOTSUPP;
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
- if (!skb)
- return -ENOBUFS;
-
- nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
- if (nlh == NULL) {
- nlmsg_free(skb);
- return -EMSGSIZE;
- }
+ return dcbnl_cee_fill(skb, netdev);
+}
- dcb = NLMSG_DATA(nlh);
- dcb->dcb_family = AF_UNSPEC;
- dcb->cmd = DCB_CMD_CEE_GET;
+struct reply_func {
+ /* reply netlink message type */
+ int type;
- err = dcbnl_cee_fill(skb, netdev);
+ /* function to fill message contents */
+ int (*cb)(struct net_device *, struct nlmsghdr *, u32,
+ struct nlattr **, struct sk_buff *);
+};
- if (err < 0) {
- nlmsg_cancel(skb, nlh);
- nlmsg_free(skb);
- } else {
- nlmsg_end(skb, nlh);
- err = rtnl_unicast(skb, net, pid);
- }
- return err;
-}
+static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
+ [DCB_CMD_GSTATE] = { RTM_GETDCB, dcbnl_getstate },
+ [DCB_CMD_SSTATE] = { RTM_SETDCB, dcbnl_setstate },
+ [DCB_CMD_PFC_GCFG] = { RTM_GETDCB, dcbnl_getpfccfg },
+ [DCB_CMD_PFC_SCFG] = { RTM_SETDCB, dcbnl_setpfccfg },
+ [DCB_CMD_GPERM_HWADDR] = { RTM_GETDCB, dcbnl_getperm_hwaddr },
+ [DCB_CMD_GCAP] = { RTM_GETDCB, dcbnl_getcap },
+ [DCB_CMD_GNUMTCS] = { RTM_GETDCB, dcbnl_getnumtcs },
+ [DCB_CMD_SNUMTCS] = { RTM_SETDCB, dcbnl_setnumtcs },
+ [DCB_CMD_PFC_GSTATE] = { RTM_GETDCB, dcbnl_getpfcstate },
+ [DCB_CMD_PFC_SSTATE] = { RTM_SETDCB, dcbnl_setpfcstate },
+ [DCB_CMD_GAPP] = { RTM_GETDCB, dcbnl_getapp },
+ [DCB_CMD_SAPP] = { RTM_SETDCB, dcbnl_setapp },
+ [DCB_CMD_PGTX_GCFG] = { RTM_GETDCB, dcbnl_pgtx_getcfg },
+ [DCB_CMD_PGTX_SCFG] = { RTM_SETDCB, dcbnl_pgtx_setcfg },
+ [DCB_CMD_PGRX_GCFG] = { RTM_GETDCB, dcbnl_pgrx_getcfg },
+ [DCB_CMD_PGRX_SCFG] = { RTM_SETDCB, dcbnl_pgrx_setcfg },
+ [DCB_CMD_SET_ALL] = { RTM_SETDCB, dcbnl_setall },
+ [DCB_CMD_BCN_GCFG] = { RTM_GETDCB, dcbnl_bcn_getcfg },
+ [DCB_CMD_BCN_SCFG] = { RTM_SETDCB, dcbnl_bcn_setcfg },
+ [DCB_CMD_IEEE_GET] = { RTM_GETDCB, dcbnl_ieee_get },
+ [DCB_CMD_IEEE_SET] = { RTM_SETDCB, dcbnl_ieee_set },
+ [DCB_CMD_IEEE_DEL] = { RTM_SETDCB, dcbnl_ieee_del },
+ [DCB_CMD_GDCBX] = { RTM_GETDCB, dcbnl_getdcbx },
+ [DCB_CMD_SDCBX] = { RTM_SETDCB, dcbnl_setdcbx },
+ [DCB_CMD_GFEATCFG] = { RTM_GETDCB, dcbnl_getfeatcfg },
+ [DCB_CMD_SFEATCFG] = { RTM_SETDCB, dcbnl_setfeatcfg },
+ [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
+};
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct net_device *netdev;
- struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
+ struct dcbmsg *dcb = nlmsg_data(nlh);
struct nlattr *tb[DCB_ATTR_MAX + 1];
u32 pid = skb ? NETLINK_CB(skb).pid : 0;
int ret = -EINVAL;
+ struct sk_buff *reply_skb;
+ struct nlmsghdr *reply_nlh = NULL;
+ const struct reply_func *fn;
if (!net_eq(net, &init_net))
return -EINVAL;
@@ -1939,136 +1670,78 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
if (ret < 0)
return ret;
+ if (dcb->cmd > DCB_CMD_MAX)
+ return -EINVAL;
+
+ /* check if a reply function has been defined for the command */
+ fn = &reply_funcs[dcb->cmd];
+ if (!fn->cb)
+ return -EOPNOTSUPP;
+
if (!tb[DCB_ATTR_IFNAME])
return -EINVAL;
netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
if (!netdev)
- return -EINVAL;
+ return -ENODEV;
- if (!netdev->dcbnl_ops)
- goto errout;
-
- switch (dcb->cmd) {
- case DCB_CMD_GSTATE:
- ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PFC_GCFG:
- ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GPERM_HWADDR:
- ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PGTX_GCFG:
- ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PGRX_GCFG:
- ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_BCN_GCFG:
- ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_SSTATE:
- ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PFC_SCFG:
- ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
+ if (!netdev->dcbnl_ops) {
+ ret = -EOPNOTSUPP;
goto out;
+ }
- case DCB_CMD_SET_ALL:
- ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PGTX_SCFG:
- ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PGRX_SCFG:
- ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GCAP:
- ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GNUMTCS:
- ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_SNUMTCS:
- ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PFC_GSTATE:
- ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_PFC_SSTATE:
- ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_BCN_SCFG:
- ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GAPP:
- ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_SAPP:
- ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_IEEE_SET:
- ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_IEEE_GET:
- ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_IEEE_DEL:
- ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GDCBX:
- ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_SDCBX:
- ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_GFEATCFG:
- ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
- goto out;
- case DCB_CMD_SFEATCFG:
- ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
+ reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags, &reply_nlh);
+ if (!reply_skb) {
+ ret = -ENOBUFS;
goto out;
- case DCB_CMD_CEE_GET:
- ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
- nlh->nlmsg_flags);
+ }
+
+ ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
+ if (ret < 0) {
+ nlmsg_free(reply_skb);
goto out;
- default:
- goto errout;
}
-errout:
- ret = -EINVAL;
+
+ nlmsg_end(reply_skb, reply_nlh);
+
+ ret = rtnl_unicast(reply_skb, &init_net, pid);
out:
dev_put(netdev);
return ret;
}
+static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
+ int ifindex, int prio)
+{
+ struct dcb_app_type *itr;
+
+ list_for_each_entry(itr, &dcb_app_list, list) {
+ if (itr->app.selector == app->selector &&
+ itr->app.protocol == app->protocol &&
+ itr->ifindex == ifindex &&
+ (!prio || itr->app.priority == prio))
+ return itr;
+ }
+
+ return NULL;
+}
+
+static int dcb_app_add(const struct dcb_app *app, int ifindex)
+{
+ struct dcb_app_type *entry;
+
+ entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(&entry->app, app, sizeof(*app));
+ entry->ifindex = ifindex;
+ list_add(&entry->list, &dcb_app_list);
+
+ return 0;
+}
+
/**
* dcb_getapp - retrieve the DCBX application user priority
*
@@ -2082,14 +1755,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;
spin_lock(&dcb_lock);
- list_for_each_entry(itr, &dcb_app_list, list) {
- if (itr->app.selector == app->selector &&
- itr->app.protocol == app->protocol &&
- itr->ifindex == dev->ifindex) {
- prio = itr->app.priority;
- break;
- }
- }
+ if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ prio = itr->app.priority;
spin_unlock(&dcb_lock);
return prio;
@@ -2107,6 +1774,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
struct dcb_app_type *itr;
struct dcb_app_type event;
+ int err = 0;
event.ifindex = dev->ifindex;
memcpy(&event.app, new, sizeof(event.app));
@@ -2115,36 +1783,23 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
spin_lock(&dcb_lock);
/* Search for existing match and replace */
- list_for_each_entry(itr, &dcb_app_list, list) {
- if (itr->app.selector == new->selector &&
- itr->app.protocol == new->protocol &&
- itr->ifindex == dev->ifindex) {
- if (new->priority)
- itr->app.priority = new->priority;
- else {
- list_del(&itr->list);
- kfree(itr);
- }
- goto out;
+ if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
+ if (new->priority)
+ itr->app.priority = new->priority;
+ else {
+ list_del(&itr->list);
+ kfree(itr);
}
+ goto out;
}
/* App type does not exist add new application type */
- if (new->priority) {
- struct dcb_app_type *entry;
- entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
- if (!entry) {
- spin_unlock(&dcb_lock);
- return -ENOMEM;
- }
-
- memcpy(&entry->app, new, sizeof(*new));
- entry->ifindex = dev->ifindex;
- list_add(&entry->list, &dcb_app_list);
- }
+ if (new->priority)
+ err = dcb_app_add(new, dev->ifindex);
out:
spin_unlock(&dcb_lock);
- call_dcbevent_notifiers(DCB_APP_EVENT, &event);
- return 0;
+ if (!err)
+ call_dcbevent_notifiers(DCB_APP_EVENT, &event);
+ return err;
}
EXPORT_SYMBOL(dcb_setapp);
@@ -2161,13 +1816,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;
spin_lock(&dcb_lock);
- list_for_each_entry(itr, &dcb_app_list, list) {
- if (itr->app.selector == app->selector &&
- itr->app.protocol == app->protocol &&
- itr->ifindex == dev->ifindex) {
- prio |= 1 << itr->app.priority;
- }
- }
+ if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ prio |= 1 << itr->app.priority;
spin_unlock(&dcb_lock);
return prio;
@@ -2183,7 +1833,6 @@ EXPORT_SYMBOL(dcb_ieee_getapp_mask);
*/
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
{
- struct dcb_app_type *itr, *entry;
struct dcb_app_type event;
int err = 0;
@@ -2194,26 +1843,12 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
spin_lock(&dcb_lock);
/* Search for existing match and abort if found */
- list_for_each_entry(itr, &dcb_app_list, list) {
- if (itr->app.selector == new->selector &&
- itr->app.protocol == new->protocol &&
- itr->app.priority == new->priority &&
- itr->ifindex == dev->ifindex) {
- err = -EEXIST;
- goto out;
- }
- }
-
- /* App entry does not exist add new entry */
- entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
- if (!entry) {
- err = -ENOMEM;
+ if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
+ err = -EEXIST;
goto out;
}
- memcpy(&entry->app, new, sizeof(*new));
- entry->ifindex = dev->ifindex;
- list_add(&entry->list, &dcb_app_list);
+ err = dcb_app_add(new, dev->ifindex);
out:
spin_unlock(&dcb_lock);
if (!err)
@@ -2240,19 +1875,12 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
spin_lock(&dcb_lock);
/* Search for existing match and remove it. */
- list_for_each_entry(itr, &dcb_app_list, list) {
- if (itr->app.selector == del->selector &&
- itr->app.protocol == del->protocol &&
- itr->app.priority == del->priority &&
- itr->ifindex == dev->ifindex) {
- list_del(&itr->list);
- kfree(itr);
- err = 0;
- goto out;
- }
+ if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
+ list_del(&itr->list);
+ kfree(itr);
+ err = 0;
}
-out:
spin_unlock(&dcb_lock);
if (!err)
call_dcbevent_notifiers(DCB_APP_EVENT, &event);
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index e2ab0627a5ff..a269aa7f7923 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -50,7 +50,8 @@ static inline u8 dccp_ackvec_state(const u8 *cell)
return *cell & ~DCCPAV_MAX_RUNLEN;
}
-/** struct dccp_ackvec - Ack Vector main data structure
+/**
+ * struct dccp_ackvec - Ack Vector main data structure
*
* This implements a fixed-size circular buffer within an array and is largely
* based on Appendix A of RFC 4340.
@@ -76,7 +77,8 @@ struct dccp_ackvec {
struct list_head av_records;
};
-/** struct dccp_ackvec_record - Records information about sent Ack Vectors
+/**
+ * struct dccp_ackvec_record - Records information about sent Ack Vectors
*
* These list entries define the additional information which the HC-Receiver
* keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A.
@@ -121,6 +123,7 @@ static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
* @len: length of @vec
* @nonce: whether @vec had an ECN nonce of 0 or 1
* @node: FIFO - arranged in descending order of ack_ackno
+ *
* This structure is used by CCIDs to access Ack Vectors in a received skb.
*/
struct dccp_ackvec_parsed {
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 48b585a5cba7..597557254ddb 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -46,6 +46,7 @@ bool ccid_support_check(u8 const *ccid_array, u8 array_len)
* ccid_get_builtin_ccids - Populate a list of built-in CCIDs
* @ccid_array: pointer to copy into
* @array_len: value to return length into
+ *
* This function allocates memory - caller must see that it is freed after use.
*/
int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 8c67bedf85b0..d65e98798eca 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -113,6 +113,7 @@ static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
/**
* ccid3_hc_tx_update_x - Update allowed sending rate X
* @stamp: most recent time if available - can be left NULL.
+ *
* This function tracks draft rfc3448bis, check there for latest details.
*
* Note: X and X_recv are both stored in units of 64 * bytes/second, to support
@@ -161,9 +162,11 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
}
}
-/*
- * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1)
+/**
+ * ccid3_hc_tx_update_s - Track the mean packet size `s'
* @len: DCCP packet payload size in bytes
+ *
+ * cf. RFC 4342, 5.3 and RFC 3448, 4.1
*/
static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len)
{
@@ -270,6 +273,7 @@ out:
/**
* ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets
* @skb: next packet candidate to send on @sk
+ *
* This function uses the convention of ccid_packet_dequeue_eval() and
* returns a millisecond-delay value between 0 and t_mbi = 64000 msec.
*/
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 497723c4d4bb..57f9fd78c4df 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -133,6 +133,7 @@ static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
* @rh: Receive history containing a fresh loss event
* @calc_first_li: Caller-dependent routine to compute length of first interval
* @sk: Used by @calc_first_li in caller-specific way (subtyping)
+ *
* Updates I_mean and returns 1 if a new interval has in fact been added to @lh.
*/
int tfrc_lh_interval_add(struct tfrc_loss_hist *lh, struct tfrc_rx_hist *rh,
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index de8fe294bf0b..08df7a3acb3d 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -315,6 +315,7 @@ static void __three_after_loss(struct tfrc_rx_hist *h)
* @ndp: The NDP count belonging to @skb
* @calc_first_li: Caller-dependent computation of first loss interval in @lh
* @sk: Used by @calc_first_li (see tfrc_lh_interval_add)
+ *
* Chooses action according to pending loss, updates LI database when a new
* loss was detected, and does required post-processing. Returns 1 when caller
* should send feedback, 0 otherwise.
@@ -387,7 +388,7 @@ static inline struct tfrc_rx_hist_entry *
}
/**
- * tfrc_rx_hist_rtt_prev_s: previously suitable (wrt rtt_last_s) RTT-sampling entry
+ * tfrc_rx_hist_rtt_prev_s - previously suitable (wrt rtt_last_s) RTT-sampling entry
*/
static inline struct tfrc_rx_hist_entry *
tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h)
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index a052a4377e26..88ef98285bec 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -611,6 +611,7 @@ static inline u32 tfrc_binsearch(u32 fval, u8 small)
* @s: packet size in bytes
* @R: RTT scaled by 1000000 (i.e., microseconds)
* @p: loss ratio estimate scaled by 1000000
+ *
* Returns X_calc in bytes per second (not scaled).
*/
u32 tfrc_calc_x(u16 s, u32 R, u32 p)
@@ -659,6 +660,7 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
/**
* tfrc_calc_x_reverse_lookup - try to find p given f(p)
* @fvalue: function value to match, scaled by 1000000
+ *
* Returns closest match for p, also scaled by 1000000
*/
u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 9040be049d8c..708e75bf623d 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -352,6 +352,7 @@ static inline int dccp_bad_service_code(const struct sock *sk,
* @dccpd_opt_len: total length of all options (5.8) in the packet
* @dccpd_seq: sequence number
* @dccpd_ack_seq: acknowledgment number subheader field value
+ *
* This is used for transmission as well as for reception.
*/
struct dccp_skb_cb {
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 78a2ad70e1b0..9733ddbc96cb 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -350,6 +350,7 @@ static int __dccp_feat_activate(struct sock *sk, const int idx,
* @feat_num: feature to activate, one of %dccp_feature_numbers
* @local: whether local (1) or remote (0) @feat_num is meant
* @fval: the value (SP or NN) to activate, or NULL to use the default value
+ *
* For general use this function is preferable over __dccp_feat_activate().
*/
static int dccp_feat_activate(struct sock *sk, u8 feat_num, bool local,
@@ -446,6 +447,7 @@ static struct dccp_feat_entry *dccp_feat_list_lookup(struct list_head *fn_list,
* @head: list to add to
* @feat: feature number
* @local: whether the local (1) or remote feature with number @feat is meant
+ *
* This is the only constructor and serves to ensure the above invariants.
*/
static struct dccp_feat_entry *
@@ -504,6 +506,7 @@ static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local,
* @feat: one of %dccp_feature_numbers
* @local: whether local (1) or remote (0) @feat_num is being confirmed
* @fval: pointer to NN/SP value to be inserted or NULL
+ *
* Returns 0 on success, a Reset code for further processing otherwise.
*/
static int dccp_feat_push_confirm(struct list_head *fn_list, u8 feat, u8 local,
@@ -691,6 +694,7 @@ int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
* @feat: an NN feature from %dccp_feature_numbers
* @mandatory: use Mandatory option if 1
* @nn_val: value to register (restricted to 4 bytes)
+ *
* Note that NN features are local by definition (RFC 4340, 6.3.2).
*/
static int __feat_register_nn(struct list_head *fn, u8 feat,
@@ -760,6 +764,7 @@ int dccp_feat_register_sp(struct sock *sk, u8 feat, u8 is_local,
* dccp_feat_nn_get - Query current/pending value of NN feature
* @sk: DCCP socket of an established connection
* @feat: NN feature number from %dccp_feature_numbers
+ *
* For a known NN feature, returns value currently being negotiated, or
* current (confirmed) value if no negotiation is going on.
*/
@@ -790,6 +795,7 @@ EXPORT_SYMBOL_GPL(dccp_feat_nn_get);
* @sk: DCCP socket of an established connection
* @feat: NN feature number from %dccp_feature_numbers
* @nn_val: the new value to use
+ *
* This function is used to communicate NN updates out-of-band.
*/
int dccp_feat_signal_nn_change(struct sock *sk, u8 feat, u64 nn_val)
@@ -930,6 +936,7 @@ static const struct ccid_dependency *dccp_feat_ccid_deps(u8 ccid, bool is_local)
* @fn: feature-negotiation list to update
* @id: CCID number to track
* @is_local: whether TX CCID (1) or RX CCID (0) is meant
+ *
* This function needs to be called after registering all other features.
*/
static int dccp_feat_propagate_ccid(struct list_head *fn, u8 id, bool is_local)
@@ -953,6 +960,7 @@ static int dccp_feat_propagate_ccid(struct list_head *fn, u8 id, bool is_local)
/**
* dccp_feat_finalise_settings - Finalise settings before starting negotiation
* @dp: client or listening socket (settings will be inherited)
+ *
* This is called after all registrations (socket initialisation, sysctls, and
* sockopt calls), and before sending the first packet containing Change options
* (ie. client-Request or server-Response), to ensure internal consistency.
@@ -1284,6 +1292,7 @@ confirmation_failed:
* @feat: NN number, one of %dccp_feature_numbers
* @val: NN value
* @len: length of @val in bytes
+ *
* This function combines the functionality of change_recv/confirm_recv, with
* the following differences (reset codes are the same):
* - cleanup after receiving the Confirm;
@@ -1379,6 +1388,7 @@ fast_path_failed:
* @feat: one of %dccp_feature_numbers
* @val: value contents of @opt
* @len: length of @val in bytes
+ *
* Returns 0 on success, a Reset code for ending the connection otherwise.
*/
int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
diff --git a/net/dccp/input.c b/net/dccp/input.c
index bc93a333931e..14cdafad7a90 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -710,6 +710,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_state_process);
/**
* dccp_sample_rtt - Validate and finalise computation of RTT sample
* @delta: number of microseconds between packet and acknowledgment
+ *
* The routine is kept generic to work in different contexts. It should be
* called immediately when the ACK used for the RTT sample arrives.
*/
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 07f5579ca756..176ecdba4a22 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -161,17 +161,10 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
if (sk->sk_state == DCCP_LISTEN)
return;
- /* We don't check in the destentry if pmtu discovery is forbidden
- * on this route. We just assume that no packet_to_big packets
- * are send back when pmtu discovery is not active.
- * There is a small race when the user changes this flag in the
- * route, but I think that's acceptable.
- */
- if ((dst = __sk_dst_check(sk, 0)) == NULL)
+ dst = inet_csk_update_pmtu(sk, mtu);
+ if (!dst)
return;
- dst->ops->update_pmtu(dst, mtu);
-
/* Something is about to be wrong... Remember soft error
* for the case, if this connection will not able to recover.
*/
@@ -195,6 +188,14 @@ static inline void dccp_do_pmtu_discovery(struct sock *sk,
} /* else let the usual retransmit timer handle it */
}
+static void dccp_do_redirect(struct sk_buff *skb, struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_check(sk, 0);
+
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+}
+
/*
* This routine is called by the ICMP module when it gets some sort of error
* condition. If err < 0 then the socket should be closed and the error
@@ -259,6 +260,9 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
}
switch (type) {
+ case ICMP_REDIRECT:
+ dccp_do_redirect(skb, sk);
+ goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
@@ -477,7 +481,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
struct rtable *rt;
const struct iphdr *iph = ip_hdr(skb);
struct flowi4 fl4 = {
- .flowi4_oif = skb_rtable(skb)->rt_iif,
+ .flowi4_oif = inet_iif(skb),
.daddr = iph->saddr,
.saddr = iph->daddr,
.flowi4_tos = RT_CONN_FLAGS(sk),
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index fa9512d86f3b..56840b249f3b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -130,6 +130,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);
+ if (type == NDISC_REDIRECT) {
+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
+
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+ }
+
if (type == ICMPV6_PKT_TOOBIG) {
struct dst_entry *dst = NULL;
@@ -138,37 +145,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if ((1 << sk->sk_state) & (DCCPF_LISTEN | DCCPF_CLOSED))
goto out;
- /* icmp should have updated the destination cache entry */
- dst = __sk_dst_check(sk, np->dst_cookie);
- if (dst == NULL) {
- struct inet_sock *inet = inet_sk(sk);
- struct flowi6 fl6;
-
- /* BUGGG_FUTURE: Again, it is not clear how
- to handle rthdr case. Ignore this complexity
- for now.
- */
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_DCCP;
- fl6.daddr = np->daddr;
- fl6.saddr = np->saddr;
- fl6.flowi6_oif = sk->sk_bound_dev_if;
- fl6.fl6_dport = inet->inet_dport;
- fl6.fl6_sport = inet->inet_sport;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
-
- dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
- if (IS_ERR(dst)) {
- sk->sk_err_soft = -PTR_ERR(dst);
- goto out;
- }
- } else
- dst_hold(dst);
+ dst = inet6_csk_update_pmtu(sk, ntohl(info));
+ if (!dst)
+ goto out;
- if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
+ if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))
dccp_sync_mss(sk, dst_mtu(dst));
- } /* else let the usual retransmit timer handle it */
- dst_release(dst);
goto out;
}
@@ -237,7 +219,6 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
struct inet6_request_sock *ireq6 = inet6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
- struct ipv6_txoptions *opt = NULL;
struct in6_addr *final_p, final;
struct flowi6 fl6;
int err = -1;
@@ -253,9 +234,8 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
fl6.fl6_sport = inet_rsk(req)->loc_port;
security_req_classify_flow(req, flowi6_to_flowi(&fl6));
- opt = np->opt;
- final_p = fl6_update_dst(&fl6, opt, &final);
+ final_p = fl6_update_dst(&fl6, np->opt, &final);
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
if (IS_ERR(dst)) {
@@ -272,13 +252,11 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
&ireq6->loc_addr,
&ireq6->rmt_addr);
fl6.daddr = ireq6->rmt_addr;
- err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
err = net_xmit_eval(err);
}
done:
- if (opt != NULL && opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
return err;
}
@@ -473,7 +451,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
struct inet_sock *newinet;
struct dccp6_sock *newdp6;
struct sock *newsk;
- struct ipv6_txoptions *opt;
if (skb->protocol == htons(ETH_P_IP)) {
/*
@@ -518,7 +495,6 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
return newsk;
}
- opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
@@ -530,7 +506,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_proto = IPPROTO_DCCP;
fl6.daddr = ireq6->rmt_addr;
- final_p = fl6_update_dst(&fl6, opt, &final);
+ final_p = fl6_update_dst(&fl6, np->opt, &final);
fl6.saddr = ireq6->loc_addr;
fl6.flowi6_oif = sk->sk_bound_dev_if;
fl6.fl6_dport = inet_rsk(req)->rmt_port;
@@ -595,11 +571,8 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
* Yes, keeping reference count would be much more clever, but we make
* one more one thing there: reattach optmem to newsk.
*/
- if (opt != NULL) {
- newnp->opt = ipv6_dup_options(newsk, opt);
- if (opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
- }
+ if (np->opt != NULL)
+ newnp->opt = ipv6_dup_options(newsk, np->opt);
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt != NULL)
@@ -625,8 +598,6 @@ out_nonewsk:
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
- if (opt != NULL && opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
return NULL;
}
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 68fa6b7a3e01..a58e0b634050 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -527,6 +527,7 @@ int dccp_insert_option_mandatory(struct sk_buff *skb)
* @val: NN value or SP array (preferred element first) to copy
* @len: true length of @val in bytes (excluding first element repetition)
* @repeat_first: whether to copy the first element of @val twice
+ *
* The last argument is used to construct Confirm options, where the preferred
* value and the preference list appear separately (RFC 4340, 6.3.1). Preference
* lists are kept such that the preferred entry is always first, so we only need
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 787367308797..d17fc90a74b6 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -214,6 +214,7 @@ void dccp_write_space(struct sock *sk)
* dccp_wait_for_ccid - Await CCID send permission
* @sk: socket to wait for
* @delay: timeout in jiffies
+ *
* This is used by CCIDs which need to delay the send time in process context.
*/
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 7eaf98799729..102d6106a942 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -505,6 +505,14 @@ static int dn_fib_check_attr(struct rtmsg *r, struct rtattr **rta)
return 0;
}
+static inline u32 rtm_get_table(struct rtattr **rta, u8 table)
+{
+ if (rta[RTA_TABLE - 1])
+ table = nla_get_u32((struct nlattr *) rta[RTA_TABLE - 1]);
+
+ return table;
+}
+
static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index ac90f658586c..3aede1b459fd 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -202,7 +202,7 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct dn_route *rt = (struct dn_route *)dst;
- struct neighbour *neigh = dst_get_neighbour_noref(dst);
+ struct neighbour *neigh = rt->n;
struct net_device *dev = neigh->dev;
char mac_addr[ETH_ALEN];
unsigned int seq;
@@ -240,7 +240,7 @@ static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(skb);
return -ENOBUFS;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = skb2;
net_info_ratelimited("dn_long_output: Increasing headroom\n");
}
@@ -283,7 +283,7 @@ static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(skb);
return -ENOBUFS;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = skb2;
net_info_ratelimited("dn_short_output: Increasing headroom\n");
}
@@ -322,7 +322,7 @@ static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(skb);
return -ENOBUFS;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = skb2;
net_info_ratelimited("dn_phase3_output: Increasing headroom\n");
}
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index 564a6ad13ce7..8a96047c7c94 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -322,7 +322,7 @@ static __le16 *dn_mk_ack_header(struct sock *sk, struct sk_buff *skb, unsigned c
/* Set "cross subchannel" bit in ackcrs */
ackcrs |= 0x2000;
- ptr = (__le16 *)dn_mk_common_header(scp, skb, msgflag, hlen);
+ ptr = dn_mk_common_header(scp, skb, msgflag, hlen);
*ptr++ = cpu_to_le16(acknum);
*ptr++ = cpu_to_le16(ackcrs);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 586302e557ad..85a3604c87c8 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -114,10 +114,16 @@ static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
+static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
-static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
-static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr);
+static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb , u32 mtu);
+static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
+static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);
@@ -138,17 +144,37 @@ static struct dst_ops dn_dst_ops = {
.mtu = dn_dst_mtu,
.cow_metrics = dst_cow_metrics_generic,
.destroy = dn_dst_destroy,
+ .ifdown = dn_dst_ifdown,
.negative_advice = dn_dst_negative_advice,
.link_failure = dn_dst_link_failure,
.update_pmtu = dn_dst_update_pmtu,
+ .redirect = dn_dst_redirect,
.neigh_lookup = dn_dst_neigh_lookup,
};
static void dn_dst_destroy(struct dst_entry *dst)
{
+ struct dn_route *rt = (struct dn_route *) dst;
+
+ if (rt->n)
+ neigh_release(rt->n);
dst_destroy_metrics_generic(dst);
}
+static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
+{
+ if (how) {
+ struct dn_route *rt = (struct dn_route *) dst;
+ struct neighbour *n = rt->n;
+
+ if (n && n->dev == dev) {
+ n->dev = dev_net(dev)->loopback_dev;
+ dev_hold(n->dev);
+ dev_put(dev);
+ }
+ }
+}
+
static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
{
__u16 tmp = (__u16 __force)(src ^ dst);
@@ -242,9 +268,11 @@ static int dn_dst_gc(struct dst_ops *ops)
* We update both the mtu and the advertised mss (i.e. the segment size we
* advertise to the other end).
*/
-static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
{
- struct neighbour *n = dst_get_neighbour_noref(dst);
+ struct dn_route *rt = (struct dn_route *) dst;
+ struct neighbour *n = rt->n;
u32 min_mtu = 230;
struct dn_dev *dn;
@@ -269,6 +297,11 @@ static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
}
}
+static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
+{
+}
+
/*
* When a route has been marked obsolete. (e.g. routing cache flush)
*/
@@ -713,7 +746,8 @@ out:
static int dn_to_neigh_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n = dst_get_neighbour_noref(dst);
+ struct dn_route *rt = (struct dn_route *) dst;
+ struct neighbour *n = rt->n;
return n->output(n, skb);
}
@@ -727,7 +761,7 @@ static int dn_output(struct sk_buff *skb)
int err = -EINVAL;
- if (dst_get_neighbour_noref(dst) == NULL)
+ if (rt->n == NULL)
goto error;
skb->dev = dev;
@@ -828,7 +862,9 @@ static unsigned int dn_dst_mtu(const struct dst_entry *dst)
return mtu ? : dst->dev->mtu;
}
-static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
}
@@ -848,11 +884,11 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
}
rt->rt_type = res->type;
- if (dev != NULL && dst_get_neighbour_noref(&rt->dst) == NULL) {
+ if (dev != NULL && rt->n == NULL) {
n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
if (IS_ERR(n))
return PTR_ERR(n);
- dst_set_neighbour(&rt->dst, n);
+ rt->n = n;
}
if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
@@ -1140,7 +1176,7 @@ make_route:
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
+ rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
if (rt == NULL)
goto e_nobufs;
@@ -1159,7 +1195,7 @@ make_route:
rt->rt_dst_map = fld.daddr;
rt->rt_src_map = fld.saddr;
- dst_set_neighbour(&rt->dst, neigh);
+ rt->n = neigh;
neigh = NULL;
rt->dst.lastuse = jiffies;
@@ -1388,7 +1424,6 @@ static int dn_route_input_slow(struct sk_buff *skb)
/* Packet was intra-ethernet, so we know its on-link */
if (cb->rt_flags & DN_RT_F_IE) {
gateway = cb->src;
- flags |= RTCF_DIRECTSRC;
goto make_route;
}
@@ -1401,14 +1436,13 @@ static int dn_route_input_slow(struct sk_buff *skb)
/* Close eyes and pray */
gateway = cb->src;
- flags |= RTCF_DIRECTSRC;
goto make_route;
default:
goto e_inval;
}
make_route:
- rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
+ rt = dst_alloc(&dn_dst_ops, out_dev, 0, DST_OBSOLETE_NONE, DST_HOST);
if (rt == NULL)
goto e_nobufs;
@@ -1429,7 +1463,7 @@ make_route:
rt->fld.flowidn_iif = in_dev->ifindex;
rt->fld.flowidn_mark = fld.flowidn_mark;
- dst_set_neighbour(&rt->dst, neigh);
+ rt->n = neigh;
rt->dst.lastuse = jiffies;
rt->dst.output = dn_rt_bug;
switch (res.type) {
@@ -1515,54 +1549,68 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
struct dn_route *rt = (struct dn_route *)skb_dst(skb);
struct rtmsg *r;
struct nlmsghdr *nlh;
- unsigned char *b = skb_tail_pointer(skb);
long expires;
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
- r = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ r = nlmsg_data(nlh);
r->rtm_family = AF_DECnet;
r->rtm_dst_len = 16;
r->rtm_src_len = 0;
r->rtm_tos = 0;
r->rtm_table = RT_TABLE_MAIN;
- RTA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
r->rtm_type = rt->rt_type;
r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
r->rtm_scope = RT_SCOPE_UNIVERSE;
r->rtm_protocol = RTPROT_UNSPEC;
+
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
- RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
+
+ if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
+ nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
+ goto errout;
+
if (rt->fld.saddr) {
r->rtm_src_len = 16;
- RTA_PUT(skb, RTA_SRC, 2, &rt->fld.saddr);
+ if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
+ goto errout;
}
- if (rt->dst.dev)
- RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
+ if (rt->dst.dev &&
+ nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
+ goto errout;
+
/*
* Note to self - change this if input routes reverse direction when
* they deal only with inputs and not with replies like they do
* currently.
*/
- RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
- if (rt->rt_daddr != rt->rt_gateway)
- RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
+ if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
+ goto errout;
+
+ if (rt->rt_daddr != rt->rt_gateway &&
+ nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
+ goto errout;
+
if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
- goto rtattr_failure;
+ goto errout;
+
expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
- if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
rt->dst.error) < 0)
- goto rtattr_failure;
- if (dn_is_input_route(rt))
- RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fld.flowidn_iif);
+ goto errout;
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
- return skb->len;
+ if (dn_is_input_route(rt) &&
+ nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
+ goto errout;
-nlmsg_failure:
-rtattr_failure:
- nlmsg_trim(skb, b);
- return -1;
+ return nlmsg_end(skb, nlh);
+
+errout:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
/*
@@ -1572,7 +1620,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
{
struct net *net = sock_net(in_skb->sk);
struct rtattr **rta = arg;
- struct rtmsg *rtm = NLMSG_DATA(nlh);
+ struct rtmsg *rtm = nlmsg_data(nlh);
struct dn_route *rt = NULL;
struct dn_skb_cb *cb;
int err;
@@ -1585,7 +1633,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
memset(&fld, 0, sizeof(fld));
fld.flowidn_proto = DNPROTO_NSP;
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb == NULL)
return -ENOBUFS;
skb_reset_mac_header(skb);
@@ -1663,13 +1711,16 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct dn_route *rt;
int h, s_h;
int idx, s_idx;
+ struct rtmsg *rtm;
if (!net_eq(net, &init_net))
return 0;
- if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
+ if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
return -EINVAL;
- if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
+
+ rtm = nlmsg_data(cb->nlh);
+ if (!(rtm->rtm_flags & RTM_F_CLONED))
return 0;
s_h = cb->args[0];
@@ -1769,12 +1820,11 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
- rt->dst.dev ? rt->dst.dev->name : "*",
- dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
- dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
- atomic_read(&rt->dst.__refcnt),
- rt->dst.__use,
- (int) dst_metric(&rt->dst, RTAX_RTT));
+ rt->dst.dev ? rt->dst.dev->name : "*",
+ dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
+ dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
+ atomic_read(&rt->dst.__refcnt),
+ rt->dst.__use, 0);
return 0;
}
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 650f3380c98a..16c986ab1228 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -297,61 +297,75 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
{
struct rtmsg *rtm;
struct nlmsghdr *nlh;
- unsigned char *b = skb_tail_pointer(skb);
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
- rtm = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ rtm = nlmsg_data(nlh);
rtm->rtm_family = AF_DECnet;
rtm->rtm_dst_len = dst_len;
rtm->rtm_src_len = 0;
rtm->rtm_tos = 0;
rtm->rtm_table = tb_id;
- RTA_PUT_U32(skb, RTA_TABLE, tb_id);
rtm->rtm_flags = fi->fib_flags;
rtm->rtm_scope = scope;
rtm->rtm_type = type;
- if (rtm->rtm_dst_len)
- RTA_PUT(skb, RTA_DST, 2, dst);
rtm->rtm_protocol = fi->fib_protocol;
- if (fi->fib_priority)
- RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
+
+ if (nla_put_u32(skb, RTA_TABLE, tb_id) < 0)
+ goto errout;
+
+ if (rtm->rtm_dst_len &&
+ nla_put(skb, RTA_DST, 2, dst) < 0)
+ goto errout;
+
+ if (fi->fib_priority &&
+ nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority) < 0)
+ goto errout;
+
if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
- goto rtattr_failure;
+ goto errout;
+
if (fi->fib_nhs == 1) {
- if (fi->fib_nh->nh_gw)
- RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw);
- if (fi->fib_nh->nh_oif)
- RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
+ if (fi->fib_nh->nh_gw &&
+ nla_put_le16(skb, RTA_GATEWAY, fi->fib_nh->nh_gw) < 0)
+ goto errout;
+
+ if (fi->fib_nh->nh_oif &&
+ nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif) < 0)
+ goto errout;
}
+
if (fi->fib_nhs > 1) {
struct rtnexthop *nhp;
- struct rtattr *mp_head;
- if (skb_tailroom(skb) <= RTA_SPACE(0))
- goto rtattr_failure;
- mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0));
+ struct nlattr *mp_head;
+
+ if (!(mp_head = nla_nest_start(skb, RTA_MULTIPATH)))
+ goto errout;
for_nexthops(fi) {
- if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
- goto rtattr_failure;
- nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
+ if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp))))
+ goto errout;
+
nhp->rtnh_flags = nh->nh_flags & 0xFF;
nhp->rtnh_hops = nh->nh_weight - 1;
nhp->rtnh_ifindex = nh->nh_oif;
- if (nh->nh_gw)
- RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw);
+
+ if (nh->nh_gw &&
+ nla_put_le16(skb, RTA_GATEWAY, nh->nh_gw) < 0)
+ goto errout;
+
nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
} endfor_nexthops(fi);
- mp_head->rta_type = RTA_MULTIPATH;
- mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
- }
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
- return skb->len;
+ nla_nest_end(skb, mp_head);
+ }
+ return nlmsg_end(skb, nlh);
-nlmsg_failure:
-rtattr_failure:
- nlmsg_trim(skb, b);
+errout:
+ nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
@@ -476,7 +490,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
return 0;
if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) &&
- ((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED)
+ ((struct rtmsg *)nlmsg_data(cb->nlh))->rtm_flags&RTM_F_CLONED)
return dn_cache_dump(skb, cb);
s_h = cb->args[0];
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 44b890936fc0..11db0ecf342f 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -42,23 +42,23 @@ static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp)
size = NLMSG_SPACE(rt_skb->len);
size += NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg));
skb = alloc_skb(size, GFP_ATOMIC);
- if (!skb)
- goto nlmsg_failure;
+ if (!skb) {
+ *errp = -ENOMEM;
+ return NULL;
+ }
old_tail = skb->tail;
- nlh = NLMSG_PUT(skb, 0, 0, 0, size - sizeof(*nlh));
+ nlh = nlmsg_put(skb, 0, 0, 0, size - sizeof(*nlh), 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ *errp = -ENOMEM;
+ return NULL;
+ }
rtm = (struct nf_dn_rtmsg *)NLMSG_DATA(nlh);
rtm->nfdn_ifindex = rt_skb->dev->ifindex;
ptr = NFDN_RTMSG(rtm);
skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len);
nlh->nlmsg_len = skb->tail - old_tail;
return skb;
-
-nlmsg_failure:
- if (skb)
- kfree_skb(skb);
- *errp = -ENOMEM;
- net_err_ratelimited("dn_rtmsg: error creating netlink message\n");
- return NULL;
}
static void dnrmg_send_peer(struct sk_buff *skb)
@@ -117,7 +117,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
static struct nf_hook_ops dnrmg_ops __read_mostly = {
.hook = dnrmg_hook,
- .pf = PF_DECnet,
+ .pf = NFPROTO_DECNET,
.hooknum = NF_DN_ROUTE,
.priority = NF_DN_PRI_DNRTMSG,
};
@@ -125,11 +125,13 @@ static struct nf_hook_ops dnrmg_ops __read_mostly = {
static int __init dn_rtmsg_init(void)
{
int rv = 0;
+ struct netlink_kernel_cfg cfg = {
+ .groups = DNRNG_NLGRP_MAX,
+ .input = dnrmg_receive_user_skb,
+ };
dnrmg = netlink_kernel_create(&init_net,
- NETLINK_DNRTMSG, DNRNG_NLGRP_MAX,
- dnrmg_receive_user_skb,
- NULL, THIS_MODULE);
+ NETLINK_DNRTMSG, THIS_MODULE, &cfg);
if (dnrmg == NULL) {
printk(KERN_ERR "dn_rtmsg: Cannot create netlink socket");
return -ENOMEM;
diff --git a/net/ethernet/Makefile b/net/ethernet/Makefile
index 7cef1d8ace27..323177505404 100644
--- a/net/ethernet/Makefile
+++ b/net/ethernet/Makefile
@@ -3,5 +3,3 @@
#
obj-y += eth.o
-obj-$(subst m,y,$(CONFIG_IPX)) += pe2.o
-obj-$(subst m,y,$(CONFIG_ATALK)) += pe2.o
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 36e58800a9e3..4efad533e5f6 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -232,6 +232,7 @@ EXPORT_SYMBOL(eth_header_parse);
* @neigh: source neighbour
* @hh: destination cache entry
* @type: Ethernet type field
+ *
* Create an Ethernet header template from the neighbour.
*/
int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
@@ -274,6 +275,7 @@ EXPORT_SYMBOL(eth_header_cache_update);
* eth_mac_addr - set new Ethernet hardware address
* @dev: network device
* @p: socket address
+ *
* Change hardware address of device.
*
* This doesn't change hardware matching, so needs to be overridden
@@ -283,7 +285,7 @@ int eth_mac_addr(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
- if (netif_running(dev))
+ if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
return -EBUSY;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -331,6 +333,7 @@ const struct header_ops eth_header_ops ____cacheline_aligned = {
/**
* ether_setup - setup Ethernet network device
* @dev: network device
+ *
* Fill in the fields of the device structure with Ethernet-generic values.
*/
void ether_setup(struct net_device *dev)
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 32eb4179e8fa..6a095225148e 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -55,7 +55,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
#include <net/af_ieee802154.h>
#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
@@ -114,7 +113,6 @@ struct lowpan_dev_record {
struct lowpan_fragment {
struct sk_buff *skb; /* skb to be assembled */
- spinlock_t lock; /* concurency lock */
u16 length; /* length to be assemled */
u32 bytes_rcv; /* bytes received */
u16 tag; /* current fragment tag */
@@ -124,7 +122,7 @@ struct lowpan_fragment {
static unsigned short fragment_tag;
static LIST_HEAD(lowpan_fragments);
-spinlock_t flist_lock;
+static DEFINE_SPINLOCK(flist_lock);
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
@@ -240,8 +238,7 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
}
- pr_debug("(%s): uncompressing %d + %d => ", __func__, prefcount,
- postcount);
+ pr_debug("uncompressing %d + %d => ", prefcount, postcount);
lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
return 0;
@@ -252,13 +249,11 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
{
struct udphdr *uh = udp_hdr(skb);
- pr_debug("(%s): UDP header compression\n", __func__);
-
if (((uh->source & LOWPAN_NHC_UDP_4BIT_MASK) ==
LOWPAN_NHC_UDP_4BIT_PORT) &&
((uh->dest & LOWPAN_NHC_UDP_4BIT_MASK) ==
LOWPAN_NHC_UDP_4BIT_PORT)) {
- pr_debug("(%s): both ports compression to 4 bits\n", __func__);
+ pr_debug("UDP header: both ports compression to 4 bits\n");
**hc06_ptr = LOWPAN_NHC_UDP_CS_P_11;
**(hc06_ptr + 1) = /* subtraction is faster */
(u8)((uh->dest - LOWPAN_NHC_UDP_4BIT_PORT) +
@@ -266,20 +261,20 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
*hc06_ptr += 2;
} else if ((uh->dest & LOWPAN_NHC_UDP_8BIT_MASK) ==
LOWPAN_NHC_UDP_8BIT_PORT) {
- pr_debug("(%s): remove 8 bits of dest\n", __func__);
+ pr_debug("UDP header: remove 8 bits of dest\n");
**hc06_ptr = LOWPAN_NHC_UDP_CS_P_01;
memcpy(*hc06_ptr + 1, &uh->source, 2);
**(hc06_ptr + 3) = (u8)(uh->dest - LOWPAN_NHC_UDP_8BIT_PORT);
*hc06_ptr += 4;
} else if ((uh->source & LOWPAN_NHC_UDP_8BIT_MASK) ==
LOWPAN_NHC_UDP_8BIT_PORT) {
- pr_debug("(%s): remove 8 bits of source\n", __func__);
+ pr_debug("UDP header: remove 8 bits of source\n");
**hc06_ptr = LOWPAN_NHC_UDP_CS_P_10;
memcpy(*hc06_ptr + 1, &uh->dest, 2);
**(hc06_ptr + 3) = (u8)(uh->source - LOWPAN_NHC_UDP_8BIT_PORT);
*hc06_ptr += 4;
} else {
- pr_debug("(%s): can't compress header\n", __func__);
+ pr_debug("UDP header: can't compress\n");
**hc06_ptr = LOWPAN_NHC_UDP_CS_P_00;
memcpy(*hc06_ptr + 1, &uh->source, 2);
memcpy(*hc06_ptr + 3, &uh->dest, 2);
@@ -291,25 +286,26 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
*hc06_ptr += 2;
}
-static u8 lowpan_fetch_skb_u8(struct sk_buff *skb)
+static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
{
- u8 ret;
+ if (unlikely(!pskb_may_pull(skb, 1)))
+ return -EINVAL;
- ret = skb->data[0];
+ *val = skb->data[0];
skb_pull(skb, 1);
- return ret;
+ return 0;
}
-static u16 lowpan_fetch_skb_u16(struct sk_buff *skb)
+static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
{
- u16 ret;
-
- BUG_ON(!pskb_may_pull(skb, 2));
+ if (unlikely(!pskb_may_pull(skb, 2)))
+ return -EINVAL;
- ret = skb->data[0] | (skb->data[1] << 8);
+ *val = (skb->data[0] << 8) | skb->data[1];
skb_pull(skb, 2);
- return ret;
+
+ return 0;
}
static int
@@ -318,10 +314,14 @@ lowpan_uncompress_udp_header(struct sk_buff *skb)
struct udphdr *uh = udp_hdr(skb);
u8 tmp;
- tmp = lowpan_fetch_skb_u8(skb);
+ if (!uh)
+ goto err;
+
+ if (lowpan_fetch_skb_u8(skb, &tmp))
+ goto err;
if ((tmp & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
- pr_debug("(%s): UDP header uncompression\n", __func__);
+ pr_debug("UDP header uncompression\n");
switch (tmp & LOWPAN_NHC_UDP_CS_P_11) {
case LOWPAN_NHC_UDP_CS_P_00:
memcpy(&uh->source, &skb->data[0], 2);
@@ -347,19 +347,19 @@ lowpan_uncompress_udp_header(struct sk_buff *skb)
skb_pull(skb, 1);
break;
default:
- pr_debug("(%s) ERROR: unknown UDP format\n", __func__);
+ pr_debug("ERROR: unknown UDP format\n");
goto err;
break;
}
- pr_debug("(%s): uncompressed UDP ports: src = %d, dst = %d\n",
- __func__, uh->source, uh->dest);
+ pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
+ uh->source, uh->dest);
/* copy checksum */
memcpy(&uh->check, &skb->data[0], 2);
skb_pull(skb, 2);
} else {
- pr_debug("(%s): ERROR: unsupported NH format\n", __func__);
+ pr_debug("ERROR: unsupported NH format\n");
goto err;
}
@@ -392,10 +392,9 @@ static int lowpan_header_create(struct sk_buff *skb,
hdr = ipv6_hdr(skb);
hc06_ptr = head + 2;
- pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
- "\tnexthdr = 0x%02x\n\thop_lim = %d\n", __func__,
- hdr->version, ntohs(hdr->payload_len), hdr->nexthdr,
- hdr->hop_limit);
+ pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n"
+ "\tnexthdr = 0x%02x\n\thop_lim = %d\n", hdr->version,
+ ntohs(hdr->payload_len), hdr->nexthdr, hdr->hop_limit);
lowpan_raw_dump_table(__func__, "raw skb network header dump",
skb_network_header(skb), sizeof(struct ipv6hdr));
@@ -490,28 +489,28 @@ static int lowpan_header_create(struct sk_buff *skb,
break;
default:
*hc06_ptr = hdr->hop_limit;
+ hc06_ptr += 1;
break;
}
/* source address compression */
if (is_addr_unspecified(&hdr->saddr)) {
- pr_debug("(%s): source address is unspecified, setting SAC\n",
- __func__);
+ pr_debug("source address is unspecified, setting SAC\n");
iphc1 |= LOWPAN_IPHC_SAC;
/* TODO: context lookup */
} else if (is_addr_link_local(&hdr->saddr)) {
- pr_debug("(%s): source address is link-local\n", __func__);
+ pr_debug("source address is link-local\n");
iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
LOWPAN_IPHC_SAM_BIT, &hdr->saddr, saddr);
} else {
- pr_debug("(%s): send the full source address\n", __func__);
+ pr_debug("send the full source address\n");
memcpy(hc06_ptr, &hdr->saddr.s6_addr16[0], 16);
hc06_ptr += 16;
}
/* destination address compression */
if (is_addr_mcast(&hdr->daddr)) {
- pr_debug("(%s): destination address is multicast", __func__);
+ pr_debug("destination address is multicast: ");
iphc1 |= LOWPAN_IPHC_M;
if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
pr_debug("compressed to 1 octet\n");
@@ -540,14 +539,13 @@ static int lowpan_header_create(struct sk_buff *skb,
hc06_ptr += 16;
}
} else {
- pr_debug("(%s): destination address is unicast: ", __func__);
/* TODO: context lookup */
if (is_addr_link_local(&hdr->daddr)) {
- pr_debug("destination address is link-local\n");
+ pr_debug("dest address is unicast and link-local\n");
iphc1 |= lowpan_compress_addr_64(&hc06_ptr,
LOWPAN_IPHC_DAM_BIT, &hdr->daddr, daddr);
} else {
- pr_debug("using full address\n");
+ pr_debug("dest address is unicast: using full one\n");
memcpy(hc06_ptr, &hdr->daddr.s6_addr16[0], 16);
hc06_ptr += 16;
}
@@ -639,19 +637,15 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
{
struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
- pr_debug("%s: timer expired for frame with tag %d\n", __func__,
- entry->tag);
+ pr_debug("timer expired for frame with tag %d\n", entry->tag);
- spin_lock(&flist_lock);
list_del(&entry->list);
- spin_unlock(&flist_lock);
-
dev_kfree_skb(entry->skb);
kfree(entry);
}
static struct lowpan_fragment *
-lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag)
+lowpan_alloc_new_frame(struct sk_buff *skb, u8 len, u16 tag)
{
struct lowpan_fragment *frame;
@@ -662,12 +656,12 @@ lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag)
INIT_LIST_HEAD(&frame->list);
- frame->length = (iphc0 & 7) | (len << 3);
+ frame->length = len;
frame->tag = tag;
/* allocate buffer for frame assembling */
- frame->skb = alloc_skb(frame->length +
- sizeof(struct ipv6hdr), GFP_ATOMIC);
+ frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
+ sizeof(struct ipv6hdr));
if (!frame->skb)
goto skb_err;
@@ -710,7 +704,9 @@ lowpan_process_data(struct sk_buff *skb)
/* at least two bytes will be used for the encoding */
if (skb->len < 2)
goto drop;
- iphc0 = lowpan_fetch_skb_u8(skb);
+
+ if (lowpan_fetch_skb_u8(skb, &iphc0))
+ goto drop;
/* fragments assembling */
switch (iphc0 & LOWPAN_DISPATCH_MASK) {
@@ -718,18 +714,23 @@ lowpan_process_data(struct sk_buff *skb)
case LOWPAN_DISPATCH_FRAGN:
{
struct lowpan_fragment *frame;
- u8 len, offset;
- u16 tag;
+ /* slen stores the rightmost 8 bits of the 11 bits length */
+ u8 slen, offset;
+ u16 len, tag;
bool found = false;
- len = lowpan_fetch_skb_u8(skb); /* frame length */
- tag = lowpan_fetch_skb_u16(skb);
+ if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
+ lowpan_fetch_skb_u16(skb, &tag)) /* fragment tag */
+ goto drop;
+
+ /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */
+ len = ((iphc0 & 7) << 8) | slen;
/*
* check if frame assembling with the same tag is
* already in progress
*/
- spin_lock(&flist_lock);
+ spin_lock_bh(&flist_lock);
list_for_each_entry(frame, &lowpan_fragments, list)
if (frame->tag == tag) {
@@ -739,7 +740,7 @@ lowpan_process_data(struct sk_buff *skb)
/* alloc new frame structure */
if (!found) {
- frame = lowpan_alloc_new_frame(skb, iphc0, len, tag);
+ frame = lowpan_alloc_new_frame(skb, len, tag);
if (!frame)
goto unlock_and_drop;
}
@@ -747,7 +748,8 @@ lowpan_process_data(struct sk_buff *skb)
if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
goto unlock_and_drop;
- offset = lowpan_fetch_skb_u8(skb); /* fetch offset */
+ if (lowpan_fetch_skb_u8(skb, &offset)) /* fetch offset */
+ goto unlock_and_drop;
/* if payload fits buffer, copy it */
if (likely((offset * 8 + skb->len) <= frame->length))
@@ -762,17 +764,20 @@ lowpan_process_data(struct sk_buff *skb)
if ((frame->bytes_rcv == frame->length) &&
frame->timer.expires > jiffies) {
/* if timer haven't expired - first of all delete it */
- del_timer(&frame->timer);
+ del_timer_sync(&frame->timer);
list_del(&frame->list);
- spin_unlock(&flist_lock);
+ spin_unlock_bh(&flist_lock);
dev_kfree_skb(skb);
skb = frame->skb;
kfree(frame);
- iphc0 = lowpan_fetch_skb_u8(skb);
+
+ if (lowpan_fetch_skb_u8(skb, &iphc0))
+ goto drop;
+
break;
}
- spin_unlock(&flist_lock);
+ spin_unlock_bh(&flist_lock);
return kfree_skb(skb), 0;
}
@@ -780,20 +785,19 @@ lowpan_process_data(struct sk_buff *skb)
break;
}
- iphc1 = lowpan_fetch_skb_u8(skb);
+ if (lowpan_fetch_skb_u8(skb, &iphc1))
+ goto drop;
_saddr = mac_cb(skb)->sa.hwaddr;
_daddr = mac_cb(skb)->da.hwaddr;
- pr_debug("(%s): iphc0 = %02x, iphc1 = %02x\n", __func__, iphc0, iphc1);
+ pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
/* another if the CID flag is set */
if (iphc1 & LOWPAN_IPHC_CID) {
- pr_debug("(%s): CID flag is set, increase header with one\n",
- __func__);
- if (!skb->len)
+ pr_debug("CID flag is set, increase header with one\n");
+ if (lowpan_fetch_skb_u8(skb, &num_context))
goto drop;
- num_context = lowpan_fetch_skb_u8(skb);
}
hdr.version = 6;
@@ -805,9 +809,9 @@ lowpan_process_data(struct sk_buff *skb)
* ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
*/
case 0: /* 00b */
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &tmp))
goto drop;
- tmp = lowpan_fetch_skb_u8(skb);
+
memcpy(&hdr.flow_lbl, &skb->data[0], 3);
skb_pull(skb, 3);
hdr.priority = ((tmp >> 2) & 0x0f);
@@ -819,9 +823,9 @@ lowpan_process_data(struct sk_buff *skb)
* ECN + DSCP (1 byte), Flow Label is elided
*/
case 1: /* 10b */
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &tmp))
goto drop;
- tmp = lowpan_fetch_skb_u8(skb);
+
hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
hdr.flow_lbl[1] = 0;
@@ -832,9 +836,9 @@ lowpan_process_data(struct sk_buff *skb)
* ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
*/
case 2: /* 01b */
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &tmp))
goto drop;
- tmp = lowpan_fetch_skb_u8(skb);
+
hdr.flow_lbl[0] = (skb->data[0] & 0x0F) | ((tmp >> 2) & 0x30);
memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
skb_pull(skb, 2);
@@ -853,27 +857,26 @@ lowpan_process_data(struct sk_buff *skb)
/* Next Header */
if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
/* Next header is carried inline */
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &(hdr.nexthdr)))
goto drop;
- hdr.nexthdr = lowpan_fetch_skb_u8(skb);
- pr_debug("(%s): NH flag is set, next header is carried "
- "inline: %02x\n", __func__, hdr.nexthdr);
+
+ pr_debug("NH flag is set, next header carried inline: %02x\n",
+ hdr.nexthdr);
}
/* Hop Limit */
if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I)
hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
else {
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &(hdr.hop_limit)))
goto drop;
- hdr.hop_limit = lowpan_fetch_skb_u8(skb);
}
/* Extract SAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
/* Source address uncompression */
- pr_debug("(%s): source address stateless compression\n", __func__);
+ pr_debug("source address stateless compression\n");
err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
lowpan_unc_llconf[tmp], skb->data);
if (err)
@@ -885,19 +888,15 @@ lowpan_process_data(struct sk_buff *skb)
/* check for Multicast Compression */
if (iphc1 & LOWPAN_IPHC_M) {
if (iphc1 & LOWPAN_IPHC_DAC) {
- pr_debug("(%s): destination address context-based "
- "multicast compression\n", __func__);
+ pr_debug("dest: context-based mcast compression\n");
/* TODO: implement this */
} else {
u8 prefix[] = {0xff, 0x02};
- pr_debug("(%s): destination address non-context-based"
- " multicast compression\n", __func__);
+ pr_debug("dest: non context-based mcast compression\n");
if (0 < tmp && tmp < 3) {
- if (!skb->len)
+ if (lowpan_fetch_skb_u8(skb, &prefix[1]))
goto drop;
- else
- prefix[1] = lowpan_fetch_skb_u8(skb);
}
err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
@@ -906,8 +905,7 @@ lowpan_process_data(struct sk_buff *skb)
goto drop;
}
} else {
- pr_debug("(%s): destination address stateless compression\n",
- __func__);
+ pr_debug("dest: stateless compression\n");
err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
lowpan_unc_llconf[tmp], skb->data);
if (err)
@@ -922,11 +920,11 @@ lowpan_process_data(struct sk_buff *skb)
/* Not fragmented package */
hdr.payload_len = htons(skb->len);
- pr_debug("(%s): skb headroom size = %d, data length = %d\n", __func__,
- skb_headroom(skb), skb->len);
+ pr_debug("skb headroom size = %d, data length = %d\n",
+ skb_headroom(skb), skb->len);
- pr_debug("(%s): IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
- "nexthdr = 0x%02x\n\thop_lim = %d\n", __func__, hdr.version,
+ pr_debug("IPv6 header dump:\n\tversion = %d\n\tlength = %d\n\t"
+ "nexthdr = 0x%02x\n\thop_lim = %d\n", hdr.version,
ntohs(hdr.payload_len), hdr.nexthdr, hdr.hop_limit);
lowpan_raw_dump_table(__func__, "raw header dump", (u8 *)&hdr,
@@ -934,12 +932,25 @@ lowpan_process_data(struct sk_buff *skb)
return lowpan_skb_deliver(skb, &hdr);
unlock_and_drop:
- spin_unlock(&flist_lock);
+ spin_unlock_bh(&flist_lock);
drop:
kfree_skb(skb);
return -EINVAL;
}
+static int lowpan_set_address(struct net_device *dev, void *p)
+{
+ struct sockaddr *sa = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* TODO: validate addr */
+ memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+
+ return 0;
+}
+
static int lowpan_get_mac_header_length(struct sk_buff *skb)
{
/*
@@ -997,10 +1008,10 @@ lowpan_skb_fragmentation(struct sk_buff *skb)
tag = fragment_tag++;
/* first fragment header */
- head[0] = LOWPAN_DISPATCH_FRAG1 | (payload_length & 0x7);
- head[1] = (payload_length >> 3) & 0xff;
- head[2] = tag & 0xff;
- head[3] = tag >> 8;
+ head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
+ head[1] = payload_length & 0xff;
+ head[2] = tag >> 8;
+ head[3] = tag & 0xff;
err = lowpan_fragment_xmit(skb, head, header_length, 0, 0);
@@ -1028,11 +1039,11 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
{
int err = -1;
- pr_debug("(%s): package xmit\n", __func__);
+ pr_debug("package xmit\n");
skb->dev = lowpan_dev_info(dev)->real_dev;
if (skb->dev == NULL) {
- pr_debug("(%s) ERROR: no real wpan device found\n", __func__);
+ pr_debug("ERROR: no real wpan device found\n");
goto error;
}
@@ -1041,14 +1052,13 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
- pr_debug("(%s): frame is too big, fragmentation is needed\n",
- __func__);
+ pr_debug("frame is too big, fragmentation is needed\n");
err = lowpan_skb_fragmentation(skb);
error:
dev_kfree_skb(skb);
out:
if (err < 0)
- pr_debug("(%s): ERROR: xmit failed\n", __func__);
+ pr_debug("ERROR: xmit failed\n");
return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK);
}
@@ -1083,7 +1093,7 @@ static struct header_ops lowpan_header_ops = {
static const struct net_device_ops lowpan_netdev_ops = {
.ndo_start_xmit = lowpan_xmit,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = lowpan_set_address,
};
static struct ieee802154_mlme_ops lowpan_mlme = {
@@ -1094,8 +1104,6 @@ static struct ieee802154_mlme_ops lowpan_mlme = {
static void lowpan_setup(struct net_device *dev)
{
- pr_debug("(%s)\n", __func__);
-
dev->addr_len = IEEE802154_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
dev->type = ARPHRD_IEEE802154;
@@ -1115,8 +1123,6 @@ static void lowpan_setup(struct net_device *dev)
static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
{
- pr_debug("(%s)\n", __func__);
-
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != IEEE802154_ADDR_LEN)
return -EINVAL;
@@ -1157,7 +1163,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
struct net_device *real_dev;
struct lowpan_dev_record *entry;
- pr_debug("(%s)\n", __func__);
+ pr_debug("adding new link\n");
if (!tb[IFLA_LINK])
return -EINVAL;
@@ -1183,8 +1189,6 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
list_add_tail(&entry->list, &lowpan_devices);
mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx);
- spin_lock_init(&flist_lock);
-
register_netdevice(dev);
return 0;
@@ -1195,19 +1199,9 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head)
struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev);
struct net_device *real_dev = lowpan_dev->real_dev;
struct lowpan_dev_record *entry, *tmp;
- struct lowpan_fragment *frame, *tframe;
ASSERT_RTNL();
- spin_lock(&flist_lock);
- list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
- del_timer(&frame->timer);
- list_del(&frame->list);
- dev_kfree_skb(frame->skb);
- kfree(frame);
- }
- spin_unlock(&flist_lock);
-
mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx);
list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) {
if (entry->ldev == dev) {
@@ -1252,8 +1246,6 @@ static int __init lowpan_init_module(void)
{
int err = 0;
- pr_debug("(%s)\n", __func__);
-
err = lowpan_netlink_init();
if (err < 0)
goto out;
@@ -1265,11 +1257,24 @@ out:
static void __exit lowpan_cleanup_module(void)
{
- pr_debug("(%s)\n", __func__);
+ struct lowpan_fragment *frame, *tframe;
lowpan_netlink_fini();
dev_remove_pack(&lowpan_packet_type);
+
+ /* Now 6lowpan packet_type is removed, so no new fragments are
+ * expected on RX, therefore that's the time to clean incomplete
+ * fragments.
+ */
+ spin_lock_bh(&flist_lock);
+ list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
+ del_timer_sync(&frame->timer);
+ list_del(&frame->list);
+ dev_kfree_skb(frame->skb);
+ kfree(frame);
+ }
+ spin_unlock_bh(&flist_lock);
}
module_init(lowpan_init_module);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 6fbb2ad7bb6d..16705611589a 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -230,6 +230,12 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
mtu = dev->mtu;
pr_debug("name = %s, mtu = %u\n", dev->name, mtu);
+ if (size > mtu) {
+ pr_debug("size = %Zu, mtu = %u\n", size, mtu);
+ err = -EINVAL;
+ goto out_dev;
+ }
+
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = sock_alloc_send_skb(sk, hlen + tlen + size,
@@ -258,12 +264,6 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
if (err < 0)
goto out_skb;
- if (size > mtu) {
- pr_debug("size = %Zu, mtu = %u\n", size, mtu);
- err = -EINVAL;
- goto out_skb;
- }
-
skb->dev = dev;
skb->sk = sk;
skb->protocol = htons(ETH_P_IEEE802154);
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index c8097ae2482f..97351e1d07a4 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -44,7 +44,7 @@ struct genl_family nl802154_family = {
struct sk_buff *ieee802154_nl_create(int flags, u8 req)
{
void *hdr;
- struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
unsigned long f;
if (!msg)
@@ -80,7 +80,7 @@ struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
int flags, u8 req)
{
void *hdr;
- struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ struct sk_buff *msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return NULL;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index ca92587720f4..1e9917124e75 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -530,7 +530,7 @@ static int ieee802154_list_iface(struct sk_buff *skb,
if (!dev)
return -ENODEV;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
goto out_dev;
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index eed291626da6..d54be34cca94 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -101,7 +101,7 @@ static int ieee802154_list_phy(struct sk_buff *skb,
if (!phy)
return -ENODEV;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
goto out_dev;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 20f1cb5c8aba..5a19aeb86094 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -310,6 +310,17 @@ config SYN_COOKIES
If unsure, say N.
+config NET_IPVTI
+ tristate "Virtual (secure) IP: tunneling"
+ select INET_TUNNEL
+ depends on INET_XFRM_MODE_TUNNEL
+ ---help---
+ Tunneling means encapsulating data of one protocol type within
+ another protocol and sending it over a channel that understands the
+ encapsulating protocol. This can be used with xfrm mode tunnel to give
+ the notion of a secure tunnel for IPSEC and then use routing protocol
+ on top.
+
config INET_AH
tristate "IP: AH transformation"
select XFRM_ALGO
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index ff75d3bbcd6a..ae2ccf2890e4 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -7,7 +7,7 @@ obj-y := route.o inetpeer.o protocol.o \
ip_output.o ip_sockglue.o inet_hashtables.o \
inet_timewait_sock.o inet_connection_sock.o \
tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
- tcp_minisocks.o tcp_cong.o \
+ tcp_minisocks.o tcp_cong.o tcp_metrics.o tcp_fastopen.o \
datagram.o raw.o udp.o udplite.o \
arp.o icmp.o devinet.o af_inet.o igmp.o \
fib_frontend.o fib_semantics.o fib_trie.o \
@@ -20,6 +20,7 @@ obj-$(CONFIG_IP_MROUTE) += ipmr.o
obj-$(CONFIG_NET_IPIP) += ipip.o
obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
obj-$(CONFIG_NET_IPGRE) += ip_gre.o
+obj-$(CONFIG_NET_IPVTI) += ip_vti.o
obj-$(CONFIG_SYN_COOKIES) += syncookies.o
obj-$(CONFIG_INET_AH) += ah4.o
obj-$(CONFIG_INET_ESP) += esp4.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c8f7aee587d1..fe4582ca969a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -157,6 +157,7 @@ void inet_sock_destruct(struct sock *sk)
kfree(rcu_dereference_protected(inet->inet_opt, 1));
dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
+ dst_release(sk->sk_rx_dst);
sk_refcnt_debug_dec(sk);
}
EXPORT_SYMBOL(inet_sock_destruct);
@@ -242,20 +243,18 @@ void build_ehash_secret(void)
}
EXPORT_SYMBOL(build_ehash_secret);
-static inline int inet_netns_ok(struct net *net, int protocol)
+static inline int inet_netns_ok(struct net *net, __u8 protocol)
{
- int hash;
const struct net_protocol *ipprot;
if (net_eq(net, &init_net))
return 1;
- hash = protocol & (MAX_INET_PROTOS - 1);
- ipprot = rcu_dereference(inet_protos[hash]);
-
- if (ipprot == NULL)
+ ipprot = rcu_dereference(inet_protos[protocol]);
+ if (ipprot == NULL) {
/* raw IP is OK */
return 1;
+ }
return ipprot->netns_ok;
}
@@ -553,15 +552,16 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
if (!inet_sk(sk)->inet_num && inet_autobind(sk))
return -EAGAIN;
- return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
+ return sk->sk_prot->connect(sk, uaddr, addr_len);
}
EXPORT_SYMBOL(inet_dgram_connect);
-static long inet_wait_for_connect(struct sock *sk, long timeo)
+static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
{
DEFINE_WAIT(wait);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ sk->sk_write_pending += writebias;
/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
@@ -577,6 +577,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
}
finish_wait(sk_sleep(sk), &wait);
+ sk->sk_write_pending -= writebias;
return timeo;
}
@@ -584,8 +585,8 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
* Connect to a remote host. There is regrettably still a little
* TCP 'magic' in here.
*/
-int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
- int addr_len, int flags)
+int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
{
struct sock *sk = sock->sk;
int err;
@@ -594,8 +595,6 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
if (addr_len < sizeof(uaddr->sa_family))
return -EINVAL;
- lock_sock(sk);
-
if (uaddr->sa_family == AF_UNSPEC) {
err = sk->sk_prot->disconnect(sk, flags);
sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
@@ -635,8 +634,12 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+ int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
+ tcp_sk(sk)->fastopen_req &&
+ tcp_sk(sk)->fastopen_req->data ? 1 : 0;
+
/* Error code is set above */
- if (!timeo || !inet_wait_for_connect(sk, timeo))
+ if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
goto out;
err = sock_intr_errno(timeo);
@@ -658,7 +661,6 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
sock->state = SS_CONNECTED;
err = 0;
out:
- release_sock(sk);
return err;
sock_error:
@@ -668,6 +670,18 @@ sock_error:
sock->state = SS_DISCONNECTING;
goto out;
}
+EXPORT_SYMBOL(__inet_stream_connect);
+
+int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ int err;
+
+ lock_sock(sock->sk);
+ err = __inet_stream_connect(sock, uaddr, addr_len, flags);
+ release_sock(sock->sk);
+ return err;
+}
EXPORT_SYMBOL(inet_stream_connect);
/*
@@ -1216,8 +1230,8 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
static int inet_gso_send_check(struct sk_buff *skb)
{
- const struct iphdr *iph;
const struct net_protocol *ops;
+ const struct iphdr *iph;
int proto;
int ihl;
int err = -EINVAL;
@@ -1236,7 +1250,7 @@ static int inet_gso_send_check(struct sk_buff *skb)
__skb_pull(skb, ihl);
skb_reset_transport_header(skb);
iph = ip_hdr(skb);
- proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ proto = iph->protocol;
err = -EPROTONOSUPPORT;
rcu_read_lock();
@@ -1253,8 +1267,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
- struct iphdr *iph;
const struct net_protocol *ops;
+ struct iphdr *iph;
int proto;
int ihl;
int id;
@@ -1286,7 +1300,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
skb_reset_transport_header(skb);
iph = ip_hdr(skb);
id = ntohs(iph->id);
- proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ proto = iph->protocol;
segs = ERR_PTR(-EPROTONOSUPPORT);
rcu_read_lock();
@@ -1340,7 +1354,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
goto out;
}
- proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ proto = iph->protocol;
rcu_read_lock();
ops = rcu_dereference(inet_protos[proto]);
@@ -1398,11 +1412,11 @@ out:
static int inet_gro_complete(struct sk_buff *skb)
{
- const struct net_protocol *ops;
+ __be16 newlen = htons(skb->len - skb_network_offset(skb));
struct iphdr *iph = ip_hdr(skb);
- int proto = iph->protocol & (MAX_INET_PROTOS - 1);
+ const struct net_protocol *ops;
+ int proto = iph->protocol;
int err = -ENOSYS;
- __be16 newlen = htons(skb->len - skb_network_offset(skb));
csum_replace2(&iph->check, iph->tot_len, newlen);
iph->tot_len = newlen;
@@ -1520,14 +1534,15 @@ static const struct net_protocol igmp_protocol = {
#endif
static const struct net_protocol tcp_protocol = {
- .handler = tcp_v4_rcv,
- .err_handler = tcp_v4_err,
- .gso_send_check = tcp_v4_gso_send_check,
- .gso_segment = tcp_tso_segment,
- .gro_receive = tcp4_gro_receive,
- .gro_complete = tcp4_gro_complete,
- .no_policy = 1,
- .netns_ok = 1,
+ .early_demux = tcp_v4_early_demux,
+ .handler = tcp_v4_rcv,
+ .err_handler = tcp_v4_err,
+ .gso_send_check = tcp_v4_gso_send_check,
+ .gso_segment = tcp_tso_segment,
+ .gro_receive = tcp4_gro_receive,
+ .gro_complete = tcp4_gro_complete,
+ .no_policy = 1,
+ .netns_ok = 1,
};
static const struct net_protocol udp_protocol = {
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index e8f2617ecd47..a0d8392491c3 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -398,16 +398,25 @@ static void ah4_err(struct sk_buff *skb, u32 info)
struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
- if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
- icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ switch (icmp_hdr(skb)->type) {
+ case ICMP_DEST_UNREACH:
+ if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ return;
+ case ICMP_REDIRECT:
+ break;
+ default:
return;
+ }
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
ah->spi, IPPROTO_AH, AF_INET);
if (!x)
return;
- pr_debug("pmtu discovery on SA AH/%08x/%08x\n",
- ntohl(ah->spi), ntohl(iph->daddr));
+
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
+ else
+ ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index cda37be02f8d..a0124eb7dbea 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -475,8 +475,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
return 1;
}
- paddr = skb_rtable(skb)->rt_gateway;
-
+ paddr = rt_nexthop(skb_rtable(skb), ip_hdr(skb)->daddr);
if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
paddr, dev))
return 0;
@@ -790,7 +789,8 @@ static int arp_process(struct sk_buff *skb)
* Check for bad requests for 127.x.x.x and requests for multicast
* addresses. If this is one such, delete it.
*/
- if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
+ if (ipv4_is_multicast(tip) ||
+ (!IN_DEV_ROUTE_LOCALNET(in_dev) && ipv4_is_loopback(tip)))
goto out;
/*
@@ -827,7 +827,7 @@ static int arp_process(struct sk_buff *skb)
}
if (arp->ar_op == htons(ARPOP_REQUEST) &&
- ip_route_input_noref(skb, tip, sip, 0, dev) == 0) {
+ ip_route_input(skb, tip, sip, 0, dev) == 0) {
rt = skb_rtable(skb);
addr_type = rt->rt_type;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index c48adc565e92..667c1d4ca984 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
case CIPSO_V4_TAG_LOCAL:
/* This is a non-standard tag that we only allow for
* local connections, so if the incoming interface is
- * not the loopback device drop the packet. */
- if (!(skb->dev->flags & IFF_LOOPBACK)) {
+ * not the loopback device drop the packet. Further,
+ * there is no legitimate reason for setting this from
+ * userspace so reject it if skb is NULL. */
+ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
err_offset = opt_iter;
goto validate_return_locked;
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 10e15a144e95..44bf82e3aef7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1500,7 +1500,8 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
if (cnf == net->ipv4.devconf_dflt)
devinet_copy_dflt_conf(net, i);
- if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+ if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
+ i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
if ((new_value == 0) && (old_value != 0))
rt_cache_flush(net, 0);
}
@@ -1617,6 +1618,8 @@ static struct devinet_sysctl_table {
"force_igmp_version"),
DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
"promote_secondaries"),
+ DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
+ "route_localnet"),
},
};
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index cb982a61536f..b61e9deb7c7e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -484,16 +484,25 @@ static void esp4_err(struct sk_buff *skb, u32 info)
struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
- if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
- icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ switch (icmp_hdr(skb)->type) {
+ case ICMP_DEST_UNREACH:
+ if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ return;
+ case ICMP_REDIRECT:
+ break;
+ default:
return;
+ }
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
esph->spi, IPPROTO_ESP, AF_INET);
if (!x)
return;
- NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
- ntohl(esph->spi), ntohl(iph->daddr));
+
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
+ else
+ ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 3854411fa37c..8732cc7920ed 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -31,6 +31,7 @@
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
+#include <linux/cache.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
@@ -85,6 +86,24 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
tb = fib_trie_table(id);
if (!tb)
return NULL;
+
+ switch (id) {
+ case RT_TABLE_LOCAL:
+ net->ipv4.fib_local = tb;
+ break;
+
+ case RT_TABLE_MAIN:
+ net->ipv4.fib_main = tb;
+ break;
+
+ case RT_TABLE_DEFAULT:
+ net->ipv4.fib_default = tb;
+ break;
+
+ default:
+ break;
+ }
+
h = id & (FIB_TABLE_HASHSZ - 1);
hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
return tb;
@@ -150,10 +169,6 @@ static inline unsigned int __inet_dev_addr_type(struct net *net,
if (ipv4_is_multicast(addr))
return RTN_MULTICAST;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- res.r = NULL;
-#endif
-
local_table = fib_get_table(net, RT_TABLE_LOCAL);
if (local_table) {
ret = RTN_UNICAST;
@@ -180,6 +195,44 @@ unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
}
EXPORT_SYMBOL(inet_dev_addr_type);
+__be32 fib_compute_spec_dst(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct in_device *in_dev;
+ struct fib_result res;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct net *net;
+ int scope;
+
+ rt = skb_rtable(skb);
+ if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) ==
+ RTCF_LOCAL)
+ return ip_hdr(skb)->daddr;
+
+ in_dev = __in_dev_get_rcu(dev);
+ BUG_ON(!in_dev);
+
+ net = dev_net(dev);
+
+ scope = RT_SCOPE_UNIVERSE;
+ if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+ fl4.flowi4_oif = 0;
+ fl4.flowi4_iif = net->loopback_dev->ifindex;
+ fl4.daddr = ip_hdr(skb)->saddr;
+ fl4.saddr = 0;
+ fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+ fl4.flowi4_scope = scope;
+ fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
+ if (!fib_lookup(net, &fl4, &res))
+ return FIB_RES_PREFSRC(net, res);
+ } else {
+ scope = RT_SCOPE_LINK;
+ }
+
+ return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
+}
+
/* Given (packet source, input interface) and optional (dst, oif, tos):
* - (main) check, that source is valid i.e. not broadcast or our local
* address.
@@ -188,17 +241,15 @@ EXPORT_SYMBOL(inet_dev_addr_type);
* - check, that packet arrived from expected physical interface.
* called with rcu_read_lock()
*/
-int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
- int oif, struct net_device *dev, __be32 *spec_dst,
- u32 *itag)
+static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+ u8 tos, int oif, struct net_device *dev,
+ int rpf, struct in_device *idev, u32 *itag)
{
- struct in_device *in_dev;
- struct flowi4 fl4;
+ int ret, no_addr, accept_local;
struct fib_result res;
- int no_addr, rpf, accept_local;
- bool dev_match;
- int ret;
+ struct flowi4 fl4;
struct net *net;
+ bool dev_match;
fl4.flowi4_oif = 0;
fl4.flowi4_iif = oif;
@@ -207,20 +258,10 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
- no_addr = rpf = accept_local = 0;
- in_dev = __in_dev_get_rcu(dev);
- if (in_dev) {
- no_addr = in_dev->ifa_list == NULL;
-
- /* Ignore rp_filter for packets protected by IPsec. */
- rpf = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(in_dev);
-
- accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
- fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
- }
+ no_addr = idev->ifa_list == NULL;
- if (in_dev == NULL)
- goto e_inval;
+ accept_local = IN_DEV_ACCEPT_LOCAL(idev);
+ fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
net = dev_net(dev);
if (fib_lookup(net, &fl4, &res))
@@ -229,7 +270,6 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
if (res.type != RTN_LOCAL || !accept_local)
goto e_inval;
}
- *spec_dst = FIB_RES_PREFSRC(net, res);
fib_combine_itag(itag, &res);
dev_match = false;
@@ -258,17 +298,14 @@ int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
ret = 0;
if (fib_lookup(net, &fl4, &res) == 0) {
- if (res.type == RTN_UNICAST) {
- *spec_dst = FIB_RES_PREFSRC(net, res);
+ if (res.type == RTN_UNICAST)
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
- }
}
return ret;
last_resort:
if (rpf)
goto e_rpf;
- *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
*itag = 0;
return 0;
@@ -278,6 +315,20 @@ e_rpf:
return -EXDEV;
}
+/* Ignore rp_filter for packets protected by IPsec. */
+int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
+ u8 tos, int oif, struct net_device *dev,
+ struct in_device *idev, u32 *itag)
+{
+ int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
+
+ if (!r && !fib_num_tclassid_users(dev_net(dev))) {
+ *itag = 0;
+ return 0;
+ }
+ return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
+}
+
static inline __be32 sk_extract_addr(struct sockaddr *addr)
{
return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
@@ -879,10 +930,6 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
.flowi4_scope = frn->fl_scope,
};
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- res.r = NULL;
-#endif
-
frn->err = -ENOENT;
if (tb) {
local_bh_disable();
@@ -935,8 +982,11 @@ static void nl_fib_input(struct sk_buff *skb)
static int __net_init nl_fib_lookup_init(struct net *net)
{
struct sock *sk;
- sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, 0,
- nl_fib_input, NULL, THIS_MODULE);
+ struct netlink_kernel_cfg cfg = {
+ .input = nl_fib_input,
+ };
+
+ sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, THIS_MODULE, &cfg);
if (sk == NULL)
return -EAFNOSUPPORT;
net->ipv4.fibnl = sk;
@@ -1021,11 +1071,6 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
rt_cache_flush(dev_net(dev), 0);
break;
case NETDEV_UNREGISTER_BATCH:
- /* The batch unregister is only called on the first
- * device in the list of devices being unregistered.
- * Therefore we should not pass dev_net(dev) in here.
- */
- rt_cache_flush_batch(NULL);
break;
}
return NOTIFY_DONE;
@@ -1090,6 +1135,9 @@ static int __net_init fib_net_init(struct net *net)
{
int error;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ net->ipv4.fib_num_tclassid_users = 0;
+#endif
error = ip_fib_net_init(net);
if (error < 0)
goto out;
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 2d043f71ef70..a83d74e498d2 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -47,14 +47,7 @@ struct fib4_rule {
#endif
};
-#ifdef CONFIG_IP_ROUTE_CLASSID
-u32 fib_rules_tclass(const struct fib_result *res)
-{
- return res->r ? ((struct fib4_rule *) res->r)->tclassid : 0;
-}
-#endif
-
-int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
+int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
{
struct fib_lookup_arg arg = {
.result = res,
@@ -63,11 +56,15 @@ int fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
int err;
err = fib_rules_lookup(net->ipv4.rules_ops, flowi4_to_flowi(flp), 0, &arg);
- res->r = arg.rule;
-
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ if (arg.rule)
+ res->tclassid = ((struct fib4_rule *)arg.rule)->tclassid;
+ else
+ res->tclassid = 0;
+#endif
return err;
}
-EXPORT_SYMBOL_GPL(fib_lookup);
+EXPORT_SYMBOL_GPL(__fib_lookup);
static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
int flags, struct fib_lookup_arg *arg)
@@ -169,8 +166,11 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
rule4->dst = nla_get_be32(tb[FRA_DST]);
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (tb[FRA_FLOW])
+ if (tb[FRA_FLOW]) {
rule4->tclassid = nla_get_u32(tb[FRA_FLOW]);
+ if (rule4->tclassid)
+ net->ipv4.fib_num_tclassid_users++;
+ }
#endif
rule4->src_len = frh->src_len;
@@ -179,11 +179,24 @@ static int fib4_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
rule4->dstmask = inet_make_mask(rule4->dst_len);
rule4->tos = frh->tos;
+ net->ipv4.fib_has_custom_rules = true;
err = 0;
errout:
return err;
}
+static void fib4_rule_delete(struct fib_rule *rule)
+{
+ struct net *net = rule->fr_net;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ struct fib4_rule *rule4 = (struct fib4_rule *) rule;
+
+ if (rule4->tclassid)
+ net->ipv4.fib_num_tclassid_users--;
+#endif
+ net->ipv4.fib_has_custom_rules = true;
+}
+
static int fib4_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
struct nlattr **tb)
{
@@ -256,6 +269,7 @@ static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = {
.action = fib4_rule_action,
.match = fib4_rule_match,
.configure = fib4_rule_configure,
+ .delete = fib4_rule_delete,
.compare = fib4_rule_compare,
.fill = fib4_rule_fill,
.default_pref = fib_default_rule_pref,
@@ -295,6 +309,7 @@ int __net_init fib4_rules_init(struct net *net)
if (err < 0)
goto fail;
net->ipv4.rules_ops = ops;
+ net->ipv4.fib_has_custom_rules = false;
return 0;
fail:
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e5b7182fa099..e55171f184f9 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -140,6 +140,27 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
},
};
+static void free_nh_exceptions(struct fib_nh *nh)
+{
+ struct fnhe_hash_bucket *hash = nh->nh_exceptions;
+ int i;
+
+ for (i = 0; i < FNHE_HASH_SIZE; i++) {
+ struct fib_nh_exception *fnhe;
+
+ fnhe = rcu_dereference_protected(hash[i].chain, 1);
+ while (fnhe) {
+ struct fib_nh_exception *next;
+
+ next = rcu_dereference_protected(fnhe->fnhe_next, 1);
+ kfree(fnhe);
+
+ fnhe = next;
+ }
+ }
+ kfree(hash);
+}
+
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
@@ -148,6 +169,12 @@ static void free_fib_info_rcu(struct rcu_head *head)
change_nexthops(fi) {
if (nexthop_nh->nh_dev)
dev_put(nexthop_nh->nh_dev);
+ if (nexthop_nh->nh_exceptions)
+ free_nh_exceptions(nexthop_nh);
+ if (nexthop_nh->nh_rth_output)
+ dst_release(&nexthop_nh->nh_rth_output->dst);
+ if (nexthop_nh->nh_rth_input)
+ dst_release(&nexthop_nh->nh_rth_input->dst);
} endfor_nexthops(fi);
release_net(fi->fib_net);
@@ -163,6 +190,12 @@ void free_fib_info(struct fib_info *fi)
return;
}
fib_info_cnt--;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+ change_nexthops(fi) {
+ if (nexthop_nh->nh_tclassid)
+ fi->fib_net->ipv4.fib_num_tclassid_users--;
+ } endfor_nexthops(fi);
+#endif
call_rcu(&fi->rcu, free_fib_info_rcu);
}
@@ -421,6 +454,8 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
+ if (nexthop_nh->nh_tclassid)
+ fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
}
@@ -779,9 +814,16 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
int type = nla_type(nla);
if (type) {
+ u32 val;
+
if (type > RTAX_MAX)
goto err_inval;
- fi->fib_metrics[type - 1] = nla_get_u32(nla);
+ val = nla_get_u32(nla);
+ if (type == RTAX_ADVMSS && val > 65535 - 40)
+ val = 65535 - 40;
+ if (type == RTAX_MTU && val > 65535 - 15)
+ val = 65535 - 15;
+ fi->fib_metrics[type - 1] = val;
}
}
}
@@ -810,6 +852,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
nh->nh_flags = cfg->fc_flags;
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid = cfg->fc_flow;
+ if (nh->nh_tclassid)
+ fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->nh_weight = 1;
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 30b88d7b4bd6..18cbc15b20d5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1007,9 +1007,9 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
while (tn != NULL && (tp = node_parent((struct rt_trie_node *)tn)) != NULL) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
wasfull = tnode_full(tp, tnode_get_child(tp, cindex));
- tn = (struct tnode *) resize(t, (struct tnode *)tn);
+ tn = (struct tnode *)resize(t, tn);
- tnode_put_child_reorg((struct tnode *)tp, cindex,
+ tnode_put_child_reorg(tp, cindex,
(struct rt_trie_node *)tn, wasfull);
tp = node_parent((struct rt_trie_node *) tn);
@@ -1024,7 +1024,7 @@ static void trie_rebalance(struct trie *t, struct tnode *tn)
/* Handle last (top) tnode */
if (IS_TNODE(tn))
- tn = (struct tnode *)resize(t, (struct tnode *)tn);
+ tn = (struct tnode *)resize(t, tn);
rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tnode_free_flush();
@@ -1125,7 +1125,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
node_set_parent((struct rt_trie_node *)l, tp);
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(t, (struct tnode *)tp, cindex, (struct rt_trie_node *)l);
+ put_child(t, tp, cindex, (struct rt_trie_node *)l);
} else {
/* Case 3: n is a LEAF or a TNODE and the key doesn't match. */
/*
@@ -1160,8 +1160,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
if (tp) {
cindex = tkey_extract_bits(key, tp->pos, tp->bits);
- put_child(t, (struct tnode *)tp, cindex,
- (struct rt_trie_node *)tn);
+ put_child(t, tp, cindex, (struct rt_trie_node *)tn);
} else {
rcu_assign_pointer(t->trie, (struct rt_trie_node *)tn);
tp = tn;
@@ -1620,7 +1619,7 @@ static void trie_leaf_remove(struct trie *t, struct leaf *l)
if (tp) {
t_key cindex = tkey_extract_bits(l->key, tp->pos, tp->bits);
- put_child(t, (struct tnode *)tp, cindex, NULL);
+ put_child(t, tp, cindex, NULL);
trie_rebalance(t, tp);
} else
RCU_INIT_POINTER(t->trie, NULL);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index c75efbdc71cb..f2eccd531746 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -95,6 +95,7 @@
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
+#include <net/ip_fib.h>
/*
* Build xmit assembly blocks
@@ -253,10 +254,10 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
/* Limit if icmp type is enabled in ratemask. */
if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
- if (!rt->peer)
- rt_bind_peer(rt, fl4->daddr, 1);
- rc = inet_peer_xrlim_allow(rt->peer,
+ struct inet_peer *peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, 1);
+ rc = inet_peer_xrlim_allow(peer,
net->ipv4.sysctl_icmp_ratelimit);
+ inet_putpeer(peer);
}
out:
return rc;
@@ -334,7 +335,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
struct flowi4 fl4;
struct sock *sk;
struct inet_sock *inet;
- __be32 daddr;
+ __be32 daddr, saddr;
if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
return;
@@ -348,6 +349,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
inet->tos = ip_hdr(skb)->tos;
daddr = ipc.addr = ip_hdr(skb)->saddr;
+ saddr = fib_compute_spec_dst(skb);
ipc.opt = NULL;
ipc.tx_flags = 0;
if (icmp_param->replyopts.opt.opt.optlen) {
@@ -357,7 +359,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
}
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
- fl4.saddr = rt->rt_spec_dst;
+ fl4.saddr = saddr;
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_proto = IPPROTO_ICMP;
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
@@ -569,7 +571,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
rcu_read_lock();
if (rt_is_input_route(rt) &&
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
- dev = dev_get_by_index_rcu(net, rt->rt_iif);
+ dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
if (dev)
saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
@@ -632,6 +634,27 @@ out:;
EXPORT_SYMBOL(icmp_send);
+static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
+{
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ const struct net_protocol *ipprot;
+ int protocol = iph->protocol;
+
+ /* Checkin full IP header plus 8 bytes of protocol to
+ * avoid additional coding at protocol handlers.
+ */
+ if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
+ return;
+
+ raw_icmp_error(skb, protocol, info);
+
+ rcu_read_lock();
+ ipprot = rcu_dereference(inet_protos[protocol]);
+ if (ipprot && ipprot->err_handler)
+ ipprot->err_handler(skb, info);
+ rcu_read_unlock();
+}
+
/*
* Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEED, and ICMP_QUENCH.
*/
@@ -640,10 +663,8 @@ static void icmp_unreach(struct sk_buff *skb)
{
const struct iphdr *iph;
struct icmphdr *icmph;
- int hash, protocol;
- const struct net_protocol *ipprot;
- u32 info = 0;
struct net *net;
+ u32 info = 0;
net = dev_net(skb_dst(skb)->dev);
@@ -674,9 +695,7 @@ static void icmp_unreach(struct sk_buff *skb)
LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"),
&iph->daddr);
} else {
- info = ip_rt_frag_needed(net, iph,
- ntohs(icmph->un.frag.mtu),
- skb->dev);
+ info = ntohs(icmph->un.frag.mtu);
if (!info)
goto out;
}
@@ -720,26 +739,7 @@ static void icmp_unreach(struct sk_buff *skb)
goto out;
}
- /* Checkin full IP header plus 8 bytes of protocol to
- * avoid additional coding at protocol handlers.
- */
- if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
- goto out;
-
- iph = (const struct iphdr *)skb->data;
- protocol = iph->protocol;
-
- /*
- * Deliver ICMP message to raw sockets. Pretty useless feature?
- */
- raw_icmp_error(skb, protocol, info);
-
- hash = protocol & (MAX_INET_PROTOS - 1);
- rcu_read_lock();
- ipprot = rcu_dereference(inet_protos[hash]);
- if (ipprot && ipprot->err_handler)
- ipprot->err_handler(skb, info);
- rcu_read_unlock();
+ icmp_socket_deliver(skb, info);
out:
return;
@@ -755,46 +755,15 @@ out_err:
static void icmp_redirect(struct sk_buff *skb)
{
- const struct iphdr *iph;
-
- if (skb->len < sizeof(struct iphdr))
- goto out_err;
-
- /*
- * Get the copied header of the packet that caused the redirect
- */
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- goto out;
-
- iph = (const struct iphdr *)skb->data;
-
- switch (icmp_hdr(skb)->code & 7) {
- case ICMP_REDIR_NET:
- case ICMP_REDIR_NETTOS:
- /*
- * As per RFC recommendations now handle it as a host redirect.
- */
- case ICMP_REDIR_HOST:
- case ICMP_REDIR_HOSTTOS:
- ip_rt_redirect(ip_hdr(skb)->saddr, iph->daddr,
- icmp_hdr(skb)->un.gateway,
- iph->saddr, skb->dev);
- break;
+ if (skb->len < sizeof(struct iphdr)) {
+ ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+ return;
}
- /* Ping wants to see redirects.
- * Let's pretend they are errors of sorts... */
- if (iph->protocol == IPPROTO_ICMP &&
- iph->ihl >= 5 &&
- pskb_may_pull(skb, (iph->ihl<<2)+8)) {
- ping_err(skb, icmp_hdr(skb)->un.gateway);
- }
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return;
-out:
- return;
-out_err:
- ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
- goto out;
+ icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway);
}
/*
@@ -868,86 +837,6 @@ out_err:
goto out;
}
-
-/*
- * Handle ICMP_ADDRESS_MASK requests. (RFC950)
- *
- * RFC1122 (3.2.2.9). A host MUST only send replies to
- * ADDRESS_MASK requests if it's been configured as an address mask
- * agent. Receiving a request doesn't constitute implicit permission to
- * act as one. Of course, implementing this correctly requires (SHOULD)
- * a way to turn the functionality on and off. Another one for sysctl(),
- * I guess. -- MS
- *
- * RFC1812 (4.3.3.9). A router MUST implement it.
- * A router SHOULD have switch turning it on/off.
- * This switch MUST be ON by default.
- *
- * Gratuitous replies, zero-source replies are not implemented,
- * that complies with RFC. DO NOT implement them!!! All the idea
- * of broadcast addrmask replies as specified in RFC950 is broken.
- * The problem is that it is not uncommon to have several prefixes
- * on one physical interface. Moreover, addrmask agent can even be
- * not aware of existing another prefixes.
- * If source is zero, addrmask agent cannot choose correct prefix.
- * Gratuitous mask announcements suffer from the same problem.
- * RFC1812 explains it, but still allows to use ADDRMASK,
- * that is pretty silly. --ANK
- *
- * All these rules are so bizarre, that I removed kernel addrmask
- * support at all. It is wrong, it is obsolete, nobody uses it in
- * any case. --ANK
- *
- * Furthermore you can do it with a usermode address agent program
- * anyway...
- */
-
-static void icmp_address(struct sk_buff *skb)
-{
-#if 0
- net_dbg_ratelimited("a guy asks for address mask. Who is it?\n");
-#endif
-}
-
-/*
- * RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain
- * loudly if an inconsistency is found.
- * called with rcu_read_lock()
- */
-
-static void icmp_address_reply(struct sk_buff *skb)
-{
- struct rtable *rt = skb_rtable(skb);
- struct net_device *dev = skb->dev;
- struct in_device *in_dev;
- struct in_ifaddr *ifa;
-
- if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
- return;
-
- in_dev = __in_dev_get_rcu(dev);
- if (!in_dev)
- return;
-
- if (in_dev->ifa_list &&
- IN_DEV_LOG_MARTIANS(in_dev) &&
- IN_DEV_FORWARD(in_dev)) {
- __be32 _mask, *mp;
-
- mp = skb_header_pointer(skb, 0, sizeof(_mask), &_mask);
- BUG_ON(mp == NULL);
- for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
- if (*mp == ifa->ifa_mask &&
- inet_ifa_match(ip_hdr(skb)->saddr, ifa))
- break;
- }
- if (!ifa)
- net_info_ratelimited("Wrong address mask %pI4 from %s/%pI4\n",
- mp,
- dev->name, &ip_hdr(skb)->saddr);
- }
-}
-
static void icmp_discard(struct sk_buff *skb)
{
}
@@ -1111,10 +1000,10 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
.handler = icmp_discard,
},
[ICMP_ADDRESS] = {
- .handler = icmp_address,
+ .handler = icmp_discard,
},
[ICMP_ADDRESSREPLY] = {
- .handler = icmp_address_reply,
+ .handler = icmp_discard,
},
};
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index f9ee7417f6a0..db0cf17c00f7 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -374,18 +374,19 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
const struct inet_request_sock *ireq = inet_rsk(req);
struct ip_options_rcu *opt = inet_rsk(req)->opt;
struct net *net = sock_net(sk);
+ int flags = inet_sk_flowi_flags(sk);
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
sk->sk_protocol,
- inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
+ flags,
(opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
security_req_classify_flow(req, flowi4_to_flowi(fl4));
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
- if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
+ if (opt && opt->opt.is_strictroute && rt->rt_gateway)
goto route_err;
return &rt->dst;
@@ -418,7 +419,7 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
rt = ip_route_output_flow(net, fl4, sk);
if (IS_ERR(rt))
goto no_route;
- if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
+ if (opt && opt->opt.is_strictroute && rt->rt_gateway)
goto route_err;
return &rt->dst;
@@ -799,3 +800,49 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif
+
+static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
+{
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct ip_options_rcu *inet_opt;
+ __be32 daddr = inet->inet_daddr;
+ struct flowi4 *fl4;
+ struct rtable *rt;
+
+ rcu_read_lock();
+ inet_opt = rcu_dereference(inet->inet_opt);
+ if (inet_opt && inet_opt->opt.srr)
+ daddr = inet_opt->opt.faddr;
+ fl4 = &fl->u.ip4;
+ rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
+ inet->inet_saddr, inet->inet_dport,
+ inet->inet_sport, sk->sk_protocol,
+ RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
+ if (IS_ERR(rt))
+ rt = NULL;
+ if (rt)
+ sk_setup_caps(sk, &rt->dst);
+ rcu_read_unlock();
+
+ return &rt->dst;
+}
+
+struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
+{
+ struct dst_entry *dst = __sk_dst_check(sk, 0);
+ struct inet_sock *inet = inet_sk(sk);
+
+ if (!dst) {
+ dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
+ if (!dst)
+ goto out;
+ }
+ dst->ops->update_pmtu(dst, sk, NULL, mtu);
+
+ dst = __sk_dst_check(sk, 0);
+ if (!dst)
+ dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
+out:
+ return dst;
+}
+EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 46d1e7199a8c..570e61f9611f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -46,9 +46,6 @@ struct inet_diag_entry {
u16 userlocks;
};
-#define INET_DIAG_PUT(skb, attrtype, attrlen) \
- RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
-
static DEFINE_MUTEX(inet_diag_table_mutex);
static const struct inet_diag_handler *inet_diag_lock_handler(int proto)
@@ -78,24 +75,22 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
const struct inet_sock *inet = inet_sk(sk);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
+ struct nlattr *attr;
void *info = NULL;
- struct inet_diag_meminfo *minfo = NULL;
- unsigned char *b = skb_tail_pointer(skb);
const struct inet_diag_handler *handler;
int ext = req->idiag_ext;
handler = inet_diag_table[req->sdiag_protocol];
BUG_ON(handler == NULL);
- nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
- nlh->nlmsg_flags = nlmsg_flags;
+ nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+ nlmsg_flags);
+ if (!nlh)
+ return -EMSGSIZE;
- r = NLMSG_DATA(nlh);
+ r = nlmsg_data(nlh);
BUG_ON(sk->sk_state == TCP_TIME_WAIT);
- if (ext & (1 << (INET_DIAG_MEMINFO - 1)))
- minfo = INET_DIAG_PUT(skb, INET_DIAG_MEMINFO, sizeof(*minfo));
-
r->idiag_family = sk->sk_family;
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
@@ -113,7 +108,8 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
* hence this needs to be included regardless of socket family.
*/
if (ext & (1 << (INET_DIAG_TOS - 1)))
- RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+ if (nla_put_u8(skb, INET_DIAG_TOS, inet->tos) < 0)
+ goto errout;
#if IS_ENABLED(CONFIG_IPV6)
if (r->idiag_family == AF_INET6) {
@@ -121,24 +117,31 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
*(struct in6_addr *)r->id.idiag_src = np->rcv_saddr;
*(struct in6_addr *)r->id.idiag_dst = np->daddr;
+
if (ext & (1 << (INET_DIAG_TCLASS - 1)))
- RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
+ if (nla_put_u8(skb, INET_DIAG_TCLASS, np->tclass) < 0)
+ goto errout;
}
#endif
r->idiag_uid = sock_i_uid(sk);
r->idiag_inode = sock_i_ino(sk);
- if (minfo) {
- minfo->idiag_rmem = sk_rmem_alloc_get(sk);
- minfo->idiag_wmem = sk->sk_wmem_queued;
- minfo->idiag_fmem = sk->sk_forward_alloc;
- minfo->idiag_tmem = sk_wmem_alloc_get(sk);
+ if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
+ struct inet_diag_meminfo minfo = {
+ .idiag_rmem = sk_rmem_alloc_get(sk),
+ .idiag_wmem = sk->sk_wmem_queued,
+ .idiag_fmem = sk->sk_forward_alloc,
+ .idiag_tmem = sk_wmem_alloc_get(sk),
+ };
+
+ if (nla_put(skb, INET_DIAG_MEMINFO, sizeof(minfo), &minfo) < 0)
+ goto errout;
}
if (ext & (1 << (INET_DIAG_SKMEMINFO - 1)))
if (sock_diag_put_meminfo(sk, skb, INET_DIAG_SKMEMINFO))
- goto rtattr_failure;
+ goto errout;
if (icsk == NULL) {
handler->idiag_get_info(sk, r, NULL);
@@ -165,16 +168,20 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
}
#undef EXPIRES_IN_MS
- if (ext & (1 << (INET_DIAG_INFO - 1)))
- info = INET_DIAG_PUT(skb, INET_DIAG_INFO, sizeof(struct tcp_info));
-
- if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops) {
- const size_t len = strlen(icsk->icsk_ca_ops->name);
+ if (ext & (1 << (INET_DIAG_INFO - 1))) {
+ attr = nla_reserve(skb, INET_DIAG_INFO,
+ sizeof(struct tcp_info));
+ if (!attr)
+ goto errout;
- strcpy(INET_DIAG_PUT(skb, INET_DIAG_CONG, len + 1),
- icsk->icsk_ca_ops->name);
+ info = nla_data(attr);
}
+ if ((ext & (1 << (INET_DIAG_CONG - 1))) && icsk->icsk_ca_ops)
+ if (nla_put_string(skb, INET_DIAG_CONG,
+ icsk->icsk_ca_ops->name) < 0)
+ goto errout;
+
handler->idiag_get_info(sk, r, info);
if (sk->sk_state < TCP_TIME_WAIT &&
@@ -182,12 +189,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
icsk->icsk_ca_ops->get_info(sk, ext, skb);
out:
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
- return skb->len;
+ return nlmsg_end(skb, nlh);
-rtattr_failure:
-nlmsg_failure:
- nlmsg_trim(skb, b);
+errout:
+ nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
EXPORT_SYMBOL_GPL(inet_sk_diag_fill);
@@ -208,14 +213,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
{
long tmo;
struct inet_diag_msg *r;
- const unsigned char *previous_tail = skb_tail_pointer(skb);
- struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq,
- unlh->nlmsg_type, sizeof(*r));
+ struct nlmsghdr *nlh;
- r = NLMSG_DATA(nlh);
- BUG_ON(tw->tw_state != TCP_TIME_WAIT);
+ nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+ nlmsg_flags);
+ if (!nlh)
+ return -EMSGSIZE;
- nlh->nlmsg_flags = nlmsg_flags;
+ r = nlmsg_data(nlh);
+ BUG_ON(tw->tw_state != TCP_TIME_WAIT);
tmo = tw->tw_ttd - jiffies;
if (tmo < 0)
@@ -245,11 +251,8 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
*(struct in6_addr *)r->id.idiag_dst = tw6->tw_v6_daddr;
}
#endif
- nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail;
- return skb->len;
-nlmsg_failure:
- nlmsg_trim(skb, previous_tail);
- return -EMSGSIZE;
+
+ return nlmsg_end(skb, nlh);
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
@@ -269,16 +272,17 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
int err;
struct sock *sk;
struct sk_buff *rep;
+ struct net *net = sock_net(in_skb->sk);
err = -EINVAL;
if (req->sdiag_family == AF_INET) {
- sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
+ sk = inet_lookup(net, hashinfo, req->id.idiag_dst[0],
req->id.idiag_dport, req->id.idiag_src[0],
req->id.idiag_sport, req->id.idiag_if);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (req->sdiag_family == AF_INET6) {
- sk = inet6_lookup(&init_net, hashinfo,
+ sk = inet6_lookup(net, hashinfo,
(struct in6_addr *)req->id.idiag_dst,
req->id.idiag_dport,
(struct in6_addr *)req->id.idiag_src,
@@ -298,23 +302,23 @@ int inet_diag_dump_one_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *in_s
if (err)
goto out;
- err = -ENOMEM;
- rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
- sizeof(struct inet_diag_meminfo) +
- sizeof(struct tcp_info) + 64)),
- GFP_KERNEL);
- if (!rep)
+ rep = nlmsg_new(sizeof(struct inet_diag_msg) +
+ sizeof(struct inet_diag_meminfo) +
+ sizeof(struct tcp_info) + 64, GFP_KERNEL);
+ if (!rep) {
+ err = -ENOMEM;
goto out;
+ }
err = sk_diag_fill(sk, rep, req,
NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, nlh);
if (err < 0) {
WARN_ON(err == -EMSGSIZE);
- kfree_skb(rep);
+ nlmsg_free(rep);
goto out;
}
- err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+ err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
err = 0;
@@ -592,15 +596,16 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct inet_sock *inet = inet_sk(sk);
- unsigned char *b = skb_tail_pointer(skb);
struct inet_diag_msg *r;
struct nlmsghdr *nlh;
long tmo;
- nlh = NLMSG_PUT(skb, pid, seq, unlh->nlmsg_type, sizeof(*r));
- nlh->nlmsg_flags = NLM_F_MULTI;
- r = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, unlh->nlmsg_type, sizeof(*r),
+ NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+ r = nlmsg_data(nlh);
r->idiag_family = sk->sk_family;
r->idiag_state = TCP_SYN_RECV;
r->idiag_timer = 1;
@@ -628,13 +633,8 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
*(struct in6_addr *)r->id.idiag_dst = inet6_rsk(req)->rmt_addr;
}
#endif
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
-
- return skb->len;
-nlmsg_failure:
- nlmsg_trim(skb, b);
- return -1;
+ return nlmsg_end(skb, nlh);
}
static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
@@ -725,6 +725,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
{
int i, num;
int s_i, s_num;
+ struct net *net = sock_net(skb->sk);
s_i = cb->args[1];
s_num = num = cb->args[2];
@@ -744,6 +745,9 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
sk_nulls_for_each(sk, node, &ilb->head) {
struct inet_sock *inet = inet_sk(sk);
+ if (!net_eq(sock_net(sk), net))
+ continue;
+
if (num < s_num) {
num++;
continue;
@@ -814,6 +818,8 @@ skip_listen_ht:
sk_nulls_for_each(sk, node, &head->chain) {
struct inet_sock *inet = inet_sk(sk);
+ if (!net_eq(sock_net(sk), net))
+ continue;
if (num < s_num)
goto next_normal;
if (!(r->idiag_states & (1 << sk->sk_state)))
@@ -840,6 +846,8 @@ next_normal:
inet_twsk_for_each(tw, node,
&head->twchain) {
+ if (!net_eq(twsk_net(tw), net))
+ continue;
if (num < s_num)
goto next_dying;
@@ -892,7 +900,7 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (nlmsg_attrlen(cb->nlh, hdrlen))
bc = nlmsg_find_attr(cb->nlh, hdrlen, INET_DIAG_REQ_BYTECODE);
- return __inet_diag_dump(skb, cb, (struct inet_diag_req_v2 *)NLMSG_DATA(cb->nlh), bc);
+ return __inet_diag_dump(skb, cb, nlmsg_data(cb->nlh), bc);
}
static inline int inet_diag_type2proto(int type)
@@ -909,7 +917,7 @@ static inline int inet_diag_type2proto(int type)
static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct inet_diag_req *rc = NLMSG_DATA(cb->nlh);
+ struct inet_diag_req *rc = nlmsg_data(cb->nlh);
struct inet_diag_req_v2 req;
struct nlattr *bc = NULL;
int hdrlen = sizeof(struct inet_diag_req);
@@ -929,7 +937,7 @@ static int inet_diag_dump_compat(struct sk_buff *skb, struct netlink_callback *c
static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
const struct nlmsghdr *nlh)
{
- struct inet_diag_req *rc = NLMSG_DATA(nlh);
+ struct inet_diag_req *rc = nlmsg_data(nlh);
struct inet_diag_req_v2 req;
req.sdiag_family = rc->idiag_family;
@@ -944,6 +952,7 @@ static int inet_diag_get_exact_compat(struct sk_buff *in_skb,
static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
{
int hdrlen = sizeof(struct inet_diag_req);
+ struct net *net = sock_net(skb->sk);
if (nlh->nlmsg_type >= INET_DIAG_GETSOCK_MAX ||
nlmsg_len(nlh) < hdrlen)
@@ -964,7 +973,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
struct netlink_dump_control c = {
.dump = inet_diag_dump_compat,
};
- return netlink_dump_start(sock_diag_nlsk, skb, nlh, &c);
+ return netlink_dump_start(net->diag_nlsk, skb, nlh, &c);
}
}
@@ -974,6 +983,7 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh)
static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
int hdrlen = sizeof(struct inet_diag_req_v2);
+ struct net *net = sock_net(skb->sk);
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
@@ -992,11 +1002,11 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
struct netlink_dump_control c = {
.dump = inet_diag_dump,
};
- return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
+ return netlink_dump_start(net->diag_nlsk, skb, h, &c);
}
}
- return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
+ return inet_diag_get_exact(skb, h, nlmsg_data(h));
}
static const struct sock_diag_handler inet_diag_handler = {
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5ff2a51b6d0c..85190e69297b 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -243,12 +243,12 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
if (q == NULL)
return NULL;
+ q->net = nf;
f->constructor(q, arg);
atomic_add(f->qsize, &nf->mem);
setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
spin_lock_init(&q->lock);
atomic_set(&q->refcnt, 1);
- q->net = nf;
return q;
}
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dfba343b2509..e1e0a4e8fd34 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -82,23 +82,39 @@ static const struct inet_peer peer_fake_node = {
.avl_height = 0
};
-struct inet_peer_base {
- struct inet_peer __rcu *root;
- seqlock_t lock;
- int total;
-};
+void inet_peer_base_init(struct inet_peer_base *bp)
+{
+ bp->root = peer_avl_empty_rcu;
+ seqlock_init(&bp->lock);
+ bp->flush_seq = ~0U;
+ bp->total = 0;
+}
+EXPORT_SYMBOL_GPL(inet_peer_base_init);
-static struct inet_peer_base v4_peers = {
- .root = peer_avl_empty_rcu,
- .lock = __SEQLOCK_UNLOCKED(v4_peers.lock),
- .total = 0,
-};
+static atomic_t v4_seq = ATOMIC_INIT(0);
+static atomic_t v6_seq = ATOMIC_INIT(0);
-static struct inet_peer_base v6_peers = {
- .root = peer_avl_empty_rcu,
- .lock = __SEQLOCK_UNLOCKED(v6_peers.lock),
- .total = 0,
-};
+static atomic_t *inetpeer_seq_ptr(int family)
+{
+ return (family == AF_INET ? &v4_seq : &v6_seq);
+}
+
+static inline void flush_check(struct inet_peer_base *base, int family)
+{
+ atomic_t *fp = inetpeer_seq_ptr(family);
+
+ if (unlikely(base->flush_seq != atomic_read(fp))) {
+ inetpeer_invalidate_tree(base);
+ base->flush_seq = atomic_read(fp);
+ }
+}
+
+void inetpeer_invalidate_family(int family)
+{
+ atomic_t *fp = inetpeer_seq_ptr(family);
+
+ atomic_inc(fp);
+}
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
@@ -110,7 +126,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
static void inetpeer_gc_worker(struct work_struct *work)
{
- struct inet_peer *p, *n;
+ struct inet_peer *p, *n, *c;
LIST_HEAD(list);
spin_lock_bh(&gc_lock);
@@ -122,17 +138,19 @@ static void inetpeer_gc_worker(struct work_struct *work)
list_for_each_entry_safe(p, n, &list, gc_list) {
- if(need_resched())
+ if (need_resched())
cond_resched();
- if (p->avl_left != peer_avl_empty) {
- list_add_tail(&p->avl_left->gc_list, &list);
- p->avl_left = peer_avl_empty;
+ c = rcu_dereference_protected(p->avl_left, 1);
+ if (c != peer_avl_empty) {
+ list_add_tail(&c->gc_list, &list);
+ p->avl_left = peer_avl_empty_rcu;
}
- if (p->avl_right != peer_avl_empty) {
- list_add_tail(&p->avl_right->gc_list, &list);
- p->avl_right = peer_avl_empty;
+ c = rcu_dereference_protected(p->avl_right, 1);
+ if (c != peer_avl_empty) {
+ list_add_tail(&c->gc_list, &list);
+ p->avl_right = peer_avl_empty_rcu;
}
n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
@@ -401,11 +419,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
call_rcu(&p->rcu, inetpeer_free_rcu);
}
-static struct inet_peer_base *family_to_base(int family)
-{
- return family == AF_INET ? &v4_peers : &v6_peers;
-}
-
/* perform garbage collect on all items stacked during a lookup */
static int inet_peer_gc(struct inet_peer_base *base,
struct inet_peer __rcu **stack[PEER_MAXDEPTH],
@@ -443,14 +456,17 @@ static int inet_peer_gc(struct inet_peer_base *base,
return cnt;
}
-struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
+struct inet_peer *inet_getpeer(struct inet_peer_base *base,
+ const struct inetpeer_addr *daddr,
+ int create)
{
struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
- struct inet_peer_base *base = family_to_base(daddr->family);
struct inet_peer *p;
unsigned int sequence;
int invalidated, gccnt = 0;
+ flush_check(base, daddr->family);
+
/* Attempt a lockless lookup first.
* Because of a concurrent writer, we might not find an existing entry.
*/
@@ -492,13 +508,9 @@ relookup:
(daddr->family == AF_INET) ?
secure_ip_id(daddr->addr.a4) :
secure_ipv6_id(daddr->addr.a6));
- p->tcp_ts_stamp = 0;
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
p->rate_last = 0;
- p->pmtu_expires = 0;
- p->pmtu_orig = 0;
- memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
INIT_LIST_HEAD(&p->gc_list);
/* Link the node. */
@@ -571,26 +583,19 @@ static void inetpeer_inval_rcu(struct rcu_head *head)
schedule_delayed_work(&gc_work, gc_delay);
}
-void inetpeer_invalidate_tree(int family)
+void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
- struct inet_peer *old, *new, *prev;
- struct inet_peer_base *base = family_to_base(family);
+ struct inet_peer *root;
write_seqlock_bh(&base->lock);
- old = base->root;
- if (old == peer_avl_empty_rcu)
- goto out;
-
- new = peer_avl_empty_rcu;
-
- prev = cmpxchg(&base->root, old, new);
- if (prev == old) {
+ root = rcu_deref_locked(base->root, base);
+ if (root != peer_avl_empty) {
+ base->root = peer_avl_empty_rcu;
base->total = 0;
- call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
+ call_rcu(&root->gc_rcu, inetpeer_inval_rcu);
}
-out:
write_sequnlock_bh(&base->lock);
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 9dbd3dd6022d..7ad88e5e7110 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -171,6 +171,10 @@ static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
struct ipq *qp = container_of(q, struct ipq, q);
+ struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
+ frags);
+ struct net *net = container_of(ipv4, struct net, ipv4);
+
struct ip4_create_arg *arg = a;
qp->protocol = arg->iph->protocol;
@@ -180,7 +184,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, void *a)
qp->daddr = arg->iph->daddr;
qp->user = arg->user;
qp->peer = sysctl_ipfrag_max_dist ?
- inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
+ inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
}
static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
@@ -254,8 +258,8 @@ static void ip_expire(unsigned long arg)
/* skb dst is stale, drop it, and perform route lookup again */
skb_dst_drop(head);
iph = ip_hdr(head);
- err = ip_route_input_noref(head, iph->daddr, iph->saddr,
- iph->tos, head->dev);
+ err = ip_route_input(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
if (err)
goto out_rcu_unlock;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index f49047b79609..b062a98574f2 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -516,9 +516,6 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
case ICMP_PORT_UNREACH:
/* Impossible event. */
return;
- case ICMP_FRAG_NEEDED:
- /* Soft state for pmtu is maintained by IP core. */
- return;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -531,6 +528,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
if (code != ICMP_EXC_TTL)
return;
break;
+
+ case ICMP_REDIRECT:
+ break;
}
rcu_read_lock();
@@ -538,7 +538,20 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
flags & GRE_KEY ?
*(((__be32 *)p) + (grehlen / 4) - 1) : 0,
p[1]);
- if (t == NULL || t->parms.iph.daddr == 0 ||
+ if (t == NULL)
+ goto out;
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+ t->parms.link, 0, IPPROTO_GRE, 0);
+ goto out;
+ }
+ if (type == ICMP_REDIRECT) {
+ ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
+ IPPROTO_GRE, 0);
+ goto out;
+ }
+ if (t->parms.iph.daddr == 0 ||
ipv4_is_multicast(t->parms.iph.daddr))
goto out;
@@ -753,7 +766,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if (skb->protocol == htons(ETH_P_IP)) {
rt = skb_rtable(skb);
- dst = rt->rt_gateway;
+ dst = rt_nexthop(rt, old_iph->daddr);
}
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -820,7 +833,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->protocol == htons(ETH_P_IP)) {
df |= (old_iph->frag_off&htons(IP_DF));
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 8590144ca330..4ebc6feee250 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -198,14 +198,13 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
rcu_read_lock();
{
int protocol = ip_hdr(skb)->protocol;
- int hash, raw;
const struct net_protocol *ipprot;
+ int raw;
resubmit:
raw = raw_local_deliver(skb, protocol);
- hash = protocol & (MAX_INET_PROTOS - 1);
- ipprot = rcu_dereference(inet_protos[hash]);
+ ipprot = rcu_dereference(inet_protos[protocol]);
if (ipprot != NULL) {
int ret;
@@ -314,26 +313,33 @@ drop:
return true;
}
+int sysctl_ip_early_demux __read_mostly = 1;
+
static int ip_rcv_finish(struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
+ if (sysctl_ip_early_demux && !skb_dst(skb)) {
+ const struct net_protocol *ipprot;
+ int protocol = iph->protocol;
+
+ rcu_read_lock();
+ ipprot = rcu_dereference(inet_protos[protocol]);
+ if (ipprot && ipprot->early_demux)
+ ipprot->early_demux(skb);
+ rcu_read_unlock();
+ }
+
/*
* Initialise the virtual path cache for the packet. It describes
* how the packet travels inside Linux networking.
*/
- if (skb_dst(skb) == NULL) {
- int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
- iph->tos, skb->dev);
+ if (!skb_dst(skb)) {
+ int err = ip_route_input(skb, iph->daddr, iph->saddr,
+ iph->tos, skb->dev);
if (unlikely(err)) {
- if (err == -EHOSTUNREACH)
- IP_INC_STATS_BH(dev_net(skb->dev),
- IPSTATS_MIB_INADDRERRORS);
- else if (err == -ENETUNREACH)
- IP_INC_STATS_BH(dev_net(skb->dev),
- IPSTATS_MIB_INNOROUTES);
- else if (err == -EXDEV)
+ if (err == -EXDEV)
NET_INC_STATS_BH(dev_net(skb->dev),
LINUX_MIB_IPRPFILTER);
goto drop;
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 708b99494e23..1dc01f9793d5 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -27,6 +27,7 @@
#include <net/icmp.h>
#include <net/route.h>
#include <net/cipso_ipv4.h>
+#include <net/ip_fib.h>
/*
* Write options to IP header, record destination address to
@@ -92,7 +93,6 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
unsigned char *sptr, *dptr;
int soffset, doffset;
int optlen;
- __be32 daddr;
memset(dopt, 0, sizeof(struct ip_options));
@@ -104,8 +104,6 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
sptr = skb_network_header(skb);
dptr = dopt->__data;
- daddr = skb_rtable(skb)->rt_spec_dst;
-
if (sopt->rr) {
optlen = sptr[sopt->rr+1];
soffset = sptr[sopt->rr+2];
@@ -179,6 +177,8 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
doffset -= 4;
}
if (doffset > 3) {
+ __be32 daddr = fib_compute_spec_dst(skb);
+
memcpy(&start[doffset-1], &daddr, 4);
dopt->faddr = faddr;
dptr[0] = start[0];
@@ -241,6 +241,15 @@ void ip_options_fragment(struct sk_buff *skb)
opt->ts_needtime = 0;
}
+/* helper used by ip_options_compile() to call fib_compute_spec_dst()
+ * at most one time.
+ */
+static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
+{
+ if (*spec_dst == htonl(INADDR_ANY))
+ *spec_dst = fib_compute_spec_dst(skb);
+}
+
/*
* Verify options and fill pointers in struct options.
* Caller should clear *opt, and set opt->data.
@@ -250,12 +259,12 @@ void ip_options_fragment(struct sk_buff *skb)
int ip_options_compile(struct net *net,
struct ip_options *opt, struct sk_buff *skb)
{
- int l;
- unsigned char *iph;
- unsigned char *optptr;
- int optlen;
+ __be32 spec_dst = htonl(INADDR_ANY);
unsigned char *pp_ptr = NULL;
struct rtable *rt = NULL;
+ unsigned char *optptr;
+ unsigned char *iph;
+ int optlen, l;
if (skb != NULL) {
rt = skb_rtable(skb);
@@ -331,7 +340,8 @@ int ip_options_compile(struct net *net,
goto error;
}
if (rt) {
- memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
+ spec_dst_fill(&spec_dst, skb);
+ memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
opt->is_changed = 1;
}
optptr[2] += 4;
@@ -373,7 +383,8 @@ int ip_options_compile(struct net *net,
}
opt->ts = optptr - iph;
if (rt) {
- memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
+ spec_dst_fill(&spec_dst, skb);
+ memcpy(&optptr[optptr[2]-1], &spec_dst, 4);
timeptr = &optptr[optptr[2]+3];
}
opt->ts_needaddr = 1;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 451f97c42eb4..ba39a52d18c1 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -113,19 +113,6 @@ int ip_local_out(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(ip_local_out);
-/* dev_loopback_xmit for use with netfilter. */
-static int ip_dev_loopback_xmit(struct sk_buff *newskb)
-{
- skb_reset_mac_header(newskb);
- __skb_pull(newskb, skb_network_offset(newskb));
- newskb->pkt_type = PACKET_LOOPBACK;
- newskb->ip_summed = CHECKSUM_UNNECESSARY;
- WARN_ON(!skb_dst(newskb));
- skb_dst_force(newskb);
- netif_rx_ni(newskb);
- return 0;
-}
-
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
int ttl = inet->uc_ttl;
@@ -183,6 +170,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
+ u32 nexthop;
if (rt->rt_type == RTN_MULTICAST) {
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
@@ -200,19 +188,22 @@ static inline int ip_finish_output2(struct sk_buff *skb)
}
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
- kfree_skb(skb);
+ consume_skb(skb);
skb = skb2;
}
- rcu_read_lock();
- neigh = dst_get_neighbour_noref(dst);
+ rcu_read_lock_bh();
+ nexthop = rt->rt_gateway ? rt->rt_gateway : ip_hdr(skb)->daddr;
+ neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
+ if (unlikely(!neigh))
+ neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
if (neigh) {
- int res = neigh_output(neigh, skb);
+ int res = dst_neigh_output(dst, neigh, skb);
- rcu_read_unlock();
+ rcu_read_unlock_bh();
return res;
}
- rcu_read_unlock();
+ rcu_read_unlock_bh();
net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
__func__);
@@ -281,7 +272,7 @@ int ip_mc_output(struct sk_buff *skb)
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
newskb, NULL, newskb->dev,
- ip_dev_loopback_xmit);
+ dev_loopback_xmit);
}
/* Multicasts with ttl 0 must not go beyond the host */
@@ -296,7 +287,7 @@ int ip_mc_output(struct sk_buff *skb)
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
if (newskb)
NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
- NULL, newskb->dev, ip_dev_loopback_xmit);
+ NULL, newskb->dev, dev_loopback_xmit);
}
return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
@@ -380,7 +371,7 @@ int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
skb_dst_set_noref(skb, &rt->dst);
packet_routed:
- if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
+ if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_gateway)
goto no_route;
/* OK, we know where to send it, allocate and build IP header. */
@@ -709,7 +700,7 @@ slow_path:
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
}
- kfree_skb(skb);
+ consume_skb(skb);
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
return err;
@@ -1472,19 +1463,34 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
/*
* Generic function to send a packet as reply to another packet.
- * Used to send TCP resets so far. ICMP should use this function too.
+ * Used to send some TCP resets/acks so far.
*
- * Should run single threaded per socket because it uses the sock
- * structure to pass arguments.
+ * Use a fake percpu inet socket to avoid false sharing and contention.
*/
-void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
- const struct ip_reply_arg *arg, unsigned int len)
+static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
+ .sk = {
+ .__sk_common = {
+ .skc_refcnt = ATOMIC_INIT(1),
+ },
+ .sk_wmem_alloc = ATOMIC_INIT(1),
+ .sk_allocation = GFP_ATOMIC,
+ .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
+ },
+ .pmtudisc = IP_PMTUDISC_WANT,
+ .uc_ttl = -1,
+};
+
+void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+ __be32 saddr, const struct ip_reply_arg *arg,
+ unsigned int len)
{
- struct inet_sock *inet = inet_sk(sk);
struct ip_options_data replyopts;
struct ipcm_cookie ipc;
struct flowi4 fl4;
struct rtable *rt = skb_rtable(skb);
+ struct sk_buff *nskb;
+ struct sock *sk;
+ struct inet_sock *inet;
if (ip_options_echo(&replyopts.opt.opt, skb))
return;
@@ -1502,38 +1508,39 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
flowi4_init_output(&fl4, arg->bound_dev_if, 0,
RT_TOS(arg->tos),
- RT_SCOPE_UNIVERSE, sk->sk_protocol,
+ RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
ip_reply_arg_flowi_flags(arg),
- daddr, rt->rt_spec_dst,
+ daddr, saddr,
tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
- rt = ip_route_output_key(sock_net(sk), &fl4);
+ rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
return;
- /* And let IP do all the hard work.
+ inet = &get_cpu_var(unicast_sock);
- This chunk is not reenterable, hence spinlock.
- Note that it uses the fact, that this function is called
- with locally disabled BH and that sk cannot be already spinlocked.
- */
- bh_lock_sock(sk);
inet->tos = arg->tos;
+ sk = &inet->sk;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
+ sock_net_set(sk, net);
+ __skb_queue_head_init(&sk->sk_write_queue);
+ sk->sk_sndbuf = sysctl_wmem_default;
ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
&ipc, &rt, MSG_DONTWAIT);
- if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
+ nskb = skb_peek(&sk->sk_write_queue);
+ if (nskb) {
if (arg->csumoffset >= 0)
- *((__sum16 *)skb_transport_header(skb) +
- arg->csumoffset) = csum_fold(csum_add(skb->csum,
+ *((__sum16 *)skb_transport_header(nskb) +
+ arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
- skb->ip_summed = CHECKSUM_NONE;
+ nskb->ip_summed = CHECKSUM_NONE;
+ skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
ip_push_pending_frames(sk, &fl4);
}
- bh_unlock_sock(sk);
+ put_cpu_var(unicast_sock);
ip_rt_put(rt);
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 0d11f234d615..5eea4a811042 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -40,6 +40,7 @@
#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
#endif
+#include <net/ip_fib.h>
#include <linux/errqueue.h>
#include <asm/uaccess.h>
@@ -1019,18 +1020,17 @@ e_inval:
* @sk: socket
* @skb: buffer
*
- * To support IP_CMSG_PKTINFO option, we store rt_iif and rt_spec_dst
- * in skb->cb[] before dst drop.
+ * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
+ * destination in skb->cb[] before dst drop.
* This way, receiver doesnt make cache line misses to read rtable.
*/
void ipv4_pktinfo_prepare(struct sk_buff *skb)
{
struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
- const struct rtable *rt = skb_rtable(skb);
- if (rt) {
- pktinfo->ipi_ifindex = rt->rt_iif;
- pktinfo->ipi_spec_dst.s_addr = rt->rt_spec_dst;
+ if (skb_rtable(skb)) {
+ pktinfo->ipi_ifindex = inet_iif(skb);
+ pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
} else {
pktinfo->ipi_ifindex = 0;
pktinfo->ipi_spec_dst.s_addr = 0;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
new file mode 100644
index 000000000000..3511ffba7bd4
--- /dev/null
+++ b/net/ipv4/ip_vti.c
@@ -0,0 +1,956 @@
+/*
+ * Linux NET3: IP/IP protocol decoder modified to support
+ * virtual tunnel interface
+ *
+ * Authors:
+ * Saurabh Mohan (saurabh.mohan@vyatta.com) 05/07/2012
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+/*
+ This version of net/ipv4/ip_vti.c is cloned of net/ipv4/ipip.c
+
+ For comments look at net/ipv4/ip_gre.c --ANK
+ */
+
+
+#include <linux/capability.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/if_arp.h>
+#include <linux/mroute.h>
+#include <linux/init.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/if_ether.h>
+
+#include <net/sock.h>
+#include <net/ip.h>
+#include <net/icmp.h>
+#include <net/ipip.h>
+#include <net/inet_ecn.h>
+#include <net/xfrm.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+
+#define HASH_SIZE 16
+#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
+
+static struct rtnl_link_ops vti_link_ops __read_mostly;
+
+static int vti_net_id __read_mostly;
+struct vti_net {
+ struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_wc[1];
+ struct ip_tunnel __rcu **tunnels[4];
+
+ struct net_device *fb_tunnel_dev;
+};
+
+static int vti_fb_tunnel_init(struct net_device *dev);
+static int vti_tunnel_init(struct net_device *dev);
+static void vti_tunnel_setup(struct net_device *dev);
+static void vti_dev_free(struct net_device *dev);
+static int vti_tunnel_bind_dev(struct net_device *dev);
+
+/* Locking : hash tables are protected by RCU and RTNL */
+
+#define for_each_ip_tunnel_rcu(start) \
+ for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+#define VTI_XMIT(stats1, stats2) do { \
+ int err; \
+ int pkt_len = skb->len; \
+ err = dst_output(skb); \
+ if (net_xmit_eval(err) == 0) { \
+ u64_stats_update_begin(&(stats1)->syncp); \
+ (stats1)->tx_bytes += pkt_len; \
+ (stats1)->tx_packets++; \
+ u64_stats_update_end(&(stats1)->syncp); \
+ } else { \
+ (stats2)->tx_errors++; \
+ (stats2)->tx_aborted_errors++; \
+ } \
+} while (0)
+
+
+static struct rtnl_link_stats64 *vti_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ rx_packets = tstats->rx_packets;
+ tx_packets = tstats->tx_packets;
+ rx_bytes = tstats->rx_bytes;
+ tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+ tot->rx_packets += rx_packets;
+ tot->tx_packets += tx_packets;
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
+ }
+
+ tot->multicast = dev->stats.multicast;
+ tot->rx_crc_errors = dev->stats.rx_crc_errors;
+ tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ tot->rx_length_errors = dev->stats.rx_length_errors;
+ tot->rx_errors = dev->stats.rx_errors;
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+ tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+ tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+ tot->tx_errors = dev->stats.tx_errors;
+
+ return tot;
+}
+
+static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
+ __be32 remote, __be32 local)
+{
+ unsigned h0 = HASH(remote);
+ unsigned h1 = HASH(local);
+ struct ip_tunnel *t;
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+
+ for_each_ip_tunnel_rcu(ipn->tunnels_r_l[h0 ^ h1])
+ if (local == t->parms.iph.saddr &&
+ remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
+ return t;
+ for_each_ip_tunnel_rcu(ipn->tunnels_r[h0])
+ if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
+ return t;
+
+ for_each_ip_tunnel_rcu(ipn->tunnels_l[h1])
+ if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
+ return t;
+
+ for_each_ip_tunnel_rcu(ipn->tunnels_wc[0])
+ if (t && (t->dev->flags&IFF_UP))
+ return t;
+ return NULL;
+}
+
+static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
+ struct ip_tunnel_parm *parms)
+{
+ __be32 remote = parms->iph.daddr;
+ __be32 local = parms->iph.saddr;
+ unsigned h = 0;
+ int prio = 0;
+
+ if (remote) {
+ prio |= 2;
+ h ^= HASH(remote);
+ }
+ if (local) {
+ prio |= 1;
+ h ^= HASH(local);
+ }
+ return &ipn->tunnels[prio][h];
+}
+
+static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
+ struct ip_tunnel *t)
+{
+ return __vti_bucket(ipn, &t->parms);
+}
+
+static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
+{
+ struct ip_tunnel __rcu **tp;
+ struct ip_tunnel *iter;
+
+ for (tp = vti_bucket(ipn, t);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
+ break;
+ }
+ }
+}
+
+static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
+{
+ struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);
+
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
+ rcu_assign_pointer(*tp, t);
+}
+
+static struct ip_tunnel *vti_tunnel_locate(struct net *net,
+ struct ip_tunnel_parm *parms,
+ int create)
+{
+ __be32 remote = parms->iph.daddr;
+ __be32 local = parms->iph.saddr;
+ struct ip_tunnel *t, *nt;
+ struct ip_tunnel __rcu **tp;
+ struct net_device *dev;
+ char name[IFNAMSIZ];
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+
+ for (tp = __vti_bucket(ipn, parms);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next) {
+ if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
+ return t;
+ }
+ if (!create)
+ return NULL;
+
+ if (parms->name[0])
+ strlcpy(name, parms->name, IFNAMSIZ);
+ else
+ strcpy(name, "vti%d");
+
+ dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
+ if (dev == NULL)
+ return NULL;
+
+ dev_net_set(dev, net);
+
+ nt = netdev_priv(dev);
+ nt->parms = *parms;
+ dev->rtnl_link_ops = &vti_link_ops;
+
+ vti_tunnel_bind_dev(dev);
+
+ if (register_netdevice(dev) < 0)
+ goto failed_free;
+
+ dev_hold(dev);
+ vti_tunnel_link(ipn, nt);
+ return nt;
+
+failed_free:
+ free_netdev(dev);
+ return NULL;
+}
+
+static void vti_tunnel_uninit(struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+
+ vti_tunnel_unlink(ipn, netdev_priv(dev));
+ dev_put(dev);
+}
+
+static int vti_err(struct sk_buff *skb, u32 info)
+{
+
+ /* All the routers (except for Linux) return only
+ * 8 bytes of packet payload. It means, that precise relaying of
+ * ICMP in the real Internet is absolutely infeasible.
+ */
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ const int type = icmp_hdr(skb)->type;
+ const int code = icmp_hdr(skb)->code;
+ struct ip_tunnel *t;
+ int err;
+
+ switch (type) {
+ default:
+ case ICMP_PARAMETERPROB:
+ return 0;
+
+ case ICMP_DEST_UNREACH:
+ switch (code) {
+ case ICMP_SR_FAILED:
+ case ICMP_PORT_UNREACH:
+ /* Impossible event. */
+ return 0;
+ default:
+ /* All others are translated to HOST_UNREACH. */
+ break;
+ }
+ break;
+ case ICMP_TIME_EXCEEDED:
+ if (code != ICMP_EXC_TTL)
+ return 0;
+ break;
+ }
+
+ err = -ENOENT;
+
+ rcu_read_lock();
+ t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
+ if (t == NULL)
+ goto out;
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+ t->parms.link, 0, IPPROTO_IPIP, 0);
+ err = 0;
+ goto out;
+ }
+
+ err = 0;
+ if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
+ goto out;
+
+ if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
+ t->err_count++;
+ else
+ t->err_count = 1;
+ t->err_time = jiffies;
+out:
+ rcu_read_unlock();
+ return err;
+}
+
+/* We dont digest the packet therefore let the packet pass */
+static int vti_rcv(struct sk_buff *skb)
+{
+ struct ip_tunnel *tunnel;
+ const struct iphdr *iph = ip_hdr(skb);
+
+ rcu_read_lock();
+ tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
+ if (tunnel != NULL) {
+ struct pcpu_tstats *tstats;
+
+ tstats = this_cpu_ptr(tunnel->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
+
+ skb->dev = tunnel->dev;
+ rcu_read_unlock();
+ return 1;
+ }
+ rcu_read_unlock();
+
+ return -1;
+}
+
+/* This function assumes it is being called from dev_queue_xmit()
+ * and that skb is filled properly by that function.
+ */
+
+static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct pcpu_tstats *tstats;
+ struct iphdr *tiph = &tunnel->parms.iph;
+ u8 tos;
+ struct rtable *rt; /* Route to the other host */
+ struct net_device *tdev; /* Device to other host */
+ struct iphdr *old_iph = ip_hdr(skb);
+ __be32 dst = tiph->daddr;
+ struct flowi4 fl4;
+
+ if (skb->protocol != htons(ETH_P_IP))
+ goto tx_error;
+
+ tos = old_iph->tos;
+
+ memset(&fl4, 0, sizeof(fl4));
+ flowi4_init_output(&fl4, tunnel->parms.link,
+ htonl(tunnel->parms.i_key), RT_TOS(tos),
+ RT_SCOPE_UNIVERSE,
+ IPPROTO_IPIP, 0,
+ dst, tiph->saddr, 0, 0);
+ rt = ip_route_output_key(dev_net(dev), &fl4);
+ if (IS_ERR(rt)) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
+ /* if there is no transform then this tunnel is not functional.
+ * Or if the xfrm is not mode tunnel.
+ */
+ if (!rt->dst.xfrm ||
+ rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
+ tdev = rt->dst.dev;
+
+ if (tdev == dev) {
+ ip_rt_put(rt);
+ dev->stats.collisions++;
+ goto tx_error;
+ }
+
+ if (tunnel->err_count > 0) {
+ if (time_before(jiffies,
+ tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
+ tunnel->err_count--;
+ dst_link_failure(skb);
+ } else
+ tunnel->err_count = 0;
+ }
+
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_drop(skb);
+ skb_dst_set(skb, &rt->dst);
+ nf_reset(skb);
+ skb->dev = skb_dst(skb)->dev;
+
+ tstats = this_cpu_ptr(dev->tstats);
+ VTI_XMIT(tstats, &dev->stats);
+ return NETDEV_TX_OK;
+
+tx_error_icmp:
+ dst_link_failure(skb);
+tx_error:
+ dev->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static int vti_tunnel_bind_dev(struct net_device *dev)
+{
+ struct net_device *tdev = NULL;
+ struct ip_tunnel *tunnel;
+ struct iphdr *iph;
+
+ tunnel = netdev_priv(dev);
+ iph = &tunnel->parms.iph;
+
+ if (iph->daddr) {
+ struct rtable *rt;
+ struct flowi4 fl4;
+ memset(&fl4, 0, sizeof(fl4));
+ flowi4_init_output(&fl4, tunnel->parms.link,
+ htonl(tunnel->parms.i_key),
+ RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
+ IPPROTO_IPIP, 0,
+ iph->daddr, iph->saddr, 0, 0);
+ rt = ip_route_output_key(dev_net(dev), &fl4);
+ if (!IS_ERR(rt)) {
+ tdev = rt->dst.dev;
+ ip_rt_put(rt);
+ }
+ dev->flags |= IFF_POINTOPOINT;
+ }
+
+ if (!tdev && tunnel->parms.link)
+ tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
+
+ if (tdev) {
+ dev->hard_header_len = tdev->hard_header_len +
+ sizeof(struct iphdr);
+ dev->mtu = tdev->mtu;
+ }
+ dev->iflink = tunnel->parms.link;
+ return dev->mtu;
+}
+
+static int
+vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ int err = 0;
+ struct ip_tunnel_parm p;
+ struct ip_tunnel *t;
+ struct net *net = dev_net(dev);
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+
+ switch (cmd) {
+ case SIOCGETTUNNEL:
+ t = NULL;
+ if (dev == ipn->fb_tunnel_dev) {
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
+ sizeof(p))) {
+ err = -EFAULT;
+ break;
+ }
+ t = vti_tunnel_locate(net, &p, 0);
+ }
+ if (t == NULL)
+ t = netdev_priv(dev);
+ memcpy(&p, &t->parms, sizeof(p));
+ p.i_flags |= GRE_KEY | VTI_ISVTI;
+ p.o_flags |= GRE_KEY;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+ err = -EFAULT;
+ break;
+
+ case SIOCADDTUNNEL:
+ case SIOCCHGTUNNEL:
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ goto done;
+
+ err = -EFAULT;
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+ goto done;
+
+ err = -EINVAL;
+ if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
+ p.iph.ihl != 5)
+ goto done;
+
+ t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
+
+ if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
+ if (t != NULL) {
+ if (t->dev != dev) {
+ err = -EEXIST;
+ break;
+ }
+ } else {
+ if (((dev->flags&IFF_POINTOPOINT) &&
+ !p.iph.daddr) ||
+ (!(dev->flags&IFF_POINTOPOINT) &&
+ p.iph.daddr)) {
+ err = -EINVAL;
+ break;
+ }
+ t = netdev_priv(dev);
+ vti_tunnel_unlink(ipn, t);
+ synchronize_net();
+ t->parms.iph.saddr = p.iph.saddr;
+ t->parms.iph.daddr = p.iph.daddr;
+ t->parms.i_key = p.i_key;
+ t->parms.o_key = p.o_key;
+ t->parms.iph.protocol = IPPROTO_IPIP;
+ memcpy(dev->dev_addr, &p.iph.saddr, 4);
+ memcpy(dev->broadcast, &p.iph.daddr, 4);
+ vti_tunnel_link(ipn, t);
+ netdev_state_change(dev);
+ }
+ }
+
+ if (t) {
+ err = 0;
+ if (cmd == SIOCCHGTUNNEL) {
+ t->parms.i_key = p.i_key;
+ t->parms.o_key = p.o_key;
+ if (t->parms.link != p.link) {
+ t->parms.link = p.link;
+ vti_tunnel_bind_dev(dev);
+ netdev_state_change(dev);
+ }
+ }
+ p.i_flags |= GRE_KEY | VTI_ISVTI;
+ p.o_flags |= GRE_KEY;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms,
+ sizeof(p)))
+ err = -EFAULT;
+ } else
+ err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+ break;
+
+ case SIOCDELTUNNEL:
+ err = -EPERM;
+ if (!capable(CAP_NET_ADMIN))
+ goto done;
+
+ if (dev == ipn->fb_tunnel_dev) {
+ err = -EFAULT;
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
+ sizeof(p)))
+ goto done;
+ err = -ENOENT;
+
+ t = vti_tunnel_locate(net, &p, 0);
+ if (t == NULL)
+ goto done;
+ err = -EPERM;
+ if (t->dev == ipn->fb_tunnel_dev)
+ goto done;
+ dev = t->dev;
+ }
+ unregister_netdevice(dev);
+ err = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+done:
+ return err;
+}
+
+static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 68 || new_mtu > 0xFFF8)
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static const struct net_device_ops vti_netdev_ops = {
+ .ndo_init = vti_tunnel_init,
+ .ndo_uninit = vti_tunnel_uninit,
+ .ndo_start_xmit = vti_tunnel_xmit,
+ .ndo_do_ioctl = vti_tunnel_ioctl,
+ .ndo_change_mtu = vti_tunnel_change_mtu,
+ .ndo_get_stats64 = vti_get_stats64,
+};
+
+static void vti_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
+static void vti_tunnel_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &vti_netdev_ops;
+ dev->destructor = vti_dev_free;
+
+ dev->type = ARPHRD_TUNNEL;
+ dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
+ dev->mtu = ETH_DATA_LEN;
+ dev->flags = IFF_NOARP;
+ dev->iflink = 0;
+ dev->addr_len = 4;
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_LLTX;
+ dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+}
+
+static int vti_tunnel_init(struct net_device *dev)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+
+ tunnel->dev = dev;
+ strcpy(tunnel->parms.name, dev->name);
+
+ memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
+ memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
+
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int __net_init vti_fb_tunnel_init(struct net_device *dev)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct iphdr *iph = &tunnel->parms.iph;
+ struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
+
+ tunnel->dev = dev;
+ strcpy(tunnel->parms.name, dev->name);
+
+ iph->version = 4;
+ iph->protocol = IPPROTO_IPIP;
+ iph->ihl = 5;
+
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ dev_hold(dev);
+ rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+ return 0;
+}
+
+static struct xfrm_tunnel vti_handler __read_mostly = {
+ .handler = vti_rcv,
+ .err_handler = vti_err,
+ .priority = 1,
+};
+
+static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
+{
+ int prio;
+
+ for (prio = 1; prio < 4; prio++) {
+ int h;
+ for (h = 0; h < HASH_SIZE; h++) {
+ struct ip_tunnel *t;
+
+ t = rtnl_dereference(ipn->tunnels[prio][h]);
+ while (t != NULL) {
+ unregister_netdevice_queue(t->dev, head);
+ t = rtnl_dereference(t->next);
+ }
+ }
+ }
+}
+
+static int __net_init vti_init_net(struct net *net)
+{
+ int err;
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+
+ ipn->tunnels[0] = ipn->tunnels_wc;
+ ipn->tunnels[1] = ipn->tunnels_l;
+ ipn->tunnels[2] = ipn->tunnels_r;
+ ipn->tunnels[3] = ipn->tunnels_r_l;
+
+ ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
+ "ip_vti0",
+ vti_tunnel_setup);
+ if (!ipn->fb_tunnel_dev) {
+ err = -ENOMEM;
+ goto err_alloc_dev;
+ }
+ dev_net_set(ipn->fb_tunnel_dev, net);
+
+ err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
+ if (err)
+ goto err_reg_dev;
+ ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;
+
+ err = register_netdev(ipn->fb_tunnel_dev);
+ if (err)
+ goto err_reg_dev;
+ return 0;
+
+err_reg_dev:
+ vti_dev_free(ipn->fb_tunnel_dev);
+err_alloc_dev:
+ /* nothing */
+ return err;
+}
+
+static void __net_exit vti_exit_net(struct net *net)
+{
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ vti_destroy_tunnels(ipn, &list);
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
+static struct pernet_operations vti_net_ops = {
+ .init = vti_init_net,
+ .exit = vti_exit_net,
+ .id = &vti_net_id,
+ .size = sizeof(struct vti_net),
+};
+
+static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ return 0;
+}
+
+static void vti_netlink_parms(struct nlattr *data[],
+ struct ip_tunnel_parm *parms)
+{
+ memset(parms, 0, sizeof(*parms));
+
+ parms->iph.protocol = IPPROTO_IPIP;
+
+ if (!data)
+ return;
+
+ if (data[IFLA_VTI_LINK])
+ parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
+
+ if (data[IFLA_VTI_IKEY])
+ parms->i_key = nla_get_be32(data[IFLA_VTI_IKEY]);
+
+ if (data[IFLA_VTI_OKEY])
+ parms->o_key = nla_get_be32(data[IFLA_VTI_OKEY]);
+
+ if (data[IFLA_VTI_LOCAL])
+ parms->iph.saddr = nla_get_be32(data[IFLA_VTI_LOCAL]);
+
+ if (data[IFLA_VTI_REMOTE])
+ parms->iph.daddr = nla_get_be32(data[IFLA_VTI_REMOTE]);
+
+}
+
+static int vti_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct ip_tunnel *nt;
+ struct net *net = dev_net(dev);
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+ int mtu;
+ int err;
+
+ nt = netdev_priv(dev);
+ vti_netlink_parms(data, &nt->parms);
+
+ if (vti_tunnel_locate(net, &nt->parms, 0))
+ return -EEXIST;
+
+ mtu = vti_tunnel_bind_dev(dev);
+ if (!tb[IFLA_MTU])
+ dev->mtu = mtu;
+
+ err = register_netdevice(dev);
+ if (err)
+ goto out;
+
+ dev_hold(dev);
+ vti_tunnel_link(ipn, nt);
+
+out:
+ return err;
+}
+
+static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct ip_tunnel *t, *nt;
+ struct net *net = dev_net(dev);
+ struct vti_net *ipn = net_generic(net, vti_net_id);
+ struct ip_tunnel_parm p;
+ int mtu;
+
+ if (dev == ipn->fb_tunnel_dev)
+ return -EINVAL;
+
+ nt = netdev_priv(dev);
+ vti_netlink_parms(data, &p);
+
+ t = vti_tunnel_locate(net, &p, 0);
+
+ if (t) {
+ if (t->dev != dev)
+ return -EEXIST;
+ } else {
+ t = nt;
+
+ vti_tunnel_unlink(ipn, t);
+ t->parms.iph.saddr = p.iph.saddr;
+ t->parms.iph.daddr = p.iph.daddr;
+ t->parms.i_key = p.i_key;
+ t->parms.o_key = p.o_key;
+ if (dev->type != ARPHRD_ETHER) {
+ memcpy(dev->dev_addr, &p.iph.saddr, 4);
+ memcpy(dev->broadcast, &p.iph.daddr, 4);
+ }
+ vti_tunnel_link(ipn, t);
+ netdev_state_change(dev);
+ }
+
+ if (t->parms.link != p.link) {
+ t->parms.link = p.link;
+ mtu = vti_tunnel_bind_dev(dev);
+ if (!tb[IFLA_MTU])
+ dev->mtu = mtu;
+ netdev_state_change(dev);
+ }
+
+ return 0;
+}
+
+static size_t vti_get_size(const struct net_device *dev)
+{
+ return
+ /* IFLA_VTI_LINK */
+ nla_total_size(4) +
+ /* IFLA_VTI_IKEY */
+ nla_total_size(4) +
+ /* IFLA_VTI_OKEY */
+ nla_total_size(4) +
+ /* IFLA_VTI_LOCAL */
+ nla_total_size(4) +
+ /* IFLA_VTI_REMOTE */
+ nla_total_size(4) +
+ 0;
+}
+
+static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct ip_tunnel *t = netdev_priv(dev);
+ struct ip_tunnel_parm *p = &t->parms;
+
+ nla_put_u32(skb, IFLA_VTI_LINK, p->link);
+ nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key);
+ nla_put_be32(skb, IFLA_VTI_OKEY, p->o_key);
+ nla_put_be32(skb, IFLA_VTI_LOCAL, p->iph.saddr);
+ nla_put_be32(skb, IFLA_VTI_REMOTE, p->iph.daddr);
+
+ return 0;
+}
+
+static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
+ [IFLA_VTI_LINK] = { .type = NLA_U32 },
+ [IFLA_VTI_IKEY] = { .type = NLA_U32 },
+ [IFLA_VTI_OKEY] = { .type = NLA_U32 },
+ [IFLA_VTI_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
+ [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+};
+
+static struct rtnl_link_ops vti_link_ops __read_mostly = {
+ .kind = "vti",
+ .maxtype = IFLA_VTI_MAX,
+ .policy = vti_policy,
+ .priv_size = sizeof(struct ip_tunnel),
+ .setup = vti_tunnel_setup,
+ .validate = vti_tunnel_validate,
+ .newlink = vti_newlink,
+ .changelink = vti_changelink,
+ .get_size = vti_get_size,
+ .fill_info = vti_fill_info,
+};
+
+static int __init vti_init(void)
+{
+ int err;
+
+ pr_info("IPv4 over IPSec tunneling driver\n");
+
+ err = register_pernet_device(&vti_net_ops);
+ if (err < 0)
+ return err;
+ err = xfrm4_mode_tunnel_input_register(&vti_handler);
+ if (err < 0) {
+ unregister_pernet_device(&vti_net_ops);
+ pr_info(KERN_INFO "vti init: can't register tunnel\n");
+ }
+
+ err = rtnl_link_register(&vti_link_ops);
+ if (err < 0)
+ goto rtnl_link_failed;
+
+ return err;
+
+rtnl_link_failed:
+ xfrm4_mode_tunnel_input_deregister(&vti_handler);
+ unregister_pernet_device(&vti_net_ops);
+ return err;
+}
+
+static void __exit vti_fini(void)
+{
+ rtnl_link_unregister(&vti_link_ops);
+ if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
+ pr_info("vti close: can't deregister tunnel\n");
+
+ unregister_pernet_device(&vti_net_ops);
+}
+
+module_init(vti_init);
+module_exit(vti_fini);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("vti");
+MODULE_ALIAS_NETDEV("ip_vti0");
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 63b64c45a826..d3ab47e19a89 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -31,17 +31,26 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
struct xfrm_state *x;
- if (icmp_hdr(skb)->type != ICMP_DEST_UNREACH ||
- icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ switch (icmp_hdr(skb)->type) {
+ case ICMP_DEST_UNREACH:
+ if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ return;
+ case ICMP_REDIRECT:
+ break;
+ default:
return;
+ }
spi = htonl(ntohs(ipch->cpi));
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
spi, IPPROTO_COMP, AF_INET);
if (!x)
return;
- NETDEBUG(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/%pI4\n",
- spi, &iph->daddr);
+
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
+ else
+ ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 2d0f99bf61b3..99af1f0cc658 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -348,9 +348,6 @@ static int ipip_err(struct sk_buff *skb, u32 info)
case ICMP_PORT_UNREACH:
/* Impossible event. */
return 0;
- case ICMP_FRAG_NEEDED:
- /* Soft state for pmtu is maintained by IP core. */
- return 0;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -363,13 +360,32 @@ static int ipip_err(struct sk_buff *skb, u32 info)
if (code != ICMP_EXC_TTL)
return 0;
break;
+ case ICMP_REDIRECT:
+ break;
}
err = -ENOENT;
rcu_read_lock();
t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
- if (t == NULL || t->parms.iph.daddr == 0)
+ if (t == NULL)
+ goto out;
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+ t->dev->ifindex, 0, IPPROTO_IPIP, 0);
+ err = 0;
+ goto out;
+ }
+
+ if (type == ICMP_REDIRECT) {
+ ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
+ IPPROTO_IPIP, 0);
+ err = 0;
+ goto out;
+ }
+
+ if (t->parms.iph.daddr == 0)
goto out;
err = 0;
@@ -471,7 +487,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_fifo_errors++;
goto tx_error;
}
- dst = rt->rt_gateway;
+ dst = rt_nexthop(rt, old_iph->daddr);
}
rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
@@ -503,7 +519,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if ((old_iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(old_iph->tot_len)) {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index c94bbc6f2ba3..8eec8f4a0536 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -524,8 +524,8 @@ failure:
}
#endif
-/*
- * Delete a VIF entry
+/**
+ * vif_delete - Delete a VIF entry
* @notify: Set to 1, if the caller is a notifier_call
*/
@@ -1795,9 +1795,12 @@ static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowi4_tos = RT_TOS(iph->tos),
- .flowi4_oif = rt->rt_oif,
- .flowi4_iif = rt->rt_iif,
- .flowi4_mark = rt->rt_mark,
+ .flowi4_oif = (rt_is_output_route(rt) ?
+ skb->dev->ifindex : 0),
+ .flowi4_iif = (rt_is_output_route(rt) ?
+ net->loopback_dev->ifindex :
+ skb->dev->ifindex),
+ .flowi4_mark = skb->mark,
};
struct mr_table *mrt;
int err;
@@ -2006,37 +2009,37 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
{
int ct;
struct rtnexthop *nhp;
- u8 *b = skb_tail_pointer(skb);
- struct rtattr *mp_head;
+ struct nlattr *mp_attr;
/* If cache is unresolved, don't try to parse IIF and OIF */
if (c->mfc_parent >= MAXVIFS)
return -ENOENT;
- if (VIF_EXISTS(mrt, c->mfc_parent))
- RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
+ if (VIF_EXISTS(mrt, c->mfc_parent) &&
+ nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
+ return -EMSGSIZE;
- mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
+ if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
+ return -EMSGSIZE;
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
- if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
- goto rtattr_failure;
- nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
+ if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
+ nla_nest_cancel(skb, mp_attr);
+ return -EMSGSIZE;
+ }
+
nhp->rtnh_flags = 0;
nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
nhp->rtnh_len = sizeof(*nhp);
}
}
- mp_head->rta_type = RTA_MULTIPATH;
- mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
+
+ nla_nest_end(skb, mp_attr);
+
rtm->rtm_type = RTN_MULTICAST;
return 1;
-
-rtattr_failure:
- nlmsg_trim(skb, b);
- return -EMSGSIZE;
}
int ipmr_get_route(struct net *net, struct sk_buff *skb,
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 2f210c79dc87..cbb6a1a6f6f7 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -52,7 +52,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
struct nf_nat_ipv4_range newrange;
const struct nf_nat_ipv4_multi_range_compat *mr;
const struct rtable *rt;
- __be32 newsrc;
+ __be32 newsrc, nh;
NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING);
@@ -70,7 +70,8 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
mr = par->targinfo;
rt = skb_rtable(skb);
- newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE);
+ nh = rt_nexthop(rt, ip_hdr(skb)->daddr);
+ newsrc = inet_select_addr(par->out, nh, RT_SCOPE_UNIVERSE);
if (!newsrc) {
pr_info("%s ate my IP address\n", par->out->name);
return NF_DROP;
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index ba5756d20165..1109f7f6c254 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -196,12 +196,15 @@ static void ipt_ulog_packet(unsigned int hooknum,
pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
- /* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
- nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
- sizeof(*pm)+copy_len);
+ nlh = nlmsg_put(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
+ sizeof(*pm)+copy_len, 0);
+ if (!nlh) {
+ pr_debug("error during nlmsg_put\n");
+ goto out_unlock;
+ }
ub->qlen++;
- pm = NLMSG_DATA(nlh);
+ pm = nlmsg_data(nlh);
/* We might not have a timestamp, get one */
if (skb->tstamp.tv64 == 0)
@@ -261,13 +264,11 @@ static void ipt_ulog_packet(unsigned int hooknum,
nlh->nlmsg_type = NLMSG_DONE;
ulog_send(groupnum);
}
-
+out_unlock:
spin_unlock_bh(&ulog_lock);
return;
-nlmsg_failure:
- pr_debug("error during NLMSG_PUT\n");
alloc_failure:
pr_debug("Error building netlink message\n");
spin_unlock_bh(&ulog_lock);
@@ -380,6 +381,9 @@ static struct nf_logger ipt_ulog_logger __read_mostly = {
static int __init ulog_tg_init(void)
{
int ret, i;
+ struct netlink_kernel_cfg cfg = {
+ .groups = ULOG_MAXNLGROUPS,
+ };
pr_debug("init module\n");
@@ -392,9 +396,8 @@ static int __init ulog_tg_init(void)
for (i = 0; i < ULOG_MAXNLGROUPS; i++)
setup_timer(&ulog_buffers[i].timer, ulog_timer, i);
- nflognl = netlink_kernel_create(&init_net,
- NETLINK_NFLOG, ULOG_MAXNLGROUPS, NULL,
- NULL, THIS_MODULE);
+ nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG,
+ THIS_MODULE, &cfg);
if (!nflognl)
return -ENOMEM;
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 91747d4ebc26..e7ff2dcab6ce 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -95,11 +95,11 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
return NF_ACCEPT;
}
-static unsigned int ipv4_confirm(unsigned int hooknum,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int ipv4_helper(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -110,24 +110,38 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- goto out;
+ return NF_ACCEPT;
help = nfct_help(ct);
if (!help)
- goto out;
+ return NF_ACCEPT;
/* rcu_read_lock()ed by nf_hook_slow */
helper = rcu_dereference(help->helper);
if (!helper)
- goto out;
+ return NF_ACCEPT;
ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
ct, ctinfo);
- if (ret != NF_ACCEPT) {
+ if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL,
"nf_ct_%s: dropping packet", helper->name);
- return ret;
}
+ return ret;
+}
+
+static unsigned int ipv4_confirm(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct || ctinfo == IP_CT_RELATED_REPLY)
+ goto out;
/* adjust seqs for loopback traffic only in outgoing direction */
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
@@ -185,6 +199,13 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
.priority = NF_IP_PRI_CONNTRACK,
},
{
+ .hook = ipv4_helper,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_HELPER,
+ },
+ {
.hook = ipv4_confirm,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
@@ -192,6 +213,13 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
{
+ .hook = ipv4_helper,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_HELPER,
+ },
+ {
.hook = ipv4_confirm,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV4,
@@ -207,35 +235,30 @@ static int log_invalid_proto_max = 255;
static ctl_table ip_ct_sysctl_table[] = {
{
.procname = "ip_conntrack_max",
- .data = &nf_conntrack_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_count",
- .data = &init_net.ct.count,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_buckets",
- .data = &init_net.ct.htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_checksum",
- .data = &init_net.ct.sysctl_checksum,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_log_invalid",
- .data = &init_net.ct.sysctl_log_invalid,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -351,6 +374,25 @@ static struct nf_sockopt_ops so_getorigdst = {
.owner = THIS_MODULE,
};
+static int ipv4_init_net(struct net *net)
+{
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ struct nf_ip_net *in = &net->ct.nf_ct_proto;
+ in->ctl_table = kmemdup(ip_ct_sysctl_table,
+ sizeof(ip_ct_sysctl_table),
+ GFP_KERNEL);
+ if (!in->ctl_table)
+ return -ENOMEM;
+
+ in->ctl_table[0].data = &nf_conntrack_max;
+ in->ctl_table[1].data = &net->ct.count;
+ in->ctl_table[2].data = &net->ct.htable_size;
+ in->ctl_table[3].data = &net->ct.sysctl_checksum;
+ in->ctl_table[4].data = &net->ct.sysctl_log_invalid;
+#endif
+ return 0;
+}
+
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
.l3proto = PF_INET,
.name = "ipv4",
@@ -366,8 +408,8 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
#endif
#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
.ctl_table_path = "net/ipv4/netfilter",
- .ctl_table = ip_ct_sysctl_table,
#endif
+ .init_net = ipv4_init_net,
.me = THIS_MODULE,
};
@@ -378,6 +420,65 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
MODULE_ALIAS("ip_conntrack");
MODULE_LICENSE("GPL");
+static int ipv4_net_init(struct net *net)
+{
+ int ret = 0;
+
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_tcp4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_tcp4 :protocol register failed\n");
+ goto out_tcp;
+ }
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_udp4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_udp4 :protocol register failed\n");
+ goto out_udp;
+ }
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_icmp);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_icmp4 :protocol register failed\n");
+ goto out_icmp;
+ }
+ ret = nf_conntrack_l3proto_register(net,
+ &nf_conntrack_l3proto_ipv4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l3proto_ipv4 :protocol register failed\n");
+ goto out_ipv4;
+ }
+ return 0;
+out_ipv4:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_icmp);
+out_icmp:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_udp4);
+out_udp:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_tcp4);
+out_tcp:
+ return ret;
+}
+
+static void ipv4_net_exit(struct net *net)
+{
+ nf_conntrack_l3proto_unregister(net,
+ &nf_conntrack_l3proto_ipv4);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_icmp);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_udp4);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_tcp4);
+}
+
+static struct pernet_operations ipv4_net_ops = {
+ .init = ipv4_net_init,
+ .exit = ipv4_net_exit,
+};
+
static int __init nf_conntrack_l3proto_ipv4_init(void)
{
int ret = 0;
@@ -391,35 +492,17 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
return ret;
}
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4);
+ ret = register_pernet_subsys(&ipv4_net_ops);
if (ret < 0) {
- pr_err("nf_conntrack_ipv4: can't register tcp.\n");
+ pr_err("nf_conntrack_ipv4: can't register pernet ops\n");
goto cleanup_sockopt;
}
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4);
- if (ret < 0) {
- pr_err("nf_conntrack_ipv4: can't register udp.\n");
- goto cleanup_tcp;
- }
-
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp);
- if (ret < 0) {
- pr_err("nf_conntrack_ipv4: can't register icmp.\n");
- goto cleanup_udp;
- }
-
- ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4);
- if (ret < 0) {
- pr_err("nf_conntrack_ipv4: can't register ipv4\n");
- goto cleanup_icmp;
- }
-
ret = nf_register_hooks(ipv4_conntrack_ops,
ARRAY_SIZE(ipv4_conntrack_ops));
if (ret < 0) {
pr_err("nf_conntrack_ipv4: can't register hooks.\n");
- goto cleanup_ipv4;
+ goto cleanup_pernet;
}
#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
ret = nf_conntrack_ipv4_compat_init();
@@ -431,14 +514,8 @@ static int __init nf_conntrack_l3proto_ipv4_init(void)
cleanup_hooks:
nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
#endif
- cleanup_ipv4:
- nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
- cleanup_icmp:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
- cleanup_udp:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
- cleanup_tcp:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
+ cleanup_pernet:
+ unregister_pernet_subsys(&ipv4_net_ops);
cleanup_sockopt:
nf_unregister_sockopt(&so_getorigdst);
return ret;
@@ -451,10 +528,7 @@ static void __exit nf_conntrack_l3proto_ipv4_fini(void)
nf_conntrack_ipv4_compat_fini();
#endif
nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
- nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
+ unregister_pernet_subsys(&ipv4_net_ops);
nf_unregister_sockopt(&so_getorigdst);
}
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 0847e373d33c..5241d997ab75 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -23,6 +23,11 @@
static unsigned int nf_ct_icmp_timeout __read_mostly = 30*HZ;
+static inline struct nf_icmp_net *icmp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp;
+}
+
static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
@@ -77,7 +82,7 @@ static int icmp_print_tuple(struct seq_file *s,
static unsigned int *icmp_get_timeouts(struct net *net)
{
- return &nf_ct_icmp_timeout;
+ return &icmp_pernet(net)->timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
@@ -274,16 +279,18 @@ static int icmp_nlattr_tuple_size(void)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeout = data;
+ struct nf_icmp_net *in = icmp_pernet(net);
if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ;
} else {
/* Set default ICMP timeout. */
- *timeout = nf_ct_icmp_timeout;
+ *timeout = in->timeout;
}
return 0;
}
@@ -308,11 +315,9 @@ icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *icmp_sysctl_header;
static struct ctl_table icmp_sysctl_table[] = {
{
.procname = "nf_conntrack_icmp_timeout",
- .data = &nf_ct_icmp_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -323,7 +328,6 @@ static struct ctl_table icmp_sysctl_table[] = {
static struct ctl_table icmp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_icmp_timeout",
- .data = &nf_ct_icmp_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -333,6 +337,62 @@ static struct ctl_table icmp_compat_sysctl_table[] = {
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
+static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct nf_icmp_net *in)
+{
+#ifdef CONFIG_SYSCTL
+ pn->ctl_table = kmemdup(icmp_sysctl_table,
+ sizeof(icmp_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &in->timeout;
+#endif
+ return 0;
+}
+
+static int icmp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+ struct nf_icmp_net *in)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ pn->ctl_compat_table = kmemdup(icmp_compat_sysctl_table,
+ sizeof(icmp_compat_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_compat_table)
+ return -ENOMEM;
+
+ pn->ctl_compat_table[0].data = &in->timeout;
+#endif
+#endif
+ return 0;
+}
+
+static int icmp_init_net(struct net *net, u_int16_t proto)
+{
+ int ret;
+ struct nf_icmp_net *in = icmp_pernet(net);
+ struct nf_proto_net *pn = &in->pn;
+
+ in->timeout = nf_ct_icmp_timeout;
+
+ ret = icmp_kmemdup_compat_sysctl_table(pn, in);
+ if (ret < 0)
+ return ret;
+
+ ret = icmp_kmemdup_sysctl_table(pn, in);
+ if (ret < 0)
+ nf_ct_kfree_compat_sysctl_table(pn);
+
+ return ret;
+}
+
+static struct nf_proto_net *icmp_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
{
.l3proto = PF_INET,
@@ -362,11 +422,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
.nla_policy = icmp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_header = &icmp_sysctl_header,
- .ctl_table = icmp_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- .ctl_compat_table = icmp_compat_sysctl_table,
-#endif
-#endif
+ .init_net = icmp_init_net,
+ .get_net_proto = icmp_get_net_proto,
};
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index 9bb1b8a37a22..742815518b0f 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -94,14 +94,14 @@ static struct nf_hook_ops ipv4_defrag_ops[] = {
{
.hook = ipv4_conntrack_defrag,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_DEFRAG,
},
{
.hook = ipv4_conntrack_defrag,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_CONNTRACK_DEFRAG,
},
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 7b22382ff0e9..3c04d24e2976 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -13,10 +13,10 @@
#include <linux/skbuff.h>
#include <linux/udp.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
#include <linux/netfilter/nf_conntrack_amanda.h>
MODULE_AUTHOR("Brian J. Murrell <netfilter@interlinx.bc.ca>");
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index abb52adf5acd..44b082fd48ab 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -691,6 +691,10 @@ static struct nf_ct_helper_expectfn follow_master_nat = {
.expectfn = nf_nat_follow_master,
};
+static struct nfq_ct_nat_hook nfq_ct_nat = {
+ .seq_adjust = nf_nat_tcp_seq_adjust,
+};
+
static int __init nf_nat_init(void)
{
size_t i;
@@ -731,6 +735,7 @@ static int __init nf_nat_init(void)
nfnetlink_parse_nat_setup);
BUG_ON(nf_ct_nat_offset != NULL);
RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
+ RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
return 0;
cleanup_extend:
@@ -747,6 +752,7 @@ static void __exit nf_nat_cleanup(void)
RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
+ RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
synchronize_net();
}
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index cad29c121318..c6784a18c1c4 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -95,7 +95,7 @@ static int set_sig_addr(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data,
TransportAddress *taddr, int count)
{
- const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ const struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int i;
__be16 port;
@@ -178,7 +178,7 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
struct nf_conntrack_expect *rtp_exp,
struct nf_conntrack_expect *rtcp_exp)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int i;
u_int16_t nated_port;
@@ -330,7 +330,7 @@ static int nat_h245(struct sk_buff *skb, struct nf_conn *ct,
TransportAddress *taddr, __be16 port,
struct nf_conntrack_expect *exp)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
u_int16_t nated_port = ntohs(port);
@@ -419,7 +419,7 @@ static int nat_q931(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data, TransportAddress *taddr, int idx,
__be16 port, struct nf_conntrack_expect *exp)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
u_int16_t nated_port = ntohs(port);
union nf_inet_addr addr;
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index af65958f6308..2e59ad0b90ca 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -153,6 +153,19 @@ void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
}
EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
+void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ u32 ctinfo, int off)
+{
+ const struct tcphdr *th;
+
+ if (nf_ct_protonum(ct) != IPPROTO_TCP)
+ return;
+
+ th = (struct tcphdr *)(skb_network_header(skb)+ ip_hdrlen(skb));
+ nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
+}
+EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
+
static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
int datalen, __sum16 *check, int oldlen)
{
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index c273d58980ae..388140881ebe 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -49,7 +49,7 @@ static void pptp_nat_expected(struct nf_conn *ct,
const struct nf_nat_pptp *nat_pptp_info;
struct nf_nat_ipv4_range range;
- ct_pptp_info = &nfct_help(master)->help.ct_pptp_info;
+ ct_pptp_info = nfct_help_data(master);
nat_pptp_info = &nfct_nat(master)->help.nat_pptp_info;
/* And here goes the grand finale of corrosion... */
@@ -123,7 +123,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
__be16 new_callid;
unsigned int cid_off;
- ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info;
+ ct_pptp_info = nfct_help_data(ct);
nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
new_callid = ct_pptp_info->pns_call_id;
@@ -192,7 +192,7 @@ pptp_exp_gre(struct nf_conntrack_expect *expect_orig,
struct nf_ct_pptp_master *ct_pptp_info;
struct nf_nat_pptp *nat_pptp_info;
- ct_pptp_info = &nfct_help(ct)->help.ct_pptp_info;
+ ct_pptp_info = nfct_help_data(ct);
nat_pptp_info = &nfct_nat(ct)->help.nat_pptp_info;
/* save original PAC call ID in nat_info */
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 746edec8b86e..bac712293fd6 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -405,7 +405,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
ptr = *octets;
while (ctx->pointer < eoc) {
- if (!asn1_octet_decode(ctx, (unsigned char *)ptr++)) {
+ if (!asn1_octet_decode(ctx, ptr++)) {
kfree(*octets);
*octets = NULL;
return 0;
@@ -759,7 +759,7 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
}
break;
case SNMP_OBJECTID:
- if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) {
+ if (!asn1_oid_decode(ctx, end, &lp, &len)) {
kfree(id);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index a2901bf829c0..9dbb8d284f99 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -8,10 +8,10 @@
#include <linux/module.h>
#include <linux/udp.h>
-#include <net/netfilter/nf_nat_helper.h>
-#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/netfilter/nf_nat_rule.h>
#include <linux/netfilter/nf_conntrack_tftp.h>
MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2c00e8bf684d..6232d476f37e 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -371,6 +371,7 @@ void ping_err(struct sk_buff *skb, u32 info)
break;
case ICMP_DEST_UNREACH:
if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
+ ipv4_sk_update_pmtu(skb, sk, info);
if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
err = EMSGSIZE;
harderr = 1;
@@ -386,6 +387,7 @@ void ping_err(struct sk_buff *skb, u32 info)
break;
case ICMP_REDIRECT:
/* See ICMP_SOURCE_QUENCH */
+ ipv4_sk_redirect(skb, sk);
err = EREMOTEIO;
break;
}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 8af0d44e4e22..957acd12250b 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -232,7 +232,6 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPDSACKOfoSent", LINUX_MIB_TCPDSACKOFOSENT),
SNMP_MIB_ITEM("TCPDSACKRecv", LINUX_MIB_TCPDSACKRECV),
SNMP_MIB_ITEM("TCPDSACKOfoRecv", LINUX_MIB_TCPDSACKOFORECV),
- SNMP_MIB_ITEM("TCPAbortOnSyn", LINUX_MIB_TCPABORTONSYN),
SNMP_MIB_ITEM("TCPAbortOnData", LINUX_MIB_TCPABORTONDATA),
SNMP_MIB_ITEM("TCPAbortOnClose", LINUX_MIB_TCPABORTONCLOSE),
SNMP_MIB_ITEM("TCPAbortOnMemory", LINUX_MIB_TCPABORTONMEMORY),
@@ -258,6 +257,12 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP),
SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL),
SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE),
+ SNMP_MIB_ITEM("TCPOFOQueue", LINUX_MIB_TCPOFOQUEUE),
+ SNMP_MIB_ITEM("TCPOFODrop", LINUX_MIB_TCPOFODROP),
+ SNMP_MIB_ITEM("TCPOFOMerge", LINUX_MIB_TCPOFOMERGE),
+ SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
+ SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
+ SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 9ae5c01cd0b2..8918eff1426d 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -36,9 +36,7 @@ const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly;
int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
- int hash = protocol & (MAX_INET_PROTOS - 1);
-
- return !cmpxchg((const struct net_protocol **)&inet_protos[hash],
+ return !cmpxchg((const struct net_protocol **)&inet_protos[protocol],
NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet_add_protocol);
@@ -49,9 +47,9 @@ EXPORT_SYMBOL(inet_add_protocol);
int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
{
- int ret, hash = protocol & (MAX_INET_PROTOS - 1);
+ int ret;
- ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash],
+ ret = (cmpxchg((const struct net_protocol **)&inet_protos[protocol],
prot, NULL) == prot) ? 0 : -1;
synchronize_net();
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 4032b818f3e4..ff0f071969ea 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -216,6 +216,11 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
int err = 0;
int harderr = 0;
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
+ ipv4_sk_update_pmtu(skb, sk, info);
+ else if (type == ICMP_REDIRECT)
+ ipv4_sk_redirect(skb, sk);
+
/* Report error on raw socket, if:
1. User requested ip_recverr.
2. Socket is connected (otherwise the error indication
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 98b30d08efe9..6bcb8fc71cbc 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -133,10 +133,6 @@ static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
-static int rt_chain_length_max __read_mostly = 20;
-
-static struct delayed_work expires_work;
-static unsigned long expires_ljiffies;
/*
* Interface to generic destination cache.
@@ -145,11 +141,12 @@ static unsigned long expires_ljiffies;
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int ipv4_mtu(const struct dst_entry *dst);
-static void ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
-static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
-static int rt_garbage_collect(struct dst_ops *ops);
+static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu);
+static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
int how)
@@ -158,54 +155,26 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
- struct rtable *rt = (struct rtable *) dst;
- struct inet_peer *peer;
- u32 *p = NULL;
-
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 1);
-
- peer = rt->peer;
- if (peer) {
- u32 *old_p = __DST_METRICS_PTR(old);
- unsigned long prev, new;
-
- p = peer->metrics;
- if (inet_metrics_new(peer))
- memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
-
- new = (unsigned long) p;
- prev = cmpxchg(&dst->_metrics, old, new);
-
- if (prev != old) {
- p = __DST_METRICS_PTR(prev);
- if (prev & DST_METRICS_READ_ONLY)
- p = NULL;
- } else {
- if (rt->fi) {
- fib_info_put(rt->fi);
- rt->fi = NULL;
- }
- }
- }
- return p;
+ WARN_ON(1);
+ return NULL;
}
-static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr);
+static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr);
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
- .gc = rt_garbage_collect,
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
.mtu = ipv4_mtu,
.cow_metrics = ipv4_cow_metrics,
- .destroy = ipv4_dst_destroy,
.ifdown = ipv4_dst_ifdown,
.negative_advice = ipv4_negative_advice,
.link_failure = ipv4_link_failure,
.update_pmtu = ip_rt_update_pmtu,
+ .redirect = ip_do_redirect,
.local_out = __ip_local_out,
.neigh_lookup = ipv4_neigh_lookup,
};
@@ -232,184 +201,30 @@ const __u8 ip_tos2prio[16] = {
};
EXPORT_SYMBOL(ip_tos2prio);
-/*
- * Route cache.
- */
-
-/* The locking scheme is rather straight forward:
- *
- * 1) Read-Copy Update protects the buckets of the central route hash.
- * 2) Only writers remove entries, and they hold the lock
- * as they look at rtable reference counts.
- * 3) Only readers acquire references to rtable entries,
- * they do so with atomic increments and with the
- * lock held.
- */
-
-struct rt_hash_bucket {
- struct rtable __rcu *chain;
-};
-
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
- defined(CONFIG_PROVE_LOCKING)
-/*
- * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
- * The size of this table is a power of two and depends on the number of CPUS.
- * (on lockdep we have a quite big spinlock_t, so keep the size down there)
- */
-#ifdef CONFIG_LOCKDEP
-# define RT_HASH_LOCK_SZ 256
-#else
-# if NR_CPUS >= 32
-# define RT_HASH_LOCK_SZ 4096
-# elif NR_CPUS >= 16
-# define RT_HASH_LOCK_SZ 2048
-# elif NR_CPUS >= 8
-# define RT_HASH_LOCK_SZ 1024
-# elif NR_CPUS >= 4
-# define RT_HASH_LOCK_SZ 512
-# else
-# define RT_HASH_LOCK_SZ 256
-# endif
-#endif
-
-static spinlock_t *rt_hash_locks;
-# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
-
-static __init void rt_hash_lock_init(void)
-{
- int i;
-
- rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
- GFP_KERNEL);
- if (!rt_hash_locks)
- panic("IP: failed to allocate rt_hash_locks\n");
-
- for (i = 0; i < RT_HASH_LOCK_SZ; i++)
- spin_lock_init(&rt_hash_locks[i]);
-}
-#else
-# define rt_hash_lock_addr(slot) NULL
-
-static inline void rt_hash_lock_init(void)
-{
-}
-#endif
-
-static struct rt_hash_bucket *rt_hash_table __read_mostly;
-static unsigned int rt_hash_mask __read_mostly;
-static unsigned int rt_hash_log __read_mostly;
-
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
-static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
- int genid)
-{
- return jhash_3words((__force u32)daddr, (__force u32)saddr,
- idx, genid)
- & rt_hash_mask;
-}
-
static inline int rt_genid(struct net *net)
{
return atomic_read(&net->ipv4.rt_genid);
}
#ifdef CONFIG_PROC_FS
-struct rt_cache_iter_state {
- struct seq_net_private p;
- int bucket;
- int genid;
-};
-
-static struct rtable *rt_cache_get_first(struct seq_file *seq)
-{
- struct rt_cache_iter_state *st = seq->private;
- struct rtable *r = NULL;
-
- for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
- if (!rcu_access_pointer(rt_hash_table[st->bucket].chain))
- continue;
- rcu_read_lock_bh();
- r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
- while (r) {
- if (dev_net(r->dst.dev) == seq_file_net(seq) &&
- r->rt_genid == st->genid)
- return r;
- r = rcu_dereference_bh(r->dst.rt_next);
- }
- rcu_read_unlock_bh();
- }
- return r;
-}
-
-static struct rtable *__rt_cache_get_next(struct seq_file *seq,
- struct rtable *r)
-{
- struct rt_cache_iter_state *st = seq->private;
-
- r = rcu_dereference_bh(r->dst.rt_next);
- while (!r) {
- rcu_read_unlock_bh();
- do {
- if (--st->bucket < 0)
- return NULL;
- } while (!rcu_access_pointer(rt_hash_table[st->bucket].chain));
- rcu_read_lock_bh();
- r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
- }
- return r;
-}
-
-static struct rtable *rt_cache_get_next(struct seq_file *seq,
- struct rtable *r)
-{
- struct rt_cache_iter_state *st = seq->private;
- while ((r = __rt_cache_get_next(seq, r)) != NULL) {
- if (dev_net(r->dst.dev) != seq_file_net(seq))
- continue;
- if (r->rt_genid == st->genid)
- break;
- }
- return r;
-}
-
-static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
-{
- struct rtable *r = rt_cache_get_first(seq);
-
- if (r)
- while (pos && (r = rt_cache_get_next(seq, r)))
- --pos;
- return pos ? NULL : r;
-}
-
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct rt_cache_iter_state *st = seq->private;
if (*pos)
- return rt_cache_get_idx(seq, *pos - 1);
- st->genid = rt_genid(seq_file_net(seq));
+ return NULL;
return SEQ_START_TOKEN;
}
static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct rtable *r;
-
- if (v == SEQ_START_TOKEN)
- r = rt_cache_get_first(seq);
- else
- r = rt_cache_get_next(seq, v);
++*pos;
- return r;
+ return NULL;
}
static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
- if (v && v != SEQ_START_TOKEN)
- rcu_read_unlock_bh();
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
@@ -419,34 +234,6 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
"Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
"Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
"HHUptod\tSpecDst");
- else {
- struct rtable *r = v;
- struct neighbour *n;
- int len, HHUptod;
-
- rcu_read_lock();
- n = dst_get_neighbour_noref(&r->dst);
- HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
- rcu_read_unlock();
-
- seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
- "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
- r->dst.dev ? r->dst.dev->name : "*",
- (__force u32)r->rt_dst,
- (__force u32)r->rt_gateway,
- r->rt_flags, atomic_read(&r->dst.__refcnt),
- r->dst.__use, 0, (__force u32)r->rt_src,
- dst_metric_advmss(&r->dst) + 40,
- dst_metric(&r->dst, RTAX_WINDOW),
- (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
- dst_metric(&r->dst, RTAX_RTTVAR)),
- r->rt_key_tos,
- -1,
- HHUptod,
- r->rt_spec_dst, &len);
-
- seq_printf(seq, "%*s\n", 127 - len, "");
- }
return 0;
}
@@ -459,8 +246,7 @@ static const struct seq_operations rt_cache_seq_ops = {
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
- return seq_open_net(inode, file, &rt_cache_seq_ops,
- sizeof(struct rt_cache_iter_state));
+ return seq_open(file, &rt_cache_seq_ops);
}
static const struct file_operations rt_cache_seq_fops = {
@@ -468,7 +254,7 @@ static const struct file_operations rt_cache_seq_fops = {
.open = rt_cache_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_net,
+ .release = seq_release,
};
@@ -658,275 +444,12 @@ static inline int ip_rt_proc_init(void)
}
#endif /* CONFIG_PROC_FS */
-static inline void rt_free(struct rtable *rt)
-{
- call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
-static inline void rt_drop(struct rtable *rt)
-{
- ip_rt_put(rt);
- call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
-static inline int rt_fast_clean(struct rtable *rth)
-{
- /* Kill broadcast/multicast entries very aggresively, if they
- collide in hash table with more useful entries */
- return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
- rt_is_input_route(rth) && rth->dst.rt_next;
-}
-
-static inline int rt_valuable(struct rtable *rth)
-{
- return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
- (rth->peer && rth->peer->pmtu_expires);
-}
-
-static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
-{
- unsigned long age;
- int ret = 0;
-
- if (atomic_read(&rth->dst.__refcnt))
- goto out;
-
- age = jiffies - rth->dst.lastuse;
- if ((age <= tmo1 && !rt_fast_clean(rth)) ||
- (age <= tmo2 && rt_valuable(rth)))
- goto out;
- ret = 1;
-out: return ret;
-}
-
-/* Bits of score are:
- * 31: very valuable
- * 30: not quite useless
- * 29..0: usage counter
- */
-static inline u32 rt_score(struct rtable *rt)
-{
- u32 score = jiffies - rt->dst.lastuse;
-
- score = ~score & ~(3<<30);
-
- if (rt_valuable(rt))
- score |= (1<<31);
-
- if (rt_is_output_route(rt) ||
- !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
- score |= (1<<30);
-
- return score;
-}
-
-static inline bool rt_caching(const struct net *net)
-{
- return net->ipv4.current_rt_cache_rebuild_count <=
- net->ipv4.sysctl_rt_cache_rebuild_count;
-}
-
-static inline bool compare_hash_inputs(const struct rtable *rt1,
- const struct rtable *rt2)
-{
- return ((((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
- ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
- (rt1->rt_route_iif ^ rt2->rt_route_iif)) == 0);
-}
-
-static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
-{
- return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
- ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
- (rt1->rt_mark ^ rt2->rt_mark) |
- (rt1->rt_key_tos ^ rt2->rt_key_tos) |
- (rt1->rt_route_iif ^ rt2->rt_route_iif) |
- (rt1->rt_oif ^ rt2->rt_oif)) == 0;
-}
-
-static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
-{
- return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
-}
-
static inline int rt_is_expired(struct rtable *rth)
{
return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
}
/*
- * Perform a full scan of hash table and free all entries.
- * Can be called by a softirq or a process.
- * In the later case, we want to be reschedule if necessary
- */
-static void rt_do_flush(struct net *net, int process_context)
-{
- unsigned int i;
- struct rtable *rth, *next;
-
- for (i = 0; i <= rt_hash_mask; i++) {
- struct rtable __rcu **pprev;
- struct rtable *list;
-
- if (process_context && need_resched())
- cond_resched();
- rth = rcu_access_pointer(rt_hash_table[i].chain);
- if (!rth)
- continue;
-
- spin_lock_bh(rt_hash_lock_addr(i));
-
- list = NULL;
- pprev = &rt_hash_table[i].chain;
- rth = rcu_dereference_protected(*pprev,
- lockdep_is_held(rt_hash_lock_addr(i)));
-
- while (rth) {
- next = rcu_dereference_protected(rth->dst.rt_next,
- lockdep_is_held(rt_hash_lock_addr(i)));
-
- if (!net ||
- net_eq(dev_net(rth->dst.dev), net)) {
- rcu_assign_pointer(*pprev, next);
- rcu_assign_pointer(rth->dst.rt_next, list);
- list = rth;
- } else {
- pprev = &rth->dst.rt_next;
- }
- rth = next;
- }
-
- spin_unlock_bh(rt_hash_lock_addr(i));
-
- for (; list; list = next) {
- next = rcu_dereference_protected(list->dst.rt_next, 1);
- rt_free(list);
- }
- }
-}
-
-/*
- * While freeing expired entries, we compute average chain length
- * and standard deviation, using fixed-point arithmetic.
- * This to have an estimation of rt_chain_length_max
- * rt_chain_length_max = max(elasticity, AVG + 4*SD)
- * We use 3 bits for frational part, and 29 (or 61) for magnitude.
- */
-
-#define FRACT_BITS 3
-#define ONE (1UL << FRACT_BITS)
-
-/*
- * Given a hash chain and an item in this hash chain,
- * find if a previous entry has the same hash_inputs
- * (but differs on tos, mark or oif)
- * Returns 0 if an alias is found.
- * Returns ONE if rth has no alias before itself.
- */
-static int has_noalias(const struct rtable *head, const struct rtable *rth)
-{
- const struct rtable *aux = head;
-
- while (aux != rth) {
- if (compare_hash_inputs(aux, rth))
- return 0;
- aux = rcu_dereference_protected(aux->dst.rt_next, 1);
- }
- return ONE;
-}
-
-static void rt_check_expire(void)
-{
- static unsigned int rover;
- unsigned int i = rover, goal;
- struct rtable *rth;
- struct rtable __rcu **rthp;
- unsigned long samples = 0;
- unsigned long sum = 0, sum2 = 0;
- unsigned long delta;
- u64 mult;
-
- delta = jiffies - expires_ljiffies;
- expires_ljiffies = jiffies;
- mult = ((u64)delta) << rt_hash_log;
- if (ip_rt_gc_timeout > 1)
- do_div(mult, ip_rt_gc_timeout);
- goal = (unsigned int)mult;
- if (goal > rt_hash_mask)
- goal = rt_hash_mask + 1;
- for (; goal > 0; goal--) {
- unsigned long tmo = ip_rt_gc_timeout;
- unsigned long length;
-
- i = (i + 1) & rt_hash_mask;
- rthp = &rt_hash_table[i].chain;
-
- if (need_resched())
- cond_resched();
-
- samples++;
-
- if (rcu_dereference_raw(*rthp) == NULL)
- continue;
- length = 0;
- spin_lock_bh(rt_hash_lock_addr(i));
- while ((rth = rcu_dereference_protected(*rthp,
- lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
- prefetch(rth->dst.rt_next);
- if (rt_is_expired(rth)) {
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- continue;
- }
- if (rth->dst.expires) {
- /* Entry is expired even if it is in use */
- if (time_before_eq(jiffies, rth->dst.expires)) {
-nofree:
- tmo >>= 1;
- rthp = &rth->dst.rt_next;
- /*
- * We only count entries on
- * a chain with equal hash inputs once
- * so that entries for different QOS
- * levels, and other non-hash input
- * attributes don't unfairly skew
- * the length computation
- */
- length += has_noalias(rt_hash_table[i].chain, rth);
- continue;
- }
- } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
- goto nofree;
-
- /* Cleanup aged off entries. */
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- }
- spin_unlock_bh(rt_hash_lock_addr(i));
- sum += length;
- sum2 += length*length;
- }
- if (samples) {
- unsigned long avg = sum / samples;
- unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
- rt_chain_length_max = max_t(unsigned long,
- ip_rt_gc_elasticity,
- (avg + 4*sd) >> FRACT_BITS);
- }
- rover = i;
-}
-
-/*
- * rt_worker_func() is run in process context.
- * we call rt_check_expire() to scan part of the hash table
- */
-static void rt_worker_func(struct work_struct *work)
-{
- rt_check_expire();
- schedule_delayed_work(&expires_work, ip_rt_gc_interval);
-}
-
-/*
* Perturbation of rt_genid by a small quantity [1..256]
* Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
* many times (2^24) without giving recent rt_genid.
@@ -938,7 +461,6 @@ static void rt_cache_invalidate(struct net *net)
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
- inetpeer_invalidate_tree(AF_INET);
}
/*
@@ -948,183 +470,22 @@ static void rt_cache_invalidate(struct net *net)
void rt_cache_flush(struct net *net, int delay)
{
rt_cache_invalidate(net);
- if (delay >= 0)
- rt_do_flush(net, !in_softirq());
-}
-
-/* Flush previous cache invalidated entries from the cache */
-void rt_cache_flush_batch(struct net *net)
-{
- rt_do_flush(net, !in_softirq());
-}
-
-static void rt_emergency_hash_rebuild(struct net *net)
-{
- net_warn_ratelimited("Route hash chain too long!\n");
- rt_cache_invalidate(net);
-}
-
-/*
- Short description of GC goals.
-
- We want to build algorithm, which will keep routing cache
- at some equilibrium point, when number of aged off entries
- is kept approximately equal to newly generated ones.
-
- Current expiration strength is variable "expire".
- We try to adjust it dynamically, so that if networking
- is idle expires is large enough to keep enough of warm entries,
- and when load increases it reduces to limit cache size.
- */
-
-static int rt_garbage_collect(struct dst_ops *ops)
-{
- static unsigned long expire = RT_GC_TIMEOUT;
- static unsigned long last_gc;
- static int rover;
- static int equilibrium;
- struct rtable *rth;
- struct rtable __rcu **rthp;
- unsigned long now = jiffies;
- int goal;
- int entries = dst_entries_get_fast(&ipv4_dst_ops);
-
- /*
- * Garbage collection is pretty expensive,
- * do not make it too frequently.
- */
-
- RT_CACHE_STAT_INC(gc_total);
-
- if (now - last_gc < ip_rt_gc_min_interval &&
- entries < ip_rt_max_size) {
- RT_CACHE_STAT_INC(gc_ignored);
- goto out;
- }
-
- entries = dst_entries_get_slow(&ipv4_dst_ops);
- /* Calculate number of entries, which we want to expire now. */
- goal = entries - (ip_rt_gc_elasticity << rt_hash_log);
- if (goal <= 0) {
- if (equilibrium < ipv4_dst_ops.gc_thresh)
- equilibrium = ipv4_dst_ops.gc_thresh;
- goal = entries - equilibrium;
- if (goal > 0) {
- equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
- goal = entries - equilibrium;
- }
- } else {
- /* We are in dangerous area. Try to reduce cache really
- * aggressively.
- */
- goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
- equilibrium = entries - goal;
- }
-
- if (now - last_gc >= ip_rt_gc_min_interval)
- last_gc = now;
-
- if (goal <= 0) {
- equilibrium += goal;
- goto work_done;
- }
-
- do {
- int i, k;
-
- for (i = rt_hash_mask, k = rover; i >= 0; i--) {
- unsigned long tmo = expire;
-
- k = (k + 1) & rt_hash_mask;
- rthp = &rt_hash_table[k].chain;
- spin_lock_bh(rt_hash_lock_addr(k));
- while ((rth = rcu_dereference_protected(*rthp,
- lockdep_is_held(rt_hash_lock_addr(k)))) != NULL) {
- if (!rt_is_expired(rth) &&
- !rt_may_expire(rth, tmo, expire)) {
- tmo >>= 1;
- rthp = &rth->dst.rt_next;
- continue;
- }
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- goal--;
- }
- spin_unlock_bh(rt_hash_lock_addr(k));
- if (goal <= 0)
- break;
- }
- rover = k;
-
- if (goal <= 0)
- goto work_done;
-
- /* Goal is not achieved. We stop process if:
-
- - if expire reduced to zero. Otherwise, expire is halfed.
- - if table is not full.
- - if we are called from interrupt.
- - jiffies check is just fallback/debug loop breaker.
- We will not spin here for long time in any case.
- */
-
- RT_CACHE_STAT_INC(gc_goal_miss);
-
- if (expire == 0)
- break;
-
- expire >>= 1;
-
- if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
- goto out;
- } while (!in_softirq() && time_before_eq(jiffies, now));
-
- if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
- goto out;
- if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size)
- goto out;
- net_warn_ratelimited("dst cache overflow\n");
- RT_CACHE_STAT_INC(gc_dst_overflow);
- return 1;
-
-work_done:
- expire += ip_rt_gc_min_interval;
- if (expire > ip_rt_gc_timeout ||
- dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
- dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
- expire = ip_rt_gc_timeout;
-out: return 0;
-}
-
-/*
- * Returns number of entries in a hash chain that have different hash_inputs
- */
-static int slow_chain_length(const struct rtable *head)
-{
- int length = 0;
- const struct rtable *rth = head;
-
- while (rth) {
- length += has_noalias(head, rth);
- rth = rcu_dereference_protected(rth->dst.rt_next, 1);
- }
- return length >> FRACT_BITS;
}
-static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
- static const __be32 inaddr_any = 0;
struct net_device *dev = dst->dev;
const __be32 *pkey = daddr;
const struct rtable *rt;
struct neighbour *n;
rt = (const struct rtable *) dst;
-
- if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
- pkey = &inaddr_any;
- else if (rt->rt_gateway)
+ if (rt->rt_gateway)
pkey = (const __be32 *) &rt->rt_gateway;
+ else if (skb)
+ pkey = &ip_hdr(skb)->daddr;
n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
if (n)
@@ -1132,212 +493,6 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const vo
return neigh_create(&arp_tbl, pkey, dev);
}
-static int rt_bind_neighbour(struct rtable *rt)
-{
- struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
- if (IS_ERR(n))
- return PTR_ERR(n);
- dst_set_neighbour(&rt->dst, n);
-
- return 0;
-}
-
-static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
- struct sk_buff *skb, int ifindex)
-{
- struct rtable *rth, *cand;
- struct rtable __rcu **rthp, **candp;
- unsigned long now;
- u32 min_score;
- int chain_length;
- int attempts = !in_softirq();
-
-restart:
- chain_length = 0;
- min_score = ~(u32)0;
- cand = NULL;
- candp = NULL;
- now = jiffies;
-
- if (!rt_caching(dev_net(rt->dst.dev))) {
- /*
- * If we're not caching, just tell the caller we
- * were successful and don't touch the route. The
- * caller hold the sole reference to the cache entry, and
- * it will be released when the caller is done with it.
- * If we drop it here, the callers have no way to resolve routes
- * when we're not caching. Instead, just point *rp at rt, so
- * the caller gets a single use out of the route
- * Note that we do rt_free on this new route entry, so that
- * once its refcount hits zero, we are still able to reap it
- * (Thanks Alexey)
- * Note: To avoid expensive rcu stuff for this uncached dst,
- * we set DST_NOCACHE so that dst_release() can free dst without
- * waiting a grace period.
- */
-
- rt->dst.flags |= DST_NOCACHE;
- if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
- int err = rt_bind_neighbour(rt);
- if (err) {
- net_warn_ratelimited("Neighbour table failure & not caching routes\n");
- ip_rt_put(rt);
- return ERR_PTR(err);
- }
- }
-
- goto skip_hashing;
- }
-
- rthp = &rt_hash_table[hash].chain;
-
- spin_lock_bh(rt_hash_lock_addr(hash));
- while ((rth = rcu_dereference_protected(*rthp,
- lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
- if (rt_is_expired(rth)) {
- *rthp = rth->dst.rt_next;
- rt_free(rth);
- continue;
- }
- if (compare_keys(rth, rt) && compare_netns(rth, rt)) {
- /* Put it first */
- *rthp = rth->dst.rt_next;
- /*
- * Since lookup is lockfree, the deletion
- * must be visible to another weakly ordered CPU before
- * the insertion at the start of the hash chain.
- */
- rcu_assign_pointer(rth->dst.rt_next,
- rt_hash_table[hash].chain);
- /*
- * Since lookup is lockfree, the update writes
- * must be ordered for consistency on SMP.
- */
- rcu_assign_pointer(rt_hash_table[hash].chain, rth);
-
- dst_use(&rth->dst, now);
- spin_unlock_bh(rt_hash_lock_addr(hash));
-
- rt_drop(rt);
- if (skb)
- skb_dst_set(skb, &rth->dst);
- return rth;
- }
-
- if (!atomic_read(&rth->dst.__refcnt)) {
- u32 score = rt_score(rth);
-
- if (score <= min_score) {
- cand = rth;
- candp = rthp;
- min_score = score;
- }
- }
-
- chain_length++;
-
- rthp = &rth->dst.rt_next;
- }
-
- if (cand) {
- /* ip_rt_gc_elasticity used to be average length of chain
- * length, when exceeded gc becomes really aggressive.
- *
- * The second limit is less certain. At the moment it allows
- * only 2 entries per bucket. We will see.
- */
- if (chain_length > ip_rt_gc_elasticity) {
- *candp = cand->dst.rt_next;
- rt_free(cand);
- }
- } else {
- if (chain_length > rt_chain_length_max &&
- slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
- struct net *net = dev_net(rt->dst.dev);
- int num = ++net->ipv4.current_rt_cache_rebuild_count;
- if (!rt_caching(net)) {
- pr_warn("%s: %d rebuilds is over limit, route caching disabled\n",
- rt->dst.dev->name, num);
- }
- rt_emergency_hash_rebuild(net);
- spin_unlock_bh(rt_hash_lock_addr(hash));
-
- hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
- ifindex, rt_genid(net));
- goto restart;
- }
- }
-
- /* Try to bind route to arp only if it is output
- route or unicast forwarding path.
- */
- if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
- int err = rt_bind_neighbour(rt);
- if (err) {
- spin_unlock_bh(rt_hash_lock_addr(hash));
-
- if (err != -ENOBUFS) {
- rt_drop(rt);
- return ERR_PTR(err);
- }
-
- /* Neighbour tables are full and nothing
- can be released. Try to shrink route cache,
- it is most likely it holds some neighbour records.
- */
- if (attempts-- > 0) {
- int saved_elasticity = ip_rt_gc_elasticity;
- int saved_int = ip_rt_gc_min_interval;
- ip_rt_gc_elasticity = 1;
- ip_rt_gc_min_interval = 0;
- rt_garbage_collect(&ipv4_dst_ops);
- ip_rt_gc_min_interval = saved_int;
- ip_rt_gc_elasticity = saved_elasticity;
- goto restart;
- }
-
- net_warn_ratelimited("Neighbour table overflow\n");
- rt_drop(rt);
- return ERR_PTR(-ENOBUFS);
- }
- }
-
- rt->dst.rt_next = rt_hash_table[hash].chain;
-
- /*
- * Since lookup is lockfree, we must make sure
- * previous writes to rt are committed to memory
- * before making rt visible to other CPUS.
- */
- rcu_assign_pointer(rt_hash_table[hash].chain, rt);
-
- spin_unlock_bh(rt_hash_lock_addr(hash));
-
-skip_hashing:
- if (skb)
- skb_dst_set(skb, &rt->dst);
- return rt;
-}
-
-static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
-
-static u32 rt_peer_genid(void)
-{
- return atomic_read(&__rt_peer_genid);
-}
-
-void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
-{
- struct inet_peer *peer;
-
- peer = inet_getpeer_v4(daddr, create);
-
- if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
- inet_putpeer(peer);
- else
- rt->rt_peer_genid = rt_peer_genid();
-}
-
/*
* Peer allocation may fail only in serious out-of-memory conditions. However
* we still can generate some output.
@@ -1360,83 +515,188 @@ static void ip_select_fb_ident(struct iphdr *iph)
void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
- struct rtable *rt = (struct rtable *) dst;
-
- if (rt && !(rt->dst.flags & DST_NOPEER)) {
- if (rt->peer == NULL)
- rt_bind_peer(rt, rt->rt_dst, 1);
+ struct net *net = dev_net(dst->dev);
+ struct inet_peer *peer;
- /* If peer is attached to destination, it is never detached,
- so that we need not to grab a lock to dereference it.
- */
- if (rt->peer) {
- iph->id = htons(inet_getid(rt->peer, more));
- return;
- }
- } else if (!rt)
- pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
+ peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
+ if (peer) {
+ iph->id = htons(inet_getid(peer, more));
+ inet_putpeer(peer);
+ return;
+ }
ip_select_fb_ident(iph);
}
EXPORT_SYMBOL(__ip_select_ident);
-static void rt_del(unsigned int hash, struct rtable *rt)
+static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+ const struct iphdr *iph,
+ int oif, u8 tos,
+ u8 prot, u32 mark, int flow_flags)
{
- struct rtable __rcu **rthp;
- struct rtable *aux;
+ if (sk) {
+ const struct inet_sock *inet = inet_sk(sk);
- rthp = &rt_hash_table[hash].chain;
- spin_lock_bh(rt_hash_lock_addr(hash));
- ip_rt_put(rt);
- while ((aux = rcu_dereference_protected(*rthp,
- lockdep_is_held(rt_hash_lock_addr(hash)))) != NULL) {
- if (aux == rt || rt_is_expired(aux)) {
- *rthp = aux->dst.rt_next;
- rt_free(aux);
- continue;
- }
- rthp = &aux->dst.rt_next;
+ oif = sk->sk_bound_dev_if;
+ mark = sk->sk_mark;
+ tos = RT_CONN_FLAGS(sk);
+ prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
}
- spin_unlock_bh(rt_hash_lock_addr(hash));
+ flowi4_init_output(fl4, oif, mark, tos,
+ RT_SCOPE_UNIVERSE, prot,
+ flow_flags,
+ iph->daddr, iph->saddr, 0, 0);
}
-static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
+ const struct sock *sk)
{
- struct rtable *rt = (struct rtable *) dst;
- __be32 orig_gw = rt->rt_gateway;
- struct neighbour *n, *old_n;
+ const struct iphdr *iph = ip_hdr(skb);
+ int oif = skb->dev->ifindex;
+ u8 tos = RT_TOS(iph->tos);
+ u8 prot = iph->protocol;
+ u32 mark = skb->mark;
- dst_confirm(&rt->dst);
+ __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
+}
- rt->rt_gateway = peer->redirect_learned.a4;
+static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
+{
+ const struct inet_sock *inet = inet_sk(sk);
+ const struct ip_options_rcu *inet_opt;
+ __be32 daddr = inet->inet_daddr;
- n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
- if (IS_ERR(n)) {
- rt->rt_gateway = orig_gw;
- return;
+ rcu_read_lock();
+ inet_opt = rcu_dereference(inet->inet_opt);
+ if (inet_opt && inet_opt->opt.srr)
+ daddr = inet_opt->opt.faddr;
+ flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+ RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+ inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+ inet_sk_flowi_flags(sk),
+ daddr, inet->inet_saddr, 0, 0);
+ rcu_read_unlock();
+}
+
+static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ if (skb)
+ build_skb_flow_key(fl4, skb, sk);
+ else
+ build_sk_flow_key(fl4, sk);
+}
+
+static DEFINE_SEQLOCK(fnhe_seqlock);
+
+static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
+{
+ struct fib_nh_exception *fnhe, *oldest;
+
+ oldest = rcu_dereference(hash->chain);
+ for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
+ fnhe = rcu_dereference(fnhe->fnhe_next)) {
+ if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
+ oldest = fnhe;
}
- old_n = xchg(&rt->dst._neighbour, n);
- if (old_n)
- neigh_release(old_n);
- if (!(n->nud_state & NUD_VALID)) {
- neigh_event_send(n, NULL);
+ return oldest;
+}
+
+static inline u32 fnhe_hashfun(__be32 daddr)
+{
+ u32 hval;
+
+ hval = (__force u32) daddr;
+ hval ^= (hval >> 11) ^ (hval >> 22);
+
+ return hval & (FNHE_HASH_SIZE - 1);
+}
+
+static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
+ u32 pmtu, unsigned long expires)
+{
+ struct fnhe_hash_bucket *hash;
+ struct fib_nh_exception *fnhe;
+ int depth;
+ u32 hval = fnhe_hashfun(daddr);
+
+ write_seqlock_bh(&fnhe_seqlock);
+
+ hash = nh->nh_exceptions;
+ if (!hash) {
+ hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
+ if (!hash)
+ goto out_unlock;
+ nh->nh_exceptions = hash;
+ }
+
+ hash += hval;
+
+ depth = 0;
+ for (fnhe = rcu_dereference(hash->chain); fnhe;
+ fnhe = rcu_dereference(fnhe->fnhe_next)) {
+ if (fnhe->fnhe_daddr == daddr)
+ break;
+ depth++;
+ }
+
+ if (fnhe) {
+ if (gw)
+ fnhe->fnhe_gw = gw;
+ if (pmtu) {
+ fnhe->fnhe_pmtu = pmtu;
+ fnhe->fnhe_expires = expires;
+ }
} else {
- rt->rt_flags |= RTCF_REDIRECTED;
- call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
+ if (depth > FNHE_RECLAIM_DEPTH)
+ fnhe = fnhe_oldest(hash);
+ else {
+ fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
+ if (!fnhe)
+ goto out_unlock;
+
+ fnhe->fnhe_next = hash->chain;
+ rcu_assign_pointer(hash->chain, fnhe);
+ }
+ fnhe->fnhe_daddr = daddr;
+ fnhe->fnhe_gw = gw;
+ fnhe->fnhe_pmtu = pmtu;
+ fnhe->fnhe_expires = expires;
}
+
+ fnhe->fnhe_stamp = jiffies;
+
+out_unlock:
+ write_sequnlock_bh(&fnhe_seqlock);
+ return;
}
-/* called in rcu_read_lock() section */
-void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
- __be32 saddr, struct net_device *dev)
+static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
+ bool kill_route)
{
- int s, i;
- struct in_device *in_dev = __in_dev_get_rcu(dev);
- __be32 skeys[2] = { saddr, 0 };
- int ikeys[2] = { dev->ifindex, 0 };
- struct inet_peer *peer;
+ __be32 new_gw = icmp_hdr(skb)->un.gateway;
+ __be32 old_gw = ip_hdr(skb)->saddr;
+ struct net_device *dev = skb->dev;
+ struct in_device *in_dev;
+ struct fib_result res;
+ struct neighbour *n;
struct net *net;
+ switch (icmp_hdr(skb)->code & 7) {
+ case ICMP_REDIR_NET:
+ case ICMP_REDIR_NETTOS:
+ case ICMP_REDIR_HOST:
+ case ICMP_REDIR_HOSTTOS:
+ break;
+
+ default:
+ return;
+ }
+
+ if (rt->rt_gateway != old_gw)
+ return;
+
+ in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
return;
@@ -1456,72 +716,50 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
goto reject_redirect;
}
- for (s = 0; s < 2; s++) {
- for (i = 0; i < 2; i++) {
- unsigned int hash;
- struct rtable __rcu **rthp;
- struct rtable *rt;
-
- hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
-
- rthp = &rt_hash_table[hash].chain;
-
- while ((rt = rcu_dereference(*rthp)) != NULL) {
- rthp = &rt->dst.rt_next;
-
- if (rt->rt_key_dst != daddr ||
- rt->rt_key_src != skeys[s] ||
- rt->rt_oif != ikeys[i] ||
- rt_is_input_route(rt) ||
- rt_is_expired(rt) ||
- !net_eq(dev_net(rt->dst.dev), net) ||
- rt->dst.error ||
- rt->dst.dev != dev ||
- rt->rt_gateway != old_gw)
- continue;
-
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 1);
-
- peer = rt->peer;
- if (peer) {
- if (peer->redirect_learned.a4 != new_gw) {
- peer->redirect_learned.a4 = new_gw;
- atomic_inc(&__rt_peer_genid);
- }
- check_peer_redir(&rt->dst, peer);
- }
+ n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
+ if (n) {
+ if (!(n->nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
+ } else {
+ if (fib_lookup(net, fl4, &res) == 0) {
+ struct fib_nh *nh = &FIB_RES_NH(res);
+
+ update_or_create_fnhe(nh, fl4->daddr, new_gw,
+ 0, 0);
}
+ if (kill_route)
+ rt->dst.obsolete = DST_OBSOLETE_KILL;
+ call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
}
+ neigh_release(n);
}
return;
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (IN_DEV_LOG_MARTIANS(in_dev))
+ if (IN_DEV_LOG_MARTIANS(in_dev)) {
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ __be32 daddr = iph->daddr;
+ __be32 saddr = iph->saddr;
+
net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
" Advised path = %pI4 -> %pI4\n",
&old_gw, dev->name, &new_gw,
&saddr, &daddr);
+ }
#endif
;
}
-static bool peer_pmtu_expired(struct inet_peer *peer)
+static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
- unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
+ struct rtable *rt;
+ struct flowi4 fl4;
- return orig &&
- time_after_eq(jiffies, orig) &&
- cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
-}
+ rt = (struct rtable *) dst;
-static bool peer_pmtu_cleaned(struct inet_peer *peer)
-{
- unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
-
- return orig &&
- cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
+ ip_rt_build_flow_key(&fl4, sk, skb);
+ __ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
@@ -1533,14 +771,10 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
if (dst->obsolete > 0) {
ip_rt_put(rt);
ret = NULL;
- } else if (rt->rt_flags & RTCF_REDIRECTED) {
- unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
- rt->rt_oif,
- rt_genid(dev_net(dst->dev)));
- rt_del(hash, rt);
+ } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
+ rt->dst.expires) {
+ ip_rt_put(rt);
ret = NULL;
- } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
- dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
}
}
return ret;
@@ -1567,6 +801,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
struct rtable *rt = skb_rtable(skb);
struct in_device *in_dev;
struct inet_peer *peer;
+ struct net *net;
int log_martians;
rcu_read_lock();
@@ -1578,9 +813,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
log_martians = IN_DEV_LOG_MARTIANS(in_dev);
rcu_read_unlock();
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 1);
- peer = rt->peer;
+ net = dev_net(rt->dst.dev);
+ peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
if (!peer) {
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
return;
@@ -1597,7 +831,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
*/
if (peer->rate_tokens >= ip_rt_redirect_number) {
peer->rate_last = jiffies;
- return;
+ goto out_put_peer;
}
/* Check for load limit; set rate_last to the latest sent
@@ -1614,20 +848,38 @@ void ip_rt_send_redirect(struct sk_buff *skb)
if (log_martians &&
peer->rate_tokens == ip_rt_redirect_number)
net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
- &ip_hdr(skb)->saddr, rt->rt_iif,
- &rt->rt_dst, &rt->rt_gateway);
+ &ip_hdr(skb)->saddr, inet_iif(skb),
+ &ip_hdr(skb)->daddr, &rt->rt_gateway);
#endif
}
+out_put_peer:
+ inet_putpeer(peer);
}
static int ip_error(struct sk_buff *skb)
{
+ struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
struct rtable *rt = skb_rtable(skb);
struct inet_peer *peer;
unsigned long now;
+ struct net *net;
bool send;
int code;
+ net = dev_net(rt->dst.dev);
+ if (!IN_DEV_FORWARD(in_dev)) {
+ switch (rt->dst.error) {
+ case EHOSTUNREACH:
+ IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
+ break;
+
+ case ENETUNREACH:
+ IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+ break;
+ }
+ goto out;
+ }
+
switch (rt->dst.error) {
case EINVAL:
default:
@@ -1637,17 +889,14 @@ static int ip_error(struct sk_buff *skb)
break;
case ENETUNREACH:
code = ICMP_NET_UNREACH;
- IP_INC_STATS_BH(dev_net(rt->dst.dev),
- IPSTATS_MIB_INNOROUTES);
+ IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
break;
case EACCES:
code = ICMP_PKT_FILTERED;
break;
}
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 1);
- peer = rt->peer;
+ peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
send = true;
if (peer) {
@@ -1660,6 +909,7 @@ static int ip_error(struct sk_buff *skb)
peer->rate_tokens -= ip_rt_error_cost;
else
send = false;
+ inet_putpeer(peer);
}
if (send)
icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
@@ -1668,163 +918,120 @@ out: kfree_skb(skb);
return 0;
}
-/*
- * The last two values are not from the RFC but
- * are needed for AMPRnet AX.25 paths.
- */
+static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
+{
+ struct fib_result res;
-static const unsigned short mtu_plateau[] =
-{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
+ if (mtu < ip_rt_min_pmtu)
+ mtu = ip_rt_min_pmtu;
-static inline unsigned short guess_mtu(unsigned short old_mtu)
-{
- int i;
+ if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
+ struct fib_nh *nh = &FIB_RES_NH(res);
- for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
- if (old_mtu > mtu_plateau[i])
- return mtu_plateau[i];
- return 68;
+ update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
+ jiffies + ip_rt_mtu_expires);
+ }
+ return mtu;
}
-unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
- unsigned short new_mtu,
- struct net_device *dev)
+static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
{
- unsigned short old_mtu = ntohs(iph->tot_len);
- unsigned short est_mtu = 0;
- struct inet_peer *peer;
-
- peer = inet_getpeer_v4(iph->daddr, 1);
- if (peer) {
- unsigned short mtu = new_mtu;
-
- if (new_mtu < 68 || new_mtu >= old_mtu) {
- /* BSD 4.2 derived systems incorrectly adjust
- * tot_len by the IP header length, and report
- * a zero MTU in the ICMP message.
- */
- if (mtu == 0 &&
- old_mtu >= 68 + (iph->ihl << 2))
- old_mtu -= iph->ihl << 2;
- mtu = guess_mtu(old_mtu);
- }
-
- if (mtu < ip_rt_min_pmtu)
- mtu = ip_rt_min_pmtu;
- if (!peer->pmtu_expires || mtu < peer->pmtu_learned) {
- unsigned long pmtu_expires;
-
- pmtu_expires = jiffies + ip_rt_mtu_expires;
- if (!pmtu_expires)
- pmtu_expires = 1UL;
+ struct rtable *rt = (struct rtable *) dst;
+ struct flowi4 fl4;
- est_mtu = mtu;
- peer->pmtu_learned = mtu;
- peer->pmtu_expires = pmtu_expires;
- atomic_inc(&__rt_peer_genid);
- }
+ ip_rt_build_flow_key(&fl4, sk, skb);
+ mtu = __ip_rt_update_pmtu(rt, &fl4, mtu);
- inet_putpeer(peer);
+ if (!rt->rt_pmtu) {
+ dst->obsolete = DST_OBSOLETE_KILL;
+ } else {
+ rt->rt_pmtu = mtu;
+ dst_set_expires(&rt->dst, ip_rt_mtu_expires);
}
- return est_mtu ? : new_mtu;
}
-static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
+void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
+ int oif, u32 mark, u8 protocol, int flow_flags)
{
- unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct flowi4 fl4;
+ struct rtable *rt;
- if (!expires)
- return;
- if (time_before(jiffies, expires)) {
- u32 orig_dst_mtu = dst_mtu(dst);
- if (peer->pmtu_learned < orig_dst_mtu) {
- if (!peer->pmtu_orig)
- peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
- dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
- }
- } else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
- dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
+ __build_flow_key(&fl4, NULL, iph, oif,
+ RT_TOS(iph->tos), protocol, mark, flow_flags);
+ rt = __ip_route_output_key(net, &fl4);
+ if (!IS_ERR(rt)) {
+ __ip_rt_update_pmtu(rt, &fl4, mtu);
+ ip_rt_put(rt);
+ }
}
+EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
-static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
+void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
- struct rtable *rt = (struct rtable *) dst;
- struct inet_peer *peer;
-
- dst_confirm(dst);
-
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 1);
- peer = rt->peer;
- if (peer) {
- unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
-
- if (mtu < ip_rt_min_pmtu)
- mtu = ip_rt_min_pmtu;
- if (!pmtu_expires || mtu < peer->pmtu_learned) {
-
- pmtu_expires = jiffies + ip_rt_mtu_expires;
- if (!pmtu_expires)
- pmtu_expires = 1UL;
-
- peer->pmtu_learned = mtu;
- peer->pmtu_expires = pmtu_expires;
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct flowi4 fl4;
+ struct rtable *rt;
- atomic_inc(&__rt_peer_genid);
- rt->rt_peer_genid = rt_peer_genid();
- }
- check_peer_pmtu(dst, peer);
+ __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+ rt = __ip_route_output_key(sock_net(sk), &fl4);
+ if (!IS_ERR(rt)) {
+ __ip_rt_update_pmtu(rt, &fl4, mtu);
+ ip_rt_put(rt);
}
}
+EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
-
-static void ipv4_validate_peer(struct rtable *rt)
+void ipv4_redirect(struct sk_buff *skb, struct net *net,
+ int oif, u32 mark, u8 protocol, int flow_flags)
{
- if (rt->rt_peer_genid != rt_peer_genid()) {
- struct inet_peer *peer;
-
- if (!rt->peer)
- rt_bind_peer(rt, rt->rt_dst, 0);
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct flowi4 fl4;
+ struct rtable *rt;
- peer = rt->peer;
- if (peer) {
- check_peer_pmtu(&rt->dst, peer);
+ __build_flow_key(&fl4, NULL, iph, oif,
+ RT_TOS(iph->tos), protocol, mark, flow_flags);
+ rt = __ip_route_output_key(net, &fl4);
+ if (!IS_ERR(rt)) {
+ __ip_do_redirect(rt, skb, &fl4, false);
+ ip_rt_put(rt);
+ }
+}
+EXPORT_SYMBOL_GPL(ipv4_redirect);
- if (peer->redirect_learned.a4 &&
- peer->redirect_learned.a4 != rt->rt_gateway)
- check_peer_redir(&rt->dst, peer);
- }
+void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
+{
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct flowi4 fl4;
+ struct rtable *rt;
- rt->rt_peer_genid = rt_peer_genid();
+ __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+ rt = __ip_route_output_key(sock_net(sk), &fl4);
+ if (!IS_ERR(rt)) {
+ __ip_do_redirect(rt, skb, &fl4, false);
+ ip_rt_put(rt);
}
}
+EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
struct rtable *rt = (struct rtable *) dst;
- if (rt_is_expired(rt))
+ /* All IPV4 dsts are created with ->obsolete set to the value
+ * DST_OBSOLETE_FORCE_CHK which forces validation calls down
+ * into this function always.
+ *
+ * When a PMTU/redirect information update invalidates a
+ * route, this is indicated by setting obsolete to
+ * DST_OBSOLETE_KILL.
+ */
+ if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
return NULL;
- ipv4_validate_peer(rt);
return dst;
}
-static void ipv4_dst_destroy(struct dst_entry *dst)
-{
- struct rtable *rt = (struct rtable *) dst;
- struct inet_peer *peer = rt->peer;
-
- if (rt->fi) {
- fib_info_put(rt->fi);
- rt->fi = NULL;
- }
- if (peer) {
- rt->peer = NULL;
- inet_putpeer(peer);
- }
-}
-
-
static void ipv4_link_failure(struct sk_buff *skb)
{
struct rtable *rt;
@@ -1832,8 +1039,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
rt = skb_rtable(skb);
- if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
- dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
+ if (rt)
+ dst_set_expires(&rt->dst, 0);
}
static int ip_rt_bug(struct sk_buff *skb)
@@ -1880,8 +1087,9 @@ void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
else
- src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
- RT_SCOPE_UNIVERSE);
+ src = inet_select_addr(rt->dst.dev,
+ rt_nexthop(rt, iph->daddr),
+ RT_SCOPE_UNIVERSE);
rcu_read_unlock();
}
memcpy(addr, &src, 4);
@@ -1913,7 +1121,13 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
const struct rtable *rt = (const struct rtable *) dst;
- unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+ unsigned int mtu = rt->rt_pmtu;
+
+ if (mtu && time_after_eq(jiffies, rt->dst.expires))
+ mtu = 0;
+
+ if (!mtu)
+ mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu && rt_is_output_route(rt))
return mtu;
@@ -1921,8 +1135,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = dst->dev->mtu;
if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
-
- if (rt->rt_gateway != rt->rt_dst && mtu > 576)
+ if (rt->rt_gateway && mtu > 576)
mtu = 576;
}
@@ -1932,76 +1145,121 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
return mtu;
}
-static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
- struct fib_info *fi)
+static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
- struct inet_peer *peer;
- int create = 0;
+ struct fnhe_hash_bucket *hash = nh->nh_exceptions;
+ struct fib_nh_exception *fnhe;
+ u32 hval;
- /* If a peer entry exists for this destination, we must hook
- * it up in order to get at cached metrics.
- */
- if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
- create = 1;
+ if (!hash)
+ return NULL;
- rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
- if (peer) {
- rt->rt_peer_genid = rt_peer_genid();
- if (inet_metrics_new(peer))
- memcpy(peer->metrics, fi->fib_metrics,
- sizeof(u32) * RTAX_MAX);
- dst_init_metrics(&rt->dst, peer->metrics, false);
-
- check_peer_pmtu(&rt->dst, peer);
-
- if (peer->redirect_learned.a4 &&
- peer->redirect_learned.a4 != rt->rt_gateway) {
- rt->rt_gateway = peer->redirect_learned.a4;
- rt->rt_flags |= RTCF_REDIRECTED;
- }
- } else {
- if (fi->fib_metrics != (u32 *) dst_default_metrics) {
- rt->fi = fi;
- atomic_inc(&fi->fib_clntref);
+ hval = fnhe_hashfun(daddr);
+
+ for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
+ fnhe = rcu_dereference(fnhe->fnhe_next)) {
+ if (fnhe->fnhe_daddr == daddr)
+ return fnhe;
+ }
+ return NULL;
+}
+
+static void rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
+ __be32 daddr)
+{
+ __be32 fnhe_daddr, gw;
+ unsigned long expires;
+ unsigned int seq;
+ u32 pmtu;
+
+restart:
+ seq = read_seqbegin(&fnhe_seqlock);
+ fnhe_daddr = fnhe->fnhe_daddr;
+ gw = fnhe->fnhe_gw;
+ pmtu = fnhe->fnhe_pmtu;
+ expires = fnhe->fnhe_expires;
+ if (read_seqretry(&fnhe_seqlock, seq))
+ goto restart;
+
+ if (daddr != fnhe_daddr)
+ return;
+
+ if (pmtu) {
+ unsigned long diff = expires - jiffies;
+
+ if (time_before(jiffies, expires)) {
+ rt->rt_pmtu = pmtu;
+ dst_set_expires(&rt->dst, diff);
}
- dst_init_metrics(&rt->dst, fi->fib_metrics, true);
}
+ if (gw) {
+ rt->rt_flags |= RTCF_REDIRECTED;
+ rt->rt_gateway = gw;
+ }
+ fnhe->fnhe_stamp = jiffies;
+}
+
+static inline void rt_release_rcu(struct rcu_head *head)
+{
+ struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+ dst_release(dst);
+}
+
+static void rt_cache_route(struct fib_nh *nh, struct rtable *rt)
+{
+ struct rtable *orig, *prev, **p = &nh->nh_rth_output;
+
+ if (rt_is_input_route(rt))
+ p = &nh->nh_rth_input;
+
+ orig = *p;
+
+ prev = cmpxchg(p, orig, rt);
+ if (prev == orig) {
+ dst_clone(&rt->dst);
+ if (orig)
+ call_rcu_bh(&orig->dst.rcu_head, rt_release_rcu);
+ }
+}
+
+static bool rt_cache_valid(struct rtable *rt)
+{
+ return (rt && rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK);
}
-static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
+static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
const struct fib_result *res,
+ struct fib_nh_exception *fnhe,
struct fib_info *fi, u16 type, u32 itag)
{
- struct dst_entry *dst = &rt->dst;
-
if (fi) {
- if (FIB_RES_GW(*res) &&
- FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
- rt->rt_gateway = FIB_RES_GW(*res);
- rt_init_metrics(rt, fl4, fi);
+ struct fib_nh *nh = &FIB_RES_NH(*res);
+
+ if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK)
+ rt->rt_gateway = nh->nh_gw;
+ if (unlikely(fnhe))
+ rt_bind_exception(rt, fnhe, daddr);
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
#ifdef CONFIG_IP_ROUTE_CLASSID
- dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
+ rt->dst.tclassid = nh->nh_tclassid;
#endif
+ if (!(rt->dst.flags & DST_HOST))
+ rt_cache_route(nh, rt);
}
- if (dst_mtu(dst) > IP_MAX_MTU)
- dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
- if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
- dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
-
#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
- set_class_tag(rt, fib_rules_tclass(res));
+ set_class_tag(rt, res->tclassid);
#endif
set_class_tag(rt, itag);
#endif
}
static struct rtable *rt_dst_alloc(struct net_device *dev,
- bool nopolicy, bool noxfrm)
+ bool nopolicy, bool noxfrm, bool will_cache)
{
- return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
- DST_HOST |
+ return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
+ (will_cache ? 0 : DST_HOST) | DST_NOCACHE |
(nopolicy ? DST_NOPOLICY : 0) |
(noxfrm ? DST_NOXFRM : 0));
}
@@ -2010,9 +1268,7 @@ static struct rtable *rt_dst_alloc(struct net_device *dev,
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev, int our)
{
- unsigned int hash;
struct rtable *rth;
- __be32 spec_dst;
struct in_device *in_dev = __in_dev_get_rcu(dev);
u32 itag = 0;
int err;
@@ -2023,21 +1279,24 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
return -EINVAL;
if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
- ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
+ skb->protocol != htons(ETH_P_IP))
goto e_inval;
+ if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
+ if (ipv4_is_loopback(saddr))
+ goto e_inval;
+
if (ipv4_is_zeronet(saddr)) {
if (!ipv4_is_local_multicast(daddr))
goto e_inval;
- spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
} else {
- err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
- &itag);
+ err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
+ in_dev, &itag);
if (err < 0)
goto e_err;
}
rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
- IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
+ IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
if (!rth)
goto e_nobufs;
@@ -2046,23 +1305,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#endif
rth->dst.output = ip_rt_bug;
- rth->rt_key_dst = daddr;
- rth->rt_key_src = saddr;
rth->rt_genid = rt_genid(dev_net(dev));
rth->rt_flags = RTCF_MULTICAST;
rth->rt_type = RTN_MULTICAST;
- rth->rt_key_tos = tos;
- rth->rt_dst = daddr;
- rth->rt_src = saddr;
- rth->rt_route_iif = dev->ifindex;
- rth->rt_iif = dev->ifindex;
- rth->rt_oif = 0;
- rth->rt_mark = skb->mark;
- rth->rt_gateway = daddr;
- rth->rt_spec_dst= spec_dst;
- rth->rt_peer_genid = 0;
- rth->peer = NULL;
- rth->fi = NULL;
+ rth->rt_is_input= 1;
+ rth->rt_iif = 0;
+ rth->rt_pmtu = 0;
+ rth->rt_gateway = 0;
if (our) {
rth->dst.input= ip_local_deliver;
rth->rt_flags |= RTCF_LOCAL;
@@ -2074,9 +1323,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#endif
RT_CACHE_STAT_INC(in_slow_mc);
- hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
- rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
- return IS_ERR(rth) ? PTR_ERR(rth) : 0;
+ skb_dst_set(skb, &rth->dst);
+ return 0;
e_nobufs:
return -ENOBUFS;
@@ -2123,7 +1371,7 @@ static int __mkroute_input(struct sk_buff *skb,
int err;
struct in_device *out_dev;
unsigned int flags = 0;
- __be32 spec_dst;
+ bool do_cache;
u32 itag;
/* get a working reference to the output device */
@@ -2135,7 +1383,7 @@ static int __mkroute_input(struct sk_buff *skb,
err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
- in_dev->dev, &spec_dst, &itag);
+ in_dev->dev, in_dev, &itag);
if (err < 0) {
ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
saddr);
@@ -2143,9 +1391,6 @@ static int __mkroute_input(struct sk_buff *skb,
goto cleanup;
}
- if (err)
- flags |= RTCF_DIRECTSRC;
-
if (out_dev == in_dev && err &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
@@ -2166,37 +1411,39 @@ static int __mkroute_input(struct sk_buff *skb,
}
}
+ do_cache = false;
+ if (res->fi) {
+ if (!itag) {
+ rth = FIB_RES_NH(*res).nh_rth_input;
+ if (rt_cache_valid(rth)) {
+ dst_hold(&rth->dst);
+ goto out;
+ }
+ do_cache = true;
+ }
+ }
+
rth = rt_dst_alloc(out_dev->dev,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
- IN_DEV_CONF_GET(out_dev, NOXFRM));
+ IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
if (!rth) {
err = -ENOBUFS;
goto cleanup;
}
- rth->rt_key_dst = daddr;
- rth->rt_key_src = saddr;
rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
rth->rt_flags = flags;
rth->rt_type = res->type;
- rth->rt_key_tos = tos;
- rth->rt_dst = daddr;
- rth->rt_src = saddr;
- rth->rt_route_iif = in_dev->dev->ifindex;
- rth->rt_iif = in_dev->dev->ifindex;
- rth->rt_oif = 0;
- rth->rt_mark = skb->mark;
- rth->rt_gateway = daddr;
- rth->rt_spec_dst= spec_dst;
- rth->rt_peer_genid = 0;
- rth->peer = NULL;
- rth->fi = NULL;
+ rth->rt_is_input = 1;
+ rth->rt_iif = 0;
+ rth->rt_pmtu = 0;
+ rth->rt_gateway = 0;
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
- rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
-
+ rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag);
+out:
*result = rth;
err = 0;
cleanup:
@@ -2211,7 +1458,6 @@ static int ip_mkroute_input(struct sk_buff *skb,
{
struct rtable *rth = NULL;
int err;
- unsigned int hash;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (res->fi && res->fi->fib_nhs > 1)
@@ -2223,12 +1469,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
if (err)
return err;
- /* put it into the cache */
- hash = rt_hash(daddr, saddr, fl4->flowi4_iif,
- rt_genid(dev_net(rth->dst.dev)));
- rth = rt_intern_hash(hash, rth, skb, fl4->flowi4_iif);
- if (IS_ERR(rth))
- return PTR_ERR(rth);
+ skb_dst_set(skb, &rth->dst);
return 0;
}
@@ -2252,10 +1493,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
unsigned int flags = 0;
u32 itag = 0;
struct rtable *rth;
- unsigned int hash;
- __be32 spec_dst;
int err = -EINVAL;
struct net *net = dev_net(dev);
+ bool do_cache;
/* IP on this device is disabled. */
@@ -2266,10 +1506,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
by fib_lookup.
*/
- if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
- ipv4_is_loopback(saddr))
+ if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
goto martian_source;
+ res.fi = NULL;
if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
goto brd_input;
@@ -2279,9 +1519,17 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (ipv4_is_zeronet(saddr))
goto martian_source;
- if (ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr))
+ if (ipv4_is_zeronet(daddr))
goto martian_destination;
+ if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
+ if (ipv4_is_loopback(daddr))
+ goto martian_destination;
+
+ if (ipv4_is_loopback(saddr))
+ goto martian_source;
+ }
+
/*
* Now we are ready to route packet.
*/
@@ -2293,11 +1541,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.daddr = daddr;
fl4.saddr = saddr;
err = fib_lookup(net, &fl4, &res);
- if (err != 0) {
- if (!IN_DEV_FORWARD(in_dev))
- goto e_hostunreach;
+ if (err != 0)
goto no_route;
- }
RT_CACHE_STAT_INC(in_slow_tot);
@@ -2307,17 +1552,14 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (res.type == RTN_LOCAL) {
err = fib_validate_source(skb, saddr, daddr, tos,
net->loopback_dev->ifindex,
- dev, &spec_dst, &itag);
+ dev, in_dev, &itag);
if (err < 0)
goto martian_source_keep_err;
- if (err)
- flags |= RTCF_DIRECTSRC;
- spec_dst = daddr;
goto local_input;
}
if (!IN_DEV_FORWARD(in_dev))
- goto e_hostunreach;
+ goto no_route;
if (res.type != RTN_UNICAST)
goto martian_destination;
@@ -2328,23 +1570,31 @@ brd_input:
if (skb->protocol != htons(ETH_P_IP))
goto e_inval;
- if (ipv4_is_zeronet(saddr))
- spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
- else {
- err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
- &itag);
+ if (!ipv4_is_zeronet(saddr)) {
+ err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
+ in_dev, &itag);
if (err < 0)
goto martian_source_keep_err;
- if (err)
- flags |= RTCF_DIRECTSRC;
}
flags |= RTCF_BROADCAST;
res.type = RTN_BROADCAST;
RT_CACHE_STAT_INC(in_brd);
local_input:
+ do_cache = false;
+ if (res.fi) {
+ if (!itag) {
+ rth = FIB_RES_NH(res).nh_rth_input;
+ if (rt_cache_valid(rth)) {
+ dst_hold(&rth->dst);
+ goto set_and_out;
+ }
+ do_cache = true;
+ }
+ }
+
rth = rt_dst_alloc(net->loopback_dev,
- IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
+ IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
if (!rth)
goto e_nobufs;
@@ -2354,41 +1604,27 @@ local_input:
rth->dst.tclassid = itag;
#endif
- rth->rt_key_dst = daddr;
- rth->rt_key_src = saddr;
rth->rt_genid = rt_genid(net);
rth->rt_flags = flags|RTCF_LOCAL;
rth->rt_type = res.type;
- rth->rt_key_tos = tos;
- rth->rt_dst = daddr;
- rth->rt_src = saddr;
-#ifdef CONFIG_IP_ROUTE_CLASSID
- rth->dst.tclassid = itag;
-#endif
- rth->rt_route_iif = dev->ifindex;
- rth->rt_iif = dev->ifindex;
- rth->rt_oif = 0;
- rth->rt_mark = skb->mark;
- rth->rt_gateway = daddr;
- rth->rt_spec_dst= spec_dst;
- rth->rt_peer_genid = 0;
- rth->peer = NULL;
- rth->fi = NULL;
+ rth->rt_is_input = 1;
+ rth->rt_iif = 0;
+ rth->rt_pmtu = 0;
+ rth->rt_gateway = 0;
if (res.type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
rth->dst.error= -err;
rth->rt_flags &= ~RTCF_LOCAL;
}
- hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
- rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
+ if (do_cache)
+ rt_cache_route(&FIB_RES_NH(res), rth);
+set_and_out:
+ skb_dst_set(skb, &rth->dst);
err = 0;
- if (IS_ERR(rth))
- err = PTR_ERR(rth);
goto out;
no_route:
RT_CACHE_STAT_INC(in_no_route);
- spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
res.type = RTN_UNREACHABLE;
if (err == -ESRCH)
err = -ENETUNREACH;
@@ -2405,10 +1641,6 @@ martian_destination:
&daddr, &saddr, dev->name);
#endif
-e_hostunreach:
- err = -EHOSTUNREACH;
- goto out;
-
e_inval:
err = -EINVAL;
goto out;
@@ -2424,50 +1656,13 @@ martian_source_keep_err:
goto out;
}
-int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev, bool noref)
+int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev)
{
- struct rtable *rth;
- unsigned int hash;
- int iif = dev->ifindex;
- struct net *net;
int res;
- net = dev_net(dev);
-
rcu_read_lock();
- if (!rt_caching(net))
- goto skip_cache;
-
- tos &= IPTOS_RT_MASK;
- hash = rt_hash(daddr, saddr, iif, rt_genid(net));
-
- for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference(rth->dst.rt_next)) {
- if ((((__force u32)rth->rt_key_dst ^ (__force u32)daddr) |
- ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
- (rth->rt_route_iif ^ iif) |
- (rth->rt_key_tos ^ tos)) == 0 &&
- rth->rt_mark == skb->mark &&
- net_eq(dev_net(rth->dst.dev), net) &&
- !rt_is_expired(rth)) {
- ipv4_validate_peer(rth);
- if (noref) {
- dst_use_noref(&rth->dst, jiffies);
- skb_dst_set_noref(skb, &rth->dst);
- } else {
- dst_use(&rth->dst, jiffies);
- skb_dst_set(skb, &rth->dst);
- }
- RT_CACHE_STAT_INC(in_hit);
- rcu_read_unlock();
- return 0;
- }
- RT_CACHE_STAT_INC(in_hlist_search);
- }
-
-skip_cache:
/* Multicast recognition logic is moved from route cache to here.
The problem was that too many Ethernet cards have broken/missing
hardware multicast filters :-( As result the host on multicasting
@@ -2505,24 +1700,28 @@ skip_cache:
rcu_read_unlock();
return res;
}
-EXPORT_SYMBOL(ip_route_input_common);
+EXPORT_SYMBOL(ip_route_input);
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
- const struct flowi4 *fl4,
- __be32 orig_daddr, __be32 orig_saddr,
- int orig_oif, __u8 orig_rtos,
+ const struct flowi4 *fl4, int orig_oif,
struct net_device *dev_out,
unsigned int flags)
{
struct fib_info *fi = res->fi;
+ struct fib_nh_exception *fnhe;
struct in_device *in_dev;
u16 type = res->type;
struct rtable *rth;
- if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
+ in_dev = __in_dev_get_rcu(dev_out);
+ if (!in_dev)
return ERR_PTR(-EINVAL);
+ if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
+ if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
+ return ERR_PTR(-EINVAL);
+
if (ipv4_is_lbcast(fl4->daddr))
type = RTN_BROADCAST;
else if (ipv4_is_multicast(fl4->daddr))
@@ -2533,10 +1732,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- in_dev = __in_dev_get_rcu(dev_out);
- if (!in_dev)
- return ERR_PTR(-EINVAL);
-
if (type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
fi = NULL;
@@ -2553,40 +1748,39 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
fi = NULL;
}
+ fnhe = NULL;
+ if (fi) {
+ fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr);
+ if (!fnhe) {
+ rth = FIB_RES_NH(*res).nh_rth_output;
+ if (rt_cache_valid(rth)) {
+ dst_hold(&rth->dst);
+ return rth;
+ }
+ }
+ }
rth = rt_dst_alloc(dev_out,
IN_DEV_CONF_GET(in_dev, NOPOLICY),
- IN_DEV_CONF_GET(in_dev, NOXFRM));
+ IN_DEV_CONF_GET(in_dev, NOXFRM),
+ fi && !fnhe);
if (!rth)
return ERR_PTR(-ENOBUFS);
rth->dst.output = ip_output;
- rth->rt_key_dst = orig_daddr;
- rth->rt_key_src = orig_saddr;
rth->rt_genid = rt_genid(dev_net(dev_out));
rth->rt_flags = flags;
rth->rt_type = type;
- rth->rt_key_tos = orig_rtos;
- rth->rt_dst = fl4->daddr;
- rth->rt_src = fl4->saddr;
- rth->rt_route_iif = 0;
- rth->rt_iif = orig_oif ? : dev_out->ifindex;
- rth->rt_oif = orig_oif;
- rth->rt_mark = fl4->flowi4_mark;
- rth->rt_gateway = fl4->daddr;
- rth->rt_spec_dst= fl4->saddr;
- rth->rt_peer_genid = 0;
- rth->peer = NULL;
- rth->fi = NULL;
+ rth->rt_is_input = 0;
+ rth->rt_iif = orig_oif ? : 0;
+ rth->rt_pmtu = 0;
+ rth->rt_gateway = 0;
RT_CACHE_STAT_INC(out_slow_tot);
- if (flags & RTCF_LOCAL) {
+ if (flags & RTCF_LOCAL)
rth->dst.input = ip_local_deliver;
- rth->rt_spec_dst = fl4->daddr;
- }
if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
- rth->rt_spec_dst = fl4->saddr;
if (flags & RTCF_LOCAL &&
!(dev_out->flags & IFF_LOOPBACK)) {
rth->dst.output = ip_mc_output;
@@ -2603,34 +1797,28 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
#endif
}
- rt_set_nexthop(rth, fl4, res, fi, type, 0);
+ rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
return rth;
}
/*
* Major route resolver routine.
- * called with rcu_read_lock();
*/
-static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
+struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
{
struct net_device *dev_out = NULL;
__u8 tos = RT_FL_TOS(fl4);
unsigned int flags = 0;
struct fib_result res;
struct rtable *rth;
- __be32 orig_daddr;
- __be32 orig_saddr;
int orig_oif;
+ res.tclassid = 0;
res.fi = NULL;
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- res.r = NULL;
-#endif
+ res.table = NULL;
- orig_daddr = fl4->daddr;
- orig_saddr = fl4->saddr;
orig_oif = fl4->flowi4_oif;
fl4->flowi4_iif = net->loopback_dev->ifindex;
@@ -2730,6 +1918,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
if (fib_lookup(net, fl4, &res)) {
res.fi = NULL;
+ res.table = NULL;
if (fl4->flowi4_oif) {
/* Apparently, routing tables are wrong. Assume,
that the destination is on link.
@@ -2791,60 +1980,12 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
make_route:
- rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
- tos, dev_out, flags);
- if (!IS_ERR(rth)) {
- unsigned int hash;
-
- hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
- rt_genid(dev_net(dev_out)));
- rth = rt_intern_hash(hash, rth, NULL, orig_oif);
- }
+ rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
out:
rcu_read_unlock();
return rth;
}
-
-struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
-{
- struct rtable *rth;
- unsigned int hash;
-
- if (!rt_caching(net))
- goto slow_output;
-
- hash = rt_hash(flp4->daddr, flp4->saddr, flp4->flowi4_oif, rt_genid(net));
-
- rcu_read_lock_bh();
- for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
- rth = rcu_dereference_bh(rth->dst.rt_next)) {
- if (rth->rt_key_dst == flp4->daddr &&
- rth->rt_key_src == flp4->saddr &&
- rt_is_output_route(rth) &&
- rth->rt_oif == flp4->flowi4_oif &&
- rth->rt_mark == flp4->flowi4_mark &&
- !((rth->rt_key_tos ^ flp4->flowi4_tos) &
- (IPTOS_RT_MASK | RTO_ONLINK)) &&
- net_eq(dev_net(rth->dst.dev), net) &&
- !rt_is_expired(rth)) {
- ipv4_validate_peer(rth);
- dst_use(&rth->dst, jiffies);
- RT_CACHE_STAT_INC(out_hit);
- rcu_read_unlock_bh();
- if (!flp4->saddr)
- flp4->saddr = rth->rt_src;
- if (!flp4->daddr)
- flp4->daddr = rth->rt_dst;
- return rth;
- }
- RT_CACHE_STAT_INC(out_hlist_search);
- }
- rcu_read_unlock_bh();
-
-slow_output:
- return ip_route_output_slow(net, flp4);
-}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
@@ -2859,7 +2000,13 @@ static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
return mtu ? : dst->dev->mtu;
}
-static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
+{
+}
+
+static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
{
}
@@ -2872,53 +2019,40 @@ static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
static struct dst_ops ipv4_dst_blackhole_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
- .destroy = ipv4_dst_destroy,
.check = ipv4_blackhole_dst_check,
.mtu = ipv4_blackhole_mtu,
.default_advmss = ipv4_default_advmss,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
+ .redirect = ipv4_rt_blackhole_redirect,
.cow_metrics = ipv4_rt_blackhole_cow_metrics,
.neigh_lookup = ipv4_neigh_lookup,
};
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
- struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
struct rtable *ort = (struct rtable *) dst_orig;
+ struct rtable *rt;
+ rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
if (rt) {
struct dst_entry *new = &rt->dst;
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
- dst_copy_metrics(new, &ort->dst);
new->dev = ort->dst.dev;
if (new->dev)
dev_hold(new->dev);
- rt->rt_key_dst = ort->rt_key_dst;
- rt->rt_key_src = ort->rt_key_src;
- rt->rt_key_tos = ort->rt_key_tos;
- rt->rt_route_iif = ort->rt_route_iif;
+ rt->rt_is_input = ort->rt_is_input;
rt->rt_iif = ort->rt_iif;
- rt->rt_oif = ort->rt_oif;
- rt->rt_mark = ort->rt_mark;
+ rt->rt_pmtu = ort->rt_pmtu;
rt->rt_genid = rt_genid(net);
rt->rt_flags = ort->rt_flags;
rt->rt_type = ort->rt_type;
- rt->rt_dst = ort->rt_dst;
- rt->rt_src = ort->rt_src;
rt->rt_gateway = ort->rt_gateway;
- rt->rt_spec_dst = ort->rt_spec_dst;
- rt->peer = ort->peer;
- if (rt->peer)
- atomic_inc(&rt->peer->refcnt);
- rt->fi = ort->fi;
- if (rt->fi)
- atomic_inc(&rt->fi->fib_clntref);
dst_free(new);
}
@@ -2945,16 +2079,16 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
-static int rt_fill_info(struct net *net,
- struct sk_buff *skb, u32 pid, u32 seq, int event,
- int nowait, unsigned int flags)
+static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
+ struct flowi4 *fl4, struct sk_buff *skb, u32 pid,
+ u32 seq, int event, int nowait, unsigned int flags)
{
struct rtable *rt = skb_rtable(skb);
struct rtmsg *r;
struct nlmsghdr *nlh;
unsigned long expires = 0;
- const struct inet_peer *peer = rt->peer;
- u32 id = 0, ts = 0, tsage = 0, error;
+ u32 error;
+ u32 metrics[RTAX_MAX];
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
if (nlh == NULL)
@@ -2964,7 +2098,7 @@ static int rt_fill_info(struct net *net,
r->rtm_family = AF_INET;
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
- r->rtm_tos = rt->rt_key_tos;
+ r->rtm_tos = fl4->flowi4_tos;
r->rtm_table = RT_TABLE_MAIN;
if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
goto nla_put_failure;
@@ -2975,11 +2109,11 @@ static int rt_fill_info(struct net *net,
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
- if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
+ if (nla_put_be32(skb, RTA_DST, dst))
goto nla_put_failure;
- if (rt->rt_key_src) {
+ if (src) {
r->rtm_src_len = 32;
- if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
+ if (nla_put_be32(skb, RTA_SRC, src))
goto nla_put_failure;
}
if (rt->dst.dev &&
@@ -2990,69 +2124,40 @@ static int rt_fill_info(struct net *net,
nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
goto nla_put_failure;
#endif
- if (rt_is_input_route(rt)) {
- if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst))
- goto nla_put_failure;
- } else if (rt->rt_src != rt->rt_key_src) {
- if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
+ if (!rt_is_input_route(rt) &&
+ fl4->saddr != src) {
+ if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
goto nla_put_failure;
}
- if (rt->rt_dst != rt->rt_gateway &&
+ if (rt->rt_gateway &&
nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
goto nla_put_failure;
- if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
+ memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
+ if (rt->rt_pmtu)
+ metrics[RTAX_MTU - 1] = rt->rt_pmtu;
+ if (rtnetlink_put_metrics(skb, metrics) < 0)
goto nla_put_failure;
- if (rt->rt_mark &&
- nla_put_be32(skb, RTA_MARK, rt->rt_mark))
+ if (fl4->flowi4_mark &&
+ nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark))
goto nla_put_failure;
error = rt->dst.error;
- if (peer) {
- inet_peer_refcheck(rt->peer);
- id = atomic_read(&peer->ip_id_count) & 0xffff;
- if (peer->tcp_ts_stamp) {
- ts = peer->tcp_ts;
- tsage = get_seconds() - peer->tcp_ts_stamp;
- }
- expires = ACCESS_ONCE(peer->pmtu_expires);
- if (expires) {
- if (time_before(jiffies, expires))
- expires -= jiffies;
- else
- expires = 0;
- }
+ expires = rt->dst.expires;
+ if (expires) {
+ if (time_before(jiffies, expires))
+ expires -= jiffies;
+ else
+ expires = 0;
}
if (rt_is_input_route(rt)) {
-#ifdef CONFIG_IP_MROUTE
- __be32 dst = rt->rt_dst;
-
- if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
- IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
- int err = ipmr_get_route(net, skb,
- rt->rt_src, rt->rt_dst,
- r, nowait);
- if (err <= 0) {
- if (!nowait) {
- if (err == 0)
- return 0;
- goto nla_put_failure;
- } else {
- if (err == -EMSGSIZE)
- goto nla_put_failure;
- error = err;
- }
- }
- } else
-#endif
- if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
- goto nla_put_failure;
+ if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
+ goto nla_put_failure;
}
- if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
- expires, error) < 0)
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -3068,6 +2173,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
struct rtable *rt = NULL;
+ struct flowi4 fl4;
__be32 dst = 0;
__be32 src = 0;
u32 iif;
@@ -3102,6 +2208,13 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = dst;
+ fl4.saddr = src;
+ fl4.flowi4_tos = rtm->rtm_tos;
+ fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
+ fl4.flowi4_mark = mark;
+
if (iif) {
struct net_device *dev;
@@ -3122,13 +2235,6 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
if (err == 0 && rt->dst.error)
err = -rt->dst.error;
} else {
- struct flowi4 fl4 = {
- .daddr = dst,
- .saddr = src,
- .flowi4_tos = rtm->rtm_tos,
- .flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
- .flowi4_mark = mark,
- };
rt = ip_route_output_key(net, &fl4);
err = 0;
@@ -3143,7 +2249,8 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
if (rtm->rtm_flags & RTM_F_NOTIFY)
rt->rt_flags |= RTCF_NOTIFY;
- err = rt_fill_info(net, skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
+ err = rt_fill_info(net, dst, src, &fl4, skb,
+ NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
RTM_NEWROUTE, 0, 0);
if (err <= 0)
goto errout_free;
@@ -3159,43 +2266,6 @@ errout_free:
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct rtable *rt;
- int h, s_h;
- int idx, s_idx;
- struct net *net;
-
- net = sock_net(skb->sk);
-
- s_h = cb->args[0];
- if (s_h < 0)
- s_h = 0;
- s_idx = idx = cb->args[1];
- for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
- if (!rt_hash_table[h].chain)
- continue;
- rcu_read_lock_bh();
- for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
- rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
- if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
- continue;
- if (rt_is_expired(rt))
- continue;
- skb_dst_set_noref(skb, &rt->dst);
- if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWROUTE,
- 1, NLM_F_MULTI) <= 0) {
- skb_dst_drop(skb);
- rcu_read_unlock_bh();
- goto done;
- }
- skb_dst_drop(skb);
- }
- rcu_read_unlock_bh();
- }
-
-done:
- cb->args[0] = h;
- cb->args[1] = idx;
return skb->len;
}
@@ -3400,26 +2470,34 @@ static __net_initdata struct pernet_operations rt_genid_ops = {
.init = rt_genid_init,
};
+static int __net_init ipv4_inetpeer_init(struct net *net)
+{
+ struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
-#ifdef CONFIG_IP_ROUTE_CLASSID
-struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
-#endif /* CONFIG_IP_ROUTE_CLASSID */
+ if (!bp)
+ return -ENOMEM;
+ inet_peer_base_init(bp);
+ net->ipv4.peers = bp;
+ return 0;
+}
-static __initdata unsigned long rhash_entries;
-static int __init set_rhash_entries(char *str)
+static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
- ssize_t ret;
+ struct inet_peer_base *bp = net->ipv4.peers;
- if (!str)
- return 0;
+ net->ipv4.peers = NULL;
+ inetpeer_invalidate_tree(bp);
+ kfree(bp);
+}
- ret = kstrtoul(str, 0, &rhash_entries);
- if (ret)
- return 0;
+static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
+ .init = ipv4_inetpeer_init,
+ .exit = ipv4_inetpeer_exit,
+};
- return 1;
-}
-__setup("rhash_entries=", set_rhash_entries);
+#ifdef CONFIG_IP_ROUTE_CLASSID
+struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
+#endif /* CONFIG_IP_ROUTE_CLASSID */
int __init ip_rt_init(void)
{
@@ -3443,31 +2521,12 @@ int __init ip_rt_init(void)
if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
- rt_hash_table = (struct rt_hash_bucket *)
- alloc_large_system_hash("IP route cache",
- sizeof(struct rt_hash_bucket),
- rhash_entries,
- (totalram_pages >= 128 * 1024) ?
- 15 : 17,
- 0,
- &rt_hash_log,
- &rt_hash_mask,
- 0,
- rhash_entries ? 0 : 512 * 1024);
- memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
- rt_hash_lock_init();
-
- ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
- ip_rt_max_size = (rt_hash_mask + 1) * 16;
+ ipv4_dst_ops.gc_thresh = ~0;
+ ip_rt_max_size = INT_MAX;
devinet_init();
ip_fib_init();
- INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
- expires_ljiffies = jiffies;
- schedule_delayed_work(&expires_work,
- net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
-
if (ip_rt_proc_init())
pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
@@ -3480,6 +2539,7 @@ int __init ip_rt_init(void)
register_pernet_subsys(&sysctl_route_ops);
#endif
register_pernet_subsys(&rt_genid_ops);
+ register_pernet_subsys(&ipv4_inetpeer_ops);
return rc;
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index eab2a7fb15d1..650e1528e1e6 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -293,7 +293,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
goto out;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ef32956ed655..5840c3255721 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -301,6 +301,13 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
+ .procname = "ip_early_demux",
+ .data = &sysctl_ip_early_demux,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "ip_dynaddr",
.data = &sysctl_ip_dynaddr,
.maxlen = sizeof(int),
@@ -360,6 +367,13 @@ static struct ctl_table ipv4_table[] = {
},
#endif
{
+ .procname = "tcp_fastopen",
+ .data = &sysctl_tcp_fastopen,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "tcp_tw_recycle",
.data = &tcp_death_row.sysctl_tw_recycle,
.maxlen = sizeof(int),
@@ -591,6 +605,20 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "tcp_limit_output_bytes",
+ .data = &sysctl_tcp_limit_output_bytes,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "tcp_challenge_ack_limit",
+ .data = &sysctl_tcp_challenge_ack_limit,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
#ifdef CONFIG_NET_DMA
{
.procname = "tcp_dma_copybreak",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3ba605f60e4e..581ecf02c6b5 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -270,6 +270,7 @@
#include <linux/slab.h>
#include <net/icmp.h>
+#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
@@ -376,6 +377,7 @@ void tcp_init_sock(struct sock *sk)
skb_queue_head_init(&tp->out_of_order_queue);
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
+ INIT_LIST_HEAD(&tp->tsq_node);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev = TCP_TIMEOUT_INIT;
@@ -796,6 +798,10 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
inet_csk(sk)->icsk_ext_hdr_len -
tp->tcp_header_len);
+ /* TSQ : try to have two TSO segments in flight */
+ xmit_size_goal = min_t(u32, xmit_size_goal,
+ sysctl_tcp_limit_output_bytes >> 1);
+
xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
/* We try hard to avoid divides here */
@@ -977,26 +983,67 @@ static inline int select_size(const struct sock *sk, bool sg)
return tmp;
}
+void tcp_free_fastopen_req(struct tcp_sock *tp)
+{
+ if (tp->fastopen_req != NULL) {
+ kfree(tp->fastopen_req);
+ tp->fastopen_req = NULL;
+ }
+}
+
+static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int err, flags;
+
+ if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+ return -EOPNOTSUPP;
+ if (tp->fastopen_req != NULL)
+ return -EALREADY; /* Another Fast Open is in progress */
+
+ tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
+ sk->sk_allocation);
+ if (unlikely(tp->fastopen_req == NULL))
+ return -ENOBUFS;
+ tp->fastopen_req->data = msg;
+
+ flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
+ err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+ msg->msg_namelen, flags);
+ *size = tp->fastopen_req->copied;
+ tcp_free_fastopen_req(tp);
+ return err;
+}
+
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
struct iovec *iov;
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- int iovlen, flags, err, copied;
- int mss_now = 0, size_goal;
+ int iovlen, flags, err, copied = 0;
+ int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
bool sg;
long timeo;
lock_sock(sk);
flags = msg->msg_flags;
+ if (flags & MSG_FASTOPEN) {
+ err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
+ if (err == -EINPROGRESS && copied_syn > 0)
+ goto out;
+ else if (err)
+ goto out_err;
+ offset = copied_syn;
+ }
+
timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
/* Wait for a connection to finish. */
if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
- goto out_err;
+ goto do_error;
if (unlikely(tp->repair)) {
if (tp->repair_queue == TCP_RECV_QUEUE) {
@@ -1032,6 +1079,15 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
unsigned char __user *from = iov->iov_base;
iov++;
+ if (unlikely(offset > 0)) { /* Skip bytes copied in SYN */
+ if (offset >= seglen) {
+ offset -= seglen;
+ continue;
+ }
+ seglen -= offset;
+ from += offset;
+ offset = 0;
+ }
while (seglen > 0) {
int copy = 0;
@@ -1194,7 +1250,7 @@ out:
if (copied && likely(!tp->repair))
tcp_push(sk, flags, mss_now, tp->nonagle);
release_sock(sk);
- return copied;
+ return copied + copied_syn;
do_fault:
if (!skb->len) {
@@ -1207,7 +1263,7 @@ do_fault:
}
do_error:
- if (copied)
+ if (copied + copied_syn)
goto out;
out_err:
err = sk_stream_error(sk, flags, err);
@@ -3310,8 +3366,7 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
#endif
-/**
- * Each Responder maintains up to two secret values concurrently for
+/* Each Responder maintains up to two secret values concurrently for
* efficient secret rollover. Each secret value has 4 states:
*
* Generating. (tcp_secret_generating != tcp_secret_primary)
@@ -3563,6 +3618,8 @@ void __init tcp_init(void)
pr_info("Hash tables configured (established %u bind %u)\n",
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
+ tcp_metrics_init();
+
tcp_register_congestion_control(&tcp_reno);
memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
@@ -3573,4 +3630,5 @@ void __init tcp_init(void)
tcp_secret_primary = &tcp_secret_one;
tcp_secret_retiring = &tcp_secret_two;
tcp_secret_secondary = &tcp_secret_two;
+ tcp_tasklet_init();
}
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 04dbd7ae7c62..4d4db16e336e 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -307,6 +307,7 @@ EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
void tcp_slow_start(struct tcp_sock *tp)
{
int cnt; /* increase in packets */
+ unsigned int delta = 0;
/* RFC3465: ABC Slow start
* Increase only after a full MSS of bytes is acked
@@ -333,9 +334,9 @@ void tcp_slow_start(struct tcp_sock *tp)
tp->snd_cwnd_cnt += cnt;
while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
tp->snd_cwnd_cnt -= tp->snd_cwnd;
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
+ delta++;
}
+ tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
}
EXPORT_SYMBOL_GPL(tcp_slow_start);
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
new file mode 100644
index 000000000000..a7f729c409d7
--- /dev/null
+++ b/net/ipv4/tcp_fastopen.c
@@ -0,0 +1,11 @@
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+int sysctl_tcp_fastopen;
+
+static int __init tcp_fastopen_init(void)
+{
+ return 0;
+}
+
+late_initcall(tcp_fastopen_init);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b224eb8bce8b..3e07a64ca44e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -88,12 +88,14 @@ int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+/* rfc5961 challenge ack rate limiting */
+int sysctl_tcp_challenge_ack_limit = 100;
+
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_frto_response __read_mostly;
-int sysctl_tcp_nometrics_save __read_mostly;
int sysctl_tcp_thin_dupack __read_mostly;
@@ -701,7 +703,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
/* Calculate rto without backoff. This is the second half of Van Jacobson's
* routine referred to above.
*/
-static inline void tcp_set_rto(struct sock *sk)
+void tcp_set_rto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
/* Old crap is replaced with new one. 8)
@@ -728,109 +730,6 @@ static inline void tcp_set_rto(struct sock *sk)
tcp_bound_rto(sk);
}
-/* Save metrics learned by this TCP session.
- This function is called only, when TCP finishes successfully
- i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
- */
-void tcp_update_metrics(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
-
- if (sysctl_tcp_nometrics_save)
- return;
-
- dst_confirm(dst);
-
- if (dst && (dst->flags & DST_HOST)) {
- const struct inet_connection_sock *icsk = inet_csk(sk);
- int m;
- unsigned long rtt;
-
- if (icsk->icsk_backoff || !tp->srtt) {
- /* This session failed to estimate rtt. Why?
- * Probably, no packets returned in time.
- * Reset our results.
- */
- if (!(dst_metric_locked(dst, RTAX_RTT)))
- dst_metric_set(dst, RTAX_RTT, 0);
- return;
- }
-
- rtt = dst_metric_rtt(dst, RTAX_RTT);
- m = rtt - tp->srtt;
-
- /* If newly calculated rtt larger than stored one,
- * store new one. Otherwise, use EWMA. Remember,
- * rtt overestimation is always better than underestimation.
- */
- if (!(dst_metric_locked(dst, RTAX_RTT))) {
- if (m <= 0)
- set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
- else
- set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
- }
-
- if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
- unsigned long var;
- if (m < 0)
- m = -m;
-
- /* Scale deviation to rttvar fixed point */
- m >>= 1;
- if (m < tp->mdev)
- m = tp->mdev;
-
- var = dst_metric_rtt(dst, RTAX_RTTVAR);
- if (m >= var)
- var = m;
- else
- var -= (var - m) >> 2;
-
- set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
- }
-
- if (tcp_in_initial_slowstart(tp)) {
- /* Slow start still did not finish. */
- if (dst_metric(dst, RTAX_SSTHRESH) &&
- !dst_metric_locked(dst, RTAX_SSTHRESH) &&
- (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
- dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
- if (!dst_metric_locked(dst, RTAX_CWND) &&
- tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
- dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
- } else if (tp->snd_cwnd > tp->snd_ssthresh &&
- icsk->icsk_ca_state == TCP_CA_Open) {
- /* Cong. avoidance phase, cwnd is reliable. */
- if (!dst_metric_locked(dst, RTAX_SSTHRESH))
- dst_metric_set(dst, RTAX_SSTHRESH,
- max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
- if (!dst_metric_locked(dst, RTAX_CWND))
- dst_metric_set(dst, RTAX_CWND,
- (dst_metric(dst, RTAX_CWND) +
- tp->snd_cwnd) >> 1);
- } else {
- /* Else slow start did not finish, cwnd is non-sense,
- ssthresh may be also invalid.
- */
- if (!dst_metric_locked(dst, RTAX_CWND))
- dst_metric_set(dst, RTAX_CWND,
- (dst_metric(dst, RTAX_CWND) +
- tp->snd_ssthresh) >> 1);
- if (dst_metric(dst, RTAX_SSTHRESH) &&
- !dst_metric_locked(dst, RTAX_SSTHRESH) &&
- tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
- dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
- }
-
- if (!dst_metric_locked(dst, RTAX_REORDERING)) {
- if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
- tp->reordering != sysctl_tcp_reordering)
- dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
- }
- }
-}
-
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -867,7 +766,7 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
* Packet counting of FACK is based on in-order assumptions, therefore TCP
* disables it when reordering is detected
*/
-static void tcp_disable_fack(struct tcp_sock *tp)
+void tcp_disable_fack(struct tcp_sock *tp)
{
/* RFC3517 uses different metric in lost marker => reset on change */
if (tcp_is_fack(tp))
@@ -881,86 +780,6 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
}
-/* Initialize metrics on socket. */
-
-static void tcp_init_metrics(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- struct dst_entry *dst = __sk_dst_get(sk);
-
- if (dst == NULL)
- goto reset;
-
- dst_confirm(dst);
-
- if (dst_metric_locked(dst, RTAX_CWND))
- tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
- if (dst_metric(dst, RTAX_SSTHRESH)) {
- tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
- if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
- tp->snd_ssthresh = tp->snd_cwnd_clamp;
- } else {
- /* ssthresh may have been reduced unnecessarily during.
- * 3WHS. Restore it back to its initial default.
- */
- tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- }
- if (dst_metric(dst, RTAX_REORDERING) &&
- tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
- tcp_disable_fack(tp);
- tcp_disable_early_retrans(tp);
- tp->reordering = dst_metric(dst, RTAX_REORDERING);
- }
-
- if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
- goto reset;
-
- /* Initial rtt is determined from SYN,SYN-ACK.
- * The segment is small and rtt may appear much
- * less than real one. Use per-dst memory
- * to make it more realistic.
- *
- * A bit of theory. RTT is time passed after "normal" sized packet
- * is sent until it is ACKed. In normal circumstances sending small
- * packets force peer to delay ACKs and calculation is correct too.
- * The algorithm is adaptive and, provided we follow specs, it
- * NEVER underestimate RTT. BUT! If peer tries to make some clever
- * tricks sort of "quick acks" for time long enough to decrease RTT
- * to low value, and then abruptly stops to do it and starts to delay
- * ACKs, wait for troubles.
- */
- if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
- tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
- tp->rtt_seq = tp->snd_nxt;
- }
- if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
- tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
- tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
- }
- tcp_set_rto(sk);
-reset:
- if (tp->srtt == 0) {
- /* RFC6298: 5.7 We've failed to get a valid RTT sample from
- * 3WHS. This is most likely due to retransmission,
- * including spurious one. Reset the RTO back to 3secs
- * from the more aggressive 1sec to avoid more spurious
- * retransmission.
- */
- tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
- inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
- }
- /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
- * retransmitted. In light of RFC6298 more aggressive 1sec
- * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
- * retransmission has occurred.
- */
- if (tp->total_retrans > 1)
- tp->snd_cwnd = 1;
- else
- tp->snd_cwnd = tcp_init_cwnd(tp, dst);
- tp->snd_cwnd_stamp = tcp_time_stamp;
-}
-
static void tcp_update_reordering(struct sock *sk, const int metric,
const int ts)
{
@@ -2702,7 +2521,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
/* Nothing was retransmitted or returned timestamp is less
* than timestamp of the first retransmission.
*/
-static inline int tcp_packet_delayed(const struct tcp_sock *tp)
+static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
return !tp->retrans_stamp ||
(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -2763,7 +2582,7 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
tp->snd_cwnd_stamp = tcp_time_stamp;
}
-static inline int tcp_may_undo(const struct tcp_sock *tp)
+static inline bool tcp_may_undo(const struct tcp_sock *tp)
{
return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}
@@ -3552,13 +3371,13 @@ static void tcp_ack_probe(struct sock *sk)
}
}
-static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
+static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}
-static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
+static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
const struct tcp_sock *tp = tcp_sk(sk);
return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
@@ -3568,7 +3387,7 @@ static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
/* Check that window update is acceptable.
* The function assumes that snd_una<=ack<=snd_next.
*/
-static inline int tcp_may_update_window(const struct tcp_sock *tp,
+static inline bool tcp_may_update_window(const struct tcp_sock *tp,
const u32 ack, const u32 ack_seq,
const u32 nwin)
{
@@ -3869,9 +3688,11 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_cong_avoid(sk, ack, prior_in_flight);
}
- if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
- dst_confirm(__sk_dst_get(sk));
-
+ if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
+ struct dst_entry *dst = __sk_dst_get(sk);
+ if (dst)
+ dst_confirm(dst);
+ }
return 1;
no_queue:
@@ -3911,7 +3732,8 @@ old_ack:
* the fast version below fails.
*/
void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
- const u8 **hvpp, int estab)
+ const u8 **hvpp, int estab,
+ struct tcp_fastopen_cookie *foc)
{
const unsigned char *ptr;
const struct tcphdr *th = tcp_hdr(skb);
@@ -4018,8 +3840,25 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
break;
}
break;
- }
+ case TCPOPT_EXP:
+ /* Fast Open option shares code 254 using a
+ * 16 bits magic number. It's valid only in
+ * SYN or SYN-ACK with an even size.
+ */
+ if (opsize < TCPOLEN_EXP_FASTOPEN_BASE ||
+ get_unaligned_be16(ptr) != TCPOPT_FASTOPEN_MAGIC ||
+ foc == NULL || !th->syn || (opsize & 1))
+ break;
+ foc->len = opsize - TCPOLEN_EXP_FASTOPEN_BASE;
+ if (foc->len >= TCP_FASTOPEN_COOKIE_MIN &&
+ foc->len <= TCP_FASTOPEN_COOKIE_MAX)
+ memcpy(foc->val, ptr + 2, foc->len);
+ else if (foc->len != 0)
+ foc->len = -1;
+ break;
+
+ }
ptr += opsize-2;
length -= opsize;
}
@@ -4061,7 +3900,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
if (tcp_parse_aligned_timestamp(tp, th))
return true;
}
- tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
+ tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
return true;
}
@@ -4167,7 +4006,7 @@ static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
}
-static inline int tcp_paws_discard(const struct sock *sk,
+static inline bool tcp_paws_discard(const struct sock *sk,
const struct sk_buff *skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -4189,7 +4028,7 @@ static inline int tcp_paws_discard(const struct sock *sk,
* (borrowed from freebsd)
*/
-static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
+static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
{
return !before(end_seq, tp->rcv_wup) &&
!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
@@ -4579,8 +4418,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
TCP_ECN_check_ce(tp, skb);
- if (tcp_try_rmem_schedule(sk, skb->truesize)) {
- /* TODO: should increment a counter */
+ if (unlikely(tcp_try_rmem_schedule(sk, skb->truesize))) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
__kfree_skb(skb);
return;
}
@@ -4589,6 +4428,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
tp->pred_flags = 0;
inet_csk_schedule_ack(sk);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
@@ -4642,6 +4482,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
/* All the bits are present. Drop. */
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
__kfree_skb(skb);
skb = NULL;
tcp_dsack_set(sk, seq, end_seq);
@@ -4680,6 +4521,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
__skb_unlink(skb1, &tp->out_of_order_queue);
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
__kfree_skb(skb1);
}
@@ -5372,7 +5214,7 @@ static __sum16 __tcp_checksum_complete_user(struct sock *sk,
return result;
}
-static inline int tcp_checksum_complete_user(struct sock *sk,
+static inline bool tcp_checksum_complete_user(struct sock *sk,
struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
@@ -5426,11 +5268,28 @@ out:
}
#endif /* CONFIG_NET_DMA */
+static void tcp_send_challenge_ack(struct sock *sk)
+{
+ /* unprotected vars, we dont care of overwrites */
+ static u32 challenge_timestamp;
+ static unsigned int challenge_count;
+ u32 now = jiffies / HZ;
+
+ if (now != challenge_timestamp) {
+ challenge_timestamp = now;
+ challenge_count = 0;
+ }
+ if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+ tcp_send_ack(sk);
+ }
+}
+
/* Does PAWS and seqno based validation of an incoming segment, flags will
* play significant role here.
*/
-static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th, int syn_inerr)
+static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, int syn_inerr)
{
const u8 *hash_location;
struct tcp_sock *tp = tcp_sk(sk);
@@ -5455,14 +5314,26 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
* an acknowledgment should be sent in reply (unless the RST
* bit is set, if so drop the segment and return)".
*/
- if (!th->rst)
+ if (!th->rst) {
+ if (th->syn)
+ goto syn_challenge;
tcp_send_dupack(sk, skb);
+ }
goto discard;
}
/* Step 2: check RST bit */
if (th->rst) {
- tcp_reset(sk);
+ /* RFC 5961 3.2 :
+ * If sequence number exactly matches RCV.NXT, then
+ * RESET the connection
+ * else
+ * Send a challenge ACK
+ */
+ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt)
+ tcp_reset(sk);
+ else
+ tcp_send_challenge_ack(sk);
goto discard;
}
@@ -5473,20 +5344,23 @@ static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
/* step 3: check security and precedence [ignored] */
- /* step 4: Check for a SYN in window. */
- if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
+ /* step 4: Check for a SYN
+ * RFC 5691 4.2 : Send a challenge ack
+ */
+ if (th->syn) {
+syn_challenge:
if (syn_inerr)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
- tcp_reset(sk);
- return -1;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+ tcp_send_challenge_ack(sk);
+ goto discard;
}
- return 1;
+ return true;
discard:
__kfree_skb(skb);
- return 0;
+ return false;
}
/*
@@ -5516,7 +5390,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
- int res;
/*
* Header prediction.
@@ -5693,9 +5566,8 @@ slow_path:
* Standard slow path.
*/
- res = tcp_validate_incoming(sk, skb, th, 1);
- if (res <= 0)
- return -res;
+ if (!tcp_validate_incoming(sk, skb, th, 1))
+ return 0;
step5:
if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
@@ -5729,8 +5601,10 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
tcp_set_state(sk, TCP_ESTABLISHED);
- if (skb != NULL)
+ if (skb != NULL) {
+ sk->sk_rx_dst = dst_clone(skb_dst(skb));
security_inet_conn_established(sk, skb);
+ }
/* Make sure socket is routed, for correct metrics. */
icsk->icsk_af_ops->rebuild_header(sk);
@@ -5760,6 +5634,45 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
}
}
+static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
+ struct tcp_fastopen_cookie *cookie)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *data = tp->syn_data ? tcp_write_queue_head(sk) : NULL;
+ u16 mss = tp->rx_opt.mss_clamp;
+ bool syn_drop;
+
+ if (mss == tp->rx_opt.user_mss) {
+ struct tcp_options_received opt;
+ const u8 *hash_location;
+
+ /* Get original SYNACK MSS value if user MSS sets mss_clamp */
+ tcp_clear_options(&opt);
+ opt.user_mss = opt.mss_clamp = 0;
+ tcp_parse_options(synack, &opt, &hash_location, 0, NULL);
+ mss = opt.mss_clamp;
+ }
+
+ if (!tp->syn_fastopen) /* Ignore an unsolicited cookie */
+ cookie->len = -1;
+
+ /* The SYN-ACK neither has cookie nor acknowledges the data. Presumably
+ * the remote receives only the retransmitted (regular) SYNs: either
+ * the original SYN-data or the corresponding SYN-ACK is lost.
+ */
+ syn_drop = (cookie->len <= 0 && data &&
+ inet_csk(sk)->icsk_retransmits);
+
+ tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
+
+ if (data) { /* Retransmit unacked data in SYN */
+ tcp_retransmit_skb(sk, data);
+ tcp_rearm_rto(sk);
+ return true;
+ }
+ return false;
+}
+
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len)
{
@@ -5767,9 +5680,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
+ struct tcp_fastopen_cookie foc = { .len = -1 };
int saved_clamp = tp->rx_opt.mss_clamp;
- tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
if (th->ack) {
/* rfc793:
@@ -5779,11 +5693,9 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
* a reset (unless the RST bit is set, if so drop
* the segment and return)"
- *
- * We do not send data with SYN, so that RFC-correct
- * test reduces to:
*/
- if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
+ if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
+ after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt))
goto reset_and_undo;
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
@@ -5895,6 +5807,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
tcp_finish_connect(sk, skb);
+ if ((tp->syn_fastopen || tp->syn_data) &&
+ tcp_rcv_fastopen_synack(sk, skb, &foc))
+ return -1;
+
if (sk->sk_write_pending ||
icsk->icsk_accept_queue.rskq_defer_accept ||
icsk->icsk_ack.pingpong) {
@@ -6013,7 +5929,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int queued = 0;
- int res;
tp->rx_opt.saw_tstamp = 0;
@@ -6068,9 +5983,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
return 0;
}
- res = tcp_validate_incoming(sk, skb, th, 0);
- if (res <= 0)
- return -res;
+ if (!tcp_validate_incoming(sk, skb, th, 0))
+ return 0;
/* step 5: check the ACK field */
if (th->ack) {
@@ -6126,9 +6040,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
case TCP_FIN_WAIT1:
if (tp->snd_una == tp->write_seq) {
+ struct dst_entry *dst;
+
tcp_set_state(sk, TCP_FIN_WAIT2);
sk->sk_shutdown |= SEND_SHUTDOWN;
- dst_confirm(__sk_dst_get(sk));
+
+ dst = __sk_dst_get(sk);
+ if (dst)
+ dst_confirm(dst);
if (!sock_flag(sk, SOCK_DEAD))
/* Wake up lingering close() */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index c8d28c433b2b..3e30548ac32a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -209,22 +209,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
if (tcp_death_row.sysctl_tw_recycle &&
- !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
- struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
- /*
- * VJ's idea. We save last timestamp seen from
- * the destination in peer table, when entering state
- * TIME-WAIT * and initialize rx_opt.ts_recent from it,
- * when trying new connection.
- */
- if (peer) {
- inet_peer_refcheck(peer);
- if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
- tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
- tp->rx_opt.ts_recent = peer->tcp_ts;
- }
- }
- }
+ !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
+ tcp_fetch_timewait_stamp(sk, &rt->dst);
inet->inet_dport = usin->sin_port;
inet->inet_daddr = daddr;
@@ -289,12 +275,15 @@ failure:
EXPORT_SYMBOL(tcp_v4_connect);
/*
- * This routine does path mtu discovery as defined in RFC1191.
+ * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
+ * It can be called through tcp_release_cb() if socket was owned by user
+ * at the time tcp_v4_err() was called to handle ICMP message.
*/
-static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
+static void tcp_v4_mtu_reduced(struct sock *sk)
{
struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
+ u32 mtu = tcp_sk(sk)->mtu_info;
/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
* send out by Linux are always <576bytes so they should go through
@@ -303,17 +292,10 @@ static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
if (sk->sk_state == TCP_LISTEN)
return;
- /* We don't check in the destentry if pmtu discovery is forbidden
- * on this route. We just assume that no packet_to_big packets
- * are send back when pmtu discovery is not active.
- * There is a small race when the user changes this flag in the
- * route, but I think that's acceptable.
- */
- if ((dst = __sk_dst_check(sk, 0)) == NULL)
+ dst = inet_csk_update_pmtu(sk, mtu);
+ if (!dst)
return;
- dst->ops->update_pmtu(dst, mtu);
-
/* Something is about to be wrong... Remember soft error
* for the case, if this connection will not able to recover.
*/
@@ -335,6 +317,14 @@ static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
} /* else let the usual retransmit timer handle it */
}
+static void do_redirect(struct sk_buff *skb, struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_check(sk, 0);
+
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+}
+
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
@@ -386,8 +376,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
+ * We do take care of PMTU discovery (RFC1191) special case :
+ * we can receive locally generated ICMP messages while socket is held.
*/
- if (sock_owned_by_user(sk))
+ if (sock_owned_by_user(sk) &&
+ type != ICMP_DEST_UNREACH &&
+ code != ICMP_FRAG_NEEDED)
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE)
@@ -408,6 +402,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
}
switch (type) {
+ case ICMP_REDIRECT:
+ do_redirect(icmp_skb, sk);
+ goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
@@ -419,8 +416,11 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
+ tp->mtu_info = info;
if (!sock_owned_by_user(sk))
- do_pmtu_discovery(sk, iph, info);
+ tcp_v4_mtu_reduced(sk);
+ else
+ set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
goto out;
}
@@ -698,8 +698,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
net = dev_net(skb_dst(skb)->dev);
arg.tos = ip_hdr(skb)->tos;
- ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
- &arg, arg.iov[0].iov_len);
+ ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -781,8 +781,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
if (oif)
arg.bound_dev_if = oif;
arg.tos = tos;
- ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
- &arg, arg.iov[0].iov_len);
+ ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
}
@@ -825,7 +825,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct request_values *rvp,
- u16 queue_mapping)
+ u16 queue_mapping,
+ bool nocache)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
@@ -848,7 +849,6 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
err = net_xmit_eval(err);
}
- dst_release(dst);
return err;
}
@@ -856,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
struct request_values *rvp)
{
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
- return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
+ return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
}
/*
@@ -1317,7 +1317,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1375,7 +1375,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
isn = cookie_v4_init_sequence(sk, skb, &req->mss);
req->cookie_ts = tmp_opt.tstamp_ok;
} else if (!isn) {
- struct inet_peer *peer = NULL;
struct flowi4 fl4;
/* VJ's idea. We save last timestamp seen
@@ -1390,12 +1389,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
(dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
- fl4.daddr == saddr &&
- (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
- inet_peer_refcheck(peer);
- if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
- (s32)(peer->tcp_ts - req->ts_recent) >
- TCP_PAWS_WINDOW) {
+ fl4.daddr == saddr) {
+ if (!tcp_peer_is_proven(req, dst, true)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
goto drop_and_release;
}
@@ -1404,8 +1399,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
else if (!sysctl_tcp_syncookies &&
(sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(sysctl_max_syn_backlog >> 2)) &&
- (!peer || !peer->tcp_ts_stamp) &&
- (!dst || !dst_metric(dst, RTAX_RTT))) {
+ !tcp_peer_is_proven(req, dst, false)) {
/* Without syncookies last quarter of
* backlog is filled with destinations,
* proven to be alive.
@@ -1425,7 +1419,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (tcp_v4_send_synack(sk, dst, req,
(struct request_values *)&tmp_ext,
- skb_get_queue_mapping(skb)) ||
+ skb_get_queue_mapping(skb),
+ want_cookie) ||
want_cookie)
goto drop_and_free;
@@ -1623,6 +1618,20 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
sock_rps_save_rxhash(sk, skb);
+ if (sk->sk_rx_dst) {
+ struct dst_entry *dst = sk->sk_rx_dst;
+ if (dst->ops->check(dst, 0) == NULL) {
+ dst_release(dst);
+ sk->sk_rx_dst = NULL;
+ }
+ }
+ if (unlikely(sk->sk_rx_dst == NULL)) {
+ struct inet_sock *icsk = inet_sk(sk);
+ struct rtable *rt = skb_rtable(skb);
+
+ sk->sk_rx_dst = dst_clone(&rt->dst);
+ icsk->rx_dst_ifindex = inet_iif(skb);
+ }
if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
rsk = sk;
goto reset;
@@ -1672,6 +1681,49 @@ csum_err:
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
+void tcp_v4_early_demux(struct sk_buff *skb)
+{
+ struct net *net = dev_net(skb->dev);
+ const struct iphdr *iph;
+ const struct tcphdr *th;
+ struct net_device *dev;
+ struct sock *sk;
+
+ if (skb->pkt_type != PACKET_HOST)
+ return;
+
+ if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
+ return;
+
+ iph = ip_hdr(skb);
+ th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));
+
+ if (th->doff < sizeof(struct tcphdr) / 4)
+ return;
+
+ if (!pskb_may_pull(skb, ip_hdrlen(skb) + th->doff * 4))
+ return;
+
+ dev = skb->dev;
+ sk = __inet_lookup_established(net, &tcp_hashinfo,
+ iph->saddr, th->source,
+ iph->daddr, ntohs(th->dest),
+ dev->ifindex);
+ if (sk) {
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+ if (sk->sk_state != TCP_TIME_WAIT) {
+ struct dst_entry *dst = sk->sk_rx_dst;
+ struct inet_sock *icsk = inet_sk(sk);
+ if (dst)
+ dst = dst_check(dst, 0);
+ if (dst &&
+ icsk->rx_dst_ifindex == dev->ifindex)
+ skb_dst_set_noref(skb, dst);
+ }
+ }
+}
+
/*
* From tcp_input.c
*/
@@ -1821,40 +1873,10 @@ do_time_wait:
goto discard_it;
}
-struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
-{
- struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
- struct inet_sock *inet = inet_sk(sk);
- struct inet_peer *peer;
-
- if (!rt ||
- inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
- peer = inet_getpeer_v4(inet->inet_daddr, 1);
- *release_it = true;
- } else {
- if (!rt->peer)
- rt_bind_peer(rt, inet->inet_daddr, 1);
- peer = rt->peer;
- *release_it = false;
- }
-
- return peer;
-}
-EXPORT_SYMBOL(tcp_v4_get_peer);
-
-void *tcp_v4_tw_get_peer(struct sock *sk)
-{
- const struct inet_timewait_sock *tw = inet_twsk(sk);
-
- return inet_getpeer_v4(tw->tw_daddr, 1);
-}
-EXPORT_SYMBOL(tcp_v4_tw_get_peer);
-
static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp_timewait_sock),
.twsk_unique = tcp_twsk_unique,
.twsk_destructor= tcp_twsk_destructor,
- .twsk_getpeer = tcp_v4_tw_get_peer,
};
const struct inet_connection_sock_af_ops ipv4_specific = {
@@ -1863,7 +1885,6 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
.rebuild_header = inet_sk_rebuild_header,
.conn_request = tcp_v4_conn_request,
.syn_recv_sock = tcp_v4_syn_recv_sock,
- .get_peer = tcp_v4_get_peer,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
@@ -1953,6 +1974,9 @@ void tcp_v4_destroy_sock(struct sock *sk)
tp->cookie_values = NULL;
}
+ /* If socket is aborted during connect operation */
+ tcp_free_fastopen_req(tp);
+
sk_sockets_allocated_dec(sk);
sock_release_memcg(sk);
}
@@ -2593,6 +2617,8 @@ struct proto tcp_prot = {
.sendmsg = tcp_sendmsg,
.sendpage = tcp_sendpage,
.backlog_rcv = tcp_v4_do_rcv,
+ .release_cb = tcp_release_cb,
+ .mtu_reduced = tcp_v4_mtu_reduced,
.hash = inet_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
@@ -2624,13 +2650,11 @@ EXPORT_SYMBOL(tcp_prot);
static int __net_init tcp_sk_init(struct net *net)
{
- return inet_ctl_sock_create(&net->ipv4.tcp_sock,
- PF_INET, SOCK_RAW, IPPROTO_TCP, net);
+ return 0;
}
static void __net_exit tcp_sk_exit(struct net *net)
{
- inet_ctl_sock_destroy(net->ipv4.tcp_sock);
}
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
new file mode 100644
index 000000000000..2288a6399e1e
--- /dev/null
+++ b/net/ipv4/tcp_metrics.c
@@ -0,0 +1,745 @@
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/cache.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/tcp.h>
+#include <linux/hash.h>
+
+#include <net/inet_connection_sock.h>
+#include <net/net_namespace.h>
+#include <net/request_sock.h>
+#include <net/inetpeer.h>
+#include <net/sock.h>
+#include <net/ipv6.h>
+#include <net/dst.h>
+#include <net/tcp.h>
+
+int sysctl_tcp_nometrics_save __read_mostly;
+
+enum tcp_metric_index {
+ TCP_METRIC_RTT,
+ TCP_METRIC_RTTVAR,
+ TCP_METRIC_SSTHRESH,
+ TCP_METRIC_CWND,
+ TCP_METRIC_REORDERING,
+
+ /* Always last. */
+ TCP_METRIC_MAX,
+};
+
+struct tcp_fastopen_metrics {
+ u16 mss;
+ u16 syn_loss:10; /* Recurring Fast Open SYN losses */
+ unsigned long last_syn_loss; /* Last Fast Open SYN loss */
+ struct tcp_fastopen_cookie cookie;
+};
+
+struct tcp_metrics_block {
+ struct tcp_metrics_block __rcu *tcpm_next;
+ struct inetpeer_addr tcpm_addr;
+ unsigned long tcpm_stamp;
+ u32 tcpm_ts;
+ u32 tcpm_ts_stamp;
+ u32 tcpm_lock;
+ u32 tcpm_vals[TCP_METRIC_MAX];
+ struct tcp_fastopen_metrics tcpm_fastopen;
+};
+
+static bool tcp_metric_locked(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx)
+{
+ return tm->tcpm_lock & (1 << idx);
+}
+
+static u32 tcp_metric_get(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx)
+{
+ return tm->tcpm_vals[idx];
+}
+
+static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx)
+{
+ return msecs_to_jiffies(tm->tcpm_vals[idx]);
+}
+
+static void tcp_metric_set(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx,
+ u32 val)
+{
+ tm->tcpm_vals[idx] = val;
+}
+
+static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
+ enum tcp_metric_index idx,
+ u32 val)
+{
+ tm->tcpm_vals[idx] = jiffies_to_msecs(val);
+}
+
+static bool addr_same(const struct inetpeer_addr *a,
+ const struct inetpeer_addr *b)
+{
+ const struct in6_addr *a6, *b6;
+
+ if (a->family != b->family)
+ return false;
+ if (a->family == AF_INET)
+ return a->addr.a4 == b->addr.a4;
+
+ a6 = (const struct in6_addr *) &a->addr.a6[0];
+ b6 = (const struct in6_addr *) &b->addr.a6[0];
+
+ return ipv6_addr_equal(a6, b6);
+}
+
+struct tcpm_hash_bucket {
+ struct tcp_metrics_block __rcu *chain;
+};
+
+static DEFINE_SPINLOCK(tcp_metrics_lock);
+
+static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+ u32 val;
+
+ tm->tcpm_stamp = jiffies;
+
+ val = 0;
+ if (dst_metric_locked(dst, RTAX_RTT))
+ val |= 1 << TCP_METRIC_RTT;
+ if (dst_metric_locked(dst, RTAX_RTTVAR))
+ val |= 1 << TCP_METRIC_RTTVAR;
+ if (dst_metric_locked(dst, RTAX_SSTHRESH))
+ val |= 1 << TCP_METRIC_SSTHRESH;
+ if (dst_metric_locked(dst, RTAX_CWND))
+ val |= 1 << TCP_METRIC_CWND;
+ if (dst_metric_locked(dst, RTAX_REORDERING))
+ val |= 1 << TCP_METRIC_REORDERING;
+ tm->tcpm_lock = val;
+
+ tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
+ tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
+ tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
+ tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
+ tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
+ tm->tcpm_ts = 0;
+ tm->tcpm_ts_stamp = 0;
+ tm->tcpm_fastopen.mss = 0;
+ tm->tcpm_fastopen.syn_loss = 0;
+ tm->tcpm_fastopen.cookie.len = 0;
+}
+
+static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
+ struct inetpeer_addr *addr,
+ unsigned int hash,
+ bool reclaim)
+{
+ struct tcp_metrics_block *tm;
+ struct net *net;
+
+ spin_lock_bh(&tcp_metrics_lock);
+ net = dev_net(dst->dev);
+ if (unlikely(reclaim)) {
+ struct tcp_metrics_block *oldest;
+
+ oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
+ for (tm = rcu_dereference(oldest->tcpm_next); tm;
+ tm = rcu_dereference(tm->tcpm_next)) {
+ if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
+ oldest = tm;
+ }
+ tm = oldest;
+ } else {
+ tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
+ if (!tm)
+ goto out_unlock;
+ }
+ tm->tcpm_addr = *addr;
+
+ tcpm_suck_dst(tm, dst);
+
+ if (likely(!reclaim)) {
+ tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
+ rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
+ }
+
+out_unlock:
+ spin_unlock_bh(&tcp_metrics_lock);
+ return tm;
+}
+
+#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
+
+static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+ if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+ tcpm_suck_dst(tm, dst);
+}
+
+#define TCP_METRICS_RECLAIM_DEPTH 5
+#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
+
+static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
+{
+ if (tm)
+ return tm;
+ if (depth > TCP_METRICS_RECLAIM_DEPTH)
+ return TCP_METRICS_RECLAIM_PTR;
+ return NULL;
+}
+
+static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
+ struct net *net, unsigned int hash)
+{
+ struct tcp_metrics_block *tm;
+ int depth = 0;
+
+ for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+ tm = rcu_dereference(tm->tcpm_next)) {
+ if (addr_same(&tm->tcpm_addr, addr))
+ break;
+ depth++;
+ }
+ return tcp_get_encode(tm, depth);
+}
+
+static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
+ struct dst_entry *dst)
+{
+ struct tcp_metrics_block *tm;
+ struct inetpeer_addr addr;
+ unsigned int hash;
+ struct net *net;
+
+ addr.family = req->rsk_ops->family;
+ switch (addr.family) {
+ case AF_INET:
+ addr.addr.a4 = inet_rsk(req)->rmt_addr;
+ hash = (__force unsigned int) addr.addr.a4;
+ break;
+ case AF_INET6:
+ *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
+ hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
+ break;
+ default:
+ return NULL;
+ }
+
+ net = dev_net(dst->dev);
+ hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+
+ for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+ tm = rcu_dereference(tm->tcpm_next)) {
+ if (addr_same(&tm->tcpm_addr, &addr))
+ break;
+ }
+ tcpm_check_stamp(tm, dst);
+ return tm;
+}
+
+static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
+{
+ struct inet6_timewait_sock *tw6;
+ struct tcp_metrics_block *tm;
+ struct inetpeer_addr addr;
+ unsigned int hash;
+ struct net *net;
+
+ addr.family = tw->tw_family;
+ switch (addr.family) {
+ case AF_INET:
+ addr.addr.a4 = tw->tw_daddr;
+ hash = (__force unsigned int) addr.addr.a4;
+ break;
+ case AF_INET6:
+ tw6 = inet6_twsk((struct sock *)tw);
+ *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
+ hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
+ break;
+ default:
+ return NULL;
+ }
+
+ net = twsk_net(tw);
+ hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+
+ for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
+ tm = rcu_dereference(tm->tcpm_next)) {
+ if (addr_same(&tm->tcpm_addr, &addr))
+ break;
+ }
+ return tm;
+}
+
+static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
+ struct dst_entry *dst,
+ bool create)
+{
+ struct tcp_metrics_block *tm;
+ struct inetpeer_addr addr;
+ unsigned int hash;
+ struct net *net;
+ bool reclaim;
+
+ addr.family = sk->sk_family;
+ switch (addr.family) {
+ case AF_INET:
+ addr.addr.a4 = inet_sk(sk)->inet_daddr;
+ hash = (__force unsigned int) addr.addr.a4;
+ break;
+ case AF_INET6:
+ *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
+ hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
+ break;
+ default:
+ return NULL;
+ }
+
+ net = dev_net(dst->dev);
+ hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
+
+ tm = __tcp_get_metrics(&addr, net, hash);
+ reclaim = false;
+ if (tm == TCP_METRICS_RECLAIM_PTR) {
+ reclaim = true;
+ tm = NULL;
+ }
+ if (!tm && create)
+ tm = tcpm_new(dst, &addr, hash, reclaim);
+ else
+ tcpm_check_stamp(tm, dst);
+
+ return tm;
+}
+
+/* Save metrics learned by this TCP session. This function is called
+ * only, when TCP finishes successfully i.e. when it enters TIME-WAIT
+ * or goes from LAST-ACK to CLOSE.
+ */
+void tcp_update_metrics(struct sock *sk)
+{
+ const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct dst_entry *dst = __sk_dst_get(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_metrics_block *tm;
+ unsigned long rtt;
+ u32 val;
+ int m;
+
+ if (sysctl_tcp_nometrics_save || !dst)
+ return;
+
+ if (dst->flags & DST_HOST)
+ dst_confirm(dst);
+
+ rcu_read_lock();
+ if (icsk->icsk_backoff || !tp->srtt) {
+ /* This session failed to estimate rtt. Why?
+ * Probably, no packets returned in time. Reset our
+ * results.
+ */
+ tm = tcp_get_metrics(sk, dst, false);
+ if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
+ tcp_metric_set(tm, TCP_METRIC_RTT, 0);
+ goto out_unlock;
+ } else
+ tm = tcp_get_metrics(sk, dst, true);
+
+ if (!tm)
+ goto out_unlock;
+
+ rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
+ m = rtt - tp->srtt;
+
+ /* If newly calculated rtt larger than stored one, store new
+ * one. Otherwise, use EWMA. Remember, rtt overestimation is
+ * always better than underestimation.
+ */
+ if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
+ if (m <= 0)
+ rtt = tp->srtt;
+ else
+ rtt -= (m >> 3);
+ tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
+ }
+
+ if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
+ unsigned long var;
+
+ if (m < 0)
+ m = -m;
+
+ /* Scale deviation to rttvar fixed point */
+ m >>= 1;
+ if (m < tp->mdev)
+ m = tp->mdev;
+
+ var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
+ if (m >= var)
+ var = m;
+ else
+ var -= (var - m) >> 2;
+
+ tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
+ }
+
+ if (tcp_in_initial_slowstart(tp)) {
+ /* Slow start still did not finish. */
+ if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
+ val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
+ if (val && (tp->snd_cwnd >> 1) > val)
+ tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
+ tp->snd_cwnd >> 1);
+ }
+ if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
+ val = tcp_metric_get(tm, TCP_METRIC_CWND);
+ if (tp->snd_cwnd > val)
+ tcp_metric_set(tm, TCP_METRIC_CWND,
+ tp->snd_cwnd);
+ }
+ } else if (tp->snd_cwnd > tp->snd_ssthresh &&
+ icsk->icsk_ca_state == TCP_CA_Open) {
+ /* Cong. avoidance phase, cwnd is reliable. */
+ if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
+ tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
+ max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
+ if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
+ val = tcp_metric_get(tm, TCP_METRIC_CWND);
+ tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
+ }
+ } else {
+ /* Else slow start did not finish, cwnd is non-sense,
+ * ssthresh may be also invalid.
+ */
+ if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
+ val = tcp_metric_get(tm, TCP_METRIC_CWND);
+ tcp_metric_set(tm, TCP_METRIC_CWND,
+ (val + tp->snd_ssthresh) >> 1);
+ }
+ if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
+ val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
+ if (val && tp->snd_ssthresh > val)
+ tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
+ tp->snd_ssthresh);
+ }
+ if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
+ val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
+ if (val < tp->reordering &&
+ tp->reordering != sysctl_tcp_reordering)
+ tcp_metric_set(tm, TCP_METRIC_REORDERING,
+ tp->reordering);
+ }
+ }
+ tm->tcpm_stamp = jiffies;
+out_unlock:
+ rcu_read_unlock();
+}
+
+/* Initialize metrics on socket. */
+
+void tcp_init_metrics(struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_get(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_metrics_block *tm;
+ u32 val;
+
+ if (dst == NULL)
+ goto reset;
+
+ dst_confirm(dst);
+
+ rcu_read_lock();
+ tm = tcp_get_metrics(sk, dst, true);
+ if (!tm) {
+ rcu_read_unlock();
+ goto reset;
+ }
+
+ if (tcp_metric_locked(tm, TCP_METRIC_CWND))
+ tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);
+
+ val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
+ if (val) {
+ tp->snd_ssthresh = val;
+ if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
+ tp->snd_ssthresh = tp->snd_cwnd_clamp;
+ } else {
+ /* ssthresh may have been reduced unnecessarily during.
+ * 3WHS. Restore it back to its initial default.
+ */
+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+ }
+ val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
+ if (val && tp->reordering != val) {
+ tcp_disable_fack(tp);
+ tcp_disable_early_retrans(tp);
+ tp->reordering = val;
+ }
+
+ val = tcp_metric_get(tm, TCP_METRIC_RTT);
+ if (val == 0 || tp->srtt == 0) {
+ rcu_read_unlock();
+ goto reset;
+ }
+ /* Initial rtt is determined from SYN,SYN-ACK.
+ * The segment is small and rtt may appear much
+ * less than real one. Use per-dst memory
+ * to make it more realistic.
+ *
+ * A bit of theory. RTT is time passed after "normal" sized packet
+ * is sent until it is ACKed. In normal circumstances sending small
+ * packets force peer to delay ACKs and calculation is correct too.
+ * The algorithm is adaptive and, provided we follow specs, it
+ * NEVER underestimate RTT. BUT! If peer tries to make some clever
+ * tricks sort of "quick acks" for time long enough to decrease RTT
+ * to low value, and then abruptly stops to do it and starts to delay
+ * ACKs, wait for troubles.
+ */
+ val = msecs_to_jiffies(val);
+ if (val > tp->srtt) {
+ tp->srtt = val;
+ tp->rtt_seq = tp->snd_nxt;
+ }
+ val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
+ if (val > tp->mdev) {
+ tp->mdev = val;
+ tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
+ }
+ rcu_read_unlock();
+
+ tcp_set_rto(sk);
+reset:
+ if (tp->srtt == 0) {
+ /* RFC6298: 5.7 We've failed to get a valid RTT sample from
+ * 3WHS. This is most likely due to retransmission,
+ * including spurious one. Reset the RTO back to 3secs
+ * from the more aggressive 1sec to avoid more spurious
+ * retransmission.
+ */
+ tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
+ inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
+ }
+ /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
+ * retransmitted. In light of RFC6298 more aggressive 1sec
+ * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
+ * retransmission has occurred.
+ */
+ if (tp->total_retrans > 1)
+ tp->snd_cwnd = 1;
+ else
+ tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+ tp->snd_cwnd_stamp = tcp_time_stamp;
+}
+
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
+{
+ struct tcp_metrics_block *tm;
+ bool ret;
+
+ if (!dst)
+ return false;
+
+ rcu_read_lock();
+ tm = __tcp_get_metrics_req(req, dst);
+ if (paws_check) {
+ if (tm &&
+ (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
+ (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
+ ret = false;
+ else
+ ret = true;
+ } else {
+ if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
+ ret = true;
+ else
+ ret = false;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
+
+void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
+{
+ struct tcp_metrics_block *tm;
+
+ rcu_read_lock();
+ tm = tcp_get_metrics(sk, dst, true);
+ if (tm) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
+ tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
+ tp->rx_opt.ts_recent = tm->tcpm_ts;
+ }
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);
+
+/* VJ's idea. Save last timestamp seen from this destination and hold
+ * it at least for normal timewait interval to use for duplicate
+ * segment detection in subsequent connections, before they enter
+ * synchronized state.
+ */
+bool tcp_remember_stamp(struct sock *sk)
+{
+ struct dst_entry *dst = __sk_dst_get(sk);
+ bool ret = false;
+
+ if (dst) {
+ struct tcp_metrics_block *tm;
+
+ rcu_read_lock();
+ tm = tcp_get_metrics(sk, dst, true);
+ if (tm) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
+ ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
+ tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
+ tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
+ tm->tcpm_ts = tp->rx_opt.ts_recent;
+ }
+ ret = true;
+ }
+ rcu_read_unlock();
+ }
+ return ret;
+}
+
+bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
+{
+ struct tcp_metrics_block *tm;
+ bool ret = false;
+
+ rcu_read_lock();
+ tm = __tcp_get_metrics_tw(tw);
+ if (tm) {
+ const struct tcp_timewait_sock *tcptw;
+ struct sock *sk = (struct sock *) tw;
+
+ tcptw = tcp_twsk(sk);
+ if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
+ ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
+ tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
+ tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
+ tm->tcpm_ts = tcptw->tw_ts_recent;
+ }
+ ret = true;
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static DEFINE_SEQLOCK(fastopen_seqlock);
+
+void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+ struct tcp_fastopen_cookie *cookie,
+ int *syn_loss, unsigned long *last_syn_loss)
+{
+ struct tcp_metrics_block *tm;
+
+ rcu_read_lock();
+ tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
+ if (tm) {
+ struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
+ unsigned int seq;
+
+ do {
+ seq = read_seqbegin(&fastopen_seqlock);
+ if (tfom->mss)
+ *mss = tfom->mss;
+ *cookie = tfom->cookie;
+ *syn_loss = tfom->syn_loss;
+ *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
+ } while (read_seqretry(&fastopen_seqlock, seq));
+ }
+ rcu_read_unlock();
+}
+
+void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+ struct tcp_fastopen_cookie *cookie, bool syn_lost)
+{
+ struct tcp_metrics_block *tm;
+
+ rcu_read_lock();
+ tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
+ if (tm) {
+ struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
+
+ write_seqlock_bh(&fastopen_seqlock);
+ tfom->mss = mss;
+ if (cookie->len > 0)
+ tfom->cookie = *cookie;
+ if (syn_lost) {
+ ++tfom->syn_loss;
+ tfom->last_syn_loss = jiffies;
+ } else
+ tfom->syn_loss = 0;
+ write_sequnlock_bh(&fastopen_seqlock);
+ }
+ rcu_read_unlock();
+}
+
+static unsigned int tcpmhash_entries;
+static int __init set_tcpmhash_entries(char *str)
+{
+ ssize_t ret;
+
+ if (!str)
+ return 0;
+
+ ret = kstrtouint(str, 0, &tcpmhash_entries);
+ if (ret)
+ return 0;
+
+ return 1;
+}
+__setup("tcpmhash_entries=", set_tcpmhash_entries);
+
+static int __net_init tcp_net_metrics_init(struct net *net)
+{
+ size_t size;
+ unsigned int slots;
+
+ slots = tcpmhash_entries;
+ if (!slots) {
+ if (totalram_pages >= 128 * 1024)
+ slots = 16 * 1024;
+ else
+ slots = 8 * 1024;
+ }
+
+ net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
+ size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;
+
+ net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL);
+ if (!net->ipv4.tcp_metrics_hash)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __net_exit tcp_net_metrics_exit(struct net *net)
+{
+ kfree(net->ipv4.tcp_metrics_hash);
+}
+
+static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
+ .init = tcp_net_metrics_init,
+ .exit = tcp_net_metrics_exit,
+};
+
+void __init tcp_metrics_init(void)
+{
+ register_pernet_subsys(&tcp_net_metrics_ops);
+}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b85d9fe7d663..5912ac3fd240 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -49,56 +49,6 @@ struct inet_timewait_death_row tcp_death_row = {
};
EXPORT_SYMBOL_GPL(tcp_death_row);
-/* VJ's idea. Save last timestamp seen from this destination
- * and hold it at least for normal timewait interval to use for duplicate
- * segment detection in subsequent connections, before they enter synchronized
- * state.
- */
-
-static bool tcp_remember_stamp(struct sock *sk)
-{
- const struct inet_connection_sock *icsk = inet_csk(sk);
- struct tcp_sock *tp = tcp_sk(sk);
- struct inet_peer *peer;
- bool release_it;
-
- peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
- if (peer) {
- if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
- ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
- peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
- peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
- peer->tcp_ts = tp->rx_opt.ts_recent;
- }
- if (release_it)
- inet_putpeer(peer);
- return true;
- }
-
- return false;
-}
-
-static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
-{
- struct sock *sk = (struct sock *) tw;
- struct inet_peer *peer;
-
- peer = twsk_getpeer(sk);
- if (peer) {
- const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
-
- if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
- ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
- peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
- peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
- peer->tcp_ts = tcptw->tw_ts_recent;
- }
- inet_putpeer(peer);
- return true;
- }
- return false;
-}
-
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
if (seq == s_win)
@@ -147,7 +97,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = tcptw->tw_ts_recent;
@@ -327,8 +277,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
if (tw != NULL) {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
+ struct inet_sock *inet = inet_sk(sk);
- tw->tw_transparent = inet_sk(sk)->transparent;
+ tw->tw_transparent = inet->transparent;
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
tcptw->tw_rcv_nxt = tp->rcv_nxt;
tcptw->tw_snd_nxt = tp->snd_nxt;
@@ -403,6 +354,7 @@ void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+
if (twsk->tw_md5_key) {
tcp_free_md5sig_pool();
kfree_rcu(twsk->tw_md5_key, rcu);
@@ -435,6 +387,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
struct tcp_sock *oldtp = tcp_sk(sk);
struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
+ newsk->sk_rx_dst = dst_clone(skb_dst(skb));
+
/* TCP Cookie Transactions require space for the cookie pair,
* as it differs for each connection. There is no need to
* copy any s_data_payload stored at the original socket.
@@ -470,6 +424,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
treq->snt_isn + 1 + tcp_s_data_size(oldtp);
tcp_prequeue_init(newtp);
+ INIT_LIST_HEAD(&newtp->tsq_node);
tcp_init_wl(newtp, treq->rcv_isn);
@@ -579,7 +534,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
tmp_opt.saw_tstamp = 0;
if (th->doff > (sizeof(struct tcphdr)>>2)) {
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
if (tmp_opt.saw_tstamp) {
tmp_opt.ts_recent = req->ts_recent;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 803cbfe82fbc..33cd065cfbd8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -50,6 +50,9 @@ int sysctl_tcp_retrans_collapse __read_mostly = 1;
*/
int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
+/* Default TSQ limit of two TSO segments */
+int sysctl_tcp_limit_output_bytes __read_mostly = 131072;
+
/* This limits the percentage of the congestion window which we
* will allow a single TSO frame to consume. Building TSO frames
* which are too large can cause TCP streams to be bursty.
@@ -65,6 +68,8 @@ int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
+static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+ int push_one, gfp_t gfp);
/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
@@ -380,15 +385,17 @@ static inline bool tcp_urg_mode(const struct tcp_sock *tp)
#define OPTION_MD5 (1 << 2)
#define OPTION_WSCALE (1 << 3)
#define OPTION_COOKIE_EXTENSION (1 << 4)
+#define OPTION_FAST_OPEN_COOKIE (1 << 8)
struct tcp_out_options {
- u8 options; /* bit field of OPTION_* */
+ u16 options; /* bit field of OPTION_* */
+ u16 mss; /* 0 to disable */
u8 ws; /* window scale, 0 to disable */
u8 num_sack_blocks; /* number of SACK blocks to include */
u8 hash_size; /* bytes in hash_location */
- u16 mss; /* 0 to disable */
- __u32 tsval, tsecr; /* need to include OPTION_TS */
__u8 *hash_location; /* temporary pointer, overloaded */
+ __u32 tsval, tsecr; /* need to include OPTION_TS */
+ struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */
};
/* The sysctl int routines are generic, so check consistency here.
@@ -437,7 +444,7 @@ static u8 tcp_cookie_size_check(u8 desired)
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
struct tcp_out_options *opts)
{
- u8 options = opts->options; /* mungable copy */
+ u16 options = opts->options; /* mungable copy */
/* Having both authentication and cookies for security is redundant,
* and there's certainly not enough room. Instead, the cookie-less
@@ -559,6 +566,21 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
tp->rx_opt.dsack = 0;
}
+
+ if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
+ struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
+
+ *ptr++ = htonl((TCPOPT_EXP << 24) |
+ ((TCPOLEN_EXP_FASTOPEN_BASE + foc->len) << 16) |
+ TCPOPT_FASTOPEN_MAGIC);
+
+ memcpy(ptr, foc->val, foc->len);
+ if ((foc->len & 3) == 2) {
+ u8 *align = ((u8 *)ptr) + foc->len;
+ align[0] = align[1] = TCPOPT_NOP;
+ }
+ ptr += (foc->len + 3) >> 2;
+ }
}
/* Compute TCP options for SYN packets. This is not the final
@@ -574,6 +596,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
tcp_cookie_size_check(cvp->cookie_desired) :
0;
+ struct tcp_fastopen_request *fastopen = tp->fastopen_req;
#ifdef CONFIG_TCP_MD5SIG
*md5 = tp->af_specific->md5_lookup(sk, sk);
@@ -614,6 +637,16 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
remaining -= TCPOLEN_SACKPERM_ALIGNED;
}
+ if (fastopen && fastopen->cookie.len >= 0) {
+ u32 need = TCPOLEN_EXP_FASTOPEN_BASE + fastopen->cookie.len;
+ need = (need + 3) & ~3U; /* Align to 32 bits */
+ if (remaining >= need) {
+ opts->options |= OPTION_FAST_OPEN_COOKIE;
+ opts->fastopen_cookie = &fastopen->cookie;
+ remaining -= need;
+ tp->syn_fastopen = 1;
+ }
+ }
/* Note that timestamps are required by the specification.
*
* Odd numbers of bytes are prohibited by the specification, ensuring
@@ -783,6 +816,156 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
return size;
}
+
+/* TCP SMALL QUEUES (TSQ)
+ *
+ * TSQ goal is to keep small amount of skbs per tcp flow in tx queues (qdisc+dev)
+ * to reduce RTT and bufferbloat.
+ * We do this using a special skb destructor (tcp_wfree).
+ *
+ * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb
+ * needs to be reallocated in a driver.
+ * The invariant being skb->truesize substracted from sk->sk_wmem_alloc
+ *
+ * Since transmit from skb destructor is forbidden, we use a tasklet
+ * to process all sockets that eventually need to send more skbs.
+ * We use one tasklet per cpu, with its own queue of sockets.
+ */
+struct tsq_tasklet {
+ struct tasklet_struct tasklet;
+ struct list_head head; /* queue of tcp sockets */
+};
+static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
+
+static void tcp_tsq_handler(struct sock *sk)
+{
+ if ((1 << sk->sk_state) &
+ (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
+ tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
+}
+/*
+ * One tasklest per cpu tries to send more skbs.
+ * We run in tasklet context but need to disable irqs when
+ * transfering tsq->head because tcp_wfree() might
+ * interrupt us (non NAPI drivers)
+ */
+static void tcp_tasklet_func(unsigned long data)
+{
+ struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
+ LIST_HEAD(list);
+ unsigned long flags;
+ struct list_head *q, *n;
+ struct tcp_sock *tp;
+ struct sock *sk;
+
+ local_irq_save(flags);
+ list_splice_init(&tsq->head, &list);
+ local_irq_restore(flags);
+
+ list_for_each_safe(q, n, &list) {
+ tp = list_entry(q, struct tcp_sock, tsq_node);
+ list_del(&tp->tsq_node);
+
+ sk = (struct sock *)tp;
+ bh_lock_sock(sk);
+
+ if (!sock_owned_by_user(sk)) {
+ tcp_tsq_handler(sk);
+ } else {
+ /* defer the work to tcp_release_cb() */
+ set_bit(TCP_TSQ_DEFERRED, &tp->tsq_flags);
+ }
+ bh_unlock_sock(sk);
+
+ clear_bit(TSQ_QUEUED, &tp->tsq_flags);
+ sk_free(sk);
+ }
+}
+
+#define TCP_DEFERRED_ALL ((1UL << TCP_TSQ_DEFERRED) | \
+ (1UL << TCP_WRITE_TIMER_DEFERRED) | \
+ (1UL << TCP_DELACK_TIMER_DEFERRED) | \
+ (1UL << TCP_MTU_REDUCED_DEFERRED))
+/**
+ * tcp_release_cb - tcp release_sock() callback
+ * @sk: socket
+ *
+ * called from release_sock() to perform protocol dependent
+ * actions before socket release.
+ */
+void tcp_release_cb(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long flags, nflags;
+
+ /* perform an atomic operation only if at least one flag is set */
+ do {
+ flags = tp->tsq_flags;
+ if (!(flags & TCP_DEFERRED_ALL))
+ return;
+ nflags = flags & ~TCP_DEFERRED_ALL;
+ } while (cmpxchg(&tp->tsq_flags, flags, nflags) != flags);
+
+ if (flags & (1UL << TCP_TSQ_DEFERRED))
+ tcp_tsq_handler(sk);
+
+ if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED))
+ tcp_write_timer_handler(sk);
+
+ if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED))
+ tcp_delack_timer_handler(sk);
+
+ if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED))
+ sk->sk_prot->mtu_reduced(sk);
+}
+EXPORT_SYMBOL(tcp_release_cb);
+
+void __init tcp_tasklet_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
+
+ INIT_LIST_HEAD(&tsq->head);
+ tasklet_init(&tsq->tasklet,
+ tcp_tasklet_func,
+ (unsigned long)tsq);
+ }
+}
+
+/*
+ * Write buffer destructor automatically called from kfree_skb.
+ * We cant xmit new skbs from this context, as we might already
+ * hold qdisc lock.
+ */
+void tcp_wfree(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (test_and_clear_bit(TSQ_THROTTLED, &tp->tsq_flags) &&
+ !test_and_set_bit(TSQ_QUEUED, &tp->tsq_flags)) {
+ unsigned long flags;
+ struct tsq_tasklet *tsq;
+
+ /* Keep a ref on socket.
+ * This last ref will be released in tcp_tasklet_func()
+ */
+ atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc);
+
+ /* queue this socket to tasklet queue */
+ local_irq_save(flags);
+ tsq = &__get_cpu_var(tsq_tasklet);
+ list_add(&tp->tsq_node, &tsq->head);
+ tasklet_schedule(&tsq->tasklet);
+ local_irq_restore(flags);
+ } else {
+ sock_wfree(skb);
+ }
+}
+
/* This routine actually transmits TCP packets queued in by
* tcp_do_sendmsg(). This is used by both the initial
* transmission and possible later retransmissions.
@@ -844,7 +1027,12 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
- skb_set_owner_w(skb, sk);
+
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ?
+ tcp_wfree : sock_wfree;
+ atomic_add(skb->truesize, &sk->sk_wmem_alloc);
/* Build TCP header and checksum it. */
th = tcp_hdr(skb);
@@ -1780,6 +1968,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
+
tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
BUG_ON(!tso_segs);
@@ -1800,6 +1989,13 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
break;
}
+ /* TSQ : sk_wmem_alloc accounts skb truesize,
+ * including skb overhead. But thats OK.
+ */
+ if (atomic_read(&sk->sk_wmem_alloc) >= sysctl_tcp_limit_output_bytes) {
+ set_bit(TSQ_THROTTLED, &tp->tsq_flags);
+ break;
+ }
limit = mss_now;
if (tso_segs > 1 && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
@@ -2442,7 +2638,16 @@ int tcp_send_synack(struct sock *sk)
return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
-/* Prepare a SYN-ACK. */
+/**
+ * tcp_make_synack - Prepare a SYN-ACK.
+ * sk: listener socket
+ * dst: dst entry attached to the SYNACK
+ * req: request_sock pointer
+ * rvp: request_values pointer
+ *
+ * Allocate one skb and build a SYNACK packet.
+ * @dst is consumed : Caller should not use it again.
+ */
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct request_values *rvp)
@@ -2461,14 +2666,15 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
s_data_desired = cvp->s_data_desired;
- skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
- if (skb == NULL)
+ skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired, GFP_ATOMIC);
+ if (unlikely(!skb)) {
+ dst_release(dst);
return NULL;
-
+ }
/* Reserve space for headers. */
skb_reserve(skb, MAX_TCP_HEADER);
- skb_dst_set(skb, dst_clone(dst));
+ skb_dst_set(skb, dst);
mss = dst_metric_advmss(dst);
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
@@ -2645,6 +2851,109 @@ void tcp_connect_init(struct sock *sk)
tcp_clear_retrans(tp);
}
+static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+
+ tcb->end_seq += skb->len;
+ skb_header_release(skb);
+ __tcp_add_write_queue_tail(sk, skb);
+ sk->sk_wmem_queued += skb->truesize;
+ sk_mem_charge(sk, skb->truesize);
+ tp->write_seq = tcb->end_seq;
+ tp->packets_out += tcp_skb_pcount(skb);
+}
+
+/* Build and send a SYN with data and (cached) Fast Open cookie. However,
+ * queue a data-only packet after the regular SYN, such that regular SYNs
+ * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
+ * only the SYN sequence, the data are retransmitted in the first ACK.
+ * If cookie is not cached or other error occurs, falls back to send a
+ * regular SYN with Fast Open cookie request option.
+ */
+static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_fastopen_request *fo = tp->fastopen_req;
+ int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen;
+ struct sk_buff *syn_data = NULL, *data;
+ unsigned long last_syn_loss = 0;
+
+ tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
+ tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
+ &syn_loss, &last_syn_loss);
+ /* Recurring FO SYN losses: revert to regular handshake temporarily */
+ if (syn_loss > 1 &&
+ time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
+ fo->cookie.len = -1;
+ goto fallback;
+ }
+
+ if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
+ fo->cookie.len = -1;
+ else if (fo->cookie.len <= 0)
+ goto fallback;
+
+ /* MSS for SYN-data is based on cached MSS and bounded by PMTU and
+ * user-MSS. Reserve maximum option space for middleboxes that add
+ * private TCP options. The cost is reduced data space in SYN :(
+ */
+ if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
+ tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
+ space = tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
+ MAX_TCP_OPTION_SPACE;
+
+ syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
+ sk->sk_allocation);
+ if (syn_data == NULL)
+ goto fallback;
+
+ for (i = 0; i < iovlen && syn_data->len < space; ++i) {
+ struct iovec *iov = &fo->data->msg_iov[i];
+ unsigned char __user *from = iov->iov_base;
+ int len = iov->iov_len;
+
+ if (syn_data->len + len > space)
+ len = space - syn_data->len;
+ else if (i + 1 == iovlen)
+ /* No more data pending in inet_wait_for_connect() */
+ fo->data = NULL;
+
+ if (skb_add_data(syn_data, from, len))
+ goto fallback;
+ }
+
+ /* Queue a data-only packet after the regular SYN for retransmission */
+ data = pskb_copy(syn_data, sk->sk_allocation);
+ if (data == NULL)
+ goto fallback;
+ TCP_SKB_CB(data)->seq++;
+ TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN;
+ TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH);
+ tcp_connect_queue_skb(sk, data);
+ fo->copied = data->len;
+
+ if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
+ tp->syn_data = (fo->copied > 0);
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+ goto done;
+ }
+ syn_data = NULL;
+
+fallback:
+ /* Send a regular SYN with Fast Open cookie request option */
+ if (fo->cookie.len > 0)
+ fo->cookie.len = 0;
+ err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
+ if (err)
+ tp->syn_fastopen = 0;
+ kfree_skb(syn_data);
+done:
+ fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */
+ return err;
+}
+
/* Build a SYN and send it off. */
int tcp_connect(struct sock *sk)
{
@@ -2662,17 +2971,13 @@ int tcp_connect(struct sock *sk)
skb_reserve(buff, MAX_TCP_HEADER);
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
+ tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
+ tcp_connect_queue_skb(sk, buff);
TCP_ECN_send_syn(sk, buff);
- /* Send it off. */
- TCP_SKB_CB(buff)->when = tcp_time_stamp;
- tp->retrans_stamp = TCP_SKB_CB(buff)->when;
- skb_header_release(buff);
- __tcp_add_write_queue_tail(sk, buff);
- sk->sk_wmem_queued += buff->truesize;
- sk_mem_charge(sk, buff->truesize);
- tp->packets_out += tcp_skb_pcount(buff);
- err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
+ /* Send off SYN; include data in Fast Open. */
+ err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
+ tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
if (err == -ECONNREFUSED)
return err;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index e911e6c523ec..6df36ad55a38 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -32,17 +32,6 @@ int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
-static void tcp_write_timer(unsigned long);
-static void tcp_delack_timer(unsigned long);
-static void tcp_keepalive_timer (unsigned long data);
-
-void tcp_init_xmit_timers(struct sock *sk)
-{
- inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
- &tcp_keepalive_timer);
-}
-EXPORT_SYMBOL(tcp_init_xmit_timers);
-
static void tcp_write_err(struct sock *sk)
{
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
@@ -205,21 +194,11 @@ static int tcp_write_timeout(struct sock *sk)
return 0;
}
-static void tcp_delack_timer(unsigned long data)
+void tcp_delack_timer_handler(struct sock *sk)
{
- struct sock *sk = (struct sock *)data;
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
- bh_lock_sock(sk);
- if (sock_owned_by_user(sk)) {
- /* Try again later. */
- icsk->icsk_ack.blocked = 1;
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
- sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
- goto out_unlock;
- }
-
sk_mem_reclaim_partial(sk);
if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
@@ -260,7 +239,21 @@ static void tcp_delack_timer(unsigned long data)
out:
if (sk_under_memory_pressure(sk))
sk_mem_reclaim(sk);
-out_unlock:
+}
+
+static void tcp_delack_timer(unsigned long data)
+{
+ struct sock *sk = (struct sock *)data;
+
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk)) {
+ tcp_delack_timer_handler(sk);
+ } else {
+ inet_csk(sk)->icsk_ack.blocked = 1;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+ /* deleguate our work to tcp_release_cb() */
+ set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+ }
bh_unlock_sock(sk);
sock_put(sk);
}
@@ -450,19 +443,11 @@ out_reset_timer:
out:;
}
-static void tcp_write_timer(unsigned long data)
+void tcp_write_timer_handler(struct sock *sk)
{
- struct sock *sk = (struct sock *)data;
struct inet_connection_sock *icsk = inet_csk(sk);
int event;
- bh_lock_sock(sk);
- if (sock_owned_by_user(sk)) {
- /* Try again later */
- sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
- goto out_unlock;
- }
-
if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
goto out;
@@ -485,7 +470,19 @@ static void tcp_write_timer(unsigned long data)
out:
sk_mem_reclaim(sk);
-out_unlock:
+}
+
+static void tcp_write_timer(unsigned long data)
+{
+ struct sock *sk = (struct sock *)data;
+
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk)) {
+ tcp_write_timer_handler(sk);
+ } else {
+ /* deleguate our work to tcp_release_cb() */
+ set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
+ }
bh_unlock_sock(sk);
sock_put(sk);
}
@@ -602,3 +599,10 @@ out:
bh_unlock_sock(sk);
sock_put(sk);
}
+
+void tcp_init_xmit_timers(struct sock *sk)
+{
+ inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
+ &tcp_keepalive_timer);
+}
+EXPORT_SYMBOL(tcp_init_xmit_timers);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index eaca73644e79..b4c3582a991f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -108,6 +108,7 @@
#include <net/xfrm.h>
#include <trace/events/udp.h>
#include <linux/static_key.h>
+#include <trace/events/skb.h>
#include "udp_impl.h"
struct udp_table udp_table __read_mostly;
@@ -615,6 +616,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
break;
case ICMP_DEST_UNREACH:
if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
+ ipv4_sk_update_pmtu(skb, sk, info);
if (inet->pmtudisc != IP_PMTUDISC_DONT) {
err = EMSGSIZE;
harderr = 1;
@@ -628,6 +630,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
err = icmp_err_convert[code].errno;
}
break;
+ case ICMP_REDIRECT:
+ ipv4_sk_redirect(skb, sk);
+ break;
}
/*
@@ -1219,8 +1224,10 @@ try_again:
goto csum_copy_err;
}
- if (err)
+ if (unlikely(err)) {
+ trace_kfree_skb(skb, udp_recvmsg);
goto out_free;
+ }
if (!peeked)
UDP_INC_STATS_USER(sock_net(sk),
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index a7f86a3cd502..16d0960062be 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -34,15 +34,16 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
int err = -EINVAL;
struct sock *sk;
struct sk_buff *rep;
+ struct net *net = sock_net(in_skb->sk);
if (req->sdiag_family == AF_INET)
- sk = __udp4_lib_lookup(&init_net,
+ sk = __udp4_lib_lookup(net,
req->id.idiag_src[0], req->id.idiag_sport,
req->id.idiag_dst[0], req->id.idiag_dport,
req->id.idiag_if, tbl);
#if IS_ENABLED(CONFIG_IPV6)
else if (req->sdiag_family == AF_INET6)
- sk = __udp6_lib_lookup(&init_net,
+ sk = __udp6_lib_lookup(net,
(struct in6_addr *)req->id.idiag_src,
req->id.idiag_sport,
(struct in6_addr *)req->id.idiag_dst,
@@ -75,7 +76,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
kfree_skb(rep);
goto out;
}
- err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+ err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
err = 0;
@@ -90,6 +91,7 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
struct inet_diag_req_v2 *r, struct nlattr *bc)
{
int num, s_num, slot, s_slot;
+ struct net *net = sock_net(skb->sk);
s_slot = cb->args[0];
num = s_num = cb->args[1];
@@ -106,6 +108,8 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
sk_nulls_for_each(sk, node, &hslot->head) {
struct inet_sock *inet = inet_sk(sk);
+ if (!net_eq(sock_net(sk), net))
+ continue;
if (num < s_num)
goto next;
if (!(r->idiag_states & (1 << sk->sk_state)))
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 06814b6216dc..58d23a572509 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -27,8 +27,8 @@ static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb)
if (skb_dst(skb) == NULL) {
const struct iphdr *iph = ip_hdr(skb);
- if (ip_route_input_noref(skb, iph->daddr, iph->saddr,
- iph->tos, skb->dev))
+ if (ip_route_input(skb, iph->daddr, iph->saddr,
+ iph->tos, skb->dev))
goto drop;
}
return dst_input(skb);
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index ed4bf11ef9f4..ddee0a099a2c 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -15,6 +15,65 @@
#include <net/ip.h>
#include <net/xfrm.h>
+/* Informational hook. The decap is still done here. */
+static struct xfrm_tunnel __rcu *rcv_notify_handlers __read_mostly;
+static DEFINE_MUTEX(xfrm4_mode_tunnel_input_mutex);
+
+int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler)
+{
+ struct xfrm_tunnel __rcu **pprev;
+ struct xfrm_tunnel *t;
+ int ret = -EEXIST;
+ int priority = handler->priority;
+
+ mutex_lock(&xfrm4_mode_tunnel_input_mutex);
+
+ for (pprev = &rcv_notify_handlers;
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t->priority > priority)
+ break;
+ if (t->priority == priority)
+ goto err;
+
+ }
+
+ handler->next = *pprev;
+ rcu_assign_pointer(*pprev, handler);
+
+ ret = 0;
+
+err:
+ mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_register);
+
+int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler)
+{
+ struct xfrm_tunnel __rcu **pprev;
+ struct xfrm_tunnel *t;
+ int ret = -ENOENT;
+
+ mutex_lock(&xfrm4_mode_tunnel_input_mutex);
+ for (pprev = &rcv_notify_handlers;
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t == handler) {
+ *pprev = handler->next;
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
+ synchronize_net();
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_deregister);
+
static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
{
struct iphdr *inner_iph = ipip_hdr(skb);
@@ -64,8 +123,14 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
return 0;
}
+#define for_each_input_rcu(head, handler) \
+ for (handler = rcu_dereference(head); \
+ handler != NULL; \
+ handler = rcu_dereference(handler->next))
+
static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
+ struct xfrm_tunnel *handler;
int err = -EINVAL;
if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
@@ -74,6 +139,9 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out;
+ for_each_input_rcu(rcv_notify_handlers, handler)
+ handler->handler(skb);
+
if (skb_cloned(skb) &&
(err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
goto out;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 0d3426cb5c4f..c6281847f16a 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -79,30 +79,19 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
struct rtable *rt = (struct rtable *)xdst->route;
const struct flowi4 *fl4 = &fl->u.ip4;
- xdst->u.rt.rt_key_dst = fl4->daddr;
- xdst->u.rt.rt_key_src = fl4->saddr;
- xdst->u.rt.rt_key_tos = fl4->flowi4_tos;
- xdst->u.rt.rt_route_iif = fl4->flowi4_iif;
xdst->u.rt.rt_iif = fl4->flowi4_iif;
- xdst->u.rt.rt_oif = fl4->flowi4_oif;
- xdst->u.rt.rt_mark = fl4->flowi4_mark;
xdst->u.dst.dev = dev;
dev_hold(dev);
- xdst->u.rt.peer = rt->peer;
- if (rt->peer)
- atomic_inc(&rt->peer->refcnt);
-
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
+ xdst->u.rt.rt_is_input = rt->rt_is_input;
xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
RTCF_LOCAL);
xdst->u.rt.rt_type = rt->rt_type;
- xdst->u.rt.rt_src = rt->rt_src;
- xdst->u.rt.rt_dst = rt->rt_dst;
xdst->u.rt.rt_gateway = rt->rt_gateway;
- xdst->u.rt.rt_spec_dst = rt->rt_spec_dst;
+ xdst->u.rt.rt_pmtu = rt->rt_pmtu;
return 0;
}
@@ -198,12 +187,22 @@ static inline int xfrm4_garbage_collect(struct dst_ops *ops)
return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
}
-static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
+{
+ struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ struct dst_entry *path = xdst->route;
+
+ path->ops->update_pmtu(path, sk, skb, mtu);
+}
+
+static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
- path->ops->update_pmtu(path, mtu);
+ path->ops->redirect(path, sk, skb);
}
static void xfrm4_dst_destroy(struct dst_entry *dst)
@@ -212,9 +211,6 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
dst_destroy_metrics_generic(dst);
- if (likely(xdst->u.rt.peer))
- inet_putpeer(xdst->u.rt.peer);
-
xfrm_dst_destroy(xdst);
}
@@ -232,6 +228,7 @@ static struct dst_ops xfrm4_dst_ops = {
.protocol = cpu_to_be16(ETH_P_IP),
.gc = xfrm4_garbage_collect,
.update_pmtu = xfrm4_update_pmtu,
+ .redirect = xfrm4_redirect,
.cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm4_dst_destroy,
.ifdown = xfrm4_dst_ifdown,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8f6411c97189..79181819a24f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -63,6 +63,7 @@
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/string.h>
+#include <linux/hash.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -579,15 +580,9 @@ ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
list_add_tail(&ifp->if_list, p);
}
-static u32 ipv6_addr_hash(const struct in6_addr *addr)
+static u32 inet6_addr_hash(const struct in6_addr *addr)
{
- /*
- * We perform the hash function over the last 64 bits of the address
- * This will include the IEEE address token on links that support it.
- */
- return jhash_2words((__force u32)addr->s6_addr32[2],
- (__force u32)addr->s6_addr32[3], 0)
- & (IN6_ADDR_HSIZE - 1);
+ return hash_32(ipv6_addr_hash(addr), IN6_ADDR_HSIZE_SHIFT);
}
/* On success it returns ifp with increased reference count */
@@ -662,7 +657,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
in6_ifa_hold(ifa);
/* Add to big hash table */
- hash = ipv6_addr_hash(addr);
+ hash = inet6_addr_hash(addr);
hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
spin_unlock(&addrconf_hash_lock);
@@ -1270,7 +1265,7 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
{
struct inet6_ifaddr *ifp;
struct hlist_node *node;
- unsigned int hash = ipv6_addr_hash(addr);
+ unsigned int hash = inet6_addr_hash(addr);
rcu_read_lock_bh();
hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
@@ -1293,7 +1288,7 @@ EXPORT_SYMBOL(ipv6_chk_addr);
static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
struct net_device *dev)
{
- unsigned int hash = ipv6_addr_hash(addr);
+ unsigned int hash = inet6_addr_hash(addr);
struct inet6_ifaddr *ifp;
struct hlist_node *node;
@@ -1336,7 +1331,7 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
struct net_device *dev, int strict)
{
struct inet6_ifaddr *ifp, *result = NULL;
- unsigned int hash = ipv6_addr_hash(addr);
+ unsigned int hash = inet6_addr_hash(addr);
struct hlist_node *node;
rcu_read_lock_bh();
@@ -3223,7 +3218,7 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
int ret = 0;
struct inet6_ifaddr *ifp = NULL;
struct hlist_node *n;
- unsigned int hash = ipv6_addr_hash(addr);
+ unsigned int hash = inet6_addr_hash(addr);
rcu_read_lock_bh();
hlist_for_each_entry_rcu_bh(ifp, n, &inet6_addr_lst[hash], addr_lst) {
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index f1a4a2c28ed3..7e6139508ee7 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -35,6 +35,7 @@
#include <linux/pfkeyv2.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
+#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
@@ -612,16 +613,18 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct xfrm_state *x;
if (type != ICMPV6_DEST_UNREACH &&
- type != ICMPV6_PKT_TOOBIG)
+ type != ICMPV6_PKT_TOOBIG &&
+ type != NDISC_REDIRECT)
return;
x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET6);
if (!x)
return;
- NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/%pI6\n",
- ntohl(ah->spi), &iph->daddr);
-
+ if (type == NDISC_REDIRECT)
+ ip6_redirect(skb, net, 0, 0);
+ else
+ ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index db1521fcda5b..6dc7fd353ef5 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -39,6 +39,7 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
@@ -433,15 +434,19 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
struct xfrm_state *x;
if (type != ICMPV6_DEST_UNREACH &&
- type != ICMPV6_PKT_TOOBIG)
+ type != ICMPV6_PKT_TOOBIG &&
+ type != NDISC_REDIRECT)
return;
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
esph->spi, IPPROTO_ESP, AF_INET6);
if (!x)
return;
- pr_debug("pmtu discovery on SA ESP/%08x/%pI6\n",
- ntohl(esph->spi), &iph->daddr);
+
+ if (type == NDISC_REDIRECT)
+ ip6_redirect(skb, net, 0, 0);
+ else
+ ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 6447dc49429f..fa3d9c328092 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -791,14 +791,14 @@ static int ipv6_renew_option(void *ohdr,
if (ohdr) {
memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
*hdr = (struct ipv6_opt_hdr *)*p;
- *p += CMSG_ALIGN(ipv6_optlen(*(struct ipv6_opt_hdr **)hdr));
+ *p += CMSG_ALIGN(ipv6_optlen(*hdr));
}
} else {
if (newopt) {
if (copy_from_user(*p, newopt, newoptlen))
return -EFAULT;
*hdr = (struct ipv6_opt_hdr *)*p;
- if (ipv6_optlen(*(struct ipv6_opt_hdr **)hdr) > newoptlen)
+ if (ipv6_optlen(*hdr) > newoptlen)
return -EINVAL;
*p += CMSG_ALIGN(newoptlen);
}
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 091a2971c7b7..24d69dbca4d6 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -188,14 +188,16 @@ static inline bool icmpv6_xrlim_allow(struct sock *sk, u8 type,
} else {
struct rt6_info *rt = (struct rt6_info *)dst;
int tmo = net->ipv6.sysctl.icmpv6_time;
+ struct inet_peer *peer;
/* Give more bandwidth to wider prefixes. */
if (rt->rt6i_dst.plen < 128)
tmo >>= ((128 - rt->rt6i_dst.plen)>>5);
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
- res = inet_peer_xrlim_allow(rt->rt6i_peer, tmo);
+ peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+ res = inet_peer_xrlim_allow(peer, tmo);
+ if (peer)
+ inet_putpeer(peer);
}
dst_release(dst);
return res;
@@ -596,13 +598,12 @@ out:
icmpv6_xmit_unlock(sk);
}
-static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
+void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
const struct inet6_protocol *ipprot;
int inner_offset;
- int hash;
- u8 nexthdr;
__be16 frag_off;
+ u8 nexthdr;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
return;
@@ -629,10 +630,8 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
--ANK (980726)
*/
- hash = nexthdr & (MAX_INET_PROTOS - 1);
-
rcu_read_lock();
- ipprot = rcu_dereference(inet6_protos[hash]);
+ ipprot = rcu_dereference(inet6_protos[nexthdr]);
if (ipprot && ipprot->err_handler)
ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
rcu_read_unlock();
@@ -649,7 +648,6 @@ static int icmpv6_rcv(struct sk_buff *skb)
struct net_device *dev = skb->dev;
struct inet6_dev *idev = __in6_dev_get(dev);
const struct in6_addr *saddr, *daddr;
- const struct ipv6hdr *orig_hdr;
struct icmp6hdr *hdr;
u8 type;
@@ -661,7 +659,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
XFRM_STATE_ICMP))
goto drop_no_count;
- if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(*orig_hdr)))
+ if (!pskb_may_pull(skb, sizeof(*hdr) + sizeof(struct ipv6hdr)))
goto drop_no_count;
nh = skb_network_offset(skb);
@@ -722,9 +720,6 @@ static int icmpv6_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto discard_it;
hdr = icmp6_hdr(skb);
- orig_hdr = (struct ipv6hdr *) (hdr + 1);
- rt6_pmtu_discovery(&orig_hdr->daddr, &orig_hdr->saddr, dev,
- ntohl(hdr->icmp6_mtu));
/*
* Drop through to notify
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e6cee5292a0b..0251a6005be8 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -55,26 +55,26 @@ int inet6_csk_bind_conflict(const struct sock *sk,
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
struct dst_entry *inet6_csk_route_req(struct sock *sk,
+ struct flowi6 *fl6,
const struct request_sock *req)
{
struct inet6_request_sock *treq = inet6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *final_p, final;
struct dst_entry *dst;
- struct flowi6 fl6;
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_TCP;
- fl6.daddr = treq->rmt_addr;
- final_p = fl6_update_dst(&fl6, np->opt, &final);
- fl6.saddr = treq->loc_addr;
- fl6.flowi6_oif = sk->sk_bound_dev_if;
- fl6.flowi6_mark = sk->sk_mark;
- fl6.fl6_dport = inet_rsk(req)->rmt_port;
- fl6.fl6_sport = inet_rsk(req)->loc_port;
- security_req_classify_flow(req, flowi6_to_flowi(&fl6));
-
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_proto = IPPROTO_TCP;
+ fl6->daddr = treq->rmt_addr;
+ final_p = fl6_update_dst(fl6, np->opt, &final);
+ fl6->saddr = treq->loc_addr;
+ fl6->flowi6_oif = treq->iif;
+ fl6->flowi6_mark = sk->sk_mark;
+ fl6->fl6_dport = inet_rsk(req)->rmt_port;
+ fl6->fl6_sport = inet_rsk(req)->loc_port;
+ security_req_classify_flow(req, flowi6_to_flowi(fl6));
+
+ dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
if (IS_ERR(dst))
return NULL;
@@ -171,7 +171,8 @@ EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
- struct in6_addr *daddr, struct in6_addr *saddr)
+ const struct in6_addr *daddr,
+ const struct in6_addr *saddr)
{
__ip6_dst_store(sk, dst, daddr, saddr);
@@ -203,43 +204,52 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
return dst;
}
-int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
+static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
+ struct flowi6 *fl6)
{
- struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
- struct flowi6 fl6;
- struct dst_entry *dst;
struct in6_addr *final_p, final;
- int res;
+ struct dst_entry *dst;
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = sk->sk_protocol;
- fl6.daddr = np->daddr;
- fl6.saddr = np->saddr;
- fl6.flowlabel = np->flow_label;
- IP6_ECN_flow_xmit(sk, fl6.flowlabel);
- fl6.flowi6_oif = sk->sk_bound_dev_if;
- fl6.flowi6_mark = sk->sk_mark;
- fl6.fl6_sport = inet->inet_sport;
- fl6.fl6_dport = inet->inet_dport;
- security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_proto = sk->sk_protocol;
+ fl6->daddr = np->daddr;
+ fl6->saddr = np->saddr;
+ fl6->flowlabel = np->flow_label;
+ IP6_ECN_flow_xmit(sk, fl6->flowlabel);
+ fl6->flowi6_oif = sk->sk_bound_dev_if;
+ fl6->flowi6_mark = sk->sk_mark;
+ fl6->fl6_sport = inet->inet_sport;
+ fl6->fl6_dport = inet->inet_dport;
+ security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
- final_p = fl6_update_dst(&fl6, np->opt, &final);
+ final_p = fl6_update_dst(fl6, np->opt, &final);
dst = __inet6_csk_dst_check(sk, np->dst_cookie);
+ if (!dst) {
+ dst = ip6_dst_lookup_flow(sk, fl6, final_p, false);
- if (dst == NULL) {
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
+ if (!IS_ERR(dst))
+ __inet6_csk_dst_store(sk, dst, NULL, NULL);
+ }
+ return dst;
+}
- if (IS_ERR(dst)) {
- sk->sk_err_soft = -PTR_ERR(dst);
- sk->sk_route_caps = 0;
- kfree_skb(skb);
- return PTR_ERR(dst);
- }
+int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
+{
+ struct sock *sk = skb->sk;
+ struct ipv6_pinfo *np = inet6_sk(sk);
+ struct flowi6 fl6;
+ struct dst_entry *dst;
+ int res;
- __inet6_csk_dst_store(sk, dst, NULL, NULL);
+ dst = inet6_csk_route_socket(sk, &fl6);
+ if (IS_ERR(dst)) {
+ sk->sk_err_soft = -PTR_ERR(dst);
+ sk->sk_route_caps = 0;
+ kfree_skb(skb);
+ return PTR_ERR(dst);
}
rcu_read_lock();
@@ -253,3 +263,16 @@ int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
return res;
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);
+
+struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
+{
+ struct flowi6 fl6;
+ struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);
+
+ if (IS_ERR(dst))
+ return NULL;
+ dst->ops->update_pmtu(dst, sk, NULL, mtu);
+
+ return inet6_csk_route_socket(sk, &fl6);
+}
+EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 608327661960..13690d650c3e 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -197,6 +197,7 @@ static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
table->tb6_id = id;
table->tb6_root.leaf = net->ipv6.ip6_null_entry;
table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+ inet_peer_base_init(&table->tb6_peers);
}
return table;
@@ -1633,6 +1634,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+ inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
@@ -1643,6 +1645,7 @@ static int __net_init fib6_net_init(struct net *net)
net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
+ inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
#endif
fib6_tables_init(net);
@@ -1666,8 +1669,10 @@ static void fib6_net_exit(struct net *net)
del_timer_sync(&net->ipv6.ip6_fib_timer);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
kfree(net->ipv6.fib6_local_tbl);
#endif
+ inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
kfree(net->ipv6.fib6_main_tbl);
kfree(net->ipv6.fib_table_hash);
kfree(net->ipv6.rt6_stats);
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 21a15dfe4a9e..5ab923e51af3 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -168,13 +168,12 @@ drop:
static int ip6_input_finish(struct sk_buff *skb)
{
+ struct net *net = dev_net(skb_dst(skb)->dev);
const struct inet6_protocol *ipprot;
+ struct inet6_dev *idev;
unsigned int nhoff;
int nexthdr;
bool raw;
- u8 hash;
- struct inet6_dev *idev;
- struct net *net = dev_net(skb_dst(skb)->dev);
/*
* Parse extension headers
@@ -189,9 +188,7 @@ resubmit:
nexthdr = skb_network_header(skb)[nhoff];
raw = raw6_local_deliver(skb, nexthdr);
-
- hash = nexthdr & (MAX_INET_PROTOS - 1);
- if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) {
+ if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) {
int ret;
if (ipprot->flags & INET6_PROTO_FINAL) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index decc21d19c53..5b2d63ed793e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -83,24 +83,12 @@ int ip6_local_out(struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(ip6_local_out);
-/* dev_loopback_xmit for use with netfilter. */
-static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
-{
- skb_reset_mac_header(newskb);
- __skb_pull(newskb, skb_network_offset(newskb));
- newskb->pkt_type = PACKET_LOOPBACK;
- newskb->ip_summed = CHECKSUM_UNNECESSARY;
- WARN_ON(!skb_dst(newskb));
-
- netif_rx_ni(newskb);
- return 0;
-}
-
static int ip6_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
struct neighbour *neigh;
+ struct rt6_info *rt;
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
@@ -121,7 +109,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
if (newskb)
NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
newskb, NULL, newskb->dev,
- ip6_dev_loopback_xmit);
+ dev_loopback_xmit);
if (ipv6_hdr(skb)->hop_limit == 0) {
IP6_INC_STATS(dev_net(dev), idev,
@@ -136,9 +124,10 @@ static int ip6_finish_output2(struct sk_buff *skb)
}
rcu_read_lock();
- neigh = dst_get_neighbour_noref(dst);
+ rt = (struct rt6_info *) dst;
+ neigh = rt->n;
if (neigh) {
- int res = neigh_output(neigh, skb);
+ int res = dst_neigh_output(dst, neigh, skb);
rcu_read_unlock();
return res;
@@ -463,6 +452,7 @@ int ip6_forward(struct sk_buff *skb)
*/
if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
struct in6_addr *target = NULL;
+ struct inet_peer *peer;
struct rt6_info *rt;
/*
@@ -476,14 +466,15 @@ int ip6_forward(struct sk_buff *skb)
else
target = &hdr->daddr;
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
+ peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
/* Limit redirects both by destination (here)
and by source (inside ndisc_send_redirect)
*/
- if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+ if (inet_peer_xrlim_allow(peer, 1*HZ))
ndisc_send_redirect(skb, target);
+ if (peer)
+ inet_putpeer(peer);
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);
@@ -604,12 +595,13 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
if (rt && !(rt->dst.flags & DST_NOPEER)) {
struct inet_peer *peer;
+ struct net *net;
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
- peer = rt->rt6i_peer;
+ net = dev_net(rt->dst.dev);
+ peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
if (peer) {
fhdr->identification = htonl(inet_getid(peer, 0));
+ inet_putpeer(peer);
return;
}
}
@@ -960,6 +952,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
struct neighbour *n;
+ struct rt6_info *rt;
#endif
int err;
@@ -988,7 +981,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
* dst entry of the nexthop router
*/
rcu_read_lock();
- n = dst_get_neighbour_noref(*dst);
+ rt = (struct rt6_info *) *dst;
+ n = rt->n;
if (n && !(n->nud_state & NUD_VALID)) {
struct inet6_ifaddr *ifp;
struct flowi6 fl_gw6;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c9015fad8d65..9a1d5fe6aef8 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -40,6 +40,7 @@
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
+#include <linux/hash.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
@@ -70,11 +71,15 @@ MODULE_ALIAS_NETDEV("ip6tnl0");
#define IPV6_TCLASS_MASK (IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT 20
-#define HASH_SIZE 32
+#define HASH_SIZE_SHIFT 5
+#define HASH_SIZE (1 << HASH_SIZE_SHIFT)
-#define HASH(addr) ((__force u32)((addr)->s6_addr32[0] ^ (addr)->s6_addr32[1] ^ \
- (addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
- (HASH_SIZE - 1))
+static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+{
+ u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
+
+ return hash_32(hash, HASH_SIZE_SHIFT);
+}
static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
@@ -166,12 +171,11 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
{
- unsigned int h0 = HASH(remote);
- unsigned int h1 = HASH(local);
+ unsigned int hash = HASH(remote, local);
struct ip6_tnl *t;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
- for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[h0 ^ h1]) {
+ for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
ipv6_addr_equal(remote, &t->parms.raddr) &&
(t->dev->flags & IFF_UP))
@@ -205,7 +209,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
prio = 1;
- h = HASH(remote) ^ HASH(local);
+ h = HASH(remote, local);
}
return &ip6n->tnls[prio][h];
}
@@ -252,7 +256,7 @@ static void ip6_dev_free(struct net_device *dev)
}
/**
- * ip6_tnl_create() - create a new tunnel
+ * ip6_tnl_create - create a new tunnel
* @p: tunnel parameters
* @pt: pointer to new tunnel
*
@@ -550,6 +554,9 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
rel_type = ICMP_DEST_UNREACH;
rel_code = ICMP_FRAG_NEEDED;
break;
+ case NDISC_REDIRECT:
+ rel_type = ICMP_REDIRECT;
+ rel_code = ICMP_REDIR_HOST;
default:
return 0;
}
@@ -606,8 +613,10 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (rel_info > dst_mtu(skb_dst(skb2)))
goto out;
- skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), rel_info);
+ skb_dst(skb2)->ops->update_pmtu(skb_dst(skb2), NULL, skb2, rel_info);
}
+ if (rel_type == ICMP_REDIRECT)
+ skb_dst(skb2)->ops->redirect(skb_dst(skb2), NULL, skb2);
icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
@@ -684,24 +693,50 @@ static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
IP6_ECN_set_ce(ipv6_hdr(skb));
}
+static __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
+ const struct in6_addr *laddr,
+ const struct in6_addr *raddr)
+{
+ struct ip6_tnl_parm *p = &t->parms;
+ int ltype = ipv6_addr_type(laddr);
+ int rtype = ipv6_addr_type(raddr);
+ __u32 flags = 0;
+
+ if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
+ flags = IP6_TNL_F_CAP_PER_PACKET;
+ } else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
+ rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
+ !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
+ (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
+ if (ltype&IPV6_ADDR_UNICAST)
+ flags |= IP6_TNL_F_CAP_XMIT;
+ if (rtype&IPV6_ADDR_UNICAST)
+ flags |= IP6_TNL_F_CAP_RCV;
+ }
+ return flags;
+}
+
/* called with rcu_read_lock() */
-static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
+static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
+ const struct in6_addr *laddr,
+ const struct in6_addr *raddr)
{
struct ip6_tnl_parm *p = &t->parms;
int ret = 0;
struct net *net = dev_net(t->dev);
- if (p->flags & IP6_TNL_F_CAP_RCV) {
+ if ((p->flags & IP6_TNL_F_CAP_RCV) ||
+ ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
+ (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
struct net_device *ldev = NULL;
if (p->link)
ldev = dev_get_by_index_rcu(net, p->link);
- if ((ipv6_addr_is_multicast(&p->laddr) ||
- likely(ipv6_chk_addr(net, &p->laddr, ldev, 0))) &&
- likely(!ipv6_chk_addr(net, &p->raddr, NULL, 0)))
+ if ((ipv6_addr_is_multicast(laddr) ||
+ likely(ipv6_chk_addr(net, laddr, ldev, 0))) &&
+ likely(!ipv6_chk_addr(net, raddr, NULL, 0)))
ret = 1;
-
}
return ret;
}
@@ -740,7 +775,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
goto discard;
}
- if (!ip6_tnl_rcv_ctl(t)) {
+ if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr)) {
t->dev->stats.rx_dropped++;
rcu_read_unlock();
goto discard;
@@ -921,7 +956,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len > mtu) {
*pmtu = mtu;
err = -EMSGSIZE;
@@ -1114,25 +1149,6 @@ tx_err:
return NETDEV_TX_OK;
}
-static void ip6_tnl_set_cap(struct ip6_tnl *t)
-{
- struct ip6_tnl_parm *p = &t->parms;
- int ltype = ipv6_addr_type(&p->laddr);
- int rtype = ipv6_addr_type(&p->raddr);
-
- p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV);
-
- if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
- rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
- !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
- (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
- if (ltype&IPV6_ADDR_UNICAST)
- p->flags |= IP6_TNL_F_CAP_XMIT;
- if (rtype&IPV6_ADDR_UNICAST)
- p->flags |= IP6_TNL_F_CAP_RCV;
- }
-}
-
static void ip6_tnl_link_config(struct ip6_tnl *t)
{
struct net_device *dev = t->dev;
@@ -1153,7 +1169,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
- ip6_tnl_set_cap(t);
+ p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
+ p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
dev->flags |= IFF_POINTOPOINT;
@@ -1438,6 +1455,9 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
t->parms.proto = IPPROTO_IPV6;
dev_hold(dev);
+
+ ip6_tnl_link_config(t);
+
rcu_assign_pointer(ip6n->tnls_wc[0], t);
return 0;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 461e47c8e956..4532973f0dd4 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2104,8 +2104,9 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
if (c->mf6c_parent >= MAXMIFS)
return -ENOENT;
- if (MIF_EXISTS(mrt, c->mf6c_parent))
- RTA_PUT(skb, RTA_IIF, 4, &mrt->vif6_table[c->mf6c_parent].dev->ifindex);
+ if (MIF_EXISTS(mrt, c->mf6c_parent) &&
+ nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
+ return -EMSGSIZE;
mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 5cb75bfe45b1..7af5aee75d98 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -46,6 +46,7 @@
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
+#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
@@ -63,7 +64,9 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
(struct ip_comp_hdr *)(skb->data + offset);
struct xfrm_state *x;
- if (type != ICMPV6_DEST_UNREACH && type != ICMPV6_PKT_TOOBIG)
+ if (type != ICMPV6_DEST_UNREACH &&
+ type != ICMPV6_PKT_TOOBIG &&
+ type != NDISC_REDIRECT)
return;
spi = htonl(ntohs(ipcomph->cpi));
@@ -72,8 +75,10 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (!x)
return;
- pr_debug("pmtu discovery on SA IPCOMP/%08x/%pI6\n",
- spi, &iph->daddr);
+ if (type == NDISC_REDIRECT)
+ ip6_redirect(skb, net, 0, 0);
+ else
+ ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
}
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 6d0f5dc8e3a6..92f8e48e4ba4 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -211,6 +211,9 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
struct ipv6_mc_socklist __rcu **lnk;
struct net *net = sock_net(sk);
+ if (!ipv6_addr_is_multicast(addr))
+ return -EINVAL;
+
spin_lock(&ipv6_sk_mc_lock);
for (lnk = &np->ipv6_mc_list;
(mc_lst = rcu_dereference_protected(*lnk,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 54f62d3b8dd6..ff36194a71aa 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -143,40 +143,6 @@ struct neigh_table nd_tbl = {
.gc_thresh3 = 1024,
};
-/* ND options */
-struct ndisc_options {
- struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];
-#ifdef CONFIG_IPV6_ROUTE_INFO
- struct nd_opt_hdr *nd_opts_ri;
- struct nd_opt_hdr *nd_opts_ri_end;
-#endif
- struct nd_opt_hdr *nd_useropts;
- struct nd_opt_hdr *nd_useropts_end;
-};
-
-#define nd_opts_src_lladdr nd_opt_array[ND_OPT_SOURCE_LL_ADDR]
-#define nd_opts_tgt_lladdr nd_opt_array[ND_OPT_TARGET_LL_ADDR]
-#define nd_opts_pi nd_opt_array[ND_OPT_PREFIX_INFO]
-#define nd_opts_pi_end nd_opt_array[__ND_OPT_PREFIX_INFO_END]
-#define nd_opts_rh nd_opt_array[ND_OPT_REDIRECT_HDR]
-#define nd_opts_mtu nd_opt_array[ND_OPT_MTU]
-
-#define NDISC_OPT_SPACE(len) (((len)+2+7)&~7)
-
-/*
- * Return the padding between the option length and the start of the
- * link addr. Currently only IP-over-InfiniBand needs this, although
- * if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
- * also need a pad of 2.
- */
-static int ndisc_addr_option_pad(unsigned short type)
-{
- switch (type) {
- case ARPHRD_INFINIBAND: return 2;
- default: return 0;
- }
-}
-
static inline int ndisc_opt_addr_space(struct net_device *dev)
{
return NDISC_OPT_SPACE(dev->addr_len + ndisc_addr_option_pad(dev->type));
@@ -233,8 +199,8 @@ static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
return cur <= end && ndisc_is_useropt(cur) ? cur : NULL;
}
-static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
- struct ndisc_options *ndopts)
+struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
+ struct ndisc_options *ndopts)
{
struct nd_opt_hdr *nd_opt = (struct nd_opt_hdr *)opt;
@@ -297,17 +263,6 @@ static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
return ndopts;
}
-static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
- struct net_device *dev)
-{
- u8 *lladdr = (u8 *)(p + 1);
- int lladdrlen = p->nd_opt_len << 3;
- int prepad = ndisc_addr_option_pad(dev->type);
- if (lladdrlen != NDISC_OPT_SPACE(dev->addr_len + prepad))
- return NULL;
- return lladdr + prepad;
-}
-
int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
{
switch (dev->type) {
@@ -1379,16 +1334,6 @@ out:
static void ndisc_redirect_rcv(struct sk_buff *skb)
{
- struct inet6_dev *in6_dev;
- struct icmp6hdr *icmph;
- const struct in6_addr *dest;
- const struct in6_addr *target; /* new first hop to destination */
- struct neighbour *neigh;
- int on_link = 0;
- struct ndisc_options ndopts;
- int optlen;
- u8 *lladdr = NULL;
-
#ifdef CONFIG_IPV6_NDISC_NODETYPE
switch (skb->ndisc_nodetype) {
case NDISC_NODETYPE_HOST:
@@ -1405,65 +1350,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
return;
}
- optlen = skb->tail - skb->transport_header;
- optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
-
- if (optlen < 0) {
- ND_PRINTK(2, warn, "Redirect: packet too short\n");
- return;
- }
-
- icmph = icmp6_hdr(skb);
- target = (const struct in6_addr *) (icmph + 1);
- dest = target + 1;
-
- if (ipv6_addr_is_multicast(dest)) {
- ND_PRINTK(2, warn,
- "Redirect: destination address is multicast\n");
- return;
- }
-
- if (ipv6_addr_equal(dest, target)) {
- on_link = 1;
- } else if (ipv6_addr_type(target) !=
- (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
- ND_PRINTK(2, warn,
- "Redirect: target address is not link-local unicast\n");
- return;
- }
-
- in6_dev = __in6_dev_get(skb->dev);
- if (!in6_dev)
- return;
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
- return;
-
- /* RFC2461 8.1:
- * The IP source address of the Redirect MUST be the same as the current
- * first-hop router for the specified ICMP Destination Address.
- */
-
- if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
- ND_PRINTK(2, warn, "Redirect: invalid ND options\n");
- return;
- }
- if (ndopts.nd_opts_tgt_lladdr) {
- lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
- skb->dev);
- if (!lladdr) {
- ND_PRINTK(2, warn,
- "Redirect: invalid link-layer address length\n");
- return;
- }
- }
-
- neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1);
- if (neigh) {
- rt6_redirect(dest, &ipv6_hdr(skb)->daddr,
- &ipv6_hdr(skb)->saddr, neigh, lladdr,
- on_link);
- neigh_release(neigh);
- }
+ icmpv6_notify(skb, NDISC_REDIRECT, 0, 0);
}
void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
@@ -1472,6 +1359,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
struct net *net = dev_net(dev);
struct sock *sk = net->ipv6.ndisc_sk;
int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
+ struct inet_peer *peer;
struct sk_buff *buff;
struct icmp6hdr *icmph;
struct in6_addr saddr_buf;
@@ -1485,6 +1373,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
int rd_len;
int err;
u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
+ bool ret;
if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
@@ -1518,9 +1407,11 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
"Redirect: destination is not a neighbour\n");
goto release;
}
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
- if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+ peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);
+ ret = inet_peer_xrlim_allow(peer, 1*HZ);
+ if (peer)
+ inet_putpeer(peer);
+ if (!ret)
goto release;
if (dev->addr_len) {
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 3224ef90a21a..4794f96cf2e0 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -143,11 +143,11 @@ static int ipv6_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
return NF_ACCEPT;
}
-static unsigned int ipv6_confirm(unsigned int hooknum,
- struct sk_buff *skb,
- const struct net_device *in,
- const struct net_device *out,
- int (*okfn)(struct sk_buff *))
+static unsigned int ipv6_helper(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
{
struct nf_conn *ct;
const struct nf_conn_help *help;
@@ -161,15 +161,15 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
- goto out;
+ return NF_ACCEPT;
help = nfct_help(ct);
if (!help)
- goto out;
+ return NF_ACCEPT;
/* rcu_read_lock()ed by nf_hook_slow */
helper = rcu_dereference(help->helper);
if (!helper)
- goto out;
+ return NF_ACCEPT;
protoff = nf_ct_ipv6_skip_exthdr(skb, extoff, &pnum,
skb->len - extoff);
@@ -179,12 +179,19 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
}
ret = helper->help(skb, protoff, ct, ctinfo);
- if (ret != NF_ACCEPT) {
+ if (ret != NF_ACCEPT && (ret & NF_VERDICT_MASK) != NF_QUEUE) {
nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL,
"nf_ct_%s: dropping packet", helper->name);
- return ret;
}
-out:
+ return ret;
+}
+
+static unsigned int ipv6_confirm(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
/* We've seen it coming out the other side: confirm it */
return nf_conntrack_confirm(skb);
}
@@ -254,6 +261,13 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
.priority = NF_IP6_PRI_CONNTRACK,
},
{
+ .hook = ipv6_helper,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP6_PRI_CONNTRACK_HELPER,
+ },
+ {
.hook = ipv6_confirm,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
@@ -261,6 +275,13 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
.priority = NF_IP6_PRI_LAST,
},
{
+ .hook = ipv6_helper,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP6_PRI_CONNTRACK_HELPER,
+ },
+ {
.hook = ipv6_confirm,
.owner = THIS_MODULE,
.pf = NFPROTO_IPV6,
@@ -333,37 +354,75 @@ MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
-static int __init nf_conntrack_l3proto_ipv6_init(void)
+static int ipv6_net_init(struct net *net)
{
int ret = 0;
- need_conntrack();
- nf_defrag_ipv6_enable();
-
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp6);
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_tcp6);
if (ret < 0) {
- pr_err("nf_conntrack_ipv6: can't register tcp.\n");
- return ret;
+ printk(KERN_ERR "nf_conntrack_l4proto_tcp6: protocol register failed\n");
+ goto out;
}
-
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp6);
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_udp6);
if (ret < 0) {
- pr_err("nf_conntrack_ipv6: can't register udp.\n");
- goto cleanup_tcp;
+ printk(KERN_ERR "nf_conntrack_l4proto_udp6: protocol register failed\n");
+ goto cleanup_tcp6;
}
-
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmpv6);
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_icmpv6);
if (ret < 0) {
- pr_err("nf_conntrack_ipv6: can't register icmpv6.\n");
- goto cleanup_udp;
+ printk(KERN_ERR "nf_conntrack_l4proto_icmp6: protocol register failed\n");
+ goto cleanup_udp6;
}
-
- ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6);
+ ret = nf_conntrack_l3proto_register(net,
+ &nf_conntrack_l3proto_ipv6);
if (ret < 0) {
- pr_err("nf_conntrack_ipv6: can't register ipv6\n");
+ printk(KERN_ERR "nf_conntrack_l3proto_ipv6: protocol register failed\n");
goto cleanup_icmpv6;
}
+ return 0;
+ cleanup_icmpv6:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_icmpv6);
+ cleanup_udp6:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_udp6);
+ cleanup_tcp6:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_tcp6);
+ out:
+ return ret;
+}
+static void ipv6_net_exit(struct net *net)
+{
+ nf_conntrack_l3proto_unregister(net,
+ &nf_conntrack_l3proto_ipv6);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_icmpv6);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_udp6);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_tcp6);
+}
+
+static struct pernet_operations ipv6_net_ops = {
+ .init = ipv6_net_init,
+ .exit = ipv6_net_exit,
+};
+
+static int __init nf_conntrack_l3proto_ipv6_init(void)
+{
+ int ret = 0;
+
+ need_conntrack();
+ nf_defrag_ipv6_enable();
+
+ ret = register_pernet_subsys(&ipv6_net_ops);
+ if (ret < 0)
+ goto cleanup_pernet;
ret = nf_register_hooks(ipv6_conntrack_ops,
ARRAY_SIZE(ipv6_conntrack_ops));
if (ret < 0) {
@@ -374,13 +433,8 @@ static int __init nf_conntrack_l3proto_ipv6_init(void)
return ret;
cleanup_ipv6:
- nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
- cleanup_icmpv6:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
- cleanup_udp:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
- cleanup_tcp:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
+ unregister_pernet_subsys(&ipv6_net_ops);
+ cleanup_pernet:
return ret;
}
@@ -388,10 +442,7 @@ static void __exit nf_conntrack_l3proto_ipv6_fini(void)
{
synchronize_net();
nf_unregister_hooks(ipv6_conntrack_ops, ARRAY_SIZE(ipv6_conntrack_ops));
- nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmpv6);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp6);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp6);
+ unregister_pernet_subsys(&ipv6_net_ops);
}
module_init(nf_conntrack_l3proto_ipv6_init);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 3e81904fbbcd..2d54b2061d68 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -29,6 +29,11 @@
static unsigned int nf_ct_icmpv6_timeout __read_mostly = 30*HZ;
+static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6;
+}
+
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
@@ -90,7 +95,7 @@ static int icmpv6_print_tuple(struct seq_file *s,
static unsigned int *icmpv6_get_timeouts(struct net *net)
{
- return &nf_ct_icmpv6_timeout;
+ return &icmpv6_pernet(net)->timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
@@ -281,16 +286,18 @@ static int icmpv6_nlattr_tuple_size(void)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeout = data;
+ struct nf_icmp_net *in = icmpv6_pernet(net);
if (tb[CTA_TIMEOUT_ICMPV6_TIMEOUT]) {
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMPV6_TIMEOUT])) * HZ;
} else {
/* Set default ICMPv6 timeout. */
- *timeout = nf_ct_icmpv6_timeout;
+ *timeout = in->timeout;
}
return 0;
}
@@ -315,11 +322,9 @@ icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *icmpv6_sysctl_header;
static struct ctl_table icmpv6_sysctl_table[] = {
{
.procname = "nf_conntrack_icmpv6_timeout",
- .data = &nf_ct_icmpv6_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -328,6 +333,36 @@ static struct ctl_table icmpv6_sysctl_table[] = {
};
#endif /* CONFIG_SYSCTL */
+static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct nf_icmp_net *in)
+{
+#ifdef CONFIG_SYSCTL
+ pn->ctl_table = kmemdup(icmpv6_sysctl_table,
+ sizeof(icmpv6_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &in->timeout;
+#endif
+ return 0;
+}
+
+static int icmpv6_init_net(struct net *net, u_int16_t proto)
+{
+ struct nf_icmp_net *in = icmpv6_pernet(net);
+ struct nf_proto_net *pn = &in->pn;
+
+ in->timeout = nf_ct_icmpv6_timeout;
+
+ return icmpv6_kmemdup_sysctl_table(pn, in);
+}
+
+static struct nf_proto_net *icmpv6_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
{
.l3proto = PF_INET6,
@@ -355,8 +390,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 __read_mostly =
.nla_policy = icmpv6_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_header = &icmpv6_sysctl_header,
- .ctl_table = icmpv6_sysctl_table,
-#endif
+ .init_net = icmpv6_init_net,
+ .get_net_proto = icmpv6_get_net_proto,
};
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 9a7978fdc02a..053082dfc93e 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -29,9 +29,7 @@ const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly;
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
- int hash = protocol & (MAX_INET_PROTOS - 1);
-
- return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+ return !cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],
NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet6_add_protocol);
@@ -42,9 +40,9 @@ EXPORT_SYMBOL(inet6_add_protocol);
int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
- int ret, hash = protocol & (MAX_INET_PROTOS - 1);
+ int ret;
- ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash],
+ ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol],
prot, NULL) == prot) ? 0 : -1;
synchronize_net();
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 93d69836fded..ef0579d5bca6 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -165,7 +165,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
saddr = &ipv6_hdr(skb)->saddr;
daddr = saddr + 1;
- hash = nexthdr & (MAX_INET_PROTOS - 1);
+ hash = nexthdr & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
@@ -229,7 +229,7 @@ bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
struct sock *raw_sk;
- raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]);
+ raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
raw_sk = NULL;
@@ -328,9 +328,12 @@ static void rawv6_err(struct sock *sk, struct sk_buff *skb,
return;
harderr = icmpv6_err_convert(type, code, &err);
- if (type == ICMPV6_PKT_TOOBIG)
+ if (type == ICMPV6_PKT_TOOBIG) {
+ ip6_sk_update_pmtu(skb, sk, info);
harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
-
+ }
+ if (type == NDISC_REDIRECT)
+ ip6_sk_redirect(skb, sk);
if (np->recverr) {
u8 *payload = skb->data;
if (!inet->hdrincl)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index becb048d18d4..cf02cb97bbdd 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -78,7 +78,10 @@ static int ip6_dst_gc(struct dst_ops *ops);
static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
-static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
+static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu);
+static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
@@ -99,10 +102,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
if (!(rt->dst.flags & DST_HOST))
return NULL;
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
-
- peer = rt->rt6i_peer;
+ peer = rt6_get_peer_create(rt);
if (peer) {
u32 *old_p = __DST_METRICS_PTR(old);
unsigned long prev, new;
@@ -123,21 +123,27 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
return p;
}
-static inline const void *choose_neigh_daddr(struct rt6_info *rt, const void *daddr)
+static inline const void *choose_neigh_daddr(struct rt6_info *rt,
+ struct sk_buff *skb,
+ const void *daddr)
{
struct in6_addr *p = &rt->rt6i_gateway;
if (!ipv6_addr_any(p))
return (const void *) p;
+ else if (skb)
+ return &ipv6_hdr(skb)->daddr;
return daddr;
}
-static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
struct rt6_info *rt = (struct rt6_info *) dst;
struct neighbour *n;
- daddr = choose_neigh_daddr(rt, daddr);
+ daddr = choose_neigh_daddr(rt, skb, daddr);
n = __ipv6_neigh_lookup(&nd_tbl, dst->dev, daddr);
if (n)
return n;
@@ -152,7 +158,7 @@ static int rt6_bind_neighbour(struct rt6_info *rt, struct net_device *dev)
if (IS_ERR(n))
return PTR_ERR(n);
}
- dst_set_neighbour(&rt->dst, n);
+ rt->n = n;
return 0;
}
@@ -171,6 +177,7 @@ static struct dst_ops ip6_dst_ops_template = {
.negative_advice = ip6_negative_advice,
.link_failure = ip6_link_failure,
.update_pmtu = ip6_rt_update_pmtu,
+ .redirect = rt6_do_redirect,
.local_out = __ip6_local_out,
.neigh_lookup = ip6_neigh_lookup,
};
@@ -182,7 +189,13 @@ static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
return mtu ? : dst->dev->mtu;
}
-static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
+{
+}
+
+static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
{
}
@@ -200,6 +213,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
.mtu = ip6_blackhole_mtu,
.default_advmss = ip6_default_advmss,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
+ .redirect = ip6_rt_blackhole_redirect,
.cow_metrics = ip6_rt_blackhole_cow_metrics,
.neigh_lookup = ip6_neigh_lookup,
};
@@ -261,16 +275,20 @@ static struct rt6_info ip6_blk_hole_entry_template = {
#endif
/* allocate dst with ip6_dst_ops */
-static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
+static inline struct rt6_info *ip6_dst_alloc(struct net *net,
struct net_device *dev,
- int flags)
+ int flags,
+ struct fib6_table *table)
{
- struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
+ struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
+ 0, DST_OBSOLETE_NONE, flags);
- if (rt)
- memset(&rt->rt6i_table, 0,
- sizeof(*rt) - sizeof(struct dst_entry));
+ if (rt) {
+ struct dst_entry *dst = &rt->dst;
+ memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
+ rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
+ }
return rt;
}
@@ -278,7 +296,9 @@ static void ip6_dst_destroy(struct dst_entry *dst)
{
struct rt6_info *rt = (struct rt6_info *)dst;
struct inet6_dev *idev = rt->rt6i_idev;
- struct inet_peer *peer = rt->rt6i_peer;
+
+ if (rt->n)
+ neigh_release(rt->n);
if (!(rt->dst.flags & DST_HOST))
dst_destroy_metrics_generic(dst);
@@ -291,8 +311,8 @@ static void ip6_dst_destroy(struct dst_entry *dst)
if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
dst_release(dst->from);
- if (peer) {
- rt->rt6i_peer = NULL;
+ if (rt6_has_peer(rt)) {
+ struct inet_peer *peer = rt6_peer_ptr(rt);
inet_putpeer(peer);
}
}
@@ -306,13 +326,20 @@ static u32 rt6_peer_genid(void)
void rt6_bind_peer(struct rt6_info *rt, int create)
{
+ struct inet_peer_base *base;
struct inet_peer *peer;
- peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
- if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
- inet_putpeer(peer);
- else
- rt->rt6i_peer_genid = rt6_peer_genid();
+ base = inetpeer_base_ptr(rt->_rt6i_peer);
+ if (!base)
+ return;
+
+ peer = inet_getpeer_v6(base, &rt->rt6i_dst.addr, create);
+ if (peer) {
+ if (!rt6_set_peer(rt, peer))
+ inet_putpeer(peer);
+ else
+ rt->rt6i_peer_genid = rt6_peer_genid();
+ }
}
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -323,12 +350,19 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
struct net_device *loopback_dev =
dev_net(dev)->loopback_dev;
- if (dev != loopback_dev && idev && idev->dev == dev) {
- struct inet6_dev *loopback_idev =
- in6_dev_get(loopback_dev);
- if (loopback_idev) {
- rt->rt6i_idev = loopback_idev;
- in6_dev_put(idev);
+ if (dev != loopback_dev) {
+ if (idev && idev->dev == dev) {
+ struct inet6_dev *loopback_idev =
+ in6_dev_get(loopback_dev);
+ if (loopback_idev) {
+ rt->rt6i_idev = loopback_idev;
+ in6_dev_put(idev);
+ }
+ }
+ if (rt->n && rt->n->dev == dev) {
+ rt->n->dev = loopback_dev;
+ dev_hold(loopback_dev);
+ dev_put(dev);
}
}
}
@@ -418,7 +452,7 @@ static void rt6_probe(struct rt6_info *rt)
* to no more than one per minute.
*/
rcu_read_lock();
- neigh = rt ? dst_get_neighbour_noref(&rt->dst) : NULL;
+ neigh = rt ? rt->n : NULL;
if (!neigh || (neigh->nud_state & NUD_VALID))
goto out;
read_lock_bh(&neigh->lock);
@@ -465,7 +499,7 @@ static inline int rt6_check_neigh(struct rt6_info *rt)
int m;
rcu_read_lock();
- neigh = dst_get_neighbour_noref(&rt->dst);
+ neigh = rt->n;
if (rt->rt6i_flags & RTF_NONEXTHOP ||
!(rt->rt6i_flags & RTF_GATEWAY))
m = 1;
@@ -812,7 +846,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
if (rt) {
rt->rt6i_flags |= RTF_CACHE;
- dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour_noref_raw(&ort->dst)));
+ rt->n = neigh_clone(ort->n);
}
return rt;
}
@@ -846,7 +880,7 @@ restart:
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
- if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
+ if (!rt->n && !(rt->rt6i_flags & RTF_NONEXTHOP))
nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
else if (!(rt->dst.flags & DST_HOST))
nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -931,6 +965,8 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
{
int flags = 0;
+ fl6->flowi6_iif = net->loopback_dev->ifindex;
+
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
flags |= RT6_LOOKUP_F_IFACE;
@@ -949,12 +985,13 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
struct dst_entry *new = NULL;
- rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
+ rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);
if (rt) {
- memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
-
new = &rt->dst;
+ memset(new + 1, 0, sizeof(*rt) - sizeof(*new));
+ rt6_init_peer(rt, net->ipv6.peers);
+
new->__use = 1;
new->input = dst_discard;
new->output = dst_discard;
@@ -996,7 +1033,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) {
if (rt->rt6i_peer_genid != rt6_peer_genid()) {
- if (!rt->rt6i_peer)
+ if (!rt6_has_peer(rt))
rt6_bind_peer(rt, 0);
rt->rt6i_peer_genid = rt6_peer_genid();
}
@@ -1038,11 +1075,15 @@ static void ip6_link_failure(struct sk_buff *skb)
}
}
-static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
{
struct rt6_info *rt6 = (struct rt6_info*)dst;
+ dst_confirm(dst);
if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
+ struct net *net = dev_net(dst->dev);
+
rt6->rt6i_flags |= RTF_MODIFIED;
if (mtu < IPV6_MIN_MTU) {
u32 features = dst_metric(dst, RTAX_FEATURES);
@@ -1051,9 +1092,66 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
dst_metric_set(dst, RTAX_FEATURES, features);
}
dst_metric_set(dst, RTAX_MTU, mtu);
+ rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
}
}
+void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
+ int oif, u32 mark)
+{
+ const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_mark = mark;
+ fl6.flowi6_flags = 0;
+ fl6.daddr = iph->daddr;
+ fl6.saddr = iph->saddr;
+ fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK;
+
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (!dst->error)
+ ip6_rt_update_pmtu(dst, NULL, skb, ntohl(mtu));
+ dst_release(dst);
+}
+EXPORT_SYMBOL_GPL(ip6_update_pmtu);
+
+void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
+{
+ ip6_update_pmtu(skb, sock_net(sk), mtu,
+ sk->sk_bound_dev_if, sk->sk_mark);
+}
+EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
+
+void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
+{
+ const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_mark = mark;
+ fl6.flowi6_flags = 0;
+ fl6.daddr = iph->daddr;
+ fl6.saddr = iph->saddr;
+ fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK;
+
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (!dst->error)
+ rt6_do_redirect(dst, NULL, skb);
+ dst_release(dst);
+}
+EXPORT_SYMBOL_GPL(ip6_redirect);
+
+void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
+{
+ ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
+}
+EXPORT_SYMBOL_GPL(ip6_sk_redirect);
+
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
struct net_device *dev = dst->dev;
@@ -1110,7 +1208,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
if (unlikely(!idev))
return ERR_PTR(-ENODEV);
- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
+ rt = ip6_dst_alloc(net, dev, 0, NULL);
if (unlikely(!rt)) {
in6_dev_put(idev);
dst = ERR_PTR(-ENOMEM);
@@ -1120,7 +1218,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
if (neigh)
neigh_hold(neigh);
else {
- neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr);
+ neigh = ip6_neigh_lookup(&rt->dst, NULL, &fl6->daddr);
if (IS_ERR(neigh)) {
in6_dev_put(idev);
dst_free(&rt->dst);
@@ -1130,7 +1228,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
rt->dst.flags |= DST_HOST;
rt->dst.output = ip6_output;
- dst_set_neighbour(&rt->dst, neigh);
+ rt->n = neigh;
atomic_set(&rt->dst.__refcnt, 1);
rt->rt6i_dst.addr = fl6->daddr;
rt->rt6i_dst.plen = 128;
@@ -1292,7 +1390,7 @@ int ip6_route_add(struct fib6_config *cfg)
if (!table)
goto out;
- rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
+ rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
if (!rt) {
err = -ENOMEM;
@@ -1546,107 +1644,94 @@ static int ip6_route_del(struct fib6_config *cfg)
return err;
}
-/*
- * Handle redirects
- */
-struct ip6rd_flowi {
- struct flowi6 fl6;
- struct in6_addr gateway;
-};
-
-static struct rt6_info *__ip6_route_redirect(struct net *net,
- struct fib6_table *table,
- struct flowi6 *fl6,
- int flags)
+static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
- struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
- struct rt6_info *rt;
- struct fib6_node *fn;
+ struct net *net = dev_net(skb->dev);
+ struct netevent_redirect netevent;
+ struct rt6_info *rt, *nrt = NULL;
+ const struct in6_addr *target;
+ struct ndisc_options ndopts;
+ const struct in6_addr *dest;
+ struct neighbour *old_neigh;
+ struct inet6_dev *in6_dev;
+ struct neighbour *neigh;
+ struct icmp6hdr *icmph;
+ int optlen, on_link;
+ u8 *lladdr;
- /*
- * Get the "current" route for this destination and
- * check if the redirect has come from approriate router.
- *
- * RFC 2461 specifies that redirects should only be
- * accepted if they come from the nexthop to the target.
- * Due to the way the routes are chosen, this notion
- * is a bit fuzzy and one might need to check all possible
- * routes.
- */
+ optlen = skb->tail - skb->transport_header;
+ optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
- read_lock_bh(&table->tb6_lock);
- fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
-restart:
- for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
- /*
- * Current route is on-link; redirect is always invalid.
- *
- * Seems, previous statement is not true. It could
- * be node, which looks for us as on-link (f.e. proxy ndisc)
- * But then router serving it might decide, that we should
- * know truth 8)8) --ANK (980726).
- */
- if (rt6_check_expired(rt))
- continue;
- if (!(rt->rt6i_flags & RTF_GATEWAY))
- continue;
- if (fl6->flowi6_oif != rt->dst.dev->ifindex)
- continue;
- if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
- continue;
- break;
+ if (optlen < 0) {
+ net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
+ return;
}
- if (!rt)
- rt = net->ipv6.ip6_null_entry;
- BACKTRACK(net, &fl6->saddr);
-out:
- dst_hold(&rt->dst);
-
- read_unlock_bh(&table->tb6_lock);
-
- return rt;
-};
+ icmph = icmp6_hdr(skb);
+ target = (const struct in6_addr *) (icmph + 1);
+ dest = target + 1;
-static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
- const struct in6_addr *src,
- const struct in6_addr *gateway,
- struct net_device *dev)
-{
- int flags = RT6_LOOKUP_F_HAS_SADDR;
- struct net *net = dev_net(dev);
- struct ip6rd_flowi rdfl = {
- .fl6 = {
- .flowi6_oif = dev->ifindex,
- .daddr = *dest,
- .saddr = *src,
- },
- };
+ if (ipv6_addr_is_multicast(dest)) {
+ net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
+ return;
+ }
- rdfl.gateway = *gateway;
+ on_link = 0;
+ if (ipv6_addr_equal(dest, target)) {
+ on_link = 1;
+ } else if (ipv6_addr_type(target) !=
+ (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
+ net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
+ return;
+ }
- if (rt6_need_strict(dest))
- flags |= RT6_LOOKUP_F_IFACE;
+ in6_dev = __in6_dev_get(skb->dev);
+ if (!in6_dev)
+ return;
+ if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
+ return;
- return (struct rt6_info *)fib6_rule_lookup(net, &rdfl.fl6,
- flags, __ip6_route_redirect);
-}
+ /* RFC2461 8.1:
+ * The IP source address of the Redirect MUST be the same as the current
+ * first-hop router for the specified ICMP Destination Address.
+ */
-void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
- const struct in6_addr *saddr,
- struct neighbour *neigh, u8 *lladdr, int on_link)
-{
- struct rt6_info *rt, *nrt = NULL;
- struct netevent_redirect netevent;
- struct net *net = dev_net(neigh->dev);
+ if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
+ net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
+ return;
+ }
- rt = ip6_route_redirect(dest, src, saddr, neigh->dev);
+ lladdr = NULL;
+ if (ndopts.nd_opts_tgt_lladdr) {
+ lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
+ skb->dev);
+ if (!lladdr) {
+ net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
+ return;
+ }
+ }
+ rt = (struct rt6_info *) dst;
if (rt == net->ipv6.ip6_null_entry) {
net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
- goto out;
+ return;
}
+ /* Redirect received -> path was valid.
+ * Look, redirects are sent only in response to data packets,
+ * so that this nexthop apparently is reachable. --ANK
+ */
+ dst_confirm(&rt->dst);
+
+ neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1);
+ if (!neigh)
+ return;
+
+ /* Duplicate redirect: silently ignore. */
+ old_neigh = rt->n;
+ if (neigh == old_neigh)
+ goto out;
+
/*
* We have finally decided to accept it.
*/
@@ -1658,17 +1743,6 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
NEIGH_UPDATE_F_ISROUTER))
);
- /*
- * Redirect received -> path was valid.
- * Look, redirects are sent only in response to data packets,
- * so that this nexthop apparently is reachable. --ANK
- */
- dst_confirm(&rt->dst);
-
- /* Duplicate redirect: silently ignore. */
- if (neigh == dst_get_neighbour_noref_raw(&rt->dst))
- goto out;
-
nrt = ip6_rt_copy(rt, dest);
if (!nrt)
goto out;
@@ -1678,132 +1752,25 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
nrt->rt6i_flags &= ~RTF_GATEWAY;
nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
- dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
+ nrt->n = neigh_clone(neigh);
if (ip6_ins_rt(nrt))
goto out;
netevent.old = &rt->dst;
+ netevent.old_neigh = old_neigh;
netevent.new = &nrt->dst;
+ netevent.new_neigh = neigh;
+ netevent.daddr = dest;
call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
if (rt->rt6i_flags & RTF_CACHE) {
+ rt = (struct rt6_info *) dst_clone(&rt->dst);
ip6_del_rt(rt);
- return;
}
out:
- dst_release(&rt->dst);
-}
-
-/*
- * Handle ICMP "packet too big" messages
- * i.e. Path MTU discovery
- */
-
-static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
- struct net *net, u32 pmtu, int ifindex)
-{
- struct rt6_info *rt, *nrt;
- int allfrag = 0;
-again:
- rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
- if (!rt)
- return;
-
- if (rt6_check_expired(rt)) {
- ip6_del_rt(rt);
- goto again;
- }
-
- if (pmtu >= dst_mtu(&rt->dst))
- goto out;
-
- if (pmtu < IPV6_MIN_MTU) {
- /*
- * According to RFC2460, PMTU is set to the IPv6 Minimum Link
- * MTU (1280) and a fragment header should always be included
- * after a node receiving Too Big message reporting PMTU is
- * less than the IPv6 Minimum Link MTU.
- */
- pmtu = IPV6_MIN_MTU;
- allfrag = 1;
- }
-
- /* New mtu received -> path was valid.
- They are sent only in response to data packets,
- so that this nexthop apparently is reachable. --ANK
- */
- dst_confirm(&rt->dst);
-
- /* Host route. If it is static, it would be better
- not to override it, but add new one, so that
- when cache entry will expire old pmtu
- would return automatically.
- */
- if (rt->rt6i_flags & RTF_CACHE) {
- dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
- if (allfrag) {
- u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
- features |= RTAX_FEATURE_ALLFRAG;
- dst_metric_set(&rt->dst, RTAX_FEATURES, features);
- }
- rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
- rt->rt6i_flags |= RTF_MODIFIED;
- goto out;
- }
-
- /* Network route.
- Two cases are possible:
- 1. It is connected route. Action: COW
- 2. It is gatewayed route or NONEXTHOP route. Action: clone it.
- */
- if (!dst_get_neighbour_noref_raw(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
- nrt = rt6_alloc_cow(rt, daddr, saddr);
- else
- nrt = rt6_alloc_clone(rt, daddr);
-
- if (nrt) {
- dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
- if (allfrag) {
- u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
- features |= RTAX_FEATURE_ALLFRAG;
- dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
- }
-
- /* According to RFC 1981, detecting PMTU increase shouldn't be
- * happened within 5 mins, the recommended timer is 10 mins.
- * Here this route expiration time is set to ip6_rt_mtu_expires
- * which is 10 mins. After 10 mins the decreased pmtu is expired
- * and detecting PMTU increase will be automatically happened.
- */
- rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
- nrt->rt6i_flags |= RTF_DYNAMIC;
- ip6_ins_rt(nrt);
- }
-out:
- dst_release(&rt->dst);
-}
-
-void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
- struct net_device *dev, u32 pmtu)
-{
- struct net *net = dev_net(dev);
-
- /*
- * RFC 1981 states that a node "MUST reduce the size of the packets it
- * is sending along the path" that caused the Packet Too Big message.
- * Since it's not possible in the general case to determine which
- * interface was used to send the original packet, we update the MTU
- * on the interface that will be used to send future packets. We also
- * update the MTU on the interface that received the Packet Too Big in
- * case the original packet was forced out that interface with
- * SO_BINDTODEVICE or similar. This is the next best thing to the
- * correct behaviour, which would be to update the MTU on all
- * interfaces.
- */
- rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
- rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
+ neigh_release(neigh);
}
/*
@@ -1814,8 +1781,8 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
const struct in6_addr *dest)
{
struct net *net = dev_net(ort->dst.dev);
- struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
- ort->dst.dev, 0);
+ struct rt6_info *rt = ip6_dst_alloc(net, ort->dst.dev, 0,
+ ort->rt6i_table);
if (rt) {
rt->dst.input = ort->dst.input;
@@ -2099,8 +2066,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
bool anycast)
{
struct net *net = dev_net(idev->dev);
- struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
- net->loopback_dev, 0);
+ struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
int err;
if (!rt) {
@@ -2396,13 +2362,11 @@ static int rt6_fill_node(struct net *net,
int iif, int type, u32 pid, u32 seq,
int prefix, int nowait, unsigned int flags)
{
- const struct inet_peer *peer;
struct rtmsg *rtm;
struct nlmsghdr *nlh;
long expires;
u32 table;
struct neighbour *n;
- u32 ts, tsage;
if (prefix) { /* user wants prefix routes only */
if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
@@ -2440,10 +2404,12 @@ static int rt6_fill_node(struct net *net,
rtm->rtm_protocol = rt->rt6i_protocol;
if (rt->rt6i_flags & RTF_DYNAMIC)
rtm->rtm_protocol = RTPROT_REDIRECT;
- else if (rt->rt6i_flags & RTF_ADDRCONF)
- rtm->rtm_protocol = RTPROT_KERNEL;
- else if (rt->rt6i_flags & RTF_DEFAULT)
- rtm->rtm_protocol = RTPROT_RA;
+ else if (rt->rt6i_flags & RTF_ADDRCONF) {
+ if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
+ rtm->rtm_protocol = RTPROT_RA;
+ else
+ rtm->rtm_protocol = RTPROT_KERNEL;
+ }
if (rt->rt6i_flags & RTF_CACHE)
rtm->rtm_flags |= RTM_F_CLONED;
@@ -2500,7 +2466,7 @@ static int rt6_fill_node(struct net *net,
goto nla_put_failure;
rcu_read_lock();
- n = dst_get_neighbour_noref(&rt->dst);
+ n = rt->n;
if (n) {
if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) {
rcu_read_unlock();
@@ -2521,15 +2487,7 @@ static int rt6_fill_node(struct net *net,
else
expires = INT_MAX;
- peer = rt->rt6i_peer;
- ts = tsage = 0;
- if (peer && peer->tcp_ts_stamp) {
- ts = peer->tcp_ts;
- tsage = get_seconds() - peer->tcp_ts_stamp;
- }
-
- if (rtnl_put_cacheinfo(skb, &rt->dst, 0, ts, tsage,
- expires, rt->dst.error) < 0)
+ if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
goto nla_put_failure;
return nlmsg_end(skb, nlh);
@@ -2722,7 +2680,7 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
seq_puts(m, "00000000000000000000000000000000 00 ");
#endif
rcu_read_lock();
- n = dst_get_neighbour_noref(&rt->dst);
+ n = rt->n;
if (n) {
seq_printf(m, "%pi6", n->primary_key);
} else {
@@ -3007,6 +2965,31 @@ static struct pernet_operations ip6_route_net_ops = {
.exit = ip6_route_net_exit,
};
+static int __net_init ipv6_inetpeer_init(struct net *net)
+{
+ struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
+
+ if (!bp)
+ return -ENOMEM;
+ inet_peer_base_init(bp);
+ net->ipv6.peers = bp;
+ return 0;
+}
+
+static void __net_exit ipv6_inetpeer_exit(struct net *net)
+{
+ struct inet_peer_base *bp = net->ipv6.peers;
+
+ net->ipv6.peers = NULL;
+ inetpeer_invalidate_tree(bp);
+ kfree(bp);
+}
+
+static struct pernet_operations ipv6_inetpeer_ops = {
+ .init = ipv6_inetpeer_init,
+ .exit = ipv6_inetpeer_exit,
+};
+
static struct pernet_operations ip6_route_net_late_ops = {
.init = ip6_route_net_init_late,
.exit = ip6_route_net_exit_late,
@@ -3032,10 +3015,14 @@ int __init ip6_route_init(void)
if (ret)
goto out_kmem_cache;
- ret = register_pernet_subsys(&ip6_route_net_ops);
+ ret = register_pernet_subsys(&ipv6_inetpeer_ops);
if (ret)
goto out_dst_entries;
+ ret = register_pernet_subsys(&ip6_route_net_ops);
+ if (ret)
+ goto out_register_inetpeer;
+
ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
/* Registering of the loopback is done before this portion of code,
@@ -3088,6 +3075,8 @@ out_fib6_init:
fib6_gc_cleanup();
out_register_subsys:
unregister_pernet_subsys(&ip6_route_net_ops);
+out_register_inetpeer:
+ unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
@@ -3102,6 +3091,7 @@ void ip6_route_cleanup(void)
fib6_rules_cleanup();
xfrm6_fini();
fib6_gc_cleanup();
+ unregister_pernet_subsys(&ipv6_inetpeer_ops);
unregister_pernet_subsys(&ip6_route_net_ops);
dst_entries_destroy(&ip6_dst_blackhole_ops);
kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 60415711563f..3bd1bfc01f85 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -527,9 +527,6 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
case ICMP_PORT_UNREACH:
/* Impossible event. */
return 0;
- case ICMP_FRAG_NEEDED:
- /* Soft state for pmtu is maintained by IP core. */
- return 0;
default:
/* All others are translated to HOST_UNREACH.
rfc2003 contains "deep thoughts" about NET_UNREACH,
@@ -542,6 +539,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
if (code != ICMP_EXC_TTL)
return 0;
break;
+ case ICMP_REDIRECT:
+ break;
}
err = -ENOENT;
@@ -551,7 +550,23 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
skb->dev,
iph->daddr,
iph->saddr);
- if (t == NULL || t->parms.iph.daddr == 0)
+ if (t == NULL)
+ goto out;
+
+ if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
+ ipv4_update_pmtu(skb, dev_net(skb->dev), info,
+ t->dev->ifindex, 0, IPPROTO_IPV6, 0);
+ err = 0;
+ goto out;
+ }
+ if (type == ICMP_REDIRECT) {
+ ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
+ IPPROTO_IPV6, 0);
+ err = 0;
+ goto out;
+ }
+
+ if (t->parms.iph.daddr == 0)
goto out;
err = 0;
@@ -792,7 +807,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
}
if (tunnel->parms.iph.daddr && skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (skb->len > mtu) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8e951d8d3b81..bb46061c813a 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -21,9 +21,6 @@
#include <net/ipv6.h>
#include <net/tcp.h>
-extern int sysctl_tcp_syncookies;
-extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
@@ -180,7 +177,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
/* check for timestamp cookie support */
memset(&tcp_opt, 0, sizeof(tcp_opt));
- tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
goto out;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 9df64a50b075..f49476e2d884 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -277,22 +277,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
rt = (struct rt6_info *) dst;
if (tcp_death_row.sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp &&
- ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
- struct inet_peer *peer = rt6_get_peer(rt);
- /*
- * VJ's idea. We save last timestamp seen from
- * the destination in peer table, when entering state
- * TIME-WAIT * and initialize rx_opt.ts_recent from it,
- * when trying new connection.
- */
- if (peer) {
- inet_peer_refcheck(peer);
- if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
- tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
- tp->rx_opt.ts_recent = peer->tcp_ts;
- }
- }
- }
+ ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr))
+ tcp_fetch_timewait_stamp(sk, dst);
icsk->icsk_ext_hdr_len = 0;
if (np->opt)
@@ -329,6 +315,23 @@ failure:
return err;
}
+static void tcp_v6_mtu_reduced(struct sock *sk)
+{
+ struct dst_entry *dst;
+
+ if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
+ return;
+
+ dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
+ if (!dst)
+ return;
+
+ if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
+ tcp_sync_mss(sk, dst_mtu(dst));
+ tcp_simple_retransmit(sk);
+ }
+}
+
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
@@ -356,7 +359,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
bh_lock_sock(sk);
- if (sock_owned_by_user(sk))
+ if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
if (sk->sk_state == TCP_CLOSE)
@@ -377,49 +380,19 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
np = inet6_sk(sk);
- if (type == ICMPV6_PKT_TOOBIG) {
- struct dst_entry *dst;
-
- if (sock_owned_by_user(sk))
- goto out;
- if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
- goto out;
-
- /* icmp should have updated the destination cache entry */
- dst = __sk_dst_check(sk, np->dst_cookie);
-
- if (dst == NULL) {
- struct inet_sock *inet = inet_sk(sk);
- struct flowi6 fl6;
-
- /* BUGGG_FUTURE: Again, it is not clear how
- to handle rthdr case. Ignore this complexity
- for now.
- */
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_TCP;
- fl6.daddr = np->daddr;
- fl6.saddr = np->saddr;
- fl6.flowi6_oif = sk->sk_bound_dev_if;
- fl6.flowi6_mark = sk->sk_mark;
- fl6.fl6_dport = inet->inet_dport;
- fl6.fl6_sport = inet->inet_sport;
- security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
-
- dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
- if (IS_ERR(dst)) {
- sk->sk_err_soft = -PTR_ERR(dst);
- goto out;
- }
+ if (type == NDISC_REDIRECT) {
+ struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
- } else
- dst_hold(dst);
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+ }
- if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
- tcp_sync_mss(sk, dst_mtu(dst));
- tcp_simple_retransmit(sk);
- } /* else let the usual retransmit timer handle it */
- dst_release(dst);
+ if (type == ICMPV6_PKT_TOOBIG) {
+ tp->mtu_info = ntohl(info);
+ if (!sock_owned_by_user(sk))
+ tcp_v6_mtu_reduced(sk);
+ else
+ set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags);
goto out;
}
@@ -475,62 +448,43 @@ out:
}
-static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
+static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
+ struct flowi6 *fl6,
+ struct request_sock *req,
struct request_values *rvp,
u16 queue_mapping)
{
struct inet6_request_sock *treq = inet6_rsk(req);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff * skb;
- struct ipv6_txoptions *opt = NULL;
- struct in6_addr * final_p, final;
- struct flowi6 fl6;
- struct dst_entry *dst;
- int err;
+ int err = -ENOMEM;
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_proto = IPPROTO_TCP;
- fl6.daddr = treq->rmt_addr;
- fl6.saddr = treq->loc_addr;
- fl6.flowlabel = 0;
- fl6.flowi6_oif = treq->iif;
- fl6.flowi6_mark = sk->sk_mark;
- fl6.fl6_dport = inet_rsk(req)->rmt_port;
- fl6.fl6_sport = inet_rsk(req)->loc_port;
- security_req_classify_flow(req, flowi6_to_flowi(&fl6));
-
- opt = np->opt;
- final_p = fl6_update_dst(&fl6, opt, &final);
-
- dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
- if (IS_ERR(dst)) {
- err = PTR_ERR(dst);
- dst = NULL;
+ /* First, grab a route. */
+ if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
goto done;
- }
+
skb = tcp_make_synack(sk, dst, req, rvp);
- err = -ENOMEM;
+
if (skb) {
__tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
- fl6.daddr = treq->rmt_addr;
+ fl6->daddr = treq->rmt_addr;
skb_set_queue_mapping(skb, queue_mapping);
- err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, fl6, np->opt, np->tclass);
err = net_xmit_eval(err);
}
done:
- if (opt && opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
- dst_release(dst);
return err;
}
static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
struct request_values *rvp)
{
+ struct flowi6 fl6;
+
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
- return tcp_v6_send_synack(sk, req, rvp, 0);
+ return tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0);
}
static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1057,6 +1011,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
__u32 isn = TCP_SKB_CB(skb)->when;
struct dst_entry *dst = NULL;
+ struct flowi6 fl6;
bool want_cookie = false;
if (skb->protocol == htons(ETH_P_IP))
@@ -1085,7 +1040,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
tmp_opt.user_mss = tp->rx_opt.user_mss;
- tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
+ tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
if (tmp_opt.cookie_plus > 0 &&
tmp_opt.saw_tstamp &&
@@ -1150,8 +1105,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
treq->iif = inet6_iif(skb);
if (!isn) {
- struct inet_peer *peer = NULL;
-
if (ipv6_opt_accepted(sk, skb) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1176,14 +1129,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
*/
if (tmp_opt.saw_tstamp &&
tcp_death_row.sysctl_tw_recycle &&
- (dst = inet6_csk_route_req(sk, req)) != NULL &&
- (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
- ipv6_addr_equal((struct in6_addr *)peer->daddr.addr.a6,
- &treq->rmt_addr)) {
- inet_peer_refcheck(peer);
- if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
- (s32)(peer->tcp_ts - req->ts_recent) >
- TCP_PAWS_WINDOW) {
+ (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
+ if (!tcp_peer_is_proven(req, dst, true)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
goto drop_and_release;
}
@@ -1192,8 +1139,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
else if (!sysctl_tcp_syncookies &&
(sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
(sysctl_max_syn_backlog >> 2)) &&
- (!peer || !peer->tcp_ts_stamp) &&
- (!dst || !dst_metric(dst, RTAX_RTT))) {
+ !tcp_peer_is_proven(req, dst, false)) {
/* Without syncookies last quarter of
* backlog is filled with destinations,
* proven to be alive.
@@ -1215,7 +1161,7 @@ have_isn:
if (security_inet_conn_request(sk, skb, req))
goto drop_and_release;
- if (tcp_v6_send_synack(sk, req,
+ if (tcp_v6_send_synack(sk, dst, &fl6, req,
(struct request_values *)&tmp_ext,
skb_get_queue_mapping(skb)) ||
want_cookie)
@@ -1242,10 +1188,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
- struct ipv6_txoptions *opt;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
#endif
+ struct flowi6 fl6;
if (skb->protocol == htons(ETH_P_IP)) {
/*
@@ -1302,13 +1248,12 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
}
treq = inet6_rsk(req);
- opt = np->opt;
if (sk_acceptq_is_full(sk))
goto out_overflow;
if (!dst) {
- dst = inet6_csk_route_req(sk, req);
+ dst = inet6_csk_route_req(sk, &fl6, req);
if (!dst)
goto out;
}
@@ -1371,11 +1316,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
but we make one more one thing there: reattach optmem
to newsk.
*/
- if (opt) {
- newnp->opt = ipv6_dup_options(newsk, opt);
- if (opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
- }
+ if (np->opt)
+ newnp->opt = ipv6_dup_options(newsk, np->opt);
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (newnp->opt)
@@ -1422,8 +1364,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
out_overflow:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
- if (opt && opt != np->opt)
- sock_kfree_s(sk, opt, opt->tot_len);
dst_release(dst);
out:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
@@ -1734,42 +1674,10 @@ do_time_wait:
goto discard_it;
}
-static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
-{
- struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
- struct inet_peer *peer;
-
- if (!rt ||
- !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
- peer = inet_getpeer_v6(&np->daddr, 1);
- *release_it = true;
- } else {
- if (!rt->rt6i_peer)
- rt6_bind_peer(rt, 1);
- peer = rt->rt6i_peer;
- *release_it = false;
- }
-
- return peer;
-}
-
-static void *tcp_v6_tw_get_peer(struct sock *sk)
-{
- const struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
- const struct inet_timewait_sock *tw = inet_twsk(sk);
-
- if (tw->tw_family == AF_INET)
- return tcp_v4_tw_get_peer(sk);
-
- return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
-}
-
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp6_timewait_sock),
.twsk_unique = tcp_twsk_unique,
.twsk_destructor= tcp_twsk_destructor,
- .twsk_getpeer = tcp_v6_tw_get_peer,
};
static const struct inet_connection_sock_af_ops ipv6_specific = {
@@ -1778,7 +1686,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
.rebuild_header = inet6_sk_rebuild_header,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
- .get_peer = tcp_v6_get_peer,
.net_header_len = sizeof(struct ipv6hdr),
.net_frag_header_len = sizeof(struct frag_hdr),
.setsockopt = ipv6_setsockopt,
@@ -1810,7 +1717,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
.rebuild_header = inet_sk_rebuild_header,
.conn_request = tcp_v6_conn_request,
.syn_recv_sock = tcp_v6_syn_recv_sock,
- .get_peer = tcp_v4_get_peer,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ipv6_setsockopt,
.getsockopt = ipv6_getsockopt,
@@ -2049,6 +1955,8 @@ struct proto tcpv6_prot = {
.sendmsg = tcp_sendmsg,
.sendpage = tcp_sendpage,
.backlog_rcv = tcp_v6_do_rcv,
+ .release_cb = tcp_release_cb,
+ .mtu_reduced = tcp_v6_mtu_reduced,
.hash = tcp_v6_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f05099fc5901..99d0077b56b8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -48,6 +48,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <trace/events/skb.h>
#include "udp_impl.h"
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
@@ -385,15 +386,16 @@ try_again:
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, copied );
+ msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
- if (err)
+ if (unlikely(err)) {
+ trace_kfree_skb(skb, udpv6_recvmsg);
goto out_free;
-
+ }
if (!peeked) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
@@ -479,6 +481,11 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (sk == NULL)
return;
+ if (type == ICMPV6_PKT_TOOBIG)
+ ip6_sk_update_pmtu(skb, sk, info);
+ if (type == NDISC_REDIRECT)
+ ip6_sk_redirect(skb, sk);
+
np = inet6_sk(sk);
if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8625fba96db9..ef39812107b1 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -99,12 +99,11 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
if (!xdst->u.rt6.rt6i_idev)
return -ENODEV;
- xdst->u.rt6.rt6i_peer = rt->rt6i_peer;
- if (rt->rt6i_peer)
- atomic_inc(&rt->rt6i_peer->refcnt);
+ rt6_transfer_peer(&xdst->u.rt6, rt);
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
+ xdst->u.rt6.n = neigh_clone(rt->n);
xdst->u.rt6.rt6i_flags = rt->rt6i_flags & (RTF_ANYCAST |
RTF_LOCAL);
xdst->u.rt6.rt6i_metric = rt->rt6i_metric;
@@ -208,12 +207,22 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops)
return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
}
-static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
+static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb, u32 mtu)
{
struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
struct dst_entry *path = xdst->route;
- path->ops->update_pmtu(path, mtu);
+ path->ops->update_pmtu(path, sk, skb, mtu);
+}
+
+static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
+ struct sk_buff *skb)
+{
+ struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+ struct dst_entry *path = xdst->route;
+
+ path->ops->redirect(path, sk, skb);
}
static void xfrm6_dst_destroy(struct dst_entry *dst)
@@ -223,8 +232,10 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt6.rt6i_idev))
in6_dev_put(xdst->u.rt6.rt6i_idev);
dst_destroy_metrics_generic(dst);
- if (likely(xdst->u.rt6.rt6i_peer))
- inet_putpeer(xdst->u.rt6.rt6i_peer);
+ if (rt6_has_peer(&xdst->u.rt6)) {
+ struct inet_peer *peer = rt6_peer_ptr(&xdst->u.rt6);
+ inet_putpeer(peer);
+ }
xfrm_dst_destroy(xdst);
}
@@ -260,6 +271,7 @@ static struct dst_ops xfrm6_dst_ops = {
.protocol = cpu_to_be16(ETH_P_IPV6),
.gc = xfrm6_garbage_collect,
.update_pmtu = xfrm6_update_pmtu,
+ .redirect = xfrm6_redirect,
.cow_metrics = dst_cow_metrics_generic,
.destroy = xfrm6_dst_destroy,
.ifdown = xfrm6_dst_ifdown,
diff --git a/net/ipx/Makefile b/net/ipx/Makefile
index 4b95e3ea0f8b..440fafa9fd07 100644
--- a/net/ipx/Makefile
+++ b/net/ipx/Makefile
@@ -4,5 +4,5 @@
obj-$(CONFIG_IPX) += ipx.o
-ipx-y := af_ipx.o ipx_route.o ipx_proc.o
+ipx-y := af_ipx.o ipx_route.o ipx_proc.o pe2.o
ipx-$(CONFIG_SYSCTL) += sysctl_net_ipx.o
diff --git a/net/ethernet/pe2.c b/net/ipx/pe2.c
index 85d574addbc1..32dcd601ab32 100644
--- a/net/ethernet/pe2.c
+++ b/net/ipx/pe2.c
@@ -28,10 +28,8 @@ struct datalink_proto *make_EII_client(void)
return proto;
}
-EXPORT_SYMBOL(make_EII_client);
void destroy_EII_client(struct datalink_proto *dl)
{
kfree(dl);
}
-EXPORT_SYMBOL(destroy_EII_client);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index bb14c3477680..bb738c9f9146 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -955,7 +955,7 @@ out:
* The main difference with a "standard" connect is that with IrDA we need
* to resolve the service name into a TSAP selector (in TCP, port number
* doesn't have to be resolved).
- * Because of this service name resoltion, we can offer "auto-connect",
+ * Because of this service name resolution, we can offer "auto-connect",
* where we connect to a service without specifying a destination address.
*
* Note : by consulting "errno", the user space caller may learn the cause
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 32dcaac70b0c..4664855222f4 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -296,7 +296,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command,
skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
/* Bigger param length comes from CMD_GET_MEDIA_CHAR */
IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
- IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BORADCAST") +
+ IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BROADCAST") +
IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "MULTICAST") +
IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "HOSTED"),
GFP_ATOMIC);
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index f06947c4fa82..7152624ed5f1 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -523,7 +523,7 @@ void *hashbin_remove_first( hashbin_t *hashbin)
* Dequeue the entry...
*/
dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
- (irda_queue_t*) entry );
+ entry);
hashbin->hb_size--;
entry->q_next = NULL;
entry->q_prev = NULL;
@@ -615,7 +615,7 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
*/
if ( found ) {
dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
- (irda_queue_t*) entry );
+ entry);
hashbin->hb_size--;
/*
@@ -685,7 +685,7 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
* Dequeue the entry...
*/
dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
- (irda_queue_t*) entry );
+ entry);
hashbin->hb_size--;
entry->q_next = NULL;
entry->q_prev = NULL;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 32b2155e7ab4..393355d37b47 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1128,6 +1128,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
int headroom;
int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
int udp_len;
+ int ret = NET_XMIT_SUCCESS;
/* Check that there's enough headroom in the skb to insert IP,
* UDP and L2TP headers. If not enough, expand it to
@@ -1137,8 +1138,8 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
uhlen + hdr_len;
old_headroom = skb_headroom(skb);
if (skb_cow_head(skb, headroom)) {
- dev_kfree_skb(skb);
- goto abort;
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
}
new_headroom = skb_headroom(skb);
@@ -1156,7 +1157,8 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- dev_kfree_skb(skb);
+ kfree_skb(skb);
+ ret = NET_XMIT_DROP;
goto out_unlock;
}
@@ -1215,8 +1217,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
out_unlock:
bh_unlock_sock(sk);
-abort:
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 47b259fccd27..f9ee74deeac2 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -44,6 +44,7 @@ struct l2tp_eth {
struct list_head list;
atomic_long_t tx_bytes;
atomic_long_t tx_packets;
+ atomic_long_t tx_dropped;
atomic_long_t rx_bytes;
atomic_long_t rx_packets;
atomic_long_t rx_errors;
@@ -92,12 +93,15 @@ static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct l2tp_eth *priv = netdev_priv(dev);
struct l2tp_session *session = priv->session;
+ unsigned int len = skb->len;
+ int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
- atomic_long_add(skb->len, &priv->tx_bytes);
- atomic_long_inc(&priv->tx_packets);
-
- l2tp_xmit_skb(session, skb, session->hdr_len);
-
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+ atomic_long_add(len, &priv->tx_bytes);
+ atomic_long_inc(&priv->tx_packets);
+ } else {
+ atomic_long_inc(&priv->tx_dropped);
+ }
return NETDEV_TX_OK;
}
@@ -108,6 +112,7 @@ static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
stats->tx_packets = atomic_long_read(&priv->tx_packets);
+ stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
stats->rx_packets = atomic_long_read(&priv->rx_packets);
stats->rx_errors = atomic_long_read(&priv->rx_errors);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index ddc553e76671..d71cd9229a47 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -72,7 +72,7 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
void *hdr;
int ret = -ENOBUFS;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
goto out;
@@ -353,7 +353,7 @@ static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
goto out;
@@ -699,7 +699,7 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
goto out;
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 8ef6b9416cba..286366ef8930 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1522,8 +1522,8 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
* handler, according to whether the PPPoX socket is a for a regular session
* or the special tunnel type.
*/
-static int pppol2tp_getsockopt(struct socket *sock, int level,
- int optname, char __user *optval, int __user *optlen)
+static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
{
struct sock *sk = sock->sk;
struct l2tp_session *session;
@@ -1535,7 +1535,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,
if (level != SOL_PPPOL2TP)
return udp_prot.getsockopt(sk, level, optname, optval, optlen);
- if (get_user(len, (int __user *) optlen))
+ if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
@@ -1568,7 +1568,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level,
err = pppol2tp_session_getsockopt(sk, session, optname, &val);
err = -EFAULT;
- if (put_user(len, (int __user *) optlen))
+ if (put_user(len, optlen))
goto end_put_sess;
if (copy_to_user((void __user *) optval, &val, len))
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index fe5453c3e719..f6fe4d400502 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -1024,7 +1024,7 @@ static int llc_ui_ioctl(struct socket *sock, unsigned int cmd,
* @sock: Socket to set options on.
* @level: Socket level user is requesting operations on.
* @optname: Operation name.
- * @optval User provided operation data.
+ * @optval: User provided operation data.
* @optlen: Length of optval.
*
* Set various connection specific parameters.
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index cf4aea3ba30f..39a8d8924b9c 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -30,12 +30,12 @@
*
* SAP and connection resource manager, one per adapter.
*
- * @state - state of station
- * @xid_r_count - XID response PDU counter
- * @mac_sa - MAC source address
- * @sap_list - list of related SAPs
- * @ev_q - events entering state mach.
- * @mac_pdu_q - PDUs ready to send to MAC
+ * @state: state of station
+ * @xid_r_count: XID response PDU counter
+ * @mac_sa: MAC source address
+ * @sap_list: list of related SAPs
+ * @ev_q: events entering state mach.
+ * @mac_pdu_q: PDUs ready to send to MAC
*/
struct llc_station {
u8 state;
@@ -646,7 +646,7 @@ static void llc_station_service_events(void)
}
/**
- * llc_station_state_process: queue event and try to process queue.
+ * llc_station_state_process - queue event and try to process queue.
* @skb: Address of the event
*
* Queues an event (on the station event queue) for handling by the
@@ -672,7 +672,7 @@ static void llc_station_ack_tmr_cb(unsigned long timeout_data)
}
}
-/*
+/**
* llc_station_rcv - send received pdu to the station state machine
* @skb: received frame.
*
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 8d249d705980..63af25458fda 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -107,6 +107,19 @@ config MAC80211_DEBUGFS
Say N unless you know you need this.
+config MAC80211_MESSAGE_TRACING
+ bool "Trace all mac80211 debug messages"
+ depends on MAC80211
+ ---help---
+ Select this option to have mac80211 register the
+ mac80211_msg trace subsystem with tracepoints to
+ collect all debugging messages, independent of
+ printing them into the kernel log.
+
+ The overhead in this option is that all the messages
+ need to be present in the binary and formatted at
+ runtime for tracing.
+
menuconfig MAC80211_DEBUG_MENU
bool "Select mac80211 debugging features"
depends on MAC80211
@@ -140,26 +153,35 @@ config MAC80211_VERBOSE_DEBUG
Do not select this option.
-config MAC80211_HT_DEBUG
- bool "Verbose HT debugging"
+config MAC80211_MLME_DEBUG
+ bool "Verbose managed MLME output"
depends on MAC80211_DEBUG_MENU
---help---
- This option enables 802.11n High Throughput features
- debug tracing output.
-
- It should not be selected on production systems as some
+ Selecting this option causes mac80211 to print out
+ debugging messages for the managed-mode MLME. It
+ should not be selected on production systems as some
of the messages are remotely triggerable.
Do not select this option.
-config MAC80211_TKIP_DEBUG
- bool "Verbose TKIP debugging"
+config MAC80211_STA_DEBUG
+ bool "Verbose station debugging"
depends on MAC80211_DEBUG_MENU
---help---
Selecting this option causes mac80211 to print out
- very verbose TKIP debugging messages. It should not
- be selected on production systems as those messages
- are remotely triggerable.
+ debugging messages for station addition/removal.
+
+ Do not select this option.
+
+config MAC80211_HT_DEBUG
+ bool "Verbose HT debugging"
+ depends on MAC80211_DEBUG_MENU
+ ---help---
+ This option enables 802.11n High Throughput features
+ debug tracing output.
+
+ It should not be selected on production systems as some
+ of the messages are remotely triggerable.
Do not select this option.
@@ -174,7 +196,7 @@ config MAC80211_IBSS_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_PS_DEBUG
+config MAC80211_PS_DEBUG
bool "Verbose powersave mode debugging"
depends on MAC80211_DEBUG_MENU
---help---
@@ -186,7 +208,7 @@ config MAC80211_VERBOSE_PS_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_MPL_DEBUG
+config MAC80211_MPL_DEBUG
bool "Verbose mesh peer link debugging"
depends on MAC80211_DEBUG_MENU
depends on MAC80211_MESH
@@ -199,7 +221,7 @@ config MAC80211_VERBOSE_MPL_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_MPATH_DEBUG
+config MAC80211_MPATH_DEBUG
bool "Verbose mesh path debugging"
depends on MAC80211_DEBUG_MENU
depends on MAC80211_MESH
@@ -212,7 +234,7 @@ config MAC80211_VERBOSE_MPATH_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_MHWMP_DEBUG
+config MAC80211_MHWMP_DEBUG
bool "Verbose mesh HWMP routing debugging"
depends on MAC80211_DEBUG_MENU
depends on MAC80211_MESH
@@ -225,7 +247,7 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_MESH_SYNC_DEBUG
+config MAC80211_MESH_SYNC_DEBUG
bool "Verbose mesh mesh synchronization debugging"
depends on MAC80211_DEBUG_MENU
depends on MAC80211_MESH
@@ -236,7 +258,7 @@ config MAC80211_VERBOSE_MESH_SYNC_DEBUG
Do not select this option.
-config MAC80211_VERBOSE_TDLS_DEBUG
+config MAC80211_TDLS_DEBUG
bool "Verbose TDLS debugging"
depends on MAC80211_DEBUG_MENU
---help---
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 3e9d931bba35..a7dd110faafa 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -9,7 +9,6 @@ mac80211-y := \
scan.o offchannel.o \
ht.o agg-tx.o agg-rx.o \
ibss.o \
- work.o \
iface.o \
rate.o \
michael.o \
@@ -25,7 +24,7 @@ mac80211-y := \
wme.o \
event.o \
chan.o \
- driver-trace.o mlme.o
+ trace.o mlme.o
mac80211-$(CONFIG_MAC80211_LEDS) += led.o
mac80211-$(CONFIG_MAC80211_DEBUGFS) += \
@@ -43,7 +42,7 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
mac80211-$(CONFIG_PM) += pm.o
-CFLAGS_driver-trace.o := -I$(src)
+CFLAGS_trace.o := -I$(src)
# objects for PID algorithm
rc80211_pid-y := rc80211_pid_algo.o
@@ -59,4 +58,4 @@ mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y)
mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
-ccflags-y += -D__CHECK_ENDIAN__
+ccflags-y += -D__CHECK_ENDIAN__ -DDEBUG
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index c649188314cc..186d9919b043 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -74,18 +74,17 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG
+ ht_dbg(sta->sdata,
"Rx BA session stop requested for %pM tid %u %s reason: %d\n",
sta->sta.addr, tid,
initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator",
(int)reason);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
&sta->sta, tid, NULL, 0))
- printk(KERN_DEBUG "HW problem - can not stop rx "
- "aggregation for tid %d\n", tid);
+ sdata_info(sta->sdata,
+ "HW problem - can not stop rx aggregation for tid %d\n",
+ tid);
/* check if this is a self generated aggregation halt */
if (initiator == WLAN_BACK_RECIPIENT && tx)
@@ -160,9 +159,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
}
rcu_read_unlock();
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
-#endif
+ ht_dbg(sta->sdata, "rx session timer expired on tid %d\n", (u16)*ptid);
+
set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}
@@ -249,10 +247,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
status = WLAN_STATUS_REQUEST_DECLINED;
if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Suspend in progress. "
- "Denying ADDBA request\n");
-#endif
+ ht_dbg(sta->sdata, "Suspend in progress - Denying ADDBA request\n");
goto end_no_lock;
}
@@ -264,10 +259,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
(!(sta->sta.ht_cap.cap & IEEE80211_HT_CAP_DELAY_BA))) ||
(buf_size > IEEE80211_MAX_AMPDU_BUF)) {
status = WLAN_STATUS_INVALID_QOS_PARAM;
-#ifdef CONFIG_MAC80211_HT_DEBUG
- net_dbg_ratelimited("AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
- mgmt->sa, tid, ba_policy, buf_size);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg_ratelimited(sta->sdata,
+ "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n",
+ mgmt->sa, tid, ba_policy, buf_size);
goto end_no_lock;
}
/* determine default buffer size */
@@ -282,10 +276,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
mutex_lock(&sta->ampdu_mlme.mtx);
if (sta->ampdu_mlme.tid_rx[tid]) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- net_dbg_ratelimited("unexpected AddBA Req from %pM on tid %u\n",
- mgmt->sa, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg_ratelimited(sta->sdata,
+ "unexpected AddBA Req from %pM on tid %u\n",
+ mgmt->sa, tid);
/* delete existing Rx BA session on the same tid */
___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
@@ -324,10 +317,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
&sta->sta, tid, &start_seq_num, 0);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
+ ht_dbg(sta->sdata, "Rx A-MPDU request on tid %d result %d\n", tid, ret);
if (ret) {
kfree(tid_agg_rx->reorder_buf);
kfree(tid_agg_rx->reorder_time);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 7cf07158805c..d0deb3edae21 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -135,7 +135,8 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
bar->control = cpu_to_le16(bar_control);
bar->start_seq_num = cpu_to_le16(ssn);
- IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
+ IEEE80211_TX_CTL_REQ_TX_STATUS;
ieee80211_tx_skb_tid(sdata, skb, tid);
}
EXPORT_SYMBOL(ieee80211_send_bar);
@@ -184,10 +185,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
spin_unlock_bh(&sta->lock);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
+ ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
sta->sta.addr, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
del_timer_sync(&tid_tx->addba_resp_timer);
del_timer_sync(&tid_tx->session_timer);
@@ -253,17 +252,13 @@ static void sta_addba_resp_timer_expired(unsigned long data)
if (!tid_tx ||
test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
rcu_read_unlock();
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "timer expired on tid %d but we are not "
- "(or no longer) expecting addBA response there\n",
- tid);
-#endif
+ ht_dbg(sta->sdata,
+ "timer expired on tid %d but we are not (or no longer) expecting addBA response there\n",
+ tid);
return;
}
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
-#endif
+ ht_dbg(sta->sdata, "addBA response timer expired on tid %d\n", tid);
ieee80211_stop_tx_ba_session(&sta->sta, tid);
rcu_read_unlock();
@@ -323,8 +318,9 @@ ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
ieee80211_stop_queue_agg(sdata, tid);
- if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
- " from the pending queue\n", tid))
+ if (WARN(!tid_tx,
+ "TID %d gone but expected when splicing aggregates from the pending queue\n",
+ tid))
return;
if (!skb_queue_empty(&tid_tx->pending)) {
@@ -372,10 +368,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
&sta->sta, tid, &start_seq_num, 0);
if (ret) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - HW unavailable for"
- " tid %d\n", tid);
-#endif
+ ht_dbg(sdata,
+ "BA request denied - HW unavailable for tid %d\n", tid);
spin_lock_bh(&sta->lock);
ieee80211_agg_splice_packets(sdata, tid_tx, tid);
ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -388,9 +382,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
/* activate the timer for the recipient's addBA response */
mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
-#endif
+ ht_dbg(sdata, "activated addBA response timer on tid %d\n", tid);
spin_lock_bh(&sta->lock);
sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
@@ -437,9 +429,7 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
rcu_read_unlock();
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
-#endif
+ ht_dbg(sta->sdata, "tx session timer expired on tid %d\n", (u16)*ptid);
ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
}
@@ -463,10 +453,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
(local->hw.flags & IEEE80211_HW_TX_AMPDU_SETUP_IN_HW))
return -EINVAL;
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Open BA session requested for %pM tid %u\n",
+ ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
pubsta->addr, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
if (sdata->vif.type != NL80211_IFTYPE_STATION &&
sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
@@ -476,10 +464,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
return -EINVAL;
if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA sessions blocked. "
- "Denying BA session request\n");
-#endif
+ ht_dbg(sdata,
+ "BA sessions blocked - Denying BA session request\n");
return -EINVAL;
}
@@ -497,10 +483,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
*/
if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
!sta->sta.ht_cap.ht_supported) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - IBSS STA %pM"
- "does not advertise HT support\n", pubsta->addr);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg(sdata,
+ "BA request denied - IBSS STA %pM does not advertise HT support\n",
+ pubsta->addr);
return -EINVAL;
}
@@ -520,12 +505,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
HT_AGG_RETRIES_PERIOD)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - "
- "waiting a grace period after %d failed requests "
- "on tid %u\n",
+ ht_dbg(sdata,
+ "BA request denied - waiting a grace period after %d failed requests on tid %u\n",
sta->ampdu_mlme.addba_req_num[tid], tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
ret = -EBUSY;
goto err_unlock_sta;
}
@@ -533,10 +515,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/* check if the TID is not in aggregation flow already */
if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "BA request denied - session is not "
- "idle on tid %u\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg(sdata,
+ "BA request denied - session is not idle on tid %u\n",
+ tid);
ret = -EAGAIN;
goto err_unlock_sta;
}
@@ -591,9 +572,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
-#endif
+ ht_dbg(sta->sdata, "Aggregation is on for tid %d\n", tid);
drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_OPERATIONAL,
@@ -627,10 +606,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
trace_api_start_tx_ba_cb(sdata, ra, tid);
if (tid >= STA_TID_NUM) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
- tid, STA_TID_NUM);
-#endif
+ ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
+ tid, STA_TID_NUM);
return;
}
@@ -638,9 +615,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
sta = sta_info_get_bss(sdata, ra);
if (!sta) {
mutex_unlock(&local->sta_mtx);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Could not find station: %pM\n", ra);
-#endif
+ ht_dbg(sdata, "Could not find station: %pM\n", ra);
return;
}
@@ -648,9 +623,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (WARN_ON(!tid_tx)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "addBA was not requested!\n");
-#endif
+ ht_dbg(sdata, "addBA was not requested!\n");
goto unlock;
}
@@ -750,25 +723,18 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
trace_api_stop_tx_ba_cb(sdata, ra, tid);
if (tid >= STA_TID_NUM) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
- tid, STA_TID_NUM);
-#endif
+ ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
+ tid, STA_TID_NUM);
return;
}
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Stopping Tx BA session for %pM tid %d\n",
- ra, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, ra);
if (!sta) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "Could not find station: %pM\n", ra);
-#endif
+ ht_dbg(sdata, "Could not find station: %pM\n", ra);
goto unlock;
}
@@ -777,9 +743,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
-#endif
+ ht_dbg(sdata, "unexpected callback to A-MPDU stop\n");
goto unlock_sta;
}
@@ -855,17 +819,13 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
goto out;
if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
-#endif
+ ht_dbg(sta->sdata, "wrong addBA response token, tid %d\n", tid);
goto out;
}
del_timer_sync(&tid_tx->addba_resp_timer);
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
-#endif
+ ht_dbg(sta->sdata, "switched off addBA timer for tid %d\n", tid);
/*
* addba_resp_timer may have fired before we got here, and
@@ -874,11 +834,9 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
*/
if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
- printk(KERN_DEBUG
+ ht_dbg(sta->sdata,
"got addBA resp for tid %d but we already gave up\n",
tid);
-#endif
goto out;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 7d5108a867ad..d41974aacf51 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -20,31 +20,31 @@
#include "rate.h"
#include "mesh.h"
-static struct net_device *ieee80211_add_iface(struct wiphy *wiphy, char *name,
- enum nl80211_iftype type,
- u32 *flags,
- struct vif_params *params)
+static struct wireless_dev *ieee80211_add_iface(struct wiphy *wiphy, char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
- struct net_device *dev;
+ struct wireless_dev *wdev;
struct ieee80211_sub_if_data *sdata;
int err;
- err = ieee80211_if_add(local, name, &dev, type, params);
+ err = ieee80211_if_add(local, name, &wdev, type, params);
if (err)
return ERR_PTR(err);
if (type == NL80211_IFTYPE_MONITOR && flags) {
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
sdata->u.mntr_flags = *flags;
}
- return dev;
+ return wdev;
}
-static int ieee80211_del_iface(struct wiphy *wiphy, struct net_device *dev)
+static int ieee80211_del_iface(struct wiphy *wiphy, struct wireless_dev *wdev)
{
- ieee80211_if_remove(IEEE80211_DEV_TO_SUB_IF(dev));
+ ieee80211_if_remove(IEEE80211_WDEV_TO_SUB_IF(wdev));
return 0;
}
@@ -353,6 +353,7 @@ void sta_set_rate_info_tx(struct sta_info *sta,
static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
struct timespec uptime;
sinfo->generation = sdata->local->sta_generation;
@@ -388,7 +389,9 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) ||
(sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) {
sinfo->filled |= STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
- sinfo->signal = (s8)sta->last_signal;
+ if (!local->ops->get_rssi ||
+ drv_get_rssi(local, sdata, &sta->sta, &sinfo->signal))
+ sinfo->signal = (s8)sta->last_signal;
sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal);
}
@@ -517,7 +520,7 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
* network device.
*/
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
sta = sta_info_get_bss(sdata, sdata->u.mgd.bssid);
@@ -546,7 +549,7 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
data[i] = (u8)sinfo.signal_avg;
i++;
} else {
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ list_for_each_entry(sta, &local->sta_list, list) {
/* Make sure this station belongs to the proper dev */
if (sta->sdata->dev != dev)
continue;
@@ -603,7 +606,7 @@ do_survey:
else
data[i++] = -1LL;
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
if (WARN_ON(i != STA_STATS_LEN))
return;
@@ -629,10 +632,11 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
int idx, u8 *mac, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
int ret = -ENOENT;
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get_by_idx(sdata, idx);
if (sta) {
@@ -641,7 +645,7 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
sta_set_sinfo(sta, sinfo);
}
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return ret;
}
@@ -658,10 +662,11 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
int ret = -ENOENT;
- rcu_read_lock();
+ mutex_lock(&local->sta_mtx);
sta = sta_info_get_bss(sdata, mac);
if (sta) {
@@ -669,11 +674,54 @@ static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
sta_set_sinfo(sta, sinfo);
}
- rcu_read_unlock();
+ mutex_unlock(&local->sta_mtx);
return ret;
}
+static int ieee80211_set_channel(struct wiphy *wiphy,
+ struct net_device *netdev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = NULL;
+
+ if (netdev)
+ sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
+
+ switch (ieee80211_get_channel_mode(local, NULL)) {
+ case CHAN_MODE_HOPPING:
+ return -EBUSY;
+ case CHAN_MODE_FIXED:
+ if (local->oper_channel != chan ||
+ (!sdata && local->_oper_channel_type != channel_type))
+ return -EBUSY;
+ if (!sdata && local->_oper_channel_type == channel_type)
+ return 0;
+ break;
+ case CHAN_MODE_UNDEFINED:
+ break;
+ }
+
+ if (!ieee80211_set_channel_type(local, sdata, channel_type))
+ return -EBUSY;
+
+ local->oper_channel = chan;
+
+ /* auto-detects changes */
+ ieee80211_hw_config(local, 0);
+
+ return 0;
+}
+
+static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type)
+{
+ return ieee80211_set_channel(wiphy, NULL, chan, channel_type);
+}
+
static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
const u8 *resp, size_t resp_len)
{
@@ -788,6 +836,11 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (old)
return -EALREADY;
+ err = ieee80211_set_channel(wiphy, dev, params->channel,
+ params->channel_type);
+ if (err)
+ return err;
+
/*
* Apply control port protocol, this allows us to
* not encrypt dynamic WEP control frames.
@@ -864,6 +917,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
kfree_rcu(old, rcu_head);
+ sta_info_flush(sdata->local, sdata);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
return 0;
@@ -1482,7 +1536,7 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask))
conf->dot11MeshTTL = nconf->dot11MeshTTL;
if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask))
- conf->dot11MeshTTL = nconf->element_ttl;
+ conf->element_ttl = nconf->element_ttl;
if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask))
conf->auto_open_plinks = nconf->auto_open_plinks;
if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask))
@@ -1517,17 +1571,16 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
* announcements, so require this ifmsh to also be a root node
* */
if (nconf->dot11MeshGateAnnouncementProtocol &&
- !conf->dot11MeshHWMPRootMode) {
- conf->dot11MeshHWMPRootMode = 1;
+ !(conf->dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)) {
+ conf->dot11MeshHWMPRootMode = IEEE80211_PROACTIVE_RANN;
ieee80211_mesh_root_setup(ifmsh);
}
conf->dot11MeshGateAnnouncementProtocol =
nconf->dot11MeshGateAnnouncementProtocol;
}
- if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask)) {
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_RANN_INTERVAL, mask))
conf->dot11MeshHWMPRannInterval =
nconf->dot11MeshHWMPRannInterval;
- }
if (_chg_mesh_attr(NL80211_MESHCONF_FORWARDING, mask))
conf->dot11MeshForwarding = nconf->dot11MeshForwarding;
if (_chg_mesh_attr(NL80211_MESHCONF_RSSI_THRESHOLD, mask)) {
@@ -1543,6 +1596,15 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
sdata->vif.bss_conf.ht_operation_mode = nconf->ht_opmode;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
}
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT, mask))
+ conf->dot11MeshHWMPactivePathToRootTimeout =
+ nconf->dot11MeshHWMPactivePathToRootTimeout;
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOT_INTERVAL, mask))
+ conf->dot11MeshHWMProotInterval =
+ nconf->dot11MeshHWMProotInterval;
+ if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL, mask))
+ conf->dot11MeshHWMPconfirmationInterval =
+ nconf->dot11MeshHWMPconfirmationInterval;
return 0;
}
@@ -1558,6 +1620,12 @@ static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev,
err = copy_mesh_setup(ifmsh, setup);
if (err)
return err;
+
+ err = ieee80211_set_channel(wiphy, dev, setup->channel,
+ setup->channel_type);
+ if (err)
+ return err;
+
ieee80211_start_mesh(sdata);
return 0;
@@ -1674,54 +1742,7 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
return -EINVAL;
}
- return 0;
-}
-
-static int ieee80211_set_channel(struct wiphy *wiphy,
- struct net_device *netdev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
-{
- struct ieee80211_local *local = wiphy_priv(wiphy);
- struct ieee80211_sub_if_data *sdata = NULL;
- struct ieee80211_channel *old_oper;
- enum nl80211_channel_type old_oper_type;
- enum nl80211_channel_type old_vif_oper_type= NL80211_CHAN_NO_HT;
-
- if (netdev)
- sdata = IEEE80211_DEV_TO_SUB_IF(netdev);
-
- switch (ieee80211_get_channel_mode(local, NULL)) {
- case CHAN_MODE_HOPPING:
- return -EBUSY;
- case CHAN_MODE_FIXED:
- if (local->oper_channel != chan)
- return -EBUSY;
- if (!sdata && local->_oper_channel_type == channel_type)
- return 0;
- break;
- case CHAN_MODE_UNDEFINED:
- break;
- }
-
- if (sdata)
- old_vif_oper_type = sdata->vif.bss_conf.channel_type;
- old_oper_type = local->_oper_channel_type;
-
- if (!ieee80211_set_channel_type(local, sdata, channel_type))
- return -EBUSY;
-
- old_oper = local->oper_channel;
- local->oper_channel = chan;
-
- /* Update driver if changes were actually made. */
- if ((old_oper != local->oper_channel) ||
- (old_oper_type != local->_oper_channel_type))
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
-
- if (sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR &&
- old_vif_oper_type != sdata->vif.bss_conf.channel_type)
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
return 0;
}
@@ -1743,10 +1764,11 @@ static int ieee80211_resume(struct wiphy *wiphy)
#endif
static int ieee80211_scan(struct wiphy *wiphy,
- struct net_device *dev,
struct cfg80211_scan_request *req)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_sub_if_data *sdata;
+
+ sdata = IEEE80211_WDEV_TO_SUB_IF(req->wdev);
switch (ieee80211_vif_type_p2p(&sdata->vif)) {
case NL80211_IFTYPE_STATION:
@@ -2111,143 +2133,291 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
return 0;
}
-static int ieee80211_remain_on_channel_hw(struct ieee80211_local *local,
- struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type chantype,
- unsigned int duration, u64 *cookie)
+static int ieee80211_start_roc_work(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie,
+ struct sk_buff *txskb)
{
+ struct ieee80211_roc_work *roc, *tmp;
+ bool queued = false;
int ret;
- u32 random_cookie;
lockdep_assert_held(&local->mtx);
- if (local->hw_roc_cookie)
- return -EBUSY;
- /* must be nonzero */
- random_cookie = random32() | 1;
-
- *cookie = random_cookie;
- local->hw_roc_dev = dev;
- local->hw_roc_cookie = random_cookie;
- local->hw_roc_channel = chan;
- local->hw_roc_channel_type = chantype;
- local->hw_roc_duration = duration;
- ret = drv_remain_on_channel(local, chan, chantype, duration);
+ roc = kzalloc(sizeof(*roc), GFP_KERNEL);
+ if (!roc)
+ return -ENOMEM;
+
+ roc->chan = channel;
+ roc->chan_type = channel_type;
+ roc->duration = duration;
+ roc->req_duration = duration;
+ roc->frame = txskb;
+ roc->mgmt_tx_cookie = (unsigned long)txskb;
+ roc->sdata = sdata;
+ INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
+ INIT_LIST_HEAD(&roc->dependents);
+
+ /* if there's one pending or we're scanning, queue this one */
+ if (!list_empty(&local->roc_list) || local->scanning)
+ goto out_check_combine;
+
+ /* if not HW assist, just queue & schedule work */
+ if (!local->ops->remain_on_channel) {
+ ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
+ goto out_queue;
+ }
+
+ /* otherwise actually kick it off here (for error handling) */
+
+ /*
+ * If the duration is zero, then the driver
+ * wouldn't actually do anything. Set it to
+ * 10 for now.
+ *
+ * TODO: cancel the off-channel operation
+ * when we get the SKB's TX status and
+ * the wait time was zero before.
+ */
+ if (!duration)
+ duration = 10;
+
+ ret = drv_remain_on_channel(local, channel, channel_type, duration);
if (ret) {
- local->hw_roc_channel = NULL;
- local->hw_roc_cookie = 0;
+ kfree(roc);
+ return ret;
}
- return ret;
+ roc->started = true;
+ goto out_queue;
+
+ out_check_combine:
+ list_for_each_entry(tmp, &local->roc_list, list) {
+ if (tmp->chan != channel || tmp->chan_type != channel_type)
+ continue;
+
+ /*
+ * Extend this ROC if possible:
+ *
+ * If it hasn't started yet, just increase the duration
+ * and add the new one to the list of dependents.
+ */
+ if (!tmp->started) {
+ list_add_tail(&roc->list, &tmp->dependents);
+ tmp->duration = max(tmp->duration, roc->duration);
+ queued = true;
+ break;
+ }
+
+ /* If it has already started, it's more difficult ... */
+ if (local->ops->remain_on_channel) {
+ unsigned long j = jiffies;
+
+ /*
+ * In the offloaded ROC case, if it hasn't begun, add
+ * this new one to the dependent list to be handled
+ * when the the master one begins. If it has begun,
+ * check that there's still a minimum time left and
+ * if so, start this one, transmitting the frame, but
+ * add it to the list directly after this one with a
+ * a reduced time so we'll ask the driver to execute
+ * it right after finishing the previous one, in the
+ * hope that it'll also be executed right afterwards,
+ * effectively extending the old one.
+ * If there's no minimum time left, just add it to the
+ * normal list.
+ */
+ if (!tmp->hw_begun) {
+ list_add_tail(&roc->list, &tmp->dependents);
+ queued = true;
+ break;
+ }
+
+ if (time_before(j + IEEE80211_ROC_MIN_LEFT,
+ tmp->hw_start_time +
+ msecs_to_jiffies(tmp->duration))) {
+ int new_dur;
+
+ ieee80211_handle_roc_started(roc);
+
+ new_dur = roc->duration -
+ jiffies_to_msecs(tmp->hw_start_time +
+ msecs_to_jiffies(
+ tmp->duration) -
+ j);
+
+ if (new_dur > 0) {
+ /* add right after tmp */
+ list_add(&roc->list, &tmp->list);
+ } else {
+ list_add_tail(&roc->list,
+ &tmp->dependents);
+ }
+ queued = true;
+ }
+ } else if (del_timer_sync(&tmp->work.timer)) {
+ unsigned long new_end;
+
+ /*
+ * In the software ROC case, cancel the timer, if
+ * that fails then the finish work is already
+ * queued/pending and thus we queue the new ROC
+ * normally, if that succeeds then we can extend
+ * the timer duration and TX the frame (if any.)
+ */
+
+ list_add_tail(&roc->list, &tmp->dependents);
+ queued = true;
+
+ new_end = jiffies + msecs_to_jiffies(roc->duration);
+
+ /* ok, it was started & we canceled timer */
+ if (time_after(new_end, tmp->work.timer.expires))
+ mod_timer(&tmp->work.timer, new_end);
+ else
+ add_timer(&tmp->work.timer);
+
+ ieee80211_handle_roc_started(roc);
+ }
+ break;
+ }
+
+ out_queue:
+ if (!queued)
+ list_add_tail(&roc->list, &local->roc_list);
+
+ /*
+ * cookie is either the roc (for normal roc)
+ * or the SKB (for mgmt TX)
+ */
+ if (txskb)
+ *cookie = (unsigned long)txskb;
+ else
+ *cookie = (unsigned long)roc;
+
+ return 0;
}
static int ieee80211_remain_on_channel(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration,
u64 *cookie)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
struct ieee80211_local *local = sdata->local;
+ int ret;
- if (local->ops->remain_on_channel) {
- int ret;
-
- mutex_lock(&local->mtx);
- ret = ieee80211_remain_on_channel_hw(local, dev,
- chan, channel_type,
- duration, cookie);
- local->hw_roc_for_tx = false;
- mutex_unlock(&local->mtx);
-
- return ret;
- }
+ mutex_lock(&local->mtx);
+ ret = ieee80211_start_roc_work(local, sdata, chan, channel_type,
+ duration, cookie, NULL);
+ mutex_unlock(&local->mtx);
- return ieee80211_wk_remain_on_channel(sdata, chan, channel_type,
- duration, cookie);
+ return ret;
}
-static int ieee80211_cancel_remain_on_channel_hw(struct ieee80211_local *local,
- u64 cookie)
+static int ieee80211_cancel_roc(struct ieee80211_local *local,
+ u64 cookie, bool mgmt_tx)
{
+ struct ieee80211_roc_work *roc, *tmp, *found = NULL;
int ret;
- lockdep_assert_held(&local->mtx);
+ mutex_lock(&local->mtx);
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ struct ieee80211_roc_work *dep, *tmp2;
- if (local->hw_roc_cookie != cookie)
- return -ENOENT;
+ list_for_each_entry_safe(dep, tmp2, &roc->dependents, list) {
+ if (!mgmt_tx && (unsigned long)dep != cookie)
+ continue;
+ else if (mgmt_tx && dep->mgmt_tx_cookie != cookie)
+ continue;
+ /* found dependent item -- just remove it */
+ list_del(&dep->list);
+ mutex_unlock(&local->mtx);
- ret = drv_cancel_remain_on_channel(local);
- if (ret)
- return ret;
+ ieee80211_roc_notify_destroy(dep);
+ return 0;
+ }
- local->hw_roc_cookie = 0;
- local->hw_roc_channel = NULL;
+ if (!mgmt_tx && (unsigned long)roc != cookie)
+ continue;
+ else if (mgmt_tx && roc->mgmt_tx_cookie != cookie)
+ continue;
+
+ found = roc;
+ break;
+ }
- ieee80211_recalc_idle(local);
+ if (!found) {
+ mutex_unlock(&local->mtx);
+ return -ENOENT;
+ }
- return 0;
-}
+ /*
+ * We found the item to cancel, so do that. Note that it
+ * may have dependents, which we also cancel (and send
+ * the expired signal for.) Not doing so would be quite
+ * tricky here, but we may need to fix it later.
+ */
-static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
- struct net_device *dev,
- u64 cookie)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = sdata->local;
+ if (local->ops->remain_on_channel) {
+ if (found->started) {
+ ret = drv_cancel_remain_on_channel(local);
+ if (WARN_ON_ONCE(ret)) {
+ mutex_unlock(&local->mtx);
+ return ret;
+ }
+ }
- if (local->ops->cancel_remain_on_channel) {
- int ret;
+ list_del(&found->list);
- mutex_lock(&local->mtx);
- ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
+ if (found->started)
+ ieee80211_start_next_roc(local);
mutex_unlock(&local->mtx);
- return ret;
+ ieee80211_roc_notify_destroy(found);
+ } else {
+ /* work may be pending so use it all the time */
+ found->abort = true;
+ ieee80211_queue_delayed_work(&local->hw, &found->work, 0);
+
+ mutex_unlock(&local->mtx);
+
+ /* work will clean up etc */
+ flush_delayed_work(&found->work);
}
- return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
+ return 0;
}
-static enum work_done_result
-ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb)
+static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ u64 cookie)
{
- /*
- * Use the data embedded in the work struct for reporting
- * here so if the driver mangled the SKB before dropping
- * it (which is the only way we really should get here)
- * then we don't report mangled data.
- *
- * If there was no wait time, then by the time we get here
- * the driver will likely not have reported the status yet,
- * so in that case userspace will have to deal with it.
- */
-
- if (wk->offchan_tx.wait && !wk->offchan_tx.status)
- cfg80211_mgmt_tx_status(wk->sdata->dev,
- (unsigned long) wk->offchan_tx.frame,
- wk->data, wk->data_len, false, GFP_KERNEL);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct ieee80211_local *local = sdata->local;
- return WORK_DONE_DESTROY;
+ return ieee80211_cancel_roc(local, cookie, false);
}
-static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
+static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
bool dont_wait_for_ack, u64 *cookie)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct sta_info *sta;
- struct ieee80211_work *wk;
const struct ieee80211_mgmt *mgmt = (void *)buf;
+ bool need_offchan = false;
u32 flags;
- bool is_offchan = false;
+ int ret;
if (dont_wait_for_ack)
flags = IEEE80211_TX_CTL_NO_ACK;
@@ -2255,33 +2425,28 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
IEEE80211_TX_CTL_REQ_TX_STATUS;
- /* Check that we are on the requested channel for transmission */
- if (chan != local->tmp_channel &&
- chan != local->oper_channel)
- is_offchan = true;
- if (channel_type_valid &&
- (channel_type != local->tmp_channel_type &&
- channel_type != local->_oper_channel_type))
- is_offchan = true;
-
- if (chan == local->hw_roc_channel) {
- /* TODO: check channel type? */
- is_offchan = false;
- flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
- }
-
if (no_cck)
flags |= IEEE80211_TX_CTL_NO_CCK_RATE;
- if (is_offchan && !offchan)
- return -EBUSY;
-
switch (sdata->vif.type) {
case NL80211_IFTYPE_ADHOC:
+ if (!sdata->vif.bss_conf.ibss_joined)
+ need_offchan = true;
+ /* fall through */
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+ if (ieee80211_vif_is_mesh(&sdata->vif) &&
+ !sdata->u.mesh.mesh_id_len)
+ need_offchan = true;
+ /* fall through */
+#endif
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_P2P_GO:
- case NL80211_IFTYPE_MESH_POINT:
+ if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
+ !ieee80211_vif_is_mesh(&sdata->vif) &&
+ !rcu_access_pointer(sdata->bss->beacon))
+ need_offchan = true;
if (!ieee80211_is_action(mgmt->frame_control) ||
mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
break;
@@ -2293,167 +2458,101 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
+ if (!sdata->u.mgd.associated)
+ need_offchan = true;
break;
default:
return -EOPNOTSUPP;
}
+ mutex_lock(&local->mtx);
+
+ /* Check if the operating channel is the requested channel */
+ if (!need_offchan) {
+ need_offchan = chan != local->oper_channel;
+ if (channel_type_valid &&
+ channel_type != local->_oper_channel_type)
+ need_offchan = true;
+ }
+
+ if (need_offchan && !offchan) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
skb_reserve(skb, local->hw.extra_tx_headroom);
memcpy(skb_put(skb, len), buf, len);
IEEE80211_SKB_CB(skb)->flags = flags;
- if (flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- IEEE80211_SKB_CB(skb)->hw_queue =
- local->hw.offchannel_tx_hw_queue;
-
skb->dev = sdata->dev;
- *cookie = (unsigned long) skb;
-
- if (is_offchan && local->ops->remain_on_channel) {
- unsigned int duration;
- int ret;
-
- mutex_lock(&local->mtx);
- /*
- * If the duration is zero, then the driver
- * wouldn't actually do anything. Set it to
- * 100 for now.
- *
- * TODO: cancel the off-channel operation
- * when we get the SKB's TX status and
- * the wait time was zero before.
- */
- duration = 100;
- if (wait)
- duration = wait;
- ret = ieee80211_remain_on_channel_hw(local, dev, chan,
- channel_type,
- duration, cookie);
- if (ret) {
- kfree_skb(skb);
- mutex_unlock(&local->mtx);
- return ret;
- }
-
- local->hw_roc_for_tx = true;
- local->hw_roc_duration = wait;
-
- /*
- * queue up frame for transmission after
- * ieee80211_ready_on_channel call
- */
+ if (!need_offchan) {
+ *cookie = (unsigned long) skb;
+ ieee80211_tx_skb(sdata, skb);
+ ret = 0;
+ goto out_unlock;
+ }
- /* modify cookie to prevent API mismatches */
- *cookie ^= 2;
- IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
+ if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
IEEE80211_SKB_CB(skb)->hw_queue =
local->hw.offchannel_tx_hw_queue;
- local->hw_roc_skb = skb;
- local->hw_roc_skb_for_status = skb;
- mutex_unlock(&local->mtx);
-
- return 0;
- }
-
- /*
- * Can transmit right away if the channel was the
- * right one and there's no wait involved... If a
- * wait is involved, we might otherwise not be on
- * the right channel for long enough!
- */
- if (!is_offchan && !wait && !sdata->vif.bss_conf.idle) {
- ieee80211_tx_skb(sdata, skb);
- return 0;
- }
- wk = kzalloc(sizeof(*wk) + len, GFP_KERNEL);
- if (!wk) {
+ /* This will handle all kinds of coalescing and immediate TX */
+ ret = ieee80211_start_roc_work(local, sdata, chan, channel_type,
+ wait, cookie, skb);
+ if (ret)
kfree_skb(skb);
- return -ENOMEM;
- }
-
- wk->type = IEEE80211_WORK_OFFCHANNEL_TX;
- wk->chan = chan;
- wk->chan_type = channel_type;
- wk->sdata = sdata;
- wk->done = ieee80211_offchan_tx_done;
- wk->offchan_tx.frame = skb;
- wk->offchan_tx.wait = wait;
- wk->data_len = len;
- memcpy(wk->data, buf, len);
-
- ieee80211_add_work(wk);
- return 0;
+ out_unlock:
+ mutex_unlock(&local->mtx);
+ return ret;
}
static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u64 cookie)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_work *wk;
- int ret = -ENOENT;
-
- mutex_lock(&local->mtx);
-
- if (local->ops->cancel_remain_on_channel) {
- cookie ^= 2;
- ret = ieee80211_cancel_remain_on_channel_hw(local, cookie);
-
- if (ret == 0) {
- kfree_skb(local->hw_roc_skb);
- local->hw_roc_skb = NULL;
- local->hw_roc_skb_for_status = NULL;
- }
-
- mutex_unlock(&local->mtx);
-
- return ret;
- }
-
- list_for_each_entry(wk, &local->work_list, list) {
- if (wk->sdata != sdata)
- continue;
-
- if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
- continue;
-
- if (cookie != (unsigned long) wk->offchan_tx.frame)
- continue;
-
- wk->timeout = jiffies;
-
- ieee80211_queue_work(&local->hw, &local->work_work);
- ret = 0;
- break;
- }
- mutex_unlock(&local->mtx);
+ struct ieee80211_local *local = wiphy_priv(wiphy);
- return ret;
+ return ieee80211_cancel_roc(local, cookie, true);
}
static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
- struct net_device *dev,
+ struct wireless_dev *wdev,
u16 frame_type, bool reg)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ))
- return;
+ switch (frame_type) {
+ case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH:
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
- if (reg)
- local->probe_req_reg++;
- else
- local->probe_req_reg--;
+ if (reg)
+ ifibss->auth_frame_registrations++;
+ else
+ ifibss->auth_frame_registrations--;
+ }
+ break;
+ case IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ:
+ if (reg)
+ local->probe_req_reg++;
+ else
+ local->probe_req_reg--;
- ieee80211_queue_work(&local->hw, &local->reconfig_filter);
+ ieee80211_queue_work(&local->hw, &local->reconfig_filter);
+ break;
+ default:
+ break;
+ }
}
static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
@@ -2573,8 +2672,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
tf->u.setup_req.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(&sdata->vif, skb, false);
- ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
+ ieee80211_add_srates_ie(sdata, skb, false);
+ ieee80211_add_ext_srates_ie(sdata, skb, false);
ieee80211_tdls_add_ext_capab(skb);
break;
case WLAN_TDLS_SETUP_RESPONSE:
@@ -2587,8 +2686,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
tf->u.setup_resp.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(&sdata->vif, skb, false);
- ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
+ ieee80211_add_srates_ie(sdata, skb, false);
+ ieee80211_add_ext_srates_ie(sdata, skb, false);
ieee80211_tdls_add_ext_capab(skb);
break;
case WLAN_TDLS_SETUP_CONFIRM:
@@ -2648,8 +2747,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
mgmt->u.action.u.tdls_discover_resp.capability =
cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
- ieee80211_add_srates_ie(&sdata->vif, skb, false);
- ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
+ ieee80211_add_srates_ie(sdata, skb, false);
+ ieee80211_add_ext_srates_ie(sdata, skb, false);
ieee80211_tdls_add_ext_capab(skb);
break;
default:
@@ -2679,9 +2778,8 @@ static int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
!sdata->u.mgd.associated)
return -EINVAL;
-#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
- printk(KERN_DEBUG "TDLS mgmt action %d peer %pM\n", action_code, peer);
-#endif
+ tdls_dbg(sdata, "TDLS mgmt action %d peer %pM\n",
+ action_code, peer);
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
max(sizeof(struct ieee80211_mgmt),
@@ -2790,9 +2888,7 @@ static int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EINVAL;
-#ifdef CONFIG_MAC80211_VERBOSE_TDLS_DEBUG
- printk(KERN_DEBUG "TDLS oper %d peer %pM\n", oper, peer);
-#endif
+ tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
switch (oper) {
case NL80211_TDLS_ENABLE_LINK:
@@ -2889,8 +2985,8 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
}
static struct ieee80211_channel *
-ieee80211_wiphy_get_channel(struct wiphy *wiphy,
- enum nl80211_channel_type *type)
+ieee80211_cfg_get_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
+ enum nl80211_channel_type *type)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
@@ -2936,7 +3032,7 @@ struct cfg80211_ops mac80211_config_ops = {
#endif
.change_bss = ieee80211_change_bss,
.set_txq_params = ieee80211_set_txq_params,
- .set_channel = ieee80211_set_channel,
+ .set_monitor_channel = ieee80211_set_monitor_channel,
.suspend = ieee80211_suspend,
.resume = ieee80211_resume,
.scan = ieee80211_scan,
@@ -2971,7 +3067,6 @@ struct cfg80211_ops mac80211_config_ops = {
.tdls_oper = ieee80211_tdls_oper,
.tdls_mgmt = ieee80211_tdls_mgmt,
.probe_client = ieee80211_probe_client,
- .get_channel = ieee80211_wiphy_get_channel,
.set_noack_map = ieee80211_set_noack_map,
#ifdef CONFIG_PM
.set_wakeup = ieee80211_set_wakeup,
@@ -2979,4 +3074,5 @@ struct cfg80211_ops mac80211_config_ops = {
.get_et_sset_count = ieee80211_get_et_sset_count,
.get_et_stats = ieee80211_get_et_stats,
.get_et_strings = ieee80211_get_et_strings,
+ .get_channel = ieee80211_cfg_get_channel,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index c76cf7230c7d..f0f87e5a1d35 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -41,6 +41,10 @@ __ieee80211_get_channel_mode(struct ieee80211_local *local,
if (!sdata->u.ap.beacon)
continue;
break;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (!sdata->wdev.mesh_id_len)
+ continue;
+ break;
default:
break;
}
diff --git a/net/mac80211/debug.h b/net/mac80211/debug.h
new file mode 100644
index 000000000000..8f383a576016
--- /dev/null
+++ b/net/mac80211/debug.h
@@ -0,0 +1,170 @@
+#ifndef __MAC80211_DEBUG_H
+#define __MAC80211_DEBUG_H
+#include <net/cfg80211.h>
+
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
+#define MAC80211_IBSS_DEBUG 1
+#else
+#define MAC80211_IBSS_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_PS_DEBUG
+#define MAC80211_PS_DEBUG 1
+#else
+#define MAC80211_PS_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+#define MAC80211_HT_DEBUG 1
+#else
+#define MAC80211_HT_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MPL_DEBUG
+#define MAC80211_MPL_DEBUG 1
+#else
+#define MAC80211_MPL_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MPATH_DEBUG
+#define MAC80211_MPATH_DEBUG 1
+#else
+#define MAC80211_MPATH_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MHWMP_DEBUG
+#define MAC80211_MHWMP_DEBUG 1
+#else
+#define MAC80211_MHWMP_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MESH_SYNC_DEBUG
+#define MAC80211_MESH_SYNC_DEBUG 1
+#else
+#define MAC80211_MESH_SYNC_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_TDLS_DEBUG
+#define MAC80211_TDLS_DEBUG 1
+#else
+#define MAC80211_TDLS_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_STA_DEBUG
+#define MAC80211_STA_DEBUG 1
+#else
+#define MAC80211_STA_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MLME_DEBUG
+#define MAC80211_MLME_DEBUG 1
+#else
+#define MAC80211_MLME_DEBUG 0
+#endif
+
+#ifdef CONFIG_MAC80211_MESSAGE_TRACING
+void __sdata_info(const char *fmt, ...) __printf(1, 2);
+void __sdata_dbg(bool print, const char *fmt, ...) __printf(2, 3);
+void __sdata_err(const char *fmt, ...) __printf(1, 2);
+void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...)
+ __printf(3, 4);
+
+#define _sdata_info(sdata, fmt, ...) \
+ __sdata_info("%s: " fmt, (sdata)->name, ##__VA_ARGS__)
+#define _sdata_dbg(print, sdata, fmt, ...) \
+ __sdata_dbg(print, "%s: " fmt, (sdata)->name, ##__VA_ARGS__)
+#define _sdata_err(sdata, fmt, ...) \
+ __sdata_err("%s: " fmt, (sdata)->name, ##__VA_ARGS__)
+#define _wiphy_dbg(print, wiphy, fmt, ...) \
+ __wiphy_dbg(wiphy, print, fmt, ##__VA_ARGS__)
+#else
+#define _sdata_info(sdata, fmt, ...) \
+do { \
+ pr_info("%s: " fmt, \
+ (sdata)->name, ##__VA_ARGS__); \
+} while (0)
+
+#define _sdata_dbg(print, sdata, fmt, ...) \
+do { \
+ if (print) \
+ pr_debug("%s: " fmt, \
+ (sdata)->name, ##__VA_ARGS__); \
+} while (0)
+
+#define _sdata_err(sdata, fmt, ...) \
+do { \
+ pr_err("%s: " fmt, \
+ (sdata)->name, ##__VA_ARGS__); \
+} while (0)
+
+#define _wiphy_dbg(print, wiphy, fmt, ...) \
+do { \
+ if (print) \
+ wiphy_dbg((wiphy), fmt, ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#define sdata_info(sdata, fmt, ...) \
+ _sdata_info(sdata, fmt, ##__VA_ARGS__)
+#define sdata_err(sdata, fmt, ...) \
+ _sdata_err(sdata, fmt, ##__VA_ARGS__)
+#define sdata_dbg(sdata, fmt, ...) \
+ _sdata_dbg(1, sdata, fmt, ##__VA_ARGS__)
+
+#define ht_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_HT_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define ht_dbg_ratelimited(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_HT_DEBUG && net_ratelimit(), \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define ibss_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_IBSS_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define ps_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_PS_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define ps_dbg_hw(hw, fmt, ...) \
+ _wiphy_dbg(MAC80211_PS_DEBUG, \
+ (hw)->wiphy, fmt, ##__VA_ARGS__)
+
+#define ps_dbg_ratelimited(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_PS_DEBUG && net_ratelimit(), \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define mpl_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MPL_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define mpath_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MPATH_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define mhwmp_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MHWMP_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define msync_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MESH_SYNC_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define tdls_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_TDLS_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define sta_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_STA_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define mlme_dbg(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MLME_DEBUG, \
+ sdata, fmt, ##__VA_ARGS__)
+
+#define mlme_dbg_ratelimited(sdata, fmt, ...) \
+ _sdata_dbg(MAC80211_MLME_DEBUG && net_ratelimit(), \
+ sdata, fmt, ##__VA_ARGS__)
+
+#endif /* __MAC80211_DEBUG_H */
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 778e5916d7c3..b8dfb440c8ef 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -325,8 +325,6 @@ void debugfs_hw_add(struct ieee80211_local *local)
local->rx_handlers_drop_defrag);
DEBUGFS_STATS_ADD(rx_handlers_drop_short,
local->rx_handlers_drop_short);
- DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan,
- local->rx_handlers_drop_passive_scan);
DEBUGFS_STATS_ADD(tx_expand_skb_head,
local->tx_expand_skb_head);
DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 7932767bb482..090d08ff22c4 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -283,6 +283,11 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&sdata->local->key_mtx);
+ if (sdata->debugfs.default_unicast_key) {
+ debugfs_remove(sdata->debugfs.default_unicast_key);
+ sdata->debugfs.default_unicast_key = NULL;
+ }
+
if (sdata->default_unicast_key) {
key = key_mtx_dereference(sdata->local,
sdata->default_unicast_key);
@@ -290,9 +295,11 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
sdata->debugfs.default_unicast_key =
debugfs_create_symlink("default_unicast_key",
sdata->debugfs.dir, buf);
- } else {
- debugfs_remove(sdata->debugfs.default_unicast_key);
- sdata->debugfs.default_unicast_key = NULL;
+ }
+
+ if (sdata->debugfs.default_multicast_key) {
+ debugfs_remove(sdata->debugfs.default_multicast_key);
+ sdata->debugfs.default_multicast_key = NULL;
}
if (sdata->default_multicast_key) {
@@ -302,9 +309,6 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
sdata->debugfs.default_multicast_key =
debugfs_create_symlink("default_multicast_key",
sdata->debugfs.dir, buf);
- } else {
- debugfs_remove(sdata->debugfs.default_multicast_key);
- sdata->debugfs.default_multicast_key = NULL;
}
}
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 7ed433c66d68..6d5aec9418ee 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -468,48 +468,54 @@ IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
IEEE80211_IF_FILE(dropped_frames_congestion,
- u.mesh.mshstats.dropped_frames_congestion, DEC);
+ u.mesh.mshstats.dropped_frames_congestion, DEC);
IEEE80211_IF_FILE(dropped_frames_no_route,
- u.mesh.mshstats.dropped_frames_no_route, DEC);
+ u.mesh.mshstats.dropped_frames_no_route, DEC);
IEEE80211_IF_FILE(estab_plinks, u.mesh.mshstats.estab_plinks, ATOMIC);
/* Mesh parameters */
IEEE80211_IF_FILE(dot11MeshMaxRetries,
- u.mesh.mshcfg.dot11MeshMaxRetries, DEC);
+ u.mesh.mshcfg.dot11MeshMaxRetries, DEC);
IEEE80211_IF_FILE(dot11MeshRetryTimeout,
- u.mesh.mshcfg.dot11MeshRetryTimeout, DEC);
+ u.mesh.mshcfg.dot11MeshRetryTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshConfirmTimeout,
- u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC);
+ u.mesh.mshcfg.dot11MeshConfirmTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHoldingTimeout,
- u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC);
+ u.mesh.mshcfg.dot11MeshHoldingTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshTTL, u.mesh.mshcfg.dot11MeshTTL, DEC);
IEEE80211_IF_FILE(element_ttl, u.mesh.mshcfg.element_ttl, DEC);
IEEE80211_IF_FILE(auto_open_plinks, u.mesh.mshcfg.auto_open_plinks, DEC);
IEEE80211_IF_FILE(dot11MeshMaxPeerLinks,
- u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC);
+ u.mesh.mshcfg.dot11MeshMaxPeerLinks, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPactivePathTimeout,
- u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPactivePathTimeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPpreqMinInterval,
- u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPpreqMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPperrMinInterval,
- u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPperrMinInterval, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPnetDiameterTraversalTime,
- u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPmaxPREQretries,
- u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries, DEC);
IEEE80211_IF_FILE(path_refresh_time,
- u.mesh.mshcfg.path_refresh_time, DEC);
+ u.mesh.mshcfg.path_refresh_time, DEC);
IEEE80211_IF_FILE(min_discovery_timeout,
- u.mesh.mshcfg.min_discovery_timeout, DEC);
+ u.mesh.mshcfg.min_discovery_timeout, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
- u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPRootMode, DEC);
IEEE80211_IF_FILE(dot11MeshGateAnnouncementProtocol,
- u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
+ u.mesh.mshcfg.dot11MeshGateAnnouncementProtocol, DEC);
IEEE80211_IF_FILE(dot11MeshHWMPRannInterval,
- u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
+ u.mesh.mshcfg.dot11MeshHWMPRannInterval, DEC);
IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
IEEE80211_IF_FILE(ht_opmode, u.mesh.mshcfg.ht_opmode, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPactivePathToRootTimeout,
+ u.mesh.mshcfg.dot11MeshHWMPactivePathToRootTimeout, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMProotInterval,
+ u.mesh.mshcfg.dot11MeshHWMProotInterval, DEC);
+IEEE80211_IF_FILE(dot11MeshHWMPconfirmationInterval,
+ u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval, DEC);
#endif
#define DEBUGFS_ADD_MODE(name, mode) \
@@ -607,9 +613,13 @@ static void add_mesh_config(struct ieee80211_sub_if_data *sdata)
MESHPARAMS_ADD(min_discovery_timeout);
MESHPARAMS_ADD(dot11MeshHWMPRootMode);
MESHPARAMS_ADD(dot11MeshHWMPRannInterval);
+ MESHPARAMS_ADD(dot11MeshForwarding);
MESHPARAMS_ADD(dot11MeshGateAnnouncementProtocol);
MESHPARAMS_ADD(rssi_threshold);
MESHPARAMS_ADD(ht_opmode);
+ MESHPARAMS_ADD(dot11MeshHWMPactivePathToRootTimeout);
+ MESHPARAMS_ADD(dot11MeshHWMProotInterval);
+ MESHPARAMS_ADD(dot11MeshHWMPconfirmationInterval);
#undef MESHPARAMS_ADD
}
#endif
@@ -685,6 +695,7 @@ void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
sprintf(buf, "netdev:%s", sdata->name);
if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
- printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
- "dir to %s\n", buf);
+ sdata_err(sdata,
+ "debugfs: failed to rename debugfs dir to %s\n",
+ buf);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 6d33a0c743ab..df9203199102 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -3,7 +3,7 @@
#include <net/mac80211.h>
#include "ieee80211_i.h"
-#include "driver-trace.h"
+#include "trace.h"
static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
{
@@ -27,14 +27,6 @@ static inline void drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
local->ops->tx(&local->hw, skb);
}
-static inline void drv_tx_frags(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct sk_buff_head *skbs)
-{
- local->ops->tx_frags(&local->hw, vif, sta, skbs);
-}
-
static inline void drv_get_et_strings(struct ieee80211_sub_if_data *sdata,
u32 sset, u8 *data)
{
@@ -845,4 +837,33 @@ drv_allow_buffered_frames(struct ieee80211_local *local,
more_data);
trace_drv_return_void(local);
}
+
+static inline int drv_get_rssi(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta,
+ s8 *rssi_dbm)
+{
+ int ret;
+
+ might_sleep();
+
+ ret = local->ops->get_rssi(&local->hw, &sdata->vif, sta, rssi_dbm);
+ trace_drv_get_rssi(local, sta, *rssi_dbm, ret);
+
+ return ret;
+}
+
+static inline void drv_mgd_prepare_tx(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ might_sleep();
+
+ check_sdata_in_driver(sdata);
+ WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION);
+
+ trace_drv_mgd_prepare_tx(local, sdata);
+ if (local->ops->mgd_prepare_tx)
+ local->ops->mgd_prepare_tx(&local->hw, &sdata->vif);
+ trace_drv_return_void(local);
+}
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.c b/net/mac80211/driver-trace.c
deleted file mode 100644
index 8ed8711b1a6d..000000000000
--- a/net/mac80211/driver-trace.c
+++ /dev/null
@@ -1,9 +0,0 @@
-/* bug in tracepoint.h, it should include this */
-#include <linux/module.h>
-
-/* sparse isn't too happy with all macros... */
-#ifndef __CHECKER__
-#include "driver-ops.h"
-#define CREATE_TRACE_POINTS
-#include "driver-trace.h"
-#endif
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 6f8615c54b22..4b4538d63925 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -305,12 +305,10 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
tid = (params & IEEE80211_DELBA_PARAM_TID_MASK) >> 12;
initiator = (params & IEEE80211_DELBA_PARAM_INITIATOR_MASK) >> 11;
-#ifdef CONFIG_MAC80211_HT_DEBUG
- net_dbg_ratelimited("delba from %pM (%s) tid %d reason code %d\n",
- mgmt->sa, initiator ? "initiator" : "recipient",
- tid,
- le16_to_cpu(mgmt->u.action.u.delba.reason_code));
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+ ht_dbg_ratelimited(sdata, "delba from %pM (%s) tid %d reason code %d\n",
+ mgmt->sa, initiator ? "initiator" : "recipient",
+ tid,
+ le16_to_cpu(mgmt->u.action.u.delba.reason_code));
if (initiator == WLAN_BACK_INITIATOR)
__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 33d9d0c3e3d0..5746d62faba1 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -82,8 +82,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
local->oper_channel = chan;
channel_type = ifibss->channel_type;
- if (channel_type > NL80211_CHAN_HT20 &&
- !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
+ if (!cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
channel_type = NL80211_CHAN_HT20;
if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
/* can only fail due to HT40+/- mismatch */
@@ -262,11 +261,7 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
memcpy(addr, sta->sta.addr, ETH_ALEN);
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(sdata->local->hw.wiphy,
- "Adding new IBSS station %pM (dev=%s)\n",
- addr, sdata->name);
-#endif
+ ibss_dbg(sdata, "Adding new IBSS station %pM\n", addr);
sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
@@ -280,12 +275,10 @@ static struct sta_info *ieee80211_ibss_finish_sta(struct sta_info *sta,
/* If it fails, maybe we raced another insertion? */
if (sta_info_insert_rcu(sta))
return sta_info_get(sdata, addr);
- if (auth) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "TX Auth SA=%pM DA=%pM BSSID=%pM"
- "(auth_transaction=1)\n", sdata->vif.addr,
- sdata->u.ibss.bssid, addr);
-#endif
+ if (auth && !sdata->u.ibss.auth_frame_registrations) {
+ ibss_dbg(sdata,
+ "TX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=1)\n",
+ sdata->vif.addr, sdata->u.ibss.bssid, addr);
ieee80211_send_auth(sdata, 1, WLAN_AUTH_OPEN, NULL, 0,
addr, sdata->u.ibss.bssid, NULL, 0, 0);
}
@@ -308,7 +301,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
* allow new one to be added.
*/
if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
- net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+ net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
sdata->name, addr);
rcu_read_lock();
return NULL;
@@ -355,11 +348,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
return;
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: RX Auth SA=%pM DA=%pM BSSID=%pM."
- "(auth_transaction=%d)\n",
- sdata->name, mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
-#endif
+ ibss_dbg(sdata,
+ "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
+ mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
sta_info_destroy_addr(sdata, mgmt->sa);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 0, false);
rcu_read_unlock();
@@ -422,15 +413,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
ieee80211_mandatory_rates(local, band);
if (sta->sta.supp_rates[band] != prev_rates) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG
- "%s: updated supp_rates set "
- "for %pM based on beacon"
- "/probe_resp (0x%x -> 0x%x)\n",
- sdata->name, sta->sta.addr,
- prev_rates,
- sta->sta.supp_rates[band]);
-#endif
+ ibss_dbg(sdata,
+ "updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
+ sta->sta.addr, prev_rates,
+ sta->sta.supp_rates[band]);
rates_updated = true;
}
} else {
@@ -545,22 +531,18 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
rx_timestamp = drv_get_tsf(local, sdata);
}
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "RX beacon SA=%pM BSSID="
- "%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
- mgmt->sa, mgmt->bssid,
- (unsigned long long)rx_timestamp,
- (unsigned long long)beacon_timestamp,
- (unsigned long long)(rx_timestamp - beacon_timestamp),
- jiffies);
-#endif
+ ibss_dbg(sdata,
+ "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+ mgmt->sa, mgmt->bssid,
+ (unsigned long long)rx_timestamp,
+ (unsigned long long)beacon_timestamp,
+ (unsigned long long)(rx_timestamp - beacon_timestamp),
+ jiffies);
if (beacon_timestamp > rx_timestamp) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: beacon TSF higher than "
- "local TSF - IBSS merge with BSSID %pM\n",
- sdata->name, mgmt->bssid);
-#endif
+ ibss_dbg(sdata,
+ "beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n",
+ mgmt->bssid);
ieee80211_sta_join_ibss(sdata, bss);
supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
@@ -586,7 +568,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
* allow new one to be added.
*/
if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
- net_dbg_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
+ net_info_ratelimited("%s: No room for a new IBSS STA entry %pM\n",
sdata->name, addr);
return;
}
@@ -662,8 +644,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
if (ifibss->fixed_channel)
return;
- printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
- "IBSS networks with same SSID (merge)\n", sdata->name);
+ sdata_info(sdata,
+ "No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
ieee80211_request_internal_scan(sdata,
ifibss->ssid, ifibss->ssid_len, NULL);
@@ -691,8 +673,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
bssid[0] |= 0x02;
}
- printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
- sdata->name, bssid);
+ sdata_info(sdata, "Creating new IBSS network, BSSID %pM\n", bssid);
capability = WLAN_CAPABILITY_IBSS;
@@ -723,10 +704,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&ifibss->mtx);
active_ibss = ieee80211_sta_active_ibss(sdata);
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
- sdata->name, active_ibss);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+ ibss_dbg(sdata, "sta_find_ibss (active_ibss=%d)\n", active_ibss);
if (active_ibss)
return;
@@ -749,29 +727,24 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
struct ieee80211_bss *bss;
bss = (void *)cbss->priv;
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
- "%pM\n", cbss->bssid, ifibss->bssid);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
-
- printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
- " based on configured SSID\n",
- sdata->name, cbss->bssid);
+ ibss_dbg(sdata,
+ "sta_find_ibss: selected %pM current %pM\n",
+ cbss->bssid, ifibss->bssid);
+ sdata_info(sdata,
+ "Selected IBSS BSSID %pM based on configured SSID\n",
+ cbss->bssid);
ieee80211_sta_join_ibss(sdata, bss);
ieee80211_rx_bss_put(local, bss);
return;
}
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG " did not try to join ibss\n");
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+ ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n");
/* Selected IBSS not found in current scan results - try to scan */
if (time_after(jiffies, ifibss->last_scan_completed +
IEEE80211_SCAN_INTERVAL)) {
- printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
- "join\n", sdata->name);
+ sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
ieee80211_request_internal_scan(sdata,
ifibss->ssid, ifibss->ssid_len,
@@ -785,9 +758,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
ieee80211_sta_create_ibss(sdata);
return;
}
- printk(KERN_DEBUG "%s: IBSS not allowed on"
- " %d MHz\n", sdata->name,
- local->hw.conf.channel->center_freq);
+ sdata_info(sdata, "IBSS not allowed on %d MHz\n",
+ local->hw.conf.channel->center_freq);
/* No IBSS found - decrease scan interval and continue
* scanning. */
@@ -822,12 +794,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
tx_last_beacon = drv_tx_last_beacon(local);
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
- " (tx_last_beacon=%d)\n",
- sdata->name, mgmt->sa, mgmt->da,
- mgmt->bssid, tx_last_beacon);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+ ibss_dbg(sdata,
+ "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
+ mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
return;
@@ -840,11 +809,8 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
pos = mgmt->u.probe_req.variable;
if (pos[0] != WLAN_EID_SSID ||
pos + 2 + pos[1] > end) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
- "from %pM\n",
- sdata->name, mgmt->sa);
-#endif
+ ibss_dbg(sdata, "Invalid SSID IE in ProbeReq from %pM\n",
+ mgmt->sa);
return;
}
if (pos[1] != 0 &&
@@ -861,10 +827,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
resp = (struct ieee80211_mgmt *) skb->data;
memcpy(resp->da, mgmt->sa, ETH_ALEN);
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
- sdata->name, resp->da);
-#endif /* CONFIG_MAC80211_IBSS_DEBUG */
+ ibss_dbg(sdata, "Sending ProbeResp to %pM\n", resp->da);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3f3cd50fff16..bb61f7718c4c 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -30,6 +30,7 @@
#include <net/mac80211.h>
#include "key.h"
#include "sta_info.h"
+#include "debug.h"
struct ieee80211_local;
@@ -55,11 +56,14 @@ struct ieee80211_local;
#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
+/*
+ * Some APs experience problems when working with U-APSD. Decrease the
+ * probability of that happening by using legacy mode for all ACs but VO.
+ * The AP that caused us trouble was a Cisco 4410N. It ignores our
+ * setting, and always treats non-VO ACs as legacy.
+ */
#define IEEE80211_DEFAULT_UAPSD_QUEUES \
- (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
- IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
- IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
- IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VO
#define IEEE80211_DEFAULT_MAX_SP_LEN \
IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -81,6 +85,8 @@ struct ieee80211_bss {
size_t ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u32 device_ts;
+
u8 dtim_period;
bool wmm_used;
@@ -203,7 +209,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
* enum ieee80211_packet_rx_flags - packet RX flags
* @IEEE80211_RX_RA_MATCH: frame is destined to interface currently processed
* (incl. multicast frames)
- * @IEEE80211_RX_IN_SCAN: received while scanning
* @IEEE80211_RX_FRAGMENTED: fragmented frame
* @IEEE80211_RX_AMSDU: a-MSDU packet
* @IEEE80211_RX_MALFORMED_ACTION_FRM: action frame is malformed
@@ -213,7 +218,6 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
* @rx_flags field of &struct ieee80211_rx_status.
*/
enum ieee80211_packet_rx_flags {
- IEEE80211_RX_IN_SCAN = BIT(0),
IEEE80211_RX_RA_MATCH = BIT(1),
IEEE80211_RX_FRAGMENTED = BIT(2),
IEEE80211_RX_AMSDU = BIT(3),
@@ -317,55 +321,30 @@ struct mesh_preq_queue {
u8 flags;
};
-enum ieee80211_work_type {
- IEEE80211_WORK_ABORT,
- IEEE80211_WORK_REMAIN_ON_CHANNEL,
- IEEE80211_WORK_OFFCHANNEL_TX,
-};
-
-/**
- * enum work_done_result - indicates what to do after work was done
- *
- * @WORK_DONE_DESTROY: This work item is no longer needed, destroy.
- * @WORK_DONE_REQUEUE: This work item was reset to be reused, and
- * should be requeued.
- */
-enum work_done_result {
- WORK_DONE_DESTROY,
- WORK_DONE_REQUEUE,
-};
+#if HZ/100 == 0
+#define IEEE80211_ROC_MIN_LEFT 1
+#else
+#define IEEE80211_ROC_MIN_LEFT (HZ/100)
+#endif
-struct ieee80211_work {
+struct ieee80211_roc_work {
struct list_head list;
+ struct list_head dependents;
- struct rcu_head rcu_head;
+ struct delayed_work work;
struct ieee80211_sub_if_data *sdata;
- enum work_done_result (*done)(struct ieee80211_work *wk,
- struct sk_buff *skb);
-
struct ieee80211_channel *chan;
enum nl80211_channel_type chan_type;
- unsigned long timeout;
- enum ieee80211_work_type type;
+ bool started, abort, hw_begun, notified;
- bool started;
+ unsigned long hw_start_time;
- union {
- struct {
- u32 duration;
- } remain;
- struct {
- struct sk_buff *frame;
- u32 wait;
- bool status;
- } offchan_tx;
- };
-
- size_t data_len;
- u8 data[];
+ u32 duration, req_duration;
+ struct sk_buff *frame;
+ u64 mgmt_tx_cookie;
};
/* flags used in struct ieee80211_if_managed.flags */
@@ -399,7 +378,6 @@ struct ieee80211_mgd_auth_data {
struct ieee80211_mgd_assoc_data {
struct cfg80211_bss *bss;
const u8 *supp_rates;
- const u8 *ht_operation_ie;
unsigned long timeout;
int tries;
@@ -414,6 +392,8 @@ struct ieee80211_mgd_assoc_data {
bool sent_assoc;
bool synced;
+ u8 ap_ht_param;
+
size_t ie_len;
u8 ie[];
};
@@ -532,6 +512,7 @@ struct ieee80211_if_ibss {
bool privacy;
bool control_port;
+ unsigned int auth_frame_registrations;
u8 bssid[ETH_ALEN] __aligned(2);
u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -701,6 +682,9 @@ struct ieee80211_sub_if_data {
/* TID bitmap for NoAck policy */
u16 noack_map;
+ /* bit field of ACM bits (BIT(802.1D tag)) */
+ u8 wmm_acm;
+
struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
struct ieee80211_key __rcu *default_unicast_key;
struct ieee80211_key __rcu *default_multicast_key;
@@ -847,13 +831,6 @@ struct ieee80211_local {
const struct ieee80211_ops *ops;
/*
- * work stuff, potentially off-channel (in the future)
- */
- struct list_head work_list;
- struct timer_list work_timer;
- struct work_struct work_work;
-
- /*
* private workqueue to mac80211. mac80211 makes this accessible
* via ieee80211_queue_work()
*/
@@ -912,6 +889,9 @@ struct ieee80211_local {
/* device is started */
bool started;
+ /* device is during a HW reconfig */
+ bool in_reconfig;
+
/* wowlan is enabled -- don't reconfig on resume */
bool wowlan;
@@ -985,14 +965,14 @@ struct ieee80211_local {
int scan_channel_idx;
int scan_ies_len;
- bool sched_scanning;
struct ieee80211_sched_scan_ies sched_scan_ies;
struct work_struct sched_scan_stopped_work;
+ struct ieee80211_sub_if_data __rcu *sched_scan_sdata;
unsigned long leave_oper_channel_time;
enum mac80211_scan_state next_scan_state;
struct delayed_work scan_work;
- struct ieee80211_sub_if_data *scan_sdata;
+ struct ieee80211_sub_if_data __rcu *scan_sdata;
enum nl80211_channel_type _oper_channel_type;
struct ieee80211_channel *oper_channel, *csa_channel;
@@ -1034,7 +1014,6 @@ struct ieee80211_local {
unsigned int rx_handlers_drop_nullfunc;
unsigned int rx_handlers_drop_defrag;
unsigned int rx_handlers_drop_short;
- unsigned int rx_handlers_drop_passive_scan;
unsigned int tx_expand_skb_head;
unsigned int tx_expand_skb_head_cloned;
unsigned int rx_expand_skb_head;
@@ -1050,7 +1029,6 @@ struct ieee80211_local {
int total_ps_buffered; /* total number of all buffered unicast and
* multicast packets for power saving stations
*/
- unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
bool pspolling;
bool offchannel_ps_enabled;
@@ -1087,14 +1065,12 @@ struct ieee80211_local {
} debugfs;
#endif
- struct ieee80211_channel *hw_roc_channel;
- struct net_device *hw_roc_dev;
- struct sk_buff *hw_roc_skb, *hw_roc_skb_for_status;
+ /*
+ * Remain-on-channel support
+ */
+ struct list_head roc_list;
struct work_struct hw_roc_start, hw_roc_done;
- enum nl80211_channel_type hw_roc_channel_type;
- unsigned int hw_roc_duration;
- u32 hw_roc_cookie;
- bool hw_roc_for_tx;
+ unsigned long hw_roc_start_time;
struct idr ack_status_frames;
spinlock_t ack_status_lock;
@@ -1114,6 +1090,12 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
return netdev_priv(dev);
}
+static inline struct ieee80211_sub_if_data *
+IEEE80211_WDEV_TO_SUB_IF(struct wireless_dev *wdev)
+{
+ return container_of(wdev, struct ieee80211_sub_if_data, wdev);
+}
+
/* this struct represents 802.11n's RA/TID combination */
struct ieee80211_ra_tid {
u8 ra[ETH_ALEN];
@@ -1264,8 +1246,7 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
void ieee80211_scan_cancel(struct ieee80211_local *local);
void ieee80211_run_deferred_scan(struct ieee80211_local *local);
-ieee80211_rx_result
-ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
+void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb);
void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
struct ieee80211_bss *
@@ -1290,19 +1271,23 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
bool offchannel_ps_enable);
void ieee80211_offchannel_return(struct ieee80211_local *local,
bool offchannel_ps_disable);
-void ieee80211_hw_roc_setup(struct ieee80211_local *local);
+void ieee80211_roc_setup(struct ieee80211_local *local);
+void ieee80211_start_next_roc(struct ieee80211_local *local);
+void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata);
+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc);
+void ieee80211_sw_roc_work(struct work_struct *work);
+void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
/* interface handling */
int ieee80211_iface_init(void);
void ieee80211_iface_exit(void);
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
- struct net_device **new_dev, enum nl80211_iftype type,
+ struct wireless_dev **new_wdev, enum nl80211_iftype type,
struct vif_params *params);
int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
enum nl80211_iftype type);
void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);
void ieee80211_remove_interfaces(struct ieee80211_local *local);
-u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
void ieee80211_recalc_idle(struct ieee80211_local *local);
void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
const int offset);
@@ -1499,18 +1484,12 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
struct ieee80211_channel *channel,
enum nl80211_channel_type channel_type,
u16 prot_mode);
-
-/* internal work items */
-void ieee80211_work_init(struct ieee80211_local *local);
-void ieee80211_add_work(struct ieee80211_work *wk);
-void free_work(struct ieee80211_work *wk);
-void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
-int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- unsigned int duration, u64 *cookie);
-int ieee80211_wk_cancel_remain_on_channel(
- struct ieee80211_sub_if_data *sdata, u64 cookie);
+u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+ u32 cap);
+int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, bool need_basic);
+int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, bool need_basic);
/* channel management */
enum ieee80211_chan_mode {
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8664111d0566..bfb57dcc1538 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -43,6 +43,128 @@
*/
+static u32 ieee80211_idle_off(struct ieee80211_local *local,
+ const char *reason)
+{
+ if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
+ return 0;
+
+ local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
+ return IEEE80211_CONF_CHANGE_IDLE;
+}
+
+static u32 ieee80211_idle_on(struct ieee80211_local *local)
+{
+ if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
+ return 0;
+
+ drv_flush(local, false);
+
+ local->hw.conf.flags |= IEEE80211_CONF_IDLE;
+ return IEEE80211_CONF_CHANGE_IDLE;
+}
+
+static u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+ int count = 0;
+ bool working = false, scanning = false;
+ unsigned int led_trig_start = 0, led_trig_stop = 0;
+ struct ieee80211_roc_work *roc;
+
+#ifdef CONFIG_PROVE_LOCKING
+ WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
+ !lockdep_is_held(&local->iflist_mtx));
+#endif
+ lockdep_assert_held(&local->mtx);
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata)) {
+ sdata->vif.bss_conf.idle = true;
+ continue;
+ }
+
+ sdata->old_idle = sdata->vif.bss_conf.idle;
+
+ /* do not count disabled managed interfaces */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !sdata->u.mgd.associated &&
+ !sdata->u.mgd.auth_data &&
+ !sdata->u.mgd.assoc_data) {
+ sdata->vif.bss_conf.idle = true;
+ continue;
+ }
+ /* do not count unused IBSS interfaces */
+ if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+ !sdata->u.ibss.ssid_len) {
+ sdata->vif.bss_conf.idle = true;
+ continue;
+ }
+ /* count everything else */
+ sdata->vif.bss_conf.idle = false;
+ count++;
+ }
+
+ if (!local->ops->remain_on_channel) {
+ list_for_each_entry(roc, &local->roc_list, list) {
+ working = true;
+ roc->sdata->vif.bss_conf.idle = false;
+ }
+ }
+
+ sdata = rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx));
+ if (sdata && !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
+ scanning = true;
+ sdata->vif.bss_conf.idle = false;
+ }
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ continue;
+ if (sdata->old_idle == sdata->vif.bss_conf.idle)
+ continue;
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
+ }
+
+ if (working || scanning)
+ led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
+ else
+ led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
+
+ if (count)
+ led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
+ else
+ led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
+
+ ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
+
+ if (working)
+ return ieee80211_idle_off(local, "working");
+ if (scanning)
+ return ieee80211_idle_off(local, "scanning");
+ if (!count)
+ return ieee80211_idle_on(local);
+ else
+ return ieee80211_idle_off(local, "in use");
+
+ return 0;
+}
+
+void ieee80211_recalc_idle(struct ieee80211_local *local)
+{
+ u32 chg;
+
+ mutex_lock(&local->iflist_mtx);
+ chg = __ieee80211_recalc_idle(local);
+ mutex_unlock(&local->iflist_mtx);
+ if (chg)
+ ieee80211_hw_config(local, chg);
+}
+
static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
{
int meshhdrlen;
@@ -57,9 +179,6 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
dev->mtu = new_mtu;
return 0;
}
@@ -100,15 +219,12 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sub_if_data *nsdata;
- struct net_device *dev = sdata->dev;
ASSERT_RTNL();
/* we hold the RTNL here so can safely walk the list */
list_for_each_entry(nsdata, &local->interfaces, list) {
- struct net_device *ndev = nsdata->dev;
-
- if (ndev != dev && ieee80211_sdata_running(nsdata)) {
+ if (nsdata != sdata && ieee80211_sdata_running(nsdata)) {
/*
* Allow only a single IBSS interface to be up at any
* time. This is restricted because beacon distribution
@@ -127,7 +243,8 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
* The remaining checks are only performed for interfaces
* with the same MAC address.
*/
- if (!ether_addr_equal(dev->dev_addr, ndev->dev_addr))
+ if (!ether_addr_equal(sdata->vif.addr,
+ nsdata->vif.addr))
continue;
/*
@@ -217,17 +334,21 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
- int ret;
+ int ret = 0;
if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
return 0;
+ mutex_lock(&local->iflist_mtx);
+
if (local->monitor_sdata)
- return 0;
+ goto out_unlock;
sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
- if (!sdata)
- return -ENOMEM;
+ if (!sdata) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
/* set up data */
sdata->local = local;
@@ -241,18 +362,19 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
if (WARN_ON(ret)) {
/* ok .. stupid driver, it asked for this! */
kfree(sdata);
- return ret;
+ goto out_unlock;
}
ret = ieee80211_check_queues(sdata);
if (ret) {
kfree(sdata);
- return ret;
+ goto out_unlock;
}
rcu_assign_pointer(local->monitor_sdata, sdata);
-
- return 0;
+ out_unlock:
+ mutex_unlock(&local->iflist_mtx);
+ return ret;
}
static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
@@ -262,10 +384,12 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
return;
- sdata = rtnl_dereference(local->monitor_sdata);
+ mutex_lock(&local->iflist_mtx);
+ sdata = rcu_dereference_protected(local->monitor_sdata,
+ lockdep_is_held(&local->iflist_mtx));
if (!sdata)
- return;
+ goto out_unlock;
rcu_assign_pointer(local->monitor_sdata, NULL);
synchronize_net();
@@ -273,6 +397,8 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
drv_remove_interface(local, sdata);
kfree(sdata);
+ out_unlock:
+ mutex_unlock(&local->iflist_mtx);
}
/*
@@ -520,7 +646,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
clear_bit(SDATA_STATE_RUNNING, &sdata->state);
- if (local->scan_sdata == sdata)
+ if (rcu_access_pointer(local->scan_sdata) == sdata)
ieee80211_scan_cancel(local);
/*
@@ -528,10 +654,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
*/
netif_tx_stop_all_queues(sdata->dev);
- /*
- * Purge work for this interface.
- */
- ieee80211_work_purge(sdata);
+ ieee80211_roc_purge(sdata);
/*
* Remove all stations associated with this interface.
@@ -637,18 +760,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_configure_filter(local);
break;
default:
- mutex_lock(&local->mtx);
- if (local->hw_roc_dev == sdata->dev &&
- local->hw_roc_channel) {
- /* ignore return value since this is racy */
- drv_cancel_remain_on_channel(local);
- ieee80211_queue_work(&local->hw, &local->hw_roc_done);
- }
- mutex_unlock(&local->mtx);
-
- flush_work(&local->hw_roc_start);
- flush_work(&local->hw_roc_done);
-
flush_work(&sdata->work);
/*
* When we get here, the interface is marked down.
@@ -823,7 +934,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
- return ieee80211_select_queue_80211(local, skb, hdr);
+ return ieee80211_select_queue_80211(sdata, skb, hdr);
}
static const struct net_device_ops ieee80211_monitorif_ops = {
@@ -1238,7 +1349,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
if (__ffs64(mask) + hweight64(mask) != fls64(mask)) {
/* not a contiguous mask ... not handled now! */
- printk(KERN_DEBUG "not contiguous\n");
+ pr_info("not contiguous\n");
break;
}
@@ -1284,7 +1395,7 @@ static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
}
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
- struct net_device **new_dev, enum nl80211_iftype type,
+ struct wireless_dev **new_wdev, enum nl80211_iftype type,
struct vif_params *params)
{
struct net_device *ndev;
@@ -1364,6 +1475,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
sdata->u.mgd.use_4addr = params->use_4addr;
}
+ ndev->features |= local->hw.netdev_features;
+
ret = register_netdevice(ndev);
if (ret)
goto fail;
@@ -1372,8 +1485,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
list_add_tail_rcu(&sdata->list, &local->interfaces);
mutex_unlock(&local->iflist_mtx);
- if (new_dev)
- *new_dev = ndev;
+ if (new_wdev)
+ *new_wdev = &sdata->wdev;
return 0;
@@ -1421,138 +1534,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
list_del(&unreg_list);
}
-static u32 ieee80211_idle_off(struct ieee80211_local *local,
- const char *reason)
-{
- if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))
- return 0;
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason);
-#endif
-
- local->hw.conf.flags &= ~IEEE80211_CONF_IDLE;
- return IEEE80211_CONF_CHANGE_IDLE;
-}
-
-static u32 ieee80211_idle_on(struct ieee80211_local *local)
-{
- if (local->hw.conf.flags & IEEE80211_CONF_IDLE)
- return 0;
-
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "device now idle\n");
-#endif
-
- drv_flush(local, false);
-
- local->hw.conf.flags |= IEEE80211_CONF_IDLE;
- return IEEE80211_CONF_CHANGE_IDLE;
-}
-
-u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
-{
- struct ieee80211_sub_if_data *sdata;
- int count = 0;
- bool working = false, scanning = false, hw_roc = false;
- struct ieee80211_work *wk;
- unsigned int led_trig_start = 0, led_trig_stop = 0;
-
-#ifdef CONFIG_PROVE_LOCKING
- WARN_ON(debug_locks && !lockdep_rtnl_is_held() &&
- !lockdep_is_held(&local->iflist_mtx));
-#endif
- lockdep_assert_held(&local->mtx);
-
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!ieee80211_sdata_running(sdata)) {
- sdata->vif.bss_conf.idle = true;
- continue;
- }
-
- sdata->old_idle = sdata->vif.bss_conf.idle;
-
- /* do not count disabled managed interfaces */
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !sdata->u.mgd.associated &&
- !sdata->u.mgd.auth_data &&
- !sdata->u.mgd.assoc_data) {
- sdata->vif.bss_conf.idle = true;
- continue;
- }
- /* do not count unused IBSS interfaces */
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
- !sdata->u.ibss.ssid_len) {
- sdata->vif.bss_conf.idle = true;
- continue;
- }
- /* count everything else */
- sdata->vif.bss_conf.idle = false;
- count++;
- }
-
- list_for_each_entry(wk, &local->work_list, list) {
- working = true;
- wk->sdata->vif.bss_conf.idle = false;
- }
-
- if (local->scan_sdata &&
- !(local->hw.flags & IEEE80211_HW_SCAN_WHILE_IDLE)) {
- scanning = true;
- local->scan_sdata->vif.bss_conf.idle = false;
- }
-
- if (local->hw_roc_channel)
- hw_roc = true;
-
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- continue;
- if (sdata->old_idle == sdata->vif.bss_conf.idle)
- continue;
- if (!ieee80211_sdata_running(sdata))
- continue;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE);
- }
-
- if (working || scanning || hw_roc)
- led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK;
- else
- led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK;
-
- if (count)
- led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
- else
- led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED;
-
- ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);
-
- if (hw_roc)
- return ieee80211_idle_off(local, "hw remain-on-channel");
- if (working)
- return ieee80211_idle_off(local, "working");
- if (scanning)
- return ieee80211_idle_off(local, "scanning");
- if (!count)
- return ieee80211_idle_on(local);
- else
- return ieee80211_idle_off(local, "in use");
-
- return 0;
-}
-
-void ieee80211_recalc_idle(struct ieee80211_local *local)
-{
- u32 chg;
-
- mutex_lock(&local->iflist_mtx);
- chg = __ieee80211_recalc_idle(local);
- mutex_unlock(&local->iflist_mtx);
- if (chg)
- ieee80211_hw_config(local, chg);
-}
-
static int netdev_notify(struct notifier_block *nb,
unsigned long state,
void *ndev)
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 5bb600d93d77..7ae678ba5d67 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -139,7 +139,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
}
if (ret != -ENOSPC && ret != -EOPNOTSUPP)
- wiphy_err(key->local->hw.wiphy,
+ sdata_err(sdata,
"failed to set key (%d, %pM) to hardware (%d)\n",
key->conf.keyidx,
sta ? sta->sta.addr : bcast_addr, ret);
@@ -186,7 +186,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sta ? &sta->sta : NULL, &key->conf);
if (ret)
- wiphy_err(key->local->hw.wiphy,
+ sdata_err(sdata,
"failed to remove key (%d, %pM) from hardware (%d)\n",
key->conf.keyidx,
sta ? sta->sta.addr : bcast_addr, ret);
@@ -194,26 +194,6 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
}
-void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
-{
- struct ieee80211_key *key;
-
- key = container_of(key_conf, struct ieee80211_key, conf);
-
- might_sleep();
- assert_key_lock(key->local);
-
- key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
-
- /*
- * Flush TX path to avoid attempts to use this key
- * after this function returns. Until then, drivers
- * must be prepared to handle the key.
- */
- synchronize_rcu();
-}
-EXPORT_SYMBOL_GPL(ieee80211_key_removed);
-
static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
int idx, bool uni, bool multi)
{
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f5548e953259..c26e231c733a 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -322,7 +322,8 @@ static void ieee80211_restart_work(struct work_struct *work)
mutex_lock(&local->mtx);
WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- local->sched_scanning,
+ rcu_dereference_protected(local->sched_scan_sdata,
+ lockdep_is_held(&local->mtx)),
"%s called with hardware scan in progress\n", __func__);
mutex_unlock(&local->mtx);
@@ -345,6 +346,13 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
ieee80211_stop_queues_by_reason(hw,
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+ /*
+ * Stop all Rx during the reconfig. We don't want state changes
+ * or driver callbacks while this is in progress.
+ */
+ local->in_reconfig = true;
+ barrier();
+
schedule_work(&local->restart_work);
}
EXPORT_SYMBOL(ieee80211_restart_hw);
@@ -455,7 +463,9 @@ static const struct ieee80211_txrx_stypes
ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
[NL80211_IFTYPE_ADHOC] = {
.tx = 0xffff,
- .rx = BIT(IEEE80211_STYPE_ACTION >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_AUTH >> 4) |
+ BIT(IEEE80211_STYPE_DEAUTH >> 4),
},
[NL80211_IFTYPE_STATION] = {
.tx = 0xffff,
@@ -578,7 +588,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);
- BUG_ON(!ops->tx && !ops->tx_frags);
+ BUG_ON(!ops->tx);
BUG_ON(!ops->start);
BUG_ON(!ops->stop);
BUG_ON(!ops->config);
@@ -625,8 +635,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
- ieee80211_work_init(local);
-
INIT_WORK(&local->restart_work, ieee80211_restart_work);
INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
@@ -669,7 +677,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
ieee80211_led_names(local);
- ieee80211_hw_roc_setup(local);
+ ieee80211_roc_setup(local);
return &local->hw;
}
@@ -681,7 +689,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
int result, i;
enum ieee80211_band band;
int channels, max_bitrates;
- bool supp_ht;
+ bool supp_ht, supp_vht;
+ netdev_features_t feature_whitelist;
static const u32 cipher_suites[] = {
/* keep WEP first, it may be removed below */
WLAN_CIPHER_SUITE_WEP40,
@@ -698,16 +707,21 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
local->hw.offchannel_tx_hw_queue >= local->hw.queues))
return -EINVAL;
- if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
#ifdef CONFIG_PM
- && (!local->ops->suspend || !local->ops->resume)
-#endif
- )
+ if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) &&
+ (!local->ops->suspend || !local->ops->resume))
return -EINVAL;
+#endif
if ((hw->flags & IEEE80211_HW_SCAN_WHILE_IDLE) && !local->ops->hw_scan)
return -EINVAL;
+ /* Only HW csum features are currently compatible with mac80211 */
+ feature_whitelist = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_CSUM;
+ if (WARN_ON(hw->netdev_features & ~feature_whitelist))
+ return -EINVAL;
+
if (hw->max_report_rates == 0)
hw->max_report_rates = hw->max_rates;
@@ -719,6 +733,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
channels = 0;
max_bitrates = 0;
supp_ht = false;
+ supp_vht = false;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
struct ieee80211_supported_band *sband;
@@ -736,6 +751,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (max_bitrates < sband->n_bitrates)
max_bitrates = sband->n_bitrates;
supp_ht = supp_ht || sband->ht_cap.ht_supported;
+ supp_vht = supp_vht || sband->vht_cap.vht_supported;
}
local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
@@ -811,6 +827,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (supp_ht)
local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);
+ if (supp_vht)
+ local->scan_ies_len +=
+ 2 + sizeof(struct ieee80211_vht_capabilities);
+
if (!local->ops->hw_scan) {
/* For hw_scan, driver needs to set these up. */
local->hw.wiphy->max_scan_ssids = 4;
@@ -1009,12 +1029,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
rtnl_unlock();
- /*
- * Now all work items will be gone, but the
- * timer might still be armed, so delete it
- */
- del_timer_sync(&local->work_timer);
-
cancel_work_sync(&local->restart_work);
cancel_work_sync(&local->reconfig_filter);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 2913113c5833..6fac18c0423f 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -133,7 +133,7 @@ bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
}
/**
- * mesh_accept_plinks_update: update accepting_plink in local mesh beacons
+ * mesh_accept_plinks_update - update accepting_plink in local mesh beacons
*
* @sdata: mesh interface in which mesh beacons are going to be updated
*/
@@ -443,7 +443,7 @@ static void ieee80211_mesh_path_root_timer(unsigned long data)
void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
{
- if (ifmsh->mshcfg.dot11MeshHWMPRootMode)
+ if (ifmsh->mshcfg.dot11MeshHWMPRootMode > IEEE80211_ROOTMODE_ROOT)
set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
else {
clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
@@ -523,11 +523,6 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
{
bool free_plinks;
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: running mesh housekeeping\n",
- sdata->name);
-#endif
-
ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
mesh_path_expire(sdata);
@@ -542,11 +537,17 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ u32 interval;
mesh_path_tx_root_frame(sdata);
+
+ if (ifmsh->mshcfg.dot11MeshHWMPRootMode == IEEE80211_PROACTIVE_RANN)
+ interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
+ else
+ interval = ifmsh->mshcfg.dot11MeshHWMProotInterval;
+
mod_timer(&ifmsh->mesh_path_root_timer,
- round_jiffies(TU_TO_EXP_TIME(
- ifmsh->mshcfg.dot11MeshHWMPRannInterval)));
+ round_jiffies(TU_TO_EXP_TIME(interval)));
}
#ifdef CONFIG_PM
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index e3642756f8f4..faaa39bcfd10 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -104,6 +104,7 @@ enum mesh_deferred_task_flags {
* an mpath to a hash bucket on a path table.
* @rann_snd_addr: the RANN sender address
* @rann_metric: the aggregated path metric towards the root node
+ * @last_preq_to_root: Timestamp of last PREQ sent to root
* @is_root: the destination station of this path is a root node
* @is_gate: the destination station of this path is a mesh gate
*
@@ -131,6 +132,7 @@ struct mesh_path {
spinlock_t state_lock;
u8 rann_snd_addr[ETH_ALEN];
u32 rann_metric;
+ unsigned long last_preq_to_root;
bool is_root;
bool is_gate;
};
@@ -245,7 +247,7 @@ void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
void ieee80211s_update_metric(struct ieee80211_local *local,
- struct sta_info *stainfo, struct sk_buff *skb);
+ struct sta_info *sta, struct sk_buff *skb);
void ieee80211s_stop(void);
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 9b59658e8650..494bc39f61a4 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -13,13 +13,6 @@
#include "wme.h"
#include "mesh.h"
-#ifdef CONFIG_MAC80211_VERBOSE_MHWMP_DEBUG
-#define mhwmp_dbg(fmt, args...) \
- printk(KERN_DEBUG "Mesh HWMP (%s): " fmt "\n", sdata->name, ##args)
-#else
-#define mhwmp_dbg(fmt, args...) do { (void)(0); } while (0)
-#endif
-
#define TEST_FRAME_LEN 8192
#define MAX_METRIC 0xffffffff
#define ARITH_SHIFT 8
@@ -98,6 +91,8 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
#define max_preq_retries(s) (s->u.mesh.mshcfg.dot11MeshHWMPmaxPREQretries)
#define disc_timeout_jiff(s) \
msecs_to_jiffies(sdata->u.mesh.mshcfg.min_discovery_timeout)
+#define root_path_confirmation_jiffies(s) \
+ msecs_to_jiffies(sdata->u.mesh.mshcfg.dot11MeshHWMPconfirmationInterval)
enum mpath_frame_type {
MPATH_PREQ = 0,
@@ -142,19 +137,19 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
switch (action) {
case MPATH_PREQ:
- mhwmp_dbg("sending PREQ to %pM", target);
+ mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
ie_len = 37;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREQ;
break;
case MPATH_PREP:
- mhwmp_dbg("sending PREP to %pM", target);
+ mhwmp_dbg(sdata, "sending PREP to %pM\n", target);
ie_len = 31;
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_PREP;
break;
case MPATH_RANN:
- mhwmp_dbg("sending RANN from %pM", orig_addr);
+ mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
ie_len = sizeof(struct ieee80211_rann_ie);
pos = skb_put(skb, 2 + ie_len);
*pos++ = WLAN_EID_RANN;
@@ -303,7 +298,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
}
void ieee80211s_update_metric(struct ieee80211_local *local,
- struct sta_info *stainfo, struct sk_buff *skb)
+ struct sta_info *sta, struct sk_buff *skb)
{
struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -315,15 +310,14 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
/* moving average, scaled to 100 */
- stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed);
- if (stainfo->fail_avg > 95)
- mesh_plink_broken(stainfo);
+ sta->fail_avg = ((80 * sta->fail_avg + 5) / 100 + 20 * failed);
+ if (sta->fail_avg > 95)
+ mesh_plink_broken(sta);
}
static u32 airtime_link_metric_get(struct ieee80211_local *local,
struct sta_info *sta)
{
- struct ieee80211_supported_band *sband;
struct rate_info rinfo;
/* This should be adjusted for each device */
int device_constant = 1 << ARITH_SHIFT;
@@ -333,8 +327,6 @@ static u32 airtime_link_metric_get(struct ieee80211_local *local,
u32 tx_time, estimated_retx;
u64 result;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
if (sta->fail_avg >= 100)
return MAX_METRIC;
@@ -519,10 +511,11 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
struct mesh_path *mpath = NULL;
u8 *target_addr, *orig_addr;
const u8 *da;
- u8 target_flags, ttl;
- u32 orig_sn, target_sn, lifetime;
+ u8 target_flags, ttl, flags;
+ u32 orig_sn, target_sn, lifetime, orig_metric;
bool reply = false;
bool forward = true;
+ bool root_is_gate;
/* Update target SN, if present */
target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
@@ -530,11 +523,15 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
target_sn = PREQ_IE_TARGET_SN(preq_elem);
orig_sn = PREQ_IE_ORIG_SN(preq_elem);
target_flags = PREQ_IE_TARGET_F(preq_elem);
+ orig_metric = metric;
+ /* Proactive PREQ gate announcements */
+ flags = PREQ_IE_FLAGS(preq_elem);
+ root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
- mhwmp_dbg("received PREQ from %pM", orig_addr);
+ mhwmp_dbg(sdata, "received PREQ from %pM\n", orig_addr);
if (ether_addr_equal(target_addr, sdata->vif.addr)) {
- mhwmp_dbg("PREQ is for us");
+ mhwmp_dbg(sdata, "PREQ is for us\n");
forward = false;
reply = true;
metric = 0;
@@ -544,6 +541,22 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
target_sn = ++ifmsh->sn;
ifmsh->last_sn_update = jiffies;
}
+ } else if (is_broadcast_ether_addr(target_addr) &&
+ (target_flags & IEEE80211_PREQ_TO_FLAG)) {
+ rcu_read_lock();
+ mpath = mesh_path_lookup(orig_addr, sdata);
+ if (mpath) {
+ if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
+ reply = true;
+ target_addr = sdata->vif.addr;
+ target_sn = ++ifmsh->sn;
+ metric = 0;
+ ifmsh->last_sn_update = jiffies;
+ }
+ if (root_is_gate)
+ mesh_path_add_gate(mpath);
+ }
+ rcu_read_unlock();
} else {
rcu_read_lock();
mpath = mesh_path_lookup(target_addr, sdata);
@@ -570,19 +583,20 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
lifetime = PREQ_IE_LIFETIME(preq_elem);
ttl = ifmsh->mshcfg.element_ttl;
if (ttl != 0) {
- mhwmp_dbg("replying to the PREQ");
+ mhwmp_dbg(sdata, "replying to the PREQ\n");
mesh_path_sel_frame_tx(MPATH_PREP, 0, orig_addr,
cpu_to_le32(orig_sn), 0, target_addr,
cpu_to_le32(target_sn), mgmt->sa, 0, ttl,
cpu_to_le32(lifetime), cpu_to_le32(metric),
0, sdata);
- } else
+ } else {
ifmsh->mshstats.dropped_frames_ttl++;
+ }
}
if (forward && ifmsh->mshcfg.dot11MeshForwarding) {
u32 preq_id;
- u8 hopcount, flags;
+ u8 hopcount;
ttl = PREQ_IE_TTL(preq_elem);
lifetime = PREQ_IE_LIFETIME(preq_elem);
@@ -590,13 +604,19 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
ifmsh->mshstats.dropped_frames_ttl++;
return;
}
- mhwmp_dbg("forwarding the PREQ from %pM", orig_addr);
+ mhwmp_dbg(sdata, "forwarding the PREQ from %pM\n", orig_addr);
--ttl;
- flags = PREQ_IE_FLAGS(preq_elem);
preq_id = PREQ_IE_PREQ_ID(preq_elem);
hopcount = PREQ_IE_HOPCOUNT(preq_elem) + 1;
da = (mpath && mpath->is_root) ?
mpath->rann_snd_addr : broadcast_addr;
+
+ if (flags & IEEE80211_PREQ_PROACTIVE_PREP_FLAG) {
+ target_addr = PREQ_IE_TARGET_ADDR(preq_elem);
+ target_sn = PREQ_IE_TARGET_SN(preq_elem);
+ metric = orig_metric;
+ }
+
mesh_path_sel_frame_tx(MPATH_PREQ, flags, orig_addr,
cpu_to_le32(orig_sn), target_flags, target_addr,
cpu_to_le32(target_sn), da,
@@ -631,7 +651,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
u8 next_hop[ETH_ALEN];
u32 target_sn, orig_sn, lifetime;
- mhwmp_dbg("received PREP from %pM", PREP_IE_ORIG_ADDR(prep_elem));
+ mhwmp_dbg(sdata, "received PREP from %pM\n",
+ PREP_IE_ORIG_ADDR(prep_elem));
orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -744,11 +765,6 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
bool root_is_gate;
ttl = rann->rann_ttl;
- if (ttl <= 1) {
- ifmsh->mshstats.dropped_frames_ttl++;
- return;
- }
- ttl--;
flags = rann->rann_flags;
root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
orig_addr = rann->rann_addr;
@@ -762,8 +778,9 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
if (ether_addr_equal(orig_addr, sdata->vif.addr))
return;
- mhwmp_dbg("received RANN from %pM via neighbour %pM (is_gate=%d)",
- orig_addr, mgmt->sa, root_is_gate);
+ mhwmp_dbg(sdata,
+ "received RANN from %pM via neighbour %pM (is_gate=%d)\n",
+ orig_addr, mgmt->sa, root_is_gate);
rcu_read_lock();
sta = sta_info_get(sdata, mgmt->sa);
@@ -785,34 +802,50 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
}
}
+ if (!(SN_LT(mpath->sn, orig_sn)) &&
+ !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
+ rcu_read_unlock();
+ return;
+ }
+
if ((!(mpath->flags & (MESH_PATH_ACTIVE | MESH_PATH_RESOLVING)) ||
- time_after(jiffies, mpath->exp_time - 1*HZ)) &&
- !(mpath->flags & MESH_PATH_FIXED)) {
- mhwmp_dbg("%s time to refresh root mpath %pM", sdata->name,
- orig_addr);
+ (time_after(jiffies, mpath->last_preq_to_root +
+ root_path_confirmation_jiffies(sdata)) ||
+ time_before(jiffies, mpath->last_preq_to_root))) &&
+ !(mpath->flags & MESH_PATH_FIXED) && (ttl != 0)) {
+ mhwmp_dbg(sdata,
+ "time to refresh root mpath %pM\n",
+ orig_addr);
mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
+ mpath->last_preq_to_root = jiffies;
}
- if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn &&
- metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) {
+ mpath->sn = orig_sn;
+ mpath->rann_metric = metric + metric_txsta;
+ mpath->is_root = true;
+ /* Recording RANNs sender address to send individually
+ * addressed PREQs destined for root mesh STA */
+ memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
+
+ if (root_is_gate)
+ mesh_path_add_gate(mpath);
+
+ if (ttl <= 1) {
+ ifmsh->mshstats.dropped_frames_ttl++;
+ rcu_read_unlock();
+ return;
+ }
+ ttl--;
+
+ if (ifmsh->mshcfg.dot11MeshForwarding) {
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
cpu_to_le32(orig_sn),
0, NULL, 0, broadcast_addr,
hopcount, ttl, cpu_to_le32(interval),
cpu_to_le32(metric + metric_txsta),
0, sdata);
- mpath->sn = orig_sn;
- mpath->rann_metric = metric + metric_txsta;
- /* Recording RANNs sender address to send individually
- * addressed PREQs destined for root mesh STA */
- memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
}
- mpath->is_root = true;
-
- if (root_is_gate)
- mesh_path_add_gate(mpath);
-
rcu_read_unlock();
}
@@ -889,7 +922,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
if (!preq_node) {
- mhwmp_dbg("could not allocate PREQ node");
+ mhwmp_dbg(sdata, "could not allocate PREQ node\n");
return;
}
@@ -898,7 +931,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
kfree(preq_node);
if (printk_ratelimit())
- mhwmp_dbg("PREQ node queue full");
+ mhwmp_dbg(sdata, "PREQ node queue full\n");
return;
}
@@ -1021,12 +1054,15 @@ enddiscovery:
kfree(preq_node);
}
-/* mesh_nexthop_resolve - lookup next hop for given skb and start path
- * discovery if no forwarding information is found.
+/**
+ * mesh_nexthop_resolve - lookup next hop; conditionally start path discovery
*
* @skb: 802.11 frame to be sent
* @sdata: network subif the frame will be sent through
*
+ * Lookup next hop for given skb and start path discovery if no
+ * forwarding information is found.
+ *
* Returns: 0 if the next hop was found and -ENOENT if the frame was queued.
* skb is freeed here if no mpath could be allocated.
*/
@@ -1146,7 +1182,7 @@ void mesh_path_timer(unsigned long data)
if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
ret = mesh_path_send_to_gates(mpath);
if (ret)
- mhwmp_dbg("no gate was reachable");
+ mhwmp_dbg(sdata, "no gate was reachable\n");
} else
mesh_path_flush_pending(mpath);
}
@@ -1157,13 +1193,34 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
- u8 flags;
+ u8 flags, target_flags = 0;
flags = (ifmsh->mshcfg.dot11MeshGateAnnouncementProtocol)
? RANN_FLAG_IS_GATE : 0;
- mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
+
+ switch (ifmsh->mshcfg.dot11MeshHWMPRootMode) {
+ case IEEE80211_PROACTIVE_RANN:
+ mesh_path_sel_frame_tx(MPATH_RANN, flags, sdata->vif.addr,
cpu_to_le32(++ifmsh->sn),
0, NULL, 0, broadcast_addr,
- 0, sdata->u.mesh.mshcfg.element_ttl,
+ 0, ifmsh->mshcfg.element_ttl,
cpu_to_le32(interval), 0, 0, sdata);
+ break;
+ case IEEE80211_PROACTIVE_PREQ_WITH_PREP:
+ flags |= IEEE80211_PREQ_PROACTIVE_PREP_FLAG;
+ case IEEE80211_PROACTIVE_PREQ_NO_PREP:
+ interval = ifmsh->mshcfg.dot11MeshHWMPactivePathToRootTimeout;
+ target_flags |= IEEE80211_PREQ_TO_FLAG |
+ IEEE80211_PREQ_USN_FLAG;
+ mesh_path_sel_frame_tx(MPATH_PREQ, flags, sdata->vif.addr,
+ cpu_to_le32(++ifmsh->sn), target_flags,
+ (u8 *) broadcast_addr, 0, broadcast_addr,
+ 0, ifmsh->mshcfg.element_ttl,
+ cpu_to_le32(interval),
+ 0, cpu_to_le32(ifmsh->preq_id++), sdata);
+ break;
+ default:
+ mhwmp_dbg(sdata, "Proactive mechanism not supported\n");
+ return;
+ }
}
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index b39224d8255c..075bc535c601 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -18,12 +18,6 @@
#include "ieee80211_i.h"
#include "mesh.h"
-#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
-#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
-#else
-#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
-#endif
-
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER 2
@@ -322,9 +316,8 @@ static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
skb_queue_splice(&gateq, &gate_mpath->frame_queue);
- mpath_dbg("Mpath queue for gate %pM has %d frames\n",
- gate_mpath->dst,
- skb_queue_len(&gate_mpath->frame_queue));
+ mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
+ gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);
if (!copy)
@@ -446,9 +439,9 @@ int mesh_path_add_gate(struct mesh_path *mpath)
hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
spin_unlock_bh(&tbl->gates_lock);
rcu_read_unlock();
- mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
- mpath->sdata->name, mpath->dst,
- mpath->sdata->u.mesh.num_gates);
+ mpath_dbg(mpath->sdata,
+ "Mesh path: Recorded new gate: %pM. %d known gates\n",
+ mpath->dst, mpath->sdata->u.mesh.num_gates);
return 0;
err_rcu:
rcu_read_unlock();
@@ -477,8 +470,8 @@ static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
spin_unlock_bh(&tbl->gates_lock);
mpath->sdata->u.mesh.num_gates--;
mpath->is_gate = false;
- mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
- "%d known gates\n", mpath->sdata->name,
+ mpath_dbg(mpath->sdata,
+ "Mesh path: Deleted gate: %pM. %d known gates\n",
mpath->dst, mpath->sdata->u.mesh.num_gates);
break;
}
@@ -785,7 +778,7 @@ static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
/**
* mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
*
- * @sta - mesh peer to match
+ * @sta: mesh peer to match
*
* RCU notes: this function is called when a mesh plink transitions from
* PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
@@ -840,7 +833,7 @@ static void table_flush_by_iface(struct mesh_table *tbl,
*
* This function deletes both mesh paths as well as mesh portal paths.
*
- * @sdata - interface data to match
+ * @sdata: interface data to match
*
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
@@ -946,19 +939,20 @@ int mesh_path_send_to_gates(struct mesh_path *mpath)
continue;
if (gate->mpath->flags & MESH_PATH_ACTIVE) {
- mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
+ mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
from_mpath = gate->mpath;
copy = true;
} else {
- mpath_dbg("Not forwarding %p\n", gate->mpath);
- mpath_dbg("flags %x\n", gate->mpath->flags);
+ mpath_dbg(sdata,
+ "Not forwarding %p (flags %#x)\n",
+ gate->mpath, gate->mpath->flags);
}
}
hlist_for_each_entry_rcu(gate, n, known_gates, list)
if (gate->mpath->sdata == sdata) {
- mpath_dbg("Sending to %pM\n", gate->mpath->dst);
+ mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
mesh_path_tx_pending(gate->mpath);
}
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 60ef235c9d9b..af671b984df3 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -13,12 +13,6 @@
#include "rate.h"
#include "mesh.h"
-#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
-#define mpl_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
-#else
-#define mpl_dbg(fmt, args...) do { (void)(0); } while (0)
-#endif
-
#define PLINK_GET_LLID(p) (p + 2)
#define PLINK_GET_PLID(p) (p + 4)
@@ -105,7 +99,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
return sta;
}
-/*
+/**
* mesh_set_ht_prot_mode - set correct HT protection mode
*
* Section 9.23.3.5 of IEEE 80211-2012 describes the protection rules for HT
@@ -134,12 +128,14 @@ static u32 mesh_set_ht_prot_mode(struct ieee80211_sub_if_data *sdata)
switch (sta->ch_type) {
case NL80211_CHAN_NO_HT:
- mpl_dbg("mesh_plink %pM: nonHT sta (%pM) is present",
+ mpl_dbg(sdata,
+ "mesh_plink %pM: nonHT sta (%pM) is present\n",
sdata->vif.addr, sta->sta.addr);
non_ht_sta = true;
goto out;
case NL80211_CHAN_HT20:
- mpl_dbg("mesh_plink %pM: HT20 sta (%pM) is present",
+ mpl_dbg(sdata,
+ "mesh_plink %pM: HT20 sta (%pM) is present\n",
sdata->vif.addr, sta->sta.addr);
ht20_sta = true;
default:
@@ -160,7 +156,8 @@ out:
sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
sdata->u.mesh.mshcfg.ht_opmode = ht_opmode;
changed = BSS_CHANGED_HT;
- mpl_dbg("mesh_plink %pM: protection mode changed to %d",
+ mpl_dbg(sdata,
+ "mesh_plink %pM: protection mode changed to %d\n",
sdata->vif.addr, ht_opmode);
}
@@ -261,8 +258,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, 2);
memcpy(pos + 2, &plid, 2);
}
- if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
- ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
+ if (ieee80211_add_srates_ie(sdata, skb, true) ||
+ ieee80211_add_ext_srates_ie(sdata, skb, true) ||
mesh_add_rsn_ie(skb, sdata) ||
mesh_add_meshid_ie(skb, sdata) ||
mesh_add_meshconf_ie(skb, sdata))
@@ -323,7 +320,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
return 0;
}
-/* mesh_peer_init - initialize new mesh peer and return resulting sta_info
+/**
+ * mesh_peer_init - initialize new mesh peer and return resulting sta_info
*
* @sdata: local meshif
* @addr: peer's address
@@ -437,7 +435,8 @@ static void mesh_plink_timer(unsigned long data)
spin_unlock_bh(&sta->lock);
return;
}
- mpl_dbg("Mesh plink timer for %pM fired on state %d\n",
+ mpl_dbg(sta->sdata,
+ "Mesh plink timer for %pM fired on state %d\n",
sta->sta.addr, sta->plink_state);
reason = 0;
llid = sta->llid;
@@ -450,7 +449,8 @@ static void mesh_plink_timer(unsigned long data)
/* retry timer */
if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
u32 rand;
- mpl_dbg("Mesh plink for %pM (retry, timeout): %d %d\n",
+ mpl_dbg(sta->sdata,
+ "Mesh plink for %pM (retry, timeout): %d %d\n",
sta->sta.addr, sta->plink_retries,
sta->plink_timeout);
get_random_bytes(&rand, sizeof(u32));
@@ -530,7 +530,8 @@ int mesh_plink_open(struct sta_info *sta)
sta->plink_state = NL80211_PLINK_OPN_SNT;
mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
spin_unlock_bh(&sta->lock);
- mpl_dbg("Mesh plink: starting establishment with %pM\n",
+ mpl_dbg(sdata,
+ "Mesh plink: starting establishment with %pM\n",
sta->sta.addr);
return mesh_plink_frame_tx(sdata, WLAN_SP_MESH_PEERING_OPEN,
@@ -565,7 +566,6 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
u8 *baseaddr;
u32 changed = 0;
__le16 plid, llid, reason;
-#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
static const char *mplstates[] = {
[NL80211_PLINK_LISTEN] = "LISTEN",
[NL80211_PLINK_OPN_SNT] = "OPN-SNT",
@@ -575,14 +575,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
[NL80211_PLINK_HOLDING] = "HOLDING",
[NL80211_PLINK_BLOCKED] = "BLOCKED"
};
-#endif
/* need action_code, aux */
if (len < IEEE80211_MIN_ACTION_SIZE + 3)
return;
if (is_multicast_ether_addr(mgmt->da)) {
- mpl_dbg("Mesh plink: ignore frame from multicast address");
+ mpl_dbg(sdata,
+ "Mesh plink: ignore frame from multicast address\n");
return;
}
@@ -595,12 +595,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
}
ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
if (!elems.peering) {
- mpl_dbg("Mesh plink: missing necessary peer link ie\n");
+ mpl_dbg(sdata,
+ "Mesh plink: missing necessary peer link ie\n");
return;
}
if (elems.rsn_len &&
sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
- mpl_dbg("Mesh plink: can't establish link with secure peer\n");
+ mpl_dbg(sdata,
+ "Mesh plink: can't establish link with secure peer\n");
return;
}
@@ -610,14 +612,15 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
(ftype == WLAN_SP_MESH_PEERING_CONFIRM && ie_len != 6) ||
(ftype == WLAN_SP_MESH_PEERING_CLOSE && ie_len != 6
&& ie_len != 8)) {
- mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n",
- ftype, ie_len);
+ mpl_dbg(sdata,
+ "Mesh plink: incorrect plink ie length %d %d\n",
+ ftype, ie_len);
return;
}
if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
(!elems.mesh_id || !elems.mesh_config)) {
- mpl_dbg("Mesh plink: missing necessary ie\n");
+ mpl_dbg(sdata, "Mesh plink: missing necessary ie\n");
return;
}
/* Note the lines below are correct, the llid in the frame is the plid
@@ -632,21 +635,21 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
sta = sta_info_get(sdata, mgmt->sa);
if (!sta && ftype != WLAN_SP_MESH_PEERING_OPEN) {
- mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
+ mpl_dbg(sdata, "Mesh plink: cls or cnf from unknown peer\n");
rcu_read_unlock();
return;
}
if (ftype == WLAN_SP_MESH_PEERING_OPEN &&
!rssi_threshold_check(sta, sdata)) {
- mpl_dbg("Mesh plink: %pM does not meet rssi threshold\n",
+ mpl_dbg(sdata, "Mesh plink: %pM does not meet rssi threshold\n",
mgmt->sa);
rcu_read_unlock();
return;
}
if (sta && !test_sta_flag(sta, WLAN_STA_AUTH)) {
- mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
+ mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
rcu_read_unlock();
return;
}
@@ -683,7 +686,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
} else if (!sta) {
/* ftype == WLAN_SP_MESH_PEERING_OPEN */
if (!mesh_plink_free_count(sdata)) {
- mpl_dbg("Mesh plink error: no more free plinks\n");
+ mpl_dbg(sdata, "Mesh plink error: no more free plinks\n");
rcu_read_unlock();
return;
}
@@ -724,7 +727,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
event = CLS_ACPT;
break;
default:
- mpl_dbg("Mesh plink: unknown frame subtype\n");
+ mpl_dbg(sdata, "Mesh plink: unknown frame subtype\n");
rcu_read_unlock();
return;
}
@@ -734,13 +737,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
/* allocate sta entry if necessary and update info */
sta = mesh_peer_init(sdata, mgmt->sa, &elems);
if (!sta) {
- mpl_dbg("Mesh plink: failed to init peer!\n");
+ mpl_dbg(sdata, "Mesh plink: failed to init peer!\n");
rcu_read_unlock();
return;
}
}
- mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
+ mpl_dbg(sdata,
+ "Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
mgmt->sa, mplstates[sta->plink_state],
le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
event);
@@ -851,7 +855,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
mesh_plink_inc_estab_count(sdata);
changed |= mesh_set_ht_prot_mode(sdata);
changed |= BSS_CHANGED_BEACON;
- mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
+ mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
break;
default:
@@ -887,7 +891,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
mesh_plink_inc_estab_count(sdata);
changed |= mesh_set_ht_prot_mode(sdata);
changed |= BSS_CHANGED_BEACON;
- mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
+ mpl_dbg(sdata, "Mesh plink with %pM ESTABLISHED\n",
sta->sta.addr);
mesh_plink_frame_tx(sdata,
WLAN_SP_MESH_PEERING_CONFIRM,
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index 38d30e8ce6dc..accfa00ffcdf 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -12,13 +12,6 @@
#include "mesh.h"
#include "driver-ops.h"
-#ifdef CONFIG_MAC80211_VERBOSE_MESH_SYNC_DEBUG
-#define msync_dbg(fmt, args...) \
- printk(KERN_DEBUG "Mesh sync (%s): " fmt "\n", sdata->name, ##args)
-#else
-#define msync_dbg(fmt, args...) do { (void)(0); } while (0)
-#endif
-
/* This is not in the standard. It represents a tolerable tbtt drift below
* which we do no TSF adjustment.
*/
@@ -65,14 +58,14 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
spin_lock_bh(&ifmsh->sync_offset_lock);
if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
- msync_dbg("TBTT : max clockdrift=%lld; adjusting",
- (long long) ifmsh->sync_offset_clockdrift_max);
+ msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n",
+ (long long) ifmsh->sync_offset_clockdrift_max);
tsfdelta = -ifmsh->sync_offset_clockdrift_max;
ifmsh->sync_offset_clockdrift_max = 0;
} else {
- msync_dbg("TBTT : max clockdrift=%lld; adjusting by %llu",
- (long long) ifmsh->sync_offset_clockdrift_max,
- (unsigned long long) beacon_int_fraction);
+ msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting by %llu\n",
+ (long long) ifmsh->sync_offset_clockdrift_max,
+ (unsigned long long) beacon_int_fraction);
tsfdelta = -beacon_int_fraction;
ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
}
@@ -120,7 +113,7 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
- msync_dbg("STA %pM : is adjusting TBTT", sta->sta.addr);
+ msync_dbg(sdata, "STA %pM : is adjusting TBTT\n", sta->sta.addr);
goto no_sync;
}
@@ -169,7 +162,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
s64 t_clockdrift = sta->t_offset_setpoint
- sta->t_offset;
- msync_dbg("STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld",
+ msync_dbg(sdata,
+ "STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld\n",
sta->sta.addr,
(long long) sta->t_offset,
(long long)
@@ -178,7 +172,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
- msync_dbg("STA %pM : t_clockdrift=%lld too large, setpoint reset",
+ msync_dbg(sdata,
+ "STA %pM : t_clockdrift=%lld too large, setpoint reset\n",
sta->sta.addr,
(long long) t_clockdrift);
clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
@@ -197,8 +192,8 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
} else {
sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
- msync_dbg("STA %pM : offset was invalid, "
- " sta->t_offset=%lld",
+ msync_dbg(sdata,
+ "STA %pM : offset was invalid, sta->t_offset=%lld\n",
sta->sta.addr,
(long long) sta->t_offset);
rcu_read_unlock();
@@ -226,17 +221,15 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
* to the driver tsf setter, we punt
* the tsf adjustment to the mesh tasklet
*/
- msync_dbg("TBTT : kicking off TBTT "
- "adjustment with "
- "clockdrift_max=%lld",
- ifmsh->sync_offset_clockdrift_max);
+ msync_dbg(sdata,
+ "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
+ ifmsh->sync_offset_clockdrift_max);
set_bit(MESH_WORK_DRIFT_ADJUST,
&ifmsh->wrkq_flags);
} else {
- msync_dbg("TBTT : max clockdrift=%lld; "
- "too small to adjust",
- (long long)
- ifmsh->sync_offset_clockdrift_max);
+ msync_dbg(sdata,
+ "TBTT : max clockdrift=%lld; too small to adjust\n",
+ (long long)ifmsh->sync_offset_clockdrift_max);
ifmsh->sync_offset_clockdrift_max = 0;
}
spin_unlock_bh(&ifmsh->sync_offset_lock);
@@ -268,7 +261,7 @@ static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
const u8 *oui;
WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
- msync_dbg("called mesh_sync_vendor_rx_bcn_presp");
+ msync_dbg(sdata, "called mesh_sync_vendor_rx_bcn_presp\n");
oui = mesh_get_vendor_oui(sdata);
/* here you would implement the vendor offset tracking for this oui */
}
@@ -278,7 +271,7 @@ static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
const u8 *oui;
WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
- msync_dbg("called mesh_sync_vendor_adjust_tbtt");
+ msync_dbg(sdata, "called mesh_sync_vendor_adjust_tbtt\n");
oui = mesh_get_vendor_oui(sdata);
/* here you would implement the vendor tsf adjustment for this oui */
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 66e4fcdd1c6b..cef0c9e79aba 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -258,12 +258,11 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
}
static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb, const u8 *ht_oper_ie,
+ struct sk_buff *skb, u8 ap_ht_param,
struct ieee80211_supported_band *sband,
struct ieee80211_channel *channel,
enum ieee80211_smps_mode smps)
{
- struct ieee80211_ht_operation *ht_oper;
u8 *pos;
u32 flags = channel->flags;
u16 cap;
@@ -271,21 +270,13 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
- if (!ht_oper_ie)
- return;
-
- if (ht_oper_ie[1] < sizeof(struct ieee80211_ht_operation))
- return;
-
memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
ieee80211_apply_htcap_overrides(sdata, &ht_cap);
- ht_oper = (struct ieee80211_ht_operation *)(ht_oper_ie + 2);
-
/* determine capability flags */
cap = ht_cap.cap;
- switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ switch (ap_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -509,7 +500,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
}
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
- ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie,
+ ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
sband, local->oper_channel, ifmgd->ap_smps);
/* if present, add any custom non-vendor IEs that go after HT */
@@ -550,6 +541,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
memcpy(pos, assoc_data->ie + offset, noffset - offset);
}
+ drv_mgd_prepare_tx(local, sdata);
+
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
}
@@ -589,6 +582,9 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
IEEE80211_SKB_CB(skb)->flags |=
IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+ drv_mgd_prepare_tx(local, sdata);
+
ieee80211_tx_skb(sdata, skb);
}
}
@@ -911,9 +907,6 @@ static bool ieee80211_powersave_allowed(struct ieee80211_sub_if_data *sdata)
if (!mgd->associated)
return false;
- if (!mgd->associated->beacon_ies)
- return false;
-
if (mgd->flags & (IEEE80211_STA_BEACON_POLL |
IEEE80211_STA_CONNECTION_POLL))
return false;
@@ -939,11 +932,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
return;
}
- if (!list_empty(&local->work_list)) {
- local->ps_sdata = NULL;
- goto change;
- }
-
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
continue;
@@ -1016,7 +1004,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
local->ps_sdata = NULL;
}
- change:
ieee80211_change_ps(local);
}
@@ -1121,7 +1108,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
}
/* MLME */
-static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
+static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
u8 *wmm_param, size_t wmm_param_len)
{
@@ -1132,23 +1119,23 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
u8 *pos, uapsd_queues = 0;
if (!local->ops->conf_tx)
- return;
+ return false;
if (local->hw.queues < IEEE80211_NUM_ACS)
- return;
+ return false;
if (!wmm_param)
- return;
+ return false;
if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
- return;
+ return false;
if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
uapsd_queues = ifmgd->uapsd_queues;
count = wmm_param[6] & 0x0f;
if (count == ifmgd->wmm_last_param_set)
- return;
+ return false;
ifmgd->wmm_last_param_set = count;
pos = wmm_param + 8;
@@ -1156,7 +1143,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
memset(&params, 0, sizeof(params));
- local->wmm_acm = 0;
+ sdata->wmm_acm = 0;
for (; left >= 4; left -= 4, pos += 4) {
int aci = (pos[0] >> 5) & 0x03;
int acm = (pos[0] >> 4) & 0x01;
@@ -1167,21 +1154,21 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
case 1: /* AC_BK */
queue = 3;
if (acm)
- local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
+ sdata->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
uapsd = true;
break;
case 2: /* AC_VI */
queue = 1;
if (acm)
- local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
+ sdata->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
uapsd = true;
break;
case 3: /* AC_VO */
queue = 0;
if (acm)
- local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
+ sdata->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
uapsd = true;
break;
@@ -1189,7 +1176,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
default:
queue = 2;
if (acm)
- local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
+ sdata->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
uapsd = true;
break;
@@ -1201,23 +1188,21 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.txop = get_unaligned_le16(pos + 2);
params.uapsd = uapsd;
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy,
- "WMM queue=%d aci=%d acm=%d aifs=%d "
- "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
- queue, aci, acm,
- params.aifs, params.cw_min, params.cw_max,
- params.txop, params.uapsd);
-#endif
+ mlme_dbg(sdata,
+ "WMM queue=%d aci=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
+ queue, aci, acm,
+ params.aifs, params.cw_min, params.cw_max,
+ params.txop, params.uapsd);
sdata->tx_conf[queue] = params;
if (drv_conf_tx(local, sdata, queue, &params))
- wiphy_debug(local->hw.wiphy,
- "failed to set TX queue parameters for queue %d\n",
- queue);
+ sdata_err(sdata,
+ "failed to set TX queue parameters for queue %d\n",
+ queue);
}
/* enable WMM or activate new settings */
sdata->vif.bss_conf.qos = true;
+ return true;
}
static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
@@ -1284,13 +1269,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
bss_info_changed |= BSS_CHANGED_ASSOC;
- /* set timing information */
- bss_conf->beacon_int = cbss->beacon_interval;
- bss_conf->last_tsf = cbss->tsf;
-
- bss_info_changed |= BSS_CHANGED_BEACON_INT;
bss_info_changed |= ieee80211_handle_bss_capability(sdata,
- cbss->capability, bss->has_erp_value, bss->erp_value);
+ bss_conf->assoc_capability, bss->has_erp_value, bss->erp_value);
sdata->u.mgd.beacon_timeout = usecs_to_jiffies(ieee80211_tu_to_usec(
IEEE80211_BEACON_LOSS_COUNT * bss_conf->beacon_int));
@@ -1342,7 +1322,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
u32 changed = 0;
- u8 bssid[ETH_ALEN];
ASSERT_MGD_MTX(ifmgd);
@@ -1354,10 +1333,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
ieee80211_stop_poll(sdata);
- memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
-
ifmgd->associated = NULL;
- memset(ifmgd->bssid, 0, ETH_ALEN);
/*
* we need to commit the associated = NULL change because the
@@ -1377,22 +1353,40 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
netif_carrier_off(sdata->dev);
mutex_lock(&local->sta_mtx);
- sta = sta_info_get(sdata, bssid);
+ sta = sta_info_get(sdata, ifmgd->bssid);
if (sta) {
set_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_sta_tear_down_BA_sessions(sta, tx);
}
mutex_unlock(&local->sta_mtx);
+ /*
+ * if we want to get out of ps before disassoc (why?) we have
+ * to do it before sending disassoc, as otherwise the null-packet
+ * won't be valid.
+ */
+ if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ local->hw.conf.flags &= ~IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
+ local->ps_sdata = NULL;
+
+ /* flush out any pending frame (e.g. DELBA) before deauth/disassoc */
+ if (tx)
+ drv_flush(local, false);
+
/* deauthenticate/disassociate now */
if (tx || frame_buf)
- ieee80211_send_deauth_disassoc(sdata, bssid, stype, reason,
- tx, frame_buf);
+ ieee80211_send_deauth_disassoc(sdata, ifmgd->bssid, stype,
+ reason, tx, frame_buf);
/* flush out frame */
if (tx)
drv_flush(local, false);
+ /* clear bssid only after building the needed mgmt frames */
+ memset(ifmgd->bssid, 0, ETH_ALEN);
+
/* remove AP and TDLS peers */
sta_info_flush(local, sdata);
@@ -1412,12 +1406,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
del_timer_sync(&local->dynamic_ps_timer);
cancel_work_sync(&local->dynamic_ps_enable_work);
- if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- }
- local->ps_sdata = NULL;
-
/* Disable ARP filtering */
if (sdata->vif.bss_conf.arp_filter_enabled) {
sdata->vif.bss_conf.arp_filter_enabled = false;
@@ -1582,11 +1570,12 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
goto out;
}
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (beacon)
- net_dbg_ratelimited("%s: detected beacon loss from AP - sending probe request\n",
- sdata->name);
-#endif
+ mlme_dbg_ratelimited(sdata,
+ "detected beacon loss from AP - sending probe request\n");
+
+ ieee80211_cqm_rssi_notify(&sdata->vif,
+ NL80211_CQM_RSSI_BEACON_LOSS_EVENT, GFP_KERNEL);
/*
* The driver/our work has already reported this event or the
@@ -1628,6 +1617,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct cfg80211_bss *cbss;
struct sk_buff *skb;
const u8 *ssid;
int ssid_len;
@@ -1637,16 +1627,22 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
ASSERT_MGD_MTX(ifmgd);
- if (!ifmgd->associated)
+ if (ifmgd->associated)
+ cbss = ifmgd->associated;
+ else if (ifmgd->auth_data)
+ cbss = ifmgd->auth_data->bss;
+ else if (ifmgd->assoc_data)
+ cbss = ifmgd->assoc_data->bss;
+ else
return NULL;
- ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
+ ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
if (WARN_ON_ONCE(ssid == NULL))
ssid_len = 0;
else
ssid_len = ssid[1];
- skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
+ skb = ieee80211_build_probe_req(sdata, cbss->bssid,
(u32) -1, ssid + 2, ssid_len,
NULL, 0, true);
@@ -1669,8 +1665,7 @@ static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
- printk(KERN_DEBUG "%s: Connection to AP %pM lost.\n",
- sdata->name, bssid);
+ sdata_info(sdata, "Connection to AP %pM lost\n", bssid);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
@@ -1766,6 +1761,7 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
if (!elems.challenge)
return;
auth_data->expected_transaction = 4;
+ drv_mgd_prepare_tx(sdata->local, sdata);
ieee80211_send_auth(sdata, 3, auth_data->algorithm,
elems.challenge - 2, elems.challenge_len + 2,
auth_data->bss->bssid, auth_data->bss->bssid,
@@ -1804,9 +1800,10 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
return RX_MGMT_NONE;
if (status_code != WLAN_STATUS_SUCCESS) {
- printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
- sdata->name, mgmt->sa, status_code);
- goto out;
+ sdata_info(sdata, "%pM denied authentication (status %d)\n",
+ mgmt->sa, status_code);
+ ieee80211_destroy_auth_data(sdata, false);
+ return RX_MGMT_CFG80211_RX_AUTH;
}
switch (ifmgd->auth_data->algorithm) {
@@ -1827,8 +1824,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
return RX_MGMT_NONE;
}
- printk(KERN_DEBUG "%s: authenticated\n", sdata->name);
- out:
+ sdata_info(sdata, "authenticated\n");
ifmgd->auth_data->done = true;
ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
run_again(ifmgd, ifmgd->auth_data->timeout);
@@ -1841,8 +1837,7 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
goto out_err;
}
if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
- printk(KERN_DEBUG "%s: failed moving %pM to auth\n",
- sdata->name, bssid);
+ sdata_info(sdata, "failed moving %pM to auth\n", bssid);
goto out_err;
}
mutex_unlock(&sdata->local->sta_mtx);
@@ -1876,8 +1871,8 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
- printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
- sdata->name, bssid, reason_code);
+ sdata_info(sdata, "deauthenticated from %pM (Reason: %u)\n",
+ bssid, reason_code);
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
@@ -1907,8 +1902,8 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
- printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
- sdata->name, mgmt->sa, reason_code);
+ sdata_info(sdata, "disassociated from %pM (Reason: %u)\n",
+ mgmt->sa, reason_code);
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
@@ -2000,17 +1995,15 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
- printk(KERN_DEBUG
- "%s: invalid AID value 0x%x; bits 15:14 not set\n",
- sdata->name, aid);
+ sdata_info(sdata, "invalid AID value 0x%x; bits 15:14 not set\n",
+ aid);
aid &= ~(BIT(15) | BIT(14));
ifmgd->broken_ap = false;
if (aid == 0 || aid > IEEE80211_MAX_AID) {
- printk(KERN_DEBUG
- "%s: invalid AID value %d (out of range), turn off PS\n",
- sdata->name, aid);
+ sdata_info(sdata, "invalid AID value %d (out of range), turn off PS\n",
+ aid);
aid = 0;
ifmgd->broken_ap = true;
}
@@ -2019,8 +2012,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
if (!elems.supp_rates) {
- printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
- sdata->name);
+ sdata_info(sdata, "no SuppRates element in AssocResp\n");
return false;
}
@@ -2060,9 +2052,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
err = sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);
if (err) {
- printk(KERN_DEBUG
- "%s: failed to move station %pM to desired state\n",
- sdata->name, sta->sta.addr);
+ sdata_info(sdata,
+ "failed to move station %pM to desired state\n",
+ sta->sta.addr);
WARN_ON(__sta_info_destroy(sta));
mutex_unlock(&sdata->local->sta_mtx);
return false;
@@ -2145,10 +2137,10 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
- printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
- "status=%d aid=%d)\n",
- sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
- capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
+ sdata_info(sdata,
+ "RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
+ reassoc ? "Rea" : "A", mgmt->sa,
+ capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
pos = mgmt->u.assoc_resp.variable;
ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
@@ -2159,9 +2151,9 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
u32 tu, ms;
tu = get_unaligned_le32(elems.timeout_int + 1);
ms = tu * 1024 / 1000;
- printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
- "comeback duration %u TU (%u ms)\n",
- sdata->name, mgmt->sa, tu, ms);
+ sdata_info(sdata,
+ "%pM rejected association temporarily; comeback duration %u TU (%u ms)\n",
+ mgmt->sa, tu, ms);
assoc_data->timeout = jiffies + msecs_to_jiffies(ms);
if (ms > IEEE80211_ASSOC_TIMEOUT)
run_again(ifmgd, assoc_data->timeout);
@@ -2171,19 +2163,17 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
*bss = assoc_data->bss;
if (status_code != WLAN_STATUS_SUCCESS) {
- printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
- sdata->name, mgmt->sa, status_code);
+ sdata_info(sdata, "%pM denied association (code=%d)\n",
+ mgmt->sa, status_code);
ieee80211_destroy_assoc_data(sdata, false);
} else {
- printk(KERN_DEBUG "%s: associated\n", sdata->name);
-
if (!ieee80211_assoc_success(sdata, *bss, mgmt, len)) {
/* oops -- internal error -- send timeout for now */
- ieee80211_destroy_assoc_data(sdata, true);
- sta_info_destroy_addr(sdata, mgmt->bssid);
+ ieee80211_destroy_assoc_data(sdata, false);
cfg80211_put_bss(*bss);
return RX_MGMT_CFG80211_ASSOC_TIMEOUT;
}
+ sdata_info(sdata, "associated\n");
/*
* destroy assoc_data afterwards, as otherwise an idle
@@ -2283,7 +2273,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
if (ifmgd->auth_data && !ifmgd->auth_data->bss->proberesp_ies &&
ether_addr_equal(mgmt->bssid, ifmgd->auth_data->bss->bssid)) {
/* got probe response, continue with auth */
- printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
+ sdata_info(sdata, "direct probe responded\n");
ifmgd->auth_data->tries = 0;
ifmgd->auth_data->timeout = jiffies;
run_again(ifmgd, ifmgd->auth_data->timeout);
@@ -2419,10 +2409,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
- sdata->name);
-#endif
+ mlme_dbg_ratelimited(sdata,
+ "cancelling probereq poll due to a received beacon\n");
mutex_lock(&local->mtx);
ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
ieee80211_run_deferred_scan(local);
@@ -2448,14 +2436,6 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
directed_tim = ieee80211_check_tim(elems.tim, elems.tim_len,
ifmgd->aid);
- if (ncrc != ifmgd->beacon_crc || !ifmgd->beacon_crc_valid) {
- ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
- true);
-
- ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
- elems.wmm_param_len);
- }
-
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
if (directed_tim) {
if (local->hw.conf.dynamic_ps_timeout > 0) {
@@ -2486,6 +2466,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ifmgd->beacon_crc = ncrc;
ifmgd->beacon_crc_valid = true;
+ ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
+ true);
+
+ if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
+ elems.wmm_param_len))
+ changed |= BSS_CHANGED_QOS;
+
if (elems.erp_info && elems.erp_info_len >= 1) {
erp_valid = true;
erp_value = elems.erp_info[0];
@@ -2645,8 +2632,8 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
auth_data->tries++;
if (auth_data->tries > IEEE80211_AUTH_MAX_TRIES) {
- printk(KERN_DEBUG "%s: authentication with %pM timed out\n",
- sdata->name, auth_data->bss->bssid);
+ sdata_info(sdata, "authentication with %pM timed out\n",
+ auth_data->bss->bssid);
/*
* Most likely AP is not in the range so remove the
@@ -2657,10 +2644,12 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
return -ETIMEDOUT;
}
+ drv_mgd_prepare_tx(local, sdata);
+
if (auth_data->bss->proberesp_ies) {
- printk(KERN_DEBUG "%s: send auth to %pM (try %d/%d)\n",
- sdata->name, auth_data->bss->bssid, auth_data->tries,
- IEEE80211_AUTH_MAX_TRIES);
+ sdata_info(sdata, "send auth to %pM (try %d/%d)\n",
+ auth_data->bss->bssid, auth_data->tries,
+ IEEE80211_AUTH_MAX_TRIES);
auth_data->expected_transaction = 2;
ieee80211_send_auth(sdata, 1, auth_data->algorithm,
@@ -2670,9 +2659,9 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
} else {
const u8 *ssidie;
- printk(KERN_DEBUG "%s: direct probe to %pM (try %d/%i)\n",
- sdata->name, auth_data->bss->bssid, auth_data->tries,
- IEEE80211_AUTH_MAX_TRIES);
+ sdata_info(sdata, "direct probe to %pM (try %d/%i)\n",
+ auth_data->bss->bssid, auth_data->tries,
+ IEEE80211_AUTH_MAX_TRIES);
ssidie = ieee80211_bss_get_ie(auth_data->bss, WLAN_EID_SSID);
if (!ssidie)
@@ -2700,8 +2689,8 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
assoc_data->tries++;
if (assoc_data->tries > IEEE80211_ASSOC_MAX_TRIES) {
- printk(KERN_DEBUG "%s: association with %pM timed out\n",
- sdata->name, assoc_data->bss->bssid);
+ sdata_info(sdata, "association with %pM timed out\n",
+ assoc_data->bss->bssid);
/*
* Most likely AP is not in the range so remove the
@@ -2712,9 +2701,9 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
return -ETIMEDOUT;
}
- printk(KERN_DEBUG "%s: associate with %pM (try %d/%d)\n",
- sdata->name, assoc_data->bss->bssid, assoc_data->tries,
- IEEE80211_ASSOC_MAX_TRIES);
+ sdata_info(sdata, "associate with %pM (try %d/%d)\n",
+ assoc_data->bss->bssid, assoc_data->tries,
+ IEEE80211_ASSOC_MAX_TRIES);
ieee80211_send_assoc(sdata);
assoc_data->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
@@ -2787,45 +2776,31 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
ieee80211_reset_ap_probe(sdata);
else if (ifmgd->nullfunc_failed) {
if (ifmgd->probe_send_count < max_tries) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy,
- "%s: No ack for nullfunc frame to"
- " AP %pM, try %d/%i\n",
- sdata->name, bssid,
- ifmgd->probe_send_count, max_tries);
-#endif
+ mlme_dbg(sdata,
+ "No ack for nullfunc frame to AP %pM, try %d/%i\n",
+ bssid, ifmgd->probe_send_count,
+ max_tries);
ieee80211_mgd_probe_ap_send(sdata);
} else {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy,
- "%s: No ack for nullfunc frame to"
- " AP %pM, disconnecting.\n",
- sdata->name, bssid);
-#endif
+ mlme_dbg(sdata,
+ "No ack for nullfunc frame to AP %pM, disconnecting.\n",
+ bssid);
ieee80211_sta_connection_lost(sdata, bssid,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
}
} else if (time_is_after_jiffies(ifmgd->probe_timeout))
run_again(ifmgd, ifmgd->probe_timeout);
else if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy,
- "%s: Failed to send nullfunc to AP %pM"
- " after %dms, disconnecting.\n",
- sdata->name,
- bssid, probe_wait_ms);
-#endif
+ mlme_dbg(sdata,
+ "Failed to send nullfunc to AP %pM after %dms, disconnecting\n",
+ bssid, probe_wait_ms);
ieee80211_sta_connection_lost(sdata, bssid,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
} else if (ifmgd->probe_send_count < max_tries) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy,
- "%s: No probe response from AP %pM"
- " after %dms, try %d/%i\n",
- sdata->name,
- bssid, probe_wait_ms,
- ifmgd->probe_send_count, max_tries);
-#endif
+ mlme_dbg(sdata,
+ "No probe response from AP %pM after %dms, try %d/%i\n",
+ bssid, probe_wait_ms,
+ ifmgd->probe_send_count, max_tries);
ieee80211_mgd_probe_ap_send(sdata);
} else {
/*
@@ -2940,11 +2915,8 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
mutex_lock(&ifmgd->mtx);
if (ifmgd->associated) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(sdata->local->hw.wiphy,
- "%s: driver requested disconnect after resume.\n",
- sdata->name);
-#endif
+ mlme_dbg(sdata,
+ "driver requested disconnect after resume\n");
ieee80211_sta_connection_lost(sdata,
ifmgd->associated->bssid,
WLAN_REASON_UNSPECIFIED);
@@ -3002,7 +2974,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
/* scan finished notification */
void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
{
- struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+ struct ieee80211_sub_if_data *sdata;
/* Restart STA timers */
rcu_read_lock();
@@ -3032,7 +3004,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_bss *bss = (void *)cbss->priv;
- struct sta_info *sta;
+ struct sta_info *sta = NULL;
bool have_sta = false;
int err;
int ht_cfreq;
@@ -3085,13 +3057,11 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
* since we look at probe response/beacon data here
* it should be OK.
*/
- printk(KERN_DEBUG
- "%s: Wrong control channel: center-freq: %d"
- " ht-cfreq: %d ht->primary_chan: %d"
- " band: %d. Disabling HT.\n",
- sdata->name, cbss->channel->center_freq,
- ht_cfreq, ht_oper->primary_chan,
- cbss->channel->band);
+ sdata_info(sdata,
+ "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+ cbss->channel->center_freq,
+ ht_cfreq, ht_oper->primary_chan,
+ cbss->channel->band);
ht_oper = NULL;
}
}
@@ -3115,9 +3085,8 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
/* can only fail due to HT40+/- mismatch */
channel_type = NL80211_CHAN_HT20;
- printk(KERN_DEBUG
- "%s: disabling 40 MHz due to multi-vif mismatch\n",
- sdata->name);
+ sdata_info(sdata,
+ "disabling 40 MHz due to multi-vif mismatch\n");
ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
WARN_ON(!ieee80211_set_channel_type(local, sdata,
channel_type));
@@ -3126,7 +3095,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
local->oper_channel = cbss->channel;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
- if (!have_sta) {
+ if (sta) {
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit;
int min_rate = INT_MAX, min_rate_index = -1;
@@ -3146,9 +3115,8 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
* we can connect -- with a warning.
*/
if (!basic_rates && min_rate_index >= 0) {
- printk(KERN_DEBUG
- "%s: No basic rates, using min rate instead.\n",
- sdata->name);
+ sdata_info(sdata,
+ "No basic rates, using min rate instead\n");
basic_rates = BIT(min_rate_index);
}
@@ -3164,9 +3132,15 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
memcpy(ifmgd->bssid, cbss->bssid, ETH_ALEN);
- /* tell driver about BSSID and basic rates */
+ /* set timing information */
+ sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
+ sdata->vif.bss_conf.sync_tsf = cbss->tsf;
+ sdata->vif.bss_conf.sync_device_ts = bss->device_ts;
+
+ /* tell driver about BSSID, basic rates and timing */
ieee80211_bss_info_change_notify(sdata,
- BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES);
+ BSS_CHANGED_BSSID | BSS_CHANGED_BASIC_RATES |
+ BSS_CHANGED_BEACON_INT);
if (assoc)
sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
@@ -3174,9 +3148,9 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
err = sta_info_insert(sta);
sta = NULL;
if (err) {
- printk(KERN_DEBUG
- "%s: failed to insert STA entry for the AP (error %d)\n",
- sdata->name, err);
+ sdata_info(sdata,
+ "failed to insert STA entry for the AP (error %d)\n",
+ err);
return err;
}
} else
@@ -3254,8 +3228,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
if (ifmgd->associated)
ieee80211_set_disassoc(sdata, 0, 0, false, NULL);
- printk(KERN_DEBUG "%s: authenticate with %pM\n",
- sdata->name, req->bss->bssid);
+ sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid);
err = ieee80211_prep_connection(sdata, req->bss, false);
if (err)
@@ -3290,7 +3263,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss *bss = (void *)req->bss->priv;
struct ieee80211_mgd_assoc_data *assoc_data;
struct ieee80211_supported_band *sband;
- const u8 *ssidie;
+ const u8 *ssidie, *ht_ie;
int i, err;
ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
@@ -3338,11 +3311,15 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
* We can set this to true for non-11n hardware, that'll be checked
* separately along with the peer capabilities.
*/
- for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
+ for (i = 0; i < req->crypto.n_ciphers_pairwise; i++) {
if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
- req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104) {
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+ netdev_info(sdata->dev,
+ "disabling HT due to WEP/TKIP use\n");
+ }
+ }
if (req->flags & ASSOC_REQ_DISABLE_HT)
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
@@ -3350,8 +3327,11 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
/* Also disable HT if we don't support it or the AP doesn't use WMM */
sband = local->hw.wiphy->bands[req->bss->channel->band];
if (!sband->ht_cap.ht_supported ||
- local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used)
+ local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used) {
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+ netdev_info(sdata->dev,
+ "disabling HT as WMM/QoS is not supported\n");
+ }
memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
@@ -3377,8 +3357,13 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
(local->hw.queues >= IEEE80211_NUM_ACS);
assoc_data->supp_rates = bss->supp_rates;
assoc_data->supp_rates_len = bss->supp_rates_len;
- assoc_data->ht_operation_ie =
- ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
+
+ ht_ie = ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
+ if (ht_ie && ht_ie[1] >= sizeof(struct ieee80211_ht_operation))
+ assoc_data->ap_ht_param =
+ ((struct ieee80211_ht_operation *)(ht_ie + 2))->ht_param;
+ else
+ ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
if (bss->wmm_used && bss->uapsd_supported &&
(sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
@@ -3425,8 +3410,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
* Wait up to one beacon interval ...
* should this be more if we miss one?
*/
- printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
- sdata->name, ifmgd->bssid);
+ sdata_info(sdata, "waiting for beacon from %pM\n",
+ ifmgd->bssid);
assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
} else {
assoc_data->have_beacon = true;
@@ -3445,8 +3430,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
corrupt_type = "beacon";
} else if (bss->corrupt_data & IEEE80211_BSS_CORRUPT_PROBE_RESP)
corrupt_type = "probe response";
- printk(KERN_DEBUG "%s: associating with AP with corrupt %s\n",
- sdata->name, corrupt_type);
+ sdata_info(sdata, "associating with AP with corrupt %s\n",
+ corrupt_type);
}
err = 0;
@@ -3475,9 +3460,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
return 0;
}
- printk(KERN_DEBUG
- "%s: deauthenticating from %pM by local choice (reason=%d)\n",
- sdata->name, req->bssid, req->reason_code);
+ sdata_info(sdata,
+ "deauthenticating from %pM by local choice (reason=%d)\n",
+ req->bssid, req->reason_code);
if (ifmgd->associated &&
ether_addr_equal(ifmgd->associated->bssid, req->bssid))
@@ -3519,8 +3504,9 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
return -ENOLINK;
}
- printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
- sdata->name, req->bss->bssid, req->reason_code);
+ sdata_info(sdata,
+ "disassociating from %pM by local choice (reason=%d)\n",
+ req->bss->bssid, req->reason_code);
memcpy(bssid, req->bss->bssid, ETH_ALEN);
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DISASSOC,
@@ -3561,10 +3547,3 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
}
EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
-
-unsigned char ieee80211_get_operstate(struct ieee80211_vif *vif)
-{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
- return sdata->dev->operstate;
-}
-EXPORT_SYMBOL(ieee80211_get_operstate);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 935aa4b6deee..635c3250c668 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -15,7 +15,7 @@
#include <linux/export.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
-#include "driver-trace.h"
+#include "driver-ops.h"
/*
* Tell our hardware to disable PS.
@@ -24,8 +24,7 @@
* because we *may* be doing work on-operating channel, and want our
* hardware unconditionally awake, but still let the AP send us normal frames.
*/
-static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
- bool tell_ap)
+static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -46,8 +45,8 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata,
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
- if (tell_ap && (!local->offchannel_ps_enabled ||
- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)))
+ if (!local->offchannel_ps_enabled ||
+ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
/*
* If power save was enabled, no need to send a nullfunc
* frame because AP knows that we are sleeping. But if the
@@ -132,7 +131,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local,
if (offchannel_ps_enable &&
(sdata->vif.type == NL80211_IFTYPE_STATION) &&
sdata->u.mgd.associated)
- ieee80211_offchannel_ps_enable(sdata, true);
+ ieee80211_offchannel_ps_enable(sdata);
}
}
mutex_unlock(&local->iflist_mtx);
@@ -181,34 +180,58 @@ void ieee80211_offchannel_return(struct ieee80211_local *local,
mutex_unlock(&local->iflist_mtx);
}
+void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc)
+{
+ if (roc->notified)
+ return;
+
+ if (roc->mgmt_tx_cookie) {
+ if (!WARN_ON(!roc->frame)) {
+ ieee80211_tx_skb(roc->sdata, roc->frame);
+ roc->frame = NULL;
+ }
+ } else {
+ cfg80211_ready_on_channel(&roc->sdata->wdev, (unsigned long)roc,
+ roc->chan, roc->chan_type,
+ roc->req_duration, GFP_KERNEL);
+ }
+
+ roc->notified = true;
+}
+
static void ieee80211_hw_roc_start(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, hw_roc_start);
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_roc_work *roc, *dep, *tmp;
mutex_lock(&local->mtx);
- if (!local->hw_roc_channel) {
- mutex_unlock(&local->mtx);
- return;
- }
+ if (list_empty(&local->roc_list))
+ goto out_unlock;
- if (local->hw_roc_skb) {
- sdata = IEEE80211_DEV_TO_SUB_IF(local->hw_roc_dev);
- ieee80211_tx_skb(sdata, local->hw_roc_skb);
- local->hw_roc_skb = NULL;
- } else {
- cfg80211_ready_on_channel(local->hw_roc_dev,
- local->hw_roc_cookie,
- local->hw_roc_channel,
- local->hw_roc_channel_type,
- local->hw_roc_duration,
- GFP_KERNEL);
- }
+ roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
+ list);
+
+ if (!roc->started)
+ goto out_unlock;
- ieee80211_recalc_idle(local);
+ roc->hw_begun = true;
+ roc->hw_start_time = local->hw_roc_start_time;
+ ieee80211_handle_roc_started(roc);
+ list_for_each_entry_safe(dep, tmp, &roc->dependents, list) {
+ ieee80211_handle_roc_started(dep);
+
+ if (dep->duration > roc->duration) {
+ u32 dur = dep->duration;
+ dep->duration = dur - roc->duration;
+ roc->duration = dur;
+ list_del(&dep->list);
+ list_add(&dep->list, &roc->list);
+ }
+ }
+ out_unlock:
mutex_unlock(&local->mtx);
}
@@ -216,52 +239,181 @@ void ieee80211_ready_on_channel(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
+ local->hw_roc_start_time = jiffies;
+
trace_api_ready_on_channel(local);
ieee80211_queue_work(hw, &local->hw_roc_start);
}
EXPORT_SYMBOL_GPL(ieee80211_ready_on_channel);
-static void ieee80211_hw_roc_done(struct work_struct *work)
+void ieee80211_start_next_roc(struct ieee80211_local *local)
{
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, hw_roc_done);
+ struct ieee80211_roc_work *roc;
- mutex_lock(&local->mtx);
+ lockdep_assert_held(&local->mtx);
- if (!local->hw_roc_channel) {
- mutex_unlock(&local->mtx);
+ if (list_empty(&local->roc_list)) {
+ ieee80211_run_deferred_scan(local);
return;
}
- /* was never transmitted */
- if (local->hw_roc_skb) {
- u64 cookie;
+ roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
+ list);
- cookie = local->hw_roc_cookie ^ 2;
+ if (WARN_ON_ONCE(roc->started))
+ return;
+
+ if (local->ops->remain_on_channel) {
+ int ret, duration = roc->duration;
+
+ /* XXX: duplicated, see ieee80211_start_roc_work() */
+ if (!duration)
+ duration = 10;
+
+ ret = drv_remain_on_channel(local, roc->chan,
+ roc->chan_type,
+ duration);
+
+ roc->started = true;
+
+ if (ret) {
+ wiphy_warn(local->hw.wiphy,
+ "failed to start next HW ROC (%d)\n", ret);
+ /*
+ * queue the work struct again to avoid recursion
+ * when multiple failures occur
+ */
+ ieee80211_remain_on_channel_expired(&local->hw);
+ }
+ } else {
+ /* delay it a bit */
+ ieee80211_queue_delayed_work(&local->hw, &roc->work,
+ round_jiffies_relative(HZ/2));
+ }
+}
- cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
- local->hw_roc_skb->data,
- local->hw_roc_skb->len, false,
- GFP_KERNEL);
+void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)
+{
+ struct ieee80211_roc_work *dep, *tmp;
- kfree_skb(local->hw_roc_skb);
- local->hw_roc_skb = NULL;
- local->hw_roc_skb_for_status = NULL;
+ /* was never transmitted */
+ if (roc->frame) {
+ cfg80211_mgmt_tx_status(&roc->sdata->wdev,
+ (unsigned long)roc->frame,
+ roc->frame->data, roc->frame->len,
+ false, GFP_KERNEL);
+ kfree_skb(roc->frame);
}
- if (!local->hw_roc_for_tx)
- cfg80211_remain_on_channel_expired(local->hw_roc_dev,
- local->hw_roc_cookie,
- local->hw_roc_channel,
- local->hw_roc_channel_type,
+ if (!roc->mgmt_tx_cookie)
+ cfg80211_remain_on_channel_expired(&roc->sdata->wdev,
+ (unsigned long)roc,
+ roc->chan, roc->chan_type,
GFP_KERNEL);
- local->hw_roc_channel = NULL;
- local->hw_roc_cookie = 0;
+ list_for_each_entry_safe(dep, tmp, &roc->dependents, list)
+ ieee80211_roc_notify_destroy(dep);
+
+ kfree(roc);
+}
+
+void ieee80211_sw_roc_work(struct work_struct *work)
+{
+ struct ieee80211_roc_work *roc =
+ container_of(work, struct ieee80211_roc_work, work.work);
+ struct ieee80211_sub_if_data *sdata = roc->sdata;
+ struct ieee80211_local *local = sdata->local;
+ bool started;
+
+ mutex_lock(&local->mtx);
+
+ if (roc->abort)
+ goto finish;
+
+ if (WARN_ON(list_empty(&local->roc_list)))
+ goto out_unlock;
+
+ if (WARN_ON(roc != list_first_entry(&local->roc_list,
+ struct ieee80211_roc_work,
+ list)))
+ goto out_unlock;
- ieee80211_recalc_idle(local);
+ if (!roc->started) {
+ struct ieee80211_roc_work *dep;
+ /* start this ROC */
+
+ /* switch channel etc */
+ ieee80211_recalc_idle(local);
+
+ local->tmp_channel = roc->chan;
+ local->tmp_channel_type = roc->chan_type;
+ ieee80211_hw_config(local, 0);
+
+ /* tell userspace or send frame */
+ ieee80211_handle_roc_started(roc);
+ list_for_each_entry(dep, &roc->dependents, list)
+ ieee80211_handle_roc_started(dep);
+
+ /* if it was pure TX, just finish right away */
+ if (!roc->duration)
+ goto finish;
+
+ roc->started = true;
+ ieee80211_queue_delayed_work(&local->hw, &roc->work,
+ msecs_to_jiffies(roc->duration));
+ } else {
+ /* finish this ROC */
+ finish:
+ list_del(&roc->list);
+ started = roc->started;
+ ieee80211_roc_notify_destroy(roc);
+
+ if (started) {
+ drv_flush(local, false);
+
+ local->tmp_channel = NULL;
+ ieee80211_hw_config(local, 0);
+
+ ieee80211_offchannel_return(local, true);
+ }
+
+ ieee80211_recalc_idle(local);
+
+ if (started)
+ ieee80211_start_next_roc(local);
+ }
+
+ out_unlock:
+ mutex_unlock(&local->mtx);
+}
+
+static void ieee80211_hw_roc_done(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, hw_roc_done);
+ struct ieee80211_roc_work *roc;
+
+ mutex_lock(&local->mtx);
+
+ if (list_empty(&local->roc_list))
+ goto out_unlock;
+
+ roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
+ list);
+
+ if (!roc->started)
+ goto out_unlock;
+
+ list_del(&roc->list);
+
+ ieee80211_roc_notify_destroy(roc);
+
+ /* if there's another roc, start it now */
+ ieee80211_start_next_roc(local);
+
+ out_unlock:
mutex_unlock(&local->mtx);
}
@@ -275,8 +427,47 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(ieee80211_remain_on_channel_expired);
-void ieee80211_hw_roc_setup(struct ieee80211_local *local)
+void ieee80211_roc_setup(struct ieee80211_local *local)
{
INIT_WORK(&local->hw_roc_start, ieee80211_hw_roc_start);
INIT_WORK(&local->hw_roc_done, ieee80211_hw_roc_done);
+ INIT_LIST_HEAD(&local->roc_list);
+}
+
+void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_roc_work *roc, *tmp;
+ LIST_HEAD(tmp_list);
+
+ mutex_lock(&local->mtx);
+ list_for_each_entry_safe(roc, tmp, &local->roc_list, list) {
+ if (roc->sdata != sdata)
+ continue;
+
+ if (roc->started && local->ops->remain_on_channel) {
+ /* can race, so ignore return value */
+ drv_cancel_remain_on_channel(local);
+ }
+
+ list_move_tail(&roc->list, &tmp_list);
+ roc->abort = true;
+ }
+
+ ieee80211_start_next_roc(local);
+ mutex_unlock(&local->mtx);
+
+ list_for_each_entry_safe(roc, tmp, &tmp_list, list) {
+ if (local->ops->remain_on_channel) {
+ list_del(&roc->list);
+ ieee80211_roc_notify_destroy(roc);
+ } else {
+ ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);
+
+ /* work will clean up etc */
+ flush_delayed_work(&roc->work);
+ }
+ }
+
+ WARN_ON_ONCE(!list_empty(&tmp_list));
}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index af1c4e26e965..5c572e7a1a71 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -77,6 +77,17 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
int err = drv_suspend(local, wowlan);
if (err < 0) {
local->quiescing = false;
+ local->wowlan = false;
+ if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
+ mutex_lock(&local->sta_mtx);
+ list_for_each_entry(sta,
+ &local->sta_list, list) {
+ clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
+ }
+ mutex_unlock(&local->sta_mtx);
+ }
+ ieee80211_wake_queues_by_reason(hw,
+ IEEE80211_QUEUE_STOP_REASON_SUSPEND);
return err;
} else if (err > 0) {
WARN_ON(err != 1);
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 2d1acc6c5445..fb1d4aa65e8c 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -626,8 +626,12 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
#ifdef CONFIG_MAC80211_DEBUGFS
/* use fixed index if set */
- if (mp->fixed_rate_idx != -1)
- sample_idx = mp->fixed_rate_idx;
+ if (mp->fixed_rate_idx != -1) {
+ mi->max_tp_rate = mp->fixed_rate_idx;
+ mi->max_tp_rate2 = mp->fixed_rate_idx;
+ mi->max_prob_rate = mp->fixed_rate_idx;
+ sample_idx = -1;
+ }
#endif
if (sample_idx >= 0) {
@@ -809,7 +813,7 @@ minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
max_rates = sband->n_bitrates;
}
- msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
+ msp = kzalloc(sizeof(*msp), gfp);
if (!msp)
return NULL;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7bcecf73aafb..0cb4edee6af5 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -94,7 +94,7 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
return len;
}
-/*
+/**
* ieee80211_add_rx_radiotap_header - add radiotap header
*
* add a radiotap header containing all the fields which the hardware provided.
@@ -413,29 +413,6 @@ static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
/* rx handlers */
-static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
-{
- struct ieee80211_local *local = rx->local;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
- struct sk_buff *skb = rx->skb;
-
- if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
- !local->sched_scanning))
- return RX_CONTINUE;
-
- if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- test_bit(SCAN_SW_SCANNING, &local->scanning) ||
- test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
- local->sched_scanning)
- return ieee80211_scan_rx(rx->sdata, skb);
-
- /* scanning finished during invoking of handlers */
- I802_DEBUG_INC(local->rx_handlers_drop_passive_scan);
- return RX_DROP_UNUSABLE;
-}
-
-
static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -554,11 +531,11 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
}
-static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
+static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx,
int index)
{
- struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_local *local = sdata->local;
struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
struct ieee80211_rx_status *status;
@@ -578,7 +555,7 @@ no_frame:
tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
}
-static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
+static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx,
u16 head_seq_num)
{
@@ -589,7 +566,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
tid_agg_rx->buf_size;
- ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
}
}
@@ -604,7 +581,7 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
*/
#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
-static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
+static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx)
{
int index, j;
@@ -632,12 +609,9 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
HT_RX_REORDER_BUF_TIMEOUT))
goto set_release_timer;
-#ifdef CONFIG_MAC80211_HT_DEBUG
- if (net_ratelimit())
- wiphy_debug(hw->wiphy,
- "release an RX reorder frame due to timeout on earlier frames\n");
-#endif
- ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
+ ht_dbg_ratelimited(sdata,
+ "release an RX reorder frame due to timeout on earlier frames\n");
+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
/*
* Increment the head seq# also for the skipped slots.
@@ -647,7 +621,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
skipped = 0;
}
} else while (tid_agg_rx->reorder_buf[index]) {
- ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+ ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
tid_agg_rx->buf_size;
}
@@ -677,7 +651,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
* rcu_read_lock protection. It returns false if the frame
* can be processed immediately, true if it was consumed.
*/
-static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
+static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
struct tid_ampdu_rx *tid_agg_rx,
struct sk_buff *skb)
{
@@ -706,7 +680,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
/* release stored frames up to new head to stack */
- ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
+ ieee80211_release_reorder_frames(sdata, tid_agg_rx,
+ head_seq_num);
}
/* Now the new frame is always in the range of the reordering buffer */
@@ -736,7 +711,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
tid_agg_rx->reorder_buf[index] = skb;
tid_agg_rx->reorder_time[index] = jiffies;
tid_agg_rx->stored_mpdu_num++;
- ieee80211_sta_reorder_release(hw, tid_agg_rx);
+ ieee80211_sta_reorder_release(sdata, tid_agg_rx);
out:
spin_unlock(&tid_agg_rx->reorder_lock);
@@ -751,7 +726,6 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
{
struct sk_buff *skb = rx->skb;
struct ieee80211_local *local = rx->local;
- struct ieee80211_hw *hw = &local->hw;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct sta_info *sta = rx->sta;
@@ -813,7 +787,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
* sure that we cannot get to it any more before doing
* anything with it.
*/
- if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb))
+ if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
return;
dont_reorder:
@@ -1136,24 +1110,18 @@ static void ap_sta_ps_start(struct sta_info *sta)
set_sta_flag(sta, WLAN_STA_PS_STA);
if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
- sdata->name, sta->sta.addr, sta->sta.aid);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
+ sta->sta.addr, sta->sta.aid);
}
static void ap_sta_ps_end(struct sta_info *sta)
{
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
- sta->sdata->name, sta->sta.addr, sta->sta.aid);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
+ sta->sta.addr, sta->sta.aid);
if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
- sta->sdata->name, sta->sta.addr, sta->sta.aid);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
+ ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
+ sta->sta.addr, sta->sta.aid);
return;
}
@@ -1383,19 +1351,8 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
sdata->fragment_next = 0;
- if (!skb_queue_empty(&entry->skb_list)) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- struct ieee80211_hdr *hdr =
- (struct ieee80211_hdr *) entry->skb_list.next->data;
- printk(KERN_DEBUG "%s: RX reassembly removed oldest "
- "fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
- "addr1=%pM addr2=%pM\n",
- sdata->name, idx,
- jiffies - entry->first_frag_time, entry->seq,
- entry->last_frag, hdr->addr1, hdr->addr2);
-#endif
+ if (!skb_queue_empty(&entry->skb_list))
__skb_queue_purge(&entry->skb_list);
- }
__skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
*skb = NULL;
@@ -1753,7 +1710,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
*/
xmit_skb = skb_copy(skb, GFP_ATOMIC);
if (!xmit_skb)
- net_dbg_ratelimited("%s: failed to clone multicast frame\n",
+ net_info_ratelimited("%s: failed to clone multicast frame\n",
dev->name);
} else {
dsta = sta_info_get(sdata, skb->data);
@@ -1937,7 +1894,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
ether_addr_equal(sdata->vif.addr, hdr->addr3))
return RX_CONTINUE;
- q = ieee80211_select_queue_80211(local, skb, hdr);
+ q = ieee80211_select_queue_80211(sdata, skb, hdr);
if (ieee80211_queue_stopped(&local->hw, q)) {
IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
return RX_DROP_MONITOR;
@@ -1957,7 +1914,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
fwd_skb = skb_copy(skb, GFP_ATOMIC);
if (!fwd_skb) {
- net_dbg_ratelimited("%s: failed to clone mesh frame\n",
+ net_info_ratelimited("%s: failed to clone mesh frame\n",
sdata->name);
goto out;
}
@@ -2060,8 +2017,6 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
static ieee80211_rx_result debug_noinline
ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
{
- struct ieee80211_local *local = rx->local;
- struct ieee80211_hw *hw = &local->hw;
struct sk_buff *skb = rx->skb;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct tid_ampdu_rx *tid_agg_rx;
@@ -2098,7 +2053,8 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
spin_lock(&tid_agg_rx->reorder_lock);
/* release stored frames up to start of BAR */
- ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
+ ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
+ start_seq_num);
spin_unlock(&tid_agg_rx->reorder_lock);
kfree_skb(skb);
@@ -2425,7 +2381,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
sig = status->signal;
- if (cfg80211_rx_mgmt(rx->sdata->dev, status->freq, sig,
+ if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
rx->skb->data, rx->skb->len,
GFP_ATOMIC)) {
if (rx->sta)
@@ -2455,7 +2411,7 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
* frames that we didn't handle, including returning unknown
* ones. For all other modes we will return them to the sender,
* setting the 0x80 bit in the action category, as required by
- * 802.11-2007 7.3.1.11.
+ * 802.11-2012 9.24.4.
* Newer versions of hostapd shall also use the management frame
* registration mechanisms, but older ones still use cooked
* monitor interfaces so push all frames there.
@@ -2465,6 +2421,9 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
return RX_DROP_MONITOR;
+ if (is_multicast_ether_addr(mgmt->da))
+ return RX_DROP_MONITOR;
+
/* do not return rejected action frames */
if (mgmt->u.action.category & 0x80)
return RX_DROP_UNUSABLE;
@@ -2713,7 +2672,6 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
goto rxh_next; \
} while (0);
- CALL_RXH(ieee80211_rx_h_passive_scan)
CALL_RXH(ieee80211_rx_h_check)
ieee80211_rx_reorder_ampdu(rx);
@@ -2749,7 +2707,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
return;
spin_lock(&tid_agg_rx->reorder_lock);
- ieee80211_sta_reorder_release(&sta->local->hw, tid_agg_rx);
+ ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
spin_unlock(&tid_agg_rx->reorder_lock);
ieee80211_rx_handlers(&rx);
@@ -2783,11 +2741,8 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
return 0;
if (ieee80211_is_beacon(hdr->frame_control)) {
return 1;
- }
- else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
- if (!(status->rx_flags & IEEE80211_RX_IN_SCAN))
- return 0;
- status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
+ } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
+ return 0;
} else if (!multicast &&
!ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
if (!(sdata->dev->flags & IFF_PROMISC))
@@ -2825,11 +2780,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
* and location updates. Note that mac80211
* itself never looks at these frames.
*/
- if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
- ieee80211_is_public_action(hdr, skb->len))
+ if (ieee80211_is_public_action(hdr, skb->len))
return 1;
- if (!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
- !ieee80211_is_beacon(hdr->frame_control))
+ if (!ieee80211_is_beacon(hdr->frame_control))
return 0;
status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
}
@@ -2895,7 +2848,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
struct ieee80211_hdr *hdr;
@@ -2913,11 +2865,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
local->dot11ReceivedFragmentCount++;
- if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
- test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
- test_bit(SCAN_SW_SCANNING, &local->scanning)))
- status->rx_flags |= IEEE80211_RX_IN_SCAN;
-
if (ieee80211_is_mgmt(fc))
err = skb_linearize(skb);
else
@@ -2932,6 +2879,10 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_parse_qos(&rx);
ieee80211_verify_alignment(&rx);
+ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
+ ieee80211_is_beacon(hdr->frame_control)))
+ ieee80211_scan_rx(local, skb);
+
if (ieee80211_is_data(fc)) {
prev_sta = NULL;
@@ -3029,6 +2980,10 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (unlikely(local->quiescing || local->suspended))
goto drop;
+ /* We might be during a HW reconfig, prevent Rx for the same reason */
+ if (unlikely(local->in_reconfig))
+ goto drop;
+
/*
* The same happens when we're not even started,
* but that's worth a warning.
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 169da0742c81..bcaee5d12839 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -83,13 +83,14 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
mgmt, len, signal, GFP_ATOMIC);
-
if (!cbss)
return NULL;
cbss->free_priv = ieee80211_rx_bss_free;
bss = (void *)cbss->priv;
+ bss->device_ts = rx_status->device_timestamp;
+
if (elems->parse_error) {
if (beacon)
bss->corrupt_data |= IEEE80211_BSS_CORRUPT_BEACON;
@@ -114,8 +115,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
if (elems->tim && (!elems->parse_error ||
!(bss->valid_data & IEEE80211_BSS_VALID_DTIM))) {
- struct ieee80211_tim_ie *tim_ie =
- (struct ieee80211_tim_ie *)elems->tim;
+ struct ieee80211_tim_ie *tim_ie = elems->tim;
bss->dtim_period = tim_ie->dtim_period;
if (!elems->parse_error)
bss->valid_data |= IEEE80211_BSS_VALID_DTIM;
@@ -165,52 +165,47 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
return bss;
}
-ieee80211_rx_result
-ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
+void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_mgmt *mgmt;
+ struct ieee80211_sub_if_data *sdata1, *sdata2;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_bss *bss;
u8 *elements;
struct ieee80211_channel *channel;
size_t baselen;
int freq;
- __le16 fc;
- bool presp, beacon = false;
+ bool beacon;
struct ieee802_11_elems elems;
- if (skb->len < 2)
- return RX_DROP_UNUSABLE;
-
- mgmt = (struct ieee80211_mgmt *) skb->data;
- fc = mgmt->frame_control;
+ if (skb->len < 24 ||
+ (!ieee80211_is_probe_resp(mgmt->frame_control) &&
+ !ieee80211_is_beacon(mgmt->frame_control)))
+ return;
- if (ieee80211_is_ctl(fc))
- return RX_CONTINUE;
+ sdata1 = rcu_dereference(local->scan_sdata);
+ sdata2 = rcu_dereference(local->sched_scan_sdata);
- if (skb->len < 24)
- return RX_CONTINUE;
+ if (likely(!sdata1 && !sdata2))
+ return;
- presp = ieee80211_is_probe_resp(fc);
- if (presp) {
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
/* ignore ProbeResp to foreign address */
- if (!ether_addr_equal(mgmt->da, sdata->vif.addr))
- return RX_DROP_MONITOR;
+ if ((!sdata1 || !ether_addr_equal(mgmt->da, sdata1->vif.addr)) &&
+ (!sdata2 || !ether_addr_equal(mgmt->da, sdata2->vif.addr)))
+ return;
- presp = true;
elements = mgmt->u.probe_resp.variable;
baselen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
+ beacon = false;
} else {
- beacon = ieee80211_is_beacon(fc);
baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
elements = mgmt->u.beacon.variable;
+ beacon = true;
}
- if (!presp && !beacon)
- return RX_CONTINUE;
-
if (baselen > skb->len)
- return RX_DROP_MONITOR;
+ return;
ieee802_11_parse_elems(elements, skb->len - baselen, &elems);
@@ -220,22 +215,16 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
else
freq = rx_status->freq;
- channel = ieee80211_get_channel(sdata->local->hw.wiphy, freq);
+ channel = ieee80211_get_channel(local->hw.wiphy, freq);
if (!channel || channel->flags & IEEE80211_CHAN_DISABLED)
- return RX_DROP_MONITOR;
+ return;
- bss = ieee80211_bss_info_update(sdata->local, rx_status,
+ bss = ieee80211_bss_info_update(local, rx_status,
mgmt, skb->len, &elems,
channel, beacon);
if (bss)
- ieee80211_rx_bss_put(sdata->local, bss);
-
- if (channel == sdata->local->oper_channel)
- return RX_CONTINUE;
-
- dev_kfree_skb(skb);
- return RX_QUEUED;
+ ieee80211_rx_bss_put(local, bss);
}
/* return false if no more work */
@@ -293,7 +282,13 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
return;
if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) {
- int rc = drv_hw_scan(local, local->scan_sdata, local->hw_scan_req);
+ int rc;
+
+ rc = drv_hw_scan(local,
+ rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx)),
+ local->hw_scan_req);
+
if (rc == 0)
return;
}
@@ -323,7 +318,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
ieee80211_mlme_notify_scan_completed(local);
ieee80211_ibss_notify_scan_completed(local);
ieee80211_mesh_notify_scan_completed(local);
- ieee80211_queue_work(&local->hw, &local->work_work);
+ ieee80211_start_next_roc(local);
}
void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
@@ -376,7 +371,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
static bool ieee80211_can_scan(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
- if (!list_empty(&local->work_list))
+ if (!list_empty(&local->roc_list))
return false;
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
@@ -394,7 +389,10 @@ void ieee80211_run_deferred_scan(struct ieee80211_local *local)
if (!local->scan_req || local->scanning)
return;
- if (!ieee80211_can_scan(local, local->scan_sdata))
+ if (!ieee80211_can_scan(local,
+ rcu_dereference_protected(
+ local->scan_sdata,
+ lockdep_is_held(&local->mtx))))
return;
ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
@@ -405,9 +403,12 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
unsigned long *next_delay)
{
int i;
- struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+ struct ieee80211_sub_if_data *sdata;
enum ieee80211_band band = local->hw.conf.channel->band;
+ sdata = rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx));;
+
for (i = 0; i < local->scan_req->n_ssids; i++)
ieee80211_send_probe_req(
sdata, NULL,
@@ -439,7 +440,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_can_scan(local, sdata)) {
/* wait for the work to finish/time out */
local->scan_req = req;
- local->scan_sdata = sdata;
+ rcu_assign_pointer(local->scan_sdata, sdata);
return 0;
}
@@ -473,7 +474,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
}
local->scan_req = req;
- local->scan_sdata = sdata;
+ rcu_assign_pointer(local->scan_sdata, sdata);
if (local->ops->hw_scan) {
__set_bit(SCAN_HW_SCANNING, &local->scanning);
@@ -533,7 +534,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_idle(local);
local->scan_req = NULL;
- local->scan_sdata = NULL;
+ rcu_assign_pointer(local->scan_sdata, NULL);
}
return rc;
@@ -720,7 +721,8 @@ void ieee80211_scan_work(struct work_struct *work)
mutex_lock(&local->mtx);
- sdata = local->scan_sdata;
+ sdata = rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx));
/* When scanning on-channel, the first-callback means completed. */
if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) {
@@ -741,7 +743,7 @@ void ieee80211_scan_work(struct work_struct *work)
int rc;
local->scan_req = NULL;
- local->scan_sdata = NULL;
+ rcu_assign_pointer(local->scan_sdata, NULL);
rc = __ieee80211_start_scan(sdata, req);
if (rc) {
@@ -893,7 +895,9 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
if (local->ops->cancel_hw_scan)
- drv_cancel_hw_scan(local, local->scan_sdata);
+ drv_cancel_hw_scan(local,
+ rcu_dereference_protected(local->scan_sdata,
+ lockdep_is_held(&local->mtx)));
goto out;
}
@@ -915,9 +919,9 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
int ret, i;
- mutex_lock(&sdata->local->mtx);
+ mutex_lock(&local->mtx);
- if (local->sched_scanning) {
+ if (rcu_access_pointer(local->sched_scan_sdata)) {
ret = -EBUSY;
goto out;
}
@@ -928,6 +932,9 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
}
for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ if (!local->hw.wiphy->bands[i])
+ continue;
+
local->sched_scan_ies.ie[i] = kzalloc(2 +
IEEE80211_MAX_SSID_LEN +
local->scan_ies_len +
@@ -948,7 +955,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
ret = drv_sched_scan_start(local, sdata, req,
&local->sched_scan_ies);
if (ret == 0) {
- local->sched_scanning = true;
+ rcu_assign_pointer(local->sched_scan_sdata, sdata);
goto out;
}
@@ -956,7 +963,7 @@ out_free:
while (i > 0)
kfree(local->sched_scan_ies.ie[--i]);
out:
- mutex_unlock(&sdata->local->mtx);
+ mutex_unlock(&local->mtx);
return ret;
}
@@ -965,22 +972,22 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
struct ieee80211_local *local = sdata->local;
int ret = 0, i;
- mutex_lock(&sdata->local->mtx);
+ mutex_lock(&local->mtx);
if (!local->ops->sched_scan_stop) {
ret = -ENOTSUPP;
goto out;
}
- if (local->sched_scanning) {
+ if (rcu_access_pointer(local->sched_scan_sdata)) {
for (i = 0; i < IEEE80211_NUM_BANDS; i++)
kfree(local->sched_scan_ies.ie[i]);
drv_sched_scan_stop(local, sdata);
- local->sched_scanning = false;
+ rcu_assign_pointer(local->sched_scan_sdata, NULL);
}
out:
- mutex_unlock(&sdata->local->mtx);
+ mutex_unlock(&local->mtx);
return ret;
}
@@ -1004,7 +1011,7 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
mutex_lock(&local->mtx);
- if (!local->sched_scanning) {
+ if (!rcu_access_pointer(local->sched_scan_sdata)) {
mutex_unlock(&local->mtx);
return;
}
@@ -1012,7 +1019,7 @@ void ieee80211_sched_scan_stopped_work(struct work_struct *work)
for (i = 0; i < IEEE80211_NUM_BANDS; i++)
kfree(local->sched_scan_ies.ie[i]);
- local->sched_scanning = false;
+ rcu_assign_pointer(local->sched_scan_sdata, NULL);
mutex_unlock(&local->mtx);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index de455f8bbb91..06fa75ceb025 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -169,9 +169,7 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
if (sta->rate_ctrl)
rate_control_free_sta(sta);
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Destroyed STA %pM\n", sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ sta_dbg(sta->sdata, "Destroyed STA %pM\n", sta->sta.addr);
kfree(sta);
}
@@ -278,9 +276,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < NUM_RX_DATA_QUEUES; i++)
sta->last_seq_ctrl[i] = cpu_to_le16(USHRT_MAX);
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Allocated STA %pM\n", sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ sta_dbg(sdata, "Allocated STA %pM\n", sta->sta.addr);
#ifdef CONFIG_MAC80211_MESH
sta->plink_state = NL80211_PLINK_LISTEN;
@@ -333,9 +329,9 @@ static int sta_info_insert_drv_state(struct ieee80211_local *local,
}
if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
- printk(KERN_DEBUG
- "%s: failed to move IBSS STA %pM to state %d (%d) - keeping it anyway.\n",
- sdata->name, sta->sta.addr, state + 1, err);
+ sdata_info(sdata,
+ "failed to move IBSS STA %pM to state %d (%d) - keeping it anyway\n",
+ sta->sta.addr, state + 1, err);
err = 0;
}
@@ -390,9 +386,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
sinfo.generation = local->sta_generation;
cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Inserted STA %pM\n", sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
/* move reference to rcu-protected */
rcu_read_lock();
@@ -618,10 +612,8 @@ static bool sta_info_cleanup_expire_buffered_ac(struct ieee80211_local *local,
break;
local->total_ps_buffered--;
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n",
+ ps_dbg(sta->sdata, "Buffered frame expired (STA %pM)\n",
sta->sta.addr);
-#endif
dev_kfree_skb(skb);
}
@@ -747,9 +739,8 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
mesh_accept_plinks_update(sdata);
#endif
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- wiphy_debug(local->hw.wiphy, "Removed STA %pM\n", sta->sta.addr);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
+
cancel_work_sync(&sta->drv_unblock_wk);
cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
@@ -889,10 +880,8 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
continue;
if (time_after(jiffies, sta->last_rx + exp_time)) {
-#ifdef CONFIG_MAC80211_IBSS_DEBUG
- printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
- sdata->name, sta->sta.addr);
-#endif
+ ibss_dbg(sdata, "expiring inactive STA %pM\n",
+ sta->sta.addr);
WARN_ON(__sta_info_destroy(sta));
}
}
@@ -990,11 +979,9 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
sta_info_recalc_tim(sta);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
- "since STA not sleeping anymore\n", sdata->name,
+ ps_dbg(sdata,
+ "STA %pM aid %d sending %d filtered/%d PS frames since STA not sleeping anymore\n",
sta->sta.addr, sta->sta.aid, filtered, buffered);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
@@ -1384,10 +1371,8 @@ int sta_info_move_state(struct sta_info *sta,
return -EINVAL;
}
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: moving STA %pM to state %d\n",
- sta->sdata->name, sta->sta.addr, new_state);
-#endif
+ sta_dbg(sta->sdata, "moving STA %pM to state %d\n",
+ sta->sta.addr, new_state);
/*
* notify the driver before the actual changes so it can
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 28cfa981cfb1..8cd72914cdaf 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -155,13 +155,10 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
return;
}
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit())
- wiphy_debug(local->hw.wiphy,
- "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
- skb_queue_len(&sta->tx_filtered[ac]),
- !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
-#endif
+ ps_dbg_ratelimited(sta->sdata,
+ "dropped TX filtered frame, queue_len=%d PS=%d @%lu\n",
+ skb_queue_len(&sta->tx_filtered[ac]),
+ !!test_sta_flag(sta, WLAN_STA_PS_STA), jiffies);
dev_kfree_skb(skb);
}
@@ -520,36 +517,21 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
u64 cookie = (unsigned long)skb;
+ acked = info->flags & IEEE80211_TX_STAT_ACK;
- if (ieee80211_is_nullfunc(hdr->frame_control) ||
- ieee80211_is_qos_nullfunc(hdr->frame_control)) {
- acked = info->flags & IEEE80211_TX_STAT_ACK;
+ /*
+ * TODO: When we have non-netdev frame TX,
+ * we cannot use skb->dev->ieee80211_ptr
+ */
+ if (ieee80211_is_nullfunc(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control))
cfg80211_probe_status(skb->dev, hdr->addr1,
cookie, acked, GFP_ATOMIC);
- } else {
- struct ieee80211_work *wk;
-
- rcu_read_lock();
- list_for_each_entry_rcu(wk, &local->work_list, list) {
- if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX)
- continue;
- if (wk->offchan_tx.frame != skb)
- continue;
- wk->offchan_tx.status = true;
- break;
- }
- rcu_read_unlock();
- if (local->hw_roc_skb_for_status == skb) {
- cookie = local->hw_roc_cookie ^ 2;
- local->hw_roc_skb_for_status = NULL;
- }
-
+ else
cfg80211_mgmt_tx_status(
- skb->dev, cookie, skb->data, skb->len,
- !!(info->flags & IEEE80211_TX_STAT_ACK),
- GFP_ATOMIC);
- }
+ skb->dev->ieee80211_ptr, cookie, skb->data,
+ skb->len, acked, GFP_ATOMIC);
}
if (unlikely(info->ack_frame_id)) {
@@ -589,7 +571,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
/* send frame to monitor interfaces now */
rtap_len = ieee80211_tx_radiotap_len(info);
if (WARN_ON_ONCE(skb_headroom(skb) < rtap_len)) {
- printk(KERN_ERR "ieee80211_tx_status: headroom too small\n");
+ pr_err("ieee80211_tx_status: headroom too small\n");
dev_kfree_skb(skb);
return;
}
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 51077a956a83..57e14d59e12f 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -260,17 +260,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
keyid = pos[3];
iv32 = get_unaligned_le32(pos + 4);
pos += 8;
-#ifdef CONFIG_MAC80211_TKIP_DEBUG
- {
- int i;
- printk(KERN_DEBUG "TKIP decrypt: data(len=%zd)", payload_len);
- for (i = 0; i < payload_len; i++)
- printk(" %02x", payload[i]);
- printk("\n");
- printk(KERN_DEBUG "TKIP decrypt: iv16=%04x iv32=%08x\n",
- iv16, iv32);
- }
-#endif
if (!(keyid & (1 << 5)))
return TKIP_DECRYPT_NO_EXT_IV;
@@ -281,16 +270,8 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
(iv32 < key->u.tkip.rx[queue].iv32 ||
(iv32 == key->u.tkip.rx[queue].iv32 &&
- iv16 <= key->u.tkip.rx[queue].iv16))) {
-#ifdef CONFIG_MAC80211_TKIP_DEBUG
- printk(KERN_DEBUG "TKIP replay detected for RX frame from "
- "%pM (RX IV (%04x,%02x) <= prev. IV (%04x,%02x)\n",
- ta,
- iv32, iv16, key->u.tkip.rx[queue].iv32,
- key->u.tkip.rx[queue].iv16);
-#endif
+ iv16 <= key->u.tkip.rx[queue].iv16)))
return TKIP_DECRYPT_REPLAY;
- }
if (only_iv) {
res = TKIP_DECRYPT_OK;
@@ -302,22 +283,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
key->u.tkip.rx[queue].iv32 != iv32) {
/* IV16 wrapped around - perform TKIP phase 1 */
tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
-#ifdef CONFIG_MAC80211_TKIP_DEBUG
- {
- int i;
- u8 key_offset = NL80211_TKIP_DATA_OFFSET_ENCR_KEY;
- printk(KERN_DEBUG "TKIP decrypt: Phase1 TA=%pM"
- " TK=", ta);
- for (i = 0; i < 16; i++)
- printk("%02x ",
- key->conf.key[key_offset + i]);
- printk("\n");
- printk(KERN_DEBUG "TKIP decrypt: P1K=");
- for (i = 0; i < 5; i++)
- printk("%04x ", key->u.tkip.rx[queue].p1k[i]);
- printk("\n");
- }
-#endif
}
if (key->local->ops->update_tkip_key &&
key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
@@ -333,15 +298,6 @@ int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
}
tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
-#ifdef CONFIG_MAC80211_TKIP_DEBUG
- {
- int i;
- printk(KERN_DEBUG "TKIP decrypt: Phase2 rc4key=");
- for (i = 0; i < 16; i++)
- printk("%02x ", rc4key[i]);
- printk("\n");
- }
-#endif
res = ieee80211_wep_decrypt_data(tfm, rc4key, 16, pos, payload_len - 12);
done:
diff --git a/net/mac80211/trace.c b/net/mac80211/trace.c
new file mode 100644
index 000000000000..386e45d8a958
--- /dev/null
+++ b/net/mac80211/trace.c
@@ -0,0 +1,75 @@
+/* bug in tracepoint.h, it should include this */
+#include <linux/module.h>
+
+/* sparse isn't too happy with all macros... */
+#ifndef __CHECKER__
+#include <net/cfg80211.h>
+#include "driver-ops.h"
+#include "debug.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifdef CONFIG_MAC80211_MESSAGE_TRACING
+void __sdata_info(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ pr_info("%pV", &vaf);
+ trace_mac80211_info(&vaf);
+ va_end(args);
+}
+
+void __sdata_dbg(bool print, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ if (print)
+ pr_debug("%pV", &vaf);
+ trace_mac80211_dbg(&vaf);
+ va_end(args);
+}
+
+void __sdata_err(const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ pr_err("%pV", &vaf);
+ trace_mac80211_err(&vaf);
+ va_end(args);
+}
+
+void __wiphy_dbg(struct wiphy *wiphy, bool print, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ if (print)
+ wiphy_dbg(wiphy, "%pV", &vaf);
+ trace_mac80211_dbg(&vaf);
+ va_end(args);
+}
+#endif
+#endif
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/trace.h
index 6de00b2c268c..c6d33b55b2df 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/trace.h
@@ -306,7 +306,8 @@ TRACE_EVENT(drv_bss_info_changed,
__field(u8, dtimper)
__field(u16, bcnint)
__field(u16, assoc_cap)
- __field(u64, timestamp)
+ __field(u64, sync_tsf)
+ __field(u32, sync_device_ts)
__field(u32, basic_rates)
__field(u32, changed)
__field(bool, enable_beacon)
@@ -325,7 +326,8 @@ TRACE_EVENT(drv_bss_info_changed,
__entry->dtimper = info->dtim_period;
__entry->bcnint = info->beacon_int;
__entry->assoc_cap = info->assoc_capability;
- __entry->timestamp = info->last_tsf;
+ __entry->sync_tsf = info->sync_tsf;
+ __entry->sync_device_ts = info->sync_device_ts;
__entry->basic_rates = info->basic_rates;
__entry->enable_beacon = info->enable_beacon;
__entry->ht_operation_mode = info->ht_operation_mode;
@@ -1218,6 +1220,39 @@ DEFINE_EVENT(release_evt, drv_allow_buffered_frames,
TP_ARGS(local, sta, tids, num_frames, reason, more_data)
);
+TRACE_EVENT(drv_get_rssi,
+ TP_PROTO(struct ieee80211_local *local, struct ieee80211_sta *sta,
+ s8 rssi, int ret),
+
+ TP_ARGS(local, sta, rssi, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(s8, rssi)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->rssi = rssi;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT " rssi:%d ret:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->rssi, __entry->ret
+ )
+);
+
+DEFINE_EVENT(local_sdata_evt, drv_mgd_prepare_tx,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+
+ TP_ARGS(local, sdata)
+);
+
/*
* Tracing for API calls that drivers call.
*/
@@ -1606,10 +1641,49 @@ TRACE_EVENT(stop_queue,
LOCAL_PR_ARG, __entry->queue, __entry->reason
)
);
+
+#ifdef CONFIG_MAC80211_MESSAGE_TRACING
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mac80211_msg
+
+#define MAX_MSG_LEN 100
+
+DECLARE_EVENT_CLASS(mac80211_msg_event,
+ TP_PROTO(struct va_format *vaf),
+
+ TP_ARGS(vaf),
+
+ TP_STRUCT__entry(
+ __dynamic_array(char, msg, MAX_MSG_LEN)
+ ),
+
+ TP_fast_assign(
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ MAX_MSG_LEN, vaf->fmt,
+ *vaf->va) >= MAX_MSG_LEN);
+ ),
+
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(mac80211_msg_event, mac80211_info,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+DEFINE_EVENT(mac80211_msg_event, mac80211_dbg,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+DEFINE_EVENT(mac80211_msg_event, mac80211_err,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+#endif
+
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE driver-trace
+#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e453212fa17f..acf712ffb5e6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -140,6 +140,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
if (r->flags & IEEE80211_RATE_MANDATORY_A)
mrate = r->bitrate;
break;
+ case IEEE80211_BAND_60GHZ:
+ /* TODO, for now fall through */
case IEEE80211_NUM_BANDS:
WARN_ON(1);
break;
@@ -175,12 +177,6 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
return cpu_to_le16(dur);
}
-static inline int is_ieee80211_device(struct ieee80211_local *local,
- struct net_device *dev)
-{
- return local == wdev_priv(dev->ieee80211_ptr);
-}
-
/* tx handlers */
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
@@ -297,10 +293,10 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (unlikely(!assoc &&
ieee80211_is_data(hdr->frame_control))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: dropped data frame to not "
- "associated station %pM\n",
- tx->sdata->name, hdr->addr1);
-#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
+ sdata_info(tx->sdata,
+ "dropped data frame to not associated station %pM\n",
+ hdr->addr1);
+#endif
I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
return TX_DROP;
}
@@ -367,10 +363,7 @@ static void purge_old_ps_buffers(struct ieee80211_local *local)
rcu_read_unlock();
local->total_ps_buffered = total;
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- wiphy_debug(local->hw.wiphy, "PS buffers full - purged %d frames\n",
- purged);
-#endif
+ ps_dbg_hw(&local->hw, "PS buffers full - purged %d frames\n", purged);
}
static ieee80211_tx_result
@@ -412,10 +405,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
purge_old_ps_buffers(tx->local);
if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- net_dbg_ratelimited("%s: BC TX buffer full - dropping the oldest frame\n",
- tx->sdata->name);
-#endif
+ ps_dbg(tx->sdata,
+ "BC TX buffer full - dropping the oldest frame\n");
dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
} else
tx->local->total_ps_buffered++;
@@ -466,18 +457,15 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
}
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- printk(KERN_DEBUG "STA %pM aid %d: PS buffer for AC %d\n",
+ ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
sta->sta.addr, sta->sta.aid, ac);
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- net_dbg_ratelimited("%s: STA %pM TX buffer for AC %d full - dropping oldest frame\n",
- tx->sdata->name, sta->sta.addr, ac);
-#endif
+ ps_dbg(tx->sdata,
+ "STA %pM TX buffer for AC %d full - dropping oldest frame\n",
+ sta->sta.addr, ac);
dev_kfree_skb(old);
} else
tx->local->total_ps_buffered++;
@@ -499,14 +487,11 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
sta_info_recalc_tim(sta);
return TX_QUEUED;
+ } else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
+ ps_dbg(tx->sdata,
+ "STA %pM in PS mode, but polling/in SP -> send frame\n",
+ sta->sta.addr);
}
-#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- else if (unlikely(test_sta_flag(sta, WLAN_STA_PS_STA))) {
- printk(KERN_DEBUG
- "%s: STA %pM in PS mode, but polling/in SP -> send frame\n",
- tx->sdata->name, sta->sta.addr);
- }
-#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
return TX_CONTINUE;
}
@@ -538,7 +523,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
{
- struct ieee80211_key *key = NULL;
+ struct ieee80211_key *key;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
@@ -557,16 +542,23 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
else if (!is_multicast_ether_addr(hdr->addr1) &&
(key = rcu_dereference(tx->sdata->default_unicast_key)))
tx->key = key;
- else if (tx->sdata->drop_unencrypted &&
- (tx->skb->protocol != tx->sdata->control_port_protocol) &&
- !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
- (!ieee80211_is_robust_mgmt_frame(hdr) ||
- (ieee80211_is_action(hdr->frame_control) &&
- tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))) {
+ else if (info->flags & IEEE80211_TX_CTL_INJECTED)
+ tx->key = NULL;
+ else if (!tx->sdata->drop_unencrypted)
+ tx->key = NULL;
+ else if (tx->skb->protocol == tx->sdata->control_port_protocol)
+ tx->key = NULL;
+ else if (ieee80211_is_robust_mgmt_frame(hdr) &&
+ !(ieee80211_is_action(hdr->frame_control) &&
+ tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))
+ tx->key = NULL;
+ else if (ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_robust_mgmt_frame(hdr))
+ tx->key = NULL;
+ else {
I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
return TX_DROP;
- } else
- tx->key = NULL;
+ }
if (tx->key) {
bool skip_hw = false;
@@ -974,8 +966,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
info->control.rates[1].idx = -1;
info->control.rates[2].idx = -1;
info->control.rates[3].idx = -1;
- info->control.rates[4].idx = -1;
- BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
+ BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 4);
info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
hdr->frame_control &= ~morefrags;
@@ -1310,11 +1301,8 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
break;
}
- if (local->ops->tx_frags)
- drv_tx_frags(local, vif, pubsta, skbs);
- else
- result = ieee80211_tx_frags(local, vif, pubsta, skbs,
- txpending);
+ result = ieee80211_tx_frags(local, vif, pubsta, skbs,
+ txpending);
ieee80211_tpt_led_trig_tx(local, fc, led_len);
ieee80211_led_tx(local, 1);
@@ -1836,6 +1824,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
/* RA TA mDA mSA AE:DA SA */
mesh_da = mppath->mpp;
is_mesh_mcast = 0;
+ } else if (mpath) {
+ mesh_da = mpath->dst;
+ is_mesh_mcast = 0;
} else {
/* DA TA mSA AE:SA */
mesh_da = bcast;
@@ -1965,7 +1956,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
(cpu_to_be16(ethertype) != sdata->control_port_protocol ||
!ether_addr_equal(sdata->vif.addr, skb->data + ETH_ALEN)))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- net_dbg_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
+ net_info_ratelimited("%s: dropped frame to %pM (unauthorized port)\n",
dev->name, hdr.addr1);
#endif
@@ -2437,9 +2428,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
*pos++ = WLAN_EID_SSID;
*pos++ = 0x0;
- if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
+ if (ieee80211_add_srates_ie(sdata, skb, true) ||
mesh_add_ds_params_ie(skb, sdata) ||
- ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
+ ieee80211_add_ext_srates_ie(sdata, skb, true) ||
mesh_add_rsn_ie(skb, sdata) ||
mesh_add_ht_cap_ie(skb, sdata) ||
mesh_add_ht_oper_ie(skb, sdata) ||
@@ -2733,7 +2724,7 @@ EXPORT_SYMBOL(ieee80211_get_buffered_bc);
void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, int tid)
{
- int ac = ieee802_1d_to_ac[tid];
+ int ac = ieee802_1d_to_ac[tid & 7];
skb_set_mac_header(skb, 0);
skb_set_network_header(skb, 0);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 8dd4712620ff..39b82fee4904 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -268,6 +268,10 @@ EXPORT_SYMBOL(ieee80211_ctstoself_duration);
void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
{
struct ieee80211_sub_if_data *sdata;
+ int n_acs = IEEE80211_NUM_ACS;
+
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ n_acs = 1;
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
int ac;
@@ -279,7 +283,7 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
local->queue_stop_reasons[sdata->vif.cab_queue] != 0)
continue;
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ for (ac = 0; ac < n_acs; ac++) {
int ac_queue = sdata->vif.hw_queue[ac];
if (ac_queue == queue ||
@@ -341,6 +345,7 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
+ int n_acs = IEEE80211_NUM_ACS;
trace_stop_queue(local, queue, reason);
@@ -352,11 +357,14 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
__set_bit(reason, &local->queue_stop_reasons[queue]);
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ n_acs = 1;
+
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
int ac;
- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ for (ac = 0; ac < n_acs; ac++) {
if (sdata->vif.hw_queue[ac] == queue ||
sdata->vif.cab_queue == queue)
netif_stop_subqueue(sdata->dev, ac);
@@ -521,6 +529,11 @@ void ieee80211_iterate_active_interfaces(
&sdata->vif);
}
+ sdata = rcu_dereference_protected(local->monitor_sdata,
+ lockdep_is_held(&local->iflist_mtx));
+ if (sdata)
+ iterator(data, sdata->vif.addr, &sdata->vif);
+
mutex_unlock(&local->iflist_mtx);
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces);
@@ -549,6 +562,10 @@ void ieee80211_iterate_active_interfaces_atomic(
&sdata->vif);
}
+ sdata = rcu_dereference(local->monitor_sdata);
+ if (sdata)
+ iterator(data, sdata->vif.addr, &sdata->vif);
+
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
@@ -804,7 +821,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_queue_params qparam;
int ac;
- bool use_11b;
+ bool use_11b, enable_qos;
int aCWmin, aCWmax;
if (!local->ops->conf_tx)
@@ -818,6 +835,13 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
!(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
+ /*
+ * By default disable QoS in STA mode for old access points, which do
+ * not support 802.11e. New APs will provide proper queue parameters,
+ * that we will configure later.
+ */
+ enable_qos = (sdata->vif.type != NL80211_IFTYPE_STATION);
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
/* Set defaults according to 802.11-2007 Table 7-37 */
aCWmax = 1023;
@@ -826,38 +850,47 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
else
aCWmin = 15;
- switch (ac) {
- case IEEE80211_AC_BK:
- qparam.cw_max = aCWmax;
- qparam.cw_min = aCWmin;
- qparam.txop = 0;
- qparam.aifs = 7;
- break;
- default: /* never happens but let's not leave undefined */
- case IEEE80211_AC_BE:
+ if (enable_qos) {
+ switch (ac) {
+ case IEEE80211_AC_BK:
+ qparam.cw_max = aCWmax;
+ qparam.cw_min = aCWmin;
+ qparam.txop = 0;
+ qparam.aifs = 7;
+ break;
+ /* never happens but let's not leave undefined */
+ default:
+ case IEEE80211_AC_BE:
+ qparam.cw_max = aCWmax;
+ qparam.cw_min = aCWmin;
+ qparam.txop = 0;
+ qparam.aifs = 3;
+ break;
+ case IEEE80211_AC_VI:
+ qparam.cw_max = aCWmin;
+ qparam.cw_min = (aCWmin + 1) / 2 - 1;
+ if (use_11b)
+ qparam.txop = 6016/32;
+ else
+ qparam.txop = 3008/32;
+ qparam.aifs = 2;
+ break;
+ case IEEE80211_AC_VO:
+ qparam.cw_max = (aCWmin + 1) / 2 - 1;
+ qparam.cw_min = (aCWmin + 1) / 4 - 1;
+ if (use_11b)
+ qparam.txop = 3264/32;
+ else
+ qparam.txop = 1504/32;
+ qparam.aifs = 2;
+ break;
+ }
+ } else {
+ /* Confiure old 802.11b/g medium access rules. */
qparam.cw_max = aCWmax;
qparam.cw_min = aCWmin;
qparam.txop = 0;
- qparam.aifs = 3;
- break;
- case IEEE80211_AC_VI:
- qparam.cw_max = aCWmin;
- qparam.cw_min = (aCWmin + 1) / 2 - 1;
- if (use_11b)
- qparam.txop = 6016/32;
- else
- qparam.txop = 3008/32;
- qparam.aifs = 2;
- break;
- case IEEE80211_AC_VO:
- qparam.cw_max = (aCWmin + 1) / 2 - 1;
- qparam.cw_min = (aCWmin + 1) / 4 - 1;
- if (use_11b)
- qparam.txop = 3264/32;
- else
- qparam.txop = 1504/32;
qparam.aifs = 2;
- break;
}
qparam.uapsd = false;
@@ -866,12 +899,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
drv_conf_tx(local, sdata, ac, &qparam);
}
- /* after reinitialize QoS TX queues setting to default,
- * disable QoS at all */
-
if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
- sdata->vif.bss_conf.qos =
- sdata->vif.type != NL80211_IFTYPE_STATION;
+ sdata->vif.bss_conf.qos = enable_qos;
if (bss_notify)
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_QOS);
@@ -979,6 +1008,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
int ext_rates_len;
sband = local->hw.wiphy->bands[band];
+ if (WARN_ON_ONCE(!sband))
+ return 0;
pos = buffer;
@@ -1060,6 +1091,10 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
pos += noffset - offset;
}
+ if (sband->vht_cap.vht_supported)
+ pos = ieee80211_ie_build_vht_cap(pos, &sband->vht_cap,
+ sband->vht_cap.cap);
+
return pos - buffer;
}
@@ -1267,14 +1302,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* add STAs back */
mutex_lock(&local->sta_mtx);
list_for_each_entry(sta, &local->sta_list, list) {
- if (sta->uploaded) {
- enum ieee80211_sta_state state;
+ enum ieee80211_sta_state state;
- for (state = IEEE80211_STA_NOTEXIST;
- state < sta->sta_state; state++)
- WARN_ON(drv_sta_state(local, sta->sdata, sta,
- state, state + 1));
- }
+ if (!sta->uploaded)
+ continue;
+
+ /* AP-mode stations will be added later */
+ if (sta->sdata->vif.type == NL80211_IFTYPE_AP)
+ continue;
+
+ for (state = IEEE80211_STA_NOTEXIST;
+ state < sta->sta_state; state++)
+ WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
+ state + 1));
}
mutex_unlock(&local->sta_mtx);
@@ -1371,12 +1411,33 @@ int ieee80211_reconfig(struct ieee80211_local *local)
}
}
+ /* APs are now beaconing, add back stations */
+ mutex_lock(&local->sta_mtx);
+ list_for_each_entry(sta, &local->sta_list, list) {
+ enum ieee80211_sta_state state;
+
+ if (!sta->uploaded)
+ continue;
+
+ if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
+ continue;
+
+ for (state = IEEE80211_STA_NOTEXIST;
+ state < sta->sta_state; state++)
+ WARN_ON(drv_sta_state(local, sta->sdata, sta, state,
+ state + 1));
+ }
+ mutex_unlock(&local->sta_mtx);
+
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
if (ieee80211_sdata_running(sdata))
ieee80211_enable_keys(sdata);
wake_up:
+ local->in_reconfig = false;
+ barrier();
+
/*
* Clear the WLAN_STA_BLOCK_BA flag so new aggregation
* sessions can be established after a resume.
@@ -1661,6 +1722,27 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
return pos;
}
+u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
+ u32 cap)
+{
+ __le32 tmp;
+
+ *pos++ = WLAN_EID_VHT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_vht_capabilities);
+ memset(pos, 0, sizeof(struct ieee80211_vht_capabilities));
+
+ /* capability flags */
+ tmp = cpu_to_le32(cap);
+ memcpy(pos, &tmp, sizeof(u32));
+ pos += sizeof(u32);
+
+ /* VHT MCS set */
+ memcpy(pos, &vht_cap->vht_mcs, sizeof(vht_cap->vht_mcs));
+ pos += sizeof(vht_cap->vht_mcs);
+
+ return pos;
+}
+
u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
struct ieee80211_channel *channel,
enum nl80211_channel_type channel_type,
@@ -1726,15 +1808,14 @@ ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
return channel_type;
}
-int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
+int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
int rate;
u8 i, rates, *pos;
- u32 basic_rates = vif->bss_conf.basic_rates;
+ u32 basic_rates = sdata->vif.bss_conf.basic_rates;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
rates = sband->n_bitrates;
@@ -1758,15 +1839,14 @@ int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
return 0;
}
-int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif,
+int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
int rate;
u8 i, exrates, *pos;
- u32 basic_rates = vif->bss_conf.basic_rates;
+ u32 basic_rates = sdata->vif.bss_conf.basic_rates;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
exrates = sband->n_bitrates;
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index c3d643a6536c..cea06e9f26f4 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -52,11 +52,11 @@ static int wme_downgrade_ac(struct sk_buff *skb)
}
}
-static u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
+static u16 ieee80211_downgrade_queue(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
/* in case we are a client verify acm is not set for this ac */
- while (unlikely(local->wmm_acm & BIT(skb->priority))) {
+ while (unlikely(sdata->wmm_acm & BIT(skb->priority))) {
if (wme_downgrade_ac(skb)) {
/*
* This should not really happen. The AP has marked all
@@ -73,10 +73,11 @@ static u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
}
/* Indicate which queue to use for this fully formed 802.11 frame */
-u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_hdr *hdr)
{
+ struct ieee80211_local *local = sdata->local;
u8 *p;
if (local->hw.queues < IEEE80211_NUM_ACS)
@@ -94,7 +95,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
p = ieee80211_get_qos_ctl(hdr);
skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
- return ieee80211_downgrade_queue(local, skb);
+ return ieee80211_downgrade_queue(sdata, skb);
}
/* Indicate which queue to use. */
@@ -156,7 +157,7 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
* data frame has */
skb->priority = cfg80211_classify8021d(skb);
- return ieee80211_downgrade_queue(local, skb);
+ return ieee80211_downgrade_queue(sdata, skb);
}
void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index ca80818b7b66..7fea4bb8acbc 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -15,7 +15,7 @@
extern const int ieee802_1d_to_ac[8];
-u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
+u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb,
struct ieee80211_hdr *hdr);
u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
deleted file mode 100644
index b2650a9d45ff..000000000000
--- a/net/mac80211/work.c
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * mac80211 work implementation
- *
- * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
- * Copyright 2004, Instant802 Networks, Inc.
- * Copyright 2005, Devicescape Software, Inc.
- * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
- * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/if_ether.h>
-#include <linux/skbuff.h>
-#include <linux/if_arp.h>
-#include <linux/etherdevice.h>
-#include <linux/crc32.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-#include <asm/unaligned.h>
-
-#include "ieee80211_i.h"
-#include "rate.h"
-#include "driver-ops.h"
-
-enum work_action {
- WORK_ACT_NONE,
- WORK_ACT_TIMEOUT,
-};
-
-
-/* utils */
-static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
-{
- lockdep_assert_held(&local->mtx);
-}
-
-/*
- * We can have multiple work items (and connection probing)
- * scheduling this timer, but we need to take care to only
- * reschedule it when it should fire _earlier_ than it was
- * asked for before, or if it's not pending right now. This
- * function ensures that. Note that it then is required to
- * run this function for all timeouts after the first one
- * has happened -- the work that runs from this timer will
- * do that.
- */
-static void run_again(struct ieee80211_local *local,
- unsigned long timeout)
-{
- ASSERT_WORK_MTX(local);
-
- if (!timer_pending(&local->work_timer) ||
- time_before(timeout, local->work_timer.expires))
- mod_timer(&local->work_timer, timeout);
-}
-
-void free_work(struct ieee80211_work *wk)
-{
- kfree_rcu(wk, rcu_head);
-}
-
-static enum work_action __must_check
-ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
-{
- /*
- * First time we run, do nothing -- the generic code will
- * have switched to the right channel etc.
- */
- if (!wk->started) {
- wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
-
- cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
- wk->chan, wk->chan_type,
- wk->remain.duration, GFP_KERNEL);
-
- return WORK_ACT_NONE;
- }
-
- return WORK_ACT_TIMEOUT;
-}
-
-static enum work_action __must_check
-ieee80211_offchannel_tx(struct ieee80211_work *wk)
-{
- if (!wk->started) {
- wk->timeout = jiffies + msecs_to_jiffies(wk->offchan_tx.wait);
-
- /*
- * After this, offchan_tx.frame remains but now is no
- * longer a valid pointer -- we still need it as the
- * cookie for canceling this work/status matching.
- */
- ieee80211_tx_skb(wk->sdata, wk->offchan_tx.frame);
-
- return WORK_ACT_NONE;
- }
-
- return WORK_ACT_TIMEOUT;
-}
-
-static void ieee80211_work_timer(unsigned long data)
-{
- struct ieee80211_local *local = (void *) data;
-
- if (local->quiescing)
- return;
-
- ieee80211_queue_work(&local->hw, &local->work_work);
-}
-
-static void ieee80211_work_work(struct work_struct *work)
-{
- struct ieee80211_local *local =
- container_of(work, struct ieee80211_local, work_work);
- struct ieee80211_work *wk, *tmp;
- LIST_HEAD(free_work);
- enum work_action rma;
- bool remain_off_channel = false;
-
- /*
- * ieee80211_queue_work() should have picked up most cases,
- * here we'll pick the rest.
- */
- if (WARN(local->suspended, "work scheduled while going to suspend\n"))
- return;
-
- mutex_lock(&local->mtx);
-
- if (local->scanning) {
- mutex_unlock(&local->mtx);
- return;
- }
-
- ieee80211_recalc_idle(local);
-
- list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
- bool started = wk->started;
-
- /* mark work as started if it's on the current off-channel */
- if (!started && local->tmp_channel &&
- wk->chan == local->tmp_channel &&
- wk->chan_type == local->tmp_channel_type) {
- started = true;
- wk->timeout = jiffies;
- }
-
- if (!started && !local->tmp_channel) {
- ieee80211_offchannel_stop_vifs(local, true);
-
- local->tmp_channel = wk->chan;
- local->tmp_channel_type = wk->chan_type;
-
- ieee80211_hw_config(local, 0);
-
- started = true;
- wk->timeout = jiffies;
- }
-
- /* don't try to work with items that aren't started */
- if (!started)
- continue;
-
- if (time_is_after_jiffies(wk->timeout)) {
- /*
- * This work item isn't supposed to be worked on
- * right now, but take care to adjust the timer
- * properly.
- */
- run_again(local, wk->timeout);
- continue;
- }
-
- switch (wk->type) {
- default:
- WARN_ON(1);
- /* nothing */
- rma = WORK_ACT_NONE;
- break;
- case IEEE80211_WORK_ABORT:
- rma = WORK_ACT_TIMEOUT;
- break;
- case IEEE80211_WORK_REMAIN_ON_CHANNEL:
- rma = ieee80211_remain_on_channel_timeout(wk);
- break;
- case IEEE80211_WORK_OFFCHANNEL_TX:
- rma = ieee80211_offchannel_tx(wk);
- break;
- }
-
- wk->started = started;
-
- switch (rma) {
- case WORK_ACT_NONE:
- /* might have changed the timeout */
- run_again(local, wk->timeout);
- break;
- case WORK_ACT_TIMEOUT:
- list_del_rcu(&wk->list);
- synchronize_rcu();
- list_add(&wk->list, &free_work);
- break;
- default:
- WARN(1, "unexpected: %d", rma);
- }
- }
-
- list_for_each_entry(wk, &local->work_list, list) {
- if (!wk->started)
- continue;
- if (wk->chan != local->tmp_channel ||
- wk->chan_type != local->tmp_channel_type)
- continue;
- remain_off_channel = true;
- }
-
- if (!remain_off_channel && local->tmp_channel) {
- local->tmp_channel = NULL;
- ieee80211_hw_config(local, 0);
-
- ieee80211_offchannel_return(local, true);
-
- /* give connection some time to breathe */
- run_again(local, jiffies + HZ/2);
- }
-
- ieee80211_recalc_idle(local);
- ieee80211_run_deferred_scan(local);
-
- mutex_unlock(&local->mtx);
-
- list_for_each_entry_safe(wk, tmp, &free_work, list) {
- wk->done(wk, NULL);
- list_del(&wk->list);
- kfree(wk);
- }
-}
-
-void ieee80211_add_work(struct ieee80211_work *wk)
-{
- struct ieee80211_local *local;
-
- if (WARN_ON(!wk->chan))
- return;
-
- if (WARN_ON(!wk->sdata))
- return;
-
- if (WARN_ON(!wk->done))
- return;
-
- if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
- return;
-
- wk->started = false;
-
- local = wk->sdata->local;
- mutex_lock(&local->mtx);
- list_add_tail(&wk->list, &local->work_list);
- mutex_unlock(&local->mtx);
-
- ieee80211_queue_work(&local->hw, &local->work_work);
-}
-
-void ieee80211_work_init(struct ieee80211_local *local)
-{
- INIT_LIST_HEAD(&local->work_list);
- setup_timer(&local->work_timer, ieee80211_work_timer,
- (unsigned long)local);
- INIT_WORK(&local->work_work, ieee80211_work_work);
-}
-
-void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_work *wk;
- bool cleanup = false;
-
- mutex_lock(&local->mtx);
- list_for_each_entry(wk, &local->work_list, list) {
- if (wk->sdata != sdata)
- continue;
- cleanup = true;
- wk->type = IEEE80211_WORK_ABORT;
- wk->started = true;
- wk->timeout = jiffies;
- }
- mutex_unlock(&local->mtx);
-
- /* run cleanups etc. */
- if (cleanup)
- ieee80211_work_work(&local->work_work);
-
- mutex_lock(&local->mtx);
- list_for_each_entry(wk, &local->work_list, list) {
- if (wk->sdata != sdata)
- continue;
- WARN_ON(1);
- break;
- }
- mutex_unlock(&local->mtx);
-}
-
-static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
- struct sk_buff *skb)
-{
- /*
- * We are done serving the remain-on-channel command.
- */
- cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
- wk->chan, wk->chan_type,
- GFP_KERNEL);
-
- return WORK_DONE_DESTROY;
-}
-
-int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type,
- unsigned int duration, u64 *cookie)
-{
- struct ieee80211_work *wk;
-
- wk = kzalloc(sizeof(*wk), GFP_KERNEL);
- if (!wk)
- return -ENOMEM;
-
- wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
- wk->chan = chan;
- wk->chan_type = channel_type;
- wk->sdata = sdata;
- wk->done = ieee80211_remain_done;
-
- wk->remain.duration = duration;
-
- *cookie = (unsigned long) wk;
-
- ieee80211_add_work(wk);
-
- return 0;
-}
-
-int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
- u64 cookie)
-{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_work *wk, *tmp;
- bool found = false;
-
- mutex_lock(&local->mtx);
- list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
- if ((unsigned long) wk == cookie) {
- wk->timeout = jiffies;
- found = true;
- break;
- }
- }
- mutex_unlock(&local->mtx);
-
- if (!found)
- return -ENOENT;
-
- ieee80211_queue_work(&local->hw, &local->work_work);
-
- return 0;
-}
diff --git a/net/mac802154/Makefile b/net/mac802154/Makefile
index ec1bd3fc1273..57cf5d1a2e4a 100644
--- a/net/mac802154/Makefile
+++ b/net/mac802154/Makefile
@@ -1,2 +1,2 @@
obj-$(CONFIG_MAC802154) += mac802154.o
-mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o
+mac802154-objs := ieee802154_dev.o rx.o tx.o mac_cmd.o mib.o monitor.o wpan.o
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index e3edfb0661b0..e748aed290aa 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -140,6 +140,10 @@ mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
name, mac802154_monitor_setup);
break;
+ case IEEE802154_DEV_WPAN:
+ dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
+ name, mac802154_wpan_setup);
+ break;
default:
dev = NULL;
err = -EINVAL;
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
index 789d9c948aec..a4dcaf1dd4b6 100644
--- a/net/mac802154/mac802154.h
+++ b/net/mac802154/mac802154.h
@@ -93,6 +93,7 @@ struct mac802154_sub_if_data {
#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
+extern struct ieee802154_mlme_ops mac802154_mlme_wpan;
int mac802154_slave_open(struct net_device *dev);
int mac802154_slave_close(struct net_device *dev);
@@ -100,10 +101,18 @@ int mac802154_slave_close(struct net_device *dev);
void mac802154_monitors_rx(struct mac802154_priv *priv, struct sk_buff *skb);
void mac802154_monitor_setup(struct net_device *dev);
+void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb);
+void mac802154_wpan_setup(struct net_device *dev);
+
netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
u8 page, u8 chan);
/* MIB callbacks */
+void mac802154_dev_set_short_addr(struct net_device *dev, u16 val);
+u16 mac802154_dev_get_short_addr(const struct net_device *dev);
void mac802154_dev_set_ieee_addr(struct net_device *dev);
+u16 mac802154_dev_get_pan_id(const struct net_device *dev);
+void mac802154_dev_set_pan_id(struct net_device *dev, u16 val);
+void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
#endif /* MAC802154_H */
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index 7a5d0e052cd7..d8d277006089 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -25,13 +25,37 @@
#include <linux/skbuff.h>
#include <linux/if_arp.h>
+#include <net/ieee802154.h>
#include <net/ieee802154_netdev.h>
#include <net/wpan-phy.h>
#include <net/mac802154.h>
+#include <net/nl802154.h>
#include "mac802154.h"
-struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
+static int mac802154_mlme_start_req(struct net_device *dev,
+ struct ieee802154_addr *addr,
+ u8 channel, u8 page,
+ u8 bcn_ord, u8 sf_ord,
+ u8 pan_coord, u8 blx,
+ u8 coord_realign)
+{
+ BUG_ON(addr->addr_type != IEEE802154_ADDR_SHORT);
+
+ mac802154_dev_set_pan_id(dev, addr->pan_id);
+ mac802154_dev_set_short_addr(dev, addr->short_addr);
+ mac802154_dev_set_ieee_addr(dev);
+ mac802154_dev_set_page_channel(dev, page, channel);
+
+ /* FIXME: add validation for unused parameters to be sane
+ * for SoftMAC
+ */
+ ieee802154_nl_start_confirm(dev, IEEE802154_SUCCESS);
+
+ return 0;
+}
+
+static struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
{
struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -43,3 +67,10 @@ struct wpan_phy *mac802154_get_phy(const struct net_device *dev)
struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced = {
.get_phy = mac802154_get_phy,
};
+
+struct ieee802154_mlme_ops mac802154_mlme_wpan = {
+ .get_phy = mac802154_get_phy,
+ .start_req = mac802154_mlme_start_req,
+ .get_pan_id = mac802154_dev_get_pan_id,
+ .get_short_addr = mac802154_dev_get_short_addr,
+};
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index ab59821ec729..f47781ab0ccc 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -28,13 +28,18 @@
#include "mac802154.h"
+struct phy_chan_notify_work {
+ struct work_struct work;
+ struct net_device *dev;
+};
+
struct hw_addr_filt_notify_work {
struct work_struct work;
struct net_device *dev;
unsigned long changed;
};
-struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev)
+static struct mac802154_priv *mac802154_slave_get_priv(struct net_device *dev)
{
struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -78,6 +83,37 @@ static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
return;
}
+void mac802154_dev_set_short_addr(struct net_device *dev, u16 val)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ spin_lock_bh(&priv->mib_lock);
+ priv->short_addr = val;
+ spin_unlock_bh(&priv->mib_lock);
+
+ if ((priv->hw->ops->set_hw_addr_filt) &&
+ (priv->hw->hw.hw_filt.short_addr != priv->short_addr)) {
+ priv->hw->hw.hw_filt.short_addr = priv->short_addr;
+ set_hw_addr_filt(dev, IEEE802515_AFILT_SADDR_CHANGED);
+ }
+}
+
+u16 mac802154_dev_get_short_addr(const struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ u16 ret;
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ spin_lock_bh(&priv->mib_lock);
+ ret = priv->short_addr;
+ spin_unlock_bh(&priv->mib_lock);
+
+ return ret;
+}
+
void mac802154_dev_set_ieee_addr(struct net_device *dev)
{
struct mac802154_sub_if_data *priv = netdev_priv(dev);
@@ -91,3 +127,73 @@ void mac802154_dev_set_ieee_addr(struct net_device *dev)
set_hw_addr_filt(dev, IEEE802515_AFILT_IEEEADDR_CHANGED);
}
}
+
+u16 mac802154_dev_get_pan_id(const struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ u16 ret;
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ spin_lock_bh(&priv->mib_lock);
+ ret = priv->pan_id;
+ spin_unlock_bh(&priv->mib_lock);
+
+ return ret;
+}
+
+void mac802154_dev_set_pan_id(struct net_device *dev, u16 val)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ spin_lock_bh(&priv->mib_lock);
+ priv->pan_id = val;
+ spin_unlock_bh(&priv->mib_lock);
+
+ if ((priv->hw->ops->set_hw_addr_filt) &&
+ (priv->hw->hw.hw_filt.pan_id != priv->pan_id)) {
+ priv->hw->hw.hw_filt.pan_id = priv->pan_id;
+ set_hw_addr_filt(dev, IEEE802515_AFILT_PANID_CHANGED);
+ }
+}
+
+static void phy_chan_notify(struct work_struct *work)
+{
+ struct phy_chan_notify_work *nw = container_of(work,
+ struct phy_chan_notify_work, work);
+ struct mac802154_priv *hw = mac802154_slave_get_priv(nw->dev);
+ struct mac802154_sub_if_data *priv = netdev_priv(nw->dev);
+ int res;
+
+ res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan);
+ if (res)
+ pr_debug("set_channel failed\n");
+
+ kfree(nw);
+}
+
+void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct phy_chan_notify_work *work;
+
+ BUG_ON(dev->type != ARPHRD_IEEE802154);
+
+ spin_lock_bh(&priv->mib_lock);
+ priv->page = page;
+ priv->chan = chan;
+ spin_unlock_bh(&priv->mib_lock);
+
+ if (priv->hw->phy->current_channel != priv->chan ||
+ priv->hw->phy->current_page != priv->page) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, phy_chan_notify);
+ work->dev = dev;
+ queue_work(priv->hw->dev_workqueue, &work->work);
+ }
+}
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 4a7d76d4f8bc..38548ec2098f 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -77,6 +77,7 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
}
mac802154_monitors_rx(priv, skb);
+ mac802154_wpans_rx(priv, skb);
out:
dev_kfree_skb(skb);
return;
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 434b6873b352..1a4df39c722e 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -88,6 +88,8 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ mac802154_monitors_rx(mac802154_to_priv(&priv->hw), skb);
+
if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
u16 crc = crc_ccitt(0, skb->data, skb->len);
u8 *data = skb_put(skb, 2);
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
new file mode 100644
index 000000000000..f30f6d4beea1
--- /dev/null
+++ b/net/mac802154/wpan.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright 2007-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Sergey Lapin <slapin@ossfans.org>
+ * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/if_arp.h>
+
+#include <net/rtnetlink.h>
+#include <linux/nl802154.h>
+#include <net/af_ieee802154.h>
+#include <net/mac802154.h>
+#include <net/ieee802154_netdev.h>
+#include <net/ieee802154.h>
+#include <net/wpan-phy.h>
+
+#include "mac802154.h"
+
+static inline int mac802154_fetch_skb_u8(struct sk_buff *skb, u8 *val)
+{
+ if (unlikely(!pskb_may_pull(skb, 1)))
+ return -EINVAL;
+
+ *val = skb->data[0];
+ skb_pull(skb, 1);
+
+ return 0;
+}
+
+static inline int mac802154_fetch_skb_u16(struct sk_buff *skb, u16 *val)
+{
+ if (unlikely(!pskb_may_pull(skb, 2)))
+ return -EINVAL;
+
+ *val = skb->data[0] | (skb->data[1] << 8);
+ skb_pull(skb, 2);
+
+ return 0;
+}
+
+static inline void mac802154_haddr_copy_swap(u8 *dest, const u8 *src)
+{
+ int i;
+ for (i = 0; i < IEEE802154_ADDR_LEN; i++)
+ dest[IEEE802154_ADDR_LEN - i - 1] = src[i];
+}
+
+static int
+mac802154_wpan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ struct sockaddr_ieee802154 *sa =
+ (struct sockaddr_ieee802154 *)&ifr->ifr_addr;
+ int err = -ENOIOCTLCMD;
+
+ spin_lock_bh(&priv->mib_lock);
+
+ switch (cmd) {
+ case SIOCGIFADDR:
+ if (priv->pan_id == IEEE802154_PANID_BROADCAST ||
+ priv->short_addr == IEEE802154_ADDR_BROADCAST) {
+ err = -EADDRNOTAVAIL;
+ break;
+ }
+
+ sa->family = AF_IEEE802154;
+ sa->addr.addr_type = IEEE802154_ADDR_SHORT;
+ sa->addr.pan_id = priv->pan_id;
+ sa->addr.short_addr = priv->short_addr;
+
+ err = 0;
+ break;
+ case SIOCSIFADDR:
+ dev_warn(&dev->dev,
+ "Using DEBUGing ioctl SIOCSIFADDR isn't recommened!\n");
+ if (sa->family != AF_IEEE802154 ||
+ sa->addr.addr_type != IEEE802154_ADDR_SHORT ||
+ sa->addr.pan_id == IEEE802154_PANID_BROADCAST ||
+ sa->addr.short_addr == IEEE802154_ADDR_BROADCAST ||
+ sa->addr.short_addr == IEEE802154_ADDR_UNDEF) {
+ err = -EINVAL;
+ break;
+ }
+
+ priv->pan_id = sa->addr.pan_id;
+ priv->short_addr = sa->addr.short_addr;
+
+ err = 0;
+ break;
+ }
+
+ spin_unlock_bh(&priv->mib_lock);
+ return err;
+}
+
+static int mac802154_wpan_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* FIXME: validate addr */
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ mac802154_dev_set_ieee_addr(dev);
+ return 0;
+}
+
+static int mac802154_header_create(struct sk_buff *skb,
+ struct net_device *dev,
+ unsigned short type,
+ const void *_daddr,
+ const void *_saddr,
+ unsigned len)
+{
+ const struct ieee802154_addr *saddr = _saddr;
+ const struct ieee802154_addr *daddr = _daddr;
+ struct ieee802154_addr dev_addr;
+ struct mac802154_sub_if_data *priv = netdev_priv(dev);
+ int pos = 2;
+ u8 *head;
+ u16 fc;
+
+ if (!daddr)
+ return -EINVAL;
+
+ head = kzalloc(MAC802154_FRAME_HARD_HEADER_LEN, GFP_KERNEL);
+ if (head == NULL)
+ return -ENOMEM;
+
+ head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */
+ fc = mac_cb_type(skb);
+
+ if (!saddr) {
+ spin_lock_bh(&priv->mib_lock);
+
+ if (priv->short_addr == IEEE802154_ADDR_BROADCAST ||
+ priv->short_addr == IEEE802154_ADDR_UNDEF ||
+ priv->pan_id == IEEE802154_PANID_BROADCAST) {
+ dev_addr.addr_type = IEEE802154_ADDR_LONG;
+ memcpy(dev_addr.hwaddr, dev->dev_addr,
+ IEEE802154_ADDR_LEN);
+ } else {
+ dev_addr.addr_type = IEEE802154_ADDR_SHORT;
+ dev_addr.short_addr = priv->short_addr;
+ }
+
+ dev_addr.pan_id = priv->pan_id;
+ saddr = &dev_addr;
+
+ spin_unlock_bh(&priv->mib_lock);
+ }
+
+ if (daddr->addr_type != IEEE802154_ADDR_NONE) {
+ fc |= (daddr->addr_type << IEEE802154_FC_DAMODE_SHIFT);
+
+ head[pos++] = daddr->pan_id & 0xff;
+ head[pos++] = daddr->pan_id >> 8;
+
+ if (daddr->addr_type == IEEE802154_ADDR_SHORT) {
+ head[pos++] = daddr->short_addr & 0xff;
+ head[pos++] = daddr->short_addr >> 8;
+ } else {
+ mac802154_haddr_copy_swap(head + pos, daddr->hwaddr);
+ pos += IEEE802154_ADDR_LEN;
+ }
+ }
+
+ if (saddr->addr_type != IEEE802154_ADDR_NONE) {
+ fc |= (saddr->addr_type << IEEE802154_FC_SAMODE_SHIFT);
+
+ if ((saddr->pan_id == daddr->pan_id) &&
+ (saddr->pan_id != IEEE802154_PANID_BROADCAST)) {
+ /* PANID compression/intra PAN */
+ fc |= IEEE802154_FC_INTRA_PAN;
+ } else {
+ head[pos++] = saddr->pan_id & 0xff;
+ head[pos++] = saddr->pan_id >> 8;
+ }
+
+ if (saddr->addr_type == IEEE802154_ADDR_SHORT) {
+ head[pos++] = saddr->short_addr & 0xff;
+ head[pos++] = saddr->short_addr >> 8;
+ } else {
+ mac802154_haddr_copy_swap(head + pos, saddr->hwaddr);
+ pos += IEEE802154_ADDR_LEN;
+ }
+ }
+
+ head[0] = fc;
+ head[1] = fc >> 8;
+
+ memcpy(skb_push(skb, pos), head, pos);
+ kfree(head);
+
+ return pos;
+}
+
+static int
+mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+ const u8 *hdr = skb_mac_header(skb);
+ const u8 *tail = skb_tail_pointer(skb);
+ struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr;
+ u16 fc;
+ int da_type;
+
+ if (hdr + 3 > tail)
+ goto malformed;
+
+ fc = hdr[0] | (hdr[1] << 8);
+
+ hdr += 3;
+
+ da_type = IEEE802154_FC_DAMODE(fc);
+ addr->addr_type = IEEE802154_FC_SAMODE(fc);
+
+ switch (da_type) {
+ case IEEE802154_ADDR_NONE:
+ if (fc & IEEE802154_FC_INTRA_PAN)
+ goto malformed;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (fc & IEEE802154_FC_INTRA_PAN) {
+ if (hdr + 2 > tail)
+ goto malformed;
+ addr->pan_id = hdr[0] | (hdr[1] << 8);
+ hdr += 2;
+ }
+
+ if (hdr + IEEE802154_ADDR_LEN > tail)
+ goto malformed;
+
+ hdr += IEEE802154_ADDR_LEN;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (fc & IEEE802154_FC_INTRA_PAN) {
+ if (hdr + 2 > tail)
+ goto malformed;
+ addr->pan_id = hdr[0] | (hdr[1] << 8);
+ hdr += 2;
+ }
+
+ if (hdr + 2 > tail)
+ goto malformed;
+
+ hdr += 2;
+ break;
+ default:
+ goto malformed;
+
+ }
+
+ switch (addr->addr_type) {
+ case IEEE802154_ADDR_NONE:
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (!(fc & IEEE802154_FC_INTRA_PAN)) {
+ if (hdr + 2 > tail)
+ goto malformed;
+ addr->pan_id = hdr[0] | (hdr[1] << 8);
+ hdr += 2;
+ }
+
+ if (hdr + IEEE802154_ADDR_LEN > tail)
+ goto malformed;
+
+ mac802154_haddr_copy_swap(addr->hwaddr, hdr);
+ hdr += IEEE802154_ADDR_LEN;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (!(fc & IEEE802154_FC_INTRA_PAN)) {
+ if (hdr + 2 > tail)
+ goto malformed;
+ addr->pan_id = hdr[0] | (hdr[1] << 8);
+ hdr += 2;
+ }
+
+ if (hdr + 2 > tail)
+ goto malformed;
+
+ addr->short_addr = hdr[0] | (hdr[1] << 8);
+ hdr += 2;
+ break;
+ default:
+ goto malformed;
+ }
+
+ return sizeof(struct ieee802154_addr);
+
+malformed:
+ pr_debug("malformed packet\n");
+ return 0;
+}
+
+static netdev_tx_t
+mac802154_wpan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv;
+ u8 chan, page;
+
+ priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->mib_lock);
+ chan = priv->chan;
+ page = priv->page;
+ spin_unlock_bh(&priv->mib_lock);
+
+ if (chan == MAC802154_CHAN_NONE ||
+ page >= WPAN_NUM_PAGES ||
+ chan >= WPAN_NUM_CHANNELS)
+ return NETDEV_TX_OK;
+
+ skb->skb_iif = dev->ifindex;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+
+ return mac802154_tx(priv->hw, skb, page, chan);
+}
+
+static struct header_ops mac802154_header_ops = {
+ .create = mac802154_header_create,
+ .parse = mac802154_header_parse,
+};
+
+static const struct net_device_ops mac802154_wpan_ops = {
+ .ndo_open = mac802154_slave_open,
+ .ndo_stop = mac802154_slave_close,
+ .ndo_start_xmit = mac802154_wpan_xmit,
+ .ndo_do_ioctl = mac802154_wpan_ioctl,
+ .ndo_set_mac_address = mac802154_wpan_mac_addr,
+};
+
+void mac802154_wpan_setup(struct net_device *dev)
+{
+ struct mac802154_sub_if_data *priv;
+
+ dev->addr_len = IEEE802154_ADDR_LEN;
+ memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
+
+ dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN;
+ dev->header_ops = &mac802154_header_ops;
+ dev->needed_tailroom = 2; /* FCS */
+ dev->mtu = IEEE802154_MTU;
+ dev->tx_queue_len = 10;
+ dev->type = ARPHRD_IEEE802154;
+ dev->flags = IFF_NOARP | IFF_BROADCAST;
+ dev->watchdog_timeo = 0;
+
+ dev->destructor = free_netdev;
+ dev->netdev_ops = &mac802154_wpan_ops;
+ dev->ml_priv = &mac802154_mlme_wpan;
+
+ priv = netdev_priv(dev);
+ priv->type = IEEE802154_DEV_WPAN;
+
+ priv->chan = MAC802154_CHAN_NONE;
+ priv->page = 0;
+
+ spin_lock_init(&priv->mib_lock);
+
+ get_random_bytes(&priv->bsn, 1);
+ get_random_bytes(&priv->dsn, 1);
+
+ priv->pan_id = IEEE802154_PANID_BROADCAST;
+ priv->short_addr = IEEE802154_ADDR_BROADCAST;
+}
+
+static int mac802154_process_data(struct net_device *dev, struct sk_buff *skb)
+{
+ return netif_rx(skb);
+}
+
+static int
+mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb)
+{
+ pr_debug("getting packet via slave interface %s\n", sdata->dev->name);
+
+ spin_lock_bh(&sdata->mib_lock);
+
+ switch (mac_cb(skb)->da.addr_type) {
+ case IEEE802154_ADDR_NONE:
+ if (mac_cb(skb)->sa.addr_type != IEEE802154_ADDR_NONE)
+ /* FIXME: check if we are PAN coordinator */
+ skb->pkt_type = PACKET_OTHERHOST;
+ else
+ /* ACK comes with both addresses empty */
+ skb->pkt_type = PACKET_HOST;
+ break;
+ case IEEE802154_ADDR_LONG:
+ if (mac_cb(skb)->da.pan_id != sdata->pan_id &&
+ mac_cb(skb)->da.pan_id != IEEE802154_PANID_BROADCAST)
+ skb->pkt_type = PACKET_OTHERHOST;
+ else if (!memcmp(mac_cb(skb)->da.hwaddr, sdata->dev->dev_addr,
+ IEEE802154_ADDR_LEN))
+ skb->pkt_type = PACKET_HOST;
+ else
+ skb->pkt_type = PACKET_OTHERHOST;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (mac_cb(skb)->da.pan_id != sdata->pan_id &&
+ mac_cb(skb)->da.pan_id != IEEE802154_PANID_BROADCAST)
+ skb->pkt_type = PACKET_OTHERHOST;
+ else if (mac_cb(skb)->da.short_addr == sdata->short_addr)
+ skb->pkt_type = PACKET_HOST;
+ else if (mac_cb(skb)->da.short_addr ==
+ IEEE802154_ADDR_BROADCAST)
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_OTHERHOST;
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&sdata->mib_lock);
+
+ skb->dev = sdata->dev;
+
+ sdata->dev->stats.rx_packets++;
+ sdata->dev->stats.rx_bytes += skb->len;
+
+ switch (mac_cb_type(skb)) {
+ case IEEE802154_FC_TYPE_DATA:
+ return mac802154_process_data(sdata->dev, skb);
+ default:
+ pr_warning("ieee802154: bad frame received (type = %d)\n",
+ mac_cb_type(skb));
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+}
+
+static int mac802154_parse_frame_start(struct sk_buff *skb)
+{
+ u8 *head = skb->data;
+ u16 fc;
+
+ if (mac802154_fetch_skb_u16(skb, &fc) ||
+ mac802154_fetch_skb_u8(skb, &(mac_cb(skb)->seq)))
+ goto err;
+
+ pr_debug("fc: %04x dsn: %02x\n", fc, head[2]);
+
+ mac_cb(skb)->flags = IEEE802154_FC_TYPE(fc);
+ mac_cb(skb)->sa.addr_type = IEEE802154_FC_SAMODE(fc);
+ mac_cb(skb)->da.addr_type = IEEE802154_FC_DAMODE(fc);
+
+ if (fc & IEEE802154_FC_INTRA_PAN)
+ mac_cb(skb)->flags |= MAC_CB_FLAG_INTRAPAN;
+
+ if (mac_cb(skb)->da.addr_type != IEEE802154_ADDR_NONE) {
+ if (mac802154_fetch_skb_u16(skb, &(mac_cb(skb)->da.pan_id)))
+ goto err;
+
+ /* source PAN id compression */
+ if (mac_cb_is_intrapan(skb))
+ mac_cb(skb)->sa.pan_id = mac_cb(skb)->da.pan_id;
+
+ pr_debug("dest PAN addr: %04x\n", mac_cb(skb)->da.pan_id);
+
+ if (mac_cb(skb)->da.addr_type == IEEE802154_ADDR_SHORT) {
+ u16 *da = &(mac_cb(skb)->da.short_addr);
+
+ if (mac802154_fetch_skb_u16(skb, da))
+ goto err;
+
+ pr_debug("destination address is short: %04x\n",
+ mac_cb(skb)->da.short_addr);
+ } else {
+ if (!pskb_may_pull(skb, IEEE802154_ADDR_LEN))
+ goto err;
+
+ mac802154_haddr_copy_swap(mac_cb(skb)->da.hwaddr,
+ skb->data);
+ skb_pull(skb, IEEE802154_ADDR_LEN);
+
+ pr_debug("destination address is hardware\n");
+ }
+ }
+
+ if (mac_cb(skb)->sa.addr_type != IEEE802154_ADDR_NONE) {
+ /* non PAN-compression, fetch source address id */
+ if (!(mac_cb_is_intrapan(skb))) {
+ u16 *sa_pan = &(mac_cb(skb)->sa.pan_id);
+
+ if (mac802154_fetch_skb_u16(skb, sa_pan))
+ goto err;
+ }
+
+ pr_debug("source PAN addr: %04x\n", mac_cb(skb)->da.pan_id);
+
+ if (mac_cb(skb)->sa.addr_type == IEEE802154_ADDR_SHORT) {
+ u16 *sa = &(mac_cb(skb)->sa.short_addr);
+
+ if (mac802154_fetch_skb_u16(skb, sa))
+ goto err;
+
+ pr_debug("source address is short: %04x\n",
+ mac_cb(skb)->sa.short_addr);
+ } else {
+ if (!pskb_may_pull(skb, IEEE802154_ADDR_LEN))
+ goto err;
+
+ mac802154_haddr_copy_swap(mac_cb(skb)->sa.hwaddr,
+ skb->data);
+ skb_pull(skb, IEEE802154_ADDR_LEN);
+
+ pr_debug("source address is hardware\n");
+ }
+ }
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
+void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb)
+{
+ int ret;
+ struct sk_buff *sskb;
+ struct mac802154_sub_if_data *sdata;
+
+ ret = mac802154_parse_frame_start(skb);
+ if (ret) {
+ pr_debug("got invalid frame\n");
+ return;
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &priv->slaves, list) {
+ if (sdata->type != IEEE802154_DEV_WPAN)
+ continue;
+
+ sskb = skb_clone(skb, GFP_ATOMIC);
+ if (sskb)
+ mac802154_subif_frame(sdata, sskb);
+ }
+ rcu_read_unlock();
+}
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 209c1ed43368..c19b214ffd57 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -335,6 +335,27 @@ config NF_CT_NETLINK_TIMEOUT
If unsure, say `N'.
+config NF_CT_NETLINK_HELPER
+ tristate 'Connection tracking helpers in user-space via Netlink'
+ select NETFILTER_NETLINK
+ depends on NF_CT_NETLINK
+ depends on NETFILTER_NETLINK_QUEUE
+ depends on NETFILTER_NETLINK_QUEUE_CT
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables the user-space connection tracking helpers
+ infrastructure.
+
+ If unsure, say `N'.
+
+config NETFILTER_NETLINK_QUEUE_CT
+ bool "NFQUEUE integration with Connection Tracking"
+ default n
+ depends on NETFILTER_NETLINK_QUEUE
+ help
+ If this option is enabled, NFQUEUE can include Connection Tracking
+ information together with the packet is the enqueued via NFNETLINK.
+
endif # NF_CONNTRACK
# transparent proxy support
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 4e7960cc7b97..1c5160f2278e 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -9,6 +9,8 @@ obj-$(CONFIG_NETFILTER) = netfilter.o
obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
obj-$(CONFIG_NETFILTER_NETLINK_ACCT) += nfnetlink_acct.o
+nfnetlink_queue-y := nfnetlink_queue_core.o
+nfnetlink_queue-$(CONFIG_NETFILTER_NETLINK_QUEUE_CT) += nfnetlink_queue_ct.o
obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
@@ -24,6 +26,7 @@ obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
# netlink interface for nf_conntrack
obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
+obj-$(CONFIG_NF_CT_NETLINK_HELPER) += nfnetlink_cthelper.o
# connection tracking helpers
nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e19f3653db23..0bc6b60db4df 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -264,6 +264,13 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct)
rcu_read_unlock();
}
EXPORT_SYMBOL(nf_conntrack_destroy);
+
+struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly;
+EXPORT_SYMBOL_GPL(nfq_ct_hook);
+
+struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook __read_mostly;
+EXPORT_SYMBOL_GPL(nfq_ct_nat_hook);
+
#endif /* CONFIG_NF_CONNTRACK */
#ifdef CONFIG_PROC_FS
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 819c342f5b30..9730882697aa 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -640,6 +640,14 @@ find_free_id(const char *name, ip_set_id_t *index, struct ip_set **set)
}
static int
+ip_set_none(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const attr[])
+{
+ return -EOPNOTSUPP;
+}
+
+static int
ip_set_create(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const attr[])
@@ -1539,6 +1547,10 @@ nlmsg_failure:
}
static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
+ [IPSET_CMD_NONE] = {
+ .call = ip_set_none,
+ .attr_count = IPSET_ATTR_CMD_MAX,
+ },
[IPSET_CMD_CREATE] = {
.call = ip_set_create,
.attr_count = IPSET_ATTR_CMD_MAX,
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index ee863943c826..d5d3607ae7bc 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -38,30 +38,6 @@ struct iface_node {
#define iface_data(n) (rb_entry(n, struct iface_node, node)->iface)
-static inline long
-ifname_compare(const char *_a, const char *_b)
-{
- const long *a = (const long *)_a;
- const long *b = (const long *)_b;
-
- BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
- if (a[0] != b[0])
- return a[0] - b[0];
- if (IFNAMSIZ > sizeof(long)) {
- if (a[1] != b[1])
- return a[1] - b[1];
- }
- if (IFNAMSIZ > 2 * sizeof(long)) {
- if (a[2] != b[2])
- return a[2] - b[2];
- }
- if (IFNAMSIZ > 3 * sizeof(long)) {
- if (a[3] != b[3])
- return a[3] - b[3];
- }
- return 0;
-}
-
static void
rbtree_destroy(struct rb_root *root)
{
@@ -99,7 +75,7 @@ iface_test(struct rb_root *root, const char **iface)
while (n) {
const char *d = iface_data(n);
- long res = ifname_compare(*iface, d);
+ int res = strcmp(*iface, d);
if (res < 0)
n = n->rb_left;
@@ -121,7 +97,7 @@ iface_add(struct rb_root *root, const char **iface)
while (*n) {
char *ifname = iface_data(*n);
- long res = ifname_compare(*iface, ifname);
+ int res = strcmp(*iface, ifname);
p = *n;
if (res < 0)
@@ -366,7 +342,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
struct hash_netiface4_elem data = { .cidr = HOST_MASK };
u32 ip = 0, ip_to, last;
u32 timeout = h->timeout;
- char iface[IFNAMSIZ] = {};
+ char iface[IFNAMSIZ];
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -663,7 +639,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netiface6_elem data = { .cidr = HOST_MASK };
u32 timeout = h->timeout;
- char iface[IFNAMSIZ] = {};
+ char iface[IFNAMSIZ];
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index a54b018c6eea..b54eccef40b5 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1742,7 +1742,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_reply4,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC - 2,
},
@@ -1752,7 +1752,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_remote_request4,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC - 1,
},
@@ -1760,7 +1760,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_local_reply4,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST + 1,
},
@@ -1768,7 +1768,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_local_request4,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST + 2,
},
@@ -1777,7 +1777,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_forward_icmp,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = 99,
},
@@ -1785,7 +1785,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_reply4,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = 100,
},
@@ -1794,7 +1794,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_reply6,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC - 2,
},
@@ -1804,7 +1804,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_remote_request6,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_NAT_SRC - 1,
},
@@ -1812,7 +1812,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_local_reply6,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST + 1,
},
@@ -1820,7 +1820,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_local_request6,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_NAT_DST + 2,
},
@@ -1829,7 +1829,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_forward_icmp_v6,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = 99,
},
@@ -1837,7 +1837,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
{
.hook = ip_vs_reply6,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = 100,
},
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index dd811b8dd97c..84444dda194b 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -76,19 +76,19 @@ static void __ip_vs_del_service(struct ip_vs_service *svc);
#ifdef CONFIG_IP_VS_IPV6
/* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
-static int __ip_vs_addr_is_local_v6(struct net *net,
- const struct in6_addr *addr)
+static bool __ip_vs_addr_is_local_v6(struct net *net,
+ const struct in6_addr *addr)
{
- struct rt6_info *rt;
struct flowi6 fl6 = {
.daddr = *addr,
};
+ struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);
+ bool is_local;
- rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
- if (rt && rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
- return 1;
+ is_local = !dst->error && dst->dev && (dst->dev->flags & IFF_LOOPBACK);
- return 0;
+ dst_release(dst);
+ return is_local;
}
#endif
@@ -1521,11 +1521,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
{
struct net_device *dev = ptr;
struct net *net = dev_net(dev);
+ struct netns_ipvs *ipvs = net_ipvs(net);
struct ip_vs_service *svc;
struct ip_vs_dest *dest;
unsigned int idx;
- if (event != NETDEV_UNREGISTER)
+ if (event != NETDEV_UNREGISTER || !ipvs)
return NOTIFY_DONE;
IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
EnterFunction(2);
@@ -1551,7 +1552,7 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
}
}
- list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
+ list_for_each_entry(dest, &ipvs->dest_trash, n_list) {
__ip_vs_dev_reset(dest, dev);
}
mutex_unlock(&__ip_vs_mutex);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 7fd66dec859d..65b616ae1716 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -797,7 +797,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error_put;
}
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
df |= (old_iph->frag_off & htons(IP_DF));
@@ -823,7 +823,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NF_STOLEN;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = new_skb;
old_iph = ip_hdr(skb);
}
@@ -913,7 +913,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error_put;
}
if (skb_dst(skb))
- skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
+ skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
!skb_is_gso(skb)) {
@@ -942,7 +942,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NF_STOLEN;
}
- kfree_skb(skb);
+ consume_skb(skb);
skb = new_skb;
old_iph = ipv6_hdr(skb);
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ac3af97cc468..cf4875565d67 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -531,7 +531,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
tstamp = nf_conn_tstamp_find(ct);
if (tstamp) {
if (skb->tstamp.tv64 == 0)
- __net_timestamp((struct sk_buff *)skb);
+ __net_timestamp(skb);
tstamp->start = ktime_to_ns(skb->tstamp);
}
@@ -819,7 +819,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
__set_bit(IPS_EXPECTED_BIT, &ct->status);
ct->master = exp->master;
if (exp->helper) {
- help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+ help = nf_ct_helper_ext_add(ct, exp->helper,
+ GFP_ATOMIC);
if (help)
rcu_assign_pointer(help->helper, exp->helper);
}
@@ -1333,7 +1334,6 @@ static void nf_conntrack_cleanup_init_net(void)
while (untrack_refs() > 0)
schedule();
- nf_conntrack_proto_fini();
#ifdef CONFIG_NF_CONNTRACK_ZONES
nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
@@ -1372,7 +1372,7 @@ void nf_conntrack_cleanup(struct net *net)
netfilter framework. Roll on, two-stage module
delete... */
synchronize_net();
-
+ nf_conntrack_proto_fini(net);
nf_conntrack_cleanup_net(net);
if (net_eq(net, &init_net)) {
@@ -1496,11 +1496,6 @@ static int nf_conntrack_init_init_net(void)
printk(KERN_INFO "nf_conntrack version %s (%u buckets, %d max)\n",
NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
nf_conntrack_max);
-
- ret = nf_conntrack_proto_init();
- if (ret < 0)
- goto err_proto;
-
#ifdef CONFIG_NF_CONNTRACK_ZONES
ret = nf_ct_extend_register(&nf_ct_zone_extend);
if (ret < 0)
@@ -1518,9 +1513,7 @@ static int nf_conntrack_init_init_net(void)
#ifdef CONFIG_NF_CONNTRACK_ZONES
err_extend:
- nf_conntrack_proto_fini();
#endif
-err_proto:
return ret;
}
@@ -1583,9 +1576,7 @@ static int nf_conntrack_init_net(struct net *net)
ret = nf_conntrack_helper_init(net);
if (ret < 0)
goto err_helper;
-
return 0;
-
err_helper:
nf_conntrack_timeout_fini(net);
err_timeout:
@@ -1622,6 +1613,9 @@ int nf_conntrack_init(struct net *net)
if (ret < 0)
goto out_init_net;
}
+ ret = nf_conntrack_proto_init(net);
+ if (ret < 0)
+ goto out_proto;
ret = nf_conntrack_init_net(net);
if (ret < 0)
goto out_net;
@@ -1637,6 +1631,8 @@ int nf_conntrack_init(struct net *net)
return 0;
out_net:
+ nf_conntrack_proto_fini(net);
+out_proto:
if (net_eq(net, &init_net))
nf_conntrack_cleanup_init_net();
out_init_net:
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 641ff5f96718..1a9545965c0d 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -44,7 +44,8 @@ void __nf_ct_ext_destroy(struct nf_conn *ct)
EXPORT_SYMBOL(__nf_ct_ext_destroy);
static void *
-nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
+nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
+ size_t var_alloc_len, gfp_t gfp)
{
unsigned int off, len;
struct nf_ct_ext_type *t;
@@ -54,8 +55,8 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
t = rcu_dereference(nf_ct_ext_types[id]);
BUG_ON(t == NULL);
off = ALIGN(sizeof(struct nf_ct_ext), t->align);
- len = off + t->len;
- alloc_size = t->alloc_size;
+ len = off + t->len + var_alloc_len;
+ alloc_size = t->alloc_size + var_alloc_len;
rcu_read_unlock();
*ext = kzalloc(alloc_size, gfp);
@@ -68,7 +69,8 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
return (void *)(*ext) + off;
}
-void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
+void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
+ size_t var_alloc_len, gfp_t gfp)
{
struct nf_ct_ext *old, *new;
int i, newlen, newoff;
@@ -79,7 +81,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
old = ct->ext;
if (!old)
- return nf_ct_ext_create(&ct->ext, id, gfp);
+ return nf_ct_ext_create(&ct->ext, id, var_alloc_len, gfp);
if (__nf_ct_ext_exist(old, id))
return NULL;
@@ -89,7 +91,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
BUG_ON(t == NULL);
newoff = ALIGN(old->len, t->align);
- newlen = newoff + t->len;
+ newlen = newoff + t->len + var_alloc_len;
rcu_read_unlock();
new = __krealloc(old, newlen, gfp);
@@ -117,7 +119,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
memset((void *)new + newoff, 0, newlen - newoff);
return (void *)new + newoff;
}
-EXPORT_SYMBOL(__nf_ct_ext_add);
+EXPORT_SYMBOL(__nf_ct_ext_add_length);
static void update_alloc_size(struct nf_ct_ext_type *type)
{
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 8c5c95c6d34f..4bb771d1f57a 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -358,7 +358,7 @@ static int help(struct sk_buff *skb,
u32 seq;
int dir = CTINFO2DIR(ctinfo);
unsigned int uninitialized_var(matchlen), uninitialized_var(matchoff);
- struct nf_ct_ftp_master *ct_ftp_info = &nfct_help(ct)->help.ct_ftp_info;
+ struct nf_ct_ftp_master *ct_ftp_info = nfct_help_data(ct);
struct nf_conntrack_expect *exp;
union nf_inet_addr *daddr;
struct nf_conntrack_man cmd = {};
@@ -512,7 +512,6 @@ out_update_nl:
}
static struct nf_conntrack_helper ftp[MAX_PORTS][2] __read_mostly;
-static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")] __read_mostly;
static const struct nf_conntrack_expect_policy ftp_exp_policy = {
.max_expected = 1,
@@ -541,7 +540,6 @@ static void nf_conntrack_ftp_fini(void)
static int __init nf_conntrack_ftp_init(void)
{
int i, j = -1, ret = 0;
- char *tmpname;
ftp_buffer = kmalloc(65536, GFP_KERNEL);
if (!ftp_buffer)
@@ -556,17 +554,16 @@ static int __init nf_conntrack_ftp_init(void)
ftp[i][0].tuple.src.l3num = PF_INET;
ftp[i][1].tuple.src.l3num = PF_INET6;
for (j = 0; j < 2; j++) {
+ ftp[i][j].data_len = sizeof(struct nf_ct_ftp_master);
ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]);
ftp[i][j].tuple.dst.protonum = IPPROTO_TCP;
ftp[i][j].expect_policy = &ftp_exp_policy;
ftp[i][j].me = THIS_MODULE;
ftp[i][j].help = help;
- tmpname = &ftp_names[i][j][0];
if (ports[i] == FTP_PORT)
- sprintf(tmpname, "ftp");
+ sprintf(ftp[i][j].name, "ftp");
else
- sprintf(tmpname, "ftp-%d", ports[i]);
- ftp[i][j].name = tmpname;
+ sprintf(ftp[i][j].name, "ftp-%d", ports[i]);
pr_debug("nf_ct_ftp: registering helper for pf: %d "
"port: %d\n",
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 31f50bc3a312..4283b207e63b 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -114,7 +114,7 @@ static int get_tpkt_data(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct, enum ip_conntrack_info ctinfo,
unsigned char **data, int *datalen, int *dataoff)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
const struct tcphdr *th;
struct tcphdr _tcph;
@@ -617,6 +617,7 @@ static const struct nf_conntrack_expect_policy h245_exp_policy = {
static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = {
.name = "H.245",
.me = THIS_MODULE,
+ .data_len = sizeof(struct nf_ct_h323_master),
.tuple.src.l3num = AF_UNSPEC,
.tuple.dst.protonum = IPPROTO_UDP,
.help = h245_help,
@@ -1169,6 +1170,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_q931[] __read_mostly = {
{
.name = "Q.931",
.me = THIS_MODULE,
+ .data_len = sizeof(struct nf_ct_h323_master),
.tuple.src.l3num = AF_INET,
.tuple.src.u.tcp.port = cpu_to_be16(Q931_PORT),
.tuple.dst.protonum = IPPROTO_TCP,
@@ -1244,7 +1246,7 @@ static int expect_q931(struct sk_buff *skb, struct nf_conn *ct,
unsigned char **data,
TransportAddress *taddr, int count)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int ret = 0;
int i;
@@ -1359,7 +1361,7 @@ static int process_rrq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, RegistrationRequest *rrq)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int ret;
typeof(set_ras_addr_hook) set_ras_addr;
@@ -1394,7 +1396,7 @@ static int process_rcf(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, RegistrationConfirm *rcf)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int ret;
struct nf_conntrack_expect *exp;
@@ -1443,7 +1445,7 @@ static int process_urq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, UnregistrationRequest *urq)
{
- struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
int ret;
typeof(set_sig_addr_hook) set_sig_addr;
@@ -1475,7 +1477,7 @@ static int process_arq(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned char **data, AdmissionRequest *arq)
{
- const struct nf_ct_h323_master *info = &nfct_help(ct)->help.ct_h323_info;
+ const struct nf_ct_h323_master *info = nfct_help_data(ct);
int dir = CTINFO2DIR(ctinfo);
__be16 port;
union nf_inet_addr addr;
@@ -1742,6 +1744,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
{
.name = "RAS",
.me = THIS_MODULE,
+ .data_len = sizeof(struct nf_ct_h323_master),
.tuple.src.l3num = AF_INET,
.tuple.src.u.udp.port = cpu_to_be16(RAS_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
@@ -1751,6 +1754,7 @@ static struct nf_conntrack_helper nf_conntrack_helper_ras[] __read_mostly = {
{
.name = "RAS",
.me = THIS_MODULE,
+ .data_len = sizeof(struct nf_ct_h323_master),
.tuple.src.l3num = AF_INET6,
.tuple.src.u.udp.port = cpu_to_be16(RAS_PORT),
.tuple.dst.protonum = IPPROTO_UDP,
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 4fa2ff961f5a..c4bc637feb76 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -30,8 +30,10 @@
#include <net/netfilter/nf_conntrack_extend.h>
static DEFINE_MUTEX(nf_ct_helper_mutex);
-static struct hlist_head *nf_ct_helper_hash __read_mostly;
-static unsigned int nf_ct_helper_hsize __read_mostly;
+struct hlist_head *nf_ct_helper_hash __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_helper_hash);
+unsigned int nf_ct_helper_hsize __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
static unsigned int nf_ct_helper_count __read_mostly;
static bool nf_ct_auto_assign_helper __read_mostly = true;
@@ -161,11 +163,14 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
-struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
+struct nf_conn_help *
+nf_ct_helper_ext_add(struct nf_conn *ct,
+ struct nf_conntrack_helper *helper, gfp_t gfp)
{
struct nf_conn_help *help;
- help = nf_ct_ext_add(ct, NF_CT_EXT_HELPER, gfp);
+ help = nf_ct_ext_add_length(ct, NF_CT_EXT_HELPER,
+ helper->data_len, gfp);
if (help)
INIT_HLIST_HEAD(&help->expectations);
else
@@ -218,13 +223,19 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
}
if (help == NULL) {
- help = nf_ct_helper_ext_add(ct, flags);
+ help = nf_ct_helper_ext_add(ct, helper, flags);
if (help == NULL) {
ret = -ENOMEM;
goto out;
}
} else {
- memset(&help->help, 0, sizeof(help->help));
+ /* We only allow helper re-assignment of the same sort since
+ * we cannot reallocate the helper extension area.
+ */
+ if (help->helper != helper) {
+ RCU_INIT_POINTER(help->helper, NULL);
+ goto out;
+ }
}
rcu_assign_pointer(help->helper, helper);
@@ -319,6 +330,9 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
+ int ret = 0;
+ struct nf_conntrack_helper *cur;
+ struct hlist_node *n;
unsigned int h = helper_hash(&me->tuple);
BUG_ON(me->expect_policy == NULL);
@@ -326,11 +340,19 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);
mutex_lock(&nf_ct_helper_mutex);
+ hlist_for_each_entry(cur, n, &nf_ct_helper_hash[h], hnode) {
+ if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 &&
+ cur->tuple.src.l3num == me->tuple.src.l3num &&
+ cur->tuple.dst.protonum == me->tuple.dst.protonum) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
nf_ct_helper_count++;
+out:
mutex_unlock(&nf_ct_helper_mutex);
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_register);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 81366c118271..009c52cfd1ec 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -221,7 +221,6 @@ static int help(struct sk_buff *skb, unsigned int protoff,
}
static struct nf_conntrack_helper irc[MAX_PORTS] __read_mostly;
-static char irc_names[MAX_PORTS][sizeof("irc-65535")] __read_mostly;
static struct nf_conntrack_expect_policy irc_exp_policy;
static void nf_conntrack_irc_fini(void);
@@ -229,7 +228,6 @@ static void nf_conntrack_irc_fini(void);
static int __init nf_conntrack_irc_init(void)
{
int i, ret;
- char *tmpname;
if (max_dcc_channels < 1) {
printk(KERN_ERR "nf_ct_irc: max_dcc_channels must not be zero\n");
@@ -255,12 +253,10 @@ static int __init nf_conntrack_irc_init(void)
irc[i].me = THIS_MODULE;
irc[i].help = help;
- tmpname = &irc_names[i][0];
if (ports[i] == IRC_PORT)
- sprintf(tmpname, "irc");
+ sprintf(irc[i].name, "irc");
else
- sprintf(tmpname, "irc-%u", i);
- irc[i].name = tmpname;
+ sprintf(irc[i].name, "irc-%u", i);
ret = nf_conntrack_helper_register(&irc[i]);
if (ret) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6f4b00a8fc73..14f67a2cbcb5 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -4,7 +4,7 @@
* (C) 2001 by Jay Schulist <jschlst@samba.org>
* (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
* (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2012 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* Initial connection tracking via netlink development funded and
* generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -46,6 +46,7 @@
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_protocol.h>
+#include <net/netfilter/nf_nat_helper.h>
#endif
#include <linux/netfilter/nfnetlink.h>
@@ -477,7 +478,6 @@ nla_put_failure:
return -1;
}
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
static inline size_t
ctnetlink_proto_size(const struct nf_conn *ct)
{
@@ -564,6 +564,7 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
;
}
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
@@ -901,7 +902,8 @@ static const struct nla_policy help_nla_policy[CTA_HELP_MAX+1] = {
};
static inline int
-ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
+ctnetlink_parse_help(const struct nlattr *attr, char **helper_name,
+ struct nlattr **helpinfo)
{
struct nlattr *tb[CTA_HELP_MAX+1];
@@ -912,6 +914,9 @@ ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
*helper_name = nla_data(tb[CTA_HELP_NAME]);
+ if (tb[CTA_HELP_INFO])
+ *helpinfo = tb[CTA_HELP_INFO];
+
return 0;
}
@@ -1172,13 +1177,14 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
struct nf_conntrack_helper *helper;
struct nf_conn_help *help = nfct_help(ct);
char *helpname = NULL;
+ struct nlattr *helpinfo = NULL;
int err;
/* don't change helper of sibling connections */
if (ct->master)
return -EBUSY;
- err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
+ err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
if (err < 0)
return err;
@@ -1213,20 +1219,17 @@ ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
}
if (help) {
- if (help->helper == helper)
+ if (help->helper == helper) {
+ /* update private helper data if allowed. */
+ if (helper->from_nlattr && helpinfo)
+ helper->from_nlattr(helpinfo, ct);
return 0;
- if (help->helper)
+ } else
return -EBUSY;
- /* need to zero data of old helper */
- memset(&help->help, 0, sizeof(help->help));
- } else {
- /* we cannot set a helper for an existing conntrack */
- return -EOPNOTSUPP;
}
- rcu_assign_pointer(help->helper, helper);
-
- return 0;
+ /* we cannot set a helper for an existing conntrack */
+ return -EOPNOTSUPP;
}
static inline int
@@ -1410,8 +1413,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
rcu_read_lock();
if (cda[CTA_HELP]) {
char *helpname = NULL;
-
- err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
+ struct nlattr *helpinfo = NULL;
+
+ err = ctnetlink_parse_help(cda[CTA_HELP], &helpname, &helpinfo);
if (err < 0)
goto err2;
@@ -1440,11 +1444,14 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
} else {
struct nf_conn_help *help;
- help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+ help = nf_ct_helper_ext_add(ct, helper, GFP_ATOMIC);
if (help == NULL) {
err = -ENOMEM;
goto err2;
}
+ /* set private helper data if allowed. */
+ if (helper->from_nlattr && helpinfo)
+ helper->from_nlattr(helpinfo, ct);
/* not in hash table yet so not strictly necessary */
RCU_INIT_POINTER(help->helper, helper);
@@ -1620,6 +1627,288 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
return err;
}
+static int
+ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
+ __u16 cpu, const struct ip_conntrack_stat *st)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+
+ event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS_CPU);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = htons(cpu);
+
+ if (nla_put_be32(skb, CTA_STATS_SEARCHED, htonl(st->searched)) ||
+ nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
+ nla_put_be32(skb, CTA_STATS_NEW, htonl(st->new)) ||
+ nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
+ nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
+ nla_put_be32(skb, CTA_STATS_DELETE, htonl(st->delete)) ||
+ nla_put_be32(skb, CTA_STATS_DELETE_LIST, htonl(st->delete_list)) ||
+ nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
+ nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
+ htonl(st->insert_failed)) ||
+ nla_put_be32(skb, CTA_STATS_DROP, htonl(st->drop)) ||
+ nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
+ nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
+ nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
+ htonl(st->search_restart)))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nla_put_failure:
+nlmsg_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+ctnetlink_ct_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int cpu;
+ struct net *net = sock_net(skb->sk);
+
+ if (cb->args[0] == nr_cpu_ids)
+ return 0;
+
+ for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
+ const struct ip_conntrack_stat *st;
+
+ if (!cpu_possible(cpu))
+ continue;
+
+ st = per_cpu_ptr(net->ct.stat, cpu);
+ if (ctnetlink_ct_stat_cpu_fill_info(skb,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ cpu, st) < 0)
+ break;
+ }
+ cb->args[0] = cpu;
+
+ return skb->len;
+}
+
+static int
+ctnetlink_stat_ct_cpu(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = ctnetlink_ct_stat_cpu_dump,
+ };
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
+
+ return 0;
+}
+
+static int
+ctnetlink_stat_ct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ struct net *net)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+ unsigned int nr_conntracks = atomic_read(&net->ct.count);
+
+ event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_CT_GET_STATS);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_be32(skb, CTA_STATS_GLOBAL_ENTRIES, htonl(nr_conntracks)))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nla_put_failure:
+nlmsg_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+ctnetlink_stat_ct(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ struct sk_buff *skb2;
+ int err;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL)
+ return -ENOMEM;
+
+ err = ctnetlink_stat_ct_fill_info(skb2, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ sock_net(skb->sk));
+ if (err <= 0)
+ goto free;
+
+ err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+ if (err < 0)
+ goto out;
+
+ return 0;
+
+free:
+ kfree_skb(skb2);
+out:
+ /* this avoids a loop in nfnetlink. */
+ return err == -EAGAIN ? -ENOBUFS : err;
+}
+
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+static size_t
+ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
+{
+ return 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
+ + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
+ + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
+ + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
+ + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
+ + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
+ + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
+ + nla_total_size(0) /* CTA_PROTOINFO */
+ + nla_total_size(0) /* CTA_HELP */
+ + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
+ + ctnetlink_secctx_size(ct)
+#ifdef CONFIG_NF_NAT_NEEDED
+ + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
+ + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
+#endif
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
+#endif
+ + ctnetlink_proto_size(ct)
+ ;
+}
+
+static int
+ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
+{
+ struct nlattr *nest_parms;
+
+ rcu_read_lock();
+ nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+ if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
+ goto nla_put_failure;
+ nla_nest_end(skb, nest_parms);
+
+ nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+ if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
+ goto nla_put_failure;
+ nla_nest_end(skb, nest_parms);
+
+ if (nf_ct_zone(ct)) {
+ if (nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
+ goto nla_put_failure;
+ }
+
+ if (ctnetlink_dump_id(skb, ct) < 0)
+ goto nla_put_failure;
+
+ if (ctnetlink_dump_status(skb, ct) < 0)
+ goto nla_put_failure;
+
+ if (ctnetlink_dump_timeout(skb, ct) < 0)
+ goto nla_put_failure;
+
+ if (ctnetlink_dump_protoinfo(skb, ct) < 0)
+ goto nla_put_failure;
+
+ if (ctnetlink_dump_helpinfo(skb, ct) < 0)
+ goto nla_put_failure;
+
+#ifdef CONFIG_NF_CONNTRACK_SECMARK
+ if (ct->secmark && ctnetlink_dump_secctx(skb, ct) < 0)
+ goto nla_put_failure;
+#endif
+ if (ct->master && ctnetlink_dump_master(skb, ct) < 0)
+ goto nla_put_failure;
+
+ if ((ct->status & IPS_SEQ_ADJUST) &&
+ ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
+ goto nla_put_failure;
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ if (ct->mark && ctnetlink_dump_mark(skb, ct) < 0)
+ goto nla_put_failure;
+#endif
+ rcu_read_unlock();
+ return 0;
+
+nla_put_failure:
+ rcu_read_unlock();
+ return -ENOSPC;
+}
+
+static int
+ctnetlink_nfqueue_parse_ct(const struct nlattr *cda[], struct nf_conn *ct)
+{
+ int err;
+
+ if (cda[CTA_TIMEOUT]) {
+ err = ctnetlink_change_timeout(ct, cda);
+ if (err < 0)
+ return err;
+ }
+ if (cda[CTA_STATUS]) {
+ err = ctnetlink_change_status(ct, cda);
+ if (err < 0)
+ return err;
+ }
+ if (cda[CTA_HELP]) {
+ err = ctnetlink_change_helper(ct, cda);
+ if (err < 0)
+ return err;
+ }
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+ if (cda[CTA_MARK])
+ ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
+#endif
+ return 0;
+}
+
+static int
+ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
+{
+ struct nlattr *cda[CTA_MAX+1];
+
+ nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy);
+
+ return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct);
+}
+
+static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
+ .build_size = ctnetlink_nfqueue_build_size,
+ .build = ctnetlink_nfqueue_build,
+ .parse = ctnetlink_nfqueue_parse,
+};
+#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
+
/***********************************************************************
* EXPECT
***********************************************************************/
@@ -2300,6 +2589,79 @@ ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
return err;
}
+static int
+ctnetlink_exp_stat_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int cpu,
+ const struct ip_conntrack_stat *st)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0, event;
+
+ event = (NFNL_SUBSYS_CTNETLINK << 8 | IPCTNL_MSG_EXP_GET_STATS_CPU);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = htons(cpu);
+
+ if (nla_put_be32(skb, CTA_STATS_EXP_NEW, htonl(st->expect_new)) ||
+ nla_put_be32(skb, CTA_STATS_EXP_CREATE, htonl(st->expect_create)) ||
+ nla_put_be32(skb, CTA_STATS_EXP_DELETE, htonl(st->expect_delete)))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nla_put_failure:
+nlmsg_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+ctnetlink_exp_stat_cpu_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int cpu;
+ struct net *net = sock_net(skb->sk);
+
+ if (cb->args[0] == nr_cpu_ids)
+ return 0;
+
+ for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
+ const struct ip_conntrack_stat *st;
+
+ if (!cpu_possible(cpu))
+ continue;
+
+ st = per_cpu_ptr(net->ct.stat, cpu);
+ if (ctnetlink_exp_stat_fill_info(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ cpu, st) < 0)
+ break;
+ }
+ cb->args[0] = cpu;
+
+ return skb->len;
+}
+
+static int
+ctnetlink_stat_exp_cpu(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = ctnetlink_exp_stat_cpu_dump,
+ };
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_NF_CONNTRACK_EVENTS
static struct nf_ct_event_notifier ctnl_notifier = {
.fcn = ctnetlink_conntrack_event,
@@ -2323,6 +2685,8 @@ static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
[IPCTNL_MSG_CT_GET_CTRZERO] = { .call = ctnetlink_get_conntrack,
.attr_count = CTA_MAX,
.policy = ct_nla_policy },
+ [IPCTNL_MSG_CT_GET_STATS_CPU] = { .call = ctnetlink_stat_ct_cpu },
+ [IPCTNL_MSG_CT_GET_STATS] = { .call = ctnetlink_stat_ct },
};
static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
@@ -2335,6 +2699,7 @@ static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
[IPCTNL_MSG_EXP_DELETE] = { .call = ctnetlink_del_expect,
.attr_count = CTA_EXPECT_MAX,
.policy = exp_nla_policy },
+ [IPCTNL_MSG_EXP_GET_STATS_CPU] = { .call = ctnetlink_stat_exp_cpu },
};
static const struct nfnetlink_subsystem ctnl_subsys = {
@@ -2424,7 +2789,10 @@ static int __init ctnetlink_init(void)
pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
-
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+ /* setup interaction between nf_queue and nf_conntrack_netlink. */
+ RCU_INIT_POINTER(nfq_ct_hook, &ctnetlink_nfqueue_hook);
+#endif
return 0;
err_unreg_exp_subsys:
@@ -2442,6 +2810,9 @@ static void __exit ctnetlink_exit(void)
unregister_pernet_subsys(&ctnetlink_net_ops);
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
nfnetlink_subsys_unregister(&ctnl_subsys);
+#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
+ RCU_INIT_POINTER(nfq_ct_hook, NULL);
+#endif
}
module_init(ctnetlink_init);
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 31d56b23b9e9..6fed9ec35248 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -174,7 +174,7 @@ static int destroy_sibling_or_exp(struct net *net, struct nf_conn *ct,
static void pptp_destroy_siblings(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
- const struct nf_conn_help *help = nfct_help(ct);
+ const struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
struct nf_conntrack_tuple t;
nf_ct_gre_keymap_destroy(ct);
@@ -182,16 +182,16 @@ static void pptp_destroy_siblings(struct nf_conn *ct)
/* try original (pns->pac) tuple */
memcpy(&t, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, sizeof(t));
t.dst.protonum = IPPROTO_GRE;
- t.src.u.gre.key = help->help.ct_pptp_info.pns_call_id;
- t.dst.u.gre.key = help->help.ct_pptp_info.pac_call_id;
+ t.src.u.gre.key = ct_pptp_info->pns_call_id;
+ t.dst.u.gre.key = ct_pptp_info->pac_call_id;
if (!destroy_sibling_or_exp(net, ct, &t))
pr_debug("failed to timeout original pns->pac ct/exp\n");
/* try reply (pac->pns) tuple */
memcpy(&t, &ct->tuplehash[IP_CT_DIR_REPLY].tuple, sizeof(t));
t.dst.protonum = IPPROTO_GRE;
- t.src.u.gre.key = help->help.ct_pptp_info.pac_call_id;
- t.dst.u.gre.key = help->help.ct_pptp_info.pns_call_id;
+ t.src.u.gre.key = ct_pptp_info->pac_call_id;
+ t.dst.u.gre.key = ct_pptp_info->pns_call_id;
if (!destroy_sibling_or_exp(net, ct, &t))
pr_debug("failed to timeout reply pac->pns ct/exp\n");
}
@@ -269,7 +269,7 @@ pptp_inbound_pkt(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
- struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+ struct nf_ct_pptp_master *info = nfct_help_data(ct);
u_int16_t msg;
__be16 cid = 0, pcid = 0;
typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound;
@@ -396,7 +396,7 @@ pptp_outbound_pkt(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
- struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+ struct nf_ct_pptp_master *info = nfct_help_data(ct);
u_int16_t msg;
__be16 cid = 0, pcid = 0;
typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound;
@@ -506,7 +506,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
{
int dir = CTINFO2DIR(ctinfo);
- const struct nf_ct_pptp_master *info = &nfct_help(ct)->help.ct_pptp_info;
+ const struct nf_ct_pptp_master *info = nfct_help_data(ct);
const struct tcphdr *tcph;
struct tcphdr _tcph;
const struct pptp_pkt_hdr *pptph;
@@ -592,6 +592,7 @@ static const struct nf_conntrack_expect_policy pptp_exp_policy = {
static struct nf_conntrack_helper pptp __read_mostly = {
.name = "pptp",
.me = THIS_MODULE,
+ .data_len = sizeof(struct nf_ct_pptp_master),
.tuple.src.l3num = AF_INET,
.tuple.src.u.tcp.port = cpu_to_be16(PPTP_CONTROL_PORT),
.tuple.dst.protonum = IPPROTO_TCP,
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 8b631b07a645..0dc63854390f 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -36,28 +36,32 @@ static DEFINE_MUTEX(nf_ct_proto_mutex);
#ifdef CONFIG_SYSCTL
static int
-nf_ct_register_sysctl(struct ctl_table_header **header, const char *path,
- struct ctl_table *table, unsigned int *users)
+nf_ct_register_sysctl(struct net *net,
+ struct ctl_table_header **header,
+ const char *path,
+ struct ctl_table *table)
{
if (*header == NULL) {
- *header = register_net_sysctl(&init_net, path, table);
+ *header = register_net_sysctl(net, path, table);
if (*header == NULL)
return -ENOMEM;
}
- if (users != NULL)
- (*users)++;
+
return 0;
}
static void
nf_ct_unregister_sysctl(struct ctl_table_header **header,
- struct ctl_table *table, unsigned int *users)
+ struct ctl_table **table,
+ unsigned int users)
{
- if (users != NULL && --*users > 0)
+ if (users > 0)
return;
unregister_net_sysctl_table(*header);
+ kfree(*table);
*header = NULL;
+ *table = NULL;
}
#endif
@@ -161,30 +165,56 @@ static int kill_l4proto(struct nf_conn *i, void *data)
nf_ct_l3num(i) == l4proto->l3proto;
}
-static int nf_ct_l3proto_register_sysctl(struct nf_conntrack_l3proto *l3proto)
+static struct nf_ip_net *nf_ct_l3proto_net(struct net *net,
+ struct nf_conntrack_l3proto *l3proto)
{
- int err = 0;
+ if (l3proto->l3proto == PF_INET)
+ return &net->ct.nf_ct_proto;
+ else
+ return NULL;
+}
-#ifdef CONFIG_SYSCTL
- if (l3proto->ctl_table != NULL) {
- err = nf_ct_register_sysctl(&l3proto->ctl_table_header,
+static int nf_ct_l3proto_register_sysctl(struct net *net,
+ struct nf_conntrack_l3proto *l3proto)
+{
+ int err = 0;
+ struct nf_ip_net *in = nf_ct_l3proto_net(net, l3proto);
+ /* nf_conntrack_l3proto_ipv6 doesn't support sysctl */
+ if (in == NULL)
+ return 0;
+
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ if (in->ctl_table != NULL) {
+ err = nf_ct_register_sysctl(net,
+ &in->ctl_table_header,
l3proto->ctl_table_path,
- l3proto->ctl_table, NULL);
+ in->ctl_table);
+ if (err < 0) {
+ kfree(in->ctl_table);
+ in->ctl_table = NULL;
+ }
}
#endif
return err;
}
-static void nf_ct_l3proto_unregister_sysctl(struct nf_conntrack_l3proto *l3proto)
+static void nf_ct_l3proto_unregister_sysctl(struct net *net,
+ struct nf_conntrack_l3proto *l3proto)
{
-#ifdef CONFIG_SYSCTL
- if (l3proto->ctl_table_header != NULL)
- nf_ct_unregister_sysctl(&l3proto->ctl_table_header,
- l3proto->ctl_table, NULL);
+ struct nf_ip_net *in = nf_ct_l3proto_net(net, l3proto);
+
+ if (in == NULL)
+ return;
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
+ if (in->ctl_table_header != NULL)
+ nf_ct_unregister_sysctl(&in->ctl_table_header,
+ &in->ctl_table,
+ 0);
#endif
}
-int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
+static int
+nf_conntrack_l3proto_register_net(struct nf_conntrack_l3proto *proto)
{
int ret = 0;
struct nf_conntrack_l3proto *old;
@@ -203,10 +233,6 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
goto out_unlock;
}
- ret = nf_ct_l3proto_register_sysctl(proto);
- if (ret < 0)
- goto out_unlock;
-
if (proto->nlattr_tuple_size)
proto->nla_size = 3 * proto->nlattr_tuple_size();
@@ -215,13 +241,37 @@ int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
return ret;
+
}
-EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
-void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
+int nf_conntrack_l3proto_register(struct net *net,
+ struct nf_conntrack_l3proto *proto)
{
- struct net *net;
+ int ret = 0;
+
+ if (proto->init_net) {
+ ret = proto->init_net(net);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = nf_ct_l3proto_register_sysctl(net, proto);
+ if (ret < 0)
+ return ret;
+ if (net == &init_net) {
+ ret = nf_conntrack_l3proto_register_net(proto);
+ if (ret < 0)
+ nf_ct_l3proto_unregister_sysctl(net, proto);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_register);
+
+static void
+nf_conntrack_l3proto_unregister_net(struct nf_conntrack_l3proto *proto)
+{
BUG_ON(proto->l3proto >= AF_MAX);
mutex_lock(&nf_ct_proto_mutex);
@@ -230,68 +280,107 @@ void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
) != proto);
rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
&nf_conntrack_l3proto_generic);
- nf_ct_l3proto_unregister_sysctl(proto);
mutex_unlock(&nf_ct_proto_mutex);
synchronize_rcu();
+}
+
+void nf_conntrack_l3proto_unregister(struct net *net,
+ struct nf_conntrack_l3proto *proto)
+{
+ if (net == &init_net)
+ nf_conntrack_l3proto_unregister_net(proto);
+
+ nf_ct_l3proto_unregister_sysctl(net, proto);
/* Remove all contrack entries for this protocol */
rtnl_lock();
- for_each_net(net)
- nf_ct_iterate_cleanup(net, kill_l3proto, proto);
+ nf_ct_iterate_cleanup(net, kill_l3proto, proto);
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_l3proto_unregister);
-static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
+static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
+ struct nf_conntrack_l4proto *l4proto)
+{
+ if (l4proto->get_net_proto) {
+ /* statically built-in protocols use static per-net */
+ return l4proto->get_net_proto(net);
+ } else if (l4proto->net_id) {
+ /* ... and loadable protocols use dynamic per-net */
+ return net_generic(net, *l4proto->net_id);
+ }
+ return NULL;
+}
+
+static
+int nf_ct_l4proto_register_sysctl(struct net *net,
+ struct nf_proto_net *pn,
+ struct nf_conntrack_l4proto *l4proto)
{
int err = 0;
#ifdef CONFIG_SYSCTL
- if (l4proto->ctl_table != NULL) {
- err = nf_ct_register_sysctl(l4proto->ctl_table_header,
+ if (pn->ctl_table != NULL) {
+ err = nf_ct_register_sysctl(net,
+ &pn->ctl_table_header,
"net/netfilter",
- l4proto->ctl_table,
- l4proto->ctl_table_users);
- if (err < 0)
- goto out;
+ pn->ctl_table);
+ if (err < 0) {
+ if (!pn->users) {
+ kfree(pn->ctl_table);
+ pn->ctl_table = NULL;
+ }
+ }
}
#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- if (l4proto->ctl_compat_table != NULL) {
- err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header,
+ if (l4proto->l3proto != AF_INET6 && pn->ctl_compat_table != NULL) {
+ if (err < 0) {
+ nf_ct_kfree_compat_sysctl_table(pn);
+ goto out;
+ }
+ err = nf_ct_register_sysctl(net,
+ &pn->ctl_compat_header,
"net/ipv4/netfilter",
- l4proto->ctl_compat_table, NULL);
+ pn->ctl_compat_table);
if (err == 0)
goto out;
- nf_ct_unregister_sysctl(l4proto->ctl_table_header,
- l4proto->ctl_table,
- l4proto->ctl_table_users);
+
+ nf_ct_kfree_compat_sysctl_table(pn);
+ nf_ct_unregister_sysctl(&pn->ctl_table_header,
+ &pn->ctl_table,
+ pn->users);
}
-#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
out:
+#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
return err;
}
-static void nf_ct_l4proto_unregister_sysctl(struct nf_conntrack_l4proto *l4proto)
+static
+void nf_ct_l4proto_unregister_sysctl(struct net *net,
+ struct nf_proto_net *pn,
+ struct nf_conntrack_l4proto *l4proto)
{
#ifdef CONFIG_SYSCTL
- if (l4proto->ctl_table_header != NULL &&
- *l4proto->ctl_table_header != NULL)
- nf_ct_unregister_sysctl(l4proto->ctl_table_header,
- l4proto->ctl_table,
- l4proto->ctl_table_users);
+ if (pn->ctl_table_header != NULL)
+ nf_ct_unregister_sysctl(&pn->ctl_table_header,
+ &pn->ctl_table,
+ pn->users);
+
#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- if (l4proto->ctl_compat_table_header != NULL)
- nf_ct_unregister_sysctl(&l4proto->ctl_compat_table_header,
- l4proto->ctl_compat_table, NULL);
+ if (l4proto->l3proto != AF_INET6 && pn->ctl_compat_header != NULL)
+ nf_ct_unregister_sysctl(&pn->ctl_compat_header,
+ &pn->ctl_compat_table,
+ 0);
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
}
/* FIXME: Allow NULL functions and sub in pointers to generic for
them. --RR */
-int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
+static int
+nf_conntrack_l4proto_register_net(struct nf_conntrack_l4proto *l4proto)
{
int ret = 0;
@@ -333,10 +422,6 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
goto out_unlock;
}
- ret = nf_ct_l4proto_register_sysctl(l4proto);
- if (ret < 0)
- goto out_unlock;
-
l4proto->nla_size = 0;
if (l4proto->nlattr_size)
l4proto->nla_size += l4proto->nlattr_size();
@@ -345,17 +430,48 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto)
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
l4proto);
-
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
return ret;
}
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
-void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
+int nf_conntrack_l4proto_register(struct net *net,
+ struct nf_conntrack_l4proto *l4proto)
{
- struct net *net;
+ int ret = 0;
+ struct nf_proto_net *pn = NULL;
+ if (l4proto->init_net) {
+ ret = l4proto->init_net(net, l4proto->l3proto);
+ if (ret < 0)
+ goto out;
+ }
+
+ pn = nf_ct_l4proto_net(net, l4proto);
+ if (pn == NULL)
+ goto out;
+
+ ret = nf_ct_l4proto_register_sysctl(net, pn, l4proto);
+ if (ret < 0)
+ goto out;
+
+ if (net == &init_net) {
+ ret = nf_conntrack_l4proto_register_net(l4proto);
+ if (ret < 0) {
+ nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
+ goto out;
+ }
+ }
+
+ pn->users++;
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_register);
+
+static void
+nf_conntrack_l4proto_unregister_net(struct nf_conntrack_l4proto *l4proto)
+{
BUG_ON(l4proto->l3proto >= PF_MAX);
mutex_lock(&nf_ct_proto_mutex);
@@ -365,41 +481,73 @@ void nf_conntrack_l4proto_unregister(struct nf_conntrack_l4proto *l4proto)
) != l4proto);
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
&nf_conntrack_l4proto_generic);
- nf_ct_l4proto_unregister_sysctl(l4proto);
mutex_unlock(&nf_ct_proto_mutex);
synchronize_rcu();
+}
+
+void nf_conntrack_l4proto_unregister(struct net *net,
+ struct nf_conntrack_l4proto *l4proto)
+{
+ struct nf_proto_net *pn = NULL;
+
+ if (net == &init_net)
+ nf_conntrack_l4proto_unregister_net(l4proto);
+
+ pn = nf_ct_l4proto_net(net, l4proto);
+ if (pn == NULL)
+ return;
+
+ pn->users--;
+ nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
/* Remove all contrack entries for this protocol */
rtnl_lock();
- for_each_net(net)
- nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
+ nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_unregister);
-int nf_conntrack_proto_init(void)
+int nf_conntrack_proto_init(struct net *net)
{
unsigned int i;
int err;
+ struct nf_proto_net *pn = nf_ct_l4proto_net(net,
+ &nf_conntrack_l4proto_generic);
- err = nf_ct_l4proto_register_sysctl(&nf_conntrack_l4proto_generic);
+ err = nf_conntrack_l4proto_generic.init_net(net,
+ nf_conntrack_l4proto_generic.l3proto);
+ if (err < 0)
+ return err;
+ err = nf_ct_l4proto_register_sysctl(net,
+ pn,
+ &nf_conntrack_l4proto_generic);
if (err < 0)
return err;
- for (i = 0; i < AF_MAX; i++)
- rcu_assign_pointer(nf_ct_l3protos[i],
- &nf_conntrack_l3proto_generic);
+ if (net == &init_net) {
+ for (i = 0; i < AF_MAX; i++)
+ rcu_assign_pointer(nf_ct_l3protos[i],
+ &nf_conntrack_l3proto_generic);
+ }
+
+ pn->users++;
return 0;
}
-void nf_conntrack_proto_fini(void)
+void nf_conntrack_proto_fini(struct net *net)
{
unsigned int i;
-
- nf_ct_l4proto_unregister_sysctl(&nf_conntrack_l4proto_generic);
-
- /* free l3proto protocol tables */
- for (i = 0; i < PF_MAX; i++)
- kfree(nf_ct_protos[i]);
+ struct nf_proto_net *pn = nf_ct_l4proto_net(net,
+ &nf_conntrack_l4proto_generic);
+
+ pn->users--;
+ nf_ct_l4proto_unregister_sysctl(net,
+ pn,
+ &nf_conntrack_l4proto_generic);
+ if (net == &init_net) {
+ /* free l3proto protocol tables */
+ for (i = 0; i < PF_MAX; i++)
+ kfree(nf_ct_protos[i]);
+ }
}
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index ef706a485be1..6535326cf07c 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -387,12 +387,9 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
/* this module per-net specifics */
static int dccp_net_id __read_mostly;
struct dccp_net {
+ struct nf_proto_net pn;
int dccp_loose;
unsigned int dccp_timeout[CT_DCCP_MAX + 1];
-#ifdef CONFIG_SYSCTL
- struct ctl_table_header *sysctl_header;
- struct ctl_table *sysctl_table;
-#endif
};
static inline struct dccp_net *dccp_pernet(struct net *net)
@@ -715,9 +712,10 @@ static int dccp_nlattr_size(void)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
- struct dccp_net *dn = dccp_pernet(&init_net);
+ struct dccp_net *dn = dccp_pernet(net);
unsigned int *timeouts = data;
int i;
@@ -817,6 +815,51 @@ static struct ctl_table dccp_sysctl_table[] = {
};
#endif /* CONFIG_SYSCTL */
+static int dccp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct dccp_net *dn)
+{
+#ifdef CONFIG_SYSCTL
+ if (pn->ctl_table)
+ return 0;
+
+ pn->ctl_table = kmemdup(dccp_sysctl_table,
+ sizeof(dccp_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
+ pn->ctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
+ pn->ctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
+ pn->ctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
+ pn->ctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
+ pn->ctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
+ pn->ctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
+ pn->ctl_table[7].data = &dn->dccp_loose;
+#endif
+ return 0;
+}
+
+static int dccp_init_net(struct net *net, u_int16_t proto)
+{
+ struct dccp_net *dn = dccp_pernet(net);
+ struct nf_proto_net *pn = &dn->pn;
+
+ if (!pn->users) {
+ /* default values */
+ dn->dccp_loose = 1;
+ dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
+ dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
+ dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
+ dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
+ dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
+ }
+
+ return dccp_kmemdup_sysctl_table(pn, dn);
+}
+
static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.l3proto = AF_INET,
.l4proto = IPPROTO_DCCP,
@@ -847,6 +890,8 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+ .net_id = &dccp_net_id,
+ .init_net = dccp_init_net,
};
static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
@@ -879,55 +924,39 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+ .net_id = &dccp_net_id,
+ .init_net = dccp_init_net,
};
static __net_init int dccp_net_init(struct net *net)
{
- struct dccp_net *dn = dccp_pernet(net);
-
- /* default values */
- dn->dccp_loose = 1;
- dn->dccp_timeout[CT_DCCP_REQUEST] = 2 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_RESPOND] = 4 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_PARTOPEN] = 4 * DCCP_MSL;
- dn->dccp_timeout[CT_DCCP_OPEN] = 12 * 3600 * HZ;
- dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
- dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
- dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
-
-#ifdef CONFIG_SYSCTL
- dn->sysctl_table = kmemdup(dccp_sysctl_table,
- sizeof(dccp_sysctl_table), GFP_KERNEL);
- if (!dn->sysctl_table)
- return -ENOMEM;
-
- dn->sysctl_table[0].data = &dn->dccp_timeout[CT_DCCP_REQUEST];
- dn->sysctl_table[1].data = &dn->dccp_timeout[CT_DCCP_RESPOND];
- dn->sysctl_table[2].data = &dn->dccp_timeout[CT_DCCP_PARTOPEN];
- dn->sysctl_table[3].data = &dn->dccp_timeout[CT_DCCP_OPEN];
- dn->sysctl_table[4].data = &dn->dccp_timeout[CT_DCCP_CLOSEREQ];
- dn->sysctl_table[5].data = &dn->dccp_timeout[CT_DCCP_CLOSING];
- dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
- dn->sysctl_table[7].data = &dn->dccp_loose;
-
- dn->sysctl_header = register_net_sysctl(net, "net/netfilter",
- dn->sysctl_table);
- if (!dn->sysctl_header) {
- kfree(dn->sysctl_table);
- return -ENOMEM;
+ int ret = 0;
+ ret = nf_conntrack_l4proto_register(net,
+ &dccp_proto4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_dccp4 :protocol register failed.\n");
+ goto out;
+ }
+ ret = nf_conntrack_l4proto_register(net,
+ &dccp_proto6);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_dccp6 :protocol register failed.\n");
+ goto cleanup_dccp4;
}
-#endif
-
return 0;
+cleanup_dccp4:
+ nf_conntrack_l4proto_unregister(net,
+ &dccp_proto4);
+out:
+ return ret;
}
static __net_exit void dccp_net_exit(struct net *net)
{
- struct dccp_net *dn = dccp_pernet(net);
-#ifdef CONFIG_SYSCTL
- unregister_net_sysctl_table(dn->sysctl_header);
- kfree(dn->sysctl_table);
-#endif
+ nf_conntrack_l4proto_unregister(net,
+ &dccp_proto6);
+ nf_conntrack_l4proto_unregister(net,
+ &dccp_proto4);
}
static struct pernet_operations dccp_net_ops = {
@@ -939,34 +968,12 @@ static struct pernet_operations dccp_net_ops = {
static int __init nf_conntrack_proto_dccp_init(void)
{
- int err;
-
- err = register_pernet_subsys(&dccp_net_ops);
- if (err < 0)
- goto err1;
-
- err = nf_conntrack_l4proto_register(&dccp_proto4);
- if (err < 0)
- goto err2;
-
- err = nf_conntrack_l4proto_register(&dccp_proto6);
- if (err < 0)
- goto err3;
- return 0;
-
-err3:
- nf_conntrack_l4proto_unregister(&dccp_proto4);
-err2:
- unregister_pernet_subsys(&dccp_net_ops);
-err1:
- return err;
+ return register_pernet_subsys(&dccp_net_ops);
}
static void __exit nf_conntrack_proto_dccp_fini(void)
{
unregister_pernet_subsys(&dccp_net_ops);
- nf_conntrack_l4proto_unregister(&dccp_proto6);
- nf_conntrack_l4proto_unregister(&dccp_proto4);
}
module_init(nf_conntrack_proto_dccp_init);
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index d8923d54b358..d25f29377648 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -14,6 +14,11 @@
static unsigned int nf_ct_generic_timeout __read_mostly = 600*HZ;
+static inline struct nf_generic_net *generic_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic;
+}
+
static bool generic_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
@@ -42,7 +47,7 @@ static int generic_print_tuple(struct seq_file *s,
static unsigned int *generic_get_timeouts(struct net *net)
{
- return &nf_ct_generic_timeout;
+ return &(generic_pernet(net)->timeout);
}
/* Returns verdict for packet, or -1 for invalid. */
@@ -70,16 +75,18 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeout = data;
+ struct nf_generic_net *gn = generic_pernet(net);
if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
*timeout =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
else {
/* Set default generic timeout. */
- *timeout = nf_ct_generic_timeout;
+ *timeout = gn->timeout;
}
return 0;
@@ -106,11 +113,9 @@ generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *generic_sysctl_header;
static struct ctl_table generic_sysctl_table[] = {
{
.procname = "nf_conntrack_generic_timeout",
- .data = &nf_ct_generic_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -121,7 +126,6 @@ static struct ctl_table generic_sysctl_table[] = {
static struct ctl_table generic_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_generic_timeout",
- .data = &nf_ct_generic_timeout,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -131,6 +135,62 @@ static struct ctl_table generic_compat_sysctl_table[] = {
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
+static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct nf_generic_net *gn)
+{
+#ifdef CONFIG_SYSCTL
+ pn->ctl_table = kmemdup(generic_sysctl_table,
+ sizeof(generic_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &gn->timeout;
+#endif
+ return 0;
+}
+
+static int generic_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+ struct nf_generic_net *gn)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ pn->ctl_compat_table = kmemdup(generic_compat_sysctl_table,
+ sizeof(generic_compat_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_compat_table)
+ return -ENOMEM;
+
+ pn->ctl_compat_table[0].data = &gn->timeout;
+#endif
+#endif
+ return 0;
+}
+
+static int generic_init_net(struct net *net, u_int16_t proto)
+{
+ int ret;
+ struct nf_generic_net *gn = generic_pernet(net);
+ struct nf_proto_net *pn = &gn->pn;
+
+ gn->timeout = nf_ct_generic_timeout;
+
+ ret = generic_kmemdup_compat_sysctl_table(pn, gn);
+ if (ret < 0)
+ return ret;
+
+ ret = generic_kmemdup_sysctl_table(pn, gn);
+ if (ret < 0)
+ nf_ct_kfree_compat_sysctl_table(pn);
+
+ return ret;
+}
+
+static struct nf_proto_net *generic_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
{
.l3proto = PF_UNSPEC,
@@ -151,11 +211,6 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
.nla_policy = generic_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_header = &generic_sysctl_header,
- .ctl_table = generic_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- .ctl_compat_table = generic_compat_sysctl_table,
-#endif
-#endif
+ .init_net = generic_init_net,
+ .get_net_proto = generic_get_net_proto,
};
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 4bf6b4e4b776..b09b7af7f6f8 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -54,13 +54,20 @@ static unsigned int gre_timeouts[GRE_CT_MAX] = {
static int proto_gre_net_id __read_mostly;
struct netns_proto_gre {
+ struct nf_proto_net nf;
rwlock_t keymap_lock;
struct list_head keymap_list;
+ unsigned int gre_timeouts[GRE_CT_MAX];
};
+static inline struct netns_proto_gre *gre_pernet(struct net *net)
+{
+ return net_generic(net, proto_gre_net_id);
+}
+
void nf_ct_gre_keymap_flush(struct net *net)
{
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
+ struct netns_proto_gre *net_gre = gre_pernet(net);
struct nf_ct_gre_keymap *km, *tmp;
write_lock_bh(&net_gre->keymap_lock);
@@ -85,7 +92,7 @@ static inline int gre_key_cmpfn(const struct nf_ct_gre_keymap *km,
/* look up the source key for a given tuple */
static __be16 gre_keymap_lookup(struct net *net, struct nf_conntrack_tuple *t)
{
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
+ struct netns_proto_gre *net_gre = gre_pernet(net);
struct nf_ct_gre_keymap *km;
__be16 key = 0;
@@ -109,11 +116,11 @@ int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct nf_conntrack_tuple *t)
{
struct net *net = nf_ct_net(ct);
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
- struct nf_conn_help *help = nfct_help(ct);
+ struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
struct nf_ct_gre_keymap **kmp, *km;
- kmp = &help->help.ct_pptp_info.keymap[dir];
+ kmp = &ct_pptp_info->keymap[dir];
if (*kmp) {
/* check whether it's a retransmission */
read_lock_bh(&net_gre->keymap_lock);
@@ -150,20 +157,20 @@ EXPORT_SYMBOL_GPL(nf_ct_gre_keymap_add);
void nf_ct_gre_keymap_destroy(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
- struct nf_conn_help *help = nfct_help(ct);
+ struct netns_proto_gre *net_gre = gre_pernet(net);
+ struct nf_ct_pptp_master *ct_pptp_info = nfct_help_data(ct);
enum ip_conntrack_dir dir;
pr_debug("entering for ct %p\n", ct);
write_lock_bh(&net_gre->keymap_lock);
for (dir = IP_CT_DIR_ORIGINAL; dir < IP_CT_DIR_MAX; dir++) {
- if (help->help.ct_pptp_info.keymap[dir]) {
+ if (ct_pptp_info->keymap[dir]) {
pr_debug("removing %p from list\n",
- help->help.ct_pptp_info.keymap[dir]);
- list_del(&help->help.ct_pptp_info.keymap[dir]->list);
- kfree(help->help.ct_pptp_info.keymap[dir]);
- help->help.ct_pptp_info.keymap[dir] = NULL;
+ ct_pptp_info->keymap[dir]);
+ list_del(&ct_pptp_info->keymap[dir]->list);
+ kfree(ct_pptp_info->keymap[dir]);
+ ct_pptp_info->keymap[dir] = NULL;
}
}
write_unlock_bh(&net_gre->keymap_lock);
@@ -237,7 +244,7 @@ static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
static unsigned int *gre_get_timeouts(struct net *net)
{
- return gre_timeouts;
+ return gre_pernet(net)->gre_timeouts;
}
/* Returns verdict for packet, and may modify conntrack */
@@ -297,13 +304,15 @@ static void gre_destroy(struct nf_conn *ct)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int gre_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeouts = data;
+ struct netns_proto_gre *net_gre = gre_pernet(net);
/* set default timeouts for GRE. */
- timeouts[GRE_CT_UNREPLIED] = gre_timeouts[GRE_CT_UNREPLIED];
- timeouts[GRE_CT_REPLIED] = gre_timeouts[GRE_CT_REPLIED];
+ timeouts[GRE_CT_UNREPLIED] = net_gre->gre_timeouts[GRE_CT_UNREPLIED];
+ timeouts[GRE_CT_REPLIED] = net_gre->gre_timeouts[GRE_CT_REPLIED];
if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
timeouts[GRE_CT_UNREPLIED] =
@@ -339,6 +348,19 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+static int gre_init_net(struct net *net, u_int16_t proto)
+{
+ struct netns_proto_gre *net_gre = gre_pernet(net);
+ int i;
+
+ rwlock_init(&net_gre->keymap_lock);
+ INIT_LIST_HEAD(&net_gre->keymap_list);
+ for (i = 0; i < GRE_CT_MAX; i++)
+ net_gre->gre_timeouts[i] = gre_timeouts[i];
+
+ return 0;
+}
+
/* protocol helper struct */
static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.l3proto = AF_INET,
@@ -368,20 +390,22 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.nla_policy = gre_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+ .net_id = &proto_gre_net_id,
+ .init_net = gre_init_net,
};
static int proto_gre_net_init(struct net *net)
{
- struct netns_proto_gre *net_gre = net_generic(net, proto_gre_net_id);
-
- rwlock_init(&net_gre->keymap_lock);
- INIT_LIST_HEAD(&net_gre->keymap_list);
-
- return 0;
+ int ret = 0;
+ ret = nf_conntrack_l4proto_register(net, &nf_conntrack_l4proto_gre4);
+ if (ret < 0)
+ pr_err("nf_conntrack_l4proto_gre4 :protocol register failed.\n");
+ return ret;
}
static void proto_gre_net_exit(struct net *net)
{
+ nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_gre4);
nf_ct_gre_keymap_flush(net);
}
@@ -394,20 +418,11 @@ static struct pernet_operations proto_gre_net_ops = {
static int __init nf_ct_proto_gre_init(void)
{
- int rv;
-
- rv = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_gre4);
- if (rv < 0)
- return rv;
- rv = register_pernet_subsys(&proto_gre_net_ops);
- if (rv < 0)
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
- return rv;
+ return register_pernet_subsys(&proto_gre_net_ops);
}
static void __exit nf_ct_proto_gre_fini(void)
{
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_gre4);
unregister_pernet_subsys(&proto_gre_net_ops);
}
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 996db2fa21f7..c746d61f83ed 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -127,6 +127,17 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
}
};
+static int sctp_net_id __read_mostly;
+struct sctp_net {
+ struct nf_proto_net pn;
+ unsigned int timeouts[SCTP_CONNTRACK_MAX];
+};
+
+static inline struct sctp_net *sctp_pernet(struct net *net)
+{
+ return net_generic(net, sctp_net_id);
+}
+
static bool sctp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
@@ -281,7 +292,7 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
static unsigned int *sctp_get_timeouts(struct net *net)
{
- return sctp_timeouts;
+ return sctp_pernet(net)->timeouts;
}
/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
@@ -551,14 +562,16 @@ static int sctp_nlattr_size(void)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeouts = data;
+ struct sctp_net *sn = sctp_pernet(net);
int i;
/* set default SCTP timeouts. */
for (i=0; i<SCTP_CONNTRACK_MAX; i++)
- timeouts[i] = sctp_timeouts[i];
+ timeouts[i] = sn->timeouts[i];
/* there's a 1:1 mapping between attributes and protocol states. */
for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
@@ -599,54 +612,45 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
#ifdef CONFIG_SYSCTL
-static unsigned int sctp_sysctl_table_users;
-static struct ctl_table_header *sctp_sysctl_header;
static struct ctl_table sctp_sysctl_table[] = {
{
.procname = "nf_conntrack_sctp_timeout_closed",
- .data = &sctp_timeouts[SCTP_CONNTRACK_CLOSED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_cookie_wait",
- .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_cookie_echoed",
- .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_established",
- .data = &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_shutdown_sent",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_shutdown_recd",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -658,49 +662,42 @@ static struct ctl_table sctp_sysctl_table[] = {
static struct ctl_table sctp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_sctp_timeout_closed",
- .data = &sctp_timeouts[SCTP_CONNTRACK_CLOSED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_cookie_wait",
- .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_cookie_echoed",
- .data = &sctp_timeouts[SCTP_CONNTRACK_COOKIE_ECHOED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_established",
- .data = &sctp_timeouts[SCTP_CONNTRACK_ESTABLISHED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_shutdown_sent",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_shutdown_recd",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_sctp_timeout_shutdown_ack_sent",
- .data = &sctp_timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -710,6 +707,80 @@ static struct ctl_table sctp_compat_sysctl_table[] = {
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif
+static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct sctp_net *sn)
+{
+#ifdef CONFIG_SYSCTL
+ if (pn->ctl_table)
+ return 0;
+
+ pn->ctl_table = kmemdup(sctp_sysctl_table,
+ sizeof(sctp_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
+ pn->ctl_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
+ pn->ctl_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
+ pn->ctl_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
+ pn->ctl_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
+ pn->ctl_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
+ pn->ctl_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
+#endif
+ return 0;
+}
+
+static int sctp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+ struct sctp_net *sn)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ pn->ctl_compat_table = kmemdup(sctp_compat_sysctl_table,
+ sizeof(sctp_compat_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_compat_table)
+ return -ENOMEM;
+
+ pn->ctl_compat_table[0].data = &sn->timeouts[SCTP_CONNTRACK_CLOSED];
+ pn->ctl_compat_table[1].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_WAIT];
+ pn->ctl_compat_table[2].data = &sn->timeouts[SCTP_CONNTRACK_COOKIE_ECHOED];
+ pn->ctl_compat_table[3].data = &sn->timeouts[SCTP_CONNTRACK_ESTABLISHED];
+ pn->ctl_compat_table[4].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_SENT];
+ pn->ctl_compat_table[5].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_RECD];
+ pn->ctl_compat_table[6].data = &sn->timeouts[SCTP_CONNTRACK_SHUTDOWN_ACK_SENT];
+#endif
+#endif
+ return 0;
+}
+
+static int sctp_init_net(struct net *net, u_int16_t proto)
+{
+ int ret;
+ struct sctp_net *sn = sctp_pernet(net);
+ struct nf_proto_net *pn = &sn->pn;
+
+ if (!pn->users) {
+ int i;
+
+ for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
+ sn->timeouts[i] = sctp_timeouts[i];
+ }
+
+ if (proto == AF_INET) {
+ ret = sctp_kmemdup_compat_sysctl_table(pn, sn);
+ if (ret < 0)
+ return ret;
+
+ ret = sctp_kmemdup_sysctl_table(pn, sn);
+ if (ret < 0)
+ nf_ct_kfree_compat_sysctl_table(pn);
+ } else
+ ret = sctp_kmemdup_sysctl_table(pn, sn);
+
+ return ret;
+}
+
static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.l3proto = PF_INET,
.l4proto = IPPROTO_SCTP,
@@ -740,14 +811,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.nla_policy = sctp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &sctp_sysctl_table_users,
- .ctl_table_header = &sctp_sysctl_header,
- .ctl_table = sctp_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- .ctl_compat_table = sctp_compat_sysctl_table,
-#endif
-#endif
+ .net_id = &sctp_net_id,
+ .init_net = sctp_init_net,
};
static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
@@ -780,40 +845,58 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &sctp_sysctl_table_users,
- .ctl_table_header = &sctp_sysctl_header,
- .ctl_table = sctp_sysctl_table,
-#endif
+ .net_id = &sctp_net_id,
+ .init_net = sctp_init_net,
};
-static int __init nf_conntrack_proto_sctp_init(void)
+static int sctp_net_init(struct net *net)
{
- int ret;
+ int ret = 0;
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp4);
- if (ret) {
- pr_err("nf_conntrack_l4proto_sctp4: protocol register failed\n");
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_sctp4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_sctp4 :protocol register failed.\n");
goto out;
}
- ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_sctp6);
- if (ret) {
- pr_err("nf_conntrack_l4proto_sctp6: protocol register failed\n");
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_sctp6);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_sctp6 :protocol register failed.\n");
goto cleanup_sctp4;
}
+ return 0;
+cleanup_sctp4:
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_sctp4);
+out:
return ret;
+}
- cleanup_sctp4:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
- out:
- return ret;
+static void sctp_net_exit(struct net *net)
+{
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_sctp6);
+ nf_conntrack_l4proto_unregister(net,
+ &nf_conntrack_l4proto_sctp4);
+}
+
+static struct pernet_operations sctp_net_ops = {
+ .init = sctp_net_init,
+ .exit = sctp_net_exit,
+ .id = &sctp_net_id,
+ .size = sizeof(struct sctp_net),
+};
+
+static int __init nf_conntrack_proto_sctp_init(void)
+{
+ return register_pernet_subsys(&sctp_net_ops);
}
static void __exit nf_conntrack_proto_sctp_fini(void)
{
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp6);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_sctp4);
+ unregister_pernet_subsys(&sctp_net_ops);
}
module_init(nf_conntrack_proto_sctp_init);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 21ff1a99f534..a5ac11ebef33 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -270,6 +270,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
}
};
+static inline struct nf_tcp_net *tcp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp;
+}
+
static bool tcp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
{
@@ -516,6 +521,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
u_int8_t pf)
{
struct net *net = nf_ct_net(ct);
+ struct nf_tcp_net *tn = tcp_pernet(net);
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
@@ -720,7 +726,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
} else {
res = false;
if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
- nf_ct_tcp_be_liberal)
+ tn->tcp_be_liberal)
res = true;
if (!res && LOG_INVALID(net, IPPROTO_TCP))
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
@@ -815,7 +821,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
static unsigned int *tcp_get_timeouts(struct net *net)
{
- return tcp_timeouts;
+ return tcp_pernet(net)->timeouts;
}
/* Returns verdict for packet, or -1 for invalid. */
@@ -828,6 +834,7 @@ static int tcp_packet(struct nf_conn *ct,
unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
+ struct nf_tcp_net *tn = tcp_pernet(net);
struct nf_conntrack_tuple *tuple;
enum tcp_conntrack new_state, old_state;
enum ip_conntrack_dir dir;
@@ -1020,7 +1027,7 @@ static int tcp_packet(struct nf_conn *ct,
&& new_state == TCP_CONNTRACK_FIN_WAIT)
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
- if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans &&
+ if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
@@ -1065,6 +1072,8 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
enum tcp_conntrack new_state;
const struct tcphdr *th;
struct tcphdr _tcph;
+ struct net *net = nf_ct_net(ct);
+ struct nf_tcp_net *tn = tcp_pernet(net);
const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
@@ -1093,7 +1102,7 @@ static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
ct->proto.tcp.seen[0].td_end;
tcp_options(skb, dataoff, th, &ct->proto.tcp.seen[0]);
- } else if (nf_ct_tcp_loose == 0) {
+ } else if (tn->tcp_loose == 0) {
/* Don't try to pick up connections. */
return false;
} else {
@@ -1251,14 +1260,16 @@ static int tcp_nlattr_tuple_size(void)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeouts = data;
+ struct nf_tcp_net *tn = tcp_pernet(net);
int i;
/* set default TCP timeouts. */
for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
- timeouts[i] = tcp_timeouts[i];
+ timeouts[i] = tn->timeouts[i];
if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
timeouts[TCP_CONNTRACK_SYN_SENT] =
@@ -1355,96 +1366,81 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static unsigned int tcp_sysctl_table_users;
-static struct ctl_table_header *tcp_sysctl_header;
static struct ctl_table tcp_sysctl_table[] = {
{
.procname = "nf_conntrack_tcp_timeout_syn_sent",
- .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_syn_recv",
- .data = &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_established",
- .data = &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_fin_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_FIN_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_close_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_last_ack",
- .data = &tcp_timeouts[TCP_CONNTRACK_LAST_ACK],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_time_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_TIME_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_close",
- .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_max_retrans",
- .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_unacknowledged",
- .data = &tcp_timeouts[TCP_CONNTRACK_UNACK],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_loose",
- .data = &nf_ct_tcp_loose,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_tcp_be_liberal",
- .data = &nf_ct_tcp_be_liberal,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_tcp_max_retrans",
- .data = &nf_ct_tcp_max_retrans,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
@@ -1456,91 +1452,78 @@ static struct ctl_table tcp_sysctl_table[] = {
static struct ctl_table tcp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_tcp_timeout_syn_sent",
- .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_syn_sent2",
- .data = &tcp_timeouts[TCP_CONNTRACK_SYN_SENT2],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_syn_recv",
- .data = &tcp_timeouts[TCP_CONNTRACK_SYN_RECV],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_established",
- .data = &tcp_timeouts[TCP_CONNTRACK_ESTABLISHED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_fin_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_FIN_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_close_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_last_ack",
- .data = &tcp_timeouts[TCP_CONNTRACK_LAST_ACK],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_time_wait",
- .data = &tcp_timeouts[TCP_CONNTRACK_TIME_WAIT],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_close",
- .data = &tcp_timeouts[TCP_CONNTRACK_CLOSE],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_timeout_max_retrans",
- .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_tcp_loose",
- .data = &nf_ct_tcp_loose,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_tcp_be_liberal",
- .data = &nf_ct_tcp_be_liberal,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "ip_conntrack_tcp_max_retrans",
- .data = &nf_ct_tcp_max_retrans,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
@@ -1550,6 +1533,101 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
+static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct nf_tcp_net *tn)
+{
+#ifdef CONFIG_SYSCTL
+ if (pn->ctl_table)
+ return 0;
+
+ pn->ctl_table = kmemdup(tcp_sysctl_table,
+ sizeof(tcp_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
+ pn->ctl_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
+ pn->ctl_table[2].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+ pn->ctl_table[3].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
+ pn->ctl_table[4].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
+ pn->ctl_table[5].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
+ pn->ctl_table[6].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
+ pn->ctl_table[7].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
+ pn->ctl_table[8].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
+ pn->ctl_table[9].data = &tn->timeouts[TCP_CONNTRACK_UNACK];
+ pn->ctl_table[10].data = &tn->tcp_loose;
+ pn->ctl_table[11].data = &tn->tcp_be_liberal;
+ pn->ctl_table[12].data = &tn->tcp_max_retrans;
+#endif
+ return 0;
+}
+
+static int tcp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+ struct nf_tcp_net *tn)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ pn->ctl_compat_table = kmemdup(tcp_compat_sysctl_table,
+ sizeof(tcp_compat_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_compat_table)
+ return -ENOMEM;
+
+ pn->ctl_compat_table[0].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT];
+ pn->ctl_compat_table[1].data = &tn->timeouts[TCP_CONNTRACK_SYN_SENT2];
+ pn->ctl_compat_table[2].data = &tn->timeouts[TCP_CONNTRACK_SYN_RECV];
+ pn->ctl_compat_table[3].data = &tn->timeouts[TCP_CONNTRACK_ESTABLISHED];
+ pn->ctl_compat_table[4].data = &tn->timeouts[TCP_CONNTRACK_FIN_WAIT];
+ pn->ctl_compat_table[5].data = &tn->timeouts[TCP_CONNTRACK_CLOSE_WAIT];
+ pn->ctl_compat_table[6].data = &tn->timeouts[TCP_CONNTRACK_LAST_ACK];
+ pn->ctl_compat_table[7].data = &tn->timeouts[TCP_CONNTRACK_TIME_WAIT];
+ pn->ctl_compat_table[8].data = &tn->timeouts[TCP_CONNTRACK_CLOSE];
+ pn->ctl_compat_table[9].data = &tn->timeouts[TCP_CONNTRACK_RETRANS];
+ pn->ctl_compat_table[10].data = &tn->tcp_loose;
+ pn->ctl_compat_table[11].data = &tn->tcp_be_liberal;
+ pn->ctl_compat_table[12].data = &tn->tcp_max_retrans;
+#endif
+#endif
+ return 0;
+}
+
+static int tcp_init_net(struct net *net, u_int16_t proto)
+{
+ int ret;
+ struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_proto_net *pn = &tn->pn;
+
+ if (!pn->users) {
+ int i;
+
+ for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
+ tn->timeouts[i] = tcp_timeouts[i];
+
+ tn->tcp_loose = nf_ct_tcp_loose;
+ tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
+ tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
+ }
+
+ if (proto == AF_INET) {
+ ret = tcp_kmemdup_compat_sysctl_table(pn, tn);
+ if (ret < 0)
+ return ret;
+
+ ret = tcp_kmemdup_sysctl_table(pn, tn);
+ if (ret < 0)
+ nf_ct_kfree_compat_sysctl_table(pn);
+ } else
+ ret = tcp_kmemdup_sysctl_table(pn, tn);
+
+ return ret;
+}
+
+static struct nf_proto_net *tcp_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
{
.l3proto = PF_INET,
@@ -1582,14 +1660,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &tcp_sysctl_table_users,
- .ctl_table_header = &tcp_sysctl_header,
- .ctl_table = tcp_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- .ctl_compat_table = tcp_compat_sysctl_table,
-#endif
-#endif
+ .init_net = tcp_init_net,
+ .get_net_proto = tcp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp4);
@@ -1625,10 +1697,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &tcp_sysctl_table_users,
- .ctl_table_header = &tcp_sysctl_header,
- .ctl_table = tcp_sysctl_table,
-#endif
+ .init_net = tcp_init_net,
+ .get_net_proto = tcp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_tcp6);
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 7259a6bdeb49..59623cc56e8d 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -25,17 +25,16 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
-enum udp_conntrack {
- UDP_CT_UNREPLIED,
- UDP_CT_REPLIED,
- UDP_CT_MAX
-};
-
static unsigned int udp_timeouts[UDP_CT_MAX] = {
[UDP_CT_UNREPLIED] = 30*HZ,
[UDP_CT_REPLIED] = 180*HZ,
};
+static inline struct nf_udp_net *udp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp;
+}
+
static bool udp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
@@ -73,7 +72,7 @@ static int udp_print_tuple(struct seq_file *s,
static unsigned int *udp_get_timeouts(struct net *net)
{
- return udp_timeouts;
+ return udp_pernet(net)->timeouts;
}
/* Returns verdict for packet, and may modify conntracktype */
@@ -157,13 +156,15 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeouts = data;
+ struct nf_udp_net *un = udp_pernet(net);
/* set default timeouts for UDP. */
- timeouts[UDP_CT_UNREPLIED] = udp_timeouts[UDP_CT_UNREPLIED];
- timeouts[UDP_CT_REPLIED] = udp_timeouts[UDP_CT_REPLIED];
+ timeouts[UDP_CT_UNREPLIED] = un->timeouts[UDP_CT_UNREPLIED];
+ timeouts[UDP_CT_REPLIED] = un->timeouts[UDP_CT_REPLIED];
if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
timeouts[UDP_CT_UNREPLIED] =
@@ -200,19 +201,15 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static unsigned int udp_sysctl_table_users;
-static struct ctl_table_header *udp_sysctl_header;
static struct ctl_table udp_sysctl_table[] = {
{
.procname = "nf_conntrack_udp_timeout",
- .data = &udp_timeouts[UDP_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udp_timeout_stream",
- .data = &udp_timeouts[UDP_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -223,14 +220,12 @@ static struct ctl_table udp_sysctl_table[] = {
static struct ctl_table udp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_udp_timeout",
- .data = &udp_timeouts[UDP_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_udp_timeout_stream",
- .data = &udp_timeouts[UDP_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -240,6 +235,73 @@ static struct ctl_table udp_compat_sysctl_table[] = {
#endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */
#endif /* CONFIG_SYSCTL */
+static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct nf_udp_net *un)
+{
+#ifdef CONFIG_SYSCTL
+ if (pn->ctl_table)
+ return 0;
+ pn->ctl_table = kmemdup(udp_sysctl_table,
+ sizeof(udp_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+ pn->ctl_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
+ pn->ctl_table[1].data = &un->timeouts[UDP_CT_REPLIED];
+#endif
+ return 0;
+}
+
+static int udp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn,
+ struct nf_udp_net *un)
+{
+#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
+ pn->ctl_compat_table = kmemdup(udp_compat_sysctl_table,
+ sizeof(udp_compat_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_compat_table)
+ return -ENOMEM;
+
+ pn->ctl_compat_table[0].data = &un->timeouts[UDP_CT_UNREPLIED];
+ pn->ctl_compat_table[1].data = &un->timeouts[UDP_CT_REPLIED];
+#endif
+#endif
+ return 0;
+}
+
+static int udp_init_net(struct net *net, u_int16_t proto)
+{
+ int ret;
+ struct nf_udp_net *un = udp_pernet(net);
+ struct nf_proto_net *pn = &un->pn;
+
+ if (!pn->users) {
+ int i;
+
+ for (i = 0; i < UDP_CT_MAX; i++)
+ un->timeouts[i] = udp_timeouts[i];
+ }
+
+ if (proto == AF_INET) {
+ ret = udp_kmemdup_compat_sysctl_table(pn, un);
+ if (ret < 0)
+ return ret;
+
+ ret = udp_kmemdup_sysctl_table(pn, un);
+ if (ret < 0)
+ nf_ct_kfree_compat_sysctl_table(pn);
+ } else
+ ret = udp_kmemdup_sysctl_table(pn, un);
+
+ return ret;
+}
+
+static struct nf_proto_net *udp_get_net_proto(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp.pn;
+}
+
struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
{
.l3proto = PF_INET,
@@ -267,14 +329,8 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &udp_sysctl_table_users,
- .ctl_table_header = &udp_sysctl_header,
- .ctl_table = udp_sysctl_table,
-#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
- .ctl_compat_table = udp_compat_sysctl_table,
-#endif
-#endif
+ .init_net = udp_init_net,
+ .get_net_proto = udp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
@@ -305,10 +361,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &udp_sysctl_table_users,
- .ctl_table_header = &udp_sysctl_header,
- .ctl_table = udp_sysctl_table,
-#endif
+ .init_net = udp_init_net,
+ .get_net_proto = udp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index 4d60a5376aa6..4b66df209286 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -35,6 +35,17 @@ static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
[UDPLITE_CT_REPLIED] = 180*HZ,
};
+static int udplite_net_id __read_mostly;
+struct udplite_net {
+ struct nf_proto_net pn;
+ unsigned int timeouts[UDPLITE_CT_MAX];
+};
+
+static inline struct udplite_net *udplite_pernet(struct net *net)
+{
+ return net_generic(net, udplite_net_id);
+}
+
static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct nf_conntrack_tuple *tuple)
@@ -70,7 +81,7 @@ static int udplite_print_tuple(struct seq_file *s,
static unsigned int *udplite_get_timeouts(struct net *net)
{
- return udplite_timeouts;
+ return udplite_pernet(net)->timeouts;
}
/* Returns verdict for packet, and may modify conntracktype */
@@ -161,13 +172,15 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
-static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[],
+ struct net *net, void *data)
{
unsigned int *timeouts = data;
+ struct udplite_net *un = udplite_pernet(net);
/* set default timeouts for UDPlite. */
- timeouts[UDPLITE_CT_UNREPLIED] = udplite_timeouts[UDPLITE_CT_UNREPLIED];
- timeouts[UDPLITE_CT_REPLIED] = udplite_timeouts[UDPLITE_CT_REPLIED];
+ timeouts[UDPLITE_CT_UNREPLIED] = un->timeouts[UDPLITE_CT_UNREPLIED];
+ timeouts[UDPLITE_CT_REPLIED] = un->timeouts[UDPLITE_CT_REPLIED];
if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
timeouts[UDPLITE_CT_UNREPLIED] =
@@ -204,19 +217,15 @@ udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
-static unsigned int udplite_sysctl_table_users;
-static struct ctl_table_header *udplite_sysctl_header;
static struct ctl_table udplite_sysctl_table[] = {
{
.procname = "nf_conntrack_udplite_timeout",
- .data = &udplite_timeouts[UDPLITE_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udplite_timeout_stream",
- .data = &udplite_timeouts[UDPLITE_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -225,6 +234,40 @@ static struct ctl_table udplite_sysctl_table[] = {
};
#endif /* CONFIG_SYSCTL */
+static int udplite_kmemdup_sysctl_table(struct nf_proto_net *pn,
+ struct udplite_net *un)
+{
+#ifdef CONFIG_SYSCTL
+ if (pn->ctl_table)
+ return 0;
+
+ pn->ctl_table = kmemdup(udplite_sysctl_table,
+ sizeof(udplite_sysctl_table),
+ GFP_KERNEL);
+ if (!pn->ctl_table)
+ return -ENOMEM;
+
+ pn->ctl_table[0].data = &un->timeouts[UDPLITE_CT_UNREPLIED];
+ pn->ctl_table[1].data = &un->timeouts[UDPLITE_CT_REPLIED];
+#endif
+ return 0;
+}
+
+static int udplite_init_net(struct net *net, u_int16_t proto)
+{
+ struct udplite_net *un = udplite_pernet(net);
+ struct nf_proto_net *pn = &un->pn;
+
+ if (!pn->users) {
+ int i;
+
+ for (i = 0 ; i < UDPLITE_CT_MAX; i++)
+ un->timeouts[i] = udplite_timeouts[i];
+ }
+
+ return udplite_kmemdup_sysctl_table(pn, un);
+}
+
static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
{
.l3proto = PF_INET,
@@ -253,11 +296,8 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
.nla_policy = udplite_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &udplite_sysctl_table_users,
- .ctl_table_header = &udplite_sysctl_header,
- .ctl_table = udplite_sysctl_table,
-#endif
+ .net_id = &udplite_net_id,
+ .init_net = udplite_init_net,
};
static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
@@ -288,34 +328,55 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
.nla_policy = udplite_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-#ifdef CONFIG_SYSCTL
- .ctl_table_users = &udplite_sysctl_table_users,
- .ctl_table_header = &udplite_sysctl_header,
- .ctl_table = udplite_sysctl_table,
-#endif
+ .net_id = &udplite_net_id,
+ .init_net = udplite_init_net,
};
-static int __init nf_conntrack_proto_udplite_init(void)
+static int udplite_net_init(struct net *net)
{
- int err;
-
- err = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udplite4);
- if (err < 0)
- goto err1;
- err = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udplite6);
- if (err < 0)
- goto err2;
+ int ret = 0;
+
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_udplite4);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_udplite4 :protocol register failed.\n");
+ goto out;
+ }
+ ret = nf_conntrack_l4proto_register(net,
+ &nf_conntrack_l4proto_udplite6);
+ if (ret < 0) {
+ pr_err("nf_conntrack_l4proto_udplite4 :protocol register failed.\n");
+ goto cleanup_udplite4;
+ }
return 0;
-err2:
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
-err1:
- return err;
+
+cleanup_udplite4:
+ nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite4);
+out:
+ return ret;
+}
+
+static void udplite_net_exit(struct net *net)
+{
+ nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite6);
+ nf_conntrack_l4proto_unregister(net, &nf_conntrack_l4proto_udplite4);
+}
+
+static struct pernet_operations udplite_net_ops = {
+ .init = udplite_net_init,
+ .exit = udplite_net_exit,
+ .id = &udplite_net_id,
+ .size = sizeof(struct udplite_net),
+};
+
+static int __init nf_conntrack_proto_udplite_init(void)
+{
+ return register_pernet_subsys(&udplite_net_ops);
}
static void __exit nf_conntrack_proto_udplite_exit(void)
{
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite6);
- nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udplite4);
+ unregister_pernet_subsys(&udplite_net_ops);
}
module_init(nf_conntrack_proto_udplite_init);
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index 8501823b3f9b..295429f39088 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -69,13 +69,12 @@ static int help(struct sk_buff *skb,
void *sb_ptr;
int ret = NF_ACCEPT;
int dir = CTINFO2DIR(ctinfo);
- struct nf_ct_sane_master *ct_sane_info;
+ struct nf_ct_sane_master *ct_sane_info = nfct_help_data(ct);
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
struct sane_request *req;
struct sane_reply_net_start *reply;
- ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
/* Until there's been traffic both ways, don't look in packets. */
if (ctinfo != IP_CT_ESTABLISHED &&
ctinfo != IP_CT_ESTABLISHED_REPLY)
@@ -163,7 +162,6 @@ out:
}
static struct nf_conntrack_helper sane[MAX_PORTS][2] __read_mostly;
-static char sane_names[MAX_PORTS][2][sizeof("sane-65535")] __read_mostly;
static const struct nf_conntrack_expect_policy sane_exp_policy = {
.max_expected = 1,
@@ -190,7 +188,6 @@ static void nf_conntrack_sane_fini(void)
static int __init nf_conntrack_sane_init(void)
{
int i, j = -1, ret = 0;
- char *tmpname;
sane_buffer = kmalloc(65536, GFP_KERNEL);
if (!sane_buffer)
@@ -205,17 +202,16 @@ static int __init nf_conntrack_sane_init(void)
sane[i][0].tuple.src.l3num = PF_INET;
sane[i][1].tuple.src.l3num = PF_INET6;
for (j = 0; j < 2; j++) {
+ sane[i][j].data_len = sizeof(struct nf_ct_sane_master);
sane[i][j].tuple.src.u.tcp.port = htons(ports[i]);
sane[i][j].tuple.dst.protonum = IPPROTO_TCP;
sane[i][j].expect_policy = &sane_exp_policy;
sane[i][j].me = THIS_MODULE;
sane[i][j].help = help;
- tmpname = &sane_names[i][j][0];
if (ports[i] == SANE_PORT)
- sprintf(tmpname, "sane");
+ sprintf(sane[i][j].name, "sane");
else
- sprintf(tmpname, "sane-%d", ports[i]);
- sane[i][j].name = tmpname;
+ sprintf(sane[i][j].name, "sane-%d", ports[i]);
pr_debug("nf_ct_sane: registering helper for pf: %d "
"port: %d\n",
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 93faf6a3a637..758a1bacc126 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1075,12 +1075,12 @@ static int process_invite_response(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
return process_sdp(skb, dataoff, dptr, datalen, cseq);
- else if (help->help.ct_sip_info.invite_cseq == cseq)
+ else if (ct_sip_info->invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
@@ -1091,12 +1091,12 @@ static int process_update_response(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
return process_sdp(skb, dataoff, dptr, datalen, cseq);
- else if (help->help.ct_sip_info.invite_cseq == cseq)
+ else if (ct_sip_info->invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
@@ -1107,12 +1107,12 @@ static int process_prack_response(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
if ((code >= 100 && code <= 199) ||
(code >= 200 && code <= 299))
return process_sdp(skb, dataoff, dptr, datalen, cseq);
- else if (help->help.ct_sip_info.invite_cseq == cseq)
+ else if (ct_sip_info->invite_cseq == cseq)
flush_expectations(ct, true);
return NF_ACCEPT;
}
@@ -1123,13 +1123,13 @@ static int process_invite_request(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
unsigned int ret;
flush_expectations(ct, true);
ret = process_sdp(skb, dataoff, dptr, datalen, cseq);
if (ret == NF_ACCEPT)
- help->help.ct_sip_info.invite_cseq = cseq;
+ ct_sip_info->invite_cseq = cseq;
return ret;
}
@@ -1154,7 +1154,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
unsigned int matchoff, matchlen;
struct nf_conntrack_expect *exp;
@@ -1235,7 +1235,7 @@ static int process_register_request(struct sk_buff *skb, unsigned int dataoff,
store_cseq:
if (ret == NF_ACCEPT)
- help->help.ct_sip_info.register_cseq = cseq;
+ ct_sip_info->register_cseq = cseq;
return ret;
}
@@ -1245,7 +1245,7 @@ static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
{
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- struct nf_conn_help *help = nfct_help(ct);
+ struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
union nf_inet_addr addr;
__be16 port;
@@ -1262,7 +1262,7 @@ static int process_register_response(struct sk_buff *skb, unsigned int dataoff,
* responses, so we store the sequence number of the last valid
* request and compare it here.
*/
- if (help->help.ct_sip_info.register_cseq != cseq)
+ if (ct_sip_info->register_cseq != cseq)
return NF_ACCEPT;
if (code >= 100 && code <= 199)
@@ -1556,7 +1556,6 @@ static void nf_conntrack_sip_fini(void)
static int __init nf_conntrack_sip_init(void)
{
int i, j, ret;
- char *tmpname;
if (ports_c == 0)
ports[ports_c++] = SIP_PORT;
@@ -1579,17 +1578,16 @@ static int __init nf_conntrack_sip_init(void)
sip[i][3].help = sip_help_tcp;
for (j = 0; j < ARRAY_SIZE(sip[i]); j++) {
+ sip[i][j].data_len = sizeof(struct nf_ct_sip_master);
sip[i][j].tuple.src.u.udp.port = htons(ports[i]);
sip[i][j].expect_policy = sip_exp_policy;
sip[i][j].expect_class_max = SIP_EXPECT_MAX;
sip[i][j].me = THIS_MODULE;
- tmpname = &sip_names[i][j][0];
if (ports[i] == SIP_PORT)
- sprintf(tmpname, "sip");
+ sprintf(sip_names[i][j], "sip");
else
- sprintf(tmpname, "sip-%u", i);
- sip[i][j].name = tmpname;
+ sprintf(sip_names[i][j], "sip-%u", i);
pr_debug("port #%u: %u\n", i, ports[i]);
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c
index 75466fd72f4f..81fc61c05263 100644
--- a/net/netfilter/nf_conntrack_tftp.c
+++ b/net/netfilter/nf_conntrack_tftp.c
@@ -92,7 +92,6 @@ static int tftp_help(struct sk_buff *skb,
}
static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly;
-static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly;
static const struct nf_conntrack_expect_policy tftp_exp_policy = {
.max_expected = 1,
@@ -112,7 +111,6 @@ static void nf_conntrack_tftp_fini(void)
static int __init nf_conntrack_tftp_init(void)
{
int i, j, ret;
- char *tmpname;
if (ports_c == 0)
ports[ports_c++] = TFTP_PORT;
@@ -129,12 +127,10 @@ static int __init nf_conntrack_tftp_init(void)
tftp[i][j].me = THIS_MODULE;
tftp[i][j].help = tftp_help;
- tmpname = &tftp_names[i][j][0];
if (ports[i] == TFTP_PORT)
- sprintf(tmpname, "tftp");
+ sprintf(tftp[i][j].name, "tftp");
else
- sprintf(tmpname, "tftp-%u", i);
- tftp[i][j].name = tmpname;
+ sprintf(tftp[i][j].name, "tftp-%u", i);
ret = nf_conntrack_helper_register(&tftp[i][j]);
if (ret) {
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 3e797d1fcb94..a26503342e71 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -39,6 +39,15 @@ static char __initdata nfversion[] = "0.30";
static const struct nfnetlink_subsystem __rcu *subsys_table[NFNL_SUBSYS_COUNT];
static DEFINE_MUTEX(nfnl_mutex);
+static const int nfnl_group2type[NFNLGRP_MAX+1] = {
+ [NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK,
+ [NFNLGRP_CONNTRACK_UPDATE] = NFNL_SUBSYS_CTNETLINK,
+ [NFNLGRP_CONNTRACK_DESTROY] = NFNL_SUBSYS_CTNETLINK,
+ [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP,
+ [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP,
+ [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
+};
+
void nfnl_lock(void)
{
mutex_lock(&nfnl_mutex);
@@ -169,8 +178,10 @@ replay:
err = nla_parse(cda, ss->cb[cb_id].attr_count,
attr, attrlen, ss->cb[cb_id].policy);
- if (err < 0)
+ if (err < 0) {
+ rcu_read_unlock();
return err;
+ }
if (nc->call_rcu) {
err = nc->call_rcu(net->nfnl, skb, nlh,
@@ -184,9 +195,11 @@ replay:
lockdep_is_held(&nfnl_mutex)) != ss ||
nfnetlink_find_client(type, ss) != nc)
err = -EAGAIN;
- else
+ else if (nc->call)
err = nc->call(net->nfnl, skb, nlh,
(const struct nlattr **)cda);
+ else
+ err = -EINVAL;
nfnl_unlock();
}
if (err == -EAGAIN)
@@ -200,12 +213,35 @@ static void nfnetlink_rcv(struct sk_buff *skb)
netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
}
+#ifdef CONFIG_MODULES
+static void nfnetlink_bind(int group)
+{
+ const struct nfnetlink_subsystem *ss;
+ int type = nfnl_group2type[group];
+
+ rcu_read_lock();
+ ss = nfnetlink_get_subsys(type);
+ if (!ss) {
+ rcu_read_unlock();
+ request_module("nfnetlink-subsys-%d", type);
+ return;
+ }
+ rcu_read_unlock();
+}
+#endif
+
static int __net_init nfnetlink_net_init(struct net *net)
{
struct sock *nfnl;
+ struct netlink_kernel_cfg cfg = {
+ .groups = NFNLGRP_MAX,
+ .input = nfnetlink_rcv,
+#ifdef CONFIG_MODULES
+ .bind = nfnetlink_bind,
+#endif
+ };
- nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, NFNLGRP_MAX,
- nfnetlink_rcv, NULL, THIS_MODULE);
+ nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, THIS_MODULE, &cfg);
if (!nfnl)
return -ENOMEM;
net->nfnl_stash = nfnl;
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
new file mode 100644
index 000000000000..d6836193d479
--- /dev/null
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -0,0 +1,672 @@
+/*
+ * (C) 2012 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ *
+ * This software has been sponsored by Vyatta Inc. <http://www.vyatta.com>
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netlink.h>
+#include <linux/rculist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_conntrack.h>
+#include <linux/netfilter/nfnetlink_cthelper.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("nfnl_cthelper: User-space connection tracking helpers");
+
+static int
+nfnl_userspace_cthelper(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ const struct nf_conn_help *help;
+ struct nf_conntrack_helper *helper;
+
+ help = nfct_help(ct);
+ if (help == NULL)
+ return NF_DROP;
+
+ /* rcu_read_lock()ed by nf_hook_slow */
+ helper = rcu_dereference(help->helper);
+ if (helper == NULL)
+ return NF_DROP;
+
+ /* This is an user-space helper not yet configured, skip. */
+ if ((helper->flags &
+ (NF_CT_HELPER_F_USERSPACE | NF_CT_HELPER_F_CONFIGURED)) ==
+ NF_CT_HELPER_F_USERSPACE)
+ return NF_ACCEPT;
+
+ /* If the user-space helper is not available, don't block traffic. */
+ return NF_QUEUE_NR(helper->queue_num) | NF_VERDICT_FLAG_QUEUE_BYPASS;
+}
+
+static const struct nla_policy nfnl_cthelper_tuple_pol[NFCTH_TUPLE_MAX+1] = {
+ [NFCTH_TUPLE_L3PROTONUM] = { .type = NLA_U16, },
+ [NFCTH_TUPLE_L4PROTONUM] = { .type = NLA_U8, },
+};
+
+static int
+nfnl_cthelper_parse_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nlattr *attr)
+{
+ struct nlattr *tb[NFCTH_TUPLE_MAX+1];
+
+ nla_parse_nested(tb, NFCTH_TUPLE_MAX, attr, nfnl_cthelper_tuple_pol);
+
+ if (!tb[NFCTH_TUPLE_L3PROTONUM] || !tb[NFCTH_TUPLE_L4PROTONUM])
+ return -EINVAL;
+
+ tuple->src.l3num = ntohs(nla_get_u16(tb[NFCTH_TUPLE_L3PROTONUM]));
+ tuple->dst.protonum = nla_get_u8(tb[NFCTH_TUPLE_L4PROTONUM]);
+
+ return 0;
+}
+
+static int
+nfnl_cthelper_from_nlattr(struct nlattr *attr, struct nf_conn *ct)
+{
+ const struct nf_conn_help *help = nfct_help(ct);
+
+ if (help->helper->data_len == 0)
+ return -EINVAL;
+
+ memcpy(&help->data, nla_data(attr), help->helper->data_len);
+ return 0;
+}
+
+static int
+nfnl_cthelper_to_nlattr(struct sk_buff *skb, const struct nf_conn *ct)
+{
+ const struct nf_conn_help *help = nfct_help(ct);
+
+ if (help->helper->data_len &&
+ nla_put(skb, CTA_HELP_INFO, help->helper->data_len, &help->data))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy nfnl_cthelper_expect_pol[NFCTH_POLICY_MAX+1] = {
+ [NFCTH_POLICY_NAME] = { .type = NLA_NUL_STRING,
+ .len = NF_CT_HELPER_NAME_LEN-1 },
+ [NFCTH_POLICY_EXPECT_MAX] = { .type = NLA_U32, },
+ [NFCTH_POLICY_EXPECT_TIMEOUT] = { .type = NLA_U32, },
+};
+
+static int
+nfnl_cthelper_expect_policy(struct nf_conntrack_expect_policy *expect_policy,
+ const struct nlattr *attr)
+{
+ struct nlattr *tb[NFCTH_POLICY_MAX+1];
+
+ nla_parse_nested(tb, NFCTH_POLICY_MAX, attr, nfnl_cthelper_expect_pol);
+
+ if (!tb[NFCTH_POLICY_NAME] ||
+ !tb[NFCTH_POLICY_EXPECT_MAX] ||
+ !tb[NFCTH_POLICY_EXPECT_TIMEOUT])
+ return -EINVAL;
+
+ strncpy(expect_policy->name,
+ nla_data(tb[NFCTH_POLICY_NAME]), NF_CT_HELPER_NAME_LEN);
+ expect_policy->max_expected =
+ ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_MAX]));
+ expect_policy->timeout =
+ ntohl(nla_get_be32(tb[NFCTH_POLICY_EXPECT_TIMEOUT]));
+
+ return 0;
+}
+
+static const struct nla_policy
+nfnl_cthelper_expect_policy_set[NFCTH_POLICY_SET_MAX+1] = {
+ [NFCTH_POLICY_SET_NUM] = { .type = NLA_U32, },
+};
+
+static int
+nfnl_cthelper_parse_expect_policy(struct nf_conntrack_helper *helper,
+ const struct nlattr *attr)
+{
+ int i, ret;
+ struct nf_conntrack_expect_policy *expect_policy;
+ struct nlattr *tb[NFCTH_POLICY_SET_MAX+1];
+
+ nla_parse_nested(tb, NFCTH_POLICY_SET_MAX, attr,
+ nfnl_cthelper_expect_policy_set);
+
+ if (!tb[NFCTH_POLICY_SET_NUM])
+ return -EINVAL;
+
+ helper->expect_class_max =
+ ntohl(nla_get_be32(tb[NFCTH_POLICY_SET_NUM]));
+
+ if (helper->expect_class_max != 0 &&
+ helper->expect_class_max > NF_CT_MAX_EXPECT_CLASSES)
+ return -EOVERFLOW;
+
+ expect_policy = kzalloc(sizeof(struct nf_conntrack_expect_policy) *
+ helper->expect_class_max, GFP_KERNEL);
+ if (expect_policy == NULL)
+ return -ENOMEM;
+
+ for (i=0; i<helper->expect_class_max; i++) {
+ if (!tb[NFCTH_POLICY_SET+i])
+ goto err;
+
+ ret = nfnl_cthelper_expect_policy(&expect_policy[i],
+ tb[NFCTH_POLICY_SET+i]);
+ if (ret < 0)
+ goto err;
+ }
+ helper->expect_policy = expect_policy;
+ return 0;
+err:
+ kfree(expect_policy);
+ return -EINVAL;
+}
+
+static int
+nfnl_cthelper_create(const struct nlattr * const tb[],
+ struct nf_conntrack_tuple *tuple)
+{
+ struct nf_conntrack_helper *helper;
+ int ret;
+
+ if (!tb[NFCTH_TUPLE] || !tb[NFCTH_POLICY] || !tb[NFCTH_PRIV_DATA_LEN])
+ return -EINVAL;
+
+ helper = kzalloc(sizeof(struct nf_conntrack_helper), GFP_KERNEL);
+ if (helper == NULL)
+ return -ENOMEM;
+
+ ret = nfnl_cthelper_parse_expect_policy(helper, tb[NFCTH_POLICY]);
+ if (ret < 0)
+ goto err;
+
+ strncpy(helper->name, nla_data(tb[NFCTH_NAME]), NF_CT_HELPER_NAME_LEN);
+ helper->data_len = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+ helper->flags |= NF_CT_HELPER_F_USERSPACE;
+ memcpy(&helper->tuple, tuple, sizeof(struct nf_conntrack_tuple));
+
+ helper->me = THIS_MODULE;
+ helper->help = nfnl_userspace_cthelper;
+ helper->from_nlattr = nfnl_cthelper_from_nlattr;
+ helper->to_nlattr = nfnl_cthelper_to_nlattr;
+
+ /* Default to queue number zero, this can be updated at any time. */
+ if (tb[NFCTH_QUEUE_NUM])
+ helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
+
+ if (tb[NFCTH_STATUS]) {
+ int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
+
+ switch(status) {
+ case NFCT_HELPER_STATUS_ENABLED:
+ helper->flags |= NF_CT_HELPER_F_CONFIGURED;
+ break;
+ case NFCT_HELPER_STATUS_DISABLED:
+ helper->flags &= ~NF_CT_HELPER_F_CONFIGURED;
+ break;
+ }
+ }
+
+ ret = nf_conntrack_helper_register(helper);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+err:
+ kfree(helper);
+ return ret;
+}
+
+static int
+nfnl_cthelper_update(const struct nlattr * const tb[],
+ struct nf_conntrack_helper *helper)
+{
+ int ret;
+
+ if (tb[NFCTH_PRIV_DATA_LEN])
+ return -EBUSY;
+
+ if (tb[NFCTH_POLICY]) {
+ ret = nfnl_cthelper_parse_expect_policy(helper,
+ tb[NFCTH_POLICY]);
+ if (ret < 0)
+ return ret;
+ }
+ if (tb[NFCTH_QUEUE_NUM])
+ helper->queue_num = ntohl(nla_get_be32(tb[NFCTH_QUEUE_NUM]));
+
+ if (tb[NFCTH_STATUS]) {
+ int status = ntohl(nla_get_be32(tb[NFCTH_STATUS]));
+
+ switch(status) {
+ case NFCT_HELPER_STATUS_ENABLED:
+ helper->flags |= NF_CT_HELPER_F_CONFIGURED;
+ break;
+ case NFCT_HELPER_STATUS_DISABLED:
+ helper->flags &= ~NF_CT_HELPER_F_CONFIGURED;
+ break;
+ }
+ }
+ return 0;
+}
+
+static int
+nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+ const char *helper_name;
+ struct nf_conntrack_helper *cur, *helper = NULL;
+ struct nf_conntrack_tuple tuple;
+ struct hlist_node *n;
+ int ret = 0, i;
+
+ if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
+ return -EINVAL;
+
+ helper_name = nla_data(tb[NFCTH_NAME]);
+
+ ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
+ if (ret < 0)
+ return ret;
+
+ rcu_read_lock();
+ for (i = 0; i < nf_ct_helper_hsize && !helper; i++) {
+ hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+
+ /* skip non-userspace conntrack helpers. */
+ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+ continue;
+
+ if (strncmp(cur->name, helper_name,
+ NF_CT_HELPER_NAME_LEN) != 0)
+ continue;
+
+ if ((tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
+
+ if (nlh->nlmsg_flags & NLM_F_EXCL) {
+ ret = -EEXIST;
+ goto err;
+ }
+ helper = cur;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ if (helper == NULL)
+ ret = nfnl_cthelper_create(tb, &tuple);
+ else
+ ret = nfnl_cthelper_update(tb, helper);
+
+ return ret;
+err:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int
+nfnl_cthelper_dump_tuple(struct sk_buff *skb,
+ struct nf_conntrack_helper *helper)
+{
+ struct nlattr *nest_parms;
+
+ nest_parms = nla_nest_start(skb, NFCTH_TUPLE | NLA_F_NESTED);
+ if (nest_parms == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_be16(skb, NFCTH_TUPLE_L3PROTONUM,
+ htons(helper->tuple.src.l3num)))
+ goto nla_put_failure;
+
+ if (nla_put_u8(skb, NFCTH_TUPLE_L4PROTONUM, helper->tuple.dst.protonum))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest_parms);
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static int
+nfnl_cthelper_dump_policy(struct sk_buff *skb,
+ struct nf_conntrack_helper *helper)
+{
+ int i;
+ struct nlattr *nest_parms1, *nest_parms2;
+
+ nest_parms1 = nla_nest_start(skb, NFCTH_POLICY | NLA_F_NESTED);
+ if (nest_parms1 == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFCTH_POLICY_SET_NUM,
+ htonl(helper->expect_class_max)))
+ goto nla_put_failure;
+
+ for (i=0; i<helper->expect_class_max; i++) {
+ nest_parms2 = nla_nest_start(skb,
+ (NFCTH_POLICY_SET+i) | NLA_F_NESTED);
+ if (nest_parms2 == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_string(skb, NFCTH_POLICY_NAME,
+ helper->expect_policy[i].name))
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_MAX,
+ htonl(helper->expect_policy[i].max_expected)))
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFCTH_POLICY_EXPECT_TIMEOUT,
+ htonl(helper->expect_policy[i].timeout)))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest_parms2);
+ }
+ nla_nest_end(skb, nest_parms1);
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+static int
+nfnl_cthelper_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ int event, struct nf_conntrack_helper *helper)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0;
+ int status;
+
+ event |= NFNL_SUBSYS_CTHELPER << 8;
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ if (nla_put_string(skb, NFCTH_NAME, helper->name))
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFCTH_QUEUE_NUM, htonl(helper->queue_num)))
+ goto nla_put_failure;
+
+ if (nfnl_cthelper_dump_tuple(skb, helper) < 0)
+ goto nla_put_failure;
+
+ if (nfnl_cthelper_dump_policy(skb, helper) < 0)
+ goto nla_put_failure;
+
+ if (nla_put_be32(skb, NFCTH_PRIV_DATA_LEN, htonl(helper->data_len)))
+ goto nla_put_failure;
+
+ if (helper->flags & NF_CT_HELPER_F_CONFIGURED)
+ status = NFCT_HELPER_STATUS_ENABLED;
+ else
+ status = NFCT_HELPER_STATUS_DISABLED;
+
+ if (nla_put_be32(skb, NFCTH_STATUS, htonl(status)))
+ goto nla_put_failure;
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+nfnl_cthelper_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct nf_conntrack_helper *cur, *last;
+ struct hlist_node *n;
+
+ rcu_read_lock();
+ last = (struct nf_conntrack_helper *)cb->args[1];
+ for (; cb->args[0] < nf_ct_helper_hsize; cb->args[0]++) {
+restart:
+ hlist_for_each_entry_rcu(cur, n,
+ &nf_ct_helper_hash[cb->args[0]], hnode) {
+
+ /* skip non-userspace conntrack helpers. */
+ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+ continue;
+
+ if (cb->args[1]) {
+ if (cur != last)
+ continue;
+ cb->args[1] = 0;
+ }
+ if (nfnl_cthelper_fill_info(skb,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+ NFNL_MSG_CTHELPER_NEW, cur) < 0) {
+ cb->args[1] = (unsigned long)cur;
+ goto out;
+ }
+ }
+ }
+ if (cb->args[1]) {
+ cb->args[1] = 0;
+ goto restart;
+ }
+out:
+ rcu_read_unlock();
+ return skb->len;
+}
+
+static int
+nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+ int ret = -ENOENT, i;
+ struct nf_conntrack_helper *cur;
+ struct hlist_node *n;
+ struct sk_buff *skb2;
+ char *helper_name = NULL;
+ struct nf_conntrack_tuple tuple;
+ bool tuple_set = false;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = nfnl_cthelper_dump_table,
+ };
+ return netlink_dump_start(nfnl, skb, nlh, &c);
+ }
+
+ if (tb[NFCTH_NAME])
+ helper_name = nla_data(tb[NFCTH_NAME]);
+
+ if (tb[NFCTH_TUPLE]) {
+ ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
+ if (ret < 0)
+ return ret;
+
+ tuple_set = true;
+ }
+
+ for (i = 0; i < nf_ct_helper_hsize; i++) {
+ hlist_for_each_entry_rcu(cur, n, &nf_ct_helper_hash[i], hnode) {
+
+ /* skip non-userspace conntrack helpers. */
+ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+ continue;
+
+ if (helper_name && strncmp(cur->name, helper_name,
+ NF_CT_HELPER_NAME_LEN) != 0) {
+ continue;
+ }
+ if (tuple_set &&
+ (tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ret = nfnl_cthelper_fill_info(skb2, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ NFNL_MSG_CTHELPER_NEW, cur);
+ if (ret <= 0) {
+ kfree_skb(skb2);
+ break;
+ }
+
+ ret = netlink_unicast(nfnl, skb2, NETLINK_CB(skb).pid,
+ MSG_DONTWAIT);
+ if (ret > 0)
+ ret = 0;
+
+ /* this avoids a loop in nfnetlink. */
+ return ret == -EAGAIN ? -ENOBUFS : ret;
+ }
+ }
+ return ret;
+}
+
+static int
+nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh, const struct nlattr * const tb[])
+{
+ char *helper_name = NULL;
+ struct nf_conntrack_helper *cur;
+ struct hlist_node *n, *tmp;
+ struct nf_conntrack_tuple tuple;
+ bool tuple_set = false, found = false;
+ int i, j = 0, ret;
+
+ if (tb[NFCTH_NAME])
+ helper_name = nla_data(tb[NFCTH_NAME]);
+
+ if (tb[NFCTH_TUPLE]) {
+ ret = nfnl_cthelper_parse_tuple(&tuple, tb[NFCTH_TUPLE]);
+ if (ret < 0)
+ return ret;
+
+ tuple_set = true;
+ }
+
+ for (i = 0; i < nf_ct_helper_hsize; i++) {
+ hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+ hnode) {
+ /* skip non-userspace conntrack helpers. */
+ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+ continue;
+
+ j++;
+
+ if (helper_name && strncmp(cur->name, helper_name,
+ NF_CT_HELPER_NAME_LEN) != 0) {
+ continue;
+ }
+ if (tuple_set &&
+ (tuple.src.l3num != cur->tuple.src.l3num ||
+ tuple.dst.protonum != cur->tuple.dst.protonum))
+ continue;
+
+ found = true;
+ nf_conntrack_helper_unregister(cur);
+ }
+ }
+ /* Make sure we return success if we flush and there is no helpers */
+ return (found || j == 0) ? 0 : -ENOENT;
+}
+
+static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
+ [NFCTH_NAME] = { .type = NLA_NUL_STRING,
+ .len = NF_CT_HELPER_NAME_LEN-1 },
+ [NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
+};
+
+static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
+ [NFNL_MSG_CTHELPER_NEW] = { .call = nfnl_cthelper_new,
+ .attr_count = NFCTH_MAX,
+ .policy = nfnl_cthelper_policy },
+ [NFNL_MSG_CTHELPER_GET] = { .call = nfnl_cthelper_get,
+ .attr_count = NFCTH_MAX,
+ .policy = nfnl_cthelper_policy },
+ [NFNL_MSG_CTHELPER_DEL] = { .call = nfnl_cthelper_del,
+ .attr_count = NFCTH_MAX,
+ .policy = nfnl_cthelper_policy },
+};
+
+static const struct nfnetlink_subsystem nfnl_cthelper_subsys = {
+ .name = "cthelper",
+ .subsys_id = NFNL_SUBSYS_CTHELPER,
+ .cb_count = NFNL_MSG_CTHELPER_MAX,
+ .cb = nfnl_cthelper_cb,
+};
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTHELPER);
+
+static int __init nfnl_cthelper_init(void)
+{
+ int ret;
+
+ ret = nfnetlink_subsys_register(&nfnl_cthelper_subsys);
+ if (ret < 0) {
+ pr_err("nfnl_cthelper: cannot register with nfnetlink.\n");
+ goto err_out;
+ }
+ return 0;
+err_out:
+ return ret;
+}
+
+static void __exit nfnl_cthelper_exit(void)
+{
+ struct nf_conntrack_helper *cur;
+ struct hlist_node *n, *tmp;
+ int i;
+
+ nfnetlink_subsys_unregister(&nfnl_cthelper_subsys);
+
+ for (i=0; i<nf_ct_helper_hsize; i++) {
+ hlist_for_each_entry_safe(cur, n, tmp, &nf_ct_helper_hash[i],
+ hnode) {
+ /* skip non-userspace conntrack helpers. */
+ if (!(cur->flags & NF_CT_HELPER_F_USERSPACE))
+ continue;
+
+ nf_conntrack_helper_unregister(cur);
+ }
+ }
+}
+
+module_init(nfnl_cthelper_init);
+module_exit(nfnl_cthelper_exit);
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 3e655288d1d6..cdecbc8fe965 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -49,8 +49,9 @@ static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
static int
ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
- struct nf_conntrack_l4proto *l4proto,
- const struct nlattr *attr)
+ struct nf_conntrack_l4proto *l4proto,
+ struct net *net,
+ const struct nlattr *attr)
{
int ret = 0;
@@ -60,7 +61,8 @@ ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max,
attr, l4proto->ctnl_timeout.nla_policy);
- ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, &timeout->data);
+ ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, net,
+ &timeout->data);
}
return ret;
}
@@ -74,6 +76,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
__u8 l4num;
struct nf_conntrack_l4proto *l4proto;
struct ctnl_timeout *timeout, *matching = NULL;
+ struct net *net = sock_net(skb->sk);
char *name;
int ret;
@@ -117,7 +120,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
goto err_proto_put;
}
- ret = ctnl_timeout_parse_policy(matching, l4proto,
+ ret = ctnl_timeout_parse_policy(matching, l4proto, net,
cda[CTA_TIMEOUT_DATA]);
return ret;
}
@@ -132,7 +135,7 @@ cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
goto err_proto_put;
}
- ret = ctnl_timeout_parse_policy(timeout, l4proto,
+ ret = ctnl_timeout_parse_policy(timeout, l4proto, net,
cda[CTA_TIMEOUT_DATA]);
if (ret < 0)
goto err;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3c3cfc0cc9b5..169ab59ed9d4 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -326,18 +326,20 @@ __nfulnl_send(struct nfulnl_instance *inst)
{
int status = -1;
- if (inst->qlen > 1)
- NLMSG_PUT(inst->skb, 0, 0,
- NLMSG_DONE,
- sizeof(struct nfgenmsg));
-
+ if (inst->qlen > 1) {
+ struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
+ NLMSG_DONE,
+ sizeof(struct nfgenmsg),
+ 0);
+ if (!nlh)
+ goto out;
+ }
status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_pid,
MSG_DONTWAIT);
inst->qlen = 0;
inst->skb = NULL;
-
-nlmsg_failure:
+out:
return status;
}
@@ -380,10 +382,12 @@ __build_packet_message(struct nfulnl_instance *inst,
struct nfgenmsg *nfmsg;
sk_buff_data_t old_tail = inst->skb->tail;
- nlh = NLMSG_PUT(inst->skb, 0, 0,
+ nlh = nlmsg_put(inst->skb, 0, 0,
NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
- sizeof(struct nfgenmsg));
- nfmsg = NLMSG_DATA(nlh);
+ sizeof(struct nfgenmsg), 0);
+ if (!nlh)
+ return -1;
+ nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = pf;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(inst->group_num);
@@ -526,7 +530,7 @@ __build_packet_message(struct nfulnl_instance *inst,
if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
- goto nlmsg_failure;
+ return -1;
}
nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
@@ -540,7 +544,6 @@ __build_packet_message(struct nfulnl_instance *inst,
nlh->nlmsg_len = inst->skb->tail - old_tail;
return 0;
-nlmsg_failure:
nla_put_failure:
PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
return -1;
@@ -745,7 +748,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nfula[])
{
- struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+ struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int16_t group_num = ntohs(nfmsg->res_id);
struct nfulnl_instance *inst;
struct nfulnl_msg_config_cmd *cmd = NULL;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue_core.c
index 4162437b8361..c0496a55ad0c 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -30,6 +30,7 @@
#include <linux/list.h>
#include <net/sock.h>
#include <net/netfilter/nf_queue.h>
+#include <net/netfilter/nfnetlink_queue.h>
#include <linux/atomic.h>
@@ -52,6 +53,7 @@ struct nfqnl_instance {
u_int16_t queue_num; /* number of this queue */
u_int8_t copy_mode;
+ u_int32_t flags; /* Set using NFQA_CFG_FLAGS */
/*
* Following fields are dirtied for each queued packet,
* keep them in same cache line if possible.
@@ -232,6 +234,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
struct sk_buff *entskb = entry->skb;
struct net_device *indev;
struct net_device *outdev;
+ struct nf_conn *ct = NULL;
+ enum ip_conntrack_info uninitialized_var(ctinfo);
size = NLMSG_SPACE(sizeof(struct nfgenmsg))
+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
@@ -265,16 +269,22 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
break;
}
+ if (queue->flags & NFQA_CFG_F_CONNTRACK)
+ ct = nfqnl_ct_get(entskb, &size, &ctinfo);
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
- goto nlmsg_failure;
+ return NULL;
old_tail = skb->tail;
- nlh = NLMSG_PUT(skb, 0, 0,
+ nlh = nlmsg_put(skb, 0, 0,
NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
- sizeof(struct nfgenmsg));
- nfmsg = NLMSG_DATA(nlh);
+ sizeof(struct nfgenmsg), 0);
+ if (!nlh) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ nfmsg = nlmsg_data(nlh);
nfmsg->nfgen_family = entry->pf;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(queue->queue_num);
@@ -377,7 +387,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
if (skb_tailroom(skb) < nla_total_size(data_len)) {
printk(KERN_WARNING "nf_queue: no tailroom!\n");
- goto nlmsg_failure;
+ kfree_skb(skb);
+ return NULL;
}
nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
@@ -388,10 +399,12 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
BUG();
}
+ if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
+ goto nla_put_failure;
+
nlh->nlmsg_len = skb->tail - old_tail;
return skb;
-nlmsg_failure:
nla_put_failure:
if (skb)
kfree_skb(skb);
@@ -406,6 +419,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
struct nfqnl_instance *queue;
int err = -ENOBUFS;
__be32 *packet_id_ptr;
+ int failopen = 0;
/* rcu_read_lock()ed by nf_hook_slow() */
queue = instance_lookup(queuenum);
@@ -431,9 +445,14 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
goto err_out_free_nskb;
}
if (queue->queue_total >= queue->queue_maxlen) {
- queue->queue_dropped++;
- net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
- queue->queue_total);
+ if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
+ failopen = 1;
+ err = 0;
+ } else {
+ queue->queue_dropped++;
+ net_warn_ratelimited("nf_queue: full at %d entries, dropping packets(s)\n",
+ queue->queue_total);
+ }
goto err_out_free_nskb;
}
entry->id = ++queue->id_sequence;
@@ -455,17 +474,17 @@ err_out_free_nskb:
kfree_skb(nskb);
err_out_unlock:
spin_unlock_bh(&queue->lock);
+ if (failopen)
+ nf_reinject(entry, NF_ACCEPT);
err_out:
return err;
}
static int
-nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
+nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
struct sk_buff *nskb;
- int diff;
- diff = data_len - e->skb->len;
if (diff < 0) {
if (pskb_trim(e->skb, data_len))
return -ENOMEM;
@@ -623,6 +642,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
[NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
[NFQA_MARK] = { .type = NLA_U32 },
[NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
+ [NFQA_CT] = { .type = NLA_UNSPEC },
};
static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -670,7 +690,7 @@ nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nfqa[])
{
- struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+ struct nfgenmsg *nfmsg = nlmsg_data(nlh);
struct nf_queue_entry *entry, *tmp;
unsigned int verdict, maxid;
struct nfqnl_msg_verdict_hdr *vhdr;
@@ -716,13 +736,15 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nfqa[])
{
- struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+ struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int16_t queue_num = ntohs(nfmsg->res_id);
struct nfqnl_msg_verdict_hdr *vhdr;
struct nfqnl_instance *queue;
unsigned int verdict;
struct nf_queue_entry *entry;
+ enum ip_conntrack_info uninitialized_var(ctinfo);
+ struct nf_conn *ct = NULL;
queue = instance_lookup(queue_num);
if (!queue)
@@ -741,11 +763,22 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
if (entry == NULL)
return -ENOENT;
+ rcu_read_lock();
+ if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
+ ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
+
if (nfqa[NFQA_PAYLOAD]) {
+ u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
+ int diff = payload_len - entry->skb->len;
+
if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
- nla_len(nfqa[NFQA_PAYLOAD]), entry) < 0)
+ payload_len, entry, diff) < 0)
verdict = NF_DROP;
+
+ if (ct)
+ nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
}
+ rcu_read_unlock();
if (nfqa[NFQA_MARK])
entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
@@ -777,7 +810,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nfqa[])
{
- struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
+ struct nfgenmsg *nfmsg = nlmsg_data(nlh);
u_int16_t queue_num = ntohs(nfmsg->res_id);
struct nfqnl_instance *queue;
struct nfqnl_msg_config_cmd *cmd = NULL;
@@ -858,6 +891,36 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
spin_unlock_bh(&queue->lock);
}
+ if (nfqa[NFQA_CFG_FLAGS]) {
+ __u32 flags, mask;
+
+ if (!queue) {
+ ret = -ENODEV;
+ goto err_out_unlock;
+ }
+
+ if (!nfqa[NFQA_CFG_MASK]) {
+ /* A mask is needed to specify which flags are being
+ * changed.
+ */
+ ret = -EINVAL;
+ goto err_out_unlock;
+ }
+
+ flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
+ mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));
+
+ if (flags >= NFQA_CFG_F_MAX) {
+ ret = -EOPNOTSUPP;
+ goto err_out_unlock;
+ }
+
+ spin_lock_bh(&queue->lock);
+ queue->flags &= ~mask;
+ queue->flags |= flags & mask;
+ spin_unlock_bh(&queue->lock);
+ }
+
err_out_unlock:
rcu_read_unlock();
return ret;
diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c
new file mode 100644
index 000000000000..ab61d66bc0b9
--- /dev/null
+++ b/net/netfilter/nfnetlink_queue_ct.c
@@ -0,0 +1,98 @@
+/*
+ * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_queue.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nfnetlink_queue.h>
+
+struct nf_conn *nfqnl_ct_get(struct sk_buff *entskb, size_t *size,
+ enum ip_conntrack_info *ctinfo)
+{
+ struct nfq_ct_hook *nfq_ct;
+ struct nf_conn *ct;
+
+ /* rcu_read_lock()ed by __nf_queue already. */
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
+ return NULL;
+
+ ct = nf_ct_get(entskb, ctinfo);
+ if (ct) {
+ if (!nf_ct_is_untracked(ct))
+ *size += nfq_ct->build_size(ct);
+ else
+ ct = NULL;
+ }
+ return ct;
+}
+
+struct nf_conn *
+nfqnl_ct_parse(const struct sk_buff *skb, const struct nlattr *attr,
+ enum ip_conntrack_info *ctinfo)
+{
+ struct nfq_ct_hook *nfq_ct;
+ struct nf_conn *ct;
+
+ /* rcu_read_lock()ed by __nf_queue already. */
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
+ return NULL;
+
+ ct = nf_ct_get(skb, ctinfo);
+ if (ct && !nf_ct_is_untracked(ct))
+ nfq_ct->parse(attr, ct);
+
+ return ct;
+}
+
+int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ struct nfq_ct_hook *nfq_ct;
+ struct nlattr *nest_parms;
+ u_int32_t tmp;
+
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
+ return 0;
+
+ nest_parms = nla_nest_start(skb, NFQA_CT | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+
+ if (nfq_ct->build(skb, ct) < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest_parms);
+
+ tmp = ctinfo;
+ if (nla_put_be32(skb, NFQA_CT_INFO, htonl(tmp)))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -1;
+}
+
+void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo, int diff)
+{
+ struct nfq_ct_nat_hook *nfq_nat_ct;
+
+ nfq_nat_ct = rcu_dereference(nfq_ct_nat_hook);
+ if (nfq_nat_ct == NULL)
+ return;
+
+ if ((ct->status & IPS_NAT_MASK) && diff)
+ nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff);
+}
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index a51de9b052be..116018560c60 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -112,6 +112,8 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
goto err3;
if (info->helper[0]) {
+ struct nf_conntrack_helper *helper;
+
ret = -ENOENT;
proto = xt_ct_find_proto(par);
if (!proto) {
@@ -120,19 +122,21 @@ static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
goto err3;
}
- ret = -ENOMEM;
- help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
- if (help == NULL)
- goto err3;
-
ret = -ENOENT;
- help->helper = nf_conntrack_helper_try_module_get(info->helper,
- par->family,
- proto);
- if (help->helper == NULL) {
+ helper = nf_conntrack_helper_try_module_get(info->helper,
+ par->family,
+ proto);
+ if (helper == NULL) {
pr_info("No such helper \"%s\"\n", info->helper);
goto err3;
}
+
+ ret = -ENOMEM;
+ help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
+ if (help == NULL)
+ goto err3;
+
+ help->helper = helper;
}
__set_bit(IPS_TEMPLATE_BIT, &ct->status);
@@ -202,6 +206,8 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
goto err3;
if (info->helper[0]) {
+ struct nf_conntrack_helper *helper;
+
ret = -ENOENT;
proto = xt_ct_find_proto(par);
if (!proto) {
@@ -210,19 +216,21 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
goto err3;
}
- ret = -ENOMEM;
- help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
- if (help == NULL)
- goto err3;
-
ret = -ENOENT;
- help->helper = nf_conntrack_helper_try_module_get(info->helper,
- par->family,
- proto);
- if (help->helper == NULL) {
+ helper = nf_conntrack_helper_try_module_get(info->helper,
+ par->family,
+ proto);
+ if (helper == NULL) {
pr_info("No such helper \"%s\"\n", info->helper);
goto err3;
}
+
+ ret = -ENOMEM;
+ help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
+ if (help == NULL)
+ goto err3;
+
+ help->helper = helper;
}
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 95237c89607a..7babe7d68716 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -41,26 +41,36 @@ nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par)
static u32 hash_v4(const struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
- __be32 ipaddr;
/* packets in either direction go into same queue */
- ipaddr = iph->saddr ^ iph->daddr;
+ if (iph->saddr < iph->daddr)
+ return jhash_3words((__force u32)iph->saddr,
+ (__force u32)iph->daddr, iph->protocol, jhash_initval);
- return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
+ return jhash_3words((__force u32)iph->daddr,
+ (__force u32)iph->saddr, iph->protocol, jhash_initval);
}
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
static u32 hash_v6(const struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
- __be32 addr[4];
+ u32 a, b, c;
+
+ if (ip6h->saddr.s6_addr32[3] < ip6h->daddr.s6_addr32[3]) {
+ a = (__force u32) ip6h->saddr.s6_addr32[3];
+ b = (__force u32) ip6h->daddr.s6_addr32[3];
+ } else {
+ b = (__force u32) ip6h->saddr.s6_addr32[3];
+ a = (__force u32) ip6h->daddr.s6_addr32[3];
+ }
- addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0];
- addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1];
- addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2];
- addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3];
+ if (ip6h->saddr.s6_addr32[1] < ip6h->daddr.s6_addr32[1])
+ c = (__force u32) ip6h->saddr.s6_addr32[1];
+ else
+ c = (__force u32) ip6h->daddr.s6_addr32[1];
- return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval);
+ return jhash_3words(a, b, c, jhash_initval);
}
#endif
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 146033a86de8..d7f195388f66 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -69,7 +69,7 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
}
/**
- * tproxy_handle_time_wait4() - handle IPv4 TCP TIME_WAIT reopen redirections
+ * tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
* @skb: The skb being processed.
* @laddr: IPv4 address to redirect to or zero.
* @lport: TCP port to redirect to or zero.
@@ -220,7 +220,7 @@ tproxy_laddr6(struct sk_buff *skb, const struct in6_addr *user_laddr,
}
/**
- * tproxy_handle_time_wait6() - handle IPv6 TCP TIME_WAIT reopen redirections
+ * tproxy_handle_time_wait6 - handle IPv6 TCP TIME_WAIT reopen redirections
* @skb: The skb being processed.
* @tproto: Transport protocol.
* @thoff: Transport protocol header offset.
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index c6d5a83450c9..70b5591a2586 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -274,38 +274,25 @@ static void connlimit_mt_destroy(const struct xt_mtdtor_param *par)
kfree(info->data);
}
-static struct xt_match connlimit_mt_reg[] __read_mostly = {
- {
- .name = "connlimit",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = connlimit_mt_check,
- .match = connlimit_mt,
- .matchsize = sizeof(struct xt_connlimit_info),
- .destroy = connlimit_mt_destroy,
- .me = THIS_MODULE,
- },
- {
- .name = "connlimit",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .checkentry = connlimit_mt_check,
- .match = connlimit_mt,
- .matchsize = sizeof(struct xt_connlimit_info),
- .destroy = connlimit_mt_destroy,
- .me = THIS_MODULE,
- },
+static struct xt_match connlimit_mt_reg __read_mostly = {
+ .name = "connlimit",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connlimit_mt_check,
+ .match = connlimit_mt,
+ .matchsize = sizeof(struct xt_connlimit_info),
+ .destroy = connlimit_mt_destroy,
+ .me = THIS_MODULE,
};
static int __init connlimit_mt_init(void)
{
- return xt_register_matches(connlimit_mt_reg,
- ARRAY_SIZE(connlimit_mt_reg));
+ return xt_register_match(&connlimit_mt_reg);
}
static void __exit connlimit_mt_exit(void)
{
- xt_unregister_matches(connlimit_mt_reg, ARRAY_SIZE(connlimit_mt_reg));
+ xt_unregister_match(&connlimit_mt_reg);
}
module_init(connlimit_mt_init);
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index fc0d6dbe5d17..ae2ad1eec8d0 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -75,6 +75,7 @@ struct recent_entry {
struct recent_table {
struct list_head list;
char name[XT_RECENT_NAME_LEN];
+ union nf_inet_addr mask;
unsigned int refcnt;
unsigned int entries;
struct list_head lru_list;
@@ -228,10 +229,10 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
struct net *net = dev_net(par->in ? par->in : par->out);
struct recent_net *recent_net = recent_pernet(net);
- const struct xt_recent_mtinfo *info = par->matchinfo;
+ const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
struct recent_table *t;
struct recent_entry *e;
- union nf_inet_addr addr = {};
+ union nf_inet_addr addr = {}, addr_mask;
u_int8_t ttl;
bool ret = info->invert;
@@ -261,12 +262,15 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
spin_lock_bh(&recent_lock);
t = recent_table_lookup(recent_net, info->name);
- e = recent_entry_lookup(t, &addr, par->family,
+
+ nf_inet_addr_mask(&addr, &addr_mask, &t->mask);
+
+ e = recent_entry_lookup(t, &addr_mask, par->family,
(info->check_set & XT_RECENT_TTL) ? ttl : 0);
if (e == NULL) {
if (!(info->check_set & XT_RECENT_SET))
goto out;
- e = recent_entry_init(t, &addr, par->family, ttl);
+ e = recent_entry_init(t, &addr_mask, par->family, ttl);
if (e == NULL)
par->hotdrop = true;
ret = !ret;
@@ -306,10 +310,10 @@ out:
return ret;
}
-static int recent_mt_check(const struct xt_mtchk_param *par)
+static int recent_mt_check(const struct xt_mtchk_param *par,
+ const struct xt_recent_mtinfo_v1 *info)
{
struct recent_net *recent_net = recent_pernet(par->net);
- const struct xt_recent_mtinfo *info = par->matchinfo;
struct recent_table *t;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *pde;
@@ -361,6 +365,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par)
goto out;
}
t->refcnt = 1;
+
+ memcpy(&t->mask, &info->mask, sizeof(t->mask));
strcpy(t->name, info->name);
INIT_LIST_HEAD(&t->lru_list);
for (i = 0; i < ip_list_hash_size; i++)
@@ -385,10 +391,28 @@ out:
return ret;
}
+static int recent_mt_check_v0(const struct xt_mtchk_param *par)
+{
+ const struct xt_recent_mtinfo_v0 *info_v0 = par->matchinfo;
+ struct xt_recent_mtinfo_v1 info_v1;
+
+ /* Copy revision 0 structure to revision 1 */
+ memcpy(&info_v1, info_v0, sizeof(struct xt_recent_mtinfo));
+ /* Set default mask to ensure backward compatible behaviour */
+ memset(info_v1.mask.all, 0xFF, sizeof(info_v1.mask.all));
+
+ return recent_mt_check(par, &info_v1);
+}
+
+static int recent_mt_check_v1(const struct xt_mtchk_param *par)
+{
+ return recent_mt_check(par, par->matchinfo);
+}
+
static void recent_mt_destroy(const struct xt_mtdtor_param *par)
{
struct recent_net *recent_net = recent_pernet(par->net);
- const struct xt_recent_mtinfo *info = par->matchinfo;
+ const struct xt_recent_mtinfo_v1 *info = par->matchinfo;
struct recent_table *t;
mutex_lock(&recent_mutex);
@@ -625,7 +649,7 @@ static struct xt_match recent_mt_reg[] __read_mostly = {
.family = NFPROTO_IPV4,
.match = recent_mt,
.matchsize = sizeof(struct xt_recent_mtinfo),
- .checkentry = recent_mt_check,
+ .checkentry = recent_mt_check_v0,
.destroy = recent_mt_destroy,
.me = THIS_MODULE,
},
@@ -635,10 +659,30 @@ static struct xt_match recent_mt_reg[] __read_mostly = {
.family = NFPROTO_IPV6,
.match = recent_mt,
.matchsize = sizeof(struct xt_recent_mtinfo),
- .checkentry = recent_mt_check,
+ .checkentry = recent_mt_check_v0,
+ .destroy = recent_mt_destroy,
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "recent",
+ .revision = 1,
+ .family = NFPROTO_IPV4,
+ .match = recent_mt,
+ .matchsize = sizeof(struct xt_recent_mtinfo_v1),
+ .checkentry = recent_mt_check_v1,
.destroy = recent_mt_destroy,
.me = THIS_MODULE,
},
+ {
+ .name = "recent",
+ .revision = 1,
+ .family = NFPROTO_IPV6,
+ .match = recent_mt,
+ .matchsize = sizeof(struct xt_recent_mtinfo_v1),
+ .checkentry = recent_mt_check_v1,
+ .destroy = recent_mt_destroy,
+ .me = THIS_MODULE,
+ }
};
static int __init recent_mt_init(void)
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index 035960ec5cb9..c6f7db720d84 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -16,6 +16,7 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
@@ -310,7 +311,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
info->del_set.flags, 0, UINT_MAX);
/* Normalize to fit into jiffies */
- if (add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
+ if (add_opt.timeout != IPSET_NO_TIMEOUT &&
+ add_opt.timeout > UINT_MAX/MSEC_PER_SEC)
add_opt.timeout = UINT_MAX/MSEC_PER_SEC;
if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index b3025a603d56..5463969da45b 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -80,6 +80,7 @@ struct netlink_sock {
struct mutex *cb_mutex;
struct mutex cb_def_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
+ void (*netlink_bind)(int group);
struct module *module;
};
@@ -124,6 +125,7 @@ struct netlink_table {
unsigned int groups;
struct mutex *cb_mutex;
struct module *module;
+ void (*bind)(int group);
int registered;
};
@@ -444,6 +446,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
struct module *module = NULL;
struct mutex *cb_mutex;
struct netlink_sock *nlk;
+ void (*bind)(int group);
int err = 0;
sock->state = SS_UNCONNECTED;
@@ -468,6 +471,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
else
err = -EPROTONOSUPPORT;
cb_mutex = nl_table[protocol].cb_mutex;
+ bind = nl_table[protocol].bind;
netlink_unlock_table();
if (err < 0)
@@ -483,6 +487,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
nlk = nlk_sk(sock->sk);
nlk->module = module;
+ nlk->netlink_bind = bind;
out:
return err;
@@ -683,6 +688,15 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
netlink_update_listeners(sk);
netlink_table_ungrab();
+ if (nlk->netlink_bind && nlk->groups[0]) {
+ int i;
+
+ for (i=0; i<nlk->ngroups; i++) {
+ if (test_bit(i, nlk->groups))
+ nlk->netlink_bind(i);
+ }
+ }
+
return 0;
}
@@ -1239,6 +1253,10 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
netlink_update_socket_mc(nlk, val,
optname == NETLINK_ADD_MEMBERSHIP);
netlink_table_ungrab();
+
+ if (nlk->netlink_bind)
+ nlk->netlink_bind(val);
+
err = 0;
break;
}
@@ -1503,14 +1521,16 @@ static void netlink_data_ready(struct sock *sk, int len)
*/
struct sock *
-netlink_kernel_create(struct net *net, int unit, unsigned int groups,
- void (*input)(struct sk_buff *skb),
- struct mutex *cb_mutex, struct module *module)
+netlink_kernel_create(struct net *net, int unit,
+ struct module *module,
+ struct netlink_kernel_cfg *cfg)
{
struct socket *sock;
struct sock *sk;
struct netlink_sock *nlk;
struct listeners *listeners = NULL;
+ struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
+ unsigned int groups;
BUG_ON(!nl_table);
@@ -1532,16 +1552,18 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
sk = sock->sk;
sk_change_net(sk, net);
- if (groups < 32)
+ if (!cfg || cfg->groups < 32)
groups = 32;
+ else
+ groups = cfg->groups;
listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
if (!listeners)
goto out_sock_release;
sk->sk_data_ready = netlink_data_ready;
- if (input)
- nlk_sk(sk)->netlink_rcv = input;
+ if (cfg && cfg->input)
+ nlk_sk(sk)->netlink_rcv = cfg->input;
if (netlink_insert(sk, net, 0))
goto out_sock_release;
@@ -1555,6 +1577,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
rcu_assign_pointer(nl_table[unit].listeners, listeners);
nl_table[unit].cb_mutex = cb_mutex;
nl_table[unit].module = module;
+ nl_table[unit].bind = cfg ? cfg->bind : NULL;
nl_table[unit].registered = 1;
} else {
kfree(listeners);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2cc7c1ee7690..fda497412fc3 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -33,7 +33,7 @@ void genl_unlock(void)
}
EXPORT_SYMBOL(genl_unlock);
-#ifdef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_LOCKDEP
int lockdep_genl_is_held(void)
{
return lockdep_is_held(&genl_mutex);
@@ -504,7 +504,7 @@ EXPORT_SYMBOL(genl_unregister_family);
* @pid: netlink pid the message is addressed to
* @seq: sequence number (usually the one of the sender)
* @family: generic netlink family
- * @flags netlink message flags
+ * @flags: netlink message flags
* @cmd: generic netlink command
*
* Returns pointer to user specific header
@@ -915,10 +915,14 @@ static struct genl_multicast_group notify_grp = {
static int __net_init genl_pernet_init(struct net *net)
{
+ struct netlink_kernel_cfg cfg = {
+ .input = genl_rcv,
+ .cb_mutex = &genl_mutex,
+ };
+
/* we'll bump the group number right afterwards */
- net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, 0,
- genl_rcv, &genl_mutex,
- THIS_MODULE);
+ net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC,
+ THIS_MODULE, &cfg);
if (!net->genl_sock && net_eq(net, &init_net))
panic("GENL: Cannot initialize generic netlink\n");
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 9f6ce011d35d..ff749794bc5b 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -29,6 +29,8 @@
#include <linux/slab.h>
#include <linux/nfc.h>
+#include <net/genetlink.h>
+
#include "nfc.h"
#define VERSION "0.1"
@@ -121,14 +123,14 @@ error:
* The device remains polling for targets until a target is found or
* the nfc_stop_poll function is called.
*/
-int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
+int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols)
{
int rc;
- pr_debug("dev_name=%s protocols=0x%x\n",
- dev_name(&dev->dev), protocols);
+ pr_debug("dev_name %s initiator protocols 0x%x target protocols 0x%x\n",
+ dev_name(&dev->dev), im_protocols, tm_protocols);
- if (!protocols)
+ if (!im_protocols && !tm_protocols)
return -EINVAL;
device_lock(&dev->dev);
@@ -143,9 +145,11 @@ int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
goto error;
}
- rc = dev->ops->start_poll(dev, protocols);
- if (!rc)
+ rc = dev->ops->start_poll(dev, im_protocols, tm_protocols);
+ if (!rc) {
dev->polling = true;
+ dev->rf_mode = NFC_RF_NONE;
+ }
error:
device_unlock(&dev->dev);
@@ -235,8 +239,10 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
}
rc = dev->ops->dep_link_up(dev, target, comm_mode, gb, gb_len);
- if (!rc)
+ if (!rc) {
dev->active_target = target;
+ dev->rf_mode = NFC_RF_INITIATOR;
+ }
error:
device_unlock(&dev->dev);
@@ -264,11 +270,6 @@ int nfc_dep_link_down(struct nfc_dev *dev)
goto error;
}
- if (dev->dep_rf_mode == NFC_RF_TARGET) {
- rc = -EOPNOTSUPP;
- goto error;
- }
-
rc = dev->ops->dep_link_down(dev);
if (!rc) {
dev->dep_link_up = false;
@@ -286,7 +287,6 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode)
{
dev->dep_link_up = true;
- dev->dep_rf_mode = rf_mode;
nfc_llcp_mac_is_up(dev, target_idx, comm_mode, rf_mode);
@@ -330,6 +330,7 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
rc = dev->ops->activate_target(dev, target, protocol);
if (!rc) {
dev->active_target = target;
+ dev->rf_mode = NFC_RF_INITIATOR;
if (dev->ops->check_presence)
mod_timer(&dev->check_pres_timer, jiffies +
@@ -409,27 +410,30 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
goto error;
}
- if (dev->active_target == NULL) {
- rc = -ENOTCONN;
- kfree_skb(skb);
- goto error;
- }
+ if (dev->rf_mode == NFC_RF_INITIATOR && dev->active_target != NULL) {
+ if (dev->active_target->idx != target_idx) {
+ rc = -EADDRNOTAVAIL;
+ kfree_skb(skb);
+ goto error;
+ }
- if (dev->active_target->idx != target_idx) {
- rc = -EADDRNOTAVAIL;
+ if (dev->ops->check_presence)
+ del_timer_sync(&dev->check_pres_timer);
+
+ rc = dev->ops->im_transceive(dev, dev->active_target, skb, cb,
+ cb_context);
+
+ if (!rc && dev->ops->check_presence)
+ mod_timer(&dev->check_pres_timer, jiffies +
+ msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
+ } else if (dev->rf_mode == NFC_RF_TARGET && dev->ops->tm_send != NULL) {
+ rc = dev->ops->tm_send(dev, skb);
+ } else {
+ rc = -ENOTCONN;
kfree_skb(skb);
goto error;
}
- if (dev->ops->check_presence)
- del_timer_sync(&dev->check_pres_timer);
-
- rc = dev->ops->data_exchange(dev, dev->active_target, skb, cb,
- cb_context);
-
- if (!rc && dev->ops->check_presence)
- mod_timer(&dev->check_pres_timer, jiffies +
- msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
error:
device_unlock(&dev->dev);
@@ -447,6 +451,63 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, u8 *gb, u8 gb_len)
}
EXPORT_SYMBOL(nfc_set_remote_general_bytes);
+u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len)
+{
+ pr_debug("dev_name=%s\n", dev_name(&dev->dev));
+
+ return nfc_llcp_general_bytes(dev, gb_len);
+}
+EXPORT_SYMBOL(nfc_get_local_general_bytes);
+
+int nfc_tm_data_received(struct nfc_dev *dev, struct sk_buff *skb)
+{
+ /* Only LLCP target mode for now */
+ if (dev->dep_link_up == false) {
+ kfree_skb(skb);
+ return -ENOLINK;
+ }
+
+ return nfc_llcp_data_received(dev, skb);
+}
+EXPORT_SYMBOL(nfc_tm_data_received);
+
+int nfc_tm_activated(struct nfc_dev *dev, u32 protocol, u8 comm_mode,
+ u8 *gb, size_t gb_len)
+{
+ int rc;
+
+ device_lock(&dev->dev);
+
+ dev->polling = false;
+
+ if (gb != NULL) {
+ rc = nfc_set_remote_general_bytes(dev, gb, gb_len);
+ if (rc < 0)
+ goto out;
+ }
+
+ dev->rf_mode = NFC_RF_TARGET;
+
+ if (protocol == NFC_PROTO_NFC_DEP_MASK)
+ nfc_dep_link_is_up(dev, 0, comm_mode, NFC_RF_TARGET);
+
+ rc = nfc_genl_tm_activated(dev, protocol);
+
+out:
+ device_unlock(&dev->dev);
+
+ return rc;
+}
+EXPORT_SYMBOL(nfc_tm_activated);
+
+int nfc_tm_deactivated(struct nfc_dev *dev)
+{
+ dev->dep_link_up = false;
+
+ return nfc_genl_tm_deactivated(dev);
+}
+EXPORT_SYMBOL(nfc_tm_deactivated);
+
/**
* nfc_alloc_send_skb - allocate a skb for data exchange responses
*
@@ -501,6 +562,8 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
* The device driver must call this function when one or many nfc targets
* are found. After calling this function, the device driver must stop
* polling for targets.
+ * NOTE: This function can be called with targets=NULL and n_targets=0 to
+ * notify a driver error, meaning that the polling operation cannot complete.
* IMPORTANT: this function must not be called from an atomic context.
* In addition, it must also not be called from a context that would prevent
* the NFC Core to call other nfc ops entry point concurrently.
@@ -512,23 +575,33 @@ int nfc_targets_found(struct nfc_dev *dev,
pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
- dev->polling = false;
-
for (i = 0; i < n_targets; i++)
targets[i].idx = dev->target_next_idx++;
device_lock(&dev->dev);
+ if (dev->polling == false) {
+ device_unlock(&dev->dev);
+ return 0;
+ }
+
+ dev->polling = false;
+
dev->targets_generation++;
kfree(dev->targets);
- dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target),
- GFP_ATOMIC);
+ dev->targets = NULL;
- if (!dev->targets) {
- dev->n_targets = 0;
- device_unlock(&dev->dev);
- return -ENOMEM;
+ if (targets) {
+ dev->targets = kmemdup(targets,
+ n_targets * sizeof(struct nfc_target),
+ GFP_ATOMIC);
+
+ if (!dev->targets) {
+ dev->n_targets = 0;
+ device_unlock(&dev->dev);
+ return -ENOMEM;
+ }
}
dev->n_targets = n_targets;
@@ -592,6 +665,12 @@ int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
}
EXPORT_SYMBOL(nfc_target_lost);
+inline void nfc_driver_failure(struct nfc_dev *dev, int err)
+{
+ nfc_targets_found(dev, NULL, 0);
+}
+EXPORT_SYMBOL(nfc_driver_failure);
+
static void nfc_release(struct device *d)
{
struct nfc_dev *dev = to_nfc_dev(d);
@@ -678,7 +757,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
struct nfc_dev *dev;
if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
- !ops->deactivate_target || !ops->data_exchange)
+ !ops->deactivate_target || !ops->im_transceive)
return NULL;
if (!supported_protocols)
@@ -847,3 +926,5 @@ MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
MODULE_DESCRIPTION("NFC Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_NFC);
+MODULE_ALIAS_GENL_FAMILY(NFC_GENL_NAME);
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
index 8729abf5f18b..46362ef979db 100644
--- a/net/nfc/hci/command.c
+++ b/net/nfc/hci/command.c
@@ -28,26 +28,14 @@
#include "hci.h"
-static int nfc_hci_result_to_errno(u8 result)
-{
- switch (result) {
- case NFC_HCI_ANY_OK:
- return 0;
- case NFC_HCI_ANY_E_TIMEOUT:
- return -ETIMEDOUT;
- default:
- return -1;
- }
-}
-
-static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, u8 result,
+static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, int err,
struct sk_buff *skb, void *cb_data)
{
struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data;
- pr_debug("HCI Cmd completed with HCI result=%d\n", result);
+ pr_debug("HCI Cmd completed with result=%d\n", err);
- hcp_ew->exec_result = nfc_hci_result_to_errno(result);
+ hcp_ew->exec_result = err;
if (hcp_ew->exec_result == 0)
hcp_ew->result_skb = skb;
else
@@ -311,9 +299,9 @@ int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
}
EXPORT_SYMBOL(nfc_hci_disconnect_all_gates);
-int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
+int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
+ u8 pipe)
{
- u8 pipe = NFC_HCI_INVALID_PIPE;
bool pipe_created = false;
int r;
@@ -322,6 +310,9 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
return -EADDRINUSE;
+ if (pipe != NFC_HCI_INVALID_PIPE)
+ goto pipe_is_open;
+
switch (dest_gate) {
case NFC_HCI_LINK_MGMT_GATE:
pipe = NFC_HCI_LINK_MGMT_PIPE;
@@ -347,6 +338,7 @@ int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
return r;
}
+pipe_is_open:
hdev->gate2pipe[dest_gate] = pipe;
return 0;
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index e1a640d2b588..1ac7b3fac6c9 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -32,6 +32,18 @@
/* Largest headroom needed for outgoing HCI commands */
#define HCI_CMDS_HEADROOM 1
+static int nfc_hci_result_to_errno(u8 result)
+{
+ switch (result) {
+ case NFC_HCI_ANY_OK:
+ return 0;
+ case NFC_HCI_ANY_E_TIMEOUT:
+ return -ETIME;
+ default:
+ return -1;
+ }
+}
+
static void nfc_hci_msg_tx_work(struct work_struct *work)
{
struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
@@ -46,7 +58,7 @@ static void nfc_hci_msg_tx_work(struct work_struct *work)
if (timer_pending(&hdev->cmd_timer) == 0) {
if (hdev->cmd_pending_msg->cb)
hdev->cmd_pending_msg->cb(hdev,
- NFC_HCI_ANY_E_TIMEOUT,
+ -ETIME,
NULL,
hdev->
cmd_pending_msg->
@@ -71,8 +83,7 @@ next_msg:
kfree_skb(skb);
skb_queue_purge(&msg->msg_frags);
if (msg->cb)
- msg->cb(hdev, NFC_HCI_ANY_E_NOK, NULL,
- msg->cb_context);
+ msg->cb(hdev, r, NULL, msg->cb_context);
kfree(msg);
break;
}
@@ -116,20 +127,13 @@ static void nfc_hci_msg_rx_work(struct work_struct *work)
}
}
-void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
- struct sk_buff *skb)
+static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err,
+ struct sk_buff *skb)
{
- mutex_lock(&hdev->msg_tx_mutex);
-
- if (hdev->cmd_pending_msg == NULL) {
- kfree_skb(skb);
- goto exit;
- }
-
del_timer_sync(&hdev->cmd_timer);
if (hdev->cmd_pending_msg->cb)
- hdev->cmd_pending_msg->cb(hdev, result, skb,
+ hdev->cmd_pending_msg->cb(hdev, err, skb,
hdev->cmd_pending_msg->cb_context);
else
kfree_skb(skb);
@@ -138,6 +142,19 @@ void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
hdev->cmd_pending_msg = NULL;
queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
+}
+
+void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
+ struct sk_buff *skb)
+{
+ mutex_lock(&hdev->msg_tx_mutex);
+
+ if (hdev->cmd_pending_msg == NULL) {
+ kfree_skb(skb);
+ goto exit;
+ }
+
+ __nfc_hci_cmd_completion(hdev, nfc_hci_result_to_errno(result), skb);
exit:
mutex_unlock(&hdev->msg_tx_mutex);
@@ -170,6 +187,7 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
struct nfc_target *targets;
struct sk_buff *atqa_skb = NULL;
struct sk_buff *sak_skb = NULL;
+ struct sk_buff *uid_skb = NULL;
int r;
pr_debug("from gate %d\n", gate);
@@ -205,6 +223,19 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data);
targets->sel_res = sak_skb->data[0];
+ r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_RF_READER_A_UID, &uid_skb);
+ if (r < 0)
+ goto exit;
+
+ if (uid_skb->len == 0 || uid_skb->len > NFC_NFCID1_MAXSIZE) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ memcpy(targets->nfcid1, uid_skb->data, uid_skb->len);
+ targets->nfcid1_len = uid_skb->len;
+
if (hdev->ops->complete_target_discovered) {
r = hdev->ops->complete_target_discovered(hdev, gate,
targets);
@@ -213,7 +244,7 @@ static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
}
break;
case NFC_HCI_RF_READER_B_GATE:
- targets->supported_protocols = NFC_PROTO_ISO14443_MASK;
+ targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
break;
default:
if (hdev->ops->target_from_gate)
@@ -240,6 +271,7 @@ exit:
kfree(targets);
kfree_skb(atqa_skb);
kfree_skb(sak_skb);
+ kfree_skb(uid_skb);
return r;
}
@@ -298,15 +330,15 @@ static void nfc_hci_cmd_timeout(unsigned long data)
}
static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
- u8 gates[])
+ struct nfc_hci_gate *gates)
{
int r;
- u8 *p = gates;
while (gate_count--) {
- r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, *p);
+ r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
+ gates->gate, gates->pipe);
if (r < 0)
return r;
- p++;
+ gates++;
}
return 0;
@@ -316,14 +348,13 @@ static int hci_dev_session_init(struct nfc_hci_dev *hdev)
{
struct sk_buff *skb = NULL;
int r;
- u8 hci_gates[] = { /* NFC_HCI_ADMIN_GATE MUST be first */
- NFC_HCI_ADMIN_GATE, NFC_HCI_LOOPBACK_GATE,
- NFC_HCI_ID_MGMT_GATE, NFC_HCI_LINK_MGMT_GATE,
- NFC_HCI_RF_READER_B_GATE, NFC_HCI_RF_READER_A_GATE
- };
+
+ if (hdev->init_data.gates[0].gate != NFC_HCI_ADMIN_GATE)
+ return -EPROTO;
r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
- NFC_HCI_ADMIN_GATE);
+ hdev->init_data.gates[0].gate,
+ hdev->init_data.gates[0].pipe);
if (r < 0)
goto exit;
@@ -351,10 +382,6 @@ static int hci_dev_session_init(struct nfc_hci_dev *hdev)
if (r < 0)
goto exit;
- r = hci_dev_connect_gates(hdev, sizeof(hci_gates), hci_gates);
- if (r < 0)
- goto disconnect_all;
-
r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
hdev->init_data.gates);
if (r < 0)
@@ -481,12 +508,13 @@ static int hci_dev_down(struct nfc_dev *nfc_dev)
return 0;
}
-static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
+static int hci_start_poll(struct nfc_dev *nfc_dev,
+ u32 im_protocols, u32 tm_protocols)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
if (hdev->ops->start_poll)
- return hdev->ops->start_poll(hdev, protocols);
+ return hdev->ops->start_poll(hdev, im_protocols, tm_protocols);
else
return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
@@ -511,9 +539,9 @@ static void hci_deactivate_target(struct nfc_dev *nfc_dev,
{
}
-static int hci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
- struct sk_buff *skb, data_exchange_cb_t cb,
- void *cb_context)
+static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct sk_buff *skb, data_exchange_cb_t cb,
+ void *cb_context)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
int r;
@@ -579,7 +607,7 @@ static struct nfc_ops hci_nfc_ops = {
.stop_poll = hci_stop_poll,
.activate_target = hci_activate_target,
.deactivate_target = hci_deactivate_target,
- .data_exchange = hci_data_exchange,
+ .im_transceive = hci_transceive,
.check_presence = hci_check_presence,
};
@@ -682,13 +710,12 @@ EXPORT_SYMBOL(nfc_hci_register_device);
void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
{
- struct hci_msg *msg;
+ struct hci_msg *msg, *n;
skb_queue_purge(&hdev->rx_hcp_frags);
skb_queue_purge(&hdev->msg_rx_queue);
- while ((msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg,
- msg_l)) != NULL) {
+ list_for_each_entry_safe(msg, n, &hdev->msg_tx_queue, msg_l) {
list_del(&msg->msg_l);
skb_queue_purge(&msg->msg_frags);
kfree(msg);
@@ -716,6 +743,27 @@ void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
}
EXPORT_SYMBOL(nfc_hci_get_clientdata);
+static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
+{
+ mutex_lock(&hdev->msg_tx_mutex);
+
+ if (hdev->cmd_pending_msg == NULL) {
+ nfc_driver_failure(hdev->ndev, err);
+ goto exit;
+ }
+
+ __nfc_hci_cmd_completion(hdev, err, NULL);
+
+exit:
+ mutex_unlock(&hdev->msg_tx_mutex);
+}
+
+void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err)
+{
+ nfc_hci_failure(hdev, err);
+}
+EXPORT_SYMBOL(nfc_hci_driver_failure);
+
void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
{
struct hcp_packet *packet;
@@ -726,16 +774,6 @@ void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
struct sk_buff *frag_skb;
int msg_len;
- if (skb == NULL) {
- /* TODO ELa: lower layer had permanent failure, need to
- * propagate that up
- */
-
- skb_queue_purge(&hdev->rx_hcp_frags);
-
- return;
- }
-
packet = (struct hcp_packet *)skb->data;
if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
skb_queue_tail(&hdev->rx_hcp_frags, skb);
@@ -756,9 +794,8 @@ void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
msg_len, GFP_KERNEL);
if (hcp_skb == NULL) {
- /* TODO ELa: cannot deliver HCP message. How to
- * propagate error up?
- */
+ nfc_hci_failure(hdev, -ENOMEM);
+ return;
}
*skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h
index 45f2fe4fd486..fa9a21e92239 100644
--- a/net/nfc/hci/hci.h
+++ b/net/nfc/hci/hci.h
@@ -37,10 +37,11 @@ struct hcp_packet {
/*
* HCI command execution completion callback.
- * result will be one of the HCI response codes.
- * skb contains the response data and must be disposed.
+ * result will be a standard linux error (may be converted from HCI response)
+ * skb contains the response data and must be disposed, or may be NULL if
+ * an error occured
*/
-typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, u8 result,
+typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, int result,
struct sk_buff *skb, void *cb_data);
struct hcp_exec_waiter {
@@ -131,9 +132,4 @@ void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
#define NFC_HCI_ANY_E_REG_ACCESS_DENIED 0x0a
#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b
-/* Pipes */
-#define NFC_HCI_INVALID_PIPE 0x80
-#define NFC_HCI_LINK_MGMT_PIPE 0x00
-#define NFC_HCI_ADMIN_PIPE 0x01
-
#endif /* __LOCAL_HCI_H */
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
index 7212cf2c5785..f4dad1a89740 100644
--- a/net/nfc/hci/hcp.c
+++ b/net/nfc/hci/hcp.c
@@ -105,7 +105,7 @@ int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
}
mutex_lock(&hdev->msg_tx_mutex);
- list_add_tail(&hdev->msg_tx_queue, &cmd->msg_l);
+ list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
mutex_unlock(&hdev->msg_tx_mutex);
queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
index 5665dc6d893a..6f840c18c892 100644
--- a/net/nfc/hci/shdlc.c
+++ b/net/nfc/hci/shdlc.c
@@ -340,15 +340,6 @@ static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
shdlc->state = SHDLC_CONNECTED;
} else {
shdlc->state = SHDLC_DISCONNECTED;
-
- /*
- * TODO: Could it be possible that there are pending
- * executing commands that are waiting for connect to complete
- * before they can be carried? As connect is a blocking
- * operation, it would require that the userspace process can
- * send commands on the same device from a second thread before
- * the device is up. I don't think that is possible, is it?
- */
}
shdlc->connect_result = r;
@@ -413,12 +404,12 @@ static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
r = nfc_shdlc_connect_send_ua(shdlc);
nfc_shdlc_connect_complete(shdlc, r);
}
- } else if (shdlc->state > SHDLC_NEGOCIATING) {
+ } else if (shdlc->state == SHDLC_CONNECTED) {
/*
- * TODO: Chip wants to reset link
- * send ua, empty skb lists, reset counters
- * propagate info to HCI layer
+ * Chip wants to reset link. This is unexpected and
+ * unsupported.
*/
+ shdlc->hard_fault = -ECONNRESET;
}
break;
case U_FRAME_UA:
@@ -523,10 +514,6 @@ static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
r = shdlc->ops->xmit(shdlc, skb);
if (r < 0) {
- /*
- * TODO: Cannot send, shdlc machine is dead, we
- * must propagate the information up to HCI.
- */
shdlc->hard_fault = r;
break;
}
@@ -590,6 +577,11 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
skb_queue_purge(&shdlc->ack_pending_q);
break;
case SHDLC_CONNECTING:
+ if (shdlc->hard_fault) {
+ nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
+ break;
+ }
+
if (shdlc->connect_tries++ < 5)
r = nfc_shdlc_connect_initiate(shdlc);
else
@@ -610,6 +602,11 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
}
nfc_shdlc_handle_rcv_queue(shdlc);
+
+ if (shdlc->hard_fault) {
+ nfc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
+ break;
+ }
break;
case SHDLC_CONNECTED:
nfc_shdlc_handle_rcv_queue(shdlc);
@@ -637,10 +634,7 @@ static void nfc_shdlc_sm_work(struct work_struct *work)
}
if (shdlc->hard_fault) {
- /*
- * TODO: Handle hard_fault that occured during
- * this invocation of the shdlc worker
- */
+ nfc_hci_driver_failure(shdlc->hdev, shdlc->hard_fault);
}
break;
default:
@@ -765,14 +759,16 @@ static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
return 0;
}
-static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols)
+static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev,
+ u32 im_protocols, u32 tm_protocols)
{
struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
pr_debug("\n");
if (shdlc->ops->start_poll)
- return shdlc->ops->start_poll(shdlc, protocols);
+ return shdlc->ops->start_poll(shdlc,
+ im_protocols, tm_protocols);
return 0;
}
@@ -921,8 +917,6 @@ void nfc_shdlc_free(struct nfc_shdlc *shdlc)
{
pr_debug("\n");
- /* TODO: Check that this cannot be called while still in use */
-
nfc_hci_unregister_device(shdlc->hdev);
nfc_hci_free_device(shdlc->hdev);
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index bf8ae4f0b90c..b982b5b890d7 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -51,7 +51,7 @@ static u8 llcp_tlv8(u8 *tlv, u8 type)
return tlv[2];
}
-static u8 llcp_tlv16(u8 *tlv, u8 type)
+static u16 llcp_tlv16(u8 *tlv, u8 type)
{
if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
return 0;
@@ -67,7 +67,7 @@ static u8 llcp_tlv_version(u8 *tlv)
static u16 llcp_tlv_miux(u8 *tlv)
{
- return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7f;
+ return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7ff;
}
static u16 llcp_tlv_wks(u8 *tlv)
@@ -117,8 +117,8 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
return tlv;
}
-int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
- u8 *tlv_array, u16 tlv_array_len)
+int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
+ u8 *tlv_array, u16 tlv_array_len)
{
u8 *tlv = tlv_array, type, length, offset = 0;
@@ -149,8 +149,45 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
case LLCP_TLV_OPT:
local->remote_opt = llcp_tlv_opt(tlv);
break;
+ default:
+ pr_err("Invalid gt tlv value 0x%x\n", type);
+ break;
+ }
+
+ offset += length + 2;
+ tlv += length + 2;
+ }
+
+ pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x\n",
+ local->remote_version, local->remote_miu,
+ local->remote_lto, local->remote_opt,
+ local->remote_wks);
+
+ return 0;
+}
+
+int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
+ u8 *tlv_array, u16 tlv_array_len)
+{
+ u8 *tlv = tlv_array, type, length, offset = 0;
+
+ pr_debug("TLV array length %d\n", tlv_array_len);
+
+ if (sock == NULL)
+ return -ENOTCONN;
+
+ while (offset < tlv_array_len) {
+ type = tlv[0];
+ length = tlv[1];
+
+ pr_debug("type 0x%x length %d\n", type, length);
+
+ switch (type) {
+ case LLCP_TLV_MIUX:
+ sock->miu = llcp_tlv_miux(tlv) + 128;
+ break;
case LLCP_TLV_RW:
- local->remote_rw = llcp_tlv_rw(tlv);
+ sock->rw = llcp_tlv_rw(tlv);
break;
case LLCP_TLV_SN:
break;
@@ -163,10 +200,7 @@ int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
tlv += length + 2;
}
- pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x rw %d\n",
- local->remote_version, local->remote_miu,
- local->remote_lto, local->remote_opt,
- local->remote_wks, local->remote_rw);
+ pr_debug("sock %p rw %d miu %d\n", sock, sock->rw, sock->miu);
return 0;
}
@@ -474,7 +508,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
while (remaining_len > 0) {
- frag_len = min_t(size_t, local->remote_miu, remaining_len);
+ frag_len = min_t(size_t, sock->miu, remaining_len);
pr_debug("Fragment %zd bytes remaining %zd",
frag_len, remaining_len);
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 42994fac26d6..82f0f7588b46 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -31,47 +31,41 @@ static u8 llcp_magic[3] = {0x46, 0x66, 0x6d};
static struct list_head llcp_devices;
-static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
+void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *sk)
{
- struct nfc_llcp_sock *parent, *s, *n;
- struct sock *sk, *parent_sk;
- int i;
-
- mutex_lock(&local->socket_lock);
-
- for (i = 0; i < LLCP_MAX_SAP; i++) {
- parent = local->sockets[i];
- if (parent == NULL)
- continue;
-
- /* Release all child sockets */
- list_for_each_entry_safe(s, n, &parent->list, list) {
- list_del_init(&s->list);
- sk = &s->sk;
-
- lock_sock(sk);
-
- if (sk->sk_state == LLCP_CONNECTED)
- nfc_put_device(s->dev);
+ write_lock(&l->lock);
+ sk_add_node(sk, &l->head);
+ write_unlock(&l->lock);
+}
- sk->sk_state = LLCP_CLOSED;
+void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *sk)
+{
+ write_lock(&l->lock);
+ sk_del_node_init(sk);
+ write_unlock(&l->lock);
+}
- release_sock(sk);
+static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)
+{
+ struct sock *sk;
+ struct hlist_node *node, *tmp;
+ struct nfc_llcp_sock *llcp_sock;
- sock_orphan(sk);
+ write_lock(&local->sockets.lock);
- s->local = NULL;
- }
+ sk_for_each_safe(sk, node, tmp, &local->sockets.head) {
+ llcp_sock = nfc_llcp_sock(sk);
- parent_sk = &parent->sk;
+ lock_sock(sk);
- lock_sock(parent_sk);
+ if (sk->sk_state == LLCP_CONNECTED)
+ nfc_put_device(llcp_sock->dev);
- if (parent_sk->sk_state == LLCP_LISTEN) {
+ if (sk->sk_state == LLCP_LISTEN) {
struct nfc_llcp_sock *lsk, *n;
struct sock *accept_sk;
- list_for_each_entry_safe(lsk, n, &parent->accept_queue,
+ list_for_each_entry_safe(lsk, n, &llcp_sock->accept_queue,
accept_queue) {
accept_sk = &lsk->sk;
lock_sock(accept_sk);
@@ -83,35 +77,94 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local)
release_sock(accept_sk);
sock_orphan(accept_sk);
+ }
- lsk->local = NULL;
+ if (listen == true) {
+ release_sock(sk);
+ continue;
}
}
- if (parent_sk->sk_state == LLCP_CONNECTED)
- nfc_put_device(parent->dev);
-
- parent_sk->sk_state = LLCP_CLOSED;
+ sk->sk_state = LLCP_CLOSED;
- release_sock(parent_sk);
+ release_sock(sk);
- sock_orphan(parent_sk);
+ sock_orphan(sk);
- parent->local = NULL;
+ sk_del_node_init(sk);
}
- mutex_unlock(&local->socket_lock);
+ write_unlock(&local->sockets.lock);
}
-static void nfc_llcp_clear_sdp(struct nfc_llcp_local *local)
+struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)
{
- mutex_lock(&local->sdp_lock);
+ kref_get(&local->ref);
- local->local_wks = 0;
- local->local_sdp = 0;
- local->local_sap = 0;
+ return local;
+}
- mutex_unlock(&local->sdp_lock);
+static void local_release(struct kref *ref)
+{
+ struct nfc_llcp_local *local;
+
+ local = container_of(ref, struct nfc_llcp_local, ref);
+
+ list_del(&local->list);
+ nfc_llcp_socket_release(local, false);
+ del_timer_sync(&local->link_timer);
+ skb_queue_purge(&local->tx_queue);
+ destroy_workqueue(local->tx_wq);
+ destroy_workqueue(local->rx_wq);
+ destroy_workqueue(local->timeout_wq);
+ kfree_skb(local->rx_pending);
+ kfree(local);
+}
+
+int nfc_llcp_local_put(struct nfc_llcp_local *local)
+{
+ if (local == NULL)
+ return 0;
+
+ return kref_put(&local->ref, local_release);
+}
+
+static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
+ u8 ssap, u8 dsap)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+ struct nfc_llcp_sock *llcp_sock;
+
+ pr_debug("ssap dsap %d %d\n", ssap, dsap);
+
+ if (ssap == 0 && dsap == 0)
+ return NULL;
+
+ read_lock(&local->sockets.lock);
+
+ llcp_sock = NULL;
+
+ sk_for_each(sk, node, &local->sockets.head) {
+ llcp_sock = nfc_llcp_sock(sk);
+
+ if (llcp_sock->ssap == ssap && llcp_sock->dsap == dsap)
+ break;
+ }
+
+ read_unlock(&local->sockets.lock);
+
+ if (llcp_sock == NULL)
+ return NULL;
+
+ sock_hold(&llcp_sock->sk);
+
+ return llcp_sock;
+}
+
+static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock)
+{
+ sock_put(&sock->sk);
}
static void nfc_llcp_timeout_work(struct work_struct *work)
@@ -174,6 +227,51 @@ static int nfc_llcp_wks_sap(char *service_name, size_t service_name_len)
return -EINVAL;
}
+static
+struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
+ u8 *sn, size_t sn_len)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+ struct nfc_llcp_sock *llcp_sock, *tmp_sock;
+
+ pr_debug("sn %zd %p\n", sn_len, sn);
+
+ if (sn == NULL || sn_len == 0)
+ return NULL;
+
+ read_lock(&local->sockets.lock);
+
+ llcp_sock = NULL;
+
+ sk_for_each(sk, node, &local->sockets.head) {
+ tmp_sock = nfc_llcp_sock(sk);
+
+ pr_debug("llcp sock %p\n", tmp_sock);
+
+ if (tmp_sock->sk.sk_state != LLCP_LISTEN)
+ continue;
+
+ if (tmp_sock->service_name == NULL ||
+ tmp_sock->service_name_len == 0)
+ continue;
+
+ if (tmp_sock->service_name_len != sn_len)
+ continue;
+
+ if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) {
+ llcp_sock = tmp_sock;
+ break;
+ }
+ }
+
+ read_unlock(&local->sockets.lock);
+
+ pr_debug("Found llcp sock %p\n", llcp_sock);
+
+ return llcp_sock;
+}
+
u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
struct nfc_llcp_sock *sock)
{
@@ -200,41 +298,26 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
}
/*
- * This is not a well known service,
- * we should try to find a local SDP free spot
+ * Check if there already is a non WKS socket bound
+ * to this service name.
*/
- ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP);
- if (ssap == LLCP_SDP_NUM_SAP) {
+ if (nfc_llcp_sock_from_sn(local, sock->service_name,
+ sock->service_name_len) != NULL) {
mutex_unlock(&local->sdp_lock);
return LLCP_SAP_MAX;
}
- pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
-
- set_bit(ssap, &local->local_sdp);
mutex_unlock(&local->sdp_lock);
- return LLCP_WKS_NUM_SAP + ssap;
+ return LLCP_SDP_UNBOUND;
- } else if (sock->ssap != 0) {
- if (sock->ssap < LLCP_WKS_NUM_SAP) {
- if (!test_bit(sock->ssap, &local->local_wks)) {
- set_bit(sock->ssap, &local->local_wks);
- mutex_unlock(&local->sdp_lock);
-
- return sock->ssap;
- }
-
- } else if (sock->ssap < LLCP_SDP_NUM_SAP) {
- if (!test_bit(sock->ssap - LLCP_WKS_NUM_SAP,
- &local->local_sdp)) {
- set_bit(sock->ssap - LLCP_WKS_NUM_SAP,
- &local->local_sdp);
- mutex_unlock(&local->sdp_lock);
+ } else if (sock->ssap != 0 && sock->ssap < LLCP_WKS_NUM_SAP) {
+ if (!test_bit(sock->ssap, &local->local_wks)) {
+ set_bit(sock->ssap, &local->local_wks);
+ mutex_unlock(&local->sdp_lock);
- return sock->ssap;
- }
+ return sock->ssap;
}
}
@@ -271,8 +354,34 @@ void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
local_ssap = ssap;
sdp = &local->local_wks;
} else if (ssap < LLCP_LOCAL_NUM_SAP) {
+ atomic_t *client_cnt;
+
local_ssap = ssap - LLCP_WKS_NUM_SAP;
sdp = &local->local_sdp;
+ client_cnt = &local->local_sdp_cnt[local_ssap];
+
+ pr_debug("%d clients\n", atomic_read(client_cnt));
+
+ mutex_lock(&local->sdp_lock);
+
+ if (atomic_dec_and_test(client_cnt)) {
+ struct nfc_llcp_sock *l_sock;
+
+ pr_debug("No more clients for SAP %d\n", ssap);
+
+ clear_bit(local_ssap, sdp);
+
+ /* Find the listening sock and set it back to UNBOUND */
+ l_sock = nfc_llcp_sock_get(local, ssap, LLCP_SAP_SDP);
+ if (l_sock) {
+ l_sock->ssap = LLCP_SDP_UNBOUND;
+ nfc_llcp_sock_put(l_sock);
+ }
+ }
+
+ mutex_unlock(&local->sdp_lock);
+
+ return;
} else if (ssap < LLCP_MAX_SAP) {
local_ssap = ssap - LLCP_LOCAL_NUM_SAP;
sdp = &local->local_sap;
@@ -287,19 +396,26 @@ void nfc_llcp_put_ssap(struct nfc_llcp_local *local, u8 ssap)
mutex_unlock(&local->sdp_lock);
}
-u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
+static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
{
- struct nfc_llcp_local *local;
+ u8 ssap;
- local = nfc_llcp_find_local(dev);
- if (local == NULL) {
- *general_bytes_len = 0;
- return NULL;
+ mutex_lock(&local->sdp_lock);
+
+ ssap = find_first_zero_bit(&local->local_sdp, LLCP_SDP_NUM_SAP);
+ if (ssap == LLCP_SDP_NUM_SAP) {
+ mutex_unlock(&local->sdp_lock);
+
+ return LLCP_SAP_MAX;
}
- *general_bytes_len = local->gb_len;
+ pr_debug("SDP ssap %d\n", LLCP_WKS_NUM_SAP + ssap);
- return local->gb;
+ set_bit(ssap, &local->local_sdp);
+
+ mutex_unlock(&local->sdp_lock);
+
+ return LLCP_WKS_NUM_SAP + ssap;
}
static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
@@ -363,6 +479,23 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
return 0;
}
+u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len)
+{
+ struct nfc_llcp_local *local;
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL) {
+ *general_bytes_len = 0;
+ return NULL;
+ }
+
+ nfc_llcp_build_gb(local);
+
+ *general_bytes_len = local->gb_len;
+
+ return local->gb;
+}
+
int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
{
struct nfc_llcp_local *local = nfc_llcp_find_local(dev);
@@ -384,31 +517,9 @@ int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len)
return -EINVAL;
}
- return nfc_llcp_parse_tlv(local,
- &local->remote_gb[3],
- local->remote_gb_len - 3);
-}
-
-static void nfc_llcp_tx_work(struct work_struct *work)
-{
- struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
- tx_work);
- struct sk_buff *skb;
-
- skb = skb_dequeue(&local->tx_queue);
- if (skb != NULL) {
- pr_debug("Sending pending skb\n");
- print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET,
- 16, 1, skb->data, skb->len, true);
-
- nfc_data_exchange(local->dev, local->target_idx,
- skb, nfc_llcp_recv, local);
- } else {
- nfc_llcp_send_symm(local->dev);
- }
-
- mod_timer(&local->link_timer,
- jiffies + msecs_to_jiffies(local->remote_lto));
+ return nfc_llcp_parse_gb_tlv(local,
+ &local->remote_gb[3],
+ local->remote_gb_len - 3);
}
static u8 nfc_llcp_dsap(struct sk_buff *pdu)
@@ -443,51 +554,84 @@ static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
sock->recv_ack_n = (sock->recv_n - 1) % 16;
}
-static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
- u8 ssap, u8 dsap)
+static void nfc_llcp_tx_work(struct work_struct *work)
{
- struct nfc_llcp_sock *sock, *llcp_sock, *n;
+ struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+ tx_work);
+ struct sk_buff *skb;
+ struct sock *sk;
+ struct nfc_llcp_sock *llcp_sock;
- pr_debug("ssap dsap %d %d\n", ssap, dsap);
+ skb = skb_dequeue(&local->tx_queue);
+ if (skb != NULL) {
+ sk = skb->sk;
+ llcp_sock = nfc_llcp_sock(sk);
+ if (llcp_sock != NULL) {
+ int ret;
+
+ pr_debug("Sending pending skb\n");
+ print_hex_dump(KERN_DEBUG, "LLCP Tx: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, true);
+
+ ret = nfc_data_exchange(local->dev, local->target_idx,
+ skb, nfc_llcp_recv, local);
+
+ if (!ret && nfc_llcp_ptype(skb) == LLCP_PDU_I) {
+ skb = skb_get(skb);
+ skb_queue_tail(&llcp_sock->tx_pending_queue,
+ skb);
+ }
+ } else {
+ nfc_llcp_send_symm(local->dev);
+ }
+ } else {
+ nfc_llcp_send_symm(local->dev);
+ }
- if (ssap == 0 && dsap == 0)
- return NULL;
+ mod_timer(&local->link_timer,
+ jiffies + msecs_to_jiffies(2 * local->remote_lto));
+}
- mutex_lock(&local->socket_lock);
- sock = local->sockets[ssap];
- if (sock == NULL) {
- mutex_unlock(&local->socket_lock);
- return NULL;
- }
+static struct nfc_llcp_sock *nfc_llcp_connecting_sock_get(struct nfc_llcp_local *local,
+ u8 ssap)
+{
+ struct sock *sk;
+ struct nfc_llcp_sock *llcp_sock;
+ struct hlist_node *node;
- pr_debug("root dsap %d (%d)\n", sock->dsap, dsap);
+ read_lock(&local->connecting_sockets.lock);
- if (sock->dsap == dsap) {
- sock_hold(&sock->sk);
- mutex_unlock(&local->socket_lock);
- return sock;
- }
+ sk_for_each(sk, node, &local->connecting_sockets.head) {
+ llcp_sock = nfc_llcp_sock(sk);
- list_for_each_entry_safe(llcp_sock, n, &sock->list, list) {
- pr_debug("llcp_sock %p sk %p dsap %d\n", llcp_sock,
- &llcp_sock->sk, llcp_sock->dsap);
- if (llcp_sock->dsap == dsap) {
+ if (llcp_sock->ssap == ssap) {
sock_hold(&llcp_sock->sk);
- mutex_unlock(&local->socket_lock);
- return llcp_sock;
+ goto out;
}
}
- pr_err("Could not find socket for %d %d\n", ssap, dsap);
+ llcp_sock = NULL;
- mutex_unlock(&local->socket_lock);
+out:
+ read_unlock(&local->connecting_sockets.lock);
- return NULL;
+ return llcp_sock;
}
-static void nfc_llcp_sock_put(struct nfc_llcp_sock *sock)
+static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
+ u8 *sn, size_t sn_len)
{
- sock_put(&sock->sk);
+ struct nfc_llcp_sock *llcp_sock;
+
+ llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len);
+
+ if (llcp_sock == NULL)
+ return NULL;
+
+ sock_hold(&llcp_sock->sk);
+
+ return llcp_sock;
}
static u8 *nfc_llcp_connect_sn(struct sk_buff *skb, size_t *sn_len)
@@ -518,35 +662,19 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
{
struct sock *new_sk, *parent;
struct nfc_llcp_sock *sock, *new_sock;
- u8 dsap, ssap, bound_sap, reason;
+ u8 dsap, ssap, reason;
dsap = nfc_llcp_dsap(skb);
ssap = nfc_llcp_ssap(skb);
pr_debug("%d %d\n", dsap, ssap);
- nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
- skb->len - LLCP_HEADER_SIZE);
-
if (dsap != LLCP_SAP_SDP) {
- bound_sap = dsap;
-
- mutex_lock(&local->socket_lock);
- sock = local->sockets[dsap];
- if (sock == NULL) {
- mutex_unlock(&local->socket_lock);
+ sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
+ if (sock == NULL || sock->sk.sk_state != LLCP_LISTEN) {
reason = LLCP_DM_NOBOUND;
goto fail;
}
-
- sock_hold(&sock->sk);
- mutex_unlock(&local->socket_lock);
-
- lock_sock(&sock->sk);
-
- if (sock->dsap == LLCP_SAP_SDP &&
- sock->sk.sk_state == LLCP_LISTEN)
- goto enqueue;
} else {
u8 *sn;
size_t sn_len;
@@ -559,40 +687,15 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
pr_debug("Service name length %zu\n", sn_len);
- mutex_lock(&local->socket_lock);
- for (bound_sap = 0; bound_sap < LLCP_LOCAL_SAP_OFFSET;
- bound_sap++) {
- sock = local->sockets[bound_sap];
- if (sock == NULL)
- continue;
-
- if (sock->service_name == NULL ||
- sock->service_name_len == 0)
- continue;
-
- if (sock->service_name_len != sn_len)
- continue;
-
- if (sock->dsap == LLCP_SAP_SDP &&
- sock->sk.sk_state == LLCP_LISTEN &&
- !memcmp(sn, sock->service_name, sn_len)) {
- pr_debug("Found service name at SAP %d\n",
- bound_sap);
- sock_hold(&sock->sk);
- mutex_unlock(&local->socket_lock);
-
- lock_sock(&sock->sk);
-
- goto enqueue;
- }
+ sock = nfc_llcp_sock_get_sn(local, sn, sn_len);
+ if (sock == NULL) {
+ reason = LLCP_DM_NOBOUND;
+ goto fail;
}
- mutex_unlock(&local->socket_lock);
}
- reason = LLCP_DM_NOBOUND;
- goto fail;
+ lock_sock(&sock->sk);
-enqueue:
parent = &sock->sk;
if (sk_acceptq_is_full(parent)) {
@@ -602,6 +705,21 @@ enqueue:
goto fail;
}
+ if (sock->ssap == LLCP_SDP_UNBOUND) {
+ u8 ssap = nfc_llcp_reserve_sdp_ssap(local);
+
+ pr_debug("First client, reserving %d\n", ssap);
+
+ if (ssap == LLCP_SAP_MAX) {
+ reason = LLCP_DM_REJ;
+ release_sock(&sock->sk);
+ sock_put(&sock->sk);
+ goto fail;
+ }
+
+ sock->ssap = ssap;
+ }
+
new_sk = nfc_llcp_sock_alloc(NULL, parent->sk_type, GFP_ATOMIC);
if (new_sk == NULL) {
reason = LLCP_DM_REJ;
@@ -612,15 +730,31 @@ enqueue:
new_sock = nfc_llcp_sock(new_sk);
new_sock->dev = local->dev;
- new_sock->local = local;
+ new_sock->local = nfc_llcp_local_get(local);
+ new_sock->miu = local->remote_miu;
new_sock->nfc_protocol = sock->nfc_protocol;
- new_sock->ssap = bound_sap;
new_sock->dsap = ssap;
+ new_sock->target_idx = local->target_idx;
new_sock->parent = parent;
+ new_sock->ssap = sock->ssap;
+ if (sock->ssap < LLCP_LOCAL_NUM_SAP && sock->ssap >= LLCP_WKS_NUM_SAP) {
+ atomic_t *client_count;
+
+ pr_debug("reserved_ssap %d for %p\n", sock->ssap, new_sock);
+
+ client_count =
+ &local->local_sdp_cnt[sock->ssap - LLCP_WKS_NUM_SAP];
+
+ atomic_inc(client_count);
+ new_sock->reserved_ssap = sock->ssap;
+ }
+
+ nfc_llcp_parse_connection_tlv(new_sock, &skb->data[LLCP_HEADER_SIZE],
+ skb->len - LLCP_HEADER_SIZE);
pr_debug("new sock %p sk %p\n", new_sock, &new_sock->sk);
- list_add_tail(&new_sock->list, &sock->list);
+ nfc_llcp_sock_link(&local->sockets, new_sk);
nfc_llcp_accept_enqueue(&sock->sk, new_sk);
@@ -654,12 +788,12 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
pr_debug("Remote ready %d tx queue len %d remote rw %d",
sock->remote_ready, skb_queue_len(&sock->tx_pending_queue),
- local->remote_rw);
+ sock->rw);
/* Try to queue some I frames for transmission */
while (sock->remote_ready &&
- skb_queue_len(&sock->tx_pending_queue) < local->remote_rw) {
- struct sk_buff *pdu, *pending_pdu;
+ skb_queue_len(&sock->tx_pending_queue) < sock->rw) {
+ struct sk_buff *pdu;
pdu = skb_dequeue(&sock->tx_queue);
if (pdu == NULL)
@@ -668,10 +802,7 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
/* Update N(S)/N(R) */
nfc_llcp_set_nrns(sock, pdu);
- pending_pdu = skb_clone(pdu, GFP_KERNEL);
-
skb_queue_tail(&local->tx_queue, pdu);
- skb_queue_tail(&sock->tx_pending_queue, pending_pdu);
nr_frames++;
}
@@ -728,11 +859,21 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
llcp_sock->send_ack_n = nr;
- skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp)
- if (nfc_llcp_ns(s) <= nr) {
- skb_unlink(s, &llcp_sock->tx_pending_queue);
- kfree_skb(s);
- }
+ /* Remove and free all skbs until ns == nr */
+ skb_queue_walk_safe(&llcp_sock->tx_pending_queue, s, tmp) {
+ skb_unlink(s, &llcp_sock->tx_pending_queue);
+ kfree_skb(s);
+
+ if (nfc_llcp_ns(s) == nr)
+ break;
+ }
+
+ /* Re-queue the remaining skbs for transmission */
+ skb_queue_reverse_walk_safe(&llcp_sock->tx_pending_queue,
+ s, tmp) {
+ skb_unlink(s, &llcp_sock->tx_pending_queue);
+ skb_queue_head(&local->tx_queue, s);
+ }
}
if (ptype == LLCP_PDU_RR)
@@ -740,7 +881,7 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,
else if (ptype == LLCP_PDU_RNR)
llcp_sock->remote_ready = false;
- if (nfc_llcp_queue_i_frames(llcp_sock) == 0)
+ if (nfc_llcp_queue_i_frames(llcp_sock) == 0 && ptype == LLCP_PDU_I)
nfc_llcp_send_rr(llcp_sock);
release_sock(sk);
@@ -791,11 +932,7 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
dsap = nfc_llcp_dsap(skb);
ssap = nfc_llcp_ssap(skb);
- llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
-
- if (llcp_sock == NULL)
- llcp_sock = nfc_llcp_sock_get(local, dsap, LLCP_SAP_SDP);
-
+ llcp_sock = nfc_llcp_connecting_sock_get(local, dsap);
if (llcp_sock == NULL) {
pr_err("Invalid CC\n");
nfc_llcp_send_dm(local, dsap, ssap, LLCP_DM_NOCONN);
@@ -803,11 +940,15 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
return;
}
- llcp_sock->dsap = ssap;
sk = &llcp_sock->sk;
- nfc_llcp_parse_tlv(local, &skb->data[LLCP_HEADER_SIZE],
- skb->len - LLCP_HEADER_SIZE);
+ /* Unlink from connecting and link to the client array */
+ nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+ nfc_llcp_sock_link(&local->sockets, sk);
+ llcp_sock->dsap = ssap;
+
+ nfc_llcp_parse_connection_tlv(llcp_sock, &skb->data[LLCP_HEADER_SIZE],
+ skb->len - LLCP_HEADER_SIZE);
sk->sk_state = LLCP_CONNECTED;
sk->sk_state_change(sk);
@@ -815,6 +956,45 @@ static void nfc_llcp_recv_cc(struct nfc_llcp_local *local, struct sk_buff *skb)
nfc_llcp_sock_put(llcp_sock);
}
+static void nfc_llcp_recv_dm(struct nfc_llcp_local *local, struct sk_buff *skb)
+{
+ struct nfc_llcp_sock *llcp_sock;
+ struct sock *sk;
+ u8 dsap, ssap, reason;
+
+ dsap = nfc_llcp_dsap(skb);
+ ssap = nfc_llcp_ssap(skb);
+ reason = skb->data[2];
+
+ pr_debug("%d %d reason %d\n", ssap, dsap, reason);
+
+ switch (reason) {
+ case LLCP_DM_NOBOUND:
+ case LLCP_DM_REJ:
+ llcp_sock = nfc_llcp_connecting_sock_get(local, dsap);
+ break;
+
+ default:
+ llcp_sock = nfc_llcp_sock_get(local, dsap, ssap);
+ break;
+ }
+
+ if (llcp_sock == NULL) {
+ pr_err("Invalid DM\n");
+ return;
+ }
+
+ sk = &llcp_sock->sk;
+
+ sk->sk_err = ENXIO;
+ sk->sk_state = LLCP_CLOSED;
+ sk->sk_state_change(sk);
+
+ nfc_llcp_sock_put(llcp_sock);
+
+ return;
+}
+
static void nfc_llcp_rx_work(struct work_struct *work)
{
struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
@@ -858,6 +1038,11 @@ static void nfc_llcp_rx_work(struct work_struct *work)
nfc_llcp_recv_cc(local, skb);
break;
+ case LLCP_PDU_DM:
+ pr_debug("DM\n");
+ nfc_llcp_recv_dm(local, skb);
+ break;
+
case LLCP_PDU_I:
case LLCP_PDU_RR:
case LLCP_PDU_RNR:
@@ -891,6 +1076,21 @@ void nfc_llcp_recv(void *data, struct sk_buff *skb, int err)
return;
}
+int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb)
+{
+ struct nfc_llcp_local *local;
+
+ local = nfc_llcp_find_local(dev);
+ if (local == NULL)
+ return -ENODEV;
+
+ local->rx_pending = skb_get(skb);
+ del_timer(&local->link_timer);
+ queue_work(local->rx_wq, &local->rx_work);
+
+ return 0;
+}
+
void nfc_llcp_mac_is_down(struct nfc_dev *dev)
{
struct nfc_llcp_local *local;
@@ -899,10 +1099,8 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)
if (local == NULL)
return;
- nfc_llcp_clear_sdp(local);
-
/* Close and purge all existing sockets */
- nfc_llcp_socket_release(local);
+ nfc_llcp_socket_release(local, true);
}
void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx,
@@ -943,8 +1141,8 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
local->dev = ndev;
INIT_LIST_HEAD(&local->list);
+ kref_init(&local->ref);
mutex_init(&local->sdp_lock);
- mutex_init(&local->socket_lock);
init_timer(&local->link_timer);
local->link_timer.data = (unsigned long) local;
local->link_timer.function = nfc_llcp_symm_timer;
@@ -984,11 +1182,13 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
goto err_rx_wq;
}
+ local->sockets.lock = __RW_LOCK_UNLOCKED(local->sockets.lock);
+ local->connecting_sockets.lock = __RW_LOCK_UNLOCKED(local->connecting_sockets.lock);
+
nfc_llcp_build_gb(local);
local->remote_miu = LLCP_DEFAULT_MIU;
local->remote_lto = LLCP_DEFAULT_LTO;
- local->remote_rw = LLCP_DEFAULT_RW;
list_add(&llcp_devices, &local->list);
@@ -1015,14 +1215,7 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)
return;
}
- list_del(&local->list);
- nfc_llcp_socket_release(local);
- del_timer_sync(&local->link_timer);
- skb_queue_purge(&local->tx_queue);
- destroy_workqueue(local->tx_wq);
- destroy_workqueue(local->rx_wq);
- kfree_skb(local->rx_pending);
- kfree(local);
+ nfc_llcp_local_put(local);
}
int __init nfc_llcp_init(void)
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 50680ce5ae43..83b8bba5a280 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -37,15 +37,22 @@ enum llcp_state {
#define LLCP_LOCAL_NUM_SAP 32
#define LLCP_LOCAL_SAP_OFFSET (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP)
#define LLCP_MAX_SAP (LLCP_WKS_NUM_SAP + LLCP_SDP_NUM_SAP + LLCP_LOCAL_NUM_SAP)
+#define LLCP_SDP_UNBOUND (LLCP_MAX_SAP + 1)
struct nfc_llcp_sock;
+struct llcp_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+};
+
struct nfc_llcp_local {
struct list_head list;
struct nfc_dev *dev;
+ struct kref ref;
+
struct mutex sdp_lock;
- struct mutex socket_lock;
struct timer_list link_timer;
struct sk_buff_head tx_queue;
@@ -63,6 +70,7 @@ struct nfc_llcp_local {
unsigned long local_wks; /* Well known services */
unsigned long local_sdp; /* Local services */
unsigned long local_sap; /* Local SAPs, not available for discovery */
+ atomic_t local_sdp_cnt[LLCP_SDP_NUM_SAP];
/* local */
u8 gb[NFC_MAX_GT_LEN];
@@ -77,24 +85,26 @@ struct nfc_llcp_local {
u16 remote_lto;
u8 remote_opt;
u16 remote_wks;
- u8 remote_rw;
/* sockets array */
- struct nfc_llcp_sock *sockets[LLCP_MAX_SAP];
+ struct llcp_sock_list sockets;
+ struct llcp_sock_list connecting_sockets;
};
struct nfc_llcp_sock {
struct sock sk;
- struct list_head list;
struct nfc_dev *dev;
struct nfc_llcp_local *local;
u32 target_idx;
u32 nfc_protocol;
+ /* Link parameters */
u8 ssap;
u8 dsap;
char *service_name;
size_t service_name_len;
+ u8 rw;
+ u16 miu;
/* Link variables */
u8 send_n;
@@ -105,6 +115,9 @@ struct nfc_llcp_sock {
/* Is the remote peer ready to receive */
u8 remote_ready;
+ /* Reserved source SAP */
+ u8 reserved_ssap;
+
struct sk_buff_head tx_queue;
struct sk_buff_head tx_pending_queue;
struct sk_buff_head tx_backlog_queue;
@@ -164,7 +177,11 @@ struct nfc_llcp_sock {
#define LLCP_DM_REJ 0x03
+void nfc_llcp_sock_link(struct llcp_sock_list *l, struct sock *s);
+void nfc_llcp_sock_unlink(struct llcp_sock_list *l, struct sock *s);
struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
+struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local);
+int nfc_llcp_local_put(struct nfc_llcp_local *local);
u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
struct nfc_llcp_sock *sock);
u8 nfc_llcp_get_local_ssap(struct nfc_llcp_local *local);
@@ -179,8 +196,10 @@ void nfc_llcp_accept_enqueue(struct sock *parent, struct sock *sk);
struct sock *nfc_llcp_accept_dequeue(struct sock *sk, struct socket *newsock);
/* TLV API */
-int nfc_llcp_parse_tlv(struct nfc_llcp_local *local,
- u8 *tlv_array, u16 tlv_array_len);
+int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
+ u8 *tlv_array, u16 tlv_array_len);
+int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
+ u8 *tlv_array, u16 tlv_array_len);
/* Commands API */
void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 17a707db40eb..ddeb9aa398f0 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -78,11 +78,11 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
struct sockaddr_nfc_llcp llcp_addr;
int len, ret = 0;
- pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
-
if (!addr || addr->sa_family != AF_NFC)
return -EINVAL;
+ pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
+
memset(&llcp_addr, 0, sizeof(llcp_addr));
len = min_t(unsigned int, sizeof(llcp_addr), alen);
memcpy(&llcp_addr, addr, len);
@@ -111,7 +111,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
}
llcp_sock->dev = dev;
- llcp_sock->local = local;
+ llcp_sock->local = nfc_llcp_local_get(local);
llcp_sock->nfc_protocol = llcp_addr.nfc_protocol;
llcp_sock->service_name_len = min_t(unsigned int,
llcp_addr.service_name_len,
@@ -121,10 +121,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
GFP_KERNEL);
llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
- if (llcp_sock->ssap == LLCP_MAX_SAP)
+ if (llcp_sock->ssap == LLCP_SAP_MAX) {
+ ret = -EADDRINUSE;
goto put_dev;
+ }
+
+ llcp_sock->reserved_ssap = llcp_sock->ssap;
- local->sockets[llcp_sock->ssap] = llcp_sock;
+ nfc_llcp_sock_link(&local->sockets, sk);
pr_debug("Socket bound to SAP %d\n", llcp_sock->ssap);
@@ -283,22 +287,28 @@ error:
return ret;
}
-static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
+static int llcp_sock_getname(struct socket *sock, struct sockaddr *uaddr,
int *len, int peer)
{
- struct sockaddr_nfc_llcp *llcp_addr = (struct sockaddr_nfc_llcp *)addr;
struct sock *sk = sock->sk;
struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+ DECLARE_SOCKADDR(struct sockaddr_nfc_llcp *, llcp_addr, uaddr);
- pr_debug("%p\n", sk);
+ if (llcp_sock == NULL || llcp_sock->dev == NULL)
+ return -EBADFD;
+
+ pr_debug("%p %d %d %d\n", sk, llcp_sock->target_idx,
+ llcp_sock->dsap, llcp_sock->ssap);
- if (llcp_sock == NULL)
+ if (llcp_sock == NULL || llcp_sock->dev == NULL)
return -EBADFD;
- addr->sa_family = AF_NFC;
+ uaddr->sa_family = AF_NFC;
+
*len = sizeof(struct sockaddr_nfc_llcp);
llcp_addr->dev_idx = llcp_sock->dev->idx;
+ llcp_addr->target_idx = llcp_sock->target_idx;
llcp_addr->dsap = llcp_sock->dsap;
llcp_addr->ssap = llcp_sock->ssap;
llcp_addr->service_name_len = llcp_sock->service_name_len;
@@ -382,15 +392,6 @@ static int llcp_sock_release(struct socket *sock)
goto out;
}
- mutex_lock(&local->socket_lock);
-
- if (llcp_sock == local->sockets[llcp_sock->ssap])
- local->sockets[llcp_sock->ssap] = NULL;
- else
- list_del_init(&llcp_sock->list);
-
- mutex_unlock(&local->socket_lock);
-
lock_sock(sk);
/* Send a DISC */
@@ -415,14 +416,13 @@ static int llcp_sock_release(struct socket *sock)
}
}
- /* Freeing the SAP */
- if ((sk->sk_state == LLCP_CONNECTED
- && llcp_sock->ssap > LLCP_LOCAL_SAP_OFFSET) ||
- sk->sk_state == LLCP_BOUND || sk->sk_state == LLCP_LISTEN)
+ if (llcp_sock->reserved_ssap < LLCP_SAP_MAX)
nfc_llcp_put_ssap(llcp_sock->local, llcp_sock->ssap);
release_sock(sk);
+ nfc_llcp_sock_unlink(&local->sockets, sk);
+
out:
sock_orphan(sk);
sock_put(sk);
@@ -490,12 +490,16 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
}
llcp_sock->dev = dev;
- llcp_sock->local = local;
+ llcp_sock->local = nfc_llcp_local_get(local);
+ llcp_sock->miu = llcp_sock->local->remote_miu;
llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
if (llcp_sock->ssap == LLCP_SAP_MAX) {
ret = -ENOMEM;
goto put_dev;
}
+
+ llcp_sock->reserved_ssap = llcp_sock->ssap;
+
if (addr->service_name_len == 0)
llcp_sock->dsap = addr->dsap;
else
@@ -508,21 +512,26 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
llcp_sock->service_name_len,
GFP_KERNEL);
- local->sockets[llcp_sock->ssap] = llcp_sock;
+ nfc_llcp_sock_link(&local->connecting_sockets, sk);
ret = nfc_llcp_send_connect(llcp_sock);
if (ret)
- goto put_dev;
+ goto sock_unlink;
ret = sock_wait_state(sk, LLCP_CONNECTED,
sock_sndtimeo(sk, flags & O_NONBLOCK));
if (ret)
- goto put_dev;
+ goto sock_unlink;
release_sock(sk);
return 0;
+sock_unlink:
+ nfc_llcp_put_ssap(local, llcp_sock->ssap);
+
+ nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
+
put_dev:
nfc_put_device(dev);
@@ -687,13 +696,15 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
llcp_sock->ssap = 0;
llcp_sock->dsap = LLCP_SAP_SDP;
+ llcp_sock->rw = LLCP_DEFAULT_RW;
+ llcp_sock->miu = LLCP_DEFAULT_MIU;
llcp_sock->send_n = llcp_sock->send_ack_n = 0;
llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
llcp_sock->remote_ready = 1;
+ llcp_sock->reserved_ssap = LLCP_SAP_MAX;
skb_queue_head_init(&llcp_sock->tx_queue);
skb_queue_head_init(&llcp_sock->tx_pending_queue);
skb_queue_head_init(&llcp_sock->tx_backlog_queue);
- INIT_LIST_HEAD(&llcp_sock->list);
INIT_LIST_HEAD(&llcp_sock->accept_queue);
if (sock != NULL)
@@ -704,8 +715,6 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
{
- struct nfc_llcp_local *local = sock->local;
-
kfree(sock->service_name);
skb_queue_purge(&sock->tx_queue);
@@ -714,12 +723,9 @@ void nfc_llcp_sock_free(struct nfc_llcp_sock *sock)
list_del_init(&sock->accept_queue);
- if (local != NULL && sock == local->sockets[sock->ssap])
- local->sockets[sock->ssap] = NULL;
- else
- list_del_init(&sock->list);
-
sock->parent = NULL;
+
+ nfc_llcp_local_put(sock->local);
}
static int llcp_sock_create(struct net *net, struct socket *sock,
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index d560e6f13072..f81efe13985a 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -27,6 +27,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": %s: " fmt, __func__
+#include <linux/module.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
@@ -194,7 +195,7 @@ static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
}
if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
- (protocols & NFC_PROTO_ISO14443_MASK)) {
+ (protocols & NFC_PROTO_ISO14443_B_MASK)) {
cmd.disc_configs[cmd.num_disc_configs].rf_tech_and_mode =
NCI_NFC_B_PASSIVE_POLL_MODE;
cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
@@ -387,7 +388,8 @@ static int nci_dev_down(struct nfc_dev *nfc_dev)
return nci_close_device(ndev);
}
-static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
+static int nci_start_poll(struct nfc_dev *nfc_dev,
+ __u32 im_protocols, __u32 tm_protocols)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
@@ -413,11 +415,11 @@ static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
return -EBUSY;
}
- rc = nci_request(ndev, nci_rf_discover_req, protocols,
+ rc = nci_request(ndev, nci_rf_discover_req, im_protocols,
msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));
if (!rc)
- ndev->poll_prots = protocols;
+ ndev->poll_prots = im_protocols;
return rc;
}
@@ -485,7 +487,8 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
param.rf_protocol = NCI_RF_PROTOCOL_T2T;
else if (protocol == NFC_PROTO_FELICA)
param.rf_protocol = NCI_RF_PROTOCOL_T3T;
- else if (protocol == NFC_PROTO_ISO14443)
+ else if (protocol == NFC_PROTO_ISO14443 ||
+ protocol == NFC_PROTO_ISO14443_B)
param.rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
else
param.rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
@@ -521,9 +524,9 @@ static void nci_deactivate_target(struct nfc_dev *nfc_dev,
}
}
-static int nci_data_exchange(struct nfc_dev *nfc_dev, struct nfc_target *target,
- struct sk_buff *skb,
- data_exchange_cb_t cb, void *cb_context)
+static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct sk_buff *skb,
+ data_exchange_cb_t cb, void *cb_context)
{
struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
int rc;
@@ -556,7 +559,7 @@ static struct nfc_ops nci_nfc_ops = {
.stop_poll = nci_stop_poll,
.activate_target = nci_activate_target,
.deactivate_target = nci_deactivate_target,
- .data_exchange = nci_data_exchange,
+ .im_transceive = nci_transceive,
};
/* ---- Interface to NCI drivers ---- */
@@ -878,3 +881,5 @@ static void nci_cmd_work(struct work_struct *work)
jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
}
}
+
+MODULE_LICENSE("GPL");
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index cb2646179e5f..af7a93b04393 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -106,7 +106,7 @@ static __u8 *nci_extract_rf_params_nfca_passive_poll(struct nci_dev *ndev,
nfca_poll->sens_res = __le16_to_cpu(*((__u16 *)data));
data += 2;
- nfca_poll->nfcid1_len = *data++;
+ nfca_poll->nfcid1_len = min_t(__u8, *data++, NFC_NFCID1_MAXSIZE);
pr_debug("sens_res 0x%x, nfcid1_len %d\n",
nfca_poll->sens_res, nfca_poll->nfcid1_len);
@@ -130,7 +130,7 @@ static __u8 *nci_extract_rf_params_nfcb_passive_poll(struct nci_dev *ndev,
struct rf_tech_specific_params_nfcb_poll *nfcb_poll,
__u8 *data)
{
- nfcb_poll->sensb_res_len = *data++;
+ nfcb_poll->sensb_res_len = min_t(__u8, *data++, NFC_SENSB_RES_MAXSIZE);
pr_debug("sensb_res_len %d\n", nfcb_poll->sensb_res_len);
@@ -145,7 +145,7 @@ static __u8 *nci_extract_rf_params_nfcf_passive_poll(struct nci_dev *ndev,
__u8 *data)
{
nfcf_poll->bit_rate = *data++;
- nfcf_poll->sensf_res_len = *data++;
+ nfcf_poll->sensf_res_len = min_t(__u8, *data++, NFC_SENSF_RES_MAXSIZE);
pr_debug("bit_rate %d, sensf_res_len %d\n",
nfcf_poll->bit_rate, nfcf_poll->sensf_res_len);
@@ -170,7 +170,10 @@ static int nci_add_new_protocol(struct nci_dev *ndev,
if (rf_protocol == NCI_RF_PROTOCOL_T2T)
protocol = NFC_PROTO_MIFARE_MASK;
else if (rf_protocol == NCI_RF_PROTOCOL_ISO_DEP)
- protocol = NFC_PROTO_ISO14443_MASK;
+ if (rf_tech_and_mode == NCI_NFC_A_PASSIVE_POLL_MODE)
+ protocol = NFC_PROTO_ISO14443_MASK;
+ else
+ protocol = NFC_PROTO_ISO14443_B_MASK;
else if (rf_protocol == NCI_RF_PROTOCOL_T3T)
protocol = NFC_PROTO_FELICA_MASK;
else
@@ -331,7 +334,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
switch (ntf->activation_rf_tech_and_mode) {
case NCI_NFC_A_PASSIVE_POLL_MODE:
nfca_poll = &ntf->activation_params.nfca_poll_iso_dep;
- nfca_poll->rats_res_len = *data++;
+ nfca_poll->rats_res_len = min_t(__u8, *data++, 20);
pr_debug("rats_res_len %d\n", nfca_poll->rats_res_len);
if (nfca_poll->rats_res_len > 0) {
memcpy(nfca_poll->rats_res,
@@ -341,7 +344,7 @@ static int nci_extract_activation_params_iso_dep(struct nci_dev *ndev,
case NCI_NFC_B_PASSIVE_POLL_MODE:
nfcb_poll = &ntf->activation_params.nfcb_poll_iso_dep;
- nfcb_poll->attrib_res_len = *data++;
+ nfcb_poll->attrib_res_len = min_t(__u8, *data++, 50);
pr_debug("attrib_res_len %d\n", nfcb_poll->attrib_res_len);
if (nfcb_poll->attrib_res_len > 0) {
memcpy(nfcb_poll->attrib_res,
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 581d419083aa..4c51714ee741 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -49,6 +49,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
[NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
[NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
[NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
+ [NFC_ATTR_IM_PROTOCOLS] = { .type = NLA_U32 },
+ [NFC_ATTR_TM_PROTOCOLS] = { .type = NLA_U32 },
};
static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
@@ -165,7 +167,7 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
dev->genl_data.poll_req_pid = 0;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
@@ -193,7 +195,7 @@ int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx)
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -219,12 +221,74 @@ free_msg:
return -EMSGSIZE;
}
+int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+ NFC_EVENT_TM_ACTIVATED);
+ if (!hdr)
+ goto free_msg;
+
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+ goto nla_put_failure;
+ if (nla_put_u32(msg, NFC_ATTR_TM_PROTOCOLS, protocol))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
+int nfc_genl_tm_deactivated(struct nfc_dev *dev)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+ NFC_EVENT_TM_DEACTIVATED);
+ if (!hdr)
+ goto free_msg;
+
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return -EMSGSIZE;
+}
+
int nfc_genl_device_added(struct nfc_dev *dev)
{
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -257,7 +321,7 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -370,7 +434,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
pr_debug("DEP link is up\n");
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
@@ -409,7 +473,7 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
pr_debug("DEP link is down\n");
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
return -ENOMEM;
@@ -450,7 +514,7 @@ static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
if (!dev)
return -ENODEV;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
rc = -ENOMEM;
goto out_putdev;
@@ -519,16 +583,25 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
struct nfc_dev *dev;
int rc;
u32 idx;
- u32 protocols;
+ u32 im_protocols = 0, tm_protocols = 0;
pr_debug("Poll start\n");
if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
- !info->attrs[NFC_ATTR_PROTOCOLS])
+ ((!info->attrs[NFC_ATTR_IM_PROTOCOLS] &&
+ !info->attrs[NFC_ATTR_PROTOCOLS]) &&
+ !info->attrs[NFC_ATTR_TM_PROTOCOLS]))
return -EINVAL;
idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
- protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);
+
+ if (info->attrs[NFC_ATTR_TM_PROTOCOLS])
+ tm_protocols = nla_get_u32(info->attrs[NFC_ATTR_TM_PROTOCOLS]);
+
+ if (info->attrs[NFC_ATTR_IM_PROTOCOLS])
+ im_protocols = nla_get_u32(info->attrs[NFC_ATTR_IM_PROTOCOLS]);
+ else if (info->attrs[NFC_ATTR_PROTOCOLS])
+ im_protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);
dev = nfc_get_device(idx);
if (!dev)
@@ -536,7 +609,7 @@ static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&dev->genl_data.genl_data_mutex);
- rc = nfc_start_poll(dev, protocols);
+ rc = nfc_start_poll(dev, im_protocols, tm_protocols);
if (!rc)
dev->genl_data.poll_req_pid = info->snd_pid;
@@ -561,6 +634,15 @@ static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
if (!dev)
return -ENODEV;
+ device_lock(&dev->dev);
+
+ if (!dev->polling) {
+ device_unlock(&dev->dev);
+ return -EINVAL;
+ }
+
+ device_unlock(&dev->dev);
+
mutex_lock(&dev->genl_data.genl_data_mutex);
if (dev->genl_data.poll_req_pid != info->snd_pid) {
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 3dd4232ae664..c5e42b79a418 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -55,6 +55,7 @@ int nfc_llcp_register_device(struct nfc_dev *dev);
void nfc_llcp_unregister_device(struct nfc_dev *dev);
int nfc_llcp_set_remote_gb(struct nfc_dev *dev, u8 *gb, u8 gb_len);
u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *general_bytes_len);
+int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
int __init nfc_llcp_init(void);
void nfc_llcp_exit(void);
@@ -90,6 +91,12 @@ static inline u8 *nfc_llcp_general_bytes(struct nfc_dev *dev, size_t *gb_len)
return NULL;
}
+static inline int nfc_llcp_data_received(struct nfc_dev *dev,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
static inline int nfc_llcp_init(void)
{
return 0;
@@ -128,6 +135,9 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
u8 comm_mode, u8 rf_mode);
int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
+int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol);
+int nfc_genl_tm_deactivated(struct nfc_dev *dev);
+
struct nfc_dev *nfc_get_device(unsigned int idx);
static inline void nfc_put_device(struct nfc_dev *dev)
@@ -158,7 +168,7 @@ int nfc_dev_up(struct nfc_dev *dev);
int nfc_dev_down(struct nfc_dev *dev);
-int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
+int nfc_start_poll(struct nfc_dev *dev, u32 im_protocols, u32 tm_protocols);
int nfc_stop_poll(struct nfc_dev *dev);
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index ec1134c9e07f..8b8a6a2b2bad 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -54,7 +54,10 @@ static int rawsock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
- pr_debug("sock=%p\n", sock);
+ pr_debug("sock=%p sk=%p\n", sock, sk);
+
+ if (!sk)
+ return 0;
sock_orphan(sk);
sock_put(sk);
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 48badffaafc1..320fa0e6951a 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2012 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -325,6 +325,9 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
}
}
+ if (!acts_list)
+ return 0;
+
return do_execute_actions(dp, skb, nla_data(acts_list),
nla_len(acts_list), true);
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2c74daa5aca5..d8277d29e710 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2012 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -263,14 +263,15 @@ err:
static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
const struct dp_upcall_info *upcall_info)
{
+ unsigned short gso_type = skb_shinfo(skb)->gso_type;
struct dp_upcall_info later_info;
struct sw_flow_key later_key;
struct sk_buff *segs, *nskb;
int err;
segs = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (IS_ERR(segs))
+ return PTR_ERR(segs);
/* Queue all of the segments. */
skb = segs;
@@ -279,7 +280,7 @@ static int queue_gso_packets(int dp_ifindex, struct sk_buff *skb,
if (err)
break;
- if (skb == segs && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
+ if (skb == segs && gso_type & SKB_GSO_UDP) {
/* The initial flow key extracted by ovs_flow_extract()
* in this case is for a first fragment, so we need to
* properly mark later fragments.
@@ -1649,7 +1650,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
if (!err && a[OVS_VPORT_ATTR_OPTIONS])
err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
- if (!err && a[OVS_VPORT_ATTR_UPCALL_PID])
+ if (err)
+ goto exit_unlock;
+ if (a[OVS_VPORT_ATTR_UPCALL_PID])
vport->upcall_pid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq,
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index c73370cc1f02..c1105c147531 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index 46736518c453..36dcee8fc84a 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 6d4d8097cf96..b7f38b161909 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2011 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -182,7 +182,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
u8 tcp_flags = 0;
- if (flow->key.eth.type == htons(ETH_P_IP) &&
+ if ((flow->key.eth.type == htons(ETH_P_IP) ||
+ flow->key.eth.type == htons(ETH_P_IPV6)) &&
flow->key.ip.proto == IPPROTO_TCP &&
likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
u8 *tcp = (u8 *)tcp_hdr(skb);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 2747dc2c4ac1..9b75617ca4e0 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2011 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index b6b1d7daa3cb..4061b9ee07f7 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -24,6 +24,9 @@
#include <linux/ethtool.h>
#include <linux/skbuff.h>
+#include <net/dst.h>
+#include <net/xfrm.h>
+
#include "datapath.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
@@ -209,6 +212,11 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb)
int len;
len = skb->len;
+
+ skb_dst_drop(skb);
+ nf_reset(skb);
+ secpath_reset(skb);
+
skb->dev = netdev;
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, netdev);
diff --git a/net/openvswitch/vport-internal_dev.h b/net/openvswitch/vport-internal_dev.h
index 3454447c5f11..9a7d30ecc6a2 100644
--- a/net/openvswitch/vport-internal_dev.h
+++ b/net/openvswitch/vport-internal_dev.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2011 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 3fd6c0d88e12..6ea3551cc78c 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index fd9b008a0e6e..f7072a25c604 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2011 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 6c066ba25dc7..6140336e79d7 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 19609629dabd..aac680ca2b06 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 0f661745df0f..ceaca7c134a0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -531,6 +531,7 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
struct ethtool_cmd ecmd;
int err;
+ u32 speed;
rtnl_lock();
dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
@@ -539,25 +540,18 @@ static int prb_calc_retire_blk_tmo(struct packet_sock *po,
return DEFAULT_PRB_RETIRE_TOV;
}
err = __ethtool_get_settings(dev, &ecmd);
+ speed = ethtool_cmd_speed(&ecmd);
rtnl_unlock();
if (!err) {
- switch (ecmd.speed) {
- case SPEED_10000:
- msec = 1;
- div = 10000/1000;
- break;
- case SPEED_1000:
- msec = 1;
- div = 1000/1000;
- break;
/*
* If the link speed is so slow you don't really
* need to worry about perf anyways
*/
- case SPEED_100:
- case SPEED_10:
- default:
+ if (speed < SPEED_1000 || speed == SPEED_UNKNOWN) {
return DEFAULT_PRB_RETIRE_TOV;
+ } else {
+ msec = 1;
+ div = speed / 1000;
}
}
@@ -592,7 +586,7 @@ static void init_prb_bdqc(struct packet_sock *po,
p1->knxt_seq_num = 1;
p1->pkbdq = pg_vec;
pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
- p1->pkblk_start = (char *)pg_vec[0].buffer;
+ p1->pkblk_start = pg_vec[0].buffer;
p1->kblk_size = req_u->req3.tp_block_size;
p1->knum_blocks = req_u->req3.tp_block_nr;
p1->hdrlen = po->tp_hdrlen;
@@ -824,8 +818,7 @@ static void prb_open_block(struct tpacket_kbdq_core *pkc1,
h1->ts_first_pkt.ts_sec = ts.tv_sec;
h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
pkc1->pkblk_start = (char *)pbd1;
- pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
- BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
+ pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
pbd1->version = pkc1->version;
@@ -1018,7 +1011,7 @@ static void *__packet_lookup_frame_in_block(struct packet_sock *po,
struct tpacket_block_desc *pbd;
char *curr, *end;
- pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
+ pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
/* Queue is frozen when user space is lagging behind */
@@ -1044,7 +1037,7 @@ static void *__packet_lookup_frame_in_block(struct packet_sock *po,
smp_mb();
curr = pkc->nxt_offset;
pkc->skb = skb;
- end = (char *) ((char *)pbd + pkc->kblk_size);
+ end = (char *)pbd + pkc->kblk_size;
/* first try the current block */
if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
@@ -1476,7 +1469,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
* Find the device first to size check it
*/
- saddr->spkt_device[13] = 0;
+ saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
rcu_read_lock();
dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
diff --git a/net/rds/page.c b/net/rds/page.c
index 2499cd108421..9005a2c920ee 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -74,11 +74,12 @@ int rds_page_copy_user(struct page *page, unsigned long offset,
}
EXPORT_SYMBOL_GPL(rds_page_copy_user);
-/*
- * Message allocation uses this to build up regions of a message.
+/**
+ * rds_page_remainder_alloc - build up regions of a message.
*
- * @bytes - the number of bytes needed.
- * @gfp - the waiting behaviour of the allocation
+ * @scat: Scatter list for message
+ * @bytes: the number of bytes needed.
+ * @gfp: the waiting behaviour of the allocation
*
* @gfp is always ored with __GFP_HIGHMEM. Callers must be prepared to
* kmap the pages, etc.
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 5c6e9f132026..9f0f17cf6bf9 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -410,6 +410,8 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
+ msg->msg_namelen = 0;
+
if (msg_flags & MSG_OOB)
goto out;
@@ -485,6 +487,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
sin->sin_port = inc->i_hdr.h_sport;
sin->sin_addr.s_addr = inc->i_saddr;
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ msg->msg_namelen = sizeof(*sin);
}
break;
}
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index f974961754ca..752b72360ebc 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -325,7 +325,7 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
rfkill_global_states[type].cur = blocked;
list_for_each_entry(rfkill, &rfkill_list, node) {
- if (rfkill->type != type)
+ if (rfkill->type != type && type != RFKILL_TYPE_ALL)
continue;
rfkill_set_block(rfkill, blocked);
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index 5d6b572a6704..a9206087b4d7 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -81,10 +81,6 @@ void rxrpc_UDP_error_report(struct sock *sk)
_net("I/F MTU %u", mtu);
}
- /* ip_rt_frag_needed() may have eaten the info */
- if (mtu == 0)
- mtu = ntohs(icmp_hdr(skb)->un.frag.mtu);
-
if (mtu == 0) {
/* they didn't give us a size, estimate one */
if (mtu > 1500) {
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 16ae88762d00..e1ac183d50bb 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -242,7 +242,7 @@ int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
EXPORT_SYMBOL(rxrpc_kernel_send_data);
-/*
+/**
* rxrpc_kernel_abort_call - Allow a kernel service to abort a call
* @call: The call to be aborted
* @abort_code: The abort code to stick into the ABORT packet
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 2754f098d436..bebaa43484bc 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -229,7 +229,7 @@ found_UDP_peer:
return peer;
new_UDP_peer:
- _net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
+ _net("Rx UDP DGRAM from NEW peer");
read_unlock_bh(&rxrpc_peer_lock);
_leave(" = -EBUSY [new]");
return ERR_PTR(-EBUSY);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index e7a8976bf25c..62fb51face8a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -507,6 +507,26 @@ config NET_EMATCH_TEXT
To compile this code as a module, choose M here: the
module will be called em_text.
+config NET_EMATCH_CANID
+ tristate "CAN Identifier"
+ depends on NET_EMATCH && CAN
+ ---help---
+ Say Y here if you want to be able to classify CAN frames based
+ on CAN Identifier.
+
+ To compile this code as a module, choose M here: the
+ module will be called em_canid.
+
+config NET_EMATCH_IPSET
+ tristate "IPset"
+ depends on NET_EMATCH && IP_SET
+ ---help---
+ Say Y here if you want to be able to classify packets based on
+ ipset membership.
+
+ To compile this code as a module, choose M here: the
+ module will be called em_ipset.
+
config NET_CLS_ACT
bool "Actions"
---help---
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 5940a1992f0d..978cbf004e80 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -55,3 +55,5 @@ obj-$(CONFIG_NET_EMATCH_NBYTE) += em_nbyte.o
obj-$(CONFIG_NET_EMATCH_U32) += em_u32.o
obj-$(CONFIG_NET_EMATCH_META) += em_meta.o
obj-$(CONFIG_NET_EMATCH_TEXT) += em_text.o
+obj-$(CONFIG_NET_EMATCH_CANID) += em_canid.o
+obj-$(CONFIG_NET_EMATCH_IPSET) += em_ipset.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5cfb160df063..e3d2c78cb52c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -652,27 +652,27 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 pid, u32 seq,
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
-
- t = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
+ if (!nlh)
+ goto out_nlmsg_trim;
+ t = nlmsg_data(nlh);
t->tca_family = AF_UNSPEC;
t->tca__pad1 = 0;
t->tca__pad2 = 0;
nest = nla_nest_start(skb, TCA_ACT_TAB);
if (nest == NULL)
- goto nla_put_failure;
+ goto out_nlmsg_trim;
if (tcf_action_dump(skb, a, bind, ref) < 0)
- goto nla_put_failure;
+ goto out_nlmsg_trim;
nla_nest_end(skb, nest);
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
-nla_put_failure:
-nlmsg_failure:
+out_nlmsg_trim:
nlmsg_trim(skb, b);
return -1;
}
@@ -799,19 +799,21 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
if (a->ops == NULL)
goto err_out;
- nlh = NLMSG_PUT(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t));
- t = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
+ if (!nlh)
+ goto out_module_put;
+ t = nlmsg_data(nlh);
t->tca_family = AF_UNSPEC;
t->tca__pad1 = 0;
t->tca__pad2 = 0;
nest = nla_nest_start(skb, TCA_ACT_TAB);
if (nest == NULL)
- goto nla_put_failure;
+ goto out_module_put;
err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
if (err < 0)
- goto nla_put_failure;
+ goto out_module_put;
if (err == 0)
goto noflush_out;
@@ -828,8 +830,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
return err;
-nla_put_failure:
-nlmsg_failure:
+out_module_put:
module_put(a->ops->owner);
err_out:
noflush_out:
@@ -919,18 +920,20 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
b = skb_tail_pointer(skb);
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags);
- t = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*t), flags);
+ if (!nlh)
+ goto out_kfree_skb;
+ t = nlmsg_data(nlh);
t->tca_family = AF_UNSPEC;
t->tca__pad1 = 0;
t->tca__pad2 = 0;
nest = nla_nest_start(skb, TCA_ACT_TAB);
if (nest == NULL)
- goto nla_put_failure;
+ goto out_kfree_skb;
if (tcf_action_dump(skb, a, 0, 0) < 0)
- goto nla_put_failure;
+ goto out_kfree_skb;
nla_nest_end(skb, nest);
@@ -942,8 +945,7 @@ static int tcf_add_notify(struct net *net, struct tc_action *a,
err = 0;
return err;
-nla_put_failure:
-nlmsg_failure:
+out_kfree_skb:
kfree_skb(skb);
return -1;
}
@@ -1062,7 +1064,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
struct tc_action_ops *a_o;
struct tc_action a;
int ret = 0;
- struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
+ struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
struct nlattr *kind = find_dump_kind(cb->nlh);
if (kind == NULL) {
@@ -1080,23 +1082,25 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
if (a_o->walk == NULL) {
WARN(1, "tc_dump_action: %s !capable of dumping table\n",
a_o->kind);
- goto nla_put_failure;
+ goto out_module_put;
}
- nlh = NLMSG_PUT(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
- cb->nlh->nlmsg_type, sizeof(*t));
- t = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, sizeof(*t), 0);
+ if (!nlh)
+ goto out_module_put;
+ t = nlmsg_data(nlh);
t->tca_family = AF_UNSPEC;
t->tca__pad1 = 0;
t->tca__pad2 = 0;
nest = nla_nest_start(skb, TCA_ACT_TAB);
if (nest == NULL)
- goto nla_put_failure;
+ goto out_module_put;
ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
if (ret < 0)
- goto nla_put_failure;
+ goto out_module_put;
if (ret > 0) {
nla_nest_end(skb, nest);
@@ -1110,8 +1114,7 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
module_put(a_o->owner);
return skb->len;
-nla_put_failure:
-nlmsg_failure:
+out_module_put:
module_put(a_o->owner);
nlmsg_trim(skb, b);
return skb->len;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f452f696b4b3..6dd1131f2ec1 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -140,7 +140,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
int tp_created = 0;
replay:
- t = NLMSG_DATA(n);
+ t = nlmsg_data(n);
protocol = TC_H_MIN(t->tcm_info);
prio = TC_H_MAJ(t->tcm_info);
nprio = prio;
@@ -349,8 +349,10 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
- tcm = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+ if (!nlh)
+ goto out_nlmsg_trim;
+ tcm = nlmsg_data(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm__pad1 = 0;
tcm->tcm__pad2 = 0;
@@ -368,7 +370,7 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
-nlmsg_failure:
+out_nlmsg_trim:
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
@@ -418,7 +420,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
struct net_device *dev;
struct Qdisc *q;
struct tcf_proto *tp, **chain;
- struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
+ struct tcmsg *tcm = nlmsg_data(cb->nlh);
unsigned long cl = 0;
const struct Qdisc_class_ops *cops;
struct tcf_dump_args arg;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 36fec4227401..44f405cb9aaf 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -143,7 +143,7 @@ static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
if (head == NULL)
goto old_method;
- iif = ((struct rtable *)dst)->rt_iif;
+ iif = inet_iif(skb);
h = route4_fastmap_hash(id, iif);
if (id == head->fastmap[h].id &&
diff --git a/net/sched/em_canid.c b/net/sched/em_canid.c
new file mode 100644
index 000000000000..bfd34e4c1afc
--- /dev/null
+++ b/net/sched/em_canid.c
@@ -0,0 +1,240 @@
+/*
+ * em_canid.c Ematch rule to match CAN frames according to their CAN IDs
+ *
+ * This program is free software; you can distribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Idea: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
+ * Copyright: (c) 2011 Czech Technical University in Prague
+ * (c) 2011 Volkswagen Group Research
+ * Authors: Michal Sojka <sojkam1@fel.cvut.cz>
+ * Pavel Pisa <pisa@cmp.felk.cvut.cz>
+ * Rostislav Lisovy <lisovy@gmail.cz>
+ * Funded by: Volkswagen Group Research
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/pkt_cls.h>
+#include <linux/can.h>
+
+#define EM_CAN_RULES_MAX 500
+
+struct canid_match {
+ /* For each SFF CAN ID (11 bit) there is one record in this bitfield */
+ DECLARE_BITMAP(match_sff, (1 << CAN_SFF_ID_BITS));
+
+ int rules_count;
+ int sff_rules_count;
+ int eff_rules_count;
+
+ /*
+ * Raw rules copied from netlink message; Used for sending
+ * information to userspace (when 'tc filter show' is invoked)
+ * AND when matching EFF frames
+ */
+ struct can_filter rules_raw[];
+};
+
+/**
+ * em_canid_get_id() - Extracts Can ID out of the sk_buff structure.
+ */
+static canid_t em_canid_get_id(struct sk_buff *skb)
+{
+ /* CAN ID is stored within the data field */
+ struct can_frame *cf = (struct can_frame *)skb->data;
+
+ return cf->can_id;
+}
+
+static void em_canid_sff_match_add(struct canid_match *cm, u32 can_id,
+ u32 can_mask)
+{
+ int i;
+
+ /*
+ * Limit can_mask and can_id to SFF range to
+ * protect against write after end of array
+ */
+ can_mask &= CAN_SFF_MASK;
+ can_id &= can_mask;
+
+ /* Single frame */
+ if (can_mask == CAN_SFF_MASK) {
+ set_bit(can_id, cm->match_sff);
+ return;
+ }
+
+ /* All frames */
+ if (can_mask == 0) {
+ bitmap_fill(cm->match_sff, (1 << CAN_SFF_ID_BITS));
+ return;
+ }
+
+ /*
+ * Individual frame filter.
+ * Add record (set bit to 1) for each ID that
+ * conforms particular rule
+ */
+ for (i = 0; i < (1 << CAN_SFF_ID_BITS); i++) {
+ if ((i & can_mask) == can_id)
+ set_bit(i, cm->match_sff);
+ }
+}
+
+static inline struct canid_match *em_canid_priv(struct tcf_ematch *m)
+{
+ return (struct canid_match *)m->data;
+}
+
+static int em_canid_match(struct sk_buff *skb, struct tcf_ematch *m,
+ struct tcf_pkt_info *info)
+{
+ struct canid_match *cm = em_canid_priv(m);
+ canid_t can_id;
+ int match = 0;
+ int i;
+ const struct can_filter *lp;
+
+ can_id = em_canid_get_id(skb);
+
+ if (can_id & CAN_EFF_FLAG) {
+ for (i = 0, lp = cm->rules_raw;
+ i < cm->eff_rules_count; i++, lp++) {
+ if (!(((lp->can_id ^ can_id) & lp->can_mask))) {
+ match = 1;
+ break;
+ }
+ }
+ } else { /* SFF */
+ can_id &= CAN_SFF_MASK;
+ match = (test_bit(can_id, cm->match_sff) ? 1 : 0);
+ }
+
+ return match;
+}
+
+static int em_canid_change(struct tcf_proto *tp, void *data, int len,
+ struct tcf_ematch *m)
+{
+ struct can_filter *conf = data; /* Array with rules */
+ struct canid_match *cm;
+ struct canid_match *cm_old = (struct canid_match *)m->data;
+ int i;
+
+ if (!len)
+ return -EINVAL;
+
+ if (len % sizeof(struct can_filter))
+ return -EINVAL;
+
+ if (len > sizeof(struct can_filter) * EM_CAN_RULES_MAX)
+ return -EINVAL;
+
+ cm = kzalloc(sizeof(struct canid_match) + len, GFP_KERNEL);
+ if (!cm)
+ return -ENOMEM;
+
+ cm->rules_count = len / sizeof(struct can_filter);
+
+ /*
+ * We need two for() loops for copying rules into two contiguous
+ * areas in rules_raw to process all eff rules with a simple loop.
+ * NB: The configuration interface supports sff and eff rules.
+ * We do not support filters here that match for the same can_id
+ * provided in a SFF and EFF frame (e.g. 0x123 / 0x80000123).
+ * For this (unusual case) two filters have to be specified. The
+ * SFF/EFF separation is done with the CAN_EFF_FLAG in the can_id.
+ */
+
+ /* Fill rules_raw with EFF rules first */
+ for (i = 0; i < cm->rules_count; i++) {
+ if (conf[i].can_id & CAN_EFF_FLAG) {
+ memcpy(cm->rules_raw + cm->eff_rules_count,
+ &conf[i],
+ sizeof(struct can_filter));
+
+ cm->eff_rules_count++;
+ }
+ }
+
+ /* append SFF frame rules */
+ for (i = 0; i < cm->rules_count; i++) {
+ if (!(conf[i].can_id & CAN_EFF_FLAG)) {
+ memcpy(cm->rules_raw
+ + cm->eff_rules_count
+ + cm->sff_rules_count,
+ &conf[i], sizeof(struct can_filter));
+
+ cm->sff_rules_count++;
+
+ em_canid_sff_match_add(cm,
+ conf[i].can_id, conf[i].can_mask);
+ }
+ }
+
+ m->datalen = sizeof(struct canid_match) + len;
+ m->data = (unsigned long)cm;
+
+ if (cm_old != NULL) {
+ pr_err("canid: Configuring an existing ematch!\n");
+ kfree(cm_old);
+ }
+
+ return 0;
+}
+
+static void em_canid_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
+{
+ struct canid_match *cm = em_canid_priv(m);
+
+ kfree(cm);
+}
+
+static int em_canid_dump(struct sk_buff *skb, struct tcf_ematch *m)
+{
+ struct canid_match *cm = em_canid_priv(m);
+
+ /*
+ * When configuring this ematch 'rules_count' is set not to exceed
+ * 'rules_raw' array size
+ */
+ if (nla_put_nohdr(skb, sizeof(struct can_filter) * cm->rules_count,
+ &cm->rules_raw) < 0)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static struct tcf_ematch_ops em_canid_ops = {
+ .kind = TCF_EM_CANID,
+ .change = em_canid_change,
+ .match = em_canid_match,
+ .destroy = em_canid_destroy,
+ .dump = em_canid_dump,
+ .owner = THIS_MODULE,
+ .link = LIST_HEAD_INIT(em_canid_ops.link)
+};
+
+static int __init init_em_canid(void)
+{
+ return tcf_em_register(&em_canid_ops);
+}
+
+static void __exit exit_em_canid(void)
+{
+ tcf_em_unregister(&em_canid_ops);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(init_em_canid);
+module_exit(exit_em_canid);
+
+MODULE_ALIAS_TCF_EMATCH(TCF_EM_CANID);
diff --git a/net/sched/em_ipset.c b/net/sched/em_ipset.c
new file mode 100644
index 000000000000..3130320997e2
--- /dev/null
+++ b/net/sched/em_ipset.c
@@ -0,0 +1,135 @@
+/*
+ * net/sched/em_ipset.c ipset ematch
+ *
+ * Copyright (c) 2012 Florian Westphal <fw@strlen.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/xt_set.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+#include <net/pkt_cls.h>
+
+static int em_ipset_change(struct tcf_proto *tp, void *data, int data_len,
+ struct tcf_ematch *em)
+{
+ struct xt_set_info *set = data;
+ ip_set_id_t index;
+
+ if (data_len != sizeof(*set))
+ return -EINVAL;
+
+ index = ip_set_nfnl_get_byindex(set->index);
+ if (index == IPSET_INVALID_ID)
+ return -ENOENT;
+
+ em->datalen = sizeof(*set);
+ em->data = (unsigned long)kmemdup(data, em->datalen, GFP_KERNEL);
+ if (em->data)
+ return 0;
+
+ ip_set_nfnl_put(index);
+ return -ENOMEM;
+}
+
+static void em_ipset_destroy(struct tcf_proto *p, struct tcf_ematch *em)
+{
+ const struct xt_set_info *set = (const void *) em->data;
+ if (set) {
+ ip_set_nfnl_put(set->index);
+ kfree((void *) em->data);
+ }
+}
+
+static int em_ipset_match(struct sk_buff *skb, struct tcf_ematch *em,
+ struct tcf_pkt_info *info)
+{
+ struct ip_set_adt_opt opt;
+ struct xt_action_param acpar;
+ const struct xt_set_info *set = (const void *) em->data;
+ struct net_device *dev, *indev = NULL;
+ int ret, network_offset;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ acpar.family = NFPROTO_IPV4;
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ return 0;
+ acpar.thoff = ip_hdrlen(skb);
+ break;
+ case htons(ETH_P_IPV6):
+ acpar.family = NFPROTO_IPV6;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
+ return 0;
+ /* doesn't call ipv6_find_hdr() because ipset doesn't use thoff, yet */
+ acpar.thoff = sizeof(struct ipv6hdr);
+ break;
+ default:
+ return 0;
+ }
+
+ acpar.hooknum = 0;
+
+ opt.family = acpar.family;
+ opt.dim = set->dim;
+ opt.flags = set->flags;
+ opt.cmdflags = 0;
+ opt.timeout = ~0u;
+
+ network_offset = skb_network_offset(skb);
+ skb_pull(skb, network_offset);
+
+ dev = skb->dev;
+
+ rcu_read_lock();
+
+ if (dev && skb->skb_iif)
+ indev = dev_get_by_index_rcu(dev_net(dev), skb->skb_iif);
+
+ acpar.in = indev ? indev : dev;
+ acpar.out = dev;
+
+ ret = ip_set_test(set->index, skb, &acpar, &opt);
+
+ rcu_read_unlock();
+
+ skb_push(skb, network_offset);
+ return ret;
+}
+
+static struct tcf_ematch_ops em_ipset_ops = {
+ .kind = TCF_EM_IPSET,
+ .change = em_ipset_change,
+ .destroy = em_ipset_destroy,
+ .match = em_ipset_match,
+ .owner = THIS_MODULE,
+ .link = LIST_HEAD_INIT(em_ipset_ops.link)
+};
+
+static int __init init_em_ipset(void)
+{
+ return tcf_em_register(&em_ipset_ops);
+}
+
+static void __exit exit_em_ipset(void)
+{
+ tcf_em_unregister(&em_ipset_ops);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
+MODULE_DESCRIPTION("TC extended match for IP sets");
+
+module_init(init_em_ipset);
+module_exit(exit_em_ipset);
+
+MODULE_ALIAS_TCF_EMATCH(TCF_EM_IPSET);
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 4790c696cbce..4ab6e3325573 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -264,7 +264,7 @@ META_COLLECTOR(int_rtiif)
if (unlikely(skb_rtable(skb) == NULL))
*err = -1;
else
- dst->value = skb_rtable(skb)->rt_iif;
+ dst->value = inet_iif(skb);
}
/**************************************************************************
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 085ce53d570a..a08b4ab3e421 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -973,7 +973,7 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
struct net *net = sock_net(skb->sk);
- struct tcmsg *tcm = NLMSG_DATA(n);
+ struct tcmsg *tcm = nlmsg_data(n);
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
u32 clid = tcm->tcm_parent;
@@ -1046,7 +1046,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
replay:
/* Reinit, just in case something touches this. */
- tcm = NLMSG_DATA(n);
+ tcm = nlmsg_data(n);
clid = tcm->tcm_parent;
q = p = NULL;
@@ -1193,8 +1193,10 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
struct gnet_dump d;
struct qdisc_size_table *stab;
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
- tcm = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+ if (!nlh)
+ goto out_nlmsg_trim;
+ tcm = nlmsg_data(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm__pad1 = 0;
tcm->tcm__pad2 = 0;
@@ -1230,7 +1232,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
-nlmsg_failure:
+out_nlmsg_trim:
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
@@ -1366,7 +1368,7 @@ done:
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
struct net *net = sock_net(skb->sk);
- struct tcmsg *tcm = NLMSG_DATA(n);
+ struct tcmsg *tcm = nlmsg_data(n);
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
struct Qdisc *q = NULL;
@@ -1498,8 +1500,10 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
struct gnet_dump d;
const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
- nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
- tcm = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*tcm), flags);
+ if (!nlh)
+ goto out_nlmsg_trim;
+ tcm = nlmsg_data(nlh);
tcm->tcm_family = AF_UNSPEC;
tcm->tcm__pad1 = 0;
tcm->tcm__pad2 = 0;
@@ -1525,7 +1529,7 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
return skb->len;
-nlmsg_failure:
+out_nlmsg_trim:
nla_put_failure:
nlmsg_trim(skb, b);
return -1;
@@ -1616,7 +1620,7 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
- struct tcmsg *tcm = (struct tcmsg *)NLMSG_DATA(cb->nlh);
+ struct tcmsg *tcm = nlmsg_data(cb->nlh);
struct net *net = sock_net(skb->sk);
struct netdev_queue *dev_queue;
struct net_device *dev;
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a2a95aabf9c2..298c0ddfb57e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -331,29 +331,22 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
return PSCHED_NS2TICKS(ticks);
}
-static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
+static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
struct sk_buff_head *list = &sch->q;
psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
- struct sk_buff *skb;
-
- if (likely(skb_queue_len(list) < sch->limit)) {
- skb = skb_peek_tail(list);
- /* Optimize for add at tail */
- if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
- return qdisc_enqueue_tail(nskb, sch);
+ struct sk_buff *skb = skb_peek_tail(list);
- skb_queue_reverse_walk(list, skb) {
- if (tnext >= netem_skb_cb(skb)->time_to_send)
- break;
- }
+ /* Optimize for add at tail */
+ if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
+ return __skb_queue_tail(list, nskb);
- __skb_queue_after(list, skb, nskb);
- sch->qstats.backlog += qdisc_pkt_len(nskb);
- return NET_XMIT_SUCCESS;
+ skb_queue_reverse_walk(list, skb) {
+ if (tnext >= netem_skb_cb(skb)->time_to_send)
+ break;
}
- return qdisc_reshape_fail(nskb, sch);
+ __skb_queue_after(list, skb, nskb);
}
/*
@@ -368,7 +361,6 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* We don't fill cb now as skb_unshare() may invalidate it */
struct netem_skb_cb *cb;
struct sk_buff *skb2;
- int ret;
int count = 1;
/* Random duplication */
@@ -388,7 +380,14 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
- skb_orphan(skb);
+ /* If a delay is expected, orphan the skb. (orphaning usually takes
+ * place at TX completion time, so _before_ the link transit delay)
+ * Ideally, this orphaning should be done after the rate limiting
+ * module, because this breaks TCP Small Queue, and other mechanisms
+ * based on socket sk_wmem_alloc.
+ */
+ if (q->latency || q->jitter)
+ skb_orphan(skb);
/*
* If we need to duplicate packet, then re-insert at top of the
@@ -419,6 +418,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
}
+ if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
+ return qdisc_reshape_fail(skb, sch);
+
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+
cb = netem_skb_cb(skb);
if (q->gap == 0 || /* not doing reordering */
q->counter < q->gap - 1 || /* inside last reordering gap */
@@ -450,7 +454,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cb->time_to_send = now + delay;
++q->counter;
- ret = tfifo_enqueue(skb, sch);
+ tfifo_enqueue(skb, sch);
} else {
/*
* Do re-ordering by putting one out of N packets at the front
@@ -460,16 +464,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->counter = 0;
__skb_queue_head(&sch->q, skb);
- sch->qstats.backlog += qdisc_pkt_len(skb);
sch->qstats.requeues++;
- ret = NET_XMIT_SUCCESS;
- }
-
- if (ret != NET_XMIT_SUCCESS) {
- if (net_xmit_drop_count(ret)) {
- sch->qstats.drops++;
- return ret;
- }
}
return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 74305c883bd3..30ea4674cabd 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -570,6 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
sch->qstats.backlog = q->qdisc->qstats.backlog;
opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
return nla_nest_end(skb, opts);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index ca0c29695d51..474167162947 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -67,7 +67,6 @@ struct teql_master {
struct teql_sched_data {
struct Qdisc *next;
struct teql_master *m;
- struct neighbour *ncache;
struct sk_buff_head q;
};
@@ -134,7 +133,6 @@ teql_reset(struct Qdisc *sch)
skb_queue_purge(&dat->q);
sch->q.qlen = 0;
- teql_neigh_release(xchg(&dat->ncache, NULL));
}
static void
@@ -166,7 +164,6 @@ teql_destroy(struct Qdisc *sch)
}
}
skb_queue_purge(&dat->q);
- teql_neigh_release(xchg(&dat->ncache, NULL));
break;
}
@@ -225,21 +222,25 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
struct net_device *dev, struct netdev_queue *txq,
- struct neighbour *mn)
+ struct dst_entry *dst)
{
- struct teql_sched_data *q = qdisc_priv(txq->qdisc);
- struct neighbour *n = q->ncache;
+ struct neighbour *n;
+ int err = 0;
- if (mn->tbl == NULL)
- return -EINVAL;
- if (n && n->tbl == mn->tbl &&
- memcmp(n->primary_key, mn->primary_key, mn->tbl->key_len) == 0) {
- atomic_inc(&n->refcnt);
- } else {
- n = __neigh_lookup_errno(mn->tbl, mn->primary_key, dev);
- if (IS_ERR(n))
- return PTR_ERR(n);
+ n = dst_neigh_lookup_skb(dst, skb);
+ if (!n)
+ return -ENOENT;
+
+ if (dst->dev != dev) {
+ struct neighbour *mn;
+
+ mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev);
+ neigh_release(n);
+ if (IS_ERR(mn))
+ return PTR_ERR(mn);
+ n = mn;
}
+
if (neigh_event_send(n, skb_res) == 0) {
int err;
char haddr[MAX_ADDR_LEN];
@@ -248,15 +249,13 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
NULL, skb->len);
- if (err < 0) {
- neigh_release(n);
- return -EINVAL;
- }
- teql_neigh_release(xchg(&q->ncache, n));
- return 0;
+ if (err < 0)
+ err = -EINVAL;
+ } else {
+ err = (skb_res == NULL) ? -EAGAIN : 1;
}
neigh_release(n);
- return (skb_res == NULL) ? -EAGAIN : 1;
+ return err;
}
static inline int teql_resolve(struct sk_buff *skb,
@@ -265,7 +264,6 @@ static inline int teql_resolve(struct sk_buff *skb,
struct netdev_queue *txq)
{
struct dst_entry *dst = skb_dst(skb);
- struct neighbour *mn;
int res;
if (txq->qdisc == &noop_qdisc)
@@ -275,8 +273,7 @@ static inline int teql_resolve(struct sk_buff *skb,
return 0;
rcu_read_lock();
- mn = dst_get_neighbour_noref(dst);
- res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+ res = __teql_resolve(skb, skb_res, dev, txq, dst);
rcu_read_unlock();
return res;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5bc9ab161b37..ebaef3ed6065 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -124,6 +124,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
* socket values.
*/
asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
+ asoc->pf_retrans = sctp_pf_retrans;
+
asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
@@ -271,6 +273,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
*/
asoc->peer.sack_needed = 1;
asoc->peer.sack_cnt = 0;
+ asoc->peer.sack_generation = 1;
/* Assume that the peer will tell us if he recognizes ASCONF
* as part of INIT exchange.
@@ -685,6 +688,9 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
/* Set the path max_retrans. */
peer->pathmaxrxt = asoc->pathmaxrxt;
+ /* And the partial failure retrnas threshold */
+ peer->pf_retrans = asoc->pf_retrans;
+
/* Initialize the peer's SACK delay timeout based on the
* association configured value.
*/
@@ -840,6 +846,7 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
struct sctp_ulpevent *event;
struct sockaddr_storage addr;
int spc_state = 0;
+ bool ulp_notify = true;
/* Record the transition on the transport. */
switch (command) {
@@ -853,6 +860,14 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
spc_state = SCTP_ADDR_CONFIRMED;
else
spc_state = SCTP_ADDR_AVAILABLE;
+ /* Don't inform ULP about transition from PF to
+ * active state and set cwnd to 1, see SCTP
+ * Quick failover draft section 5.1, point 5
+ */
+ if (transport->state == SCTP_PF) {
+ ulp_notify = false;
+ transport->cwnd = 1;
+ }
transport->state = SCTP_ACTIVE;
break;
@@ -871,6 +886,11 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
spc_state = SCTP_ADDR_UNREACHABLE;
break;
+ case SCTP_TRANSPORT_PF:
+ transport->state = SCTP_PF;
+ ulp_notify = false;
+ break;
+
default:
return;
}
@@ -878,12 +898,15 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
* user.
*/
- memset(&addr, 0, sizeof(struct sockaddr_storage));
- memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len);
- event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
- 0, spc_state, error, GFP_ATOMIC);
- if (event)
- sctp_ulpq_tail_event(&asoc->ulpq, event);
+ if (ulp_notify) {
+ memset(&addr, 0, sizeof(struct sockaddr_storage));
+ memcpy(&addr, &transport->ipaddr,
+ transport->af_specific->sockaddr_len);
+ event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
+ 0, spc_state, error, GFP_ATOMIC);
+ if (event)
+ sctp_ulpq_tail_event(&asoc->ulpq, event);
+ }
/* Select new active and retran paths. */
@@ -899,7 +922,8 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
transports) {
if ((t->state == SCTP_INACTIVE) ||
- (t->state == SCTP_UNCONFIRMED))
+ (t->state == SCTP_UNCONFIRMED) ||
+ (t->state == SCTP_PF))
continue;
if (!first || t->last_time_heard > first->last_time_heard) {
second = first;
@@ -1359,7 +1383,7 @@ struct sctp_transport *sctp_assoc_choose_alter_transport(
/* Update the association's pmtu and frag_point by going through all the
* transports. This routine is called when a transport's PMTU has changed.
*/
-void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
+void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
{
struct sctp_transport *t;
__u32 pmtu = 0;
@@ -1371,7 +1395,7 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
if (t->pmtu_pending && t->dst) {
- sctp_transport_update_pmtu(t, dst_mtu(t->dst));
+ sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80564fe03024..e64d5210ed13 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -408,10 +408,10 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
if (t->param_flags & SPP_PMTUD_ENABLE) {
/* Update transports view of the MTU */
- sctp_transport_update_pmtu(t, pmtu);
+ sctp_transport_update_pmtu(sk, t, pmtu);
/* Update association pmtu. */
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sk, asoc);
}
/* Retransmit with the new pmtu setting.
@@ -423,6 +423,18 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD);
}
+void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t,
+ struct sk_buff *skb)
+{
+ struct dst_entry *dst;
+
+ if (!t)
+ return;
+ dst = sctp_transport_dst_check(t);
+ if (dst)
+ dst->ops->redirect(dst, sk, skb);
+}
+
/*
* SCTP Implementer's Guide, 2.37 ICMP handling procedures
*
@@ -628,6 +640,10 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
err = EHOSTUNREACH;
break;
+ case ICMP_REDIRECT:
+ sctp_icmp_redirect(sk, transport, skb);
+ err = 0;
+ break;
default:
goto out_unlock;
}
@@ -736,15 +752,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
epb = &ep->base;
- if (hlist_unhashed(&epb->node))
- return;
-
epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
- __hlist_del(&epb->node);
+ hlist_del_init(&epb->node);
sctp_write_unlock(&head->lock);
}
@@ -825,7 +838,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
- __hlist_del(&epb->node);
+ hlist_del_init(&epb->node);
sctp_write_unlock(&head->lock);
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 91f479121c55..ed7139ea7978 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -185,6 +185,9 @@ SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out_unlock;
}
break;
+ case NDISC_REDIRECT:
+ sctp_icmp_redirect(sk, transport, skb);
+ break;
default:
break;
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index f1b7d4bb591e..838e18b4d7ea 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -64,6 +64,8 @@
#include <net/sctp/checksum.h>
/* Forward declarations for private helpers. */
+static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
@@ -224,7 +226,10 @@ static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
if (!auth)
return retval;
- retval = sctp_packet_append_chunk(pkt, auth);
+ retval = __sctp_packet_append_chunk(pkt, auth);
+
+ if (retval != SCTP_XMIT_OK)
+ sctp_chunk_free(auth);
return retval;
}
@@ -248,51 +253,39 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
/* If the SACK timer is running, we have a pending SACK */
if (timer_pending(timer)) {
struct sctp_chunk *sack;
+
+ if (pkt->transport->sack_generation !=
+ pkt->transport->asoc->peer.sack_generation)
+ return retval;
+
asoc->a_rwnd = asoc->rwnd;
sack = sctp_make_sack(asoc);
if (sack) {
- retval = sctp_packet_append_chunk(pkt, sack);
+ retval = __sctp_packet_append_chunk(pkt, sack);
+ if (retval != SCTP_XMIT_OK) {
+ sctp_chunk_free(sack);
+ goto out;
+ }
asoc->peer.sack_needed = 0;
if (del_timer(timer))
sctp_association_put(asoc);
}
}
}
+out:
return retval;
}
+
/* Append a chunk to the offered packet reporting back any inability to do
* so.
*/
-sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
- struct sctp_chunk *chunk)
+static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
{
sctp_xmit_t retval = SCTP_XMIT_OK;
__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
- SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
- chunk);
-
- /* Data chunks are special. Before seeing what else we can
- * bundle into this packet, check to see if we are allowed to
- * send this DATA.
- */
- if (sctp_chunk_is_data(chunk)) {
- retval = sctp_packet_can_append_data(packet, chunk);
- if (retval != SCTP_XMIT_OK)
- goto finish;
- }
-
- /* Try to bundle AUTH chunk */
- retval = sctp_packet_bundle_auth(packet, chunk);
- if (retval != SCTP_XMIT_OK)
- goto finish;
-
- /* Try to bundle SACK chunk */
- retval = sctp_packet_bundle_sack(packet, chunk);
- if (retval != SCTP_XMIT_OK)
- goto finish;
-
/* Check to see if this chunk will fit into the packet */
retval = sctp_packet_will_fit(packet, chunk, chunk_len);
if (retval != SCTP_XMIT_OK)
@@ -334,6 +327,43 @@ finish:
return retval;
}
+/* Append a chunk to the offered packet reporting back any inability to do
+ * so.
+ */
+sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ sctp_xmit_t retval = SCTP_XMIT_OK;
+
+ SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
+ chunk);
+
+ /* Data chunks are special. Before seeing what else we can
+ * bundle into this packet, check to see if we are allowed to
+ * send this DATA.
+ */
+ if (sctp_chunk_is_data(chunk)) {
+ retval = sctp_packet_can_append_data(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+ }
+
+ /* Try to bundle AUTH chunk */
+ retval = sctp_packet_bundle_auth(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ /* Try to bundle SACK chunk */
+ retval = sctp_packet_bundle_sack(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+
+ retval = __sctp_packet_append_chunk(packet, chunk);
+
+finish:
+ return retval;
+}
+
/* All packets are sent to the network through this function from
* sctp_outq_tail().
*
@@ -380,7 +410,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
if (!sctp_transport_dst_check(tp)) {
sctp_transport_route(tp, NULL, sctp_sk(sk));
if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sk, asoc);
}
}
dst = dst_clone(tp->dst);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index a0fa19f5650c..e7aa177c9522 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -792,7 +792,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
if (!new_transport)
new_transport = asoc->peer.active_path;
} else if ((new_transport->state == SCTP_INACTIVE) ||
- (new_transport->state == SCTP_UNCONFIRMED)) {
+ (new_transport->state == SCTP_UNCONFIRMED) ||
+ (new_transport->state == SCTP_PF)) {
/* If the chunk is Heartbeat or Heartbeat Ack,
* send it to chunk->transport, even if it's
* inactive.
@@ -987,7 +988,8 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
new_transport = chunk->transport;
if (!new_transport ||
((new_transport->state == SCTP_INACTIVE) ||
- (new_transport->state == SCTP_UNCONFIRMED)))
+ (new_transport->state == SCTP_UNCONFIRMED) ||
+ (new_transport->state == SCTP_PF)))
new_transport = asoc->peer.active_path;
if (new_transport->state == SCTP_UNCONFIRMED)
continue;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 9c90811d1134..1f89c4e69645 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -568,7 +568,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk,
/* What interface did this skb arrive on? */
static int sctp_v4_skb_iif(const struct sk_buff *skb)
{
- return skb_rtable(skb)->rt_iif;
+ return inet_iif(skb);
}
/* Was this packet marked by Explicit Congestion Notification? */
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index a85eeeb55dd0..479a70ef6ff8 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -132,7 +132,7 @@ void sctp_init_cause(struct sctp_chunk *chunk, __be16 cause_code,
* abort chunk. Differs from sctp_init_cause in that it won't oops
* if there isn't enough space in the op error chunk
*/
-int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
+static int sctp_init_cause_fixed(struct sctp_chunk *chunk, __be16 cause_code,
size_t paylen)
{
sctp_errhdr_t err;
@@ -734,8 +734,10 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
int len;
__u32 ctsn;
__u16 num_gabs, num_dup_tsns;
+ struct sctp_association *aptr = (struct sctp_association *)asoc;
struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
struct sctp_gap_ack_block gabs[SCTP_MAX_GABS];
+ struct sctp_transport *trans;
memset(gabs, 0, sizeof(gabs));
ctsn = sctp_tsnmap_get_ctsn(map);
@@ -805,6 +807,20 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns,
sctp_tsnmap_get_dups(map));
+ /* Once we have a sack generated, check to see what our sack
+ * generation is, if its 0, reset the transports to 0, and reset
+ * the association generation to 1
+ *
+ * The idea is that zero is never used as a valid generation for the
+ * association so no transport will match after a wrap event like this,
+ * Until the next sack
+ */
+ if (++aptr->peer.sack_generation == 0) {
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports)
+ trans->sack_generation = 0;
+ aptr->peer.sack_generation = 1;
+ }
nodata:
return retval;
}
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index c96d1a81cf42..fe99628e1257 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -76,6 +76,8 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
sctp_cmd_seq_t *commands,
gfp_t gfp);
+static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds,
+ struct sctp_transport *t);
/********************************************************************
* Helper functions
********************************************************************/
@@ -470,7 +472,8 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
* notification SHOULD be sent to the upper layer.
*
*/
-static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
+static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
+ struct sctp_association *asoc,
struct sctp_transport *transport,
int is_hb)
{
@@ -495,6 +498,23 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
transport->error_count++;
}
+ /* If the transport error count is greater than the pf_retrans
+ * threshold, and less than pathmaxrtx, then mark this transport
+ * as Partially Failed, ee SCTP Quick Failover Draft, secon 5.1,
+ * point 1
+ */
+ if ((transport->state != SCTP_PF) &&
+ (asoc->pf_retrans < transport->pathmaxrxt) &&
+ (transport->error_count > asoc->pf_retrans)) {
+
+ sctp_assoc_control_transport(asoc, transport,
+ SCTP_TRANSPORT_PF,
+ 0);
+
+ /* Update the hb timer to resend a heartbeat every rto */
+ sctp_cmd_hb_timer_update(commands, transport);
+ }
+
if (transport->state != SCTP_INACTIVE &&
(transport->error_count > transport->pathmaxrxt)) {
SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p",
@@ -699,6 +719,10 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
SCTP_HEARTBEAT_SUCCESS);
}
+ if (t->state == SCTP_PF)
+ sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
+ SCTP_HEARTBEAT_SUCCESS);
+
/* The receiver of the HEARTBEAT ACK should also perform an
* RTT measurement for that destination transport address
* using the time value carried in the HEARTBEAT ACK chunk.
@@ -1268,7 +1292,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_REPORT_TSN:
/* Record the arrival of a TSN. */
error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
- cmd->obj.u32);
+ cmd->obj.u32, NULL);
break;
case SCTP_CMD_REPORT_FWDTSN:
@@ -1565,8 +1589,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_STRIKE:
/* Mark one strike against a transport. */
- sctp_do_8_2_transport_strike(asoc, cmd->obj.transport,
- 0);
+ sctp_do_8_2_transport_strike(commands, asoc,
+ cmd->obj.transport, 0);
break;
case SCTP_CMD_TRANSPORT_IDLE:
@@ -1576,7 +1600,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_TRANSPORT_HB_SENT:
t = cmd->obj.transport;
- sctp_do_8_2_transport_strike(asoc, t, 1);
+ sctp_do_8_2_transport_strike(commands, asoc,
+ t, 1);
t->hb_sent = 1;
break;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b3b8a8d813eb..5e259817a7f3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1231,8 +1231,14 @@ out_free:
SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
" kaddrs: %p err: %d\n",
asoc, kaddrs, err);
- if (asoc)
+ if (asoc) {
+ /* sctp_primitive_ASSOCIATE may have added this association
+ * To the hash table, try to unhash it, just in case, its a noop
+ * if it wasn't hashed so we're safe
+ */
+ sctp_unhash_established(asoc);
sctp_association_free(asoc);
+ }
return err;
}
@@ -1853,7 +1859,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
}
if (asoc->pmtu_pending)
- sctp_assoc_pending_pmtu(asoc);
+ sctp_assoc_pending_pmtu(sk, asoc);
/* If fragmentation is disabled and the message length exceeds the
* association fragmentation point, return EMSGSIZE. The I-D
@@ -1942,8 +1948,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
out_free:
- if (new_asoc)
+ if (new_asoc) {
+ sctp_unhash_established(asoc);
sctp_association_free(asoc);
+ }
out_unlock:
sctp_release_sock(sk);
@@ -2365,7 +2373,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
if ((params->spp_flags & SPP_PMTUD_DISABLE) && params->spp_pathmtu) {
if (trans) {
trans->pathmtu = params->spp_pathmtu;
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
} else if (asoc) {
asoc->pathmtu = params->spp_pathmtu;
sctp_frag_point(asoc, params->spp_pathmtu);
@@ -2382,7 +2390,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
if (update) {
sctp_transport_pmtu(trans, sctp_opt2sk(sp));
- sctp_assoc_sync_pmtu(asoc);
+ sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
}
} else if (asoc) {
asoc->param_flags =
@@ -3470,6 +3478,56 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
}
+/*
+ * SCTP_PEER_ADDR_THLDS
+ *
+ * This option allows us to alter the partially failed threshold for one or all
+ * transports in an association. See Section 6.1 of:
+ * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
+ */
+static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+{
+ struct sctp_paddrthlds val;
+ struct sctp_transport *trans;
+ struct sctp_association *asoc;
+
+ if (optlen < sizeof(struct sctp_paddrthlds))
+ return -EINVAL;
+ if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval,
+ sizeof(struct sctp_paddrthlds)))
+ return -EFAULT;
+
+
+ if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
+ asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+ if (!asoc)
+ return -ENOENT;
+ list_for_each_entry(trans, &asoc->peer.transport_addr_list,
+ transports) {
+ if (val.spt_pathmaxrxt)
+ trans->pathmaxrxt = val.spt_pathmaxrxt;
+ trans->pf_retrans = val.spt_pathpfthld;
+ }
+
+ if (val.spt_pathmaxrxt)
+ asoc->pathmaxrxt = val.spt_pathmaxrxt;
+ asoc->pf_retrans = val.spt_pathpfthld;
+ } else {
+ trans = sctp_addr_id2transport(sk, &val.spt_address,
+ val.spt_assoc_id);
+ if (!trans)
+ return -ENOENT;
+
+ if (val.spt_pathmaxrxt)
+ trans->pathmaxrxt = val.spt_pathmaxrxt;
+ trans->pf_retrans = val.spt_pathpfthld;
+ }
+
+ return 0;
+}
+
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -3619,6 +3677,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_AUTO_ASCONF:
retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
break;
+ case SCTP_PEER_ADDR_THLDS:
+ retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -5490,6 +5551,51 @@ static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
return 0;
}
+/*
+ * SCTP_PEER_ADDR_THLDS
+ *
+ * This option allows us to fetch the partially failed threshold for one or all
+ * transports in an association. See Section 6.1 of:
+ * http://www.ietf.org/id/draft-nishida-tsvwg-sctp-failover-05.txt
+ */
+static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
+ char __user *optval,
+ int len,
+ int __user *optlen)
+{
+ struct sctp_paddrthlds val;
+ struct sctp_transport *trans;
+ struct sctp_association *asoc;
+
+ if (len < sizeof(struct sctp_paddrthlds))
+ return -EINVAL;
+ len = sizeof(struct sctp_paddrthlds);
+ if (copy_from_user(&val, (struct sctp_paddrthlds __user *)optval, len))
+ return -EFAULT;
+
+ if (sctp_is_any(sk, (const union sctp_addr *)&val.spt_address)) {
+ asoc = sctp_id2assoc(sk, val.spt_assoc_id);
+ if (!asoc)
+ return -ENOENT;
+
+ val.spt_pathpfthld = asoc->pf_retrans;
+ val.spt_pathmaxrxt = asoc->pathmaxrxt;
+ } else {
+ trans = sctp_addr_id2transport(sk, &val.spt_address,
+ val.spt_assoc_id);
+ if (!trans)
+ return -ENOENT;
+
+ val.spt_pathmaxrxt = trans->pathmaxrxt;
+ val.spt_pathpfthld = trans->pf_retrans;
+ }
+
+ if (put_user(len, optlen) || copy_to_user(optval, &val, len))
+ return -EFAULT;
+
+ return 0;
+}
+
SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -5628,6 +5734,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
case SCTP_AUTO_ASCONF:
retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
break;
+ case SCTP_PEER_ADDR_THLDS:
+ retval = sctp_getsockopt_paddr_thresholds(sk, optval, len, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index e5fe639c89e7..2b2bfe933ff1 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -141,6 +141,15 @@ static ctl_table sctp_table[] = {
.extra2 = &int_max
},
{
+ .procname = "pf_retrans",
+ .data = &sctp_pf_retrans,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &int_max
+ },
+ {
.procname = "max_init_retransmits",
.data = &sctp_max_retrans_init,
.maxlen = sizeof(int),
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index b026ba0c6992..c97472b248a2 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -68,6 +68,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
memset(&peer->saddr, 0, sizeof(union sctp_addr));
+ peer->sack_generation = 0;
+
/* From 6.3.1 RTO Calculation:
*
* C1) Until an RTT measurement has been made for a packet sent to the
@@ -85,6 +87,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
/* Initialize the default path max_retrans. */
peer->pathmaxrxt = sctp_max_retrans_path;
+ peer->pf_retrans = sctp_pf_retrans;
INIT_LIST_HEAD(&peer->transmitted);
INIT_LIST_HEAD(&peer->send_ready);
@@ -214,7 +217,7 @@ void sctp_transport_set_owner(struct sctp_transport *transport,
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
/* If we don't have a fresh route, look one up */
- if (!transport->dst || transport->dst->obsolete > 1) {
+ if (!transport->dst || transport->dst->obsolete) {
dst_release(transport->dst);
transport->af_specific->get_dst(transport, &transport->saddr,
&transport->fl, sk);
@@ -226,7 +229,7 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
-void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
+void sctp_transport_update_pmtu(struct sock *sk, struct sctp_transport *t, u32 pmtu)
{
struct dst_entry *dst;
@@ -243,8 +246,16 @@ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
}
dst = sctp_transport_dst_check(t);
- if (dst)
- dst->ops->update_pmtu(dst, pmtu);
+ if (!dst)
+ t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
+
+ if (dst) {
+ dst->ops->update_pmtu(dst, sk, NULL, pmtu);
+
+ dst = sctp_transport_dst_check(t);
+ if (!dst)
+ t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
+ }
}
/* Caches the dst entry and source address for a transport's destination
@@ -585,7 +596,8 @@ unsigned long sctp_transport_timeout(struct sctp_transport *t)
{
unsigned long timeout;
timeout = t->rto + sctp_jitter(t->rto);
- if (t->state != SCTP_UNCONFIRMED)
+ if ((t->state != SCTP_UNCONFIRMED) &&
+ (t->state != SCTP_PF))
timeout += t->hbinterval;
timeout += jiffies;
return timeout;
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index f1e40cebc981..b5fb7c409023 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -114,7 +114,8 @@ int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn)
/* Mark this TSN as seen. */
-int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
+int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn,
+ struct sctp_transport *trans)
{
u16 gap;
@@ -133,6 +134,9 @@ int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn)
*/
map->max_tsn_seen++;
map->cumulative_tsn_ack_point++;
+ if (trans)
+ trans->sack_generation =
+ trans->asoc->peer.sack_generation;
map->base_tsn++;
} else {
/* Either we already have a gap, or about to record a gap, so
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 8a84017834c2..33d894776192 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -715,7 +715,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
* can mark it as received so the tsn_map is updated correctly.
*/
if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
- ntohl(chunk->subh.data_hdr->tsn)))
+ ntohl(chunk->subh.data_hdr->tsn),
+ chunk->transport))
goto fail_mark;
/* First calculate the padding, so we don't inadvertently
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index f2d1de7f2ffb..f5a6a4f4faf7 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -1051,7 +1051,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
if (chunk && (freed >= needed)) {
__u32 tsn;
tsn = ntohl(chunk->subh.data_hdr->tsn);
- sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
+ sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
sctp_ulpq_tail_data(ulpq, chunk, gfp);
sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
diff --git a/net/socket.c b/net/socket.c
index 6e0ccc09b313..dfe5b66c97e0 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -398,7 +398,7 @@ int sock_map_fd(struct socket *sock, int flags)
}
EXPORT_SYMBOL(sock_map_fd);
-static struct socket *sock_from_file(struct file *file, int *err)
+struct socket *sock_from_file(struct file *file, int *err)
{
if (file->f_op == &socket_file_ops)
return file->private_data; /* set in sock_map_fd */
@@ -406,6 +406,7 @@ static struct socket *sock_from_file(struct file *file, int *err)
*err = -ENOTSOCK;
return NULL;
}
+EXPORT_SYMBOL(sock_from_file);
/**
* sockfd_lookup - Go from a file number to its socket slot
@@ -522,6 +523,9 @@ void sock_release(struct socket *sock)
if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
printk(KERN_ERR "sock_release: fasync list not empty!\n");
+ if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
+ return;
+
this_cpu_sub(sockets_in_use, 1);
if (!sock->file) {
iput(SOCK_INODE(sock));
@@ -551,8 +555,6 @@ static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
sock_update_classid(sock->sk);
- sock_update_netprioidx(sock->sk);
-
si->sock = sock;
si->scm = NULL;
si->msg = msg;
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 31def68a0f6e..5a3d675d2f2f 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -176,13 +176,14 @@ out_free:
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
-/*
- * Destroys the backchannel preallocated structures.
+/**
+ * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
+ * @xprt: the transport holding the preallocated strucures
+ * @max_reqs the maximum number of preallocated structures to destroy
+ *
* Since these structures may have been allocated by multiple calls
* to xprt_setup_backchannel, we only destroy up to the maximum number
* of reqs specified by the caller.
- * @xprt: the transport holding the preallocated strucures
- * @max_reqs the maximum number of preallocated structures to destroy
*/
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f56f045778ae..00eb859b7de5 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -385,7 +385,7 @@ out_no_rpciod:
return ERR_PTR(err);
}
-/*
+/**
* rpc_create - create an RPC client and transport with one call
* @args: rpc_clnt create argument structure
*
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 2777fa896645..4d0129203733 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -104,23 +104,9 @@ static void ip_map_put(struct kref *kref)
kfree(im);
}
-#if IP_HASHBITS == 8
-/* hash_long on a 64 bit machine is currently REALLY BAD for
- * IP addresses in reverse-endian (i.e. on a little-endian machine).
- * So use a trivial but reliable hash instead
- */
-static inline int hash_ip(__be32 ip)
-{
- int hash = (__force u32)ip ^ ((__force u32)ip>>16);
- return (hash ^ (hash>>8)) & 0xff;
-}
-#endif
-static inline int hash_ip6(struct in6_addr ip)
+static inline int hash_ip6(const struct in6_addr *ip)
{
- return (hash_ip(ip.s6_addr32[0]) ^
- hash_ip(ip.s6_addr32[1]) ^
- hash_ip(ip.s6_addr32[2]) ^
- hash_ip(ip.s6_addr32[3]));
+ return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
}
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
@@ -301,7 +287,7 @@ static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
ip.m_addr = *addr;
ch = sunrpc_cache_lookup(cd, &ip.h,
hash_str(class, IP_HASHBITS) ^
- hash_ip6(*addr));
+ hash_ip6(addr));
if (ch)
return container_of(ch, struct ip_map, h);
@@ -331,7 +317,7 @@ static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
ip.h.expiry_time = expiry;
ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
hash_str(ipm->m_class, IP_HASHBITS) ^
- hash_ip6(ipm->m_addr));
+ hash_ip6(&ipm->m_addr));
if (!ch)
return -ENOMEM;
cache_put(ch, cd);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a6de09de5d21..18bc130255a7 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -43,6 +43,7 @@
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
+#include <trace/events/skb.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
@@ -619,6 +620,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
if (!svc_udp_get_dest_address(rqstp, cmh)) {
net_warn_ratelimited("svc: received unknown control message %d/%d; dropping RPC reply datagram\n",
cmh->cmsg_level, cmh->cmsg_type);
+out_free:
+ trace_kfree_skb(skb, svc_udp_recvfrom);
skb_free_datagram_locked(svsk->sk_sk, skb);
return 0;
}
@@ -630,8 +633,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
local_bh_enable();
/* checksum error */
- skb_free_datagram_locked(svsk->sk_sk, skb);
- return 0;
+ goto out_free;
}
local_bh_enable();
skb_free_datagram_locked(svsk->sk_sk, skb);
@@ -640,10 +642,8 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
rqstp->rq_arg.head[0].iov_base = skb->data +
sizeof(struct udphdr);
rqstp->rq_arg.head[0].iov_len = len;
- if (skb_checksum_complete(skb)) {
- skb_free_datagram_locked(svsk->sk_sk, skb);
- return 0;
- }
+ if (skb_checksum_complete(skb))
+ goto out_free;
rqstp->rq_xprt_ctxt = skb;
}
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index fddcccfcdf76..0cf165580d8d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -180,7 +180,9 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages);
/*
* Helper routines for doing 'memmove' like operations on a struct xdr_buf
- *
+ */
+
+/**
* _shift_data_right_pages
* @pages: vector of pages containing both the source and dest memory area.
* @pgto_base: page vector address of destination
@@ -242,7 +244,7 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
} while ((len -= copy) != 0);
}
-/*
+/**
* _copy_to_pages
* @pages: array of pages
* @pgbase: page vector address of destination
@@ -286,7 +288,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
flush_dcache_page(*pgto);
}
-/*
+/**
* _copy_from_pages
* @p: pointer to destination
* @pages: array of pages
@@ -326,7 +328,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
-/*
+/**
* xdr_shrink_bufhead
* @buf: xdr_buf
* @len: bytes to remove from buf->head[0]
@@ -399,7 +401,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
buf->len = buf->buflen;
}
-/*
+/**
* xdr_shrink_pagelen
* @buf: xdr_buf
* @len: bytes to remove from buf->pages
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3c83035cdaa9..a5a402a7d21f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -531,7 +531,7 @@ void xprt_set_retrans_timeout_def(struct rpc_task *task)
}
EXPORT_SYMBOL_GPL(xprt_set_retrans_timeout_def);
-/*
+/**
* xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
* @task: task whose timeout is to be set
*
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 890b03f8d877..62d0dac8f780 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1014,9 +1014,6 @@ static void xs_udp_data_ready(struct sock *sk, int len)
UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
- /* Something worked... */
- dst_confirm(skb_dst(skb));
-
xprt_adjust_cwnd(task, copied);
xprt_complete_rqst(task, copied);
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 2c5954b85933..585460180ffb 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -41,29 +41,4 @@ config TIPC_PORTS
Setting this to a smaller value saves some memory,
setting it to higher allows for more ports.
-config TIPC_LOG
- int "Size of log buffer"
- depends on TIPC_ADVANCED
- range 0 32768
- default "0"
- help
- Size (in bytes) of TIPC's internal log buffer, which records the
- occurrence of significant events. Can range from 0 to 32768 bytes;
- default is 0.
-
- There is no need to enable the log buffer unless the node will be
- managed remotely via TIPC.
-
-config TIPC_DEBUG
- bool "Enable debugging support"
- default n
- help
- Saying Y here enables TIPC debugging capabilities used by developers.
- Most users do not need to bother; if unsure, just say N.
-
- Enabling debugging support causes TIPC to display data about its
- internal state when certain abnormal conditions occur. It also
- makes it easy for developers to capture additional information of
- interest using the dbg() or msg_dbg() macros.
-
endif # TIPC
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 2625f5ebe3e8..e4e6d8cd47e6 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -162,7 +162,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
}
-/*
+/**
* tipc_bclink_retransmit_to - get most recent node to request retransmission
*
* Called with bc_lock locked
@@ -270,7 +270,7 @@ exit:
spin_unlock_bh(&bc_lock);
}
-/*
+/**
* tipc_bclink_update_link_state - update broadcast link state
*
* tipc_net_lock and node lock set
@@ -330,7 +330,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
}
}
-/*
+/**
* bclink_peek_nack - monitor retransmission requests sent by other nodes
*
* Delay any upcoming NACK by this node if another node has already
@@ -381,7 +381,7 @@ exit:
return res;
}
-/*
+/**
* bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
*
* Called with both sending node's lock and bc_lock taken.
@@ -406,7 +406,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
}
}
-/*
+/**
* tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
*
* tipc_net_lock is read_locked, no other locks set
@@ -701,48 +701,43 @@ void tipc_bcbearer_sort(void)
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
- struct print_buf pb;
+ int ret;
+ struct tipc_stats *s;
if (!bcl)
return 0;
- tipc_printbuf_init(&pb, buf, buf_size);
-
spin_lock_bh(&bc_lock);
- tipc_printf(&pb, "Link <%s>\n"
- " Window:%u packets\n",
- bcl->name, bcl->queue_limit[0]);
- tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
- bcl->stats.recv_info,
- bcl->stats.recv_fragments,
- bcl->stats.recv_fragmented,
- bcl->stats.recv_bundles,
- bcl->stats.recv_bundled);
- tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
- bcl->stats.sent_info,
- bcl->stats.sent_fragments,
- bcl->stats.sent_fragmented,
- bcl->stats.sent_bundles,
- bcl->stats.sent_bundled);
- tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
- bcl->stats.recv_nacks,
- bcl->stats.deferred_recv,
- bcl->stats.duplicates);
- tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
- bcl->stats.sent_nacks,
- bcl->stats.sent_acks,
- bcl->stats.retransmitted);
- tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
- bcl->stats.bearer_congs,
- bcl->stats.link_congs,
- bcl->stats.max_queue_sz,
- bcl->stats.queue_sz_counts
- ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
- : 0);
+ s = &bcl->stats;
+
+ ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
+ " Window:%u packets\n",
+ bcl->name, bcl->queue_limit[0]);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ s->recv_info, s->recv_fragments,
+ s->recv_fragmented, s->recv_bundles,
+ s->recv_bundled);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ s->sent_info, s->sent_fragments,
+ s->sent_fragmented, s->sent_bundles,
+ s->sent_bundled);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX naks:%u defs:%u dups:%u\n",
+ s->recv_nacks, s->deferred_recv, s->duplicates);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX naks:%u acks:%u dups:%u\n",
+ s->sent_nacks, s->sent_acks, s->retransmitted);
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
+ s->bearer_congs, s->link_congs, s->max_queue_sz,
+ s->queue_sz_counts ?
+ (s->accu_queue_sz / s->queue_sz_counts) : 0);
spin_unlock_bh(&bc_lock);
- return tipc_printbuf_validate(&pb);
+ return ret;
}
int tipc_bclink_reset_stats(void)
@@ -880,7 +875,7 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
if (!item->next) {
item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
if (!item->next) {
- warn("Incomplete multicast delivery, no memory\n");
+ pr_warn("Incomplete multicast delivery, no memory\n");
return;
}
item->next->next = NULL;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a297e3a2e3e7..09e71241265d 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -123,28 +123,30 @@ int tipc_register_media(struct tipc_media *m_ptr)
exit:
write_unlock_bh(&tipc_net_lock);
if (res)
- warn("Media <%s> registration error\n", m_ptr->name);
+ pr_warn("Media <%s> registration error\n", m_ptr->name);
return res;
}
/**
* tipc_media_addr_printf - record media address in print buffer
*/
-void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
+void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
{
char addr_str[MAX_ADDR_STR];
struct tipc_media *m_ptr;
+ int ret;
m_ptr = media_find_id(a->media_id);
if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str)))
- tipc_printf(pb, "%s(%s)", m_ptr->name, addr_str);
+ ret = tipc_snprintf(buf, len, "%s(%s)", m_ptr->name, addr_str);
else {
u32 i;
- tipc_printf(pb, "UNKNOWN(%u)", a->media_id);
+ ret = tipc_snprintf(buf, len, "UNKNOWN(%u)", a->media_id);
for (i = 0; i < sizeof(a->value); i++)
- tipc_printf(pb, "-%02x", a->value[i]);
+ ret += tipc_snprintf(buf - ret, len + ret,
+ "-%02x", a->value[i]);
}
}
@@ -172,8 +174,8 @@ struct sk_buff *tipc_media_get_names(void)
/**
* bearer_name_validate - validate & (optionally) deconstruct bearer name
- * @name - ptr to bearer name string
- * @name_parts - ptr to area for bearer name components (or NULL if not needed)
+ * @name: ptr to bearer name string
+ * @name_parts: ptr to area for bearer name components (or NULL if not needed)
*
* Returns 1 if bearer name is valid, otherwise 0.
*/
@@ -418,12 +420,12 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
int res = -EINVAL;
if (!tipc_own_addr) {
- warn("Bearer <%s> rejected, not supported in standalone mode\n",
- name);
+ pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
+ name);
return -ENOPROTOOPT;
}
if (!bearer_name_validate(name, &b_names)) {
- warn("Bearer <%s> rejected, illegal name\n", name);
+ pr_warn("Bearer <%s> rejected, illegal name\n", name);
return -EINVAL;
}
if (tipc_addr_domain_valid(disc_domain) &&
@@ -435,12 +437,13 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
res = 0; /* accept specified node in own cluster */
}
if (res) {
- warn("Bearer <%s> rejected, illegal discovery domain\n", name);
+ pr_warn("Bearer <%s> rejected, illegal discovery domain\n",
+ name);
return -EINVAL;
}
if ((priority > TIPC_MAX_LINK_PRI) &&
(priority != TIPC_MEDIA_LINK_PRI)) {
- warn("Bearer <%s> rejected, illegal priority\n", name);
+ pr_warn("Bearer <%s> rejected, illegal priority\n", name);
return -EINVAL;
}
@@ -448,8 +451,8 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
m_ptr = tipc_media_find(b_names.media_name);
if (!m_ptr) {
- warn("Bearer <%s> rejected, media <%s> not registered\n", name,
- b_names.media_name);
+ pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
+ name, b_names.media_name);
goto exit;
}
@@ -465,24 +468,25 @@ restart:
continue;
}
if (!strcmp(name, tipc_bearers[i].name)) {
- warn("Bearer <%s> rejected, already enabled\n", name);
+ pr_warn("Bearer <%s> rejected, already enabled\n",
+ name);
goto exit;
}
if ((tipc_bearers[i].priority == priority) &&
(++with_this_prio > 2)) {
if (priority-- == 0) {
- warn("Bearer <%s> rejected, duplicate priority\n",
- name);
+ pr_warn("Bearer <%s> rejected, duplicate priority\n",
+ name);
goto exit;
}
- warn("Bearer <%s> priority adjustment required %u->%u\n",
- name, priority + 1, priority);
+ pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
+ name, priority + 1, priority);
goto restart;
}
}
if (bearer_id >= MAX_BEARERS) {
- warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
- name, MAX_BEARERS);
+ pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
+ name, MAX_BEARERS);
goto exit;
}
@@ -490,7 +494,8 @@ restart:
strcpy(b_ptr->name, name);
res = m_ptr->enable_bearer(b_ptr);
if (res) {
- warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
+ pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
+ name, -res);
goto exit;
}
@@ -508,20 +513,20 @@ restart:
res = tipc_disc_create(b_ptr, &m_ptr->bcast_addr, disc_domain);
if (res) {
bearer_disable(b_ptr);
- warn("Bearer <%s> rejected, discovery object creation failed\n",
- name);
+ pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
+ name);
goto exit;
}
- info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
- name, tipc_addr_string_fill(addr_string, disc_domain), priority);
+ pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
+ name,
+ tipc_addr_string_fill(addr_string, disc_domain), priority);
exit:
write_unlock_bh(&tipc_net_lock);
return res;
}
/**
- * tipc_block_bearer(): Block the bearer with the given name,
- * and reset all its links
+ * tipc_block_bearer - Block the bearer with the given name, and reset all its links
*/
int tipc_block_bearer(const char *name)
{
@@ -532,12 +537,12 @@ int tipc_block_bearer(const char *name)
read_lock_bh(&tipc_net_lock);
b_ptr = tipc_bearer_find(name);
if (!b_ptr) {
- warn("Attempt to block unknown bearer <%s>\n", name);
+ pr_warn("Attempt to block unknown bearer <%s>\n", name);
read_unlock_bh(&tipc_net_lock);
return -EINVAL;
}
- info("Blocking bearer <%s>\n", name);
+ pr_info("Blocking bearer <%s>\n", name);
spin_lock_bh(&b_ptr->lock);
b_ptr->blocked = 1;
list_splice_init(&b_ptr->cong_links, &b_ptr->links);
@@ -563,7 +568,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
struct tipc_link *l_ptr;
struct tipc_link *temp_l_ptr;
- info("Disabling bearer <%s>\n", b_ptr->name);
+ pr_info("Disabling bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
b_ptr->blocked = 1;
b_ptr->media->disable_bearer(b_ptr);
@@ -585,7 +590,7 @@ int tipc_disable_bearer(const char *name)
write_lock_bh(&tipc_net_lock);
b_ptr = tipc_bearer_find(name);
if (b_ptr == NULL) {
- warn("Attempt to disable unknown bearer <%s>\n", name);
+ pr_warn("Attempt to disable unknown bearer <%s>\n", name);
res = -EINVAL;
} else {
bearer_disable(b_ptr);
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index e3b2be37fb31..dd4c2abf08e7 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -57,7 +57,7 @@
*/
#define TIPC_MEDIA_TYPE_ETH 1
-/*
+/**
* struct tipc_media_addr - destination address used by TIPC bearers
* @value: address info (format defined by media)
* @media_id: TIPC media type identifier
@@ -179,7 +179,7 @@ void tipc_eth_media_stop(void);
int tipc_media_set_priority(const char *name, u32 new_value);
int tipc_media_set_window(const char *name, u32 new_value);
-void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
struct sk_buff *tipc_media_get_names(void);
struct sk_buff *tipc_bearer_get_names(void);
diff --git a/net/tipc/config.c b/net/tipc/config.c
index c5712a343810..a056a3852f71 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -39,6 +39,8 @@
#include "name_table.h"
#include "config.h"
+#define REPLY_TRUNCATED "<truncated>\n"
+
static u32 config_port_ref;
static DEFINE_SPINLOCK(config_lock);
@@ -104,13 +106,12 @@ struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
return buf;
}
-#define MAX_STATS_INFO 2000
-
static struct sk_buff *tipc_show_stats(void)
{
struct sk_buff *buf;
struct tlv_desc *rep_tlv;
- struct print_buf pb;
+ char *pb;
+ int pb_len;
int str_len;
u32 value;
@@ -121,17 +122,16 @@ static struct sk_buff *tipc_show_stats(void)
if (value != 0)
return tipc_cfg_reply_error_string("unsupported argument");
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_STATS_INFO));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (buf == NULL)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
- tipc_printbuf_init(&pb, (char *)TLV_DATA(rep_tlv), MAX_STATS_INFO);
-
- tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n");
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
- /* Use additional tipc_printf()'s to return more info ... */
- str_len = tipc_printbuf_validate(&pb);
+ str_len = tipc_snprintf(pb, pb_len, "TIPC version " TIPC_MOD_VER "\n");
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -334,12 +334,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
case TIPC_CMD_SHOW_PORTS:
rep_tlv_buf = tipc_port_get_ports();
break;
- case TIPC_CMD_SET_LOG_SIZE:
- rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area, req_tlv_space);
- break;
- case TIPC_CMD_DUMP_LOG:
- rep_tlv_buf = tipc_log_dump();
- break;
case TIPC_CMD_SHOW_STATS:
rep_tlv_buf = tipc_show_stats();
break;
@@ -399,6 +393,8 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
case TIPC_CMD_GET_MAX_CLUSTERS:
case TIPC_CMD_SET_MAX_NODES:
case TIPC_CMD_GET_MAX_NODES:
+ case TIPC_CMD_SET_LOG_SIZE:
+ case TIPC_CMD_DUMP_LOG:
rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (obsolete command)");
break;
@@ -408,6 +404,15 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
break;
}
+ WARN_ON(rep_tlv_buf->len > TLV_SPACE(ULTRA_STRING_MAX_LEN));
+
+ /* Append an error message if we cannot return all requested data */
+ if (rep_tlv_buf->len == TLV_SPACE(ULTRA_STRING_MAX_LEN)) {
+ if (*(rep_tlv_buf->data + ULTRA_STRING_MAX_LEN) != '\0')
+ sprintf(rep_tlv_buf->data + rep_tlv_buf->len -
+ sizeof(REPLY_TRUNCATED) - 1, REPLY_TRUNCATED);
+ }
+
/* Return reply buffer */
exit:
spin_unlock_bh(&config_lock);
@@ -432,7 +437,7 @@ static void cfg_named_msg_event(void *userdata,
if ((size < sizeof(*req_hdr)) ||
(size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
(ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
- warn("Invalid configuration message discarded\n");
+ pr_warn("Invalid configuration message discarded\n");
return;
}
@@ -478,7 +483,7 @@ int tipc_cfg_init(void)
return 0;
failed:
- err("Unable to create configuration service\n");
+ pr_err("Unable to create configuration service\n");
return res;
}
@@ -494,7 +499,7 @@ void tipc_cfg_reinit(void)
seq.lower = seq.upper = tipc_own_addr;
res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
if (res)
- err("Unable to reinitialize configuration service\n");
+ pr_err("Unable to reinitialize configuration service\n");
}
void tipc_cfg_stop(void)
diff --git a/net/tipc/core.c b/net/tipc/core.c
index f7b95239ebda..6586eac6a50e 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,22 +34,18 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/module.h>
-
#include "core.h"
#include "ref.h"
#include "name_table.h"
#include "subscr.h"
#include "config.h"
+#include <linux/module.h>
#ifndef CONFIG_TIPC_PORTS
#define CONFIG_TIPC_PORTS 8191
#endif
-#ifndef CONFIG_TIPC_LOG
-#define CONFIG_TIPC_LOG 0
-#endif
/* global variables used by multiple sub-systems within TIPC */
int tipc_random;
@@ -125,7 +121,6 @@ static void tipc_core_stop(void)
tipc_nametbl_stop();
tipc_ref_table_stop();
tipc_socket_stop();
- tipc_log_resize(0);
}
/**
@@ -161,10 +156,7 @@ static int __init tipc_init(void)
{
int res;
- if (tipc_log_resize(CONFIG_TIPC_LOG) != 0)
- warn("Unable to create log buffer\n");
-
- info("Activated (version " TIPC_MOD_VER ")\n");
+ pr_info("Activated (version " TIPC_MOD_VER ")\n");
tipc_own_addr = 0;
tipc_remote_management = 1;
@@ -175,9 +167,9 @@ static int __init tipc_init(void)
res = tipc_core_start();
if (res)
- err("Unable to start in single node mode\n");
+ pr_err("Unable to start in single node mode\n");
else
- info("Started in single node mode\n");
+ pr_info("Started in single node mode\n");
return res;
}
@@ -185,7 +177,7 @@ static void __exit tipc_exit(void)
{
tipc_core_stop_net();
tipc_core_stop();
- info("Deactivated\n");
+ pr_info("Deactivated\n");
}
module_init(tipc_init);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 2a9bb99537b3..fd42e106c185 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -37,6 +37,8 @@
#ifndef _TIPC_CORE_H
#define _TIPC_CORE_H
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/tipc.h>
#include <linux/tipc_config.h>
#include <linux/types.h>
@@ -58,68 +60,11 @@
#define TIPC_MOD_VER "2.0.0"
-struct tipc_msg; /* msg.h */
-struct print_buf; /* log.h */
-
-/*
- * TIPC system monitoring code
- */
-
-/*
- * TIPC's print buffer subsystem supports the following print buffers:
- *
- * TIPC_NULL : null buffer (i.e. print nowhere)
- * TIPC_CONS : system console
- * TIPC_LOG : TIPC log buffer
- * &buf : user-defined buffer (struct print_buf *)
- *
- * Note: TIPC_LOG is configured to echo its output to the system console;
- * user-defined buffers can be configured to do the same thing.
- */
-extern struct print_buf *const TIPC_NULL;
-extern struct print_buf *const TIPC_CONS;
-extern struct print_buf *const TIPC_LOG;
-
-void tipc_printf(struct print_buf *, const char *fmt, ...);
-
-/*
- * TIPC_OUTPUT is the destination print buffer for system messages.
- */
-#ifndef TIPC_OUTPUT
-#define TIPC_OUTPUT TIPC_LOG
-#endif
+#define ULTRA_STRING_MAX_LEN 32768
-#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_ERR "TIPC: " fmt, ## arg)
-#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_WARNING "TIPC: " fmt, ## arg)
-#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, \
- KERN_NOTICE "TIPC: " fmt, ## arg)
-
-#ifdef CONFIG_TIPC_DEBUG
-
-/*
- * DBG_OUTPUT is the destination print buffer for debug messages.
- */
-#ifndef DBG_OUTPUT
-#define DBG_OUTPUT TIPC_LOG
-#endif
-
-#define dbg(fmt, arg...) tipc_printf(DBG_OUTPUT, KERN_DEBUG fmt, ## arg);
-
-#define msg_dbg(msg, txt) tipc_msg_dbg(DBG_OUTPUT, msg, txt);
-
-void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
-
-#else
-
-#define dbg(fmt, arg...) do {} while (0)
-#define msg_dbg(msg, txt) do {} while (0)
-
-#define tipc_msg_dbg(buf, msg, txt) do {} while (0)
-
-#endif
+struct tipc_msg; /* msg.h */
+int tipc_snprintf(char *buf, int len, const char *fmt, ...);
/*
* TIPC-specific error codes
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index ae054cfe179f..50eaa403eb6e 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -100,14 +100,12 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
{
char node_addr_str[16];
char media_addr_str[64];
- struct print_buf pb;
tipc_addr_string_fill(node_addr_str, node_addr);
- tipc_printbuf_init(&pb, media_addr_str, sizeof(media_addr_str));
- tipc_media_addr_printf(&pb, media_addr);
- tipc_printbuf_validate(&pb);
- warn("Duplicate %s using %s seen on <%s>\n",
- node_addr_str, media_addr_str, b_ptr->name);
+ tipc_media_addr_printf(media_addr_str, sizeof(media_addr_str),
+ media_addr);
+ pr_warn("Duplicate %s using %s seen on <%s>\n", node_addr_str,
+ media_addr_str, b_ptr->name);
}
/**
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 9c6f22ff1c6d..7a52d3922f3c 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -57,14 +57,14 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
struct queue_item *item;
if (!handler_enabled) {
- err("Signal request ignored by handler\n");
+ pr_err("Signal request ignored by handler\n");
return -ENOPROTOOPT;
}
spin_lock_bh(&qitem_lock);
item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
if (!item) {
- err("Signal queue out of memory\n");
+ pr_err("Signal queue out of memory\n");
spin_unlock_bh(&qitem_lock);
return -ENOMEM;
}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 7a614f43549d..1c1e6151875e 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -41,6 +41,12 @@
#include "discover.h"
#include "config.h"
+/*
+ * Error message prefixes
+ */
+static const char *link_co_err = "Link changeover error, ";
+static const char *link_rst_msg = "Resetting link ";
+static const char *link_unk_evt = "Unknown link event ";
/*
* Out-of-range value for link session numbers
@@ -153,8 +159,8 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
/**
* link_name_validate - validate & (optionally) deconstruct tipc_link name
- * @name - ptr to link name string
- * @name_parts - ptr to area for link name components (or NULL if not needed)
+ * @name: ptr to link name string
+ * @name_parts: ptr to area for link name components (or NULL if not needed)
*
* Returns 1 if link name is valid, otherwise 0.
*/
@@ -300,20 +306,20 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
if (n_ptr->link_cnt >= 2) {
tipc_addr_string_fill(addr_string, n_ptr->addr);
- err("Attempt to establish third link to %s\n", addr_string);
+ pr_err("Attempt to establish third link to %s\n", addr_string);
return NULL;
}
if (n_ptr->links[b_ptr->identity]) {
tipc_addr_string_fill(addr_string, n_ptr->addr);
- err("Attempt to establish second link on <%s> to %s\n",
- b_ptr->name, addr_string);
+ pr_err("Attempt to establish second link on <%s> to %s\n",
+ b_ptr->name, addr_string);
return NULL;
}
l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
if (!l_ptr) {
- warn("Link creation failed, no memory\n");
+ pr_warn("Link creation failed, no memory\n");
return NULL;
}
@@ -371,7 +377,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
void tipc_link_delete(struct tipc_link *l_ptr)
{
if (!l_ptr) {
- err("Attempt to delete non-existent link\n");
+ pr_err("Attempt to delete non-existent link\n");
return;
}
@@ -632,8 +638,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv / 4);
break;
case RESET_MSG:
- info("Resetting link <%s>, requested by peer\n",
- l_ptr->name);
+ pr_info("%s<%s>, requested by peer\n", link_rst_msg,
+ l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@@ -642,7 +648,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
default:
- err("Unknown link event %u in WW state\n", event);
+ pr_err("%s%u in WW state\n", link_unk_evt, event);
}
break;
case WORKING_UNKNOWN:
@@ -654,8 +660,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
- info("Resetting link <%s>, requested by peer "
- "while probing\n", l_ptr->name);
+ pr_info("%s<%s>, requested by peer while probing\n",
+ link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
@@ -680,8 +686,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
} else { /* Link has failed */
- warn("Resetting link <%s>, peer not responding\n",
- l_ptr->name);
+ pr_warn("%s<%s>, peer not responding\n",
+ link_rst_msg, l_ptr->name);
tipc_link_reset(l_ptr);
l_ptr->state = RESET_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
@@ -692,7 +698,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
}
break;
default:
- err("Unknown link event %u in WU state\n", event);
+ pr_err("%s%u in WU state\n", link_unk_evt, event);
}
break;
case RESET_UNKNOWN:
@@ -726,7 +732,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
default:
- err("Unknown link event %u in RU state\n", event);
+ pr_err("%s%u in RU state\n", link_unk_evt, event);
}
break;
case RESET_RESET:
@@ -751,11 +757,11 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
link_set_timer(l_ptr, cont_intv);
break;
default:
- err("Unknown link event %u in RR state\n", event);
+ pr_err("%s%u in RR state\n", link_unk_evt, event);
}
break;
default:
- err("Unknown link state %u/%u\n", l_ptr->state, event);
+ pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
}
}
@@ -856,7 +862,8 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
}
kfree_skb(buf);
if (imp > CONN_MANAGER) {
- warn("Resetting link <%s>, send queue full", l_ptr->name);
+ pr_warn("%s<%s>, send queue full", link_rst_msg,
+ l_ptr->name);
tipc_link_reset(l_ptr);
}
return dsz;
@@ -944,7 +951,7 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
return res;
}
-/*
+/**
* tipc_link_send_names - send name table entries to new neighbor
*
* Send routine for bulk delivery of name table messages when contact
@@ -1409,8 +1416,8 @@ static void link_reset_all(unsigned long addr)
tipc_node_lock(n_ptr);
- warn("Resetting all links to %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
+ pr_warn("Resetting all links to %s\n",
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
for (i = 0; i < MAX_BEARERS; i++) {
if (n_ptr->links[i]) {
@@ -1428,7 +1435,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
{
struct tipc_msg *msg = buf_msg(buf);
- warn("Retransmission failure on link <%s>\n", l_ptr->name);
+ pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
if (l_ptr->addr) {
/* Handle failure on standard link */
@@ -1440,21 +1447,23 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
struct tipc_node *n_ptr;
char addr_string[16];
- info("Msg seq number: %u, ", msg_seqno(msg));
- info("Outstanding acks: %lu\n",
- (unsigned long) TIPC_SKB_CB(buf)->handle);
+ pr_info("Msg seq number: %u, ", msg_seqno(msg));
+ pr_cont("Outstanding acks: %lu\n",
+ (unsigned long) TIPC_SKB_CB(buf)->handle);
n_ptr = tipc_bclink_retransmit_to();
tipc_node_lock(n_ptr);
tipc_addr_string_fill(addr_string, n_ptr->addr);
- info("Broadcast link info for %s\n", addr_string);
- info("Supportable: %d, ", n_ptr->bclink.supportable);
- info("Supported: %d, ", n_ptr->bclink.supported);
- info("Acked: %u\n", n_ptr->bclink.acked);
- info("Last in: %u, ", n_ptr->bclink.last_in);
- info("Oos state: %u, ", n_ptr->bclink.oos_state);
- info("Last sent: %u\n", n_ptr->bclink.last_sent);
+ pr_info("Broadcast link info for %s\n", addr_string);
+ pr_info("Supportable: %d, Supported: %d, Acked: %u\n",
+ n_ptr->bclink.supportable,
+ n_ptr->bclink.supported,
+ n_ptr->bclink.acked);
+ pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
+ n_ptr->bclink.last_in,
+ n_ptr->bclink.oos_state,
+ n_ptr->bclink.last_sent);
tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
@@ -1479,8 +1488,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
l_ptr->retransm_queue_head = msg_seqno(msg);
l_ptr->retransm_queue_size = retransmits;
} else {
- err("Unexpected retransmit on link %s (qsize=%d)\n",
- l_ptr->name, l_ptr->retransm_queue_size);
+ pr_err("Unexpected retransmit on link %s (qsize=%d)\n",
+ l_ptr->name, l_ptr->retransm_queue_size);
}
return;
} else {
@@ -1787,7 +1796,7 @@ cont:
read_unlock_bh(&tipc_net_lock);
}
-/*
+/**
* tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
*
* Returns increase in queue length (i.e. 0 or 1)
@@ -2074,8 +2083,9 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
if (msg_linkprio(msg) &&
(msg_linkprio(msg) != l_ptr->priority)) {
- warn("Resetting link <%s>, priority change %u->%u\n",
- l_ptr->name, l_ptr->priority, msg_linkprio(msg));
+ pr_warn("%s<%s>, priority change %u->%u\n",
+ link_rst_msg, l_ptr->name, l_ptr->priority,
+ msg_linkprio(msg));
l_ptr->priority = msg_linkprio(msg);
tipc_link_reset(l_ptr); /* Enforce change to take effect */
break;
@@ -2139,15 +2149,13 @@ static void tipc_link_tunnel(struct tipc_link *l_ptr,
tunnel = l_ptr->owner->active_links[selector & 1];
if (!tipc_link_is_up(tunnel)) {
- warn("Link changeover error, "
- "tunnel link no longer available\n");
+ pr_warn("%stunnel link no longer available\n", link_co_err);
return;
}
msg_set_size(tunnel_hdr, length + INT_H_SIZE);
buf = tipc_buf_acquire(length + INT_H_SIZE);
if (!buf) {
- warn("Link changeover error, "
- "unable to send tunnel msg\n");
+ pr_warn("%sunable to send tunnel msg\n", link_co_err);
return;
}
skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
@@ -2173,8 +2181,7 @@ void tipc_link_changeover(struct tipc_link *l_ptr)
return;
if (!l_ptr->owner->permit_changeover) {
- warn("Link changeover error, "
- "peer did not permit changeover\n");
+ pr_warn("%speer did not permit changeover\n", link_co_err);
return;
}
@@ -2192,8 +2199,8 @@ void tipc_link_changeover(struct tipc_link *l_ptr)
msg_set_size(&tunnel_hdr, INT_H_SIZE);
tipc_link_send_buf(tunnel, buf);
} else {
- warn("Link changeover error, "
- "unable to send changeover msg\n");
+ pr_warn("%sunable to send changeover msg\n",
+ link_co_err);
}
return;
}
@@ -2246,8 +2253,8 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
outbuf = tipc_buf_acquire(length + INT_H_SIZE);
if (outbuf == NULL) {
- warn("Link changeover error, "
- "unable to send duplicate msg\n");
+ pr_warn("%sunable to send duplicate msg\n",
+ link_co_err);
return;
}
skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
@@ -2298,8 +2305,8 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
if (!dest_link)
goto exit;
if (dest_link == *l_ptr) {
- err("Unexpected changeover message on link <%s>\n",
- (*l_ptr)->name);
+ pr_err("Unexpected changeover message on link <%s>\n",
+ (*l_ptr)->name);
goto exit;
}
*l_ptr = dest_link;
@@ -2310,7 +2317,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
goto exit;
*buf = buf_extract(tunnel_buf, INT_H_SIZE);
if (*buf == NULL) {
- warn("Link changeover error, duplicate msg dropped\n");
+ pr_warn("%sduplicate msg dropped\n", link_co_err);
goto exit;
}
kfree_skb(tunnel_buf);
@@ -2319,8 +2326,8 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
/* First original message ?: */
if (tipc_link_is_up(dest_link)) {
- info("Resetting link <%s>, changeover initiated by peer\n",
- dest_link->name);
+ pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
+ dest_link->name);
tipc_link_reset(dest_link);
dest_link->exp_msg_count = msg_count;
if (!msg_count)
@@ -2333,8 +2340,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
/* Receive original message */
if (dest_link->exp_msg_count == 0) {
- warn("Link switchover error, "
- "got too many tunnelled messages\n");
+ pr_warn("%sgot too many tunnelled messages\n", link_co_err);
goto exit;
}
dest_link->exp_msg_count--;
@@ -2346,7 +2352,7 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
kfree_skb(tunnel_buf);
return 1;
} else {
- warn("Link changeover error, original msg dropped\n");
+ pr_warn("%soriginal msg dropped\n", link_co_err);
}
}
exit:
@@ -2367,7 +2373,7 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
while (msgcount--) {
obuf = buf_extract(buf, pos);
if (obuf == NULL) {
- warn("Link unable to unbundle message(s)\n");
+ pr_warn("Link unable to unbundle message(s)\n");
break;
}
pos += align(msg_size(buf_msg(obuf)));
@@ -2538,7 +2544,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
set_fragm_size(pbuf, fragm_sz);
set_expected_frags(pbuf, exp_fragm_cnt - 1);
} else {
- dbg("Link unable to reassemble fragmented message\n");
+ pr_debug("Link unable to reassemble fragmented message\n");
kfree_skb(fbuf);
return -1;
}
@@ -2635,8 +2641,8 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
/**
* link_find_link - locate link by name
- * @name - ptr to link name string
- * @node - ptr to area to be filled with ptr to associated node
+ * @name: ptr to link name string
+ * @node: ptr to area to be filled with ptr to associated node
*
* Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
* this also prevents link deletion.
@@ -2671,8 +2677,8 @@ static struct tipc_link *link_find_link(const char *name,
/**
* link_value_is_valid -- validate proposed link tolerance/priority/window
*
- * @cmd - value type (TIPC_CMD_SET_LINK_*)
- * @new_value - the new value
+ * @cmd: value type (TIPC_CMD_SET_LINK_*)
+ * @new_value: the new value
*
* Returns 1 if value is within range, 0 if not.
*/
@@ -2693,9 +2699,9 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
/**
* link_cmd_set_value - change priority/tolerance/window for link/bearer/media
- * @name - ptr to link, bearer, or media name
- * @new_value - new value of link, bearer, or media setting
- * @cmd - which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
+ * @name: ptr to link, bearer, or media name
+ * @new_value: new value of link, bearer, or media setting
+ * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
*
* Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
*
@@ -2860,112 +2866,114 @@ static u32 percent(u32 count, u32 total)
*/
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
- struct print_buf pb;
- struct tipc_link *l_ptr;
+ struct tipc_link *l;
+ struct tipc_stats *s;
struct tipc_node *node;
char *status;
u32 profile_total = 0;
+ int ret;
if (!strcmp(name, tipc_bclink_name))
return tipc_bclink_stats(buf, buf_size);
- tipc_printbuf_init(&pb, buf, buf_size);
-
read_lock_bh(&tipc_net_lock);
- l_ptr = link_find_link(name, &node);
- if (!l_ptr) {
+ l = link_find_link(name, &node);
+ if (!l) {
read_unlock_bh(&tipc_net_lock);
return 0;
}
tipc_node_lock(node);
+ s = &l->stats;
- if (tipc_link_is_active(l_ptr))
+ if (tipc_link_is_active(l))
status = "ACTIVE";
- else if (tipc_link_is_up(l_ptr))
+ else if (tipc_link_is_up(l))
status = "STANDBY";
else
status = "DEFUNCT";
- tipc_printf(&pb, "Link <%s>\n"
- " %s MTU:%u Priority:%u Tolerance:%u ms"
- " Window:%u packets\n",
- l_ptr->name, status, l_ptr->max_pkt,
- l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
- tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
- l_ptr->next_in_no - l_ptr->stats.recv_info,
- l_ptr->stats.recv_fragments,
- l_ptr->stats.recv_fragmented,
- l_ptr->stats.recv_bundles,
- l_ptr->stats.recv_bundled);
- tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
- l_ptr->next_out_no - l_ptr->stats.sent_info,
- l_ptr->stats.sent_fragments,
- l_ptr->stats.sent_fragmented,
- l_ptr->stats.sent_bundles,
- l_ptr->stats.sent_bundled);
- profile_total = l_ptr->stats.msg_length_counts;
+
+ ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
+ " %s MTU:%u Priority:%u Tolerance:%u ms"
+ " Window:%u packets\n",
+ l->name, status, l->max_pkt, l->priority,
+ l->tolerance, l->queue_limit[0]);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ l->next_in_no - s->recv_info, s->recv_fragments,
+ s->recv_fragmented, s->recv_bundles,
+ s->recv_bundled);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
+ l->next_out_no - s->sent_info, s->sent_fragments,
+ s->sent_fragmented, s->sent_bundles,
+ s->sent_bundled);
+
+ profile_total = s->msg_length_counts;
if (!profile_total)
profile_total = 1;
- tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
- " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
- "-16384:%u%% -32768:%u%% -66000:%u%%\n",
- l_ptr->stats.msg_length_counts,
- l_ptr->stats.msg_lengths_total / profile_total,
- percent(l_ptr->stats.msg_length_profile[0], profile_total),
- percent(l_ptr->stats.msg_length_profile[1], profile_total),
- percent(l_ptr->stats.msg_length_profile[2], profile_total),
- percent(l_ptr->stats.msg_length_profile[3], profile_total),
- percent(l_ptr->stats.msg_length_profile[4], profile_total),
- percent(l_ptr->stats.msg_length_profile[5], profile_total),
- percent(l_ptr->stats.msg_length_profile[6], profile_total));
- tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
- l_ptr->stats.recv_states,
- l_ptr->stats.recv_probes,
- l_ptr->stats.recv_nacks,
- l_ptr->stats.deferred_recv,
- l_ptr->stats.duplicates);
- tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
- l_ptr->stats.sent_states,
- l_ptr->stats.sent_probes,
- l_ptr->stats.sent_nacks,
- l_ptr->stats.sent_acks,
- l_ptr->stats.retransmitted);
- tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
- l_ptr->stats.bearer_congs,
- l_ptr->stats.link_congs,
- l_ptr->stats.max_queue_sz,
- l_ptr->stats.queue_sz_counts
- ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
- : 0);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX profile sample:%u packets average:%u octets\n"
+ " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
+ "-16384:%u%% -32768:%u%% -66000:%u%%\n",
+ s->msg_length_counts,
+ s->msg_lengths_total / profile_total,
+ percent(s->msg_length_profile[0], profile_total),
+ percent(s->msg_length_profile[1], profile_total),
+ percent(s->msg_length_profile[2], profile_total),
+ percent(s->msg_length_profile[3], profile_total),
+ percent(s->msg_length_profile[4], profile_total),
+ percent(s->msg_length_profile[5], profile_total),
+ percent(s->msg_length_profile[6], profile_total));
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " RX states:%u probes:%u naks:%u defs:%u"
+ " dups:%u\n", s->recv_states, s->recv_probes,
+ s->recv_nacks, s->deferred_recv, s->duplicates);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " TX states:%u probes:%u naks:%u acks:%u"
+ " dups:%u\n", s->sent_states, s->sent_probes,
+ s->sent_nacks, s->sent_acks, s->retransmitted);
+
+ ret += tipc_snprintf(buf + ret, buf_size - ret,
+ " Congestion bearer:%u link:%u Send queue"
+ " max:%u avg:%u\n", s->bearer_congs, s->link_congs,
+ s->max_queue_sz, s->queue_sz_counts ?
+ (s->accu_queue_sz / s->queue_sz_counts) : 0);
tipc_node_unlock(node);
read_unlock_bh(&tipc_net_lock);
- return tipc_printbuf_validate(&pb);
+ return ret;
}
-#define MAX_LINK_STATS_INFO 2000
-
struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
struct sk_buff *buf;
struct tlv_desc *rep_tlv;
int str_len;
+ int pb_len;
+ char *pb;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (!buf)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
-
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
- (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
+ pb, pb_len);
if (!str_len) {
kfree_skb(buf);
return tipc_cfg_reply_error_string("link not found");
}
-
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -3003,62 +3011,16 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
static void link_print(struct tipc_link *l_ptr, const char *str)
{
- char print_area[256];
- struct print_buf pb;
- struct print_buf *buf = &pb;
-
- tipc_printbuf_init(buf, print_area, sizeof(print_area));
-
- tipc_printf(buf, str);
- tipc_printf(buf, "Link %x<%s>:",
- l_ptr->addr, l_ptr->b_ptr->name);
-
-#ifdef CONFIG_TIPC_DEBUG
- if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
- goto print_state;
-
- tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
- tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
- tipc_printf(buf, "SQUE");
- if (l_ptr->first_out) {
- tipc_printf(buf, "[%u..", buf_seqno(l_ptr->first_out));
- if (l_ptr->next_out)
- tipc_printf(buf, "%u..", buf_seqno(l_ptr->next_out));
- tipc_printf(buf, "%u]", buf_seqno(l_ptr->last_out));
- if ((mod(buf_seqno(l_ptr->last_out) -
- buf_seqno(l_ptr->first_out))
- != (l_ptr->out_queue_size - 1)) ||
- (l_ptr->last_out->next != NULL)) {
- tipc_printf(buf, "\nSend queue inconsistency\n");
- tipc_printf(buf, "first_out= %p ", l_ptr->first_out);
- tipc_printf(buf, "next_out= %p ", l_ptr->next_out);
- tipc_printf(buf, "last_out= %p ", l_ptr->last_out);
- }
- } else
- tipc_printf(buf, "[]");
- tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
- if (l_ptr->oldest_deferred_in) {
- u32 o = buf_seqno(l_ptr->oldest_deferred_in);
- u32 n = buf_seqno(l_ptr->newest_deferred_in);
- tipc_printf(buf, ":RQUE[%u..%u]", o, n);
- if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
- tipc_printf(buf, ":RQSIZ(%u)",
- l_ptr->deferred_inqueue_sz);
- }
- }
-print_state:
-#endif
+ pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);
if (link_working_unknown(l_ptr))
- tipc_printf(buf, ":WU");
+ pr_cont(":WU\n");
else if (link_reset_reset(l_ptr))
- tipc_printf(buf, ":RR");
+ pr_cont(":RR\n");
else if (link_reset_unknown(l_ptr))
- tipc_printf(buf, ":RU");
+ pr_cont(":RU\n");
else if (link_working_working(l_ptr))
- tipc_printf(buf, ":WW");
- tipc_printf(buf, "\n");
-
- tipc_printbuf_validate(buf);
- info("%s", print_area);
+ pr_cont(":WW\n");
+ else
+ pr_cont("\n");
}
diff --git a/net/tipc/link.h b/net/tipc/link.h
index d6a60a963ce6..6e921121be06 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -37,7 +37,6 @@
#ifndef _TIPC_LINK_H
#define _TIPC_LINK_H
-#include "log.h"
#include "msg.h"
#include "node.h"
@@ -63,6 +62,37 @@
*/
#define MAX_PKT_DEFAULT 1500
+struct tipc_stats {
+ u32 sent_info; /* used in counting # sent packets */
+ u32 recv_info; /* used in counting # recv'd packets */
+ u32 sent_states;
+ u32 recv_states;
+ u32 sent_probes;
+ u32 recv_probes;
+ u32 sent_nacks;
+ u32 recv_nacks;
+ u32 sent_acks;
+ u32 sent_bundled;
+ u32 sent_bundles;
+ u32 recv_bundled;
+ u32 recv_bundles;
+ u32 retransmitted;
+ u32 sent_fragmented;
+ u32 sent_fragments;
+ u32 recv_fragmented;
+ u32 recv_fragments;
+ u32 link_congs; /* # port sends blocked by congestion */
+ u32 bearer_congs;
+ u32 deferred_recv;
+ u32 duplicates;
+ u32 max_queue_sz; /* send queue size high water mark */
+ u32 accu_queue_sz; /* used for send queue size profiling */
+ u32 queue_sz_counts; /* used for send queue size profiling */
+ u32 msg_length_counts; /* used for message length profiling */
+ u32 msg_lengths_total; /* used for message length profiling */
+ u32 msg_length_profile[7]; /* used for msg. length profiling */
+};
+
/**
* struct tipc_link - TIPC link data structure
* @addr: network address of link's peer node
@@ -175,36 +205,7 @@ struct tipc_link {
struct sk_buff *defragm_buf;
/* Statistics */
- struct {
- u32 sent_info; /* used in counting # sent packets */
- u32 recv_info; /* used in counting # recv'd packets */
- u32 sent_states;
- u32 recv_states;
- u32 sent_probes;
- u32 recv_probes;
- u32 sent_nacks;
- u32 recv_nacks;
- u32 sent_acks;
- u32 sent_bundled;
- u32 sent_bundles;
- u32 recv_bundled;
- u32 recv_bundles;
- u32 retransmitted;
- u32 sent_fragmented;
- u32 sent_fragments;
- u32 recv_fragmented;
- u32 recv_fragments;
- u32 link_congs; /* # port sends blocked by congestion */
- u32 bearer_congs;
- u32 deferred_recv;
- u32 duplicates;
- u32 max_queue_sz; /* send queue size high water mark */
- u32 accu_queue_sz; /* used for send queue size profiling */
- u32 queue_sz_counts; /* used for send queue size profiling */
- u32 msg_length_counts; /* used for message length profiling */
- u32 msg_lengths_total; /* used for message length profiling */
- u32 msg_length_profile[7]; /* used for msg. length profiling */
- } stats;
+ struct tipc_stats stats;
};
struct tipc_port;
diff --git a/net/tipc/log.c b/net/tipc/log.c
index 026733f24919..abef644f27d8 100644
--- a/net/tipc/log.c
+++ b/net/tipc/log.c
@@ -36,302 +36,20 @@
#include "core.h"
#include "config.h"
-#include "log.h"
-
-/*
- * TIPC pre-defines the following print buffers:
- *
- * TIPC_NULL : null buffer (i.e. print nowhere)
- * TIPC_CONS : system console
- * TIPC_LOG : TIPC log buffer
- *
- * Additional user-defined print buffers are also permitted.
- */
-static struct print_buf null_buf = { NULL, 0, NULL, 0 };
-struct print_buf *const TIPC_NULL = &null_buf;
-
-static struct print_buf cons_buf = { NULL, 0, NULL, 1 };
-struct print_buf *const TIPC_CONS = &cons_buf;
-
-static struct print_buf log_buf = { NULL, 0, NULL, 1 };
-struct print_buf *const TIPC_LOG = &log_buf;
-
-/*
- * Locking policy when using print buffers.
- *
- * 1) tipc_printf() uses 'print_lock' to protect against concurrent access to
- * 'print_string' when writing to a print buffer. This also protects against
- * concurrent writes to the print buffer being written to.
- *
- * 2) tipc_log_XXX() leverages the aforementioned use of 'print_lock' to
- * protect against all types of concurrent operations on their associated
- * print buffer (not just write operations).
- *
- * Note: All routines of the form tipc_printbuf_XXX() are lock-free, and rely
- * on the caller to prevent simultaneous use of the print buffer(s) being
- * manipulated.
- */
-static char print_string[TIPC_PB_MAX_STR];
-static DEFINE_SPINLOCK(print_lock);
-
-static void tipc_printbuf_move(struct print_buf *pb_to,
- struct print_buf *pb_from);
-
-#define FORMAT(PTR, LEN, FMT) \
-{\
- va_list args;\
- va_start(args, FMT);\
- LEN = vsprintf(PTR, FMT, args);\
- va_end(args);\
- *(PTR + LEN) = '\0';\
-}
-
-/**
- * tipc_printbuf_init - initialize print buffer to empty
- * @pb: pointer to print buffer structure
- * @raw: pointer to character array used by print buffer
- * @size: size of character array
- *
- * Note: If the character array is too small (or absent), the print buffer
- * becomes a null device that discards anything written to it.
- */
-void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
-{
- pb->buf = raw;
- pb->crs = raw;
- pb->size = size;
- pb->echo = 0;
-
- if (size < TIPC_PB_MIN_SIZE) {
- pb->buf = NULL;
- } else if (raw) {
- pb->buf[0] = 0;
- pb->buf[size - 1] = ~0;
- }
-}
-
-/**
- * tipc_printbuf_reset - reinitialize print buffer to empty state
- * @pb: pointer to print buffer structure
- */
-static void tipc_printbuf_reset(struct print_buf *pb)
-{
- if (pb->buf) {
- pb->crs = pb->buf;
- pb->buf[0] = 0;
- pb->buf[pb->size - 1] = ~0;
- }
-}
-
-/**
- * tipc_printbuf_empty - test if print buffer is in empty state
- * @pb: pointer to print buffer structure
- *
- * Returns non-zero if print buffer is empty.
- */
-static int tipc_printbuf_empty(struct print_buf *pb)
-{
- return !pb->buf || (pb->crs == pb->buf);
-}
-
-/**
- * tipc_printbuf_validate - check for print buffer overflow
- * @pb: pointer to print buffer structure
- *
- * Verifies that a print buffer has captured all data written to it.
- * If data has been lost, linearize buffer and prepend an error message
- *
- * Returns length of print buffer data string (including trailing NUL)
- */
-int tipc_printbuf_validate(struct print_buf *pb)
-{
- char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
- char *cp_buf;
- struct print_buf cb;
-
- if (!pb->buf)
- return 0;
-
- if (pb->buf[pb->size - 1] == 0) {
- cp_buf = kmalloc(pb->size, GFP_ATOMIC);
- if (cp_buf) {
- tipc_printbuf_init(&cb, cp_buf, pb->size);
- tipc_printbuf_move(&cb, pb);
- tipc_printbuf_move(pb, &cb);
- kfree(cp_buf);
- memcpy(pb->buf, err, strlen(err));
- } else {
- tipc_printbuf_reset(pb);
- tipc_printf(pb, err);
- }
- }
- return pb->crs - pb->buf + 1;
-}
-
-/**
- * tipc_printbuf_move - move print buffer contents to another print buffer
- * @pb_to: pointer to destination print buffer structure
- * @pb_from: pointer to source print buffer structure
- *
- * Current contents of destination print buffer (if any) are discarded.
- * Source print buffer becomes empty if a successful move occurs.
- */
-static void tipc_printbuf_move(struct print_buf *pb_to,
- struct print_buf *pb_from)
-{
- int len;
-
- /* Handle the cases where contents can't be moved */
- if (!pb_to->buf)
- return;
-
- if (!pb_from->buf) {
- tipc_printbuf_reset(pb_to);
- return;
- }
-
- if (pb_to->size < pb_from->size) {
- strcpy(pb_to->buf, "*** PRINT BUFFER MOVE ERROR ***");
- pb_to->buf[pb_to->size - 1] = ~0;
- pb_to->crs = strchr(pb_to->buf, 0);
- return;
- }
-
- /* Copy data from char after cursor to end (if used) */
- len = pb_from->buf + pb_from->size - pb_from->crs - 2;
- if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
- strcpy(pb_to->buf, pb_from->crs + 1);
- pb_to->crs = pb_to->buf + len;
- } else
- pb_to->crs = pb_to->buf;
-
- /* Copy data from start to cursor (always) */
- len = pb_from->crs - pb_from->buf;
- strcpy(pb_to->crs, pb_from->buf);
- pb_to->crs += len;
-
- tipc_printbuf_reset(pb_from);
-}
/**
- * tipc_printf - append formatted output to print buffer
- * @pb: pointer to print buffer
+ * tipc_snprintf - append formatted output to print buffer
+ * @buf: pointer to print buffer
+ * @len: buffer length
* @fmt: formatted info to be printed
*/
-void tipc_printf(struct print_buf *pb, const char *fmt, ...)
-{
- int chars_to_add;
- int chars_left;
- char save_char;
-
- spin_lock_bh(&print_lock);
-
- FORMAT(print_string, chars_to_add, fmt);
- if (chars_to_add >= TIPC_PB_MAX_STR)
- strcpy(print_string, "*** PRINT BUFFER STRING TOO LONG ***");
-
- if (pb->buf) {
- chars_left = pb->buf + pb->size - pb->crs - 1;
- if (chars_to_add <= chars_left) {
- strcpy(pb->crs, print_string);
- pb->crs += chars_to_add;
- } else if (chars_to_add >= (pb->size - 1)) {
- strcpy(pb->buf, print_string + chars_to_add + 1
- - pb->size);
- pb->crs = pb->buf + pb->size - 1;
- } else {
- strcpy(pb->buf, print_string + chars_left);
- save_char = print_string[chars_left];
- print_string[chars_left] = 0;
- strcpy(pb->crs, print_string);
- print_string[chars_left] = save_char;
- pb->crs = pb->buf + chars_to_add - chars_left;
- }
- }
-
- if (pb->echo)
- printk("%s", print_string);
-
- spin_unlock_bh(&print_lock);
-}
-
-/**
- * tipc_log_resize - change the size of the TIPC log buffer
- * @log_size: print buffer size to use
- */
-int tipc_log_resize(int log_size)
-{
- int res = 0;
-
- spin_lock_bh(&print_lock);
- kfree(TIPC_LOG->buf);
- TIPC_LOG->buf = NULL;
- if (log_size) {
- if (log_size < TIPC_PB_MIN_SIZE)
- log_size = TIPC_PB_MIN_SIZE;
- res = TIPC_LOG->echo;
- tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC),
- log_size);
- TIPC_LOG->echo = res;
- res = !TIPC_LOG->buf;
- }
- spin_unlock_bh(&print_lock);
-
- return res;
-}
-
-/**
- * tipc_log_resize_cmd - reconfigure size of TIPC log buffer
- */
-struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
-{
- u32 value;
-
- if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
- return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
-
- value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
- if (value > 32768)
- return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
- " (log size must be 0-32768)");
- if (tipc_log_resize(value))
- return tipc_cfg_reply_error_string(
- "unable to create specified log (log size is now 0)");
- return tipc_cfg_reply_none();
-}
-
-/**
- * tipc_log_dump - capture TIPC log buffer contents in configuration message
- */
-struct sk_buff *tipc_log_dump(void)
+int tipc_snprintf(char *buf, int len, const char *fmt, ...)
{
- struct sk_buff *reply;
-
- spin_lock_bh(&print_lock);
- if (!TIPC_LOG->buf) {
- spin_unlock_bh(&print_lock);
- reply = tipc_cfg_reply_ultra_string("log not activated\n");
- } else if (tipc_printbuf_empty(TIPC_LOG)) {
- spin_unlock_bh(&print_lock);
- reply = tipc_cfg_reply_ultra_string("log is empty\n");
- } else {
- struct tlv_desc *rep_tlv;
- struct print_buf pb;
- int str_len;
+ int i;
+ va_list args;
- str_len = min(TIPC_LOG->size, 32768u);
- spin_unlock_bh(&print_lock);
- reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));
- if (reply) {
- rep_tlv = (struct tlv_desc *)reply->data;
- tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
- spin_lock_bh(&print_lock);
- tipc_printbuf_move(&pb, TIPC_LOG);
- spin_unlock_bh(&print_lock);
- str_len = strlen(TLV_DATA(rep_tlv)) + 1;
- skb_put(reply, TLV_SPACE(str_len));
- TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
- }
- }
- return reply;
+ va_start(args, fmt);
+ i = vscnprintf(buf, len, fmt, args);
+ va_end(args);
+ return i;
}
diff --git a/net/tipc/log.h b/net/tipc/log.h
deleted file mode 100644
index d1f5eb967fd8..000000000000
--- a/net/tipc/log.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * net/tipc/log.h: Include file for TIPC print buffer routines
- *
- * Copyright (c) 1997-2006, Ericsson AB
- * Copyright (c) 2005-2007, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _TIPC_LOG_H
-#define _TIPC_LOG_H
-
-/**
- * struct print_buf - TIPC print buffer structure
- * @buf: pointer to character array containing print buffer contents
- * @size: size of character array
- * @crs: pointer to first unused space in character array (i.e. final NUL)
- * @echo: echo output to system console if non-zero
- */
-struct print_buf {
- char *buf;
- u32 size;
- char *crs;
- int echo;
-};
-
-#define TIPC_PB_MIN_SIZE 64 /* minimum size for a print buffer's array */
-#define TIPC_PB_MAX_STR 512 /* max printable string (with trailing NUL) */
-
-void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 size);
-int tipc_printbuf_validate(struct print_buf *pb);
-
-int tipc_log_resize(int log_size);
-
-struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area,
- int req_tlv_space);
-struct sk_buff *tipc_log_dump(void);
-
-#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index deea0d232dca..f2db8a87d9c5 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -109,245 +109,3 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
*buf = NULL;
return -EFAULT;
}
-
-#ifdef CONFIG_TIPC_DEBUG
-void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
-{
- u32 usr = msg_user(msg);
- tipc_printf(buf, KERN_DEBUG);
- tipc_printf(buf, str);
-
- switch (usr) {
- case MSG_BUNDLER:
- tipc_printf(buf, "BNDL::");
- tipc_printf(buf, "MSGS(%u):", msg_msgcnt(msg));
- break;
- case BCAST_PROTOCOL:
- tipc_printf(buf, "BCASTP::");
- break;
- case MSG_FRAGMENTER:
- tipc_printf(buf, "FRAGM::");
- switch (msg_type(msg)) {
- case FIRST_FRAGMENT:
- tipc_printf(buf, "FIRST:");
- break;
- case FRAGMENT:
- tipc_printf(buf, "BODY:");
- break;
- case LAST_FRAGMENT:
- tipc_printf(buf, "LAST:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN:%x", msg_type(msg));
-
- }
- tipc_printf(buf, "NO(%u/%u):", msg_long_msgno(msg),
- msg_fragm_no(msg));
- break;
- case TIPC_LOW_IMPORTANCE:
- case TIPC_MEDIUM_IMPORTANCE:
- case TIPC_HIGH_IMPORTANCE:
- case TIPC_CRITICAL_IMPORTANCE:
- tipc_printf(buf, "DAT%u:", msg_user(msg));
- if (msg_short(msg)) {
- tipc_printf(buf, "CON:");
- break;
- }
- switch (msg_type(msg)) {
- case TIPC_CONN_MSG:
- tipc_printf(buf, "CON:");
- break;
- case TIPC_MCAST_MSG:
- tipc_printf(buf, "MCST:");
- break;
- case TIPC_NAMED_MSG:
- tipc_printf(buf, "NAM:");
- break;
- case TIPC_DIRECT_MSG:
- tipc_printf(buf, "DIR:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE %u", msg_type(msg));
- }
- if (msg_reroute_cnt(msg))
- tipc_printf(buf, "REROUTED(%u):",
- msg_reroute_cnt(msg));
- break;
- case NAME_DISTRIBUTOR:
- tipc_printf(buf, "NMD::");
- switch (msg_type(msg)) {
- case PUBLICATION:
- tipc_printf(buf, "PUBL(%u):", (msg_size(msg) - msg_hdr_sz(msg)) / 20); /* Items */
- break;
- case WITHDRAWAL:
- tipc_printf(buf, "WDRW:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN:%x", msg_type(msg));
- }
- if (msg_reroute_cnt(msg))
- tipc_printf(buf, "REROUTED(%u):",
- msg_reroute_cnt(msg));
- break;
- case CONN_MANAGER:
- tipc_printf(buf, "CONN_MNG:");
- switch (msg_type(msg)) {
- case CONN_PROBE:
- tipc_printf(buf, "PROBE:");
- break;
- case CONN_PROBE_REPLY:
- tipc_printf(buf, "PROBE_REPLY:");
- break;
- case CONN_ACK:
- tipc_printf(buf, "CONN_ACK:");
- tipc_printf(buf, "ACK(%u):", msg_msgcnt(msg));
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
- }
- if (msg_reroute_cnt(msg))
- tipc_printf(buf, "REROUTED(%u):", msg_reroute_cnt(msg));
- break;
- case LINK_PROTOCOL:
- switch (msg_type(msg)) {
- case STATE_MSG:
- tipc_printf(buf, "STATE:");
- tipc_printf(buf, "%s:", msg_probe(msg) ? "PRB" : "");
- tipc_printf(buf, "NXS(%u):", msg_next_sent(msg));
- tipc_printf(buf, "GAP(%u):", msg_seq_gap(msg));
- tipc_printf(buf, "LSTBC(%u):", msg_last_bcast(msg));
- break;
- case RESET_MSG:
- tipc_printf(buf, "RESET:");
- if (msg_size(msg) != msg_hdr_sz(msg))
- tipc_printf(buf, "BEAR:%s:", msg_data(msg));
- break;
- case ACTIVATE_MSG:
- tipc_printf(buf, "ACTIVATE:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
- }
- tipc_printf(buf, "PLANE(%c):", msg_net_plane(msg));
- tipc_printf(buf, "SESS(%u):", msg_session(msg));
- break;
- case CHANGEOVER_PROTOCOL:
- tipc_printf(buf, "TUNL:");
- switch (msg_type(msg)) {
- case DUPLICATE_MSG:
- tipc_printf(buf, "DUPL:");
- break;
- case ORIGINAL_MSG:
- tipc_printf(buf, "ORIG:");
- tipc_printf(buf, "EXP(%u)", msg_msgcnt(msg));
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg));
- }
- break;
- case LINK_CONFIG:
- tipc_printf(buf, "CFG:");
- switch (msg_type(msg)) {
- case DSC_REQ_MSG:
- tipc_printf(buf, "DSC_REQ:");
- break;
- case DSC_RESP_MSG:
- tipc_printf(buf, "DSC_RESP:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN TYPE:%x:", msg_type(msg));
- break;
- }
- break;
- default:
- tipc_printf(buf, "UNKNOWN USER:");
- }
-
- switch (usr) {
- case CONN_MANAGER:
- case TIPC_LOW_IMPORTANCE:
- case TIPC_MEDIUM_IMPORTANCE:
- case TIPC_HIGH_IMPORTANCE:
- case TIPC_CRITICAL_IMPORTANCE:
- switch (msg_errcode(msg)) {
- case TIPC_OK:
- break;
- case TIPC_ERR_NO_NAME:
- tipc_printf(buf, "NO_NAME:");
- break;
- case TIPC_ERR_NO_PORT:
- tipc_printf(buf, "NO_PORT:");
- break;
- case TIPC_ERR_NO_NODE:
- tipc_printf(buf, "NO_PROC:");
- break;
- case TIPC_ERR_OVERLOAD:
- tipc_printf(buf, "OVERLOAD:");
- break;
- case TIPC_CONN_SHUTDOWN:
- tipc_printf(buf, "SHUTDOWN:");
- break;
- default:
- tipc_printf(buf, "UNKNOWN ERROR(%x):",
- msg_errcode(msg));
- }
- default:
- break;
- }
-
- tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg));
- tipc_printf(buf, "SZ(%u):", msg_size(msg));
- tipc_printf(buf, "SQNO(%u):", msg_seqno(msg));
-
- if (msg_non_seq(msg))
- tipc_printf(buf, "NOSEQ:");
- else
- tipc_printf(buf, "ACK(%u):", msg_ack(msg));
- tipc_printf(buf, "BACK(%u):", msg_bcast_ack(msg));
- tipc_printf(buf, "PRND(%x)", msg_prevnode(msg));
-
- if (msg_isdata(msg)) {
- if (msg_named(msg)) {
- tipc_printf(buf, "NTYP(%u):", msg_nametype(msg));
- tipc_printf(buf, "NINST(%u)", msg_nameinst(msg));
- }
- }
-
- if ((usr != LINK_PROTOCOL) && (usr != LINK_CONFIG) &&
- (usr != MSG_BUNDLER)) {
- if (!msg_short(msg)) {
- tipc_printf(buf, ":ORIG(%x:%u):",
- msg_orignode(msg), msg_origport(msg));
- tipc_printf(buf, ":DEST(%x:%u):",
- msg_destnode(msg), msg_destport(msg));
- } else {
- tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
- tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
- }
- }
- if (msg_user(msg) == NAME_DISTRIBUTOR) {
- tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
- tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
- }
-
- if (msg_user(msg) == LINK_CONFIG) {
- struct tipc_media_addr orig;
-
- tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
- tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
- memcpy(orig.value, msg_media_addr(msg), sizeof(orig.value));
- orig.media_id = 0;
- orig.broadcast = 0;
- tipc_media_addr_printf(buf, &orig);
- }
- if (msg_user(msg) == BCAST_PROTOCOL) {
- tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
- tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg));
- }
- tipc_printf(buf, "\n");
- if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg)))
- tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
- if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT))
- tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
-}
-#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 158318e67b08..55d3928dfd67 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -161,7 +161,7 @@ void tipc_named_publish(struct publication *publ)
buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
if (!buf) {
- warn("Publication distribution failure\n");
+ pr_warn("Publication distribution failure\n");
return;
}
@@ -186,7 +186,7 @@ void tipc_named_withdraw(struct publication *publ)
buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
if (!buf) {
- warn("Withdrawal distribution failure\n");
+ pr_warn("Withdrawal distribution failure\n");
return;
}
@@ -213,7 +213,7 @@ static void named_distribute(struct list_head *message_list, u32 node,
rest -= left;
buf = named_prepare_buf(PUBLICATION, left, node);
if (!buf) {
- warn("Bulk publication failure\n");
+ pr_warn("Bulk publication failure\n");
return;
}
item = (struct distr_item *)msg_data(buf_msg(buf));
@@ -283,9 +283,10 @@ static void named_purge_publ(struct publication *publ)
write_unlock_bh(&tipc_nametbl_lock);
if (p != publ) {
- err("Unable to remove publication from failed node\n"
- "(type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
- publ->type, publ->lower, publ->node, publ->ref, publ->key);
+ pr_err("Unable to remove publication from failed node\n"
+ " (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
+ publ->type, publ->lower, publ->node, publ->ref,
+ publ->key);
}
kfree(p);
@@ -329,14 +330,14 @@ void tipc_named_recv(struct sk_buff *buf)
tipc_nodesub_unsubscribe(&publ->subscr);
kfree(publ);
} else {
- err("Unable to remove publication by node 0x%x\n"
- "(type=%u, lower=%u, ref=%u, key=%u)\n",
- msg_orignode(msg),
- ntohl(item->type), ntohl(item->lower),
- ntohl(item->ref), ntohl(item->key));
+ pr_err("Unable to remove publication by node 0x%x\n"
+ " (type=%u, lower=%u, ref=%u, key=%u)\n",
+ msg_orignode(msg), ntohl(item->type),
+ ntohl(item->lower), ntohl(item->ref),
+ ntohl(item->key));
}
} else {
- warn("Unrecognized name table message received\n");
+ pr_warn("Unrecognized name table message received\n");
}
item++;
}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 010f24a59da2..360c478b0b53 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -126,7 +126,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
{
struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);
if (publ == NULL) {
- warn("Publication creation failure, no memory\n");
+ pr_warn("Publication creation failure, no memory\n");
return NULL;
}
@@ -163,7 +163,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
struct sub_seq *sseq = tipc_subseq_alloc(1);
if (!nseq || !sseq) {
- warn("Name sequence creation failed, no memory\n");
+ pr_warn("Name sequence creation failed, no memory\n");
kfree(nseq);
kfree(sseq);
return NULL;
@@ -191,7 +191,7 @@ static void nameseq_delete_empty(struct name_seq *seq)
}
}
-/*
+/**
* nameseq_find_subseq - find sub-sequence (if any) matching a name instance
*
* Very time-critical, so binary searches through sub-sequence array.
@@ -263,8 +263,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
/* Lower end overlaps existing entry => need an exact match */
if ((sseq->lower != lower) || (sseq->upper != upper)) {
- warn("Cannot publish {%u,%u,%u}, overlap error\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
+ type, lower, upper);
return NULL;
}
@@ -286,8 +286,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
/* Fail if upper end overlaps into an existing entry */
if ((inspos < nseq->first_free) &&
(upper >= nseq->sseqs[inspos].lower)) {
- warn("Cannot publish {%u,%u,%u}, overlap error\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
+ type, lower, upper);
return NULL;
}
@@ -296,8 +296,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
if (!sseqs) {
- warn("Cannot publish {%u,%u,%u}, no memory\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
+ type, lower, upper);
return NULL;
}
memcpy(sseqs, nseq->sseqs,
@@ -309,8 +309,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
info = kzalloc(sizeof(*info), GFP_ATOMIC);
if (!info) {
- warn("Cannot publish {%u,%u,%u}, no memory\n",
- type, lower, upper);
+ pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
+ type, lower, upper);
return NULL;
}
@@ -435,7 +435,7 @@ found:
}
/**
- * tipc_nameseq_subscribe: attach a subscription, and issue
+ * tipc_nameseq_subscribe - attach a subscription, and issue
* the prescribed number of events if there is any sub-
* sequence overlapping with the requested sequence
*/
@@ -492,8 +492,8 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
(lower > upper)) {
- dbg("Failed to publish illegal {%u,%u,%u} with scope %u\n",
- type, lower, upper, scope);
+ pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
+ type, lower, upper, scope);
return NULL;
}
@@ -520,7 +520,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
return publ;
}
-/*
+/**
* tipc_nametbl_translate - perform name translation
*
* On entry, 'destnode' is the search domain used during translation.
@@ -668,8 +668,8 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
struct publication *publ;
if (table.local_publ_count >= tipc_max_publications) {
- warn("Publication failed, local publication limit reached (%u)\n",
- tipc_max_publications);
+ pr_warn("Publication failed, local publication limit reached (%u)\n",
+ tipc_max_publications);
return NULL;
}
@@ -702,9 +702,9 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
return 1;
}
write_unlock_bh(&tipc_nametbl_lock);
- err("Unable to remove local publication\n"
- "(type=%u, lower=%u, ref=%u, key=%u)\n",
- type, lower, ref, key);
+ pr_err("Unable to remove local publication\n"
+ "(type=%u, lower=%u, ref=%u, key=%u)\n",
+ type, lower, ref, key);
return 0;
}
@@ -725,8 +725,8 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
tipc_nameseq_subscribe(seq, s);
spin_unlock_bh(&seq->lock);
} else {
- warn("Failed to create subscription for {%u,%u,%u}\n",
- s->seq.type, s->seq.lower, s->seq.upper);
+ pr_warn("Failed to create subscription for {%u,%u,%u}\n",
+ s->seq.type, s->seq.lower, s->seq.upper);
}
write_unlock_bh(&tipc_nametbl_lock);
}
@@ -751,21 +751,22 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
/**
- * subseq_list: print specified sub-sequence contents into the given buffer
+ * subseq_list - print specified sub-sequence contents into the given buffer
*/
-static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
+static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
u32 index)
{
char portIdStr[27];
const char *scope_str[] = {"", " zone", " cluster", " node"};
struct publication *publ;
struct name_info *info;
+ int ret;
- tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
+ ret = tipc_snprintf(buf, len, "%-10u %-10u ", sseq->lower, sseq->upper);
if (depth == 2) {
- tipc_printf(buf, "\n");
- return;
+ ret += tipc_snprintf(buf - ret, len + ret, "\n");
+ return ret;
}
info = sseq->info;
@@ -774,52 +775,58 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
sprintf(portIdStr, "<%u.%u.%u:%u>",
tipc_zone(publ->node), tipc_cluster(publ->node),
tipc_node(publ->node), publ->ref);
- tipc_printf(buf, "%-26s ", portIdStr);
+ ret += tipc_snprintf(buf + ret, len - ret, "%-26s ", portIdStr);
if (depth > 3) {
- tipc_printf(buf, "%-10u %s", publ->key,
- scope_str[publ->scope]);
+ ret += tipc_snprintf(buf + ret, len - ret, "%-10u %s",
+ publ->key, scope_str[publ->scope]);
}
if (!list_is_last(&publ->zone_list, &info->zone_list))
- tipc_printf(buf, "\n%33s", " ");
+ ret += tipc_snprintf(buf + ret, len - ret,
+ "\n%33s", " ");
};
- tipc_printf(buf, "\n");
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
}
/**
- * nameseq_list: print specified name sequence contents into the given buffer
+ * nameseq_list - print specified name sequence contents into the given buffer
*/
-static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
+static int nameseq_list(struct name_seq *seq, char *buf, int len, u32 depth,
u32 type, u32 lowbound, u32 upbound, u32 index)
{
struct sub_seq *sseq;
char typearea[11];
+ int ret = 0;
if (seq->first_free == 0)
- return;
+ return 0;
sprintf(typearea, "%-10u", seq->type);
if (depth == 1) {
- tipc_printf(buf, "%s\n", typearea);
- return;
+ ret += tipc_snprintf(buf, len, "%s\n", typearea);
+ return ret;
}
for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
- tipc_printf(buf, "%s ", typearea);
+ ret += tipc_snprintf(buf + ret, len - ret, "%s ",
+ typearea);
spin_lock_bh(&seq->lock);
- subseq_list(sseq, buf, depth, index);
+ ret += subseq_list(sseq, buf + ret, len - ret,
+ depth, index);
spin_unlock_bh(&seq->lock);
sprintf(typearea, "%10s", " ");
}
}
+ return ret;
}
/**
* nametbl_header - print name table header into the given buffer
*/
-static void nametbl_header(struct print_buf *buf, u32 depth)
+static int nametbl_header(char *buf, int len, u32 depth)
{
const char *header[] = {
"Type ",
@@ -829,24 +836,27 @@ static void nametbl_header(struct print_buf *buf, u32 depth)
};
int i;
+ int ret = 0;
if (depth > 4)
depth = 4;
for (i = 0; i < depth; i++)
- tipc_printf(buf, header[i]);
- tipc_printf(buf, "\n");
+ ret += tipc_snprintf(buf + ret, len - ret, header[i]);
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
}
/**
* nametbl_list - print specified name table contents into the given buffer
*/
-static void nametbl_list(struct print_buf *buf, u32 depth_info,
+static int nametbl_list(char *buf, int len, u32 depth_info,
u32 type, u32 lowbound, u32 upbound)
{
struct hlist_head *seq_head;
struct hlist_node *seq_node;
struct name_seq *seq;
int all_types;
+ int ret = 0;
u32 depth;
u32 i;
@@ -854,65 +864,69 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
if (depth == 0)
- return;
+ return 0;
if (all_types) {
/* display all entries in name table to specified depth */
- nametbl_header(buf, depth);
+ ret += nametbl_header(buf, len, depth);
lowbound = 0;
upbound = ~0;
for (i = 0; i < tipc_nametbl_size; i++) {
seq_head = &table.types[i];
hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
- nameseq_list(seq, buf, depth, seq->type,
- lowbound, upbound, i);
+ ret += nameseq_list(seq, buf + ret, len - ret,
+ depth, seq->type,
+ lowbound, upbound, i);
}
}
} else {
/* display only the sequence that matches the specified type */
if (upbound < lowbound) {
- tipc_printf(buf, "invalid name sequence specified\n");
- return;
+ ret += tipc_snprintf(buf + ret, len - ret,
+ "invalid name sequence specified\n");
+ return ret;
}
- nametbl_header(buf, depth);
+ ret += nametbl_header(buf + ret, len - ret, depth);
i = hash(type);
seq_head = &table.types[i];
hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
if (seq->type == type) {
- nameseq_list(seq, buf, depth, type,
- lowbound, upbound, i);
+ ret += nameseq_list(seq, buf + ret, len - ret,
+ depth, type,
+ lowbound, upbound, i);
break;
}
}
}
+ return ret;
}
-#define MAX_NAME_TBL_QUERY 32768
-
struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
{
struct sk_buff *buf;
struct tipc_name_table_query *argv;
struct tlv_desc *rep_tlv;
- struct print_buf b;
+ char *pb;
+ int pb_len;
int str_len;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (!buf)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
- tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
read_lock_bh(&tipc_nametbl_lock);
- nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
- ntohl(argv->lowbound), ntohl(argv->upbound));
+ str_len = nametbl_list(pb, pb_len, ntohl(argv->depth),
+ ntohl(argv->type),
+ ntohl(argv->lowbound), ntohl(argv->upbound));
read_unlock_bh(&tipc_nametbl_lock);
- str_len = tipc_printbuf_validate(&b);
-
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -940,8 +954,10 @@ void tipc_nametbl_stop(void)
/* Verify name table is empty, then release it */
write_lock_bh(&tipc_nametbl_lock);
for (i = 0; i < tipc_nametbl_size; i++) {
- if (!hlist_empty(&table.types[i]))
- err("tipc_nametbl_stop(): hash chain %u is non-null\n", i);
+ if (hlist_empty(&table.types[i]))
+ continue;
+ pr_err("nametbl_stop(): orphaned hash chain detected\n");
+ break;
}
kfree(table.types);
table.types = NULL;
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7c236c89cf5e..5b5cea259caf 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -184,9 +184,9 @@ int tipc_net_start(u32 addr)
tipc_cfg_reinit();
- info("Started in network mode\n");
- info("Own node address %s, network identity %u\n",
- tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+ pr_info("Started in network mode\n");
+ pr_info("Own node address %s, network identity %u\n",
+ tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
return 0;
}
@@ -202,5 +202,5 @@ void tipc_net_stop(void)
list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
tipc_node_delete(node);
write_unlock_bh(&tipc_net_lock);
- info("Left network mode\n");
+ pr_info("Left network mode\n");
}
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 7bda8e3d1398..47a839df27dc 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -90,7 +90,7 @@ int tipc_netlink_start(void)
res = genl_register_family_with_ops(&tipc_genl_family,
&tipc_genl_ops, 1);
if (res) {
- err("Failed to register netlink interface\n");
+ pr_err("Failed to register netlink interface\n");
return res;
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index d4fd341e6e0d..d21db204e25a 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -105,7 +105,7 @@ struct tipc_node *tipc_node_create(u32 addr)
n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
if (!n_ptr) {
spin_unlock_bh(&node_create_lock);
- warn("Node creation failed, no memory\n");
+ pr_warn("Node creation failed, no memory\n");
return NULL;
}
@@ -151,8 +151,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
n_ptr->working_links++;
- info("Established link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->b_ptr->net_plane);
+ pr_info("Established link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->b_ptr->net_plane);
if (!active[0]) {
active[0] = active[1] = l_ptr;
@@ -160,7 +160,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
return;
}
if (l_ptr->priority < active[0]->priority) {
- info("New link <%s> becomes standby\n", l_ptr->name);
+ pr_info("New link <%s> becomes standby\n", l_ptr->name);
return;
}
tipc_link_send_duplicate(active[0], l_ptr);
@@ -168,9 +168,9 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
active[0] = l_ptr;
return;
}
- info("Old link <%s> becomes standby\n", active[0]->name);
+ pr_info("Old link <%s> becomes standby\n", active[0]->name);
if (active[1] != active[0])
- info("Old link <%s> becomes standby\n", active[1]->name);
+ pr_info("Old link <%s> becomes standby\n", active[1]->name);
active[0] = active[1] = l_ptr;
}
@@ -211,11 +211,11 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
n_ptr->working_links--;
if (!tipc_link_is_active(l_ptr)) {
- info("Lost standby link <%s> on network plane %c\n",
- l_ptr->name, l_ptr->b_ptr->net_plane);
+ pr_info("Lost standby link <%s> on network plane %c\n",
+ l_ptr->name, l_ptr->b_ptr->net_plane);
return;
}
- info("Lost link <%s> on network plane %c\n",
+ pr_info("Lost link <%s> on network plane %c\n",
l_ptr->name, l_ptr->b_ptr->net_plane);
active = &n_ptr->active_links[0];
@@ -290,8 +290,8 @@ static void node_lost_contact(struct tipc_node *n_ptr)
char addr_string[16];
u32 i;
- info("Lost contact with %s\n",
- tipc_addr_string_fill(addr_string, n_ptr->addr));
+ pr_info("Lost contact with %s\n",
+ tipc_addr_string_fill(addr_string, n_ptr->addr));
/* Flush broadcast link info associated with lost node */
if (n_ptr->bclink.supported) {
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 7a27344108fe..5e34b015da45 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -51,7 +51,8 @@ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
node_sub->node = tipc_node_find(addr);
if (!node_sub->node) {
- warn("Node subscription rejected, unknown node 0x%x\n", addr);
+ pr_warn("Node subscription rejected, unknown node 0x%x\n",
+ addr);
return;
}
node_sub->handle_node_down = handle_down;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 2ad37a4db376..07c42fba672b 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -69,7 +69,7 @@ static u32 port_peerport(struct tipc_port *p_ptr)
return msg_destport(&p_ptr->phdr);
}
-/*
+/**
* tipc_port_peer_msg - verify message was sent by connected port's peer
*
* Handles cases where the node's network address has changed from
@@ -191,7 +191,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
if (b == NULL) {
- warn("Unable to deliver multicast message(s)\n");
+ pr_warn("Unable to deliver multicast message(s)\n");
goto exit;
}
if ((index == 0) && (cnt != 0))
@@ -221,12 +221,12 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
p_ptr = kzalloc(sizeof(*p_ptr), GFP_ATOMIC);
if (!p_ptr) {
- warn("Port creation failed, no memory\n");
+ pr_warn("Port creation failed, no memory\n");
return NULL;
}
ref = tipc_ref_acquire(p_ptr, &p_ptr->lock);
if (!ref) {
- warn("Port creation failed, reference table exhausted\n");
+ pr_warn("Port creation failed, ref. table exhausted\n");
kfree(p_ptr);
return NULL;
}
@@ -581,67 +581,73 @@ exit:
kfree_skb(buf);
}
-static void port_print(struct tipc_port *p_ptr, struct print_buf *buf, int full_id)
+static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id)
{
struct publication *publ;
+ int ret;
if (full_id)
- tipc_printf(buf, "<%u.%u.%u:%u>:",
- tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
- tipc_node(tipc_own_addr), p_ptr->ref);
+ ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
+ tipc_zone(tipc_own_addr),
+ tipc_cluster(tipc_own_addr),
+ tipc_node(tipc_own_addr), p_ptr->ref);
else
- tipc_printf(buf, "%-10u:", p_ptr->ref);
+ ret = tipc_snprintf(buf, len, "%-10u:", p_ptr->ref);
if (p_ptr->connected) {
u32 dport = port_peerport(p_ptr);
u32 destnode = port_peernode(p_ptr);
- tipc_printf(buf, " connected to <%u.%u.%u:%u>",
- tipc_zone(destnode), tipc_cluster(destnode),
- tipc_node(destnode), dport);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " connected to <%u.%u.%u:%u>",
+ tipc_zone(destnode),
+ tipc_cluster(destnode),
+ tipc_node(destnode), dport);
if (p_ptr->conn_type != 0)
- tipc_printf(buf, " via {%u,%u}",
- p_ptr->conn_type,
- p_ptr->conn_instance);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " via {%u,%u}", p_ptr->conn_type,
+ p_ptr->conn_instance);
} else if (p_ptr->published) {
- tipc_printf(buf, " bound to");
+ ret += tipc_snprintf(buf + ret, len - ret, " bound to");
list_for_each_entry(publ, &p_ptr->publications, pport_list) {
if (publ->lower == publ->upper)
- tipc_printf(buf, " {%u,%u}", publ->type,
- publ->lower);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " {%u,%u}", publ->type,
+ publ->lower);
else
- tipc_printf(buf, " {%u,%u,%u}", publ->type,
- publ->lower, publ->upper);
+ ret += tipc_snprintf(buf + ret, len - ret,
+ " {%u,%u,%u}", publ->type,
+ publ->lower, publ->upper);
}
}
- tipc_printf(buf, "\n");
+ ret += tipc_snprintf(buf + ret, len - ret, "\n");
+ return ret;
}
-#define MAX_PORT_QUERY 32768
-
struct sk_buff *tipc_port_get_ports(void)
{
struct sk_buff *buf;
struct tlv_desc *rep_tlv;
- struct print_buf pb;
+ char *pb;
+ int pb_len;
struct tipc_port *p_ptr;
- int str_len;
+ int str_len = 0;
- buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
+ buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
if (!buf)
return NULL;
rep_tlv = (struct tlv_desc *)buf->data;
+ pb = TLV_DATA(rep_tlv);
+ pb_len = ULTRA_STRING_MAX_LEN;
- tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
spin_lock_bh(&tipc_port_list_lock);
list_for_each_entry(p_ptr, &ports, port_list) {
spin_lock_bh(p_ptr->lock);
- port_print(p_ptr, &pb, 0);
+ str_len += port_print(p_ptr, pb, pb_len, 0);
spin_unlock_bh(p_ptr->lock);
}
spin_unlock_bh(&tipc_port_list_lock);
- str_len = tipc_printbuf_validate(&pb);
-
+ str_len += 1; /* for "\0" */
skb_put(buf, TLV_SPACE(str_len));
TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -906,11 +912,11 @@ int tipc_createport(void *usr_handle,
up_ptr = kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
if (!up_ptr) {
- warn("Port creation failed, no memory\n");
+ pr_warn("Port creation failed, no memory\n");
return -ENOMEM;
}
- p_ptr = (struct tipc_port *)tipc_createport_raw(NULL, port_dispatcher,
- port_wakeup, importance);
+ p_ptr = tipc_createport_raw(NULL, port_dispatcher, port_wakeup,
+ importance);
if (!p_ptr) {
kfree(up_ptr);
return -ENOMEM;
@@ -1078,8 +1084,7 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
if (tp_ptr->connected) {
tp_ptr->connected = 0;
/* let timer expire on it's own to avoid deadlock! */
- tipc_nodesub_unsubscribe(
- &((struct tipc_port *)tp_ptr)->subscription);
+ tipc_nodesub_unsubscribe(&tp_ptr->subscription);
res = 0;
} else {
res = -ENOTCONN;
@@ -1099,7 +1104,7 @@ int tipc_disconnect(u32 ref)
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- res = tipc_disconnect_port((struct tipc_port *)p_ptr);
+ res = tipc_disconnect_port(p_ptr);
tipc_port_unlock(p_ptr);
return res;
}
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 98cbec9c4532..4660e3065790 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -79,6 +79,7 @@ typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
* struct user_port - TIPC user port (used with native API)
* @usr_handle: user-specified field
* @ref: object reference to associated TIPC port
+ *
* <various callback routines>
*/
struct user_port {
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 5cada0e38e03..2a2a938dc22c 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -153,11 +153,11 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
struct reference *entry = NULL;
if (!object) {
- err("Attempt to acquire reference to non-existent object\n");
+ pr_err("Attempt to acquire ref. to non-existent obj\n");
return 0;
}
if (!tipc_ref_table.entries) {
- err("Reference table not found during acquisition attempt\n");
+ pr_err("Ref. table not found in acquisition attempt\n");
return 0;
}
@@ -211,7 +211,7 @@ void tipc_ref_discard(u32 ref)
u32 index_mask;
if (!tipc_ref_table.entries) {
- err("Reference table not found during discard attempt\n");
+ pr_err("Ref. table not found during discard attempt\n");
return;
}
@@ -222,11 +222,11 @@ void tipc_ref_discard(u32 ref)
write_lock_bh(&ref_table_lock);
if (!entry->object) {
- err("Attempt to discard reference to non-existent object\n");
+ pr_err("Attempt to discard ref. to non-existent obj\n");
goto exit;
}
if (entry->ref != ref) {
- err("Attempt to discard non-existent reference\n");
+ pr_err("Attempt to discard non-existent reference\n");
goto exit;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 5577a447f531..09dc5b97e079 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -34,12 +34,12 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/export.h>
-#include <net/sock.h>
-
#include "core.h"
#include "port.h"
+#include <linux/export.h>
+#include <net/sock.h>
+
#define SS_LISTENING -1 /* socket is listening */
#define SS_READY -2 /* socket is connectionless */
@@ -54,7 +54,7 @@ struct tipc_sock {
};
#define tipc_sk(sk) ((struct tipc_sock *)(sk))
-#define tipc_sk_port(sk) ((struct tipc_port *)(tipc_sk(sk)->p))
+#define tipc_sk_port(sk) (tipc_sk(sk)->p)
#define tipc_rx_ready(sock) (!skb_queue_empty(&sock->sk->sk_receive_queue) || \
(sock->state == SS_DISCONNECTING))
@@ -1699,9 +1699,8 @@ static int getsockopt(struct socket *sock,
return put_user(sizeof(value), ol);
}
-/**
- * Protocol switches for the various types of TIPC sockets
- */
+/* Protocol switches for the various types of TIPC sockets */
+
static const struct proto_ops msg_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
@@ -1788,13 +1787,13 @@ int tipc_socket_init(void)
res = proto_register(&tipc_proto, 1);
if (res) {
- err("Failed to register TIPC protocol type\n");
+ pr_err("Failed to register TIPC protocol type\n");
goto out;
}
res = sock_register(&tipc_family_ops);
if (res) {
- err("Failed to register TIPC socket type\n");
+ pr_err("Failed to register TIPC socket type\n");
proto_unregister(&tipc_proto);
goto out;
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index f976e9cd6a72..5ed5965eb0be 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -305,8 +305,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
/* Refuse subscription if global limit exceeded */
if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
- warn("Subscription rejected, subscription limit reached (%u)\n",
- tipc_max_subscriptions);
+ pr_warn("Subscription rejected, limit reached (%u)\n",
+ tipc_max_subscriptions);
subscr_terminate(subscriber);
return NULL;
}
@@ -314,7 +314,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
/* Allocate subscription object */
sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
if (!sub) {
- warn("Subscription rejected, no memory\n");
+ pr_warn("Subscription rejected, no memory\n");
subscr_terminate(subscriber);
return NULL;
}
@@ -328,7 +328,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
if ((!(sub->filter & TIPC_SUB_PORTS) ==
!(sub->filter & TIPC_SUB_SERVICE)) ||
(sub->seq.lower > sub->seq.upper)) {
- warn("Subscription rejected, illegal request\n");
+ pr_warn("Subscription rejected, illegal request\n");
kfree(sub);
subscr_terminate(subscriber);
return NULL;
@@ -440,7 +440,7 @@ static void subscr_named_msg_event(void *usr_handle,
/* Create subscriber object */
subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
if (subscriber == NULL) {
- warn("Subscriber rejected, no memory\n");
+ pr_warn("Subscriber rejected, no memory\n");
return;
}
INIT_LIST_HEAD(&subscriber->subscription_list);
@@ -458,7 +458,7 @@ static void subscr_named_msg_event(void *usr_handle,
NULL,
&subscriber->port_ref);
if (subscriber->port_ref == 0) {
- warn("Subscriber rejected, unable to create port\n");
+ pr_warn("Subscriber rejected, unable to create port\n");
kfree(subscriber);
return;
}
@@ -517,7 +517,7 @@ int tipc_subscr_start(void)
return 0;
failed:
- err("Failed to create subscription service\n");
+ pr_err("Failed to create subscription service\n");
return res;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 641f2e47f165..79981d97bc9c 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -115,15 +115,24 @@
#include <net/checksum.h>
#include <linux/security.h>
-struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
+struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;
-#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
-#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
+static struct hlist_head *unix_sockets_unbound(void *addr)
+{
+ unsigned long hash = (unsigned long)addr;
+
+ hash ^= hash >> 16;
+ hash ^= hash >> 8;
+ hash %= UNIX_HASH_SIZE;
+ return &unix_socket_table[UNIX_HASH_SIZE + hash];
+}
+
+#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
@@ -645,7 +654,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock)
INIT_LIST_HEAD(&u->link);
mutex_init(&u->readlock); /* single task reading lock */
init_waitqueue_head(&u->peer_wait);
- unix_insert_socket(unix_sockets_unbound, sk);
+ unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
if (sk == NULL)
atomic_long_dec(&unix_nr_socks);
@@ -2239,47 +2248,54 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
}
#ifdef CONFIG_PROC_FS
-static struct sock *first_unix_socket(int *i)
+
+#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
+
+#define get_bucket(x) ((x) >> BUCKET_SPACE)
+#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
+#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
+
+static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
- for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
- if (!hlist_empty(&unix_socket_table[*i]))
- return __sk_head(&unix_socket_table[*i]);
+ unsigned long offset = get_offset(*pos);
+ unsigned long bucket = get_bucket(*pos);
+ struct sock *sk;
+ unsigned long count = 0;
+
+ for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
+ if (sock_net(sk) != seq_file_net(seq))
+ continue;
+ if (++count == offset)
+ break;
}
- return NULL;
+
+ return sk;
}
-static struct sock *next_unix_socket(int *i, struct sock *s)
+static struct sock *unix_next_socket(struct seq_file *seq,
+ struct sock *sk,
+ loff_t *pos)
{
- struct sock *next = sk_next(s);
- /* More in this chain? */
- if (next)
- return next;
- /* Look for next non-empty chain. */
- for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
- if (!hlist_empty(&unix_socket_table[*i]))
- return __sk_head(&unix_socket_table[*i]);
+ unsigned long bucket;
+
+ while (sk > (struct sock *)SEQ_START_TOKEN) {
+ sk = sk_next(sk);
+ if (!sk)
+ goto next_bucket;
+ if (sock_net(sk) == seq_file_net(seq))
+ return sk;
}
- return NULL;
-}
-struct unix_iter_state {
- struct seq_net_private p;
- int i;
-};
+ do {
+ sk = unix_from_bucket(seq, pos);
+ if (sk)
+ return sk;
-static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
-{
- struct unix_iter_state *iter = seq->private;
- loff_t off = 0;
- struct sock *s;
+next_bucket:
+ bucket = get_bucket(*pos) + 1;
+ *pos = set_bucket_offset(bucket, 1);
+ } while (bucket < ARRAY_SIZE(unix_socket_table));
- for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
- if (sock_net(s) != seq_file_net(seq))
- continue;
- if (off == pos)
- return s;
- ++off;
- }
return NULL;
}
@@ -2287,22 +2303,20 @@ static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(unix_table_lock)
{
spin_lock(&unix_table_lock);
- return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+
+ if (!*pos)
+ return SEQ_START_TOKEN;
+
+ if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
+ return NULL;
+
+ return unix_next_socket(seq, NULL, pos);
}
static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct unix_iter_state *iter = seq->private;
- struct sock *sk = v;
++*pos;
-
- if (v == SEQ_START_TOKEN)
- sk = first_unix_socket(&iter->i);
- else
- sk = next_unix_socket(&iter->i, sk);
- while (sk && (sock_net(sk) != seq_file_net(seq)))
- sk = next_unix_socket(&iter->i, sk);
- return sk;
+ return unix_next_socket(seq, v, pos);
}
static void unix_seq_stop(struct seq_file *seq, void *v)
@@ -2365,7 +2379,7 @@ static const struct seq_operations unix_seq_ops = {
static int unix_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &unix_seq_ops,
- sizeof(struct unix_iter_state));
+ sizeof(struct seq_net_private));
}
static const struct file_operations unix_seq_fops = {
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 47d3002737f5..750b13408449 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -8,40 +8,31 @@
#include <net/af_unix.h>
#include <net/tcp_states.h>
-#define UNIX_DIAG_PUT(skb, attrtype, attrlen) \
- RTA_DATA(__RTA_PUT(skb, attrtype, attrlen))
-
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
struct unix_address *addr = unix_sk(sk)->addr;
- char *s;
-
- if (addr) {
- s = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short));
- memcpy(s, addr->name->sun_path, addr->len - sizeof(short));
- }
- return 0;
+ if (!addr)
+ return 0;
-rtattr_failure:
- return -EMSGSIZE;
+ return nla_put(nlskb, UNIX_DIAG_NAME, addr->len - sizeof(short),
+ addr->name->sun_path);
}
static int sk_diag_dump_vfs(struct sock *sk, struct sk_buff *nlskb)
{
struct dentry *dentry = unix_sk(sk)->path.dentry;
- struct unix_diag_vfs *uv;
if (dentry) {
- uv = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_VFS, sizeof(*uv));
- uv->udiag_vfs_ino = dentry->d_inode->i_ino;
- uv->udiag_vfs_dev = dentry->d_sb->s_dev;
+ struct unix_diag_vfs uv = {
+ .udiag_vfs_ino = dentry->d_inode->i_ino,
+ .udiag_vfs_dev = dentry->d_sb->s_dev,
+ };
+
+ return nla_put(nlskb, UNIX_DIAG_VFS, sizeof(uv), &uv);
}
return 0;
-
-rtattr_failure:
- return -EMSGSIZE;
}
static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
@@ -56,24 +47,28 @@ static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb)
unix_state_unlock(peer);
sock_put(peer);
- RTA_PUT_U32(nlskb, UNIX_DIAG_PEER, ino);
+ return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino);
}
return 0;
-rtattr_failure:
- return -EMSGSIZE;
}
static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
{
struct sk_buff *skb;
+ struct nlattr *attr;
u32 *buf;
int i;
if (sk->sk_state == TCP_LISTEN) {
spin_lock(&sk->sk_receive_queue.lock);
- buf = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_ICONS,
- sk->sk_receive_queue.qlen * sizeof(u32));
+
+ attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
+ sk->sk_receive_queue.qlen * sizeof(u32));
+ if (!attr)
+ goto errout;
+
+ buf = nla_data(attr);
i = 0;
skb_queue_walk(&sk->sk_receive_queue, skb) {
struct sock *req, *peer;
@@ -94,43 +89,38 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
return 0;
-rtattr_failure:
+errout:
spin_unlock(&sk->sk_receive_queue.lock);
return -EMSGSIZE;
}
static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
{
- struct unix_diag_rqlen *rql;
-
- rql = UNIX_DIAG_PUT(nlskb, UNIX_DIAG_RQLEN, sizeof(*rql));
+ struct unix_diag_rqlen rql;
if (sk->sk_state == TCP_LISTEN) {
- rql->udiag_rqueue = sk->sk_receive_queue.qlen;
- rql->udiag_wqueue = sk->sk_max_ack_backlog;
+ rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+ rql.udiag_wqueue = sk->sk_max_ack_backlog;
} else {
- rql->udiag_rqueue = (__u32)unix_inq_len(sk);
- rql->udiag_wqueue = (__u32)unix_outq_len(sk);
+ rql.udiag_rqueue = (u32) unix_inq_len(sk);
+ rql.udiag_wqueue = (u32) unix_outq_len(sk);
}
- return 0;
-
-rtattr_failure:
- return -EMSGSIZE;
+ return nla_put(nlskb, UNIX_DIAG_RQLEN, sizeof(rql), &rql);
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req,
u32 pid, u32 seq, u32 flags, int sk_ino)
{
- unsigned char *b = skb_tail_pointer(skb);
struct nlmsghdr *nlh;
struct unix_diag_msg *rep;
- nlh = NLMSG_PUT(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep));
- nlh->nlmsg_flags = flags;
-
- rep = NLMSG_DATA(nlh);
+ nlh = nlmsg_put(skb, pid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
+ flags);
+ if (!nlh)
+ return -EMSGSIZE;
+ rep = nlmsg_data(nlh);
rep->udiag_family = AF_UNIX;
rep->udiag_type = sk->sk_type;
rep->udiag_state = sk->sk_state;
@@ -139,33 +129,32 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
if ((req->udiag_show & UDIAG_SHOW_NAME) &&
sk_diag_dump_name(sk, skb))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_VFS) &&
sk_diag_dump_vfs(sk, skb))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_PEER) &&
sk_diag_dump_peer(sk, skb))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_ICONS) &&
sk_diag_dump_icons(sk, skb))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_RQLEN) &&
sk_diag_show_rqlen(sk, skb))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
if ((req->udiag_show & UDIAG_SHOW_MEMINFO) &&
sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
- goto nlmsg_failure;
+ goto out_nlmsg_trim;
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
- return skb->len;
+ return nlmsg_end(skb, nlh);
-nlmsg_failure:
- nlmsg_trim(skb, b);
+out_nlmsg_trim:
+ nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
@@ -188,19 +177,24 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct unix_diag_req *req;
int num, s_num, slot, s_slot;
+ struct net *net = sock_net(skb->sk);
- req = NLMSG_DATA(cb->nlh);
+ req = nlmsg_data(cb->nlh);
s_slot = cb->args[0];
num = s_num = cb->args[1];
spin_lock(&unix_table_lock);
- for (slot = s_slot; slot <= UNIX_HASH_SIZE; s_num = 0, slot++) {
+ for (slot = s_slot;
+ slot < ARRAY_SIZE(unix_socket_table);
+ s_num = 0, slot++) {
struct sock *sk;
struct hlist_node *node;
num = 0;
sk_for_each(sk, node, &unix_socket_table[slot]) {
+ if (!net_eq(sock_net(sk), net))
+ continue;
if (num < s_num)
goto next;
if (!(req->udiag_states & (1 << sk->sk_state)))
@@ -228,7 +222,7 @@ static struct sock *unix_lookup_by_ino(int ino)
struct sock *sk;
spin_lock(&unix_table_lock);
- for (i = 0; i <= UNIX_HASH_SIZE; i++) {
+ for (i = 0; i < ARRAY_SIZE(unix_socket_table); i++) {
struct hlist_node *node;
sk_for_each(sk, node, &unix_socket_table[i])
@@ -252,6 +246,7 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
struct sock *sk;
struct sk_buff *rep;
unsigned int extra_len;
+ struct net *net = sock_net(in_skb->sk);
if (req->udiag_ino == 0)
goto out_nosk;
@@ -268,22 +263,21 @@ static int unix_diag_get_exact(struct sk_buff *in_skb,
extra_len = 256;
again:
err = -ENOMEM;
- rep = alloc_skb(NLMSG_SPACE((sizeof(struct unix_diag_msg) + extra_len)),
- GFP_KERNEL);
+ rep = nlmsg_new(sizeof(struct unix_diag_msg) + extra_len, GFP_KERNEL);
if (!rep)
goto out;
err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, req->udiag_ino);
if (err < 0) {
- kfree_skb(rep);
+ nlmsg_free(rep);
extra_len += 256;
if (extra_len >= PAGE_SIZE)
goto out;
goto again;
}
- err = netlink_unicast(sock_diag_nlsk, rep, NETLINK_CB(in_skb).pid,
+ err = netlink_unicast(net->diag_nlsk, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
err = 0;
@@ -297,6 +291,7 @@ out_nosk:
static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
int hdrlen = sizeof(struct unix_diag_req);
+ struct net *net = sock_net(skb->sk);
if (nlmsg_len(h) < hdrlen)
return -EINVAL;
@@ -305,9 +300,9 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
struct netlink_dump_control c = {
.dump = unix_diag_dump,
};
- return netlink_dump_start(sock_diag_nlsk, skb, h, &c);
+ return netlink_dump_start(net->diag_nlsk, skb, h, &c);
} else
- return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
+ return unix_diag_get_exact(skb, h, nlmsg_data(h));
}
static const struct sock_diag_handler unix_diag_handler = {
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 2e4444fedbe0..fe4adb12b3ef 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -74,6 +74,27 @@ config CFG80211_REG_DEBUG
If unsure, say N.
+config CFG80211_CERTIFICATION_ONUS
+ bool "cfg80211 certification onus"
+ depends on CFG80211 && EXPERT
+ default n
+ ---help---
+ You should disable this option unless you are both capable
+ and willing to ensure your system will remain regulatory
+ compliant with the features available under this option.
+ Some options may still be under heavy development and
+ for whatever reason regulatory compliance has not or
+ cannot yet be verified. Regulatory verification may at
+ times only be possible until you have the final system
+ in place.
+
+ This option should only be enabled by system integrators
+ or distributions that have done work necessary to ensure
+ regulatory certification on the system with the enabled
+ features. Alternatively you can enable this option if
+ you are a wireless researcher and are working in a controlled
+ and approved environment by your local regulatory agency.
+
config CFG80211_DEFAULT_PS
bool "enable powersave by default"
depends on CFG80211
@@ -114,24 +135,10 @@ config CFG80211_WEXT
bool "cfg80211 wireless extensions compatibility"
depends on CFG80211
select WEXT_CORE
- default y
help
Enable this option if you need old userspace for wireless
extensions with cfg80211-based drivers.
-config WIRELESS_EXT_SYSFS
- bool "Wireless extensions sysfs files"
- depends on WEXT_CORE && SYSFS
- help
- This option enables the deprecated wireless statistics
- files in /sys/class/net/*/wireless/. The same information
- is available via the ioctls as well.
-
- Say N. If you know you have ancient tools requiring it,
- like very old versions of hal (prior to 0.5.12 release),
- say Y and update the tools as soon as possible as this
- option will be removed soon.
-
config LIB80211
tristate "Common routines for IEEE802.11 drivers"
default n
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 55a28ab21db9..0f7e0d621ab0 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_WEXT_SPY) += wext-spy.o
obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
-cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o
+cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o
cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
new file mode 100644
index 000000000000..fcc60d8dbefa
--- /dev/null
+++ b/net/wireless/ap.c
@@ -0,0 +1,46 @@
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/cfg80211.h>
+#include "nl80211.h"
+#include "core.h"
+
+
+static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!rdev->ops->stop_ap)
+ return -EOPNOTSUPP;
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EOPNOTSUPP;
+
+ if (!wdev->beacon_interval)
+ return -ENOENT;
+
+ err = rdev->ops->stop_ap(&rdev->wiphy, dev);
+ if (!err) {
+ wdev->beacon_interval = 0;
+ wdev->channel = NULL;
+ }
+
+ return err;
+}
+
+int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ wdev_lock(wdev);
+ err = __cfg80211_stop_ap(rdev, dev);
+ wdev_unlock(wdev);
+
+ return err;
+}
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 884801ac4dd0..d355f67d0cdd 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -60,7 +60,7 @@ bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
diff = -20;
break;
default:
- return false;
+ return true;
}
sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff);
@@ -78,60 +78,75 @@ bool cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
}
EXPORT_SYMBOL(cfg80211_can_beacon_sec_chan);
-int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev, int freq,
- enum nl80211_channel_type channel_type)
+int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
+ int freq, enum nl80211_channel_type chantype)
{
struct ieee80211_channel *chan;
- int result;
-
- if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR)
- wdev = NULL;
- if (wdev) {
- ASSERT_WDEV_LOCK(wdev);
-
- if (!netif_running(wdev->netdev))
- return -ENETDOWN;
- }
-
- if (!rdev->ops->set_channel)
+ if (!rdev->ops->set_monitor_channel)
return -EOPNOTSUPP;
+ if (!cfg80211_has_monitors_only(rdev))
+ return -EBUSY;
- chan = rdev_freq_to_chan(rdev, freq, channel_type);
+ chan = rdev_freq_to_chan(rdev, freq, chantype);
if (!chan)
return -EINVAL;
- /* Both channels should be able to initiate communication */
- if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC ||
- wdev->iftype == NL80211_IFTYPE_AP ||
- wdev->iftype == NL80211_IFTYPE_AP_VLAN ||
- wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
- wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
- switch (channel_type) {
- case NL80211_CHAN_HT40PLUS:
- case NL80211_CHAN_HT40MINUS:
- if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, chan,
- channel_type)) {
- printk(KERN_DEBUG
- "cfg80211: Secondary channel not "
- "allowed to initiate communication\n");
- return -EINVAL;
- }
- break;
- default:
- break;
+ return rdev->ops->set_monitor_channel(&rdev->wiphy, chan, chantype);
+}
+
+void
+cfg80211_get_chan_state(struct wireless_dev *wdev,
+ struct ieee80211_channel **chan,
+ enum cfg80211_chan_mode *chanmode)
+{
+ *chan = NULL;
+ *chanmode = CHAN_MODE_UNDEFINED;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!netif_running(wdev->netdev))
+ return;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ if (wdev->current_bss) {
+ *chan = wdev->current_bss->pub.channel;
+ *chanmode = wdev->ibss_fixed
+ ? CHAN_MODE_SHARED
+ : CHAN_MODE_EXCLUSIVE;
+ return;
+ }
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ if (wdev->current_bss) {
+ *chan = wdev->current_bss->pub.channel;
+ *chanmode = CHAN_MODE_SHARED;
+ return;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (wdev->beacon_interval) {
+ *chan = wdev->channel;
+ *chanmode = CHAN_MODE_SHARED;
}
+ return;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (wdev->mesh_id_len) {
+ *chan = wdev->channel;
+ *chanmode = CHAN_MODE_SHARED;
+ }
+ return;
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_WDS:
+ /* these interface types don't really have a channel */
+ return;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NUM_NL80211_IFTYPES:
+ WARN_ON(1);
}
- result = rdev->ops->set_channel(&rdev->wiphy,
- wdev ? wdev->netdev : NULL,
- chan, channel_type);
- if (result)
- return result;
-
- if (wdev)
- wdev->channel = chan;
-
- return 0;
+ return;
}
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a87d43552974..31b40cc4a9c3 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -96,69 +96,6 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
return &rdev->wiphy;
}
-/* requires cfg80211_mutex to be held! */
-struct cfg80211_registered_device *
-__cfg80211_rdev_from_info(struct genl_info *info)
-{
- int ifindex;
- struct cfg80211_registered_device *bywiphyidx = NULL, *byifidx = NULL;
- struct net_device *dev;
- int err = -EINVAL;
-
- assert_cfg80211_lock();
-
- if (info->attrs[NL80211_ATTR_WIPHY]) {
- bywiphyidx = cfg80211_rdev_by_wiphy_idx(
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY]));
- err = -ENODEV;
- }
-
- if (info->attrs[NL80211_ATTR_IFINDEX]) {
- ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]);
- dev = dev_get_by_index(genl_info_net(info), ifindex);
- if (dev) {
- if (dev->ieee80211_ptr)
- byifidx =
- wiphy_to_dev(dev->ieee80211_ptr->wiphy);
- dev_put(dev);
- }
- err = -ENODEV;
- }
-
- if (bywiphyidx && byifidx) {
- if (bywiphyidx != byifidx)
- return ERR_PTR(-EINVAL);
- else
- return bywiphyidx; /* == byifidx */
- }
- if (bywiphyidx)
- return bywiphyidx;
-
- if (byifidx)
- return byifidx;
-
- return ERR_PTR(err);
-}
-
-struct cfg80211_registered_device *
-cfg80211_get_dev_from_info(struct genl_info *info)
-{
- struct cfg80211_registered_device *rdev;
-
- mutex_lock(&cfg80211_mutex);
- rdev = __cfg80211_rdev_from_info(info);
-
- /* if it is not an error we grab the lock on
- * it to assure it won't be going away while
- * we operate on it */
- if (!IS_ERR(rdev))
- mutex_lock(&rdev->mtx);
-
- mutex_unlock(&cfg80211_mutex);
-
- return rdev;
-}
-
struct cfg80211_registered_device *
cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
{
@@ -239,7 +176,9 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
if (!(rdev->wiphy.flags & WIPHY_FLAG_NETNS_OK))
return -EOPNOTSUPP;
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ if (!wdev->netdev)
+ continue;
wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
err = dev_change_net_namespace(wdev->netdev, net, "wlan%d");
if (err)
@@ -251,8 +190,10 @@ int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
/* failed -- clean up to old netns */
net = wiphy_net(&rdev->wiphy);
- list_for_each_entry_continue_reverse(wdev, &rdev->netdev_list,
+ list_for_each_entry_continue_reverse(wdev, &rdev->wdev_list,
list) {
+ if (!wdev->netdev)
+ continue;
wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
err = dev_change_net_namespace(wdev->netdev, net,
"wlan%d");
@@ -289,8 +230,9 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
rtnl_lock();
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->netdev_list, list)
- dev_close(wdev->netdev);
+ list_for_each_entry(wdev, &rdev->wdev_list, list)
+ if (wdev->netdev)
+ dev_close(wdev->netdev);
mutex_unlock(&rdev->devlist_mtx);
rtnl_unlock();
@@ -367,7 +309,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
mutex_init(&rdev->mtx);
mutex_init(&rdev->devlist_mtx);
mutex_init(&rdev->sched_scan_mtx);
- INIT_LIST_HEAD(&rdev->netdev_list);
+ INIT_LIST_HEAD(&rdev->wdev_list);
spin_lock_init(&rdev->bss_lock);
INIT_LIST_HEAD(&rdev->bss_list);
INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
@@ -436,6 +378,14 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
if (WARN_ON(!c->num_different_channels))
return -EINVAL;
+ /*
+ * Put a sane limit on maximum number of different
+ * channels to simplify channel accounting code.
+ */
+ if (WARN_ON(c->num_different_channels >
+ CFG80211_MAX_NUM_DIFFERENT_CHANNELS))
+ return -EINVAL;
+
if (WARN_ON(!c->n_limits))
return -EINVAL;
@@ -484,9 +434,11 @@ int wiphy_register(struct wiphy *wiphy)
int i;
u16 ifmodes = wiphy->interface_modes;
+#ifdef CONFIG_PM
if (WARN_ON((wiphy->wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
!(wiphy->wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY)))
return -EINVAL;
+#endif
if (WARN_ON(wiphy->ap_sme_capa &&
!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
@@ -521,8 +473,14 @@ int wiphy_register(struct wiphy *wiphy)
continue;
sband->band = band;
-
- if (WARN_ON(!sband->n_channels || !sband->n_bitrates))
+ if (WARN_ON(!sband->n_channels))
+ return -EINVAL;
+ /*
+ * on 60gHz band, there are no legacy rates, so
+ * n_bitrates is 0
+ */
+ if (WARN_ON(band != IEEE80211_BAND_60GHZ &&
+ !sband->n_bitrates))
return -EINVAL;
/*
@@ -563,12 +521,14 @@ int wiphy_register(struct wiphy *wiphy)
return -EINVAL;
}
+#ifdef CONFIG_PM
if (rdev->wiphy.wowlan.n_patterns) {
if (WARN_ON(!rdev->wiphy.wowlan.pattern_min_len ||
rdev->wiphy.wowlan.pattern_min_len >
rdev->wiphy.wowlan.pattern_max_len))
return -EINVAL;
}
+#endif
/* check and set up bitrates */
ieee80211_set_bitrate_flags(wiphy);
@@ -582,7 +542,7 @@ int wiphy_register(struct wiphy *wiphy)
}
/* set up regulatory info */
- regulatory_update(wiphy, NL80211_REGDOM_SET_BY_CORE);
+ wiphy_regulatory_register(wiphy);
list_add_rcu(&rdev->list, &cfg80211_rdev_list);
cfg80211_rdev_list_generation++;
@@ -667,7 +627,7 @@ void wiphy_unregister(struct wiphy *wiphy)
__count == 0; }));
mutex_lock(&rdev->devlist_mtx);
- BUG_ON(!list_empty(&rdev->netdev_list));
+ BUG_ON(!list_empty(&rdev->wdev_list));
mutex_unlock(&rdev->devlist_mtx);
/*
@@ -692,9 +652,11 @@ void wiphy_unregister(struct wiphy *wiphy)
/* nothing */
cfg80211_unlock_rdev(rdev);
- /* If this device got a regulatory hint tell core its
- * free to listen now to a new shiny device regulatory hint */
- reg_device_remove(wiphy);
+ /*
+ * If this device got a regulatory hint tell core its
+ * free to listen now to a new shiny device regulatory hint
+ */
+ wiphy_regulatory_deregister(wiphy);
cfg80211_rdev_list_generation++;
device_del(&rdev->wiphy.dev);
@@ -748,7 +710,7 @@ static void wdev_cleanup_work(struct work_struct *work)
cfg80211_lock_rdev(rdev);
- if (WARN_ON(rdev->scan_req && rdev->scan_req->dev == wdev->netdev)) {
+ if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
rdev->scan_req->aborted = true;
___cfg80211_scan_done(rdev, true);
}
@@ -776,6 +738,16 @@ static struct device_type wiphy_type = {
.name = "wlan",
};
+void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
+ enum nl80211_iftype iftype, int num)
+{
+ ASSERT_RTNL();
+
+ rdev->num_running_ifaces += num;
+ if (iftype == NL80211_IFTYPE_MONITOR)
+ rdev->num_running_monitor_ifaces += num;
+}
+
static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
unsigned long state,
void *ndev)
@@ -810,7 +782,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
spin_lock_init(&wdev->mgmt_registrations_lock);
mutex_lock(&rdev->devlist_mtx);
- list_add_rcu(&wdev->list, &rdev->netdev_list);
+ wdev->identifier = ++rdev->wdev_id;
+ list_add_rcu(&wdev->list, &rdev->wdev_list);
rdev->devlist_generation++;
/* can only change netns with wiphy */
dev->features |= NETIF_F_NETNS_LOCAL;
@@ -869,12 +842,16 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
case NL80211_IFTYPE_MESH_POINT:
cfg80211_leave_mesh(rdev, dev);
break;
+ case NL80211_IFTYPE_AP:
+ cfg80211_stop_ap(rdev, dev);
+ break;
default:
break;
}
wdev->beacon_interval = 0;
break;
case NETDEV_DOWN:
+ cfg80211_update_iface_num(rdev, wdev->iftype, -1);
dev_hold(dev);
queue_work(cfg80211_wq, &wdev->cleanup_work);
break;
@@ -891,6 +868,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
mutex_unlock(&rdev->devlist_mtx);
dev_put(dev);
}
+ cfg80211_update_iface_num(rdev, wdev->iftype, 1);
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
@@ -980,7 +958,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
return notifier_from_errno(-EOPNOTSUPP);
if (rfkill_blocked(rdev->rfkill))
return notifier_from_errno(-ERFKILL);
+ mutex_lock(&rdev->devlist_mtx);
ret = cfg80211_can_add_interface(rdev, wdev->iftype);
+ mutex_unlock(&rdev->devlist_mtx);
if (ret)
return notifier_from_errno(ret);
break;
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 8523f3878677..5206c6844fd7 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -13,6 +13,7 @@
#include <linux/debugfs.h>
#include <linux/rfkill.h>
#include <linux/workqueue.h>
+#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/cfg80211.h>
#include "reg.h"
@@ -46,16 +47,20 @@ struct cfg80211_registered_device {
/* wiphy index, internal only */
int wiphy_idx;
- /* associate netdev list */
+ /* associated wireless interfaces */
struct mutex devlist_mtx;
/* protected by devlist_mtx or RCU */
- struct list_head netdev_list;
- int devlist_generation;
+ struct list_head wdev_list;
+ int devlist_generation, wdev_id;
int opencount; /* also protected by devlist_mtx */
wait_queue_head_t dev_wait;
u32 ap_beacons_nlpid;
+ /* protected by RTNL only */
+ int num_running_ifaces;
+ int num_running_monitor_ifaces;
+
/* BSSes/scanning */
spinlock_t bss_lock;
struct list_head bss_list;
@@ -159,32 +164,6 @@ static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss)
struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx);
int get_wiphy_idx(struct wiphy *wiphy);
-struct cfg80211_registered_device *
-__cfg80211_rdev_from_info(struct genl_info *info);
-
-/*
- * This function returns a pointer to the driver
- * that the genl_info item that is passed refers to.
- * If successful, it returns non-NULL and also locks
- * the driver's mutex!
- *
- * This means that you need to call cfg80211_unlock_rdev()
- * before being allowed to acquire &cfg80211_mutex!
- *
- * This is necessary because we need to lock the global
- * mutex to get an item off the list safely, and then
- * we lock the rdev mutex so it doesn't go away under us.
- *
- * We don't want to keep cfg80211_mutex locked
- * for all the time in order to allow requests on
- * other interfaces to go through at the same time.
- *
- * The result of this can be a PTR_ERR and hence must
- * be checked with IS_ERR() for errors.
- */
-extern struct cfg80211_registered_device *
-cfg80211_get_dev_from_info(struct genl_info *info);
-
/* requires cfg80211_rdev_mutex to be held! */
struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx);
@@ -223,6 +202,14 @@ static inline void wdev_unlock(struct wireless_dev *wdev)
#define ASSERT_RDEV_LOCK(rdev) lockdep_assert_held(&(rdev)->mtx)
#define ASSERT_WDEV_LOCK(wdev) lockdep_assert_held(&(wdev)->mtx)
+static inline bool cfg80211_has_monitors_only(struct cfg80211_registered_device *rdev)
+{
+ ASSERT_RTNL();
+
+ return rdev->num_running_ifaces == rdev->num_running_monitor_ifaces &&
+ rdev->num_running_ifaces > 0;
+}
+
enum cfg80211_event_type {
EVENT_CONNECT_RESULT,
EVENT_ROAMED,
@@ -267,6 +254,12 @@ struct cfg80211_cached_keys {
int def, defmgmt;
};
+enum cfg80211_chan_mode {
+ CHAN_MODE_UNDEFINED,
+ CHAN_MODE_SHARED,
+ CHAN_MODE_EXCLUSIVE,
+};
+
/* free object */
extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
@@ -303,14 +296,21 @@ extern const struct mesh_config default_mesh_config;
extern const struct mesh_setup default_mesh_setup;
int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const struct mesh_setup *setup,
+ struct mesh_setup *setup,
const struct mesh_config *conf);
int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const struct mesh_setup *setup,
+ struct mesh_setup *setup,
const struct mesh_config *conf);
int cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev);
+int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, int freq,
+ enum nl80211_channel_type channel_type);
+
+/* AP */
+int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
+ struct net_device *dev);
/* MLME */
int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
@@ -369,7 +369,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid);
void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev);
int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
+ struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
@@ -427,9 +427,20 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
u32 *flags, struct vif_params *params);
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
-int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev,
- enum nl80211_iftype iftype);
+int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ enum nl80211_iftype iftype,
+ struct ieee80211_channel *chan,
+ enum cfg80211_chan_mode chanmode);
+
+static inline int
+cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ enum nl80211_iftype iftype)
+{
+ return cfg80211_can_use_iftype_chan(rdev, wdev, iftype, NULL,
+ CHAN_MODE_UNDEFINED);
+}
static inline int
cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
@@ -438,12 +449,26 @@ cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
return cfg80211_can_change_interface(rdev, NULL, iftype);
}
+static inline int
+cfg80211_can_use_chan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ struct ieee80211_channel *chan,
+ enum cfg80211_chan_mode chanmode)
+{
+ return cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+ chan, chanmode);
+}
+
+void
+cfg80211_get_chan_state(struct wireless_dev *wdev,
+ struct ieee80211_channel **chan,
+ enum cfg80211_chan_mode *chanmode);
+
struct ieee80211_channel *
rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
int freq, enum nl80211_channel_type channel_type);
-int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev, int freq,
- enum nl80211_channel_type channel_type);
+int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
+ int freq, enum nl80211_channel_type chantype);
int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
const u8 *rates, unsigned int n_rates,
@@ -452,6 +477,11 @@ int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
u32 beacon_int);
+void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
+ enum nl80211_iftype iftype, int num);
+
+#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
+
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
#else
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 89baa3328411..ca5672f6ee2f 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -113,10 +113,21 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
kfree(wdev->connect_keys);
wdev->connect_keys = connkeys;
+ wdev->ibss_fixed = params->channel_fixed;
#ifdef CONFIG_CFG80211_WEXT
wdev->wext.ibss.channel = params->channel;
#endif
wdev->sme_state = CFG80211_SME_CONNECTING;
+
+ err = cfg80211_can_use_chan(rdev, wdev, params->channel,
+ params->channel_fixed
+ ? CHAN_MODE_SHARED
+ : CHAN_MODE_EXCLUSIVE);
+ if (err) {
+ wdev->connect_keys = NULL;
+ return err;
+ }
+
err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
if (err) {
wdev->connect_keys = NULL;
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 2749cb86b462..c384e77ff77a 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -14,6 +14,9 @@
#define MESH_PATH_TIMEOUT 5000
#define MESH_RANN_INTERVAL 5000
+#define MESH_PATH_TO_ROOT_TIMEOUT 6000
+#define MESH_ROOT_INTERVAL 5000
+#define MESH_ROOT_CONFIRMATION_INTERVAL 2000
/*
* Minimum interval between two consecutive PREQs originated by the same
@@ -62,9 +65,15 @@ const struct mesh_config default_mesh_config = {
.dot11MeshForwarding = true,
.rssi_threshold = MESH_RSSI_THRESHOLD,
.ht_opmode = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED,
+ .dot11MeshHWMPactivePathToRootTimeout = MESH_PATH_TO_ROOT_TIMEOUT,
+ .dot11MeshHWMProotInterval = MESH_ROOT_INTERVAL,
+ .dot11MeshHWMPconfirmationInterval = MESH_ROOT_CONFIRMATION_INTERVAL,
};
const struct mesh_setup default_mesh_setup = {
+ /* cfg80211_join_mesh() will pick a channel if needed */
+ .channel = NULL,
+ .channel_type = NL80211_CHAN_NO_HT,
.sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
.path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
.path_metric = IEEE80211_PATH_METRIC_AIRTIME,
@@ -75,7 +84,7 @@ const struct mesh_setup default_mesh_setup = {
int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const struct mesh_setup *setup,
+ struct mesh_setup *setup,
const struct mesh_config *conf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -101,10 +110,61 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
if (!rdev->ops->join_mesh)
return -EOPNOTSUPP;
+ if (!setup->channel) {
+ /* if no channel explicitly given, use preset channel */
+ setup->channel = wdev->preset_chan;
+ setup->channel_type = wdev->preset_chantype;
+ }
+
+ if (!setup->channel) {
+ /* if we don't have that either, use the first usable channel */
+ enum ieee80211_band band;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *chan;
+ int i;
+
+ sband = rdev->wiphy.bands[band];
+ if (!sband)
+ continue;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ chan = &sband->channels[i];
+ if (chan->flags & (IEEE80211_CHAN_NO_IBSS |
+ IEEE80211_CHAN_PASSIVE_SCAN |
+ IEEE80211_CHAN_DISABLED |
+ IEEE80211_CHAN_RADAR))
+ continue;
+ setup->channel = chan;
+ break;
+ }
+
+ if (setup->channel)
+ break;
+ }
+
+ /* no usable channel ... */
+ if (!setup->channel)
+ return -EINVAL;
+
+ setup->channel_type = NL80211_CHAN_NO_HT;
+ }
+
+ if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, setup->channel,
+ setup->channel_type))
+ return -EINVAL;
+
+ err = cfg80211_can_use_chan(rdev, wdev, setup->channel,
+ CHAN_MODE_SHARED);
+ if (err)
+ return err;
+
err = rdev->ops->join_mesh(&rdev->wiphy, dev, conf, setup);
if (!err) {
memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
wdev->mesh_id_len = setup->mesh_id_len;
+ wdev->channel = setup->channel;
}
return err;
@@ -112,19 +172,71 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- const struct mesh_setup *setup,
+ struct mesh_setup *setup,
const struct mesh_config *conf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
+ mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
err = __cfg80211_join_mesh(rdev, dev, setup, conf);
wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
return err;
}
+int cfg80211_set_mesh_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, int freq,
+ enum nl80211_channel_type channel_type)
+{
+ struct ieee80211_channel *channel;
+ int err;
+
+ channel = rdev_freq_to_chan(rdev, freq, channel_type);
+ if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
+ channel,
+ channel_type)) {
+ return -EINVAL;
+ }
+
+ /*
+ * Workaround for libertas (only!), it puts the interface
+ * into mesh mode but doesn't implement join_mesh. Instead,
+ * it is configured via sysfs and then joins the mesh when
+ * you set the channel. Note that the libertas mesh isn't
+ * compatible with 802.11 mesh.
+ */
+ if (rdev->ops->libertas_set_mesh_channel) {
+ if (channel_type != NL80211_CHAN_NO_HT)
+ return -EINVAL;
+
+ if (!netif_running(wdev->netdev))
+ return -ENETDOWN;
+
+ err = cfg80211_can_use_chan(rdev, wdev, channel,
+ CHAN_MODE_SHARED);
+ if (err)
+ return err;
+
+ err = rdev->ops->libertas_set_mesh_channel(&rdev->wiphy,
+ wdev->netdev,
+ channel);
+ if (!err)
+ wdev->channel = channel;
+
+ return err;
+ }
+
+ if (wdev->mesh_id_len)
+ return -EBUSY;
+
+ wdev->preset_chan = channel;
+ wdev->preset_chantype = channel_type;
+ return 0;
+}
+
void cfg80211_notify_new_peer_candidate(struct net_device *dev,
const u8 *macaddr, const u8* ie, u8 ie_len, gfp_t gfp)
{
@@ -156,8 +268,11 @@ static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
return -ENOTCONN;
err = rdev->ops->leave_mesh(&rdev->wiphy, dev);
- if (!err)
+ if (!err) {
wdev->mesh_id_len = 0;
+ wdev->channel = NULL;
+ }
+
return err;
}
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index eb90988bbd36..1cdb1d5e6b0f 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -302,8 +302,14 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
if (!req.bss)
return -ENOENT;
+ err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel,
+ CHAN_MODE_SHARED);
+ if (err)
+ goto out;
+
err = rdev->ops->auth(&rdev->wiphy, dev, &req);
+out:
cfg80211_put_bss(req.bss);
return err;
}
@@ -317,11 +323,13 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
{
int err;
+ mutex_lock(&rdev->devlist_mtx);
wdev_lock(dev->ieee80211_ptr);
err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
ssid, ssid_len, ie, ie_len,
key, key_len, key_idx);
wdev_unlock(dev->ieee80211_ptr);
+ mutex_unlock(&rdev->devlist_mtx);
return err;
}
@@ -397,8 +405,14 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
return -ENOENT;
}
+ err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel,
+ CHAN_MODE_SHARED);
+ if (err)
+ goto out;
+
err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
+out:
if (err) {
if (was_connected)
wdev->sme_state = CFG80211_SME_CONNECTED;
@@ -421,11 +435,13 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
+ mutex_lock(&rdev->devlist_mtx);
wdev_lock(wdev);
err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
ssid, ssid_len, ie, ie_len, use_mfp, crypt,
assoc_flags, ht_capa, ht_capa_mask);
wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
return err;
}
@@ -551,29 +567,28 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
}
}
-void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
+void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration, gfp_t gfp)
{
- struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type,
+ nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, channel_type,
duration, gfp);
}
EXPORT_SYMBOL(cfg80211_ready_on_channel);
-void cfg80211_remain_on_channel_expired(struct net_device *dev,
- u64 cookie,
+void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
gfp_t gfp)
{
- struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan,
+ nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan,
channel_type, gfp);
}
EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
@@ -662,8 +677,7 @@ int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_pid,
list_add(&nreg->list, &wdev->mgmt_registrations);
if (rdev->ops->mgmt_frame_register)
- rdev->ops->mgmt_frame_register(wiphy, wdev->netdev,
- frame_type, true);
+ rdev->ops->mgmt_frame_register(wiphy, wdev, frame_type, true);
out:
spin_unlock_bh(&wdev->mgmt_registrations_lock);
@@ -686,7 +700,7 @@ void cfg80211_mlme_unregister_socket(struct wireless_dev *wdev, u32 nlpid)
if (rdev->ops->mgmt_frame_register) {
u16 frame_type = le16_to_cpu(reg->frame_type);
- rdev->ops->mgmt_frame_register(wiphy, wdev->netdev,
+ rdev->ops->mgmt_frame_register(wiphy, wdev,
frame_type, false);
}
@@ -715,14 +729,14 @@ void cfg80211_mlme_purge_registrations(struct wireless_dev *wdev)
}
int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
+ struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
const u8 *buf, size_t len, bool no_cck,
bool dont_wait_for_ack, u64 *cookie)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct net_device *dev = wdev->netdev;
const struct ieee80211_mgmt *mgmt;
u16 stype;
@@ -809,16 +823,15 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
return -EINVAL;
/* Transmit the Action frame as requested by user space */
- return rdev->ops->mgmt_tx(&rdev->wiphy, dev, chan, offchan,
+ return rdev->ops->mgmt_tx(&rdev->wiphy, wdev, chan, offchan,
channel_type, channel_type_valid,
wait, buf, len, no_cck, dont_wait_for_ack,
cookie);
}
-bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
+bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
const u8 *buf, size_t len, gfp_t gfp)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
struct cfg80211_mgmt_registration *reg;
@@ -855,7 +868,7 @@ bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
/* found match! */
/* Indicate the received Action frame to user space */
- if (nl80211_send_mgmt(rdev, dev, reg->nlpid,
+ if (nl80211_send_mgmt(rdev, wdev, reg->nlpid,
freq, sig_mbm,
buf, len, gfp))
continue;
@@ -870,15 +883,14 @@ bool cfg80211_rx_mgmt(struct net_device *dev, int freq, int sig_mbm,
}
EXPORT_SYMBOL(cfg80211_rx_mgmt);
-void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie,
+void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
const u8 *buf, size_t len, bool ack, gfp_t gfp)
{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
/* Indicate TX status of the Action frame to user space */
- nl80211_send_mgmt_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
+ nl80211_send_mgmt_tx_status(rdev, wdev, cookie, buf, len, ack, gfp);
}
EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
@@ -907,6 +919,19 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
}
EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
+void cfg80211_cqm_txe_notify(struct net_device *dev,
+ const u8 *peer, u32 num_packets,
+ u32 rate, u32 intvl, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_send_cqm_txe_notify(rdev, dev, peer, num_packets,
+ rate, intvl, gfp);
+}
+EXPORT_SYMBOL(cfg80211_cqm_txe_notify);
+
void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp)
{
@@ -948,7 +973,6 @@ void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
goto out;
wdev->channel = chan;
-
nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
out:
wdev_unlock(wdev);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 206465dc0cab..97026f3b215a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -46,28 +46,175 @@ static struct genl_family nl80211_fam = {
.post_doit = nl80211_post_doit,
};
-/* internal helper: get rdev and dev */
-static int get_rdev_dev_by_ifindex(struct net *netns, struct nlattr **attrs,
- struct cfg80211_registered_device **rdev,
- struct net_device **dev)
+/* returns ERR_PTR values */
+static struct wireless_dev *
+__cfg80211_wdev_from_attrs(struct net *netns, struct nlattr **attrs)
{
- int ifindex;
+ struct cfg80211_registered_device *rdev;
+ struct wireless_dev *result = NULL;
+ bool have_ifidx = attrs[NL80211_ATTR_IFINDEX];
+ bool have_wdev_id = attrs[NL80211_ATTR_WDEV];
+ u64 wdev_id;
+ int wiphy_idx = -1;
+ int ifidx = -1;
- if (!attrs[NL80211_ATTR_IFINDEX])
- return -EINVAL;
+ assert_cfg80211_lock();
- ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
- *dev = dev_get_by_index(netns, ifindex);
- if (!*dev)
- return -ENODEV;
+ if (!have_ifidx && !have_wdev_id)
+ return ERR_PTR(-EINVAL);
- *rdev = cfg80211_get_dev_from_ifindex(netns, ifindex);
- if (IS_ERR(*rdev)) {
- dev_put(*dev);
- return PTR_ERR(*rdev);
+ if (have_ifidx)
+ ifidx = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
+ if (have_wdev_id) {
+ wdev_id = nla_get_u64(attrs[NL80211_ATTR_WDEV]);
+ wiphy_idx = wdev_id >> 32;
}
- return 0;
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ struct wireless_dev *wdev;
+
+ if (wiphy_net(&rdev->wiphy) != netns)
+ continue;
+
+ if (have_wdev_id && rdev->wiphy_idx != wiphy_idx)
+ continue;
+
+ mutex_lock(&rdev->devlist_mtx);
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ if (have_ifidx && wdev->netdev &&
+ wdev->netdev->ifindex == ifidx) {
+ result = wdev;
+ break;
+ }
+ if (have_wdev_id && wdev->identifier == (u32)wdev_id) {
+ result = wdev;
+ break;
+ }
+ }
+ mutex_unlock(&rdev->devlist_mtx);
+
+ if (result)
+ break;
+ }
+
+ if (result)
+ return result;
+ return ERR_PTR(-ENODEV);
+}
+
+static struct cfg80211_registered_device *
+__cfg80211_rdev_from_attrs(struct net *netns, struct nlattr **attrs)
+{
+ struct cfg80211_registered_device *rdev = NULL, *tmp;
+ struct net_device *netdev;
+
+ assert_cfg80211_lock();
+
+ if (!attrs[NL80211_ATTR_WIPHY] &&
+ !attrs[NL80211_ATTR_IFINDEX] &&
+ !attrs[NL80211_ATTR_WDEV])
+ return ERR_PTR(-EINVAL);
+
+ if (attrs[NL80211_ATTR_WIPHY])
+ rdev = cfg80211_rdev_by_wiphy_idx(
+ nla_get_u32(attrs[NL80211_ATTR_WIPHY]));
+
+ if (attrs[NL80211_ATTR_WDEV]) {
+ u64 wdev_id = nla_get_u64(attrs[NL80211_ATTR_WDEV]);
+ struct wireless_dev *wdev;
+ bool found = false;
+
+ tmp = cfg80211_rdev_by_wiphy_idx(wdev_id >> 32);
+ if (tmp) {
+ /* make sure wdev exists */
+ mutex_lock(&tmp->devlist_mtx);
+ list_for_each_entry(wdev, &tmp->wdev_list, list) {
+ if (wdev->identifier != (u32)wdev_id)
+ continue;
+ found = true;
+ break;
+ }
+ mutex_unlock(&tmp->devlist_mtx);
+
+ if (!found)
+ tmp = NULL;
+
+ if (rdev && tmp != rdev)
+ return ERR_PTR(-EINVAL);
+ rdev = tmp;
+ }
+ }
+
+ if (attrs[NL80211_ATTR_IFINDEX]) {
+ int ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
+ netdev = dev_get_by_index(netns, ifindex);
+ if (netdev) {
+ if (netdev->ieee80211_ptr)
+ tmp = wiphy_to_dev(
+ netdev->ieee80211_ptr->wiphy);
+ else
+ tmp = NULL;
+
+ dev_put(netdev);
+
+ /* not wireless device -- return error */
+ if (!tmp)
+ return ERR_PTR(-EINVAL);
+
+ /* mismatch -- return error */
+ if (rdev && tmp != rdev)
+ return ERR_PTR(-EINVAL);
+
+ rdev = tmp;
+ }
+ }
+
+ if (!rdev)
+ return ERR_PTR(-ENODEV);
+
+ if (netns != wiphy_net(&rdev->wiphy))
+ return ERR_PTR(-ENODEV);
+
+ return rdev;
+}
+
+/*
+ * This function returns a pointer to the driver
+ * that the genl_info item that is passed refers to.
+ * If successful, it returns non-NULL and also locks
+ * the driver's mutex!
+ *
+ * This means that you need to call cfg80211_unlock_rdev()
+ * before being allowed to acquire &cfg80211_mutex!
+ *
+ * This is necessary because we need to lock the global
+ * mutex to get an item off the list safely, and then
+ * we lock the rdev mutex so it doesn't go away under us.
+ *
+ * We don't want to keep cfg80211_mutex locked
+ * for all the time in order to allow requests on
+ * other interfaces to go through at the same time.
+ *
+ * The result of this can be a PTR_ERR and hence must
+ * be checked with IS_ERR() for errors.
+ */
+static struct cfg80211_registered_device *
+cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+
+ mutex_lock(&cfg80211_mutex);
+ rdev = __cfg80211_rdev_from_attrs(netns, info->attrs);
+
+ /* if it is not an error we grab the lock on
+ * it to assure it won't be going away while
+ * we operate on it */
+ if (!IS_ERR(rdev))
+ mutex_lock(&rdev->mtx);
+
+ mutex_unlock(&cfg80211_mutex);
+
+ return rdev;
}
/* policy for the attributes */
@@ -115,7 +262,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 },
[NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
[NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_MESH_ID_LEN },
+ .len = IEEE80211_MAX_MESH_ID_LEN },
[NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
[NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
@@ -206,6 +353,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_NOACK_MAP] = { .type = NLA_U16 },
[NL80211_ATTR_INACTIVITY_TIMEOUT] = { .type = NLA_U16 },
[NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
+ [NL80211_ATTR_WDEV] = { .type = NLA_U64 },
+ [NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
};
/* policy for the key attributes */
@@ -250,8 +399,9 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
static const struct nla_policy
nl80211_match_policy[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1] = {
- [NL80211_ATTR_SCHED_SCAN_MATCH_SSID] = { .type = NLA_BINARY,
+ [NL80211_SCHED_SCAN_MATCH_ATTR_SSID] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_SSID_LEN },
+ [NL80211_SCHED_SCAN_MATCH_ATTR_RSSI] = { .type = NLA_U32 },
};
/* ifidx get helper */
@@ -832,6 +982,15 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
dev->wiphy.bands[band]->ht_cap.ampdu_density)))
goto nla_put_failure;
+ /* add VHT info */
+ if (dev->wiphy.bands[band]->vht_cap.vht_supported &&
+ (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET,
+ sizeof(dev->wiphy.bands[band]->vht_cap.vht_mcs),
+ &dev->wiphy.bands[band]->vht_cap.vht_mcs) ||
+ nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA,
+ dev->wiphy.bands[band]->vht_cap.cap)))
+ goto nla_put_failure;
+
/* add frequencies */
nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
if (!nl_freqs)
@@ -921,7 +1080,12 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
goto nla_put_failure;
}
- CMD(set_channel, SET_CHANNEL);
+ if (dev->ops->set_monitor_channel || dev->ops->start_ap ||
+ dev->ops->join_mesh) {
+ i++;
+ if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
+ goto nla_put_failure;
+ }
CMD(set_wds_peer, SET_WDS_PEER);
if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
CMD(tdls_mgmt, TDLS_MGMT);
@@ -1018,6 +1182,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
nla_nest_end(msg, nl_ifs);
}
+#ifdef CONFIG_PM
if (dev->wiphy.wowlan.flags || dev->wiphy.wowlan.n_patterns) {
struct nlattr *nl_wowlan;
@@ -1058,6 +1223,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
nla_nest_end(msg, nl_wowlan);
}
+#endif
if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
dev->wiphy.software_iftypes))
@@ -1162,18 +1328,22 @@ static int parse_txq_params(struct nlattr *tb[],
static bool nl80211_can_set_dev_channel(struct wireless_dev *wdev)
{
/*
- * You can only set the channel explicitly for AP, mesh
- * and WDS type interfaces; all others have their channel
- * managed via their respective "establish a connection"
- * command (connect, join, ...)
+ * You can only set the channel explicitly for WDS interfaces,
+ * all others have their channel managed via their respective
+ * "establish a connection" command (connect, join, ...)
+ *
+ * For AP/GO and mesh mode, the channel can be set with the
+ * channel userspace API, but is only stored and passed to the
+ * low-level driver when the AP starts or the mesh is joined.
+ * This is for backward compatibility, userspace can also give
+ * the channel in the start-ap or join-mesh commands instead.
*
* Monitors are special as they are normally slaved to
- * whatever else is going on, so they behave as though
- * you tried setting the wiphy channel itself.
+ * whatever else is going on, so they have their own special
+ * operation to set the monitor channel if possible.
*/
return !wdev ||
wdev->iftype == NL80211_IFTYPE_AP ||
- wdev->iftype == NL80211_IFTYPE_WDS ||
wdev->iftype == NL80211_IFTYPE_MESH_POINT ||
wdev->iftype == NL80211_IFTYPE_MONITOR ||
wdev->iftype == NL80211_IFTYPE_P2P_GO;
@@ -1204,9 +1374,14 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
struct genl_info *info)
{
+ struct ieee80211_channel *channel;
enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
u32 freq;
int result;
+ enum nl80211_iftype iftype = NL80211_IFTYPE_MONITOR;
+
+ if (wdev)
+ iftype = wdev->iftype;
if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
return -EINVAL;
@@ -1221,12 +1396,32 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
mutex_lock(&rdev->devlist_mtx);
- if (wdev) {
- wdev_lock(wdev);
- result = cfg80211_set_freq(rdev, wdev, freq, channel_type);
- wdev_unlock(wdev);
- } else {
- result = cfg80211_set_freq(rdev, NULL, freq, channel_type);
+ switch (iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (wdev->beacon_interval) {
+ result = -EBUSY;
+ break;
+ }
+ channel = rdev_freq_to_chan(rdev, freq, channel_type);
+ if (!channel || !cfg80211_can_beacon_sec_chan(&rdev->wiphy,
+ channel,
+ channel_type)) {
+ result = -EINVAL;
+ break;
+ }
+ wdev->preset_chan = channel;
+ wdev->preset_chantype = channel_type;
+ result = 0;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ result = cfg80211_set_mesh_freq(rdev, wdev, freq, channel_type);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ result = cfg80211_set_monitor_channel(rdev, freq, channel_type);
+ break;
+ default:
+ result = -EINVAL;
}
mutex_unlock(&rdev->devlist_mtx);
@@ -1300,7 +1495,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
}
if (!netdev) {
- rdev = __cfg80211_rdev_from_info(info);
+ rdev = __cfg80211_rdev_from_attrs(genl_info_net(info),
+ info->attrs);
if (IS_ERR(rdev)) {
mutex_unlock(&cfg80211_mutex);
return PTR_ERR(rdev);
@@ -1310,8 +1506,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
result = 0;
mutex_lock(&rdev->mtx);
- } else if (netif_running(netdev) &&
- nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
+ } else if (nl80211_can_set_dev_channel(netdev->ieee80211_ptr))
wdev = netdev->ieee80211_ptr;
else
wdev = NULL;
@@ -1534,22 +1729,32 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
return result;
}
+static inline u64 wdev_id(struct wireless_dev *wdev)
+{
+ return (u64)wdev->identifier |
+ ((u64)wiphy_to_dev(wdev->wiphy)->wiphy_idx << 32);
+}
static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
struct cfg80211_registered_device *rdev,
- struct net_device *dev)
+ struct wireless_dev *wdev)
{
+ struct net_device *dev = wdev->netdev;
void *hdr;
hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_INTERFACE);
if (!hdr)
return -1;
- if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
- nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
- nla_put_u32(msg, NL80211_ATTR_IFTYPE,
- dev->ieee80211_ptr->iftype) ||
+ if (dev &&
+ (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
+ nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dev->dev_addr)))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFTYPE, wdev->iftype) ||
+ nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
nla_put_u32(msg, NL80211_ATTR_GENERATION,
rdev->devlist_generation ^
(cfg80211_rdev_list_generation << 2)))
@@ -1559,12 +1764,13 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
struct ieee80211_channel *chan;
enum nl80211_channel_type channel_type;
- chan = rdev->ops->get_channel(&rdev->wiphy, &channel_type);
+ chan = rdev->ops->get_channel(&rdev->wiphy, wdev,
+ &channel_type);
if (chan &&
(nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
- chan->center_freq) ||
+ chan->center_freq) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
- channel_type)))
+ channel_type)))
goto nla_put_failure;
}
@@ -1595,14 +1801,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
if_idx = 0;
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
if (if_idx < if_start) {
if_idx++;
continue;
}
if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- rdev, wdev->netdev) < 0) {
+ rdev, wdev) < 0) {
mutex_unlock(&rdev->devlist_mtx);
goto out;
}
@@ -1625,14 +1831,14 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
struct cfg80211_registered_device *dev = info->user_ptr[0];
- struct net_device *netdev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
- dev, netdev) < 0) {
+ dev, wdev) < 0) {
nlmsg_free(msg);
return -ENOBUFS;
}
@@ -1772,7 +1978,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct vif_params params;
- struct net_device *dev;
+ struct wireless_dev *wdev;
+ struct sk_buff *msg;
int err;
enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
u32 flags;
@@ -1799,19 +2006,23 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
&flags);
- dev = rdev->ops->add_virtual_intf(&rdev->wiphy,
+ wdev = rdev->ops->add_virtual_intf(&rdev->wiphy,
nla_data(info->attrs[NL80211_ATTR_IFNAME]),
type, err ? NULL : &flags, &params);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ if (IS_ERR(wdev)) {
+ nlmsg_free(msg);
+ return PTR_ERR(wdev);
+ }
if (type == NL80211_IFTYPE_MESH_POINT &&
info->attrs[NL80211_ATTR_MESH_ID]) {
- struct wireless_dev *wdev = dev->ieee80211_ptr;
-
wdev_lock(wdev);
BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
IEEE80211_MAX_MESH_ID_LEN);
@@ -1822,18 +2033,34 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
wdev_unlock(wdev);
}
- return 0;
+ if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0,
+ rdev, wdev) < 0) {
+ nlmsg_free(msg);
+ return -ENOBUFS;
+ }
+
+ return genlmsg_reply(msg, info);
}
static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
if (!rdev->ops->del_virtual_intf)
return -EOPNOTSUPP;
- return rdev->ops->del_virtual_intf(&rdev->wiphy, dev);
+ /*
+ * If we remove a wireless device without a netdev then clear
+ * user_ptr[1] so that nl80211_post_doit won't dereference it
+ * to check if it needs to do dev_put(). Otherwise it crashes
+ * since the wdev has been freed, unlike with a netdev where
+ * we need the dev_put() for the netdev to really be freed.
+ */
+ if (!wdev->netdev)
+ info->user_ptr[1] = NULL;
+
+ return rdev->ops->del_virtual_intf(&rdev->wiphy, wdev);
}
static int nl80211_set_noack_map(struct sk_buff *skb, struct genl_info *info)
@@ -2213,6 +2440,33 @@ static int nl80211_parse_beacon(struct genl_info *info,
return 0;
}
+static bool nl80211_get_ap_channel(struct cfg80211_registered_device *rdev,
+ struct cfg80211_ap_settings *params)
+{
+ struct wireless_dev *wdev;
+ bool ret = false;
+
+ mutex_lock(&rdev->devlist_mtx);
+
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
+ if (wdev->iftype != NL80211_IFTYPE_AP &&
+ wdev->iftype != NL80211_IFTYPE_P2P_GO)
+ continue;
+
+ if (!wdev->preset_chan)
+ continue;
+
+ params->channel = wdev->preset_chan;
+ params->channel_type = wdev->preset_chantype;
+ ret = true;
+ break;
+ }
+
+ mutex_unlock(&rdev->devlist_mtx);
+
+ return ret;
+}
+
static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -2299,9 +2553,44 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
info->attrs[NL80211_ATTR_INACTIVITY_TIMEOUT]);
}
+ if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
+ enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+ !nl80211_valid_channel_type(info, &channel_type))
+ return -EINVAL;
+
+ params.channel = rdev_freq_to_chan(rdev,
+ nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
+ channel_type);
+ if (!params.channel)
+ return -EINVAL;
+ params.channel_type = channel_type;
+ } else if (wdev->preset_chan) {
+ params.channel = wdev->preset_chan;
+ params.channel_type = wdev->preset_chantype;
+ } else if (!nl80211_get_ap_channel(rdev, &params))
+ return -EINVAL;
+
+ if (!cfg80211_can_beacon_sec_chan(&rdev->wiphy, params.channel,
+ params.channel_type))
+ return -EINVAL;
+
+ mutex_lock(&rdev->devlist_mtx);
+ err = cfg80211_can_use_chan(rdev, wdev, params.channel,
+ CHAN_MODE_SHARED);
+ mutex_unlock(&rdev->devlist_mtx);
+
+ if (err)
+ return err;
+
err = rdev->ops->start_ap(&rdev->wiphy, dev, &params);
- if (!err)
+ if (!err) {
+ wdev->preset_chan = params.channel;
+ wdev->preset_chantype = params.channel_type;
wdev->beacon_interval = params.beacon_interval;
+ wdev->channel = params.channel;
+ }
return err;
}
@@ -2334,23 +2623,8 @@ static int nl80211_stop_ap(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct net_device *dev = info->user_ptr[1];
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err;
- if (!rdev->ops->stop_ap)
- return -EOPNOTSUPP;
-
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
- return -EOPNOTSUPP;
-
- if (!wdev->beacon_interval)
- return -ENOENT;
-
- err = rdev->ops->stop_ap(&rdev->wiphy, dev);
- if (!err)
- wdev->beacon_interval = 0;
- return err;
+ return cfg80211_stop_ap(rdev, dev);
}
static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
@@ -2442,7 +2716,8 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
int attr)
{
struct nlattr *rate;
- u16 bitrate;
+ u32 bitrate;
+ u16 bitrate_compat;
rate = nla_nest_start(msg, attr);
if (!rate)
@@ -2450,8 +2725,12 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
/* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
bitrate = cfg80211_calculate_bitrate(info);
+ /* report 16-bit bitrate only if we can */
+ bitrate_compat = bitrate < (1UL << 16) ? bitrate : 0;
if ((bitrate > 0 &&
- nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) ||
+ nla_put_u32(msg, NL80211_RATE_INFO_BITRATE32, bitrate)) ||
+ (bitrate_compat > 0 &&
+ nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate_compat)) ||
((info->flags & RATE_INFO_FLAGS_MCS) &&
nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) ||
((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) &&
@@ -3304,6 +3583,7 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
{
int r;
char *data = NULL;
+ enum nl80211_user_reg_hint_type user_reg_hint_type;
/*
* You should only get this when cfg80211 hasn't yet initialized
@@ -3323,7 +3603,21 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
- r = regulatory_hint_user(data);
+ if (info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE])
+ user_reg_hint_type =
+ nla_get_u32(info->attrs[NL80211_ATTR_USER_REG_HINT_TYPE]);
+ else
+ user_reg_hint_type = NL80211_USER_REG_HINT_USER;
+
+ switch (user_reg_hint_type) {
+ case NL80211_USER_REG_HINT_USER:
+ case NL80211_USER_REG_HINT_CELL_BASE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ r = regulatory_hint_user(data, user_reg_hint_type);
return r;
}
@@ -3413,7 +3707,13 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
cur_params.rssi_threshold) ||
nla_put_u32(msg, NL80211_MESHCONF_HT_OPMODE,
- cur_params.ht_opmode))
+ cur_params.ht_opmode) ||
+ nla_put_u32(msg, NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
+ cur_params.dot11MeshHWMPactivePathToRootTimeout) ||
+ nla_put_u16(msg, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
+ cur_params.dot11MeshHWMProotInterval) ||
+ nla_put_u16(msg, NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
+ cur_params.dot11MeshHWMPconfirmationInterval))
goto nla_put_failure;
nla_nest_end(msg, pinfoattr);
genlmsg_end(msg, hdr);
@@ -3436,7 +3736,6 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
[NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
[NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 },
-
[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
[NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
[NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
@@ -3448,8 +3747,11 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
[NL80211_MESHCONF_HWMP_RANN_INTERVAL] = { .type = NLA_U16 },
[NL80211_MESHCONF_GATE_ANNOUNCEMENTS] = { .type = NLA_U8 },
[NL80211_MESHCONF_FORWARDING] = { .type = NLA_U8 },
- [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32},
- [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16},
+ [NL80211_MESHCONF_RSSI_THRESHOLD] = { .type = NLA_U32 },
+ [NL80211_MESHCONF_HT_OPMODE] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT] = { .type = NLA_U32 },
+ [NL80211_MESHCONF_HWMP_ROOT_INTERVAL] = { .type = NLA_U16 },
+ [NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL] = { .type = NLA_U16 },
};
static const struct nla_policy
@@ -3459,7 +3761,7 @@ static const struct nla_policy
[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
[NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
[NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
- .len = IEEE80211_MAX_DATA_LEN },
+ .len = IEEE80211_MAX_DATA_LEN },
[NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
};
@@ -3492,63 +3794,82 @@ do {\
/* Fill in the params struct */
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout,
- mask, NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16);
+ mask, NL80211_MESHCONF_RETRY_TIMEOUT,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout,
- mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, nla_get_u16);
+ mask, NL80211_MESHCONF_CONFIRM_TIMEOUT,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout,
- mask, NL80211_MESHCONF_HOLDING_TIMEOUT, nla_get_u16);
+ mask, NL80211_MESHCONF_HOLDING_TIMEOUT,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks,
- mask, NL80211_MESHCONF_MAX_PEER_LINKS, nla_get_u16);
+ mask, NL80211_MESHCONF_MAX_PEER_LINKS,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries,
- mask, NL80211_MESHCONF_MAX_RETRIES, nla_get_u8);
+ mask, NL80211_MESHCONF_MAX_RETRIES,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL,
- mask, NL80211_MESHCONF_TTL, nla_get_u8);
+ mask, NL80211_MESHCONF_TTL, nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl,
- mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8);
+ mask, NL80211_MESHCONF_ELEMENT_TTL,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
- mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
- mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
- nla_get_u32);
+ mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
+ nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor, mask,
+ NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
+ nla_get_u32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
- mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
- nla_get_u8);
+ mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time,
- mask, NL80211_MESHCONF_PATH_REFRESH_TIME, nla_get_u32);
+ mask, NL80211_MESHCONF_PATH_REFRESH_TIME,
+ nla_get_u32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout,
- mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
- nla_get_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout,
- mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
- nla_get_u32);
+ mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout, mask,
+ NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
+ nla_get_u32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
- mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
- nla_get_u16);
+ mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPperrMinInterval,
- mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
- nla_get_u16);
+ mask, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshHWMPnetDiameterTraversalTime,
- mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
- nla_get_u16);
+ dot11MeshHWMPnetDiameterTraversalTime, mask,
+ NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode, mask,
+ NL80211_MESHCONF_HWMP_ROOTMODE, nla_get_u8);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRannInterval, mask,
+ NL80211_MESHCONF_HWMP_RANN_INTERVAL,
+ nla_get_u16);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshHWMPRootMode, mask,
- NL80211_MESHCONF_HWMP_ROOTMODE,
- nla_get_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshHWMPRannInterval, mask,
- NL80211_MESHCONF_HWMP_RANN_INTERVAL,
- nla_get_u16);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
- dot11MeshGateAnnouncementProtocol, mask,
- NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
- nla_get_u8);
+ dot11MeshGateAnnouncementProtocol, mask,
+ NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding,
- mask, NL80211_MESHCONF_FORWARDING, nla_get_u8);
+ mask, NL80211_MESHCONF_FORWARDING,
+ nla_get_u8);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold,
- mask, NL80211_MESHCONF_RSSI_THRESHOLD, nla_get_u32);
+ mask, NL80211_MESHCONF_RSSI_THRESHOLD,
+ nla_get_u32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode,
- mask, NL80211_MESHCONF_HT_OPMODE, nla_get_u16);
+ mask, NL80211_MESHCONF_HT_OPMODE,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
+ mask,
+ NL80211_MESHCONF_HWMP_PATH_TO_ROOT_TIMEOUT,
+ nla_get_u32);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMProotInterval,
+ mask, NL80211_MESHCONF_HWMP_ROOT_INTERVAL,
+ nla_get_u16);
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg,
+ dot11MeshHWMPconfirmationInterval, mask,
+ NL80211_MESHCONF_HWMP_CONFIRMATION_INTERVAL,
+ nla_get_u16);
if (mask_out)
*mask_out = mask;
@@ -3666,6 +3987,11 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
cfg80211_regdomain->dfs_region)))
goto nla_put_failure;
+ if (reg_last_request_cell_base() &&
+ nla_put_u32(msg, NL80211_ATTR_USER_REG_HINT_TYPE,
+ NL80211_USER_REG_HINT_CELL_BASE))
+ goto nla_put_failure;
+
nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
if (!nl_reg_rules)
goto nla_put_failure;
@@ -3831,7 +4157,7 @@ static int validate_scan_freqs(struct nlattr *freqs)
static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
struct cfg80211_scan_request *request;
struct nlattr *attr;
struct wiphy *wiphy;
@@ -3991,15 +4317,16 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->no_cck =
nla_get_flag(info->attrs[NL80211_ATTR_TX_NO_CCK_RATE]);
- request->dev = dev;
+ request->wdev = wdev;
request->wiphy = &rdev->wiphy;
rdev->scan_req = request;
- err = rdev->ops->scan(&rdev->wiphy, dev, request);
+ err = rdev->ops->scan(&rdev->wiphy, request);
if (!err) {
- nl80211_send_scan_start(rdev, dev);
- dev_hold(dev);
+ nl80211_send_scan_start(rdev, wdev);
+ if (wdev->netdev)
+ dev_hold(wdev->netdev);
} else {
out_free:
rdev->scan_req = NULL;
@@ -4185,12 +4512,12 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
nla_for_each_nested(attr,
info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
tmp) {
- struct nlattr *ssid;
+ struct nlattr *ssid, *rssi;
nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
nla_data(attr), nla_len(attr),
nl80211_match_policy);
- ssid = tb[NL80211_ATTR_SCHED_SCAN_MATCH_SSID];
+ ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID];
if (ssid) {
if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
@@ -4201,6 +4528,12 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
request->match_sets[i].ssid.ssid_len =
nla_len(ssid);
}
+ rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+ if (rssi)
+ request->rssi_thold = nla_get_u32(rssi);
+ else
+ request->rssi_thold =
+ NL80211_SCAN_RSSI_THOLD_OFF;
i++;
}
}
@@ -5058,21 +5391,18 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
nl80211_policy);
if (err)
return err;
- if (nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]) {
- phy_idx = nla_get_u32(
- nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
- } else {
- struct net_device *netdev;
- err = get_rdev_dev_by_ifindex(sock_net(skb->sk),
- nl80211_fam.attrbuf,
- &rdev, &netdev);
- if (err)
- return err;
- dev_put(netdev);
- phy_idx = rdev->wiphy_idx;
- cfg80211_unlock_rdev(rdev);
+ mutex_lock(&cfg80211_mutex);
+ rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
+ nl80211_fam.attrbuf);
+ if (IS_ERR(rdev)) {
+ mutex_unlock(&cfg80211_mutex);
+ return PTR_ERR(rdev);
}
+ phy_idx = rdev->wiphy_idx;
+ rdev = NULL;
+ mutex_unlock(&cfg80211_mutex);
+
if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA])
cb->args[1] =
(long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA];
@@ -5474,7 +5804,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
struct ieee80211_channel *chan;
struct sk_buff *msg;
void *hdr;
@@ -5489,18 +5819,18 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
+ if (!rdev->ops->remain_on_channel ||
+ !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
+ return -EOPNOTSUPP;
+
/*
- * We should be on that channel for at least one jiffie,
- * and more than 5 seconds seems excessive.
+ * We should be on that channel for at least a minimum amount of
+ * time (10ms) but no longer than the driver supports.
*/
- if (!duration || !msecs_to_jiffies(duration) ||
+ if (duration < NL80211_MIN_REMAIN_ON_CHANNEL_TIME ||
duration > rdev->wiphy.max_remain_on_channel_duration)
return -EINVAL;
- if (!rdev->ops->remain_on_channel ||
- !(rdev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL))
- return -EOPNOTSUPP;
-
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
!nl80211_valid_channel_type(info, &channel_type))
return -EINVAL;
@@ -5522,7 +5852,7 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
goto free_msg;
}
- err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan,
+ err = rdev->ops->remain_on_channel(&rdev->wiphy, wdev, chan,
channel_type, duration, &cookie);
if (err)
@@ -5546,7 +5876,7 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
u64 cookie;
if (!info->attrs[NL80211_ATTR_COOKIE])
@@ -5557,7 +5887,7 @@ static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
- return rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
+ return rdev->ops->cancel_remain_on_channel(&rdev->wiphy, wdev, cookie);
}
static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
@@ -5706,7 +6036,7 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
u16 frame_type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION;
if (!info->attrs[NL80211_ATTR_FRAME_MATCH])
@@ -5715,21 +6045,24 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_FRAME_TYPE])
frame_type = nla_get_u16(info->attrs[NL80211_ATTR_FRAME_TYPE]);
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
/* not much point in registering if we can't reply */
if (!rdev->ops->mgmt_tx)
return -EOPNOTSUPP;
- return cfg80211_mlme_register_mgmt(dev->ieee80211_ptr, info->snd_pid,
- frame_type,
+ return cfg80211_mlme_register_mgmt(wdev, info->snd_pid, frame_type,
nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]),
nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH]));
}
@@ -5737,7 +6070,7 @@ static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info)
static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
struct ieee80211_channel *chan;
enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
bool channel_type_valid = false;
@@ -5758,19 +6091,32 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->mgmt_tx)
return -EOPNOTSUPP;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
if (info->attrs[NL80211_ATTR_DURATION]) {
if (!(rdev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX))
return -EINVAL;
wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
+
+ /*
+ * We should wait on the channel for at least a minimum amount
+ * of time (10ms) but no longer than the driver supports.
+ */
+ if (wait < NL80211_MIN_REMAIN_ON_CHANNEL_TIME ||
+ wait > rdev->wiphy.max_remain_on_channel_duration)
+ return -EINVAL;
+
}
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@ -5805,7 +6151,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
}
}
- err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type,
+ err = cfg80211_mlme_mgmt_tx(rdev, wdev, chan, offchan, channel_type,
channel_type_valid, wait,
nla_data(info->attrs[NL80211_ATTR_FRAME]),
nla_len(info->attrs[NL80211_ATTR_FRAME]),
@@ -5833,7 +6179,7 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = info->user_ptr[1];
u64 cookie;
if (!info->attrs[NL80211_ATTR_COOKIE])
@@ -5842,17 +6188,21 @@ static int nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *in
if (!rdev->ops->mgmt_tx_cancel_wait)
return -EOPNOTSUPP;
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
- dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ case NL80211_IFTYPE_P2P_GO:
+ break;
+ default:
return -EOPNOTSUPP;
+ }
cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
- return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, dev, cookie);
+ return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, wdev, cookie);
}
static int nl80211_set_power_save(struct sk_buff *skb, struct genl_info *info)
@@ -5938,8 +6288,35 @@ nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
[NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
[NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
[NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_TXE_RATE] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_TXE_PKTS] = { .type = NLA_U32 },
+ [NL80211_ATTR_CQM_TXE_INTVL] = { .type = NLA_U32 },
};
+static int nl80211_set_cqm_txe(struct genl_info *info,
+ u32 rate, u32 pkts, u32 intvl)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev;
+ struct net_device *dev = info->user_ptr[1];
+
+ if ((rate < 0 || rate > 100) ||
+ (intvl < 0 || intvl > NL80211_CQM_TXE_MAX_INTVL))
+ return -EINVAL;
+
+ wdev = dev->ieee80211_ptr;
+
+ if (!rdev->ops->set_cqm_txe_config)
+ return -EOPNOTSUPP;
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+ return -EOPNOTSUPP;
+
+ return rdev->ops->set_cqm_txe_config(wdev->wiphy, dev,
+ rate, pkts, intvl);
+}
+
static int nl80211_set_cqm_rssi(struct genl_info *info,
s32 threshold, u32 hysteresis)
{
@@ -5987,6 +6364,14 @@ static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
+ } else if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
+ attrs[NL80211_ATTR_CQM_TXE_PKTS] &&
+ attrs[NL80211_ATTR_CQM_TXE_INTVL]) {
+ u32 rate, pkts, intvl;
+ rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]);
+ pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);
+ intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]);
+ err = nl80211_set_cqm_txe(info, rate, pkts, intvl);
} else
err = -EINVAL;
@@ -6032,6 +6417,24 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
+ enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE] &&
+ !nl80211_valid_channel_type(info, &channel_type))
+ return -EINVAL;
+
+ setup.channel = rdev_freq_to_chan(rdev,
+ nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]),
+ channel_type);
+ if (!setup.channel)
+ return -EINVAL;
+ setup.channel_type = channel_type;
+ } else {
+ /* cfg80211_join_mesh() will sort it out */
+ setup.channel = NULL;
+ }
+
return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
}
@@ -6043,6 +6446,7 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
return cfg80211_leave_mesh(rdev, dev);
}
+#ifdef CONFIG_PM
static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -6124,8 +6528,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG];
- struct cfg80211_wowlan no_triggers = {};
struct cfg80211_wowlan new_triggers = {};
+ struct cfg80211_wowlan *ntrig;
struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
int err, i;
bool prev_enabled = rdev->wowlan;
@@ -6133,8 +6537,11 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
return -EOPNOTSUPP;
- if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS])
- goto no_triggers;
+ if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]) {
+ cfg80211_rdev_free_wowlan(rdev);
+ rdev->wowlan = NULL;
+ goto set_wakeup;
+ }
err = nla_parse(tb, MAX_NL80211_WOWLAN_TRIG,
nla_data(info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
@@ -6245,22 +6652,15 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
}
}
- if (memcmp(&new_triggers, &no_triggers, sizeof(new_triggers))) {
- struct cfg80211_wowlan *ntrig;
- ntrig = kmemdup(&new_triggers, sizeof(new_triggers),
- GFP_KERNEL);
- if (!ntrig) {
- err = -ENOMEM;
- goto error;
- }
- cfg80211_rdev_free_wowlan(rdev);
- rdev->wowlan = ntrig;
- } else {
- no_triggers:
- cfg80211_rdev_free_wowlan(rdev);
- rdev->wowlan = NULL;
+ ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL);
+ if (!ntrig) {
+ err = -ENOMEM;
+ goto error;
}
+ cfg80211_rdev_free_wowlan(rdev);
+ rdev->wowlan = ntrig;
+ set_wakeup:
if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan)
rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan);
@@ -6271,6 +6671,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
kfree(new_triggers.patterns);
return err;
}
+#endif
static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
{
@@ -6415,44 +6816,75 @@ static int nl80211_register_beacons(struct sk_buff *skb, struct genl_info *info)
#define NL80211_FLAG_CHECK_NETDEV_UP 0x08
#define NL80211_FLAG_NEED_NETDEV_UP (NL80211_FLAG_NEED_NETDEV |\
NL80211_FLAG_CHECK_NETDEV_UP)
+#define NL80211_FLAG_NEED_WDEV 0x10
+/* If a netdev is associated, it must be UP */
+#define NL80211_FLAG_NEED_WDEV_UP (NL80211_FLAG_NEED_WDEV |\
+ NL80211_FLAG_CHECK_NETDEV_UP)
static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
struct genl_info *info)
{
struct cfg80211_registered_device *rdev;
+ struct wireless_dev *wdev;
struct net_device *dev;
- int err;
bool rtnl = ops->internal_flags & NL80211_FLAG_NEED_RTNL;
if (rtnl)
rtnl_lock();
if (ops->internal_flags & NL80211_FLAG_NEED_WIPHY) {
- rdev = cfg80211_get_dev_from_info(info);
+ rdev = cfg80211_get_dev_from_info(genl_info_net(info), info);
if (IS_ERR(rdev)) {
if (rtnl)
rtnl_unlock();
return PTR_ERR(rdev);
}
info->user_ptr[0] = rdev;
- } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
- err = get_rdev_dev_by_ifindex(genl_info_net(info), info->attrs,
- &rdev, &dev);
- if (err) {
+ } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV ||
+ ops->internal_flags & NL80211_FLAG_NEED_WDEV) {
+ mutex_lock(&cfg80211_mutex);
+ wdev = __cfg80211_wdev_from_attrs(genl_info_net(info),
+ info->attrs);
+ if (IS_ERR(wdev)) {
+ mutex_unlock(&cfg80211_mutex);
if (rtnl)
rtnl_unlock();
- return err;
+ return PTR_ERR(wdev);
}
- if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP &&
- !netif_running(dev)) {
- cfg80211_unlock_rdev(rdev);
- dev_put(dev);
- if (rtnl)
- rtnl_unlock();
- return -ENETDOWN;
+
+ dev = wdev->netdev;
+ rdev = wiphy_to_dev(wdev->wiphy);
+
+ if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) {
+ if (!dev) {
+ mutex_unlock(&cfg80211_mutex);
+ if (rtnl)
+ rtnl_unlock();
+ return -EINVAL;
+ }
+
+ info->user_ptr[1] = dev;
+ } else {
+ info->user_ptr[1] = wdev;
}
+
+ if (dev) {
+ if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP &&
+ !netif_running(dev)) {
+ mutex_unlock(&cfg80211_mutex);
+ if (rtnl)
+ rtnl_unlock();
+ return -ENETDOWN;
+ }
+
+ dev_hold(dev);
+ }
+
+ cfg80211_lock_rdev(rdev);
+
+ mutex_unlock(&cfg80211_mutex);
+
info->user_ptr[0] = rdev;
- info->user_ptr[1] = dev;
}
return 0;
@@ -6463,8 +6895,16 @@ static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb,
{
if (info->user_ptr[0])
cfg80211_unlock_rdev(info->user_ptr[0]);
- if (info->user_ptr[1])
- dev_put(info->user_ptr[1]);
+ if (info->user_ptr[1]) {
+ if (ops->internal_flags & NL80211_FLAG_NEED_WDEV) {
+ struct wireless_dev *wdev = info->user_ptr[1];
+
+ if (wdev->netdev)
+ dev_put(wdev->netdev);
+ } else {
+ dev_put(info->user_ptr[1]);
+ }
+ }
if (ops->internal_flags & NL80211_FLAG_NEED_RTNL)
rtnl_unlock();
}
@@ -6491,7 +6931,7 @@ static struct genl_ops nl80211_ops[] = {
.dumpit = nl80211_dump_interface,
.policy = nl80211_policy,
/* can be retrieved by unprivileged users */
- .internal_flags = NL80211_FLAG_NEED_NETDEV,
+ .internal_flags = NL80211_FLAG_NEED_WDEV,
},
{
.cmd = NL80211_CMD_SET_INTERFACE,
@@ -6514,7 +6954,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_del_interface,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_WDEV |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6685,7 +7125,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_trigger_scan,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6826,7 +7266,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_remain_on_channel,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6834,7 +7274,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_cancel_remain_on_channel,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6850,7 +7290,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_register_mgmt,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV |
+ .internal_flags = NL80211_FLAG_NEED_WDEV |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6858,7 +7298,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_tx_mgmt,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6866,7 +7306,7 @@ static struct genl_ops nl80211_ops[] = {
.doit = nl80211_tx_mgmt_cancel_wait,
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
- .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ .internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
{
@@ -6925,6 +7365,7 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
NL80211_FLAG_NEED_RTNL,
},
+#ifdef CONFIG_PM
{
.cmd = NL80211_CMD_GET_WOWLAN,
.doit = nl80211_get_wowlan,
@@ -6941,6 +7382,7 @@ static struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_WIPHY |
NL80211_FLAG_NEED_RTNL,
},
+#endif
{
.cmd = NL80211_CMD_SET_REKEY_OFFLOAD,
.doit = nl80211_set_rekey_data,
@@ -7075,7 +7517,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
static int nl80211_send_scan_msg(struct sk_buff *msg,
struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
+ struct wireless_dev *wdev,
u32 pid, u32 seq, int flags,
u32 cmd)
{
@@ -7086,7 +7528,9 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
return -1;
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
+ (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+ wdev->netdev->ifindex)) ||
+ nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
goto nla_put_failure;
/* ignore errors and send incomplete event anyway */
@@ -7123,15 +7567,15 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg,
}
void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
- struct net_device *netdev)
+ struct wireless_dev *wdev)
{
struct sk_buff *msg;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
- if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
+ if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
NL80211_CMD_TRIGGER_SCAN) < 0) {
nlmsg_free(msg);
return;
@@ -7142,7 +7586,7 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
}
void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
- struct net_device *netdev)
+ struct wireless_dev *wdev)
{
struct sk_buff *msg;
@@ -7150,7 +7594,7 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
+ if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
nlmsg_free(msg);
return;
@@ -7161,7 +7605,7 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
}
void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
- struct net_device *netdev)
+ struct wireless_dev *wdev)
{
struct sk_buff *msg;
@@ -7169,7 +7613,7 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
+ if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
NL80211_CMD_SCAN_ABORTED) < 0) {
nlmsg_free(msg);
return;
@@ -7203,7 +7647,7 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
{
struct sk_buff *msg;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
@@ -7419,7 +7863,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -7459,7 +7903,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -7497,7 +7941,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
@@ -7692,7 +8136,7 @@ nla_put_failure:
static void nl80211_send_remain_on_chan_event(
int cmd, struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u64 cookie,
+ struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration, gfp_t gfp)
@@ -7711,7 +8155,9 @@ static void nl80211_send_remain_on_chan_event(
}
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ (wdev->netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+ wdev->netdev->ifindex)) ||
+ nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) ||
nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
@@ -7733,23 +8179,24 @@ static void nl80211_send_remain_on_chan_event(
}
void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u64 cookie,
+ struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration, gfp_t gfp)
{
nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
- rdev, netdev, cookie, chan,
+ rdev, wdev, cookie, chan,
channel_type, duration, gfp);
}
void nl80211_send_remain_on_channel_cancel(
- struct cfg80211_registered_device *rdev, struct net_device *netdev,
+ struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
u64 cookie, struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type, gfp_t gfp)
{
nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
- rdev, netdev, cookie, chan,
+ rdev, wdev, cookie, chan,
channel_type, 0, gfp);
}
@@ -7759,7 +8206,7 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
{
struct sk_buff *msg;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -7780,7 +8227,7 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -7863,10 +8310,11 @@ bool nl80211_unexpected_4addr_frame(struct net_device *dev,
}
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u32 nlpid,
+ struct wireless_dev *wdev, u32 nlpid,
int freq, int sig_dbm,
const u8 *buf, size_t len, gfp_t gfp)
{
+ struct net_device *netdev = wdev->netdev;
struct sk_buff *msg;
void *hdr;
@@ -7881,7 +8329,8 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
}
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+ netdev->ifindex)) ||
nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
(sig_dbm &&
nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
@@ -7899,10 +8348,11 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
}
void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u64 cookie,
+ struct wireless_dev *wdev, u64 cookie,
const u8 *buf, size_t len, bool ack,
gfp_t gfp)
{
+ struct net_device *netdev = wdev->netdev;
struct sk_buff *msg;
void *hdr;
@@ -7917,7 +8367,8 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
}
if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
+ netdev->ifindex)) ||
nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
(ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
@@ -7943,7 +8394,7 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
struct nlattr *pinfoattr;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -7986,7 +8437,7 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
struct nlattr *rekey_attr;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -8030,7 +8481,7 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
struct nlattr *attr;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -8074,7 +8525,7 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -8101,6 +8552,56 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
}
void
+nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *peer,
+ u32 num_packets, u32 rate, u32 intvl, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ struct nlattr *pinfoattr;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
+ goto nla_put_failure;
+
+ pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
+ if (!pinfoattr)
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_PKTS, num_packets))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_RATE, rate))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_CQM_TXE_INTVL, intvl))
+ goto nla_put_failure;
+
+ nla_nest_end(msg, pinfoattr);
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
+void
nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *peer,
u32 num_packets, gfp_t gfp)
@@ -8109,7 +8610,7 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
struct nlattr *pinfoattr;
void *hdr;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -8153,7 +8654,7 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
void *hdr;
int err;
- msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -8241,7 +8742,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
rcu_read_lock();
list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
- list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
+ list_for_each_entry_rcu(wdev, &rdev->wdev_list, list)
cfg80211_mlme_unregister_socket(wdev, notify->pid);
if (rdev->ap_beacons_nlpid == notify->pid)
rdev->ap_beacons_nlpid = 0;
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 01a1122c3b33..9f2616fffb40 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -7,11 +7,11 @@ int nl80211_init(void);
void nl80211_exit(void);
void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
- struct net_device *netdev);
+ struct wireless_dev *wdev);
void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
- struct net_device *netdev);
+ struct wireless_dev *wdev);
void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
- struct net_device *netdev);
+ struct wireless_dev *wdev);
void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u32 cmd);
void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
@@ -74,13 +74,13 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
gfp_t gfp);
void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
- u64 cookie,
+ struct wireless_dev *wdev, u64 cookie,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type,
unsigned int duration, gfp_t gfp);
void nl80211_send_remain_on_channel_cancel(
- struct cfg80211_registered_device *rdev, struct net_device *netdev,
+ struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
u64 cookie, struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type, gfp_t gfp);
@@ -92,11 +92,11 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
gfp_t gfp);
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u32 nlpid,
+ struct wireless_dev *wdev, u32 nlpid,
int freq, int sig_dbm,
const u8 *buf, size_t len, gfp_t gfp);
void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, u64 cookie,
+ struct wireless_dev *wdev, u64 cookie,
const u8 *buf, size_t len, bool ack,
gfp_t gfp);
@@ -110,6 +110,11 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *peer,
u32 num_packets, gfp_t gfp);
+void
+nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *peer,
+ u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
+
void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *replay_ctr, gfp_t gfp);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index baf5704740ee..2303ee73b50a 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -97,9 +97,16 @@ const struct ieee80211_regdomain *cfg80211_regdomain;
* - cfg80211_world_regdom
* - cfg80211_regdom
* - last_request
+ * - reg_num_devs_support_basehint
*/
static DEFINE_MUTEX(reg_mutex);
+/*
+ * Number of devices that registered to the core
+ * that support cellular base station regulatory hints
+ */
+static int reg_num_devs_support_basehint;
+
static inline void assert_reg_lock(void)
{
lockdep_assert_held(&reg_mutex);
@@ -129,7 +136,7 @@ static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
/* We keep a static world regulatory domain in case of the absence of CRDA */
static const struct ieee80211_regdomain world_regdom = {
- .n_reg_rules = 5,
+ .n_reg_rules = 6,
.alpha2 = "00",
.reg_rules = {
/* IEEE 802.11b/g, channels 1..11 */
@@ -156,6 +163,9 @@ static const struct ieee80211_regdomain world_regdom = {
REG_RULE(5745-10, 5825+10, 40, 6, 20,
NL80211_RRF_PASSIVE_SCAN |
NL80211_RRF_NO_IBSS),
+
+ /* IEEE 802.11ad (60gHz), channels 1..3 */
+ REG_RULE(56160+2160*1-1080, 56160+2160*3+1080, 2160, 0, 0, 0),
}
};
@@ -908,6 +918,61 @@ static void handle_band(struct wiphy *wiphy,
handle_channel(wiphy, initiator, band, i);
}
+static bool reg_request_cell_base(struct regulatory_request *request)
+{
+ if (request->initiator != NL80211_REGDOM_SET_BY_USER)
+ return false;
+ if (request->user_reg_hint_type != NL80211_USER_REG_HINT_CELL_BASE)
+ return false;
+ return true;
+}
+
+bool reg_last_request_cell_base(void)
+{
+ bool val;
+ assert_cfg80211_lock();
+
+ mutex_lock(&reg_mutex);
+ val = reg_request_cell_base(last_request);
+ mutex_unlock(&reg_mutex);
+ return val;
+}
+
+#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
+
+/* Core specific check */
+static int reg_ignore_cell_hint(struct regulatory_request *pending_request)
+{
+ if (!reg_num_devs_support_basehint)
+ return -EOPNOTSUPP;
+
+ if (reg_request_cell_base(last_request)) {
+ if (!regdom_changes(pending_request->alpha2))
+ return -EALREADY;
+ return 0;
+ }
+ return 0;
+}
+
+/* Device specific check */
+static bool reg_dev_ignore_cell_hint(struct wiphy *wiphy)
+{
+ if (!(wiphy->features & NL80211_FEATURE_CELL_BASE_REG_HINTS))
+ return true;
+ return false;
+}
+#else
+static int reg_ignore_cell_hint(struct regulatory_request *pending_request)
+{
+ return -EOPNOTSUPP;
+}
+static int reg_dev_ignore_cell_hint(struct wiphy *wiphy)
+{
+ return true;
+}
+#endif
+
+
static bool ignore_reg_update(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator)
{
@@ -941,6 +1006,9 @@ static bool ignore_reg_update(struct wiphy *wiphy,
return true;
}
+ if (reg_request_cell_base(last_request))
+ return reg_dev_ignore_cell_hint(wiphy);
+
return false;
}
@@ -1166,14 +1234,6 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
wiphy->reg_notifier(wiphy, last_request);
}
-void regulatory_update(struct wiphy *wiphy,
- enum nl80211_reg_initiator setby)
-{
- mutex_lock(&reg_mutex);
- wiphy_update_regulatory(wiphy, setby);
- mutex_unlock(&reg_mutex);
-}
-
static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
{
struct cfg80211_registered_device *rdev;
@@ -1304,6 +1364,13 @@ static int ignore_request(struct wiphy *wiphy,
return 0;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ if (reg_request_cell_base(last_request)) {
+ /* Trust a Cell base station over the AP's country IE */
+ if (regdom_changes(pending_request->alpha2))
+ return -EOPNOTSUPP;
+ return -EALREADY;
+ }
+
last_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
if (unlikely(!is_an_alpha2(pending_request->alpha2)))
@@ -1348,6 +1415,12 @@ static int ignore_request(struct wiphy *wiphy,
return REG_INTERSECT;
case NL80211_REGDOM_SET_BY_USER:
+ if (reg_request_cell_base(pending_request))
+ return reg_ignore_cell_hint(pending_request);
+
+ if (reg_request_cell_base(last_request))
+ return -EOPNOTSUPP;
+
if (last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)
return REG_INTERSECT;
/*
@@ -1637,7 +1710,8 @@ static int regulatory_hint_core(const char *alpha2)
}
/* User hints */
-int regulatory_hint_user(const char *alpha2)
+int regulatory_hint_user(const char *alpha2,
+ enum nl80211_user_reg_hint_type user_reg_hint_type)
{
struct regulatory_request *request;
@@ -1651,6 +1725,7 @@ int regulatory_hint_user(const char *alpha2)
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_USER;
+ request->user_reg_hint_type = user_reg_hint_type;
queue_regulatory_request(request);
@@ -1903,7 +1978,7 @@ static void restore_regulatory_settings(bool reset_user)
* settings, user regulatory settings takes precedence.
*/
if (is_an_alpha2(alpha2))
- regulatory_hint_user(user_alpha2);
+ regulatory_hint_user(user_alpha2, NL80211_USER_REG_HINT_USER);
if (list_empty(&tmp_reg_req_list))
return;
@@ -2078,9 +2153,16 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
else {
if (is_unknown_alpha2(rd->alpha2))
pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
- else
- pr_info("Regulatory domain changed to country: %c%c\n",
- rd->alpha2[0], rd->alpha2[1]);
+ else {
+ if (reg_request_cell_base(last_request))
+ pr_info("Regulatory domain changed "
+ "to country: %c%c by Cell Station\n",
+ rd->alpha2[0], rd->alpha2[1]);
+ else
+ pr_info("Regulatory domain changed "
+ "to country: %c%c\n",
+ rd->alpha2[0], rd->alpha2[1]);
+ }
}
print_dfs_region(rd->dfs_region);
print_rd_rules(rd);
@@ -2125,7 +2207,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
* checking if the alpha2 changes if CRDA was already called
*/
if (!regdom_changes(rd->alpha2))
- return -EINVAL;
+ return -EALREADY;
}
/*
@@ -2245,6 +2327,9 @@ int set_regdom(const struct ieee80211_regdomain *rd)
/* Note that this doesn't update the wiphys, this is done below */
r = __set_regdom(rd);
if (r) {
+ if (r == -EALREADY)
+ reg_set_request_processed();
+
kfree(rd);
mutex_unlock(&reg_mutex);
return r;
@@ -2287,8 +2372,22 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#endif /* CONFIG_HOTPLUG */
+void wiphy_regulatory_register(struct wiphy *wiphy)
+{
+ assert_cfg80211_lock();
+
+ mutex_lock(&reg_mutex);
+
+ if (!reg_dev_ignore_cell_hint(wiphy))
+ reg_num_devs_support_basehint++;
+
+ wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
+
+ mutex_unlock(&reg_mutex);
+}
+
/* Caller must hold cfg80211_mutex */
-void reg_device_remove(struct wiphy *wiphy)
+void wiphy_regulatory_deregister(struct wiphy *wiphy)
{
struct wiphy *request_wiphy = NULL;
@@ -2296,6 +2395,9 @@ void reg_device_remove(struct wiphy *wiphy)
mutex_lock(&reg_mutex);
+ if (!reg_dev_ignore_cell_hint(wiphy))
+ reg_num_devs_support_basehint--;
+
kfree(wiphy->regd);
if (last_request)
@@ -2361,7 +2463,8 @@ int __init regulatory_init(void)
* as a user hint.
*/
if (!is_world_regdom(ieee80211_regdom))
- regulatory_hint_user(ieee80211_regdom);
+ regulatory_hint_user(ieee80211_regdom,
+ NL80211_USER_REG_HINT_USER);
return 0;
}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index e2aaaf525a22..f023c8a31c60 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -22,17 +22,19 @@ bool is_world_regdom(const char *alpha2);
bool reg_is_valid_request(const char *alpha2);
bool reg_supported_dfs_region(u8 dfs_region);
-int regulatory_hint_user(const char *alpha2);
+int regulatory_hint_user(const char *alpha2,
+ enum nl80211_user_reg_hint_type user_reg_hint_type);
int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env);
-void reg_device_remove(struct wiphy *wiphy);
+void wiphy_regulatory_register(struct wiphy *wiphy);
+void wiphy_regulatory_deregister(struct wiphy *wiphy);
int __init regulatory_init(void);
void regulatory_exit(void);
int set_regdom(const struct ieee80211_regdomain *rd);
-void regulatory_update(struct wiphy *wiphy, enum nl80211_reg_initiator setby);
+bool reg_last_request_cell_base(void);
/**
* regulatory_hint_found_beacon - hints a beacon was found on a channel
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index af2b1caa37fa..848523a2b22f 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -23,7 +23,7 @@
void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
{
struct cfg80211_scan_request *request;
- struct net_device *dev;
+ struct wireless_dev *wdev;
#ifdef CONFIG_CFG80211_WEXT
union iwreq_data wrqu;
#endif
@@ -35,29 +35,31 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
if (!request)
return;
- dev = request->dev;
+ wdev = request->wdev;
/*
* This must be before sending the other events!
* Otherwise, wpa_supplicant gets completely confused with
* wext events.
*/
- cfg80211_sme_scan_done(dev);
+ if (wdev->netdev)
+ cfg80211_sme_scan_done(wdev->netdev);
if (request->aborted)
- nl80211_send_scan_aborted(rdev, dev);
+ nl80211_send_scan_aborted(rdev, wdev);
else
- nl80211_send_scan_done(rdev, dev);
+ nl80211_send_scan_done(rdev, wdev);
#ifdef CONFIG_CFG80211_WEXT
- if (!request->aborted) {
+ if (wdev->netdev && !request->aborted) {
memset(&wrqu, 0, sizeof(wrqu));
- wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
+ wireless_send_event(wdev->netdev, SIOCGIWSCAN, &wrqu, NULL);
}
#endif
- dev_put(dev);
+ if (wdev->netdev)
+ dev_put(wdev->netdev);
rdev->scan_req = NULL;
@@ -955,7 +957,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
}
creq->wiphy = wiphy;
- creq->dev = dev;
+ creq->wdev = dev->ieee80211_ptr;
/* SSIDs come after channels */
creq->ssids = (void *)&creq->channels[n_channels];
creq->n_channels = n_channels;
@@ -1024,12 +1026,12 @@ int cfg80211_wext_siwscan(struct net_device *dev,
creq->rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
rdev->scan_req = creq;
- err = rdev->ops->scan(wiphy, dev, creq);
+ err = rdev->ops->scan(wiphy, creq);
if (err) {
rdev->scan_req = NULL;
/* creq will be freed below */
} else {
- nl80211_send_scan_start(rdev, dev);
+ nl80211_send_scan_start(rdev, dev->ieee80211_ptr);
/* creq now owned by driver */
creq = NULL;
dev_hold(dev);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f7e937ff8978..6f39cb808302 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -51,7 +51,7 @@ static bool cfg80211_is_all_idle(void)
*/
list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
cfg80211_lock_rdev(rdev);
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
wdev_lock(wdev);
if (wdev->sme_state != CFG80211_SME_IDLE)
is_all_idle = false;
@@ -136,15 +136,15 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
wdev->conn->params.ssid_len);
request->ssids[0].ssid_len = wdev->conn->params.ssid_len;
- request->dev = wdev->netdev;
+ request->wdev = wdev;
request->wiphy = &rdev->wiphy;
rdev->scan_req = request;
- err = rdev->ops->scan(wdev->wiphy, wdev->netdev, request);
+ err = rdev->ops->scan(wdev->wiphy, request);
if (!err) {
wdev->conn->state = CFG80211_CONN_SCANNING;
- nl80211_send_scan_start(rdev, wdev->netdev);
+ nl80211_send_scan_start(rdev, wdev);
dev_hold(wdev->netdev);
} else {
rdev->scan_req = NULL;
@@ -221,7 +221,7 @@ void cfg80211_conn_work(struct work_struct *work)
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
wdev_lock(wdev);
if (!netif_running(wdev->netdev)) {
wdev_unlock(wdev);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 316cfd00914f..26f8cd30f712 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -35,19 +35,29 @@ int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band)
{
/* see 802.11 17.3.8.3.2 and Annex J
* there are overlapping channel numbers in 5GHz and 2GHz bands */
- if (band == IEEE80211_BAND_5GHZ) {
- if (chan >= 182 && chan <= 196)
- return 4000 + chan * 5;
- else
- return 5000 + chan * 5;
- } else { /* IEEE80211_BAND_2GHZ */
+ if (chan <= 0)
+ return 0; /* not supported */
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
if (chan == 14)
return 2484;
else if (chan < 14)
return 2407 + chan * 5;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ if (chan >= 182 && chan <= 196)
+ return 4000 + chan * 5;
else
- return 0; /* not supported */
+ return 5000 + chan * 5;
+ break;
+ case IEEE80211_BAND_60GHZ:
+ if (chan < 5)
+ return 56160 + chan * 2160;
+ break;
+ default:
+ ;
}
+ return 0; /* not supported */
}
EXPORT_SYMBOL(ieee80211_channel_to_frequency);
@@ -60,8 +70,12 @@ int ieee80211_frequency_to_channel(int freq)
return (freq - 2407) / 5;
else if (freq >= 4910 && freq <= 4980)
return (freq - 4000) / 5;
- else
+ else if (freq <= 45000) /* DMG band lower limit */
return (freq - 5000) / 5;
+ else if (freq >= 58320 && freq <= 64800)
+ return (freq - 56160) / 2160;
+ else
+ return 0;
}
EXPORT_SYMBOL(ieee80211_frequency_to_channel);
@@ -137,6 +151,11 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
}
WARN_ON(want != 0 && want != 3 && want != 6);
break;
+ case IEEE80211_BAND_60GHZ:
+ /* check for mandatory HT MCS 1..4 */
+ WARN_ON(!sband->ht_cap.ht_supported);
+ WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e);
+ break;
case IEEE80211_NUM_BANDS:
WARN_ON(1);
break;
@@ -774,7 +793,7 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev)
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->netdev_list, list)
+ list_for_each_entry(wdev, &rdev->wdev_list, list)
cfg80211_process_wdev_events(wdev);
mutex_unlock(&rdev->devlist_mtx);
@@ -805,8 +824,10 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
return -EBUSY;
if (ntype != otype && netif_running(dev)) {
+ mutex_lock(&rdev->devlist_mtx);
err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
ntype);
+ mutex_unlock(&rdev->devlist_mtx);
if (err)
return err;
@@ -814,6 +835,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
dev->ieee80211_ptr->mesh_id_up_len = 0;
switch (otype) {
+ case NL80211_IFTYPE_AP:
+ cfg80211_stop_ap(rdev, dev);
+ break;
case NL80211_IFTYPE_ADHOC:
cfg80211_leave_ibss(rdev, dev, false);
break;
@@ -868,15 +892,69 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
}
}
+ if (!err && ntype != otype && netif_running(dev)) {
+ cfg80211_update_iface_num(rdev, ntype, 1);
+ cfg80211_update_iface_num(rdev, otype, -1);
+ }
+
return err;
}
-u16 cfg80211_calculate_bitrate(struct rate_info *rate)
+static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
+{
+ static const u32 __mcs2bitrate[] = {
+ /* control PHY */
+ [0] = 275,
+ /* SC PHY */
+ [1] = 3850,
+ [2] = 7700,
+ [3] = 9625,
+ [4] = 11550,
+ [5] = 12512, /* 1251.25 mbps */
+ [6] = 15400,
+ [7] = 19250,
+ [8] = 23100,
+ [9] = 25025,
+ [10] = 30800,
+ [11] = 38500,
+ [12] = 46200,
+ /* OFDM PHY */
+ [13] = 6930,
+ [14] = 8662, /* 866.25 mbps */
+ [15] = 13860,
+ [16] = 17325,
+ [17] = 20790,
+ [18] = 27720,
+ [19] = 34650,
+ [20] = 41580,
+ [21] = 45045,
+ [22] = 51975,
+ [23] = 62370,
+ [24] = 67568, /* 6756.75 mbps */
+ /* LP-SC PHY */
+ [25] = 6260,
+ [26] = 8340,
+ [27] = 11120,
+ [28] = 12510,
+ [29] = 16680,
+ [30] = 22240,
+ [31] = 25030,
+ };
+
+ if (WARN_ON_ONCE(rate->mcs >= ARRAY_SIZE(__mcs2bitrate)))
+ return 0;
+
+ return __mcs2bitrate[rate->mcs];
+}
+
+u32 cfg80211_calculate_bitrate(struct rate_info *rate)
{
int modulation, streams, bitrate;
if (!(rate->flags & RATE_INFO_FLAGS_MCS))
return rate->legacy;
+ if (rate->flags & RATE_INFO_FLAGS_60G)
+ return cfg80211_calculate_bitrate_60g(rate);
/* the formula below does only work for MCS values smaller than 32 */
if (WARN_ON_ONCE(rate->mcs >= 32))
@@ -916,7 +994,7 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ list_for_each_entry(wdev, &rdev->wdev_list, list) {
if (!wdev->beacon_interval)
continue;
if (wdev->beacon_interval != beacon_int) {
@@ -930,28 +1008,49 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
return res;
}
-int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev,
- enum nl80211_iftype iftype)
+int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
+ enum nl80211_iftype iftype,
+ struct ieee80211_channel *chan,
+ enum cfg80211_chan_mode chanmode)
{
struct wireless_dev *wdev_iter;
u32 used_iftypes = BIT(iftype);
int num[NUM_NL80211_IFTYPES];
+ struct ieee80211_channel
+ *used_channels[CFG80211_MAX_NUM_DIFFERENT_CHANNELS];
+ struct ieee80211_channel *ch;
+ enum cfg80211_chan_mode chmode;
+ int num_different_channels = 0;
int total = 1;
int i, j;
ASSERT_RTNL();
+ lockdep_assert_held(&rdev->devlist_mtx);
/* Always allow software iftypes */
if (rdev->wiphy.software_iftypes & BIT(iftype))
return 0;
memset(num, 0, sizeof(num));
+ memset(used_channels, 0, sizeof(used_channels));
num[iftype] = 1;
- mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev_iter, &rdev->netdev_list, list) {
+ switch (chanmode) {
+ case CHAN_MODE_UNDEFINED:
+ break;
+ case CHAN_MODE_SHARED:
+ WARN_ON(!chan);
+ used_channels[0] = chan;
+ num_different_channels++;
+ break;
+ case CHAN_MODE_EXCLUSIVE:
+ num_different_channels++;
+ break;
+ }
+
+ list_for_each_entry(wdev_iter, &rdev->wdev_list, list) {
if (wdev_iter == wdev)
continue;
if (!netif_running(wdev_iter->netdev))
@@ -960,11 +1059,42 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
continue;
+ /*
+ * We may be holding the "wdev" mutex, but now need to lock
+ * wdev_iter. This is OK because once we get here wdev_iter
+ * is not wdev (tested above), but we need to use the nested
+ * locking for lockdep.
+ */
+ mutex_lock_nested(&wdev_iter->mtx, 1);
+ __acquire(wdev_iter->mtx);
+ cfg80211_get_chan_state(wdev_iter, &ch, &chmode);
+ wdev_unlock(wdev_iter);
+
+ switch (chmode) {
+ case CHAN_MODE_UNDEFINED:
+ break;
+ case CHAN_MODE_SHARED:
+ for (i = 0; i < CFG80211_MAX_NUM_DIFFERENT_CHANNELS; i++)
+ if (!used_channels[i] || used_channels[i] == ch)
+ break;
+
+ if (i == CFG80211_MAX_NUM_DIFFERENT_CHANNELS)
+ return -EBUSY;
+
+ if (used_channels[i] == NULL) {
+ used_channels[i] = ch;
+ num_different_channels++;
+ }
+ break;
+ case CHAN_MODE_EXCLUSIVE:
+ num_different_channels++;
+ break;
+ }
+
num[wdev_iter->iftype]++;
total++;
used_iftypes |= BIT(wdev_iter->iftype);
}
- mutex_unlock(&rdev->devlist_mtx);
if (total == 1)
return 0;
@@ -976,12 +1106,15 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
c = &rdev->wiphy.iface_combinations[i];
+ if (total > c->max_interfaces)
+ continue;
+ if (num_different_channels > c->num_different_channels)
+ continue;
+
limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
GFP_KERNEL);
if (!limits)
return -ENOMEM;
- if (total > c->max_interfaces)
- goto cont;
for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
if (rdev->wiphy.software_iftypes & BIT(iftype))
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 6a6181a673ca..494379eb464f 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -796,7 +796,15 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
case NL80211_IFTYPE_ADHOC:
return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_WDS:
+ freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+ if (freq < 0)
+ return freq;
+ if (freq == 0)
+ return -EINVAL;
+ mutex_lock(&rdev->devlist_mtx);
+ err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT);
+ mutex_unlock(&rdev->devlist_mtx);
+ return err;
case NL80211_IFTYPE_MESH_POINT:
freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
if (freq < 0)
@@ -804,9 +812,8 @@ static int cfg80211_wext_siwfreq(struct net_device *dev,
if (freq == 0)
return -EINVAL;
mutex_lock(&rdev->devlist_mtx);
- wdev_lock(wdev);
- err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
- wdev_unlock(wdev);
+ err = cfg80211_set_mesh_freq(rdev, wdev, freq,
+ NL80211_CHAN_NO_HT);
mutex_unlock(&rdev->devlist_mtx);
return err;
default:
@@ -832,18 +839,14 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
if (!rdev->ops->get_channel)
return -EINVAL;
- chan = rdev->ops->get_channel(wdev->wiphy, &channel_type);
+ chan = rdev->ops->get_channel(wdev->wiphy, wdev, &channel_type);
if (!chan)
return -EINVAL;
freq->m = chan->center_freq;
freq->e = 6;
return 0;
default:
- if (!wdev->channel)
- return -EINVAL;
- freq->m = wdev->channel->center_freq;
- freq->e = 6;
- return 0;
+ return -EINVAL;
}
}
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 7decbd357d51..1f773f668d1a 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -111,9 +111,15 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
wdev->wext.connect.channel = chan;
- /* SSID is not set, we just want to switch channel */
+ /*
+ * SSID is not set, we just want to switch monitor channel,
+ * this is really just backward compatibility, if the SSID
+ * is set then we use the channel to select the BSS to use
+ * to connect to instead. If we were connected on another
+ * channel we disconnected above and reconnect below.
+ */
if (chan && !wdev->wext.connect.ssid_len) {
- err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
+ err = cfg80211_set_monitor_channel(rdev, freq, NL80211_CHAN_NO_HT);
goto out;
}
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index cf6366270054..277c8d2448d6 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -66,7 +66,7 @@ out:
/**
* __x25_remove_route - remove route from x25_route_list
- * @rt - route to remove
+ * @rt: route to remove
*
* Remove route from x25_route_list. If it was there.
* Caller must hold x25_route_list_lock.
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ccfbd328a69d..c5a5165a5927 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1350,11 +1350,12 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
default:
BUG();
}
- xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
+ xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);
if (likely(xdst)) {
- memset(&xdst->u.rt6.rt6i_table, 0,
- sizeof(*xdst) - sizeof(struct dst_entry));
+ struct dst_entry *dst = &xdst->u.dst;
+
+ memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
xdst->flo.ops = &xfrm_bundle_fc_ops;
} else
xdst = ERR_PTR(-ENOBUFS);
@@ -1476,7 +1477,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
dst1->xfrm = xfrm[i];
xdst->xfrm_genid = xfrm[i]->genid;
- dst1->obsolete = -1;
+ dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
dst1->flags |= DST_HOST;
dst1->lastuse = now;
@@ -1500,9 +1501,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
if (!dev)
goto free_dst;
- /* Copy neighbour for reachability confirmation */
- dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst)));
-
xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
xfrm_init_pmtu(dst_prev);
@@ -2221,12 +2219,13 @@ EXPORT_SYMBOL(__xfrm_route_forward);
static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
- * to "-1" to force all XFRM destinations to get validated by
- * dst_ops->check on every use. We do this because when a
- * normal route referenced by an XFRM dst is obsoleted we do
- * not go looking around for all parent referencing XFRM dsts
- * so that we can invalidate them. It is just too much work.
- * Instead we make the checks here on every use. For example:
+ * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
+ * get validated by dst_ops->check on every use. We do this
+ * because when a normal route referenced by an XFRM dst is
+ * obsoleted we do not go looking around for all parent
+ * referencing XFRM dsts so that we can invalidate them. It
+ * is just too much work. Instead we make the checks here on
+ * every use. For example:
*
* XFRM dst A --> IPv4 dst X
*
@@ -2236,9 +2235,9 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
* stale_bundle() check.
*
* When a policy's bundle is pruned, we dst_free() the XFRM
- * dst which causes it's ->obsolete field to be set to a
- * positive non-zero integer. If an XFRM dst has been pruned
- * like this, we want to force a new route lookup.
+ * dst which causes it's ->obsolete field to be set to
+ * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like
+ * this, we want to force a new route lookup.
*/
if (dst->obsolete < 0 && !stale_bundle(dst))
return dst;
@@ -2404,9 +2403,11 @@ static unsigned int xfrm_mtu(const struct dst_entry *dst)
return mtu ? : dst_mtu(dst->path);
}
-static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
+ struct sk_buff *skb,
+ const void *daddr)
{
- return dst_neigh_lookup(dst->path, daddr);
+ return dst->path->ops->neigh_lookup(dst, skb, daddr);
}
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 44293b3fd6a1..e75d8e47f35c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -754,58 +754,67 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
struct xfrm_usersa_info *p,
struct sk_buff *skb)
{
- copy_to_user_state(x, p);
-
- if (x->coaddr &&
- nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
- goto nla_put_failure;
-
- if (x->lastused &&
- nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
- goto nla_put_failure;
-
- if (x->aead &&
- nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
- goto nla_put_failure;
-
- if (x->aalg &&
- (copy_to_user_auth(x->aalg, skb) ||
- nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
- xfrm_alg_auth_len(x->aalg), x->aalg)))
- goto nla_put_failure;
-
- if (x->ealg &&
- nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
- goto nla_put_failure;
-
- if (x->calg &&
- nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
- goto nla_put_failure;
-
- if (x->encap &&
- nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
- goto nla_put_failure;
+ int ret = 0;
- if (x->tfcpad &&
- nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
- goto nla_put_failure;
-
- if (xfrm_mark_put(skb, &x->mark))
- goto nla_put_failure;
-
- if (x->replay_esn &&
- nla_put(skb, XFRMA_REPLAY_ESN_VAL,
- xfrm_replay_state_esn_len(x->replay_esn),
- x->replay_esn))
- goto nla_put_failure;
-
- if (x->security && copy_sec_ctx(x->security, skb))
- goto nla_put_failure;
-
- return 0;
+ copy_to_user_state(x, p);
-nla_put_failure:
- return -EMSGSIZE;
+ if (x->coaddr) {
+ ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
+ if (ret)
+ goto out;
+ }
+ if (x->lastused) {
+ ret = nla_put_u64(skb, XFRMA_LASTUSED, x->lastused);
+ if (ret)
+ goto out;
+ }
+ if (x->aead) {
+ ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
+ if (ret)
+ goto out;
+ }
+ if (x->aalg) {
+ ret = copy_to_user_auth(x->aalg, skb);
+ if (!ret)
+ ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
+ xfrm_alg_auth_len(x->aalg), x->aalg);
+ if (ret)
+ goto out;
+ }
+ if (x->ealg) {
+ ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
+ if (ret)
+ goto out;
+ }
+ if (x->calg) {
+ ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
+ if (ret)
+ goto out;
+ }
+ if (x->encap) {
+ ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+ if (ret)
+ goto out;
+ }
+ if (x->tfcpad) {
+ ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
+ if (ret)
+ goto out;
+ }
+ ret = xfrm_mark_put(skb, &x->mark);
+ if (ret)
+ goto out;
+ if (x->replay_esn) {
+ ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+ xfrm_replay_state_esn_len(x->replay_esn),
+ x->replay_esn);
+ if (ret)
+ goto out;
+ }
+ if (x->security)
+ ret = copy_sec_ctx(x->security, skb);
+out:
+ return ret;
}
static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
@@ -825,15 +834,12 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
p = nlmsg_data(nlh);
err = copy_to_user_state_extra(x, p, skb);
- if (err)
- goto nla_put_failure;
-
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
nlmsg_end(skb, nlh);
return 0;
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return err;
}
static int xfrm_dump_sa_done(struct netlink_callback *cb)
@@ -904,6 +910,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
struct xfrmu_spdinfo spc;
struct xfrmu_spdhinfo sph;
struct nlmsghdr *nlh;
+ int err;
u32 *f;
nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
@@ -922,15 +929,15 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
sph.spdhcnt = si.spdhcnt;
sph.spdhmcnt = si.spdhmcnt;
- if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) ||
- nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph))
- goto nla_put_failure;
+ err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
+ if (!err)
+ err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -965,6 +972,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
struct xfrmk_sadinfo si;
struct xfrmu_sadhinfo sh;
struct nlmsghdr *nlh;
+ int err;
u32 *f;
nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
@@ -978,15 +986,15 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
sh.sadhmcnt = si.sadhmcnt;
sh.sadhcnt = si.sadhcnt;
- if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) ||
- nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh))
- goto nla_put_failure;
+ err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
+ if (!err)
+ err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -1439,9 +1447,8 @@ static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buf
static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
- if (xp->security) {
+ if (xp->security)
return copy_sec_ctx(xp->security, skb);
- }
return 0;
}
static inline size_t userpolicy_type_attrsize(void)
@@ -1477,6 +1484,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
struct sk_buff *in_skb = sp->in_skb;
struct sk_buff *skb = sp->out_skb;
struct nlmsghdr *nlh;
+ int err;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
@@ -1485,22 +1493,19 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
p = nlmsg_data(nlh);
copy_to_user_policy(xp, p, dir);
- if (copy_to_user_tmpl(xp, skb) < 0)
- goto nlmsg_failure;
- if (copy_to_user_sec_ctx(xp, skb))
- goto nlmsg_failure;
- if (copy_to_user_policy_type(xp->type, skb) < 0)
- goto nlmsg_failure;
- if (xfrm_mark_put(skb, &xp->mark))
- goto nla_put_failure;
-
+ err = copy_to_user_tmpl(xp, skb);
+ if (!err)
+ err = copy_to_user_sec_ctx(xp, skb);
+ if (!err)
+ err = copy_to_user_policy_type(xp->type, skb);
+ if (!err)
+ err = xfrm_mark_put(skb, &xp->mark);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
nlmsg_end(skb, nlh);
return 0;
-
-nla_put_failure:
-nlmsg_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_dump_policy_done(struct netlink_callback *cb)
@@ -1688,6 +1693,7 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
{
struct xfrm_aevent_id *id;
struct nlmsghdr *nlh;
+ int err;
nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
if (nlh == NULL)
@@ -1703,35 +1709,39 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
id->flags = c->data.aevent;
if (x->replay_esn) {
- if (nla_put(skb, XFRMA_REPLAY_ESN_VAL,
- xfrm_replay_state_esn_len(x->replay_esn),
- x->replay_esn))
- goto nla_put_failure;
+ err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
+ xfrm_replay_state_esn_len(x->replay_esn),
+ x->replay_esn);
} else {
- if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
- &x->replay))
- goto nla_put_failure;
+ err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
+ &x->replay);
}
- if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft))
- goto nla_put_failure;
-
- if ((id->flags & XFRM_AE_RTHR) &&
- nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff))
- goto nla_put_failure;
-
- if ((id->flags & XFRM_AE_ETHR) &&
- nla_put_u32(skb, XFRMA_ETIMER_THRESH,
- x->replay_maxage * 10 / HZ))
- goto nla_put_failure;
+ if (err)
+ goto out_cancel;
+ err = nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);
+ if (err)
+ goto out_cancel;
- if (xfrm_mark_put(skb, &x->mark))
- goto nla_put_failure;
+ if (id->flags & XFRM_AE_RTHR) {
+ err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
+ if (err)
+ goto out_cancel;
+ }
+ if (id->flags & XFRM_AE_ETHR) {
+ err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
+ x->replay_maxage * 10 / HZ);
+ if (err)
+ goto out_cancel;
+ }
+ err = xfrm_mark_put(skb, &x->mark);
+ if (err)
+ goto out_cancel;
return nlmsg_end(skb, nlh);
-nla_put_failure:
+out_cancel:
nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
+ return err;
}
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -2155,7 +2165,7 @@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
const struct xfrm_migrate *mp;
struct xfrm_userpolicy_id *pol_id;
struct nlmsghdr *nlh;
- int i;
+ int i, err;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
if (nlh == NULL)
@@ -2167,21 +2177,25 @@ static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
pol_id->dir = dir;
- if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0))
- goto nlmsg_failure;
-
- if (copy_to_user_policy_type(type, skb) < 0)
- goto nlmsg_failure;
-
+ if (k != NULL) {
+ err = copy_to_user_kmaddress(k, skb);
+ if (err)
+ goto out_cancel;
+ }
+ err = copy_to_user_policy_type(type, skb);
+ if (err)
+ goto out_cancel;
for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
- if (copy_to_user_migrate(mp, skb) < 0)
- goto nlmsg_failure;
+ err = copy_to_user_migrate(mp, skb);
+ if (err)
+ goto out_cancel;
}
return nlmsg_end(skb, nlh);
-nlmsg_failure:
+
+out_cancel:
nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
+ return err;
}
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
@@ -2354,6 +2368,7 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
{
struct xfrm_user_expire *ue;
struct nlmsghdr *nlh;
+ int err;
nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
if (nlh == NULL)
@@ -2363,13 +2378,11 @@ static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct
copy_to_user_state(x, &ue->state);
ue->hard = (c->data.hard != 0) ? 1 : 0;
- if (xfrm_mark_put(skb, &x->mark))
- goto nla_put_failure;
+ err = xfrm_mark_put(skb, &x->mark);
+ if (err)
+ return err;
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
- return -EMSGSIZE;
}
static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2470,7 +2483,7 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
struct nlmsghdr *nlh;
struct sk_buff *skb;
int len = xfrm_sa_len(x);
- int headlen;
+ int headlen, err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELSA) {
@@ -2485,8 +2498,9 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
return -ENOMEM;
nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
+ err = -EMSGSIZE;
if (nlh == NULL)
- goto nla_put_failure;
+ goto out_free_skb;
p = nlmsg_data(nlh);
if (c->event == XFRM_MSG_DELSA) {
@@ -2499,24 +2513,23 @@ static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
id->proto = x->id.proto;
attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
+ err = -EMSGSIZE;
if (attr == NULL)
- goto nla_put_failure;
+ goto out_free_skb;
p = nla_data(attr);
}
-
- if (copy_to_user_state_extra(x, p, skb))
- goto nla_put_failure;
+ err = copy_to_user_state_extra(x, p, skb);
+ if (err)
+ goto out_free_skb;
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
-nla_put_failure:
- /* Somebody screwed up with xfrm_sa_len! */
- WARN_ON(1);
+out_free_skb:
kfree_skb(skb);
- return -1;
+ return err;
}
static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
@@ -2557,9 +2570,10 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_tmpl *xt, struct xfrm_policy *xp,
int dir)
{
+ __u32 seq = xfrm_get_acqseq();
struct xfrm_user_acquire *ua;
struct nlmsghdr *nlh;
- __u32 seq = xfrm_get_acqseq();
+ int err;
nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
if (nlh == NULL)
@@ -2575,21 +2589,19 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
ua->calgos = xt->calgos;
ua->seq = x->km.seq = seq;
- if (copy_to_user_tmpl(xp, skb) < 0)
- goto nlmsg_failure;
- if (copy_to_user_state_sec_ctx(x, skb))
- goto nlmsg_failure;
- if (copy_to_user_policy_type(xp->type, skb) < 0)
- goto nlmsg_failure;
- if (xfrm_mark_put(skb, &xp->mark))
- goto nla_put_failure;
+ err = copy_to_user_tmpl(xp, skb);
+ if (!err)
+ err = copy_to_user_state_sec_ctx(x, skb);
+ if (!err)
+ err = copy_to_user_policy_type(xp->type, skb);
+ if (!err)
+ err = xfrm_mark_put(skb, &xp->mark);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
-nlmsg_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
@@ -2681,8 +2693,9 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
int dir, const struct km_event *c)
{
struct xfrm_user_polexpire *upe;
- struct nlmsghdr *nlh;
int hard = c->data.hard;
+ struct nlmsghdr *nlh;
+ int err;
nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
if (nlh == NULL)
@@ -2690,22 +2703,20 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
upe = nlmsg_data(nlh);
copy_to_user_policy(xp, &upe->pol, dir);
- if (copy_to_user_tmpl(xp, skb) < 0)
- goto nlmsg_failure;
- if (copy_to_user_sec_ctx(xp, skb))
- goto nlmsg_failure;
- if (copy_to_user_policy_type(xp->type, skb) < 0)
- goto nlmsg_failure;
- if (xfrm_mark_put(skb, &xp->mark))
- goto nla_put_failure;
+ err = copy_to_user_tmpl(xp, skb);
+ if (!err)
+ err = copy_to_user_sec_ctx(xp, skb);
+ if (!err)
+ err = copy_to_user_policy_type(xp->type, skb);
+ if (!err)
+ err = xfrm_mark_put(skb, &xp->mark);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
upe->hard = !!hard;
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
-nlmsg_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2725,13 +2736,13 @@ static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
+ int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
struct net *net = xp_net(xp);
struct xfrm_userpolicy_info *p;
struct xfrm_userpolicy_id *id;
struct nlmsghdr *nlh;
struct sk_buff *skb;
- int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
- int headlen;
+ int headlen, err;
headlen = sizeof(*p);
if (c->event == XFRM_MSG_DELPOLICY) {
@@ -2747,8 +2758,9 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
return -ENOMEM;
nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
+ err = -EMSGSIZE;
if (nlh == NULL)
- goto nlmsg_failure;
+ goto out_free_skb;
p = nlmsg_data(nlh);
if (c->event == XFRM_MSG_DELPOLICY) {
@@ -2763,29 +2775,29 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
memcpy(&id->sel, &xp->selector, sizeof(id->sel));
attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
+ err = -EMSGSIZE;
if (attr == NULL)
- goto nlmsg_failure;
+ goto out_free_skb;
p = nla_data(attr);
}
copy_to_user_policy(xp, p, dir);
- if (copy_to_user_tmpl(xp, skb) < 0)
- goto nlmsg_failure;
- if (copy_to_user_policy_type(xp->type, skb) < 0)
- goto nlmsg_failure;
-
- if (xfrm_mark_put(skb, &xp->mark))
- goto nla_put_failure;
+ err = copy_to_user_tmpl(xp, skb);
+ if (!err)
+ err = copy_to_user_policy_type(xp->type, skb);
+ if (!err)
+ err = xfrm_mark_put(skb, &xp->mark);
+ if (err)
+ goto out_free_skb;
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
-nla_put_failure:
-nlmsg_failure:
+out_free_skb:
kfree_skb(skb);
- return -1;
+ return err;
}
static int xfrm_notify_policy_flush(const struct km_event *c)
@@ -2793,24 +2805,27 @@ static int xfrm_notify_policy_flush(const struct km_event *c)
struct net *net = c->net;
struct nlmsghdr *nlh;
struct sk_buff *skb;
+ int err;
skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
if (skb == NULL)
return -ENOMEM;
nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
+ err = -EMSGSIZE;
if (nlh == NULL)
- goto nlmsg_failure;
- if (copy_to_user_policy_type(c->data.type, skb) < 0)
- goto nlmsg_failure;
+ goto out_free_skb;
+ err = copy_to_user_policy_type(c->data.type, skb);
+ if (err)
+ goto out_free_skb;
nlmsg_end(skb, nlh);
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);
-nlmsg_failure:
+out_free_skb:
kfree_skb(skb);
- return -1;
+ return err;
}
static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
@@ -2853,15 +2868,14 @@ static int build_report(struct sk_buff *skb, u8 proto,
ur->proto = proto;
memcpy(&ur->sel, sel, sizeof(ur->sel));
- if (addr &&
- nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr))
- goto nla_put_failure;
-
+ if (addr) {
+ int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
+ }
return nlmsg_end(skb, nlh);
-
-nla_put_failure:
- nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
}
static int xfrm_send_report(struct net *net, u8 proto,
@@ -2945,9 +2959,12 @@ static struct xfrm_mgr netlink_mgr = {
static int __net_init xfrm_user_net_init(struct net *net)
{
struct sock *nlsk;
+ struct netlink_kernel_cfg cfg = {
+ .groups = XFRMNLGRP_MAX,
+ .input = xfrm_netlink_rcv,
+ };
- nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX,
- xfrm_netlink_rcv, NULL, THIS_MODULE);
+ nlsk = netlink_kernel_create(net, NETLINK_XFRM, THIS_MODULE, &cfg);
if (nlsk == NULL)
return -ENOMEM;
net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
diff --git a/samples/seccomp/.gitignore b/samples/seccomp/.gitignore
new file mode 100644
index 000000000000..78fb78184291
--- /dev/null
+++ b/samples/seccomp/.gitignore
@@ -0,0 +1,3 @@
+bpf-direct
+bpf-fancy
+dropper
diff --git a/samples/uhid/Makefile b/samples/uhid/Makefile
new file mode 100644
index 000000000000..c95a696560a7
--- /dev/null
+++ b/samples/uhid/Makefile
@@ -0,0 +1,10 @@
+# kbuild trick to avoid linker error. Can be omitted if a module is built.
+obj- := dummy.o
+
+# List of programs to build
+hostprogs-y := uhid-example
+
+# Tell kbuild to always build the programs
+always := $(hostprogs-y)
+
+HOSTCFLAGS_uhid-example.o += -I$(objtree)/usr/include
diff --git a/samples/uhid/uhid-example.c b/samples/uhid/uhid-example.c
new file mode 100644
index 000000000000..03ce3c059a5e
--- /dev/null
+++ b/samples/uhid/uhid-example.c
@@ -0,0 +1,381 @@
+/*
+ * UHID Example
+ *
+ * Copyright (c) 2012 David Herrmann <dh.herrmann@googlemail.com>
+ *
+ * The code may be used by anyone for any purpose,
+ * and can serve as a starting point for developing
+ * applications using uhid.
+ */
+
+/* UHID Example
+ * This example emulates a basic 3 buttons mouse with wheel over UHID. Run this
+ * program as root and then use the following keys to control the mouse:
+ * q: Quit the application
+ * 1: Toggle left button (down, up, ...)
+ * 2: Toggle right button
+ * 3: Toggle middle button
+ * a: Move mouse left
+ * d: Move mouse right
+ * w: Move mouse up
+ * s: Move mouse down
+ * r: Move wheel up
+ * f: Move wheel down
+ *
+ * If uhid is not available as /dev/uhid, then you can pass a different path as
+ * first argument.
+ * If <linux/uhid.h> is not installed in /usr, then compile this with:
+ * gcc -o ./uhid_test -Wall -I./include ./samples/uhid/uhid-example.c
+ * And ignore the warning about kernel headers. However, it is recommended to
+ * use the installed uhid.h if available.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <termios.h>
+#include <unistd.h>
+#include <linux/uhid.h>
+
+/* HID Report Desciptor
+ * We emulate a basic 3 button mouse with wheel. This is the report-descriptor
+ * as the kernel will parse it:
+ *
+ * INPUT[INPUT]
+ * Field(0)
+ * Physical(GenericDesktop.Pointer)
+ * Application(GenericDesktop.Mouse)
+ * Usage(3)
+ * Button.0001
+ * Button.0002
+ * Button.0003
+ * Logical Minimum(0)
+ * Logical Maximum(1)
+ * Report Size(1)
+ * Report Count(3)
+ * Report Offset(0)
+ * Flags( Variable Absolute )
+ * Field(1)
+ * Physical(GenericDesktop.Pointer)
+ * Application(GenericDesktop.Mouse)
+ * Usage(3)
+ * GenericDesktop.X
+ * GenericDesktop.Y
+ * GenericDesktop.Wheel
+ * Logical Minimum(-128)
+ * Logical Maximum(127)
+ * Report Size(8)
+ * Report Count(3)
+ * Report Offset(8)
+ * Flags( Variable Relative )
+ *
+ * This is the mapping that we expect:
+ * Button.0001 ---> Key.LeftBtn
+ * Button.0002 ---> Key.RightBtn
+ * Button.0003 ---> Key.MiddleBtn
+ * GenericDesktop.X ---> Relative.X
+ * GenericDesktop.Y ---> Relative.Y
+ * GenericDesktop.Wheel ---> Relative.Wheel
+ *
+ * This information can be verified by reading /sys/kernel/debug/hid/<dev>/rdesc
+ * This file should print the same information as showed above.
+ */
+
+static unsigned char rdesc[] = {
+ 0x05, 0x01, 0x09, 0x02, 0xa1, 0x01, 0x09, 0x01,
+ 0xa1, 0x00, 0x05, 0x09, 0x19, 0x01, 0x29, 0x03,
+ 0x15, 0x00, 0x25, 0x01, 0x95, 0x03, 0x75, 0x01,
+ 0x81, 0x02, 0x95, 0x01, 0x75, 0x05, 0x81, 0x01,
+ 0x05, 0x01, 0x09, 0x30, 0x09, 0x31, 0x09, 0x38,
+ 0x15, 0x80, 0x25, 0x7f, 0x75, 0x08, 0x95, 0x03,
+ 0x81, 0x06, 0xc0, 0xc0,
+};
+
+static int uhid_write(int fd, const struct uhid_event *ev)
+{
+ ssize_t ret;
+
+ ret = write(fd, ev, sizeof(*ev));
+ if (ret < 0) {
+ fprintf(stderr, "Cannot write to uhid: %m\n");
+ return -errno;
+ } else if (ret != sizeof(*ev)) {
+ fprintf(stderr, "Wrong size written to uhid: %ld != %lu\n",
+ ret, sizeof(ev));
+ return -EFAULT;
+ } else {
+ return 0;
+ }
+}
+
+static int create(int fd)
+{
+ struct uhid_event ev;
+
+ memset(&ev, 0, sizeof(ev));
+ ev.type = UHID_CREATE;
+ strcpy((char*)ev.u.create.name, "test-uhid-device");
+ ev.u.create.rd_data = rdesc;
+ ev.u.create.rd_size = sizeof(rdesc);
+ ev.u.create.bus = BUS_USB;
+ ev.u.create.vendor = 0x15d9;
+ ev.u.create.product = 0x0a37;
+ ev.u.create.version = 0;
+ ev.u.create.country = 0;
+
+ return uhid_write(fd, &ev);
+}
+
+static void destroy(int fd)
+{
+ struct uhid_event ev;
+
+ memset(&ev, 0, sizeof(ev));
+ ev.type = UHID_DESTROY;
+
+ uhid_write(fd, &ev);
+}
+
+static int event(int fd)
+{
+ struct uhid_event ev;
+ ssize_t ret;
+
+ memset(&ev, 0, sizeof(ev));
+ ret = read(fd, &ev, sizeof(ev));
+ if (ret == 0) {
+ fprintf(stderr, "Read HUP on uhid-cdev\n");
+ return -EFAULT;
+ } else if (ret < 0) {
+ fprintf(stderr, "Cannot read uhid-cdev: %m\n");
+ return -errno;
+ } else if (ret != sizeof(ev)) {
+ fprintf(stderr, "Invalid size read from uhid-dev: %ld != %lu\n",
+ ret, sizeof(ev));
+ return -EFAULT;
+ }
+
+ switch (ev.type) {
+ case UHID_START:
+ fprintf(stderr, "UHID_START from uhid-dev\n");
+ break;
+ case UHID_STOP:
+ fprintf(stderr, "UHID_STOP from uhid-dev\n");
+ break;
+ case UHID_OPEN:
+ fprintf(stderr, "UHID_OPEN from uhid-dev\n");
+ break;
+ case UHID_CLOSE:
+ fprintf(stderr, "UHID_CLOSE from uhid-dev\n");
+ break;
+ case UHID_OUTPUT:
+ fprintf(stderr, "UHID_OUTPUT from uhid-dev\n");
+ break;
+ case UHID_OUTPUT_EV:
+ fprintf(stderr, "UHID_OUTPUT_EV from uhid-dev\n");
+ break;
+ default:
+ fprintf(stderr, "Invalid event from uhid-dev: %u\n", ev.type);
+ }
+
+ return 0;
+}
+
+static bool btn1_down;
+static bool btn2_down;
+static bool btn3_down;
+static signed char abs_hor;
+static signed char abs_ver;
+static signed char wheel;
+
+static int send_event(int fd)
+{
+ struct uhid_event ev;
+
+ memset(&ev, 0, sizeof(ev));
+ ev.type = UHID_INPUT;
+ ev.u.input.size = 4;
+
+ if (btn1_down)
+ ev.u.input.data[0] |= 0x1;
+ if (btn2_down)
+ ev.u.input.data[0] |= 0x2;
+ if (btn3_down)
+ ev.u.input.data[0] |= 0x4;
+
+ ev.u.input.data[1] = abs_hor;
+ ev.u.input.data[2] = abs_ver;
+ ev.u.input.data[3] = wheel;
+
+ return uhid_write(fd, &ev);
+}
+
+static int keyboard(int fd)
+{
+ char buf[128];
+ ssize_t ret, i;
+
+ ret = read(STDIN_FILENO, buf, sizeof(buf));
+ if (ret == 0) {
+ fprintf(stderr, "Read HUP on stdin\n");
+ return -EFAULT;
+ } else if (ret < 0) {
+ fprintf(stderr, "Cannot read stdin: %m\n");
+ return -errno;
+ }
+
+ for (i = 0; i < ret; ++i) {
+ switch (buf[i]) {
+ case '1':
+ btn1_down = !btn1_down;
+ ret = send_event(fd);
+ if (ret)
+ return ret;
+ break;
+ case '2':
+ btn2_down = !btn2_down;
+ ret = send_event(fd);
+ if (ret)
+ return ret;
+ break;
+ case '3':
+ btn3_down = !btn3_down;
+ ret = send_event(fd);
+ if (ret)
+ return ret;
+ break;
+ case 'a':
+ abs_hor = -20;
+ ret = send_event(fd);
+ abs_hor = 0;
+ if (ret)
+ return ret;
+ break;
+ case 'd':
+ abs_hor = 20;
+ ret = send_event(fd);
+ abs_hor = 0;
+ if (ret)
+ return ret;
+ break;
+ case 'w':
+ abs_ver = -20;
+ ret = send_event(fd);
+ abs_ver = 0;
+ if (ret)
+ return ret;
+ break;
+ case 's':
+ abs_ver = 20;
+ ret = send_event(fd);
+ abs_ver = 0;
+ if (ret)
+ return ret;
+ break;
+ case 'r':
+ wheel = 1;
+ ret = send_event(fd);
+ wheel = 0;
+ if (ret)
+ return ret;
+ break;
+ case 'f':
+ wheel = -1;
+ ret = send_event(fd);
+ wheel = 0;
+ if (ret)
+ return ret;
+ break;
+ case 'q':
+ return -ECANCELED;
+ default:
+ fprintf(stderr, "Invalid input: %c\n", buf[i]);
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int fd;
+ const char *path = "/dev/uhid";
+ struct pollfd pfds[2];
+ int ret;
+ struct termios state;
+
+ ret = tcgetattr(STDIN_FILENO, &state);
+ if (ret) {
+ fprintf(stderr, "Cannot get tty state\n");
+ } else {
+ state.c_lflag &= ~ICANON;
+ state.c_cc[VMIN] = 1;
+ ret = tcsetattr(STDIN_FILENO, TCSANOW, &state);
+ if (ret)
+ fprintf(stderr, "Cannot set tty state\n");
+ }
+
+ if (argc >= 2) {
+ if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
+ fprintf(stderr, "Usage: %s [%s]\n", argv[0], path);
+ return EXIT_SUCCESS;
+ } else {
+ path = argv[1];
+ }
+ }
+
+ fprintf(stderr, "Open uhid-cdev %s\n", path);
+ fd = open(path, O_RDWR | O_CLOEXEC);
+ if (fd < 0) {
+ fprintf(stderr, "Cannot open uhid-cdev %s: %m\n", path);
+ return EXIT_FAILURE;
+ }
+
+ fprintf(stderr, "Create uhid device\n");
+ ret = create(fd);
+ if (ret) {
+ close(fd);
+ return EXIT_FAILURE;
+ }
+
+ pfds[0].fd = STDIN_FILENO;
+ pfds[0].events = POLLIN;
+ pfds[1].fd = fd;
+ pfds[1].events = POLLIN;
+
+ fprintf(stderr, "Press 'q' to quit...\n");
+ while (1) {
+ ret = poll(pfds, 2, -1);
+ if (ret < 0) {
+ fprintf(stderr, "Cannot poll for fds: %m\n");
+ break;
+ }
+ if (pfds[0].revents & POLLHUP) {
+ fprintf(stderr, "Received HUP on stdin\n");
+ break;
+ }
+ if (pfds[1].revents & POLLHUP) {
+ fprintf(stderr, "Received HUP on uhid-cdev\n");
+ break;
+ }
+
+ if (pfds[0].revents & POLLIN) {
+ ret = keyboard(fd);
+ if (ret)
+ break;
+ }
+ if (pfds[1].revents & POLLIN) {
+ ret = event(fd);
+ if (ret)
+ break;
+ }
+ }
+
+ fprintf(stderr, "Destroy uhid device\n");
+ destroy(fd);
+ return EXIT_SUCCESS;
+}
diff --git a/scripts/mksysmap b/scripts/mksysmap
index 6e133a0bae7a..c1b6191ef879 100644
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -16,7 +16,7 @@
# The second row specify the type of the symbol:
# A = Absolute
# B = Uninitialised data (.bss)
-# C = Comon symbol
+# C = Common symbol
# D = Initialised data
# G = Initialised data for small objects
# I = Indirect reference to another symbol
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0f84bb38eb0d..68e9f5ed0a6f 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -865,6 +865,11 @@ static void check_section(const char *modname, struct elf_info *elf,
#define ALL_EXIT_TEXT_SECTIONS \
".exit.text$", ".devexit.text$", ".cpuexit.text$", ".memexit.text$"
+#define ALL_PCI_INIT_SECTIONS \
+ ".pci_fixup_early$", ".pci_fixup_header$", ".pci_fixup_final$", \
+ ".pci_fixup_enable$", ".pci_fixup_resume$", \
+ ".pci_fixup_resume_early$", ".pci_fixup_suspend$"
+
#define ALL_XXXINIT_SECTIONS DEV_INIT_SECTIONS, CPU_INIT_SECTIONS, \
MEM_INIT_SECTIONS
#define ALL_XXXEXIT_SECTIONS DEV_EXIT_SECTIONS, CPU_EXIT_SECTIONS, \
@@ -1027,6 +1032,12 @@ const struct sectioncheck sectioncheck[] = {
.mismatch = ANY_EXIT_TO_ANY_INIT,
.symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
},
+{
+ .fromsec = { ALL_PCI_INIT_SECTIONS, NULL },
+ .tosec = { INIT_SECTIONS, NULL },
+ .mismatch = ANY_INIT_TO_ANY_EXIT,
+ .symbol_white_list = { NULL },
+},
/* Do not export init/exit functions or data */
{
.fromsec = { "__ksymtab*", NULL },
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 35664fe6daa1..b9c1219924f1 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -38,8 +38,9 @@ config IMA_MEASURE_PCR_IDX
measurement list. If unsure, use the default 10.
config IMA_AUDIT
- bool
+ bool "Enables auditing support"
depends on IMA
+ depends on AUDIT
default y
help
This option adds a kernel parameter 'ima_audit', which
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 5690c021de8f..5f740f6971e1 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
- ima_policy.o ima_audit.o
+ ima_policy.o
+ima-$(CONFIG_IMA_AUDIT) += ima_audit.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 3ccf7acac6df..e7c99fd0d223 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -61,10 +61,19 @@ struct ima_queue_entry {
};
extern struct list_head ima_measurements; /* list of all measurements */
+#ifdef CONFIG_IMA_AUDIT
/* declarations */
void integrity_audit_msg(int audit_msgno, struct inode *inode,
const unsigned char *fname, const char *op,
const char *cause, int result, int info);
+#else
+static inline void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname,
+ const char *op, const char *cause,
+ int result, int info)
+{
+}
+#endif
/* Internal IMA function definitions */
int ima_init(void);
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 88a2788b981d..032ff03ad907 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -175,7 +175,9 @@ void ima_store_measurement(struct integrity_iint_cache *iint,
}
memset(&entry->template, 0, sizeof(entry->template));
memcpy(entry->template.digest, iint->digest, IMA_DIGEST_SIZE);
- strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
+ strcpy(entry->template.file_name,
+ (strlen(filename) > IMA_EVENT_NAME_LEN_MAX) ?
+ file->f_dentry->d_name.name : filename);
result = ima_store_template(entry, violation, inode);
if (!result || result == -EEXIST)
diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/ima/ima_audit.c
index 21e96bf188df..7a57f6769e9c 100644
--- a/security/integrity/ima/ima_audit.c
+++ b/security/integrity/ima/ima_audit.c
@@ -17,8 +17,6 @@
static int ima_audit;
-#ifdef CONFIG_IMA_AUDIT
-
/* ima_audit_setup - enable informational auditing messages */
static int __init ima_audit_setup(char *str)
{
@@ -29,7 +27,6 @@ static int __init ima_audit_setup(char *str)
return 1;
}
__setup("ima_audit=", ima_audit_setup);
-#endif
void integrity_audit_msg(int audit_msgno, struct inode *inode,
const unsigned char *fname, const char *op,
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index e1aa2b482dd2..38477c9c3415 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -367,20 +367,11 @@ int __init ima_fs_init(void)
return 0;
out:
- securityfs_remove(runtime_measurements_count);
- securityfs_remove(ascii_runtime_measurements);
- securityfs_remove(binary_runtime_measurements);
- securityfs_remove(ima_dir);
- securityfs_remove(ima_policy);
- return -1;
-}
-
-void __exit ima_fs_cleanup(void)
-{
securityfs_remove(violations);
securityfs_remove(runtime_measurements_count);
securityfs_remove(ascii_runtime_measurements);
securityfs_remove(binary_runtime_measurements);
securityfs_remove(ima_dir);
securityfs_remove(ima_policy);
+ return -1;
}
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 17f1f060306f..b5dfd534f13d 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -90,8 +90,3 @@ int __init ima_init(void)
return ima_fs_init();
}
-
-void __exit ima_cleanup(void)
-{
- ima_fs_cleanup();
-}
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index b17be79b9cf2..be8294915cf7 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -54,6 +54,7 @@ static void ima_rdwr_violation_check(struct file *file)
fmode_t mode = file->f_mode;
int rc;
bool send_tomtou = false, send_writers = false;
+ unsigned char *pathname = NULL, *pathbuf = NULL;
if (!S_ISREG(inode->i_mode) || !ima_initialized)
return;
@@ -75,12 +76,27 @@ static void ima_rdwr_violation_check(struct file *file)
out:
mutex_unlock(&inode->i_mutex);
+ if (!send_tomtou && !send_writers)
+ return;
+
+ /* We will allow 11 spaces for ' (deleted)' to be appended */
+ pathbuf = kmalloc(PATH_MAX + 11, GFP_KERNEL);
+ if (pathbuf) {
+ pathname = d_path(&file->f_path, pathbuf, PATH_MAX + 11);
+ if (IS_ERR(pathname))
+ pathname = NULL;
+ else if (strlen(pathname) > IMA_EVENT_NAME_LEN_MAX)
+ pathname = NULL;
+ }
if (send_tomtou)
- ima_add_violation(inode, dentry->d_name.name, "invalid_pcr",
- "ToMToU");
+ ima_add_violation(inode,
+ !pathname ? dentry->d_name.name : pathname,
+ "invalid_pcr", "ToMToU");
if (send_writers)
- ima_add_violation(inode, dentry->d_name.name, "invalid_pcr",
- "open_writers");
+ ima_add_violation(inode,
+ !pathname ? dentry->d_name.name : pathname,
+ "invalid_pcr", "open_writers");
+ kfree(pathbuf);
}
static void ima_check_last_writer(struct integrity_iint_cache *iint,
@@ -123,6 +139,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
{
struct inode *inode = file->f_dentry->d_inode;
struct integrity_iint_cache *iint;
+ unsigned char *pathname = NULL, *pathbuf = NULL;
int rc = 0;
if (!ima_initialized || !S_ISREG(inode->i_mode))
@@ -147,8 +164,21 @@ retry:
goto out;
rc = ima_collect_measurement(iint, file);
- if (!rc)
- ima_store_measurement(iint, file, filename);
+ if (rc != 0)
+ goto out;
+
+ if (function != BPRM_CHECK) {
+ /* We will allow 11 spaces for ' (deleted)' to be appended */
+ pathbuf = kmalloc(PATH_MAX + 11, GFP_KERNEL);
+ if (pathbuf) {
+ pathname =
+ d_path(&file->f_path, pathbuf, PATH_MAX + 11);
+ if (IS_ERR(pathname))
+ pathname = NULL;
+ }
+ }
+ ima_store_measurement(iint, file, !pathname ? filename : pathname);
+ kfree(pathbuf);
out:
mutex_unlock(&iint->mutex);
return rc;
@@ -228,15 +258,11 @@ static int __init init_ima(void)
int error;
error = ima_init();
- ima_initialized = 1;
+ if (!error)
+ ima_initialized = 1;
return error;
}
-static void __exit cleanup_ima(void)
-{
- ima_cleanup();
-}
-
late_initcall(init_ima); /* Start IMA after the TPM is available */
MODULE_DESCRIPTION("Integrity Measurement Architecture");
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index d8edff209bf3..1a9583008aae 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -63,6 +63,8 @@ static struct ima_measure_rule_entry default_rules[] = {
{.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = RAMFS_MAGIC,.flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE,.fsmagic = DEVPTS_SUPER_MAGIC,.flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE,.fsmagic = BINFMTFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
{.action = DONT_MEASURE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC},
{.action = MEASURE,.func = FILE_MMAP,.mask = MAY_EXEC,
diff --git a/security/keys/compat.c b/security/keys/compat.c
index c92d42b021aa..1c261763f479 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -24,7 +24,7 @@
*
* If successful, 0 will be returned.
*/
-long compat_keyctl_instantiate_key_iov(
+static long compat_keyctl_instantiate_key_iov(
key_serial_t id,
const struct compat_iovec __user *_payload_iov,
unsigned ioc,
@@ -33,7 +33,7 @@ long compat_keyctl_instantiate_key_iov(
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
long ret;
- if (_payload_iov == 0 || ioc == 0)
+ if (!_payload_iov || !ioc)
goto no_payload;
ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 3dcbf86b0d31..22ff05269e3d 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -149,7 +149,7 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
#define KEY_LOOKUP_FOR_UNLINK 0x04
extern long join_session_keyring(const char *name);
-extern void key_change_session_keyring(struct task_work *twork);
+extern void key_change_session_keyring(struct callback_head *twork);
extern struct work_struct key_gc_work;
extern unsigned key_gc_delay;
@@ -242,7 +242,7 @@ extern long keyctl_instantiate_key_iov(key_serial_t,
extern long keyctl_invalidate_key(key_serial_t);
extern long keyctl_instantiate_key_common(key_serial_t,
- const struct iovec __user *,
+ const struct iovec *,
unsigned, size_t, key_serial_t);
/*
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 0f5b3f027299..3364fbf46807 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1106,7 +1106,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
long ret;
- if (_payload_iov == 0 || ioc == 0)
+ if (!_payload_iov || !ioc)
goto no_payload;
ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
@@ -1456,7 +1456,7 @@ long keyctl_session_to_parent(void)
{
struct task_struct *me, *parent;
const struct cred *mycred, *pcred;
- struct task_work *newwork, *oldwork;
+ struct callback_head *newwork, *oldwork;
key_ref_t keyring_r;
struct cred *cred;
int ret;
@@ -1466,19 +1466,17 @@ long keyctl_session_to_parent(void)
return PTR_ERR(keyring_r);
ret = -ENOMEM;
- newwork = kmalloc(sizeof(struct task_work), GFP_KERNEL);
- if (!newwork)
- goto error_keyring;
/* our parent is going to need a new cred struct, a new tgcred struct
* and new security data, so we allocate them here to prevent ENOMEM in
* our parent */
cred = cred_alloc_blank();
if (!cred)
- goto error_newwork;
+ goto error_keyring;
+ newwork = &cred->rcu;
cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
- init_task_work(newwork, key_change_session_keyring, cred);
+ init_task_work(newwork, key_change_session_keyring);
me = current;
rcu_read_lock();
@@ -1488,6 +1486,7 @@ long keyctl_session_to_parent(void)
oldwork = NULL;
parent = me->real_parent;
+ task_lock(parent);
/* the parent mustn't be init and mustn't be a kernel thread */
if (parent->pid <= 1 || !parent->mm)
goto unlock;
@@ -1531,20 +1530,15 @@ long keyctl_session_to_parent(void)
if (!ret)
newwork = NULL;
unlock:
+ task_unlock(parent);
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
- if (oldwork) {
- put_cred(oldwork->data);
- kfree(oldwork);
- }
- if (newwork) {
- put_cred(newwork->data);
- kfree(newwork);
- }
+ if (oldwork)
+ put_cred(container_of(oldwork, struct cred, rcu));
+ if (newwork)
+ put_cred(cred);
return ret;
-error_newwork:
- kfree(newwork);
error_keyring:
key_ref_put(keyring_r);
return ret;
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 7445875f6818..81e7852d281d 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -751,6 +751,7 @@ static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
int __key_link_begin(struct key *keyring, const struct key_type *type,
const char *description, unsigned long *_prealloc)
__acquires(&keyring->sem)
+ __acquires(&keyring_serialise_link_sem)
{
struct keyring_list *klist, *nklist;
unsigned long prealloc;
@@ -960,6 +961,7 @@ void __key_link(struct key *keyring, struct key *key,
void __key_link_end(struct key *keyring, struct key_type *type,
unsigned long prealloc)
__releases(&keyring->sem)
+ __releases(&keyring_serialise_link_sem)
{
BUG_ON(type == NULL);
BUG_ON(type->name == NULL);
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 4ad54eea1ea4..54339cfd6734 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -834,12 +834,11 @@ error:
* Replace a process's session keyring on behalf of one of its children when
* the target process is about to resume userspace execution.
*/
-void key_change_session_keyring(struct task_work *twork)
+void key_change_session_keyring(struct callback_head *twork)
{
const struct cred *old = current_cred();
- struct cred *new = twork->data;
+ struct cred *new = container_of(twork, struct cred, rcu);
- kfree(twork);
if (unlikely(current->flags & PF_EXITING)) {
put_cred(new);
return;
diff --git a/security/security.c b/security/security.c
index 3efc9b12aef4..860aeb349cb3 100644
--- a/security/security.c
+++ b/security/security.c
@@ -23,6 +23,7 @@
#include <linux/mman.h>
#include <linux/mount.h>
#include <linux/personality.h>
+#include <linux/backing-dev.h>
#include <net/flow.h>
#define MAX_LSM_EVM_XATTR 2
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 372ec6502aa8..689fe2d22165 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2157,8 +2157,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
get_file(devnull);
} else {
devnull = dentry_open(
- dget(selinux_null),
- mntget(selinuxfs_mount),
+ &selinux_null,
O_RDWR, cred);
if (IS_ERR(devnull)) {
devnull = NULL;
@@ -2717,7 +2716,7 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
return dentry_has_perm(cred, dentry, FILE__SETATTR);
- if (ia_valid & ATTR_SIZE)
+ if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE))
av |= FILE__OPEN;
return dentry_has_perm(cred, dentry, av);
@@ -5763,21 +5762,21 @@ static struct nf_hook_ops selinux_ipv4_ops[] = {
{
.hook = selinux_ipv4_postroute,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_SELINUX_LAST,
},
{
.hook = selinux_ipv4_forward,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP_PRI_SELINUX_FIRST,
},
{
.hook = selinux_ipv4_output,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_SELINUX_FIRST,
}
@@ -5789,14 +5788,14 @@ static struct nf_hook_ops selinux_ipv6_ops[] = {
{
.hook = selinux_ipv6_postroute,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_SELINUX_LAST,
},
{
.hook = selinux_ipv6_forward,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_SELINUX_FIRST,
}
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index b8c53723e09b..df2de54a958d 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -145,7 +145,9 @@ struct security_class_mapping secclass_map[] = {
"node_bind", "name_connect", NULL } },
{ "memprotect", { "mmap_zero", NULL } },
{ "peer", { "recv", NULL } },
- { "capability2", { "mac_override", "mac_admin", "syslog", NULL } },
+ { "capability2",
+ { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend",
+ NULL } },
{ "kernel_service", { "use_as_override", "create_files_as", NULL } },
{ "tun_socket",
{ COMMON_SOCK_PERMS, NULL } },
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index dde2005407aa..6d3885165d14 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -221,7 +221,7 @@ extern void selinux_status_update_policyload(int seqno);
extern void selinux_complete_init(void);
extern int selinux_disable(void);
extern void exit_sel_fs(void);
-extern struct dentry *selinux_null;
+extern struct path selinux_null;
extern struct vfsmount *selinuxfs_mount;
extern void selnl_notify_setenforce(int val);
extern void selnl_notify_policyload(u32 seqno);
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 161e01a6c7ef..8a77725423e0 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -19,6 +19,7 @@
#include <linux/netlink.h>
#include <linux/selinux_netlink.h>
#include <net/net_namespace.h>
+#include <net/netlink.h>
#include "security.h"
@@ -47,7 +48,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *
{
switch (msgtype) {
case SELNL_MSG_SETENFORCE: {
- struct selnl_msg_setenforce *msg = NLMSG_DATA(nlh);
+ struct selnl_msg_setenforce *msg = nlmsg_data(nlh);
memset(msg, 0, len);
msg->val = *((int *)data);
@@ -55,7 +56,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *
}
case SELNL_MSG_POLICYLOAD: {
- struct selnl_msg_policyload *msg = NLMSG_DATA(nlh);
+ struct selnl_msg_policyload *msg = nlmsg_data(nlh);
memset(msg, 0, len);
msg->seqno = *((u32 *)data);
@@ -81,7 +82,9 @@ static void selnl_notify(int msgtype, void *data)
goto oom;
tmp = skb->tail;
- nlh = NLMSG_PUT(skb, 0, 0, msgtype, len);
+ nlh = nlmsg_put(skb, 0, 0, msgtype, len, 0);
+ if (!nlh)
+ goto out_kfree_skb;
selnl_add_payload(nlh, len, msgtype, data);
nlh->nlmsg_len = skb->tail - tmp;
NETLINK_CB(skb).dst_group = SELNLGRP_AVC;
@@ -89,7 +92,7 @@ static void selnl_notify(int msgtype, void *data)
out:
return;
-nlmsg_failure:
+out_kfree_skb:
kfree_skb(skb);
oom:
printk(KERN_ERR "SELinux: OOM in %s\n", __func__);
@@ -108,8 +111,12 @@ void selnl_notify_policyload(u32 seqno)
static int __init selnl_init(void)
{
+ struct netlink_kernel_cfg cfg = {
+ .groups = SELNLGRP_MAX,
+ };
+
selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX,
- SELNLGRP_MAX, NULL, NULL, THIS_MODULE);
+ THIS_MODULE, &cfg);
if (selnl == NULL)
panic("SELinux: Cannot create netlink socket.");
netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV);
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 3ad290251288..298e695d6822 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1297,7 +1297,7 @@ out:
#define NULL_FILE_NAME "null"
-struct dentry *selinux_null;
+struct path selinux_null;
static ssize_t sel_read_avc_cache_threshold(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
@@ -1838,7 +1838,7 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3));
d_add(dentry, inode);
- selinux_null = dentry;
+ selinux_null.dentry = dentry;
dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino);
if (IS_ERR(dentry)) {
@@ -1912,7 +1912,7 @@ static int __init init_sel_fs(void)
return err;
}
- selinuxfs_mount = kern_mount(&sel_fs_type);
+ selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type);
if (IS_ERR(selinuxfs_mount)) {
printk(KERN_ERR "selinuxfs: could not mount!\n");
err = PTR_ERR(selinuxfs_mount);
diff --git a/security/smack/smack.h b/security/smack/smack.h
index cc361b8f3d13..99b36124f712 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -43,7 +43,6 @@ struct superblock_smack {
char *smk_hat;
char *smk_default;
int smk_initialized;
- spinlock_t smk_sblock; /* for initialization */
};
struct socket_smack {
@@ -284,6 +283,19 @@ static inline char *smk_of_current(void)
}
/*
+ * Is the task privileged and allowed to be privileged
+ * by the onlycap rule.
+ */
+static inline int smack_privileged(int cap)
+{
+ if (!capable(cap))
+ return 0;
+ if (smack_onlycap == NULL || smack_onlycap == smk_of_current())
+ return 1;
+ return 0;
+}
+
+/*
* logging functions
*/
#define SMACK_AUDIT_DENIED 0x1
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 9f3705e92712..db14689a21e0 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -220,14 +220,9 @@ int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a)
}
/*
- * Return if a specific label has been designated as the
- * only one that gets privilege and current does not
- * have that label.
+ * Allow for priviliged to override policy.
*/
- if (smack_onlycap != NULL && smack_onlycap != sp)
- goto out_audit;
-
- if (capable(CAP_MAC_OVERRIDE))
+ if (rc != 0 && smack_privileged(CAP_MAC_OVERRIDE))
rc = 0;
out_audit:
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index ee0bb5735f35..8221514cc997 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -217,7 +217,7 @@ static int smack_syslog(int typefrom_file)
int rc = 0;
char *sp = smk_of_current();
- if (capable(CAP_MAC_OVERRIDE))
+ if (smack_privileged(CAP_MAC_OVERRIDE))
return 0;
if (sp != smack_known_floor.smk_known)
@@ -251,7 +251,6 @@ static int smack_sb_alloc_security(struct super_block *sb)
sbsp->smk_floor = smack_known_floor.smk_known;
sbsp->smk_hat = smack_known_hat.smk_known;
sbsp->smk_initialized = 0;
- spin_lock_init(&sbsp->smk_sblock);
sb->s_security = sbsp;
@@ -332,13 +331,10 @@ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
char *commap;
char *nsp;
- spin_lock(&sp->smk_sblock);
- if (sp->smk_initialized != 0) {
- spin_unlock(&sp->smk_sblock);
+ if (sp->smk_initialized != 0)
return 0;
- }
+
sp->smk_initialized = 1;
- spin_unlock(&sp->smk_sblock);
for (op = data; op != NULL; op = commap) {
commap = strchr(op, ',');
@@ -825,7 +821,7 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
rc = -EPERM;
/*
* check label validity here so import wont fail on
@@ -835,7 +831,7 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
smk_import(value, size) == NULL)
rc = -EINVAL;
} else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
rc = -EPERM;
if (size != TRANS_TRUE_SIZE ||
strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
@@ -931,7 +927,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0 ||
strcmp(name, XATTR_NAME_SMACKMMAP)) {
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
rc = -EPERM;
} else
rc = cap_inode_removexattr(dentry, name);
@@ -1720,7 +1716,8 @@ static int smack_task_wait(struct task_struct *p)
* state into account in the decision as well as
* the smack value.
*/
- if (capable(CAP_MAC_OVERRIDE) || has_capability(p, CAP_MAC_OVERRIDE))
+ if (smack_privileged(CAP_MAC_OVERRIDE) ||
+ has_capability(p, CAP_MAC_OVERRIDE))
rc = 0;
/* we log only if we didn't get overriden */
out_log:
@@ -2721,7 +2718,7 @@ static int smack_setprocattr(struct task_struct *p, char *name,
if (p != current)
return -EPERM;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (value == NULL || size == 0 || size >= SMK_LONGLABEL)
@@ -2784,7 +2781,7 @@ static int smack_unix_stream_connect(struct sock *sock,
smk_ad_setfield_u_net_sk(&ad, other);
#endif
- if (!capable(CAP_MAC_OVERRIDE))
+ if (!smack_privileged(CAP_MAC_OVERRIDE))
rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
/*
@@ -2820,7 +2817,7 @@ static int smack_unix_may_send(struct socket *sock, struct socket *other)
smk_ad_setfield_u_net_sk(&ad, other->sk);
#endif
- if (!capable(CAP_MAC_OVERRIDE))
+ if (!smack_privileged(CAP_MAC_OVERRIDE))
rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
return rc;
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 1810c9a4ed48..d31e6d957c21 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -215,28 +215,27 @@ static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
* @access: access string
* @rule: Smack rule
* @import: if non-zero, import labels
+ * @len: label length limit
*
* Returns 0 on success, -1 on failure
*/
static int smk_fill_rule(const char *subject, const char *object,
const char *access, struct smack_rule *rule,
- int import)
+ int import, int len)
{
- int rc = -1;
- int done;
const char *cp;
struct smack_known *skp;
if (import) {
- rule->smk_subject = smk_import(subject, 0);
+ rule->smk_subject = smk_import(subject, len);
if (rule->smk_subject == NULL)
return -1;
- rule->smk_object = smk_import(object, 0);
+ rule->smk_object = smk_import(object, len);
if (rule->smk_object == NULL)
return -1;
} else {
- cp = smk_parse_smack(subject, 0);
+ cp = smk_parse_smack(subject, len);
if (cp == NULL)
return -1;
skp = smk_find_entry(cp);
@@ -245,7 +244,7 @@ static int smk_fill_rule(const char *subject, const char *object,
return -1;
rule->smk_subject = skp->smk_known;
- cp = smk_parse_smack(object, 0);
+ cp = smk_parse_smack(object, len);
if (cp == NULL)
return -1;
skp = smk_find_entry(cp);
@@ -257,7 +256,7 @@ static int smk_fill_rule(const char *subject, const char *object,
rule->smk_access = 0;
- for (cp = access, done = 0; *cp && !done; cp++) {
+ for (cp = access; *cp != '\0'; cp++) {
switch (*cp) {
case '-':
break;
@@ -282,13 +281,11 @@ static int smk_fill_rule(const char *subject, const char *object,
rule->smk_access |= MAY_TRANSMUTE;
break;
default:
- done = 1;
- break;
+ return 0;
}
}
- rc = 0;
- return rc;
+ return 0;
}
/**
@@ -304,7 +301,8 @@ static int smk_parse_rule(const char *data, struct smack_rule *rule, int import)
int rc;
rc = smk_fill_rule(data, data + SMK_LABELLEN,
- data + SMK_LABELLEN + SMK_LABELLEN, rule, import);
+ data + SMK_LABELLEN + SMK_LABELLEN, rule, import,
+ SMK_LABELLEN);
return rc;
}
@@ -340,7 +338,7 @@ static int smk_parse_long_rule(const char *data, struct smack_rule *rule,
goto free_out_o;
if (sscanf(data, "%s %s %s", subject, object, access) == 3)
- rc = smk_fill_rule(subject, object, access, rule, import);
+ rc = smk_fill_rule(subject, object, access, rule, import, 0);
kfree(access);
free_out_o:
@@ -520,6 +518,9 @@ static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
if (strlen(srp->smk_subject) >= max || strlen(srp->smk_object) >= max)
return;
+ if (srp->smk_access == 0)
+ return;
+
seq_printf(s, "%s %s", srp->smk_subject, srp->smk_object);
seq_putc(s, ' ');
@@ -534,8 +535,6 @@ static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
seq_putc(s, 'a');
if (srp->smk_access & MAY_TRANSMUTE)
seq_putc(s, 't');
- if (srp->smk_access == 0)
- seq_putc(s, '-');
seq_putc(s, '\n');
}
@@ -595,13 +594,12 @@ static int smk_open_load(struct inode *inode, struct file *file)
static ssize_t smk_write_load(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
-
/*
* Must have privilege.
* No partial writes.
* Enough data must be present.
*/
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
@@ -787,7 +785,7 @@ static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
* No partial writes.
* Enough data must be present.
*/
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (*ppos != 0)
return -EINVAL;
@@ -1090,7 +1088,7 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
* "<addr/mask, as a.b.c.d/e><space><label>"
* "<addr, as a.b.c.d><space><label>"
*/
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (*ppos != 0)
return -EINVAL;
@@ -1267,7 +1265,7 @@ static ssize_t smk_write_doi(struct file *file, const char __user *buf,
char temp[80];
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@@ -1334,7 +1332,7 @@ static ssize_t smk_write_direct(struct file *file, const char __user *buf,
char temp[80];
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@@ -1412,7 +1410,7 @@ static ssize_t smk_write_mapped(struct file *file, const char __user *buf,
char temp[80];
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@@ -1503,7 +1501,7 @@ static ssize_t smk_write_ambient(struct file *file, const char __user *buf,
char *data;
int rc = count;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
data = kzalloc(count + 1, GFP_KERNEL);
@@ -1586,7 +1584,7 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
char *sp = smk_of_task(current->cred->security);
int rc = count;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
/*
@@ -1664,7 +1662,7 @@ static ssize_t smk_write_logging(struct file *file, const char __user *buf,
char temp[32];
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@@ -1885,7 +1883,7 @@ static ssize_t smk_write_load2(struct file *file, const char __user *buf,
/*
* Must have privilege.
*/
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
@@ -2051,7 +2049,6 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
}
root_inode = sb->s_root->d_inode;
- root_inode->i_security = new_inode_smack(smack_known_floor.smk_known);
return 0;
}
diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
index 270790d384e2..4cedc6950d72 100644
--- a/sound/aoa/codecs/onyx.c
+++ b/sound/aoa/codecs/onyx.c
@@ -997,45 +997,10 @@ static void onyx_exit_codec(struct aoa_codec *codec)
onyx->codec.soundbus_dev->detach_codec(onyx->codec.soundbus_dev, onyx);
}
-static int onyx_create(struct i2c_adapter *adapter,
- struct device_node *node,
- int addr)
-{
- struct i2c_board_info info;
- struct i2c_client *client;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "aoa_codec_onyx", I2C_NAME_SIZE);
- info.addr = addr;
- info.platform_data = node;
- client = i2c_new_device(adapter, &info);
- if (!client)
- return -ENODEV;
-
- /*
- * We know the driver is already loaded, so the device should be
- * already bound. If not it means binding failed, which suggests
- * the device doesn't really exist and should be deleted.
- * Ideally this would be replaced by better checks _before_
- * instantiating the device.
- */
- if (!client->driver) {
- i2c_unregister_device(client);
- return -ENODEV;
- }
-
- /*
- * Let i2c-core delete that device on driver removal.
- * This is safe because i2c-core holds the core_lock mutex for us.
- */
- list_add_tail(&client->detected, &client->driver->clients);
- return 0;
-}
-
static int onyx_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct device_node *node = client->dev.platform_data;
+ struct device_node *node = client->dev.of_node;
struct onyx *onyx;
u8 dummy;
@@ -1071,40 +1036,6 @@ static int onyx_i2c_probe(struct i2c_client *client,
return -ENODEV;
}
-static int onyx_i2c_attach(struct i2c_adapter *adapter)
-{
- struct device_node *busnode, *dev = NULL;
- struct pmac_i2c_bus *bus;
-
- bus = pmac_i2c_adapter_to_bus(adapter);
- if (bus == NULL)
- return -ENODEV;
- busnode = pmac_i2c_get_bus_node(bus);
-
- while ((dev = of_get_next_child(busnode, dev)) != NULL) {
- if (of_device_is_compatible(dev, "pcm3052")) {
- const u32 *addr;
- printk(KERN_DEBUG PFX "found pcm3052\n");
- addr = of_get_property(dev, "reg", NULL);
- if (!addr)
- return -ENODEV;
- return onyx_create(adapter, dev, (*addr)>>1);
- }
- }
-
- /* if that didn't work, try desperate mode for older
- * machines that have stuff missing from the device tree */
-
- if (!of_device_is_compatible(busnode, "k2-i2c"))
- return -ENODEV;
-
- printk(KERN_DEBUG PFX "found k2-i2c, checking if onyx chip is on it\n");
- /* probe both possible addresses for the onyx chip */
- if (onyx_create(adapter, NULL, 0x46) == 0)
- return 0;
- return onyx_create(adapter, NULL, 0x47);
-}
-
static int onyx_i2c_remove(struct i2c_client *client)
{
struct onyx *onyx = i2c_get_clientdata(client);
@@ -1117,16 +1048,16 @@ static int onyx_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id onyx_i2c_id[] = {
- { "aoa_codec_onyx", 0 },
+ { "MAC,pcm3052", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c,onyx_i2c_id);
static struct i2c_driver onyx_driver = {
.driver = {
.name = "aoa_codec_onyx",
.owner = THIS_MODULE,
},
- .attach_adapter = onyx_i2c_attach,
.probe = onyx_i2c_probe,
.remove = onyx_i2c_remove,
.id_table = onyx_i2c_id,
diff --git a/sound/aoa/codecs/tas.c b/sound/aoa/codecs/tas.c
index 8e63d1f35ce1..c491ae0f749c 100644
--- a/sound/aoa/codecs/tas.c
+++ b/sound/aoa/codecs/tas.c
@@ -883,43 +883,10 @@ static void tas_exit_codec(struct aoa_codec *codec)
}
-static int tas_create(struct i2c_adapter *adapter,
- struct device_node *node,
- int addr)
-{
- struct i2c_board_info info;
- struct i2c_client *client;
-
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "aoa_codec_tas", I2C_NAME_SIZE);
- info.addr = addr;
- info.platform_data = node;
-
- client = i2c_new_device(adapter, &info);
- if (!client)
- return -ENODEV;
- /*
- * We know the driver is already loaded, so the device should be
- * already bound. If not it means binding failed, and then there
- * is no point in keeping the device instantiated.
- */
- if (!client->driver) {
- i2c_unregister_device(client);
- return -ENODEV;
- }
-
- /*
- * Let i2c-core delete that device on driver removal.
- * This is safe because i2c-core holds the core_lock mutex for us.
- */
- list_add_tail(&client->detected, &client->driver->clients);
- return 0;
-}
-
static int tas_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct device_node *node = client->dev.platform_data;
+ struct device_node *node = client->dev.of_node;
struct tas *tas;
tas = kzalloc(sizeof(struct tas), GFP_KERNEL);
@@ -953,47 +920,6 @@ static int tas_i2c_probe(struct i2c_client *client,
return -EINVAL;
}
-static int tas_i2c_attach(struct i2c_adapter *adapter)
-{
- struct device_node *busnode, *dev = NULL;
- struct pmac_i2c_bus *bus;
-
- bus = pmac_i2c_adapter_to_bus(adapter);
- if (bus == NULL)
- return -ENODEV;
- busnode = pmac_i2c_get_bus_node(bus);
-
- while ((dev = of_get_next_child(busnode, dev)) != NULL) {
- if (of_device_is_compatible(dev, "tas3004")) {
- const u32 *addr;
- printk(KERN_DEBUG PFX "found tas3004\n");
- addr = of_get_property(dev, "reg", NULL);
- if (!addr)
- continue;
- return tas_create(adapter, dev, ((*addr) >> 1) & 0x7f);
- }
- /* older machines have no 'codec' node with a 'compatible'
- * property that says 'tas3004', they just have a 'deq'
- * node without any such property... */
- if (strcmp(dev->name, "deq") == 0) {
- const u32 *_addr;
- u32 addr;
- printk(KERN_DEBUG PFX "found 'deq' node\n");
- _addr = of_get_property(dev, "i2c-address", NULL);
- if (!_addr)
- continue;
- addr = ((*_addr) >> 1) & 0x7f;
- /* now, if the address doesn't match any of the two
- * that a tas3004 can have, we cannot handle this.
- * I doubt it ever happens but hey. */
- if (addr != 0x34 && addr != 0x35)
- continue;
- return tas_create(adapter, dev, addr);
- }
- }
- return -ENODEV;
-}
-
static int tas_i2c_remove(struct i2c_client *client)
{
struct tas *tas = i2c_get_clientdata(client);
@@ -1011,16 +937,16 @@ static int tas_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id tas_i2c_id[] = {
- { "aoa_codec_tas", 0 },
+ { "MAC,tas3004", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c,tas_i2c_id);
static struct i2c_driver tas_driver = {
.driver = {
.name = "aoa_codec_tas",
.owner = THIS_MODULE,
},
- .attach_adapter = tas_i2c_attach,
.probe = tas_i2c_probe,
.remove = tas_i2c_remove,
.id_table = tas_i2c_id,
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index afef72c4f0d3..0d7b25e81643 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -108,7 +108,7 @@ static struct pxa2xx_pcm_client pxa2xx_ac97_pcm_client = {
#ifdef CONFIG_PM
-static int pxa2xx_ac97_do_suspend(struct snd_card *card, pm_message_t state)
+static int pxa2xx_ac97_do_suspend(struct snd_card *card)
{
pxa2xx_audio_ops_t *platform_ops = card->dev->platform_data;
@@ -144,7 +144,7 @@ static int pxa2xx_ac97_suspend(struct device *dev)
int ret = 0;
if (card)
- ret = pxa2xx_ac97_do_suspend(card, PMSG_SUSPEND);
+ ret = pxa2xx_ac97_do_suspend(card);
return ret;
}
@@ -160,10 +160,7 @@ static int pxa2xx_ac97_resume(struct device *dev)
return ret;
}
-static const struct dev_pm_ops pxa2xx_ac97_pm_ops = {
- .suspend = pxa2xx_ac97_suspend,
- .resume = pxa2xx_ac97_resume,
-};
+static SIMPLE_DEV_PM_OPS(pxa2xx_ac97_pm_ops, pxa2xx_ac97_suspend, pxa2xx_ac97_resume);
#endif
static int __devinit pxa2xx_ac97_probe(struct platform_device *dev)
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index f7c2bb08055d..eb4ceb71123e 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -535,9 +535,9 @@ out_put_pclk:
}
#ifdef CONFIG_PM
-static int atmel_abdac_suspend(struct platform_device *pdev, pm_message_t msg)
+static int atmel_abdac_suspend(struct device *pdev)
{
- struct snd_card *card = platform_get_drvdata(pdev);
+ struct snd_card *card = dev_get_drvdata(pdev);
struct atmel_abdac *dac = card->private_data;
dw_dma_cyclic_stop(dac->dma.chan);
@@ -547,9 +547,9 @@ static int atmel_abdac_suspend(struct platform_device *pdev, pm_message_t msg)
return 0;
}
-static int atmel_abdac_resume(struct platform_device *pdev)
+static int atmel_abdac_resume(struct device *pdev)
{
- struct snd_card *card = platform_get_drvdata(pdev);
+ struct snd_card *card = dev_get_drvdata(pdev);
struct atmel_abdac *dac = card->private_data;
clk_enable(dac->pclk);
@@ -559,9 +559,11 @@ static int atmel_abdac_resume(struct platform_device *pdev)
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(atmel_abdac_pm, atmel_abdac_suspend, atmel_abdac_resume);
+#define ATMEL_ABDAC_PM_OPS &atmel_abdac_pm
#else
-#define atmel_abdac_suspend NULL
-#define atmel_abdac_resume NULL
+#define ATMEL_ABDAC_PM_OPS NULL
#endif
static int __devexit atmel_abdac_remove(struct platform_device *pdev)
@@ -589,9 +591,9 @@ static struct platform_driver atmel_abdac_driver = {
.remove = __devexit_p(atmel_abdac_remove),
.driver = {
.name = "atmel_abdac",
+ .owner = THIS_MODULE,
+ .pm = ATMEL_ABDAC_PM_OPS,
},
- .suspend = atmel_abdac_suspend,
- .resume = atmel_abdac_resume,
};
static int __init atmel_abdac_init(void)
diff --git a/sound/atmel/ac97c.c b/sound/atmel/ac97c.c
index f5ded640b395..bf47025bdf45 100644
--- a/sound/atmel/ac97c.c
+++ b/sound/atmel/ac97c.c
@@ -1135,9 +1135,9 @@ err_snd_card_new:
}
#ifdef CONFIG_PM
-static int atmel_ac97c_suspend(struct platform_device *pdev, pm_message_t msg)
+static int atmel_ac97c_suspend(struct device *pdev)
{
- struct snd_card *card = platform_get_drvdata(pdev);
+ struct snd_card *card = dev_get_drvdata(pdev);
struct atmel_ac97c *chip = card->private_data;
if (cpu_is_at32ap7000()) {
@@ -1151,9 +1151,9 @@ static int atmel_ac97c_suspend(struct platform_device *pdev, pm_message_t msg)
return 0;
}
-static int atmel_ac97c_resume(struct platform_device *pdev)
+static int atmel_ac97c_resume(struct device *pdev)
{
- struct snd_card *card = platform_get_drvdata(pdev);
+ struct snd_card *card = dev_get_drvdata(pdev);
struct atmel_ac97c *chip = card->private_data;
clk_enable(chip->pclk);
@@ -1165,9 +1165,11 @@ static int atmel_ac97c_resume(struct platform_device *pdev)
}
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(atmel_ac97c_pm, atmel_ac97c_suspend, atmel_ac97c_resume);
+#define ATMEL_AC97C_PM_OPS &atmel_ac97c_pm
#else
-#define atmel_ac97c_suspend NULL
-#define atmel_ac97c_resume NULL
+#define ATMEL_AC97C_PM_OPS NULL
#endif
static int __devexit atmel_ac97c_remove(struct platform_device *pdev)
@@ -1210,9 +1212,9 @@ static struct platform_driver atmel_ac97c_driver = {
.remove = __devexit_p(atmel_ac97c_remove),
.driver = {
.name = "atmel_ac97c",
+ .owner = THIS_MODULE,
+ .pm = ATMEL_AC97C_PM_OPS,
},
- .suspend = atmel_ac97c_suspend,
- .resume = atmel_ac97c_resume,
};
static int __init atmel_ac97c_init(void)
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 8f312fa6c282..7ae671923393 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1250,10 +1250,10 @@ static int snd_pcm_hw_rule_list(struct snd_pcm_hw_params *params,
int snd_pcm_hw_constraint_list(struct snd_pcm_runtime *runtime,
unsigned int cond,
snd_pcm_hw_param_t var,
- struct snd_pcm_hw_constraint_list *l)
+ const struct snd_pcm_hw_constraint_list *l)
{
return snd_pcm_hw_rule_add(runtime, cond, var,
- snd_pcm_hw_rule_list, l,
+ snd_pcm_hw_rule_list, (void *)l,
var, -1);
}
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index 9c9eff9afbac..d4fc1bfbe457 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -488,3 +488,21 @@ unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate)
return SNDRV_PCM_RATE_KNOT;
}
EXPORT_SYMBOL(snd_pcm_rate_to_rate_bit);
+
+/**
+ * snd_pcm_rate_bit_to_rate - converts SNDRV_PCM_RATE_xxx bit to sample rate
+ * @rate_bit: the rate bit to convert
+ *
+ * Returns the sample rate that corresponds to the given SNDRV_PCM_RATE_xxx flag
+ * or 0 for an unknown rate bit
+ */
+unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit)
+{
+ unsigned int i;
+
+ for (i = 0; i < snd_pcm_known_rates.count; i++)
+ if ((1u << i) == rate_bit)
+ return snd_pcm_known_rates.list[i];
+ return 0;
+}
+EXPORT_SYMBOL(snd_pcm_rate_bit_to_rate);
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 8b5c36f4d303..1128b35b2b05 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -1177,10 +1177,9 @@ static int __devexit loopback_remove(struct platform_device *devptr)
}
#ifdef CONFIG_PM
-static int loopback_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int loopback_suspend(struct device *pdev)
{
- struct snd_card *card = platform_get_drvdata(pdev);
+ struct snd_card *card = dev_get_drvdata(pdev);
struct loopback *loopback = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -1190,13 +1189,18 @@ static int loopback_suspend(struct platform_device *pdev,
return 0;
}
-static int loopback_resume(struct platform_device *pdev)
+static int loopback_resume(struct device *pdev)
{
- struct snd_card *card = platform_get_drvdata(pdev);
+ struct snd_card *card = dev_get_drvdata(pdev);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(loopback_pm, loopback_suspend, loopback_resume);
+#define LOOPBACK_PM_OPS &loopback_pm
+#else
+#define LOOPBACK_PM_OPS NULL
#endif
#define SND_LOOPBACK_DRIVER "snd_aloop"
@@ -1204,12 +1208,10 @@ static int loopback_resume(struct platform_device *pdev)
static struct platform_driver loopback_driver = {
.probe = loopback_probe,
.remove = __devexit_p(loopback_remove),
-#ifdef CONFIG_PM
- .suspend = loopback_suspend,
- .resume = loopback_resume,
-#endif
.driver = {
- .name = SND_LOOPBACK_DRIVER
+ .name = SND_LOOPBACK_DRIVER,
+ .owner = THIS_MODULE,
+ .pm = LOOPBACK_PM_OPS,
},
};
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index ad9434fd6370..f7d3bfc6bca8 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -1065,9 +1065,9 @@ static int __devexit snd_dummy_remove(struct platform_device *devptr)
}
#ifdef CONFIG_PM
-static int snd_dummy_suspend(struct platform_device *pdev, pm_message_t state)
+static int snd_dummy_suspend(struct device *pdev)
{
- struct snd_card *card = platform_get_drvdata(pdev);
+ struct snd_card *card = dev_get_drvdata(pdev);
struct snd_dummy *dummy = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -1075,13 +1075,18 @@ static int snd_dummy_suspend(struct platform_device *pdev, pm_message_t state)
return 0;
}
-static int snd_dummy_resume(struct platform_device *pdev)
+static int snd_dummy_resume(struct device *pdev)
{
- struct snd_card *card = platform_get_drvdata(pdev);
+ struct snd_card *card = dev_get_drvdata(pdev);
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(snd_dummy_pm, snd_dummy_suspend, snd_dummy_resume);
+#define SND_DUMMY_PM_OPS &snd_dummy_pm
+#else
+#define SND_DUMMY_PM_OPS NULL
#endif
#define SND_DUMMY_DRIVER "snd_dummy"
@@ -1089,12 +1094,10 @@ static int snd_dummy_resume(struct platform_device *pdev)
static struct platform_driver snd_dummy_driver = {
.probe = snd_dummy_probe,
.remove = __devexit_p(snd_dummy_remove),
-#ifdef CONFIG_PM
- .suspend = snd_dummy_suspend,
- .resume = snd_dummy_resume,
-#endif
.driver = {
- .name = SND_DUMMY_DRIVER
+ .name = SND_DUMMY_DRIVER,
+ .owner = THIS_MODULE,
+ .pm = SND_DUMMY_PM_OPS,
},
};
diff --git a/sound/drivers/mpu401/mpu401.c b/sound/drivers/mpu401/mpu401.c
index 86f5fbc2da72..bc03a2046c9c 100644
--- a/sound/drivers/mpu401/mpu401.c
+++ b/sound/drivers/mpu401/mpu401.c
@@ -139,7 +139,8 @@ static struct platform_driver snd_mpu401_driver = {
.probe = snd_mpu401_probe,
.remove = __devexit_p(snd_mpu401_remove),
.driver = {
- .name = SND_MPU401_DRIVER
+ .name = SND_MPU401_DRIVER,
+ .owner = THIS_MODULE,
},
};
diff --git a/sound/drivers/mtpav.c b/sound/drivers/mtpav.c
index 76930793fb69..cad73af3860c 100644
--- a/sound/drivers/mtpav.c
+++ b/sound/drivers/mtpav.c
@@ -759,7 +759,8 @@ static struct platform_driver snd_mtpav_driver = {
.probe = snd_mtpav_probe,
.remove = __devexit_p(snd_mtpav_remove),
.driver = {
- .name = SND_MTPAV_DRIVER
+ .name = SND_MTPAV_DRIVER,
+ .owner = THIS_MODULE,
},
};
diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
index 621e60e2029f..2d5514b0a290 100644
--- a/sound/drivers/mts64.c
+++ b/sound/drivers/mts64.c
@@ -1040,7 +1040,8 @@ static struct platform_driver snd_mts64_driver = {
.probe = snd_mts64_probe,
.remove = __devexit_p(snd_mts64_remove),
.driver = {
- .name = PLATFORM_DRIVER
+ .name = PLATFORM_DRIVER,
+ .owner = THIS_MODULE,
}
};
diff --git a/sound/drivers/pcsp/pcsp.c b/sound/drivers/pcsp/pcsp.c
index 99704e6a2e26..6ca59fc6dcb9 100644
--- a/sound/drivers/pcsp/pcsp.c
+++ b/sound/drivers/pcsp/pcsp.c
@@ -200,15 +200,18 @@ static void pcsp_stop_beep(struct snd_pcsp *chip)
}
#ifdef CONFIG_PM
-static int pcsp_suspend(struct platform_device *dev, pm_message_t state)
+static int pcsp_suspend(struct device *dev)
{
- struct snd_pcsp *chip = platform_get_drvdata(dev);
+ struct snd_pcsp *chip = dev_get_drvdata(dev);
pcsp_stop_beep(chip);
snd_pcm_suspend_all(chip->pcm);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(pcsp_pm, pcsp_suspend, NULL);
+#define PCSP_PM_OPS &pcsp_pm
#else
-#define pcsp_suspend NULL
+#define PCSP_PM_OPS NULL
#endif /* CONFIG_PM */
static void pcsp_shutdown(struct platform_device *dev)
@@ -221,10 +224,10 @@ static struct platform_driver pcsp_platform_driver = {
.driver = {
.name = "pcspkr",
.owner = THIS_MODULE,
+ .pm = PCSP_PM_OPS,
},
.probe = pcsp_probe,
.remove = __devexit_p(pcsp_remove),
- .suspend = pcsp_suspend,
.shutdown = pcsp_shutdown,
};
diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
index 3e32bd3d95d9..8364855ed14f 100644
--- a/sound/drivers/portman2x4.c
+++ b/sound/drivers/portman2x4.c
@@ -829,7 +829,8 @@ static struct platform_driver snd_portman_driver = {
.probe = snd_portman_probe,
.remove = __devexit_p(snd_portman_remove),
.driver = {
- .name = PLATFORM_DRIVER
+ .name = PLATFORM_DRIVER,
+ .owner = THIS_MODULE,
}
};
diff --git a/sound/drivers/serial-u16550.c b/sound/drivers/serial-u16550.c
index b2d0e8e49bed..86700671d1ac 100644
--- a/sound/drivers/serial-u16550.c
+++ b/sound/drivers/serial-u16550.c
@@ -995,7 +995,8 @@ static struct platform_driver snd_serial_driver = {
.probe = snd_serial_probe,
.remove = __devexit_p( snd_serial_remove),
.driver = {
- .name = SND_SERIAL_DRIVER
+ .name = SND_SERIAL_DRIVER,
+ .owner = THIS_MODULE,
},
};
diff --git a/sound/drivers/virmidi.c b/sound/drivers/virmidi.c
index 9d97478a18b3..d7d514df9058 100644
--- a/sound/drivers/virmidi.c
+++ b/sound/drivers/virmidi.c
@@ -142,7 +142,8 @@ static struct platform_driver snd_virmidi_driver = {
.probe = snd_virmidi_probe,
.remove = __devexit_p(snd_virmidi_remove),
.driver = {
- .name = SND_VIRMIDI_DRIVER
+ .name = SND_VIRMIDI_DRIVER,
+ .owner = THIS_MODULE,
},
};
diff --git a/sound/drivers/vx/vx_core.c b/sound/drivers/vx/vx_core.c
index b8e515999bc2..de5055a3b0d0 100644
--- a/sound/drivers/vx/vx_core.c
+++ b/sound/drivers/vx/vx_core.c
@@ -725,7 +725,7 @@ EXPORT_SYMBOL(snd_vx_dsp_load);
/*
* suspend
*/
-int snd_vx_suspend(struct vx_core *chip, pm_message_t state)
+int snd_vx_suspend(struct vx_core *chip)
{
unsigned int i;
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index d7ccf28bd66a..f8fbe22515c9 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -135,10 +135,9 @@ struct snd_opti9xx {
unsigned long mc_base_size;
#ifdef OPTi93X
unsigned long mc_indir_index;
- unsigned long mc_indir_size;
struct resource *res_mc_indir;
- struct snd_wss *codec;
#endif /* OPTi93X */
+ struct snd_wss *codec;
unsigned long pwd_reg;
spinlock_t lock;
@@ -245,10 +244,8 @@ static int __devinit snd_opti9xx_init(struct snd_opti9xx *chip,
case OPTi9XX_HW_82C931:
case OPTi9XX_HW_82C933:
chip->mc_base = (hardware == OPTi9XX_HW_82C930) ? 0xf8f : 0xf8d;
- if (!chip->mc_indir_index) {
+ if (!chip->mc_indir_index)
chip->mc_indir_index = 0xe0e;
- chip->mc_indir_size = 2;
- }
chip->password = 0xe4;
chip->pwd_reg = 0;
break;
@@ -351,7 +348,7 @@ static void snd_opti9xx_write(struct snd_opti9xx *chip, unsigned char reg,
(snd_opti9xx_read(chip, reg) & ~(mask)) | ((value) & (mask)))
-static int __devinit snd_opti9xx_configure(struct snd_opti9xx *chip,
+static int snd_opti9xx_configure(struct snd_opti9xx *chip,
long port,
int irq, int dma1, int dma2,
long mpu_port, int mpu_irq)
@@ -403,7 +400,9 @@ static int __devinit snd_opti9xx_configure(struct snd_opti9xx *chip,
#else /* OPTi93X */
case OPTi9XX_HW_82C931:
- case OPTi9XX_HW_82C933:
+ /* disable 3D sound (set GPIO1 as output, low) */
+ snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(20), 0x04, 0x0c);
+ case OPTi9XX_HW_82C933: /* FALL THROUGH */
/*
* The BTC 1817DW has QS1000 wavetable which is connected
* to the serial digital input of the OPTI931.
@@ -696,8 +695,7 @@ static int __devinit snd_opti9xx_read_check(struct snd_opti9xx *chip)
if (value == snd_opti9xx_read(chip, OPTi9XX_MC_REG(1)))
return 0;
#else /* OPTi93X */
- chip->res_mc_indir = request_region(chip->mc_indir_index,
- chip->mc_indir_size,
+ chip->res_mc_indir = request_region(chip->mc_indir_index, 2,
"OPTi93x MC");
if (chip->res_mc_indir == NULL)
return -EBUSY;
@@ -770,8 +768,9 @@ static int __devinit snd_card_opti9xx_pnp(struct snd_opti9xx *chip,
#ifdef OPTi93X
port = pnp_port_start(pdev, 0) - 4;
fm_port = pnp_port_start(pdev, 1) + 8;
- chip->mc_indir_index = pnp_port_start(pdev, 3) + 2;
- chip->mc_indir_size = pnp_port_len(pdev, 3) - 2;
+ /* adjust mc_indir_index - some cards report it at 0xe?d,
+ other at 0xe?c but it really is always at 0xe?e */
+ chip->mc_indir_index = (pnp_port_start(pdev, 3) & ~0xf) | 0xe;
#else
devmc = pnp_request_card_device(card, pid->devs[2].id, NULL);
if (devmc == NULL)
@@ -871,9 +870,7 @@ static int __devinit snd_opti9xx_probe(struct snd_card *card)
&codec);
if (error < 0)
return error;
-#ifdef OPTi93X
chip->codec = codec;
-#endif
error = snd_wss_pcm(codec, 0, &pcm);
if (error < 0)
return error;
@@ -1054,11 +1051,55 @@ static int __devexit snd_opti9xx_isa_remove(struct device *devptr,
return 0;
}
+#ifdef CONFIG_PM
+static int snd_opti9xx_suspend(struct snd_card *card)
+{
+ struct snd_opti9xx *chip = card->private_data;
+
+ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+ chip->codec->suspend(chip->codec);
+ return 0;
+}
+
+static int snd_opti9xx_resume(struct snd_card *card)
+{
+ struct snd_opti9xx *chip = card->private_data;
+ int error, xdma2;
+#if defined(CS4231) || defined(OPTi93X)
+ xdma2 = dma2;
+#else
+ xdma2 = -1;
+#endif
+
+ error = snd_opti9xx_configure(chip, port, irq, dma1, xdma2,
+ mpu_port, mpu_irq);
+ if (error)
+ return error;
+ chip->codec->resume(chip->codec);
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ return 0;
+}
+
+static int snd_opti9xx_isa_suspend(struct device *dev, unsigned int n,
+ pm_message_t state)
+{
+ return snd_opti9xx_suspend(dev_get_drvdata(dev));
+}
+
+static int snd_opti9xx_isa_resume(struct device *dev, unsigned int n)
+{
+ return snd_opti9xx_resume(dev_get_drvdata(dev));
+}
+#endif
+
static struct isa_driver snd_opti9xx_driver = {
.match = snd_opti9xx_isa_match,
.probe = snd_opti9xx_isa_probe,
.remove = __devexit_p(snd_opti9xx_isa_remove),
- /* FIXME: suspend/resume */
+#ifdef CONFIG_PM
+ .suspend = snd_opti9xx_isa_suspend,
+ .resume = snd_opti9xx_isa_resume,
+#endif
.driver = {
.name = DEV_NAME
},
@@ -1124,12 +1165,29 @@ static void __devexit snd_opti9xx_pnp_remove(struct pnp_card_link * pcard)
snd_opti9xx_pnp_is_probed = 0;
}
+#ifdef CONFIG_PM
+static int snd_opti9xx_pnp_suspend(struct pnp_card_link *pcard,
+ pm_message_t state)
+{
+ return snd_opti9xx_suspend(pnp_get_card_drvdata(pcard));
+}
+
+static int snd_opti9xx_pnp_resume(struct pnp_card_link *pcard)
+{
+ return snd_opti9xx_resume(pnp_get_card_drvdata(pcard));
+}
+#endif
+
static struct pnp_card_driver opti9xx_pnpc_driver = {
.flags = PNP_DRIVER_RES_DISABLE,
.name = "opti9xx",
.id_table = snd_opti9xx_pnpids,
.probe = snd_opti9xx_pnp_probe,
.remove = __devexit_p(snd_opti9xx_pnp_remove),
+#ifdef CONFIG_PM
+ .suspend = snd_opti9xx_pnp_suspend,
+ .resume = snd_opti9xx_pnp_resume,
+#endif
};
#endif
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 49c8a0c2442c..360b08b03e1d 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -1456,7 +1456,6 @@ static struct snd_pcm_hardware snd_wss_playback =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_SYNC_START),
.formats = (SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW | SNDRV_PCM_FMTBIT_IMA_ADPCM |
SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE),
@@ -1657,6 +1656,10 @@ static void snd_wss_resume(struct snd_wss *chip)
break;
}
}
+ /* Yamaha needs this to resume properly */
+ if (chip->hardware == WSS_HW_OPL3SA2)
+ snd_wss_out(chip, CS4231_PLAYBK_FORMAT,
+ chip->image[CS4231_PLAYBK_FORMAT]);
spin_unlock_irqrestore(&chip->reg_lock, flags);
#if 1
snd_wss_mce_down(chip);
diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
index 09d46484bc1a..7d8803a00b79 100644
--- a/sound/oss/swarm_cs4297a.c
+++ b/sound/oss/swarm_cs4297a.c
@@ -69,7 +69,6 @@
#include <linux/sound.h>
#include <linux/slab.h>
#include <linux/soundcard.h>
-#include <linux/ac97_codec.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
@@ -199,6 +198,22 @@ static const char invalid_magic[] =
} \
})
+/* AC97 registers */
+#define AC97_MASTER_VOL_STEREO 0x0002 /* Line Out */
+#define AC97_PCBEEP_VOL 0x000a /* none */
+#define AC97_PHONE_VOL 0x000c /* TAD Input (mono) */
+#define AC97_MIC_VOL 0x000e /* MIC Input (mono) */
+#define AC97_LINEIN_VOL 0x0010 /* Line Input (stereo) */
+#define AC97_CD_VOL 0x0012 /* CD Input (stereo) */
+#define AC97_AUX_VOL 0x0016 /* Aux Input (stereo) */
+#define AC97_PCMOUT_VOL 0x0018 /* Wave Output (stereo) */
+#define AC97_RECORD_SELECT 0x001a /* */
+#define AC97_RECORD_GAIN 0x001c
+#define AC97_GENERAL_PURPOSE 0x0020
+#define AC97_3D_CONTROL 0x0022
+#define AC97_POWER_CONTROL 0x0026
+#define AC97_VENDOR_ID1 0x007c
+
struct list_head cs4297a_devs = { &cs4297a_devs, &cs4297a_devs };
typedef struct serdma_descr_s {
diff --git a/sound/oss/vwsnd.c b/sound/oss/vwsnd.c
index 643f1113b1d8..7e814a5c3677 100644
--- a/sound/oss/vwsnd.c
+++ b/sound/oss/vwsnd.c
@@ -438,7 +438,7 @@ static __inline__ void li_writeb(lithium_t *lith, int off, unsigned char val)
*
* Observe that (mask & -mask) is (1 << low_set_bit_of(mask)).
* As long as mask is constant, we trust the compiler will change the
- * multipy and divide into shifts.
+ * multiply and divide into shifts.
*/
#define SHIFT_FIELD(val, mask) (((val) * ((mask) & -(mask))) & (mask))
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 9dfc27bf6cc6..ee895f3c8605 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1884,9 +1884,10 @@ static int __devinit snd_ali_mixer(struct snd_ali * codec)
}
#ifdef CONFIG_PM
-static int ali_suspend(struct pci_dev *pci, pm_message_t state)
+static int ali_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_ali *chip = card->private_data;
struct snd_ali_image *im;
int i, j;
@@ -1929,13 +1930,14 @@ static int ali_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int ali_resume(struct pci_dev *pci)
+static int ali_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_ali *chip = card->private_data;
struct snd_ali_image *im;
int i, j;
@@ -1982,6 +1984,11 @@ static int ali_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(ali_pm, ali_suspend, ali_resume);
+#define ALI_PM_OPS &ali_pm
+#else
+#define ALI_PM_OPS NULL
#endif /* CONFIG_PM */
static int snd_ali_free(struct snd_ali * codec)
@@ -2299,10 +2306,9 @@ static struct pci_driver ali5451_driver = {
.id_table = snd_ali_ids,
.probe = snd_ali_probe,
.remove = __devexit_p(snd_ali_remove),
-#ifdef CONFIG_PM
- .suspend = ali_suspend,
- .resume = ali_resume,
-#endif
+ .driver = {
+ .pm = ALI_PM_OPS,
+ },
};
module_pci_driver(ali5451_driver);
diff --git a/sound/pci/als300.c b/sound/pci/als300.c
index 59d65388faf5..68c4469c6d19 100644
--- a/sound/pci/als300.c
+++ b/sound/pci/als300.c
@@ -766,9 +766,10 @@ static int __devinit snd_als300_create(struct snd_card *card,
}
#ifdef CONFIG_PM
-static int snd_als300_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_als300_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_als300 *chip = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -777,13 +778,14 @@ static int snd_als300_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_als300_resume(struct pci_dev *pci)
+static int snd_als300_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_als300 *chip = card->private_data;
pci_set_power_state(pci, PCI_D0);
@@ -802,6 +804,11 @@ static int snd_als300_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(snd_als300_pm, snd_als300_suspend, snd_als300_resume);
+#define SND_ALS300_PM_OPS &snd_als300_pm
+#else
+#define SND_ALS300_PM_OPS NULL
#endif
static int __devinit snd_als300_probe(struct pci_dev *pci,
@@ -857,10 +864,9 @@ static struct pci_driver als300_driver = {
.id_table = snd_als300_ids,
.probe = snd_als300_probe,
.remove = __devexit_p(snd_als300_remove),
-#ifdef CONFIG_PM
- .suspend = snd_als300_suspend,
- .resume = snd_als300_resume,
-#endif
+ .driver = {
+ .pm = SND_ALS300_PM_OPS,
+ },
};
module_pci_driver(als300_driver);
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index 7d7f2598c748..0eeca49c5754 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -988,9 +988,10 @@ static void __devexit snd_card_als4000_remove(struct pci_dev *pci)
}
#ifdef CONFIG_PM
-static int snd_als4000_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_als4000_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_card_als4000 *acard = card->private_data;
struct snd_sb *chip = acard->chip;
@@ -1001,13 +1002,14 @@ static int snd_als4000_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_als4000_resume(struct pci_dev *pci)
+static int snd_als4000_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_card_als4000 *acard = card->private_data;
struct snd_sb *chip = acard->chip;
@@ -1033,18 +1035,21 @@ static int snd_als4000_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(snd_als4000_pm, snd_als4000_suspend, snd_als4000_resume);
+#define SND_ALS4000_PM_OPS &snd_als4000_pm
+#else
+#define SND_ALS4000_PM_OPS NULL
+#endif /* CONFIG_PM */
static struct pci_driver als4000_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_als4000_ids,
.probe = snd_card_als4000_probe,
.remove = __devexit_p(snd_card_als4000_remove),
-#ifdef CONFIG_PM
- .suspend = snd_als4000_suspend,
- .resume = snd_als4000_resume,
-#endif
+ .driver = {
+ .pm = SND_ALS4000_PM_OPS,
+ },
};
module_pci_driver(als4000_driver);
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index 156a94f8a123..31020d2a868b 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -1462,9 +1462,10 @@ static int __devinit snd_atiixp_mixer_new(struct atiixp *chip, int clock,
/*
* power management
*/
-static int snd_atiixp_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_atiixp_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct atiixp *chip = card->private_data;
int i;
@@ -1484,13 +1485,14 @@ static int snd_atiixp_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_atiixp_resume(struct pci_dev *pci)
+static int snd_atiixp_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct atiixp *chip = card->private_data;
int i;
@@ -1526,6 +1528,11 @@ static int snd_atiixp_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(snd_atiixp_pm, snd_atiixp_suspend, snd_atiixp_resume);
+#define SND_ATIIXP_PM_OPS &snd_atiixp_pm
+#else
+#define SND_ATIIXP_PM_OPS NULL
#endif /* CONFIG_PM */
@@ -1705,10 +1712,9 @@ static struct pci_driver atiixp_driver = {
.id_table = snd_atiixp_ids,
.probe = snd_atiixp_probe,
.remove = __devexit_p(snd_atiixp_remove),
-#ifdef CONFIG_PM
- .suspend = snd_atiixp_suspend,
- .resume = snd_atiixp_resume,
-#endif
+ .driver = {
+ .pm = SND_ATIIXP_PM_OPS,
+ },
};
module_pci_driver(atiixp_driver);
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 30a4fd96ce73..79e204ec623f 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -1117,9 +1117,10 @@ static int __devinit snd_atiixp_mixer_new(struct atiixp_modem *chip, int clock)
/*
* power management
*/
-static int snd_atiixp_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_atiixp_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct atiixp_modem *chip = card->private_data;
int i;
@@ -1133,13 +1134,14 @@ static int snd_atiixp_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_atiixp_resume(struct pci_dev *pci)
+static int snd_atiixp_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct atiixp_modem *chip = card->private_data;
int i;
@@ -1162,8 +1164,12 @@ static int snd_atiixp_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(snd_atiixp_pm, snd_atiixp_suspend, snd_atiixp_resume);
+#define SND_ATIIXP_PM_OPS &snd_atiixp_pm
+#else
+#define SND_ATIIXP_PM_OPS NULL
+#endif /* CONFIG_PM */
#ifdef CONFIG_PROC_FS
/*
@@ -1336,10 +1342,9 @@ static struct pci_driver atiixp_modem_driver = {
.id_table = snd_atiixp_ids,
.probe = snd_atiixp_probe,
.remove = __devexit_p(snd_atiixp_remove),
-#ifdef CONFIG_PM
- .suspend = snd_atiixp_suspend,
- .resume = snd_atiixp_resume,
-#endif
+ .driver = {
+ .pm = SND_ATIIXP_PM_OPS,
+ },
};
module_pci_driver(atiixp_modem_driver);
diff --git a/sound/pci/au88x0/au88x0_mixer.c b/sound/pci/au88x0/au88x0_mixer.c
index 557c782ae4fc..fa13efbebdaf 100644
--- a/sound/pci/au88x0/au88x0_mixer.c
+++ b/sound/pci/au88x0/au88x0_mixer.c
@@ -10,6 +10,15 @@
#include <sound/core.h>
#include "au88x0.h"
+static int remove_ctl(struct snd_card *card, const char *name)
+{
+ struct snd_ctl_elem_id id;
+ memset(&id, 0, sizeof(id));
+ strcpy(id.name, name);
+ id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ return snd_ctl_remove_id(card, &id);
+}
+
static int __devinit snd_vortex_mixer(vortex_t * vortex)
{
struct snd_ac97_bus *pbus;
@@ -28,5 +37,7 @@ static int __devinit snd_vortex_mixer(vortex_t * vortex)
ac97.scaps = AC97_SCAP_NO_SPDIF;
err = snd_ac97_mixer(pbus, &ac97, &vortex->codec);
vortex->isquad = ((vortex->codec == NULL) ? 0 : (vortex->codec->ext_id&0x80));
+ remove_ctl(vortex->card, "Master Mono Playback Volume");
+ remove_ctl(vortex->card, "Master Mono Playback Switch");
return err;
}
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index f0b4d7493af5..4dddd871548b 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -2794,9 +2794,10 @@ snd_azf3328_resume_ac97(const struct snd_azf3328 *chip)
}
static int
-snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state)
+snd_azf3328_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_azf3328 *chip = card->private_data;
u16 *saved_regs_ctrl_u16;
@@ -2824,14 +2825,15 @@ snd_azf3328_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
static int
-snd_azf3328_resume(struct pci_dev *pci)
+snd_azf3328_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
const struct snd_azf3328 *chip = card->private_data;
pci_set_power_state(pci, PCI_D0);
@@ -2859,18 +2861,21 @@ snd_azf3328_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(snd_azf3328_pm, snd_azf3328_suspend, snd_azf3328_resume);
+#define SND_AZF3328_PM_OPS &snd_azf3328_pm
+#else
+#define SND_AZF3328_PM_OPS NULL
+#endif /* CONFIG_PM */
static struct pci_driver azf3328_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_azf3328_ids,
.probe = snd_azf3328_probe,
.remove = __devexit_p(snd_azf3328_remove),
-#ifdef CONFIG_PM
- .suspend = snd_azf3328_suspend,
- .resume = snd_azf3328_resume,
-#endif
+ .driver = {
+ .pm = SND_AZF3328_PM_OPS,
+ },
};
module_pci_driver(azf3328_driver);
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index e76d68a7081f..83277b747b36 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1872,9 +1872,10 @@ static void __devexit snd_ca0106_remove(struct pci_dev *pci)
}
#ifdef CONFIG_PM
-static int snd_ca0106_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_ca0106_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_ca0106 *chip = card->private_data;
int i;
@@ -1889,13 +1890,14 @@ static int snd_ca0106_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_ca0106_resume(struct pci_dev *pci)
+static int snd_ca0106_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_ca0106 *chip = card->private_data;
int i;
@@ -1922,6 +1924,11 @@ static int snd_ca0106_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(snd_ca0106_pm, snd_ca0106_suspend, snd_ca0106_resume);
+#define SND_CA0106_PM_OPS &snd_ca0106_pm
+#else
+#define SND_CA0106_PM_OPS NULL
#endif
// PCI IDs
@@ -1937,10 +1944,9 @@ static struct pci_driver ca0106_driver = {
.id_table = snd_ca0106_ids,
.probe = snd_ca0106_probe,
.remove = __devexit_p(snd_ca0106_remove),
-#ifdef CONFIG_PM
- .suspend = snd_ca0106_suspend,
- .resume = snd_ca0106_resume,
-#endif
+ .driver = {
+ .pm = SND_CA0106_PM_OPS,
+ },
};
module_pci_driver(ca0106_driver);
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 3815bd4c6779..b7d6f2b886ef 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -3338,9 +3338,10 @@ static unsigned char saved_mixers[] = {
SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT,
};
-static int snd_cmipci_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_cmipci_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct cmipci *cm = card->private_data;
int i;
@@ -3361,13 +3362,14 @@ static int snd_cmipci_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_cmipci_resume(struct pci_dev *pci)
+static int snd_cmipci_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct cmipci *cm = card->private_data;
int i;
@@ -3396,6 +3398,11 @@ static int snd_cmipci_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(snd_cmipci_pm, snd_cmipci_suspend, snd_cmipci_resume);
+#define SND_CMIPCI_PM_OPS &snd_cmipci_pm
+#else
+#define SND_CMIPCI_PM_OPS NULL
#endif /* CONFIG_PM */
static struct pci_driver cmipci_driver = {
@@ -3403,10 +3410,9 @@ static struct pci_driver cmipci_driver = {
.id_table = snd_cmipci_ids,
.probe = snd_cmipci_probe,
.remove = __devexit_p(snd_cmipci_remove),
-#ifdef CONFIG_PM
- .suspend = snd_cmipci_suspend,
- .resume = snd_cmipci_resume,
-#endif
+ .driver = {
+ .pm = SND_CMIPCI_PM_OPS,
+ },
};
module_pci_driver(cmipci_driver);
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index 33506ee569bd..45a8317085f4 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -1997,9 +1997,10 @@ static int saved_regs[SUSPEND_REGISTERS] = {
#define CLKCR1_CKRA 0x00010000L
-static int cs4281_suspend(struct pci_dev *pci, pm_message_t state)
+static int cs4281_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct cs4281 *chip = card->private_data;
u32 ulCLK;
unsigned int i;
@@ -2040,13 +2041,14 @@ static int cs4281_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int cs4281_resume(struct pci_dev *pci)
+static int cs4281_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct cs4281 *chip = card->private_data;
unsigned int i;
u32 ulCLK;
@@ -2082,6 +2084,11 @@ static int cs4281_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(cs4281_pm, cs4281_suspend, cs4281_resume);
+#define CS4281_PM_OPS &cs4281_pm
+#else
+#define CS4281_PM_OPS NULL
#endif /* CONFIG_PM */
static struct pci_driver cs4281_driver = {
@@ -2089,10 +2096,9 @@ static struct pci_driver cs4281_driver = {
.id_table = snd_cs4281_ids,
.probe = snd_cs4281_probe,
.remove = __devexit_p(snd_cs4281_remove),
-#ifdef CONFIG_PM
- .suspend = cs4281_suspend,
- .resume = cs4281_resume,
-#endif
+ .driver = {
+ .pm = CS4281_PM_OPS,
+ },
};
module_pci_driver(cs4281_driver);
diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c
index 6cc7404e0e8f..1e007c736a8b 100644
--- a/sound/pci/cs46xx/cs46xx.c
+++ b/sound/pci/cs46xx/cs46xx.c
@@ -30,7 +30,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <sound/core.h>
-#include <sound/cs46xx.h>
+#include "cs46xx.h"
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
@@ -167,8 +167,9 @@ static struct pci_driver cs46xx_driver = {
.probe = snd_card_cs46xx_probe,
.remove = __devexit_p(snd_card_cs46xx_remove),
#ifdef CONFIG_PM
- .suspend = snd_cs46xx_suspend,
- .resume = snd_cs46xx_resume,
+ .driver = {
+ .pm = &snd_cs46xx_pm,
+ },
#endif
};
diff --git a/include/sound/cs46xx.h b/sound/pci/cs46xx/cs46xx.h
index e3005a674a24..29d8a8da1ba7 100644
--- a/include/sound/cs46xx.h
+++ b/sound/pci/cs46xx/cs46xx.h
@@ -23,10 +23,10 @@
*
*/
-#include "pcm.h"
-#include "pcm-indirect.h"
-#include "rawmidi.h"
-#include "ac97_codec.h"
+#include <sound/pcm.h>
+#include <sound/pcm-indirect.h>
+#include <sound/rawmidi.h>
+#include <sound/ac97_codec.h>
#include "cs46xx_dsp_spos.h"
/*
@@ -1730,8 +1730,7 @@ int snd_cs46xx_create(struct snd_card *card,
struct pci_dev *pci,
int external_amp, int thinkpad,
struct snd_cs46xx **rcodec);
-int snd_cs46xx_suspend(struct pci_dev *pci, pm_message_t state);
-int snd_cs46xx_resume(struct pci_dev *pci);
+extern const struct dev_pm_ops snd_cs46xx_pm;
int snd_cs46xx_pcm(struct snd_cs46xx *chip, int device, struct snd_pcm **rpcm);
int snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device, struct snd_pcm **rpcm);
diff --git a/include/sound/cs46xx_dsp_scb_types.h b/sound/pci/cs46xx/cs46xx_dsp_scb_types.h
index 080857ad0ca2..080857ad0ca2 100644
--- a/include/sound/cs46xx_dsp_scb_types.h
+++ b/sound/pci/cs46xx/cs46xx_dsp_scb_types.h
diff --git a/include/sound/cs46xx_dsp_spos.h b/sound/pci/cs46xx/cs46xx_dsp_spos.h
index 8008c59288a6..8008c59288a6 100644
--- a/include/sound/cs46xx_dsp_spos.h
+++ b/sound/pci/cs46xx/cs46xx_dsp_spos.h
diff --git a/include/sound/cs46xx_dsp_task_types.h b/sound/pci/cs46xx/cs46xx_dsp_task_types.h
index 5cf920bfda27..5cf920bfda27 100644
--- a/include/sound/cs46xx_dsp_task_types.h
+++ b/sound/pci/cs46xx/cs46xx_dsp_task_types.h
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 4fa53161b094..f75f5ffdfdfb 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -61,7 +61,7 @@
#include <sound/info.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
-#include <sound/cs46xx.h>
+#include "cs46xx.h"
#include <asm/io.h>
@@ -3599,9 +3599,10 @@ static unsigned int saved_regs[] = {
BA1_CVOL,
};
-int snd_cs46xx_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_cs46xx_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_cs46xx *chip = card->private_data;
int i, amp_saved;
@@ -3628,13 +3629,14 @@ int snd_cs46xx_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-int snd_cs46xx_resume(struct pci_dev *pci)
+static int snd_cs46xx_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_cs46xx *chip = card->private_data;
int amp_saved;
#ifdef CONFIG_SND_CS46XX_NEW_DSP
@@ -3707,6 +3709,8 @@ int snd_cs46xx_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+SIMPLE_DEV_PM_OPS(snd_cs46xx_pm, snd_cs46xx_suspend, snd_cs46xx_resume);
#endif /* CONFIG_PM */
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index e377287192aa..56fec0bc0efb 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -32,7 +32,7 @@
#include <sound/control.h>
#include <sound/info.h>
#include <sound/asoundef.h>
-#include <sound/cs46xx.h>
+#include "cs46xx.h"
#include "cs46xx_lib.h"
#include "dsp_spos.h"
diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c
index 00b148a10239..c2c695b07f8c 100644
--- a/sound/pci/cs46xx/dsp_spos_scb_lib.c
+++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c
@@ -31,7 +31,7 @@
#include <sound/core.h>
#include <sound/control.h>
#include <sound/info.h>
-#include <sound/cs46xx.h>
+#include "cs46xx.h"
#include "cs46xx_lib.h"
#include "dsp_spos.h"
diff --git a/sound/pci/cs5535audio/cs5535audio.c b/sound/pci/cs5535audio/cs5535audio.c
index 2c9697cf0a1a..51f64ba5facf 100644
--- a/sound/pci/cs5535audio/cs5535audio.c
+++ b/sound/pci/cs5535audio/cs5535audio.c
@@ -400,8 +400,9 @@ static struct pci_driver cs5535audio_driver = {
.probe = snd_cs5535audio_probe,
.remove = __devexit_p(snd_cs5535audio_remove),
#ifdef CONFIG_PM
- .suspend = snd_cs5535audio_suspend,
- .resume = snd_cs5535audio_resume,
+ .driver = {
+ .pm = &snd_cs5535audio_pm,
+ },
#endif
};
diff --git a/sound/pci/cs5535audio/cs5535audio.h b/sound/pci/cs5535audio/cs5535audio.h
index 51966d782a3c..bb3cc641130c 100644
--- a/sound/pci/cs5535audio/cs5535audio.h
+++ b/sound/pci/cs5535audio/cs5535audio.h
@@ -94,10 +94,7 @@ struct cs5535audio {
struct cs5535audio_dma dmas[NUM_CS5535AUDIO_DMAS];
};
-#ifdef CONFIG_PM
-int snd_cs5535audio_suspend(struct pci_dev *pci, pm_message_t state);
-int snd_cs5535audio_resume(struct pci_dev *pci);
-#endif
+extern const struct dev_pm_ops snd_cs5535audio_pm;
#ifdef CONFIG_OLPC
void __devinit olpc_prequirks(struct snd_card *card,
diff --git a/sound/pci/cs5535audio/cs5535audio_pm.c b/sound/pci/cs5535audio/cs5535audio_pm.c
index 185b00088320..6c34def5986d 100644
--- a/sound/pci/cs5535audio/cs5535audio_pm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pm.c
@@ -55,9 +55,10 @@ static void snd_cs5535audio_stop_hardware(struct cs5535audio *cs5535au)
}
-int snd_cs5535audio_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_cs5535audio_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct cs5535audio *cs5535au = card->private_data;
int i;
@@ -77,13 +78,14 @@ int snd_cs5535audio_suspend(struct pci_dev *pci, pm_message_t state)
return -EIO;
}
pci_disable_device(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-int snd_cs5535audio_resume(struct pci_dev *pci)
+static int snd_cs5535audio_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct cs5535audio *cs5535au = card->private_data;
u32 tmp;
int timeout;
@@ -129,3 +131,4 @@ int snd_cs5535audio_resume(struct pci_dev *pci)
return 0;
}
+SIMPLE_DEV_PM_OPS(snd_cs5535audio_pm, snd_cs5535audio_suspend, snd_cs5535audio_resume);
diff --git a/sound/pci/ctxfi/ctatc.c b/sound/pci/ctxfi/ctatc.c
index d8a4423539ce..8e40262d4117 100644
--- a/sound/pci/ctxfi/ctatc.c
+++ b/sound/pci/ctxfi/ctatc.c
@@ -1537,7 +1537,7 @@ static void atc_connect_resources(struct ct_atc *atc)
}
#ifdef CONFIG_PM
-static int atc_suspend(struct ct_atc *atc, pm_message_t state)
+static int atc_suspend(struct ct_atc *atc)
{
int i;
struct hw *hw = atc->hw;
@@ -1553,7 +1553,7 @@ static int atc_suspend(struct ct_atc *atc, pm_message_t state)
atc_release_resources(atc);
- hw->suspend(hw, state);
+ hw->suspend(hw);
return 0;
}
diff --git a/sound/pci/ctxfi/ctatc.h b/sound/pci/ctxfi/ctatc.h
index 3a0def656af0..653e813ad142 100644
--- a/sound/pci/ctxfi/ctatc.h
+++ b/sound/pci/ctxfi/ctatc.h
@@ -144,7 +144,7 @@ struct ct_atc {
struct ct_timer *timer;
#ifdef CONFIG_PM
- int (*suspend)(struct ct_atc *atc, pm_message_t state);
+ int (*suspend)(struct ct_atc *atc);
int (*resume)(struct ct_atc *atc);
#define NUM_PCMS (NUM_CTALSADEVS - 1)
struct snd_pcm *pcms[NUM_PCMS];
diff --git a/sound/pci/ctxfi/cthardware.h b/sound/pci/ctxfi/cthardware.h
index 908315bec3b4..c56fe533b3f3 100644
--- a/sound/pci/ctxfi/cthardware.h
+++ b/sound/pci/ctxfi/cthardware.h
@@ -73,7 +73,7 @@ struct hw {
int (*card_stop)(struct hw *hw);
int (*pll_init)(struct hw *hw, unsigned int rsr);
#ifdef CONFIG_PM
- int (*suspend)(struct hw *hw, pm_message_t state);
+ int (*suspend)(struct hw *hw);
int (*resume)(struct hw *hw, struct card_conf *info);
#endif
int (*is_adc_source_selected)(struct hw *hw, enum ADCSRC source);
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index a7df19791f5a..dc1969bc67d4 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -2086,7 +2086,7 @@ static int hw_card_init(struct hw *hw, struct card_conf *info)
}
#ifdef CONFIG_PM
-static int hw_suspend(struct hw *hw, pm_message_t state)
+static int hw_suspend(struct hw *hw)
{
struct pci_dev *pci = hw->pci;
@@ -2099,7 +2099,7 @@ static int hw_suspend(struct hw *hw, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
diff --git a/sound/pci/ctxfi/cthw20k2.c b/sound/pci/ctxfi/cthw20k2.c
index d6c54b524bfa..9d1231dc4ae2 100644
--- a/sound/pci/ctxfi/cthw20k2.c
+++ b/sound/pci/ctxfi/cthw20k2.c
@@ -2202,7 +2202,7 @@ static int hw_card_init(struct hw *hw, struct card_conf *info)
}
#ifdef CONFIG_PM
-static int hw_suspend(struct hw *hw, pm_message_t state)
+static int hw_suspend(struct hw *hw)
{
struct pci_dev *pci = hw->pci;
@@ -2210,7 +2210,7 @@ static int hw_suspend(struct hw *hw, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
diff --git a/sound/pci/ctxfi/xfi.c b/sound/pci/ctxfi/xfi.c
index 75aa2c338410..e002183ef8b2 100644
--- a/sound/pci/ctxfi/xfi.c
+++ b/sound/pci/ctxfi/xfi.c
@@ -126,21 +126,26 @@ static void __devexit ct_card_remove(struct pci_dev *pci)
}
#ifdef CONFIG_PM
-static int ct_card_suspend(struct pci_dev *pci, pm_message_t state)
+static int ct_card_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_card *card = dev_get_drvdata(dev);
struct ct_atc *atc = card->private_data;
- return atc->suspend(atc, state);
+ return atc->suspend(atc);
}
-static int ct_card_resume(struct pci_dev *pci)
+static int ct_card_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_card *card = dev_get_drvdata(dev);
struct ct_atc *atc = card->private_data;
return atc->resume(atc);
}
+
+static SIMPLE_DEV_PM_OPS(ct_card_pm, ct_card_suspend, ct_card_resume);
+#define CT_CARD_PM_OPS &ct_card_pm
+#else
+#define CT_CARD_PM_OPS NULL
#endif
static struct pci_driver ct_driver = {
@@ -148,10 +153,9 @@ static struct pci_driver ct_driver = {
.id_table = ct_pci_dev_ids,
.probe = ct_card_probe,
.remove = __devexit_p(ct_card_remove),
-#ifdef CONFIG_PM
- .suspend = ct_card_suspend,
- .resume = ct_card_resume,
-#endif
+ .driver = {
+ .pm = CT_CARD_PM_OPS,
+ },
};
module_pci_driver(ct_driver);
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
index 0f8eda1dafdb..0ff754f180d0 100644
--- a/sound/pci/echoaudio/echoaudio.c
+++ b/sound/pci/echoaudio/echoaudio.c
@@ -2205,9 +2205,10 @@ ctl_error:
#if defined(CONFIG_PM)
-static int snd_echo_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_echo_suspend(struct device *dev)
{
- struct echoaudio *chip = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct echoaudio *chip = dev_get_drvdata(dev);
DE_INIT(("suspend start\n"));
snd_pcm_suspend_all(chip->analog_pcm);
@@ -2242,9 +2243,10 @@ static int snd_echo_suspend(struct pci_dev *pci, pm_message_t state)
-static int snd_echo_resume(struct pci_dev *pci)
+static int snd_echo_resume(struct device *dev)
{
- struct echoaudio *chip = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct echoaudio *chip = dev_get_drvdata(dev);
struct comm_page *commpage, *commpage_bak;
u32 pipe_alloc_mask;
int err;
@@ -2307,10 +2309,13 @@ static int snd_echo_resume(struct pci_dev *pci)
return 0;
}
+static SIMPLE_DEV_PM_OPS(snd_echo_pm, snd_echo_suspend, snd_echo_resume);
+#define SND_ECHO_PM_OPS &snd_echo_pm
+#else
+#define SND_ECHO_PM_OPS NULL
#endif /* CONFIG_PM */
-
static void __devexit snd_echo_remove(struct pci_dev *pci)
{
struct echoaudio *chip;
@@ -2333,10 +2338,9 @@ static struct pci_driver echo_driver = {
.id_table = snd_echo_ids,
.probe = snd_echo_probe,
.remove = __devexit_p(snd_echo_remove),
-#ifdef CONFIG_PM
- .suspend = snd_echo_suspend,
- .resume = snd_echo_resume,
-#endif /* CONFIG_PM */
+ .driver = {
+ .pm = SND_ECHO_PM_OPS,
+ },
};
module_pci_driver(echo_driver);
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index 7fdbbe4d9965..ddac4e6d660d 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -207,9 +207,10 @@ static void __devexit snd_card_emu10k1_remove(struct pci_dev *pci)
#ifdef CONFIG_PM
-static int snd_emu10k1_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_emu10k1_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_emu10k1 *emu = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -231,13 +232,14 @@ static int snd_emu10k1_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_emu10k1_resume(struct pci_dev *pci)
+static int snd_emu10k1_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_emu10k1 *emu = card->private_data;
pci_set_power_state(pci, PCI_D0);
@@ -261,17 +263,21 @@ static int snd_emu10k1_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
-#endif
+
+static SIMPLE_DEV_PM_OPS(snd_emu10k1_pm, snd_emu10k1_suspend, snd_emu10k1_resume);
+#define SND_EMU10K1_PM_OPS &snd_emu10k1_pm
+#else
+#define SND_EMU10K1_PM_OPS NULL
+#endif /* CONFIG_PM */
static struct pci_driver emu10k1_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_emu10k1_ids,
.probe = snd_card_emu10k1_probe,
.remove = __devexit_p(snd_card_emu10k1_remove),
-#ifdef CONFIG_PM
- .suspend = snd_emu10k1_suspend,
- .resume = snd_emu10k1_resume,
-#endif
+ .driver = {
+ .pm = SND_EMU10K1_PM_OPS,
+ },
};
module_pci_driver(emu10k1_driver);
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 3821c81d1c99..f7e6f73186e1 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -2033,9 +2033,10 @@ static void snd_ensoniq_chip_init(struct ensoniq *ensoniq)
}
#ifdef CONFIG_PM
-static int snd_ensoniq_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_ensoniq_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct ensoniq *ensoniq = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -2058,13 +2059,14 @@ static int snd_ensoniq_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_ensoniq_resume(struct pci_dev *pci)
+static int snd_ensoniq_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct ensoniq *ensoniq = card->private_data;
pci_set_power_state(pci, PCI_D0);
@@ -2087,8 +2089,12 @@ static int snd_ensoniq_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(snd_ensoniq_pm, snd_ensoniq_suspend, snd_ensoniq_resume);
+#define SND_ENSONIQ_PM_OPS &snd_ensoniq_pm
+#else
+#define SND_ENSONIQ_PM_OPS NULL
+#endif /* CONFIG_PM */
static int __devinit snd_ensoniq_create(struct snd_card *card,
struct pci_dev *pci,
@@ -2493,10 +2499,9 @@ static struct pci_driver ens137x_driver = {
.id_table = snd_audiopci_ids,
.probe = snd_audiopci_probe,
.remove = __devexit_p(snd_audiopci_remove),
-#ifdef CONFIG_PM
- .suspend = snd_ensoniq_suspend,
- .resume = snd_ensoniq_resume,
-#endif
+ .driver = {
+ .pm = SND_ENSONIQ_PM_OPS,
+ },
};
module_pci_driver(ens137x_driver);
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 82c8d8c5c52a..dbb81807bc1a 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -1321,35 +1321,30 @@ static int snd_es1938_put_double(struct snd_kcontrol *kcontrol,
return change;
}
-static unsigned int db_scale_master[] = {
- TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(db_scale_master,
0, 54, TLV_DB_SCALE_ITEM(-3600, 50, 1),
54, 63, TLV_DB_SCALE_ITEM(-900, 100, 0),
-};
+);
-static unsigned int db_scale_audio1[] = {
- TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(db_scale_audio1,
0, 8, TLV_DB_SCALE_ITEM(-3300, 300, 1),
8, 15, TLV_DB_SCALE_ITEM(-900, 150, 0),
-};
+);
-static unsigned int db_scale_audio2[] = {
- TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(db_scale_audio2,
0, 8, TLV_DB_SCALE_ITEM(-3450, 300, 1),
8, 15, TLV_DB_SCALE_ITEM(-1050, 150, 0),
-};
+);
-static unsigned int db_scale_mic[] = {
- TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(db_scale_mic,
0, 8, TLV_DB_SCALE_ITEM(-2400, 300, 1),
8, 15, TLV_DB_SCALE_ITEM(0, 150, 0),
-};
+);
-static unsigned int db_scale_line[] = {
- TLV_DB_RANGE_HEAD(2),
+static const DECLARE_TLV_DB_RANGE(db_scale_line,
0, 8, TLV_DB_SCALE_ITEM(-3150, 300, 1),
8, 15, TLV_DB_SCALE_ITEM(-750, 150, 0),
-};
+);
static const DECLARE_TLV_DB_SCALE(db_scale_capture, 0, 150, 0);
@@ -1474,9 +1469,10 @@ static unsigned char saved_regs[SAVED_REG_SIZE+1] = {
};
-static int es1938_suspend(struct pci_dev *pci, pm_message_t state)
+static int es1938_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct es1938 *chip = card->private_data;
unsigned char *s, *d;
@@ -1494,13 +1490,14 @@ static int es1938_suspend(struct pci_dev *pci, pm_message_t state)
}
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int es1938_resume(struct pci_dev *pci)
+static int es1938_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct es1938 *chip = card->private_data;
unsigned char *s, *d;
@@ -1534,6 +1531,11 @@ static int es1938_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(es1938_pm, es1938_suspend, es1938_resume);
+#define ES1938_PM_OPS &es1938_pm
+#else
+#define ES1938_PM_OPS NULL
#endif /* CONFIG_PM */
#ifdef SUPPORT_JOYSTICK
@@ -1887,10 +1889,9 @@ static struct pci_driver es1938_driver = {
.id_table = snd_es1938_ids,
.probe = snd_es1938_probe,
.remove = __devexit_p(snd_es1938_remove),
-#ifdef CONFIG_PM
- .suspend = es1938_suspend,
- .resume = es1938_resume,
-#endif
+ .driver = {
+ .pm = ES1938_PM_OPS,
+ },
};
module_pci_driver(es1938_driver);
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 52b5c0bf90c1..fb4c90b99c00 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -2381,9 +2381,10 @@ static void snd_es1968_start_irq(struct es1968 *chip)
/*
* PM support
*/
-static int es1968_suspend(struct pci_dev *pci, pm_message_t state)
+static int es1968_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct es1968 *chip = card->private_data;
if (! chip->do_pm)
@@ -2398,13 +2399,14 @@ static int es1968_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int es1968_resume(struct pci_dev *pci)
+static int es1968_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct es1968 *chip = card->private_data;
struct esschan *es;
@@ -2454,6 +2456,11 @@ static int es1968_resume(struct pci_dev *pci)
chip->in_suspend = 0;
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(es1968_pm, es1968_suspend, es1968_resume);
+#define ES1968_PM_OPS &es1968_pm
+#else
+#define ES1968_PM_OPS NULL
#endif /* CONFIG_PM */
#ifdef SUPPORT_JOYSTICK
@@ -2903,10 +2910,9 @@ static struct pci_driver es1968_driver = {
.id_table = snd_es1968_ids,
.probe = snd_es1968_probe,
.remove = __devexit_p(snd_es1968_remove),
-#ifdef CONFIG_PM
- .suspend = es1968_suspend,
- .resume = es1968_resume,
-#endif
+ .driver = {
+ .pm = ES1968_PM_OPS,
+ },
};
module_pci_driver(es1968_driver);
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index b32e8024ea86..522c8706f244 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1369,9 +1369,10 @@ static unsigned char saved_regs[] = {
FM801_CODEC_CTRL, FM801_I2S_MODE, FM801_VOLUME, FM801_GEN_CTRL,
};
-static int snd_fm801_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_fm801_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct fm801 *chip = card->private_data;
int i;
@@ -1385,13 +1386,14 @@ static int snd_fm801_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_fm801_resume(struct pci_dev *pci)
+static int snd_fm801_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct fm801 *chip = card->private_data;
int i;
@@ -1414,17 +1416,21 @@ static int snd_fm801_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
-#endif
+
+static SIMPLE_DEV_PM_OPS(snd_fm801_pm, snd_fm801_suspend, snd_fm801_resume);
+#define SND_FM801_PM_OPS &snd_fm801_pm
+#else
+#define SND_FM801_PM_OPS NULL
+#endif /* CONFIG_PM */
static struct pci_driver fm801_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_fm801_ids,
.probe = snd_card_fm801_probe,
.remove = __devexit_p(snd_card_fm801_remove),
-#ifdef CONFIG_PM
- .suspend = snd_fm801_suspend,
- .resume = snd_fm801_resume,
-#endif
+ .driver = {
+ .pm = SND_FM801_PM_OPS,
+ },
};
module_pci_driver(fm801_driver);
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index d03079764189..194d625c1f83 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -53,15 +53,14 @@ config SND_HDA_INPUT_BEEP
driver. This interface is used to generate digital beeps.
config SND_HDA_INPUT_BEEP_MODE
- int "Digital beep registration mode (0=off, 1=on, 2=mute sw on/off)"
+ int "Digital beep registration mode (0=off, 1=on)"
depends on SND_HDA_INPUT_BEEP=y
default "1"
- range 0 2
+ range 0 1
help
Set 0 to disable the digital beep interface for HD-audio by default.
Set 1 to always enable the digital beep interface for HD-audio by
- default. Set 2 to control the beep device registration to input
- layer using a "Beep Switch" in mixer applications.
+ default.
config SND_HDA_INPUT_JACK
bool "Support jack plugging notification via input layer"
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index f7520b9f909c..647218d69f68 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -727,7 +727,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
models++;
}
}
- if (id < 0) {
+ if (id < 0 && quirk) {
q = snd_pci_quirk_lookup(codec->bus->pci, quirk);
if (q) {
id = q->value;
@@ -736,7 +736,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
#endif
}
}
- if (id < 0) {
+ if (id < 0 && quirk) {
for (q = quirk; q->subvendor; q++) {
unsigned int vendorid =
q->subdevice | (q->subvendor << 16);
diff --git a/sound/pci/hda/hda_beep.c b/sound/pci/hda/hda_beep.c
index 60738e52b8f9..0bc2315b181d 100644
--- a/sound/pci/hda/hda_beep.c
+++ b/sound/pci/hda/hda_beep.c
@@ -162,50 +162,20 @@ static int snd_hda_do_attach(struct hda_beep *beep)
return 0;
}
-static void snd_hda_do_register(struct work_struct *work)
-{
- struct hda_beep *beep =
- container_of(work, struct hda_beep, register_work);
-
- mutex_lock(&beep->mutex);
- if (beep->enabled && !beep->dev)
- snd_hda_do_attach(beep);
- mutex_unlock(&beep->mutex);
-}
-
-static void snd_hda_do_unregister(struct work_struct *work)
-{
- struct hda_beep *beep =
- container_of(work, struct hda_beep, unregister_work.work);
-
- mutex_lock(&beep->mutex);
- if (!beep->enabled && beep->dev)
- snd_hda_do_detach(beep);
- mutex_unlock(&beep->mutex);
-}
-
int snd_hda_enable_beep_device(struct hda_codec *codec, int enable)
{
struct hda_beep *beep = codec->beep;
- enable = !!enable;
- if (beep == NULL)
+ if (!beep)
return 0;
+ enable = !!enable;
if (beep->enabled != enable) {
beep->enabled = enable;
if (!enable) {
+ cancel_work_sync(&beep->beep_work);
/* turn off beep */
snd_hda_codec_write(beep->codec, beep->nid, 0,
AC_VERB_SET_BEEP_CONTROL, 0);
}
- if (beep->mode == HDA_BEEP_MODE_SWREG) {
- if (enable) {
- cancel_delayed_work(&beep->unregister_work);
- schedule_work(&beep->register_work);
- } else {
- schedule_delayed_work(&beep->unregister_work,
- HZ);
- }
- }
return 1;
}
return 0;
@@ -215,6 +185,7 @@ EXPORT_SYMBOL_HDA(snd_hda_enable_beep_device);
int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
{
struct hda_beep *beep;
+ int err;
if (!snd_hda_get_bool_hint(codec, "beep"))
return 0; /* disabled explicitly by hints */
@@ -232,21 +203,16 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
beep->nid = nid;
beep->codec = codec;
- beep->mode = codec->beep_mode;
codec->beep = beep;
- INIT_WORK(&beep->register_work, &snd_hda_do_register);
- INIT_DELAYED_WORK(&beep->unregister_work, &snd_hda_do_unregister);
INIT_WORK(&beep->beep_work, &snd_hda_generate_beep);
mutex_init(&beep->mutex);
- if (beep->mode == HDA_BEEP_MODE_ON) {
- int err = snd_hda_do_attach(beep);
- if (err < 0) {
- kfree(beep);
- codec->beep = NULL;
- return err;
- }
+ err = snd_hda_do_attach(beep);
+ if (err < 0) {
+ kfree(beep);
+ codec->beep = NULL;
+ return err;
}
return 0;
@@ -257,8 +223,6 @@ void snd_hda_detach_beep_device(struct hda_codec *codec)
{
struct hda_beep *beep = codec->beep;
if (beep) {
- cancel_work_sync(&beep->register_work);
- cancel_delayed_work(&beep->unregister_work);
if (beep->dev)
snd_hda_do_detach(beep);
codec->beep = NULL;
@@ -266,3 +230,31 @@ void snd_hda_detach_beep_device(struct hda_codec *codec)
}
}
EXPORT_SYMBOL_HDA(snd_hda_detach_beep_device);
+
+/* get/put callbacks for beep mute mixer switches */
+int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct hda_beep *beep = codec->beep;
+ if (beep) {
+ ucontrol->value.integer.value[0] =
+ ucontrol->value.integer.value[1] =
+ beep->enabled;
+ return 0;
+ }
+ return snd_hda_mixer_amp_switch_get(kcontrol, ucontrol);
+}
+EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_get_beep);
+
+int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct hda_beep *beep = codec->beep;
+ if (beep)
+ snd_hda_enable_beep_device(codec,
+ *ucontrol->value.integer.value);
+ return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
+}
+EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_put_beep);
diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h
index 55f0647458c7..4dc6933bc655 100644
--- a/sound/pci/hda/hda_beep.h
+++ b/sound/pci/hda/hda_beep.h
@@ -26,21 +26,16 @@
#define HDA_BEEP_MODE_OFF 0
#define HDA_BEEP_MODE_ON 1
-#define HDA_BEEP_MODE_SWREG 2
/* beep information */
struct hda_beep {
struct input_dev *dev;
struct hda_codec *codec;
- unsigned int mode;
char phys[32];
int tone;
hda_nid_t nid;
unsigned int enabled:1;
- unsigned int request_enable:1;
unsigned int linear_tone:1; /* linear tone for IDT/STAC codec */
- struct work_struct register_work; /* registration work */
- struct delayed_work unregister_work; /* unregistration work */
struct work_struct beep_work; /* scheduled task for beep event */
struct mutex mutex;
};
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 51cb2a2e4fce..88a9c20eb7a2 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2676,25 +2676,6 @@ int snd_hda_mixer_amp_switch_put(struct snd_kcontrol *kcontrol,
}
EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_put);
-#ifdef CONFIG_SND_HDA_INPUT_BEEP
-/**
- * snd_hda_mixer_amp_switch_put_beep - Put callback for a beep AMP switch
- *
- * This function calls snd_hda_enable_beep_device(), which behaves differently
- * depending on beep_mode option.
- */
-int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- long *valp = ucontrol->value.integer.value;
-
- snd_hda_enable_beep_device(codec, *valp);
- return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
-}
-EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_put_beep);
-#endif /* CONFIG_SND_HDA_INPUT_BEEP */
-
/*
* bound volume controls
*
@@ -3509,22 +3490,52 @@ void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
EXPORT_SYMBOL_HDA(snd_hda_codec_set_power_to_all);
/*
+ * supported power states check
+ */
+static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, hda_nid_t fg,
+ unsigned int power_state)
+{
+ int sup = snd_hda_param_read(codec, fg, AC_PAR_POWER_STATE);
+
+ if (sup < 0)
+ return false;
+ if (sup & power_state)
+ return true;
+ else
+ return false;
+}
+
+/*
* set power state of the codec
*/
static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
unsigned int power_state)
{
+ int count;
+ unsigned int state;
+
if (codec->patch_ops.set_power_state) {
codec->patch_ops.set_power_state(codec, fg, power_state);
return;
}
/* this delay seems necessary to avoid click noise at power-down */
- if (power_state == AC_PWRST_D3)
- msleep(100);
- snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE,
- power_state);
- snd_hda_codec_set_power_to_all(codec, fg, power_state, true);
+ if (power_state == AC_PWRST_D3) {
+ /* transition time less than 10ms for power down */
+ bool epss = snd_hda_codec_get_supported_ps(codec, fg, AC_PWRST_EPSS);
+ msleep(epss ? 10 : 100);
+ }
+
+ /* repeat power states setting at most 10 times*/
+ for (count = 0; count < 10; count++) {
+ snd_hda_codec_read(codec, fg, 0, AC_VERB_SET_POWER_STATE,
+ power_state);
+ snd_hda_codec_set_power_to_all(codec, fg, power_state, true);
+ state = snd_hda_codec_read(codec, fg, 0,
+ AC_VERB_GET_POWER_STATE, 0);
+ if (!(state & AC_PWRST_ERROR))
+ break;
+ }
}
#ifdef CONFIG_SND_HDA_HWDEP
@@ -3545,7 +3556,7 @@ static inline void hda_exec_init_verbs(struct hda_codec *codec) {}
static void hda_call_codec_suspend(struct hda_codec *codec)
{
if (codec->patch_ops.suspend)
- codec->patch_ops.suspend(codec, PMSG_SUSPEND);
+ codec->patch_ops.suspend(codec);
hda_cleanup_all_streams(codec);
hda_set_power_state(codec,
codec->afg ? codec->afg : codec->mfg,
@@ -4418,6 +4429,13 @@ static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)
cancel_delayed_work_sync(&codec->power_work);
spin_lock(&codec->power_lock);
+ /* If the power down delayed work was cancelled above before starting,
+ * then there is no need to go through power up here.
+ */
+ if (codec->power_on) {
+ spin_unlock(&codec->power_lock);
+ return;
+ }
trace_hda_power_up(codec);
snd_hda_update_power_acct(codec);
codec->power_on = 1;
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 2fdaadbb4326..c422d330ca54 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -323,6 +323,9 @@ enum {
#define AC_PWRST_D1 0x01
#define AC_PWRST_D2 0x02
#define AC_PWRST_D3 0x03
+#define AC_PWRST_ERROR (1<<8)
+#define AC_PWRST_CLK_STOP_OK (1<<9)
+#define AC_PWRST_SETTING_RESET (1<<10)
/* Processing capabilies */
#define AC_PCAP_BENIGN (1<<0)
@@ -703,7 +706,7 @@ struct hda_codec_ops {
void (*set_power_state)(struct hda_codec *codec, hda_nid_t fg,
unsigned int power_state);
#ifdef CONFIG_PM
- int (*suspend)(struct hda_codec *codec, pm_message_t state);
+ int (*suspend)(struct hda_codec *codec);
int (*resume)(struct hda_codec *codec);
#endif
#ifdef CONFIG_SND_HDA_POWER_SAVE
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 7757536b9d5f..c8aced182fd1 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -72,7 +72,7 @@ static int enable_msi = -1;
static char *patch[SNDRV_CARDS];
#endif
#ifdef CONFIG_SND_HDA_INPUT_BEEP
-static int beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] =
+static bool beep_mode[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS-1)] =
CONFIG_SND_HDA_INPUT_BEEP_MODE};
#endif
@@ -103,9 +103,9 @@ module_param_array(patch, charp, NULL, 0444);
MODULE_PARM_DESC(patch, "Patch file for Intel HD audio interface.");
#endif
#ifdef CONFIG_SND_HDA_INPUT_BEEP
-module_param_array(beep_mode, int, NULL, 0444);
+module_param_array(beep_mode, bool, NULL, 0444);
MODULE_PARM_DESC(beep_mode, "Select HDA Beep registration mode "
- "(0=off, 1=on, 2=mute switch on/off) (default=1).");
+ "(0=off, 1=on) (default=1).");
#endif
#ifdef CONFIG_SND_HDA_POWER_SAVE
@@ -151,6 +151,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
"{Intel, CPT},"
"{Intel, PPT},"
"{Intel, LPT},"
+ "{Intel, HPT},"
"{Intel, PBG},"
"{Intel, SCH},"
"{ATI, SB450},"
@@ -535,6 +536,7 @@ enum {
#define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */
#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */
#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
+#define AZX_DCAPS_POSFIX_COMBO (1 << 24) /* Use COMBO as default */
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
@@ -2403,9 +2405,10 @@ static void azx_power_notify(struct hda_bus *bus)
* power management
*/
-static int azx_suspend(struct pci_dev *pci, pm_message_t state)
+static int azx_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
struct azx_pcm *p;
@@ -2424,13 +2427,14 @@ static int azx_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_msi(chip->pci);
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int azx_resume(struct pci_dev *pci)
+static int azx_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
pci_set_power_state(pci, PCI_D0);
@@ -2455,6 +2459,12 @@ static int azx_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+static SIMPLE_DEV_PM_OPS(azx_pm, azx_suspend, azx_resume);
+#define AZX_PM_OPS &azx_pm
+#else
+#define azx_suspend(dev)
+#define azx_resume(dev)
+#define AZX_PM_OPS NULL
#endif /* CONFIG_PM */
@@ -2521,13 +2531,13 @@ static void azx_vs_set_state(struct pci_dev *pci,
disabled ? "Disabling" : "Enabling",
pci_name(chip->pci));
if (disabled) {
- azx_suspend(pci, PMSG_FREEZE);
+ azx_suspend(&pci->dev);
chip->disabled = true;
snd_hda_lock_devices(chip->bus);
} else {
snd_hda_unlock_devices(chip->bus);
chip->disabled = false;
- azx_resume(pci);
+ azx_resume(&pci->dev);
}
}
}
@@ -2731,6 +2741,10 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
snd_printd(SFX "Using LPIB position fix\n");
return POS_FIX_LPIB;
}
+ if (chip->driver_caps & AZX_DCAPS_POSFIX_COMBO) {
+ snd_printd(SFX "Using COMBO position fix\n");
+ return POS_FIX_COMBO;
+ }
return POS_FIX_AUTO;
}
@@ -3243,7 +3257,7 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* CPT */
{ PCI_DEVICE(0x8086, 0x1c20),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE },
+ AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
/* PBG */
{ PCI_DEVICE(0x8086, 0x1d20),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
@@ -3251,11 +3265,15 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* Panther Point */
{ PCI_DEVICE(0x8086, 0x1e20),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE},
+ AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
/* Lynx Point */
{ PCI_DEVICE(0x8086, 0x8c20),
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE},
+ AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
+ /* Haswell */
+ { PCI_DEVICE(0x8086, 0x0c0c),
+ .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
+ AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO },
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
@@ -3341,6 +3359,10 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
/* VIA VT8251/VT8237A */
{ PCI_DEVICE(0x1106, 0x3288),
.driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
+ /* VIA GFX VT7122/VX900 */
+ { PCI_DEVICE(0x1106, 0x9170), .driver_data = AZX_DRIVER_GENERIC },
+ /* VIA GFX VT6122/VX11 */
+ { PCI_DEVICE(0x1106, 0x9140), .driver_data = AZX_DRIVER_GENERIC },
/* SIS966 */
{ PCI_DEVICE(0x1039, 0x7502), .driver_data = AZX_DRIVER_SIS },
/* ULI M5461 */
@@ -3398,10 +3420,9 @@ static struct pci_driver azx_driver = {
.id_table = azx_ids,
.probe = azx_probe,
.remove = __devexit_p(azx_remove),
-#ifdef CONFIG_PM
- .suspend = azx_suspend,
- .resume = azx_resume,
-#endif
+ .driver = {
+ .pm = AZX_PM_OPS,
+ },
};
module_pci_driver(azx_driver);
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index 2dd1c113a4c1..aaccc0236bda 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -127,10 +127,15 @@ void snd_hda_jack_tbl_clear(struct hda_codec *codec)
static void jack_detect_update(struct hda_codec *codec,
struct hda_jack_tbl *jack)
{
- if (jack->jack_dirty || !jack->jack_detect) {
+ if (!jack->jack_dirty)
+ return;
+
+ if (jack->phantom_jack)
+ jack->pin_sense = AC_PINSENSE_PRESENCE;
+ else
jack->pin_sense = read_pin_sense(codec, jack->nid);
- jack->jack_dirty = 0;
- }
+
+ jack->jack_dirty = 0;
}
/**
@@ -264,8 +269,8 @@ static void hda_free_jack_priv(struct snd_jack *jack)
* This assigns a jack-detection kctl to the given pin. The kcontrol
* will have the given name and index.
*/
-int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
- const char *name, int idx)
+static int __snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
+ const char *name, int idx, bool phantom_jack)
{
struct hda_jack_tbl *jack;
struct snd_kcontrol *kctl;
@@ -283,47 +288,81 @@ int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
if (err < 0)
return err;
jack->kctl = kctl;
+ jack->phantom_jack = !!phantom_jack;
+
state = snd_hda_jack_detect(codec, nid);
snd_kctl_jack_report(codec->bus->card, kctl, state);
#ifdef CONFIG_SND_HDA_INPUT_JACK
- jack->type = get_input_jack_type(codec, nid);
- err = snd_jack_new(codec->bus->card, name, jack->type, &jack->jack);
- if (err < 0)
- return err;
- jack->jack->private_data = jack;
- jack->jack->private_free = hda_free_jack_priv;
- snd_jack_report(jack->jack, state ? jack->type : 0);
+ if (!phantom_jack) {
+ jack->type = get_input_jack_type(codec, nid);
+ err = snd_jack_new(codec->bus->card, name, jack->type,
+ &jack->jack);
+ if (err < 0)
+ return err;
+ jack->jack->private_data = jack;
+ jack->jack->private_free = hda_free_jack_priv;
+ snd_jack_report(jack->jack, state ? jack->type : 0);
+ }
#endif
return 0;
}
+
+int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
+ const char *name, int idx)
+{
+ return __snd_hda_jack_add_kctl(codec, nid, name, idx, false);
+}
EXPORT_SYMBOL_HDA(snd_hda_jack_add_kctl);
+/* get the unique index number for the given kctl name */
+static int get_unique_index(struct hda_codec *codec, const char *name, int idx)
+{
+ struct hda_jack_tbl *jack;
+ int i, len = strlen(name);
+ again:
+ jack = codec->jacktbl.list;
+ for (i = 0; i < codec->jacktbl.used; i++, jack++) {
+ /* jack->kctl.id contains "XXX Jack" name string with index */
+ if (jack->kctl &&
+ !strncmp(name, jack->kctl->id.name, len) &&
+ !strcmp(" Jack", jack->kctl->id.name + len) &&
+ jack->kctl->id.index == idx) {
+ idx++;
+ goto again;
+ }
+ }
+ return idx;
+}
+
static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
- const struct auto_pin_cfg *cfg,
- char *lastname, int *lastidx)
+ const struct auto_pin_cfg *cfg)
{
unsigned int def_conf, conn;
char name[44];
int idx, err;
+ bool phantom_jack;
if (!nid)
return 0;
- if (!is_jack_detectable(codec, nid))
- return 0;
def_conf = snd_hda_codec_get_pincfg(codec, nid);
conn = get_defcfg_connect(def_conf);
- if (conn != AC_JACK_PORT_COMPLEX)
+ if (conn == AC_JACK_PORT_NONE)
return 0;
+ phantom_jack = (conn != AC_JACK_PORT_COMPLEX) ||
+ !is_jack_detectable(codec, nid);
snd_hda_get_pin_label(codec, nid, cfg, name, sizeof(name), &idx);
- if (!strcmp(name, lastname) && idx == *lastidx)
- idx++;
- strncpy(lastname, name, 44);
- *lastidx = idx;
- err = snd_hda_jack_add_kctl(codec, nid, name, idx);
+ if (phantom_jack)
+ /* Example final name: "Internal Mic Phantom Jack" */
+ strncat(name, " Phantom", sizeof(name) - strlen(name) - 1);
+ idx = get_unique_index(codec, name, idx);
+ err = __snd_hda_jack_add_kctl(codec, nid, name, idx, phantom_jack);
if (err < 0)
return err;
- return snd_hda_jack_detect_enable(codec, nid, 0);
+
+ if (!phantom_jack)
+ return snd_hda_jack_detect_enable(codec, nid, 0);
+ return 0;
}
/**
@@ -333,42 +372,41 @@ int snd_hda_jack_add_kctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
{
const hda_nid_t *p;
- int i, err, lastidx = 0;
- char lastname[44] = "";
+ int i, err;
for (i = 0, p = cfg->line_out_pins; i < cfg->line_outs; i++, p++) {
- err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
+ err = add_jack_kctl(codec, *p, cfg);
if (err < 0)
return err;
}
for (i = 0, p = cfg->hp_pins; i < cfg->hp_outs; i++, p++) {
if (*p == *cfg->line_out_pins) /* might be duplicated */
break;
- err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
+ err = add_jack_kctl(codec, *p, cfg);
if (err < 0)
return err;
}
for (i = 0, p = cfg->speaker_pins; i < cfg->speaker_outs; i++, p++) {
if (*p == *cfg->line_out_pins) /* might be duplicated */
break;
- err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
+ err = add_jack_kctl(codec, *p, cfg);
if (err < 0)
return err;
}
for (i = 0; i < cfg->num_inputs; i++) {
- err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg, lastname, &lastidx);
+ err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg);
if (err < 0)
return err;
}
for (i = 0, p = cfg->dig_out_pins; i < cfg->dig_outs; i++, p++) {
- err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
+ err = add_jack_kctl(codec, *p, cfg);
if (err < 0)
return err;
}
- err = add_jack_kctl(codec, cfg->dig_in_pin, cfg, lastname, &lastidx);
+ err = add_jack_kctl(codec, cfg->dig_in_pin, cfg);
if (err < 0)
return err;
- err = add_jack_kctl(codec, cfg->mono_out_pin, cfg, lastname, &lastidx);
+ err = add_jack_kctl(codec, cfg->mono_out_pin, cfg);
if (err < 0)
return err;
return 0;
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index 8ae52465ec5d..a9803da633c0 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -23,6 +23,7 @@ struct hda_jack_tbl {
unsigned int pin_sense; /* cached pin-sense value */
unsigned int jack_detect:1; /* capable of jack-detection? */
unsigned int jack_dirty:1; /* needs to update? */
+ unsigned int phantom_jack:1; /* a fixed, always present port? */
struct snd_kcontrol *kctl; /* assigned kctl for jack-detection */
#ifdef CONFIG_SND_HDA_INPUT_JACK
int type;
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 9a096a8e0fc5..1b4c12941baa 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -89,7 +89,7 @@
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xcidx, \
.subdevice = HDA_SUBDEV_AMP_FLAG, \
.info = snd_hda_mixer_amp_switch_info, \
- .get = snd_hda_mixer_amp_switch_get, \
+ .get = snd_hda_mixer_amp_switch_get_beep, \
.put = snd_hda_mixer_amp_switch_put_beep, \
.private_value = HDA_COMPOSE_AMP_VAL(nid, channel, xindex, direction) }
#else
@@ -121,6 +121,8 @@ int snd_hda_mixer_amp_switch_get(struct snd_kcontrol *kcontrol,
int snd_hda_mixer_amp_switch_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
#ifdef CONFIG_SND_HDA_INPUT_BEEP
+int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
#endif
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index e59e2f059b6e..7e46258fc700 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -426,10 +426,10 @@ static void print_digital_conv(struct snd_info_buffer *buffer,
static const char *get_pwr_state(u32 state)
{
- static const char * const buf[4] = {
- "D0", "D1", "D2", "D3"
+ static const char * const buf[] = {
+ "D0", "D1", "D2", "D3", "D3cold"
};
- if (state < 4)
+ if (state < ARRAY_SIZE(buf))
return buf[state];
return "UNKNOWN";
}
@@ -451,14 +451,21 @@ static void print_power_state(struct snd_info_buffer *buffer,
int sup = snd_hda_param_read(codec, nid, AC_PAR_POWER_STATE);
int pwr = snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_POWER_STATE, 0);
- if (sup)
+ if (sup != -1)
snd_iprintf(buffer, " Power states: %s\n",
bits_names(sup, names, ARRAY_SIZE(names)));
- snd_iprintf(buffer, " Power: setting=%s, actual=%s\n",
+ snd_iprintf(buffer, " Power: setting=%s, actual=%s",
get_pwr_state(pwr & AC_PWRST_SETTING),
get_pwr_state((pwr & AC_PWRST_ACTUAL) >>
AC_PWRST_ACTUAL_SHIFT));
+ if (pwr & AC_PWRST_ERROR)
+ snd_iprintf(buffer, ", Error");
+ if (pwr & AC_PWRST_CLK_STOP_OK)
+ snd_iprintf(buffer, ", Clock-stop-OK");
+ if (pwr & AC_PWRST_SETTING_RESET)
+ snd_iprintf(buffer, ", Setting-reset");
+ snd_iprintf(buffer, "\n");
}
static void print_unsol_cap(struct snd_info_buffer *buffer,
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index d8b2d6dee986..0208fa121e5a 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -642,7 +642,7 @@ static void ad198x_free(struct hda_codec *codec)
}
#ifdef CONFIG_PM
-static int ad198x_suspend(struct hda_codec *codec, pm_message_t state)
+static int ad198x_suspend(struct hda_codec *codec)
{
ad198x_shutup(codec);
return 0;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 9647ed4d7929..0c4c1a61b378 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -1892,7 +1892,7 @@ static int cs421x_parse_auto_config(struct hda_codec *codec)
Manage PDREF, when transitioning to D3hot
(DAC,ADC) -> D3, PDREF=1, AFG->D3
*/
-static int cs421x_suspend(struct hda_codec *codec, pm_message_t state)
+static int cs421x_suspend(struct hda_codec *codec)
{
struct cs_spec *spec = codec->spec;
unsigned int coef;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 2bf99fc1cbf2..14361184ae1e 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -554,7 +554,7 @@ static int conexant_build_controls(struct hda_codec *codec)
}
#ifdef CONFIG_SND_HDA_POWER_SAVE
-static int conexant_suspend(struct hda_codec *codec, pm_message_t state)
+static int conexant_suspend(struct hda_codec *codec)
{
snd_hda_shutup_pins(codec);
return 0;
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index ad319d4dc32f..641408dc28c0 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -85,7 +85,7 @@ struct hdmi_spec {
* Non-generic ATI/NVIDIA specific
*/
struct hda_multi_out multiout;
- const struct hda_pcm_stream *pcm_playback;
+ struct hda_pcm_stream pcm_playback;
};
@@ -787,7 +787,7 @@ static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
int cp_ready = !!(res & AC_UNSOL_RES_CP_READY);
printk(KERN_INFO
- "HDMI CP event: CODEC=%d PIN=%d SUBTAG=0x%x CP_STATE=%d CP_READY=%d\n",
+ "HDMI CP event: CODEC=%d TAG=%d SUBTAG=0x%x CP_STATE=%d CP_READY=%d\n",
codec->addr,
tag,
subtag,
@@ -876,7 +876,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
struct hdmi_spec_per_pin *per_pin;
struct hdmi_eld *eld;
struct hdmi_spec_per_cvt *per_cvt = NULL;
- int pinctl;
/* Validate hinfo */
pin_idx = hinfo_to_pin_index(spec, hinfo);
@@ -912,11 +911,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
snd_hda_codec_write(codec, per_pin->pin_nid, 0,
AC_VERB_SET_CONNECT_SEL,
mux_idx);
- pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
- AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
- snd_hda_codec_write(codec, per_pin->pin_nid, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL,
- pinctl | PIN_OUT);
snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
/* Initially set the converter's capabilities */
@@ -1153,11 +1147,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
struct hdmi_spec *spec = codec->spec;
int pin_idx = hinfo_to_pin_index(spec, hinfo);
hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid;
+ int pinctl;
hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
hdmi_setup_audio_infoframe(codec, pin_idx, substream);
+ pinctl = snd_hda_codec_read(codec, pin_nid, 0,
+ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+ snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl | PIN_OUT);
+
return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
}
@@ -1277,23 +1277,34 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
return 0;
}
-static int generic_hdmi_init(struct hda_codec *codec)
+static int generic_hdmi_init_per_pins(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
int pin_idx;
for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
struct hdmi_spec_per_pin *per_pin = &spec->pins[pin_idx];
- hda_nid_t pin_nid = per_pin->pin_nid;
struct hdmi_eld *eld = &per_pin->sink_eld;
- hdmi_init_pin(codec, pin_nid);
- snd_hda_jack_detect_enable(codec, pin_nid, pin_nid);
-
per_pin->codec = codec;
INIT_DELAYED_WORK(&per_pin->work, hdmi_repoll_eld);
snd_hda_eld_proc_new(codec, eld, pin_idx);
}
+ return 0;
+}
+
+static int generic_hdmi_init(struct hda_codec *codec)
+{
+ struct hdmi_spec *spec = codec->spec;
+ int pin_idx;
+
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ struct hdmi_spec_per_pin *per_pin = &spec->pins[pin_idx];
+ hda_nid_t pin_nid = per_pin->pin_nid;
+
+ hdmi_init_pin(codec, pin_nid);
+ snd_hda_jack_detect_enable(codec, pin_nid, pin_nid);
+ }
snd_hda_jack_report_sync(codec);
return 0;
}
@@ -1338,6 +1349,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
return -EINVAL;
}
codec->patch_ops = generic_hdmi_patch_ops;
+ generic_hdmi_init_per_pins(codec);
init_channel_allocations();
@@ -1352,45 +1364,65 @@ static int simple_playback_build_pcms(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
struct hda_pcm *info = spec->pcm_rec;
- int i;
+ unsigned int chans;
+ struct hda_pcm_stream *pstr;
- codec->num_pcms = spec->num_cvts;
+ codec->num_pcms = 1;
codec->pcm_info = info;
- for (i = 0; i < codec->num_pcms; i++, info++) {
- unsigned int chans;
- struct hda_pcm_stream *pstr;
-
- chans = get_wcaps(codec, spec->cvts[i].cvt_nid);
- chans = get_wcaps_channels(chans);
+ chans = get_wcaps(codec, spec->cvts[0].cvt_nid);
+ chans = get_wcaps_channels(chans);
- info->name = get_hdmi_pcm_name(i);
- info->pcm_type = HDA_PCM_TYPE_HDMI;
- pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
- snd_BUG_ON(!spec->pcm_playback);
- *pstr = *spec->pcm_playback;
- pstr->nid = spec->cvts[i].cvt_nid;
- if (pstr->channels_max <= 2 && chans && chans <= 16)
- pstr->channels_max = chans;
- }
+ info->name = get_hdmi_pcm_name(0);
+ info->pcm_type = HDA_PCM_TYPE_HDMI;
+ pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
+ *pstr = spec->pcm_playback;
+ pstr->nid = spec->cvts[0].cvt_nid;
+ if (pstr->channels_max <= 2 && chans && chans <= 16)
+ pstr->channels_max = chans;
return 0;
}
+/* unsolicited event for jack sensing */
+static void simple_hdmi_unsol_event(struct hda_codec *codec,
+ unsigned int res)
+{
+ snd_hda_jack_set_dirty_all(codec);
+ snd_hda_jack_report_sync(codec);
+}
+
+/* generic_hdmi_build_jack can be used for simple_hdmi, too,
+ * as long as spec->pins[] is set correctly
+ */
+#define simple_hdmi_build_jack generic_hdmi_build_jack
+
static int simple_playback_build_controls(struct hda_codec *codec)
{
struct hdmi_spec *spec = codec->spec;
int err;
- int i;
- for (i = 0; i < codec->num_pcms; i++) {
- err = snd_hda_create_spdif_out_ctls(codec,
- spec->cvts[i].cvt_nid,
- spec->cvts[i].cvt_nid);
- if (err < 0)
- return err;
- }
+ err = snd_hda_create_spdif_out_ctls(codec,
+ spec->cvts[0].cvt_nid,
+ spec->cvts[0].cvt_nid);
+ if (err < 0)
+ return err;
+ return simple_hdmi_build_jack(codec, 0);
+}
+static int simple_playback_init(struct hda_codec *codec)
+{
+ struct hdmi_spec *spec = codec->spec;
+ hda_nid_t pin = spec->pins[0].pin_nid;
+
+ snd_hda_codec_write(codec, pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+ /* some codecs require to unmute the pin */
+ if (get_wcaps(codec, pin) & AC_WCAP_OUT_AMP)
+ snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_AMP_GAIN_MUTE,
+ AMP_OUT_UNMUTE);
+ snd_hda_jack_detect_enable(codec, pin, pin);
+ snd_hda_jack_report_sync(codec);
return 0;
}
@@ -1418,7 +1450,15 @@ static const hda_nid_t nvhdmi_con_nids_7x[4] = {
0x6, 0x8, 0xa, 0xc,
};
-static const struct hda_verb nvhdmi_basic_init_7x[] = {
+static const struct hda_verb nvhdmi_basic_init_7x_2ch[] = {
+ /* set audio protect on */
+ { 0x1, Nv_VERB_SET_Audio_Protection_On, 0x1},
+ /* enable digital output on pin widget */
+ { 0x5, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | 0x5 },
+ {} /* terminator */
+};
+
+static const struct hda_verb nvhdmi_basic_init_7x_8ch[] = {
/* set audio protect on */
{ 0x1, Nv_VERB_SET_Audio_Protection_On, 0x1},
/* enable digital output on pin widget */
@@ -1446,9 +1486,15 @@ static const struct hda_verb nvhdmi_basic_init_7x[] = {
(SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
#endif
-static int nvhdmi_7x_init(struct hda_codec *codec)
+static int nvhdmi_7x_init_2ch(struct hda_codec *codec)
{
- snd_hda_sequence_write(codec, nvhdmi_basic_init_7x);
+ snd_hda_sequence_write(codec, nvhdmi_basic_init_7x_2ch);
+ return 0;
+}
+
+static int nvhdmi_7x_init_8ch(struct hda_codec *codec)
+{
+ snd_hda_sequence_write(codec, nvhdmi_basic_init_7x_8ch);
return 0;
}
@@ -1524,6 +1570,50 @@ static int simple_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
stream_tag, format, substream);
}
+static const struct hda_pcm_stream simple_pcm_playback = {
+ .substreams = 1,
+ .channels_min = 2,
+ .channels_max = 2,
+ .ops = {
+ .open = simple_playback_pcm_open,
+ .close = simple_playback_pcm_close,
+ .prepare = simple_playback_pcm_prepare
+ },
+};
+
+static const struct hda_codec_ops simple_hdmi_patch_ops = {
+ .build_controls = simple_playback_build_controls,
+ .build_pcms = simple_playback_build_pcms,
+ .init = simple_playback_init,
+ .free = simple_playback_free,
+ .unsol_event = simple_hdmi_unsol_event,
+};
+
+static int patch_simple_hdmi(struct hda_codec *codec,
+ hda_nid_t cvt_nid, hda_nid_t pin_nid)
+{
+ struct hdmi_spec *spec;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ codec->spec = spec;
+
+ spec->multiout.num_dacs = 0; /* no analog */
+ spec->multiout.max_channels = 2;
+ spec->multiout.dig_out_nid = cvt_nid;
+ spec->num_cvts = 1;
+ spec->num_pins = 1;
+ spec->cvts[0].cvt_nid = cvt_nid;
+ spec->pins[0].pin_nid = pin_nid;
+ spec->pcm_playback = simple_pcm_playback;
+
+ codec->patch_ops = simple_hdmi_patch_ops;
+
+ return 0;
+}
+
static void nvhdmi_8ch_7x_set_info_frame_parameters(struct hda_codec *codec,
int channels)
{
@@ -1696,54 +1786,20 @@ static const struct hda_pcm_stream nvhdmi_pcm_playback_8ch_7x = {
},
};
-static const struct hda_pcm_stream nvhdmi_pcm_playback_2ch = {
- .substreams = 1,
- .channels_min = 2,
- .channels_max = 2,
- .nid = nvhdmi_master_con_nid_7x,
- .rates = SUPPORTED_RATES,
- .maxbps = SUPPORTED_MAXBPS,
- .formats = SUPPORTED_FORMATS,
- .ops = {
- .open = simple_playback_pcm_open,
- .close = simple_playback_pcm_close,
- .prepare = simple_playback_pcm_prepare
- },
-};
-
-static const struct hda_codec_ops nvhdmi_patch_ops_8ch_7x = {
- .build_controls = simple_playback_build_controls,
- .build_pcms = simple_playback_build_pcms,
- .init = nvhdmi_7x_init,
- .free = simple_playback_free,
-};
-
-static const struct hda_codec_ops nvhdmi_patch_ops_2ch = {
- .build_controls = simple_playback_build_controls,
- .build_pcms = simple_playback_build_pcms,
- .init = nvhdmi_7x_init,
- .free = simple_playback_free,
-};
-
static int patch_nvhdmi_2ch(struct hda_codec *codec)
{
struct hdmi_spec *spec;
+ int err = patch_simple_hdmi(codec, nvhdmi_master_con_nid_7x,
+ nvhdmi_master_pin_nid_7x);
+ if (err < 0)
+ return err;
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
-
- codec->spec = spec;
-
- spec->multiout.num_dacs = 0; /* no analog */
- spec->multiout.max_channels = 2;
- spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x;
- spec->num_cvts = 1;
- spec->cvts[0].cvt_nid = nvhdmi_master_con_nid_7x;
- spec->pcm_playback = &nvhdmi_pcm_playback_2ch;
-
- codec->patch_ops = nvhdmi_patch_ops_2ch;
-
+ codec->patch_ops.init = nvhdmi_7x_init_2ch;
+ /* override the PCM rates, etc, as the codec doesn't give full list */
+ spec = codec->spec;
+ spec->pcm_playback.rates = SUPPORTED_RATES;
+ spec->pcm_playback.maxbps = SUPPORTED_MAXBPS;
+ spec->pcm_playback.formats = SUPPORTED_FORMATS;
return 0;
}
@@ -1751,13 +1807,12 @@ static int patch_nvhdmi_8ch_7x(struct hda_codec *codec)
{
struct hdmi_spec *spec;
int err = patch_nvhdmi_2ch(codec);
-
if (err < 0)
return err;
spec = codec->spec;
spec->multiout.max_channels = 8;
- spec->pcm_playback = &nvhdmi_pcm_playback_8ch_7x;
- codec->patch_ops = nvhdmi_patch_ops_8ch_7x;
+ spec->pcm_playback = nvhdmi_pcm_playback_8ch_7x;
+ codec->patch_ops.init = nvhdmi_7x_init_8ch;
/* Initialize the audio infoframe channel mask and checksum to something
* valid */
@@ -1801,69 +1856,26 @@ static int atihdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
return 0;
}
-static const struct hda_pcm_stream atihdmi_pcm_digital_playback = {
- .substreams = 1,
- .channels_min = 2,
- .channels_max = 2,
- .nid = ATIHDMI_CVT_NID,
- .ops = {
- .open = simple_playback_pcm_open,
- .close = simple_playback_pcm_close,
- .prepare = atihdmi_playback_pcm_prepare
- },
-};
-
-static const struct hda_verb atihdmi_basic_init[] = {
- /* enable digital output on pin widget */
- { 0x03, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- {} /* terminator */
-};
-
-static int atihdmi_init(struct hda_codec *codec)
+static int patch_atihdmi(struct hda_codec *codec)
{
- struct hdmi_spec *spec = codec->spec;
-
- snd_hda_sequence_write(codec, atihdmi_basic_init);
- /* SI codec requires to unmute the pin */
- if (get_wcaps(codec, spec->pins[0].pin_nid) & AC_WCAP_OUT_AMP)
- snd_hda_codec_write(codec, spec->pins[0].pin_nid, 0,
- AC_VERB_SET_AMP_GAIN_MUTE,
- AMP_OUT_UNMUTE);
+ struct hdmi_spec *spec;
+ int err = patch_simple_hdmi(codec, ATIHDMI_CVT_NID, ATIHDMI_PIN_NID);
+ if (err < 0)
+ return err;
+ spec = codec->spec;
+ spec->pcm_playback.ops.prepare = atihdmi_playback_pcm_prepare;
return 0;
}
-static const struct hda_codec_ops atihdmi_patch_ops = {
- .build_controls = simple_playback_build_controls,
- .build_pcms = simple_playback_build_pcms,
- .init = atihdmi_init,
- .free = simple_playback_free,
-};
+/* VIA HDMI Implementation */
+#define VIAHDMI_CVT_NID 0x02 /* audio converter1 */
+#define VIAHDMI_PIN_NID 0x03 /* HDMI output pin1 */
-
-static int patch_atihdmi(struct hda_codec *codec)
+static int patch_via_hdmi(struct hda_codec *codec)
{
- struct hdmi_spec *spec;
-
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
- if (spec == NULL)
- return -ENOMEM;
-
- codec->spec = spec;
-
- spec->multiout.num_dacs = 0; /* no analog */
- spec->multiout.max_channels = 2;
- spec->multiout.dig_out_nid = ATIHDMI_CVT_NID;
- spec->num_cvts = 1;
- spec->cvts[0].cvt_nid = ATIHDMI_CVT_NID;
- spec->pins[0].pin_nid = ATIHDMI_PIN_NID;
- spec->pcm_playback = &atihdmi_pcm_digital_playback;
-
- codec->patch_ops = atihdmi_patch_ops;
-
- return 0;
+ return patch_simple_hdmi(codec, VIAHDMI_CVT_NID, VIAHDMI_PIN_NID);
}
-
/*
* patch entries
*/
@@ -1902,8 +1914,13 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi },
{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi },
{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_generic_hdmi },
{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
+{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
+{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
+{ .id = 0x11069f84, .name = "VX11 HDMI/DP", .patch = patch_generic_hdmi },
+{ .id = 0x11069f85, .name = "VX11 HDMI/DP", .patch = patch_generic_hdmi },
{ .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862801, .name = "Bearlake HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862802, .name = "Cantiga HDMI", .patch = patch_generic_hdmi },
@@ -1911,6 +1928,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x80862804, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862805, .name = "CougarPoint HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
+{ .id = 0x80862807, .name = "Haswell HDMI", .patch = patch_generic_hdmi },
{ .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
{ .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi },
{} /* terminator */
@@ -1948,8 +1966,13 @@ MODULE_ALIAS("snd-hda-codec-id:10de0041");
MODULE_ALIAS("snd-hda-codec-id:10de0042");
MODULE_ALIAS("snd-hda-codec-id:10de0043");
MODULE_ALIAS("snd-hda-codec-id:10de0044");
+MODULE_ALIAS("snd-hda-codec-id:10de0051");
MODULE_ALIAS("snd-hda-codec-id:10de0067");
MODULE_ALIAS("snd-hda-codec-id:10de8001");
+MODULE_ALIAS("snd-hda-codec-id:11069f80");
+MODULE_ALIAS("snd-hda-codec-id:11069f81");
+MODULE_ALIAS("snd-hda-codec-id:11069f84");
+MODULE_ALIAS("snd-hda-codec-id:11069f85");
MODULE_ALIAS("snd-hda-codec-id:17e80047");
MODULE_ALIAS("snd-hda-codec-id:80860054");
MODULE_ALIAS("snd-hda-codec-id:80862801");
@@ -1958,6 +1981,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862803");
MODULE_ALIAS("snd-hda-codec-id:80862804");
MODULE_ALIAS("snd-hda-codec-id:80862805");
MODULE_ALIAS("snd-hda-codec-id:80862806");
+MODULE_ALIAS("snd-hda-codec-id:80862807");
MODULE_ALIAS("snd-hda-codec-id:80862880");
MODULE_ALIAS("snd-hda-codec-id:808629fb");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5ccf10a4d593..f141395dfee6 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -170,10 +170,10 @@ struct alc_spec {
hda_nid_t imux_pins[HDA_MAX_NUM_INPUTS];
unsigned int dyn_adc_idx[HDA_MAX_NUM_INPUTS];
int int_mic_idx, ext_mic_idx, dock_mic_idx; /* for auto-mic */
+ hda_nid_t inv_dmic_pin;
/* hooks */
void (*init_hook)(struct hda_codec *codec);
- void (*unsol_event)(struct hda_codec *codec, unsigned int res);
#ifdef CONFIG_SND_HDA_POWER_SAVE
void (*power_hook)(struct hda_codec *codec);
#endif
@@ -201,6 +201,8 @@ struct alc_spec {
unsigned int vol_in_capsrc:1; /* use capsrc volume (ADC has no vol) */
unsigned int parse_flags; /* passed to snd_hda_parse_pin_defcfg() */
unsigned int shared_mic_hp:1; /* HP/Mic-in sharing */
+ unsigned int inv_dmic_fixup:1; /* has inverted digital-mic workaround */
+ unsigned int inv_dmic_muted:1; /* R-ch of inv d-mic is muted? */
/* auto-mute control */
int automute_mode;
@@ -298,6 +300,39 @@ static inline hda_nid_t get_capsrc(struct alc_spec *spec, int idx)
}
static void call_update_outputs(struct hda_codec *codec);
+static void alc_inv_dmic_sync(struct hda_codec *codec, bool force);
+
+/* for shared I/O, change the pin-control accordingly */
+static void update_shared_mic_hp(struct hda_codec *codec, bool set_as_mic)
+{
+ struct alc_spec *spec = codec->spec;
+ unsigned int val;
+ hda_nid_t pin = spec->autocfg.inputs[1].pin;
+ /* NOTE: this assumes that there are only two inputs, the
+ * first is the real internal mic and the second is HP/mic jack.
+ */
+
+ val = snd_hda_get_default_vref(codec, pin);
+
+ /* This pin does not have vref caps - let's enable vref on pin 0x18
+ instead, as suggested by Realtek */
+ if (val == AC_PINCTL_VREF_HIZ) {
+ const hda_nid_t vref_pin = 0x18;
+ /* Sanity check pin 0x18 */
+ if (get_wcaps_type(get_wcaps(codec, vref_pin)) == AC_WID_PIN &&
+ get_defcfg_connect(snd_hda_codec_get_pincfg(codec, vref_pin)) == AC_JACK_PORT_NONE) {
+ unsigned int vref_val = snd_hda_get_default_vref(codec, vref_pin);
+ if (vref_val != AC_PINCTL_VREF_HIZ)
+ snd_hda_set_pin_ctl(codec, vref_pin, PIN_IN | (set_as_mic ? vref_val : 0));
+ }
+ }
+
+ val = set_as_mic ? val | PIN_IN : PIN_HP;
+ snd_hda_set_pin_ctl(codec, pin, val);
+
+ spec->automute_speaker = !set_as_mic;
+ call_update_outputs(codec);
+}
/* select the given imux item; either unmute exclusively or select the route */
static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
@@ -325,21 +360,8 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
return 0;
spec->cur_mux[adc_idx] = idx;
- /* for shared I/O, change the pin-control accordingly */
- if (spec->shared_mic_hp) {
- unsigned int val;
- hda_nid_t pin = spec->autocfg.inputs[1].pin;
- /* NOTE: this assumes that there are only two inputs, the
- * first is the real internal mic and the second is HP jack.
- */
- if (spec->cur_mux[adc_idx])
- val = snd_hda_get_default_vref(codec, pin) | PIN_IN;
- else
- val = PIN_HP;
- snd_hda_set_pin_ctl(codec, pin, val);
- spec->automute_speaker = !spec->cur_mux[adc_idx];
- call_update_outputs(codec);
- }
+ if (spec->shared_mic_hp)
+ update_shared_mic_hp(codec, spec->cur_mux[adc_idx]);
if (spec->dyn_adc_switch) {
alc_dyn_adc_pcm_resetup(codec, idx);
@@ -368,6 +390,7 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
AC_VERB_SET_CONNECT_SEL,
imux->items[idx].index);
}
+ alc_inv_dmic_sync(codec, true);
return 1;
}
@@ -664,7 +687,7 @@ static void alc_update_knob_master(struct hda_codec *codec, hda_nid_t nid)
}
/* unsolicited event for HP jack sensing */
-static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
+static void alc_unsol_event(struct hda_codec *codec, unsigned int res)
{
int action;
@@ -1000,11 +1023,9 @@ static void alc_init_automute(struct hda_codec *codec)
spec->automute_lo = spec->automute_lo_possible;
spec->automute_speaker = spec->automute_speaker_possible;
- if (spec->automute_speaker_possible || spec->automute_lo_possible) {
+ if (spec->automute_speaker_possible || spec->automute_lo_possible)
/* create a control for automute mode */
alc_add_automute_mode_enum(codec);
- spec->unsol_event = alc_sku_unsol_event;
- }
}
/* return the position of NID in the list, or -1 if not found */
@@ -1167,7 +1188,6 @@ static void alc_init_auto_mic(struct hda_codec *codec)
snd_printdd("realtek: Enable auto-mic switch on NID 0x%x/0x%x/0x%x\n",
ext, fixed, dock);
- spec->unsol_event = alc_sku_unsol_event;
}
/* check the availabilities of auto-mute and auto-mic switches */
@@ -1556,14 +1576,14 @@ typedef int (*getput_call_t)(struct snd_kcontrol *kcontrol,
static int alc_cap_getput_caller(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol,
- getput_call_t func, bool check_adc_switch)
+ getput_call_t func, bool is_put)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct alc_spec *spec = codec->spec;
int i, err = 0;
mutex_lock(&codec->control_mutex);
- if (check_adc_switch && spec->dyn_adc_switch) {
+ if (is_put && spec->dyn_adc_switch) {
for (i = 0; i < spec->num_adc_nids; i++) {
kcontrol->private_value =
HDA_COMPOSE_AMP_VAL(spec->adc_nids[i],
@@ -1584,6 +1604,8 @@ static int alc_cap_getput_caller(struct snd_kcontrol *kcontrol,
3, 0, HDA_INPUT);
err = func(kcontrol, ucontrol);
}
+ if (err >= 0 && is_put)
+ alc_inv_dmic_sync(codec, false);
error:
mutex_unlock(&codec->control_mutex);
return err;
@@ -1676,6 +1698,116 @@ DEFINE_CAPMIX_NOSRC(2);
DEFINE_CAPMIX_NOSRC(3);
/*
+ * Inverted digital-mic handling
+ *
+ * First off, it's a bit tricky. The "Inverted Internal Mic Capture Switch"
+ * gives the additional mute only to the right channel of the digital mic
+ * capture stream. This is a workaround for avoiding the almost silence
+ * by summing the stereo stream from some (known to be ForteMedia)
+ * digital mic unit.
+ *
+ * The logic is to call alc_inv_dmic_sync() after each action (possibly)
+ * modifying ADC amp. When the mute flag is set, it mutes the R-channel
+ * without caching so that the cache can still keep the original value.
+ * The cached value is then restored when the flag is set off or any other
+ * than d-mic is used as the current input source.
+ */
+static void alc_inv_dmic_sync(struct hda_codec *codec, bool force)
+{
+ struct alc_spec *spec = codec->spec;
+ int i;
+
+ if (!spec->inv_dmic_fixup)
+ return;
+ if (!spec->inv_dmic_muted && !force)
+ return;
+ for (i = 0; i < spec->num_adc_nids; i++) {
+ int src = spec->dyn_adc_switch ? 0 : i;
+ bool dmic_fixup = false;
+ hda_nid_t nid;
+ int parm, dir, v;
+
+ if (spec->inv_dmic_muted &&
+ spec->imux_pins[spec->cur_mux[src]] == spec->inv_dmic_pin)
+ dmic_fixup = true;
+ if (!dmic_fixup && !force)
+ continue;
+ if (spec->vol_in_capsrc) {
+ nid = spec->capsrc_nids[i];
+ parm = AC_AMP_SET_RIGHT | AC_AMP_SET_OUTPUT;
+ dir = HDA_OUTPUT;
+ } else {
+ nid = spec->adc_nids[i];
+ parm = AC_AMP_SET_RIGHT | AC_AMP_SET_INPUT;
+ dir = HDA_INPUT;
+ }
+ /* we care only right channel */
+ v = snd_hda_codec_amp_read(codec, nid, 1, dir, 0);
+ if (v & 0x80) /* if already muted, we don't need to touch */
+ continue;
+ if (dmic_fixup) /* add mute for d-mic */
+ v |= 0x80;
+ snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_AMP_GAIN_MUTE,
+ parm | v);
+ }
+}
+
+static int alc_inv_dmic_sw_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct alc_spec *spec = codec->spec;
+
+ ucontrol->value.integer.value[0] = !spec->inv_dmic_muted;
+ return 0;
+}
+
+static int alc_inv_dmic_sw_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct alc_spec *spec = codec->spec;
+ unsigned int val = !ucontrol->value.integer.value[0];
+
+ if (val == spec->inv_dmic_muted)
+ return 0;
+ spec->inv_dmic_muted = val;
+ alc_inv_dmic_sync(codec, true);
+ return 0;
+}
+
+static const struct snd_kcontrol_new alc_inv_dmic_sw = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .info = snd_ctl_boolean_mono_info,
+ .get = alc_inv_dmic_sw_get,
+ .put = alc_inv_dmic_sw_put,
+};
+
+static int alc_add_inv_dmic_mixer(struct hda_codec *codec, hda_nid_t nid)
+{
+ struct alc_spec *spec = codec->spec;
+ struct snd_kcontrol_new *knew = alc_kcontrol_new(spec);
+ if (!knew)
+ return -ENOMEM;
+ *knew = alc_inv_dmic_sw;
+ knew->name = kstrdup("Inverted Internal Mic Capture Switch", GFP_KERNEL);
+ if (!knew->name)
+ return -ENOMEM;
+ spec->inv_dmic_fixup = 1;
+ spec->inv_dmic_muted = 0;
+ spec->inv_dmic_pin = nid;
+ return 0;
+}
+
+/* typically the digital mic is put at node 0x12 */
+static void alc_fixup_inv_dmic_0x12(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+{
+ if (action == ALC_FIXUP_ACT_PROBE)
+ alc_add_inv_dmic_mixer(codec, 0x12);
+}
+
+/*
* virtual master controls
*/
@@ -1865,13 +1997,31 @@ static int __alc_build_controls(struct hda_codec *codec)
return 0;
}
-static int alc_build_controls(struct hda_codec *codec)
+static int alc_build_jacks(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
+
+ if (spec->shared_mic_hp) {
+ int err;
+ int nid = spec->autocfg.inputs[1].pin;
+ err = snd_hda_jack_add_kctl(codec, nid, "Headphone Mic", 0);
+ if (err < 0)
+ return err;
+ err = snd_hda_jack_detect_enable(codec, nid, 0);
+ if (err < 0)
+ return err;
+ }
+
+ return snd_hda_jack_add_kctls(codec, &spec->autocfg);
+}
+
+static int alc_build_controls(struct hda_codec *codec)
+{
int err = __alc_build_controls(codec);
if (err < 0)
return err;
- err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
+
+ err = alc_build_jacks(codec);
if (err < 0)
return err;
alc_apply_fixup(codec, ALC_FIXUP_ACT_BUILD);
@@ -1908,14 +2058,6 @@ static int alc_init(struct hda_codec *codec)
return 0;
}
-static void alc_unsol_event(struct hda_codec *codec, unsigned int res)
-{
- struct alc_spec *spec = codec->spec;
-
- if (spec->unsol_event)
- spec->unsol_event(codec, res);
-}
-
#ifdef CONFIG_SND_HDA_POWER_SAVE
static int alc_check_power_status(struct hda_codec *codec, hda_nid_t nid)
{
@@ -2300,7 +2442,7 @@ static void alc_power_eapd(struct hda_codec *codec)
alc_auto_setup_eapd(codec, false);
}
-static int alc_suspend(struct hda_codec *codec, pm_message_t state)
+static int alc_suspend(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
alc_shutup(codec);
@@ -2317,6 +2459,7 @@ static int alc_resume(struct hda_codec *codec)
codec->patch_ops.init(codec);
snd_hda_codec_resume_amp(codec);
snd_hda_codec_resume_cache(codec);
+ alc_inv_dmic_sync(codec, true);
hda_call_check_power_status(codec, 0x01);
return 0;
}
@@ -4116,14 +4259,12 @@ static void set_capture_mixer(struct hda_codec *codec)
*/
static void alc_auto_init_std(struct hda_codec *codec)
{
- struct alc_spec *spec = codec->spec;
alc_auto_init_multi_out(codec);
alc_auto_init_extra_out(codec);
alc_auto_init_analog_input(codec);
alc_auto_init_input_src(codec);
alc_auto_init_digital(codec);
- if (spec->unsol_event)
- alc_inithook(codec);
+ alc_inithook(codec);
}
/*
@@ -4724,7 +4865,6 @@ static void alc260_fixup_gpio1_toggle(struct hda_codec *codec,
spec->automute_speaker = 1;
spec->autocfg.hp_pins[0] = 0x0f; /* copy it for automute */
snd_hda_jack_detect_enable(codec, 0x0f, ALC_HP_EVENT);
- spec->unsol_event = alc_sku_unsol_event;
snd_hda_gen_add_verbs(&spec->gen, alc_gpio1_init_verbs);
}
}
@@ -4909,6 +5049,7 @@ enum {
ALC889_FIXUP_DAC_ROUTE,
ALC889_FIXUP_MBP_VREF,
ALC889_FIXUP_IMAC91_VREF,
+ ALC882_FIXUP_INV_DMIC,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -5212,6 +5353,10 @@ static const struct alc_fixup alc882_fixups[] = {
.chained = true,
.chain_id = ALC882_FIXUP_GPIO1,
},
+ [ALC882_FIXUP_INV_DMIC] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic_0x12,
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -5286,6 +5431,7 @@ static const struct alc_model_fixup alc882_fixup_models[] = {
{.id = ALC882_FIXUP_ACER_ASPIRE_4930G, .name = "acer-aspire-4930g"},
{.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"},
{.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
+ {.id = ALC882_FIXUP_INV_DMIC, .name = "inv-dmic"},
{}
};
@@ -5373,6 +5519,7 @@ enum {
ALC262_FIXUP_LENOVO_3000,
ALC262_FIXUP_BENQ,
ALC262_FIXUP_BENQ_T31,
+ ALC262_FIXUP_INV_DMIC,
};
static const struct alc_fixup alc262_fixups[] = {
@@ -5424,6 +5571,10 @@ static const struct alc_fixup alc262_fixups[] = {
{}
}
},
+ [ALC262_FIXUP_INV_DMIC] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic_0x12,
+ },
};
static const struct snd_pci_quirk alc262_fixup_tbl[] = {
@@ -5438,6 +5589,10 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
{}
};
+static const struct alc_model_fixup alc262_fixup_models[] = {
+ {.id = ALC262_FIXUP_INV_DMIC, .name = "inv-dmic"},
+ {}
+};
/*
*/
@@ -5466,7 +5621,8 @@ static int patch_alc262(struct hda_codec *codec)
#endif
alc_fix_pll_init(codec, 0x20, 0x0a, 10);
- alc_pick_fixup(codec, NULL, alc262_fixup_tbl, alc262_fixups);
+ alc_pick_fixup(codec, alc262_fixup_models, alc262_fixup_tbl,
+ alc262_fixups);
alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
alc_auto_parse_customize_define(codec);
@@ -5522,6 +5678,22 @@ static const struct hda_verb alc268_beep_init_verbs[] = {
{ }
};
+enum {
+ ALC268_FIXUP_INV_DMIC,
+};
+
+static const struct alc_fixup alc268_fixups[] = {
+ [ALC268_FIXUP_INV_DMIC] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic_0x12,
+ },
+};
+
+static const struct alc_model_fixup alc268_fixup_models[] = {
+ {.id = ALC268_FIXUP_INV_DMIC, .name = "inv-dmic"},
+ {}
+};
+
/*
* BIOS auto configuration
*/
@@ -5553,6 +5725,9 @@ static int patch_alc268(struct hda_codec *codec)
spec = codec->spec;
+ alc_pick_fixup(codec, alc268_fixup_models, NULL, alc268_fixups);
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
+
/* automatic parse from the BIOS config */
err = alc268_parse_auto_config(codec);
if (err < 0)
@@ -5582,6 +5757,8 @@ static int patch_alc268(struct hda_codec *codec)
codec->patch_ops = alc_patch_ops;
spec->shutup = alc_eapd_shutup;
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PROBE);
+
return 0;
error:
@@ -5704,6 +5881,15 @@ static int alc269_resume(struct hda_codec *codec)
}
#endif /* CONFIG_PM */
+static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (action == ALC_FIXUP_ACT_PRE_PROBE)
+ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+}
+
static void alc269_fixup_hweq(struct hda_codec *codec,
const struct alc_fixup *fix, int action)
{
@@ -5810,6 +5996,7 @@ static void alc269_fixup_mic2_mute(struct hda_codec *codec,
}
}
+
enum {
ALC269_FIXUP_SONY_VAIO,
ALC275_FIXUP_SONY_VAIO_GPIO2,
@@ -5828,6 +6015,9 @@ enum {
ALC269VB_FIXUP_AMIC,
ALC269VB_FIXUP_DMIC,
ALC269_FIXUP_MIC2_MUTE_LED,
+ ALC269_FIXUP_INV_DMIC,
+ ALC269_FIXUP_LENOVO_DOCK,
+ ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
};
static const struct alc_fixup alc269_fixups[] = {
@@ -5952,12 +6142,33 @@ static const struct alc_fixup alc269_fixups[] = {
.type = ALC_FIXUP_FUNC,
.v.func = alc269_fixup_mic2_mute,
},
+ [ALC269_FIXUP_INV_DMIC] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic_0x12,
+ },
+ [ALC269_FIXUP_LENOVO_DOCK] = {
+ .type = ALC_FIXUP_PINS,
+ .v.pins = (const struct alc_pincfg[]) {
+ { 0x19, 0x23a11040 }, /* dock mic */
+ { 0x1b, 0x2121103f }, /* dock headphone */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
+ },
+ [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
@@ -5975,6 +6186,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -6033,6 +6245,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
static const struct alc_model_fixup alc269_fixup_models[] = {
{.id = ALC269_FIXUP_AMIC, .name = "laptop-amic"},
{.id = ALC269_FIXUP_DMIC, .name = "laptop-dmic"},
+ {.id = ALC269_FIXUP_STEREO_DMIC, .name = "alc269-dmic"},
+ {.id = ALC271_FIXUP_DMIC, .name = "alc271-dmic"},
+ {.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"},
+ {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
{}
};
@@ -6329,12 +6545,6 @@ static const struct snd_pci_quirk alc861vd_fixup_tbl[] = {
{}
};
-static const struct hda_verb alc660vd_eapd_verbs[] = {
- {0x14, AC_VERB_SET_EAPD_BTLENABLE, 2},
- {0x15, AC_VERB_SET_EAPD_BTLENABLE, 2},
- { }
-};
-
/*
*/
static int patch_alc861vd(struct hda_codec *codec)
@@ -6356,11 +6566,6 @@ static int patch_alc861vd(struct hda_codec *codec)
if (err < 0)
goto error;
- if (codec->vendor_id == 0x10ec0660) {
- /* always turn on EAPD */
- snd_hda_gen_add_verbs(&spec->gen, alc660vd_eapd_verbs);
- }
-
if (!spec->no_analog) {
err = snd_hda_attach_beep_device(codec, 0x23);
if (err < 0)
@@ -6443,6 +6648,7 @@ enum {
ALC662_FIXUP_ASUS_MODE8,
ALC662_FIXUP_NO_JACK_DETECT,
ALC662_FIXUP_ZOTAC_Z68,
+ ALC662_FIXUP_INV_DMIC,
};
static const struct alc_fixup alc662_fixups[] = {
@@ -6599,12 +6805,17 @@ static const struct alc_fixup alc662_fixups[] = {
{ }
}
},
+ [ALC662_FIXUP_INV_DMIC] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc_fixup_inv_dmic_0x12,
+ },
};
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
@@ -6685,9 +6896,35 @@ static const struct alc_model_fixup alc662_fixup_models[] = {
{.id = ALC662_FIXUP_ASUS_MODE6, .name = "asus-mode6"},
{.id = ALC662_FIXUP_ASUS_MODE7, .name = "asus-mode7"},
{.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"},
+ {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"},
{}
};
+static void alc662_fill_coef(struct hda_codec *codec)
+{
+ int val, coef;
+
+ coef = alc_get_coef0(codec);
+
+ switch (codec->vendor_id) {
+ case 0x10ec0662:
+ if ((coef & 0x00f0) == 0x0030) {
+ val = alc_read_coef_idx(codec, 0x4); /* EAPD Ctrl */
+ alc_write_coef_idx(codec, 0x4, val & ~(1<<10));
+ }
+ break;
+ case 0x10ec0272:
+ case 0x10ec0273:
+ case 0x10ec0663:
+ case 0x10ec0665:
+ case 0x10ec0670:
+ case 0x10ec0671:
+ case 0x10ec0672:
+ val = alc_read_coef_idx(codec, 0xd); /* EAPD Ctrl */
+ alc_write_coef_idx(codec, 0xd, val | (1<<14));
+ break;
+ }
+}
/*
*/
@@ -6707,6 +6944,9 @@ static int patch_alc662(struct hda_codec *codec)
alc_fix_pll_init(codec, 0x20, 0x04, 15);
+ spec->init_hook = alc662_fill_coef;
+ alc662_fill_coef(codec);
+
alc_pick_fixup(codec, alc662_fixup_models,
alc662_fixup_tbl, alc662_fixups);
alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
@@ -6803,6 +7043,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
{ .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
{ .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
+ { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
.patch = patch_alc861 },
{ .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 07675282015a..a1596a3b171c 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4997,7 +4997,7 @@ static int stac92xx_resume(struct hda_codec *codec)
return 0;
}
-static int stac92xx_suspend(struct hda_codec *codec, pm_message_t state)
+static int stac92xx_suspend(struct hda_codec *codec)
{
stac92xx_shutup(codec);
return 0;
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 82b368068e08..90645560ed39 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -1748,7 +1748,7 @@ static void via_unsol_event(struct hda_codec *codec,
}
#ifdef CONFIG_PM
-static int via_suspend(struct hda_codec *codec, pm_message_t state)
+static int via_suspend(struct hda_codec *codec)
{
struct via_spec *spec = codec->spec;
vt1708_stop_hp_work(spec);
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index a01a00d1cf4d..bed9f34f4efe 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -2793,9 +2793,10 @@ static void __devexit snd_vt1724_remove(struct pci_dev *pci)
}
#ifdef CONFIG_PM
-static int snd_vt1724_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_vt1724_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_ice1712 *ice = card->private_data;
if (!ice->pm_suspend_enabled)
@@ -2820,13 +2821,14 @@ static int snd_vt1724_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_vt1724_resume(struct pci_dev *pci)
+static int snd_vt1724_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_ice1712 *ice = card->private_data;
if (!ice->pm_suspend_enabled)
@@ -2871,17 +2873,21 @@ static int snd_vt1724_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
-#endif
+
+static SIMPLE_DEV_PM_OPS(snd_vt1724_pm, snd_vt1724_suspend, snd_vt1724_resume);
+#define SND_VT1724_PM_OPS &snd_vt1724_pm
+#else
+#define SND_VT1724_PM_OPS NULL
+#endif /* CONFIG_PM */
static struct pci_driver vt1724_driver = {
.name = KBUILD_MODNAME,
.id_table = snd_vt1724_ids,
.probe = snd_vt1724_probe,
.remove = __devexit_p(snd_vt1724_remove),
-#ifdef CONFIG_PM
- .suspend = snd_vt1724_suspend,
- .resume = snd_vt1724_resume,
-#endif
+ .driver = {
+ .pm = SND_VT1724_PM_OPS,
+ },
};
module_pci_driver(vt1724_driver);
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index f4e2dd4da8cf..cd553f592e2d 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -2624,9 +2624,10 @@ static int snd_intel8x0_free(struct intel8x0 *chip)
/*
* power management
*/
-static int intel8x0_suspend(struct pci_dev *pci, pm_message_t state)
+static int intel8x0_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct intel8x0 *chip = card->private_data;
int i;
@@ -2658,13 +2659,14 @@ static int intel8x0_suspend(struct pci_dev *pci, pm_message_t state)
/* The call below may disable built-in speaker on some laptops
* after S2RAM. So, don't touch it.
*/
- /* pci_set_power_state(pci, pci_choose_state(pci, state)); */
+ /* pci_set_power_state(pci, PCI_D3hot); */
return 0;
}
-static int intel8x0_resume(struct pci_dev *pci)
+static int intel8x0_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct intel8x0 *chip = card->private_data;
int i;
@@ -2734,6 +2736,11 @@ static int intel8x0_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(intel8x0_pm, intel8x0_suspend, intel8x0_resume);
+#define INTEL8X0_PM_OPS &intel8x0_pm
+#else
+#define INTEL8X0_PM_OPS NULL
#endif /* CONFIG_PM */
#define INTEL8X0_TESTBUF_SIZE 32768 /* enough large for one shot */
@@ -3343,10 +3350,9 @@ static struct pci_driver intel8x0_driver = {
.id_table = snd_intel8x0_ids,
.probe = snd_intel8x0_probe,
.remove = __devexit_p(snd_intel8x0_remove),
-#ifdef CONFIG_PM
- .suspend = intel8x0_suspend,
- .resume = intel8x0_resume,
-#endif
+ .driver = {
+ .pm = INTEL8X0_PM_OPS,
+ },
};
module_pci_driver(intel8x0_driver);
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index fc27a6a69e77..da44bb3f8e7a 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -1012,9 +1012,10 @@ static int snd_intel8x0m_free(struct intel8x0m *chip)
/*
* power management
*/
-static int intel8x0m_suspend(struct pci_dev *pci, pm_message_t state)
+static int intel8x0m_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct intel8x0m *chip = card->private_data;
int i;
@@ -1028,13 +1029,14 @@ static int intel8x0m_suspend(struct pci_dev *pci, pm_message_t state)
}
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int intel8x0m_resume(struct pci_dev *pci)
+static int intel8x0m_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct intel8x0m *chip = card->private_data;
pci_set_power_state(pci, PCI_D0);
@@ -1060,6 +1062,11 @@ static int intel8x0m_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(intel8x0m_pm, intel8x0m_suspend, intel8x0m_resume);
+#define INTEL8X0M_PM_OPS &intel8x0m_pm
+#else
+#define INTEL8X0M_PM_OPS NULL
#endif /* CONFIG_PM */
#ifdef CONFIG_PROC_FS
@@ -1329,10 +1336,9 @@ static struct pci_driver intel8x0m_driver = {
.id_table = snd_intel8x0m_ids,
.probe = snd_intel8x0m_probe,
.remove = __devexit_p(snd_intel8x0m_remove),
-#ifdef CONFIG_PM
- .suspend = intel8x0m_suspend,
- .resume = intel8x0m_resume,
-#endif
+ .driver = {
+ .pm = INTEL8X0M_PM_OPS,
+ },
};
module_pci_driver(intel8x0m_driver);
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index deef21399586..c85d1ffcc955 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -361,74 +361,6 @@ MODULE_PARM_DESC(amp_gpio, "GPIO pin number for external amp. (default = -1)");
#define DSP2HOST_REQ_I2SRATE 0x02
#define DSP2HOST_REQ_TIMER 0x04
-/* AC97 registers */
-/* XXX fix this crap up */
-/*#define AC97_RESET 0x00*/
-
-#define AC97_VOL_MUTE_B 0x8000
-#define AC97_VOL_M 0x1F
-#define AC97_LEFT_VOL_S 8
-
-#define AC97_MASTER_VOL 0x02
-#define AC97_LINE_LEVEL_VOL 0x04
-#define AC97_MASTER_MONO_VOL 0x06
-#define AC97_PC_BEEP_VOL 0x0A
-#define AC97_PC_BEEP_VOL_M 0x0F
-#define AC97_SROUND_MASTER_VOL 0x38
-#define AC97_PC_BEEP_VOL_S 1
-
-/*#define AC97_PHONE_VOL 0x0C
-#define AC97_MIC_VOL 0x0E*/
-#define AC97_MIC_20DB_ENABLE 0x40
-
-/*#define AC97_LINEIN_VOL 0x10
-#define AC97_CD_VOL 0x12
-#define AC97_VIDEO_VOL 0x14
-#define AC97_AUX_VOL 0x16*/
-#define AC97_PCM_OUT_VOL 0x18
-/*#define AC97_RECORD_SELECT 0x1A*/
-#define AC97_RECORD_MIC 0x00
-#define AC97_RECORD_CD 0x01
-#define AC97_RECORD_VIDEO 0x02
-#define AC97_RECORD_AUX 0x03
-#define AC97_RECORD_MONO_MUX 0x02
-#define AC97_RECORD_DIGITAL 0x03
-#define AC97_RECORD_LINE 0x04
-#define AC97_RECORD_STEREO 0x05
-#define AC97_RECORD_MONO 0x06
-#define AC97_RECORD_PHONE 0x07
-
-/*#define AC97_RECORD_GAIN 0x1C*/
-#define AC97_RECORD_VOL_M 0x0F
-
-/*#define AC97_GENERAL_PURPOSE 0x20*/
-#define AC97_POWER_DOWN_CTRL 0x26
-#define AC97_ADC_READY 0x0001
-#define AC97_DAC_READY 0x0002
-#define AC97_ANALOG_READY 0x0004
-#define AC97_VREF_ON 0x0008
-#define AC97_PR0 0x0100
-#define AC97_PR1 0x0200
-#define AC97_PR2 0x0400
-#define AC97_PR3 0x0800
-#define AC97_PR4 0x1000
-
-#define AC97_RESERVED1 0x28
-
-#define AC97_VENDOR_TEST 0x5A
-
-#define AC97_CLOCK_DELAY 0x5C
-#define AC97_LINEOUT_MUX_SEL 0x0001
-#define AC97_MONO_MUX_SEL 0x0002
-#define AC97_CLOCK_DELAY_SEL 0x1F
-#define AC97_DAC_CDS_SHIFT 6
-#define AC97_ADC_CDS_SHIFT 11
-
-#define AC97_MULTI_CHANNEL_SEL 0x74
-
-/*#define AC97_VENDOR_ID1 0x7C
-#define AC97_VENDOR_ID2 0x7E*/
-
/*
* ASSP control regs
*/
@@ -2459,9 +2391,10 @@ static int snd_m3_free(struct snd_m3 *chip)
* APM support
*/
#ifdef CONFIG_PM
-static int m3_suspend(struct pci_dev *pci, pm_message_t state)
+static int m3_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_m3 *chip = card->private_data;
int i, dsp_index;
@@ -2489,13 +2422,14 @@ static int m3_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int m3_resume(struct pci_dev *pci)
+static int m3_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_m3 *chip = card->private_data;
int i, dsp_index;
@@ -2546,6 +2480,11 @@ static int m3_resume(struct pci_dev *pci)
chip->in_suspend = 0;
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(m3_pm, m3_suspend, m3_resume);
+#define M3_PM_OPS &m3_pm
+#else
+#define M3_PM_OPS NULL
#endif /* CONFIG_PM */
#ifdef CONFIG_SND_MAESTRO3_INPUT
@@ -2842,10 +2781,9 @@ static struct pci_driver m3_driver = {
.id_table = snd_m3_ids,
.probe = snd_m3_probe,
.remove = __devexit_p(snd_m3_remove),
-#ifdef CONFIG_PM
- .suspend = m3_suspend,
- .resume = m3_resume,
-#endif
+ .driver = {
+ .pm = M3_PM_OPS,
+ },
};
module_pci_driver(m3_driver);
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index 8159b05ee94d..465cff25b146 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -1382,9 +1382,10 @@ snd_nm256_peek_for_sig(struct nm256 *chip)
* APM event handler, so the card is properly reinitialized after a power
* event.
*/
-static int nm256_suspend(struct pci_dev *pci, pm_message_t state)
+static int nm256_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct nm256 *chip = card->private_data;
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -1393,13 +1394,14 @@ static int nm256_suspend(struct pci_dev *pci, pm_message_t state)
chip->coeffs_current = 0;
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int nm256_resume(struct pci_dev *pci)
+static int nm256_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct nm256 *chip = card->private_data;
int i;
@@ -1434,6 +1436,11 @@ static int nm256_resume(struct pci_dev *pci)
chip->in_resume = 0;
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(nm256_pm, nm256_suspend, nm256_resume);
+#define NM256_PM_OPS &nm256_pm
+#else
+#define NM256_PM_OPS NULL
#endif /* CONFIG_PM */
static int snd_nm256_free(struct nm256 *chip)
@@ -1747,10 +1754,9 @@ static struct pci_driver nm256_driver = {
.id_table = snd_nm256_ids,
.probe = snd_nm256_probe,
.remove = __devexit_p(snd_nm256_remove),
-#ifdef CONFIG_PM
- .suspend = nm256_suspend,
- .resume = nm256_resume,
-#endif
+ .driver = {
+ .pm = NM256_PM_OPS,
+ },
};
module_pci_driver(nm256_driver);
diff --git a/sound/pci/oxygen/oxygen.c b/sound/pci/oxygen/oxygen.c
index 610275bfbaeb..37520a2b4dcf 100644
--- a/sound/pci/oxygen/oxygen.c
+++ b/sound/pci/oxygen/oxygen.c
@@ -873,8 +873,9 @@ static struct pci_driver oxygen_driver = {
.probe = generic_oxygen_probe,
.remove = __devexit_p(oxygen_pci_remove),
#ifdef CONFIG_PM
- .suspend = oxygen_pci_suspend,
- .resume = oxygen_pci_resume,
+ .driver = {
+ .pm = &oxygen_pci_pm,
+ },
#endif
};
diff --git a/sound/pci/oxygen/oxygen.h b/sound/pci/oxygen/oxygen.h
index f53897a708b4..7112a89fb8bd 100644
--- a/sound/pci/oxygen/oxygen.h
+++ b/sound/pci/oxygen/oxygen.h
@@ -162,8 +162,7 @@ int oxygen_pci_probe(struct pci_dev *pci, int index, char *id,
);
void oxygen_pci_remove(struct pci_dev *pci);
#ifdef CONFIG_PM
-int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state);
-int oxygen_pci_resume(struct pci_dev *pci);
+extern const struct dev_pm_ops oxygen_pci_pm;
#endif
void oxygen_pci_shutdown(struct pci_dev *pci);
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c
index 92e2d67f16a1..ab8738e21ad1 100644
--- a/sound/pci/oxygen/oxygen_lib.c
+++ b/sound/pci/oxygen/oxygen_lib.c
@@ -727,9 +727,10 @@ void oxygen_pci_remove(struct pci_dev *pci)
EXPORT_SYMBOL(oxygen_pci_remove);
#ifdef CONFIG_PM
-int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state)
+static int oxygen_pci_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct oxygen *chip = card->private_data;
unsigned int i, saved_interrupt_mask;
@@ -756,10 +757,9 @@ int oxygen_pci_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-EXPORT_SYMBOL(oxygen_pci_suspend);
static const u32 registers_to_restore[OXYGEN_IO_SIZE / 32] = {
0xffffffff, 0x00ff077f, 0x00011d08, 0x007f00ff,
@@ -787,9 +787,10 @@ static void oxygen_restore_ac97(struct oxygen *chip, unsigned int codec)
chip->saved_ac97_registers[codec][i]);
}
-int oxygen_pci_resume(struct pci_dev *pci)
+static int oxygen_pci_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct oxygen *chip = card->private_data;
unsigned int i;
@@ -820,7 +821,9 @@ int oxygen_pci_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
-EXPORT_SYMBOL(oxygen_pci_resume);
+
+SIMPLE_DEV_PM_OPS(oxygen_pci_pm, oxygen_pci_suspend, oxygen_pci_resume);
+EXPORT_SYMBOL(oxygen_pci_pm);
#endif /* CONFIG_PM */
void oxygen_pci_shutdown(struct pci_dev *pci)
diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c
index 19962c6d38c3..d3b606b69f3b 100644
--- a/sound/pci/oxygen/virtuoso.c
+++ b/sound/pci/oxygen/virtuoso.c
@@ -94,8 +94,9 @@ static struct pci_driver xonar_driver = {
.probe = xonar_probe,
.remove = __devexit_p(oxygen_pci_remove),
#ifdef CONFIG_PM
- .suspend = oxygen_pci_suspend,
- .resume = oxygen_pci_resume,
+ .driver = {
+ .pm = &oxygen_pci_pm,
+ },
#endif
.shutdown = oxygen_pci_shutdown,
};
diff --git a/sound/pci/pcxhr/pcxhr.c b/sound/pci/pcxhr/pcxhr.c
index 0435f45e9513..e3ac1f768ff6 100644
--- a/sound/pci/pcxhr/pcxhr.c
+++ b/sound/pci/pcxhr/pcxhr.c
@@ -1368,6 +1368,67 @@ static void pcxhr_proc_gpo_write(struct snd_info_entry *entry,
}
}
+/* Access to the results of the CMD_GET_TIME_CODE RMH */
+#define TIME_CODE_VALID_MASK 0x00800000
+#define TIME_CODE_NEW_MASK 0x00400000
+#define TIME_CODE_BACK_MASK 0x00200000
+#define TIME_CODE_WAIT_MASK 0x00100000
+
+/* Values for the CMD_MANAGE_SIGNAL RMH */
+#define MANAGE_SIGNAL_TIME_CODE 0x01
+#define MANAGE_SIGNAL_MIDI 0x02
+
+/* linear time code read proc*/
+static void pcxhr_proc_ltc(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct snd_pcxhr *chip = entry->private_data;
+ struct pcxhr_mgr *mgr = chip->mgr;
+ struct pcxhr_rmh rmh;
+ unsigned int ltcHrs, ltcMin, ltcSec, ltcFrm;
+ int err;
+ /* commands available when embedded DSP is running */
+ if (!(mgr->dsp_loaded & (1 << PCXHR_FIRMWARE_DSP_MAIN_INDEX))) {
+ snd_iprintf(buffer, "no firmware loaded\n");
+ return;
+ }
+ if (!mgr->capture_ltc) {
+ pcxhr_init_rmh(&rmh, CMD_MANAGE_SIGNAL);
+ rmh.cmd[0] |= MANAGE_SIGNAL_TIME_CODE;
+ err = pcxhr_send_msg(mgr, &rmh);
+ if (err) {
+ snd_iprintf(buffer, "ltc not activated (%d)\n", err);
+ return;
+ }
+ if (mgr->is_hr_stereo)
+ hr222_manage_timecode(mgr, 1);
+ else
+ pcxhr_write_io_num_reg_cont(mgr, REG_CONT_VALSMPTE,
+ REG_CONT_VALSMPTE, NULL);
+ mgr->capture_ltc = 1;
+ }
+ pcxhr_init_rmh(&rmh, CMD_GET_TIME_CODE);
+ err = pcxhr_send_msg(mgr, &rmh);
+ if (err) {
+ snd_iprintf(buffer, "ltc read error (err=%d)\n", err);
+ return ;
+ }
+ ltcHrs = 10*((rmh.stat[0] >> 8) & 0x3) + (rmh.stat[0] & 0xf);
+ ltcMin = 10*((rmh.stat[1] >> 16) & 0x7) + ((rmh.stat[1] >> 8) & 0xf);
+ ltcSec = 10*(rmh.stat[1] & 0x7) + ((rmh.stat[2] >> 16) & 0xf);
+ ltcFrm = 10*((rmh.stat[2] >> 8) & 0x3) + (rmh.stat[2] & 0xf);
+
+ snd_iprintf(buffer, "timecode: %02u:%02u:%02u-%02u\n",
+ ltcHrs, ltcMin, ltcSec, ltcFrm);
+ snd_iprintf(buffer, "raw: 0x%04x%06x%06x\n", rmh.stat[0] & 0x00ffff,
+ rmh.stat[1] & 0xffffff, rmh.stat[2] & 0xffffff);
+ /*snd_iprintf(buffer, "dsp ref time: 0x%06x%06x\n",
+ rmh.stat[3] & 0xffffff, rmh.stat[4] & 0xffffff);*/
+ if (!(rmh.stat[0] & TIME_CODE_VALID_MASK)) {
+ snd_iprintf(buffer, "warning: linear timecode not valid\n");
+ }
+}
+
static void __devinit pcxhr_proc_init(struct snd_pcxhr *chip)
{
struct snd_info_entry *entry;
@@ -1383,6 +1444,8 @@ static void __devinit pcxhr_proc_init(struct snd_pcxhr *chip)
entry->c.text.write = pcxhr_proc_gpo_write;
entry->mode |= S_IWUSR;
}
+ if (!snd_card_proc_new(chip->card, "ltc", &entry))
+ snd_info_set_text_ops(entry, chip, pcxhr_proc_ltc);
}
/* end of proc interface */
diff --git a/sound/pci/pcxhr/pcxhr.h b/sound/pci/pcxhr/pcxhr.h
index bda776c49884..a4c602c45173 100644
--- a/sound/pci/pcxhr/pcxhr.h
+++ b/sound/pci/pcxhr/pcxhr.h
@@ -103,6 +103,7 @@ struct pcxhr_mgr {
unsigned int board_has_mic:1; /* if 1 the board has microphone input */
unsigned int board_aes_in_192k:1;/* if 1 the aes input plugs do support 192kHz */
unsigned int mono_capture:1; /* if 1 the board does mono capture */
+ unsigned int capture_ltc:1; /* if 1 the board captures LTC input */
struct snd_dma_buffer hostport;
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index 304411c1fe4b..b33db1e006e7 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -504,6 +504,8 @@ static struct pcxhr_cmd_info pcxhr_dsp_cmds[] = {
[CMD_FORMAT_STREAM_IN] = { 0x870000, 0, RMH_SSIZE_FIXED },
[CMD_STREAM_SAMPLE_COUNT] = { 0x902000, 2, RMH_SSIZE_FIXED },
[CMD_AUDIO_LEVEL_ADJUST] = { 0xc22000, 0, RMH_SSIZE_FIXED },
+[CMD_GET_TIME_CODE] = { 0x060000, 5, RMH_SSIZE_FIXED },
+[CMD_MANAGE_SIGNAL] = { 0x0f0000, 0, RMH_SSIZE_FIXED },
};
#ifdef CONFIG_SND_DEBUG_VERBOSE
@@ -533,6 +535,8 @@ static char* cmd_names[] = {
[CMD_FORMAT_STREAM_IN] = "CMD_FORMAT_STREAM_IN",
[CMD_STREAM_SAMPLE_COUNT] = "CMD_STREAM_SAMPLE_COUNT",
[CMD_AUDIO_LEVEL_ADJUST] = "CMD_AUDIO_LEVEL_ADJUST",
+[CMD_GET_TIME_CODE] = "CMD_GET_TIME_CODE",
+[CMD_MANAGE_SIGNAL] = "CMD_MANAGE_SIGNAL",
};
#endif
@@ -1133,13 +1137,12 @@ static u_int64_t pcxhr_stream_read_position(struct pcxhr_mgr *mgr,
hw_sample_count = ((u_int64_t)rmh.stat[0]) << 24;
hw_sample_count += (u_int64_t)rmh.stat[1];
- snd_printdd("stream %c%d : abs samples real(%ld) timer(%ld)\n",
+ snd_printdd("stream %c%d : abs samples real(%llu) timer(%llu)\n",
stream->pipe->is_capture ? 'C' : 'P',
stream->substream->number,
- (long unsigned int)hw_sample_count,
- (long unsigned int)(stream->timer_abs_periods +
- stream->timer_period_frag +
- mgr->granularity));
+ hw_sample_count,
+ stream->timer_abs_periods + stream->timer_period_frag +
+ mgr->granularity);
return hw_sample_count;
}
@@ -1243,10 +1246,18 @@ irqreturn_t pcxhr_interrupt(int irq, void *dev_id)
if ((dsp_time_diff < 0) &&
(mgr->dsp_time_last != PCXHR_DSP_TIME_INVALID)) {
- snd_printdd("ERROR DSP TIME old(%d) new(%d) -> "
- "resynchronize all streams\n",
+ /* handle dsp counter wraparound without resync */
+ int tmp_diff = dsp_time_diff + PCXHR_DSP_TIME_MASK + 1;
+ snd_printdd("WARNING DSP timestamp old(%d) new(%d)",
mgr->dsp_time_last, dsp_time_new);
- mgr->dsp_time_err++;
+ if (tmp_diff > 0 && tmp_diff <= (2*mgr->granularity)) {
+ snd_printdd("-> timestamp wraparound OK: "
+ "diff=%d\n", tmp_diff);
+ dsp_time_diff = tmp_diff;
+ } else {
+ snd_printdd("-> resynchronize all streams\n");
+ mgr->dsp_time_err++;
+ }
}
#ifdef CONFIG_SND_DEBUG_VERBOSE
if (dsp_time_diff == 0)
diff --git a/sound/pci/pcxhr/pcxhr_core.h b/sound/pci/pcxhr/pcxhr_core.h
index be0173796cdb..a81ab6b811e7 100644
--- a/sound/pci/pcxhr/pcxhr_core.h
+++ b/sound/pci/pcxhr/pcxhr_core.h
@@ -79,6 +79,8 @@ enum {
CMD_FORMAT_STREAM_IN, /* cmd_len >= 4 stat_len = 0 */
CMD_STREAM_SAMPLE_COUNT, /* cmd_len = 2 stat_len = (2 * nb_stream) */
CMD_AUDIO_LEVEL_ADJUST, /* cmd_len = 3 stat_len = 0 */
+ CMD_GET_TIME_CODE, /* cmd_len = 1 stat_len = 5 */
+ CMD_MANAGE_SIGNAL, /* cmd_len = 1 stat_len = 0 */
CMD_LAST_INDEX
};
@@ -116,7 +118,7 @@ int pcxhr_send_msg(struct pcxhr_mgr *mgr, struct pcxhr_rmh *rmh);
#define IO_NUM_REG_OUT_ANA_LEVEL 20
#define IO_NUM_REG_IN_ANA_LEVEL 21
-
+#define REG_CONT_VALSMPTE 0x000800
#define REG_CONT_UNMUTE_INPUTS 0x020000
/* parameters used with register IO_NUM_REG_STATUS */
diff --git a/sound/pci/pcxhr/pcxhr_mix22.c b/sound/pci/pcxhr/pcxhr_mix22.c
index 1cb82c0a9cb3..84fe57626eba 100644
--- a/sound/pci/pcxhr/pcxhr_mix22.c
+++ b/sound/pci/pcxhr/pcxhr_mix22.c
@@ -53,6 +53,7 @@
#define PCXHR_DSP_RESET_DSP 0x01
#define PCXHR_DSP_RESET_MUTE 0x02
#define PCXHR_DSP_RESET_CODEC 0x08
+#define PCXHR_DSP_RESET_SMPTE 0x10
#define PCXHR_DSP_RESET_GPO_OFFSET 5
#define PCXHR_DSP_RESET_GPO_MASK 0x60
@@ -527,6 +528,16 @@ int hr222_write_gpo(struct pcxhr_mgr *mgr, int value)
return 0;
}
+int hr222_manage_timecode(struct pcxhr_mgr *mgr, int enable)
+{
+ if (enable)
+ mgr->dsp_reset |= PCXHR_DSP_RESET_SMPTE;
+ else
+ mgr->dsp_reset &= ~PCXHR_DSP_RESET_SMPTE;
+
+ PCXHR_OUTPB(mgr, PCXHR_DSP_RESET, mgr->dsp_reset);
+ return 0;
+}
int hr222_update_analog_audio_level(struct snd_pcxhr *chip,
int is_capture, int channel)
diff --git a/sound/pci/pcxhr/pcxhr_mix22.h b/sound/pci/pcxhr/pcxhr_mix22.h
index 5a37a0007e8f..5971b9933f41 100644
--- a/sound/pci/pcxhr/pcxhr_mix22.h
+++ b/sound/pci/pcxhr/pcxhr_mix22.h
@@ -34,6 +34,7 @@ int hr222_get_external_clock(struct pcxhr_mgr *mgr,
int hr222_read_gpio(struct pcxhr_mgr *mgr, int is_gpi, int *value);
int hr222_write_gpo(struct pcxhr_mgr *mgr, int value);
+int hr222_manage_timecode(struct pcxhr_mgr *mgr, int enable);
#define HR222_LINE_PLAYBACK_LEVEL_MIN 0 /* -25.5 dB */
#define HR222_LINE_PLAYBACK_ZERO_LEVEL 51 /* 0.0 dB */
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index cbeb3f77350c..760ee467cd9a 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1151,9 +1151,10 @@ static void riptide_handleirq(unsigned long dev_id)
}
#ifdef CONFIG_PM
-static int riptide_suspend(struct pci_dev *pci, pm_message_t state)
+static int riptide_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_riptide *chip = card->private_data;
chip->in_suspend = 1;
@@ -1162,13 +1163,14 @@ static int riptide_suspend(struct pci_dev *pci, pm_message_t state)
snd_ac97_suspend(chip->ac97);
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int riptide_resume(struct pci_dev *pci)
+static int riptide_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_riptide *chip = card->private_data;
pci_set_power_state(pci, PCI_D0);
@@ -1186,7 +1188,12 @@ static int riptide_resume(struct pci_dev *pci)
chip->in_suspend = 0;
return 0;
}
-#endif
+
+static SIMPLE_DEV_PM_OPS(riptide_pm, riptide_suspend, riptide_resume);
+#define RIPTIDE_PM_OPS &riptide_pm
+#else
+#define RIPTIDE_PM_OPS NULL
+#endif /* CONFIG_PM */
static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip)
{
@@ -2180,10 +2187,9 @@ static struct pci_driver driver = {
.id_table = snd_riptide_ids,
.probe = snd_card_riptide_probe,
.remove = __devexit_p(snd_card_riptide_remove),
-#ifdef CONFIG_PM
- .suspend = riptide_suspend,
- .resume = riptide_resume,
-#endif
+ .driver = {
+ .pm = RIPTIDE_PM_OPS,
+ },
};
#ifdef SUPPORT_JOYSTICK
diff --git a/sound/pci/sis7019.c b/sound/pci/sis7019.c
index 1552642765d6..512434efcc31 100644
--- a/sound/pci/sis7019.c
+++ b/sound/pci/sis7019.c
@@ -1209,9 +1209,10 @@ static int sis_chip_init(struct sis7019 *sis)
}
#ifdef CONFIG_PM
-static int sis_suspend(struct pci_dev *pci, pm_message_t state)
+static int sis_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct sis7019 *sis = card->private_data;
void __iomem *ioaddr = sis->ioaddr;
int i;
@@ -1241,13 +1242,14 @@ static int sis_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int sis_resume(struct pci_dev *pci)
+static int sis_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct sis7019 *sis = card->private_data;
void __iomem *ioaddr = sis->ioaddr;
int i;
@@ -1298,6 +1300,11 @@ error:
snd_card_disconnect(card);
return -EIO;
}
+
+static SIMPLE_DEV_PM_OPS(sis_pm, sis_suspend, sis_resume);
+#define SIS_PM_OPS &sis_pm
+#else
+#define SIS_PM_OPS NULL
#endif /* CONFIG_PM */
static int sis_alloc_suspend(struct sis7019 *sis)
@@ -1481,11 +1488,9 @@ static struct pci_driver sis7019_driver = {
.id_table = snd_sis7019_ids,
.probe = snd_sis7019_probe,
.remove = __devexit_p(snd_sis7019_remove),
-
-#ifdef CONFIG_PM
- .suspend = sis_suspend,
- .resume = sis_resume,
-#endif
+ .driver = {
+ .pm = SIS_PM_OPS,
+ },
};
module_pci_driver(sis7019_driver);
diff --git a/sound/pci/trident/trident.c b/sound/pci/trident/trident.c
index 611983ec7321..d36e6ca147e1 100644
--- a/sound/pci/trident/trident.c
+++ b/sound/pci/trident/trident.c
@@ -26,7 +26,7 @@
#include <linux/time.h>
#include <linux/module.h>
#include <sound/core.h>
-#include <sound/trident.h>
+#include "trident.h"
#include <sound/initval.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>, <audio@tridentmicro.com>");
@@ -178,8 +178,9 @@ static struct pci_driver trident_driver = {
.probe = snd_trident_probe,
.remove = __devexit_p(snd_trident_remove),
#ifdef CONFIG_PM
- .suspend = snd_trident_suspend,
- .resume = snd_trident_resume,
+ .driver = {
+ .pm = &snd_trident_pm,
+ },
#endif
};
diff --git a/include/sound/trident.h b/sound/pci/trident/trident.h
index 9f191a0a1e19..5f110eb56e47 100644
--- a/include/sound/trident.h
+++ b/sound/pci/trident/trident.h
@@ -23,10 +23,10 @@
*
*/
-#include "pcm.h"
-#include "mpu401.h"
-#include "ac97_codec.h"
-#include "util_mem.h"
+#include <sound/pcm.h>
+#include <sound/mpu401.h>
+#include <sound/ac97_codec.h>
+#include <sound/util_mem.h>
#define TRIDENT_DEVICE_ID_DX ((PCI_VENDOR_ID_TRIDENT<<16)|PCI_DEVICE_ID_TRIDENT_4DWAVE_DX)
#define TRIDENT_DEVICE_ID_NX ((PCI_VENDOR_ID_TRIDENT<<16)|PCI_DEVICE_ID_TRIDENT_4DWAVE_NX)
@@ -430,8 +430,7 @@ void snd_trident_free_voice(struct snd_trident * trident, struct snd_trident_voi
void snd_trident_start_voice(struct snd_trident * trident, unsigned int voice);
void snd_trident_stop_voice(struct snd_trident * trident, unsigned int voice);
void snd_trident_write_voice_regs(struct snd_trident * trident, struct snd_trident_voice *voice);
-int snd_trident_suspend(struct pci_dev *pci, pm_message_t state);
-int snd_trident_resume(struct pci_dev *pci);
+extern const struct dev_pm_ops snd_trident_pm;
/* TLB memory allocation */
struct snd_util_memblk *snd_trident_alloc_pages(struct snd_trident *trident,
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 61d3c0e8d4ce..94011dcae731 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -41,7 +41,7 @@
#include <sound/info.h>
#include <sound/control.h>
#include <sound/tlv.h>
-#include <sound/trident.h>
+#include "trident.h"
#include <sound/asoundef.h>
#include <asm/io.h>
@@ -3920,9 +3920,10 @@ static void snd_trident_clear_voices(struct snd_trident * trident, unsigned shor
}
#ifdef CONFIG_PM
-int snd_trident_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_trident_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_trident *trident = card->private_data;
trident->in_suspend = 1;
@@ -3936,13 +3937,14 @@ int snd_trident_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-int snd_trident_resume(struct pci_dev *pci)
+static int snd_trident_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_trident *trident = card->private_data;
pci_set_power_state(pci, PCI_D0);
@@ -3979,4 +3981,6 @@ int snd_trident_resume(struct pci_dev *pci)
trident->in_suspend = 0;
return 0;
}
+
+SIMPLE_DEV_PM_OPS(snd_trident_pm, snd_trident_suspend, snd_trident_resume);
#endif /* CONFIG_PM */
diff --git a/sound/pci/trident/trident_memory.c b/sound/pci/trident/trident_memory.c
index f9779e23fe57..3102a579660b 100644
--- a/sound/pci/trident/trident_memory.c
+++ b/sound/pci/trident/trident_memory.c
@@ -29,7 +29,7 @@
#include <linux/mutex.h>
#include <sound/core.h>
-#include <sound/trident.h>
+#include "trident.h"
/* page arguments of these two macros are Trident page (4096 bytes), not like
* aligned pages in others
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index b5afab48943e..0eb7245dd362 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -2242,9 +2242,10 @@ static int snd_via82xx_chip_init(struct via82xx *chip)
/*
* power management
*/
-static int snd_via82xx_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_via82xx_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct via82xx *chip = card->private_data;
int i;
@@ -2265,13 +2266,14 @@ static int snd_via82xx_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_via82xx_resume(struct pci_dev *pci)
+static int snd_via82xx_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct via82xx *chip = card->private_data;
int i;
@@ -2306,6 +2308,11 @@ static int snd_via82xx_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(snd_via82xx_pm, snd_via82xx_suspend, snd_via82xx_resume);
+#define SND_VIA82XX_PM_OPS &snd_via82xx_pm
+#else
+#define SND_VIA82XX_PM_OPS NULL
#endif /* CONFIG_PM */
static int snd_via82xx_free(struct via82xx *chip)
@@ -2624,10 +2631,9 @@ static struct pci_driver via82xx_driver = {
.id_table = snd_via82xx_ids,
.probe = snd_via82xx_probe,
.remove = __devexit_p(snd_via82xx_remove),
-#ifdef CONFIG_PM
- .suspend = snd_via82xx_suspend,
- .resume = snd_via82xx_resume,
-#endif
+ .driver = {
+ .pm = SND_VIA82XX_PM_OPS,
+ },
};
module_pci_driver(via82xx_driver);
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 59fd47ed0a31..e886bc16999d 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -1023,9 +1023,10 @@ static int snd_via82xx_chip_init(struct via82xx_modem *chip)
/*
* power management
*/
-static int snd_via82xx_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_via82xx_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct via82xx_modem *chip = card->private_data;
int i;
@@ -1039,13 +1040,14 @@ static int snd_via82xx_suspend(struct pci_dev *pci, pm_message_t state)
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-static int snd_via82xx_resume(struct pci_dev *pci)
+static int snd_via82xx_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct via82xx_modem *chip = card->private_data;
int i;
@@ -1069,6 +1071,11 @@ static int snd_via82xx_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(snd_via82xx_pm, snd_via82xx_suspend, snd_via82xx_resume);
+#define SND_VIA82XX_PM_OPS &snd_via82xx_pm
+#else
+#define SND_VIA82XX_PM_OPS NULL
#endif /* CONFIG_PM */
static int snd_via82xx_free(struct via82xx_modem *chip)
@@ -1228,10 +1235,9 @@ static struct pci_driver via82xx_modem_driver = {
.id_table = snd_via82xx_modem_ids,
.probe = snd_via82xx_probe,
.remove = __devexit_p(snd_via82xx_remove),
-#ifdef CONFIG_PM
- .suspend = snd_via82xx_suspend,
- .resume = snd_via82xx_resume,
-#endif
+ .driver = {
+ .pm = SND_VIA82XX_PM_OPS,
+ },
};
module_pci_driver(via82xx_modem_driver);
diff --git a/sound/pci/vx222/vx222.c b/sound/pci/vx222/vx222.c
index 1ea1f656a5dc..b89e7a86e9d8 100644
--- a/sound/pci/vx222/vx222.c
+++ b/sound/pci/vx222/vx222.c
@@ -258,22 +258,24 @@ static void __devexit snd_vx222_remove(struct pci_dev *pci)
}
#ifdef CONFIG_PM
-static int snd_vx222_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_vx222_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_vx222 *vx = card->private_data;
int err;
- err = snd_vx_suspend(&vx->core, state);
+ err = snd_vx_suspend(&vx->core);
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return err;
}
-static int snd_vx222_resume(struct pci_dev *pci)
+static int snd_vx222_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_vx222 *vx = card->private_data;
pci_set_power_state(pci, PCI_D0);
@@ -287,6 +289,11 @@ static int snd_vx222_resume(struct pci_dev *pci)
pci_set_master(pci);
return snd_vx_resume(&vx->core);
}
+
+static SIMPLE_DEV_PM_OPS(snd_vx222_pm, snd_vx222_suspend, snd_vx222_resume);
+#define SND_VX222_PM_OPS &snd_vx222_pm
+#else
+#define SND_VX222_PM_OPS NULL
#endif
static struct pci_driver vx222_driver = {
@@ -294,10 +301,9 @@ static struct pci_driver vx222_driver = {
.id_table = snd_vx222_ids,
.probe = snd_vx222_probe,
.remove = __devexit_p(snd_vx222_remove),
-#ifdef CONFIG_PM
- .suspend = snd_vx222_suspend,
- .resume = snd_vx222_resume,
-#endif
+ .driver = {
+ .pm = SND_VX222_PM_OPS,
+ },
};
module_pci_driver(vx222_driver);
diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
index 9a1d01d653a7..4810356b97ba 100644
--- a/sound/pci/ymfpci/ymfpci.c
+++ b/sound/pci/ymfpci/ymfpci.c
@@ -24,7 +24,7 @@
#include <linux/time.h>
#include <linux/module.h>
#include <sound/core.h>
-#include <sound/ymfpci.h>
+#include "ymfpci.h"
#include <sound/mpu401.h>
#include <sound/opl3.h>
#include <sound/initval.h>
@@ -356,8 +356,9 @@ static struct pci_driver ymfpci_driver = {
.probe = snd_card_ymfpci_probe,
.remove = __devexit_p(snd_card_ymfpci_remove),
#ifdef CONFIG_PM
- .suspend = snd_ymfpci_suspend,
- .resume = snd_ymfpci_resume,
+ .driver = {
+ .pm = &snd_ymfpci_pm,
+ },
#endif
};
diff --git a/include/sound/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
index 41199664666b..bddc4052286b 100644
--- a/include/sound/ymfpci.h
+++ b/sound/pci/ymfpci/ymfpci.h
@@ -22,10 +22,10 @@
*
*/
-#include "pcm.h"
-#include "rawmidi.h"
-#include "ac97_codec.h"
-#include "timer.h"
+#include <sound/pcm.h>
+#include <sound/rawmidi.h>
+#include <sound/ac97_codec.h>
+#include <sound/timer.h>
#include <linux/gameport.h>
/*
@@ -377,8 +377,7 @@ int snd_ymfpci_create(struct snd_card *card,
struct snd_ymfpci ** rcodec);
void snd_ymfpci_free_gameport(struct snd_ymfpci *chip);
-int snd_ymfpci_suspend(struct pci_dev *pci, pm_message_t state);
-int snd_ymfpci_resume(struct pci_dev *pci);
+extern const struct dev_pm_ops snd_ymfpci_pm;
int snd_ymfpci_pcm(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm);
int snd_ymfpci_pcm2(struct snd_ymfpci *chip, int device, struct snd_pcm **rpcm);
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index a8159b81e9c4..62b23635b754 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -33,7 +33,7 @@
#include <sound/control.h>
#include <sound/info.h>
#include <sound/tlv.h>
-#include <sound/ymfpci.h>
+#include "ymfpci.h"
#include <sound/asoundef.h>
#include <sound/mpu401.h>
@@ -2302,9 +2302,10 @@ static int saved_regs_index[] = {
};
#define YDSXGR_NUM_SAVED_REGS ARRAY_SIZE(saved_regs_index)
-int snd_ymfpci_suspend(struct pci_dev *pci, pm_message_t state)
+static int snd_ymfpci_suspend(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_ymfpci *chip = card->private_data;
unsigned int i;
@@ -2326,13 +2327,14 @@ int snd_ymfpci_suspend(struct pci_dev *pci, pm_message_t state)
snd_ymfpci_disable_dsp(chip);
pci_disable_device(pci);
pci_save_state(pci);
- pci_set_power_state(pci, pci_choose_state(pci, state));
+ pci_set_power_state(pci, PCI_D3hot);
return 0;
}
-int snd_ymfpci_resume(struct pci_dev *pci)
+static int snd_ymfpci_resume(struct device *dev)
{
- struct snd_card *card = pci_get_drvdata(pci);
+ struct pci_dev *pci = to_pci_dev(dev);
+ struct snd_card *card = dev_get_drvdata(dev);
struct snd_ymfpci *chip = card->private_data;
unsigned int i;
@@ -2370,6 +2372,8 @@ int snd_ymfpci_resume(struct pci_dev *pci)
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
+
+SIMPLE_DEV_PM_OPS(snd_ymfpci_pm, snd_ymfpci_suspend, snd_ymfpci_resume);
#endif /* CONFIG_PM */
int __devinit snd_ymfpci_create(struct snd_card *card,
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.c b/sound/pcmcia/pdaudiocf/pdaudiocf.c
index 830839a874b6..f9b5229b2723 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.c
@@ -251,7 +251,7 @@ static int pdacf_suspend(struct pcmcia_device *link)
snd_printdd(KERN_DEBUG "SUSPEND\n");
if (chip) {
snd_printdd(KERN_DEBUG "snd_pdacf_suspend calling\n");
- snd_pdacf_suspend(chip, PMSG_SUSPEND);
+ snd_pdacf_suspend(chip);
}
return 0;
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf.h b/sound/pcmcia/pdaudiocf/pdaudiocf.h
index 6ce9ad700290..ea41e57d7179 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf.h
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf.h
@@ -131,7 +131,7 @@ struct snd_pdacf *snd_pdacf_create(struct snd_card *card);
int snd_pdacf_ak4117_create(struct snd_pdacf *pdacf);
void snd_pdacf_powerdown(struct snd_pdacf *chip);
#ifdef CONFIG_PM
-int snd_pdacf_suspend(struct snd_pdacf *chip, pm_message_t state);
+int snd_pdacf_suspend(struct snd_pdacf *chip);
int snd_pdacf_resume(struct snd_pdacf *chip);
#endif
int snd_pdacf_pcm_new(struct snd_pdacf *chip);
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_core.c b/sound/pcmcia/pdaudiocf/pdaudiocf_core.c
index 9dce0bde5c05..ea0adfb984ad 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_core.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_core.c
@@ -262,7 +262,7 @@ void snd_pdacf_powerdown(struct snd_pdacf *chip)
#ifdef CONFIG_PM
-int snd_pdacf_suspend(struct snd_pdacf *chip, pm_message_t state)
+int snd_pdacf_suspend(struct snd_pdacf *chip)
{
u16 val;
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 512f0b472375..8f9350475c7b 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -260,7 +260,7 @@ static int vxp_suspend(struct pcmcia_device *link)
snd_printdd(KERN_DEBUG "SUSPEND\n");
if (chip) {
snd_printdd(KERN_DEBUG "snd_vx_suspend calling\n");
- snd_vx_suspend(chip, PMSG_SUSPEND);
+ snd_vx_suspend(chip);
}
return 0;
diff --git a/sound/ppc/powermac.c b/sound/ppc/powermac.c
index 5a4e263b5b0f..f5ceb6f282de 100644
--- a/sound/ppc/powermac.c
+++ b/sound/ppc/powermac.c
@@ -144,19 +144,24 @@ static int __devexit snd_pmac_remove(struct platform_device *devptr)
}
#ifdef CONFIG_PM
-static int snd_pmac_driver_suspend(struct platform_device *devptr, pm_message_t state)
+static int snd_pmac_driver_suspend(struct device *dev)
{
- struct snd_card *card = platform_get_drvdata(devptr);
+ struct snd_card *card = dev_get_drvdata(dev);
snd_pmac_suspend(card->private_data);
return 0;
}
-static int snd_pmac_driver_resume(struct platform_device *devptr)
+static int snd_pmac_driver_resume(struct device *dev)
{
- struct snd_card *card = platform_get_drvdata(devptr);
+ struct snd_card *card = dev_get_drvdata(dev);
snd_pmac_resume(card->private_data);
return 0;
}
+
+static SIMPLE_DEV_PM_OPS(snd_pmac_pm, snd_pmac_driver_suspend, snd_pmac_driver_resume);
+#define SND_PMAC_PM_OPS &snd_pmac_pm
+#else
+#define SND_PMAC_PM_OPS NULL
#endif
#define SND_PMAC_DRIVER "snd_powermac"
@@ -164,12 +169,10 @@ static int snd_pmac_driver_resume(struct platform_device *devptr)
static struct platform_driver snd_pmac_driver = {
.probe = snd_pmac_probe,
.remove = __devexit_p(snd_pmac_remove),
-#ifdef CONFIG_PM
- .suspend = snd_pmac_driver_suspend,
- .resume = snd_pmac_driver_resume,
-#endif
.driver = {
- .name = SND_PMAC_DRIVER
+ .name = SND_PMAC_DRIVER,
+ .owner = THIS_MODULE,
+ .pm = SND_PMAC_PM_OPS,
},
};
diff --git a/sound/sh/aica.c b/sound/sh/aica.c
index 391a38ca58bc..d48b523207eb 100644
--- a/sound/sh/aica.c
+++ b/sound/sh/aica.c
@@ -654,7 +654,9 @@ static struct platform_driver snd_aica_driver = {
.probe = snd_aica_probe,
.remove = __devexit_p(snd_aica_remove),
.driver = {
- .name = SND_AICA_DRIVER},
+ .name = SND_AICA_DRIVER,
+ .owner = THIS_MODULE,
+ },
};
static int __init aica_init(void)
diff --git a/sound/sh/sh_dac_audio.c b/sound/sh/sh_dac_audio.c
index f8b01c77b298..0a3394751ed2 100644
--- a/sound/sh/sh_dac_audio.c
+++ b/sound/sh/sh_dac_audio.c
@@ -438,6 +438,7 @@ static struct platform_driver sh_dac_driver = {
.remove = snd_sh_dac_remove,
.driver = {
.name = "dac_audio",
+ .owner = THIS_MODULE,
},
};
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 40b2ad1bb1cd..c5de0a84566f 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -33,6 +33,7 @@ source "sound/soc/atmel/Kconfig"
source "sound/soc/au1x/Kconfig"
source "sound/soc/blackfin/Kconfig"
source "sound/soc/davinci/Kconfig"
+source "sound/soc/dwc/Kconfig"
source "sound/soc/ep93xx/Kconfig"
source "sound/soc/fsl/Kconfig"
source "sound/soc/jz4740/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 70990f4017f4..00a555a743b6 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_SND_SOC) += atmel/
obj-$(CONFIG_SND_SOC) += au1x/
obj-$(CONFIG_SND_SOC) += blackfin/
obj-$(CONFIG_SND_SOC) += davinci/
+obj-$(CONFIG_SND_SOC) += dwc/
obj-$(CONFIG_SND_SOC) += ep93xx/
obj-$(CONFIG_SND_SOC) += fsl/
obj-$(CONFIG_SND_SOC) += jz4740/
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index 9f6bc55fc399..16b88f5c26e2 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -1,7 +1,8 @@
config SND_BF5XX_I2S
- tristate "SoC I2S Audio for the ADI BF5xx chip"
+ tristate "SoC I2S Audio for the ADI Blackfin chip"
depends on BLACKFIN
- select SND_BF5XX_SOC_SPORT
+ select SND_BF5XX_SOC_SPORT if !BF60x
+ select SND_BF6XX_SOC_SPORT if BF60x
help
Say Y or M if you want to add support for codecs attached to
the Blackfin SPORT (synchronous serial ports) interface in I2S
@@ -9,12 +10,14 @@ config SND_BF5XX_I2S
You will also need to select the audio interfaces to support below.
config SND_BF5XX_SOC_SSM2602
- tristate "SoC SSM2602 Audio support for BF52x ezkit"
+ tristate "SoC SSM2602 Audio Codec Add-On Card support"
depends on SND_BF5XX_I2S && (SPI_MASTER || I2C)
- select SND_BF5XX_SOC_I2S
+ select SND_BF5XX_SOC_I2S if !BF60x
+ select SND_BF6XX_SOC_I2S if BF60x
select SND_SOC_SSM2602
help
- Say Y if you want to add support for SoC audio on BF527-EZKIT.
+ Say Y if you want to add support for the Analog Devices
+ SSM2602 Audio Codec Add-On Card.
config SND_SOC_BFIN_EVAL_ADAU1701
tristate "Support for the EVAL-ADAU1701MINIZ board on Blackfin eval boards"
@@ -162,9 +165,15 @@ config SND_BF5XX_SOC_AD1980
config SND_BF5XX_SOC_SPORT
tristate
+config SND_BF6XX_SOC_SPORT
+ tristate
+
config SND_BF5XX_SOC_I2S
tristate
+config SND_BF6XX_SOC_I2S
+ tristate
+
config SND_BF5XX_SOC_TDM
tristate
@@ -173,7 +182,7 @@ config SND_BF5XX_SOC_AC97
config SND_BF5XX_SPORT_NUM
int "Set a SPORT for Sound chip"
- depends on (SND_BF5XX_I2S || SND_BF5XX_AC97 || SND_BF5XX_TDM)
+ depends on (SND_BF5XX_SOC_SPORT || SND_BF6XX_SOC_SPORT)
range 0 3 if BF54x
range 0 1 if !BF54x
default 0
diff --git a/sound/soc/blackfin/Makefile b/sound/soc/blackfin/Makefile
index 1bf86ccaa8de..6fea1f4cbee2 100644
--- a/sound/soc/blackfin/Makefile
+++ b/sound/soc/blackfin/Makefile
@@ -3,16 +3,20 @@ snd-bf5xx-ac97-objs := bf5xx-ac97-pcm.o
snd-bf5xx-i2s-objs := bf5xx-i2s-pcm.o
snd-bf5xx-tdm-objs := bf5xx-tdm-pcm.o
snd-soc-bf5xx-sport-objs := bf5xx-sport.o
+snd-soc-bf6xx-sport-objs := bf6xx-sport.o
snd-soc-bf5xx-ac97-objs := bf5xx-ac97.o
snd-soc-bf5xx-i2s-objs := bf5xx-i2s.o
+snd-soc-bf6xx-i2s-objs := bf6xx-i2s.o
snd-soc-bf5xx-tdm-objs := bf5xx-tdm.o
obj-$(CONFIG_SND_BF5XX_AC97) += snd-bf5xx-ac97.o
obj-$(CONFIG_SND_BF5XX_I2S) += snd-bf5xx-i2s.o
obj-$(CONFIG_SND_BF5XX_TDM) += snd-bf5xx-tdm.o
obj-$(CONFIG_SND_BF5XX_SOC_SPORT) += snd-soc-bf5xx-sport.o
+obj-$(CONFIG_SND_BF6XX_SOC_SPORT) += snd-soc-bf6xx-sport.o
obj-$(CONFIG_SND_BF5XX_SOC_AC97) += snd-soc-bf5xx-ac97.o
obj-$(CONFIG_SND_BF5XX_SOC_I2S) += snd-soc-bf5xx-i2s.o
+obj-$(CONFIG_SND_BF6XX_SOC_I2S) += snd-soc-bf6xx-i2s.o
obj-$(CONFIG_SND_BF5XX_SOC_TDM) += snd-soc-bf5xx-tdm.o
# Blackfin Machine Support
diff --git a/sound/soc/blackfin/bf6xx-i2s.c b/sound/soc/blackfin/bf6xx-i2s.c
new file mode 100644
index 000000000000..c3c2466d3a42
--- /dev/null
+++ b/sound/soc/blackfin/bf6xx-i2s.c
@@ -0,0 +1,234 @@
+/*
+ * bf6xx-i2s.c - Analog Devices BF6XX i2s interface driver
+ *
+ * Copyright (c) 2012 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+
+#include "bf6xx-sport.h"
+
+struct sport_params param;
+
+static int bfin_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+ unsigned int fmt)
+{
+ struct sport_device *sport = snd_soc_dai_get_drvdata(cpu_dai);
+ struct device *dev = &sport->pdev->dev;
+ int ret = 0;
+
+ param.spctl &= ~(SPORT_CTL_OPMODE | SPORT_CTL_CKRE | SPORT_CTL_FSR
+ | SPORT_CTL_LFS | SPORT_CTL_LAFS);
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ param.spctl |= SPORT_CTL_OPMODE | SPORT_CTL_CKRE
+ | SPORT_CTL_LFS;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ param.spctl |= SPORT_CTL_FSR;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ param.spctl |= SPORT_CTL_OPMODE | SPORT_CTL_LFS
+ | SPORT_CTL_LAFS;
+ break;
+ default:
+ dev_err(dev, "%s: Unknown DAI format type\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ param.spctl &= ~(SPORT_CTL_ICLK | SPORT_CTL_IFS);
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ case SND_SOC_DAIFMT_CBM_CFS:
+ case SND_SOC_DAIFMT_CBS_CFM:
+ ret = -EINVAL;
+ break;
+ default:
+ dev_err(dev, "%s: Unknown DAI master type\n", __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int bfin_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
+ struct device *dev = &sport->pdev->dev;
+ int ret = 0;
+
+ param.spctl &= ~SPORT_CTL_SLEN;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S8:
+ param.spctl |= 0x70;
+ sport->wdsize = 1;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ param.spctl |= 0xf0;
+ sport->wdsize = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ param.spctl |= 0x170;
+ sport->wdsize = 3;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ param.spctl |= 0x1f0;
+ sport->wdsize = 4;
+ break;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = sport_set_tx_params(sport, &param);
+ if (ret) {
+ dev_err(dev, "SPORT tx is busy!\n");
+ return ret;
+ }
+ } else {
+ ret = sport_set_rx_params(sport, &param);
+ if (ret) {
+ dev_err(dev, "SPORT rx is busy!\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bfin_i2s_suspend(struct snd_soc_dai *dai)
+{
+ struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
+
+ if (dai->capture_active)
+ sport_rx_stop(sport);
+ if (dai->playback_active)
+ sport_tx_stop(sport);
+ return 0;
+}
+
+static int bfin_i2s_resume(struct snd_soc_dai *dai)
+{
+ struct sport_device *sport = snd_soc_dai_get_drvdata(dai);
+ struct device *dev = &sport->pdev->dev;
+ int ret;
+
+ ret = sport_set_tx_params(sport, &param);
+ if (ret) {
+ dev_err(dev, "SPORT tx is busy!\n");
+ return ret;
+ }
+ ret = sport_set_rx_params(sport, &param);
+ if (ret) {
+ dev_err(dev, "SPORT rx is busy!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#else
+#define bfin_i2s_suspend NULL
+#define bfin_i2s_resume NULL
+#endif
+
+#define BFIN_I2S_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
+ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_96000)
+
+#define BFIN_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops bfin_i2s_dai_ops = {
+ .hw_params = bfin_i2s_hw_params,
+ .set_fmt = bfin_i2s_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver bfin_i2s_dai = {
+ .suspend = bfin_i2s_suspend,
+ .resume = bfin_i2s_resume,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = BFIN_I2S_RATES,
+ .formats = BFIN_I2S_FORMATS,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = BFIN_I2S_RATES,
+ .formats = BFIN_I2S_FORMATS,
+ },
+ .ops = &bfin_i2s_dai_ops,
+};
+
+static int __devinit bfin_i2s_probe(struct platform_device *pdev)
+{
+ struct sport_device *sport;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ sport = sport_create(pdev);
+ if (!sport)
+ return -ENODEV;
+
+ /* register with the ASoC layers */
+ ret = snd_soc_register_dai(dev, &bfin_i2s_dai);
+ if (ret) {
+ dev_err(dev, "Failed to register DAI: %d\n", ret);
+ sport_delete(sport);
+ return ret;
+ }
+ platform_set_drvdata(pdev, sport);
+
+ return 0;
+}
+
+static int __devexit bfin_i2s_remove(struct platform_device *pdev)
+{
+ struct sport_device *sport = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_dai(&pdev->dev);
+ sport_delete(sport);
+
+ return 0;
+}
+
+static struct platform_driver bfin_i2s_driver = {
+ .probe = bfin_i2s_probe,
+ .remove = __devexit_p(bfin_i2s_remove),
+ .driver = {
+ .name = "bfin-i2s",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(bfin_i2s_driver);
+
+MODULE_DESCRIPTION("Analog Devices BF6XX i2s interface driver");
+MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/blackfin/bf6xx-sport.c b/sound/soc/blackfin/bf6xx-sport.c
new file mode 100644
index 000000000000..318c5ba5360f
--- /dev/null
+++ b/sound/soc/blackfin/bf6xx-sport.c
@@ -0,0 +1,422 @@
+/*
+ * bf6xx_sport.c Analog Devices BF6XX SPORT driver
+ *
+ * Copyright (c) 2012 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/blackfin.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+#include "bf6xx-sport.h"
+
+int sport_set_tx_params(struct sport_device *sport,
+ struct sport_params *params)
+{
+ if (sport->tx_regs->spctl & SPORT_CTL_SPENPRI)
+ return -EBUSY;
+ sport->tx_regs->spctl = params->spctl | SPORT_CTL_SPTRAN;
+ sport->tx_regs->div = params->div;
+ SSYNC();
+ return 0;
+}
+EXPORT_SYMBOL(sport_set_tx_params);
+
+int sport_set_rx_params(struct sport_device *sport,
+ struct sport_params *params)
+{
+ if (sport->rx_regs->spctl & SPORT_CTL_SPENPRI)
+ return -EBUSY;
+ sport->rx_regs->spctl = params->spctl & ~SPORT_CTL_SPTRAN;
+ sport->rx_regs->div = params->div;
+ SSYNC();
+ return 0;
+}
+EXPORT_SYMBOL(sport_set_rx_params);
+
+static int compute_wdsize(size_t wdsize)
+{
+ switch (wdsize) {
+ case 1:
+ return WDSIZE_8 | PSIZE_8;
+ case 2:
+ return WDSIZE_16 | PSIZE_16;
+ default:
+ return WDSIZE_32 | PSIZE_32;
+ }
+}
+
+void sport_tx_start(struct sport_device *sport)
+{
+ set_dma_next_desc_addr(sport->tx_dma_chan, sport->tx_desc);
+ set_dma_config(sport->tx_dma_chan, DMAFLOW_LIST | DI_EN
+ | compute_wdsize(sport->wdsize) | NDSIZE_6);
+ enable_dma(sport->tx_dma_chan);
+ sport->tx_regs->spctl |= SPORT_CTL_SPENPRI;
+ SSYNC();
+}
+EXPORT_SYMBOL(sport_tx_start);
+
+void sport_rx_start(struct sport_device *sport)
+{
+ set_dma_next_desc_addr(sport->rx_dma_chan, sport->rx_desc);
+ set_dma_config(sport->rx_dma_chan, DMAFLOW_LIST | DI_EN | WNR
+ | compute_wdsize(sport->wdsize) | NDSIZE_6);
+ enable_dma(sport->rx_dma_chan);
+ sport->rx_regs->spctl |= SPORT_CTL_SPENPRI;
+ SSYNC();
+}
+EXPORT_SYMBOL(sport_rx_start);
+
+void sport_tx_stop(struct sport_device *sport)
+{
+ sport->tx_regs->spctl &= ~SPORT_CTL_SPENPRI;
+ SSYNC();
+ disable_dma(sport->tx_dma_chan);
+}
+EXPORT_SYMBOL(sport_tx_stop);
+
+void sport_rx_stop(struct sport_device *sport)
+{
+ sport->rx_regs->spctl &= ~SPORT_CTL_SPENPRI;
+ SSYNC();
+ disable_dma(sport->rx_dma_chan);
+}
+EXPORT_SYMBOL(sport_rx_stop);
+
+void sport_set_tx_callback(struct sport_device *sport,
+ void (*tx_callback)(void *), void *tx_data)
+{
+ sport->tx_callback = tx_callback;
+ sport->tx_data = tx_data;
+}
+EXPORT_SYMBOL(sport_set_tx_callback);
+
+void sport_set_rx_callback(struct sport_device *sport,
+ void (*rx_callback)(void *), void *rx_data)
+{
+ sport->rx_callback = rx_callback;
+ sport->rx_data = rx_data;
+}
+EXPORT_SYMBOL(sport_set_rx_callback);
+
+static void setup_desc(struct dmasg *desc, void *buf, int fragcount,
+ size_t fragsize, unsigned int cfg,
+ unsigned int count, size_t wdsize)
+{
+
+ int i;
+
+ for (i = 0; i < fragcount; ++i) {
+ desc[i].next_desc_addr = &(desc[i + 1]);
+ desc[i].start_addr = (unsigned long)buf + i*fragsize;
+ desc[i].cfg = cfg;
+ desc[i].x_count = count;
+ desc[i].x_modify = wdsize;
+ desc[i].y_count = 0;
+ desc[i].y_modify = 0;
+ }
+
+ /* make circular */
+ desc[fragcount-1].next_desc_addr = desc;
+}
+
+int sport_config_tx_dma(struct sport_device *sport, void *buf,
+ int fragcount, size_t fragsize)
+{
+ unsigned int count;
+ unsigned int cfg;
+ dma_addr_t addr;
+
+ count = fragsize/sport->wdsize;
+
+ if (sport->tx_desc)
+ dma_free_coherent(NULL, sport->tx_desc_size,
+ sport->tx_desc, 0);
+
+ sport->tx_desc = dma_alloc_coherent(NULL,
+ fragcount * sizeof(struct dmasg), &addr, 0);
+ sport->tx_desc_size = fragcount * sizeof(struct dmasg);
+ if (!sport->tx_desc)
+ return -ENOMEM;
+
+ sport->tx_buf = buf;
+ sport->tx_fragsize = fragsize;
+ sport->tx_frags = fragcount;
+ cfg = DMAFLOW_LIST | DI_EN | compute_wdsize(sport->wdsize) | NDSIZE_6;
+
+ setup_desc(sport->tx_desc, buf, fragcount, fragsize,
+ cfg|DMAEN, count, sport->wdsize);
+
+ return 0;
+}
+EXPORT_SYMBOL(sport_config_tx_dma);
+
+int sport_config_rx_dma(struct sport_device *sport, void *buf,
+ int fragcount, size_t fragsize)
+{
+ unsigned int count;
+ unsigned int cfg;
+ dma_addr_t addr;
+
+ count = fragsize/sport->wdsize;
+
+ if (sport->rx_desc)
+ dma_free_coherent(NULL, sport->rx_desc_size,
+ sport->rx_desc, 0);
+
+ sport->rx_desc = dma_alloc_coherent(NULL,
+ fragcount * sizeof(struct dmasg), &addr, 0);
+ sport->rx_desc_size = fragcount * sizeof(struct dmasg);
+ if (!sport->rx_desc)
+ return -ENOMEM;
+
+ sport->rx_buf = buf;
+ sport->rx_fragsize = fragsize;
+ sport->rx_frags = fragcount;
+ cfg = DMAFLOW_LIST | DI_EN | compute_wdsize(sport->wdsize)
+ | WNR | NDSIZE_6;
+
+ setup_desc(sport->rx_desc, buf, fragcount, fragsize,
+ cfg|DMAEN, count, sport->wdsize);
+
+ return 0;
+}
+EXPORT_SYMBOL(sport_config_rx_dma);
+
+unsigned long sport_curr_offset_tx(struct sport_device *sport)
+{
+ unsigned long curr = get_dma_curr_addr(sport->tx_dma_chan);
+
+ return (unsigned char *)curr - sport->tx_buf;
+}
+EXPORT_SYMBOL(sport_curr_offset_tx);
+
+unsigned long sport_curr_offset_rx(struct sport_device *sport)
+{
+ unsigned long curr = get_dma_curr_addr(sport->rx_dma_chan);
+
+ return (unsigned char *)curr - sport->rx_buf;
+}
+EXPORT_SYMBOL(sport_curr_offset_rx);
+
+static irqreturn_t sport_tx_irq(int irq, void *dev_id)
+{
+ struct sport_device *sport = dev_id;
+ static unsigned long status;
+
+ status = get_dma_curr_irqstat(sport->tx_dma_chan);
+ if (status & (DMA_DONE|DMA_ERR)) {
+ clear_dma_irqstat(sport->tx_dma_chan);
+ SSYNC();
+ }
+ if (sport->tx_callback)
+ sport->tx_callback(sport->tx_data);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sport_rx_irq(int irq, void *dev_id)
+{
+ struct sport_device *sport = dev_id;
+ unsigned long status;
+
+ status = get_dma_curr_irqstat(sport->rx_dma_chan);
+ if (status & (DMA_DONE|DMA_ERR)) {
+ clear_dma_irqstat(sport->rx_dma_chan);
+ SSYNC();
+ }
+ if (sport->rx_callback)
+ sport->rx_callback(sport->rx_data);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sport_err_irq(int irq, void *dev_id)
+{
+ struct sport_device *sport = dev_id;
+ struct device *dev = &sport->pdev->dev;
+
+ if (sport->tx_regs->spctl & SPORT_CTL_DERRPRI)
+ dev_err(dev, "sport error: TUVF\n");
+ if (sport->rx_regs->spctl & SPORT_CTL_DERRPRI)
+ dev_err(dev, "sport error: ROVF\n");
+
+ return IRQ_HANDLED;
+}
+
+static int sport_get_resource(struct sport_device *sport)
+{
+ struct platform_device *pdev = sport->pdev;
+ struct device *dev = &pdev->dev;
+ struct bfin_snd_platform_data *pdata = dev->platform_data;
+ struct resource *res;
+
+ if (!pdata) {
+ dev_err(dev, "No platform data\n");
+ return -ENODEV;
+ }
+ sport->pin_req = pdata->pin_req;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "No tx MEM resource\n");
+ return -ENODEV;
+ }
+ sport->tx_regs = (struct sport_register *)res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(dev, "No rx MEM resource\n");
+ return -ENODEV;
+ }
+ sport->rx_regs = (struct sport_register *)res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(dev, "No tx DMA resource\n");
+ return -ENODEV;
+ }
+ sport->tx_dma_chan = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_err(dev, "No rx DMA resource\n");
+ return -ENODEV;
+ }
+ sport->rx_dma_chan = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "No tx error irq resource\n");
+ return -ENODEV;
+ }
+ sport->tx_err_irq = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!res) {
+ dev_err(dev, "No rx error irq resource\n");
+ return -ENODEV;
+ }
+ sport->rx_err_irq = res->start;
+
+ return 0;
+}
+
+static int sport_request_resource(struct sport_device *sport)
+{
+ struct device *dev = &sport->pdev->dev;
+ int ret;
+
+ ret = peripheral_request_list(sport->pin_req, "soc-audio");
+ if (ret) {
+ dev_err(dev, "Unable to request sport pin\n");
+ return ret;
+ }
+
+ ret = request_dma(sport->tx_dma_chan, "SPORT TX Data");
+ if (ret) {
+ dev_err(dev, "Unable to allocate DMA channel for sport tx\n");
+ goto err_tx_dma;
+ }
+ set_dma_callback(sport->tx_dma_chan, sport_tx_irq, sport);
+
+ ret = request_dma(sport->rx_dma_chan, "SPORT RX Data");
+ if (ret) {
+ dev_err(dev, "Unable to allocate DMA channel for sport rx\n");
+ goto err_rx_dma;
+ }
+ set_dma_callback(sport->rx_dma_chan, sport_rx_irq, sport);
+
+ ret = request_irq(sport->tx_err_irq, sport_err_irq,
+ 0, "SPORT TX ERROR", sport);
+ if (ret) {
+ dev_err(dev, "Unable to allocate tx error IRQ for sport\n");
+ goto err_tx_irq;
+ }
+
+ ret = request_irq(sport->rx_err_irq, sport_err_irq,
+ 0, "SPORT RX ERROR", sport);
+ if (ret) {
+ dev_err(dev, "Unable to allocate rx error IRQ for sport\n");
+ goto err_rx_irq;
+ }
+
+ return 0;
+err_rx_irq:
+ free_irq(sport->tx_err_irq, sport);
+err_tx_irq:
+ free_dma(sport->rx_dma_chan);
+err_rx_dma:
+ free_dma(sport->tx_dma_chan);
+err_tx_dma:
+ peripheral_free_list(sport->pin_req);
+ return ret;
+}
+
+static void sport_free_resource(struct sport_device *sport)
+{
+ free_irq(sport->rx_err_irq, sport);
+ free_irq(sport->tx_err_irq, sport);
+ free_dma(sport->rx_dma_chan);
+ free_dma(sport->tx_dma_chan);
+ peripheral_free_list(sport->pin_req);
+}
+
+struct sport_device *sport_create(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct sport_device *sport;
+ int ret;
+
+ sport = kzalloc(sizeof(*sport), GFP_KERNEL);
+ if (!sport) {
+ dev_err(dev, "Unable to allocate memory for sport device\n");
+ return NULL;
+ }
+ sport->pdev = pdev;
+
+ ret = sport_get_resource(sport);
+ if (ret) {
+ kfree(sport);
+ return NULL;
+ }
+
+ ret = sport_request_resource(sport);
+ if (ret) {
+ kfree(sport);
+ return NULL;
+ }
+
+ dev_dbg(dev, "SPORT create success\n");
+ return sport;
+}
+EXPORT_SYMBOL(sport_create);
+
+void sport_delete(struct sport_device *sport)
+{
+ sport_free_resource(sport);
+}
+EXPORT_SYMBOL(sport_delete);
+
+MODULE_DESCRIPTION("Analog Devices BF6XX SPORT driver");
+MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/blackfin/bf6xx-sport.h b/sound/soc/blackfin/bf6xx-sport.h
new file mode 100644
index 000000000000..307d193cfcef
--- /dev/null
+++ b/sound/soc/blackfin/bf6xx-sport.h
@@ -0,0 +1,82 @@
+/*
+ * bf6xx_sport - Analog Devices BF6XX SPORT driver
+ *
+ * Copyright (c) 2012 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _BF6XX_SPORT_H_
+#define _BF6XX_SPORT_H_
+
+#include <linux/platform_device.h>
+#include <asm/bfin_sport3.h>
+
+struct sport_device {
+ struct platform_device *pdev;
+ const unsigned short *pin_req;
+ struct sport_register *tx_regs;
+ struct sport_register *rx_regs;
+ int tx_dma_chan;
+ int rx_dma_chan;
+ int tx_err_irq;
+ int rx_err_irq;
+
+ void (*tx_callback)(void *data);
+ void *tx_data;
+ void (*rx_callback)(void *data);
+ void *rx_data;
+
+ struct dmasg *tx_desc;
+ struct dmasg *rx_desc;
+ unsigned int tx_desc_size;
+ unsigned int rx_desc_size;
+ unsigned char *tx_buf;
+ unsigned char *rx_buf;
+ unsigned int tx_fragsize;
+ unsigned int rx_fragsize;
+ unsigned int tx_frags;
+ unsigned int rx_frags;
+ unsigned int wdsize;
+};
+
+struct sport_params {
+ u32 spctl;
+ u32 div;
+};
+
+struct sport_device *sport_create(struct platform_device *pdev);
+void sport_delete(struct sport_device *sport);
+int sport_set_tx_params(struct sport_device *sport,
+ struct sport_params *params);
+int sport_set_rx_params(struct sport_device *sport,
+ struct sport_params *params);
+void sport_tx_start(struct sport_device *sport);
+void sport_rx_start(struct sport_device *sport);
+void sport_tx_stop(struct sport_device *sport);
+void sport_rx_stop(struct sport_device *sport);
+void sport_set_tx_callback(struct sport_device *sport,
+ void (*tx_callback)(void *), void *tx_data);
+void sport_set_rx_callback(struct sport_device *sport,
+ void (*rx_callback)(void *), void *rx_data);
+int sport_config_tx_dma(struct sport_device *sport, void *buf,
+ int fragcount, size_t fragsize);
+int sport_config_rx_dma(struct sport_device *sport, void *buf,
+ int fragcount, size_t fragsize);
+unsigned long sport_curr_offset_tx(struct sport_device *sport);
+unsigned long sport_curr_offset_rx(struct sport_device *sport);
+
+
+
+#endif
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 1e1613a438dd..9f8e8594aeb9 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -12,6 +12,7 @@ config SND_SOC_ALL_CODECS
tristate "Build all ASoC CODEC drivers"
select SND_SOC_88PM860X if MFD_88PM860X
select SND_SOC_L3
+ select SND_SOC_AB8500_CODEC if ABX500_CORE
select SND_SOC_AC97_CODEC if SND_SOC_AC97_BUS
select SND_SOC_AD1836 if SPI_MASTER
select SND_SOC_AD193X if SND_SOC_I2C_AND_SPI
@@ -35,7 +36,9 @@ config SND_SOC_ALL_CODECS
select SND_SOC_CS4271 if SND_SOC_I2C_AND_SPI
select SND_SOC_CX20442
select SND_SOC_DA7210 if I2C
+ select SND_SOC_DA732X if I2C
select SND_SOC_DFBMCS320
+ select SND_SOC_ISABELLE if I2C
select SND_SOC_JZ4740_CODEC
select SND_SOC_LM4857 if I2C
select SND_SOC_LM49453 if I2C
@@ -54,6 +57,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_SPDIF
select SND_SOC_SSM2602 if SND_SOC_I2C_AND_SPI
select SND_SOC_STA32X if I2C
+ select SND_SOC_STA529 if I2C
select SND_SOC_STAC9766 if SND_SOC_AC97_BUS
select SND_SOC_TLV320AIC23 if I2C
select SND_SOC_TLV320AIC26 if SPI_MASTER
@@ -70,6 +74,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM2000 if I2C
select SND_SOC_WM2200 if I2C
select SND_SOC_WM5100 if I2C
+ select SND_SOC_WM5102 if MFD_WM5102
+ select SND_SOC_WM5110 if MFD_WM5110
select SND_SOC_WM8350 if MFD_WM8350
select SND_SOC_WM8400 if MFD_WM8400
select SND_SOC_WM8510 if SND_SOC_I2C_AND_SPI
@@ -126,11 +132,21 @@ config SND_SOC_ALL_CODECS
config SND_SOC_88PM860X
tristate
+config SND_SOC_ARIZONA
+ tristate
+ default y if SND_SOC_WM5102=y
+ default y if SND_SOC_WM5110=y
+ default m if SND_SOC_WM5102=m
+ default m if SND_SOC_WM5110=m
+
config SND_SOC_WM_HUBS
tristate
default y if SND_SOC_WM8993=y || SND_SOC_WM8994=y
default m if SND_SOC_WM8993=m || SND_SOC_WM8994=m
+config SND_SOC_AB8500_CODEC
+ tristate
+
config SND_SOC_AC97_CODEC
tristate
select SND_AC97_CODEC
@@ -219,12 +235,18 @@ config SND_SOC_L3
config SND_SOC_DA7210
tristate
+config SND_SOC_DA732X
+ tristate
+
config SND_SOC_DFBMCS320
tristate
config SND_SOC_DMIC
tristate
+config SND_SOC_ISABELLE
+ tristate
+
config SND_SOC_LM49453
tristate
@@ -266,6 +288,9 @@ config SND_SOC_SSM2602
config SND_SOC_STA32X
tristate
+config SND_SOC_STA529
+ tristate
+
config SND_SOC_STAC9766
tristate
@@ -313,6 +338,12 @@ config SND_SOC_WM2200
config SND_SOC_WM5100
tristate
+config SND_SOC_WM5102
+ tristate
+
+config SND_SOC_WM5110
+ tristate
+
config SND_SOC_WM8350
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index fc27fec39487..34148bb59c68 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -1,4 +1,5 @@
snd-soc-88pm860x-objs := 88pm860x-codec.o
+snd-soc-ab8500-codec-objs := ab8500-codec.o
snd-soc-ac97-objs := ac97.o
snd-soc-ad1836-objs := ad1836.o
snd-soc-ad193x-objs := ad193x.o
@@ -13,6 +14,7 @@ snd-soc-ak4535-objs := ak4535.o
snd-soc-ak4641-objs := ak4641.o
snd-soc-ak4642-objs := ak4642.o
snd-soc-ak4671-objs := ak4671.o
+snd-soc-arizona-objs := arizona.o
snd-soc-cq93vc-objs := cq93vc.o
snd-soc-cs42l51-objs := cs42l51.o
snd-soc-cs42l52-objs := cs42l52.o
@@ -21,8 +23,10 @@ snd-soc-cs4270-objs := cs4270.o
snd-soc-cs4271-objs := cs4271.o
snd-soc-cx20442-objs := cx20442.o
snd-soc-da7210-objs := da7210.o
+snd-soc-da732x-objs := da732x.o
snd-soc-dfbmcs320-objs := dfbmcs320.o
snd-soc-dmic-objs := dmic.o
+snd-soc-isabelle-objs := isabelle.o
snd-soc-jz4740-codec-objs := jz4740.o
snd-soc-l3-objs := l3.o
snd-soc-lm4857-objs := lm4857.o
@@ -41,9 +45,11 @@ snd-soc-alc5623-objs := alc5623.o
snd-soc-alc5632-objs := alc5632.o
snd-soc-sigmadsp-objs := sigmadsp.o
snd-soc-sn95031-objs := sn95031.o
-snd-soc-spdif-objs := spdif_transciever.o
+snd-soc-spdif-tx-objs := spdif_transciever.o
+snd-soc-spdif-rx-objs := spdif_receiver.o
snd-soc-ssm2602-objs := ssm2602.o
snd-soc-sta32x-objs := sta32x.o
+snd-soc-sta529-objs := sta529.o
snd-soc-stac9766-objs := stac9766.o
snd-soc-tlv320aic23-objs := tlv320aic23.o
snd-soc-tlv320aic26-objs := tlv320aic26.o
@@ -59,6 +65,8 @@ snd-soc-wm1250-ev1-objs := wm1250-ev1.o
snd-soc-wm2000-objs := wm2000.o
snd-soc-wm2200-objs := wm2200.o
snd-soc-wm5100-objs := wm5100.o wm5100-tables.o
+snd-soc-wm5102-objs := wm5102.o
+snd-soc-wm5110-objs := wm5110.o
snd-soc-wm8350-objs := wm8350.o
snd-soc-wm8400-objs := wm8400.o
snd-soc-wm8510-objs := wm8510.o
@@ -108,6 +116,7 @@ snd-soc-max9877-objs := max9877.o
snd-soc-tpa6130a2-objs := tpa6130a2.o
obj-$(CONFIG_SND_SOC_88PM860X) += snd-soc-88pm860x.o
+obj-$(CONFIG_SND_SOC_AB8500_CODEC) += snd-soc-ab8500-codec.o
obj-$(CONFIG_SND_SOC_AC97_CODEC) += snd-soc-ac97.o
obj-$(CONFIG_SND_SOC_AD1836) += snd-soc-ad1836.o
obj-$(CONFIG_SND_SOC_AD193X) += snd-soc-ad193x.o
@@ -124,6 +133,7 @@ obj-$(CONFIG_SND_SOC_AK4642) += snd-soc-ak4642.o
obj-$(CONFIG_SND_SOC_AK4671) += snd-soc-ak4671.o
obj-$(CONFIG_SND_SOC_ALC5623) += snd-soc-alc5623.o
obj-$(CONFIG_SND_SOC_ALC5632) += snd-soc-alc5632.o
+obj-$(CONFIG_SND_SOC_ARIZONA) += snd-soc-arizona.o
obj-$(CONFIG_SND_SOC_CQ0093VC) += snd-soc-cq93vc.o
obj-$(CONFIG_SND_SOC_CS42L51) += snd-soc-cs42l51.o
obj-$(CONFIG_SND_SOC_CS42L52) += snd-soc-cs42l52.o
@@ -132,8 +142,10 @@ obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
obj-$(CONFIG_SND_SOC_CS4271) += snd-soc-cs4271.o
obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o
+obj-$(CONFIG_SND_SOC_DA732X) += snd-soc-da732x.o
obj-$(CONFIG_SND_SOC_DFBMCS320) += snd-soc-dfbmcs320.o
obj-$(CONFIG_SND_SOC_DMIC) += snd-soc-dmic.o
+obj-$(CONFIG_SND_SOC_ISABELLE) += snd-soc-isabelle.o
obj-$(CONFIG_SND_SOC_JZ4740_CODEC) += snd-soc-jz4740-codec.o
obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o
obj-$(CONFIG_SND_SOC_LM4857) += snd-soc-lm4857.o
@@ -150,9 +162,10 @@ obj-$(CONFIG_SND_SOC_RT5631) += snd-soc-rt5631.o
obj-$(CONFIG_SND_SOC_SGTL5000) += snd-soc-sgtl5000.o
obj-$(CONFIG_SND_SOC_SIGMADSP) += snd-soc-sigmadsp.o
obj-$(CONFIG_SND_SOC_SN95031) +=snd-soc-sn95031.o
-obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif.o
+obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif-rx.o snd-soc-spdif-tx.o
obj-$(CONFIG_SND_SOC_SSM2602) += snd-soc-ssm2602.o
obj-$(CONFIG_SND_SOC_STA32X) += snd-soc-sta32x.o
+obj-$(CONFIG_SND_SOC_STA529) += snd-soc-sta529.o
obj-$(CONFIG_SND_SOC_STAC9766) += snd-soc-stac9766.o
obj-$(CONFIG_SND_SOC_TLV320AIC23) += snd-soc-tlv320aic23.o
obj-$(CONFIG_SND_SOC_TLV320AIC26) += snd-soc-tlv320aic26.o
@@ -168,6 +181,8 @@ obj-$(CONFIG_SND_SOC_WM1250_EV1) += snd-soc-wm1250-ev1.o
obj-$(CONFIG_SND_SOC_WM2000) += snd-soc-wm2000.o
obj-$(CONFIG_SND_SOC_WM2200) += snd-soc-wm2200.o
obj-$(CONFIG_SND_SOC_WM5100) += snd-soc-wm5100.o
+obj-$(CONFIG_SND_SOC_WM5102) += snd-soc-wm5102.o
+obj-$(CONFIG_SND_SOC_WM5110) += snd-soc-wm5110.o
obj-$(CONFIG_SND_SOC_WM8350) += snd-soc-wm8350.o
obj-$(CONFIG_SND_SOC_WM8400) += snd-soc-wm8400.o
obj-$(CONFIG_SND_SOC_WM8510) += snd-soc-wm8510.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
new file mode 100644
index 000000000000..3c795921c5f6
--- /dev/null
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -0,0 +1,2522 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Kristoffer Karlsson <kristoffer.karlsson@stericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>,
+ * for ST-Ericsson.
+ *
+ * Based on the early work done by:
+ * Mikko J. Lehto <mikko.lehto@symbio.com>,
+ * Mikko Sarmanne <mikko.sarmanne@symbio.com>,
+ * Jarmo K. Kuronen <jarmo.kuronen@symbio.com>,
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500-sysctrl.h>
+#include <linux/mfd/abx500/ab8500-codec.h>
+#include <linux/regulator/consumer.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "ab8500-codec.h"
+
+/* Macrocell value definitions */
+#define CLK_32K_OUT2_DISABLE 0x01
+#define INACTIVE_RESET_AUDIO 0x02
+#define ENABLE_AUDIO_CLK_TO_AUDIO_BLK 0x10
+#define ENABLE_VINTCORE12_SUPPLY 0x04
+#define GPIO27_DIR_OUTPUT 0x04
+#define GPIO29_DIR_OUTPUT 0x10
+#define GPIO31_DIR_OUTPUT 0x40
+
+/* Macrocell register definitions */
+#define AB8500_CTRL3_REG 0x0200
+#define AB8500_GPIO_DIR4_REG 0x1013
+
+/* Nr of FIR/IIR-coeff banks in ANC-block */
+#define AB8500_NR_OF_ANC_COEFF_BANKS 2
+
+/* Minimum duration to keep ANC IIR Init bit high or
+low before proceeding with the configuration sequence */
+#define AB8500_ANC_SM_DELAY 2000
+
+#define AB8500_FILTER_CONTROL(xname, xcount, xmin, xmax) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
+ .info = filter_control_info, \
+ .get = filter_control_get, .put = filter_control_put, \
+ .private_value = (unsigned long)&(struct filter_control) \
+ {.count = xcount, .min = xmin, .max = xmax} }
+
+struct filter_control {
+ long min, max;
+ unsigned int count;
+ long value[128];
+};
+
+/* Sidetone states */
+static const char * const enum_sid_state[] = {
+ "Unconfigured",
+ "Apply FIR",
+ "FIR is configured",
+};
+enum sid_state {
+ SID_UNCONFIGURED = 0,
+ SID_APPLY_FIR = 1,
+ SID_FIR_CONFIGURED = 2,
+};
+
+static const char * const enum_anc_state[] = {
+ "Unconfigured",
+ "Apply FIR and IIR",
+ "FIR and IIR are configured",
+ "Apply FIR",
+ "FIR is configured",
+ "Apply IIR",
+ "IIR is configured"
+};
+enum anc_state {
+ ANC_UNCONFIGURED = 0,
+ ANC_APPLY_FIR_IIR = 1,
+ ANC_FIR_IIR_CONFIGURED = 2,
+ ANC_APPLY_FIR = 3,
+ ANC_FIR_CONFIGURED = 4,
+ ANC_APPLY_IIR = 5,
+ ANC_IIR_CONFIGURED = 6
+};
+
+/* Analog microphones */
+enum amic_idx {
+ AMIC_IDX_1A,
+ AMIC_IDX_1B,
+ AMIC_IDX_2
+};
+
+struct ab8500_codec_drvdata_dbg {
+ struct regulator *vaud;
+ struct regulator *vamic1;
+ struct regulator *vamic2;
+ struct regulator *vdmic;
+};
+
+/* Private data for AB8500 device-driver */
+struct ab8500_codec_drvdata {
+ /* Sidetone */
+ long *sid_fir_values;
+ enum sid_state sid_status;
+
+ /* ANC */
+ struct mutex anc_lock;
+ long *anc_fir_values;
+ long *anc_iir_values;
+ enum anc_state anc_status;
+};
+
+static inline const char *amic_micbias_str(enum amic_micbias micbias)
+{
+ switch (micbias) {
+ case AMIC_MICBIAS_VAMIC1:
+ return "VAMIC1";
+ case AMIC_MICBIAS_VAMIC2:
+ return "VAMIC2";
+ default:
+ return "Unknown";
+ }
+}
+
+static inline const char *amic_type_str(enum amic_type type)
+{
+ switch (type) {
+ case AMIC_TYPE_DIFFERENTIAL:
+ return "DIFFERENTIAL";
+ case AMIC_TYPE_SINGLE_ENDED:
+ return "SINGLE ENDED";
+ default:
+ return "Unknown";
+ }
+}
+
+/*
+ * Read'n'write functions
+ */
+
+/* Read a register from the audio-bank of AB8500 */
+static unsigned int ab8500_codec_read_reg(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ int status;
+ unsigned int value = 0;
+
+ u8 value8;
+ status = abx500_get_register_interruptible(codec->dev, AB8500_AUDIO,
+ reg, &value8);
+ if (status < 0) {
+ dev_err(codec->dev,
+ "%s: ERROR: Register (0x%02x:0x%02x) read failed (%d).\n",
+ __func__, (u8)AB8500_AUDIO, (u8)reg, status);
+ } else {
+ dev_dbg(codec->dev,
+ "%s: Read 0x%02x from register 0x%02x:0x%02x\n",
+ __func__, value8, (u8)AB8500_AUDIO, (u8)reg);
+ value = (unsigned int)value8;
+ }
+
+ return value;
+}
+
+/* Write to a register in the audio-bank of AB8500 */
+static int ab8500_codec_write_reg(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int value)
+{
+ int status;
+
+ status = abx500_set_register_interruptible(codec->dev, AB8500_AUDIO,
+ reg, value);
+ if (status < 0)
+ dev_err(codec->dev,
+ "%s: ERROR: Register (%02x:%02x) write failed (%d).\n",
+ __func__, (u8)AB8500_AUDIO, (u8)reg, status);
+ else
+ dev_dbg(codec->dev,
+ "%s: Wrote 0x%02x into register %02x:%02x\n",
+ __func__, (u8)value, (u8)AB8500_AUDIO, (u8)reg);
+
+ return status;
+}
+
+/*
+ * Controls - DAPM
+ */
+
+/* Earpiece */
+
+/* Earpiece source selector */
+static const char * const enum_ear_lineout_source[] = {"Headset Left",
+ "Speaker Left"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ear_lineout_source, AB8500_DMICFILTCONF,
+ AB8500_DMICFILTCONF_DA3TOEAR, enum_ear_lineout_source);
+static const struct snd_kcontrol_new dapm_ear_lineout_source =
+ SOC_DAPM_ENUM("Earpiece or LineOut Mono Source",
+ dapm_enum_ear_lineout_source);
+
+/* LineOut */
+
+/* LineOut source selector */
+static const char * const enum_lineout_source[] = {"Mono Path", "Stereo Path"};
+static SOC_ENUM_DOUBLE_DECL(dapm_enum_lineout_source, AB8500_ANACONF5,
+ AB8500_ANACONF5_HSLDACTOLOL,
+ AB8500_ANACONF5_HSRDACTOLOR, enum_lineout_source);
+static const struct snd_kcontrol_new dapm_lineout_source[] = {
+ SOC_DAPM_ENUM("LineOut Source", dapm_enum_lineout_source),
+};
+
+/* Handsfree */
+
+/* Speaker Left - ANC selector */
+static const char * const enum_HFx_sel[] = {"Audio Path", "ANC"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_HFl_sel, AB8500_DIGMULTCONF2,
+ AB8500_DIGMULTCONF2_HFLSEL, enum_HFx_sel);
+static const struct snd_kcontrol_new dapm_HFl_select[] = {
+ SOC_DAPM_ENUM("Speaker Left Source", dapm_enum_HFl_sel),
+};
+
+/* Speaker Right - ANC selector */
+static SOC_ENUM_SINGLE_DECL(dapm_enum_HFr_sel, AB8500_DIGMULTCONF2,
+ AB8500_DIGMULTCONF2_HFRSEL, enum_HFx_sel);
+static const struct snd_kcontrol_new dapm_HFr_select[] = {
+ SOC_DAPM_ENUM("Speaker Right Source", dapm_enum_HFr_sel),
+};
+
+/* Mic 1 */
+
+/* Mic 1 - Mic 1a or 1b selector */
+static const char * const enum_mic1ab_sel[] = {"Mic 1b", "Mic 1a"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_mic1ab_sel, AB8500_ANACONF3,
+ AB8500_ANACONF3_MIC1SEL, enum_mic1ab_sel);
+static const struct snd_kcontrol_new dapm_mic1ab_mux[] = {
+ SOC_DAPM_ENUM("Mic 1a or 1b Select", dapm_enum_mic1ab_sel),
+};
+
+/* Mic 1 - AD3 - Mic 1 or DMic 3 selector */
+static const char * const enum_ad3_sel[] = {"Mic 1", "DMic 3"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ad3_sel, AB8500_DIGMULTCONF1,
+ AB8500_DIGMULTCONF1_AD3SEL, enum_ad3_sel);
+static const struct snd_kcontrol_new dapm_ad3_select[] = {
+ SOC_DAPM_ENUM("AD3 Source Select", dapm_enum_ad3_sel),
+};
+
+/* Mic 1 - AD6 - Mic 1 or DMic 6 selector */
+static const char * const enum_ad6_sel[] = {"Mic 1", "DMic 6"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ad6_sel, AB8500_DIGMULTCONF1,
+ AB8500_DIGMULTCONF1_AD6SEL, enum_ad6_sel);
+static const struct snd_kcontrol_new dapm_ad6_select[] = {
+ SOC_DAPM_ENUM("AD6 Source Select", dapm_enum_ad6_sel),
+};
+
+/* Mic 2 */
+
+/* Mic 2 - AD5 - Mic 2 or DMic 5 selector */
+static const char * const enum_ad5_sel[] = {"Mic 2", "DMic 5"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ad5_sel, AB8500_DIGMULTCONF1,
+ AB8500_DIGMULTCONF1_AD5SEL, enum_ad5_sel);
+static const struct snd_kcontrol_new dapm_ad5_select[] = {
+ SOC_DAPM_ENUM("AD5 Source Select", dapm_enum_ad5_sel),
+};
+
+/* LineIn */
+
+/* LineIn left - AD1 - LineIn Left or DMic 1 selector */
+static const char * const enum_ad1_sel[] = {"LineIn Left", "DMic 1"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ad1_sel, AB8500_DIGMULTCONF1,
+ AB8500_DIGMULTCONF1_AD1SEL, enum_ad1_sel);
+static const struct snd_kcontrol_new dapm_ad1_select[] = {
+ SOC_DAPM_ENUM("AD1 Source Select", dapm_enum_ad1_sel),
+};
+
+/* LineIn right - Mic 2 or LineIn Right selector */
+static const char * const enum_mic2lr_sel[] = {"Mic 2", "LineIn Right"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_mic2lr_sel, AB8500_ANACONF3,
+ AB8500_ANACONF3_LINRSEL, enum_mic2lr_sel);
+static const struct snd_kcontrol_new dapm_mic2lr_select[] = {
+ SOC_DAPM_ENUM("Mic 2 or LINR Select", dapm_enum_mic2lr_sel),
+};
+
+/* LineIn right - AD2 - LineIn Right or DMic2 selector */
+static const char * const enum_ad2_sel[] = {"LineIn Right", "DMic 2"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_ad2_sel, AB8500_DIGMULTCONF1,
+ AB8500_DIGMULTCONF1_AD2SEL, enum_ad2_sel);
+static const struct snd_kcontrol_new dapm_ad2_select[] = {
+ SOC_DAPM_ENUM("AD2 Source Select", dapm_enum_ad2_sel),
+};
+
+
+/* ANC */
+
+static const char * const enum_anc_in_sel[] = {"Mic 1 / DMic 6",
+ "Mic 2 / DMic 5"};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_anc_in_sel, AB8500_DMICFILTCONF,
+ AB8500_DMICFILTCONF_ANCINSEL, enum_anc_in_sel);
+static const struct snd_kcontrol_new dapm_anc_in_select[] = {
+ SOC_DAPM_ENUM("ANC Source", dapm_enum_anc_in_sel),
+};
+
+/* ANC - Enable/Disable */
+static const struct snd_kcontrol_new dapm_anc_enable[] = {
+ SOC_DAPM_SINGLE("Switch", AB8500_ANCCONF1,
+ AB8500_ANCCONF1_ENANC, 0, 0),
+};
+
+/* ANC to Earpiece - Mute */
+static const struct snd_kcontrol_new dapm_anc_ear_mute[] = {
+ SOC_DAPM_SINGLE("Switch", AB8500_DIGMULTCONF1,
+ AB8500_DIGMULTCONF1_ANCSEL, 1, 0),
+};
+
+
+
+/* Sidetone left */
+
+/* Sidetone left - Input selector */
+static const char * const enum_stfir1_in_sel[] = {
+ "LineIn Left", "LineIn Right", "Mic 1", "Headset Left"
+};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_stfir1_in_sel, AB8500_DIGMULTCONF2,
+ AB8500_DIGMULTCONF2_FIRSID1SEL, enum_stfir1_in_sel);
+static const struct snd_kcontrol_new dapm_stfir1_in_select[] = {
+ SOC_DAPM_ENUM("Sidetone Left Source", dapm_enum_stfir1_in_sel),
+};
+
+/* Sidetone right path */
+
+/* Sidetone right - Input selector */
+static const char * const enum_stfir2_in_sel[] = {
+ "LineIn Right", "Mic 1", "DMic 4", "Headset Right"
+};
+static SOC_ENUM_SINGLE_DECL(dapm_enum_stfir2_in_sel, AB8500_DIGMULTCONF2,
+ AB8500_DIGMULTCONF2_FIRSID2SEL, enum_stfir2_in_sel);
+static const struct snd_kcontrol_new dapm_stfir2_in_select[] = {
+ SOC_DAPM_ENUM("Sidetone Right Source", dapm_enum_stfir2_in_sel),
+};
+
+/* Vibra */
+
+static const char * const enum_pwm2vibx[] = {"Audio Path", "PWM Generator"};
+
+static SOC_ENUM_SINGLE_DECL(dapm_enum_pwm2vib1, AB8500_PWMGENCONF1,
+ AB8500_PWMGENCONF1_PWMTOVIB1, enum_pwm2vibx);
+
+static const struct snd_kcontrol_new dapm_pwm2vib1[] = {
+ SOC_DAPM_ENUM("Vibra 1 Controller", dapm_enum_pwm2vib1),
+};
+
+static SOC_ENUM_SINGLE_DECL(dapm_enum_pwm2vib2, AB8500_PWMGENCONF1,
+ AB8500_PWMGENCONF1_PWMTOVIB2, enum_pwm2vibx);
+
+static const struct snd_kcontrol_new dapm_pwm2vib2[] = {
+ SOC_DAPM_ENUM("Vibra 2 Controller", dapm_enum_pwm2vib2),
+};
+
+/*
+ * DAPM-widgets
+ */
+
+static const struct snd_soc_dapm_widget ab8500_dapm_widgets[] = {
+
+ /* Clocks */
+ SND_SOC_DAPM_CLOCK_SUPPLY("audioclk"),
+
+ /* Regulators */
+ SND_SOC_DAPM_REGULATOR_SUPPLY("V-AUD", 0),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("V-AMIC1", 0),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("V-AMIC2", 0),
+ SND_SOC_DAPM_REGULATOR_SUPPLY("V-DMIC", 0),
+
+ /* Power */
+ SND_SOC_DAPM_SUPPLY("Audio Power",
+ AB8500_POWERUP, AB8500_POWERUP_POWERUP, 0,
+ NULL, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("Audio Analog Power",
+ AB8500_POWERUP, AB8500_POWERUP_ENANA, 0,
+ NULL, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* Main supply node */
+ SND_SOC_DAPM_SUPPLY("Main Supply", SND_SOC_NOPM, 0, 0,
+ NULL, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* DA/AD */
+
+ SND_SOC_DAPM_INPUT("ADC Input"),
+ SND_SOC_DAPM_ADC("ADC", "ab8500_0c", SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_DAC("DAC", NULL, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_OUTPUT("DAC Output"),
+
+ SND_SOC_DAPM_AIF_IN("DA_IN1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DA_IN2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DA_IN3", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DA_IN4", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DA_IN5", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("DA_IN6", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT1", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT2", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT3", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT4", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT57", NULL, 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AD_OUT68", NULL, 0, SND_SOC_NOPM, 0, 0),
+
+ /* Headset path */
+
+ SND_SOC_DAPM_SUPPLY("Charge Pump", AB8500_ANACONF5,
+ AB8500_ANACONF5_ENCPHS, 0, NULL, 0),
+
+ SND_SOC_DAPM_DAC("DA1 Enable", "ab8500_0p",
+ AB8500_DAPATHENA, AB8500_DAPATHENA_ENDA1, 0),
+ SND_SOC_DAPM_DAC("DA2 Enable", "ab8500_0p",
+ AB8500_DAPATHENA, AB8500_DAPATHENA_ENDA2, 0),
+
+ SND_SOC_DAPM_PGA("HSL Digital Volume", SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_PGA("HSR Digital Volume", SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_DAC("HSL DAC", "ab8500_0p",
+ AB8500_DAPATHCONF, AB8500_DAPATHCONF_ENDACHSL, 0),
+ SND_SOC_DAPM_DAC("HSR DAC", "ab8500_0p",
+ AB8500_DAPATHCONF, AB8500_DAPATHCONF_ENDACHSR, 0),
+ SND_SOC_DAPM_MIXER("HSL DAC Mute", AB8500_MUTECONF,
+ AB8500_MUTECONF_MUTDACHSL, 1,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("HSR DAC Mute", AB8500_MUTECONF,
+ AB8500_MUTECONF_MUTDACHSR, 1,
+ NULL, 0),
+ SND_SOC_DAPM_DAC("HSL DAC Driver", "ab8500_0p",
+ AB8500_ANACONF3, AB8500_ANACONF3_ENDRVHSL, 0),
+ SND_SOC_DAPM_DAC("HSR DAC Driver", "ab8500_0p",
+ AB8500_ANACONF3, AB8500_ANACONF3_ENDRVHSR, 0),
+
+ SND_SOC_DAPM_MIXER("HSL Mute",
+ AB8500_MUTECONF, AB8500_MUTECONF_MUTHSL, 1,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("HSR Mute",
+ AB8500_MUTECONF, AB8500_MUTECONF_MUTHSR, 1,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("HSL Enable",
+ AB8500_ANACONF4, AB8500_ANACONF4_ENHSL, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("HSR Enable",
+ AB8500_ANACONF4, AB8500_ANACONF4_ENHSR, 0,
+ NULL, 0),
+ SND_SOC_DAPM_PGA("HSL Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_PGA("HSR Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("Headset Left"),
+ SND_SOC_DAPM_OUTPUT("Headset Right"),
+
+ /* LineOut path */
+
+ SND_SOC_DAPM_MUX("LineOut Source",
+ SND_SOC_NOPM, 0, 0, dapm_lineout_source),
+
+ SND_SOC_DAPM_MIXER("LOL Disable HFL",
+ AB8500_ANACONF4, AB8500_ANACONF4_ENHFL, 1,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("LOR Disable HFR",
+ AB8500_ANACONF4, AB8500_ANACONF4_ENHFR, 1,
+ NULL, 0),
+
+ SND_SOC_DAPM_MIXER("LOL Enable",
+ AB8500_ANACONF5, AB8500_ANACONF5_ENLOL, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("LOR Enable",
+ AB8500_ANACONF5, AB8500_ANACONF5_ENLOR, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("LineOut Left"),
+ SND_SOC_DAPM_OUTPUT("LineOut Right"),
+
+ /* Earpiece path */
+
+ SND_SOC_DAPM_MUX("Earpiece or LineOut Mono Source",
+ SND_SOC_NOPM, 0, 0, &dapm_ear_lineout_source),
+ SND_SOC_DAPM_MIXER("EAR DAC",
+ AB8500_DAPATHCONF, AB8500_DAPATHCONF_ENDACEAR, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("EAR Mute",
+ AB8500_MUTECONF, AB8500_MUTECONF_MUTEAR, 1,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("EAR Enable",
+ AB8500_ANACONF4, AB8500_ANACONF4_ENEAR, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("Earpiece"),
+
+ /* Handsfree path */
+
+ SND_SOC_DAPM_MIXER("DA3 Channel Volume",
+ AB8500_DAPATHENA, AB8500_DAPATHENA_ENDA3, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("DA4 Channel Volume",
+ AB8500_DAPATHENA, AB8500_DAPATHENA_ENDA4, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MUX("Speaker Left Source",
+ SND_SOC_NOPM, 0, 0, dapm_HFl_select),
+ SND_SOC_DAPM_MUX("Speaker Right Source",
+ SND_SOC_NOPM, 0, 0, dapm_HFr_select),
+ SND_SOC_DAPM_MIXER("HFL DAC", AB8500_DAPATHCONF,
+ AB8500_DAPATHCONF_ENDACHFL, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("HFR DAC",
+ AB8500_DAPATHCONF, AB8500_DAPATHCONF_ENDACHFR, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("DA4 or ANC path to HfR",
+ AB8500_DIGMULTCONF2, AB8500_DIGMULTCONF2_DATOHFREN, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("DA3 or ANC path to HfL",
+ AB8500_DIGMULTCONF2, AB8500_DIGMULTCONF2_DATOHFLEN, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("HFL Enable",
+ AB8500_ANACONF4, AB8500_ANACONF4_ENHFL, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("HFR Enable",
+ AB8500_ANACONF4, AB8500_ANACONF4_ENHFR, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("Speaker Left"),
+ SND_SOC_DAPM_OUTPUT("Speaker Right"),
+
+ /* Vibrator path */
+
+ SND_SOC_DAPM_INPUT("PWMGEN1"),
+ SND_SOC_DAPM_INPUT("PWMGEN2"),
+
+ SND_SOC_DAPM_MIXER("DA5 Channel Volume",
+ AB8500_DAPATHENA, AB8500_DAPATHENA_ENDA5, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("DA6 Channel Volume",
+ AB8500_DAPATHENA, AB8500_DAPATHENA_ENDA6, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("VIB1 DAC",
+ AB8500_DAPATHCONF, AB8500_DAPATHCONF_ENDACVIB1, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("VIB2 DAC",
+ AB8500_DAPATHCONF, AB8500_DAPATHCONF_ENDACVIB2, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MUX("Vibra 1 Controller",
+ SND_SOC_NOPM, 0, 0, dapm_pwm2vib1),
+ SND_SOC_DAPM_MUX("Vibra 2 Controller",
+ SND_SOC_NOPM, 0, 0, dapm_pwm2vib2),
+ SND_SOC_DAPM_MIXER("VIB1 Enable",
+ AB8500_ANACONF4, AB8500_ANACONF4_ENVIB1, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("VIB2 Enable",
+ AB8500_ANACONF4, AB8500_ANACONF4_ENVIB2, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("Vibra 1"),
+ SND_SOC_DAPM_OUTPUT("Vibra 2"),
+
+ /* Mic 1 */
+
+ SND_SOC_DAPM_INPUT("Mic 1"),
+
+ SND_SOC_DAPM_MUX("Mic 1a or 1b Select",
+ SND_SOC_NOPM, 0, 0, dapm_mic1ab_mux),
+ SND_SOC_DAPM_MIXER("MIC1 Mute",
+ AB8500_ANACONF2, AB8500_ANACONF2_MUTMIC1, 1,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("MIC1A V-AMICx Enable",
+ AB8500_ANACONF2, AB8500_ANACONF2_ENMIC1, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("MIC1B V-AMICx Enable",
+ AB8500_ANACONF2, AB8500_ANACONF2_ENMIC1, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("MIC1 ADC",
+ AB8500_ANACONF3, AB8500_ANACONF3_ENADCMIC, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MUX("AD3 Source Select",
+ SND_SOC_NOPM, 0, 0, dapm_ad3_select),
+ SND_SOC_DAPM_MIXER("AD3 Channel Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("AD3 Enable",
+ AB8500_ADPATHENA, AB8500_ADPATHENA_ENAD34, 0,
+ NULL, 0),
+
+ /* Mic 2 */
+
+ SND_SOC_DAPM_INPUT("Mic 2"),
+
+ SND_SOC_DAPM_MIXER("MIC2 Mute",
+ AB8500_ANACONF2, AB8500_ANACONF2_MUTMIC2, 1,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("MIC2 V-AMICx Enable", AB8500_ANACONF2,
+ AB8500_ANACONF2_ENMIC2, 0,
+ NULL, 0),
+
+ /* LineIn */
+
+ SND_SOC_DAPM_INPUT("LineIn Left"),
+ SND_SOC_DAPM_INPUT("LineIn Right"),
+
+ SND_SOC_DAPM_MIXER("LINL Mute",
+ AB8500_ANACONF2, AB8500_ANACONF2_MUTLINL, 1,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("LINR Mute",
+ AB8500_ANACONF2, AB8500_ANACONF2_MUTLINR, 1,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("LINL Enable", AB8500_ANACONF2,
+ AB8500_ANACONF2_ENLINL, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("LINR Enable", AB8500_ANACONF2,
+ AB8500_ANACONF2_ENLINR, 0,
+ NULL, 0),
+
+ /* LineIn Bypass path */
+ SND_SOC_DAPM_MIXER("LINL to HSL Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("LINR to HSR Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+
+ /* LineIn, Mic 2 */
+ SND_SOC_DAPM_MUX("Mic 2 or LINR Select",
+ SND_SOC_NOPM, 0, 0, dapm_mic2lr_select),
+ SND_SOC_DAPM_MIXER("LINL ADC", AB8500_ANACONF3,
+ AB8500_ANACONF3_ENADCLINL, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("LINR ADC", AB8500_ANACONF3,
+ AB8500_ANACONF3_ENADCLINR, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MUX("AD1 Source Select",
+ SND_SOC_NOPM, 0, 0, dapm_ad1_select),
+ SND_SOC_DAPM_MUX("AD2 Source Select",
+ SND_SOC_NOPM, 0, 0, dapm_ad2_select),
+ SND_SOC_DAPM_MIXER("AD1 Channel Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("AD2 Channel Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+
+ SND_SOC_DAPM_MIXER("AD12 Enable",
+ AB8500_ADPATHENA, AB8500_ADPATHENA_ENAD12, 0,
+ NULL, 0),
+
+ /* HD Capture path */
+
+ SND_SOC_DAPM_MUX("AD5 Source Select",
+ SND_SOC_NOPM, 0, 0, dapm_ad5_select),
+ SND_SOC_DAPM_MUX("AD6 Source Select",
+ SND_SOC_NOPM, 0, 0, dapm_ad6_select),
+ SND_SOC_DAPM_MIXER("AD5 Channel Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("AD6 Channel Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("AD57 Enable",
+ AB8500_ADPATHENA, AB8500_ADPATHENA_ENAD5768, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("AD68 Enable",
+ AB8500_ADPATHENA, AB8500_ADPATHENA_ENAD5768, 0,
+ NULL, 0),
+
+ /* Digital Microphone path */
+
+ SND_SOC_DAPM_INPUT("DMic 1"),
+ SND_SOC_DAPM_INPUT("DMic 2"),
+ SND_SOC_DAPM_INPUT("DMic 3"),
+ SND_SOC_DAPM_INPUT("DMic 4"),
+ SND_SOC_DAPM_INPUT("DMic 5"),
+ SND_SOC_DAPM_INPUT("DMic 6"),
+
+ SND_SOC_DAPM_MIXER("DMIC1",
+ AB8500_DIGMICCONF, AB8500_DIGMICCONF_ENDMIC1, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("DMIC2",
+ AB8500_DIGMICCONF, AB8500_DIGMICCONF_ENDMIC2, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("DMIC3",
+ AB8500_DIGMICCONF, AB8500_DIGMICCONF_ENDMIC3, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("DMIC4",
+ AB8500_DIGMICCONF, AB8500_DIGMICCONF_ENDMIC4, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("DMIC5",
+ AB8500_DIGMICCONF, AB8500_DIGMICCONF_ENDMIC5, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("DMIC6",
+ AB8500_DIGMICCONF, AB8500_DIGMICCONF_ENDMIC6, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("AD4 Channel Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("AD4 Enable",
+ AB8500_ADPATHENA, AB8500_ADPATHENA_ENAD34,
+ 0, NULL, 0),
+
+ /* Acoustical Noise Cancellation path */
+
+ SND_SOC_DAPM_INPUT("ANC Configure Input"),
+ SND_SOC_DAPM_OUTPUT("ANC Configure Output"),
+
+ SND_SOC_DAPM_MUX("ANC Source",
+ SND_SOC_NOPM, 0, 0,
+ dapm_anc_in_select),
+ SND_SOC_DAPM_SWITCH("ANC",
+ SND_SOC_NOPM, 0, 0,
+ dapm_anc_enable),
+ SND_SOC_DAPM_SWITCH("ANC to Earpiece",
+ SND_SOC_NOPM, 0, 0,
+ dapm_anc_ear_mute),
+
+ /* Sidetone Filter path */
+
+ SND_SOC_DAPM_MUX("Sidetone Left Source",
+ SND_SOC_NOPM, 0, 0,
+ dapm_stfir1_in_select),
+ SND_SOC_DAPM_MUX("Sidetone Right Source",
+ SND_SOC_NOPM, 0, 0,
+ dapm_stfir2_in_select),
+ SND_SOC_DAPM_MIXER("STFIR1 Control",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("STFIR2 Control",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("STFIR1 Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+ SND_SOC_DAPM_MIXER("STFIR2 Volume",
+ SND_SOC_NOPM, 0, 0,
+ NULL, 0),
+};
+
+/*
+ * DAPM-routes
+ */
+static const struct snd_soc_dapm_route ab8500_dapm_routes[] = {
+ /* Power AB8500 audio-block when AD/DA is active */
+ {"Main Supply", NULL, "V-AUD"},
+ {"Main Supply", NULL, "audioclk"},
+ {"Main Supply", NULL, "Audio Power"},
+ {"Main Supply", NULL, "Audio Analog Power"},
+
+ {"DAC", NULL, "ab8500_0p"},
+ {"DAC", NULL, "Main Supply"},
+ {"ADC", NULL, "ab8500_0c"},
+ {"ADC", NULL, "Main Supply"},
+
+ /* ANC Configure */
+ {"ANC Configure Input", NULL, "Main Supply"},
+ {"ANC Configure Output", NULL, "ANC Configure Input"},
+
+ /* AD/DA */
+ {"ADC", NULL, "ADC Input"},
+ {"DAC Output", NULL, "DAC"},
+
+ /* Powerup charge pump if DA1/2 is in use */
+
+ {"DA_IN1", NULL, "ab8500_0p"},
+ {"DA_IN1", NULL, "Charge Pump"},
+ {"DA_IN2", NULL, "ab8500_0p"},
+ {"DA_IN2", NULL, "Charge Pump"},
+
+ /* Headset path */
+
+ {"DA1 Enable", NULL, "DA_IN1"},
+ {"DA2 Enable", NULL, "DA_IN2"},
+
+ {"HSL Digital Volume", NULL, "DA1 Enable"},
+ {"HSR Digital Volume", NULL, "DA2 Enable"},
+
+ {"HSL DAC", NULL, "HSL Digital Volume"},
+ {"HSR DAC", NULL, "HSR Digital Volume"},
+
+ {"HSL DAC Mute", NULL, "HSL DAC"},
+ {"HSR DAC Mute", NULL, "HSR DAC"},
+
+ {"HSL DAC Driver", NULL, "HSL DAC Mute"},
+ {"HSR DAC Driver", NULL, "HSR DAC Mute"},
+
+ {"HSL Mute", NULL, "HSL DAC Driver"},
+ {"HSR Mute", NULL, "HSR DAC Driver"},
+
+ {"HSL Enable", NULL, "HSL Mute"},
+ {"HSR Enable", NULL, "HSR Mute"},
+
+ {"HSL Volume", NULL, "HSL Enable"},
+ {"HSR Volume", NULL, "HSR Enable"},
+
+ {"Headset Left", NULL, "HSL Volume"},
+ {"Headset Right", NULL, "HSR Volume"},
+
+ /* HF or LineOut path */
+
+ {"DA_IN3", NULL, "ab8500_0p"},
+ {"DA3 Channel Volume", NULL, "DA_IN3"},
+ {"DA_IN4", NULL, "ab8500_0p"},
+ {"DA4 Channel Volume", NULL, "DA_IN4"},
+
+ {"Speaker Left Source", "Audio Path", "DA3 Channel Volume"},
+ {"Speaker Right Source", "Audio Path", "DA4 Channel Volume"},
+
+ {"DA3 or ANC path to HfL", NULL, "Speaker Left Source"},
+ {"DA4 or ANC path to HfR", NULL, "Speaker Right Source"},
+
+ /* HF path */
+
+ {"HFL DAC", NULL, "DA3 or ANC path to HfL"},
+ {"HFR DAC", NULL, "DA4 or ANC path to HfR"},
+
+ {"HFL Enable", NULL, "HFL DAC"},
+ {"HFR Enable", NULL, "HFR DAC"},
+
+ {"Speaker Left", NULL, "HFL Enable"},
+ {"Speaker Right", NULL, "HFR Enable"},
+
+ /* Earpiece path */
+
+ {"Earpiece or LineOut Mono Source", "Headset Left",
+ "HSL Digital Volume"},
+ {"Earpiece or LineOut Mono Source", "Speaker Left",
+ "DA3 or ANC path to HfL"},
+
+ {"EAR DAC", NULL, "Earpiece or LineOut Mono Source"},
+
+ {"EAR Mute", NULL, "EAR DAC"},
+
+ {"EAR Enable", NULL, "EAR Mute"},
+
+ {"Earpiece", NULL, "EAR Enable"},
+
+ /* LineOut path stereo */
+
+ {"LineOut Source", "Stereo Path", "HSL DAC Driver"},
+ {"LineOut Source", "Stereo Path", "HSR DAC Driver"},
+
+ /* LineOut path mono */
+
+ {"LineOut Source", "Mono Path", "EAR DAC"},
+
+ /* LineOut path */
+
+ {"LOL Disable HFL", NULL, "LineOut Source"},
+ {"LOR Disable HFR", NULL, "LineOut Source"},
+
+ {"LOL Enable", NULL, "LOL Disable HFL"},
+ {"LOR Enable", NULL, "LOR Disable HFR"},
+
+ {"LineOut Left", NULL, "LOL Enable"},
+ {"LineOut Right", NULL, "LOR Enable"},
+
+ /* Vibrator path */
+
+ {"DA_IN5", NULL, "ab8500_0p"},
+ {"DA5 Channel Volume", NULL, "DA_IN5"},
+ {"DA_IN6", NULL, "ab8500_0p"},
+ {"DA6 Channel Volume", NULL, "DA_IN6"},
+
+ {"VIB1 DAC", NULL, "DA5 Channel Volume"},
+ {"VIB2 DAC", NULL, "DA6 Channel Volume"},
+
+ {"Vibra 1 Controller", "Audio Path", "VIB1 DAC"},
+ {"Vibra 2 Controller", "Audio Path", "VIB2 DAC"},
+ {"Vibra 1 Controller", "PWM Generator", "PWMGEN1"},
+ {"Vibra 2 Controller", "PWM Generator", "PWMGEN2"},
+
+ {"VIB1 Enable", NULL, "Vibra 1 Controller"},
+ {"VIB2 Enable", NULL, "Vibra 2 Controller"},
+
+ {"Vibra 1", NULL, "VIB1 Enable"},
+ {"Vibra 2", NULL, "VIB2 Enable"},
+
+
+ /* Mic 2 */
+
+ {"MIC2 V-AMICx Enable", NULL, "Mic 2"},
+
+ /* LineIn */
+ {"LINL Mute", NULL, "LineIn Left"},
+ {"LINR Mute", NULL, "LineIn Right"},
+
+ {"LINL Enable", NULL, "LINL Mute"},
+ {"LINR Enable", NULL, "LINR Mute"},
+
+ /* LineIn, Mic 2 */
+ {"Mic 2 or LINR Select", "LineIn Right", "LINR Enable"},
+ {"Mic 2 or LINR Select", "Mic 2", "MIC2 V-AMICx Enable"},
+
+ {"LINL ADC", NULL, "LINL Enable"},
+ {"LINR ADC", NULL, "Mic 2 or LINR Select"},
+
+ {"AD1 Source Select", "LineIn Left", "LINL ADC"},
+ {"AD2 Source Select", "LineIn Right", "LINR ADC"},
+
+ {"AD1 Channel Volume", NULL, "AD1 Source Select"},
+ {"AD2 Channel Volume", NULL, "AD2 Source Select"},
+
+ {"AD12 Enable", NULL, "AD1 Channel Volume"},
+ {"AD12 Enable", NULL, "AD2 Channel Volume"},
+
+ {"AD_OUT1", NULL, "ab8500_0c"},
+ {"AD_OUT1", NULL, "AD12 Enable"},
+ {"AD_OUT2", NULL, "ab8500_0c"},
+ {"AD_OUT2", NULL, "AD12 Enable"},
+
+ /* Mic 1 */
+
+ {"MIC1 Mute", NULL, "Mic 1"},
+
+ {"MIC1A V-AMICx Enable", NULL, "MIC1 Mute"},
+ {"MIC1B V-AMICx Enable", NULL, "MIC1 Mute"},
+
+ {"Mic 1a or 1b Select", "Mic 1a", "MIC1A V-AMICx Enable"},
+ {"Mic 1a or 1b Select", "Mic 1b", "MIC1B V-AMICx Enable"},
+
+ {"MIC1 ADC", NULL, "Mic 1a or 1b Select"},
+
+ {"AD3 Source Select", "Mic 1", "MIC1 ADC"},
+
+ {"AD3 Channel Volume", NULL, "AD3 Source Select"},
+
+ {"AD3 Enable", NULL, "AD3 Channel Volume"},
+
+ {"AD_OUT3", NULL, "ab8500_0c"},
+ {"AD_OUT3", NULL, "AD3 Enable"},
+
+ /* HD Capture path */
+
+ {"AD5 Source Select", "Mic 2", "LINR ADC"},
+ {"AD6 Source Select", "Mic 1", "MIC1 ADC"},
+
+ {"AD5 Channel Volume", NULL, "AD5 Source Select"},
+ {"AD6 Channel Volume", NULL, "AD6 Source Select"},
+
+ {"AD57 Enable", NULL, "AD5 Channel Volume"},
+ {"AD68 Enable", NULL, "AD6 Channel Volume"},
+
+ {"AD_OUT57", NULL, "ab8500_0c"},
+ {"AD_OUT57", NULL, "AD57 Enable"},
+ {"AD_OUT68", NULL, "ab8500_0c"},
+ {"AD_OUT68", NULL, "AD68 Enable"},
+
+ /* Digital Microphone path */
+
+ {"DMic 1", NULL, "V-DMIC"},
+ {"DMic 2", NULL, "V-DMIC"},
+ {"DMic 3", NULL, "V-DMIC"},
+ {"DMic 4", NULL, "V-DMIC"},
+ {"DMic 5", NULL, "V-DMIC"},
+ {"DMic 6", NULL, "V-DMIC"},
+
+ {"AD1 Source Select", NULL, "DMic 1"},
+ {"AD2 Source Select", NULL, "DMic 2"},
+ {"AD3 Source Select", NULL, "DMic 3"},
+ {"AD5 Source Select", NULL, "DMic 5"},
+ {"AD6 Source Select", NULL, "DMic 6"},
+
+ {"AD4 Channel Volume", NULL, "DMic 4"},
+ {"AD4 Enable", NULL, "AD4 Channel Volume"},
+
+ {"AD_OUT4", NULL, "ab8500_0c"},
+ {"AD_OUT4", NULL, "AD4 Enable"},
+
+ /* LineIn Bypass path */
+
+ {"LINL to HSL Volume", NULL, "LINL Enable"},
+ {"LINR to HSR Volume", NULL, "LINR Enable"},
+
+ {"HSL DAC Driver", NULL, "LINL to HSL Volume"},
+ {"HSR DAC Driver", NULL, "LINR to HSR Volume"},
+
+ /* ANC path (Acoustic Noise Cancellation) */
+
+ {"ANC Source", "Mic 2 / DMic 5", "AD5 Channel Volume"},
+ {"ANC Source", "Mic 1 / DMic 6", "AD6 Channel Volume"},
+
+ {"ANC", "Switch", "ANC Source"},
+
+ {"Speaker Left Source", "ANC", "ANC"},
+ {"Speaker Right Source", "ANC", "ANC"},
+ {"ANC to Earpiece", "Switch", "ANC"},
+
+ {"HSL Digital Volume", NULL, "ANC to Earpiece"},
+
+ /* Sidetone Filter path */
+
+ {"Sidetone Left Source", "LineIn Left", "AD12 Enable"},
+ {"Sidetone Left Source", "LineIn Right", "AD12 Enable"},
+ {"Sidetone Left Source", "Mic 1", "AD3 Enable"},
+ {"Sidetone Left Source", "Headset Left", "DA_IN1"},
+ {"Sidetone Right Source", "LineIn Right", "AD12 Enable"},
+ {"Sidetone Right Source", "Mic 1", "AD3 Enable"},
+ {"Sidetone Right Source", "DMic 4", "AD4 Enable"},
+ {"Sidetone Right Source", "Headset Right", "DA_IN2"},
+
+ {"STFIR1 Control", NULL, "Sidetone Left Source"},
+ {"STFIR2 Control", NULL, "Sidetone Right Source"},
+
+ {"STFIR1 Volume", NULL, "STFIR1 Control"},
+ {"STFIR2 Volume", NULL, "STFIR2 Control"},
+
+ {"DA1 Enable", NULL, "STFIR1 Volume"},
+ {"DA2 Enable", NULL, "STFIR2 Volume"},
+};
+
+static const struct snd_soc_dapm_route ab8500_dapm_routes_mic1a_vamicx[] = {
+ {"MIC1A V-AMICx Enable", NULL, "V-AMIC1"},
+ {"MIC1A V-AMICx Enable", NULL, "V-AMIC2"},
+};
+
+static const struct snd_soc_dapm_route ab8500_dapm_routes_mic1b_vamicx[] = {
+ {"MIC1B V-AMICx Enable", NULL, "V-AMIC1"},
+ {"MIC1B V-AMICx Enable", NULL, "V-AMIC2"},
+};
+
+static const struct snd_soc_dapm_route ab8500_dapm_routes_mic2_vamicx[] = {
+ {"MIC2 V-AMICx Enable", NULL, "V-AMIC1"},
+ {"MIC2 V-AMICx Enable", NULL, "V-AMIC2"},
+};
+
+/* ANC FIR-coefficients configuration sequence */
+static void anc_fir(struct snd_soc_codec *codec,
+ unsigned int bnk, unsigned int par, unsigned int val)
+{
+ if (par == 0 && bnk == 0)
+ snd_soc_update_bits(codec, AB8500_ANCCONF1,
+ BIT(AB8500_ANCCONF1_ANCFIRUPDATE),
+ BIT(AB8500_ANCCONF1_ANCFIRUPDATE));
+
+ snd_soc_write(codec, AB8500_ANCCONF5, val >> 8 & 0xff);
+ snd_soc_write(codec, AB8500_ANCCONF6, val & 0xff);
+
+ if (par == AB8500_ANC_FIR_COEFFS - 1 && bnk == 1)
+ snd_soc_update_bits(codec, AB8500_ANCCONF1,
+ BIT(AB8500_ANCCONF1_ANCFIRUPDATE), 0);
+}
+
+/* ANC IIR-coefficients configuration sequence */
+static void anc_iir(struct snd_soc_codec *codec, unsigned int bnk,
+ unsigned int par, unsigned int val)
+{
+ if (par == 0) {
+ if (bnk == 0) {
+ snd_soc_update_bits(codec, AB8500_ANCCONF1,
+ BIT(AB8500_ANCCONF1_ANCIIRINIT),
+ BIT(AB8500_ANCCONF1_ANCIIRINIT));
+ usleep_range(AB8500_ANC_SM_DELAY, AB8500_ANC_SM_DELAY);
+ snd_soc_update_bits(codec, AB8500_ANCCONF1,
+ BIT(AB8500_ANCCONF1_ANCIIRINIT), 0);
+ usleep_range(AB8500_ANC_SM_DELAY, AB8500_ANC_SM_DELAY);
+ } else {
+ snd_soc_update_bits(codec, AB8500_ANCCONF1,
+ BIT(AB8500_ANCCONF1_ANCIIRUPDATE),
+ BIT(AB8500_ANCCONF1_ANCIIRUPDATE));
+ }
+ } else if (par > 3) {
+ snd_soc_write(codec, AB8500_ANCCONF7, 0);
+ snd_soc_write(codec, AB8500_ANCCONF8, val >> 16 & 0xff);
+ }
+
+ snd_soc_write(codec, AB8500_ANCCONF7, val >> 8 & 0xff);
+ snd_soc_write(codec, AB8500_ANCCONF8, val & 0xff);
+
+ if (par == AB8500_ANC_IIR_COEFFS - 1 && bnk == 1)
+ snd_soc_update_bits(codec, AB8500_ANCCONF1,
+ BIT(AB8500_ANCCONF1_ANCIIRUPDATE), 0);
+}
+
+/* ANC IIR-/FIR-coefficients configuration sequence */
+static void anc_configure(struct snd_soc_codec *codec,
+ bool apply_fir, bool apply_iir)
+{
+ struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
+ unsigned int bnk, par, val;
+
+ dev_dbg(codec->dev, "%s: Enter.\n", __func__);
+
+ if (apply_fir)
+ snd_soc_update_bits(codec, AB8500_ANCCONF1,
+ BIT(AB8500_ANCCONF1_ENANC), 0);
+
+ snd_soc_update_bits(codec, AB8500_ANCCONF1,
+ BIT(AB8500_ANCCONF1_ENANC), BIT(AB8500_ANCCONF1_ENANC));
+
+ if (apply_fir)
+ for (bnk = 0; bnk < AB8500_NR_OF_ANC_COEFF_BANKS; bnk++)
+ for (par = 0; par < AB8500_ANC_FIR_COEFFS; par++) {
+ val = snd_soc_read(codec,
+ drvdata->anc_fir_values[par]);
+ anc_fir(codec, bnk, par, val);
+ }
+
+ if (apply_iir)
+ for (bnk = 0; bnk < AB8500_NR_OF_ANC_COEFF_BANKS; bnk++)
+ for (par = 0; par < AB8500_ANC_IIR_COEFFS; par++) {
+ val = snd_soc_read(codec,
+ drvdata->anc_iir_values[par]);
+ anc_iir(codec, bnk, par, val);
+ }
+
+ dev_dbg(codec->dev, "%s: Exit.\n", __func__);
+}
+
+/*
+ * Control-events
+ */
+
+static int sid_status_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
+
+ mutex_lock(&codec->mutex);
+ ucontrol->value.integer.value[0] = drvdata->sid_status;
+ mutex_unlock(&codec->mutex);
+
+ return 0;
+}
+
+/* Write sidetone FIR-coefficients configuration sequence */
+static int sid_status_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
+ unsigned int param, sidconf, val;
+ int status = 1;
+
+ dev_dbg(codec->dev, "%s: Enter\n", __func__);
+
+ if (ucontrol->value.integer.value[0] != SID_APPLY_FIR) {
+ dev_err(codec->dev,
+ "%s: ERROR: This control supports '%s' only!\n",
+ __func__, enum_sid_state[SID_APPLY_FIR]);
+ return -EIO;
+ }
+
+ mutex_lock(&codec->mutex);
+
+ sidconf = snd_soc_read(codec, AB8500_SIDFIRCONF);
+ if (((sidconf & BIT(AB8500_SIDFIRCONF_FIRSIDBUSY)) != 0)) {
+ if ((sidconf & BIT(AB8500_SIDFIRCONF_ENFIRSIDS)) == 0) {
+ dev_err(codec->dev, "%s: Sidetone busy while off!\n",
+ __func__);
+ status = -EPERM;
+ } else {
+ status = -EBUSY;
+ }
+ goto out;
+ }
+
+ snd_soc_write(codec, AB8500_SIDFIRADR, 0);
+
+ for (param = 0; param < AB8500_SID_FIR_COEFFS; param++) {
+ val = snd_soc_read(codec, drvdata->sid_fir_values[param]);
+ snd_soc_write(codec, AB8500_SIDFIRCOEF1, val >> 8 & 0xff);
+ snd_soc_write(codec, AB8500_SIDFIRCOEF2, val & 0xff);
+ }
+
+ snd_soc_update_bits(codec, AB8500_SIDFIRADR,
+ BIT(AB8500_SIDFIRADR_FIRSIDSET),
+ BIT(AB8500_SIDFIRADR_FIRSIDSET));
+ snd_soc_update_bits(codec, AB8500_SIDFIRADR,
+ BIT(AB8500_SIDFIRADR_FIRSIDSET), 0);
+
+ drvdata->sid_status = SID_FIR_CONFIGURED;
+
+out:
+ mutex_unlock(&codec->mutex);
+
+ dev_dbg(codec->dev, "%s: Exit\n", __func__);
+
+ return status;
+}
+
+static int anc_status_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
+
+ mutex_lock(&codec->mutex);
+ ucontrol->value.integer.value[0] = drvdata->anc_status;
+ mutex_unlock(&codec->mutex);
+
+ return 0;
+}
+
+static int anc_status_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
+ struct device *dev = codec->dev;
+ bool apply_fir, apply_iir;
+ int req, status;
+
+ dev_dbg(dev, "%s: Enter.\n", __func__);
+
+ mutex_lock(&drvdata->anc_lock);
+
+ req = ucontrol->value.integer.value[0];
+ if (req != ANC_APPLY_FIR_IIR && req != ANC_APPLY_FIR &&
+ req != ANC_APPLY_IIR) {
+ dev_err(dev, "%s: ERROR: Unsupported status to set '%s'!\n",
+ __func__, enum_anc_state[req]);
+ status = -EINVAL;
+ goto cleanup;
+ }
+ apply_fir = req == ANC_APPLY_FIR || req == ANC_APPLY_FIR_IIR;
+ apply_iir = req == ANC_APPLY_IIR || req == ANC_APPLY_FIR_IIR;
+
+ status = snd_soc_dapm_force_enable_pin(&codec->dapm,
+ "ANC Configure Input");
+ if (status < 0) {
+ dev_err(dev,
+ "%s: ERROR: Failed to enable power (status = %d)!\n",
+ __func__, status);
+ goto cleanup;
+ }
+ snd_soc_dapm_sync(&codec->dapm);
+
+ mutex_lock(&codec->mutex);
+ anc_configure(codec, apply_fir, apply_iir);
+ mutex_unlock(&codec->mutex);
+
+ if (apply_fir) {
+ if (drvdata->anc_status == ANC_IIR_CONFIGURED)
+ drvdata->anc_status = ANC_FIR_IIR_CONFIGURED;
+ else if (drvdata->anc_status != ANC_FIR_IIR_CONFIGURED)
+ drvdata->anc_status = ANC_FIR_CONFIGURED;
+ }
+ if (apply_iir) {
+ if (drvdata->anc_status == ANC_FIR_CONFIGURED)
+ drvdata->anc_status = ANC_FIR_IIR_CONFIGURED;
+ else if (drvdata->anc_status != ANC_FIR_IIR_CONFIGURED)
+ drvdata->anc_status = ANC_IIR_CONFIGURED;
+ }
+
+ status = snd_soc_dapm_disable_pin(&codec->dapm, "ANC Configure Input");
+ snd_soc_dapm_sync(&codec->dapm);
+
+cleanup:
+ mutex_unlock(&drvdata->anc_lock);
+
+ if (status < 0)
+ dev_err(dev, "%s: Unable to configure ANC! (status = %d)\n",
+ __func__, status);
+
+ dev_dbg(dev, "%s: Exit.\n", __func__);
+
+ return (status < 0) ? status : 1;
+}
+
+static int filter_control_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct filter_control *fc =
+ (struct filter_control *)kcontrol->private_value;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = fc->count;
+ uinfo->value.integer.min = fc->min;
+ uinfo->value.integer.max = fc->max;
+
+ return 0;
+}
+
+static int filter_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct filter_control *fc =
+ (struct filter_control *)kcontrol->private_value;
+ unsigned int i;
+
+ mutex_lock(&codec->mutex);
+ for (i = 0; i < fc->count; i++)
+ ucontrol->value.integer.value[i] = fc->value[i];
+ mutex_unlock(&codec->mutex);
+
+ return 0;
+}
+
+static int filter_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct filter_control *fc =
+ (struct filter_control *)kcontrol->private_value;
+ unsigned int i;
+
+ mutex_lock(&codec->mutex);
+ for (i = 0; i < fc->count; i++)
+ fc->value[i] = ucontrol->value.integer.value[i];
+ mutex_unlock(&codec->mutex);
+
+ return 0;
+}
+
+/*
+ * Controls - Non-DAPM ASoC
+ */
+
+static DECLARE_TLV_DB_SCALE(adx_dig_gain_tlv, -3200, 100, 1);
+/* -32dB = Mute */
+
+static DECLARE_TLV_DB_SCALE(dax_dig_gain_tlv, -6300, 100, 1);
+/* -63dB = Mute */
+
+static DECLARE_TLV_DB_SCALE(hs_ear_dig_gain_tlv, -100, 100, 1);
+/* -1dB = Mute */
+
+static const unsigned int hs_gain_tlv[] = {
+ TLV_DB_RANGE_HEAD(2),
+ 0, 3, TLV_DB_SCALE_ITEM(-3200, 400, 0),
+ 4, 15, TLV_DB_SCALE_ITEM(-1800, 200, 0),
+};
+
+static DECLARE_TLV_DB_SCALE(mic_gain_tlv, 0, 100, 0);
+
+static DECLARE_TLV_DB_SCALE(lin_gain_tlv, -1000, 200, 0);
+
+static DECLARE_TLV_DB_SCALE(lin2hs_gain_tlv, -3800, 200, 1);
+/* -38dB = Mute */
+
+static const char * const enum_hsfadspeed[] = {"2ms", "0.5ms", "10.6ms",
+ "5ms"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_hsfadspeed,
+ AB8500_DIGMICCONF, AB8500_DIGMICCONF_HSFADSPEED, enum_hsfadspeed);
+
+static const char * const enum_envdetthre[] = {
+ "250mV", "300mV", "350mV", "400mV",
+ "450mV", "500mV", "550mV", "600mV",
+ "650mV", "700mV", "750mV", "800mV",
+ "850mV", "900mV", "950mV", "1.00V" };
+static SOC_ENUM_SINGLE_DECL(soc_enum_envdeththre,
+ AB8500_ENVCPCONF, AB8500_ENVCPCONF_ENVDETHTHRE, enum_envdetthre);
+static SOC_ENUM_SINGLE_DECL(soc_enum_envdetlthre,
+ AB8500_ENVCPCONF, AB8500_ENVCPCONF_ENVDETLTHRE, enum_envdetthre);
+static const char * const enum_envdettime[] = {
+ "26.6us", "53.2us", "106us", "213us",
+ "426us", "851us", "1.70ms", "3.40ms",
+ "6.81ms", "13.6ms", "27.2ms", "54.5ms",
+ "109ms", "218ms", "436ms", "872ms" };
+static SOC_ENUM_SINGLE_DECL(soc_enum_envdettime,
+ AB8500_SIGENVCONF, AB8500_SIGENVCONF_ENVDETTIME, enum_envdettime);
+
+static const char * const enum_sinc31[] = {"Sinc 3", "Sinc 1"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_hsesinc, AB8500_HSLEARDIGGAIN,
+ AB8500_HSLEARDIGGAIN_HSSINC1, enum_sinc31);
+
+static const char * const enum_fadespeed[] = {"1ms", "4ms", "8ms", "16ms"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_fadespeed, AB8500_HSRDIGGAIN,
+ AB8500_HSRDIGGAIN_FADESPEED, enum_fadespeed);
+
+/* Earpiece */
+
+static const char * const enum_lowpow[] = {"Normal", "Low Power"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_eardaclowpow, AB8500_ANACONF1,
+ AB8500_ANACONF1_EARDACLOWPOW, enum_lowpow);
+static SOC_ENUM_SINGLE_DECL(soc_enum_eardrvlowpow, AB8500_ANACONF1,
+ AB8500_ANACONF1_EARDRVLOWPOW, enum_lowpow);
+
+static const char * const enum_av_mode[] = {"Audio", "Voice"};
+static SOC_ENUM_DOUBLE_DECL(soc_enum_ad12voice, AB8500_ADFILTCONF,
+ AB8500_ADFILTCONF_AD1VOICE, AB8500_ADFILTCONF_AD2VOICE, enum_av_mode);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_ad34voice, AB8500_ADFILTCONF,
+ AB8500_ADFILTCONF_AD3VOICE, AB8500_ADFILTCONF_AD4VOICE, enum_av_mode);
+
+/* DA */
+
+static SOC_ENUM_SINGLE_DECL(soc_enum_da12voice,
+ AB8500_DASLOTCONF1, AB8500_DASLOTCONF1_DA12VOICE,
+ enum_av_mode);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da34voice,
+ AB8500_DASLOTCONF3, AB8500_DASLOTCONF3_DA34VOICE,
+ enum_av_mode);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da56voice,
+ AB8500_DASLOTCONF5, AB8500_DASLOTCONF5_DA56VOICE,
+ enum_av_mode);
+
+static const char * const enum_da2hslr[] = {"Sidetone", "Audio Path"};
+static SOC_ENUM_DOUBLE_DECL(soc_enum_da2hslr, AB8500_DIGMULTCONF1,
+ AB8500_DIGMULTCONF1_DATOHSLEN,
+ AB8500_DIGMULTCONF1_DATOHSREN, enum_da2hslr);
+
+static const char * const enum_sinc53[] = {"Sinc 5", "Sinc 3"};
+static SOC_ENUM_DOUBLE_DECL(soc_enum_dmic12sinc, AB8500_DMICFILTCONF,
+ AB8500_DMICFILTCONF_DMIC1SINC3,
+ AB8500_DMICFILTCONF_DMIC2SINC3, enum_sinc53);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_dmic34sinc, AB8500_DMICFILTCONF,
+ AB8500_DMICFILTCONF_DMIC3SINC3,
+ AB8500_DMICFILTCONF_DMIC4SINC3, enum_sinc53);
+static SOC_ENUM_DOUBLE_DECL(soc_enum_dmic56sinc, AB8500_DMICFILTCONF,
+ AB8500_DMICFILTCONF_DMIC5SINC3,
+ AB8500_DMICFILTCONF_DMIC6SINC3, enum_sinc53);
+
+/* Digital interface - DA from slot mapping */
+static const char * const enum_da_from_slot_map[] = {"SLOT0",
+ "SLOT1",
+ "SLOT2",
+ "SLOT3",
+ "SLOT4",
+ "SLOT5",
+ "SLOT6",
+ "SLOT7",
+ "SLOT8",
+ "SLOT9",
+ "SLOT10",
+ "SLOT11",
+ "SLOT12",
+ "SLOT13",
+ "SLOT14",
+ "SLOT15",
+ "SLOT16",
+ "SLOT17",
+ "SLOT18",
+ "SLOT19",
+ "SLOT20",
+ "SLOT21",
+ "SLOT22",
+ "SLOT23",
+ "SLOT24",
+ "SLOT25",
+ "SLOT26",
+ "SLOT27",
+ "SLOT28",
+ "SLOT29",
+ "SLOT30",
+ "SLOT31"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_da1slotmap,
+ AB8500_DASLOTCONF1, AB8500_DASLOTCONFX_SLTODAX_SHIFT,
+ enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da2slotmap,
+ AB8500_DASLOTCONF2, AB8500_DASLOTCONFX_SLTODAX_SHIFT,
+ enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da3slotmap,
+ AB8500_DASLOTCONF3, AB8500_DASLOTCONFX_SLTODAX_SHIFT,
+ enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da4slotmap,
+ AB8500_DASLOTCONF4, AB8500_DASLOTCONFX_SLTODAX_SHIFT,
+ enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da5slotmap,
+ AB8500_DASLOTCONF5, AB8500_DASLOTCONFX_SLTODAX_SHIFT,
+ enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da6slotmap,
+ AB8500_DASLOTCONF6, AB8500_DASLOTCONFX_SLTODAX_SHIFT,
+ enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da7slotmap,
+ AB8500_DASLOTCONF7, AB8500_DASLOTCONFX_SLTODAX_SHIFT,
+ enum_da_from_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_da8slotmap,
+ AB8500_DASLOTCONF8, AB8500_DASLOTCONFX_SLTODAX_SHIFT,
+ enum_da_from_slot_map);
+
+/* Digital interface - AD to slot mapping */
+static const char * const enum_ad_to_slot_map[] = {"AD_OUT1",
+ "AD_OUT2",
+ "AD_OUT3",
+ "AD_OUT4",
+ "AD_OUT5",
+ "AD_OUT6",
+ "AD_OUT7",
+ "AD_OUT8",
+ "zeroes",
+ "tristate"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot0map,
+ AB8500_ADSLOTSEL1, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot1map,
+ AB8500_ADSLOTSEL1, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot2map,
+ AB8500_ADSLOTSEL2, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot3map,
+ AB8500_ADSLOTSEL2, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot4map,
+ AB8500_ADSLOTSEL3, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot5map,
+ AB8500_ADSLOTSEL3, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot6map,
+ AB8500_ADSLOTSEL4, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot7map,
+ AB8500_ADSLOTSEL4, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot8map,
+ AB8500_ADSLOTSEL5, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot9map,
+ AB8500_ADSLOTSEL5, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot10map,
+ AB8500_ADSLOTSEL6, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot11map,
+ AB8500_ADSLOTSEL6, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot12map,
+ AB8500_ADSLOTSEL7, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot13map,
+ AB8500_ADSLOTSEL7, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot14map,
+ AB8500_ADSLOTSEL8, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot15map,
+ AB8500_ADSLOTSEL8, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot16map,
+ AB8500_ADSLOTSEL9, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot17map,
+ AB8500_ADSLOTSEL9, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot18map,
+ AB8500_ADSLOTSEL10, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot19map,
+ AB8500_ADSLOTSEL10, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot20map,
+ AB8500_ADSLOTSEL11, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot21map,
+ AB8500_ADSLOTSEL11, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot22map,
+ AB8500_ADSLOTSEL12, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot23map,
+ AB8500_ADSLOTSEL12, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot24map,
+ AB8500_ADSLOTSEL13, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot25map,
+ AB8500_ADSLOTSEL13, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot26map,
+ AB8500_ADSLOTSEL14, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot27map,
+ AB8500_ADSLOTSEL14, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot28map,
+ AB8500_ADSLOTSEL15, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot29map,
+ AB8500_ADSLOTSEL15, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot30map,
+ AB8500_ADSLOTSEL16, AB8500_ADSLOTSELX_EVEN_SHIFT,
+ enum_ad_to_slot_map);
+static SOC_ENUM_SINGLE_DECL(soc_enum_adslot31map,
+ AB8500_ADSLOTSEL16, AB8500_ADSLOTSELX_ODD_SHIFT,
+ enum_ad_to_slot_map);
+
+/* Digital interface - Burst mode */
+static const char * const enum_mask[] = {"Unmasked", "Masked"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_bfifomask,
+ AB8500_FIFOCONF1, AB8500_FIFOCONF1_BFIFOMASK,
+ enum_mask);
+static const char * const enum_bitclk0[] = {"19_2_MHz", "38_4_MHz"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_bfifo19m2,
+ AB8500_FIFOCONF1, AB8500_FIFOCONF1_BFIFO19M2,
+ enum_bitclk0);
+static const char * const enum_slavemaster[] = {"Slave", "Master"};
+static SOC_ENUM_SINGLE_DECL(soc_enum_bfifomast,
+ AB8500_FIFOCONF3, AB8500_FIFOCONF3_BFIFOMAST_SHIFT,
+ enum_slavemaster);
+
+/* Sidetone */
+static SOC_ENUM_SINGLE_EXT_DECL(soc_enum_sidstate, enum_sid_state);
+
+/* ANC */
+static SOC_ENUM_SINGLE_EXT_DECL(soc_enum_ancstate, enum_anc_state);
+
+static struct snd_kcontrol_new ab8500_ctrls[] = {
+ /* Charge pump */
+ SOC_ENUM("Charge Pump High Threshold For Low Voltage",
+ soc_enum_envdeththre),
+ SOC_ENUM("Charge Pump Low Threshold For Low Voltage",
+ soc_enum_envdetlthre),
+ SOC_SINGLE("Charge Pump Envelope Detection Switch",
+ AB8500_SIGENVCONF, AB8500_SIGENVCONF_ENVDETCPEN,
+ 1, 0),
+ SOC_ENUM("Charge Pump Envelope Detection Decay Time",
+ soc_enum_envdettime),
+
+ /* Headset */
+ SOC_ENUM("Headset Mode", soc_enum_da12voice),
+ SOC_SINGLE("Headset High Pass Switch",
+ AB8500_ANACONF1, AB8500_ANACONF1_HSHPEN,
+ 1, 0),
+ SOC_SINGLE("Headset Low Power Switch",
+ AB8500_ANACONF1, AB8500_ANACONF1_HSLOWPOW,
+ 1, 0),
+ SOC_SINGLE("Headset DAC Low Power Switch",
+ AB8500_ANACONF1, AB8500_ANACONF1_DACLOWPOW1,
+ 1, 0),
+ SOC_SINGLE("Headset DAC Drv Low Power Switch",
+ AB8500_ANACONF1, AB8500_ANACONF1_DACLOWPOW0,
+ 1, 0),
+ SOC_ENUM("Headset Fade Speed", soc_enum_hsfadspeed),
+ SOC_ENUM("Headset Source", soc_enum_da2hslr),
+ SOC_ENUM("Headset Filter", soc_enum_hsesinc),
+ SOC_DOUBLE_R_TLV("Headset Master Volume",
+ AB8500_DADIGGAIN1, AB8500_DADIGGAIN2,
+ 0, AB8500_DADIGGAINX_DAXGAIN_MAX, 1, dax_dig_gain_tlv),
+ SOC_DOUBLE_R_TLV("Headset Digital Volume",
+ AB8500_HSLEARDIGGAIN, AB8500_HSRDIGGAIN,
+ 0, AB8500_HSLEARDIGGAIN_HSLDGAIN_MAX, 1, hs_ear_dig_gain_tlv),
+ SOC_DOUBLE_TLV("Headset Volume",
+ AB8500_ANAGAIN3,
+ AB8500_ANAGAIN3_HSLGAIN, AB8500_ANAGAIN3_HSRGAIN,
+ AB8500_ANAGAIN3_HSXGAIN_MAX, 1, hs_gain_tlv),
+
+ /* Earpiece */
+ SOC_ENUM("Earpiece DAC Mode",
+ soc_enum_eardaclowpow),
+ SOC_ENUM("Earpiece DAC Drv Mode",
+ soc_enum_eardrvlowpow),
+
+ /* HandsFree */
+ SOC_ENUM("HF Mode", soc_enum_da34voice),
+ SOC_SINGLE("HF and Headset Swap Switch",
+ AB8500_DASLOTCONF1, AB8500_DASLOTCONF1_SWAPDA12_34,
+ 1, 0),
+ SOC_DOUBLE("HF Low EMI Mode Switch",
+ AB8500_CLASSDCONF1,
+ AB8500_CLASSDCONF1_HFLSWAPEN, AB8500_CLASSDCONF1_HFRSWAPEN,
+ 1, 0),
+ SOC_DOUBLE("HF FIR Bypass Switch",
+ AB8500_CLASSDCONF2,
+ AB8500_CLASSDCONF2_FIRBYP0, AB8500_CLASSDCONF2_FIRBYP1,
+ 1, 0),
+ SOC_DOUBLE("HF High Volume Switch",
+ AB8500_CLASSDCONF2,
+ AB8500_CLASSDCONF2_HIGHVOLEN0, AB8500_CLASSDCONF2_HIGHVOLEN1,
+ 1, 0),
+ SOC_SINGLE("HF L and R Bridge Switch",
+ AB8500_CLASSDCONF1, AB8500_CLASSDCONF1_PARLHF,
+ 1, 0),
+ SOC_DOUBLE_R_TLV("HF Master Volume",
+ AB8500_DADIGGAIN3, AB8500_DADIGGAIN4,
+ 0, AB8500_DADIGGAINX_DAXGAIN_MAX, 1, dax_dig_gain_tlv),
+
+ /* Vibra */
+ SOC_DOUBLE("Vibra High Volume Switch",
+ AB8500_CLASSDCONF2,
+ AB8500_CLASSDCONF2_HIGHVOLEN2, AB8500_CLASSDCONF2_HIGHVOLEN3,
+ 1, 0),
+ SOC_DOUBLE("Vibra Low EMI Mode Switch",
+ AB8500_CLASSDCONF1,
+ AB8500_CLASSDCONF1_VIB1SWAPEN, AB8500_CLASSDCONF1_VIB2SWAPEN,
+ 1, 0),
+ SOC_DOUBLE("Vibra FIR Bypass Switch",
+ AB8500_CLASSDCONF2,
+ AB8500_CLASSDCONF2_FIRBYP2, AB8500_CLASSDCONF2_FIRBYP3,
+ 1, 0),
+ SOC_ENUM("Vibra Mode", soc_enum_da56voice),
+ SOC_DOUBLE_R("Vibra PWM Duty Cycle N",
+ AB8500_PWMGENCONF3, AB8500_PWMGENCONF5,
+ AB8500_PWMGENCONFX_PWMVIBXDUTCYC,
+ AB8500_PWMGENCONFX_PWMVIBXDUTCYC_MAX, 0),
+ SOC_DOUBLE_R("Vibra PWM Duty Cycle P",
+ AB8500_PWMGENCONF2, AB8500_PWMGENCONF4,
+ AB8500_PWMGENCONFX_PWMVIBXDUTCYC,
+ AB8500_PWMGENCONFX_PWMVIBXDUTCYC_MAX, 0),
+ SOC_SINGLE("Vibra 1 and 2 Bridge Switch",
+ AB8500_CLASSDCONF1, AB8500_CLASSDCONF1_PARLVIB,
+ 1, 0),
+ SOC_DOUBLE_R_TLV("Vibra Master Volume",
+ AB8500_DADIGGAIN5, AB8500_DADIGGAIN6,
+ 0, AB8500_DADIGGAINX_DAXGAIN_MAX, 1, dax_dig_gain_tlv),
+
+ /* HandsFree, Vibra */
+ SOC_SINGLE("ClassD High Pass Volume",
+ AB8500_CLASSDCONF3, AB8500_CLASSDCONF3_DITHHPGAIN,
+ AB8500_CLASSDCONF3_DITHHPGAIN_MAX, 0),
+ SOC_SINGLE("ClassD White Volume",
+ AB8500_CLASSDCONF3, AB8500_CLASSDCONF3_DITHWGAIN,
+ AB8500_CLASSDCONF3_DITHWGAIN_MAX, 0),
+
+ /* Mic 1, Mic 2, LineIn */
+ SOC_DOUBLE_R_TLV("Mic Master Volume",
+ AB8500_ADDIGGAIN3, AB8500_ADDIGGAIN4,
+ 0, AB8500_ADDIGGAINX_ADXGAIN_MAX, 1, adx_dig_gain_tlv),
+
+ /* Mic 1 */
+ SOC_SINGLE_TLV("Mic 1",
+ AB8500_ANAGAIN1,
+ AB8500_ANAGAINX_MICXGAIN,
+ AB8500_ANAGAINX_MICXGAIN_MAX, 0, mic_gain_tlv),
+ SOC_SINGLE("Mic 1 Low Power Switch",
+ AB8500_ANAGAIN1, AB8500_ANAGAINX_LOWPOWMICX,
+ 1, 0),
+
+ /* Mic 2 */
+ SOC_DOUBLE("Mic High Pass Switch",
+ AB8500_ADFILTCONF,
+ AB8500_ADFILTCONF_AD3NH, AB8500_ADFILTCONF_AD4NH,
+ 1, 1),
+ SOC_ENUM("Mic Mode", soc_enum_ad34voice),
+ SOC_ENUM("Mic Filter", soc_enum_dmic34sinc),
+ SOC_SINGLE_TLV("Mic 2",
+ AB8500_ANAGAIN2,
+ AB8500_ANAGAINX_MICXGAIN,
+ AB8500_ANAGAINX_MICXGAIN_MAX, 0, mic_gain_tlv),
+ SOC_SINGLE("Mic 2 Low Power Switch",
+ AB8500_ANAGAIN2, AB8500_ANAGAINX_LOWPOWMICX,
+ 1, 0),
+
+ /* LineIn */
+ SOC_DOUBLE("LineIn High Pass Switch",
+ AB8500_ADFILTCONF,
+ AB8500_ADFILTCONF_AD1NH, AB8500_ADFILTCONF_AD2NH,
+ 1, 1),
+ SOC_ENUM("LineIn Filter", soc_enum_dmic12sinc),
+ SOC_ENUM("LineIn Mode", soc_enum_ad12voice),
+ SOC_DOUBLE_R_TLV("LineIn Master Volume",
+ AB8500_ADDIGGAIN1, AB8500_ADDIGGAIN2,
+ 0, AB8500_ADDIGGAINX_ADXGAIN_MAX, 1, adx_dig_gain_tlv),
+ SOC_DOUBLE_TLV("LineIn",
+ AB8500_ANAGAIN4,
+ AB8500_ANAGAIN4_LINLGAIN, AB8500_ANAGAIN4_LINRGAIN,
+ AB8500_ANAGAIN4_LINXGAIN_MAX, 0, lin_gain_tlv),
+ SOC_DOUBLE_R_TLV("LineIn to Headset Volume",
+ AB8500_DIGLINHSLGAIN, AB8500_DIGLINHSRGAIN,
+ AB8500_DIGLINHSXGAIN_LINTOHSXGAIN,
+ AB8500_DIGLINHSXGAIN_LINTOHSXGAIN_MAX,
+ 1, lin2hs_gain_tlv),
+
+ /* DMic */
+ SOC_ENUM("DMic Filter", soc_enum_dmic56sinc),
+ SOC_DOUBLE_R_TLV("DMic Master Volume",
+ AB8500_ADDIGGAIN5, AB8500_ADDIGGAIN6,
+ 0, AB8500_ADDIGGAINX_ADXGAIN_MAX, 1, adx_dig_gain_tlv),
+
+ /* Digital gains */
+ SOC_ENUM("Digital Gain Fade Speed", soc_enum_fadespeed),
+
+ /* Analog loopback */
+ SOC_DOUBLE_R_TLV("Analog Loopback Volume",
+ AB8500_ADDIGLOOPGAIN1, AB8500_ADDIGLOOPGAIN2,
+ 0, AB8500_ADDIGLOOPGAINX_ADXLBGAIN_MAX, 1, dax_dig_gain_tlv),
+
+ /* Digital interface - DA from slot mapping */
+ SOC_ENUM("Digital Interface DA 1 From Slot Map", soc_enum_da1slotmap),
+ SOC_ENUM("Digital Interface DA 2 From Slot Map", soc_enum_da2slotmap),
+ SOC_ENUM("Digital Interface DA 3 From Slot Map", soc_enum_da3slotmap),
+ SOC_ENUM("Digital Interface DA 4 From Slot Map", soc_enum_da4slotmap),
+ SOC_ENUM("Digital Interface DA 5 From Slot Map", soc_enum_da5slotmap),
+ SOC_ENUM("Digital Interface DA 6 From Slot Map", soc_enum_da6slotmap),
+ SOC_ENUM("Digital Interface DA 7 From Slot Map", soc_enum_da7slotmap),
+ SOC_ENUM("Digital Interface DA 8 From Slot Map", soc_enum_da8slotmap),
+
+ /* Digital interface - AD to slot mapping */
+ SOC_ENUM("Digital Interface AD To Slot 0 Map", soc_enum_adslot0map),
+ SOC_ENUM("Digital Interface AD To Slot 1 Map", soc_enum_adslot1map),
+ SOC_ENUM("Digital Interface AD To Slot 2 Map", soc_enum_adslot2map),
+ SOC_ENUM("Digital Interface AD To Slot 3 Map", soc_enum_adslot3map),
+ SOC_ENUM("Digital Interface AD To Slot 4 Map", soc_enum_adslot4map),
+ SOC_ENUM("Digital Interface AD To Slot 5 Map", soc_enum_adslot5map),
+ SOC_ENUM("Digital Interface AD To Slot 6 Map", soc_enum_adslot6map),
+ SOC_ENUM("Digital Interface AD To Slot 7 Map", soc_enum_adslot7map),
+ SOC_ENUM("Digital Interface AD To Slot 8 Map", soc_enum_adslot8map),
+ SOC_ENUM("Digital Interface AD To Slot 9 Map", soc_enum_adslot9map),
+ SOC_ENUM("Digital Interface AD To Slot 10 Map", soc_enum_adslot10map),
+ SOC_ENUM("Digital Interface AD To Slot 11 Map", soc_enum_adslot11map),
+ SOC_ENUM("Digital Interface AD To Slot 12 Map", soc_enum_adslot12map),
+ SOC_ENUM("Digital Interface AD To Slot 13 Map", soc_enum_adslot13map),
+ SOC_ENUM("Digital Interface AD To Slot 14 Map", soc_enum_adslot14map),
+ SOC_ENUM("Digital Interface AD To Slot 15 Map", soc_enum_adslot15map),
+ SOC_ENUM("Digital Interface AD To Slot 16 Map", soc_enum_adslot16map),
+ SOC_ENUM("Digital Interface AD To Slot 17 Map", soc_enum_adslot17map),
+ SOC_ENUM("Digital Interface AD To Slot 18 Map", soc_enum_adslot18map),
+ SOC_ENUM("Digital Interface AD To Slot 19 Map", soc_enum_adslot19map),
+ SOC_ENUM("Digital Interface AD To Slot 20 Map", soc_enum_adslot20map),
+ SOC_ENUM("Digital Interface AD To Slot 21 Map", soc_enum_adslot21map),
+ SOC_ENUM("Digital Interface AD To Slot 22 Map", soc_enum_adslot22map),
+ SOC_ENUM("Digital Interface AD To Slot 23 Map", soc_enum_adslot23map),
+ SOC_ENUM("Digital Interface AD To Slot 24 Map", soc_enum_adslot24map),
+ SOC_ENUM("Digital Interface AD To Slot 25 Map", soc_enum_adslot25map),
+ SOC_ENUM("Digital Interface AD To Slot 26 Map", soc_enum_adslot26map),
+ SOC_ENUM("Digital Interface AD To Slot 27 Map", soc_enum_adslot27map),
+ SOC_ENUM("Digital Interface AD To Slot 28 Map", soc_enum_adslot28map),
+ SOC_ENUM("Digital Interface AD To Slot 29 Map", soc_enum_adslot29map),
+ SOC_ENUM("Digital Interface AD To Slot 30 Map", soc_enum_adslot30map),
+ SOC_ENUM("Digital Interface AD To Slot 31 Map", soc_enum_adslot31map),
+
+ /* Digital interface - Loopback */
+ SOC_SINGLE("Digital Interface AD 1 Loopback Switch",
+ AB8500_DASLOTCONF1, AB8500_DASLOTCONF1_DAI7TOADO1,
+ 1, 0),
+ SOC_SINGLE("Digital Interface AD 2 Loopback Switch",
+ AB8500_DASLOTCONF2, AB8500_DASLOTCONF2_DAI8TOADO2,
+ 1, 0),
+ SOC_SINGLE("Digital Interface AD 3 Loopback Switch",
+ AB8500_DASLOTCONF3, AB8500_DASLOTCONF3_DAI7TOADO3,
+ 1, 0),
+ SOC_SINGLE("Digital Interface AD 4 Loopback Switch",
+ AB8500_DASLOTCONF4, AB8500_DASLOTCONF4_DAI8TOADO4,
+ 1, 0),
+ SOC_SINGLE("Digital Interface AD 5 Loopback Switch",
+ AB8500_DASLOTCONF5, AB8500_DASLOTCONF5_DAI7TOADO5,
+ 1, 0),
+ SOC_SINGLE("Digital Interface AD 6 Loopback Switch",
+ AB8500_DASLOTCONF6, AB8500_DASLOTCONF6_DAI8TOADO6,
+ 1, 0),
+ SOC_SINGLE("Digital Interface AD 7 Loopback Switch",
+ AB8500_DASLOTCONF7, AB8500_DASLOTCONF7_DAI8TOADO7,
+ 1, 0),
+ SOC_SINGLE("Digital Interface AD 8 Loopback Switch",
+ AB8500_DASLOTCONF8, AB8500_DASLOTCONF8_DAI7TOADO8,
+ 1, 0),
+
+ /* Digital interface - Burst FIFO */
+ SOC_SINGLE("Digital Interface 0 FIFO Enable Switch",
+ AB8500_DIGIFCONF3, AB8500_DIGIFCONF3_IF0BFIFOEN,
+ 1, 0),
+ SOC_ENUM("Burst FIFO Mask", soc_enum_bfifomask),
+ SOC_ENUM("Burst FIFO Bit-clock Frequency", soc_enum_bfifo19m2),
+ SOC_SINGLE("Burst FIFO Threshold",
+ AB8500_FIFOCONF1, AB8500_FIFOCONF1_BFIFOINT_SHIFT,
+ AB8500_FIFOCONF1_BFIFOINT_MAX, 0),
+ SOC_SINGLE("Burst FIFO Length",
+ AB8500_FIFOCONF2, AB8500_FIFOCONF2_BFIFOTX_SHIFT,
+ AB8500_FIFOCONF2_BFIFOTX_MAX, 0),
+ SOC_SINGLE("Burst FIFO EOS Extra Slots",
+ AB8500_FIFOCONF3, AB8500_FIFOCONF3_BFIFOEXSL_SHIFT,
+ AB8500_FIFOCONF3_BFIFOEXSL_MAX, 0),
+ SOC_SINGLE("Burst FIFO FS Extra Bit-clocks",
+ AB8500_FIFOCONF3, AB8500_FIFOCONF3_PREBITCLK0_SHIFT,
+ AB8500_FIFOCONF3_PREBITCLK0_MAX, 0),
+ SOC_ENUM("Burst FIFO Interface Mode", soc_enum_bfifomast),
+
+ SOC_SINGLE("Burst FIFO Interface Switch",
+ AB8500_FIFOCONF3, AB8500_FIFOCONF3_BFIFORUN_SHIFT,
+ 1, 0),
+ SOC_SINGLE("Burst FIFO Switch Frame Number",
+ AB8500_FIFOCONF4, AB8500_FIFOCONF4_BFIFOFRAMSW_SHIFT,
+ AB8500_FIFOCONF4_BFIFOFRAMSW_MAX, 0),
+ SOC_SINGLE("Burst FIFO Wake Up Delay",
+ AB8500_FIFOCONF5, AB8500_FIFOCONF5_BFIFOWAKEUP_SHIFT,
+ AB8500_FIFOCONF5_BFIFOWAKEUP_MAX, 0),
+ SOC_SINGLE("Burst FIFO Samples In FIFO",
+ AB8500_FIFOCONF6, AB8500_FIFOCONF6_BFIFOSAMPLE_SHIFT,
+ AB8500_FIFOCONF6_BFIFOSAMPLE_MAX, 0),
+
+ /* ANC */
+ SOC_ENUM_EXT("ANC Status", soc_enum_ancstate,
+ anc_status_control_get, anc_status_control_put),
+ SOC_SINGLE_XR_SX("ANC Warp Delay Shift",
+ AB8500_ANCCONF2, 1, AB8500_ANCCONF2_SHIFT,
+ AB8500_ANCCONF2_MIN, AB8500_ANCCONF2_MAX, 0),
+ SOC_SINGLE_XR_SX("ANC FIR Output Shift",
+ AB8500_ANCCONF3, 1, AB8500_ANCCONF3_SHIFT,
+ AB8500_ANCCONF3_MIN, AB8500_ANCCONF3_MAX, 0),
+ SOC_SINGLE_XR_SX("ANC IIR Output Shift",
+ AB8500_ANCCONF4, 1, AB8500_ANCCONF4_SHIFT,
+ AB8500_ANCCONF4_MIN, AB8500_ANCCONF4_MAX, 0),
+ SOC_SINGLE_XR_SX("ANC Warp Delay",
+ AB8500_ANCCONF9, 2, AB8500_ANC_WARP_DELAY_SHIFT,
+ AB8500_ANC_WARP_DELAY_MIN, AB8500_ANC_WARP_DELAY_MAX, 0),
+
+ /* Sidetone */
+ SOC_ENUM_EXT("Sidetone Status", soc_enum_sidstate,
+ sid_status_control_get, sid_status_control_put),
+ SOC_SINGLE_STROBE("Sidetone Reset",
+ AB8500_SIDFIRADR, AB8500_SIDFIRADR_FIRSIDSET, 0),
+};
+
+static struct snd_kcontrol_new ab8500_filter_controls[] = {
+ AB8500_FILTER_CONTROL("ANC FIR Coefficients", AB8500_ANC_FIR_COEFFS,
+ AB8500_ANC_FIR_COEFF_MIN, AB8500_ANC_FIR_COEFF_MAX),
+ AB8500_FILTER_CONTROL("ANC IIR Coefficients", AB8500_ANC_IIR_COEFFS,
+ AB8500_ANC_IIR_COEFF_MIN, AB8500_ANC_IIR_COEFF_MAX),
+ AB8500_FILTER_CONTROL("Sidetone FIR Coefficients",
+ AB8500_SID_FIR_COEFFS, AB8500_SID_FIR_COEFF_MIN,
+ AB8500_SID_FIR_COEFF_MAX)
+};
+enum ab8500_filter {
+ AB8500_FILTER_ANC_FIR = 0,
+ AB8500_FILTER_ANC_IIR = 1,
+ AB8500_FILTER_SID_FIR = 2,
+};
+
+/*
+ * Extended interface for codec-driver
+ */
+
+static int ab8500_audio_init_audioblock(struct snd_soc_codec *codec)
+{
+ int status;
+
+ dev_dbg(codec->dev, "%s: Enter.\n", __func__);
+
+ /* Reset audio-registers and disable 32kHz-clock output 2 */
+ status = ab8500_sysctrl_write(AB8500_STW4500CTRL3,
+ AB8500_STW4500CTRL3_CLK32KOUT2DIS |
+ AB8500_STW4500CTRL3_RESETAUDN,
+ AB8500_STW4500CTRL3_RESETAUDN);
+ if (status < 0)
+ return status;
+
+ return 0;
+}
+
+static int ab8500_audio_setup_mics(struct snd_soc_codec *codec,
+ struct amic_settings *amics)
+{
+ u8 value8;
+ unsigned int value;
+ int status;
+ const struct snd_soc_dapm_route *route;
+
+ dev_dbg(codec->dev, "%s: Enter.\n", __func__);
+
+ /* Set DMic-clocks to outputs */
+ status = abx500_get_register_interruptible(codec->dev, (u8)AB8500_MISC,
+ (u8)AB8500_GPIO_DIR4_REG,
+ &value8);
+ if (status < 0)
+ return status;
+ value = value8 | GPIO27_DIR_OUTPUT | GPIO29_DIR_OUTPUT |
+ GPIO31_DIR_OUTPUT;
+ status = abx500_set_register_interruptible(codec->dev,
+ (u8)AB8500_MISC,
+ (u8)AB8500_GPIO_DIR4_REG,
+ value);
+ if (status < 0)
+ return status;
+
+ /* Attach regulators to AMic DAPM-paths */
+ dev_dbg(codec->dev, "%s: Mic 1a regulator: %s\n", __func__,
+ amic_micbias_str(amics->mic1a_micbias));
+ route = &ab8500_dapm_routes_mic1a_vamicx[amics->mic1a_micbias];
+ status = snd_soc_dapm_add_routes(&codec->dapm, route, 1);
+ dev_dbg(codec->dev, "%s: Mic 1b regulator: %s\n", __func__,
+ amic_micbias_str(amics->mic1b_micbias));
+ route = &ab8500_dapm_routes_mic1b_vamicx[amics->mic1b_micbias];
+ status |= snd_soc_dapm_add_routes(&codec->dapm, route, 1);
+ dev_dbg(codec->dev, "%s: Mic 2 regulator: %s\n", __func__,
+ amic_micbias_str(amics->mic2_micbias));
+ route = &ab8500_dapm_routes_mic2_vamicx[amics->mic2_micbias];
+ status |= snd_soc_dapm_add_routes(&codec->dapm, route, 1);
+ if (status < 0) {
+ dev_err(codec->dev,
+ "%s: Failed to add AMic-regulator DAPM-routes (%d).\n",
+ __func__, status);
+ return status;
+ }
+
+ /* Set AMic-configuration */
+ dev_dbg(codec->dev, "%s: Mic 1 mic-type: %s\n", __func__,
+ amic_type_str(amics->mic1_type));
+ snd_soc_update_bits(codec, AB8500_ANAGAIN1, AB8500_ANAGAINX_ENSEMICX,
+ amics->mic1_type == AMIC_TYPE_DIFFERENTIAL ?
+ 0 : AB8500_ANAGAINX_ENSEMICX);
+ dev_dbg(codec->dev, "%s: Mic 2 mic-type: %s\n", __func__,
+ amic_type_str(amics->mic2_type));
+ snd_soc_update_bits(codec, AB8500_ANAGAIN2, AB8500_ANAGAINX_ENSEMICX,
+ amics->mic2_type == AMIC_TYPE_DIFFERENTIAL ?
+ 0 : AB8500_ANAGAINX_ENSEMICX);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ab8500_audio_setup_mics);
+
+static int ab8500_audio_set_ear_cmv(struct snd_soc_codec *codec,
+ enum ear_cm_voltage ear_cmv)
+{
+ char *cmv_str;
+
+ switch (ear_cmv) {
+ case EAR_CMV_0_95V:
+ cmv_str = "0.95V";
+ break;
+ case EAR_CMV_1_10V:
+ cmv_str = "1.10V";
+ break;
+ case EAR_CMV_1_27V:
+ cmv_str = "1.27V";
+ break;
+ case EAR_CMV_1_58V:
+ cmv_str = "1.58V";
+ break;
+ default:
+ dev_err(codec->dev,
+ "%s: Unknown earpiece CM-voltage (%d)!\n",
+ __func__, (int)ear_cmv);
+ return -EINVAL;
+ }
+ dev_dbg(codec->dev, "%s: Earpiece CM-voltage: %s\n", __func__,
+ cmv_str);
+ snd_soc_update_bits(codec, AB8500_ANACONF1, AB8500_ANACONF1_EARSELCM,
+ ear_cmv);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ab8500_audio_set_ear_cmv);
+
+static int ab8500_audio_set_bit_delay(struct snd_soc_dai *dai,
+ unsigned int delay)
+{
+ unsigned int mask, val;
+ struct snd_soc_codec *codec = dai->codec;
+
+ mask = BIT(AB8500_DIGIFCONF2_IF0DEL);
+ val = 0;
+
+ switch (delay) {
+ case 0:
+ break;
+ case 1:
+ val |= BIT(AB8500_DIGIFCONF2_IF0DEL);
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: ERROR: Unsupported bit-delay (0x%x)!\n",
+ __func__, delay);
+ return -EINVAL;
+ }
+
+ dev_dbg(dai->codec->dev, "%s: IF0 Bit-delay: %d bits.\n",
+ __func__, delay);
+ snd_soc_update_bits(codec, AB8500_DIGIFCONF2, mask, val);
+
+ return 0;
+}
+
+/* Gates clocking according format mask */
+static int ab8500_codec_set_dai_clock_gate(struct snd_soc_codec *codec,
+ unsigned int fmt)
+{
+ unsigned int mask;
+ unsigned int val;
+
+ mask = BIT(AB8500_DIGIFCONF1_ENMASTGEN) |
+ BIT(AB8500_DIGIFCONF1_ENFSBITCLK0);
+
+ val = BIT(AB8500_DIGIFCONF1_ENMASTGEN);
+
+ switch (fmt & SND_SOC_DAIFMT_CLOCK_MASK) {
+ case SND_SOC_DAIFMT_CONT: /* continuous clock */
+ dev_dbg(codec->dev, "%s: IF0 Clock is continuous.\n",
+ __func__);
+ val |= BIT(AB8500_DIGIFCONF1_ENFSBITCLK0);
+ break;
+ case SND_SOC_DAIFMT_GATED: /* clock is gated */
+ dev_dbg(codec->dev, "%s: IF0 Clock is gated.\n",
+ __func__);
+ break;
+ default:
+ dev_err(codec->dev,
+ "%s: ERROR: Unsupported clock mask (0x%x)!\n",
+ __func__, fmt & SND_SOC_DAIFMT_CLOCK_MASK);
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, AB8500_DIGIFCONF1, mask, val);
+
+ return 0;
+}
+
+static int ab8500_codec_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ unsigned int mask;
+ unsigned int val;
+ struct snd_soc_codec *codec = dai->codec;
+ int status;
+
+ dev_dbg(codec->dev, "%s: Enter (fmt = 0x%x)\n", __func__, fmt);
+
+ mask = BIT(AB8500_DIGIFCONF3_IF1DATOIF0AD) |
+ BIT(AB8500_DIGIFCONF3_IF1CLKTOIF0CLK) |
+ BIT(AB8500_DIGIFCONF3_IF0BFIFOEN) |
+ BIT(AB8500_DIGIFCONF3_IF0MASTER);
+ val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM: /* codec clk & FRM master */
+ dev_dbg(dai->codec->dev,
+ "%s: IF0 Master-mode: AB8500 master.\n", __func__);
+ val |= BIT(AB8500_DIGIFCONF3_IF0MASTER);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS: /* codec clk & FRM slave */
+ dev_dbg(dai->codec->dev,
+ "%s: IF0 Master-mode: AB8500 slave.\n", __func__);
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM: /* codec clk slave & FRM master */
+ case SND_SOC_DAIFMT_CBM_CFS: /* codec clk master & frame slave */
+ dev_err(dai->codec->dev,
+ "%s: ERROR: The device is either a master or a slave.\n",
+ __func__);
+ default:
+ dev_err(dai->codec->dev,
+ "%s: ERROR: Unsupporter master mask 0x%x\n",
+ __func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
+ return -EINVAL;
+ break;
+ }
+
+ snd_soc_update_bits(codec, AB8500_DIGIFCONF3, mask, val);
+
+ /* Set clock gating */
+ status = ab8500_codec_set_dai_clock_gate(codec, fmt);
+ if (status) {
+ dev_err(dai->codec->dev,
+ "%s: ERRROR: Failed to set clock gate (%d).\n",
+ __func__, status);
+ return status;
+ }
+
+ /* Setting data transfer format */
+
+ mask = BIT(AB8500_DIGIFCONF2_IF0FORMAT0) |
+ BIT(AB8500_DIGIFCONF2_IF0FORMAT1) |
+ BIT(AB8500_DIGIFCONF2_FSYNC0P) |
+ BIT(AB8500_DIGIFCONF2_BITCLK0P);
+ val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S: /* I2S mode */
+ dev_dbg(dai->codec->dev, "%s: IF0 Protocol: I2S\n", __func__);
+ val |= BIT(AB8500_DIGIFCONF2_IF0FORMAT1);
+ ab8500_audio_set_bit_delay(dai, 0);
+ break;
+
+ case SND_SOC_DAIFMT_DSP_A: /* L data MSB after FRM LRC */
+ dev_dbg(dai->codec->dev,
+ "%s: IF0 Protocol: DSP A (TDM)\n", __func__);
+ val |= BIT(AB8500_DIGIFCONF2_IF0FORMAT0);
+ ab8500_audio_set_bit_delay(dai, 1);
+ break;
+
+ case SND_SOC_DAIFMT_DSP_B: /* L data MSB during FRM LRC */
+ dev_dbg(dai->codec->dev,
+ "%s: IF0 Protocol: DSP B (TDM)\n", __func__);
+ val |= BIT(AB8500_DIGIFCONF2_IF0FORMAT0);
+ ab8500_audio_set_bit_delay(dai, 0);
+ break;
+
+ default:
+ dev_err(dai->codec->dev,
+ "%s: ERROR: Unsupported format (0x%x)!\n",
+ __func__, fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF: /* normal bit clock + frame */
+ dev_dbg(dai->codec->dev,
+ "%s: IF0: Normal bit clock, normal frame\n",
+ __func__);
+ break;
+ case SND_SOC_DAIFMT_NB_IF: /* normal BCLK + inv FRM */
+ dev_dbg(dai->codec->dev,
+ "%s: IF0: Normal bit clock, inverted frame\n",
+ __func__);
+ val |= BIT(AB8500_DIGIFCONF2_FSYNC0P);
+ break;
+ case SND_SOC_DAIFMT_IB_NF: /* invert BCLK + nor FRM */
+ dev_dbg(dai->codec->dev,
+ "%s: IF0: Inverted bit clock, normal frame\n",
+ __func__);
+ val |= BIT(AB8500_DIGIFCONF2_BITCLK0P);
+ break;
+ case SND_SOC_DAIFMT_IB_IF: /* invert BCLK + FRM */
+ dev_dbg(dai->codec->dev,
+ "%s: IF0: Inverted bit clock, inverted frame\n",
+ __func__);
+ val |= BIT(AB8500_DIGIFCONF2_FSYNC0P);
+ val |= BIT(AB8500_DIGIFCONF2_BITCLK0P);
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: ERROR: Unsupported INV mask 0x%x\n",
+ __func__, fmt & SND_SOC_DAIFMT_INV_MASK);
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, AB8500_DIGIFCONF2, mask, val);
+
+ return 0;
+}
+
+static int ab8500_codec_set_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int val, mask, slots_active;
+
+ mask = BIT(AB8500_DIGIFCONF2_IF0WL0) |
+ BIT(AB8500_DIGIFCONF2_IF0WL1);
+ val = 0;
+
+ switch (slot_width) {
+ case 16:
+ break;
+ case 20:
+ val |= BIT(AB8500_DIGIFCONF2_IF0WL0);
+ break;
+ case 24:
+ val |= BIT(AB8500_DIGIFCONF2_IF0WL1);
+ break;
+ case 32:
+ val |= BIT(AB8500_DIGIFCONF2_IF0WL1) |
+ BIT(AB8500_DIGIFCONF2_IF0WL0);
+ break;
+ default:
+ dev_err(dai->codec->dev, "%s: Unsupported slot-width 0x%x\n",
+ __func__, slot_width);
+ return -EINVAL;
+ }
+
+ dev_dbg(dai->codec->dev, "%s: IF0 slot-width: %d bits.\n",
+ __func__, slot_width);
+ snd_soc_update_bits(codec, AB8500_DIGIFCONF2, mask, val);
+
+ /* Setup TDM clocking according to slot count */
+ dev_dbg(dai->codec->dev, "%s: Slots, total: %d\n", __func__, slots);
+ mask = BIT(AB8500_DIGIFCONF1_IF0BITCLKOS0) |
+ BIT(AB8500_DIGIFCONF1_IF0BITCLKOS1);
+ switch (slots) {
+ case 2:
+ val = AB8500_MASK_NONE;
+ break;
+ case 4:
+ val = BIT(AB8500_DIGIFCONF1_IF0BITCLKOS0);
+ break;
+ case 8:
+ val = BIT(AB8500_DIGIFCONF1_IF0BITCLKOS1);
+ break;
+ case 16:
+ val = BIT(AB8500_DIGIFCONF1_IF0BITCLKOS0) |
+ BIT(AB8500_DIGIFCONF1_IF0BITCLKOS1);
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: ERROR: Unsupported number of slots (%d)!\n",
+ __func__, slots);
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, AB8500_DIGIFCONF1, mask, val);
+
+ /* Setup TDM DA according to active tx slots */
+ mask = AB8500_DASLOTCONFX_SLTODAX_MASK;
+ slots_active = hweight32(tx_mask);
+ dev_dbg(dai->codec->dev, "%s: Slots, active, TX: %d\n", __func__,
+ slots_active);
+ switch (slots_active) {
+ case 0:
+ break;
+ case 1:
+ /* Slot 9 -> DA_IN1 & DA_IN3 */
+ snd_soc_update_bits(codec, AB8500_DASLOTCONF1, mask, 11);
+ snd_soc_update_bits(codec, AB8500_DASLOTCONF3, mask, 11);
+ snd_soc_update_bits(codec, AB8500_DASLOTCONF2, mask, 11);
+ snd_soc_update_bits(codec, AB8500_DASLOTCONF4, mask, 11);
+ break;
+ case 2:
+ /* Slot 9 -> DA_IN1 & DA_IN3, Slot 11 -> DA_IN2 & DA_IN4 */
+ snd_soc_update_bits(codec, AB8500_DASLOTCONF1, mask, 9);
+ snd_soc_update_bits(codec, AB8500_DASLOTCONF3, mask, 9);
+ snd_soc_update_bits(codec, AB8500_DASLOTCONF2, mask, 11);
+ snd_soc_update_bits(codec, AB8500_DASLOTCONF4, mask, 11);
+
+ break;
+ case 8:
+ dev_dbg(dai->codec->dev,
+ "%s: In 8-channel mode DA-from-slot mapping is set manually.",
+ __func__);
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: Unsupported number of active TX-slots (%d)!\n",
+ __func__, slots_active);
+ return -EINVAL;
+ }
+
+ /* Setup TDM AD according to active RX-slots */
+ slots_active = hweight32(rx_mask);
+ dev_dbg(dai->codec->dev, "%s: Slots, active, RX: %d\n", __func__,
+ slots_active);
+ switch (slots_active) {
+ case 0:
+ break;
+ case 1:
+ /* AD_OUT3 -> slot 0 & 1 */
+ snd_soc_update_bits(codec, AB8500_ADSLOTSEL1, AB8500_MASK_ALL,
+ AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN |
+ AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD);
+ break;
+ case 2:
+ /* AD_OUT3 -> slot 0, AD_OUT2 -> slot 1 */
+ snd_soc_update_bits(codec,
+ AB8500_ADSLOTSEL1,
+ AB8500_MASK_ALL,
+ AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN |
+ AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD);
+ break;
+ case 8:
+ dev_dbg(dai->codec->dev,
+ "%s: In 8-channel mode AD-to-slot mapping is set manually.",
+ __func__);
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: Unsupported number of active RX-slots (%d)!\n",
+ __func__, slots_active);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct snd_soc_dai_driver ab8500_codec_dai[] = {
+ {
+ .name = "ab8500-codec-dai.0",
+ .id = 0,
+ .playback = {
+ .stream_name = "ab8500_0p",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = AB8500_SUPPORTED_RATE,
+ .formats = AB8500_SUPPORTED_FMT,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .set_tdm_slot = ab8500_codec_set_dai_tdm_slot,
+ .set_fmt = ab8500_codec_set_dai_fmt,
+ }
+ },
+ .symmetric_rates = 1
+ },
+ {
+ .name = "ab8500-codec-dai.1",
+ .id = 1,
+ .capture = {
+ .stream_name = "ab8500_0c",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = AB8500_SUPPORTED_RATE,
+ .formats = AB8500_SUPPORTED_FMT,
+ },
+ .ops = (struct snd_soc_dai_ops[]) {
+ {
+ .set_tdm_slot = ab8500_codec_set_dai_tdm_slot,
+ .set_fmt = ab8500_codec_set_dai_fmt,
+ }
+ },
+ .symmetric_rates = 1
+ }
+};
+
+static int ab8500_codec_probe(struct snd_soc_codec *codec)
+{
+ struct device *dev = codec->dev;
+ struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(dev);
+ struct ab8500_platform_data *pdata;
+ struct filter_control *fc;
+ int status;
+
+ dev_dbg(dev, "%s: Enter.\n", __func__);
+
+ /* Setup AB8500 according to board-settings */
+ pdata = (struct ab8500_platform_data *)dev_get_platdata(dev->parent);
+ status = ab8500_audio_setup_mics(codec, &pdata->codec->amics);
+ if (status < 0) {
+ pr_err("%s: Failed to setup mics (%d)!\n", __func__, status);
+ return status;
+ }
+ status = ab8500_audio_set_ear_cmv(codec, pdata->codec->ear_cmv);
+ if (status < 0) {
+ pr_err("%s: Failed to set earpiece CM-voltage (%d)!\n",
+ __func__, status);
+ return status;
+ }
+
+ status = ab8500_audio_init_audioblock(codec);
+ if (status < 0) {
+ dev_err(dev, "%s: failed to init audio-block (%d)!\n",
+ __func__, status);
+ return status;
+ }
+
+ /* Override HW-defaults */
+ ab8500_codec_write_reg(codec,
+ AB8500_ANACONF5,
+ BIT(AB8500_ANACONF5_HSAUTOEN));
+ ab8500_codec_write_reg(codec,
+ AB8500_SHORTCIRCONF,
+ BIT(AB8500_SHORTCIRCONF_HSZCDDIS));
+
+ /* Add filter controls */
+ status = snd_soc_add_codec_controls(codec, ab8500_filter_controls,
+ ARRAY_SIZE(ab8500_filter_controls));
+ if (status < 0) {
+ dev_err(dev,
+ "%s: failed to add ab8500 filter controls (%d).\n",
+ __func__, status);
+ return status;
+ }
+ fc = (struct filter_control *)
+ &ab8500_filter_controls[AB8500_FILTER_ANC_FIR].private_value;
+ drvdata->anc_fir_values = (long *)fc->value;
+ fc = (struct filter_control *)
+ &ab8500_filter_controls[AB8500_FILTER_ANC_IIR].private_value;
+ drvdata->anc_iir_values = (long *)fc->value;
+ fc = (struct filter_control *)
+ &ab8500_filter_controls[AB8500_FILTER_SID_FIR].private_value;
+ drvdata->sid_fir_values = (long *)fc->value;
+
+ (void)snd_soc_dapm_disable_pin(&codec->dapm, "ANC Configure Input");
+
+ mutex_init(&drvdata->anc_lock);
+
+ return status;
+}
+
+static struct snd_soc_codec_driver ab8500_codec_driver = {
+ .probe = ab8500_codec_probe,
+ .read = ab8500_codec_read_reg,
+ .write = ab8500_codec_write_reg,
+ .reg_word_size = sizeof(u8),
+ .controls = ab8500_ctrls,
+ .num_controls = ARRAY_SIZE(ab8500_ctrls),
+ .dapm_widgets = ab8500_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ab8500_dapm_widgets),
+ .dapm_routes = ab8500_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ab8500_dapm_routes),
+};
+
+static int __devinit ab8500_codec_driver_probe(struct platform_device *pdev)
+{
+ int status;
+ struct ab8500_codec_drvdata *drvdata;
+
+ dev_dbg(&pdev->dev, "%s: Enter.\n", __func__);
+
+ /* Create driver private-data struct */
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(struct ab8500_codec_drvdata),
+ GFP_KERNEL);
+ drvdata->sid_status = SID_UNCONFIGURED;
+ drvdata->anc_status = ANC_UNCONFIGURED;
+ dev_set_drvdata(&pdev->dev, drvdata);
+
+ dev_dbg(&pdev->dev, "%s: Register codec.\n", __func__);
+ status = snd_soc_register_codec(&pdev->dev, &ab8500_codec_driver,
+ ab8500_codec_dai,
+ ARRAY_SIZE(ab8500_codec_dai));
+ if (status < 0)
+ dev_err(&pdev->dev,
+ "%s: Error: Failed to register codec (%d).\n",
+ __func__, status);
+
+ return status;
+}
+
+static int __devexit ab8500_codec_driver_remove(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "%s Enter.\n", __func__);
+
+ snd_soc_unregister_codec(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver ab8500_codec_platform_driver = {
+ .driver = {
+ .name = "ab8500-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = ab8500_codec_driver_probe,
+ .remove = __devexit_p(ab8500_codec_driver_remove),
+ .suspend = NULL,
+ .resume = NULL,
+};
+module_platform_driver(ab8500_codec_platform_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/ab8500-codec.h b/sound/soc/codecs/ab8500-codec.h
new file mode 100644
index 000000000000..114f69a0c629
--- /dev/null
+++ b/sound/soc/codecs/ab8500-codec.h
@@ -0,0 +1,590 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Kristoffer Karlsson <kristoffer.karlsson@stericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>,
+ * for ST-Ericsson.
+ *
+ * Based on the early work done by:
+ * Mikko J. Lehto <mikko.lehto@symbio.com>,
+ * Mikko Sarmanne <mikko.sarmanne@symbio.com>,
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef AB8500_CODEC_REGISTERS_H
+#define AB8500_CODEC_REGISTERS_H
+
+#define AB8500_SUPPORTED_RATE (SNDRV_PCM_RATE_48000)
+#define AB8500_SUPPORTED_FMT (SNDRV_PCM_FMTBIT_S16_LE)
+
+/* AB8500 audio bank (0x0d) register definitions */
+
+#define AB8500_POWERUP 0x00
+#define AB8500_AUDSWRESET 0x01
+#define AB8500_ADPATHENA 0x02
+#define AB8500_DAPATHENA 0x03
+#define AB8500_ANACONF1 0x04
+#define AB8500_ANACONF2 0x05
+#define AB8500_DIGMICCONF 0x06
+#define AB8500_ANACONF3 0x07
+#define AB8500_ANACONF4 0x08
+#define AB8500_DAPATHCONF 0x09
+#define AB8500_MUTECONF 0x0A
+#define AB8500_SHORTCIRCONF 0x0B
+#define AB8500_ANACONF5 0x0C
+#define AB8500_ENVCPCONF 0x0D
+#define AB8500_SIGENVCONF 0x0E
+#define AB8500_PWMGENCONF1 0x0F
+#define AB8500_PWMGENCONF2 0x10
+#define AB8500_PWMGENCONF3 0x11
+#define AB8500_PWMGENCONF4 0x12
+#define AB8500_PWMGENCONF5 0x13
+#define AB8500_ANAGAIN1 0x14
+#define AB8500_ANAGAIN2 0x15
+#define AB8500_ANAGAIN3 0x16
+#define AB8500_ANAGAIN4 0x17
+#define AB8500_DIGLINHSLGAIN 0x18
+#define AB8500_DIGLINHSRGAIN 0x19
+#define AB8500_ADFILTCONF 0x1A
+#define AB8500_DIGIFCONF1 0x1B
+#define AB8500_DIGIFCONF2 0x1C
+#define AB8500_DIGIFCONF3 0x1D
+#define AB8500_DIGIFCONF4 0x1E
+#define AB8500_ADSLOTSEL1 0x1F
+#define AB8500_ADSLOTSEL2 0x20
+#define AB8500_ADSLOTSEL3 0x21
+#define AB8500_ADSLOTSEL4 0x22
+#define AB8500_ADSLOTSEL5 0x23
+#define AB8500_ADSLOTSEL6 0x24
+#define AB8500_ADSLOTSEL7 0x25
+#define AB8500_ADSLOTSEL8 0x26
+#define AB8500_ADSLOTSEL9 0x27
+#define AB8500_ADSLOTSEL10 0x28
+#define AB8500_ADSLOTSEL11 0x29
+#define AB8500_ADSLOTSEL12 0x2A
+#define AB8500_ADSLOTSEL13 0x2B
+#define AB8500_ADSLOTSEL14 0x2C
+#define AB8500_ADSLOTSEL15 0x2D
+#define AB8500_ADSLOTSEL16 0x2E
+#define AB8500_ADSLOTHIZCTRL1 0x2F
+#define AB8500_ADSLOTHIZCTRL2 0x30
+#define AB8500_ADSLOTHIZCTRL3 0x31
+#define AB8500_ADSLOTHIZCTRL4 0x32
+#define AB8500_DASLOTCONF1 0x33
+#define AB8500_DASLOTCONF2 0x34
+#define AB8500_DASLOTCONF3 0x35
+#define AB8500_DASLOTCONF4 0x36
+#define AB8500_DASLOTCONF5 0x37
+#define AB8500_DASLOTCONF6 0x38
+#define AB8500_DASLOTCONF7 0x39
+#define AB8500_DASLOTCONF8 0x3A
+#define AB8500_CLASSDCONF1 0x3B
+#define AB8500_CLASSDCONF2 0x3C
+#define AB8500_CLASSDCONF3 0x3D
+#define AB8500_DMICFILTCONF 0x3E
+#define AB8500_DIGMULTCONF1 0x3F
+#define AB8500_DIGMULTCONF2 0x40
+#define AB8500_ADDIGGAIN1 0x41
+#define AB8500_ADDIGGAIN2 0x42
+#define AB8500_ADDIGGAIN3 0x43
+#define AB8500_ADDIGGAIN4 0x44
+#define AB8500_ADDIGGAIN5 0x45
+#define AB8500_ADDIGGAIN6 0x46
+#define AB8500_DADIGGAIN1 0x47
+#define AB8500_DADIGGAIN2 0x48
+#define AB8500_DADIGGAIN3 0x49
+#define AB8500_DADIGGAIN4 0x4A
+#define AB8500_DADIGGAIN5 0x4B
+#define AB8500_DADIGGAIN6 0x4C
+#define AB8500_ADDIGLOOPGAIN1 0x4D
+#define AB8500_ADDIGLOOPGAIN2 0x4E
+#define AB8500_HSLEARDIGGAIN 0x4F
+#define AB8500_HSRDIGGAIN 0x50
+#define AB8500_SIDFIRGAIN1 0x51
+#define AB8500_SIDFIRGAIN2 0x52
+#define AB8500_ANCCONF1 0x53
+#define AB8500_ANCCONF2 0x54
+#define AB8500_ANCCONF3 0x55
+#define AB8500_ANCCONF4 0x56
+#define AB8500_ANCCONF5 0x57
+#define AB8500_ANCCONF6 0x58
+#define AB8500_ANCCONF7 0x59
+#define AB8500_ANCCONF8 0x5A
+#define AB8500_ANCCONF9 0x5B
+#define AB8500_ANCCONF10 0x5C
+#define AB8500_ANCCONF11 0x5D
+#define AB8500_ANCCONF12 0x5E
+#define AB8500_ANCCONF13 0x5F
+#define AB8500_ANCCONF14 0x60
+#define AB8500_SIDFIRADR 0x61
+#define AB8500_SIDFIRCOEF1 0x62
+#define AB8500_SIDFIRCOEF2 0x63
+#define AB8500_SIDFIRCONF 0x64
+#define AB8500_AUDINTMASK1 0x65
+#define AB8500_AUDINTSOURCE1 0x66
+#define AB8500_AUDINTMASK2 0x67
+#define AB8500_AUDINTSOURCE2 0x68
+#define AB8500_FIFOCONF1 0x69
+#define AB8500_FIFOCONF2 0x6A
+#define AB8500_FIFOCONF3 0x6B
+#define AB8500_FIFOCONF4 0x6C
+#define AB8500_FIFOCONF5 0x6D
+#define AB8500_FIFOCONF6 0x6E
+#define AB8500_AUDREV 0x6F
+
+#define AB8500_FIRST_REG AB8500_POWERUP
+#define AB8500_LAST_REG AB8500_AUDREV
+#define AB8500_CACHEREGNUM (AB8500_LAST_REG + 1)
+
+#define AB8500_MASK_ALL 0xFF
+#define AB8500_MASK_NONE 0x00
+
+/* AB8500_POWERUP */
+#define AB8500_POWERUP_POWERUP 7
+#define AB8500_POWERUP_ENANA 3
+
+/* AB8500_AUDSWRESET */
+#define AB8500_AUDSWRESET_SWRESET 7
+
+/* AB8500_ADPATHENA */
+#define AB8500_ADPATHENA_ENAD12 7
+#define AB8500_ADPATHENA_ENAD34 5
+#define AB8500_ADPATHENA_ENAD5768 3
+
+/* AB8500_DAPATHENA */
+#define AB8500_DAPATHENA_ENDA1 7
+#define AB8500_DAPATHENA_ENDA2 6
+#define AB8500_DAPATHENA_ENDA3 5
+#define AB8500_DAPATHENA_ENDA4 4
+#define AB8500_DAPATHENA_ENDA5 3
+#define AB8500_DAPATHENA_ENDA6 2
+
+/* AB8500_ANACONF1 */
+#define AB8500_ANACONF1_HSLOWPOW 7
+#define AB8500_ANACONF1_DACLOWPOW1 6
+#define AB8500_ANACONF1_DACLOWPOW0 5
+#define AB8500_ANACONF1_EARDACLOWPOW 4
+#define AB8500_ANACONF1_EARSELCM 2
+#define AB8500_ANACONF1_HSHPEN 1
+#define AB8500_ANACONF1_EARDRVLOWPOW 0
+
+/* AB8500_ANACONF2 */
+#define AB8500_ANACONF2_ENMIC1 7
+#define AB8500_ANACONF2_ENMIC2 6
+#define AB8500_ANACONF2_ENLINL 5
+#define AB8500_ANACONF2_ENLINR 4
+#define AB8500_ANACONF2_MUTMIC1 3
+#define AB8500_ANACONF2_MUTMIC2 2
+#define AB8500_ANACONF2_MUTLINL 1
+#define AB8500_ANACONF2_MUTLINR 0
+
+/* AB8500_DIGMICCONF */
+#define AB8500_DIGMICCONF_ENDMIC1 7
+#define AB8500_DIGMICCONF_ENDMIC2 6
+#define AB8500_DIGMICCONF_ENDMIC3 5
+#define AB8500_DIGMICCONF_ENDMIC4 4
+#define AB8500_DIGMICCONF_ENDMIC5 3
+#define AB8500_DIGMICCONF_ENDMIC6 2
+#define AB8500_DIGMICCONF_HSFADSPEED 0
+
+/* AB8500_ANACONF3 */
+#define AB8500_ANACONF3_MIC1SEL 7
+#define AB8500_ANACONF3_LINRSEL 6
+#define AB8500_ANACONF3_ENDRVHSL 5
+#define AB8500_ANACONF3_ENDRVHSR 4
+#define AB8500_ANACONF3_ENADCMIC 2
+#define AB8500_ANACONF3_ENADCLINL 1
+#define AB8500_ANACONF3_ENADCLINR 0
+
+/* AB8500_ANACONF4 */
+#define AB8500_ANACONF4_DISPDVSS 7
+#define AB8500_ANACONF4_ENEAR 6
+#define AB8500_ANACONF4_ENHSL 5
+#define AB8500_ANACONF4_ENHSR 4
+#define AB8500_ANACONF4_ENHFL 3
+#define AB8500_ANACONF4_ENHFR 2
+#define AB8500_ANACONF4_ENVIB1 1
+#define AB8500_ANACONF4_ENVIB2 0
+
+/* AB8500_DAPATHCONF */
+#define AB8500_DAPATHCONF_ENDACEAR 6
+#define AB8500_DAPATHCONF_ENDACHSL 5
+#define AB8500_DAPATHCONF_ENDACHSR 4
+#define AB8500_DAPATHCONF_ENDACHFL 3
+#define AB8500_DAPATHCONF_ENDACHFR 2
+#define AB8500_DAPATHCONF_ENDACVIB1 1
+#define AB8500_DAPATHCONF_ENDACVIB2 0
+
+/* AB8500_MUTECONF */
+#define AB8500_MUTECONF_MUTEAR 6
+#define AB8500_MUTECONF_MUTHSL 5
+#define AB8500_MUTECONF_MUTHSR 4
+#define AB8500_MUTECONF_MUTDACEAR 2
+#define AB8500_MUTECONF_MUTDACHSL 1
+#define AB8500_MUTECONF_MUTDACHSR 0
+
+/* AB8500_SHORTCIRCONF */
+#define AB8500_SHORTCIRCONF_ENSHORTPWD 7
+#define AB8500_SHORTCIRCONF_EARSHORTDIS 6
+#define AB8500_SHORTCIRCONF_HSSHORTDIS 5
+#define AB8500_SHORTCIRCONF_HSPULLDEN 4
+#define AB8500_SHORTCIRCONF_HSOSCEN 2
+#define AB8500_SHORTCIRCONF_HSFADDIS 1
+#define AB8500_SHORTCIRCONF_HSZCDDIS 0
+/* Zero cross should be disabled */
+
+/* AB8500_ANACONF5 */
+#define AB8500_ANACONF5_ENCPHS 7
+#define AB8500_ANACONF5_HSLDACTOLOL 5
+#define AB8500_ANACONF5_HSRDACTOLOR 4
+#define AB8500_ANACONF5_ENLOL 3
+#define AB8500_ANACONF5_ENLOR 2
+#define AB8500_ANACONF5_HSAUTOEN 0
+
+/* AB8500_ENVCPCONF */
+#define AB8500_ENVCPCONF_ENVDETHTHRE 4
+#define AB8500_ENVCPCONF_ENVDETLTHRE 0
+#define AB8500_ENVCPCONF_ENVDETHTHRE_MAX 0x0F
+#define AB8500_ENVCPCONF_ENVDETLTHRE_MAX 0x0F
+
+/* AB8500_SIGENVCONF */
+#define AB8500_SIGENVCONF_CPLVEN 5
+#define AB8500_SIGENVCONF_ENVDETCPEN 4
+#define AB8500_SIGENVCONF_ENVDETTIME 0
+#define AB8500_SIGENVCONF_ENVDETTIME_MAX 0x0F
+
+/* AB8500_PWMGENCONF1 */
+#define AB8500_PWMGENCONF1_PWMTOVIB1 7
+#define AB8500_PWMGENCONF1_PWMTOVIB2 6
+#define AB8500_PWMGENCONF1_PWM1CTRL 5
+#define AB8500_PWMGENCONF1_PWM2CTRL 4
+#define AB8500_PWMGENCONF1_PWM1NCTRL 3
+#define AB8500_PWMGENCONF1_PWM1PCTRL 2
+#define AB8500_PWMGENCONF1_PWM2NCTRL 1
+#define AB8500_PWMGENCONF1_PWM2PCTRL 0
+
+/* AB8500_PWMGENCONF2 */
+/* AB8500_PWMGENCONF3 */
+/* AB8500_PWMGENCONF4 */
+/* AB8500_PWMGENCONF5 */
+#define AB8500_PWMGENCONFX_PWMVIBXPOL 7
+#define AB8500_PWMGENCONFX_PWMVIBXDUTCYC 0
+#define AB8500_PWMGENCONFX_PWMVIBXDUTCYC_MAX 0x64
+
+/* AB8500_ANAGAIN1 */
+/* AB8500_ANAGAIN2 */
+#define AB8500_ANAGAINX_ENSEMICX 7
+#define AB8500_ANAGAINX_LOWPOWMICX 6
+#define AB8500_ANAGAINX_MICXGAIN 0
+#define AB8500_ANAGAINX_MICXGAIN_MAX 0x1F
+
+/* AB8500_ANAGAIN3 */
+#define AB8500_ANAGAIN3_HSLGAIN 4
+#define AB8500_ANAGAIN3_HSRGAIN 0
+#define AB8500_ANAGAIN3_HSXGAIN_MAX 0x0F
+
+/* AB8500_ANAGAIN4 */
+#define AB8500_ANAGAIN4_LINLGAIN 4
+#define AB8500_ANAGAIN4_LINRGAIN 0
+#define AB8500_ANAGAIN4_LINXGAIN_MAX 0x0F
+
+/* AB8500_DIGLINHSLGAIN */
+/* AB8500_DIGLINHSRGAIN */
+#define AB8500_DIGLINHSXGAIN_LINTOHSXGAIN 0
+#define AB8500_DIGLINHSXGAIN_LINTOHSXGAIN_MAX 0x13
+
+/* AB8500_ADFILTCONF */
+#define AB8500_ADFILTCONF_AD1NH 7
+#define AB8500_ADFILTCONF_AD2NH 6
+#define AB8500_ADFILTCONF_AD3NH 5
+#define AB8500_ADFILTCONF_AD4NH 4
+#define AB8500_ADFILTCONF_AD1VOICE 3
+#define AB8500_ADFILTCONF_AD2VOICE 2
+#define AB8500_ADFILTCONF_AD3VOICE 1
+#define AB8500_ADFILTCONF_AD4VOICE 0
+
+/* AB8500_DIGIFCONF1 */
+#define AB8500_DIGIFCONF1_ENMASTGEN 7
+#define AB8500_DIGIFCONF1_IF1BITCLKOS1 6
+#define AB8500_DIGIFCONF1_IF1BITCLKOS0 5
+#define AB8500_DIGIFCONF1_ENFSBITCLK1 4
+#define AB8500_DIGIFCONF1_IF0BITCLKOS1 2
+#define AB8500_DIGIFCONF1_IF0BITCLKOS0 1
+#define AB8500_DIGIFCONF1_ENFSBITCLK0 0
+
+/* AB8500_DIGIFCONF2 */
+#define AB8500_DIGIFCONF2_FSYNC0P 6
+#define AB8500_DIGIFCONF2_BITCLK0P 5
+#define AB8500_DIGIFCONF2_IF0DEL 4
+#define AB8500_DIGIFCONF2_IF0FORMAT1 3
+#define AB8500_DIGIFCONF2_IF0FORMAT0 2
+#define AB8500_DIGIFCONF2_IF0WL1 1
+#define AB8500_DIGIFCONF2_IF0WL0 0
+
+/* AB8500_DIGIFCONF3 */
+#define AB8500_DIGIFCONF3_IF0DATOIF1AD 7
+#define AB8500_DIGIFCONF3_IF0CLKTOIF1CLK 6
+#define AB8500_DIGIFCONF3_IF1MASTER 5
+#define AB8500_DIGIFCONF3_IF1DATOIF0AD 3
+#define AB8500_DIGIFCONF3_IF1CLKTOIF0CLK 2
+#define AB8500_DIGIFCONF3_IF0MASTER 1
+#define AB8500_DIGIFCONF3_IF0BFIFOEN 0
+
+/* AB8500_DIGIFCONF4 */
+#define AB8500_DIGIFCONF4_FSYNC1P 6
+#define AB8500_DIGIFCONF4_BITCLK1P 5
+#define AB8500_DIGIFCONF4_IF1DEL 4
+#define AB8500_DIGIFCONF4_IF1FORMAT1 3
+#define AB8500_DIGIFCONF4_IF1FORMAT0 2
+#define AB8500_DIGIFCONF4_IF1WL1 1
+#define AB8500_DIGIFCONF4_IF1WL0 0
+
+/* AB8500_ADSLOTSELX */
+#define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD 0x00
+#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD 0x01
+#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD 0x02
+#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD 0x03
+#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD 0x04
+#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD 0x05
+#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD 0x06
+#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD 0x07
+#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD 0x08
+#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0x0F
+#define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN 0x00
+#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x10
+#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x20
+#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x30
+#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x40
+#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x50
+#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x60
+#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x70
+#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN 0x80
+#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN 0xF0
+#define AB8500_ADSLOTSELX_EVEN_SHIFT 0
+#define AB8500_ADSLOTSELX_ODD_SHIFT 4
+
+/* AB8500_ADSLOTHIZCTRL1 */
+/* AB8500_ADSLOTHIZCTRL2 */
+/* AB8500_ADSLOTHIZCTRL3 */
+/* AB8500_ADSLOTHIZCTRL4 */
+/* AB8500_DASLOTCONF1 */
+#define AB8500_DASLOTCONF1_DA12VOICE 7
+#define AB8500_DASLOTCONF1_SWAPDA12_34 6
+#define AB8500_DASLOTCONF1_DAI7TOADO1 5
+
+/* AB8500_DASLOTCONF2 */
+#define AB8500_DASLOTCONF2_DAI8TOADO2 5
+
+/* AB8500_DASLOTCONF3 */
+#define AB8500_DASLOTCONF3_DA34VOICE 7
+#define AB8500_DASLOTCONF3_DAI7TOADO3 5
+
+/* AB8500_DASLOTCONF4 */
+#define AB8500_DASLOTCONF4_DAI8TOADO4 5
+
+/* AB8500_DASLOTCONF5 */
+#define AB8500_DASLOTCONF5_DA56VOICE 7
+#define AB8500_DASLOTCONF5_DAI7TOADO5 5
+
+/* AB8500_DASLOTCONF6 */
+#define AB8500_DASLOTCONF6_DAI8TOADO6 5
+
+/* AB8500_DASLOTCONF7 */
+#define AB8500_DASLOTCONF7_DAI8TOADO7 5
+
+/* AB8500_DASLOTCONF8 */
+#define AB8500_DASLOTCONF8_DAI7TOADO8 5
+
+#define AB8500_DASLOTCONFX_SLTODAX_SHIFT 0
+#define AB8500_DASLOTCONFX_SLTODAX_MASK 0x1F
+
+/* AB8500_CLASSDCONF1 */
+#define AB8500_CLASSDCONF1_PARLHF 7
+#define AB8500_CLASSDCONF1_PARLVIB 6
+#define AB8500_CLASSDCONF1_VIB1SWAPEN 3
+#define AB8500_CLASSDCONF1_VIB2SWAPEN 2
+#define AB8500_CLASSDCONF1_HFLSWAPEN 1
+#define AB8500_CLASSDCONF1_HFRSWAPEN 0
+
+/* AB8500_CLASSDCONF2 */
+#define AB8500_CLASSDCONF2_FIRBYP3 7
+#define AB8500_CLASSDCONF2_FIRBYP2 6
+#define AB8500_CLASSDCONF2_FIRBYP1 5
+#define AB8500_CLASSDCONF2_FIRBYP0 4
+#define AB8500_CLASSDCONF2_HIGHVOLEN3 3
+#define AB8500_CLASSDCONF2_HIGHVOLEN2 2
+#define AB8500_CLASSDCONF2_HIGHVOLEN1 1
+#define AB8500_CLASSDCONF2_HIGHVOLEN0 0
+
+/* AB8500_CLASSDCONF3 */
+#define AB8500_CLASSDCONF3_DITHHPGAIN 4
+#define AB8500_CLASSDCONF3_DITHHPGAIN_MAX 0x0A
+#define AB8500_CLASSDCONF3_DITHWGAIN 0
+#define AB8500_CLASSDCONF3_DITHWGAIN_MAX 0x0A
+
+/* AB8500_DMICFILTCONF */
+#define AB8500_DMICFILTCONF_ANCINSEL 7
+#define AB8500_DMICFILTCONF_DA3TOEAR 6
+#define AB8500_DMICFILTCONF_DMIC1SINC3 5
+#define AB8500_DMICFILTCONF_DMIC2SINC3 4
+#define AB8500_DMICFILTCONF_DMIC3SINC3 3
+#define AB8500_DMICFILTCONF_DMIC4SINC3 2
+#define AB8500_DMICFILTCONF_DMIC5SINC3 1
+#define AB8500_DMICFILTCONF_DMIC6SINC3 0
+
+/* AB8500_DIGMULTCONF1 */
+#define AB8500_DIGMULTCONF1_DATOHSLEN 7
+#define AB8500_DIGMULTCONF1_DATOHSREN 6
+#define AB8500_DIGMULTCONF1_AD1SEL 5
+#define AB8500_DIGMULTCONF1_AD2SEL 4
+#define AB8500_DIGMULTCONF1_AD3SEL 3
+#define AB8500_DIGMULTCONF1_AD5SEL 2
+#define AB8500_DIGMULTCONF1_AD6SEL 1
+#define AB8500_DIGMULTCONF1_ANCSEL 0
+
+/* AB8500_DIGMULTCONF2 */
+#define AB8500_DIGMULTCONF2_DATOHFREN 7
+#define AB8500_DIGMULTCONF2_DATOHFLEN 6
+#define AB8500_DIGMULTCONF2_HFRSEL 5
+#define AB8500_DIGMULTCONF2_HFLSEL 4
+#define AB8500_DIGMULTCONF2_FIRSID1SEL 2
+#define AB8500_DIGMULTCONF2_FIRSID2SEL 0
+
+/* AB8500_ADDIGGAIN1 */
+/* AB8500_ADDIGGAIN2 */
+/* AB8500_ADDIGGAIN3 */
+/* AB8500_ADDIGGAIN4 */
+/* AB8500_ADDIGGAIN5 */
+/* AB8500_ADDIGGAIN6 */
+#define AB8500_ADDIGGAINX_FADEDISADX 6
+#define AB8500_ADDIGGAINX_ADXGAIN_MAX 0x3F
+
+/* AB8500_DADIGGAIN1 */
+/* AB8500_DADIGGAIN2 */
+/* AB8500_DADIGGAIN3 */
+/* AB8500_DADIGGAIN4 */
+/* AB8500_DADIGGAIN5 */
+/* AB8500_DADIGGAIN6 */
+#define AB8500_DADIGGAINX_FADEDISDAX 6
+#define AB8500_DADIGGAINX_DAXGAIN_MAX 0x3F
+
+/* AB8500_ADDIGLOOPGAIN1 */
+/* AB8500_ADDIGLOOPGAIN2 */
+#define AB8500_ADDIGLOOPGAINX_FADEDISADXL 6
+#define AB8500_ADDIGLOOPGAINX_ADXLBGAIN_MAX 0x3F
+
+/* AB8500_HSLEARDIGGAIN */
+#define AB8500_HSLEARDIGGAIN_HSSINC1 7
+#define AB8500_HSLEARDIGGAIN_FADEDISHSL 4
+#define AB8500_HSLEARDIGGAIN_HSLDGAIN_MAX 0x09
+
+/* AB8500_HSRDIGGAIN */
+#define AB8500_HSRDIGGAIN_FADESPEED 6
+#define AB8500_HSRDIGGAIN_FADEDISHSR 4
+#define AB8500_HSRDIGGAIN_HSRDGAIN_MAX 0x09
+
+/* AB8500_SIDFIRGAIN1 */
+/* AB8500_SIDFIRGAIN2 */
+#define AB8500_SIDFIRGAINX_FIRSIDXGAIN_MAX 0x1F
+
+/* AB8500_ANCCONF1 */
+#define AB8500_ANCCONF1_ANCIIRUPDATE 3
+#define AB8500_ANCCONF1_ENANC 2
+#define AB8500_ANCCONF1_ANCIIRINIT 1
+#define AB8500_ANCCONF1_ANCFIRUPDATE 0
+
+/* AB8500_ANCCONF2 */
+#define AB8500_ANCCONF2_SHIFT 5
+#define AB8500_ANCCONF2_MIN -0x10
+#define AB8500_ANCCONF2_MAX 0xF
+
+/* AB8500_ANCCONF3 */
+#define AB8500_ANCCONF3_SHIFT 5
+#define AB8500_ANCCONF3_MIN -0x10
+#define AB8500_ANCCONF3_MAX 0xF
+
+/* AB8500_ANCCONF4 */
+#define AB8500_ANCCONF4_SHIFT 5
+#define AB8500_ANCCONF4_MIN -0x10
+#define AB8500_ANCCONF4_MAX 0xF
+
+/* AB8500_ANC_FIR_COEFFS */
+#define AB8500_ANC_FIR_COEFF_MIN -0x8000
+#define AB8500_ANC_FIR_COEFF_MAX 0x7FFF
+#define AB8500_ANC_FIR_COEFFS 15
+
+/* AB8500_ANC_IIR_COEFFS */
+#define AB8500_ANC_IIR_COEFF_MIN -0x800000
+#define AB8500_ANC_IIR_COEFF_MAX 0x7FFFFF
+#define AB8500_ANC_IIR_COEFFS 24
+/* AB8500_ANC_WARP_DELAY */
+#define AB8500_ANC_WARP_DELAY_SHIFT 16
+#define AB8500_ANC_WARP_DELAY_MIN 0x0000
+#define AB8500_ANC_WARP_DELAY_MAX 0xFFFF
+
+/* AB8500_ANCCONF11 */
+/* AB8500_ANCCONF12 */
+/* AB8500_ANCCONF13 */
+/* AB8500_ANCCONF14 */
+
+/* AB8500_SIDFIRADR */
+#define AB8500_SIDFIRADR_FIRSIDSET 7
+#define AB8500_SIDFIRADR_ADDRESS_SHIFT 0
+#define AB8500_SIDFIRADR_ADDRESS_MAX 0x7F
+
+/* AB8500_SIDFIRCOEF1 */
+/* AB8500_SIDFIRCOEF2 */
+#define AB8500_SID_FIR_COEFF_MIN 0
+#define AB8500_SID_FIR_COEFF_MAX 0xFFFF
+#define AB8500_SID_FIR_COEFFS 128
+
+/* AB8500_SIDFIRCONF */
+#define AB8500_SIDFIRCONF_ENFIRSIDS 2
+#define AB8500_SIDFIRCONF_FIRSIDSTOIF1 1
+#define AB8500_SIDFIRCONF_FIRSIDBUSY 0
+
+/* AB8500_AUDINTMASK1 */
+/* AB8500_AUDINTSOURCE1 */
+/* AB8500_AUDINTMASK2 */
+/* AB8500_AUDINTSOURCE2 */
+
+/* AB8500_FIFOCONF1 */
+#define AB8500_FIFOCONF1_BFIFOMASK 0x80
+#define AB8500_FIFOCONF1_BFIFO19M2 0x40
+#define AB8500_FIFOCONF1_BFIFOINT_SHIFT 0
+#define AB8500_FIFOCONF1_BFIFOINT_MAX 0x3F
+
+/* AB8500_FIFOCONF2 */
+#define AB8500_FIFOCONF2_BFIFOTX_SHIFT 0
+#define AB8500_FIFOCONF2_BFIFOTX_MAX 0xFF
+
+/* AB8500_FIFOCONF3 */
+#define AB8500_FIFOCONF3_BFIFOEXSL_SHIFT 5
+#define AB8500_FIFOCONF3_BFIFOEXSL_MAX 0x5
+#define AB8500_FIFOCONF3_PREBITCLK0_SHIFT 2
+#define AB8500_FIFOCONF3_PREBITCLK0_MAX 0x7
+#define AB8500_FIFOCONF3_BFIFOMAST_SHIFT 1
+#define AB8500_FIFOCONF3_BFIFORUN_SHIFT 0
+
+/* AB8500_FIFOCONF4 */
+#define AB8500_FIFOCONF4_BFIFOFRAMSW_SHIFT 0
+#define AB8500_FIFOCONF4_BFIFOFRAMSW_MAX 0xFF
+
+/* AB8500_FIFOCONF5 */
+#define AB8500_FIFOCONF5_BFIFOWAKEUP_SHIFT 0
+#define AB8500_FIFOCONF5_BFIFOWAKEUP_MAX 0xFF
+
+/* AB8500_FIFOCONF6 */
+#define AB8500_FIFOCONF6_BFIFOSAMPLE_SHIFT 0
+#define AB8500_FIFOCONF6_BFIFOSAMPLE_MAX 0xFF
+
+/* AB8500_AUDREV */
+
+#endif
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c
index 2023c749f232..ea06b834a7de 100644
--- a/sound/soc/codecs/ac97.c
+++ b/sound/soc/codecs/ac97.c
@@ -91,11 +91,6 @@ static int ac97_soc_probe(struct snd_soc_codec *codec)
return 0;
}
-static int ac97_soc_remove(struct snd_soc_codec *codec)
-{
- return 0;
-}
-
#ifdef CONFIG_PM
static int ac97_soc_suspend(struct snd_soc_codec *codec)
{
@@ -119,7 +114,6 @@ static struct snd_soc_codec_driver soc_codec_dev_ac97 = {
.write = ac97_write,
.read = ac97_read,
.probe = ac97_soc_probe,
- .remove = ac97_soc_remove,
.suspend = ac97_soc_suspend,
.resume = ac97_soc_resume,
};
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
new file mode 100644
index 000000000000..5c9cacaf2d52
--- /dev/null
+++ b/sound/soc/codecs/arizona.c
@@ -0,0 +1,937 @@
+/*
+ * arizona.c - Wolfson Arizona class device shared support
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/gcd.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+
+#include "arizona.h"
+
+#define ARIZONA_AIF_BCLK_CTRL 0x00
+#define ARIZONA_AIF_TX_PIN_CTRL 0x01
+#define ARIZONA_AIF_RX_PIN_CTRL 0x02
+#define ARIZONA_AIF_RATE_CTRL 0x03
+#define ARIZONA_AIF_FORMAT 0x04
+#define ARIZONA_AIF_TX_BCLK_RATE 0x05
+#define ARIZONA_AIF_RX_BCLK_RATE 0x06
+#define ARIZONA_AIF_FRAME_CTRL_1 0x07
+#define ARIZONA_AIF_FRAME_CTRL_2 0x08
+#define ARIZONA_AIF_FRAME_CTRL_3 0x09
+#define ARIZONA_AIF_FRAME_CTRL_4 0x0A
+#define ARIZONA_AIF_FRAME_CTRL_5 0x0B
+#define ARIZONA_AIF_FRAME_CTRL_6 0x0C
+#define ARIZONA_AIF_FRAME_CTRL_7 0x0D
+#define ARIZONA_AIF_FRAME_CTRL_8 0x0E
+#define ARIZONA_AIF_FRAME_CTRL_9 0x0F
+#define ARIZONA_AIF_FRAME_CTRL_10 0x10
+#define ARIZONA_AIF_FRAME_CTRL_11 0x11
+#define ARIZONA_AIF_FRAME_CTRL_12 0x12
+#define ARIZONA_AIF_FRAME_CTRL_13 0x13
+#define ARIZONA_AIF_FRAME_CTRL_14 0x14
+#define ARIZONA_AIF_FRAME_CTRL_15 0x15
+#define ARIZONA_AIF_FRAME_CTRL_16 0x16
+#define ARIZONA_AIF_FRAME_CTRL_17 0x17
+#define ARIZONA_AIF_FRAME_CTRL_18 0x18
+#define ARIZONA_AIF_TX_ENABLES 0x19
+#define ARIZONA_AIF_RX_ENABLES 0x1A
+#define ARIZONA_AIF_FORCE_WRITE 0x1B
+
+#define arizona_fll_err(_fll, fmt, ...) \
+ dev_err(_fll->arizona->dev, "FLL%d: " fmt, _fll->id, ##__VA_ARGS__)
+#define arizona_fll_warn(_fll, fmt, ...) \
+ dev_warn(_fll->arizona->dev, "FLL%d: " fmt, _fll->id, ##__VA_ARGS__)
+#define arizona_fll_dbg(_fll, fmt, ...) \
+ dev_err(_fll->arizona->dev, "FLL%d: " fmt, _fll->id, ##__VA_ARGS__)
+
+#define arizona_aif_err(_dai, fmt, ...) \
+ dev_err(_dai->dev, "AIF%d: " fmt, _dai->id, ##__VA_ARGS__)
+#define arizona_aif_warn(_dai, fmt, ...) \
+ dev_warn(_dai->dev, "AIF%d: " fmt, _dai->id, ##__VA_ARGS__)
+#define arizona_aif_dbg(_dai, fmt, ...) \
+ dev_err(_dai->dev, "AIF%d: " fmt, _dai->id, ##__VA_ARGS__)
+
+const char *arizona_mixer_texts[ARIZONA_NUM_MIXER_INPUTS] = {
+ "None",
+ "Tone Generator 1",
+ "Tone Generator 2",
+ "Haptics",
+ "AEC",
+ "Mic Mute Mixer",
+ "Noise Generator",
+ "IN1L",
+ "IN1R",
+ "IN2L",
+ "IN2R",
+ "IN3L",
+ "IN3R",
+ "IN4L",
+ "IN4R",
+ "AIF1RX1",
+ "AIF1RX2",
+ "AIF1RX3",
+ "AIF1RX4",
+ "AIF1RX5",
+ "AIF1RX6",
+ "AIF1RX7",
+ "AIF1RX8",
+ "AIF2RX1",
+ "AIF2RX2",
+ "AIF3RX1",
+ "AIF3RX2",
+ "SLIMRX1",
+ "SLIMRX2",
+ "SLIMRX3",
+ "SLIMRX4",
+ "SLIMRX5",
+ "SLIMRX6",
+ "SLIMRX7",
+ "SLIMRX8",
+ "EQ1",
+ "EQ2",
+ "EQ3",
+ "EQ4",
+ "DRC1L",
+ "DRC1R",
+ "DRC2L",
+ "DRC2R",
+ "LHPF1",
+ "LHPF2",
+ "LHPF3",
+ "LHPF4",
+ "DSP1.1",
+ "DSP1.2",
+ "DSP1.3",
+ "DSP1.4",
+ "DSP1.5",
+ "DSP1.6",
+ "ASRC1L",
+ "ASRC1R",
+ "ASRC2L",
+ "ASRC2R",
+};
+EXPORT_SYMBOL_GPL(arizona_mixer_texts);
+
+int arizona_mixer_values[ARIZONA_NUM_MIXER_INPUTS] = {
+ 0x00, /* None */
+ 0x04, /* Tone */
+ 0x05,
+ 0x06, /* Haptics */
+ 0x08, /* AEC */
+ 0x0c, /* Noise mixer */
+ 0x0d, /* Comfort noise */
+ 0x10, /* IN1L */
+ 0x11,
+ 0x12,
+ 0x13,
+ 0x14,
+ 0x15,
+ 0x16,
+ 0x17,
+ 0x20, /* AIF1RX1 */
+ 0x21,
+ 0x22,
+ 0x23,
+ 0x24,
+ 0x25,
+ 0x26,
+ 0x27,
+ 0x28, /* AIF2RX1 */
+ 0x29,
+ 0x30, /* AIF3RX1 */
+ 0x31,
+ 0x38, /* SLIMRX1 */
+ 0x39,
+ 0x3a,
+ 0x3b,
+ 0x3c,
+ 0x3d,
+ 0x3e,
+ 0x3f,
+ 0x50, /* EQ1 */
+ 0x51,
+ 0x52,
+ 0x53,
+ 0x58, /* DRC1L */
+ 0x59,
+ 0x5a,
+ 0x5b,
+ 0x60, /* LHPF1 */
+ 0x61,
+ 0x62,
+ 0x63,
+ 0x68, /* DSP1.1 */
+ 0x69,
+ 0x6a,
+ 0x6b,
+ 0x6c,
+ 0x6d,
+ 0x90, /* ASRC1L */
+ 0x91,
+ 0x92,
+ 0x93,
+};
+EXPORT_SYMBOL_GPL(arizona_mixer_values);
+
+const DECLARE_TLV_DB_SCALE(arizona_mixer_tlv, -3200, 100, 0);
+EXPORT_SYMBOL_GPL(arizona_mixer_tlv);
+
+static const char *arizona_lhpf_mode_text[] = {
+ "Low-pass", "High-pass"
+};
+
+const struct soc_enum arizona_lhpf1_mode =
+ SOC_ENUM_SINGLE(ARIZONA_HPLPF1_1, ARIZONA_LHPF1_MODE_SHIFT, 2,
+ arizona_lhpf_mode_text);
+EXPORT_SYMBOL_GPL(arizona_lhpf1_mode);
+
+const struct soc_enum arizona_lhpf2_mode =
+ SOC_ENUM_SINGLE(ARIZONA_HPLPF2_1, ARIZONA_LHPF2_MODE_SHIFT, 2,
+ arizona_lhpf_mode_text);
+EXPORT_SYMBOL_GPL(arizona_lhpf2_mode);
+
+const struct soc_enum arizona_lhpf3_mode =
+ SOC_ENUM_SINGLE(ARIZONA_HPLPF3_1, ARIZONA_LHPF3_MODE_SHIFT, 2,
+ arizona_lhpf_mode_text);
+EXPORT_SYMBOL_GPL(arizona_lhpf3_mode);
+
+const struct soc_enum arizona_lhpf4_mode =
+ SOC_ENUM_SINGLE(ARIZONA_HPLPF4_1, ARIZONA_LHPF4_MODE_SHIFT, 2,
+ arizona_lhpf_mode_text);
+EXPORT_SYMBOL_GPL(arizona_lhpf4_mode);
+
+int arizona_in_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol,
+ int event)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_in_ev);
+
+int arizona_out_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_out_ev);
+
+int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
+ int source, unsigned int freq, int dir)
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+ char *name;
+ unsigned int reg;
+ unsigned int mask = ARIZONA_SYSCLK_FREQ_MASK | ARIZONA_SYSCLK_SRC_MASK;
+ unsigned int val = source << ARIZONA_SYSCLK_SRC_SHIFT;
+ unsigned int *clk;
+
+ switch (clk_id) {
+ case ARIZONA_CLK_SYSCLK:
+ name = "SYSCLK";
+ reg = ARIZONA_SYSTEM_CLOCK_1;
+ clk = &priv->sysclk;
+ mask |= ARIZONA_SYSCLK_FRAC;
+ break;
+ case ARIZONA_CLK_ASYNCCLK:
+ name = "ASYNCCLK";
+ reg = ARIZONA_ASYNC_CLOCK_1;
+ clk = &priv->asyncclk;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (freq) {
+ case 5644800:
+ case 6144000:
+ break;
+ case 11289600:
+ case 12288000:
+ val |= 1 << ARIZONA_SYSCLK_FREQ_SHIFT;
+ break;
+ case 22579200:
+ case 24576000:
+ val |= 2 << ARIZONA_SYSCLK_FREQ_SHIFT;
+ break;
+ case 45158400:
+ case 49152000:
+ val |= 3 << ARIZONA_SYSCLK_FREQ_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *clk = freq;
+
+ if (freq % 6144000)
+ val |= ARIZONA_SYSCLK_FRAC;
+
+ dev_dbg(arizona->dev, "%s set to %uHz", name, freq);
+
+ return regmap_update_bits(arizona->regmap, reg, mask, val);
+}
+EXPORT_SYMBOL_GPL(arizona_set_sysclk);
+
+static int arizona_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ int lrclk, bclk, mode, base;
+
+ base = dai->driver->base;
+
+ lrclk = 0;
+ bclk = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ mode = 0;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ mode = 1;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ mode = 2;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ mode = 3;
+ break;
+ default:
+ arizona_aif_err(dai, "Unsupported DAI format %d\n",
+ fmt & SND_SOC_DAIFMT_FORMAT_MASK);
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ lrclk |= ARIZONA_AIF1TX_LRCLK_MSTR;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ bclk |= ARIZONA_AIF1_BCLK_MSTR;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ bclk |= ARIZONA_AIF1_BCLK_MSTR;
+ lrclk |= ARIZONA_AIF1TX_LRCLK_MSTR;
+ break;
+ default:
+ arizona_aif_err(dai, "Unsupported master mode %d\n",
+ fmt & SND_SOC_DAIFMT_MASTER_MASK);
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ bclk |= ARIZONA_AIF1_BCLK_INV;
+ lrclk |= ARIZONA_AIF1TX_LRCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ bclk |= ARIZONA_AIF1_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ lrclk |= ARIZONA_AIF1TX_LRCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_BCLK_CTRL,
+ ARIZONA_AIF1_BCLK_INV | ARIZONA_AIF1_BCLK_MSTR,
+ bclk);
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_TX_PIN_CTRL,
+ ARIZONA_AIF1TX_LRCLK_INV |
+ ARIZONA_AIF1TX_LRCLK_MSTR, lrclk);
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_RX_PIN_CTRL,
+ ARIZONA_AIF1RX_LRCLK_INV |
+ ARIZONA_AIF1RX_LRCLK_MSTR, lrclk);
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_FORMAT,
+ ARIZONA_AIF1_FMT_MASK, mode);
+
+ return 0;
+}
+
+static const int arizona_48k_bclk_rates[] = {
+ -1,
+ 48000,
+ 64000,
+ 96000,
+ 128000,
+ 192000,
+ 256000,
+ 384000,
+ 512000,
+ 768000,
+ 1024000,
+ 1536000,
+ 2048000,
+ 3072000,
+ 4096000,
+ 6144000,
+ 8192000,
+ 12288000,
+ 24576000,
+};
+
+static const unsigned int arizona_48k_rates[] = {
+ 12000,
+ 24000,
+ 48000,
+ 96000,
+ 192000,
+ 384000,
+ 768000,
+ 4000,
+ 8000,
+ 16000,
+ 32000,
+ 64000,
+ 128000,
+ 256000,
+ 512000,
+};
+
+static const struct snd_pcm_hw_constraint_list arizona_48k_constraint = {
+ .count = ARRAY_SIZE(arizona_48k_rates),
+ .list = arizona_48k_rates,
+};
+
+static const int arizona_44k1_bclk_rates[] = {
+ -1,
+ 44100,
+ 58800,
+ 88200,
+ 117600,
+ 177640,
+ 235200,
+ 352800,
+ 470400,
+ 705600,
+ 940800,
+ 1411200,
+ 1881600,
+ 2882400,
+ 3763200,
+ 5644800,
+ 7526400,
+ 11289600,
+ 22579200,
+};
+
+static const unsigned int arizona_44k1_rates[] = {
+ 11025,
+ 22050,
+ 44100,
+ 88200,
+ 176400,
+ 352800,
+ 705600,
+};
+
+static const struct snd_pcm_hw_constraint_list arizona_44k1_constraint = {
+ .count = ARRAY_SIZE(arizona_44k1_rates),
+ .list = arizona_44k1_rates,
+};
+
+static int arizona_sr_vals[] = {
+ 0,
+ 12000,
+ 24000,
+ 48000,
+ 96000,
+ 192000,
+ 384000,
+ 768000,
+ 0,
+ 11025,
+ 22050,
+ 44100,
+ 88200,
+ 176400,
+ 352800,
+ 705600,
+ 4000,
+ 8000,
+ 16000,
+ 32000,
+ 64000,
+ 128000,
+ 256000,
+ 512000,
+};
+
+static int arizona_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona_dai_priv *dai_priv = &priv->dai[dai->id - 1];
+ const struct snd_pcm_hw_constraint_list *constraint;
+ unsigned int base_rate;
+
+ switch (dai_priv->clk) {
+ case ARIZONA_CLK_SYSCLK:
+ base_rate = priv->sysclk;
+ break;
+ case ARIZONA_CLK_ASYNCCLK:
+ base_rate = priv->asyncclk;
+ break;
+ default:
+ return 0;
+ }
+
+ if (base_rate % 8000)
+ constraint = &arizona_44k1_constraint;
+ else
+ constraint = &arizona_48k_constraint;
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ constraint);
+}
+
+static int arizona_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona_dai_priv *dai_priv = &priv->dai[dai->id - 1];
+ int base = dai->driver->base;
+ const int *rates;
+ int i;
+ int bclk, lrclk, wl, frame, sr_val;
+
+ if (params_rate(params) % 8000)
+ rates = &arizona_44k1_bclk_rates[0];
+ else
+ rates = &arizona_48k_bclk_rates[0];
+
+ for (i = 0; i < ARRAY_SIZE(arizona_44k1_bclk_rates); i++) {
+ if (rates[i] >= snd_soc_params_to_bclk(params) &&
+ rates[i] % params_rate(params) == 0) {
+ bclk = i;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(arizona_44k1_bclk_rates)) {
+ arizona_aif_err(dai, "Unsupported sample rate %dHz\n",
+ params_rate(params));
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(arizona_sr_vals); i++)
+ if (arizona_sr_vals[i] == params_rate(params))
+ break;
+ if (i == ARRAY_SIZE(arizona_sr_vals)) {
+ arizona_aif_err(dai, "Unsupported sample rate %dHz\n",
+ params_rate(params));
+ return -EINVAL;
+ }
+ sr_val = i;
+
+ lrclk = snd_soc_params_to_bclk(params) / params_rate(params);
+
+ arizona_aif_dbg(dai, "BCLK %dHz LRCLK %dHz\n",
+ rates[bclk], rates[bclk] / lrclk);
+
+ wl = snd_pcm_format_width(params_format(params));
+ frame = wl << ARIZONA_AIF1TX_WL_SHIFT | wl;
+
+ /*
+ * We will need to be more flexible than this in future,
+ * currently we use a single sample rate for SYSCLK.
+ */
+ switch (dai_priv->clk) {
+ case ARIZONA_CLK_SYSCLK:
+ snd_soc_update_bits(codec, ARIZONA_SAMPLE_RATE_1,
+ ARIZONA_SAMPLE_RATE_1_MASK, sr_val);
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_RATE_CTRL,
+ ARIZONA_AIF1_RATE_MASK, 0);
+ break;
+ case ARIZONA_CLK_ASYNCCLK:
+ snd_soc_update_bits(codec, ARIZONA_ASYNC_SAMPLE_RATE_1,
+ ARIZONA_ASYNC_SAMPLE_RATE_MASK, sr_val);
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_RATE_CTRL,
+ ARIZONA_AIF1_RATE_MASK, 8);
+ break;
+ default:
+ arizona_aif_err(dai, "Invalid clock %d\n", dai_priv->clk);
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_BCLK_CTRL,
+ ARIZONA_AIF1_BCLK_FREQ_MASK, bclk);
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_TX_BCLK_RATE,
+ ARIZONA_AIF1TX_BCPF_MASK, lrclk);
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_RX_BCLK_RATE,
+ ARIZONA_AIF1RX_BCPF_MASK, lrclk);
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_FRAME_CTRL_1,
+ ARIZONA_AIF1TX_WL_MASK |
+ ARIZONA_AIF1TX_SLOT_LEN_MASK, frame);
+ snd_soc_update_bits(codec, base + ARIZONA_AIF_FRAME_CTRL_2,
+ ARIZONA_AIF1RX_WL_MASK |
+ ARIZONA_AIF1RX_SLOT_LEN_MASK, frame);
+
+ return 0;
+}
+
+static const char *arizona_dai_clk_str(int clk_id)
+{
+ switch (clk_id) {
+ case ARIZONA_CLK_SYSCLK:
+ return "SYSCLK";
+ case ARIZONA_CLK_ASYNCCLK:
+ return "ASYNCCLK";
+ default:
+ return "Unknown clock";
+ }
+}
+
+static int arizona_dai_set_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona_dai_priv *dai_priv = &priv->dai[dai->id - 1];
+ struct snd_soc_dapm_route routes[2];
+
+ switch (clk_id) {
+ case ARIZONA_CLK_SYSCLK:
+ case ARIZONA_CLK_ASYNCCLK:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (clk_id == dai_priv->clk)
+ return 0;
+
+ if (dai->active) {
+ dev_err(codec->dev, "Can't change clock on active DAI %d\n",
+ dai->id);
+ return -EBUSY;
+ }
+
+ memset(&routes, 0, sizeof(routes));
+ routes[0].sink = dai->driver->capture.stream_name;
+ routes[1].sink = dai->driver->playback.stream_name;
+
+ routes[0].source = arizona_dai_clk_str(dai_priv->clk);
+ routes[1].source = arizona_dai_clk_str(dai_priv->clk);
+ snd_soc_dapm_del_routes(&codec->dapm, routes, ARRAY_SIZE(routes));
+
+ routes[0].source = arizona_dai_clk_str(clk_id);
+ routes[1].source = arizona_dai_clk_str(clk_id);
+ snd_soc_dapm_add_routes(&codec->dapm, routes, ARRAY_SIZE(routes));
+
+ return snd_soc_dapm_sync(&codec->dapm);
+}
+
+const struct snd_soc_dai_ops arizona_dai_ops = {
+ .startup = arizona_startup,
+ .set_fmt = arizona_set_fmt,
+ .hw_params = arizona_hw_params,
+ .set_sysclk = arizona_dai_set_sysclk,
+};
+EXPORT_SYMBOL_GPL(arizona_dai_ops);
+
+int arizona_init_dai(struct arizona_priv *priv, int id)
+{
+ struct arizona_dai_priv *dai_priv = &priv->dai[id];
+
+ dai_priv->clk = ARIZONA_CLK_SYSCLK;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_init_dai);
+
+static irqreturn_t arizona_fll_lock(int irq, void *data)
+{
+ struct arizona_fll *fll = data;
+
+ arizona_fll_dbg(fll, "Locked\n");
+
+ complete(&fll->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t arizona_fll_clock_ok(int irq, void *data)
+{
+ struct arizona_fll *fll = data;
+
+ arizona_fll_dbg(fll, "clock OK\n");
+
+ complete(&fll->ok);
+
+ return IRQ_HANDLED;
+}
+
+static struct {
+ unsigned int min;
+ unsigned int max;
+ u16 fratio;
+ int ratio;
+} fll_fratios[] = {
+ { 0, 64000, 4, 16 },
+ { 64000, 128000, 3, 8 },
+ { 128000, 256000, 2, 4 },
+ { 256000, 1000000, 1, 2 },
+ { 1000000, 13500000, 0, 1 },
+};
+
+struct arizona_fll_cfg {
+ int n;
+ int theta;
+ int lambda;
+ int refdiv;
+ int outdiv;
+ int fratio;
+};
+
+static int arizona_calc_fll(struct arizona_fll *fll,
+ struct arizona_fll_cfg *cfg,
+ unsigned int Fref,
+ unsigned int Fout)
+{
+ unsigned int target, div, gcd_fll;
+ int i, ratio;
+
+ arizona_fll_dbg(fll, "Fref=%u Fout=%u\n", Fref, Fout);
+
+ /* Fref must be <=13.5MHz */
+ div = 1;
+ cfg->refdiv = 0;
+ while ((Fref / div) > 13500000) {
+ div *= 2;
+ cfg->refdiv++;
+
+ if (div > 8) {
+ arizona_fll_err(fll,
+ "Can't scale %dMHz in to <=13.5MHz\n",
+ Fref);
+ return -EINVAL;
+ }
+ }
+
+ /* Apply the division for our remaining calculations */
+ Fref /= div;
+
+ /* Fvco should be over the targt; don't check the upper bound */
+ div = 1;
+ while (Fout * div < 90000000 * fll->vco_mult) {
+ div++;
+ if (div > 7) {
+ arizona_fll_err(fll, "No FLL_OUTDIV for Fout=%uHz\n",
+ Fout);
+ return -EINVAL;
+ }
+ }
+ target = Fout * div / fll->vco_mult;
+ cfg->outdiv = div;
+
+ arizona_fll_dbg(fll, "Fvco=%dHz\n", target);
+
+ /* Find an appropraite FLL_FRATIO and factor it out of the target */
+ for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
+ if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
+ cfg->fratio = fll_fratios[i].fratio;
+ ratio = fll_fratios[i].ratio;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(fll_fratios)) {
+ arizona_fll_err(fll, "Unable to find FRATIO for Fref=%uHz\n",
+ Fref);
+ return -EINVAL;
+ }
+
+ cfg->n = target / (ratio * Fref);
+
+ if (target % Fref) {
+ gcd_fll = gcd(target, ratio * Fref);
+ arizona_fll_dbg(fll, "GCD=%u\n", gcd_fll);
+
+ cfg->theta = (target - (cfg->n * ratio * Fref))
+ / gcd_fll;
+ cfg->lambda = (ratio * Fref) / gcd_fll;
+ } else {
+ cfg->theta = 0;
+ cfg->lambda = 0;
+ }
+
+ arizona_fll_dbg(fll, "N=%x THETA=%x LAMBDA=%x\n",
+ cfg->n, cfg->theta, cfg->lambda);
+ arizona_fll_dbg(fll, "FRATIO=%x(%d) OUTDIV=%x REFCLK_DIV=%x\n",
+ cfg->fratio, cfg->fratio, cfg->outdiv, cfg->refdiv);
+
+ return 0;
+
+}
+
+static void arizona_apply_fll(struct arizona *arizona, unsigned int base,
+ struct arizona_fll_cfg *cfg, int source)
+{
+ regmap_update_bits(arizona->regmap, base + 3,
+ ARIZONA_FLL1_THETA_MASK, cfg->theta);
+ regmap_update_bits(arizona->regmap, base + 4,
+ ARIZONA_FLL1_LAMBDA_MASK, cfg->lambda);
+ regmap_update_bits(arizona->regmap, base + 5,
+ ARIZONA_FLL1_FRATIO_MASK,
+ cfg->fratio << ARIZONA_FLL1_FRATIO_SHIFT);
+ regmap_update_bits(arizona->regmap, base + 6,
+ ARIZONA_FLL1_CLK_REF_DIV_MASK |
+ ARIZONA_FLL1_CLK_REF_SRC_MASK,
+ cfg->refdiv << ARIZONA_FLL1_CLK_REF_DIV_SHIFT |
+ source << ARIZONA_FLL1_CLK_REF_SRC_SHIFT);
+
+ regmap_update_bits(arizona->regmap, base + 2,
+ ARIZONA_FLL1_CTRL_UPD | ARIZONA_FLL1_N_MASK,
+ ARIZONA_FLL1_CTRL_UPD | cfg->n);
+}
+
+int arizona_set_fll(struct arizona_fll *fll, int source,
+ unsigned int Fref, unsigned int Fout)
+{
+ struct arizona *arizona = fll->arizona;
+ struct arizona_fll_cfg cfg, sync;
+ unsigned int reg, val;
+ int syncsrc;
+ bool ena;
+ int ret;
+
+ ret = regmap_read(arizona->regmap, fll->base + 1, &reg);
+ if (ret != 0) {
+ arizona_fll_err(fll, "Failed to read current state: %d\n",
+ ret);
+ return ret;
+ }
+ ena = reg & ARIZONA_FLL1_ENA;
+
+ if (Fout) {
+ /* Do we have a 32kHz reference? */
+ regmap_read(arizona->regmap, ARIZONA_CLOCK_32K_1, &val);
+ switch (val & ARIZONA_CLK_32K_SRC_MASK) {
+ case ARIZONA_CLK_SRC_MCLK1:
+ case ARIZONA_CLK_SRC_MCLK2:
+ syncsrc = val & ARIZONA_CLK_32K_SRC_MASK;
+ break;
+ default:
+ syncsrc = -1;
+ }
+
+ if (source == syncsrc)
+ syncsrc = -1;
+
+ if (syncsrc >= 0) {
+ ret = arizona_calc_fll(fll, &sync, Fref, Fout);
+ if (ret != 0)
+ return ret;
+
+ ret = arizona_calc_fll(fll, &cfg, 32768, Fout);
+ if (ret != 0)
+ return ret;
+ } else {
+ ret = arizona_calc_fll(fll, &cfg, Fref, Fout);
+ if (ret != 0)
+ return ret;
+ }
+ } else {
+ regmap_update_bits(arizona->regmap, fll->base + 1,
+ ARIZONA_FLL1_ENA, 0);
+ regmap_update_bits(arizona->regmap, fll->base + 0x11,
+ ARIZONA_FLL1_SYNC_ENA, 0);
+
+ if (ena)
+ pm_runtime_put_autosuspend(arizona->dev);
+
+ return 0;
+ }
+
+ regmap_update_bits(arizona->regmap, fll->base + 5,
+ ARIZONA_FLL1_OUTDIV_MASK,
+ cfg.outdiv << ARIZONA_FLL1_OUTDIV_SHIFT);
+
+ if (syncsrc >= 0) {
+ arizona_apply_fll(arizona, fll->base, &cfg, syncsrc);
+ arizona_apply_fll(arizona, fll->base + 0x10, &sync, source);
+ } else {
+ arizona_apply_fll(arizona, fll->base, &cfg, source);
+ }
+
+ if (!ena)
+ pm_runtime_get(arizona->dev);
+
+ /* Clear any pending completions */
+ try_wait_for_completion(&fll->ok);
+
+ regmap_update_bits(arizona->regmap, fll->base + 1,
+ ARIZONA_FLL1_ENA, ARIZONA_FLL1_ENA);
+ if (syncsrc >= 0)
+ regmap_update_bits(arizona->regmap, fll->base + 0x11,
+ ARIZONA_FLL1_SYNC_ENA,
+ ARIZONA_FLL1_SYNC_ENA);
+
+ ret = wait_for_completion_timeout(&fll->ok,
+ msecs_to_jiffies(25));
+ if (ret == 0)
+ arizona_fll_warn(fll, "Timed out waiting for lock\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_set_fll);
+
+int arizona_init_fll(struct arizona *arizona, int id, int base, int lock_irq,
+ int ok_irq, struct arizona_fll *fll)
+{
+ int ret;
+
+ init_completion(&fll->lock);
+ init_completion(&fll->ok);
+
+ fll->id = id;
+ fll->base = base;
+ fll->arizona = arizona;
+
+ snprintf(fll->lock_name, sizeof(fll->lock_name), "FLL%d lock", id);
+ snprintf(fll->clock_ok_name, sizeof(fll->clock_ok_name),
+ "FLL%d clock OK", id);
+
+ ret = arizona_request_irq(arizona, lock_irq, fll->lock_name,
+ arizona_fll_lock, fll);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to get FLL%d lock IRQ: %d\n",
+ id, ret);
+ }
+
+ ret = arizona_request_irq(arizona, ok_irq, fll->clock_ok_name,
+ arizona_fll_clock_ok, fll);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to get FLL%d clock OK IRQ: %d\n",
+ id, ret);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_init_fll);
+
+MODULE_DESCRIPTION("ASoC Wolfson Arizona class device support");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
new file mode 100644
index 000000000000..59caca8865e8
--- /dev/null
+++ b/sound/soc/codecs/arizona.h
@@ -0,0 +1,159 @@
+/*
+ * arizona.h - Wolfson Arizona class device shared support
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASOC_ARIZONA_H
+#define _ASOC_ARIZONA_H
+
+#include <linux/completion.h>
+
+#include <sound/soc.h>
+
+#define ARIZONA_CLK_SYSCLK 1
+#define ARIZONA_CLK_ASYNCCLK 2
+
+#define ARIZONA_CLK_SRC_MCLK1 0x0
+#define ARIZONA_CLK_SRC_MCLK2 0x1
+#define ARIZONA_CLK_SRC_FLL1 0x4
+#define ARIZONA_CLK_SRC_FLL2 0x5
+#define ARIZONA_CLK_SRC_AIF1BCLK 0x8
+#define ARIZONA_CLK_SRC_AIF2BCLK 0x9
+#define ARIZONA_CLK_SRC_AIF3BCLK 0xa
+
+#define ARIZONA_FLL_SRC_MCLK1 0
+#define ARIZONA_FLL_SRC_MCLK2 1
+#define ARIZONA_FLL_SRC_SLIMCLK 2
+#define ARIZONA_FLL_SRC_FLL1 3
+#define ARIZONA_FLL_SRC_FLL2 4
+#define ARIZONA_FLL_SRC_AIF1BCLK 5
+#define ARIZONA_FLL_SRC_AIF2BCLK 6
+#define ARIZONA_FLL_SRC_AIF3BCLK 7
+#define ARIZONA_FLL_SRC_AIF1LRCLK 8
+#define ARIZONA_FLL_SRC_AIF2LRCLK 9
+#define ARIZONA_FLL_SRC_AIF3LRCLK 10
+
+#define ARIZONA_MIXER_VOL_MASK 0x00FE
+#define ARIZONA_MIXER_VOL_SHIFT 1
+#define ARIZONA_MIXER_VOL_WIDTH 7
+
+#define ARIZONA_MAX_DAI 3
+
+struct arizona;
+
+struct arizona_dai_priv {
+ int clk;
+};
+
+struct arizona_priv {
+ struct arizona *arizona;
+ int sysclk;
+ int asyncclk;
+ struct arizona_dai_priv dai[ARIZONA_MAX_DAI];
+};
+
+#define ARIZONA_NUM_MIXER_INPUTS 57
+
+extern const unsigned int arizona_mixer_tlv[];
+extern const char *arizona_mixer_texts[ARIZONA_NUM_MIXER_INPUTS];
+extern int arizona_mixer_values[ARIZONA_NUM_MIXER_INPUTS];
+
+#define ARIZONA_MIXER_CONTROLS(name, base) \
+ SOC_SINGLE_RANGE_TLV(name " Input 1 Volume", base + 1, \
+ ARIZONA_MIXER_VOL_SHIFT, 0x20, 0x50, 0, \
+ arizona_mixer_tlv), \
+ SOC_SINGLE_RANGE_TLV(name " Input 2 Volume", base + 3, \
+ ARIZONA_MIXER_VOL_SHIFT, 0x20, 0x50, 0, \
+ arizona_mixer_tlv), \
+ SOC_SINGLE_RANGE_TLV(name " Input 3 Volume", base + 5, \
+ ARIZONA_MIXER_VOL_SHIFT, 0x20, 0x50, 0, \
+ arizona_mixer_tlv), \
+ SOC_SINGLE_RANGE_TLV(name " Input 4 Volume", base + 7, \
+ ARIZONA_MIXER_VOL_SHIFT, 0x20, 0x50, 0, \
+ arizona_mixer_tlv)
+
+#define ARIZONA_MUX_ENUM_DECL(name, reg) \
+ SOC_VALUE_ENUM_SINGLE_DECL(name, reg, 0, 0xff, \
+ arizona_mixer_texts, arizona_mixer_values)
+
+#define ARIZONA_MUX_CTL_DECL(name) \
+ const struct snd_kcontrol_new name##_mux = \
+ SOC_DAPM_VALUE_ENUM("Route", name##_enum)
+
+#define ARIZONA_MIXER_ENUMS(name, base_reg) \
+ static ARIZONA_MUX_ENUM_DECL(name##_in1_enum, base_reg); \
+ static ARIZONA_MUX_ENUM_DECL(name##_in2_enum, base_reg + 2); \
+ static ARIZONA_MUX_ENUM_DECL(name##_in3_enum, base_reg + 4); \
+ static ARIZONA_MUX_ENUM_DECL(name##_in4_enum, base_reg + 6); \
+ static ARIZONA_MUX_CTL_DECL(name##_in1); \
+ static ARIZONA_MUX_CTL_DECL(name##_in2); \
+ static ARIZONA_MUX_CTL_DECL(name##_in3); \
+ static ARIZONA_MUX_CTL_DECL(name##_in4)
+
+#define ARIZONA_MUX(name, ctrl) \
+ SND_SOC_DAPM_VALUE_MUX(name, SND_SOC_NOPM, 0, 0, ctrl)
+
+#define ARIZONA_MIXER_WIDGETS(name, name_str) \
+ ARIZONA_MUX(name_str " Input 1", &name##_in1_mux), \
+ ARIZONA_MUX(name_str " Input 2", &name##_in2_mux), \
+ ARIZONA_MUX(name_str " Input 3", &name##_in3_mux), \
+ ARIZONA_MUX(name_str " Input 4", &name##_in4_mux), \
+ SND_SOC_DAPM_MIXER(name_str " Mixer", SND_SOC_NOPM, 0, 0, NULL, 0)
+
+#define ARIZONA_MIXER_ROUTES(widget, name) \
+ { widget, NULL, name " Mixer" }, \
+ { name " Mixer", NULL, name " Input 1" }, \
+ { name " Mixer", NULL, name " Input 2" }, \
+ { name " Mixer", NULL, name " Input 3" }, \
+ { name " Mixer", NULL, name " Input 4" }, \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Input 1"), \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Input 2"), \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Input 3"), \
+ ARIZONA_MIXER_INPUT_ROUTES(name " Input 4")
+
+extern const struct soc_enum arizona_lhpf1_mode;
+extern const struct soc_enum arizona_lhpf2_mode;
+extern const struct soc_enum arizona_lhpf3_mode;
+extern const struct soc_enum arizona_lhpf4_mode;
+
+extern int arizona_in_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event);
+extern int arizona_out_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event);
+
+extern int arizona_set_sysclk(struct snd_soc_codec *codec, int clk_id,
+ int source, unsigned int freq, int dir);
+
+extern const struct snd_soc_dai_ops arizona_dai_ops;
+
+#define ARIZONA_FLL_NAME_LEN 20
+
+struct arizona_fll {
+ struct arizona *arizona;
+ int id;
+ unsigned int base;
+ unsigned int vco_mult;
+ struct completion lock;
+ struct completion ok;
+
+ char lock_name[ARIZONA_FLL_NAME_LEN];
+ char clock_ok_name[ARIZONA_FLL_NAME_LEN];
+};
+
+extern int arizona_init_fll(struct arizona *arizona, int id, int base,
+ int lock_irq, int ok_irq, struct arizona_fll *fll);
+extern int arizona_set_fll(struct arizona_fll *fll, int source,
+ unsigned int Fref, unsigned int Fout);
+
+extern int arizona_init_dai(struct arizona_priv *priv, int dai);
+
+#endif
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index a7109413aef1..628daf6a1d97 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -1217,11 +1216,11 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
return -ENOMEM;
cs42l52->dev = &i2c_client->dev;
- cs42l52->regmap = regmap_init_i2c(i2c_client, &cs42l52_regmap);
+ cs42l52->regmap = devm_regmap_init_i2c(i2c_client, &cs42l52_regmap);
if (IS_ERR(cs42l52->regmap)) {
ret = PTR_ERR(cs42l52->regmap);
dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
- goto err;
+ return ret;
}
i2c_set_clientdata(i2c_client, cs42l52);
@@ -1243,7 +1242,7 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
dev_err(&i2c_client->dev,
"CS42L52 Device ID (%X). Expected %X\n",
devid, CS42L52_CHIP_ID);
- goto err_regmap;
+ return ret;
}
regcache_cache_only(cs42l52->regmap, true);
@@ -1251,23 +1250,13 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
ret = snd_soc_register_codec(&i2c_client->dev,
&soc_codec_dev_cs42l52, &cs42l52_dai, 1);
if (ret < 0)
- goto err_regmap;
+ return ret;
return 0;
-
-err_regmap:
- regmap_exit(cs42l52->regmap);
-
-err:
- return ret;
}
static int cs42l52_i2c_remove(struct i2c_client *client)
{
- struct cs42l52_private *cs42l52 = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(cs42l52->regmap);
-
return 0;
}
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index e0d45fdaa750..2c08c4cb465a 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -1362,11 +1362,11 @@ static __devinit int cs42l73_i2c_probe(struct i2c_client *i2c_client,
i2c_set_clientdata(i2c_client, cs42l73);
- cs42l73->regmap = regmap_init_i2c(i2c_client, &cs42l73_regmap);
+ cs42l73->regmap = devm_regmap_init_i2c(i2c_client, &cs42l73_regmap);
if (IS_ERR(cs42l73->regmap)) {
ret = PTR_ERR(cs42l73->regmap);
dev_err(&i2c_client->dev, "regmap_init() failed: %d\n", ret);
- goto err;
+ return ret;
}
/* initialize codec */
ret = regmap_read(cs42l73->regmap, CS42L73_DEVID_AB, &reg);
@@ -1384,13 +1384,13 @@ static __devinit int cs42l73_i2c_probe(struct i2c_client *i2c_client,
dev_err(&i2c_client->dev,
"CS42L73 Device ID (%X). Expected %X\n",
devid, CS42L73_DEVID);
- goto err_regmap;
+ return ret;
}
ret = regmap_read(cs42l73->regmap, CS42L73_REVID, &reg);
if (ret < 0) {
dev_err(&i2c_client->dev, "Get Revision ID failed\n");
- goto err_regmap;
+ return ret;;
}
dev_info(&i2c_client->dev,
@@ -1402,23 +1402,13 @@ static __devinit int cs42l73_i2c_probe(struct i2c_client *i2c_client,
&soc_codec_dev_cs42l73, cs42l73_dai,
ARRAY_SIZE(cs42l73_dai));
if (ret < 0)
- goto err_regmap;
+ return ret;
return 0;
-
-err_regmap:
- regmap_exit(cs42l73->regmap);
-
-err:
- return ret;
}
static __devexit int cs42l73_i2c_remove(struct i2c_client *client)
{
- struct cs42l73_private *cs42l73 = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(cs42l73->regmap);
-
return 0;
}
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
new file mode 100644
index 000000000000..01be2a320e21
--- /dev/null
+++ b/sound/soc/codecs/da732x.c
@@ -0,0 +1,1627 @@
+/*
+ * da732x.c --- Dialog DA732X ALSA SoC Audio Driver
+ *
+ * Copyright (C) 2012 Dialog Semiconductor GmbH
+ *
+ * Author: Michal Hajduk <Michal.Hajduk@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <asm/div64.h>
+
+#include "da732x.h"
+#include "da732x_reg.h"
+
+
+struct da732x_priv {
+ struct regmap *regmap;
+ struct snd_soc_codec *codec;
+
+ unsigned int sysclk;
+ bool pll_en;
+};
+
+/*
+ * da732x register cache - default settings
+ */
+static struct reg_default da732x_reg_cache[] = {
+ { DA732X_REG_REF1 , 0x02 },
+ { DA732X_REG_BIAS_EN , 0x80 },
+ { DA732X_REG_BIAS1 , 0x00 },
+ { DA732X_REG_BIAS2 , 0x00 },
+ { DA732X_REG_BIAS3 , 0x00 },
+ { DA732X_REG_BIAS4 , 0x00 },
+ { DA732X_REG_MICBIAS2 , 0x00 },
+ { DA732X_REG_MICBIAS1 , 0x00 },
+ { DA732X_REG_MICDET , 0x00 },
+ { DA732X_REG_MIC1_PRE , 0x01 },
+ { DA732X_REG_MIC1 , 0x40 },
+ { DA732X_REG_MIC2_PRE , 0x01 },
+ { DA732X_REG_MIC2 , 0x40 },
+ { DA732X_REG_AUX1L , 0x75 },
+ { DA732X_REG_AUX1R , 0x75 },
+ { DA732X_REG_MIC3_PRE , 0x01 },
+ { DA732X_REG_MIC3 , 0x40 },
+ { DA732X_REG_INP_PINBIAS , 0x00 },
+ { DA732X_REG_INP_ZC_EN , 0x00 },
+ { DA732X_REG_INP_MUX , 0x50 },
+ { DA732X_REG_HP_DET , 0x00 },
+ { DA732X_REG_HPL_DAC_OFFSET , 0x00 },
+ { DA732X_REG_HPL_DAC_OFF_CNTL , 0x00 },
+ { DA732X_REG_HPL_OUT_OFFSET , 0x00 },
+ { DA732X_REG_HPL , 0x40 },
+ { DA732X_REG_HPL_VOL , 0x0F },
+ { DA732X_REG_HPR_DAC_OFFSET , 0x00 },
+ { DA732X_REG_HPR_DAC_OFF_CNTL , 0x00 },
+ { DA732X_REG_HPR_OUT_OFFSET , 0x00 },
+ { DA732X_REG_HPR , 0x40 },
+ { DA732X_REG_HPR_VOL , 0x0F },
+ { DA732X_REG_LIN2 , 0x4F },
+ { DA732X_REG_LIN3 , 0x4F },
+ { DA732X_REG_LIN4 , 0x4F },
+ { DA732X_REG_OUT_ZC_EN , 0x00 },
+ { DA732X_REG_HP_LIN1_GNDSEL , 0x00 },
+ { DA732X_REG_CP_HP1 , 0x0C },
+ { DA732X_REG_CP_HP2 , 0x03 },
+ { DA732X_REG_CP_CTRL1 , 0x00 },
+ { DA732X_REG_CP_CTRL2 , 0x99 },
+ { DA732X_REG_CP_CTRL3 , 0x25 },
+ { DA732X_REG_CP_LEVEL_MASK , 0x3F },
+ { DA732X_REG_CP_DET , 0x00 },
+ { DA732X_REG_CP_STATUS , 0x00 },
+ { DA732X_REG_CP_THRESH1 , 0x00 },
+ { DA732X_REG_CP_THRESH2 , 0x00 },
+ { DA732X_REG_CP_THRESH3 , 0x00 },
+ { DA732X_REG_CP_THRESH4 , 0x00 },
+ { DA732X_REG_CP_THRESH5 , 0x00 },
+ { DA732X_REG_CP_THRESH6 , 0x00 },
+ { DA732X_REG_CP_THRESH7 , 0x00 },
+ { DA732X_REG_CP_THRESH8 , 0x00 },
+ { DA732X_REG_PLL_DIV_LO , 0x00 },
+ { DA732X_REG_PLL_DIV_MID , 0x00 },
+ { DA732X_REG_PLL_DIV_HI , 0x00 },
+ { DA732X_REG_PLL_CTRL , 0x02 },
+ { DA732X_REG_CLK_CTRL , 0xaa },
+ { DA732X_REG_CLK_DSP , 0x07 },
+ { DA732X_REG_CLK_EN1 , 0x00 },
+ { DA732X_REG_CLK_EN2 , 0x00 },
+ { DA732X_REG_CLK_EN3 , 0x00 },
+ { DA732X_REG_CLK_EN4 , 0x00 },
+ { DA732X_REG_CLK_EN5 , 0x00 },
+ { DA732X_REG_AIF_MCLK , 0x00 },
+ { DA732X_REG_AIFA1 , 0x02 },
+ { DA732X_REG_AIFA2 , 0x00 },
+ { DA732X_REG_AIFA3 , 0x08 },
+ { DA732X_REG_AIFB1 , 0x02 },
+ { DA732X_REG_AIFB2 , 0x00 },
+ { DA732X_REG_AIFB3 , 0x08 },
+ { DA732X_REG_PC_CTRL , 0xC0 },
+ { DA732X_REG_DATA_ROUTE , 0x00 },
+ { DA732X_REG_DSP_CTRL , 0x00 },
+ { DA732X_REG_CIF_CTRL2 , 0x00 },
+ { DA732X_REG_HANDSHAKE , 0x00 },
+ { DA732X_REG_SPARE1_OUT , 0x00 },
+ { DA732X_REG_SPARE2_OUT , 0x00 },
+ { DA732X_REG_SPARE1_IN , 0x00 },
+ { DA732X_REG_ADC1_PD , 0x00 },
+ { DA732X_REG_ADC1_HPF , 0x00 },
+ { DA732X_REG_ADC1_SEL , 0x00 },
+ { DA732X_REG_ADC1_EQ12 , 0x00 },
+ { DA732X_REG_ADC1_EQ34 , 0x00 },
+ { DA732X_REG_ADC1_EQ5 , 0x00 },
+ { DA732X_REG_ADC2_PD , 0x00 },
+ { DA732X_REG_ADC2_HPF , 0x00 },
+ { DA732X_REG_ADC2_SEL , 0x00 },
+ { DA732X_REG_ADC2_EQ12 , 0x00 },
+ { DA732X_REG_ADC2_EQ34 , 0x00 },
+ { DA732X_REG_ADC2_EQ5 , 0x00 },
+ { DA732X_REG_DAC1_HPF , 0x00 },
+ { DA732X_REG_DAC1_L_VOL , 0x00 },
+ { DA732X_REG_DAC1_R_VOL , 0x00 },
+ { DA732X_REG_DAC1_SEL , 0x00 },
+ { DA732X_REG_DAC1_SOFTMUTE , 0x00 },
+ { DA732X_REG_DAC1_EQ12 , 0x00 },
+ { DA732X_REG_DAC1_EQ34 , 0x00 },
+ { DA732X_REG_DAC1_EQ5 , 0x00 },
+ { DA732X_REG_DAC2_HPF , 0x00 },
+ { DA732X_REG_DAC2_L_VOL , 0x00 },
+ { DA732X_REG_DAC2_R_VOL , 0x00 },
+ { DA732X_REG_DAC2_SEL , 0x00 },
+ { DA732X_REG_DAC2_SOFTMUTE , 0x00 },
+ { DA732X_REG_DAC2_EQ12 , 0x00 },
+ { DA732X_REG_DAC2_EQ34 , 0x00 },
+ { DA732X_REG_DAC2_EQ5 , 0x00 },
+ { DA732X_REG_DAC3_HPF , 0x00 },
+ { DA732X_REG_DAC3_VOL , 0x00 },
+ { DA732X_REG_DAC3_SEL , 0x00 },
+ { DA732X_REG_DAC3_SOFTMUTE , 0x00 },
+ { DA732X_REG_DAC3_EQ12 , 0x00 },
+ { DA732X_REG_DAC3_EQ34 , 0x00 },
+ { DA732X_REG_DAC3_EQ5 , 0x00 },
+ { DA732X_REG_BIQ_BYP , 0x00 },
+ { DA732X_REG_DMA_CMD , 0x00 },
+ { DA732X_REG_DMA_ADDR0 , 0x00 },
+ { DA732X_REG_DMA_ADDR1 , 0x00 },
+ { DA732X_REG_DMA_DATA0 , 0x00 },
+ { DA732X_REG_DMA_DATA1 , 0x00 },
+ { DA732X_REG_DMA_DATA2 , 0x00 },
+ { DA732X_REG_DMA_DATA3 , 0x00 },
+ { DA732X_REG_UNLOCK , 0x00 },
+};
+
+static inline int da732x_get_input_div(struct snd_soc_codec *codec, int sysclk)
+{
+ int val;
+ int ret;
+
+ if (sysclk < DA732X_MCLK_10MHZ) {
+ val = DA732X_MCLK_RET_0_10MHZ;
+ ret = DA732X_MCLK_VAL_0_10MHZ;
+ } else if ((sysclk >= DA732X_MCLK_10MHZ) &&
+ (sysclk < DA732X_MCLK_20MHZ)) {
+ val = DA732X_MCLK_RET_10_20MHZ;
+ ret = DA732X_MCLK_VAL_10_20MHZ;
+ } else if ((sysclk >= DA732X_MCLK_20MHZ) &&
+ (sysclk < DA732X_MCLK_40MHZ)) {
+ val = DA732X_MCLK_RET_20_40MHZ;
+ ret = DA732X_MCLK_VAL_20_40MHZ;
+ } else if ((sysclk >= DA732X_MCLK_40MHZ) &&
+ (sysclk <= DA732X_MCLK_54MHZ)) {
+ val = DA732X_MCLK_RET_40_54MHZ;
+ ret = DA732X_MCLK_VAL_40_54MHZ;
+ } else {
+ return -EINVAL;
+ }
+
+ snd_soc_write(codec, DA732X_REG_PLL_CTRL, val);
+
+ return ret;
+}
+
+static void da732x_set_charge_pump(struct snd_soc_codec *codec, int state)
+{
+ switch (state) {
+ case DA732X_ENABLE_CP:
+ snd_soc_write(codec, DA732X_REG_CLK_EN2, DA732X_CP_CLK_EN);
+ snd_soc_write(codec, DA732X_REG_CP_HP2, DA732X_HP_CP_EN |
+ DA732X_HP_CP_REG | DA732X_HP_CP_PULSESKIP);
+ snd_soc_write(codec, DA732X_REG_CP_CTRL1, DA732X_CP_EN |
+ DA732X_CP_CTRL_CPVDD1);
+ snd_soc_write(codec, DA732X_REG_CP_CTRL2,
+ DA732X_CP_MANAGE_MAGNITUDE | DA732X_CP_BOOST);
+ snd_soc_write(codec, DA732X_REG_CP_CTRL3, DA732X_CP_1MHZ);
+ break;
+ case DA732X_DISABLE_CP:
+ snd_soc_write(codec, DA732X_REG_CLK_EN2, DA732X_CP_CLK_DIS);
+ snd_soc_write(codec, DA732X_REG_CP_HP2, DA732X_HP_CP_DIS);
+ snd_soc_write(codec, DA732X_REG_CP_CTRL1, DA723X_CP_DIS);
+ break;
+ default:
+ pr_err(KERN_ERR "Wrong charge pump state\n");
+ break;
+ }
+}
+
+static const DECLARE_TLV_DB_SCALE(mic_boost_tlv, DA732X_MIC_PRE_VOL_DB_MIN,
+ DA732X_MIC_PRE_VOL_DB_INC, 0);
+
+static const DECLARE_TLV_DB_SCALE(mic_pga_tlv, DA732X_MIC_VOL_DB_MIN,
+ DA732X_MIC_VOL_DB_INC, 0);
+
+static const DECLARE_TLV_DB_SCALE(aux_pga_tlv, DA732X_AUX_VOL_DB_MIN,
+ DA732X_AUX_VOL_DB_INC, 0);
+
+static const DECLARE_TLV_DB_SCALE(hp_pga_tlv, DA732X_HP_VOL_DB_MIN,
+ DA732X_AUX_VOL_DB_INC, 0);
+
+static const DECLARE_TLV_DB_SCALE(lin2_pga_tlv, DA732X_LIN2_VOL_DB_MIN,
+ DA732X_LIN2_VOL_DB_INC, 0);
+
+static const DECLARE_TLV_DB_SCALE(lin3_pga_tlv, DA732X_LIN3_VOL_DB_MIN,
+ DA732X_LIN3_VOL_DB_INC, 0);
+
+static const DECLARE_TLV_DB_SCALE(lin4_pga_tlv, DA732X_LIN4_VOL_DB_MIN,
+ DA732X_LIN4_VOL_DB_INC, 0);
+
+static const DECLARE_TLV_DB_SCALE(adc_pga_tlv, DA732X_ADC_VOL_DB_MIN,
+ DA732X_ADC_VOL_DB_INC, 0);
+
+static const DECLARE_TLV_DB_SCALE(dac_pga_tlv, DA732X_DAC_VOL_DB_MIN,
+ DA732X_DAC_VOL_DB_INC, 0);
+
+static const DECLARE_TLV_DB_SCALE(eq_band_pga_tlv, DA732X_EQ_BAND_VOL_DB_MIN,
+ DA732X_EQ_BAND_VOL_DB_INC, 0);
+
+static const DECLARE_TLV_DB_SCALE(eq_overall_tlv, DA732X_EQ_OVERALL_VOL_DB_MIN,
+ DA732X_EQ_OVERALL_VOL_DB_INC, 0);
+
+/* High Pass Filter */
+static const char *da732x_hpf_mode[] = {
+ "Disable", "Music", "Voice",
+};
+
+static const char *da732x_hpf_music[] = {
+ "1.8Hz", "3.75Hz", "7.5Hz", "15Hz",
+};
+
+static const char *da732x_hpf_voice[] = {
+ "2.5Hz", "25Hz", "50Hz", "100Hz",
+ "150Hz", "200Hz", "300Hz", "400Hz"
+};
+
+static const struct soc_enum da732x_dac1_hpf_mode_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_DAC1_HPF, DA732X_HPF_MODE_SHIFT,
+ DA732X_HPF_MODE_MAX, da732x_hpf_mode)
+};
+
+static const struct soc_enum da732x_dac2_hpf_mode_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_DAC2_HPF, DA732X_HPF_MODE_SHIFT,
+ DA732X_HPF_MODE_MAX, da732x_hpf_mode)
+};
+
+static const struct soc_enum da732x_dac3_hpf_mode_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_DAC3_HPF, DA732X_HPF_MODE_SHIFT,
+ DA732X_HPF_MODE_MAX, da732x_hpf_mode)
+};
+
+static const struct soc_enum da732x_adc1_hpf_mode_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_ADC1_HPF, DA732X_HPF_MODE_SHIFT,
+ DA732X_HPF_MODE_MAX, da732x_hpf_mode)
+};
+
+static const struct soc_enum da732x_adc2_hpf_mode_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_ADC2_HPF, DA732X_HPF_MODE_SHIFT,
+ DA732X_HPF_MODE_MAX, da732x_hpf_mode)
+};
+
+static const struct soc_enum da732x_dac1_hp_filter_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_DAC1_HPF, DA732X_HPF_MUSIC_SHIFT,
+ DA732X_HPF_MUSIC_MAX, da732x_hpf_music)
+};
+
+static const struct soc_enum da732x_dac2_hp_filter_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_DAC2_HPF, DA732X_HPF_MUSIC_SHIFT,
+ DA732X_HPF_MUSIC_MAX, da732x_hpf_music)
+};
+
+static const struct soc_enum da732x_dac3_hp_filter_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_DAC3_HPF, DA732X_HPF_MUSIC_SHIFT,
+ DA732X_HPF_MUSIC_MAX, da732x_hpf_music)
+};
+
+static const struct soc_enum da732x_adc1_hp_filter_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_ADC1_HPF, DA732X_HPF_MUSIC_SHIFT,
+ DA732X_HPF_MUSIC_MAX, da732x_hpf_music)
+};
+
+static const struct soc_enum da732x_adc2_hp_filter_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_ADC2_HPF, DA732X_HPF_MUSIC_SHIFT,
+ DA732X_HPF_MUSIC_MAX, da732x_hpf_music)
+};
+
+static const struct soc_enum da732x_dac1_voice_filter_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_DAC1_HPF, DA732X_HPF_VOICE_SHIFT,
+ DA732X_HPF_VOICE_MAX, da732x_hpf_voice)
+};
+
+static const struct soc_enum da732x_dac2_voice_filter_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_DAC2_HPF, DA732X_HPF_VOICE_SHIFT,
+ DA732X_HPF_VOICE_MAX, da732x_hpf_voice)
+};
+
+static const struct soc_enum da732x_dac3_voice_filter_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_DAC3_HPF, DA732X_HPF_VOICE_SHIFT,
+ DA732X_HPF_VOICE_MAX, da732x_hpf_voice)
+};
+
+static const struct soc_enum da732x_adc1_voice_filter_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_ADC1_HPF, DA732X_HPF_VOICE_SHIFT,
+ DA732X_HPF_VOICE_MAX, da732x_hpf_voice)
+};
+
+static const struct soc_enum da732x_adc2_voice_filter_enum[] = {
+ SOC_ENUM_SINGLE(DA732X_REG_ADC2_HPF, DA732X_HPF_VOICE_SHIFT,
+ DA732X_HPF_VOICE_MAX, da732x_hpf_voice)
+};
+
+
+static int da732x_hpf_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct soc_enum *enum_ctrl = (struct soc_enum *)kcontrol->private_value;
+ unsigned int reg = enum_ctrl->reg;
+ unsigned int sel = ucontrol->value.integer.value[0];
+ unsigned int bits;
+
+ switch (sel) {
+ case DA732X_HPF_DISABLED:
+ bits = DA732X_HPF_DIS;
+ break;
+ case DA732X_HPF_VOICE:
+ bits = DA732X_HPF_VOICE_EN;
+ break;
+ case DA732X_HPF_MUSIC:
+ bits = DA732X_HPF_MUSIC_EN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, reg, DA732X_HPF_MASK, bits);
+
+ return 0;
+}
+
+static int da732x_hpf_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct soc_enum *enum_ctrl = (struct soc_enum *)kcontrol->private_value;
+ unsigned int reg = enum_ctrl->reg;
+ int val;
+
+ val = snd_soc_read(codec, reg) & DA732X_HPF_MASK;
+
+ switch (val) {
+ case DA732X_HPF_VOICE_EN:
+ ucontrol->value.integer.value[0] = DA732X_HPF_VOICE;
+ break;
+ case DA732X_HPF_MUSIC_EN:
+ ucontrol->value.integer.value[0] = DA732X_HPF_MUSIC;
+ break;
+ default:
+ ucontrol->value.integer.value[0] = DA732X_HPF_DISABLED;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_kcontrol_new da732x_snd_controls[] = {
+ /* Input PGAs */
+ SOC_SINGLE_RANGE_TLV("MIC1 Boost Volume", DA732X_REG_MIC1_PRE,
+ DA732X_MICBOOST_SHIFT, DA732X_MICBOOST_MIN,
+ DA732X_MICBOOST_MAX, 0, mic_boost_tlv),
+ SOC_SINGLE_RANGE_TLV("MIC2 Boost Volume", DA732X_REG_MIC2_PRE,
+ DA732X_MICBOOST_SHIFT, DA732X_MICBOOST_MIN,
+ DA732X_MICBOOST_MAX, 0, mic_boost_tlv),
+ SOC_SINGLE_RANGE_TLV("MIC3 Boost Volume", DA732X_REG_MIC3_PRE,
+ DA732X_MICBOOST_SHIFT, DA732X_MICBOOST_MIN,
+ DA732X_MICBOOST_MAX, 0, mic_boost_tlv),
+
+ /* MICs */
+ SOC_SINGLE("MIC1 Switch", DA732X_REG_MIC1, DA732X_MIC_MUTE_SHIFT,
+ DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_RANGE_TLV("MIC1 Volume", DA732X_REG_MIC1,
+ DA732X_MIC_VOL_SHIFT, DA732X_MIC_VOL_VAL_MIN,
+ DA732X_MIC_VOL_VAL_MAX, 0, mic_pga_tlv),
+ SOC_SINGLE("MIC2 Switch", DA732X_REG_MIC2, DA732X_MIC_MUTE_SHIFT,
+ DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_RANGE_TLV("MIC2 Volume", DA732X_REG_MIC2,
+ DA732X_MIC_VOL_SHIFT, DA732X_MIC_VOL_VAL_MIN,
+ DA732X_MIC_VOL_VAL_MAX, 0, mic_pga_tlv),
+ SOC_SINGLE("MIC3 Switch", DA732X_REG_MIC3, DA732X_MIC_MUTE_SHIFT,
+ DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_RANGE_TLV("MIC3 Volume", DA732X_REG_MIC3,
+ DA732X_MIC_VOL_SHIFT, DA732X_MIC_VOL_VAL_MIN,
+ DA732X_MIC_VOL_VAL_MAX, 0, mic_pga_tlv),
+
+ /* AUXs */
+ SOC_SINGLE("AUX1L Switch", DA732X_REG_AUX1L, DA732X_AUX_MUTE_SHIFT,
+ DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_TLV("AUX1L Volume", DA732X_REG_AUX1L,
+ DA732X_AUX_VOL_SHIFT, DA732X_AUX_VOL_VAL_MAX,
+ DA732X_NO_INVERT, aux_pga_tlv),
+ SOC_SINGLE("AUX1R Switch", DA732X_REG_AUX1R, DA732X_AUX_MUTE_SHIFT,
+ DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_TLV("AUX1R Volume", DA732X_REG_AUX1R,
+ DA732X_AUX_VOL_SHIFT, DA732X_AUX_VOL_VAL_MAX,
+ DA732X_NO_INVERT, aux_pga_tlv),
+
+ /* ADCs */
+ SOC_DOUBLE_TLV("ADC1 Volume", DA732X_REG_ADC1_SEL,
+ DA732X_ADCL_VOL_SHIFT, DA732X_ADCR_VOL_SHIFT,
+ DA732X_ADC_VOL_VAL_MAX, DA732X_INVERT, adc_pga_tlv),
+
+ SOC_DOUBLE_TLV("ADC2 Volume", DA732X_REG_ADC2_SEL,
+ DA732X_ADCL_VOL_SHIFT, DA732X_ADCR_VOL_SHIFT,
+ DA732X_ADC_VOL_VAL_MAX, DA732X_INVERT, adc_pga_tlv),
+
+ /* DACs */
+ SOC_DOUBLE("Digital Playback DAC12 Switch", DA732X_REG_DAC1_SEL,
+ DA732X_DACL_MUTE_SHIFT, DA732X_DACR_MUTE_SHIFT,
+ DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_DOUBLE_R_TLV("Digital Playback DAC12 Volume", DA732X_REG_DAC1_L_VOL,
+ DA732X_REG_DAC1_R_VOL, DA732X_DAC_VOL_SHIFT,
+ DA732X_DAC_VOL_VAL_MAX, DA732X_INVERT, dac_pga_tlv),
+ SOC_SINGLE("Digital Playback DAC3 Switch", DA732X_REG_DAC2_SEL,
+ DA732X_DACL_MUTE_SHIFT, DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_TLV("Digital Playback DAC3 Volume", DA732X_REG_DAC2_L_VOL,
+ DA732X_DAC_VOL_SHIFT, DA732X_DAC_VOL_VAL_MAX,
+ DA732X_INVERT, dac_pga_tlv),
+ SOC_SINGLE("Digital Playback DAC4 Switch", DA732X_REG_DAC2_SEL,
+ DA732X_DACR_MUTE_SHIFT, DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_TLV("Digital Playback DAC4 Volume", DA732X_REG_DAC2_R_VOL,
+ DA732X_DAC_VOL_SHIFT, DA732X_DAC_VOL_VAL_MAX,
+ DA732X_INVERT, dac_pga_tlv),
+ SOC_SINGLE("Digital Playback DAC5 Switch", DA732X_REG_DAC3_SEL,
+ DA732X_DACL_MUTE_SHIFT, DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_TLV("Digital Playback DAC5 Volume", DA732X_REG_DAC3_VOL,
+ DA732X_DAC_VOL_SHIFT, DA732X_DAC_VOL_VAL_MAX,
+ DA732X_INVERT, dac_pga_tlv),
+
+ /* High Pass Filters */
+ SOC_ENUM_EXT("DAC1 High Pass Filter Mode",
+ da732x_dac1_hpf_mode_enum, da732x_hpf_get, da732x_hpf_set),
+ SOC_ENUM("DAC1 High Pass Filter", da732x_dac1_hp_filter_enum),
+ SOC_ENUM("DAC1 Voice Filter", da732x_dac1_voice_filter_enum),
+
+ SOC_ENUM_EXT("DAC2 High Pass Filter Mode",
+ da732x_dac2_hpf_mode_enum, da732x_hpf_get, da732x_hpf_set),
+ SOC_ENUM("DAC2 High Pass Filter", da732x_dac2_hp_filter_enum),
+ SOC_ENUM("DAC2 Voice Filter", da732x_dac2_voice_filter_enum),
+
+ SOC_ENUM_EXT("DAC3 High Pass Filter Mode",
+ da732x_dac3_hpf_mode_enum, da732x_hpf_get, da732x_hpf_set),
+ SOC_ENUM("DAC3 High Pass Filter", da732x_dac3_hp_filter_enum),
+ SOC_ENUM("DAC3 Filter Mode", da732x_dac3_voice_filter_enum),
+
+ SOC_ENUM_EXT("ADC1 High Pass Filter Mode",
+ da732x_adc1_hpf_mode_enum, da732x_hpf_get, da732x_hpf_set),
+ SOC_ENUM("ADC1 High Pass Filter", da732x_adc1_hp_filter_enum),
+ SOC_ENUM("ADC1 Voice Filter", da732x_adc1_voice_filter_enum),
+
+ SOC_ENUM_EXT("ADC2 High Pass Filter Mode",
+ da732x_adc2_hpf_mode_enum, da732x_hpf_get, da732x_hpf_set),
+ SOC_ENUM("ADC2 High Pass Filter", da732x_adc2_hp_filter_enum),
+ SOC_ENUM("ADC2 Voice Filter", da732x_adc2_voice_filter_enum),
+
+ /* Equalizers */
+ SOC_SINGLE("ADC1 EQ Switch", DA732X_REG_ADC1_EQ5,
+ DA732X_EQ_EN_SHIFT, DA732X_EQ_EN_MAX, DA732X_NO_INVERT),
+ SOC_SINGLE_TLV("ADC1 EQ Band 1 Volume", DA732X_REG_ADC1_EQ12,
+ DA732X_EQ_BAND1_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("ADC1 EQ Band 2 Volume", DA732X_REG_ADC1_EQ12,
+ DA732X_EQ_BAND2_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("ADC1 EQ Band 3 Volume", DA732X_REG_ADC1_EQ34,
+ DA732X_EQ_BAND3_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("ADC1 EQ Band 4 Volume", DA732X_REG_ADC1_EQ34,
+ DA732X_EQ_BAND4_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("ADC1 EQ Band 5 Volume", DA732X_REG_ADC1_EQ5,
+ DA732X_EQ_BAND5_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("ADC1 EQ Overall Volume", DA732X_REG_ADC1_EQ5,
+ DA732X_EQ_OVERALL_SHIFT, DA732X_EQ_OVERALL_VOL_VAL_MAX,
+ DA732X_INVERT, eq_overall_tlv),
+
+ SOC_SINGLE("ADC2 EQ Switch", DA732X_REG_ADC2_EQ5,
+ DA732X_EQ_EN_SHIFT, DA732X_EQ_EN_MAX, DA732X_NO_INVERT),
+ SOC_SINGLE_TLV("ADC2 EQ Band 1 Volume", DA732X_REG_ADC2_EQ12,
+ DA732X_EQ_BAND1_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("ADC2 EQ Band 2 Volume", DA732X_REG_ADC2_EQ12,
+ DA732X_EQ_BAND2_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("ADC2 EQ Band 3 Volume", DA732X_REG_ADC2_EQ34,
+ DA732X_EQ_BAND3_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("ACD2 EQ Band 4 Volume", DA732X_REG_ADC2_EQ34,
+ DA732X_EQ_BAND4_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("ACD2 EQ Band 5 Volume", DA732X_REG_ADC2_EQ5,
+ DA732X_EQ_BAND5_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("ADC2 EQ Overall Volume", DA732X_REG_ADC1_EQ5,
+ DA732X_EQ_OVERALL_SHIFT, DA732X_EQ_OVERALL_VOL_VAL_MAX,
+ DA732X_INVERT, eq_overall_tlv),
+
+ SOC_SINGLE("DAC1 EQ Switch", DA732X_REG_DAC1_EQ5,
+ DA732X_EQ_EN_SHIFT, DA732X_EQ_EN_MAX, DA732X_NO_INVERT),
+ SOC_SINGLE_TLV("DAC1 EQ Band 1 Volume", DA732X_REG_DAC1_EQ12,
+ DA732X_EQ_BAND1_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC1 EQ Band 2 Volume", DA732X_REG_DAC1_EQ12,
+ DA732X_EQ_BAND2_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC1 EQ Band 3 Volume", DA732X_REG_DAC1_EQ34,
+ DA732X_EQ_BAND3_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC1 EQ Band 4 Volume", DA732X_REG_DAC1_EQ34,
+ DA732X_EQ_BAND4_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC1 EQ Band 5 Volume", DA732X_REG_DAC1_EQ5,
+ DA732X_EQ_BAND5_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+
+ SOC_SINGLE("DAC2 EQ Switch", DA732X_REG_DAC2_EQ5,
+ DA732X_EQ_EN_SHIFT, DA732X_EQ_EN_MAX, DA732X_NO_INVERT),
+ SOC_SINGLE_TLV("DAC2 EQ Band 1 Volume", DA732X_REG_DAC2_EQ12,
+ DA732X_EQ_BAND1_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC2 EQ Band 2 Volume", DA732X_REG_DAC2_EQ12,
+ DA732X_EQ_BAND2_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC2 EQ Band 3 Volume", DA732X_REG_DAC2_EQ34,
+ DA732X_EQ_BAND3_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC2 EQ Band 4 Volume", DA732X_REG_DAC2_EQ34,
+ DA732X_EQ_BAND4_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC2 EQ Band 5 Volume", DA732X_REG_DAC2_EQ5,
+ DA732X_EQ_BAND5_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+
+ SOC_SINGLE("DAC3 EQ Switch", DA732X_REG_DAC3_EQ5,
+ DA732X_EQ_EN_SHIFT, DA732X_EQ_EN_MAX, DA732X_NO_INVERT),
+ SOC_SINGLE_TLV("DAC3 EQ Band 1 Volume", DA732X_REG_DAC3_EQ12,
+ DA732X_EQ_BAND1_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC3 EQ Band 2 Volume", DA732X_REG_DAC3_EQ12,
+ DA732X_EQ_BAND2_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC3 EQ Band 3 Volume", DA732X_REG_DAC3_EQ34,
+ DA732X_EQ_BAND3_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC3 EQ Band 4 Volume", DA732X_REG_DAC3_EQ34,
+ DA732X_EQ_BAND4_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+ SOC_SINGLE_TLV("DAC3 EQ Band 5 Volume", DA732X_REG_DAC3_EQ5,
+ DA732X_EQ_BAND5_SHIFT, DA732X_EQ_VOL_VAL_MAX,
+ DA732X_INVERT, eq_band_pga_tlv),
+
+ /* Lineout 2 Reciever*/
+ SOC_SINGLE("Lineout 2 Switch", DA732X_REG_LIN2, DA732X_LOUT_MUTE_SHIFT,
+ DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_TLV("Lineout 2 Volume", DA732X_REG_LIN2,
+ DA732X_LOUT_VOL_SHIFT, DA732X_LOUT_VOL_VAL_MAX,
+ DA732X_NO_INVERT, lin2_pga_tlv),
+
+ /* Lineout 3 SPEAKER*/
+ SOC_SINGLE("Lineout 3 Switch", DA732X_REG_LIN3, DA732X_LOUT_MUTE_SHIFT,
+ DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_TLV("Lineout 3 Volume", DA732X_REG_LIN3,
+ DA732X_LOUT_VOL_SHIFT, DA732X_LOUT_VOL_VAL_MAX,
+ DA732X_NO_INVERT, lin3_pga_tlv),
+
+ /* Lineout 4 */
+ SOC_SINGLE("Lineout 4 Switch", DA732X_REG_LIN4, DA732X_LOUT_MUTE_SHIFT,
+ DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_SINGLE_TLV("Lineout 4 Volume", DA732X_REG_LIN4,
+ DA732X_LOUT_VOL_SHIFT, DA732X_LOUT_VOL_VAL_MAX,
+ DA732X_NO_INVERT, lin4_pga_tlv),
+
+ /* Headphones */
+ SOC_DOUBLE_R("Headphone Switch", DA732X_REG_HPR, DA732X_REG_HPL,
+ DA732X_HP_MUTE_SHIFT, DA732X_SWITCH_MAX, DA732X_INVERT),
+ SOC_DOUBLE_R_TLV("Headphone Volume", DA732X_REG_HPL_VOL,
+ DA732X_REG_HPR_VOL, DA732X_HP_VOL_SHIFT,
+ DA732X_HP_VOL_VAL_MAX, DA732X_NO_INVERT, hp_pga_tlv),
+};
+
+static int da732x_adc_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ switch (w->reg) {
+ case DA732X_REG_ADC1_PD:
+ snd_soc_update_bits(codec, DA732X_REG_CLK_EN3,
+ DA732X_ADCA_BB_CLK_EN,
+ DA732X_ADCA_BB_CLK_EN);
+ break;
+ case DA732X_REG_ADC2_PD:
+ snd_soc_update_bits(codec, DA732X_REG_CLK_EN3,
+ DA732X_ADCC_BB_CLK_EN,
+ DA732X_ADCC_BB_CLK_EN);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, w->reg, DA732X_ADC_RST_MASK,
+ DA732X_ADC_SET_ACT);
+ snd_soc_update_bits(codec, w->reg, DA732X_ADC_PD_MASK,
+ DA732X_ADC_ON);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, w->reg, DA732X_ADC_PD_MASK,
+ DA732X_ADC_OFF);
+ snd_soc_update_bits(codec, w->reg, DA732X_ADC_RST_MASK,
+ DA732X_ADC_SET_RST);
+
+ switch (w->reg) {
+ case DA732X_REG_ADC1_PD:
+ snd_soc_update_bits(codec, DA732X_REG_CLK_EN3,
+ DA732X_ADCA_BB_CLK_EN, 0);
+ break;
+ case DA732X_REG_ADC2_PD:
+ snd_soc_update_bits(codec, DA732X_REG_CLK_EN3,
+ DA732X_ADCC_BB_CLK_EN, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int da732x_out_pga_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec, w->reg,
+ (1 << w->shift) | DA732X_OUT_HIZ_EN,
+ (1 << w->shift) | DA732X_OUT_HIZ_EN);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, w->reg,
+ (1 << w->shift) | DA732X_OUT_HIZ_EN,
+ (1 << w->shift) | DA732X_OUT_HIZ_DIS);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const char *adcl_text[] = {
+ "AUX1L", "MIC1"
+};
+
+static const char *adcr_text[] = {
+ "AUX1R", "MIC2", "MIC3"
+};
+
+static const char *enable_text[] = {
+ "Disabled",
+ "Enabled"
+};
+
+/* ADC1LMUX */
+static const struct soc_enum adc1l_enum =
+ SOC_ENUM_SINGLE(DA732X_REG_INP_MUX, DA732X_ADC1L_MUX_SEL_SHIFT,
+ DA732X_ADCL_MUX_MAX, adcl_text);
+static const struct snd_kcontrol_new adc1l_mux =
+ SOC_DAPM_ENUM("ADC Route", adc1l_enum);
+
+/* ADC1RMUX */
+static const struct soc_enum adc1r_enum =
+ SOC_ENUM_SINGLE(DA732X_REG_INP_MUX, DA732X_ADC1R_MUX_SEL_SHIFT,
+ DA732X_ADCR_MUX_MAX, adcr_text);
+static const struct snd_kcontrol_new adc1r_mux =
+ SOC_DAPM_ENUM("ADC Route", adc1r_enum);
+
+/* ADC2LMUX */
+static const struct soc_enum adc2l_enum =
+ SOC_ENUM_SINGLE(DA732X_REG_INP_MUX, DA732X_ADC2L_MUX_SEL_SHIFT,
+ DA732X_ADCL_MUX_MAX, adcl_text);
+static const struct snd_kcontrol_new adc2l_mux =
+ SOC_DAPM_ENUM("ADC Route", adc2l_enum);
+
+/* ADC2RMUX */
+static const struct soc_enum adc2r_enum =
+ SOC_ENUM_SINGLE(DA732X_REG_INP_MUX, DA732X_ADC2R_MUX_SEL_SHIFT,
+ DA732X_ADCR_MUX_MAX, adcr_text);
+
+static const struct snd_kcontrol_new adc2r_mux =
+ SOC_DAPM_ENUM("ADC Route", adc2r_enum);
+
+static const struct soc_enum da732x_hp_left_output =
+ SOC_ENUM_SINGLE(DA732X_REG_HPL, DA732X_HP_OUT_DAC_EN_SHIFT,
+ DA732X_DAC_EN_MAX, enable_text);
+
+static const struct snd_kcontrol_new hpl_mux =
+ SOC_DAPM_ENUM("HPL Switch", da732x_hp_left_output);
+
+static const struct soc_enum da732x_hp_right_output =
+ SOC_ENUM_SINGLE(DA732X_REG_HPR, DA732X_HP_OUT_DAC_EN_SHIFT,
+ DA732X_DAC_EN_MAX, enable_text);
+
+static const struct snd_kcontrol_new hpr_mux =
+ SOC_DAPM_ENUM("HPR Switch", da732x_hp_right_output);
+
+static const struct soc_enum da732x_speaker_output =
+ SOC_ENUM_SINGLE(DA732X_REG_LIN3, DA732X_LOUT_DAC_EN_SHIFT,
+ DA732X_DAC_EN_MAX, enable_text);
+
+static const struct snd_kcontrol_new spk_mux =
+ SOC_DAPM_ENUM("SPK Switch", da732x_speaker_output);
+
+static const struct soc_enum da732x_lout4_output =
+ SOC_ENUM_SINGLE(DA732X_REG_LIN4, DA732X_LOUT_DAC_EN_SHIFT,
+ DA732X_DAC_EN_MAX, enable_text);
+
+static const struct snd_kcontrol_new lout4_mux =
+ SOC_DAPM_ENUM("LOUT4 Switch", da732x_lout4_output);
+
+static const struct soc_enum da732x_lout2_output =
+ SOC_ENUM_SINGLE(DA732X_REG_LIN2, DA732X_LOUT_DAC_EN_SHIFT,
+ DA732X_DAC_EN_MAX, enable_text);
+
+static const struct snd_kcontrol_new lout2_mux =
+ SOC_DAPM_ENUM("LOUT2 Switch", da732x_lout2_output);
+
+static const struct snd_soc_dapm_widget da732x_dapm_widgets[] = {
+ /* Supplies */
+ SND_SOC_DAPM_SUPPLY("ADC1 Supply", DA732X_REG_ADC1_PD, 0,
+ DA732X_NO_INVERT, da732x_adc_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("ADC2 Supply", DA732X_REG_ADC2_PD, 0,
+ DA732X_NO_INVERT, da732x_adc_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_SUPPLY("DAC1 CLK", DA732X_REG_CLK_EN4,
+ DA732X_DACA_BB_CLK_SHIFT, DA732X_NO_INVERT,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC2 CLK", DA732X_REG_CLK_EN4,
+ DA732X_DACC_BB_CLK_SHIFT, DA732X_NO_INVERT,
+ NULL, 0),
+ SND_SOC_DAPM_SUPPLY("DAC3 CLK", DA732X_REG_CLK_EN5,
+ DA732X_DACE_BB_CLK_SHIFT, DA732X_NO_INVERT,
+ NULL, 0),
+
+ /* Micbias */
+ SND_SOC_DAPM_SUPPLY("MICBIAS1", DA732X_REG_MICBIAS1,
+ DA732X_MICBIAS_EN_SHIFT,
+ DA732X_NO_INVERT, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("MICBIAS2", DA732X_REG_MICBIAS2,
+ DA732X_MICBIAS_EN_SHIFT,
+ DA732X_NO_INVERT, NULL, 0),
+
+ /* Inputs */
+ SND_SOC_DAPM_INPUT("MIC1"),
+ SND_SOC_DAPM_INPUT("MIC2"),
+ SND_SOC_DAPM_INPUT("MIC3"),
+ SND_SOC_DAPM_INPUT("AUX1L"),
+ SND_SOC_DAPM_INPUT("AUX1R"),
+
+ /* Outputs */
+ SND_SOC_DAPM_OUTPUT("HPL"),
+ SND_SOC_DAPM_OUTPUT("HPR"),
+ SND_SOC_DAPM_OUTPUT("LOUTL"),
+ SND_SOC_DAPM_OUTPUT("LOUTR"),
+ SND_SOC_DAPM_OUTPUT("ClassD"),
+
+ /* ADCs */
+ SND_SOC_DAPM_ADC("ADC1L", NULL, DA732X_REG_ADC1_SEL,
+ DA732X_ADCL_EN_SHIFT, DA732X_NO_INVERT),
+ SND_SOC_DAPM_ADC("ADC1R", NULL, DA732X_REG_ADC1_SEL,
+ DA732X_ADCR_EN_SHIFT, DA732X_NO_INVERT),
+ SND_SOC_DAPM_ADC("ADC2L", NULL, DA732X_REG_ADC2_SEL,
+ DA732X_ADCL_EN_SHIFT, DA732X_NO_INVERT),
+ SND_SOC_DAPM_ADC("ADC2R", NULL, DA732X_REG_ADC2_SEL,
+ DA732X_ADCR_EN_SHIFT, DA732X_NO_INVERT),
+
+ /* DACs */
+ SND_SOC_DAPM_DAC("DAC1L", NULL, DA732X_REG_DAC1_SEL,
+ DA732X_DACL_EN_SHIFT, DA732X_NO_INVERT),
+ SND_SOC_DAPM_DAC("DAC1R", NULL, DA732X_REG_DAC1_SEL,
+ DA732X_DACR_EN_SHIFT, DA732X_NO_INVERT),
+ SND_SOC_DAPM_DAC("DAC2L", NULL, DA732X_REG_DAC2_SEL,
+ DA732X_DACL_EN_SHIFT, DA732X_NO_INVERT),
+ SND_SOC_DAPM_DAC("DAC2R", NULL, DA732X_REG_DAC2_SEL,
+ DA732X_DACR_EN_SHIFT, DA732X_NO_INVERT),
+ SND_SOC_DAPM_DAC("DAC3", NULL, DA732X_REG_DAC3_SEL,
+ DA732X_DACL_EN_SHIFT, DA732X_NO_INVERT),
+
+ /* Input Pgas */
+ SND_SOC_DAPM_PGA("MIC1 PGA", DA732X_REG_MIC1, DA732X_MIC_EN_SHIFT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_PGA("MIC2 PGA", DA732X_REG_MIC2, DA732X_MIC_EN_SHIFT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_PGA("MIC3 PGA", DA732X_REG_MIC3, DA732X_MIC_EN_SHIFT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_PGA("AUX1L PGA", DA732X_REG_AUX1L, DA732X_AUX_EN_SHIFT,
+ 0, NULL, 0),
+ SND_SOC_DAPM_PGA("AUX1R PGA", DA732X_REG_AUX1R, DA732X_AUX_EN_SHIFT,
+ 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA_E("HP Left", DA732X_REG_HPL, DA732X_HP_OUT_EN_SHIFT,
+ 0, NULL, 0, da732x_out_pga_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("HP Right", DA732X_REG_HPR, DA732X_HP_OUT_EN_SHIFT,
+ 0, NULL, 0, da732x_out_pga_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("LIN2", DA732X_REG_LIN2, DA732X_LIN_OUT_EN_SHIFT,
+ 0, NULL, 0, da732x_out_pga_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("LIN3", DA732X_REG_LIN3, DA732X_LIN_OUT_EN_SHIFT,
+ 0, NULL, 0, da732x_out_pga_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_PGA_E("LIN4", DA732X_REG_LIN4, DA732X_LIN_OUT_EN_SHIFT,
+ 0, NULL, 0, da732x_out_pga_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ /* MUXs */
+ SND_SOC_DAPM_MUX("ADC1 Left MUX", SND_SOC_NOPM, 0, 0, &adc1l_mux),
+ SND_SOC_DAPM_MUX("ADC1 Right MUX", SND_SOC_NOPM, 0, 0, &adc1r_mux),
+ SND_SOC_DAPM_MUX("ADC2 Left MUX", SND_SOC_NOPM, 0, 0, &adc2l_mux),
+ SND_SOC_DAPM_MUX("ADC2 Right MUX", SND_SOC_NOPM, 0, 0, &adc2r_mux),
+
+ SND_SOC_DAPM_MUX("HP Left MUX", SND_SOC_NOPM, 0, 0, &hpl_mux),
+ SND_SOC_DAPM_MUX("HP Right MUX", SND_SOC_NOPM, 0, 0, &hpr_mux),
+ SND_SOC_DAPM_MUX("Speaker MUX", SND_SOC_NOPM, 0, 0, &spk_mux),
+ SND_SOC_DAPM_MUX("LOUT2 MUX", SND_SOC_NOPM, 0, 0, &lout2_mux),
+ SND_SOC_DAPM_MUX("LOUT4 MUX", SND_SOC_NOPM, 0, 0, &lout4_mux),
+
+ /* AIF interfaces */
+ SND_SOC_DAPM_AIF_OUT("AIFA Output", "AIFA Capture", 0, DA732X_REG_AIFA3,
+ DA732X_AIF_EN_SHIFT, 0),
+ SND_SOC_DAPM_AIF_IN("AIFA Input", "AIFA Playback", 0, DA732X_REG_AIFA3,
+ DA732X_AIF_EN_SHIFT, 0),
+
+ SND_SOC_DAPM_AIF_OUT("AIFB Output", "AIFB Capture", 0, DA732X_REG_AIFB3,
+ DA732X_AIF_EN_SHIFT, 0),
+ SND_SOC_DAPM_AIF_IN("AIFB Input", "AIFB Playback", 0, DA732X_REG_AIFB3,
+ DA732X_AIF_EN_SHIFT, 0),
+};
+
+static const struct snd_soc_dapm_route da732x_dapm_routes[] = {
+ /* Inputs */
+ {"AUX1L PGA", "NULL", "AUX1L"},
+ {"AUX1R PGA", "NULL", "AUX1R"},
+ {"MIC1 PGA", NULL, "MIC1"},
+ {"MIC2 PGA", "NULL", "MIC2"},
+ {"MIC3 PGA", "NULL", "MIC3"},
+
+ /* Capture Path */
+ {"ADC1 Left MUX", "MIC1", "MIC1 PGA"},
+ {"ADC1 Left MUX", "AUX1L", "AUX1L PGA"},
+
+ {"ADC1 Right MUX", "AUX1R", "AUX1R PGA"},
+ {"ADC1 Right MUX", "MIC2", "MIC2 PGA"},
+ {"ADC1 Right MUX", "MIC3", "MIC3 PGA"},
+
+ {"ADC2 Left MUX", "AUX1L", "AUX1L PGA"},
+ {"ADC2 Left MUX", "MIC1", "MIC1 PGA"},
+
+ {"ADC2 Right MUX", "AUX1R", "AUX1R PGA"},
+ {"ADC2 Right MUX", "MIC2", "MIC2 PGA"},
+ {"ADC2 Right MUX", "MIC3", "MIC3 PGA"},
+
+ {"ADC1L", NULL, "ADC1 Supply"},
+ {"ADC1R", NULL, "ADC1 Supply"},
+ {"ADC2L", NULL, "ADC2 Supply"},
+ {"ADC2R", NULL, "ADC2 Supply"},
+
+ {"ADC1L", NULL, "ADC1 Left MUX"},
+ {"ADC1R", NULL, "ADC1 Right MUX"},
+ {"ADC2L", NULL, "ADC2 Left MUX"},
+ {"ADC2R", NULL, "ADC2 Right MUX"},
+
+ {"AIFA Output", NULL, "ADC1L"},
+ {"AIFA Output", NULL, "ADC1R"},
+ {"AIFB Output", NULL, "ADC2L"},
+ {"AIFB Output", NULL, "ADC2R"},
+
+ {"HP Left MUX", "Enabled", "AIFA Input"},
+ {"HP Right MUX", "Enabled", "AIFA Input"},
+ {"Speaker MUX", "Enabled", "AIFB Input"},
+ {"LOUT2 MUX", "Enabled", "AIFB Input"},
+ {"LOUT4 MUX", "Enabled", "AIFB Input"},
+
+ {"DAC1L", NULL, "DAC1 CLK"},
+ {"DAC1R", NULL, "DAC1 CLK"},
+ {"DAC2L", NULL, "DAC2 CLK"},
+ {"DAC2R", NULL, "DAC2 CLK"},
+ {"DAC3", NULL, "DAC3 CLK"},
+
+ {"DAC1L", NULL, "HP Left MUX"},
+ {"DAC1R", NULL, "HP Right MUX"},
+ {"DAC2L", NULL, "Speaker MUX"},
+ {"DAC2R", NULL, "LOUT4 MUX"},
+ {"DAC3", NULL, "LOUT2 MUX"},
+
+ /* Output Pgas */
+ {"HP Left", NULL, "DAC1L"},
+ {"HP Right", NULL, "DAC1R"},
+ {"LIN3", NULL, "DAC2L"},
+ {"LIN4", NULL, "DAC2R"},
+ {"LIN2", NULL, "DAC3"},
+
+ /* Outputs */
+ {"ClassD", NULL, "LIN3"},
+ {"LOUTL", NULL, "LIN2"},
+ {"LOUTR", NULL, "LIN4"},
+ {"HPL", NULL, "HP Left"},
+ {"HPR", NULL, "HP Right"},
+};
+
+static int da732x_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u32 aif = 0;
+ u32 reg_aif;
+ u32 fs;
+
+ reg_aif = dai->driver->base;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ aif |= DA732X_AIF_WORD_16;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ aif |= DA732X_AIF_WORD_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ aif |= DA732X_AIF_WORD_24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ aif |= DA732X_AIF_WORD_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_rate(params)) {
+ case 8000:
+ fs = DA732X_SR_8KHZ;
+ break;
+ case 11025:
+ fs = DA732X_SR_11_025KHZ;
+ break;
+ case 12000:
+ fs = DA732X_SR_12KHZ;
+ break;
+ case 16000:
+ fs = DA732X_SR_16KHZ;
+ break;
+ case 22050:
+ fs = DA732X_SR_22_05KHZ;
+ break;
+ case 24000:
+ fs = DA732X_SR_24KHZ;
+ break;
+ case 32000:
+ fs = DA732X_SR_32KHZ;
+ break;
+ case 44100:
+ fs = DA732X_SR_44_1KHZ;
+ break;
+ case 48000:
+ fs = DA732X_SR_48KHZ;
+ break;
+ case 88100:
+ fs = DA732X_SR_88_1KHZ;
+ break;
+ case 96000:
+ fs = DA732X_SR_96KHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, reg_aif, DA732X_AIF_WORD_MASK, aif);
+ snd_soc_update_bits(codec, DA732X_REG_CLK_CTRL, DA732X_SR1_MASK, fs);
+
+ return 0;
+}
+
+static int da732x_set_dai_fmt(struct snd_soc_dai *dai, u32 fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u32 aif_mclk, pc_count;
+ u32 reg_aif1, aif1;
+ u32 reg_aif3, aif3;
+
+ switch (dai->id) {
+ case DA732X_DAI_ID1:
+ reg_aif1 = DA732X_REG_AIFA1;
+ reg_aif3 = DA732X_REG_AIFA3;
+ pc_count = DA732X_PC_PULSE_AIFA | DA732X_PC_RESYNC_NOT_AUT |
+ DA732X_PC_SAME;
+ break;
+ case DA732X_DAI_ID2:
+ reg_aif1 = DA732X_REG_AIFB1;
+ reg_aif3 = DA732X_REG_AIFB3;
+ pc_count = DA732X_PC_PULSE_AIFB | DA732X_PC_RESYNC_NOT_AUT |
+ DA732X_PC_SAME;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ aif1 = DA732X_AIF_SLAVE;
+ aif_mclk = DA732X_AIFM_FRAME_64 | DA732X_AIFM_SRC_SEL_AIFA;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aif1 = DA732X_AIF_CLK_FROM_SRC;
+ aif_mclk = DA732X_CLK_GENERATION_AIF_A;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ aif3 = DA732X_AIF_I2S_MODE;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ aif3 = DA732X_AIF_RIGHT_J_MODE;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ aif3 = DA732X_AIF_LEFT_J_MODE;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ aif3 = DA732X_AIF_DSP_MODE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Clock inversion */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_B:
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aif3 |= DA732X_AIF_BCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ case SND_SOC_DAIFMT_LEFT_J:
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ aif3 |= DA732X_AIF_BCLK_INV | DA732X_AIF_WCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aif3 |= DA732X_AIF_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ aif3 |= DA732X_AIF_WCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_write(codec, DA732X_REG_AIF_MCLK, aif_mclk);
+ snd_soc_update_bits(codec, reg_aif1, DA732X_AIF1_CLK_MASK, aif1);
+ snd_soc_update_bits(codec, reg_aif3, DA732X_AIF_BCLK_INV |
+ DA732X_AIF_WCLK_INV | DA732X_AIF_MODE_MASK, aif3);
+ snd_soc_write(codec, DA732X_REG_PC_CTRL, pc_count);
+
+ return 0;
+}
+
+
+
+static int da732x_set_dai_pll(struct snd_soc_codec *codec, int pll_id,
+ int source, unsigned int freq_in,
+ unsigned int freq_out)
+{
+ struct da732x_priv *da732x = snd_soc_codec_get_drvdata(codec);
+ int fref, indiv;
+ u8 div_lo, div_mid, div_hi;
+ u64 frac_div;
+
+ /* Disable PLL */
+ if (freq_out == 0) {
+ snd_soc_update_bits(codec, DA732X_REG_PLL_CTRL,
+ DA732X_PLL_EN, 0);
+ da732x->pll_en = false;
+ return 0;
+ }
+
+ if (da732x->pll_en)
+ return -EBUSY;
+
+ if (source == DA732X_SRCCLK_MCLK) {
+ /* Validate Sysclk rate */
+ switch (da732x->sysclk) {
+ case 11290000:
+ case 12288000:
+ case 22580000:
+ case 24576000:
+ case 45160000:
+ case 49152000:
+ snd_soc_write(codec, DA732X_REG_PLL_CTRL,
+ DA732X_PLL_BYPASS);
+ return 0;
+ default:
+ dev_err(codec->dev,
+ "Cannot use PLL Bypass, invalid SYSCLK rate\n");
+ return -EINVAL;
+ }
+ }
+
+ indiv = da732x_get_input_div(codec, da732x->sysclk);
+ if (indiv < 0)
+ return indiv;
+
+ fref = (da732x->sysclk / indiv);
+ div_hi = freq_out / fref;
+ frac_div = (u64)(freq_out % fref) * 8192ULL;
+ do_div(frac_div, fref);
+ div_mid = (frac_div >> DA732X_1BYTE_SHIFT) & DA732X_U8_MASK;
+ div_lo = (frac_div) & DA732X_U8_MASK;
+
+ snd_soc_write(codec, DA732X_REG_PLL_DIV_LO, div_lo);
+ snd_soc_write(codec, DA732X_REG_PLL_DIV_MID, div_mid);
+ snd_soc_write(codec, DA732X_REG_PLL_DIV_HI, div_hi);
+
+ snd_soc_update_bits(codec, DA732X_REG_PLL_CTRL, DA732X_PLL_EN,
+ DA732X_PLL_EN);
+
+ da732x->pll_en = true;
+
+ return 0;
+}
+
+static int da732x_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct da732x_priv *da732x = snd_soc_codec_get_drvdata(codec);
+
+ da732x->sysclk = freq;
+
+ return 0;
+}
+
+#define DA732X_RATES SNDRV_PCM_RATE_8000_96000
+
+#define DA732X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops da732x_dai1_ops = {
+ .hw_params = da732x_hw_params,
+ .set_fmt = da732x_set_dai_fmt,
+ .set_sysclk = da732x_set_dai_sysclk,
+};
+
+static struct snd_soc_dai_ops da732x_dai2_ops = {
+ .hw_params = da732x_hw_params,
+ .set_fmt = da732x_set_dai_fmt,
+ .set_sysclk = da732x_set_dai_sysclk,
+};
+
+static struct snd_soc_dai_driver da732x_dai[] = {
+ {
+ .name = "DA732X_AIFA",
+ .id = DA732X_DAI_ID1,
+ .base = DA732X_REG_AIFA1,
+ .playback = {
+ .stream_name = "AIFA Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = DA732X_RATES,
+ .formats = DA732X_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIFA Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = DA732X_RATES,
+ .formats = DA732X_FORMATS,
+ },
+ .ops = &da732x_dai1_ops,
+ },
+ {
+ .name = "DA732X_AIFB",
+ .id = DA732X_DAI_ID2,
+ .base = DA732X_REG_AIFB1,
+ .playback = {
+ .stream_name = "AIFB Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = DA732X_RATES,
+ .formats = DA732X_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIFB Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = DA732X_RATES,
+ .formats = DA732X_FORMATS,
+ },
+ .ops = &da732x_dai2_ops,
+ },
+};
+
+static const struct regmap_config da732x_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = DA732X_MAX_REG,
+ .reg_defaults = da732x_reg_cache,
+ .num_reg_defaults = ARRAY_SIZE(da732x_reg_cache),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+
+static void da732x_dac_offset_adjust(struct snd_soc_codec *codec)
+{
+ u8 offset[DA732X_HP_DACS];
+ u8 sign[DA732X_HP_DACS];
+ u8 step = DA732X_DAC_OFFSET_STEP;
+
+ /* Initialize DAC offset calibration circuits and registers */
+ snd_soc_write(codec, DA732X_REG_HPL_DAC_OFFSET,
+ DA732X_HP_DAC_OFFSET_TRIM_VAL);
+ snd_soc_write(codec, DA732X_REG_HPR_DAC_OFFSET,
+ DA732X_HP_DAC_OFFSET_TRIM_VAL);
+ snd_soc_write(codec, DA732X_REG_HPL_DAC_OFF_CNTL,
+ DA732X_HP_DAC_OFF_CALIBRATION |
+ DA732X_HP_DAC_OFF_SCALE_STEPS);
+ snd_soc_write(codec, DA732X_REG_HPR_DAC_OFF_CNTL,
+ DA732X_HP_DAC_OFF_CALIBRATION |
+ DA732X_HP_DAC_OFF_SCALE_STEPS);
+
+ /* Wait for voltage stabilization */
+ msleep(DA732X_WAIT_FOR_STABILIZATION);
+
+ /* Check DAC offset sign */
+ sign[DA732X_HPL_DAC] = (codec->hw_read(codec, DA732X_REG_HPL_DAC_OFF_CNTL) &
+ DA732X_HP_DAC_OFF_CNTL_COMPO);
+ sign[DA732X_HPR_DAC] = (codec->hw_read(codec, DA732X_REG_HPR_DAC_OFF_CNTL) &
+ DA732X_HP_DAC_OFF_CNTL_COMPO);
+
+ /* Binary search DAC offset values (both channels at once) */
+ offset[DA732X_HPL_DAC] = sign[DA732X_HPL_DAC] << DA732X_HP_DAC_COMPO_SHIFT;
+ offset[DA732X_HPR_DAC] = sign[DA732X_HPR_DAC] << DA732X_HP_DAC_COMPO_SHIFT;
+
+ do {
+ offset[DA732X_HPL_DAC] |= step;
+ offset[DA732X_HPR_DAC] |= step;
+ snd_soc_write(codec, DA732X_REG_HPL_DAC_OFFSET,
+ ~offset[DA732X_HPL_DAC] & DA732X_HP_DAC_OFF_MASK);
+ snd_soc_write(codec, DA732X_REG_HPR_DAC_OFFSET,
+ ~offset[DA732X_HPR_DAC] & DA732X_HP_DAC_OFF_MASK);
+
+ msleep(DA732X_WAIT_FOR_STABILIZATION);
+
+ if ((codec->hw_read(codec, DA732X_REG_HPL_DAC_OFF_CNTL) &
+ DA732X_HP_DAC_OFF_CNTL_COMPO) ^ sign[DA732X_HPL_DAC])
+ offset[DA732X_HPL_DAC] &= ~step;
+ if ((codec->hw_read(codec, DA732X_REG_HPR_DAC_OFF_CNTL) &
+ DA732X_HP_DAC_OFF_CNTL_COMPO) ^ sign[DA732X_HPR_DAC])
+ offset[DA732X_HPR_DAC] &= ~step;
+
+ step >>= 1;
+ } while (step);
+
+ /* Write final DAC offsets to registers */
+ snd_soc_write(codec, DA732X_REG_HPL_DAC_OFFSET,
+ ~offset[DA732X_HPL_DAC] & DA732X_HP_DAC_OFF_MASK);
+ snd_soc_write(codec, DA732X_REG_HPR_DAC_OFFSET,
+ ~offset[DA732X_HPR_DAC] & DA732X_HP_DAC_OFF_MASK);
+
+ /* End DAC calibration mode */
+ snd_soc_write(codec, DA732X_REG_HPL_DAC_OFF_CNTL,
+ DA732X_HP_DAC_OFF_SCALE_STEPS);
+ snd_soc_write(codec, DA732X_REG_HPR_DAC_OFF_CNTL,
+ DA732X_HP_DAC_OFF_SCALE_STEPS);
+}
+
+static void da732x_output_offset_adjust(struct snd_soc_codec *codec)
+{
+ u8 offset[DA732X_HP_AMPS];
+ u8 sign[DA732X_HP_AMPS];
+ u8 step = DA732X_OUTPUT_OFFSET_STEP;
+
+ offset[DA732X_HPL_AMP] = DA732X_HP_OUT_TRIM_VAL;
+ offset[DA732X_HPR_AMP] = DA732X_HP_OUT_TRIM_VAL;
+
+ /* Initialize output offset calibration circuits and registers */
+ snd_soc_write(codec, DA732X_REG_HPL_OUT_OFFSET, DA732X_HP_OUT_TRIM_VAL);
+ snd_soc_write(codec, DA732X_REG_HPR_OUT_OFFSET, DA732X_HP_OUT_TRIM_VAL);
+ snd_soc_write(codec, DA732X_REG_HPL,
+ DA732X_HP_OUT_COMP | DA732X_HP_OUT_EN);
+ snd_soc_write(codec, DA732X_REG_HPR,
+ DA732X_HP_OUT_COMP | DA732X_HP_OUT_EN);
+
+ /* Wait for voltage stabilization */
+ msleep(DA732X_WAIT_FOR_STABILIZATION);
+
+ /* Check output offset sign */
+ sign[DA732X_HPL_AMP] = codec->hw_read(codec, DA732X_REG_HPL) &
+ DA732X_HP_OUT_COMPO;
+ sign[DA732X_HPR_AMP] = codec->hw_read(codec, DA732X_REG_HPR) &
+ DA732X_HP_OUT_COMPO;
+
+ snd_soc_write(codec, DA732X_REG_HPL, DA732X_HP_OUT_COMP |
+ (sign[DA732X_HPL_AMP] >> DA732X_HP_OUT_COMPO_SHIFT) |
+ DA732X_HP_OUT_EN);
+ snd_soc_write(codec, DA732X_REG_HPR, DA732X_HP_OUT_COMP |
+ (sign[DA732X_HPR_AMP] >> DA732X_HP_OUT_COMPO_SHIFT) |
+ DA732X_HP_OUT_EN);
+
+ /* Binary search output offset values (both channels at once) */
+ do {
+ offset[DA732X_HPL_AMP] |= step;
+ offset[DA732X_HPR_AMP] |= step;
+ snd_soc_write(codec, DA732X_REG_HPL_OUT_OFFSET,
+ offset[DA732X_HPL_AMP]);
+ snd_soc_write(codec, DA732X_REG_HPR_OUT_OFFSET,
+ offset[DA732X_HPR_AMP]);
+
+ msleep(DA732X_WAIT_FOR_STABILIZATION);
+
+ if ((codec->hw_read(codec, DA732X_REG_HPL) &
+ DA732X_HP_OUT_COMPO) ^ sign[DA732X_HPL_AMP])
+ offset[DA732X_HPL_AMP] &= ~step;
+ if ((codec->hw_read(codec, DA732X_REG_HPR) &
+ DA732X_HP_OUT_COMPO) ^ sign[DA732X_HPR_AMP])
+ offset[DA732X_HPR_AMP] &= ~step;
+
+ step >>= 1;
+ } while (step);
+
+ /* Write final DAC offsets to registers */
+ snd_soc_write(codec, DA732X_REG_HPL_OUT_OFFSET, offset[DA732X_HPL_AMP]);
+ snd_soc_write(codec, DA732X_REG_HPR_OUT_OFFSET, offset[DA732X_HPR_AMP]);
+}
+
+static void da732x_hp_dc_offset_cancellation(struct snd_soc_codec *codec)
+{
+ /* Make sure that we have Soft Mute enabled */
+ snd_soc_write(codec, DA732X_REG_DAC1_SOFTMUTE, DA732X_SOFTMUTE_EN |
+ DA732X_GAIN_RAMPED | DA732X_16_SAMPLES);
+ snd_soc_write(codec, DA732X_REG_DAC1_SEL, DA732X_DACL_EN |
+ DA732X_DACR_EN | DA732X_DACL_SDM | DA732X_DACR_SDM |
+ DA732X_DACL_MUTE | DA732X_DACR_MUTE);
+ snd_soc_write(codec, DA732X_REG_HPL, DA732X_HP_OUT_DAC_EN |
+ DA732X_HP_OUT_MUTE | DA732X_HP_OUT_EN);
+ snd_soc_write(codec, DA732X_REG_HPR, DA732X_HP_OUT_EN |
+ DA732X_HP_OUT_MUTE | DA732X_HP_OUT_DAC_EN);
+
+ da732x_dac_offset_adjust(codec);
+ da732x_output_offset_adjust(codec);
+
+ snd_soc_write(codec, DA732X_REG_DAC1_SEL, DA732X_DACS_DIS);
+ snd_soc_write(codec, DA732X_REG_HPL, DA732X_HP_DIS);
+ snd_soc_write(codec, DA732X_REG_HPR, DA732X_HP_DIS);
+}
+
+static int da732x_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct da732x_priv *da732x = snd_soc_codec_get_drvdata(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ snd_soc_update_bits(codec, DA732X_REG_BIAS_EN,
+ DA732X_BIAS_BOOST_MASK,
+ DA732X_BIAS_BOOST_100PC);
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+ /* Init Codec */
+ snd_soc_write(codec, DA732X_REG_REF1,
+ DA732X_VMID_FASTCHG);
+ snd_soc_write(codec, DA732X_REG_BIAS_EN,
+ DA732X_BIAS_EN);
+
+ mdelay(DA732X_STARTUP_DELAY);
+
+ /* Disable Fast Charge and enable DAC ref voltage */
+ snd_soc_write(codec, DA732X_REG_REF1,
+ DA732X_REFBUFX2_EN);
+
+ /* Enable bypass DSP routing */
+ snd_soc_write(codec, DA732X_REG_DATA_ROUTE,
+ DA732X_BYPASS_DSP);
+
+ /* Enable Digital subsystem */
+ snd_soc_write(codec, DA732X_REG_DSP_CTRL,
+ DA732X_DIGITAL_EN);
+
+ snd_soc_write(codec, DA732X_REG_SPARE1_OUT,
+ DA732X_HP_DRIVER_EN |
+ DA732X_HP_GATE_LOW |
+ DA732X_HP_LOOP_GAIN_CTRL);
+ snd_soc_write(codec, DA732X_REG_HP_LIN1_GNDSEL,
+ DA732X_HP_OUT_GNDSEL);
+
+ da732x_set_charge_pump(codec, DA732X_ENABLE_CP);
+
+ snd_soc_write(codec, DA732X_REG_CLK_EN1,
+ DA732X_SYS3_CLK_EN | DA732X_PC_CLK_EN);
+
+ /* Enable Zero Crossing */
+ snd_soc_write(codec, DA732X_REG_INP_ZC_EN,
+ DA732X_MIC1_PRE_ZC_EN |
+ DA732X_MIC1_ZC_EN |
+ DA732X_MIC2_PRE_ZC_EN |
+ DA732X_MIC2_ZC_EN |
+ DA732X_AUXL_ZC_EN |
+ DA732X_AUXR_ZC_EN |
+ DA732X_MIC3_PRE_ZC_EN |
+ DA732X_MIC3_ZC_EN);
+ snd_soc_write(codec, DA732X_REG_OUT_ZC_EN,
+ DA732X_HPL_ZC_EN | DA732X_HPR_ZC_EN |
+ DA732X_LIN2_ZC_EN | DA732X_LIN3_ZC_EN |
+ DA732X_LIN4_ZC_EN);
+
+ da732x_hp_dc_offset_cancellation(codec);
+
+ regcache_cache_only(codec->control_data, false);
+ regcache_sync(codec->control_data);
+ } else {
+ snd_soc_update_bits(codec, DA732X_REG_BIAS_EN,
+ DA732X_BIAS_BOOST_MASK,
+ DA732X_BIAS_BOOST_50PC);
+ snd_soc_update_bits(codec, DA732X_REG_PLL_CTRL,
+ DA732X_PLL_EN, 0);
+ da732x->pll_en = false;
+ }
+ break;
+ case SND_SOC_BIAS_OFF:
+ regcache_cache_only(codec->control_data, true);
+ da732x_set_charge_pump(codec, DA732X_DISABLE_CP);
+ snd_soc_update_bits(codec, DA732X_REG_BIAS_EN, DA732X_BIAS_EN,
+ DA732X_BIAS_DIS);
+ da732x->pll_en = false;
+ break;
+ }
+
+ codec->dapm.bias_level = level;
+
+ return 0;
+}
+
+static int da732x_probe(struct snd_soc_codec *codec)
+{
+ struct da732x_priv *da732x = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+ int ret = 0;
+
+ da732x->codec = codec;
+
+ dapm->idle_bias_off = false;
+
+ codec->control_data = da732x->regmap;
+
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec.\n");
+ goto err;
+ }
+
+ da732x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+err:
+ return ret;
+}
+
+static int da732x_remove(struct snd_soc_codec *codec)
+{
+
+ da732x_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_da732x = {
+ .probe = da732x_probe,
+ .remove = da732x_remove,
+ .set_bias_level = da732x_set_bias_level,
+ .controls = da732x_snd_controls,
+ .num_controls = ARRAY_SIZE(da732x_snd_controls),
+ .dapm_widgets = da732x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(da732x_dapm_widgets),
+ .dapm_routes = da732x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(da732x_dapm_routes),
+ .set_pll = da732x_set_dai_pll,
+ .reg_cache_size = ARRAY_SIZE(da732x_reg_cache),
+};
+
+static __devinit int da732x_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct da732x_priv *da732x;
+ unsigned int reg;
+ int ret;
+
+ da732x = devm_kzalloc(&i2c->dev, sizeof(struct da732x_priv),
+ GFP_KERNEL);
+ if (!da732x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, da732x);
+
+ da732x->regmap = devm_regmap_init_i2c(i2c, &da732x_regmap);
+ if (IS_ERR(da732x->regmap)) {
+ ret = PTR_ERR(da732x->regmap);
+ dev_err(&i2c->dev, "Failed to initialize regmap\n");
+ goto err;
+ }
+
+ ret = regmap_read(da732x->regmap, DA732X_REG_ID, &reg);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to read ID register: %d\n", ret);
+ goto err;
+ }
+
+ dev_info(&i2c->dev, "Revision: %d.%d\n",
+ (reg & DA732X_ID_MAJOR_MASK), (reg & DA732X_ID_MINOR_MASK));
+
+ ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_da732x,
+ da732x_dai, ARRAY_SIZE(da732x_dai));
+ if (ret != 0)
+ dev_err(&i2c->dev, "Failed to register codec.\n");
+
+err:
+ return ret;
+}
+
+static __devexit int da732x_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id da732x_i2c_id[] = {
+ { "da7320", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, da732x_i2c_id);
+
+static struct i2c_driver da732x_i2c_driver = {
+ .driver = {
+ .name = "da7320",
+ .owner = THIS_MODULE,
+ },
+ .probe = da732x_i2c_probe,
+ .remove = __devexit_p(da732x_i2c_remove),
+ .id_table = da732x_i2c_id,
+};
+
+module_i2c_driver(da732x_i2c_driver);
+
+
+MODULE_DESCRIPTION("ASoC DA732X driver");
+MODULE_AUTHOR("Michal Hajduk <michal.hajduk@diasemi.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/da732x.h b/sound/soc/codecs/da732x.h
new file mode 100644
index 000000000000..c8ce5475de22
--- /dev/null
+++ b/sound/soc/codecs/da732x.h
@@ -0,0 +1,133 @@
+/*
+ * da732x.h -- Dialog DA732X ALSA SoC Audio Driver Header File
+ *
+ * Copyright (C) 2012 Dialog Semiconductor GmbH
+ *
+ * Author: Michal Hajduk <Michal.Hajduk@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DA732X_H_
+#define __DA732X_H
+
+#include <sound/soc.h>
+
+/* General */
+#define DA732X_U8_MASK 0xFF
+#define DA732X_4BYTES 4
+#define DA732X_3BYTES 3
+#define DA732X_2BYTES 2
+#define DA732X_1BYTE 1
+#define DA732X_1BYTE_SHIFT 8
+#define DA732X_2BYTES_SHIFT 16
+#define DA732X_3BYTES_SHIFT 24
+#define DA732X_4BYTES_SHIFT 32
+
+#define DA732X_DACS_DIS 0x0
+#define DA732X_HP_DIS 0x0
+#define DA732X_CLEAR_REG 0x0
+
+/* Calibration */
+#define DA732X_DAC_OFFSET_STEP 0x20
+#define DA732X_OUTPUT_OFFSET_STEP 0x80
+#define DA732X_HP_OUT_TRIM_VAL 0x0
+#define DA732X_WAIT_FOR_STABILIZATION 1
+#define DA732X_HPL_DAC 0
+#define DA732X_HPR_DAC 1
+#define DA732X_HP_DACS 2
+#define DA732X_HPL_AMP 0
+#define DA732X_HPR_AMP 1
+#define DA732X_HP_AMPS 2
+
+/* Clock settings */
+#define DA732X_STARTUP_DELAY 100
+#define DA732X_PLL_OUT_196608 196608000
+#define DA732X_PLL_OUT_180634 180633600
+#define DA732X_PLL_OUT_SRM 188620800
+#define DA732X_MCLK_10MHZ 10000000
+#define DA732X_MCLK_20MHZ 20000000
+#define DA732X_MCLK_40MHZ 40000000
+#define DA732X_MCLK_54MHZ 54000000
+#define DA732X_MCLK_RET_0_10MHZ 0
+#define DA732X_MCLK_VAL_0_10MHZ 1
+#define DA732X_MCLK_RET_10_20MHZ 1
+#define DA732X_MCLK_VAL_10_20MHZ 2
+#define DA732X_MCLK_RET_20_40MHZ 2
+#define DA732X_MCLK_VAL_20_40MHZ 4
+#define DA732X_MCLK_RET_40_54MHZ 3
+#define DA732X_MCLK_VAL_40_54MHZ 8
+#define DA732X_DAI_ID1 0
+#define DA732X_DAI_ID2 1
+#define DA732X_SRCCLK_PLL 0
+#define DA732X_SRCCLK_MCLK 1
+
+#define DA732X_LIN_LP_VOL 0x4F
+#define DA732X_LP_VOL 0x40
+
+/* Kcontrols */
+#define DA732X_DAC_EN_MAX 2
+#define DA732X_ADCL_MUX_MAX 2
+#define DA732X_ADCR_MUX_MAX 3
+#define DA732X_HPF_MODE_MAX 3
+#define DA732X_HPF_MODE_SHIFT 4
+#define DA732X_HPF_MUSIC_SHIFT 0
+#define DA732X_HPF_MUSIC_MAX 4
+#define DA732X_HPF_VOICE_SHIFT 4
+#define DA732X_HPF_VOICE_MAX 8
+#define DA732X_EQ_EN_MAX 1
+#define DA732X_HPF_VOICE 1
+#define DA732X_HPF_MUSIC 2
+#define DA732X_HPF_DISABLED 0
+#define DA732X_NO_INVERT 0
+#define DA732X_INVERT 1
+#define DA732X_SWITCH_MAX 1
+#define DA732X_ENABLE_CP 1
+#define DA732X_DISABLE_CP 0
+#define DA732X_DISABLE_ALL_CLKS 0
+#define DA732X_RESET_ADCS 0
+
+/* dB values */
+#define DA732X_MIC_VOL_DB_MIN 0
+#define DA732X_MIC_VOL_DB_INC 50
+#define DA732X_MIC_PRE_VOL_DB_MIN 0
+#define DA732X_MIC_PRE_VOL_DB_INC 600
+#define DA732X_AUX_VOL_DB_MIN -6000
+#define DA732X_AUX_VOL_DB_INC 150
+#define DA732X_HP_VOL_DB_MIN -2250
+#define DA732X_HP_VOL_DB_INC 150
+#define DA732X_LIN2_VOL_DB_MIN -1650
+#define DA732X_LIN2_VOL_DB_INC 150
+#define DA732X_LIN3_VOL_DB_MIN -1650
+#define DA732X_LIN3_VOL_DB_INC 150
+#define DA732X_LIN4_VOL_DB_MIN -2250
+#define DA732X_LIN4_VOL_DB_INC 150
+#define DA732X_EQ_BAND_VOL_DB_MIN -1050
+#define DA732X_EQ_BAND_VOL_DB_INC 150
+#define DA732X_DAC_VOL_DB_MIN -7725
+#define DA732X_DAC_VOL_DB_INC 75
+#define DA732X_ADC_VOL_DB_MIN 0
+#define DA732X_ADC_VOL_DB_INC -1
+#define DA732X_EQ_OVERALL_VOL_DB_MIN -1800
+#define DA732X_EQ_OVERALL_VOL_DB_INC 600
+
+#define DA732X_SOC_ENUM_DOUBLE_R(xreg, xrreg, xmax, xtext) \
+ {.reg = xreg, .reg2 = xrreg, .max = xmax, .texts = xtext}
+
+enum da732x_sysctl {
+ DA732X_SR_8KHZ = 0x1,
+ DA732X_SR_11_025KHZ = 0x2,
+ DA732X_SR_12KHZ = 0x3,
+ DA732X_SR_16KHZ = 0x5,
+ DA732X_SR_22_05KHZ = 0x6,
+ DA732X_SR_24KHZ = 0x7,
+ DA732X_SR_32KHZ = 0x9,
+ DA732X_SR_44_1KHZ = 0xA,
+ DA732X_SR_48KHZ = 0xB,
+ DA732X_SR_88_1KHZ = 0xE,
+ DA732X_SR_96KHZ = 0xF,
+};
+
+#endif /* __DA732X_H_ */
diff --git a/sound/soc/codecs/da732x_reg.h b/sound/soc/codecs/da732x_reg.h
new file mode 100644
index 000000000000..bdd03ca4b2de
--- /dev/null
+++ b/sound/soc/codecs/da732x_reg.h
@@ -0,0 +1,654 @@
+/*
+ * da732x_reg.h --- Dialog DA732X ALSA SoC Audio Registers Header File
+ *
+ * Copyright (C) 2012 Dialog Semiconductor GmbH
+ *
+ * Author: Michal Hajduk <Michal.Hajduk@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DA732X_REG_H_
+#define __DA732X_REG_H_
+
+/* DA732X registers */
+#define DA732X_REG_STATUS_EXT 0x00
+#define DA732X_REG_STATUS 0x01
+#define DA732X_REG_REF1 0x02
+#define DA732X_REG_BIAS_EN 0x03
+#define DA732X_REG_BIAS1 0x04
+#define DA732X_REG_BIAS2 0x05
+#define DA732X_REG_BIAS3 0x06
+#define DA732X_REG_BIAS4 0x07
+#define DA732X_REG_MICBIAS2 0x0F
+#define DA732X_REG_MICBIAS1 0x10
+#define DA732X_REG_MICDET 0x11
+#define DA732X_REG_MIC1_PRE 0x12
+#define DA732X_REG_MIC1 0x13
+#define DA732X_REG_MIC2_PRE 0x14
+#define DA732X_REG_MIC2 0x15
+#define DA732X_REG_AUX1L 0x16
+#define DA732X_REG_AUX1R 0x17
+#define DA732X_REG_MIC3_PRE 0x18
+#define DA732X_REG_MIC3 0x19
+#define DA732X_REG_INP_PINBIAS 0x1A
+#define DA732X_REG_INP_ZC_EN 0x1B
+#define DA732X_REG_INP_MUX 0x1D
+#define DA732X_REG_HP_DET 0x20
+#define DA732X_REG_HPL_DAC_OFFSET 0x21
+#define DA732X_REG_HPL_DAC_OFF_CNTL 0x22
+#define DA732X_REG_HPL_OUT_OFFSET 0x23
+#define DA732X_REG_HPL 0x24
+#define DA732X_REG_HPL_VOL 0x25
+#define DA732X_REG_HPR_DAC_OFFSET 0x26
+#define DA732X_REG_HPR_DAC_OFF_CNTL 0x27
+#define DA732X_REG_HPR_OUT_OFFSET 0x28
+#define DA732X_REG_HPR 0x29
+#define DA732X_REG_HPR_VOL 0x2A
+#define DA732X_REG_LIN2 0x2B
+#define DA732X_REG_LIN3 0x2C
+#define DA732X_REG_LIN4 0x2D
+#define DA732X_REG_OUT_ZC_EN 0x2E
+#define DA732X_REG_HP_LIN1_GNDSEL 0x37
+#define DA732X_REG_CP_HP1 0x3A
+#define DA732X_REG_CP_HP2 0x3B
+#define DA732X_REG_CP_CTRL1 0x40
+#define DA732X_REG_CP_CTRL2 0x41
+#define DA732X_REG_CP_CTRL3 0x42
+#define DA732X_REG_CP_LEVEL_MASK 0x43
+#define DA732X_REG_CP_DET 0x44
+#define DA732X_REG_CP_STATUS 0x45
+#define DA732X_REG_CP_THRESH1 0x46
+#define DA732X_REG_CP_THRESH2 0x47
+#define DA732X_REG_CP_THRESH3 0x48
+#define DA732X_REG_CP_THRESH4 0x49
+#define DA732X_REG_CP_THRESH5 0x4A
+#define DA732X_REG_CP_THRESH6 0x4B
+#define DA732X_REG_CP_THRESH7 0x4C
+#define DA732X_REG_CP_THRESH8 0x4D
+#define DA732X_REG_PLL_DIV_LO 0x50
+#define DA732X_REG_PLL_DIV_MID 0x51
+#define DA732X_REG_PLL_DIV_HI 0x52
+#define DA732X_REG_PLL_CTRL 0x53
+#define DA732X_REG_CLK_CTRL 0x54
+#define DA732X_REG_CLK_DSP 0x5A
+#define DA732X_REG_CLK_EN1 0x5B
+#define DA732X_REG_CLK_EN2 0x5C
+#define DA732X_REG_CLK_EN3 0x5D
+#define DA732X_REG_CLK_EN4 0x5E
+#define DA732X_REG_CLK_EN5 0x5F
+#define DA732X_REG_AIF_MCLK 0x60
+#define DA732X_REG_AIFA1 0x61
+#define DA732X_REG_AIFA2 0x62
+#define DA732X_REG_AIFA3 0x63
+#define DA732X_REG_AIFB1 0x64
+#define DA732X_REG_AIFB2 0x65
+#define DA732X_REG_AIFB3 0x66
+#define DA732X_REG_PC_CTRL 0x6A
+#define DA732X_REG_DATA_ROUTE 0x70
+#define DA732X_REG_DSP_CTRL 0x71
+#define DA732X_REG_CIF_CTRL2 0x74
+#define DA732X_REG_HANDSHAKE 0x75
+#define DA732X_REG_MBOX0 0x76
+#define DA732X_REG_MBOX1 0x77
+#define DA732X_REG_MBOX2 0x78
+#define DA732X_REG_MBOX_STATUS 0x79
+#define DA732X_REG_SPARE1_OUT 0x7D
+#define DA732X_REG_SPARE2_OUT 0x7E
+#define DA732X_REG_SPARE1_IN 0x7F
+#define DA732X_REG_ID 0x81
+#define DA732X_REG_ADC1_PD 0x90
+#define DA732X_REG_ADC1_HPF 0x93
+#define DA732X_REG_ADC1_SEL 0x94
+#define DA732X_REG_ADC1_EQ12 0x95
+#define DA732X_REG_ADC1_EQ34 0x96
+#define DA732X_REG_ADC1_EQ5 0x97
+#define DA732X_REG_ADC2_PD 0x98
+#define DA732X_REG_ADC2_HPF 0x9B
+#define DA732X_REG_ADC2_SEL 0x9C
+#define DA732X_REG_ADC2_EQ12 0x9D
+#define DA732X_REG_ADC2_EQ34 0x9E
+#define DA732X_REG_ADC2_EQ5 0x9F
+#define DA732X_REG_DAC1_HPF 0xA0
+#define DA732X_REG_DAC1_L_VOL 0xA1
+#define DA732X_REG_DAC1_R_VOL 0xA2
+#define DA732X_REG_DAC1_SEL 0xA3
+#define DA732X_REG_DAC1_SOFTMUTE 0xA4
+#define DA732X_REG_DAC1_EQ12 0xA5
+#define DA732X_REG_DAC1_EQ34 0xA6
+#define DA732X_REG_DAC1_EQ5 0xA7
+#define DA732X_REG_DAC2_HPF 0xB0
+#define DA732X_REG_DAC2_L_VOL 0xB1
+#define DA732X_REG_DAC2_R_VOL 0xB2
+#define DA732X_REG_DAC2_SEL 0xB3
+#define DA732X_REG_DAC2_SOFTMUTE 0xB4
+#define DA732X_REG_DAC2_EQ12 0xB5
+#define DA732X_REG_DAC2_EQ34 0xB6
+#define DA732X_REG_DAC2_EQ5 0xB7
+#define DA732X_REG_DAC3_HPF 0xC0
+#define DA732X_REG_DAC3_VOL 0xC1
+#define DA732X_REG_DAC3_SEL 0xC3
+#define DA732X_REG_DAC3_SOFTMUTE 0xC4
+#define DA732X_REG_DAC3_EQ12 0xC5
+#define DA732X_REG_DAC3_EQ34 0xC6
+#define DA732X_REG_DAC3_EQ5 0xC7
+#define DA732X_REG_BIQ_BYP 0xD2
+#define DA732X_REG_DMA_CMD 0xD3
+#define DA732X_REG_DMA_ADDR0 0xD4
+#define DA732X_REG_DMA_ADDR1 0xD5
+#define DA732X_REG_DMA_DATA0 0xD6
+#define DA732X_REG_DMA_DATA1 0xD7
+#define DA732X_REG_DMA_DATA2 0xD8
+#define DA732X_REG_DMA_DATA3 0xD9
+#define DA732X_REG_DMA_STATUS 0xDA
+#define DA732X_REG_BROWNOUT 0xDF
+#define DA732X_REG_UNLOCK 0xE0
+
+#define DA732X_MAX_REG DA732X_REG_UNLOCK
+/*
+ * Bits
+ */
+
+/* DA732X_REG_STATUS_EXT (addr=0x00) */
+#define DA732X_STATUS_EXT_DSP (1 << 4)
+#define DA732X_STATUS_EXT_CLEAR (0 << 0)
+
+/* DA732X_REG_STATUS (addr=0x01) */
+#define DA732X_STATUS_PLL_LOCK (1 << 0)
+#define DA732X_STATUS_PLL_MCLK_DET (1 << 1)
+#define DA732X_STATUS_HPDET_OUT (1 << 2)
+#define DA732X_STATUS_INP_MIXDET_1 (1 << 3)
+#define DA732X_STATUS_INP_MIXDET_2 (1 << 4)
+#define DA732X_STATUS_BO_STATUS (1 << 5)
+
+/* DA732X_REG_REF1 (addr=0x02) */
+#define DA732X_VMID_FASTCHG (1 << 1)
+#define DA732X_VMID_FASTDISCHG (1 << 2)
+#define DA732X_REFBUFX2_EN (1 << 6)
+#define DA732X_REFBUFX2_DIS (0 << 6)
+
+/* DA732X_REG_BIAS_EN (addr=0x03) */
+#define DA732X_BIAS_BOOST_MASK (3 << 0)
+#define DA732X_BIAS_BOOST_100PC (0 << 0)
+#define DA732X_BIAS_BOOST_133PC (1 << 0)
+#define DA732X_BIAS_BOOST_88PC (2 << 0)
+#define DA732X_BIAS_BOOST_50PC (3 << 0)
+#define DA732X_BIAS_EN (1 << 7)
+#define DA732X_BIAS_DIS (0 << 7)
+
+/* DA732X_REG_BIAS1 (addr=0x04) */
+#define DA732X_BIAS1_HP_DAC_BIAS_MASK (3 << 0)
+#define DA732X_BIAS1_HP_DAC_BIAS_100PC (0 << 0)
+#define DA732X_BIAS1_HP_DAC_BIAS_150PC (1 << 0)
+#define DA732X_BIAS1_HP_DAC_BIAS_50PC (2 << 0)
+#define DA732X_BIAS1_HP_DAC_BIAS_75PC (3 << 0)
+#define DA732X_BIAS1_HP_OUT_BIAS_MASK (7 << 4)
+#define DA732X_BIAS1_HP_OUT_BIAS_100PC (0 << 4)
+#define DA732X_BIAS1_HP_OUT_BIAS_125PC (1 << 4)
+#define DA732X_BIAS1_HP_OUT_BIAS_150PC (2 << 4)
+#define DA732X_BIAS1_HP_OUT_BIAS_175PC (3 << 4)
+#define DA732X_BIAS1_HP_OUT_BIAS_200PC (4 << 4)
+#define DA732X_BIAS1_HP_OUT_BIAS_250PC (5 << 4)
+#define DA732X_BIAS1_HP_OUT_BIAS_300PC (6 << 4)
+#define DA732X_BIAS1_HP_OUT_BIAS_350PC (7 << 4)
+
+/* DA732X_REG_BIAS2 (addr=0x05) */
+#define DA732X_BIAS2_LINE2_DAC_BIAS_MASK (3 << 0)
+#define DA732X_BIAS2_LINE2_DAC_BIAS_100PC (0 << 0)
+#define DA732X_BIAS2_LINE2_DAC_BIAS_150PC (1 << 0)
+#define DA732X_BIAS2_LINE2_DAC_BIAS_50PC (2 << 0)
+#define DA732X_BIAS2_LINE2_DAC_BIAS_75PC (3 << 0)
+#define DA732X_BIAS2_LINE2_OUT_BIAS_MASK (7 << 4)
+#define DA732X_BIAS2_LINE2_OUT_BIAS_100PC (0 << 4)
+#define DA732X_BIAS2_LINE2_OUT_BIAS_125PC (1 << 4)
+#define DA732X_BIAS2_LINE2_OUT_BIAS_150PC (2 << 4)
+#define DA732X_BIAS2_LINE2_OUT_BIAS_175PC (3 << 4)
+#define DA732X_BIAS2_LINE2_OUT_BIAS_200PC (4 << 4)
+#define DA732X_BIAS2_LINE2_OUT_BIAS_250PC (5 << 4)
+#define DA732X_BIAS2_LINE2_OUT_BIAS_300PC (6 << 4)
+#define DA732X_BIAS2_LINE2_OUT_BIAS_350PC (7 << 4)
+
+/* DA732X_REG_BIAS3 (addr=0x06) */
+#define DA732X_BIAS3_LINE3_DAC_BIAS_MASK (3 << 0)
+#define DA732X_BIAS3_LINE3_DAC_BIAS_100PC (0 << 0)
+#define DA732X_BIAS3_LINE3_DAC_BIAS_150PC (1 << 0)
+#define DA732X_BIAS3_LINE3_DAC_BIAS_50PC (2 << 0)
+#define DA732X_BIAS3_LINE3_DAC_BIAS_75PC (3 << 0)
+#define DA732X_BIAS3_LINE3_OUT_BIAS_MASK (7 << 4)
+#define DA732X_BIAS3_LINE3_OUT_BIAS_100PC (0 << 4)
+#define DA732X_BIAS3_LINE3_OUT_BIAS_125PC (1 << 4)
+#define DA732X_BIAS3_LINE3_OUT_BIAS_150PC (2 << 4)
+#define DA732X_BIAS3_LINE3_OUT_BIAS_175PC (3 << 4)
+#define DA732X_BIAS3_LINE3_OUT_BIAS_200PC (4 << 4)
+#define DA732X_BIAS3_LINE3_OUT_BIAS_250PC (5 << 4)
+#define DA732X_BIAS3_LINE3_OUT_BIAS_300PC (6 << 4)
+#define DA732X_BIAS3_LINE3_OUT_BIAS_350PC (7 << 4)
+
+/* DA732X_REG_BIAS4 (addr=0x07) */
+#define DA732X_BIAS4_LINE4_DAC_BIAS_MASK (3 << 0)
+#define DA732X_BIAS4_LINE4_DAC_BIAS_100PC (0 << 0)
+#define DA732X_BIAS4_LINE4_DAC_BIAS_150PC (1 << 0)
+#define DA732X_BIAS4_LINE4_DAC_BIAS_50PC (2 << 0)
+#define DA732X_BIAS4_LINE4_DAC_BIAS_75PC (3 << 0)
+#define DA732X_BIAS4_LINE4_OUT_BIAS_MASK (7 << 4)
+#define DA732X_BIAS4_LINE4_OUT_BIAS_100PC (0 << 4)
+#define DA732X_BIAS4_LINE4_OUT_BIAS_125PC (1 << 4)
+#define DA732X_BIAS4_LINE4_OUT_BIAS_150PC (2 << 4)
+#define DA732X_BIAS4_LINE4_OUT_BIAS_175PC (3 << 4)
+#define DA732X_BIAS4_LINE4_OUT_BIAS_200PC (4 << 4)
+#define DA732X_BIAS4_LINE4_OUT_BIAS_250PC (5 << 4)
+#define DA732X_BIAS4_LINE4_OUT_BIAS_300PC (6 << 4)
+#define DA732X_BIAS4_LINE4_OUT_BIAS_350PC (7 << 4)
+
+/* DA732X_REG_SIF_VDD_SEL (addr=0x08) */
+#define DA732X_SIF_VDD_SEL_AIFA_VDD2 (1 << 0)
+#define DA732X_SIF_VDD_SEL_AIFB_VDD2 (1 << 1)
+#define DA732X_SIF_VDD_SEL_CIFA_VDD2 (1 << 4)
+
+/* DA732X_REG_MICBIAS2/1 (addr=0x0F/0x10) */
+#define DA732X_MICBIAS_VOLTAGE_MASK (0x0F << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V (0x00 << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V05 (0x01 << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V1 (0x02 << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V15 (0x03 << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V2 (0x04 << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V25 (0x05 << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V3 (0x06 << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V35 (0x07 << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V4 (0x08 << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V45 (0x09 << 0)
+#define DA732X_MICBIAS_VOLTAGE_2V5 (0x0A << 0)
+#define DA732X_MICBIAS_EN (1 << 7)
+#define DA732X_MICBIAS_EN_SHIFT 7
+#define DA732X_MICBIAS_VOLTAGE_SHIFT 0
+#define DA732X_MICBIAS_VOLTAGE_MAX 0x0B
+
+/* DA732X_REG_MICDET (addr=0x11) */
+#define DA732X_MICDET_INP_MICRES (1 << 0)
+#define DA732X_MICDET_INP_MICHOOK (1 << 1)
+#define DA732X_MICDET_INP_DEBOUNCE_PRD_8MS (0 << 0)
+#define DA732X_MICDET_INP_DEBOUNCE_PRD_16MS (1 << 0)
+#define DA732X_MICDET_INP_DEBOUNCE_PRD_32MS (2 << 0)
+#define DA732X_MICDET_INP_DEBOUNCE_PRD_64MS (3 << 0)
+#define DA732X_MICDET_INP_MICDET_EN (1 << 7)
+
+/* DA732X_REG_MIC1/2/3_PRE (addr=0x11/0x14/0x18) */
+#define DA732X_MICBOOST_MASK 0x7
+#define DA732X_MICBOOST_SHIFT 0
+#define DA732X_MICBOOST_MIN 0x1
+#define DA732X_MICBOOST_MAX DA732X_MICBOOST_MASK
+
+/* DA732X_REG_MIC1/2/3 (addr=0x13/0x15/0x19) */
+#define DA732X_MIC_VOL_SHIFT 0
+#define DA732X_MIC_VOL_VAL_MASK 0x1F
+#define DA732X_MIC_MUTE_SHIFT 6
+#define DA732X_MIC_EN_SHIFT 7
+#define DA732X_MIC_VOL_VAL_MIN 0x7
+#define DA732X_MIC_VOL_VAL_MAX DA732X_MIC_VOL_VAL_MASK
+
+/* DA732X_REG_AUX1L/R (addr=0x16/0x17) */
+#define DA732X_AUX_VOL_SHIFT 0
+#define DA732X_AUX_VOL_MASK 0x7
+#define DA732X_AUX_MUTE_SHIFT 6
+#define DA732X_AUX_EN_SHIFT 7
+#define DA732X_AUX_VOL_VAL_MAX DA732X_AUX_VOL_MASK
+
+/* DA732X_REG_INP_PINBIAS (addr=0x1A) */
+#define DA732X_INP_MICL_PINBIAS_EN (1 << 0)
+#define DA732X_INP_MICR_PINBIAS_EN (1 << 1)
+#define DA732X_INP_AUX1L_PINBIAS_EN (1 << 2)
+#define DA732X_INP_AUX1R_PINBIAS_EN (1 << 3)
+#define DA732X_INP_AUX2_PINBIAS_EN (1 << 4)
+
+/* DA732X_REG_INP_ZC_EN (addr=0x1B) */
+#define DA732X_MIC1_PRE_ZC_EN (1 << 0)
+#define DA732X_MIC1_ZC_EN (1 << 1)
+#define DA732X_MIC2_PRE_ZC_EN (1 << 2)
+#define DA732X_MIC2_ZC_EN (1 << 3)
+#define DA732X_AUXL_ZC_EN (1 << 4)
+#define DA732X_AUXR_ZC_EN (1 << 5)
+#define DA732X_MIC3_PRE_ZC_EN (1 << 6)
+#define DA732X_MIC3_ZC_EN (1 << 7)
+
+/* DA732X_REG_INP_MUX (addr=0x1D) */
+#define DA732X_INP_ADC1L_MUX_SEL_AUX1L (0 << 0)
+#define DA732X_INP_ADC1L_MUX_SEL_MIC1 (1 << 0)
+#define DA732X_INP_ADC1R_MUX_SEL_MASK (3 << 2)
+#define DA732X_INP_ADC1R_MUX_SEL_AUX1R (0 << 2)
+#define DA732X_INP_ADC1R_MUX_SEL_MIC2 (1 << 2)
+#define DA732X_INP_ADC1R_MUX_SEL_MIC3 (2 << 2)
+#define DA732X_INP_ADC2L_MUX_SEL_AUX1L (0 << 4)
+#define DA732X_INP_ADC2L_MUX_SEL_MICL (1 << 4)
+#define DA732X_INP_ADC2R_MUX_SEL_MASK (3 << 6)
+#define DA732X_INP_ADC2R_MUX_SEL_AUX1R (0 << 6)
+#define DA732X_INP_ADC2R_MUX_SEL_MICR (1 << 6)
+#define DA732X_INP_ADC2R_MUX_SEL_AUX2 (2 << 6)
+#define DA732X_ADC1L_MUX_SEL_SHIFT 0
+#define DA732X_ADC1R_MUX_SEL_SHIFT 2
+#define DA732X_ADC2L_MUX_SEL_SHIFT 4
+#define DA732X_ADC2R_MUX_SEL_SHIFT 6
+
+/* DA732X_REG_HP_DET (addr=0x20) */
+#define DA732X_HP_DET_AZ (1 << 0)
+#define DA732X_HP_DET_SEL1 (1 << 1)
+#define DA732X_HP_DET_IS_MASK (3 << 2)
+#define DA732X_HP_DET_IS_0_5UA (0 << 2)
+#define DA732X_HP_DET_IS_1UA (1 << 2)
+#define DA732X_HP_DET_IS_2UA (2 << 2)
+#define DA732X_HP_DET_IS_4UA (3 << 2)
+#define DA732X_HP_DET_RS_MASK (3 << 4)
+#define DA732X_HP_DET_RS_INFINITE (0 << 4)
+#define DA732X_HP_DET_RS_100KOHM (1 << 4)
+#define DA732X_HP_DET_RS_10KOHM (2 << 4)
+#define DA732X_HP_DET_RS_1KOHM (3 << 4)
+#define DA732X_HP_DET_EN (1 << 7)
+
+/* DA732X_REG_HPL_DAC_OFFSET (addr=0x21/0x26) */
+#define DA732X_HP_DAC_OFFSET_TRIM_MASK (0x3F << 0)
+#define DA732X_HP_DAC_OFFSET_DAC_SIGN (1 << 6)
+
+/* DA732X_REG_HPL_DAC_OFF_CNTL (addr=0x22/0x27) */
+#define DA732X_HP_DAC_OFF_CNTL_CONT_MASK (7 << 0)
+#define DA732X_HP_DAC_OFF_CNTL_COMPO (1 << 3)
+#define DA732X_HP_DAC_OFF_CALIBRATION (1 << 0)
+#define DA732X_HP_DAC_OFF_SCALE_STEPS (1 << 1)
+#define DA732X_HP_DAC_OFF_MASK 0x7F
+#define DA732X_HP_DAC_COMPO_SHIFT 3
+
+/* DA732X_REG_HPL_OUT_OFFSET (addr=0x23/0x28) */
+#define DA732X_HP_OUT_OFFSET_MASK (0xFF << 0)
+#define DA732X_HP_DAC_OFFSET_TRIM_VAL 0x7F
+
+/* DA732X_REG_HPL/R (addr=0x24/0x29) */
+#define DA732X_HP_OUT_SIGN (1 << 0)
+#define DA732X_HP_OUT_COMP (1 << 1)
+#define DA732X_HP_OUT_RESERVED (1 << 2)
+#define DA732X_HP_OUT_COMPO (1 << 3)
+#define DA732X_HP_OUT_DAC_EN (1 << 4)
+#define DA732X_HP_OUT_HIZ_EN (1 << 5)
+#define DA732X_HP_OUT_HIZ_DIS (0 << 5)
+#define DA732X_HP_OUT_MUTE (1 << 6)
+#define DA732X_HP_OUT_EN (1 << 7)
+#define DA732X_HP_OUT_COMPO_SHIFT 3
+#define DA732X_HP_OUT_DAC_EN_SHIFT 4
+#define DA732X_HP_HIZ_SHIFT 5
+#define DA732X_HP_MUTE_SHIFT 6
+#define DA732X_HP_OUT_EN_SHIFT 7
+
+#define DA732X_OUT_HIZ_EN (1 << 5)
+#define DA732X_OUT_HIZ_DIS (0 << 5)
+
+/* DA732X_REG_HPL/R_VOL (addr=0x25/0x2A) */
+#define DA732X_HP_VOL_VAL_MASK 0xF
+#define DA732X_HP_VOL_SHIFT 0
+#define DA732X_HP_VOL_VAL_MAX DA732X_HP_VOL_VAL_MASK
+
+/* DA732X_REG_LIN2/3/4 (addr=0x2B/0x2C/0x2D) */
+#define DA732X_LOUT_VOL_SHIFT 0
+#define DA732X_LOUT_VOL_MASK 0x0F
+#define DA732X_LOUT_DAC_OFF (0 << 4)
+#define DA732X_LOUT_DAC_EN (1 << 4)
+#define DA732X_LOUT_HIZ_N_DIS (0 << 5)
+#define DA732X_LOUT_HIZ_N_EN (1 << 5)
+#define DA732X_LOUT_UNMUTED (0 << 6)
+#define DA732X_LOUT_MUTED (1 << 6)
+#define DA732X_LOUT_EN (0 << 7)
+#define DA732X_LOUT_DIS (1 << 7)
+#define DA732X_LOUT_DAC_EN_SHIFT 4
+#define DA732X_LOUT_MUTE_SHIFT 6
+#define DA732X_LIN_OUT_EN_SHIFT 7
+#define DA732X_LOUT_VOL_VAL_MAX DA732X_LOUT_VOL_MASK
+
+/* DA732X_REG_OUT_ZC_EN (addr=0x2E) */
+#define DA732X_HPL_ZC_EN_SHIFT 0
+#define DA732X_HPR_ZC_EN_SHIFT 1
+#define DA732X_HPL_ZC_EN (1 << 0)
+#define DA732X_HPL_ZC_DIS (0 << 0)
+#define DA732X_HPR_ZC_EN (1 << 1)
+#define DA732X_HPR_ZC_DIS (0 << 1)
+#define DA732X_LIN2_ZC_EN (1 << 2)
+#define DA732X_LIN2_ZC_DIS (0 << 2)
+#define DA732X_LIN3_ZC_EN (1 << 3)
+#define DA732X_LIN3_ZC_DIS (0 << 3)
+#define DA732X_LIN4_ZC_EN (1 << 4)
+#define DA732X_LIN4_ZC_DIS (0 << 4)
+
+/* DA732X_REG_HP_LIN1_GNDSEL (addr=0x37) */
+#define DA732X_HP_OUT_GNDSEL (1 << 0)
+
+/* DA732X_REG_CP_HP2 (addr=0x3a) */
+#define DA732X_HP_CP_PULSESKIP (1 << 0)
+#define DA732X_HP_CP_REG (1 << 1)
+#define DA732X_HP_CP_EN (1 << 3)
+#define DA732X_HP_CP_DIS (0 << 3)
+
+/* DA732X_REG_CP_CTRL1 (addr=0x40) */
+#define DA732X_CP_MODE_MASK (7 << 1)
+#define DA732X_CP_CTRL_STANDBY (0 << 1)
+#define DA732X_CP_CTRL_CPVDD6 (2 << 1)
+#define DA732X_CP_CTRL_CPVDD5 (3 << 1)
+#define DA732X_CP_CTRL_CPVDD4 (4 << 1)
+#define DA732X_CP_CTRL_CPVDD3 (5 << 1)
+#define DA732X_CP_CTRL_CPVDD2 (6 << 1)
+#define DA732X_CP_CTRL_CPVDD1 (7 << 1)
+#define DA723X_CP_DIS (0 << 7)
+#define DA732X_CP_EN (1 << 7)
+
+/* DA732X_REG_CP_CTRL2 (addr=0x41) */
+#define DA732X_CP_BOOST (1 << 0)
+#define DA732X_CP_MANAGE_MAGNITUDE (2 << 2)
+
+/* DA732X_REG_CP_CTRL3 (addr=0x42) */
+#define DA732X_CP_1MHZ (0 << 0)
+#define DA732X_CP_500KHZ (1 << 0)
+#define DA732X_CP_250KHZ (2 << 0)
+#define DA732X_CP_125KHZ (3 << 0)
+#define DA732X_CP_63KHZ (4 << 0)
+#define DA732X_CP_0KHZ (5 << 0)
+
+/* DA732X_REG_PLL_CTRL (addr=0x53) */
+#define DA732X_PLL_INDIV_MASK (3 << 0)
+#define DA732X_PLL_SRM_EN (1 << 2)
+#define DA732X_PLL_EN (1 << 7)
+#define DA732X_PLL_BYPASS (0 << 0)
+
+/* DA732X_REG_CLK_CTRL (addr=0x54) */
+#define DA732X_SR1_MASK (0xF)
+#define DA732X_SR2_MASK (0xF0)
+
+/* DA732X_REG_CLK_DSP (addr=0x5A) */
+#define DA732X_DSP_FREQ_MASK (7 << 0)
+#define DA732X_DSP_FREQ_12MHZ (0 << 0)
+#define DA732X_DSP_FREQ_24MHZ (1 << 0)
+#define DA732X_DSP_FREQ_36MHZ (2 << 0)
+#define DA732X_DSP_FREQ_48MHZ (3 << 0)
+#define DA732X_DSP_FREQ_60MHZ (4 << 0)
+#define DA732X_DSP_FREQ_72MHZ (5 << 0)
+#define DA732X_DSP_FREQ_84MHZ (6 << 0)
+#define DA732X_DSP_FREQ_96MHZ (7 << 0)
+
+/* DA732X_REG_CLK_EN1 (addr=0x5B) */
+#define DA732X_DSP_CLK_EN (1 << 0)
+#define DA732X_SYS3_CLK_EN (1 << 1)
+#define DA732X_DSP12_CLK_EN (1 << 2)
+#define DA732X_PC_CLK_EN (1 << 3)
+#define DA732X_MCLK_SQR_EN (1 << 7)
+
+/* DA732X_REG_CLK_EN2 (addr=0x5C) */
+#define DA732X_UART_CLK_EN (1 << 1)
+#define DA732X_CP_CLK_EN (1 << 2)
+#define DA732X_CP_CLK_DIS (0 << 2)
+
+/* DA732X_REG_CLK_EN3 (addr=0x5D) */
+#define DA732X_ADCA_BB_CLK_EN (1 << 0)
+#define DA732X_ADCC_BB_CLK_EN (1 << 4)
+
+/* DA732X_REG_CLK_EN4 (addr=0x5E) */
+#define DA732X_DACA_BB_CLK_EN (1 << 0)
+#define DA732X_DACC_BB_CLK_EN (1 << 4)
+#define DA732X_DACA_BB_CLK_SHIFT 0
+#define DA732X_DACC_BB_CLK_SHIFT 4
+
+/* DA732X_REG_CLK_EN5 (addr=0x5F) */
+#define DA732X_DACE_BB_CLK_EN (1 << 0)
+#define DA732X_DACE_BB_CLK_SHIFT 0
+
+/* DA732X_REG_AIF_MCLK (addr=0x60) */
+#define DA732X_AIFM_FRAME_64 (1 << 2)
+#define DA732X_AIFM_SRC_SEL_AIFA (1 << 6)
+#define DA732X_CLK_GENERATION_AIF_A (1 << 4)
+#define DA732X_NO_CLK_GENERATION 0x0
+
+/* DA732X_REG_AIFA1 (addr=0x61) */
+#define DA732X_AIF_WORD_MASK (0x3 << 0)
+#define DA732X_AIF_WORD_16 (0 << 0)
+#define DA732X_AIF_WORD_20 (1 << 0)
+#define DA732X_AIF_WORD_24 (2 << 0)
+#define DA732X_AIF_WORD_32 (3 << 0)
+#define DA732X_AIF_TDM_MONO_SHIFT (1 << 6)
+#define DA732X_AIF1_CLK_MASK (1 << 7)
+#define DA732X_AIF_SLAVE (0 << 7)
+#define DA732X_AIF_CLK_FROM_SRC (1 << 7)
+
+/* DA732X_REG_AIFA3 (addr=0x63) */
+#define DA732X_AIF_MODE_SHIFT 0
+#define DA732X_AIF_MODE_MASK 0x3
+#define DA732X_AIF_I2S_MODE (0 << 0)
+#define DA732X_AIF_LEFT_J_MODE (1 << 0)
+#define DA732X_AIF_RIGHT_J_MODE (2 << 0)
+#define DA732X_AIF_DSP_MODE (3 << 0)
+#define DA732X_AIF_WCLK_INV (1 << 4)
+#define DA732X_AIF_BCLK_INV (1 << 5)
+#define DA732X_AIF_EN (1 << 7)
+#define DA732X_AIF_EN_SHIFT 7
+
+/* DA732X_REG_PC_CTRL (addr=0x6a) */
+#define DA732X_PC_PULSE_AIFA (0 << 0)
+#define DA732X_PC_PULSE_AIFB (1 << 0)
+#define DA732X_PC_RESYNC_AUT (1 << 6)
+#define DA732X_PC_RESYNC_NOT_AUT (0 << 6)
+#define DA732X_PC_SAME (1 << 7)
+
+/* DA732X_REG_DATA_ROUTE (addr=0x70) */
+#define DA732X_ADC1_TO_AIFA (0 << 0)
+#define DA732X_DSP_TO_AIFA (1 << 0)
+#define DA732X_ADC2_TO_AIFB (0 << 1)
+#define DA732X_DSP_TO_AIFB (1 << 1)
+#define DA732X_AIFA_TO_DAC1L (0 << 2)
+#define DA732X_DSP_TO_DAC1L (1 << 2)
+#define DA732X_AIFA_TO_DAC1R (0 << 3)
+#define DA732X_DSP_TO_DAC1R (1 << 3)
+#define DA732X_AIFB_TO_DAC2L (0 << 4)
+#define DA732X_DSP_TO_DAC2L (1 << 4)
+#define DA732X_AIFB_TO_DAC2R (0 << 5)
+#define DA732X_DSP_TO_DAC2R (1 << 5)
+#define DA732X_AIFB_TO_DAC3 (0 << 6)
+#define DA732X_DSP_TO_DAC3 (1 << 6)
+#define DA732X_BYPASS_DSP (0 << 0)
+#define DA732X_ALL_TO_DSP (0x7F << 0)
+
+/* DA732X_REG_DSP_CTRL (addr=0x71) */
+#define DA732X_DIGITAL_EN (1 << 0)
+#define DA732X_DIGITAL_RESET (0 << 0)
+#define DA732X_DSP_CORE_EN (1 << 1)
+#define DA732X_DSP_CORE_RESET (0 << 1)
+
+/* DA732X_REG_SPARE1_OUT (addr=0x7D)*/
+#define DA732X_HP_DRIVER_EN (1 << 0)
+#define DA732X_HP_GATE_LOW (1 << 2)
+#define DA732X_HP_LOOP_GAIN_CTRL (1 << 3)
+
+/* DA732X_REG_ID (addr=0x81)*/
+#define DA732X_ID_MINOR_MASK (0xF << 0)
+#define DA732X_ID_MAJOR_MASK (0xF << 4)
+
+/* DA732X_REG_ADC1/2_PD (addr=0x90/0x98) */
+#define DA732X_ADC_RST_MASK (0x3 << 0)
+#define DA732X_ADC_PD_MASK (0x3 << 2)
+#define DA732X_ADC_SET_ACT (0x3 << 0)
+#define DA732X_ADC_SET_RST (0x0 << 0)
+#define DA732X_ADC_ON (0x3 << 2)
+#define DA732X_ADC_OFF (0x0 << 2)
+
+/* DA732X_REG_ADC1/2_SEL (addr=0x94/0x9C) */
+#define DA732X_ADC_VOL_VAL_MASK 0x7
+#define DA732X_ADCL_VOL_SHIFT 0
+#define DA732X_ADCR_VOL_SHIFT 4
+#define DA732X_ADCL_EN_SHIFT 2
+#define DA732X_ADCR_EN_SHIFT 3
+#define DA732X_ADCL_EN (1 << 2)
+#define DA732X_ADCR_EN (1 << 3)
+#define DA732X_ADC_VOL_VAL_MAX DA732X_ADC_VOL_VAL_MASK
+
+/*
+ * DA732X_REG_ADC1/2_HPF (addr=0x93/0x9b)
+ * DA732x_REG_DAC1/2/3_HPG (addr=0xA5/0xB5/0xC5)
+ */
+#define DA732X_HPF_MUSIC_EN (1 << 3)
+#define DA732X_HPF_VOICE_EN ((1 << 3) | (1 << 7))
+#define DA732X_HPF_MASK ((1 << 3) | (1 << 7))
+#define DA732X_HPF_DIS ((0 << 3) | (0 << 7))
+
+/* DA732X_REG_DAC1/2/3_VOL */
+#define DA732X_DAC_VOL_VAL_MASK 0x7F
+#define DA732X_DAC_VOL_SHIFT 0
+#define DA732X_DAC_VOL_VAL_MAX DA732X_DAC_VOL_VAL_MASK
+
+/* DA732X_REG_DAC1/2/3_SEL (addr=0xA3/0xB3/0xC3) */
+#define DA732X_DACL_EN_SHIFT 3
+#define DA732X_DACR_EN_SHIFT 7
+#define DA732X_DACL_MUTE_SHIFT 2
+#define DA732X_DACR_MUTE_SHIFT 6
+#define DA732X_DACL_EN (1 << 3)
+#define DA732X_DACR_EN (1 << 7)
+#define DA732X_DACL_SDM (1 << 0)
+#define DA732X_DACR_SDM (1 << 4)
+#define DA732X_DACL_MUTE (1 << 2)
+#define DA732X_DACR_MUTE (1 << 6)
+
+/* DA732X_REG_DAC_SOFTMUTE (addr=0xA4/0xB4/0xC4) */
+#define DA732X_SOFTMUTE_EN (1 << 7)
+#define DA732X_GAIN_RAMPED (1 << 6)
+#define DA732X_16_SAMPLES (4 << 0)
+#define DA732X_SOFTMUTE_MASK (1 << 7)
+#define DA732X_SOFTMUTE_SHIFT 7
+
+/*
+ * DA732x_REG_ADC1/2_EQ12 (addr=0x95/0x9D)
+ * DA732x_REG_ADC1/2_EQ34 (addr=0x96/0x9E)
+ * DA732x_REG_ADC1/2_EQ5 (addr=0x97/0x9F)
+ * DA732x_REG_DAC1/2/3_EQ12 (addr=0xA5/0xB5/0xC5)
+ * DA732x_REG_DAC1/2/3_EQ34 (addr=0xA6/0xB6/0xC6)
+ * DA732x_REG_DAC1/2/3_EQ5 (addr=0xA7/0xB7/0xB7)
+ */
+#define DA732X_EQ_VOL_VAL_MASK 0xF
+#define DA732X_EQ_BAND1_SHIFT 0
+#define DA732X_EQ_BAND2_SHIFT 4
+#define DA732X_EQ_BAND3_SHIFT 0
+#define DA732X_EQ_BAND4_SHIFT 4
+#define DA732X_EQ_BAND5_SHIFT 0
+#define DA732X_EQ_OVERALL_SHIFT 4
+#define DA732X_EQ_OVERALL_VOL_VAL_MASK 0x3
+#define DA732X_EQ_DIS (0 << 7)
+#define DA732X_EQ_EN (1 << 7)
+#define DA732X_EQ_EN_SHIFT 7
+#define DA732X_EQ_VOL_VAL_MAX DA732X_EQ_VOL_VAL_MASK
+#define DA732X_EQ_OVERALL_VOL_VAL_MAX DA732X_EQ_OVERALL_VOL_VAL_MASK
+
+/* DA732X_REG_DMA_CMD (addr=0xD3) */
+#define DA732X_SEL_DSP_DMA_MASK (3 << 0)
+#define DA732X_SEL_DSP_DMA_DIS (0 << 0)
+#define DA732X_SEL_DSP_DMA_PMEM (1 << 0)
+#define DA732X_SEL_DSP_DMA_XMEM (2 << 0)
+#define DA732X_SEL_DSP_DMA_YMEM (3 << 0)
+#define DA732X_DSP_RW_MASK (1 << 4)
+#define DA732X_DSP_DMA_WRITE (0 << 4)
+#define DA732X_DSP_DMA_READ (1 << 4)
+
+/* DA732X_REG_DMA_STATUS (addr=0xDA) */
+#define DA732X_DSP_DMA_FREE (0 << 0)
+#define DA732X_DSP_DMA_BUSY (1 << 0)
+
+#endif /* __DA732X_REG_H_ */
diff --git a/sound/soc/codecs/isabelle.c b/sound/soc/codecs/isabelle.c
new file mode 100644
index 000000000000..5d8f39e32978
--- /dev/null
+++ b/sound/soc/codecs/isabelle.c
@@ -0,0 +1,1176 @@
+/*
+ * isabelle.c - Low power high fidelity audio codec driver
+ *
+ * Copyright (c) 2012 Texas Instruments, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ *
+ * Initially based on sound/soc/codecs/twl6040.c
+ *
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <sound/jack.h>
+#include <sound/initval.h>
+#include <asm/div64.h>
+#include "isabelle.h"
+
+
+/* Register default values for ISABELLE driver. */
+static struct reg_default isabelle_reg_defs[] = {
+ { 0, 0x00 },
+ { 1, 0x00 },
+ { 2, 0x00 },
+ { 3, 0x00 },
+ { 4, 0x00 },
+ { 5, 0x00 },
+ { 6, 0x00 },
+ { 7, 0x00 },
+ { 8, 0x00 },
+ { 9, 0x00 },
+ { 10, 0x00 },
+ { 11, 0x00 },
+ { 12, 0x00 },
+ { 13, 0x00 },
+ { 14, 0x00 },
+ { 15, 0x00 },
+ { 16, 0x00 },
+ { 17, 0x00 },
+ { 18, 0x00 },
+ { 19, 0x00 },
+ { 20, 0x00 },
+ { 21, 0x02 },
+ { 22, 0x02 },
+ { 23, 0x02 },
+ { 24, 0x02 },
+ { 25, 0x0F },
+ { 26, 0x8F },
+ { 27, 0x0F },
+ { 28, 0x8F },
+ { 29, 0x00 },
+ { 30, 0x00 },
+ { 31, 0x00 },
+ { 32, 0x00 },
+ { 33, 0x00 },
+ { 34, 0x00 },
+ { 35, 0x00 },
+ { 36, 0x00 },
+ { 37, 0x00 },
+ { 38, 0x00 },
+ { 39, 0x00 },
+ { 40, 0x00 },
+ { 41, 0x00 },
+ { 42, 0x00 },
+ { 43, 0x00 },
+ { 44, 0x00 },
+ { 45, 0x00 },
+ { 46, 0x00 },
+ { 47, 0x00 },
+ { 48, 0x00 },
+ { 49, 0x00 },
+ { 50, 0x00 },
+ { 51, 0x00 },
+ { 52, 0x00 },
+ { 53, 0x00 },
+ { 54, 0x00 },
+ { 55, 0x00 },
+ { 56, 0x00 },
+ { 57, 0x00 },
+ { 58, 0x00 },
+ { 59, 0x00 },
+ { 60, 0x00 },
+ { 61, 0x00 },
+ { 62, 0x00 },
+ { 63, 0x00 },
+ { 64, 0x00 },
+ { 65, 0x00 },
+ { 66, 0x00 },
+ { 67, 0x00 },
+ { 68, 0x00 },
+ { 69, 0x90 },
+ { 70, 0x90 },
+ { 71, 0x90 },
+ { 72, 0x00 },
+ { 73, 0x00 },
+ { 74, 0x00 },
+ { 75, 0x00 },
+ { 76, 0x00 },
+ { 77, 0x00 },
+ { 78, 0x00 },
+ { 79, 0x00 },
+ { 80, 0x00 },
+ { 81, 0x00 },
+ { 82, 0x00 },
+ { 83, 0x00 },
+ { 84, 0x00 },
+ { 85, 0x07 },
+ { 86, 0x00 },
+ { 87, 0x00 },
+ { 88, 0x00 },
+ { 89, 0x07 },
+ { 90, 0x80 },
+ { 91, 0x07 },
+ { 92, 0x07 },
+ { 93, 0x00 },
+ { 94, 0x00 },
+ { 95, 0x00 },
+ { 96, 0x00 },
+ { 97, 0x00 },
+ { 98, 0x00 },
+ { 99, 0x00 },
+};
+
+static const char *isabelle_rx1_texts[] = {"VRX1", "ARX1"};
+static const char *isabelle_rx2_texts[] = {"VRX2", "ARX2"};
+
+static const struct soc_enum isabelle_rx1_enum[] = {
+ SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 3, 1, isabelle_rx1_texts),
+ SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 5, 1, isabelle_rx1_texts),
+};
+
+static const struct soc_enum isabelle_rx2_enum[] = {
+ SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 2, 1, isabelle_rx2_texts),
+ SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 4, 1, isabelle_rx2_texts),
+};
+
+/* Headset DAC playback switches */
+static const struct snd_kcontrol_new rx1_mux_controls =
+ SOC_DAPM_ENUM("Route", isabelle_rx1_enum);
+
+static const struct snd_kcontrol_new rx2_mux_controls =
+ SOC_DAPM_ENUM("Route", isabelle_rx2_enum);
+
+/* TX input selection */
+static const char *isabelle_atx_texts[] = {"AMIC1", "DMIC"};
+static const char *isabelle_vtx_texts[] = {"AMIC2", "DMIC"};
+
+static const struct soc_enum isabelle_atx_enum[] = {
+ SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 7, 1, isabelle_atx_texts),
+ SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0, 1, isabelle_atx_texts),
+};
+
+static const struct soc_enum isabelle_vtx_enum[] = {
+ SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 6, 1, isabelle_vtx_texts),
+ SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0, 1, isabelle_vtx_texts),
+};
+
+static const struct snd_kcontrol_new atx_mux_controls =
+ SOC_DAPM_ENUM("Route", isabelle_atx_enum);
+
+static const struct snd_kcontrol_new vtx_mux_controls =
+ SOC_DAPM_ENUM("Route", isabelle_vtx_enum);
+
+/* Left analog microphone selection */
+static const char *isabelle_amic1_texts[] = {
+ "Main Mic", "Headset Mic", "Aux/FM Left"};
+
+/* Left analog microphone selection */
+static const char *isabelle_amic2_texts[] = {"Sub Mic", "Aux/FM Right"};
+
+static const struct soc_enum isabelle_amic1_enum[] = {
+ SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 5,
+ ARRAY_SIZE(isabelle_amic1_texts),
+ isabelle_amic1_texts),
+};
+
+static const struct soc_enum isabelle_amic2_enum[] = {
+ SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 4,
+ ARRAY_SIZE(isabelle_amic2_texts),
+ isabelle_amic2_texts),
+};
+
+static const struct snd_kcontrol_new amic1_control =
+ SOC_DAPM_ENUM("Route", isabelle_amic1_enum);
+
+static const struct snd_kcontrol_new amic2_control =
+ SOC_DAPM_ENUM("Route", isabelle_amic2_enum);
+
+static const char *isabelle_st_audio_texts[] = {"ATX1", "ATX2"};
+
+static const char *isabelle_st_voice_texts[] = {"VTX1", "VTX2"};
+
+static const struct soc_enum isabelle_st_audio_enum[] = {
+ SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA1_CFG_REG, 7, 1,
+ isabelle_st_audio_texts),
+ SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA2_CFG_REG, 7, 1,
+ isabelle_st_audio_texts),
+};
+
+static const struct soc_enum isabelle_st_voice_enum[] = {
+ SOC_ENUM_SINGLE(ISABELLE_VTX_STPGA1_CFG_REG, 7, 1,
+ isabelle_st_voice_texts),
+ SOC_ENUM_SINGLE(ISABELLE_VTX2_STPGA2_CFG_REG, 7, 1,
+ isabelle_st_voice_texts),
+};
+
+static const struct snd_kcontrol_new st_audio_control =
+ SOC_DAPM_ENUM("Route", isabelle_st_audio_enum);
+
+static const struct snd_kcontrol_new st_voice_control =
+ SOC_DAPM_ENUM("Route", isabelle_st_voice_enum);
+
+/* Mixer controls */
+static const struct snd_kcontrol_new isabelle_hs_left_mixer_controls[] = {
+SOC_DAPM_SINGLE("DAC1L Playback Switch", ISABELLE_HSDRV_CFG1_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("APGA1 Playback Switch", ISABELLE_HSDRV_CFG1_REG, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_hs_right_mixer_controls[] = {
+SOC_DAPM_SINGLE("DAC1R Playback Switch", ISABELLE_HSDRV_CFG1_REG, 5, 1, 0),
+SOC_DAPM_SINGLE("APGA2 Playback Switch", ISABELLE_HSDRV_CFG1_REG, 4, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_hf_left_mixer_controls[] = {
+SOC_DAPM_SINGLE("DAC2L Playback Switch", ISABELLE_HFLPGA_CFG_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("APGA1 Playback Switch", ISABELLE_HFLPGA_CFG_REG, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_hf_right_mixer_controls[] = {
+SOC_DAPM_SINGLE("DAC2R Playback Switch", ISABELLE_HFRPGA_CFG_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("APGA2 Playback Switch", ISABELLE_HFRPGA_CFG_REG, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_ep_mixer_controls[] = {
+SOC_DAPM_SINGLE("DAC2L Playback Switch", ISABELLE_EARDRV_CFG1_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("APGA1 Playback Switch", ISABELLE_EARDRV_CFG1_REG, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_aux_left_mixer_controls[] = {
+SOC_DAPM_SINGLE("DAC3L Playback Switch", ISABELLE_LINEAMP_CFG_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("APGA1 Playback Switch", ISABELLE_LINEAMP_CFG_REG, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_aux_right_mixer_controls[] = {
+SOC_DAPM_SINGLE("DAC3R Playback Switch", ISABELLE_LINEAMP_CFG_REG, 5, 1, 0),
+SOC_DAPM_SINGLE("APGA2 Playback Switch", ISABELLE_LINEAMP_CFG_REG, 4, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_dpga1_left_mixer_controls[] = {
+SOC_DAPM_SINGLE("RX1 Playback Switch", ISABELLE_DPGA1LR_IN_SEL_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("RX3 Playback Switch", ISABELLE_DPGA1LR_IN_SEL_REG, 6, 1, 0),
+SOC_DAPM_SINGLE("RX5 Playback Switch", ISABELLE_DPGA1LR_IN_SEL_REG, 5, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_dpga1_right_mixer_controls[] = {
+SOC_DAPM_SINGLE("RX2 Playback Switch", ISABELLE_DPGA1LR_IN_SEL_REG, 3, 1, 0),
+SOC_DAPM_SINGLE("RX4 Playback Switch", ISABELLE_DPGA1LR_IN_SEL_REG, 2, 1, 0),
+SOC_DAPM_SINGLE("RX6 Playback Switch", ISABELLE_DPGA1LR_IN_SEL_REG, 1, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_dpga2_left_mixer_controls[] = {
+SOC_DAPM_SINGLE("RX1 Playback Switch", ISABELLE_DPGA2L_IN_SEL_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("RX2 Playback Switch", ISABELLE_DPGA2L_IN_SEL_REG, 6, 1, 0),
+SOC_DAPM_SINGLE("RX3 Playback Switch", ISABELLE_DPGA2L_IN_SEL_REG, 5, 1, 0),
+SOC_DAPM_SINGLE("RX4 Playback Switch", ISABELLE_DPGA2L_IN_SEL_REG, 4, 1, 0),
+SOC_DAPM_SINGLE("RX5 Playback Switch", ISABELLE_DPGA2L_IN_SEL_REG, 3, 1, 0),
+SOC_DAPM_SINGLE("RX6 Playback Switch", ISABELLE_DPGA2L_IN_SEL_REG, 2, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_dpga2_right_mixer_controls[] = {
+SOC_DAPM_SINGLE("USNC Playback Switch", ISABELLE_DPGA2R_IN_SEL_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("RX2 Playback Switch", ISABELLE_DPGA2R_IN_SEL_REG, 3, 1, 0),
+SOC_DAPM_SINGLE("RX4 Playback Switch", ISABELLE_DPGA2R_IN_SEL_REG, 2, 1, 0),
+SOC_DAPM_SINGLE("RX6 Playback Switch", ISABELLE_DPGA2R_IN_SEL_REG, 1, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_dpga3_left_mixer_controls[] = {
+SOC_DAPM_SINGLE("RX1 Playback Switch", ISABELLE_DPGA3LR_IN_SEL_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("RX3 Playback Switch", ISABELLE_DPGA3LR_IN_SEL_REG, 6, 1, 0),
+SOC_DAPM_SINGLE("RX5 Playback Switch", ISABELLE_DPGA3LR_IN_SEL_REG, 5, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_dpga3_right_mixer_controls[] = {
+SOC_DAPM_SINGLE("RX2 Playback Switch", ISABELLE_DPGA3LR_IN_SEL_REG, 3, 1, 0),
+SOC_DAPM_SINGLE("RX4 Playback Switch", ISABELLE_DPGA3LR_IN_SEL_REG, 2, 1, 0),
+SOC_DAPM_SINGLE("RX6 Playback Switch", ISABELLE_DPGA3LR_IN_SEL_REG, 1, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_rx1_mixer_controls[] = {
+SOC_DAPM_SINGLE("ST1 Playback Switch", ISABELLE_RX_INPUT_CFG_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("DL1 Playback Switch", ISABELLE_RX_INPUT_CFG_REG, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_rx2_mixer_controls[] = {
+SOC_DAPM_SINGLE("ST2 Playback Switch", ISABELLE_RX_INPUT_CFG_REG, 5, 1, 0),
+SOC_DAPM_SINGLE("DL2 Playback Switch", ISABELLE_RX_INPUT_CFG_REG, 4, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_rx3_mixer_controls[] = {
+SOC_DAPM_SINGLE("ST1 Playback Switch", ISABELLE_RX_INPUT_CFG_REG, 3, 1, 0),
+SOC_DAPM_SINGLE("DL3 Playback Switch", ISABELLE_RX_INPUT_CFG_REG, 2, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_rx4_mixer_controls[] = {
+SOC_DAPM_SINGLE("ST2 Playback Switch", ISABELLE_RX_INPUT_CFG_REG, 1, 1, 0),
+SOC_DAPM_SINGLE("DL4 Playback Switch", ISABELLE_RX_INPUT_CFG_REG, 0, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_rx5_mixer_controls[] = {
+SOC_DAPM_SINGLE("ST1 Playback Switch", ISABELLE_RX_INPUT_CFG2_REG, 7, 1, 0),
+SOC_DAPM_SINGLE("DL5 Playback Switch", ISABELLE_RX_INPUT_CFG2_REG, 6, 1, 0),
+};
+
+static const struct snd_kcontrol_new isabelle_rx6_mixer_controls[] = {
+SOC_DAPM_SINGLE("ST2 Playback Switch", ISABELLE_RX_INPUT_CFG2_REG, 5, 1, 0),
+SOC_DAPM_SINGLE("DL6 Playback Switch", ISABELLE_RX_INPUT_CFG2_REG, 4, 1, 0),
+};
+
+static const struct snd_kcontrol_new ep_path_enable_control =
+ SOC_DAPM_SINGLE("Switch", ISABELLE_EARDRV_CFG2_REG, 0, 1, 0);
+
+/* TLV Declarations */
+static const DECLARE_TLV_DB_SCALE(mic_amp_tlv, 0, 100, 0);
+static const DECLARE_TLV_DB_SCALE(afm_amp_tlv, -3300, 300, 0);
+static const DECLARE_TLV_DB_SCALE(dac_tlv, -1200, 200, 0);
+static const DECLARE_TLV_DB_SCALE(hf_tlv, -5000, 200, 0);
+
+/* from -63 to 0 dB in 1 dB steps */
+static const DECLARE_TLV_DB_SCALE(dpga_tlv, -6300, 100, 1);
+
+/* from -63 to 9 dB in 1 dB steps */
+static const DECLARE_TLV_DB_SCALE(rx_tlv, -6300, 100, 1);
+
+static const DECLARE_TLV_DB_SCALE(st_tlv, -2700, 300, 1);
+static const DECLARE_TLV_DB_SCALE(tx_tlv, -600, 100, 0);
+
+static const struct snd_kcontrol_new isabelle_snd_controls[] = {
+ SOC_DOUBLE_TLV("Headset Playback Volume", ISABELLE_HSDRV_GAIN_REG,
+ 4, 0, 0xF, 0, dac_tlv),
+ SOC_DOUBLE_R_TLV("Handsfree Playback Volume",
+ ISABELLE_HFLPGA_CFG_REG, ISABELLE_HFRPGA_CFG_REG,
+ 0, 0x1F, 0, hf_tlv),
+ SOC_DOUBLE_TLV("Aux Playback Volume", ISABELLE_LINEAMP_GAIN_REG,
+ 4, 0, 0xF, 0, dac_tlv),
+ SOC_SINGLE_TLV("Earpiece Playback Volume", ISABELLE_EARDRV_CFG1_REG,
+ 0, 0xF, 0, dac_tlv),
+
+ SOC_DOUBLE_TLV("Aux FM Volume", ISABELLE_APGA_GAIN_REG, 4, 0, 0xF, 0,
+ afm_amp_tlv),
+ SOC_SINGLE_TLV("Mic1 Capture Volume", ISABELLE_MIC1_GAIN_REG, 3, 0x1F,
+ 0, mic_amp_tlv),
+ SOC_SINGLE_TLV("Mic2 Capture Volume", ISABELLE_MIC2_GAIN_REG, 3, 0x1F,
+ 0, mic_amp_tlv),
+
+ SOC_DOUBLE_R_TLV("DPGA1 Volume", ISABELLE_DPGA1L_GAIN_REG,
+ ISABELLE_DPGA1R_GAIN_REG, 0, 0x3F, 0, dpga_tlv),
+ SOC_DOUBLE_R_TLV("DPGA2 Volume", ISABELLE_DPGA2L_GAIN_REG,
+ ISABELLE_DPGA2R_GAIN_REG, 0, 0x3F, 0, dpga_tlv),
+ SOC_DOUBLE_R_TLV("DPGA3 Volume", ISABELLE_DPGA3L_GAIN_REG,
+ ISABELLE_DPGA3R_GAIN_REG, 0, 0x3F, 0, dpga_tlv),
+
+ SOC_SINGLE_TLV("Sidetone Audio TX1 Volume",
+ ISABELLE_ATX_STPGA1_CFG_REG, 0, 0xF, 0, st_tlv),
+ SOC_SINGLE_TLV("Sidetone Audio TX2 Volume",
+ ISABELLE_ATX_STPGA2_CFG_REG, 0, 0xF, 0, st_tlv),
+ SOC_SINGLE_TLV("Sidetone Voice TX1 Volume",
+ ISABELLE_VTX_STPGA1_CFG_REG, 0, 0xF, 0, st_tlv),
+ SOC_SINGLE_TLV("Sidetone Voice TX2 Volume",
+ ISABELLE_VTX2_STPGA2_CFG_REG, 0, 0xF, 0, st_tlv),
+
+ SOC_SINGLE_TLV("Audio TX1 Volume", ISABELLE_ATX1_DPGA_REG, 4, 0xF, 0,
+ tx_tlv),
+ SOC_SINGLE_TLV("Audio TX2 Volume", ISABELLE_ATX2_DPGA_REG, 4, 0xF, 0,
+ tx_tlv),
+ SOC_SINGLE_TLV("Voice TX1 Volume", ISABELLE_VTX1_DPGA_REG, 4, 0xF, 0,
+ tx_tlv),
+ SOC_SINGLE_TLV("Voice TX2 Volume", ISABELLE_VTX2_DPGA_REG, 4, 0xF, 0,
+ tx_tlv),
+
+ SOC_SINGLE_TLV("RX1 DPGA Volume", ISABELLE_RX1_DPGA_REG, 0, 0x3F, 0,
+ rx_tlv),
+ SOC_SINGLE_TLV("RX2 DPGA Volume", ISABELLE_RX2_DPGA_REG, 0, 0x3F, 0,
+ rx_tlv),
+ SOC_SINGLE_TLV("RX3 DPGA Volume", ISABELLE_RX3_DPGA_REG, 0, 0x3F, 0,
+ rx_tlv),
+ SOC_SINGLE_TLV("RX4 DPGA Volume", ISABELLE_RX4_DPGA_REG, 0, 0x3F, 0,
+ rx_tlv),
+ SOC_SINGLE_TLV("RX5 DPGA Volume", ISABELLE_RX5_DPGA_REG, 0, 0x3F, 0,
+ rx_tlv),
+ SOC_SINGLE_TLV("RX6 DPGA Volume", ISABELLE_RX6_DPGA_REG, 0, 0x3F, 0,
+ rx_tlv),
+
+ SOC_SINGLE("Headset Noise Gate", ISABELLE_HS_NG_CFG1_REG, 7, 1, 0),
+ SOC_SINGLE("Handsfree Noise Gate", ISABELLE_HF_NG_CFG1_REG, 7, 1, 0),
+
+ SOC_SINGLE("ATX1 Filter Bypass Switch", ISABELLE_AUDIO_HPF_CFG_REG,
+ 7, 1, 0),
+ SOC_SINGLE("ATX2 Filter Bypass Switch", ISABELLE_AUDIO_HPF_CFG_REG,
+ 6, 1, 0),
+ SOC_SINGLE("ARX1 Filter Bypass Switch", ISABELLE_AUDIO_HPF_CFG_REG,
+ 5, 1, 0),
+ SOC_SINGLE("ARX2 Filter Bypass Switch", ISABELLE_AUDIO_HPF_CFG_REG,
+ 4, 1, 0),
+ SOC_SINGLE("ARX3 Filter Bypass Switch", ISABELLE_AUDIO_HPF_CFG_REG,
+ 3, 1, 0),
+ SOC_SINGLE("ARX4 Filter Bypass Switch", ISABELLE_AUDIO_HPF_CFG_REG,
+ 2, 1, 0),
+ SOC_SINGLE("ARX5 Filter Bypass Switch", ISABELLE_AUDIO_HPF_CFG_REG,
+ 1, 1, 0),
+ SOC_SINGLE("ARX6 Filter Bypass Switch", ISABELLE_AUDIO_HPF_CFG_REG,
+ 0, 1, 0),
+ SOC_SINGLE("VRX1 Filter Bypass Switch", ISABELLE_AUDIO_HPF_CFG_REG,
+ 3, 1, 0),
+ SOC_SINGLE("VRX2 Filter Bypass Switch", ISABELLE_AUDIO_HPF_CFG_REG,
+ 2, 1, 0),
+
+ SOC_SINGLE("ATX1 Filter Enable Switch", ISABELLE_ALU_TX_EN_REG,
+ 7, 1, 0),
+ SOC_SINGLE("ATX2 Filter Enable Switch", ISABELLE_ALU_TX_EN_REG,
+ 6, 1, 0),
+ SOC_SINGLE("VTX1 Filter Enable Switch", ISABELLE_ALU_TX_EN_REG,
+ 5, 1, 0),
+ SOC_SINGLE("VTX2 Filter Enable Switch", ISABELLE_ALU_TX_EN_REG,
+ 4, 1, 0),
+ SOC_SINGLE("RX1 Filter Enable Switch", ISABELLE_ALU_RX_EN_REG,
+ 5, 1, 0),
+ SOC_SINGLE("RX2 Filter Enable Switch", ISABELLE_ALU_RX_EN_REG,
+ 4, 1, 0),
+ SOC_SINGLE("RX3 Filter Enable Switch", ISABELLE_ALU_RX_EN_REG,
+ 3, 1, 0),
+ SOC_SINGLE("RX4 Filter Enable Switch", ISABELLE_ALU_RX_EN_REG,
+ 2, 1, 0),
+ SOC_SINGLE("RX5 Filter Enable Switch", ISABELLE_ALU_RX_EN_REG,
+ 1, 1, 0),
+ SOC_SINGLE("RX6 Filter Enable Switch", ISABELLE_ALU_RX_EN_REG,
+ 0, 1, 0),
+
+ SOC_SINGLE("ULATX12 Capture Switch", ISABELLE_ULATX12_INTF_CFG_REG,
+ 7, 1, 0),
+
+ SOC_SINGLE("DL12 Playback Switch", ISABELLE_DL12_INTF_CFG_REG,
+ 7, 1, 0),
+ SOC_SINGLE("DL34 Playback Switch", ISABELLE_DL34_INTF_CFG_REG,
+ 7, 1, 0),
+ SOC_SINGLE("DL56 Playback Switch", ISABELLE_DL56_INTF_CFG_REG,
+ 7, 1, 0),
+
+ /* DMIC Switch */
+ SOC_SINGLE("DMIC Switch", ISABELLE_DMIC_CFG_REG, 0, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget isabelle_dapm_widgets[] = {
+ /* Inputs */
+ SND_SOC_DAPM_INPUT("MAINMIC"),
+ SND_SOC_DAPM_INPUT("HSMIC"),
+ SND_SOC_DAPM_INPUT("SUBMIC"),
+ SND_SOC_DAPM_INPUT("LINEIN1"),
+ SND_SOC_DAPM_INPUT("LINEIN2"),
+ SND_SOC_DAPM_INPUT("DMICDAT"),
+
+ /* Outputs */
+ SND_SOC_DAPM_OUTPUT("HSOL"),
+ SND_SOC_DAPM_OUTPUT("HSOR"),
+ SND_SOC_DAPM_OUTPUT("HFL"),
+ SND_SOC_DAPM_OUTPUT("HFR"),
+ SND_SOC_DAPM_OUTPUT("EP"),
+ SND_SOC_DAPM_OUTPUT("LINEOUT1"),
+ SND_SOC_DAPM_OUTPUT("LINEOUT2"),
+
+ SND_SOC_DAPM_PGA("DL1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DL2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DL3", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DL4", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DL5", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DL6", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Analog input muxes for the capture amplifiers */
+ SND_SOC_DAPM_MUX("Analog Left Capture Route",
+ SND_SOC_NOPM, 0, 0, &amic1_control),
+ SND_SOC_DAPM_MUX("Analog Right Capture Route",
+ SND_SOC_NOPM, 0, 0, &amic2_control),
+
+ SND_SOC_DAPM_MUX("Sidetone Audio Playback", SND_SOC_NOPM, 0, 0,
+ &st_audio_control),
+ SND_SOC_DAPM_MUX("Sidetone Voice Playback", SND_SOC_NOPM, 0, 0,
+ &st_voice_control),
+
+ /* AIF */
+ SND_SOC_DAPM_AIF_IN("INTF1_SDI", NULL, 0, ISABELLE_INTF_EN_REG, 7, 0),
+ SND_SOC_DAPM_AIF_IN("INTF2_SDI", NULL, 0, ISABELLE_INTF_EN_REG, 6, 0),
+
+ SND_SOC_DAPM_AIF_OUT("INTF1_SDO", NULL, 0, ISABELLE_INTF_EN_REG, 5, 0),
+ SND_SOC_DAPM_AIF_OUT("INTF2_SDO", NULL, 0, ISABELLE_INTF_EN_REG, 4, 0),
+
+ SND_SOC_DAPM_OUT_DRV("ULATX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("ULATX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("ULVTX1", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("ULVTX2", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Analog Capture PGAs */
+ SND_SOC_DAPM_PGA("MicAmp1", ISABELLE_AMIC_CFG_REG, 5, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("MicAmp2", ISABELLE_AMIC_CFG_REG, 4, 0, NULL, 0),
+
+ /* Auxiliary FM PGAs */
+ SND_SOC_DAPM_PGA("APGA1", ISABELLE_APGA_CFG_REG, 7, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("APGA2", ISABELLE_APGA_CFG_REG, 6, 0, NULL, 0),
+
+ /* ADCs */
+ SND_SOC_DAPM_ADC("ADC1", "Left Front Capture",
+ ISABELLE_AMIC_CFG_REG, 7, 0),
+ SND_SOC_DAPM_ADC("ADC2", "Right Front Capture",
+ ISABELLE_AMIC_CFG_REG, 6, 0),
+
+ /* Microphone Bias */
+ SND_SOC_DAPM_SUPPLY("Headset Mic Bias", ISABELLE_ABIAS_CFG_REG,
+ 3, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Main Mic Bias", ISABELLE_ABIAS_CFG_REG,
+ 2, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Digital Mic1 Bias",
+ ISABELLE_DBIAS_CFG_REG, 3, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("Digital Mic2 Bias",
+ ISABELLE_DBIAS_CFG_REG, 2, 0, NULL, 0),
+
+ /* Mixers */
+ SND_SOC_DAPM_MIXER("Headset Left Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_hs_left_mixer_controls,
+ ARRAY_SIZE(isabelle_hs_left_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Headset Right Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_hs_right_mixer_controls,
+ ARRAY_SIZE(isabelle_hs_right_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Handsfree Left Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_hf_left_mixer_controls,
+ ARRAY_SIZE(isabelle_hf_left_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Handsfree Right Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_hf_right_mixer_controls,
+ ARRAY_SIZE(isabelle_hf_right_mixer_controls)),
+ SND_SOC_DAPM_MIXER("LINEOUT1 Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_aux_left_mixer_controls,
+ ARRAY_SIZE(isabelle_aux_left_mixer_controls)),
+ SND_SOC_DAPM_MIXER("LINEOUT2 Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_aux_right_mixer_controls,
+ ARRAY_SIZE(isabelle_aux_right_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Earphone Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_ep_mixer_controls,
+ ARRAY_SIZE(isabelle_ep_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("DPGA1L Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_dpga1_left_mixer_controls,
+ ARRAY_SIZE(isabelle_dpga1_left_mixer_controls)),
+ SND_SOC_DAPM_MIXER("DPGA1R Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_dpga1_right_mixer_controls,
+ ARRAY_SIZE(isabelle_dpga1_right_mixer_controls)),
+ SND_SOC_DAPM_MIXER("DPGA2L Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_dpga2_left_mixer_controls,
+ ARRAY_SIZE(isabelle_dpga2_left_mixer_controls)),
+ SND_SOC_DAPM_MIXER("DPGA2R Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_dpga2_right_mixer_controls,
+ ARRAY_SIZE(isabelle_dpga2_right_mixer_controls)),
+ SND_SOC_DAPM_MIXER("DPGA3L Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_dpga3_left_mixer_controls,
+ ARRAY_SIZE(isabelle_dpga3_left_mixer_controls)),
+ SND_SOC_DAPM_MIXER("DPGA3R Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_dpga3_right_mixer_controls,
+ ARRAY_SIZE(isabelle_dpga3_right_mixer_controls)),
+
+ SND_SOC_DAPM_MIXER("RX1 Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_rx1_mixer_controls,
+ ARRAY_SIZE(isabelle_rx1_mixer_controls)),
+ SND_SOC_DAPM_MIXER("RX2 Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_rx2_mixer_controls,
+ ARRAY_SIZE(isabelle_rx2_mixer_controls)),
+ SND_SOC_DAPM_MIXER("RX3 Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_rx3_mixer_controls,
+ ARRAY_SIZE(isabelle_rx3_mixer_controls)),
+ SND_SOC_DAPM_MIXER("RX4 Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_rx4_mixer_controls,
+ ARRAY_SIZE(isabelle_rx4_mixer_controls)),
+ SND_SOC_DAPM_MIXER("RX5 Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_rx5_mixer_controls,
+ ARRAY_SIZE(isabelle_rx5_mixer_controls)),
+ SND_SOC_DAPM_MIXER("RX6 Mixer", SND_SOC_NOPM, 0, 0,
+ isabelle_rx6_mixer_controls,
+ ARRAY_SIZE(isabelle_rx6_mixer_controls)),
+
+ /* DACs */
+ SND_SOC_DAPM_DAC("DAC1L", "Headset Playback", ISABELLE_DAC_CFG_REG,
+ 5, 0),
+ SND_SOC_DAPM_DAC("DAC1R", "Headset Playback", ISABELLE_DAC_CFG_REG,
+ 4, 0),
+ SND_SOC_DAPM_DAC("DAC2L", "Handsfree Playback", ISABELLE_DAC_CFG_REG,
+ 3, 0),
+ SND_SOC_DAPM_DAC("DAC2R", "Handsfree Playback", ISABELLE_DAC_CFG_REG,
+ 2, 0),
+ SND_SOC_DAPM_DAC("DAC3L", "Lineout Playback", ISABELLE_DAC_CFG_REG,
+ 1, 0),
+ SND_SOC_DAPM_DAC("DAC3R", "Lineout Playback", ISABELLE_DAC_CFG_REG,
+ 0, 0),
+
+ /* Analog Playback PGAs */
+ SND_SOC_DAPM_PGA("Sidetone Audio PGA", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Sidetone Voice PGA", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("HF Left PGA", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("HF Right PGA", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DPGA1L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DPGA1R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DPGA2L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DPGA2R", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DPGA3L", SND_SOC_NOPM, 0, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("DPGA3R", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+ /* Analog Playback Mux */
+ SND_SOC_DAPM_MUX("RX1 Playback", ISABELLE_ALU_RX_EN_REG, 5, 0,
+ &rx1_mux_controls),
+ SND_SOC_DAPM_MUX("RX2 Playback", ISABELLE_ALU_RX_EN_REG, 4, 0,
+ &rx2_mux_controls),
+
+ /* TX Select */
+ SND_SOC_DAPM_MUX("ATX Select", ISABELLE_TX_INPUT_CFG_REG,
+ 7, 0, &atx_mux_controls),
+ SND_SOC_DAPM_MUX("VTX Select", ISABELLE_TX_INPUT_CFG_REG,
+ 6, 0, &vtx_mux_controls),
+
+ SND_SOC_DAPM_SWITCH("Earphone Playback", SND_SOC_NOPM, 0, 0,
+ &ep_path_enable_control),
+
+ /* Output Drivers */
+ SND_SOC_DAPM_OUT_DRV("HS Left Driver", ISABELLE_HSDRV_CFG2_REG,
+ 1, 0, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("HS Right Driver", ISABELLE_HSDRV_CFG2_REG,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("LINEOUT1 Left Driver", ISABELLE_LINEAMP_CFG_REG,
+ 1, 0, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("LINEOUT2 Right Driver", ISABELLE_LINEAMP_CFG_REG,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("Earphone Driver", ISABELLE_EARDRV_CFG2_REG,
+ 1, 0, NULL, 0),
+
+ SND_SOC_DAPM_OUT_DRV("HF Left Driver", ISABELLE_HFDRV_CFG_REG,
+ 1, 0, NULL, 0),
+ SND_SOC_DAPM_OUT_DRV("HF Right Driver", ISABELLE_HFDRV_CFG_REG,
+ 0, 0, NULL, 0),
+};
+
+static const struct snd_soc_dapm_route isabelle_intercon[] = {
+ /* Interface mapping */
+ { "DL1", "DL12 Playback Switch", "INTF1_SDI" },
+ { "DL2", "DL12 Playback Switch", "INTF1_SDI" },
+ { "DL3", "DL34 Playback Switch", "INTF1_SDI" },
+ { "DL4", "DL34 Playback Switch", "INTF1_SDI" },
+ { "DL5", "DL56 Playback Switch", "INTF1_SDI" },
+ { "DL6", "DL56 Playback Switch", "INTF1_SDI" },
+
+ { "DL1", "DL12 Playback Switch", "INTF2_SDI" },
+ { "DL2", "DL12 Playback Switch", "INTF2_SDI" },
+ { "DL3", "DL34 Playback Switch", "INTF2_SDI" },
+ { "DL4", "DL34 Playback Switch", "INTF2_SDI" },
+ { "DL5", "DL56 Playback Switch", "INTF2_SDI" },
+ { "DL6", "DL56 Playback Switch", "INTF2_SDI" },
+
+ /* Input side mapping */
+ { "Sidetone Audio PGA", NULL, "Sidetone Audio Playback" },
+ { "Sidetone Voice PGA", NULL, "Sidetone Voice Playback" },
+
+ { "RX1 Mixer", "ST1 Playback Switch", "Sidetone Audio PGA" },
+
+ { "RX1 Mixer", "ST1 Playback Switch", "Sidetone Voice PGA" },
+ { "RX1 Mixer", "DL1 Playback Switch", "DL1" },
+
+ { "RX2 Mixer", "ST2 Playback Switch", "Sidetone Audio PGA" },
+
+ { "RX2 Mixer", "ST2 Playback Switch", "Sidetone Voice PGA" },
+ { "RX2 Mixer", "DL2 Playback Switch", "DL2" },
+
+ { "RX3 Mixer", "ST1 Playback Switch", "Sidetone Voice PGA" },
+ { "RX3 Mixer", "DL3 Playback Switch", "DL3" },
+
+ { "RX4 Mixer", "ST2 Playback Switch", "Sidetone Voice PGA" },
+ { "RX4 Mixer", "DL4 Playback Switch", "DL4" },
+
+ { "RX5 Mixer", "ST1 Playback Switch", "Sidetone Voice PGA" },
+ { "RX5 Mixer", "DL5 Playback Switch", "DL5" },
+
+ { "RX6 Mixer", "ST2 Playback Switch", "Sidetone Voice PGA" },
+ { "RX6 Mixer", "DL6 Playback Switch", "DL6" },
+
+ /* Capture path */
+ { "Analog Left Capture Route", "Headset Mic", "HSMIC" },
+ { "Analog Left Capture Route", "Main Mic", "MAINMIC" },
+ { "Analog Left Capture Route", "Aux/FM Left", "LINEIN1" },
+
+ { "Analog Right Capture Route", "Sub Mic", "SUBMIC" },
+ { "Analog Right Capture Route", "Aux/FM Right", "LINEIN2" },
+
+ { "MicAmp1", NULL, "Analog Left Capture Route" },
+ { "MicAmp2", NULL, "Analog Right Capture Route" },
+
+ { "ADC1", NULL, "MicAmp1" },
+ { "ADC2", NULL, "MicAmp2" },
+
+ { "ATX Select", "AMIC1", "ADC1" },
+ { "ATX Select", "DMIC", "DMICDAT" },
+ { "ATX Select", "AMIC2", "ADC2" },
+
+ { "VTX Select", "AMIC1", "ADC1" },
+ { "VTX Select", "DMIC", "DMICDAT" },
+ { "VTX Select", "AMIC2", "ADC2" },
+
+ { "ULATX1", "ATX1 Filter Enable Switch", "ATX Select" },
+ { "ULATX1", "ATX1 Filter Bypass Switch", "ATX Select" },
+ { "ULATX2", "ATX2 Filter Enable Switch", "ATX Select" },
+ { "ULATX2", "ATX2 Filter Bypass Switch", "ATX Select" },
+
+ { "ULVTX1", "VTX1 Filter Enable Switch", "VTX Select" },
+ { "ULVTX1", "VTX1 Filter Bypass Switch", "VTX Select" },
+ { "ULVTX2", "VTX2 Filter Enable Switch", "VTX Select" },
+ { "ULVTX2", "VTX2 Filter Bypass Switch", "VTX Select" },
+
+ { "INTF1_SDO", "ULATX12 Capture Switch", "ULATX1" },
+ { "INTF1_SDO", "ULATX12 Capture Switch", "ULATX2" },
+ { "INTF2_SDO", "ULATX12 Capture Switch", "ULATX1" },
+ { "INTF2_SDO", "ULATX12 Capture Switch", "ULATX2" },
+
+ { "INTF1_SDO", NULL, "ULVTX1" },
+ { "INTF1_SDO", NULL, "ULVTX2" },
+ { "INTF2_SDO", NULL, "ULVTX1" },
+ { "INTF2_SDO", NULL, "ULVTX2" },
+
+ /* AFM Path */
+ { "APGA1", NULL, "LINEIN1" },
+ { "APGA2", NULL, "LINEIN2" },
+
+ { "RX1 Playback", "VRX1 Filter Bypass Switch", "RX1 Mixer" },
+ { "RX1 Playback", "ARX1 Filter Bypass Switch", "RX1 Mixer" },
+ { "RX1 Playback", "RX1 Filter Enable Switch", "RX1 Mixer" },
+
+ { "RX2 Playback", "VRX2 Filter Bypass Switch", "RX2 Mixer" },
+ { "RX2 Playback", "ARX2 Filter Bypass Switch", "RX2 Mixer" },
+ { "RX2 Playback", "RX2 Filter Enable Switch", "RX2 Mixer" },
+
+ { "RX3 Playback", "ARX3 Filter Bypass Switch", "RX3 Mixer" },
+ { "RX3 Playback", "RX3 Filter Enable Switch", "RX3 Mixer" },
+
+ { "RX4 Playback", "ARX4 Filter Bypass Switch", "RX4 Mixer" },
+ { "RX4 Playback", "RX4 Filter Enable Switch", "RX4 Mixer" },
+
+ { "RX5 Playback", "ARX5 Filter Bypass Switch", "RX5 Mixer" },
+ { "RX5 Playback", "RX5 Filter Enable Switch", "RX5 Mixer" },
+
+ { "RX6 Playback", "ARX6 Filter Bypass Switch", "RX6 Mixer" },
+ { "RX6 Playback", "RX6 Filter Enable Switch", "RX6 Mixer" },
+
+ { "DPGA1L Mixer", "RX1 Playback Switch", "RX1 Playback" },
+ { "DPGA1L Mixer", "RX3 Playback Switch", "RX3 Playback" },
+ { "DPGA1L Mixer", "RX5 Playback Switch", "RX5 Playback" },
+
+ { "DPGA1R Mixer", "RX2 Playback Switch", "RX2 Playback" },
+ { "DPGA1R Mixer", "RX4 Playback Switch", "RX4 Playback" },
+ { "DPGA1R Mixer", "RX6 Playback Switch", "RX6 Playback" },
+
+ { "DPGA1L", NULL, "DPGA1L Mixer" },
+ { "DPGA1R", NULL, "DPGA1R Mixer" },
+
+ { "DAC1L", NULL, "DPGA1L" },
+ { "DAC1R", NULL, "DPGA1R" },
+
+ { "DPGA2L Mixer", "RX1 Playback Switch", "RX1 Playback" },
+ { "DPGA2L Mixer", "RX2 Playback Switch", "RX2 Playback" },
+ { "DPGA2L Mixer", "RX3 Playback Switch", "RX3 Playback" },
+ { "DPGA2L Mixer", "RX4 Playback Switch", "RX4 Playback" },
+ { "DPGA2L Mixer", "RX5 Playback Switch", "RX5 Playback" },
+ { "DPGA2L Mixer", "RX6 Playback Switch", "RX6 Playback" },
+
+ { "DPGA2R Mixer", "RX2 Playback Switch", "RX2 Playback" },
+ { "DPGA2R Mixer", "RX4 Playback Switch", "RX4 Playback" },
+ { "DPGA2R Mixer", "RX6 Playback Switch", "RX6 Playback" },
+
+ { "DPGA2L", NULL, "DPGA2L Mixer" },
+ { "DPGA2R", NULL, "DPGA2R Mixer" },
+
+ { "DAC2L", NULL, "DPGA2L" },
+ { "DAC2R", NULL, "DPGA2R" },
+
+ { "DPGA3L Mixer", "RX1 Playback Switch", "RX1 Playback" },
+ { "DPGA3L Mixer", "RX3 Playback Switch", "RX3 Playback" },
+ { "DPGA3L Mixer", "RX5 Playback Switch", "RX5 Playback" },
+
+ { "DPGA3R Mixer", "RX2 Playback Switch", "RX2 Playback" },
+ { "DPGA3R Mixer", "RX4 Playback Switch", "RX4 Playback" },
+ { "DPGA3R Mixer", "RX6 Playback Switch", "RX6 Playback" },
+
+ { "DPGA3L", NULL, "DPGA3L Mixer" },
+ { "DPGA3R", NULL, "DPGA3R Mixer" },
+
+ { "DAC3L", NULL, "DPGA3L" },
+ { "DAC3R", NULL, "DPGA3R" },
+
+ { "Headset Left Mixer", "DAC1L Playback Switch", "DAC1L" },
+ { "Headset Left Mixer", "APGA1 Playback Switch", "APGA1" },
+
+ { "Headset Right Mixer", "DAC1R Playback Switch", "DAC1R" },
+ { "Headset Right Mixer", "APGA2 Playback Switch", "APGA2" },
+
+ { "HS Left Driver", NULL, "Headset Left Mixer" },
+ { "HS Right Driver", NULL, "Headset Right Mixer" },
+
+ { "HSOL", NULL, "HS Left Driver" },
+ { "HSOR", NULL, "HS Right Driver" },
+
+ /* Earphone playback path */
+ { "Earphone Mixer", "DAC2L Playback Switch", "DAC2L" },
+ { "Earphone Mixer", "APGA1 Playback Switch", "APGA1" },
+
+ { "Earphone Playback", "Switch", "Earphone Mixer" },
+ { "Earphone Driver", NULL, "Earphone Playback" },
+ { "EP", NULL, "Earphone Driver" },
+
+ { "Handsfree Left Mixer", "DAC2L Playback Switch", "DAC2L" },
+ { "Handsfree Left Mixer", "APGA1 Playback Switch", "APGA1" },
+
+ { "Handsfree Right Mixer", "DAC2R Playback Switch", "DAC2R" },
+ { "Handsfree Right Mixer", "APGA2 Playback Switch", "APGA2" },
+
+ { "HF Left PGA", NULL, "Handsfree Left Mixer" },
+ { "HF Right PGA", NULL, "Handsfree Right Mixer" },
+
+ { "HF Left Driver", NULL, "HF Left PGA" },
+ { "HF Right Driver", NULL, "HF Right PGA" },
+
+ { "HFL", NULL, "HF Left Driver" },
+ { "HFR", NULL, "HF Right Driver" },
+
+ { "LINEOUT1 Mixer", "DAC3L Playback Switch", "DAC3L" },
+ { "LINEOUT1 Mixer", "APGA1 Playback Switch", "APGA1" },
+
+ { "LINEOUT2 Mixer", "DAC3R Playback Switch", "DAC3R" },
+ { "LINEOUT2 Mixer", "APGA2 Playback Switch", "APGA2" },
+
+ { "LINEOUT1 Driver", NULL, "LINEOUT1 Mixer" },
+ { "LINEOUT2 Driver", NULL, "LINEOUT2 Mixer" },
+
+ { "LINEOUT1", NULL, "LINEOUT1 Driver" },
+ { "LINEOUT2", NULL, "LINEOUT2 Driver" },
+};
+
+static int isabelle_hs_mute(struct snd_soc_dai *dai, int mute)
+{
+ snd_soc_update_bits(dai->codec, ISABELLE_DAC1_SOFTRAMP_REG,
+ BIT(4), (mute ? BIT(4) : 0));
+
+ return 0;
+}
+
+static int isabelle_hf_mute(struct snd_soc_dai *dai, int mute)
+{
+ snd_soc_update_bits(dai->codec, ISABELLE_DAC2_SOFTRAMP_REG,
+ BIT(4), (mute ? BIT(4) : 0));
+
+ return 0;
+}
+
+static int isabelle_line_mute(struct snd_soc_dai *dai, int mute)
+{
+ snd_soc_update_bits(dai->codec, ISABELLE_DAC3_SOFTRAMP_REG,
+ BIT(4), (mute ? BIT(4) : 0));
+
+ return 0;
+}
+
+static int isabelle_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ snd_soc_update_bits(codec, ISABELLE_PWR_EN_REG,
+ ISABELLE_CHIP_EN, BIT(0));
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_update_bits(codec, ISABELLE_PWR_EN_REG,
+ ISABELLE_CHIP_EN, 0);
+ break;
+ }
+
+ codec->dapm.bias_level = level;
+
+ return 0;
+}
+
+static int isabelle_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ u16 aif = 0;
+ unsigned int fs_val = 0;
+
+ switch (params_rate(params)) {
+ case 8000:
+ fs_val = ISABELLE_FS_RATE_8;
+ break;
+ case 11025:
+ fs_val = ISABELLE_FS_RATE_11;
+ break;
+ case 12000:
+ fs_val = ISABELLE_FS_RATE_12;
+ break;
+ case 16000:
+ fs_val = ISABELLE_FS_RATE_16;
+ break;
+ case 22050:
+ fs_val = ISABELLE_FS_RATE_22;
+ break;
+ case 24000:
+ fs_val = ISABELLE_FS_RATE_24;
+ break;
+ case 32000:
+ fs_val = ISABELLE_FS_RATE_32;
+ break;
+ case 44100:
+ fs_val = ISABELLE_FS_RATE_44;
+ break;
+ case 48000:
+ fs_val = ISABELLE_FS_RATE_48;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, ISABELLE_FS_RATE_CFG_REG,
+ ISABELLE_FS_RATE_MASK, fs_val);
+
+ /* bit size */
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ aif |= ISABELLE_AIF_LENGTH_20;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ aif |= ISABELLE_AIF_LENGTH_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, ISABELLE_INTF_CFG_REG,
+ ISABELLE_AIF_LENGTH_MASK, aif);
+
+ return 0;
+}
+
+static int isabelle_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ unsigned int aif_val = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ aif_val &= ~ISABELLE_AIF_MS;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aif_val |= ISABELLE_AIF_MS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ aif_val |= ISABELLE_I2S_MODE;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ aif_val |= ISABELLE_LEFT_J_MODE;
+ break;
+ case SND_SOC_DAIFMT_PDM:
+ aif_val |= ISABELLE_PDM_MODE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, ISABELLE_INTF_CFG_REG,
+ (ISABELLE_AIF_MS | ISABELLE_AIF_FMT_MASK), aif_val);
+
+ return 0;
+}
+
+/* Rates supported by Isabelle driver */
+#define ISABELLE_RATES SNDRV_PCM_RATE_8000_48000
+
+/* Formates supported by Isabelle driver. */
+#define ISABELLE_FORMATS (SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops isabelle_hs_dai_ops = {
+ .hw_params = isabelle_hw_params,
+ .set_fmt = isabelle_set_dai_fmt,
+ .digital_mute = isabelle_hs_mute,
+};
+
+static struct snd_soc_dai_ops isabelle_hf_dai_ops = {
+ .hw_params = isabelle_hw_params,
+ .set_fmt = isabelle_set_dai_fmt,
+ .digital_mute = isabelle_hf_mute,
+};
+
+static struct snd_soc_dai_ops isabelle_line_dai_ops = {
+ .hw_params = isabelle_hw_params,
+ .set_fmt = isabelle_set_dai_fmt,
+ .digital_mute = isabelle_line_mute,
+};
+
+static struct snd_soc_dai_ops isabelle_ul_dai_ops = {
+ .hw_params = isabelle_hw_params,
+ .set_fmt = isabelle_set_dai_fmt,
+};
+
+/* ISABELLE dai structure */
+static struct snd_soc_dai_driver isabelle_dai[] = {
+ {
+ .name = "isabelle-dl1",
+ .playback = {
+ .stream_name = "Headset Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = ISABELLE_RATES,
+ .formats = ISABELLE_FORMATS,
+ },
+ .ops = &isabelle_hs_dai_ops,
+ },
+ {
+ .name = "isabelle-dl2",
+ .playback = {
+ .stream_name = "Handsfree Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = ISABELLE_RATES,
+ .formats = ISABELLE_FORMATS,
+ },
+ .ops = &isabelle_hf_dai_ops,
+ },
+ {
+ .name = "isabelle-lineout",
+ .playback = {
+ .stream_name = "Lineout Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = ISABELLE_RATES,
+ .formats = ISABELLE_FORMATS,
+ },
+ .ops = &isabelle_line_dai_ops,
+ },
+ {
+ .name = "isabelle-ul",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = ISABELLE_RATES,
+ .formats = ISABELLE_FORMATS,
+ },
+ .ops = &isabelle_ul_dai_ops,
+ },
+};
+
+static int isabelle_probe(struct snd_soc_codec *codec)
+{
+ int ret = 0;
+
+ codec->control_data = dev_get_regmap(codec->dev, NULL);
+
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_isabelle = {
+ .probe = isabelle_probe,
+ .set_bias_level = isabelle_set_bias_level,
+ .controls = isabelle_snd_controls,
+ .num_controls = ARRAY_SIZE(isabelle_snd_controls),
+ .dapm_widgets = isabelle_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(isabelle_dapm_widgets),
+ .dapm_routes = isabelle_intercon,
+ .num_dapm_routes = ARRAY_SIZE(isabelle_intercon),
+ .idle_bias_off = true,
+};
+
+static const struct regmap_config isabelle_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = ISABELLE_MAX_REGISTER,
+ .reg_defaults = isabelle_reg_defs,
+ .num_reg_defaults = ARRAY_SIZE(isabelle_reg_defs),
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int __devinit isabelle_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *isabelle_regmap;
+ int ret = 0;
+
+ isabelle_regmap = devm_regmap_init_i2c(i2c, &isabelle_regmap_config);
+ if (IS_ERR(isabelle_regmap)) {
+ ret = PTR_ERR(isabelle_regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ ret);
+ return ret;
+ }
+ i2c_set_clientdata(i2c, isabelle_regmap);
+
+ ret = snd_soc_register_codec(&i2c->dev,
+ &soc_codec_dev_isabelle, isabelle_dai,
+ ARRAY_SIZE(isabelle_dai));
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int __devexit isabelle_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id isabelle_i2c_id[] = {
+ { "isabelle", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, isabelle_i2c_id);
+
+static struct i2c_driver isabelle_i2c_driver = {
+ .driver = {
+ .name = "isabelle",
+ .owner = THIS_MODULE,
+ },
+ .probe = isabelle_i2c_probe,
+ .remove = __devexit_p(isabelle_i2c_remove),
+ .id_table = isabelle_i2c_id,
+};
+
+module_i2c_driver(isabelle_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC ISABELLE driver");
+MODULE_AUTHOR("Vishwas A Deshpande <vishwas.a.deshpande@ti.com>");
+MODULE_AUTHOR("M R Swami Reddy <MR.Swami.Reddy@ti.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/isabelle.h b/sound/soc/codecs/isabelle.h
new file mode 100644
index 000000000000..96d839a8c956
--- /dev/null
+++ b/sound/soc/codecs/isabelle.h
@@ -0,0 +1,143 @@
+/*
+ * isabelle.h - Low power high fidelity audio codec driver header file
+ *
+ * Copyright (c) 2012 Texas Instruments, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#ifndef _ISABELLE_H
+#define _ISABELLE_H
+
+#include <linux/bitops.h>
+
+/* ISABELLE REGISTERS */
+
+#define ISABELLE_PWR_CFG_REG 0x01
+#define ISABELLE_PWR_EN_REG 0x02
+#define ISABELLE_PS_EN1_REG 0x03
+#define ISABELLE_INT1_STATUS_REG 0x04
+#define ISABELLE_INT1_MASK_REG 0x05
+#define ISABELLE_INT2_STATUS_REG 0x06
+#define ISABELLE_INT2_MASK_REG 0x07
+#define ISABELLE_HKCTL1_REG 0x08
+#define ISABELLE_HKCTL2_REG 0x09
+#define ISABELLE_HKCTL3_REG 0x0A
+#define ISABELLE_ACCDET_STATUS_REG 0x0B
+#define ISABELLE_BUTTON_ID_REG 0x0C
+#define ISABELLE_PLL_CFG_REG 0x10
+#define ISABELLE_PLL_EN_REG 0x11
+#define ISABELLE_FS_RATE_CFG_REG 0x12
+#define ISABELLE_INTF_CFG_REG 0x13
+#define ISABELLE_INTF_EN_REG 0x14
+#define ISABELLE_ULATX12_INTF_CFG_REG 0x15
+#define ISABELLE_DL12_INTF_CFG_REG 0x16
+#define ISABELLE_DL34_INTF_CFG_REG 0x17
+#define ISABELLE_DL56_INTF_CFG_REG 0x18
+#define ISABELLE_ATX_STPGA1_CFG_REG 0x19
+#define ISABELLE_ATX_STPGA2_CFG_REG 0x1A
+#define ISABELLE_VTX_STPGA1_CFG_REG 0x1B
+#define ISABELLE_VTX2_STPGA2_CFG_REG 0x1C
+#define ISABELLE_ATX1_DPGA_REG 0x1D
+#define ISABELLE_ATX2_DPGA_REG 0x1E
+#define ISABELLE_VTX1_DPGA_REG 0x1F
+#define ISABELLE_VTX2_DPGA_REG 0x20
+#define ISABELLE_TX_INPUT_CFG_REG 0x21
+#define ISABELLE_RX_INPUT_CFG_REG 0x22
+#define ISABELLE_RX_INPUT_CFG2_REG 0x23
+#define ISABELLE_VOICE_HPF_CFG_REG 0x24
+#define ISABELLE_AUDIO_HPF_CFG_REG 0x25
+#define ISABELLE_RX1_DPGA_REG 0x26
+#define ISABELLE_RX2_DPGA_REG 0x27
+#define ISABELLE_RX3_DPGA_REG 0x28
+#define ISABELLE_RX4_DPGA_REG 0x29
+#define ISABELLE_RX5_DPGA_REG 0x2A
+#define ISABELLE_RX6_DPGA_REG 0x2B
+#define ISABELLE_ALU_TX_EN_REG 0x2C
+#define ISABELLE_ALU_RX_EN_REG 0x2D
+#define ISABELLE_IIR_RESYNC_REG 0x2E
+#define ISABELLE_ABIAS_CFG_REG 0x30
+#define ISABELLE_DBIAS_CFG_REG 0x31
+#define ISABELLE_MIC1_GAIN_REG 0x32
+#define ISABELLE_MIC2_GAIN_REG 0x33
+#define ISABELLE_AMIC_CFG_REG 0x34
+#define ISABELLE_DMIC_CFG_REG 0x35
+#define ISABELLE_APGA_GAIN_REG 0x36
+#define ISABELLE_APGA_CFG_REG 0x37
+#define ISABELLE_TX_GAIN_DLY_REG 0x38
+#define ISABELLE_RX_GAIN_DLY_REG 0x39
+#define ISABELLE_RX_PWR_CTRL_REG 0x3A
+#define ISABELLE_DPGA1LR_IN_SEL_REG 0x3B
+#define ISABELLE_DPGA1L_GAIN_REG 0x3C
+#define ISABELLE_DPGA1R_GAIN_REG 0x3D
+#define ISABELLE_DPGA2L_IN_SEL_REG 0x3E
+#define ISABELLE_DPGA2R_IN_SEL_REG 0x3F
+#define ISABELLE_DPGA2L_GAIN_REG 0x40
+#define ISABELLE_DPGA2R_GAIN_REG 0x41
+#define ISABELLE_DPGA3LR_IN_SEL_REG 0x42
+#define ISABELLE_DPGA3L_GAIN_REG 0x43
+#define ISABELLE_DPGA3R_GAIN_REG 0x44
+#define ISABELLE_DAC1_SOFTRAMP_REG 0x45
+#define ISABELLE_DAC2_SOFTRAMP_REG 0x46
+#define ISABELLE_DAC3_SOFTRAMP_REG 0x47
+#define ISABELLE_DAC_CFG_REG 0x48
+#define ISABELLE_EARDRV_CFG1_REG 0x49
+#define ISABELLE_EARDRV_CFG2_REG 0x4A
+#define ISABELLE_HSDRV_GAIN_REG 0x4B
+#define ISABELLE_HSDRV_CFG1_REG 0x4C
+#define ISABELLE_HSDRV_CFG2_REG 0x4D
+#define ISABELLE_HS_NG_CFG1_REG 0x4E
+#define ISABELLE_HS_NG_CFG2_REG 0x4F
+#define ISABELLE_LINEAMP_GAIN_REG 0x50
+#define ISABELLE_LINEAMP_CFG_REG 0x51
+#define ISABELLE_HFL_VOL_CTRL_REG 0x52
+#define ISABELLE_HFL_SFTVOL_CTRL_REG 0x53
+#define ISABELLE_HFL_LIM_CTRL_1_REG 0x54
+#define ISABELLE_HFL_LIM_CTRL_2_REG 0x55
+#define ISABELLE_HFR_VOL_CTRL_REG 0x56
+#define ISABELLE_HFR_SFTVOL_CTRL_REG 0x57
+#define ISABELLE_HFR_LIM_CTRL_1_REG 0x58
+#define ISABELLE_HFR_LIM_CTRL_2_REG 0x59
+#define ISABELLE_HF_MODE_REG 0x5A
+#define ISABELLE_HFLPGA_CFG_REG 0x5B
+#define ISABELLE_HFRPGA_CFG_REG 0x5C
+#define ISABELLE_HFDRV_CFG_REG 0x5D
+#define ISABELLE_PDMOUT_CFG1_REG 0x5E
+#define ISABELLE_PDMOUT_CFG2_REG 0x5F
+#define ISABELLE_PDMOUT_L_WM_REG 0x60
+#define ISABELLE_PDMOUT_R_WM_REG 0x61
+#define ISABELLE_HF_NG_CFG1_REG 0x62
+#define ISABELLE_HF_NG_CFG2_REG 0x63
+
+/* ISABELLE_PWR_EN_REG (0x02h) */
+#define ISABELLE_CHIP_EN BIT(0)
+
+/* ISABELLE DAI FORMATS */
+#define ISABELLE_AIF_FMT_MASK 0x70
+#define ISABELLE_I2S_MODE 0x0
+#define ISABELLE_LEFT_J_MODE 0x1
+#define ISABELLE_PDM_MODE 0x2
+
+#define ISABELLE_AIF_LENGTH_MASK 0x30
+#define ISABELLE_AIF_LENGTH_20 0x00
+#define ISABELLE_AIF_LENGTH_32 0x10
+
+#define ISABELLE_AIF_MS 0x80
+
+#define ISABELLE_FS_RATE_MASK 0xF
+#define ISABELLE_FS_RATE_8 0x0
+#define ISABELLE_FS_RATE_11 0x1
+#define ISABELLE_FS_RATE_12 0x2
+#define ISABELLE_FS_RATE_16 0x4
+#define ISABELLE_FS_RATE_22 0x5
+#define ISABELLE_FS_RATE_24 0x6
+#define ISABELLE_FS_RATE_32 0x8
+#define ISABELLE_FS_RATE_44 0x9
+#define ISABELLE_FS_RATE_48 0xA
+
+#define ISABELLE_MAX_REGISTER 0xFF
+
+#endif
diff --git a/sound/soc/codecs/lm49453.c b/sound/soc/codecs/lm49453.c
index 802b9f176b16..99b0a9dcff34 100644
--- a/sound/soc/codecs/lm49453.c
+++ b/sound/soc/codecs/lm49453.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -1358,7 +1357,7 @@ static struct snd_soc_dai_ops lm49453_lineout_dai_ops = {
};
/* LM49453 dai structure. */
-static const struct snd_soc_dai_driver lm49453_dai[] = {
+static struct snd_soc_dai_driver lm49453_dai[] = {
{
.name = "LM49453 Headset",
.playback = {
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 35179e2c23c9..7cd508e16a5c 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -2216,7 +2216,7 @@ static irqreturn_t max98095_report_jack(int irq, void *data)
return IRQ_HANDLED;
}
-int max98095_jack_detect_enable(struct snd_soc_codec *codec)
+static int max98095_jack_detect_enable(struct snd_soc_codec *codec)
{
struct max98095_priv *max98095 = snd_soc_codec_get_drvdata(codec);
int ret = 0;
@@ -2245,7 +2245,7 @@ int max98095_jack_detect_enable(struct snd_soc_codec *codec)
return ret;
}
-int max98095_jack_detect_disable(struct snd_soc_codec *codec)
+static int max98095_jack_detect_disable(struct snd_soc_codec *codec)
{
int ret = 0;
@@ -2286,6 +2286,7 @@ int max98095_jack_detect(struct snd_soc_codec *codec,
max98095_report_jack(client->irq, codec);
return 0;
}
+EXPORT_SYMBOL_GPL(max98095_jack_detect);
#ifdef CONFIG_PM
static int max98095_suspend(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/ml26124.c b/sound/soc/codecs/ml26124.c
index 22cb5bf59273..96aa5fa05160 100644
--- a/sound/soc/codecs/ml26124.c
+++ b/sound/soc/codecs/ml26124.c
@@ -638,7 +638,7 @@ static __devinit int ml26124_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, priv);
- priv->regmap = regmap_init_i2c(i2c, &ml26124_i2c_regmap);
+ priv->regmap = devm_regmap_init_i2c(i2c, &ml26124_i2c_regmap);
if (IS_ERR(priv->regmap)) {
ret = PTR_ERR(priv->regmap);
dev_err(&i2c->dev, "regmap_init_i2c() failed: %d\n", ret);
@@ -651,10 +651,7 @@ static __devinit int ml26124_i2c_probe(struct i2c_client *i2c,
static __devexit int ml26124_i2c_remove(struct i2c_client *client)
{
- struct ml26124_priv *priv = i2c_get_clientdata(client);
-
snd_soc_unregister_codec(&client->dev);
- regmap_exit(priv->regmap);
return 0;
}
diff --git a/sound/soc/codecs/spdif_receiver.c b/sound/soc/codecs/spdif_receiver.c
new file mode 100644
index 000000000000..dd8d856053fc
--- /dev/null
+++ b/sound/soc/codecs/spdif_receiver.c
@@ -0,0 +1,67 @@
+/*
+ * ALSA SoC SPDIF DIR (Digital Interface Reciever) driver
+ *
+ * Based on ALSA SoC SPDIF DIT driver
+ *
+ * This driver is used by controllers which can operate in DIR (SPDI/F) where
+ * no codec is needed. This file provides stub codec that can be used
+ * in these configurations. SPEAr SPDIF IN Audio controller uses this driver.
+ *
+ * Author: Vipin Kumar, <vipin.kumar@st.com>
+ * Copyright: (C) 2012 ST Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <sound/soc.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+
+#define STUB_RATES SNDRV_PCM_RATE_8000_192000
+#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+
+static struct snd_soc_codec_driver soc_codec_spdif_dir;
+
+static struct snd_soc_dai_driver dir_stub_dai = {
+ .name = "dir-hifi",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 384,
+ .rates = STUB_RATES,
+ .formats = STUB_FORMATS,
+ },
+};
+
+static int spdif_dir_probe(struct platform_device *pdev)
+{
+ return snd_soc_register_codec(&pdev->dev, &soc_codec_spdif_dir,
+ &dir_stub_dai, 1);
+}
+
+static int spdif_dir_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver spdif_dir_driver = {
+ .probe = spdif_dir_probe,
+ .remove = spdif_dir_remove,
+ .driver = {
+ .name = "spdif-dir",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(spdif_dir_driver);
+
+MODULE_DESCRIPTION("ASoC SPDIF DIR driver");
+MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/sta529.c b/sound/soc/codecs/sta529.c
new file mode 100644
index 000000000000..0c225cd569d2
--- /dev/null
+++ b/sound/soc/codecs/sta529.c
@@ -0,0 +1,442 @@
+/*
+ * ASoC codec driver for spear platform
+ *
+ * sound/soc/codecs/sta529.c -- spear ALSA Soc codec driver
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Rajeev Kumar <rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+/* STA529 Register offsets */
+#define STA529_FFXCFG0 0x00
+#define STA529_FFXCFG1 0x01
+#define STA529_MVOL 0x02
+#define STA529_LVOL 0x03
+#define STA529_RVOL 0x04
+#define STA529_TTF0 0x05
+#define STA529_TTF1 0x06
+#define STA529_TTP0 0x07
+#define STA529_TTP1 0x08
+#define STA529_S2PCFG0 0x0A
+#define STA529_S2PCFG1 0x0B
+#define STA529_P2SCFG0 0x0C
+#define STA529_P2SCFG1 0x0D
+#define STA529_PLLCFG0 0x14
+#define STA529_PLLCFG1 0x15
+#define STA529_PLLCFG2 0x16
+#define STA529_PLLCFG3 0x17
+#define STA529_PLLPFE 0x18
+#define STA529_PLLST 0x19
+#define STA529_ADCCFG 0x1E /*mic_select*/
+#define STA529_CKOCFG 0x1F
+#define STA529_MISC 0x20
+#define STA529_PADST0 0x21
+#define STA529_PADST1 0x22
+#define STA529_FFXST 0x23
+#define STA529_PWMIN1 0x2D
+#define STA529_PWMIN2 0x2E
+#define STA529_POWST 0x32
+
+#define STA529_MAX_REGISTER 0x32
+
+#define STA529_RATES (SNDRV_PCM_RATE_8000 | \
+ SNDRV_PCM_RATE_11025 | \
+ SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_22050 | \
+ SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000)
+
+#define STA529_FORMAT (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+#define S2PC_VALUE 0x98
+#define CLOCK_OUT 0x60
+#define LEFT_J_DATA_FORMAT 0x10
+#define I2S_DATA_FORMAT 0x12
+#define RIGHT_J_DATA_FORMAT 0x14
+#define CODEC_MUTE_VAL 0x80
+
+#define POWER_CNTLMSAK 0x40
+#define POWER_STDBY 0x40
+#define FFX_MASK 0x80
+#define FFX_OFF 0x80
+#define POWER_UP 0x00
+#define FFX_CLK_ENB 0x01
+#define FFX_CLK_DIS 0x00
+#define FFX_CLK_MSK 0x01
+#define PLAY_FREQ_RANGE_MSK 0x70
+#define CAP_FREQ_RANGE_MSK 0x0C
+#define PDATA_LEN_MSK 0xC0
+#define BCLK_TO_FS_MSK 0x30
+#define AUDIO_MUTE_MSK 0x80
+
+static const struct reg_default sta529_reg_defaults[] = {
+ { 0, 0x35 }, /* R0 - FFX Configuration reg 0 */
+ { 1, 0xc8 }, /* R1 - FFX Configuration reg 1 */
+ { 2, 0x50 }, /* R2 - Master Volume */
+ { 3, 0x00 }, /* R3 - Left Volume */
+ { 4, 0x00 }, /* R4 - Right Volume */
+ { 10, 0xb2 }, /* R10 - S2P Config Reg 0 */
+ { 11, 0x41 }, /* R11 - S2P Config Reg 1 */
+ { 12, 0x92 }, /* R12 - P2S Config Reg 0 */
+ { 13, 0x41 }, /* R13 - P2S Config Reg 1 */
+ { 30, 0xd2 }, /* R30 - ADC Config Reg */
+ { 31, 0x40 }, /* R31 - clock Out Reg */
+ { 32, 0x21 }, /* R32 - Misc Register */
+};
+
+struct sta529 {
+ struct regmap *regmap;
+};
+
+static bool sta529_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+
+ case STA529_FFXCFG0:
+ case STA529_FFXCFG1:
+ case STA529_MVOL:
+ case STA529_LVOL:
+ case STA529_RVOL:
+ case STA529_S2PCFG0:
+ case STA529_S2PCFG1:
+ case STA529_P2SCFG0:
+ case STA529_P2SCFG1:
+ case STA529_ADCCFG:
+ case STA529_CKOCFG:
+ case STA529_MISC:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+static const char *pwm_mode_text[] = { "Binary", "Headphone", "Ternary",
+ "Phase-shift"};
+
+static const DECLARE_TLV_DB_SCALE(out_gain_tlv, -9150, 50, 0);
+static const DECLARE_TLV_DB_SCALE(master_vol_tlv, -12750, 50, 0);
+static const SOC_ENUM_SINGLE_DECL(pwm_src, STA529_FFXCFG1, 4, pwm_mode_text);
+
+static const struct snd_kcontrol_new sta529_snd_controls[] = {
+ SOC_DOUBLE_R_TLV("Digital Playback Volume", STA529_LVOL, STA529_RVOL, 0,
+ 127, 0, out_gain_tlv),
+ SOC_SINGLE_TLV("Master Playback Volume", STA529_MVOL, 0, 127, 1,
+ master_vol_tlv),
+ SOC_ENUM("PWM Select", pwm_src),
+};
+
+static int sta529_set_bias_level(struct snd_soc_codec *codec, enum
+ snd_soc_bias_level level)
+{
+ struct sta529 *sta529 = snd_soc_codec_get_drvdata(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ snd_soc_update_bits(codec, STA529_FFXCFG0, POWER_CNTLMSAK,
+ POWER_UP);
+ snd_soc_update_bits(codec, STA529_MISC, FFX_CLK_MSK,
+ FFX_CLK_ENB);
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+ regcache_sync(sta529->regmap);
+ snd_soc_update_bits(codec, STA529_FFXCFG0,
+ POWER_CNTLMSAK, POWER_STDBY);
+ /* Making FFX output to zero */
+ snd_soc_update_bits(codec, STA529_FFXCFG0, FFX_MASK,
+ FFX_OFF);
+ snd_soc_update_bits(codec, STA529_MISC, FFX_CLK_MSK,
+ FFX_CLK_DIS);
+ break;
+ case SND_SOC_BIAS_OFF:
+ break;
+ }
+
+ /*
+ * store the label for powers down audio subsystem for suspend.This is
+ * used by soc core layer
+ */
+ codec->dapm.bias_level = level;
+
+ return 0;
+
+}
+
+static int sta529_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ int pdata, play_freq_val, record_freq_val;
+ int bclk_to_fs_ratio;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ pdata = 1;
+ bclk_to_fs_ratio = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ pdata = 2;
+ bclk_to_fs_ratio = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ pdata = 3;
+ bclk_to_fs_ratio = 2;
+ break;
+ default:
+ dev_err(codec->dev, "Unsupported format\n");
+ return -EINVAL;
+ }
+
+ switch (params_rate(params)) {
+ case 8000:
+ case 11025:
+ play_freq_val = 0;
+ record_freq_val = 2;
+ break;
+ case 16000:
+ case 22050:
+ play_freq_val = 1;
+ record_freq_val = 0;
+ break;
+
+ case 32000:
+ case 44100:
+ case 48000:
+ play_freq_val = 2;
+ record_freq_val = 0;
+ break;
+ default:
+ dev_err(codec->dev, "Unsupported rate\n");
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ snd_soc_update_bits(codec, STA529_S2PCFG1, PDATA_LEN_MSK,
+ pdata << 6);
+ snd_soc_update_bits(codec, STA529_S2PCFG1, BCLK_TO_FS_MSK,
+ bclk_to_fs_ratio << 4);
+ snd_soc_update_bits(codec, STA529_MISC, PLAY_FREQ_RANGE_MSK,
+ play_freq_val << 4);
+ } else {
+ snd_soc_update_bits(codec, STA529_P2SCFG1, PDATA_LEN_MSK,
+ pdata << 6);
+ snd_soc_update_bits(codec, STA529_P2SCFG1, BCLK_TO_FS_MSK,
+ bclk_to_fs_ratio << 4);
+ snd_soc_update_bits(codec, STA529_MISC, CAP_FREQ_RANGE_MSK,
+ record_freq_val << 2);
+ }
+
+ return 0;
+}
+
+static int sta529_mute(struct snd_soc_dai *dai, int mute)
+{
+ u8 val = 0;
+
+ if (mute)
+ val |= CODEC_MUTE_VAL;
+
+ snd_soc_update_bits(dai->codec, STA529_FFXCFG0, AUDIO_MUTE_MSK, val);
+
+ return 0;
+}
+
+static int sta529_set_dai_fmt(struct snd_soc_dai *codec_dai, u32 fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u8 mode = 0;
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_LEFT_J:
+ mode = LEFT_J_DATA_FORMAT;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ mode = I2S_DATA_FORMAT;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ mode = RIGHT_J_DATA_FORMAT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, STA529_S2PCFG0, 0x0D, mode);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops sta529_dai_ops = {
+ .hw_params = sta529_hw_params,
+ .set_fmt = sta529_set_dai_fmt,
+ .digital_mute = sta529_mute,
+};
+
+static struct snd_soc_dai_driver sta529_dai = {
+ .name = "sta529-audio",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = STA529_RATES,
+ .formats = STA529_FORMAT,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = STA529_RATES,
+ .formats = STA529_FORMAT,
+ },
+ .ops = &sta529_dai_ops,
+};
+
+static int sta529_probe(struct snd_soc_codec *codec)
+{
+ struct sta529 *sta529 = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ codec->control_data = sta529->regmap;
+ ret = snd_soc_codec_set_cache_io(codec, 8, 8, SND_SOC_REGMAP);
+
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ return ret;
+ }
+ sta529_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+
+/* power down chip */
+static int sta529_remove(struct snd_soc_codec *codec)
+{
+ sta529_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int sta529_suspend(struct snd_soc_codec *codec)
+{
+ sta529_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int sta529_resume(struct snd_soc_codec *codec)
+{
+ sta529_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+
+struct snd_soc_codec_driver sta529_codec_driver = {
+ .probe = sta529_probe,
+ .remove = sta529_remove,
+ .set_bias_level = sta529_set_bias_level,
+ .suspend = sta529_suspend,
+ .resume = sta529_resume,
+ .controls = sta529_snd_controls,
+ .num_controls = ARRAY_SIZE(sta529_snd_controls),
+};
+
+static const struct regmap_config sta529_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = STA529_MAX_REGISTER,
+ .readable_reg = sta529_readable,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = sta529_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(sta529_reg_defaults),
+};
+
+static __devinit int sta529_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct sta529 *sta529;
+ int ret;
+
+ if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EINVAL;
+
+ sta529 = devm_kzalloc(&i2c->dev, sizeof(struct sta529), GFP_KERNEL);
+ if (sta529 == NULL) {
+ dev_err(&i2c->dev, "Can not allocate memory\n");
+ return -ENOMEM;
+ }
+
+ sta529->regmap = devm_regmap_init_i2c(i2c, &sta529_regmap);
+ if (IS_ERR(sta529->regmap)) {
+ ret = PTR_ERR(sta529->regmap);
+ dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(i2c, sta529);
+
+ ret = snd_soc_register_codec(&i2c->dev,
+ &sta529_codec_driver, &sta529_dai, 1);
+ if (ret != 0)
+ dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
+
+ return ret;
+}
+
+static int __devexit sta529_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id sta529_i2c_id[] = {
+ { "sta529", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sta529_i2c_id);
+
+static struct i2c_driver sta529_i2c_driver = {
+ .driver = {
+ .name = "sta529",
+ .owner = THIS_MODULE,
+ },
+ .probe = sta529_i2c_probe,
+ .remove = __devexit_p(sta529_i2c_remove),
+ .id_table = sta529_i2c_id,
+};
+
+module_i2c_driver(sta529_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC STA529 codec driver");
+MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 64d2a4fa34b2..dc78f5a4bcbf 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -118,7 +118,9 @@ static const u8 aic3x_reg[AIC3X_CACHEREGNUM] = {
0x00, 0x00, 0x00, 0x00, /* 88 */
0x00, 0x00, 0x00, 0x00, /* 92 */
0x00, 0x00, 0x00, 0x00, /* 96 */
- 0x00, 0x00, 0x02, /* 100 */
+ 0x00, 0x00, 0x02, 0x00, /* 100 */
+ 0x00, 0x00, 0x00, 0x00, /* 104 */
+ 0x00, 0x00, /* 108 */
};
#define SOC_DAPM_SINGLE_AIC3X(xname, reg, shift, mask, invert) \
@@ -229,6 +231,25 @@ static const struct soc_enum aic3x_enum[] = {
SOC_ENUM_DOUBLE(AIC3X_CODEC_DFILT_CTRL, 6, 4, 4, aic3x_adc_hpf),
};
+static const char *aic3x_agc_level[] =
+ { "-5.5dB", "-8dB", "-10dB", "-12dB", "-14dB", "-17dB", "-20dB", "-24dB" };
+static const struct soc_enum aic3x_agc_level_enum[] = {
+ SOC_ENUM_SINGLE(LAGC_CTRL_A, 4, 8, aic3x_agc_level),
+ SOC_ENUM_SINGLE(RAGC_CTRL_A, 4, 8, aic3x_agc_level),
+};
+
+static const char *aic3x_agc_attack[] = { "8ms", "11ms", "16ms", "20ms" };
+static const struct soc_enum aic3x_agc_attack_enum[] = {
+ SOC_ENUM_SINGLE(LAGC_CTRL_A, 2, 4, aic3x_agc_attack),
+ SOC_ENUM_SINGLE(RAGC_CTRL_A, 2, 4, aic3x_agc_attack),
+};
+
+static const char *aic3x_agc_decay[] = { "100ms", "200ms", "400ms", "500ms" };
+static const struct soc_enum aic3x_agc_decay_enum[] = {
+ SOC_ENUM_SINGLE(LAGC_CTRL_A, 0, 4, aic3x_agc_decay),
+ SOC_ENUM_SINGLE(RAGC_CTRL_A, 0, 4, aic3x_agc_decay),
+};
+
/*
* DAC digital volumes. From -63.5 to 0 dB in 0.5 dB steps
*/
@@ -353,6 +374,15 @@ static const struct snd_kcontrol_new aic3x_snd_controls[] = {
* adjust PGA to max value when ADC is on and will never go back.
*/
SOC_DOUBLE_R("AGC Switch", LAGC_CTRL_A, RAGC_CTRL_A, 7, 0x01, 0),
+ SOC_ENUM("Left AGC Target level", aic3x_agc_level_enum[0]),
+ SOC_ENUM("Right AGC Target level", aic3x_agc_level_enum[1]),
+ SOC_ENUM("Left AGC Attack time", aic3x_agc_attack_enum[0]),
+ SOC_ENUM("Right AGC Attack time", aic3x_agc_attack_enum[1]),
+ SOC_ENUM("Left AGC Decay time", aic3x_agc_decay_enum[0]),
+ SOC_ENUM("Right AGC Decay time", aic3x_agc_decay_enum[1]),
+
+ /* De-emphasis */
+ SOC_DOUBLE("De-emphasis Switch", AIC3X_CODEC_DFILT_CTRL, 2, 0, 0x01, 0),
/* Input */
SOC_DOUBLE_R_TLV("PGA Capture Volume", LADC_VOL, RADC_VOL,
@@ -368,7 +398,7 @@ static const struct snd_kcontrol_new aic3x_snd_controls[] = {
static DECLARE_TLV_DB_SCALE(classd_amp_tlv, 0, 600, 0);
static const struct snd_kcontrol_new aic3x_classd_amp_gain_ctrl =
- SOC_DOUBLE_TLV("Class-D Amplifier Gain", CLASSD_CTRL, 6, 4, 3, 0, classd_amp_tlv);
+ SOC_DOUBLE_TLV("Class-D Playback Volume", CLASSD_CTRL, 6, 4, 3, 0, classd_amp_tlv);
/* Left DAC Mux */
static const struct snd_kcontrol_new aic3x_left_dac_mux_controls =
@@ -935,9 +965,7 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
}
found:
- data = snd_soc_read(codec, AIC3X_PLL_PROGA_REG);
- snd_soc_write(codec, AIC3X_PLL_PROGA_REG,
- data | (pll_p << PLLP_SHIFT));
+ snd_soc_update_bits(codec, AIC3X_PLL_PROGA_REG, PLLP_MASK, pll_p);
snd_soc_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG,
pll_r << PLLR_SHIFT);
snd_soc_write(codec, AIC3X_PLL_PROGB_REG, pll_j << PLLJ_SHIFT);
@@ -972,6 +1000,12 @@ static int aic3x_set_dai_sysclk(struct snd_soc_dai *codec_dai,
struct snd_soc_codec *codec = codec_dai->codec;
struct aic3x_priv *aic3x = snd_soc_codec_get_drvdata(codec);
+ /* set clock on MCLK or GPIO2 or BCLK */
+ snd_soc_update_bits(codec, AIC3X_CLKGEN_CTRL_REG, PLLCLK_IN_MASK,
+ clk_id << PLLCLK_IN_SHIFT);
+ snd_soc_update_bits(codec, AIC3X_CLKGEN_CTRL_REG, CLKDIV_IN_MASK,
+ clk_id << CLKDIV_IN_SHIFT);
+
aic3x->sysclk = freq;
return 0;
}
diff --git a/sound/soc/codecs/tlv320aic3x.h b/sound/soc/codecs/tlv320aic3x.h
index 6f097fb60683..6db3c41b0163 100644
--- a/sound/soc/codecs/tlv320aic3x.h
+++ b/sound/soc/codecs/tlv320aic3x.h
@@ -13,7 +13,7 @@
#define _AIC3X_H
/* AIC3X register space */
-#define AIC3X_CACHEREGNUM 103
+#define AIC3X_CACHEREGNUM 110
/* Page select register */
#define AIC3X_PAGE_SELECT 0
@@ -74,6 +74,8 @@
#define HPLCOM_CFG 37
/* Right High Power Output control registers */
#define HPRCOM_CFG 38
+/* High Power Output Stage Control Register */
+#define HPOUT_SC 40
/* DAC Output Switching control registers */
#define DAC_LINE_MUX 41
/* High Power Output Driver Pop Reduction registers */
@@ -148,6 +150,17 @@
#define AIC3X_GPIOB_REG 101
/* Clock generation control register */
#define AIC3X_CLKGEN_CTRL_REG 102
+/* New AGC registers */
+#define LAGCN_ATTACK 103
+#define LAGCN_DECAY 104
+#define RAGCN_ATTACK 105
+#define RAGCN_DECAY 106
+/* New Programmable ADC Digital Path and I2C Bus Condition Register */
+#define NEW_ADC_DIGITALPATH 107
+/* Passive Analog Signal Bypass Selection During Powerdown Register */
+#define PASSIVE_BYPASS 108
+/* DAC Quiescent Current Adjustment Register */
+#define DAC_ICC_ADJ 109
/* Page select register bits */
#define PAGE0_SELECT 0
@@ -163,9 +176,14 @@
#define DUAL_RATE_MODE ((1 << 5) | (1 << 6))
#define LDAC2LCH (0x1 << 3)
#define RDAC2RCH (0x1 << 1)
+#define LDAC2RCH (0x2 << 3)
+#define RDAC2LCH (0x2 << 1)
+#define LDAC2MONOMIX (0x3 << 3)
+#define RDAC2MONOMIX (0x3 << 1)
/* PLL registers bitfields */
#define PLLP_SHIFT 0
+#define PLLP_MASK 7
#define PLLQ_SHIFT 3
#define PLLR_SHIFT 0
#define PLLJ_SHIFT 2
@@ -178,6 +196,14 @@
#define PLL_CLKIN_SHIFT 4
#define MCLK_SOURCE 0x0
#define PLL_CLKDIV_SHIFT 0
+#define PLLCLK_IN_MASK 0x30
+#define PLLCLK_IN_SHIFT 4
+#define CLKDIV_IN_MASK 0xc0
+#define CLKDIV_IN_SHIFT 6
+/* clock in source */
+#define CLKIN_MCLK 0
+#define CLKIN_GPIO2 1
+#define CLKIN_BCLK 2
/* Software reset register bits */
#define SOFT_RESET 0x80
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index a36e9fcdf184..0ff1e70b7770 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -553,7 +553,7 @@ static const struct snd_kcontrol_new vibrar_mux_controls =
/* Headset power mode */
static const char *twl6040_power_mode_texts[] = {
- "Low-Power", "High-Perfomance",
+ "Low-Power", "High-Performance",
};
static const struct soc_enum twl6040_power_mode_enum =
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c
index e0b51e9f8b12..951d7b49476a 100644
--- a/sound/soc/codecs/wm1250-ev1.c
+++ b/sound/soc/codecs/wm1250-ev1.c
@@ -121,20 +121,23 @@ static const struct snd_soc_dai_ops wm1250_ev1_ops = {
.hw_params = wm1250_ev1_hw_params,
};
+#define WM1250_EV1_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_64000)
+
static struct snd_soc_dai_driver wm1250_ev1_dai = {
.name = "wm1250-ev1",
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = WM1250_EV1_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
.stream_name = "Capture",
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = WM1250_EV1_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.ops = &wm1250_ev1_ops,
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index 0418fa11e6bd..3fd5b29dc933 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -1,7 +1,7 @@
/*
* wm2000.c -- WM2000 ALSA Soc Audio driver
*
- * Copyright 2008-2010 Wolfson Microelectronics PLC.
+ * Copyright 2008-2011 Wolfson Microelectronics PLC.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
@@ -674,9 +674,39 @@ static int wm2000_resume(struct snd_soc_codec *codec)
#define wm2000_resume NULL
#endif
+static bool wm2000_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM2000_REG_SYS_START:
+ case WM2000_REG_SPEECH_CLARITY:
+ case WM2000_REG_SYS_WATCHDOG:
+ case WM2000_REG_ANA_VMID_PD_TIME:
+ case WM2000_REG_ANA_VMID_PU_TIME:
+ case WM2000_REG_CAT_FLTR_INDX:
+ case WM2000_REG_CAT_GAIN_0:
+ case WM2000_REG_SYS_STATUS:
+ case WM2000_REG_SYS_MODE_CNTRL:
+ case WM2000_REG_SYS_START0:
+ case WM2000_REG_SYS_START1:
+ case WM2000_REG_ID1:
+ case WM2000_REG_ID2:
+ case WM2000_REG_REVISON:
+ case WM2000_REG_SYS_CTL1:
+ case WM2000_REG_SYS_CTL2:
+ case WM2000_REG_ANC_STAT:
+ case WM2000_REG_IF_CTL:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct regmap_config wm2000_regmap = {
.reg_bits = 8,
.val_bits = 8,
+
+ .max_register = WM2000_REG_IF_CTL,
+ .readable_reg = wm2000_readable_reg,
};
static int wm2000_probe(struct snd_soc_codec *codec)
diff --git a/sound/soc/codecs/wm2200.c b/sound/soc/codecs/wm2200.c
index acbdc5fde923..32682c1b7cde 100644
--- a/sound/soc/codecs/wm2200.c
+++ b/sound/soc/codecs/wm2200.c
@@ -1491,6 +1491,7 @@ static int wm2200_bclk_rates_dat[WM2200_NUM_BCLK_RATES] = {
static int wm2200_bclk_rates_cd[WM2200_NUM_BCLK_RATES] = {
5644800,
+ 3763200,
2882400,
1881600,
1411200,
diff --git a/sound/soc/codecs/wm5100-tables.c b/sound/soc/codecs/wm5100-tables.c
index e167207a19cc..e239f4bf2460 100644
--- a/sound/soc/codecs/wm5100-tables.c
+++ b/sound/soc/codecs/wm5100-tables.c
@@ -1,7 +1,7 @@
/*
* wm5100-tables.c -- WM5100 ALSA SoC Audio driver data
*
- * Copyright 2011 Wolfson Microelectronics plc
+ * Copyright 2011-2 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
diff --git a/sound/soc/codecs/wm5100.c b/sound/soc/codecs/wm5100.c
index cb6d5372103a..f4817292ef45 100644
--- a/sound/soc/codecs/wm5100.c
+++ b/sound/soc/codecs/wm5100.c
@@ -1,7 +1,7 @@
/*
* wm5100.c -- WM5100 ALSA SoC Audio driver
*
- * Copyright 2011 Wolfson Microelectronics plc
+ * Copyright 2011-2 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
@@ -2378,13 +2378,6 @@ static int wm5100_remove(struct snd_soc_codec *codec)
return 0;
}
-static int wm5100_soc_volatile(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- return true;
-}
-
-
static struct snd_soc_codec_driver soc_codec_dev_wm5100 = {
.probe = wm5100_probe,
.remove = wm5100_remove,
@@ -2392,8 +2385,6 @@ static struct snd_soc_codec_driver soc_codec_dev_wm5100 = {
.set_sysclk = wm5100_set_sysclk,
.set_pll = wm5100_set_fll,
.idle_bias_off = 1,
- .reg_cache_size = WM5100_MAX_REGISTER,
- .volatile_register = wm5100_soc_volatile,
.seq_notifier = wm5100_seq_notifier,
.controls = wm5100_snd_controls,
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
new file mode 100644
index 000000000000..6537f16d383e
--- /dev/null
+++ b/sound/soc/codecs/wm5102.c
@@ -0,0 +1,903 @@
+/*
+ * wm5102.c -- WM5102 ALSA SoC Audio driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+
+#include "arizona.h"
+#include "wm5102.h"
+
+struct wm5102_priv {
+ struct arizona_priv core;
+ struct arizona_fll fll[2];
+};
+
+static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
+
+static const struct snd_kcontrol_new wm5102_snd_controls[] = {
+SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL,
+ ARIZONA_IN1_OSR_SHIFT, 1, 0),
+SOC_SINGLE("IN2 High Performance Switch", ARIZONA_IN2L_CONTROL,
+ ARIZONA_IN2_OSR_SHIFT, 1, 0),
+SOC_SINGLE("IN3 High Performance Switch", ARIZONA_IN3L_CONTROL,
+ ARIZONA_IN3_OSR_SHIFT, 1, 0),
+
+SOC_DOUBLE_R_RANGE_TLV("IN1 Volume", ARIZONA_IN1L_CONTROL,
+ ARIZONA_IN1R_CONTROL,
+ ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+SOC_DOUBLE_R_RANGE_TLV("IN2 Volume", ARIZONA_IN2L_CONTROL,
+ ARIZONA_IN2R_CONTROL,
+ ARIZONA_IN2L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+SOC_DOUBLE_R_RANGE_TLV("IN3 Volume", ARIZONA_IN3L_CONTROL,
+ ARIZONA_IN3R_CONTROL,
+ ARIZONA_IN3L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+
+SOC_DOUBLE_R("IN1 Digital Switch", ARIZONA_ADC_DIGITAL_VOLUME_1L,
+ ARIZONA_ADC_DIGITAL_VOLUME_1R, ARIZONA_IN1L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("IN2 Digital Switch", ARIZONA_ADC_DIGITAL_VOLUME_2L,
+ ARIZONA_ADC_DIGITAL_VOLUME_2R, ARIZONA_IN2L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("IN3 Digital Switch", ARIZONA_ADC_DIGITAL_VOLUME_3L,
+ ARIZONA_ADC_DIGITAL_VOLUME_3R, ARIZONA_IN3L_MUTE_SHIFT, 1, 1),
+
+SOC_DOUBLE_R_TLV("IN1 Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1L,
+ ARIZONA_ADC_DIGITAL_VOLUME_1R, ARIZONA_IN1L_DIG_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("IN2 Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2L,
+ ARIZONA_ADC_DIGITAL_VOLUME_2R, ARIZONA_IN2L_DIG_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("IN3 Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_3L,
+ ARIZONA_ADC_DIGITAL_VOLUME_3R, ARIZONA_IN3L_DIG_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+
+ARIZONA_MIXER_CONTROLS("EQ1", ARIZONA_EQ1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
+
+SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B3 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B4 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B3 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B4 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B3 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B4 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B3 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B4 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DRC2L", ARIZONA_DRC2LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DRC2R", ARIZONA_DRC2RMIX_INPUT_1_SOURCE),
+
+SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5,
+ ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA),
+SND_SOC_BYTES_MASK("DRC2", ARIZONA_DRC2_CTRL1, 5,
+ ARIZONA_DRC2R_ENA | ARIZONA_DRC2L_ENA),
+
+ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
+
+SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
+SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
+SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
+SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode),
+
+ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE),
+
+SOC_SINGLE_TLV("Noise Generator Volume", ARIZONA_COMFORT_NOISE_GENERATOR,
+ ARIZONA_NOISE_GEN_GAIN_SHIFT, 0x16, 0, noise_tlv),
+
+ARIZONA_MIXER_CONTROLS("HPOUT1L", ARIZONA_OUT1LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("HPOUT1R", ARIZONA_OUT1RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("HPOUT2L", ARIZONA_OUT2LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("HPOUT2R", ARIZONA_OUT2RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EPOUT", ARIZONA_OUT3LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKOUTL", ARIZONA_OUT4LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKOUTR", ARIZONA_OUT4RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKDAT1L", ARIZONA_OUT5LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE),
+
+SOC_SINGLE("HPOUT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_1L,
+ ARIZONA_OUT1_OSR_SHIFT, 1, 0),
+SOC_SINGLE("OUT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_2L,
+ ARIZONA_OUT2_OSR_SHIFT, 1, 0),
+SOC_SINGLE("EPOUT High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_3L,
+ ARIZONA_OUT3_OSR_SHIFT, 1, 0),
+SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L,
+ ARIZONA_OUT4_OSR_SHIFT, 1, 0),
+SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L,
+ ARIZONA_OUT5_OSR_SHIFT, 1, 0),
+
+SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L,
+ ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("OUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L,
+ ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_MUTE_SHIFT, 1, 1),
+SOC_SINGLE("EPOUT Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_3L,
+ ARIZONA_OUT3L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("Speaker Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_4L,
+ ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_OUT4L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("SPKDAT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_5L,
+ ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_MUTE_SHIFT, 1, 1),
+
+SOC_DOUBLE_R_TLV("HPOUT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_1L,
+ ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("OUT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_2L,
+ ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("EPOUT Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_3L,
+ ARIZONA_OUT3L_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("Speaker Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_4L,
+ ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_OUT4L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("SPKDAT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_5L,
+ ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+
+SOC_DOUBLE_R_RANGE_TLV("HPOUT1 Volume", ARIZONA_OUTPUT_PATH_CONFIG_1L,
+ ARIZONA_OUTPUT_PATH_CONFIG_1R,
+ ARIZONA_OUT1L_PGA_VOL_SHIFT,
+ 0x34, 0x40, 0, ana_tlv),
+SOC_DOUBLE_R_RANGE_TLV("OUT2 Volume", ARIZONA_OUTPUT_PATH_CONFIG_2L,
+ ARIZONA_OUTPUT_PATH_CONFIG_2R,
+ ARIZONA_OUT2L_PGA_VOL_SHIFT,
+ 0x34, 0x40, 0, ana_tlv),
+SOC_SINGLE_RANGE_TLV("EPOUT Volume", ARIZONA_OUTPUT_PATH_CONFIG_3L,
+ ARIZONA_OUT3L_PGA_VOL_SHIFT, 0x34, 0x40, 0, ana_tlv),
+
+SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT,
+ ARIZONA_SPK1R_MUTE_SHIFT, 1, 1),
+
+ARIZONA_MIXER_CONTROLS("AIF1TX1", ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX2", ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX3", ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX4", ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX5", ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX6", ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX7", ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX8", ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE),
+
+ARIZONA_MIXER_CONTROLS("AIF2TX1", ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF2TX2", ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE),
+
+ARIZONA_MIXER_CONTROLS("AIF3TX1", ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF3TX2", ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE),
+};
+
+ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ2, ARIZONA_EQ2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ3, ARIZONA_EQ3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(DRC2L, ARIZONA_DRC2LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(DRC2R, ARIZONA_DRC2RMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF3, ARIZONA_HPLP3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF4, ARIZONA_HPLP4MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(Mic, ARIZONA_MICMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(Noise, ARIZONA_NOISEMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(PWM1, ARIZONA_PWM1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(PWM2, ARIZONA_PWM2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(OUT1L, ARIZONA_OUT1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT1R, ARIZONA_OUT1RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT2L, ARIZONA_OUT2LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT2R, ARIZONA_OUT2RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT3, ARIZONA_OUT3LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKOUTL, ARIZONA_OUT4LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKOUTR, ARIZONA_OUT4RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKDAT1L, ARIZONA_OUT5LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKDAT1R, ARIZONA_OUT5RMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(AIF1TX1, ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX2, ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX3, ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX4, ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX5, ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX6, ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX7, ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX8, ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(AIF2TX1, ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF2TX2, ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(AIF3TX1, ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF3TX2, ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(ASRC1L, ARIZONA_ASRC1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(ASRC1R, ARIZONA_ASRC1RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(ASRC2L, ARIZONA_ASRC2LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(ASRC2R, ARIZONA_ASRC2RMIX_INPUT_1_SOURCE);
+
+static const struct snd_soc_dapm_widget wm5102_dapm_widgets[] = {
+SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
+ 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
+ ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD2", 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD3", 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("CPVDD", 20),
+SND_SOC_DAPM_REGULATOR_SUPPLY("MICVDD", 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDDL", 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDDR", 0),
+
+SND_SOC_DAPM_SIGGEN("TONE"),
+SND_SOC_DAPM_SIGGEN("NOISE"),
+
+SND_SOC_DAPM_INPUT("IN1L"),
+SND_SOC_DAPM_INPUT("IN1R"),
+SND_SOC_DAPM_INPUT("IN2L"),
+SND_SOC_DAPM_INPUT("IN2R"),
+SND_SOC_DAPM_INPUT("IN3L"),
+SND_SOC_DAPM_INPUT("IN3R"),
+
+SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN1R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1R_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN2L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2L_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN2R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2R_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN3L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN3L_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN3R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN3R_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+
+SND_SOC_DAPM_SUPPLY("MICBIAS1", ARIZONA_MIC_BIAS_CTRL_1,
+ ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("MICBIAS2", ARIZONA_MIC_BIAS_CTRL_2,
+ ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("MICBIAS3", ARIZONA_MIC_BIAS_CTRL_3,
+ ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Noise Generator", ARIZONA_COMFORT_NOISE_GENERATOR,
+ ARIZONA_NOISE_GEN_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Tone Generator 1", ARIZONA_TONE_GENERATOR_1,
+ ARIZONA_TONE1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Tone Generator 2", ARIZONA_TONE_GENERATOR_1,
+ ARIZONA_TONE2_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Mic Mute Mixer", ARIZONA_MIC_NOISE_MIX_CONTROL_1,
+ ARIZONA_MICMUTE_MIX_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("EQ1", ARIZONA_EQ1_1, ARIZONA_EQ1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ2", ARIZONA_EQ2_1, ARIZONA_EQ2_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ3", ARIZONA_EQ3_1, ARIZONA_EQ3_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ4", ARIZONA_EQ4_1, ARIZONA_EQ4_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("DRC2L", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2L_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("DRC2R", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2R_ENA_SHIFT, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LHPF2", ARIZONA_HPLPF2_1, ARIZONA_LHPF2_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LHPF3", ARIZONA_HPLPF3_1, ARIZONA_LHPF3_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LHPF4", ARIZONA_HPLPF4_1, ARIZONA_LHPF4_ENA_SHIFT, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_PGA("PWM1 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM1_ENA_SHIFT,
+ 0, NULL, 0),
+SND_SOC_DAPM_PGA("PWM2 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM2_ENA_SHIFT,
+ 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("ASRC1L", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC1L_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("ASRC1R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC1R_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("ASRC2L", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2L_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("ASRC2R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2R_ENA_SHIFT, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0,
+ ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0,
+ ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0,
+ ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0,
+ ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF3TX1", NULL, 0,
+ ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 0,
+ ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("AIF3RX1", NULL, 0,
+ ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
+ ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_PGA_E("OUT1L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT1R", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT2L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT2L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT2R", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT2R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT4L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT4L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT4R", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT4R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT5R", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT5R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+
+ARIZONA_MIXER_WIDGETS(EQ1, "EQ1"),
+ARIZONA_MIXER_WIDGETS(EQ2, "EQ2"),
+ARIZONA_MIXER_WIDGETS(EQ3, "EQ3"),
+ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"),
+
+ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"),
+ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"),
+ARIZONA_MIXER_WIDGETS(DRC2L, "DRC2L"),
+ARIZONA_MIXER_WIDGETS(DRC2R, "DRC2R"),
+
+ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"),
+ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"),
+ARIZONA_MIXER_WIDGETS(LHPF3, "LHPF3"),
+ARIZONA_MIXER_WIDGETS(LHPF4, "LHPF4"),
+
+ARIZONA_MIXER_WIDGETS(Mic, "Mic"),
+ARIZONA_MIXER_WIDGETS(Noise, "Noise"),
+
+ARIZONA_MIXER_WIDGETS(PWM1, "PWM1"),
+ARIZONA_MIXER_WIDGETS(PWM2, "PWM2"),
+
+ARIZONA_MIXER_WIDGETS(OUT1L, "HPOUT1L"),
+ARIZONA_MIXER_WIDGETS(OUT1R, "HPOUT1R"),
+ARIZONA_MIXER_WIDGETS(OUT2L, "HPOUT2L"),
+ARIZONA_MIXER_WIDGETS(OUT2R, "HPOUT2R"),
+ARIZONA_MIXER_WIDGETS(OUT3, "EPOUT"),
+ARIZONA_MIXER_WIDGETS(SPKOUTL, "SPKOUTL"),
+ARIZONA_MIXER_WIDGETS(SPKOUTR, "SPKOUTR"),
+ARIZONA_MIXER_WIDGETS(SPKDAT1L, "SPKDAT1L"),
+ARIZONA_MIXER_WIDGETS(SPKDAT1R, "SPKDAT1R"),
+
+ARIZONA_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"),
+ARIZONA_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"),
+ARIZONA_MIXER_WIDGETS(AIF1TX3, "AIF1TX3"),
+ARIZONA_MIXER_WIDGETS(AIF1TX4, "AIF1TX4"),
+ARIZONA_MIXER_WIDGETS(AIF1TX5, "AIF1TX5"),
+ARIZONA_MIXER_WIDGETS(AIF1TX6, "AIF1TX6"),
+ARIZONA_MIXER_WIDGETS(AIF1TX7, "AIF1TX7"),
+ARIZONA_MIXER_WIDGETS(AIF1TX8, "AIF1TX8"),
+
+ARIZONA_MIXER_WIDGETS(AIF2TX1, "AIF2TX1"),
+ARIZONA_MIXER_WIDGETS(AIF2TX2, "AIF2TX2"),
+
+ARIZONA_MIXER_WIDGETS(AIF3TX1, "AIF3TX1"),
+ARIZONA_MIXER_WIDGETS(AIF3TX2, "AIF3TX2"),
+
+ARIZONA_MIXER_WIDGETS(ASRC1L, "ASRC1L"),
+ARIZONA_MIXER_WIDGETS(ASRC1R, "ASRC1R"),
+ARIZONA_MIXER_WIDGETS(ASRC2L, "ASRC2L"),
+ARIZONA_MIXER_WIDGETS(ASRC2R, "ASRC2R"),
+
+SND_SOC_DAPM_OUTPUT("HPOUT1L"),
+SND_SOC_DAPM_OUTPUT("HPOUT1R"),
+SND_SOC_DAPM_OUTPUT("HPOUT2L"),
+SND_SOC_DAPM_OUTPUT("HPOUT2R"),
+SND_SOC_DAPM_OUTPUT("EPOUTN"),
+SND_SOC_DAPM_OUTPUT("EPOUTP"),
+SND_SOC_DAPM_OUTPUT("SPKOUTLN"),
+SND_SOC_DAPM_OUTPUT("SPKOUTLP"),
+SND_SOC_DAPM_OUTPUT("SPKOUTRN"),
+SND_SOC_DAPM_OUTPUT("SPKOUTRP"),
+SND_SOC_DAPM_OUTPUT("SPKDAT1L"),
+SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
+};
+
+#define ARIZONA_MIXER_INPUT_ROUTES(name) \
+ { name, "Noise Generator", "Noise Generator" }, \
+ { name, "Tone Generator 1", "Tone Generator 1" }, \
+ { name, "Tone Generator 2", "Tone Generator 2" }, \
+ { name, "IN1L", "IN1L PGA" }, \
+ { name, "IN1R", "IN1R PGA" }, \
+ { name, "IN2L", "IN2L PGA" }, \
+ { name, "IN2R", "IN2R PGA" }, \
+ { name, "IN3L", "IN3L PGA" }, \
+ { name, "IN3R", "IN3R PGA" }, \
+ { name, "Mic Mute Mixer", "Mic Mute Mixer" }, \
+ { name, "AIF1RX1", "AIF1RX1" }, \
+ { name, "AIF1RX2", "AIF1RX2" }, \
+ { name, "AIF1RX3", "AIF1RX3" }, \
+ { name, "AIF1RX4", "AIF1RX4" }, \
+ { name, "AIF1RX5", "AIF1RX5" }, \
+ { name, "AIF1RX6", "AIF1RX6" }, \
+ { name, "AIF1RX7", "AIF1RX7" }, \
+ { name, "AIF1RX8", "AIF1RX8" }, \
+ { name, "AIF2RX1", "AIF2RX1" }, \
+ { name, "AIF2RX2", "AIF2RX2" }, \
+ { name, "AIF3RX1", "AIF3RX1" }, \
+ { name, "AIF3RX2", "AIF3RX2" }, \
+ { name, "EQ1", "EQ1" }, \
+ { name, "EQ2", "EQ2" }, \
+ { name, "EQ3", "EQ3" }, \
+ { name, "EQ4", "EQ4" }, \
+ { name, "DRC1L", "DRC1L" }, \
+ { name, "DRC1R", "DRC1R" }, \
+ { name, "DRC2L", "DRC2L" }, \
+ { name, "DRC2R", "DRC2R" }, \
+ { name, "LHPF1", "LHPF1" }, \
+ { name, "LHPF2", "LHPF2" }, \
+ { name, "LHPF3", "LHPF3" }, \
+ { name, "LHPF4", "LHPF4" }, \
+ { name, "ASRC1L", "ASRC1L" }, \
+ { name, "ASRC1R", "ASRC1R" }, \
+ { name, "ASRC2L", "ASRC2L" }, \
+ { name, "ASRC2R", "ASRC2R" }
+
+static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
+ { "AIF2 Capture", NULL, "DBVDD2" },
+ { "AIF2 Playback", NULL, "DBVDD2" },
+
+ { "AIF3 Capture", NULL, "DBVDD3" },
+ { "AIF3 Playback", NULL, "DBVDD3" },
+
+ { "OUT1L", NULL, "CPVDD" },
+ { "OUT1R", NULL, "CPVDD" },
+ { "OUT2L", NULL, "CPVDD" },
+ { "OUT2R", NULL, "CPVDD" },
+ { "OUT3L", NULL, "CPVDD" },
+
+ { "OUT4L", NULL, "SPKVDDL" },
+ { "OUT4R", NULL, "SPKVDDR" },
+
+ { "OUT1L", NULL, "SYSCLK" },
+ { "OUT1R", NULL, "SYSCLK" },
+ { "OUT2L", NULL, "SYSCLK" },
+ { "OUT2R", NULL, "SYSCLK" },
+ { "OUT3L", NULL, "SYSCLK" },
+ { "OUT4L", NULL, "SYSCLK" },
+ { "OUT4R", NULL, "SYSCLK" },
+ { "OUT5L", NULL, "SYSCLK" },
+ { "OUT5R", NULL, "SYSCLK" },
+
+ { "MICBIAS1", NULL, "MICVDD" },
+ { "MICBIAS2", NULL, "MICVDD" },
+ { "MICBIAS3", NULL, "MICVDD" },
+
+ { "Noise Generator", NULL, "NOISE" },
+ { "Tone Generator 1", NULL, "TONE" },
+ { "Tone Generator 2", NULL, "TONE" },
+
+ { "Mic Mute Mixer", NULL, "Noise Mixer" },
+ { "Mic Mute Mixer", NULL, "Mic Mixer" },
+
+ { "AIF1 Capture", NULL, "AIF1TX1" },
+ { "AIF1 Capture", NULL, "AIF1TX2" },
+ { "AIF1 Capture", NULL, "AIF1TX3" },
+ { "AIF1 Capture", NULL, "AIF1TX4" },
+ { "AIF1 Capture", NULL, "AIF1TX5" },
+ { "AIF1 Capture", NULL, "AIF1TX6" },
+ { "AIF1 Capture", NULL, "AIF1TX7" },
+ { "AIF1 Capture", NULL, "AIF1TX8" },
+
+ { "AIF1RX1", NULL, "AIF1 Playback" },
+ { "AIF1RX2", NULL, "AIF1 Playback" },
+ { "AIF1RX3", NULL, "AIF1 Playback" },
+ { "AIF1RX4", NULL, "AIF1 Playback" },
+ { "AIF1RX5", NULL, "AIF1 Playback" },
+ { "AIF1RX6", NULL, "AIF1 Playback" },
+ { "AIF1RX7", NULL, "AIF1 Playback" },
+ { "AIF1RX8", NULL, "AIF1 Playback" },
+
+ { "AIF2 Capture", NULL, "AIF2TX1" },
+ { "AIF2 Capture", NULL, "AIF2TX2" },
+
+ { "AIF2RX1", NULL, "AIF2 Playback" },
+ { "AIF2RX2", NULL, "AIF2 Playback" },
+
+ { "AIF3 Capture", NULL, "AIF3TX1" },
+ { "AIF3 Capture", NULL, "AIF3TX2" },
+
+ { "AIF3RX1", NULL, "AIF3 Playback" },
+ { "AIF3RX2", NULL, "AIF3 Playback" },
+
+ { "AIF1 Playback", NULL, "SYSCLK" },
+ { "AIF2 Playback", NULL, "SYSCLK" },
+ { "AIF3 Playback", NULL, "SYSCLK" },
+
+ { "AIF1 Capture", NULL, "SYSCLK" },
+ { "AIF2 Capture", NULL, "SYSCLK" },
+ { "AIF3 Capture", NULL, "SYSCLK" },
+
+ ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
+ ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
+ ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
+ ARIZONA_MIXER_ROUTES("OUT2R", "HPOUT2R"),
+ ARIZONA_MIXER_ROUTES("OUT3L", "EPOUT"),
+
+ ARIZONA_MIXER_ROUTES("OUT4L", "SPKOUTL"),
+ ARIZONA_MIXER_ROUTES("OUT4R", "SPKOUTR"),
+ ARIZONA_MIXER_ROUTES("OUT5L", "SPKDAT1L"),
+ ARIZONA_MIXER_ROUTES("OUT5R", "SPKDAT1R"),
+
+ ARIZONA_MIXER_ROUTES("PWM1 Driver", "PWM1"),
+ ARIZONA_MIXER_ROUTES("PWM2 Driver", "PWM2"),
+
+ ARIZONA_MIXER_ROUTES("AIF1TX1", "AIF1TX1"),
+ ARIZONA_MIXER_ROUTES("AIF1TX2", "AIF1TX2"),
+ ARIZONA_MIXER_ROUTES("AIF1TX3", "AIF1TX3"),
+ ARIZONA_MIXER_ROUTES("AIF1TX4", "AIF1TX4"),
+ ARIZONA_MIXER_ROUTES("AIF1TX5", "AIF1TX5"),
+ ARIZONA_MIXER_ROUTES("AIF1TX6", "AIF1TX6"),
+ ARIZONA_MIXER_ROUTES("AIF1TX7", "AIF1TX7"),
+ ARIZONA_MIXER_ROUTES("AIF1TX8", "AIF1TX8"),
+
+ ARIZONA_MIXER_ROUTES("AIF2TX1", "AIF2TX1"),
+ ARIZONA_MIXER_ROUTES("AIF2TX2", "AIF2TX2"),
+
+ ARIZONA_MIXER_ROUTES("AIF3TX1", "AIF3TX1"),
+ ARIZONA_MIXER_ROUTES("AIF3TX2", "AIF3TX2"),
+
+ ARIZONA_MIXER_ROUTES("EQ1", "EQ1"),
+ ARIZONA_MIXER_ROUTES("EQ2", "EQ2"),
+ ARIZONA_MIXER_ROUTES("EQ3", "EQ3"),
+ ARIZONA_MIXER_ROUTES("EQ4", "EQ4"),
+
+ ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"),
+ ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"),
+ ARIZONA_MIXER_ROUTES("DRC2L", "DRC2L"),
+ ARIZONA_MIXER_ROUTES("DRC2R", "DRC2R"),
+
+ ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"),
+ ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"),
+ ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
+ ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),
+
+ ARIZONA_MIXER_ROUTES("ASRC1L", "ASRC1L"),
+ ARIZONA_MIXER_ROUTES("ASRC1R", "ASRC1R"),
+ ARIZONA_MIXER_ROUTES("ASRC2L", "ASRC2L"),
+ ARIZONA_MIXER_ROUTES("ASRC2R", "ASRC2R"),
+
+ { "HPOUT1L", NULL, "OUT1L" },
+ { "HPOUT1R", NULL, "OUT1R" },
+
+ { "HPOUT2L", NULL, "OUT2L" },
+ { "HPOUT2R", NULL, "OUT2R" },
+
+ { "EPOUTN", NULL, "OUT3L" },
+ { "EPOUTP", NULL, "OUT3L" },
+
+ { "SPKOUTLN", NULL, "OUT4L" },
+ { "SPKOUTLP", NULL, "OUT4L" },
+
+ { "SPKOUTRN", NULL, "OUT4R" },
+ { "SPKOUTRP", NULL, "OUT4R" },
+
+ { "SPKDAT1L", NULL, "OUT5L" },
+ { "SPKDAT1R", NULL, "OUT5R" },
+};
+
+static int wm5102_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
+ unsigned int Fref, unsigned int Fout)
+{
+ struct wm5102_priv *wm5102 = snd_soc_codec_get_drvdata(codec);
+
+ switch (fll_id) {
+ case WM5102_FLL1:
+ return arizona_set_fll(&wm5102->fll[0], source, Fref, Fout);
+ case WM5102_FLL2:
+ return arizona_set_fll(&wm5102->fll[1], source, Fref, Fout);
+ default:
+ return -EINVAL;
+ }
+}
+
+#define WM5102_RATES SNDRV_PCM_RATE_8000_192000
+
+#define WM5102_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver wm5102_dai[] = {
+ {
+ .name = "wm5102-aif1",
+ .id = 1,
+ .base = ARIZONA_AIF1_BCLK_CTRL,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = WM5102_RATES,
+ .formats = WM5102_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = WM5102_RATES,
+ .formats = WM5102_FORMATS,
+ },
+ .ops = &arizona_dai_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "wm5102-aif2",
+ .id = 2,
+ .base = ARIZONA_AIF2_BCLK_CTRL,
+ .playback = {
+ .stream_name = "AIF2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM5102_RATES,
+ .formats = WM5102_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM5102_RATES,
+ .formats = WM5102_FORMATS,
+ },
+ .ops = &arizona_dai_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "wm5102-aif3",
+ .id = 3,
+ .base = ARIZONA_AIF3_BCLK_CTRL,
+ .playback = {
+ .stream_name = "AIF3 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM5102_RATES,
+ .formats = WM5102_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF3 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM5102_RATES,
+ .formats = WM5102_FORMATS,
+ },
+ .ops = &arizona_dai_ops,
+ .symmetric_rates = 1,
+ },
+};
+
+static int wm5102_codec_probe(struct snd_soc_codec *codec)
+{
+ struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+ codec->control_data = priv->core.arizona->regmap;
+ return snd_soc_codec_set_cache_io(codec, 32, 16, SND_SOC_REGMAP);
+}
+
+#define WM5102_DIG_VU 0x0200
+
+static unsigned int wm5102_digital_vu[] = {
+ ARIZONA_ADC_DIGITAL_VOLUME_1L,
+ ARIZONA_ADC_DIGITAL_VOLUME_1R,
+ ARIZONA_ADC_DIGITAL_VOLUME_2L,
+ ARIZONA_ADC_DIGITAL_VOLUME_2R,
+ ARIZONA_ADC_DIGITAL_VOLUME_3L,
+ ARIZONA_ADC_DIGITAL_VOLUME_3R,
+
+ ARIZONA_DAC_DIGITAL_VOLUME_1L,
+ ARIZONA_DAC_DIGITAL_VOLUME_1R,
+ ARIZONA_DAC_DIGITAL_VOLUME_2L,
+ ARIZONA_DAC_DIGITAL_VOLUME_2R,
+ ARIZONA_DAC_DIGITAL_VOLUME_3L,
+ ARIZONA_DAC_DIGITAL_VOLUME_3R,
+ ARIZONA_DAC_DIGITAL_VOLUME_4L,
+ ARIZONA_DAC_DIGITAL_VOLUME_4R,
+ ARIZONA_DAC_DIGITAL_VOLUME_5L,
+ ARIZONA_DAC_DIGITAL_VOLUME_5R,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
+ .probe = wm5102_codec_probe,
+
+ .idle_bias_off = true,
+
+ .set_sysclk = arizona_set_sysclk,
+ .set_pll = wm5102_set_fll,
+
+ .controls = wm5102_snd_controls,
+ .num_controls = ARRAY_SIZE(wm5102_snd_controls),
+ .dapm_widgets = wm5102_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm5102_dapm_widgets),
+ .dapm_routes = wm5102_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm5102_dapm_routes),
+};
+
+static int __devinit wm5102_probe(struct platform_device *pdev)
+{
+ struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ struct wm5102_priv *wm5102;
+ int i;
+
+ wm5102 = devm_kzalloc(&pdev->dev, sizeof(struct wm5102_priv),
+ GFP_KERNEL);
+ if (wm5102 == NULL)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, wm5102);
+
+ wm5102->core.arizona = arizona;
+
+ for (i = 0; i < ARRAY_SIZE(wm5102->fll); i++)
+ wm5102->fll[i].vco_mult = 1;
+
+ arizona_init_fll(arizona, 1, ARIZONA_FLL1_CONTROL_1 - 1,
+ ARIZONA_IRQ_FLL1_LOCK, ARIZONA_IRQ_FLL1_CLOCK_OK,
+ &wm5102->fll[0]);
+ arizona_init_fll(arizona, 2, ARIZONA_FLL2_CONTROL_1 - 1,
+ ARIZONA_IRQ_FLL2_LOCK, ARIZONA_IRQ_FLL2_CLOCK_OK,
+ &wm5102->fll[1]);
+
+ for (i = 0; i < ARRAY_SIZE(wm5102_dai); i++)
+ arizona_init_dai(&wm5102->core, i);
+
+ /* Latch volume update bits */
+ for (i = 0; i < ARRAY_SIZE(wm5102_digital_vu); i++)
+ regmap_update_bits(arizona->regmap, wm5102_digital_vu[i],
+ WM5102_DIG_VU, WM5102_DIG_VU);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5102,
+ wm5102_dai, ARRAY_SIZE(wm5102_dai));
+}
+
+static int __devexit wm5102_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver wm5102_codec_driver = {
+ .driver = {
+ .name = "wm5102-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm5102_probe,
+ .remove = __devexit_p(wm5102_remove),
+};
+
+module_platform_driver(wm5102_codec_driver);
+
+MODULE_DESCRIPTION("ASoC WM5102 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm5102-codec");
diff --git a/sound/soc/codecs/wm5102.h b/sound/soc/codecs/wm5102.h
new file mode 100644
index 000000000000..d30477f3070c
--- /dev/null
+++ b/sound/soc/codecs/wm5102.h
@@ -0,0 +1,21 @@
+/*
+ * wm5102.h -- WM5102 ALSA SoC Audio driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM5102_H
+#define _WM5102_H
+
+#include "arizona.h"
+
+#define WM5102_FLL1 1
+#define WM5102_FLL2 2
+
+#endif
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
new file mode 100644
index 000000000000..8033f7065189
--- /dev/null
+++ b/sound/soc/codecs/wm5110.c
@@ -0,0 +1,950 @@
+/*
+ * wm5110.c -- WM5110 ALSA SoC Audio driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+
+#include "arizona.h"
+#include "wm5110.h"
+
+struct wm5110_priv {
+ struct arizona_priv core;
+ struct arizona_fll fll[2];
+};
+
+static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
+
+static const struct snd_kcontrol_new wm5110_snd_controls[] = {
+SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL,
+ ARIZONA_IN1_OSR_SHIFT, 1, 0),
+SOC_SINGLE("IN2 High Performance Switch", ARIZONA_IN2L_CONTROL,
+ ARIZONA_IN2_OSR_SHIFT, 1, 0),
+SOC_SINGLE("IN3 High Performance Switch", ARIZONA_IN3L_CONTROL,
+ ARIZONA_IN3_OSR_SHIFT, 1, 0),
+SOC_SINGLE("IN4 High Performance Switch", ARIZONA_IN4L_CONTROL,
+ ARIZONA_IN4_OSR_SHIFT, 1, 0),
+
+SOC_DOUBLE_R_RANGE_TLV("IN1 Volume", ARIZONA_IN1L_CONTROL,
+ ARIZONA_IN1R_CONTROL,
+ ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+SOC_DOUBLE_R_RANGE_TLV("IN2 Volume", ARIZONA_IN2L_CONTROL,
+ ARIZONA_IN2R_CONTROL,
+ ARIZONA_IN2L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+SOC_DOUBLE_R_RANGE_TLV("IN3 Volume", ARIZONA_IN3L_CONTROL,
+ ARIZONA_IN3R_CONTROL,
+ ARIZONA_IN3L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+
+SOC_DOUBLE_R("IN1 Digital Switch", ARIZONA_ADC_DIGITAL_VOLUME_1L,
+ ARIZONA_ADC_DIGITAL_VOLUME_1R, ARIZONA_IN1L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("IN2 Digital Switch", ARIZONA_ADC_DIGITAL_VOLUME_2L,
+ ARIZONA_ADC_DIGITAL_VOLUME_2R, ARIZONA_IN2L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("IN3 Digital Switch", ARIZONA_ADC_DIGITAL_VOLUME_3L,
+ ARIZONA_ADC_DIGITAL_VOLUME_3R, ARIZONA_IN3L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("IN4 Digital Switch", ARIZONA_ADC_DIGITAL_VOLUME_4L,
+ ARIZONA_ADC_DIGITAL_VOLUME_4R, ARIZONA_IN4L_MUTE_SHIFT, 1, 1),
+
+SOC_DOUBLE_R_TLV("IN1 Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1L,
+ ARIZONA_ADC_DIGITAL_VOLUME_1R, ARIZONA_IN1L_DIG_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("IN2 Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2L,
+ ARIZONA_ADC_DIGITAL_VOLUME_2R, ARIZONA_IN2L_DIG_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("IN3 Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_3L,
+ ARIZONA_ADC_DIGITAL_VOLUME_3R, ARIZONA_IN3L_DIG_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("IN4 Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_4L,
+ ARIZONA_ADC_DIGITAL_VOLUME_4R, ARIZONA_IN4L_DIG_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+
+ARIZONA_MIXER_CONTROLS("EQ1", ARIZONA_EQ1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
+
+SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B3 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B4 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B3 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B4 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B3 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B4 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B3 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B4 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DRC2L", ARIZONA_DRC2LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DRC2R", ARIZONA_DRC2RMIX_INPUT_1_SOURCE),
+
+SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5,
+ ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA),
+SND_SOC_BYTES_MASK("DRC2", ARIZONA_DRC2_CTRL1, 5,
+ ARIZONA_DRC2R_ENA | ARIZONA_DRC2L_ENA),
+
+ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
+
+SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
+SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
+SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
+SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode),
+
+ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE),
+
+SOC_SINGLE_TLV("Noise Generator Volume", ARIZONA_COMFORT_NOISE_GENERATOR,
+ ARIZONA_NOISE_GEN_GAIN_SHIFT, 0x16, 0, noise_tlv),
+
+ARIZONA_MIXER_CONTROLS("HPOUT1L", ARIZONA_OUT1LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("HPOUT1R", ARIZONA_OUT1RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("HPOUT2L", ARIZONA_OUT2LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("HPOUT2R", ARIZONA_OUT2RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EPOUT", ARIZONA_OUT3LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKOUTL", ARIZONA_OUT4LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKOUTR", ARIZONA_OUT4RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKDAT1L", ARIZONA_OUT5LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKDAT2L", ARIZONA_OUT6LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKDAT2R", ARIZONA_OUT6RMIX_INPUT_1_SOURCE),
+
+SOC_SINGLE("HPOUT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_1L,
+ ARIZONA_OUT1_OSR_SHIFT, 1, 0),
+SOC_SINGLE("OUT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_2L,
+ ARIZONA_OUT2_OSR_SHIFT, 1, 0),
+SOC_SINGLE("EPOUT High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_3L,
+ ARIZONA_OUT3_OSR_SHIFT, 1, 0),
+SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L,
+ ARIZONA_OUT4_OSR_SHIFT, 1, 0),
+SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L,
+ ARIZONA_OUT5_OSR_SHIFT, 1, 0),
+SOC_SINGLE("SPKDAT2 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_6L,
+ ARIZONA_OUT6_OSR_SHIFT, 1, 0),
+
+SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L,
+ ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("OUT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_2L,
+ ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_MUTE_SHIFT, 1, 1),
+SOC_SINGLE("EPOUT Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_3L,
+ ARIZONA_OUT3L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("Speaker Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_4L,
+ ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_OUT4L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("SPKDAT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_5L,
+ ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("SPKDAT2 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_6L,
+ ARIZONA_DAC_DIGITAL_VOLUME_6R, ARIZONA_OUT6L_MUTE_SHIFT, 1, 1),
+
+SOC_DOUBLE_R_TLV("HPOUT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_1L,
+ ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("OUT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_2L,
+ ARIZONA_DAC_DIGITAL_VOLUME_2R, ARIZONA_OUT2L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("EPOUT Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_3L,
+ ARIZONA_OUT3L_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("Speaker Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_4L,
+ ARIZONA_DAC_DIGITAL_VOLUME_4R, ARIZONA_OUT4L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("SPKDAT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_5L,
+ ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("SPKDAT2 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_6L,
+ ARIZONA_DAC_DIGITAL_VOLUME_6R, ARIZONA_OUT6L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+
+SOC_DOUBLE_R_RANGE_TLV("HPOUT1 Volume", ARIZONA_OUTPUT_PATH_CONFIG_1L,
+ ARIZONA_OUTPUT_PATH_CONFIG_1R,
+ ARIZONA_OUT1L_PGA_VOL_SHIFT,
+ 0x34, 0x40, 0, ana_tlv),
+SOC_DOUBLE_R_RANGE_TLV("OUT2 Volume", ARIZONA_OUTPUT_PATH_CONFIG_2L,
+ ARIZONA_OUTPUT_PATH_CONFIG_2R,
+ ARIZONA_OUT2L_PGA_VOL_SHIFT,
+ 0x34, 0x40, 0, ana_tlv),
+SOC_SINGLE_RANGE_TLV("EPOUT Volume", ARIZONA_OUTPUT_PATH_CONFIG_3L,
+ ARIZONA_OUT3L_PGA_VOL_SHIFT, 0x34, 0x40, 0, ana_tlv),
+
+SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT,
+ ARIZONA_SPK1R_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE("SPKDAT2 Switch", ARIZONA_PDM_SPK2_CTRL_1, ARIZONA_SPK2L_MUTE_SHIFT,
+ ARIZONA_SPK2R_MUTE_SHIFT, 1, 1),
+
+ARIZONA_MIXER_CONTROLS("AIF1TX1", ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX2", ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX3", ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX4", ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX5", ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX6", ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX7", ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX8", ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE),
+
+ARIZONA_MIXER_CONTROLS("AIF2TX1", ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF2TX2", ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE),
+
+ARIZONA_MIXER_CONTROLS("AIF3TX1", ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF3TX2", ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE),
+};
+
+ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ2, ARIZONA_EQ2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ3, ARIZONA_EQ3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(DRC2L, ARIZONA_DRC2LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(DRC2R, ARIZONA_DRC2RMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF3, ARIZONA_HPLP3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF4, ARIZONA_HPLP4MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(Mic, ARIZONA_MICMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(Noise, ARIZONA_NOISEMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(PWM1, ARIZONA_PWM1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(PWM2, ARIZONA_PWM2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(OUT1L, ARIZONA_OUT1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT1R, ARIZONA_OUT1RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT2L, ARIZONA_OUT2LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT2R, ARIZONA_OUT2RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT3, ARIZONA_OUT3LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKOUTL, ARIZONA_OUT4LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKOUTR, ARIZONA_OUT4RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKDAT1L, ARIZONA_OUT5LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKDAT1R, ARIZONA_OUT5RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKDAT2L, ARIZONA_OUT6LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKDAT2R, ARIZONA_OUT6RMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(AIF1TX1, ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX2, ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX3, ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX4, ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX5, ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX6, ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX7, ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX8, ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(AIF2TX1, ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF2TX2, ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(AIF3TX1, ARIZONA_AIF3TX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF3TX2, ARIZONA_AIF3TX2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(ASRC1L, ARIZONA_ASRC1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(ASRC1R, ARIZONA_ASRC1RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(ASRC2L, ARIZONA_ASRC2LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(ASRC2R, ARIZONA_ASRC2RMIX_INPUT_1_SOURCE);
+
+static const struct snd_soc_dapm_widget wm5110_dapm_widgets[] = {
+SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
+ 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
+ ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD2", 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD3", 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("CPVDD", 20),
+SND_SOC_DAPM_REGULATOR_SUPPLY("MICVDD", 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDDL", 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDDR", 0),
+
+SND_SOC_DAPM_SIGGEN("TONE"),
+SND_SOC_DAPM_SIGGEN("NOISE"),
+
+SND_SOC_DAPM_INPUT("IN1L"),
+SND_SOC_DAPM_INPUT("IN1R"),
+SND_SOC_DAPM_INPUT("IN2L"),
+SND_SOC_DAPM_INPUT("IN2R"),
+SND_SOC_DAPM_INPUT("IN3L"),
+SND_SOC_DAPM_INPUT("IN3R"),
+SND_SOC_DAPM_INPUT("IN4L"),
+SND_SOC_DAPM_INPUT("IN4R"),
+
+SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN1R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1R_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN2L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2L_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN2R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2R_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN3L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN3L_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN3R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN3R_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN4L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN4L_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN4R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN4R_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+
+SND_SOC_DAPM_SUPPLY("MICBIAS1", ARIZONA_MIC_BIAS_CTRL_1,
+ ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("MICBIAS2", ARIZONA_MIC_BIAS_CTRL_2,
+ ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("MICBIAS3", ARIZONA_MIC_BIAS_CTRL_3,
+ ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Noise Generator", ARIZONA_COMFORT_NOISE_GENERATOR,
+ ARIZONA_NOISE_GEN_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Tone Generator 1", ARIZONA_TONE_GENERATOR_1,
+ ARIZONA_TONE1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Tone Generator 2", ARIZONA_TONE_GENERATOR_1,
+ ARIZONA_TONE2_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Mic Mute Mixer", ARIZONA_MIC_NOISE_MIX_CONTROL_1,
+ ARIZONA_MICMUTE_MIX_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("EQ1", ARIZONA_EQ1_1, ARIZONA_EQ1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ2", ARIZONA_EQ2_1, ARIZONA_EQ2_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ3", ARIZONA_EQ3_1, ARIZONA_EQ3_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ4", ARIZONA_EQ4_1, ARIZONA_EQ4_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("DRC2L", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2L_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("DRC2R", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2R_ENA_SHIFT, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LHPF2", ARIZONA_HPLPF2_1, ARIZONA_LHPF2_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LHPF3", ARIZONA_HPLPF3_1, ARIZONA_LHPF3_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LHPF4", ARIZONA_HPLPF4_1, ARIZONA_LHPF4_ENA_SHIFT, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_PGA("PWM1 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM1_ENA_SHIFT,
+ 0, NULL, 0),
+SND_SOC_DAPM_PGA("PWM2 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM2_ENA_SHIFT,
+ 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("ASRC1L", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC1L_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("ASRC1R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC1R_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("ASRC2L", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2L_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("ASRC2R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2R_ENA_SHIFT, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0,
+ ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0,
+ ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0,
+ ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0,
+ ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF3TX1", NULL, 0,
+ ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF3TX2", NULL, 0,
+ ARIZONA_AIF3_TX_ENABLES, ARIZONA_AIF3TX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("AIF3RX1", NULL, 0,
+ ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF3RX2", NULL, 0,
+ ARIZONA_AIF3_RX_ENABLES, ARIZONA_AIF3RX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_PGA_E("OUT1L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT1R", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT2L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT2L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT2R", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT2R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT4L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT4L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT4R", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT4R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT5R", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT5R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT6L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT6L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT6R", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT6R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+
+ARIZONA_MIXER_WIDGETS(EQ1, "EQ1"),
+ARIZONA_MIXER_WIDGETS(EQ2, "EQ2"),
+ARIZONA_MIXER_WIDGETS(EQ3, "EQ3"),
+ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"),
+
+ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"),
+ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"),
+ARIZONA_MIXER_WIDGETS(DRC2L, "DRC2L"),
+ARIZONA_MIXER_WIDGETS(DRC2R, "DRC2R"),
+
+ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"),
+ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"),
+ARIZONA_MIXER_WIDGETS(LHPF3, "LHPF3"),
+ARIZONA_MIXER_WIDGETS(LHPF4, "LHPF4"),
+
+ARIZONA_MIXER_WIDGETS(Mic, "Mic"),
+ARIZONA_MIXER_WIDGETS(Noise, "Noise"),
+
+ARIZONA_MIXER_WIDGETS(PWM1, "PWM1"),
+ARIZONA_MIXER_WIDGETS(PWM2, "PWM2"),
+
+ARIZONA_MIXER_WIDGETS(OUT1L, "HPOUT1L"),
+ARIZONA_MIXER_WIDGETS(OUT1R, "HPOUT1R"),
+ARIZONA_MIXER_WIDGETS(OUT2L, "HPOUT2L"),
+ARIZONA_MIXER_WIDGETS(OUT2R, "HPOUT2R"),
+ARIZONA_MIXER_WIDGETS(OUT3, "EPOUT"),
+ARIZONA_MIXER_WIDGETS(SPKOUTL, "SPKOUTL"),
+ARIZONA_MIXER_WIDGETS(SPKOUTR, "SPKOUTR"),
+ARIZONA_MIXER_WIDGETS(SPKDAT1L, "SPKDAT1L"),
+ARIZONA_MIXER_WIDGETS(SPKDAT1R, "SPKDAT1R"),
+ARIZONA_MIXER_WIDGETS(SPKDAT2L, "SPKDAT2L"),
+ARIZONA_MIXER_WIDGETS(SPKDAT2R, "SPKDAT2R"),
+
+ARIZONA_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"),
+ARIZONA_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"),
+ARIZONA_MIXER_WIDGETS(AIF1TX3, "AIF1TX3"),
+ARIZONA_MIXER_WIDGETS(AIF1TX4, "AIF1TX4"),
+ARIZONA_MIXER_WIDGETS(AIF1TX5, "AIF1TX5"),
+ARIZONA_MIXER_WIDGETS(AIF1TX6, "AIF1TX6"),
+ARIZONA_MIXER_WIDGETS(AIF1TX7, "AIF1TX7"),
+ARIZONA_MIXER_WIDGETS(AIF1TX8, "AIF1TX8"),
+
+ARIZONA_MIXER_WIDGETS(AIF2TX1, "AIF2TX1"),
+ARIZONA_MIXER_WIDGETS(AIF2TX2, "AIF2TX2"),
+
+ARIZONA_MIXER_WIDGETS(AIF3TX1, "AIF3TX1"),
+ARIZONA_MIXER_WIDGETS(AIF3TX2, "AIF3TX2"),
+
+ARIZONA_MIXER_WIDGETS(ASRC1L, "ASRC1L"),
+ARIZONA_MIXER_WIDGETS(ASRC1R, "ASRC1R"),
+ARIZONA_MIXER_WIDGETS(ASRC2L, "ASRC2L"),
+ARIZONA_MIXER_WIDGETS(ASRC2R, "ASRC2R"),
+
+SND_SOC_DAPM_OUTPUT("HPOUT1L"),
+SND_SOC_DAPM_OUTPUT("HPOUT1R"),
+SND_SOC_DAPM_OUTPUT("HPOUT2L"),
+SND_SOC_DAPM_OUTPUT("HPOUT2R"),
+SND_SOC_DAPM_OUTPUT("EPOUTN"),
+SND_SOC_DAPM_OUTPUT("EPOUTP"),
+SND_SOC_DAPM_OUTPUT("SPKOUTLN"),
+SND_SOC_DAPM_OUTPUT("SPKOUTLP"),
+SND_SOC_DAPM_OUTPUT("SPKOUTRN"),
+SND_SOC_DAPM_OUTPUT("SPKOUTRP"),
+SND_SOC_DAPM_OUTPUT("SPKDAT1L"),
+SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
+SND_SOC_DAPM_OUTPUT("SPKDAT2L"),
+SND_SOC_DAPM_OUTPUT("SPKDAT2R"),
+};
+
+#define ARIZONA_MIXER_INPUT_ROUTES(name) \
+ { name, "Noise Generator", "Noise Generator" }, \
+ { name, "Tone Generator 1", "Tone Generator 1" }, \
+ { name, "Tone Generator 2", "Tone Generator 2" }, \
+ { name, "IN1L", "IN1L PGA" }, \
+ { name, "IN1R", "IN1R PGA" }, \
+ { name, "IN2L", "IN2L PGA" }, \
+ { name, "IN2R", "IN2R PGA" }, \
+ { name, "IN3L", "IN3L PGA" }, \
+ { name, "IN3R", "IN3R PGA" }, \
+ { name, "IN4L", "IN4L PGA" }, \
+ { name, "IN4R", "IN4R PGA" }, \
+ { name, "Mic Mute Mixer", "Mic Mute Mixer" }, \
+ { name, "AIF1RX1", "AIF1RX1" }, \
+ { name, "AIF1RX2", "AIF1RX2" }, \
+ { name, "AIF1RX3", "AIF1RX3" }, \
+ { name, "AIF1RX4", "AIF1RX4" }, \
+ { name, "AIF1RX5", "AIF1RX5" }, \
+ { name, "AIF1RX6", "AIF1RX6" }, \
+ { name, "AIF1RX7", "AIF1RX7" }, \
+ { name, "AIF1RX8", "AIF1RX8" }, \
+ { name, "AIF2RX1", "AIF2RX1" }, \
+ { name, "AIF2RX2", "AIF2RX2" }, \
+ { name, "AIF3RX1", "AIF3RX1" }, \
+ { name, "AIF3RX2", "AIF3RX2" }, \
+ { name, "EQ1", "EQ1" }, \
+ { name, "EQ2", "EQ2" }, \
+ { name, "EQ3", "EQ3" }, \
+ { name, "EQ4", "EQ4" }, \
+ { name, "DRC1L", "DRC1L" }, \
+ { name, "DRC1R", "DRC1R" }, \
+ { name, "DRC2L", "DRC2L" }, \
+ { name, "DRC2R", "DRC2R" }, \
+ { name, "LHPF1", "LHPF1" }, \
+ { name, "LHPF2", "LHPF2" }, \
+ { name, "LHPF3", "LHPF3" }, \
+ { name, "LHPF4", "LHPF4" }, \
+ { name, "ASRC1L", "ASRC1L" }, \
+ { name, "ASRC1R", "ASRC1R" }, \
+ { name, "ASRC2L", "ASRC2L" }, \
+ { name, "ASRC2R", "ASRC2R" }
+
+static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
+ { "AIF2 Capture", NULL, "DBVDD2" },
+ { "AIF2 Playback", NULL, "DBVDD2" },
+
+ { "AIF3 Capture", NULL, "DBVDD3" },
+ { "AIF3 Playback", NULL, "DBVDD3" },
+
+ { "OUT1L", NULL, "CPVDD" },
+ { "OUT1R", NULL, "CPVDD" },
+ { "OUT2L", NULL, "CPVDD" },
+ { "OUT2R", NULL, "CPVDD" },
+ { "OUT3L", NULL, "CPVDD" },
+
+ { "OUT4L", NULL, "SPKVDDL" },
+ { "OUT4R", NULL, "SPKVDDR" },
+
+ { "OUT1L", NULL, "SYSCLK" },
+ { "OUT1R", NULL, "SYSCLK" },
+ { "OUT2L", NULL, "SYSCLK" },
+ { "OUT2R", NULL, "SYSCLK" },
+ { "OUT3L", NULL, "SYSCLK" },
+ { "OUT4L", NULL, "SYSCLK" },
+ { "OUT4R", NULL, "SYSCLK" },
+ { "OUT5L", NULL, "SYSCLK" },
+ { "OUT5R", NULL, "SYSCLK" },
+ { "OUT6L", NULL, "SYSCLK" },
+ { "OUT6R", NULL, "SYSCLK" },
+
+ { "MICBIAS1", NULL, "MICVDD" },
+ { "MICBIAS2", NULL, "MICVDD" },
+ { "MICBIAS3", NULL, "MICVDD" },
+
+ { "Noise Generator", NULL, "NOISE" },
+ { "Tone Generator 1", NULL, "TONE" },
+ { "Tone Generator 2", NULL, "TONE" },
+
+ { "Mic Mute Mixer", NULL, "Noise Mixer" },
+ { "Mic Mute Mixer", NULL, "Mic Mixer" },
+
+ { "AIF1 Capture", NULL, "AIF1TX1" },
+ { "AIF1 Capture", NULL, "AIF1TX2" },
+ { "AIF1 Capture", NULL, "AIF1TX3" },
+ { "AIF1 Capture", NULL, "AIF1TX4" },
+ { "AIF1 Capture", NULL, "AIF1TX5" },
+ { "AIF1 Capture", NULL, "AIF1TX6" },
+ { "AIF1 Capture", NULL, "AIF1TX7" },
+ { "AIF1 Capture", NULL, "AIF1TX8" },
+
+ { "AIF1RX1", NULL, "AIF1 Playback" },
+ { "AIF1RX2", NULL, "AIF1 Playback" },
+ { "AIF1RX3", NULL, "AIF1 Playback" },
+ { "AIF1RX4", NULL, "AIF1 Playback" },
+ { "AIF1RX5", NULL, "AIF1 Playback" },
+ { "AIF1RX6", NULL, "AIF1 Playback" },
+ { "AIF1RX7", NULL, "AIF1 Playback" },
+ { "AIF1RX8", NULL, "AIF1 Playback" },
+
+ { "AIF2 Capture", NULL, "AIF2TX1" },
+ { "AIF2 Capture", NULL, "AIF2TX2" },
+
+ { "AIF2RX1", NULL, "AIF2 Playback" },
+ { "AIF2RX2", NULL, "AIF2 Playback" },
+
+ { "AIF3 Capture", NULL, "AIF3TX1" },
+ { "AIF3 Capture", NULL, "AIF3TX2" },
+
+ { "AIF3RX1", NULL, "AIF3 Playback" },
+ { "AIF3RX2", NULL, "AIF3 Playback" },
+
+ { "AIF1 Playback", NULL, "SYSCLK" },
+ { "AIF2 Playback", NULL, "SYSCLK" },
+ { "AIF3 Playback", NULL, "SYSCLK" },
+
+ { "AIF1 Capture", NULL, "SYSCLK" },
+ { "AIF2 Capture", NULL, "SYSCLK" },
+ { "AIF3 Capture", NULL, "SYSCLK" },
+
+ ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
+ ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
+ ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
+ ARIZONA_MIXER_ROUTES("OUT2R", "HPOUT2R"),
+ ARIZONA_MIXER_ROUTES("OUT3L", "EPOUT"),
+
+ ARIZONA_MIXER_ROUTES("OUT4L", "SPKOUTL"),
+ ARIZONA_MIXER_ROUTES("OUT4R", "SPKOUTR"),
+ ARIZONA_MIXER_ROUTES("OUT5L", "SPKDAT1L"),
+ ARIZONA_MIXER_ROUTES("OUT5R", "SPKDAT1R"),
+ ARIZONA_MIXER_ROUTES("OUT6L", "SPKDAT2L"),
+ ARIZONA_MIXER_ROUTES("OUT6R", "SPKDAT2R"),
+
+ ARIZONA_MIXER_ROUTES("PWM1 Driver", "PWM1"),
+ ARIZONA_MIXER_ROUTES("PWM2 Driver", "PWM2"),
+
+ ARIZONA_MIXER_ROUTES("AIF1TX1", "AIF1TX1"),
+ ARIZONA_MIXER_ROUTES("AIF1TX2", "AIF1TX2"),
+ ARIZONA_MIXER_ROUTES("AIF1TX3", "AIF1TX3"),
+ ARIZONA_MIXER_ROUTES("AIF1TX4", "AIF1TX4"),
+ ARIZONA_MIXER_ROUTES("AIF1TX5", "AIF1TX5"),
+ ARIZONA_MIXER_ROUTES("AIF1TX6", "AIF1TX6"),
+ ARIZONA_MIXER_ROUTES("AIF1TX7", "AIF1TX7"),
+ ARIZONA_MIXER_ROUTES("AIF1TX8", "AIF1TX8"),
+
+ ARIZONA_MIXER_ROUTES("AIF2TX1", "AIF2TX1"),
+ ARIZONA_MIXER_ROUTES("AIF2TX2", "AIF2TX2"),
+
+ ARIZONA_MIXER_ROUTES("AIF3TX1", "AIF3TX1"),
+ ARIZONA_MIXER_ROUTES("AIF3TX2", "AIF3TX2"),
+
+ ARIZONA_MIXER_ROUTES("EQ1", "EQ1"),
+ ARIZONA_MIXER_ROUTES("EQ2", "EQ2"),
+ ARIZONA_MIXER_ROUTES("EQ3", "EQ3"),
+ ARIZONA_MIXER_ROUTES("EQ4", "EQ4"),
+
+ ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"),
+ ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"),
+ ARIZONA_MIXER_ROUTES("DRC2L", "DRC2L"),
+ ARIZONA_MIXER_ROUTES("DRC2R", "DRC2R"),
+
+ ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"),
+ ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"),
+ ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
+ ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),
+
+ ARIZONA_MIXER_ROUTES("ASRC1L", "ASRC1L"),
+ ARIZONA_MIXER_ROUTES("ASRC1R", "ASRC1R"),
+ ARIZONA_MIXER_ROUTES("ASRC2L", "ASRC2L"),
+ ARIZONA_MIXER_ROUTES("ASRC2R", "ASRC2R"),
+
+ { "HPOUT1L", NULL, "OUT1L" },
+ { "HPOUT1R", NULL, "OUT1R" },
+
+ { "HPOUT2L", NULL, "OUT2L" },
+ { "HPOUT2R", NULL, "OUT2R" },
+
+ { "EPOUTN", NULL, "OUT3L" },
+ { "EPOUTP", NULL, "OUT3L" },
+
+ { "SPKOUTLN", NULL, "OUT4L" },
+ { "SPKOUTLP", NULL, "OUT4L" },
+
+ { "SPKOUTRN", NULL, "OUT4R" },
+ { "SPKOUTRP", NULL, "OUT4R" },
+
+ { "SPKDAT1L", NULL, "OUT5L" },
+ { "SPKDAT1R", NULL, "OUT5R" },
+
+ { "SPKDAT2L", NULL, "OUT6L" },
+ { "SPKDAT2R", NULL, "OUT6R" },
+};
+
+static int wm5110_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
+ unsigned int Fref, unsigned int Fout)
+{
+ struct wm5110_priv *wm5110 = snd_soc_codec_get_drvdata(codec);
+
+ switch (fll_id) {
+ case WM5110_FLL1:
+ return arizona_set_fll(&wm5110->fll[0], source, Fref, Fout);
+ case WM5110_FLL2:
+ return arizona_set_fll(&wm5110->fll[1], source, Fref, Fout);
+ default:
+ return -EINVAL;
+ }
+}
+
+#define WM5110_RATES SNDRV_PCM_RATE_8000_192000
+
+#define WM5110_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver wm5110_dai[] = {
+ {
+ .name = "wm5110-aif1",
+ .id = 1,
+ .base = ARIZONA_AIF1_BCLK_CTRL,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = WM5110_RATES,
+ .formats = WM5110_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = WM5110_RATES,
+ .formats = WM5110_FORMATS,
+ },
+ .ops = &arizona_dai_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "wm5110-aif2",
+ .id = 2,
+ .base = ARIZONA_AIF2_BCLK_CTRL,
+ .playback = {
+ .stream_name = "AIF2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM5110_RATES,
+ .formats = WM5110_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM5110_RATES,
+ .formats = WM5110_FORMATS,
+ },
+ .ops = &arizona_dai_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "wm5110-aif3",
+ .id = 3,
+ .base = ARIZONA_AIF3_BCLK_CTRL,
+ .playback = {
+ .stream_name = "AIF3 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM5110_RATES,
+ .formats = WM5110_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF3 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM5110_RATES,
+ .formats = WM5110_FORMATS,
+ },
+ .ops = &arizona_dai_ops,
+ .symmetric_rates = 1,
+ },
+};
+
+static int wm5110_codec_probe(struct snd_soc_codec *codec)
+{
+ struct wm5110_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+ codec->control_data = priv->core.arizona->regmap;
+ return snd_soc_codec_set_cache_io(codec, 32, 16, SND_SOC_REGMAP);
+}
+
+#define WM5110_DIG_VU 0x0200
+
+static unsigned int wm5110_digital_vu[] = {
+ ARIZONA_ADC_DIGITAL_VOLUME_1L,
+ ARIZONA_ADC_DIGITAL_VOLUME_1R,
+ ARIZONA_ADC_DIGITAL_VOLUME_2L,
+ ARIZONA_ADC_DIGITAL_VOLUME_2R,
+ ARIZONA_ADC_DIGITAL_VOLUME_3L,
+ ARIZONA_ADC_DIGITAL_VOLUME_3R,
+
+ ARIZONA_DAC_DIGITAL_VOLUME_1L,
+ ARIZONA_DAC_DIGITAL_VOLUME_1R,
+ ARIZONA_DAC_DIGITAL_VOLUME_2L,
+ ARIZONA_DAC_DIGITAL_VOLUME_2R,
+ ARIZONA_DAC_DIGITAL_VOLUME_3L,
+ ARIZONA_DAC_DIGITAL_VOLUME_3R,
+ ARIZONA_DAC_DIGITAL_VOLUME_4L,
+ ARIZONA_DAC_DIGITAL_VOLUME_4R,
+ ARIZONA_DAC_DIGITAL_VOLUME_5L,
+ ARIZONA_DAC_DIGITAL_VOLUME_5R,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_wm5110 = {
+ .probe = wm5110_codec_probe,
+
+ .idle_bias_off = true,
+
+ .set_sysclk = arizona_set_sysclk,
+ .set_pll = wm5110_set_fll,
+
+ .controls = wm5110_snd_controls,
+ .num_controls = ARRAY_SIZE(wm5110_snd_controls),
+ .dapm_widgets = wm5110_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm5110_dapm_widgets),
+ .dapm_routes = wm5110_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm5110_dapm_routes),
+};
+
+static int __devinit wm5110_probe(struct platform_device *pdev)
+{
+ struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ struct wm5110_priv *wm5110;
+ int i;
+
+ wm5110 = devm_kzalloc(&pdev->dev, sizeof(struct wm5110_priv),
+ GFP_KERNEL);
+ if (wm5110 == NULL)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, wm5110);
+
+ wm5110->core.arizona = arizona;
+
+ for (i = 0; i < ARRAY_SIZE(wm5110->fll); i++)
+ wm5110->fll[i].vco_mult = 3;
+
+ arizona_init_fll(arizona, 1, ARIZONA_FLL1_CONTROL_1 - 1,
+ ARIZONA_IRQ_FLL1_LOCK, ARIZONA_IRQ_FLL1_CLOCK_OK,
+ &wm5110->fll[0]);
+ arizona_init_fll(arizona, 2, ARIZONA_FLL2_CONTROL_1 - 1,
+ ARIZONA_IRQ_FLL2_LOCK, ARIZONA_IRQ_FLL2_CLOCK_OK,
+ &wm5110->fll[1]);
+
+ for (i = 0; i < ARRAY_SIZE(wm5110_dai); i++)
+ arizona_init_dai(&wm5110->core, i);
+
+ /* Latch volume update bits */
+ for (i = 0; i < ARRAY_SIZE(wm5110_digital_vu); i++)
+ regmap_update_bits(arizona->regmap, wm5110_digital_vu[i],
+ WM5110_DIG_VU, WM5110_DIG_VU);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5110,
+ wm5110_dai, ARRAY_SIZE(wm5110_dai));
+}
+
+static int __devexit wm5110_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver wm5110_codec_driver = {
+ .driver = {
+ .name = "wm5110-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm5110_probe,
+ .remove = __devexit_p(wm5110_remove),
+};
+
+module_platform_driver(wm5110_codec_driver);
+
+MODULE_DESCRIPTION("ASoC WM5110 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm5110-codec");
diff --git a/sound/soc/codecs/wm5110.h b/sound/soc/codecs/wm5110.h
new file mode 100644
index 000000000000..75e9351ccab0
--- /dev/null
+++ b/sound/soc/codecs/wm5110.h
@@ -0,0 +1,21 @@
+/*
+ * wm5110.h -- WM5110 ALSA SoC Audio driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM5110_H
+#define _WM5110_H
+
+#include "arizona.h"
+
+#define WM5110_FLL1 1
+#define WM5110_FLL2 2
+
+#endif
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 555ee146ae0d..d26c8ae4e6d9 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1,7 +1,7 @@
/*
* wm8350.c -- WM8350 ALSA SoC audio driver
*
- * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
+ * Copyright (C) 2007-12 Wolfson Microelectronics PLC.
*
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
@@ -71,20 +71,6 @@ struct wm8350_data {
int fll_freq_in;
};
-static unsigned int wm8350_codec_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- struct wm8350 *wm8350 = codec->control_data;
- return wm8350_reg_read(wm8350, reg);
-}
-
-static int wm8350_codec_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- struct wm8350 *wm8350 = codec->control_data;
- return wm8350_reg_write(wm8350, reg, value);
-}
-
/*
* Ramp OUT1 PGA volume to minimise pops at stream startup and shutdown.
*/
@@ -1519,7 +1505,9 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec)
if (ret != 0)
return ret;
- codec->control_data = wm8350;
+ codec->control_data = wm8350->regmap;
+
+ snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_REGMAP);
/* Put the codec into reset if it wasn't already */
wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_5, WM8350_CODEC_ENA);
@@ -1629,8 +1617,6 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8350 = {
.remove = wm8350_codec_remove,
.suspend = wm8350_suspend,
.resume = wm8350_resume,
- .read = wm8350_codec_read,
- .write = wm8350_codec_write,
.set_bias_level = wm8350_set_bias_level,
.controls = wm8350_snd_controls,
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 5dc31ebcd0e7..5d277a915f81 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -1,7 +1,7 @@
/*
* wm8400.c -- WM8400 ALSA Soc Audio driver
*
- * Copyright 2008, 2009 Wolfson Microelectronics PLC.
+ * Copyright 2008-11 Wolfson Microelectronics PLC.
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c
index 211285164d70..7c68226376e4 100644
--- a/sound/soc/codecs/wm8580.c
+++ b/sound/soc/codecs/wm8580.c
@@ -1,7 +1,7 @@
/*
* wm8580.c -- WM8580 ALSA Soc Audio driver
*
- * Copyright 2008, 2009 Wolfson Microelectronics PLC.
+ * Copyright 2008-11 Wolfson Microelectronics PLC.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 9d1b9b0271f1..bb1d26919b10 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -2,6 +2,7 @@
* wm8731.c -- WM8731 ALSA SoC Audio driver
*
* Copyright 2005 Openedhand Ltd.
+ * Copyright 2006-12 Wolfson Microelectronics, plc
*
* Author: Richard Purdie <richard@openedhand.com>
*
diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index 6e849cb04243..35f3d23200e0 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -1,7 +1,7 @@
/*
* wm8741.c -- WM8741 ALSA SoC Audio driver
*
- * Copyright 2010 Wolfson Microelectronics plc
+ * Copyright 2010-1 Wolfson Microelectronics plc
*
* Author: Ian Lartey <ian@opensource.wolfsonmicro.com>
*
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index a26482cd7654..13bff87ddcf5 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1,7 +1,7 @@
/*
* wm8753.c -- WM8753 ALSA Soc Audio driver
*
- * Copyright 2003 Wolfson Microelectronics PLC.
+ * Copyright 2003-11 Wolfson Microelectronics PLC.
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index a19db5a0a17a..879c356a9045 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -1,7 +1,7 @@
/*
* wm8776.c -- WM8776 ALSA SoC Audio driver
*
- * Copyright 2009 Wolfson Microelectronics plc
+ * Copyright 2009-12 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 6bd1b767b138..c088020172ab 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -1,7 +1,7 @@
/*
* wm8804.c -- WM8804 S/PDIF transceiver driver
*
- * Copyright 2010 Wolfson Microelectronics plc
+ * Copyright 2010-11 Wolfson Microelectronics plc
*
* Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
*
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 86b8a2926591..73f1c8d7bafb 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1,8 +1,8 @@
/*
* wm8903.c -- WM8903 ALSA SoC Audio driver
*
- * Copyright 2008 Wolfson Microelectronics
- * Copyright 2011 NVIDIA, Inc.
+ * Copyright 2008-12 Wolfson Microelectronics
+ * Copyright 2011-2012 NVIDIA, Inc.
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
@@ -116,6 +116,7 @@ static const struct reg_default wm8903_reg_defaults[] = {
struct wm8903_priv {
struct wm8903_platform_data *pdata;
+ struct device *dev;
struct snd_soc_codec *codec;
struct regmap *regmap;
@@ -1635,17 +1636,27 @@ EXPORT_SYMBOL_GPL(wm8903_mic_detect);
static irqreturn_t wm8903_irq(int irq, void *data)
{
- struct snd_soc_codec *codec = data;
- struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
- int mic_report;
- int int_pol;
- int int_val = 0;
- int mask = ~snd_soc_read(codec, WM8903_INTERRUPT_STATUS_1_MASK);
+ struct wm8903_priv *wm8903 = data;
+ int mic_report, ret;
+ unsigned int int_val, mask, int_pol;
- int_val = snd_soc_read(codec, WM8903_INTERRUPT_STATUS_1) & mask;
+ ret = regmap_read(wm8903->regmap, WM8903_INTERRUPT_STATUS_1_MASK,
+ &mask);
+ if (ret != 0) {
+ dev_err(wm8903->dev, "Failed to read IRQ mask: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ ret = regmap_read(wm8903->regmap, WM8903_INTERRUPT_STATUS_1, &int_val);
+ if (ret != 0) {
+ dev_err(wm8903->dev, "Failed to read IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ int_val &= ~mask;
if (int_val & WM8903_WSEQ_BUSY_EINT) {
- dev_warn(codec->dev, "Write sequencer done\n");
+ dev_warn(wm8903->dev, "Write sequencer done\n");
}
/*
@@ -1656,22 +1667,28 @@ static irqreturn_t wm8903_irq(int irq, void *data)
* the polarity register.
*/
mic_report = wm8903->mic_last_report;
- int_pol = snd_soc_read(codec, WM8903_INTERRUPT_POLARITY_1);
+ ret = regmap_read(wm8903->regmap, WM8903_INTERRUPT_POLARITY_1,
+ &int_pol);
+ if (ret != 0) {
+ dev_err(wm8903->dev, "Failed to read interrupt polarity: %d\n",
+ ret);
+ return IRQ_HANDLED;
+ }
#ifndef CONFIG_SND_SOC_WM8903_MODULE
if (int_val & (WM8903_MICSHRT_EINT | WM8903_MICDET_EINT))
- trace_snd_soc_jack_irq(dev_name(codec->dev));
+ trace_snd_soc_jack_irq(dev_name(wm8903->dev));
#endif
if (int_val & WM8903_MICSHRT_EINT) {
- dev_dbg(codec->dev, "Microphone short (pol=%x)\n", int_pol);
+ dev_dbg(wm8903->dev, "Microphone short (pol=%x)\n", int_pol);
mic_report ^= wm8903->mic_short;
int_pol ^= WM8903_MICSHRT_INV;
}
if (int_val & WM8903_MICDET_EINT) {
- dev_dbg(codec->dev, "Microphone detect (pol=%x)\n", int_pol);
+ dev_dbg(wm8903->dev, "Microphone detect (pol=%x)\n", int_pol);
mic_report ^= wm8903->mic_det;
int_pol ^= WM8903_MICDET_INV;
@@ -1679,8 +1696,8 @@ static irqreturn_t wm8903_irq(int irq, void *data)
msleep(wm8903->mic_delay);
}
- snd_soc_update_bits(codec, WM8903_INTERRUPT_POLARITY_1,
- WM8903_MICSHRT_INV | WM8903_MICDET_INV, int_pol);
+ regmap_update_bits(wm8903->regmap, WM8903_INTERRUPT_POLARITY_1,
+ WM8903_MICSHRT_INV | WM8903_MICDET_INV, int_pol);
snd_soc_jack_report(wm8903->mic_jack, mic_report,
wm8903->mic_short | wm8903->mic_det);
@@ -1774,7 +1791,6 @@ static int wm8903_gpio_request(struct gpio_chip *chip, unsigned offset)
static int wm8903_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
- struct snd_soc_codec *codec = wm8903->codec;
unsigned int mask, val;
int ret;
@@ -1782,8 +1798,8 @@ static int wm8903_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
val = (WM8903_GPn_FN_GPIO_INPUT << WM8903_GP1_FN_SHIFT) |
WM8903_GP1_DIR;
- ret = snd_soc_update_bits(codec, WM8903_GPIO_CONTROL_1 + offset,
- mask, val);
+ ret = regmap_update_bits(wm8903->regmap,
+ WM8903_GPIO_CONTROL_1 + offset, mask, val);
if (ret < 0)
return ret;
@@ -1793,10 +1809,9 @@ static int wm8903_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
static int wm8903_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
- struct snd_soc_codec *codec = wm8903->codec;
- int reg;
+ unsigned int reg;
- reg = snd_soc_read(codec, WM8903_GPIO_CONTROL_1 + offset);
+ regmap_read(wm8903->regmap, WM8903_GPIO_CONTROL_1 + offset, &reg);
return (reg & WM8903_GP1_LVL_MASK) >> WM8903_GP1_LVL_SHIFT;
}
@@ -1805,7 +1820,6 @@ static int wm8903_gpio_direction_out(struct gpio_chip *chip,
unsigned offset, int value)
{
struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
- struct snd_soc_codec *codec = wm8903->codec;
unsigned int mask, val;
int ret;
@@ -1813,8 +1827,8 @@ static int wm8903_gpio_direction_out(struct gpio_chip *chip,
val = (WM8903_GPn_FN_GPIO_OUTPUT << WM8903_GP1_FN_SHIFT) |
(value << WM8903_GP2_LVL_SHIFT);
- ret = snd_soc_update_bits(codec, WM8903_GPIO_CONTROL_1 + offset,
- mask, val);
+ ret = regmap_update_bits(wm8903->regmap,
+ WM8903_GPIO_CONTROL_1 + offset, mask, val);
if (ret < 0)
return ret;
@@ -1824,11 +1838,10 @@ static int wm8903_gpio_direction_out(struct gpio_chip *chip,
static void wm8903_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct wm8903_priv *wm8903 = gpio_to_wm8903(chip);
- struct snd_soc_codec *codec = wm8903->codec;
- snd_soc_update_bits(codec, WM8903_GPIO_CONTROL_1 + offset,
- WM8903_GP1_LVL_MASK,
- !!value << WM8903_GP1_LVL_SHIFT);
+ regmap_update_bits(wm8903->regmap, WM8903_GPIO_CONTROL_1 + offset,
+ WM8903_GP1_LVL_MASK,
+ !!value << WM8903_GP1_LVL_SHIFT);
}
static struct gpio_chip wm8903_template_chip = {
@@ -1842,15 +1855,14 @@ static struct gpio_chip wm8903_template_chip = {
.can_sleep = 1,
};
-static void wm8903_init_gpio(struct snd_soc_codec *codec)
+static void wm8903_init_gpio(struct wm8903_priv *wm8903)
{
- struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
struct wm8903_platform_data *pdata = wm8903->pdata;
int ret;
wm8903->gpio_chip = wm8903_template_chip;
wm8903->gpio_chip.ngpio = WM8903_NUM_GPIO;
- wm8903->gpio_chip.dev = codec->dev;
+ wm8903->gpio_chip.dev = wm8903->dev;
if (pdata->gpio_base)
wm8903->gpio_chip.base = pdata->gpio_base;
@@ -1859,24 +1871,23 @@ static void wm8903_init_gpio(struct snd_soc_codec *codec)
ret = gpiochip_add(&wm8903->gpio_chip);
if (ret != 0)
- dev_err(codec->dev, "Failed to add GPIOs: %d\n", ret);
+ dev_err(wm8903->dev, "Failed to add GPIOs: %d\n", ret);
}
-static void wm8903_free_gpio(struct snd_soc_codec *codec)
+static void wm8903_free_gpio(struct wm8903_priv *wm8903)
{
- struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
int ret;
ret = gpiochip_remove(&wm8903->gpio_chip);
if (ret != 0)
- dev_err(codec->dev, "Failed to remove GPIOs: %d\n", ret);
+ dev_err(wm8903->dev, "Failed to remove GPIOs: %d\n", ret);
}
#else
-static void wm8903_init_gpio(struct snd_soc_codec *codec)
+static void wm8903_init_gpio(struct wm8903_priv *wm8903)
{
}
-static void wm8903_free_gpio(struct snd_soc_codec *codec)
+static void wm8903_free_gpio(struct wm8903_priv *wm8903)
{
}
#endif
@@ -1884,11 +1895,7 @@ static void wm8903_free_gpio(struct snd_soc_codec *codec)
static int wm8903_probe(struct snd_soc_codec *codec)
{
struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
- struct wm8903_platform_data *pdata = wm8903->pdata;
- int ret, i;
- int trigger, irq_pol;
- u16 val;
- bool mic_gpio = false;
+ int ret;
wm8903->codec = codec;
codec->control_data = wm8903->regmap;
@@ -1899,121 +1906,16 @@ static int wm8903_probe(struct snd_soc_codec *codec)
return ret;
}
- /* Set up GPIOs, detect if any are MIC detect outputs */
- for (i = 0; i < ARRAY_SIZE(pdata->gpio_cfg); i++) {
- if ((!pdata->gpio_cfg[i]) ||
- (pdata->gpio_cfg[i] > WM8903_GPIO_CONFIG_ZERO))
- continue;
-
- snd_soc_write(codec, WM8903_GPIO_CONTROL_1 + i,
- pdata->gpio_cfg[i] & 0x7fff);
-
- val = (pdata->gpio_cfg[i] & WM8903_GP1_FN_MASK)
- >> WM8903_GP1_FN_SHIFT;
-
- switch (val) {
- case WM8903_GPn_FN_MICBIAS_CURRENT_DETECT:
- case WM8903_GPn_FN_MICBIAS_SHORT_DETECT:
- mic_gpio = true;
- break;
- default:
- break;
- }
- }
-
- /* Set up microphone detection */
- snd_soc_write(codec, WM8903_MIC_BIAS_CONTROL_0,
- pdata->micdet_cfg);
-
- /* Microphone detection needs the WSEQ clock */
- if (pdata->micdet_cfg)
- snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
- WM8903_WSEQ_ENA, WM8903_WSEQ_ENA);
-
- /* If microphone detection is enabled by pdata but
- * detected via IRQ then interrupts can be lost before
- * the machine driver has set up microphone detection
- * IRQs as the IRQs are clear on read. The detection
- * will be enabled when the machine driver configures.
- */
- WARN_ON(!mic_gpio && (pdata->micdet_cfg & WM8903_MICDET_ENA));
-
- wm8903->mic_delay = pdata->micdet_delay;
-
- if (wm8903->irq) {
- if (pdata->irq_active_low) {
- trigger = IRQF_TRIGGER_LOW;
- irq_pol = WM8903_IRQ_POL;
- } else {
- trigger = IRQF_TRIGGER_HIGH;
- irq_pol = 0;
- }
-
- snd_soc_update_bits(codec, WM8903_INTERRUPT_CONTROL,
- WM8903_IRQ_POL, irq_pol);
-
- ret = request_threaded_irq(wm8903->irq, NULL, wm8903_irq,
- trigger | IRQF_ONESHOT,
- "wm8903", codec);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to request IRQ: %d\n",
- ret);
- return ret;
- }
-
- /* Enable write sequencer interrupts */
- snd_soc_update_bits(codec, WM8903_INTERRUPT_STATUS_1_MASK,
- WM8903_IM_WSEQ_BUSY_EINT, 0);
- }
-
/* power on device */
wm8903_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- /* Latch volume update bits */
- val = snd_soc_read(codec, WM8903_ADC_DIGITAL_VOLUME_LEFT);
- val |= WM8903_ADCVU;
- snd_soc_write(codec, WM8903_ADC_DIGITAL_VOLUME_LEFT, val);
- snd_soc_write(codec, WM8903_ADC_DIGITAL_VOLUME_RIGHT, val);
-
- val = snd_soc_read(codec, WM8903_DAC_DIGITAL_VOLUME_LEFT);
- val |= WM8903_DACVU;
- snd_soc_write(codec, WM8903_DAC_DIGITAL_VOLUME_LEFT, val);
- snd_soc_write(codec, WM8903_DAC_DIGITAL_VOLUME_RIGHT, val);
-
- val = snd_soc_read(codec, WM8903_ANALOGUE_OUT1_LEFT);
- val |= WM8903_HPOUTVU;
- snd_soc_write(codec, WM8903_ANALOGUE_OUT1_LEFT, val);
- snd_soc_write(codec, WM8903_ANALOGUE_OUT1_RIGHT, val);
-
- val = snd_soc_read(codec, WM8903_ANALOGUE_OUT2_LEFT);
- val |= WM8903_LINEOUTVU;
- snd_soc_write(codec, WM8903_ANALOGUE_OUT2_LEFT, val);
- snd_soc_write(codec, WM8903_ANALOGUE_OUT2_RIGHT, val);
-
- val = snd_soc_read(codec, WM8903_ANALOGUE_OUT3_LEFT);
- val |= WM8903_SPKVU;
- snd_soc_write(codec, WM8903_ANALOGUE_OUT3_LEFT, val);
- snd_soc_write(codec, WM8903_ANALOGUE_OUT3_RIGHT, val);
-
- /* Enable DAC soft mute by default */
- snd_soc_update_bits(codec, WM8903_DAC_DIGITAL_1,
- WM8903_DAC_MUTEMODE | WM8903_DAC_MUTE,
- WM8903_DAC_MUTEMODE | WM8903_DAC_MUTE);
-
- wm8903_init_gpio(codec);
-
return ret;
}
/* power down chip */
static int wm8903_remove(struct snd_soc_codec *codec)
{
- struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
-
- wm8903_free_gpio(codec);
wm8903_set_bias_level(codec, SND_SOC_BIAS_OFF);
- if (wm8903->irq)
- free_irq(wm8903->irq, codec);
return 0;
}
@@ -2123,15 +2025,18 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
{
struct wm8903_platform_data *pdata = dev_get_platdata(&i2c->dev);
struct wm8903_priv *wm8903;
- unsigned int val;
- int ret;
+ int trigger;
+ bool mic_gpio = false;
+ unsigned int val, irq_pol;
+ int ret, i;
wm8903 = devm_kzalloc(&i2c->dev, sizeof(struct wm8903_priv),
GFP_KERNEL);
if (wm8903 == NULL)
return -ENOMEM;
+ wm8903->dev = &i2c->dev;
- wm8903->regmap = regmap_init_i2c(i2c, &wm8903_regmap);
+ wm8903->regmap = devm_regmap_init_i2c(i2c, &wm8903_regmap);
if (IS_ERR(wm8903->regmap)) {
ret = PTR_ERR(wm8903->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
@@ -2140,7 +2045,6 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
}
i2c_set_clientdata(i2c, wm8903);
- wm8903->irq = i2c->irq;
/* If no platform data was supplied, create storage for defaults */
if (pdata) {
@@ -2167,6 +2071,8 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
}
}
+ pdata = wm8903->pdata;
+
ret = regmap_read(wm8903->regmap, WM8903_SW_RESET_AND_ID, &val);
if (ret != 0) {
dev_err(&i2c->dev, "Failed to read chip ID: %d\n", ret);
@@ -2189,6 +2095,107 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
/* Reset the device */
regmap_write(wm8903->regmap, WM8903_SW_RESET_AND_ID, 0x8903);
+ wm8903_init_gpio(wm8903);
+
+ /* Set up GPIO pin state, detect if any are MIC detect outputs */
+ for (i = 0; i < ARRAY_SIZE(pdata->gpio_cfg); i++) {
+ if ((!pdata->gpio_cfg[i]) ||
+ (pdata->gpio_cfg[i] > WM8903_GPIO_CONFIG_ZERO))
+ continue;
+
+ regmap_write(wm8903->regmap, WM8903_GPIO_CONTROL_1 + i,
+ pdata->gpio_cfg[i] & 0x7fff);
+
+ val = (pdata->gpio_cfg[i] & WM8903_GP1_FN_MASK)
+ >> WM8903_GP1_FN_SHIFT;
+
+ switch (val) {
+ case WM8903_GPn_FN_MICBIAS_CURRENT_DETECT:
+ case WM8903_GPn_FN_MICBIAS_SHORT_DETECT:
+ mic_gpio = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Set up microphone detection */
+ regmap_write(wm8903->regmap, WM8903_MIC_BIAS_CONTROL_0,
+ pdata->micdet_cfg);
+
+ /* Microphone detection needs the WSEQ clock */
+ if (pdata->micdet_cfg)
+ regmap_update_bits(wm8903->regmap, WM8903_WRITE_SEQUENCER_0,
+ WM8903_WSEQ_ENA, WM8903_WSEQ_ENA);
+
+ /* If microphone detection is enabled by pdata but
+ * detected via IRQ then interrupts can be lost before
+ * the machine driver has set up microphone detection
+ * IRQs as the IRQs are clear on read. The detection
+ * will be enabled when the machine driver configures.
+ */
+ WARN_ON(!mic_gpio && (pdata->micdet_cfg & WM8903_MICDET_ENA));
+
+ wm8903->mic_delay = pdata->micdet_delay;
+
+ if (i2c->irq) {
+ if (pdata->irq_active_low) {
+ trigger = IRQF_TRIGGER_LOW;
+ irq_pol = WM8903_IRQ_POL;
+ } else {
+ trigger = IRQF_TRIGGER_HIGH;
+ irq_pol = 0;
+ }
+
+ regmap_update_bits(wm8903->regmap, WM8903_INTERRUPT_CONTROL,
+ WM8903_IRQ_POL, irq_pol);
+
+ ret = request_threaded_irq(i2c->irq, NULL, wm8903_irq,
+ trigger | IRQF_ONESHOT,
+ "wm8903", wm8903);
+ if (ret != 0) {
+ dev_err(wm8903->dev, "Failed to request IRQ: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Enable write sequencer interrupts */
+ regmap_update_bits(wm8903->regmap,
+ WM8903_INTERRUPT_STATUS_1_MASK,
+ WM8903_IM_WSEQ_BUSY_EINT, 0);
+ }
+
+ /* Latch volume update bits */
+ regmap_update_bits(wm8903->regmap, WM8903_ADC_DIGITAL_VOLUME_LEFT,
+ WM8903_ADCVU, WM8903_ADCVU);
+ regmap_update_bits(wm8903->regmap, WM8903_ADC_DIGITAL_VOLUME_RIGHT,
+ WM8903_ADCVU, WM8903_ADCVU);
+
+ regmap_update_bits(wm8903->regmap, WM8903_DAC_DIGITAL_VOLUME_LEFT,
+ WM8903_DACVU, WM8903_DACVU);
+ regmap_update_bits(wm8903->regmap, WM8903_DAC_DIGITAL_VOLUME_RIGHT,
+ WM8903_DACVU, WM8903_DACVU);
+
+ regmap_update_bits(wm8903->regmap, WM8903_ANALOGUE_OUT1_LEFT,
+ WM8903_HPOUTVU, WM8903_HPOUTVU);
+ regmap_update_bits(wm8903->regmap, WM8903_ANALOGUE_OUT1_RIGHT,
+ WM8903_HPOUTVU, WM8903_HPOUTVU);
+
+ regmap_update_bits(wm8903->regmap, WM8903_ANALOGUE_OUT2_LEFT,
+ WM8903_LINEOUTVU, WM8903_LINEOUTVU);
+ regmap_update_bits(wm8903->regmap, WM8903_ANALOGUE_OUT2_RIGHT,
+ WM8903_LINEOUTVU, WM8903_LINEOUTVU);
+
+ regmap_update_bits(wm8903->regmap, WM8903_ANALOGUE_OUT3_LEFT,
+ WM8903_SPKVU, WM8903_SPKVU);
+ regmap_update_bits(wm8903->regmap, WM8903_ANALOGUE_OUT3_RIGHT,
+ WM8903_SPKVU, WM8903_SPKVU);
+
+ /* Enable DAC soft mute by default */
+ regmap_update_bits(wm8903->regmap, WM8903_DAC_DIGITAL_1,
+ WM8903_DAC_MUTEMODE | WM8903_DAC_MUTE,
+ WM8903_DAC_MUTEMODE | WM8903_DAC_MUTE);
+
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8903, &wm8903_dai, 1);
if (ret != 0)
@@ -2196,7 +2203,6 @@ static __devinit int wm8903_i2c_probe(struct i2c_client *i2c,
return 0;
err:
- regmap_exit(wm8903->regmap);
return ret;
}
@@ -2204,7 +2210,9 @@ static __devexit int wm8903_i2c_remove(struct i2c_client *client)
{
struct wm8903_priv *wm8903 = i2c_get_clientdata(client);
- regmap_exit(wm8903->regmap);
+ if (client->irq)
+ free_irq(client->irq, wm8903);
+ wm8903_free_gpio(wm8903);
snd_soc_unregister_codec(&client->dev);
return 0;
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 812acd83fb48..0013afe48e66 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1,7 +1,7 @@
/*
* wm8904.c -- WM8904 ALSA SoC Audio driver
*
- * Copyright 2009 Wolfson Microelectronics plc
+ * Copyright 2009-12 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
@@ -314,11 +314,6 @@ static bool wm8904_readable_register(struct device *dev, unsigned int reg)
}
}
-static int wm8904_reset(struct snd_soc_codec *codec)
-{
- return snd_soc_write(codec, WM8904_SW_RESET_AND_ID, 0);
-}
-
static int wm8904_configure_clocking(struct snd_soc_codec *codec)
{
struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
@@ -1945,25 +1940,6 @@ static struct snd_soc_dai_driver wm8904_dai = {
.symmetric_rates = 1,
};
-#ifdef CONFIG_PM
-static int wm8904_suspend(struct snd_soc_codec *codec)
-{
- wm8904_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
- return 0;
-}
-
-static int wm8904_resume(struct snd_soc_codec *codec)
-{
- wm8904_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- return 0;
-}
-#else
-#define wm8904_suspend NULL
-#define wm8904_resume NULL
-#endif
-
static void wm8904_handle_retune_mobile_pdata(struct snd_soc_codec *codec)
{
struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
@@ -2078,8 +2054,7 @@ static void wm8904_handle_pdata(struct snd_soc_codec *codec)
static int wm8904_probe(struct snd_soc_codec *codec)
{
struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
- struct wm8904_pdata *pdata = wm8904->pdata;
- int ret, i;
+ int ret;
codec->control_data = wm8904->regmap;
@@ -2101,127 +2076,17 @@ static int wm8904_probe(struct snd_soc_codec *codec)
return ret;
}
- for (i = 0; i < ARRAY_SIZE(wm8904->supplies); i++)
- wm8904->supplies[i].supply = wm8904_supply_names[i];
-
- ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8904->supplies),
- wm8904->supplies);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
- return ret;
- }
-
- ret = regulator_bulk_enable(ARRAY_SIZE(wm8904->supplies),
- wm8904->supplies);
- if (ret != 0) {
- dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
- goto err_get;
- }
-
- ret = snd_soc_read(codec, WM8904_SW_RESET_AND_ID);
- if (ret < 0) {
- dev_err(codec->dev, "Failed to read ID register\n");
- goto err_enable;
- }
- if (ret != 0x8904) {
- dev_err(codec->dev, "Device is not a WM8904, ID is %x\n", ret);
- ret = -EINVAL;
- goto err_enable;
- }
-
- ret = snd_soc_read(codec, WM8904_REVISION);
- if (ret < 0) {
- dev_err(codec->dev, "Failed to read device revision: %d\n",
- ret);
- goto err_enable;
- }
- dev_info(codec->dev, "revision %c\n", ret + 'A');
-
- ret = wm8904_reset(codec);
- if (ret < 0) {
- dev_err(codec->dev, "Failed to issue reset\n");
- goto err_enable;
- }
-
- regcache_cache_only(wm8904->regmap, true);
- /* Change some default settings - latch VU and enable ZC */
- snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_LEFT,
- WM8904_ADC_VU, WM8904_ADC_VU);
- snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_RIGHT,
- WM8904_ADC_VU, WM8904_ADC_VU);
- snd_soc_update_bits(codec, WM8904_DAC_DIGITAL_VOLUME_LEFT,
- WM8904_DAC_VU, WM8904_DAC_VU);
- snd_soc_update_bits(codec, WM8904_DAC_DIGITAL_VOLUME_RIGHT,
- WM8904_DAC_VU, WM8904_DAC_VU);
- snd_soc_update_bits(codec, WM8904_ANALOGUE_OUT1_LEFT,
- WM8904_HPOUT_VU | WM8904_HPOUTLZC,
- WM8904_HPOUT_VU | WM8904_HPOUTLZC);
- snd_soc_update_bits(codec, WM8904_ANALOGUE_OUT1_RIGHT,
- WM8904_HPOUT_VU | WM8904_HPOUTRZC,
- WM8904_HPOUT_VU | WM8904_HPOUTRZC);
- snd_soc_update_bits(codec, WM8904_ANALOGUE_OUT2_LEFT,
- WM8904_LINEOUT_VU | WM8904_LINEOUTLZC,
- WM8904_LINEOUT_VU | WM8904_LINEOUTLZC);
- snd_soc_update_bits(codec, WM8904_ANALOGUE_OUT2_RIGHT,
- WM8904_LINEOUT_VU | WM8904_LINEOUTRZC,
- WM8904_LINEOUT_VU | WM8904_LINEOUTRZC);
- snd_soc_update_bits(codec, WM8904_CLOCK_RATES_0,
- WM8904_SR_MODE, 0);
-
- /* Apply configuration from the platform data. */
- if (wm8904->pdata) {
- for (i = 0; i < WM8904_GPIO_REGS; i++) {
- if (!pdata->gpio_cfg[i])
- continue;
-
- regmap_update_bits(wm8904->regmap,
- WM8904_GPIO_CONTROL_1 + i,
- 0xffff,
- pdata->gpio_cfg[i]);
- }
-
- /* Zero is the default value for these anyway */
- for (i = 0; i < WM8904_MIC_REGS; i++)
- regmap_update_bits(wm8904->regmap,
- WM8904_MIC_BIAS_CONTROL_0 + i,
- 0xffff,
- pdata->mic_cfg[i]);
- }
-
- /* Set Class W by default - this will be managed by the Class
- * G widget at runtime where bypass paths are available.
- */
- snd_soc_update_bits(codec, WM8904_CLASS_W_0,
- WM8904_CP_DYN_PWR, WM8904_CP_DYN_PWR);
-
- /* Use normal bias source */
- snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
- WM8904_POBCTRL, 0);
-
- wm8904_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
- /* Bias level configuration will have done an extra enable */
- regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
-
wm8904_handle_pdata(codec);
wm8904_add_widgets(codec);
return 0;
-
-err_enable:
- regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
-err_get:
- regulator_bulk_free(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
- return ret;
}
static int wm8904_remove(struct snd_soc_codec *codec)
{
struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
- wm8904_set_bias_level(codec, SND_SOC_BIAS_OFF);
- regulator_bulk_free(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
kfree(wm8904->retune_mobile_texts);
kfree(wm8904->drc_texts);
@@ -2231,8 +2096,6 @@ static int wm8904_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_wm8904 = {
.probe = wm8904_probe,
.remove = wm8904_remove,
- .suspend = wm8904_suspend,
- .resume = wm8904_resume,
.set_bias_level = wm8904_set_bias_level,
.idle_bias_off = true,
};
@@ -2254,14 +2117,15 @@ static __devinit int wm8904_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8904_priv *wm8904;
- int ret;
+ unsigned int val;
+ int ret, i;
wm8904 = devm_kzalloc(&i2c->dev, sizeof(struct wm8904_priv),
GFP_KERNEL);
if (wm8904 == NULL)
return -ENOMEM;
- wm8904->regmap = regmap_init_i2c(i2c, &wm8904_regmap);
+ wm8904->regmap = devm_regmap_init_i2c(i2c, &wm8904_regmap);
if (IS_ERR(wm8904->regmap)) {
ret = PTR_ERR(wm8904->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
@@ -2273,23 +2137,121 @@ static __devinit int wm8904_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, wm8904);
wm8904->pdata = i2c->dev.platform_data;
+ for (i = 0; i < ARRAY_SIZE(wm8904->supplies); i++)
+ wm8904->supplies[i].supply = wm8904_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm8904->supplies),
+ wm8904->supplies);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8904->supplies),
+ wm8904->supplies);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to enable supplies: %d\n", ret);
+ return ret;
+ }
+
+ ret = regmap_read(wm8904->regmap, WM8904_SW_RESET_AND_ID, &val);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to read ID register: %d\n", ret);
+ goto err_enable;
+ }
+ if (val != 0x8904) {
+ dev_err(&i2c->dev, "Device is not a WM8904, ID is %x\n", val);
+ ret = -EINVAL;
+ goto err_enable;
+ }
+
+ ret = regmap_read(wm8904->regmap, WM8904_REVISION, &val);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to read device revision: %d\n",
+ ret);
+ goto err_enable;
+ }
+ dev_info(&i2c->dev, "revision %c\n", val + 'A');
+
+ ret = regmap_write(wm8904->regmap, WM8904_SW_RESET_AND_ID, 0);
+ if (ret < 0) {
+ dev_err(&i2c->dev, "Failed to issue reset: %d\n", ret);
+ goto err_enable;
+ }
+
+ /* Change some default settings - latch VU and enable ZC */
+ regmap_update_bits(wm8904->regmap, WM8904_ADC_DIGITAL_VOLUME_LEFT,
+ WM8904_ADC_VU, WM8904_ADC_VU);
+ regmap_update_bits(wm8904->regmap, WM8904_ADC_DIGITAL_VOLUME_RIGHT,
+ WM8904_ADC_VU, WM8904_ADC_VU);
+ regmap_update_bits(wm8904->regmap, WM8904_DAC_DIGITAL_VOLUME_LEFT,
+ WM8904_DAC_VU, WM8904_DAC_VU);
+ regmap_update_bits(wm8904->regmap, WM8904_DAC_DIGITAL_VOLUME_RIGHT,
+ WM8904_DAC_VU, WM8904_DAC_VU);
+ regmap_update_bits(wm8904->regmap, WM8904_ANALOGUE_OUT1_LEFT,
+ WM8904_HPOUT_VU | WM8904_HPOUTLZC,
+ WM8904_HPOUT_VU | WM8904_HPOUTLZC);
+ regmap_update_bits(wm8904->regmap, WM8904_ANALOGUE_OUT1_RIGHT,
+ WM8904_HPOUT_VU | WM8904_HPOUTRZC,
+ WM8904_HPOUT_VU | WM8904_HPOUTRZC);
+ regmap_update_bits(wm8904->regmap, WM8904_ANALOGUE_OUT2_LEFT,
+ WM8904_LINEOUT_VU | WM8904_LINEOUTLZC,
+ WM8904_LINEOUT_VU | WM8904_LINEOUTLZC);
+ regmap_update_bits(wm8904->regmap, WM8904_ANALOGUE_OUT2_RIGHT,
+ WM8904_LINEOUT_VU | WM8904_LINEOUTRZC,
+ WM8904_LINEOUT_VU | WM8904_LINEOUTRZC);
+ regmap_update_bits(wm8904->regmap, WM8904_CLOCK_RATES_0,
+ WM8904_SR_MODE, 0);
+
+ /* Apply configuration from the platform data. */
+ if (wm8904->pdata) {
+ for (i = 0; i < WM8904_GPIO_REGS; i++) {
+ if (!wm8904->pdata->gpio_cfg[i])
+ continue;
+
+ regmap_update_bits(wm8904->regmap,
+ WM8904_GPIO_CONTROL_1 + i,
+ 0xffff,
+ wm8904->pdata->gpio_cfg[i]);
+ }
+
+ /* Zero is the default value for these anyway */
+ for (i = 0; i < WM8904_MIC_REGS; i++)
+ regmap_update_bits(wm8904->regmap,
+ WM8904_MIC_BIAS_CONTROL_0 + i,
+ 0xffff,
+ wm8904->pdata->mic_cfg[i]);
+ }
+
+ /* Set Class W by default - this will be managed by the Class
+ * G widget at runtime where bypass paths are available.
+ */
+ regmap_update_bits(wm8904->regmap, WM8904_CLASS_W_0,
+ WM8904_CP_DYN_PWR, WM8904_CP_DYN_PWR);
+
+ /* Use normal bias source */
+ regmap_update_bits(wm8904->regmap, WM8904_BIAS_CONTROL_0,
+ WM8904_POBCTRL, 0);
+
+ /* Can leave the device powered off until we need it */
+ regcache_cache_only(wm8904->regmap, true);
+ regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
+
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8904, &wm8904_dai, 1);
if (ret != 0)
- goto err;
+ return ret;
return 0;
-err:
- regmap_exit(wm8904->regmap);
+err_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
return ret;
}
static __devexit int wm8904_i2c_remove(struct i2c_client *client)
{
- struct wm8904_priv *wm8904 = i2c_get_clientdata(client);
snd_soc_unregister_codec(&client->dev);
- regmap_exit(wm8904->regmap);
return 0;
}
@@ -2311,23 +2273,7 @@ static struct i2c_driver wm8904_i2c_driver = {
.id_table = wm8904_i2c_id,
};
-static int __init wm8904_modinit(void)
-{
- int ret = 0;
- ret = i2c_add_driver(&wm8904_i2c_driver);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register wm8904 I2C driver: %d\n",
- ret);
- }
- return ret;
-}
-module_init(wm8904_modinit);
-
-static void __exit wm8904_exit(void)
-{
- i2c_del_driver(&wm8904_i2c_driver);
-}
-module_exit(wm8904_exit);
+module_i2c_driver(wm8904_i2c_driver);
MODULE_DESCRIPTION("ASoC WM8904 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 8bc659d8dd2e..96518ac8e24c 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -1,6 +1,8 @@
/*
* wm8960.c -- WM8960 ALSA SoC Audio driver
*
+ * Copyright 2007-11 Wolfson Microelectronics, plc
+ *
* Author: Liam Girdwood
*
* This program is free software; you can redistribute it and/or modify
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index 05ea7c274093..01edbcc754d2 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -1,6 +1,8 @@
/*
* wm8961.c -- WM8961 ALSA SoC Audio driver
*
+ * Copyright 2009-10 Wolfson Microelectronics, plc
+ *
* Author: Mark Brown
*
* This program is free software; you can redistribute it and/or modify
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 0cfce9999c89..eaf65863ec21 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1,7 +1,7 @@
/*
* wm8962.c -- WM8962 ALSA SoC Audio driver
*
- * Copyright 2010 Wolfson Microelectronics plc
+ * Copyright 2010-2 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
@@ -2580,6 +2580,9 @@ static int wm8962_hw_params(struct snd_pcm_substream *substream,
WM8962_SAMPLE_RATE_INT_MODE |
WM8962_SAMPLE_RATE_MASK, adctl3);
+ dev_dbg(codec->dev, "hw_params set BCLK %dHz LRCLK %dHz\n",
+ wm8962->bclk, wm8962->lrclk);
+
if (codec->dapm.bias_level == SND_SOC_BIAS_ON)
wm8962_configure_bclk(codec);
@@ -3722,6 +3725,9 @@ static int wm8962_runtime_resume(struct device *dev)
}
regcache_cache_only(wm8962->regmap, false);
+
+ wm8962_reset(wm8962);
+
regcache_sync(wm8962->regmap);
regmap_update_bits(wm8962->regmap, WM8962_ANTI_POP,
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 36acfccab999..9fd80d688979 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -1,7 +1,7 @@
/*
* wm8993.c -- WM8993 ALSA SoC audio driver
*
- * Copyright 2009, 2010 Wolfson Microelectronics plc
+ * Copyright 2009-12 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 1436b6ce74d1..bb62f4b3d563 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -1,7 +1,7 @@
/*
* wm8994.c -- WM8994 ALSA SoC Audio driver
*
- * Copyright 2009 Wolfson Microelectronics plc
+ * Copyright 2009-12 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
@@ -2967,23 +2967,8 @@ static struct snd_soc_dai_driver wm8994_dai[] = {
static int wm8994_codec_suspend(struct snd_soc_codec *codec)
{
struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
- struct wm8994 *control = wm8994->wm8994;
int i, ret;
- switch (control->type) {
- case WM8994:
- snd_soc_update_bits(codec, WM8994_MICBIAS, WM8994_MICD_ENA, 0);
- break;
- case WM1811:
- snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
- WM1811_JACKDET_MODE_MASK, 0);
- /* Fall through */
- case WM8958:
- snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
- WM8958_MICD_ENA, 0);
- break;
- }
-
for (i = 0; i < ARRAY_SIZE(wm8994->fll); i++) {
memcpy(&wm8994->fll_suspend[i], &wm8994->fll[i],
sizeof(struct wm8994_fll_config));
@@ -3033,28 +3018,6 @@ static int wm8994_codec_resume(struct snd_soc_codec *codec)
i + 1, ret);
}
- switch (control->type) {
- case WM8994:
- if (wm8994->micdet[0].jack || wm8994->micdet[1].jack)
- snd_soc_update_bits(codec, WM8994_MICBIAS,
- WM8994_MICD_ENA, WM8994_MICD_ENA);
- break;
- case WM1811:
- if (wm8994->jackdet && wm8994->jack_cb) {
- /* Restart from idle */
- snd_soc_update_bits(codec, WM8994_ANTIPOP_2,
- WM1811_JACKDET_MODE_MASK,
- WM1811_JACKDET_MODE_JACK);
- break;
- }
- break;
- case WM8958:
- if (wm8994->jack_cb)
- snd_soc_update_bits(codec, WM8958_MIC_DETECT_1,
- WM8958_MICD_ENA, WM8958_MICD_ENA);
- break;
- }
-
return 0;
}
#else
@@ -3729,9 +3692,6 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
if (wm8994->pdata && wm8994->pdata->micdet_irq)
wm8994->micdet_irq = wm8994->pdata->micdet_irq;
- else if (wm8994->pdata && wm8994->pdata->irq_base)
- wm8994->micdet_irq = wm8994->pdata->irq_base +
- WM8994_IRQ_MIC1_DET;
pm_runtime_enable(codec->dev);
pm_runtime_idle(codec->dev);
@@ -3870,6 +3830,10 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
dev_warn(codec->dev,
"Failed to request Mic detect IRQ: %d\n",
ret);
+ } else {
+ wm8994_request_irq(wm8994->wm8994, WM8994_IRQ_MIC1_DET,
+ wm8958_mic_irq, "Mic detect",
+ wm8994);
}
}
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index dc9b42b7fc4d..00f183dfa454 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -1,7 +1,7 @@
/*
* wm8996.c - WM8996 audio codec interface
*
- * Copyright 2011 Wolfson Microelectronics PLC.
+ * Copyright 2011-2 Wolfson Microelectronics PLC.
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify it
@@ -296,184 +296,6 @@ static struct reg_default wm8996_reg[] = {
{ WM8996_RIGHT_PDM_SPEAKER, 0x1 },
{ WM8996_PDM_SPEAKER_MUTE_SEQUENCE, 0x69 },
{ WM8996_PDM_SPEAKER_VOLUME, 0x66 },
- { WM8996_WRITE_SEQUENCER_0, 0x1 },
- { WM8996_WRITE_SEQUENCER_1, 0x1 },
- { WM8996_WRITE_SEQUENCER_3, 0x6 },
- { WM8996_WRITE_SEQUENCER_4, 0x40 },
- { WM8996_WRITE_SEQUENCER_5, 0x1 },
- { WM8996_WRITE_SEQUENCER_6, 0xf },
- { WM8996_WRITE_SEQUENCER_7, 0x6 },
- { WM8996_WRITE_SEQUENCER_8, 0x1 },
- { WM8996_WRITE_SEQUENCER_9, 0x3 },
- { WM8996_WRITE_SEQUENCER_10, 0x104 },
- { WM8996_WRITE_SEQUENCER_12, 0x60 },
- { WM8996_WRITE_SEQUENCER_13, 0x11 },
- { WM8996_WRITE_SEQUENCER_14, 0x401 },
- { WM8996_WRITE_SEQUENCER_16, 0x50 },
- { WM8996_WRITE_SEQUENCER_17, 0x3 },
- { WM8996_WRITE_SEQUENCER_18, 0x100 },
- { WM8996_WRITE_SEQUENCER_20, 0x51 },
- { WM8996_WRITE_SEQUENCER_21, 0x3 },
- { WM8996_WRITE_SEQUENCER_22, 0x104 },
- { WM8996_WRITE_SEQUENCER_23, 0xa },
- { WM8996_WRITE_SEQUENCER_24, 0x60 },
- { WM8996_WRITE_SEQUENCER_25, 0x3b },
- { WM8996_WRITE_SEQUENCER_26, 0x502 },
- { WM8996_WRITE_SEQUENCER_27, 0x100 },
- { WM8996_WRITE_SEQUENCER_28, 0x2fff },
- { WM8996_WRITE_SEQUENCER_32, 0x2fff },
- { WM8996_WRITE_SEQUENCER_36, 0x2fff },
- { WM8996_WRITE_SEQUENCER_40, 0x2fff },
- { WM8996_WRITE_SEQUENCER_44, 0x2fff },
- { WM8996_WRITE_SEQUENCER_48, 0x2fff },
- { WM8996_WRITE_SEQUENCER_52, 0x2fff },
- { WM8996_WRITE_SEQUENCER_56, 0x2fff },
- { WM8996_WRITE_SEQUENCER_60, 0x2fff },
- { WM8996_WRITE_SEQUENCER_64, 0x1 },
- { WM8996_WRITE_SEQUENCER_65, 0x1 },
- { WM8996_WRITE_SEQUENCER_67, 0x6 },
- { WM8996_WRITE_SEQUENCER_68, 0x40 },
- { WM8996_WRITE_SEQUENCER_69, 0x1 },
- { WM8996_WRITE_SEQUENCER_70, 0xf },
- { WM8996_WRITE_SEQUENCER_71, 0x6 },
- { WM8996_WRITE_SEQUENCER_72, 0x1 },
- { WM8996_WRITE_SEQUENCER_73, 0x3 },
- { WM8996_WRITE_SEQUENCER_74, 0x104 },
- { WM8996_WRITE_SEQUENCER_76, 0x60 },
- { WM8996_WRITE_SEQUENCER_77, 0x11 },
- { WM8996_WRITE_SEQUENCER_78, 0x401 },
- { WM8996_WRITE_SEQUENCER_80, 0x50 },
- { WM8996_WRITE_SEQUENCER_81, 0x3 },
- { WM8996_WRITE_SEQUENCER_82, 0x100 },
- { WM8996_WRITE_SEQUENCER_84, 0x60 },
- { WM8996_WRITE_SEQUENCER_85, 0x3b },
- { WM8996_WRITE_SEQUENCER_86, 0x502 },
- { WM8996_WRITE_SEQUENCER_87, 0x100 },
- { WM8996_WRITE_SEQUENCER_88, 0x2fff },
- { WM8996_WRITE_SEQUENCER_92, 0x2fff },
- { WM8996_WRITE_SEQUENCER_96, 0x2fff },
- { WM8996_WRITE_SEQUENCER_100, 0x2fff },
- { WM8996_WRITE_SEQUENCER_104, 0x2fff },
- { WM8996_WRITE_SEQUENCER_108, 0x2fff },
- { WM8996_WRITE_SEQUENCER_112, 0x2fff },
- { WM8996_WRITE_SEQUENCER_116, 0x2fff },
- { WM8996_WRITE_SEQUENCER_120, 0x2fff },
- { WM8996_WRITE_SEQUENCER_124, 0x2fff },
- { WM8996_WRITE_SEQUENCER_128, 0x1 },
- { WM8996_WRITE_SEQUENCER_129, 0x1 },
- { WM8996_WRITE_SEQUENCER_131, 0x6 },
- { WM8996_WRITE_SEQUENCER_132, 0x40 },
- { WM8996_WRITE_SEQUENCER_133, 0x1 },
- { WM8996_WRITE_SEQUENCER_134, 0xf },
- { WM8996_WRITE_SEQUENCER_135, 0x6 },
- { WM8996_WRITE_SEQUENCER_136, 0x1 },
- { WM8996_WRITE_SEQUENCER_137, 0x3 },
- { WM8996_WRITE_SEQUENCER_138, 0x106 },
- { WM8996_WRITE_SEQUENCER_140, 0x61 },
- { WM8996_WRITE_SEQUENCER_141, 0x11 },
- { WM8996_WRITE_SEQUENCER_142, 0x401 },
- { WM8996_WRITE_SEQUENCER_144, 0x50 },
- { WM8996_WRITE_SEQUENCER_145, 0x3 },
- { WM8996_WRITE_SEQUENCER_146, 0x102 },
- { WM8996_WRITE_SEQUENCER_148, 0x51 },
- { WM8996_WRITE_SEQUENCER_149, 0x3 },
- { WM8996_WRITE_SEQUENCER_150, 0x106 },
- { WM8996_WRITE_SEQUENCER_151, 0xa },
- { WM8996_WRITE_SEQUENCER_152, 0x61 },
- { WM8996_WRITE_SEQUENCER_153, 0x3b },
- { WM8996_WRITE_SEQUENCER_154, 0x502 },
- { WM8996_WRITE_SEQUENCER_155, 0x100 },
- { WM8996_WRITE_SEQUENCER_156, 0x2fff },
- { WM8996_WRITE_SEQUENCER_160, 0x2fff },
- { WM8996_WRITE_SEQUENCER_164, 0x2fff },
- { WM8996_WRITE_SEQUENCER_168, 0x2fff },
- { WM8996_WRITE_SEQUENCER_172, 0x2fff },
- { WM8996_WRITE_SEQUENCER_176, 0x2fff },
- { WM8996_WRITE_SEQUENCER_180, 0x2fff },
- { WM8996_WRITE_SEQUENCER_184, 0x2fff },
- { WM8996_WRITE_SEQUENCER_188, 0x2fff },
- { WM8996_WRITE_SEQUENCER_192, 0x1 },
- { WM8996_WRITE_SEQUENCER_193, 0x1 },
- { WM8996_WRITE_SEQUENCER_195, 0x6 },
- { WM8996_WRITE_SEQUENCER_196, 0x40 },
- { WM8996_WRITE_SEQUENCER_197, 0x1 },
- { WM8996_WRITE_SEQUENCER_198, 0xf },
- { WM8996_WRITE_SEQUENCER_199, 0x6 },
- { WM8996_WRITE_SEQUENCER_200, 0x1 },
- { WM8996_WRITE_SEQUENCER_201, 0x3 },
- { WM8996_WRITE_SEQUENCER_202, 0x106 },
- { WM8996_WRITE_SEQUENCER_204, 0x61 },
- { WM8996_WRITE_SEQUENCER_205, 0x11 },
- { WM8996_WRITE_SEQUENCER_206, 0x401 },
- { WM8996_WRITE_SEQUENCER_208, 0x50 },
- { WM8996_WRITE_SEQUENCER_209, 0x3 },
- { WM8996_WRITE_SEQUENCER_210, 0x102 },
- { WM8996_WRITE_SEQUENCER_212, 0x61 },
- { WM8996_WRITE_SEQUENCER_213, 0x3b },
- { WM8996_WRITE_SEQUENCER_214, 0x502 },
- { WM8996_WRITE_SEQUENCER_215, 0x100 },
- { WM8996_WRITE_SEQUENCER_216, 0x2fff },
- { WM8996_WRITE_SEQUENCER_220, 0x2fff },
- { WM8996_WRITE_SEQUENCER_224, 0x2fff },
- { WM8996_WRITE_SEQUENCER_228, 0x2fff },
- { WM8996_WRITE_SEQUENCER_232, 0x2fff },
- { WM8996_WRITE_SEQUENCER_236, 0x2fff },
- { WM8996_WRITE_SEQUENCER_240, 0x2fff },
- { WM8996_WRITE_SEQUENCER_244, 0x2fff },
- { WM8996_WRITE_SEQUENCER_248, 0x2fff },
- { WM8996_WRITE_SEQUENCER_252, 0x2fff },
- { WM8996_WRITE_SEQUENCER_256, 0x60 },
- { WM8996_WRITE_SEQUENCER_258, 0x601 },
- { WM8996_WRITE_SEQUENCER_260, 0x50 },
- { WM8996_WRITE_SEQUENCER_262, 0x100 },
- { WM8996_WRITE_SEQUENCER_264, 0x1 },
- { WM8996_WRITE_SEQUENCER_266, 0x104 },
- { WM8996_WRITE_SEQUENCER_267, 0x100 },
- { WM8996_WRITE_SEQUENCER_268, 0x2fff },
- { WM8996_WRITE_SEQUENCER_272, 0x2fff },
- { WM8996_WRITE_SEQUENCER_276, 0x2fff },
- { WM8996_WRITE_SEQUENCER_280, 0x2fff },
- { WM8996_WRITE_SEQUENCER_284, 0x2fff },
- { WM8996_WRITE_SEQUENCER_288, 0x2fff },
- { WM8996_WRITE_SEQUENCER_292, 0x2fff },
- { WM8996_WRITE_SEQUENCER_296, 0x2fff },
- { WM8996_WRITE_SEQUENCER_300, 0x2fff },
- { WM8996_WRITE_SEQUENCER_304, 0x2fff },
- { WM8996_WRITE_SEQUENCER_308, 0x2fff },
- { WM8996_WRITE_SEQUENCER_312, 0x2fff },
- { WM8996_WRITE_SEQUENCER_316, 0x2fff },
- { WM8996_WRITE_SEQUENCER_320, 0x61 },
- { WM8996_WRITE_SEQUENCER_322, 0x601 },
- { WM8996_WRITE_SEQUENCER_324, 0x50 },
- { WM8996_WRITE_SEQUENCER_326, 0x102 },
- { WM8996_WRITE_SEQUENCER_328, 0x1 },
- { WM8996_WRITE_SEQUENCER_330, 0x106 },
- { WM8996_WRITE_SEQUENCER_331, 0x100 },
- { WM8996_WRITE_SEQUENCER_332, 0x2fff },
- { WM8996_WRITE_SEQUENCER_336, 0x2fff },
- { WM8996_WRITE_SEQUENCER_340, 0x2fff },
- { WM8996_WRITE_SEQUENCER_344, 0x2fff },
- { WM8996_WRITE_SEQUENCER_348, 0x2fff },
- { WM8996_WRITE_SEQUENCER_352, 0x2fff },
- { WM8996_WRITE_SEQUENCER_356, 0x2fff },
- { WM8996_WRITE_SEQUENCER_360, 0x2fff },
- { WM8996_WRITE_SEQUENCER_364, 0x2fff },
- { WM8996_WRITE_SEQUENCER_368, 0x2fff },
- { WM8996_WRITE_SEQUENCER_372, 0x2fff },
- { WM8996_WRITE_SEQUENCER_376, 0x2fff },
- { WM8996_WRITE_SEQUENCER_380, 0x2fff },
- { WM8996_WRITE_SEQUENCER_384, 0x60 },
- { WM8996_WRITE_SEQUENCER_386, 0x601 },
- { WM8996_WRITE_SEQUENCER_388, 0x61 },
- { WM8996_WRITE_SEQUENCER_390, 0x601 },
- { WM8996_WRITE_SEQUENCER_392, 0x50 },
- { WM8996_WRITE_SEQUENCER_394, 0x300 },
- { WM8996_WRITE_SEQUENCER_396, 0x1 },
- { WM8996_WRITE_SEQUENCER_398, 0x304 },
- { WM8996_WRITE_SEQUENCER_400, 0x40 },
- { WM8996_WRITE_SEQUENCER_402, 0xf },
- { WM8996_WRITE_SEQUENCER_404, 0x1 },
- { WM8996_WRITE_SEQUENCER_407, 0x100 },
};
static const DECLARE_TLV_DB_SCALE(inpga_tlv, 0, 100, 0);
@@ -1706,18 +1528,6 @@ static bool wm8996_volatile_register(struct device *dev, unsigned int reg)
}
}
-static int wm8996_reset(struct wm8996_priv *wm8996)
-{
- if (wm8996->pdata.ldo_ena > 0) {
- gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 0);
- gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 1);
- return 0;
- } else {
- return regmap_write(wm8996->regmap, WM8996_SOFTWARE_RESET,
- 0x8915);
- }
-}
-
static const int bclk_divs[] = {
1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96
};
@@ -1809,8 +1619,10 @@ static int wm8996_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_OFF:
regcache_cache_only(codec->control_data, true);
- if (wm8996->pdata.ldo_ena >= 0)
+ if (wm8996->pdata.ldo_ena >= 0) {
gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 0);
+ regcache_cache_only(codec->control_data, true);
+ }
regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies),
wm8996->supplies);
break;
@@ -2807,7 +2619,7 @@ static int wm8996_probe(struct snd_soc_codec *codec)
int ret;
struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
struct i2c_client *i2c = to_i2c_client(codec->dev);
- int i, irq_flags;
+ int irq_flags;
wm8996->codec = codec;
@@ -2822,177 +2634,12 @@ static int wm8996_probe(struct snd_soc_codec *codec)
goto err;
}
- wm8996->disable_nb[0].notifier_call = wm8996_regulator_event_0;
- wm8996->disable_nb[1].notifier_call = wm8996_regulator_event_1;
- wm8996->disable_nb[2].notifier_call = wm8996_regulator_event_2;
-
- /* This should really be moved into the regulator core */
- for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) {
- ret = regulator_register_notifier(wm8996->supplies[i].consumer,
- &wm8996->disable_nb[i]);
- if (ret != 0) {
- dev_err(codec->dev,
- "Failed to register regulator notifier: %d\n",
- ret);
- }
- }
-
- /* Apply platform data settings */
- snd_soc_update_bits(codec, WM8996_LINE_INPUT_CONTROL,
- WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK,
- wm8996->pdata.inl_mode << WM8996_INL_MODE_SHIFT |
- wm8996->pdata.inr_mode);
-
- for (i = 0; i < ARRAY_SIZE(wm8996->pdata.gpio_default); i++) {
- if (!wm8996->pdata.gpio_default[i])
- continue;
-
- snd_soc_write(codec, WM8996_GPIO_1 + i,
- wm8996->pdata.gpio_default[i] & 0xffff);
- }
-
- if (wm8996->pdata.spkmute_seq)
- snd_soc_update_bits(codec, WM8996_PDM_SPEAKER_MUTE_SEQUENCE,
- WM8996_SPK_MUTE_ENDIAN |
- WM8996_SPK_MUTE_SEQ1_MASK,
- wm8996->pdata.spkmute_seq);
-
- snd_soc_update_bits(codec, WM8996_ACCESSORY_DETECT_MODE_2,
- WM8996_MICD_BIAS_SRC | WM8996_HPOUT1FB_SRC |
- WM8996_MICD_SRC, wm8996->pdata.micdet_def);
-
- /* Latch volume update bits */
- snd_soc_update_bits(codec, WM8996_LEFT_LINE_INPUT_VOLUME,
- WM8996_IN1_VU, WM8996_IN1_VU);
- snd_soc_update_bits(codec, WM8996_RIGHT_LINE_INPUT_VOLUME,
- WM8996_IN1_VU, WM8996_IN1_VU);
-
- snd_soc_update_bits(codec, WM8996_DAC1_LEFT_VOLUME,
- WM8996_DAC1_VU, WM8996_DAC1_VU);
- snd_soc_update_bits(codec, WM8996_DAC1_RIGHT_VOLUME,
- WM8996_DAC1_VU, WM8996_DAC1_VU);
- snd_soc_update_bits(codec, WM8996_DAC2_LEFT_VOLUME,
- WM8996_DAC2_VU, WM8996_DAC2_VU);
- snd_soc_update_bits(codec, WM8996_DAC2_RIGHT_VOLUME,
- WM8996_DAC2_VU, WM8996_DAC2_VU);
-
- snd_soc_update_bits(codec, WM8996_OUTPUT1_LEFT_VOLUME,
- WM8996_DAC1_VU, WM8996_DAC1_VU);
- snd_soc_update_bits(codec, WM8996_OUTPUT1_RIGHT_VOLUME,
- WM8996_DAC1_VU, WM8996_DAC1_VU);
- snd_soc_update_bits(codec, WM8996_OUTPUT2_LEFT_VOLUME,
- WM8996_DAC2_VU, WM8996_DAC2_VU);
- snd_soc_update_bits(codec, WM8996_OUTPUT2_RIGHT_VOLUME,
- WM8996_DAC2_VU, WM8996_DAC2_VU);
-
- snd_soc_update_bits(codec, WM8996_DSP1_TX_LEFT_VOLUME,
- WM8996_DSP1TX_VU, WM8996_DSP1TX_VU);
- snd_soc_update_bits(codec, WM8996_DSP1_TX_RIGHT_VOLUME,
- WM8996_DSP1TX_VU, WM8996_DSP1TX_VU);
- snd_soc_update_bits(codec, WM8996_DSP2_TX_LEFT_VOLUME,
- WM8996_DSP2TX_VU, WM8996_DSP2TX_VU);
- snd_soc_update_bits(codec, WM8996_DSP2_TX_RIGHT_VOLUME,
- WM8996_DSP2TX_VU, WM8996_DSP2TX_VU);
-
- snd_soc_update_bits(codec, WM8996_DSP1_RX_LEFT_VOLUME,
- WM8996_DSP1RX_VU, WM8996_DSP1RX_VU);
- snd_soc_update_bits(codec, WM8996_DSP1_RX_RIGHT_VOLUME,
- WM8996_DSP1RX_VU, WM8996_DSP1RX_VU);
- snd_soc_update_bits(codec, WM8996_DSP2_RX_LEFT_VOLUME,
- WM8996_DSP2RX_VU, WM8996_DSP2RX_VU);
- snd_soc_update_bits(codec, WM8996_DSP2_RX_RIGHT_VOLUME,
- WM8996_DSP2RX_VU, WM8996_DSP2RX_VU);
-
- /* No support currently for the underclocked TDM modes and
- * pick a default TDM layout with each channel pair working with
- * slots 0 and 1. */
- snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_0_CONFIGURATION,
- WM8996_AIF1RX_CHAN0_SLOTS_MASK |
- WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF1RX_CHAN0_SLOTS_SHIFT | 0);
- snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_1_CONFIGURATION,
- WM8996_AIF1RX_CHAN1_SLOTS_MASK |
- WM8996_AIF1RX_CHAN1_START_SLOT_MASK,
- 1 << WM8996_AIF1RX_CHAN1_SLOTS_SHIFT | 1);
- snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_2_CONFIGURATION,
- WM8996_AIF1RX_CHAN2_SLOTS_MASK |
- WM8996_AIF1RX_CHAN2_START_SLOT_MASK,
- 1 << WM8996_AIF1RX_CHAN2_SLOTS_SHIFT | 0);
- snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_3_CONFIGURATION,
- WM8996_AIF1RX_CHAN3_SLOTS_MASK |
- WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF1RX_CHAN3_SLOTS_SHIFT | 1);
- snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_4_CONFIGURATION,
- WM8996_AIF1RX_CHAN4_SLOTS_MASK |
- WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF1RX_CHAN4_SLOTS_SHIFT | 0);
- snd_soc_update_bits(codec, WM8996_AIF1RX_CHANNEL_5_CONFIGURATION,
- WM8996_AIF1RX_CHAN5_SLOTS_MASK |
- WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF1RX_CHAN5_SLOTS_SHIFT | 1);
-
- snd_soc_update_bits(codec, WM8996_AIF2RX_CHANNEL_0_CONFIGURATION,
- WM8996_AIF2RX_CHAN0_SLOTS_MASK |
- WM8996_AIF2RX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF2RX_CHAN0_SLOTS_SHIFT | 0);
- snd_soc_update_bits(codec, WM8996_AIF2RX_CHANNEL_1_CONFIGURATION,
- WM8996_AIF2RX_CHAN1_SLOTS_MASK |
- WM8996_AIF2RX_CHAN1_START_SLOT_MASK,
- 1 << WM8996_AIF2RX_CHAN1_SLOTS_SHIFT | 1);
-
- snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_0_CONFIGURATION,
- WM8996_AIF1TX_CHAN0_SLOTS_MASK |
- WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF1TX_CHAN0_SLOTS_SHIFT | 0);
- snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_1_CONFIGURATION,
- WM8996_AIF1TX_CHAN1_SLOTS_MASK |
- WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF1TX_CHAN1_SLOTS_SHIFT | 1);
- snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_2_CONFIGURATION,
- WM8996_AIF1TX_CHAN2_SLOTS_MASK |
- WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF1TX_CHAN2_SLOTS_SHIFT | 0);
- snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_3_CONFIGURATION,
- WM8996_AIF1TX_CHAN3_SLOTS_MASK |
- WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF1TX_CHAN3_SLOTS_SHIFT | 1);
- snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_4_CONFIGURATION,
- WM8996_AIF1TX_CHAN4_SLOTS_MASK |
- WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF1TX_CHAN4_SLOTS_SHIFT | 0);
- snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_5_CONFIGURATION,
- WM8996_AIF1TX_CHAN5_SLOTS_MASK |
- WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF1TX_CHAN5_SLOTS_SHIFT | 1);
-
- snd_soc_update_bits(codec, WM8996_AIF2TX_CHANNEL_0_CONFIGURATION,
- WM8996_AIF2TX_CHAN0_SLOTS_MASK |
- WM8996_AIF2TX_CHAN0_START_SLOT_MASK,
- 1 << WM8996_AIF2TX_CHAN0_SLOTS_SHIFT | 0);
- snd_soc_update_bits(codec, WM8996_AIF1TX_CHANNEL_1_CONFIGURATION,
- WM8996_AIF2TX_CHAN1_SLOTS_MASK |
- WM8996_AIF2TX_CHAN1_START_SLOT_MASK,
- 1 << WM8996_AIF1TX_CHAN1_SLOTS_SHIFT | 1);
-
if (wm8996->pdata.num_retune_mobile_cfgs)
wm8996_retune_mobile_pdata(codec);
else
snd_soc_add_codec_controls(codec, wm8996_eq_controls,
ARRAY_SIZE(wm8996_eq_controls));
- /* If the TX LRCLK pins are not in LRCLK mode configure the
- * AIFs to source their clocks from the RX LRCLKs.
- */
- if ((snd_soc_read(codec, WM8996_GPIO_1)))
- snd_soc_update_bits(codec, WM8996_AIF1_TX_LRCLK_2,
- WM8996_AIF1TX_LRCLK_MODE,
- WM8996_AIF1TX_LRCLK_MODE);
-
- if ((snd_soc_read(codec, WM8996_GPIO_2)))
- snd_soc_update_bits(codec, WM8996_AIF2_TX_LRCLK_2,
- WM8996_AIF2TX_LRCLK_MODE,
- WM8996_AIF2TX_LRCLK_MODE);
-
if (i2c->irq) {
if (wm8996->pdata.irq_flags)
irq_flags = wm8996->pdata.irq_flags;
@@ -3036,9 +2683,7 @@ err:
static int wm8996_remove(struct snd_soc_codec *codec)
{
- struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
struct i2c_client *i2c = to_i2c_client(codec->dev);
- int i;
snd_soc_update_bits(codec, WM8996_INTERRUPT_CONTROL,
WM8996_IM_IRQ, WM8996_IM_IRQ);
@@ -3046,10 +2691,6 @@ static int wm8996_remove(struct snd_soc_codec *codec)
if (i2c->irq)
free_irq(i2c->irq, codec);
- for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++)
- regulator_unregister_notifier(wm8996->supplies[i].consumer,
- &wm8996->disable_nb[i]);
-
return 0;
}
@@ -3163,6 +2804,21 @@ static __devinit int wm8996_i2c_probe(struct i2c_client *i2c,
goto err_gpio;
}
+ wm8996->disable_nb[0].notifier_call = wm8996_regulator_event_0;
+ wm8996->disable_nb[1].notifier_call = wm8996_regulator_event_1;
+ wm8996->disable_nb[2].notifier_call = wm8996_regulator_event_2;
+
+ /* This should really be moved into the regulator core */
+ for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) {
+ ret = regulator_register_notifier(wm8996->supplies[i].consumer,
+ &wm8996->disable_nb[i]);
+ if (ret != 0) {
+ dev_err(&i2c->dev,
+ "Failed to register regulator notifier: %d\n",
+ ret);
+ }
+ }
+
ret = regulator_bulk_enable(ARRAY_SIZE(wm8996->supplies),
wm8996->supplies);
if (ret != 0) {
@@ -3175,7 +2831,7 @@ static __devinit int wm8996_i2c_probe(struct i2c_client *i2c,
msleep(5);
}
- wm8996->regmap = regmap_init_i2c(i2c, &wm8996_regmap);
+ wm8996->regmap = devm_regmap_init_i2c(i2c, &wm8996_regmap);
if (IS_ERR(wm8996->regmap)) {
ret = PTR_ERR(wm8996->regmap);
dev_err(&i2c->dev, "regmap_init() failed: %d\n", ret);
@@ -3203,15 +2859,199 @@ static __devinit int wm8996_i2c_probe(struct i2c_client *i2c,
dev_info(&i2c->dev, "revision %c\n",
(reg & WM8996_CHIP_REV_MASK) + 'A');
- ret = wm8996_reset(wm8996);
- if (ret < 0) {
- dev_err(&i2c->dev, "Failed to issue reset\n");
- goto err_regmap;
+ if (wm8996->pdata.ldo_ena > 0) {
+ gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 0);
+ regcache_cache_only(wm8996->regmap, true);
+ } else {
+ ret = regmap_write(wm8996->regmap, WM8996_SOFTWARE_RESET,
+ 0x8915);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to issue reset: %d\n", ret);
+ goto err_regmap;
+ }
}
- regcache_cache_only(wm8996->regmap, true);
regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
+ /* Apply platform data settings */
+ regmap_update_bits(wm8996->regmap, WM8996_LINE_INPUT_CONTROL,
+ WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK,
+ wm8996->pdata.inl_mode << WM8996_INL_MODE_SHIFT |
+ wm8996->pdata.inr_mode);
+
+ for (i = 0; i < ARRAY_SIZE(wm8996->pdata.gpio_default); i++) {
+ if (!wm8996->pdata.gpio_default[i])
+ continue;
+
+ regmap_write(wm8996->regmap, WM8996_GPIO_1 + i,
+ wm8996->pdata.gpio_default[i] & 0xffff);
+ }
+
+ if (wm8996->pdata.spkmute_seq)
+ regmap_update_bits(wm8996->regmap,
+ WM8996_PDM_SPEAKER_MUTE_SEQUENCE,
+ WM8996_SPK_MUTE_ENDIAN |
+ WM8996_SPK_MUTE_SEQ1_MASK,
+ wm8996->pdata.spkmute_seq);
+
+ regmap_update_bits(wm8996->regmap, WM8996_ACCESSORY_DETECT_MODE_2,
+ WM8996_MICD_BIAS_SRC | WM8996_HPOUT1FB_SRC |
+ WM8996_MICD_SRC, wm8996->pdata.micdet_def);
+
+ /* Latch volume update bits */
+ regmap_update_bits(wm8996->regmap, WM8996_LEFT_LINE_INPUT_VOLUME,
+ WM8996_IN1_VU, WM8996_IN1_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_RIGHT_LINE_INPUT_VOLUME,
+ WM8996_IN1_VU, WM8996_IN1_VU);
+
+ regmap_update_bits(wm8996->regmap, WM8996_DAC1_LEFT_VOLUME,
+ WM8996_DAC1_VU, WM8996_DAC1_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_DAC1_RIGHT_VOLUME,
+ WM8996_DAC1_VU, WM8996_DAC1_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_DAC2_LEFT_VOLUME,
+ WM8996_DAC2_VU, WM8996_DAC2_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_DAC2_RIGHT_VOLUME,
+ WM8996_DAC2_VU, WM8996_DAC2_VU);
+
+ regmap_update_bits(wm8996->regmap, WM8996_OUTPUT1_LEFT_VOLUME,
+ WM8996_DAC1_VU, WM8996_DAC1_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_OUTPUT1_RIGHT_VOLUME,
+ WM8996_DAC1_VU, WM8996_DAC1_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_OUTPUT2_LEFT_VOLUME,
+ WM8996_DAC2_VU, WM8996_DAC2_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_OUTPUT2_RIGHT_VOLUME,
+ WM8996_DAC2_VU, WM8996_DAC2_VU);
+
+ regmap_update_bits(wm8996->regmap, WM8996_DSP1_TX_LEFT_VOLUME,
+ WM8996_DSP1TX_VU, WM8996_DSP1TX_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_DSP1_TX_RIGHT_VOLUME,
+ WM8996_DSP1TX_VU, WM8996_DSP1TX_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_DSP2_TX_LEFT_VOLUME,
+ WM8996_DSP2TX_VU, WM8996_DSP2TX_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_DSP2_TX_RIGHT_VOLUME,
+ WM8996_DSP2TX_VU, WM8996_DSP2TX_VU);
+
+ regmap_update_bits(wm8996->regmap, WM8996_DSP1_RX_LEFT_VOLUME,
+ WM8996_DSP1RX_VU, WM8996_DSP1RX_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_DSP1_RX_RIGHT_VOLUME,
+ WM8996_DSP1RX_VU, WM8996_DSP1RX_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_DSP2_RX_LEFT_VOLUME,
+ WM8996_DSP2RX_VU, WM8996_DSP2RX_VU);
+ regmap_update_bits(wm8996->regmap, WM8996_DSP2_RX_RIGHT_VOLUME,
+ WM8996_DSP2RX_VU, WM8996_DSP2RX_VU);
+
+ /* No support currently for the underclocked TDM modes and
+ * pick a default TDM layout with each channel pair working with
+ * slots 0 and 1. */
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1RX_CHANNEL_0_CONFIGURATION,
+ WM8996_AIF1RX_CHAN0_SLOTS_MASK |
+ WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF1RX_CHAN0_SLOTS_SHIFT | 0);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1RX_CHANNEL_1_CONFIGURATION,
+ WM8996_AIF1RX_CHAN1_SLOTS_MASK |
+ WM8996_AIF1RX_CHAN1_START_SLOT_MASK,
+ 1 << WM8996_AIF1RX_CHAN1_SLOTS_SHIFT | 1);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1RX_CHANNEL_2_CONFIGURATION,
+ WM8996_AIF1RX_CHAN2_SLOTS_MASK |
+ WM8996_AIF1RX_CHAN2_START_SLOT_MASK,
+ 1 << WM8996_AIF1RX_CHAN2_SLOTS_SHIFT | 0);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1RX_CHANNEL_3_CONFIGURATION,
+ WM8996_AIF1RX_CHAN3_SLOTS_MASK |
+ WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF1RX_CHAN3_SLOTS_SHIFT | 1);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1RX_CHANNEL_4_CONFIGURATION,
+ WM8996_AIF1RX_CHAN4_SLOTS_MASK |
+ WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF1RX_CHAN4_SLOTS_SHIFT | 0);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1RX_CHANNEL_5_CONFIGURATION,
+ WM8996_AIF1RX_CHAN5_SLOTS_MASK |
+ WM8996_AIF1RX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF1RX_CHAN5_SLOTS_SHIFT | 1);
+
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF2RX_CHANNEL_0_CONFIGURATION,
+ WM8996_AIF2RX_CHAN0_SLOTS_MASK |
+ WM8996_AIF2RX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF2RX_CHAN0_SLOTS_SHIFT | 0);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF2RX_CHANNEL_1_CONFIGURATION,
+ WM8996_AIF2RX_CHAN1_SLOTS_MASK |
+ WM8996_AIF2RX_CHAN1_START_SLOT_MASK,
+ 1 << WM8996_AIF2RX_CHAN1_SLOTS_SHIFT | 1);
+
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1TX_CHANNEL_0_CONFIGURATION,
+ WM8996_AIF1TX_CHAN0_SLOTS_MASK |
+ WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF1TX_CHAN0_SLOTS_SHIFT | 0);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1TX_CHANNEL_1_CONFIGURATION,
+ WM8996_AIF1TX_CHAN1_SLOTS_MASK |
+ WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF1TX_CHAN1_SLOTS_SHIFT | 1);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1TX_CHANNEL_2_CONFIGURATION,
+ WM8996_AIF1TX_CHAN2_SLOTS_MASK |
+ WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF1TX_CHAN2_SLOTS_SHIFT | 0);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1TX_CHANNEL_3_CONFIGURATION,
+ WM8996_AIF1TX_CHAN3_SLOTS_MASK |
+ WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF1TX_CHAN3_SLOTS_SHIFT | 1);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1TX_CHANNEL_4_CONFIGURATION,
+ WM8996_AIF1TX_CHAN4_SLOTS_MASK |
+ WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF1TX_CHAN4_SLOTS_SHIFT | 0);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1TX_CHANNEL_5_CONFIGURATION,
+ WM8996_AIF1TX_CHAN5_SLOTS_MASK |
+ WM8996_AIF1TX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF1TX_CHAN5_SLOTS_SHIFT | 1);
+
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF2TX_CHANNEL_0_CONFIGURATION,
+ WM8996_AIF2TX_CHAN0_SLOTS_MASK |
+ WM8996_AIF2TX_CHAN0_START_SLOT_MASK,
+ 1 << WM8996_AIF2TX_CHAN0_SLOTS_SHIFT | 0);
+ regmap_update_bits(wm8996->regmap,
+ WM8996_AIF1TX_CHANNEL_1_CONFIGURATION,
+ WM8996_AIF2TX_CHAN1_SLOTS_MASK |
+ WM8996_AIF2TX_CHAN1_START_SLOT_MASK,
+ 1 << WM8996_AIF1TX_CHAN1_SLOTS_SHIFT | 1);
+
+ /* If the TX LRCLK pins are not in LRCLK mode configure the
+ * AIFs to source their clocks from the RX LRCLKs.
+ */
+ ret = regmap_read(wm8996->regmap, WM8996_GPIO_1, &reg);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to read GPIO1: %d\n", ret);
+ goto err_regmap;
+ }
+
+ if (reg & WM8996_GP1_FN_MASK)
+ regmap_update_bits(wm8996->regmap, WM8996_AIF1_TX_LRCLK_2,
+ WM8996_AIF1TX_LRCLK_MODE,
+ WM8996_AIF1TX_LRCLK_MODE);
+
+ ret = regmap_read(wm8996->regmap, WM8996_GPIO_2, &reg);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to read GPIO2: %d\n", ret);
+ goto err_regmap;
+ }
+
+ if (reg & WM8996_GP2_FN_MASK)
+ regmap_update_bits(wm8996->regmap, WM8996_AIF2_TX_LRCLK_2,
+ WM8996_AIF2TX_LRCLK_MODE,
+ WM8996_AIF2TX_LRCLK_MODE);
+
wm8996_init_gpio(wm8996);
ret = snd_soc_register_codec(&i2c->dev,
@@ -3225,7 +3065,6 @@ static __devinit int wm8996_i2c_probe(struct i2c_client *i2c,
err_gpiolib:
wm8996_free_gpio(wm8996);
err_regmap:
- regmap_exit(wm8996->regmap);
err_enable:
if (wm8996->pdata.ldo_ena > 0)
gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 0);
@@ -3241,14 +3080,18 @@ err:
static __devexit int wm8996_i2c_remove(struct i2c_client *client)
{
struct wm8996_priv *wm8996 = i2c_get_clientdata(client);
+ int i;
snd_soc_unregister_codec(&client->dev);
wm8996_free_gpio(wm8996);
- regmap_exit(wm8996->regmap);
if (wm8996->pdata.ldo_ena > 0) {
gpio_set_value_cansleep(wm8996->pdata.ldo_ena, 0);
gpio_free(wm8996->pdata.ldo_ena);
}
+ for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++)
+ regulator_unregister_notifier(wm8996->supplies[i].consumer,
+ &wm8996->disable_nb[i]);
+
return 0;
}
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 9328270df16c..2de74e1ea225 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -3,7 +3,7 @@
*
* Author: Mark Brown
*
- * Copyright 2009 Wolfson Microelectronics plc
+ * Copyright 2009-12 Wolfson Microelectronics plc
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/sound/soc/codecs/wm9090.c b/sound/soc/codecs/wm9090.c
index 4b263b6edf13..2c2346fdd637 100644
--- a/sound/soc/codecs/wm9090.c
+++ b/sound/soc/codecs/wm9090.c
@@ -1,7 +1,7 @@
/*
* ALSA SoC WM9090 driver
*
- * Copyright 2009, 2010 Wolfson Microelectronics
+ * Copyright 2009-12 Wolfson Microelectronics
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index a1541414d904..099e6ec32125 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -1,7 +1,7 @@
/*
* wm9712.c -- ALSA Soc WM9712 codec support
*
- * Copyright 2006 Wolfson Microelectronics PLC.
+ * Copyright 2006-12 Wolfson Microelectronics PLC.
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 2d22cc70d536..3eb19fb71d17 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -1,7 +1,7 @@
/*
* wm9713.c -- ALSA Soc WM9713 codec support
*
- * Copyright 2006 Wolfson Microelectronics PLC.
+ * Copyright 2006-10 Wolfson Microelectronics PLC.
* Author: Liam Girdwood <lrg@slimlogic.co.uk>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index dfe957a47f29..61baa48823cb 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -1,7 +1,7 @@
/*
* wm_hubs.c -- WM8993/4 common code
*
- * Copyright 2009 Wolfson Microelectronics plc
+ * Copyright 2009-12 Wolfson Microelectronics plc
*
* Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
*
diff --git a/sound/soc/dwc/Kconfig b/sound/soc/dwc/Kconfig
new file mode 100644
index 000000000000..e334900cf0b8
--- /dev/null
+++ b/sound/soc/dwc/Kconfig
@@ -0,0 +1,9 @@
+config SND_DESIGNWARE_I2S
+ tristate "Synopsys I2S Device Driver"
+ depends on CLKDEV_LOOKUP
+ help
+ Say Y or M if you want to add support for I2S driver for
+ Synopsys desigwnware I2S device. The device supports upto
+ maximum of 8 channels each for play and record.
+
+
diff --git a/sound/soc/dwc/Makefile b/sound/soc/dwc/Makefile
new file mode 100644
index 000000000000..319371f690f4
--- /dev/null
+++ b/sound/soc/dwc/Makefile
@@ -0,0 +1,3 @@
+# SYNOPSYS Platform Support
+obj-$(CONFIG_SND_DESIGNWARE_I2S) += designware_i2s.o
+
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
new file mode 100644
index 000000000000..1aa51300c564
--- /dev/null
+++ b/sound/soc/dwc/designware_i2s.c
@@ -0,0 +1,455 @@
+/*
+ * ALSA SoC Synopsys I2S Audio Layer
+ *
+ * sound/soc/spear/designware_i2s.c
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Rajeev Kumar <rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <sound/designware_i2s.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+/* common register for all channel */
+#define IER 0x000
+#define IRER 0x004
+#define ITER 0x008
+#define CER 0x00C
+#define CCR 0x010
+#define RXFFR 0x014
+#define TXFFR 0x018
+
+/* I2STxRxRegisters for all channels */
+#define LRBR_LTHR(x) (0x40 * x + 0x020)
+#define RRBR_RTHR(x) (0x40 * x + 0x024)
+#define RER(x) (0x40 * x + 0x028)
+#define TER(x) (0x40 * x + 0x02C)
+#define RCR(x) (0x40 * x + 0x030)
+#define TCR(x) (0x40 * x + 0x034)
+#define ISR(x) (0x40 * x + 0x038)
+#define IMR(x) (0x40 * x + 0x03C)
+#define ROR(x) (0x40 * x + 0x040)
+#define TOR(x) (0x40 * x + 0x044)
+#define RFCR(x) (0x40 * x + 0x048)
+#define TFCR(x) (0x40 * x + 0x04C)
+#define RFF(x) (0x40 * x + 0x050)
+#define TFF(x) (0x40 * x + 0x054)
+
+/* I2SCOMPRegisters */
+#define I2S_COMP_PARAM_2 0x01F0
+#define I2S_COMP_PARAM_1 0x01F4
+#define I2S_COMP_VERSION 0x01F8
+#define I2S_COMP_TYPE 0x01FC
+
+#define MAX_CHANNEL_NUM 8
+#define MIN_CHANNEL_NUM 2
+
+struct dw_i2s_dev {
+ void __iomem *i2s_base;
+ struct clk *clk;
+ int active;
+ unsigned int capability;
+ struct device *dev;
+
+ /* data related to DMA transfers b/w i2s and DMAC */
+ struct i2s_dma_data play_dma_data;
+ struct i2s_dma_data capture_dma_data;
+ struct i2s_clk_config_data config;
+ int (*i2s_clk_cfg)(struct i2s_clk_config_data *config);
+};
+
+static inline void i2s_write_reg(void __iomem *io_base, int reg, u32 val)
+{
+ writel(val, io_base + reg);
+}
+
+static inline u32 i2s_read_reg(void __iomem *io_base, int reg)
+{
+ return readl(io_base + reg);
+}
+
+static inline void i2s_disable_channels(struct dw_i2s_dev *dev, u32 stream)
+{
+ u32 i = 0;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ for (i = 0; i < 4; i++)
+ i2s_write_reg(dev->i2s_base, TER(i), 0);
+ } else {
+ for (i = 0; i < 4; i++)
+ i2s_write_reg(dev->i2s_base, RER(i), 0);
+ }
+}
+
+static inline void i2s_clear_irqs(struct dw_i2s_dev *dev, u32 stream)
+{
+ u32 i = 0;
+
+ if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ for (i = 0; i < 4; i++)
+ i2s_write_reg(dev->i2s_base, TOR(i), 0);
+ } else {
+ for (i = 0; i < 4; i++)
+ i2s_write_reg(dev->i2s_base, ROR(i), 0);
+ }
+}
+
+static void i2s_start(struct dw_i2s_dev *dev,
+ struct snd_pcm_substream *substream)
+{
+
+ i2s_write_reg(dev->i2s_base, IER, 1);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ i2s_write_reg(dev->i2s_base, ITER, 1);
+ else
+ i2s_write_reg(dev->i2s_base, IRER, 1);
+
+ i2s_write_reg(dev->i2s_base, CER, 1);
+}
+
+static void i2s_stop(struct dw_i2s_dev *dev,
+ struct snd_pcm_substream *substream)
+{
+ u32 i = 0, irq;
+
+ i2s_clear_irqs(dev, substream->stream);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ i2s_write_reg(dev->i2s_base, ITER, 0);
+
+ for (i = 0; i < 4; i++) {
+ irq = i2s_read_reg(dev->i2s_base, IMR(i));
+ i2s_write_reg(dev->i2s_base, IMR(i), irq | 0x30);
+ }
+ } else {
+ i2s_write_reg(dev->i2s_base, IRER, 0);
+
+ for (i = 0; i < 4; i++) {
+ irq = i2s_read_reg(dev->i2s_base, IMR(i));
+ i2s_write_reg(dev->i2s_base, IMR(i), irq | 0x03);
+ }
+ }
+
+ if (!dev->active) {
+ i2s_write_reg(dev->i2s_base, CER, 0);
+ i2s_write_reg(dev->i2s_base, IER, 0);
+ }
+}
+
+static int dw_i2s_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
+ struct i2s_dma_data *dma_data = NULL;
+
+ if (!(dev->capability & DWC_I2S_RECORD) &&
+ (substream->stream == SNDRV_PCM_STREAM_CAPTURE))
+ return -EINVAL;
+
+ if (!(dev->capability & DWC_I2S_PLAY) &&
+ (substream->stream == SNDRV_PCM_STREAM_PLAYBACK))
+ return -EINVAL;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dma_data = &dev->play_dma_data;
+ else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ dma_data = &dev->capture_dma_data;
+
+ snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)dma_data);
+
+ return 0;
+}
+
+static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+ struct i2s_clk_config_data *config = &dev->config;
+ u32 ccr, xfer_resolution, ch_reg, irq;
+ int ret;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ config->data_width = 16;
+ ccr = 0x00;
+ xfer_resolution = 0x02;
+ break;
+
+ case SNDRV_PCM_FORMAT_S24_LE:
+ config->data_width = 24;
+ ccr = 0x08;
+ xfer_resolution = 0x04;
+ break;
+
+ case SNDRV_PCM_FORMAT_S32_LE:
+ config->data_width = 32;
+ ccr = 0x10;
+ xfer_resolution = 0x05;
+ break;
+
+ default:
+ dev_err(dev->dev, "designware-i2s: unsuppted PCM fmt");
+ return -EINVAL;
+ }
+
+ config->chan_nr = params_channels(params);
+
+ switch (config->chan_nr) {
+ case EIGHT_CHANNEL_SUPPORT:
+ ch_reg = 3;
+ case SIX_CHANNEL_SUPPORT:
+ ch_reg = 2;
+ case FOUR_CHANNEL_SUPPORT:
+ ch_reg = 1;
+ case TWO_CHANNEL_SUPPORT:
+ ch_reg = 0;
+ break;
+ default:
+ dev_err(dev->dev, "channel not supported\n");
+ }
+
+ i2s_disable_channels(dev, substream->stream);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ i2s_write_reg(dev->i2s_base, TCR(ch_reg), xfer_resolution);
+ i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
+ irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+ i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
+ i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
+ } else {
+ i2s_write_reg(dev->i2s_base, RCR(ch_reg), xfer_resolution);
+ i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
+ irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+ i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
+ i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+ }
+
+ i2s_write_reg(dev->i2s_base, CCR, ccr);
+
+ config->sample_rate = params_rate(params);
+
+ if (!dev->i2s_clk_cfg)
+ return -EINVAL;
+
+ ret = dev->i2s_clk_cfg(config);
+ if (ret < 0) {
+ dev_err(dev->dev, "runtime audio clk config fail\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ snd_soc_dai_set_dma_data(dai, substream, NULL);
+}
+
+static int dw_i2s_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dev->active++;
+ i2s_start(dev, substream);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dev->active--;
+ i2s_stop(dev, substream);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static struct snd_soc_dai_ops dw_i2s_dai_ops = {
+ .startup = dw_i2s_startup,
+ .shutdown = dw_i2s_shutdown,
+ .hw_params = dw_i2s_hw_params,
+ .trigger = dw_i2s_trigger,
+};
+
+#ifdef CONFIG_PM
+
+static int dw_i2s_suspend(struct snd_soc_dai *dai)
+{
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable(dev->clk);
+ return 0;
+}
+
+static int dw_i2s_resume(struct snd_soc_dai *dai)
+{
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+ clk_enable(dev->clk);
+ return 0;
+}
+
+#else
+#define dw_i2s_suspend NULL
+#define dw_i2s_resume NULL
+#endif
+
+static int dw_i2s_probe(struct platform_device *pdev)
+{
+ const struct i2s_platform_data *pdata = pdev->dev.platform_data;
+ struct dw_i2s_dev *dev;
+ struct resource *res;
+ int ret;
+ unsigned int cap;
+ struct snd_soc_dai_driver *dw_i2s_dai;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no i2s resource defined\n");
+ return -ENODEV;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "i2s region already claimed\n");
+ return -EBUSY;
+ }
+
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ dev_warn(&pdev->dev, "kzalloc fail\n");
+ return -ENOMEM;
+ }
+
+ dev->i2s_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!dev->i2s_base) {
+ dev_err(&pdev->dev, "ioremap fail for i2s_region\n");
+ return -ENOMEM;
+ }
+
+ cap = pdata->cap;
+ dev->capability = cap;
+ dev->i2s_clk_cfg = pdata->i2s_clk_cfg;
+
+ /* Set DMA slaves info */
+
+ dev->play_dma_data.data = pdata->play_dma_data;
+ dev->capture_dma_data.data = pdata->capture_dma_data;
+ dev->play_dma_data.addr = res->start + I2S_TXDMA;
+ dev->capture_dma_data.addr = res->start + I2S_RXDMA;
+ dev->play_dma_data.max_burst = 16;
+ dev->capture_dma_data.max_burst = 16;
+ dev->play_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dev->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dev->play_dma_data.filter = pdata->filter;
+ dev->capture_dma_data.filter = pdata->filter;
+
+ dev->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dev->clk))
+ return PTR_ERR(dev->clk);
+
+ ret = clk_enable(dev->clk);
+ if (ret < 0)
+ goto err_clk_put;
+
+ dw_i2s_dai = devm_kzalloc(&pdev->dev, sizeof(*dw_i2s_dai), GFP_KERNEL);
+ if (!dw_i2s_dai) {
+ dev_err(&pdev->dev, "mem allocation failed for dai driver\n");
+ ret = -ENOMEM;
+ goto err_clk_disable;
+ }
+
+ if (cap & DWC_I2S_PLAY) {
+ dev_dbg(&pdev->dev, " SPEAr: play supported\n");
+ dw_i2s_dai->playback.channels_min = MIN_CHANNEL_NUM;
+ dw_i2s_dai->playback.channels_max = pdata->channel;
+ dw_i2s_dai->playback.formats = pdata->snd_fmts;
+ dw_i2s_dai->playback.rates = pdata->snd_rates;
+ }
+
+ if (cap & DWC_I2S_RECORD) {
+ dev_dbg(&pdev->dev, "SPEAr: record supported\n");
+ dw_i2s_dai->capture.channels_min = MIN_CHANNEL_NUM;
+ dw_i2s_dai->capture.channels_max = pdata->channel;
+ dw_i2s_dai->capture.formats = pdata->snd_fmts;
+ dw_i2s_dai->capture.rates = pdata->snd_rates;
+ }
+
+ dw_i2s_dai->ops = &dw_i2s_dai_ops;
+ dw_i2s_dai->suspend = dw_i2s_suspend;
+ dw_i2s_dai->resume = dw_i2s_resume;
+
+ dev->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, dev);
+ ret = snd_soc_register_dai(&pdev->dev, dw_i2s_dai);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "not able to register dai\n");
+ goto err_set_drvdata;
+ }
+
+ return 0;
+
+err_set_drvdata:
+ dev_set_drvdata(&pdev->dev, NULL);
+err_clk_disable:
+ clk_disable(dev->clk);
+err_clk_put:
+ clk_put(dev->clk);
+ return ret;
+}
+
+static int dw_i2s_remove(struct platform_device *pdev)
+{
+ struct dw_i2s_dev *dev = dev_get_drvdata(&pdev->dev);
+
+ snd_soc_unregister_dai(&pdev->dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ clk_put(dev->clk);
+
+ return 0;
+}
+
+static struct platform_driver dw_i2s_driver = {
+ .probe = dw_i2s_probe,
+ .remove = dw_i2s_remove,
+ .driver = {
+ .name = "designware-i2s",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(dw_i2s_driver);
+
+MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
+MODULE_DESCRIPTION("DESIGNWARE I2S SoC Interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:designware_i2s");
diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c
index 162dbb74f4cc..4eea98b42bc8 100644
--- a/sound/soc/ep93xx/ep93xx-pcm.c
+++ b/sound/soc/ep93xx/ep93xx-pcm.c
@@ -136,7 +136,7 @@ static struct snd_pcm_ops ep93xx_pcm_ops = {
.hw_params = ep93xx_pcm_hw_params,
.hw_free = ep93xx_pcm_hw_free,
.trigger = snd_dmaengine_pcm_trigger,
- .pointer = snd_dmaengine_pcm_pointer,
+ .pointer = snd_dmaengine_pcm_pointer_no_residue,
.mmap = ep93xx_pcm_mmap,
};
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 080327414c6b..e7c800ebbd75 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -156,7 +156,7 @@ static void __init audmux_debugfs_init(void)
return;
}
- for (i = 0; i < MX31_AUDMUX_PORT6_SSI_PINS_6 + 1; i++) {
+ for (i = 0; i < MX31_AUDMUX_PORT7_SSI_PINS_7 + 1; i++) {
snprintf(buf, sizeof(buf), "ssi%d", i);
if (!debugfs_create_file(buf, 0444, audmux_debugfs_root,
(void *)i, &audmux_debugfs_fops))
diff --git a/sound/soc/fsl/imx-audmux.h b/sound/soc/fsl/imx-audmux.h
index 04ebbab8d7b9..b8ff44b9dafa 100644
--- a/sound/soc/fsl/imx-audmux.h
+++ b/sound/soc/fsl/imx-audmux.h
@@ -14,6 +14,7 @@
#define MX31_AUDMUX_PORT4_SSI_PINS_4 3
#define MX31_AUDMUX_PORT5_SSI_PINS_5 4
#define MX31_AUDMUX_PORT6_SSI_PINS_6 5
+#define MX31_AUDMUX_PORT7_SSI_PINS_7 6
#define MX51_AUDMUX_PORT1_SSI0 0
#define MX51_AUDMUX_PORT2_SSI1 1
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index f59c34943662..549b31fdc9dd 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -111,22 +111,39 @@ static int __devinit imx_mc13783_probe(struct platform_device *pdev)
return ret;
}
- imx_audmux_v2_configure_port(MX31_AUDMUX_PORT4_SSI_PINS_4,
- IMX_AUDMUX_V2_PTCR_SYN,
- IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0) |
- IMX_AUDMUX_V2_PDCR_MODE(1) |
- IMX_AUDMUX_V2_PDCR_INMMASK(0xfc));
- imx_audmux_v2_configure_port(MX31_AUDMUX_PORT1_SSI0,
- IMX_AUDMUX_V2_PTCR_SYN |
- IMX_AUDMUX_V2_PTCR_TFSDIR |
- IMX_AUDMUX_V2_PTCR_TFSEL(MX31_AUDMUX_PORT4_SSI_PINS_4) |
- IMX_AUDMUX_V2_PTCR_TCLKDIR |
- IMX_AUDMUX_V2_PTCR_TCSEL(MX31_AUDMUX_PORT4_SSI_PINS_4) |
- IMX_AUDMUX_V2_PTCR_RFSDIR |
- IMX_AUDMUX_V2_PTCR_RFSEL(MX31_AUDMUX_PORT4_SSI_PINS_4) |
- IMX_AUDMUX_V2_PTCR_RCLKDIR |
- IMX_AUDMUX_V2_PTCR_RCSEL(MX31_AUDMUX_PORT4_SSI_PINS_4),
- IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT4_SSI_PINS_4));
+ if (machine_is_mx31_3ds()) {
+ imx_audmux_v2_configure_port(MX31_AUDMUX_PORT4_SSI_PINS_4,
+ IMX_AUDMUX_V2_PTCR_SYN,
+ IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0) |
+ IMX_AUDMUX_V2_PDCR_MODE(1) |
+ IMX_AUDMUX_V2_PDCR_INMMASK(0xfc));
+ imx_audmux_v2_configure_port(MX31_AUDMUX_PORT1_SSI0,
+ IMX_AUDMUX_V2_PTCR_SYN |
+ IMX_AUDMUX_V2_PTCR_TFSDIR |
+ IMX_AUDMUX_V2_PTCR_TFSEL(MX31_AUDMUX_PORT4_SSI_PINS_4) |
+ IMX_AUDMUX_V2_PTCR_TCLKDIR |
+ IMX_AUDMUX_V2_PTCR_TCSEL(MX31_AUDMUX_PORT4_SSI_PINS_4) |
+ IMX_AUDMUX_V2_PTCR_RFSDIR |
+ IMX_AUDMUX_V2_PTCR_RFSEL(MX31_AUDMUX_PORT4_SSI_PINS_4) |
+ IMX_AUDMUX_V2_PTCR_RCLKDIR |
+ IMX_AUDMUX_V2_PTCR_RCSEL(MX31_AUDMUX_PORT4_SSI_PINS_4),
+ IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT4_SSI_PINS_4));
+ } else if (machine_is_mx27_3ds()) {
+ imx_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0,
+ IMX_AUDMUX_V1_PCR_SYN |
+ IMX_AUDMUX_V1_PCR_TFSDIR |
+ IMX_AUDMUX_V1_PCR_TCLKDIR |
+ IMX_AUDMUX_V1_PCR_RFSDIR |
+ IMX_AUDMUX_V1_PCR_RCLKDIR |
+ IMX_AUDMUX_V1_PCR_TFCSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4) |
+ IMX_AUDMUX_V1_PCR_RFCSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4) |
+ IMX_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR3_SSI_PINS_4)
+ );
+ imx_audmux_v1_configure_port(MX27_AUDMUX_HPCR3_SSI_PINS_4,
+ IMX_AUDMUX_V1_PCR_SYN |
+ IMX_AUDMUX_V1_PCR_RXDSEL(MX27_AUDMUX_HPCR1_SSI0)
+ );
+ }
return ret;
}
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index f3c0a5ef35c8..48f9d886f020 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -141,7 +141,7 @@ static struct snd_pcm_ops imx_pcm_ops = {
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_imx_pcm_hw_params,
.trigger = snd_dmaengine_pcm_trigger,
- .pointer = snd_dmaengine_pcm_pointer,
+ .pointer = snd_dmaengine_pcm_pointer_no_residue,
.mmap = snd_imx_pcm_mmap,
};
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index 456b7d723d66..ee27ba3933bd 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -29,6 +29,7 @@
#include <asm/fiq.h>
+#include <mach/irqs.h>
#include <mach/ssi.h>
#include "imx-ssi.h"
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index 3a729caeb8c8..fb21b17f17f5 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -95,8 +95,7 @@ static int __devinit imx_sgtl5000_probe(struct platform_device *pdev)
return ret;
}
imx_audmux_v2_configure_port(ext_port,
- IMX_AUDMUX_V2_PTCR_SYN |
- IMX_AUDMUX_V2_PTCR_TCSEL(int_port),
+ IMX_AUDMUX_V2_PTCR_SYN,
IMX_AUDMUX_V2_PDCR_RXDSEL(int_port));
if (ret) {
dev_err(&pdev->dev, "audmux external port setup failed\n");
diff --git a/sound/soc/mxs/mxs-pcm.c b/sound/soc/mxs/mxs-pcm.c
index 373dec90579f..f82d766cbf9e 100644
--- a/sound/soc/mxs/mxs-pcm.c
+++ b/sound/soc/mxs/mxs-pcm.c
@@ -141,7 +141,7 @@ static struct snd_pcm_ops mxs_pcm_ops = {
.ioctl = snd_pcm_lib_ioctl,
.hw_params = snd_mxs_pcm_hw_params,
.trigger = snd_dmaengine_pcm_trigger,
- .pointer = snd_dmaengine_pcm_pointer,
+ .pointer = snd_dmaengine_pcm_pointer_no_residue,
.mmap = snd_mxs_pcm_mmap,
};
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 3e6e8764b2e6..215113b05f7d 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -133,7 +133,7 @@ static int __devinit mxs_sgtl5000_probe_dt(struct platform_device *pdev)
mxs_sgtl5000_dai[i].codec_name = NULL;
mxs_sgtl5000_dai[i].codec_of_node = codec_np;
mxs_sgtl5000_dai[i].cpu_dai_name = NULL;
- mxs_sgtl5000_dai[i].cpu_dai_of_node = saif_np[i];
+ mxs_sgtl5000_dai[i].cpu_of_node = saif_np[i];
mxs_sgtl5000_dai[i].platform_name = NULL;
mxs_sgtl5000_dai[i].platform_of_node = saif_np[i];
}
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 59d47ab5b15d..2c66e2498a45 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -527,6 +527,7 @@ static struct platform_driver asoc_mcpdm_driver = {
module_platform_driver(asoc_mcpdm_driver);
+MODULE_ALIAS("platform:omap-mcpdm");
MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
MODULE_DESCRIPTION("OMAP PDM SoC Interface");
MODULE_LICENSE("GPL");
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index a0f7d3cfa470..4d2e46fae77c 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -8,6 +8,15 @@ config SND_PXA2XX_SOC
the PXA2xx AC97, I2S or SSP interface. You will also need
to select the audio interfaces to support below.
+config SND_MMP_SOC
+ bool "Soc Audio for Marvell MMP chips"
+ depends on ARCH_MMP
+ select SND_SOC_DMAENGINE_PCM
+ select SND_ARM
+ help
+ Say Y if you want to add support for codecs attached to
+ the MMP SSPA interface.
+
config SND_PXA2XX_AC97
tristate
select SND_AC97_CODEC
@@ -26,6 +35,9 @@ config SND_PXA_SOC_SSP
tristate
select PXA_SSP
+config SND_MMP_SOC_SSPA
+ tristate
+
config SND_PXA2XX_SOC_CORGI
tristate "SoC Audio support for Sharp Zaurus SL-C7x0"
depends on SND_PXA2XX_SOC && PXA_SHARP_C7xx
@@ -138,6 +150,26 @@ config SND_SOC_TAVOREVB3
Say Y if you want to add support for SoC audio on the
Marvell Saarb reference platform.
+config SND_PXA910_SOC
+ tristate "SoC Audio for Marvell PXA910 chip"
+ depends on ARCH_MMP && SND
+ select SND_PCM
+ help
+ Say Y if you want to add support for SoC audio on the
+ Marvell PXA910 reference platform.
+
+config SND_SOC_TTC_DKB
+ bool "SoC Audio support for TTC DKB"
+ depends on SND_PXA910_SOC && MACH_TTC_DKB
+ select PXA_SSP
+ select SND_PXA_SOC_SSP
+ select SND_MMP_SOC
+ select MFD_88PM860X
+ select SND_SOC_88PM860X
+ help
+ Say Y if you want to add support for SoC audio on TTC DKB
+
+
config SND_SOC_ZYLONITE
tristate "SoC Audio support for Marvell Zylonite"
depends on SND_PXA2XX_SOC && MACH_ZYLONITE
@@ -194,3 +226,13 @@ config SND_PXA2XX_SOC_IMOTE2
help
Say Y if you want to add support for SoC audio on the
IMote 2.
+
+config SND_MMP_SOC_BROWNSTONE
+ tristate "SoC Audio support for Marvell Brownstone"
+ depends on SND_MMP_SOC && MACH_BROWNSTONE
+ select SND_MMP_SOC_SSPA
+ select MFD_WM8994
+ select SND_SOC_WM8994
+ help
+ Say Y if you want to add support for SoC audio on the
+ Marvell Brownstone reference platform.
diff --git a/sound/soc/pxa/Makefile b/sound/soc/pxa/Makefile
index af357623be9d..d8a265d2d5d7 100644
--- a/sound/soc/pxa/Makefile
+++ b/sound/soc/pxa/Makefile
@@ -3,11 +3,15 @@ snd-soc-pxa2xx-objs := pxa2xx-pcm.o
snd-soc-pxa2xx-ac97-objs := pxa2xx-ac97.o
snd-soc-pxa2xx-i2s-objs := pxa2xx-i2s.o
snd-soc-pxa-ssp-objs := pxa-ssp.o
+snd-soc-mmp-objs := mmp-pcm.o
+snd-soc-mmp-sspa-objs := mmp-sspa.o
obj-$(CONFIG_SND_PXA2XX_SOC) += snd-soc-pxa2xx.o
obj-$(CONFIG_SND_PXA2XX_SOC_AC97) += snd-soc-pxa2xx-ac97.o
obj-$(CONFIG_SND_PXA2XX_SOC_I2S) += snd-soc-pxa2xx-i2s.o
obj-$(CONFIG_SND_PXA_SOC_SSP) += snd-soc-pxa-ssp.o
+obj-$(CONFIG_SND_MMP_SOC) += snd-soc-mmp.o
+obj-$(CONFIG_SND_MMP_SOC_SSPA) += snd-soc-mmp-sspa.o
# PXA Machine Support
snd-soc-corgi-objs := corgi.o
@@ -28,6 +32,8 @@ snd-soc-mioa701-objs := mioa701_wm9713.o
snd-soc-z2-objs := z2.o
snd-soc-imote2-objs := imote2.o
snd-soc-raumfeld-objs := raumfeld.o
+snd-soc-brownstone-objs := brownstone.o
+snd-soc-ttc-dkb-objs := ttc-dkb.o
obj-$(CONFIG_SND_PXA2XX_SOC_CORGI) += snd-soc-corgi.o
obj-$(CONFIG_SND_PXA2XX_SOC_POODLE) += snd-soc-poodle.o
@@ -47,3 +53,5 @@ obj-$(CONFIG_SND_SOC_TAVOREVB3) += snd-soc-tavorevb3.o
obj-$(CONFIG_SND_SOC_ZYLONITE) += snd-soc-zylonite.o
obj-$(CONFIG_SND_PXA2XX_SOC_IMOTE2) += snd-soc-imote2.o
obj-$(CONFIG_SND_SOC_RAUMFELD) += snd-soc-raumfeld.o
+obj-$(CONFIG_SND_MMP_SOC_BROWNSTONE) += snd-soc-brownstone.o
+obj-$(CONFIG_SND_SOC_TTC_DKB) += snd-soc-ttc-dkb.o
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
new file mode 100644
index 000000000000..5e666e03d333
--- /dev/null
+++ b/sound/soc/pxa/brownstone.c
@@ -0,0 +1,174 @@
+/*
+ * linux/sound/soc/pxa/brownstone.c
+ *
+ * Copyright (C) 2011 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+#include "../codecs/wm8994.h"
+#include "mmp-sspa.h"
+
+static const struct snd_kcontrol_new brownstone_dapm_control[] = {
+ SOC_DAPM_PIN_SWITCH("Ext Spk"),
+};
+
+static const struct snd_soc_dapm_widget brownstone_dapm_widgets[] = {
+ SND_SOC_DAPM_SPK("Ext Spk", NULL),
+ SND_SOC_DAPM_HP("Headset Stereophone", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_MIC("Main Mic", NULL),
+};
+
+static const struct snd_soc_dapm_route brownstone_audio_map[] = {
+ {"Ext Spk", NULL, "SPKOUTLP"},
+ {"Ext Spk", NULL, "SPKOUTLN"},
+ {"Ext Spk", NULL, "SPKOUTRP"},
+ {"Ext Spk", NULL, "SPKOUTRN"},
+
+ {"Headset Stereophone", NULL, "HPOUT1L"},
+ {"Headset Stereophone", NULL, "HPOUT1R"},
+
+ {"IN1RN", NULL, "Headset Mic"},
+
+ {"DMIC1DAT", NULL, "MICBIAS1"},
+ {"MICBIAS1", NULL, "Main Mic"},
+};
+
+static int brownstone_wm8994_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+ snd_soc_dapm_enable_pin(dapm, "Ext Spk");
+ snd_soc_dapm_enable_pin(dapm, "Headset Stereophone");
+ snd_soc_dapm_enable_pin(dapm, "Headset Mic");
+ snd_soc_dapm_enable_pin(dapm, "Main Mic");
+
+ /* set endpoints to not connected */
+ snd_soc_dapm_nc_pin(dapm, "HPOUT2P");
+ snd_soc_dapm_nc_pin(dapm, "HPOUT2N");
+ snd_soc_dapm_nc_pin(dapm, "LINEOUT1N");
+ snd_soc_dapm_nc_pin(dapm, "LINEOUT1P");
+ snd_soc_dapm_nc_pin(dapm, "LINEOUT2N");
+ snd_soc_dapm_nc_pin(dapm, "LINEOUT2P");
+ snd_soc_dapm_nc_pin(dapm, "IN1LN");
+ snd_soc_dapm_nc_pin(dapm, "IN1LP");
+ snd_soc_dapm_nc_pin(dapm, "IN1RP");
+ snd_soc_dapm_nc_pin(dapm, "IN2LP:VXRN");
+ snd_soc_dapm_nc_pin(dapm, "IN2RN");
+ snd_soc_dapm_nc_pin(dapm, "IN2RP:VXRP");
+ snd_soc_dapm_nc_pin(dapm, "IN2LN");
+
+ snd_soc_dapm_sync(dapm);
+
+ return 0;
+}
+
+static int brownstone_wm8994_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ int freq_out, sspa_mclk, sysclk;
+ int sspa_div;
+
+ if (params_rate(params) > 11025) {
+ freq_out = params_rate(params) * 512;
+ sysclk = params_rate(params) * 256;
+ sspa_mclk = params_rate(params) * 64;
+ } else {
+ freq_out = params_rate(params) * 1024;
+ sysclk = params_rate(params) * 512;
+ sspa_mclk = params_rate(params) * 64;
+ }
+ sspa_div = freq_out;
+ do_div(sspa_div, sspa_mclk);
+
+ snd_soc_dai_set_sysclk(cpu_dai, MMP_SSPA_CLK_AUDIO, freq_out, 0);
+ snd_soc_dai_set_pll(cpu_dai, MMP_SYSCLK, 0, freq_out, sysclk);
+ snd_soc_dai_set_pll(cpu_dai, MMP_SSPA_CLK, 0, freq_out, sspa_mclk);
+
+ /* set wm8994 sysclk */
+ snd_soc_dai_set_sysclk(codec_dai, WM8994_SYSCLK_MCLK1, sysclk, 0);
+
+ return 0;
+}
+
+/* machine stream operations */
+static struct snd_soc_ops brownstone_ops = {
+ .hw_params = brownstone_wm8994_hw_params,
+};
+
+static struct snd_soc_dai_link brownstone_wm8994_dai[] = {
+{
+ .name = "WM8994",
+ .stream_name = "WM8994 HiFi",
+ .cpu_dai_name = "mmp-sspa-dai.0",
+ .codec_dai_name = "wm8994-aif1",
+ .platform_name = "mmp-pcm-audio",
+ .codec_name = "wm8994-codec",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ops = &brownstone_ops,
+ .init = brownstone_wm8994_init,
+},
+};
+
+/* audio machine driver */
+static struct snd_soc_card brownstone = {
+ .name = "brownstone",
+ .dai_link = brownstone_wm8994_dai,
+ .num_links = ARRAY_SIZE(brownstone_wm8994_dai),
+
+ .controls = brownstone_dapm_control,
+ .num_controls = ARRAY_SIZE(brownstone_dapm_control),
+ .dapm_widgets = brownstone_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(brownstone_dapm_widgets),
+ .dapm_routes = brownstone_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(brownstone_audio_map),
+};
+
+static int __devinit brownstone_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ brownstone.dev = &pdev->dev;
+ ret = snd_soc_register_card(&brownstone);
+ if (ret)
+ dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
+ ret);
+ return ret;
+}
+
+static int __devexit brownstone_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_card(&brownstone);
+ return 0;
+}
+
+static struct platform_driver mmp_driver = {
+ .driver = {
+ .name = "brownstone-audio",
+ .owner = THIS_MODULE,
+ },
+ .probe = brownstone_probe,
+ .remove = __devexit_p(brownstone_remove),
+};
+
+module_platform_driver(mmp_driver);
+
+MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
+MODULE_DESCRIPTION("ALSA SoC Brownstone");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 9c585af59b5f..8687c1c65d29 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -186,36 +186,27 @@ static struct snd_soc_card mioa701 = {
.num_links = ARRAY_SIZE(mioa701_dai),
};
-static struct platform_device *mioa701_snd_device;
-
-static int mioa701_wm9713_probe(struct platform_device *pdev)
+static int __devinit mioa701_wm9713_probe(struct platform_device *pdev)
{
- int ret;
+ int rc;
if (!machine_is_mioa701())
return -ENODEV;
- dev_warn(&pdev->dev, "Be warned that incorrect mixers/muxes setup will"
- "lead to overheating and possible destruction of your device."
- "Do not use without a good knowledge of mio's board design!\n");
-
- mioa701_snd_device = platform_device_alloc("soc-audio", -1);
- if (!mioa701_snd_device)
- return -ENOMEM;
-
- platform_set_drvdata(mioa701_snd_device, &mioa701);
-
- ret = platform_device_add(mioa701_snd_device);
- if (!ret)
- return 0;
-
- platform_device_put(mioa701_snd_device);
- return ret;
+ mioa701.dev = &pdev->dev;
+ rc = snd_soc_register_card(&mioa701);
+ if (!rc)
+ dev_warn(&pdev->dev, "Be warned that incorrect mixers/muxes setup will"
+ "lead to overheating and possible destruction of your device."
+ " Do not use without a good knowledge of mio's board design!\n");
+ return rc;
}
static int __devexit mioa701_wm9713_remove(struct platform_device *pdev)
{
- platform_device_unregister(mioa701_snd_device);
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_card(card);
return 0;
}
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
new file mode 100644
index 000000000000..73ac5463c9e4
--- /dev/null
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -0,0 +1,297 @@
+/*
+ * linux/sound/soc/pxa/mmp-pcm.c
+ *
+ * Copyright (C) 2011 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_data/mmp_audio.h>
+#include <sound/pxa2xx-lib.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <mach/sram.h>
+#include <sound/dmaengine_pcm.h>
+
+struct mmp_dma_data {
+ int ssp_id;
+ struct resource *dma_res;
+};
+
+#define MMP_PCM_INFO (SNDRV_PCM_INFO_MMAP | \
+ SNDRV_PCM_INFO_MMAP_VALID | \
+ SNDRV_PCM_INFO_INTERLEAVED | \
+ SNDRV_PCM_INFO_PAUSE | \
+ SNDRV_PCM_INFO_RESUME)
+
+#define MMP_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_pcm_hardware mmp_pcm_hardware[] = {
+ {
+ .info = MMP_PCM_INFO,
+ .formats = MMP_PCM_FORMATS,
+ .period_bytes_min = 1024,
+ .period_bytes_max = 2048,
+ .periods_min = 2,
+ .periods_max = 32,
+ .buffer_bytes_max = 4096,
+ .fifo_size = 32,
+ },
+ {
+ .info = MMP_PCM_INFO,
+ .formats = MMP_PCM_FORMATS,
+ .period_bytes_min = 1024,
+ .period_bytes_max = 2048,
+ .periods_min = 2,
+ .periods_max = 32,
+ .buffer_bytes_max = 4096,
+ .fifo_size = 32,
+ },
+};
+
+static int mmp_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct pxa2xx_pcm_dma_params *dma_params;
+ struct dma_slave_config slave_config;
+ int ret;
+
+ dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ if (!dma_params)
+ return 0;
+
+ ret = snd_hwparams_to_dma_slave_config(substream, params, &slave_config);
+ if (ret)
+ return ret;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config.dst_addr = dma_params->dev_addr;
+ slave_config.dst_maxburst = 4;
+ } else {
+ slave_config.src_addr = dma_params->dev_addr;
+ slave_config.src_maxburst = 4;
+ }
+
+ ret = dmaengine_slave_config(chan, &slave_config);
+ if (ret)
+ return ret;
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+ return 0;
+}
+
+static bool filter(struct dma_chan *chan, void *param)
+{
+ struct mmp_dma_data *dma_data = param;
+ bool found = false;
+ char *devname;
+
+ devname = kasprintf(GFP_KERNEL, "%s.%d", dma_data->dma_res->name,
+ dma_data->ssp_id);
+ if ((strcmp(dev_name(chan->device->dev), devname) == 0) &&
+ (chan->chan_id == dma_data->dma_res->start)) {
+ found = true;
+ }
+
+ kfree(devname);
+ return found;
+}
+
+static int mmp_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct platform_device *pdev = to_platform_device(rtd->platform->dev);
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct mmp_dma_data *dma_data;
+ struct resource *r;
+ int ret;
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, substream->stream);
+ if (!r)
+ return -EBUSY;
+
+ snd_soc_set_runtime_hwparams(substream,
+ &mmp_pcm_hardware[substream->stream]);
+ dma_data = devm_kzalloc(&pdev->dev,
+ sizeof(struct mmp_dma_data), GFP_KERNEL);
+ if (dma_data == NULL)
+ return -ENOMEM;
+
+ dma_data->dma_res = r;
+ dma_data->ssp_id = cpu_dai->id;
+
+ ret = snd_dmaengine_pcm_open(substream, filter, dma_data);
+ if (ret) {
+ devm_kfree(&pdev->dev, dma_data);
+ return ret;
+ }
+
+ snd_dmaengine_pcm_set_data(substream, dma_data);
+ return 0;
+}
+
+static int mmp_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct mmp_dma_data *dma_data = snd_dmaengine_pcm_get_data(substream);
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct platform_device *pdev = to_platform_device(rtd->platform->dev);
+
+ snd_dmaengine_pcm_close(substream);
+ devm_kfree(&pdev->dev, dma_data);
+ return 0;
+}
+
+static int mmp_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ unsigned long off = vma->vm_pgoff;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return remap_pfn_range(vma, vma->vm_start,
+ __phys_to_pfn(runtime->dma_addr) + off,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+struct snd_pcm_ops mmp_pcm_ops = {
+ .open = mmp_pcm_open,
+ .close = mmp_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = mmp_pcm_hw_params,
+ .trigger = snd_dmaengine_pcm_trigger,
+ .pointer = snd_dmaengine_pcm_pointer,
+ .mmap = mmp_pcm_mmap,
+};
+
+static void mmp_pcm_free_dma_buffers(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_dma_buffer *buf;
+ int stream;
+ struct gen_pool *gpool;
+
+ gpool = sram_get_gpool("asram");
+ if (!gpool)
+ return;
+
+ for (stream = 0; stream < 2; stream++) {
+ size_t size = mmp_pcm_hardware[stream].buffer_bytes_max;
+
+ substream = pcm->streams[stream].substream;
+ if (!substream)
+ continue;
+
+ buf = &substream->dma_buffer;
+ if (!buf->area)
+ continue;
+ gen_pool_free(gpool, (unsigned long)buf->area, size);
+ buf->area = NULL;
+ }
+
+ return;
+}
+
+static int mmp_pcm_preallocate_dma_buffer(struct snd_pcm_substream *substream,
+ int stream)
+{
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+ size_t size = mmp_pcm_hardware[stream].buffer_bytes_max;
+ struct gen_pool *gpool;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = substream->pcm->card->dev;
+ buf->private_data = NULL;
+
+ gpool = sram_get_gpool("asram");
+ if (!gpool)
+ return -ENOMEM;
+
+ buf->area = (unsigned char *)gen_pool_alloc(gpool, size);
+ if (!buf->area)
+ return -ENOMEM;
+ buf->addr = gen_pool_virt_to_phys(gpool, (unsigned long)buf->area);
+ buf->bytes = size;
+ return 0;
+}
+
+int mmp_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_pcm *pcm = rtd->pcm;
+ int ret = 0, stream;
+
+ for (stream = 0; stream < 2; stream++) {
+ substream = pcm->streams[stream].substream;
+
+ ret = mmp_pcm_preallocate_dma_buffer(substream, stream);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ mmp_pcm_free_dma_buffers(pcm);
+ return ret;
+}
+
+struct snd_soc_platform_driver mmp_soc_platform = {
+ .ops = &mmp_pcm_ops,
+ .pcm_new = mmp_pcm_new,
+ .pcm_free = mmp_pcm_free_dma_buffers,
+};
+
+static __devinit int mmp_pcm_probe(struct platform_device *pdev)
+{
+ struct mmp_audio_platdata *pdata = pdev->dev.platform_data;
+
+ if (pdata) {
+ mmp_pcm_hardware[SNDRV_PCM_STREAM_PLAYBACK].buffer_bytes_max =
+ pdata->buffer_max_playback;
+ mmp_pcm_hardware[SNDRV_PCM_STREAM_PLAYBACK].period_bytes_max =
+ pdata->period_max_playback;
+ mmp_pcm_hardware[SNDRV_PCM_STREAM_CAPTURE].buffer_bytes_max =
+ pdata->buffer_max_capture;
+ mmp_pcm_hardware[SNDRV_PCM_STREAM_CAPTURE].period_bytes_max =
+ pdata->period_max_capture;
+ }
+ return snd_soc_register_platform(&pdev->dev, &mmp_soc_platform);
+}
+
+static int __devexit mmp_pcm_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_platform(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver mmp_pcm_driver = {
+ .driver = {
+ .name = "mmp-pcm-audio",
+ .owner = THIS_MODULE,
+ },
+
+ .probe = mmp_pcm_probe,
+ .remove = __devexit_p(mmp_pcm_remove),
+};
+
+module_platform_driver(mmp_pcm_driver);
+
+MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
+MODULE_DESCRIPTION("MMP Soc Audio DMA module");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
new file mode 100644
index 000000000000..4d6cb8a30fc8
--- /dev/null
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -0,0 +1,480 @@
+/*
+ * linux/sound/soc/pxa/mmp-sspa.c
+ * Base on pxa2xx-ssp.c
+ *
+ * Copyright (C) 2011 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/io.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/pxa2xx-lib.h>
+#include "mmp-sspa.h"
+
+/*
+ * SSPA audio private data
+ */
+struct sspa_priv {
+ struct ssp_device *sspa;
+ struct pxa2xx_pcm_dma_params *dma_params;
+ struct clk *audio_clk;
+ struct clk *sysclk;
+ int dai_fmt;
+ int running_cnt;
+};
+
+static void mmp_sspa_write_reg(struct ssp_device *sspa, u32 reg, u32 val)
+{
+ __raw_writel(val, sspa->mmio_base + reg);
+}
+
+static u32 mmp_sspa_read_reg(struct ssp_device *sspa, u32 reg)
+{
+ return __raw_readl(sspa->mmio_base + reg);
+}
+
+static void mmp_sspa_tx_enable(struct ssp_device *sspa)
+{
+ unsigned int sspa_sp;
+
+ sspa_sp = mmp_sspa_read_reg(sspa, SSPA_TXSP);
+ sspa_sp |= SSPA_SP_S_EN;
+ sspa_sp |= SSPA_SP_WEN;
+ mmp_sspa_write_reg(sspa, SSPA_TXSP, sspa_sp);
+}
+
+static void mmp_sspa_tx_disable(struct ssp_device *sspa)
+{
+ unsigned int sspa_sp;
+
+ sspa_sp = mmp_sspa_read_reg(sspa, SSPA_TXSP);
+ sspa_sp &= ~SSPA_SP_S_EN;
+ sspa_sp |= SSPA_SP_WEN;
+ mmp_sspa_write_reg(sspa, SSPA_TXSP, sspa_sp);
+}
+
+static void mmp_sspa_rx_enable(struct ssp_device *sspa)
+{
+ unsigned int sspa_sp;
+
+ sspa_sp = mmp_sspa_read_reg(sspa, SSPA_RXSP);
+ sspa_sp |= SSPA_SP_S_EN;
+ sspa_sp |= SSPA_SP_WEN;
+ mmp_sspa_write_reg(sspa, SSPA_RXSP, sspa_sp);
+}
+
+static void mmp_sspa_rx_disable(struct ssp_device *sspa)
+{
+ unsigned int sspa_sp;
+
+ sspa_sp = mmp_sspa_read_reg(sspa, SSPA_RXSP);
+ sspa_sp &= ~SSPA_SP_S_EN;
+ sspa_sp |= SSPA_SP_WEN;
+ mmp_sspa_write_reg(sspa, SSPA_RXSP, sspa_sp);
+}
+
+static int mmp_sspa_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sspa_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+ clk_enable(priv->sysclk);
+ clk_enable(priv->sspa->clk);
+
+ return 0;
+}
+
+static void mmp_sspa_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct sspa_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable(priv->sspa->clk);
+ clk_disable(priv->sysclk);
+
+ return;
+}
+
+/*
+ * Set the SSP ports SYSCLK.
+ */
+static int mmp_sspa_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ struct sspa_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
+ int ret = 0;
+
+ switch (clk_id) {
+ case MMP_SSPA_CLK_AUDIO:
+ ret = clk_set_rate(priv->audio_clk, freq);
+ if (ret)
+ return ret;
+ break;
+ case MMP_SSPA_CLK_PLL:
+ case MMP_SSPA_CLK_VCXO:
+ /* not support yet */
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mmp_sspa_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
+ int source, unsigned int freq_in,
+ unsigned int freq_out)
+{
+ struct sspa_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
+ int ret = 0;
+
+ switch (pll_id) {
+ case MMP_SYSCLK:
+ ret = clk_set_rate(priv->sysclk, freq_out);
+ if (ret)
+ return ret;
+ break;
+ case MMP_SSPA_CLK:
+ ret = clk_set_rate(priv->sspa->clk, freq_out);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/*
+ * Set up the sspa dai format. The sspa port must be inactive
+ * before calling this function as the physical
+ * interface format is changed.
+ */
+static int mmp_sspa_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+ unsigned int fmt)
+{
+ struct sspa_priv *sspa_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct ssp_device *sspa = sspa_priv->sspa;
+ u32 sspa_sp, sspa_ctrl;
+
+ /* check if we need to change anything at all */
+ if (sspa_priv->dai_fmt == fmt)
+ return 0;
+
+ /* we can only change the settings if the port is not in use */
+ if ((mmp_sspa_read_reg(sspa, SSPA_TXSP) & SSPA_SP_S_EN) ||
+ (mmp_sspa_read_reg(sspa, SSPA_RXSP) & SSPA_SP_S_EN)) {
+ dev_err(&sspa->pdev->dev,
+ "can't change hardware dai format: stream is in use\n");
+ return -EINVAL;
+ }
+
+ /* reset port settings */
+ sspa_sp = SSPA_SP_WEN | SSPA_SP_S_RST | SSPA_SP_FFLUSH;
+ sspa_ctrl = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ sspa_sp |= SSPA_SP_MSL;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ sspa_sp |= SSPA_SP_FSP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ sspa_sp |= SSPA_TXSP_FPER(63);
+ sspa_sp |= SSPA_SP_FWID(31);
+ sspa_ctrl |= SSPA_CTL_XDATDLY(1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mmp_sspa_write_reg(sspa, SSPA_TXSP, sspa_sp);
+ mmp_sspa_write_reg(sspa, SSPA_RXSP, sspa_sp);
+
+ sspa_sp &= ~(SSPA_SP_S_RST | SSPA_SP_FFLUSH);
+ mmp_sspa_write_reg(sspa, SSPA_TXSP, sspa_sp);
+ mmp_sspa_write_reg(sspa, SSPA_RXSP, sspa_sp);
+
+ /*
+ * FIXME: hw issue, for the tx serial port,
+ * can not config the master/slave mode;
+ * so must clean this bit.
+ * The master/slave mode has been set in the
+ * rx port.
+ */
+ sspa_sp &= ~SSPA_SP_MSL;
+ mmp_sspa_write_reg(sspa, SSPA_TXSP, sspa_sp);
+
+ mmp_sspa_write_reg(sspa, SSPA_TXCTL, sspa_ctrl);
+ mmp_sspa_write_reg(sspa, SSPA_RXCTL, sspa_ctrl);
+
+ /* Since we are configuring the timings for the format by hand
+ * we have to defer some things until hw_params() where we
+ * know parameters like the sample size.
+ */
+ sspa_priv->dai_fmt = fmt;
+ return 0;
+}
+
+/*
+ * Set the SSPA audio DMA parameters and sample size.
+ * Can be called multiple times by oss emulation.
+ */
+static int mmp_sspa_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct sspa_priv *sspa_priv = snd_soc_dai_get_drvdata(dai);
+ struct ssp_device *sspa = sspa_priv->sspa;
+ struct pxa2xx_pcm_dma_params *dma_params;
+ u32 sspa_ctrl;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ sspa_ctrl = mmp_sspa_read_reg(sspa, SSPA_TXCTL);
+ else
+ sspa_ctrl = mmp_sspa_read_reg(sspa, SSPA_RXCTL);
+
+ sspa_ctrl &= ~SSPA_CTL_XFRLEN1_MASK;
+ sspa_ctrl |= SSPA_CTL_XFRLEN1(params_channels(params) - 1);
+ sspa_ctrl &= ~SSPA_CTL_XWDLEN1_MASK;
+ sspa_ctrl |= SSPA_CTL_XWDLEN1(SSPA_CTL_32_BITS);
+ sspa_ctrl &= ~SSPA_CTL_XSSZ1_MASK;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S8:
+ sspa_ctrl |= SSPA_CTL_XSSZ1(SSPA_CTL_8_BITS);
+ break;
+ case SNDRV_PCM_FORMAT_S16_LE:
+ sspa_ctrl |= SSPA_CTL_XSSZ1(SSPA_CTL_16_BITS);
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ sspa_ctrl |= SSPA_CTL_XSSZ1(SSPA_CTL_20_BITS);
+ break;
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ sspa_ctrl |= SSPA_CTL_XSSZ1(SSPA_CTL_24_BITS);
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ sspa_ctrl |= SSPA_CTL_XSSZ1(SSPA_CTL_32_BITS);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ mmp_sspa_write_reg(sspa, SSPA_TXCTL, sspa_ctrl);
+ mmp_sspa_write_reg(sspa, SSPA_TXFIFO_LL, 0x1);
+ } else {
+ mmp_sspa_write_reg(sspa, SSPA_RXCTL, sspa_ctrl);
+ mmp_sspa_write_reg(sspa, SSPA_RXFIFO_UL, 0x0);
+ }
+
+ dma_params = &sspa_priv->dma_params[substream->stream];
+ dma_params->dev_addr = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ (sspa->phys_base + SSPA_TXD) :
+ (sspa->phys_base + SSPA_RXD);
+ snd_soc_dai_set_dma_data(cpu_dai, substream, dma_params);
+ return 0;
+}
+
+static int mmp_sspa_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct sspa_priv *sspa_priv = snd_soc_dai_get_drvdata(dai);
+ struct ssp_device *sspa = sspa_priv->sspa;
+ int ret = 0;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ /*
+ * whatever playback or capture, must enable rx.
+ * this is a hw issue, so need check if rx has been
+ * enabled or not; if has been enabled by another
+ * stream, do not enable again.
+ */
+ if (!sspa_priv->running_cnt)
+ mmp_sspa_rx_enable(sspa);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ mmp_sspa_tx_enable(sspa);
+
+ sspa_priv->running_cnt++;
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ sspa_priv->running_cnt--;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ mmp_sspa_tx_disable(sspa);
+
+ /* have no capture stream, disable rx port */
+ if (!sspa_priv->running_cnt)
+ mmp_sspa_rx_disable(sspa);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int mmp_sspa_probe(struct snd_soc_dai *dai)
+{
+ struct sspa_priv *priv = dev_get_drvdata(dai->dev);
+
+ snd_soc_dai_set_drvdata(dai, priv);
+ return 0;
+
+}
+
+#define MMP_SSPA_RATES SNDRV_PCM_RATE_8000_192000
+#define MMP_SSPA_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops mmp_sspa_dai_ops = {
+ .startup = mmp_sspa_startup,
+ .shutdown = mmp_sspa_shutdown,
+ .trigger = mmp_sspa_trigger,
+ .hw_params = mmp_sspa_hw_params,
+ .set_sysclk = mmp_sspa_set_dai_sysclk,
+ .set_pll = mmp_sspa_set_dai_pll,
+ .set_fmt = mmp_sspa_set_dai_fmt,
+};
+
+struct snd_soc_dai_driver mmp_sspa_dai = {
+ .probe = mmp_sspa_probe,
+ .playback = {
+ .channels_min = 1,
+ .channels_max = 128,
+ .rates = MMP_SSPA_RATES,
+ .formats = MMP_SSPA_FORMATS,
+ },
+ .capture = {
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MMP_SSPA_RATES,
+ .formats = MMP_SSPA_FORMATS,
+ },
+ .ops = &mmp_sspa_dai_ops,
+};
+
+static __devinit int asoc_mmp_sspa_probe(struct platform_device *pdev)
+{
+ struct sspa_priv *priv;
+ struct resource *res;
+
+ priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct sspa_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->sspa = devm_kzalloc(&pdev->dev,
+ sizeof(struct ssp_device), GFP_KERNEL);
+ if (priv->sspa == NULL)
+ return -ENOMEM;
+
+ priv->dma_params = devm_kzalloc(&pdev->dev,
+ 2 * sizeof(struct pxa2xx_pcm_dma_params), GFP_KERNEL);
+ if (priv->dma_params == NULL)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -ENOMEM;
+
+ priv->sspa->mmio_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (priv->sspa->mmio_base == NULL)
+ return -ENODEV;
+
+ priv->sspa->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->sspa->clk))
+ return PTR_ERR(priv->sspa->clk);
+
+ priv->audio_clk = clk_get(NULL, "mmp-audio");
+ if (IS_ERR(priv->audio_clk))
+ return PTR_ERR(priv->audio_clk);
+
+ priv->sysclk = clk_get(NULL, "mmp-sysclk");
+ if (IS_ERR(priv->sysclk)) {
+ clk_put(priv->audio_clk);
+ return PTR_ERR(priv->sysclk);
+ }
+ clk_enable(priv->audio_clk);
+ priv->dai_fmt = (unsigned int) -1;
+ platform_set_drvdata(pdev, priv);
+
+ return snd_soc_register_dai(&pdev->dev, &mmp_sspa_dai);
+}
+
+static int __devexit asoc_mmp_sspa_remove(struct platform_device *pdev)
+{
+ struct sspa_priv *priv = platform_get_drvdata(pdev);
+
+ clk_disable(priv->audio_clk);
+ clk_put(priv->audio_clk);
+ clk_put(priv->sysclk);
+ snd_soc_unregister_dai(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver asoc_mmp_sspa_driver = {
+ .driver = {
+ .name = "mmp-sspa-dai",
+ .owner = THIS_MODULE,
+ },
+ .probe = asoc_mmp_sspa_probe,
+ .remove = __devexit_p(asoc_mmp_sspa_remove),
+};
+
+module_platform_driver(asoc_mmp_sspa_driver);
+
+MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
+MODULE_DESCRIPTION("MMP SSPA SoC Interface");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/pxa/mmp-sspa.h b/sound/soc/pxa/mmp-sspa.h
new file mode 100644
index 000000000000..ea365cb9e784
--- /dev/null
+++ b/sound/soc/pxa/mmp-sspa.h
@@ -0,0 +1,92 @@
+/*
+ * linux/sound/soc/pxa/mmp-sspa.h
+ *
+ * Copyright (C) 2011 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef _MMP_SSPA_H
+#define _MMP_SSPA_H
+
+/*
+ * SSPA Registers
+ */
+#define SSPA_RXD (0x00)
+#define SSPA_RXID (0x04)
+#define SSPA_RXCTL (0x08)
+#define SSPA_RXSP (0x0c)
+#define SSPA_RXFIFO_UL (0x10)
+#define SSPA_RXINT_MASK (0x14)
+#define SSPA_RXC (0x18)
+#define SSPA_RXFIFO_NOFS (0x1c)
+#define SSPA_RXFIFO_SIZE (0x20)
+
+#define SSPA_TXD (0x80)
+#define SSPA_TXID (0x84)
+#define SSPA_TXCTL (0x88)
+#define SSPA_TXSP (0x8c)
+#define SSPA_TXFIFO_LL (0x90)
+#define SSPA_TXINT_MASK (0x94)
+#define SSPA_TXC (0x98)
+#define SSPA_TXFIFO_NOFS (0x9c)
+#define SSPA_TXFIFO_SIZE (0xa0)
+
+/* SSPA Control Register */
+#define SSPA_CTL_XPH (1 << 31) /* Read Phase */
+#define SSPA_CTL_XFIG (1 << 15) /* Transmit Zeros when FIFO Empty */
+#define SSPA_CTL_JST (1 << 3) /* Audio Sample Justification */
+#define SSPA_CTL_XFRLEN2_MASK (7 << 24)
+#define SSPA_CTL_XFRLEN2(x) ((x) << 24) /* Transmit Frame Length in Phase 2 */
+#define SSPA_CTL_XWDLEN2_MASK (7 << 21)
+#define SSPA_CTL_XWDLEN2(x) ((x) << 21) /* Transmit Word Length in Phase 2 */
+#define SSPA_CTL_XDATDLY(x) ((x) << 19) /* Tansmit Data Delay */
+#define SSPA_CTL_XSSZ2_MASK (7 << 16)
+#define SSPA_CTL_XSSZ2(x) ((x) << 16) /* Transmit Sample Audio Size */
+#define SSPA_CTL_XFRLEN1_MASK (7 << 8)
+#define SSPA_CTL_XFRLEN1(x) ((x) << 8) /* Transmit Frame Length in Phase 1 */
+#define SSPA_CTL_XWDLEN1_MASK (7 << 5)
+#define SSPA_CTL_XWDLEN1(x) ((x) << 5) /* Transmit Word Length in Phase 1 */
+#define SSPA_CTL_XSSZ1_MASK (7 << 0)
+#define SSPA_CTL_XSSZ1(x) ((x) << 0) /* XSSZ1 */
+
+#define SSPA_CTL_8_BITS (0x0) /* Sample Size */
+#define SSPA_CTL_12_BITS (0x1)
+#define SSPA_CTL_16_BITS (0x2)
+#define SSPA_CTL_20_BITS (0x3)
+#define SSPA_CTL_24_BITS (0x4)
+#define SSPA_CTL_32_BITS (0x5)
+
+/* SSPA Serial Port Register */
+#define SSPA_SP_WEN (1 << 31) /* Write Configuration Enable */
+#define SSPA_SP_MSL (1 << 18) /* Master Slave Configuration */
+#define SSPA_SP_CLKP (1 << 17) /* CLKP Polarity Clock Edge Select */
+#define SSPA_SP_FSP (1 << 16) /* FSP Polarity Clock Edge Select */
+#define SSPA_SP_FFLUSH (1 << 2) /* FIFO Flush */
+#define SSPA_SP_S_RST (1 << 1) /* Active High Reset Signal */
+#define SSPA_SP_S_EN (1 << 0) /* Serial Clock Domain Enable */
+#define SSPA_SP_FWID(x) ((x) << 20) /* Frame-Sync Width */
+#define SSPA_TXSP_FPER(x) ((x) << 4) /* Frame-Sync Active */
+
+/* sspa clock sources */
+#define MMP_SSPA_CLK_PLL 0
+#define MMP_SSPA_CLK_VCXO 1
+#define MMP_SSPA_CLK_AUDIO 3
+
+/* sspa pll id */
+#define MMP_SYSCLK 0
+#define MMP_SSPA_CLK 1
+
+#endif /* _MMP_SSPA_H */
diff --git a/sound/soc/pxa/ttc-dkb.c b/sound/soc/pxa/ttc-dkb.c
new file mode 100644
index 000000000000..935491a8a770
--- /dev/null
+++ b/sound/soc/pxa/ttc-dkb.c
@@ -0,0 +1,173 @@
+/*
+ * linux/sound/soc/pxa/ttc_dkb.c
+ *
+ * Copyright (C) 2012 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <asm/mach-types.h>
+#include <sound/pcm_params.h>
+#include "../codecs/88pm860x-codec.h"
+
+static struct snd_soc_jack hs_jack, mic_jack;
+
+static struct snd_soc_jack_pin hs_jack_pins[] = {
+ { .pin = "Headset Stereophone", .mask = SND_JACK_HEADPHONE, },
+};
+
+static struct snd_soc_jack_pin mic_jack_pins[] = {
+ { .pin = "Headset Mic 2", .mask = SND_JACK_MICROPHONE, },
+};
+
+/* ttc machine dapm widgets */
+static const struct snd_soc_dapm_widget ttc_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headset Stereophone", NULL),
+ SND_SOC_DAPM_LINE("Lineout Out 1", NULL),
+ SND_SOC_DAPM_LINE("Lineout Out 2", NULL),
+ SND_SOC_DAPM_SPK("Ext Speaker", NULL),
+ SND_SOC_DAPM_MIC("Ext Mic 1", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic 2", NULL),
+ SND_SOC_DAPM_MIC("Ext Mic 3", NULL),
+};
+
+/* ttc machine audio map */
+static const struct snd_soc_dapm_route ttc_audio_map[] = {
+ {"Headset Stereophone", NULL, "HS1"},
+ {"Headset Stereophone", NULL, "HS2"},
+
+ {"Ext Speaker", NULL, "LSP"},
+ {"Ext Speaker", NULL, "LSN"},
+
+ {"Lineout Out 1", NULL, "LINEOUT1"},
+ {"Lineout Out 2", NULL, "LINEOUT2"},
+
+ {"MIC1P", NULL, "Mic1 Bias"},
+ {"MIC1N", NULL, "Mic1 Bias"},
+ {"Mic1 Bias", NULL, "Ext Mic 1"},
+
+ {"MIC2P", NULL, "Mic1 Bias"},
+ {"MIC2N", NULL, "Mic1 Bias"},
+ {"Mic1 Bias", NULL, "Headset Mic 2"},
+
+ {"MIC3P", NULL, "Mic3 Bias"},
+ {"MIC3N", NULL, "Mic3 Bias"},
+ {"Mic3 Bias", NULL, "Ext Mic 3"},
+};
+
+static int ttc_pm860x_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dapm_context *dapm = &codec->dapm;
+
+ /* connected pins */
+ snd_soc_dapm_enable_pin(dapm, "Ext Speaker");
+ snd_soc_dapm_enable_pin(dapm, "Ext Mic 1");
+ snd_soc_dapm_enable_pin(dapm, "Ext Mic 3");
+ snd_soc_dapm_disable_pin(dapm, "Headset Mic 2");
+ snd_soc_dapm_disable_pin(dapm, "Headset Stereophone");
+
+ /* Headset jack detection */
+ snd_soc_jack_new(codec, "Headphone Jack", SND_JACK_HEADPHONE
+ | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2,
+ &hs_jack);
+ snd_soc_jack_add_pins(&hs_jack, ARRAY_SIZE(hs_jack_pins),
+ hs_jack_pins);
+ snd_soc_jack_new(codec, "Microphone Jack", SND_JACK_MICROPHONE,
+ &mic_jack);
+ snd_soc_jack_add_pins(&mic_jack, ARRAY_SIZE(mic_jack_pins),
+ mic_jack_pins);
+
+ /* headphone, microphone detection & headset short detection */
+ pm860x_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADPHONE,
+ SND_JACK_BTN_0, SND_JACK_BTN_1, SND_JACK_BTN_2);
+ pm860x_mic_jack_detect(codec, &hs_jack, SND_JACK_MICROPHONE);
+
+ return 0;
+}
+
+/* ttc/td-dkb digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link ttc_pm860x_hifi_dai[] = {
+{
+ .name = "88pm860x i2s",
+ .stream_name = "audio playback",
+ .codec_name = "88pm860x-codec",
+ .platform_name = "mmp-pcm-audio",
+ .cpu_dai_name = "pxa-ssp-dai.1",
+ .codec_dai_name = "88pm860x-i2s",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
+ .init = ttc_pm860x_init,
+},
+};
+
+/* ttc/td audio machine driver */
+static struct snd_soc_card ttc_dkb_card = {
+ .name = "ttc-dkb-hifi",
+ .dai_link = ttc_pm860x_hifi_dai,
+ .num_links = ARRAY_SIZE(ttc_pm860x_hifi_dai),
+
+ .dapm_widgets = ttc_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ttc_dapm_widgets),
+ .dapm_routes = ttc_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(ttc_audio_map),
+};
+
+static int __devinit ttc_dkb_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &ttc_dkb_card;
+ int ret;
+
+ card->dev = &pdev->dev;
+
+ ret = snd_soc_register_card(card);
+ if (ret)
+ dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int __devexit ttc_dkb_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_card(card);
+
+ return 0;
+}
+
+static struct platform_driver ttc_dkb_driver = {
+ .driver = {
+ .name = "ttc-dkb-audio",
+ .owner = THIS_MODULE,
+ },
+ .probe = ttc_dkb_probe,
+ .remove = __devexit_p(ttc_dkb_remove),
+};
+
+module_platform_driver(ttc_dkb_driver);
+
+/* Module information */
+MODULE_AUTHOR("Qiao Zhou, <zhouqiao@marvell.com>");
+MODULE_DESCRIPTION("ALSA SoC TTC DKB");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ttc-dkb-audio");
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index ddc6cde14e2a..f3ebc38c10fe 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -74,7 +74,7 @@ static void dma_enqueue(struct snd_pcm_substream *substream)
struct runtime_data *prtd = substream->runtime->private_data;
dma_addr_t pos = prtd->dma_pos;
unsigned int limit;
- struct samsung_dma_prep_info dma_info;
+ struct samsung_dma_prep dma_info;
pr_debug("Entered %s\n", __func__);
@@ -146,7 +146,8 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
unsigned long totbytes = params_buffer_bytes(params);
struct s3c_dma_params *dma =
snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
- struct samsung_dma_info dma_info;
+ struct samsung_dma_req req;
+ struct samsung_dma_config config;
pr_debug("Entered %s\n", __func__);
@@ -166,16 +167,17 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
prtd->params->ops = samsung_dma_get_ops();
- dma_info.cap = (samsung_dma_has_circular() ?
+ req.cap = (samsung_dma_has_circular() ?
DMA_CYCLIC : DMA_SLAVE);
- dma_info.client = prtd->params->client;
- dma_info.direction =
+ req.client = prtd->params->client;
+ config.direction =
(substream->stream == SNDRV_PCM_STREAM_PLAYBACK
? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
- dma_info.width = prtd->params->dma_size;
- dma_info.fifo = prtd->params->dma_addr;
+ config.width = prtd->params->dma_size;
+ config.fifo = prtd->params->dma_addr;
prtd->params->ch = prtd->params->ops->request(
- prtd->params->channel, &dma_info);
+ prtd->params->channel, &req);
+ prtd->params->ops->config(prtd->params->ch, &config);
}
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
diff --git a/sound/soc/samsung/littlemill.c b/sound/soc/samsung/littlemill.c
index c82c646b8a08..ee52c8a00779 100644
--- a/sound/soc/samsung/littlemill.c
+++ b/sound/soc/samsung/littlemill.c
@@ -211,6 +211,11 @@ static int bbclk_ev(struct snd_soc_dapm_widget *w,
return 0;
}
+static const struct snd_kcontrol_new controls[] = {
+ SOC_DAPM_PIN_SWITCH("WM1250 Input"),
+ SOC_DAPM_PIN_SWITCH("WM1250 Output"),
+};
+
static struct snd_soc_dapm_widget widgets[] = {
SND_SOC_DAPM_HP("Headphone", NULL),
@@ -282,6 +287,8 @@ static struct snd_soc_card littlemill = {
.set_bias_level = littlemill_set_bias_level,
.set_bias_level_post = littlemill_set_bias_level_post,
+ .controls = controls,
+ .num_controls = ARRAY_SIZE(controls),
.dapm_widgets = widgets,
.num_dapm_widgets = ARRAY_SIZE(widgets),
.dapm_routes = audio_paths,
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 79fbeea99d46..ac7701b3c5dc 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -25,7 +25,6 @@
#include <sound/soc.h>
#include <sound/pcm_params.h>
-#include <mach/regs-gpio.h>
#include <mach/dma.h>
#include "dma.h"
@@ -83,12 +82,9 @@ static int s3c2412_i2s_probe(struct snd_soc_dai *dai)
s3c2412_i2s.iis_cclk = s3c2412_i2s.iis_pclk;
- /* Configure the I2S pins in correct mode */
- s3c2410_gpio_cfgpin(S3C2410_GPE0, S3C2410_GPE0_I2SLRCK);
- s3c2410_gpio_cfgpin(S3C2410_GPE1, S3C2410_GPE1_I2SSCLK);
- s3c2410_gpio_cfgpin(S3C2410_GPE2, S3C2410_GPE2_CDCLK);
- s3c2410_gpio_cfgpin(S3C2410_GPE3, S3C2410_GPE3_I2SSDI);
- s3c2410_gpio_cfgpin(S3C2410_GPE4, S3C2410_GPE4_I2SSDO);
+ /* Configure the I2S pins (GPE0...GPE4) in correct mode */
+ s3c_gpio_cfgall_range(S3C2410_GPE(0), 5, S3C_GPIO_SFN(2),
+ S3C_GPIO_PULL_NONE);
return 0;
}
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index c4aa4d412fbf..0aae3a3883dc 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -23,7 +23,6 @@
#include <sound/soc.h>
#include <sound/pcm_params.h>
-#include <mach/regs-gpio.h>
#include <mach/dma.h>
#include <plat/regs-iis.h>
@@ -391,12 +390,9 @@ static int s3c24xx_i2s_probe(struct snd_soc_dai *dai)
}
clk_enable(s3c24xx_i2s.iis_clk);
- /* Configure the I2S pins in correct mode */
- s3c2410_gpio_cfgpin(S3C2410_GPE0, S3C2410_GPE0_I2SLRCK);
- s3c2410_gpio_cfgpin(S3C2410_GPE1, S3C2410_GPE1_I2SSCLK);
- s3c2410_gpio_cfgpin(S3C2410_GPE2, S3C2410_GPE2_CDCLK);
- s3c2410_gpio_cfgpin(S3C2410_GPE3, S3C2410_GPE3_I2SSDI);
- s3c2410_gpio_cfgpin(S3C2410_GPE4, S3C2410_GPE4_I2SSDO);
+ /* Configure the I2S pins (GPE0...GPE4) in correct mode */
+ s3c_gpio_cfgall_range(S3C2410_GPE(0), 5, S3C_GPIO_SFN(2),
+ S3C_GPIO_PULL_NONE);
writel(S3C2410_IISCON_IISEN, s3c24xx_i2s.regs + S3C2410_IISCON);
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
index 8eb309f23d18..48dd4dd9ee08 100644
--- a/sound/soc/samsung/smdk_wm8994.c
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -149,31 +149,41 @@ static struct snd_soc_card smdk = {
.num_links = ARRAY_SIZE(smdk_dai),
};
-static struct platform_device *smdk_snd_device;
-static int __init smdk_audio_init(void)
+static int __devinit smdk_audio_probe(struct platform_device *pdev)
{
int ret;
+ struct snd_soc_card *card = &smdk;
- smdk_snd_device = platform_device_alloc("soc-audio", -1);
- if (!smdk_snd_device)
- return -ENOMEM;
+ card->dev = &pdev->dev;
+ ret = snd_soc_register_card(card);
- platform_set_drvdata(smdk_snd_device, &smdk);
-
- ret = platform_device_add(smdk_snd_device);
if (ret)
- platform_device_put(smdk_snd_device);
+ dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret);
return ret;
}
-module_init(smdk_audio_init);
-static void __exit smdk_audio_exit(void)
+static int __devexit smdk_audio_remove(struct platform_device *pdev)
{
- platform_device_unregister(smdk_snd_device);
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+ snd_soc_unregister_card(card);
+
+ return 0;
}
-module_exit(smdk_audio_exit);
+
+static struct platform_driver smdk_audio_driver = {
+ .driver = {
+ .name = "smdk-audio",
+ .owner = THIS_MODULE,
+ },
+ .probe = smdk_audio_probe,
+ .remove = __devexit_p(smdk_audio_remove),
+};
+
+module_platform_driver(smdk_audio_driver);
MODULE_DESCRIPTION("ALSA SoC SMDK WM8994");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:smdk-audio");
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 2ef98536f1da..0540408a9fa9 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -247,7 +247,7 @@ struct fsi_priv {
struct fsi_stream_handler {
int (*init)(struct fsi_priv *fsi, struct fsi_stream *io);
int (*quit)(struct fsi_priv *fsi, struct fsi_stream *io);
- int (*probe)(struct fsi_priv *fsi, struct fsi_stream *io);
+ int (*probe)(struct fsi_priv *fsi, struct fsi_stream *io, struct device *dev);
int (*transfer)(struct fsi_priv *fsi, struct fsi_stream *io);
int (*remove)(struct fsi_priv *fsi, struct fsi_stream *io);
void (*start_stop)(struct fsi_priv *fsi, struct fsi_stream *io,
@@ -571,16 +571,16 @@ static int fsi_stream_transfer(struct fsi_stream *io)
#define fsi_stream_stop(fsi, io)\
fsi_stream_handler_call(io, start_stop, fsi, io, 0)
-static int fsi_stream_probe(struct fsi_priv *fsi)
+static int fsi_stream_probe(struct fsi_priv *fsi, struct device *dev)
{
struct fsi_stream *io;
int ret1, ret2;
io = &fsi->playback;
- ret1 = fsi_stream_handler_call(io, probe, fsi, io);
+ ret1 = fsi_stream_handler_call(io, probe, fsi, io, dev);
io = &fsi->capture;
- ret2 = fsi_stream_handler_call(io, probe, fsi, io);
+ ret2 = fsi_stream_handler_call(io, probe, fsi, io, dev);
if (ret1 < 0)
return ret1;
@@ -1089,13 +1089,10 @@ static void fsi_dma_do_tasklet(unsigned long data)
{
struct fsi_stream *io = (struct fsi_stream *)data;
struct fsi_priv *fsi = fsi_stream_to_priv(io);
- struct dma_chan *chan;
struct snd_soc_dai *dai;
struct dma_async_tx_descriptor *desc;
- struct scatterlist sg;
struct snd_pcm_runtime *runtime;
enum dma_data_direction dir;
- dma_cookie_t cookie;
int is_play = fsi_stream_is_play(fsi, io);
int len;
dma_addr_t buf;
@@ -1104,7 +1101,6 @@ static void fsi_dma_do_tasklet(unsigned long data)
return;
dai = fsi_get_dai(io->substream);
- chan = io->chan;
runtime = io->substream->runtime;
dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
len = samples_to_bytes(runtime, io->period_samples);
@@ -1112,14 +1108,8 @@ static void fsi_dma_do_tasklet(unsigned long data)
dma_sync_single_for_device(dai->dev, buf, len, dir);
- sg_init_table(&sg, 1);
- sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
- len , offset_in_page(buf));
- sg_dma_address(&sg) = buf;
- sg_dma_len(&sg) = len;
-
- desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ desc = dmaengine_prep_slave_single(io->chan, buf, len, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(dai->dev, "dmaengine_prep_slave_sg() fail\n");
return;
@@ -1128,13 +1118,12 @@ static void fsi_dma_do_tasklet(unsigned long data)
desc->callback = fsi_dma_complete;
desc->callback_param = io;
- cookie = desc->tx_submit(desc);
- if (cookie < 0) {
+ if (dmaengine_submit(desc) < 0) {
dev_err(dai->dev, "tx_submit() fail\n");
return;
}
- dma_async_issue_pending(chan);
+ dma_async_issue_pending(io->chan);
/*
* FIXME
@@ -1184,7 +1173,7 @@ static void fsi_dma_push_start_stop(struct fsi_priv *fsi, struct fsi_stream *io,
fsi_master_mask_set(master, CLK_RST, clk, (enable) ? clk : 0);
}
-static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io)
+static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io, struct device *dev)
{
dma_cap_mask_t mask;
@@ -1192,8 +1181,19 @@ static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io)
dma_cap_set(DMA_SLAVE, mask);
io->chan = dma_request_channel(mask, fsi_dma_filter, &io->slave);
- if (!io->chan)
- return -EIO;
+ if (!io->chan) {
+
+ /* switch to PIO handler */
+ if (fsi_stream_is_play(fsi, io))
+ fsi->playback.handler = &fsi_pio_push_handler;
+ else
+ fsi->capture.handler = &fsi_pio_pop_handler;
+
+ dev_info(dev, "switch handler (dma => pio)\n");
+
+ /* probe again */
+ return fsi_stream_probe(fsi, dev);
+ }
tasklet_init(&io->tasklet, fsi_dma_do_tasklet, (unsigned long)io);
@@ -1631,8 +1631,8 @@ static void fsi_handler_init(struct fsi_priv *fsi)
fsi->capture.priv = fsi;
if (fsi->info->tx_id) {
- fsi->playback.slave.slave_id = fsi->info->tx_id;
- fsi->playback.handler = &fsi_dma_push_handler;
+ fsi->playback.slave.shdma_slave.slave_id = fsi->info->tx_id;
+ fsi->playback.handler = &fsi_dma_push_handler;
}
}
@@ -1683,7 +1683,7 @@ static int fsi_probe(struct platform_device *pdev)
master->fsia.master = master;
master->fsia.info = &info->port_a;
fsi_handler_init(&master->fsia);
- ret = fsi_stream_probe(&master->fsia);
+ ret = fsi_stream_probe(&master->fsia, &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "FSIA stream probe failed\n");
goto exit_iounmap;
@@ -1694,7 +1694,7 @@ static int fsi_probe(struct platform_device *pdev)
master->fsib.master = master;
master->fsib.info = &info->port_b;
fsi_handler_init(&master->fsib);
- ret = fsi_stream_probe(&master->fsib);
+ ret = fsi_stream_probe(&master->fsib, &pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "FSIB stream probe failed\n");
goto exit_fsia;
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index 5cfcc655e95f..488f9becb44f 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -330,12 +330,9 @@ static bool filter(struct dma_chan *chan, void *slave)
{
struct sh_dmae_slave *param = slave;
- pr_debug("%s: slave ID %d\n", __func__, param->slave_id);
+ pr_debug("%s: slave ID %d\n", __func__, param->shdma_slave.slave_id);
- if (unlikely(param->dma_dev != chan->device->dev))
- return false;
-
- chan->private = param;
+ chan->private = &param->shdma_slave;
return true;
}
@@ -360,16 +357,15 @@ static int siu_pcm_open(struct snd_pcm_substream *ss)
if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) {
siu_stream = &port_info->playback;
param = &siu_stream->param;
- param->slave_id = port ? pdata->dma_slave_tx_b :
+ param->shdma_slave.slave_id = port ? pdata->dma_slave_tx_b :
pdata->dma_slave_tx_a;
} else {
siu_stream = &port_info->capture;
param = &siu_stream->param;
- param->slave_id = port ? pdata->dma_slave_rx_b :
+ param->shdma_slave.slave_id = port ? pdata->dma_slave_rx_b :
pdata->dma_slave_rx_a;
}
- param->dma_dev = pdata->dma_dev;
/* Get DMA channel */
siu_stream->chan = dma_request_channel(mask, filter, param);
if (!siu_stream->chan) {
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index b37ee8077ed1..f219b2f7ee68 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -812,13 +812,15 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
/* Find CPU DAI from registered DAIs*/
list_for_each_entry(cpu_dai, &dai_list, list) {
- if (dai_link->cpu_dai_of_node) {
- if (cpu_dai->dev->of_node != dai_link->cpu_dai_of_node)
- continue;
- } else {
- if (strcmp(cpu_dai->name, dai_link->cpu_dai_name))
- continue;
- }
+ if (dai_link->cpu_of_node &&
+ (cpu_dai->dev->of_node != dai_link->cpu_of_node))
+ continue;
+ if (dai_link->cpu_name &&
+ strcmp(dev_name(cpu_dai->dev), dai_link->cpu_name))
+ continue;
+ if (dai_link->cpu_dai_name &&
+ strcmp(cpu_dai->name, dai_link->cpu_dai_name))
+ continue;
rtd->cpu_dai = cpu_dai;
}
@@ -896,6 +898,28 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
return 0;
}
+static int soc_remove_platform(struct snd_soc_platform *platform)
+{
+ int ret;
+
+ if (platform->driver->remove) {
+ ret = platform->driver->remove(platform);
+ if (ret < 0)
+ pr_err("asoc: failed to remove %s: %d\n",
+ platform->name, ret);
+ }
+
+ /* Make sure all DAPM widgets are freed */
+ snd_soc_dapm_free(&platform->dapm);
+
+ soc_cleanup_platform_debugfs(platform);
+ platform->probed = 0;
+ list_del(&platform->card_list);
+ module_put(platform->dev->driver->owner);
+
+ return 0;
+}
+
static void soc_remove_codec(struct snd_soc_codec *codec)
{
int err;
@@ -917,11 +941,9 @@ static void soc_remove_codec(struct snd_soc_codec *codec)
module_put(codec->dev->driver->owner);
}
-static void soc_remove_dai_link(struct snd_soc_card *card, int num, int order)
+static void soc_remove_link_dais(struct snd_soc_card *card, int num, int order)
{
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_dai *codec_dai = rtd->codec_dai, *cpu_dai = rtd->cpu_dai;
int err;
@@ -946,30 +968,6 @@ static void soc_remove_dai_link(struct snd_soc_card *card, int num, int order)
list_del(&codec_dai->card_list);
}
- /* remove the platform */
- if (platform && platform->probed &&
- platform->driver->remove_order == order) {
- if (platform->driver->remove) {
- err = platform->driver->remove(platform);
- if (err < 0)
- pr_err("asoc: failed to remove %s: %d\n",
- platform->name, err);
- }
-
- /* Make sure all DAPM widgets are freed */
- snd_soc_dapm_free(&platform->dapm);
-
- soc_cleanup_platform_debugfs(platform);
- platform->probed = 0;
- list_del(&platform->card_list);
- module_put(platform->dev->driver->owner);
- }
-
- /* remove the CODEC */
- if (codec && codec->probed &&
- codec->driver->remove_order == order)
- soc_remove_codec(codec);
-
/* remove the cpu_dai */
if (cpu_dai && cpu_dai->probed &&
cpu_dai->driver->remove_order == order) {
@@ -981,7 +979,43 @@ static void soc_remove_dai_link(struct snd_soc_card *card, int num, int order)
}
cpu_dai->probed = 0;
list_del(&cpu_dai->card_list);
- module_put(cpu_dai->dev->driver->owner);
+
+ if (!cpu_dai->codec) {
+ snd_soc_dapm_free(&cpu_dai->dapm);
+ module_put(cpu_dai->dev->driver->owner);
+ }
+ }
+}
+
+static void soc_remove_link_components(struct snd_soc_card *card, int num,
+ int order)
+{
+ struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_platform *platform = rtd->platform;
+ struct snd_soc_codec *codec;
+
+ /* remove the platform */
+ if (platform && platform->probed &&
+ platform->driver->remove_order == order) {
+ soc_remove_platform(platform);
+ }
+
+ /* remove the CODEC-side CODEC */
+ if (codec_dai) {
+ codec = codec_dai->codec;
+ if (codec && codec->probed &&
+ codec->driver->remove_order == order)
+ soc_remove_codec(codec);
+ }
+
+ /* remove any CPU-side CODEC */
+ if (cpu_dai) {
+ codec = cpu_dai->codec;
+ if (codec && codec->probed &&
+ codec->driver->remove_order == order)
+ soc_remove_codec(codec);
}
}
@@ -992,8 +1026,15 @@ static void soc_remove_dai_links(struct snd_soc_card *card)
for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
order++) {
for (dai = 0; dai < card->num_rtd; dai++)
- soc_remove_dai_link(card, dai, order);
+ soc_remove_link_dais(card, dai, order);
}
+
+ for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
+ order++) {
+ for (dai = 0; dai < card->num_rtd; dai++)
+ soc_remove_link_components(card, dai, order);
+ }
+
card->num_rtd = 0;
}
@@ -1054,6 +1095,10 @@ static int soc_probe_codec(struct snd_soc_card *card,
}
}
+ /* If the driver didn't set I/O up try regmap */
+ if (!codec->control_data)
+ snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
+
if (driver->controls)
snd_soc_add_codec_controls(codec, driver->controls,
driver->num_controls);
@@ -1230,7 +1275,44 @@ out:
return 0;
}
-static int soc_probe_dai_link(struct snd_soc_card *card, int num, int order)
+static int soc_probe_link_components(struct snd_soc_card *card, int num,
+ int order)
+{
+ struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_platform *platform = rtd->platform;
+ int ret;
+
+ /* probe the CPU-side component, if it is a CODEC */
+ if (cpu_dai->codec &&
+ !cpu_dai->codec->probed &&
+ cpu_dai->codec->driver->probe_order == order) {
+ ret = soc_probe_codec(card, cpu_dai->codec);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* probe the CODEC-side component */
+ if (!codec_dai->codec->probed &&
+ codec_dai->codec->driver->probe_order == order) {
+ ret = soc_probe_codec(card, codec_dai->codec);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* probe the platform */
+ if (!platform->probed &&
+ platform->driver->probe_order == order) {
+ ret = soc_probe_platform(card, platform);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
{
struct snd_soc_dai_link *dai_link = &card->dai_link[num];
struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
@@ -1255,11 +1337,14 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num, int order)
/* probe the cpu_dai */
if (!cpu_dai->probed &&
cpu_dai->driver->probe_order == order) {
- cpu_dai->dapm.card = card;
- if (!try_module_get(cpu_dai->dev->driver->owner))
- return -ENODEV;
+ if (!cpu_dai->codec) {
+ cpu_dai->dapm.card = card;
+ if (!try_module_get(cpu_dai->dev->driver->owner))
+ return -ENODEV;
- snd_soc_dapm_new_dai_widgets(&cpu_dai->dapm, cpu_dai);
+ list_add(&cpu_dai->dapm.list, &card->dapm_list);
+ snd_soc_dapm_new_dai_widgets(&cpu_dai->dapm, cpu_dai);
+ }
if (cpu_dai->driver->probe) {
ret = cpu_dai->driver->probe(cpu_dai);
@@ -1275,22 +1360,6 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num, int order)
list_add(&cpu_dai->card_list, &card->dai_dev_list);
}
- /* probe the CODEC */
- if (!codec->probed &&
- codec->driver->probe_order == order) {
- ret = soc_probe_codec(card, codec);
- if (ret < 0)
- return ret;
- }
-
- /* probe the platform */
- if (!platform->probed &&
- platform->driver->probe_order == order) {
- ret = soc_probe_platform(card, platform);
- if (ret < 0)
- return ret;
- }
-
/* probe the CODEC DAI */
if (!codec_dai->probed && codec_dai->driver->probe_order == order) {
if (codec_dai->driver->probe) {
@@ -1565,14 +1634,27 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
goto card_probe_error;
}
- /* early DAI link probe */
+ /* probe all components used by DAI links on this card */
for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
order++) {
for (i = 0; i < card->num_links; i++) {
- ret = soc_probe_dai_link(card, i, order);
+ ret = soc_probe_link_components(card, i, order);
if (ret < 0) {
pr_err("asoc: failed to instantiate card %s: %d\n",
- card->name, ret);
+ card->name, ret);
+ goto probe_dai_err;
+ }
+ }
+ }
+
+ /* probe all DAI links on this card */
+ for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
+ order++) {
+ for (i = 0; i < card->num_links; i++) {
+ ret = soc_probe_link_dais(card, i, order);
+ if (ret < 0) {
+ pr_err("asoc: failed to instantiate card %s: %d\n",
+ card->name, ret);
goto probe_dai_err;
}
}
@@ -2790,6 +2872,104 @@ int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
EXPORT_SYMBOL_GPL(snd_soc_put_volsw_s8);
/**
+ * snd_soc_info_volsw_range - single mixer info callback with range.
+ * @kcontrol: mixer control
+ * @uinfo: control element information
+ *
+ * Callback to provide information, within a range, about a single
+ * mixer control.
+ *
+ * returns 0 for success.
+ */
+int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ int platform_max;
+ int min = mc->min;
+
+ if (!mc->platform_max)
+ mc->platform_max = mc->max;
+ platform_max = mc->platform_max;
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = platform_max - min;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_volsw_range);
+
+/**
+ * snd_soc_put_volsw_range - single mixer put value callback with range.
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to set the value, within a range, for a single mixer control.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ unsigned int reg = mc->reg;
+ unsigned int shift = mc->shift;
+ int min = mc->min;
+ int max = mc->max;
+ unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int invert = mc->invert;
+ unsigned int val, val_mask;
+
+ val = ((ucontrol->value.integer.value[0] + min) & mask);
+ if (invert)
+ val = max - val;
+ val_mask = mask << shift;
+ val = val << shift;
+
+ return snd_soc_update_bits_locked(codec, reg, val_mask, val);
+}
+EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range);
+
+/**
+ * snd_soc_get_volsw_range - single mixer get callback with range
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to get the value, within a range, of a single mixer control.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct soc_mixer_control *mc =
+ (struct soc_mixer_control *)kcontrol->private_value;
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ unsigned int reg = mc->reg;
+ unsigned int shift = mc->shift;
+ int min = mc->min;
+ int max = mc->max;
+ unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int invert = mc->invert;
+
+ ucontrol->value.integer.value[0] =
+ (snd_soc_read(codec, reg) >> shift) & mask;
+ if (invert)
+ ucontrol->value.integer.value[0] =
+ max - ucontrol->value.integer.value[0];
+ ucontrol->value.integer.value[0] =
+ ucontrol->value.integer.value[0] - min;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
+
+/**
* snd_soc_limit_volume - Set new limit to an existing volume control.
*
* @codec: where to look for the control
@@ -3346,6 +3526,12 @@ int snd_soc_register_card(struct snd_soc_card *card)
link->name);
return -EINVAL;
}
+ /* Codec DAI name must be specified */
+ if (!link->codec_dai_name) {
+ dev_err(card->dev, "codec_dai_name not set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
/*
* Platform may be specified by either name or OF node, but
@@ -3358,12 +3544,24 @@ int snd_soc_register_card(struct snd_soc_card *card)
}
/*
- * CPU DAI must be specified by 1 of name or OF node,
- * not both or neither.
+ * CPU device may be specified by either name or OF node, but
+ * can be left unspecified, and will be matched based on DAI
+ * name alone..
+ */
+ if (link->cpu_name && link->cpu_of_node) {
+ dev_err(card->dev,
+ "Neither/both cpu name/of_node are set for %s\n",
+ link->name);
+ return -EINVAL;
+ }
+ /*
+ * At least one of CPU DAI name or CPU device name/node must be
+ * specified
*/
- if (!!link->cpu_dai_name == !!link->cpu_dai_of_node) {
+ if (!link->cpu_dai_name &&
+ !(link->cpu_name || link->cpu_of_node)) {
dev_err(card->dev,
- "Neither/both cpu_dai name/of_node are set for %s\n",
+ "Neither cpu_dai_name nor cpu_name/of_node are set for %s\n",
link->name);
return -EINVAL;
}
@@ -3938,6 +4136,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
dev_err(card->dev,
"Property '%s' index %d could not be read: %d\n",
propname, 2 * i, ret);
+ kfree(routes);
return -EINVAL;
}
ret = of_property_read_string_index(np, propname,
@@ -3946,6 +4145,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
dev_err(card->dev,
"Property '%s' index %d could not be read: %d\n",
propname, (2 * i) + 1, ret);
+ kfree(routes);
return -EINVAL;
}
}
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 89eae93445cf..dd7c49fafd75 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -35,6 +35,7 @@
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -51,6 +52,7 @@ static int dapm_up_seq[] = {
[snd_soc_dapm_pre] = 0,
[snd_soc_dapm_supply] = 1,
[snd_soc_dapm_regulator_supply] = 1,
+ [snd_soc_dapm_clock_supply] = 1,
[snd_soc_dapm_micbias] = 2,
[snd_soc_dapm_dai_link] = 2,
[snd_soc_dapm_dai] = 3,
@@ -92,6 +94,7 @@ static int dapm_down_seq[] = {
[snd_soc_dapm_aif_out] = 10,
[snd_soc_dapm_dai] = 10,
[snd_soc_dapm_dai_link] = 11,
+ [snd_soc_dapm_clock_supply] = 12,
[snd_soc_dapm_regulator_supply] = 12,
[snd_soc_dapm_supply] = 12,
[snd_soc_dapm_post] = 13,
@@ -288,9 +291,9 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm,
if (dapm->codec->driver->set_bias_level)
ret = dapm->codec->driver->set_bias_level(dapm->codec,
level);
- else
- dapm->bias_level = level;
- }
+ } else
+ dapm->bias_level = level;
+
if (ret != 0)
goto out;
@@ -321,11 +324,10 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
val = soc_widget_read(w, reg);
val = (val >> shift) & mask;
+ if (invert)
+ val = max - val;
- if ((invert && !val) || (!invert && val))
- p->connect = 1;
- else
- p->connect = 0;
+ p->connect = !!val;
}
break;
case snd_soc_dapm_mux: {
@@ -391,6 +393,7 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
case snd_soc_dapm_vmid:
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
+ case snd_soc_dapm_clock_supply:
case snd_soc_dapm_aif_in:
case snd_soc_dapm_aif_out:
case snd_soc_dapm_dai:
@@ -764,6 +767,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
switch (widget->id) {
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
+ case snd_soc_dapm_clock_supply:
return 0;
default:
break;
@@ -850,6 +854,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
switch (widget->id) {
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
+ case snd_soc_dapm_clock_supply:
return 0;
default:
break;
@@ -996,6 +1001,27 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
}
EXPORT_SYMBOL_GPL(dapm_regulator_event);
+/*
+ * Handler for clock supply widget.
+ */
+int dapm_clock_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ if (!w->clk)
+ return -EIO;
+
+#ifdef CONFIG_HAVE_CLK
+ if (SND_SOC_DAPM_EVENT_ON(event)) {
+ return clk_enable(w->clk);
+ } else {
+ clk_disable(w->clk);
+ return 0;
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dapm_clock_event);
+
static int dapm_widget_power_check(struct snd_soc_dapm_widget *w)
{
if (w->power_checked)
@@ -1487,6 +1513,7 @@ static void dapm_widget_set_power(struct snd_soc_dapm_widget *w, bool power,
switch (w->id) {
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
+ case snd_soc_dapm_clock_supply:
/* Supplies can't affect their outputs, only their inputs */
break;
default:
@@ -1545,7 +1572,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
struct snd_soc_dapm_context *d;
LIST_HEAD(up_list);
LIST_HEAD(down_list);
- LIST_HEAD(async_domain);
+ ASYNC_DOMAIN_EXCLUSIVE(async_domain);
enum snd_soc_bias_level bias;
trace_snd_soc_dapm_start(card);
@@ -1570,7 +1597,15 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
}
list_for_each_entry(w, &card->widgets, list) {
- list_del_init(&w->dirty);
+ switch (w->id) {
+ case snd_soc_dapm_pre:
+ case snd_soc_dapm_post:
+ /* These widgets always need to be powered */
+ break;
+ default:
+ list_del_init(&w->dirty);
+ break;
+ }
if (w->power) {
d = w->dapm;
@@ -1587,6 +1622,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
break;
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
+ case snd_soc_dapm_clock_supply:
case snd_soc_dapm_micbias:
if (d->target_bias_level < SND_SOC_BIAS_STANDBY)
d->target_bias_level = SND_SOC_BIAS_STANDBY;
@@ -1941,6 +1977,7 @@ static ssize_t dapm_widget_show(struct device *dev,
case snd_soc_dapm_mixer_named_ctl:
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
+ case snd_soc_dapm_clock_supply:
if (w->name)
count += sprintf(buf + count, "%s: %s\n",
w->name, w->power ? "On":"Off");
@@ -2187,6 +2224,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
case snd_soc_dapm_post:
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
+ case snd_soc_dapm_clock_supply:
case snd_soc_dapm_aif_in:
case snd_soc_dapm_aif_out:
case snd_soc_dapm_dai:
@@ -2221,6 +2259,10 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
path->connect = 0;
return 0;
}
+
+ dapm_mark_dirty(wsource, "Route added");
+ dapm_mark_dirty(wsink, "Route added");
+
return 0;
err:
@@ -2230,6 +2272,59 @@ err:
return ret;
}
+static int snd_soc_dapm_del_route(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_route *route)
+{
+ struct snd_soc_dapm_path *path, *p;
+ const char *sink;
+ const char *source;
+ char prefixed_sink[80];
+ char prefixed_source[80];
+
+ if (route->control) {
+ dev_err(dapm->dev,
+ "Removal of routes with controls not supported\n");
+ return -EINVAL;
+ }
+
+ if (dapm->codec && dapm->codec->name_prefix) {
+ snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s",
+ dapm->codec->name_prefix, route->sink);
+ sink = prefixed_sink;
+ snprintf(prefixed_source, sizeof(prefixed_source), "%s %s",
+ dapm->codec->name_prefix, route->source);
+ source = prefixed_source;
+ } else {
+ sink = route->sink;
+ source = route->source;
+ }
+
+ path = NULL;
+ list_for_each_entry(p, &dapm->card->paths, list) {
+ if (strcmp(p->source->name, source) != 0)
+ continue;
+ if (strcmp(p->sink->name, sink) != 0)
+ continue;
+ path = p;
+ break;
+ }
+
+ if (path) {
+ dapm_mark_dirty(path->source, "Route removed");
+ dapm_mark_dirty(path->sink, "Route removed");
+
+ list_del(&path->list);
+ list_del(&path->list_sink);
+ list_del(&path->list_source);
+ kfree(path);
+ } else {
+ dev_warn(dapm->dev, "Route %s->%s does not exist\n",
+ source, sink);
+ }
+
+ return 0;
+}
+
/**
* snd_soc_dapm_add_routes - Add routes between DAPM widgets
* @dapm: DAPM context
@@ -2246,15 +2341,15 @@ err:
int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num)
{
- int i, ret = 0;
+ int i, r, ret = 0;
mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
for (i = 0; i < num; i++) {
- ret = snd_soc_dapm_add_route(dapm, route);
- if (ret < 0) {
+ r = snd_soc_dapm_add_route(dapm, route);
+ if (r < 0) {
dev_err(dapm->dev, "Failed to add route %s->%s\n",
route->source, route->sink);
- break;
+ ret = r;
}
route++;
}
@@ -2264,6 +2359,30 @@ int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes);
+/**
+ * snd_soc_dapm_del_routes - Remove routes between DAPM widgets
+ * @dapm: DAPM context
+ * @route: audio routes
+ * @num: number of routes
+ *
+ * Removes routes from the DAPM context.
+ */
+int snd_soc_dapm_del_routes(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_route *route, int num)
+{
+ int i, ret = 0;
+
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
+ for (i = 0; i < num; i++) {
+ snd_soc_dapm_del_route(dapm, route);
+ route++;
+ }
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_del_routes);
+
static int snd_soc_dapm_weak_route(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route)
{
@@ -2434,23 +2553,20 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
(struct soc_mixer_control *)kcontrol->private_value;
unsigned int reg = mc->reg;
unsigned int shift = mc->shift;
- unsigned int rshift = mc->rshift;
int max = mc->max;
- unsigned int invert = mc->invert;
unsigned int mask = (1 << fls(max)) - 1;
+ unsigned int invert = mc->invert;
+
+ if (snd_soc_volsw_is_stereo(mc))
+ dev_warn(widget->dapm->dev,
+ "Control '%s' is stereo, which is not supported\n",
+ kcontrol->id.name);
ucontrol->value.integer.value[0] =
(snd_soc_read(widget->codec, reg) >> shift) & mask;
- if (shift != rshift)
- ucontrol->value.integer.value[1] =
- (snd_soc_read(widget->codec, reg) >> rshift) & mask;
- if (invert) {
+ if (invert)
ucontrol->value.integer.value[0] =
max - ucontrol->value.integer.value[0];
- if (shift != rshift)
- ucontrol->value.integer.value[1] =
- max - ucontrol->value.integer.value[1];
- }
return 0;
}
@@ -2484,20 +2600,19 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
struct snd_soc_dapm_update update;
int wi;
+ if (snd_soc_volsw_is_stereo(mc))
+ dev_warn(widget->dapm->dev,
+ "Control '%s' is stereo, which is not supported\n",
+ kcontrol->id.name);
+
val = (ucontrol->value.integer.value[0] & mask);
+ connect = !!val;
if (invert)
val = max - val;
mask = mask << shift;
val = val << shift;
- if (val)
- /* new connection */
- connect = invert ? 0 : 1;
- else
- /* old connection must be powered down */
- connect = invert ? 1 : 0;
-
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
change = snd_soc_test_bits(widget->codec, reg, mask, val);
@@ -2873,6 +2988,19 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
return NULL;
}
break;
+ case snd_soc_dapm_clock_supply:
+#ifdef CONFIG_CLKDEV_LOOKUP
+ w->clk = devm_clk_get(dapm->dev, w->name);
+ if (IS_ERR(w->clk)) {
+ ret = PTR_ERR(w->clk);
+ dev_err(dapm->dev, "Failed to request %s: %d\n",
+ w->name, ret);
+ return NULL;
+ }
+#else
+ return NULL;
+#endif
+ break;
default:
break;
}
@@ -2924,6 +3052,7 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
break;
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
+ case snd_soc_dapm_clock_supply:
w->power_check = dapm_supply_check_power;
break;
case snd_soc_dapm_dai:
@@ -3538,10 +3667,13 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_free);
static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
{
+ struct snd_soc_card *card = dapm->card;
struct snd_soc_dapm_widget *w;
LIST_HEAD(down_list);
int powerdown = 0;
+ mutex_lock(&card->dapm_mutex);
+
list_for_each_entry(w, &dapm->card->widgets, list) {
if (w->dapm != dapm)
continue;
@@ -3564,6 +3696,8 @@ static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
snd_soc_dapm_set_bias_level(dapm,
SND_SOC_BIAS_STANDBY);
}
+
+ mutex_unlock(&card->dapm_mutex);
}
/*
diff --git a/sound/soc/soc-dmaengine-pcm.c b/sound/soc/soc-dmaengine-pcm.c
index 475695234b3d..5df529eda251 100644
--- a/sound/soc/soc-dmaengine-pcm.c
+++ b/sound/soc/soc-dmaengine-pcm.c
@@ -30,6 +30,7 @@
struct dmaengine_pcm_runtime_data {
struct dma_chan *dma_chan;
+ dma_cookie_t cookie;
unsigned int pos;
@@ -153,7 +154,7 @@ static int dmaengine_pcm_prepare_and_submit(struct snd_pcm_substream *substream)
desc->callback = dmaengine_pcm_dma_complete;
desc->callback_param = substream;
- dmaengine_submit(desc);
+ prtd->cookie = dmaengine_submit(desc);
return 0;
}
@@ -200,6 +201,20 @@ int snd_dmaengine_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
/**
+ * snd_dmaengine_pcm_pointer_no_residue - dmaengine based PCM pointer implementation
+ * @substream: PCM substream
+ *
+ * This function is deprecated and should not be used by new drivers, as its
+ * results may be unreliable.
+ */
+snd_pcm_uframes_t snd_dmaengine_pcm_pointer_no_residue(struct snd_pcm_substream *substream)
+{
+ struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
+ return bytes_to_frames(substream->runtime, prtd->pos);
+}
+EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer_no_residue);
+
+/**
* snd_dmaengine_pcm_pointer - dmaengine based PCM pointer implementation
* @substream: PCM substream
*
@@ -209,7 +224,19 @@ EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_trigger);
snd_pcm_uframes_t snd_dmaengine_pcm_pointer(struct snd_pcm_substream *substream)
{
struct dmaengine_pcm_runtime_data *prtd = substream_to_prtd(substream);
- return bytes_to_frames(substream->runtime, prtd->pos);
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int buf_size;
+ unsigned int pos = 0;
+
+ status = dmaengine_tx_status(prtd->dma_chan, prtd->cookie, &state);
+ if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
+ buf_size = snd_pcm_lib_buffer_bytes(substream);
+ if (state.residue > 0 && state.residue <= buf_size)
+ pos = buf_size - state.residue;
+ }
+
+ return bytes_to_frames(substream->runtime, pos);
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_pointer);
@@ -243,7 +270,7 @@ static int dmaengine_pcm_request_channel(struct dmaengine_pcm_runtime_data *prtd
* Note that this function will use private_data field of the substream's
* runtime. So it is not availabe to your pcm driver implementation. If you need
* to keep additional data attached to a substream use
- * snd_dmaeinge_pcm_{set,get}_data.
+ * snd_dmaengine_pcm_{set,get}_data.
*/
int snd_dmaengine_pcm_open(struct snd_pcm_substream *substream,
dma_filter_fn filter_fn, void *filter_data)
diff --git a/sound/soc/soc-io.c b/sound/soc/soc-io.c
index 4d8dc6a27d4d..29183ef2b93d 100644
--- a/sound/soc/soc-io.c
+++ b/sound/soc/soc-io.c
@@ -142,11 +142,16 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
case SND_SOC_REGMAP:
/* Device has made its own regmap arrangements */
codec->using_regmap = true;
-
- ret = regmap_get_val_bytes(codec->control_data);
- /* Errors are legitimate for non-integer byte multiples */
- if (ret > 0)
- codec->val_bytes = ret;
+ if (!codec->control_data)
+ codec->control_data = dev_get_regmap(codec->dev, NULL);
+
+ if (codec->control_data) {
+ ret = regmap_get_val_bytes(codec->control_data);
+ /* Errors are legitimate for non-integer byte
+ * multiples */
+ if (ret > 0)
+ codec->val_bytes = ret;
+ }
break;
default:
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 48fd15b312c1..ef22d0bd9e9e 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1955,10 +1955,8 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
fe->dpcm[stream].runtime = fe_substream->runtime;
if (dpcm_path_get(fe, stream, &list) <= 0) {
- dev_warn(fe->dev, "asoc: %s no valid %s route\n",
+ dev_dbg(fe->dev, "asoc: %s no valid %s route\n",
fe->dai_link->name, stream ? "capture" : "playback");
- mutex_unlock(&fe->card->mutex);
- return -EINVAL;
}
/* calculate valid and active FE <-> BE dpcms */
@@ -2003,7 +2001,6 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
/* create a new pcm */
int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
{
- struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
@@ -2042,7 +2039,8 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
capture, &pcm);
}
if (ret < 0) {
- printk(KERN_ERR "asoc: can't create pcm for codec %s\n", codec->name);
+ dev_err(rtd->card->dev, "can't create pcm for %s\n",
+ rtd->dai_link->name);
return ret;
}
dev_dbg(rtd->card->dev, "registered pcm #%d %s\n",num, new_name);
@@ -2099,14 +2097,14 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
if (platform->driver->pcm_new) {
ret = platform->driver->pcm_new(rtd);
if (ret < 0) {
- pr_err("asoc: platform pcm constructor failed\n");
+ dev_err(platform->dev, "pcm constructor failed\n");
return ret;
}
}
pcm->private_free = platform->driver->pcm_free;
out:
- printk(KERN_INFO "asoc: %s <-> %s mapping ok\n", codec_dai->name,
+ dev_info(rtd->card->dev, " %s <-> %s mapping ok\n", codec_dai->name,
cpu_dai->name);
return ret;
}
diff --git a/sound/soc/spear/spdif_in.c b/sound/soc/spear/spdif_in.c
new file mode 100644
index 000000000000..c7c4b20395bb
--- /dev/null
+++ b/sound/soc/spear/spdif_in.c
@@ -0,0 +1,297 @@
+/*
+ * ALSA SoC SPDIF In Audio Layer for spear processors
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Vipin Kumar <vipin.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/spear_dma.h>
+#include <sound/spear_spdif.h>
+#include "spdif_in_regs.h"
+
+struct spdif_in_params {
+ u32 format;
+};
+
+struct spdif_in_dev {
+ struct clk *clk;
+ struct spear_dma_data dma_params;
+ struct spdif_in_params saved_params;
+ void *io_base;
+ struct device *dev;
+ void (*reset_perip)(void);
+ int irq;
+};
+
+static void spdif_in_configure(struct spdif_in_dev *host)
+{
+ u32 ctrl = SPDIF_IN_PRTYEN | SPDIF_IN_STATEN | SPDIF_IN_USREN |
+ SPDIF_IN_VALEN | SPDIF_IN_BLKEN;
+ ctrl |= SPDIF_MODE_16BIT | SPDIF_FIFO_THRES_16;
+
+ writel(ctrl, host->io_base + SPDIF_IN_CTRL);
+ writel(0xF, host->io_base + SPDIF_IN_IRQ_MASK);
+}
+
+static int spdif_in_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct spdif_in_dev *host = snd_soc_dai_get_drvdata(cpu_dai);
+
+ if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
+ return -EINVAL;
+
+ snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)&host->dma_params);
+ return 0;
+}
+
+static void spdif_in_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai);
+
+ if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
+ return;
+
+ writel(0x0, host->io_base + SPDIF_IN_IRQ_MASK);
+ snd_soc_dai_set_dma_data(dai, substream, NULL);
+}
+
+static void spdif_in_format(struct spdif_in_dev *host, u32 format)
+{
+ u32 ctrl = readl(host->io_base + SPDIF_IN_CTRL);
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ ctrl |= SPDIF_XTRACT_16BIT;
+ break;
+
+ case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE:
+ ctrl &= ~SPDIF_XTRACT_16BIT;
+ break;
+ }
+
+ writel(ctrl, host->io_base + SPDIF_IN_CTRL);
+}
+
+static int spdif_in_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai);
+ u32 format;
+
+ if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
+ return -EINVAL;
+
+ format = params_format(params);
+ host->saved_params.format = format;
+
+ return 0;
+}
+
+static int spdif_in_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai);
+ u32 ctrl;
+ int ret = 0;
+
+ if (substream->stream != SNDRV_PCM_STREAM_CAPTURE)
+ return -EINVAL;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ clk_enable(host->clk);
+ spdif_in_configure(host);
+ spdif_in_format(host, host->saved_params.format);
+
+ ctrl = readl(host->io_base + SPDIF_IN_CTRL);
+ ctrl |= SPDIF_IN_SAMPLE | SPDIF_IN_ENB;
+ writel(ctrl, host->io_base + SPDIF_IN_CTRL);
+ writel(0xF, host->io_base + SPDIF_IN_IRQ_MASK);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ctrl = readl(host->io_base + SPDIF_IN_CTRL);
+ ctrl &= ~(SPDIF_IN_SAMPLE | SPDIF_IN_ENB);
+ writel(ctrl, host->io_base + SPDIF_IN_CTRL);
+ writel(0x0, host->io_base + SPDIF_IN_IRQ_MASK);
+
+ if (host->reset_perip)
+ host->reset_perip();
+ clk_disable(host->clk);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static struct snd_soc_dai_ops spdif_in_dai_ops = {
+ .startup = spdif_in_startup,
+ .shutdown = spdif_in_shutdown,
+ .trigger = spdif_in_trigger,
+ .hw_params = spdif_in_hw_params,
+};
+
+struct snd_soc_dai_driver spdif_in_dai = {
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | \
+ SNDRV_PCM_RATE_192000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE,
+ },
+ .ops = &spdif_in_dai_ops,
+};
+
+static irqreturn_t spdif_in_irq(int irq, void *arg)
+{
+ struct spdif_in_dev *host = (struct spdif_in_dev *)arg;
+
+ u32 irq_status = readl(host->io_base + SPDIF_IN_IRQ);
+
+ if (!irq_status)
+ return IRQ_NONE;
+
+ if (irq_status & SPDIF_IRQ_FIFOWRITE)
+ dev_err(host->dev, "spdif in: fifo write error");
+ if (irq_status & SPDIF_IRQ_EMPTYFIFOREAD)
+ dev_err(host->dev, "spdif in: empty fifo read error");
+ if (irq_status & SPDIF_IRQ_FIFOFULL)
+ dev_err(host->dev, "spdif in: fifo full error");
+ if (irq_status & SPDIF_IRQ_OUTOFRANGE)
+ dev_err(host->dev, "spdif in: out of range error");
+
+ writel(0, host->io_base + SPDIF_IN_IRQ);
+
+ return IRQ_HANDLED;
+}
+
+static int spdif_in_probe(struct platform_device *pdev)
+{
+ struct spdif_in_dev *host;
+ struct spear_spdif_platform_data *pdata;
+ struct resource *res, *res_fifo;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ res_fifo = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res_fifo)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name)) {
+ dev_warn(&pdev->dev, "Failed to get memory resourse\n");
+ return -ENOENT;
+ }
+
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ dev_warn(&pdev->dev, "kzalloc fail\n");
+ return -ENOMEM;
+ }
+
+ host->io_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!host->io_base) {
+ dev_warn(&pdev->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ host->irq = platform_get_irq(pdev, 0);
+ if (host->irq < 0)
+ return -EINVAL;
+
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ if (!pdata)
+ return -EINVAL;
+
+ host->dma_params.data = pdata->dma_params;
+ host->dma_params.addr = res_fifo->start;
+ host->dma_params.max_burst = 16;
+ host->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_params.filter = pdata->filter;
+ host->reset_perip = pdata->reset_perip;
+
+ host->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, host);
+
+ ret = devm_request_irq(&pdev->dev, host->irq, spdif_in_irq, 0,
+ "spdif-in", host);
+ if (ret) {
+ clk_put(host->clk);
+ dev_warn(&pdev->dev, "request_irq failed\n");
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&pdev->dev, &spdif_in_dai);
+ if (ret != 0) {
+ clk_put(host->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spdif_in_remove(struct platform_device *pdev)
+{
+ struct spdif_in_dev *host = dev_get_drvdata(&pdev->dev);
+
+ snd_soc_unregister_dai(&pdev->dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ clk_put(host->clk);
+
+ return 0;
+}
+
+
+static struct platform_driver spdif_in_driver = {
+ .probe = spdif_in_probe,
+ .remove = spdif_in_remove,
+ .driver = {
+ .name = "spdif-in",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(spdif_in_driver);
+
+MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>");
+MODULE_DESCRIPTION("SPEAr SPDIF IN SoC Interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:spdif_in");
diff --git a/sound/soc/spear/spdif_in_regs.h b/sound/soc/spear/spdif_in_regs.h
new file mode 100644
index 000000000000..37af7bc66b7f
--- /dev/null
+++ b/sound/soc/spear/spdif_in_regs.h
@@ -0,0 +1,60 @@
+/*
+ * SPEAr SPDIF IN controller header file
+ *
+ * Copyright (ST) 2011 Vipin Kumar (vipin.kumar@st.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef SPDIF_IN_REGS_H
+#define SPDIF_IN_REGS_H
+
+#define SPDIF_IN_CTRL 0x00
+ #define SPDIF_IN_PRTYEN (1 << 20)
+ #define SPDIF_IN_STATEN (1 << 19)
+ #define SPDIF_IN_USREN (1 << 18)
+ #define SPDIF_IN_VALEN (1 << 17)
+ #define SPDIF_IN_BLKEN (1 << 16)
+
+ #define SPDIF_MODE_24BIT (8 << 12)
+ #define SPDIF_MODE_23BIT (7 << 12)
+ #define SPDIF_MODE_22BIT (6 << 12)
+ #define SPDIF_MODE_21BIT (5 << 12)
+ #define SPDIF_MODE_20BIT (4 << 12)
+ #define SPDIF_MODE_19BIT (3 << 12)
+ #define SPDIF_MODE_18BIT (2 << 12)
+ #define SPDIF_MODE_17BIT (1 << 12)
+ #define SPDIF_MODE_16BIT (0 << 12)
+ #define SPDIF_MODE_MASK (0x0F << 12)
+
+ #define SPDIF_IN_VALID (1 << 11)
+ #define SPDIF_IN_SAMPLE (1 << 10)
+ #define SPDIF_DATA_SWAP (1 << 9)
+ #define SPDIF_IN_ENB (1 << 8)
+ #define SPDIF_DATA_REVERT (1 << 7)
+ #define SPDIF_XTRACT_16BIT (1 << 6)
+ #define SPDIF_FIFO_THRES_16 (16 << 0)
+
+#define SPDIF_IN_IRQ_MASK 0x04
+#define SPDIF_IN_IRQ 0x08
+ #define SPDIF_IRQ_FIFOWRITE (1 << 0)
+ #define SPDIF_IRQ_EMPTYFIFOREAD (1 << 1)
+ #define SPDIF_IRQ_FIFOFULL (1 << 2)
+ #define SPDIF_IRQ_OUTOFRANGE (1 << 3)
+
+#define SPDIF_IN_STA 0x0C
+ #define SPDIF_IN_LOCK (0x1 << 0)
+
+#endif /* SPDIF_IN_REGS_H */
diff --git a/sound/soc/spear/spdif_out.c b/sound/soc/spear/spdif_out.c
new file mode 100644
index 000000000000..5eac4cda2fd7
--- /dev/null
+++ b/sound/soc/spear/spdif_out.c
@@ -0,0 +1,389 @@
+/*
+ * ALSA SoC SPDIF Out Audio Layer for spear processors
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Vipin Kumar <vipin.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/soc.h>
+#include <sound/spear_dma.h>
+#include <sound/spear_spdif.h>
+#include "spdif_out_regs.h"
+
+struct spdif_out_params {
+ u32 rate;
+ u32 core_freq;
+ u32 mute;
+};
+
+struct spdif_out_dev {
+ struct clk *clk;
+ struct spear_dma_data dma_params;
+ struct spdif_out_params saved_params;
+ u32 running;
+ void __iomem *io_base;
+};
+
+static void spdif_out_configure(struct spdif_out_dev *host)
+{
+ writel(SPDIF_OUT_RESET, host->io_base + SPDIF_OUT_SOFT_RST);
+ mdelay(1);
+ writel(readl(host->io_base + SPDIF_OUT_SOFT_RST) & ~SPDIF_OUT_RESET,
+ host->io_base + SPDIF_OUT_SOFT_RST);
+
+ writel(SPDIF_OUT_FDMA_TRIG_16 | SPDIF_OUT_MEMFMT_16_16 |
+ SPDIF_OUT_VALID_HW | SPDIF_OUT_USER_HW |
+ SPDIF_OUT_CHNLSTA_HW | SPDIF_OUT_PARITY_HW,
+ host->io_base + SPDIF_OUT_CFG);
+
+ writel(0x7F, host->io_base + SPDIF_OUT_INT_STA_CLR);
+ writel(0x7F, host->io_base + SPDIF_OUT_INT_EN_CLR);
+}
+
+static int spdif_out_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct spdif_out_dev *host = snd_soc_dai_get_drvdata(cpu_dai);
+ int ret;
+
+ if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
+ return -EINVAL;
+
+ snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)&host->dma_params);
+
+ ret = clk_enable(host->clk);
+ if (ret)
+ return ret;
+
+ host->running = true;
+ spdif_out_configure(host);
+
+ return 0;
+}
+
+static void spdif_out_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct spdif_out_dev *host = snd_soc_dai_get_drvdata(dai);
+
+ if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
+ return;
+
+ clk_disable(host->clk);
+ host->running = false;
+ snd_soc_dai_set_dma_data(dai, substream, NULL);
+}
+
+static void spdif_out_clock(struct spdif_out_dev *host, u32 core_freq,
+ u32 rate)
+{
+ u32 divider, ctrl;
+
+ clk_set_rate(host->clk, core_freq);
+ divider = DIV_ROUND_CLOSEST(clk_get_rate(host->clk), (rate * 128));
+
+ ctrl = readl(host->io_base + SPDIF_OUT_CTRL);
+ ctrl &= ~SPDIF_DIVIDER_MASK;
+ ctrl |= (divider << SPDIF_DIVIDER_SHIFT) & SPDIF_DIVIDER_MASK;
+ writel(ctrl, host->io_base + SPDIF_OUT_CTRL);
+}
+
+static int spdif_out_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct spdif_out_dev *host = snd_soc_dai_get_drvdata(dai);
+ u32 rate, core_freq;
+
+ if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
+ return -EINVAL;
+
+ rate = params_rate(params);
+
+ switch (rate) {
+ case 8000:
+ case 16000:
+ case 32000:
+ case 64000:
+ /*
+ * The clock is multiplied by 10 to bring it to feasible range
+ * of frequencies for sscg
+ */
+ core_freq = 64000 * 128 * 10; /* 81.92 MHz */
+ break;
+ case 5512:
+ case 11025:
+ case 22050:
+ case 44100:
+ case 88200:
+ case 176400:
+ core_freq = 176400 * 128; /* 22.5792 MHz */
+ break;
+ case 48000:
+ case 96000:
+ case 192000:
+ default:
+ core_freq = 192000 * 128; /* 24.576 MHz */
+ break;
+ }
+
+ spdif_out_clock(host, core_freq, rate);
+ host->saved_params.core_freq = core_freq;
+ host->saved_params.rate = rate;
+
+ return 0;
+}
+
+static int spdif_out_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct spdif_out_dev *host = snd_soc_dai_get_drvdata(dai);
+ u32 ctrl;
+ int ret = 0;
+
+ if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
+ return -EINVAL;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ctrl = readl(host->io_base + SPDIF_OUT_CTRL);
+ ctrl &= ~SPDIF_OPMODE_MASK;
+ if (!host->saved_params.mute)
+ ctrl |= SPDIF_OPMODE_AUD_DATA |
+ SPDIF_STATE_NORMAL;
+ else
+ ctrl |= SPDIF_OPMODE_MUTE_PCM;
+ writel(ctrl, host->io_base + SPDIF_OUT_CTRL);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ctrl = readl(host->io_base + SPDIF_OUT_CTRL);
+ ctrl &= ~SPDIF_OPMODE_MASK;
+ ctrl |= SPDIF_OPMODE_OFF;
+ writel(ctrl, host->io_base + SPDIF_OUT_CTRL);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int spdif_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct spdif_out_dev *host = snd_soc_dai_get_drvdata(dai);
+ u32 val;
+
+ host->saved_params.mute = mute;
+ val = readl(host->io_base + SPDIF_OUT_CTRL);
+ val &= ~SPDIF_OPMODE_MASK;
+
+ if (mute)
+ val |= SPDIF_OPMODE_MUTE_PCM;
+ else {
+ if (host->running)
+ val |= SPDIF_OPMODE_AUD_DATA | SPDIF_STATE_NORMAL;
+ else
+ val |= SPDIF_OPMODE_OFF;
+ }
+
+ writel(val, host->io_base + SPDIF_OUT_CTRL);
+ return 0;
+}
+
+static int spdif_mute_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_card *card = codec->card;
+ struct snd_soc_pcm_runtime *rtd = card->rtd;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct spdif_out_dev *host = snd_soc_dai_get_drvdata(cpu_dai);
+
+ ucontrol->value.integer.value[0] = host->saved_params.mute;
+ return 0;
+}
+
+static int spdif_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_card *card = codec->card;
+ struct snd_soc_pcm_runtime *rtd = card->rtd;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct spdif_out_dev *host = snd_soc_dai_get_drvdata(cpu_dai);
+
+ if (host->saved_params.mute == ucontrol->value.integer.value[0])
+ return 0;
+
+ spdif_digital_mute(cpu_dai, ucontrol->value.integer.value[0]);
+
+ return 1;
+}
+static const struct snd_kcontrol_new spdif_out_controls[] = {
+ SOC_SINGLE_BOOL_EXT("IEC958 Playback Switch", 0,
+ spdif_mute_get, spdif_mute_put),
+};
+
+int spdif_soc_dai_probe(struct snd_soc_dai *dai)
+{
+ return snd_soc_add_dai_controls(dai, spdif_out_controls,
+ ARRAY_SIZE(spdif_out_controls));
+}
+
+static const struct snd_soc_dai_ops spdif_out_dai_ops = {
+ .digital_mute = spdif_digital_mute,
+ .startup = spdif_out_startup,
+ .shutdown = spdif_out_shutdown,
+ .trigger = spdif_out_trigger,
+ .hw_params = spdif_out_hw_params,
+};
+
+static struct snd_soc_dai_driver spdif_out_dai = {
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | \
+ SNDRV_PCM_RATE_192000),
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .probe = spdif_soc_dai_probe,
+ .ops = &spdif_out_dai_ops,
+};
+
+static int spdif_out_probe(struct platform_device *pdev)
+{
+ struct spdif_out_dev *host;
+ struct spear_spdif_platform_data *pdata;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name)) {
+ dev_warn(&pdev->dev, "Failed to get memory resourse\n");
+ return -ENOENT;
+ }
+
+ host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+ if (!host) {
+ dev_warn(&pdev->dev, "kzalloc fail\n");
+ return -ENOMEM;
+ }
+
+ host->io_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!host->io_base) {
+ dev_warn(&pdev->dev, "ioremap failed\n");
+ return -ENOMEM;
+ }
+
+ host->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(host->clk))
+ return PTR_ERR(host->clk);
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ host->dma_params.data = pdata->dma_params;
+ host->dma_params.addr = res->start + SPDIF_OUT_FIFO_DATA;
+ host->dma_params.max_burst = 16;
+ host->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_params.filter = pdata->filter;
+
+ dev_set_drvdata(&pdev->dev, host);
+
+ ret = snd_soc_register_dai(&pdev->dev, &spdif_out_dai);
+ if (ret != 0) {
+ clk_put(host->clk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int spdif_out_remove(struct platform_device *pdev)
+{
+ struct spdif_out_dev *host = dev_get_drvdata(&pdev->dev);
+
+ snd_soc_unregister_dai(&pdev->dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ clk_put(host->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int spdif_out_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spdif_out_dev *host = dev_get_drvdata(&pdev->dev);
+
+ if (host->running)
+ clk_disable(host->clk);
+
+ return 0;
+}
+
+static int spdif_out_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct spdif_out_dev *host = dev_get_drvdata(&pdev->dev);
+
+ if (host->running) {
+ clk_enable(host->clk);
+ spdif_out_configure(host);
+ spdif_out_clock(host, host->saved_params.core_freq,
+ host->saved_params.rate);
+ }
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(spdif_out_dev_pm_ops, spdif_out_suspend, \
+ spdif_out_resume);
+
+#define SPDIF_OUT_DEV_PM_OPS (&spdif_out_dev_pm_ops)
+
+#else
+#define SPDIF_OUT_DEV_PM_OPS NULL
+
+#endif
+
+static struct platform_driver spdif_out_driver = {
+ .probe = spdif_out_probe,
+ .remove = spdif_out_remove,
+ .driver = {
+ .name = "spdif-out",
+ .owner = THIS_MODULE,
+ .pm = SPDIF_OUT_DEV_PM_OPS,
+ },
+};
+
+module_platform_driver(spdif_out_driver);
+
+MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>");
+MODULE_DESCRIPTION("SPEAr SPDIF OUT SoC Interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:spdif_out");
diff --git a/sound/soc/spear/spdif_out_regs.h b/sound/soc/spear/spdif_out_regs.h
new file mode 100644
index 000000000000..a5e53324b452
--- /dev/null
+++ b/sound/soc/spear/spdif_out_regs.h
@@ -0,0 +1,79 @@
+/*
+ * SPEAr SPDIF OUT controller header file
+ *
+ * Copyright (ST) 2011 Vipin Kumar (vipin.kumar@st.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef SPDIF_OUT_REGS_H
+#define SPDIF_OUT_REGS_H
+
+#define SPDIF_OUT_SOFT_RST 0x00
+ #define SPDIF_OUT_RESET (1 << 0)
+#define SPDIF_OUT_FIFO_DATA 0x04
+#define SPDIF_OUT_INT_STA 0x08
+#define SPDIF_OUT_INT_STA_CLR 0x0C
+ #define SPDIF_INT_UNDERFLOW (1 << 0)
+ #define SPDIF_INT_EODATA (1 << 1)
+ #define SPDIF_INT_EOBLOCK (1 << 2)
+ #define SPDIF_INT_EOLATENCY (1 << 3)
+ #define SPDIF_INT_EOPD_DATA (1 << 4)
+ #define SPDIF_INT_MEMFULLREAD (1 << 5)
+ #define SPDIF_INT_EOPD_PAUSE (1 << 6)
+
+#define SPDIF_OUT_INT_EN 0x10
+#define SPDIF_OUT_INT_EN_SET 0x14
+#define SPDIF_OUT_INT_EN_CLR 0x18
+#define SPDIF_OUT_CTRL 0x1C
+ #define SPDIF_OPMODE_MASK (7 << 0)
+ #define SPDIF_OPMODE_OFF (0 << 0)
+ #define SPDIF_OPMODE_MUTE_PCM (1 << 0)
+ #define SPDIF_OPMODE_MUTE_PAUSE (2 << 0)
+ #define SPDIF_OPMODE_AUD_DATA (3 << 0)
+ #define SPDIF_OPMODE_ENCODE (4 << 0)
+ #define SPDIF_STATE_NORMAL (1 << 3)
+ #define SPDIF_DIVIDER_MASK (0xff << 5)
+ #define SPDIF_DIVIDER_SHIFT (5)
+ #define SPDIF_SAMPLEREAD_MASK (0x1ffff << 15)
+ #define SPDIF_SAMPLEREAD_SHIFT (15)
+#define SPDIF_OUT_STA 0x20
+#define SPDIF_OUT_PA_PB 0x24
+#define SPDIF_OUT_PC_PD 0x28
+#define SPDIF_OUT_CL1 0x2C
+#define SPDIF_OUT_CR1 0x30
+#define SPDIF_OUT_CL2_CR2_UV 0x34
+#define SPDIF_OUT_PAUSE_LAT 0x38
+#define SPDIF_OUT_FRMLEN_BRST 0x3C
+#define SPDIF_OUT_CFG 0x40
+ #define SPDIF_OUT_MEMFMT_16_0 (0 << 5)
+ #define SPDIF_OUT_MEMFMT_16_16 (1 << 5)
+ #define SPDIF_OUT_VALID_DMA (0 << 3)
+ #define SPDIF_OUT_VALID_HW (1 << 3)
+ #define SPDIF_OUT_USER_DMA (0 << 2)
+ #define SPDIF_OUT_USER_HW (1 << 2)
+ #define SPDIF_OUT_CHNLSTA_DMA (0 << 1)
+ #define SPDIF_OUT_CHNLSTA_HW (1 << 1)
+ #define SPDIF_OUT_PARITY_HW (0 << 0)
+ #define SPDIF_OUT_PARITY_DMA (1 << 0)
+ #define SPDIF_OUT_FDMA_TRIG_2 (2 << 8)
+ #define SPDIF_OUT_FDMA_TRIG_6 (6 << 8)
+ #define SPDIF_OUT_FDMA_TRIG_8 (8 << 8)
+ #define SPDIF_OUT_FDMA_TRIG_10 (10 << 8)
+ #define SPDIF_OUT_FDMA_TRIG_12 (12 << 8)
+ #define SPDIF_OUT_FDMA_TRIG_16 (16 << 8)
+ #define SPDIF_OUT_FDMA_TRIG_18 (18 << 8)
+
+#endif /* SPDIF_OUT_REGS_H */
diff --git a/sound/soc/spear/spear_pcm.c b/sound/soc/spear/spear_pcm.c
new file mode 100644
index 000000000000..97c2cac8e92c
--- /dev/null
+++ b/sound/soc/spear/spear_pcm.c
@@ -0,0 +1,214 @@
+/*
+ * ALSA PCM interface for ST SPEAr Processors
+ *
+ * sound/soc/spear/spear_pcm.c
+ *
+ * Copyright (C) 2012 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/dmaengine_pcm.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/spear_dma.h>
+
+struct snd_pcm_hardware spear_pcm_hardware = {
+ .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+ .buffer_bytes_max = 16 * 1024, /* max buffer size */
+ .period_bytes_min = 2 * 1024, /* 1 msec data minimum period size */
+ .period_bytes_max = 2 * 1024, /* maximum period size */
+ .periods_min = 1, /* min # periods */
+ .periods_max = 8, /* max # of periods */
+ .fifo_size = 0, /* fifo size in bytes */
+};
+
+static int spear_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+
+ return 0;
+}
+
+static int spear_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ snd_pcm_set_runtime_buffer(substream, NULL);
+
+ return 0;
+}
+
+static int spear_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ struct spear_dma_data *dma_data = (struct spear_dma_data *)
+ snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+ int ret;
+
+ ret = snd_soc_set_runtime_hwparams(substream, &spear_pcm_hardware);
+ if (ret)
+ return ret;
+
+ ret = snd_dmaengine_pcm_open(substream, dma_data->filter, dma_data);
+ if (ret)
+ return ret;
+
+ snd_dmaengine_pcm_set_data(substream, dma_data);
+
+ return 0;
+}
+
+static int spear_pcm_close(struct snd_pcm_substream *substream)
+{
+
+ snd_dmaengine_pcm_close(substream);
+
+ return 0;
+}
+
+static int spear_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ return dma_mmap_writecombine(substream->pcm->card->dev, vma,
+ runtime->dma_area, runtime->dma_addr,
+ runtime->dma_bytes);
+}
+
+static struct snd_pcm_ops spear_pcm_ops = {
+ .open = spear_pcm_open,
+ .close = spear_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = spear_pcm_hw_params,
+ .hw_free = spear_pcm_hw_free,
+ .trigger = snd_dmaengine_pcm_trigger,
+ .pointer = snd_dmaengine_pcm_pointer,
+ .mmap = spear_pcm_mmap,
+};
+
+static int
+spear_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream,
+ size_t size)
+{
+ struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+ struct snd_dma_buffer *buf = &substream->dma_buffer;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = pcm->card->dev;
+ buf->private_data = NULL;
+
+ buf->area = dma_alloc_writecombine(pcm->card->dev, size,
+ &buf->addr, GFP_KERNEL);
+ if (!buf->area)
+ return -ENOMEM;
+
+ dev_info(buf->dev.dev,
+ " preallocate_dma_buffer: area=%p, addr=%p, size=%d\n",
+ (void *)buf->area, (void *)buf->addr, size);
+
+ buf->bytes = size;
+ return 0;
+}
+
+static void spear_pcm_free(struct snd_pcm *pcm)
+{
+ struct snd_pcm_substream *substream;
+ struct snd_dma_buffer *buf;
+ int stream;
+
+ for (stream = 0; stream < 2; stream++) {
+ substream = pcm->streams[stream].substream;
+ if (!substream)
+ continue;
+
+ buf = &substream->dma_buffer;
+ if (!buf && !buf->area)
+ continue;
+
+ dma_free_writecombine(pcm->card->dev, buf->bytes,
+ buf->area, buf->addr);
+ buf->area = NULL;
+ }
+}
+
+static u64 spear_pcm_dmamask = DMA_BIT_MASK(32);
+
+static int spear_pcm_new(struct snd_card *card,
+ struct snd_soc_dai *dai, struct snd_pcm *pcm)
+{
+ int ret;
+
+ if (!card->dev->dma_mask)
+ card->dev->dma_mask = &spear_pcm_dmamask;
+ if (!card->dev->coherent_dma_mask)
+ card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+
+ if (dai->driver->playback.channels_min) {
+ ret = spear_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_PLAYBACK,
+ spear_pcm_hardware.buffer_bytes_max);
+ if (ret)
+ return ret;
+ }
+
+ if (dai->driver->capture.channels_min) {
+ ret = spear_pcm_preallocate_dma_buffer(pcm,
+ SNDRV_PCM_STREAM_CAPTURE,
+ spear_pcm_hardware.buffer_bytes_max);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+struct snd_soc_platform_driver spear_soc_platform = {
+ .ops = &spear_pcm_ops,
+ .pcm_new = spear_pcm_new,
+ .pcm_free = spear_pcm_free,
+};
+
+static int __devinit spear_soc_platform_probe(struct platform_device *pdev)
+{
+ return snd_soc_register_platform(&pdev->dev, &spear_soc_platform);
+}
+
+static int __devexit spear_soc_platform_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_platform(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver spear_pcm_driver = {
+ .driver = {
+ .name = "spear-pcm-audio",
+ .owner = THIS_MODULE,
+ },
+
+ .probe = spear_soc_platform_probe,
+ .remove = __devexit_p(spear_soc_platform_remove),
+};
+
+module_platform_driver(spear_pcm_driver);
+
+MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
+MODULE_DESCRIPTION("SPEAr PCM DMA module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:spear-pcm-audio");
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
index c1c8e955f4d3..02bcd308c189 100644
--- a/sound/soc/tegra/Kconfig
+++ b/sound/soc/tegra/Kconfig
@@ -1,7 +1,8 @@
config SND_SOC_TEGRA
tristate "SoC Audio for the Tegra System-on-Chip"
- depends on ARCH_TEGRA && TEGRA_SYSTEM_DMA
+ depends on ARCH_TEGRA && (TEGRA_SYSTEM_DMA || TEGRA20_APB_DMA)
select REGMAP_MMIO
+ select SND_SOC_DMAENGINE_PCM if TEGRA20_APB_DMA
help
Say Y or M here if you want support for SoC audio on Tegra.
@@ -58,17 +59,9 @@ config SND_SOC_TEGRA_WM8753
Say Y or M here if you want to add support for SoC audio on Tegra
boards using the WM8753 codec, such as Whistler.
-config MACH_HAS_SND_SOC_TEGRA_WM8903
- bool
- help
- Machines that use the SND_SOC_TEGRA_WM8903 driver should select
- this config option, in order to allow the user to enable
- SND_SOC_TEGRA_WM8903.
-
config SND_SOC_TEGRA_WM8903
tristate "SoC Audio support for Tegra boards using a WM8903 codec"
depends on SND_SOC_TEGRA && I2C
- depends on MACH_HAS_SND_SOC_TEGRA_WM8903
select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
select SND_SOC_WM8903
@@ -79,7 +72,7 @@ config SND_SOC_TEGRA_WM8903
config SND_SOC_TEGRA_TRIMSLICE
tristate "SoC Audio support for TrimSlice board"
- depends on SND_SOC_TEGRA && MACH_TRIMSLICE && I2C
+ depends on SND_SOC_TEGRA && I2C
select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
select SND_SOC_TLV320AIC23
help
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 0c7af63d444b..0832e8afd73c 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -46,23 +46,11 @@
#define DRV_NAME "tegra20-i2s"
-static inline void tegra20_i2s_write(struct tegra20_i2s *i2s, u32 reg, u32 val)
-{
- regmap_write(i2s->regmap, reg, val);
-}
-
-static inline u32 tegra20_i2s_read(struct tegra20_i2s *i2s, u32 reg)
-{
- u32 val;
- regmap_read(i2s->regmap, reg, &val);
- return val;
-}
-
static int tegra20_i2s_runtime_suspend(struct device *dev)
{
struct tegra20_i2s *i2s = dev_get_drvdata(dev);
- clk_disable(i2s->clk_i2s);
+ clk_disable_unprepare(i2s->clk_i2s);
return 0;
}
@@ -72,7 +60,7 @@ static int tegra20_i2s_runtime_resume(struct device *dev)
struct tegra20_i2s *i2s = dev_get_drvdata(dev);
int ret;
- ret = clk_enable(i2s->clk_i2s);
+ ret = clk_prepare_enable(i2s->clk_i2s);
if (ret) {
dev_err(dev, "clk_enable failed: %d\n", ret);
return ret;
@@ -85,6 +73,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ unsigned int mask, val;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
@@ -93,10 +82,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
- i2s->reg_ctrl &= ~TEGRA20_I2S_CTRL_MASTER_ENABLE;
+ mask = TEGRA20_I2S_CTRL_MASTER_ENABLE;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
+ val = TEGRA20_I2S_CTRL_MASTER_ENABLE;
break;
case SND_SOC_DAIFMT_CBM_CFM:
break;
@@ -104,33 +93,35 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
- i2s->reg_ctrl &= ~(TEGRA20_I2S_CTRL_BIT_FORMAT_MASK |
- TEGRA20_I2S_CTRL_LRCK_MASK);
+ mask |= TEGRA20_I2S_CTRL_BIT_FORMAT_MASK |
+ TEGRA20_I2S_CTRL_LRCK_MASK;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_FORMAT_DSP;
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
+ val |= TEGRA20_I2S_CTRL_BIT_FORMAT_DSP;
+ val |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
break;
case SND_SOC_DAIFMT_DSP_B:
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_FORMAT_DSP;
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_LRCK_R_LOW;
+ val |= TEGRA20_I2S_CTRL_BIT_FORMAT_DSP;
+ val |= TEGRA20_I2S_CTRL_LRCK_R_LOW;
break;
case SND_SOC_DAIFMT_I2S:
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_FORMAT_I2S;
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
+ val |= TEGRA20_I2S_CTRL_BIT_FORMAT_I2S;
+ val |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
break;
case SND_SOC_DAIFMT_RIGHT_J:
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_FORMAT_RJM;
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
+ val |= TEGRA20_I2S_CTRL_BIT_FORMAT_RJM;
+ val |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
break;
case SND_SOC_DAIFMT_LEFT_J:
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_FORMAT_LJM;
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
+ val |= TEGRA20_I2S_CTRL_BIT_FORMAT_LJM;
+ val |= TEGRA20_I2S_CTRL_LRCK_L_LOW;
break;
default:
return -EINVAL;
}
+ regmap_update_bits(i2s->regmap, TEGRA20_I2S_CTRL, mask, val);
+
return 0;
}
@@ -138,29 +129,34 @@ static int tegra20_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct device *dev = substream->pcm->card->dev;
+ struct device *dev = dai->dev;
struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
- u32 reg;
+ unsigned int mask, val;
int ret, sample_size, srate, i2sclock, bitcnt;
- i2s->reg_ctrl &= ~TEGRA20_I2S_CTRL_BIT_SIZE_MASK;
+ mask = TEGRA20_I2S_CTRL_BIT_SIZE_MASK;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_SIZE_16;
+ val = TEGRA20_I2S_CTRL_BIT_SIZE_16;
sample_size = 16;
break;
case SNDRV_PCM_FORMAT_S24_LE:
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_SIZE_24;
+ val = TEGRA20_I2S_CTRL_BIT_SIZE_24;
sample_size = 24;
break;
case SNDRV_PCM_FORMAT_S32_LE:
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_BIT_SIZE_32;
+ val = TEGRA20_I2S_CTRL_BIT_SIZE_32;
sample_size = 32;
break;
default:
return -EINVAL;
}
+ mask |= TEGRA20_I2S_CTRL_FIFO_FORMAT_MASK;
+ val |= TEGRA20_I2S_CTRL_FIFO_FORMAT_PACKED;
+
+ regmap_update_bits(i2s->regmap, TEGRA20_I2S_CTRL, mask, val);
+
srate = params_rate(params);
/* Final "* 2" required by Tegra hardware */
@@ -175,42 +171,44 @@ static int tegra20_i2s_hw_params(struct snd_pcm_substream *substream,
bitcnt = (i2sclock / (2 * srate)) - 1;
if (bitcnt < 0 || bitcnt > TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_MASK_US)
return -EINVAL;
- reg = bitcnt << TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT;
+ val = bitcnt << TEGRA20_I2S_TIMING_CHANNEL_BIT_COUNT_SHIFT;
if (i2sclock % (2 * srate))
- reg |= TEGRA20_I2S_TIMING_NON_SYM_ENABLE;
+ val |= TEGRA20_I2S_TIMING_NON_SYM_ENABLE;
- tegra20_i2s_write(i2s, TEGRA20_I2S_TIMING, reg);
+ regmap_write(i2s->regmap, TEGRA20_I2S_TIMING, val);
- tegra20_i2s_write(i2s, TEGRA20_I2S_FIFO_SCR,
- TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS |
- TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS);
+ regmap_write(i2s->regmap, TEGRA20_I2S_FIFO_SCR,
+ TEGRA20_I2S_FIFO_SCR_FIFO2_ATN_LVL_FOUR_SLOTS |
+ TEGRA20_I2S_FIFO_SCR_FIFO1_ATN_LVL_FOUR_SLOTS);
return 0;
}
static void tegra20_i2s_start_playback(struct tegra20_i2s *i2s)
{
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_FIFO1_ENABLE;
- tegra20_i2s_write(i2s, TEGRA20_I2S_CTRL, i2s->reg_ctrl);
+ regmap_update_bits(i2s->regmap, TEGRA20_I2S_CTRL,
+ TEGRA20_I2S_CTRL_FIFO1_ENABLE,
+ TEGRA20_I2S_CTRL_FIFO1_ENABLE);
}
static void tegra20_i2s_stop_playback(struct tegra20_i2s *i2s)
{
- i2s->reg_ctrl &= ~TEGRA20_I2S_CTRL_FIFO1_ENABLE;
- tegra20_i2s_write(i2s, TEGRA20_I2S_CTRL, i2s->reg_ctrl);
+ regmap_update_bits(i2s->regmap, TEGRA20_I2S_CTRL,
+ TEGRA20_I2S_CTRL_FIFO1_ENABLE, 0);
}
static void tegra20_i2s_start_capture(struct tegra20_i2s *i2s)
{
- i2s->reg_ctrl |= TEGRA20_I2S_CTRL_FIFO2_ENABLE;
- tegra20_i2s_write(i2s, TEGRA20_I2S_CTRL, i2s->reg_ctrl);
+ regmap_update_bits(i2s->regmap, TEGRA20_I2S_CTRL,
+ TEGRA20_I2S_CTRL_FIFO2_ENABLE,
+ TEGRA20_I2S_CTRL_FIFO2_ENABLE);
}
static void tegra20_i2s_stop_capture(struct tegra20_i2s *i2s)
{
- i2s->reg_ctrl &= ~TEGRA20_I2S_CTRL_FIFO2_ENABLE;
- tegra20_i2s_write(i2s, TEGRA20_I2S_CTRL, i2s->reg_ctrl);
+ regmap_update_bits(i2s->regmap, TEGRA20_I2S_CTRL,
+ TEGRA20_I2S_CTRL_FIFO2_ENABLE, 0);
}
static int tegra20_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -261,12 +259,14 @@ static const struct snd_soc_dai_ops tegra20_i2s_dai_ops = {
static const struct snd_soc_dai_driver tegra20_i2s_dai_template = {
.probe = tegra20_i2s_probe,
.playback = {
+ .stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
+ .stream_name = "Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
@@ -412,8 +412,6 @@ static __devinit int tegra20_i2s_platform_probe(struct platform_device *pdev)
i2s->playback_dma_data.width = 32;
i2s->playback_dma_data.req_sel = dma_ch;
- i2s->reg_ctrl = TEGRA20_I2S_CTRL_FIFO_FORMAT_PACKED;
-
pm_runtime_enable(&pdev->dev);
if (!pm_runtime_enabled(&pdev->dev)) {
ret = tegra20_i2s_runtime_resume(&pdev->dev);
diff --git a/sound/soc/tegra/tegra20_i2s.h b/sound/soc/tegra/tegra20_i2s.h
index a57efc6a597e..c27069d24d77 100644
--- a/sound/soc/tegra/tegra20_i2s.h
+++ b/sound/soc/tegra/tegra20_i2s.h
@@ -158,7 +158,6 @@ struct tegra20_i2s {
struct tegra_pcm_dma_params capture_dma_data;
struct tegra_pcm_dma_params playback_dma_data;
struct regmap *regmap;
- u32 reg_ctrl;
};
#endif
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index f9b57418bd08..3ebc8670ba00 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -37,24 +37,11 @@
#define DRV_NAME "tegra20-spdif"
-static inline void tegra20_spdif_write(struct tegra20_spdif *spdif, u32 reg,
- u32 val)
-{
- regmap_write(spdif->regmap, reg, val);
-}
-
-static inline u32 tegra20_spdif_read(struct tegra20_spdif *spdif, u32 reg)
-{
- u32 val;
- regmap_read(spdif->regmap, reg, &val);
- return val;
-}
-
static int tegra20_spdif_runtime_suspend(struct device *dev)
{
struct tegra20_spdif *spdif = dev_get_drvdata(dev);
- clk_disable(spdif->clk_spdif_out);
+ clk_disable_unprepare(spdif->clk_spdif_out);
return 0;
}
@@ -64,7 +51,7 @@ static int tegra20_spdif_runtime_resume(struct device *dev)
struct tegra20_spdif *spdif = dev_get_drvdata(dev);
int ret;
- ret = clk_enable(spdif->clk_spdif_out);
+ ret = clk_prepare_enable(spdif->clk_spdif_out);
if (ret) {
dev_err(dev, "clk_enable failed: %d\n", ret);
return ret;
@@ -77,21 +64,24 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct device *dev = substream->pcm->card->dev;
+ struct device *dev = dai->dev;
struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
+ unsigned int mask, val;
int ret, spdifclock;
- spdif->reg_ctrl &= ~TEGRA20_SPDIF_CTRL_PACK;
- spdif->reg_ctrl &= ~TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
+ mask = TEGRA20_SPDIF_CTRL_PACK |
+ TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- spdif->reg_ctrl |= TEGRA20_SPDIF_CTRL_PACK;
- spdif->reg_ctrl |= TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
+ val = TEGRA20_SPDIF_CTRL_PACK |
+ TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
break;
default:
return -EINVAL;
}
+ regmap_update_bits(spdif->regmap, TEGRA20_SPDIF_CTRL, mask, val);
+
switch (params_rate(params)) {
case 32000:
spdifclock = 4096000;
@@ -129,14 +119,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
static void tegra20_spdif_start_playback(struct tegra20_spdif *spdif)
{
- spdif->reg_ctrl |= TEGRA20_SPDIF_CTRL_TX_EN;
- tegra20_spdif_write(spdif, TEGRA20_SPDIF_CTRL, spdif->reg_ctrl);
+ regmap_update_bits(spdif->regmap, TEGRA20_SPDIF_CTRL,
+ TEGRA20_SPDIF_CTRL_TX_EN,
+ TEGRA20_SPDIF_CTRL_TX_EN);
}
static void tegra20_spdif_stop_playback(struct tegra20_spdif *spdif)
{
- spdif->reg_ctrl &= ~TEGRA20_SPDIF_CTRL_TX_EN;
- tegra20_spdif_write(spdif, TEGRA20_SPDIF_CTRL, spdif->reg_ctrl);
+ regmap_update_bits(spdif->regmap, TEGRA20_SPDIF_CTRL,
+ TEGRA20_SPDIF_CTRL_TX_EN, 0);
}
static int tegra20_spdif_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -181,6 +172,7 @@ static struct snd_soc_dai_driver tegra20_spdif_dai = {
.name = DRV_NAME,
.probe = tegra20_spdif_probe,
.playback = {
+ .stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
diff --git a/sound/soc/tegra/tegra20_spdif.h b/sound/soc/tegra/tegra20_spdif.h
index ed756527efea..b48d699fd583 100644
--- a/sound/soc/tegra/tegra20_spdif.h
+++ b/sound/soc/tegra/tegra20_spdif.h
@@ -465,7 +465,6 @@ struct tegra20_spdif {
struct tegra_pcm_dma_params capture_dma_data;
struct tegra_pcm_dma_params playback_dma_data;
struct regmap *regmap;
- u32 reg_ctrl;
};
#endif
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index f43edb364a18..bf5610122c76 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -56,8 +56,8 @@ static int tegra30_ahub_runtime_suspend(struct device *dev)
regcache_cache_only(ahub->regmap_apbif, true);
regcache_cache_only(ahub->regmap_ahub, true);
- clk_disable(ahub->clk_apbif);
- clk_disable(ahub->clk_d_audio);
+ clk_disable_unprepare(ahub->clk_apbif);
+ clk_disable_unprepare(ahub->clk_d_audio);
return 0;
}
@@ -77,12 +77,12 @@ static int tegra30_ahub_runtime_resume(struct device *dev)
{
int ret;
- ret = clk_enable(ahub->clk_d_audio);
+ ret = clk_prepare_enable(ahub->clk_d_audio);
if (ret) {
dev_err(dev, "clk_enable d_audio failed: %d\n", ret);
return ret;
}
- ret = clk_enable(ahub->clk_apbif);
+ ret = clk_prepare_enable(ahub->clk_apbif);
if (ret) {
dev_err(dev, "clk_enable apbif failed: %d\n", ret);
clk_disable(ahub->clk_d_audio);
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 8596032985dc..44184228d1f0 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -44,25 +44,13 @@
#define DRV_NAME "tegra30-i2s"
-static inline void tegra30_i2s_write(struct tegra30_i2s *i2s, u32 reg, u32 val)
-{
- regmap_write(i2s->regmap, reg, val);
-}
-
-static inline u32 tegra30_i2s_read(struct tegra30_i2s *i2s, u32 reg)
-{
- u32 val;
- regmap_read(i2s->regmap, reg, &val);
- return val;
-}
-
static int tegra30_i2s_runtime_suspend(struct device *dev)
{
struct tegra30_i2s *i2s = dev_get_drvdata(dev);
regcache_cache_only(i2s->regmap, true);
- clk_disable(i2s->clk_i2s);
+ clk_disable_unprepare(i2s->clk_i2s);
return 0;
}
@@ -72,7 +60,7 @@ static int tegra30_i2s_runtime_resume(struct device *dev)
struct tegra30_i2s *i2s = dev_get_drvdata(dev);
int ret;
- ret = clk_enable(i2s->clk_i2s);
+ ret = clk_prepare_enable(i2s->clk_i2s);
if (ret) {
dev_err(dev, "clk_enable failed: %d\n", ret);
return ret;
@@ -128,6 +116,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
+ unsigned int mask, val;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
@@ -136,10 +125,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
- i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_MASTER_ENABLE;
+ mask = TEGRA30_I2S_CTRL_MASTER_ENABLE;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
+ val = TEGRA30_I2S_CTRL_MASTER_ENABLE;
break;
case SND_SOC_DAIFMT_CBM_CFM:
break;
@@ -147,33 +136,37 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
- i2s->reg_ctrl &= ~(TEGRA30_I2S_CTRL_FRAME_FORMAT_MASK |
- TEGRA30_I2S_CTRL_LRCK_MASK);
+ mask |= TEGRA30_I2S_CTRL_FRAME_FORMAT_MASK |
+ TEGRA30_I2S_CTRL_LRCK_MASK;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC;
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
+ val |= TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC;
+ val |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
break;
case SND_SOC_DAIFMT_DSP_B:
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC;
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_R_LOW;
+ val |= TEGRA30_I2S_CTRL_FRAME_FORMAT_FSYNC;
+ val |= TEGRA30_I2S_CTRL_LRCK_R_LOW;
break;
case SND_SOC_DAIFMT_I2S:
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
+ val |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
+ val |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
break;
case SND_SOC_DAIFMT_RIGHT_J:
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
+ val |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
+ val |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
break;
case SND_SOC_DAIFMT_LEFT_J:
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
+ val |= TEGRA30_I2S_CTRL_FRAME_FORMAT_LRCK;
+ val |= TEGRA30_I2S_CTRL_LRCK_L_LOW;
break;
default:
return -EINVAL;
}
+ pm_runtime_get_sync(dai->dev);
+ regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL, mask, val);
+ pm_runtime_put(dai->dev);
+
return 0;
}
@@ -181,24 +174,26 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct device *dev = substream->pcm->card->dev;
+ struct device *dev = dai->dev;
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
- u32 val;
+ unsigned int mask, val, reg;
int ret, sample_size, srate, i2sclock, bitcnt;
if (params_channels(params) != 2)
return -EINVAL;
- i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_BIT_SIZE_MASK;
+ mask = TEGRA30_I2S_CTRL_BIT_SIZE_MASK;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_BIT_SIZE_16;
+ val = TEGRA30_I2S_CTRL_BIT_SIZE_16;
sample_size = 16;
break;
default:
return -EINVAL;
}
+ regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL, mask, val);
+
srate = params_rate(params);
/* Final "* 2" required by Tegra hardware */
@@ -219,7 +214,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
if (i2sclock % (2 * srate))
val |= TEGRA30_I2S_TIMING_NON_SYM_ENABLE;
- tegra30_i2s_write(i2s, TEGRA30_I2S_TIMING, val);
+ regmap_write(i2s->regmap, TEGRA30_I2S_TIMING, val);
val = (0 << TEGRA30_AUDIOCIF_CTRL_FIFO_THRESHOLD_SHIFT) |
(1 << TEGRA30_AUDIOCIF_CTRL_AUDIO_CHANNELS_SHIFT) |
@@ -229,15 +224,17 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_RX;
- tegra30_i2s_write(i2s, TEGRA30_I2S_CIF_RX_CTRL, val);
+ reg = TEGRA30_I2S_CIF_RX_CTRL;
} else {
val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
- tegra30_i2s_write(i2s, TEGRA30_I2S_CIF_TX_CTRL, val);
+ reg = TEGRA30_I2S_CIF_RX_CTRL;
}
+ regmap_write(i2s->regmap, reg, val);
+
val = (1 << TEGRA30_I2S_OFFSET_RX_DATA_OFFSET_SHIFT) |
(1 << TEGRA30_I2S_OFFSET_TX_DATA_OFFSET_SHIFT);
- tegra30_i2s_write(i2s, TEGRA30_I2S_OFFSET, val);
+ regmap_write(i2s->regmap, TEGRA30_I2S_OFFSET, val);
return 0;
}
@@ -245,29 +242,31 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
static void tegra30_i2s_start_playback(struct tegra30_i2s *i2s)
{
tegra30_ahub_enable_tx_fifo(i2s->playback_fifo_cif);
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_XFER_EN_TX;
- tegra30_i2s_write(i2s, TEGRA30_I2S_CTRL, i2s->reg_ctrl);
+ regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL,
+ TEGRA30_I2S_CTRL_XFER_EN_TX,
+ TEGRA30_I2S_CTRL_XFER_EN_TX);
}
static void tegra30_i2s_stop_playback(struct tegra30_i2s *i2s)
{
tegra30_ahub_disable_tx_fifo(i2s->playback_fifo_cif);
- i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_XFER_EN_TX;
- tegra30_i2s_write(i2s, TEGRA30_I2S_CTRL, i2s->reg_ctrl);
+ regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL,
+ TEGRA30_I2S_CTRL_XFER_EN_TX, 0);
}
static void tegra30_i2s_start_capture(struct tegra30_i2s *i2s)
{
tegra30_ahub_enable_rx_fifo(i2s->capture_fifo_cif);
- i2s->reg_ctrl |= TEGRA30_I2S_CTRL_XFER_EN_RX;
- tegra30_i2s_write(i2s, TEGRA30_I2S_CTRL, i2s->reg_ctrl);
+ regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL,
+ TEGRA30_I2S_CTRL_XFER_EN_RX,
+ TEGRA30_I2S_CTRL_XFER_EN_RX);
}
static void tegra30_i2s_stop_capture(struct tegra30_i2s *i2s)
{
tegra30_ahub_disable_rx_fifo(i2s->capture_fifo_cif);
- i2s->reg_ctrl &= ~TEGRA30_I2S_CTRL_XFER_EN_RX;
- tegra30_i2s_write(i2s, TEGRA30_I2S_CTRL, i2s->reg_ctrl);
+ regmap_update_bits(i2s->regmap, TEGRA30_I2S_CTRL,
+ TEGRA30_I2S_CTRL_XFER_EN_RX, 0);
}
static int tegra30_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
@@ -320,12 +319,14 @@ static struct snd_soc_dai_ops tegra30_i2s_dai_ops = {
static const struct snd_soc_dai_driver tegra30_i2s_dai_template = {
.probe = tegra30_i2s_probe,
.playback = {
+ .stream_name = "Playback",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
+ .stream_name = "Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_96000,
diff --git a/sound/soc/tegra/tegra30_i2s.h b/sound/soc/tegra/tegra30_i2s.h
index 91adf29c7a87..34dc47b9581c 100644
--- a/sound/soc/tegra/tegra30_i2s.h
+++ b/sound/soc/tegra/tegra30_i2s.h
@@ -236,7 +236,6 @@ struct tegra30_i2s {
enum tegra30_ahub_txcif playback_fifo_cif;
struct tegra_pcm_dma_params playback_dma_data;
struct regmap *regmap;
- u32 reg_ctrl;
};
#endif
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index 32de7006daf0..d684df294c0c 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -1,5 +1,5 @@
/*
- * tegra_alc5632.c -- Toshiba AC100(PAZ00) machine ASoC driver
+* tegra_alc5632.c -- Toshiba AC100(PAZ00) machine ASoC driver
*
* Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
* Copyright (C) 2012 - NVIDIA, Inc.
@@ -33,11 +33,8 @@
#define DRV_NAME "tegra-alc5632"
-#define GPIO_HP_DET BIT(0)
-
struct tegra_alc5632 {
struct tegra_asoc_utils_data util_data;
- int gpio_requested;
int gpio_hp_det;
};
@@ -46,7 +43,7 @@ static int tegra_alc5632_asoc_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_codec *codec = codec_dai->codec;
struct snd_soc_card *card = codec->card;
struct tegra_alc5632 *alc5632 = snd_soc_card_get_drvdata(card);
int srate, mclk;
@@ -108,9 +105,9 @@ static const struct snd_kcontrol_new tegra_alc5632_controls[] = {
static int tegra_alc5632_asoc_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_codec *codec = codec_dai->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
- struct device_node *np = codec->card->dev->of_node;
struct tegra_alc5632 *machine = snd_soc_card_get_drvdata(codec->card);
snd_soc_jack_new(codec, "Headset Jack", SND_JACK_HEADSET,
@@ -119,14 +116,11 @@ static int tegra_alc5632_asoc_init(struct snd_soc_pcm_runtime *rtd)
ARRAY_SIZE(tegra_alc5632_hs_jack_pins),
tegra_alc5632_hs_jack_pins);
- machine->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
-
if (gpio_is_valid(machine->gpio_hp_det)) {
tegra_alc5632_hp_jack_gpio.gpio = machine->gpio_hp_det;
snd_soc_jack_add_gpios(&tegra_alc5632_hs_jack,
1,
&tegra_alc5632_hp_jack_gpio);
- machine->gpio_requested |= GPIO_HP_DET;
}
snd_soc_dapm_force_enable_pin(dapm, "MICBIAS1");
@@ -159,6 +153,7 @@ static struct snd_soc_card snd_soc_tegra_alc5632 = {
static __devinit int tegra_alc5632_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct snd_soc_card *card = &snd_soc_tegra_alc5632;
struct tegra_alc5632 *alc5632;
int ret;
@@ -181,6 +176,10 @@ static __devinit int tegra_alc5632_probe(struct platform_device *pdev)
goto err;
}
+ alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0);
+ if (alc5632->gpio_hp_det == -ENODEV)
+ return -EPROBE_DEFER;
+
ret = snd_soc_of_parse_card_name(card, "nvidia,model");
if (ret)
goto err;
@@ -199,16 +198,16 @@ static __devinit int tegra_alc5632_probe(struct platform_device *pdev)
goto err;
}
- tegra_alc5632_dai.cpu_dai_of_node = of_parse_phandle(
+ tegra_alc5632_dai.cpu_of_node = of_parse_phandle(
pdev->dev.of_node, "nvidia,i2s-controller", 0);
- if (!tegra_alc5632_dai.cpu_dai_of_node) {
+ if (!tegra_alc5632_dai.cpu_of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
ret = -EINVAL;
goto err;
}
- tegra_alc5632_dai.platform_of_node = tegra_alc5632_dai.cpu_dai_of_node;
+ tegra_alc5632_dai.platform_of_node = tegra_alc5632_dai.cpu_of_node;
ret = tegra_asoc_utils_init(&alc5632->util_data, &pdev->dev);
if (ret)
@@ -234,11 +233,8 @@ static int __devexit tegra_alc5632_remove(struct platform_device *pdev)
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct tegra_alc5632 *machine = snd_soc_card_get_drvdata(card);
- if (machine->gpio_requested & GPIO_HP_DET)
- snd_soc_jack_free_gpios(&tegra_alc5632_hs_jack,
- 1,
- &tegra_alc5632_hp_jack_gpio);
- machine->gpio_requested = 0;
+ snd_soc_jack_free_gpios(&tegra_alc5632_hs_jack, 1,
+ &tegra_alc5632_hp_jack_gpio);
snd_soc_unregister_card(card);
diff --git a/sound/soc/tegra/tegra_asoc_utils.c b/sound/soc/tegra/tegra_asoc_utils.c
index 9515ce58ea02..6872c77a1196 100644
--- a/sound/soc/tegra/tegra_asoc_utils.c
+++ b/sound/soc/tegra/tegra_asoc_utils.c
@@ -69,9 +69,9 @@ int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
data->set_baseclock = 0;
data->set_mclk = 0;
- clk_disable(data->clk_cdev1);
- clk_disable(data->clk_pll_a_out0);
- clk_disable(data->clk_pll_a);
+ clk_disable_unprepare(data->clk_cdev1);
+ clk_disable_unprepare(data->clk_pll_a_out0);
+ clk_disable_unprepare(data->clk_pll_a);
err = clk_set_rate(data->clk_pll_a, new_baseclock);
if (err) {
@@ -87,19 +87,19 @@ int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
/* Don't set cdev1/extern1 rate; it's locked to pll_a_out0 */
- err = clk_enable(data->clk_pll_a);
+ err = clk_prepare_enable(data->clk_pll_a);
if (err) {
dev_err(data->dev, "Can't enable pll_a: %d\n", err);
return err;
}
- err = clk_enable(data->clk_pll_a_out0);
+ err = clk_prepare_enable(data->clk_pll_a_out0);
if (err) {
dev_err(data->dev, "Can't enable pll_a_out0: %d\n", err);
return err;
}
- err = clk_enable(data->clk_cdev1);
+ err = clk_prepare_enable(data->clk_cdev1);
if (err) {
dev_err(data->dev, "Can't enable cdev1: %d\n", err);
return err;
diff --git a/sound/soc/tegra/tegra_pcm.c b/sound/soc/tegra/tegra_pcm.c
index 127348dc09b1..5658bcec1931 100644
--- a/sound/soc/tegra/tegra_pcm.c
+++ b/sound/soc/tegra/tegra_pcm.c
@@ -36,6 +36,7 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
+#include <sound/dmaengine_pcm.h>
#include "tegra_pcm.h"
@@ -56,6 +57,7 @@ static const struct snd_pcm_hardware tegra_pcm_hardware = {
.fifo_size = 4,
};
+#if defined(CONFIG_TEGRA_SYSTEM_DMA)
static void tegra_pcm_queue_dma(struct tegra_runtime_data *prtd)
{
struct snd_pcm_substream *substream = prtd->substream;
@@ -285,6 +287,119 @@ static struct snd_pcm_ops tegra_pcm_ops = {
.pointer = tegra_pcm_pointer,
.mmap = tegra_pcm_mmap,
};
+#else
+static int tegra_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct device *dev = rtd->platform->dev;
+ int ret;
+
+ /* Set HW params now that initialization is complete */
+ snd_soc_set_runtime_hwparams(substream, &tegra_pcm_hardware);
+
+ ret = snd_dmaengine_pcm_open(substream, NULL, NULL);
+ if (ret) {
+ dev_err(dev, "dmaengine pcm open failed with err %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tegra_pcm_close(struct snd_pcm_substream *substream)
+{
+ snd_dmaengine_pcm_close(substream);
+ return 0;
+}
+
+static int tegra_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct device *dev = rtd->platform->dev;
+ struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
+ struct tegra_pcm_dma_params *dmap;
+ struct dma_slave_config slave_config;
+ int ret;
+
+ dmap = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+
+ ret = snd_hwparams_to_dma_slave_config(substream, params,
+ &slave_config);
+ if (ret) {
+ dev_err(dev, "hw params config failed with err %d\n", ret);
+ return ret;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ slave_config.dst_addr = dmap->addr;
+ slave_config.src_maxburst = 0;
+ } else {
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ slave_config.src_addr = dmap->addr;
+ slave_config.dst_maxburst = 0;
+ }
+ slave_config.slave_id = dmap->req_sel;
+
+ ret = dmaengine_slave_config(chan, &slave_config);
+ if (ret < 0) {
+ dev_err(dev, "dma slave config failed with err %d\n", ret);
+ return ret;
+ }
+
+ snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+ return 0;
+}
+
+static int tegra_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ snd_pcm_set_runtime_buffer(substream, NULL);
+ return 0;
+}
+
+static int tegra_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ return snd_dmaengine_pcm_trigger(substream,
+ SNDRV_PCM_TRIGGER_START);
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ return snd_dmaengine_pcm_trigger(substream,
+ SNDRV_PCM_TRIGGER_STOP);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int tegra_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ return dma_mmap_writecombine(substream->pcm->card->dev, vma,
+ runtime->dma_area,
+ runtime->dma_addr,
+ runtime->dma_bytes);
+}
+
+static struct snd_pcm_ops tegra_pcm_ops = {
+ .open = tegra_pcm_open,
+ .close = tegra_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = tegra_pcm_hw_params,
+ .hw_free = tegra_pcm_hw_free,
+ .trigger = tegra_pcm_trigger,
+ .pointer = snd_dmaengine_pcm_pointer,
+ .mmap = tegra_pcm_mmap,
+};
+#endif
static int tegra_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream)
{
diff --git a/sound/soc/tegra/tegra_pcm.h b/sound/soc/tegra/tegra_pcm.h
index 985d418a35e7..a3a450352dcf 100644
--- a/sound/soc/tegra/tegra_pcm.h
+++ b/sound/soc/tegra/tegra_pcm.h
@@ -40,6 +40,7 @@ struct tegra_pcm_dma_params {
unsigned long req_sel;
};
+#if defined(CONFIG_TEGRA_SYSTEM_DMA)
struct tegra_runtime_data {
struct snd_pcm_substream *substream;
spinlock_t lock;
@@ -51,6 +52,7 @@ struct tegra_runtime_data {
struct tegra_dma_req dma_req[2];
struct tegra_dma_channel *dma_chan;
};
+#endif
int tegra_pcm_platform_register(struct device *dev);
void tegra_pcm_platform_unregister(struct device *dev);
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
index 4e77026807a2..ea9166d5c4eb 100644
--- a/sound/soc/tegra/tegra_wm8753.c
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -57,7 +57,7 @@ static int tegra_wm8753_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_codec *codec = codec_dai->codec;
struct snd_soc_card *card = codec->card;
struct tegra_wm8753 *machine = snd_soc_card_get_drvdata(card);
int srate, mclk;
@@ -157,9 +157,9 @@ static __devinit int tegra_wm8753_driver_probe(struct platform_device *pdev)
goto err;
}
- tegra_wm8753_dai.cpu_dai_of_node = of_parse_phandle(
+ tegra_wm8753_dai.cpu_of_node = of_parse_phandle(
pdev->dev.of_node, "nvidia,i2s-controller", 0);
- if (!tegra_wm8753_dai.cpu_dai_of_node) {
+ if (!tegra_wm8753_dai.cpu_of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
ret = -EINVAL;
@@ -167,7 +167,7 @@ static __devinit int tegra_wm8753_driver_probe(struct platform_device *pdev)
}
tegra_wm8753_dai.platform_of_node =
- tegra_wm8753_dai.cpu_dai_of_node;
+ tegra_wm8753_dai.cpu_of_node;
ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
if (ret)
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 3b6da91188a9..0c5bb33d258e 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -28,8 +28,6 @@
*
*/
-#include <asm/mach-types.h>
-
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -50,16 +48,9 @@
#define DRV_NAME "tegra-snd-wm8903"
-#define GPIO_SPKR_EN BIT(0)
-#define GPIO_HP_MUTE BIT(1)
-#define GPIO_INT_MIC_EN BIT(2)
-#define GPIO_EXT_MIC_EN BIT(3)
-#define GPIO_HP_DET BIT(4)
-
struct tegra_wm8903 {
struct tegra_wm8903_platform_data pdata;
struct tegra_asoc_utils_data util_data;
- int gpio_requested;
};
static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
@@ -67,8 +58,7 @@ static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_codec *codec = codec_dai->codec;
struct snd_soc_card *card = codec->card;
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
int srate, mclk;
@@ -95,24 +85,6 @@ static int tegra_wm8903_hw_params(struct snd_pcm_substream *substream,
return err;
}
- err = snd_soc_dai_set_fmt(codec_dai,
- SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
- if (err < 0) {
- dev_err(card->dev, "codec_dai fmt not set\n");
- return err;
- }
-
- err = snd_soc_dai_set_fmt(cpu_dai,
- SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
- if (err < 0) {
- dev_err(card->dev, "cpu_dai fmt not set\n");
- return err;
- }
-
err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
SND_SOC_CLOCK_IN);
if (err < 0) {
@@ -160,7 +132,7 @@ static int tegra_wm8903_event_int_spk(struct snd_soc_dapm_widget *w,
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
struct tegra_wm8903_platform_data *pdata = &machine->pdata;
- if (!(machine->gpio_requested & GPIO_SPKR_EN))
+ if (!gpio_is_valid(pdata->gpio_spkr_en))
return 0;
gpio_set_value_cansleep(pdata->gpio_spkr_en,
@@ -177,7 +149,7 @@ static int tegra_wm8903_event_hp(struct snd_soc_dapm_widget *w,
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
struct tegra_wm8903_platform_data *pdata = &machine->pdata;
- if (!(machine->gpio_requested & GPIO_HP_MUTE))
+ if (!gpio_is_valid(pdata->gpio_hp_mute))
return 0;
gpio_set_value_cansleep(pdata->gpio_hp_mute,
@@ -203,122 +175,18 @@ static const struct snd_soc_dapm_route harmony_audio_map[] = {
{"IN1L", NULL, "Mic Jack"},
};
-static const struct snd_soc_dapm_route seaboard_audio_map[] = {
- {"Headphone Jack", NULL, "HPOUTR"},
- {"Headphone Jack", NULL, "HPOUTL"},
- {"Int Spk", NULL, "ROP"},
- {"Int Spk", NULL, "RON"},
- {"Int Spk", NULL, "LOP"},
- {"Int Spk", NULL, "LON"},
- {"Mic Jack", NULL, "MICBIAS"},
- {"IN1R", NULL, "Mic Jack"},
-};
-
-static const struct snd_soc_dapm_route kaen_audio_map[] = {
- {"Headphone Jack", NULL, "HPOUTR"},
- {"Headphone Jack", NULL, "HPOUTL"},
- {"Int Spk", NULL, "ROP"},
- {"Int Spk", NULL, "RON"},
- {"Int Spk", NULL, "LOP"},
- {"Int Spk", NULL, "LON"},
- {"Mic Jack", NULL, "MICBIAS"},
- {"IN2R", NULL, "Mic Jack"},
-};
-
-static const struct snd_soc_dapm_route aebl_audio_map[] = {
- {"Headphone Jack", NULL, "HPOUTR"},
- {"Headphone Jack", NULL, "HPOUTL"},
- {"Int Spk", NULL, "LINEOUTR"},
- {"Int Spk", NULL, "LINEOUTL"},
- {"Mic Jack", NULL, "MICBIAS"},
- {"IN1R", NULL, "Mic Jack"},
-};
-
static const struct snd_kcontrol_new tegra_wm8903_controls[] = {
SOC_DAPM_PIN_SWITCH("Int Spk"),
};
static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_codec *codec = codec_dai->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
struct snd_soc_card *card = codec->card;
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
struct tegra_wm8903_platform_data *pdata = &machine->pdata;
- struct device_node *np = card->dev->of_node;
- int ret;
-
- if (card->dev->platform_data) {
- memcpy(pdata, card->dev->platform_data, sizeof(*pdata));
- } else if (np) {
- /*
- * This part must be in init() rather than probe() in order to
- * guarantee that the WM8903 has been probed, and hence its
- * GPIO controller registered, which is a pre-condition for
- * of_get_named_gpio() to be able to map the phandles in the
- * properties to the controller node. Given this, all
- * pdata handling is in init() for consistency.
- */
- pdata->gpio_spkr_en = of_get_named_gpio(np,
- "nvidia,spkr-en-gpios", 0);
- pdata->gpio_hp_mute = of_get_named_gpio(np,
- "nvidia,hp-mute-gpios", 0);
- pdata->gpio_hp_det = of_get_named_gpio(np,
- "nvidia,hp-det-gpios", 0);
- pdata->gpio_int_mic_en = of_get_named_gpio(np,
- "nvidia,int-mic-en-gpios", 0);
- pdata->gpio_ext_mic_en = of_get_named_gpio(np,
- "nvidia,ext-mic-en-gpios", 0);
- } else {
- dev_err(card->dev, "No platform data supplied\n");
- return -EINVAL;
- }
-
- if (gpio_is_valid(pdata->gpio_spkr_en)) {
- ret = gpio_request(pdata->gpio_spkr_en, "spkr_en");
- if (ret) {
- dev_err(card->dev, "cannot get spkr_en gpio\n");
- return ret;
- }
- machine->gpio_requested |= GPIO_SPKR_EN;
-
- gpio_direction_output(pdata->gpio_spkr_en, 0);
- }
-
- if (gpio_is_valid(pdata->gpio_hp_mute)) {
- ret = gpio_request(pdata->gpio_hp_mute, "hp_mute");
- if (ret) {
- dev_err(card->dev, "cannot get hp_mute gpio\n");
- return ret;
- }
- machine->gpio_requested |= GPIO_HP_MUTE;
-
- gpio_direction_output(pdata->gpio_hp_mute, 1);
- }
-
- if (gpio_is_valid(pdata->gpio_int_mic_en)) {
- ret = gpio_request(pdata->gpio_int_mic_en, "int_mic_en");
- if (ret) {
- dev_err(card->dev, "cannot get int_mic_en gpio\n");
- return ret;
- }
- machine->gpio_requested |= GPIO_INT_MIC_EN;
-
- /* Disable int mic; enable signal is active-high */
- gpio_direction_output(pdata->gpio_int_mic_en, 0);
- }
-
- if (gpio_is_valid(pdata->gpio_ext_mic_en)) {
- ret = gpio_request(pdata->gpio_ext_mic_en, "ext_mic_en");
- if (ret) {
- dev_err(card->dev, "cannot get ext_mic_en gpio\n");
- return ret;
- }
- machine->gpio_requested |= GPIO_EXT_MIC_EN;
-
- /* Enable ext mic; enable signal is active-low */
- gpio_direction_output(pdata->gpio_ext_mic_en, 0);
- }
if (gpio_is_valid(pdata->gpio_hp_det)) {
tegra_wm8903_hp_jack_gpio.gpio = pdata->gpio_hp_det;
@@ -330,7 +198,6 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
snd_soc_jack_add_gpios(&tegra_wm8903_hp_jack,
1,
&tegra_wm8903_hp_jack_gpio);
- machine->gpio_requested |= GPIO_HP_DET;
}
snd_soc_jack_new(codec, "Mic Jack", SND_JACK_MICROPHONE,
@@ -366,6 +233,9 @@ static struct snd_soc_dai_link tegra_wm8903_dai = {
.codec_dai_name = "wm8903-hifi",
.init = tegra_wm8903_init,
.ops = &tegra_wm8903_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
};
static struct snd_soc_card snd_soc_tegra_wm8903 = {
@@ -385,8 +255,10 @@ static struct snd_soc_card snd_soc_tegra_wm8903 = {
static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
{
+ struct device_node *np = pdev->dev.of_node;
struct snd_soc_card *card = &snd_soc_tegra_wm8903;
struct tegra_wm8903 *machine;
+ struct tegra_wm8903_platform_data *pdata;
int ret;
if (!pdev->dev.platform_data && !pdev->dev.of_node) {
@@ -401,12 +273,42 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto err;
}
+ pdata = &machine->pdata;
card->dev = &pdev->dev;
platform_set_drvdata(pdev, card);
snd_soc_card_set_drvdata(card, machine);
- if (pdev->dev.of_node) {
+ if (pdev->dev.platform_data) {
+ memcpy(pdata, card->dev->platform_data, sizeof(*pdata));
+ } else if (np) {
+ pdata->gpio_spkr_en = of_get_named_gpio(np,
+ "nvidia,spkr-en-gpios", 0);
+ if (pdata->gpio_spkr_en == -ENODEV)
+ return -EPROBE_DEFER;
+
+ pdata->gpio_hp_mute = of_get_named_gpio(np,
+ "nvidia,hp-mute-gpios", 0);
+ if (pdata->gpio_hp_mute == -ENODEV)
+ return -EPROBE_DEFER;
+
+ pdata->gpio_hp_det = of_get_named_gpio(np,
+ "nvidia,hp-det-gpios", 0);
+ if (pdata->gpio_hp_det == -ENODEV)
+ return -EPROBE_DEFER;
+
+ pdata->gpio_int_mic_en = of_get_named_gpio(np,
+ "nvidia,int-mic-en-gpios", 0);
+ if (pdata->gpio_int_mic_en == -ENODEV)
+ return -EPROBE_DEFER;
+
+ pdata->gpio_ext_mic_en = of_get_named_gpio(np,
+ "nvidia,ext-mic-en-gpios", 0);
+ if (pdata->gpio_ext_mic_en == -ENODEV)
+ return -EPROBE_DEFER;
+ }
+
+ if (np) {
ret = snd_soc_of_parse_card_name(card, "nvidia,model");
if (ret)
goto err;
@@ -417,8 +319,8 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
goto err;
tegra_wm8903_dai.codec_name = NULL;
- tegra_wm8903_dai.codec_of_node = of_parse_phandle(
- pdev->dev.of_node, "nvidia,audio-codec", 0);
+ tegra_wm8903_dai.codec_of_node = of_parse_phandle(np,
+ "nvidia,audio-codec", 0);
if (!tegra_wm8903_dai.codec_of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,audio-codec' missing or invalid\n");
@@ -427,9 +329,9 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
}
tegra_wm8903_dai.cpu_dai_name = NULL;
- tegra_wm8903_dai.cpu_dai_of_node = of_parse_phandle(
- pdev->dev.of_node, "nvidia,i2s-controller", 0);
- if (!tegra_wm8903_dai.cpu_dai_of_node) {
+ tegra_wm8903_dai.cpu_of_node = of_parse_phandle(np,
+ "nvidia,i2s-controller", 0);
+ if (!tegra_wm8903_dai.cpu_of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
ret = -EINVAL;
@@ -438,20 +340,47 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev)
tegra_wm8903_dai.platform_name = NULL;
tegra_wm8903_dai.platform_of_node =
- tegra_wm8903_dai.cpu_dai_of_node;
+ tegra_wm8903_dai.cpu_of_node;
} else {
- if (machine_is_harmony()) {
- card->dapm_routes = harmony_audio_map;
- card->num_dapm_routes = ARRAY_SIZE(harmony_audio_map);
- } else if (machine_is_seaboard()) {
- card->dapm_routes = seaboard_audio_map;
- card->num_dapm_routes = ARRAY_SIZE(seaboard_audio_map);
- } else if (machine_is_kaen()) {
- card->dapm_routes = kaen_audio_map;
- card->num_dapm_routes = ARRAY_SIZE(kaen_audio_map);
- } else {
- card->dapm_routes = aebl_audio_map;
- card->num_dapm_routes = ARRAY_SIZE(aebl_audio_map);
+ card->dapm_routes = harmony_audio_map;
+ card->num_dapm_routes = ARRAY_SIZE(harmony_audio_map);
+ }
+
+ if (gpio_is_valid(pdata->gpio_spkr_en)) {
+ ret = devm_gpio_request_one(&pdev->dev, pdata->gpio_spkr_en,
+ GPIOF_OUT_INIT_LOW, "spkr_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get spkr_en gpio\n");
+ return ret;
+ }
+ }
+
+ if (gpio_is_valid(pdata->gpio_hp_mute)) {
+ ret = devm_gpio_request_one(&pdev->dev, pdata->gpio_hp_mute,
+ GPIOF_OUT_INIT_HIGH, "hp_mute");
+ if (ret) {
+ dev_err(card->dev, "cannot get hp_mute gpio\n");
+ return ret;
+ }
+ }
+
+ if (gpio_is_valid(pdata->gpio_int_mic_en)) {
+ /* Disable int mic; enable signal is active-high */
+ ret = devm_gpio_request_one(&pdev->dev, pdata->gpio_int_mic_en,
+ GPIOF_OUT_INIT_LOW, "int_mic_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get int_mic_en gpio\n");
+ return ret;
+ }
+ }
+
+ if (gpio_is_valid(pdata->gpio_ext_mic_en)) {
+ /* Enable ext mic; enable signal is active-low */
+ ret = devm_gpio_request_one(&pdev->dev, pdata->gpio_ext_mic_en,
+ GPIOF_OUT_INIT_LOW, "ext_mic_en");
+ if (ret) {
+ dev_err(card->dev, "cannot get ext_mic_en gpio\n");
+ return ret;
}
}
@@ -478,21 +407,9 @@ static int __devexit tegra_wm8903_driver_remove(struct platform_device *pdev)
{
struct snd_soc_card *card = platform_get_drvdata(pdev);
struct tegra_wm8903 *machine = snd_soc_card_get_drvdata(card);
- struct tegra_wm8903_platform_data *pdata = &machine->pdata;
- if (machine->gpio_requested & GPIO_HP_DET)
- snd_soc_jack_free_gpios(&tegra_wm8903_hp_jack,
- 1,
- &tegra_wm8903_hp_jack_gpio);
- if (machine->gpio_requested & GPIO_EXT_MIC_EN)
- gpio_free(pdata->gpio_ext_mic_en);
- if (machine->gpio_requested & GPIO_INT_MIC_EN)
- gpio_free(pdata->gpio_int_mic_en);
- if (machine->gpio_requested & GPIO_HP_MUTE)
- gpio_free(pdata->gpio_hp_mute);
- if (machine->gpio_requested & GPIO_SPKR_EN)
- gpio_free(pdata->gpio_spkr_en);
- machine->gpio_requested = 0;
+ snd_soc_jack_free_gpios(&tegra_wm8903_hp_jack, 1,
+ &tegra_wm8903_hp_jack_gpio);
snd_soc_unregister_card(card);
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
index 4a8d5b672c9f..e69a4f7000d6 100644
--- a/sound/soc/tegra/trimslice.c
+++ b/sound/soc/tegra/trimslice.c
@@ -52,8 +52,7 @@ static int trimslice_asoc_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_codec *codec = codec_dai->codec;
struct snd_soc_card *card = codec->card;
struct tegra_trimslice *trimslice = snd_soc_card_get_drvdata(card);
int srate, mclk;
@@ -68,24 +67,6 @@ static int trimslice_asoc_hw_params(struct snd_pcm_substream *substream,
return err;
}
- err = snd_soc_dai_set_fmt(codec_dai,
- SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
- if (err < 0) {
- dev_err(card->dev, "codec_dai fmt not set\n");
- return err;
- }
-
- err = snd_soc_dai_set_fmt(cpu_dai,
- SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
- if (err < 0) {
- dev_err(card->dev, "cpu_dai fmt not set\n");
- return err;
- }
-
err = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
SND_SOC_CLOCK_IN);
if (err < 0) {
@@ -121,6 +102,9 @@ static struct snd_soc_dai_link trimslice_tlv320aic23_dai = {
.cpu_dai_name = "tegra20-i2s.0",
.codec_dai_name = "tlv320aic23-hifi",
.ops = &trimslice_asoc_ops,
+ .dai_fmt = SND_SOC_DAIFMT_I2S |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
};
static struct snd_soc_card snd_soc_trimslice = {
@@ -162,9 +146,9 @@ static __devinit int tegra_snd_trimslice_probe(struct platform_device *pdev)
}
trimslice_tlv320aic23_dai.cpu_dai_name = NULL;
- trimslice_tlv320aic23_dai.cpu_dai_of_node = of_parse_phandle(
+ trimslice_tlv320aic23_dai.cpu_of_node = of_parse_phandle(
pdev->dev.of_node, "nvidia,i2s-controller", 0);
- if (!trimslice_tlv320aic23_dai.cpu_dai_of_node) {
+ if (!trimslice_tlv320aic23_dai.cpu_of_node) {
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing or invalid\n");
ret = -EINVAL;
@@ -173,7 +157,7 @@ static __devinit int tegra_snd_trimslice_probe(struct platform_device *pdev)
trimslice_tlv320aic23_dai.platform_name = NULL;
trimslice_tlv320aic23_dai.platform_of_node =
- trimslice_tlv320aic23_dai.cpu_dai_of_node;
+ trimslice_tlv320aic23_dai.cpu_of_node;
}
ret = tegra_asoc_utils_init(&trimslice->util_data, &pdev->dev);
diff --git a/sound/soc/ux500/Kconfig b/sound/soc/ux500/Kconfig
index 44cf43404cd9..069330d82be5 100644
--- a/sound/soc/ux500/Kconfig
+++ b/sound/soc/ux500/Kconfig
@@ -12,3 +12,21 @@ menuconfig SND_SOC_UX500
config SND_SOC_UX500_PLAT_MSP_I2S
tristate
depends on SND_SOC_UX500
+
+config SND_SOC_UX500_PLAT_DMA
+ tristate "Platform - DB8500 (DMA)"
+ depends on SND_SOC_UX500
+ select SND_SOC_DMAENGINE_PCM
+ help
+ Say Y if you want to enable the Ux500 platform-driver.
+
++config SND_SOC_UX500_MACH_MOP500
++ tristate "Machine - MOP500 (Ux500 + AB8500)"
+ depends on AB8500_CORE && AB8500_GPADC && SND_SOC_UX500
+ select SND_SOC_AB8500_CODEC
+ select SND_SOC_UX500_PLAT_MSP_I2S
+ select SND_SOC_UX500_PLAT_DMA
+ help
+ Select this to enable the MOP500 machine-driver.
+ This will enable platform-drivers for: Ux500
+ This will enable codec-drivers for: AB8500
diff --git a/sound/soc/ux500/Makefile b/sound/soc/ux500/Makefile
index 19974c5a2ea1..cce0c11a4d86 100644
--- a/sound/soc/ux500/Makefile
+++ b/sound/soc/ux500/Makefile
@@ -2,3 +2,9 @@
snd-soc-ux500-plat-msp-i2s-objs := ux500_msp_dai.o ux500_msp_i2s.o
obj-$(CONFIG_SND_SOC_UX500_PLAT_MSP_I2S) += snd-soc-ux500-plat-msp-i2s.o
+
+snd-soc-ux500-plat-dma-objs := ux500_pcm.o
+obj-$(CONFIG_SND_SOC_UX500_PLAT_DMA) += snd-soc-ux500-plat-dma.o
+
+snd-soc-ux500-mach-mop500-objs := mop500.o mop500_ab8500.o
+obj-$(CONFIG_SND_SOC_UX500_MACH_MOP500) += snd-soc-ux500-mach-mop500.o
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
new file mode 100644
index 000000000000..31c4d26d0359
--- /dev/null
+++ b/sound/soc/ux500/mop500.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Ola Lilja (ola.o.lilja@stericsson.com)
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <asm/mach-types.h>
+
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+
+#include <sound/soc.h>
+#include <sound/initval.h>
+
+#include "ux500_pcm.h"
+#include "ux500_msp_dai.h"
+
+#include <mop500_ab8500.h>
+
+/* Define the whole MOP500 soundcard, linking platform to the codec-drivers */
+struct snd_soc_dai_link mop500_dai_links[] = {
+ {
+ .name = "ab8500_0",
+ .stream_name = "ab8500_0",
+ .cpu_dai_name = "ux500-msp-i2s.1",
+ .codec_dai_name = "ab8500-codec-dai.0",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "ab8500-codec.0",
+ .init = mop500_ab8500_machine_init,
+ .ops = mop500_ab8500_ops,
+ },
+ {
+ .name = "ab8500_1",
+ .stream_name = "ab8500_1",
+ .cpu_dai_name = "ux500-msp-i2s.3",
+ .codec_dai_name = "ab8500-codec-dai.1",
+ .platform_name = "ux500-pcm.0",
+ .codec_name = "ab8500-codec.0",
+ .init = NULL,
+ .ops = mop500_ab8500_ops,
+ },
+};
+
+static struct snd_soc_card mop500_card = {
+ .name = "MOP500-card",
+ .probe = NULL,
+ .dai_link = mop500_dai_links,
+ .num_links = ARRAY_SIZE(mop500_dai_links),
+};
+
+static int __devinit mop500_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ dev_dbg(&pdev->dev, "%s: Enter.\n", __func__);
+
+ mop500_card.dev = &pdev->dev;
+
+ dev_dbg(&pdev->dev, "%s: Card %s: Set platform drvdata.\n",
+ __func__, mop500_card.name);
+ platform_set_drvdata(pdev, &mop500_card);
+
+ snd_soc_card_set_drvdata(&mop500_card, NULL);
+
+ dev_dbg(&pdev->dev, "%s: Card %s: num_links = %d\n",
+ __func__, mop500_card.name, mop500_card.num_links);
+ dev_dbg(&pdev->dev, "%s: Card %s: DAI-link 0: name = %s\n",
+ __func__, mop500_card.name, mop500_card.dai_link[0].name);
+ dev_dbg(&pdev->dev, "%s: Card %s: DAI-link 0: stream_name = %s\n",
+ __func__, mop500_card.name,
+ mop500_card.dai_link[0].stream_name);
+
+ ret = snd_soc_register_card(&mop500_card);
+ if (ret)
+ dev_err(&pdev->dev,
+ "Error: snd_soc_register_card failed (%d)!\n",
+ ret);
+
+ return ret;
+}
+
+static int __devexit mop500_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *mop500_card = platform_get_drvdata(pdev);
+
+ pr_debug("%s: Enter.\n", __func__);
+
+ snd_soc_unregister_card(mop500_card);
+ mop500_ab8500_remove(mop500_card);
+
+ return 0;
+}
+
+static struct platform_driver snd_soc_mop500_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "snd-soc-mop500",
+ },
+ .probe = mop500_probe,
+ .remove = __devexit_p(mop500_remove),
+};
+
+module_platform_driver(snd_soc_mop500_driver);
diff --git a/sound/soc/ux500/mop500_ab8500.c b/sound/soc/ux500/mop500_ab8500.c
new file mode 100644
index 000000000000..78cce236693e
--- /dev/null
+++ b/sound/soc/ux500/mop500_ab8500.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Kristoffer Karlsson <kristoffer.karlsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+
+#include <mach/hardware.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+
+#include "ux500_pcm.h"
+#include "ux500_msp_dai.h"
+#include "../codecs/ab8500-codec.h"
+
+#define TX_SLOT_MONO 0x0008
+#define TX_SLOT_STEREO 0x000a
+#define RX_SLOT_MONO 0x0001
+#define RX_SLOT_STEREO 0x0003
+#define TX_SLOT_8CH 0x00FF
+#define RX_SLOT_8CH 0x00FF
+
+#define DEF_TX_SLOTS TX_SLOT_STEREO
+#define DEF_RX_SLOTS RX_SLOT_MONO
+
+#define DRIVERMODE_NORMAL 0
+#define DRIVERMODE_CODEC_ONLY 1
+
+/* Slot configuration */
+static unsigned int tx_slots = DEF_TX_SLOTS;
+static unsigned int rx_slots = DEF_RX_SLOTS;
+
+/* Clocks */
+static const char * const enum_mclk[] = {
+ "SYSCLK",
+ "ULPCLK"
+};
+enum mclk {
+ MCLK_SYSCLK,
+ MCLK_ULPCLK,
+};
+
+static SOC_ENUM_SINGLE_EXT_DECL(soc_enum_mclk, enum_mclk);
+
+/* Private data for machine-part MOP500<->AB8500 */
+struct mop500_ab8500_drvdata {
+ /* Clocks */
+ enum mclk mclk_sel;
+ struct clk *clk_ptr_intclk;
+ struct clk *clk_ptr_sysclk;
+ struct clk *clk_ptr_ulpclk;
+};
+
+static inline const char *get_mclk_str(enum mclk mclk_sel)
+{
+ switch (mclk_sel) {
+ case MCLK_SYSCLK:
+ return "SYSCLK";
+ case MCLK_ULPCLK:
+ return "ULPCLK";
+ default:
+ return "Unknown";
+ }
+}
+
+static int mop500_ab8500_set_mclk(struct device *dev,
+ struct mop500_ab8500_drvdata *drvdata)
+{
+ int status;
+ struct clk *clk_ptr;
+
+ if (IS_ERR(drvdata->clk_ptr_intclk)) {
+ dev_err(dev,
+ "%s: ERROR: intclk not initialized!\n", __func__);
+ return -EIO;
+ }
+
+ switch (drvdata->mclk_sel) {
+ case MCLK_SYSCLK:
+ clk_ptr = drvdata->clk_ptr_sysclk;
+ break;
+ case MCLK_ULPCLK:
+ clk_ptr = drvdata->clk_ptr_ulpclk;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (IS_ERR(clk_ptr)) {
+ dev_err(dev, "%s: ERROR: %s not initialized!\n", __func__,
+ get_mclk_str(drvdata->mclk_sel));
+ return -EIO;
+ }
+
+ status = clk_set_parent(drvdata->clk_ptr_intclk, clk_ptr);
+ if (status)
+ dev_err(dev,
+ "%s: ERROR: Setting intclk parent to %s failed (ret = %d)!",
+ __func__, get_mclk_str(drvdata->mclk_sel), status);
+ else
+ dev_dbg(dev,
+ "%s: intclk parent changed to %s.\n",
+ __func__, get_mclk_str(drvdata->mclk_sel));
+
+ return status;
+}
+
+/*
+ * Control-events
+ */
+
+static int mclk_input_control_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct mop500_ab8500_drvdata *drvdata =
+ snd_soc_card_get_drvdata(codec->card);
+
+ ucontrol->value.enumerated.item[0] = drvdata->mclk_sel;
+
+ return 0;
+}
+
+static int mclk_input_control_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct mop500_ab8500_drvdata *drvdata =
+ snd_soc_card_get_drvdata(codec->card);
+ unsigned int val = ucontrol->value.enumerated.item[0];
+
+ if (val > (unsigned int)MCLK_ULPCLK)
+ return -EINVAL;
+ if (drvdata->mclk_sel == val)
+ return 0;
+
+ drvdata->mclk_sel = val;
+
+ return 1;
+}
+
+/*
+ * Controls
+ */
+
+static struct snd_kcontrol_new mop500_ab8500_ctrls[] = {
+ SOC_ENUM_EXT("Master Clock Select",
+ soc_enum_mclk,
+ mclk_input_control_get, mclk_input_control_put),
+ /* Digital interface - Clocks */
+ SOC_SINGLE("Digital Interface Master Generator Switch",
+ AB8500_DIGIFCONF1, AB8500_DIGIFCONF1_ENMASTGEN,
+ 1, 0),
+ SOC_SINGLE("Digital Interface 0 Bit-clock Switch",
+ AB8500_DIGIFCONF1, AB8500_DIGIFCONF1_ENFSBITCLK0,
+ 1, 0),
+ SOC_SINGLE("Digital Interface 1 Bit-clock Switch",
+ AB8500_DIGIFCONF1, AB8500_DIGIFCONF1_ENFSBITCLK1,
+ 1, 0),
+ SOC_DAPM_PIN_SWITCH("Headset Left"),
+ SOC_DAPM_PIN_SWITCH("Headset Right"),
+ SOC_DAPM_PIN_SWITCH("Earpiece"),
+ SOC_DAPM_PIN_SWITCH("Speaker Left"),
+ SOC_DAPM_PIN_SWITCH("Speaker Right"),
+ SOC_DAPM_PIN_SWITCH("LineOut Left"),
+ SOC_DAPM_PIN_SWITCH("LineOut Right"),
+ SOC_DAPM_PIN_SWITCH("Vibra 1"),
+ SOC_DAPM_PIN_SWITCH("Vibra 2"),
+ SOC_DAPM_PIN_SWITCH("Mic 1"),
+ SOC_DAPM_PIN_SWITCH("Mic 2"),
+ SOC_DAPM_PIN_SWITCH("LineIn Left"),
+ SOC_DAPM_PIN_SWITCH("LineIn Right"),
+ SOC_DAPM_PIN_SWITCH("DMic 1"),
+ SOC_DAPM_PIN_SWITCH("DMic 2"),
+ SOC_DAPM_PIN_SWITCH("DMic 3"),
+ SOC_DAPM_PIN_SWITCH("DMic 4"),
+ SOC_DAPM_PIN_SWITCH("DMic 5"),
+ SOC_DAPM_PIN_SWITCH("DMic 6"),
+};
+
+/* ASoC */
+
+int mop500_ab8500_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ /* Set audio-clock source */
+ return mop500_ab8500_set_mclk(rtd->card->dev,
+ snd_soc_card_get_drvdata(rtd->card));
+}
+
+void mop500_ab8500_shutdown(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct device *dev = rtd->card->dev;
+
+ dev_dbg(dev, "%s: Enter\n", __func__);
+
+ /* Reset slots configuration to default(s) */
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ tx_slots = DEF_TX_SLOTS;
+ else
+ rx_slots = DEF_RX_SLOTS;
+}
+
+int mop500_ab8500_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct device *dev = rtd->card->dev;
+ unsigned int fmt;
+ int channels, ret = 0, driver_mode, slots;
+ unsigned int sw_codec, sw_cpu;
+ bool is_playback;
+
+ dev_dbg(dev, "%s: Enter\n", __func__);
+
+ dev_dbg(dev, "%s: substream->pcm->name = %s\n"
+ "substream->pcm->id = %s.\n"
+ "substream->name = %s.\n"
+ "substream->number = %d.\n",
+ __func__,
+ substream->pcm->name,
+ substream->pcm->id,
+ substream->name,
+ substream->number);
+
+ channels = params_channels(params);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S32_LE:
+ sw_cpu = 32;
+ break;
+
+ case SNDRV_PCM_FORMAT_S16_LE:
+ sw_cpu = 16;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ /* Setup codec depending on driver-mode */
+ if (channels == 8)
+ driver_mode = DRIVERMODE_CODEC_ONLY;
+ else
+ driver_mode = DRIVERMODE_NORMAL;
+ dev_dbg(dev, "%s: Driver-mode: %s.\n", __func__,
+ (driver_mode == DRIVERMODE_NORMAL) ? "NORMAL" : "CODEC_ONLY");
+
+ /* Setup format */
+
+ if (driver_mode == DRIVERMODE_NORMAL) {
+ fmt = SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CONT;
+ } else {
+ fmt = SND_SOC_DAIFMT_DSP_A |
+ SND_SOC_DAIFMT_CBM_CFM |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_GATED;
+ }
+
+ ret = snd_soc_dai_set_fmt(codec_dai, fmt);
+ if (ret < 0) {
+ dev_err(dev,
+ "%s: ERROR: snd_soc_dai_set_fmt failed for codec_dai (ret = %d)!\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
+ if (ret < 0) {
+ dev_err(dev,
+ "%s: ERROR: snd_soc_dai_set_fmt failed for cpu_dai (ret = %d)!\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* Setup TDM-slots */
+
+ is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ switch (channels) {
+ case 1:
+ slots = 16;
+ tx_slots = (is_playback) ? TX_SLOT_MONO : 0;
+ rx_slots = (is_playback) ? 0 : RX_SLOT_MONO;
+ break;
+ case 2:
+ slots = 16;
+ tx_slots = (is_playback) ? TX_SLOT_STEREO : 0;
+ rx_slots = (is_playback) ? 0 : RX_SLOT_STEREO;
+ break;
+ case 8:
+ slots = 16;
+ tx_slots = (is_playback) ? TX_SLOT_8CH : 0;
+ rx_slots = (is_playback) ? 0 : RX_SLOT_8CH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (driver_mode == DRIVERMODE_NORMAL)
+ sw_codec = sw_cpu;
+ else
+ sw_codec = 20;
+
+ dev_dbg(dev, "%s: CPU-DAI TDM: TX=0x%04X RX=0x%04x\n", __func__,
+ tx_slots, rx_slots);
+ ret = snd_soc_dai_set_tdm_slot(cpu_dai, tx_slots, rx_slots, slots,
+ sw_cpu);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "%s: CODEC-DAI TDM: TX=0x%04X RX=0x%04x\n", __func__,
+ tx_slots, rx_slots);
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, tx_slots, rx_slots, slots,
+ sw_codec);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+struct snd_soc_ops mop500_ab8500_ops[] = {
+ {
+ .hw_params = mop500_ab8500_hw_params,
+ .startup = mop500_ab8500_startup,
+ .shutdown = mop500_ab8500_shutdown,
+ }
+};
+
+int mop500_ab8500_machine_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_codec *codec = rtd->codec;
+ struct device *dev = rtd->card->dev;
+ struct mop500_ab8500_drvdata *drvdata;
+ int ret;
+
+ dev_dbg(dev, "%s Enter.\n", __func__);
+
+ /* Create driver private-data struct */
+ drvdata = devm_kzalloc(dev, sizeof(struct mop500_ab8500_drvdata),
+ GFP_KERNEL);
+ snd_soc_card_set_drvdata(rtd->card, drvdata);
+
+ /* Setup clocks */
+
+ drvdata->clk_ptr_sysclk = clk_get(dev, "sysclk");
+ if (IS_ERR(drvdata->clk_ptr_sysclk))
+ dev_warn(dev, "%s: WARNING: clk_get failed for 'sysclk'!\n",
+ __func__);
+ drvdata->clk_ptr_ulpclk = clk_get(dev, "ulpclk");
+ if (IS_ERR(drvdata->clk_ptr_ulpclk))
+ dev_warn(dev, "%s: WARNING: clk_get failed for 'ulpclk'!\n",
+ __func__);
+ drvdata->clk_ptr_intclk = clk_get(dev, "intclk");
+ if (IS_ERR(drvdata->clk_ptr_intclk))
+ dev_warn(dev, "%s: WARNING: clk_get failed for 'intclk'!\n",
+ __func__);
+
+ /* Set intclk default parent to ulpclk */
+ drvdata->mclk_sel = MCLK_ULPCLK;
+ ret = mop500_ab8500_set_mclk(dev, drvdata);
+ if (ret < 0)
+ dev_warn(dev, "%s: WARNING: mop500_ab8500_set_mclk!\n",
+ __func__);
+
+ drvdata->mclk_sel = MCLK_ULPCLK;
+
+ /* Add controls */
+ ret = snd_soc_add_codec_controls(codec, mop500_ab8500_ctrls,
+ ARRAY_SIZE(mop500_ab8500_ctrls));
+ if (ret < 0) {
+ pr_err("%s: Failed to add machine-controls (%d)!\n",
+ __func__, ret);
+ return ret;
+ }
+
+ ret = snd_soc_dapm_disable_pin(&codec->dapm, "Earpiece");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Speaker Left");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Speaker Right");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineOut Left");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineOut Right");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Vibra 1");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Vibra 2");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Mic 1");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "Mic 2");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineIn Left");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "LineIn Right");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 1");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 2");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 3");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 4");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 5");
+ ret |= snd_soc_dapm_disable_pin(&codec->dapm, "DMic 6");
+
+ return ret;
+}
+
+void mop500_ab8500_remove(struct snd_soc_card *card)
+{
+ struct mop500_ab8500_drvdata *drvdata = snd_soc_card_get_drvdata(card);
+
+ if (drvdata->clk_ptr_sysclk != NULL)
+ clk_put(drvdata->clk_ptr_sysclk);
+ if (drvdata->clk_ptr_ulpclk != NULL)
+ clk_put(drvdata->clk_ptr_ulpclk);
+ if (drvdata->clk_ptr_intclk != NULL)
+ clk_put(drvdata->clk_ptr_intclk);
+
+ snd_soc_card_set_drvdata(card, drvdata);
+}
diff --git a/sound/soc/ux500/mop500_ab8500.h b/sound/soc/ux500/mop500_ab8500.h
new file mode 100644
index 000000000000..cca5b33964b6
--- /dev/null
+++ b/sound/soc/ux500/mop500_ab8500.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef MOP500_AB8500_H
+#define MOP500_AB8500_H
+
+extern struct snd_soc_ops mop500_ab8500_ops[];
+
+int mop500_ab8500_machine_init(struct snd_soc_pcm_runtime *runtime);
+void mop500_ab8500_remove(struct snd_soc_card *card);
+
+#endif
diff --git a/sound/soc/ux500/ux500_msp_dai.c b/sound/soc/ux500/ux500_msp_dai.c
index 93c6c40e724c..62ac0285bfaf 100644
--- a/sound/soc/ux500/ux500_msp_dai.c
+++ b/sound/soc/ux500/ux500_msp_dai.c
@@ -840,4 +840,4 @@ static struct platform_driver msp_i2s_driver = {
};
module_platform_driver(msp_i2s_driver);
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/ux500/ux500_msp_i2s.c b/sound/soc/ux500/ux500_msp_i2s.c
index 496dec10c96e..ee14d2dac2f5 100644
--- a/sound/soc/ux500/ux500_msp_i2s.c
+++ b/sound/soc/ux500/ux500_msp_i2s.c
@@ -739,4 +739,4 @@ void ux500_msp_i2s_cleanup_msp(struct platform_device *pdev,
devm_kfree(&pdev->dev, msp);
}
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/ux500/ux500_pcm.c b/sound/soc/ux500/ux500_pcm.c
new file mode 100644
index 000000000000..1a04e248453c
--- /dev/null
+++ b/sound/soc/ux500/ux500_pcm.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <asm/page.h>
+
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/slab.h>
+
+#include <plat/ste_dma40.h>
+
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/dmaengine_pcm.h>
+
+#include "ux500_msp_i2s.h"
+#include "ux500_pcm.h"
+
+static struct snd_pcm_hardware ux500_pcm_hw_playback = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_PAUSE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_U16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE |
+ SNDRV_PCM_FMTBIT_U16_BE,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .rate_min = UX500_PLATFORM_MIN_RATE_PLAYBACK,
+ .rate_max = UX500_PLATFORM_MAX_RATE_PLAYBACK,
+ .channels_min = UX500_PLATFORM_MIN_CHANNELS,
+ .channels_max = UX500_PLATFORM_MAX_CHANNELS,
+ .buffer_bytes_max = UX500_PLATFORM_BUFFER_BYTES_MAX,
+ .period_bytes_min = UX500_PLATFORM_PERIODS_BYTES_MIN,
+ .period_bytes_max = UX500_PLATFORM_PERIODS_BYTES_MAX,
+ .periods_min = UX500_PLATFORM_PERIODS_MIN,
+ .periods_max = UX500_PLATFORM_PERIODS_MAX,
+};
+
+static struct snd_pcm_hardware ux500_pcm_hw_capture = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_PAUSE,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_U16_LE |
+ SNDRV_PCM_FMTBIT_S16_BE |
+ SNDRV_PCM_FMTBIT_U16_BE,
+ .rates = SNDRV_PCM_RATE_KNOT,
+ .rate_min = UX500_PLATFORM_MIN_RATE_CAPTURE,
+ .rate_max = UX500_PLATFORM_MAX_RATE_CAPTURE,
+ .channels_min = UX500_PLATFORM_MIN_CHANNELS,
+ .channels_max = UX500_PLATFORM_MAX_CHANNELS,
+ .buffer_bytes_max = UX500_PLATFORM_BUFFER_BYTES_MAX,
+ .period_bytes_min = UX500_PLATFORM_PERIODS_BYTES_MIN,
+ .period_bytes_max = UX500_PLATFORM_PERIODS_BYTES_MAX,
+ .periods_min = UX500_PLATFORM_PERIODS_MIN,
+ .periods_max = UX500_PLATFORM_PERIODS_MAX,
+};
+
+static void ux500_pcm_dma_hw_free(struct device *dev,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_dma_buffer *buf = runtime->dma_buffer_p;
+
+ if (runtime->dma_area == NULL)
+ return;
+
+ if (buf != &substream->dma_buffer) {
+ dma_free_coherent(buf->dev.dev, buf->bytes, buf->area,
+ buf->addr);
+ kfree(runtime->dma_buffer_p);
+ }
+
+ snd_pcm_set_runtime_buffer(substream, NULL);
+}
+
+static int ux500_pcm_open(struct snd_pcm_substream *substream)
+{
+ int stream_id = substream->pstr->stream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+ struct device *dev = dai->dev;
+ int ret;
+ struct ux500_msp_dma_params *dma_params;
+ u16 per_data_width, mem_data_width;
+ struct stedma40_chan_cfg *dma_cfg;
+
+ dev_dbg(dev, "%s: MSP %d (%s): Enter.\n", __func__, dai->id,
+ snd_pcm_stream_str(substream));
+
+ dev_dbg(dev, "%s: Set runtime hwparams.\n", __func__);
+ if (stream_id == SNDRV_PCM_STREAM_PLAYBACK)
+ snd_soc_set_runtime_hwparams(substream,
+ &ux500_pcm_hw_playback);
+ else
+ snd_soc_set_runtime_hwparams(substream,
+ &ux500_pcm_hw_capture);
+
+ /* ensure that buffer size is a multiple of period size */
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0) {
+ dev_err(dev, "%s: Error: snd_pcm_hw_constraints failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ dev_dbg(dev, "%s: Set hw-struct for %s.\n", __func__,
+ snd_pcm_stream_str(substream));
+ runtime->hw = (stream_id == SNDRV_PCM_STREAM_PLAYBACK) ?
+ ux500_pcm_hw_playback : ux500_pcm_hw_capture;
+
+ mem_data_width = STEDMA40_HALFWORD_WIDTH;
+
+ dma_params = snd_soc_dai_get_dma_data(dai, substream);
+ switch (dma_params->data_size) {
+ case 32:
+ per_data_width = STEDMA40_WORD_WIDTH;
+ break;
+ case 16:
+ per_data_width = STEDMA40_HALFWORD_WIDTH;
+ break;
+ case 8:
+ per_data_width = STEDMA40_BYTE_WIDTH;
+ break;
+ default:
+ per_data_width = STEDMA40_WORD_WIDTH;
+ dev_warn(rtd->platform->dev,
+ "%s: Unknown data-size (%d)! Assuming 32 bits.\n",
+ __func__, dma_params->data_size);
+ }
+
+ dma_cfg = dma_params->dma_cfg;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ dma_cfg->src_info.data_width = mem_data_width;
+ dma_cfg->dst_info.data_width = per_data_width;
+ } else {
+ dma_cfg->src_info.data_width = per_data_width;
+ dma_cfg->dst_info.data_width = mem_data_width;
+ }
+
+
+ ret = snd_dmaengine_pcm_open(substream, stedma40_filter, dma_cfg);
+ if (ret) {
+ dev_dbg(dai->dev,
+ "%s: ERROR: snd_dmaengine_pcm_open failed (%d)!\n",
+ __func__, ret);
+ return ret;
+ }
+
+ snd_dmaengine_pcm_set_data(substream, dma_cfg);
+
+ return 0;
+}
+
+static int ux500_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *dai = rtd->cpu_dai;
+
+ dev_dbg(dai->dev, "%s: Enter\n", __func__);
+
+ snd_dmaengine_pcm_close(substream);
+
+ return 0;
+}
+
+static int ux500_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_dma_buffer *buf = runtime->dma_buffer_p;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int ret = 0;
+ int size;
+
+ dev_dbg(rtd->platform->dev, "%s: Enter\n", __func__);
+
+ size = params_buffer_bytes(hw_params);
+
+ if (buf) {
+ if (buf->bytes >= size)
+ goto out;
+ ux500_pcm_dma_hw_free(NULL, substream);
+ }
+
+ if (substream->dma_buffer.area != NULL &&
+ substream->dma_buffer.bytes >= size) {
+ buf = &substream->dma_buffer;
+ } else {
+ buf = kmalloc(sizeof(struct snd_dma_buffer), GFP_KERNEL);
+ if (!buf)
+ goto nomem;
+
+ buf->dev.type = SNDRV_DMA_TYPE_DEV;
+ buf->dev.dev = NULL;
+ buf->area = dma_alloc_coherent(NULL, size, &buf->addr,
+ GFP_KERNEL);
+ buf->bytes = size;
+ buf->private_data = NULL;
+
+ if (!buf->area)
+ goto free;
+ }
+ snd_pcm_set_runtime_buffer(substream, buf);
+ ret = 1;
+ out:
+ runtime->dma_bytes = size;
+ return ret;
+
+ free:
+ kfree(buf);
+ nomem:
+ return -ENOMEM;
+}
+
+static int ux500_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ dev_dbg(rtd->platform->dev, "%s: Enter\n", __func__);
+
+ ux500_pcm_dma_hw_free(NULL, substream);
+
+ return 0;
+}
+
+static int ux500_pcm_mmap(struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ dev_dbg(rtd->platform->dev, "%s: Enter.\n", __func__);
+
+ return dma_mmap_coherent(NULL, vma, runtime->dma_area,
+ runtime->dma_addr, runtime->dma_bytes);
+}
+
+static struct snd_pcm_ops ux500_pcm_ops = {
+ .open = ux500_pcm_open,
+ .close = ux500_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = ux500_pcm_hw_params,
+ .hw_free = ux500_pcm_hw_free,
+ .trigger = snd_dmaengine_pcm_trigger,
+ .pointer = snd_dmaengine_pcm_pointer_no_residue,
+ .mmap = ux500_pcm_mmap
+};
+
+int ux500_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_pcm *pcm = rtd->pcm;
+
+ dev_dbg(rtd->platform->dev, "%s: Enter (id = '%s').\n", __func__,
+ pcm->id);
+
+ pcm->info_flags = 0;
+
+ return 0;
+}
+
+static struct snd_soc_platform_driver ux500_pcm_soc_drv = {
+ .ops = &ux500_pcm_ops,
+ .pcm_new = ux500_pcm_new,
+};
+
+static int __devexit ux500_pcm_drv_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = snd_soc_register_platform(&pdev->dev, &ux500_pcm_soc_drv);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "%s: ERROR: Failed to register platform '%s' (%d)!\n",
+ __func__, pdev->name, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devinit ux500_pcm_drv_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_platform(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver ux500_pcm_driver = {
+ .driver = {
+ .name = "ux500-pcm",
+ .owner = THIS_MODULE,
+ },
+
+ .probe = ux500_pcm_drv_probe,
+ .remove = __devexit_p(ux500_pcm_drv_remove),
+};
+module_platform_driver(ux500_pcm_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/ux500/ux500_pcm.h b/sound/soc/ux500/ux500_pcm.h
new file mode 100644
index 000000000000..77ed44d371e9
--- /dev/null
+++ b/sound/soc/ux500/ux500_pcm.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) ST-Ericsson SA 2012
+ *
+ * Author: Ola Lilja <ola.o.lilja@stericsson.com>,
+ * Roger Nilsson <roger.xr.nilsson@stericsson.com>
+ * for ST-Ericsson.
+ *
+ * License terms:
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+#ifndef UX500_PCM_H
+#define UX500_PCM_H
+
+#include <asm/page.h>
+
+#include <linux/workqueue.h>
+
+#define UX500_PLATFORM_MIN_RATE_PLAYBACK 8000
+#define UX500_PLATFORM_MAX_RATE_PLAYBACK 48000
+#define UX500_PLATFORM_MIN_RATE_CAPTURE 8000
+#define UX500_PLATFORM_MAX_RATE_CAPTURE 48000
+
+#define UX500_PLATFORM_MIN_CHANNELS 1
+#define UX500_PLATFORM_MAX_CHANNELS 8
+
+#define UX500_PLATFORM_PERIODS_BYTES_MIN 128
+#define UX500_PLATFORM_PERIODS_BYTES_MAX (64 * PAGE_SIZE)
+#define UX500_PLATFORM_PERIODS_MIN 2
+#define UX500_PLATFORM_PERIODS_MAX 48
+#define UX500_PLATFORM_BUFFER_BYTES_MAX (2048 * PAGE_SIZE)
+
+#endif
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 64aed432ae22..7da0d0aa72cb 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -485,7 +485,7 @@ static int __devinit snd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int ret;
- struct snd_card *card;
+ struct snd_card *card = NULL;
struct usb_device *device = interface_to_usbdev(intf);
ret = create_card(device, intf, &card);
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index e6906901debb..0f647d22cb4a 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -414,7 +414,7 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
{
struct list_head *p;
struct snd_usb_endpoint *ep;
- int ret, is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
+ int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
mutex_lock(&chip->mutex);
@@ -434,16 +434,6 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
ep_num);
- /* select the alt setting once so the endpoints become valid */
- ret = usb_set_interface(chip->dev, alts->desc.bInterfaceNumber,
- alts->desc.bAlternateSetting);
- if (ret < 0) {
- snd_printk(KERN_ERR "%s(): usb_set_interface() failed, ret = %d\n",
- __func__, ret);
- ep = NULL;
- goto __exit_unlock;
- }
-
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (!ep)
goto __exit_unlock;
@@ -831,9 +821,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
if (++ep->use_count != 1)
return 0;
- if (snd_BUG_ON(!test_bit(EP_FLAG_ACTIVATED, &ep->flags)))
- return -EINVAL;
-
/* just to be sure */
deactivate_urbs(ep, 0, 1);
wait_clear_urbs(ep);
@@ -911,9 +898,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
if (snd_BUG_ON(ep->use_count == 0))
return;
- if (snd_BUG_ON(!test_bit(EP_FLAG_ACTIVATED, &ep->flags)))
- return;
-
if (--ep->use_count == 0) {
deactivate_urbs(ep, force, can_sleep);
ep->data_subs = NULL;
@@ -927,42 +911,6 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
}
/**
- * snd_usb_endpoint_activate: activate an snd_usb_endpoint
- *
- * @ep: the endpoint to activate
- *
- * If the endpoint is not currently in use, this functions will select the
- * correct alternate interface setting for the interface of this endpoint.
- *
- * In case of any active users, this functions does nothing.
- *
- * Returns an error if usb_set_interface() failed, 0 in all other
- * cases.
- */
-int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep)
-{
- if (ep->use_count != 0)
- return 0;
-
- if (!ep->chip->shutdown &&
- !test_and_set_bit(EP_FLAG_ACTIVATED, &ep->flags)) {
- int ret;
-
- ret = usb_set_interface(ep->chip->dev, ep->iface, ep->alt_idx);
- if (ret < 0) {
- snd_printk(KERN_ERR "%s() usb_set_interface() failed, ret = %d\n",
- __func__, ret);
- clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
- return ret;
- }
-
- return 0;
- }
-
- return -EBUSY;
-}
-
-/**
* snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint
*
* @ep: the endpoint to deactivate
@@ -980,24 +928,15 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
if (!ep)
return -EINVAL;
+ deactivate_urbs(ep, 1, 1);
+ wait_clear_urbs(ep);
+
if (ep->use_count != 0)
return 0;
- if (!ep->chip->shutdown &&
- test_and_clear_bit(EP_FLAG_ACTIVATED, &ep->flags)) {
- int ret;
-
- ret = usb_set_interface(ep->chip->dev, ep->iface, 0);
- if (ret < 0) {
- snd_printk(KERN_ERR "%s(): usb_set_interface() failed, ret = %d\n",
- __func__, ret);
- return ret;
- }
+ clear_bit(EP_FLAG_ACTIVATED, &ep->flags);
- return 0;
- }
-
- return -EBUSY;
+ return 0;
}
/**
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 41f4b6911920..690000db0ec0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -42,6 +42,13 @@
extern struct snd_kcontrol_new *snd_usb_feature_unit_ctl;
+struct std_mono_table {
+ unsigned int unitid, control, cmask;
+ int val_type;
+ const char *name;
+ snd_kcontrol_tlv_rw_t *tlv_callback;
+};
+
/* private_free callback */
static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
{
@@ -114,6 +121,25 @@ static int snd_create_std_mono_ctl(struct usb_mixer_interface *mixer,
}
/*
+ * Create a set of standard UAC controls from a table
+ */
+static int snd_create_std_mono_table(struct usb_mixer_interface *mixer,
+ struct std_mono_table *t)
+{
+ int err;
+
+ while (t->name != NULL) {
+ err = snd_create_std_mono_ctl(mixer, t->unitid, t->control,
+ t->cmask, t->val_type, t->name, t->tlv_callback);
+ if (err < 0)
+ return err;
+ t++;
+ }
+
+ return 0;
+}
+
+/*
* Sound Blaster remote control configuration
*
* format of remote control data:
@@ -916,61 +942,6 @@ static int snd_ftu_create_mixer(struct usb_mixer_interface *mixer)
return 0;
}
-
-/*
- * Create mixer for Electrix Ebox-44
- *
- * The mixer units from this device are corrupt, and even where they
- * are valid they presents mono controls as L and R channels of
- * stereo. So we create a good mixer in code.
- */
-
-static int snd_ebox44_create_mixer(struct usb_mixer_interface *mixer)
-{
- int err;
-
- err = snd_create_std_mono_ctl(mixer, 4, 1, 0x0, USB_MIXER_INV_BOOLEAN,
- "Headphone Playback Switch", NULL);
- if (err < 0)
- return err;
- err = snd_create_std_mono_ctl(mixer, 4, 2, 0x1, USB_MIXER_S16,
- "Headphone A Mix Playback Volume", NULL);
- if (err < 0)
- return err;
- err = snd_create_std_mono_ctl(mixer, 4, 2, 0x2, USB_MIXER_S16,
- "Headphone B Mix Playback Volume", NULL);
- if (err < 0)
- return err;
-
- err = snd_create_std_mono_ctl(mixer, 7, 1, 0x0, USB_MIXER_INV_BOOLEAN,
- "Output Playback Switch", NULL);
- if (err < 0)
- return err;
- err = snd_create_std_mono_ctl(mixer, 7, 2, 0x1, USB_MIXER_S16,
- "Output A Playback Volume", NULL);
- if (err < 0)
- return err;
- err = snd_create_std_mono_ctl(mixer, 7, 2, 0x2, USB_MIXER_S16,
- "Output B Playback Volume", NULL);
- if (err < 0)
- return err;
-
- err = snd_create_std_mono_ctl(mixer, 10, 1, 0x0, USB_MIXER_INV_BOOLEAN,
- "Input Capture Switch", NULL);
- if (err < 0)
- return err;
- err = snd_create_std_mono_ctl(mixer, 10, 2, 0x1, USB_MIXER_S16,
- "Input A Capture Volume", NULL);
- if (err < 0)
- return err;
- err = snd_create_std_mono_ctl(mixer, 10, 2, 0x2, USB_MIXER_S16,
- "Input B Capture Volume", NULL);
- if (err < 0)
- return err;
-
- return 0;
-}
-
void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
unsigned char samplerate_id)
{
@@ -990,6 +961,81 @@ void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
}
}
+/*
+ * The mixer units for Ebox-44 are corrupt, and even where they
+ * are valid they presents mono controls as L and R channels of
+ * stereo. So we provide a good mixer here.
+ */
+struct std_mono_table ebox44_table[] = {
+ {
+ .unitid = 4,
+ .control = 1,
+ .cmask = 0x0,
+ .val_type = USB_MIXER_INV_BOOLEAN,
+ .name = "Headphone Playback Switch"
+ },
+ {
+ .unitid = 4,
+ .control = 2,
+ .cmask = 0x1,
+ .val_type = USB_MIXER_S16,
+ .name = "Headphone A Mix Playback Volume"
+ },
+ {
+ .unitid = 4,
+ .control = 2,
+ .cmask = 0x2,
+ .val_type = USB_MIXER_S16,
+ .name = "Headphone B Mix Playback Volume"
+ },
+
+ {
+ .unitid = 7,
+ .control = 1,
+ .cmask = 0x0,
+ .val_type = USB_MIXER_INV_BOOLEAN,
+ .name = "Output Playback Switch"
+ },
+ {
+ .unitid = 7,
+ .control = 2,
+ .cmask = 0x1,
+ .val_type = USB_MIXER_S16,
+ .name = "Output A Playback Volume"
+ },
+ {
+ .unitid = 7,
+ .control = 2,
+ .cmask = 0x2,
+ .val_type = USB_MIXER_S16,
+ .name = "Output B Playback Volume"
+ },
+
+ {
+ .unitid = 10,
+ .control = 1,
+ .cmask = 0x0,
+ .val_type = USB_MIXER_INV_BOOLEAN,
+ .name = "Input Capture Switch"
+ },
+ {
+ .unitid = 10,
+ .control = 2,
+ .cmask = 0x1,
+ .val_type = USB_MIXER_S16,
+ .name = "Input A Capture Volume"
+ },
+ {
+ .unitid = 10,
+ .control = 2,
+ .cmask = 0x2,
+ .val_type = USB_MIXER_S16,
+ .name = "Input B Capture Volume"
+ },
+
+ {}
+};
+
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
{
int err = 0;
@@ -1035,7 +1081,8 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
break;
case USB_ID(0x200c, 0x1018): /* Electrix Ebox-44 */
- err = snd_ebox44_create_mixer(mixer);
+ /* detection is disabled in mixer_maps.c */
+ err = snd_create_std_mono_table(mixer, ebox44_table);
break;
}
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 54607f8c4f66..a1298f379428 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -261,19 +261,6 @@ static void stop_endpoints(struct snd_usb_substream *subs,
force, can_sleep, wait);
}
-static int activate_endpoints(struct snd_usb_substream *subs)
-{
- if (subs->sync_endpoint) {
- int ret;
-
- ret = snd_usb_endpoint_activate(subs->sync_endpoint);
- if (ret < 0)
- return ret;
- }
-
- return snd_usb_endpoint_activate(subs->data_endpoint);
-}
-
static int deactivate_endpoints(struct snd_usb_substream *subs)
{
int reta, retb;
@@ -314,6 +301,33 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
if (fmt == subs->cur_audiofmt)
return 0;
+ /* close the old interface */
+ if (subs->interface >= 0 && subs->interface != fmt->iface) {
+ err = usb_set_interface(subs->dev, subs->interface, 0);
+ if (err < 0) {
+ snd_printk(KERN_ERR "%d:%d:%d: return to setting 0 failed (%d)\n",
+ dev->devnum, fmt->iface, fmt->altsetting, err);
+ return -EIO;
+ }
+ subs->interface = -1;
+ subs->altset_idx = 0;
+ }
+
+ /* set interface */
+ if (subs->interface != fmt->iface ||
+ subs->altset_idx != fmt->altset_idx) {
+ err = usb_set_interface(dev, fmt->iface, fmt->altsetting);
+ if (err < 0) {
+ snd_printk(KERN_ERR "%d:%d:%d: usb_set_interface failed (%d)\n",
+ dev->devnum, fmt->iface, fmt->altsetting, err);
+ return -EIO;
+ }
+ snd_printdd(KERN_INFO "setting usb interface %d:%d\n",
+ fmt->iface, fmt->altsetting);
+ subs->interface = fmt->iface;
+ subs->altset_idx = fmt->altset_idx;
+ }
+
subs->data_endpoint = snd_usb_add_endpoint(subs->stream->chip,
alts, fmt->endpoint, subs->direction,
SND_USB_ENDPOINT_TYPE_DATA);
@@ -387,7 +401,7 @@ add_sync_ep:
subs->data_endpoint->sync_master = subs->sync_endpoint;
}
- if ((err = snd_usb_init_pitch(subs->stream->chip, subs->interface, alts, fmt)) < 0)
+ if ((err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt)) < 0)
return err;
subs->cur_audiofmt = fmt;
@@ -450,7 +464,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
struct usb_interface *iface;
iface = usb_ifnum_to_if(subs->dev, fmt->iface);
alts = &iface->altsetting[fmt->altset_idx];
- ret = snd_usb_init_sample_rate(subs->stream->chip, subs->interface, alts, fmt, rate);
+ ret = snd_usb_init_sample_rate(subs->stream->chip, fmt->iface, alts, fmt, rate);
if (ret < 0)
return ret;
subs->cur_rate = rate;
@@ -460,12 +474,6 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
mutex_lock(&subs->stream->chip->shutdown_mutex);
/* format changed */
stop_endpoints(subs, 0, 0, 0);
- deactivate_endpoints(subs);
-
- ret = activate_endpoints(subs);
- if (ret < 0)
- goto unlock;
-
ret = snd_usb_endpoint_set_params(subs->data_endpoint, hw_params, fmt,
subs->sync_endpoint);
if (ret < 0)
@@ -500,6 +508,7 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
subs->period_bytes = 0;
mutex_lock(&subs->stream->chip->shutdown_mutex);
stop_endpoints(subs, 0, 1, 1);
+ deactivate_endpoints(subs);
mutex_unlock(&subs->stream->chip->shutdown_mutex);
return snd_pcm_lib_free_vmalloc_buffer(substream);
}
@@ -938,16 +947,20 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction)
static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
{
- int ret;
struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
struct snd_usb_substream *subs = &as->substream[direction];
stop_endpoints(subs, 0, 0, 0);
- ret = deactivate_endpoints(subs);
+
+ if (!as->chip->shutdown && subs->interface >= 0) {
+ usb_set_interface(subs->dev, subs->interface, 0);
+ subs->interface = -1;
+ }
+
subs->pcm_substream = NULL;
snd_usb_autosuspend(subs->stream->chip);
- return ret;
+ return 0;
}
/* Since a URB can handle only a single linear buffer, we must use double
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 3d69aa9ff51e..46c2f6b7b123 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -250,8 +250,12 @@ endef
all_objs := $(sort $(ALL_OBJS))
all_deps := $(all_objs:%.o=.%.d)
+# let .d file also depends on the source and header files
define check_deps
- $(CC) -M $(CFLAGS) $< > $@;
+ @set -e; $(RM) $@; \
+ $(CC) -M $(CFLAGS) $< > $@.$$$$; \
+ sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
+ $(RM) $@.$$$$
endef
$(gui_deps): ks_version.h
@@ -270,11 +274,13 @@ endif
tags: force
$(RM) tags
- find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px
+ find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
+ --regex-c++='/_PE\(([^,)]*).*/PEVENT_ERRNO__\1/'
TAGS: force
$(RM) TAGS
- find . -name '*.[ch]' | xargs etags
+ find . -name '*.[ch]' | xargs etags \
+ --regex='/_PE(\([^,)]*\).*/PEVENT_ERRNO__\1/'
define do_install
$(print_install) \
@@ -290,7 +296,7 @@ install_lib: all_cmd install_plugins install_python
install: install_lib
clean:
- $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES).*.d
+ $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d
$(RM) tags TAGS
endif # skip-makefile
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 554828219c33..5f34aa371b56 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -467,8 +467,10 @@ int pevent_register_function(struct pevent *pevent, char *func,
item->mod = NULL;
item->addr = addr;
- pevent->funclist = item;
+ if (!item->func || (mod && !item->mod))
+ die("malloc func");
+ pevent->funclist = item;
pevent->func_count++;
return 0;
@@ -511,12 +513,12 @@ struct printk_list {
static int printk_cmp(const void *a, const void *b)
{
- const struct func_map *fa = a;
- const struct func_map *fb = b;
+ const struct printk_map *pa = a;
+ const struct printk_map *pb = b;
- if (fa->addr < fb->addr)
+ if (pa->addr < pb->addr)
return -1;
- if (fa->addr > fb->addr)
+ if (pa->addr > pb->addr)
return 1;
return 0;
@@ -583,10 +585,13 @@ int pevent_register_print_string(struct pevent *pevent, char *fmt,
item = malloc_or_die(sizeof(*item));
item->next = pevent->printklist;
- pevent->printklist = item;
item->printk = strdup(fmt);
item->addr = addr;
+ if (!item->printk)
+ die("malloc fmt");
+
+ pevent->printklist = item;
pevent->printk_count++;
return 0;
@@ -616,7 +621,9 @@ static struct event_format *alloc_event(void)
{
struct event_format *event;
- event = malloc_or_die(sizeof(*event));
+ event = malloc(sizeof(*event));
+ if (!event)
+ return NULL;
memset(event, 0, sizeof(*event));
return event;
@@ -626,12 +633,8 @@ static void add_event(struct pevent *pevent, struct event_format *event)
{
int i;
- if (!pevent->events)
- pevent->events = malloc_or_die(sizeof(event));
- else
- pevent->events =
- realloc(pevent->events, sizeof(event) *
- (pevent->nr_events + 1));
+ pevent->events = realloc(pevent->events, sizeof(event) *
+ (pevent->nr_events + 1));
if (!pevent->events)
die("Can not allocate events");
@@ -697,6 +700,10 @@ static void free_arg(struct print_arg *arg)
free_arg(arg->symbol.field);
free_flag_sym(arg->symbol.symbols);
break;
+ case PRINT_HEX:
+ free_arg(arg->hex.field);
+ free_arg(arg->hex.size);
+ break;
case PRINT_TYPE:
free(arg->typecast.type);
free_arg(arg->typecast.item);
@@ -775,6 +782,25 @@ int pevent_peek_char(void)
return __peek_char();
}
+static int extend_token(char **tok, char *buf, int size)
+{
+ char *newtok = realloc(*tok, size);
+
+ if (!newtok) {
+ free(*tok);
+ *tok = NULL;
+ return -1;
+ }
+
+ if (!*tok)
+ strcpy(newtok, buf);
+ else
+ strcat(newtok, buf);
+ *tok = newtok;
+
+ return 0;
+}
+
static enum event_type force_token(const char *str, char **tok);
static enum event_type __read_token(char **tok)
@@ -859,17 +885,10 @@ static enum event_type __read_token(char **tok)
do {
if (i == (BUFSIZ - 1)) {
buf[i] = 0;
- if (*tok) {
- *tok = realloc(*tok, tok_size + BUFSIZ);
- if (!*tok)
- return EVENT_NONE;
- strcat(*tok, buf);
- } else
- *tok = strdup(buf);
+ tok_size += BUFSIZ;
- if (!*tok)
+ if (extend_token(tok, buf, tok_size) < 0)
return EVENT_NONE;
- tok_size += BUFSIZ;
i = 0;
}
last_ch = ch;
@@ -908,17 +927,10 @@ static enum event_type __read_token(char **tok)
while (get_type(__peek_char()) == type) {
if (i == (BUFSIZ - 1)) {
buf[i] = 0;
- if (*tok) {
- *tok = realloc(*tok, tok_size + BUFSIZ);
- if (!*tok)
- return EVENT_NONE;
- strcat(*tok, buf);
- } else
- *tok = strdup(buf);
+ tok_size += BUFSIZ;
- if (!*tok)
+ if (extend_token(tok, buf, tok_size) < 0)
return EVENT_NONE;
- tok_size += BUFSIZ;
i = 0;
}
ch = __read_char();
@@ -927,14 +939,7 @@ static enum event_type __read_token(char **tok)
out:
buf[i] = 0;
- if (*tok) {
- *tok = realloc(*tok, tok_size + i);
- if (!*tok)
- return EVENT_NONE;
- strcat(*tok, buf);
- } else
- *tok = strdup(buf);
- if (!*tok)
+ if (extend_token(tok, buf, tok_size + i + 1) < 0)
return EVENT_NONE;
if (type == EVENT_ITEM) {
@@ -1255,9 +1260,15 @@ static int event_read_fields(struct event_format *event, struct format_field **f
field->flags |= FIELD_IS_POINTER;
if (field->type) {
- field->type = realloc(field->type,
- strlen(field->type) +
- strlen(last_token) + 2);
+ char *new_type;
+ new_type = realloc(field->type,
+ strlen(field->type) +
+ strlen(last_token) + 2);
+ if (!new_type) {
+ free(last_token);
+ goto fail;
+ }
+ field->type = new_type;
strcat(field->type, " ");
strcat(field->type, last_token);
free(last_token);
@@ -1282,6 +1293,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
if (strcmp(token, "[") == 0) {
enum event_type last_type = type;
char *brackets = token;
+ char *new_brackets;
int len;
field->flags |= FIELD_IS_ARRAY;
@@ -1301,9 +1313,14 @@ static int event_read_fields(struct event_format *event, struct format_field **f
len = 1;
last_type = type;
- brackets = realloc(brackets,
- strlen(brackets) +
- strlen(token) + len);
+ new_brackets = realloc(brackets,
+ strlen(brackets) +
+ strlen(token) + len);
+ if (!new_brackets) {
+ free(brackets);
+ goto fail;
+ }
+ brackets = new_brackets;
if (len == 2)
strcat(brackets, " ");
strcat(brackets, token);
@@ -1319,7 +1336,12 @@ static int event_read_fields(struct event_format *event, struct format_field **f
free_token(token);
- brackets = realloc(brackets, strlen(brackets) + 2);
+ new_brackets = realloc(brackets, strlen(brackets) + 2);
+ if (!new_brackets) {
+ free(brackets);
+ goto fail;
+ }
+ brackets = new_brackets;
strcat(brackets, "]");
/* add brackets to type */
@@ -1330,10 +1352,16 @@ static int event_read_fields(struct event_format *event, struct format_field **f
* the format: type [] item;
*/
if (type == EVENT_ITEM) {
- field->type = realloc(field->type,
- strlen(field->type) +
- strlen(field->name) +
- strlen(brackets) + 2);
+ char *new_type;
+ new_type = realloc(field->type,
+ strlen(field->type) +
+ strlen(field->name) +
+ strlen(brackets) + 2);
+ if (!new_type) {
+ free(brackets);
+ goto fail;
+ }
+ field->type = new_type;
strcat(field->type, " ");
strcat(field->type, field->name);
free_token(field->name);
@@ -1341,9 +1369,15 @@ static int event_read_fields(struct event_format *event, struct format_field **f
field->name = token;
type = read_token(&token);
} else {
- field->type = realloc(field->type,
- strlen(field->type) +
- strlen(brackets) + 1);
+ char *new_type;
+ new_type = realloc(field->type,
+ strlen(field->type) +
+ strlen(brackets) + 1);
+ if (!new_type) {
+ free(brackets);
+ goto fail;
+ }
+ field->type = new_type;
strcat(field->type, brackets);
}
free(brackets);
@@ -1726,10 +1760,16 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok)
/* could just be a type pointer */
if ((strcmp(arg->op.op, "*") == 0) &&
type == EVENT_DELIM && (strcmp(token, ")") == 0)) {
+ char *new_atom;
+
if (left->type != PRINT_ATOM)
die("bad pointer type");
- left->atom.atom = realloc(left->atom.atom,
+ new_atom = realloc(left->atom.atom,
strlen(left->atom.atom) + 3);
+ if (!new_atom)
+ goto out_free;
+
+ left->atom.atom = new_atom;
strcat(left->atom.atom, " *");
free(arg->op.op);
*arg = *left;
@@ -2146,6 +2186,8 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
if (value == NULL)
goto out_free;
field->value = strdup(value);
+ if (field->value == NULL)
+ goto out_free;
free_arg(arg);
arg = alloc_arg();
@@ -2159,6 +2201,8 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char **
if (value == NULL)
goto out_free;
field->str = strdup(value);
+ if (field->str == NULL)
+ goto out_free;
free_arg(arg);
arg = NULL;
@@ -2260,6 +2304,45 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
}
static enum event_type
+process_hex(struct event_format *event, struct print_arg *arg, char **tok)
+{
+ struct print_arg *field;
+ enum event_type type;
+ char *token;
+
+ memset(arg, 0, sizeof(*arg));
+ arg->type = PRINT_HEX;
+
+ field = alloc_arg();
+ type = process_arg(event, field, &token);
+
+ if (test_type_token(type, token, EVENT_DELIM, ","))
+ goto out_free;
+
+ arg->hex.field = field;
+
+ free_token(token);
+
+ field = alloc_arg();
+ type = process_arg(event, field, &token);
+
+ if (test_type_token(type, token, EVENT_DELIM, ")"))
+ goto out_free;
+
+ arg->hex.size = field;
+
+ free_token(token);
+ type = read_token_item(tok);
+ return type;
+
+ out_free:
+ free_arg(field);
+ free_token(token);
+ *tok = NULL;
+ return EVENT_ERROR;
+}
+
+static enum event_type
process_dynamic_array(struct event_format *event, struct print_arg *arg, char **tok)
{
struct format_field *field;
@@ -2488,6 +2571,10 @@ process_function(struct event_format *event, struct print_arg *arg,
is_symbolic_field = 1;
return process_symbols(event, arg, tok);
}
+ if (strcmp(token, "__print_hex") == 0) {
+ free_token(token);
+ return process_hex(event, arg, tok);
+ }
if (strcmp(token, "__get_str") == 0) {
free_token(token);
return process_str(event, arg, tok);
@@ -2541,7 +2628,16 @@ process_arg_token(struct event_format *event, struct print_arg *arg,
}
/* atoms can be more than one token long */
while (type == EVENT_ITEM) {
- atom = realloc(atom, strlen(atom) + strlen(token) + 2);
+ char *new_atom;
+ new_atom = realloc(atom,
+ strlen(atom) + strlen(token) + 2);
+ if (!new_atom) {
+ free(atom);
+ *tok = NULL;
+ free_token(token);
+ return EVENT_ERROR;
+ }
+ atom = new_atom;
strcat(atom, " ");
strcat(atom, token);
free_token(token);
@@ -2835,7 +2931,7 @@ static int get_common_info(struct pevent *pevent,
event = pevent->events[0];
field = pevent_find_common_field(event, type);
if (!field)
- die("field '%s' not found", type);
+ return -1;
*offset = field->offset;
*size = field->size;
@@ -2886,15 +2982,16 @@ static int parse_common_flags(struct pevent *pevent, void *data)
static int parse_common_lock_depth(struct pevent *pevent, void *data)
{
- int ret;
-
- ret = __parse_common(pevent, data,
- &pevent->ld_size, &pevent->ld_offset,
- "common_lock_depth");
- if (ret < 0)
- return -1;
+ return __parse_common(pevent, data,
+ &pevent->ld_size, &pevent->ld_offset,
+ "common_lock_depth");
+}
- return ret;
+static int parse_common_migrate_disable(struct pevent *pevent, void *data)
+{
+ return __parse_common(pevent, data,
+ &pevent->ld_size, &pevent->ld_offset,
+ "common_migrate_disable");
}
static int events_id_cmp(const void *a, const void *b);
@@ -2995,6 +3092,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
break;
case PRINT_FLAGS:
case PRINT_SYMBOL:
+ case PRINT_HEX:
break;
case PRINT_TYPE:
val = eval_num_arg(data, size, event, arg->typecast.item);
@@ -3214,11 +3312,13 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
{
struct pevent *pevent = event->pevent;
struct print_flag_sym *flag;
+ struct format_field *field;
unsigned long long val, fval;
unsigned long addr;
char *str;
+ unsigned char *hex;
int print;
- int len;
+ int i, len;
switch (arg->type) {
case PRINT_NULL:
@@ -3228,27 +3328,29 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
print_str_to_seq(s, format, len_arg, arg->atom.atom);
return;
case PRINT_FIELD:
- if (!arg->field.field) {
- arg->field.field = pevent_find_any_field(event, arg->field.name);
- if (!arg->field.field)
+ field = arg->field.field;
+ if (!field) {
+ field = pevent_find_any_field(event, arg->field.name);
+ if (!field)
die("field %s not found", arg->field.name);
+ arg->field.field = field;
}
/* Zero sized fields, mean the rest of the data */
- len = arg->field.field->size ? : size - arg->field.field->offset;
+ len = field->size ? : size - field->offset;
/*
* Some events pass in pointers. If this is not an array
* and the size is the same as long_size, assume that it
* is a pointer.
*/
- if (!(arg->field.field->flags & FIELD_IS_ARRAY) &&
- arg->field.field->size == pevent->long_size) {
- addr = *(unsigned long *)(data + arg->field.field->offset);
+ if (!(field->flags & FIELD_IS_ARRAY) &&
+ field->size == pevent->long_size) {
+ addr = *(unsigned long *)(data + field->offset);
trace_seq_printf(s, "%lx", addr);
break;
}
str = malloc_or_die(len + 1);
- memcpy(str, data + arg->field.field->offset, len);
+ memcpy(str, data + field->offset, len);
str[len] = 0;
print_str_to_seq(s, format, len_arg, str);
free(str);
@@ -3281,6 +3383,23 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
}
}
break;
+ case PRINT_HEX:
+ field = arg->hex.field->field.field;
+ if (!field) {
+ str = arg->hex.field->field.name;
+ field = pevent_find_any_field(event, str);
+ if (!field)
+ die("field %s not found", str);
+ arg->hex.field->field.field = field;
+ }
+ hex = data + field->offset;
+ len = eval_num_arg(data, size, event, arg->hex.size);
+ for (i = 0; i < len; i++) {
+ if (i)
+ trace_seq_putc(s, ' ');
+ trace_seq_printf(s, "%02x", hex[i]);
+ }
+ break;
case PRINT_TYPE:
break;
@@ -3299,7 +3418,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
break;
}
case PRINT_BSTRING:
- trace_seq_printf(s, format, arg->string.string);
+ print_str_to_seq(s, format, len_arg, arg->string.string);
break;
case PRINT_OP:
/*
@@ -3363,6 +3482,10 @@ process_defined_func(struct trace_seq *s, void *data, int size,
string = malloc_or_die(sizeof(*string));
string->next = strings;
string->str = strdup(str.buffer);
+ if (!string->str)
+ die("malloc str");
+
+ args[i] = (unsigned long long)string->str;
strings = string;
trace_seq_destroy(&str);
break;
@@ -3400,6 +3523,7 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
unsigned long long ip, val;
char *ptr;
void *bptr;
+ int vsize;
field = pevent->bprint_buf_field;
ip_field = pevent->bprint_ip_field;
@@ -3448,6 +3572,8 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
goto process_again;
case '0' ... '9':
goto process_again;
+ case '.':
+ goto process_again;
case 'p':
ls = 1;
/* fall through */
@@ -3455,23 +3581,30 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
case 'u':
case 'x':
case 'i':
- /* the pointers are always 4 bytes aligned */
- bptr = (void *)(((unsigned long)bptr + 3) &
- ~3);
switch (ls) {
case 0:
- ls = 4;
+ vsize = 4;
break;
case 1:
- ls = pevent->long_size;
+ vsize = pevent->long_size;
break;
case 2:
- ls = 8;
+ vsize = 8;
+ break;
default:
+ vsize = ls; /* ? */
break;
}
- val = pevent_read_number(pevent, bptr, ls);
- bptr += ls;
+ /* fall through */
+ case '*':
+ if (*ptr == '*')
+ vsize = 4;
+
+ /* the pointers are always 4 bytes aligned */
+ bptr = (void *)(((unsigned long)bptr + 3) &
+ ~3);
+ val = pevent_read_number(pevent, bptr, vsize);
+ bptr += vsize;
arg = alloc_arg();
arg->next = NULL;
arg->type = PRINT_ATOM;
@@ -3479,12 +3612,21 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc
sprintf(arg->atom.atom, "%lld", val);
*next = arg;
next = &arg->next;
+ /*
+ * The '*' case means that an arg is used as the length.
+ * We need to continue to figure out for what.
+ */
+ if (*ptr == '*')
+ goto process_again;
+
break;
case 's':
arg = alloc_arg();
arg->next = NULL;
arg->type = PRINT_BSTRING;
arg->string.string = strdup(bptr);
+ if (!arg->string.string)
+ break;
bptr += strlen(bptr) + 1;
*next = arg;
next = &arg->next;
@@ -3589,6 +3731,16 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
}
+static int is_printable_array(char *p, unsigned int len)
+{
+ unsigned int i;
+
+ for (i = 0; i < len && p[i]; i++)
+ if (!isprint(p[i]))
+ return 0;
+ return 1;
+}
+
static void print_event_fields(struct trace_seq *s, void *data, int size,
struct event_format *event)
{
@@ -3608,7 +3760,8 @@ static void print_event_fields(struct trace_seq *s, void *data, int size,
len = offset >> 16;
offset &= 0xffff;
}
- if (field->flags & FIELD_IS_STRING) {
+ if (field->flags & FIELD_IS_STRING &&
+ is_printable_array(data + offset, len)) {
trace_seq_printf(s, "%s", (char *)data + offset);
} else {
trace_seq_puts(s, "ARRAY[");
@@ -3619,6 +3772,7 @@ static void print_event_fields(struct trace_seq *s, void *data, int size,
*((unsigned char *)data + offset + i));
}
trace_seq_putc(s, ']');
+ field->flags &= ~FIELD_IS_STRING;
}
} else {
val = pevent_read_number(event->pevent, data + field->offset,
@@ -3758,6 +3912,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
} else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
print_mac_arg(s, *(ptr+1), data, size, event, arg);
ptr++;
+ arg = arg->next;
break;
}
@@ -3794,14 +3949,15 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
break;
}
}
- if (pevent->long_size == 8 && ls) {
+ if (pevent->long_size == 8 && ls &&
+ sizeof(long) != 8) {
char *p;
ls = 2;
/* make %l into %ll */
p = strchr(format, 'l');
if (p)
- memmove(p, p+1, strlen(p)+1);
+ memmove(p+1, p, strlen(p)+1);
else if (strcmp(format, "%p") == 0)
strcpy(format, "0x%llx");
}
@@ -3878,8 +4034,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
* pevent_data_lat_fmt - parse the data for the latency format
* @pevent: a handle to the pevent
* @s: the trace_seq to write to
- * @data: the raw data to read from
- * @size: currently unused.
+ * @record: the record to read from
*
* This parses out the Latency format (interrupts disabled,
* need rescheduling, in hard/soft interrupt, preempt count
@@ -3889,10 +4044,13 @@ void pevent_data_lat_fmt(struct pevent *pevent,
struct trace_seq *s, struct pevent_record *record)
{
static int check_lock_depth = 1;
+ static int check_migrate_disable = 1;
static int lock_depth_exists;
+ static int migrate_disable_exists;
unsigned int lat_flags;
unsigned int pc;
int lock_depth;
+ int migrate_disable;
int hardirq;
int softirq;
void *data = record->data;
@@ -3900,18 +4058,26 @@ void pevent_data_lat_fmt(struct pevent *pevent,
lat_flags = parse_common_flags(pevent, data);
pc = parse_common_pc(pevent, data);
/* lock_depth may not always exist */
- if (check_lock_depth) {
- struct format_field *field;
- struct event_format *event;
-
- check_lock_depth = 0;
- event = pevent->events[0];
- field = pevent_find_common_field(event, "common_lock_depth");
- if (field)
- lock_depth_exists = 1;
- }
if (lock_depth_exists)
lock_depth = parse_common_lock_depth(pevent, data);
+ else if (check_lock_depth) {
+ lock_depth = parse_common_lock_depth(pevent, data);
+ if (lock_depth < 0)
+ check_lock_depth = 0;
+ else
+ lock_depth_exists = 1;
+ }
+
+ /* migrate_disable may not always exist */
+ if (migrate_disable_exists)
+ migrate_disable = parse_common_migrate_disable(pevent, data);
+ else if (check_migrate_disable) {
+ migrate_disable = parse_common_migrate_disable(pevent, data);
+ if (migrate_disable < 0)
+ check_migrate_disable = 0;
+ else
+ migrate_disable_exists = 1;
+ }
hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
softirq = lat_flags & TRACE_FLAG_SOFTIRQ;
@@ -3930,6 +4096,13 @@ void pevent_data_lat_fmt(struct pevent *pevent,
else
trace_seq_putc(s, '.');
+ if (migrate_disable_exists) {
+ if (migrate_disable < 0)
+ trace_seq_putc(s, '.');
+ else
+ trace_seq_printf(s, "%d", migrate_disable);
+ }
+
if (lock_depth_exists) {
if (lock_depth < 0)
trace_seq_putc(s, '.');
@@ -3996,10 +4169,7 @@ const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid)
* pevent_data_comm_from_pid - parse the data into the print format
* @s: the trace_seq to write to
* @event: the handle to the event
- * @cpu: the cpu the event was recorded on
- * @data: the raw data
- * @size: the size of the raw data
- * @nsecs: the timestamp of the event
+ * @record: the record to read from
*
* This parses the raw @data using the given @event information and
* writes the print format into the trace_seq.
@@ -4279,6 +4449,13 @@ static void print_args(struct print_arg *args)
trace_seq_destroy(&s);
printf(")");
break;
+ case PRINT_HEX:
+ printf("__print_hex(");
+ print_args(args->hex.field);
+ printf(", ");
+ print_args(args->hex.size);
+ printf(")");
+ break;
case PRINT_STRING:
case PRINT_BSTRING:
printf("__get_str(%s)", args->string.string);
@@ -4541,6 +4718,8 @@ int pevent_parse_event(struct pevent *pevent,
die("failed to read event id");
event->system = strdup(sys);
+ if (!event->system)
+ die("failed to allocate system");
/* Add pevent to event so that it can be referenced */
event->pevent = pevent;
@@ -4582,6 +4761,11 @@ int pevent_parse_event(struct pevent *pevent,
list = &arg->next;
arg->type = PRINT_FIELD;
arg->field.name = strdup(field->name);
+ if (!arg->field.name) {
+ do_warning("failed to allocate field name");
+ event->flags |= EVENT_FL_FAILED;
+ return -1;
+ }
arg->field.field = field;
}
return 0;
@@ -4753,7 +4937,7 @@ int pevent_get_any_field_val(struct trace_seq *s, struct event_format *event,
* @record: The record with the field name.
* @err: print default error if failed.
*
- * Returns: 0 on success, -1 field not fould, or 1 if buffer is full.
+ * Returns: 0 on success, -1 field not found, or 1 if buffer is full.
*/
int pevent_print_num_field(struct trace_seq *s, const char *fmt,
struct event_format *event, const char *name,
@@ -4795,11 +4979,12 @@ static void free_func_handle(struct pevent_function_handler *func)
* pevent_register_print_function - register a helper function
* @pevent: the handle to the pevent
* @func: the function to process the helper function
+ * @ret_type: the return type of the helper function
* @name: the name of the helper function
* @parameters: A list of enum pevent_func_arg_type
*
* Some events may have helper functions in the print format arguments.
- * This allows a plugin to dynmically create a way to process one
+ * This allows a plugin to dynamically create a way to process one
* of these functions.
*
* The @parameters is a variable list of pevent_func_arg_type enums that
@@ -4870,12 +5055,13 @@ int pevent_register_print_function(struct pevent *pevent,
}
/**
- * pevent_register_event_handle - register a way to parse an event
+ * pevent_register_event_handler - register a way to parse an event
* @pevent: the handle to the pevent
* @id: the id of the event to register
* @sys_name: the system name the event belongs to
* @event_name: the name of the event
* @func: the function to call to parse the event information
+ * @context: the data to be passed to @func
*
* This function allows a developer to override the parsing of
* a given event. If for some reason the default print format
@@ -4925,6 +5111,11 @@ int pevent_register_event_handler(struct pevent *pevent,
if (sys_name)
handle->sys_name = strdup(sys_name);
+ if ((event_name && !handle->event_name) ||
+ (sys_name && !handle->sys_name)) {
+ die("Failed to allocate event/sys name");
+ }
+
handle->func = func;
handle->next = pevent->handlers;
pevent->handlers = handle;
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index ac997bc7b592..5772ad8cb386 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -226,6 +226,11 @@ struct print_arg_symbol {
struct print_flag_sym *symbols;
};
+struct print_arg_hex {
+ struct print_arg *field;
+ struct print_arg *size;
+};
+
struct print_arg_dynarray {
struct format_field *field;
struct print_arg *index;
@@ -253,6 +258,7 @@ enum print_arg_type {
PRINT_FIELD,
PRINT_FLAGS,
PRINT_SYMBOL,
+ PRINT_HEX,
PRINT_TYPE,
PRINT_STRING,
PRINT_BSTRING,
@@ -270,6 +276,7 @@ struct print_arg {
struct print_arg_typecast typecast;
struct print_arg_flags flags;
struct print_arg_symbol symbol;
+ struct print_arg_hex hex;
struct print_arg_func func;
struct print_arg_string string;
struct print_arg_op op;
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
index dfcfe2c131de..ad17855528f9 100644
--- a/tools/lib/traceevent/parse-filter.c
+++ b/tools/lib/traceevent/parse-filter.c
@@ -96,7 +96,7 @@ static enum event_type read_token(char **tok)
(strcmp(token, "=") == 0 || strcmp(token, "!") == 0) &&
pevent_peek_char() == '~') {
/* append it */
- *tok = malloc(3);
+ *tok = malloc_or_die(3);
sprintf(*tok, "%c%c", *token, '~');
free_token(token);
/* Now remove the '~' from the buffer */
@@ -148,17 +148,11 @@ add_filter_type(struct event_filter *filter, int id)
if (filter_type)
return filter_type;
- if (!filter->filters)
- filter->event_filters =
- malloc_or_die(sizeof(*filter->event_filters));
- else {
- filter->event_filters =
- realloc(filter->event_filters,
- sizeof(*filter->event_filters) *
- (filter->filters + 1));
- if (!filter->event_filters)
- die("Could not allocate filter");
- }
+ filter->event_filters = realloc(filter->event_filters,
+ sizeof(*filter->event_filters) *
+ (filter->filters + 1));
+ if (!filter->event_filters)
+ die("Could not allocate filter");
for (i = 0; i < filter->filters; i++) {
if (filter->event_filters[i].event_id > id)
@@ -1480,7 +1474,7 @@ void pevent_filter_clear_trivial(struct event_filter *filter,
{
struct filter_type *filter_type;
int count = 0;
- int *ids;
+ int *ids = NULL;
int i;
if (!filter->filters)
@@ -1504,10 +1498,8 @@ void pevent_filter_clear_trivial(struct event_filter *filter,
default:
break;
}
- if (count)
- ids = realloc(ids, sizeof(*ids) * (count + 1));
- else
- ids = malloc(sizeof(*ids));
+
+ ids = realloc(ids, sizeof(*ids) * (count + 1));
if (!ids)
die("Can't allocate ids");
ids[count++] = filter_type->event_id;
@@ -1710,18 +1702,43 @@ static int test_num(struct event_format *event,
static const char *get_field_str(struct filter_arg *arg, struct pevent_record *record)
{
- const char *val = record->data + arg->str.field->offset;
+ struct event_format *event;
+ struct pevent *pevent;
+ unsigned long long addr;
+ const char *val = NULL;
+ char hex[64];
- /*
- * We need to copy the data since we can't be sure the field
- * is null terminated.
- */
- if (*(val + arg->str.field->size - 1)) {
- /* copy it */
- memcpy(arg->str.buffer, val, arg->str.field->size);
- /* the buffer is already NULL terminated */
- val = arg->str.buffer;
+ /* If the field is not a string convert it */
+ if (arg->str.field->flags & FIELD_IS_STRING) {
+ val = record->data + arg->str.field->offset;
+
+ /*
+ * We need to copy the data since we can't be sure the field
+ * is null terminated.
+ */
+ if (*(val + arg->str.field->size - 1)) {
+ /* copy it */
+ memcpy(arg->str.buffer, val, arg->str.field->size);
+ /* the buffer is already NULL terminated */
+ val = arg->str.buffer;
+ }
+
+ } else {
+ event = arg->str.field->event;
+ pevent = event->pevent;
+ addr = get_value(event, arg->str.field, record);
+
+ if (arg->str.field->flags & (FIELD_IS_POINTER | FIELD_IS_LONG))
+ /* convert to a kernel symbol */
+ val = pevent_find_function(pevent, addr);
+
+ if (val == NULL) {
+ /* just use the hex of the string name */
+ snprintf(hex, 64, "0x%llx", addr);
+ val = hex;
+ }
}
+
return val;
}
@@ -2001,11 +2018,13 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg)
char *lstr;
char *rstr;
char *op;
- char *str;
+ char *str = NULL;
int len;
lstr = arg_to_str(filter, arg->exp.left);
rstr = arg_to_str(filter, arg->exp.right);
+ if (!lstr || !rstr)
+ goto out;
switch (arg->exp.type) {
case FILTER_EXP_ADD:
@@ -2045,6 +2064,7 @@ static char *exp_to_str(struct event_filter *filter, struct filter_arg *arg)
len = strlen(op) + strlen(lstr) + strlen(rstr) + 4;
str = malloc_or_die(len);
snprintf(str, len, "%s %s %s", lstr, op, rstr);
+out:
free(lstr);
free(rstr);
@@ -2061,6 +2081,8 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg)
lstr = arg_to_str(filter, arg->num.left);
rstr = arg_to_str(filter, arg->num.right);
+ if (!lstr || !rstr)
+ goto out;
switch (arg->num.type) {
case FILTER_CMP_EQ:
@@ -2097,6 +2119,7 @@ static char *num_to_str(struct event_filter *filter, struct filter_arg *arg)
break;
}
+out:
free(lstr);
free(rstr);
return str;
@@ -2247,7 +2270,12 @@ int pevent_filter_compare(struct event_filter *filter1, struct event_filter *fil
/* The best way to compare complex filters is with strings */
str1 = arg_to_str(filter1, filter_type1->filter);
str2 = arg_to_str(filter2, filter_type2->filter);
- result = strcmp(str1, str2) != 0;
+ if (str1 && str2)
+ result = strcmp(str1, str2) != 0;
+ else
+ /* bail out if allocation fails */
+ result = 1;
+
free(str1);
free(str2);
if (result)
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index a3dbadb26ef5..7065cd6fbdfc 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -12,7 +12,7 @@ SYNOPSIS
DESCRIPTION
-----------
-This 'perf bench' command is general framework for benchmark suites.
+This 'perf bench' command is a general framework for benchmark suites.
COMMON OPTIONS
--------------
@@ -45,14 +45,20 @@ SUBSYSTEM
'sched'::
Scheduler and IPC mechanisms.
+'mem'::
+ Memory access performance.
+
+'all'::
+ All benchmark subsystems.
+
SUITES FOR 'sched'
~~~~~~~~~~~~~~~~~~
*messaging*::
Suite for evaluating performance of scheduler and IPC mechanisms.
Based on hackbench by Rusty Russell.
-Options of *pipe*
-^^^^^^^^^^^^^^^^^
+Options of *messaging*
+^^^^^^^^^^^^^^^^^^^^^^
-p::
--pipe::
Use pipe() instead of socketpair()
@@ -115,6 +121,72 @@ Example of *pipe*
59004 ops/sec
---------------------
+SUITES FOR 'mem'
+~~~~~~~~~~~~~~~~
+*memcpy*::
+Suite for evaluating performance of simple memory copy in various ways.
+
+Options of *memcpy*
+^^^^^^^^^^^^^^^^^^^
+-l::
+--length::
+Specify length of memory to copy (default: 1MB).
+Available units are B, KB, MB, GB and TB (case insensitive).
+
+-r::
+--routine::
+Specify routine to copy (default: default).
+Available routines are depend on the architecture.
+On x86-64, x86-64-unrolled, x86-64-movsq and x86-64-movsb are supported.
+
+-i::
+--iterations::
+Repeat memcpy invocation this number of times.
+
+-c::
+--cycle::
+Use perf's cpu-cycles event instead of gettimeofday syscall.
+
+-o::
+--only-prefault::
+Show only the result with page faults before memcpy.
+
+-n::
+--no-prefault::
+Show only the result without page faults before memcpy.
+
+*memset*::
+Suite for evaluating performance of simple memory set in various ways.
+
+Options of *memset*
+^^^^^^^^^^^^^^^^^^^
+-l::
+--length::
+Specify length of memory to set (default: 1MB).
+Available units are B, KB, MB, GB and TB (case insensitive).
+
+-r::
+--routine::
+Specify routine to set (default: default).
+Available routines are depend on the architecture.
+On x86-64, x86-64-unrolled, x86-64-stosq and x86-64-stosb are supported.
+
+-i::
+--iterations::
+Repeat memset invocation this number of times.
+
+-c::
+--cycle::
+Use perf's cpu-cycles event instead of gettimeofday syscall.
+
+-o::
+--only-prefault::
+Show only the result with page faults before memset.
+
+-n::
+--no-prefault::
+Show only the result without page faults before memset.
+
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 2d89f02719b5..495210a612c4 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -57,7 +57,7 @@ OPTIONS
-s::
--sort=::
- Sort by key(s): pid, comm, dso, symbol, parent.
+ Sort by key(s): pid, comm, dso, symbol, parent, srcline.
-p::
--parent=<regex>::
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 4a5680cb242e..5b80d84d6b4a 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -112,7 +112,7 @@ Default is to monitor all CPUS.
-s::
--sort::
- Sort by key(s): pid, comm, dso, symbol, parent
+ Sort by key(s): pid, comm, dso, symbol, parent, srcline.
-n::
--show-nr-samples::
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 0eee64cfe9a0..75d74e5db8d5 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -155,7 +155,7 @@ endif
### --- END CONFIGURATION SECTION ---
-BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)/util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
+BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -I$(OUTPUT)util -I$(TRACE_EVENT_DIR) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
BASIC_LDFLAGS =
# Guard against environment variables
@@ -503,6 +503,7 @@ else
LIB_OBJS += $(OUTPUT)ui/progress.o
LIB_OBJS += $(OUTPUT)ui/util.o
LIB_OBJS += $(OUTPUT)ui/tui/setup.o
+ LIB_OBJS += $(OUTPUT)ui/tui/util.o
LIB_H += ui/browser.h
LIB_H += ui/browsers/map.h
LIB_H += ui/helpline.h
@@ -522,13 +523,18 @@ else
msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
BASIC_CFLAGS += -DNO_GTK2_SUPPORT
else
+ ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y)
+ BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR
+ endif
BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0)
EXTLIBS += $(shell pkg-config --libs gtk+-2.0)
LIB_OBJS += $(OUTPUT)ui/gtk/browser.o
LIB_OBJS += $(OUTPUT)ui/gtk/setup.o
+ LIB_OBJS += $(OUTPUT)ui/gtk/util.o
# Make sure that it'd be included only once.
ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),)
LIB_OBJS += $(OUTPUT)ui/setup.o
+ LIB_OBJS += $(OUTPUT)ui/util.o
endif
endif
endif
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 71557225bf92..02dad5d3359b 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -24,21 +24,21 @@
static const char *length_str = "1MB";
static const char *routine = "default";
static int iterations = 1;
-static bool use_clock;
-static int clock_fd;
+static bool use_cycle;
+static int cycle_fd;
static bool only_prefault;
static bool no_prefault;
static const struct option options[] = {
OPT_STRING('l', "length", &length_str, "1MB",
"Specify length of memory to copy. "
- "available unit: B, MB, GB (upper and lower)"),
+ "Available units: B, KB, MB, GB and TB (upper and lower)"),
OPT_STRING('r', "routine", &routine, "default",
"Specify routine to copy"),
OPT_INTEGER('i', "iterations", &iterations,
"repeat memcpy() invocation this number of times"),
- OPT_BOOLEAN('c', "clock", &use_clock,
- "Use CPU clock for measuring"),
+ OPT_BOOLEAN('c', "cycle", &use_cycle,
+ "Use cycles event instead of gettimeofday() for measuring"),
OPT_BOOLEAN('o', "only-prefault", &only_prefault,
"Show only the result with page faults before memcpy()"),
OPT_BOOLEAN('n', "no-prefault", &no_prefault,
@@ -76,27 +76,27 @@ static const char * const bench_mem_memcpy_usage[] = {
NULL
};
-static struct perf_event_attr clock_attr = {
+static struct perf_event_attr cycle_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES
};
-static void init_clock(void)
+static void init_cycle(void)
{
- clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0);
+ cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, 0);
- if (clock_fd < 0 && errno == ENOSYS)
+ if (cycle_fd < 0 && errno == ENOSYS)
die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
else
- BUG_ON(clock_fd < 0);
+ BUG_ON(cycle_fd < 0);
}
-static u64 get_clock(void)
+static u64 get_cycle(void)
{
int ret;
u64 clk;
- ret = read(clock_fd, &clk, sizeof(u64));
+ ret = read(cycle_fd, &clk, sizeof(u64));
BUG_ON(ret != sizeof(u64));
return clk;
@@ -119,9 +119,9 @@ static void alloc_mem(void **dst, void **src, size_t length)
die("memory allocation failed - maybe length is too large?\n");
}
-static u64 do_memcpy_clock(memcpy_t fn, size_t len, bool prefault)
+static u64 do_memcpy_cycle(memcpy_t fn, size_t len, bool prefault)
{
- u64 clock_start = 0ULL, clock_end = 0ULL;
+ u64 cycle_start = 0ULL, cycle_end = 0ULL;
void *src = NULL, *dst = NULL;
int i;
@@ -130,14 +130,14 @@ static u64 do_memcpy_clock(memcpy_t fn, size_t len, bool prefault)
if (prefault)
fn(dst, src, len);
- clock_start = get_clock();
+ cycle_start = get_cycle();
for (i = 0; i < iterations; ++i)
fn(dst, src, len);
- clock_end = get_clock();
+ cycle_end = get_cycle();
free(src);
free(dst);
- return clock_end - clock_start;
+ return cycle_end - cycle_start;
}
static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault)
@@ -182,17 +182,17 @@ int bench_mem_memcpy(int argc, const char **argv,
int i;
size_t len;
double result_bps[2];
- u64 result_clock[2];
+ u64 result_cycle[2];
argc = parse_options(argc, argv, options,
bench_mem_memcpy_usage, 0);
- if (use_clock)
- init_clock();
+ if (use_cycle)
+ init_cycle();
len = (size_t)perf_atoll((char *)length_str);
- result_clock[0] = result_clock[1] = 0ULL;
+ result_cycle[0] = result_cycle[1] = 0ULL;
result_bps[0] = result_bps[1] = 0.0;
if ((s64)len <= 0) {
@@ -223,11 +223,11 @@ int bench_mem_memcpy(int argc, const char **argv,
if (!only_prefault && !no_prefault) {
/* show both of results */
- if (use_clock) {
- result_clock[0] =
- do_memcpy_clock(routines[i].fn, len, false);
- result_clock[1] =
- do_memcpy_clock(routines[i].fn, len, true);
+ if (use_cycle) {
+ result_cycle[0] =
+ do_memcpy_cycle(routines[i].fn, len, false);
+ result_cycle[1] =
+ do_memcpy_cycle(routines[i].fn, len, true);
} else {
result_bps[0] =
do_memcpy_gettimeofday(routines[i].fn,
@@ -237,9 +237,9 @@ int bench_mem_memcpy(int argc, const char **argv,
len, true);
}
} else {
- if (use_clock) {
- result_clock[pf] =
- do_memcpy_clock(routines[i].fn,
+ if (use_cycle) {
+ result_cycle[pf] =
+ do_memcpy_cycle(routines[i].fn,
len, only_prefault);
} else {
result_bps[pf] =
@@ -251,12 +251,12 @@ int bench_mem_memcpy(int argc, const char **argv,
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
if (!only_prefault && !no_prefault) {
- if (use_clock) {
- printf(" %14lf Clock/Byte\n",
- (double)result_clock[0]
+ if (use_cycle) {
+ printf(" %14lf Cycle/Byte\n",
+ (double)result_cycle[0]
/ (double)len);
- printf(" %14lf Clock/Byte (with prefault)\n",
- (double)result_clock[1]
+ printf(" %14lf Cycle/Byte (with prefault)\n",
+ (double)result_cycle[1]
/ (double)len);
} else {
print_bps(result_bps[0]);
@@ -265,9 +265,9 @@ int bench_mem_memcpy(int argc, const char **argv,
printf(" (with prefault)\n");
}
} else {
- if (use_clock) {
- printf(" %14lf Clock/Byte",
- (double)result_clock[pf]
+ if (use_cycle) {
+ printf(" %14lf Cycle/Byte",
+ (double)result_cycle[pf]
/ (double)len);
} else
print_bps(result_bps[pf]);
@@ -277,17 +277,17 @@ int bench_mem_memcpy(int argc, const char **argv,
break;
case BENCH_FORMAT_SIMPLE:
if (!only_prefault && !no_prefault) {
- if (use_clock) {
+ if (use_cycle) {
printf("%lf %lf\n",
- (double)result_clock[0] / (double)len,
- (double)result_clock[1] / (double)len);
+ (double)result_cycle[0] / (double)len,
+ (double)result_cycle[1] / (double)len);
} else {
printf("%lf %lf\n",
result_bps[0], result_bps[1]);
}
} else {
- if (use_clock) {
- printf("%lf\n", (double)result_clock[pf]
+ if (use_cycle) {
+ printf("%lf\n", (double)result_cycle[pf]
/ (double)len);
} else
printf("%lf\n", result_bps[pf]);
diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c
index e9079185bd72..350cc9557265 100644
--- a/tools/perf/bench/mem-memset.c
+++ b/tools/perf/bench/mem-memset.c
@@ -24,21 +24,21 @@
static const char *length_str = "1MB";
static const char *routine = "default";
static int iterations = 1;
-static bool use_clock;
-static int clock_fd;
+static bool use_cycle;
+static int cycle_fd;
static bool only_prefault;
static bool no_prefault;
static const struct option options[] = {
OPT_STRING('l', "length", &length_str, "1MB",
- "Specify length of memory to copy. "
- "available unit: B, MB, GB (upper and lower)"),
+ "Specify length of memory to set. "
+ "Available units: B, KB, MB, GB and TB (upper and lower)"),
OPT_STRING('r', "routine", &routine, "default",
- "Specify routine to copy"),
+ "Specify routine to set"),
OPT_INTEGER('i', "iterations", &iterations,
"repeat memset() invocation this number of times"),
- OPT_BOOLEAN('c', "clock", &use_clock,
- "Use CPU clock for measuring"),
+ OPT_BOOLEAN('c', "cycle", &use_cycle,
+ "Use cycles event instead of gettimeofday() for measuring"),
OPT_BOOLEAN('o', "only-prefault", &only_prefault,
"Show only the result with page faults before memset()"),
OPT_BOOLEAN('n', "no-prefault", &no_prefault,
@@ -76,27 +76,27 @@ static const char * const bench_mem_memset_usage[] = {
NULL
};
-static struct perf_event_attr clock_attr = {
+static struct perf_event_attr cycle_attr = {
.type = PERF_TYPE_HARDWARE,
.config = PERF_COUNT_HW_CPU_CYCLES
};
-static void init_clock(void)
+static void init_cycle(void)
{
- clock_fd = sys_perf_event_open(&clock_attr, getpid(), -1, -1, 0);
+ cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, 0);
- if (clock_fd < 0 && errno == ENOSYS)
+ if (cycle_fd < 0 && errno == ENOSYS)
die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
else
- BUG_ON(clock_fd < 0);
+ BUG_ON(cycle_fd < 0);
}
-static u64 get_clock(void)
+static u64 get_cycle(void)
{
int ret;
u64 clk;
- ret = read(clock_fd, &clk, sizeof(u64));
+ ret = read(cycle_fd, &clk, sizeof(u64));
BUG_ON(ret != sizeof(u64));
return clk;
@@ -115,9 +115,9 @@ static void alloc_mem(void **dst, size_t length)
die("memory allocation failed - maybe length is too large?\n");
}
-static u64 do_memset_clock(memset_t fn, size_t len, bool prefault)
+static u64 do_memset_cycle(memset_t fn, size_t len, bool prefault)
{
- u64 clock_start = 0ULL, clock_end = 0ULL;
+ u64 cycle_start = 0ULL, cycle_end = 0ULL;
void *dst = NULL;
int i;
@@ -126,13 +126,13 @@ static u64 do_memset_clock(memset_t fn, size_t len, bool prefault)
if (prefault)
fn(dst, -1, len);
- clock_start = get_clock();
+ cycle_start = get_cycle();
for (i = 0; i < iterations; ++i)
fn(dst, i, len);
- clock_end = get_clock();
+ cycle_end = get_cycle();
free(dst);
- return clock_end - clock_start;
+ return cycle_end - cycle_start;
}
static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault)
@@ -176,17 +176,17 @@ int bench_mem_memset(int argc, const char **argv,
int i;
size_t len;
double result_bps[2];
- u64 result_clock[2];
+ u64 result_cycle[2];
argc = parse_options(argc, argv, options,
bench_mem_memset_usage, 0);
- if (use_clock)
- init_clock();
+ if (use_cycle)
+ init_cycle();
len = (size_t)perf_atoll((char *)length_str);
- result_clock[0] = result_clock[1] = 0ULL;
+ result_cycle[0] = result_cycle[1] = 0ULL;
result_bps[0] = result_bps[1] = 0.0;
if ((s64)len <= 0) {
@@ -217,11 +217,11 @@ int bench_mem_memset(int argc, const char **argv,
if (!only_prefault && !no_prefault) {
/* show both of results */
- if (use_clock) {
- result_clock[0] =
- do_memset_clock(routines[i].fn, len, false);
- result_clock[1] =
- do_memset_clock(routines[i].fn, len, true);
+ if (use_cycle) {
+ result_cycle[0] =
+ do_memset_cycle(routines[i].fn, len, false);
+ result_cycle[1] =
+ do_memset_cycle(routines[i].fn, len, true);
} else {
result_bps[0] =
do_memset_gettimeofday(routines[i].fn,
@@ -231,9 +231,9 @@ int bench_mem_memset(int argc, const char **argv,
len, true);
}
} else {
- if (use_clock) {
- result_clock[pf] =
- do_memset_clock(routines[i].fn,
+ if (use_cycle) {
+ result_cycle[pf] =
+ do_memset_cycle(routines[i].fn,
len, only_prefault);
} else {
result_bps[pf] =
@@ -245,12 +245,12 @@ int bench_mem_memset(int argc, const char **argv,
switch (bench_format) {
case BENCH_FORMAT_DEFAULT:
if (!only_prefault && !no_prefault) {
- if (use_clock) {
- printf(" %14lf Clock/Byte\n",
- (double)result_clock[0]
+ if (use_cycle) {
+ printf(" %14lf Cycle/Byte\n",
+ (double)result_cycle[0]
/ (double)len);
- printf(" %14lf Clock/Byte (with prefault)\n ",
- (double)result_clock[1]
+ printf(" %14lf Cycle/Byte (with prefault)\n ",
+ (double)result_cycle[1]
/ (double)len);
} else {
print_bps(result_bps[0]);
@@ -259,9 +259,9 @@ int bench_mem_memset(int argc, const char **argv,
printf(" (with prefault)\n");
}
} else {
- if (use_clock) {
- printf(" %14lf Clock/Byte",
- (double)result_clock[pf]
+ if (use_cycle) {
+ printf(" %14lf Cycle/Byte",
+ (double)result_cycle[pf]
/ (double)len);
} else
print_bps(result_bps[pf]);
@@ -271,17 +271,17 @@ int bench_mem_memset(int argc, const char **argv,
break;
case BENCH_FORMAT_SIMPLE:
if (!only_prefault && !no_prefault) {
- if (use_clock) {
+ if (use_cycle) {
printf("%lf %lf\n",
- (double)result_clock[0] / (double)len,
- (double)result_clock[1] / (double)len);
+ (double)result_cycle[0] / (double)len,
+ (double)result_cycle[1] / (double)len);
} else {
printf("%lf %lf\n",
result_bps[0], result_bps[1]);
}
} else {
- if (use_clock) {
- printf("%lf\n", (double)result_clock[pf]
+ if (use_cycle) {
+ printf("%lf\n", (double)result_cycle[pf]
/ (double)len);
} else
printf("%lf\n", result_bps[pf]);
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index b0e74ab2d7a2..1f3100216448 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -33,7 +33,7 @@ struct bench_suite {
};
\
/* sentinel: easy for help */
-#define suite_all { "all", "test all suite (pseudo suite)", NULL }
+#define suite_all { "all", "Test all benchmark suites", NULL }
static struct bench_suite sched_suites[] = {
{ "messaging",
@@ -75,7 +75,7 @@ static struct bench_subsys subsystems[] = {
"memory access performance",
mem_suites },
{ "all", /* sentinel: easy for help */
- "test all subsystem (pseudo subsystem)",
+ "all benchmark subsystem",
NULL },
{ NULL,
NULL,
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index acd78dc28341..0dd5a058f766 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -60,7 +60,7 @@ static int __cmd_evlist(const char *input_name, struct perf_attr_details *detail
list_for_each_entry(pos, &session->evlist->entries, node) {
bool first = true;
- printf("%s", event_name(pos));
+ printf("%s", perf_evsel__name(pos));
if (details->verbose || details->freq) {
comma_printf(&first, " sample_freq=%" PRIu64,
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 547af48deb4f..ce35015f2dc6 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -57,6 +57,11 @@ static unsigned long nr_allocs, nr_cross_allocs;
#define PATH_SYS_NODE "/sys/devices/system/node"
+struct perf_kmem {
+ struct perf_tool tool;
+ struct perf_session *session;
+};
+
static void init_cpunode_map(void)
{
FILE *fp;
@@ -278,14 +283,16 @@ static void process_free_event(void *data,
s_alloc->alloc_cpu = -1;
}
-static void process_raw_event(union perf_event *raw_event __used, void *data,
+static void process_raw_event(struct perf_tool *tool,
+ union perf_event *raw_event __used, void *data,
int cpu, u64 timestamp, struct thread *thread)
{
+ struct perf_kmem *kmem = container_of(tool, struct perf_kmem, tool);
struct event_format *event;
int type;
- type = trace_parse_common_type(data);
- event = trace_find_event(type);
+ type = trace_parse_common_type(kmem->session->pevent, data);
+ event = pevent_find_event(kmem->session->pevent, type);
if (!strcmp(event->name, "kmalloc") ||
!strcmp(event->name, "kmem_cache_alloc")) {
@@ -306,7 +313,7 @@ static void process_raw_event(union perf_event *raw_event __used, void *data,
}
}
-static int process_sample_event(struct perf_tool *tool __used,
+static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel __used,
@@ -322,16 +329,18 @@ static int process_sample_event(struct perf_tool *tool __used,
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
- process_raw_event(event, sample->raw_data, sample->cpu,
+ process_raw_event(tool, event, sample->raw_data, sample->cpu,
sample->time, thread);
return 0;
}
-static struct perf_tool perf_kmem = {
- .sample = process_sample_event,
- .comm = perf_event__process_comm,
- .ordered_samples = true,
+static struct perf_kmem perf_kmem = {
+ .tool = {
+ .sample = process_sample_event,
+ .comm = perf_event__process_comm,
+ .ordered_samples = true,
+ },
};
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -486,11 +495,15 @@ static void sort_result(void)
static int __cmd_kmem(void)
{
int err = -EINVAL;
- struct perf_session *session = perf_session__new(input_name, O_RDONLY,
- 0, false, &perf_kmem);
+ struct perf_session *session;
+
+ session = perf_session__new(input_name, O_RDONLY, 0, false,
+ &perf_kmem.tool);
if (session == NULL)
return -ENOMEM;
+ perf_kmem.session = session;
+
if (perf_session__create_kernel_maps(session) < 0)
goto out_delete;
@@ -498,7 +511,7 @@ static int __cmd_kmem(void)
goto out_delete;
setup_pager();
- err = perf_session__process_events(session, &perf_kmem);
+ err = perf_session__process_events(session, &perf_kmem.tool);
if (err != 0)
goto out_delete;
sort_result();
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index fd53319de20d..b3c428548868 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -724,8 +724,8 @@ process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
struct event_format *event;
int type;
- type = trace_parse_common_type(data);
- event = trace_find_event(type);
+ type = trace_parse_common_type(session->pevent, data);
+ event = pevent_find_event(session->pevent, type);
if (!strcmp(event->name, "lock_acquire"))
process_lock_acquire_event(data, event, cpu, timestamp, thread);
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index f95840d04e4c..f5a6452931e6 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -265,7 +265,7 @@ try_again:
if (err == ENOENT) {
ui__error("The %s event is not supported.\n",
- event_name(pos));
+ perf_evsel__name(pos));
exit(EXIT_FAILURE);
}
@@ -916,7 +916,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
usage_with_options(record_usage, record_options);
list_for_each_entry(pos, &evsel_list->entries, node) {
- if (perf_header__push_event(pos->attr.config, event_name(pos)))
+ if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos)))
goto out_free_fd;
}
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 25249f76329d..69b1c1185159 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -69,7 +69,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
if ((sort__has_parent || symbol_conf.use_callchain)
&& sample->callchain) {
- err = machine__resolve_callchain(machine, evsel, al->thread,
+ err = machine__resolve_callchain(machine, al->thread,
sample->callchain, &parent);
if (err)
return err;
@@ -140,7 +140,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
struct hist_entry *he;
if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
- err = machine__resolve_callchain(machine, evsel, al->thread,
+ err = machine__resolve_callchain(machine, al->thread,
sample->callchain, &parent);
if (err)
return err;
@@ -230,7 +230,7 @@ static int process_read_event(struct perf_tool *tool,
struct perf_report *rep = container_of(tool, struct perf_report, tool);
if (rep->show_threads) {
- const char *name = evsel ? event_name(evsel) : "unknown";
+ const char *name = evsel ? perf_evsel__name(evsel) : "unknown";
perf_read_values_add_value(&rep->show_threads_values,
event->read.pid, event->read.tid,
event->read.id,
@@ -239,17 +239,18 @@ static int process_read_event(struct perf_tool *tool,
}
dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
- evsel ? event_name(evsel) : "FAIL",
+ evsel ? perf_evsel__name(evsel) : "FAIL",
event->read.value);
return 0;
}
+/* For pipe mode, sample_type is not currently set */
static int perf_report__setup_sample_type(struct perf_report *rep)
{
struct perf_session *self = rep->session;
- if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
ui__error("Selected --sort parent, but no "
"callchain data. Did you call "
@@ -272,7 +273,8 @@ static int perf_report__setup_sample_type(struct perf_report *rep)
}
if (sort__branch_mode == 1) {
- if (!(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
+ if (!self->fd_pipe &&
+ !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) {
ui__error("Selected -b but no branch data. "
"Did you call perf record without -b?\n");
return -1;
@@ -314,7 +316,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
list_for_each_entry(pos, &evlist->entries, node) {
struct hists *hists = &pos->hists;
- const char *evname = event_name(pos);
+ const char *evname = perf_evsel__name(pos);
hists__fprintf_nr_sample_events(hists, evname, stdout);
hists__fprintf(hists, NULL, false, true, 0, 0, stdout);
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index b125e07eb399..7a9ad2b1ee76 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -43,6 +43,11 @@ static u64 sleep_measurement_overhead;
static unsigned long nr_tasks;
+struct perf_sched {
+ struct perf_tool tool;
+ struct perf_session *session;
+};
+
struct sched_atom;
struct task_desc {
@@ -1597,11 +1602,13 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
struct perf_evsel *evsel,
struct machine *machine)
{
+ struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
+ struct pevent *pevent = sched->session->pevent;
struct thread *thread = machine__findnew_thread(machine, sample->pid);
if (thread == NULL) {
pr_debug("problem processing %s event, skipping it.\n",
- evsel->name);
+ perf_evsel__name(evsel));
return -1;
}
@@ -1612,7 +1619,8 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
tracepoint_handler f = evsel->handler.func;
if (evsel->handler.data == NULL)
- evsel->handler.data = trace_find_event(evsel->attr.config);
+ evsel->handler.data = pevent_find_event(pevent,
+ evsel->attr.config);
f(tool, evsel->handler.data, sample, machine, thread);
}
@@ -1620,12 +1628,14 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool,
return 0;
}
-static struct perf_tool perf_sched = {
- .sample = perf_sched__process_tracepoint_sample,
- .comm = perf_event__process_comm,
- .lost = perf_event__process_lost,
- .fork = perf_event__process_task,
- .ordered_samples = true,
+static struct perf_sched perf_sched = {
+ .tool = {
+ .sample = perf_sched__process_tracepoint_sample,
+ .comm = perf_event__process_comm,
+ .lost = perf_event__process_lost,
+ .fork = perf_event__process_task,
+ .ordered_samples = true,
+ },
};
static void read_events(bool destroy, struct perf_session **psession)
@@ -1640,16 +1650,20 @@ static void read_events(bool destroy, struct perf_session **psession)
{ "sched:sched_process_exit", process_sched_exit_event, },
{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
};
- struct perf_session *session = perf_session__new(input_name, O_RDONLY,
- 0, false, &perf_sched);
+ struct perf_session *session;
+
+ session = perf_session__new(input_name, O_RDONLY, 0, false,
+ &perf_sched.tool);
if (session == NULL)
die("No Memory");
- err = perf_evlist__set_tracepoints_handlers_array(session->evlist, handlers);
+ perf_sched.session = session;
+
+ err = perf_session__set_tracepoints_handlers(session, handlers);
assert(err == 0);
if (perf_session__has_traces(session, "record -R")) {
- err = perf_session__process_events(session, &perf_sched);
+ err = perf_session__process_events(session, &perf_sched.tool);
if (err)
die("Failed to process events, error %d", err);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 8e395a538eb9..1e60ab70b2b1 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -28,6 +28,11 @@ static bool system_wide;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
+struct perf_script {
+ struct perf_tool tool;
+ struct perf_session *session;
+};
+
enum perf_output_field {
PERF_OUTPUT_COMM = 1U << 0,
PERF_OUTPUT_TID = 1U << 1,
@@ -137,10 +142,11 @@ static const char *output_field2str(enum perf_output_field field)
#define PRINT_FIELD(x) (output[attr->type].fields & PERF_OUTPUT_##x)
-static int perf_event_attr__check_stype(struct perf_event_attr *attr,
- u64 sample_type, const char *sample_msg,
- enum perf_output_field field)
+static int perf_evsel__check_stype(struct perf_evsel *evsel,
+ u64 sample_type, const char *sample_msg,
+ enum perf_output_field field)
{
+ struct perf_event_attr *attr = &evsel->attr;
int type = attr->type;
const char *evname;
@@ -148,7 +154,7 @@ static int perf_event_attr__check_stype(struct perf_event_attr *attr,
return 0;
if (output[type].user_set) {
- evname = __event_name(attr->type, attr->config);
+ evname = perf_evsel__name(evsel);
pr_err("Samples for '%s' event do not have %s attribute set. "
"Cannot print '%s' field.\n",
evname, sample_msg, output_field2str(field));
@@ -157,7 +163,7 @@ static int perf_event_attr__check_stype(struct perf_event_attr *attr,
/* user did not ask for it explicitly so remove from the default list */
output[type].fields &= ~field;
- evname = __event_name(attr->type, attr->config);
+ evname = perf_evsel__name(evsel);
pr_debug("Samples for '%s' event do not have %s attribute set. "
"Skipping '%s' field.\n",
evname, sample_msg, output_field2str(field));
@@ -175,8 +181,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
return -EINVAL;
if (PRINT_FIELD(IP)) {
- if (perf_event_attr__check_stype(attr, PERF_SAMPLE_IP, "IP",
- PERF_OUTPUT_IP))
+ if (perf_evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP",
+ PERF_OUTPUT_IP))
return -EINVAL;
if (!no_callchain &&
@@ -185,8 +191,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
}
if (PRINT_FIELD(ADDR) &&
- perf_event_attr__check_stype(attr, PERF_SAMPLE_ADDR, "ADDR",
- PERF_OUTPUT_ADDR))
+ perf_evsel__check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR",
+ PERF_OUTPUT_ADDR))
return -EINVAL;
if (PRINT_FIELD(SYM) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
@@ -208,18 +214,18 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
}
if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
- perf_event_attr__check_stype(attr, PERF_SAMPLE_TID, "TID",
- PERF_OUTPUT_TID|PERF_OUTPUT_PID))
+ perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID",
+ PERF_OUTPUT_TID|PERF_OUTPUT_PID))
return -EINVAL;
if (PRINT_FIELD(TIME) &&
- perf_event_attr__check_stype(attr, PERF_SAMPLE_TIME, "TIME",
- PERF_OUTPUT_TIME))
+ perf_evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME",
+ PERF_OUTPUT_TIME))
return -EINVAL;
if (PRINT_FIELD(CPU) &&
- perf_event_attr__check_stype(attr, PERF_SAMPLE_CPU, "CPU",
- PERF_OUTPUT_CPU))
+ perf_evsel__check_stype(evsel, PERF_SAMPLE_CPU, "CPU",
+ PERF_OUTPUT_CPU))
return -EINVAL;
return 0;
@@ -256,11 +262,13 @@ static int perf_session__check_output_opt(struct perf_session *session)
return 0;
}
-static void print_sample_start(struct perf_sample *sample,
+static void print_sample_start(struct pevent *pevent,
+ struct perf_sample *sample,
struct thread *thread,
- struct perf_event_attr *attr)
+ struct perf_evsel *evsel)
{
int type;
+ struct perf_event_attr *attr = &evsel->attr;
struct event_format *event;
const char *evname = NULL;
unsigned long secs;
@@ -300,12 +308,18 @@ static void print_sample_start(struct perf_sample *sample,
if (PRINT_FIELD(EVNAME)) {
if (attr->type == PERF_TYPE_TRACEPOINT) {
- type = trace_parse_common_type(sample->raw_data);
- event = trace_find_event(type);
+ /*
+ * XXX Do we really need this here?
+ * perf_evlist__set_tracepoint_names should have done
+ * this already
+ */
+ type = trace_parse_common_type(pevent,
+ sample->raw_data);
+ event = pevent_find_event(pevent, type);
if (event)
evname = event->name;
} else
- evname = __event_name(attr->type, attr->config);
+ evname = perf_evsel__name(evsel);
printf("%s: ", evname ? evname : "[unknown]");
}
@@ -387,7 +401,7 @@ static void print_sample_bts(union perf_event *event,
printf(" ");
else
printf("\n");
- perf_event__print_ip(event, sample, machine, evsel,
+ perf_event__print_ip(event, sample, machine,
PRINT_FIELD(SYM), PRINT_FIELD(DSO),
PRINT_FIELD(SYMOFFSET));
}
@@ -402,6 +416,7 @@ static void print_sample_bts(union perf_event *event,
}
static void process_event(union perf_event *event __unused,
+ struct pevent *pevent,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
@@ -412,7 +427,7 @@ static void process_event(union perf_event *event __unused,
if (output[attr->type].fields == 0)
return;
- print_sample_start(sample, thread, attr);
+ print_sample_start(pevent, sample, thread, evsel);
if (is_bts_event(attr)) {
print_sample_bts(event, sample, evsel, machine, thread);
@@ -420,7 +435,7 @@ static void process_event(union perf_event *event __unused,
}
if (PRINT_FIELD(TRACE))
- print_trace_event(sample->cpu, sample->raw_data,
+ print_trace_event(pevent, sample->cpu, sample->raw_data,
sample->raw_size);
if (PRINT_FIELD(ADDR))
@@ -431,7 +446,7 @@ static void process_event(union perf_event *event __unused,
printf(" ");
else
printf("\n");
- perf_event__print_ip(event, sample, machine, evsel,
+ perf_event__print_ip(event, sample, machine,
PRINT_FIELD(SYM), PRINT_FIELD(DSO),
PRINT_FIELD(SYMOFFSET));
}
@@ -451,7 +466,8 @@ static int default_stop_script(void)
return 0;
}
-static int default_generate_script(const char *outfile __unused)
+static int default_generate_script(struct pevent *pevent __unused,
+ const char *outfile __unused)
{
return 0;
}
@@ -489,6 +505,7 @@ static int process_sample_event(struct perf_tool *tool __used,
struct machine *machine)
{
struct addr_location al;
+ struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct thread *thread = machine__findnew_thread(machine, event->ip.tid);
if (thread == NULL) {
@@ -520,24 +537,27 @@ static int process_sample_event(struct perf_tool *tool __used,
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return 0;
- scripting_ops->process_event(event, sample, evsel, machine, thread);
+ scripting_ops->process_event(event, scr->session->pevent,
+ sample, evsel, machine, thread);
evsel->hists.stats.total_period += sample->period;
return 0;
}
-static struct perf_tool perf_script = {
- .sample = process_sample_event,
- .mmap = perf_event__process_mmap,
- .comm = perf_event__process_comm,
- .exit = perf_event__process_task,
- .fork = perf_event__process_task,
- .attr = perf_event__process_attr,
- .event_type = perf_event__process_event_type,
- .tracing_data = perf_event__process_tracing_data,
- .build_id = perf_event__process_build_id,
- .ordered_samples = true,
- .ordering_requires_timestamps = true,
+static struct perf_script perf_script = {
+ .tool = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .comm = perf_event__process_comm,
+ .exit = perf_event__process_task,
+ .fork = perf_event__process_task,
+ .attr = perf_event__process_attr,
+ .event_type = perf_event__process_event_type,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
+ .ordered_samples = true,
+ .ordering_requires_timestamps = true,
+ },
};
extern volatile int session_done;
@@ -553,7 +573,7 @@ static int __cmd_script(struct perf_session *session)
signal(SIGINT, sig_handler);
- ret = perf_session__process_events(session, &perf_script);
+ ret = perf_session__process_events(session, &perf_script.tool);
if (debug_mode)
pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered);
@@ -1335,10 +1355,13 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
if (!script_name)
setup_pager();
- session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_script);
+ session = perf_session__new(input_name, O_RDONLY, 0, false,
+ &perf_script.tool);
if (session == NULL)
return -ENOMEM;
+ perf_script.session = session;
+
if (cpu_list) {
if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap))
return -1;
@@ -1384,7 +1407,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __used)
return -1;
}
- err = scripting_ops->generate_script("perf-script");
+ err = scripting_ops->generate_script(session->pevent,
+ "perf-script");
goto out;
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 07b5c7703dd1..861f0aec77ae 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -391,7 +391,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
if (verbose) {
fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
- event_name(counter), count[0], count[1], count[2]);
+ perf_evsel__name(counter), count[0], count[1], count[2]);
}
/*
@@ -496,7 +496,7 @@ static int run_perf_stat(int argc __used, const char **argv)
errno == ENXIO) {
if (verbose)
ui__warning("%s event is not supported by the kernel.\n",
- event_name(counter));
+ perf_evsel__name(counter));
counter->supported = false;
continue;
}
@@ -594,7 +594,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
csv_output ? 0 : -4,
evsel_list->cpus->map[cpu], csv_sep);
- fprintf(output, fmt, cpustr, msecs, csv_sep, event_name(evsel));
+ fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel));
if (evsel->cgrp)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
@@ -792,7 +792,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
else
cpu = 0;
- fprintf(output, fmt, cpustr, avg, csv_sep, event_name(evsel));
+ fprintf(output, fmt, cpustr, avg, csv_sep, perf_evsel__name(evsel));
if (evsel->cgrp)
fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
@@ -908,7 +908,7 @@ static void print_counter_aggr(struct perf_evsel *counter)
counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
csv_sep,
csv_output ? 0 : -24,
- event_name(counter));
+ perf_evsel__name(counter));
if (counter->cgrp)
fprintf(output, "%s%s", csv_sep, counter->cgrp->name);
@@ -961,7 +961,7 @@ static void print_counter(struct perf_evsel *counter)
counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
csv_sep,
csv_output ? 0 : -24,
- event_name(counter));
+ perf_evsel__name(counter));
if (counter->cgrp)
fprintf(output, "%s%s",
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 5a8727c08757..5ce30305462b 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -583,7 +583,7 @@ static int test__basic_mmap(void)
if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
pr_debug("expected %d %s events, got %d\n",
expected_nr_events[evsel->idx],
- event_name(evsel), nr_events[evsel->idx]);
+ perf_evsel__name(evsel), nr_events[evsel->idx]);
goto out_munmap;
}
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 6bb0277b7dfe..e3cab5f088f8 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -245,7 +245,7 @@ static void perf_top__show_details(struct perf_top *top)
if (notes->src == NULL)
goto out_unlock;
- printf("Showing %s for %s\n", event_name(top->sym_evsel), symbol->name);
+ printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel->idx,
@@ -408,7 +408,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
if (top->evlist->nr_entries > 1)
- fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(top->sym_evsel));
+ fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
@@ -503,13 +503,13 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
fprintf(stderr, "\nAvailable events:");
list_for_each_entry(top->sym_evsel, &top->evlist->entries, node)
- fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, event_name(top->sym_evsel));
+ fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
prompt_integer(&counter, "Enter details event counter");
if (counter >= top->evlist->nr_entries) {
top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node);
- fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top->sym_evsel));
+ fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
sleep(1);
break;
}
@@ -774,7 +774,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
if ((sort__has_parent || symbol_conf.use_callchain) &&
sample->callchain) {
- err = machine__resolve_callchain(machine, evsel, al.thread,
+ err = machine__resolve_callchain(machine, al.thread,
sample->callchain, &parent);
if (err)
return;
@@ -960,7 +960,7 @@ try_again:
if (err == ENOENT) {
ui__error("The %s event is not supported.\n",
- event_name(counter));
+ perf_evsel__name(counter));
goto out_err;
} else if (err == EMFILE) {
ui__error("Too many events are opened.\n"
diff --git a/tools/perf/config/feature-tests.mak b/tools/perf/config/feature-tests.mak
index d9084e03ce56..6c18785a6417 100644
--- a/tools/perf/config/feature-tests.mak
+++ b/tools/perf/config/feature-tests.mak
@@ -78,6 +78,19 @@ int main(int argc, char *argv[])
return 0;
}
endef
+
+define SOURCE_GTK2_INFOBAR
+#pragma GCC diagnostic ignored \"-Wstrict-prototypes\"
+#include <gtk/gtk.h>
+#pragma GCC diagnostic error \"-Wstrict-prototypes\"
+
+int main(void)
+{
+ gtk_info_bar_new();
+
+ return 0;
+}
+endef
endif
ifndef NO_LIBPERL
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 34b1c46eaf42..67a2703e666a 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -814,7 +814,7 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
{
struct disasm_line *pos, *n;
struct annotation *notes;
- const size_t size = symbol__size(sym);
+ size_t size;
struct map_symbol ms = {
.map = map,
.sym = sym,
@@ -834,6 +834,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx,
if (sym == NULL)
return -1;
+ size = symbol__size(sym);
+
if (map->dso->annotate_warned)
return -1;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 53f6697d014e..482f0517b61e 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -23,6 +23,7 @@ struct hist_browser {
struct hists *hists;
struct hist_entry *he_selection;
struct map_symbol *selection;
+ int print_seq;
bool has_symbols;
};
@@ -800,6 +801,196 @@ do_offset:
}
}
+static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *browser,
+ struct callchain_node *chain_node,
+ u64 total, int level,
+ FILE *fp)
+{
+ struct rb_node *node;
+ int offset = level * LEVEL_OFFSET_STEP;
+ u64 new_total, remaining;
+ int printed = 0;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ new_total = chain_node->children_hit;
+ else
+ new_total = total;
+
+ remaining = new_total;
+ node = rb_first(&chain_node->rb_root);
+ while (node) {
+ struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
+ struct rb_node *next = rb_next(node);
+ u64 cumul = callchain_cumul_hits(child);
+ struct callchain_list *chain;
+ char folded_sign = ' ';
+ int first = true;
+ int extra_offset = 0;
+
+ remaining -= cumul;
+
+ list_for_each_entry(chain, &child->val, list) {
+ char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str;
+ const char *str;
+ bool was_first = first;
+
+ if (first)
+ first = false;
+ else
+ extra_offset = LEVEL_OFFSET_STEP;
+
+ folded_sign = callchain_list__folded(chain);
+
+ alloc_str = NULL;
+ str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ if (was_first) {
+ double percent = cumul * 100.0 / new_total;
+
+ if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0)
+ str = "Not enough memory!";
+ else
+ str = alloc_str;
+ }
+
+ printed += fprintf(fp, "%*s%c %s\n", offset + extra_offset, " ", folded_sign, str);
+ free(alloc_str);
+ if (folded_sign == '+')
+ break;
+ }
+
+ if (folded_sign == '-') {
+ const int new_level = level + (extra_offset ? 2 : 1);
+ printed += hist_browser__fprintf_callchain_node_rb_tree(browser, child, new_total,
+ new_level, fp);
+ }
+
+ node = next;
+ }
+
+ return printed;
+}
+
+static int hist_browser__fprintf_callchain_node(struct hist_browser *browser,
+ struct callchain_node *node,
+ int level, FILE *fp)
+{
+ struct callchain_list *chain;
+ int offset = level * LEVEL_OFFSET_STEP;
+ char folded_sign = ' ';
+ int printed = 0;
+
+ list_for_each_entry(chain, &node->val, list) {
+ char ipstr[BITS_PER_LONG / 4 + 1], *s;
+
+ folded_sign = callchain_list__folded(chain);
+ s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr));
+ printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s);
+ }
+
+ if (folded_sign == '-')
+ printed += hist_browser__fprintf_callchain_node_rb_tree(browser, node,
+ browser->hists->stats.total_period,
+ level + 1, fp);
+ return printed;
+}
+
+static int hist_browser__fprintf_callchain(struct hist_browser *browser,
+ struct rb_root *chain, int level, FILE *fp)
+{
+ struct rb_node *nd;
+ int printed = 0;
+
+ for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
+ struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
+
+ printed += hist_browser__fprintf_callchain_node(browser, node, level, fp);
+ }
+
+ return printed;
+}
+
+static int hist_browser__fprintf_entry(struct hist_browser *browser,
+ struct hist_entry *he, FILE *fp)
+{
+ char s[8192];
+ double percent;
+ int printed = 0;
+ char folded_sign = ' ';
+
+ if (symbol_conf.use_callchain)
+ folded_sign = hist_entry__folded(he);
+
+ hist_entry__snprintf(he, s, sizeof(s), browser->hists);
+ percent = (he->period * 100.0) / browser->hists->stats.total_period;
+
+ if (symbol_conf.use_callchain)
+ printed += fprintf(fp, "%c ", folded_sign);
+
+ printed += fprintf(fp, " %5.2f%%", percent);
+
+ if (symbol_conf.show_nr_samples)
+ printed += fprintf(fp, " %11u", he->nr_events);
+
+ if (symbol_conf.show_total_period)
+ printed += fprintf(fp, " %12" PRIu64, he->period);
+
+ printed += fprintf(fp, "%s\n", rtrim(s));
+
+ if (folded_sign == '-')
+ printed += hist_browser__fprintf_callchain(browser, &he->sorted_chain, 1, fp);
+
+ return printed;
+}
+
+static int hist_browser__fprintf(struct hist_browser *browser, FILE *fp)
+{
+ struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries));
+ int printed = 0;
+
+ while (nd) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ printed += hist_browser__fprintf_entry(browser, h, fp);
+ nd = hists__filter_entries(rb_next(nd));
+ }
+
+ return printed;
+}
+
+static int hist_browser__dump(struct hist_browser *browser)
+{
+ char filename[64];
+ FILE *fp;
+
+ while (1) {
+ scnprintf(filename, sizeof(filename), "perf.hist.%d", browser->print_seq);
+ if (access(filename, F_OK))
+ break;
+ /*
+ * XXX: Just an arbitrary lazy upper limit
+ */
+ if (++browser->print_seq == 8192) {
+ ui_helpline__fpush("Too many perf.hist.N files, nothing written!");
+ return -1;
+ }
+ }
+
+ fp = fopen(filename, "w");
+ if (fp == NULL) {
+ char bf[64];
+ strerror_r(errno, bf, sizeof(bf));
+ ui_helpline__fpush("Couldn't write to %s: %s", filename, bf);
+ return -1;
+ }
+
+ ++browser->print_seq;
+ hist_browser__fprintf(browser, fp);
+ fclose(fp);
+ ui_helpline__fpush("%s written!", filename);
+
+ return 0;
+}
+
static struct hist_browser *hist_browser__new(struct hists *hists)
{
struct hist_browser *browser = zalloc(sizeof(*browser));
@@ -937,6 +1128,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
browser->selection->map->dso->annotate_warned)
continue;
goto do_annotate;
+ case 'P':
+ hist_browser__dump(browser);
+ continue;
case 'd':
goto zoom_dso;
case 't':
@@ -969,6 +1163,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
"E Expand all callchains\n"
"d Zoom into current DSO\n"
"t Zoom into current Thread\n"
+ "P Print histograms to perf.hist.N\n"
"/ Filter symbol by name");
continue;
case K_ENTER:
@@ -1172,7 +1367,7 @@ static void perf_evsel_menu__write(struct ui_browser *browser,
struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node);
bool current_entry = ui_browser__is_current_entry(browser, row);
unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE];
- const char *ev_name = event_name(evsel);
+ const char *ev_name = perf_evsel__name(evsel);
char bf[256], unit;
const char *warn = " ";
size_t printed;
@@ -1240,7 +1435,7 @@ browse_hists:
*/
if (timer)
timer(arg);
- ev_name = event_name(pos);
+ ev_name = perf_evsel__name(pos);
key = perf_evsel__hists_browse(pos, nr_events, help,
ev_name, true, timer,
arg, delay_secs);
@@ -1309,17 +1504,11 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
ui_helpline__push("Press ESC to exit");
list_for_each_entry(pos, &evlist->entries, node) {
- const char *ev_name = event_name(pos);
+ const char *ev_name = perf_evsel__name(pos);
size_t line_len = strlen(ev_name) + 7;
if (menu.b.width < line_len)
menu.b.width = line_len;
- /*
- * Cache the evsel name, tracepoints have a _high_ cost per
- * event_name() call.
- */
- if (pos->name == NULL)
- pos->name = strdup(ev_name);
}
return perf_evsel_menu__run(&menu, evlist->nr_entries, help, timer,
@@ -1330,11 +1519,10 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
void(*timer)(void *arg), void *arg,
int delay_secs)
{
-
if (evlist->nr_entries == 1) {
struct perf_evsel *first = list_entry(evlist->entries.next,
struct perf_evsel, node);
- const char *ev_name = event_name(first);
+ const char *ev_name = perf_evsel__name(first);
return perf_evsel__hists_browse(first, evlist->nr_entries, help,
ev_name, false, timer, arg,
delay_secs);
diff --git a/tools/perf/ui/gtk/browser.c b/tools/perf/ui/gtk/browser.c
index 0656c381a89c..ec12e0b4ded6 100644
--- a/tools/perf/ui/gtk/browser.c
+++ b/tools/perf/ui/gtk/browser.c
@@ -11,8 +11,8 @@
static void perf_gtk__signal(int sig)
{
+ perf_gtk__exit(false);
psignal(sig, "perf");
- gtk_main_quit();
}
static void perf_gtk__resize_window(GtkWidget *window)
@@ -122,13 +122,59 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists)
gtk_container_add(GTK_CONTAINER(window), view);
}
+#ifdef HAVE_GTK_INFO_BAR
+static GtkWidget *perf_gtk__setup_info_bar(void)
+{
+ GtkWidget *info_bar;
+ GtkWidget *label;
+ GtkWidget *content_area;
+
+ info_bar = gtk_info_bar_new();
+ gtk_widget_set_no_show_all(info_bar, TRUE);
+
+ label = gtk_label_new("");
+ gtk_widget_show(label);
+
+ content_area = gtk_info_bar_get_content_area(GTK_INFO_BAR(info_bar));
+ gtk_container_add(GTK_CONTAINER(content_area), label);
+
+ gtk_info_bar_add_button(GTK_INFO_BAR(info_bar), GTK_STOCK_OK,
+ GTK_RESPONSE_OK);
+ g_signal_connect(info_bar, "response",
+ G_CALLBACK(gtk_widget_hide), NULL);
+
+ pgctx->info_bar = info_bar;
+ pgctx->message_label = label;
+
+ return info_bar;
+}
+#endif
+
+static GtkWidget *perf_gtk__setup_statusbar(void)
+{
+ GtkWidget *stbar;
+ unsigned ctxid;
+
+ stbar = gtk_statusbar_new();
+
+ ctxid = gtk_statusbar_get_context_id(GTK_STATUSBAR(stbar),
+ "perf report");
+ pgctx->statbar = stbar;
+ pgctx->statbar_ctx_id = ctxid;
+
+ return stbar;
+}
+
int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
const char *help __used,
void (*timer) (void *arg)__used,
void *arg __used, int delay_secs __used)
{
struct perf_evsel *pos;
+ GtkWidget *vbox;
GtkWidget *notebook;
+ GtkWidget *info_bar;
+ GtkWidget *statbar;
GtkWidget *window;
signal(SIGSEGV, perf_gtk__signal);
@@ -143,11 +189,17 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
g_signal_connect(window, "delete_event", gtk_main_quit, NULL);
+ pgctx = perf_gtk__activate_context(window);
+ if (!pgctx)
+ return -1;
+
+ vbox = gtk_vbox_new(FALSE, 0);
+
notebook = gtk_notebook_new();
list_for_each_entry(pos, &evlist->entries, node) {
struct hists *hists = &pos->hists;
- const char *evname = event_name(pos);
+ const char *evname = perf_evsel__name(pos);
GtkWidget *scrolled_window;
GtkWidget *tab_label;
@@ -164,7 +216,16 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
gtk_notebook_append_page(GTK_NOTEBOOK(notebook), scrolled_window, tab_label);
}
- gtk_container_add(GTK_CONTAINER(window), notebook);
+ gtk_box_pack_start(GTK_BOX(vbox), notebook, TRUE, TRUE, 0);
+
+ info_bar = perf_gtk__setup_info_bar();
+ if (info_bar)
+ gtk_box_pack_start(GTK_BOX(vbox), info_bar, FALSE, FALSE, 0);
+
+ statbar = perf_gtk__setup_statusbar();
+ gtk_box_pack_start(GTK_BOX(vbox), statbar, FALSE, FALSE, 0);
+
+ gtk_container_add(GTK_CONTAINER(window), vbox);
gtk_widget_show_all(window);
@@ -174,5 +235,7 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
gtk_main();
+ perf_gtk__deactivate_context(&pgctx);
+
return 0;
}
diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h
index 75177ee04032..a4d0f2b4a2dc 100644
--- a/tools/perf/ui/gtk/gtk.h
+++ b/tools/perf/ui/gtk/gtk.h
@@ -1,8 +1,39 @@
#ifndef _PERF_GTK_H_
#define _PERF_GTK_H_ 1
+#include <stdbool.h>
+
#pragma GCC diagnostic ignored "-Wstrict-prototypes"
#include <gtk/gtk.h>
#pragma GCC diagnostic error "-Wstrict-prototypes"
+
+struct perf_gtk_context {
+ GtkWidget *main_window;
+
+#ifdef HAVE_GTK_INFO_BAR
+ GtkWidget *info_bar;
+ GtkWidget *message_label;
+#endif
+ GtkWidget *statbar;
+ guint statbar_ctx_id;
+};
+
+extern struct perf_gtk_context *pgctx;
+
+static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx)
+{
+ return ctx && ctx->main_window;
+}
+
+struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window);
+int perf_gtk__deactivate_context(struct perf_gtk_context **ctx);
+
+#ifndef HAVE_GTK_INFO_BAR
+static inline GtkWidget *perf_gtk__setup_info_bar(void)
+{
+ return NULL;
+}
+#endif
+
#endif /* _PERF_GTK_H_ */
diff --git a/tools/perf/ui/gtk/setup.c b/tools/perf/ui/gtk/setup.c
index 829529957766..92879ce61e2f 100644
--- a/tools/perf/ui/gtk/setup.c
+++ b/tools/perf/ui/gtk/setup.c
@@ -1,12 +1,17 @@
#include "gtk.h"
#include "../../util/cache.h"
+#include "../../util/debug.h"
+
+extern struct perf_error_ops perf_gtk_eops;
int perf_gtk__init(void)
{
+ perf_error__register(&perf_gtk_eops);
return gtk_init_check(NULL, NULL) ? 0 : -1;
}
void perf_gtk__exit(bool wait_for_ok __used)
{
+ perf_error__unregister(&perf_gtk_eops);
gtk_main_quit();
}
diff --git a/tools/perf/ui/gtk/util.c b/tools/perf/ui/gtk/util.c
new file mode 100644
index 000000000000..0ead373c0dfb
--- /dev/null
+++ b/tools/perf/ui/gtk/util.c
@@ -0,0 +1,129 @@
+#include "../util.h"
+#include "../../util/debug.h"
+#include "gtk.h"
+
+#include <string.h>
+
+
+struct perf_gtk_context *pgctx;
+
+struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window)
+{
+ struct perf_gtk_context *ctx;
+
+ ctx = malloc(sizeof(*pgctx));
+ if (ctx)
+ ctx->main_window = window;
+
+ return ctx;
+}
+
+int perf_gtk__deactivate_context(struct perf_gtk_context **ctx)
+{
+ if (!perf_gtk__is_active_context(*ctx))
+ return -1;
+
+ free(*ctx);
+ *ctx = NULL;
+ return 0;
+}
+
+static int perf_gtk__error(const char *format, va_list args)
+{
+ char *msg;
+ GtkWidget *dialog;
+
+ if (!perf_gtk__is_active_context(pgctx) ||
+ vasprintf(&msg, format, args) < 0) {
+ fprintf(stderr, "Error:\n");
+ vfprintf(stderr, format, args);
+ fprintf(stderr, "\n");
+ return -1;
+ }
+
+ dialog = gtk_message_dialog_new_with_markup(GTK_WINDOW(pgctx->main_window),
+ GTK_DIALOG_DESTROY_WITH_PARENT,
+ GTK_MESSAGE_ERROR,
+ GTK_BUTTONS_CLOSE,
+ "<b>Error</b>\n\n%s", msg);
+ gtk_dialog_run(GTK_DIALOG(dialog));
+
+ gtk_widget_destroy(dialog);
+ free(msg);
+ return 0;
+}
+
+#ifdef HAVE_GTK_INFO_BAR
+static int perf_gtk__warning_info_bar(const char *format, va_list args)
+{
+ char *msg;
+
+ if (!perf_gtk__is_active_context(pgctx) ||
+ vasprintf(&msg, format, args) < 0) {
+ fprintf(stderr, "Warning:\n");
+ vfprintf(stderr, format, args);
+ fprintf(stderr, "\n");
+ return -1;
+ }
+
+ gtk_label_set_text(GTK_LABEL(pgctx->message_label), msg);
+ gtk_info_bar_set_message_type(GTK_INFO_BAR(pgctx->info_bar),
+ GTK_MESSAGE_WARNING);
+ gtk_widget_show(pgctx->info_bar);
+
+ free(msg);
+ return 0;
+}
+#else
+static int perf_gtk__warning_statusbar(const char *format, va_list args)
+{
+ char *msg, *p;
+
+ if (!perf_gtk__is_active_context(pgctx) ||
+ vasprintf(&msg, format, args) < 0) {
+ fprintf(stderr, "Warning:\n");
+ vfprintf(stderr, format, args);
+ fprintf(stderr, "\n");
+ return -1;
+ }
+
+ gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar),
+ pgctx->statbar_ctx_id);
+
+ /* Only first line can be displayed */
+ p = strchr(msg, '\n');
+ if (p)
+ *p = '\0';
+
+ gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar),
+ pgctx->statbar_ctx_id, msg);
+
+ free(msg);
+ return 0;
+}
+#endif
+
+struct perf_error_ops perf_gtk_eops = {
+ .error = perf_gtk__error,
+#ifdef HAVE_GTK_INFO_BAR
+ .warning = perf_gtk__warning_info_bar,
+#else
+ .warning = perf_gtk__warning_statusbar,
+#endif
+};
+
+/*
+ * FIXME: Functions below should be implemented properly.
+ * For now, just add stubs for NO_NEWT=1 build.
+ */
+#ifdef NO_NEWT_SUPPORT
+int ui_helpline__show_help(const char *format __used, va_list ap __used)
+{
+ return 0;
+}
+
+void ui_progress__update(u64 curr __used, u64 total __used,
+ const char *title __used)
+{
+}
+#endif
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index d33e943ac434..e813c1d17346 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -15,6 +15,8 @@ pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER;
static volatile int ui__need_resize;
+extern struct perf_error_ops perf_tui_eops;
+
void ui__refresh_dimensions(bool force)
{
if (force || ui__need_resize) {
@@ -122,6 +124,8 @@ int ui__init(void)
signal(SIGINT, ui__signal);
signal(SIGQUIT, ui__signal);
signal(SIGTERM, ui__signal);
+
+ perf_error__register(&perf_tui_eops);
out:
return err;
}
@@ -137,4 +141,6 @@ void ui__exit(bool wait_for_ok)
SLsmg_refresh();
SLsmg_reset_smg();
SLang_reset_tty();
+
+ perf_error__unregister(&perf_tui_eops);
}
diff --git a/tools/perf/ui/tui/util.c b/tools/perf/ui/tui/util.c
new file mode 100644
index 000000000000..092902e30cee
--- /dev/null
+++ b/tools/perf/ui/tui/util.c
@@ -0,0 +1,243 @@
+#include "../../util/util.h"
+#include <signal.h>
+#include <stdbool.h>
+#include <string.h>
+#include <sys/ttydefaults.h>
+
+#include "../../util/cache.h"
+#include "../../util/debug.h"
+#include "../browser.h"
+#include "../keysyms.h"
+#include "../helpline.h"
+#include "../ui.h"
+#include "../util.h"
+#include "../libslang.h"
+
+static void ui_browser__argv_write(struct ui_browser *browser,
+ void *entry, int row)
+{
+ char **arg = entry;
+ bool current_entry = ui_browser__is_current_entry(browser, row);
+
+ ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
+ HE_COLORSET_NORMAL);
+ slsmg_write_nstring(*arg, browser->width);
+}
+
+static int popup_menu__run(struct ui_browser *menu)
+{
+ int key;
+
+ if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
+ return -1;
+
+ while (1) {
+ key = ui_browser__run(menu, 0);
+
+ switch (key) {
+ case K_RIGHT:
+ case K_ENTER:
+ key = menu->index;
+ break;
+ case K_LEFT:
+ case K_ESC:
+ case 'q':
+ case CTRL('c'):
+ key = -1;
+ break;
+ default:
+ continue;
+ }
+
+ break;
+ }
+
+ ui_browser__hide(menu);
+ return key;
+}
+
+int ui__popup_menu(int argc, char * const argv[])
+{
+ struct ui_browser menu = {
+ .entries = (void *)argv,
+ .refresh = ui_browser__argv_refresh,
+ .seek = ui_browser__argv_seek,
+ .write = ui_browser__argv_write,
+ .nr_entries = argc,
+ };
+
+ return popup_menu__run(&menu);
+}
+
+int ui_browser__input_window(const char *title, const char *text, char *input,
+ const char *exit_msg, int delay_secs)
+{
+ int x, y, len, key;
+ int max_len = 60, nr_lines = 0;
+ static char buf[50];
+ const char *t;
+
+ t = text;
+ while (1) {
+ const char *sep = strchr(t, '\n');
+
+ if (sep == NULL)
+ sep = strchr(t, '\0');
+ len = sep - t;
+ if (max_len < len)
+ max_len = len;
+ ++nr_lines;
+ if (*sep == '\0')
+ break;
+ t = sep + 1;
+ }
+
+ max_len += 2;
+ nr_lines += 8;
+ y = SLtt_Screen_Rows / 2 - nr_lines / 2;
+ x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+ SLsmg_set_color(0);
+ SLsmg_draw_box(y, x++, nr_lines, max_len);
+ if (title) {
+ SLsmg_gotorc(y, x + 1);
+ SLsmg_write_string((char *)title);
+ }
+ SLsmg_gotorc(++y, x);
+ nr_lines -= 7;
+ max_len -= 2;
+ SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+ nr_lines, max_len, 1);
+ y += nr_lines;
+ len = 5;
+ while (len--) {
+ SLsmg_gotorc(y + len - 1, x);
+ SLsmg_write_nstring((char *)" ", max_len);
+ }
+ SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
+
+ SLsmg_gotorc(y + 3, x);
+ SLsmg_write_nstring((char *)exit_msg, max_len);
+ SLsmg_refresh();
+
+ x += 2;
+ len = 0;
+ key = ui__getch(delay_secs);
+ while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
+ if (key == K_BKSPC) {
+ if (len == 0)
+ goto next_key;
+ SLsmg_gotorc(y, x + --len);
+ SLsmg_write_char(' ');
+ } else {
+ buf[len] = key;
+ SLsmg_gotorc(y, x + len++);
+ SLsmg_write_char(key);
+ }
+ SLsmg_refresh();
+
+ /* XXX more graceful overflow handling needed */
+ if (len == sizeof(buf) - 1) {
+ ui_helpline__push("maximum size of symbol name reached!");
+ key = K_ENTER;
+ break;
+ }
+next_key:
+ key = ui__getch(delay_secs);
+ }
+
+ buf[len] = '\0';
+ strncpy(input, buf, len+1);
+ return key;
+}
+
+int ui__question_window(const char *title, const char *text,
+ const char *exit_msg, int delay_secs)
+{
+ int x, y;
+ int max_len = 0, nr_lines = 0;
+ const char *t;
+
+ t = text;
+ while (1) {
+ const char *sep = strchr(t, '\n');
+ int len;
+
+ if (sep == NULL)
+ sep = strchr(t, '\0');
+ len = sep - t;
+ if (max_len < len)
+ max_len = len;
+ ++nr_lines;
+ if (*sep == '\0')
+ break;
+ t = sep + 1;
+ }
+
+ max_len += 2;
+ nr_lines += 4;
+ y = SLtt_Screen_Rows / 2 - nr_lines / 2,
+ x = SLtt_Screen_Cols / 2 - max_len / 2;
+
+ SLsmg_set_color(0);
+ SLsmg_draw_box(y, x++, nr_lines, max_len);
+ if (title) {
+ SLsmg_gotorc(y, x + 1);
+ SLsmg_write_string((char *)title);
+ }
+ SLsmg_gotorc(++y, x);
+ nr_lines -= 2;
+ max_len -= 2;
+ SLsmg_write_wrapped_string((unsigned char *)text, y, x,
+ nr_lines, max_len, 1);
+ SLsmg_gotorc(y + nr_lines - 2, x);
+ SLsmg_write_nstring((char *)" ", max_len);
+ SLsmg_gotorc(y + nr_lines - 1, x);
+ SLsmg_write_nstring((char *)exit_msg, max_len);
+ SLsmg_refresh();
+ return ui__getch(delay_secs);
+}
+
+int ui__help_window(const char *text)
+{
+ return ui__question_window("Help", text, "Press any key...", 0);
+}
+
+int ui__dialog_yesno(const char *msg)
+{
+ return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
+}
+
+static int __ui__warning(const char *title, const char *format, va_list args)
+{
+ char *s;
+
+ if (vasprintf(&s, format, args) > 0) {
+ int key;
+
+ pthread_mutex_lock(&ui__lock);
+ key = ui__question_window(title, s, "Press any key...", 0);
+ pthread_mutex_unlock(&ui__lock);
+ free(s);
+ return key;
+ }
+
+ fprintf(stderr, "%s\n", title);
+ vfprintf(stderr, format, args);
+ return K_ESC;
+}
+
+static int perf_tui__error(const char *format, va_list args)
+{
+ return __ui__warning("Error:", format, args);
+}
+
+static int perf_tui__warning(const char *format, va_list args)
+{
+ return __ui__warning("Warning:", format, args);
+}
+
+struct perf_error_ops perf_tui_eops = {
+ .error = perf_tui__error,
+ .warning = perf_tui__warning,
+};
diff --git a/tools/perf/ui/util.c b/tools/perf/ui/util.c
index ad4374a16bb0..4f989774c8c6 100644
--- a/tools/perf/ui/util.c
+++ b/tools/perf/ui/util.c
@@ -1,250 +1,85 @@
-#include "../util.h"
-#include <signal.h>
-#include <stdbool.h>
-#include <string.h>
-#include <sys/ttydefaults.h>
-
-#include "../cache.h"
-#include "../debug.h"
-#include "browser.h"
-#include "keysyms.h"
-#include "helpline.h"
-#include "ui.h"
#include "util.h"
-#include "libslang.h"
-
-static void ui_browser__argv_write(struct ui_browser *browser,
- void *entry, int row)
-{
- char **arg = entry;
- bool current_entry = ui_browser__is_current_entry(browser, row);
-
- ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED :
- HE_COLORSET_NORMAL);
- slsmg_write_nstring(*arg, browser->width);
-}
-
-static int popup_menu__run(struct ui_browser *menu)
-{
- int key;
-
- if (ui_browser__show(menu, " ", "ESC: exit, ENTER|->: Select option") < 0)
- return -1;
+#include "../debug.h"
- while (1) {
- key = ui_browser__run(menu, 0);
-
- switch (key) {
- case K_RIGHT:
- case K_ENTER:
- key = menu->index;
- break;
- case K_LEFT:
- case K_ESC:
- case 'q':
- case CTRL('c'):
- key = -1;
- break;
- default:
- continue;
- }
-
- break;
- }
-
- ui_browser__hide(menu);
- return key;
-}
-int ui__popup_menu(int argc, char * const argv[])
+/*
+ * Default error logging functions
+ */
+static int perf_stdio__error(const char *format, va_list args)
{
- struct ui_browser menu = {
- .entries = (void *)argv,
- .refresh = ui_browser__argv_refresh,
- .seek = ui_browser__argv_seek,
- .write = ui_browser__argv_write,
- .nr_entries = argc,
- };
-
- return popup_menu__run(&menu);
+ fprintf(stderr, "Error:\n");
+ vfprintf(stderr, format, args);
+ return 0;
}
-int ui_browser__input_window(const char *title, const char *text, char *input,
- const char *exit_msg, int delay_secs)
+static int perf_stdio__warning(const char *format, va_list args)
{
- int x, y, len, key;
- int max_len = 60, nr_lines = 0;
- static char buf[50];
- const char *t;
-
- t = text;
- while (1) {
- const char *sep = strchr(t, '\n');
-
- if (sep == NULL)
- sep = strchr(t, '\0');
- len = sep - t;
- if (max_len < len)
- max_len = len;
- ++nr_lines;
- if (*sep == '\0')
- break;
- t = sep + 1;
- }
-
- max_len += 2;
- nr_lines += 8;
- y = SLtt_Screen_Rows / 2 - nr_lines / 2;
- x = SLtt_Screen_Cols / 2 - max_len / 2;
-
- SLsmg_set_color(0);
- SLsmg_draw_box(y, x++, nr_lines, max_len);
- if (title) {
- SLsmg_gotorc(y, x + 1);
- SLsmg_write_string((char *)title);
- }
- SLsmg_gotorc(++y, x);
- nr_lines -= 7;
- max_len -= 2;
- SLsmg_write_wrapped_string((unsigned char *)text, y, x,
- nr_lines, max_len, 1);
- y += nr_lines;
- len = 5;
- while (len--) {
- SLsmg_gotorc(y + len - 1, x);
- SLsmg_write_nstring((char *)" ", max_len);
- }
- SLsmg_draw_box(y++, x + 1, 3, max_len - 2);
-
- SLsmg_gotorc(y + 3, x);
- SLsmg_write_nstring((char *)exit_msg, max_len);
- SLsmg_refresh();
-
- x += 2;
- len = 0;
- key = ui__getch(delay_secs);
- while (key != K_TIMER && key != K_ENTER && key != K_ESC) {
- if (key == K_BKSPC) {
- if (len == 0)
- goto next_key;
- SLsmg_gotorc(y, x + --len);
- SLsmg_write_char(' ');
- } else {
- buf[len] = key;
- SLsmg_gotorc(y, x + len++);
- SLsmg_write_char(key);
- }
- SLsmg_refresh();
-
- /* XXX more graceful overflow handling needed */
- if (len == sizeof(buf) - 1) {
- ui_helpline__push("maximum size of symbol name reached!");
- key = K_ENTER;
- break;
- }
-next_key:
- key = ui__getch(delay_secs);
- }
-
- buf[len] = '\0';
- strncpy(input, buf, len+1);
- return key;
+ fprintf(stderr, "Warning:\n");
+ vfprintf(stderr, format, args);
+ return 0;
}
-int ui__question_window(const char *title, const char *text,
- const char *exit_msg, int delay_secs)
+static struct perf_error_ops default_eops =
{
- int x, y;
- int max_len = 0, nr_lines = 0;
- const char *t;
-
- t = text;
- while (1) {
- const char *sep = strchr(t, '\n');
- int len;
-
- if (sep == NULL)
- sep = strchr(t, '\0');
- len = sep - t;
- if (max_len < len)
- max_len = len;
- ++nr_lines;
- if (*sep == '\0')
- break;
- t = sep + 1;
- }
-
- max_len += 2;
- nr_lines += 4;
- y = SLtt_Screen_Rows / 2 - nr_lines / 2,
- x = SLtt_Screen_Cols / 2 - max_len / 2;
-
- SLsmg_set_color(0);
- SLsmg_draw_box(y, x++, nr_lines, max_len);
- if (title) {
- SLsmg_gotorc(y, x + 1);
- SLsmg_write_string((char *)title);
- }
- SLsmg_gotorc(++y, x);
- nr_lines -= 2;
- max_len -= 2;
- SLsmg_write_wrapped_string((unsigned char *)text, y, x,
- nr_lines, max_len, 1);
- SLsmg_gotorc(y + nr_lines - 2, x);
- SLsmg_write_nstring((char *)" ", max_len);
- SLsmg_gotorc(y + nr_lines - 1, x);
- SLsmg_write_nstring((char *)exit_msg, max_len);
- SLsmg_refresh();
- return ui__getch(delay_secs);
-}
+ .error = perf_stdio__error,
+ .warning = perf_stdio__warning,
+};
-int ui__help_window(const char *text)
-{
- return ui__question_window("Help", text, "Press any key...", 0);
-}
+static struct perf_error_ops *perf_eops = &default_eops;
-int ui__dialog_yesno(const char *msg)
-{
- return ui__question_window(NULL, msg, "Enter: Yes, ESC: No", 0);
-}
-int __ui__warning(const char *title, const char *format, va_list args)
+int ui__error(const char *format, ...)
{
- char *s;
-
- if (use_browser > 0 && vasprintf(&s, format, args) > 0) {
- int key;
+ int ret;
+ va_list args;
- pthread_mutex_lock(&ui__lock);
- key = ui__question_window(title, s, "Press any key...", 0);
- pthread_mutex_unlock(&ui__lock);
- free(s);
- return key;
- }
+ va_start(args, format);
+ ret = perf_eops->error(format, args);
+ va_end(args);
- fprintf(stderr, "%s:\n", title);
- vfprintf(stderr, format, args);
- return K_ESC;
+ return ret;
}
int ui__warning(const char *format, ...)
{
- int key;
+ int ret;
va_list args;
va_start(args, format);
- key = __ui__warning("Warning", format, args);
+ ret = perf_eops->warning(format, args);
va_end(args);
- return key;
+
+ return ret;
}
-int ui__error(const char *format, ...)
+
+/**
+ * perf_error__register - Register error logging functions
+ * @eops: The pointer to error logging function struct
+ *
+ * Register UI-specific error logging functions. Before calling this,
+ * other logging functions should be unregistered, if any.
+ */
+int perf_error__register(struct perf_error_ops *eops)
{
- int key;
- va_list args;
+ if (perf_eops != &default_eops)
+ return -1;
- va_start(args, format);
- key = __ui__warning("Error", format, args);
- va_end(args);
- return key;
+ perf_eops = eops;
+ return 0;
+}
+
+/**
+ * perf_error__unregister - Unregister error logging functions
+ * @eops: The pointer to error logging function struct
+ *
+ * Unregister already registered error logging functions.
+ */
+int perf_error__unregister(struct perf_error_ops *eops)
+{
+ if (perf_eops != eops)
+ return -1;
+
+ perf_eops = &default_eops;
+ return 0;
}
diff --git a/tools/perf/ui/util.h b/tools/perf/ui/util.h
index 2d1738bd71c8..361f08c52d37 100644
--- a/tools/perf/ui/util.h
+++ b/tools/perf/ui/util.h
@@ -9,6 +9,13 @@ int ui__help_window(const char *text);
int ui__dialog_yesno(const char *msg);
int ui__question_window(const char *title, const char *text,
const char *exit_msg, int delay_secs);
-int __ui__warning(const char *title, const char *format, va_list args);
+
+struct perf_error_ops {
+ int (*error)(const char *format, va_list args);
+ int (*warning)(const char *format, va_list args);
+};
+
+int perf_error__register(struct perf_error_ops *eops);
+int perf_error__unregister(struct perf_error_ops *eops);
#endif /* _PERF_UI_UTIL_H_ */
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index efb1fce259a4..4dfe0bb3c322 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -47,7 +47,7 @@ int dump_printf(const char *fmt, ...)
return ret;
}
-#ifdef NO_NEWT_SUPPORT
+#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT)
int ui__warning(const char *format, ...)
{
va_list args;
diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h
index 6bebe7f0a20c..015c91dbc096 100644
--- a/tools/perf/util/debug.h
+++ b/tools/perf/util/debug.h
@@ -12,8 +12,9 @@ int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
void trace_event(union perf_event *event);
struct ui_progress;
+struct perf_error_ops;
-#ifdef NO_NEWT_SUPPORT
+#if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT)
static inline int ui_helpline__show_help(const char *format __used, va_list ap __used)
{
return 0;
@@ -23,12 +24,28 @@ static inline void ui_progress__update(u64 curr __used, u64 total __used,
const char *title __used) {}
#define ui__error(format, arg...) ui__warning(format, ##arg)
-#else
+
+static inline int
+perf_error__register(struct perf_error_ops *eops __used)
+{
+ return 0;
+}
+
+static inline int
+perf_error__unregister(struct perf_error_ops *eops __used)
+{
+ return 0;
+}
+
+#else /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */
+
extern char ui_helpline__last_msg[];
int ui_helpline__show_help(const char *format, va_list ap);
#include "../ui/progress.h"
int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2)));
-#endif
+#include "../ui/util.h"
+
+#endif /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */
int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
int ui__error_paranoid(void);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 7400fb3fc50c..f74e9560350e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -224,8 +224,8 @@ out_free_attrs:
return err;
}
-static struct perf_evsel *
- perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
struct perf_evsel *evsel;
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 989bee9624c2..40d4d3cdced0 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -73,6 +73,9 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
#define perf_evlist__set_tracepoints_handlers_array(evlist, array) \
perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array))
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
+
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
int cpu, int thread, u64 id);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 9f6cebd798ee..e81771364867 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -15,7 +15,7 @@
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
-#include "../../include/linux/perf_event.h"
+#include "../../../include/linux/hw_breakpoint.h"
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0))
@@ -78,7 +78,7 @@ static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
"ref-cycles",
};
-const char *__perf_evsel__hw_name(u64 config)
+static const char *__perf_evsel__hw_name(u64 config)
{
if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
return perf_evsel__hw_names[config];
@@ -86,16 +86,15 @@ const char *__perf_evsel__hw_name(u64 config)
return "unknown-hardware";
}
-static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
- int colon = 0;
+ int colon = 0, r = 0;
struct perf_event_attr *attr = &evsel->attr;
- int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(attr->config));
bool exclude_guest_default = false;
#define MOD_PRINT(context, mod) do { \
if (!attr->exclude_##context) { \
- if (!colon) colon = r++; \
+ if (!colon) colon = ++r; \
r += scnprintf(bf + r, size - r, "%c", mod); \
} } while(0)
@@ -108,7 +107,7 @@ static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
if (attr->precise_ip) {
if (!colon)
- colon = r++;
+ colon = ++r;
r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
exclude_guest_default = true;
}
@@ -119,39 +118,211 @@ static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
}
#undef MOD_PRINT
if (colon)
- bf[colon] = ':';
+ bf[colon - 1] = ':';
return r;
}
-int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size)
+static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
+ return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
+ "cpu-clock",
+ "task-clock",
+ "page-faults",
+ "context-switches",
+ "CPU-migrations",
+ "minor-faults",
+ "major-faults",
+ "alignment-faults",
+ "emulation-faults",
+};
+
+static const char *__perf_evsel__sw_name(u64 config)
{
- int ret;
+ if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
+ return perf_evsel__sw_names[config];
+ return "unknown-software";
+}
+
+static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
+ return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
+{
+ int r;
+
+ r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);
+
+ if (type & HW_BREAKPOINT_R)
+ r += scnprintf(bf + r, size - r, "r");
+
+ if (type & HW_BREAKPOINT_W)
+ r += scnprintf(bf + r, size - r, "w");
+
+ if (type & HW_BREAKPOINT_X)
+ r += scnprintf(bf + r, size - r, "x");
+
+ return r;
+}
+
+static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ struct perf_event_attr *attr = &evsel->attr;
+ int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
+ return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+}
+
+const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_EVSEL__MAX_ALIASES] = {
+ { "L1-dcache", "l1-d", "l1d", "L1-data", },
+ { "L1-icache", "l1-i", "l1i", "L1-instruction", },
+ { "LLC", "L2", },
+ { "dTLB", "d-tlb", "Data-TLB", },
+ { "iTLB", "i-tlb", "Instruction-TLB", },
+ { "branch", "branches", "bpu", "btb", "bpc", },
+ { "node", },
+};
+
+const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_EVSEL__MAX_ALIASES] = {
+ { "load", "loads", "read", },
+ { "store", "stores", "write", },
+ { "prefetch", "prefetches", "speculative-read", "speculative-load", },
+};
+
+const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+ [PERF_EVSEL__MAX_ALIASES] = {
+ { "refs", "Reference", "ops", "access", },
+ { "misses", "miss", },
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define CACHE_READ (1 << C(OP_READ))
+#define CACHE_WRITE (1 << C(OP_WRITE))
+#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
+#define COP(x) (1 << x)
+
+/*
+ * cache operartion stat
+ * L1I : Read and prefetch only
+ * ITLB and BPU : Read-only
+ */
+static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
+ [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
+ [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+ [C(ITLB)] = (CACHE_READ),
+ [C(BPU)] = (CACHE_READ),
+ [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
+};
+
+bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
+{
+ if (perf_evsel__hw_cache_stat[type] & COP(op))
+ return true; /* valid */
+ else
+ return false; /* invalid */
+}
+
+int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
+ char *bf, size_t size)
+{
+ if (result) {
+ return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
+ perf_evsel__hw_cache_op[op][0],
+ perf_evsel__hw_cache_result[result][0]);
+ }
+
+ return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
+ perf_evsel__hw_cache_op[op][1]);
+}
+
+static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
+{
+ u8 op, result, type = (config >> 0) & 0xff;
+ const char *err = "unknown-ext-hardware-cache-type";
+
+ if (type > PERF_COUNT_HW_CACHE_MAX)
+ goto out_err;
+
+ op = (config >> 8) & 0xff;
+ err = "unknown-ext-hardware-cache-op";
+ if (op > PERF_COUNT_HW_CACHE_OP_MAX)
+ goto out_err;
+
+ result = (config >> 16) & 0xff;
+ err = "unknown-ext-hardware-cache-result";
+ if (result > PERF_COUNT_HW_CACHE_RESULT_MAX)
+ goto out_err;
+
+ err = "invalid-cache";
+ if (!perf_evsel__is_cache_op_valid(type, op))
+ goto out_err;
+
+ return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
+out_err:
+ return scnprintf(bf, size, "%s", err);
+}
+
+static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
+ return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+}
+
+static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
+{
+ int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+ return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+}
+
+const char *perf_evsel__name(struct perf_evsel *evsel)
+{
+ char bf[128];
+
+ if (evsel->name)
+ return evsel->name;
switch (evsel->attr.type) {
case PERF_TYPE_RAW:
- ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
+ perf_evsel__raw_name(evsel, bf, sizeof(bf));
break;
case PERF_TYPE_HARDWARE:
- ret = perf_evsel__hw_name(evsel, bf, size);
+ perf_evsel__hw_name(evsel, bf, sizeof(bf));
+ break;
+
+ case PERF_TYPE_HW_CACHE:
+ perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
+ break;
+
+ case PERF_TYPE_SOFTWARE:
+ perf_evsel__sw_name(evsel, bf, sizeof(bf));
break;
+
+ case PERF_TYPE_TRACEPOINT:
+ scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
+ break;
+
+ case PERF_TYPE_BREAKPOINT:
+ perf_evsel__bp_name(evsel, bf, sizeof(bf));
+ break;
+
default:
- /*
- * FIXME
- *
- * This is the minimal perf_evsel__name so that we can
- * reconstruct event names taking into account event modifiers.
- *
- * The old event_name uses it now for raw anr hw events, so that
- * we don't drag all the parsing stuff into the python binding.
- *
- * On the next devel cycle the rest of the event naming will be
- * brought here.
- */
- return 0;
- }
-
- return ret;
+ scnprintf(bf, sizeof(bf), "%s", "unknown attr type");
+ break;
+ }
+
+ evsel->name = strdup(bf);
+
+ return evsel->name ?: "unknown";
}
void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 4ba8b564e6f4..67cc5033d192 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -83,8 +83,19 @@ void perf_evsel__config(struct perf_evsel *evsel,
struct perf_record_opts *opts,
struct perf_evsel *first);
-const char* __perf_evsel__hw_name(u64 config);
-int perf_evsel__name(struct perf_evsel *evsel, char *bf, size_t size);
+bool perf_evsel__is_cache_op_valid(u8 type, u8 op);
+
+#define PERF_EVSEL__MAX_ALIASES 8
+
+extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_EVSEL__MAX_ALIASES];
+extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_EVSEL__MAX_ALIASES];
+const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
+ [PERF_EVSEL__MAX_ALIASES];
+int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
+ char *bf, size_t size);
+const char *perf_evsel__name(struct perf_evsel *evsel);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index e909d43cf542..5a47aba46759 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -641,7 +641,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
/*
* write event string as passed on cmdline
*/
- ret = do_write_string(fd, event_name(attr));
+ ret = do_write_string(fd, perf_evsel__name(attr));
if (ret < 0)
return ret;
/*
@@ -1474,15 +1474,15 @@ out:
static int process_tracing_data(struct perf_file_section *section __unused,
struct perf_header *ph __unused,
- int feat __unused, int fd)
+ int feat __unused, int fd, void *data)
{
- trace_report(fd, false);
+ trace_report(fd, data, false);
return 0;
}
static int process_build_id(struct perf_file_section *section,
struct perf_header *ph,
- int feat __unused, int fd)
+ int feat __unused, int fd, void *data __used)
{
if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
pr_debug("Failed to read buildids, continuing...\n");
@@ -1493,7 +1493,7 @@ struct feature_ops {
int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
void (*print)(struct perf_header *h, int fd, FILE *fp);
int (*process)(struct perf_file_section *section,
- struct perf_header *h, int feat, int fd);
+ struct perf_header *h, int feat, int fd, void *data);
const char *name;
bool full_only;
};
@@ -1988,7 +1988,7 @@ int perf_file_header__read(struct perf_file_header *header,
static int perf_file_section__process(struct perf_file_section *section,
struct perf_header *ph,
- int feat, int fd, void *data __used)
+ int feat, int fd, void *data)
{
if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
@@ -2004,7 +2004,7 @@ static int perf_file_section__process(struct perf_file_section *section,
if (!feat_ops[feat].process)
return 0;
- return feat_ops[feat].process(section, ph, feat, fd);
+ return feat_ops[feat].process(section, ph, feat, fd, data);
}
static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
@@ -2093,9 +2093,11 @@ static int read_attr(int fd, struct perf_header *ph,
return ret <= 0 ? -1 : 0;
}
-static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel)
+static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel,
+ struct pevent *pevent)
{
- struct event_format *event = trace_find_event(evsel->attr.config);
+ struct event_format *event = pevent_find_event(pevent,
+ evsel->attr.config);
char bf[128];
if (event == NULL)
@@ -2109,13 +2111,14 @@ static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel)
return 0;
}
-static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist)
+static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist,
+ struct pevent *pevent)
{
struct perf_evsel *pos;
list_for_each_entry(pos, &evlist->entries, node) {
if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
- perf_evsel__set_tracepoint_name(pos))
+ perf_evsel__set_tracepoint_name(pos, pevent))
return -1;
}
@@ -2198,12 +2201,12 @@ int perf_session__read_header(struct perf_session *session, int fd)
event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
}
- perf_header__process_sections(header, fd, NULL,
+ perf_header__process_sections(header, fd, &session->pevent,
perf_file_section__process);
lseek(fd, header->data_offset, SEEK_SET);
- if (perf_evlist__set_tracepoint_names(session->evlist))
+ if (perf_evlist__set_tracepoint_names(session->evlist, session->pevent))
goto out_delete_evlist;
header->frozen = 1;
@@ -2419,8 +2422,8 @@ int perf_event__process_tracing_data(union perf_event *event,
lseek(session->fd, offset + sizeof(struct tracing_data_event),
SEEK_SET);
- size_read = trace_report(session->fd, session->repipe);
-
+ size_read = trace_report(session->fd, &session->pevent,
+ session->repipe);
padding = ALIGN(size_read, sizeof(u64)) - size_read;
if (read(session->fd, buf, padding) < 0)
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 34bb556d6219..0b096c27a419 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -47,6 +47,7 @@ enum hist_column {
HISTC_SYMBOL_TO,
HISTC_DSO_FROM,
HISTC_DSO_TO,
+ HISTC_SRCLINE,
HISTC_NR_COLS, /* Last entry */
};
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
index 1eb804fd3fbf..b6842c1d02a8 100644
--- a/tools/perf/util/include/linux/kernel.h
+++ b/tools/perf/util/include/linux/kernel.h
@@ -108,4 +108,14 @@ int eprintf(int level,
#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
#endif
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 35ae56864e4f..a1f4e3669142 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -669,25 +669,26 @@ struct machine *machines__find(struct rb_root *self, pid_t pid)
struct machine *machines__findnew(struct rb_root *self, pid_t pid)
{
char path[PATH_MAX];
- const char *root_dir;
+ const char *root_dir = "";
struct machine *machine = machines__find(self, pid);
- if (!machine || machine->pid != pid) {
- if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID)
- root_dir = "";
- else {
- if (!symbol_conf.guestmount)
- goto out;
- sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
- if (access(path, R_OK)) {
- pr_err("Can't access file %s\n", path);
- goto out;
- }
- root_dir = path;
+ if (machine && (machine->pid == pid))
+ goto out;
+
+ if ((pid != HOST_KERNEL_ID) &&
+ (pid != DEFAULT_GUEST_KERNEL_ID) &&
+ (symbol_conf.guestmount)) {
+ sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
+ if (access(path, R_OK)) {
+ pr_err("Can't access file %s\n", path);
+ machine = NULL;
+ goto out;
}
- machine = machines__add(self, pid, root_dir);
+ root_dir = path;
}
+ machine = machines__add(self, pid, root_dir);
+
out:
return machine;
}
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 81371bad4ef0..c14c665d9a25 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -157,7 +157,7 @@ void machine__exit(struct machine *self);
void machine__delete(struct machine *self);
int machine__resolve_callchain(struct machine *machine,
- struct perf_evsel *evsel, struct thread *thread,
+ struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent);
int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c
index 76b98e2a587d..1b997d2b89ce 100644
--- a/tools/perf/util/parse-events-test.c
+++ b/tools/perf/util/parse-events-test.c
@@ -181,6 +181,22 @@ static int test__checkevent_breakpoint_w(struct perf_evlist *evlist)
return 0;
}
+static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries);
+ TEST_ASSERT_VAL("wrong type",
+ PERF_TYPE_BREAKPOINT == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config", 0 == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong bp_type",
+ (HW_BREAKPOINT_R|HW_BREAKPOINT_W) == evsel->attr.bp_type);
+ TEST_ASSERT_VAL("wrong bp_len",
+ HW_BREAKPOINT_LEN_4 == evsel->attr.bp_len);
+ return 0;
+}
+
static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = list_entry(evlist->entries.next,
@@ -309,6 +325,8 @@ static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:u"));
return test__checkevent_breakpoint(evlist);
}
@@ -322,6 +340,8 @@ static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(perf_evsel__name(evsel), "mem:0x0:x:k"));
return test__checkevent_breakpoint_x(evlist);
}
@@ -335,6 +355,8 @@ static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(perf_evsel__name(evsel), "mem:0x0:r:hp"));
return test__checkevent_breakpoint_r(evlist);
}
@@ -348,10 +370,27 @@ static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(perf_evsel__name(evsel), "mem:0x0:w:up"));
return test__checkevent_breakpoint_w(evlist);
}
+static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:kp"));
+
+ return test__checkevent_breakpoint_rw(evlist);
+}
+
static int test__checkevent_pmu(struct perf_evlist *evlist)
{
@@ -413,19 +452,63 @@ static int test__checkevent_pmu_name(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
- /* cpu/config=1,name=krava1/u */
+ /* cpu/config=1,name=krava/u */
evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config);
- TEST_ASSERT_VAL("wrong name", !strcmp(evsel->name, "krava"));
+ TEST_ASSERT_VAL("wrong name", !strcmp(perf_evsel__name(evsel), "krava"));
- /* cpu/config=2/" */
+ /* cpu/config=2/u" */
evsel = list_entry(evsel->node.next, struct perf_evsel, node);
TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type);
TEST_ASSERT_VAL("wrong config", 2 == evsel->attr.config);
- TEST_ASSERT_VAL("wrong name", !strcmp(evsel->name, "raw 0x2"));
+ TEST_ASSERT_VAL("wrong name",
+ !strcmp(perf_evsel__name(evsel), "raw 0x2:u"));
+
+ return 0;
+}
+
+static int test__checkterms_simple(struct list_head *terms)
+{
+ struct parse_events__term *term;
+
+ /* config=10 */
+ term = list_entry(terms->next, struct parse_events__term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 10);
+ TEST_ASSERT_VAL("wrong config", !term->config);
+
+ /* config1 */
+ term = list_entry(term->list.next, struct parse_events__term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG1);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 1);
+ TEST_ASSERT_VAL("wrong config", !term->config);
+
+ /* config2=3 */
+ term = list_entry(term->list.next, struct parse_events__term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_CONFIG2);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 3);
+ TEST_ASSERT_VAL("wrong config", !term->config);
+
+ /* umask=1*/
+ term = list_entry(term->list.next, struct parse_events__term, list);
+ TEST_ASSERT_VAL("wrong type term",
+ term->type_term == PARSE_EVENTS__TERM_TYPE_USER);
+ TEST_ASSERT_VAL("wrong type val",
+ term->type_val == PARSE_EVENTS__TERM_TYPE_NUM);
+ TEST_ASSERT_VAL("wrong val", term->val.num == 1);
+ TEST_ASSERT_VAL("wrong config", !strcmp(term->config, "umask"));
return 0;
}
@@ -541,10 +624,16 @@ static struct test__event_st test__events[] = {
.name = "instructions:H",
.check = test__checkevent_exclude_guest_modifier,
},
+ [26] = {
+ .name = "mem:0:rw",
+ .check = test__checkevent_breakpoint_rw,
+ },
+ [27] = {
+ .name = "mem:0:rw:kp",
+ .check = test__checkevent_breakpoint_rw_modifier,
+ },
};
-#define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st))
-
static struct test__event_st test__events_pmu[] = {
[0] = {
.name = "cpu/config=10,config1,config2=3,period=1000/u",
@@ -556,10 +645,23 @@ static struct test__event_st test__events_pmu[] = {
},
};
-#define TEST__EVENTS_PMU_CNT (sizeof(test__events_pmu) / \
- sizeof(struct test__event_st))
+struct test__term {
+ const char *str;
+ __u32 type;
+ int (*check)(struct list_head *terms);
+};
+
+static struct test__term test__terms[] = {
+ [0] = {
+ .str = "config=10,config1,config2=3,umask=1",
+ .check = test__checkterms_simple,
+ },
+};
+
+#define TEST__TERMS_CNT (sizeof(test__terms) / \
+ sizeof(struct test__term))
-static int test(struct test__event_st *e)
+static int test_event(struct test__event_st *e)
{
struct perf_evlist *evlist;
int ret;
@@ -590,7 +692,48 @@ static int test_events(struct test__event_st *events, unsigned cnt)
struct test__event_st *e = &events[i];
pr_debug("running test %d '%s'\n", i, e->name);
- ret = test(e);
+ ret = test_event(e);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int test_term(struct test__term *t)
+{
+ struct list_head *terms;
+ int ret;
+
+ terms = malloc(sizeof(*terms));
+ if (!terms)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(terms);
+
+ ret = parse_events_terms(terms, t->str);
+ if (ret) {
+ pr_debug("failed to parse terms '%s', err %d\n",
+ t->str , ret);
+ return ret;
+ }
+
+ ret = t->check(terms);
+ parse_events__free_terms(terms);
+
+ return ret;
+}
+
+static int test_terms(struct test__term *terms, unsigned cnt)
+{
+ int ret = 0;
+ unsigned i;
+
+ for (i = 0; i < cnt; i++) {
+ struct test__term *t = &terms[i];
+
+ pr_debug("running test %d '%s'\n", i, t->str);
+ ret = test_term(t);
if (ret)
break;
}
@@ -617,9 +760,17 @@ int parse_events__test(void)
{
int ret;
- ret = test_events(test__events, TEST__EVENTS_CNT);
- if (!ret && test_pmu())
- ret = test_events(test__events_pmu, TEST__EVENTS_PMU_CNT);
+#define TEST_EVENTS(tests) \
+do { \
+ ret = test_events(tests, ARRAY_SIZE(tests)); \
+ if (ret) \
+ return ret; \
+} while (0)
- return ret;
+ TEST_EVENTS(test__events);
+
+ if (test_pmu())
+ TEST_EVENTS(test__events_pmu);
+
+ return test_terms(test__terms, ARRAY_SIZE(test__terms));
}
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 05dbc8b3c767..1aa721d7c10f 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -11,14 +11,14 @@
#include "cache.h"
#include "header.h"
#include "debugfs.h"
+#include "parse-events-bison.h"
+#define YY_EXTRA_TYPE int
#include "parse-events-flex.h"
#include "pmu.h"
#define MAX_NAME_LEN 100
struct event_symbol {
- u8 type;
- u64 config;
const char *symbol;
const char *alias;
};
@@ -26,32 +26,88 @@ struct event_symbol {
#ifdef PARSER_DEBUG
extern int parse_events_debug;
#endif
-int parse_events_parse(struct list_head *list, int *idx);
-
-#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
-#define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
-
-static struct event_symbol event_symbols[] = {
- { CHW(CPU_CYCLES), "cpu-cycles", "cycles" },
- { CHW(STALLED_CYCLES_FRONTEND), "stalled-cycles-frontend", "idle-cycles-frontend" },
- { CHW(STALLED_CYCLES_BACKEND), "stalled-cycles-backend", "idle-cycles-backend" },
- { CHW(INSTRUCTIONS), "instructions", "" },
- { CHW(CACHE_REFERENCES), "cache-references", "" },
- { CHW(CACHE_MISSES), "cache-misses", "" },
- { CHW(BRANCH_INSTRUCTIONS), "branch-instructions", "branches" },
- { CHW(BRANCH_MISSES), "branch-misses", "" },
- { CHW(BUS_CYCLES), "bus-cycles", "" },
- { CHW(REF_CPU_CYCLES), "ref-cycles", "" },
-
- { CSW(CPU_CLOCK), "cpu-clock", "" },
- { CSW(TASK_CLOCK), "task-clock", "" },
- { CSW(PAGE_FAULTS), "page-faults", "faults" },
- { CSW(PAGE_FAULTS_MIN), "minor-faults", "" },
- { CSW(PAGE_FAULTS_MAJ), "major-faults", "" },
- { CSW(CONTEXT_SWITCHES), "context-switches", "cs" },
- { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
- { CSW(ALIGNMENT_FAULTS), "alignment-faults", "" },
- { CSW(EMULATION_FAULTS), "emulation-faults", "" },
+int parse_events_parse(void *data, void *scanner);
+
+static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = {
+ .symbol = "cpu-cycles",
+ .alias = "cycles",
+ },
+ [PERF_COUNT_HW_INSTRUCTIONS] = {
+ .symbol = "instructions",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = {
+ .symbol = "cache-references",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_CACHE_MISSES] = {
+ .symbol = "cache-misses",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
+ .symbol = "branch-instructions",
+ .alias = "branches",
+ },
+ [PERF_COUNT_HW_BRANCH_MISSES] = {
+ .symbol = "branch-misses",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_BUS_CYCLES] = {
+ .symbol = "bus-cycles",
+ .alias = "",
+ },
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
+ .symbol = "stalled-cycles-frontend",
+ .alias = "idle-cycles-frontend",
+ },
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
+ .symbol = "stalled-cycles-backend",
+ .alias = "idle-cycles-backend",
+ },
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = {
+ .symbol = "ref-cycles",
+ .alias = "",
+ },
+};
+
+static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
+ [PERF_COUNT_SW_CPU_CLOCK] = {
+ .symbol = "cpu-clock",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_TASK_CLOCK] = {
+ .symbol = "task-clock",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_PAGE_FAULTS] = {
+ .symbol = "page-faults",
+ .alias = "faults",
+ },
+ [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
+ .symbol = "context-switches",
+ .alias = "cs",
+ },
+ [PERF_COUNT_SW_CPU_MIGRATIONS] = {
+ .symbol = "cpu-migrations",
+ .alias = "migrations",
+ },
+ [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
+ .symbol = "minor-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
+ .symbol = "major-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
+ .symbol = "alignment-faults",
+ .alias = "",
+ },
+ [PERF_COUNT_SW_EMULATION_FAULTS] = {
+ .symbol = "emulation-faults",
+ .alias = "",
+ },
};
#define __PERF_EVENT_FIELD(config, name) \
@@ -62,63 +118,6 @@ static struct event_symbol event_symbols[] = {
#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
-static const char *sw_event_names[PERF_COUNT_SW_MAX] = {
- "cpu-clock",
- "task-clock",
- "page-faults",
- "context-switches",
- "CPU-migrations",
- "minor-faults",
- "major-faults",
- "alignment-faults",
- "emulation-faults",
-};
-
-#define MAX_ALIASES 8
-
-static const char *hw_cache[PERF_COUNT_HW_CACHE_MAX][MAX_ALIASES] = {
- { "L1-dcache", "l1-d", "l1d", "L1-data", },
- { "L1-icache", "l1-i", "l1i", "L1-instruction", },
- { "LLC", "L2", },
- { "dTLB", "d-tlb", "Data-TLB", },
- { "iTLB", "i-tlb", "Instruction-TLB", },
- { "branch", "branches", "bpu", "btb", "bpc", },
- { "node", },
-};
-
-static const char *hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][MAX_ALIASES] = {
- { "load", "loads", "read", },
- { "store", "stores", "write", },
- { "prefetch", "prefetches", "speculative-read", "speculative-load", },
-};
-
-static const char *hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
- [MAX_ALIASES] = {
- { "refs", "Reference", "ops", "access", },
- { "misses", "miss", },
-};
-
-#define C(x) PERF_COUNT_HW_CACHE_##x
-#define CACHE_READ (1 << C(OP_READ))
-#define CACHE_WRITE (1 << C(OP_WRITE))
-#define CACHE_PREFETCH (1 << C(OP_PREFETCH))
-#define COP(x) (1 << x)
-
-/*
- * cache operartion stat
- * L1I : Read and prefetch only
- * ITLB and BPU : Read-only
- */
-static unsigned long hw_cache_stat[C(MAX)] = {
- [C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
- [C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
- [C(ITLB)] = (CACHE_READ),
- [C(BPU)] = (CACHE_READ),
- [C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
-};
-
#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
if (sys_dirent.d_type == DT_DIR && \
@@ -218,48 +217,6 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
return NULL;
}
-#define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
-static const char *tracepoint_id_to_name(u64 config)
-{
- static char buf[TP_PATH_LEN];
- struct tracepoint_path *path;
-
- path = tracepoint_id_to_path(config);
- if (path) {
- snprintf(buf, TP_PATH_LEN, "%s:%s", path->system, path->name);
- free(path->name);
- free(path->system);
- free(path);
- } else
- snprintf(buf, TP_PATH_LEN, "%s:%s", "unknown", "unknown");
-
- return buf;
-}
-
-static int is_cache_op_valid(u8 cache_type, u8 cache_op)
-{
- if (hw_cache_stat[cache_type] & COP(cache_op))
- return 1; /* valid */
- else
- return 0; /* invalid */
-}
-
-static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
-{
- static char name[50];
-
- if (cache_result) {
- sprintf(name, "%s-%s-%s", hw_cache[cache_type][0],
- hw_cache_op[cache_op][0],
- hw_cache_result[cache_result][0]);
- } else {
- sprintf(name, "%s-%s", hw_cache[cache_type][0],
- hw_cache_op[cache_op][1]);
- }
-
- return name;
-}
-
const char *event_type(int type)
{
switch (type) {
@@ -282,76 +239,6 @@ const char *event_type(int type)
return "unknown";
}
-const char *event_name(struct perf_evsel *evsel)
-{
- u64 config = evsel->attr.config;
- int type = evsel->attr.type;
-
- if (type == PERF_TYPE_RAW || type == PERF_TYPE_HARDWARE) {
- /*
- * XXX minimal fix, see comment on perf_evsen__name, this static buffer
- * will go away together with event_name in the next devel cycle.
- */
- static char bf[128];
- perf_evsel__name(evsel, bf, sizeof(bf));
- return bf;
- }
-
- if (evsel->name)
- return evsel->name;
-
- return __event_name(type, config);
-}
-
-const char *__event_name(int type, u64 config)
-{
- static char buf[32];
-
- if (type == PERF_TYPE_RAW) {
- sprintf(buf, "raw 0x%" PRIx64, config);
- return buf;
- }
-
- switch (type) {
- case PERF_TYPE_HARDWARE:
- return __perf_evsel__hw_name(config);
-
- case PERF_TYPE_HW_CACHE: {
- u8 cache_type, cache_op, cache_result;
-
- cache_type = (config >> 0) & 0xff;
- if (cache_type > PERF_COUNT_HW_CACHE_MAX)
- return "unknown-ext-hardware-cache-type";
-
- cache_op = (config >> 8) & 0xff;
- if (cache_op > PERF_COUNT_HW_CACHE_OP_MAX)
- return "unknown-ext-hardware-cache-op";
-
- cache_result = (config >> 16) & 0xff;
- if (cache_result > PERF_COUNT_HW_CACHE_RESULT_MAX)
- return "unknown-ext-hardware-cache-result";
-
- if (!is_cache_op_valid(cache_type, cache_op))
- return "invalid-cache";
-
- return event_cache_name(cache_type, cache_op, cache_result);
- }
-
- case PERF_TYPE_SOFTWARE:
- if (config < PERF_COUNT_SW_MAX && sw_event_names[config])
- return sw_event_names[config];
- return "unknown-software";
-
- case PERF_TYPE_TRACEPOINT:
- return tracepoint_id_to_name(config);
-
- default:
- break;
- }
-
- return "unknown";
-}
-
static int add_event(struct list_head **_list, int *idx,
struct perf_event_attr *attr, char *name)
{
@@ -373,19 +260,20 @@ static int add_event(struct list_head **_list, int *idx,
return -ENOMEM;
}
- evsel->name = strdup(name);
+ if (name)
+ evsel->name = strdup(name);
list_add_tail(&evsel->node, list);
*_list = list;
return 0;
}
-static int parse_aliases(char *str, const char *names[][MAX_ALIASES], int size)
+static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
{
int i, j;
int n, longest = -1;
for (i = 0; i < size; i++) {
- for (j = 0; j < MAX_ALIASES && names[i][j]; j++) {
+ for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
n = strlen(names[i][j]);
if (n > longest && !strncasecmp(str, names[i][j], n))
longest = n;
@@ -410,7 +298,7 @@ int parse_events_add_cache(struct list_head **list, int *idx,
* No fallback - if we cannot get a clear cache type
* then bail out:
*/
- cache_type = parse_aliases(type, hw_cache,
+ cache_type = parse_aliases(type, perf_evsel__hw_cache,
PERF_COUNT_HW_CACHE_MAX);
if (cache_type == -1)
return -EINVAL;
@@ -423,18 +311,18 @@ int parse_events_add_cache(struct list_head **list, int *idx,
snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str);
if (cache_op == -1) {
- cache_op = parse_aliases(str, hw_cache_op,
+ cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
PERF_COUNT_HW_CACHE_OP_MAX);
if (cache_op >= 0) {
- if (!is_cache_op_valid(cache_type, cache_op))
+ if (!perf_evsel__is_cache_op_valid(cache_type, cache_op))
return -EINVAL;
continue;
}
}
if (cache_result == -1) {
- cache_result = parse_aliases(str, hw_cache_result,
- PERF_COUNT_HW_CACHE_RESULT_MAX);
+ cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
+ PERF_COUNT_HW_CACHE_RESULT_MAX);
if (cache_result >= 0)
continue;
}
@@ -549,21 +437,31 @@ parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
if (!type || !type[i])
break;
+#define CHECK_SET_TYPE(bit) \
+do { \
+ if (attr->bp_type & bit) \
+ return -EINVAL; \
+ else \
+ attr->bp_type |= bit; \
+} while (0)
+
switch (type[i]) {
case 'r':
- attr->bp_type |= HW_BREAKPOINT_R;
+ CHECK_SET_TYPE(HW_BREAKPOINT_R);
break;
case 'w':
- attr->bp_type |= HW_BREAKPOINT_W;
+ CHECK_SET_TYPE(HW_BREAKPOINT_W);
break;
case 'x':
- attr->bp_type |= HW_BREAKPOINT_X;
+ CHECK_SET_TYPE(HW_BREAKPOINT_X);
break;
default:
return -EINVAL;
}
}
+#undef CHECK_SET_TYPE
+
if (!attr->bp_type) /* Default */
attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
@@ -574,7 +472,6 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx,
void *ptr, char *type)
{
struct perf_event_attr attr;
- char name[MAX_NAME_LEN];
memset(&attr, 0, sizeof(attr));
attr.bp_addr = (unsigned long) ptr;
@@ -593,8 +490,7 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx,
attr.type = PERF_TYPE_BREAKPOINT;
- snprintf(name, MAX_NAME_LEN, "mem:%p:%s", ptr, type ? type : "rw");
- return add_event(list, idx, &attr, name);
+ return add_event(list, idx, &attr, NULL);
}
static int config_term(struct perf_event_attr *attr,
@@ -666,8 +562,7 @@ int parse_events_add_numeric(struct list_head **list, int *idx,
config_attr(&attr, head_config, 1))
return -EINVAL;
- return add_event(list, idx, &attr,
- (char *) __event_name(type, config));
+ return add_event(list, idx, &attr, NULL);
}
static int parse_events__is_name_term(struct parse_events__term *term)
@@ -675,8 +570,7 @@ static int parse_events__is_name_term(struct parse_events__term *term)
return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
}
-static char *pmu_event_name(struct perf_event_attr *attr,
- struct list_head *head_terms)
+static char *pmu_event_name(struct list_head *head_terms)
{
struct parse_events__term *term;
@@ -684,7 +578,7 @@ static char *pmu_event_name(struct perf_event_attr *attr,
if (parse_events__is_name_term(term))
return term->val.str;
- return (char *) __event_name(PERF_TYPE_RAW, attr->config);
+ return NULL;
}
int parse_events_add_pmu(struct list_head **list, int *idx,
@@ -699,6 +593,9 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
memset(&attr, 0, sizeof(attr));
+ if (perf_pmu__check_alias(pmu, head_config))
+ return -EINVAL;
+
/*
* Configure hardcoded terms first, no need to check
* return value when called with fail == 0 ;)
@@ -709,7 +606,7 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
return -EINVAL;
return add_event(list, idx, &attr,
- pmu_event_name(&attr, head_config));
+ pmu_event_name(head_config));
}
void parse_events_update_lists(struct list_head *list_event,
@@ -787,27 +684,62 @@ int parse_events_modifier(struct list_head *list, char *str)
return 0;
}
-int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
+static int parse_events__scanner(const char *str, void *data, int start_token)
{
- LIST_HEAD(list);
- LIST_HEAD(list_tmp);
YY_BUFFER_STATE buffer;
- int ret, idx = evlist->nr_entries;
+ void *scanner;
+ int ret;
+
+ ret = parse_events_lex_init_extra(start_token, &scanner);
+ if (ret)
+ return ret;
- buffer = parse_events__scan_string(str);
+ buffer = parse_events__scan_string(str, scanner);
#ifdef PARSER_DEBUG
parse_events_debug = 1;
#endif
- ret = parse_events_parse(&list, &idx);
+ ret = parse_events_parse(data, scanner);
- parse_events__flush_buffer(buffer);
- parse_events__delete_buffer(buffer);
- parse_events_lex_destroy();
+ parse_events__flush_buffer(buffer, scanner);
+ parse_events__delete_buffer(buffer, scanner);
+ parse_events_lex_destroy(scanner);
+ return ret;
+}
+
+/*
+ * parse event config string, return a list of event terms.
+ */
+int parse_events_terms(struct list_head *terms, const char *str)
+{
+ struct parse_events_data__terms data = {
+ .terms = NULL,
+ };
+ int ret;
+ ret = parse_events__scanner(str, &data, PE_START_TERMS);
if (!ret) {
- int entries = idx - evlist->nr_entries;
- perf_evlist__splice_list_tail(evlist, &list, entries);
+ list_splice(data.terms, terms);
+ free(data.terms);
+ return 0;
+ }
+
+ parse_events__free_terms(data.terms);
+ return ret;
+}
+
+int parse_events(struct perf_evlist *evlist, const char *str, int unset __used)
+{
+ struct parse_events_data__events data = {
+ .list = LIST_HEAD_INIT(data.list),
+ .idx = evlist->nr_entries,
+ };
+ int ret;
+
+ ret = parse_events__scanner(str, &data, PE_START_EVENTS);
+ if (!ret) {
+ int entries = data.idx - evlist->nr_entries;
+ perf_evlist__splice_list_tail(evlist, &data.list, entries);
return 0;
}
@@ -946,16 +878,13 @@ int is_valid_tracepoint(const char *event_string)
return 0;
}
-void print_events_type(u8 type)
+static void __print_events_type(u8 type, struct event_symbol *syms,
+ unsigned max)
{
- struct event_symbol *syms = event_symbols;
- unsigned int i;
char name[64];
+ unsigned i;
- for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
- if (type != syms->type)
- continue;
-
+ for (i = 0; i < max ; i++, syms++) {
if (strlen(syms->alias))
snprintf(name, sizeof(name), "%s OR %s",
syms->symbol, syms->alias);
@@ -967,19 +896,28 @@ void print_events_type(u8 type)
}
}
+void print_events_type(u8 type)
+{
+ if (type == PERF_TYPE_SOFTWARE)
+ __print_events_type(type, event_symbols_sw, PERF_COUNT_SW_MAX);
+ else
+ __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX);
+}
+
int print_hwcache_events(const char *event_glob)
{
unsigned int type, op, i, printed = 0;
+ char name[64];
for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
/* skip invalid cache type */
- if (!is_cache_op_valid(type, op))
+ if (!perf_evsel__is_cache_op_valid(type, op))
continue;
for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
- char *name = event_cache_name(type, op, i);
-
+ __perf_evsel__hw_cache_type_op_res_name(type, op, i,
+ name, sizeof(name));
if (event_glob != NULL && !strglobmatch(name, event_glob))
continue;
@@ -993,26 +931,13 @@ int print_hwcache_events(const char *event_glob)
return printed;
}
-/*
- * Print the help text for the event symbols:
- */
-void print_events(const char *event_glob)
+static void print_symbol_events(const char *event_glob, unsigned type,
+ struct event_symbol *syms, unsigned max)
{
- unsigned int i, type, prev_type = -1, printed = 0, ntypes_printed = 0;
- struct event_symbol *syms = event_symbols;
+ unsigned i, printed = 0;
char name[MAX_NAME_LEN];
- printf("\n");
- printf("List of pre-defined events (to be used in -e):\n");
-
- for (i = 0; i < ARRAY_SIZE(event_symbols); i++, syms++) {
- type = syms->type;
-
- if (type != prev_type && printed) {
- printf("\n");
- printed = 0;
- ntypes_printed++;
- }
+ for (i = 0; i < max; i++, syms++) {
if (event_glob != NULL &&
!(strglobmatch(syms->symbol, event_glob) ||
@@ -1023,17 +948,31 @@ void print_events(const char *event_glob)
snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
else
strncpy(name, syms->symbol, MAX_NAME_LEN);
- printf(" %-50s [%s]\n", name,
- event_type_descriptors[type]);
- prev_type = type;
- ++printed;
+ printf(" %-50s [%s]\n", name, event_type_descriptors[type]);
+
+ printed++;
}
- if (ntypes_printed) {
- printed = 0;
+ if (printed)
printf("\n");
- }
+}
+
+/*
+ * Print the help text for the event symbols:
+ */
+void print_events(const char *event_glob)
+{
+
+ printf("\n");
+ printf("List of pre-defined events (to be used in -e):\n");
+
+ print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
+ event_symbols_hw, PERF_COUNT_HW_MAX);
+
+ print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
+ event_symbols_sw, PERF_COUNT_SW_MAX);
+
print_hwcache_events(event_glob);
if (event_glob != NULL)
@@ -1106,6 +1045,13 @@ int parse_events__term_str(struct parse_events__term **term,
config, str, 0);
}
+int parse_events__term_clone(struct parse_events__term **new,
+ struct parse_events__term *term)
+{
+ return new_term(new, term->type_val, term->type_term, term->config,
+ term->val.str, term->val.num);
+}
+
void parse_events__free_terms(struct list_head *terms)
{
struct parse_events__term *term, *h;
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 8cac57ab4ee6..ee9c218a193c 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -26,13 +26,12 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
extern bool have_tracepoints(struct list_head *evlist);
const char *event_type(int type);
-const char *event_name(struct perf_evsel *event);
-extern const char *__event_name(int type, u64 config);
extern int parse_events_option(const struct option *opt, const char *str,
int unset);
extern int parse_events(struct perf_evlist *evlist, const char *str,
int unset);
+extern int parse_events_terms(struct list_head *terms, const char *str);
extern int parse_filter(const struct option *opt, const char *str, int unset);
#define EVENTS_HELP_MAX (128*1024)
@@ -63,11 +62,22 @@ struct parse_events__term {
struct list_head list;
};
+struct parse_events_data__events {
+ struct list_head list;
+ int idx;
+};
+
+struct parse_events_data__terms {
+ struct list_head *terms;
+};
+
int parse_events__is_hardcoded_term(struct parse_events__term *term);
int parse_events__term_num(struct parse_events__term **_term,
int type_term, char *config, long num);
int parse_events__term_str(struct parse_events__term **_term,
int type_term, char *config, char *str);
+int parse_events__term_clone(struct parse_events__term **new,
+ struct parse_events__term *term);
void parse_events__free_terms(struct list_head *terms);
int parse_events_modifier(struct list_head *list, char *str);
int parse_events_add_tracepoint(struct list_head **list, int *idx,
@@ -83,8 +93,7 @@ int parse_events_add_pmu(struct list_head **list, int *idx,
char *pmu , struct list_head *head_config);
void parse_events_update_lists(struct list_head *list_event,
struct list_head *list_all);
-void parse_events_error(struct list_head *list_all,
- int *idx, char const *msg);
+void parse_events_error(void *data, void *scanner, char const *msg);
int parse_events__test(void);
void print_events(const char *event_glob);
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 618a8e788399..384ca74c6b22 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -1,4 +1,6 @@
+%option reentrant
+%option bison-bridge
%option prefix="parse_events_"
%option stack
@@ -8,7 +10,10 @@
#include "parse-events-bison.h"
#include "parse-events.h"
-static int __value(char *str, int base, int token)
+char *parse_events_get_text(yyscan_t yyscanner);
+YYSTYPE *parse_events_get_lval(yyscan_t yyscanner);
+
+static int __value(YYSTYPE *yylval, char *str, int base, int token)
{
long num;
@@ -17,35 +22,48 @@ static int __value(char *str, int base, int token)
if (errno)
return PE_ERROR;
- parse_events_lval.num = num;
+ yylval->num = num;
return token;
}
-static int value(int base)
+static int value(yyscan_t scanner, int base)
{
- return __value(parse_events_text, base, PE_VALUE);
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ return __value(yylval, text, base, PE_VALUE);
}
-static int raw(void)
+static int raw(yyscan_t scanner)
{
- return __value(parse_events_text + 1, 16, PE_RAW);
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ return __value(yylval, text + 1, 16, PE_RAW);
}
-static int str(int token)
+static int str(yyscan_t scanner, int token)
{
- parse_events_lval.str = strdup(parse_events_text);
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+ char *text = parse_events_get_text(scanner);
+
+ yylval->str = strdup(text);
return token;
}
-static int sym(int type, int config)
+static int sym(yyscan_t scanner, int type, int config)
{
- parse_events_lval.num = (type << 16) + config;
- return PE_VALUE_SYM;
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+ yylval->num = (type << 16) + config;
+ return type == PERF_TYPE_HARDWARE ? PE_VALUE_SYM_HW : PE_VALUE_SYM_SW;
}
-static int term(int type)
+static int term(yyscan_t scanner, int type)
{
- parse_events_lval.num = type;
+ YYSTYPE *yylval = parse_events_get_lval(scanner);
+
+ yylval->num = type;
return PE_TERM;
}
@@ -58,28 +76,41 @@ num_hex 0x[a-fA-F0-9]+
num_raw_hex [a-fA-F0-9]+
name [a-zA-Z_*?][a-zA-Z0-9_*?]*
modifier_event [ukhpGH]{1,8}
-modifier_bp [rwx]
+modifier_bp [rwx]{1,3}
%%
-cpu-cycles|cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
-stalled-cycles-frontend|idle-cycles-frontend { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
-stalled-cycles-backend|idle-cycles-backend { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
-instructions { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
-cache-references { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
-cache-misses { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
-branch-instructions|branches { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
-branch-misses { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
-bus-cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
-ref-cycles { return sym(PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
-cpu-clock { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
-task-clock { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
-page-faults|faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
-minor-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
-major-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
-context-switches|cs { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
-cpu-migrations|migrations { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
-alignment-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
-emulation-faults { return sym(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+
+%{
+ {
+ int start_token;
+
+ start_token = (int) parse_events_get_extra(yyscanner);
+ if (start_token) {
+ parse_events_set_extra(NULL, yyscanner);
+ return start_token;
+ }
+ }
+%}
+
+cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); }
+stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); }
+stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); }
+instructions { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS); }
+cache-references { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_REFERENCES); }
+cache-misses { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CACHE_MISSES); }
+branch-instructions|branches { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); }
+branch-misses { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BRANCH_MISSES); }
+bus-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); }
+ref-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
+cpu-clock { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_CLOCK); }
+task-clock { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK); }
+page-faults|faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS); }
+minor-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MIN); }
+major-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_PAGE_FAULTS_MAJ); }
+context-switches|cs { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES); }
+cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
+alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
+emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
L1-dcache|l1-d|l1d|L1-data |
L1-icache|l1-i|l1i|L1-instruction |
@@ -87,14 +118,14 @@ LLC|L2 |
dTLB|d-tlb|Data-TLB |
iTLB|i-tlb|Instruction-TLB |
branch|branches|bpu|btb|bpc |
-node { return str(PE_NAME_CACHE_TYPE); }
+node { return str(yyscanner, PE_NAME_CACHE_TYPE); }
load|loads|read |
store|stores|write |
prefetch|prefetches |
speculative-read|speculative-load |
refs|Reference|ops|access |
-misses|miss { return str(PE_NAME_CACHE_OP_RESULT); }
+misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); }
/*
* These are event config hardcoded term names to be specified
@@ -102,38 +133,39 @@ misses|miss { return str(PE_NAME_CACHE_OP_RESULT); }
* so we can put them here directly. In case the we have a conflict
* in future, this needs to go into '//' condition block.
*/
-config { return term(PARSE_EVENTS__TERM_TYPE_CONFIG); }
-config1 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG1); }
-config2 { return term(PARSE_EVENTS__TERM_TYPE_CONFIG2); }
-name { return term(PARSE_EVENTS__TERM_TYPE_NAME); }
-period { return term(PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
-branch_type { return term(PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
+config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
+config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
+config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
+name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); }
+period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); }
+branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); }
mem: { BEGIN(mem); return PE_PREFIX_MEM; }
-r{num_raw_hex} { return raw(); }
-{num_dec} { return value(10); }
-{num_hex} { return value(16); }
+r{num_raw_hex} { return raw(yyscanner); }
+{num_dec} { return value(yyscanner, 10); }
+{num_hex} { return value(yyscanner, 16); }
-{modifier_event} { return str(PE_MODIFIER_EVENT); }
-{name} { return str(PE_NAME); }
+{modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); }
+{name} { return str(yyscanner, PE_NAME); }
"/" { return '/'; }
- { return '-'; }
, { return ','; }
: { return ':'; }
= { return '='; }
+\n { }
<mem>{
-{modifier_bp} { return str(PE_MODIFIER_BP); }
+{modifier_bp} { return str(yyscanner, PE_MODIFIER_BP); }
: { return ':'; }
-{num_dec} { return value(10); }
-{num_hex} { return value(16); }
+{num_dec} { return value(yyscanner, 10); }
+{num_hex} { return value(yyscanner, 16); }
/*
* We need to separate 'mem:' scanner part, in order to get specific
* modifier bits parsed out. Otherwise we would need to handle PE_NAME
* and we'd need to parse it manually. During the escape from <mem>
* state we need to put the escaping char back, so we dont miss it.
*/
-. { unput(*parse_events_text); BEGIN(INITIAL); }
+. { unput(*yytext); BEGIN(INITIAL); }
/*
* We destroy the scanner after reaching EOF,
* but anyway just to be sure get back to INIT state.
@@ -143,7 +175,7 @@ r{num_raw_hex} { return raw(); }
%%
-int parse_events_wrap(void)
+int parse_events_wrap(void *scanner __used)
{
return 1;
}
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 362cc59332ae..2bc5fbff2b5d 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -1,7 +1,8 @@
-
+%pure-parser
%name-prefix "parse_events_"
-%parse-param {struct list_head *list_all}
-%parse-param {int *idx}
+%parse-param {void *_data}
+%parse-param {void *scanner}
+%lex-param {void* scanner}
%{
@@ -12,8 +13,9 @@
#include "types.h"
#include "util.h"
#include "parse-events.h"
+#include "parse-events-bison.h"
-extern int parse_events_lex (void);
+extern int parse_events_lex (YYSTYPE* lvalp, void* scanner);
#define ABORT_ON(val) \
do { \
@@ -23,14 +25,16 @@ do { \
%}
-%token PE_VALUE PE_VALUE_SYM PE_RAW PE_TERM
+%token PE_START_EVENTS PE_START_TERMS
+%token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM
%token PE_NAME
%token PE_MODIFIER_EVENT PE_MODIFIER_BP
%token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT
%token PE_PREFIX_MEM PE_PREFIX_RAW
%token PE_ERROR
%type <num> PE_VALUE
-%type <num> PE_VALUE_SYM
+%type <num> PE_VALUE_SYM_HW
+%type <num> PE_VALUE_SYM_SW
%type <num> PE_RAW
%type <num> PE_TERM
%type <str> PE_NAME
@@ -38,6 +42,7 @@ do { \
%type <str> PE_NAME_CACHE_OP_RESULT
%type <str> PE_MODIFIER_EVENT
%type <str> PE_MODIFIER_BP
+%type <num> value_sym
%type <head> event_config
%type <term> event_term
%type <head> event_pmu
@@ -58,24 +63,33 @@ do { \
}
%%
+start:
+PE_START_EVENTS events
+|
+PE_START_TERMS terms
+
events:
events ',' event | event
event:
event_def PE_MODIFIER_EVENT
{
+ struct parse_events_data__events *data = _data;
+
/*
* Apply modifier on all events added by single event definition
* (there could be more events added for multiple tracepoint
* definitions via '*?'.
*/
ABORT_ON(parse_events_modifier($1, $2));
- parse_events_update_lists($1, list_all);
+ parse_events_update_lists($1, &data->list);
}
|
event_def
{
- parse_events_update_lists($1, list_all);
+ struct parse_events_data__events *data = _data;
+
+ parse_events_update_lists($1, &data->list);
}
event_def: event_pmu |
@@ -89,104 +103,131 @@ event_def: event_pmu |
event_pmu:
PE_NAME '/' event_config '/'
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
- ABORT_ON(parse_events_add_pmu(&list, idx, $1, $3));
+ ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3));
parse_events__free_terms($3);
$$ = list;
}
+value_sym:
+PE_VALUE_SYM_HW
+|
+PE_VALUE_SYM_SW
+
event_legacy_symbol:
-PE_VALUE_SYM '/' event_config '/'
+value_sym '/' event_config '/'
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
int type = $1 >> 16;
int config = $1 & 255;
- ABORT_ON(parse_events_add_numeric(&list, idx, type, config, $3));
+ ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+ type, config, $3));
parse_events__free_terms($3);
$$ = list;
}
|
-PE_VALUE_SYM sep_slash_dc
+value_sym sep_slash_dc
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
int type = $1 >> 16;
int config = $1 & 255;
- ABORT_ON(parse_events_add_numeric(&list, idx, type, config, NULL));
+ ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+ type, config, NULL));
$$ = list;
}
event_legacy_cache:
PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
- ABORT_ON(parse_events_add_cache(&list, idx, $1, $3, $5));
+ ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5));
$$ = list;
}
|
PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
- ABORT_ON(parse_events_add_cache(&list, idx, $1, $3, NULL));
+ ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL));
$$ = list;
}
|
PE_NAME_CACHE_TYPE
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
- ABORT_ON(parse_events_add_cache(&list, idx, $1, NULL, NULL));
+ ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL));
$$ = list;
}
event_legacy_mem:
PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
- ABORT_ON(parse_events_add_breakpoint(&list, idx, (void *) $2, $4));
+ ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
+ (void *) $2, $4));
$$ = list;
}
|
PE_PREFIX_MEM PE_VALUE sep_dc
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
- ABORT_ON(parse_events_add_breakpoint(&list, idx, (void *) $2, NULL));
+ ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
+ (void *) $2, NULL));
$$ = list;
}
event_legacy_tracepoint:
PE_NAME ':' PE_NAME
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
- ABORT_ON(parse_events_add_tracepoint(&list, idx, $1, $3));
+ ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3));
$$ = list;
}
event_legacy_numeric:
PE_VALUE ':' PE_VALUE
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
- ABORT_ON(parse_events_add_numeric(&list, idx, $1, $3, NULL));
+ ABORT_ON(parse_events_add_numeric(&list, &data->idx, $1, $3, NULL));
$$ = list;
}
event_legacy_raw:
PE_RAW
{
+ struct parse_events_data__events *data = _data;
struct list_head *list = NULL;
- ABORT_ON(parse_events_add_numeric(&list, idx, PERF_TYPE_RAW, $1, NULL));
+ ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+ PERF_TYPE_RAW, $1, NULL));
$$ = list;
}
+terms: event_config
+{
+ struct parse_events_data__terms *data = _data;
+ data->terms = $1;
+}
+
event_config:
event_config ',' event_term
{
@@ -267,8 +308,7 @@ sep_slash_dc: '/' | ':' |
%%
-void parse_events_error(struct list_head *list_all __used,
- int *idx __used,
+void parse_events_error(void *data __used, void *scanner __used,
char const *msg __used)
{
}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index a119a5371699..67715a42cd6d 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -72,7 +72,7 @@ static int pmu_format(char *name, struct list_head *format)
"%s/bus/event_source/devices/%s/format", sysfs, name);
if (stat(path, &st) < 0)
- return -1;
+ return 0; /* no error if format does not exist */
if (pmu_format_parse(path, format))
return -1;
@@ -80,6 +80,114 @@ static int pmu_format(char *name, struct list_head *format)
return 0;
}
+static int perf_pmu__new_alias(struct list_head *list, char *name, FILE *file)
+{
+ struct perf_pmu__alias *alias;
+ char buf[256];
+ int ret;
+
+ ret = fread(buf, 1, sizeof(buf), file);
+ if (ret == 0)
+ return -EINVAL;
+ buf[ret] = 0;
+
+ alias = malloc(sizeof(*alias));
+ if (!alias)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&alias->terms);
+ ret = parse_events_terms(&alias->terms, buf);
+ if (ret) {
+ free(alias);
+ return ret;
+ }
+
+ alias->name = strdup(name);
+ list_add_tail(&alias->list, list);
+ return 0;
+}
+
+/*
+ * Process all the sysfs attributes located under the directory
+ * specified in 'dir' parameter.
+ */
+static int pmu_aliases_parse(char *dir, struct list_head *head)
+{
+ struct dirent *evt_ent;
+ DIR *event_dir;
+ int ret = 0;
+
+ event_dir = opendir(dir);
+ if (!event_dir)
+ return -EINVAL;
+
+ while (!ret && (evt_ent = readdir(event_dir))) {
+ char path[PATH_MAX];
+ char *name = evt_ent->d_name;
+ FILE *file;
+
+ if (!strcmp(name, ".") || !strcmp(name, ".."))
+ continue;
+
+ snprintf(path, PATH_MAX, "%s/%s", dir, name);
+
+ ret = -EINVAL;
+ file = fopen(path, "r");
+ if (!file)
+ break;
+ ret = perf_pmu__new_alias(head, name, file);
+ fclose(file);
+ }
+
+ closedir(event_dir);
+ return ret;
+}
+
+/*
+ * Reading the pmu event aliases definition, which should be located at:
+ * /sys/bus/event_source/devices/<dev>/events as sysfs group attributes.
+ */
+static int pmu_aliases(char *name, struct list_head *head)
+{
+ struct stat st;
+ char path[PATH_MAX];
+ const char *sysfs;
+
+ sysfs = sysfs_find_mountpoint();
+ if (!sysfs)
+ return -1;
+
+ snprintf(path, PATH_MAX,
+ "%s/bus/event_source/devices/%s/events", sysfs, name);
+
+ if (stat(path, &st) < 0)
+ return -1;
+
+ if (pmu_aliases_parse(path, head))
+ return -1;
+
+ return 0;
+}
+
+static int pmu_alias_terms(struct perf_pmu__alias *alias,
+ struct list_head *terms)
+{
+ struct parse_events__term *term, *clone;
+ LIST_HEAD(list);
+ int ret;
+
+ list_for_each_entry(term, &alias->terms, list) {
+ ret = parse_events__term_clone(&clone, term);
+ if (ret) {
+ parse_events__free_terms(&list);
+ return ret;
+ }
+ list_add_tail(&clone->list, &list);
+ }
+ list_splice(&list, terms);
+ return 0;
+}
+
/*
* Reading/parsing the default pmu type value, which should be
* located at:
@@ -118,6 +226,7 @@ static struct perf_pmu *pmu_lookup(char *name)
{
struct perf_pmu *pmu;
LIST_HEAD(format);
+ LIST_HEAD(aliases);
__u32 type;
/*
@@ -135,10 +244,15 @@ static struct perf_pmu *pmu_lookup(char *name)
if (!pmu)
return NULL;
+ pmu_aliases(name, &aliases);
+
INIT_LIST_HEAD(&pmu->format);
+ INIT_LIST_HEAD(&pmu->aliases);
list_splice(&format, &pmu->format);
+ list_splice(&aliases, &pmu->aliases);
pmu->name = strdup(name);
pmu->type = type;
+ list_add_tail(&pmu->list, &pmus);
return pmu;
}
@@ -279,6 +393,59 @@ int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
return pmu_config(&pmu->format, attr, head_terms);
}
+static struct perf_pmu__alias *pmu_find_alias(struct perf_pmu *pmu,
+ struct parse_events__term *term)
+{
+ struct perf_pmu__alias *alias;
+ char *name;
+
+ if (parse_events__is_hardcoded_term(term))
+ return NULL;
+
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+ if (term->val.num != 1)
+ return NULL;
+ if (pmu_find_format(&pmu->format, term->config))
+ return NULL;
+ name = term->config;
+ } else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
+ if (strcasecmp(term->config, "event"))
+ return NULL;
+ name = term->val.str;
+ } else {
+ return NULL;
+ }
+
+ list_for_each_entry(alias, &pmu->aliases, list) {
+ if (!strcasecmp(alias->name, name))
+ return alias;
+ }
+ return NULL;
+}
+
+/*
+ * Find alias in the terms list and replace it with the terms
+ * defined for the alias
+ */
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms)
+{
+ struct parse_events__term *term, *h;
+ struct perf_pmu__alias *alias;
+ int ret;
+
+ list_for_each_entry_safe(term, h, head_terms, list) {
+ alias = pmu_find_alias(pmu, term);
+ if (!alias)
+ continue;
+ ret = pmu_alias_terms(alias, &term->list);
+ if (ret)
+ return ret;
+ list_del(&term->list);
+ free(term);
+ }
+ return 0;
+}
+
int perf_pmu__new_format(struct list_head *list, char *name,
int config, unsigned long *bits)
{
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 68c0db965e1f..535f2c5258ab 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -19,17 +19,26 @@ struct perf_pmu__format {
struct list_head list;
};
+struct perf_pmu__alias {
+ char *name;
+ struct list_head terms;
+ struct list_head list;
+};
+
struct perf_pmu {
char *name;
__u32 type;
struct list_head format;
+ struct list_head aliases;
struct list_head list;
};
struct perf_pmu *perf_pmu__find(char *name);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms);
-
+int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms);
+struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
+ struct list_head *head_terms);
int perf_pmu_wrap(void);
void perf_pmu_error(struct list_head *list, char *name, char const *msg);
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index 4c1b3d72a1d2..02dfa19a467f 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -209,6 +209,10 @@ static void define_event_symbols(struct event_format *event,
define_symbolic_values(args->symbol.symbols, ev_name,
cur_field_name);
break;
+ case PRINT_HEX:
+ define_event_symbols(event, ev_name, args->hex.field);
+ define_event_symbols(event, ev_name, args->hex.size);
+ break;
case PRINT_BSTRING:
case PRINT_DYNAMIC_ARRAY:
case PRINT_STRING:
@@ -233,7 +237,8 @@ static void define_event_symbols(struct event_format *event,
define_event_symbols(event, ev_name, args->next);
}
-static inline struct event_format *find_cache_event(int type)
+static inline
+struct event_format *find_cache_event(struct pevent *pevent, int type)
{
static char ev_name[256];
struct event_format *event;
@@ -241,7 +246,7 @@ static inline struct event_format *find_cache_event(int type)
if (events[type])
return events[type];
- events[type] = event = trace_find_event(type);
+ events[type] = event = pevent_find_event(pevent, type);
if (!event)
return NULL;
@@ -252,7 +257,8 @@ static inline struct event_format *find_cache_event(int type)
return event;
}
-static void perl_process_tracepoint(union perf_event *pevent __unused,
+static void perl_process_tracepoint(union perf_event *perf_event __unused,
+ struct pevent *pevent,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __unused,
@@ -275,13 +281,13 @@ static void perl_process_tracepoint(union perf_event *pevent __unused,
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
return;
- type = trace_parse_common_type(data);
+ type = trace_parse_common_type(pevent, data);
- event = find_cache_event(type);
+ event = find_cache_event(pevent, type);
if (!event)
die("ug! no event found for type %d", type);
- pid = trace_parse_common_pid(data);
+ pid = trace_parse_common_pid(pevent, data);
sprintf(handler, "%s::%s", event->system, event->name);
@@ -314,7 +320,8 @@ static void perl_process_tracepoint(union perf_event *pevent __unused,
offset = field->offset;
XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0)));
} else { /* FIELD_IS_NUMERIC */
- val = read_size(data + field->offset, field->size);
+ val = read_size(pevent, data + field->offset,
+ field->size);
if (field->flags & FIELD_IS_SIGNED) {
XPUSHs(sv_2mortal(newSViv(val)));
} else {
@@ -368,14 +375,15 @@ static void perl_process_event_generic(union perf_event *pevent __unused,
LEAVE;
}
-static void perl_process_event(union perf_event *pevent,
+static void perl_process_event(union perf_event *event,
+ struct pevent *pevent,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
struct thread *thread)
{
- perl_process_tracepoint(pevent, sample, evsel, machine, thread);
- perl_process_event_generic(pevent, sample, evsel, machine, thread);
+ perl_process_tracepoint(event, pevent, sample, evsel, machine, thread);
+ perl_process_event_generic(event, sample, evsel, machine, thread);
}
static void run_start_sub(void)
@@ -448,7 +456,7 @@ static int perl_stop_script(void)
return 0;
}
-static int perl_generate_script(const char *outfile)
+static int perl_generate_script(struct pevent *pevent, const char *outfile)
{
struct event_format *event = NULL;
struct format_field *f;
@@ -495,7 +503,7 @@ static int perl_generate_script(const char *outfile)
fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n");
fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n");
- while ((event = trace_find_next_event(event))) {
+ while ((event = trace_find_next_event(pevent, event))) {
fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name);
fprintf(ofp, "\tmy (");
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index acb9795286c4..ce4d1b0c3862 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -166,6 +166,10 @@ static void define_event_symbols(struct event_format *event,
define_values(PRINT_SYMBOL, args->symbol.symbols, ev_name,
cur_field_name);
break;
+ case PRINT_HEX:
+ define_event_symbols(event, ev_name, args->hex.field);
+ define_event_symbols(event, ev_name, args->hex.size);
+ break;
case PRINT_STRING:
break;
case PRINT_TYPE:
@@ -190,7 +194,8 @@ static void define_event_symbols(struct event_format *event,
define_event_symbols(event, ev_name, args->next);
}
-static inline struct event_format *find_cache_event(int type)
+static inline
+struct event_format *find_cache_event(struct pevent *pevent, int type)
{
static char ev_name[256];
struct event_format *event;
@@ -198,7 +203,7 @@ static inline struct event_format *find_cache_event(int type)
if (events[type])
return events[type];
- events[type] = event = trace_find_event(type);
+ events[type] = event = pevent_find_event(pevent, type);
if (!event)
return NULL;
@@ -209,7 +214,8 @@ static inline struct event_format *find_cache_event(int type)
return event;
}
-static void python_process_event(union perf_event *pevent __unused,
+static void python_process_event(union perf_event *perf_event __unused,
+ struct pevent *pevent,
struct perf_sample *sample,
struct perf_evsel *evsel __unused,
struct machine *machine __unused,
@@ -233,13 +239,13 @@ static void python_process_event(union perf_event *pevent __unused,
if (!t)
Py_FatalError("couldn't create Python tuple");
- type = trace_parse_common_type(data);
+ type = trace_parse_common_type(pevent, data);
- event = find_cache_event(type);
+ event = find_cache_event(pevent, type);
if (!event)
die("ug! no event found for type %d", type);
- pid = trace_parse_common_pid(data);
+ pid = trace_parse_common_pid(pevent, data);
sprintf(handler_name, "%s__%s", event->system, event->name);
@@ -284,7 +290,8 @@ static void python_process_event(union perf_event *pevent __unused,
offset = field->offset;
obj = PyString_FromString((char *)data + offset);
} else { /* FIELD_IS_NUMERIC */
- val = read_size(data + field->offset, field->size);
+ val = read_size(pevent, data + field->offset,
+ field->size);
if (field->flags & FIELD_IS_SIGNED) {
if ((long long)val >= LONG_MIN &&
(long long)val <= LONG_MAX)
@@ -438,7 +445,7 @@ out:
return err;
}
-static int python_generate_script(const char *outfile)
+static int python_generate_script(struct pevent *pevent, const char *outfile)
{
struct event_format *event = NULL;
struct format_field *f;
@@ -487,7 +494,7 @@ static int python_generate_script(const char *outfile)
fprintf(ofp, "def trace_end():\n");
fprintf(ofp, "\tprint \"in trace_end\"\n\n");
- while ((event = trace_find_next_event(event))) {
+ while ((event = trace_find_next_event(pevent, event))) {
fprintf(ofp, "def %s__%s(", event->system, event->name);
fprintf(ofp, "event_name, ");
fprintf(ofp, "context, ");
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index c3e399bcf18d..8e485592ca20 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -14,6 +14,7 @@
#include "sort.h"
#include "util.h"
#include "cpumap.h"
+#include "event-parse.h"
static int perf_session__open(struct perf_session *self, bool force)
{
@@ -289,7 +290,6 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
}
int machine__resolve_callchain(struct machine *self,
- struct perf_evsel *evsel __used,
struct thread *thread,
struct ip_callchain *chain,
struct symbol **parent)
@@ -926,7 +926,7 @@ static struct machine *
else
pid = event->ip.pid;
- return perf_session__find_machine(session, pid);
+ return perf_session__findnew_machine(session, pid);
}
return perf_session__find_host_machine(session);
@@ -1449,7 +1449,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
ret += hists__fprintf_nr_events(&session->hists, fp);
list_for_each_entry(pos, &session->evlist->entries, node) {
- ret += fprintf(fp, "%s stats:\n", event_name(pos));
+ ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
ret += hists__fprintf_nr_events(&pos->hists, fp);
}
@@ -1490,8 +1490,8 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
}
void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
- struct machine *machine, struct perf_evsel *evsel,
- int print_sym, int print_dso, int print_symoffset)
+ struct machine *machine, int print_sym,
+ int print_dso, int print_symoffset)
{
struct addr_location al;
struct callchain_cursor_node *node;
@@ -1505,7 +1505,7 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
if (symbol_conf.use_callchain && sample->callchain) {
- if (machine__resolve_callchain(machine, evsel, al.thread,
+ if (machine__resolve_callchain(machine, al.thread,
sample->callchain, NULL) != 0) {
if (verbose)
error("Failed to resolve callchain. Skipping\n");
@@ -1611,3 +1611,58 @@ void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
perf_header__fprintf_info(session, fp, full);
fprintf(fp, "# ========\n#\n");
}
+
+
+int __perf_session__set_tracepoints_handlers(struct perf_session *session,
+ const struct perf_evsel_str_handler *assocs,
+ size_t nr_assocs)
+{
+ struct perf_evlist *evlist = session->evlist;
+ struct event_format *format;
+ struct perf_evsel *evsel;
+ char *tracepoint, *name;
+ size_t i;
+ int err;
+
+ for (i = 0; i < nr_assocs; i++) {
+ err = -ENOMEM;
+ tracepoint = strdup(assocs[i].name);
+ if (tracepoint == NULL)
+ goto out;
+
+ err = -ENOENT;
+ name = strchr(tracepoint, ':');
+ if (name == NULL)
+ goto out_free;
+
+ *name++ = '\0';
+ format = pevent_find_event_by_name(session->pevent,
+ tracepoint, name);
+ if (format == NULL) {
+ /*
+ * Adding a handler for an event not in the session,
+ * just ignore it.
+ */
+ goto next;
+ }
+
+ evsel = perf_evlist__find_tracepoint_by_id(evlist, format->id);
+ if (evsel == NULL)
+ goto next;
+
+ err = -EEXIST;
+ if (evsel->handler.func != NULL)
+ goto out_free;
+ evsel->handler.func = assocs[i].handler;
+next:
+ free(tracepoint);
+ }
+
+ err = 0;
+out:
+ return err;
+
+out_free:
+ free(tracepoint);
+ goto out;
+}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 0c702e3f0a36..7c435bde6eb0 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -33,6 +33,7 @@ struct perf_session {
struct machine host_machine;
struct rb_root machines;
struct perf_evlist *evlist;
+ struct pevent *pevent;
/*
* FIXME: Need to split this up further, we need global
* stats + per event stats. 'perf diff' also needs
@@ -151,11 +152,20 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
- struct machine *machine, struct perf_evsel *evsel,
- int print_sym, int print_dso, int print_symoffset);
+ struct machine *machine, int print_sym,
+ int print_dso, int print_symoffset);
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap);
void perf_session__fprintf_info(struct perf_session *s, FILE *fp, bool full);
+
+struct perf_evsel_str_handler;
+
+int __perf_session__set_tracepoints_handlers(struct perf_session *session,
+ const struct perf_evsel_str_handler *assocs,
+ size_t nr_assocs);
+
+#define perf_session__set_tracepoints_handlers(session, array) \
+ __perf_session__set_tracepoints_handlers(session, array, ARRAY_SIZE(array))
#endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index a27237430c5f..0f5a0a496bc4 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -241,6 +241,54 @@ struct sort_entry sort_sym = {
.se_width_idx = HISTC_SYMBOL,
};
+/* --sort srcline */
+
+static int64_t
+sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return (int64_t)(right->ip - left->ip);
+}
+
+static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf,
+ size_t size, unsigned int width __used)
+{
+ FILE *fp;
+ char cmd[PATH_MAX + 2], *path = self->srcline, *nl;
+ size_t line_len;
+
+ if (path != NULL)
+ goto out_path;
+
+ snprintf(cmd, sizeof(cmd), "addr2line -e %s %016" PRIx64,
+ self->ms.map->dso->long_name, self->ip);
+ fp = popen(cmd, "r");
+ if (!fp)
+ goto out_ip;
+
+ if (getline(&path, &line_len, fp) < 0 || !line_len)
+ goto out_ip;
+ fclose(fp);
+ self->srcline = strdup(path);
+ if (self->srcline == NULL)
+ goto out_ip;
+
+ nl = strchr(self->srcline, '\n');
+ if (nl != NULL)
+ *nl = '\0';
+ path = self->srcline;
+out_path:
+ return repsep_snprintf(bf, size, "%s", path);
+out_ip:
+ return repsep_snprintf(bf, size, "%-#*llx", BITS_PER_LONG / 4, self->ip);
+}
+
+struct sort_entry sort_srcline = {
+ .se_header = "Source:Line",
+ .se_cmp = sort__srcline_cmp,
+ .se_snprintf = hist_entry__srcline_snprintf,
+ .se_width_idx = HISTC_SRCLINE,
+};
+
/* --sort parent */
static int64_t
@@ -439,6 +487,7 @@ static struct sort_dimension sort_dimensions[] = {
DIM(SORT_PARENT, "parent", sort_parent),
DIM(SORT_CPU, "cpu", sort_cpu),
DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
+ DIM(SORT_SRCLINE, "srcline", sort_srcline),
};
int sort_dimension__add(const char *tok)
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 472aa5a63a58..e724b26acd51 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -71,6 +71,7 @@ struct hist_entry {
char level;
bool used;
u8 filtered;
+ char *srcline;
struct symbol *parent;
union {
unsigned long position;
@@ -93,6 +94,7 @@ enum sort_type {
SORT_SYM_FROM,
SORT_SYM_TO,
SORT_MISPREDICT,
+ SORT_SRCLINE,
};
/*
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index d5836382ff2c..199bc4d8905d 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -313,3 +313,25 @@ int strtailcmp(const char *s1, const char *s2)
return 0;
}
+/**
+ * rtrim - Removes trailing whitespace from @s.
+ * @s: The string to be stripped.
+ *
+ * Note that the first trailing whitespace is replaced with a %NUL-terminator
+ * in the given string @s. Returns @s.
+ */
+char *rtrim(char *s)
+{
+ size_t size = strlen(s);
+ char *end;
+
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end >= s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ return s;
+}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 3e2e5ea0f03f..50958bbeb26a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1478,14 +1478,31 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size)
goto out;
}
- sec = elf_section_by_name(elf, &ehdr, &shdr,
- ".note.gnu.build-id", NULL);
- if (sec == NULL) {
+ /*
+ * Check following sections for notes:
+ * '.note.gnu.build-id'
+ * '.notes'
+ * '.note' (VDSO specific)
+ */
+ do {
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".note.gnu.build-id", NULL);
+ if (sec)
+ break;
+
sec = elf_section_by_name(elf, &ehdr, &shdr,
".notes", NULL);
- if (sec == NULL)
- goto out;
- }
+ if (sec)
+ break;
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".note", NULL);
+ if (sec)
+ break;
+
+ return err;
+
+ } while (0);
data = elf_getdata(sec, NULL);
if (data == NULL)
@@ -1590,11 +1607,62 @@ out:
return err;
}
+static int filename__read_debuglink(const char *filename,
+ char *debuglink, size_t size)
+{
+ int fd, err = -1;
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+ Elf_Data *data;
+ Elf_Scn *sec;
+ Elf_Kind ek;
+
+ fd = open(filename, O_RDONLY);
+ if (fd < 0)
+ goto out;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL) {
+ pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename);
+ goto out_close;
+ }
+
+ ek = elf_kind(elf);
+ if (ek != ELF_K_ELF)
+ goto out_close;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL) {
+ pr_err("%s: cannot get elf header.\n", __func__);
+ goto out_close;
+ }
+
+ sec = elf_section_by_name(elf, &ehdr, &shdr,
+ ".gnu_debuglink", NULL);
+ if (sec == NULL)
+ goto out_close;
+
+ data = elf_getdata(sec, NULL);
+ if (data == NULL)
+ goto out_close;
+
+ /* the start of this section is a zero-terminated string */
+ strncpy(debuglink, data->d_buf, size);
+
+ elf_end(elf);
+
+out_close:
+ close(fd);
+out:
+ return err;
+}
+
char dso__symtab_origin(const struct dso *dso)
{
static const char origin[] = {
[SYMTAB__KALLSYMS] = 'k',
[SYMTAB__JAVA_JIT] = 'j',
+ [SYMTAB__DEBUGLINK] = 'l',
[SYMTAB__BUILD_ID_CACHE] = 'B',
[SYMTAB__FEDORA_DEBUGINFO] = 'f',
[SYMTAB__UBUNTU_DEBUGINFO] = 'u',
@@ -1662,10 +1730,22 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
*/
want_symtab = 1;
restart:
- for (dso->symtab_type = SYMTAB__BUILD_ID_CACHE;
+ for (dso->symtab_type = SYMTAB__DEBUGLINK;
dso->symtab_type != SYMTAB__NOT_FOUND;
dso->symtab_type++) {
switch (dso->symtab_type) {
+ case SYMTAB__DEBUGLINK: {
+ char *debuglink;
+ strncpy(name, dso->long_name, size);
+ debuglink = name + dso->long_name_len;
+ while (debuglink != name && *debuglink != '/')
+ debuglink--;
+ if (*debuglink == '/')
+ debuglink++;
+ filename__read_debuglink(dso->long_name, debuglink,
+ size - (debuglink - name));
+ }
+ break;
case SYMTAB__BUILD_ID_CACHE:
/* skip the locally configured cache if a symfs is given */
if (symbol_conf.symfs[0] ||
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index af0752b1aca1..a884b99017f0 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -257,6 +257,7 @@ enum symtab_type {
SYMTAB__KALLSYMS = 0,
SYMTAB__GUEST_KALLSYMS,
SYMTAB__JAVA_JIT,
+ SYMTAB__DEBUGLINK,
SYMTAB__BUILD_ID_CACHE,
SYMTAB__FEDORA_DEBUGINFO,
SYMTAB__UBUNTU_DEBUGINFO,
diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c
index abe0e8e95068..7eeebcee291c 100644
--- a/tools/perf/util/top.c
+++ b/tools/perf/util/top.c
@@ -65,7 +65,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
top->freq ? "Hz" : "");
}
- ret += SNPRINTF(bf + ret, size - ret, "%s", event_name(top->sym_evsel));
+ ret += SNPRINTF(bf + ret, size - ret, "%s", perf_evsel__name(top->sym_evsel));
ret += SNPRINTF(bf + ret, size - ret, "], ");
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index df2fddbf0cd2..0715c843c2e7 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -32,29 +32,25 @@ int header_page_size_size;
int header_page_ts_size;
int header_page_data_offset;
-struct pevent *perf_pevent;
-static struct pevent *pevent;
-
bool latency_format;
-int read_trace_init(int file_bigendian, int host_bigendian)
+struct pevent *read_trace_init(int file_bigendian, int host_bigendian)
{
- if (pevent)
- return 0;
-
- perf_pevent = pevent_alloc();
- pevent = perf_pevent;
+ struct pevent *pevent = pevent_alloc();
- pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
- pevent_set_file_bigendian(pevent, file_bigendian);
- pevent_set_host_bigendian(pevent, host_bigendian);
+ if (pevent != NULL) {
+ pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT);
+ pevent_set_file_bigendian(pevent, file_bigendian);
+ pevent_set_host_bigendian(pevent, host_bigendian);
+ }
- return 0;
+ return pevent;
}
static int get_common_field(struct scripting_context *context,
int *offset, int *size, const char *type)
{
+ struct pevent *pevent = context->pevent;
struct event_format *event;
struct format_field *field;
@@ -150,7 +146,7 @@ void *raw_field_ptr(struct event_format *event, const char *name, void *data)
return data + field->offset;
}
-int trace_parse_common_type(void *data)
+int trace_parse_common_type(struct pevent *pevent, void *data)
{
struct pevent_record record;
@@ -158,7 +154,7 @@ int trace_parse_common_type(void *data)
return pevent_data_type(pevent, &record);
}
-int trace_parse_common_pid(void *data)
+int trace_parse_common_pid(struct pevent *pevent, void *data)
{
struct pevent_record record;
@@ -166,27 +162,21 @@ int trace_parse_common_pid(void *data)
return pevent_data_pid(pevent, &record);
}
-unsigned long long read_size(void *ptr, int size)
+unsigned long long read_size(struct pevent *pevent, void *ptr, int size)
{
return pevent_read_number(pevent, ptr, size);
}
-struct event_format *trace_find_event(int type)
-{
- return pevent_find_event(pevent, type);
-}
-
-
-void print_trace_event(int cpu, void *data, int size)
+void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
{
struct event_format *event;
struct pevent_record record;
struct trace_seq s;
int type;
- type = trace_parse_common_type(data);
+ type = trace_parse_common_type(pevent, data);
- event = trace_find_event(type);
+ event = pevent_find_event(pevent, type);
if (!event) {
warning("ug! no event found for type %d", type);
return;
@@ -198,13 +188,12 @@ void print_trace_event(int cpu, void *data, int size)
record.data = data;
trace_seq_init(&s);
- pevent_print_event(pevent, &s, &record);
+ pevent_event_info(&s, event, &record);
trace_seq_do_printf(&s);
- printf("\n");
}
-void print_event(int cpu, void *data, int size, unsigned long long nsecs,
- char *comm)
+void print_event(struct pevent *pevent, int cpu, void *data, int size,
+ unsigned long long nsecs, char *comm)
{
struct pevent_record record;
struct trace_seq s;
@@ -227,7 +216,8 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs,
printf("\n");
}
-void parse_proc_kallsyms(char *file, unsigned int size __unused)
+void parse_proc_kallsyms(struct pevent *pevent,
+ char *file, unsigned int size __unused)
{
unsigned long long addr;
char *func;
@@ -258,7 +248,8 @@ void parse_proc_kallsyms(char *file, unsigned int size __unused)
}
}
-void parse_ftrace_printk(char *file, unsigned int size __unused)
+void parse_ftrace_printk(struct pevent *pevent,
+ char *file, unsigned int size __unused)
{
unsigned long long addr;
char *printk;
@@ -282,17 +273,19 @@ void parse_ftrace_printk(char *file, unsigned int size __unused)
}
}
-int parse_ftrace_file(char *buf, unsigned long size)
+int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size)
{
return pevent_parse_event(pevent, buf, size, "ftrace");
}
-int parse_event_file(char *buf, unsigned long size, char *sys)
+int parse_event_file(struct pevent *pevent,
+ char *buf, unsigned long size, char *sys)
{
return pevent_parse_event(pevent, buf, size, sys);
}
-struct event_format *trace_find_next_event(struct event_format *event)
+struct event_format *trace_find_next_event(struct pevent *pevent,
+ struct event_format *event)
{
static int idx;
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index f097e0dd6c5c..719ed74a8565 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -114,20 +114,20 @@ static void skip(int size)
};
}
-static unsigned int read4(void)
+static unsigned int read4(struct pevent *pevent)
{
unsigned int data;
read_or_die(&data, 4);
- return __data2host4(perf_pevent, data);
+ return __data2host4(pevent, data);
}
-static unsigned long long read8(void)
+static unsigned long long read8(struct pevent *pevent)
{
unsigned long long data;
read_or_die(&data, 8);
- return __data2host8(perf_pevent, data);
+ return __data2host8(pevent, data);
}
static char *read_string(void)
@@ -168,12 +168,12 @@ static char *read_string(void)
return str;
}
-static void read_proc_kallsyms(void)
+static void read_proc_kallsyms(struct pevent *pevent)
{
unsigned int size;
char *buf;
- size = read4();
+ size = read4(pevent);
if (!size)
return;
@@ -181,29 +181,29 @@ static void read_proc_kallsyms(void)
read_or_die(buf, size);
buf[size] = '\0';
- parse_proc_kallsyms(buf, size);
+ parse_proc_kallsyms(pevent, buf, size);
free(buf);
}
-static void read_ftrace_printk(void)
+static void read_ftrace_printk(struct pevent *pevent)
{
unsigned int size;
char *buf;
- size = read4();
+ size = read4(pevent);
if (!size)
return;
buf = malloc_or_die(size);
read_or_die(buf, size);
- parse_ftrace_printk(buf, size);
+ parse_ftrace_printk(pevent, buf, size);
free(buf);
}
-static void read_header_files(void)
+static void read_header_files(struct pevent *pevent)
{
unsigned long long size;
char *header_event;
@@ -214,7 +214,7 @@ static void read_header_files(void)
if (memcmp(buf, "header_page", 12) != 0)
die("did not read header page");
- size = read8();
+ size = read8(pevent);
skip(size);
/*
@@ -227,47 +227,48 @@ static void read_header_files(void)
if (memcmp(buf, "header_event", 13) != 0)
die("did not read header event");
- size = read8();
+ size = read8(pevent);
header_event = malloc_or_die(size);
read_or_die(header_event, size);
free(header_event);
}
-static void read_ftrace_file(unsigned long long size)
+static void read_ftrace_file(struct pevent *pevent, unsigned long long size)
{
char *buf;
buf = malloc_or_die(size);
read_or_die(buf, size);
- parse_ftrace_file(buf, size);
+ parse_ftrace_file(pevent, buf, size);
free(buf);
}
-static void read_event_file(char *sys, unsigned long long size)
+static void read_event_file(struct pevent *pevent, char *sys,
+ unsigned long long size)
{
char *buf;
buf = malloc_or_die(size);
read_or_die(buf, size);
- parse_event_file(buf, size, sys);
+ parse_event_file(pevent, buf, size, sys);
free(buf);
}
-static void read_ftrace_files(void)
+static void read_ftrace_files(struct pevent *pevent)
{
unsigned long long size;
int count;
int i;
- count = read4();
+ count = read4(pevent);
for (i = 0; i < count; i++) {
- size = read8();
- read_ftrace_file(size);
+ size = read8(pevent);
+ read_ftrace_file(pevent, size);
}
}
-static void read_event_files(void)
+static void read_event_files(struct pevent *pevent)
{
unsigned long long size;
char *sys;
@@ -275,15 +276,15 @@ static void read_event_files(void)
int count;
int i,x;
- systems = read4();
+ systems = read4(pevent);
for (i = 0; i < systems; i++) {
sys = read_string();
- count = read4();
+ count = read4(pevent);
for (x=0; x < count; x++) {
- size = read8();
- read_event_file(sys, size);
+ size = read8(pevent);
+ read_event_file(pevent, sys, size);
}
}
}
@@ -377,7 +378,7 @@ static int calc_index(void *ptr, int cpu)
return (unsigned long)ptr - (unsigned long)cpu_data[cpu].page;
}
-struct pevent_record *trace_peek_data(int cpu)
+struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu)
{
struct pevent_record *data;
void *page = cpu_data[cpu].page;
@@ -399,15 +400,15 @@ struct pevent_record *trace_peek_data(int cpu)
/* FIXME: handle header page */
if (header_page_ts_size != 8)
die("expected a long long type for timestamp");
- cpu_data[cpu].timestamp = data2host8(perf_pevent, ptr);
+ cpu_data[cpu].timestamp = data2host8(pevent, ptr);
ptr += 8;
switch (header_page_size_size) {
case 4:
- cpu_data[cpu].page_size = data2host4(perf_pevent, ptr);
+ cpu_data[cpu].page_size = data2host4(pevent, ptr);
ptr += 4;
break;
case 8:
- cpu_data[cpu].page_size = data2host8(perf_pevent, ptr);
+ cpu_data[cpu].page_size = data2host8(pevent, ptr);
ptr += 8;
break;
default:
@@ -421,10 +422,10 @@ read_again:
if (idx >= cpu_data[cpu].page_size) {
get_next_page(cpu);
- return trace_peek_data(cpu);
+ return trace_peek_data(pevent, cpu);
}
- type_len_ts = data2host4(perf_pevent, ptr);
+ type_len_ts = data2host4(pevent, ptr);
ptr += 4;
type_len = type_len4host(type_len_ts);
@@ -434,14 +435,14 @@ read_again:
case RINGBUF_TYPE_PADDING:
if (!delta)
die("error, hit unexpected end of page");
- length = data2host4(perf_pevent, ptr);
+ length = data2host4(pevent, ptr);
ptr += 4;
length *= 4;
ptr += length;
goto read_again;
case RINGBUF_TYPE_TIME_EXTEND:
- extend = data2host4(perf_pevent, ptr);
+ extend = data2host4(pevent, ptr);
ptr += 4;
extend <<= TS_SHIFT;
extend += delta;
@@ -452,7 +453,7 @@ read_again:
ptr += 12;
break;
case 0:
- length = data2host4(perf_pevent, ptr);
+ length = data2host4(pevent, ptr);
ptr += 4;
die("here! length=%d", length);
break;
@@ -477,17 +478,17 @@ read_again:
return data;
}
-struct pevent_record *trace_read_data(int cpu)
+struct pevent_record *trace_read_data(struct pevent *pevent, int cpu)
{
struct pevent_record *data;
- data = trace_peek_data(cpu);
+ data = trace_peek_data(pevent, cpu);
cpu_data[cpu].next = NULL;
return data;
}
-ssize_t trace_report(int fd, bool __repipe)
+ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
{
char buf[BUFSIZ];
char test[] = { 23, 8, 68 };
@@ -519,30 +520,32 @@ ssize_t trace_report(int fd, bool __repipe)
file_bigendian = buf[0];
host_bigendian = bigendian();
- read_trace_init(file_bigendian, host_bigendian);
+ *ppevent = read_trace_init(file_bigendian, host_bigendian);
+ if (*ppevent == NULL)
+ die("read_trace_init failed");
read_or_die(buf, 1);
long_size = buf[0];
- page_size = read4();
+ page_size = read4(*ppevent);
- read_header_files();
+ read_header_files(*ppevent);
- read_ftrace_files();
- read_event_files();
- read_proc_kallsyms();
- read_ftrace_printk();
+ read_ftrace_files(*ppevent);
+ read_event_files(*ppevent);
+ read_proc_kallsyms(*ppevent);
+ read_ftrace_printk(*ppevent);
size = calc_data_size - 1;
calc_data_size = 0;
repipe = false;
if (show_funcs) {
- pevent_print_funcs(perf_pevent);
+ pevent_print_funcs(*ppevent);
return size;
}
if (show_printk) {
- pevent_print_printk(perf_pevent);
+ pevent_print_printk(*ppevent);
return size;
}
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 18ae6c1831d3..474aa7a7df43 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -36,6 +36,7 @@ static int stop_script_unsupported(void)
}
static void process_event_unsupported(union perf_event *event __unused,
+ struct pevent *pevent __unused,
struct perf_sample *sample __unused,
struct perf_evsel *evsel __unused,
struct machine *machine __unused,
@@ -61,7 +62,8 @@ static int python_start_script_unsupported(const char *script __unused,
return -1;
}
-static int python_generate_script_unsupported(const char *outfile __unused)
+static int python_generate_script_unsupported(struct pevent *pevent __unused,
+ const char *outfile __unused)
{
print_python_unsupported_msg();
@@ -122,7 +124,8 @@ static int perl_start_script_unsupported(const char *script __unused,
return -1;
}
-static int perl_generate_script_unsupported(const char *outfile __unused)
+static int perl_generate_script_unsupported(struct pevent *pevent __unused,
+ const char *outfile __unused)
{
print_perl_unsupported_msg();
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 639852ac1117..8fef1d6687b7 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -8,6 +8,7 @@
struct machine;
struct perf_sample;
union perf_event;
+struct perf_tool;
struct thread;
extern int header_page_size_size;
@@ -29,35 +30,36 @@ enum {
int bigendian(void);
-int read_trace_init(int file_bigendian, int host_bigendian);
-void print_trace_event(int cpu, void *data, int size);
+struct pevent *read_trace_init(int file_bigendian, int host_bigendian);
+void print_trace_event(struct pevent *pevent, int cpu, void *data, int size);
-void print_event(int cpu, void *data, int size, unsigned long long nsecs,
- char *comm);
+void print_event(struct pevent *pevent, int cpu, void *data, int size,
+ unsigned long long nsecs, char *comm);
-int parse_ftrace_file(char *buf, unsigned long size);
-int parse_event_file(char *buf, unsigned long size, char *sys);
+int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size);
+int parse_event_file(struct pevent *pevent,
+ char *buf, unsigned long size, char *sys);
-struct pevent_record *trace_peek_data(int cpu);
-struct event_format *trace_find_event(int type);
+struct pevent_record *trace_peek_data(struct pevent *pevent, int cpu);
unsigned long long
raw_field_value(struct event_format *event, const char *name, void *data);
void *raw_field_ptr(struct event_format *event, const char *name, void *data);
-void parse_proc_kallsyms(char *file, unsigned int size __unused);
-void parse_ftrace_printk(char *file, unsigned int size __unused);
+void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size);
+void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size);
-ssize_t trace_report(int fd, bool repipe);
+ssize_t trace_report(int fd, struct pevent **pevent, bool repipe);
-int trace_parse_common_type(void *data);
-int trace_parse_common_pid(void *data);
+int trace_parse_common_type(struct pevent *pevent, void *data);
+int trace_parse_common_pid(struct pevent *pevent, void *data);
-struct event_format *trace_find_next_event(struct event_format *event);
-unsigned long long read_size(void *ptr, int size);
+struct event_format *trace_find_next_event(struct pevent *pevent,
+ struct event_format *event);
+unsigned long long read_size(struct pevent *pevent, void *ptr, int size);
unsigned long long eval_flag(const char *flag);
-struct pevent_record *trace_read_data(int cpu);
+struct pevent_record *trace_read_data(struct pevent *pevent, int cpu);
int read_tracing_data(int fd, struct list_head *pattrs);
struct tracing_data {
@@ -77,11 +79,12 @@ struct scripting_ops {
int (*start_script) (const char *script, int argc, const char **argv);
int (*stop_script) (void);
void (*process_event) (union perf_event *event,
+ struct pevent *pevent,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
struct thread *thread);
- int (*generate_script) (const char *outfile);
+ int (*generate_script) (struct pevent *pevent, const char *outfile);
};
int script_spec_register(const char *spec, struct scripting_ops *ops);
@@ -90,6 +93,7 @@ void setup_perl_scripting(void);
void setup_python_scripting(void);
struct scripting_context {
+ struct pevent *pevent;
void *event_data;
};
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 2daaedb83d84..b13c7331eaf8 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -264,4 +264,6 @@ bool is_power_of_2(unsigned long n)
size_t hex_width(u64 v);
+char *rtrim(char *s);
+
#endif
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index b1e091ae2f37..23a41a9f8db9 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -334,6 +334,11 @@ static int assigned_device_enable_host_intx(struct kvm *kvm,
}
#ifdef __KVM_HAVE_MSI
+static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
+{
+ return IRQ_WAKE_THREAD;
+}
+
static int assigned_device_enable_host_msi(struct kvm *kvm,
struct kvm_assigned_dev_kernel *dev)
{
@@ -346,7 +351,7 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
}
dev->host_irq = dev->dev->irq;
- if (request_threaded_irq(dev->host_irq, NULL,
+ if (request_threaded_irq(dev->host_irq, kvm_assigned_dev_msi,
kvm_assigned_dev_thread_msi, 0,
dev->irq_name, dev)) {
pci_disable_msi(dev->dev);
@@ -358,6 +363,11 @@ static int assigned_device_enable_host_msi(struct kvm *kvm,
#endif
#ifdef __KVM_HAVE_MSIX
+static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
+{
+ return IRQ_WAKE_THREAD;
+}
+
static int assigned_device_enable_host_msix(struct kvm *kvm,
struct kvm_assigned_dev_kernel *dev)
{
@@ -374,7 +384,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
for (i = 0; i < dev->entries_nr; i++) {
r = request_threaded_irq(dev->host_msix_entries[i].vector,
- NULL, kvm_assigned_dev_thread_msix,
+ kvm_assigned_dev_msix,
+ kvm_assigned_dev_thread_msix,
0, dev->irq_name, dev);
if (r)
goto err;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f59c1e8de7a2..7d7e2aaffece 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -198,7 +198,7 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
}
static int
-kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
struct kvm_irq_routing_table *irq_rt;
struct _irqfd *irqfd, *tmp;
@@ -212,12 +212,12 @@ kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
return -ENOMEM;
irqfd->kvm = kvm;
- irqfd->gsi = gsi;
+ irqfd->gsi = args->gsi;
INIT_LIST_HEAD(&irqfd->list);
INIT_WORK(&irqfd->inject, irqfd_inject);
INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
- file = eventfd_fget(fd);
+ file = eventfd_fget(args->fd);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto fail;
@@ -298,19 +298,19 @@ kvm_eventfd_init(struct kvm *kvm)
* shutdown any irqfd's that match fd+gsi
*/
static int
-kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
+kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
struct _irqfd *irqfd, *tmp;
struct eventfd_ctx *eventfd;
- eventfd = eventfd_ctx_fdget(fd);
+ eventfd = eventfd_ctx_fdget(args->fd);
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
spin_lock_irq(&kvm->irqfds.lock);
list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
- if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
+ if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
/*
* This rcu_assign_pointer is needed for when
* another thread calls kvm_irq_routing_update before
@@ -338,12 +338,15 @@ kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
}
int
-kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
- if (flags & KVM_IRQFD_FLAG_DEASSIGN)
- return kvm_irqfd_deassign(kvm, fd, gsi);
+ if (args->flags & ~KVM_IRQFD_FLAG_DEASSIGN)
+ return -EINVAL;
+
+ if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
+ return kvm_irqfd_deassign(kvm, args);
- return kvm_irqfd_assign(kvm, fd, gsi);
+ return kvm_irqfd_assign(kvm, args);
}
/*
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 26fd54dc459e..ef61d529a6c4 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -191,7 +191,8 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
}
-int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
+int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
+ int level)
{
u32 old_irr;
u32 mask = 1 << irq;
@@ -201,9 +202,11 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
spin_lock(&ioapic->lock);
old_irr = ioapic->irr;
if (irq >= 0 && irq < IOAPIC_NUM_PINS) {
+ int irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
+ irq_source_id, level);
entry = ioapic->redirtbl[irq];
- level ^= entry.fields.polarity;
- if (!level)
+ irq_level ^= entry.fields.polarity;
+ if (!irq_level)
ioapic->irr &= ~mask;
else {
int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
@@ -221,6 +224,16 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level)
return ret;
}
+void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
+{
+ int i;
+
+ spin_lock(&ioapic->lock);
+ for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
+ __clear_bit(irq_source_id, &ioapic->irq_states[i]);
+ spin_unlock(&ioapic->lock);
+}
+
static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
int trigger_mode)
{
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 32872a09b63f..a30abfe6ed16 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -74,7 +74,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_destroy(struct kvm *kvm);
-int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
+int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
+ int level);
+void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 5afb43114020..83402d74a767 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -33,26 +33,12 @@
#include "ioapic.h"
-static inline int kvm_irq_line_state(unsigned long *irq_state,
- int irq_source_id, int level)
-{
- /* Logical OR for level trig interrupt */
- if (level)
- set_bit(irq_source_id, irq_state);
- else
- clear_bit(irq_source_id, irq_state);
-
- return !!(*irq_state);
-}
-
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level)
{
#ifdef CONFIG_X86
struct kvm_pic *pic = pic_irqchip(kvm);
- level = kvm_irq_line_state(&pic->irq_states[e->irqchip.pin],
- irq_source_id, level);
- return kvm_pic_set_irq(pic, e->irqchip.pin, level);
+ return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
#else
return -1;
#endif
@@ -62,10 +48,7 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
- level = kvm_irq_line_state(&ioapic->irq_states[e->irqchip.pin],
- irq_source_id, level);
-
- return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, level);
+ return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level);
}
inline static bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
@@ -249,8 +232,6 @@ unlock:
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
{
- int i;
-
ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
mutex_lock(&kvm->irq_lock);
@@ -263,14 +244,10 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
if (!irqchip_in_kernel(kvm))
goto unlock;
- for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++) {
- clear_bit(irq_source_id, &kvm->arch.vioapic->irq_states[i]);
- if (i >= 16)
- continue;
+ kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
#ifdef CONFIG_X86
- clear_bit(irq_source_id, &pic_irqchip(kvm)->irq_states[i]);
+ kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
#endif
- }
unlock:
mutex_unlock(&kvm->irq_lock);
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7e140683ff14..246852397e30 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -516,16 +516,32 @@ out_err_nodisable:
return ERR_PTR(r);
}
+/*
+ * Avoid using vmalloc for a small buffer.
+ * Should not be used when the size is statically known.
+ */
+void *kvm_kvzalloc(unsigned long size)
+{
+ if (size > PAGE_SIZE)
+ return vzalloc(size);
+ else
+ return kzalloc(size, GFP_KERNEL);
+}
+
+void kvm_kvfree(const void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ vfree(addr);
+ else
+ kfree(addr);
+}
+
static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
if (!memslot->dirty_bitmap)
return;
- if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
- vfree(memslot->dirty_bitmap);
- else
- kfree(memslot->dirty_bitmap);
-
+ kvm_kvfree(memslot->dirty_bitmap);
memslot->dirty_bitmap = NULL;
}
@@ -617,11 +633,7 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
#ifndef CONFIG_S390
unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
- if (dirty_bytes > PAGE_SIZE)
- memslot->dirty_bitmap = vzalloc(dirty_bytes);
- else
- memslot->dirty_bitmap = kzalloc(dirty_bytes, GFP_KERNEL);
-
+ memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
if (!memslot->dirty_bitmap)
return -ENOMEM;
@@ -1586,7 +1598,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
*/
for (pass = 0; pass < 2 && !yielded; pass++) {
kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!pass && i < last_boosted_vcpu) {
+ if (!pass && i <= last_boosted_vcpu) {
i = last_boosted_vcpu;
continue;
} else if (pass && i > last_boosted_vcpu)
@@ -2047,7 +2059,7 @@ static long kvm_vm_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&data, argp, sizeof data))
goto out;
- r = kvm_irqfd(kvm, data.fd, data.gsi, data.flags);
+ r = kvm_irqfd(kvm, &data);
break;
}
case KVM_IOEVENTFD: {
@@ -2213,7 +2225,7 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
case KVM_CAP_SIGNAL_MSI:
#endif
return 1;
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
+#ifdef KVM_CAP_IRQ_ROUTING
case KVM_CAP_IRQ_ROUTING:
return KVM_MAX_IRQ_ROUTES;
#endif
@@ -2845,6 +2857,7 @@ void kvm_exit(void)
kvm_arch_hardware_unsetup();
kvm_arch_exit();
free_cpumask_var(cpus_hardware_enabled);
+ __free_page(fault_page);
__free_page(hwpoison_page);
__free_page(bad_page);
}